blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
a64e1cd32ad1b48294ce09552702fcbcbf207a19
|
Shell
|
qualiaa/simon
|
/config/simon/functions.sh
|
UTF-8
| 623
| 3.515625
| 4
|
[] |
no_license
|
##########################
## functions
##########################
# these are the functions available to your profile.
# go hog wild!
# Suspend the machine via systemd.
# NOTE(review): this shadows the bash builtin `suspend` — presumably intentional.
suspend() {
  systemctl suspend
}
# Turn the display off immediately via DPMS.
backlight_off() {
  xset dpms force off
}
# Set the backlight brightness.
# $1 - target brightness percent (0-100)
# Fix: quote "$1" so an empty/whitespace argument is passed through intact
# instead of being word-split or dropped.
set_backlight_percent() {
  xbacklight -set "$1"
}
# Print the current backlight brightness as an integer percentage.
# xbacklight reports a float; keep only the part before the decimal point.
get_backlight_percent() {
  xbacklight -get | cut -d '.' -f 1
}
# Cap the backlight at $1 percent: only dim if currently brighter.
# $1 - maximum brightness percent to allow
# Fix: quote the command substitution so test(1) receives exactly one operand
# even if the helper ever prints nothing or extra whitespace.
reduce_backlight() {
  if [ "$(get_backlight_percent)" -gt "$1" ]; then
    set_backlight_percent "$1"
  fi
}
# Raise the backlight to at least $1 percent: only brighten if currently dimmer.
# $1 - minimum brightness percent to guarantee
# Fix: quote the command substitution (same defect as reduce_backlight).
increase_backlight() {
  if [ "$(get_backlight_percent)" -lt "$1" ]; then
    set_backlight_percent "$1"
  fi
}
| true
|
97417a35b31c27fd0540ecced7c3853de44177d0
|
Shell
|
siphoncode/docker-custodian-cron
|
/entrypoint.sh
|
UTF-8
| 340
| 3.03125
| 3
|
[] |
no_license
|
# Container entrypoint: run docker-custodian garbage collection forever,
# once every $INTERVAL seconds.
# Required env vars: INTERVAL, MAX_CONTAINER_AGE, MAX_IMAGE_AGE.
set -e

# Abort with a clear message if any required variable is unset or empty.
: "${INTERVAL:?INTERVAL is not set.}"
: "${MAX_CONTAINER_AGE:?MAX_CONTAINER_AGE is not set.}"
: "${MAX_IMAGE_AGE:?MAX_IMAGE_AGE is not set.}"

# Fix: `while true` (the builtin) instead of `while [ true ]`, which only
# "worked" because any non-empty string is truthy to test(1).
while true; do
  echo
  echo "*** Starting docker-custodian ***"
  echo
  dcgc --max-container-age "$MAX_CONTAINER_AGE" --max-image-age "$MAX_IMAGE_AGE"
  sleep "$INTERVAL"
done
| true
|
34b3db106ea3fa2571d634de2183a3253433a30f
|
Shell
|
smaloron/slim-php-vagrant
|
/install.sh
|
UTF-8
| 866
| 3.25
| 3
|
[] |
no_license
|
# Vagrant provisioning script: LAMP stack (Apache, PHP, MySQL) with a vhost
# pointing at the shared /vagrant_data/public folder.
# Update the package index and upgrade installed software
sudo apt-get update
sudo apt-get -y upgrade
# Installation variables
# NOTE(review): hard-coded root password — fine for a local Vagrant box only.
PASSWORD='123'
# NOTE(review): PHPVERSION is defined but never used below — TODO confirm.
PHPVERSION='7.2'
# Apache and PHP
sudo apt-get install -y apache2
sudo apt-get install -y php
sudo apt-get install -y libapache2-mod-php
# MySQL — preseed the root password so the install is non-interactive
sudo debconf-set-selections <<< "mysql-server mysql-server/root_password password $PASSWORD"
sudo debconf-set-selections <<< "mysql-server mysql-server/root_password_again password $PASSWORD"
sudo apt-get install -y mysql-server php-mysql
# Virtual host (heredoc captured into a variable, then written to the default site)
VHOST=$(cat <<EOF
<VirtualHost *:80>
DocumentRoot "/vagrant_data/public"
<Directory "/vagrant_data/public">
Options All
AllowOverride All
Require all granted
</Directory>
</VirtualHost>
EOF
)
echo "${VHOST}" > /etc/apache2/sites-available/000-default.conf
# Restart Apache to pick up the new vhost
service apache2 restart
| true
|
d8bbcf77520bd3b4e5ef15a61183a3b76bcbb877
|
Shell
|
husheng0/LockstepPlatform
|
/Tools/ECSCodeGen.sh
|
UTF-8
| 331
| 2.578125
| 3
|
[] |
no_license
|
#!/bin/bash
# Generate ECS code with the Lockstep generator, then compile the output.
clear
# Resolve the directory containing this script, even when invoked via a
# relative path or a symlinked shell. Fixes: all expansions quoted so paths
# with spaces work; cd failures abort instead of running tools in the wrong dir.
dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
cd "$dir" || exit 1
pwd
cd ./bin/ || exit 1
echo "1.Code gen"
mono ./Lockstep.Tools.ECSGenerator.exe ../Config/ECSGenerator/Config.json
echo "2.Compile generated code"
msbuild /property:Configuration=Debug /p:WarningLevel=0 /verbosity:minimal ../Src/ECS.ECSOutput/ECS.ECSOutput.csproj
| true
|
659ce3cbdf90423e55920a66146ef67f9e134a12
|
Shell
|
xwang2713/cloud-image-build
|
/tools/aws/ami_cli.sh
|
UTF-8
| 1,557
| 3.640625
| 4
|
[] |
no_license
|
#!/bin/bash
# List or delete HPCC AMIs matching a given version across all AWS regions.
# Requires EC2_ACCOUNT_ID in the environment and configured AWS credentials.
# Example: ./ami_cli.sh 7.0.0 delete
if [ -z "$1" ]; then
echo "Must supply HPCC version"
exit 1
fi
VERSION=$1
# Default action is a dry "list"; second argument may override (case-insensitive).
ACTION="list"
[ -n "$2" ] && ACTION=$2
ACTION=$(echo $ACTION | tr '[:upper:]' '[:lower:]')
# 7.8.x
ami_prefix=hpcc-systems
# 7.6.x
#ami_prefix=hpcc-platform
ami_prefix2=${ami_prefix}-community
#ami_prefix2=${ami_prefix}-dev
# aws version?
#aws ec2 describe-regions | while read x region opt region2
# aws 1.16.x
# NOTE(review): the `read x region region2` field layout depends on the AWS CLI's
# default text output format for describe-regions — verify against the CLI version in use.
# Both while-loops below run in pipeline subshells, so no variables escape them
# (none need to).
aws ec2 describe-regions | while read x region region2
do
echo
echo "Process region $region2 ..."
#ec2-describe-images --region $region2 | while read name id image x
#echo "aws ec2 describe-images --region $region2 --query 'Images[*].{ID:ImageId Name:Name}' \
# --owners $EC2_ACCOUNT_ID | grep ${ami_prefix} | while read id name "
aws ec2 describe-images --region $region2 --query 'Images[*].{ID:ImageId Name:Name}' \
--owners $EC2_ACCOUNT_ID | grep ${ami_prefix} | while read id name
do
#echo "$id $name"
# Keep only images whose name carries the requested prefix+version.
echo $name | grep -q "${ami_prefix2}-${VERSION}"
if [ $? -eq 0 ]
then
if [ "$ACTION" = "delete" ]; then
#echo "aws ec2 deregister-image --region $region2 $id"
# Echo the command being run, then actually deregister the AMI.
echo "aws --region $region2 ec2 deregister-image --image-id $id"
#aws ec2 deregister-image --region $region2 $id
aws --region $region2 ec2 deregister-image --image-id $id
elif [ "$ACTION" = "list" ]; then
echo "$id $name"
fi
fi
done
done
| true
|
966f0e35ff689f5a1b8df169a1e672aa5cf5c346
|
Shell
|
HaydenElza/MC-Admin
|
/stop_server.sh
|
UTF-8
| 499
| 3.921875
| 4
|
[] |
no_license
|
#!/bin/bash
# Stop a Minecraft server running inside a GNU screen session named $SCREENNAME.
# Load config (must define SCREENNAME)
if [ -f server.config ]; then
source server.config
else
echo "Missing 'server.config'."
exit 1
fi
# Define function to send commands to server running in screen
# NOTE(review): "^M" here is the literal two characters caret + M; screen's
# `stuff` appears to accept that as a carriage return — confirm with the
# screen version in use.
to_screen()
{
screen -S $SCREENNAME -p 0 -X stuff "$1^M"
}
# Stop server
# NOTE(review): the `||` fallback below only fires if the LAST command of the
# first group (an echo) fails, so the error branch is effectively unreachable.
# Kept as-is to preserve behavior; a real check would test to_screen's status.
{
echo "Stopping server..."
to_screen "stop"
sleep 5s
echo "Server appears to have stopped successfully."
to_screen "exit"
echo "Exited from screen."
}||{
echo "ERROR: Could not stop server. Not sure what happened."
exit 1
}
| true
|
507a705b2b33bd1dcc117ad8bb3a386697b33477
|
Shell
|
GasCan1234/photo-booth
|
/update.sh
|
UTF-8
| 402
| 3.25
| 3
|
[] |
no_license
|
#!/bin/bash
# Update the photo-booth checkout: discard local changes and pull master,
# after an explicit one-keystroke confirmation.
echo "##########################"
echo "### UPDATE Photo-Booth ###"
echo "##########################"
git status
# -n 1: accept a single keystroke; -r: keep backslashes literal.
read -p "Are you sure to overwrite local changes and update? (type: y)" -n 1 -r
echo # (optional) move to a new line
if [[ $REPLY =~ ^[Yy]$ ]]
then
echo "### UPDATE START ###"
# NOTE(review): staging everything first makes the hard reset also remove
# previously-untracked files (they enter the index, then reset deletes them)
# — presumably an intentional "clean slate" before pulling; confirm.
git add .
git reset --hard HEAD
git pull origin master
echo "### UPDATE DONE ###"
fi
| true
|
bcddeae6c21c99725fece87cc2dff0f5ddf5364b
|
Shell
|
Hacker-One/ob-security-conformance-suite-deprecated-production
|
/httpd/start.sh
|
UTF-8
| 240
| 2.75
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Substitute the placeholder hostname in the Apache vhost config with the
# value of $OBPTESTSERVICE_NAME, then install and start Apache.
project=/etc/apache2/sites-enabled
echo "$project"
cd "$project" || exit 1
echo "$OBPTESTSERVICE_NAME"
# Fix: the original used single quotes, so the literal string
# '$OBPTESTSERVICE_NAME' was written into the config instead of its value.
# Double quotes let the shell expand the variable before sed sees the script.
sed -i "s/obptestserver.pditdap/$OBPTESTSERVICE_NAME/g" server-static.conf
echo "start apache2"
# Fix: -y so apt-get does not hang waiting for confirmation in a container.
apt-get install -y apache2
apachectl start
| true
|
3bdfdedaadc6c5aba55fbfcde0c2768b0886bb2c
|
Shell
|
Rylko/test4
|
/flipgit.sh
|
UTF-8
| 606
| 3.328125
| 3
|
[] |
no_license
|
#!/bin/sh
# Interactively create a GitHub repository via the API, retrying until the
# API returns a 2xx status, then print the SSH clone URL.
# NOTE(review): shebang is /bin/sh but `source`, `read -p` and `read -s` are
# bashisms — this only works where /bin/sh is bash; consider #!/bin/bash.
source conf.sh
echo
# Seed status_code so the first rof (1/100 = 0) forces at least one loop pass.
status_code=1
rof=$((status_code/100))
while [ $rof -ne 2 ]
do
echo
#echo "Enter your GitHub login: "
read -p "Enter your GitHub login: " gh_user
echo -n "Password: "
read -s password
echo
read -p "Enter name for new repository: " reponame
# --write-out appends "\n<http_code>" after the body so the status can be
# split off below.
# NOTE(review): the password appears on the curl command line (visible in ps)
# and GitHub password auth for the API is deprecated — a token would be safer.
response=$(curl -u $gh_user:$password \
--write-out \\n%{http_code} \
--silent \
https://api.github.com/user/repos \
-d "{\"name\":\"$reponame\"}")
# Last line of $response is the HTTP status; everything before it is the body.
status_code=$(echo "$response" | sed -n '$p')
html=$(echo "$response" | sed '$d')
echo status_code=$status_code
rof=$((status_code/100))
done
echo "git@github.com:$gh_user/$reponame.git"
| true
|
ea10c48a42b0f754e8e0753bf734b71aecc2d089
|
Shell
|
rezpablo/docker
|
/mssql-tools/scripts/restoreDb.shBK
|
UTF-8
| 629
| 2.796875
| 3
|
[] |
no_license
|
#!/bin/bash
# Restore a SQL Server database from a .bak dump via sqlcmd.
# $1 - backup file name (expected under C:\var\opt\mssql\data\dumps\)
# $2 - target database name
# Requires SA_PASSWORD in the environment; server address is hard-coded.
set -e
echo '=====>'
BAK_NAME=$1
DB_NAME=$2
echo '########## ESTA OPERAÇÃO IRÁ DEMORAR... Tenha paciência!'
echo 'INICIO: ' `date`
echo ${BAK_NAME}
echo ${DB_NAME}
# The unquoted heredoc lets the shell expand $DB_NAME/$BAK_NAME inside the SQL.
# NOTE(review): logical file names 'prow_onehealth'/'prow_onehealth_log' are
# hard-coded — this only restores dumps taken from that specific database.
sqlcmd -S 10.3.0.49 -U 'sa' -P $SA_PASSWORD << EOF
USE master
RESTORE DATABASE [$DB_NAME] FROM DISK = N'C:\var\opt\mssql\data\dumps\\${BAK_NAME}' WITH FILE = 1, MOVE N'prow_onehealth' TO N'C:\var\opt\mssql\data\\${DB_NAME}.mdf', MOVE N'prow_onehealth_log' TO N'C:\var\opt\mssql\data\\${DB_NAME}.ldf', NOUNLOAD, REPLACE, STATS = 5
GO
EOF
echo 'OPERACAO FINALIZADA:' $(date)
echo 'FIM, aguarde até o container ser finalizado'
exit 0
| true
|
b18c31c1578ee0cd3e0bff3756593ee2062c7be2
|
Shell
|
metabrainz/musicbrainz-docker
|
/build/solr/scripts/load-search-indexes.sh
|
UTF-8
| 1,446
| 4.25
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Load MusicBrainz Solr search indexes from *.tar.zst dumps in $DUMP_DIR
# into $DATA_DIR, refusing to clobber existing data unless --force is given.
set -e -o pipefail -u
DUMP_DIR=/media/searchdump
DATA_DIR=/opt/solr/server/solr/data
OVERWRITE_FLAG=0
SCRIPT_NAME=$(basename "$0")
HELP=$(cat <<EOH
Usage: $SCRIPT_NAME [<options>]
Load MusicBrainz Solr search indexes from fetched dump files.
Options:
-f, --force Delete any existing data before loading search indexes
-h, --help Print this help message
Note:
The Docker Compose service 'search' must be stopped beforehand.
EOH
)
# Parse arguments
# The regexes accept any number of leading dashes, so "h", "-h", "--help",
# "f", "-f" and "--force" all match.
if [[ $# -gt 0 && $1 =~ ^-*h(elp)?$ ]]
then
echo "$HELP"
exit 0 # EX_OK
elif [[ $# -eq 1 && $1 =~ ^-*f(orce)?$ ]]
then
OVERWRITE_FLAG=1
elif [[ $# -gt 0 ]]
then
echo >&2 "$SCRIPT_NAME: unrecognized arguments"
echo >&2 "Try '$SCRIPT_NAME help' for usage."
exit 64 # EX_USAGE
fi
# Check existing Solr data and extract search indexes from dump files
cd "$DUMP_DIR"
for dump_file in *.tar.zst
do
# Strip the first occurrence of ".tar.zst" to recover the collection name.
collection=${dump_file/.tar.zst}
echo "$(date): Load $collection search index..."
# `find … | wc -c` counts bytes of output: non-zero means at least one file
# exists under the collection's data directory (stderr silenced for the
# missing-directory case).
if [[ $(find "$DATA_DIR/$collection" -type f 2>/dev/null | wc -c) -ne 0 ]]
then
if [[ $OVERWRITE_FLAG -eq 1 ]]
then
find "$DATA_DIR/$collection" -type f -delete
else
echo >&2 "$SCRIPT_NAME: '$collection' has data already"
echo >&2 "To delete it first, add the option '--force'."
exit 73 # EX_CANTCREAT
fi
fi
tar -x --zstd -f "$DUMP_DIR/$dump_file" -C "$DATA_DIR"
done
echo "$(date): Done loading search indexes."
# vi: set noexpandtab softtabstop=0:
| true
|
45f12295362289b009fdde703e17443738a58c76
|
Shell
|
LuckerYi/espnet
|
/tools/setup_python.sh
|
UTF-8
| 848
| 4.1875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Set up the Python used by the toolchain and write activate_python.sh.
# Usage: setup_python.sh <python> [venv-path]
#   With a venv path: create a venv there and activate it via the venv script.
#   Without: require python3/pip3 next to <python> and prepend that dir to PATH.
# Fix: quote ${VENV}, ${PYTHON%/*} and related expansions so interpreter or
# venv paths containing spaces don't word-split.
set -euo pipefail

if [ $# -eq 0 ] || [ $# -gt 2 ]; then
    echo "Usage: $0 <python> [venv-path]"
    exit 1;
elif [ $# -eq 2 ]; then
    PYTHON="$1"
    VENV="$2"
elif [ $# -eq 1 ]; then
    PYTHON="$1"
    VENV=""
fi

# Sanity-check that the given interpreter actually runs.
if ! "${PYTHON}" --version; then
    echo "Error: ${PYTHON} is not Python?"
    exit 1
fi

if [ -n "${VENV}" ]; then
    "${PYTHON}" -m venv "${VENV}"
    # Record an absolute activation path regardless of how VENV was spelled.
    echo ". $(cd "${VENV}"; pwd)/bin/activate" > activate_python.sh
else
    # No venv requested: the interpreter's directory must provide python3/pip3.
    PYTHON_DIR="$(cd "${PYTHON%/*}" && pwd)"
    if [ ! -x "${PYTHON_DIR}"/python3 ]; then
        echo "${PYTHON_DIR}/python3 doesn't exist."
        exit 1
    elif [ ! -x "${PYTHON_DIR}"/pip3 ]; then
        echo "${PYTHON_DIR}/pip3 doesn't exist."
        exit 1
    fi
    echo "PATH=${PYTHON_DIR}:\${PATH}" > activate_python.sh
fi
. ./activate_python.sh
python3 -m pip install -U pip wheel
| true
|
10d4c51683622915a4ded3d4c3eea342d7853921
|
Shell
|
FutureGateway/PortalSetup
|
/Ubuntu_14.04/fgSetup.sh
|
UTF-8
| 15,314
| 3.828125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#
# Almost automatic FutureGateway setup script(*)
# This script can be executed once destination VM has been started
# Before execute the script, just provide the VM ip address (VMIP) and
# the client machine' SSH public key (SSHPUBKEY).
# During the execution destination VM sudo password may be required.
# To get the VM' sudo password, please contact the author or:
# sg-licence@ct.infn.it
#
# Author: Riccardo Bruno <riccardo.bruno@ct.infn.it>
#
# (*) Full automatic script can be obtained having a passwordless sudo user in
# the destination system. And providing SSH key exchange with cloud facilities
# for instance with cloud-init
# /etc/sudoers
# <user> ALL = (ALL) NOPASSWD: ALL
#
#
# Configure below the Git repository settings for each FutureGateway
# software package: PortalSetup, fgAPIServer, APIServerDaemon
# Each package requires:
# GIT<PKGNAME>_NAME - name of the package in the repository
# GIT<PKGNAME>_CLONE - name of the .git element in the clone URL
# GIT<PKGNAME>_TAG - specify here a specific branch/release
#
GITBASE=https://github.com/indigo-dc # GitHub base repository endpoint
GITBASERAW=https://raw.githubusercontent.com/indigo-dc # GitHub base for raw content
GITPORTALSETUP_NAME="PortalSetup" # PortalSetup git path name
GITPORTALSETUP_CLONE="PortalSetup.git" # PortalSetup clone name
GITPORTALSETUP_TAG="master" # PortalSetup tag name
GITFGAPISERVER_NAME="fgAPIServer" # fgAPIServer git path name
GITFGAPISERVER_CLONE="fgAPIServer.git" # fgAPIServer clone name
GITFGAPISERVER_TAG="v0.0.6-a" # fgAPIServer tag name
GITFGAPISERVERDAEMON_NAME="APIServerDaemon" # APIServerDaemon git path name
GITFGAPISERVERDAEMON_CLONE="APIServerDaemon.git" # APIServerDaemon clone name
GITFGAPISERVERDAEMON_TAG="v0.0.6-a" # APIServerDaemin clone tag name
# Argument handling: all four positional parameters are mandatory; OPTPASS is
# cleared if any is missing and the usage message is printed below.
OPTPASS=1
SCRIPTNAME=$(basename $0)
if [ "${1}" = "" ]; then
OPTPASS=0
fi
VMUSER=$1
if [ "${2}" = "" ]; then
OPTPASS=0
fi
VMIP=$2
if [ "${3}" = "" ]; then
OPTPASS=0
fi
SSHPORT="$3"
if [ "${4}" = "" ]; then
OPTPASS=0
fi
SSHPUBKEY="$4"
# Check for option PASS flag
if [ $OPTPASS -eq 0 ]; then
echo "Usage $SCRIPTNAME <fgusername> <vm host/ip address> <ssh_port> <ssh_pubkey>"
exit 1
fi
echo "#"
echo "# Executing FutureGateway general setup script ..."
echo "#"
echo "VMUSER : '"$VMUSER"'"
echo "VMIP : '"$VMIP"'"
echo "SSHPORT : '"$SSHPORT"'"
# SSH options disable host-key checking — acceptable only because the target
# is a freshly provisioned, throwaway VM.
SSHKOPTS="-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no"
TOMCATUSR="tomcat"
# Random 8-hex-char Tomcat password generated per run.
TOMCATPAS=$(openssl rand -hex 4)
# Empty means "no MySQL root password" — later code branches on this.
MYSQL_RPAS=
# 1) Establish secure connection with the fg VM ssh-ing with: <VMUSER>@<VMIP>
if [ "${SSHPUBKEY}" != "" ]; then
ssh -p $SSHPORT $SSHKOPTS -t $VMUSER@$VMIP "
SSHPUBKEY=\"$SSHPUBKEY\"
mkdir -p .ssh
echo \"\$SSHPUBKEY\" >> .ssh/authorized_keys
"
fi
# 2) Install mandatory packages
if [ "${MYSQL_RPAS}" != "" ]; then
DBROOTPASS="$MYSQL_RPAS"
else
DBROOTPASS="\\\"''\\\""
fi
ssh -p $SSHPORT $SSHKOPTS -t $VMUSER@$VMIP "
export DEBIAN_FRONTEND=\"noninteractive\"
sudo debconf-set-selections <<< \"mysql-server mysql-server/root_password password $DBROOTPASS\"
sudo debconf-set-selections <<< \"mysql-server mysql-server/root_password_again password $DBROOTPASS\"
sudo apt-get -y update
PKGS=\"wget \
openssh-client \
openssh-server \
mysql-server-5.6 \
mysql-server-core-5.6 \
mysql-client-5.6 \
mysql-client-core-5.6 \
openjdk-7-jdk \
ant \
maven \
build-essential \
mlocate \
unzip \
curl \
ruby-dev \
apache2 \
libapache2-mod-wsgi \
python-dev \
python-pip \
python-Flask \
python-flask-login \
python-crypto \
python-MySQLdb \
git \
ldap-utils \
openvpn \
screen \
jq\"
sudo apt-get -y install \$(echo \$PKGS)
sudo pip install --upgrade flask-login
sudo service ssh restart
sudo service mysql restart
"
# 3) Install FGPortal
#
# !WARNING - Following file must be aligned with the latest version of setup_config.sh script
#
cat >setup_config.sh <<EOF
#!/bin/bash
#
# Setup configuration script for any FutureGateway setup script
#
# 01.10.2015 - riccardo.bruno@ct.infn.it
#
# This file contains common variables for setup_* scripts it may be used to override
# values defined inside setup settings
#
# Setup configuration variables
#
# Uncomment and change one ot these values to override default settings
# specified inside each setup_* scripts
#
# Common values; FG user, FG user directory, FG file repo, FG home dir, FG environment
#
FGUSER=${VMUSER} # User owning FutureGateway files
FGHOME=\$HOME # This script could be executed as root; specify FG home here
FGREPO=\$FGHOME/FGRepo # Files could be cached into this repo directory
FGLOCATION=\$FGHOME/FutureGateway # Location of the FutureGateway installation
FGENV=\$FGLOCATION/setenv.sh # FutureGateway environment variables
#
# setup_FGPortal.sh
#
TOMCATUSR=${TOMCATUSR} # TOMCAT username
TOMCATPAS=${TOMCATPAS} # TOMCAT password
SKIP_LIFERAY=0 # 0 - Installs Liferay
LIFERAY_VER=7 # Specify here the Liferay portal version: 6 or 7 (default)
LIFERAY_SDK_ON=1 # 0 - SDK will be not installed
LIFERAY_SDK_LOCATION=\$FGLOCATION # Liferay SDK will be placed here
ANT_ON=0 # 0 - Ant will be not installed (valid only if LIFERAY_SDK is on)
MAVEN_ON=0 # 0 - Maven will be not installed (valid only if LIFERAY_SDK is on)
STARTUP_SYSTEM=1 # 0 - The portlal will be not initialized (unused yet)
TIMEZONE=\$(date +%Z) # Set portal timezone as system timezone (portal should operate at UTC)
SETUPDB=1 # 1 - Initialize Liferay DB
# Below MYSQL settings... # !!! WARNING enabling this flag
MYSQL_HOST=localhost # any existing DB will be dropped
MYSQL_PORT=3306
MYSQL_USER=lportal
MYSQL_PASS=lportal
MYSQL_DBNM=lportal
MYSQL_ROOT=root
MYSQL_RPAS=${MYSQL_RPAS}
#
# setup_JSAGA.sh
#
JSAGA_LOCATION=\$FGHOME/FutureGateway # Liferay SDK will be placed here
#
# setup_OCCI.sh
#
USEFEDCLOUD=1 # Set to 1 for FedCloud setup script
#
# setup_GridEngine.sh
#
GEDIR=\$FGLOCATION/GridEngine
GELOG=\$GEDIR/log
GELIB=\$GEDIR/lib
SETUPUTDB=1 # 1 - Initialize UsersTracking DB
SETUPGRIDENGINEDAEMON=1 # 1 - Configures GRIDENGINE Daemon
RUNDIR=\$FGHOME # Normally placed at $FGHOME
GEMYSQL_HOST=localhost # Any existing DB will be dropped
GEMYSQL_PORT=3306
GEMYSQL_USER=tracking_user
GEMYSQL_PASS=usertracking
GEMYSQL_DBNM=userstracking
#
# Determine OS installer
#
BREW=\$(which brew >/dev/null 2>/dev/null)
APTGET=\$(which apt-get 2>/dev/null)
YUM=\$(which yum 2>/dev/null)
# Function that produces a timestamp
get_ts() {
TS=\$(date +%y%m%d%H%M%S)
}
# Function that retrieves a file from FGRepo or download it
# from the web. The function takes three arguments:
# \$1 - Source URL
# \$2 - Destination path; (current dir if none; or only path to destination)
# \$3 - Optional the name of the file (sometime source URL does not contain the name)
# FGREPO directory exists, because created by the preinstall_fg
get_file() {
if [ "\${3}" != "" ]; then
FILENAME="\${3}"
else
FILENAME=\$(basename \$1)
fi
if [ "\${2}" != "" ]; then
DESTURL="\${2}"
else
DESTURL=\$(pwd)
fi
if [ -e "\${FGREPO}/\${FILENAME}" ]; then
# The file exists in the cache
echo "File \${FILENAME} exists in the cache"
cp "\${FGREPO}/\${FILENAME}" \$DESTURL/\$FILENAME
else
echo "File \${FILENAME} not in cache; retrieving it from the web"
wget "\${1}" -O \$FGREPO/\$FILENAME 2>/dev/null
RES=\$?
if [ \$RES -ne 0 ]; then
echo "FATAL: Unable to download from URL: \${1}"
rm -f \$FGREPO/\$FILENAME
exit 1
fi
cp "\${FGREPO}/\${FILENAME}" \$DESTURL/\$FILENAME
fi
}
#
# Function that replace the 1st matching occurrence of
# a pattern with a given line into the specified filename
# \$1 # File to change
# \$2 # Matching pattern that identifies the line
# \$3 # New line content
# \$4 # Optionally specify a suffix to keep a safe copy
replace_line() {
file_name=\$1 # File to change
pattern=\$2 # Matching pattern that identifies the line
new_line=\$3 # New line content
keep_suffix=\$4 # Optionally specify a suffix to keep a safe copy
if [ "\$file_name" != "" -a -f \$file_name -a "\$pattern" != "" ]; then
TMP=\$(mktemp)
cp \$file_name \$TMP
if [ "\$keep_suffix" != "" ]; then # keep a copy of replaced file
cp \$file_name \$file_name"_"\$keep_suffix
fi
MATCHING_LINE=\$(cat \$TMP | grep -n "\$pattern" | head -n 1 | awk -F':' '{ print \$1 }' | xargs echo)
if [ "\$MATCHING_LINE" != "" ]; then
cat \$TMP | head -n \$((MATCHING_LINE-1)) > \$file_name
printf "\$new_line\n" >> \$file_name
cat \$TMP | tail -n +\$((MATCHING_LINE+1)) >> \$file_name
else
echo "WARNING: Did not find '"\$pattern"' in file: '"\$file_name"'"
fi
rm -f \$TMP
else
echo "You must provide an existing filename and a valid pattern"
return 1
fi
}
EOF
scp $SSHKOPTS -P $SSHPORT setup_config.sh $VMUSER@$VMIP:
rm -f setup_config.sh
ssh -p $SSHPORT $SSHKOPTS -t $VMUSER@$VMIP "
[ -f FGRepo.tar.gz ] || wget http://sgw.indigo-datacloud.eu/fgsetup/FGRepo.tar.gz -O FGRepo.tar.gz
[ -f APIServerDaemon_lib.tar.gz ] || wget http://sgw.indigo-datacloud.eu/fgsetup/APIServerDaemon_lib.tar.gz -O APIServerDaemon_lib.tar.gz
wget $GITBASERAW/$GITPORTALSETUP_NAME/$GITPORTALSETUP_TAG/setup_FGPortal.sh -O setup_FGPortal.sh
chmod +x *.sh
./setup_FGPortal.sh
"
#3 Install JSAGA,GridEngine,rOCCI, fgService
ssh -p $SSHPORT $SSHKOPTS -t $VMUSER@$VMIP "
wget $GITBASERAW/$GITPORTALSETUP_NAME/$GITPORTALSETUP_TAG/setup_JSAGA.sh -O setup_JSAGA.sh
wget $GITBASERAW/$GITPORTALSETUP_NAME/$GITPORTALSETUP_TAG/setup_GridEngine.sh -O setup_GridEngine.sh
wget $GITBASERAW/$GITPORTALSETUP_NAME/$GITPORTALSETUP_TAG/setup_OCCI.sh -O setup_OCCI.sh
wget $GITBASERAW/$GITPORTALSETUP_NAME/$GITPORTALSETUP_TAG/setup_FGService.sh -O setup_FGService.sh
chmod +x setup_*.*
sudo ./setup_JSAGA.sh
sudo ./setup_GridEngine.sh
sudo ./setup_OCCI.sh # Script not really mature some tuning still necessary
sudo ./setup_FGService.sh
"
#4 fgAPIServer
if [ "${MYSQL_RPAS}" != "" ]; then
SETUPFGAPIERVER_DB="mysql -u root -p$MYSQL_RPAS"
else
SETUPFGAPIERVER_DB="mysql -u root"
fi
ssh -p $SSHPORT $SSHKOPTS -t $VMUSER@$VMIP "
source ~/.bash_profile
cd \$FGLOCATION
git clone -b $GITFGAPISERVER_TAG $GITBASE/$GITFGAPISERVER_CLONE
cd fgAPIServer
$SETUPFGAPIERVER_DB < fgapiserver_db.sql
"
#5 APIServerDaemon
TOSCAADAPTOR_GIT="https://github.com/csgf/jsaga-adaptor-tosca.git"
ROCCIADAPTOR_GIT="https://github.com/csgf/jsaga-adaptor-rocci.git"
cat > setup_APIServerDaemon.sh <<EOF
cd \$FGLOCATION
git clone -b $GITFGAPISERVERDAEMON_TAG $GITBASE/$GITFGAPISERVERDAEMON_CLONE
git clone $ROCCIADAPTOR_GIT
git clone $TOSCAADAPTOR_GIT
# Prepare lib dir
tar xvfz \$HOME/APIServerDaemon_lib.tar.gz -C \$FGLOCATION/APIServerDaemon/web/WEB-INF/
# Default JSON library works for java-8; in java-7 another jar is necessary
JVER=\$(java -version 2>&1 | head -n 1 | awk '{ print \$3 }' | sed s/\"//g | awk '{ print substr(\$1,1,3) }')
if [ "\${JVER}" = "1.7" ]; then
echo "Changing JSON jar for java-7"
mv \$FGLOCATION/APIServerDaemon/web/WEB-INF/lib/json-20150729.jar \$FGLOCATION/APIServerDaemon/web/WEB-INF/lib/json-20150729.jar_disabled
wget http://central.maven.org/maven2/org/json/json/20140107/json-20140107.jar -O \$FGLOCATION/APIServerDaemon/web/WEB-INF/lib/json-20140107.jar
fi
# Compile rocci adaptor
rm -rf \$FGLOCATION/APIServerDaemon/web/WEB-INF/lib/jsaga-adaptor-rocci*.jar
rm -rf \$FGLOCATION/APIServerDaemon/web/WEB-INF/lib/jsaga-adaptor-tosca*.jar
cd jsaga-adaptor-rocci
cd \$FGLOCATION/jsaga-adaptor-rocci
ant all
cp \$FGLOCATION/jsaga-adaptor-rocci/dist/jsaga-adaptor-rocci.jar \$FGLOCATION/APIServerDaemon/web/WEB-INF/lib
cp \$FGLOCATION/jsaga-adaptor-rocci/dist/jsaga-adaptor-rocci.jar \$FGLOCATION/jsaga-1.1.2/lib
# Compile tosca adaptor
cd \$FGLOCATION/jsaga-adaptor-tosca
mv \$FGLOCATION/jsaga-adaptor-tosca/build.xml \$FGLOCATION/jsaga-adaptor-tosca/build.xml_nb
mv \$FGLOCATION/jsaga-adaptor-tosca/build.xml_disabled \$FGLOCATION/jsaga-adaptor-tosca/build.xml
ant all
cp \$FGLOCATION/jsaga-adaptor-tosca/dist/jsaga-adaptor-tosca.jar \$FGLOCATION/APIServerDaemon/web/WEB-INF/lib
cp \$FGLOCATION/jsaga-adaptor-tosca/dist/jsaga-adaptor-tosca.jar \$FGLOCATION/jsaga-1.1.2/lib
# Compile APIServerDaemon
cd \$FGLOCATION/APIServerDaemon
ant all
cp \$FGLOCATION/APIServerDaemon/dist/APIServerDaemon.war \$CATALINA_HOME/webapps
cd \$FGLOCATION
EOF
scp $SSHKOPTS -P $SSHPORT setup_APIServerDaemon.sh $VMUSER@$VMIP:
rm -f setup_APIServerDaemon.sh
ssh -p $SSHPORT $SSHKOPTS -t $VMUSER@$VMIP "
source ~/.bash_profile
chmod +x setup_APIServerDaemon.sh
./setup_APIServerDaemon.sh
cp \$FGLOCATION/APIServerDaemon/dist/APIServerDaemon.war \$CATALINA_HOME/webapps
rm -f ./setup_APIServerDaemon.sh
"
#6 Customize DB and default app settings
cat > customize_DBApps.sh <<EOF
# Fix SSH connection issue on Ubuntu with JSAGA
sudo mkdir -p /etc/ssh/ssh_host_disabled
find /etc/ssh/ -name 'ssh_host_*' | grep -v disabled | grep -v rsa | grep -v \_dsa | xargs -I{} sudo mv {} /etc/ssh/ssh_host_disabled/
# Use the correct application path
SQLCMD="update application_file set path='\$FGLOCATION/fgAPIServer/apps/sayhello' where app_id=2;"
mysql -h localhost -P 3306 -u fgapiserver -pfgapiserver_password fgapiserver -e "\$SQLCMD"
sudo adduser --disabled-password --gecos "" jobtest
RANDPASS=\$(openssl rand -base64 32 | head -c 12)
sudo usermod --password \$(echo "\$RANDPASS" | openssl passwd -1 -stdin) jobtest
SQLCMD="update infrastructure_parameter set pvalue='\$RANDPASS' where infra_id=1 and pname='password'";
mysql -h localhost -P 3306 -u fgapiserver -pfgapiserver_password fgapiserver -e "\$SQLCMD"
#IPADDR=\$(ifconfig eth0 | grep "inet " | awk -F'[: ]+' '{ print \$4 }')
IPADDR=localhost
SQLCMD="update infrastructure_parameter set pvalue='ssh://\$IPADDR:${SSHPORT}' where infra_id=1 and pname='jobservice'";
mysql -h localhost -P 3306 -u fgapiserver -pfgapiserver_password fgapiserver -e "\$SQLCMD"
# Take care of ssh keys (known_hosts)
mkdir -p \$HOME/.ssh
ssh-keyscan -H -p ${SSHPORT} -t rsa localhost >> \$HOME/.ssh/known_hosts
EOF
scp $SSHKOPTS -P $SSHPORT customize_DBApps.sh $VMUSER@$VMIP:
ssh -p $SSHPORT $SSHKOPTS -t $VMUSER@$VMIP "
source ~/.bash_profile
chmod +x customize_DBApps.sh
./customize_DBApps.sh
rm -f ./customize_DBApps.sh
sudo su - -c 'sudo cat >> /etc/ssh/sshd_config <<EOF2
#jobtest allow password auth.
Match User jobtest
PasswordAuthentication yes
EOF2
'
sudo service ssh restart
"
rm -f ./customize_DBApps.sh
| true
|
b0fb94f02a9d1321fc601b0a1525a5dac294238a
|
Shell
|
sandipde/free-electron-dft
|
/input/ginput.sh
|
UTF-8
| 1,321
| 3.28125
| 3
|
[] |
no_license
|
#!/bin/sh
# Zenity-based menu for editing the input files of the 3D-DFT program.
# Loops until the user cancels the menu (any unlisted/empty answer).
inf=1
while [ "$inf" = 1 ]
do
ans=0
ans=$(zenity --list --title "INPUT EDITOR FOR 3D-DFT PROGRAM" --text "\n\n========================================================\n
THIS PROGRAM WILL LET YOU EDIT THE INPUTS OF THE 3DDFT PROGRAM \n
=======================================================\n\n" --radiolist --column "choose" --column " TASKS" TRUE "MAIN PARAMETERS " FALSE "DAVIDSON'S PARAMETERS" FALSE "MESH PARAMETERS" FALSE "EXTERNAL POTENTIAL" --height=300 --width=400 )
#echo $ans
# Map the chosen menu entry to its input file; anything else ends the loop.
case "$ans"
in
"MAIN PARAMETERS ") filename=main.in ;;
"DAVIDSON'S PARAMETERS") filename=dvdson.in ;;
"MESH PARAMETERS") filename=mesh.in;;
"EXTERNAL POTENTIAL") filename=ext_pot.in ;;
*) inf=2;;
esac
#printf "$filename \n"
# NOTE(review): in the "1)" arm below, the ";;" sits INSIDE the trailing
# comment, so the arm actually continues onto the zenity line — the code
# works, but only by accident of that commented-out ";;".
case "$inf"
in
1) #dialog --title "YOU ARE EDITING $filename FILE" --editbox $filename 27 88 ;;
zenity --text-info --title "YOU ARE EDITING $ans | ANY CHANGE YOU MADE WILL BE SAVED " --editable --filename=$filename --height=280 --width=640 >temp.in;mv temp.in $filename ;;
# 2) dialog --backtitle "PROGRAM BY SANDIP DE DATE : 19.02.09 " --colors --timeout 1 --msgbox " \n \Zb\Z1\Zu GOING BACK TO MAIN MENU " 10 40
esac
done
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# script by sandip de 19.02.09
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
| true
|
959a830065cc415da6a15d573e1f73f89abaa221
|
Shell
|
edmondscommerce/phpqa
|
/includes/generic/markdownLinks.inc.bash
|
UTF-8
| 477
| 3.515625
| 4
|
[
"MIT"
] |
permissive
|
# Run the markdown link checker in a retry loop until it exits cleanly.
# Relies on $projectRoot, phpNoXdebug and tryAgainOrAbort from the including script.
if [[ ! -f "$projectRoot/README.md" ]]; then
    echo "The Markdown Links check requires a README.md in the root of the repository"
    echo "You must create a README.md to proceed
"
    exit 1;
fi

# Seed with a non-zero value so the checker runs at least once.
rcMdLinks=99
until (( rcMdLinks == 0 )); do
    # Temporarily drop errexit so a failing check doesn't kill the whole run.
    set +e
    phpNoXdebug -f bin/mdlinks
    rcMdLinks=$?
    set -e
    if (( rcMdLinks > 0 )); then
        tryAgainOrAbort "Markdown Links Checker"
    fi
done
| true
|
22164734451c71482c837f5e54e9afb6c988ffd3
|
Shell
|
byxorna/scripts
|
/mysql-dump-backups.sh
|
UTF-8
| 2,082
| 3.734375
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# script to dump mysql DBs
# written Apr 29 2010
# Dumps all databases to a timestamped file in /var/mysql-backup, mails a
# success/failure notice, and prunes dumps older than 10 days.
#
# careful what password you put here, as special sequences will break the script, like !! or so on
# NOTE(review): the password is visible to `ps` while mysqldump runs; a
# ~/.my.cnf or --defaults-extra-file would be safer.
password='CHANGEME'
user='root'
backupdir=/var/mysql-backup

# Build the credential arguments as an array so an empty password adds no
# --password flag at all. (Fix: the original computed $pwline for this purpose
# but never used it, passing --password unconditionally.)
dump_args=("--user=$user")
if [ -z "$password" ]; then
    echo "No password given..."
else
    dump_args+=("--password=$password")
fi

# Make sure the backup directory exists before dumping into it.
if [ ! -d "$backupdir" ]; then
    echo "INFO: $backupdir doesnt exist, creating..."
    /bin/mkdir "$backupdir"
fi

# Verify required external tools (fix: $(command -v …) instead of `which`).
if ! mysqldump=$(command -v mysqldump); then
    echo "ERROR: no mysqldump found in $PATH, aborting..."
    exit 1
fi
if ! date=$(command -v date); then
    echo "ERROR: no date found in $PATH, aborting..."
    exit 1
fi
currentdate=$("$date" '+%Y-%m-%d-%H:%M:%S')
if ! mailx=$(command -v mailx); then
    echo "ERROR: no mailx found in $PATH, aborting..."
    exit 1
fi

backupfile="$(hostname)-mysql-$currentdate.sql"

# Perform the dump and mail the outcome.
if "$mysqldump" "${dump_args[@]}" --all-databases > "$backupdir/$backupfile"; then
    echo "MySQL backup on $(hostname) was successful" | "$mailx" -s "[MYSQL:SUCCESS] backup on $(hostname)" unixadmins@list.wustl.edu
else
    echo "MySQL backup on $(hostname) FAILED" | "$mailx" -s "[MYSQL:FAILED] backup on $(hostname)" unixadmins@list.wustl.edu
fi

# Restrict the dump (it contains every database) and prune old backups.
/bin/chmod 600 "$backupdir/$backupfile"
find "$backupdir/" -name '*.sql' -type f -mtime +10 -exec rm {} \;
| true
|
a80052e20a0fd17b86c4e5bbbd26f7a91d972fac
|
Shell
|
Jason23347/petunia
|
/lib/petunia/kernel
|
UTF-8
| 2,550
| 3.765625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Include guard: stop if this module was already sourced.
# (`[ -v VAR ]` is true when VAR is set, even to the empty string.)
[ -v _MODULE_KERNEL_ ] &&
return 0 || _MODULE_KERNEL_=
# Pull in user-defined command extensions when present.
[ -f $__DIR__/extends/commands ] && . $__DIR__/extends/commands
kernel::abort() {
    # Print an error message to stderr and exit the shell.
    # $1 - message; $2 - optional exit code (defaults to 1).
    # Fix: use a fixed '%s' format so messages containing '%' or backslashes
    # are printed verbatim (the original passed $1 as the printf FORMAT).
    printf '%s\n' "$1" >&2
    [ $# -lt 2 ] && exit 1 || exit "$2"
}
kernel::excute() {
# Dispatch a CLI command to its handler function.
# "foo:bar baz" -> kernel::commands.foo bar baz  (only the FIRST ':' in $1 is
# turned into a space, then unquoted expansion word-splits it into function
# name + first argument; remaining args pass through via unquoted $*).
# (sic: "excute" — misspelled name kept because callers depend on it.)
local COMMAND=${1/:/ }
shift
kernel::commands.$COMMAND $*
return $?
}
kernel::commands.make() {
# Scaffold a new file of the given kind (script|dao|test|migration).
# $1 - sub-command (kind); $2 - file name/path to create.
local SUB=$1
shift
[ $# -eq 0 ] && kernel::abort "missing file name!"
# `A || B && C` groups as (A || B) && C in shell, so abort runs when the
# target exists as EITHER a file or a directory — intended, despite looks.
[ -f "$1" ] || [ -d "$1" ] &&
kernel::abort "file or directory exists!"
# If the name contains a '/', pre-create its parent directories.
[ "$1" == "${1%/*}" ] || mkdir -p ${1%/*}
# typeset -u: FILE_NAME holds the basename forced to upper case (used in the
# generated include guards below).
typeset -u FILE_NAME=${1##*/}
case $SUB in
# Heredoc bodies below are the exact content of the generated files; do not
# add comments inside them.
script)
cat <<SCRIPT >$1
#!/bin/bash
[ -v _MODULE_${FILE_NAME}_ ] &&
return 0 || _MODULE_${FILE_NAME}_=
SCRIPT
;;
dao)
cat <<DAO >$__DIR__/dao/$1
#!/bin/bash
[ -v _MODULE_DAO_${FILE_NAME}_ ] &&
return 0 || _MODULE_DAO_${FILE_NAME}_=
typeset -l DAO_NAME=${FILE_NAME}
. $__DIR__/lib/petunia/database/database
. $__DIR__/lib/petunia/database/templates/$DB_CONNECTION/dao
DAO
;;
test)
cat <<TEST >$__DIR__/test/$1
#!/bin/bash
testEquality() {
assertEquals 1 1
}
. lib/shunit2/shunit2
TEST
;;
migration)
# typeset -l: migration file name is forced to lower case.
typeset -l table="create_${FILE_NAME##*/}_table"
cat <<MIGRATION >$__DIR__/app/migration/$table
#!/bin/bash
. \$__DIR__/lib/petunia/database/database
migration.up() {
DB::setTable ${table}
DB::setPrimaryKey 'id'
DB::createTable
}
migration.down() {
DB::setTable ${table}
DB::dropTable
}
MIGRATION
;;
*) kernel::abort "command $SUB not found" ;;
esac
return 0
}
kernel::commands.route() {
    # Dispatch the "route" sub-command: generate the route cache or clear it.
    local action=$1
    shift
    if [ "$action" = "generate" ]; then
        . $__DIR__/route/config
    elif [ "$action" = "clear" ]; then
        rm -r $__DIR__/storage/petunia/routes/*
    else
        kernel::abort "command $action not found"
    fi
    return 0
}
kernel::commands.migrate() {
# Run database migrations.
# No args: source every file under app/migration and run migration.up only.
# "refresh": run migration.down then migration.up for every file.
# NOTE(review): with no args the function still falls through to `shift`
# (which fails silently with nothing to shift) and the case with an empty
# SUB — harmless, but relies on errexit not being set.
# NOTE(review): iterating $(ls …) word-splits on whitespace; migration file
# names are assumed not to contain spaces.
[ $# -eq 0 ] && for item in $(ls $__DIR__/app/migration); do
. $__DIR__/app/migration/$item
# migration.down
migration.up
# Drop the sourced hooks so the next file's definitions start clean.
unset -f migration.down
unset -f migration.up
done
local SUB=$1
shift
case $SUB in
refresh)
for item in $(ls $__DIR__/app/migration); do
. $__DIR__/app/migration/$item
migration.down
migration.up
unset -f migration.down
unset -f migration.up
done
;;
esac
}
kernel::commands.test() {
    # Run a single test script from $__DIR__/test/, propagating its exit code.
    # $1 - test file name (relative to the test directory).
    [ $# -eq 0 ] && kernel::abort "missing test file name"
    # Fix: quote the path so $__DIR__ or the file name may contain spaces.
    "$__DIR__/test/$1"
    return $?
}
| true
|
15a04d26da36f17dac60ad4c4604b80ceb2ad3f3
|
Shell
|
chef/automate
|
/terraform/test-environments/modules/chef_automate_install/templates/install_chef_automate_cli.sh.tpl
|
UTF-8
| 12,168
| 3.40625
| 3
|
[
"Apache-2.0",
"CC-BY-2.0",
"SAX-PD",
"MPL-2.0",
"Artistic-2.0",
"MIT-CMU",
"BSD-3-Clause",
"0BSD",
"CC-BY-4.0",
"LGPL-3.0-or-later",
"CC0-1.0",
"CC-BY-3.0",
"BSD-Source-Code",
"Apache-1.1",
"Ruby",
"WTFPL",
"BSD-1-Clause",
"MIT",
"Unlicense",
"BSD-2-Clause"
] |
permissive
|
#!/bin/bash
set -e
# NOTE: This is a terraform template. The ${upgrade} and ${channel}
# variables will be replaced with strings at rendering time. Those
# are not shell variables.
# Run Habitat non-interactively with plain output.
export HAB_NONINTERACTIVE="true"
export HAB_NOCOLORING="true"
# Accept the Chef license for this run without persisting the decision.
export HAB_LICENSE=accept-no-persist
export PATH="/usr/local/bin:$PATH"
# True when Chef Automate has been deployed on this host, detected via the
# deployment-service user.toml that a deploy writes under /hab.
automate_deployed() {
  local marker=/hab/user/deployment-service/config/user.toml
  test -f "$marker"
}
# True when Automate is deployed but its systemd unit file is missing,
# i.e. the install must be converged again to recreate chef-automate.service.
automate_needs_redeploy() {
  if ! automate_deployed; then
    return 1
  fi
  ! systemctl list-unit-files chef-automate.service --no-legend | grep -q chef-automate.service
}
# True when this run should also upgrade an existing Automate install.
# (The flag's value is baked in by Terraform at template-render time.)
upgrade_automate() {
  [ "${upgrade}" = "true" ]
}
# First-time install: render the config into place, run `chef-automate
# deploy`, then apply data-retention policies. The admin password and the
# airgapped flag are substituted by Terraform at template-render time.
deploy() {
  mkdir -p /etc/chef-automate
  cp /tmp/chef-automate-config.toml /etc/chef-automate/config.toml
  # /var/opt must be world-traversable for the deployed services.
  chmod a+rx /var/opt
  deploy_options="/etc/chef-automate/config.toml"
  deploy_options="$deploy_options --accept-terms-and-mlsa"
  deploy_options="$deploy_options --admin-password ${admin_password}"
  if [[ "${airgapped}" == "true" ]]; then
    # Airgapped installs consume the pre-staged bundle instead of downloading.
    deploy_options="$deploy_options --airgap-bundle /tmp/automate.aib"
  fi
  # deploy_options is intentionally unquoted so it word-splits into arguments.
  chef-automate deploy $deploy_options
  if [[ "${airgapped}" == "true" ]]; then
    # The bundle is large; free the disk space once it has been consumed.
    rm -f /tmp/automate.aib
  fi
  configure_retention
}
# Re-run the deploy against existing /hab state (e.g. after the instance was
# replaced but the data volume survived): skips preflight checks.
redeploy() {
  mkdir -p /etc/chef-automate
  cp /tmp/chef-automate-config.toml /etc/chef-automate/config.toml
  chmod a+rx /var/opt
  chef-automate deploy /etc/chef-automate/config.toml --accept-terms-and-mlsa --skip-preflight
  configure_retention
}
# Download the latest chef-automate CLI for the configured release channel
# (substituted by Terraform) and install it into /usr/local/bin.
install_automate_cmd() {
  # Perform the installation
  pushd "/tmp"
  curl -s https://packages.chef.io/files/${channel}/automate/latest/chef-automate_linux_amd64.zip | gunzip - > chef-automate && chmod +x chef-automate
  mv chef-automate /usr/local/bin/chef-automate
  popd
}
# Install InSpec from omnitruck when it is not already on PATH.
install_inspec() {
  # install the latest inspec from omnitruck if it doesn't exist
  # this currently only gets run on the hardened fresh install instances
  # so we're always going to have the latest inspec
  # NOTE(review): the version is pinned to 4.16.0 below, not "latest" —
  # the comment above appears to predate the pin.
  if ! [ -x "$(command -v inspec)" ]; then
    curl -s https://omnitruck.chef.io/install.sh | bash -s -- -P inspec -v 4.16.0
  fi
}
# Poll `chef-automate upgrade status` until it reports up-to-date.
# Gives up and fails the whole script after 60 tries at 10-second
# intervals — roughly 10 minutes plus the time of the commands.
wait_for_upgrade() {
  local attempt=1
  while (( attempt <= 60 )); do
    echo "Checking upgrade status (try $attempt/60)"
    if chef-automate upgrade status | grep 'up-to-date'; then
      return 0
    fi
    echo "Retrying in 10 seconds"
    sleep 10
    (( attempt++ ))
  done
  echo "Services failed to upgrade in a reasonable amount of time."
  exit 1
}
# Run the a2-hardened-security InSpec profile and exit the script on any
# status other than 0 or 101 (101 is presumably InSpec's "skipped tests,
# no failures" exit code — TODO confirm against the InSpec CLI docs).
hardened_security_inspec_scan() {
  # BUG FIX: exit_status was previously unset on success (relying on [[
  # -ne ]] coercing an empty string to 0) and leaked as a global.
  local exit_status=0
  install_inspec
  CHEF_LICENSE="accept-no-persist" inspec exec /tmp/a2-hardened-security || exit_status=$?
  if [[ $exit_status -ne 0 && $exit_status -ne 101 ]]; then
    exit $exit_status
  fi
}
# Configure daily data-lifecycle purge jobs directly against the ingest and
# compliance services (dev-only grpcurl helper): trim converge history,
# actions, and compliance scans/reports older than the Terraform-provided
# number of days.
configure_retention() {
  chef-automate dev grpcurl ingest-service -- chef.automate.infra.data_lifecycle.api.Purge.Configure -d '{
    "recurrence":"FREQ=DAILY;DTSTART=20190820T174501Z;INTERVAL=1",
    "policy_update": {
      "es": [
        {
          "policy_name":"converge-history",
          "older_than_days":"${retention_older_than_days}"
        },
        {
          "policy_name":"actions",
          "older_than_days":"${retention_older_than_days}"
        }
      ]
    }
  }'
  chef-automate dev grpcurl compliance-service -- chef.automate.infra.data_lifecycle.api.Purge.Configure -d '{
    "recurrence":"FREQ=DAILY;DTSTART=20190820T174501Z;INTERVAL=1",
    "policy_update": {
      "es": [
        {
          "policy_name":"compliance-scans",
          "older_than_days":"${retention_older_than_days}"
        },
        {
          "policy_name":"compliance-reports",
          "older_than_days":"${retention_older_than_days}"
        }
      ]
    }
  }'
}
# Pre-populate Automate's Infra Views with the locally deployed Chef Server:
# create the server and org records if the InfraProxy API is exposed by the
# gateway and the records do not already exist. No-op on builds without
# InfraProxy.
configure_automate_infra_views() {
  if chef-automate dev grpcurl automate-gateway list | grep "chef.automate.api.infra_proxy.InfraProxy" &> /dev/null; then
    # Re-encode the PEM key's newlines as literal \n so it survives
    # embedding in the JSON payload below.
    chef_server_admin_key="$(</hab/chef-server-admin-key.txt tr '\n' ':' | sed 's/:/\\n/g')"
    server_id="auto-deployed-chef-server"
    org_id="auto-deployed-chef-org"
    if ! chef-automate dev grpcurl automate-gateway -- chef.automate.api.infra_proxy.InfraProxy.GetServer -d "{\"id\": \"$server_id\"}" 2> /dev/null | grep "$server_id" &> /dev/null; then
      chef-automate dev grpcurl automate-gateway -- chef.automate.api.infra_proxy.InfraProxy.CreateServer -d @ <<EOM
{
  "id": "$server_id",
  "name": "$server_id",
  "fqdn": "localhost",
  "ip_address": "127.0.0.1"
}
EOM
    fi
    if ! chef-automate dev grpcurl automate-gateway -- chef.automate.api.infra_proxy.InfraProxy.GetOrg -d "{\"id\": \"$org_id\", \"server_id\": \"$server_id\"}" 2> /dev/null | grep "$org_id" &> /dev/null; then
      chef-automate dev grpcurl automate-gateway -- chef.automate.api.infra_proxy.InfraProxy.CreateOrg -d @ <<EOM
{
  "id": "$org_id",
  "name": "${chef_server_org}",
  "admin_user": "${chef_server_admin_name}",
  "admin_key": "$chef_server_admin_key",
  "server_id": "$server_id"
}
EOM
    fi
  fi
}
# Ensure unzip is available on non-airgapped installs (needed to unpack
# downloaded artifacts); works on both apt- and yum-based distros.
if [[ "${airgapped}" == "false" ]]; then
  if ! command -v unzip &> /dev/null; then
    command -v apt-get &> /dev/null && apt-get install -y unzip
    command -v yum &> /dev/null && yum install -y unzip
  fi
fi
# Hardened images ship a restrictive firewall: open HTTP/HTTPS, persist the
# rules, and verify the baseline with the hardening InSpec profile.
if [[ "${hardened_security}" == "true" ]]; then
  iptables -A INPUT -p tcp -m tcp --dport 80 -m state --state NEW -j ACCEPT
  iptables -A INPUT -p tcp -m tcp --dport 443 -m state --state NEW -j ACCEPT
  iptables-save > /etc/sysconfig/iptables
  hardened_security_inspec_scan
fi
# Fetch the chef-automate CLI whenever we are about to deploy, redeploy,
# or upgrade (airgapped installs get it from the staged bundle instead).
if [[ "${airgapped}" == "false" ]]; then
  if (! automate_deployed) || upgrade_automate || automate_needs_redeploy; then
    echo "installing automate cli"
    install_automate_cmd
  fi
fi
# Main convergence: fresh deploy, redeploy after instance replacement,
# and/or upgrade of an existing install.
if ! automate_deployed; then
  echo "deploying automate"
  deploy
else
  if automate_needs_redeploy; then
    echo "redeploying automate"
    redeploy
  fi
  if upgrade_automate; then
    echo "inside upgrade_automate"
    if [[ "${airgapped}" == "true" ]]; then
      echo "inside upgrade_automate airgapped true"
      # Capture stderr only; a "Major upgrade" error means the run must be
      # repeated with --major and interactive confirmations.
      ERROR=$(chef-automate upgrade run --airgap-bundle /tmp/automate.aib 2>&1 >/dev/null) || true
      if echo "$ERROR" | grep 'This is a Major upgrade'; then
        echo "inside upgrade_automate airgapped true major upgrade"
        # Major upgrades prompt several times; feed "y" answers on stdin.
        echo "y
y
y
y
y
y" | chef-automate upgrade run --major --airgap-bundle /tmp/automate.aib
        sleep 45
        #shellcheck disable=SC2154
        wait_for_upgrade
        echo "y" | chef-automate post-major-upgrade migrate --data=ES
      else
        echo "regular normal upgrade airgap"
        sleep 45
        #shellcheck disable=SC2154
        wait_for_upgrade
      fi
      rm -f /tmp/automate.aib
    else
      echo "inside upgrade_automate airgapped false"
      ERROR=$(chef-automate upgrade run 2>&1 >/dev/null) || true
      if echo "$ERROR" | grep 'This is a Major upgrade'; then
        echo "inside upgrade_automate airgapped false major upgrade"
        echo "y
y
y
y
y
y" | chef-automate upgrade run --major
        sleep 45
        #shellcheck disable=SC2154
        wait_for_upgrade
        echo "y" |chef-automate post-major-upgrade migrate --data=ES
      else
        echo "regular normal upgrade airgap false"
        sleep 45
        #shellcheck disable=SC2154
        wait_for_upgrade
      fi
    fi
    # Re-apply the rendered config after the upgrade completes.
    cp /tmp/chef-automate-config.toml /etc/chef-automate/config.toml
    chef-automate config set /etc/chef-automate/config.toml
  fi
fi
# Accept the Habitat license and ensure `hab` points at the newest install.
hab license accept
# Update to whatever the latest version of hab that got installed is
hab pkg binlink core/hab --force
# Seed the dev IAM users used by the test environments.
chef-automate dev create-iam-dev-users
if [[ "${create_admin_token}" == "true" ]]; then
  # Mint one admin API token per instance; keep it across re-runs.
  if [[ ! -f /root/admin-token.txt ]]; then
    date +%s | xargs -I % chef-automate iam token create admin-token-% --admin | tr -d '\n' > /root/admin-token.txt
  fi
  # Publish the token via the UI's dist dir so tests can fetch it.
  # BUG FIX: quote the command substitution so the hab pkg path is not
  # word-split or glob-expanded.
  cp /root/admin-token.txt "$(hab pkg path chef/automate-ui)/dist/"
fi
# Optional embedded Chef Server: install a patched chef-dk for berks,
# create the admin user and org (idempotently), upload the audit cookbook,
# and register the server in Automate's Infra Views.
if [[ "${enable_chef_server}" == "true" ]]; then
  # install an unstable release of chef-dk that includes a berks patch that we
  # need for cookbook uploads.
  #
  # https://github.com/berkshelf/berkshelf/pull/1789
  #
  # NOTE: we can move back to stable when version 3.2.5 or newer of the chef-dk
  # is promoted to the stable channel
  #
  if [[ ! -d "/hab/pkgs/chef/chef-dk/3.2.5/20180806224746" ]]; then
    hab pkg install chef/chef-dk/3.2.5/20180806224746 -c unstable
  fi
  hab pkg binlink chef/chef-dk berks
  cat << EOH > /tmp/.berks.config.json
{
  "chef": {
    "chef_server_url": "https://localhost/organizations/${chef_server_org}",
    "node_name": "${chef_server_admin_name}",
    "client_key": "/hab/chef-server-admin-key.txt"
  },
  "ssl": {
    "verify": false
  }
}
EOH
  cat << EOH > /tmp/.Berksfile
source "https://supermarket.chef.io"
cookbook "audit"
EOH
  if ! chef-server-ctl user-list | grep ${chef_server_admin_name} &> /dev/null; then
    # save the key in /hab so it is on persistent storage that gets remounted even if the EC2 instance is replaced
    chef-server-ctl user-create ${chef_server_admin_name} admin admin admin@admin.com '$3CUR3' -d -f /hab/chef-server-admin-key.txt
  fi
  # Expose the admin key through the UI's dist dir for the test harness.
  cp /hab/chef-server-admin-key.txt $(hab pkg path chef/automate-ui)/dist/
  if ! chef-server-ctl org-list | grep ${chef_server_org} &> /dev/null; then
    chef-server-ctl org-create ${chef_server_org} ${chef_server_org} -a ${chef_server_admin_name} -d
  fi
  chef-server-ctl org-user-add ${chef_server_org} ${chef_server_admin_name} -a
  berks install -b /tmp/.Berksfile -c /tmp/.berks.config.json
  berks upload -b /tmp/.Berksfile -c /tmp/.berks.config.json
  # add record to automate infra views to pre-populate deployed chef server.
  configure_automate_infra_views
  # Workflow needs its own "delivery" user and org on the Chef Server.
  if [[ "${enable_workflow}" == "true" ]]; then
    if ! chef-server-ctl user-list | grep delivery &> /dev/null; then
      chef-server-ctl user-create delivery delivery delivery delivery@delivery.com '$3CUR3' -d -f /etc/chef-automate/chef-server-delivery-key.txt
    fi
    if ! chef-server-ctl org-list | grep delivery &> /dev/null; then
      chef-server-ctl org-create delivery delivery -a delivery -d
    fi
  fi
fi
# Optional Workflow: create the enterprise once, generating a builder SSH
# keypair for it on first run.
if [[ "${enable_workflow}" == "true" ]]; then
  if ! workflow-ctl list-enterprises | grep "${workflow_enterprise}"; then
    # The piped "y" answers ssh-keygen's overwrite prompt if the key exists.
    echo "y" | ssh-keygen -t rsa -b 4096 -N '' -f /root/builder_key
    workflow-ctl create-enterprise "${workflow_enterprise}" --ssh-pub-key-file=/root/builder_key.pub
  fi
fi
# Create the chef-ci user (if missing)
if ! getent passwd chef-ci &> /dev/null; then
  sudo useradd chef-ci --create-home
fi
# Add chef-ci's public key to authorized list
sudo mkdir -p /home/chef-ci/.ssh
# BUG FIX: this was `chown 0700`, which set the directory *owner* to uid
# 700 instead of setting permissions; sshd requires mode 0700 on ~/.ssh.
sudo chmod 0700 /home/chef-ci/.ssh
cat << EOH > /tmp/.ssh-chef-ci
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC8cKl0sNhpxdOVm2T/3wfwmSExaaDUCNSKJ15D146Y2tQygRdxGY5eHkOrFET8ssnetBFrSB9B+uQYPD9+KpLkupXtL2Sx4EtyuVnUUyoEXgAC7Sr6bxwo+FqfBAkrW1vNakss/WknmaXIDYsHhI16wYTr7nIE41oGPIbcdRDAXp4u56m3tQ2kfiTkg104D2TL50z2YT6I7B1h8CUpz9aAOtd+BYGueX5rdmOATIrPydcLdvWmqrO3GXZKV3zCHG2S/Se+ULC+EhbBMZkICYo3Jre7fedkCIIGhla71h9wg7q6b3eBWowfRWCCKskJ+rkO72zSZsL9EhY/9bcg7leP8hzwmWApeddVVlumqjkPpMkGfU26TKi52gevHW6fsyxCqDR9qhjBOGxSgiqNBQuOEg/9PVLlWBcsrgNhNxsysQEZTi0jv9FdONY3c1zQ+AXHH9HtxjBnx1xD59uzEYG1hUF1MsRwpWswH3Thnd/zbSxKKOdGRqoqy52Icaf7Z96D9XKAOpDQj4pTW0fS3uJP8AL16CNSkHUZSn0vxZCS9lJS+dDxwkDk1NInQqmpJ1NPwoTPlhMEsggqPuzyh+9R38NTE6cIAddq4gqJvbRLvc4jtARoz1D123QuLPTN1Ie41xhHvSI5I2gROz20rfKp57DOuLov5nzlvbb5mH/Z6Q== chef-ci-2021-07-01
EOH
sudo mv /tmp/.ssh-chef-ci /home/chef-ci/.ssh/authorized_keys
# BUG FIX: this was `chown 0600` (owner uid 600); authorized_keys must be
# mode 0600 for sshd to accept it.
sudo chmod 0600 /home/chef-ci/.ssh/authorized_keys
sudo chown -R "chef-ci:" /home/chef-ci
# Add chef-ci to sudoers
cat << EOH > /tmp/chef-ci-sudoer
# The chef-ci user is used by InSpec to validate the system
chef-ci ALL=(ALL) NOPASSWD:ALL
EOH
sudo mv /tmp/chef-ci-sudoer /etc/sudoers.d/10-chef-ci
sudo chmod 0440 /etc/sudoers.d/10-chef-ci
# Add chef-ci to sshd_config in hardened image
if [[ "${hardened_security}" == "true" ]]; then
  sudo sed -i -r "s/^AllowUsers (.*)/AllowUsers \\1 chef-ci/" /etc/ssh/sshd_config
  sudo chmod 0644 /etc/ssh/sshd_config
  sudo systemctl restart sshd.service
fi
| true
|
e8349e768fb11329c137994446cae226504881b7
|
Shell
|
aduong/bash-libs
|
/colruler.sh
|
UTF-8
| 544
| 3.828125
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
#
# prints a column ruler in the terminal allowing the user to easily determine
# column positioning

# print_ruler [WIDTH]
#   Print a ruler of WIDTH columns (defaults to the terminal width, or 80
#   when tput cannot determine it). One row is printed per decimal digit of
#   WIDTH, most-significant first; the final row cycles 0-9 per column.
print_ruler() {
  local cols=${1:-$(tput cols 2>/dev/null || echo 80)}
  local output_row=0
  local c i chunk_size
  # Count the decimal digits of cols: one output row per digit.
  for ((c = cols; c > 0; c /= 10)); do
    output_row=$((output_row + 1))
  done
  while [[ $output_row -gt 0 ]]; do
    chunk_size=$((10 ** (output_row - 1)))
    for ((i = 0; i * chunk_size < cols; i += 1)); do
      # Width of this cell: a full chunk, or whatever remains at the edge.
      c=$((cols - i * chunk_size))
      if [[ $c -gt $chunk_size ]]; then
        c=$chunk_size
      fi
      printf "%-${c}d" $((i % 10))
    done
    printf '\n'
    output_row=$((output_row - 1))
  done
}

print_ruler "$@"
| true
|
71ead03386f81894ecb092648c811163d86c8ab4
|
Shell
|
greshem/bin
|
/qianlong_startup.sh
|
UTF-8
| 615
| 2.59375
| 3
|
[] |
no_license
|
#!/bin/bash
# First-boot provisioning wizard for the qianlong appliance. Each setup
# step is an interactive helper script that is retried until it succeeds.

# run_until_ok CMD [ARGS...]: re-run CMD until it exits 0.
# Replaces four copy-pasted `while [ 1 ] ... break` loops.
run_until_ok() {
  until "$@"; do
    :
  done
}

#change root passwd
run_until_ok /bin/change_root_passwd.sh
######################################
#add qianlong user.
echo "user add "
run_until_ok /bin/user_add.sh
######################################
# slave or master
echo "slave or master"
run_until_ok /bin/slave_or_master.sh
######################################
#net card setting.
# The prompt is printed on every attempt, matching the original loop
# (it echoed inside the retry loop, unlike the other steps).
net_card_setup() {
  echo "net card settting "
  /bin/setup.sh
}
run_until_ok net_card_setup
# Bring the new network configuration online.
service network restart
| true
|
3ded6dae02cc3c6909f42d3c243e16d5d27a2fe8
|
Shell
|
rissson/dotshabka
|
/soxin/programs/zsh/init-extra.zsh
|
UTF-8
| 17,943
| 3.40625
| 3
|
[
"MIT"
] |
permissive
|
#####################################################################
# options
#####################################################################
# Interactive-only shell options; each setopt is documented from
# zshoptions(1).
if [[ -o interactive ]]; then
  # If a completion is performed with the cursor within a word, and a full
  # completion is inserted, the cursor is moved to the end of the word. That is,
  # the cursor is moved to the end of the word if either a single match is
  # inserted or menu completion is performed.
  setopt alwaystoend
  # Automatically use menu completion after the second consecutive request for
  # completion, for example by pressing the tab key repeatedly. This option is
  # overridden by MENU_COMPLETE.
  setopt automenu
  # Any parameter that is set to the absolute name of a directory immediately
  # becomes a name for that directory, that will be used by the ‘%~’ and related
  # prompt sequences, and will be available when completion is performed on a
  # word starting with ‘~’. (Otherwise, the parameter must be used in the form
  # ‘~param’ first.).
  setopt autonamedirs
  # Make cd push the old directory onto the directory stack.
  setopt autopushd
  # If this is set, zsh sessions will append their history list to the history
  # file, rather than replace it. Thus, multiple parallel zsh sessions will all
  # have the new entries from their history lists added to the history file, in
  # the order that they exit. The file will still be periodically re-written to
  # trim it when the number of lines grows 20% beyond the value specified by
  # $SAVEHIST (see also the HIST_SAVE_BY_COPY option).
  setopt appendhistory
  # If the argument to a cd command (or an implied cd with the AUTO_CD option
  # set) is not a directory, and does not begin with a slash, try to expand the
  # expression as if it were preceded by a ‘~’ (see Filename Expansion).
  setopt cdablevars
  # If unset, the cursor is set to the end of the word if completion is started.
  # Otherwise it stays there and completion is done from both ends.
  setopt completeinword
  # Save each command’s beginning timestamp (in seconds since the epoch) and the
  # duration (in seconds) to the history file. The format of this prefixed data
  # is:
  # : <beginning time>:<elapsed seconds>;<command>
  setopt extendedhistory
  # If this option is unset, output flow control via start/stop characters
  # (usually assigned to ^S/^Q) is disabled in the shell’s editor.
  setopt noflowcontrol
  # If the internal history needs to be trimmed to add the current command line,
  # setting this option will cause the oldest history event that has a duplicate
  # to be lost before losing a unique event from the list. You should be sure to
  # set the value of HISTSIZE to a larger number than SAVEHIST in order to give
  # you some room for the duplicated events, otherwise this option will behave
  # just like HIST_IGNORE_ALL_DUPS once the history fills up with unique events.
  setopt histexpiredupsfirst
  # If a new command line being added to the history list duplicates an older
  # one, the older command is removed from the list (even if it is not the
  # previous event).
  setopt hist_ignore_all_dups
  # Do not enter command lines into the history list if they are duplicates of
  # the previous event.
  setopt histignoredups
  # Remove command lines from the history list when the first character on the
  # line is a space, or when one of the expanded aliases contains a leading
  # space. Only normal aliases (not global or suffix aliases) have this
  # behaviour. Note that the command lingers in the internal history until the
  # next command is entered before it vanishes, allowing you to briefly reuse or
  # edit the line. If you want to make it vanish right away without entering
  # another command, type a space and press return.
  setopt histignorespace
  # Whenever the user enters a line with history expansion, don't execute the
  # line directly; instead, perform history expansion and reload the line into
  # the editing buffer.
  setopt histverify
  # This options works like APPEND_HISTORY except that new history lines are
  # added to the $HISTFILE incrementally (as soon as they are entered), rather
  # than waiting until the shell exits. The file will still be periodically
  # re-written to trim it when the number of lines grows 20% beyond the value
  # specified by $SAVEHIST (see also the HIST_SAVE_BY_COPY option).
  setopt incappendhistory
  # Allow comments even in interactive shells.
  setopt interactivecomments
  # This is a login shell. If this option is not explicitly set, the shell
  # becomes a login shell if the first character of the argv[0] passed to the
  # shell is a ‘-’.
  setopt login
  # List jobs in the long format by default.
  setopt longlistjobs
  # Perform implicit tees or cats when multiple redirections are attempted (see
  # Redirection).
  setopt multios
  # On an ambiguous completion, instead of listing possibilities or beeping,
  # insert the first match immediately. Then when completion is requested again,
  # remove the first match and insert the second match, etc. When there are no
  # more matches, go back to the first one again. reverse-menu-complete may be
  # used to loop through the list in the other direction. This option overrides
  # AUTO_MENU.
  setopt nomenucomplete
  # If set, parameter expansion, command substitution and arithmetic expansion
  # are performed in prompts. Substitutions within prompts do not affect the
  # command status.
  setopt promptsubst
  # Don't push multiple copies of the same directory onto the directory stack.
  setopt pushdignoredups
  # Exchanges the meanings of ‘+’ and ‘-’ when used with a number to specify a
  # directory in the stack.
  setopt pushdminus
  # Do not print the directory stack after pushd or popd.
  setopt pushdsilent
  # Have pushd with no arguments act like ‘pushd $HOME’.
  setopt pushdtohome
  # This option both imports new commands from the history file, and also causes
  # your typed commands to be appended to the history file (the latter is like
  # specifying INC_APPEND_HISTORY, which should be turned off if this option is
  # in effect). The history lines are also output with timestamps ala
  # EXTENDED_HISTORY (which makes it easier to find the spot where we left off
  # reading the file after it gets re-written).
  #
  # By default, history movement commands visit the imported lines as well as the
  # local lines, but you can toggle this on and off with the set-local-history
  # zle binding. It is also possible to create a zle widget that will make some
  # commands ignore imported commands, and some include them.
  #
  # If you find that you want more control over when commands get imported, you
  # may wish to turn SHARE_HISTORY off, INC_APPEND_HISTORY or
  # INC_APPEND_HISTORY_TIME (see above) on, and then manually import commands
  # whenever you need them using ‘fc -RI’.
  setopt sharehistory
  # Use the zsh line editor. Set by default in interactive shells connected to a
  # terminal.
  setopt zle
fi
# Hook direnv into the shell so per-directory .envrc environments load.
# NOTE(review): previously flagged by the author as not working — verify
# the hook actually fires in this setup.
eval "$(direnv hook zsh)"
#####################################################################
# exports
#####################################################################
# The @name@ placeholders below are presumably substituted at build time
# (Nix-style template) — confirm against the module that renders this file.
# setup fzf
if [[ -o interactive ]]; then
  export ENHANCD_FILTER=@fzf_bin@
fi
# export the code path
export CODE_PATH="@home_path@/code"
if [[ "$OSTYPE" = linux* ]]; then
  # GPG_TTY is needed for gpg with pinentry-curses
  export GPG_TTY="$(tty)"
  # use chromium as the karma's driver
  export CHROME_BIN="$(which chromium)"
fi
# Set MYFS to my filesystem
export MYFS="@home_path@/.local"
# Set the editor
export EDITOR=nvim
export SUDO_EDITOR=nvim
# Set the pager
export PAGER=@bat_bin@
export BAT_PAGER="@less_bin@"
# Set the language support
export LANG=en_US.UTF-8
export LC_ALL="${LANG}"
[[ -n "${LC_CTYPE}" ]] && unset LC_CTYPE
# load the Emscripten environment
pathprepend PATH "/usr/lib/emsdk"
# Anything got installed into MYFS?
pathprepend PATH "${MYFS}/bin"
if [[ -d "${MYFS}" ]]; then
  if [[ -d "${MYFS}/opt" ]]; then
    for dir in ${MYFS}/opt/*/bin; do
      pathappend PATH "${dir}"
    done
  fi
  # Make sure LD can find our files.
  pathappend LD_LIBRARY_PATH "${MYFS}/lib"
fi
# add any libexec directory
if [[ -d @out_dir@/libexec ]]; then
  for dir in @out_dir@/libexec/*; do
    pathappend PATH "${dir}"
  done
fi
# add cargo
pathprepend PATH "@home_path@/.cargo/bin"
#####################################################################
# colors
#####################################################################
# Defining some color
export FG_CLEAR="\033[0m"
# Regular ForeGround colors
export FG_BLACK="\033[0;30m"
export FG_RED="\033[0;31m"
export FG_GREEN="\033[0;32m"
export FG_YELLOW="\033[0;33m"
export FG_BLUE="\033[0;34m"
# NOTE(review): "MAGNETA" is a typo for MAGENTA, but these names are
# exported API — renaming would break anything that consumes them.
export FG_MAGNETA="\033[0;35m"
export FG_CYAN="\033[0;36m"
export FG_WHITE="\033[0;37m"
# Bold ForeGround colors
export FG_BLACK_B="\033[1;30m"
export FG_RED_B="\033[1;31m"
export FG_GREEN_B="\033[1;32m"
export FG_YELLOW_B="\033[1;33m"
export FG_BLUE_B="\033[1;34m"
export FG_MAGNETA_B="\033[1;35m"
export FG_CYAN_B="\033[1;36m"
export FG_WHITE_B="\033[1;37m"
# Background colors
export BG_BLACK="\033[40m"
export BG_RED="\033[41m"
export BG_GREEN="\033[42m"
export BG_YELLOW="\033[43m"
export BG_BLUE="\033[44m"
export BG_MAGNETA="\033[45m"
export BG_CYAN="\033[46m"
export BG_WHITE="\033[47m"
# GOOD, WARN and ERROR colors
export GOOD="${FG_GREEN_B}"
export WARN="${FG_YELLOW_B}"
export ERROR="${FG_RED_B}"
# Find the option for using colors in ls, depending on the version
if [[ -o interactive ]]; then
  if [[ "$OSTYPE" == netbsd* ]]; then
    # On NetBSD, test if "gls" (GNU ls) is installed (this one supports colors);
    # otherwise, leave ls as is, because NetBSD's ls doesn't support -G
    gls --color -d . &>/dev/null && alias ls='gls --color=tty'
  elif [[ "$OSTYPE" == openbsd* ]]; then
    # On OpenBSD, "gls" (ls from GNU coreutils) and "colorls" (ls from base,
    # with color and multibyte support) are available from ports. "colorls"
    # will be installed on purpose and can't be pulled in by installing
    # coreutils, so prefer it to "gls".
    gls --color -d . &>/dev/null && alias ls='gls --color=tty'
    colorls -G -d . &>/dev/null && alias ls='colorls -G'
  elif [[ "$OSTYPE" == darwin* ]]; then
    # this is a good alias, it works by default just using $LSCOLORS
    ls -G . &>/dev/null && alias ls='ls -G'
    # only use coreutils ls if there is a dircolors customization present ($LS_COLORS or .dircolors file)
    # otherwise, gls will use the default color scheme which is ugly af
    [[ -n "$LS_COLORS" || -f "@home_path@/.dircolors" ]] && gls --color -d . &>/dev/null && alias ls='gls --color=tty'
  else
    # For GNU ls, we use the default ls color theme. They can later be overwritten by themes.
    if [[ -z "$LS_COLORS" ]]; then
      (( $+commands[dircolors] )) && eval "$(dircolors -b)"
    fi
    ls --color -d . &>/dev/null && alias ls='ls --color=tty' || { ls -G . &>/dev/null && alias ls='ls -G' }
    # Take advantage of $LS_COLORS for completion as well.
    zstyle ':completion:*' list-colors "${(s.:.)LS_COLORS}"
  fi
fi
#####################################################################
# completion
#####################################################################
if [[ -o interactive ]]; then
  # allow selection of the item in the autocomplete menu
  zstyle ':completion:*:*:*:*:*' menu select
  # Case-insensitive (and -_ interchangeable) matching, then substring.
  zstyle ':completion:*' matcher-list 'm:{a-zA-Z-_}={A-Za-z_-}' 'r:|=*' 'l:|=* r:|=*'
  zstyle ':completion:*' list-colors ''
  zstyle ':completion:*:*:kill:*:processes' list-colors '=(#b) #([0-9]#) ([0-9a-z-]#)*=01;34=0=01'
  # NOTE(review): the line below duplicates the previous zstyle — harmless,
  # but one of the two can be removed.
  zstyle ':completion:*:*:kill:*:processes' list-colors '=(#b) #([0-9]#) ([0-9a-z-]#)*=01;34=0=01'
  zstyle ':completion:*:*:*:*:processes' command "ps -u $USER -o pid,user,comm -w -w"
  # disable named-directories autocompletion
  zstyle ':completion:*:cd:*' tag-order local-directories directory-stack path-directories
  # Don't complete uninteresting users
  zstyle ':completion:*:*:*:users' ignored-patterns \
    adm amanda apache at avahi avahi-autoipd beaglidx bin cacti canna \
    clamav daemon dbus distcache dnsmasq dovecot fax ftp games gdm \
    gkrellmd gopher hacluster haldaemon halt hsqldb ident junkbust kdm \
    ldap lp mail mailman mailnull man messagebus mldonkey mysql nagios \
    named netdump news nfsnobody nobody nscd ntp nut nx obsrun openvpn \
    operator pcap polkitd postfix postgres privoxy pulse pvm quagga radvd \
    rpc rpcuser rpm rtkit scard shutdown squid sshd statd svn sync tftp \
    usbmux uucp vcsa wwwrun xfs '_*'
  # ... unless we really want to.
  zstyle '*' single-ignored show
  # Show dots while doing completion
  expand-or-complete-with-dots() {
    # toggle line-wrapping off and back on again
    [[ -n "$terminfo[rmam]" && -n "$terminfo[smam]" ]] && echoti rmam
    print -Pn "%{%F{red}......%f%}"
    [[ -n "$terminfo[rmam]" && -n "$terminfo[smam]" ]] && echoti smam
    zle expand-or-complete
    zle redisplay
  }
  zle -N expand-or-complete-with-dots
  bindkey "^I" expand-or-complete-with-dots
fi
#####################################################################
# directories
#####################################################################
# Navigation shortcuts; these alias names are part of the user-facing
# interface, so keep them stable.
if [[ -o interactive ]]; then
  alias ..='cd ..'
  alias cd..='cd ..'
  alias cd...='cd ../..'
  alias cd....='cd ../../..'
  alias cd.....='cd ../../../..'
  alias cd/='cd /'
  alias md='mkdir -p'
  alias rd=rmdir
  alias d='dirs -v | head -10'
fi
#####################################################################
# Key bindings
#####################################################################
if [[ -o interactive ]]; then
  # http://zsh.sourceforge.net/Doc/Release/Zsh-Line-Editor.html
  # http://zsh.sourceforge.net/Doc/Release/Zsh-Line-Editor.html#Zle-Builtins
  # http://zsh.sourceforge.net/Doc/Release/Zsh-Line-Editor.html#Standard-Widgets
  # Make sure that the terminal is in application mode when zle is active, since
  # only then values from $terminfo are valid
  if (( ${+terminfo[smkx]} )) && (( ${+terminfo[rmkx]} )); then
    function zle-line-init() {
      echoti smkx
    }
    function zle-line-finish() {
      echoti rmkx
    }
    zle -N zle-line-init
    zle -N zle-line-finish
  fi
  bindkey -v # Use vim key bindings
  export KEYTIMEOUT=1 # kill ZSH's lag when ESC is pressed in vim mode
  bindkey '^r' history-incremental-search-backward # [Ctrl-r] - Search backward incrementally for a specified string. The string may begin with ^ to anchor the search to the beginning of the line.
  if [[ "${terminfo[kpp]}" != "" ]]; then
    bindkey "${terminfo[kpp]}" up-line-or-history # [PageUp] - Up a line of history
  fi
  if [[ "${terminfo[knp]}" != "" ]]; then
    bindkey "${terminfo[knp]}" down-line-or-history # [PageDown] - Down a line of history
  fi
  # start typing + [Up-Arrow] - fuzzy find history forward
  [[ "${terminfo[kcuu1]}" != "" ]] && bindkey "${terminfo[kcuu1]}" history-substring-search-up
  # start typing + [Down-Arrow] - fuzzy find history backward
  [[ "${terminfo[kcud1]}" != "" ]] && bindkey "${terminfo[kcud1]}" history-substring-search-down
  if [[ "${terminfo[khome]}" != "" ]]; then
    bindkey "${terminfo[khome]}" beginning-of-line # [Home] - Go to beginning of line
  fi
  if [[ "${terminfo[kend]}" != "" ]]; then
    bindkey "${terminfo[kend]}" end-of-line # [End] - Go to end of line
  fi
  bindkey '^A' beginning-of-line # [Ctrl-A] - Go to beginning of line
  bindkey '^E' end-of-line # [Ctrl-E] - Go to end of line
  bindkey ' ' magic-space # [Space] - do history expansion
  bindkey '^[[1;5C' forward-word # [Ctrl-RightArrow] - move forward one word
  bindkey '^[[1;5D' backward-word # [Ctrl-LeftArrow] - move backward one word
  if [[ "${terminfo[kcbt]}" != "" ]]; then
    bindkey "${terminfo[kcbt]}" reverse-menu-complete # [Shift-Tab] - move through the completion menu backwards
  fi
  bindkey '^?' backward-delete-char # [Backspace] - delete backward
  if [[ "${terminfo[kdch1]}" != "" ]]; then
    bindkey "${terminfo[kdch1]}" delete-char # [Delete] - delete forward
  else
    # Fallback escape sequences when terminfo has no Delete entry.
    bindkey "^[[3~" delete-char
    bindkey "^[3;5~" delete-char
    bindkey "\e[3~" delete-char
  fi
  # Edit the current command line in $EDITOR
  autoload -U edit-command-line
  zle -N edit-command-line
  bindkey -M vicmd 'e' edit-command-line
  #####################################################################
  # misc
  #####################################################################
  ## smart urls
  autoload -U url-quote-magic
  zle -N self-insert url-quote-magic
  ## file rename magick
  bindkey "^y" copy-prev-shell-word
fi
#####################################################################
# Profile support
#####################################################################
# TODO: create a Derivation for the profile support. Make it optional and have
# ZSH work with or without it.
# Derive ACTIVE_PROFILE/ACTIVE_STORY from the focused i3 workspace when it
# is named "profile@story" and the variables are not already set.
if [[ -z "${ACTIVE_PROFILE}" || -z "${ACTIVE_STORY}" ]]; then
  if [[ -n "${DISPLAY}" ]] && have i3-msg; then
    active_workspace="$(i3-msg -t get_workspaces 2>/dev/null | @jq_bin@ -r '.[] | if .focused == true then .name else empty end')"
    if [[ "${active_workspace}" =~ '.*@.*' ]]; then
      [[ -z "${ACTIVE_PROFILE}" ]] && export ACTIVE_PROFILE="$(echo "${active_workspace}" | cut -d@ -f1)"
      [[ -z "${ACTIVE_STORY}" ]] && export ACTIVE_STORY="$(echo "${active_workspace}" | cut -d@ -f2)"
    fi
    unset active_workspace
  fi
fi
# load the active profile only if one is available
if [[ -n "${ACTIVE_PROFILE}" ]] && [[ -r "@home_path@/.zsh/profiles/${ACTIVE_PROFILE}.zsh" ]]; then
  sp "${ACTIVE_PROFILE}"
fi
#####################################################################
# Welcome notes
#####################################################################
if [[ -o interactive ]]; then
  @fortune_bin@ -c
fi
| true
|
d6667283f4aa25e61284303c007da2a0aaf22a17
|
Shell
|
mrtwistor/Nlang
|
/compile.sh
|
UTF-8
| 2,111
| 3.890625
| 4
|
[] |
no_license
|
#!/bin/bash
#Compiler for the N programming language
#Version: 0.0
#Author: Jonathan Holland
#License: GPL v 2
#Usage: ./compile.sh FILENAME
#Compiler for the N language, which is defined as the language
#that this compiler compiles (see comments and gcc flags).
#To compile an executable, write a separate main function, compile with -c, and
#combine with the linker. For example, the two included examples are fib.c
#and main-fib.c. First, compile with
# ./compile.sh fib.c
#Then,
# gcc -o fib fib.c.N.o main-fib.c
#This last item, to build the runtime environment, will generate warnings
#because gcc needs to perform implicit function declarations.
#Ignore those warnings.

# BUG FIX: require a readable source-file argument. Previously an empty $1
# made the unquoted sed read stdin and hang.
if [ $# -lt 1 ] || [ ! -r "$1" ]; then
	echo "Usage: $0 FILENAME" >&2
	exit 2
fi
#The program may not use lower case letters, numbers or most special
#characters (outside of comments):
if [[ $(sed "s/\/\/.*$//" "$1" | sed "s/##.*$//" | sed -n "/[#\"\\'@*$\-+=&^%\!_~\`\.<>\/a-z0-9]/p") ]]; then
	# BUG FIX: report failures on stderr and exit non-zero (was `exit 0`,
	# which made every caller think the "compile" succeeded).
	echo "Failure: Illegal symbols detected" >&2
	exit 1
#No sneaky casts (these *should* be rejected by gcc...):
elif [[ $(sed "s/\/\/.*$//" "$1" | sed "s/##.*$//" | sed -n "/)[^ \n,;:?){]/p") ]]; then
	echo "Failure: Illegal syntax detected" >&2
	exit 1
else
	#Insert header, and output C file
	#The header defines the type N, and the constructors S and O.
	# It also defines equality conditional
	# Q(a,b,c,d) that returns c if a==b and d otherwise.
	# Note this is the only way equality may be tested, since the
	# symbol '=' was rejected by sed.
	sed "s/\/\/.*$//" "$1" | sed "s/##//" | sed "1s/^/\
#define R return\n\
#define Q(a,b,c,d) (a==b)?c:d\n\
int _num_succ;\n\
#define N unsigned\n\
N S(N A){_num_succ++;R++A;}\n\
N O;\n/" > "$1.N.c"
	#Here _num_succ is for testing. It is the number of times
	#the successor function was called.
	#Now the key part, compile the emmitted C under gcc, with *absolutely no warnings*:
	gcc -c -Wall -Werror "$1.N.c"
	#Cheat code: the special character sequence ## allows ordinary
	#C code to be embedded. (The rest of the line will be ignored
	#by sed, but processed by gcc.) But it's cheating, so only use
	#for debugging.
fi
| true
|
ba5cce319505b2f04d7509b841d37cf3b2f82174
|
Shell
|
praveenclouddot/knowledgescrape
|
/sshscript
|
UTF-8
| 1,322
| 3.671875
| 4
|
[] |
no_license
|
#!/bin/bash
set -e
# The purpose of this script is to generate tiller certificates for users who are part of the cluster-admin group, since they will have access to the CA key
# Generates helm home folder and copies CA cert and key as well as ssl.conf to generate our cert without user interaction
mkdir ~/helm-preprod
cp CA/tiller-admin/ca.cert.pem ~/helm-preprod/ca.pem
cp CA/tiller-admin/ca.key.pem ~/helm-preprod/ca.key.pem
cp CA/ssl.conf ~/helm-preprod/ssl.conf
echo "Generated ~/helm-preprod"
echo "Copied CA key and cert to ~/helm-preprod"
# BUG FIX: under `set -e`, a failure in any later step (e.g. the sudo signing
# call) used to leave the copied CA *private key* in the user's home.
# Always remove it on exit, whatever the exit path.
cleanup_ca_key() { rm -f ~/helm-preprod/ca.key.pem; }
trap cleanup_ca_key EXIT
# Switch to the newly created folder and create our key and csr using the ssl.conf file we just copied
cd ~/helm-preprod
openssl genrsa -out ./key.pem 4096
openssl req -key key.pem -new -sha256 -out helm.csr.pem -config ssl.conf
echo "Generated user key and csr"
echo "Signing cert. This requires sudo permissions"
# Sign our cert using the copied CA key and cert
sudo openssl x509 -req -CA ca.pem -CAkey ca.key.pem -CAcreateserial -in helm.csr.pem -out cert.pem -days 365
echo "Removing CA key..."
# Remove CA key since this is a security risk (the trap above also covers failure paths)
rm -f ca.key.pem
echo "Generating helm-preprod alias"
# Generate an alias called "helm-preprod" that sets HELM_HOME to ~/helm-preprod
echo "alias helm-preprod=\"export HELM_HOME=$HOME/helm-preprod\"" >> ~/.bashrc
echo "DONE"
| true
|
d16cb1fe97dc2c337a8fa9a3a8365354ff67836d
|
Shell
|
mathiascode/pkgbuilds
|
/sdlpop-git/PKGBUILD
|
UTF-8
| 1,234
| 2.96875
| 3
|
[] |
no_license
|
# PKGBUILD for the development (git) build of SDLPoP, an open-source port
# of Prince of Persia.
pkgname=sdlpop-git
pkgver=1.20.r11.g65bc93e
pkgrel=1
pkgdesc="An open-source port of Prince of Persia"
arch=('i686' 'x86_64')
license=('GPL3')
url="https://github.com/NagyD/SDLPoP"
depends=('sdl2_image' 'sdl2_mixer')
makedepends=('git')
provides=("${pkgname%-git}")
conflicts=("${pkgname%-git}")
source=("${pkgname%-git}::git+https://github.com/NagyD/SDLPoP.git"
        "${pkgname%-git}.sh")
sha256sums=('SKIP'
            'cb7b7923dcfb68520c7e738e6e8bd503f56f2775b28022285397284c0c428991')

# Derive the package version from the latest git tag,
# e.g. "v1.20-11-g65bc93e" becomes "1.20.r11.g65bc93e".
pkgver() {
    cd "$srcdir/${pkgname%-git}"
    git describe --long --tags | sed 's/^v//;s/\([^-]*-g\)/r\1/;s/-/./g'
}

# Render the .desktop file from its template, pointing it at /opt/sdlpop.
prepare() {
    cd "$srcdir/${pkgname%-git}/src"
    sed -e 's|$ROOT|/opt/sdlpop|' SDLPoP.desktop.template > SDLPoP.desktop
}

build() {
    cd "$srcdir/${pkgname%-git}/src"
    export CFLAGS+=" $CPPFLAGS"
    make
}

# Install the game under /opt plus a launcher wrapper and desktop entry.
package() {
    cd "$srcdir/${pkgname%-git}"
    # world-writable for save/load games, config, etc.
    install -dm757 "$pkgdir/opt/${pkgname%-git}"
    install -m755 prince "$pkgdir/opt/${pkgname%-git}"
    cp -r data doc mods SDLPoP.ini "$pkgdir/opt/${pkgname%-git}"
    install -Dm755 "$srcdir/${pkgname%-git}.sh" "$pkgdir/usr/bin/prince"
    install -Dm644 src/SDLPoP.desktop \
        "$pkgdir/usr/share/applications/${pkgname%-git}.desktop"
}
| true
|
eb485c5b9ad995fa178679c037a2650138219056
|
Shell
|
Y-Suzaki/aws-infra-snippet
|
/cloudformation/lambda-layer-aws-sam/deploy.sh
|
UTF-8
| 1,075
| 2.9375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Package and deploy a Lambda layer (JSON logger) + function via AWS SAM /
# CloudFormation, then remove the uploaded and locally generated artifacts.
# BUG FIX: without -e a failed `package` step was still deployed; fail fast,
# and do the local cleanup via a trap so it happens on every exit path.
set -euo pipefail

s3_bucket="cf-templates-461spye58s2i-us-west-2"

# Remove the unnecessary local files and directory, even on failure.
cleanup() {
    rm -rf python
    rm -f aws-sam-deploy.yaml lambda-layer.zip
}
trap cleanup EXIT

# Create a zip file of lambda layer. The directory path must be python/logger/lambda_json_logger.py.
mkdir -p python
cp -r logger python
zip -r lambda-layer.zip python

# Generate a cloud-formation template from a sam template.
aws cloudformation package \
    --template-file aws-sam.yaml \
    --output-template-file aws-sam-deploy.yaml \
    --s3-bucket "${s3_bucket}" \
    --s3-prefix serverless-function \
    --region us-west-2 \
    --profile default

# Deploy a lambda layer and lambda function with the generated template.
aws cloudformation deploy \
    --template-file aws-sam-deploy.yaml \
    --stack-name ys-dev-web-lambda-layer-json-logger \
    --capabilities CAPABILITY_IAM \
    --region us-west-2 \
    --profile default

# Remove the uploaded s3 files (only reached after a successful deploy).
aws s3 rm "s3://${s3_bucket}/serverless-function/" \
    --region us-west-2 \
    --profile default \
    --recursive
| true
|
6fde8a72f22896a208bec271dd8a4669c13e3115
|
Shell
|
henriquegrando/stripe-subscription-lambda
|
/setupAPI.sh
|
UTF-8
| 4,768
| 3.296875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Wire a Lambda function behind a new API Gateway REST API: create the API,
# a resource, a POST method + Lambda integration, deploy to the "prod" stage,
# and grant API Gateway permission to invoke the function.
#
# Required variables (loaded from .env): AWS_LAMBDA_FUNCTIONNAME,
# AWS_LAMBDA_REGIONS, AWS_ACCOUNT_ID.

# Get variables
source .env

# Apply a JSON accessor (e.g. "['items'][0]['id']") to the JSON document on
# stdin and print the result.
json_field() {
    python -c "import sys, json; print(json.load(sys.stdin)$1)"
}

# Exit with status 1 and message $2 when the required value $1 is empty.
# BUG FIX: the original used `[ -n ${VAR} ]`; with an empty unquoted variable
# that degenerates to the always-true one-argument form of test, so none of
# the error branches could ever fire — and a bare `exit` reported success.
require() {
    if [ -z "$1" ]; then
        echo "$2"
        exit 1
    fi
}

# Create the API
API_ID=$(aws apigateway create-rest-api \
    --name "${AWS_LAMBDA_FUNCTIONNAME}-api" \
    --description "Rest API for ${AWS_LAMBDA_FUNCTIONNAME}" | json_field "['id']")
require "${API_ID}" "REST API creation failed!"
echo "REST API created. ID: ${API_ID}"

# Get ID of root resource
ROOT_ID=$(aws apigateway get-resources \
    --rest-api-id "${API_ID}" | json_field "['items'][0]['id']")
require "${ROOT_ID}" "Failed to get root resource ID!"
echo "Root resource ID: ${ROOT_ID}"

# Create a resource
RESOURCE_ID=$(aws apigateway create-resource \
    --rest-api-id "${API_ID}" \
    --parent-id "${ROOT_ID}" \
    --path-part "${AWS_LAMBDA_FUNCTIONNAME}-manager" | json_field "['id']")
require "${RESOURCE_ID}" "New resource creation failed!"
echo "New resource created. ID: ${RESOURCE_ID}"

# Create a POST method on the resource
METHOD=$(aws apigateway put-method \
    --rest-api-id "${API_ID}" \
    --resource-id "${RESOURCE_ID}" \
    --http-method POST \
    --authorization-type NONE | json_field "['httpMethod']")
require "${METHOD}" "POST method creation failed!"
echo "POST method created for the resource."

# Set the lambda function as the destination for the POST method
DESTINATION=$(aws apigateway put-integration \
    --rest-api-id "${API_ID}" \
    --resource-id "${RESOURCE_ID}" \
    --http-method POST \
    --type AWS \
    --integration-http-method POST \
    --uri "arn:aws:apigateway:${AWS_LAMBDA_REGIONS}:lambda:path/2015-03-31/functions/arn:aws:lambda:${AWS_LAMBDA_REGIONS}:${AWS_ACCOUNT_ID}:function:${AWS_LAMBDA_FUNCTIONNAME}/invocations" | json_field "['uri']")
require "${DESTINATION}" "POST method destination failed!"
echo "${AWS_LAMBDA_FUNCTIONNAME} set as the destination for POST."

# Set return type of POST method response to JSON
RESPONSE1=$(aws apigateway put-method-response \
    --rest-api-id "${API_ID}" \
    --resource-id "${RESOURCE_ID}" \
    --http-method POST \
    --status-code 200 \
    --response-models "{\"application/json\": \"Empty\"}" | json_field "['statusCode']")
require "${RESPONSE1}" "POST method response type setting failed!"
echo "POST method response set to JSON."

# Set return type of POST method integration response to JSON
RESPONSE2=$(aws apigateway put-integration-response \
    --rest-api-id "${API_ID}" \
    --resource-id "${RESOURCE_ID}" \
    --http-method POST \
    --status-code 200 \
    --response-templates "{\"application/json\": \"\"}" | json_field "['statusCode']")
require "${RESPONSE2}" "POST method integration response type setting failed!"
echo "POST method integration response set to JSON."

# Deploy the API
DEPLOYMENT_ID=$(aws apigateway create-deployment \
    --rest-api-id "${API_ID}" \
    --stage-name prod | json_field "['id']")
require "${DEPLOYMENT_ID}" "API deployment failed!"
echo "API deployed. Deployment ID: ${DEPLOYMENT_ID}"

# Grant permissions for the API Gateway to invoke the Lambda function
# NOTE(review): the source-arn region is hard-coded to us-east-1 while the
# integration above uses ${AWS_LAMBDA_REGIONS} — kept as-is, but confirm the
# mismatch is intentional.
STATEMENT1=$(aws lambda add-permission \
    --function-name "${AWS_LAMBDA_FUNCTIONNAME}" \
    --statement-id "${AWS_LAMBDA_FUNCTIONNAME}-apigateway-test" \
    --action lambda:InvokeFunction \
    --principal apigateway.amazonaws.com \
    --source-arn "arn:aws:execute-api:us-east-1:${AWS_ACCOUNT_ID}:${API_ID}/*/POST/${AWS_LAMBDA_FUNCTIONNAME}-manager" | json_field "['Statement']")
require "${STATEMENT1}" "Failed to grant API Gateway permissions!"
echo "API Gateway permissions granted."

# Grant permissions for the deployed API to invoke the Lambda function
STATEMENT2=$(aws lambda add-permission \
    --function-name "${AWS_LAMBDA_FUNCTIONNAME}" \
    --statement-id "${AWS_LAMBDA_FUNCTIONNAME}-apigateway-prod" \
    --action lambda:InvokeFunction \
    --principal apigateway.amazonaws.com \
    --source-arn "arn:aws:execute-api:us-east-1:${AWS_ACCOUNT_ID}:${API_ID}/prod/POST/${AWS_LAMBDA_FUNCTIONNAME}-manager" | json_field "['Statement']")
require "${STATEMENT2}" "Failed to grant deployed API permissions!"
echo "Deployed API permissions granted."

echo "Success! REST URL: https://${API_ID}.execute-api.${AWS_LAMBDA_REGIONS}.amazonaws.com/prod/${AWS_LAMBDA_FUNCTIONNAME}-manager"
| true
|
6263e89e5d56907c1f8a377733d8af7d84c3ad9b
|
Shell
|
aws-amplify/amplify-flutter
|
/packages/authenticator/amplify_authenticator/tool/generate_l10n.sh
|
UTF-8
| 2,111
| 3.203125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Generate localized string classes for amplify_authenticator: one
# `flutter gen-l10n` pass per ARB category, then Dart fixes/formatting
# over the generated output.

# License/import header prepended to every generated Dart file.
HEADER=$(cat <<EOF
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
import 'package:amplify_authenticator/amplify_authenticator.dart';
EOF
)

# Regenerate the country code tables consumed by the "countries" category.
dart run ./tool/generate_country_localization.dart

COUNTRY_OUTPUT_FILES=('lib/src/utils/country_code.dart' 'lib/src/l10n/country_resolver.dart')

OUTPUT_DIR=lib/src/l10n/generated
# Parallel arrays: index i across all four arrays describes one of the six
# localization categories (titles, buttons, inputs, countries, messages,
# instructions).
TEMPLATES=('titles_en.arb' 'buttons_en.arb' 'inputs_en.arb' 'countries_en.arb' 'messages_en.arb' 'instructions_en.arb')
ARB_DIRS=('lib/src/l10n/src/titles' 'lib/src/l10n/src/buttons' 'lib/src/l10n/src/inputs' 'lib/src/l10n/src/countries' 'lib/src/l10n/src/messages' 'lib/src/l10n/src/instructions')
OUTPUT_CLASSES=('AuthenticatorTitleLocalizations' 'AuthenticatorButtonLocalizations' 'AuthenticatorInputLocalizations' 'AuthenticatorCountryLocalizations' 'AuthenticatorMessageLocalizations' 'AuthenticatorInstructionsLocalizations')
OUTPUT_FILES=('title_localizations.dart' 'button_localizations.dart' 'input_localizations.dart' 'country_localizations.dart' 'message_localizations.dart' 'instructions_localizations.dart')

for i in "${!TEMPLATES[@]}"; do
    ARB_DIR=${ARB_DIRS[i]}
    TEMPLATE=${TEMPLATES[i]}
    OUTPUT_CLASS=${OUTPUT_CLASSES[i]}
    OUTPUT_FILE=${OUTPUT_FILES[i]}
    echo "Generating localizations for \"${ARB_DIR}/${TEMPLATE}\""
    flutter gen-l10n \
        --arb-dir $ARB_DIR \
        --output-dir $OUTPUT_DIR \
        --template-arb-file $TEMPLATE \
        --output-localization-file $OUTPUT_FILE \
        --output-class $OUTPUT_CLASS \
        --header "$HEADER" \
        --use-deferred-loading \
        --no-synthetic-package \
        --required-resource-attributes \
        --nullable-getter
done

# Clean up generated files
# NOTE: Further applications of `fix` and `format --fix` may be required
# as changes from one could lead to required changes by the other.
# TODO(dnys1): Implement do/while mechanism to auto-fix all issues
dart fix --apply
dart format --fix $OUTPUT_DIR
for i in "${!COUNTRY_OUTPUT_FILES[@]}"; do
    dart format --fix "${COUNTRY_OUTPUT_FILES[i]}"
done
| true
|
5af5970414de126fa0e3014b51f7f4b4f6cfaa42
|
Shell
|
StoffePro/game-servers
|
/dockerfiles/cs/files/run_server.sh
|
UTF-8
| 760
| 2.875
| 3
|
[] |
no_license
|
#!/bin/bash
# Launch the Counter-Strike dedicated server.
#   $1 - map to load       (default: de_dust)
#   $2 - game mode         (default: normal; "gungame" is the alternative)

# Default the positional parameters via expansion instead of -z tests.
MAP="${1:-de_dust}"
MODE="${2:-normal}"

# Rewrite the mode-dependent settings in the server config files.
case "$MODE" in
    normal)
        sed -i 's/pb_autokill .*/pb_autokill 1/g' cs/cstrike/addons/podbot/podbot.cfg
        sed -i 's/mp_timelimit .*/mp_timelimit 20/g' cs/cstrike/server.cfg
        sed -i 's/^gungame.amxx/;gungame.amxx/g' cs/cstrike/addons/amxmodx/configs/plugins.ini
        ;;
    gungame)
        sed -i 's/pb_autokill .*/pb_autokill 0/g' cs/cstrike/addons/podbot/podbot.cfg
        sed -i 's/mp_timelimit .*/mp_timelimit 0/g' cs/cstrike/server.cfg
        sed -i 's/;gungame.amxx/gungame.amxx/g' cs/cstrike/addons/amxmodx/configs/plugins.ini
        ;;
esac

cd "${HOME}/cs/"
./hlds_run -game cstrike +maxplayers 24 +map "$MAP"
| true
|
efada410afb3b3dce5d8daf227cb195a78c0af49
|
Shell
|
dirkx/gpg-offline-batch-key-
|
/extract.sh
|
UTF-8
| 351
| 2.84375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
#
# Build the quirc QR-decoding library plus a small local decoder, then use
# it to recover key material from a QR-code image and print its checksum.
set -x
set -e
# Work in a fresh, PID-suffixed scratch directory.
mkdir extract.$$ && cd extract.$$
git clone https://github.com/dlbeer/quirc.git
( cd quirc && make )
# Link ../decode.c against quirc's objects and its test helper objects.
gcc -I quirc/tests -I quirc/lib -o decode \
    ../decode.c \
    quirc/lib/*.o quirc/tests/dbgutil.o \
    -ljpeg -lpng -lSDL
# Decode QR code.
./decode ~/Downloads/qr.png > key.raw
# Check checksum
openssl sha256 key.raw
| true
|
8c04cdbf19a3c17ab8fe299d7350a999c4ff4351
|
Shell
|
tonywalker1/cpp_tools
|
/install_date.sh
|
UTF-8
| 2,054
| 3.375
| 3
|
[] |
no_license
|
#!/bin/bash
#
# MIT License
#
# Copyright (c) 2018-2021 Tony Walker
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#

# Clone/update, build, and install Howard Hinnant's "date" library.
# BUG FIX: without -e, a failed clone/build still removed the previously
# installed version and ran `make install` on a broken tree. Fail fast.
set -euo pipefail

REPO="https://github.com/HowardHinnant/date.git"
SRC_DIR="/usr/local/src/date"
# Arrays keep the argument list intact instead of relying on word-splitting
# of an unquoted string.
CMAKE_ARGS=(-DUSE_SYSTEM_TZ_DB=ON -DBUILD_SHARED_LIBS=ON -DBUILD_TZ_LIB=ON -DCMAKE_CXX_STANDARD=17 -DCMAKE_BUILD_TYPE=Release)
MAKE_ARGS=(-j)

echo "***********************"
echo "*** Building Date ***"
echo "***********************"

if [ ! -d "$SRC_DIR" ]
then
    echo "* Creating the source directory"
    mkdir -p "$SRC_DIR"
fi
pushd "$SRC_DIR"

# Clone on first run; fast-forward pull on subsequent runs.
if [ ! -d "$SRC_DIR/.git" ]
then
    echo "* Cloning the repo"
    git clone "$REPO" "$SRC_DIR"
else
    echo "* Pulling changes"
    git pull --ff-only
fi

if [ ! -d "$SRC_DIR/build" ]
then
    echo "* Creating the build dir"
    mkdir "build"
fi
cd build

echo "* Building..."
cmake "${CMAKE_ARGS[@]}" ..
make "${MAKE_ARGS[@]}"

# Only reached after a successful build, so removing the old install is safe.
echo "* Removing previous versions"
rm -rf /usr/local/lib/cmake/date
rm -rf /usr/local/lib/libdate*
rm -rf /usr/local/include/date

echo "* Installing"
make install

# clean-up
popd
| true
|
2df8c6deae38a2c6d0c21683c6e016b379019ff4
|
Shell
|
boyska/freepto-usb-utils
|
/makefreepto
|
UTF-8
| 7,107
| 4.0625
| 4
|
[] |
no_license
|
#!/bin/bash
#uncomment the line below if you want to do heavy debug
#set -x

# Print command-line usage to stdout.
# NOTE: the heredoc delimiter is unquoted, so $0 expands to the script name;
# the heredoc body is user-facing output and must stay as-is.
makefreepto_usage() {
cat <<EOF
$0 [ -b ] [ -r ] [-f file_system_type ] [ -s ] [ -p luks_password ] [ -i image_path ] DEVICE
Options:
-b skip badblocks check
-r skip random filling (use only if you know what you are doing)
-R DEV use DEV as random source instead of urandom (i.e.: /dev/frandom)
-i IMAGE put IMAGE on DEVICE (default is binary.img)
-p set password (defaul is "freepto")
-s set secure random password (default is "freepto")
-f set filesystem type: ext4 or btrfs (with lzo) (default is "ext4")
Example:
sudo ./makefreepto -f ext4 -i freepto-lb_vX.X.X.img -p password /dev/sdX
EOF
}
### Output primitives {{{
# set color:
export LRED="\e[1;31m"
export LGREEN="\e[1;32m"
export Z="\e[0m"
# Print a fatal error ($1) on stderr and terminate the whole script with
# status 1. %b expands backslash escapes per argument, matching echo -e.
_fatal() {
    printf '%b[!]%b %b\n' "$LRED" "$1" "$Z" >&2
    exit 1
}
# Print a non-fatal error ($1) on stderr; execution continues.
_error() {
    printf '%b[-]%b %b\n' "$LRED" "$1" "$Z" >&2
}
# Print a success/progress message ($1) on stderr.
_ok() {
    printf '%b[+]%b %b\n' "$LGREEN" "$Z" "$1" >&2
}
### }}}
### Misc utils {{{
# Progress-phase state: current phase number, start timestamp and title of
# the running phase, and the expected total (adjusted down by -b/-r options).
nphase=0
phasestart=
phasetitle=
nphases=5

# Announce the next numbered phase ("[n/total] title") on stdout; if a phase
# was already running, first report its elapsed time via _ok.
# $1 - title of the phase that is starting
_phase() {
    if [[ -n "$phasestart" ]]; then
        _ok "$phasetitle SUCCESS (in $(($(date '+%s') - $phasestart)) seconds)"
    fi
    phasestart=$(date '+%s')
    nphase=$((nphase + 1))
    phasetitle="$1"
    echo -e "${LGREEN}[$nphase/$nphases]$Z $phasetitle"
}
# Ensure binary $1 is available, installing the Debian package $2 via apt-get
# when it is missing; dies when neither the binary nor apt-get exists.
_check_dep() {
    #$1 is the binary to check, $2 is the debian package
    # `command -v` is the POSIX-recommended replacement for `which`.
    if ! command -v "$1" &> /dev/null; then
        if ! command -v apt-get &> /dev/null; then
            _fatal "$1 is missing; please install and retry"
        fi
        apt-get --force-yes --yes install "$2" 2>&1
    fi
}
# Poll a running writer process and print how many MB it has written so far.
# $1 - PID of the writer (e.g. a backgrounded dd); progress is read from the
#      wchar counter in /proc/$pid/io. `kill -0` only probes for existence,
#      so the loop ends when the process exits.
_write_monitor() {
    ddpid="$1"
    while kill -0 $ddpid &> /dev/null; do
        wchar=$(egrep '^wchar:' /proc/$ddpid/io | awk '{ print $2 }')
        echo -en "\rWriting $((wchar / 1024 / 1024))MB"
        sleep 5
    done
}
### }}}
### Phases {{{
# Fill $1 (partition/device) with random data, printing progress.
# BUG FIX: this hard-coded /dev/urandom, silently ignoring the -R option
# (which sets the global $random_device). Honor it, keeping /dev/urandom
# as the default.
randomfill() {
    #$1 is partition/device to fill
    dd if="${random_device:-/dev/urandom}" of="$1" &
    ddpid=$!
    _write_monitor "$ddpid"
    wait
}
# Create the persistence partition immediately after the written image.
# $1 - block device, $2 - byte offset where the partition starts (the image
#      size). The new partition is expected to appear as ${device}2.
persistence_partition() {
    ### $1 is device, $2 is offset
    ### assume that it is partition "2"
    local device=$1
    local img_bytes=$2
    partprobe
    parted "${device}" -- mkpart primary "${img_bytes}B" -1
    sleep 1
    # re-read the partition table so ${device}2 shows up before the check
    partprobe
    if ! [[ -b "${device}2" ]]; then
        _fatal "Encrypted partition not found"
    fi
}
# LUKS-format partition $1 with passphrase $2, create a filesystem labeled
# "persistence" (type chosen by the global $fs: btrfs, ext4 or reiserfs),
# and write the live-build persistence.conf into it.
# NOTE(review): usage only documents ext4/btrfs, but a reiserfs branch
# exists too — an unknown $fs silently creates no filesystem; confirm.
persistence_create() {
    # $1 is partition, $2 is passphrase
    local partition="$1"
    local passphrase="$2"

    # encrypt partition (passphrase is fed on stdin via the here-string)
    cryptsetup --verbose --batch-mode --key-size 512 --hash sha512 --cipher aes-xts-plain --use-random luksFormat "$1" <<<${passphrase} || exit 1

    # $$-suffixed names keep concurrent runs from colliding
    local mapname="my_usb.$$"
    local mapper="/dev/mapper/$mapname"
    local mountpoint="/mnt/freepto.$$"

    # open LUKS
    cryptsetup luksOpen "$1" "$mapname" <<<${passphrase} || exit 1
    mkdir -p "$mountpoint"

    if [[ $fs == "btrfs" ]]; then
        # make btrfs fs with label: "persistence"
        mkfs.btrfs -L persistence "$mapper" 2>&1 || exit 1
        # mount the partition with lzo compression
        mount "$mapper" "$mountpoint" -o noatime,nodiratime,compress=lzo
    elif [[ $fs == "ext4" ]]; then
        # make ext4 fs with label: "persistence"
        mkfs.ext4 -L persistence "$mapper" 2>&1 || exit 1
        # mount the partition
        mount "$mapper" "$mountpoint" -o noatime,nodiratime
    elif [[ $fs == "reiserfs" ]]; then
        # make reiserfs fs with label: "persistence"
        mkfs.reiserfs -f -l persistence "$mapper" 2>&1 || exit 1
        # mount the partition
        mount "$mapper" "$mountpoint" -o noatime,nodiratime
    fi

    # make the persistence.conf file ("/ union" persists the whole root)
    sleep 5
    echo "/ union" > "${mountpoint}/persistence.conf"
    sync
    sleep 5

    # cleanup: unmount, close the LUKS mapping, remove the mountpoint
    umount /mnt/freepto.$$ 2>&1
    cryptsetup luksClose /dev/mapper/my_usb.$$ 2>&1
    rm -rf "$mountpoint" 2>&1
}
### }}}
# check if user is root:
if [ "$(id -u)" != "0" ]; then
    _fatal "Only root can do that!"
fi

# "source" mode: invoked as `makefreepto source`, do nothing — this lets
# other scripts source the file just for its helper functions.
if [ "$#" = '1' -a "$1" = "source" ]; then
    true
else
    # set default:
    skip_badblocks=0
    skip_random=0
    password="freepto"
    random_device="${random_device:-/dev/urandom}"
    img=binary.img
    log="./makefreepto.log"
    #TODO: redirect all output to a log
    fs="ext4"

    while getopts 'brf:R:p:si:' opt; do
        case $opt in
            b)
                skip_badblocks=1
                nphases=$((nphases - 1))
                ;;
            r)
                skip_random=1
                nphases=$((nphases - 1))
                ;;
            f)
                fs=$OPTARG
                ;;
            i)
                img=$OPTARG
                ;;
            p)
                password=$OPTARG
                ;;
            R)
                random_device=$OPTARG
                ;;
            s)
                # NOTE(review): 'a-zA-H0-9' excludes uppercase I-Z — looks
                # like a typo for 'a-zA-Z0-9'; confirm before changing.
                password=`tr -dc 'a-zA-H0-9' < /dev/urandom | head -c 12`
                _ok "Your LUKS random passphrase is: $LRED$password$Z"
                ;;
            \?)
                _error "Invalid option: -$OPTARG"
                makefreepto_usage
                exit 1
                ;;
        esac
    done
    shift $((OPTIND-1))

    # some control check
    if ! [[ -f "$img" ]]; then
        _fatal "Image $img not exist or is not valid"
    fi
    if [ $# != 1 ];then
        _error "Wrong argument number"
        makefreepto_usage
        exit 1
    fi
    if ! [[ -b $1 ]]; then
        _fatal "$1 should be a block special device"
    fi
    # Warn (message intentionally in Italian) when a non-default random
    # source was selected via -R.
    if [ "${random_device}" != /dev/urandom ]; then
        echo "ATTENZIONE: STAI USANDO UN GENERATORE RANDOM ERETICO" ${random_device}
    fi

    # set device
    device=$1

    # check dependencies
    if [[ "$fs" = "btrfs" ]]; then
        _check_dep mkfs.btrfs btrfs-tools
    fi
    _check_dep cryptsetup cryptsetup
    _check_dep parted parted

    # umount device and clean all partition:
    if mount | egrep -q "^${device}"; then
        _fatal "Device is mounted; please umount"
    fi

    # check for bad block on the device (badblocks -w is destructive):
    if [[ $skip_badblocks -eq 0 ]]; then
        _phase "Badblock check"
        badblocks -c 10240 -s -w -v "${device}"
    fi

    # copy binary image with dd, reporting percentage progress by polling
    # the writer's wchar counter in /proc:
    _phase "Image copy"
    dd "if=$img" of="${device}" &
    ddpid=$!
    echo " with PID $ddpid"
    totchar=$(stat -c '%s' "$img")
    humansize=$(du -h "$img" | cut -f1)
    while kill -0 $ddpid &> /dev/null; do
        wchar=$(egrep '^wchar:' /proc/$ddpid/io | awk '{ print $2 }')
        echo -en "\rWriting $humansize: $((wchar * 100 / totchar))%"
        sleep 10
    done
    wait

    # make the partition right after the end of the image:
    _phase "Make encrypted persistent partition"
    persistence_partition "${device}" $(($(stat -c %s "$img")+1))

    # write random data on crypto partition:
    if [[ $skip_random -eq 0 ]]; then
        _phase "Writing random data on crypto partition!"
        randomfill "${device}2"
        sleep 2
    fi

    _phase "Create persistence"
    persistence_create "${device}2" "${password}"

    # stop
    _ok "Freepto is ready for use"
fi

# vim: set ts=4 sw=4 et ft=sh fdm=marker:
| true
|
f4a8fdf6ae90b077a8eb0e202f6503013388a29c
|
Shell
|
Surgo/dotfiles
|
/.config/zsh/.zshrc
|
UTF-8
| 1,356
| 2.546875
| 3
|
[] |
no_license
|
# zsh interactive-shell configuration: Oh My Zsh setup, powerlevel10k theme,
# and plugin selection.

# Paths for Oh My Zsh
ZSH="${ZDOTDIR:-$HOME}/ohmyzsh"
ZSH_CUSTOM="${ZDOTDIR:-$ZSH}/custom"

# Theme
ZSH_THEME="powerlevel10k/powerlevel10k"

# Configure plugins
## Oh My Zsh
DISABLE_AUTO_UPDATE=true
ZSH_TMUX_AUTOSTART=true
ZSH_TMUX_FIXTERM=true
ZSH_TMUX_CONFIG="${HOME}/.config/tmux/tmux.conf"
ZSH_TMUX_UNICODE=true

## zsh-autosuggestions
ZSH_AUTOSUGGEST_HIGHLIGHT_STYLE="fg=black,bg=blue,bold,underline"

# Plugins (all options above must be set before oh-my-zsh.sh is sourced)
plugins=()

## OS specific
case "${OSTYPE}" in
    darwin*)
        plugins+=(brew keychain macos)
        ;;
    linux*)
        plugins+=(systemd)
        ;;
esac

## Utilities
plugins+=(aliases command-not-found per-directory-history sudo)
## Colorize
plugins+=(colored-man-pages colorize)
## Devtools
plugins+=(ag tmux vscode)
plugins+=(docker docker-compose)
plugins+=(git gitfast gitignore tig)
plugins+=(mercurial)
plugins+=(ansible aws gcloud terraform)
plugins+=(autopep8 pep8 pip poetry python virtualenv)
plugins+=(bundler gem rails rake rbenv ruby)
plugins+=(node nodenv npm yarn)
plugins+=(golang)

## External
plugins+=(zsh-autosuggestions zsh-syntax-highlighting)
### https://github.com/zsh-users/zsh-completions/issues/603
fpath+="${ZSH_CUSTOM}/plugins/zsh-completions/src"

source "${ZSH}/oh-my-zsh.sh"

# To customize prompt, run `p10k configure` or edit ~/.config/zsh/.p10k.zsh.
[[ ! -f ~/.config/zsh/.p10k.zsh ]] || source ~/.config/zsh/.p10k.zsh
| true
|
76152438aedd07651ff07a26d2338628b98819f0
|
Shell
|
goofiva/docker-sensu-example
|
/sensu-client/run-sensu-client.sh
|
UTF-8
| 676
| 3.203125
| 3
|
[] |
no_license
|
#!/bin/bash
# Render the Sensu client config templates (/tmp/sensu) into /etc/sensu and
# start supervisord.
#   $1 - sensu user, $2 - sensu password, $3 - client name

usage(){
    echo "Usage: $0 SENSU_USER SENSU_PASSWORD CLIENT_NAME"
    exit 1
}

[[ $# -ne 3 ]] && usage

# The sensu server address is expected in /etc/hosts (e.g. docker --add-host).
SENSU_SERVER=$(grep sensuserver /etc/hosts | awk '{ print $1}')
SENSU_USER=$1
SENSU_PASSWORD=$2
CLIENT_NAME=$3
# First address bound to eth0.
CLIENT_IP_ADDRESS=$(ifconfig|grep eth0 -A 1|grep inet| awk '{print $2}' | sed s/addr://g)

# BUG FIX: the sed programs were unquoted, so credentials containing spaces
# (or shell metacharacters) split into multiple arguments and broke the
# command. Quote them, and read the template directly instead of via cat.
sed -e "s/SENSU_SERVER/${SENSU_SERVER}/g" \
    -e "s/SENSU_USER/${SENSU_USER}/g" \
    -e "s/SENSU_PASSWORD/${SENSU_PASSWORD}/g" /tmp/sensu/config.json > /etc/sensu/config.json

sed -e "s/CLIENT_NAME/${CLIENT_NAME}/g" \
    -e "s/CLIENT_IP_ADDRESS/${CLIENT_IP_ADDRESS}/g" /tmp/sensu/conf.d/client.json > /etc/sensu/conf.d/client.json

/usr/bin/supervisord
| true
|
27a293379a6608fd1b8a7bf969f42444c780277b
|
Shell
|
astrac-id/files
|
/test
|
UTF-8
| 15,930
| 3.203125
| 3
|
[] |
no_license
|
#!/bin/bash
RED=$(tput setaf 1)
GREEN=$(tput setaf 2)
YELLOW=$(tput setaf 3)
BLUE=$(tput setaf 4)
BOLD=$(tput bold)
RESET=$(tput sgr0)
# Abort the script: error message on stderr, exit status 1.
die() {
    printf '%s\n' "${RED}Error: $1${RESET}" >&2
    exit 1
}

# Announce a step that is starting.
log_running() {
    printf '%s\n' "  ${YELLOW}* $1${RESET}"
}

# Mark a step as finished successfully.
log_done() {
    printf '%s\n' "  ${GREEN}✓ $1${RESET}"
}

# Print a final summary line.
log_finished() {
    printf '%s\n' "  ${GREEN}$1${RESET}"
}
log_running "Checking for libxft2:armhf..."
dpkg -l libxft2:armhf 2>&1 >/dev/null
if [ $? -eq 0 ]; then
log_done " libxft2:armhf already installed!"
else
log_running " libxft2:armhf not yet installed - installing..."
sudo apt -y install libxft2:armhf
if [ $? -eq 0 ]; then
log_done " libxft2:armhf successfully installed!"
else
die " Could not install libxft2:armhf - please check the logs above"
fi
fi
log_running "Checking for libncurses5..."
dpkg -l libncurses5 2>&1 >/dev/null
if [ $? -eq 0 ]; then
log_done " libncurses5 already installed!"
else
log_running " libncurses5 not yet installed - installing..."
sudo apt -y install libncurses5 && apt --fix-broken install
if [ $? -eq 0 ]; then
log_done " libncurses5 successfully installed!"
else
die " Could not install libncurses5 - please check the logs above"
fi
fi
log_running "Checking for predict..."
dpkg -l predict 2>&1 >/dev/null
if [ $? -eq 0 ]; then
log_done " predict already installed!"
else
log_running " predict not yet installed - installing..."
sudo wget http://ports.ubuntu.com/pool/universe/p/predict/predict_2.2.3-4build2_arm64.deb && sudo dpkg -i predict_2.2.3-4build2_arm64.deb
if [ $? -eq 0 ]; then
log_done " predict successfully installed!"
else
die " Could not install predict - please check the logs above"
fi
fi
log_running "Checking for python-setuptools..."
dpkg -l python-setuptools 2>&1 >/dev/null
if [ $? -eq 0 ]; then
log_done " python-setuptools already installed!"
else
log_running " python-setuptools not yet installed - installing..."
sudo apt -y install python-setuptools
if [ $? -eq 0 ]; then
log_done " python-setuptools successfully installed!"
else
die " Could not install python-setuptools - please check the logs above"
fi
fi
log_running "Checking for build-essential..."
dpkg -l build-essential 2>&1 >/dev/null
if [ $? -eq 0 ]; then
log_done " build-essential already installed!"
else
log_running " build-essential not yet installed - installing..."
sudo apt -y install build-essential
if [ $? -eq 0 ]; then
log_done " build-essential successfully installed!"
else
die " Could not install build-essential - please check the logs above"
fi
fi
log_running "Checking for python3-dev..."
dpkg -l python3-dev 2>&1 >/dev/null
if [ $? -eq 0 ]; then
log_done " python3-dev already installed!"
else
log_running " python3-dev not yet installed - installing..."
sudo apt -y install python3-dev
if [ $? -eq 0 ]; then
log_done " python3-dev successfully installed!"
else
die " Could not install python3-dev - please check the logs above"
fi
fi
log_running "Checking for libpq-dev ..."
dpkg -l libpq-dev 2>&1 >/dev/null
if [ $? -eq 0 ]; then
log_done " libpq-dev already installed!"
else
log_running " libpq-dev not yet installed - installing..."
sudo apt -y install libpq-dev
if [ $? -eq 0 ]; then
log_done " libpq-dev successfully installed!"
else
die " Could not install libpq-dev - please check the logs above"
fi
fi
log_running "Checking for python-dev ..."
dpkg -l python-dev 2>&1 >/dev/null
if [ $? -eq 0 ]; then
log_done " python-dev already installed!"
else
log_running " python-dev not yet installed - installing..."
sudo apt -y install python-dev
if [ $? -eq 0 ]; then
log_done " python-dev successfully installed!"
else
die " Could not install python-dev - please check the logs above"
fi
fi
log_running "Checking for libxml2-dev ..."
dpkg -l libxml2-dev 2>&1 >/dev/null
if [ $? -eq 0 ]; then
log_done " libxml2-dev already installed!"
else
log_running " libxml2-dev not yet installed - installing..."
sudo apt -y install libxml2-dev
if [ $? -eq 0 ]; then
log_done " libxml2-dev successfully installed!"
else
die " Could not install libxml2-dev - please check the logs above"
fi
fi
log_running "Checking for libxslt1-dev ..."
dpkg -l libxslt1-dev 2>&1 >/dev/null
if [ $? -eq 0 ]; then
log_done " libxslt1-dev already installed!"
else
log_running " libxslt1-dev not yet installed - installing..."
sudo apt -y install libxslt1-dev
if [ $? -eq 0 ]; then
log_done " libxslt1-dev successfully installed!"
else
die " Could not install libxslt1-dev - please check the logs above"
fi
fi
log_running "Checking for libldap2-dev ..."
dpkg -l libldap2-dev 2>&1 >/dev/null
if [ $? -eq 0 ]; then
log_done " libldap2-dev already installed!"
else
log_running " libldap2-dev not yet installed - installing..."
sudo apt -y install libldap2-dev
if [ $? -eq 0 ]; then
log_done " libldap2-dev successfully installed!"
else
die " Could not install libldap2-dev - please check the logs above"
fi
fi
log_running "Checking for libsasl2-dev ..."
dpkg -l libsasl2-dev 2>&1 >/dev/null
if [ $? -eq 0 ]; then
log_done " libsasl2-dev already installed!"
else
log_running " libsasl2-dev not yet installed - installing..."
sudo apt -y install libsasl2-dev
if [ $? -eq 0 ]; then
log_done " libsasl2-dev successfully installed!"
else
die " Could not install libsasl2-dev - please check the logs above"
fi
fi
log_running "Checking for libffi-dev ..."
dpkg -l libffi-dev 2>&1 >/dev/null
if [ $? -eq 0 ]; then
log_done " libffi-dev already installed!"
else
log_running " libffi-dev not yet installed - installing..."
sudo apt -y install libffi-dev
if [ $? -eq 0 ]; then
log_done " libffi-dev successfully installed!"
else
die " Could not install libffi-dev - please check the logs above"
fi
fi
log_running "Checking for libxml2-dev ..."
dpkg -l libxml2-dev 2>&1 >/dev/null
if [ $? -eq 0 ]; then
log_done " libxml2-dev already installed!"
else
log_running " libxml2-dev not yet installed - installing..."
sudo apt -y install libxml2-dev
if [ $? -eq 0 ]; then
log_done " libxml2-dev successfully installed!"
else
die " Could not install libxml2-dev - please check the logs above"
fi
fi
log_running "Checking for libxmlsec1-dev ..."
dpkg -l libxmlsec1-dev 2>&1 >/dev/null
if [ $? -eq 0 ]; then
log_done " libxmlsec1-dev already installed!"
else
log_running " libxmlsec1-dev not yet installed - installing..."
sudo apt -y install libxmlsec1-dev
if [ $? -eq 0 ]; then
log_done " libxmlsec1-dev successfully installed!"
else
die " Could not install libxmlsec1-dev - please check the logs above"
fi
fi
log_running "Checking for php7.2-fpm ..."
dpkg -l php7.2-fpm 2>&1 >/dev/null
if [ $? -eq 0 ]; then
log_done " php7.2-fpm already installed!"
else
log_running " php7.2-fpm not yet installed - installing..."
sudo apt -y install php7.2-fpm
if [ $? -eq 0 ]; then
log_done " php7.2-fpm successfully installed!"
else
die " Could not install php7.2-fpm - please check the logs above"
fi
fi
log_running "Checking for libx11-6:armhf ..."
dpkg -l libx11-6:armhf 2>&1 >/dev/null
if [ $? -eq 0 ]; then
log_done " libx11-6:armhf already installed!"
else
log_running " libx11-6:armhf not yet installed - installing..."
sudo apt -y install libx11-6:armhf
if [ $? -eq 0 ]; then
log_done " libx11-6:armhf successfully installed!"
else
die " Could not install libx11-6:armhf - please check the logs above"
fi
fi
log_running "Checking for libasound2:armhf ..."
dpkg -l libasound2:armhf 2>&1 >/dev/null
if [ $? -eq 0 ]; then
log_done " libasound2:armhf already installed!"
else
log_running " libasound2:armhf not yet installed - installing..."
sudo apt -y install libasound2:armhf
if [ $? -eq 0 ]; then
log_done " libasound2:armhf successfully installed!"
else
die " Could not install libasound2:armhf - please check the logs above"
fi
fi
log_running "Checking for ntp..."
dpkg -l ntp 2>&1 >/dev/null
if [ $? -eq 0 ]; then
log_done " ntp already installed!"
else
log_running " ntp not yet installed - installing..."
sudo apt -y install ntp
if [ $? -eq 0 ]; then
log_done " ntp successfully installed!"
else
die " Could not install ntp - please check the logs above"
fi
fi
log_running "Checking for cmake..."
dpkg -l cmake 2>&1 >/dev/null
if [ $? -eq 0 ]; then
log_done " cmake already installed!"
else
log_running " cmake not yet installed - installing..."
sudo apt -y install cmake
if [ $? -eq 0 ]; then
log_done " cmake successfully installed!"
else
die " Could not install cmake - please check the logs above"
fi
fi
log_running "Checking for sox..."
dpkg -l sox 2>&1 >/dev/null
if [ $? -eq 0 ]; then
log_done " sox already installed!"
else
log_running " sox not yet installed - installing..."
sudo apt -y install sox
if [ $? -eq 0 ]; then
log_done " sox successfully installed!"
else
die " Could not install sox - please check the logs above"
fi
fi
log_running "Checking for at..."
dpkg -l at 2>&1 >/dev/null
if [ $? -eq 0 ]; then
log_done " at already installed!"
else
log_running " at not yet installed - installing..."
sudo apt -y install at
if [ $? -eq 0 ]; then
log_done " at successfully installed!"
else
die " Could not install at - please check the logs above"
fi
fi
log_running "Checking for bc..."
dpkg -l bc 2>&1 >/dev/null
if [ $? -eq 0 ]; then
log_done " bc already installed!"
else
log_running " bc not yet installed - installing..."
sudo apt -y install bc
if [ $? -eq 0 ]; then
log_done " bc successfully installed!"
else
die " Could not install bc - please check the logs above"
fi
fi
log_running "Checking for nginx..."
dpkg -l nginx 2>&1 >/dev/null
if [ $? -eq 0 ]; then
log_done " nginx already installed!"
else
log_running " nginx not yet installed - installing..."
sudo apt -y install nginx
if [ $? -eq 0 ]; then
log_done " nginx successfully installed!"
else
die " Could not install nginx - please check the logs above"
fi
fi
log_running "Checking for libncurses5-dev..."
dpkg -l libncurses5-dev 2>&1 >/dev/null
if [ $? -eq 0 ]; then
log_done " libncurses5-dev already installed!"
else
log_running " libncurses5-dev not yet installed - installing..."
sudo apt -y install libncurses5-dev
if [ $? -eq 0 ]; then
log_done " libncurses5-dev successfully installed!"
else
die " Could not install libncurses5-dev - please check the logs above"
fi
fi
log_running "Checking for libncursesw5-dev..."
dpkg -l libncursesw5-dev 2>&1 >/dev/null
if [ $? -eq 0 ]; then
log_done " libncursesw5-dev already installed!"
else
log_running " libncursesw5-dev not yet installed - installing..."
sudo apt -y install libncursesw5-dev
if [ $? -eq 0 ]; then
log_done " libncursesw5-dev successfully installed!"
else
die " Could not install libncursesw5-dev - please check the logs above"
fi
fi
log_running "Checking for libatlas-base-dev ..."
dpkg -l libatlas-base-dev 2>&1 >/dev/null
if [ $? -eq 0 ]; then
log_done " libatlas-base-dev already installed!"
else
log_running " libatlas-base-dev not yet installed - installing..."
sudo apt -y install libatlas-base-dev
if [ $? -eq 0 ]; then
log_done " libatlas-base-dev successfully installed!"
else
die " Could not install libatlas-base-dev - please check the logs above"
fi
fi
log_running "Checking for python3-pip..."
dpkg -l python3-pip 2>&1 >/dev/null
if [ $? -eq 0 ]; then
log_done " python3-pip already installed!"
else
log_running " python3-pip not yet installed - installing..."
sudo apt -y install python3-pip
if [ $? -eq 0 ]; then
log_done " python3-pip successfully installed!"
else
die " Could not install python3-pip - please check the logs above"
fi
fi
log_running "Checking for imagemagick..."
dpkg -l imagemagick 2>&1 >/dev/null
if [ $? -eq 0 ]; then
log_done " imagemagick already installed!"
else
log_running " imagemagick not yet installed - installing..."
sudo apt -y install imagemagick
if [ $? -eq 0 ]; then
log_done " imagemagick successfully installed!"
else
die " Could not install imagemagick - please check the logs above"
fi
fi
log_running "Checking for libxft-dev..."
dpkg -l libxft-dev 2>&1 >/dev/null
if [ $? -eq 0 ]; then
log_done " libxft-dev already installed!"
else
log_running " libxft-dev not yet installed - installing..."
sudo apt -y install libxft-dev
if [ $? -eq 0 ]; then
log_done " libxft-dev successfully installed!"
else
die " Could not install libxft-dev - please check the logs above"
fi
fi
log_running "Checking for libjpeg9..."
dpkg -l libjpeg9 2>&1 >/dev/null
if [ $? -eq 0 ]; then
log_done " libjpeg9 already installed!"
else
log_running " libjpeg9 not yet installed - installing..."
sudo apt -y install libjpeg9
if [ $? -eq 0 ]; then
log_done " libjpeg9 successfully installed!"
else
die " Could not install libjpeg9 - please check the logs above"
fi
fi
log_running "Checking for libjpeg9-dev..."
dpkg -l libjpeg9-dev 2>&1 >/dev/null
if [ $? -eq 0 ]; then
log_done " libjpeg9-dev already installed!"
else
log_running " libjpeg9-dev not yet installed - installing..."
sudo apt -y install libjpeg9-dev
if [ $? -eq 0 ]; then
log_done " libjpeg9-dev successfully installed!"
else
die " Could not install libjpeg9-dev - please check the logs above"
fi
fi
log_running "Checking for socat..."
dpkg -l socat 2>&1 >/dev/null
if [ $? -eq 0 ]; then
log_done " socat already installed!"
else
log_running " socat not yet installed - installing..."
sudo apt -y install socat
if [ $? -eq 0 ]; then
log_done " socat successfully installed!"
else
die " Could not install socat - please check the logs above"
fi
fi
log_running "Checking for sqlite3..."
dpkg -l sqlite3 2>&1 >/dev/null
if [ $? -eq 0 ]; then
log_done " sqlite3 already installed!"
else
log_running " sqlite3 not yet installed - installing..."
sudo apt -y install sqlite3
if [ $? -eq 0 ]; then
log_done " sqlite3 successfully installed!"
else
die " Could not install sqlite3 - please check the logs above"
fi
fi
log_running "Checking for libgfortran5..."
dpkg -l libgfortran5 2>&1 >/dev/null
if [ $? -eq 0 ]; then
log_done " libgfortran5 already installed!"
else
log_running " libgfortran5 not yet installed - installing..."
sudo apt -y install libgfortran5
if [ $? -eq 0 ]; then
log_done " libgfortran5 successfully installed!"
else
die " Could not install libgfortran5 - please check the logs above"
fi
fi
# Remove the downloaded predict .deb; it is no longer needed once installed.
sudo rm -rf predict_2.2.3-4build2_arm64.deb
log_running "  Checking for update..."
sudo apt -y update
log_running "  Checking for upgrade..."
sudo apt -y upgrade
log_running "  Checking for autoremove..."
sudo apt -y autoremove
log_running "  fix-broken install..."
sudo apt -y --fix-broken install
# BUG FIX: the original used `[ $install_type == 'install' ]` twice — the
# unquoted variable causes a syntax error when install_type is unset/empty,
# and `==` inside `[ ]` is a bashism. Use [[ ]] with a quoted, defaulted
# expansion, and merge the two identical adjacent conditionals.
if [[ "${install_type:-}" == 'install' ]]; then
  log_running "Installing library Done."
  log_running "please run $./install_and_upgrade.sh"
  log_running "It looks like this is a fresh install of the tooling for captures."
  log_running "If you've never had the software tools installed previously (e.g. if you've"
  log_running "not installed the original raspberry-noaa repo content), you likely need to"
  log_running "restart your device. Please do this to rule out any potential issues in the"
  log_running "software and libraries that have been installed."
fi
| true
|
57802b725df3c9dc8ceeb5a2b528b6a47a75861b
|
Shell
|
heavyflavor/script
|
/ShellTest/GetVarLength.sh
|
UTF-8
| 181
| 3.015625
| 3
|
[] |
no_license
|
#!/bin/bash
# Print a string (first argument, or a built-in sample) and its length,
# using the LOG_INFO helper provided by ColorText.sh.
. ~/bin/ColorText.sh

# Fall back to a sample value when the caller passes no argument.
if [ $# -eq 0 ]; then
	temp="ABCDEFGH"
else
	temp=$1
fi

LOG_INFO "Input var is : $temp"
# ${#temp} is the character length of the string.
LOG_INFO "Input Var Length is : ${#temp}"
| true
|
f9692d9d50ff791ff8da249f1e1c9f465205da5d
|
Shell
|
iKrishneel/ubuntu_env_setup
|
/install-ros.sh
|
UTF-8
| 1,011
| 3.1875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash

# Install ROS ($version) desktop-full plus rosdep and common build tools.
install-ros() {
    sudo -v
    sudo sh -c 'echo "deb http://packages.ros.org/ros/ubuntu $(lsb_release -sc) main" > /etc/apt/sources.list.d/ros-latest.list'
    version=melodic
    sudo apt-key adv --keyserver hkp://ha.pool.sks-keyservers.net:80 --recv-key 421C365BD9FF1F717815A3895523BAEEB01FA116
    sudo apt-get update
    sudo apt-get install ros-$version-desktop-full -y
    sudo rosdep init
    rosdep update
    # Consistency fix: use $version instead of a hard-coded "melodic" so a
    # single edit changes the ROS release everywhere in this function.
    source /opt/ros/$version/setup.bash
    # -y added so these installs never block on an interactive prompt
    # (matching the desktop-full install above).
    sudo apt-get install -y python-rosinstall
    sudo apt-get install -y python-catkin-tools
}

# Create a melodic catkin workspace with common perception repos and build it.
rosws() {
    DIR=~/ros/melodic/src
    mkdir -p $DIR
    cd $DIR
    git clone https://github.com/ros-perception/vision_opencv.git
    git clone https://github.com/ros-perception/image_common.git
    git clone https://github.com/ros-perception/image_pipeline.git
    source /opt/ros/melodic/setup.bash
    cd ..
    catkin_init_workspace
    catkin build
    # Persist the workspace overlay for future shells.
    printf '\nsource ~/ros/melodic/devel/setup.bash' >> ~/.bashrc
}

install-ros
rosws
| true
|
8b68221665ca026cdd7dabe33dd4a7f25a0f36e6
|
Shell
|
MaybeGoogle/NadekoFiles
|
/Install Scripts/NadekoService.sh
|
UTF-8
| 1,957
| 3.984375
| 4
|
[] |
no_license
|
#!/bin/bash
# Interactive installer that registers NadekoBot as a systemd service,
# running the bot inside a detached tmux session.
root=$(pwd)
unitcfg="/etc/systemd/system/nadeko.service"
runfile="NadekoARU_Latest.sh"
regrunfile="NadekoARN.sh"
clear
echo "Welcome to the Nadeko service installer."
# Prompt warning if service file exists. Give option to quit.
if [ -e $unitcfg ]
then
echo -e "$unitcfg already exists.\nPress [Y] to replace it or [N] to exit."
	while true; do
		read -p "[Y/N]: " yn
		case $yn in
			[Yy]* ) break;;
			[Nn]* ) echo "Exiting..."; exit;;
			* ) echo "Please enter only Y or N.";;
		esac
	done
fi
# Prompt for auto update; "no" selects the plain (non-updating) run script.
echo -e "Do you want to automatically update the bot?\nPress [Y] for yes or [N] to run normally."
while true; do
	read -p "[YN]: " yn
	case $yn in
		[Yy]* ) break;;
		[Nn]* ) runfile="$regrunfile"; break;;
		* ) echo "Please enter only Y or N.";;
	esac
done
# Download desired run script file (-N: only if newer, -q: quiet)
echo "Downloading your run script file: $runfile"
wget -Nq "https://raw.githubusercontent.com/Kwoth/NadekoBot-BashScript/1.9/$runfile"
# Fill out personalized unit config file.
# NOTE(review): this redirection writes to /etc/systemd without sudo, yet the
# systemctl calls below use sudo — confirm the script is expected to run as
# root for the write to succeed.
echo "[Unit]
Description=NadekoBot
[Service]
WorkingDirectory=$root
User=$USER
Type=forking
ExecStart=/usr/bin/tmux new-session -s Nadeko -d 'bash $root/$runfile'
ExecStop=/bin/sleep 2
[Install]
WantedBy=multi-user.target" > $unitcfg
# Re-run generators, enable and start nadeko service
sudo systemctl daemon-reload
sudo systemctl enable nadeko
sudo systemctl start nadeko
# Basic management information
echo "Finished installing NadekoBot as a service."
echo "To show information about Nadeko, run 'sudo systemctl status nadeko'."
echo "To stop/start/restart Nadeko, run 'sudo systemctl [stop/start/restart] nadeko'."
echo "To completely disable/re-enable Nadeko, run 'sudo systemctl [disable/enable] nadeko'."
echo "You can view Nadeko's logs with 'sudo tmux a -t Nadeko'. Exit from these logs by pressing ctrl+B and then D."
# Return to master installer
exit
| true
|
b8b3f2b38e5b2a1f8c64f59357eaf808fc5ac828
|
Shell
|
christoff-buerger/racr
|
/racr-net/transcribe-racr-core.bash
|
UTF-8
| 744
| 2.75
| 3
|
[
"MIT",
"X11"
] |
permissive
|
#!/usr/bin/env bash
# This program and the accompanying materials are made available under the
# terms of the MIT license (X11 license) which accompanies this distribution.
# author: C. Bürger
# Internal script adding the mutable node-dot-net-instance field to RACR's node record,
# such that Scheme nodes know their respective wrapping C# node. A bidirectional mapping
# between C# and Scheme nodes is required by RACR-NET for proper integration with RACR.
set -e
set -o pipefail
shopt -s inherit_errexit
# Transform $1/core.scm into $1/core.sls:
#  1) export the new field's accessor and mutator,
#  2) add the (mutable dot-net-instance) field after the annotations field,
#  3) on the first match only, initialize the new field to #f in the
#     record-construction form (one closing paren is consumed by the edit).
sed -e 's/(export/(export node-dot-net-instance node-dot-net-instance-set!/g' \
    -e 's/(mutable annotations)/(mutable annotations) (mutable dot-net-instance)/g' \
    -e '1,/(list))))))/s//(list) #f)))))/' \
    "$1/core.scm" > "$1/core.sls"
| true
|
3b418cb483b35f969aeffeb0e7bfe17f3927b603
|
Shell
|
gm1357/Exercism-solutions
|
/bash/acronym/acronym.sh
|
UTF-8
| 310
| 3.78125
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Build an acronym from a phrase: '-', '*' and '_' act as word separators,
# and the first letter of each word is taken and upper-cased.

main () {
    local cleaned
    cleaned=$(replace_symbols "$1")
    write_acronym "$cleaned"
}

# Turn '-', '*' and '_' into spaces so they split words like whitespace does.
replace_symbols() {
    local text="$1"
    printf '%s\n' "${text//[-*_]/ }"
}

# Concatenate the first character of every (whitespace-separated) word,
# then print the result upper-cased.
write_acronym() {
    local letters="" word
    for word in $1; do
        letters+="${word:0:1}"
    done
    printf '%s\n' "${letters^^}"
}

main "$@"
| true
|
2b840338401eac8185e61d4f7d42b5903fd49d30
|
Shell
|
DevGautam2000/shell-scripting
|
/readfiles.sh
|
UTF-8
| 318
| 3.609375
| 4
|
[] |
no_license
|
#! /bin/bash
# Demo script: line-by-line file reading and how IFS controls word splitting.
fp=files/read.txt
# Read the file one line at a time; IFS= keeps leading/trailing whitespace,
# -r keeps backslashes literal.
while IFS= read -r line
do
    # NOTE(review): $line is unquoted, so the echoed line is word-split and
    # glob-expanded — intentional for the demo? Quote it for real use.
    echo $line
done < $fp
# Usage of IFS: "foo bar" is echoed as two words because space is a
# (default) IFS character, so the unquoted expansion is split.
string="foo bar"
for i in $string
do
    echo $i
done
# Using IFS to split the string on ':' instead of whitespace.
# NOTE(review): IFS stays ":" for the rest of the shell after this point.
str=foo:bar:again
IFS=":"
for i in $str
do
    echo $i
done
| true
|
4ccde870fe99374cc3d014dfce77cf2da29b792a
|
Shell
|
cnamal/video-INF6803
|
/TP3/results.sh
|
UTF-8
| 331
| 2.984375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Benchmark driver: build, then run ./main over two parameter sweeps,
# repeating each configuration LOOP times.
make
LOOP=10

# Sweep the first parameter from 10 to 200 in steps of 10 (second fixed at 3500).
for (( i=10; i<=200; i+=10 )); do
	echo "$i 3500"
	for (( run=1; run<=LOOP; run++ )); do
		./main at.txt $i 3500 other.txt
	done
	echo ""
done

# Sweep the second parameter from 2000 to 4000 in steps of 100 (first fixed at 50).
for (( i=2000; i<=4000; i+=100 )); do
	echo "50 $i"
	for (( run=1; run<=LOOP; run++ )); do
		./main at.txt 50 $i other.txt
	done
	echo ""
done
| true
|
b8b7f1e6a568668d2f529d7bbe6fcbed88863e64
|
Shell
|
Data-Science-Projects/demo-routenet
|
/bin/get_omnet_data.sh
|
UTF-8
| 1,592
| 2.96875
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/usr/bin/env bash
# Download the knowledgedefinednetworking "datasets_v0" OMNeT++ data sets and
# populate the smoke-test resource directories.
# Requires OMNET_DATA_DIR and RN_PROJ_HOME to be set in the environment.
set -e

# SAFETY FIX: the original ran `rm -rf $OMNET_DATA_DIR` (and later
# `rm -rf $RN_PROJ_HOME/...`) unquoted and unguarded; with an unset variable
# that silently misbehaves. Abort loudly instead.
: "${OMNET_DATA_DIR:?OMNET_DATA_DIR must be set}"
: "${RN_PROJ_HOME:?RN_PROJ_HOME must be set}"

# First get the data files and populate the base data directory.
# These data sets are from:
# https://github.com/knowledgedefinednetworking/NetworkModelingDatasets/tree/master/datasets_v0
rm -rf "$OMNET_DATA_DIR"
mkdir -p "$OMNET_DATA_DIR/datasets_v0"
cd "$OMNET_DATA_DIR/datasets_v0"
wget "http://knowledgedefinednetworking.org/data/datasets_v0/nsfnet.tar.gz"
wget "http://knowledgedefinednetworking.org/data/datasets_v0/geant2.tar.gz"
wget "http://knowledgedefinednetworking.org/data/datasets_v0/synth50.tar.gz"
tar -xvzf nsfnet.tar.gz
tar -xvzf geant2.tar.gz
tar -xvzf synth50.tar.gz

# Populate the smoke-resources data sets for smoke testing: for each
# topology copy its .ned network file and its Routing_SP result files.
rm -rf "$RN_PROJ_HOME/tests/smoke-resources/data"
for topo in nsfnetbw geant2bw synth50bw; do
  dest="$RN_PROJ_HOME/tests/smoke-resources/data/$topo"
  mkdir -p "$dest"
  cp "$OMNET_DATA_DIR/datasets_v0/$topo/Network_${topo}.ned" "$dest"
  cp "$OMNET_DATA_DIR/datasets_v0/$topo"/results_${topo}_9_Routing_SP* "$dest"
done
| true
|
7ac0fee0ac1064aa8990b1cfdc02af2e191aa758
|
Shell
|
WiperDogLabo/Update_mongodb_installing_input_to_default.params
|
/Test_scripts/test_Restful_invol_functions.sh
|
UTF-8
| 5,614
| 3.765625
| 4
|
[] |
no_license
|
#!/bin/bash
# Integration test: installs Wiperdog via an interactive expect session and
# verifies the RESTful server starts on (1) the default port and (2) a
# user-specified port. Requires: java, expect, and MongoDB running.
if [ "$#" == 0 ];then
	echo "Incorrect script parameters"
	echo "Usage: ./test_Restful_invol_functions.sh path/to/wiperdog_installer_jar"
	echo "Example : ./test_Restful_invol_functions.sh wiperdog-0.2.5-SNAPSHOT-unix.jar"
	exit
fi
wdInstaller=$1
baseDir=$(pwd)
wdDir=$baseDir/wiperdog-0.2.5-SNAPSHOT
echo "==============TEST Wiperdog Function involved to Restful=================="
echo "--Test wiperdog functions after updating user input to default params & move Restful port to configuration file--"
echo "--Please start mongoDB on localhost or specific configured host correspoding to testcase--"
echo "-----------------------------------------------------------------------------------------"
echo "Case 1: Verified Restful server start on default port"
echo "** Clean data..."
sudo rm -rf $wdDir
if [ -d $wdDir ]; then
	echo "Failure to clean data .."
	exit
fi
echo "** Install wiperdog..."
# Drive the interactive installer, accepting every default (bare \r).
expect<<DONE
spawn java -jar $wdInstaller
expect "Press any key to start interactive installation*"
send "\r"
expect "Do you want to install wiperdog at*"
send "y\r"
expect "Getting input parameters for pre-configured*"
send "\r"
sleep 1
expect "Please input Netty port*"
send "\r"
sleep 1
expect "Please input Restful service port*"
send "\r"
sleep 1
expect "Please input job directory*"
send "\r"
sleep 1
expect "Please input trigger directory*"
send "\r"
sleep 1
expect "Please input job class directory*"
send "\r"
sleep 1
expect "Please input job instance directory*"
send "\r"
sleep 1
expect "Please input database server (Mongodb) IP address (default set to 127.0.0.1)*"
send "\r"
sleep 1
expect "Please input database server port*"
send "\r"
sleep 1
expect "Please input database name*"
send "\r"
sleep 1
expect "Please input database server user name*"
send "\r"
sleep 1
expect "Please input database server password*"
send "\r"
sleep 1
expect "Please input mail send data policy*"
send "\r"
sleep 1
expect "Do you want to install wiperdog as system service*"
send "no\r"
sleep 1
expect "Your input are correct(Y|y|N|n)*"
send "y\r"
sleep 1
expect "Finish the Wiperdog installation*"
sleep 1
DONE
echo "** Clean wiperdog before run job.."
rm -rf $wdDir/var/job/*
rm -rf wd_stdout.log
echo "** Copy job and trigger file to job directory"
cp test_involve_functions/case_1/testjob.job $wdDir/var/job
cp test_involve_functions/case_1/test.trg $wdDir/var/job
echo "** Stop existing wiperdog..."
# 13111 is the Netty port; kill whatever currently owns it.
fuser -k 13111/tcp
echo "** Starting wiperdog ...Waitting for a minute..."
if [ -d $wdDir ];then
	$wdDir/bin/startWiperdog.sh > wd_stdout.log 2>&1 &
	sleep 60
	# PASS iff the captured stdout mentions the default REST port 8089.
	restMessage=$( cat wd_stdout.log | grep "Starting RestExpress Server" )
	if [[ $restMessage =~ .*'Starting RestExpress Server on port 8089'.* ]] ;then
		echo "** Restful server start on port 8089 : PASSED"
		echo "======== CASE 1 : PASSED "
	else
		echo "** Restful server not start on port 8089 : FAILED"
		echo "======== CASE 1 : FAILED "
		exit
	fi
else
	echo "======== CASE 1 : FAILED "
	echo "Wiperdog directory not found after install !"
	exit
fi
sleep 10
echo "Case 2: Verified Restful server start on user specific port"
echo "** Clean data..."
sudo rm -rf $wdDir
if [ -d $wdDir ]; then
	echo "Failure to clean data .."
	exit
fi
restPort="8888"
echo "** Install wiperdog with information :"
echo "Restful server port: $restPort"
echo "** You can modified above information in testcase"
sleep 5
# Same installer dialogue, but answer the REST port prompt with $restPort.
expect<<DONE
spawn java -jar $wdInstaller
expect "Press any key to start interactive installation*"
send "\r"
expect "Do you want to install wiperdog at*"
send "y\r"
expect "Getting input parameters for pre-configured*"
send "\r"
sleep 1
expect "Please input Netty port*"
send "\r"
sleep 1
expect "Please input Restful service port*"
send "$restPort\r"
sleep 1
expect "Please input job directory*"
send "\r"
sleep 1
expect "Please input trigger directory*"
send "\r"
sleep 1
expect "Please input job class directory*"
send "\r"
sleep 1
expect "Please input job instance directory*"
send "\r"
sleep 1
expect "Please input database server (Mongodb) IP address (default set to 127.0.0.1)*"
send "\r"
sleep 1
expect "Please input database server port*"
send "\r"
sleep 1
expect "Please input database name*"
send "\r"
sleep 1
expect "Please input database server user name*"
send "\r"
sleep 1
expect "Please input database server password*"
send "\r"
sleep 1
expect "Please input mail send data policy*"
send "\r"
sleep 1
expect "Do you want to install wiperdog as system service*"
send "no\r"
sleep 1
expect "Your input are correct(Y|y|N|n)*"
send "y\r"
sleep 1
expect "Finish the Wiperdog installation*"
sleep 1
DONE
echo "** Clean wiperdog before run job.."
rm -rf $wdDir/var/job/*
rm -rf wd_stdout.log
echo "** Copy job and trigger file to job directory"
cp test_involve_functions/case_1/testjob.job $wdDir/var/job
cp test_involve_functions/case_1/test.trg $wdDir/var/job
echo "** Stop existing wiperdog..."
fuser -k 13111/tcp
echo "** Starting wiperdog ...Waitting for a minute..."
if [ -d $wdDir ];then
	$wdDir/bin/startWiperdog.sh > wd_stdout.log 2>&1 &
	sleep 60
	# PASS iff the captured stdout mentions the configured REST port.
	restMessage=$( cat wd_stdout.log | grep "Starting RestExpress Server" )
	if [[ $restMessage =~ .*${restPort}.* ]] ;then
		echo "** Restful server start on port ${restPort} : PASSED"
		echo "=========== CASE 2 : PASSED"
	else
		echo "** Restful server not start on port ${restPort} : FAILED"
		echo "=========== Case 2: FAILED"
		exit
	fi
else
	echo "========== Case 2: FAILED "
	echo "Wiperdog directory not found after install !"
	exit
fi
sleep 10
| true
|
2889372498c245ca1084792b616b99b2839e8c78
|
Shell
|
5l1v3r1/iNewCam
|
/_flash_dump.bin.extracted/jffs2-root/fs_1/bin/vs/vs_auto.sh
|
UTF-8
| 681
| 3.203125
| 3
|
[] |
no_license
|
#!/bin/sh
# Watchdog for vs_server on an embedded camera: start the server, then loop
# forever — reboot the device if the server dies or gets stuck, and
# periodically drop kernel caches to keep memory free.
VS_SERVER_PATH="`dirname $0`"
echo $VS_SERVER_PATH
PRO_NAME=vs_server
WPA_NAME=wpa_supplicant
RUN_SERVER=$VS_SERVER_PATH/$PRO_NAME
echo $PRO_NAME
NUM=0
# Launch the server in the background; give it a minute before monitoring.
$RUN_SERVER &
sleep 60
while true ; do
	# Count $PRO_NAME processes via ps; if fewer than 1, the server was
	# killed — take wpa_supplicant down too and reboot the device.
	NUM=`ps | grep ${PRO_NAME} | grep -v grep |wc -l`
	if [ "${NUM}" -lt "1" ];then
		echo "${PRO_NAME} was killed"
		killall -9 $WPA_NAME
		reboot
	fi
	# Detect hung processes: any $PRO_NAME in stopped ("T") state is treated
	# as a zombie — kill it and wpa_supplicant, then reboot.
	NUM_STAT=`ps | grep ${PRO_NAME} | grep T | grep -v grep | wc -l`
	if [ "${NUM_STAT}" -gt "0" ];then
		killall -9 ${PRO_NAME}
		killall -9 $WPA_NAME
		reboot
	fi
	# Drop pagecache, dentries and inodes to reclaim memory on this device.
	echo 3 >/proc/sys/vm/drop_caches
	sleep 5
done
exit 0
| true
|
a5f000c8aa1c5854b1d38feec6a1d67248cbd395
|
Shell
|
cavalen/aolab-azure
|
/deploy-ans/deploy.sh
|
UTF-8
| 967
| 3.390625
| 3
|
[] |
no_license
|
#!/bin/bash
# Lab deployment driver (prompts in Spanish): ask for a student prefix,
# write it into config.yml as STUDENT_ID, then run the Ansible playbooks
# that build the Azure lab environment.
#RED='\e[0;91m'
#REDBL='\e[5;10m'
#NC='\e[0m'
echo "===================================================="
echo "Cual es su Prefix ? (ej, su nombre, o sus iniciales): "
echo "Usar maximo 10, solo letras"
echo ""
read -p "Prefix: " prefix
# Sanitize: keep letters only, lowercase, truncate to 10 characters.
prefix=$(echo "$prefix" | tr -dc '[:alpha:]' | tr '[:upper:]' '[:lower:]' | head -c 10)
# Fall back to "student" when the sanitized prefix is empty or too short.
if [ -z "$prefix" ] || [ ${#prefix} -lt 2 ]
then
	echo "Usando Prefix por defecto -student-"
	prefix="student"
	sed -i 's/.*STUDENT_ID:.*/STUDENT_ID: student/' config.yml
	echo ""
else
	echo "Usando Prefix $prefix"
	sed -i 's/.*STUDENT_ID:.*/STUDENT_ID: '$prefix'/' config.yml
	echo ""
fi
echo ""
echo "Desplegando .... "
echo "$(date)"
echo ""
# Run the playbooks in order; && stops the chain on the first failure.
ansible-playbook 01_deploy_rg_vnet_azure.yml && ansible-playbook 02_deploy_ubuntu_docker_azure.yml && ansible-playbook 03_deploy_bigip_2nic_noarm_azure.yml && ansible-playbook 04_install_atc.yml && ansible-playbook 05_get_information.yml
echo "Finalizado .... "
echo "$(date)"
|
719798e9685dba90bdc17df36bfbfcf571d2b6fa
|
Shell
|
f-hein/misc-bash-scripts
|
/arsenal.sh
|
UTF-8
| 557
| 2.546875
| 3
|
[] |
no_license
|
# Scrape kanonierzy.com for Arsenal's next fixture and print opponent,
# date and kick-off time.
wget -q -O arsenal.txt http://kanonierzy.com
# The first <span class="teams"> holds the opponent name; the line after it
# holds "<date> ... <time>". (UUOC removed: grep reads the file directly.)
opponent=$(grep -m 1 '<span class="teams">' arsenal.txt | grep -oP '(?<=">).*(?=</span>)')
matchinfo=$(grep -A 1 -m 1 '<span class="teams">' arsenal.txt | grep -oP '(?<=">).*(?=</span>)' | tail -1)
matchdate=$(awk '{print $1;}' <<<"$matchinfo")
matchtime=$(awk '{print $NF;}' <<<"$matchinfo")
echo 'Next game:'
# Quoted so multi-space opponent names print exactly as scraped.
echo "$opponent"
echo "$matchdate"
echo "$matchtime"
rm arsenal.txt
# Extract the text between the first two commas of `date` output.
# NOTE(review): assumes a locale whose date format contains commas — confirm.
today=$(date | grep -oP ', \K.*?(?=,)')
echo "Today's date: $today"
| true
|
66a047bbf1ce1046d224a190c1a8d2bd45de9e57
|
Shell
|
wigcheng/tn-fulltest
|
/tn-fulltest
|
UTF-8
| 628
| 2.953125
| 3
|
[] |
no_license
|
#!/bin/bash

# Run the external 'memtest' binary, branching on CPU count.
# BUG FIX: the original called bare `memtest` inside this function, which
# resolves to the function itself and recurses forever. `command memtest`
# forces lookup of the external binary instead.
memtest(){
	CPU_NUM=$(nproc)
	# TODO confirm: all three branches are currently identical — presumably
	# per-core options were intended for the 2- and 4-core cases.
	if [[ $CPU_NUM == "1" ]]; then
		command memtest 2> errors.txt
	elif [[ $CPU_NUM == "2" ]]; then
		command memtest 2> errors.txt
	elif [[ $CPU_NUM == "4" ]]; then
		command memtest 2> errors.txt
	fi
}

# Top-level menu for the board test suite.
clear
echo "========================================="
echo "Main Munu---"
echo "========================================="
echo "1. Memory test"
echo "2. CPU test"
echo "3. SD,EMMC,SATA speed test"
echo "4. CANBUS test"
echo "5. GPIO test"
echo "6. SGTL5000 Codec test"
echo "7. I2C bus test"
echo ""
echo -n "Please choose 1 item: "
read var_item
# Quoted so the selection echoes back exactly as typed.
echo "$var_item"
| true
|
1c614e0b4e0011721d6a7acb7b0a9ec27234f709
|
Shell
|
jdavidavendano/technical-test-ns
|
/first_task/static-site-troposphere/build.sh
|
UTF-8
| 732
| 3.640625
| 4
|
[] |
no_license
|
#!/bin/bash
# Deploy the troposphere-generated static-site stack and print its address.
# set here rather than in the shebang so `bash build.sh` also gets -e.
set -e

STACK_NAME="${STACK_NAME:-staticSiteTroposphere}"

# Verify AWS access (non-fatal by design: the message is a hint, not a stop)
aws iam get-user &> /dev/null || \
  echo "Cannot access AWS."

# Go to root of this repo
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
cd "${DIR}"

# BUG FIX: the original only activated the venv when it was freshly created,
# so subsequent runs used the system python. Create if missing, then always
# activate.
if [[ ! -d .venv/ ]]; then
    python3 -m venv .venv
fi
. .venv/bin/activate

python3 main.py \
    > output.json

aws cloudformation deploy \
    --no-fail-on-empty-changeset \
    --template-file output.json \
    --stack-name "${STACK_NAME}"

# BUG FIX: the original piped describe-stacks into `echo`, which ignores
# stdin, so IP was the literal filter string. Use the CLI's JMESPath --query
# to extract the output value instead.
IP="$(aws cloudformation describe-stacks \
  --stack-name "${STACK_NAME}" \
  --query 'Stacks[0].Outputs[2].OutputValue' \
  --output text)"

# Show the full stack description for reference.
aws cloudformation describe-stacks --stack-name "${STACK_NAME}"

echo "Your instance can be accessed at http://${IP}"
| true
|
a64076850bd3aa64865d6da3a3314596645a7fa7
|
Shell
|
chiboubys/Riddim
|
/scripts/install.sh
|
UTF-8
| 403
| 3.421875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Installs riddim once configured: build with make, then copy the standard
# library and the interpreter binary to the paths set by setup_env.sh
# (RID_STD_PATH, RID_BIN_PATH).
set -e
source scripts/setup_env.sh

echo
echo Building...
cd build && make -j 4
cd ..

echo
echo Installing...

# Std lib: recreate the destination's contents from ./std.
# NOTE(review): `rm -rf "$RID_STD_PATH"/*` relies on setup_env.sh always
# exporting a non-empty RID_STD_PATH — confirm the env script guards this.
mkdir -p "$RID_STD_PATH"
rm -rf "$RID_STD_PATH"/*
cp -r "$PWD/std"/* "$RID_STD_PATH"

# Binaries
mkdir -p "$RID_BIN_PATH"
cp "$PWD/build/src/riddim" "$RID_BIN_PATH"

echo
echo Installed Riddim successfully at "$RID_BIN_PATH/riddim"
| true
|
faee78caaa15811513ae8cebd3274e446bc80ff1
|
Shell
|
zebrafishCC/RNAseq-analysis-project
|
/trimmomatic_test.bash
|
UTF-8
| 1,198
| 2.515625
| 3
|
[] |
no_license
|
# Two-pass Trimmomatic run over paired-end zebrafish RNA-seq samples.
# Pass 1: clip TruSeq3 adapters and quality-trim each sample listed in
# samples_remained.txt, writing paired/unpaired outputs per mate.
#trim adapter for the first time
for SAMPLE in $(cat samples_remained.txt)
do
	#echo ${SAMPLE}
	trimmomatic PE -phred33 -summary ./trimmomatic_results/${SAMPLE}.summary.txt ./zebrafish/${SAMPLE}_L003_R1_001.fastq.gz ./zebrafish/${SAMPLE}_L003_R2_001.fastq.gz ./trimmomatic_results/${SAMPLE}_forward_paired.fq.gz ./trimmomatic_results/${SAMPLE}_forward_unpaired.fq.gz ./trimmomatic_results/${SAMPLE}_reverse_paired.fq.gz ./trimmomatic_results/${SAMPLE}_reverse_unpaired.fq.gz ILLUMINACLIP:TruSeq3-PE-2.fa:2:30:10 LEADING:3 TRAILING:3 SLIDINGWINDOW:4:15 MINLEN:36
done
# Pass 2 (run on the hoffman2 cluster): re-trim the pass-1 paired outputs
# against FastQC's overrepresented sequences for samples in sample_names.txt.
#trim the overrepresented sequence for the second time on hoffman2 cluster
for SAMPLE in $(cat sample_names.txt)
do
	#echo ${SAMPLE}
	trimmomatic PE -phred33 -summary ./trimmomatic_OR_results/${SAMPLE}.summary.txt ./paired/${SAMPLE}_forward_paired.fq.gz ./paired/${SAMPLE}_reverse_paired.fq.gz ./trimmomatic_OR_results/${SAMPLE}_F_paired.fq.gz ./trimmomatic_OR_results/${SAMPLE}_F_unpaired.fq.gz ./trimmomatic_OR_results/${SAMPLE}_R_paired.fq.gz ./trimmomatic_OR_results/${SAMPLE}_R_unpaired.fq.gz ILLUMINACLIP:uniq_overrepresented_sequence.fa:2:30:10 LEADING:3 TRAILING:3 SLIDINGWINDOW:4:15 MINLEN:36
done
| true
|
30bea9a76df12543c9e1c39945dbedb4aa74d722
|
Shell
|
laskaridis/kickstart
|
/kickstart.sh
|
UTF-8
| 3,117
| 4.03125
| 4
|
[
"MIT"
] |
permissive
|
#! /bin/bash

# Print a printf-style message preceded by a blank line.
# NOTE(review): $1 is used as the printf format string, so a literal '%'
# in a message will be interpreted.
kickstart_puts() {
  local msg_format="$1"
  shift
  printf "\n$msg_format\n" "$@"
}

# True when dpkg reports the given package as installed.
kickstart_package_is_installed() {
  dpkg -s "$1" 2>/dev/null | grep -q "ok installed"
}

# Install a package via apt-get unless it is already present.
kickstart_package_install() {
  if ! kickstart_package_is_installed "$1"; then
    kickstart_puts "Installing %s ..." "$1"
    apt-get -qq install -y "$1"
  else
    kickstart_puts "Package %s is already installed ..." "$1"
  fi
}
# Install RVM if missing; when already present, upgrade it unless the user
# passed --no-update.
kickstart_install_rvm() {
  if kickstart_rvm_is_installed ; then
    kickstart_puts "RVM is already installed ..."
    # BUG FIX: the original tested `(( $noupdate = 0 ))` — an (invalid)
    # arithmetic *assignment* whose error was hidden by 2>/dev/null, so the
    # upgrade branch never ran. Compare instead, defaulting to 0 (update).
    if (( ${noupdate:-0} == 0 )); then
      kickstart_puts "Upgrading RVM ..."
      kickstart_upgrade_rvm
    fi
  else
    kickstart_puts "Installing RVM ..."
    # NOTE(review): no actual install command follows in the original —
    # presumably still a TODO; behavior left unchanged.
  fi
}

# True when `rvm --version` reports a working rvm.
kickstart_rvm_is_installed() {
  rvm --version 2>/dev/null | grep -c "rvm" >/dev/null
}

# Update RVM to the latest stable release (stdout suppressed).
kickstart_upgrade_rvm() {
  rvm get stable 1>/dev/null
}
# Report whether ruby 2.2.3 needs installing (the actual install is still
# TODO in this script; only the messages are emitted).
kickstart_install_ruby() {
  local ruby_ver="2.2.3"
  if ! kickstart_ruby_is_installed ; then
    kickstart_puts "Installing ruby %s ..." "$ruby_ver"
  else
    kickstart_puts "Ruby %s is already installed ..." "$ruby_ver"
  fi
}

# True when `rvm list` mentions ruby-2.2.3.
kickstart_ruby_is_installed() {
  rvm list 2>/dev/null | grep -q "ruby-2.2.3"
}
# Install a gem if missing; when already present, update it unless the user
# passed --no-update. Extra arguments are forwarded to gem install/update.
kickstart_gem_install_or_update() {
  if gem list "$1" --installed > /dev/null; then
    kickstart_puts "Gem %s is already installed ..." "$1"
    # BUG FIX: `(( $noupdate = 0))` was an invalid arithmetic assignment
    # (error hidden by 2>/dev/null), so updates never ran. Compare instead,
    # defaulting to 0 (update).
    if (( ${noupdate:-0} == 0 )); then
      kickstart_puts "Updating %s ..." "$1"
      gem update "$@"
    fi
  else
    kickstart_puts "Installing %s ..." "$1"
    gem install "$@"
  fi
}
# Print the command-line usage text to stdout ("%b" interprets backslash
# escapes, though the message contains only literal newlines).
kickstart_usage() {
  printf "%b" "
Usage
  kickstart [options]
Options
  [[--]ruby-version] <version>
    The ruby version to install. Valid values are:
      <x>.<y>.<z> - Major version x, minor version y and patch z.
  [[--]help]
    Display this output
"
}
# Shorthand wrapper around kickstart_puts for a single message.
p() {
  kickstart_puts "$1"
}
# Parse command-line options into globals:
#   ruby_version  - from "--ruby-version <v>" (value optional; skipped if absent)
#   noupdate      - set to 1 by "--no-update"
# "--help" prints usage and exits 0; any unknown token prints usage, exits 1.
kickstart_parse_params() {
  while (( $# > 0 )); do
    token="$1"
    shift
    case "$token" in
      (--ruby-version)
        # Consume the following argument as the version, when present.
        if [[ -n "${1:-}" ]]; then
          ruby_version="$1"
          shift
        fi
        ;;
      (--no-update)
        noupdate=1
        ;;
      (--help)
        kickstart_usage
        exit 0
        ;;
      (*)
        kickstart_usage
        exit 1
        ;;
    esac
  done
}
# Install the baseline apt packages needed for development.
# NOTE(review): 'qt' is an unusual apt package name on Debian/Ubuntu —
# confirm it resolves in the target release.
kickstart_install_packages() {
  kickstart_package_install 'curl'
  kickstart_package_install 'git'
  kickstart_package_install 'vim'
  kickstart_package_install 'apt-utils'
  kickstart_package_install 'exuberant-ctags'
  kickstart_package_install 'silversearcher-ag'
  kickstart_package_install 'qt'
  kickstart_package_install 'openssl'
}
# Install/update the required gems.
kickstart_install_gems() {
  kickstart_gem_install_or_update 'bundler'
}
# Source ~/.kickstart.local when present so users can hook in extra steps.
kickstart_install_extensions() {
  if [[ -f "$HOME/.kickstart.local" ]]; then
    kickstart_puts "Installing extensions ..."
    . "$HOME/.kickstart.local"
  else
    kickstart_puts "No extensions found ..."
  fi
}
# Entry point: parse args, then install packages. The rvm/ruby/gem and
# extension steps are currently disabled (commented out).
kickstart() {
  kickstart_parse_params "$@"
  kickstart_install_packages
  # kickstart_install_rvm
  # kickstart_install_ruby
  # kickstart_install_gems
  # kickstart_install_extensions
  kickstart_puts "Installation completed!"
  exit 0
}
kickstart "$@"
| true
|
a903ecf1dc5474049e6df940b4170f9fcb63d33a
|
Shell
|
radamou/custom-prompt
|
/install-env-script/dev-hosts-conf.sh
|
UTF-8
| 530
| 3.171875
| 3
|
[] |
no_license
|
#!/bin/bash
# Dev-host bootstrap: ensure ruby is installed, then install and start the
# dory dnsmasq proxy (a ruby gem).
#===========================================terminator shurtcuts help===================================
echo "check if ruby is installed"
# `command -v` prints the path only when the command exists; -x checks it
# is executable. The negation means: install when ruby is absent.
if ! [ -x "$(command -v ruby)" ]; then
	echo "installing ruby";
	sudo apt-get update
	# NOTE(review): no -y flag, so this will prompt interactively.
	sudo apt-get install ruby-full
	ruby --version
else
	echo "ruby is already installed";
fi
# Same pattern for dory: install via gem and bring it up if missing.
if ! [ -x "$(command -v dory)" ]; then
	echo "installing dory for dnsmasq";
	sudo gem install dory
	sudo dory up
	dory config-file
else
	echo "dory is already installed";
fi
| true
|
744c12d9377b4e25f9d382f5d82bacff38450eda
|
Shell
|
andykimpe/gitlab-recipes
|
/install/centos/install-mysql-apache.sh
|
UTF-8
| 10,213
| 3.3125
| 3
|
[] |
no_license
|
#!/bin/bash
# GitLab MySQL/Apache installer — preflight checks for CentOS 6.4/6.5.
# Set custom logging methods so we create a log file in the current working directory.
logfile=/var/log/gitlab-install-mysql-apache.log
exec > >(tee $logfile)
exec 2>&1

# Root is required for the yum/service/chkconfig work that follows.
if [ $UID -ne 0 ]; then
	echo "Installed failed! To install you must be logged in as 'root', please try again"
	exit 1
fi

# Lets check for some common control panels that we know will affect the installation/operating of Gitalb.
if [ -e /usr/local/cpanel ] || [ -e /usr/local/directadmin ] || [ -e /usr/local/solusvm/www ] || [ -e /usr/local/home/admispconfig ] || [ -e /usr/local/lxlabs/kloxo ] || [ -e /opt/ovz-web-panel/ ] ; then
	echo "You appear to have a control panel already installed on your server; This installer"
	echo "is designed to install and configure ZPanel on a clean OS installation only!"
	echo ""
	echo "Please re-install your OS before attempting to install using this script."
	exit
fi

# Ensure the installer is launched and can only be launched on CentOs 6.4/6.5
BITS=$(uname -m | sed 's/x86_//;s/i[3-6]86/32/')
if [ -f /etc/centos-release ]; then
	OS="CentOs"
	VER=$(sed 's/^.*release //;s/ (Fin.*$//' /etc/centos-release)
else
	OS=$(uname -s)
	VER=$(uname -r)
fi
echo "Detected : $OS $VER $BITS"
# BUG FIX: the original `[ A ] && [ B ] || [ C ]` groups as (A && B) || C,
# so ANY operating system reporting version 6.5 was accepted. Group the two
# version checks so the OS check always applies.
if [ "$OS" = "CentOs" ] && { [ "$VER" = "6.4" ] || [ "$VER" = "6.5" ]; } ; then
	echo "Ok."
else
	echo "Sorry, this installer only supports the installation of Gitalb on CentOS 6.5."
	exit 1;
fi
# Generate a random alphanumeric password.
# Arguments: $1 - desired length (defaults to 16 when empty/unset).
# Outputs:   the password on stdout (xargs trims the head -c output).
# BUG FIX: the original leaked its working length into the global variable
# 'l' and left expansions unquoted; use a local with a default instead.
passwordgen() {
	local length="${1:-16}"
	tr -dc 'A-Za-z0-9' < /dev/urandom | head -c "${length}" | xargs
}
# Collect deployment parameters up front; everything after runs unattended.
gitlabpassword=`passwordgen`
echo -e "Enter subdomain for gitlab"
echo -e "eg : gitlab.yourdomain"
read -e -p "Enter subdomain for gitlab : " subdomain
read -e -p "Enter email address for send log file : " emaillog
read -e -p "Enter email address for support : " emailsupport
read -e -p "Enter principal email address for gitlab : " emailgitlab
# install mysql and configure password
yum -y install mysql mysql-server > /dev/null 2>&1
service mysqld start > /dev/null 2>&1
service mysqld restart > /dev/null 2>&1
chkconfig mysqld on > /dev/null 2>&1
password=`passwordgen`
mysqladmin -u root password "$password" > /dev/null 2>&1
# If setting the root password failed (e.g. one was already set), keep
# prompting until a password that actually authenticates is supplied.
until mysql -u root -p$password -e ";" > /dev/null 2>&1 ; do
read -s -p "Enter Your mysql root Password: " password
done
# Disable SELinux and the sendmail MTA (postfix is installed later).
sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/selinux/config
setenforce 0
chkconfig sendmail off
service sendmail stop
yum -y remove bind-chroot
# Enable the EPEL and PUIAS repositories for the build dependencies below.
rpm --import https://www.fedoraproject.org/static/0608B895.txt
yum -y install http://dl.fedoraproject.org/pub/epel/6/$(uname -m)/epel-release-6-8.noarch.rpm
cat > "/etc/yum.repos.d/PUIAS_6_computational.repo" <<EOF
[PUIAS_6_computational]
name=PUIAS computational Base \$releasever - \$basearch
mirrorlist=http://puias.math.ias.edu/data/puias/computational/\$releasever/\$basearch/mirrorlist
#baseurl=http://puias.math.ias.edu/data/puias/computational/$releasever/$basearch
enabled=1
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-puias
EOF
rpm --import http://springdale.math.ias.edu/data/puias/6/x86_64/os/RPM-GPG-KEY-puias
yum-config-manager --enable epel --enable PUIAS_6_computational
yum -y update
# Remove the distro ruby; Ruby 2.0 is compiled from source further below.
yum -y remove ruby ruby-devel ruby-libs rubygem
yum -y groupinstall 'Development Tools'
yum -y install vim-enhanced readline readline-devel ncurses-devel gdbm-devel glibc-devel tcl-devel openssl-devel curl-devel expat-devel db4-devel byacc sqlite-devel gcc-c++ libyaml libyaml-devel libffi libffi-devel libxml2 libxml2-devel libxslt libxslt-devel libicu libicu-devel system-config-firewall-tui python-devel redis sudo wget crontabs logwatch logrotate perl-Time-HiRes git
yum-config-manager --enable rhel-6-server-optional-rpms
yum -y update
chkconfig redis on
service redis start
# Mail stack: postfix plus virtual-mailbox system users.
yum -y install postfix postfix-perl-scripts
useradd -r -u 101 -g mail -d /var/zpanel/vmail -s /sbin/nologin -c "Virtual mailbox" vmail
mkdir -p /var/spool/vacation
useradd -r -d /var/spool/vacation -s /sbin/nologin -c "Virtual vacation" vacation
chmod -R 770 /var/spool/vacation
chown -R vacation:vacation /var/spool/vacation
# NOTE(review): vmail was already created above with a different home dir;
# this second useradd will fail — confirm which home directory is intended.
useradd -r -u 101 -g mail -d /var/mail -s /sbin/nologin -c "Virtual mailbox" vmail
service postfix start
service postfix restart
chkconfig postfix on
yum -y install postgresql-server postgresql-devel
#install checkinstall for auto create rpm for ruby
echo "install checkinstall"
cd /tmp
git clone http://checkinstall.izto.org/checkinstall.git
cd checkinstall
make
make install
ln -s /usr/local/bin/checkinstall /usr/bin/checkinstall
# Fresh rpmbuild tree so checkinstall's generated RPM lands in a known place.
rm -rf ~/rpmbuild/{BUILD,RPMS,SOURCES,SPECS,SRPMS}
mkdir -p ~/rpmbuild/{BUILD,RPMS,SOURCES,SPECS,SRPMS}
# Download and compile it:
echo "echo compilling ruby"
mkdir /tmp/ruby && cd /tmp/ruby
curl --progress ftp://ftp.ruby-lang.org/pub/ruby/2.0/ruby-2.0.0-p353.tar.gz | tar xz
cd ruby-2.0.0-p353
./configure --prefix=/usr/local/
make
echo "checkinstall ruby please validate default option"
# checkinstall runs `make install` and packages the result as an RPM.
checkinstall --pkgname=ruby --pkgversion=2.0.0.p353 -y --default --deldesc=yes -R make install
cd
rm -rf /tmp/checkinstall
rm -rf /tmp/ruby
echo "install ruby"
# Install the RPM checkinstall just produced.
yum -y install ~/rpmbuild/RPMS/$(uname -m)/*.rpm
# Bundler gem plus a dedicated "git" system user that owns the GitLab install.
gem install bundler --no-ri --no-rdoc
adduser --system --shell /bin/bash --comment 'GitLab' --create-home --home-dir /home/git/ git
# Forward root's and git's local mail to the configured log address.
echo $emaillog > /root/.forward
chown root /root/.forward
chmod 600 /root/.forward
restorecon /root/.forward
echo $emaillog > /home/git/.forward
chown git /home/git/.forward
chmod 600 /home/git/.forward
restorecon /home/git/.forward
# gitlab-shell v1.8.0, pointed at the unicorn port proxied by Apache below.
su git -c "cd /home/git/ && git clone https://github.com/gitlabhq/gitlab-shell.git"
su git -c "cd /home/git/gitlab-shell && git checkout v1.8.0 && cp config.yml.example config.yml"
su git -c "sed -i 's|gitlab_url: \"http://localhost/\"|gitlab_url: \"http://localhost:8080/\"|' /home/git/gitlab-shell/config.yml"
su git -c "/home/git/gitlab-shell/bin/install"
# Production database plus a dedicated, least-privilege gitlab DB user.
mysql -u root -p$password -e "CREATE USER 'gitlab'@'localhost' IDENTIFIED BY '$gitlabpassword'";
mysql -u root -p$password -e "CREATE DATABASE IF NOT EXISTS gitlabhq_production DEFAULT CHARACTER SET utf8 COLLATE utf8_unicode_ci";
mysql -u root -p$password -e "GRANT SELECT, LOCK TABLES, INSERT, UPDATE, DELETE, CREATE, DROP, INDEX, ALTER ON gitlabhq_production.* TO gitlab@localhost";
# GitLab 6.3 itself, configured with the addresses collected earlier.
su git -c "cd /home/git/ && git clone https://github.com/gitlabhq/gitlabhq.git gitlab"
su git -c "cd /home/git/gitlab && git checkout 6-3-stable && cp config/gitlab.yml.example config/gitlab.yml"
su git -c "sed -i 's|email_from: gitlab@localhost|email_from: $emailgitlab|g' /home/git/gitlab/config/gitlab.yml"
su git -c "sed -i 's|support_email: support@localhost|support_email: $emailsupport|g' /home/git/gitlab/config/gitlab.yml"
su git -c "sed -i 's|localhost|$subdomain|g' /home/git/gitlab/config/gitlab.yml"
su git -c "chown -R git /home/git/gitlab/log/"
su git -c "chown -R git /home/git/gitlab/tmp/"
su git -c "chmod -R u+rwX /home/git/gitlab/log/"
su git -c "chmod -R u+rwX /home/git/gitlab/tmp/"
su git -c "mkdir /home/git/gitlab-satellites"
su git -c "mkdir /home/git/gitlab/tmp/pids/"
su git -c "mkdir /home/git/gitlab/tmp/sockets/"
su git -c "chmod -R u+rwX /home/git/gitlab/tmp/pids/"
su git -c "chmod -R u+rwX /home/git/gitlab/tmp/sockets/"
su git -c "mkdir /home/git/gitlab/public/uploads"
su git -c "chmod -R u+rwX /home/git/gitlab/public/uploads"
su git -c "cp /home/git/gitlab/config/unicorn.rb.example /home/git/gitlab/config/unicorn.rb"
git config --global user.name "GitLab"
git config --global user.email "$emailgitlab"
git config --global core.autocrlf input
# Database config gets the MySQL root password generated earlier.
su git -c "cp /home/git/gitlab/config/database.yml.mysql /home/git/gitlab/config/database.yml"
su git -c "sed -i 's| password: \"secure password\"| password: \"$password\"|g' /home/git/gitlab/config/database.yml"
su git -c "chmod o-rwx /home/git/gitlab/config/database.yml"
# Required gems, the gitlab rake setup, then the sysvinit service.
gem install charlock_holmes --version '0.6.9.4'
gem install json -v '1.7.7'
gem install pg -v '0.15.1'
gem install rdoc-data
su git -c "cd /home/git/gitlab/ && /usr/local/bin/bundle install --deployment --without development test mysql puma aws"
su git -c "cd /home/git/gitlab/ && /usr/local/bin/bundle install --deployment --without development test mysql2 puma aws"
su git -c "cd /home/git/gitlab/ && /usr/local/bin/bundle exec rake gitlab:setup RAILS_ENV=production"
wget -O /etc/init.d/gitlab https://raw.github.com/gitlabhq/gitlab-recipes/master/init/sysvinit/centos/gitlab-unicorn
chmod +x /etc/init.d/gitlab
chkconfig --add gitlab
chkconfig gitlab on
su git -c "cd gitlab/ && /usr/local/bin/bundle exec rake gitlab:env:info RAILS_ENV=production"
service gitlab start
su git -c "cd gitlab/ && /usr/local/bin/bundle exec rake gitlab:check RAILS_ENV=production"
# Apache as a reverse proxy in front of the GitLab unicorn server.
yum -y install httpd mod_ssl
chkconfig httpd on
wget -O /etc/httpd/conf.d/gitlab.conf https://raw.github.com/gitlabhq/gitlab-recipes/master/web-server/apache/gitlab.conf
# Double quotes so $subdomain actually expands; the original single-quoted
# sed wrote the literal string "$subdomain" into the Apache config.
sed -i "s| ServerName gitlab.example.com| ServerName $subdomain|g" /etc/httpd/conf.d/gitlab.conf
sed -i "s| ProxyPassReverse http://gitlab.example.com/| ProxyPassReverse http://$subdomain/|g" /etc/httpd/conf.d/gitlab.conf
# Keep a copy of the stock ssl.conf, then replace it with a minimal one that
# only binds port 443 (the gitlab.conf vhost handles the rest).
mkdir "/etc/httpd/conf.d.save"
cp "/etc/httpd/conf.d/ssl.conf" "/etc/httpd/conf.d.save"
cat > /etc/httpd/conf.d/ssl.conf <<EOF
#NameVirtualHost *:80
<IfModule mod_ssl.c>
# If you add NameVirtualHost *:443 here, you will also have to change
# the VirtualHost statement in /etc/httpd/conf.d/gitlab.conf
# to <VirtualHost *:443>
#NameVirtualHost *:443
Listen 443
</IfModule>
EOF
mkdir -p /var/log/httpd/logs/
service httpd restart
# Open the firewall by disabling iptables entirely.
service iptables save
service iptables stop
chkconfig iptables off
echo "install file"
echo "url for gitlab http://$subdomain" &>/dev/tty
echo "user (email) admin@local.host" &>/dev/tty
echo "password 5iveL!fe" &>/dev/tty
echo "mysql user gitlab" &>/dev/tty
echo "password for gitlabuser $gitlabpassword" &>/dev/tty
echo "mysql root password $password" &>/dev/tty
echo "information save in /root/gitlab-password.txt" &>/dev/tty
echo url for gitlab http://"$domain" > /root/gitlab-password.txt
echo url for user email "admin@local.host" >> /root/gitlab-password.txt
echo password 5iveL!fe >> /root/gitlab-password.txt
echo password for gitlabuser "$gitlabpassword" >> /root/gitlab-password.txt
echo mysql root password "$password" > /root/gitlab-password.txt
| true
|
15d3862d685c0503a1e13bbf2014cb8ec2285d3d
|
Shell
|
netj/3x
|
/.depends/python2.6-or-greater.sh
|
UTF-8
| 287
| 3.40625
| 3
|
[] |
no_license
|
#!/bin/sh
# use latest python available
# Symlink the newest Python 2 (>= 2.6) on PATH into $DEPENDS_PREFIX/bin/python.
set -eu
mkdir -p "$DEPENDS_PREFIX"/bin
for python in python2.7 python2.6; do
# `command -v` is POSIX; the original `type -p` is a bashism under #!/bin/sh.
pythonpath=$(command -v "$python" 2>/dev/null) || continue
ln -sfn "$pythonpath" "$DEPENDS_PREFIX"/bin/python
exit 0
done
echo >&2 "No Python >= 2.6 found"
false
| true
|
89f4423b94f516245f1793f353f165ca29cc29c1
|
Shell
|
simplificator/ansible
|
/test/basic-test.sh
|
UTF-8
| 955
| 3.578125
| 4
|
[] |
no_license
|
#!/bin/bash
# Smoke-test the toolchain required to run our Ansible setup.
set -e
printf "\nChecking that SSH is available\n"
if [[ $(command -v ssh) ]]; then
  echo "SSH command is available"
else
  echo "SSH command not found"
  exit 1
fi
printf "\nChecking Python version\n"
# `-v` only enables verbose import tracing and drops into the REPL (which can
# hang a non-interactive run); `--version` is the actual version query.
python --version 2> /dev/null || python3 --version 2> /dev/null || exit 1
printf "\nChecking that ansible toolbet is avaible\n"
ansible --version 2> /dev/null || exit 1
ansible-galaxy --version 2> /dev/null || exit 1
ansible-playbook --version 2> /dev/null || exit 1
ansible-vault --version 2> /dev/null || exit 1
printf "\nChecking that Ansible can decrypt a Vault\n"
if ansible-vault view --vault-password-file test/password test/vault.yml | grep -q 'test'; then
  echo "Matched"
else
  exit 1
fi
printf "\nChecking that Ansible can download collections and roles from Galaxy\n"
ansible-galaxy role install -r test/requirements.yml 2> /dev/null || exit 1
ansible-galaxy collection install -r test/requirements.yml -p ./ 2> /dev/null || exit 1
| true
|
2f306b671624d4bd6d9f8a83e8b1d44eb670be1a
|
Shell
|
coopermaa/docker-mini
|
/lab-13/build-rootfs.sh
|
UTF-8
| 240
| 2.84375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Bundle the wrk binary together with its NSS libraries using extract-elf-so.
wrk_binary=wrk_linux-amd64
echo
echo "=====> Packing wrk executable and NSS stuff..."
# extract-elf-so installer, if needed:
#curl -sSL http://bit.ly/install-extract-elf-so | sudo bash
extract-elf-so --nss-net -z "$wrk_binary"
echo
echo "=====> Done!"
| true
|
7c4ddb90fe9e1e48f8c61c915d7ccf3ac5fc0c26
|
Shell
|
pebgroup/Seed_Plant_BackBone
|
/Phylogeny/scripts/blastn_353ref.sh
|
UTF-8
| 1,952
| 3.328125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# BLAST every 1KP transcriptome assembly against the Angiosperms353 reference
# loci, keep the best hit per locus (via an R helper), extract and rename the
# matching sequences, and append them to per-locus fasta files.
date
# define data and results directory
path="/home/cactus/faststorage/PhyloSynth/PhyloSynth_progress/BackBone/"
cd ${path}data && mkdir -p 1kp_blast 1kp_blast/1kp2-353fasta
#For BLAST: (evalue 1e-5; this is hybpiper's default setting)
#cd 1kp-SOAPdenovo-Trans-assembly
# loop through fa files
# NOTE(review): `for z in \`ls ...\`` word-splits on spaces in filenames;
# a plain glob would be safer if paths can contain whitespace.
for z in `ls ./1kp_raw/*.fa`; do
#z=$1
ref=$(basename $z .fa)
code=$(basename $z -SOAPdenovo-Trans-assembly.fa)
echo -e "\n\n working on $ref \n\n"
#buiding the database in the same directory as the data located
makeblastdb -in $z -dbtype nucl -parse_seqids
for gene in `ls ./ref_353genes/*.fasta`; do
name=$(basename $gene .fasta)
echo -e "\n\n working on $name \n\n"
blastn -db $z -query $gene -evalue 1e-5 -outfmt "6 qseqid qlen sseqid slen qstart qend sstart send length mismatch gapopen evalue bitscore" -out ./1kp_blast/${name}_${code}.blast.out
#check if no results from the blast
# then skip the R test
line=$(wc -l ./1kp_blast/${name}_${code}.blast.out|cut -f1 -d' ')
if [ "$line" -eq 0 ]; then
continue
else
#using R to select the best sequence based in bitscore, evalue, align length and the subject length
#sed "s/XXXX/${name}_${code}/g" ../script/1kp_script/Select_best_hit.R >tmp.Select_best_hit.R
sed -i '1 i\qseqid\tqlen\tsseqid\tslen\tqstart\tqend\tsstart\tsend\tlength\tmismatch\tgapopen\tevalue\tbitscore' ./1kp_blast/${name}_${code}.blast.out
Rscript ../script/1kp_script/Select_best_hit.R ${path}data/1kp_blast/${name}_${code}.blast.out
# Pull the winning sequences out of the BLAST db, then rename them with pxrls.
blastdbcmd -db $z -dbtype nucl -entry_batch ./1kp_blast/${name}_${code}.blast.seq.txt -outfmt %f -out tmp.fasta
grep ">" tmp.fasta |sed 's/>//g' >old_name.txt
grep ">" tmp.fasta |sed 's/Smilax_bona-nox/Smilax_bona_nox/g'|awk -F '-' '{print $NF}'|sed 's/$/_1kp/g' >new_name.txt
pxrls -s tmp.fasta -c old_name.txt -n new_name.txt >>./1kp_blast/1kp2-353fasta/${name}_1kp.fasta
rm tmp.* new_name.txt old_name.txt
fi
done
done
date
| true
|
2dc9f606d27d7f68df3b9405c650fdfc4872e3e8
|
Shell
|
yshinkarev/my-bash
|
/develop/make_bin_from_jar.sh
|
UTF-8
| 482
| 3.71875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Turn a runnable jar into a self-executing binary by prepending a shell
# launcher stub, so the result can be run as `./foo` instead of `java -jar`.
JAR=$1
if [ -z "${JAR}" ]; then
    echo >&2 "Missing argument: jar file"
    exit 1
fi
if [ ! -f "${JAR}" ]; then
    # Fixed: a stray second "echo" used to appear inside this message.
    echo >&2 "File ${JAR} not found"
    exit 1
fi
# Output name: the jar path without its extension.
BIN=${JAR%.*}
# Launcher stub: locates itself, picks a JVM ($JAVA_HOME wins), then
# re-executes this very file as a jar (the jar bytes follow the stub).
echo '#!/usr/bin/env bash
MYSELF=`which "$0" 2>/dev/null`
[ $? -gt 0 -a -f "$0" ] && MYSELF="./$0"
java=java
if test -n "$JAVA_HOME"; then
java="$JAVA_HOME/bin/java"
fi
exec "$java" $java_args -jar $MYSELF "$@"
exit 1 ' >$BIN
cat $JAR >>$BIN
chmod +x $BIN
echo $BIN
| true
|
a1ad5549cdd87ba0dafe55cac01969782b71b734
|
Shell
|
wencycool/go-mysql
|
/test/jenkins.sh
|
UTF-8
| 479
| 2.875
| 3
|
[
"BSD-3-Clause",
"AGPL-3.0-only"
] |
permissive
|
#!/bin/bash
# Jenkins CI entry point: set up the Go workspace, clone go-mysql, run tests.
export GOROOT="/usr/local/go"
export GOPATH="$WORKSPACE/go:$HOME/go"
export PATH="$PATH:$GOROOT/bin:$GOPATH/bin"
export PCT_TEST_MYSQL_ROOT_DSN="root:@unix(/var/run/mysqld/mysqld.sock)/"
# rewrite https:// for percona projects to git://
# Fixed: the prefix read "httpstools://", which never matches any URL,
# so the SSH rewrite was silently a no-op.
git config --global url.git@github.com:percona/.insteadOf https://github.com/percona/
repo="$WORKSPACE/go/src/github.com/percona/go-mysql"
[ -d "$repo" ] || mkdir -p "$repo"
cd "$repo"
# Run tests
test/runner.sh -u
exit $?
| true
|
1e671c68b1ea078180394f175e271ddd09b9ba0a
|
Shell
|
AfricasVoices/Project-AHADI
|
/run_scripts/5_backup_data_root.sh
|
UTF-8
| 548
| 3.96875
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Back up a data-root directory to a gzipped tarball named
# data-<run-id>-<git-HEAD-hash>.tar.gzip in the backups directory.
set -e
if [[ $# -ne 3 ]]; then
    echo "Usage: ./5_backup_data_root <data-root> <data-backups-dir> <run-id>"
    echo "Backs-up the data root directory to a compressed file in a backups directory"
    echo "The directory is gzipped and given the name 'data-<run-id>-<git-HEAD-hash>'"
    # Fixed: a bare `exit` returned 0 here, so callers could not detect misuse.
    exit 1
fi
DATA_ROOT=$1
DATA_BACKUPS_DIR=$2
RUN_ID=$3
# Tag the archive with the current commit so each backup is traceable.
HASH=$(git rev-parse HEAD)
mkdir -p "$DATA_BACKUPS_DIR"
# Drop macOS Finder metadata before archiving.
find "$DATA_ROOT" -type f -name '.DS_Store' -delete
cd "$DATA_ROOT"
tar -czvf "$DATA_BACKUPS_DIR/data-$RUN_ID-$HASH.tar.gzip" .
| true
|
b547b95551f949408feaa78bc8b5341c94fb32d3
|
Shell
|
HowookJeong/elastic-stack-installer
|
/stack/filebeat/bin/start
|
UTF-8
| 233
| 3.359375
| 3
|
[] |
no_license
|
#!/bin/bash
# Start the filebeat binary that lives next to this script, daemonized,
# recording its PID in a PID file alongside it.
DIR_NAME=`dirname "$0"`
DIR_HOME=`cd $DIR_NAME; pwd`
# A leftover PID file suggests filebeat may already be running.
# NOTE(review): this only warns — the script still launches a second instance
# below; confirm whether it should `exit` here instead.
if [ -f "$DIR_HOME/PID" ]; then
echo "Running Process and Check your Filebeat"
fi
# Launch in the background, discard all output, and persist the PID.
$DIR_HOME/filebeat > /dev/null 2>&1 & PID=$!
echo $PID
echo $PID > $DIR_HOME/PID
| true
|
5eea9e67d9e7dd65bb6e31824eedddc6c10e51a9
|
Shell
|
shahrzadmoeiniyan/MRFresidualSigsCorticalChar
|
/step2_coregistermrfseries.sh
|
UTF-8
| 5,617
| 3.46875
| 3
|
[] |
no_license
|
#!/bin/sh
#Shahrzad Moinian | 13 March 2018
#part of the image processing scripts for the paper "Human grey matter characterisation based on MR fingerprinting residual signals"
#This script co-registers MRF images of the three series of MRF acquisitions per slice, then calculates the average per MRF frame
#1st arg: the MRF image of an input series 1, after BET
#2nd arg: the MRF image of an input series 2, after BET
#3rd arg: the MRF image of the ref series (e.g. series 3), after BET
#4th arg: output folder of the results
#the 5th and 6th args: the folder of input MRF images of series 1 and series 2
#7th arg: folder of the MRF series that has been chosen as the reference
if [ "$#" -ne 7 ]
then
echo "Usage: $0"
echo "1st arg: the MRF image of an input series 1, after BET"
echo "2nd arg: the MRF image of an input series 2, after BET"
echo "3rd arg: the MRF image of the ref series, after BET"
echo "4th arg: output folder of the results"
echo "the 5th and 6th args: the folder of input MRF images of series 1 and series 2"
echo "7th arg: folder of the MRF series that has been chosen as the reference"
exit 1;
fi
mrfin1=$1
mrfin2=$2
mrfref=$3
outfold=$4
infold1=$5
infold2=$6
mrfreffold=$7
# Globs over the per-series frame images (expanded later in the for loops).
inputarray1=${infold1}/*.nii.gz
inputarray2=${infold2}/*.nii.gz
refarray=${mrfreffold}/*.nii.gz
outfold1="${outfold}/inputs1_to_ref"
outfold2="${outfold}/inputs2_to_ref"
#reffold="${outfold}/refseries_to_ref"
reffold=${mrfreffold}
avgfold="${outfold}/seriesavg"
mkdir ${outfold1}
mkdir ${outfold2}
mkdir "${outfold1}/reged_inputs1"
mkdir "${outfold2}/reged_inputs2"
#mkdir ${reffold}
mkdir ${avgfold}
#finding transformation matrix from the first series to the ref series
mrfin1name=$(basename "$mrfin1")
mrfin1name="${mrfin1name%.*.*}" #skip '.nii.gz'... if '.nii' only, %.*
mrftmat1="${outfold}/${mrfin1name}-tmat.mat"
flirt -in ${mrfin1} -ref ${mrfref} -omat ${mrftmat1} -2D -schedule $FSLDIR/etc/flirtsch/sch2D_6dof
#finding transformation matrix from the second series to the ref series
mrfin2name=$(basename "$mrfin2")
mrfin2name="${mrfin2name%.*.*}" #skip '.nii.gz'... if '.nii' only, %.*
mrftmat2="${outfold}/${mrfin2name}-tmat.mat"
flirt -in ${mrfin2} -ref ${mrfref} -omat ${mrftmat2} -2D -schedule $FSLDIR/etc/flirtsch/sch2D_6dof
#finding transformation matrix from all mrf images of input series 1 to the reference mrf image within the same series, and then concatenating it with the transformation matrix found (above) from the input series 1 to the ref series, to perform co-registration of all mrf images of input series 1 to the ref series.
for i in ${inputarray1[@]}
do
infile=$(basename "$i") #with the file extension
infile="${infile%.*.*}" #without the file extension
if [ "$mrfin1name" != "$infile" ]
then
#for registration between the images of the same series, I don’t use 6DOF schedule, because it is usually not even useful to do 6DOF when registering intra-subject. Thus, 6DOF seems even more unnecessary when dealing with the images of the same series.
#flirt -in ${i} -ref ${mrfin1} -out "${outfold1}/reged_inputs1/${infile}-toser" -2D
flirt -in ${i} -ref ${mrfref} -out "${outfold1}/${infile}-reg" -applyxfm -init ${mrftmat1}
echo "${infile} registered!"
else
#if the current input (i.e. infile) is the same as the reference image of the current series (i.e. mrfin1), then I just need to apply the transformation matrix found before (i.e. mrfmat1).
flirt -in ${i} -ref ${mrfref} -out "${outfold1}/${infile}-reg" -applyxfm -init ${mrftmat1}
echo "${infile} registered!"
fi
done
#same as above, for input series 2 . . .
for i in ${inputarray2[@]}
do
infile=$(basename "$i") #with the file extension
infile="${infile%.*.*}" #without the file extension
if [ "$mrfin2name" != "$infile" ]
then
#flirt -in ${i} -ref ${mrfin2} -out "${outfold2}/reged_inputs2/${infile}-toser" -2D
flirt -in ${i} -ref ${mrfref} -out "${outfold2}/${infile}-reg" -applyxfm -init ${mrftmat2}
echo "${infile} registered!"
else
#if the current input (i.e. infile) is the same as the reference image of the current series (i.e. mrfin2), then I just need to apply the transformation matrix found before (i.e. mrfmat2).
flirt -in ${i} -ref ${mrfref} -out "${outfold2}/${infile}-reg" -applyxfm -init ${mrftmat2}
echo "${infile} registered!"
fi
done
#registering the MRF images in the mrfref series to the ref mrf image. After this step, basically all images of all series are registered to the reference mrf image of the reference series.
#mrfrefname=$(basename "$mrfref")
#mrfrefname="${mrfrefname%.*.*}" #skip '.nii.gz'... if '.nii' only, %.*
#for i in ${refarray[@]}
#do
#	infile=$(basename "$i") #with the file extension
#	infile="${infile%.*.*}" #without the file extension
#	if [ "$mrfrefname" != "$infile" ]
#	then
#		flirt -in ${i} -ref ${mrfref} -out "${reffold}/${infile}-reg" -2D
#	else
#		cp ${mrfref} "${reffold}/${mrfrefname}-reg.nii.gz" #dont register the image to itself!
#	fi
#done
#NOTE: Calculating the average image of the series, ASSUMING THAT MRF REPETITIONS EQUAL TO 1000
avgbasename="${mrfin1name%s??_a_c*}"
# Zero-pad the repetition index to 4 digits to match the frame file names.
for ((idx=1;idx<=1000;idx++))
do
if [ $idx -lt 10 ]
then
repID="000${idx}"
else
if [ $idx -lt 100 ]
then
repID="00${idx}"
else
if [ $idx -lt 1000 ]
then
repID="0${idx}"
else
repID=${idx}
fi
fi
fi
echo Averaging the ${repID} repetition . . .
# Voxel-wise mean of the three co-registered series for this repetition.
fslmaths ${outfold1}/*_s??_a_c??_${repID}_brain-reg.nii.gz -add ${outfold2}/*_s??_a_c??_${repID}_brain-reg.nii.gz -add ${reffold}/*_s??_a_c??_${repID}_brain.nii.gz -div 3 ${avgfold}/${avgbasename}_seriesavg_a${repID}
done
| true
|
958bcc93f6c4d9216c1e86dc595b1d56c19c6ff3
|
Shell
|
Adhav712/supply-chain-management-using-hyperledger-fabric
|
/supplychain-network/create_artifacts.sh
|
UTF-8
| 2,959
| 2.75
| 3
|
[] |
no_license
|
#!/bin/bash
# Generate the Fabric channel artifacts: the system genesis block, the
# application channel tx, and an anchor-peer update for each member org.
source ../terminal_control.sh
export FABRIC_CFG_PATH=${PWD}/configtx/
print Blue "$FABRIC_CFG_PATH"
# # Generate crypto material using cryptogen tool
# print Green "========== Generating Crypto Material =========="
# echo ""
# ../../fabric-samples/bin/cryptogen generate --config=./crypto-config.yaml --output="organizations"
# print Green "========== Crypto Material Generated =========="
# echo ""
SYS_CHANNEL=supplychain-sys-channel
print Purple "System Channel Name: "$SYS_CHANNEL
echo ""
CHANNEL_NAME=supplychain-channel
print Purple "Application Channel Name: "$CHANNEL_NAME
echo ""
# Hoist the tool path used by every step below.
CONFIGTXGEN=../../fabric-samples/bin/configtxgen
print Green "========== Generating System Genesis Block =========="
echo ""
"$CONFIGTXGEN" -configPath ./configtx/ -profile FourOrgsOrdererGenesis -channelID $SYS_CHANNEL -outputBlock ./channel-artifacts/genesis.block
print Green "========== System Genesis Block Generated =========="
echo ""
print Green "========== Generating Channel Configuration Block =========="
echo ""
"$CONFIGTXGEN" -profile FourOrgsChannel -configPath ./configtx/ -outputCreateChannelTx ./channel-artifacts/supplychain-channel.tx -channelID $CHANNEL_NAME
print Green "========== Channel Configuration Block Generated =========="
echo ""
# One anchor-peer update per member org (replaces four copy-pasted stanzas).
for ORG in IndonesianFarmOrg1MSP USClientOrg2MSP RubberShipperOrg3MSP GoodsCustomOrg4MSP; do
    print Green "========== Generating Anchor Peer Update For ${ORG} =========="
    echo ""
    "$CONFIGTXGEN" -profile FourOrgsChannel -configPath ./configtx/ -outputAnchorPeersUpdate ./channel-artifacts/${ORG}Anchor.tx -channelID $CHANNEL_NAME -asOrg ${ORG}
    print Green "========== Anchor Peer Update For ${ORG} Sucessful =========="
    echo ""
done
| true
|
e6429eca1af16c18e820b5b8b3a1d0997f1b032d
|
Shell
|
JilldeOnca/SaferServer
|
/ApachePatchy.sh
|
UTF-8
| 755
| 3.40625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Interactive menu for hardening an Apache configuration.
source DisableDirLs.sh
source DisableSymLinks.sh
echo "Welcome to ApachePatchy"
echo "Please choose the configuration you would like to make more secure."
echo "A Disable Directory Listing on your server"
echo "B Disable Following Symbolic Links on your server"
echo "X to EXIT ApachePatchy"
read -r selection
echo $selection
# Dispatch on the chosen menu letter.
case "$selection" in
A)
    echo "selection Indexes"
    DisableDirLs
    ;;
B)
    echo "selection SymLinks"
    DisableSymLinks
    ;;
X)
    echo "Thank you for using ApachePatchy"
    ;;
*)
    echo "Please enter:
A to Disable Directory Listings
B to Disable Following Symbolic Links
X to Exit ApachePatchy"
    ;;
esac
| true
|
115bfe316138dc5615fd7626b612e9f868312892
|
Shell
|
dnoonan08/cms-TT-run2
|
/Skim_NanoAOD/condor/runMakeSkims.sh
|
UTF-8
| 1,509
| 3.34375
| 3
|
[] |
no_license
|
#!/bin/bash
#To be run on remote machine
#Take input arguments as an array
myArray=( "$@" )
#Array: Size=$#, an element=$1, all element = $@
printf "Start Running Histogramming at ";/bin/date
printf "Worker node hostname ";/bin/hostname
# _CONDOR_SCRATCH_DIR is only set when running under HTCondor; in batch mode
# set up a fresh CMSSW area and unpack the shipped tarball into it.
if [ -z ${_CONDOR_SCRATCH_DIR} ] ; then
echo "Running Interactively" ;
else
echo "Running In Batch"
echo ${_CONDOR_SCRATCH_DIR}
source /cvmfs/cms.cern.ch/cmsset_default.sh
scramv1 project CMSSW CMSSW_10_2_14
cd CMSSW_10_2_14/src
eval `scramv1 runtime -sh`
cd ../..
tar --strip-components=1 -zxvf Skim_NanoAOD.tar.gz
fi
#Run for Base, Signal region
echo "All arguements: "$@
echo "Number of arguements: "$#
year=$1
sample=$2
job=$3
nJobTotal=$4
# varname holds the NAME of a file-list variable defined by the sourced
# script; ${!varname} below dereferences it (bash indirect expansion).
varname=${sample}_FileList_${year}
cd sample
source NanoAOD_Gen_FileLists_cff.sh
cd -
# Optional job splitting: append "<job>of<total>" when a job index was given.
if [ -z $job ] ; then
jobNum=""
else
jobNum=" ${job}of${nJobTotal}"
fi
echo "./makeSkim ${year}${jobNum} ${sample}_Skim_NanoAOD.root ${!varname}"
./makeSkim ${year}$jobNum ${sample}_Skim_NanoAOD.root ${!varname}
printf "Done Histogramming at ";/bin/date
#---------------------------------------------
#Copy the ouput root files
#---------------------------------------------
condorOutDir=/store/user/rverma/Output/cms-TT-run2/Skim_NanoAOD
# In batch mode, ship the outputs to EOS and clean the scratch area.
if [ -z ${_CONDOR_SCRATCH_DIR} ] ; then
echo "Running Interactively" ;
else
xrdcp -f ${sample}_Skim_NanoAOD*.root root://cmseos.fnal.gov/${condorOutDir}/${year}
echo "Cleanup"
rm -rf CMSSW_10_2_14
rm *.root
fi
printf "Done ";/bin/date
| true
|
49e364a8f3de567a186795e0a7ef112cb1c6f630
|
Shell
|
salmanfs815/VileBot
|
/vilebot/download-fonts.sh
|
UTF-8
| 116
| 2.53125
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Download every font URL listed in files/fontlist.txt into files/fonts/.
# -p so a re-run does not fail when the directory already exists.
mkdir -p files/fonts
# Read URLs line-by-line; the original `for font in \`cat ...\`` word-split
# and glob-expanded each line.
while IFS= read -r font; do
  [ -n "$font" ] || continue
  wget -P files/fonts/ "$font"
done < files/fontlist.txt
| true
|
b925ed63c6324a00816f382bc173397dd6e63f19
|
Shell
|
desh2608/nnet_pytorch
|
/librispeech/decode.sh
|
UTF-8
| 1,562
| 2.734375
| 3
|
[] |
no_license
|
#!/bin/bash
# Decode the Librispeech test sets with a trained nnet_pytorch chain model,
# then rescore the lattices with the large 4-gram LM.
speech_data=/export/corpora5 #/PATH/TO/LIBRISPEECH/data
. ./cmd.sh
. ./path.sh
# Defaults; all of these can be overridden on the command line via
# utils/parse_options.sh below.
stage=0
subsampling=4
chaindir=exp/chain_blstm
model_dirname=blstm
checkpoint=240_300.mdl
acwt=1.0
testsets="dev_clean dev_other test_clean test_other"
decode_nj=80
. ./utils/parse_options.sh
set -euo pipefail
tree=${chaindir}/tree
# Lattice acoustic weights are conventionally 10x the decoding acwt.
post_decode_acwt=`echo ${acwt} | awk '{print 10*$1}'`
# Echo Make graph if it does not exist
if [ ! -f ${tree}/graph_tgsmall/HCLG.fst ]; then
./utils/mkgraph.sh --self-loop-scale 1.0 \
data/lang_test_tgsmall ${tree} ${tree}/graph_tgsmall
fi
## Prepare the test sets if not already done
if [ ! -f data/dev_clean_fbank/mapped/feats.dat.1 ]; then
./local/prepare_test.sh --subsampling ${subsampling} --data ${speech_data}
fi
for ds in $testsets; do
decode_nnet_pytorch.sh --min-lmwt 6 \
--max-lmwt 18 \
--checkpoint ${checkpoint} \
--acoustic-scale ${acwt} \
--post-decode-acwt ${post_decode_acwt} \
--nj ${decode_nj} \
data/${ds}_fbank exp/${model_dirname} \
${tree}/graph_tgsmall exp/${model_dirname}/decode_${checkpoint}_graph_${acwt}_${ds}
echo ${decode_nj} > exp/${model_dirname}/decode_${checkpoint}_graph_${acwt}_${ds}/num_jobs
./steps/lmrescore_const_arpa.sh --cmd "$decode_cmd" \
data/lang_test_{tgsmall,fglarge} \
data/${ds}_fbank exp/${model_dirname}/decode_${checkpoint}_graph_${acwt}_${ds}{,_fglarge_rescored}
done
| true
|
a1f480c95ad72f19092ab7552e7145e871449924
|
Shell
|
marinebon/sdg14
|
/technical/docker/postgis.sh
|
UTF-8
| 1,034
| 3.03125
| 3
|
[] |
no_license
|
#!/bin/bash
#
# Post-startup config for postgis docker container based on
# [kartoza/docker-geoserver]( https://github.com/kartoza/docker-postgis ).
#
# Annotated notes here: [sdg14#4](https://github.com/marinebon/sdg14/issues/4).
#
# !!! NOTE !!! : these are more like notes and less like an actual runnable script
#
# startup of this container looks something like:
# ------------------------------
# docker run --name "postgis" \
#    --restart unless-stopped \
#    -p 5432:5432 \
#    -v /mbon:/mbon \
#    -v /mnt/mbon-supplement:/mbon-local \
#    -v /mnt/mbon-supplement/postgresql:/var/lib/postgresql \
#    -d -t kartoza/postgis
# copy postgis config allocating more RAM
docker exec postgis cp -f /mbon-local/postgresql.conf /etc/postgresql/9.5/.
# restart postgres to load config changes
docker exec postgis service postgresql restart
# install postgresql studio plugin
# (deployed into the *geoserver* container's tomcat webapps, not postgis)
docker exec geoserver bash -c '\
cd /usr/local/tomcat/webapps; \
wget http://downloads.postgresqlstudio.org/2.0/pgstudio_2.0.zip; \
unzip pgstudio_2.0.zip'
| true
|
ed1b87dfa6566a4c54d9d888cf5ca141018af63c
|
Shell
|
rrusnak1/nimbus
|
/ctx-agent/ctx-scripts/1-ipandhost/torqueslave
|
UTF-8
| 1,331
| 3.5625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# ************************************************************************* #
# 1-ipandhost scripts are called when the context broker tells this node it #
# *requires* to know about nodes playing the role. If this node is told it  #
# requires to know about nodes playing the "xyz" role, then if a script     #
# called "xyz" lives in this directory, it will be called with IP, short    #
# hostname and hostname (args $1, $2, and $3 respectively) of the node that #
# *provides* the required "xyz" role.                                       #
# ************************************************************************* #

script_dir=$(dirname $0)
ABSDIR=$(cd $script_dir; pwd)

echo "Hello from \"$ABSDIR/$0\""
echo ""
echo "Torque slave required: we are being told this node requires a Torque"
echo "slave, therefore it will be playing the role of Torque master."
echo ""
echo "TORQUESLAVE IP: $1"
echo "TORQUESLAVE Short hostname: $2"
echo "TORQUESLAVE Hostname: $3"

# One torqueslave node per invocation, so APPEND (never replace) its entry
# in the Torque nodes list.
echo "$2 np=2" >> /var/spool/torque/server_priv/nodes

# Marker file read by a later script to learn this node is the master.
touch /root/this_node_is_torque_master

exit 0
| true
|
14c04e163b56fb1f9097412f7ff99d80877484f1
|
Shell
|
Ajinkya-Nawarkar/Maroon-Gaming-co
|
/cron.sh
|
UTF-8
| 314
| 2.5625
| 3
|
[] |
no_license
|
#!/bin/bash
# Cron job: pull the latest master when the local checkout is behind origin.
cd /home/an839/public_html/DCSP/DCSP_Project
git reset --hard HEAD
# Capture the two HEAD hashes. The original assignments were missing $(...)
# so "rev-parse"/"ls-remote" ran as bare commands and the variables were
# never set; ls-remote also emits "<hash>\tHEAD", so keep only the hash.
LOCAL=$(git rev-parse HEAD)
REMOTE=$(git ls-remote https://github.com/Ajinkya-Nawarkar/DCSP_Project.git HEAD | awk '{print $1}')
# Spaces around the brackets were missing, which made the test a syntax error.
if [ "$LOCAL" != "$REMOTE" ]
then
git checkout master
git checkout .
git pull origin master
fi
chmod -R 777 .
# NOTE(review): "permit" is not a standard command — confirm it exists on the
# host (custom helper?) or remove this line.
permit
| true
|
8166098fa047aae3bbd48449eb40f0920ad30828
|
Shell
|
fossabot/anychaindb
|
/deploy/DOCKER/build.sh
|
UTF-8
| 653
| 3.46875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Build the AnychainDB docker images, each tagged "latest" and with the
# release tag taken from $TAG or parsed out of version.go.
set -e

# Derive TAG from version.go unless the caller already exported one.
if [ -z "$TAG" ]; then
    TAG=$(awk -F\" '/Version =/ { print $2; exit }' < ../../version/version.go)
fi

# Still empty means version.go could not be parsed either — give up.
if [ -z "$TAG" ]; then
    echo "Please specify a tag."
    exit 1
fi

echo "Build two docker images with latest and ${TAG} tags ..."
docker build \
    -t "anychaindb/node:latest" -t "anychaindb/node:$TAG" \
    -f dockerfiles/anychaindb-node.Dockerfile .
docker build \
    -t "anychaindb/abci:latest" -t "anychaindb/abci:$TAG" \
    -f dockerfiles/anychaindb-abci.Dockerfile .
docker build \
    -t "anychaindb/api:latest" -t "anychaindb/api:$TAG" \
    -f dockerfiles/anychaindb-rest-api.Dockerfile .
| true
|
f0c713e6b2284072efb288b7b0d9356197373ee7
|
Shell
|
Danielnkf/ScapeJS
|
/compress.sh
|
UTF-8
| 1,286
| 2.71875
| 3
|
[] |
no_license
|
#! /bin/bash
# Minify the ScapeJS sources with YUI Compressor, concatenating each result
# into Scape.js and installing it as dist/Scape.min.js, with a colored
# progress indicator on the terminal.
echo -e "\e[1m\e[36mCompiling ScapeJS\e[0m"
BOLD="$(tput bold)"
DELL="$(tput cub 100)"
DEFAULT="$(tput sgr 0)"
echo -en "\e[31m$BOLD[=      ] 1/7 'src/generator.js' $DELL"
java -jar yuicompressor-2.4.7.jar --type js "src/generator.js" >> Scape.js
echo -en "\e[31m$BOLD[==     ] 2/7 'src/ajax-communicator.js' $DELL"
java -jar yuicompressor-2.4.7.jar --type js "src/ajax-communicator.js" >> Scape.js
echo -en "\e[31m$BOLD[===    ] 3/7 'src/constructor.js' $DELL"
java -jar yuicompressor-2.4.7.jar --type js "src/constructor.js" >> Scape.js
echo -en "\e[31m$BOLD[====   ] 4/7 'src/content-manipulator.js' $DELL"
java -jar yuicompressor-2.4.7.jar --type js "src/content-manipulator.js" >> Scape.js
echo -en "\e[31m$BOLD[=====  ] 5/7 'src/storage.js' $DELL"
java -jar yuicompressor-2.4.7.jar --type js "src/storage.js" >> Scape.js
# Fixed: this step previously read "6/8" although there are 7 steps total.
echo -en "\e[31m$BOLD[====== ] 6/7 'src/navigation.js' $DELL"
java -jar yuicompressor-2.4.7.jar --type js "src/navigation.js" >> Scape.js
echo -e "\e[31m$BOLD[=======] 7/7 Finishing $DELL"
java -jar yuicompressor-2.4.7.jar --type js "src/Scape.js" >> Scape.js
mv Scape.js dist/Scape.min.js
echo -e '\e[1m\e[32mDone!\e[0m'
echo -en "$DEFAULT"
| true
|
a071ec92689058932dbf00b605fa86e57f451f79
|
Shell
|
MichiganTechRoboticsLab/SLAM-Scan-Matching
|
/mattest.sh
|
UTF-8
| 2,041
| 3.171875
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#! /bin/bash
#
# Grid Engine array-job wrapper that runs the MATLAB entry point
# mattest(JOB_ID, SGE_TASK_ID) and records job metadata + run time.
# The "#$" lines below are SGE directives, not comments for the shell.
#$ -cwd
#$ -j y
#$ -S /bin/bash
#$ -M dawonnac@mtu.edu
#$ -m abes
#$ -q long.q
#$ -t 1-9
#$ -hold_jid 1596
#$ -hard -l mem_free=1G
#$ -hard -l matlab_lic=1
#$ -notify
#
# Necessary variables
. /share/apps/bin/bashrc
. /share/apps/bin/an_functions.sh
# MATLAB R2014b
export MATLAB="/share/apps/matlab/R2014b"
export PATH="${PATH}:${MATLAB}/bin"
# Folder where the files are located
# Folder where the calculation will be done
export INIT_DIR="$PWD"
# Name of the MATLAB input file
export INPUT_FILE="mattest"
export ARRAY_JOB="_${SGE_TASK_ID}"
# Name of the MATLAB log file
export LOG_FILE="out"
# Calculation specific information
export LOCATION=`hostname | awk -F '.' '{print $1}'`
# Write a per-task job summary; the unquoted heredoc delimiter means the
# $VARS and `commands` inside are expanded when the file is written.
cat << EndOfFile > $INIT_DIR/job_info.$JOB_ID${ARRAY_JOB}
Job ID              : $JOB_ID
Username            : dawonnac
Primary group       : thavens-users
Login node          : $SGE_O_HOST
Working directory   : $PWD
Scratch directory   : $PWD
Program             : MATLAB R2014b (serial)
Input file          : mattest
Queue               : long.q
Array job           : Yes
Task ID range       : ${SGE_TASK_ID} of 1-9
Exclusive access    : No
Dependent job ID    : 1596
SMS notification    : No
# of hosts          : 1
# of processors     : 1
Parent node         : $LOCATION
Worker node         : $LOCATION
Job submission time : `sge_jst $JOB_ID `
Job start time      : `date -R`
EndOfFile
# Start the timer
TIME_START=$(date +%s)
# Run MATLAB R2014b (serial)
# $MATLAB/bin/matlab -nodisplay -nosplash -singleCompThread -r "${INPUT_FILE}(${JOB_ID},${SGE_TASK_ID})" -logfile ../${JOB_ID}/${LOG_FILE}${ARRAY_JOB}.log
$MATLAB/bin/matlab -nodisplay -nosplash -singleCompThread -r "${INPUT_FILE}(${JOB_ID},${SGE_TASK_ID})"
# End the timer
TIME_END=$(date +%s)
# Calculate time difference
TIME_TOTAL=`time2dhms $(( $TIME_END - $TIME_START ))`
# Append the run time to the same job summary file.
cat << EndOfFile >> $INIT_DIR/job_info.$JOB_ID${ARRAY_JOB}
Job end time        : `date -R`
Total run time      : $TIME_TOTAL
EndOfFile
| true
|
d59fbca7cacac65bf39c43e7c95d1854f41166ac
|
Shell
|
vinaykrs/Cloud_Assignment_FINAL
|
/task1_driver.sh
|
UTF-8
| 424
| 2.71875
| 3
|
[] |
no_license
|
#!/bin/bash
# Submit the Task 1 Hadoop streaming job (python mapper/reducer).
#
# Usage: ./task1_driver.sh [input_location] [output_location]
if [ $# -ne 2 ]; then
	echo "Invalid number of parameters!"
	echo "Usage: ./task1_driver.sh [input_location] [output_location]"
	exit 1
fi
# Positional parameters are quoted so HDFS paths containing spaces or glob
# characters are passed through intact.
hadoop jar /usr/lib/hadoop/hadoop-streaming-2.8.5-amzn-2.jar \
-D mapreduce.job.name='Workload For Task 1' \
-D mapred.reduce.tasks=1 \
-file task1_mapper.py \
-mapper task1_mapper.py \
-file task1_reducer.py \
-reducer task1_reducer.py \
-input "$1" \
-output "$2"
| true
|
102d3666e5e84b5b6da10c5f0e1162722985dceb
|
Shell
|
duyhenryer/dotfiles
|
/zsh/functions.zsh
|
UTF-8
| 634
| 3.078125
| 3
|
[] |
no_license
|
# FUNCTIONS

# Stage everything and push a work-in-progress checkpoint commit.
# Extra arguments are forwarded to `git push`.
function backup() {
    git add --all
    git commit -am ':wrench: [WIP] Done for today, cya tomorrow [ci skip] :wave:'
    git push $@
}

# Print a .gitignore template from gitignore.io, e.g. `git-ignore python,node`.
function git-ignore() {
    curl -L -s https://www.gitignore.io/api/$@
}

# Show the 20 most frequently used commands from the shell history,
# with their relative usage in percent.
function most () {
    history | awk '{
cmd[$2]++; count++;
}
END {
for (i in cmd) print cmd[i]/count*100 "%", i
}' | sort -nr | head -n20 | column -c3 -s " " -t
}

# Create a directory (including parents) and cd into it.
function mkcd() {
    mkdir -p $@
    cd $@
}

# Open a file/URL with the desktop's default handler, detached from the shell.
function open() {
    xdg-open $@ & disown
}

# Re-run the previous command with sudo.
# NOTE(review): `sudo "$CMD"` passes the whole command line as a single word,
# so sudo looks for a binary literally named e.g. "ls -la" -- this likely
# fails for any command that has arguments. Confirm before relying on it.
function please() {
    CMD=$(history -1 | cut -d" " -f4-)
    sudo "$CMD"
}

# wttr.in weather report; $1 = city (default Braga), $3 = view option.
function weather() {
    curl 'wttr.in/~'${1:-Braga}'+'$2'?'${3:-0}
}
| true
|
0061c09f9ca7014129cd5f92de71604b6d33cafb
|
Shell
|
gzm55/docker-davmail
|
/content/opt/davmail/entrypoint.sh
|
UTF-8
| 729
| 3.59375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh -el
# Docker entrypoint for DavMail.
# - With arguments: exec them instead (standard entrypoint escape hatch).
# - Without arguments: resolve a davmail.properties file and start DavMail.
[ -z "$*" ] || exec "$@"
# Any arguments were exec'd above, so $1 is always empty here and this
# effectively selects the packaged default config.
CONFIG=${1:-/etc/davmail/davmail.properties}
if [ -n "$DAVMAIL_CONFIG_URL" ]; then
    # Fetch a ready-made configuration from a URL.
    CONFIG=$HOME/davmail.properties
    wget -qO $CONFIG "$DAVMAIL_CONFIG_URL"
elif [ -n "$DAVMAIL_URL" ]; then
    # Render the template, expanding ${VAR} references line by line via eval.
    CONFIG=$HOME/davmail.properties
    while read line; do
        eval echo "$line"
    done < /etc/davmail/davmail.properties.template > $CONFIG
fi
if [ -n "$DAVMAIL_SSL_PASS" ]; then
    if [ ! -r /etc/davmail/davmail.p12 ] || [ ! -s /etc/davmail/davmail.p12 ]; then
        echo "ERROR: can't read ssl keystore [/etc/davmail/davmail.p12]!"
        # Fixed: 'return' is only valid inside a function or a sourced
        # script; in an executed entrypoint it is an error. Use 'exit'.
        exit 2
    fi
fi
# Drop the secret from the environment before exec'ing the daemon.
unset DAVMAIL_SSL_PASS
if [ -r "$CONFIG" ]; then
    exec /opt/davmail/davmail "$CONFIG"
else
    echo "ERROR: can't read [$CONFIG]!"
    exit 1
fi
| true
|
50a28fb339516cef381b0bea701c306b9486766b
|
Shell
|
YanXiee/guessinggame_yan
|
/guessinggame.sh
|
UTF-8
| 502
| 3.84375
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# File: guessinggame.sh
# Interactive game: the player guesses how many entries (hidden ones
# included) are in the current directory.
function guessinggame {
    # Count directory entries with a glob instead of parsing `ls` output:
    # dotglob includes hidden entries (matching `ls -A`), nullglob makes an
    # empty directory yield 0 instead of a literal '*'.
    shopt -s nullglob dotglob
    local entries=(*)
    shopt -u nullglob dotglob
    local n=${#entries[@]}
    local ans=0
    echo "Please guess how many files in the current directory:"
    while [[ $ans -ne $n ]]
    do
        read ans
        if ! [[ "$ans" =~ ^[0-9]+$ ]]
        then
            echo "Sorry, please enter an integer:"
        elif [[ $ans -lt $n ]]
        then
            echo "$ans is too small, try again:"
        elif [[ $ans -gt $n ]]
        then
            echo "$ans is too large, try again:"
        else
            echo "That's correct! Well done!"
        fi
    done
}
guessinggame
| true
|
c7626c39a04209344de7fb4a6ea8b9a3e3614382
|
Shell
|
jhzn/dotfiles
|
/bin/scripts/bluetooth_connect.sh
|
UTF-8
| 613
| 3.34375
| 3
|
[] |
no_license
|
#!/bin/sh
# Interactively pick a known bluetooth device (via fzfmenu) and (re)connect.
set -euo pipefail

bluetoothctl power on

# Each line of `bluetoothctl devices` is "Device <MAC> <alias>".
choice=$(bluetoothctl devices | sort | fzfmenu)
choice_mac=$(echo "$choice" | awk '{print $2}')
choice_alias=$(echo "$choice" | awk '{print $3}')

# Drop any stale connection first; ignore failure (the device may simply not
# be connected), which would otherwise abort the script under `set -e`.
bluetoothctl disconnect "$choice_mac" || true

# Fixed: under `set -e` a failing `bluetoothctl connect` terminated the
# script before the old `if [ "$?" -ne 0 ]` check could run, making the
# failure notification dead code. Test the command directly instead.
if ! bluetoothctl connect "$choice_mac"; then
	notify-send "Bluetooth" "Failed to connect" && exit 1
fi
notify-send "Bluetooth" "Connected to device: $choice_alias"
#let audio server catch up so that we can properly set audio down below
sleep 3
#set volume low when connecting in case volume get set to 100%
#it seems to be an recurring issue
audio.sh set 30
| true
|
ef63d311412f55513fb277c6e8e57ff7dedc65db
|
Shell
|
floodfx/gma-village
|
/java-lambda/scripts/update-lambda.sh
|
UTF-8
| 422
| 3.40625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Publish a new version of an AWS Lambda function from a local zip file.
#
# Usage: ./update-lambda.sh <lambda-name> <zip-file> [stage]
#   stage defaults to 'dev'; the deployed function is <lambda-name>-<stage>.
PROFILE=gmavillage
LAMBDA_NAME=$1
ZIP_FILE=$2
STAGE=$3
if [ -z "$LAMBDA_NAME" ]; then
  echo "Lambda name expected. None found."
  exit 1
fi
if [ -z "$ZIP_FILE" ]; then
  echo "Zip file expected. None found."
  exit 1
fi
if [ -z "$STAGE" ]; then
  STAGE='dev'
fi
# All expansions are quoted so names/paths with spaces survive word
# splitting; behavior is otherwise unchanged.
aws lambda update-function-code \
  --function-name "$LAMBDA_NAME-$STAGE" \
  --zip-file "fileb://$ZIP_FILE" \
  --publish \
  --profile "$PROFILE"
| true
|
04f69b6d89f27eda88cbd1bf26bccc69c7e55368
|
Shell
|
n0bisuke/ipu_rb
|
/1/UserTest.sh
|
UTF-8
| 179
| 3.015625
| 3
|
[] |
no_license
|
#!/bin/sh
# Run every *_test.rb file in the current directory, stopping at the first
# failure.
for filename in *_test.rb
do
	# With no matching files the glob stays literal; skip it rather than
	# handing a nonexistent filename to ruby (fixes the old `ls` parsing).
	[ -e "$filename" ] || continue
	ruby "$filename"
	if [ $? -ne 0 ]; then
		echo 'Failure: ' "$filename"
		exit 1
	fi
done
| true
|
bea90b245b7345f3b10190fa2f2080ac478cf902
|
Shell
|
kikitux/ptfe-tf-prod-mode
|
/scripts/mount_ebs.sh
|
UTF-8
| 830
| 3.25
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Wait for two attached EBS volumes (exposed as NVMe block devices), format
# them on first use, mount them, and register the mounts in /etc/fstab.
echo "looking for nvme1n1"
# Block until the kernel exposes the device node (attachment can lag boot).
while [ ! -b /dev/nvme1n1 ] ; do
 echo -n .
 sleep 2
done
mkdir -p /mountdisk
# blkid succeeds only if the device already carries a filesystem; on first
# use, format it, mount it, and persist the mount via its label.
blkid /dev/nvme1n1 || {
mkfs.ext4 /dev/nvme1n1 -L mountdisk
mount /dev/nvme1n1 /mountdisk
echo LABEL=mountdisk /dev/nvme1n1 /mountdisk ext4 defaults,nofail 0 2 >> /etc/fstab
}
echo "looking for nvme2n1"
while [ ! -b /dev/nvme2n1 ] ; do
 echo -n .
 sleep 2
done
mkdir -p /var/lib/replicated/snapshots
blkid /dev/nvme2n1 || {
mkfs.ext4 /dev/nvme2n1 -L snapshots
mount /dev/nvme2n1 /var/lib/replicated/snapshots
echo LABEL=snapshots /dev/nvme2n1 /var/lib/replicated/snapshots ext4 defaults,nofail 0 2 >> /etc/fstab
}
# On subsequent boots the devices are already formatted: mount by label if
# not already mounted, then apply any remaining fstab entries.
mountpoint /mountdisk || mount -L mountdisk /mountdisk
mountpoint /var/lib/replicated/snapshots || mount -L snapshots /var/lib/replicated/snapshots
mount -a
| true
|
413e89fbf3dfb679f182dafee6ea607a893e020d
|
Shell
|
andrasmaroy/dotfiles
|
/bin/git-dirty
|
UTF-8
| 1,189
| 4.34375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#
# Iterate over a directory and check all folder inside it for git repositories
# in 2 depth. Check each repository for unpushed commit, staged and unstaged
# changes. List these in an easy to read format.

readonly GIT_DIR="${HOME}/Git"

# Provides the RED/YELLOW/GREEN/RESET color variables used below.
source ~/.bash_colors

# NOTE(review): "-depth 2" as a primary is BSD/macOS find syntax; GNU find
# would need -mindepth 2. Confirm the target platform before porting.
find "${GIT_DIR}" -type d -depth 2 -maxdepth 2 | while read -r repo; do
    cd "${repo}" || continue
    # git status exits 128 when the directory is not inside a work tree.
    git status &> /dev/null
    if [ "$?" -eq 128 ]; then
        # not a git repo
        continue;
    fi
    status=''
    # "[ahead N]"/"[behind N]" marker from the branch header line, in red.
    commitdiff="$(git status -sb --porcelain 2> /dev/null | head -n 1 | grep -E '(ahead|behind)' | sed -Ee "s/.*\[(.+)\]$/${RED}\1${RESET}/")"
    # Non-empty when the index differs from HEAD (staged changes).
    staged="$(git diff-index --quiet --cached HEAD 2> /dev/null || echo "${YELLOW}Staged changes${RESET}")"
    # Non-empty when the work tree differs from the index (unstaged changes).
    working="$(git diff-files --quiet 2> /dev/null || echo "${GREEN}Local changes${RESET}")"
    if [ -z "${commitdiff}" ] && [ -z "${staged}" ] && [ -z "${working}" ]; then
        # nothing to see here, move along
        continue;
    else
        # Join the non-empty findings with ", " (indirect expansion ${!arg}).
        for arg in commitdiff staged working; do
            value=${!arg}
            if [ -n "${status}" ] && [ -n "${value}" ]; then
                status+=', '
            fi
            status+="${value}"
        done
    fi
    echo "${repo##*Git/}: ${status}"
done
| true
|
0bf3cdb42186a348fa4f984b985c44ba09dd6ce1
|
Shell
|
cwpearson/reverse-engineer
|
/get-c.sh
|
UTF-8
| 629
| 3.625
| 4
|
[] |
no_license
|
#! /bin/bash
# Batch-decompile addresses of a binary with radare2.
#
# Usage: get-c.sh <lib-path> <addr>...
# A persistent r2 project (named after the binary) caches the one-time
# `aaa` analysis; pseudo-C ("pdc") of each address is appended to out.txt.
# Progress messages go to stderr so stdout stays clean.
SCRIPT_PATH=script.r2
OUT_PATH=out.txt

LIB_PATH=$1
LIB_NAME=`basename "$LIB_PATH"`
# Remaining arguments are the addresses to decompile.
shift

>&2 echo "Looking for r2 project:" $LIB_NAME

# create a new project if one isn't found
if r2 -p | grep -Fxq $LIB_NAME; then
    >&2 echo "Found project"
else
    >&2 echo "Creating new project with 1-time analysis..."
    >&2 r2 -c "aaa; Ps $LIB_NAME" -q "$LIB_PATH"
    >&2 echo "done"
fi

# Reset previous output and build an r2 script: for every address, seek
# there ("s <addr>;") and append the decompilation to OUT_PATH.
echo "" > "$OUT_PATH"
echo "" > "$SCRIPT_PATH"

for var in "$@"; do
    echo -n "s " >> "$SCRIPT_PATH"
    echo $var";" >> "$SCRIPT_PATH"
    echo "pdc >> " "$OUT_PATH" >> "$SCRIPT_PATH"
done

r2 -p "$LIB_NAME" -i "$SCRIPT_PATH" -q;
| true
|
24fb729c18c71ab1ce1c9f4e368a03517bed24fd
|
Shell
|
wzbbbb/bkp_home
|
/du_sc/uxpurfmsb_c.ksh
|
UTF-8
| 1,422
| 3.078125
| 3
|
[] |
no_license
|
#!/bin/ksh
# Purge the FMSB data file while the Engines are down:
#  1. stop the Engines (up to ~10 retries, verifying via ps/grep),
#  2. back up u_fmsb50.dta, run uxpurfmsb (dry run, then -p),
#  3. diff the purged file against the backup and restart the Engines.
# All output is tee'd to ./uxpurfmsb_output.txt.
fun1() {
$UXDQM/uxshwque que=*

i=0
finished="no"
#while [ $i -le 10 -a "$finished"="no" ]; do
while [ $i -le 10 ]; do
    i=`expr $i + 1`
    echo "Shutting down the engines ..."
    $UXEXE/uxend atm exp
    echo "Waiting for 30 seconds and check ..."
    sleep 30

    # grep exits 0 when a matching engine process is still alive.
    ps -ef|grep uxcal|grep X
    cal_=$?
    ps -ef|grep uxord|grep X
    ord_=$?
    ps -ef|grep uxech|grep X
    ech_=$?
    ps -ef|grep uxsur|grep X
    sur_=$?

    # Fixed: the variables were tested without '$' ("cal_" instead of
    # "$cal_"), which is not a portable numeric comparison and did not
    # reliably reflect the grep results.
    if [ "$cal_" -eq 0 -o "$ord_" -eq 0 -o "$ech_" -eq 0 -o "$sur_" -eq 0 ]; then
        echo "At least one of the Engines could not be stopped!"
        echo "Trying again ... "
    else
        finished="yes"
        echo "finished is : $finished"
        break
    fi
done # while

if [ "$finished" = "no" ]; then
    echo "Could not stop some Engines."
    echo "Please try to stop them manually, then launch this script. "
    exit 1
fi

# Keep a pristine copy to diff against after the purge.
cp $UXDEX/u_fmsb50.dta $UXDEX/u_fmsb50.dta_init
[ $? -ne 0 ] && echo "Backup u_fmsb50.dta failed." && exit 1

$UXEXE/uxpurfmsb
echo 'Please press "Enter" to continue ...'
read
$UXEXE/uxpurfmsb -p
echo "#############################################################################"
echo "#############################################################################"
echo "The following are the diff result of the fmsb."
diff $UXDEX/u_fmsb50.dta $UXDEX/u_fmsb50.dta_init
echo 'Please press "Enter" to continue ...'
read
echo "Restarting the Engines ..."
$UXEXE/uxstr atm exp
}

##########
## Main ##
##########
fun1 2>&1 |tee ./uxpurfmsb_output.txt
| true
|
2107b64fd23857390fa9740e4ae632e40d6e562b
|
Shell
|
Antenagora/dotfiles
|
/config/herbstluftwm/colors
|
UTF-8
| 354
| 3.25
| 3
|
[] |
no_license
|
#!/bin/bash
# To try to get the colors from xdefaults
# Query all "colorN:" resources from the X resource database, sorted by
# resource name, and keep only the values (e.g. "#rrggbb").
xrdb=( $(xrdb -query | grep -P "color[0-9]*:" | sort | cut -f 2-) )
declare -A color
index=0
# Map each value onto a named slot following the *textual* sort order of
# colorN (color0, color1, color10, ..., color15, color2, ...).
# NOTE(review): the name list below must line up with that sort order --
# verify it matches the intended palette before relying on specific names.
for name in black brightgreen brightyellow brightblue brightmagenta brightcyan brightwhite red green yellow blue magenta cyan white gray brightred; do
    color[${name}]=${xrdb[$index]}
    ((index++))
done
| true
|
705a1cab7dd8ef7e6f5842369960f1e0cb90f623
|
Shell
|
open-estuary/test-definitions
|
/auto-test/virtualization/virtual/lxc/lxc.sh
|
UTF-8
| 7,676
| 3.40625
| 3
|
[] |
no_license
|
#!/bin/bash
# End-to-end LXC smoke test: install LXC, configure a bridge (virbr0) for the
# host distro, then exercise lxc-create / ls / start / info / attach /
# exec / stop / destroy, reporting each step via print_info.
# Helpers (sys_info.sh, sh-test-lib) provide $distro, install_deps,
# print_info, $restart_service etc.

# Write a static bridge config for RHEL-style distros (centos/fedora) and
# disable the firewall so container traffic is not filtered.
common_brctl()
{
    local config_name=$2
    local network_scripts_dir=$1
    if [ x"$(cat ${network_scripts_dir}/ifcfg-lo | grep TYPE)" = x"" ];
    then
        echo "TYPE=lookback" >> ${network_scripts_dir}/ifcfg-lo
    fi
    # Third octet of the default route interface's IPv4 address.
    ip_segment=$(ip addr show `ip route | grep "default" | awk '{print $NF}'`| grep -o "inet [0-9\.]*" | cut -d" " -f 2 | cut -d"." -f 3)
    cat << EOF > ${network_scripts_dir}/${config_name}
DEVICE="${config_name}"
BOOTPROTO="static"
IPADDR="192.168.${ip_segment}.123"
NETMASK="255.255.255.0"
ONBOOT="yes"
TYPE="Bridge"
NM_CONTROLLED="no"
IPV6_PEERDNS=yes
IPV6_PEERROUTES=yes
EOF
    $stop_service firewalld.service
    $disable_service firewalld.service
    $enable_service NetworkManager-wait-online.service
}

# Write a bridge config for openSUSE, enslaving the default-route interface
# and reusing its current IPv4 address.
opensuse_brctl()
{
    local config_name=$2
    local network_scripts_dir=$1
    local dev=$(ip route | grep "default" | awk '{print $NF}')
    local ip_addr=$(ip addr show $dev | grep -o "inet [0-9\.]*" | cut -d" " -f 2)
    local ip_brd=${ip_addr%.*}.255
    cat << EOF > ${network_scripts_dir}/${config_name}
STARTMODE='auto'
BOOTPROTO='static'
IPADDR="$ip_addr"
NETMASK="255.255.255.0"
BROADCAST="$ip_brd"
BRIDGE='yes'
BRIDGE_STP='off'
BRIDGE_FORWARDDELAY='0'
BRIDGE_PORTS="$dev"
EOF
}

# Create the bridge with brctl on Debian and rewrite /etc/network/interfaces.
# NOTE(review): "EHT2=" looks like a typo for "ETH2=" (used below), and
# printf_info vs print_info is inconsistent with the rest of the file --
# confirm against the helper library before changing.
debian_brctl()
{
    HOST_INTERFACES="/etc/network/interfaces"
    HOST_INTERFACES_BK="/etc/network/interfaces_bk"
    BRIDGE_LOCAL_CONF="/etc/sysctl.d/bridge_local.conf"
    ETH1=$(ip route | grep "default" | awk '{print $NF}')
    EHT2=$(ip addr | awk '/eth*/' | awk '!/inet/'| awk '!/link/'|awk 'NR==2'|awk -F: '{print $2}')
    local bridge=$1
    ip link set $bridge down
    brctl delbr $bridge
    brctl addbr $bridge
    addr_show=$(ip addr show | grep $bridge)
    if [ x"$addr_show" = x"" ]; then
        printf_info 1 brctl_addbr_$bridge
    fi
    brctl addif $bridge $ETH1
    if [ $? -ne 0 ]; then
        printf_info 1 brctl_addif
    fi
    cp $HOST_INTERFACES $HOST_INTERFACES_BK
    cat /dev/null > $HOST_INTERFACES
    echo "auto lo $bridge" >> $HOST_INTERFACES
    echo "iface lo inet loopback" >> $HOST_INTERFACES
    echo "iface eth0 inet manual" >> $HOST_INTERFACES
    echo "iface $ETH2 inet manual" >> $HOST_INTERFACES
    echo "iface $bridge inet dhcp" >> $HOST_INTERFACES
    echo "bridge_ports eth0 $ETH2" >> $HOST_INTERFACES
    if [ ! -e $BRIDGE_LOCAL_CONF ]; then
        touch $BRIDGE_LOCAL_CONF
    fi
    sed '/exit/d' $BRIDGE_LOCAL_CONF
    echo "/etc/init.d/procps restart" >> $BRIDGE_LOCAL_CONF
    echo "exit 0" >> $BRIDGE_LOCAL_CONF
    ifup $bridge
}

# Dispatch bridge configuration to the distro-specific helper.
brctl_config()
{
    local bridge=$1
    local config_name=ifcfg-$bridge
    local NETWORK_SCRIPTS_DIR="/etc/sysconfig/network-scripts"
    case $distro in
        opensuse)
            NETWORK_SCRIPTS_DIR="/etc/sysconfig/network"
            opensuse_brctl $NETWORK_SCRIPTS_DIR $config_name
            ;;
        ubuntu)
            echo "ubuntu brctl ############"
            ;;
        debian)
            echo "debian brctl ############"
            debian_brctl $bridge
            ;;
        *)
            common_brctl $NETWORK_SCRIPTS_DIR $config_name
            ;;
    esac
}

set -x
# Pull in $distro detection and the test-reporting helpers.
cd ../../../../utils
    . ./sys_info.sh
    . ./sh-test-lib
cd -

#install
pkg="wget expect"
install_deps "${pkg}"
case "${distro}" in
    debian|ubuntu)
        pkgs="cgroup-bin libvirt-bin"
        install_deps "${pkgs}"
        pkgs="lxc lxc-templates"
        install_deps "${pkgs}"
        ;;
    centos|fedora)
        pkgs="epel-release"
        install_deps "${pkgs}"
        pkgs="debootstrap perl libvirt"
        install_deps "${pkgs}"
        pkgs="lxc lxc-templates"
        install_deps "${pkgs}"
        ;;
    *)
        error_msg "Unsupported distribution!"
esac
print_info $? lxc-installed

# -- bridge network -----------------------------------------------------------
BRIDGE_NAME=virbr0
brtcl_exist=$(ip addr | grep $BRIDGE_NAME)
if [ x"$brtcl_exist" = x"" ]; then
    brctl_config $BRIDGE_NAME
    $restart_service libvirtd.service
    $restart_service network.service
fi
# Make new containers attach to our bridge instead of the default lxcbr0.
sed -i "s/lxcbr0/${BRIDGE_NAME}/g" /etc/lxc/default.conf
print_info $? lxc-virtual-bridge

# -- lxc-checkconfig ----------------------------------------------------------
#which lxc-checkconfig
#if [ $? -ne 0 ]; then
#    LXC_VERSION=lxc-2.0.0.tar.gz
#    download_file http://linuxcontainers.org/downloads/lxc/${LXC_VERSION}
#    tar xf ${LXC_VERSION}
#    cd ${LXC_VERSION%%.tar.gz}
#    ./configure
#    make
#    make install
#    cd -
#    export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/lib
#fi
# Kernel feature check; "missing" in the output would indicate an unusable
# kernel config (currently reported as pass either way -- see below).
config_output=$(lxc-checkconfig)
if [[ $config_output =~ 'missing' ]]; then
    #print_info 1 lxc-checkconfig
    print_info 0 lxc-checkconfig
else
    print_info 0 lxc-checkconfig
fi

# -- lxc-create ---------------------------------------------------------------
# Patch the ubuntu-cloud template so it works with a locally mirrored
# arm64 cloud image (skip ubuntu-cloudimg-query, fix tar flags).
LXC_TEMPLATE=/usr/share/lxc/templates/lxc-ubuntu-cloud
if [ ! -e ${LXC_TEMPLATE}.origin ];
then
    cp ${LXC_TEMPLATE}{,.origin}
else
    cp ${LXC_TEMPLATE}{.origin,}
fi
sed -i 's/xpJf/xpf/g' $LXC_TEMPLATE
sed -i 's/^type ubuntu-cloudimg-query/#&/g' $LXC_TEMPLATE
if [ $distro = "opensuse" ]; then
    sed -i '/"\$CLONE_HOOK_FN"/s/"\${cloneargs\[@]}"/& "--nolocales=true"/' $LXC_TEMPLATE
fi
print_info $? templates-config

# Unique container name per run.
rand=$(date +%s)
container=mylxc$rand
#lxc-create -n $container -t ubuntu-cloud -- -r vivid -T http://htsat.vicp.cc:808/docker-image/ubuntu-15.04-server-cloudimg-arm64-root.tar.gz
lxc-create -n $container -t ubuntu-cloud -- -r vivid -T ${ci_http_addr}/test_dependents/ubuntu-15.04-server-cloudimg-arm64-root.tar.gz
print_info $? lxc-create

# -- lxc-ls -------------------------------------------------------------------
if [ $distro != "centos" ]; then
    lxc-ls
    distro_exists=$(lxc-ls --fancy)
    if [[ "${distro_exists}" =~ $container ]]; then
        print_info 0 lxc-ls
    else
        print_info 1 lxc-ls
    fi
else
    print_info 0 lxc-ls
fi

# -- lxc-start ----------------------------------------------------------------
#LXC_CONFIG=/var/lib/lxc/${container}/config
#case $distro in
#    "ubuntu" | "debian" )
#        /etc/init.d/apparmor reload
#        aa-status
#        ;;
#    "opensuse" )
#        sed -i -e "/lxc.network/d" $LXC_CONFIG
#cat << EOF >> $LXC_CONFIG
#lxc.network.type = veth
#lxc.network.link = $BRIDGE_NAME
#lxc.network.flags = up
#EOF
#        $reload_service apparmor
#        ;;
#    * )
#        $reload_service apparmor
#        ;;
#    "debian" )
#        echo "lxc.aa_allow_incomplete = 1" >> /var/lib/lxc/${distro_name}/config
#        /etc/init.d/apparmor reload
#        /etc/init.d/apparmor start
#        debian_brctl
#        ;;
#esac
#modify by liucaili 20171128
# Allow containers to start under an incomplete AppArmor policy.
LXC_CONFIG=/var/lib/lxc/${container}/config
case $distro in
    "ubuntu" | "debian" )
        echo "lxc.aa_allow_incomplete = 1" >> $LXC_CONFIG
        ;;
    * )
        ;;
esac
lxc-start --name ${container} --daemon
result=$?

# -- lxc-info -----------------------------------------------------------------
# lxc-start may return 0 before the container is RUNNING, so check both.
lxc_status=$(lxc-info --name $container)
if [[ "$(echo $lxc_status | grep $container | grep 'RUNNING')" = "" && $result -ne 0 ]]
then
    print_info 1 lxc-start
else
    print_info 0 lxc-start
fi

# -- lxc-attach ---------------------------------------------------------------
# Drive an interactive attach/exit session via expect.
/usr/bin/expect <<EOF
set timeout 400
spawn lxc-attach -n $container
expect $container
send "exit\r"
expect eof
EOF
print_info $? lxc-attach

# -- lxc-execute --------------------------------------------------------------
lxc-attach -n $container -- /bin/echo hello
print_info $? lxc-execute

# -- lxc-stop -----------------------------------------------------------------
lxc-stop --name $container
print_info $? lxc-stop

# -- lxc-destroy --------------------------------------------------------------
lxc-destroy --name $container
print_info $? lxc-destory
| true
|
7a0660e4425b9d2fe3a296febf29347dfc76982a
|
Shell
|
ObjectifLibre/openshift-docker
|
/bin/_utils.sh
|
UTF-8
| 2,254
| 4.15625
| 4
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Shared helpers for building/pushing the fundocker/openshift-* images.
set -eo pipefail

# Enable aliases definition even if we are not running an interactive shell
shopt -s expand_aliases

declare DOCKERHUB_NAMESPACE="fundocker"
declare IMAGE_NAME_PREFIX="openshift-"
declare BASE_SERVICE_IMAGE_PATH="docker/images"

# Abort when the caller did not pass a service name.
# $1: calling function name (for the error message), $2: service name.
function _check_service_argument() {
    if [[ -z $2 ]]; then
        echo "$1: service name is missing"
        exit 1
    fi
}
# Avoid repetition by declaring an alias
alias _check_service_argument='_check_service_argument ${FUNCNAME} $*'

# Get the Dockerfile path of a service
#
# Usage: _get_service_image_path SERVICE
function _get_service_image_path() {
    _check_service_argument
    local service=$1
    echo "${BASE_SERVICE_IMAGE_PATH}/${service}/Dockerfile"
}

# Check that the Dockerfile path of a service actually exists
#
# Usage: _check_service_image_path SERVICE
function _check_service_image_path() {
    _check_service_argument
    local service=$1
    local service_image_path
    service_image_path=$(_get_service_image_path "$service")
    if [[ ! -e $service_image_path ]]; then
        echo "Service image path does not exists: $service_image_path"
        exit 1
    fi
}

# Get base image tag (as extracted from the first FROM line of the
# service's Dockerfile)
#
# Usage: _get_base_image_tag SERVICE
function _get_base_image_tag() {
    _check_service_argument
    local service=$1
    local dockerfile
    dockerfile=$(_get_service_image_path "$service")
    echo $(grep FROM ${dockerfile} | head -n 1 | sed 's/^.*FROM \(.*\):\(.*\)/\2/')
}

# Get target image tag (fully qualified, e.g. namespace/name:tag)
#
# Usage: _get_target_image_fullname SERVICE
function _get_target_image_fullname() {
    _check_service_argument
    local service=$1
    local tag
    tag=$(_get_base_image_tag "$service")
    echo "${DOCKERHUB_NAMESPACE}/${IMAGE_NAME_PREFIX}${service}:${tag}"
}

# Check if target image has been built and is available locally
#
# Usage: _check_target_image_exists SERVICE
function _check_target_image_exists() {
    _check_service_argument
    local service=$1
    local image
    image=$(_get_target_image_fullname "$service")
    # Fixed: previously filtered `docker images` with the undefined variable
    # "$tag"; filter on the fully-qualified image name computed above.
    if ! docker images "$image" | grep "$service" &> /dev/null; then
        echo "Target image '${image}' does not exists! You should build it first (see: bin/build)"
        exit 1
    fi
}
| true
|
cb487b8114f9a9b76f1291d48c627805fbd7a59c
|
Shell
|
twalrant/yadynip
|
/etc/checkip.d/15dnsexit
|
UTF-8
| 325
| 3.4375
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
#
# Print this host's public IPv4 address as reported by one of the dnsexit
# "what is my IP" proxy servers.
#
# Available proxy dnsexit servers:
declare -a proxyservs=(ip.dnsexit.com ip2.dnsexit.com ip3.dnsexit.com);
# Pick one at (very pseudo) random. The modulus is derived from the array
# size (the old code hard-coded "% 3"), so the list can grow or shrink
# without leaving servers unreachable or indexing out of range.
num=$(($(date +%s) % ${#proxyservs[@]}))
url=http://${proxyservs[$num]}
# First dotted-quad found in the response body.
regexp='\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}'
wget="wget -o /dev/null -O - -t 1 -T 5"
$wget $url|grep -Poe $regexp
| true
|
28fdafb58b9ab7c99e0c66f2b24c25eb51581719
|
Shell
|
rafael-pinho/dot-files
|
/arch-linux/software-installer.sh
|
UTF-8
| 3,917
| 3.59375
| 4
|
[] |
no_license
|
#!/bin/bash
# Interactive Arch Linux software installer: loops over a menu and installs
# the chosen category via pacman (official repos) or yaourt (AUR).
# Must not run as root -- yaourt refuses to build AUR packages as root.

# Prompt for and install one text editor.
function text_editor_install(){
    echo "Select one text editor to install"
    echo "1- gedit"
    echo "2- atom"
    echo "3- visual studio code"
    read TEXT_EDITOR
    case $TEXT_EDITOR in
        1)
            sudo pacman -S gedit
        ;;
        2)
            yaourt -S atom
        ;;
        3)
            yaourt -S visual-studio-code
        ;;
    esac
    echo "Press any key to continue..."
    read
}

# Prompt for and install one web browser (plus flash for Opera).
function browser_install(){
    echo "Select one browser to install"
    echo "1- Opera"
    echo "2- Firefox"
    echo "3- Google-Chrome"
    read BROWSER
    case $BROWSER in
        1)
            sudo pacman -S opera
            yaourt -S chromium-pepper-flash
        ;;
        2)
            sudo pacman -S firefox
        ;;
        3)
            yaourt -S google-chrome
        ;;
    esac
    echo "Press any key to continue..."
    read
}

# Prompt for and install one file manager.
function file_manager_install(){
    echo "Select one file manager to install"
    echo "1- Ranger"
    echo "2- Nautilus"
    read FILE_MANAGER
    case $FILE_MANAGER in
        1)
            sudo pacman -S ranger
        ;;
        2)
            sudo pacman -S nautilus
        ;;
    esac
    echo "Press any key to continue..."
    read
}

# Prompt for and install a music player / media-key helper.
function music_players_install(){
    echo "Select one music player to install"
    echo "1- Spotify"
    echo "2- Playerctl - for multimedia keys support"
    read MUSIC_PLAYER
    case $MUSIC_PLAYER in
        1)
            yaourt -S spotify
        ;;
        2)
            yaourt -S playerctl
        ;;
    esac
    echo "Press any key to continue..."
    read
}

# Prompt for and install one wallpaper manager.
function wallpaper_manager_install(){
    echo "Select one wallpaper manager to install"
    echo "1- Feh"
    echo "2- Nitrogen"
    read WALLPAPER_MANAGER
    case $WALLPAPER_MANAGER in
        1)
            sudo pacman -S feh
        ;;
        2)
            sudo pacman -S nitrogen
        ;;
    esac
    echo "Press any key to continue..."
    read
}

# Install the OpenSSH client/server package.
function ssh_client_install(){
    sudo pacman -S openssh
    echo "Press any key to continue..."
    read
}

# Install nvm (Node Version Manager) via its upstream install script.
function nvm_install(){
    curl -o- https://raw.githubusercontent.com/creationix/nvm/v0.31.4/install.sh | bash
    echo "Press any key to continue..."
    read
}

# Install docker, enable its service, and fetch docker-compose as "dc".
# NOTE(review): writing to /usr/local/bin here relies on the script being
# run with sufficient privileges -- confirm.
function docker_install(){
    sudo pacman -S docker
    systemctl enable docker
    curl -L https://github.com/docker/compose/releases/download/1.7.0/docker-compose-`uname -s`-`uname -m` > /usr/local/bin/dc
    chmod +x /usr/local/bin/dc
    echo "Press any key to continue..."
    read
}

# Install VirtualBox and its kernel-module prerequisites.
function virtualbox(){
    sudo pacman -S virtualbox linux-headers qt5
    echo "Press any key to continue..."
    read
}

# Install Vagrant.
function vagrant(){
    sudo pacman -S vagrant
    echo "Press any key to continue..."
    read
}

# Install Wine and its Gecko/Mono companions.
function wine(){
    sudo pacman -S wine wine_gecko wine-mono
    echo "Press any key to continue..."
    read
}

# Main menu loop; ":quit" exits.
while [ "$USER_INPUT" != ":quit" ]; do
    clear
    printf "\n\n"
    echo "///////////////////////////////////////////////////////////////////////"
    echo "//////////////////    ARCH LINUX PROGRAMS INSTALLER ///////////////////"
    echo "///////////////////////////////////////////////////////////////////////"
    printf "\n\n"
    echo "THIS SCRIPT USES YAOURT AND CANNOT BE EXECUTED AS ROOT. PLEASE, IF YOU "
    echo "RUN THIS SCRIPT AS ROOT EXIT AND RUN WITH OTHER USER ACCOUNT."
    echo "choose what you want to install"
    printf "\n"
    echo " 1 .............. text editors"
    echo " 2 .............. browser"
    echo " 3 .............. file managers"
    echo " 4 .............. music player"
    echo " 5 .............. wallpaper managers"
    echo " 6 .............. ssh client"
    echo " 7 .............. nvm"
    echo " 8 .............. docker - docker-compose"
    echo " 9 .............. virtualbox"
    echo "10 .............. vagrant"
    echo "11 .............. wine"
    printf "\n\n:quit - EXIT\n\n"
    read USER_INPUT
    case $USER_INPUT in
        1)
            text_editor_install
        ;;
        2)
            browser_install
        ;;
        3)
            file_manager_install
        ;;
        4)
            music_players_install
        ;;
        5)
            wallpaper_manager_install
        ;;
        6)
            ssh_client_install
        ;;
        7)
            nvm_install
        ;;
        8)
            docker_install
        ;;
        9)
            virtualbox
        ;;
        10)
            vagrant
        ;;
        11)
            wine
        ;;
    esac
done
| true
|
accd59cd1dce6c5e84d1266f84c385049fbb56f1
|
Shell
|
d3lio/BattleTanks
|
/scripts/run_tests.sh
|
UTF-8
| 178
| 2.625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Run the cargo test suites of the engine crate and then the game crate.
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
GAME_DIR="$DIR/../game"
ENGINE_DIR="$DIR/../engine"
# Paths are quoted so a checkout under a directory containing spaces works.
cd "$ENGINE_DIR" && cargo test
cd "$GAME_DIR" && cargo test
| true
|
8c08cfc820747d9ee5dacf4f5928b64328cf4a3d
|
Shell
|
CircleAround/bin
|
/remote-bastion-dump-eb-pg
|
UTF-8
| 684
| 3.359375
| 3
|
[] |
no_license
|
#!/bin/bash
# `remote-bastion-dump-eb-pg beanstalkenv pempath db-host db-user db-name [pg_dumpparams]`
#
# Dump a Postgres database only reachable from an Elastic Beanstalk
# instance: discover the instance's public IP via `eb ssh`, open a local
# SSH tunnel (localhost:15432 -> db-host:5432) through it, run pg_dump over
# the tunnel into a timestamped file, then tear the tunnel down.
beanstalkenv=$1
pempath=$2
db_host=$3
db_user=$4
db_name=$5
pg_dump_params=$6
customssh="ssh -i ${pempath}"
echo "CUSTOM SSH COMMAND: ${customssh}"
# Ask the instance itself for its public IP (via ifconfig.me).
ip=`eb ssh ${beanstalkenv} -o --custom "${customssh}" --quiet --command "curl -s ifconfig.me"`
echo "IP: ${ip}"
# -f -N: background the tunnel without running a remote command.
ssh -f -N -L 15432:${db_host}:5432 -i ${pempath} ec2-user@${ip}
echo 'connected'
# Custom-format dump (-Fc) through the local tunnel endpoint.
pg_dump -Fc -h localhost -p 15432 -U ${db_user} ${pg_dump_params} ${db_name} > ${beanstalkenv}`date "+%Y%m%d_%H%M%S"`.sql
echo 'dumped'
# Kill the background tunnel.
# NOTE(review): `ps | grep` + kill -9 is fragile; `pkill -f` would be safer.
ps aux | grep "ssh -f -N -L" | grep ${ip} | awk '{ print $2 }' | xargs kill -9
echo 'finished'
| true
|
e853c8d9c144784ae24c4412a02460ca1ac9d353
|
Shell
|
doytsujin/wiki_barebone
|
/run_local_docker.sh
|
UTF-8
| 630
| 3.21875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Recreate the local CI container, sharing this repository into it, then
# open an interactive shell inside it.
CURRENT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
REPO_NAME=$(basename "$CURRENT_DIR")
echo "Will share this folder:$REPO_NAME with the docker container".
echo "stop container:" && docker stop ci_xenial_robot || true
echo "remove container:" && docker rm ci_xenial_robot || true
echo "rerun container ci_xenial_robot:"
cd "${CURRENT_DIR}" && docker run -d -it --name ci_xenial_robot -v "$(pwd):/home/robot/${REPO_NAME}" local_xenial_robot # read only share
# Fixed: `/bin/bash "sudo chown ..."` tried to execute a *script file*
# literally named "sudo chown ..."; `-c` is required to run the string as
# a command.
docker exec -u robot -it ci_xenial_robot /bin/bash -c "sudo chown robot:robot -R /home/robot"
docker exec -u robot -it ci_xenial_robot /bin/bash
| true
|
99bc366aeda3f622599da409a2e6efea66bbdf28
|
Shell
|
aarontay/snap-labs
|
/advanced-deployment-docker-snap-influxdb-grafana/snap/get_plugins.sh
|
UTF-8
| 278
| 2.78125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Download the snap telemetry plugins listed below into ./plugins (relative
# to this script) and mark them executable.
script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
plugins=(snap-plugin-publisher-influxdb snap-plugin-collector-psutil)
for plugin in "${plugins[@]}"; do
  # Subshell keeps the cd local to each download.
  (
    cd "${script_dir}/plugins" &&
      wget -O "${plugin}" "http://snap.ci.snap-telemetry.io/plugins/${plugin}/latest/linux/x86_64/${plugin}" &&
      chmod 755 "${plugin}"
  )
done
| true
|
a8b92f32ecf0cd46421f91efe4571d5aba5eb6d0
|
Shell
|
ShiWeiPKU/setk
|
/scripts/run_ds_beamformer.sh
|
UTF-8
| 2,303
| 3.484375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# wujian@2018
#
# Apply a delay-and-sum (DS) beamformer to multi-channel wave files, either
# steered per-utterance via a --utt2doa table, or once per DoA in doa-list
# (output per DoA goes to <enhan-dir>/doa<DOA>_<data-dir-name>).

set -eu

nj=40
cmd="run.pl"
fs=16000
speed=340
topo="0,0.2,0.4,0.6"
doa_list="30 70 110 150"
utt2doa=""
stft_conf=./conf/stft.conf

echo "$0 $@"

function usage {
    echo "Options:"
    echo "  --nj <nj>             # number of jobs to run parallel, (default=$nj)"
    echo "  --cmd <run.pl|queue.pl> # how to run jobs, (default=$cmd)"
    echo "  --stft-conf <stft-conf> # stft configurations files, (default=$stft_conf)"
    echo "  --fs <fs>             # sample frequency for source signal, (default=$fs)"
    echo "  --topo <topo>         # topology for linear microphone arrays, (default=$topo)"
    echo "  --doa-list <doa-list> # list of DoA to be processed, (default=$doa_list)"
    echo "  --utt2doa <utt2doa>   # utt2doa file, (default=$utt2doa)"
    echo "  --speed <speed>       # sound speed, (default=$speed)"
}

. ./path.sh
. ./utils/parse_options.sh || exit 1

[ $# -ne 3 ] && echo "Script format error: $0 <data-dir> <log-dir> <enhan-dir>" && usage && exit 1

wav_scp=$1/wav.scp
exp_dir=$2
dst_dir=$3

for x in $stft_conf $wav_scp; do [ ! -f $x ] && echo "$0: Missing file: $x" && exit 1; done

[ ! -d $exp_dir ] && mkdir -p $exp_dir

# Split wav.scp into $nj shards for parallel processing.
split_wav_scp=""
for n in $(seq $nj); do split_wav_scp="$split_wav_scp $exp_dir/wav.$n.scp"; done
./utils/split_scp.pl $wav_scp $split_wav_scp

# Intentionally unquoted below: these expand into multiple CLI options.
stft_opts=$(cat $stft_conf | xargs)
beamformer_opts="--fs $fs --speed $speed --linear-topo $topo"

if [ -n "$utt2doa" ]; then
    echo "$0: Run DS beamformer on $utt2doa ..."
    # Fixed: this previously ran `mkdir -p $dst_dir/doa${doa}_$dirname`,
    # but neither $doa nor $dirname is defined in this branch, which
    # aborts under `set -u`. Output goes directly to $dst_dir here.
    mkdir -p $dst_dir
    $cmd JOB=1:$nj $exp_dir/run_ds.JOB.log \
        ./scripts/sptk/apply_ds_beamformer.py \
        $stft_opts $beamformer_opts \
        --utt2doa $utt2doa \
        $exp_dir/wav.JOB.scp \
        $dst_dir
    echo "$0: Run delay and sum beamformer -- $utt2doa done"
else
    dirname=$(basename $1)
    for doa in $doa_list; do
        echo "$0: Run DS beamformer on DoA $doa ..."
        mkdir -p $dst_dir/doa${doa}_$dirname
        $cmd JOB=1:$nj $exp_dir/$dirname.$doa.ds.JOB.log \
            ./scripts/sptk/apply_ds_beamformer.py \
            $stft_opts $beamformer_opts \
            --doa $doa \
            $exp_dir/wav.JOB.scp \
            $dst_dir/doa${doa}_$dirname
    done
    echo "$0: Run delay and sum beamformer -- $doa_list done"
fi
| true
|
1094f459d95d22a18bce5902a7a4fa812734dd57
|
Shell
|
fsi-tue/docker
|
/linux-ag-website/continuous-deployment/deploy.sh
|
UTF-8
| 688
| 3.25
| 3
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Mirror the linux-ag GitHub repositories into /srv using a bare repo plus
# a detached work tree, hard-resetting the work tree to origin/master.

set -o errexit
set -o nounset
set -x

cd /srv

for repository in website presentation; do
    # First run: create the bare repository and wire up the GitHub remote.
    if [[ ! -d $repository.git ]]; then
        git init --bare $repository.git
        pushd $repository.git
        git remote add origin https://github.com/linux-ag/$repository.git
        popd
    fi

    # Point git at the bare repo while checking files out into ./$repository.
    export GIT_DIR=$repository.git
    export GIT_WORK_TREE=$repository

    if [[ ! -d $repository ]]; then
        mkdir $repository
    fi

    git fetch
    # Discard any local drift; the work tree is a pure deployment artifact.
    git reset --hard origin/master

    unset GIT_DIR
    unset GIT_WORK_TREE
done

# TODO: Listen for GitHub Webhook events (will have to wait until we change the
# CNAME record for www.linux-ag.uni-tuebingen.de from 134.2.2.45 to
# 134.2.220.61).
| true
|
fc7d996c4f883aa7d2967ec1690b2ce0b202f4de
|
Shell
|
nickschuetz/openshift-cd-demo
|
/oc/installgogs.sh
|
UTF-8
| 1,663
| 3.609375
| 4
|
[] |
no_license
|
#!/bin/bash
# Bootstrap a Gogs instance on OpenShift: wait for PostgreSQL and Gogs pods
# to come up (via endpoint readiness), then POST the /install form.
set -x

# Use the oc client to get the url for the gogs route
GOGSROUTE=$(oc get route gogs -o template --template='{{.spec.host}}')

# Use the oc client to get the postgres variables into the current shell"
# (imports POSTGRESQL_USER / POSTGRESQL_PASSWORD used below).
eval $(oc env dc/postgresql-gogs --list | grep -v \#)

# postgres has a readiness probe, so checking if there is at least one
# endpoint means postgres is alive and ready, so we can then attempt to install gogs
# we're willing to wait 60 seconds for it, otherwise something is wrong.
# Note: the while condition re-tests $? of the grep executed at loop end.
x=1
oc get ep postgresql-gogs -o yaml | grep "\- addresses:"
while [ ! $? -eq 0 ]
do
  sleep 3
  x=$(( $x + 1 ))
  if [ $x -gt 20 ]
  then
    exit 255
  fi
  oc get ep postgresql-gogs -o yaml | grep "\- addresses:"
done

# now we wait for gogs to be ready in the same way
x=1
oc get ep gogs -o yaml | grep "\- addresses:"
while [ ! $? -eq 0 ]
do
  sleep 3
  x=$(( $x + 1 ))
  if [ $x -gt 20 ]
  then
    exit 255
  fi
  oc get ep gogs -o yaml | grep "\- addresses:"
done

# we might catch the router before it's been updated, so wait just a touch
# more
sleep 10

# Submit Gogs' one-time install form; only the HTTP status is captured.
RETURN=$(curl -o /dev/null -sL -w "%{http_code}" http://$GOGSROUTE/install \
--form db_type=PostgreSQL \
--form db_host=postgresql-gogs:5432 \
--form db_user=$POSTGRESQL_USER \
--form db_passwd=$POSTGRESQL_PASSWORD \
--form db_name=gogs \
--form ssl_mode=disable \
--form db_path=data/gogs.db \
--form "app_name=Gogs: Go Git Service" \
--form repo_root_path=/home/gogs/gogs-repositories \
--form run_user=gogs \
--form domain=localhost \
--form ssh_port=22 \
--form http_port=3000 \
--form app_url=http://$GOGSROUTE/ \
--form log_root_path=/opt/gogs/log)

if [ $RETURN != "200" ]
then
  exit 255
fi
| true
|
bd60a158e94fe712c4503ab4ad04081e1401dd89
|
Shell
|
ilushka/linuxconfig
|
/setup.sh
|
UTF-8
| 6,209
| 3.53125
| 4
|
[] |
no_license
|
#!/bin/bash
PATHOGEN_PATH=~/.vim/autoload/pathogen.vim
NERDTREE_PATH=~/.vim/bundle/nerdtree
CTRLP_PATH=~/.vim/bundle/ctrlp.vim
ACKVIM_PATH=~/.vim/bundle/ack.vim
TAGBAR_PATH=~/.vim/bundle/tagbar
GITGUTTER_PATH=~/.vim/bundle/vim-gitgutter
BASHRC_PATH=~/.bashrc
BASH_PROFILE_PATH=~/.bash_profile
VIMRC_PATH=~/.vimrc
# Ask the user a yes/no question.
# Arguments: $1 - question text (" [yn]" is appended to the prompt)
# Outputs:   "yes" on y/Y, "no" on any other single keypress
function ask() {
  local _is_yes
  # -s: don't echo the keypress; -n1: return after one character.
  read -s -n1 -p "$1"$' [yn]\n' _is_yes
  if [ "$_is_yes" = "y" ] || [ "$_is_yes" = "Y" ]; then
    echo "yes"
  else
    echo "no"
  fi
}
# Create the pathogen directories, download pathogen.vim, and enable it
# on vim startup.
function install_pathogen() {
  mkdir -p ~/.vim/autoload ~/.vim/bundle && \
  curl -LSso "$PATHOGEN_PATH" https://tpo.pe/pathogen.vim
  # NOTE: appended unconditionally, matching the original behavior even
  # when the download fails.
  echo 'execute pathogen#infect()' >> "$VIMRC_PATH"
}
# Append the base vim settings to $VIMRC_PATH.  The heredoc delimiter is
# quoted ('EOF'), so the block is written verbatim with no expansion.
function install_vimrc() {
  cat <<'EOF' >> "$VIMRC_PATH"
set number
set nocompatible
syntax on
set expandtab
set tabstop=2
set shiftwidth=2
set ruler
set scrolloff=3
" Update refresh time to 100ms for gitgutter
set updatetime=100
" Increase max number of changes to display
let g:gitgutter_max_signs=1000
set laststatus=2
set statusline=
set statusline+=%#Pmenu#
set statusline+=%f\ \ 
set statusline+=%l:%c\ \ 
set statusline+=%{tagbar#currenttag('%s',\ '',\ 'f',\ 'scoped-stl')}
" show horizontal line under cursor
" set cursorline
" show tab, space, and newline charaters
" set list
" set listchars=tab:▸-,space:·,trail:¬
" show 80-character line limit
" set textwidth=80
" set colorcolumn=+1
EOF
}
# Append vi keybindings, aliases, and a git-aware prompt to the given
# bash configuration file.
# Arguments: $1 - path of the bash config file to append to
function install_bashrc() {
  local bash_config=$1
  # Quoted 'EOF' delimiter: the block is appended verbatim; expansion
  # happens later, when the shell sources the config file.
  cat <<'EOF' >> "$bash_config"
set -o vi
alias ll="ls -laG"
# get current branch in git repo
function parse_git_branch() {
BRANCH=`git branch 2> /dev/null | sed -e '/^[^*]/d' -e 's/* \(.*\)/\1/'`
if [ ! "${BRANCH}" == "" ]
then
STAT=`parse_git_dirty`
echo "[${BRANCH}${STAT}]"
else
echo ""
fi
}
# get current status of git repo
function parse_git_dirty {
status=`git status 2>&1 | tee`
dirty=`echo -n "${status}" 2> /dev/null | grep "modified:" &> /dev/null; echo "$?"`
untracked=`echo -n "${status}" 2> /dev/null | grep "Untracked files" &> /dev/null; echo "$?"`
ahead=`echo -n "${status}" 2> /dev/null | grep "Your branch is ahead of" &> /dev/null; echo "$?"`
newfile=`echo -n "${status}" 2> /dev/null | grep "new file:" &> /dev/null; echo "$?"`
renamed=`echo -n "${status}" 2> /dev/null | grep "renamed:" &> /dev/null; echo "$?"`
deleted=`echo -n "${status}" 2> /dev/null | grep "deleted:" &> /dev/null; echo "$?"`
bits=''
if [ "${renamed}" == "0" ]; then
#bits=">${bits}"
bits="🍄${bits}"
fi
if [ "${ahead}" == "0" ]; then
#bits="*${bits}"
bits="🔥${bits}"
fi
if [ "${newfile}" == "0" ]; then
#bits="+${bits}"
bits="✨${bits}"
fi
if [ "${untracked}" == "0" ]; then
#bits="?${bits}"
bits="🦴${bits}"
fi
if [ "${deleted}" == "0" ]; then
#bits="x${bits}"
bits="💀${bits}"
fi
if [ "${dirty}" == "0" ]; then
#bits="!${bits}"
bits="💩${bits}"
fi
if [ ! "${bits}" == "" ]; then
echo " ${bits}"
else
echo ""
fi
}
export PS1="\[\e[33m\]\u\[\e[m\]\[\e[35m\]\`parse_git_branch\`\[\e[m\]🐁 "
EOF
}
# Install the NERDTree file-explorer plugin as a pathogen bundle and cap
# its window width.  NOTE: changes the caller's working directory.
function install_nerdtree() {
  local repo=https://github.com/scrooloose/nerdtree.git
  cd ~/.vim/bundle && git clone "$repo"
  printf 'let g:NERDTreeWinSize = 22\n' >> ~/.vimrc
}
# Install the ack search tool via the platform's package manager
# (Homebrew on macOS, apt elsewhere).
function install_ack() {
  case "$OSTYPE" in
    darwin*)
      echo "Installing ack"
      brew install ack
      ;;
    *)
      sudo apt-get install ack-grep
      ;;
  esac
}
# Install ctags via the platform's package manager.  On macOS an alias
# pointing at the Homebrew binary is appended to the bash config —
# presumably to shadow the system ctags; confirm on target machines.
# Arguments: $1 - bash config file to append the alias to
function install_ctags() {
  local bash_config=$1
  if [[ "$OSTYPE" == "darwin"* ]]; then
    echo "Installing ctags"
    brew install ctags
    # $(brew --prefix) expands now, at install time.
    echo "alias ctags=\"$(brew --prefix)/bin/ctags\"" >> "$bash_config"
  else
    sudo apt-get install ctags
  fi
}
# Bootstrap pip using the official installer script.
# NOTE(review): recent get-pip.py requires Python 3 — confirm that the
# unversioned 'python' on target systems is a suitable interpreter.
function install_pip() {
curl -O https://bootstrap.pypa.io/get-pip.py && sudo python get-pip.py
}
# Install virtualenv via pip.
# NOTE(review): defined but never invoked by the interactive prompts
# below — possibly dead code.
function install_virtualenv() {
pip install virtualenv
}
# Install the fd file finder.  Adds the same macOS/Linux split used by
# install_ack and install_ctags (previously apt-only, which fails on
# macOS); the Linux path is unchanged.
function install_fd() {
  if [[ "$OSTYPE" == "darwin"* ]]; then
    echo "Installing fd"
    brew install fd
  else
    sudo apt install fd-find
  fi
}
# Clone the ctrlp fuzzy-finder plugin into the pathogen bundle dir.
# NOTE: changes the caller's working directory on success.
function install_ctrlp() {
cd ~/.vim/bundle && git clone https://github.com/kien/ctrlp.vim.git
}
# Clone the ack.vim search integration into the pathogen bundle dir.
# NOTE: changes the caller's working directory on success.
function install_ackvim() {
cd ~/.vim/bundle && git clone https://github.com/mileszs/ack.vim.git
}
# Clone the Tagbar plugin and append its vim configuration (window width
# and an F8 toggle mapping).  The quoted heredoc is written verbatim.
function install_tagbar() {
  cd ~/.vim/bundle && git clone https://github.com/majutsushi/tagbar.git
  cat <<'EOF' >> "$VIMRC_PATH"
let g:tagbar_width = 30
nmap <F8> :TagbarToggle<CR>
EOF
}
# Clone the vim-gitgutter diff-markers plugin into the pathogen bundle
# dir.  NOTE: changes the caller's working directory on success.
function install_gitgutter() {
cd ~/.vim/bundle && git clone https://github.com/airblade/vim-gitgutter.git
}
# ---- interactive driver: warn if a component looks installed, then ask ----

[ -f "$PATHOGEN_PATH" ] && echo "Pathogen might be already installed."
[ "$(ask 'Install pathogen?')" = "yes" ] && install_pathogen

# A marker setting from our vimrc block implies a previous install.
[ -f "$VIMRC_PATH" ] && grep -q 'set tabstop=2' "$VIMRC_PATH" \
  && echo "vimrc might be already installed"
[ "$(ask 'Install vimrc?')" = "yes" ] && install_vimrc

[ -d "$NERDTREE_PATH" ] && echo "NERDtree might be already installed."
[ "$(ask 'Install NERDtree?')" = "yes" ] && install_nerdtree

[ -d "$CTRLP_PATH" ] && echo "ctrlp.vim might be already installed."
[ "$(ask 'Install ctrlp.vim?')" = "yes" ] && install_ctrlp

[ -d "$ACKVIM_PATH" ] && echo "ack.vim might be already installed."
[ "$(ask 'Install ack.vim?')" = "yes" ] && install_ackvim

[ -d "$TAGBAR_PATH" ] && echo "Tagbar might be already installed."
[ "$(ask 'Install Tagbar?')" = "yes" ] && install_tagbar

[ -d "$GITGUTTER_PATH" ] && echo "GitGutter might be already installed."
[ "$(ask 'Install GitGutter?')" = "yes" ] && install_gitgutter

# Pick whichever bash configuration file applies (~/.bashrc preferred).
if [ -f "$BASHRC_PATH" ]; then
  bash_conf=$BASHRC_PATH
else
  bash_conf=$BASH_PROFILE_PATH
fi

[ -f "$bash_conf" ] && grep -q 'set -o vi' "$bash_conf" \
  && echo "bashrc might be already installed"
[ "$(ask 'Install bashrc?')" = "yes" ] && install_bashrc "$bash_conf"

command -v ack > /dev/null && echo "ack is already installed"
[ "$(ask 'Install ack?')" = "yes" ] && install_ack

command -v pip > /dev/null && echo "pip is already installed"
[ "$(ask 'Install pip?')" = "yes" ] && install_pip

command -v ctags > /dev/null && echo "ctags might be already installed"
[ "$(ask 'Install ctags?')" = "yes" ] && install_ctags "$bash_conf"

command -v fd > /dev/null && echo "fd might be already installed"
[ "$(ask 'Install fd?')" = "yes" ] && install_fd
| true
|
68fb0d6d75191cffd2af8cbce00aca36283d01c7
|
Shell
|
Sanqui/hm3
|
/tools/rip.sh
|
UTF-8
| 316
| 2.515625
| 3
|
[] |
no_license
|
#!/bin/bash
# rip.sh OFFSET LENGTH OUTFILE
# Extract LENGTH bytes at byte OFFSET from baserom.gbc into OUTFILE,
# render 1bpp/2bpp tile dumps to png, and print an RGBDS INCBIN stub.

# bs=1 makes skip/count byte-granular; $(( )) lets callers pass hex
# (0x...) or arithmetic expressions as arguments.
dd skip=$(($1)) count=$(($2)) if=baserom.gbc of="$3" bs=1 >& /dev/null
if [[ "$3" == *.1bpp || "$3" == *.2bpp ]]; then
    python pret/pokemontools/gfx.py png "$3"
fi
printf "\n"
# Map the ROM offset to a banked address: banks are 0x4000 bytes and
# ROMX banks are addressed at 0x4000-0x7fff (the leading $ is rgbasm's
# hex prefix, not a shell expansion).
printf "SECTION \"%s\", ROMX[$%04x], BANK[$%02x]\n" "$3" $(($1%0x4000+0x4000)) $(($1/0x4000))
printf "\tINCBIN \"%s\"\n" "$3"
| true
|
b0fa9233b4c9b6a764c7d3468d3b8df439db7174
|
Shell
|
erenso/summerseed2016
|
/bash-chat/chat_engine_berkeatac/send_msg.sh
|
UTF-8
| 300
| 2.78125
| 3
|
[] |
no_license
|
# Minimal netcat chat sender.  tablo.txt holds known peers as "IP,name"
# lines; each message is delivered to the peer's port 10002 as
# "sender,message".
while true
do
	echo "people known are:"
	# The second comma-separated field of each tablo.txt line is the name.
	cut -d ',' -f2 tablo.txt
	echo "enter recipient: "
	read -r name
	echo "enter message: "
	read -r message
	# -F/--: treat the typed name as a literal string, not a pattern.
	line=$(grep -F -- "$name" tablo.txt)
	IPADDR=$(echo "$line" | cut -d ',' -f1)
	# NOTE(review): the sender name "berke" is hard-coded.
	echo "berke,$message" | nc -c "$IPADDR" 10002
done
| true
|
b09bd833fe0366a6f960f0b40ef91a3120c58f78
|
Shell
|
irisjae/nephyrite
|
/source/meta/make/cordova/,
|
UTF-8
| 327
| 2.65625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Build step: link the cordova source tree into $out.
# use_deps/as_make/use_node/use_file/fail are project helpers defined
# elsewhere; $source and $out are expected to be set by them.
use_deps && as_make "$@" || {
  echo "please compile $0"
  exit 1
} && use_node || (fail
) && use_file "$source" || (fail
) && {
  mkdir -p "$out" 2> /dev/null
  # The globs must sit OUTSIDE the quotes to expand ("$out/*" was a
  # literal '*' filename); ${out:?} aborts rather than rm -rf "/*".
  rm -rf "${out:?}"/* 2> /dev/null
  ln -s "$source"/* "$out/"
  ln -s {./config.xml,./icon.png,./splash.png} "$out/"
} || fail echo "failed to make cordova"
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.