blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
7fb200d53f6a124c35a29468e831e8edbf041a19
|
Shell
|
i3ef0xh4ck/ShellRepository
|
/shell/spring-boot-server/serverDeploy.sh
|
UTF-8
| 1,374
| 3.46875
| 3
|
[] |
no_license
|
#!/bin/bash
# Deploy a Spring Boot jar: stop the old instance, back up the previous jar,
# install the freshly built one and start it in the background with nohup.
#
# Usage: sh -x serverDeploy.sh <jar-name> <port>
#   $1 - jar file name (e.g. test.jar)
#   $2 - HTTP port passed to Spring Boot (--server.port)
source /etc/profile
# sh -x serverDeploy.sh test.jar 8099
# Step 1: detect and stop the currently running service instance.
SERVICE=$1
PORT=$2
echo "$SERVICE"
# Find the java process running this jar; exclude this script and grep itself.
PID=$(ps -ef | grep "$SERVICE" | grep java | grep -v "$0" | grep -v "grep" | awk '{print $2}')
# If several same-named services were started, PID may hold multiple ids;
# kill accepts them as a list (hence deliberately unquoted below).
if [ "$PID"x != ""x ]
then
    kill -9 $PID
    echo "killed $PID"
fi
# Step 2: prepare the deployment paths.
JAR_NAME=$1
DIRNAME=$(echo "$JAR_NAME" | awk -F . '{print $1}')
TARGET=/data/server/services/target
DEPLOY_JAR_PATH=/data/server/services/$DIRNAME
# Back up the currently deployed jar, then replace it with the new one.
if [ ! -d "$DEPLOY_JAR_PATH" ]
then
    mkdir -p "$DEPLOY_JAR_PATH"
fi
if [ ! -d "$TARGET/backup" ]
then
    mkdir -p "$TARGET/backup"
fi
# Fixed: the original prefixed an extra "/" ("/$DEPLOY_JAR_PATH/..."),
# producing a "//data/..." path.
file="$DEPLOY_JAR_PATH/$JAR_NAME"
if [ -f "$file" ]
then
    mv "$file" "$TARGET/backup/$JAR_NAME.$(date +%Y%m%d%H%M%S)"
fi
sleep 4
cp -f "$TARGET/$JAR_NAME" "$DEPLOY_JAR_PATH/$JAR_NAME"
# Step 3: set jar permissions, start the service and record logs.
cd "$DEPLOY_JAR_PATH"
chmod 777 "$DEPLOY_JAR_PATH/$JAR_NAME"
nohup java -Xmx1024m -Xms512k -jar "$JAR_NAME" --spring.profiles.active=test --server.port="$PORT" --logging.path="$DEPLOY_JAR_PATH" >> spring.log 2>&1 &
echo 'java进程列表:'
jps -m
echo '*********************服务部署结束***********************'
| true
|
9e62d6dcadf43bb298bcf63ed6337b3fc55c7325
|
Shell
|
orangekoi/ilr-website
|
/bin/sync-prod
|
UTF-8
| 490
| 2.625
| 3
|
[] |
no_license
|
#!/bin/bash
# Refresh the local Drupal database from the Acquia dev environment.
# Must be invoked from inside the docroot directory.
CURRENT_DIR=${PWD##*/}
if [ "$CURRENT_DIR" != "docroot" ]; then
  echo 'Please be sure that you are running this command from the docroot.'
  exit 2
fi
# Drop the local DB, then pull and sanitize the remote copy.
drush sql-drop -y
drush sql-sync @ilr-d7.master @self --sanitize --sanitize-password=password --structure-tables-list=cache,cache_*,sessions,entityform,freedompay_invoices,freedompay_payments,redhen_* -y
# Clear all caches and enable the local development module.
drush cc all
drush en ilr_dev -y
echo "Sync complete."
| true
|
ec6a96177aecce24d3aafc30d58ced997c8e2073
|
Shell
|
guildenstern70/SmartDjango
|
/run.sh
|
UTF-8
| 753
| 2.84375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#
# SmartDjango Python Project
#
# Copyright (c) 2021-23 Alessio Saltarin
# This software is distributed under MIT License.
# See LICENSE.
#
# Bootstraps the database, ensures a superuser exists, collects static
# assets and launches the app under gunicorn.
#
# Default superuser credentials are applied only when none are configured.
if [ -z "${DJANGO_SUPERUSER_USERNAME}" ]; then
    export DJANGO_SUPERUSER_USERNAME='admin'
    export DJANGO_SUPERUSER_PASSWORD='admin'
    export DJANGO_SUPERUSER_EMAIL="admin@smartdjango.net"
fi
echo "building db..."
echo "using settings: $DJANGO_SETTINGS_MODULE"
python manage.py migrate
python manage.py createsuperuser --noinput
echo "make app migrations..."
python manage.py makemigrations SmartDjango
echo "migrate app..."
python manage.py migrate SmartDjango
python manage.py collectstatic --noinput
# Quoted (fix): the unquoted "[$0] ..." was subject to pathname expansion.
echo "[$0] Starting Django Server..."
# exec so gunicorn replaces the shell and receives signals directly.
exec gunicorn -w 3 SmartDjango.wsgi:application --bind 0.0.0.0:8080
| true
|
bb981ce086931a841f0e19bdc1b09786daddb6e8
|
Shell
|
aravind-vasudevan/sdc
|
/scripts/plotRatio.sh
|
UTF-8
| 283
| 3.015625
| 3
|
[] |
no_license
|
#!/bin/bash
# Collect every case-insensitive SDC/HEFT line from all .log files under
# the directory given as $1 into a single output file.
#
# Usage: plotRatio.sh <log-directory>
outputFileName="tutu"
echo " " > "$outputFileName"
for fileName in "$1"/*.log
do
    # Fixed: the grep was wrapped in backticks, which pointlessly executed
    # its (empty, redirected) output as a command.
    grep -i SDC/HEFT "$fileName" >> "$outputFileName"
done
#while read line
#do
#    runConfig=`awk -F '=' '{print $1}'`
#    ratio=`awk -F '=' '{print $2}'`
#    echo $runConfig.$ratio
#done < $fileName
| true
|
65eb5aefe39dcc82b74db201bb408a395f7f50e2
|
Shell
|
insidiousmeme/fisheye
|
/fisheye_webservice/util/prepare_apache.sh
|
UTF-8
| 561
| 2.84375
| 3
|
[] |
no_license
|
#!/bin/bash
# Install the fisheye web service under Apache: link it into the docroot,
# fix ownership, register its Apache config once, then install Python and
# native dependencies. Must run as root (chown/httpd.conf writes).
set -e
ln -sf /mnt/fisheye_webservice/ /var/www/html/
chown apache:apache /var/www/html/fisheye_webservice -R
chown apache:apache /mnt/fisheye_webservice -R
# check that httpd.conf doesn't contain setup for fishey_webservice
# (append the service's config only on first run, keeping this idempotent)
if ! grep -q "fisheye_webservice" /etc/httpd/conf/httpd.conf; then
cat /var/www/html/fisheye_webservice/httpd_fisheye_webservice.conf >> /etc/httpd/conf/httpd.conf
fi
pip3 install -r /var/www/html/fisheye_webservice/requirements.txt
# Build and install the native fisheye library, then its Python bindings.
cd /mnt/fisheye && make all install
cd /mnt/fisheye/python/ && python setup.py install
| true
|
f8a26a2b9d3b15ba92769df33ce7eeefb6982a5e
|
Shell
|
xueshell/grep-for-filenames
|
/functions.sh
|
UTF-8
| 288
| 3.546875
| 4
|
[] |
no_license
|
#!/bin/bash
# Print only the file names (no directory part) of every regular file
# under the directory given as $1.
function flatListFilesFrom() {
  # "$1" quoted (fix): directory paths containing spaces previously word-split.
  find "$1" -type f -printf '%f\n'
}
# Print the full path of every regular file under the directory given as $1.
function listFilesFrom() {
  # "$1" quoted (fix): directory paths containing spaces previously word-split.
  find "$1" -type f
}
# For each path in the whitespace-separated list $1, print the path when its
# basename is NOT found (as a grep pattern) anywhere under directory $2.
function listNotFoundByBasename() {
  local line
  # $1 is deliberately unquoted: it is a whitespace-separated list of paths.
  for line in $1
  do
    # -q replaces the >/dev/null redirect; status 1 means "no match"
    # (status 2, a grep error, still does not report the file — same as before).
    grep -rq "$(basename "$line")" "$2"
    [ $? -eq 1 ] && echo "$line"
  done
}
| true
|
ebaa44842a36d5c311f18a14c3253a4e4b3f62c5
|
Shell
|
urmyfaith/sinstallation
|
/preferences/displays.sh
|
UTF-8
| 553
| 3.109375
| 3
|
[] |
no_license
|
################################################################################
# Displays
################################################################################
# Toggle whether brightness is automatically adjusted depending on ambient light.
# $1: the literal string "enabled" turns the feature on; anything else turns it off.
# Writes a system-level macOS preference, hence the sudo prompt.
function osx_displays_auto_brightness_adjustment {
local enabled; if [[ "$1" == "enabled" ]]; then enabled="1"; else enabled="0"; fi
sudo defaults write /Library/Preferences/com.apple.iokit.AmbientLightSensor "Automatic Display Enabled" -int $enabled
}
| true
|
719aec30ccd1defe430ad9a0837f4403494af978
|
Shell
|
youtube/cobalt
|
/third_party/skia/infra/skqp/run_skqp.sh
|
UTF-8
| 1,106
| 3.28125
| 3
|
[
"BSD-3-Clause",
"GPL-1.0-or-later",
"LGPL-2.0-or-later",
"Apache-2.0",
"MIT"
] |
permissive
|
#!/bin/bash
# Copyright 2018 Google LLC
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# Assumes this is in a docker container with a skia repo mounted at /SRC and a folder containing the
# built APK to be in /OUT.
# Additionally, this assumes that the docker container was run to have an android emulator running
# that is accesible with adb.
# This script in particular doesn't care about arm vs x86, but the current incarnation has an x86
# emulator and so the apk should be built for that.
#
# Example usage:
#
set -ex
# Wait for boot
timeout 60 adb wait-for-device shell 'while [[ -z $(getprop sys.boot_completed) ]]; do sleep 1; done'
# Some extra sleep to make sure the emulator is awake and ready for installation
sleep 10
adb install -r /OUT/skqp-universal-debug.apk
# Clear the logcat buffer so the later dump only contains this run.
adb logcat -c
tmp_file="$(mktemp "${TMPDIR:-/tmp}/skqp.XXXXXXXXXX")"
# Run the instrumented tests; tee keeps a copy for the pass/fail grep below.
adb shell am instrument -w org.skia.skqp | tee "$tmp_file" | fold -s
adb logcat -d TestRunner org.skia.skqp skia DEBUG "*:S"
# NOTE(review): a failure only prints a message; the script still exits 0 —
# presumably the caller inspects the output. Confirm before relying on status.
if ! grep -q '^OK ' "$tmp_file"; then
echo 'this test failed'
fi
| true
|
84acaec1d6bb3a356747cdf4a04ffe7e5f930829
|
Shell
|
gridcoin-community/Gridcoin-Research
|
/src/bdb53/lang/sql/odbc/mkall.sh
|
UTF-8
| 784
| 3.484375
| 3
|
[
"MIT",
"LicenseRef-scancode-newlib-historical",
"LicenseRef-scancode-warranty-disclaimer",
"BSD-3-Clause",
"Sleepycat"
] |
permissive
|
#!/bin/sh
#
# Build everything for Win32/Win64
#
# Cross-compiles sqliteodbc installers for 32- and 64-bit Windows (with and
# without bundled SQLite DLLs) and collects the results in ./dist.
rm -rf dist
mkdir dist
# All further output goes to mkall.log; set -x traces each command into it.
exec >mkall.log 2>&1
set -x
# Derive version strings from the VERSION file when present
# (VER32 is the dotless form used in installer file names).
if test -r VERSION ; then
VER32=$(tr -d '.' <VERSION)
VER=$(cat VERSION)
else
VER32="0"
VER="0.0"
fi
# On a 64-bit host, force 32-bit compilation for the Win32 builds.
if test $(arch) = "x86_64" ; then
CC32="gcc -m32"
SH32="linux32 sh"
else
CC32=gcc
SH32=sh
fi
# Win32: build the external-DLL variant first, then the self-contained one.
NO_SQLITE2=1 NO_TCCEXT=1 SQLITE_DLLS=2 CC=$CC32 $SH32 mingw-cross-build.sh
mv sqliteodbc.exe dist/sqliteodbc_dl-$VER32.exe
CC=$CC32 $SH32 mingw-cross-build.sh
mv sqliteodbc.exe dist/sqliteodbc-$VER32.exe
# Same pair for Win64.
SQLITE_DLLS=2 sh mingw64-cross-build.sh
mv sqliteodbc_w64.exe dist/sqliteodbc_w64_dl-$VER32.exe
sh mingw64-cross-build.sh
mv sqliteodbc_w64.exe dist/sqliteodbc_w64-$VER32.exe
# Ship the source tarball alongside the installers when available.
test -r ../sqliteodbc-$VER.tar.gz && cp -p ../sqliteodbc-$VER.tar.gz dist
| true
|
a8ecd0d2fbbdf8d0d4d709545f0837a938b93a3f
|
Shell
|
LokdeepSaluja/intelliJ-ce-idea
|
/run.sh
|
UTF-8
| 3,515
| 3.546875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Build and launch a containerized IntelliJ IDEA CE, bind-mounting the IDE's
# config/cache/plugin/project directories from the host so IDE state survives
# container termination.

# First build a docker container that will create the host machine's current
# $USER in the container in order for data to be persistent when the
# container is terminated.
docker build --build-arg USER=$USER -t mvpjava/intellij-ide:latest .

# Create IntelliJ directories used as Docker bind mount volumes on host.
# If they don't exist, create them with current $USER permissions on host
# in order to have write permissions in container (or else root is used).
# Resource reference: https://www.jetbrains.com/help/idea/tuning-the-ide.html
IJ_ROOT_DIRNAME=IdeaIC2020.3
IJ_ROOT_DIR_HOST=${HOME}/${IJ_ROOT_DIRNAME}
[ ! -d "$IJ_ROOT_DIR_HOST" ] && mkdir -p "$IJ_ROOT_DIR_HOST"
echo "IntelliJ directory located under ... $IJ_ROOT_DIR_HOST"

# IJ syntax/config directory
IJ_SYNTAX_DIR_HOST=${IJ_ROOT_DIR_HOST}/.config/JetBrains/
[ ! -d "$IJ_SYNTAX_DIR_HOST" ] && mkdir -p "$IJ_SYNTAX_DIR_HOST"
# IJ cache directory
IJ_CACHE_DIR_HOST=${IJ_ROOT_DIR_HOST}/.cache/JetBrains/
[ ! -d "$IJ_CACHE_DIR_HOST" ] && mkdir -p "$IJ_CACHE_DIR_HOST"
# IJ plugins directory
IJ_PLUGINS_DIR_HOST=${IJ_ROOT_DIR_HOST}/.local/share/JetBrains/consentOptions
[ ! -d "$IJ_PLUGINS_DIR_HOST" ] && mkdir -p "$IJ_PLUGINS_DIR_HOST"
# IJ user preferences
IJ_USER_PREFS_HOST=${IJ_ROOT_DIR_HOST}/.java/.userPrefs
[ ! -d "$IJ_USER_PREFS_HOST" ] && mkdir -p "$IJ_USER_PREFS_HOST"

############################################
# Create IJ directory paths within container
############################################
IJ_ROOT_DIR_CNTR=~
IJ_SYNTAX_DIR_CNTR=${IJ_ROOT_DIR_CNTR}/.config/JetBrains/$IJ_ROOT_DIRNAME
IJ_CACHE_DIR_CNTR=${IJ_ROOT_DIR_CNTR}/.cache/JetBrains/$IJ_ROOT_DIRNAME
IJ_PLUGINS_DIR_CNTR=${IJ_ROOT_DIR_CNTR}/.local/share/JetBrains/$IJ_ROOT_DIRNAME
IJ_USER_PREFS_CNTR=${IJ_ROOT_DIR_CNTR}/.java/.userPrefs

#############################################################
# If you want to share code with the container,
# place your project directories under $IJ_PROJECTS_DIR_HOST
#############################################################
IJ_PROJECTS_DIR_HOST=${IJ_ROOT_DIR_HOST}/IdeaProjects
[ ! -d "$IJ_PROJECTS_DIR_HOST" ] && mkdir -p "$IJ_PROJECTS_DIR_HOST"
IJ_PROJECTS_DIR_CNTR=${IJ_ROOT_DIR_CNTR}/IdeaProjects

###########################################################
# Share your maven artifacts on the host machine with the
# container to avoid re-downloading everything
###########################################################
MAVEN_M2_DIR_HOST=~/.m2
# Fixed: was "$$MAVEN_M2_DIR_HOST" — "$$" expands to the shell PID, so the
# directory test never checked the intended path.
[ ! -d "$MAVEN_M2_DIR_HOST" ] && mkdir -p "$MAVEN_M2_DIR_HOST"
MAVEN_M2_DIR_CNTR=~/.m2

# Ensure host user has all permissions correctly set for access
chmod 764 -R "$IJ_ROOT_DIR_HOST"

#########################################################
# Ensure Docker can connect to X11 Server on host.
# Very permissive.
#########################################################
xhost +

# NOTE(review): the first IDE mount maps IJ_ROOT_DIR_HOST onto the container's
# syntax dir, and IJ_SYNTAX_DIR_HOST created above is never mounted —
# presumably intentional, but verify against the image layout.
docker container run -d --rm -it \
-e DISPLAY \
-v $HOME/.Xauthority:/home/$USER/.Xauthority \
-v /tmp/.X11-unix:/tmp/.X11-unix \
-v /var/run/docker.sock:/var/run/docker.sock \
-v ${IJ_ROOT_DIR_HOST}:${IJ_SYNTAX_DIR_CNTR} \
-v ${IJ_CACHE_DIR_HOST}:${IJ_CACHE_DIR_CNTR} \
-v ${IJ_PLUGINS_DIR_HOST}:${IJ_PLUGINS_DIR_CNTR} \
-v ${IJ_PROJECTS_DIR_HOST}:${IJ_PROJECTS_DIR_CNTR} \
-v ${MAVEN_M2_DIR_HOST}:${MAVEN_M2_DIR_CNTR} \
-v ${IJ_USER_PREFS_HOST}:${IJ_USER_PREFS_CNTR} \
-h jetbrains \
--name ij-ce-ide-jdk11 \
mvpjava/intellij-ide:latest
| true
|
1b1cc0832e09ca10a478b301131588b6d02321b1
|
Shell
|
EfraimKrug/BeisChayim
|
/ShellScripts/getUSB.sh
|
UTF-8
| 887
| 3.0625
| 3
|
[] |
no_license
|
# Copy Beis Chayim assets between a newly inserted USB flash drive and the
# local code tree. Relies on a pre-captured ~/bin/.lsblk1 snapshot: diffing
# it against a fresh lsblk reveals the mount point of the inserted drive.
CODE_DIRECTORY=bcCode
PIC_DIR=$HOME/$CODE_DIRECTORY/BeisChayim/img
PDF_DIR=$HOME/$CODE_DIRECTORY/BeisChayim/pdf
jsFile=$HOME/$CODE_DIRECTORY/BeisChayim/js/db01.js
###################################################
# Notice: travel flash drive
# Must have a directory: .BeisChayim/.LABEL
# The contents of that .LABEL must be INSTALLED...
# Then everything should work properly
###################################################
cd ~/bin
rm -f a
# Diff the current block-device list against the earlier snapshot; the new
# "media" line's 8th column is taken as the drive's mount point.
# NOTE(review): fragile — lsblk column layout varies by version/OS; confirm.
lsblk > .lsblk2
diff .lsblk1 .lsblk2 | grep media > TEMP
DIR=$(awk '{print $8}' TEMP)
cd $DIR
# The drive opts in by carrying .BeisChayim/.LABEL containing "INSTALLED".
Q=$(cat .BeisChayim/.LABEL)
if [[ "$Q" = "INSTALLED" ]] ; then
# Pull images/PDFs from the drive; take db01.js from staging if present,
# then archive the staging copy on the drive.
cp BeisChayim/img/* $PIC_DIR
cp BeisChayim/pdf/* $PDF_DIR
[ -n "$(find BeisChayim/data/staging -name 'db01.js' | head -1)" ] && cp BeisChayim/data/staging/db01.js $jsFile
mv BeisChayim/data/staging/db01.js BeisChayim/data/db01-old.js
fi
# Always refresh the drive's js copy from the local file.
cp $jsFile BeisChayim/js/db01.js
cd ~/bin
| true
|
211180b2d5ec27fae3f82395ced0c20381df6935
|
Shell
|
mattlukac/alpha-inference
|
/scripts/bed_to_vcf2.sbatch
|
UTF-8
| 994
| 3.15625
| 3
|
[] |
no_license
|
#!/bin/bash
#SBATCH --account=kernlab
#SBATCH --partition=kern
#SBATCH --nodes=1
#SBATCH --job-name=bed2vcf
#SBATCH --time 2:00:00
#SBATCH --mem-per-cpu=8G
#SBATCH --ntasks=1
#SBATCH --cpus-per-task=1
#SBATCH --mail-type=ALL
#SBATCH --mail-user=mlukac@uoregon.edu
# now that the unmapped sites have been removed,
# we use the b37 bed file and the hg18 call files
# to construct the tail of hspec.vcf
toDir='../data/human_specific_sites/mappedCalls'
# first we need to concatenate the mapped calls into a single file
# Truncate instead of touch (fix): re-running the job no longer appends a
# duplicate copy of every chromosome to an existing all_calls.txt.
: > "${toDir}/all_calls.txt"
for k in {1..22}; do
    cat "${toDir}/chr${k}.calls.txt" >> "${toDir}/all_calls.txt"
done
cat "${toDir}/chrX.calls.txt" >> "${toDir}/all_calls.txt"
# keep only the hspecsub and the ancestral allelic types (columns 3 and 20)
awk '{print $3 "\t" $20}' "${toDir}/all_calls.txt" > "${toDir}/ref_alt.txt"
# then we can append these to the end of hspec_37.bed
paste ../data/human_specific_sites/hspec_37.bed "${toDir}/ref_alt.txt" > ../data/human_specific_sites/tail.vcf
| true
|
9a4a3b196d5dddcbcb8688dbd54624507fe3a401
|
Shell
|
ExLibrisGroup/Rosetta.ClamAVVirusCheckPlugin
|
/src/bin/clamAVscan.sh
|
UTF-8
| 716
| 3.6875
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/sh
# ClamAV virus-scan wrapper for Rosetta: scans the file given as $1 and
# reports via three KEY=VALUE lines on stdout (STATUS, CONTENT, AGENT),
# exiting with STATUS. Files of 2GB or larger are skipped with STATUS=0.
CLAMSCAN=clamscan
SCANOPTS="--stdout --no-summary"
# Defaults describe an error until proven otherwise.
STATUS=255
CONTENT="ERROR"
AGENT="REG_SA_VC_CLAMAV"
TARGET=$1
# Emit the result triplet and terminate with STATUS as the exit code.
EchoAndExit()
{
echo STATUS=${STATUS}
echo CONTENT=${CONTENT}
echo AGENT=${AGENT}
exit ${STATUS}
}
# Check is file exist
if [ ! -f ${TARGET} ]
then
CONTENT="File not found"
EchoAndExit
fi
# Check file size is less than 2GB
# (size/1024/1024/2048 is non-zero exactly when size >= 2GB; the size comes
# from column 5 of `ls -Ll`, which dereferences a symlinked target)
file_size=`ls -Ll ${TARGET} | awk '{print $5}'`
file_size_tb=`echo "$file_size / 1024 / 1024 / 2048" | bc`
if [ $file_size_tb -ne 0 ]
then
STATUS=0
CONTENT="Virus Check is limited to a 2GB file size and thus has been ignored"
AGENT="REG_SA_DPS"
EchoAndExit
fi
# Scan file
CONTENT=`${CLAMSCAN} ${SCANOPTS} ${TARGET}`
STATUS=$?
EchoAndExit
| true
|
7576fa48770279948ef1b4a77fc1993f1cabd514
|
Shell
|
kunle12/docker-ros-indigo-pr2
|
/assets/etc/profile.d/ros-setup.sh
|
UTF-8
| 205
| 2.515625
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
# Source the ROS and catkin workspace environments, preferring the bash
# variants when this shell is bash and falling back to POSIX sh otherwise.
if [ -n "$BASH" ]; then
  . /opt/ros/${ROS_DISTRO}/setup.bash
  . /home/pr2/dev/catkin_ws/devel/setup.bash
else
  . /opt/ros/${ROS_DISTRO}/setup.sh
  . /home/pr2/dev/catkin_ws/devel/setup.sh
fi
| true
|
8a5b6d09be71d12fa8578b70ff232f2447dcc1cc
|
Shell
|
enhancedlearningmapsopensource/Materials
|
/elm-admin-scripts/set_contact.sh
|
UTF-8
| 1,190
| 3.984375
| 4
|
[] |
no_license
|
#!/bin/bash
# Written by Mark Thompson (Agile Technology Solutions)
# for ELM Modern Copy administration
#
# Replaces the CONTACT_EMAIL entry in the local elm_release database's
# ELM_CONFIG table with the address given as $1. Prompts interactively
# for the elm_debug_user MySQL password.
#DB_SERVER=localhost
ELM_DB_USER=elm_debug_user
ELM_DB_NAME=elm_release
CONFIG_TABLE=ELM_CONFIG
CONTACT_KEY=CONTACT_EMAIL
# Exactly one argument (the email address) is required.
if [[ $# -ne 1 ]]; then
USAGE_ERROR=TRUE
else
USAGE_ERROR=FALSE
EMAIL=$1
fi
if [[ ${USAGE_ERROR} = "TRUE" ]]; then
echo "USAGE ERROR: $0 <contact_email_address>"
echo
exit 1
fi
echo
echo This command updates the configuration for the
echo CONTACT email address in the local elm_release database.
echo
echo Email contact will be set to ${EMAIL}
echo
echo You will be prompted to enter the elm_debug_user password...
# Read Password
# (-s keeps the typed password off the terminal)
echo -n Password:
read -s PASSWORD
#echo
#echo Password supplied: $PASSWORD
# Delete-then-insert the config row inside a single mysql session; the
# commented SELECTs can be re-enabled to inspect before/after state.
# NOTE(review): --password on the command line is visible in `ps` output;
# consider a my.cnf credentials file or letting mysql prompt instead.
set_contact() {
#cat <<-endsql
mysql --user=${ELM_DB_USER} --password=${PASSWORD} <<-endsql
use ${ELM_DB_NAME};
/*
SELECT * FROM ${CONFIG_TABLE} WHERE CODE = '${CONTACT_KEY}';
*/
DELETE FROM ${CONFIG_TABLE} WHERE CODE = '${CONTACT_KEY}';
INSERT INTO ${CONFIG_TABLE} (code, val)
VALUES ('${CONTACT_KEY}', '${EMAIL}');
/*
SELECT * FROM ${CONFIG_TABLE} WHERE CODE = '${CONTACT_KEY}';
*/
endsql
}
set_contact
echo
| true
|
f73328153de802b90a0b86588849929afb0560c8
|
Shell
|
guidanoli/cptk
|
/libexec/base/run
|
UTF-8
| 427
| 3.625
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
#
# Summary: Run source code
#
# Usage: libexec/base/run [<flags>]
#
# Resolves the program path via libexec/base/path and either delegates to
# `libexec/subcall run` (exporting FLAGS and WD for the subcall) or sources
# the no-program error handler.
# Exit on error
set -e
# Make call to path
path=$(libexec/base/path)
# Save flags
# (all script arguments are forwarded to the subcall through FLAGS)
export FLAGS=$@
# If program exists,
if [[ -f $path ]]
then
# Save working directory
export WD=$(dirname "$path")
# then make subcall
libexec/subcall run
else
# Get language
# (read by the sourced error script to phrase its message)
lang=$(libexec/get-lang)
# else exit
source libexec/err/no-program
fi
| true
|
ed9950bae111b221bc973ffd3b9193da76ce99f4
|
Shell
|
Vman45/nautilus-script
|
/scripts/unarchive & convmv to utf8
|
UTF-8
| 246
| 2.953125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Unarchive selected archives and convmv the entries from GBK to UTF8
# Usage: unarchive path
# Resolve this script's own directory so the helper can be sourced
# regardless of the caller's working directory.
dir_script="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
source "$dir_script/func-unarchive-and-convmv.sh"
# Forward every selected path to the helper.
unarchive_and_convmv "$@"
| true
|
502975580d228556e685132ee0c290f47b2f7628
|
Shell
|
htcondor/htcondor
|
/src/condor_scripts/common-cloud-attributes-aws.sh
|
UTF-8
| 745
| 2.953125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Emit HTCondor cloud attributes for an AWS EC2 instance by querying the
# IMDSv2 metadata service. Output is KEY="VALUE" lines plus the trailing
# "- update:true" directive.

# Obtain a short-lived IMDSv2 session token (required by token-enforced IMDS).
TOKEN=$(curl -X PUT "http://169.254.169.254/latest/api/token" -v -H "X-aws-ec2-metadata-token-ttl-seconds: 21600")

# Map each common attribute name to its metadata path and print KEY="value".
# read -r (fix) avoids backslash mangling; quoting (fix) keeps metadata
# values with spaces or glob characters intact.
while read -r common specific; do
    value=$(curl -H "X-aws-ec2-metadata-token: $TOKEN" "http://169.254.169.254/latest/meta-data/${specific}")
    echo "${common}=\"${value}\""
done << EOF
Image ami-id
VMType instance-type
Zone placement/availability-zone
Region placement/region
InstanceID instance-id
EOF

echo 'Provider="AWS"'
echo 'Platform="EC2"'

# Spot instances are interruptible; on-demand ones are not.
interruptible="False"
ilc=$(curl -H "X-aws-ec2-metadata-token: $TOKEN" -v http://169.254.169.254/latest/meta-data/instance-life-cycle)
if [[ $ilc == "spot" ]]; then
    interruptible="True"
fi
echo "Interruptible=${interruptible}"
echo "- update:true"
| true
|
21fdad51160e34c19ecc61c535176a0c0d7fd959
|
Shell
|
chunkhang/dotfiles
|
/scripts/yabai/desktop/label
|
UTF-8
| 547
| 3.40625
| 3
|
[
"Unlicense"
] |
permissive
|
#!/bin/sh
# Prompt (via an AppleScript dialog) for a new name for the currently focused
# yabai space and apply it. Cancelling the dialog leaves the label untouched.

# Grab the focused space's index and current label from yabai.
current_space=$(yabai -m query --spaces | jq '.[] | select(.focused == 1)')
current_index=$(echo "$current_space" | jq '.index')
current_label=$(echo "$current_space" | jq --raw-output '.label')

# Ask the user; osascript exits non-zero when the dialog is cancelled.
osascript_output=$(
osascript <<EOF
display dialog "Enter a new name for Space ${current_index}:" \
with title "yabai" default answer "${current_label}"
EOF
)
dialog_status=$?

# Fixed: the status check used "==", a bashism that fails under strict
# POSIX /bin/sh implementations such as dash; the status is now captured
# immediately after the substitution and compared with -eq.
if [ "$dialog_status" -eq 0 ]; then
  # osascript reports "... text returned:<answer>"; extract the answer.
  new_label=$(
    echo "$osascript_output" | \
    grep --only-matching "text returned:.*" | \
    cut -d : -f 2
  )
  yabai -m space --label "${new_label}"
fi
| true
|
3c7553cd861509f2f44b2020a4816e61691c5007
|
Shell
|
maxrd2/arch-repo
|
/packages/mingw-libs/mingw-w64-dav1d/PKGBUILD
|
UTF-8
| 1,805
| 2.796875
| 3
|
[] |
no_license
|
# Maintainer: Karl-Felix Glatzer <karl[dot]glatzer[at]gmx[dot]de>
# Contributor: Daniel Bermond < gmail-com: danielbermond >
# PKGBUILD for the mingw-w64 cross-compiled dav1d AV1 decoder; builds both
# i686 and x86_64 Windows targets with meson/ninja, running tests under wine.
pkgname=mingw-w64-dav1d
pkgver=0.7.0
pkgrel=1
pkgdesc='AV1 cross-platform decoder focused on speed and correctness (mingw-w64)'
arch=('i686' 'x86_64')
url='https://code.videolan.org/videolan/dav1d/'
license=('BSD')
depends=('mingw-w64-crt' 'mingw-w64-vulkan-icd-loader')
options=('!strip' '!buildflags' '!libtool' 'staticlibs')
makedepends=('mingw-w64-gcc' 'mingw-w64-meson' 'mingw-w64-wine' 'git' 'nasm' 'doxygen')
source=(https://downloads.videolan.org/pub/videolan/dav1d/${pkgver}/dav1d-${pkgver}.tar.xz{,.asc})
sha512sums=('a2c6981ba5ad5ffe8382e32d6afb943fdd3ee2801751e4d84718171ccaeeef28ae4822b2ae282b301f4806351526cd83aa72b2535fa3a99ec534996458508521'
'SKIP')
b2sums=('c601c8b03d712291e3e3357cac87fb7d4a2352503966f949bc023967a9fbeec6199477fa18381ac3eea7991de1df93e0c0ce95469d6400c0d1acbb8dc2b457a9'
'SKIP')
_architectures="i686-w64-mingw32 x86_64-w64-mingw32"
validpgpkeys=('65F7C6B4206BD057A7EB73787180713BE58D1ADC') # VideoLAN Release Signing Key
# Build static+shared libraries for each target triple in its own build dir.
build () {
export NEED_WINE=1
export WINEDEBUG=-all
for _arch in ${_architectures}; do
mkdir -p "${srcdir}/dav1d-${pkgver}/build-${_arch}" && cd "${srcdir}/dav1d-${pkgver}/build-${_arch}"
${_arch}-meson .. --default-library both
ninja
done
}
# Run the upstream test suite for each target (executed under wine).
check() {
for _arch in ${_architectures}; do
cd "${srcdir}/dav1d-${pkgver}/build-${_arch}"
export WINEDEBUG=-all
meson test
done
}
# Install both architectures plus the BSD license and patent notes.
package () {
for _arch in ${_architectures}; do
cd "${srcdir}/dav1d-${pkgver}/build-${_arch}"
DESTDIR="$pkgdir" ninja install
done
cd "${srcdir}/dav1d-${pkgver}"
install -D -m644 COPYING "${pkgdir}/usr/share/licenses/${pkgname}/LICENSE"
install -D -m644 doc/PATENTS -t "${pkgdir}/usr/share/licenses/${pkgname}"
}
| true
|
090f9cfa7a81175cbc15e5f576a7e0eea92e40d3
|
Shell
|
wdicarlo/wdc-do-commands
|
/fs/do-fs-change-files-extension
|
UTF-8
| 298
| 3.328125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Rename every file in the current directory from extension $1 to $2,
# printing each file's base name (extension stripped) as it goes.
#
# Usage: do-fs-change-files-extension <curr-ext> <new-ext>
if [ $# -ne 2 ]; then
  cmd=$(basename "$0")
  echo "Usage: $cmd <curr-ext> <new-ext>"
  exit
fi
# NUL-delimited find + shell loop (fix): the previous sed/xargs pipeline
# broke on names containing whitespace and stripped the FIRST ".ext"
# occurrence in a name instead of the trailing extension.
while IFS= read -r -d '' f; do
  stem=${f%.$1}          # drop only the trailing .$1
  basename -- "$stem"
  mv -- "$f" "${stem}.$2"
done < <(find . -maxdepth 1 -name "*.$1" -print0)
| true
|
ab482c5d68e864d90103edf48c13ae34af556f60
|
Shell
|
mkanenobu/mac_dotfiles
|
/home/.bashrc
|
UTF-8
| 3,994
| 3.078125
| 3
|
[] |
no_license
|
# Fig pre block. Keep at the top of this file.
[[ -f "$HOME/.fig/shell/bashrc.pre.bash" ]] && builtin source "$HOME/.fig/shell/bashrc.pre.bash"
# .bashrc: executed by bash(1) for non-login shells.
# vi: set tabstop=2 softtabstop=2 shiftwidth=2 :
# If not running interactively, don't do anything
case $- in
*i*) ;;
*) return;;
esac
# use GNU commands
# (Homebrew coreutils on macOS; prepended so they shadow the BSD tools)
export PATH="/usr/local/opt/coreutils/libexec/gnubin:$PATH"
export MANPATH="/usr/local/opt/coreutils/libexec/gnuman:$MANPATH"
# for setting history length see HISTSIZE and HISTFILESIZE in bash(1)
HISTSIZE=20000
HISTFILESIZE=20000
HISTCONTROL=ignoredups
# check the window size after each command and, if necessary,
# update the values of LINES and COLUMNS.
#shopt -u checkwinsize
# If set, the pattern "**" used in a pathname expansion context will
# match all files and zero or more directories and subdirectories.
#shopt -s globstar
# make less more friendly for non-text input files, see lesspipe(1)
[ -x /usr/bin/lesspipe ] && eval "$(SHELL=/bin/sh lesspipe)"
# Prompt string is produced by an external "prompt.rs" command.
PS1="$(prompt.rs)"
PS2='>'
# True when the given command/alias/function/builtin is available in this shell.
_exists() {
  type "$1" >/dev/null 2>&1
}
# Prefer exa for listings when installed; otherwise fall back to plain ls.
if _exists "exa"; then
alias ls='exa -I=".DS_Store"'
alias la='ls -a'
alias ll='ls -lha --git'
alias l='ls -lha --git'
else
alias la='ls -a'
alias ll='ls -lha'
alias l='ls -lha'
fi
# colored GCC warnings and errors
#export GCC_COLORS='error=01;31:warning=01;35:note=01;36:caret=01;32:locus=01:quote=01'
function share_history {
history -a
tac ~/.bash_history | awk '!a[$0]++' | tac >| ~/.bash_history.tmp
[ -f ~/.bash_history.tmp ] &&
mv -f ~/.bash_history{.tmp,} &&
history -c && history -r
}
PROMPT_COMMAND='share_history'
shopt -u histappend
# Alias definitions.
# You may want to put all your additions into a separate file like
# ~/.bash_aliases, instead of adding them here directly.
# See /usr/share/doc/bash-doc/examples in the bash-doc package.
if [ -f ~/.bash_aliases ]; then
. ~/.bash_aliases
fi
## User aliases
# Route all vi/vim invocations (and the common "ivm" typo) to neovim.
if _exists "nvim"; then
alias vi='nvim'
alias vim='nvim'
alias n='nvim'
alias ivm='nvim'
fi
# Quick access to shell config files.
alias br='vim ~/.bashrc'
alias bp='vim ~/.bash_profile'
alias sbp='source ~/.bash_profile'
alias sbr='source ~/.bashrc'
alias mkdir='mkdir -p'
alias nvr='vim ~/.config/nvim/'
# Nicer diff/cat when the fancy tools are installed.
if _exists "icdiff"; then
alias diff='icdiff -U 1 --line-number'
fi
if _exists "bat"; then
alias cat='bat'
fi
alias fd='fd -H'
alias rg='rg --no-ignore'
alias lame='lame -b 192'
alias vimdiff='nvim -d'
alias tree='tree -N'
# Shell utils
alias duc='du -d 1 -h | sort -h'
alias wcj='wc -m'
alias mv='mv -i'
alias cp='cp -i'
alias fzf='fzf --reverse'
alias cl='clear'
alias gitignore_init='gibo dump'
# Git
alias ga='git add'
alias gitpull='git pull'
alias gitpush='git push'
alias gcm='git commit -m'
alias gcd='git commit -m "$(date)"'
alias gca='git commit --amend'
alias gl='git log'
alias gs='git status'
alias gd='git diff'
alias gdt='git difftool --extcmd="icdiff --line-numbers --unified=3" --no-prompt | less'
alias gcl='git clone'
alias gg='git log --graph --pretty=oneline'
alias gc='git checkout'
alias gb='git branch'
# Fixed: was 'git brand --delete' — "brand" is not a git command, so the
# alias could never have worked.
alias gbrm='git branch --delete'
alias gf='git fetch'
alias gpr='git pull --rebase'
alias gfp='git fetch && git pull'
alias gst='git stash'
alias gsp='git stash pop'
alias uuidgen='uuidgen | tr "[:upper:]" "[:lower:]"'
alias ghpr='gh pr checkout'
# typo
alias al=':'
alias a=':'
alias .s='ls'
alias claer="clear"
alias im='vim'
shopt -s autocd
set -o emacs
# Forbid overwriting existing files via redirection; use >| to override.
# Fixed: was "set -C noclobber" — the -C flag already enables noclobber,
# and the trailing word was silently assigned to positional parameter $1.
set -o noclobber
[ -f ~/.env ] && source ~/.env
[ -f ~/.fzf.bash ] && source ~/.fzf.bash
[ -f "$HOME/.cargo/env" ] && source "$HOME/.cargo/env"
# stop ctrl-s panic
# (disable XOFF so Ctrl-S doesn't freeze the terminal)
stty stop undef
# pnpm
export PNPM_HOME="${HOME}/Library/pnpm"
export PATH="$PNPM_HOME:$PATH"
# pnpm end
# Fig post block. Keep at the bottom of this file.
[[ -f "$HOME/.fig/shell/bashrc.post.bash" ]] && builtin source "$HOME/.fig/shell/bashrc.post.bash"
| true
|
ac0a51b2e824814e58deb89197d13c8326fc65cb
|
Shell
|
bu-ist/bu-traffic-splitter
|
/tests/scripts/case1.sh
|
UTF-8
| 533
| 3.09375
| 3
|
[] |
no_license
|
#!/bin/bash
# Container must work fine when DEFAULT_URL, ALTERNATE_URL and ALTERNATE_MASK are set
# Test case 1: bring the compose stack up, verify two known paths are served
# by the expected backends, then tear the stack down. Exits 1 on mismatch.
yml="docker-compose.case1.yml"
# Build and start the compose stack in the background.
function up() {
docker-compose -f $yml up -d --build
}
# Stop and remove the compose stack.
function down() {
docker-compose -f $yml down
}
up
# file1 must come from backend1 and file2 from backend2 (split routing).
file1=`curl -sS localhost:3000/file1.txt`
if [[ $file1 != "backend1_file1" ]]; then
echo "Wrong output: $file1"
down
exit 1
fi
file2=`curl -sS localhost:3000/file2.txt`
if [[ $file2 != "backend2_file2" ]]; then
echo "Wrong output: $file2"
down
exit 1
fi
down
| true
|
d7591c5b9c9615ff33720258ce63fe1bdb6c74a8
|
Shell
|
dgmccain/COP2341_Valencia
|
/My Scripts/Project #11 using grep within a directory to search many files/grepOutsideDesiredDirectory.sh
|
UTF-8
| 919
| 3.4375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
#******************
#* Project #11 *
#* David McCain *
#******************
# Recursively (-R), case-insensitively (-i) search the hard-coded directory
# for the string 'coffee', printing each matching file and line.
grep -Ri 'coffee' /home/david/Desktop/2015_ACARS
#The '*' should be where the directory path is entered, or else the current directory will be used by default.
#This means that the best way to utilize this script is to open terminal and go to the directory where the
#desired files are located where you should then enter this command manually into the terminal itself. Otherwise,
#this script will be included when the 'coffee' grep command is run.
#------------------------------------------------------------------------------------------------------------
#The -i option disregards case sensitivity for 'coffee' and -r searches the current directory for
#the string. The file location and lines found will automatically be printed. the -n can be also
#be added to print out the line numbers where 'coffee' was found...
| true
|
d9f91030acbf05f672616ef3ab53d73144bc6616
|
Shell
|
ggfeng/micro_os
|
/yodalite/build.sh
|
UTF-8
| 3,673
| 3.71875
| 4
|
[] |
no_license
|
#!/bin/bash
# Top-level build driver for yodalite: parses the command line, delegates to
# a per-SoC build script (esp32/stm32/kenobi/luke/xr871/xr872) sourced from
# products/common/modules, then packages the resulting images.
set -o errexit
usage() {
cat <<USAGE
Usage:
bash $0 -p <PRODUCT> -f <IMG_FOLDER> -n <IMG_NAME> [OPTIONS]
eg: ./build.sh -p esp32_lyrat -n esp32_lyrat -f esp32_lyrat -r
Description:
Builds Openwrt for given PRODUCT
OPTIONS:
-d, --debug
Enable debugging - change Debug/release image
-c, --clean
Make distclean
-r, --remove
Make clean
-h, --help
Display this help message
-p, --product
The product name (openwrt/configs/<PRODUCT>_defconfig, eg: leo_k18_universal_node)
-f, --folder
Image folder (eg:leo-k18-universal)
-n, --name
Image name (eg:openwrt-leo-k18-universal)
-j, --jobs
Specifies the number of jobs to run simultaneously (Default: 8)
USAGE
}
# Set defaults
CLEAN_OUTPUT_PRODUCT_DIR=
REMOVE_OUTPUT_PRODUCT_DIR=
PACKAGES_TO_CLEAN=
PRODUCT=
RELEASE_VERSION=
ROOT_DIR=$(pwd)
DEBUG="false"
# Setup getopt.
long_opts="debug,clean,folder:,help,jobs:,product:,name:,module:,solid_filesystem:"
getopt_cmd=$(getopt -o dcrf:hj:p:n:m:s: --long "$long_opts" \
-n $(basename $0) -- "$@") || \
{ echo -e "\nERROR: Getopt failed. Extra args\n"; usage; exit 1;}
eval set -- "$getopt_cmd"
while true; do
case "$1" in
-d|--debug) DEBUG="true";;
-c|--clean) CLEAN_OUTPUT_PRODUCT_DIR="true";;
# Fixed: the argument-taking -m and -f branches were missing the extra
# shift every other argument-taking option performs; the leftover value
# only fell through harmlessly by matching no case arm.
-m|--module) PACKAGES_TO_CLEAN=$(echo $2 | tr "," "\n"); shift;;
-r|--remove) REMOVE_OUTPUT_PRODUCT_DIR="true";;
-f|--folder) IMG_FOLDER="$2"; shift;;
-h|--help) usage; exit 0;;
-j|--jobs) JOBS="$2"; shift;;
-p|--product) PRODUCT="$2"; shift;;
-n|--name) IMG_NAME="$2"; shift;;
-s|--solid_filesystem) BUILD_ROOT_FILESYSTEM="$2"; shift;;
--) shift; break;;
esac
shift
done
#============================================================
# prepare build enviroment
#===========================================================
# -p is mandatory; everything else is optional or defaulted.
if [ -z $PRODUCT ];then
usage
exit 1
else
echo "PRODUCT=${PRODUCT}"
fi
#============================================================
# default build all modules, make all image and save stuff
#============================================================
SCRIPTS_DIR=${ROOT_DIR}/products/common/modules
echo "${JOBS}"
# Each SoC script defines the helper functions used further below
# (clean_output_product_dir, enviroment, build_fullimage, ...).
if [[ ${PRODUCT} =~ esp32 ]]; then
echo "source ${SCRIPTS_DIR}/esp32.sh"
source ${SCRIPTS_DIR}/esp32.sh ${PRODUCT} ${DEBUG} ${IMG_NAME} ${IMG_FOLDER} ${JOBS}
elif [[ ${PRODUCT} =~ stm32 ]]; then
echo "source ${SCRIPTS_DIR}/stm32.sh"
source ${SCRIPTS_DIR}/stm32.sh ${PRODUCT} ${DEBUG} ${IMG_NAME} ${IMG_FOLDER} ${JOBS}
elif [[ ${PRODUCT} =~ kenobi ]]; then
echo "source ${SCRIPTS_DIR}/kenobi.sh"
source ${SCRIPTS_DIR}/kenobi.sh ${PRODUCT} ${DEBUG} ${IMG_NAME} ${IMG_FOLDER} ${JOBS}
elif [[ ${PRODUCT} =~ luke ]]; then
echo "source ${SCRIPTS_DIR}/luke.sh"
source ${SCRIPTS_DIR}/luke.sh ${PRODUCT} ${DEBUG} ${IMG_NAME} ${IMG_FOLDER} ${JOBS}
elif [[ ${PRODUCT} =~ xr871 ]]; then
echo "source ${SCRIPTS_DIR}/xr871.sh"
source ${SCRIPTS_DIR}/xr871.sh ${PRODUCT} ${DEBUG} ${IMG_NAME} ${IMG_FOLDER} ${JOBS}
elif [[ ${PRODUCT} =~ xr872 ]]; then
echo "source ${SCRIPTS_DIR}/xr872.sh"
source ${SCRIPTS_DIR}/xr872.sh ${PRODUCT} ${DEBUG} ${IMG_NAME} ${IMG_FOLDER} ${JOBS}
else
echo "${PRODUCT} is current unsupported"
fi
if [ "${CLEAN_OUTPUT_PRODUCT_DIR}" = "true" ]; then
clean_output_product_dir
fi
# NOTE(review): "enviroment" is presumably a function (with this spelling)
# provided by the sourced SoC script — do not "fix" the name here.
enviroment ${PRODUCT}
if [ "${REMOVE_OUTPUT_PRODUCT_DIR}" = "true" ]; then
remove_output_product_dir
fi
build_fullimage
build_otaimage
build_ftpfiles
if [[ ${PRODUCT} =~ esp32 || ${PRODUCT} =~ kenobi || ${PRODUCT} =~ xr871 || ${PRODUCT} =~ xr872 ]]; then
rokid_package
fi
check_exit
| true
|
5f3abf46b78cf6c3ca616f8292c6f1eddeac5059
|
Shell
|
Desenvolvimento-DGI/Scripts
|
/envisat/bin/contabiliza_dados_Satelite.sh
|
UTF-8
| 1,623
| 3.515625
| 4
|
[] |
no_license
|
#!/bin/bash -x
# Counts the number of files inside the L0 tree for a given satellite #
# Author: Jose Neto
# Date: 10/2014
# Directories #
dir_exe='/home/cdsr/bin'
# L0 directory prefix #
dir_L0='/L0_'
total=0
valor=0
ano_teste='vazio'
# Check the input parameter - the satellite name #
if [ $1 ]
then
satelite=$1
# Determine sensor and filename-prefix for the selected satellite #
case "$satelite" in
NOAA12)
sensor='HRPT';
pesquisa='NOAA'
;;
NOAA14)
sensor='HRPT'
pesquisa='NOAA'
;;
NOAA15)
sensor='HRPT'
pesquisa='NOAA'
;;
NOAA16)
sensor='HRPT'
pesquisa='NOAA'
;;
NOAA17)
sensor='HRPT'
pesquisa='NOAA'
;;
NOAA18)
sensor='HRPT'
pesquisa='NOAA'
;;
NOAA19)
sensor='HRPT'
pesquisa='NOAA'
;;
AQUA)
sensor='MODIS'
pesquisa='AQUA'
;;
TERRA)
sensor='MODIS'
pesquisa='TERRA'
;;
NPP)
sensor='VIIRS'
pesquisa='NPP'
;;
esac
# L0 directory for this satellite
dir_L0=$dir_L0''$satelite
# List the year_month subdirectories holding the data #
cd $dir_L0
ls > $dir_exe'/listaSat_ano-mes'
echo 'fim' >> $dir_exe'/listaSat_ano-mes'
# Read the list; entries are "<year>_<month>" names, terminated by the
# sentinel line "fim" ("end") appended above.
while read ano_mes
do
ano=`echo $ano_mes | cut -d "_" -f1`
# On a year change, flush the running total for the previous year.
if [ $ano != $ano_teste ]
then
# NOTE(review): $mes is never defined anywhere in this script, so
# variavel_ano is always assigned an empty value — confirm intent.
variavel_ano=$mes
echo 'Total' $ano_teste ':' $total >> total$satelite
total=0
valor=0
fi
ano_teste=$ano
dir_L0_2=$dir_L0'/'$ano_mes'/'$sensor'/'
# Check whether the directory exists #
#if [ -e $dir_L0_2 ]
#then
if [ $ano == 'fim' ]
then
exit
else
# Count files whose names start with the satellite's prefix.
cd $dir_L0_2
valor=`ls $pesquisa* | wc -l`
cd $dir_exe
total=$(($valor+$total))
fi
#fi
done < $dir_exe'/listaSat_ano-mes'
else
exit
fi
exit
| true
|
529b2703d0ecf805e0cce24af3f83bd9169637ff
|
Shell
|
mtalexan/modular-scripts
|
/newmsdk.sh
|
UTF-8
| 5,366
| 4.1875
| 4
|
[] |
no_license
|
#!/bin/bash
# Create one or more clones of the modularSDK repo under MSDK_BASEDIR.
MSDK_BASEDIR=${HOME}/msdk_code_repos
REPO_SSH_URL=git@scm-02.karlstorz.com:VPD-SW/Dev/modularSDK.git
REPO_HTTPS_URL=https://scm-02.karlstorz.com/VPD-SW/Dev/modularSDK.git
# default clone transport is SSH; overridable via --ssh/--https below
REPO_URL=${REPO_SSH_URL}
# ANSI escapes used for colored status output
CMagentaForeground="\\033[35m"
CBold="\\033[1m"
CNone="\\033[0m"
# Print the command-line help text to stdout.
# Note: ${MSDK_BASEDIR} and $0 are expanded; \${INHERIT} is printed literally.
print_usage()
{
cat <<EOF
USAGE
 $0 [-b branch-name1 |--branch=branch-name1] [-i branch-name2 | --inherit branch-name2] name-of-clone [name-of-clone2 ...]

 Uses git-fast-clone to create a clone of the modularSDK using the normal git clone syntax and automatically runs
 git externals on it. Allows specifying the branch name to check out on the modularSDK, and an additional
 optional branch name to use when running git externals.
 Clones are created in ${MSDK_BASEDIR}
 Clones occur using SSH syntax, so SSH key must be properly configured.

ARGUMENTS
 -b branch-name1
 --branch=branch-name1
 The name of the branch to checkout on the repo-url. Fails if the branch listed doesn't exist.
 Optional argument, will use the default branch the repo specifies if not included.
 Note that if the -i or --inherit option is used and this option is not, it will try to check out
 the branch listed for that option, but will not fail if it doesn't exist.
 If this is specified when -i or --inherit are not, this will also be used as the inherit option
 when checking out the sub-repos with git-externals.
 Repeats of this option overwrite previous instances working left to right.

 -i branch-name2
 --inherit branch-name2
 The name of the branch to try checking out for all sub-repos when running git-externals.
 If a sub-repo doesn't have this branch name available, it will not produce any error.
 If this is specified but -b or --branch is not, then this branch name will also be used
 when checking out the top level repo, but unlike --branch it won't fail if a matching branch
 wasn't found.
 If -b or --branch is specified when this is not, the default for this option is the branch
 name specified to -b or --branch.
 Repeats of this option overwrite previous instances working left to right.
 WARNING: The inherit logic relies on properly configured .gitexternals files in all the repos
 that list \${INHERIT} as the first branch option.
 --ssh
 --https
 Perform the clone using the indicated syntax. The default is SSH.

 name-of-clone
 Name to create the clone under. Standard syntax for git-fast-clone on this argument applies.
 Note that this can be a path to a folder as long as all intermediate paths already exist.
 Can be repeated for additional names of clones to also create.

EOF
}
# Parse arguments with util-linux getopt so long options work.
OPTS=$(getopt -n "$(basename $0)" --options hb:i: --longoptions help,ssh,https,branch:,inherit: -- "$@")
if [ $? -ne 0 ]
then
echo "ERROR: Unable to parse arguments"
print_usage
exit 1
fi
eval set -- "$OPTS"
# Options forwarded verbatim to the newclone helper.
PASSTHRU_ARGS=()
# Positional clone names, collected after the getopt "--" marker.
NAMES=()
FREE_ARGS=false
# we don't allow empty double quote arguments so don't worry about that corner case here
while [ -n "$1" ]
do
case $1 in
-h | --help )
print_usage
exit 0
;;
-b | -i )
PASSTHRU_ARGS+=("$1 $2")
shift 2
;;
--branch | --inherit )
PASSTHRU_ARGS+=("$1=$2")
shift 2
;;
--ssh )
REPO_URL=${REPO_SSH_URL}
shift
;;
--https )
REPO_URL=${REPO_HTTPS_URL}
shift
;;
-- )
# everything after this marker is a positional argument
FREE_ARGS=true
shift
;;
* )
if ${FREE_ARGS}
then
NAMES+=("$1")
shift
else
echo "ERROR: Unrecognized argument \"$1\""
print_usage
exit 1
fi
;;
esac
done
# at least one clone name is required
if [ ${#NAMES[@]} -eq 0 ]
then
echo "ERROR: Required argument missing"
print_usage
exit 1
fi
# try to create it if we don't have one yet
mkdir -p ${MSDK_BASEDIR}
if ! cd ${MSDK_BASEDIR}
then
echo "ERROR: Root directory for all clones not found: ${MSDK_BASEDIR}"
exit 1
fi
# Tracks the in-progress clone directory; cleared again once a clone succeeds.
CLONE_TO_CLEANUP_ON_FAILURE=

# Remove the partially-created clone directory recorded in
# CLONE_TO_CLEANUP_ON_FAILURE, if any. Installed as the EXIT trap handler.
cleanup_failed_parent_clone()
{
local doomed="${CLONE_TO_CLEANUP_ON_FAILURE}"
if [ -z "${doomed}" ] || [ ! -d "${doomed}" ]
then
return 0
fi
echo "Cleaning up failed clone: ${doomed} ..."
rm -rf "${doomed}"
}
# Ensure a half-finished clone is removed on any exit path.
trap "cleanup_failed_parent_clone" EXIT
for N in ${NAMES[@]}
do
echo -e "${CMagentaForeground}${CBold}Creating new clone in ${MSDK_BASEDIR}/${N}${CNone}"
# Set this so any failure will clean up after it
CLONE_TO_CLEANUP_ON_FAILURE=${MSDK_BASEDIR}/${N}
if ! newclone ${PASSTHRU_ARGS[@]} ${REPO_URL} ${N}
then
echo "ERROR: Could not create ${N}"
exit 1
else
# we successfully cloned it, we don't need to do any further cleanup on it
CLONE_TO_CLEANUP_ON_FAILURE=
fi
done
| true
|
7544bb499c4f486ce1f6014538517bb770fc4da2
|
Shell
|
arendina/linuxfromscratch
|
/scripts/6.49.Libffi.sh
|
UTF-8
| 447
| 2.671875
| 3
|
[] |
no_license
|
#!/bin/bash
# Build and install libffi 3.2.1 from /sources (LFS-style chapter 6.49 step).
tarball='libffi-3.2.1.tar.gz'
source_dir='libffi-3.2.1'
cd /sources
tar -xvf $tarball
cd $source_dir
# Install headers into the standard include dir instead of a libdir subdir
sed -e '/^includesdir/ s/$(libdir).*$/$(includedir)/' \
-i include/Makefile.in
# Make the pkg-config file point at the standard include dir as well
sed -e '/^includedir/ s/=.*$/=@includedir@/' \
-e 's/^Cflags: -I${includedir}/Cflags:/' \
-i libffi.pc.in
./configure --prefix=/usr --disable-static --with-gcc-arch=native
make
# make check
make install
# remove the extracted source tree
cd /sources
rm -rf $source_dir
| true
|
92f95ff7e94bf7099e8c17a7ec2c14391fdfe99c
|
Shell
|
robotspace/arminix
|
/drivers/memory/ramdisk/rc
|
UTF-8
| 1,279
| 3.03125
| 3
|
[] |
no_license
|
#!/bin/sh
# Minix boot-time rc: start disk drivers, locate the root device
# (possibly a ramdisk loaded from CD), switch root, and exec /etc/rc.
set -e
# PCI bus driver must be running before the disk drivers below
/bin/service up /bin/pci -config /etc/drivers.conf
/bin/service -c up /bin/floppy -config /etc/drivers.conf -dev /dev/fd0
# choose BIOS-based or AT-style disk driver based on the boot monitor variable
if [ X`/bin/sysenv bios_wini` = Xyes ]
then
echo Using bios_wini.
/bin/service -c up /bin/bios_wini -dev /dev/c0d0
else
/bin/service -c up /bin/at_wini -dev /dev/c0d0 -config /etc/drivers.conf -label at_wini_0
/bin/service -c up /bin/at_wini -dev /dev/c1d0 -config /etc/drivers.conf -label at_wini_1 -args ata_instance=1
fi
# resolve the root device number to a device name
rootdev=`sysenv rootdev` || echo 'No rootdev?'
rootdevname=`/bin/dev2name "$rootdev"` ||
{ echo 'No device name for root device'; exit 1; }
if sysenv cdproberoot >/dev/null
then
echo
echo 'Looking for boot CD. This may take a minute.'
echo 'Please ignore any error messages.'
echo
cddev=`cdprobe` || { echo 'No CD found'; exit 1; }
export cddev
echo "Loading ramdisk from ${cddev}p1"
loadramdisk "$cddev"p1
elif [ "$rootdevname" = "/dev/ram" ]
then
# root is a ramdisk: load its image from the configured device
ramimagedev=`sysenv ramimagedev` ||
{ echo 'ramimagedev not found'; exit 1; }
ramimagename=`/bin/dev2name "$ramimagedev"` ||
{ echo 'No device name for ramimagedev'; exit 1; }
echo "Loading ramdisk from $ramimagename"
loadramdisk "$ramimagename"
fi
echo "Root device name is $rootdevname"
# switch the root filesystem, then continue with the regular rc script
/bin/newroot "$rootdevname"
exec /bin/sh /etc/rc "$@"
| true
|
ab0bf7c909dcacde159a052c4561ad8e41895d93
|
Shell
|
libKriging/libKriging
|
/.travis-ci/release/python-release.sh
|
UTF-8
| 1,376
| 3.46875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Build (and on desktop OSes smoke-test) pylibkriging release wheels
# for the current platform; tag and env come from CI.
set -eo pipefail
if [[ "$DEBUG_CI" == "true" ]]; then
set -x
fi
# windows environment requires to load special tools
loadenv_sh=".travis-ci/${BUILD_NAME}/loadenv.sh"
if [ -e "$loadenv_sh" ]; then
. "$loadenv_sh"
fi
ARCH=$(uname -s)
echo "Ready to release from $ARCH"
# a release tag is mandatory; CI must export GIT_TAG
if [ -z "${GIT_TAG}" ]; then
echo "No valid version tag found"
exit 1
fi
echo "Release tag '${GIT_TAG}' in branch '$(git branch --show-current)'"
case $ARCH in
Linux)
# build manylinux wheels inside the official PyPA container
docker run --rm \
-e ROOT_DIR=/data \
-e DEBUG_CI="${DEBUG_CI}" \
-w /data \
-v `pwd`:/data \
quay.io/pypa/manylinux2014_x86_64 /data/bindings/Python/tools/build_wheels.sh
;;
Darwin|MSYS_NT*|MINGW64_NT*)
# native wheel build, install from local dist, then run the pytest suite
python3 ./bindings/Python/setup.py bdist_wheel
pip install pylibkriging --no-index -f ./dist
pytest ./bindings/Python/tests/canary_test.py
pytest ./bindings/Python/tests/loading_test.py
pytest ./bindings/Python/tests/random_generator_test.py
pytest ./bindings/Python/tests/direct_binding_test.py
pytest ./bindings/Python/tests/one_side_carma_binding_test.py
pytest ./bindings/Python/tests/two_side_carma_binding_test.py
pytest ./bindings/Python/tests/PyLinearRegression_test.py
pytest ./bindings/Python/tests/PyKriging_parametric_test.py
;;
*)
echo "Unknown OS [$ARCH]"
exit 1
;;
esac
| true
|
c15fda5001eecc725b591722e6236c0a1d3f2f0c
|
Shell
|
frostalf/empirecraft
|
/compile.sh
|
UTF-8
| 950
| 3.65625
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Sync the fork's API/Server repos to origin/master and build with Maven.
# get base dir regardless of execution location
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ]; do # resolve $SOURCE until the file is no longer a symlink
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
SOURCE="$(readlink "$SOURCE")"
[[ "$SOURCE" != /* ]] && SOURCE="$DIR/$SOURCE" # if $SOURCE was a relative symlink, we need to resolve it relative to the path where the symlink file was located
done
SOURCE=$([[ "$SOURCE" = /* ]] && echo "$SOURCE" || echo "$PWD/${SOURCE#./}")
basedir=$(dirname "$SOURCE")
# project settings (FORK_NAME, API_REPO, SERVER_REPO) are defined here
. scripts/init.sh
# clone the API and Server repos on first run
if [ ! -d "${FORK_NAME}-API" ]; then
git clone $API_REPO ${FORK_NAME}-API
fi
if [ ! -d "${FORK_NAME}-Server" ]; then
git clone $SERVER_REPO ${FORK_NAME}-Server
fi
# hard-reset both checkouts to origin/master (local changes are discarded)
cd ${FORK_NAME}-API
git fetch origin
git reset --hard origin/master
cd ..
cd ${FORK_NAME}-Server
git fetch origin
git reset --hard origin/master
cd ..
# build unless explicitly skipped
if [ "$1" != "--nocompile" ]; then
mvn clean install
fi
| true
|
be82ffb9466629d356a187aa3e7d99382d8ef527
|
Shell
|
sherylynn/sh
|
/win-git/jump.sh
|
UTF-8
| 850
| 3.484375
| 3
|
[] |
no_license
|
#!/bin/bash
# Install the 'jump' directory navigator (bound as 'z') and hook its
# init snippet into the platform-appropriate bash startup file.
INSTALL_PATH=$HOME/tools
BASH_DIR=$INSTALL_PATH/rc
TOOLSRC_NAME=jumprc
TOOLSRC=$BASH_DIR/${TOOLSRC_NAME}
# pick the startup file used on this platform
if [[ "$(uname)" == *MINGW* ]]; then
BASH_FILE=~/.bash_profile
PLATFORM=win
elif [[ "$(uname)" == *Linux* ]]; then
BASH_FILE=~/.bashrc
PLATFORM=Linux
elif [[ "$(uname)" == *Darwin* ]]; then
BASH_FILE=~/.bash_profile
PLATFORM=MacOS
fi
#--------------------------
# Install SOFT_HOME
#--------------------------
# install jump via 'go get' if it is not already on PATH (requires Go)
if ! command -v jump ;then
if ! command -v go ;then
echo need Golang && exit
else
go get -u github.com/gsamokovarov/jump
jump import z
fi
fi
#--------------------------
if [ ! -d "${BASH_DIR}" ]; then
mkdir $BASH_DIR
fi
# source the jumprc snippet from the startup file exactly once
if [[ "$(cat ${BASH_FILE})" != *${TOOLSRC_NAME}* ]]; then
echo "test -f ${TOOLSRC} && . ${TOOLSRC}" >> ${BASH_FILE}
fi
# (re)write the snippet that binds jump's shell integration to 'z'
echo 'eval "$(jump shell --bind=z)"' > $TOOLSRC
| true
|
057c70d5cadee540ae8e3e18625723ef2d3abb83
|
Shell
|
back9ins/buildpack-loader
|
/bin/compile
|
UTF-8
| 743
| 3.453125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Buildpack shim: set up an SSH key from the env dir, clone the real
# (private) buildpack from GitHub, and delegate compilation to it.
set -e
BUILD_DIR=${1:-}
CACHE_DIR=${2:-}
ENV_DIR=${3:-}
##
# Load the SSH key from the environment.
if [ ! -d ~/.ssh ]; then
mkdir -p ~/.ssh
chmod 700 ~/.ssh
fi
# BUILDPACK_SSH_KEY is provided base64-encoded as a file in the env dir
base64 --decode $ENV_DIR/BUILDPACK_SSH_KEY > ~/.ssh/buildpack_key
chmod 400 ~/.ssh/buildpack_key
# use this key for github.com only; host key verification is disabled
echo -e "Host github.com\n"\
" IdentityFile ~/.ssh/buildpack_key\n"\
" IdentitiesOnly yes\n"\
" UserKnownHostsFile=/dev/null\n"\
" StrictHostKeyChecking no"\
>> ~/.ssh/config
##
# Clone the repo and execute.
tmpdir=$(mktemp --directory private-buildpack-XXXXXXXX)
git clone git@github.com:back9ins/buildpack "${tmpdir}"
cd "${tmpdir}"
bash ./bin/compile "${BUILD_DIR}" "${CACHE_DIR}" "${ENV_DIR}"
rm -rf "${tmpdir}"
| true
|
557f67733e04c591b5cc6c377ea92bab8c8743a3
|
Shell
|
GBruceB/nexsciTAP-1
|
/make_manylinux.sh
|
UTF-8
| 1,948
| 3.140625
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/sh
# This script is specific to building the 'manylinux' wheels
# on a Centos06 platform (in our case under Docker). Things
# like python2/python3 and auditwheel are already installed
# in the Docker image we are using.
#
# In the Docker container, Python 2.7 is installed in
#
# /opt/python/cp27-cp27m/bin
#
# Python 3.6, 3.7 and 3.8 are installed in
#
# /opt/python/cp36-cp36m/bin
# /opt/python/cp37-cp37m/bin
# /opt/python/cp38-cp38/bin
#
# All have 'python' and 'pip' but only 3.7 has 'auditwheel'.
# To avoid confusion, we'll use full paths.
# ------------------------------------------------------------
# IMPORTANT: This only works in you are in the right Docker
# container so first start Docker (using startDocker.sh) then
# you will have to navigate back to this directory from inside
# the Docker container ("cd /build").
# ------------------------------------------------------------
# NOTE: the original file began with "# /bin/sh" (a plain comment,
# not a shebang); fixed to a real shebang above.

# auditwheel is only installed for Python 3.7, so every build uses it
AUDITWHEEL=/opt/python/cp37-cp37m/bin/auditwheel

rm -rf final_dist
rm -rf final_wheel
mkdir final_dist
mkdir final_wheel

# Build a wheel with the interpreter in $1, repair it into a manylinux
# wheel with auditwheel, and collect both artifacts.
#   $1 - bin directory of the Python installation to build with
build_wheel()
{
pybin=$1
"$pybin/pip" install Cython
"$pybin/pip" install jinja2
rm -rf wheelhouse dist
"$pybin/python" setup.py build bdist_wheel
"$AUDITWHEEL" repair dist/*.whl
cp wheelhouse/* final_wheel
cp dist/* final_dist
}

# Python 3.6, 3.7, 3.8
for pybin in /opt/python/cp36-cp36m/bin /opt/python/cp37-cp37m/bin /opt/python/cp38-cp38/bin
do
build_wheel "$pybin"
done

# final cleanup of intermediate build output
rm -rf wheelhouse dist build
rm -rf TAP.egg-info
| true
|
d031996301385b11bb3f9c921d89dca736b3b0ea
|
Shell
|
ddelnano/tmux-vim-exit
|
/exit.tmux
|
UTF-8
| 309
| 3.375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# tmux plugin entry point: bind the configured key to the exit script.
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
SCRIPTS_DIR=$DIR/scripts
# NOTE(review): exit_key_binding presumably comes from helpers.sh - confirm
source $SCRIPTS_DIR/helpers.sh
source $SCRIPTS_DIR/exit.sh
# Bind the exit key; run-shell -b runs the handler in the background.
set_exit_key_binding() {
tmux bind-key "$(exit_key_binding)" run-shell -b "$SCRIPTS_DIR/exit.sh"
}
main() {
set_exit_key_binding
}
main
| true
|
b40f28618bae5a23666bcad9546561bafd5d9917
|
Shell
|
themoses/configs
|
/bash-setup/config-arch.sh
|
UTF-8
| 2,400
| 2.8125
| 3
|
[] |
no_license
|
#!/bin/sh
# This script is part 2 of bootstrapping an Archlinux installation and will be run after the arch-chroot command.
# Part 1 can be found at install-arch.sh

# Install tools
pacman -Sy reflector --noconfirm
reflector --verbose --country 'Germany' -l 200 -p https --sort rate --save /etc/pacman.d/mirrorlist
pacman -Sy vim zsh grub git --noconfirm

# Set hostname
echo lolwut-arch > /etc/hostname

# Set language
# FIX: these substitutions previously used an invalid "/s/..." address form
# (leading slash turned the 's' into an address), so nothing was replaced.
echo LANG=en_US.UTF-8 > /etc/locale.conf
sed -i 's/#en_US ISO-8859-1/en_US ISO-8859-1/' /etc/locale.gen
sed -i 's/#en_US.UTF-8/en_US.UTF-8/' /etc/locale.gen
locale-gen

# Set keyboard binding
echo KEYMAP=de_CH-latin1 > /etc/vconsole.conf

# Set timezone
ln -sf /usr/share/zoneinfo/Europe/Berlin /etc/localtime

# Enable multilib Support
# '|' delimiter because the replacement text contains slashes;
# '[' is escaped so it is matched literally.
sed -i 's|#\[multilib\]|[multilib]|' /etc/pacman.conf
sed -i 's|#SigLevel = PackageRequired TrustedOnly|SigLevel = PackageRequired TrustedOnly|' /etc/pacman.conf
sed -i 's|#Include = /etc/pacman.d/mirrorlist|Include = /etc/pacman.d/mirrorlist|' /etc/pacman.conf

# Generate initramfs
mkinitcpio -p linux

# Install bootloader
#grub-install --target=x86_64-efi --efi-directory=/boot --bootloader-id=grub
grub-install /dev/sda1
grub-mkconfig -o /boot/grub/grub.cfg

# Adding user
useradd -m -s /bin/zsh -U moses
usermod -g wheel moses
# Adding sudo access (same address-form fix as above)
sed -i 's/#%wheel ALL=(ALL) ALL/%wheel ALL=(ALL) ALL/' /etc/sudoers

# Install tools
pacman -S acpid dbus avahi cups cronie networkmanager --noconfirm
systemctl enable acpid avahi-daemon cronie

# Install Xorg
pacman -S xorg-server xorg-xinit ttf-dejavu --noconfirm

# Set Keymapping for X Session
# FIX: the previous "echo > Section ..." lines redirected output into a file
# literally named "Section"; write the whole config file with a heredoc instead.
cat > /etc/X11/xorg.conf.d/20-keyboard.conf <<'EOF'
Section "InputClass"
    Identifier "keyboard"
    MatchIsKeyboard "yes"
    Option "XkbLayout" "ch"
    Option "XkbModel" "pc105"
    Option "XkbVariant" "nodeadkeys"
EndSection
EOF

# Install Video Driver
#pacman -S nvidia --no-confirm

# Install MATE
pacman -S lightdm lightdm-gtk-greeter alsa pulseaudio-alsa mate network-manager-applet firefox --noconfirm
systemctl enable lightdm NetworkManager

# Install Steam
#pacaur -S ttf-ms-win10
|
7478fca3c4dbcbb8a7c9a684f3a4520566bb7f06
|
Shell
|
edii/ecommerce
|
/init.sh
|
UTF-8
| 1,015
| 3.78125
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Interactive maintenance menu for a Symfony app (cache, DB, assets).
ERR_MSG=""
clear
while :
do
# show the previous iteration's error, if any
if [ "$ERR_MSG" != "" ]; then
echo "Error: $ERR_MSG"
echo ""
fi
echo ""
echo "Select an option:"
echo "1) Clear all cache"
echo "2) update schema DB."
echo "3) remove db."
echo "4) created db."
echo "5) load db fixtures."
echo "6) Generate bower."
echo "7) Run gulp."
echo "8) Run all."
ERR_MSG=""
read SEL
# NOTE(review): ${SRC} is never defined in this script; the
# 'cd "${SRC}./web/admin/"' commands only work if SRC is empty or
# exported by the caller - TODO confirm.
case $SEL in
1) php app/console cache:clear; exit; ;;
2) php app/console doctrine:schema:update --force; exit; ;;
3) php app/console doctrine:database:drop --force; exit; ;;
4) php app/console doctrine:database:create; exit; ;;
5) php app/console doctrine:fixtures:load --append; exit; ;;
6) cd "${SRC}./web/admin/" && bower i -g; exit; ;;
7) cd "${SRC}./web/admin/" && gulp; exit; ;;
8)
source "${SRC}./updateDB.sh";
php app/console cache:clear;
cd "${SRC}./web/admin/" && gulp build;
exit;
;;
*) ERR_MSG="Please enter a valid option!"
esac
clear
done
|
07029d52d3890966e36aeaa6219ed607fc483517
|
Shell
|
dfelleman811/gameon
|
/bin/init_couchdb.sh
|
UTF-8
| 2,579
| 3.890625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash -x
## This is run from inside containers that require couchdb and/or cloudant
# Environment variables look like this:
# COUCHDB_SERVICE_URL=http://couchdb:5984
# COUCHDB_HOST_AND_PORT=couchdb:5984
# COUCHDB_USER=mapUser
# COUCHDB_PASSWORD=myCouchDBSecret
# GAMEON_MODE=development
# Ensure trailing slash
COUCHDB_SERVICE_URL=${COUCHDB_SERVICE_URL%%/}/
# same URL with user:password@ injected right after the scheme's "//"
AUTH_URL=${COUCHDB_SERVICE_URL/\/\//\/\/$COUCHDB_USER:$COUCHDB_PASSWORD@}
# CouchDB 2.x per-node configuration path
COUCHDB_NODE=_node/nonode@nohost/
# URL prefix used by the helpers below; may switch to AUTH_URL on 401
activeUrl=${COUCHDB_SERVICE_URL}
# optional first argument caps the number of retries (0 = retry forever)
LIMIT=0
if [ -n "$1" ]; then
LIMIT=$1
fi
# Bump the global attempt counter; once it exceeds LIMIT, report and
# abort the whole script. LIMIT <= 0 means "retry forever".
check_limit() {
count=$(( count + 1 ))
if [ "$LIMIT" -le 0 ]; then
return 0
fi
if [ "$count" -gt "$LIMIT" ]; then
echo "Exceeded ${LIMIT} attempts"
exit 1
fi
}
# Ensure the given CouchDB resource exists: GET it repeatedly (bounded by
# check_limit) and create it with PUT when the GET returns 404.
# Arguments: $1 = URI relative to activeUrl; any remaining args are passed
# through to the curl PUT that creates the resource.
ensure_exists() {
local uri=$1
local url=${activeUrl}$uri
shift
count=0
local result=0
while [ $result -ne 200 ]; do
check_limit
result=$(curl -s -o /dev/null -w "%{http_code}" --fail -X GET $url)
echo "****${count}: curl -X GET $uri ==> $result "
case "$result" in
200)
continue
;;
401) #retry with Auth URL (required after admin user added)
activeUrl=${AUTH_URL}
url=${activeUrl}$uri
;;
404)
# resource missing: create it, then loop to re-check with GET
echo "-- curl $@ -X PUT $uri ==> $result"
curl -s $@ -X PUT $url
;;
409) # conflict. Wait and try again
sleep 10
;;
*)
# any other status is fatal; re-run the GET so the error is visible
echo "unknown error with $uri";
curl -s --fail -X GET $url
exit 1
;;
esac
done
}
# Fail the script (exit 1) unless a GET on the given URI returns 200.
# Arguments: $1 = URI relative to activeUrl.
assert_exists() {
local uri=$1
local url=${activeUrl}$uri
local result=$(curl -s -o /dev/null -w "%{http_code}" --fail -X GET $url)
echo "**** curl -X GET $uri ==> $result "
if [ $result -ne 200 ]; then
# re-run the GET without silencing so the error output is shown
curl -s --fail -X GET $url
exit 1
fi
}
# RC=7 means the host isn't there yet. Let's do some re-trying until it
# does start / is ready
RC=7
count=0
while [ $RC -eq 7 ]; do
check_limit
echo "**${count}: Testing connection to ${COUCHDB_SERVICE_URL}_up"
curl -s --fail -X GET ${COUCHDB_SERVICE_URL}_up
RC=$?
if [ $RC -eq 7 ]; then
sleep 15
fi
done
# only seed databases/credentials when running in local development mode
if [ "${GAMEON_MODE}" == "development" ]
then
echo "Initializing Cloudant"
# LOCAL DEVELOPMENT!
# We do not want to ruin the cloudant admin party, but our code is written to expect
# that creds are required, so we should make sure the required user/password exist
# We also have to ensure (with Cloudant 2.x) that we're using the right node URL
ensure_exists _users
ensure_exists _replicator
ensure_exists ${COUCHDB_NODE}_config/admins/${COUCHDB_USER} -d \"${COUCHDB_PASSWORD}\"
fi
echo "ok!"
| true
|
2be0a311667e03f9bb5f760cd858b6ab68d779a5
|
Shell
|
eventuate-foundation/eventuate-cdc
|
/scripts/build-and-test-all-database-id-jsonschema.sh
|
UTF-8
| 4,290
| 2.765625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#! /bin/bash
# CI driver: runs the eventuate-cdc message-table test matrix across
# MySQL, MariaDB, Postgres and MSSQL (see the top-level sequence below).
export TERM=dumb
set -e
. ./scripts/set-env.sh
# compose command strings (defined for reference; not invoked below)
dockermysql="./gradlew mysqlonlyCompose"
dockermariadb="./gradlew mariadbonlyCompose"
dockerpostgres="./gradlew postgresonlyCompose"
dockermssql="./gradlew mssqlonlyCompose"
# Each test* function cleans and re-runs one connector test class via Gradle.
function testMysql() {
./gradlew :eventuate-local-java-cdc-connector-mysql-binlog:cleanTest eventuate-local-java-cdc-connector-mysql-binlog:test --tests io.eventuate.local.mysql.binlog.MySqlBinlogEntryReaderMessageTableTest
}
function testPolling() {
./gradlew :eventuate-local-java-cdc-connector-polling:cleanTest eventuate-local-java-cdc-connector-polling:test --tests io.eventuate.local.polling.PollingBinlogEntryReaderMessageTableTest
}
function testPostgresWal() {
./gradlew :eventuate-local-java-cdc-connector-postgres-wal:cleanTest eventuate-local-java-cdc-connector-postgres-wal:test --tests io.eventuate.local.postgres.wal.PostgresWalBinlogEntryReaderMessageTableTest
}
# Schema-migration variants: column reordering and full table recreation,
# enabled through Gradle -P properties.
function testMysqlMessageColumnsReordering() {
./gradlew -P testMessageTableColumnReordering=true :eventuate-local-java-cdc-connector-mysql-binlog:cleanTest eventuate-local-java-cdc-connector-mysql-binlog:test --tests io.eventuate.local.mysql.binlog.MySqlBinlogMessageTableColumnReorderdingTest
}
function testMysqlMessageTableRecreation() {
./gradlew -P testMessageTableRecreation=true :eventuate-local-java-cdc-connector-mysql-binlog:cleanTest eventuate-local-java-cdc-connector-mysql-binlog:test --tests io.eventuate.local.mysql.binlog.MySqlBinlogMessageTableRecreationTest
}
function testPollingMessageColumnsReordering() {
./gradlew -P testMessageTableColumnReordering=true :eventuate-local-java-cdc-connector-polling:cleanTest eventuate-local-java-cdc-connector-polling:test --tests io.eventuate.local.polling.PollingMessageTableColumnReorderdingTest
}
function testPollingMessageTableRecreation() {
./gradlew -P testMessageTableRecreation=true :eventuate-local-java-cdc-connector-polling:cleanTest eventuate-local-java-cdc-connector-polling:test --tests io.eventuate.local.polling.PollingMessageTableRecreationTest
}
function testPostgresWalMessageColumnsReordering() {
./gradlew -P testMessageTableColumnReordering=true :eventuate-local-java-cdc-connector-postgres-wal:cleanTest eventuate-local-java-cdc-connector-postgres-wal:test --tests io.eventuate.local.postgres.wal.PostgresWalMessageTableColumnReorderdingTest
}
function testPostgresWalMessageTableRecreation() {
./gradlew -P testMessageTableRecreation=true :eventuate-local-java-cdc-connector-postgres-wal:cleanTest eventuate-local-java-cdc-connector-postgres-wal:test --tests io.eventuate.local.postgres.wal.PostgresWalMessageTableRecreationTest
}
# Lifecycle helpers for the per-database docker-compose stacks.
function startContainer() {
./gradlew ${1}onlyComposeUp
}
function stopContainer() {
./gradlew ${1}onlyComposeDown
}
function restartContainer() {
stopContainer $1
startContainer $1
}
# start from a clean slate: make sure no database stacks are running
stopContainer mysql
stopContainer mariadb
stopContainer mssql
stopContainer postgres
echo "TESTING MESSAGE TABLE SCHEMA MIGRATION"
startContainer mysql
testMysqlMessageColumnsReordering
# restart between variants so each migration test sees a fresh database
restartContainer mysql
testMysqlMessageTableRecreation
stopContainer mysql
startContainer mssql
# SPRING_PROFILES_ACTIVE selects the database/reader used by the polling tests
export SPRING_PROFILES_ACTIVE=mssql,EventuatePolling
testPollingMessageColumnsReordering
restartContainer mssql
testPollingMessageTableRecreation
stopContainer mssql
unset SPRING_PROFILES_ACTIVE
startContainer postgres
testPostgresWalMessageColumnsReordering
restartContainer postgres
testPostgresWalMessageTableRecreation
stopContainer postgres
# switch the remaining tests to database ids + JSON payload/headers
export USE_DB_ID=true
export USE_JSON_PAYLOAD_AND_HEADERS=true
echo "TESTING APPLICATION GENERATION ID WITH DATABASE WITH DBID"
startContainer mysql
testMysql
stopContainer mysql
startContainer mariadb
testMysql
startContainer postgres
testPostgresWal
export SPRING_PROFILES_ACTIVE=postgres,EventuatePolling
testPolling
startContainer mssql
export SPRING_PROFILES_ACTIVE=mssql,EventuatePolling
testPolling
echo "TESTING DATABASE GENERATION ID"
unset SPRING_PROFILES_ACTIVE
# EVENTUATE_OUTBOX_ID enables database-generated ids
export EVENTUATE_OUTBOX_ID=1
testMysql
stopContainer mariadb
startContainer mysql
testMysql
testPostgresWal
export SPRING_PROFILES_ACTIVE=postgres,EventuatePolling
testPolling
export SPRING_PROFILES_ACTIVE=mssql,EventuatePolling
testPolling
# final teardown
stopContainer mysql
stopContainer mssql
stopContainer postgres
| true
|
aeb914fee0e49b3cccd5e497bf0b3fa056544ef0
|
Shell
|
byndcivilization/dotfiles
|
/config/functions.zsh
|
UTF-8
| 28,457
| 3.90625
| 4
|
[
"Vim"
] |
permissive
|
# Print the 20 most-used commands from shell history with usage counts.
function zsh_stats() {
fc -l 1 | awk '{CMD[$2]++;count++;}END { for (a in CMD)print CMD[a] " " CMD[a]/count*100 "% " a;}' | grep -v "./" | column -c3 -s " " -t | sort -nr | nl | head -n20
}
# Run oh-my-zsh's bundled uninstall script.
function uninstall_oh_my_zsh() {
env ZSH=$ZSH sh $ZSH/tools/uninstall.sh
}
# Run oh-my-zsh's bundled upgrade script.
function upgrade_oh_my_zsh() {
env ZSH=$ZSH sh $ZSH/tools/upgrade.sh
}
# mkdir -p the given path(s), then cd into the last one.
function take() {
mkdir -p $@ && cd ${@:$#}
}
# Search shell history for the given grep pattern/flags.
function hs () {
history | grep $*
}
# case-insensitive history search
alias hsi='hs -i'
# Open a file or URL with the platform-native opener (open/xdg-open/...),
# detached from the terminal so closing the shell doesn't kill it.
function open_command() {
local open_cmd
# define the open command
case "$OSTYPE" in
darwin*) open_cmd='open' ;;
cygwin*) open_cmd='cygstart' ;;
linux*) ! [[ $(uname -a) =~ "Microsoft" ]] && open_cmd='xdg-open' || {
# WSL: delegate to Windows and convert the path with wslpath
open_cmd='cmd.exe /c start ""'
[[ -e "$1" ]] && { 1="$(wslpath -w "${1:a}")" || return 1 }
} ;;
msys*) open_cmd='start ""' ;;
*) echo "Platform $OSTYPE not supported"
return 1
;;
esac
# don't use nohup on OSX
if [[ "$OSTYPE" == darwin* ]]; then
${=open_cmd} "$@" &>/dev/null
else
nohup ${=open_cmd} "$@" &>/dev/null
fi
}
#
# Get the value of an alias.
#
# Arguments:
#    1. alias - The alias to get its value from
# STDOUT:
#    The value of alias $1 (if it has one).
# Return value:
#    0 if the alias was found,
#    1 if it does not exist
#
function alias_value() {
# (( $+aliases[$1] )) is true when $1 is a key of zsh's $aliases map
(( $+aliases[$1] )) && echo $aliases[$1]
}
#
# Try to get the value of an alias,
# otherwise return the input.
#
# Arguments:
#    1. alias - The alias to get its value from
# STDOUT:
#    The value of alias $1, or $1 if there is no alias $1.
# Return value:
#    Always 0
#
function try_alias_value() {
alias_value "$1" || echo "$1"
}
#
# Set variable "$1" to default value "$2" if "$1" is not yet defined.
#
# Arguments:
#    1. name - The variable to set
#    2. val  - The default value
# Return value:
#    0 if the variable exists, 3 if it was set
#
function default() {
test `typeset +m "$1"` && return 0
typeset -g "$1"="$2" && return 3
}
#
# Set environment variable "$1" to default value "$2" if "$1" is not yet defined.
#
# Arguments:
#    1. name - The env variable to set
#    2. val  - The default value
# Return value:
#    0 if the env variable exists, 3 if it was set
#
function env_default() {
env | grep -q "^$1=" && return 0
export "$1=$2" && return 3
}
# Required for $langinfo
zmodload zsh/langinfo
# URL-encode a string
#
# Encodes a string using RFC 2396 URL-encoding (%-escaped).
# See: https://www.ietf.org/rfc/rfc2396.txt
#
# By default, reserved characters and unreserved "mark" characters are
# not escaped by this function. This allows the common usage of passing
# an entire URL in, and encoding just special characters in it, with
# the expectation that reserved and mark characters are used appropriately.
# The -r and -m options turn on escaping of the reserved and mark characters,
# respectively, which allows arbitrary strings to be fully escaped for
# embedding inside URLs, where reserved characters might be misinterpreted.
#
# Prints the encoded string on stdout.
# Returns nonzero if encoding failed.
#
# Usage:
#  omz_urlencode [-r] [-m] [-P] <string>
#
#    -r causes reserved characters (;/?:@&=+$,) to be escaped
#
#    -m causes "mark" characters (_.!~*''()-) to be escaped
#
#    -P causes spaces to be encoded as '%20' instead of '+'
function omz_urlencode() {
emulate -L zsh
zparseopts -D -E -a opts r m P
local in_str=$1
local url_str=""
local spaces_as_plus
if [[ -z $opts[(r)-P] ]]; then spaces_as_plus=1; fi
local str="$in_str"
# URLs must use UTF-8 encoding; convert str to UTF-8 if required
local encoding=$langinfo[CODESET]
local safe_encodings
safe_encodings=(UTF-8 utf8 US-ASCII)
if [[ -z ${safe_encodings[(r)$encoding]} ]]; then
str=$(echo -E "$str" | iconv -f $encoding -t UTF-8)
if [[ $? != 0 ]]; then
echo "Error converting string from $encoding to UTF-8" >&2
return 1
fi
fi
# Use LC_CTYPE=C to process text byte-by-byte
local i byte ord LC_ALL=C
export LC_ALL
local reserved=';/?:@&=+$,'
local mark='_.!~*''()-'
# character class of bytes that pass through unescaped
local dont_escape="[A-Za-z0-9"
if [[ -z $opts[(r)-r] ]]; then
dont_escape+=$reserved
fi
# $mark must be last because of the "-"
if [[ -z $opts[(r)-m] ]]; then
dont_escape+=$mark
fi
dont_escape+="]"
# Implemented to use a single printf call and avoid subshells in the loop,
# for performance (primarily on Windows).
local url_str=""
for (( i = 1; i <= ${#str}; ++i )); do
byte="$str[i]"
if [[ "$byte" =~ "$dont_escape" ]]; then
url_str+="$byte"
else
if [[ "$byte" == " " && -n $spaces_as_plus ]]; then
url_str+="+"
else
# zsh arithmetic with [##16]: ordinal of the byte rendered in hex
ord=$(( [##16] \#byte ))
url_str+="%$ord"
fi
fi
done
echo -E "$url_str"
}
# URL-decode a string
#
# Decodes a RFC 2396 URL-encoded (%-escaped) string.
# This decodes the '+' and '%' escapes in the input string, and leaves
# other characters unchanged. Does not enforce that the input is a
# valid URL-encoded string. This is a convenience to allow callers to
# pass in a full URL or similar strings and decode them for human
# presentation.
#
# Outputs the encoded string on stdout.
# Returns nonzero if encoding failed.
#
# Usage:
#   omz_urldecode <urlstring>  - prints decoded string followed by a newline
function omz_urldecode {
emulate -L zsh
local encoded_url=$1
# Work bytewise, since URLs escape UTF-8 octets
local caller_encoding=$langinfo[CODESET]
local LC_ALL=C
export LC_ALL
# Change + back to ' '
local tmp=${encoded_url:gs/+/ /}
# Protect other escapes to pass through the printf unchanged
tmp=${tmp:gs/\\/\\\\/}
# Handle %-escapes by turning them into `\xXX` printf escapes
tmp=${tmp:gs/%/\\x/}
local decoded
# $'...' interprets the \xXX escapes built above
eval "decoded=\$'$tmp'"
# Now we have a UTF-8 encoded string in the variable. We need to re-encode
# it if caller is in a non-UTF-8 locale.
local safe_encodings
safe_encodings=(UTF-8 utf8 US-ASCII)
if [[ -z ${safe_encodings[(r)$caller_encoding]} ]]; then
decoded=$(echo -E "$decoded" | iconv -f UTF-8 -t $caller_encoding)
if [[ $? != 0 ]]; then
echo "Error converting string from UTF-8 to $caller_encoding" >&2
return 1
fi
fi
echo -E "$decoded"
}
# Simple Calculator
# Evaluates the given arithmetic expression with bc (10 decimal places).
calc() {
local result=""
#                                      ┌─ default (when --mathlib is used) is 20
result="$( printf "scale=10;%s\n" "$*" | bc --mathlib | tr -d "\\\n" )"
#                         remove the tailing "\" and "\n" ─┘
#                         (large numbers are printed on multiple lines)
if [[ "$result" == *.* ]]; then
# Improve the output for decimal numbers
printf "%s" "$result" |
sed -e "s/^\./0./"       # add "0" for cases like ".5"` \
-e "s/^-\./-0./"     # add "0" for cases like "-.5"`\
-e "s/0*$//;s/\.$//" # remove tailing zeros
else
printf "%s" "$result"
fi
printf "\n"
}
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Change working directory to the top-most Finder window location (macOS)
function cdf() { # short for `cdfinder`
cd "$(osascript -e 'tell app "Finder" to POSIX path of (insertion location as alias)')";
}
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Create a data URI from a file.
# Prints "data:<mime>;base64,<payload>" on stdout; text types get an
# explicit UTF-8 charset appended.
datauri() {
  local mimeType=""
  if [ ! -f "$1" ]; then
    printf "%s is not a file.\n" "$1"
    return
  fi
  # -b: do not prepend the filename to the output
  mimeType=$(file -b --mime-type "$1")
  case "$mimeType" in
    text/*) mimeType="$mimeType;charset=utf-8" ;;
  esac
  printf "data:%s;base64,%s" \
    "$mimeType" \
    "$(openssl base64 -in "$1" | tr -d "\n")"
}
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Recursively delete regular files under the current directory that match
# the given glob (default: *.DS_Store), listing each file as it is removed.
delete-files() {
  local pattern
  pattern="${1:-*.DS_Store}"
  find . -type f -name "$pattern" -ls -delete
}
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Get gzip information (gzipped file size + reduction size)
#
# Prints the original size, the gzip-compressed size and the percentage
# reduction for a single file. Depends on the sibling `hrfs` helper
# (defined later in this file) for human-readable sizes.
gz() {
  declare -i gzippedSize=0
  declare -i originalSize=0
  if [ -f "$1" ]; then
    if [ -s "$1" ]; then
      originalSize=$( wc -c < "$1" )
      printf "\n original size: %12s\n" "$(hrfs "$originalSize")"
      # compress to stdout only — the file on disk is never modified
      gzippedSize=$( gzip -c "$1" | wc -c )
      printf " gzipped size: %12s\n" "$(hrfs "$gzippedSize")"
      printf " ─────────────────────────────\n"
      printf " reduction: %12s [%s%%]\n\n" \
      "$( hrfs $((originalSize - gzippedSize)) )" \
      "$( printf "%s %s" "$originalSize $gzippedSize" | \
      awk '{ printf "%.1f", 100 - $2 * 100 / $1 }' | \
      sed -e "s/0*$//;s/\.$//" )"
      # └─ remove tailing zeros
    else
      printf "\"%s\" is empty.\n" "$1"
    fi
  else
    printf "\"%s\" is not a file.\n" "$1"
  fi
}
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Human readable file size
# (because `du -h` doesn't cut it for me)
#
# Takes a byte count as $1 and prints e.g. "1,023.2 KB". Non-numeric
# input produces no output (the awk numeric check below fails silently).
hrfs() {
  printf "%s" "$1" |
  awk '{
      i = 1;
      split("B KB MB GB TB PB EB ZB YB WTFB", v);
      value = $1;
      # confirm that the input is a number
      if ( value + .0 == value ) {
      while ( value >= 1024 ) {
      value/=1024;
      i++;
      }
      if ( value == int(value) ) {
      printf "%d %s", value, v[i]
      } else {
      printf "%.1f %s", value, v[i]
      }
      }
  }' |
  # sed loop label ":l" keeps inserting commas until none are needed
  sed -e ":l" \
  -e "s/\([0-9]\)\([0-9]\{3\}\)/\1,\2/; t l"
  # └─ add thousands separator
  # (changes "1023.2 KB" to "1,023.2 KB")
}
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Create new directories and enter the first one.
#
# Usage: mkd <dir> [<dir>...]
mkd() {
  if [ -n "$*" ]; then
    mkdir -p "$@" || return 1
    #        └─ make parent directories if needed
    cd "$1" || return 1
    # └─ the original ran `cd "$@"`, which fails with more than one
    #    argument, and used `exit 1`, which would terminate the caller's
    #    interactive shell; enter only the first directory and `return`.
  fi
}
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Search the shell history file for a pattern and page through the matches.
# --color=always keeps the ANSI escapes when piping into less;
# less -R renders those escapes, -X leaves the screen intact on quit.
qh() {
  local pattern="$*"
  grep --color=always "$pattern" "$HISTFILE" | less -RX
}
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Search for text within the current directory
qt() {
grep -ir --color=always "$*" . | less -RX
# │└─ search all files under each directory, recursively
# └─ ignore case
}
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Create a .tar.gz archive, using `zopfli`, `pigz` or `gzip` for compression
#
# Usage: targz <path>
# NOTE(review): with multiple arguments, ${@%/} names the archive after
# all of them joined by spaces — presumably intended for a single path;
# verify before relying on multi-argument calls.
function targz() {
  local tmpFile="${@%/}.tar";
  tar -cvf "${tmpFile}" --exclude=".DS_Store" "${@}" || return 1;
  # Both stat variants are attempted; exactly one succeeds per platform.
  size=$(
  stat -f"%z" "${tmpFile}" 2> /dev/null; # OS X `stat`
  stat -c"%s" "${tmpFile}" 2> /dev/null # GNU `stat`
  );
  local cmd="";
  if (( size < 52428800 )) && hash zopfli 2> /dev/null; then
    # the .tar file is smaller than 50 MB and Zopfli is available; use it
    cmd="zopfli";
  else
    if hash pigz 2> /dev/null; then
      cmd="pigz";
    else
      cmd="gzip";
    fi;
  fi;
  echo "Compressing .tar using \`${cmd}\`…";
  "${cmd}" -v "${tmpFile}" || return 1;
  # the compressor replaces foo.tar with foo.tar.gz; remove any leftover
  [ -f "${tmpFile}" ] && rm "${tmpFile}";
  echo "${tmpFile}.gz created successfully.";
}
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Determine size of a file or total size of a directory.
# With arguments, reports each one; with none, reports every entry in
# the current directory (including dotfiles).
function fs() {
  local arg
  if du -b /dev/null > /dev/null 2>&1; then
    arg=-sbh;    # GNU du: apparent (byte) size, human readable
  else
    arg=-sh;     # BSD du has no -b; fall back to block-based size
  fi
  if (( $# > 0 )); then
    # └─ the original tested `[[ -n "$@" ]]`, which is true even with
    #    zero arguments (`-n` alone is just a non-empty string), so the
    #    no-argument branch below was unreachable.
    du $arg -- "$@";
  else
    du $arg .[^.]* *;
  fi;
}
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Use Git’s colored diff when available; otherwise the system `diff`
# remains untouched.
if hash git &> /dev/null; then
  function diff() {
    git diff --no-index --color-words "$@";
  }
fi
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# git log with per-commit cmd-clickable GitHub URLs (iTerm)
#
# Builds the awk program via a heredoc so $user_repo can be interpolated
# while awk's own $1/$2 fields stay escaped (\$1, \$2).
function gf() {
  # push URL of "origin", e.g. git@github.com:user/repo.git
  local remote="$(git remote -v | awk '/^origin.*\(push\)$/ {print $2}')"
  [[ "$remote" ]] || return
  # strip the host prefix and the trailing .git to get "user/repo"
  local user_repo="$(echo "$remote" | perl -pe 's/.*://;s/\.git$//')"
  git log $* --name-status --color | awk "$(cat <<AWK
/^.*commit [0-9a-f]{40}/ {sha=substr(\$2,1,7)}
/^[MA]\t/ {printf "%s\thttps://github.com/$user_repo/blob/%s/%s\n", \$1, sha, \$2; next}
/.*/ {print \$0}
AWK
)" | less -F
}
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Gets the difference between the local and remote branches
#
# NOTE(review): zsh-only — uses the ${${...}/...} nested expansion and
# vcs_info's hook_com[branch]. Emits a theme-configurable prompt fragment
# describing whether the branch is ahead of / behind / diverged from its
# upstream.
function git_remote_status() {
    local remote ahead behind git_remote_status git_remote_status_detailed
    # upstream ref with the "refs/remotes/" prefix stripped
    remote=${$(command git rev-parse --verify ${hook_com[branch]}@{upstream} --symbolic-full-name 2>/dev/null)/refs\/remotes\/}
    if [[ -n ${remote} ]]; then
        ahead=$(command git rev-list ${hook_com[branch]}@{upstream}..HEAD 2>/dev/null | wc -l)
        behind=$(command git rev-list HEAD..${hook_com[branch]}@{upstream} 2>/dev/null | wc -l)
        if [[ $ahead -eq 0 ]] && [[ $behind -eq 0 ]]; then
            git_remote_status="$ZSH_THEME_GIT_PROMPT_EQUAL_REMOTE"
        elif [[ $ahead -gt 0 ]] && [[ $behind -eq 0 ]]; then
            git_remote_status="$ZSH_THEME_GIT_PROMPT_AHEAD_REMOTE"
            git_remote_status_detailed="$ZSH_THEME_GIT_PROMPT_AHEAD_REMOTE_COLOR$ZSH_THEME_GIT_PROMPT_AHEAD_REMOTE$((ahead))%{$reset_color%}"
        elif [[ $behind -gt 0 ]] && [[ $ahead -eq 0 ]]; then
            git_remote_status="$ZSH_THEME_GIT_PROMPT_BEHIND_REMOTE"
            git_remote_status_detailed="$ZSH_THEME_GIT_PROMPT_BEHIND_REMOTE_COLOR$ZSH_THEME_GIT_PROMPT_BEHIND_REMOTE$((behind))%{$reset_color%}"
        elif [[ $ahead -gt 0 ]] && [[ $behind -gt 0 ]]; then
            git_remote_status="$ZSH_THEME_GIT_PROMPT_DIVERGED_REMOTE"
            git_remote_status_detailed="$ZSH_THEME_GIT_PROMPT_AHEAD_REMOTE_COLOR$ZSH_THEME_GIT_PROMPT_AHEAD_REMOTE$((ahead))%{$reset_color%}$ZSH_THEME_GIT_PROMPT_BEHIND_REMOTE_COLOR$ZSH_THEME_GIT_PROMPT_BEHIND_REMOTE$((behind))%{$reset_color%}"
        fi
        # optional verbose form: "<prefix><remote><counts><suffix>"
        if [[ -n $ZSH_THEME_GIT_PROMPT_REMOTE_STATUS_DETAILED ]]; then
            git_remote_status="$ZSH_THEME_GIT_PROMPT_REMOTE_STATUS_PREFIX$remote$git_remote_status_detailed$ZSH_THEME_GIT_PROMPT_REMOTE_STATUS_SUFFIX"
        fi
        echo $git_remote_status
    fi
}
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Outputs the name of the current branch
# Usage example: git pull origin $(git_current_branch)
# Using '--quiet' with 'symbolic-ref' will not cause a fatal error (128) if
# it's not a symbolic ref, but in a Git repo.
function git_current_branch() {
  local ref rc
  ref=$(command git symbolic-ref --quiet HEAD 2> /dev/null)
  rc=$?
  if (( rc != 0 )); then
    # 128 means "not in a git repo": print nothing.
    if (( rc == 128 )); then
      return
    fi
    # Otherwise HEAD is detached — fall back to the short SHA.
    ref=$(command git rev-parse --short HEAD 2> /dev/null) || return
  fi
  echo ${ref#refs/heads/}
}
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Gets the number of commits ahead from remote
function git_commits_ahead() {
  if command git rev-parse --git-dir &>/dev/null; then
    local n
    n="$(git rev-list --count @{upstream}..HEAD 2>/dev/null)"
    if [[ -n "$n" && "$n" != 0 ]]; then
      echo "$ZSH_THEME_GIT_COMMITS_AHEAD_PREFIX$n$ZSH_THEME_GIT_COMMITS_AHEAD_SUFFIX"
    fi
  fi
}
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Gets the number of commits behind remote
function git_commits_behind() {
  if command git rev-parse --git-dir &>/dev/null; then
    local n
    n="$(git rev-list --count HEAD..@{upstream} 2>/dev/null)"
    if [[ -n "$n" && "$n" != 0 ]]; then
      echo "$ZSH_THEME_GIT_COMMITS_BEHIND_PREFIX$n$ZSH_THEME_GIT_COMMITS_BEHIND_SUFFIX"
    fi
  fi
}
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Outputs if current branch is ahead of remote
function git_prompt_ahead() {
  local commits_ahead
  commits_ahead="$(command git rev-list origin/$(git_current_branch)..HEAD 2> /dev/null)"
  if [[ -n "$commits_ahead" ]]; then
    echo "$ZSH_THEME_GIT_PROMPT_AHEAD"
  fi
}
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Outputs if current branch is behind remote
function git_prompt_behind() {
  local commits_behind
  commits_behind="$(command git rev-list HEAD..origin/$(git_current_branch) 2> /dev/null)"
  if [[ -n "$commits_behind" ]]; then
    echo "$ZSH_THEME_GIT_PROMPT_BEHIND"
  fi
}
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Outputs if current branch exists on remote or not
function git_prompt_remote() {
  if [[ -z "$(command git show-ref origin/$(git_current_branch) 2> /dev/null)" ]]; then
    echo "$ZSH_THEME_GIT_PROMPT_REMOTE_MISSING"
  else
    echo "$ZSH_THEME_GIT_PROMPT_REMOTE_EXISTS"
  fi
}
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Formats prompt string for current git commit short SHA
function git_prompt_short_sha() {
  local sha
  sha=$(command git rev-parse --short HEAD 2> /dev/null) && echo "$ZSH_THEME_GIT_PROMPT_SHA_BEFORE$sha$ZSH_THEME_GIT_PROMPT_SHA_AFTER"
}
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Formats prompt string for current git commit long SHA
function git_prompt_long_sha() {
  local sha
  sha=$(command git rev-parse HEAD 2> /dev/null) && echo "$ZSH_THEME_GIT_PROMPT_SHA_BEFORE$sha$ZSH_THEME_GIT_PROMPT_SHA_AFTER"
}
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Get the status of the working tree
#
# Parses `git status --porcelain -b` once, then prepends a theme symbol
# to STATUS for each condition found. The two-character porcelain prefix
# is <index><worktree>, so e.g. '^ M ' means "modified in worktree only".
function git_prompt_status() {
  local INDEX STATUS
  INDEX=$(command git status --porcelain -b 2> /dev/null)
  STATUS=""
  # untracked files
  if $(echo "$INDEX" | command grep -E '^\?\? ' &> /dev/null); then
    STATUS="$ZSH_THEME_GIT_PROMPT_UNTRACKED$STATUS"
  fi
  # staged additions / modifications
  if $(echo "$INDEX" | grep '^A ' &> /dev/null); then
    STATUS="$ZSH_THEME_GIT_PROMPT_ADDED$STATUS"
  elif $(echo "$INDEX" | grep '^M ' &> /dev/null); then
    STATUS="$ZSH_THEME_GIT_PROMPT_ADDED$STATUS"
  fi
  # worktree modifications (incl. type changes 'T')
  if $(echo "$INDEX" | grep '^ M ' &> /dev/null); then
    STATUS="$ZSH_THEME_GIT_PROMPT_MODIFIED$STATUS"
  elif $(echo "$INDEX" | grep '^AM ' &> /dev/null); then
    STATUS="$ZSH_THEME_GIT_PROMPT_MODIFIED$STATUS"
  elif $(echo "$INDEX" | grep '^ T ' &> /dev/null); then
    STATUS="$ZSH_THEME_GIT_PROMPT_MODIFIED$STATUS"
  fi
  # renames
  if $(echo "$INDEX" | grep '^R ' &> /dev/null); then
    STATUS="$ZSH_THEME_GIT_PROMPT_RENAMED$STATUS"
  fi
  # deletions (worktree, index, or add-then-delete)
  if $(echo "$INDEX" | grep '^ D ' &> /dev/null); then
    STATUS="$ZSH_THEME_GIT_PROMPT_DELETED$STATUS"
  elif $(echo "$INDEX" | grep '^D ' &> /dev/null); then
    STATUS="$ZSH_THEME_GIT_PROMPT_DELETED$STATUS"
  elif $(echo "$INDEX" | grep '^AD ' &> /dev/null); then
    STATUS="$ZSH_THEME_GIT_PROMPT_DELETED$STATUS"
  fi
  # stash present?
  if $(command git rev-parse --verify refs/stash >/dev/null 2>&1); then
    STATUS="$ZSH_THEME_GIT_PROMPT_STASHED$STATUS"
  fi
  # merge conflicts
  if $(echo "$INDEX" | grep '^UU ' &> /dev/null); then
    STATUS="$ZSH_THEME_GIT_PROMPT_UNMERGED$STATUS"
  fi
  # branch header line ('## ...') carries ahead/behind/diverged info
  if $(echo "$INDEX" | grep '^## [^ ]\+ .*ahead' &> /dev/null); then
    STATUS="$ZSH_THEME_GIT_PROMPT_AHEAD$STATUS"
  fi
  if $(echo "$INDEX" | grep '^## [^ ]\+ .*behind' &> /dev/null); then
    STATUS="$ZSH_THEME_GIT_PROMPT_BEHIND$STATUS"
  fi
  if $(echo "$INDEX" | grep '^## [^ ]\+ .*diverged' &> /dev/null); then
    STATUS="$ZSH_THEME_GIT_PROMPT_DIVERGED$STATUS"
  fi
  echo $STATUS
}
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Compares the provided version of git to the version installed and on path
# Outputs -1, 0, or 1 if the installed version is less than, equal to, or
# greater than the input version, respectively.
#
# NOTE(review): zsh-only — ${(s/./)...} splits on '.' and arrays are
# 1-indexed, so INSTALLED_GIT_VERSION[3] is the version field of
# "git version X.Y.Z".
function git_compare_version() {
  local INPUT_GIT_VERSION INSTALLED_GIT_VERSION i
  INPUT_GIT_VERSION=(${(s/./)1})
  INSTALLED_GIT_VERSION=($(command git --version 2>/dev/null))
  INSTALLED_GIT_VERSION=(${(s/./)INSTALLED_GIT_VERSION[3]})
  # compare major, minor, patch in order; first difference decides
  for i in {1..3}; do
    if [[ $INSTALLED_GIT_VERSION[$i] -gt $INPUT_GIT_VERSION[$i] ]]; then
      echo 1
      return 0
    fi
    if [[ $INSTALLED_GIT_VERSION[$i] -lt $INPUT_GIT_VERSION[$i] ]]; then
      echo -1
      return 0
    fi
  done
  echo 0
}
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Outputs the name of the current user
# Usage example: $(git_current_user_name)
function git_current_user_name() {
  local name
  name="$(command git config user.name 2>/dev/null)" || return
  echo "$name"
}
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Outputs the email of the current user
# Usage example: $(git_current_user_email)
function git_current_user_email() {
  local email
  email="$(command git config user.email 2>/dev/null)" || return
  echo "$email"
}
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Checks if working tree is dirty
#
# Echoes $ZSH_THEME_GIT_PROMPT_DIRTY or ..._CLEAN. Honors the
# `oh-my-zsh.hide-dirty` git config and DISABLE_UNTRACKED_FILES_DIRTY.
# NOTE(review): `FLAGS+=...` appends an array *element* under zsh (this
# file's shell); under bash it would concatenate onto the first element.
# POST_1_7_2_GIT is presumably set elsewhere by git_compare_version —
# TODO confirm against the sourcing setup.
function parse_git_dirty() {
  local STATUS=''
  local FLAGS
  FLAGS=('--porcelain')
  if [[ "$(command git config --get oh-my-zsh.hide-dirty)" != "1" ]]; then
    if [[ $POST_1_7_2_GIT -gt 0 ]]; then
      # --ignore-submodules=dirty needs git > 1.7.2
      FLAGS+='--ignore-submodules=dirty'
    fi
    if [[ "$DISABLE_UNTRACKED_FILES_DIRTY" == "true" ]]; then
      FLAGS+='--untracked-files=no'
    fi
    # any porcelain output at all means the tree is dirty
    STATUS=$(command git status ${FLAGS} 2> /dev/null | tail -n1)
  fi
  if [[ -n $STATUS ]]; then
    echo "$ZSH_THEME_GIT_PROMPT_DIRTY"
  else
    echo "$ZSH_THEME_GIT_PROMPT_CLEAN"
  fi
}
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Create a data URL from a file.
# Like `datauri` above, but echoes (with trailing newline) and does not
# validate that the argument is a regular file.
function dataurl() {
  local mime
  mime="$(file -b --mime-type "$1")"
  case "$mime" in
    text/*) mime="${mime};charset=utf-8" ;;
  esac
  echo "data:${mime};base64,$(openssl base64 -in "$1" | tr -d '\n')"
}
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Create a git.io short URL.
# Usage: gitio <slug> <url>
# NOTE: GitHub has since retired the git.io service; the request itself
# is kept as-is for reference.
function gitio() {
  if [ -z "${1}" ] || [ -z "${2}" ]; then
    # └─ `[ ... -o ... ]` is obsolescent/ambiguous per POSIX; use two
    #    tests joined with ||.
    echo "Usage: \`gitio slug url\`";
    return 1;
  fi;
  curl -i http://git.io/ -F "url=${2}" -F "code=${1}";
}
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Start an HTTP server from a directory, optionally specifying the port
#
# NOTE(review): the inline program uses the Python 2 `SimpleHTTPServer`
# module, which does not exist on Python 3 — this requires a `python`
# binary that is Python 2.
function server() {
  local port="${1:-8000}";
  # give the server a second to start before opening the browser
  sleep 1 && open "http://localhost:${port}/" &
  # Set the default Content-Type to `text/plain` instead of `application/octet-stream`
  # And serve everything as UTF-8 (although not technically correct, this doesn’t break anything for binary files)
  python -c $'import SimpleHTTPServer;\nmap = SimpleHTTPServer.SimpleHTTPRequestHandler.extensions_map;\nmap[""] = "text/plain";\nfor key, value in map.items():\n\tmap[key] = value + ";charset=UTF-8";\nSimpleHTTPServer.test();' "$port";
}
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Start a PHP server from a directory, optionally specifying the port
# (Requires PHP 5.4.0+; `ipconfig getifaddr` and `open` are macOS-only.)
function phpserver() {
  local port="${1:-4000}"
  local ip
  ip="$(ipconfig getifaddr en1)"
  # open the browser once the server has had a moment to start
  sleep 1 && open "http://${ip}:${port}/" &
  php -S "${ip}:${port}"
}
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Syntax-highlight JSON strings or files
# Usage: `json '{"foo":42}'` or `echo '{"foo":42}' | json`
# NOTE: a later definition of `json` in this file re-defines (shadows)
# this one when the whole file is sourced.
function json() {
  if [ ! -t 0 ]; then
    # stdin is a pipe: highlight the streamed JSON
    python -mjson.tool | pygmentize -l javascript
  else
    # stdin is a terminal: treat the arguments as the JSON payload
    python -mjson.tool <<< "$*" | pygmentize -l javascript
  fi
}
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Run `dig` for ANY records and display only the answer section,
# multiline-formatted and without the command banner.
function digga() {
  local domain="$1"
  dig +nocmd "$domain" any +multiline +noall +answer
}
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# UTF-8-encode a string of Unicode symbols as \xNN escapes.
function escape() {
  # xxd emits one uppercase hex byte per line; the deliberately unquoted
  # substitution word-splits those into separate "%s" arguments.
  printf "\\\x%s" $(printf "$@" | xxd -p -c1 -u)
  if [ -t 1 ]; then
    # print a newline unless we’re piping the output to another program
    echo ""
  fi
}
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Decode \x{ABCD}-style Unicode escape sequences.
function unidecode() {
  perl -e "binmode(STDOUT, ':utf8'); print \"$@\""
  if [ -t 1 ]; then
    # print a newline unless we’re piping the output to another program
    echo ""
  fi
}
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Get a character’s Unicode code point, e.g. `codepoint A` -> U+0041.
function codepoint() {
  perl -e "use utf8; print sprintf('U+%04X', ord(\"$@\"))"
  if [ -t 1 ]; then
    # print a newline unless we’re piping the output to another program
    echo ""
  fi
}
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Show all the names (CNs and SANs) listed in the SSL certificate
# for a given domain
function getcertnames() {
  if [ -z "${1}" ]; then
    echo "ERROR: No domain specified.";
    return 1;
  fi;
  local domain="${1}";
  echo "Testing ${domain}…";
  echo ""; # newline
  # Fetch the cert via a raw TLS handshake; SNI set with -servername.
  local tmp=$(echo -e "GET / HTTP/1.0\nEOT" \
  | openssl s_client -connect "${domain}:443" -servername "${domain}" 2>&1);
  if [[ "${tmp}" = *"-----BEGIN CERTIFICATE-----"* ]]; then
    # Reduce the x509 dump to just the subject + SAN sections.
    local certText=$(echo "${tmp}" \
    | openssl x509 -text -certopt "no_aux, no_header, no_issuer, no_pubkey, \
    no_serial, no_sigdump, no_signame, no_validity, no_version");
    echo "Common Name:";
    echo ""; # newline
    echo "${certText}" | grep "Subject:" | sed -e "s/^.*CN=//" | sed -e "s/\/emailAddress=.*//";
    echo ""; # newline
    echo "Subject Alternative Name(s):";
    echo ""; # newline
    # split the comma-separated DNS: entries onto separate lines
    echo "${certText}" | grep -A 1 "Subject Alternative Name:" \
    | sed -e "2s/DNS://g" -e "s/ //g" | tr "," "\n" | tail -n +2;
    return 0;
  else
    echo "ERROR: Certificate not found.";
    return 1;
  fi;
}
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Open the given locations with the specified command — or the current
# directory when no locations are given. Shared helper for s/a/v/o below.
function _open_with() {
  local opener="$1"
  shift
  if [ $# -eq 0 ]; then
    "$opener" .
  else
    "$opener" "$@"
  fi
}
# `s` with no arguments opens the current directory in Sublime Text, otherwise
# opens the given location
function s() { _open_with subl "$@"; }
# `a` with no arguments opens the current directory in Atom Editor, otherwise
# opens the given location
function a() { _open_with atom "$@"; }
# `v` with no arguments opens the current directory in Vim, otherwise opens the
# given location
function v() { _open_with vim "$@"; }
# `o` with no arguments opens the current directory, otherwise opens the given
# location
function o() { _open_with open "$@"; }
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# `tre`: `tree` with hidden files and colors, skipping VCS/vendor
# directories, directories first. Paged through `less` keeping colors (R)
# and line numbers (N), without paging when it fits on one screen (F) and
# without clearing the screen afterwards (X).
function tre() {
  local ignore='.git|node_modules|bower_components'
  tree -aC -I "$ignore" --dirsfirst "$@" | less -FRNX
}
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Syntax-highlight JSON strings or files.
# (This re-definition replaces the earlier `json` in this file; it detects
# piped input via /dev/stdin being a FIFO instead of the -t 0 check.)
function json() {
  if [ -p /dev/stdin ]; then
    # piping, e.g. `echo '{"foo":42}' | json`
    python -mjson.tool | pygmentize -l javascript
    return
  fi
  # e.g. `json '{"foo":42}'`
  python -mjson.tool <<< "$*" | pygmentize -l javascript
}
| true
|
11783985189782af2d0f7c54c79743db3df22608
|
Shell
|
lamkeewei/aa-chunk
|
/webtest_ajax.sh
|
UTF-8
| 435
| 2.953125
| 3
|
[] |
no_license
|
#!/bin/bash
# Run phantomas $1 times against the ajax_$2 test page and collect CSV
# column 8 of the httpTrafficCompleted module into
# output_ajax_$2_chunks.csv.
#
# Usage: ./webtest_ajax.sh <iterations> <chunk-count>

runs=$1
chunks=$2

# Start from a clean slate. The original guarded this `rm` with
# `[ ! -f output.csv ]` — a check on a *different* file — so the old
# results were only removed when an unrelated file was missing, and `rm`
# errored when the target did not exist. `rm -f` on the real target is
# what was intended.
rm -f "output_ajax_${chunks}_chunks.csv"

phantomas "http://192.168.1.1/ajax_${chunks}.html" --modules=httpTrafficCompleted -R csv > tmp.csv
echo "Finished 1..."
for i in $(seq 2 "$runs"); do
  # subsequent runs append without repeating the CSV header
  phantomas "http://192.168.1.1/ajax_${chunks}.html" --modules=httpTrafficCompleted -R csv:no-header >> tmp.csv
  echo "Finished $i..."
done

# Extract the 8th CSV column (no need for `cat | awk`).
awk -F ',' '{ print $8}' tmp.csv > "output_ajax_${chunks}_chunks.csv"
rm tmp.csv
| true
|
e0bedd886a8b1e78127a602f9a48e934339bbcf1
|
Shell
|
mminer/dotfiles
|
/bin/ue4log
|
UTF-8
| 614
| 3.890625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Searches upwards for an Unreal project then opens its most recent log file.
while [[ "${PWD}" != "/" ]]; do
  # Find a .uproject without parsing `ls` output (which breaks on
  # filenames with spaces and concatenates multiple matches). Take the
  # first glob match, if any.
  unreal_project=""
  for candidate in *.uproject; do
    if [[ -e "${candidate}" ]]; then
      unreal_project="${candidate}"
      break
    fi
  done
  if [ -z "${unreal_project}" ]; then
    cd ..
    continue
  fi
  project_name="$(basename "${unreal_project}" .uproject)"
  log="Saved/Logs/${project_name}.log"
  if [ ! -f "${log}" ]; then
    echo "An Unreal project was found but no log file exists for it."
    exit 1
  fi
  "${EDITOR}" "${log}"
  exit 0
done
echo "No Unreal project found in this directory or parent directories."
exit 1
| true
|
fa10a91891137053c86af50281711394d109d1d1
|
Shell
|
thoutenbos/pipeline
|
/templates/Freec.sh.tt
|
UTF-8
| 1,204
| 3.03125
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# -*- TT -*-
# Template Toolkit template: every [% ... %] directive below is expanded
# by the pipeline generator before this script runs.
[% INCLUDE ErrorHandling.tt %]
[% INCLUDE Logging.tt job_name="Freec" main_step=sample_name log_name="freec.log" %]
assert_not_empty "[% sample_path %]"
[%- IF control_path %]
assert_not_empty "[% control_path %]"
[%- END %]
# Control-FREEC's helper binaries must be on PATH.
PATH=[% opt.SAMTOOLS_PATH %]:$PATH
export PATH
[% opt.FREEC_PATH %]/freec -conf "[% config_file %]"
cd "[% dirs.freec.out %]"
cnv_file="[% sample_file_name %]_CNVs"
ratio_file="[% sample_file_name %]_ratio.txt"
assert_exists "$cnv_file"
assert_not_empty "$ratio_file"
# Plot the copy-number ratios (always produced, even with no CNVs).
start_step "MAKE_GRAPH"
R --slave --args 2 "$ratio_file" < [% opt.FREEC_PATH %]/makeGraph.R
finish_step
assert_not_empty "[% sample_file_name %]_ratio.txt.png"
# Significance testing and karyotype plotting only make sense when FREEC
# actually reported copy-number variants.
if [ -s "$cnv_file" ]
then
    start_step "ASSESS_SIGNIFICANCE"
    R --slave --args "$cnv_file" "$ratio_file" < "[% opt.FREEC_PATH %]/assess_significance.R"
    finish_step
    start_step "MAKE_KARYOTYPE"
    R --slave --args 2 4 500000 "$ratio_file" < "[% opt.OUTPUT_DIR %]/scripts/makeKaryotype.R"
    finish_step
    assert_not_empty "[% sample_file_name %]_CNVs.p.value.txt" "[% sample_file_name %]_ratio_karyotype.pdf"
else
    echo "No copy number variants found, skipping post-processing."
fi
success
| true
|
6f4520c7cb3752143838f147512a323ee69290ce
|
Shell
|
samcday/linux-5.8-stuff
|
/kernel/build.sh
|
UTF-8
| 886
| 2.671875
| 3
|
[] |
no_license
|
#!/bin/sh
# created by juul
#
# Cross-compile a Linux kernel for the Kindle 4 NT (i.MX50, ARM) and
# collect zImage/uImage variants with the device tree appended.
set -e
# Convert the SDMA firmware from Intel-hex to raw binary for the build.
objcopy -I ihex -O binary firmware/imx/sdma/sdma-imx50.bin.ihex firmware/imx/sdma/sdma-imx50.bin
#make ARCH=arm CROSS_COMPILE=arm-linux-gnueabihf- imx50_kindle4nt_defconfig
make ARCH=arm CROSS_COMPILE=arm-linux-gnueabihf- -j 8
make ARCH=arm CROSS_COMPILE=arm-linux-gnueabihf- dtbs
make ARCH=arm CROSS_COMPILE=arm-linux-gnueabihf- zImage
# uImage needs the load address for U-Boot.
make ARCH=arm CROSS_COMPILE=arm-linux-gnueabihf- LOADADDR=0x70800000 uImage
mkdir -p OUTPUT
cp arch/arm/boot/zImage OUTPUT/
cp arch/arm/boot/uImage OUTPUT/
cp arch/arm/boot/dts/imx50-kindle4nt.dtb OUTPUT/
cd OUTPUT/
# Append the device tree blob for bootloaders that don't pass one.
cat zImage imx50-kindle4nt.dtb > zImage_with_dtb
cat uImage imx50-kindle4nt.dtb > uImage_with_dtb
#mkimage -a 0x70800000 -d uImage uImage_with_dtb
#cp linux-2.6.31-rt11-lab126.tar.gz OUTPUT/
#cp modules.dep OUTPUT/
echo " "
echo "Build complete. Find the results in the OUTPUT/ dir."
| true
|
9e29bba5c921b46f332d50ff49527d9fe6c26644
|
Shell
|
Dirack/Estudos
|
/Shell_script/dialog/gauge/simples/find/exemplo.sh
|
UTF-8
| 502
| 3.015625
| 3
|
[] |
no_license
|
#!/bin/bash
#
# exemplo.sh (Shell Script)
#
# Purpose: Simple example of a progress indicator (dialog/gauge demo)
# monitoring the `find` command.
#
# Site: https://dirack.github.io
#
# Version 1.0
#
# Programmer: Rodolfo A C Neves (Dirack) 31/03/2021
#
# Email: rodolfo_profissional@hotmail.com
#
# License: GPL-3.0 <https://www.gnu.org/licenses/gpl-3.0.txt>.

# Print a dot every second in the background as a crude progress indicator.
while true; do echo -n .; sleep 1; done &
spinner_pid=$!

# Stop the spinner on any exit path. The original trapped SIGKILL, which
# can never be caught, and only killed the spinner at the very end — so
# an interrupt left it running forever.
trap 'kill "$spinner_pid" 2>/dev/null' EXIT
trap 'exit 1' TERM INT

echo "Running command find"
find "$HOME" -name "*.TXT" -exec cp -f {} backup/ \;
echo done
#sleep 2
| true
|
94064a48c5b59a9bc32f540ff0c1f97a996bd0df
|
Shell
|
CharlesDDNoble/broncode
|
/spikes/Docker/current/docker-builds/build_all.sh
|
UTF-8
| 249
| 3.5625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Build every Docker image directory in the current folder whose name
# starts with "broncode" by running its ./build.sh.

DIR_SUFFIX="broncode"

# Run a directory's ./build.sh, suppressing its stdout.
# $1 - path to the image directory (e.g. "./broncode-foo")
build_image() {
  cd "$1" || return 1
  #     └─ quoted (paths with spaces would word-split) and guarded: the
  #        original ran build.sh in the *wrong* directory if cd failed.
  echo "Building ${1:2}"
  ./build.sh > /dev/null
  cd ..
}

for FILE in ./*
do
  # ${FILE:2:8} skips the leading "./" and compares the first 8 chars.
  if [ "${FILE:2:8}" == "$DIR_SUFFIX" ]
  then
    build_image "$FILE"
  fi
done

exit 0
| true
|
d0da298d753e7fc6ab2ad73227579c7bb4788a0c
|
Shell
|
shawnmanuel000/MultiAgentFootball
|
/train_tcp.sh
|
UTF-8
| 842
| 3.578125
| 4
|
[] |
no_license
|
# Launch a fleet of worker terminals running train.py, then start the
# coordinator that connects to them over TCP.
#
# Usage: ./train_tcp.sh <steps> <model> <baseport>

# Available agent types (index -> name). The original list had a stray
# "," after "ppo", which bash stored as part of the element value.
models=([0]="ddqn" [1]="ddpg" [2]="ppo" [3]="rand")
steps=$1
model=$2
baseport=$3
workers=16

# Open a new terminal window running the given command:
# macOS via AppleScript/Terminal, Linux via xterm on display :0.
open_terminal()
{
	script=$1
	if [[ "$OSTYPE" == "darwin"* ]]; then # Running on mac
		osascript <<END
tell app "Terminal" to do script "cd \"`pwd`\"; $script; exit"
END
	elif [[ "$OSTYPE" == "linux-gnu" ]]; then # Running on linux
		xterm -display ":0" -e $script $2 # Add -hold argument after xterm to debug
	fi
}

# Spawn $1 workers on consecutive ports above 8000+$baseport, wait for
# them to come up, then run the coordinator with the full port list.
run()
{
	numWorkers=$1
	steps=$2
	agent=$3
	echo "Workers: $numWorkers, Steps: $steps, Agent: $agent"
	ports=()
	for j in `seq 1 $numWorkers`
	do
		port=$((8000+$baseport+$j))
		ports+=($port)
		open_terminal "python3 -B train.py --selfport $port" &
	done
	sleep 4
	# join the port array into a single space-separated argument string
	port_string=$( IFS=$' '; echo "${ports[*]}" )
	python3 -B train.py --steps $steps --model $agent --workerports $port_string
}

run $workers $steps $model
| true
|
5b2a2c19ea2fe55b2ba1b73523187bf8f96a5351
|
Shell
|
google/oss-fuzz
|
/projects/monero/build.sh
|
UTF-8
| 1,942
| 2.828125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash -eu
# Copyright 2020 The Monero Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
################################################################################
# Build the monero fuzz targets for OSS-Fuzz and package their seed
# corpora into $OUT.
export BOOST_ROOT=/src/monero/boost_1_70_0
export OPENSSL_ROOT_DIR=/src/monero/openssl-1.1.1g
cd monero
# ccache detection breaks inside the build container; comment it out.
sed -i -e 's/include(FindCcache)/# include(FindCcache)/' CMakeLists.txt
git submodule init
git submodule update
mkdir -p build
cd build
export CXXFLAGS="$CXXFLAGS -fPIC"
cmake -D OSSFUZZ=ON -D STATIC=ON -D BUILD_TESTS=ON -D USE_LTO=OFF -D ARCH="default" ..
TESTS="\
base58_fuzz_tests \
block_fuzz_tests \
transaction_fuzz_tests \
load-from-binary_fuzz_tests \
load-from-json_fuzz_tests \
parse-url_fuzz_tests \
http-client_fuzz_tests \
levin_fuzz_tests \
bulletproof_fuzz_tests \
tx-extra_fuzz_tests \
"
# only libfuzzer can run the slow to start ones
if test "x$FUZZING_ENGINE" == 'xlibfuzzer'
then
  TESTS="$TESTS \
signature_fuzz_tests \
cold-outputs_fuzz_tests \
cold-transaction_fuzz_tests \
"
fi
make -C tests/fuzz $TESTS
cd /src/monero/monero/build/tests/fuzz
for fuzzer in *_fuzz_tests
do
  cp "$fuzzer" "$OUT"
  base=$(echo $fuzzer | sed -e s/_fuzz_tests//)
  cd "/src/monero/monero/tests/data/fuzz/$base"
  rm -f "${OUT}/${fuzzer}_seed_corpus.zip"
  # rename each corpus file to its own sha1 so zip entries are unique
  # and stable, then zip it into the fuzzer's seed corpus.
  for f in *
  do
    h=$(sha1sum "$f" | awk '{print $1}')
    cp "$f" "$h"
    zip "${OUT}/${fuzzer}_seed_corpus.zip" "$h"
    rm -f "$h"
  done
  cd -
done
| true
|
a0a70a1ea5989262ff163dfe7240bd521aeab702
|
Shell
|
schmir/blackzim
|
/build-release
|
UTF-8
| 609
| 3.578125
| 4
|
[] |
no_license
|
#! /usr/bin/env bash
# Cross-compile blackzim for each os-arch pair in TARGETS and package
# every build as release/blackzim-<os>-<arch>-<version>.tgz.
#
# Environment overrides: GO (compiler), VERSION (tag), TARGETS (list).
set -euo pipefail
: "${GO:=go}"
: "${VERSION:=}"
: "${TARGETS:=linux-amd64 linux-arm linux-arm64 freebsd-amd64 openbsd-amd64}"
if [[ -z "${VERSION}" ]]; then
    VERSION=$(git describe --tags --always --abbrev=4 --dirty)
fi
mkdir -p release/tmp
cd release/tmp
# TARGETS stays unquoted on purpose: it is a whitespace-separated list.
for osarch in ${TARGETS}; do
    IFS='-' read -r os arch <<< "${osarch}"
    #          └─ -r keeps backslashes literal during the split
    dir=blackzim-${os}-${arch}-${VERSION}
    target=${dir}/blackzim
    tarfile=../${dir}.tgz
    echo "Building ${target}"
    # quoted expansions: a --dirty version suffix or odd GOOS/GOARCH
    # value must not be word-split into extra arguments
    env GOARCH="${arch}" GOOS="${os}" ${GO} build -o "${target}" ../..
    tar -czf "${tarfile}" "${dir}"
done
cd ../..
rm -rf release/tmp
| true
|
946e727e994ce2765f360de18208a9f1bfa1bac5
|
Shell
|
zhaozg/sts
|
/tools/genall
|
UTF-8
| 1,987
| 3.96875
| 4
|
[
"LicenseRef-scancode-public-domain"
] |
permissive
|
#!/bin/bash
#
# genall - write data for all 9 generators under $TOPDIR/__name__/__name__.u8
#
# WARNING: Some generators such as Blum-Blum-Shub are very very slow
# and will take a very very long time to run! For this reason
# we launch the 8 generators in the background. You may tail
# the $TOPDIR/*/*.out files to determine their progress.
# setup
#
export TOPDIR=/var/tmp/sts-data
export GENERATORS=/usr/local/src/bin/sts/tools/generators
if [[ ! -d $TOPDIR ]]; then
  echo "$0: FATAL: TOPDIR is not a directory: $TOPDIR" 1>&2
  exit 1
fi
if [[ ! -x $GENERATORS ]]; then
  echo "$0: FATAL: cannot find GENERATORS executable: $GENERATORS" 1>&2
  exit 2
fi
# one output directory per generator
for i in BBS CCG LCG MODEXP MS QCG1 QCG2 SHA1 XOR; do
  mkdir -v -p "$TOPDIR/$i"
done
# launch the generators in the background
# (-i 1 <gen> 65536: presumably iteration/generator-id/block-count
#  arguments to the generators tool — TODO confirm against its usage)
#
nohup time $GENERATORS -i 1 1 65536 > $TOPDIR/LCG/LCG-8G.u8 2> $TOPDIR/LCG/LCG.out < /dev/null &
nohup time $GENERATORS -i 1 2 65536 > $TOPDIR/QCG1/QCG1-8G.u8 2> $TOPDIR/QCG1/QCG1.out < /dev/null &
nohup time $GENERATORS -i 1 3 65536 > $TOPDIR/QCG2/QCG2-8G.u8 2> $TOPDIR/QCG2/QCG2.out < /dev/null &
nohup time $GENERATORS -i 1 4 65536 > $TOPDIR/CCG/CCG-8G.u8 2> $TOPDIR/CCG/CCG.out < /dev/null &
nohup time $GENERATORS -i 1 5 65536 > $TOPDIR/XOR/XOR-8G.u8 2> $TOPDIR/XOR/XOR.out < /dev/null &
nohup time $GENERATORS -i 1 6 65536 > $TOPDIR/MODEXP/MODEXP-8G.u8 2> $TOPDIR/MODEXP/MODEXP.out < /dev/null &
nohup time $GENERATORS -i 1 7 65536 > $TOPDIR/BBS/BBS-8G.u8 2> $TOPDIR/BBS/BBS.out < /dev/null &
nohup time $GENERATORS -i 1 8 65536 > $TOPDIR/MS/MS-8G.u8 2> $TOPDIR/MS/MS.out < /dev/null &
nohup time $GENERATORS -i 1 9 65536 > $TOPDIR/SHA1/SHA1-8G.u8 2> $TOPDIR/SHA1/SHA1.out < /dev/null &
# ... now you wait
#
echo "$0: NOTICE: 9 generators launched in the background" 1>&2
echo "$0: NOTICE: Some of these generators will take a very long time to write 8 Gbytes of data" 1>&2
echo "$0: NOTICE: To monitor progress, try this command: tail -f $TOPDIR/*/*.out" 1>&2
exit 0
| true
|
115bd13036e6827cbfc58c680a4fb25369481f4a
|
Shell
|
Abdur-rahmaanJ/snakeware
|
/snakeware/build.sh
|
UTF-8
| 3,057
| 3.59375
| 4
|
[
"MIT"
] |
permissive
|
# This script downloads and builds linux and python sources, and outputs
# the snakeware structure in the build/ directory.
# linux kernel version
KERNEL_MAJ=v5.x
KERNEL_MIN=5.6.14
# python version
# NOTE(review): the wget URL below hardcodes /3.8.3/ instead of using
# $PYTHON_VER — keep the two in sync when bumping the version.
PYTHON_VER=3.8.3
# snakeware dirs
SNAKEWARE=$PWD
SRC=$PWD/src
BLD=$PWD/build
mkdir -p $SRC
mkdir -p $BLD
# minimal rootfs skeleton
mkdir -p $BLD/etc/init.d $BLD/proc $BLD/sys $BLD/dev $BLD/tmp $BLD/boot $BLD/bin
chmod 1777 $BLD/tmp
# copy libs
#\TODO build these libs from source
mkdir -p $BLD/usr/lib $BLD/usr/lib64
# python dependencies
cp /usr/lib/libcrypt.so.1 $BLD/usr/lib
cp /usr/lib/libc.so.6 $BLD/usr/lib/
cp /usr/lib/libpython3.8.so.1.0 $BLD/usr/lib/
cp /usr/lib64/ld-linux-x86-64.so.2 $BLD/usr/lib64/
cp /usr/lib/libpthread.so.0 $BLD/usr/lib/
cp /usr/lib/libdl.so.2 $BLD/usr/lib/
cp /usr/lib/libutil.so.1 $BLD/usr/lib/
cp /usr/lib/libm.so.6 $BLD/usr/lib/
cp /lib/libgcc_s.so.1 $BLD/usr/lib/
# pygame dependencies
cp /usr/lib/libSDL-1.2.so.0 $BLD/usr/lib/
cp /usr/lib/libz.so.1 $BLD/usr/lib/
cp /lib/libSDL_ttf-2.0.so.0 $BLD/usr/lib/
cp /lib/libfreetype.so.6 $BLD/usr/lib/
cp /lib/libbz2.so.1.0 $BLD/usr/lib/
cp /lib/libpng16.so.16 $BLD/usr/lib/
cp /lib/libharfbuzz.so.0 $BLD/usr/lib/
cp /lib/libglib-2.0.so.0 $BLD/usr/lib/
cp /lib/libgraphite2.so.3 $BLD/usr/lib/
cp /lib/libpcre.so.1 $BLD/usr/lib/
# mirror the libraries into /lib and /lib64 as well
mkdir -p $BLD/lib $BLD/lib64
cp $BLD/usr/lib/* $BLD/lib/
cp $BLD/usr/lib64/* $BLD/lib64/
# GET SOURCES
cd $SRC
if [ ! -d "linux-$KERNEL_MIN" ]; then
  echo "Downloading kernel source..."
  wget https://cdn.kernel.org/pub/linux/kernel/$KERNEL_MAJ/linux-$KERNEL_MIN.tar.xz
  tar xf linux-$KERNEL_MIN.tar.xz
  rm linux-$KERNEL_MIN.tar.xz
fi
if [ ! -d "Python-$PYTHON_VER" ]; then
  echo "Downloading python source..."
  wget https://www.python.org/ftp/python/3.8.3/Python-$PYTHON_VER.tar.xz
  tar xf Python-$PYTHON_VER.tar.xz
  rm Python-$PYTHON_VER.tar.xz
fi
if [ ! -d "busybox" ]; then
  echo "Downloading busybox source..."
  git clone https://git.busybox.net/busybox
fi
# BUILD SOURCES
# build kernel with default config
#\TODO better config?
cp $SNAKEWARE/config/kernel-config $SRC/linux-$KERNEL_MIN/.config
cd $SRC/linux-$KERNEL_MIN
make -j4
make modules
make modules_install INSTALL_MOD_PATH=$BLD/
cp arch/x86/boot/bzImage $BLD/boot/vmlinuz
# build python
cd $SRC/Python-$PYTHON_VER
./configure --prefix=$BLD/usr/
make -j4
make install
# build busybox (statically linked)
cd $SRC/busybox
make defconfig
export LDFLAGS="--static"
make -j4
make install
#sudo chmod 4755 _install/bin/busybox
sudo chown root _install/bin/busybox
cp -a _install/* $BLD/
rm $BLD/linuxrc
cd $BLD
# busybox doubles as /init for the initramfs
ln -s bin/busybox init
# create /etc/init.d/rcS
# (heredoc content below is the rootfs boot script itself — do not edit
#  it here without meaning to change the target system's behavior)
cat << EOF > $BLD/etc/init.d/rcS
#!/bin/sh
mount -a
mdev -s
/bin/hostname -F /etc/hostname
/sbin/ifconfig lo 127.0.0.1 up
while [ 1 ]
do
clear
/usr/bin/python3
done
EOF
chmod +x $BLD/etc/init.d/rcS
# create /etc/fstab
cat << EOF > $BLD/etc/fstab
proc /proc proc defaults 0 0
sysfs /sys sysfs defaults 0 0
devpts /dev/pts devpts defaults 0 0
tmpfs /dev/shm tmpfs defaults 0 0
EOF
# create /etc/hostname
echo 'localhost' > $BLD/etc/hostname
| true
|
875c60fc5dd9647e41a9d947f5ba990afd8f5a8e
|
Shell
|
guilhemmarchand/TA-nmon
|
/TA-nmon/bin/nmon2csv.sh
|
UTF-8
| 5,892
| 3.265625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh

# set -x

# Program name: nmon2csv.sh
# Purpose - Frontal script to nmon2csv, will launch Python or Perl script depending on interpreter availability
# See nmon2csv.py | nmon2csv.pl
# Author - Guilhem Marchand
# Disclaimer: this provided "as is".
# Date - February 2015

# Guilhem Marchand 2015/07/07, initial version
# - 07/27/2015, V1.0.01: Guilhem Marchand:
#                                         - hotfix for using the PA-nmon to generate Performance data in standalone indexers
# - 09/29/2015, V1.0.02: Guilhem Marchand:
#                                         - Restrict to Python 2.7.x to use nmon2csv.py
# - 10/14/2015, V1.0.03: Guilhem Marchand:
#                                         - Use $SPLUNK_HOME/var/run/nmon for temp directory instead of /tmp
# - 10/28/2015, V1.0.04: Guilhem Marchand:
#                                         - Fixed temp directory lacking creation if dir does not yet exist
# - 01/15/2016, V1.0.05: Guilhem Marchand:
#                                         - Send arguments from sh wrapper to nmon2csv parsers
# - 02/08/2016, V1.0.06: Guilhem Marchand:
#                                         - /dev/null redirection improvement for the which python check
# - 07/30/2016: V1.0.07: Guilhem Marchand:
#                                         - the core-app does not contains anymore data collection objects
# - 07/30/2016: V1.0.08: Guilhem Marchand:
#                                         - Splunk certification requires $SPLUNK_HOME/var/log/ for files generation
# - 08/02/2016: V1.0.09: Guilhem Marchand:
#                                         - Manage the TA-nmon_selfmode
# - 01/04/2017: V1.0.10: Guilhem Marchand:
#                                         - Update path discovery
# - 23/05/2017: V1.0.11: Guilhem Marchand:
#                                         - Missing userargs call in condition
# - 24/06/2017: V1.0.12: Guilhem Marchand:
#                                         - specify explicit date format to prevent time zone issues
# - 26/06/2017: V1.0.13: Guilhem Marchand:
#                                         - Interpreter choice update
# - 30/07/2017: V1.0.14: Guilhem Marchand:
#                                         - HOST variable is unset
# - 30/03/2018: V1.0.15: Guilhem Marchand:
#                                         - Fix issues #55 / #56

# Version 1.0.15

# For AIX / Linux / Solaris

#################################################
## 	Your Customizations Go Here            ##
#################################################

# format date output to strftime dd/mm/YYYY HH:MM:SS (explicit format to
# prevent time zone / locale surprises in the logs)
log_date () {
    date "+%d-%m-%Y %H:%M:%S"
}

# Set host (may be overridden further below by the FQDN or Splunk host options)
HOST=`hostname`

# SPLUNK_HOME must be provided by the Splunk scripted-input environment
if [ -z "${SPLUNK_HOME}" ]; then
    echo "`log_date`, ERROR, SPLUNK_HOME variable is not defined"
    exit 1
fi

# Set tmp directory (Splunk certification requires $SPLUNK_HOME/var/log)
APP_VAR=${SPLUNK_HOME}/var/log/nmon

# Create the working directory when missing; abort only if creation fails.
# Bug fix: the previous code ran "exit 1" unconditionally right after a
# successful mkdir, so the very first execution could never proceed.
if [ ! -d "${APP_VAR}" ]; then
    mkdir -p "${APP_VAR}" || exit 1
fi

# silently remove tmp file (testing exists before rm seems to cause trouble on some old OS)
rm -f ${APP_VAR}/nmon2csv.temp.*

# Per-process temp file that buffers stdin before calling the parser
nmon_temp=${APP_VAR}/nmon2csv.temp.$$

# APP path discovery: standalone install or cluster slave-apps deployment
if [ -d "$SPLUNK_HOME/etc/apps/TA-nmon" ]; then
    APP=$SPLUNK_HOME/etc/apps/TA-nmon

elif [ -d "$SPLUNK_HOME/etc/slave-apps/TA-nmon" ]; then
    APP=$SPLUNK_HOME/etc/slave-apps/TA-nmon

else
    echo "`log_date`, ${HOST} ERROR, the APP directory could not be defined, is the TA-nmon installed ?"
    exit 1
fi

# Configuration precedence: default nmon.conf, then local nmon.conf,
# then the per-server /etc/nmon.conf

# source default nmon.conf
if [ -f $APP/default/nmon.conf ]; then
    . $APP/default/nmon.conf
fi

# source local nmon.conf, if any
if [ -f $APP/local/nmon.conf ]; then
    . $APP/local/nmon.conf
fi

# On a per server basis, you can also set in /etc/nmon.conf
if [ -f /etc/nmon.conf ]; then
    . /etc/nmon.conf
fi

# Manage FQDN option: when nmon2csv_options contains --use_fqdn, report the
# fully qualified host name (hostname -f is only relevant on Linux)
echo $nmon2csv_options | grep '\-\-use_fqdn' >/dev/null

if [ $? -eq 0 ]; then
    case $UNAME in
    Linux)
        HOST=`hostname -f` ;;
    *)
        HOST=`hostname` ;;
    esac
else
    HOST=`hostname`
fi

# Manage host override option based on Splunk hostname defined
case $override_sys_hostname in
"1")
    # Retrieve the Splunk host value
    HOST=`cat $SPLUNK_HOME/etc/system/local/inputs.conf | grep '^host =' | awk -F\= '{print $2}' | sed 's/ //g'`
;;
esac

#
# Interpreter choice
#

# Set the default interpreter
INTERPRETER="python"

# Availability detection.
# Bug fix: the previous implementation captured `which python >/dev/null 2>&1`,
# so the captured variable was always empty: the "*)" case arm matched first
# and the "0)" arms were unreachable dead code. Rely on the exit status of
# command -v instead (POSIX).
PYTHON_available="false"
PERL_available="false"

if command -v python >/dev/null 2>&1; then
    # The Python parser requires Python 2.7.x
    python_subversion=`python --version 2>&1`
    case $python_subversion in
    *" 2.7"*)
        PYTHON_available="true" ;;
    esac
fi

if command -v perl >/dev/null 2>&1; then
    PERL_available="true"
fi

case `uname` in

# AIX priority is Perl
"AIX")
    case $PERL_available in
    "true")
        INTERPRETER="perl" ;;
    "false")
        INTERPRETER="python" ;;
    esac
;;

# Other OS, priority is Python
*)
    case $PYTHON_available in
    "true")
        INTERPRETER="python" ;;
    "false")
        INTERPRETER="perl" ;;
    esac
;;

esac

####################################################################
#############		Main Program 			############
####################################################################

# Store arguments sent to script
userargs=$@

# Buffer stdin into the temp file so it can be replayed to the parser
while read line ; do
    echo "$line" >> ${nmon_temp}
done

# Start the parser
case ${INTERPRETER} in
"python")
    cat ${nmon_temp} | ${SPLUNK_HOME}/bin/splunk cmd ${APP}/bin/nmon2csv.py ${userargs} ;;
"perl")
    cat ${nmon_temp} | ${SPLUNK_HOME}/bin/splunk cmd ${APP}/bin/nmon2csv.pl ${userargs} ;;
esac

# Remove temp
rm -f ${nmon_temp}

exit 0
| true
|
fb5517757e475ea651433deaf2834010bf95277b
|
Shell
|
cmeid/dots
|
/custom/index.sh
|
UTF-8
| 539
| 2.5625
| 3
|
[
"MIT"
] |
permissive
|
# if you want to hook the installation to other dotfiles-type stuff, you can do it here

# NOTE(review): assumes $custom is exported by the dotfiles framework that
# sources this file -- confirm against the caller.

# install custom terminal: clone the laptop-automation repo, import its
# macOS Terminal profile, and make it the default/startup profile
mkdir -p $custom/tmp
cd $custom/tmp
git clone git@github.com:agaridata/laptop-automation.git
open laptop-automation/terminal/*.terminal
defaults write com.apple.Terminal "Default Window Settings" -string "cmeid"
defaults write com.apple.Terminal "Startup Window Settings" -string "cmeid"
rm -rf $custom/tmp

# pull dotfiles: install oh-my-zsh and restore app settings via mackup
cd ~
git clone git@github.com:robbyrussell/oh-my-zsh.git
mackup restore

# set zsh default login shell for the current user
chsh -s `which zsh`
| true
|
f20d3227285f6fd8c59d580f5d5d259daf5af451
|
Shell
|
adeelkazmi/bash-examples
|
/string-search
|
UTF-8
| 769
| 4.28125
| 4
|
[] |
no_license
|
#!/bin/bash

# Demo of two equivalent ways to test whether a string contains a substring.

fullString="This is my full string"
echo "fullString=\"$fullString\""

# Print whether $1 occurs anywhere inside $fullString (case/glob variant).
SearchForString()
{
  local needle=$1
  echo -n "Search for \"${needle}\": "
  case "$fullString" in
    *"${needle}"*) echo "FOUND" ;;
    *)             echo "NOT FOUND" ;;
  esac
}

# Print, for each argument, whether it occurs inside $fullString
# ([[ == *pattern* ]] variant).
SearchForMultipleStrings()
{
  local needle
  echo -n "Searching for"
  for needle in "$@"; do echo -n " \"$needle\""; done
  echo ":"

  for needle in "$@"
  do
    if [[ $fullString == *"${needle}"* ]]; then
      echo "	\"$needle\" Found!"
    else
      echo "	\"$needle\" Not Found!"
    fi
  done
}

SearchForString full
SearchForString Hello
SearchForMultipleStrings full Hello "my full"
| true
|
ba703da621556a29b6c5e36976248954ae617fce
|
Shell
|
uhh-lt/kaldi-tuda-de
|
/s5_r2/local/make_hires_feats_train.sh
|
UTF-8
| 874
| 2.828125
| 3
|
[
"Apache-2.0",
"CC-BY-4.0",
"LicenseRef-scancode-other-permissive",
"LGPL-2.0-or-later"
] |
permissive
|
# Kaldi recipe helper: compute "hires" MFCC features, CMVN stats and online
# i-vectors for the dev_e / test_e data sets.
# path.sh / cmd.sh set up the Kaldi environment and define $train_cmd.
. path.sh
. cmd.sh

mfccdir=mfcc
stage=0
nJobs=8

if [ $stage -le 6 ]; then
  # Now make MFCC features.
  #x=tuda_train
  for x in dev_e test_e; do
# for x in dev_a dev_b dev_c dev_d test_a test_b test_c test_d; do
    utils/copy_data_dir.sh data/$x data/${x}_hires
    utils/fix_data_dir.sh data/$x # some files fail to get mfcc for many reasons
    steps/make_mfcc.sh --cmd "$train_cmd" --nj $nJobs --mfcc-config conf/mfcc_hires.conf data/${x}_hires exp/make_mfcc/${x}_hires $mfccdir
    utils/fix_data_dir.sh data/$x # some files fail to get mfcc for many reasons
    steps/compute_cmvn_stats.sh data/${x}_hires exp/make_mfcc/${x}_hires $mfccdir
    utils/fix_data_dir.sh data/${x}_hires
    # extract online i-vectors with the pre-trained extractor
    steps/online/nnet2/extract_ivectors_online.sh --cmd "$train_cmd" --nj $nJobs data/${x}_hires/ exp/nnet3_cleaned/extractor/ exp/nnet3_cleaned/ivectors_${x}_hires
  done
fi
| true
|
2d41041ad81437d07bcb956958264434a85a1e06
|
Shell
|
problem-frames/openpf
|
/update.sh
|
UTF-8
| 374
| 2.8125
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash

# Refresh the Trac wiki attachments: for every DSL syntax, drop the stale
# .xtext grammar and attach the current copy from the workspace samples.
for syntax in ontology problem argument; do
    # Capitalise the first letter to build the grammar file name
    first=$(echo "${syntax:0:1}" | tr "[:lower:]" "[:upper:]")
    Syntax=${first}${syntax:1}
    trac-admin /home/share/repo/trac/openre attachment remove "wiki:Examples/$syntax" "$Syntax.xtext"
    trac-admin /home/share/repo/trac/openre attachment add "wiki:Examples/$syntax" "workspace/$syntax/samples/$Syntax.xtext"
done
| true
|
cffbcc1caaf236c0365f10d582e1f316e962a513
|
Shell
|
rackaam/letsencrypt-simple
|
/startme.sh
|
UTF-8
| 153
| 3.0625
| 3
|
[] |
no_license
|
#!/bin/sh

# Container entrypoint: serve /var/www/html over HTTP by default, or run an
# arbitrary command instead when OVERRIDE is set (the command being "$@").
set -e

# ${OVERRIDE:-} keeps the script safe if OVERRIDE is unset
OVERRIDE=${OVERRIDE:-}

if [ -z "${OVERRIDE}" ]
then
    mkdir -p /var/www/html
    # darkhttpd daemonizes itself; it keeps serving after this script exits
    darkhttpd /var/www/html --port 80 --daemon
else
    "$@"
fi
| true
|
503fd7c489e36900e7c1c565c1c706d8d3af5399
|
Shell
|
paulburton/Arch-Linux-Repository
|
/gnome-patched/gnome-vfs/PKGBUILD
|
UTF-8
| 1,858
| 2.578125
| 3
|
[] |
no_license
|
# $Id: PKGBUILD 92007 2010-09-28 16:42:58Z ibiru $
# Maintainer: Jan de Groot <jgc@archlinux.org>
# Arch Linux PKGBUILD for gnome-vfs, patched for HAL volume names and the
# newer gnutls API.

pkgname=gnome-vfs
pkgver=2.24.4
pkgrel=1.1
pkgdesc="The GNOME Virtual File System"
arch=(i686 x86_64)
license=('LGPL')
depends=('fam' 'gconf>=2.32.0' 'bzip2' 'avahi>=0.6.27' 'smbclient>=3.5.5' 'gnome-mime-data>=2.18.0-2' 'heimdal>=1.3.3' 'gnutls>=2.8.6')
makedepends=('pkgconfig' 'intltool' 'gtk-doc' 'gnome-common')
options=('!libtool' '!emptydirs' '!makeflags')
conflicts=(gnome-vfs-samba)
provides=(gnome-vfs-samba)
replaces=(gnome-vfs-samba gnome-vfs-extras)
url="http://www.gnome.org"
install=gnome-vfs.install
source=(http://ftp.gnome.org/pub/gnome/sources/${pkgname}/2.24/gnome-vfs-${pkgver}.tar.bz2
        hal-show-volume-names.patch
        gnutls-config.patch)
sha256sums=('62de64b5b804eb04104ff98fcd6a8b7276d510a49fbd9c0feb568f8996444faa'
            'b38b86d6c966dcf64f9140d800aebe8a14845bb8ce7c045f52d4df483cacd23b'
            '66c7cfb12995c0dd94a2caea95c7e3c55981993f05a79c585d60915ff131955d')

build() {
  cd "${srcdir}/${pkgname}-${pkgver}"
  #Archlinux patch (b.g.o #321498)
  patch -Np1 -i "${srcdir}/hal-show-volume-names.patch"
  #Fix build with new gnutls
  patch -Np1 -i "${srcdir}/gnutls-config.patch"
  # regenerate the autotools build system after patching configure inputs
  libtoolize --force
  gtkdocize
  aclocal
  autoconf
  automake
  ./configure --prefix=/usr --sysconfdir=/etc \
	--localstatedir=/var --disable-static \
	--libexecdir=/usr/lib/gnome-vfs-2.0 \
	--enable-samba --disable-hal \
	--enable-avahi --disable-howl \
	--disable-openssl --enable-gnutls
  make
  # defer schema installation so the schemas can be merged manually below
  make GCONF_DISABLE_MAKEFILE_SCHEMA_INSTALL=1 DESTDIR="${pkgdir}" install

  install -d -m755 "${pkgdir}/usr/share/gconf/schemas"
  # NOTE(review): uses legacy ${startdir}/pkg here but ${pkgdir} elsewhere --
  # these are the same path only under old makepkg defaults; verify.
  gconf-merge-schema "${pkgdir}/usr/share/gconf/schemas/${pkgname}.schemas" --domain gnome-vfs-2.0 ${startdir}/pkg/etc/gconf/schemas/*.schemas
  rm -f ${pkgdir}/etc/gconf/schemas/*.schemas
}
| true
|
816065c5309acf8ce9a55f0f808918ad72d474f5
|
Shell
|
rayakakayung/abc
|
/weekly.sh.bak
|
UTF-8
| 1,756
| 3.484375
| 3
|
[] |
no_license
|
#!/bin/bash

# Weekly rsync backup to an NFS share, with up to 10 retries when rsync
# reports "vanished" files, and a completion/failure e-mail either way.

POOL=Hosting-73
counter=0
vanished=1
log_file="/var/log/backup.log.`date +%a`_weekly"
backup_dir="/home /var /etc /usr /lib"
backup_location="/backup_on_storage"

# Current timestamp as "ddmmyy HH:MM:SS"
function TIME(){
	DATE=`date +%d%m%y" "%H:%M:%S`
	echo $DATE
}

# force-unmount any stale mount before remounting the NFS share
umount -f $backup_location
echo "$(TIME) Start Weekly backup" > $log_file
mount -t nfs -o tcp storage.abchk.net:/ZFSPOOL/$POOL $backup_location
if [ $? -eq 0 ]; then {
	while [ $vanished -eq 1 ]; do {
		# NOTE(review): ">" truncates the log on every retry, so only the
		# last attempt's output survives -- confirm this is intentional.
		echo "$(TIME) Start Weekly backup" > $log_file
		rsync --exclude="/var/spool/exim" --ignore-errors -va --delete $backup_dir $backup_location >> $log_file 2>&1 || echo -e "\n$(TIME) BACKUP FAILED!!!!\n" >> $log_file
		echo "$(TIME) End Weekly backup" >> $log_file
		# retry (max 10 times) while rsync reported vanished files
		grep "vanished" $log_file
		if [[ ($? -eq 0) && ($counter -lt 10) ]]; then
			vanished=1
			let counter=counter+1
			echo -e "\nRetried $counter times\n" >> $log_file
		else
			vanished=0
			echo -e "\nRetried $counter times\n" >> $log_file
		fi
	} done
	# mail the outcome; on success, stamp the share and unmount it
	grep "BACKUP FAILED" $log_file
	if [ $? -eq 0 ]; then
		echo -e "Backup Failed, check $log_file for detailed information\n\n`head -n1 $log_file;tail -n10 $log_file`" | mail -s"Backup FAILED `hostname` Weekly `date +%a`" support@abchk.com
	else
		echo -e "Backup Completed, check $log_file for detailed information\n\n`head -n1 $log_file;tail -n10 $log_file`" | mail -s"Backup COMPLETED `hostname` Weekly `date +%a`" support@abchk.com
		rm -rf $backup_location/last_backup*
		touch $backup_location/last_backup_`date +%d%m%Y_%H:%M:%S`
		umount $backup_location
	fi
}
else
	echo -e "$(TIME) BACKUP FAILED!!!!: Cannot mount remote partition" > $log_file
	echo "$(TIME) End Weekly backup" >> $log_file
	mail -s"Backup FAILED `hostname` Weekly `date +%a`" support@abchk.com < $log_file
fi
| true
|
d32522c078728381697356e7ce84fca7ab8a5427
|
Shell
|
liquanzhou/ops_doc
|
/Service/nexus/CentOS系统中安装Nexus并导入已有的构件库.sh
|
UTF-8
| 2,477
| 3.140625
| 3
|
[] |
no_license
|
CentOS系统中安装Nexus并导入已有的构件库
Nexus是Maven仓库管理器,用于搭建一个本地仓库服务器,这样做的主要的好处就是节约网络资源,速度快,开发团队中所有的Maven可以共享这个本地仓库,下载一遍共享使用。另外一个优点就是他为你的组织提供一个搭建构件的地方。本文将介绍如何在CentOS系统中安装配置Nexus,并介绍如何导入已有的构件仓库。
1、 软件
a) 下载Nexus 地址:http://www.sonatype.org/downloads/nexus-2.1.2-bundle.tar.gz
b) 如无特殊说明,本文档操作用户为nexus
c) nexus默认的管理员用户名密码是:admin/admin123
2、 安装
a) 解压
$ tar zxvf nexus-2.1.2-bundle.tar.gz
b) 移动到其他目录
$ mv nexus-2.1.2 /home/nexus/nexus
c) 设置为系统自启动服务(使用root用户)
# cd /etc/init.d/
# cp /home/nexus/nexus/bin/jsw/linux-x86-64/nexus nexus
编辑/etc/init.d/nexus文件,添加以下变量定义:
NEXUS_HOME=/home/nexus/nexus
PLATFORM=linux-x86-64
PLATFORM_DIR="${NEXUS_HOME}/bin/jsw/${PLATFORM}"
修改以下变量:
WRAPPER_CMD="${PLATFORM_DIR}/wrapper"
WRAPPER_CONF="${PLATFORM_DIR}/../conf/wrapper.conf"
PIDDIR="${NEXUS_HOME}"
修改如下变量,设置启动用户为nexus:
RUN_AS_USER=nexus
执行命令添加nexus自启动服务
# chkconfig –add nexus
# chkconfig –levels 345 nexus on
执行如下命令启动、停止nexus服务
# service nexus start
# service nexus stop
d) 检查是否启动成功
在本机浏览器中访问URL: http://localhost:8081/nexus
会出现Nexus的欢迎页面
注:如果想远程通过浏览器访问,则在远程浏览器中输入http://<ip>:8081/nexus
<ip> 可通过在本地机器上输入命令 ifconfig 查看
如果未能访问到nexus的欢迎页面,需要查看本机的防火墙设置,是否打开了端口8081
e) 修改配置
配置文件位置nexus/conf/nexus.properties,配置示例如下:
# Sonatype Nexus
# ==============
# This is the most basic configuration of Nexus.
# Jetty section
application-port=8081
application-host=0.0.0.0
nexus-webapp=${bundleBasedir}/nexus
nexus-webapp-context-path=/nexus
# Nexus section
nexus-work=${bundleBasedir}/../sonatype-work/nexus
runtime=${bundleBasedir}/nexus/WEB-INF
pr.encryptor.publicKeyPath=/apr/public-key.txt
主要配置参数:
application-port:nexus启动端口
nexus-work:指定构件库的存储位置
| true
|
3ac6b7fac1381c3c0745c29d288182123f603f2f
|
Shell
|
mperdikeas/tools
|
/emax
|
UTF-8
| 2,874
| 4.25
| 4
|
[] |
no_license
|
#!/usr/bin/env bash

# emax: interactive chooser for emacs daemons. Lists running emacs servers
# (UNIX or TCP sockets), remembers the last choice in .emax/count next to
# this script, and either connects emacsclient to the selected server or
# spawns a new daemon.

# TODO:
# * check that the answer provided by the user is one of the
#   possible prompted answers
# * maybe change from numeric to letters to allow for more than
#   10 servers

ME=`basename $0`
# directory this script lives in (strip the script name off BASH_SOURCE)
WHERE_I_LIVE=${BASH_SOURCE[0]/%${ME}/.}
# persisted index of the last selected server (used as the default answer)
COUNTF=${WHERE_I_LIVE}/.emax/count
if [ -f ${COUNTF} ];
then
    COUNT=$(<${COUNTF})
else
    touch -a ${COUNTF}
    echo "0" > ${COUNTF}
    COUNT=0
fi

USE_UNIX_SOCKETS=true # if set to "false" ensure you have "(setq server-use-tcp t)" in your .emacs

# Enumerate running emacs servers: socket basenames under /tmp/emacs* for
# UNIX sockets, or server files under ~/.emacs.d/server for TCP.
if [ "${USE_UNIX_SOCKETS}" = true ] ; then
    printf "using *UNIX* sockets\n"
    servers=($(ss -lxp | grep LISTEN | grep -i /tmp/emacs | awk '{ print $5 }' | rev | cut -d'/' -f 1 | rev))
else
    printf "using *TCP* sockets\n"
    servers=($(find ~/.emacs.d/server -type f -exec basename {} \;))
fi

s=-1
NEWSERVER="newserver"
selectedServer=""
NUM_SERVERS=${#servers[@]}
echo "# of servers is: ${NUM_SERVERS}"

# Prompt for a daemon name, start it, connect to it, and bump the stored
# default index past the existing servers.
function spawnNewEmacsDaemon {
    printf "Enter name for new emacs daemon:"
    read newServerName
    emacs --daemon="$newServerName"
    if [ "${USE_UNIX_SOCKETS}" = true ] ; then
	emacsclient -s "$newServerName" -t "$@"
    else
	emacsclient -f "$newServerName" -t "$@"
    fi
    echo $(($NUM_SERVERS+1)) > ${COUNTF}
}

if [[ $NUM_SERVERS -eq 0 ]] ; then
    spawnNewEmacsDaemon $@
else
    # keep prompting until a valid selection 0..NUM_SERVERS is made
    while ! [[ $s -ge 0 && $s -le $NUM_SERVERS ]]
    do
	echo "Choose server to connect to:"
	# "(*)" marks the remembered default choice
	if [[ $COUNT -eq 0 ]];
	then
	    echo "0 - new server (*)"
	else
	    echo "0 - new server"
	fi
	i=0
	for server in "${servers[@]}"
	do
	    :
	    i=$((i+1))
	    if [[ $i -eq $COUNT ]];
	    then
		echo "$i - $server (*)"
	    else
		echo "$i - $server"
	    fi
	done
	# single silent keypress; plain Enter selects the default
	read -n 1 -s s
	re='^[0-9]+$'
	if ! [[ $s =~ $re ]] ; then
	    # NOTE(review): -eq is an arithmetic comparison; with an empty $s
	    # this happens to work in bash ("" evaluates to 0) but a string
	    # test [[ -z $s ]] would express the intent -- confirm.
	    if [[ $s -eq "" ]]; then
		s=$COUNT
	    else
		s=-1
	    fi
	fi
	if [[ $s -eq 0 ]] ; then
	    selectedServer=$NEWSERVER
	else
	    echo "$s" > ${COUNTF}
	    selectedServer=${servers[$((s-1))]}
	fi
    done
    #printf "\n$s was entered corresponding to $selectedServer\n"
    if [ ${selectedServer} = ${NEWSERVER} ] ; then
	spawnNewEmacsDaemon $@
    else
	if [ "${USE_UNIX_SOCKETS}" = true ] ; then
	    emacsclient -s "$selectedServer" -t "$@"
	else
	    emacsclient -f "$selectedServer" -t "$@"
	fi
    fi
fi
| true
|
bec96b24bc6b27ee2bfc0b3cf5c8716337e53887
|
Shell
|
umeboshi2/paella
|
/vagrant/salt/roots/salt/debianlive/install-win7-image.sh
|
UTF-8
| 1,857
| 3.453125
| 3
|
[] |
no_license
|
#!/bin/bash

# Partition and format the target drive ($1), apply a Windows 7 WIM image
# ($2), then boot a WinPE VirtualBox VM against the raw disk to finish
# setup. DESTRUCTIVE: wipes the drive passed as $1.
set -e

DRIVE=$1
PARTED=/sbin/parted

# MBR layout: ~200MB bootable System partition + Windows partition (rest)
sudo $PARTED -s $DRIVE "mklabel msdos"
sudo $PARTED -s $DRIVE "mkpart primary ntfs 1 200MB"
sudo $PARTED -s $DRIVE "set 1 boot on"
sudo $PARTED -s $DRIVE "mkpart primary ntfs 200MB -0"

sysdevice=${DRIVE}1
windevice=${DRIVE}2

echo "Formatting system partition"
sudo /sbin/mkfs.ntfs -f -L System $sysdevice

echo "Formatting windows partition"
sudo /sbin/mkfs.ntfs -f -L Windows $windevice

echo "applying WIM $2 to $windevice"
# this always fails and throws SATA errors
# on vagrant virtualbox machine
# NOTE(review): "|| true" deliberately ignores the failure described above
# so set -e does not abort -- verify the applied image is still usable.
sudo wimlib-imagex apply $2 $windevice || true

echo "Copying bootloader to master boot record."
sudo dd if=/srv/incoming/bootloader.bin of=$DRIVE

# Build a throwaway "winpe" VM that boots the BCD ISO with the raw target
# disk attached, so Windows can finish its boot configuration.
echo "Creating WinPE Virtual Machine"
VBoxManage createvm --name winpe --register

echo "Giving WinPE Virtual Machine 256MB of memory"
VBoxManage modifyvm winpe --memory 256

echo "Creating VMDK to point to $DRIVE"
VBoxManage internalcommands createrawvmdk -filename ~/sysdrive.vmdk -rawdisk $DRIVE

echo "Creating SATA controller on WinPE Virtual Machine for cdrom"
VBoxManage storagectl winpe --name "ide-controller-main" --add ide

echo "Attaching BCD Auto ISO to WinPE Virtual Machine"
VBoxManage storageattach winpe --storagectl "ide-controller-main" \
 --port 0 --device 0 --type dvddrive --medium /srv/incoming/bcdauto.iso

echo "Creating SATA controller on WinPE Virtual Machine for system disk"
VBoxManage storagectl winpe --name "sata-controller-main" --add sata

echo "Attaching System Hard Drive to WinPE Virtual Machine"
VBoxManage storageattach winpe --storagectl "sata-controller-main" \
 --port 0 --device 0 --type hdd --medium ~/sysdrive.vmdk

# VBoxHeadless blocks until the VM powers off
echo "Executing WinPE Virtual Machine......(please wait)..."
VBoxHeadless --startvm winpe

echo "WinPE Virtual Machine execution complete."
echo "System should be ready to boot!"
| true
|
ce3bf68009b5b7ad09d496feb9cc1232bd962ff2
|
Shell
|
iskra-vitaly/my-zsh-config
|
/my/corgi.zsh
|
UTF-8
| 215
| 2.734375
| 3
|
[] |
no_license
|
#!/usr/bin/zsh

# Convenience helpers for hopping around the corgi project tree.

sam=~/projects/sam
cor_home=$sam/corgi
cor_src=$cor_home/src

# cd to the corgi project root
corhome() {
    cd $cor_home
}

# cd to the corgi source directory
corprj() {
    cd $cor_src
}

# cd to the corgi NAS share directory
correm() {
    cd $cor_home/nas/usr/share/corgi-nas
}
| true
|
a321a8b0493439ca49e7e8f00ceab0e51b02e65c
|
Shell
|
TheStalwart/dotswt
|
/bin/imgup
|
UTF-8
| 363
| 3.140625
| 3
|
[] |
no_license
|
#!/bin/sh

# Upload an image to img.flashtux.org and open the resulting URL.
# With no file argument it first takes a screenshot via scrot ("-s" for a
# region grab) and re-invokes itself ($0) on the captured file.
# NOTE(review): "[[" is a bashism under #!/bin/sh -- works only where
# /bin/sh is bash; confirm the target systems.

if [[ $1 == '-s' ]]
  then scrot_opts='-s'
fi

# no existing file given: screenshot to a temp file, upload it via the
# recursive $0 call embedded in scrot's -e hook, then clean up
if [[ ! -f $1 ]]
  then filename='/tmp/imgup-'`whoami`'-'`date +%s`'.png'; scrot $scrot_opts -e $0' $f' $filename; rm $filename; exit
fi

# upload the given file with its detected MIME type; curl prints the
# redirect URL, which is then opened in the default browser
filemime=`xdg-mime query filetype $1`
fileurl=`curl -s -F 'postimg=1' -F filename=@$1\;type=$filemime -w '%{redirect_url}' http://img.flashtux.org`
xdg-open $fileurl
| true
|
887a946fc5b1c891604f41d741f9935afd7ebb00
|
Shell
|
zengjle/animation_demo
|
/BuildProject/tools/android/mac/sign_apk.sh
|
UTF-8
| 567
| 3.09375
| 3
|
[] |
no_license
|
#!/bin/bash

# Sign and zipalign the unsigned release APK.
# Usage: sign_apk.sh <keystore-alias> <keystore-password>
# NOTE(review): passing the password on argv leaks it via `ps` and shell
# history -- consider jarsigner's interactive/file-based options.

project=BuildProject/android/app
keystore=$(ls $project/*.keystore)
# newest build-tools version; NOTE(review): grep '^\d' is a PCRE-style
# class not guaranteed in BRE -- confirm it matches on the target grep.
version=$(ls $ANDROID_HOME/build-tools|grep '^\d'|sort -nr|head -1)
aliasname=$1
password=$2
apkname=$(ls $project/build/outputs/apk/release/*unsigned.apk)
# signed output name: drop the "-unsigned" marker
signedname=$(echo $apkname | sed 's/-unsigned//g')

# sign into a temporary "-unzipalign" file, then align it into place
jarsigner -digestalg SHA1 -sigalg MD5withRSA -keystore $keystore -storepass $password -keypass $password -signedjar $signedname-unzipalign $apkname $aliasname
rm $apkname
$ANDROID_HOME/build-tools/$version/zipalign -v 4 $signedname-unzipalign $signedname
rm $signedname-unzipalign
| true
|
5bc2ca3f7a1cd45eb46c3912688accd58403c520
|
Shell
|
jgarvin/joe-etc
|
/bin/update-packages
|
UTF-8
| 394
| 3.8125
| 4
|
[] |
no_license
|
#!/bin/sh

# Install the packages listed in ../ubuntu-packages (relative to this
# script). One package name per line; '#' starts a comment (whole-line or
# trailing after a package name).

# Strip '#' comments from the given files (or stdin when none given).
filter_comments() {
	sed 's/\(.*\)#\(.*\)/\1/g' "$@"
}

# Bug fix: "&> /dev/null" is a bashism; under a POSIX /bin/sh (dash) it is
# parsed as "grep Ubuntu & > /dev/null", backgrounding grep and making the
# condition always true. Use POSIX redirection instead.
if uname -a | grep Ubuntu >/dev/null 2>&1
then
    # sed invocation removes comments, pounds on their own line
    # and when they trail after a package name
    filter_comments $(dirname $0)/../ubuntu-packages | \
        xargs sudo apt-get install --assume-yes --quiet
else
    echo >&2 "Couldn't find package file for this OS/distro. See $0"
fi
| true
|
2a0c0b8e94fc1f829759d401a3a933db22fa0e03
|
Shell
|
jonasreinsch/boilermacs
|
/template.bump_version.sh
|
UTF-8
| 481
| 2.875
| 3
|
[] |
no_license
|
#!/bin/bash

# Editor snippet: bump both CFBundleVersion and CFBundleShortVersionString
# in an Info.plist by 0.1 and put a ready-made git commit command on the
# macOS pasteboard.
# NOTE(review): "##CURSOR_POS" is a template placeholder substituted by the
# snippet engine with the plist base name -- this file is not runnable as-is.

PLIST_FILE='##CURSOR_POS.plist'

version=$(/usr/libexec/PlistBuddy -c 'Print CFBundleVersion' "${PLIST_FILE}")
# feed "0.1+<version>" to bc on stdin; assumes the version is a plain
# decimal (multi-dot versions like 1.2.3 would break bc) -- TODO confirm
new_version=$(<<< 0.1+"$version" bc)

/usr/libexec/PlistBuddy -c "Set CFBundleShortVersionString $new_version" "${PLIST_FILE}"
/usr/libexec/PlistBuddy -c "Set CFBundleVersion $new_version" "${PLIST_FILE}"

# hand the user a commit command for the bump
string_to_pbcopy="git commit -am 'bump version to $new_version'"
echo -n "$string_to_pbcopy" | pbcopy
echo "$string_to_pbcopy" copied to pasteboard
| true
|
0ace6823dcb4ed8a1cbc677b1eff1249196b6c1f
|
Shell
|
letrout/docker-linuxbuild
|
/build_src.sh
|
UTF-8
| 2,902
| 3.625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash

# Benchmark disk/CPU by timing a full Linux kernel source cycle
# (download, untar, configure, build, clean, remove), recording elapsed
# times and /proc/diskstats snapshots around each phase.

LINUX_VER=4.16.1
BASE_DIR=/kernel
RESULTS_DIR=${BASE_DIR}
LINUX_TARBALL=linux-${LINUX_VER}.tar.xz
LINUX_SRC_URL=https://cdn.kernel.org/pub/linux/kernel/v4.x/${LINUX_TARBALL}

# Files to store output
RESULTS_FILE="results_`date +%Y%m%d%H%M%S`.txt"
UNTAR_STATS="untar_out.txt"
CONFIG_STATS="config_out.txt"
CLEAN_PREBUILD_STATS="make_clean_prebuild.txt"
MAKE_STATS="make_out.txt"
CLEAN_POSTBUILD_STATS="make_clean_postbuild.txt"
REMOVE_STATS="remove_src.txt"

# Get the disk that BASE_DIR is mounted on
DISK=`awk -v needle="${BASE_DIR}" '$2==needle {print $1}' /proc/mounts |cut -d '/' -f 3`
# command used to snapshot that disk's I/O counters before/after each phase
DISKSTATS="grep ${DISK} /proc/diskstats"

PROCS_AVAIL=`getconf _NPROCESSORS_ONLN`
MAKE_J=${PROCS_AVAIL}

date > ${RESULTS_DIR}/${RESULTS_FILE}
echo "${PROCS_AVAIL} processes" >> ${RESULTS_DIR}/${RESULTS_FILE}
echo "${MAKE_J} processes used for make -j" >> ${RESULTS_DIR}/${RESULTS_FILE}
echo "" >> ${RESULTS_DIR}/${RESULTS_FILE}

# Get the source
mkdir -p ${BASE_DIR} \
  && wget ${LINUX_SRC_URL} -O ${BASE_DIR}/${LINUX_TARBALL}
cd ${BASE_DIR}

# untar
# Redirection note (applies to all timed phases below): "2>&1 >/dev/null"
# sends stderr (where `time` writes) into the pipe and discards stdout,
# so only the timing lines reach grep.
echo "untar kernel" >> ${RESULTS_DIR}/${RESULTS_FILE}
${DISKSTATS} >> ${RESULTS_DIR}/${RESULTS_FILE}
(time tar -xf ${LINUX_TARBALL}) 2>&1 >/dev/null | grep -A2 ^real >> ${RESULTS_DIR}/${RESULTS_FILE}
${DISKSTATS} >> ${RESULTS_DIR}/${RESULTS_FILE}
echo "" >> ${RESULTS_DIR}/${RESULTS_FILE}
cd linux-${LINUX_VER}

# Make the config ("yes ''" accepts every default prompt)
echo "make oldconfig" >> ${RESULTS_DIR}/${RESULTS_FILE}
${DISKSTATS} >> ${RESULTS_DIR}/${RESULTS_FILE}
(time yes '' | make oldconfig) 2>&1 >/dev/null | grep -A2 ^real >> ${RESULTS_DIR}/${RESULTS_FILE}
${DISKSTATS} >> ${RESULTS_DIR}/${RESULTS_FILE}
echo "" >> ${RESULTS_DIR}/${RESULTS_FILE}

# Make clean before build
echo "make clean before build" >> ${RESULTS_DIR}/${RESULTS_FILE}
${DISKSTATS} >> ${RESULTS_DIR}/${RESULTS_FILE}
(time make clean) 2>&1 >/dev/null | grep -A2 ^real >> ${RESULTS_DIR}/${RESULTS_FILE}
${DISKSTATS} >> ${RESULTS_DIR}/${RESULTS_FILE}
echo "" >> ${RESULTS_DIR}/${RESULTS_FILE}

# Make
echo "make -j ${MAKE_J} the kernel" >> ${RESULTS_DIR}/${RESULTS_FILE}
${DISKSTATS} >> ${RESULTS_DIR}/${RESULTS_FILE}
(time make -j ${MAKE_J}) 2>&1 >/dev/null | grep -A2 ^real >> ${RESULTS_DIR}/${RESULTS_FILE}
${DISKSTATS} >> ${RESULTS_DIR}/${RESULTS_FILE}
echo "" >> ${RESULTS_DIR}/${RESULTS_FILE}

# Make clean after build
echo "make clean after build" >> ${RESULTS_DIR}/${RESULTS_FILE}
${DISKSTATS} >> ${RESULTS_DIR}/${RESULTS_FILE}
(time make clean) 2>&1 >/dev/null | grep -A2 ^real >> ${RESULTS_DIR}/${RESULTS_FILE}
${DISKSTATS} >> ${RESULTS_DIR}/${RESULTS_FILE}
echo "" >> ${RESULTS_DIR}/${RESULTS_FILE}

# Remove the source
cd /
echo "remove source directory" >> ${RESULTS_DIR}/${RESULTS_FILE}
${DISKSTATS} >> ${RESULTS_DIR}/${RESULTS_FILE}
(time rm -rf ${BASE_DIR}/linux-${LINUX_VER}) 2>&1 >/dev/null | grep -A2 ^real >> ${RESULTS_DIR}/${RESULTS_FILE}
${DISKSTATS} >> ${RESULTS_DIR}/${RESULTS_FILE}
| true
|
818a69eea4afd99729eaed435815fab5e2e0189d
|
Shell
|
leandrocprates/projecteste
|
/aplicacao_poco/batch_portabilidade/branches_3.0/script.sh
|
UTF-8
| 142
| 2.6875
| 3
|
[] |
no_license
|
#!/bin/sh

# Convert every BDT* file under the given directory ($1) from DOS to Unix
# line endings, logging the directory to a fixed trace file.
dir=$1
echo "diretorio dos arquivos convertidos: $dir" >> /tmp/teste_portabilidade.txt
# NOTE(review): filenames with spaces would break this find | xargs
# pipeline; consider find -print0 | xargs -0.
find $dir -name "BDT*" | xargs dos2unix
| true
|
806435187b6b90c9841383df184ad1e4f89d4b37
|
Shell
|
bstiles/bin
|
/overrides/java.old
|
UTF-8
| 698
| 3.828125
| 4
|
[] |
no_license
|
#!/bin/bash

# java wrapper: walks from the filesystem root back down to the current
# directory, sourcing any "java.old.env" file found along the way (root
# first, so the nearest directory's settings win), then exec's the real
# java binary with $JAVA_OPTS.

shopt -s extglob
set -o errexit

# refuse to run from a deleted working directory
if [ -n "$(cd . 2>&1)" ]; then
    echo "Can't run ${BASH_SOURCE[0]} from a non-existent directory!"
    exit 1
fi

here=$(cd "${BASH_SOURCE[0]%/*}" && pwd)
# env file name is derived from this script's own name
ENV_FILE=${0##*/}.env

pushes=()
# pushd and record it, so the walk can be unwound with matching popd's
function push {
    pushd "$1" > /dev/null
    pushes=(${pushes[@]} ".")
}

# climb to the filesystem root, one directory at a time
until [[ $PWD == / ]]; do
    push ..
done

# source the root-level env file first...
if [[ -f $ENV_FILE ]]; then
    . "$ENV_FILE"
fi
# ...then unwind, sourcing env files on the way back down so deeper
# directories override shallower ones
for x in "${pushes[@]}"; do
    popd > /dev/null
    if [[ -f $ENV_FILE ]]; then
	. "$ENV_FILE"
    fi
done

# Pick the JVM: explicit $JAVA, else $JAVA_HOME/bin/java, else the first
# java on PATH that is not this wrapper itself
if [ -n "$JAVA_HOME" ]; then
    JAVA=${JAVA-"$JAVA_HOME/bin/java"}
else
    JAVA=${JAVA-"$(which -a java | grep -v "$here/${0##*/}" | head -1)"}
fi

exec "$JAVA" $JAVA_OPTS "$@"
| true
|
ed0f430156c839e14f5e64061907bcd45364ecf2
|
Shell
|
ngako/docker-drupal8
|
/app/conf/add-local-user.sh
|
UTF-8
| 358
| 3.078125
| 3
|
[] |
no_license
|
#!/bin/bash

# Add local user
# Either use the LOCAL_USER_ID if passed in at runtime or
# fallback to default user

USER_ID=${LOCAL_USER_ID:-9001}
USER_NAME=dev

echo "create user $USER_NAME with UID $USER_ID"
# -o allows a non-unique UID so the container user can mirror the host UID
useradd --shell /bin/bash -u $USER_ID -o -c "" -m $USER_NAME

# Add ll alias
echo "alias ll='ls -al'" >> /home/dev/.bashrc
chown dev:dev -R /home/dev
| true
|
90af8a3673d54390b2de14292b19673ac4e6d3b9
|
Shell
|
vorburger/vorburger-dotfiles-bin-etc
|
/debian-install.sh
|
UTF-8
| 992
| 3.484375
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/usr/bin/env bash
set -euxo pipefail

# This is Debian specific; see ubuntu-install.sh for Ubuntu specific stuff, and apt-install.sh for stuff that's common to Debian and Ubuntu.

# Require the Debian release number so the right openSUSE repo URL is built
if [ $# -ne 1 ]; then
    echo "Usage: $0 <Debian-Version> (9.0, 10, 11)"
    echo "E.g. $0 11"
    exit 1
fi
DEBIAN_VERSION=$1

# https://fishshell.com =>
# https://software.opensuse.org/download.html?project=shells%3Afish%3Arelease%3A3&package=fish
# (because Debian 11 packages an ancient Fish v3.1.2 which is 1.5 years behind)
sudo apt install -y curl gpg
echo "deb http://download.opensuse.org/repositories/shells:/fish:/release:/3/Debian_$DEBIAN_VERSION/ /" | sudo tee /etc/apt/sources.list.d/shells:fish:release:3.list
curl -fsSL "https://download.opensuse.org/repositories/shells:fish:release:3/Debian_$DEBIAN_VERSION/Release.key" | gpg --dearmor | sudo tee /etc/apt/trusted.gpg.d/shells_fish_release_3.gpg > /dev/null
# NB this ^^^ must be done BEFORE the apt update that comes NEXT
./apt-install.sh
| true
|
39a1f189da305d7ff1711e3b643bba631c86cc9e
|
Shell
|
NYCPlanning/docker-geosupport
|
/patch.sh
|
UTF-8
| 559
| 3.015625
| 3
|
[] |
no_license
|
#!/bin/bash

# Download and unpack the Geosupport UPAD/TPAD patch identified by the
# RELEASE / PATCH / MAJOR / MINOR environment variables (expected to be
# set by the caller, e.g. the Dockerfile). PATCH=0 means "no patch yet".
if [[ ${PATCH} = 0 ]]; then
    echo "NO UPAD AVAILABLE YET ..."
else
    echo "YES UPAD IS AVAILABLE linux_upad_tpad_${RELEASE}${PATCH}"
    # download the zip, extract it into the versioned fls directory, then
    # clean up; the && chain stops at the first failing step
    mkdir linux_upad_tpad_${RELEASE}${PATCH} &&
    curl -o linux_upad_tpad_${RELEASE}${PATCH}/linux_upad_tpad_${RELEASE}${PATCH}.zip https://s-media.nyc.gov/agencies/dcp/assets/files/zip/data-tools/bytes/linux_upad_tpad_${RELEASE}${PATCH}.zip &&
    unzip -o linux_upad_tpad_${RELEASE}${PATCH}/*.zip -d version-${RELEASE}_${MAJOR}.${MINOR}/fls/ &&
    rm -r linux_upad_tpad_${RELEASE}${PATCH}
fi
| true
|
3ab78630095a7061a501a14721a9a58158b119d3
|
Shell
|
KyrosKrane/bitcoin-classic-installer
|
/install_classic.sh
|
UTF-8
| 7,266
| 4.1875
| 4
|
[] |
no_license
|
#!/bin/bash

################################################################################
# Copyright (c) 2016 George Macoukji (macoukji@gamesnet.org)
# License: CC-BY
# Licensed under a Creative Commons Attribution 4.0 International License.
# See here for more info: http://creativecommons.org/licenses/by/4.0/
################################################################################

###############################
# Script settings
# Update these for your environment
###############################

# This is the URL of the Bitcoin Classic binary distributable for your version of Linux.
CLASSIC_URL="https://github.com/bitcoinclassic/bitcoinclassic/releases/download/v0.11.2.cl1/bitcoin-0.11.2-linux64.tar.gz"

# This is the hash of the download archive, to verify it downloaded right.
# The authors of Classic will publish this in a file named SHA256SUMS.asc
# Copy the value for the file you selected here.
CLASSIC_HASH="3f4eb95a832c205d1fe3b3f4537df667f17f3a6be61416d11597feb666bde4ca"

# This is the temporary directory in your system.
# If you're not sure what that is, it's usually safe to leave this as the default.
TEMP_DIR="/tmp"


#####*****#####*****#####*****#####*****#####*****#####*****#####
#####*****#####*****#####*****#####*****#####*****#####*****#####
##                                                             ##
## Don't modify below here unless you know what you're doing.  ##
##                                                             ##
#####*****#####*****#####*****#####*****#####*****#####*****#####
#####*****#####*****#####*****#####*****#####*****#####*****#####


###############################
# Internal script settings
###############################

# Subdirectory of TEMP_DIR used for the download/unpack workspace
Temp_subdir="classic"


###############################
# Functions
###############################

#
# Downloads the archive for Bitcoin Classic and unzips it.
# Takes two parameters:
# 1: The URL of the archive to download
# 2: The path in which to download and unzip it.
# On a hash mismatch the whole script exits with status 2.
#
DownloadClassic()
{
	typeset URL="$1"
	typeset Location="$2"

	# Store the old path we were in
	typeset OldPath="$PWD"

	# Get the filename of the Classic archive file
	Classic_filename="$(basename "$URL")"

	# Delete prior downloads and recreate a clean workspace
	cd "$Location"
	if [ -d "$Temp_subdir" ] ; then
		echo "Deleting prior download directory"
		rm -rf "$Temp_subdir"
	fi
	mkdir "$Temp_subdir"
	cd "$Temp_subdir"

	# Download Classic.
	echo "Downloading Classic"
	wget "$URL"

	# Verify download is correct: compare its sha256 against CLASSIC_HASH
	echo "Verifying download"
	typeset Download_Hash=$(sha256sum "$Classic_filename" | cut -f1 -d" ")
	if [ "$Download_Hash" != "$CLASSIC_HASH" ] ; then
		echo "ERROR: Could not download Bitcoin Classic correctly. Exiting." >&2
		echo "Downloaded file is: $Location/$Temp_subdir/$Classic_filename." >&2
		exit 2
	fi

	# Unzip Classic.
	echo "Unzipping Classic"
	tar -xvzf "$Classic_filename"

	# Return to the path we were in previously
	cd "$OldPath"
} # DownloadClassic()
###############################
# Check existing bitcoind
###############################
# Find the location of the bitcoin executables
Bitcoind_pathfile="$(which bitcoind)"
if [ -z "$Bitcoind_pathfile" ] ; then
    #bitcoind is not installed; just need to download.
    Version="not-installed"
    # The path we'll use to put the bitcoin executables is not known.
    # So, we'll take a good guess and stick them in the same directory as wget.
    Bin_Dir="$(dirname "$(which wget)")"
else
    # Figure out what version of bitcoin is installed now
    Version_String="$(bitcoind --version | head -1)"
    if echo "$Version_String" | grep -q "Bitcoin Core Daemon" ; then
        Version="Core"
    elif echo "$Version_String" | grep -q "Bitcoin XT Daemon" ; then # @TODO: Verify the XT version string
        Version="XT"
    elif echo "$Version_String" | grep -q "Bitcoin Classic Daemon" ; then
        Version="classic"
    else
        Version="other"
    fi
    # Get the directory name of the executables
    Bin_Dir="$(dirname "$Bitcoind_pathfile")"
fi
# Shut down bitcoind if it's running. A zero exit status from
# "bitcoin-cli stop" means a daemon accepted the stop request; remember that
# so we can restart it (as Classic) at the end.
echo "Stopping bitcoind."
bitcoin-cli stop 2>/dev/null
if [ 0 -eq $? ] ; then
    Bitcoind_was_started="true"
    # Give bitcoind a few seconds to exit while we do the download.
    # This background sleep is reaped by the "wait" below.
    sleep 5 &
else
    Bitcoind_was_started="false"
fi
###############################
# Download Bitcoin Classic
###############################
# Download and unzip Classic. Delete prior downloads
DownloadClassic "$CLASSIC_URL" "$TEMP_DIR"
# Figure out what the unzipped directory name is
# (the directory entry inside the unpack area, per "ls -l").
UnzipDir="$TEMP_DIR/$Temp_subdir/$(cd "$TEMP_DIR/$Temp_subdir/"; ls -l | grep ^d | awk '{print $NF}')"
###############################
# Prepare for installation
###############################
# Wait for bitcoind to exit
echo "Waiting for bitcoind to exit"
wait
# Set up the sudoscript: all root-requiring actions are collected into one
# script so only a single "sudo" invocation is needed later.
echo "#!/bin/bash" > "$TEMP_DIR/$Temp_subdir/sudoscript.sh"
# Check if the old executables are actual files or symbolic links (such as from running this script before).
# First character of "ls -l" output: "l" marks a symbolic link.
File_type=$(ls -l "$Bitcoind_pathfile" | cut -c1)
# Rename or delete existing executables
if [ "not-installed" = "$Version" ] ; then
    # If no prior version of bitcoind is installed, then we don't need to clean up the old executables.
    # Run any old command so that bash doesn't complain about an empty if block.
    true
elif [ "l" = "$File_type" ] ; then
    # old executable is a symbolic link. Just delete it.
    cat <<EOF >> "$TEMP_DIR/$Temp_subdir/sudoscript.sh"
echo "Deleting old symbolic links"
rm "$Bin_Dir/bitcoind"
rm "$Bin_Dir/bitcoin-cli"
EOF
elif [ "classic" = "$Version" ] ; then
    # A previous version of classic is installed. Delete it.
    cat <<EOF >> "$TEMP_DIR/$Temp_subdir/sudoscript.sh"
echo "Deleting prior Classic executables"
rm "$Bin_Dir/bitcoind"
rm "$Bin_Dir/bitcoin-cli"
EOF
else
    # Core, XT, or some other version is installed as an executable. Move them to that version.
    cat <<EOF >> "$TEMP_DIR/$Temp_subdir/sudoscript.sh"
echo "Renaming old ($Version) executables"
mv "$Bin_Dir/bitcoind" "$Bin_Dir/bitcoind-$Version"
mv "$Bin_Dir/bitcoin-cli" "$Bin_Dir/bitcoin-cli-$Version"
EOF
fi
cat <<EOF >> "$TEMP_DIR/$Temp_subdir/sudoscript.sh"
# Copy the Classic executables to the bin directory
echo "Copying new Classic executables"
cp "$UnzipDir/bin/bitcoin-cli" "$Bin_Dir/bitcoin-cli-classic"
cp "$UnzipDir/bin/bitcoind" "$Bin_Dir/bitcoind-classic"
# Create a soft (symbolic) link from the normal filenames to the classic executables.
echo "Setting up Classic executables"
cd "$Bin_Dir"
ln -s bitcoin-cli-classic bitcoin-cli
ln -s bitcoind-classic bitcoind
EOF
###############################
# Install Bitcoin Classic
###############################
echo
echo
echo " ***** ***** ATTENTION ***** *****"
echo
echo " Setting up Classic executables."
echo "You may be prompted for your password in a second."
echo " Please enter it to continue."
echo
echo " ***** ***** ATTENTION ***** *****"
echo
sleep 1
# Run the sudoscript
sudo sh "$TEMP_DIR/$Temp_subdir/sudoscript.sh"
###############################
# Clean up
###############################
# Restart bitcoind (as Classic) if it was started before the script
if [ "true" = "$Bitcoind_was_started" ] ; then
    echo "Starting bitcoind Classic"
    bitcoind
fi
# Clean up after ourselves
rm -rf "$TEMP_DIR/$Temp_subdir"
echo "All done!"
| true
|
7e2229a58596261ba8efd61a714971a147b820b9
|
Shell
|
tamos/ValuesInText
|
/shell_code/join_us_data.sh
|
UTF-8
| 1,643
| 3.109375
| 3
|
[] |
no_license
|
# Joins the US congressional-record dumps (speaker maps, descriptions,
# speeches) into one pipe-delimited CSV, then trims it by date and chamber.
#
# format is
# speakermap
# speakerid|speech_id|lastname|firstname|chamber|state|gender|party|district|nonvoting
# descr
# speech_id|chamber|date|number_within_file|speaker|first_name|last_name|state|gender|line_start|line_end|file|char_count|word_count
# speeches
# speech_id|speech
# NOTE(review): _tmp is created but never used below — all intermediates land
# in the current directory. Confirm whether they were meant to go in _tmp.
mkdir _tmp
# concatenate speakermap files and sort on the first column, speech id, then remove duplicates
# (speech_id is field 2 of the speakermap layout above, hence -k2,2)
cat *SpeakerMap.txt | sort -k2,2 -t "|" | uniq > _concat_spkrmap
# concatenate the descr files and sort on first field, speech id , remove duplicates
cat descr* | sort -k1,1 -t "|" | uniq > _concat_descr
# join the files
join -1 1 -2 2 -t "|" _concat_descr _concat_spkrmap | sort -k1,1 -t "|" > _joined_desc_spkrmap
# place all speeches in one file
cat speeches_* > speechall
# sort and drop duplicates as before
sort -k1,1 -t "|" speechall | uniq > _concat_speeches
# join with the other joined file and reverse order so the field names are at the top
# NOTE(review): "tail -r" is BSD/macOS-only; on GNU systems "tac" is the equivalent.
join -1 1 -2 1 -t "|" _concat_speeches _joined_desc_spkrmap | tail -r > us_data_full.csv
# trim the us data by date. This happens in python for canada, but here we do it because the dates are integers like 19861018
awk -F "|" 'NR ==1 {print $0} NR > 1 {if ($4 > 19931231) {print $0} } ' < us_data_full.csv > us_data_trimmed_date.csv
# trim the us data by chamber - i.e., only house
awk -F "|" 'NR ==1 {print $0} NR > 1 {if ($3 != "S") {print $0} } ' < us_data_trimmed_date.csv > us_data_trimmed.csv
# clean up
rm speechall
rm _joined_desc_spkrmap
rm _concat_speeches
rm _concat_spkrmap
rm _concat_descr
rm us_data_trimmed_date.csv
rm us_data_full.csv
| true
|
74c90a3bc4667450d592eaa4322be0d9ca719d92
|
Shell
|
horngyih/work-shellscripts
|
/stop-docker-tomcat.sh
|
UTF-8
| 292
| 3.28125
| 3
|
[] |
no_license
|
#!/bin/sh
# Stops and removes the "tomcat-dev" Docker container if it is running.
# container-running.sh exits 0 when the named container is up.
sh /d/work/scripts/shellscripts/container-running.sh tomcat-dev;
running=$?
if [ $running -eq 0 ]
then
    echo "Tomcat Dev Docker is running";
    echo "Stopping...";
    docker stop tomcat-dev && docker rm tomcat-dev;
    exit 0;
else
    echo "Tomcat Dev Docker is not running";
    # "exit -1" is not valid POSIX (exit takes an unsigned value; some shells
    # reject a negative argument). exit 1 keeps the non-zero "not running" intent.
    exit 1;
fi
| true
|
bc6b0010f5cfeabbfaab70e0025adbcd57403aae
|
Shell
|
qdzlug/joyent-gerrit
|
/bin/crimport
|
UTF-8
| 6,384
| 4.28125
| 4
|
[] |
no_license
|
#!/bin/bash
#
# crimport REPO_GITHUB_ACCOUNT REPO_NAME [GERRIT_USER]: imports the given
# repository into gerrit. See the usage message for details.
#
# All globals in this script share the igp_ prefix.
#
#
# Configuration
#
# DNS name or IP address of Gerrit server
igp_gerrit_host=cr.joyent.us
# Group name for Gerrit superusers
igp_gerrit_group="Temporary Git Superusers"
# Temporary directory for cloned project (honors $TMPDIR, else /var/tmp;
# suffixed with our PID so concurrent runs do not collide)
igp_tmproot="${TMPDIR-/var/tmp}/crimport-$$"
#
# Runtime state
#
#
# SSH host string. This is generally the same as igp_gerrit_host, but may
# include a "username@" if specified on the command line.
#
igp_gerrit_sshhost=
#
# Source repository: GitHub account and repository name.
# These are filled in from the command-line arguments.
#
igp_gh_account=
igp_gh_repo=
# Gerrit project name. This is constructed from the GitHub parameters.
igp_gerrit_project=
# Gerrit username. This is reported by the server.
igp_gerrit_username=
# Directory into which the source repository is cloned.
igp_clonedir=
# usage: print command-line help to stderr and exit 2 (conventional
# "bad usage" status). The heredoc expands $igp_gerrit_host into the text.
function usage
{
    cat <<EOF >&2
usage: crimport REPO_GITHUB_ACCOUNT REPO_NAME [GERRIT_USER]
This tool copies a GitHub repository at
https://github.com/REPO_GITHUB_ACCOUNT/REPO_NAME
to a new project on the Joyent Gerrit server at $igp_gerrit_host. The new
Gerrit project will be configured to replicate back to GitHub.
REPO_GITHUB_ACCOUNT may be either an individual GitHub account or a GitHub
organization.
If GERRIT_USER is provided, then that username is used when pushing over ssh to
Gerrit.
This tool must be run as a Gerrit administrator. It will temporarily add you to
the list of Git Superusers in order to do the import, and it will remove you
from that group upon completion.
EOF
    exit 2
}
#
# main ACCOUNT REPO [GERRIT_USER]: top-level driver. Validates arguments,
# clones the GitHub repo, creates the Gerrit project, temporarily joins the
# superuser group to push history, then leaves the group again. Cleanup of
# the temporary clone happens via the EXIT trap installed here.
#
function main
{
    trap igp_cleanup exit
    if [[ $# != 2 && $# != 3 ]]; then
        usage
    fi
    # Restrict names to a safe character set before they are interpolated
    # into git/ssh command lines below.
    if [[ ! $1 =~ ^[-a-zA-Z0-9_]+$ ]] ||
        [[ ! $2 =~ ^[-a-zA-Z0-9_]+$ ]]; then
        fail "GitHub account or repo name contains unsupported" \
            "characters"
    fi
    igp_gh_account=$1
    igp_gh_repo=$2
    igp_gerrit_project="$1/$2"
    igp_clonedir="$igp_tmproot/$igp_gh_repo"
    if [[ -n "$3" ]]; then
        igp_gerrit_sshhost="$3@$igp_gerrit_host"
    else
        igp_gerrit_sshhost="$igp_gerrit_host"
    fi
    echo -n "Detecting your Gerrit username ... "
    igp_gerrit_configure || fail "failed"
    echo "$igp_gerrit_username"
    echo "Cloning github.com repository $igp_gerrit_project into " \
        "\"$igp_clonedir\"" >&2
    mkdir "$igp_tmproot" || fail "failed to mkdir \"$igp_tmproot\""
    igp_github_clone $igp_gh_account $igp_gh_repo "$igp_clonedir" ||
        fail "failed to clone"
    echo "Creating remote Gerrit project $igp_gerrit_project" >&2
    igp_project_create $igp_gerrit_project \
        "http://github.com/$igp_gh_account/$igp_gh_repo" ||
        fail "failed to create Gerrit project"
    #
    # Fortunately, adding and removing members using this interface is
    # idempotent, so we don't need to check first.
    #
    echo -n "Adding '$igp_gerrit_username' to" \
        "group \"$igp_gerrit_group\" ... "
    igp_gerrit set-members "'$igp_gerrit_group'" \
        --add "$igp_gerrit_username" || fail "failed"
    echo "Pushing existing changes to Gerrit project" >&2
    igp_gerrit_push "$igp_clonedir" "$igp_gerrit_project" || \
        fail "failed to push changes"
    echo -n "Removing '$igp_gerrit_username' from" \
        "group \"$igp_gerrit_group\" ... "
    igp_gerrit set-members "'$igp_gerrit_group'" \
        --remove "$igp_gerrit_username" || fail "failed"
    cat <<EOF
Success! The following project has been created on the Gerrit server:
https://$igp_gerrit_host/#/admin/projects/$igp_gerrit_project
To make sure replication to GitHub will work, please make sure that the
"Joyent Engineering" team on GitHub has write access to the GitHub project
by visiting this page:
https://github.com/$igp_gerrit_project/settings/collaboration
EOF
}
# fail MESSAGE...: report a fatal error on stderr and abort with status 1.
fail()
{
    printf 'crimport: %s\n' "$*" >&2
    exit 1
}
#
# igp_cleanup: clean up temporary files. We do this for both normal and
# abnormal exit (it is installed as the EXIT trap in main).
#
function igp_cleanup
{
    #
    # This is just an especially paranoid check so that we don't do
    # something terrible.
    #
    if [[ -d $igp_tmproot ]]; then
        echo -n "cleaning up $igp_tmproot ... " >&2
        # Only remove the clone if it really looks like a git checkout.
        (cd $igp_tmproot && \
            [[ -d $igp_gh_repo/.git ]] &&
            rm -rf $igp_gh_repo)
        rmdir $igp_tmproot
        echo "done." >&2
    fi
}
#
# igp_gerrit ARGS...
#
# This runs the gerrit CLI over ssh. For example, to run the "flush-caches"
# subcommand, run "igp_gerrit flush-caches".
# (igp_gerrit_sshhost is filled in by main before any call reaches here.)
#
function igp_gerrit
{
    ssh $igp_gerrit_sshhost gerrit "$@"
}
#
# igp_project_create NAME SOURCE
#
# Creates a project called NAME on the Gerrit server with appropriate settings.
# NAME is the "account/repo" Gerrit project path; SOURCE is the GitHub URL
# that ends up in the project description.
#
function igp_project_create
{
    #
    # The extra quotes around the description are necessary, per the Gerrit
    # create-project documentation.
    #
    igp_gerrit "create-project" \
        --parent="GitHub-Joyent" \
        --description="'Authoritative source for $2'" \
        --submit-type=FAST_FORWARD_ONLY \
        "$1"
}
#
# igp_github_clone ACCOUNT REPONAME DSTPATH
#
# Clones the specified repository to the specified local path.
# ACCOUNT and REPONAME are re-validated here (defense in depth; main already
# checks them) before being interpolated into the git URL.
#
function igp_github_clone
{
    if [[ ! $1 =~ ^[-a-zA-Z0-9_]+$ ]] ||
        [[ ! $2 =~ ^[-a-zA-Z0-9_]+$ ]]; then
        fail "bogus account or repo name!"
    fi
    # Quote the destination so a path containing spaces is one argument.
    git clone "git@github.com:$1/$2.git" "$3"
}
#
# igp_gerrit_push REPO_PATH PROJECT_NAME
#
# For the local git repository at REPO_PATH, add a remote for the Gerrit project
# PROJECT_NAME and push the repository to it.
#
function igp_gerrit_push
{
    # Subshell with errexit: a failing cd or git command aborts only the
    # subshell, and we then report a single failure message.
    (
        set -o errexit
        cd "$1" 2>&1;
        git remote add cr git+ssh://$igp_gerrit_sshhost/$2.git
        git push cr master
    ) || fail "failed to push to Gerrit"
}
#
# igp_gerrit_configure: determine the current Gerrit username based on what the
# server reports when we try to use it. If the user specified a username
# argument, this is redundant. But they may not have, and this approach will
# always report whatever username we're using to access the server.
#
# The actual approach leaves something to be desired, but Gerrit doesn't provide
# the equivalent of who(1) or id(1). On the plus side, this output should be
# stable enough for use here.
#
function igp_gerrit_configure
{
    # Scrape the "git clone ssh://USERNAME@..." hint out of the server's
    # greeting banner and keep just the USERNAME part.
    igp_gerrit_username=$(ssh $igp_gerrit_sshhost 2>&1 | \
        grep '^\s\+git clone.*REPOSITORY_NAME.git\s*$' | \
        sed -e s'#.*ssh://\(.*\)@.*#\1#') ||
        fail "failed to parse out Gerrit username"
    if [[ -z "$igp_gerrit_username" ]]; then
        fail "failed to discover Gerrit username"
    fi
}
# Entry point.
main "$@"
| true
|
c34045eec5a40660d4368a927e6c05ff7782e32b
|
Shell
|
rjungemann/glitcher
|
/bashrc
|
UTF-8
| 339
| 3.046875
| 3
|
[
"MIT"
] |
permissive
|
# Resolve the real location of this script (following any chain of symlinks)
# and prepend its bundled homebrew/ffmpegX binaries to PATH.
SCRIPT_PATH="${BASH_SOURCE[0]}"
# Follow symlinks until we reach an actual file.
# NOTE(review): like the original, a relative symlink target is resolved from
# $PWD rather than from the link's own directory — confirm that is acceptable.
while [ -h "${SCRIPT_PATH}" ]; do
  SCRIPT_PATH=$(readlink "${SCRIPT_PATH}")
done
# Temporarily cd into the script's directory to canonicalize the path.
pushd . > /dev/null
cd "$(dirname "${SCRIPT_PATH}")" > /dev/null
SCRIPT_PATH=$(pwd)
popd > /dev/null
export PATH=${SCRIPT_PATH}/homebrew/bin:${SCRIPT_PATH}/ffmpegX.app/Contents/Resources:$PATH
| true
|
59615239d7bbbb5cfb7d10065aee4b2753959e66
|
Shell
|
real-dcnet/dcnet-source
|
/tests/vm_extract.sh
|
UTF-8
| 229
| 2.625
| 3
|
[] |
no_license
|
#!/bin/bash
# Copies migrate_test_vm.out from each remote test-run directory on the
# dcnet host into the matching local directory under $1.
TEST_DIRS=("lf5-lf5" "lf5-lf6" "lf5-lf3" "lf5-lf8" "lf5-lf14")
for dir in "${TEST_DIRS[@]}"
do
    echo "$dir"
    for i in $(seq 1 10)
    do
        # Quote both paths so a destination root containing spaces still works.
        scp "dcnet@128.10.126.57:$dir/run$i/migrate_test_vm.out" "$1/$dir/run$i"
    done
done
| true
|
7477b4e79290a170c6ce54c20a48af15455b15d9
|
Shell
|
cluabauti/ISO
|
/punto12_b.sh
|
UTF-8
| 541
| 3.515625
| 4
|
[] |
no_license
|
#!/bin/bash
#Realiza un script que le solicite al usuario 2 numeros
#los lea de la entrada Standard e imprima la
#multiplicacion, suma, resta y cual es la mayor de los numeros leidos

# calc A B: print the product, sum, difference, and which of the two integer
# arguments is larger (or that they are equal). Returns 1 and prints a usage
# message when not given exactly two arguments (the original silently did
# nothing in that case).
calc() {
    if [ $# -ne 2 ]; then
        echo "Uso: se requieren exactamente 2 numeros" >&2
        return 1
    fi
    echo "Multiplicacion entre $1 y $2:"
    echo $(($1 * $2))
    echo "Suma entre $1 y $2:"
    echo $(($1 + $2))
    echo "Resta entre $1 y $2:"
    echo $(($1 - $2))
    echo "Numero mayor entre $1 y $2:"
    if [ "$1" -gt "$2" ]; then
        echo "$1 es mayor que $2"
    elif [ "$1" -lt "$2" ]; then
        echo "$2 es mayor que $1"
    else
        echo "$1 y $2 son iguales"
    fi
}

calc "$@"
| true
|
d82f66170a9769d2a4230b21238b32c306e6d5e4
|
Shell
|
SolarAquarion/trizen
|
/archlinux/PKGBUILD
|
UTF-8
| 871
| 2.546875
| 3
|
[] |
no_license
|
# Maintainer: Trizen <echo dHJpemVueEBnbWFpbC5jb20K | base64 -d>
# PKGBUILD for trizen, built from the tip of the git master branch.
pkgname=trizen
pkgver=159.643662c
pkgrel=1
pkgdesc="Trizen's AUR Package Manager: A lightweight wrapper for AUR."
arch=('any')
url="https://github.com/trizen/trizen"
license=('GPL3');
depends=(
'git'
'diffutils'
'perl>=5.10.0'
'perl-libwww'
'perl-term-ui'
'pacman'
'perl-json'
'perl-data-dump'
'perl-lwp-protocol-https'
)
optdepends=(
'perl-json-xs: faster JSON deserialization'
'perl-term-readline-gnu: for better STDIN support'
)
# Clone over git://; the checksum is skipped because the source is a
# moving VCS tree (versioned by pkgver() below instead).
source=('git://github.com/trizen/trizen.git')
md5sums=('SKIP')
# pkgver(): derive "<commit count>.<short hash>" so each new commit on
# master yields a monotonically increasing package version.
pkgver() {
cd $pkgname
echo $(git rev-list --count master).$(git rev-parse --short master)
}
# package(): install the single trizen script into /usr/bin.
package() {
cd $pkgname
install -m 755 -D $pkgname "$pkgdir/usr/bin/$pkgname"
}
| true
|
f18fdb6f344189cf1fda9a0a1e3e63dbbb54fdf9
|
Shell
|
Enryou/ROS_Turtlebot-Encryption
|
/autostart/autostart.sh
|
UTF-8
| 1,061
| 2.609375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Autostart script: prepares the ROS Kinetic environment, then brings up the
# turtlebot simulation stack (roscore, Gazebo world, AMCL, RViz, teleop),
# appending all output to LOG_FILE.
# NOTE(review): LOG_FILE lives under /home/usr/ while the workspace below is
# under /home/faolin/ — confirm the intended home directory.
LOG_FILE=/home/usr/autostart/logs/log_autostart.txt
# Blank spacer lines separate consecutive runs in the log.
echo "" >> ${LOG_FILE}
echo "" >> ${LOG_FILE}
echo "" >> ${LOG_FILE}
echo "" >> ${LOG_FILE}
echo "#############################################" >> ${LOG_FILE}
echo "Running autostart.sh" >> ${LOG_FILE}
echo $(date) >> ${LOG_FILE}
echo "#############################################" >> ${LOG_FILE}
echo "" >> ${LOG_FILE}
echo "Logs:" >> ${LOG_FILE}
# Abort if any environment-setup step fails.
set -e
{
source /opt/ros/kinetic/setup.bash
source /home/faolin/catkin_ws/devel/setup.bash
export ROS_WORKSPACE=/home/catkin_ws
# NOTE(review): the xxx.xxx addresses are placeholders to be filled in per
# deployment (see the "e. g." hints).
export ROS_MASTER_URI=http://xxx.xxx.xxx.xx:11311/ ##e. g. Master
export ROS_IP=xxx.xxx.xxx.xx ##e. g. Own IP
sleep 8
} &>> ${LOG_FILE}
# Echo each command while launching; nodes start in the background with
# sleeps in between so each is up before the next one starts. The final
# (foreground) teleop launch blocks until it exits, then everything is killed.
set -v
{
roscore &
sleep 10
roslaunch turtlebot_gazebo turtlebot_world.launch &
sleep 5
roslaunch turtlebot_gazebo amcl_demo.launch &
sleep 5
roslaunch turtlebot_rviz_launchers view_navigation.launch &
sleep 5
roslaunch turtlebot_teleop keyboard_teleop.launch
killall rosout roslaunch rosmaster gazebo
} &>> ${LOG_FILE}
| true
|
a8a4a216fbb9b89584431144927a7e5d01202439
|
Shell
|
go-po/po
|
/scripts/reset-rabbit.sh
|
UTF-8
| 147
| 2.703125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Deletes every queue on the "po_mq" RabbitMQ container.
# The shebang is bash (not sh): $'\n' is a bash/ksh/zsh quoting form; under
# dash the original "#!/bin/sh" left IFS as the two literal characters \n,
# breaking the per-line split of the queue list.
IFS=$'\n'
for q in $(docker exec po_mq rabbitmqctl -qs list_queues name); do
  docker exec po_mq rabbitmqctl delete_queue "${q}"
done;
| true
|
cd275dd76b47f9806aed8e2c351ab9afb3120c64
|
Shell
|
ETAChalmers/ETA-Hemsida
|
/publish.sh
|
UTF-8
| 305
| 2.578125
| 3
|
[] |
no_license
|
#!/bin/sh
# Regenerates the Hugo site and force-publishes the ./public output as the
# orphan "deploy" branch. (git branch -D is best-effort: it fails harmlessly
# on the first run, before a deploy branch exists.)
echo "Deleting old publication"
rm -rf public
echo "Generating site"
hugo
git branch -D deploy
git checkout --orphan deploy
# "git rm -r --cached" requires a pathspec; "." unstages the whole index so
# only the freshly generated site is committed below.
git rm -r --cached .
git add -f ./public
git mv ./public/* ./
git clean -fd
git commit -m "Deploy"
git push --force --set-upstream origin deploy
git checkout master
| true
|
0d45faf3e88e17c8469c3d6aa9650d7bd2f39dd7
|
Shell
|
ixcat/admmk
|
/bin/mk
|
UTF-8
| 503
| 2.890625
| 3
|
[
"ISC"
] |
permissive
|
#! /bin/sh
# admmk PMake driver
# set make(1) command -
# should point to appropriate PMake -
# systems explicitly listed are expected to work as defined -
# otherwise, we assume the platform make to be incompatible
# and explicitly require a 'bmake'.
case $(uname -s) in
"FreeBSD"|"NetBSD"|"DragonFly")
    MAKE=make
    ;;
"OpenBSD"|"Linux")
    MAKE=bmake
    ;;
*)
    MAKE=bmake
    ;;
esac
# "$@" (not $*) keeps arguments containing spaces intact; $SYS is quoted so
# a path with spaces still forms valid -m arguments.
exec $MAKE -m "$SYS/mk.local" -m "$SYS/mk" "$@"
| true
|
a3e9c92e384fe4ee0c14d605fd72462fb1bea70b
|
Shell
|
stephenmsachs/fv3gfs
|
/ecf/ecfutils/CROW/model/fv3gfs/jobs/eomg.sh
|
UTF-8
| 3,278
| 2.921875
| 3
|
[] |
no_license
|
#! /bin/bash
###############################################################
# < next few lines under version control, D O N O T E D I T >
# $Date: 2017-10-30 18:48:54 +0000 (Mon, 30 Oct 2017) $
# $Revision: 98721 $
# $Author: fanglin.yang@noaa.gov $
# $Id: eomg.sh 98721 2017-10-30 18:48:54Z fanglin.yang@noaa.gov $
###############################################################
###############################################################
## Author: Rahul Mahajan Org: NCEP/EMC Date: April 2017
## Abstract:
## EnKF innovations for ensemble members driver script
## EXPDIR : /full/path/to/config/files
## CDATE : current analysis date (YYYYMMDDHH)
## CDUMP : cycle name (gdas / gfs)
## ENSGRP : ensemble sub-group to compute innovations (1, 2, ...)
###############################################################
# Trace every command and abort on the first failure.
set -ex
# Export platform environment and task configuration from the CROW YAML.
eval $( $HOMEcrow/to_sh.py $CONFIG_YAML export:y scope:platform.general_env import:".*" )
eval $( $HOMEcrow/to_sh.py $CONFIG_YAML export:y scope:workflow.$TASK_PATH from:Inherit )
eval $( $HOMEcrow/to_sh.py $CONFIG_YAML export:y scope:workflow.$TASK_PATH from:shell_vars )
###############################################################
# Set script and dependency variables
export CASE=$CASE_ENKF
# GDATE: the previous cycle (CDATE minus the assimilation interval).
export GDATE=$($NDATE -$assim_freq $CDATE)
cymd=$(echo $CDATE | cut -c1-8)
chh=$(echo $CDATE | cut -c9-10)
gymd=$(echo $GDATE | cut -c1-8)
ghh=$(echo $GDATE | cut -c9-10)
export OPREFIX="${CDUMP}.t${chh}z."
export APREFIX="${CDUMP}.t${chh}z."
export ASUFFIX=".nemsio"
export GPREFIX="${CDUMP}.t${ghh}z."
export GSUFFIX=".nemsio"
export COMIN_GES="$ROTDIR/$CDUMP.$gymd/$ghh"
export COMIN_GES_ENS="$ROTDIR/enkf.$CDUMP.$gymd/$ghh"
export COMOUT="$ROTDIR/enkf.$CDUMP.$cymd/$chh"
export DATA="$RUNDIR/$CDATE/$CDUMP/eomg.grp$ENSGRP"
# Start from a clean working directory for this group.
[[ -d $DATA ]] && rm -rf $DATA
# The ensemble-mean 6-h forecast must exist before innovations can be computed.
export ATMGES_ENSMEAN="$COMIN_GES_ENS/${GPREFIX}atmf006.ensmean$GSUFFIX"
if [ ! -f $ATMGES_ENSMEAN ]; then
    echo "FILE MISSING: ATMGES_ENSMEAN = $ATMGES_ENSMEAN"
    exit 1
fi
# Number of vertical levels, read from the ensemble-mean file header.
export LEVS=$($NEMSIOGET $ATMGES_ENSMEAN dimz | awk '{print $2}')
status=$?
[[ $status -ne 0 ]] && exit $status
# Guess Bias correction coefficients related to control
export GBIAS=${COMIN_GES}/${GPREFIX}abias
export GBIASPC=${COMIN_GES}/${GPREFIX}abias_pc
export GBIASAIR=${COMIN_GES}/${GPREFIX}abias_air
export GRADSTAT=${COMIN_GES}/${GPREFIX}radstat
# Use the selected observations from ensemble mean
export RUN_SELECT="NO"
export USE_SELECT="YES"
export SELECT_OBS="$COMOUT/${APREFIX}obsinput.ensmean"
###############################################################
# Get ENSBEG/ENSEND from ENSGRP and NMEM_EOMGGRP
# (members are processed in contiguous groups of NMEM_EOMGGRP each).
ENSEND=$((NMEM_EOMGGRP * ENSGRP))
ENSBEG=$((ENSEND - NMEM_EOMGGRP + 1))
export ENSBEG=$ENSBEG
export ENSEND=$ENSEND
# Run relevant exglobal script
$ENKFINVOBSSH
status=$?
[[ $status -ne 0 ]] && exit $status
###############################################################
# Double check the status of members in ENSGRP
# NOTE(review): assumes the exglobal script writes one "PASS" line per
# successful member into $EOMGGRP — confirm against $ENKFINVOBSSH.
EOMGGRP=$ROTDIR/enkf.${CDUMP}.$cymd/$chh/eomg.grp${ENSGRP}
if [ -f $EOMGGRP ]; then
    npass=$(grep "PASS" $EOMGGRP | wc -l)
else
    npass=0
fi
echo "$npass/$NMEM_EOMGGRP members successfull in eomg.grp$ENSGRP"
if [ $npass -ne $NMEM_EOMGGRP ]; then
    echo "ABORT!"
    cat $EOMGGRP
    exit 99
fi
###############################################################
# Exit out cleanly
exit 0
| true
|
447b4d24b5fdf612f16f609a97c4c0ebc9580a0f
|
Shell
|
BerryJimK/MachineWolf
|
/ops/awk_example.sh
|
UTF-8
| 901
| 2.6875
| 3
|
[
"MIT"
] |
permissive
|
awk '{ gsub(/AAAA/,"BBBB"); print $0 }' t.txt
awk 替换后写入文件。awk 的 sub/gsub 函数用来替换字符串
1
sub(/regexp/, replacement, target)
注意第三个参数target,如果忽略则使用$0作为参数,即整行文本。
例子1:替换单个串
只把每行的第一个AAAA替换为BBBB
1
awk '{ sub(/AAAA/,"BBBB"); print $0 }' t.txt
例子2:替换所有的串
把每一行的所有AAAA替换为BBBB
1
awk '{ gsub(/AAAA/,"BBBB"); print $0 }' t.txt
例子3:替换满足条件的行的串
只在出现字符串CCCC的前提下,将行中所有AAAA替换为BBBB
1
2
3
awk '/CCCC/ { gsub(/AAAA/,"BBBB"); print $0; next }
{ print $0 }
' t.txt
**参考**
-----------------------------------------------------------------------------
1. [awk 替换文本字符串内容](http://www.pinhuba.com/linux/101488.htm)
| true
|
4fc4f4b7462dc357594f316219633b9277cdc6e2
|
Shell
|
bridgecrew-perf7/jumpinchat-deploy
|
/nginx/conf/site.conf.sh
|
UTF-8
| 5,004
| 2.828125
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/usr/bin/env bash
# Generates /etc/nginx/conf.d/site.conf from the environment ($ENV) at
# startup. Shell variables expand while writing; "\$"-escaped names are
# left literal for nginx to resolve at runtime.
# Public IP of this host, used to exempt it from rate limiting below.
EXTERNAL_IP=`curl -s icanhazip.com`
# In the local environment only, define an extra test-stream location block.
if [[ $ENV == 'local' ]]; then
LOC_STREAM=$(cat <<EOF
location /janus/stream {
resolver 127.0.0.1 valid=30s;
proxy_pass http://streamtest/;
proxy_redirect default;
proxy_http_version 1.1;
proxy_set_header Upgrade \$http_upgrade;
proxy_set_header Connection \$connection_upgrade;
}
EOF
)
fi
# Janus WebRTC upstream definitions: two instances in the local environment,
# five otherwise. The "janus_id" cookie pins each client to a backend via
# the \$janusServer map consumed further below.
if [[ $ENV == 'local' ]]; then
JANUS_UPSTREAM=$(cat <<EOF
upstream websocket {
server janus:8188;
}
upstream janusws {
server janus:8989;
}
upstream janusws2 {
server janus2:8989;
}
map \$cookie_janus_id \$janusServer {
janus "janusws";
janus2 "janusws2";
}
EOF
)
else
JANUS_UPSTREAM=$(cat <<EOF
upstream websocket {
server janus:8188;
}
upstream janusws {
server janus:8989;
}
upstream janusws2 {
server janus2:8989;
}
upstream janusws3 {
server janus3:8989;
}
upstream janusws4 {
server janus4:8989;
}
upstream janusws5 {
server janus5:8989;
}
map \$cookie_janus_id \$janusServer {
janus "janusws";
janus2 "janusws2";
janus3 "janusws3";
janus4 "janusws4";
janus5 "janusws5";
}
EOF
)
fi
# Write the full nginx site configuration. Everything between the heredoc
# markers goes verbatim into site.conf: ${...} pieces (upstreams, external
# IP, local-only stream block) are interpolated now, "\$" names stay literal.
cat << EOF > /etc/nginx/conf.d/site.conf
map \$http_upgrade \$connection_upgrade {
default upgrade;
'' close;
}
${JANUS_UPSTREAM}
upstream websrv {
ip_hash;
server web:80 max_fails=3 fail_timeout=30s;
server web2:80 max_fails=3 fail_timeout=30s;
}
upstream homesrv {
server home:3000 max_fails=3 fail_timeout=30s;
server home2:3000 max_fails=3 fail_timeout=30s;
}
geo \$limit {
default 1;
10.0.0.0/8 0;
${EXTERNAL_IP} 0;
}
map \$limit \$limit_key {
0 "";
1 \$binary_remote_addr;
}
# request limiting
limit_req_zone \$limit_key zone=sitelimit:10m rate=2r/s;
# caching
proxy_cache_path /var/cache/nginx levels=1:2 keys_zone=one:8m max_size=3000m inactive=600m;
proxy_temp_path /var/tmp;
sendfile_max_chunk 512k;
# gzip
gzip on;
gzip_comp_level 6;
gzip_vary on;
gzip_min_length 1000;
gzip_proxied any;
gzip_types text/plain text/html text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript;
gzip_buffers 16 8k;
server {
listen 80;
listen 443 ssl;
server_name _;
include /etc/nginx/root-ssl.conf;
limit_req zone=sitelimit burst=100 nodelay;
return 444;
}
server {
listen 443 ssl http2;
listen [::]:443 ssl http2;
server_name "~^172\.\d{1,3}\.\d{1,3}\.\d{1,3}\$" "~^10\.136\.\d{1,3}\.\d{1,3}\$" jumpin.chat local.jumpin.chat jumpinchat.com;
client_max_body_size 10M;
gzip on;
gzip_comp_level 6;
gzip_vary on;
gzip_min_length 1000;
gzip_proxied any;
gzip_types text/plain application/javascript application/x-javascript text/javascript text/xml text/css;
gzip_buffers 16 8k;
limit_req zone=sitelimit burst=100 nodelay;
include /etc/nginx/root-ssl.conf;
location / {
try_files \$uri \$uri/ @homepage;
proxy_cache one;
proxy_cache_bypass \$http_cache_control;
add_header X-Proxy-Cache \$upstream_cache_status;
aio threads;
}
location /api {
proxy_pass http://haproxy:80;
proxy_set_header X-Real-IP \$remote_addr;
proxy_set_header Host \$host;
proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Host \$host;
}
location @homepage {
proxy_pass http://homesrv;
proxy_set_header X-Real-IP \$remote_addr;
proxy_set_header Host \$host;
proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Host \$host;
proxy_intercept_errors on;
recursive_error_pages on;
error_page 404 = @web;
aio threads;
}
location @web {
proxy_pass http://websrv;
proxy_set_header X-Real-IP \$remote_addr;
proxy_set_header Host \$host;
proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Host \$host;
aio threads;
# Websocket support
proxy_http_version 1.1;
proxy_set_header Upgrade \$http_upgrade;
proxy_set_header Connection "upgrade";
}
location /janus/ws {
proxy_pass https://\$janusServer/;
proxy_http_version 1.1;
proxy_set_header Upgrade \$http_upgrade;
proxy_set_header Connection \$connection_upgrade;
add_header X-JANUS-SERVER \$janusServer;
}
location /janus/http {
limit_req zone=sitelimit burst=300 nodelay;
proxy_pass http://janus:8088/janus;
proxy_redirect default;
proxy_http_version 1.1;
proxy_set_header Upgrade \$http_upgrade;
proxy_set_header Connection \$connection_upgrade;
}
${LOC_STREAM}
location = /robots.txt {
root /var/www/site/;
}
}
server {
listen 80;
listen 443 ssl;
server_name www.jumpinchat.com www.jumpin.chat;
server_tokens off;
limit_req zone=sitelimit burst=10 nodelay;
return 301 https://jumpin.chat\$request_uri;
}
server {
listen 80;
server_name jumpin.chat www.jumpin.chat local.jumpin.chat jumpinchat.com www.jumpinchat.com;
server_tokens off;
limit_req zone=sitelimit burst=10 nodelay;
return 301 https://\$host\$request_uri;
}
EOF
| true
|
29cfbc1419adcd6e638e78c626d3eb7eb0e640b6
|
Shell
|
amerinero/Scripts
|
/SolarisGetInfo.sh
|
UTF-8
| 5,161
| 3.359375
| 3
|
[] |
no_license
|
#!/usr/bin/bash
# SolarisGetInfo: dump a hardware/OS inventory of this Solaris host (disks,
# filesystems, network, services, processes, zones, LDOMs, disk paths) into
# a timestamped report file in the current directory.
fecha=$(date +%H%M%S-%d%m%Y)
host=$(hostname)
FILEOUT=SolarisGetInfo_${host}_${fecha}.txt

readonly stars="*******************************************************************"
readonly dashes="-------------------------------------------------------------------"

# banner TITLE: write a three-line section banner to the report.
banner() {
    { echo "$stars"; echo "$1"; echo "$stars"; } >> "$FILEOUT"
}

# section LABEL CMD: write "--- LABEL", run CMD (via eval, so pipes and
# globs work) appending its stdout to the report, then a separator line.
section() {
    echo "--- $1" >> "$FILEOUT"
    eval "$2" >> "$FILEOUT"
    echo "$dashes" >> "$FILEOUT"
}

banner "*** DISCOS ***"
# "format" is interactive; feed it an empty heredoc so it exits immediately.
echo "--- format" >> "$FILEOUT"
format << EOL >> "$FILEOUT"
EOL
echo "$dashes" >> "$FILEOUT"
section "fcinfo hba-port (HBAs)" "fcinfo hba-port"
section "zpool list" "zpool list"
section "zpool status" "zpool status"
section "zfs list" "zfs list"

banner "*** FILESYSYTEMS ***"
section "cat /etc/vfstab" "cat /etc/vfstab"
section "df -h" "df -h"

banner "*** RED ***"
section "ifconfig -a" "ifconfig -a"
section "netstat -rnv" "netstat -rnv"
section "ls -l /etc/hostname.* ; cat /etc/hostname.*" "ls -l /etc/hostname.* ; cat /etc/hostname.*"
section "cat /etc/netmasks" "cat /etc/netmasks"
section "cat /etc/hosts" "cat /etc/hosts"
section "netstat -an | grep LISTEN (puertos escuchando)" "netstat -an | grep LISTEN"

banner "*** SERVICIOS ***"
section "svcs -a" "svcs -a"
section "ls -l /etc/rc[23].d/" "ls -l /etc/rc[23].d/"

banner "*** PROCESOS ***"
section "ps -ef" "ps -ef"

banner "*** ZONAS ***"
section "zoneadm list -iv" "zoneadm list -iv"
section "ls -l /etc/zones" "ls -l /etc/zones"

banner "*** LDOMS ***"
section "ldm ls" "ldm ls"
# One detailed section per configured logical domain.
for ldom in $(ldm ls | grep -v NAME | awk '{print $1}')
do
    section "ldm ls -l $ldom" "ldm ls -l $ldom"
done

banner "*** PATHs de discos ***"
# One multipath display per s2 slice device.
for disco in $(ls /dev/rdsk/*s2)
do
    section "luxadm display $disco" "luxadm display $disco"
done
| true
|
c8b6e0faf996c881b14c974d63db78e42c6a7870
|
Shell
|
jumanjiman/policy-based-routing
|
/tunnel-routing/ifdown-local
|
UTF-8
| 316
| 2.90625
| 3
|
[] |
no_license
|
#!/bin/bash
# /sbin/ifdown-local hook: when tun0 goes down, tear out the policy-based
# routing rules installed for the VPN tunnel.
# external IP of remote vpn device
REMOTE_OUTER=10.8.1.42
# Quote $1: the original unquoted test produced a "unary operator expected"
# error whenever the script was invoked without an interface argument.
if [ "$1" = tun0 ]; then
    ip rule delete to 172.16.0.1 table main
    ip rule delete to 172.16.0.0/16 table remote
    ip route del default via $REMOTE_OUTER dev tun0 table remote
    echo "REMOVED policy-based routing per /sbin/ifdown-local" >&2
fi
| true
|
44ea801a7590b2032e81f2b0faf81ccd3da61730
|
Shell
|
FSMaxB/lfs-me-repos
|
/7.6-systemd/bc-1.06.95
|
UTF-8
| 1,178
| 3.609375
| 4
|
[] |
no_license
|
# lfs-me build description for GNU bc 1.06.95 (arbitrary-precision calculator).
pkgbuild_version=5
pkgname=bc
pkgver=1.06.95
# Info pages shipped by this package.
# NOTE(review): dc.info is listed here, but only ${pkgname}.info is
# registered/unregistered in the hooks below — confirm that is intentional.
_info_pages=(
'dc.info'
'bc.info'
)
# NOTE(review): "!$pkgname" presumably means the package must not already be
# installed while building — verify against the lfs-me framework.
dependencies=(
"!$pkgname"
'readline'
)
sources=(
"http://alpha.gnu.org/gnu/${pkgname}/${pkgname}-${pkgver}.tar.bz2"
)
sha1sums=(
18717e0543b1dda779a71e6a812f11b8261a705a
)
# Unpack, apply the memory-leak patch, and configure with readline support.
lfs_me_prepare() {
tar -xf "${sources_dir}/${pkgname}-${pkgver}.tar.bz2" -C "$build_dir"
cd "${build_dir}/${pkgname}-${pkgver}"
patch -Np1 -i "${sources_dir}/${pkgname}-${pkgver}-memory_leak-1.patch"
./configure --prefix=/usr \
--with-readline \
--mandir=/usr/share/man \
--infodir=/usr/share/info
}
# Sanity-check the freshly built bc against its bundled test library.
lfs_me_check() {
cd "${build_dir}/${pkgname}-${pkgver}"
echo "quit" | ./bc/bc -l Test/checklib.b
}
# Install into the fakeroot staging directory.
lfs_me_install() {
cd "${build_dir}/${pkgname}-${pkgver}"
make DESTDIR="$fakeroot_dir" install
#don't overwrite info pages
rm "${fakeroot_dir}/usr/share/info/dir"
}
# Register the info page after installing on the real system.
lfs_me_postinstall() {
echo "Adding info pages"
install-info "/usr/share/info/${pkgname}.info" /usr/share/info/dir
}
# Unregister the info page before package removal.
lfs_me_preremove() {
echo "Adding info pages"
install-info --delete "/usr/share/info/${pkgname}.info" /usr/share/info/dir
}
# vim:set syntax=sh et:
| true
|
baa36b9a2a173c2cdcb1793d20b07d1a3dc8b789
|
Shell
|
aws-samples/amazon-eks-machine-learning-with-terraform-and-kubeflow
|
/build-ecr-images.sh
|
UTF-8
| 236
| 2.875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Build and push both ECR container images for the given AWS region.
#
# Usage: ./build-ecr-images.sh <aws-region>

# Require exactly one argument: the target AWS region.
region=
if [ "$#" -eq 1 ]; then
    region=$1
else
    echo "usage: $0 <aws-region>"
    exit 1
fi

# Quote the region so an odd value cannot word-split the argument list.
./container/build_tools/build_and_push.sh "$region"
./container-optimized/build_tools/build_and_push.sh "$region"
| true
|
fbbb895e6fcb98ff1f7b6be5ebd235359595e8da
|
Shell
|
langmartin/nomad-dev
|
/cfg/client-pid-scan/repro
|
UTF-8
| 327
| 2.828125
| 3
|
[] |
no_license
|
#!/bin/sh
# Reproduction driver: rebuild nomad from the local checkout, restart a dev
# agent with a clean state directory, then submit the report job.

# Must run as root: we kill processes system-wide and own the agent state.
# (Uses POSIX $( ) instead of the original backticks.)
[ "$(id -u)" = 0 ] || { echo "run as root" 1>&2; exit 1; }

# Start from a clean slate: stop any running nomad/sleep processes and wipe
# the agent's on-disk state and plugin sockets.
killall -9 nomad
killall sleep
rm -rf /tmp/nomad-agent /tmp/plugin*

set -x

# Rebuild the nomad binary (subshell so the cwd change does not leak).
(cd ~/go/src/github.com/hashicorp/nomad && go install)

# Launch the dev agent in the background and give it time to come up.
nomad agent -config agent.json > /tmp/nomad-dev.log 2>&1 &
sleep 12

# Submit the job; the loop runs once but makes the count easy to bump.
for i in $(seq 1); do
    nomad job run report.hcl
done
| true
|
d71e475093c848223c9ef520f172ff844f19ca7b
|
Shell
|
g2-field-team/field-daq
|
/common/scripts/rc.d/midasrc
|
UTF-8
| 644
| 2.921875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Configure the MIDAS DAQ environment.  Sourced by shells / run scripts;
# selects between a local gm2midas checkout and an externally provided
# installation rooted at $GM2MIDAS_BASE.

# Which installation to use: "local" (hard-coded checkout path) or anything
# else (use $GM2MIDAS_BASE).
export MIDASSELECT="local"

# BUGFIX: quote the variable so the test cannot break if it is ever empty.
if [ "$MIDASSELECT" = "local" ] ; then
    export MIDASSYS=/home/newg2/Packages/gm2midas/midas
    export MID_INC=$MIDASSYS/include
    export MID_LIB=$MIDASSYS/linux/lib
    export LD_LIBRARY_PATH=$MIDASSYS/linux/lib:$LD_LIBRARY_PATH
    export PATH=$MIDASSYS/linux/bin:$PATH
else
    # NOTE(review): this branch assumes $GM2MIDAS_BASE is set by the
    # environment -- confirm before switching MIDASSELECT away from "local".
    export MIDASSYS=$GM2MIDAS_BASE
    export MID_INC=$MIDASSYS/include
    export MID_LIB=$MIDASSYS/lib
    export LD_LIBRARY_PATH=$MIDASSYS/lib:$LD_LIBRARY_PATH
    export PATH=$MIDASSYS/bin:$PATH
fi

# Experiment table consumed by mserver/odbedit.
export MIDAS_EXPTAB=/etc/exptab
#export MIDAS_SERVER_HOST=g2field-be-priv:1175
| true
|
812ac1715443b350face2de7ef8aafd962528f2c
|
Shell
|
gmonnier/emera
|
/EmeraWeb/deployment/setup.sh
|
UTF-8
| 1,410
| 3.953125
| 4
|
[] |
no_license
|
#!/bin/bash
# Bootstrap an EmeraWeb deployment host: recreate the deployment directory,
# ensure a Java runtime (> 1.7) is installed, and lay out the runtime tree.

# BUGFIX: the original began with "sudo su -", which merely opens an
# interactive root shell -- the rest of the script only runs (unprivileged)
# after that shell exits.  Require root explicitly instead.
if [ "$(id -u)" -ne 0 ]; then
    echo "This script must be run as root" >&2
    exit 1
fi

echo "Cleanup Emera deployment directory"
deploymentDir=/EmeraWeb
if [ -d "${deploymentDir}" ]; then
    rm -rf "${deploymentDir}"
fi
mkdir "${deploymentDir}"

# Detect an existing Java runtime, on $PATH or under $JAVA_HOME.
java_install_needed=false
if type -p java; then
    echo "found java executable in PATH"
    _java=java
elif [[ -n "$JAVA_HOME" ]] && [[ -x "$JAVA_HOME/bin/java" ]]; then
    echo "found java executable in JAVA_HOME"
    _java="$JAVA_HOME/bin/java"
else
    java_install_needed=true
    echo "no java executable found"
fi

# If a java binary was found, require a version newer than 1.7.
# NOTE(review): this is a lexicographic string comparison, not a real
# version compare -- it happens to work for "1.8"/"9"-style strings.
if [[ "$_java" ]]; then
    version=$("$_java" -version 2>&1 | awk -F '"' '/version/ {print $2}')
    echo version "$version"
    if [[ "$version" > "1.7" ]]; then
        echo "version is more than 1.7"
    else
        java_install_needed=true
        echo "version is less or equal than 1.7"
    fi
fi

# Install Oracle Java 8 from the webupd8team PPA, pre-accepting the license
# prompt via debconf.  (sudo prefixes dropped -- we already enforced root.)
if [ "$java_install_needed" = true ] ; then
    echo "Update apt repositories..."
    add-apt-repository ppa:webupd8team/java
    apt-get update
    echo "Install java..."
    echo oracle-java8-installer shared/accepted-oracle-license-v1-1 select true | /usr/bin/debconf-set-selections
    apt-get install -y oracle-java8-installer
fi

# Runtime directory layout, handed over to the "ubuntu" service user.
mkdir -p "${deploymentDir}/conf"
mkdir -p "${deploymentDir}/logs"
mkdir -p "${deploymentDir}/src/main/webapp/resources"
chown ubuntu:ubuntu "${deploymentDir}"
chown ubuntu:ubuntu "${deploymentDir}/conf"
chown ubuntu:ubuntu "${deploymentDir}/logs"
# NOTE(review): 777 is very permissive -- presumably so the app user can
# write everywhere; consider tightening.
chmod -R 777 "${deploymentDir}"
| true
|
a00ed9898919de881562dac2ab4835218d290b40
|
Shell
|
polettix/App-OnePif
|
/bundle/ramenc
|
UTF-8
| 415
| 3.59375
| 4
|
[
"Artistic-2.0",
"LicenseRef-scancode-philippe-de-muyter"
] |
permissive
|
#!/bin/bash
# Manage an AES-256 encrypted RAM disk named "ramenc" (macOS).
#
#   ramenc <size-in-MB>   attach and HFS+-format a RAM disk of that size
#   ramenc off            eject /Volumes/ramenc

size="$1"

case "$size" in
  off)
    # Tear the volume down.
    diskutil eject /Volumes/ramenc
    ;;
  *)
    # Accept only a positive decimal integer (size in MB).
    if echo "$size" | grep '^[1-9][0-9]*$' >/dev/null 2>&1 ; then
      # 1 MB = 2048 blocks of 512 bytes.
      blocks="$((size * 2 * 1024))"
      device="$(hdiutil attach -nomount -encryption 'AES-256' "ram://$blocks")"
      # $device intentionally unquoted: hdiutil pads its output and the
      # original relied on word-splitting to trim it.
      diskutil erasevolume HFS+ ramenc $device
    else
      echo "please provide 'off' or a size in MB"
      exit 1
    fi
    ;;
esac

exit 0
| true
|
8f03468c4e05854581f69215c256ef4589387894
|
Shell
|
lguerrin/browsergopass
|
/releases/install.sh
|
UTF-8
| 3,030
| 3.859375
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Install the browsergopass native-messaging host manifests for every
# supported browser found on this machine (Firefox, Chrome and -- on
# Linux -- Chromium).  Run from the release directory that contains the
# host-*.json templates and the host binary/wrapper.

DIR="$( cd "$( dirname "$0" )" && pwd )"
APP_NAME="com.lguerrin.browsergopass"
OS=$(uname -s)

# ANSI colour helpers.
NC='\033[0m'

function printGreen {
    echo -e "\033[32m$1$NC"
}

function printLightGreen {
    echo -e "\033[92m$1$NC"
}

function printRed {
    echo -e "\033[31m$1$NC"
}

function printYellow {
    echo -e "\033[33m$1$NC"
}

function succesfulInstall {
    printLightGreen "\t\u2713 install OK"
}

function addApp {
    printGreen "\tAdd app..."
}

# detectApp <name>: check whether the given browser is installed.
# Sets the global INSTALLED to 0 when found, non-zero otherwise.
function detectApp {
    echo "Checking $1..."
    INSTALLED=0
    case ${OS} in
        Linux)
            # On Linux the browser name is an executable on $PATH.
            hash $1 > /dev/null 2>&1
            ;;
        Darwin)
            # On macOS ask Launch Services for the app's bundle id.
            osascript -e "id of application \"$1\"" > /dev/null 2>&1
            ;;
        *)
            printRed "\u2718Cannot detect app on $OS"
            exit 1
            ;;
    esac
    INSTALLED=$?
    if [ $INSTALLED -eq 0 ]; then
        printYellow "\tInstalled"
    else
        printYellow "\t\u2718 Not found"
    fi
}

# replaceInFile <sed-expr> <file>: portable in-place substitution.
# BUGFIX: the original used "sed -i -e ... file", which works with GNU sed
# but on BSD sed (macOS) takes "-e" as the -i backup suffix, leaving a stray
# "<file>-e" backup behind.  Rewrite via a temp file instead.
function replaceInFile {
    sed -e "$1" "$2" > "$2.tmp" && mv "$2.tmp" "$2"
}

# Per-OS browser names and native-messaging manifest directories.
case ${OS} in
    Linux)
        TARGET_DIR_CHROME="$HOME/.config/google-chrome/NativeMessagingHosts"
        CHROME_APP="google-chrome"
        TARGET_DIR_FIREFOX="$HOME/.mozilla/native-messaging-hosts/"
        FIREFOX_APP="firefox"
        TARGET_DIR_CHROMIUM="$HOME/.config/chromium/NativeMessagingHosts"
        CHROMIUM_APP="chromium-browser"
        HOST_FILE="$DIR/browsergopass"
        ;;
    Darwin)
        TARGET_DIR_CHROME="$HOME/Library/Application Support/Google/Chrome/NativeMessagingHosts"
        CHROME_APP="Google Chrome"
        TARGET_DIR_FIREFOX="$HOME/Library/Application Support/Mozilla/NativeMessagingHosts"
        FIREFOX_APP="Firefox"
        HOST_FILE="$DIR/browsergopass-wrapper"
        ;;
    *)
        printRed "Cannot install on $OS"
        exit 1
        ;;
esac

# Escape "/" in the host path so it is safe on the RHS of the sed s///
# expression below.
ESCAPED_HOST_FILE=${HOST_FILE////\\/}

detectApp "$FIREFOX_APP"
if [ $INSTALLED -eq 0 ]; then
    if [ ! -d "$TARGET_DIR_FIREFOX" ]; then
        printYellow "\tCreates native app folder"
        mkdir -p "$TARGET_DIR_FIREFOX"
    fi
    addApp
    cp "host-firefox.json" "$TARGET_DIR_FIREFOX/$APP_NAME.json"
    replaceInFile "s/%%replace%%/$ESCAPED_HOST_FILE/" "$TARGET_DIR_FIREFOX/$APP_NAME.json"
    succesfulInstall
fi

detectApp "$CHROME_APP"
if [ $INSTALLED -eq 0 ]; then
    if [ ! -d "$TARGET_DIR_CHROME" ]; then
        echo "Creates native app folder"
        mkdir -p "$TARGET_DIR_CHROME"
    fi
    addApp
    cp "host-chrome.json" "$TARGET_DIR_CHROME/$APP_NAME.json"
    replaceInFile "s/%%replace%%/$ESCAPED_HOST_FILE/" "$TARGET_DIR_CHROME/$APP_NAME.json"
    succesfulInstall
fi

# Chromium is only defined on Linux; install there only when its config
# directory already exists.
if [ -n "$CHROMIUM_APP" ]; then
    detectApp "$CHROMIUM_APP"
    if [ $INSTALLED -eq 0 ]; then
        if [ -d "$TARGET_DIR_CHROMIUM" ]; then
            addApp
            cp "host-chrome.json" "$TARGET_DIR_CHROMIUM/$APP_NAME.json"
            replaceInFile "s/%%replace%%/$ESCAPED_HOST_FILE/" "$TARGET_DIR_CHROMIUM/$APP_NAME.json"
            succesfulInstall
        fi
    fi
fi

echo -e "\n\e[1;42mNow, you can add plugin to browsers.$NC"
| true
|
5f5805b65da3604ad29db772644906080a0fd977
|
Shell
|
VincentDR/CryptoFiscaFacile
|
/make.sh
|
UTF-8
| 374
| 3.03125
| 3
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
#!/bin/bash
PATH=$GOPATH/bin:$PATH
VER=$(git describe --tags)
BASENAME="CryptoFiscaFacile"
GOOS=$(go env | grep GOOS | cut -d"=" -f2)
GOARCH=$(go env | grep GOARCH | cut -d"=" -f2)
echo "$GOOS"/"$GOARCH"
if [ "$GOOS" = "windows" ]
then
EXT=".exe"
else
EXT=""
fi
go build -ldflags "-X main.version=$VER" && mv $BASENAME"$EXT" $BASENAME-"$VER"-"$GOOS"-"$GOARCH""$EXT"
| true
|
770ac56e6c7efb04d0992835737e1e773078bf84
|
Shell
|
mptrsen/myScripts
|
/moveNcat_fq.sh
|
UTF-8
| 369
| 3.28125
| 3
|
[] |
no_license
|
indir=$1
regex=$2
for dir in $(find ${indir} -type f -name $regex | sed -r 's|/[^/]+$||' |sort |uniq);
do
for R in R1 R2; do cat $dir/*_${R}_*.fastq.gz > $(basename $dir)_${R}.fastq.gz;
echo "$(basename $dir)_${R}.fastq.gz $(ls $dir | grep ${R} | grep L001)" >> rename.sh
done; done
cat rename.sh | sed s/_[A-Z0-9_]*_S[0-9]*_L[0-9]*// | sed s/_001// | xargs -L 1 mv
| true
|
bf4299ca6f53a118da92d31e0632694792b16114
|
Shell
|
arcu/xps-ics-ramdisk
|
/sbin/reboot
|
UTF-8
| 318
| 3.28125
| 3
|
[] |
no_license
|
#!/sbin/sh
# Wrapper script to handle reboot recovery command
for arg in "$@"
do
case "$arg" in
recovery) touch /cache/recovery/boot
RECOVERY=1
;;
*)
;;
esac
done
busybox reboot $@
RET=$?
if [ $RET -ne 0 ] && [ $RECOVERY -eq 1 ]; then
rm /cache/recovery/boot
fi
exit $RET
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.