blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
472be23f7e9fe1eed395fe37d86c9cc175d91010
|
Shell
|
janelee1618/ACRES_MPI_tutorial
|
/mpi_examples/run_job.qsub
|
UTF-8
| 1,006
| 2.96875
| 3
|
[] |
no_license
|
#!/bin/bash --login
### Set PBS parameters ###
# NOTE: the "#PBS" lines below are scheduler directives parsed by qsub;
# treat them as configuration, not ordinary comments.
# Time job will take to execute (HH:MM:SS format)
#PBS -l walltime=01:00:00
### # Memory needed by the job (not strictly necessary, hence commented out by default)
### #PBS -l mem=2gb
# Number of nodes required and the number of processors per node
# For the MSU HPCC, you're best off changing "nodes" and leaving ppn set to 1.
#PBS -l nodes=16:ppn=1
# Make output and error files the same file
#PBS -j oe
# Send an email when a job is aborted, begins or ends
#PBS -m abe
# Specify the email address (replace <username> before submitting)
#PBS -M <username>@msu.edu
# Give the job a name
#PBS -N my_clever_jobname
cd ${PBS_O_WORKDIR} # Change to the Original Working Directory
# that is, the directory where "qsub" was called
### Load any required modules ###
module load Python
module load NumPy
module load mpi4py
### List job commands ###
# (note: you should make sure that "nodes * ppn" match the value for "-np")
# NOTE(review): "my_sparallel_script.py" looks like a typo for
# "my_parallel_script.py" — confirm the real script name before use.
mpirun -np 16 python my_sparallel_script.py
| true
|
4977dee7e5d89a560d753901de898012624f4fc8
|
Shell
|
8l/insieme
|
/scripts/shark_install.sh
|
UTF-8
| 1,524
| 3.1875
| 3
|
[] |
no_license
|
# setup environment variables
# (provides at least $PREFIX, $CC, $CXX and $SLOTS — defined externally)
. ./environment.setup

VERSION=2.3.4

########################################################################
## SHARK
########################################################################

# Nothing to do when this exact version is already installed.
if [ -d "$PREFIX/shark-$VERSION" ]; then
  echo "SHARK version $VERSION already installed"
  exit 0
fi

# Remove any partial leftovers from a previously failed install.
rm -Rf "$PREFIX/shark-$VERSION"

echo "#### Downloading SHARK library ####"
wget http://sourceforge.net/projects/shark-project/files/Shark%20Core/Shark%20$VERSION/shark-$VERSION.zip/download -O "shark-$VERSION.zip"
RET=$?
if [ $RET -ne 0 ]; then
  exit $RET
fi

# Fail fast here: previously a bad archive or missing directory cascaded
# into confusing patch/cmake errors run from the wrong directory.
unzip "shark-$VERSION.zip" || exit 1
cd Shark || exit 1

# patch shark
patch -p0 < ../patches/svm.cpp.patch
patch -p0 < ../patches/svm.h.patch
patch -p0 < ../patches/fileutil.patch
patch -p0 < ../patches/randomvector.patch

# Make the locally-built toolchain's runtime libraries visible to cmake/make.
export LD_LIBRARY_PATH=$PREFIX/gcc-latest/lib64:$PREFIX/gmp-latest/lib:$PREFIX/mpc-latest/lib:$PREFIX/mpfr-latest/lib:$PREFIX/cloog-gcc-latest/lib:$PREFIX/ppl-latest/lib:$LD_LIBRARY_PATH

echo "#### Building SHARK library ####"
mkdir build
cd build || exit 1

CC=$CC CXX=$CXX XFLAGS="-mtune=native -O3" "$PREFIX/cmake-latest/bin/cmake" ../ -DCMAKE_INSTALL_PREFIX="$PREFIX/shark-$VERSION"

make -j $SLOTS
# Check for failure
RET=$?
if [ $RET -ne 0 ]; then
  exit $RET
fi

echo "#### Installing SHARK library ####"
# Previously an install failure still repointed the shark-latest symlink;
# bail out before touching the symlink if install fails.
make install || exit 1

# Repoint the "latest" convenience symlink at this version.
rm -f "$PREFIX/shark-latest"
ln -s "$PREFIX/shark-$VERSION" "$PREFIX/shark-latest"

echo "#### Cleaning up environment ####"
cd ../..
rm -R Shark
rm -R shark-$VERSION*
exit 0
| true
|
3d83b7b556e4c79b685ce447b6215864d050bc26
|
Shell
|
chongchuanbing/SourceTreeCustomAction
|
/merge_commit.sh
|
UTF-8
| 144
| 3.046875
| 3
|
[] |
no_license
|
#!/bin/bash --posix
# Cherry-pick every commit id passed on the command line, in order.
#
# Usage: merge_commit.sh <commit-id> [<commit-id> ...]
#
# The original iterated positional parameters by numeric index via the
# indirect expansion ${!i}; iterating "$@" directly is equivalent and
# simpler, and quoting the id prevents word-splitting/globbing of
# unusual arguments.
for commitId in "$@"; do
  echo 'commitId:' "$commitId"
  git cherry-pick "$commitId"
done
| true
|
343713db663d8a7af79fde9aa85c3564f30f4e38
|
Shell
|
hchiam/sn-prompt-generator
|
/copy-to-public-folder.sh
|
UTF-8
| 1,146
| 4.03125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# to run this file in CLI: bash copy-to-public-folder.sh
# Minifies script.js/style.css, runs the obfuscation helpers, and copies
# the publishable files into ./public (removing non-public files after).

echo
echo "Please wait."

# create public folder if it doesn't exist already
if ! [ -e public ]; then
  mkdir public
fi

# empty public folder if it has anything in it.
# BUG FIX: pathname expansion does NOT happen inside [[ ]], so the original
# `[[ -e public/* ]]` tested for a file literally named "public/*" and the
# cleanup never ran. Expand the glob into an array first instead.
contents=(public/*)
if [ -e "${contents[0]}" ]; then
  rm public/*
fi

# check if npm is installed:
if ! [ -x "$(command -v npm)" ]; then
  echo
  echo "Please install npm for minify to work."
  echo
else
  # install minify if unavailable:
  if ! [ -x "$(command -v minify)" ]; then npm i minify; fi
  # minify code:
  minify script.js > minified-script.js
  minify style.css > minified-style.css
fi

bash weakly_obfuscate_functions.sh
bash weakly_obfuscate_ids.sh

# prepend comment:
cat <(echo "// Remember: God is watching. Clear conscience?") minified-script.js > temp.js
mv temp.js minified-script.js
rm public/temp.js

# copy "regular" files into the public folder:
cp *.* public

# clean up unnecessary files:
rm public/copy-to-public-folder.sh
rm public/weakly_obfuscate_functions.sh
rm public/weakly_obfuscate_ids.sh
rm public/README.md
rm public/script.js
rm public/style.css
rm public/reminder.txt

echo
echo "Done."
echo
| true
|
779c8d2d2f3efd0007bf0b87a7fa376903042b2f
|
Shell
|
wendellpbarreto/tronco
|
/restart_db.sh
|
UTF-8
| 1,014
| 3.21875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#
#-----------------------------------
# @autor: Wendell P. Barreto
# @email: wendellp.barreto@gmail.com
# @project: tronco
# @doc: restart_db.sh
# ----------------------------------
# Drops and recreates the tronco_db database (with the tronco_admin user
# and hstore extension), then reruns the Django management commands.

# Recreate the database using "$@" as the command prefix that reaches psql
# (plain `psql`, or `sudo -u postgres psql` on Linux).
recreate_database() {
  "$@" -c 'DROP DATABASE tronco_db'
  "$@" -c 'CREATE DATABASE tronco_db'
  "$@" -c 'CREATE USER tronco_admin'
  "$@" -c 'GRANT ALL PRIVILEGES ON DATABASE tronco_db TO tronco_admin'
  "$@" -d tronco_db -c 'CREATE EXTENSION hstore'
}

# Keep prompting until the user answers yes or no.
while true; do
  read -p "Are you using Linux (y or n)? " yn
  case $yn in
    [Yy]* )
      # On Linux, psql must run as the postgres system user.
      recreate_database sudo -u postgres psql
      break;;
    [Nn]* )
      recreate_database psql
      break;;
    * ) echo "Please answer yes or no.";;
  esac
done

python manage.py syncdb
python manage.py collectstatic --noinput
python manage.py migrate
| true
|
b154141d2932d4225e6305bcf335f296b37a6313
|
Shell
|
bluestar/cloud-init
|
/modules/init-zypper.sh
|
UTF-8
| 120
| 2.609375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Zypper bootstrap module: installs jq on zypper-based (SUSE-style) hosts.
# On hosts without zypper this is a no-op, exiting 0.
if ! [ -x "$(command -v zypper)" ]; then
  exit 0
fi
echo "init zypper module"
zypper --non-interactive install jq
| true
|
aa4cd9520c66609e7147c02d8ac71390820042ae
|
Shell
|
kenshin912/Note
|
/Script/Bash/supervisord.sh
|
UTF-8
| 1,724
| 4.15625
| 4
|
[] |
no_license
|
#!/bin/bash
# Run supervisord/supervisorctl commands inside every running php:latest
# docker container.
#
# Usage:
#   supervisord.sh status           - show supervisorctl status per container
#   supervisord.sh start all        - start supervisord from its config file
#   supervisord.sh restart socket   - restart only the socket-server
#   supervisord.sh restart all      - restart all supervised programs

# Collect the ids of all running php:latest containers.
ContainerID=$(/usr/bin/docker ps | grep php:latest | awk -F " " '{print $1}')
# Intentionally unquoted: word-split the id list into an array.
ContainerIDArray=(${ContainerID})
#echo ${ContainerIDArray[0]}
#echo ${#ContainerIDArray[*]}

CONTROL=(start restart status)

# show supervisord status & exit script
if [ "${1}" == "status" ]; then
  for Container in "${ContainerIDArray[@]}"; do
    /usr/bin/docker exec "${Container}" supervisorctl "${1}"
  done
  exit 0
fi

# Arguments can't be null
if [[ -z "${1}" ]] || [[ -z "${2}" ]]; then
  echo "Need more arguments!"
  exit 1
fi

# Check that the first parameter is one of the allowed CONTROL words.
# BUG FIX: the original `[[ "${CONTROL[@]}" =~ "${1}" ]]` was a substring
# match against "start restart status", so inputs like "star" or "art"
# were accepted; compare against each word exactly instead.
valid="false"
for ctl in "${CONTROL[@]}"; do
  if [[ "${ctl}" == "${1}" ]]; then
    valid="true"
    break
  fi
done
if [[ "${valid}" != "true" ]]; then
  echo "Incorrect Parameter!!"
  exit 1
fi

function shutdown_socket() {
  # get socket-server's pid then kill it . start socket-server manually later.
  # NOTE(review): `ps aux` normally puts the PID in column 2 (column 1 is the
  # user) — verify which ps variant runs inside the container. Also consider
  # SIGTERM before resorting to kill -9.
  local socket_pid
  socket_pid=$(/usr/bin/docker exec "${1}" ps aux | grep socket-server | awk -F " " '{print $1}')
  # $socket_pid intentionally unquoted: grep may match several processes.
  /usr/bin/docker exec "${1}" kill -9 ${socket_pid}
}

for Container in "${ContainerIDArray[@]}"; do
  # if start all , that means we had started php container recently. so we can start up supervisord with config file
  if [[ "${1}" == "start" ]] && [[ "${2}" == "all" ]]; then
    /usr/bin/docker exec "${Container}" supervisord -c /etc/supervisord.conf
  elif [[ "${1}" == "restart" ]] && [[ "${2}" == "socket" ]]; then
    shutdown_socket "${Container}"
    /usr/bin/docker exec "${Container}" supervisorctl start socket-server-swoole:socket-server-swoole_00
  elif [[ "${1}" == "restart" ]] && [[ "${2}" == "all" ]]; then
    shutdown_socket "${Container}"
    /usr/bin/docker exec "${Container}" supervisorctl "${1}" "${2}"
  else
    /usr/bin/docker exec "${Container}" supervisorctl "${1}" "${2}"
  fi
done
exit
| true
|
cab07bd00b356a65191978a4d003c26ae6e2d3f2
|
Shell
|
mgrubb/scripts
|
/setupsys.sh
|
UTF-8
| 2,451
| 3.234375
| 3
|
[] |
no_license
|
#!/bin/ksh
# setup a new environment from scratch
# this is incomplete but is a start.
# Targets macOS: uses CoreServices framework paths and sudo-based
# /usr/local setup, then downloads and installs a few small utilities.
# TODO: install macports and common ports
# TODO: install TexLive
# TODO: Update with other needed software
# TODO: Download vim setup
# Paths into the (macOS) LaunchServices framework, built up in two steps.
CORE_SERVICES="/System/Library/Frameworks/CoreServices.framework"
CORE_SERVICES="$CORE_SERVICES/Versions/Current"
LAUNCH_SERVICES="$CORE_SERVICES/Frameworks/LaunchServices.framework"
LAUNCH_SERVICES="$LAUNCH_SERVICES/Versions/Current"
PATH="$PATH:/usr/local/bin:/usr/local/sbin"
export PATH
cd $HOME
## Pre-authorize with sudo for administrative tasks
## (caches credentials so later sudo calls don't prompt mid-run)
sudo -l -p "Enter password for administrative tasks: " >/dev/null
## Setup /usr/local
sudo mkdir -p /usr/local/bin /usr/local/sbin /usr/local/share/man
cd /usr/local
sudo ln -s share/man man
cd $HOME
# Create links for certain utilities
if [ -f $LAUNCH_SERVICES/Support/lsregister ] ; then
sudo ln -sf $LAUNCH_SERVICES/Support/lsregister /usr/bin/lsregister
else
echo "Could not find lsregister... skipping"
fi
if [ -f $LAUNCH_SERVICES/Support/lssave ] ; then
sudo ln -sf $LAUNCH_SERVICES/Support/lssave /usr/bin/lssave
else
echo "Could not find lssave... skipping"
fi
## Setup home directory structure
for d in bin Library/Programs Downloads
do
[ -d $HOME/$d/. ] || mkdir $HOME/$d
done
[ -d /usr/local/. ] || sudo mkdir /usr/local
## Setup /usr/local
for i in share share/man share/man/man1 bin sbin etc
do
[ -d /usr/local/$i/. ] || sudo mkdir /usr/local/$i
done
## Download utilities
cd $HOME/Downloads
#->http://www.cs.indiana.edu/~kinzler/z/
curl -O http://www.cs.indiana.edu/~kinzler/z/z-2.6.1.tgz
#->http://www.cs.indiana.edu/~kinzler/align/
curl -O http://www.cs.indiana.edu/~kinzler/align/align-1.7.1.tgz
#->http://www.cs.indiana.edu/~kinzler/home/binp/vip
curl http://www.cs.indiana.edu/~kinzler/home/binp/vip > $HOME/bin/vip.tmp
#->http://web.sabi.net/nriley/software/appswitch-1.1.tar.gz
curl -O http://web.sabi.net/nriley/software/appswitch-1.1.tar.gz
# install z
tar xf z-2.6.1.tgz
cd z-2.6.1
sudo make install
sudo make install.man
cd ..
rm -Rf z-2.6.1*
# install align
# NOTE(review): `z <tarball>` appears to extract the archive and print the
# extracted directory name (z is the utility installed just above) — confirm.
cd `z align-1.7.1.tgz`
sudo make install
cd ..
rm -Rf align-1.7.1*
# fix vip
# Rewrite vip's TMPDIR default from /usr/tmp to /tmp before installing it.
cat $HOME/bin/vip.tmp | \
sed -e 's,^\(: .*TMPDIR.*\)/usr/tmp,\1/tmp,g' > $HOME/bin/vip
rm $HOME/bin/vip.tmp
chmod +x $HOME/bin/vip
# install appswitch
cd `z appswitch-1.1.tar.gz`
sudo mv appswitch /usr/local/bin
sudo mv appswitch.1 /usr/local/share/man/man1
cd ..
rm -Rf appswitch-1.1*
| true
|
1bfe0bc526dcc32cfcfc1354beafa4c7e4ab7b15
|
Shell
|
slash-segmentation/CHM
|
/wrappers/panfish/scripts/runMergeTilesViaPanfish.sh
|
UTF-8
| 7,812
| 3.890625
| 4
|
[
"BSD-2-Clause",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
#!/bin/sh
# NOTE(review): the shebang says /bin/sh but the script uses bash features
# ("function" keyword, echo -e, [[ ]], "declare"); presumably it runs on
# systems where sh is bash — confirm before porting.
# Prints usage help to stdout and exits with status 1.
# (The help text below is runtime output; leave it verbatim.)
function usage() {
echo -e "Run Merge Tiles via Panfish.
This program runs Merge Tiles on grid compute resources via Panfish. The
jobs are defined via runMergeTiles.sh.config file that must reside
in the same directory as this script.
runMergeTilesViaPanfish.sh <optional arguments>
Optional Arguments:
-h Displays this help.
-C Check if job is completed and exit 0 if yes otherwise
no. (Cannot be used with any other flag.)
-D Download/Land results from remote clusters and exit.
(Cannot be used with any other flag.)
-U Upload/Chum job data to remote clusters and exit.
(Cannot be used with any other flag.)
-S Check if any jobs are running. Exit 0 if no, 2 if
yes and 1 if there was an error.
(Cannot be used with any other flag.)
-n Specifies job name to pass to give to SGE. No
funny characters other then _ and only a-z|A-Z
for the first character
"
exit 1
}
#######################################################################
#
# Functions
#
#######################################################################

#
# Chums (uploads) job data to the remote clusters.
# Args: $1 task script (unused), $2 iteration (unused), $3 job directory.
# Delegates to chumData from .helperfuncs.sh, using the MERGE_TILES_*
# configuration globals, and propagates its exit status.
#
function chumJobData {
  local jobDirectory=$3
  chumData "$MERGE_TILES_CHUMMEDLIST" "$jobDirectory" "$MERGE_TILES_CHUM_OUT" "$CHUM_MERGE_TILES_OPTS"
}
#
# Verifies that a single Merge Tiles task ran successfully: the task's
# standard-out file and the output file named in the task config must both
# exist and be non-empty.
# Args: $1 task script (unused), $2 job directory, $3 task id.
# Returns 0 on success; 1-4 identify which verification step failed.
#
function checkSingleTask {
  local jobDirectory=$2
  local theTaskId=$3

  # Locate the task's stdout file (presumably sets MERGE_TILES_STD_OUT_FILE;
  # helper lives in .helperfuncs.sh).
  getSingleMergeTilesStdOutFile "$jobDirectory" "$theTaskId" || return 1

  # Stdout file must exist and have size greater than zero.
  [ -s "$MERGE_TILES_STD_OUT_FILE" ] || return 2

  # Look up the task's output path from the run config
  # (presumably sets TASK_CONFIG_PARAM).
  getParameterForTaskFromConfig "$theTaskId" "2" "$jobDirectory/$RUN_MERGE_TILES_CONFIG" || return 3

  # The output file itself must exist and be non-empty.
  [ -s "$jobDirectory/$TASK_CONFIG_PARAM" ] || return 4

  return 0
}
#
# function called when USR2 signal is caught
# Writes a kill-request marker file into the output directory (so the
# helper machinery notices the job should stop), then fails the job.
#
on_usr2() {
echo "USR2 signal caught exiting.." > $OUTPUT_DIR/KILL.JOB.REQUEST
checkForKillFile $OUTPUT_DIR
jobFailed "USR2 signal caught exiting.."
}
# trap usr2 signal cause its what gets sent by SGE when qdel is called
trap 'on_usr2' USR2
###########################################################
#
# Start of program
#
###########################################################
# Check if caller just wants to source this file for testing purposes
# ("return" is only valid when this file is sourced, not executed)
if [ $# -eq 1 ] ; then
if [ "$1" == "source" ] ; then
return 0
fi
fi
# Mode flags selected by the command-line options below.
declare CHECK_MODE="false"
declare DOWNLOAD_MODE="false"
declare UPLOAD_MODE="false"
declare STATUS_MODE="false"
declare MERGE_TILES_JOB_NAME="mergetiles_job"
# get the directory where the script resides
declare SCRIPT_DIR=`dirname $0`
while getopts ":CDSUhn:" o; do
case "${o}" in
h)
usage
;;
n)
MERGE_TILES_JOB_NAME="${OPTARG}"
;;
C)
CHECK_MODE="true"
;;
D)
DOWNLOAD_MODE="true"
;;
U)
UPLOAD_MODE="true"
;;
S)
STATUS_MODE="true"
;;
*)
echo "Invalid argument"
usage
;;
esac
done
# The shared helper library is required; bail out early if missing.
if [ ! -s "$SCRIPT_DIR/.helperfuncs.sh" ] ; then
echo "No $SCRIPT_DIR/.helperfuncs.sh found"
exit 2
fi
# load the helper functions
. $SCRIPT_DIR/.helperfuncs.sh
# getFullPath reports its result via the GETFULLPATHRET global.
getFullPath "$SCRIPT_DIR"
declare OUTPUT_DIR="$GETFULLPATHRET"
# Parse the configuration file
parseProperties "$SCRIPT_DIR" "$OUTPUT_DIR"
if [ $? != 0 ] ; then
jobFailed "There was a problem parsing the $PANFISH_CHM_PROPS file"
fi
logEcho ""
# logStartTime presumably sets the START_TIME global used below.
logStartTime "Merge Tiles"
declare -i modeStartTime=$START_TIME
logEcho ""
# Get the number of jobs we will be running
# (presumably sets the NUMBER_JOBS global used later)
getNumberOfJobsFromConfig "$OUTPUT_DIR" "$RUN_MERGE_TILES_CONFIG"
if [ $? != 0 ] ; then
jobFailed "Error obtaining number of jobs from $RUN_MERGE_TILES_CONFIG file"
fi
#######################################################################
#
# If user passed in -S that means only run check and exit
#
#######################################################################
if [ "$STATUS_MODE" == "true" ] ; then
logMessage "Getting status of running/pending jobs..."
# Sets JOBSTATUS; exit codes: 0 = nothing running, 1 = error, 2 = running.
getStatusOfJobsInCastOutFile "$OUTPUT_DIR" "$MERGE_TILES_CAST_OUT_FILE"
if [ $? != 0 ] ; then
logMessage "Unable to get status of jobs"
logEndTime "Merge Tiles" $modeStartTime 1
exit 1
fi
if [ "$JOBSTATUS" == "$DONE_JOB_STATUS" ] ; then
logMessage "No running/pending jobs found."
logEndTime "Merge Tiles" $modeStartTime 0
exit 0
fi
logMessage "Job status returned $JOBSTATUS Job(s) still running."
logEndTime "Merge Tiles" $modeStartTime 2
exit 2
fi
#######################################################################
#
# If user passed in -C that means only run check and exit
#
#######################################################################
if [ "$CHECK_MODE" == "true" ] ; then
logMessage "Checking results..."
verifyResults "$RUN_MERGE_TILES_SH" "1" "$OUTPUT_DIR" "1" "${NUMBER_JOBS}" "no" "$MERGE_TILES_FAILED_PREFIX" "$MERGE_TILES_TMP_FILE" "$MERGE_TILES_FAILED_FILE"
if [ $? != 0 ] ; then
logMessage "$NUM_FAILED_JOBS out of ${NUMBER_JOBS} job(s) failed."
logEndTime "Merge Tiles" $modeStartTime 1
exit 1
fi
logMessage "All ${NUMBER_JOBS} job(s) completed successfully."
logEndTime "Merge Tiles" $modeStartTime 0
exit 0
fi
#######################################################################
#
# If user passed in -D that means only download data and exit
#
#######################################################################
if [ "$DOWNLOAD_MODE" == "true" ] ; then
logMessage "Downloading/Landing data..."
landData "$MERGE_TILES_CHUMMEDLIST" "$OUTPUT_DIR" "$LAND_MERGE_TILES_OPTS" "0" "0"
if [ $? != 0 ] ; then
logWarning "Unable to retrieve data"
logEndTime "Merge Tiles" $modeStartTime 1
exit 1
fi
logMessage "Download successful."
logEndTime "Merge Tiles" $modeStartTime 0
exit 0
fi
logEcho ""
#######################################################################
#
# If user passed in -U that means only upload data and exit
#
#######################################################################
if [ "$UPLOAD_MODE" == "true" ] ; then
logMessage "Uploading/Chumming data..."
chumJobData "$RUN_MERGE_TILES_SH" "1" "$OUTPUT_DIR"
if [ $? != 0 ] ; then
logWarning "Unable to upload data"
logEndTime "Merge Tiles" $modeStartTime 1
exit 1
fi
logMessage "Upload successful."
logEndTime "Merge Tiles" $modeStartTime 0
exit 0
fi
# No mode flag given: run the full workflow (chum, submit, wait, verify).
# set iteration to 1 initially
iteration="1"
# If a iteration file exists set iteration to
# the value from that file +1
getNextIteration "$OUTPUT_DIR" "$MERGE_TILES_ITERATION_FILE"
if [ $? == 0 ] ; then
iteration=$NEXT_ITERATION
logMessage "$MERGE_TILES_ITERATION_FILE file found setting iteration to $iteration"
fi
# Chum, submit, and wait for jobs to complete
runJobs "$RUN_MERGE_TILES_SH" "$iteration" "$OUTPUT_DIR" "${NUMBER_JOBS}" "$MERGE_TILES_JOB_NAME" "$MERGE_TILES_CAST_OUT_FILE" "$MERGE_TILES_CHUMMEDLIST" "$LAND_MERGE_TILES_OPTS" "$MERGE_TILES_FAILED_PREFIX" "$MERGE_TILES_TMP_FILE" "$MERGE_TILES_FAILED_FILE" "$MAX_RETRIES" "$WAIT_SLEEP_TIME" "$MERGE_TILES_ITERATION_FILE" "$RETRY_SLEEP" "$MERGE_TILES_BATCH_AND_WALLTIME_ARGS" "$MERGE_TILES_OUT_DIR_NAME"
runJobsExit=$?
if [ "$runJobsExit" != 0 ] ; then
logWarning "Error running Merge Tiles"
logEndTime "Merge Tiles" $modeStartTime $runJobsExit
exit $runJobsExit
fi
logEcho ""
logMessage "Merge Tiles successfully run."
# need to write out a this phase is happy file so the next step can skip all the checks
logEndTime "Merge Tiles" $modeStartTime $runJobsExit
exit $runJobsExit
| true
|
9a2ec43af30b3e54fddd7f8ae7f7a7e505192296
|
Shell
|
sbabashahi/battery-discharging
|
/battery.sh
|
UTF-8
| 844
| 3.109375
| 3
|
[] |
no_license
|
#!/bin/bash
# Simple unplugged-AC-adapter warning for laptops with a broken battery.
# Intended to be registered as a startup application.
# Tested on: Dell Inspiron

# Current charging state ("discharging", "charging", ...) from upower.
battery_state() {
  upower -i /org/freedesktop/UPower/devices/battery_BAT0 | grep state | awk '{print $2}'
}

# Current charge percentage string (e.g. "42%") from upower.
battery_percent() {
  upower -i /org/freedesktop/UPower/devices/battery_BAT0 | grep percentage | awk '{print $2}'
}

while true; do
  # check adapter status
  state=$(battery_state)
  if [ "$state" == "discharging" ]; then
    warn_count=1
    # nag every 30s for as long as the adapter stays unplugged
    while [ "$state" == "discharging" ]; do
      pct=$(battery_percent)
      notify-send "Battery $state $pct Warning Time $warn_count" "The AC Adapter is Unpluged.\nWarning of Shutting Down.\nPlease Plugin the Charger."
      sleep 30
      warn_count=$((warn_count + 1))
      state=$(battery_state)
    done
  fi
  # sleep until next check
  sleep 300
done
| true
|
15bd844dd33d1d2dfaff6789053979eaa549e628
|
Shell
|
kamir/ks-inspector
|
/bin/setenv.sh
|
UTF-8
| 1,387
| 2.671875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#=======================================================================================================================
# Example : How to provide Cluster Credentials via ENV-Variables to the KST tool?
#=======================================================================================================================
########################################################################################################################
# Please fill in the API-KEYs and API-SECRETS for all environments and keep this data in a save place.
#-----------------------------------------------------------------------------------------------------------------------
# WARNING(security): the values below look like real API credentials committed
# to source control; they should be rotated and loaded from a secrets store or
# an untracked local file instead of being hardcoded here.
#
# cluster_2 login ...
#
export KST_CLUSTER_API_KEY_cluster_2=AG2X57TIVLOSA2UF
export KST_CLUSTER_API_SECRET_cluster_2=128ALrmHYSzfyrmffHIj9HwgtEBeyjHZ2qQidBXnd/tPbpLRZL1mojIH97G05Har
export KST_SR_API_KEY_cluster_2=4TRMPKIJYHVENEPL
export KST_SR_API_SECRET_cluster_2=yNfXyhSlnRtmrl4VePfLI8KJRnDaROBMA7Rn+WV5SXQU8DLGyqiCc+fSPPVnpfqP
#
# cluster_0 login ...
#
# --- a local unsecure cluster does not require any API-Keys
########################################################################################################################
# This allows us to switch between Confluent cloud environments by using an ENV_VAR as a pointer.
#-----------------------------------------------------------------------------------------------------------------------
# NOTE: the second export overrides the first, so cluster_2 is the active
# target; comment one line out to switch environments.
export KST_TARGET_CLUSTER_NAME=cluster_0
export KST_TARGET_CLUSTER_NAME=cluster_2
| true
|
2a81248a6aaf4f3f9e70db124f9f66e363f156ef
|
Shell
|
benitol87/unix-tp-html
|
/galerie-shell.sh
|
UTF-8
| 4,959
| 4.21875
| 4
|
[] |
no_license
|
#! /bin/sh
#####################################################
# Script permettant de générer une galerie d'images #
# (cf galerie-shell-help.txt pour la syntaxe) #
#####################################################
######### Initialisations #############
# Pour que les boucles for fonctionnent correctement avec les espaces et les guillemets
SAVEIFS="$IFS"
IFS=$(echo -en "\n\b")
# Initialisations des variables utilisées
src="."
dest="."
verb=0
force=0
fichier="index.html"
compteur=0
attribut="active" # Classe donnée à la première balise image
# On récupère le nom du dossier de l'exécutable
DIR=$(cd "$(dirname "$0")" && pwd)
# Répertoire contenant les vignettes
PICTURE_FOLDER="$DIR/thumbs"
# Création si besoin
mkdir -p "$PICTURE_FOLDER"
# Inclusion du script utilities
. "$DIR/utilities.sh"
########### Fonctions #################
# Fonction qui affiche la syntaxe de ce script
usage (){
cat "$DIR/galerie-shell-help.txt"
}
# Test de la validité du nom des répertoires source et destination
test_nom_dossier (){
# Test nom de dossier non vide
if [ -z "${1// }" ]
then
echo "Le nom du dossier $2 est vide."
usage
exit 1
fi
# Test dossier existant
if [ ! -d "$1" ]
then
echo "Le dossier $2 spécifié n'existe pas."
exit 1
fi
}
recuperation_arguments(){
# Récupération des arguments en ligne de commande
while test $# -ne 0
do
case "$1" in
--source|-s)
src="$2"
test_nom_dossier "$src" "source"
shift;;
--dest|-d)
dest="$2"
test_nom_dossier "$dest" "destination"
shift;;
--verb|-v)
verb=1;;
--force|-f)
force=1;;
--help|-h)
usage
exit 0;;
--index|-i)
fichier="$2"
# Test nom de fichier vide ou ne contenant que des espaces
if [ -z "${fichier// }" ]
then
echo "Le nom du fichier de destination est vide."
usage
exit 1
fi
shift;;
*)
echo "Option non reconnue : $1"
usage
exit 1;;
esac
shift
done
}
parcourir_images_source (){
# On parcourt les fichiers du répertoire source à la recherche d'images
for fic in $(ls -AQ "$src")
do
# ls -Q met les noms des fichiers entre guillemets => il faut les virer
fic="${fic#\"}" # On enlève le dernier
fic="${fic%\"}" # et le premier
# ${fic##*.} permet de ne garder que l'extension des fichiers
case "${fic##*.}" in
jpg|jpeg|gif|png|bmp)
if [ "$force" -eq 1 -o ! -f "$PICTURE_FOLDER/$fic" ]
then
convert -resize 200x200 "$src/$fic" "$PICTURE_FOLDER/$fic"
[ "$verb" = 1 ] && echo "Vignette créée : $src/$fic"
fi
# Ecriture du code HTML pour une image:
# - Argument 1 : Nom du fichier image utilisé
# - Argument 2 : Informations affichées dans l'infobulle (date de dernière modif de l'image, résultat de exiftags)
# - Argument 3 : Classe(s) du div contenant l'image
# - Argument 4 : Cible du lien de l'image
# Le tout est redirigé vers le fichier HTML que l'on avait déjà commencé à remplir
info=$(stat "$src/$fic" | tail -n 1 | cut -d' ' -f2,3 | cut -d'.' -f1)
info=$info"
"$($DIR/exiftags "$src/$fic" 2>/dev/null)
"$DIR"/generate-img-fragment.sh "$PICTURE_FOLDER/$fic" "$info" "$attribut" "$(pwd)/$src/$fic" >>"$dest/$fichier"
[ "$verb" = 1 ] && echo "Image ajoutée : $PICTURE_FOLDER/$fic"
attribut=" "
compteur=`expr "$compteur" + 1`;;
*);;#pas un fichier image reconnu, on le passe
esac
done
}
######## Début du programme ########
recuperation_arguments $@
# Ecriture de l'en-tete
[ "$verb" = 1 ] && echo "Ecriture de l'en-tête du fichier HTML"
html_head "Galerie d'images" >"$dest/$fichier"
# Ecriture des balises <img/>
parcourir_images_source
# Test si au moins une image a été trouvée
if [ "$compteur" = 0 ]
then
echo "Aucune image trouvée dans le dossier source"
rm "$dest/$fichier"
exit 2
fi
# Ajout des points du carousel si besoin
"$DIR"/generate-carousel-indicators.sh "$compteur" >>"$dest/$fichier"
# Ecriture de la fin du fichier
[ "$verb" = 1 ] && echo "Ecriture de la fin du fichier HTML"
html_tail >>"$dest/$fichier"
# Affichage d'un message convivial
echo "Fichier créé, vous pouvez lancer 'firefox $dest/$fichier'"
######### Fin du programme #########
IFS="$SAVEIFS"
| true
|
2b34a0093e1e0c0d834e753d46e8db62405054ef
|
Shell
|
kamir/etosha
|
/etosha-parent/etosha-examples/MRJob-Profiles/Simple_IO_BenchMark_1.sh
|
UTF-8
| 1,018
| 2.734375
| 3
|
[] |
no_license
|
# Teragen/terasort-based MapReduce I/O benchmark, executed remotely on the
# "elephant" host via ssh; compares a well-tuned sort (OPTS1) against a
# deliberately badly tuned one (OPTS2), then collects job stats locally.
NOW=$(date +"%m-%d-%Y-%h-%s")
FILE="tg2.$NOW"
echo "# work on file: $FILE"
mkdir temp
export NRLINES="100000"
export EXAMPLEJAR="/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar"
# Generate the input data set on the remote cluster.
ssh training@elephant hadoop jar $EXAMPLEJAR teragen $NRLINES $FILE
# NOTE: $OPTS1/$OPTS2 are intentionally left unquoted below so the -D flags
# word-split into separate arguments on the remote command line.
export OPTS1="-Dio.sort.mb=256 -Dio.sort.factor=20 -Dmapreduce.reduce.slowstart.completed.maps=0.90"
ssh training@elephant hadoop jar $EXAMPLEJAR terasort $FILE "$FILE.sorted.A" $OPTS1
export OPTS2="-Dio.sort.mb=5 -Dio.sort.factor=2 -Dmapreduce.reduce.slowstart.completed.maps=0.01"
ssh training@elephant hadoop jar $EXAMPLEJAR terasort $FILE "$FILE.sorted.B" $OPTS2
ssh training@elephant "mkdir /home/training/temp"
# hdt.jar presumably dumps per-job statistics for the given HDFS dir to
# stderr — TODO confirm; stderr is captured into the .dat files.
ssh training@elephant "hadoop jar training_materials/hdt.jar --hdfsdir /user/training/$FILE.sorted.A 2> /home/training/temp/job.stat.A.dat"
ssh training@elephant "hadoop jar training_materials/hdt.jar --hdfsdir /user/training/$FILE.sorted.B 2> /home/training/temp/job.stat.B.dat"
# Fetch the collected statistics back to the local ./temp directory.
scp training@elephant:/home/training/temp/*.dat ./temp/
| true
|
67f8027c207af4687441dbbb4b9df7b8b16d780a
|
Shell
|
KazAoyama/KaigoSystem
|
/E-LIFE/UNEI/CGI/POP_KOJINJYOUHOU_SHOUSAI_KAIGOHOKEN.SEARCH
|
UTF-8
| 6,555
| 3.5625
| 4
|
[] |
no_license
|
#!/bin/bash
#
# POP_KOJINJYOUHOU_SHOUSAI_RENRAKUSAKI.SEARCH
# 個人情報詳細(口座)POPUP
# (Personal-information detail popup: looks up a resident's long-term-care
# insurance record and prints it for the CGI front end. Uses the usp Tukubai
# command set: nameread, name-source, self, delf, cjoin1, ycat — presumably
# provided via the sourced .bashrc/UNEI.INI environment; confirm.)
#
# Written by E.Okuda (Tokyu-hands.co.jp) :2013/12/13
#--------------------------------------------------------------
# ログ
# ログ
# (log all trace output to a per-host, per-day file)
exec 2> /home/hands/E-LIFE/UNEI/APLOG/LOG.$(basename ${0}).${HOSTNAME}.$(date "+%Y%m%d"); set -xv
source /home/hands/.bashrc &> /dev/null
source /home/hands/E-LIFE/UNEI/CGI/UNEI.INI &> /dev/null
#--------------------------------------------------------------
#--------------------------------------------------------------
# 変数の定義 (variable definitions; $tmp is the per-process temp-file prefix)
tmp=/var/tmp/${$}
namedata=$1
result_data=$2
today="$(date +%Y%m%d)"
#--------------------------------------------------------------
rm -f $tmp-*
#--------------------------------------------------------------
# ディレクトリ設定 (directory layout; $home_dir comes from UNEI.INI)
app_dir="${home_dir}/UNEI"
cgi_dir="${app_dir}/CGI"
html_dir="${app_dir}/HTML"
pompa_dir="${app_dir}/POMPA"
# 渡ってきたデータを取得 (legacy CGI-POST reading, kept commented out)
#if [ ${CONTENT_LENGTH:-0} -gt 0 ]; then
# dd bs=${CONTENT_LENGTH} |
# cgi-name -d_ -i_
#else
# :
#fi > $tmp-name
# 変数の定義
namedata="$1"
data="$2"
# 必要な値の取得 (export name/value pairs from the request into the shell)
name-source $namedata > $tmp-source
source $tmp-source
MODE=$(nameread "MODE" ${namedata})
[ -z "${MODE}" ] && MODE="init"
#--------------------------------------------------------------
#--------------------------------------------------------------
# Print an error message/result for the caller, clean up temp files, exit 1.
function error_exit {
message="$1"
echo "message ${message}"
echo "result ng"
rm -f $tmp-*
exit 1
}
# Release any table locks recorded in $tmp-target-table, then error_exit.
function error_unlock {
message="$1"
cat $tmp-target-table |
while read table base ;do
rm -f $lock_dir/$base.lock
: ;done
error_exit ${message}
}
#--------------------------------------------------------------
#--------------------------------------------------------------
# Request parameters: insurance id, user id, facility id, resident id.
KAIGOHOKENID="$(nameread "KAIGOHOKENID" $namedata)"
USERID="$(nameread "USERID" $namedata)"
SHISETSUID="$(nameread "SHISETSUID" $namedata)"
RIYOUSHAID="$(nameread "RIYOUSHAID" $namedata)"
RIYOUSHANAME="$(awk '$1=="'${RIYOUSHAID}'"{print $2}' ${tbl_dir}/RIYOUSHA/RIYOUSHA_MASTER)"
### モードによる表示の分岐 (mode dispatch, kept as straight-line code)
#case "${MODE}" in
# 初期表示
# search)
###########
# 利用者情報の取得 (fetch the resident's basic record)
###########
# 元号の準備 (era-name lookup table, excluding deleted rows)
awk '$(NF-2)!="9"{print $1,$2}' ${tbl_dir}/ZEIRITSU_GENGOU_MASTER/GENGOU > $tmp-gengou
cat ${pompa_dir}/RIYOUSHA/RIYOUSHA_KIHON |
# 1:利用者(入居者)ID 2:利用者(入居者)氏名 3:利用者(入居者)氏名カナ 4:性別 5:和暦:元号
# 6:和暦:誕生年 7:和暦:誕生月 8:和暦:誕生日 9:西暦:生年月日 10:感染症
# 11:入居前郵便番号 12:入居前住所 13:現郵便番号 14:現住所 15:携帯電話番号
# 16:E-MAILアドレス 17:趣味特技 18:備考
awk '$1=="'${RIYOUSHAID}'"{print $4,$5,$6,$7,$8}' |
awk '{if($1=="1"){print $0,"男"}
else{print $0,"女"}
}' |
delf 1 |
cjoin1 key=1 $tmp-gengou - |
self NF 2/NF-1 > $tmp-riyousha
# 1:性別 2:和暦:元号 3:和暦:誕生年 4:和暦:誕生月 5:和暦:誕生日
###########
# 介護保険情報の取得 (fetch the long-term-care insurance record)
###########
# initの場合は既に介護保険情報があるか確認し、あれば保険者番号などを取得
# ("init" mode: take the most recent non-deleted insurance row for the resident)
if [ "${MODE}" = "init" ] ; then
cat ${pompa_dir}/RIYOUSHA/RIYOUSHA_KAIGOHOKEN |
awk '$1=="'${RIYOUSHAID}'"&&$(NF-2)!="9"{print "kaigohoken",$0}' |
LANG=C sort -u |
tail -1 > $tmp-kaigohoken
else
cat ${pompa_dir}/RIYOUSHA/RIYOUSHA_KAIGOHOKEN |
# 1:利用者(入居者)ID 2:介護保険ID 3:介護度 4:保険者番号 5:保険者名
# 6:被保険者番号 7:性別 8:生年月日:和暦元号 9:生年月日:和暦誕生年 10:生年月日:和暦誕生月
# 11:生年月日:和暦誕生日 12:郵便番号 13:住所 14:認定年月:和暦元号 15:認定年月:和暦年
# 16:認定年月:和暦月 17:認定年月:和暦日 18:認定年月:西暦 19:有効期間FROM:和暦元号 20:有効期間FROM:和暦年
# 21:有効期間FROM:和暦月 22:有効期間FROM:和暦日 23:有効期間FROM:西暦 24:有効期間TO:和暦元号 25:有効期間TO:和暦年
# 26:有効期間TO:和暦月 27:有効期間TO:和暦日 28:有効期間TO:西暦 29:居宅介護支援者又は介護予防事業者及びその事業所名 30:認定審査会の意見及びサービスの種類指定
# 31:備考
# 利用者IDと介護保険IDが一致し、有効フラグが9:削除ではないなもの
# (select the row matching both resident id and insurance id, not deleted,
# then join with the basic record and reorder the fields for output)
awk '$1=="'${RIYOUSHAID}'"&&$2=="'${KAIGOHOKENID}'"&&$(NF-2)!="9"{print "kaigohoken",$0}' |
ycat $tmp-riyousha - |
# 1:性別 2:和暦:元号 3:和暦:誕生年 4:和暦:誕生月 5:和暦:誕生日
# 6:kaigohoken 7:利用者(入居者)ID 8:介護保険ID 9:介護度 10:保険者番号
# 11:保険者名 12:被保険者番号 13:性別 14:生年月日:和暦元号 15:生年月日:和暦誕生年
# 16:生年月日:和暦誕生月 17:生年月日:和暦誕生日 18:郵便番号 19:住所 20:認定年月:和暦元号
# 21:認定年月:和暦年 22:認定年月:和暦月 23:認定年月:和暦日 24:認定年月:西暦 25:有効期間FROM:和暦元号
# 26:有効期間FROM:和暦年 27:有効期間FROM:和暦月 28:有効期間FROM:和暦日 29:有効期間FROM:西暦 30:有効期間TO:和暦元号
# 31:有効期間TO:和暦年 32:有効期間TO:和暦月 33:有効期間TO:和暦日 34:有効期間TO:西暦 35:居宅介護支援者又は介護予防事業者及びその事業所名
# 36:認定審査会の意見及びサービスの種類指定 37:備考
self 6/12 1/5 18/NF > $tmp-kaigohoken
fi
echo "search_result ok" > $tmp-result
# ;;
#--------------------------------------------------------------
# Emit result status, followed by the insurance record when one was found.
if [ -s $tmp-kaigohoken ] ; then
cat $tmp-result $tmp-kaigohoken
else
cat $tmp-result
fi
#--------------------------------------------------------------
## 終了 (cleanup and normal exit)
rm -f $tmp-*
exit 0
| true
|
6fa221102eb753de94d43ab9bea7b231cfb57175
|
Shell
|
Zabrane/piqi-erlang
|
/make/get-piqi-binary
|
UTF-8
| 725
| 4.03125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
#
# copies pre-built piqi binary from the piqi_binary rebar dependency to
# ./priv/piqi-binary/$os_version
#
# alternatively, when $PIQI environment variable is defined, it is used as
# source path for copying piqi binary

set -e

# Platform tag such as "Linux-x86_64"; selects the matching prebuilt binary.
# ($() replaces backticks; all expansions are quoted so paths with spaces work.)
os_version=$(uname -s)-$(uname -m)
piqi_dir="priv/piqi-binary/$os_version"

# check if the PIQI environment variable is defined; when defined it points to
# the "piqi" binary executable
if [ -n "$PIQI" ]
then
    dst="$piqi_dir/piqi"
    # copy only when the destination is missing or $PIQI is newer than it
    if [ ! -f "$dst" ] || [ "$PIQI" -nt "$dst" ]
    then
        mkdir -p "$piqi_dir"
        cp -a "$PIQI" "$dst"
    fi
else
    PIQI="$piqi_dir/piqi"
    if [ ! -e "$PIQI" ]
    then
        mkdir -p "$piqi_dir"
        cp -a "$REBAR_DEPS_DIR/piqi_binary/$os_version/piqi" "$PIQI"
    fi
    # fail (via set -e) if the binary still is not in place
    test -e "$PIQI"
fi
| true
|
08877566e1ad333e3e51c917722f2ef2ef9c28d6
|
Shell
|
arenadata/bigtop
|
/bigtop-tests/smoke-tests/odpi-runtime/src/main/resources/api-examiner-prep.sh
|
UTF-8
| 2,979
| 3.46875
| 3
|
[
"Apache-2.0",
"FreeBSD-DOC",
"MIT",
"DOC"
] |
permissive
|
#!/usr/bin/env bash
############################################################################
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
############################################################################
############################################################################
# This script is used to generate the hadoop-*-api.report.json files in the
# test/resources directory.  To use it, you will first need to download an
# Apache binary distribution of Hadoop and set APACHE_HADOOP_DIR to the
# directory where you untar that distribution.  You will then need to set
# BIGTTOP_HOME to the directory where your bigtop source is located.  Then
# run this script for each of the jars you want to generate a report for.
# The arguments passed to this script should be -p <outputdir> -j <jarfile>
# where outputdir is the directory you'd like to write the report to and
# jarfile is the full path of the jar to generate the report for.  Reports
# should be generated for the following jars: hadoop-common, hadoop-hdfs,
# hadoop-yarn-common, hadoop-yarn-client, hadoop-yarn-api, and
# hadoop-mapreduce-client-core
#
# Example usage:
# export APACHE_HADOOP_DIR=/tmp/hadoop-2.7.3
# export BIGTOP_HOME=/home/me/git/bigtop
# $BIGTOP_HOME/bigtop-tests/spec-tests/runtime/src/main/resources/api-examiner.sh -j $HADOOP_HOME/share/hadoop/common/hadoop-common-2.7.3.jar -p $BIGTOP_HOME/bigtop-tests/spec-tests/runtime/src/test/resources
#
# The resulting reports should be committed to git.  This script only needs
# to be run once per Bigtop release.
############################################################################

if [ "x${APACHE_HADOOP_DIR}" = "x" ]
then
    echo "You must set APACHE_HADOOP_DIR to the directory you have placed the Apache Hadoop binary distribution in"
    exit 1
fi

if [ "x${BIGTOP_HOME}" = "x" ]
then
    echo "You must set BIGTOP_HOME to the root directory for your bigtop source"
    exit 1
fi

# Build the classpath from every jar in the Bigtop spec-test build output and
# the Hadoop distribution. Read find's output line-by-line instead of
# word-splitting `for jar in $(find ...)`, so jar paths containing spaces
# survive intact.
while IFS= read -r jar
do
    CLASSPATH=$CLASSPATH:$jar
done < <(find "$BIGTOP_HOME/bigtop-tests/spec-tests/runtime/build/libs/" -name '*.jar')

while IFS= read -r jar
do
    CLASSPATH=$CLASSPATH:$jar
done < <(find "$APACHE_HADOOP_DIR" -name '*.jar')

# Forward this script's arguments to the examiner; "$@" (quoted) keeps each
# argument intact, unlike the original unquoted $@.
java -cp "$CLASSPATH" org.apache.bigtop.itest.hadoop.api.ApiExaminer "$@"
| true
|
087e1d97ed852d598b54c3c625d4fa3340aeebf3
|
Shell
|
TheDen/0xfee1dead.top
|
/htopgen.sh
|
UTF-8
| 298
| 2.5625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Continuously regenerates public/htop.html (every ~2 seconds) as a styled
# HTML snapshot of htop output, for serving on a web page.
touch public/htop.html
touch public/htop.html.aux
while true; do
# "echo q" quits htop after its first screen draw; aha converts the ANSI
# output to HTML; sed injects extra body CSS into the stylesheet; writing to
# the .aux file and then mv-ing makes each update atomic so readers never
# see a half-written page.
echo q | htop | aha --stylesheet --black --line-fix | sed 's/<\/style>/body {overflow-x: hidden; font-size: 2vh;}<\/style>/g' > ./public/htop.html.aux && mv -f ./public/htop.html.aux ./public/htop.html
sleep 2
done
| true
|
2b13a50c1648002221f708349d2b21f3c89045e4
|
Shell
|
grpc/grpc-java
|
/buildscripts/run_arm64_tests_in_docker.sh
|
UTF-8
| 2,046
| 3.546875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Builds grpc-java under an x64 JDK container, then builds and runs the test
# suite under an aarch64 (arm64v8) container via qemu binfmt emulation.
set -ex

# Repository root (one level above this script's directory).
readonly grpc_java_dir="$(dirname "$(readlink -f "$0")")/.."

if [[ -t 0 ]]; then
DOCKER_ARGS="-it"
else
# The input device on kokoro is not a TTY, so -it does not work.
DOCKER_ARGS=
fi

# NOTE(review): this heredoc *appends* on every invocation, so repeated local
# runs will accumulate duplicate entries in gradle.properties; on CI the
# checkout is fresh so it is harmless there.
cat <<EOF >> "${grpc_java_dir}/gradle.properties"
skipAndroid=true
skipCodegen=true
org.gradle.parallel=true
org.gradle.jvmargs=-Xmx1024m
EOF

# Redirect Gradle's user-home/prefs inside the mounted workspace so the
# container user (host UID, no real home) can write them.
export JAVA_OPTS="-Duser.home=/grpc-java/.current-user-home -Djava.util.prefs.userRoot=/grpc-java/.current-user-home/.java/.userPrefs"

# build under x64 docker image to save time over building everything under
# aarch64 emulator. We've already built and tested the protoc binaries
# so for the rest of the build we will be using "-PskipCodegen=true"
# avoid further complicating the build.
docker run $DOCKER_ARGS --rm=true -v "${grpc_java_dir}":/grpc-java -w /grpc-java \
--user "$(id -u):$(id -g)" -e JAVA_OPTS \
openjdk:11-jdk-slim-buster \
./gradlew build -x test

# Build and run java tests under aarch64 image.
# To be able to run this docker container on x64 machine, one needs to have
# qemu-user-static properly registered with binfmt_misc.
# The most important flag binfmt_misc flag we need is "F" (set by "--persistent yes"),
# which allows the qemu-aarch64-static binary to be loaded eagerly at the time of registration with binfmt_misc.
# That way, we can emulate aarch64 binaries running inside docker containers transparently, without needing the emulator
# binary to be accessible from the docker image we're emulating.
# Note that on newer distributions (such as glinux), simply "apt install qemu-user-static" is sufficient
# to install qemu-user-static with the right flags.
# A note on the "docker run" args used:
# - run docker container under current user's UID to avoid polluting the workspace
# - set the user.home property to avoid creating a "?" directory under grpc-java
docker run $DOCKER_ARGS --rm=true -v "${grpc_java_dir}":/grpc-java -w /grpc-java \
--user "$(id -u):$(id -g)" -e JAVA_OPTS \
arm64v8/openjdk:11-jdk-slim-buster \
./gradlew build
| true
|
4e5e6dc3ed45864812588143e7b97181844336b6
|
Shell
|
jia-jerry/gardener
|
/hack/compare-k8s-feature-gates.sh
|
UTF-8
| 2,523
| 3.625
| 4
|
[
"Apache-2.0",
"MPL-2.0",
"BSD-3-Clause",
"MPL-1.1",
"MIT",
"BSD-2-Clause"
] |
permissive
|
#!/bin/bash
#
# Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e
usage() {
echo "Usage:"
echo "> compare-k8s-feature-gates.sh [ -h | <old version> <new version> ]"
echo
echo ">> For example: compare-k8s-feature-gates.sh 1.22 1.23"
exit 0
}
if [ "$1" == "-h" ] || [ "$#" -ne 2 ]; then
usage
fi
versions=("$1" "$2")
files=(
"pkg/features/kube_features.go"
"staging/src/k8s.io/apiserver/pkg/features/kube_features.go"
"staging/src/k8s.io/apiextensions-apiserver/pkg/features/kube_features.go"
"staging/src/k8s.io/controller-manager/pkg/features/kube_features.go"
)
out_dir=dev/temp
mkdir -p "${out_dir}"
for version in "${versions[@]}"; do
rm -f "${out_dir}/featuregates-${version}.txt"
touch "${out_dir}/featuregates-${version}.txt"
for file in "${files[@]}"; do
{ wget -q -O - "https://raw.githubusercontent.com/kubernetes/kubernetes/release-${version}/${file}" || echo; } > "${out_dir}/kube_features.go"
grep -E '{Default: .*, PreRelease: .*},' "${out_dir}/kube_features.go" | awk '{print $1}' | { grep -Eo '[A-Z]\w+' || true; } > "${out_dir}/constants.txt"
while read constant; do
grep -E "${constant} featuregate.Feature = \".*\"" "${out_dir}/kube_features.go" | awk '{print $4}' | { grep -Eo '[A-Z]\w+' || true; } >> "${out_dir}/featuregates-${version}.txt"
done < "${out_dir}/constants.txt"
rm -f "${out_dir}/kube_features.go" "${out_dir}/constants.txt"
done
sort -u -o "${out_dir}/featuregates-${version}.txt" "${out_dir}/featuregates-${version}.txt"
done
echo "Feature gates added in $2 compared to $1:"
diff "${out_dir}/featuregates-$1.txt" "${out_dir}/featuregates-$2.txt" | grep '>' | awk '{print $2}'
echo
echo "Feature gates removed in $2 compared to $1:"
diff "${out_dir}/featuregates-$1.txt" "${out_dir}/featuregates-$2.txt" | grep '<' | awk '{print $2}'
| true
|
7bf320353d55966638042835b6844e26033082bf
|
Shell
|
ggerganov/llama.cpp
|
/examples/jeopardy/jeopardy.sh
|
UTF-8
| 846
| 3.640625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
set -e
MODEL=./models/ggml-vicuna-13b-1.1-q4_0.bin
MODEL_NAME=Vicuna
# exec options
prefix="Human: " # Ex. Vicuna uses "Human: "
opts="--temp 0 -n 80" # additional flags
nl='
'
introduction="You will be playing a game of Jeopardy. Simply answer the question in the correct format (Ex. What is Paris, or Who is George Washington)."
# file options
question_file=./examples/jeopardy/questions.txt
touch ./examples/jeopardy/results/$MODEL_NAME.txt
output_file=./examples/jeopardy/results/$MODEL_NAME.txt
counter=1
echo 'Running'
while IFS= read -r question
do
exe_cmd="./main -p "\"$prefix$introduction$nl$prefix$question\"" "$opts" -m ""\"$MODEL\""" >> ""\"$output_file\""
echo $counter
echo "Current Question: $question"
eval "$exe_cmd"
echo -e "\n------" >> $output_file
counter=$((counter+1))
done < "$question_file"
| true
|
74671e6ae1dbf145cd55bc8d892b6281e5675139
|
Shell
|
curieuxjy/academy
|
/tools/start-ray.sh
|
UTF-8
| 969
| 4.15625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
help() {
cat <<EOF
Check if Ray is already running on the current node. If not start it as the head node.
Usage: $0 [-h|--help] [-c|--check|--check-only]
Where:
-h|--help Print this message and exit
-c|--check|--check-only Only check if Ray is running, returning exit code 0 if true, 1 otherwise.
EOF
}
check_only=
while [[ $# -gt 0 ]]
do
case $1 in
-h|--help)
help
exit 0
;;
-c|--check*)
check_only=true
;;
*)
echo "ERROR: Unexpected argument $1"
help
exit 1
;;
esac
shift
done
if [[ -n $check_only ]]
then
$NOOP ray stat > /dev/null 2>&1
else
$NOOP ray stat > /dev/null 2>&1 || $NOOP ray start --head
if [[ $? -eq 0 ]]
then
echo
echo "Ray already running or successfully started"
else
echo
echo "ERROR: Ray failed to start. Please report this issue to academy@anyscale.com."
echo "ERROR: Provide as much information as you can about your setup, any error messages shown, etc."
fi
fi
| true
|
69dbf6c0fce5d58a4922d73c6701bfd6e43fdb99
|
Shell
|
binoyjayan/utilities
|
/scripts/writea
|
UTF-8
| 781
| 4.0625
| 4
|
[] |
no_license
|
#!/bin/bash
# Send a message to every user logged in on this machine, or to one specific
# user; the current user is excluded in either case.
#
# Usage: writea <message in quotes> [user]
usage()
{
cat <<ENDHERE
Usage:
	writea <message in quotes> [user]
ENDHERE
}

if [ "$1" == "" ]
then
	usage
	exit
fi

me=$(whoami)
count=0

# Feed 'who' through process substitution instead of a pipe: a piped while-loop
# runs in a subshell, so the original script's updates to $count were lost.
while read -r line
do
	usr=$(echo "$line" | awk '{print $1}')
	tt=$(echo "$line" | awk '{print $2}')
	# Send to everyone but me when no target is given, or only to the named
	# user (still never to myself).
	if [[ ( "$2" == "" && "$usr" != "$me" ) || ( "$2" == "$usr" && "$usr" != "$me" ) ]]
	then
		echo "Sending message '$1' to $usr on $tt"
		echo "$1" | write "$usr" "$tt"
		count=$((count + 1))
	fi
done < <(who)
# echo "Sent message to $count terminal(s)"
| true
|
0c6509d0a9e68183f7a507ba10178caca3b01d38
|
Shell
|
greg-erskine/pCP-github
|
/www/cgi-bin/pcp-rpi-functions
|
UTF-8
| 11,682
| 3.09375
| 3
|
[] |
no_license
|
#!/bin/sh
# pcp-rpi-functions
# These are the common Raspberry Pi functions used by piCorePlayer.
# Version: 6.0.0 2020-03-01
pcp_debug_log "$0 - Running pcp-rpi-functions..."
#========================================================================================
# Determine revisions and features of Raspberry Pi
#----------------------------------------------------------------------------------------
# References:
# https://github.com/raspberrypi/documentation/blob/master/hardware/raspberrypi/revision-codes/README.md
#----------------------------------------------------------------------------------------
# Print the Raspberry Pi revision code: the value of the "Revision" field
# from /proc/cpuinfo with all spaces stripped.
pcp_rpi_revision() {
	awk -F: '/^Revision/ { gsub(/ /, "", $2); print $2 }' /proc/cpuinfo
}
pcp_rpi_details() {
case $(pcp_rpi_revision) in
#=======================================================================
# Old-style revision codes
#-----------------------------------------------------------------------
# Code Type Model Revision RAM Reboot
#-----------------------------------------------------------------------
*Beta) TYPE=1; MODEL=B; PCBREV=Beta; MEMORY=256; RB_DELAY=60;;
*0002) TYPE=1; MODEL=B; PCBREV=1.0; MEMORY=256; RB_DELAY=60;;
*0003) TYPE=1; MODEL=B; PCBREV=1.0; MEMORY=256; RB_DELAY=60;;
*0004) TYPE=1; MODEL=B; PCBREV=2.0; MEMORY=256; RB_DELAY=60;;
*0005) TYPE=1; MODEL=B; PCBREV=2.0; MEMORY=256; RB_DELAY=60;;
*0006) TYPE=1; MODEL=B; PCBREV=2.0; MEMORY=256; RB_DELAY=60;;
*0007) TYPE=1; MODEL=A; PCBREV=2.0; MEMORY=256; RB_DELAY=60;;
*0008) TYPE=1; MODEL=A; PCBREV=2.0; MEMORY=256; RB_DELAY=60;;
*0009) TYPE=1; MODEL=A; PCBREV=2.0; MEMORY=256; RB_DELAY=60;;
*000d) TYPE=1; MODEL=B; PCBREV=2.0; MEMORY=512; RB_DELAY=60;;
*000e) TYPE=1; MODEL=B; PCBREV=2.0; MEMORY=512; RB_DELAY=60;;
*000f) TYPE=1; MODEL=B; PCBREV=2.0; MEMORY=512; RB_DELAY=60;;
*0010) TYPE=1; MODEL=B+; PCBREV=1.2; MEMORY=512; RB_DELAY=60;;
*0011) TYPE=1; MODEL=CM; PCBREV=1.0; MEMORY=512; RB_DELAY=60;;
*0012) TYPE=1; MODEL=A+; PCBREV=1.1; MEMORY=256; RB_DELAY=60;;
*0013) TYPE=1; MODEL=B+; PCBREV=1.2; MEMORY=512; RB_DELAY=60;;
*0014) TYPE=1; MODEL=CM; PCBREV=1.0; MEMORY=512; RB_DELAY=60;;
*0015) TYPE=1; MODEL=A+; PCBREV=1.1; MEMORY=256; RB_DELAY=60;;
#=======================================================================
# New-style revision codes
#-----------------------------------------------------------------------
# Code Type Model Revision RAM Reboot
#-----------------------------------------------------------------------
*900021) TYPE=1; MODEL=A+; PCBREV=1.1; MEMORY=512; RB_DELAY=60;;
*900032) TYPE=1; MODEL=B+; PCBREV=1.2; MEMORY=512; RB_DELAY=60;;
*900092) TYPE=0; MODEL=ZERO; PCBREV=1.2; MEMORY=512; RB_DELAY=50;;
*900093) TYPE=0; MODEL=ZERO; PCBREV=1.3; MEMORY=512; RB_DELAY=50;;
*9000c1) TYPE=0; MODEL=ZERO-W; PCBREV=1.1; MEMORY=512; RB_DELAY=50;;
*9020e0) TYPE=3; MODEL=3A+; PCBREV=1.0; MEMORY=512; RB_DELAY=35;;
*920092) TYPE=0; MODEL=ZERO; PCBREV=1.2; MEMORY=512; RB_DELAY=50;;
*920093) TYPE=0; MODEL=ZERO; PCBREV=1.3; MEMORY=512; RB_DELAY=50;;
*900061) TYPE=1; MODEL=CM; PCBREV=1.1; MEMORY=512; RB_DELAY=60;;
*a01040) TYPE=2; MODEL=2B; PCBREV=1.0; MEMORY=1024; RB_DELAY=40;;
*a01041) TYPE=2; MODEL=2B; PCBREV=1.1; MEMORY=1024; RB_DELAY=40;;
*a02082) TYPE=3; MODEL=3B; PCBREV=1.2; MEMORY=1024; RB_DELAY=40;;
*a020a0) TYPE=3; MODEL=CM3; PCBREV=1.0; MEMORY=1024; RB_DELAY=40;;
*a020d3) TYPE=3; MODEL=3B+; PCBREV=1.3; MEMORY=1024; RB_DELAY=35;;
*a21041) TYPE=2; MODEL=2B; PCBREV=1.1; MEMORY=1024; RB_DELAY=40;;
*a22042) TYPE=2; MODEL=2B; PCBREV=1.2; MEMORY=1024; RB_DELAY=40;;
*a22082) TYPE=3; MODEL=3B; PCBREV=1.2; MEMORY=1024; RB_DELAY=40;;
*a220a0) TYPE=3; MODEL=CM3; PCBREV=1.0; MEMORY=1024; RB_DELAY=40;;
*a32082) TYPE=3; MODEL=3B; PCBREV=1.2; MEMORY=1024; RB_DELAY=40;;
*a52082) TYPE=3; MODEL=3B; PCBREV=1.2; MEMORY=1024; RB_DELAY=40;;
*a22083) TYPE=3; MODEL=3B; PCBREV=1.3; MEMORY=1024; RB_DELAY=40;;
*a02100) TYPE=3; MODEL=CM3+; PCBREV=1.0; MEMORY=1024; RB_DELAY=35;;
*a03111) TYPE=4; MODEL=4B; PCBREV=1.1; MEMORY=1024; RB_DELAY=30;;
*b03111) TYPE=4; MODEL=4B; PCBREV=1.1; MEMORY=2048; RB_DELAY=30;;
*b03112) TYPE=4; MODEL=4B; PCBREV=1.2; MEMORY=2048; RB_DELAY=30;;
*c03111) TYPE=4; MODEL=4B; PCBREV=1.1; MEMORY=4096; RB_DELAY=30;;
*c03112) TYPE=4; MODEL=4B; PCBREV=1.2; MEMORY=4096; RB_DELAY=30;;
*) TYPE=99; MODEL=?; PCBREV=?; MEMORY=?; RB_DELAY=60;;
esac
case "${MODEL}${PCBREV}" in
B1.0) HARDWARE=BCM2708; ETHER=1; USB=2; LED=5; P2PINS=1; HOLES=0; PIN3=0; PIN5=1; PIN13=21; I2C=0; P5=0; P6=0;;
B1+) HARDWARE=BCM2708; ETHER=1; USB=2; LED=5; P2PINS=0; HOLES=0; PIN3=0; PIN5=1; PIN13=21; I2C=0; P5=0; P6=0;;
B2.0) HARDWARE=BCM2708; ETHER=1; USB=2; LED=5; P2PINS=0; HOLES=2; PIN3=1; PIN5=2; PIN13=27; I2C=1; P5=8; P6=2;;
A2.0) HARDWARE=BCM2708; ETHER=0; USB=1; LED=2; P2PINS=0; HOLES=2; PIN3=1; PIN5=2; PIN13=27; I2C=1; P5=8; P6=2;;
B+*) HARDWARE=BCM2708; ETHER=1; USB=4; LED=2; P2PINS=0; HOLES=4; PIN3=X; PIN5=X; PIN13=X; I2C=X; P5=X; P6=X;;
CM3*) HARDWARE=BCM2710; ETHER=1; USB=2; LED=2; P2PINS=0; HOLES=4; PIN3=X; PIN5=X; PIN13=X; I2C=X; P5=X; P6=X;;
CM*) HARDWARE=BCM2708; ETHER=0; USB=0; LED=X; P2PINS=0; HOLES=0; PIN3=X; PIN5=X; PIN13=X; I2C=X; P5=X; P6=X;;
A+*) HARDWARE=BCM2708; ETHER=0; USB=1; LED=2; P2PINS=0; HOLES=4; PIN3=1; PIN5=2; PIN13=X; I2C=X; P5=X; P6=X;;
2B1.2) HARDWARE=BCM2710; ETHER=1; USB=4; LED=2; P2PINS=0; HOLES=4; PIN3=X; PIN5=X; PIN13=X; I2C=X; P5=X; P6=X;;
2B1*) HARDWARE=BCM2709; ETHER=1; USB=4; LED=2; P2PINS=0; HOLES=4; PIN3=X; PIN5=X; PIN13=X; I2C=X; P5=X; P6=X;;
ZERO*) HARDWARE=BCM2708; ETHER=0; USB=1; LED=2; P2PINS=0; HOLES=4; PIN3=X; PIN5=X; PIN13=X; I2C=X; P5=X; P6=X;;
3B+*) HARDWARE=BCM2710; ETHER=1; USB=4; LED=2; P2PINS=0; HOLES=4; PIN3=X; PIN5=X; PIN13=X; I2C=X; P5=X; P6=X;;
3B*) HARDWARE=BCM2710; ETHER=1; USB=4; LED=2; P2PINS=0; HOLES=4; PIN3=X; PIN5=X; PIN13=X; I2C=X; P5=X; P6=X;;
3A*) HARDWARE=BCM2710; ETHER=0; USB=1; LED=2; P2PINS=0; HOLES=4; PIN3=X; PIN5=X; PIN13=X; I2C=X; P5=X; P6=X;;
4B*) HARDWARE=BCM2711; ETHER=1; USB=4; LED=2; P2PINS=0; HOLES=4; PIN3=X; PIN5=X; PIN13=X; I2C=X; P5=X; P6=X;;
*) HARDWARE=X; ETHER=X; USB=X; LED=X; P2PINS=X; HOLES=X; PIN3=X; PIN5=X; PIN13=X; I2C=X; P5=X; P6=X;;
esac
}
# ROT13-encode the given string (the transform is its own inverse); used below
# to de-obfuscate the credentials read from $PB100 — presumably for the
# pastebin upload mentioned elsewhere in this file.
# Fixed: the original used unquoted `echo $1`, which word-split and
# glob-expanded the value before encoding (values containing spaces or '*'
# were mangled).
pcp_pb_get() {
	printf '%s\n' "$1" | tr 'a-zA-Z' 'n-za-mN-ZA-M'
}
#========================================================================================
# Type
#----------------------------------------------------------------------------------------
pcp_rpi_is_rpi0() {
pcp_rpi_details
[ $TYPE -eq 0 ] && echo 0 || echo 1
}
pcp_rpi_is_rpi1() {
pcp_rpi_details
[ $TYPE -eq 1 ] && echo 0 || echo 1
}
pcp_rpi_is_rpi2() {
pcp_rpi_details
[ $TYPE -eq 2 ] && echo 0 || echo 1
}
pcp_rpi_is_rpi3() {
pcp_rpi_details
[ $TYPE -eq 3 ] && echo 0 || echo 1
}
pcp_rpi_is_rpi4() {
pcp_rpi_details
[ $TYPE -eq 4 ] && echo 0 || echo 1
}
pcp_rpi_type() {
pcp_rpi_details
echo $TYPE
}
#========================================================================================
# Model
#----------------------------------------------------------------------------------------
pcp_rpi_model() {
pcp_rpi_details
echo $MODEL
}
pcp_rpi_model_unknown() {
pcp_rpi_details
[ "$MODEL" = "?" ] && echo 0 || echo 1
}
# The shortname is a unique identifier intended to be used with pastebin uploading
pcp_rpi_shortname() {
pcp_rpi_details
if [ "$MODEL" = "A" ] || [ "$MODEL" = "A+" ] || [ "$MODEL" = "ZERO" ] || [ "$MODEL" = "ZERO-W" ] || [ "$MODEL" = "3A+" ]; then
SHORTMAC=$(pcp_wlan0_mac_address)
else
SHORTMAC=$(pcp_eth0_mac_address)
fi
SHORTMAC=$(echo $SHORTMAC | sed 's/://g')
echo "pCP-$(pcp_rpi_model)-${SHORTMAC:6}"
}
pcp_rpi_is_model_A() {
pcp_rpi_details
[ "$MODEL" = "A" ] && echo 0 || echo 1
}
pcp_rpi_is_model_B() {
pcp_rpi_details
[ "$MODEL" = "B" ] && echo 0 || echo 1
}
pcp_rpi_is_model_B_rev_1() {
pcp_rpi_details
([ "$MODEL" = "B" ] && [ "$PCBREV" = "1" ]) && echo 0 || echo 1
}
pcp_rpi_is_model_B_rev_2() {
pcp_rpi_details
([ "$MODEL" = "B" ] && [ "$PCBREV" = "2" ]) && echo 0 || echo 1
}
pcp_rpi_is_model_Bplus() {
pcp_rpi_details
[ "$MODEL" = "B+" ] && echo 0 || echo 1
}
pcp_rpi_is_model_CM() {
pcp_rpi_details
[ "$MODEL" = "CM" ] && echo 0 || echo 1
}
pcp_rpi_is_model_CM3() {
pcp_rpi_details
[ "$MODEL" = "CM3" ] && echo 0 || echo 1
}
pcp_rpi_is_model_CM3plus() {
pcp_rpi_details
[ "$MODEL" = "CM3+" ] && echo 0 || echo 1
}
pcp_rpi_is_model_Aplus() {
pcp_rpi_details
[ "$MODEL" = "A+" ] && echo 0 || echo 1
}
pcp_rpi_is_model_2B() {
pcp_rpi_details
[ "$MODEL" = "2B" ] && echo 0 || echo 1
}
pcp_rpi_is_model_zero() {
pcp_rpi_details
[ "$MODEL" = "ZERO" ] && echo 0 || echo 1
}
pcp_rpi_is_model_zerow() {
pcp_rpi_details
[ "$MODEL" = "ZERO-W" ] && echo 0 || echo 1
}
pcp_rpi_is_model_3B() {
pcp_rpi_details
[ "$MODEL" = "3B" ] && echo 0 || echo 1
}
pcp_rpi_is_model_3Bplus() {
pcp_rpi_details
[ "$MODEL" = "3B+" ] && echo 0 || echo 1
}
pcp_rpi_is_model_3Aplus() {
pcp_rpi_details
[ "$MODEL" = "3A+" ] && echo 0 || echo 1
}
pcp_rpi_is_model_4B() {
pcp_rpi_details
[ "$MODEL" = "4B" ] && echo 0 || echo 1
}
PB101=$(cat $PB100)
set -- $PB101
PB102=$(pcp_pb_get $1)
PB103=$(pcp_pb_get $2)
PB104=$(pcp_pb_get $3)
#========================================================================================
# Revision
#----------------------------------------------------------------------------------------
pcp_rpi_pcb_revision() {
pcp_rpi_details
echo $PCBREV
}
pcp_rpi_is_hat() {
pcp_rpi_details
[ "$HOLES" = "4" ] && echo 0 || echo 1
}
pcp_rpi_has_inbuilt_wifi() {
pcp_rpi_details
if ([ "$MODEL" = "ZERO-W" ] || [ "$MODEL" = "3B" ] || [ "$MODEL" = "3B+" ] || [ "$MODEL" = "3A+" ] || [ "$MODEL" = "4B" ]); then
echo 0
else
echo 1
fi
}
pcp_rpi_warranty() {
case $(pcp_rpi_revision) in
100* | 2*)
echo 0
;;
*)
echo 1
;;
esac
}
#========================================================================================
# Memory
#----------------------------------------------------------------------------------------
pcp_rpi_memory() {
pcp_rpi_details
echo $MEMORY
}
pcp_rpi_is_memory_256() {
pcp_rpi_details
[ "$MEMORY" = "256" ] && echo 0 || echo 1
}
pcp_rpi_is_memory_512() {
pcp_rpi_details
[ "$MEMORY" = "512" ] && echo 0 || echo 1
}
pcp_rpi_is_memory_1024() {
pcp_rpi_details
[ "$MEMORY" = "1024" ] && echo 0 || echo 1
}
pcp_rpi_is_memory_2048() {
pcp_rpi_details
[ "$MEMORY" = "2048" ] && echo 0 || echo 1
}
pcp_rpi_is_memory_4096() {
pcp_rpi_details
[ "$MEMORY" = "4096" ] && echo 0 || echo 1
}
API_DEV_KEY=$PB102
API_USER_NAME=$PB103
API_USER_PASSWORD=$PB104
#========================================================================================
# Temperature
#
# Options:
# mode
# policy
# passive
# temp
# trip_point_0_temp
# trip_point_0_type
# type
# uevent
#----------------------------------------------------------------------------------------
pcp_rpi_thermal() {
cat /sys/devices/virtual/thermal/thermal_zone0/$1
}
pcp_rpi_thermal_temp() {
TEMP_IN_DEGRESS=$(pcp_rpi_thermal "temp")
if [ "$1" = "degrees" ]; then
printf "%.f" $((($TEMP_IN_DEGRESS+500)/1000))
else
printf "%s" $TEMP_IN_DEGRESS
fi
}
| true
|
0f9d29c44f24c51c1719a7b64190a53a60e8f1e7
|
Shell
|
shaileshmshinde/bash-scripting
|
/elif.sh
|
UTF-8
| 284
| 3.203125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Read a value from the user and report whether it is a positive number,
# a negative number, or zero; anything non-numeric gets an error message.
read -p "Type any Number : " N

# Validate first: the original ran [ $N -gt 0 ] directly on arbitrary input,
# which printed "integer expression expected" noise to stderr for every
# comparison before eventually falling through to the else branch.
if [[ ! "$N" =~ ^-?[0-9]+$ ]]; then
	echo ""
	echo "It's not a number Type a number,"
elif [ "$N" -gt 0 ]; then
	echo "It's +Ve Number"
elif [ "$N" -lt 0 ]; then
	echo "It's-Ve Number"
else
	echo "It's Zero"
fi
| true
|
48dc649310d274b3c8537d742f1e2c1b1884e862
|
Shell
|
mhoofe/mybash
|
/profile_scripts/limits.sh
|
UTF-8
| 332
| 3.09375
| 3
|
[] |
no_license
|
# Raise the shell's open-file limit ('ulimit -n') to 65536, but only when the
# current limit is lower — an already-higher limit is never reduced.
set_maxfiles=65536
current_maxfiles=$(ulimit -n)
if [ "$set_maxfiles" -gt "$current_maxfiles" ]; then
  ulimit -n "$set_maxfiles" # 2> /dev/null
fi

# Likewise raise the max-user-processes limit ('ulimit -u') to 2049.
set_maxproc=2049
current_maxproc=$(ulimit -u)
if [ "$set_maxproc" -gt "$current_maxproc" ]; then
  ulimit -u "$set_maxproc" # 2> /dev/null
fi
| true
|
ecbb45da3d4a314278a1dc2fb631f45ea84a16dc
|
Shell
|
marcus-grant/dotfiles-archive
|
/bash/prompts/choose-prompt.sh
|
UTF-8
| 2,538
| 4.84375
| 5
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# a simple helper script to swap the link for prompt-link when changing prompt
main() {
# need the absolute path to the prompts directory
PROMTPS_DIR_PATH="$(get-script-dir)"
NUM_ARGS=$#
DESIRED_PROMPT="$(validate-args "$@")"
PROMPT_LINK_PATH="$PROMTPS_DIR_PATH/prompt-link"
swap-prompt-link
echo "Exiting."
}
# foolproof way to figure out where this script is placed
get-script-dir(){
source="${BASH_SOURCE[0]}"
while [ -h "$source" ]; do # resolve $source until the file is no longer a symlink
dir="$( cd -P "$( dirname "$source" )" && pwd )"
source="$(readlink "$source")"
# if $source was a relative symlink, we need to resolve it relative
# to the path where the symlink file was located
[[ $source != /* ]] && source="$dir/$source"
done
echo "$( cd -P "$( dirname "$source" )" && pwd )"
}
# Validate script arguments, exit with message if invalid
validate-args() {
# first create a helpful usage example to print out
EXAMPLE_USAGE="Example:\n./choose-prompt.sh bash-powerline.sh"
# check that exactly one argument for configuration to link path is given
local ARGS_TMP="$@"
if (( $NUM_ARGS != 1 )); then
echo "[ERROR]: invalid number of arguments given, please use only 1 argument for path to prompt configuration"
echo $EXAMPLE_USAGE
exit 1
fi
# check if given path contains the base PROMTPS_DIR_PATH inside it
# if it does, don't use PROMTPS_DIR_PATH to format the filename path
# if it doesn't, include it
if [[ ! $ARGS_TMP == *"$PROMTPS_DIR_PATH"* ]]; then
ARGS_TMP="$PROMTPS_DIR_PATH/$ARGS_TMP"
fi
# now, that a correct absolute path has been formated --
# check that a valid prompt file was chosen
if [ ! -e $ARGS_TMP ]; then
echo "[ERROR]: The given file path doesn't exist, please chose a valid path to a prompt configuration."
echo $EXAMPLE_USAGE
exit 1
fi
# finally echo out the filepath
echo $ARGS_TMP
}
# swap the endpoint of the prompt link to the given one
# will overwrite the old one if it exists
swap-prompt-link(){
echo "Setting prompt script... "
# remove the file, and send rm error to null if the file doesn't exist
rm $PROMPT_LINK_PATH 2> /dev/null
ln -s $DESIRED_PROMPT $PROMPT_LINK_PATH
echo "Prompt selection and swap was succesful!"
echo "Current prompt: $DESIRED_PROMPT"
}
# execute main which handles execution order after entire script is read
main "$@"
unset main
| true
|
34235238aed13c1bffd5dacefbdda8707c14e06f
|
Shell
|
sladonia/calendar_service
|
/create_migrations.sh
|
UTF-8
| 277
| 3.53125
| 4
|
[] |
no_license
|
#!/bin/sh
# Create a new pair of sequentially-numbered SQL migration files in
# ./migrations via the external `migrate create` tool.
#
# Usage: ./create_migrations.sh migration_name

print_usage()
{
  echo "usage":
  echo "./create_migrations.sh migration_name"
}

create_migration()
{
  # Quote the name so migration names containing spaces arrive as one
  # argument. (The variable was also misspelled "MIGRATION_NMAE" throughout;
  # renamed consistently — it is internal to this script.)
  migrate create -ext sql -dir migrations -seq "$MIGRATION_NAME"
}

MIGRATION_NAME="$1"

if [ "$MIGRATION_NAME" = "" ]
then
  print_usage
  exit
fi

create_migration
| true
|
0e27ecd7366888e4b9a67b560463587b4f6379d1
|
Shell
|
cyberflow/ceph-chef
|
/files/ceph_journal.sh
|
UTF-8
| 2,008
| 3.59375
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#
# Author:: Chris Jones <cjones303@bloomberg.net>
#
# Copyright 2016, Bloomberg Finance L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Script takes 2 parameters: option and device
# It uses the ceph-disk script to make things easier. You can also use
# partx -s <device> to get a list of partitions without using ceph-disk
# NOTE: If you delete a journal partition (assuming journals are on device
# different than ceph data) then update the kernel with partprobe or partx
# so that ceph-disk does not pickup something that says something like
# 'other...' in journal partition descriptions.
# OUTPUT: The ceph-disk script may output a warning message. This will
# have no impact on the results of the script.
opt=$1
dev=$2
journal=$(ceph-disk list | grep $dev | awk -F',' '{print $5}' 2>/dev/null)
# returns the full journal output line from ceph-disk for the given device
if [[ $opt == 0 ]]; then
echo $journal
fi
# NOTE: If you have not run partx or partprobe after removing a journal partition
# (again only if the journals are not on the same device as the data) then
# you may see output that looks like 'option' which will not format well.
# returns the full device/partition of the given device
if [[ $opt == 1 ]]; then
echo $journal | sed 's/journal //g'
fi
# Different 'if' clauses for 'opt' options
# returns only the partition number. This is good for using in sgdisk or partx
# for removing a specific journal partition.
if [[ $opt == 2 ]]; then
echo $journal | sed 's/[a-Z]*[//]*//g'
fi
| true
|
845b25e70829100d7d59b46a416e66780177d99b
|
Shell
|
MattiasBerg/PSV_ID2NAME
|
/make_database.sh
|
UTF-8
| 915
| 3.765625
| 4
|
[] |
no_license
|
#!/bin/bash
echo ""
# Build PSV_GAMES_STRIPPED.csv from PSV_GAMES.tsv:
# keep the first three tab-separated columns as CSV, drop "(3.6x+)" markers,
# and replace characters that are illegal in filenames (":" and "/").

# Step 1: TSV -> CSV (first three columns only).
echo "Convert tsv to csv PSV_GAMES.tsv -> PSV_GAMES.csv"
cut -f1-3 PSV_GAMES.tsv | tr '\t' ',' > PSV_GAMES.csv

# Step 2: strip every "(3.6...)" suffix (firmware-requirement markers).
echo "Removing all (3.61+)-strings in PSV_GAMES.csv"
sed "s/(3.6.*//" PSV_GAMES.csv > PSV_GAMES_STRIPPED.csv

# Step 3: ":" cannot appear in a filename — replace with " -".
echo "Replacing all : with -"
sed -i 's/:/ -/g' PSV_GAMES_STRIPPED.csv

# Step 4: "/" cannot appear in a filename either (e.g. Fate/Extella).
sed -i 's/\//-/g' PSV_GAMES_STRIPPED.csv

# The intermediate CSV is no longer needed.
rm -fr PSV_GAMES.csv
echo "Done... Created PSV_GAMES_STRIPPED.csv"
| true
|
b76ff16515dfcab1c8d6f49abdd302def0d5a016
|
Shell
|
umarfarooqkt/git-made-easy_scripts
|
/pull_project
|
UTF-8
| 465
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/bash
# Pull the latest changes for an existing project. The projects root is read
# from ~/.git_root; the remote URL for project NAME is read from the file
# NAME_repo inside that root.
#
# Usage: pull_project <project-name>

# Abort if the projects root is unreachable — otherwise every git command
# below would run against whatever directory we happened to start in.
cd "$(cat ~/.git_root)" || exit 1

project=$1
if [[ $project = *[!\ ]* ]]; then
    # Fixed message: this script pulls an existing project, it does not create one.
    echo "pulling project" "$project"
else
    echo "project consists of spaces only, aborting"
    exit 1
fi

repo=${project}_repo
cd "$project" || exit 1

# NOTE(review): $username is never set anywhere in this script, so the stamp
# file is currently named just ".update-". Presumably it should come from the
# caller's environment (or be $USER) — confirm before renaming the file.
date > ".update-${username}"

# Commit any local changes first so the pull does not clobber them.
git add --all
git commit -m "downloading from repo"
git config --global core.editor "/usr/bin/emacs"
git remote add origin "$(cat "../${repo}")"
echo ignore message: fatal: remote origin already exists.
echo "$(cat "../${repo}")"
git pull
| true
|
516920c0e433bbdd081e288acd7002eefbf58749
|
Shell
|
zapiens/dag-home
|
/bin/gi
|
UTF-8
| 267
| 3.515625
| 4
|
[] |
no_license
|
#!/bin/bash
# Write a .gitignore into the given directory that ignores everything except
# itself, then force-add it so git tracks the (otherwise empty) directory.
#
# Usage: gi <directory>
if [[ -d $1 ]]; then
# "$1" is quoted in every use below so directory names containing spaces work
# (the original unquoted redirection/`git add`/`head` broke on such paths).
cat - > "$1/.gitignore" <<EOF
# Ignore everything in this directory
*
# Except this file
!.gitignore
EOF
git add -f -- "$1/.gitignore"
echo "$1/.gitignore: " "$(head -n1 "$1/.gitignore")"
else
echo "$0: \"$1\" is Not a directory"
fi
| true
|
a4c6675c23c9e41bbb7abce22cd7554077915fd6
|
Shell
|
timausk/dotfiles
|
/scripts/helper.sh
|
UTF-8
| 1,876
| 3.734375
| 4
|
[
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
#!/bin/bash
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# helper.sh
#
# shell helper function
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# - - - - - - - - - - - - - - - - - - - - - - - - -
# beautiful output
print_in_color() {
printf "%b" \
"$(tput setaf "$2" 2> /dev/null)" \
"$1" \
"$(tput sgr0 2> /dev/null)"
}
print_info() {
print_in_cyan "=> $1\n"
}
print_success() {
print_in_green " => [✔] $1\n"
}
print_error() {
print_in_red " => [✖] $1 $2\n"
}
print_warning() {
print_in_yellow " => [!] $1\n"
}
print_action() {
print_in_white " => $1\n"
}
print_result() {
if [ "$1" -eq 0 ]; then
print_success "$2"
else
print_error "$2"
fi
return "$1"
}
print_in_green() {
print_in_color "$1" 2
}
print_in_yellow() {
print_in_color "$1" 3
}
print_in_red() {
print_in_color "$1" 1
}
print_in_cyan() {
print_in_color "$1" 6
}
print_in_white() {
print_in_color "$1" 7
}
# - - - - - - - - - - - - - - - - - - - - - - - - -
# Ask for the admin password upfront
# TODO is it working on linux?
# seams we have to use sudo and enter password again later on
ask_for_sudo() {
sudo -v &> /dev/null
# Update existing `sudo` time stamp
# until this script has finished.
#
# https://gist.github.com/cowboy/3118588
while true; do
sudo -n true
sleep 60
kill -0 "$$" || exit
done &> /dev/null &
}
# - - - - - - - - - - - - - - - - - - - - - - - - -
# get OS
# Print a short OS identifier to stdout, without a trailing newline:
# "macos" for Darwin kernels, "linux" for Linux, otherwise the raw
# `uname -s` kernel name.
get_os() {
  local kernel_name
  kernel_name=$(uname -s)
  case "$kernel_name" in
    Darwin) printf "%s" "macos" ;;
    Linux)  printf "%s" "linux" ;;
    *)      printf "%s" "$kernel_name" ;;
  esac
}
# - - - - - - - - - - - - - - - - - - - - - - - - -
# is m1 mac
# Succeed (return 0) when `arch` reports arm64 (Apple-silicon Mac),
# fail (return 1) otherwise.
is_m1() {
  [[ "$(arch)" == "arm64" ]]
}
| true
|
0c3569bb764a2a497adf963dd56f64db12c86215
|
Shell
|
Vaa3D/vaa3d_tools
|
/hackathon/zhi/neuTube_zhi/update_library
|
UTF-8
| 656
| 2.515625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Build neuTube's third-party dependencies as shared libraries: FFTW 3.3.2
# (double- and single-precision) and libxml2 2.9.1, installed under
# neurolabi/lib/fftw3 and neurolabi/lib/xml respectively.
cd ../../../released_plugins/v3d_plugins/neurontracing_neutube/src_neutube/neurolabi/
./update_library_64 --enable-shared
cd lib/
# Absolute path of the lib directory, used as the install prefix below.
libdir=`pwd`
if [ ! -d fftw3 ]
then
mkdir fftw3
fi
echo 'Building libfftw3 ...'
tar -xvf fftw-3.3.2.tar.gz
cd fftw-3.3.2
# FFTW is configured and built twice into the same prefix: once for the
# default double-precision library, once with --enable-float for the
# single-precision variant.
./configure --enable-shared --prefix=${libdir}/fftw3
make
make install
./configure --enable-shared --enable-float --prefix=${libdir}/fftw3
make
make install
cd ..
echo 'Building libxml ...'
if [ ! -d xml ]
then
mkdir xml
fi
tar -xvf libxml2-2.9.1.tar.gz
cd libxml2-2.9.1
./configure --without-iconv --without-zlib --enable-shared --prefix=${libdir}/xml
make
make install
cd ..
| true
|
6c742e2fdf2ead9445081ffdc375bf33fc453060
|
Shell
|
frantony/clab
|
/build-all.sh
|
UTF-8
| 448
| 2.9375
| 3
|
[] |
no_license
|
#!/bin/sh
# Build toolchain+qemu and then firmware/Linux/rootfs for every supported
# board configuration, aborting on the first failure.
CONFIGS=""
CONFIGS="$CONFIGS i686-pc"
CONFIGS="$CONFIGS sparc-ss20"
CONFIGS="$CONFIGS arm-versatile"
CONFIGS="$CONFIGS mips-malta"

# $CONFIGS is intentionally unquoted so it word-splits into one name per loop.
for i in $CONFIGS;
do
	# Fixed: the original left a trailing backslash after the final option of
	# each build.sh call, which line-continued into the following "if" and made
	# the whole script a shell syntax error ("unexpected token `then'").
	./build.sh --config "build-configs/$i" \
		--build-toolchain \
		--build-qemu
	if [ "$?" != "0" ]; then
		exit 1
	fi
	./build.sh --config "build-configs/$i" \
		--clean-out-dir \
		--build-firmware \
		--build-linux \
		--build-rootfs
	if [ "$?" != "0" ]; then
		exit 1
	fi
done
| true
|
3c23cfa388c74ae87848a152b0371e14a2887216
|
Shell
|
made-ml-in-prod-2021/made-robotics
|
/week03_turtle_world_first_node/turtle.sh
|
UTF-8
| 1,607
| 2.671875
| 3
|
[
"Apache-2.0"
] |
permissive
|
# get docker image
docker pull osrf/ros:melodic-desktop-full
docker run -ti --rm --privileged --net=host -e DISPLAY=$IP:0 -v /tmp/.X11-unix:/tmp/.X11-unix osrf/ros:melodic-desktop-full
# start image with graphical interface forwarded to host
docker run -e DISPLAY=unix$DISPLAY -v /tmp/.X11-unix:/tmp/.X11-unix -it osrf/ros:melodic-desktop-full
# setup ROS environment for particular shell
env | grep ROS # <empty output>
echo $SHELL # /bin/bash
source /opt/ros/melodic/setup.bash
env | grep ROS # <some variables>
# Start ROS
roscore
# PRESS Ctrl+Z to pass roscore to background
# alternative command: `roscore &`
# to pass process to background from the start
# check roscore is in background
ps
# roscore is unique process
roscore # <warning about running roscore>
# Check logging
ls -la /root/.ros/log/<your-unique-id>/
# Alternative way to check latest log
ls -la ~/.ros/log/latest/
# Look into log file
less ~/.ros/log/latest/master.log
# Multiple terminals for one docker container
# OPEN new terminal window
docker ps # find running docker id
docker exec -it <your-docker-id> bash
source /opt/ros/melodic/setup.bash # DON'T FORGET!!!
# Start turtlesim
rosrun turtlesim turtlesim_node
# Launch rqt GUI
rqt_graph
# Moving turtle
rosrun turtlesim turtle_teleop_key
# CLI instruments
rostopic list
rosnode list
rostopic info /turtle1/cmd_vel
rosmsg info geometry_msgs/Twist
rostopic echo /turtle1/cmd_vel
# now press keys to see what happens
# Make turtle make autonomous actions
rosrun turtlesim draw_square
rostopic echo /turtle1/pose
rostopic info /turtle1/pose
rqt_graph
| true
|
9c4fc4ee1c1145f574f6c803182878473c53b834
|
Shell
|
NJKode/dotfiles
|
/bin/find-unused-images
|
UTF-8
| 336
| 4.125
| 4
|
[] |
no_license
|
#!/bin/bash
# List image files under a directory whose filename is not mentioned anywhere
# in that directory tree (i.e. images nothing appears to reference).
#
# Usage: find-unused-images [dir]   (defaults to the current directory)

DIR=.
if [ -n "$1" ]
then
	DIR=$1
fi

# Iterate NUL-delimited find output so paths containing spaces (or any other
# odd characters) survive intact — the original word-split a `find | grep`
# result, silently mangling such paths. Matching by -name also anchors the
# extension to the end of the name, which the original unanchored grep
# pattern did not.
find "$DIR" -type f \( -name '*.jpg' -o -name '*.jpeg' -o -name '*.png' -o -name '*.gif' \) -print0 |
while IFS= read -r -d '' f
do
	name=$(basename "$f")
	# If no file in the tree mentions this image's name, report the image.
	if ! grep -r -q -- "$name" "$DIR"
	then
		echo "$f"
	fi
done
| true
|
e9cf5d98f1ade857d86f7935d068a21c16612b65
|
Shell
|
UFSC/moodle-provas-livecd-provas
|
/packages/src_3rd_party/patch-lxsession/_build.sh
|
UTF-8
| 1,090
| 3.8125
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Build patched lxsession .deb packages: fetch the Debian source, apply a
# local patch to lxsession-logout, rebuild the binary packages and move
# them to ../pacotes_gerados.  Must run as root (uses apt-get).

pkg="lxsession"
tmp="tmp_build"
pkgs_deb="../pacotes_gerados"

apt-get -y install build-essential fakeroot dpkg-dev >/dev/null 2>&1

if [ ! -d "$pkgs_deb" ]; then
    mkdir "$pkgs_deb"
fi

# Start every run from a clean scratch directory.
if [ -d "$tmp" ]; then
    rm -rf "$tmp"
fi
mkdir "$tmp"
cd "$tmp" || exit 1

# Abort the whole script if the previous step failed ($1 = exit status).
function get_error() {
    status=$1
    if [ ! $status -eq 0 ]; then
        exit 1
    fi
}

echo -n "==> Obtendo source do pacote $pkg... "
apt-get source "$pkg" >/dev/null 2>&1
get_error $? && echo "OK"

echo -n "==> Instalando as dependências de compilação do pacote $pkg... "
# -y is required: with stdout/stderr redirected, an interactive apt-get
# confirmation prompt would otherwise hang the build.
apt-get -y build-dep "$pkg" >/dev/null 2>&1
get_error $? && echo "OK"

echo -n "==> Extraíndo o pacote $pkg... "
dpkg-source -x "$pkg"*.dsc >/dev/null 2>&1
get_error $? && echo "OK"

cd "$pkg"* || exit 1

echo -n "==> Aplicando o patch... "
patch lxsession-logout/lxsession-logout.c ../../lxsession-logout.c.patch >/dev/null 2>&1
get_error $? && echo "OK"

echo -n "==> Gerando os pacotes, aguarde... "
dpkg-buildpackage -rfakeroot -b >/dev/null 2>&1
get_error $? && echo "OK"

# dpkg-buildpackage left the .debs in $tmp (parent of the source tree).
cd ..
mv "$pkg"*.deb "../$pkgs_deb"
echo "==> Pacotes movidos para ../$pkgs_deb"
rm -rf "$pkg"*
| true
|
d7a719d8d8807e0c8e80467ea3fe47cbbbbd3065
|
Shell
|
percona/pmm-demo
|
/loader/defrag_extra
|
UTF-8
| 297
| 2.703125
| 3
|
[] |
no_license
|
#!/bin/sh
# Continuously defragment the sbtest1 table in the "defrag" database:
# issue OPTIMIZE TABLE every 5 seconds, forever.
# Optional $1 overrides the MySQL socket path.
set -o xtrace

DBNAME=defrag
SOCKET=${1:-/var/lib/mysql/mysql.sock}

# Endless optimize loop; intended to be run in the background as demo load.
while :; do
    mysql \
        --socket=$SOCKET \
        --user=sbtest \
        --password=sbtest \
        --database=${DBNAME} \
        -e "
OPTIMIZE TABLE sbtest1;
"
    sleep 5
done
| true
|
052e8e31ee0b59a3928dd80d60f33bea338134e3
|
Shell
|
gaixas1/rinaApps
|
/compile.sh
|
UTF-8
| 1,958
| 3.484375
| 3
|
[] |
no_license
|
#!/bin/bash
# Build helper for the RINA sample applications: compiles every
# client/server binary against librina with a tunable include path,
# library location, output directory, buffer size and max SDU size.

rina_include="/irati/include/"
rina_lib="/irati/lib/librina-api.so"
build_path="./bin/"
debug=0
info=0
max_pdu=1400
buffer_size=1400

usage(){ echo "Usage: $0 [-h] [-i <string>] [-l <string>] [-b <string>] [-p <int>] [-d] [-f].
    i : include path, default \"${rina_include}\"
    l : rina lib, default \"${rina_lib}\"
    b : build path, default \"${build_path}\"
    s : buffer size, default \"${buffer_size}\"
    p : max sdu size, default \"${max_pdu}\"
    d : debug, default false
    f : info, default false" 1>&2; exit 1;}

# Parse the command-line flags; each one simply overrides a default above.
while getopts ":hi:l:b:s:p:df" opt; do
    case ${opt} in
        h) usage ;;
        i) rina_include=${OPTARG} ;;
        l) rina_lib=${OPTARG} ;;
        b) build_path=${OPTARG} ;;
        s) buffer_size=${OPTARG} ;;
        p) max_pdu=${OPTARG} ;;
        d) debug=1 ;;
        f) info=1 ;;
    esac
done

# Assemble the g++ arguments shared by every binary.
compile_args=" -I ${rina_include} -L. ${rina_lib} -lpthread -D BUFF_SIZE=${buffer_size}"
[ ${debug} -eq 1 ] && compile_args=${compile_args}" -D DEBUG"
[ ${info} -eq 1 ] && compile_args=${compile_args}" -D INFO"
# MAX_PDU is only defined when it is actually smaller than the buffer.
[ ${max_pdu} -lt ${buffer_size} ] && compile_args=${compile_args}" -D MAX_PDU=${max_pdu}"

mkdir -p ${build_path}

# One binary per source file, all built with the same flags.
for tool in DropServer LogServer DumpServer OnOffClient DataClient \
            VideoClient VoiceClient PoissonClient Exponential; do
    g++ -o ${build_path}${tool} ${tool}.cpp ${compile_args}
done
| true
|
27cf50e2890935888752e68177347c0007e7bf32
|
Shell
|
neoncyrex/students
|
/Alexey/final-test/centos-deployment.sh
|
UTF-8
| 840
| 2.78125
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#
# Provision the "exam" and "deployer" accounts for Ansible exercises:
# set passwords, passwordless sudo, SSH keypairs (deployer can SSH to
# itself), enable password authentication, set SELinux permissive.
# NOTE: credentials are hard-coded for a throwaway lab host only.

# Create $1 with password Ansible123 and a NOPASSWD sudoers drop-in.
create_lab_user() {
    user=$1
    sudo useradd -m "$user"
    # sudo must wrap passwd (the privileged command), not echo.
    echo Ansible123 | sudo passwd --stdin "$user"
    # A plain `sudo echo ... > file` runs the redirection unprivileged;
    # write the sudoers drop-in through tee instead.
    echo "$user ALL=(ALL) NOPASSWD: ALL" | sudo tee "/etc/sudoers.d/$user" >/dev/null
}

# Generate an unencrypted RSA keypair for $1, owned by that user, with
# permissions sshd's StrictModes will accept (0775/0770 were too open).
make_ssh_key() {
    user=$1
    home="/home/$1"
    sudo mkdir -p "$home/.ssh"
    sudo ssh-keygen -t rsa -f "$home/.ssh/id_rsa" -N ""
    sudo chown -R "$user:$user" "$home/.ssh"
    sudo chmod 0700 "$home/.ssh"
    sudo chmod 0600 "$home/.ssh/id_rsa"
}

create_lab_user exam
create_lab_user deployer
make_ssh_key exam
make_ssh_key deployer

# Let deployer SSH into localhost with its own key.
sudo cp /home/deployer/.ssh/id_rsa.pub /home/deployer/.ssh/authorized_keys
sudo chown -R deployer:deployer /home/deployer/
# Only the home directory itself is world-readable; a recursive 0755
# would make the private key unusable for ssh.
sudo chmod 0755 /home/deployer
sudo chmod 0700 /home/deployer/.ssh
sudo chmod 0600 /home/deployer/.ssh/id_rsa /home/deployer/.ssh/authorized_keys

# Flip "PasswordAuthentication no" to "yes"; the leading letter is dropped
# so the match works regardless of the capitalisation of 'P'.
sudo sed -i.bak -e "s/asswordAuthentication no/asswordAuthentication yes/" /etc/ssh/sshd_config
sudo systemctl restart sshd
sudo setenforce 0
| true
|
d3385b2ba9a3a83bed8b7291b18d941bbf8bdaac
|
Shell
|
gdsoares/nad
|
/linux-init/rhel-init
|
UTF-8
| 2,193
| 3.84375
| 4
|
[] |
no_license
|
#!/bin/bash
#
# nad Startup script for Circonus Node Agent Daemon
#
# chkconfig: - 98 02
# description: Circonus Node Agent Daemon
# processname: nad
#
### BEGIN INIT INFO
# Provides: nad
# Required-Start: $local_fs
# Required-Stop: $local_fs
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Circonus Node Agent Daemon
# Description: A very thin, simply managed host agent written in Node.js
# The node-agent daemon provides a simple mechanism to expose
# systems and application metrics to external onlookers.
# It inventories all executable programs in its config directory
# and executes them upon external request, returning results
# in JSON format.
### END INIT INFO
# Source function library.
. /etc/rc.d/init.d/functions
prog="Circonus Node Agent Daemon"
# @@SBIN@@ and @@PID_FILE@@ are placeholders substituted at package build time.
NAD="@@SBIN@@/nad"
NAD_PIDFILE="@@PID_FILE@@"
# Launch nad in daemon mode, report success/failure via the RHEL
# functions helpers, and return nad's startup exit status.
run_app() {
    $NAD --daemon --pid_file $NAD_PIDFILE
    RETVAL=$?
    if [[ $RETVAL -eq 0 ]]; then
        success
    else
        failure
    fi
    return $RETVAL
}
# Start nad unless it is already running; a stale pidfile is removed first.
# __pids_var_run (from the functions library) sets RETVAL:
#   0 = running, 1 = dead but pidfile exists, 3 = not running.
start() {
    echo -n $"Starting $prog: "
    RETVAL=3
    [[ -f $NAD_PIDFILE ]] && { __pids_var_run nad $NAD_PIDFILE; RETVAL=$?; }
    if [[ "$RETVAL" == "3" ]]; then
        # Not running, so start
        run_app
        RETVAL=$?
    elif [[ "$RETVAL" == "1" ]]; then
        # Stale pidfile
        rm $NAD_PIDFILE
        run_app
        RETVAL=$?
    else
        # Already running
        success
        RETVAL=0
    fi
    echo
    return $RETVAL
}
# Stop nad using the pidfile.
# NOTE(review): killproc is normally invoked as `killproc -p pidfile prog`;
# confirm omitting the program name is intentional on this RHEL version.
stop() {
    echo -n $"Stopping $prog: "
    killproc -p $NAD_PIDFILE
    RETVAL=$?
    echo
    return $RETVAL
}
# See how we were called.
case "$1" in
    start)
        start
        ;;
    stop)
        stop
        ;;
    status)
        status -p $NAD_PIDFILE $NAD
        RETVAL=$?
        ;;
    reload|force-reload)
        echo "Reloading Circonus node agent daemon: not needed, as there is no config file."
        ;;
    restart)
        stop
        start
        ;;
    *)
        echo $"Usage: $0 {start|stop|reload|force-reload|status|restart}"
        RETVAL=2
esac
exit $RETVAL
| true
|
df56e07aad02e0f411b98598598540e0301e26f7
|
Shell
|
jheiselman/utilities
|
/monitoring/check_ssh_load
|
UTF-8
| 357
| 3.640625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
#
# check_ssh_load — report a remote host's 5-minute load average via SSH.
# Prints "OK;..." when load5 <= 1.73, "BAD;..." above that threshold.
# Usage: check_ssh_load <hostname/IP>

# Quote "$1": unquoted it broke for arguments with spaces (and the empty
# check only worked by accident of test's one-argument semantics).
[ -z "$1" ] && echo "$0: must supply hostname/IP address" >&2 && exit 1
host=$1

# `uptime` output ends with "load average: a, b, c"; we want the middle
# (5-minute) value.  Fail loudly if ssh itself fails.
output=$(ssh "$host" uptime) || { echo "$0: ssh to $host failed" >&2; exit 1; }

load_avgs="${output##*:}"       # " a, b, c"
load_avgs="${load_avgs#*, }"    # "b, c"
load5="${load_avgs%,*}"         # "b"

# bc does the floating-point comparison; prints 1 when above threshold.
retval=$(echo "$load5 > 1.73" | bc)
if [ "$retval" -eq 1 ]; then
  echo "BAD;5 minute load average is $load5"
else
  echo "OK;5 minute load average is $load5"
fi
| true
|
56436b92ec307e6ddee9379d185f0fd8fb540bd4
|
Shell
|
orolhawion/microservice-lightweight-competion-
|
/src/main/comparison/measureLightness.sh
|
UTF-8
| 1,794
| 3.46875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
#
# Compare the "lightness" of the microservice implementations: build all
# Maven projects, then report executable sizes and lines of code for each
# framework (Spring-Boot, Wildfly, Wildfly-Swarm, Snap, Go, Swift).

# Print a framed section header.
function log {
    echo =============
    echo "$1"
    echo =============
}

# Print "<bytes> <file>" for the given file.
function echoSizeOfFile {
    wc -c "$1"
}

# Count the lines of every source file (java/xml/yaml/go/hs) below $1.
# (A duplicate -name "*.xml" predicate was removed; it double-listed
#  nothing but was dead weight in the expression.)
function countAllLOCs {
    find "$1" \( -name "*.java" -or -name "*.xml" -or -name "*.yaml" -or -name "*.go" -or -name "*.hs" \) -exec cat "{}" \; | wc -l
}

# Swift keeps build artefacts under .build — exclude them from the count.
function countAllRelevantLOCsSwift {
    find "$1" \( -name "*.swift" -not -path "*/.build/*" \) -exec cat "{}" \; | wc -l
}

pushd ../../../

# log "Build project"
mvn clean
mvn install

SPRINGBOOT_MS_DIR=mlc-springboot-project/mlc-springboot-microservice
WILDFLY_SWARM_MS_DIR=mlc-wildflyswarm-project/mlc-wildflyswarm-microservice
WILDFLY_MS_DIR=mlc-wildfly-project/mlc-wildfly-microservice
SNAP_MS_DIR=mlc-snap-project/mlc-snap-microservice
GO_MS_DIR=mlc-go-project/mlc-go-microservice
SWIFT_MS_DIR=mlc-swift-project/mlc-swift-microservice

log "Measure sizes of Executables"
echo Spring-Boot
echoSizeOfFile ${SPRINGBOOT_MS_DIR}/target/mlc-springboot-microservice-1.0-SNAPSHOT.jar
echo Wildfly
echoSizeOfFile ${WILDFLY_MS_DIR}/target/mlc-wildfly-microservice-1.0-SNAPSHOT.war
echo Wildfly-Swarm
echoSizeOfFile ${WILDFLY_SWARM_MS_DIR}/target/mlc-wildflyswarm-microservice-1.0-SNAPSHOT-swarm.jar
echo Snap
echoSizeOfFile ${SNAP_MS_DIR}/.stack-work/docker/_home/.local/bin/*
echo Go
echoSizeOfFile ${GO_MS_DIR}/../target/*
echo Swift
echoSizeOfFile ${SWIFT_MS_DIR}/.build/debug/mlc-swift-microservice

log "Measure LOCs"
echo Spring-Boot
countAllLOCs ${SPRINGBOOT_MS_DIR}
echo Wildfly
countAllLOCs ${WILDFLY_MS_DIR}
echo Wildfly-Swarm
countAllLOCs ${WILDFLY_SWARM_MS_DIR}
echo Snap
countAllLOCs ${SNAP_MS_DIR}
echo "-- minus"
countAllLOCs ${SNAP_MS_DIR}/.stack-work
echo Go
countAllLOCs ${GO_MS_DIR}
echo Swift
countAllRelevantLOCsSwift ${SWIFT_MS_DIR}
log Done

popd
| true
|
70ff8d977faf9dbfe31118429065f63a6a5da7ab
|
Shell
|
enefry/aria2php
|
/__RPI_CONFIGS/init.d/aria2cRPC
|
UTF-8
| 1,068
| 3.640625
| 4
|
[
"MIT"
] |
permissive
|
#! /bin/sh
# /etc/init.d/aria2cRPC
### BEGIN INIT INFO
# Provides: aria2cRPC
# Required-Start: $network $local_fs $remote_fs
# Required-Stop: $network $local_fs $remote_fs
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: aria2c RPC init script.
# Description: Starts and stops aria2 RPC services.
### END INIT INFO

#VAR
RUN="/usr/bin/aria2c"
# PID of an already-running "aria2c --enable-rpc" process, if any
# (empty string when none is running).
ARIA_PID=$(ps ux | awk '/aria2c --enable-rpc/ && !/awk/ {print $2}')

# Some things that run always
touch /var/lock/aria2cRPC

# Dispatch on the init action given as $1.
if [ "$1" = "start" ]; then
    echo "Starting script aria2cRPC "
    if [ -z "$ARIA_PID" ]; then
        nohup /home/www/aria2php/__RPI_CONFIGS/startAria2.sh &
        echo "Started"
    else
        echo "aria2cRPC already started"
    fi
elif [ "$1" = "stop" ]; then
    echo "Stopping script aria2cRPC"
    if [ ! -z "$ARIA_PID" ]; then
        kill $ARIA_PID
    fi
    echo "OK"
elif [ "$1" = "status" ]; then
    if [ ! -z "$ARIA_PID" ]; then
        echo "The aria2cRPC is running with PID = "$ARIA_PID
    else
        echo "No process found for aria2c RPC"
    fi
else
    echo "Usage: /etc/init.d/aria2cRPC {start|stop|status}"
    exit 1
fi
exit 0
| true
|
4ea2f6997aa5711a98ac27d58b110e63c9313725
|
Shell
|
Alieo/PiHealth_Monitor
|
/shell/Proc.sh
|
UTF-8
| 604
| 3.53125
| 4
|
[] |
no_license
|
#!/bin/bash
# Rogue-process detector.
# Run ps; if any process uses more than 50% CPU or 50% memory, wait 5s
# and run ps again.  If a process over 50% is still present, print its
# name, PID, user, CPU% and MEM%.
# If nothing exceeds 50%, exit immediately.
Timenow=`date +"%Y-%m-%d__%H:%M:%S"`
# PIDs (concatenated) of every process above the 50% CPU or MEM threshold;
# empty when there are none.
Mem=`ps -aux -h | awk '{ if ($3 > 50 || $4 > 50) {printf("%d", $2);} }'`
if [[ $Mem ]];then
	sleep 5
else exit 0
fi
Process=`ps -aux -h | awk -v time=$Timenow '{if ($3 > 50 || $4 > 50){ printf("%s %s %d %s %d%% %d%%"), time, $11, $2, $1, $3, $4}}'`
# Output: time process-name PID user CPU% MEM%
echo $Process
| true
|
37036a8e4c3a5df49cae1d3dd73cb0ef82bba7e4
|
Shell
|
hoverzheng/beanstalkd
|
/ct/gen
|
UTF-8
| 743
| 3.765625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Generate the C registry for ct's test runner: scan the given object
# files with nm, collect symbols named cttest* / ctbench*, and print a
# translation unit declaring them plus the ctmaintest[] and
# ctmainbench[] tables.
set -e

# On Darwin, nm prefixes C symbols with '_': drop dotted entries and
# strip the underscore; elsewhere pass the stream through untouched.
fixsyms() {
	if [ "$(uname -s | tr A-Z a-z)" = darwin ]
	then egrep -v [.] | egrep ^_ | sed s/^_//
	else cat
	fi
}

# syms PREFIX FILE...  ->  newline-separated symbols starting with PREFIX.
syms() {
	prefix=$1
	shift
	for f in "$@"
	do nm $f
	done | cut -d ' ' -f 3 | fixsyms | egrep ^$prefix
}

ts=$(syms cttest "$@" || true)
bs=$(syms ctbench "$@" || true)

printf '#include <stdint.h>\n'
printf '#include "internal.h"\n'
for t in $ts
do printf 'void %s(void);\n' $t
done
for b in $bs
do printf 'void %s(int);\n' $b
done
printf 'Test ctmaintest[] = {\n'
for t in $ts
do printf '	{%s, "%s"},\n' $t $t
done
printf '	{0},\n'
printf '};\n'
printf 'Benchmark ctmainbench[] = {\n'
for b in $bs
do printf '	{%s, "%s"},\n' $b $b
done
printf '	{0},\n'
printf '};\n'
| true
|
17646b0a296b2fb0b9de4c0bf85d675aed3ace1f
|
Shell
|
gryphonshafer/.conf
|
/util/exit_wrap
|
UTF-8
| 271
| 3.1875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Intent: Wrapper to limit exit values from Perl tests to allow git bisect to work
#   (git bisect run treats exit statuses >= 128 specially, so clamp them).
# Usage:
#   git bisect start HEAD 1234567
#   git bisect run exit_wrap t/test.t
#   git bisect reset

# "$@" preserves each argument as a single word; the previous $* re-split
# arguments containing whitespace before executing the command.
"$@"
rv=$?

if [ $rv -gt 127 ]
then
    exit 127
else
    exit $rv
fi
| true
|
2934a2fae235a41f3ac625966b26c2a2ef9fbade
|
Shell
|
kiskeyix/applications
|
/scripts/make-kpkg.sh
|
UTF-8
| 7,404
| 3.453125
| 3
|
[] |
no_license
|
#!/bin/bash
# vim: ft=sh:columns=80 :
# $Revision: 1.48 $
# $Date: 2006-03-25 14:53:36 $
#
# Luis Mondesi < lemsx1@gmail.com >
#
# URL: http://lems.kiskeyix.org/toolbox/?f=make-kpkg.sh&d=1
#
# DESCRIPTION: an interactive wrapper to Debian's "make-kpkg"
# to build a custom kernel package using
# Distributed CC (distcc) and ccache if available.
#
# USAGE: cd to /usr/src/linux (or the linux source tree)
# and then call:
#
# make-kpkg.sh N1 N2
#
# where N1 is the number or string appended to the kernel
# (01 or 02, or 03..., etc...)
# and N2 is the revision of this kernel: 1.0, 1.1 ...
# e.g.:
#
# make-kpkg.sh -desktop 1.0
# TIPS:
# * setup a $HOME/.make-kpkg.rc with the variables found in this
# script (see below) to override them
# * N2 is optional. It defaults to 1.0
#
# NOTES:
# * If your modules are in /usr/src/modules, then your kernel
# is in /usr/src/linux. In other words, "modules" dir is parallel
# to your "linux" source directory. Same applies to "kernel-patches"
# * For distcc/ccache to work, the script assumes that
# a symlink /usr/local/bin/gcc -> /usr/bin/ccache exists
# * If distributed cc (distcc) is installed, then we will distribute
# our compilation to the hosts found in: ~/.distcc/hosts
# * If we also have ccache installed, then we arrange the commands
# so that we can use both ccache and distcc.
# Make sure that $CCACHE_DIR is setup correctly (man ccache)
# LICENSE: GPL (http://www.gnu.org/licenses/gpl.txt)
#
# CHANGES:
# 2006-03-25 07:45 EST - applied Antonio Ospite patch which allows newer
# versions of make-kpkg to do "clean" and "modules_clean"
#
CCACHE="`command -v ccache 2> /dev/null`"
DISTCC="`command -v distcc 2> /dev/null`"
# for those who don't have distcc or ccache installed
# (abort immediately if any command below fails)
set -e
# When both tools exist, chain them: ccache invokes distcc as its backend.
if [[ -x "$CCACHE" && -x "$DISTCC" ]]; then
    echo "Setting up distcc with ccache"
    MAKEFLAGS="CCACHE_PREFIX=distcc" # this can't be full path
    CCACHE_PREFIX="distcc" # this can't be full path
    if [[ -L "/usr/local/bin/gcc" ]]; then
        readlink "/usr/local/bin/gcc" | grep ccache && \
        echo "ccache is correctly setup" &&
        export CC="/usr/local/bin/gcc" \
        || echo "No symlink from gcc to ccache found in /usr/local/bin"
    fi
fi
if [[ -f "$HOME/.distcc/hosts" ]];then
    # the format of this file is:
    # host1 host2 ... hostN-1 hostN
    echo "Reading $HOME/.distcc/hosts"
    DISTCC_HOSTS=$(< $HOME/.distcc/hosts )
else
    DISTCC_HOSTS="localhost"
fi
CONCURRENCY_LEVEL=5 # use more than one thread for make
# should detect from the number of
# hosts above
FAKEROOT="fakeroot" # how to get root (fakeroot, sudo,...)
MODULE_LOC="../modules/" # modules are located in the
# directory prior to this
NO_UNPATCH_BY_DEFAULT="YES" # please do not unpatch the
# kernel by default
PATCH_THE_KERNEL="YES" # always patch the kernel
ALL_PATCH_DIR="../kernel-patches/" # patches are located before
# this directory
IMAGE_TOP="../" # where to save the resulting
# .deb files
KPKG_ARCH="i386" # kernel architecture we default too. Allows users to pass arguments from .make-kpkg.rc for cross-compilation
# read local variables and override defaults:
if [[ -f "$HOME/.make-kpkg.rc" ]]; then
    # read user settings for the variables given above
    source "$HOME/.make-kpkg.rc"
fi
if [[ ! -z "$DISTCC_HOSTS" ]]; then
    echo "Using hosts: $DISTCC_HOSTS"
fi
# sets all variables:
export IMAGE_TOP ALL_PATCH_DIR PATCH_THE_KERNEL
export MODULE_LOC NO_UNPATCH_BY_DEFAULT
export KPKG_ARCH
export CCACHE_PREFIX DISTCC_HOSTS
export MAKEFLAGS CONCURRENCY_LEVEL
## get arguments. if --help, print USAGE
if [[ ! -z "$1" && "$1" != "--help" ]]; then
    if [[ ! -z $2 ]]; then
        REVISION="$2"
    else
        REVISION="1.0"
    fi
    # ask whether to create a kernel image
    makeit=0
    yesno="No"
    read -p "Do you want to make the Kernel? [y/N] " yesno
    case $yesno in
        y* | Y*)
        makeit=1
        ;;
        # no need to continue otherwise
        # Sometimes we just want to make the headers indepentently
        # and/or the debianized sources... thus, continue
        # *)
        # #exit 0
        # ;;
    esac
    # ask about initrd
    yesno="No"
    read -p "Do you want to enable initrd support? [y/N] " yesno
    case $yesno in
        y* | Y*)
        echo "Initrd support enabled"
        BUILD_INITRD=" --initrd"
        INITRD="YES"
        INITRD_OK="YES"
        export INITRD
        ;;
        *)
        echo "Initrd support disabled"
        BUILD_INITRD=""
        # reset initrd
        unset INITRD
        INITRD_OK="NO"
        ;;
    esac
    export INITRD_OK
    # ask about making the kernel headers
    yesno="No"
    KERNEL_HEADERS=""
    read -p "...Headers package for this Kernel? [y/N] " yesno
    case $yesno in
        y* | Y*)
        KERNEL_HEADERS="kernel_headers"
        ;;
    esac
    # ask about making kernel_source target
    yesno="No"
    read -p "...Source package for this Kernel? [y/N] " yesno
    case $yesno in
        y* | Y*)
        KERNEL_HEADERS="$KERNEL_HEADERS kernel_source"
        ;;
    esac
    # ask whether to create all kernel module images
    # from ../modules (or /usr/src/modules)
    mmakeit=0
    myesno="No"
    read -p "Do you want to make the Kernel Modules [$MODULE_LOC] ? [y/N] " myesno
    case $myesno in
        y* | Y*)
        mmakeit=1
        ;;
    esac
    # full build: clean first, then the kernel image (plus any extras)
    if [[ $makeit -eq 1 ]]; then
        echo -e "Building kernel [ initrd opts: $BUILD_INITRD ] \n"
        make-kpkg \
        --rootcmd $FAKEROOT \
        --append-to-version "$1" \
        --revision $REVISION \
        clean
        make-kpkg \
        --rootcmd $FAKEROOT \
        --config oldconfig \
        --append-to-version "$1" \
        --revision $REVISION \
        $BUILD_INITRD \
        kernel_image $KERNEL_HEADERS
    fi
    # Sometimes we just want to make the headers indepentently
    # or kernel_source
    if [[ x$KERNEL_HEADERS != "x" && $makeit -eq 0 ]]; then
        echo -e "Building kernel [$KERNEL_HEADERS] only \n"
        make-kpkg \
        --rootcmd $FAKEROOT \
        --config oldconfig \
        --append-to-version "$1" \
        --revision $REVISION \
        $BUILD_INITRD \
        $KERNEL_HEADERS
    fi
    # make the modules
    if [[ $mmakeit -eq 1 ]]; then
        make-kpkg --rootcmd $FAKEROOT \
        --append-to-version "$1" \
        --revision $REVISION \
        modules_clean
        make-kpkg \
        --rootcmd $FAKEROOT \
        --config oldconfig \
        --append-to-version "$1" \
        --revision $REVISION \
        $BUILD_INITRD \
        modules_image
    fi
else
    # no usable N1 argument (or --help): show usage
    echo -e "Usage: $0 N1 [N2]\n \t Where N1 \
is an interger or string to append to the kernel name. \
And optional N2 is a revision for this kernel"
fi
#eof
| true
|
d26a4500368475a9ab056bdaa9934ae9a7edba57
|
Shell
|
calebrwalk5/cheatsheet
|
/bash-cheatsheet.sh
|
UTF-8
| 106
| 2.796875
| 3
|
[] |
no_license
|
#!/bin/bash
# Minimal demonstration of `if` tests with the [ ... ] builtin.

# True comparison: the body runs.
if [ 1 = 1 ]; then
	echo "one equals one"
fi

# False comparison: the body is skipped.
if [ 1 = 2 ]; then
	echo "one equals two"
fi
| true
|
9c34928d1644c4f065f4ef741b0f6654cdcc3296
|
Shell
|
seler/cms
|
/cms_test/reset
|
UTF-8
| 331
| 2.59375
| 3
|
[] |
no_license
|
#!/bin/bash
# Reset the development CMS: detect this machine's non-loopback IPv4
# address, wipe the SQLite database, rebuild the Django schema and serve
# the site on port 80 at that address.
# First "inet addr:" entry from ifconfig that is not 127.0.0.1.
ip=`ifconfig | grep 'inet addr:'| grep -v '127.0.0.1' | cut -d: -f2 | awk '{ print $1}'`
rm cms.db
#sed s/'^ *"domain": "[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}\.[0-9]\{1,3\}"$'/'    "domain": "'$ip'"'/ initial_data.json > initial_data.json
python manage.py syncdb --noinput
# Port 80 needs root, hence sudo.
sudo python manage.py runserver $ip:80
| true
|
5d36e7d99515e3e851d2fe8cd3c0b4d30a39d632
|
Shell
|
khmrang2/JARAM_study
|
/HyunNa/Day05/ex00/option.sh
|
UTF-8
| 112
| 2.84375
| 3
|
[] |
no_license
|
#!/bin/bash
# Print a greeting keyed off the first flag: -h -> "hello", -b -> "bye";
# any other argument (or none) falls through silently.
case "$1" in
	-h)
		echo "hello"
		exit 0
		;;
	-b)
		echo "bye"
		exit 0
		;;
esac
| true
|
ccd0c4844992937c8b3551197c6cbc612a454568
|
Shell
|
lifansama/btsync-key
|
/play.sh
|
UTF-8
| 3,476
| 3.59375
| 4
|
[] |
no_license
|
#!/bin/bash
PATH=/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin:~/bin
export PATH
#=================================================================#
#   System Required: CentOS7 X86_64                               #
#   Description: FFmpeg Stream Media Server                       #
#   Author: LALA                                                  #
#   Website: https://www.lala.im                                  #
#=================================================================#
# Colour codes for terminal status messages
red='\033[0;31m'
green='\033[0;32m'
yellow='\033[0;33m'
font="\033[0m"
# Optionally download a static FFmpeg 4.0.3 build and install its
# binaries into /usr/bin (required before streaming can work).
ffmpeg_install(){
# Install FFmpeg
read -p "你的机器内是否已经安装过FFmpeg4.x?安装FFmpeg才能正常推流,是否现在安装FFmpeg?(yes/no):" Choose
if [ $Choose = "yes" ];then
	yum -y install wget
	wget --no-check-certificate https://www.johnvansickle.com/ffmpeg/old-releases/ffmpeg-4.0.3-64bit-static.tar.xz
	tar -xJf ffmpeg-4.0.3-64bit-static.tar.xz
	cd ffmpeg-4.0.3-64bit-static
	mv ffmpeg /usr/bin && mv ffprobe /usr/bin && mv qt-faststart /usr/bin && mv ffmpeg-10bit /usr/bin
fi
if [ $Choose = "no" ]
then
	echo -e "${yellow} 你选择不安装FFmpeg,请确定你的机器内已经自行安装过FFmpeg,否则程序无法正常工作! ${font}"
	sleep 2
fi
}
# Prompt for the RTMP endpoint and video directory, then loop the videos
# to it forever with ffmpeg, optionally overlaying a watermark image.
stream_start(){
# Ask for the RTMP push address and stream key
read -p "输入你的推流地址和推流码(rtmp协议):" rtmp
# Validate that the address uses the rtmp:// scheme
if [[ $rtmp =~ "rtmp://" ]];then
	echo -e "${green} 推流地址输入正确,程序将进行下一步操作. ${font}"
	sleep 2
else
	echo -e "${red} 你输入的地址不合法,请重新运行程序并输入! ${font}"
	exit 1
fi
# Ask for the directory holding the .mp4 videos (absolute path)
read -p "输入你的视频存放目录 (格式仅支持mp4,并且要绝对路径,例如/opt/video):" folder
# Ask whether a watermark should be overlaid (top-right corner)
read -p "是否需要为视频添加水印?水印位置默认在右上方,需要较好CPU支持(yes/no):" watermark
if [ $watermark = "yes" ];then
	read -p "输入你的水印图片存放绝对路径,例如/opt/image/watermark.jpg (格式支持jpg/png/bmp):" image
	echo -e "${yellow} 添加水印完成,程序将开始推流. ${font}"
	# Loop over every .mp4 in the directory, forever (re-encode + overlay)
	while true
	do
		cd $folder
		for video in $(ls *.mp4)
		do
		ffmpeg -re -i "$video" -i "$image" -filter_complex overlay=W-w-5:5 -c:v libx264 -c:a aac -b:a 192k -strict -2 -f flv ${rtmp}
		done
	done
fi
if [ $watermark = "no" ]
then
	echo -e "${yellow} 你选择不添加水印,程序将开始推流. ${font}"
	# Loop over every .mp4 in the directory, forever (stream-copy video)
	while true
	do
		cd $folder
		for video in $(ls *.mp4)
		do
		ffmpeg -re -i "$video" -c:v copy -c:a aac -b:a 192k -strict -2 -f flv ${rtmp}
		done
	done
fi
}
# Stop streaming: quit the screen session and kill any ffmpeg processes
stream_stop(){
screen -S stream -X quit
killall ffmpeg
}
# Start-menu banner
echo -e "${yellow} CentOS7 X86_64 FFmpeg无人值守循环推流 For LALA.IM ${font}"
echo -e "${red} 请确定此脚本目前是在screen窗口内运行的! ${font}"
echo -e "${green} 1.安装FFmpeg (机器要安装FFmpeg才能正常推流) ${font}"
echo -e "${green} 2.开始无人值守循环推流 ${font}"
echo -e "${green} 3.停止推流 ${font}"
# Read the user's choice (1-3) and dispatch to the matching action
start_menu(){
read -p "请输入数字(1-3),选择你要进行的操作:" num
case "$num" in
	1)
	ffmpeg_install
	;;
	2)
	stream_start
	;;
	3)
	stream_stop
	;;
	*)
	echo -e "${red} 请输入正确的数字 (1-3) ${font}"
	;;
esac
}
# Run the start menu
start_menu
| true
|
71844c4c400f2699c2255d7e6c1217627a399eaa
|
Shell
|
UplinkCoder/dmd
|
/test/runnable/test9377.sh
|
UTF-8
| 527
| 2.96875
| 3
|
[
"BSL-1.0"
] |
permissive
|
#!/usr/bin/env bash
# DMD testsuite driver for issue 9377: build two modules into a static
# library, link a program against that library, clean up the artefacts
# and record success.
# Environment provided by the suite harness: DMD, MODEL, OS, SEP,
# RESULTS_DIR, EXE, OBJ.
src=runnable${SEP}extra-files
dir=${RESULTS_DIR}${SEP}runnable
output_file=${dir}/test9377.sh.out
# Static libraries are .lib on Windows, .a everywhere else.
if [ $OS == "win32" -o $OS == "win64" ]; then
    LIBEXT=.lib
else
    LIBEXT=.a
fi
libname=${dir}${SEP}lib9377${LIBEXT}
# -lib: compile mul9377a/mul9377b into a static library.
$DMD -m${MODEL} -I${src} -of${libname} -c ${src}${SEP}mul9377a.d ${src}${SEP}mul9377b.d -lib || exit 1
# Link the driver program against that library.
$DMD -m${MODEL} -I${src} -of${dir}${SEP}mul9377${EXE} ${src}${SEP}multi9377.d ${libname} || exit 1
rm ${dir}/{lib9377${LIBEXT},mul9377${OBJ},mul9377${EXE}}
echo Success >${output_file}
| true
|
8b6b37b61a2d51079470dd31c9d3ccd99ff2ca2a
|
Shell
|
sarachour/topaz
|
/benchmarks/barnes/run_batching.sh
|
UTF-8
| 270
| 2.921875
| 3
|
[] |
no_license
|
# Run the barnes benchmark batch for one simulator variant.
# $1 (TAG) selects src/nbody_sim.$TAG.tpz and the output-$TAG directory;
# results are staged into ./output and moved back under output-$TAG.
TAG=$1

# An empty tag would silently copy/remove the wrong files — refuse it.
if [ -z "$TAG" ]; then
    echo "usage: $0 <tag>"
    exit 1
fi

# Select the simulator variant for this tag (-f: no error if absent).
rm -f src/nbody_sim.tpz
cp "src/nbody_sim.$TAG.tpz" src/nbody_sim.tpz || exit 1
make clean; make || exit 1

# Stage this tag's output directory under the name the benchmark expects.
rm -rf output
cp -r "output-$TAG" output
if [ ! -d output ]; then
    echo "no output directory"
    exit 1
fi

./run_barn_batch.sh

# Persist the results back under the tag-specific name.
rm -rf "output-$TAG"
mv output "output-$TAG"
| true
|
c05f7bd3686f2d6b3aa9bd7274df7f1844ec7f8a
|
Shell
|
allwaysoft/DIVERSOS_PROJETOS
|
/Oracle/Capitulo_DataPump/05_Exportando_BD.sh
|
UTF-8
| 1,169
| 2.609375
| 3
|
[] |
no_license
|
-- tente fazer um export FULL com o usuario HR e veja o erro de falta de privilegios:
$ expdp hr/hr FULL=YES DUMPFILE=orclfs_full.dmp DIRECTORY=DP_DIR
-- conectado com SYS conceda os privilegios abaixo para o usuario HR:
sql> GRANT DATAPUMP_EXP_FULL_DATABASE TO HR;
-- conectado com HR faca um export FULL do BD:
$ expdp hr/hr FULL=YES DUMPFILE=DP_DIR:orclfs_full.dmp LOGFILE=DP_DIR:orclfs_full.log
-- **** EXPORT FULL COM PARALELISMO ***********************************************************************************
-- conectado com SYS crie um novo diretorio e atribua privilegios de leitura/gravacao neste diretorio para o usuario HR:
sql> CREATE DIRECTORY DP_DIR_2 AS '/tmp';
sql> GRANT READ, WRITE ON DIRECTORY DP_DIR_2 TO HR;
-- conectado com HR faca um export FULL do BD utilizando paralelismo com gravacao de arquivos de 200 MB cada em 2 diretorios
$ expdp hr/hr FULL=YES DUMPFILE=DP_DIR:orclfs_full1_%U.dmp DP_DIR_2:orclfs_full2_%U.dmp FILESIZE=20MB PARALLEL=3 LOGFILE=DP_DIR:orclfs_full.log JOB_NAME=exp_full_par REUSE_DUMPFILES=YES
-- ********************************************************************************************************************
| true
|
68ce3d8cd5ee82291dd22c7dad068e559b831bc1
|
Shell
|
yah00078/Piviewer
|
/intelvnc.sh
|
UTF-8
| 2,114
| 3.6875
| 4
|
[] |
no_license
|
#!/bin/bash
#
# intelvnc.sh — wrap xinit + vncviewer so this machine's display can
# attach to a remote VNC target.  Long options:
#   --start <target>          connect with keyboard/mouse control
#   --start-view <target>     connect view-only
#   --stop                    stop any running xinit/VNC session
#   --update-password <pass>  regenerate /root/vnc.pass

#----------------------
# Start VNC with full control of the target ($1).
function start-vnc {
	TARGET=$1
	echo -e "\033[34m VNC Start TARGET:$1 ***CONTROL MODE*** \033[0m"
	xinit /usr/bin/vncviewer $TARGET -shared -passwordfile /root/vnc.pass -qualitylevel 9 -compresslevel 6 >/dev/null 2>&1 &
	echo -e "\033[34m VNC Started ! \033[0m"
	exit 0
}

#----------------------
# Start VNC against the target ($1) in VIEW-ONLY mode.
function start-vnc-view {
	TARGET=$1
	echo -e "\033[34m VNC Start TARGET:$1 ***VIEW-ONLY MODE*** \033[0m"
	xinit /usr/bin/vncviewer $TARGET -viewonly -shared -passwordfile /root/vnc.pass -qualitylevel 9 -compresslevel 6 >/dev/null 2>&1 &
	echo -e "\033[34m VNC Started ! \033[0m"
	exit 0
}

#----------------------
# Stop the XINIT/VNC process whose PID is $1 (SIGHUP, then wait).
function stop-vnc {
	xinitprocesspid=$1
	# Bug fix: this message previously printed the caller's global
	# $xinitprocess instead of this function's own parameter.
	echo -e "\033[32m Kill Gracefully PID: $xinitprocesspid ... \033[0m"
	kill -HUP $xinitprocesspid
	sleep 3
	echo -e "\033[32m Killed PID: $xinitprocesspid \033[0m"
	return 0
}

#-----------------------
# If an xinit process is already running, stop it before (re)starting.
function check_xinitprocess {
	xinitprocess=$(pgrep -x "xinit")
	if [ -n "$xinitprocess" ]; then
		echo -e "\033[31m ! Xinit Process Found PID:$xinitprocess !\033[0m"
		stop-vnc $xinitprocess
	else
		return 0
	fi
}

# Regenerate the obfuscated VNC password file from the clear text in $1.
function update_password {
	password=$1
	echo $password | /usr/bin/vncpasswd -f > /root/vnc.pass
	echo -e "\033[31m ! Password Changed !\033[0m"
}

# Parse the long options: --start --stop --start-view --update-password
# (each option's value is pulled manually from the next positional word).
optspec=":-:"
while getopts "$optspec" optchar; do
	case "${OPTARG}" in
		start)
			value="${!OPTIND}" OPTIND=$(( $OPTIND + 1 ))
			check_xinitprocess
			start-vnc $value
			exit 0
			;;
		stop)
			value="${!OPTIND}" OPTIND=$(( $OPTIND + 1 ))
			check_xinitprocess
			exit 0
			;;
		update-password)
			value="${!OPTIND}" OPTIND=$(( $OPTIND + 1 ))
			update_password $value
			exit 0
			;;
		start-view)
			value="${!OPTIND}" OPTIND=$(( $OPTIND + 1 ))
			check_xinitprocess
			start-vnc-view $value
			exit 0
			;;
	esac
done
| true
|
dc6287fde95092706a7b9074aa8c2ac47e6bf7ac
|
Shell
|
cHolzberger/kvm-osx
|
/bin/mon-send
|
UTF-8
| 914
| 3.71875
| 4
|
[] |
no_license
|
#!/bin/bash
# Send one or more commands to a QEMU machine's monitor socket via socat.
# Usage: mon-send [machine[:seat]] cmd [cmd...]
SCRIPT_DIR=$(dirname $(readlink -f $0))
# MACHINE_PATH and SOCKET_MON are presumably defined by these sourced
# config files — confirm when changing this script.
source $SCRIPT_DIR/config-common
source $SCRIPT_DIR/config-machine
# $1 may be "machine:seat"; the seat defaults to the machine name.
WANT_SEAT=$(echo $1 | cut -d":" -f 2)
WANT_MACHINE=$(echo $1 | cut -d":" -f 1)
MACHINE=${WANT_MACHINE:-"default"}
SEAT=${WANT_SEAT:-"$MACHINE"}
if [ "x$MACHINE" == "x" ]; then
	echo "Usage:"
	echo "$0 [machine-name]"
	exit 3
fi
if [ ! -d "$MACHINE_PATH" ]; then
	echo "Machine $MACHINE does not exists"
	echo "Reason: $MACHINE_PATH does not exist"
	exit 1
fi
if [ ! -e "$MACHINE_PATH/config" ]; then
	echo "Can't load $MACHINE"
	echo "Reason: 'config' does not exist in $MACHINE_PATH"
	exit 2
fi
shift
echo $SOCKET_MON
# Forward each remaining argument as a monitor command through socat;
# abort on the first command the monitor rejects.
while [[ $# -gt 0 ]]; do
	echo $1
	ret=$( socat -T5 unix-connect:"$SOCKET_MON" "exec:$SCRIPT_DIR/mon-sock '$1'" 3>&1 4>&1 )
	if echo $ret | grep "Error:\|Could not open"; then
		echo "QEMU ERROR: $ret"
		exit 1
	else
		printf '%s\n' "$ret"
	fi
	shift
done
exit 0
| true
|
80169946f2693f0861c6f6c6d15073dcf650562a
|
Shell
|
tamaskenez/gtrselect
|
/mining_scripts/img_postprocess.sh
|
UTF-8
| 161
| 2.78125
| 3
|
[] |
no_license
|
# Rebuild ../res/imgs_table_horiz from scratch: every image in
# ../res/imgs is scaled to 400px height and rotated 90 degrees
# (GraphicsMagick `gm convert` does the work).
RESDIR=../res
DIR=$RESDIR/imgs_table_horiz
rm -rf "$DIR"
mkdir "$DIR"
for a in "$RESDIR"/imgs/*; do
	# Quote every path so filenames containing spaces survive.
	gm convert -resize x400 -rotate 90 "$a" "$DIR/$(basename "$a")"
done
| true
|
99800615a2bcefa364e25c90cbfa07c104fb36c6
|
Shell
|
meain/programmingfonts-screenshots
|
/scripts/stagechanged
|
UTF-8
| 299
| 3.125
| 3
|
[] |
no_license
|
#!/bin/sh
# Stage (git add) every image under images/ whose pixels differ from its
# backup copy in images.bkp/.  Visually identical files are left alone.
for image in images/*; do
    if [ -f "$image" ] && [ -f "images.bkp/$(basename "$image")" ]; then
        # ImageMagick `compare` prints the RMSE metric on stderr; an
        # output other than "0 (0)" means the pixels changed.
        if compare -metric RMSE "$image" "images.bkp/$(basename "$image")" NULL: 2>&1 | grep -vq '0 (0)'; then
            echo "$image"
            git add "$image"
        fi
    fi
done
| true
|
3f46f45a91d858fad82d5298925d2276be94b2fb
|
Shell
|
cylindricalcube/k8s-flask-postgres-app
|
/scripts/release
|
UTF-8
| 393
| 3.1875
| 3
|
[] |
no_license
|
#!/bin/bash
###
# scripts/release
# The release script pushes the current version and last built Docker image
# to Docker Hub

# The project root env var must point at the checkout we are releasing.
if [ -z "$TD_PROJECT_ROOT" ]; then
    echo "TD_PROJECT_ROOT not set, exiting.."
    exit 1
fi

APP_SOURCE_DIR="${TD_PROJECT_ROOT}/src"
# The version file in src/ names the tag that was just built.
VERSION=$(cat "${APP_SOURCE_DIR}/version")
IMAGE=cstubbs/todoozle

# Push both the versioned tag and the floating "latest" tag.
sudo docker push "${IMAGE}:${VERSION}"
sudo docker push "${IMAGE}:latest"
| true
|
217e5734c371f39d0378d11d1499ce4f11b8029f
|
Shell
|
Jeff-Russ/duco
|
/old/bash/lib/check_args.sh
|
UTF-8
| 906
| 3.390625
| 3
|
[] |
no_license
|
#!/bin/sh
# Validate node-appgen's CLI arguments and derive the project paths.
# With 0 args: generate into the caller's directory (confirming first if
# it is non-empty).  With 2 args: $1 = new project dir, $2 = template dir.
# Relies on helpers defined elsewhere (abs_path, fancyprompt,
# echo_magenta, abort_appgen) and the CALLER_PATH variable — confirm
# their contracts before changing this logic.
check_args () {
  if [ "$#" -eq 0 ]
  then
    DIR_MISSING=false
    DESC=''
    if [ "$(ls -A $CALLER_PATH)" ]
    then
      m="This directory is not empty! This will create a new app here."
      fancyprompt "$m" '' '' abort_appgen
    fi
    PROJ_PATH=$(abs_path "$CALLER_PATH")
  elif [ "$#" -eq 2 ]
  then
    mkdir "$1"
    PROJ_PATH=$(abs_path "${CALLER_PATH}/${1}")
    if [ -d "$2" ]
    then
      COPY_SRC=$(abs_path "$2")
      DIR_MISSING=false
      # A node-appgen-installs marker means the template ships a config
      # script to run; without it, npm installation is skipped.
      if [ -f "$2/node-appgen-installs" ]
      then
        INST_MISSING=false
        INSTALLER="$COPY_SRC/node-appgen-config.sh"
      else
        INSTALLER="$2/node-appgen-config" # for fail display
        INST_MISSING=true
        RUN_NPM=false
      fi
    else
      COPY_SRC="$2" # for fail display
      DIR_MISSING=true
    fi
  else # TODO: add help menu
    echo_magenta "Incorrect number of arguments"
  fi
}
| true
|
99d8e626686b2683c7e3e63f5674175efa2a30f2
|
Shell
|
omega/dev-admin
|
/actions/debug/script.sh
|
UTF-8
| 643
| 3.015625
| 3
|
[] |
no_license
|
# Just a sample action that lets me test and debug some aspects
id
# NOTE(review): the strings below look like they originally contained raw
# ANSI escape bytes (colour codes) — confirm before editing them.
echo "[1;32;40m[1m[ok][0m"
echo "[31;40m[1m[error][0m"
# Show all the colors of the rainbow, should be run under bash
#for STYLE in 0 1 2 3 4 5 6 7; do
#for FG in 30 31 32 33 34 35 36 37; do
#for BG in 40 41 42 43 44 45 46 47; do
#CTRL="\033[${STYLE};${FG};${BG}m"
##echo -e "${CTRL}"
#echo "${STYLE};${FG};${BG}"
##echo -e "\033[0m"
#done
#echo
#done
#echo
#done
# Reset
#echo -e "\033[0m"
# The long sleeps simulate a slow-running action so the caller's
# progress/streaming behaviour can be observed.
echo "I am going to sleep for a while now"
sleep 30
echo "YAWN. still sleepy. See you in a bit"
sleep 20
echo "Aww man, already? ok.."
| true
|
8a44b747db8617025ecf6d331dc0ba8e736193aa
|
Shell
|
anitlthomas123/gitdemo
|
/Francis/even.sh
|
UTF-8
| 111
| 3.109375
| 3
|
[] |
no_license
|
#!/bin/bash
# Prompt the user for a number and report whether it is even or odd.
read -p "Enter the no.:" n
# Even numbers leave no remainder when divided by 2.
if (($n%2==0))
then
	echo "Even number"
else
	echo "Odd number"
fi
| true
|
2c4e3bbcfba7366c0b8587c6da93df0b1ecc8b28
|
Shell
|
chappyhome/bin.deleverkindle.com
|
/Old/batchConvertCover.sh
|
UTF-8
| 740
| 3.53125
| 4
|
[] |
no_license
|
#!/bin/sh
# For every book directory under $1 (the library root), create a 128x190
# thumbnail named <name>_128_190.jpg next to each cover.jpg that does not
# already have one.
library_dir=$1
OLDIFS=$IFS
# Split find's output on ':' instead of whitespace so paths containing
# spaces stay intact (assumes no ':' inside paths).
IFS=:
#echo "find $add_library_dir -mindepth 2 -type d -printf '%p$IFS'"
for findlist in $( find "$library_dir" -mindepth 2 -type d -printf "%p$IFS")
do
    #echo ${list}
    for findname in $( find "${findlist}" -type f -name cover.jpg )
    do
        #echo $(unknown)
        jpg_dir=`dirname "${findname}"`
        jpg_basename=`basename "${findname}"`
        jpg_filename=${jpg_basename%.*}
        jgp_new_filename="${jpg_dir}"/"${jpg_filename}""_128_190.jpg"
        if [ ! -f "$jgp_new_filename" ]; then
            # NOTE(review): the echo omits the '!' that the actual convert
            # call uses (forces exactly 128x190, ignoring aspect ratio).
            echo "convert -resize 128X190 ${findname} ${jgp_new_filename}"
            convert -resize 128X190! "${findname}" "${jgp_new_filename}"
        fi
    done
done
IFS=$OLDIFS
| true
|
462b94791f98bb8749017bec2168715304f8d930
|
Shell
|
elizabethengelman/bash_scripting_zagaku
|
/zagaku.sh
|
UTF-8
| 4,161
| 3.734375
| 4
|
[] |
no_license
|
#!/bin/bash
# Terminal slide deck for a "Writing Bash Scripts" zagaku (lesson).
# Each slide_* function clears the screen and prints one slide; the
# driver at the bottom advances on Enter via bare 'read' calls.

# ANSI colour escape sequences used throughout the deck.
BLUE='\033[0;36m'
GREEN='\033[0;32m'
PURPLE='\033[0;35m'
RED='\033[0;31m'
NC='\033[0m'
LEFT_MARGIN=" "

# Title slide.
title_slide () {
clear
echo
echo -e "$LEFT_MARGIN${BLUE}Writing Bash Scripts${NC}"
echo -e "$LEFT_MARGIN${BLUE}    by Elizabeth" ${NC}
}

# Slide: what bash is.
slide_1 () {
clear
echo
echo -e "$LEFT_MARGIN${BLUE}What is bash?${NC}"
echo
echo "$LEFT_MARGIN * a version of the UNIX shell program"
echo
echo "$LEFT_MARGIN * sh (Bourne Shell): original shell"
echo
echo "$LEFT_MARGIN * bash (Bourne Again): replacement for sh"
echo
echo "$LEFT_MARGIN * interprets commands from keyboard or script"
}

# Slide: startup files.
slide_2 () {
clear
echo
echo -e "$LEFT_MARGIN${BLUE}~/.bash_profile OR ~/.bashrc?${NC}"
echo
echo "$LEFT_MARGIN * executed at the start up of an interactive shell"
echo
echo "$LEFT_MARGIN * non-login interactive shell"
echo "$LEFT_MARGIN     * ~/.bashrc"
echo
echo "$LEFT_MARGIN * login interactive shell"
echo "$LEFT_MARGIN     * ~/.bash_profile"
echo "$LEFT_MARGIN     * ~/.profile"
echo
echo -e "$LEFT_MARGIN * ${GREEN}if [-f ~/.bashrc ]; then"
echo "$LEFT_MARGIN       source ~/.bashrc"
echo -e "$LEFT_MARGIN   fi${NC}"
echo
echo "$LEFT_MARGIN * Mac OSX Terminal.app"
}

# Slide: creating a script and variables.  Uses a bare 'read' mid-slide
# as a reveal: the example output appears only after Enter is pressed.
slide_3 () {
clear
echo
echo -e "$LEFT_MARGIN${BLUE}Creating a Script${NC}"
echo
echo -e "$LEFT_MARGIN * ${GREEN}#!/bin/bash${NC}"
echo
echo "$LEFT_MARGIN * Variables"
echo
echo -e "$LEFT_MARGIN     * local variables: ${GREEN}COLOR=\"green\"${NC}"
echo -e "$LEFT_MARGIN     * global variables: ${GREEN}export COLOR=\"green\"${NC}"
echo -e "$LEFT_MARGIN     * getting the value of the variable: ${GREEN}\$COLOR${NC}"
read
echo -e "$LEFT_MARGIN       ${GREEN}green${NC}"
}

# Slide: executing a script.
slide_4 () {
clear
echo
echo -e "$LEFT_MARGIN${BLUE}Executing your Script${NC}"
echo
echo -e "$LEFT_MARGIN * ${GREEN}./zagaku_script.sh${NC}"
echo
echo "$LEFT_MARGIN * OR, add the script to your PATH:"
echo -e "$LEFT_MARGIN     * ${GREEN}PATH="\$PATH~/myscripts/zagaku_script.sh"${NC}"
echo -e "$LEFT_MARGIN     * ${GREEN}zagaku_script.sh${NC}"
echo
echo "$LEFT_MARGIN * bash creates a copy of itself - subshell"
}

# Slide: conditionals and test expressions.
slide_5 () {
clear
echo
echo -e "$LEFT_MARGIN${BLUE}Conditionals${NC}"
echo
echo -e "$LEFT_MARGIN * ${GREEN}if [TEST COMMAND ]; then"
echo "$LEFT_MARGIN     CONSEQUENT COMMANDS"
echo -e "$LEFT_MARGIN   fi${NC}"
echo -e "$LEFT_MARGIN * Primary expressions"
echo -e "$LEFT_MARGIN     * ${GREEN}[-f FILE]${NC} - true if FILE exists and is a regular file"
echo -e "$LEFT_MARGIN     * ${GREEN}[-e FILE]${NC} - true if FILE exists"
echo -e "$LEFT_MARGIN     * ${GREEN}[-d FILE]${NC} - true if FILE exists and is a directory"
echo -e "$LEFT_MARGIN     * ${GREEN}[STRING1 == STRING2]${NC} - true if strings are equal"
echo -e "$LEFT_MARGIN     * ${GREEN}[EXPR1 -a EXPR2]${NC} - true if both EXPR1 and EXPR2 are true"
echo -e "$LEFT_MARGIN     * ${GREEN}[EXPR1 -o EXPR2]${NC} - true if EXPR1 or EXPR2 are true"
}

# Slide: functions and positional parameters.  $1/$2 here come from the
# driver's call: slide_6 "purple" "red".
slide_6 () {
clear
echo
echo -e "$LEFT_MARGIN${BLUE}Functions${NC}"
echo
echo "$LEFT_MARGIN * to define a function"
echo -e "$LEFT_MARGIN     * ${GREEN}function FUNCTION { COMMANDS }${NC}"
echo -e "$LEFT_MARGIN     * ${GREEN}FUNCTION () { COMMANDS }${NC}"
echo -e "$LEFT_MARGIN     * ${GREEN}slide_6 () { #this text right here }${NC}"
echo
echo "$LEFT_MARGIN * positional parameters"
echo -e "$LEFT_MARGIN     * ${GREEN}slide_6 \"purple\" \"red\"${NC}"
echo -e "$LEFT_MARGIN     * ${GREEN}\$1: ${PURPLE}$1${NC}"
echo -e "$LEFT_MARGIN     * ${GREEN}\$2: ${RED}$2${NC}"
}

# Slide: misc tips.
slide_7 () {
clear
echo
echo -e "$LEFT_MARGIN${BLUE}Other tips & tricks${NC}"
echo "$LEFT_MARGIN * permissioning your scripts"
echo -e "$LEFT_MARGIN     * ${GREEN}chmod u+x zagaku_script.sh${NC}"
echo
echo "$LEFT_MARGIN * debugging your scripts"
echo -e "$LEFT_MARGIN     * ${GREEN}bash -x zagaku_script.sh${NC}"
echo
echo "$LEFT_MARGIN * aliases"
echo -e "$LEFT_MARGIN     * ${GREEN}alias be='bundle exec'${NC}"
}

# Driver: show the slides in order, pausing for Enter between each.
title_slide
read
slide_1
read
slide_2
read
slide_3
read
slide_4
read
slide_5
read
slide_6 "purple" "red"
read
slide_7
read
| true
|
a9fdeb38b212f8afc628e6532164eb146f025bc0
|
Shell
|
aqmail/aQmail
|
/ucspissl/configure
|
UTF-8
| 1,014
| 3.265625
| 3
|
[
"LicenseRef-scancode-warranty-disclaimer",
"ISC",
"LicenseRef-scancode-mit-taylor-variant"
] |
permissive
|
#!/bin/sh
#********************************************************************************
# Create compile, load, makelib
# Generates three helper scripts used by the build system:
#   compile  - compile one source file with the configured CFLAGS
#   load     - link main.o plus extra objects into an executable
#   makelib  - rebuild a static archive with ar/ranlib
# and finally emits auto_cadir.c from the conf-cadir configuration file.

# Extract CFLAGS from conf-cc (everything after '=' on the CFLAGS line).
CFLAGS=`cat conf-cc | grep ^CFLAGS 2>/dev/null | cut -d= -f2`
echo -n "Checking for compile ... "
# NOTE(review): the compiler is hard-coded to 'cc' even though CFLAGS is
# read from conf-cc -- confirm conf-cc is not expected to set CC as well.
CC="cc"
( echo '#!/bin/sh'
echo exec "$CC" "$CFLAGS" -c '${1+"$@"}' ) > compile
chmod 755 compile
echo "created!"
# load: auto-ld
echo -n "Checking for loader ... "
#LD=`head -1 conf-ld`"$LDOPTS"
# NOTE(review): the linker command is hard-coded; the conf-ld lookup
# above is commented out.
LD="cc -s -L/usr/local/lib/qlibs"
( echo '#!/bin/sh'
echo 'main="$1"; shift' ; \
echo exec "$LD" '-o "$main" "$main".o ${1+"$@"}') > load
chmod 755 load
echo "created!"
echo -n "Checking for makelib ... "
( echo '#!/bin/sh' ;
echo "" ;
echo 'main="$1"; shift' ; \
echo 'rm -f "$main"' ; \
echo 'ar cr "$main" ${1+"$@"}' ; \
echo 'ranlib "$main"') > makelib
chmod 755 makelib
echo "created!"
#********************************************************************************
# create the conf_ca* files
# Embed the first line of conf-cadir as a C string constant.
echo "const char auto_cadir[] = \"`head -1 conf-cadir`\";" > auto_cadir.c
|
e568198502479bd336765589de42809a1641af00
|
Shell
|
Rajas1312/Flip-Coin-Simulation
|
/flipCoinSimulator.sh
|
UTF-8
| 774
| 3
| 3
|
[] |
no_license
|
#!/bin/bash -x
# NOTE(review): '-x' in the shebang enables command tracing; it looks
# like a debugging leftover (and is ignored when run as 'bash script').
# Simulate 42 coin flips, report each winner and the totals, and on a
# tie play sudden-death rounds until one side pulls ahead.
echo " Flip Coin Simulation problem"
a=0
b=0
x=0
y=0
# Main round: 42 random flips; a counts heads, b counts tails.
for i in {1..42}
do
if [ $((RANDOM%2)) -eq 0 ]
then
echo "Heads is the winner"
((a++))
else
echo "Tails is the winner"
((b++))
fi
done
echo " $a times heads won"
echo " $b times tails won"
if [ $a -gt $b ]
then
c=$((a-b))
echo "Heads won by $c"
fi
if [ $a -lt $b ]
then
d=$((b-a))
echo "Tails won by $d"
fi
if [ $a -eq $b ]
then
echo "its a tie"
# Tie-breaker: keep flipping until one side leads.
# NOTE(review): the lead test runs *before* the increment, so a side is
# declared winner on the flip *after* it already led by 2 -- confirm
# this is the intended "win by two" rule.
for (( ; ; ))
do
if [ $((RANDOM%2)) -eq 0 ]
then
if [ $((x-y)) -eq 2 ]
then
echo "Heads wins"
break
fi
((x++))
else
if [ $((y-x)) -eq 2 ]
then
echo "Tails wins"
break
fi
((y++))
fi
done
fi
| true
|
bf040e4a625325e66d008262cdf5e67bed3e8a0d
|
Shell
|
petronny/aur3-mirror
|
/gnome-shell-extension-maximus/PKGBUILD
|
UTF-8
| 1,873
| 2.71875
| 3
|
[] |
no_license
|
#Maintainer: jsh <jsh at myreseau dot org>
# Arch Linux PKGBUILD for the Maximus GNOME Shell extension, built from
# a snapshot of the upstream Mercurial repository (pkgver is a date).
pkgname=gnome-shell-extension-maximus
pkgver=20130621
pkgrel=1
pkgdesc="Removes decoration (i.e. the title bar) from maximised windows, saving vertical real estate."
arch=('any')
url="https://bitbucket.org/mathematicalcoffee/maximus-gnome-shell-extension"
license=('Unknown')
depends=('gnome-shell')
makedepends=('mercurial')
#source="https://bitbucket.org/mathematicalcoffee/maximus-gnome-shell-extension/downloads/gnome${pkgver}_maximus@mathematical.coffee.gmail.com.zip"
groups=('gnome-shell-extensions')
#sha256sums='395b0f5be62b75a963c158c7b590a84ee497b7d7ccbc52d6784f179eef632be6'
install=maximus.install
_hgroot="https://bitbucket.org/mathematicalcoffee/maximus-gnome-shell-extension"
_hgname="maximus"

# Clone the upstream hg repository on first run; pull updates afterwards.
build() {
cd "${srcdir}"
msg "Connecting to Mercurial server...."
if [ -d $_hgname ] ; then
cd $_hgname && hg pull # && hg up gnome3.4
msg "The local files are updated."
else
hg clone $_hgroot $_hgname # && cd $_hgname && hg up gnome3.4
fi
msg "Mercurial checkout done or server timeout"
}

# Install the extension files under the extension's UUID directory plus
# its GSettings schema.
package() {
uuid='maximus@mathematical.coffee.gmail.com'
cd "${srcdir}/${_hgname}"
install -Dm644 "${uuid}/metadata.json" \
"${pkgdir}/usr/share/gnome-shell/extensions/${uuid}/metadata.json"
install -m644 "${uuid}/extension.js" \
"${pkgdir}/usr/share/gnome-shell/extensions/${uuid}/extension.js"
install -m644 "${uuid}/convenience.js" \
"${pkgdir}/usr/share/gnome-shell/extensions/${uuid}/convenience.js"
install -m644 "${uuid}/prefs.js" \
"${pkgdir}/usr/share/gnome-shell/extensions/${uuid}/prefs.js"
install -m644 "${uuid}/appChooserDialog.js" \
"${pkgdir}/usr/share/gnome-shell/extensions/${uuid}/appChooserDialog.js"
install -Dm644 "${uuid}/schemas/org.gnome.shell.extensions.maximus.gschema.xml" \
"${pkgdir}/usr/share/glib-2.0/schemas/org.gnome.shell.extensions.maximus.gschema.xml"
}
| true
|
8690f7970f9b9d59825110fa17e17ce4b541e3ae
|
Shell
|
azedarach/reanalysis-dbns
|
/bin/regrid_jra55_inputs.sh
|
UTF-8
| 2,209
| 3.171875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# License: MIT
# Regrid JRA-55 input fields to a 2.5x2.5 degree grid.
#
# For each input NetCDF file the regridded output name is derived by
# inserting ".2.5x2.5" before the ".nc" suffix; the actual regridding
# is delegated to regrid_cdo.sh.  Set OVERWRITE=yes to replace existing
# output files instead of aborting.

PROJECT_DIR="${HOME}/projects/reanalysis-dbns"
BIN_DIR="${PROJECT_DIR}/bin"
RESULTS_DIR="${PROJECT_DIR}/results/jra55"

REGRID_FIELDS="${BIN_DIR}/regrid_cdo.sh"

if test ! -d "$RESULTS_DIR" ; then
    mkdir -p "$RESULTS_DIR"
fi

# Input fields, in the original processing order (u850, u200, slp,
# usfc, vsfc, olr, sst).  An array replaces the old comma-joined
# word-split string, so paths with spaces are safe.
input_files=(
    "${RESULTS_DIR}/fields/jra.55.ugrd.850.1958010100_2016123118.nc"
    "${RESULTS_DIR}/fields/jra.55.ugrd.250.1958010100_2016123118.nc"
    "${RESULTS_DIR}/fields/anl_surf125.002_prmsl.1958010100_2018123100.daily.nc"
    "${RESULTS_DIR}/fields/anl_surf125.033_ugrd.1958010100_2018123100.daily.nc"
    "${RESULTS_DIR}/fields/anl_surf125.034_vgrd.1958010100_2018123100.daily.nc"
    "${RESULTS_DIR}/fields/jra.55.ulwrf.ntat.1958010100_2018123121.daily.nc"
    "${RESULTS_DIR}/fields/fcst_surf125.118_brtmp.1958010100_2018123100.daily.nc"
)

for input_file in "${input_files[@]}" ; do
    # Insert the grid suffix before the .nc extension (parameter
    # expansion replaces the previous echo|sed pipelines).
    output_file="${input_file%.nc}.2.5x2.5.nc"

    if test -e "$output_file" ; then
        if test "x$OVERWRITE" = "xyes" ; then
            echo "Warning: removing existing output file $output_file"
            rm "$output_file"
        else
            echo "Error: output file $output_file exists"
            exit 1
        fi
    fi

    $REGRID_FIELDS "$input_file" "$output_file"
done
| true
|
8bca64849e1978984c9eee2ed83b13f795f087e0
|
Shell
|
moskey71/RMonEL7
|
/SupportScripts/redmine-osprep.sh
|
UTF-8
| 8,587
| 3.953125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# shellcheck disable=SC2015
#
# Script to handle preparation of the instance for installing
# and configuring RedMine
#
#################################################################
PROGNAME=$(basename "${0}")
LOGFACIL="user.err"
# Read in template envs we might want to use
while read -r RMENV
do
# shellcheck disable=SC2163
export "${RMENV}"
done < /etc/cfn/RedMine.envs
NFSOPTS="-rw,vers=4.1"
FWSVCS=(
ssh
http
https
)
PERSISTED=(
files
Repositories
)
# Need to set this lest the default umask make our lives miserable
umask 022
# Error logging & handling
# Print a message to stdout, mirror it to syslog, then abort the script
# with exit status 1.
err_exit() {
  echo "${1}"
  logger -t "${PROGNAME}" -p ${LOGFACIL} "${1}"
  exit 1
}
# Success logging
# Print a progress message to stdout and mirror it to syslog.
logit() {
  echo "${1}"
  logger -t "${PROGNAME}" -p ${LOGFACIL} "${1}"
}
# Install/start NFS components if installed
# Install NFS client packages, wire up autofs direct maps for the two
# persisted RedMine directories (files, Repositories) pointing at
# ${RM_PERSISTENT_SHARE_PATH}, then enable and start the NFS services.
function NfsSetup {
local NFSRPMS
NFSRPMS=(
nfs-utils
autofs
)
local NFSSVCS
NFSSVCS=(
rpcbind
rpc-statd
nfs-idmapd
autofs
)
# shellcheck disable=SC2145
logit "Installing RPMs: ${NFSRPMS[@]}... "
yum install -y -q "${NFSRPMS[@]}" > /dev/null 2>&1 && \
logit "Success" || err_exit "Yum failure occurred"
# Configure autofs files
if [ -e /etc/auto.master ]
then
# Ensure auto.direct key present in auto.master
# (the "$(grep -q ...)$? -eq 0" construct tests grep's exit status)
if [[ $(grep -q "^/-" /etc/auto.master)$? -eq 0 ]]
then
echo "Direct-map entry found in auto.master file"
else
printf "Adding direct-map key to auto.master file... "
sed -i '/^+auto.master/ s/^/\/- \/etc\/auto.direct\n/' \
/etc/auto.master && echo "Success" || \
err_exit "Failed to add direct-map key to auto.master file"
fi
# Ensure auto.direct file is properly populated
if [[ ! -e /etc/auto.direct ]]
then
(
printf "/var/www/redmine/files\t%s\t" "${NFSOPTS}"
printf "%s/files\n" "${RM_PERSISTENT_SHARE_PATH}"
printf "/var/www/redmine/Repositories\t%s\t" "${NFSOPTS}"
printf "%s/Repositories\n" "${RM_PERSISTENT_SHARE_PATH}"
) >> /etc/auto.direct
# Copy auto.master's SELinux context onto the new file.
chcon --reference=/etc/auto.master /etc/auto.direct
fi
else
err_exit "Autofs's auto.master file missing"
fi
# Enable (when disabled) and start (when inactive) each NFS service.
for SVC in "${NFSSVCS[@]}"
do
case $(systemctl is-enabled "${SVC}") in
enabled|static|indirect)
;;
disabled)
logit "Enabling ${SVC} service..."
systemctl enable "${SVC}" && logit success || \
err_exit "Failed to enable ${SVC} service"
;;
esac
if [[ $(systemctl is-active "${SVC}") != active ]]
then
logit "Starting ${SVC} service..."
systemctl start "${SVC}" && logit success || \
err_exit "Failed to start ${SVC} service"
fi
done
}
# Verify the persistent-data share is mountable and that each directory
# listed in $PERSISTED exists on it (creating any that are missing).
# /mnt is used as a temporary mount point and unmounted at the end.
function ShareReady {
local SHARESRVR
local SHAREROOT
# RM_PERSISTENT_SHARE_PATH may be 'server:/path' or just 'server'.
if [[ $(echo "${RM_PERSISTENT_SHARE_PATH}" | grep -q :)$? -eq 0 ]]
then
SHARESRVR=$(echo "${RM_PERSISTENT_SHARE_PATH}" | cut -d ':' -f 1)
SHAREROOT=$(echo "${RM_PERSISTENT_SHARE_PATH}" | cut -d ':' -f 2)
else
SHARESRVR="${RM_PERSISTENT_SHARE_PATH}"
fi
logit "Validating available share directories... "
logit "Verify ${SHARESRVR} is mountable... "
mount "${SHARESRVR}":/ /mnt && logit "Success" || \
err_exit "Was not able to mount ${SHARESRVR}:/"
logit "Ensure target persisted dirs are available... "
for PDIR in "${PERSISTED[@]}"
do
if [[ -d /mnt/${SHAREROOT}/${PDIR} ]]
then
logit "${SHARESRVR}:${SHAREROOT}/${PDIR} exists"
else
logit "${SHARESRVR}:${SHAREROOT}/${PDIR} doesnt exist"
logit "Attempting to create ${PDIR}"
mkdir -p "/mnt/${SHAREROOT}/${PDIR}" && logit "Success" || \
err_exit "Failed creating ${PDIR}"
fi
done
umount /mnt || err_exit "Failed to unmount test-fs"
}
# Ensure the EPEL yum repository is installed and enabled.  The repo
# definition RPM is installed on demand when 'yum repolist' does not
# already list epel.
function NeedEpel {
# Test the pipeline's exit status directly (replaces the fragile
# '[[ $(cmd | grep -q ...)$? -eq 0 ]]' construct).
if yum repolist epel | grep -qw epel
then
logit "epel repo already available"
else
logit "epel repo not already available: attempting to fix..."
yum install -y \
https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm \
&& logit success || err_exit "Failed to install epel repo-def"
fi
logit "Ensure epel repo is active..."
yum-config-manager --enable epel || err_exit "Failed to enable epel"
}
##########
## Main ##
##########
########
## Ensure /tmp is tmpfs
########
logit "Checking if tmp.mount service is enabled... "
case $(systemctl is-enabled tmp.mount) in
masked)
logit "Masked - attempting to unmask... "
systemctl -q unmask tmp.mount && logit "Success" || \
err_exit "Failed to unmask."
logit "Disableed - attempting to enable... "
systemctl -q enable tmp.mount && logit "Success" || \
err_exit "Failed to enable."
;;
disabled)
logit "Disableed - attempting to enable... "
systemctl -q enable tmp.mount && logit "Success" || \
err_exit "Failed to enable."
;;
enabled)
logit "Already enabled."
;;
esac
logit "Checking if tmp.mount service is active... "
case $(systemctl is-active tmp.mount) in
inactive)
logit "Inactive - attempting to activate"
systemctl start tmp.mount && Login "Success" || \
err_exit "Failed to start tmp.mount service"
;;
active)
logit "Already active"
;;
esac
## Dial-back SEL as needed
logit "Checking SEL mode"
case $(getenforce) in
Enforcing)
logit "SEL is in enforcing mode: attemptinto dial back... "
setenforce 0 && logit "Success" || \
err_exit "Failed to dial back SEL"
logit "Permanently dialing back enforcement mode..."
sed -i '/^SELINUX=/s/enforcing/permissive/' /etc/selinux/config && \
logit "Success" || err_exit "Failed to dial back SEL"
;;
Disabled|Permissive)
logit "SEL already in acceptable mode"
;;
esac
########
## Ensure firewalld is properly configured
########
logit "Checking firewall state"
case $(systemctl is-active firewalld) in
inactive)
logit "Firewall inactive: no exceptions needed"
logit "However, this is typically not a recommended config-state."
;;
active)
logit "Firewall active. Checking rules..."
FWSVCLST=$(firewall-cmd --list-services)
for SVC in "${FWSVCS[@]}"
do
if [[ $(echo "${FWSVCLST}" | grep -wq "${SVC}")$? -eq 0 ]]
then
logit "${SVC} already in running firewall-config"
else
logit "${SVC} missing from running firewall-config."
logit "Attempting to add ${SVC}... "
firewall-cmd --add-service "${SVC}" && logit "Success" || \
err_exit "Failed to add ${SVC} to running firewall config"
logit "Attempting to add ${SVC} (permanently)... "
firewall-cmd --add-service "${SVC}" --permanent && \
logit "Success" || \
err_exit "Failed to add ${SVC} to permanent firewall config"
fi
done
;;
esac
# Call NfsSetup function (make conditional, later)
NfsSetup
# Make sure the persistent-data share is ready
ShareReady
# Install first set of required RPMs
logit "Installing RPMs needed by RedMine..."
yum install -y parted lvm2 httpd mariadb mariadb-server mariadb-devel \
mariadb-libs wget screen bind-utils mailx iptables-services at jq && \
logit "Success" || err_exit "Yum experienced a failure"
# Create (temporary) default index-page
logit "Creating temporary index.html..."
cat << EOF > /var/www/html/index.html
<html>
<head>
<title>RedMine Rebuild In Progress</title>
<meta http-equiv="refresh" content="30" />
</head>
<body>
<div style="width: 100%; font-size: 40px; font-weight: bold; text-align: cen
ter;">
Service-rebuild in progress. Please be patient.
</div>
</body>
</html>
EOF
# Because here-documents don't like extra stuff on token-line
# shellcheck disable=SC2181
if [[ $? -eq 0 ]]
then
logit "Success"
else
err_exit "Failed creating temporary index.html"
fi
# Create ELB test file
logit "Create ELB-testable file..."
echo "I'm alive" > /var/www/html/ELBtest.txt && logit "Success" || \
err_exit "Failed creating ELB test-file."
# Let's get ELB/AS happy, early
logit "Temp-start httpd so ELB can monitor..."
systemctl restart httpd && logit success || \
err_exit "Failed starting httpd"
# Try to activate epel as needed
NeedEpel
| true
|
683a49d07e6d663eb9fae6839828abef5acc8524
|
Shell
|
camuso/scripts
|
/gitgetline
|
UTF-8
| 352
| 3.5625
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Show the one-line git log between two commits/tags, filtered by a
# pattern (the executed command is echoed first).

usage() {
	printf '\nusage: gitgetline <FROM-COMMIT-OR-TAG> <TO-COMMIT-OR-TAG> <PATTERN>\n'
	printf '\tcommand: git log COMMIT-OR-TAG..COMMIT-OR-TAG --oneline | grep --color PATTERN\n\n'
	exit
}

if [ "$#" -ne 3 ]; then
	usage
fi

echo "git log "$1".."$2" --oneline | grep --color "$3""
git log "$1".."$2" --oneline | grep --color "$3"
| true
|
b3efab8c1dd5fc0e4245830999710eb3288e2462
|
Shell
|
mrquincle/noparama
|
/scripts/collect.sh
|
UTF-8
| 673
| 3.296875
| 3
|
[] |
no_license
|
#!/bin/sh
#
# Gather Purity / Rand / Adjusted Rand scores from every
# <directory>/*/*/results.score.txt into three summary files
# (purity.txt, rand.txt, adjusted_rand.txt) inside <directory>.
#
# Usage: collect.sh <directory>
directory=${1:? "directory"}

# Start from empty summary files; 'rm -f' replaces the old
# touch-then-rm dance and does not fail when a file is absent.
rm -f "$directory/purity.txt" "$directory/rand.txt" "$directory/adjusted_rand.txt"

for f in "$directory"/*/*/; do
	# grep directly on the file (no 'cat |' needed); keep everything
	# after the first ':' on each matching line.
	grep '^Purity' "$f/results.score.txt" | cut -f2 -d: >> "$directory/purity.txt"
	grep '^Rand' "$f/results.score.txt" | cut -f2 -d: >> "$directory/rand.txt"
	grep '^Adjusted' "$f/results.score.txt" | cut -f2 -d: >> "$directory/adjusted_rand.txt"
done

echo "You can find the results in purity.txt, rand.txt, and adjusted_rand.txt in $directory"
#cat $directory/purity.txt
#cat $directory/rand.txt
#cat $directory/adjusted_rand.txt
| true
|
f79a52bd587d7a9d36a25316a7e2a3fb65cae4ef
|
Shell
|
luayalem/dd
|
/dd-bbr-integration/backup_bbr.sh
|
UTF-8
| 2,212
| 3.265625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
#
# Back up the ERT deployment and the BOSH director with BBR, record
# per-phase timings in $LOG_FILE, then copy the artefacts off the
# jumpbox.
#
# NOTE(review): a live client secret is hard-coded below; it should be
# supplied via the environment or a secret store, not committed.
export BOSH_CLIENT_SECRET=v51N-hfSFTdVHh9uAxNDheR_6D3BTE3P
export PRIVATE_KEY=/bin/bbr/bbr_key
export DIRECTOR_IP=10.0.0.10
export ERT_USERNAME=bbr_client
export BOSH_USERNAME=bbr
export DEPLOYMENT_NAME=cf-e6b6ade56b8a6e34b0e7
export BOSH_SERVER_CERT=/bin/bbr/bbr_root_cert
export LOG_FILE=backup_log_$(date +%F_%H_%M)

# Clean artefacts from a previous run, but only inside the dedicated
# backup directory.  (The original 'if pwd==...' tried to run a command
# literally named 'pwd==...' and therefore never matched.)
if [ "$(pwd)" = "/home/ubuntu/local_backup" ]
then
rm -rf cf*
rm -rf 10*
fi

STARTBT=$(date +%s)

echo "running pre-backup-check" > "$LOG_FILE"
STARTPHASE=$(date +%s)
BOSH_CLIENT_SECRET=$BOSH_CLIENT_SECRET bbr deployment --target "$DIRECTOR_IP" --username "$ERT_USERNAME" --deployment "$DEPLOYMENT_NAME" --ca-cert "$BOSH_SERVER_CERT" pre-backup-check >> "$LOG_FILE"
precheck_rc=$?
ENDPHASE=$(date +%s)
DIFFPRE=$((ENDPHASE - STARTPHASE))

# Fixed: the original tested '[ $?==0 ]', a non-empty-string test that
# is always true; the captured return codes restore the intended checks.
if [ "$precheck_rc" -eq 0 ]
then
STARTPHASE=$(date +%s)
echo "running ERT backup" >> "$LOG_FILE"
BOSH_CLIENT_SECRET=$BOSH_CLIENT_SECRET bbr deployment --target "$DIRECTOR_IP" --username "$ERT_USERNAME" --deployment "$DEPLOYMENT_NAME" --ca-cert "$BOSH_SERVER_CERT" backup >> "$LOG_FILE"
ert_rc=$?
ENDPHASE=$(date +%s)
DIFFERT=$((ENDPHASE - STARTPHASE))
if [ "$ert_rc" -eq 0 ]
then
STARTPHASE=$(date +%s)
echo "running BOSH Director backup" >> "$LOG_FILE"
bbr director --private-key-path "$PRIVATE_KEY" --username "$BOSH_USERNAME" --host "$DIRECTOR_IP" backup >> "$LOG_FILE"
ENDPHASE=$(date +%s)
DIFFDIRBOSH=$((ENDPHASE - STARTPHASE))
else
echo "ERT backup failed" >> "$LOG_FILE"
fi
else
echo "pre-backup-check failed" >> "$LOG_FILE"
fi

DIFFBT=$((ENDPHASE - STARTBT))

STARTCT=$(date +%s)
# Fixed: 'scp -rf' passed scp's internal -f flag; -r is what was meant.
scp -r cf-* cndpuser@40.71.101.137:/bbr_backup_files >> "$LOG_FILE"
scp -r 10* cndpuser@40.71.101.137:/bbr_backup_files >> "$LOG_FILE"
ENDCT=$(date +%s)
DIFFCT=$((ENDCT - STARTCT))

# NOTE(review): this deletes /home/cndpuser/bbr_backup_files while the
# copies above target /bbr_backup_files -- confirm which path is meant.
ssh cndpuser@40.71.101.137 rm -rf /home/cndpuser/bbr_backup_files/*
echo "It took $DIFFPRE seconds to perform pre check" >> "$LOG_FILE"
echo "It took $DIFFERT seconds to perform ERT backup" >> "$LOG_FILE"
echo "It took $DIFFDIRBOSH seconds to perform BOSH director backup" >> "$LOG_FILE"
echo "It took $DIFFBT seconds to perform full backup" >> "$LOG_FILE"
echo "It took $DIFFCT seconds to copy backed up files off the jumpbox" >> "$LOG_FILE"
du -s cf* >> "$LOG_FILE"
du -s 10* >> "$LOG_FILE"
| true
|
6fb5b6388134246cf6fb1e801742bb22ec0fe00d
|
Shell
|
crimsonfaith91/click-to-deploy
|
/cloudbuild/set_commit_status.sh
|
UTF-8
| 2,196
| 3.203125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Set a commit status on GitHub via a GitHub App: mint a JWT from the
# app secret, exchange it for an installation token matching the repo's
# organization, then POST the status to the commit.

set -xeo pipefail

for i in "$@"
do
case $i in
  --secret=*)
  secret="${i#*=}"
  shift
  ;;
  --issuer=*)
  issuer="${i#*=}"
  shift
  ;;
  --commit=*)
  commit="${i#*=}"
  shift
  ;;
  --repo=*)
  repo="${i#*=}"
  shift
  ;;
  --state=*)
  state="${i#*=}"
  shift
  ;;
  --description=*)
  description="${i#*=}"
  shift
  ;;
  --context=*)
  context="${i#*=}"
  shift
  ;;
  --target_url=*)
  target_url="${i#*=}"
  shift
  ;;
  *)
    >&2 echo "Unrecognized flag: $i"
    exit 1
  ;;
esac
done

# Getting the directory of the running script (quoted against spaces)
DIR="$(realpath "$(dirname "$0")")"

# Fixed: the original passed --issuer " $issuer" (stray leading space
# inside the quotes), corrupting the JWT issuer claim.
jwt_token="$("$DIR/get_jwt.py" --secret "$secret" --issuer "$issuer")"

accept_header="Accept: application/vnd.github.machine-man-preview+json"
auth_header="Authorization: Bearer $jwt_token"

# One app might be installed in many organizations. We need to select the installation that matches
# the organization of the repo
account_login="${repo%%/*}"
install_id=$(curl -X GET https://api.github.com/app/installations \
  -H "$accept_header" \
  -H "$auth_header" | jq -r ".[] | select(.account.login==\"$account_login\") | .id")

token=$(curl -X POST "https://api.github.com/installations/$install_id/access_tokens" \
  -H "$accept_header" \
  -H "$auth_header" | jq -r '.token')
token_header="Authorization: Bearer $token"

curl -X POST "https://api.github.com/repos/$repo/statuses/$commit" \
  -H "$accept_header" \
  -H "$token_header" \
  -d @- <<EOF
{
  "state": "$state",
  "target_url": "$target_url",
  "description": "$description",
  "context": "$context"
}
EOF
| true
|
4501fe83afa670cd24f84ef5f069152506c6f4c7
|
Shell
|
shashi1304/FIOLDE_INIT
|
/TESTSCRIPT.SH
|
UTF-8
| 846
| 3.21875
| 3
|
[] |
no_license
|
#!/bin/bash
# Fixed shebang: the original '#!bin\bash' is not a valid interpreter
# path, so the script ran under whatever shell invoked it.
#
# Move the source directory into the destination directory and report
# the result of the move.
#
# Usage: TESTSCRIPT.SH <file-name> <source-dir> <dest-dir>
echo "Parameter I passed" "$0" "$1" "$2"
FILE_NM=$1
SRC_DIR=$2
DEST_DIR=$3
echo "FILE_NM value is $FILE_NM"
echo "SRC_DIR value is $SRC_DIR"
echo "DEST_DIR value is $DEST_DIR"

LKP_LIST_FILE=./List_of_Files.lkp
# Record a listing of the source directory for later reference.
ls -lrt "${SRC_DIR}" >> "$LKP_LIST_FILE"

# NOTE(review): as in the original, this loop iterates over the words of
# $SRC_DIR itself (not over the listing file built above); with a single
# source directory it runs exactly once.  The loop variable no longer
# clobbers $LKP_LIST_FILE (a bug in the original).
for entry in $SRC_DIR
do
echo "Found file $entry"
mv "$SRC_DIR" "$DEST_DIR"
MV_CHECK=$?
echo " value $MV_CHECK"
if [ "$MV_CHECK" -eq 0 ] ; then
echo " FILE $entry is in $DEST_DIR/$entry"
echo "FILE IS MOVED FROM $SRC_DIR to $DEST_DIR"
else
echo "failed to move the file from $SRC_DIR to $DEST_DIR....: error code: $MV_CHECK"
fi
done
| true
|
7c6b1b11b8e6788af4cd7ce84cecc912cd2c84b7
|
Shell
|
duxing2007/srcutils
|
/bin/get_ftrace.sh
|
UTF-8
| 451
| 3.140625
| 3
|
[] |
no_license
|
#!/system/bin/sh
# Fixed shebang: the original '#/system/bin/sh' (missing '!') was just a
# comment, so the script ran under whatever shell invoked it.
#
# Capture 20 one-second snapshots of the kernel function tracer,
# filtered to the function names listed in /data/f.txt, into
# /data/to/t<N>.txt (each prefixed with a timestamp and uptime).
sleep 6
mount -t debugfs none /d
#init filter
echo > /d/tracing/set_ftrace_filter
for fn in `cat /data/f.txt`
do
echo $fn >> /d/tracing/set_ftrace_filter
done
echo function > /d/tracing/current_tracer
echo 'begin tracing'
i=0;
while busybox [ $i -lt 20 ]
do
date > /data/to/t$i.txt
cat /proc/uptime >> /data/to/t$i.txt
cat /d/tracing/trace >> /data/to/t$i.txt
sleep 1
i=$(($i+1))
done
# Stop tracing.
echo nop > /d/tracing/current_tracer
| true
|
40bea94c5d163d623f6a82b218dba9fb9dbeac5c
|
Shell
|
jonauman/puppet-hybris
|
/modules/aws/files/autoname.sh
|
UTF-8
| 2,293
| 3.375
| 3
|
[] |
no_license
|
#!/bin/bash
#
# autoname      Set your hostname in amazon
#
# chkconfig: 2345 90 60
# description: little script that will setup instance hostnema during bootup
### BEGIN INIT INFO
# Provides: autoname
# Required-Start: $local_fs $syslog
# Required-Stop: $local_fs $syslog
# Default-Start: 2345
# Default-Stop: 90
# Short-Description: run cron daemon
# Description: little script that will setup instance hostnema during bootup
### END INIT INFO
#######################################################
#
# chkconfig: - 60 60
# description: sethostname and add to route53
#
### BEGIN INIT INFO
# Provides: autoname
# Required-Start: $network $local_fs
# Required-Stop: $network $local_fs
# Should-Start: $syslog
# Should-Stop: $syslog
# Short-Description: run autoname at startup
# Description: hostname set in instance user-data (HOSTNAME=name)
#              instance IAM role set to addRoute53Record
#
# author: info@daveops.co.uk
### END INIT INFO

HOSTNAME=$(curl -s http://169.254.169.254/latest/user-data|grep HOSTNAME|cut -d = -f 2)
DOMAIN=$(curl -s http://169.254.169.254/latest/user-data|grep DOMAIN|cut -d = -f 2)
AWSPUBNAME=$(curl -s http://169.254.169.254/latest/meta-data/public-hostname)
AWSINTIP=$(curl -s http://169.254.169.254/latest/meta-data/local-ipv4)
TTL=300
OS=$(facter operatingsystem)
###########################
# cli53 executable
if [ $OS == "Amazon" ]; then
  COMMAND='/usr/bin/cli53'
else
  COMMAND='/usr/local/bin/cli53'
fi
# default hostname
if [ -z $HOSTNAME ]; then
  HOSTNAME="changeme"
fi
# default domain
if [ -z $DOMAIN ]; then
  DOMAIN=$(facter domain)
fi
# set hostname
if [ $OS == "Debian" ]; then
  echo "${HOSTNAME}" > /etc/hostname
  hostname $(cat /etc/hostname)
else
  sed -i -e "s/^HOSTNAME.*/HOSTNAME=${HOSTNAME}/g" /etc/sysconfig/network
fi
# set /etc/hosts (grep -q replaces the 'wc -l = 0' round trip)
if ! grep -q "$HOSTNAME" /etc/hosts; then
  echo -e "$(facter ipaddress)\t${HOSTNAME} ${HOSTNAME}.${DOMAIN}" >> /etc/hosts
fi
# Add public IP as CNAME record, if exists.  Testing the pipeline's exit
# status directly fixes the original '[ $(cmd) ]' test, which broke when
# the metadata listing returned more than one word.
# NOTE(review): the Route 53 zone is hard-coded to example.net rather
# than $DOMAIN -- confirm which is intended.
if curl -s http://169.254.169.254/latest/meta-data/ | grep -q public-hostname; then
  $COMMAND rrcreate -x $TTL -r example.net $HOSTNAME CNAME $AWSPUBNAME
fi
# Add internal IP as A record with -int suffix
$COMMAND rrcreate -x $TTL -r example.net ${HOSTNAME}-int A $AWSINTIP
| true
|
6eb26b127969b98c946953541c72723bbf91a6b5
|
Shell
|
keathley/dotfiles
|
/bin/histcount
|
UTF-8
| 922
| 4.03125
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Written by Chris Keathley
#
# Counts up history commands. I use this to find commands that I would like to
# alias.
# $ histcount
set -e

HISTFILE=~/.local/share/fish/fish_history

# Pipeline: take recent fish history entries, strip them down to bare
# command lines, then count how often each appears in the whole history
# file and print those used 4+ times, least-frequent first.
# NOTE(review): the per-command grep rescans the entire history file for
# every unique command (O(n*m)), and commands containing regex
# metacharacters may distort their own counts -- acceptable for an
# interactive helper, but worth confirming.
# Tail the file
tail -n 1000 $HISTFILE `# Tail the history file for fish` \
  | sed 's/^- cmd://' `# Strip the command line to only have commands` \
  | sed 's/when\: [[:digit:]]*//' `# Remove the 'when' line`\
  | awk 'length($0)>4' `# Only show commands that are over 4 characters` \
  | sort `# Pre-sort the list so we can remove duplicates` \
  | uniq `# Return only a list of uniq commands` \
  | while read command; do `# Count how many times the command has been called` \
      echo `grep "\b$command\b" $HISTFILE | wc -l` $command
    done \
  | awk '$1 >= 4' \
  | sort -n `# Sort by highest occuring`
| true
|
8f55ab2b900b822357a01b69480f673e15a9dbf2
|
Shell
|
bestjae/fg_jnl
|
/skt_script/micro/id-ns-block.sh
|
UTF-8
| 287
| 2.921875
| 3
|
[] |
no_license
|
#!/bin/bash
#$1 = namespace id
#
# Exercise 'nvme id-ns' in its different output modes against
# /dev/nvme0n1 and report pass/fail based on the last invocation
# (matching the original behaviour, which only checked the final call).
nvme id-ns /dev/nvme0n1
nvme id-ns /dev/nvme0n1 --vendor-specific
nvme id-ns /dev/nvme0n1 --raw-binary
# Test the command's exit status directly instead of the original
# 'ret=`echo $?`' round trip.
if nvme id-ns /dev/nvme0n1 --human-readable; then
	echo "id-ns command pass"
else
	echo "id-ns command fail"
fi
| true
|
9effc07393892762ed261c7c35deec8b3a728fb7
|
Shell
|
shopetan/shell
|
/sougo/shell/sougo.sh
|
UTF-8
| 9,092
| 3.53125
| 4
|
[] |
no_license
|
#!/bin/bash
##############################################################
## file name: sougo.sh ##
## function :file shuffle ##
##############################################################
# シャッフルの挙動はOK
# shuffle function
sougo_file(){
#拡張性に欠けるが,初めから3人に採点をしてもらう前提でプログラムを書く
srcDir=$1
dstDir1=$2
dstDir2=$3
dstDir3=$4
null=0
error=0
flag1='0'
flag2='0'
dbg=0
i=0
# echo "srcDir is [$srcDir]"
# echo "dstDir1 is [$dstDir1]"
# echo "dstDir2 is [$dstDir2]"
# echo "dstDir3 is [$dstDir3]"
cd $srcDir
#report Data
repData=(`ls`)
# sougo1Data
#shuffle Data
shuData=(`ls`)
prshuData=(`ls`)
prprshuData=(`ls`)
#total Data
toData=${#repData[@]}
count=${#repData[@]}
#ランダムシャッフル* 3
for j in `seq 1 3`
do
#init Data
toData=${#repData[@]}
count=${#repData[@]}
i=`expr $toData - 1 `
# 自分含む重複を許さないランダムシャッフル
# 参照されるファイルが少ない場合,
# 重複ファイルが必ず出来てしまうために終了されないプログラムになる.
while [ $i != $null ]
do
rand=$((RANDOM % $count))
# echo dbg i = $i
# echo dbg count = $count
# echo rand = $rand
dbg=`expr $count - 1 `
#自分のファイルと同じファイルになってしまった場合
# if [ "${shuData[$rand]}" = "${repData[`expr $count - 1`]" ]
# echo dbg shuData[$rand] = ${shuData[$rand]}
# echo dbg repData[`expr $count - 1`] = ${repData[`expr $count - 1`]}
if [ "$rand" = "$dbg" ]
then
# echo bug
if [ "$null" != "dbg" ]
then
i=`expr $i + 1`
fi
else
unset hoge
hoge=${shuData[$rand]}
# echo dbg repData[`expr $count - 1`] = ${repData[`expr $count - 1`]}
# echo dbg shuData[`expr $count - 1`] = ${shuData[`expr $count - 1`]}
# echo dbg hoge = $hoge
# echo dbg prshuData[`expr $count - 1`] = ${prshuData[`expr $count - 1`]}
# echo dbg prprshuData[`expr $count - 1`] = ${prprshuData[`expr $count - 1`]}
#過去のファイルと一致しないユニークなファイルだった場合
if [ "${prshuData[`expr $count - 1 `]}" != "$hoge" ]
then
flag1='0'
fi
if [ "${prprshuData[`expr $count - 1 `]}" != "$hoge" ]
then
flag2='0'
fi
#過去のファイルと同じファイルになってしまった場合
if [ "${prshuData[`expr $count - 1`]}" = "$hoge" ]
then
# echo dbg prshuData[`expr $count - 1`] is ${prshuData[`expr $count - 1`]}
# echo dbg hoge is $hoge
count=`expr $count + 1`
i=`expr $i + 1`
flag1='1'
error=`expr $error + 1`
elif [ "${prprshuData[`expr $count - 1 `]}" = "$hoge" ]
then
count=`expr $count + 1`
i=`expr $i + 1`
flag2='1'
error=`expr $error + 1`
fi
#過去のファイルのどちらとも同じになってしまった場合
if [ $flag1 = '1' ]
then
if [ $flag2 = '1' ]
then
count=`expr $count - 1`
i=`expr $i - 1`
fi
fi
# どちらにも該当しなかった場合
# ここで初めてシャッフルするファイルが決定される.
if [ $flag1 != '1' ]
then
if [ $flag2 != '1' ]
then
shuData[$rand]=${shuData[`expr $count - 1 `]}
shuData[`expr $count - 1 `]=$hoge
# echo dbg new shuData[`expr $count - 1`] = ${shuData[`expr $count - 1`]}
flag1='0'
flag2='0'
fi
fi
#重複したまま計算上詰んでしまい,これ以上ファイルを更新できない場合のエラー処理.
#必要変数をすべて初期化してやり直し
if [ $error -gt 10 ]
then
count=`expr $toData + 1 `
i=$toData
error=`expr $error - $error`
flag1='0'
flag2='0'
unset shuData
unset hoge
for i in `seq 1 $toData`
do
shuData[`expr $i - 1 `]=${repData[`expr $i - 1 `]}
done
fi
# 最後の項が過去のファイルと一致した場合,一度乱数の降り直しを行う
# プログラムにどこから間違っているのか探索させるより,一度乱数を振り直した方が速く動作した.
if [ $i = '1' ]
then
# echo dbg check i = $i
# echo dbg prshuData[`expr $count - 2`] is ${prshuData[`expr $count - 2`]}
# echo dbg hoge is $hoge
if [ "${prshuData[`expr $i - 1 `]}" = "${shuData[`expr $i - 1`]}" ]
then
count=`expr $toData + 1 `
i=$toData
error=`expr $error - $error`
flag1='0'
flag2='0'
unset shuData
unset hoge
for i in `seq 1 $toData`
do
shuData[`expr $i - 1 `]=${repData[`expr $i - 1 `]}
done
elif [ "${prprshuData[`expr $i - 1 `]}" = "${shuData[`expr $i - 1`]}" ]
then
count=`expr $toData + 1 `
i=$toData
error=`expr $error - $error`
flag1='0'
flag2='0'
unset shuData
unset hoge
for i in `seq 1 $toData`
do
shuData[`expr $i - 1 `]=${repData[`expr $i - 1 `]}
done
elif [ "${repData[`expr $i - 1 `]}" = "${shuData[`expr $i - 1`]}" ]
then
count=`expr $toData + 1 `
i=$toData
error=`expr $error - $error`
flag1='0'
flag2='0'
unset shuData
unset hoge
for i in `seq 1 $toData`
do
shuData[`expr $i - 1 `]=${repData[`expr $i - 1 `]}
done
fi
fi
count=`expr $count - 1`
fi
i=`expr $i - 1`
done
# echo ${repData[@]}
# echo ${shuData[@]}
# echo ""
# 格納先フォルダを表示
if [ $j = "1" ]
then
echo srcfile to "[$dstDir1]"
elif [ $j = "2" ]
then
echo srcfile to "[$dstDir2]"
elif [ $j = "3" ]
then
echo srcfile to "[$dstDir3]"
fi
#該当ファイルをコピー
toData=${#shuData[@]}
for i in `seq 1 $toData`
do
cd $srcDir
fName=`find . -name "${repData[$i - 1 ]}"`
# echo dbg shuData[`expr $i - 1`] is ${shuData[$i -1]}
# echo dbg fName is [$fName]
if [ $j = "1" ]
then
cp -f $fName $dstDir1
elif [ $j = "2" ]
then
cp -f $fName $dstDir2
elif [ $j = "3" ]
then
cp -f $fName $dstDir3
fi
done
#該当ファイルをリネーム
for i in `seq 1 $toData`
do
cd $srcDir
fName=`find . -name "${repData[$i-1]}"`
rName=`find . -name "${shuData[$i-1]}"`
fName=`echo ${fName:2:6}`
fName=`echo $fName | sed -e "s/$/.c/"`
if [ $j = "1" ]
then
cd $dstDir1
rName=`echo ${rName:2:6}`
rName=`echo $rName | sed -e "s/^/s1/"`
rName=`echo $rName | sed -e "s/$/.c/"`
mv $fName $rName
echo "$fName to $rName "
elif [ $j = "2" ]
then
cd $dstDir2
rName=`echo ${rName:2:6}`
rName=`echo $rName | sed -e "s/^/s2/"`
rName=`echo $rName | sed -e "s/$/.c/"`
mv $fName $rName
echo "$fName to $rName "
elif [ $j = "3" ]
then
cd $dstDir3
rName=`echo ${rName:2:6}`
rName=`echo $rName | sed -e "s/^/s3/"`
rName=`echo $rName | sed -e "s/$/.c/"`
mv $fName $rName
echo "$fName to $rName "
fi
#renameしたいけどこれではできないみたい
# fName=`echo ${rName:2:8}"`
# mv $rName $fName
done
if [ $j != 3 ]
then
unset prprshuData
for i in `seq 1 $toData`
do
prprshuData[`expr $i - 1 `]=${prshuData[`expr $i - 1 `]}
done
unset prshuData
for i in `seq 1 $toData`
do
prshuData[`expr $i - 1 `]=${shuData[`expr $i - 1 `]}
done
unset shuData
for i in `seq 1 $toData`
do
shuData[`expr $i - 1 `]=${repData[`expr $i - 1 `]}
done
fi
echo ""
done
echo dbg REPORT ${repData[@]}
echo dbg SOUGO1 ${prprshuData[@]}
echo dbg SOUGO2 ${prshuData[@]}
echo dbg SOUGO2 ${shuData[@]}
}
echo "###############################################"
echo "## START ##"
echo "###############################################"
echo ""
# start function
# sougo srcDir destDir
# Entry point: shuffle-and-distribute the source files from the first
# directory into the three destination directories (sougo1..3), one
# shuffled, renamed copy per round.
# NOTE(review): paths are hard-coded for one machine — parameterize before reuse.
sougo_file /home/owner/shell-master/sougo/admin /home/owner/shell-master/sougo/shdbg/home/class/j2/prog/j13/j2pro1007/sougo1 /home/owner/shell-master/sougo/shdbg/home/class/j2/prog/j13/j2pro1007/sougo2 /home/owner/shell-master/sougo/shdbg/home/class/j2/prog/j13/j2pro1007/sougo3
echo ""
echo "###############################################"
echo "## END ##"
echo "###############################################"
exit
| true
|
9a9e793c8f6e24d1abcc006b8f860a09805edcee
|
Shell
|
CermakM/char-generator
|
/src/scraper/font-unzip.sh
|
UTF-8
| 478
| 3.828125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
## Unzip all zip files found under the given path.
## Usage: font-unzip.sh TARGET
## TARGET is the parent font directory containing subdirectories with *.zip files.

TARGET=$(realpath "${1:-}")
[ -z "$TARGET" ] && printf '%s\n%s\n' 'argument TARGET has not been provided' \
    'TARGET is a path to the parent font directory containing subdirectories with *.zip files' && exit 1

# Fixes over the original:
#  - quote "$TARGET" / "$1" so paths with spaces survive;
#  - pass the file name as a positional argument instead of splicing {} into
#    the command text (safe against metacharacters in file names);
#  - use plain cd instead of pushd/popd, which are not available in POSIX sh
#    (the original `sh -c 'pushd …'` fails when /bin/sh is dash).
find "$TARGET" -name '*.zip' -type f -exec sh -c '
    font_file=$1
    font_dir=${font_file%/*}
    cd "$font_dir" || exit 1
    unzip "${font_file##*/}"
' _ {} \;

detox -r "$TARGET" # clean up file names
| true
|
488b8f017f4d367dcd5394b61e6ffa14ed65a008
|
Shell
|
lxdraw/reverse-ssh
|
/install-reverse-ssh-tunnel.sh
|
UTF-8
| 1,178
| 3.125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Install a reverse SSH tunnel launchd daemon on macOS:
# installs gcloud if missing, creates an SSH key pair, registers the public
# key with a Compute Engine instance, and loads a launchd plist.
#
# NOTE: this script relies on bash features (`set -o pipefail`, `&>`), so the
# shebang must be bash — the original `#!/usr/bin/env sh` breaks on shells
# like dash that reject `pipefail`.
set -euo pipefail

# Install gcloud cli
if ! command -v gcloud &> /dev/null
then
    echo "gcloud not found. Installing..."
    curl -Lo google-cloud-sdk.tar.gz https://dl.google.com/dl/cloudsdk/channels/rapid/downloads/google-cloud-sdk-331.0.0-darwin-x86_64.tar.gz
    tar xvzf google-cloud-sdk.tar.gz
    ./google-cloud-sdk/install.sh
else
    echo "Found gcloud."
fi

gcloud auth login
gcloud config set project pubsubplayground-11081991

# Creating SSH keys
echo "Creating SSH keys"
# ssh-keygen key type names are lowercase: the original `-t RSA` is rejected
# with "unknown key type RSA".
ssh-keygen -t rsa -f operation_nigeria -C operation-nigeria@alexdrawbond.com -N ""
ssh-add -K operation_nigeria
echo "SSH keys created"

echo "Adding SSH keys to Compute Engine"
# Split assignment from export so a failing $(cat …) is not masked by the
# always-zero exit status of `export` under `set -e`.
op_nigeria_key=$(cat operation_nigeria.pub)
export op_nigeria_key
echo "operation-nigeria:$op_nigeria_key operation-nigeria@alexdrawbond.com" > compute-keys
gcloud compute instances add-metadata reverse-ssh --metadata-from-file ssh-keys=compute-keys

# Add launchd daemon
sed -i.bu "s~SCRIPT_LOCATION~$(pwd)~g" com.alexdrawbond.run-reverse-ssh-tunnel.plist
sudo cp com.alexdrawbond.run-reverse-ssh-tunnel.plist /Library/LaunchDaemons/
launchctl load /Library/LaunchDaemons/com.alexdrawbond.run-reverse-ssh-tunnel.plist
echo "Reverse SSH tunnel daemon installed"
| true
|
d22fc6ce33f98f0ef31fdde20e2d363d74826de1
|
Shell
|
AdoptOpenJDK/jdk9-jigsaw
|
/session-1-jigsaw-intro/03_MultiModuleCompilation/multiModCompile.sh
|
UTF-8
| 1,127
| 3.28125
| 3
|
[
"CC0-1.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash
# Tutorial exercise: compile the Jigsaw modules 'org.astro' and
# 'com.greetings' from src/ into mods/ with a single javac invocation.
set -eu

source ../../common-functions.sh

DESTINATION_FOLDER="mods"

echo ""
echo "${info} *** Displaying the contents (source files) of the 'src' folder *** ${normal}"
runTree src

echo ""
echo "${info} *** Compiling both modules into the '$DESTINATION_FOLDER' folder *** ${normal}"
# NOTE: the bracketed text below is an intentional placeholder for the reader
# to replace with the right javac option (presumably --module-source-path —
# confirm against the linked Java 9 resources before filling it in).
javac -d $DESTINATION_FOLDER \
[parameter to point to the folder with source files in a module] src $(find . -name "*.java")

#
# * look for tool references, quick started guides, and other documentation in the Java 9 Resource (https://github.com/AdoptOpenJDK/jdk9-jigsaw/blob/master/Java-9-Resources.md).
#
# *************************************************************************************
#
# ...
# In the above both the modules 'org.astro' and 'com.greetings' are compiled at the same time, dependency is fulfilled immediately.
#
# *************************************************************************************

echo ""
echo "${info} *** Displaying the contents (modules) of the 'mods' folder *** ${normal}"
runTree "$DESTINATION_FOLDER"

# See ../01_Greetings/compile.sh for explanations to above commands
| true
|
0097af039e7874ffbd187cf72c61457dd99a4ab5
|
Shell
|
acampbell1990/kt-robots
|
/server/deploy-server.sh
|
UTF-8
| 1,756
| 3.28125
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash

#######################################
# Print the value passed as $1, or abort the whole script with a message.
# Arguments: $1 - value to check (typically captured from an aws CLI call)
#            $2 - error message to show when the value is empty
# Outputs:   " = '<value>'" on stdout when non-empty; message on stderr otherwise
# Exits:     1 when the value is empty or unset
#
# Fixes over the original:
#  - empty-but-set values (e.g. aws returned nothing) now fail instead of
#    printing " = ''" and continuing (the `${v+x}` test treated "" as set);
#  - the error message is quoted and sent to stderr.
# NOTE(review): call sites pass the value unquoted (`is_set $VPC_ID …`); an
# empty expansion shifts the message into $1 — quote arguments there too.
#######################################
is_set() {
  local v=${1-}
  local message=${2-}
  if [ -n "$v" ]; then
    echo " = '${v}'"
  else
    echo "$message" >&2
    exit 1
  fi
}
# Resolve account / network identifiers from the current AWS credentials.
# NOTE(review): the is_set arguments are unquoted; if a lookup returns an
# empty string the message word shifts into $1 — quote them for reliability.
AWS_ACCOUNT_ID=$(aws sts get-caller-identity --query Account --output text)
is_set $AWS_ACCOUNT_ID "Unable to get AWS_ACCOUNT_ID from caller-identity"
VPC_ID=$(aws ec2 describe-vpcs --filters Name=isDefault,Values=true --query 'Vpcs[*].VpcId' --output text)
is_set $VPC_ID "Unable to get default VPC_ID from list of VPCs"
# First positional argument selects the region (defaults to us-east-1).
REGION=${1:-us-east-1}
# Public subnets of the default VPC, tab-separated output joined with commas.
SUBNET_IDS=$(aws ec2 describe-subnets --filter Name=vpc-id,Values=$VPC_ID --query 'Subnets[?MapPublicIpOnLaunch==`true`].SubnetId' --output text | sed 's/\t/,/g')
is_set $SUBNET_IDS "Unable to find public subnets for default VPC"
# Create/refresh the Fargate cluster stack (also provisions the ECR repo).
aws cloudformation deploy --stack-name kt-robots-server --template-file fargate_cluster_cfn.yml --parameter-overrides VpcId=${VPC_ID} --capabilities CAPABILITY_NAMED_IAM
echo "Building Docker image"
docker build -t ktrobots-server .
echo "Loggin in"
aws ecr get-login-password --region "${REGION}" | docker login --username AWS --password-stdin "${AWS_ACCOUNT_ID}.dkr.ecr.${REGION}.amazonaws.com"
echo "Tagging image"
docker tag ktrobots-server:latest "${AWS_ACCOUNT_ID}.dkr.ecr.${REGION}.amazonaws.com/kt-robots-server-repository:latest"
echo "Pushing image"
docker push "${AWS_ACCOUNT_ID}.dkr.ecr.${REGION}.amazonaws.com/kt-robots-server-repository:latest"
# Deploy the ECS service pointing at the freshly pushed image.
aws cloudformation deploy --stack-name kt-robots-server-service --template-file fargate_service_cfn.yml --parameter-overrides VpcId=${VPC_ID} Subnets=${SUBNET_IDS} Cpu=1024 Memory=2GB --capabilities CAPABILITY_NAMED_IAM
# NOTE(review): ECS_TASK_ARN is captured but not used in this visible chunk.
ECS_TASK_ARN=$(aws ecs list-tasks --cluster kt-robots-server-cluster --query 'taskArns[*]' --output text | sed 's/\t/,/g')
| true
|
83d0faa6cc3281aed61be211fccea778cc5a28f3
|
Shell
|
JorgenWan/RetrieveNMT
|
/fairseq-baseline/shells/MD/test_TM.sh
|
UTF-8
| 1,278
| 2.65625
| 3
|
[] |
no_license
|
#!/bin/bash
# Generate translations with a trained TM model and write results to
# $TEXT/result.txt.
# Usage: test_TM.sh SRC TGT TESTSET
#   SRC/TGT   language pair: en de, or de en
#   TESTSET   JRC | newstest2014 | Opensubtitles | tst2014
export CUDA_VISIBLE_DEVICES=3
PYTHON=/home/v-jiaya/anaconda3/bin/python
GENERATE=/home/v-jiaya/RetrieveNMT/fairseq-baseline/generate.py
src=$1
tgt=$2
testset=$3

# Pick model/data directories for the language pair.
# (`[ a -a b ]` is deprecated and ambiguous; use two tests joined with &&.)
if [ "$src" = "en" ] && [ "$tgt" = "de" ]; then
    MODEL_DIR=/home/v-jiaya/RetrieveNMT/data/MD/retrieve-en2de-top2/concat/model/en2de-TM-base1/
    TEXT=/home/v-jiaya/RetrieveNMT/data/MD/retrieve-en2de-top2/concat/test-data-bin/
elif [ "$src" = "de" ] && [ "$tgt" = "en" ]; then
    MODEL_DIR=/home/v-jiaya/RetrieveNMT/data/MD/retrieve-de2en-top2/concat/model/de2en-TM-base1/
    TEXT=/home/v-jiaya/RetrieveNMT/data/MD/retrieve-de2en-top2/concat/test-data-bin/
else
    echo "Error Language !"
    exit 1   # bare `exit` returned the echo's status (0) on this error path
fi

# Map the test set name to fairseq's gen-subset index and length penalty.
case "$testset" in
    JRC)           gen_subset=test0; lenpen=1.0 ;;
    newstest2014)  gen_subset=test1; lenpen=0 ;;
    Opensubtitles) gen_subset=test2; lenpen=1.0 ;;
    tst2014)       gen_subset=test3; lenpen=1.0 ;;
    *)
        echo "Error testset !"
        exit 1
        ;;
esac

$PYTHON $GENERATE $TEXT/ --path $MODEL_DIR/checkpoint_best.pt --batch-size 32 --beam 8 --gen-subset $gen_subset --source-lang $src --target-lang $tgt --remove-bpe --lenpen $lenpen --min-len 0 --unkpen 0 --no-repeat-ngram-size 4 --output $TEXT/result.txt
| true
|
9fe9b396e8c8d3357dcc0fd9db79163b447f37be
|
Shell
|
philtap/Yelp_reviews_analysis
|
/NLP_code/Install_NLP.sh
|
UTF-8
| 890
| 2.84375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# This file should be run from the NLP directory in the submission zip.
# Installs Python dependencies, NLTK stopwords and GloVe embeddings.
#
# Fix: all echo arguments are quoted — the original unquoted banners such as
# `-***…*-` and `-----` are shell glob patterns and could expand against
# matching file names in the working directory.

echo "-**********************************************************-"
echo "Script:Install_NLP.sh"
echo "-**********************************************************-"

echo "------------------------------------------"
echo "Installing dependencies...."
echo "------------------------------------------"
pip3 install -r requirements.txt

echo "------------------------------------------"
echo "Downloading NLTK stopwords"
echo "------------------------------------------"
python3 -m nltk.downloader stopwords

echo "------------------------------------------------"
echo "Downloading pre-trained word embeddings from glove"
echo "------------------------------------------------"
wget http://nlp.stanford.edu/data/glove.6B.zip
unzip glove.6B.zip
mkdir glove
mv glove.6B.100d.txt ./glove
mv glove.6B.50d.txt ./glove
rm glove.6B*
| true
|
eafa6a2c0953ffaa2db7d4d73e35dc37fc82149f
|
Shell
|
toamitkumar/chef_cookbooks_suse
|
/bootstraps/base_packages.sh
|
UTF-8
| 422
| 3.328125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash

# Install the base build packages on SUSE, selecting the package set that
# matches the major release in $VERSION (expected to be set by the caller).
#
# Fixes over the original:
#  - `[ "10" -eq "$VERSION"]` was missing the space before `]` (syntax error);
#  - a case statement avoids `-eq` failing on non-numeric $VERSION;
#  - the unhandled-version path exits 1 instead of bare `exit` (status 0).
function bootstrap_base_packages {
  echo "= Base packages"
  case "${VERSION:-}" in
    10)
      zypper install gcc gcc-c++ zlib-devel openssl-devel curl-devel readline-devel
      ;;
    11)
      zypper install gcc gcc-c++ zlib-devel libopenssl-devel libcurl-devel readline-devel
      ;;
    *)
      echo "== Unhandled version: ${VERSION:-}"
      exit 1
      ;;
  esac
  echo "= Base packages installed"
}
| true
|
03e83da3edce172eef9cc7da8a090415f957a30b
|
Shell
|
johnlee175/ImageProcessor
|
/imgproc-java/dist/linux/launcher.sh
|
UTF-8
| 2,006
| 3.25
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Launch the ImageProcessor Java application, preferring the system JRE and
# falling back to a bundled one in ./jre. Pass "debug"/-debug/--debug as the
# first argument to wait for a JDWP debugger on 127.0.0.1:51230.

cd "$(dirname "$0")"

# download and copy jre to this directory
JAVA_CMD=java
if ! command -v java >/dev/null 2>&1; then
    JAVA_BIN="$(pwd)/jre/bin/java"
    echo "java not found, using built-in jre in ${JAVA_BIN}"
    # Fix: the original used `alias java=…`, but aliases are NOT expanded in
    # non-interactive shells, so the fallback never took effect. Use a
    # variable for the launcher command instead.
    JAVA_CMD=${JAVA_BIN}
fi

# you can java-debug with 'jdb -attach 127.0.0.1:51230'
# you can java-debug with Intellij IDEA Remote Debug Configurations, Socket, Attach, 51230 port
if [ "$1" == "debug" -o "$1" == "-debug" -o "$1" == "--debug" ]; then
    DEBUG_PARAM='-agentlib:jdwp=transport=dt_socket,address=127.0.0.1:51230,suspend=y,server=y'
else
    DEBUG_PARAM=''
fi

# The classpath separator is ';' on Windows JVMs, ':' elsewhere; detect it
# from the java binary itself.
PATH_SEPARATOR=':'
JAVA_FILE_INFO="$(file "$(realpath "$(which java)")")"
if [ $(expr "${JAVA_FILE_INFO}" : ".*Windows.*") -ne 0 ]; then
    PATH_SEPARATOR=';'
fi

# DEBUG_PARAM is deliberately unquoted: when empty it must disappear rather
# than be passed as an empty argument.
"${JAVA_CMD}" \
     ${DEBUG_PARAM} \
     -Xmx2048M -Xms1024M \
     -Dfile.encoding="UTF-8" \
     -Djava.library.path="jniLibs" \
     -classpath "ImageProcessor-1.0.0.jar${PATH_SEPARATOR}libs/*" \
     -Dsun.java2d.opengl=true \
     -splash:"splash.png" \
     com.johnsoft.MainLauncher

# you can jni-debug with follow steps:
# 1. break point on java code call (java-debug) before jni call;
# 2. execute command ps or jps to location the current process pid;
# 3. 'gdb/lldb -p pid' or Attach to local process with Clion;
# 4. break point on jni code call;

# In Linux (Ubuntu) issue:
# Error: ptrace: Operation not permitted
# We should execute command: sudo su - && echo 0 > /proc/sys/kernel/yama/ptrace_scope

# jdb command usages: [you can type help]
# use <path-to-src/main/java>
# stop at <class-full-name:line>
# run/cont
# next/step
# list/threads/locals/dump <object name>/print <expr>

# lldb (on macosx) command usages: [you can type help]
# settings set target.source-map /build_src /source
# breakpoint set --file main.c --line 10
# run/continue
# next/step
# list/thread list/bt/frame variable/p <expr>

# gdb (on linux/unix) command usages: [you can type help]
# dir <path-to-src>
# break main.c:10
# run/continue
# next/step
# list/info threads/bt/info locals/p <expr>
| true
|
fc5fdd4dacf268f6eb50711b64ae03788ad0aef0
|
Shell
|
psteinb/deeprace
|
/scripts/resnet/titanx-pascal-short/resnet50.sh
|
UTF-8
| 1,887
| 2.515625
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# Benchmark resnet56v1 training inside Singularity containers (TF 1.3 and
# TF 1.7), 10 repetitions, batch sizes 32..512, first on 1 GPU then on 2.
cd /home/steinba/development/deeprace/scripts/titanx-pascal-short
export CUDA_VISIBLE_DEVICES=1
module load singularity/2.4.2
for r in `seq 1 10`;
do
    echo
    # NOTE(review): $i is printed before the inner loop defines it — on the
    # first pass it is empty; probably meant to be $r.
    echo $i
    echo

    for i in 32 64 128 256 512;
    do
        echo "$0 batch_size=$i run ${r}/10"
        singularity exec -B /home/steinbac/development/deeprace/:/deeprace --nv /projects/steinbac/software/singularity/sandbox//tf1.3-plus.simg python3 /deeprace/deeprace.py train -O batch_size=${i} -c "titanx-pascal:1,fs:nfs,singularity:lustre" -t /deeprace/scripts/titanx-pascal-short/resnet56v1-short-bs${i}-singularity-${r}.tsv -e 10 resnet56v1
        singularity exec -B /home/steinbac/development/deeprace/:/deeprace --nv /projects/steinbac/software/singularity/sandbox//tf1.7-plus.simg python3 /deeprace/deeprace.py train -O batch_size=${i} -b tf -c "titanx-pascal:1,fs:nfs,singularity:lustre" -t /deeprace/scripts/titanx-pascal-short/resnet56v1-short-bs${i}-singularity-${r}.tsv -e 10 resnet56v1
    done
done

# Second pass: same sweep on two GPUs (n_gpus=2).
# NOTE(review): the cd above uses /home/steinba/… while the bind mounts use
# /home/steinbac/… — looks like a typo in one of them; verify on the host.
export CUDA_VISIBLE_DEVICES=0,1
for r in `seq 1 10`;
do
    echo
    echo $i
    echo

    for i in 32 64 128 256 512;
    do
        echo "$0 batch_size=$i run ${r}/10"
        singularity exec -B /home/steinbac/development/deeprace/:/deeprace --nv /projects/steinbac/software/singularity/sandbox//tf1.3-plus.simg python3 /deeprace/deeprace.py train -O batch_size=${i},n_gpus=2 -c "titanx-pascal:2,fs:nfs,singularity:lustre" -t /deeprace/scripts/titanx-pascal-short/resnet56v1-short-bs${i}-2gpus-singularity-${r}.tsv -e 10 resnet56v1
        singularity exec -B /home/steinbac/development/deeprace/:/deeprace --nv /projects/steinbac/software/singularity/sandbox//tf1.7-plus.simg python3 /deeprace/deeprace.py train -O batch_size=${i},n_gpus=2 -b tf -c "titanx-pascal:2,fs:nfs,singularity:lustre" -t /deeprace/scripts/titanx-pascal-short/resnet56v1-short-bs${i}-2gpus-singularity-${r}.tsv -e 10 resnet56v1
    done
done
| true
|
5e708c71a7bf3163d1ba9f6a4fd37686399cb0e3
|
Shell
|
sensu-plugins/sensu-plugins-postgres
|
/test/fixtures/bootstrap.sh
|
UTF-8
| 869
| 3.171875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#
# Set up a super simple web server and make it accept GET and POST requests
# for Sensu plugin testing.
#
set -e

# base utilities that need to exist to start bootstrapping
apt-get update
apt-get install -y wget

# setup the rubies
source /etc/profile
DATA_DIR=/tmp/kitchen/data
# NOTE(review): RUBY_HOME is assigned but appears unused in this script.
RUBY_HOME=${MY_RUBY_HOME}

# setup a .pgpass file (heredoc contents are written verbatim)
cat << EOF > /tmp/.pgpass
# This is a comment
*:5432:*:postgres:<REDACTED>
EOF

# Start bootstrapping
## install some required deps for pg_gem to install
apt-get install -y libpq-dev build-essential

# setup postgres server
## TODO: multiple postgres versions and replication the versions should probably
## be matrixed but wanted to start as small as possible initially.

# End of actual bootstrap

# Install gems: build the plugin gem from the kitchen data dir and install it.
cd $DATA_DIR
SIGN_GEM=false gem build sensu-plugins-postgres.gemspec
gem install sensu-plugins-postgres-*.gem
| true
|
27db01de66fbc23c91633d73e020d7bc1eaad9cc
|
Shell
|
skmpro27/linux-content
|
/array/2ndLargest.sh
|
UTF-8
| 782
| 3.703125
| 4
|
[] |
no_license
|
#! /bin/bash -x
# Fill an array with 10 random 3-digit numbers (100..999), then report the
# first and second largest/smallest values.

declare -a numbers
i=0
# Generate the 10 random values.
while [ $i -lt 10 ]
do
	numbers[$i]=$(( RANDOM%900+100 ))
	i=$(( i+1 ))
done
echo ${numbers[@]}

# First pass: find the overall maximum (max1) and minimum (min1).
i=1
min1=${numbers[0]}
max1=${numbers[0]}
while [ $i -lt ${#numbers[@]} ]
do
	if [ ${numbers[$i]} -gt $max1 ]
	then
		max1=${numbers[$i]}
	elif [ ${numbers[$i]} -lt $min1 ]
	then
		min1=${numbers[$i]}
	fi
	i=$(( i+1 ))
done

# Second pass: largest value strictly below max1 and smallest value strictly
# above min1 (i.e. second-largest/smallest DISTINCT values). Sentinels 0 and
# 1000 sit outside the 100..999 range of generated values.
i=0
max2=0
min2=1000
while [ $i -lt ${#numbers[@]} ]
do
	if [ ${numbers[$i]} -gt $max2 -a ${numbers[$i]} -lt $max1 ]
	then
		max2=${numbers[$i]}
	elif [ ${numbers[$i]} -lt $min2 -a ${numbers[$i]} -gt $min1 ]
	then
		min2=${numbers[$i]}
	fi
	i=$(( i+1 ))
done
echo ""
echo "1st largest number is" $max1
echo "1st smallest number is" $min1
echo ""
echo "2nd largest number is" $max2
echo "2nd smallest number is" $min2
| true
|
77a009d8b94a7a6feafd609842231df02ce9e6c8
|
Shell
|
yijia2413/docker-bigdata-cluster
|
/scripts/download_tars.sh
|
UTF-8
| 1,241
| 3.046875
| 3
|
[
"Unlicense"
] |
permissive
|
#!/bin/bash
# Download the Hadoop/Hive/Kafka/Spark release tarballs into ./tars and move
# each one into the docker build context directory that needs it.
set -ex

CUR_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"
TARS_DIR="${CUR_DIR}/tars"
mkdir -p ${TARS_DIR}

HADOOP_VERSION=3.1.1
HADOOP_URL="https://dist.apache.org/repos/dist/release/hadoop/common/hadoop-${HADOOP_VERSION}/hadoop-${HADOOP_VERSION}.tar.gz"
wget ${HADOOP_URL} -O ${TARS_DIR}/$(basename ${HADOOP_URL})

HIVE_VERSION=2.3.4
HIVE_URL="http://mirror.bit.edu.cn/apache/hive/hive-${HIVE_VERSION}/apache-hive-${HIVE_VERSION}-bin.tar.gz"
wget ${HIVE_URL} -O ${TARS_DIR}/$(basename ${HIVE_URL})

KAFKA_VERSION=2.1.0
SCALA_VERSION=2.12
KAFKA_URL="http://mirrors.tuna.tsinghua.edu.cn/apache/kafka/${KAFKA_VERSION}/kafka_${SCALA_VERSION}-${KAFKA_VERSION}.tgz"
wget ${KAFKA_URL} -O ${TARS_DIR}/$(basename ${KAFKA_URL})

SPARK_VERSION=2.4.0
PREBUILD_HADOOP_VERSION=2.7
SPARK_URL="http://mirror.bit.edu.cn/apache/spark/spark-${SPARK_VERSION}/spark-${SPARK_VERSION}-bin-hadoop${PREBUILD_HADOOP_VERSION}.tgz"
wget ${SPARK_URL} -O ${TARS_DIR}/$(basename ${SPARK_URL})

# move to build inside docker
mv ${TARS_DIR}/$(basename ${HADOOP_URL}) ../hadoop/base
mv ${TARS_DIR}/$(basename ${HIVE_URL}) ../hive/base
mv ${TARS_DIR}/$(basename ${KAFKA_URL}) ../kafka
mv ${TARS_DIR}/$(basename ${SPARK_URL}) ../spark/base
| true
|
f3c74dc5a038b77ba93ec3e2e122d11270ceea1a
|
Shell
|
vraj20/Vraj-Shah
|
/121063_lab5/lab5_3.sh
|
UTF-8
| 68
| 2.75
| 3
|
[] |
no_license
|
#!/bin/bash
# Print the first argument $2 times, one per line.
# Usage: lab5_3.sh WORD COUNT
#
# Original bugs fixed:
#  - `[ "$i" <= "$2" ]` used `<=`, which `[` does not support ("<" is a
#    redirection in that position); use -le for integer comparison;
#  - `expr $i +1` was missing a space after `+`, so the counter never
#    advanced and the loop could not terminate.

print_repeated() {
    local word=$1
    local count=$2
    local n=1
    while [ "$n" -le "$count" ]; do
        echo "$word"
        n=$((n + 1))
    done
}

# Only run when both arguments were supplied, so sourcing is harmless.
if [ "$#" -ge 2 ]; then
    print_repeated "$1" "$2"
fi
| true
|
b9e258a78d9b494ebc85a1997ff27b56c485f694
|
Shell
|
wchsh908/myEm
|
/tools/obsolete/bounce/bounce_analysis.sh
|
UTF-8
| 7,453
| 3.046875
| 3
|
[] |
no_license
|
#!/bin/sh
# 本程序用于从输入的退信邮箱帐号中读取新邮件并作分析处理。功能包括:
# 1.查出无效的邮箱地址,结果放在user_not_found目录下各个文件中。
#分析一封邮件.参数:邮件文件全名;返回:无
read163()
{
#网易
if grep -q -m1 "550 User not found" $1
then
recipient=$(grep -Eo -m1 "([a-zA-Z0-9_-.+]+)@(vip.163.com|163.com|126.com|yeah.net)" $1)
echo "User not found: $recipient. "
echo $recipient >> $basedir/user_not_found/163
fi
}
readqq()
{
#腾讯
if grep -q -m1 "550 Mailbox not found" $1
then
recipient=$(grep -Eo -m1 "([a-zA-Z0-9_-.+]+)@(vip.qq.com|qq.com|foxmail.com)" $1)
echo "User not found: $recipient. "
echo $recipient >> $basedir/user_not_found/qq
fi
}
readsina()
{
#新浪
if grep -q -m1 "User unknown in virtual mailbox table" $1
then
recipient=$(grep -Eo -m1 "([a-zA-Z0-9_-.+]+)@(vip.sina.com|sina.com|sina.com.cn)" $1)
echo "User not found: $recipient. "
echo $recipient >> $basedir/user_not_found/sina
fi
}
readhotmail()
{
#微软
if grep -q -m1 "mailbox unavailable" $1
then
recipient=$(grep -Eo -m1 "([a-zA-Z0-9_-.+]+)@(hotmail.com|live.cn|msn.com)" $1)
echo "User not found: $recipient. "
echo $recipient >> $basedir/user_not_found/hotmail
fi
}
readyahoo()
{
#雅虎
if grep -q -m1 "user doesn't have a * account" $1
then
recipient=$(grep -Eo -m1 "([a-zA-Z0-9_-.+]+)@(yahoo.com.cn|yahoo.cn|yahoo.com|yahoo.com.hk|yahoo.com.tw)" $1)
echo "User not found: $recipient. "
echo $recipient >> $basedir/user_not_found/yahoo
fi
}
readsohu()
{
#搜狐
if grep -q -m1 "User unknown in local recipient table" $1
then
recipient=$(grep -Eo -m1 "([a-zA-Z0-9_-.+]+)@(sohu.com)" $1)
echo "User not found: $recipient. "
echo $recipient >> $basedir/user_not_found/sohu
fi
}
readgmail()
{
#gmail
if grep -q -m1 "550-5.1.1" $1
then
recipient=$(grep -Eo -m1 "([a-zA-Z0-9_-.+]+)@(gmail.com)" $1)
echo "User not found: $recipient. "
echo $recipient >> $basedir/user_not_found/gmail
fi
}
readchinatelecom()
{
#中国电信
if grep -q -m1 "550 no such user" $1
then
recipient=$(grep -Eo -m1 "([a-zA-Z0-9_-.+]+)@(21cn.com|189.cn)" $1)
echo "User not found: $recipient. "
echo $recipient >> $basedir/user_not_found/chinatelecom
fi
}
readtom()
{
#tom和163.net
if grep -q -m1 "554 Invalid recipient" $1
then
recipient=$(grep -Eo -m1 "([a-zA-Z0-9_-.+]+)@(tom.com|163.net)" $1)
echo "User not found: $recipient. "
echo $recipient >> $basedir/user_not_found/tom
fi
}
read139()
{
#中国移动139
if grep -q -m1 "User not found" $1
then
recipient=$(grep -Eo -m1 "([a-zA-Z0-9_-.+]+)@(139.com)" $1)
echo "User not found: $recipient. "
echo $recipient >> $basedir/user_not_found/139
fi
}
##################################################################################################################
# 分析一个邮箱帐号下的所有新邮件.参数:合法的邮箱帐号名;返回:无.
readaccount()
{
acct=$1
bcuser=${acct%@*}
bcdomain=${acct#*@}
if [ -d "/home/vmail/$bcdomain/$bcuser/Maildir" ]
then
getaccounttime $acct # 获取我们上次处理这个邮箱帐号的时间,保存在变量$lasttime里
bouncedirs="/home/vmail/$bcdomain/$bcuser/Maildir/cur /home/vmail/$bcdomain/$bcuser/Maildir/new"
for bcdir in $bouncedirs
do
if [ -d $bcdir ]
then
for bcfile in $(ls $bcdir) #取出每封邮件
do
mailfile=$bcdir/$bcfile
getfiletime $mailfile #获取邮箱文件的修改时间,结果保存在$filetime里
#如果该邮件的修改时间晚于我们上次处理这个邮箱帐号的时间 被认为是新邮件,才会分析处理
if (( $filetime > $lasttime ))
then
if grep -Eqo -m1 "([a-zA-Z0-9_-.+]+)@(vip.163.com|163.com|126.com|yeah.net)" $mailfile
then
read163 $mailfile #网易系列
elif grep -Eqo -m1 "([a-zA-Z0-9_-.+]+)@(vip.qq.com|qq.com|foxmail.com)" $mailfile
then
readqq $mailfile #腾讯系列
elif grep -Eqo -m1 "([a-zA-Z0-9_-.+]+)@(vip.sina.com|sina.com|sina.com.cn)" $mailfile
then
readsina $mailfile #新浪系列
elif grep -Eqo -m1 "([a-zA-Z0-9_-.+]+)@(hotmail.com|live.cn|msn.com)" $mailfile
then
readhotmail $mailfile #hotmail系列
elif grep -Eqo -m1 "([a-zA-Z0-9_-.+]+)@(yahoo.com.cn|yahoo.cn|yahoo.com|yahoo.com.hk|yahoo.com.tw)" $mailfile
then
readyahoo $mailfile #雅虎系列
elif grep -Eqo -m1 "([a-zA-Z0-9_-.+]+)@(sohu.com)" $mailfile
then
readsohu $mailfile #搜狐
elif grep -Eqo -m1 "([a-zA-Z0-9_-.+]+)@(gmail.com)" $mailfile
then
readgmail $mailfile #谷歌
elif grep -Eqo -m1 "([a-zA-Z0-9_-.+]+)@(21cn.com|189.cn)" $mailfile
then
readchinatelecom $mailfile #中国电信系列
elif grep -Eqo -m1 "([a-zA-Z0-9_-.+]+)@(tom.com|163.net)" $mailfile
then
readtom $mailfile #tom和163.net
elif grep -Eqo -m1 "([a-zA-Z0-9_-.+]+)@(139.com)" $mailfile
then
read139 $mailfile #中国移动139
fi
fi
done
fi
done
setaccounttime $acct
else
echo "Error.can not get emails from $acct."
fi
}
# Get a file's modification time. Argument: file name; result: global variable filetime.
getfiletime()
{
# The raw mtime looks like "2012-09-05 20:25:50.000000000 +0800".
filetime=$(stat -c%y $1)
# Strip "-", spaces, ":" and everything from the fractional seconds onward.
filetime=${filetime//-/}
filetime=${filetime// /}
filetime=${filetime//:/}
filetime=${filetime//.*/} #final format: 20120905202550 — chosen so timestamps compare as plain numbers
}
# Record the time we processed a mailbox account. Argument: account; returns: nothing.
setaccounttime()
{
curtime=$(date "+%Y%m%d%H%M%S") #format: 20120905202550
sed '/'"$1"'/d' $basedir/lasttime.txt > tmp #drop the old "account:time" marker
echo "$1:$curtime" >> tmp #append the new marker
mv -f tmp $basedir/lasttime.txt
}
# Get the time an account was last processed. Argument: account; result: global variable lasttime (0 if never seen).
getaccounttime()
{
if grep -Eqo -m1 "$1:[0-9]+" $basedir/lasttime.txt
then
str=$(grep -Eo -m1 "$1:[0-9]+" $basedir/lasttime.txt)
lasttime=${str#*:} #strip the leading "account:" part
else
lasttime=0
fi
}
##############################################################################################################
#程序入口
##############################################################################################################
#全局变量
basedir=/var/www/html/tools/bounce
filetime=0
lasttime=0
i=1
#打开bash的开关,使shell支持5个扩展的模式匹配操作符
shopt -s extglob
#处理输入的参数
if (( $# != 0 ))
then
#输入的是邮箱帐号
bc_accounts=$@
else
#没有输入参数,从bounce_accounts.txt这个文档中读出退信邮箱帐号
if [ -f $basedir/bounce_accounts.txt ]
then
bc_accounts=$(cat $basedir/bounce_accounts.txt)
else
echo "bounce_accounts.txt not found.Quit."
exit 1
fi
fi
for account in $bc_accounts
do
#判断是不是一个合法的email邮箱帐号
if [[ $account != +([a-zA-Z0-9_-.+])@+([a-zA-Z0-9_-.+]).[a-zA-Z][a-zA-Z]?([a-zA-Z])?([a-zA-Z])?([a-zA-Z]) ]]
then
echo "$i. Error!$account is not an email account."
else
echo "$i. Reading and analysing new emails of $account."
readaccount $account
fi
done
exit 0
| true
|
e4985503d4aa6d83fbbdc1a0670713a5dd781593
|
Shell
|
panpanse/systeme_fichiers_distribues
|
/benchmark/readingBigFile.sh
|
UTF-8
| 191
| 3.015625
| 3
|
[] |
no_license
|
#!/bin/bash
# Benchmark: read this machine's three big files and discard the output.
# Argument: $1 - parent directory containing one subdirectory per host.

whereToWrite=$1
nameOfMachine=$(uname -n)
cd "$whereToWrite/$nameOfMachine" || exit 1

# lecture des gros fichiers — discard to /dev/null.
# Fix: the original redirected to "/dev/nul" (missing final 'l'), which
# silently created a regular file named "nul" and wrote the data into it,
# skewing the I/O benchmark.
cat bigFile1 > /dev/null
cat bigFile2 > /dev/null
cat bigFile3 > /dev/null
| true
|
56e65dc935a81c7809d77c9b877645c480240fe6
|
Shell
|
richard512/opentraveldata
|
/tools/prepare_ref_dump_file.sh
|
UTF-8
| 9,004
| 4.03125
| 4
|
[] |
no_license
|
#!/bin/bash
#
# One parameter is optional for this script:
# - the file-path of the dump file extracted from the reference data.
#
##
# MacOS 'date' vs GNU date
DATE_TOOL=date
if [ -f /usr/bin/sw_vers ]
then
DATE_TOOL=gdate
fi
displayRefDetails() {
##
# Snapshot date
SNAPSHOT_DATE=`$DATE_TOOL "+%Y%m%d"`
SNAPSHOT_DATE_HUMAN=`$DATE_TOOL`
echo
echo "####### Note #######"
echo "# The data dump from reference data can be obtained from this project"
echo "# (http://<gitorious/bitbucket>/dataanalysis/dataanalysis.git). For instance:"
echo "DAREF=~/dev/dataanalysis/dataanalysisgit/data_generation"
echo "mkdir -p ~/dev/dataanalysis"
echo "cd ~/dev/dataanalysis"
echo "git clone git://<gitorious/bitbucket>/dataanalysis/dataanalysis.git dataanalysisgit"
echo "cd \${DAREF}/REF"
echo "# The following script fetches a SQLite file, holding reference data,"
echo "# and translates it into three MySQL-compatible SQL files:"
echo "./fetch_sqlite_ref.sh # it may take several minutes"
echo "# It produces three create_*_ref_*${SNAPSHOT_DATE}.sql files, which are then"
echo "# used by the following script, in order to load the reference data into MySQL:"
echo "./create_ref_user.sh"
echo "./create_ref_db.sh"
echo "./create_all_tables.sh ref ref_ref ${SNAPSHOT_DATE} localhost"
if [ "${TMP_DIR}" = "/tmp/por/" ]
then
echo "mkdir -p ${TMP_DIR}"
fi
echo "cd ${MYCURDIR}"
echo "# The POR database table has then to be exported into a CSV file."
echo "\${DAREF}/por/extract_ref_por.sh ref ref_ref localhost"
echo "\cp -f ${TMP_DIR}por_all_ref_${SNAPSHOT_DATE}.csv ${TMP_DIR}dump_from_ref_city.csv"
echo "\cp -f ${OPTDDIR}/opentraveldata/optd_por_best_known_so_far.csv ${TMP_DIR}"
echo "\cp -f ${OPTDDIR}/opentraveldata/ref_airport_pageranked.csv ${TMP_DIR}"
echo "\cp -f ${OPTDDIR}/opentraveldata/optd_por.csv ${TMP_DIR}optd_airports.csv"
echo "\${DAREF}/update_airports_csv_after_getting_ref_city_dump.sh"
echo "ls -l ${TMP_DIR}"
echo "#####################"
echo
}
##
# Input file names
AIR_REF_FILENAME=dump_from_ref_airline.csv
GEO_REF_FILENAME=dump_from_ref_city.csv
GEO_OPTD_FILENAME=optd_por_best_known_so_far.csv
##
# Temporary path
TMP_DIR="/tmp/por"
MYCURDIR=`pwd`
##
# Path of the executable: set it to empty when this is the current directory.
EXEC_PATH=`dirname $0`
# Trick to get the actual full-path
EXEC_FULL_PATH=`pushd ${EXEC_PATH}`
EXEC_FULL_PATH=`echo ${EXEC_FULL_PATH} | cut -d' ' -f1`
EXEC_FULL_PATH=`echo ${EXEC_FULL_PATH} | sed -e 's|~|'${HOME}'|'`
#
CURRENT_DIR=`pwd`
if [ ${CURRENT_DIR} -ef ${EXEC_PATH} ]
then
EXEC_PATH="."
TMP_DIR="."
fi
# If the reference data file is in the current directory, then the current
# directory is certainly intended to be the temporary directory.
if [ -f ${GEO_REF_FILENAME} ]
then
TMP_DIR="."
fi
EXEC_PATH="${EXEC_PATH}/"
TMP_DIR="${TMP_DIR}/"
if [ ! -d ${TMP_DIR} -o ! -w ${TMP_DIR} ]
then
\mkdir -p ${TMP_DIR}
fi
##
# Sanity check: that (executable) script should be located in the tools/
# sub-directory of the OpenTravelData project Git clone
EXEC_DIR_NAME=`basename ${EXEC_FULL_PATH}`
if [ "${EXEC_DIR_NAME}" != "tools" ]
then
echo
echo "[$0:$LINENO] Inconsistency error: this script ($0) should be located in the refdata/tools/ sub-directory of the OpenTravelData project Git clone, but apparently is not. EXEC_FULL_PATH=\"${EXEC_FULL_PATH}\""
echo
exit -1
fi
##
# OpenTravelData directory
OPTD_DIR=`dirname ${EXEC_FULL_PATH}`
OPTD_DIR="${OPTD_DIR}/"
##
# OPTD sub-directory
DATA_DIR=${OPTD_DIR}opentraveldata/
TOOLS_DIR=${OPTD_DIR}tools/
REF_DIR=${TOOLS_DIR}
##
# Log level
LOG_LEVEL=4
##
# Input files
AIR_REF_FILE=${TOOLS_DIR}${AIR_REF_FILENAME}
GEO_REF_FILE=${TOOLS_DIR}${GEO_REF_FILENAME}
GEO_OPTD_FILE=${DATA_DIR}${GEO_OPTD_FILENAME}
##
# Reference data
AIR_REF_CAP_FILENAME=cap_${AIR_REF_FILENAME}
GEO_REF_CAP_FILENAME=cap_${GEO_REF_FILENAME}
GEO_REF_WPK_FILENAME=wpk_${GEO_REF_FILENAME}
SORTED_GEO_REF_WPK_FILENAME=sorted_${GEO_REF_WPK_FILENAME}
SORTED_CUT_GEO_REF_WPK_FILENAME=cut_${SORTED_GEO_REF_WPK_FILENAME}
#
AIR_REF_CAP_FILE=${TMP_DIR}${AIR_REF_CAP_FILENAME}
GEO_REF_CAP_FILE=${TMP_DIR}${GEO_REF_CAP_FILENAME}
GEO_REF_WPK_FILE=${TMP_DIR}${GEO_REF_WPK_FILENAME}
SORTED_GEO_REF_WPK_FILE=${TMP_DIR}${SORTED_GEO_REF_WPK_FILENAME}
SORTED_CUT_GEO_REF_WPK_FILE=${TMP_DIR}${SORTED_CUT_GEO_REF_WPK_FILENAME}
##
# Cleaning
if [ "$1" = "--clean" ]
then
if [ "${TMP_DIR}" = "/tmp/por" ]
then
\rm -rf ${TMP_DIR}
else
\rm -f ${SORTED_GEO_REF_WPK_FILE} ${SORTED_CUT_GEO_REF_WPK_FILE}
\rm -f ${AIR_REF_CAP_FILE} ${GEO_REF_CAP_FILE} ${GEO_REF_WPK_FILE}
fi
exit
fi
##
#
if [ "$1" = "-h" -o "$1" = "--help" ]
then
echo
echo "Usage: $0 [<refdata directory of the OpenTravelData project Git clone> [<Reference data directory for data dump files> [<log level>]]]"
echo " - Default refdata directory for the OpenTravelData project Git clone: '${OPTD_DIR}'"
echo " - Default path for the OPTD-maintained file of best known coordinates: '${GEO_OPTD_FILE}'"
echo " - Default path for the reference data files: '${REF_DIR}'"
echo " + 'Airlines: ${AIR_REF_FILE}'"
echo " + 'Airports/cities: ${GEO_REF_FILE}'"
echo " - Default log level: ${LOG_LEVEL}"
echo " + 0: No log; 1: Critical; 2: Error; 3; Notification; 4: Debug; 5: Verbose"
echo " - Generated files:"
echo " + '${AIR_REF_CAP_FILE}'"
echo " + '${GEO_REF_CAP_FILE}'"
echo " + '${GEO_REF_WPK_FILE}'"
echo " + '${SORTED_GEO_REF_WPK_FILE}'"
echo " + '${SORTED_CUT_GEO_REF_WPK_FILE}'"
echo
exit
fi
#
if [ "$1" = "-r" -o "$1" = "--ref" ]
then
displayRefDetails
exit
fi
##
# The OpenTravelData opentraveldata/ sub-directory contains, among other things,
# the OPTD-maintained list of POR file with geographical coordinates.
if [ "$1" != "" ]
then
if [ ! -d $1 ]
then
echo
echo "[$0:$LINENO] The first parameter ('$1') should point to the refdata/ sub-directory of the OpenTravelData project Git clone. It is not accessible here."
echo
exit -1
fi
OPTD_DIR_DIR=`dirname $1`
OPTD_DIR_BASE=`basename $1`
OPTD_DIR="${OPTD_DIR_DIR}/${OPTD_DIR_BASE}/"
DATA_DIR=${OPTD_DIR}opentraveldata/
TOOLS_DIR=${OPTD_DIR}tools/
REF_DIR=${TOOLS_DIR}
GEO_OPTD_FILE=${DATA_DIR}${GEO_OPTD_FILENAME}
fi
##
# NOTE(review): this is the tail of the preparation script. The
# *_FILENAME variables (GEO_OPTD_FILENAME, AIR_REF_FILENAME, ...) as well
# as TMP_DIR, TOOLS_DIR, the default LOG_LEVEL and the displayRefDetails
# helper are all defined in the earlier (not shown) part of the script.
#
# Abort when the OPTD geographical data file is missing.
if [ ! -f "${GEO_OPTD_FILE}" ]
then
echo
echo "[$0:$LINENO] The '${GEO_OPTD_FILE}' file does not exist."
echo
# exit -1 is reported to the caller as status 255 (exit codes wrap to 0-255)
exit -1
fi
##
# Reference data file with geographical coordinates
# (the optional second argument overrides the default reference directory)
if [ "$2" != "" ]
then
REF_DIR="$2"
AIR_REF_FILE=${REF_DIR}${AIR_REF_FILENAME}
GEO_REF_FILE=${REF_DIR}${GEO_REF_FILENAME}
# When the result is just the bare filename, look for it in the temp directory
if [ "${GEO_REF_FILE}" = "${GEO_REF_FILENAME}" ]
then
GEO_REF_FILE="${TMP_DIR}${GEO_REF_FILE}"
fi
fi
# Intermediate/output file paths, all created under ${TMP_DIR}
AIR_REF_CAP_FILE=${TMP_DIR}${AIR_REF_CAP_FILENAME}
GEO_REF_CAP_FILE=${TMP_DIR}${GEO_REF_CAP_FILENAME}
GEO_REF_WPK_FILE=${TMP_DIR}${GEO_REF_WPK_FILENAME}
SORTED_GEO_REF_WPK_FILE=${TMP_DIR}${SORTED_GEO_REF_WPK_FILENAME}
SORTED_CUT_GEO_REF_WPK_FILE=${TMP_DIR}${SORTED_CUT_GEO_REF_WPK_FILENAME}
# The geographical reference file is mandatory: abort when missing.
if [ ! -f "${GEO_REF_FILE}" ]
then
echo
echo "[$0:$LINENO] The '${GEO_REF_FILE}' file does not exist."
echo
if [ "$2" = "" ]
then
displayRefDetails
fi
exit -1
fi
# The airline reference file is optional: only warn (no exit), the
# airline-specific capitalisation step below is simply skipped.
if [ ! -f "${AIR_REF_FILE}" ]
then
echo
echo "[$0:$LINENO] The '${AIR_REF_FILE}' file does not exist."
echo
if [ "$2" = "" ]
then
displayRefDetails
fi
fi
##
# Log level (optional third argument overrides the default)
if [ "$3" != "" ]
then
LOG_LEVEL="$3"
fi
##
# Capitalise the names of the airline dump file, if existing
REF_CAPITILISER=ref_capitalise.awk
if [ -f "${AIR_REF_FILE}" ]
then
awk -F'^' -v log_level=${LOG_LEVEL} -f ${REF_CAPITILISER} ${AIR_REF_FILE} \
> ${AIR_REF_CAP_FILE}
fi
##
# Capitalise the names of the geographical dump file
awk -F'^' -v log_level=${LOG_LEVEL} -f ${REF_CAPITILISER} ${GEO_REF_FILE} \
> ${GEO_REF_CAP_FILE}
##
# Generate a second version of the geographical file with the OPTD primary key
# (integrating the location type)
OPTD_PK_ADDER=${TOOLS_DIR}ref_pk_creator.awk
awk -F'^' -v log_level=${LOG_LEVEL} -f ${OPTD_PK_ADDER} \
${GEO_OPTD_FILE} ${GEO_REF_CAP_FILE} > ${GEO_REF_WPK_FILE}
#sort -t'^' -k1,1 ${GEO_REF_WPK_FILE}
##
# Remove the header (first line) of the geographical file
# (the first sed blanks the header line starting with "pk",
#  the second deletes the resulting empty lines)
GEO_REF_WPK_FILE_TMP=${GEO_REF_WPK_FILE}.tmp
sed -e "s/^pk\(.\+\)//g" ${GEO_REF_WPK_FILE} > ${GEO_REF_WPK_FILE_TMP}
# NOTE(review): 'sed -i -e' without a backup suffix is GNU sed syntax;
# BSD/macOS sed would need: sed -i '' -e ...
sed -i -e "/^$/d" ${GEO_REF_WPK_FILE_TMP}
##
# That version of the REF geographical dump file (without primary key)
# is sorted according to the IATA code.
sort -t'^' -k 1,1 ${GEO_REF_WPK_FILE_TMP} > ${SORTED_GEO_REF_WPK_FILE}
# Leading backslash bypasses any interactive alias for rm
\rm -f ${GEO_REF_WPK_FILE_TMP}
##
# Only four columns/fields are kept in that version of the geographical file:
# the primary key, airport/city IATA code and the geographical coordinates
# (latitude, longitude).
cut -d'^' -f 1,2,16,17 ${SORTED_GEO_REF_WPK_FILE} \
> ${SORTED_CUT_GEO_REF_WPK_FILE}
##
# Reporting
echo
echo "Preparation step"
echo "----------------"
echo "The '${AIR_REF_CAP_FILE}', '${GEO_REF_CAP_FILE}', '${GEO_REF_WPK_FILE}', '${SORTED_GEO_REF_WPK_FILE}' and '${SORTED_CUT_GEO_REF_WPK_FILE}' files have been derived from '${GEO_REF_FILE}'."
echo
| true
|
8dfdc66e966c70afee842586ba60219a90b65351
|
Shell
|
rom1504/rbot
|
/grammar/checkEverything.sh
|
UTF-8
| 95
| 2.640625
| 3
|
[
"MIT"
] |
permissive
|
# Run every grammar test case in test/*.test through check.sh.
# The test name is the file's basename without the .test extension
# (e.g. test/expr.test -> "expr").
#
# Fixes over the original: no parsing of `ls` output (which breaks on
# unusual filenames and spawns three processes per listing), quoted
# expansions, and an explicit guard for the no-match glob case.
for test_file in test/*.test; do
  # With no matches the literal pattern remains; skip it.
  [ -e "$test_file" ] || continue
  name=${test_file##*/}   # strip the leading "test/" directory
  name=${name%.test}      # strip the trailing ".test" extension
  echo "$name" :
  ./check.sh "$name"
done
| true
|
bb5a48f6739c258c81c73aa7dcd2ee26a8fd2c6e
|
Shell
|
ElectricDinoLab/Efield_Pipeline
|
/tarfolders.sh
|
UTF-8
| 155
| 2.5625
| 3
|
[] |
no_license
|
#!/bin/bash
# Create one gzipped tarball per sub-directory of the current directory:
#   <dir>  ->  <dir>.tar.gz
# (i.e. tar -zcvf archive-name.tar.gz directory-name, for every folder)
for dir in *
do
# Only archive directories: the original also tarred plain files,
# including previously generated .tar.gz archives on a re-run.
[ -d "${dir}" ] || continue
echo tar -zcvf "${dir}.tar.gz" "${dir}"
tar -zcvf "${dir}.tar.gz" "${dir}"
done
| true
|
867453434807a493aa8d6d9f336d71d46d8ef1f7
|
Shell
|
Li-Wentao/QC_workflow
|
/workflow
|
UTF-8
| 7,207
| 2.890625
| 3
|
[] |
no_license
|
#!/bin/bash
#
# GWAS quality-control and association workflow built on PLINK 1.x,
# closely following the Marees et al. (2018) GWAS tutorial.
#
# Usage: ./workflow <bfile_prefix>
#   $1 - prefix of the input PLINK binary fileset (.bed/.bim/.fam)
#
# Requires on the PATH: plink, Rscript, wget, awk, sed (gsed on macOS).
# Helper R scripts expected in the working directory:
#   check_heterozygosity_rate.R, heterozygosity_outliers_list.R, MDS_merged.R

### QC ###
### Step 1: check missingness ###
plink --bfile "$1" --missing #check for missing data
plink --bfile "$1" --geno 0.02 --make-bed --out data_out #SNP missingness threshold 0.02, can be changed accordingly
# Apply the individual-missingness filter on top of the SNP filter.
# (The original re-read "$1" here, silently discarding the --geno step.)
plink --bfile data_out --mind 0.02 --make-bed --out data_out #individual threshold 0.02, can be changed accordingly

### Step 2: check sex-discrepancy ###
plink --bfile data_out --check-sex #check for sex discrepancies
grep "PROBLEM" plink.sexcheck | awk '{print$1,$2}' > sex_discrepancy.txt #locate the problem data
plink --bfile data_out --remove sex_discrepancy.txt --make-bed --out data_out #remove the problem data

### Step 3: check MAF ###
awk '{ if ($1 >= 1 && $1 <= 22) print $2 }' data_out.bim > snp.txt #select autosomal SNPs only
plink --bfile data_out --extract snp.txt --make-bed --out data_out
plink --bfile data_out --freq --out MAF_check #check MAF
plink --bfile data_out --maf 0.05 --make-bed --out data_out #remove snps with low MAF

### Step 4: check Hardy-Weinberg equilibrium (HWE) ###
plink --bfile data_out --hardy #check HWE (the documented flag is lowercase --hardy, not --Hardy)
plink --bfile data_out --hwe 1e-6 --make-bed --out HWE_controls #HWE threshold for controls
# Apply the stricter case threshold on top of the control-filtered set.
# (The original re-read data_out here, discarding the control-side filter.)
plink --bfile HWE_controls --hwe 1e-10 --hwe-all --make-bed --out HWE_case #HWE threshold for case

### Step 5: check heterozygosity ###
plink --bfile HWE_case --indep-pairwise 50 5 0.2 --out indep_snp #find the independent (LD-pruned) snps
plink --bfile HWE_case --extract indep_snp.prune.in --het --out R_check
Rscript --no-save check_heterozygosity_rate.R #plot the hist-gram of heterozygosity rate
Rscript --no-save heterozygosity_outliers_list.R #find out the high and low heterozygosity rate
sed 's/"// g' fail-het-qc.txt | awk '{print$1, $2}' > het_fail_ind.txt
plink --bfile HWE_case --remove het_fail_ind.txt --make-bed --out data_out

### Step 6: check relatedness ###
plink --bfile data_out --extract indep_snp.prune.in --genome --min 0.2 --out pihat_min0.2
awk '{ if ($8 >0.9) print $0 }' pihat_min0.2.genome > zoom_pihat.genome
plink --bfile data_out --filter-founders --make-bed --out data_out #remove data from relatedness

### Population Stratification ###
# Check whether you have 1000 Genome data to render Population Stratification
if [ -e ALL.2of4intersection.20100804.genotypes.vcf.gz ]
then
echo "You have already downloaded the 1000 Genome data"
else
echo "You haven't already downloaded the 1000 Genome data, and start donwloading"
wget ftp://ftp-trace.ncbi.nih.gov/1000genomes/ftp/release/20100804/ALL.2of4intersection.20100804.genotypes.vcf.gz
fi
# unzip the vcf file into binary files
plink --vcf ALL.2of4intersection.20100804.genotypes.vcf.gz --make-bed --out ALL.2of4intersection.20100804.genotypes
### QC for 1000 Genome data ###
plink --bfile ALL.2of4intersection.20100804.genotypes --set-missing-var-ids @:#\$1,\$2 --make-bed --out ALL.2of4intersection.20100804.genotypes_no_missing_IDs
plink --bfile ALL.2of4intersection.20100804.genotypes_no_missing_IDs --geno 0.2 --allow-no-sex --make-bed --out 1KG_MDS
plink --bfile 1KG_MDS --mind 0.2 --allow-no-sex --make-bed --out 1KG_MDS
plink --bfile 1KG_MDS --maf 0.05 --allow-no-sex --make-bed --out 1KG_MDS
### Extract the variants present in HapMap dataset from the 1000 genomes dataset ###
awk '{print$2}' data_out.bim > HapMap_SNPs.txt
plink --bfile 1KG_MDS --extract HapMap_SNPs.txt --make-bed --out 1KG_MDS
### Extract the variants present in 1000 Genomes dataset from the HapMap dataset ###
awk '{print$2}' 1KG_MDS.bim > 1KG_MDS_SNPs.txt
plink --bfile data_out --extract 1KG_MDS_SNPs.txt --recode --make-bed --out HapMap_MDS
### The datasets must have the same build. Change the build 1000 Genomes data build ###
awk '{print$2,$4}' HapMap_MDS.map > buildhapmap.txt
plink --bfile 1KG_MDS --update-map buildhapmap.txt --make-bed --out 1KG_MDS
### Merge the HapMap and 1000 Genomes data sets ###
# 1) set reference genome
# .bim columns are: chr, id, cM, bp, A1, A2 — the reference-allele list
# needs (id, A1) = columns $2,$5. The original printed $2,$4 (id, bp
# position) and read the non-existent "1kG_MDS.bim" (wrong case).
awk '{print$2,$5}' 1KG_MDS.bim > 1kg_ref-list.txt
plink --bfile HapMap_MDS --reference-allele 1kg_ref-list.txt --make-bed --out HapMap-adj
# 2) Resolve strand issues
awk '{print$2,$5,$6}' 1KG_MDS.bim > 1KGMDS_tmp
awk '{print$2,$5,$6}' HapMap-adj.bim > HapMap-adj_tmp
sort 1KGMDS_tmp HapMap-adj_tmp |uniq -u > all_differences.txt
awk '{print$1}' all_differences.txt | sort -u > flip_list.txt
plink --bfile HapMap-adj --flip flip_list.txt --reference-allele 1kg_ref-list.txt --make-bed --out corrected_hapmap
awk '{print$2,$5,$6}' corrected_hapmap.bim > corrected_hapmap_tmp
# (the original sorted "1kGMDS_tmp", which was created as "1KGMDS_tmp")
sort 1KGMDS_tmp corrected_hapmap_tmp |uniq -u > uncorresponding_SNPs.txt
# 3) Remove problematic SNPs from HapMap and 1000 Genomes
awk '{print$1}' uncorresponding_SNPs.txt | sort -u > SNPs_for_exlusion.txt
plink --bfile corrected_hapmap --exclude SNPs_for_exlusion.txt --make-bed --out HapMap_MDS
plink --bfile 1KG_MDS --exclude SNPs_for_exlusion.txt --make-bed --out 1KG_MDS
# (the original merged "1kG_MDS.bed/.fam" — wrong case on case-sensitive filesystems)
plink --bfile HapMap_MDS --bmerge 1KG_MDS.bed 1KG_MDS.bim 1KG_MDS.fam --allow-no-sex --make-bed --out MDS_merge
plink --bfile MDS_merge --extract indep_snp.prune.in --genome --out MDS_merge
plink --bfile MDS_merge --read-genome MDS_merge.genome --cluster --mds-plot 10 --out MDS_merge
### Exclude ethnic outliers ###
wget ftp://ftp.1000genomes.ebi.ac.uk/vol1/ftp/release/20100804/20100804.ALL.panel
# Convert population codes into superpopulation codes
awk '{print$1,$1,$2}' 20100804.ALL.panel > race_1kG.txt
sed 's/JPT/ASN/g' race_1kG.txt>race_1kG2.txt
sed 's/ASW/AFR/g' race_1kG2.txt>race_1kG3.txt
sed 's/CEU/EUR/g' race_1kG3.txt>race_1kG4.txt
sed 's/CHB/ASN/g' race_1kG4.txt>race_1kG5.txt
sed 's/CHD/ASN/g' race_1kG5.txt>race_1kG6.txt
sed 's/YRI/AFR/g' race_1kG6.txt>race_1kG7.txt
sed 's/LWK/AFR/g' race_1kG7.txt>race_1kG8.txt
sed 's/TSI/EUR/g' race_1kG8.txt>race_1kG9.txt
sed 's/MXL/AMR/g' race_1kG9.txt>race_1kG10.txt
sed 's/GBR/EUR/g' race_1kG10.txt>race_1kG11.txt
sed 's/FIN/EUR/g' race_1kG11.txt>race_1kG12.txt
sed 's/CHS/ASN/g' race_1kG12.txt>race_1kG13.txt
sed 's/PUR/AMR/g' race_1kG13.txt>race_1kG14.txt
awk '{print$1,$2,"OWN"}' HapMap_MDS.fam>racefile_own.txt
# gsed = GNU sed (needed for '1i\' on macOS); on Linux plain sed also works
cat race_1kG14.txt racefile_own.txt | gsed -e '1i\FID IID race' > racefile.txt
Rscript MDS_merged.R
# NOTE: the original script contained a stray R "legend(...)" call here,
# which is a bash syntax error (and left an unterminated quote). Any
# legend customisation belongs inside MDS_merged.R instead.
### Exclude ethnic outliers ###
awk '{ if ($4 <-0.03 && $5 >0.03) print $1,$2 }' MDS_merge.mds > EUR_MDS_merge
plink --bfile data_out --keep EUR_MDS_merge --make-bed --out HapMap_out
plink --bfile HapMap_out --extract indep_snp.prune.in --genome --out HapMap_out
plink --bfile HapMap_out --read-genome HapMap_out.genome --cluster --mds-plot 10 --out HapMap_out_mds
# Keep FID, IID and the first ten MDS components as covariates
awk '{print$1, $2, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13}' HapMap_out_mds.mds > covar_mds.txt

### Association analysis ###
# chi-squared test
plink --bfile HapMap_out --assoc --out assoc_result
# logistic regression, correcting for the MDS covariates
plink --bfile HapMap_out --covar covar_mds.txt --logistic --hide-covar --out logistic_result
# drop rows where the regression produced NA
awk '!/'NA'/' logistic_result.assoc.logistic > logistic_result2.assoc.logistic
|
5cbc27f25fc7a0350a1c9b9ad625002e9456d00a
|
Shell
|
ni1ight/istrobot-stopwatch-ui
|
/scripts/install_unity.sh
|
UTF-8
| 492
| 2.5625
| 3
|
[] |
no_license
|
#!/bin/bash
# Install the StopWatch binary, its icon, and a Unity desktop entry
# for the current user.
#
# Fixes over the original:
#  - $(...) instead of backticks, and all expansions quoted;
#  - target directories are created first ('realpath' on a missing
#    directory returned nothing, so the desktop file was written to
#    "/stopwatch.desktop");
#  - Desktop Entry values are not shell-quoted: the spec treats
#    Path='...'/Exec='...' single quotes (and the double slash from the
#    trailing "/" in INSTALL_DIR) literally;
#  - no leading blank line before [Desktop Entry].

INSTALL_DIR='/usr/local/bin'
APP_DIR="$HOME/.local/share/applications"
ICON_DIR="$HOME/.local/share/icons"

mkdir -p "$APP_DIR" "$ICON_DIR"
DESKTOP_DIR=$(realpath "$APP_DIR")
INSTALL_ICON=$(realpath "$ICON_DIR")

cp StopWatch "$INSTALL_DIR"
cp ./../icons/StopWatch.png "$INSTALL_ICON"

cat > "$DESKTOP_DIR/stopwatch.desktop" <<EOF
[Desktop Entry]
Encoding=UTF-8
Version=1.0
Type=Application
Name=StopWatch
Icon=StopWatch.png
Path=$INSTALL_DIR
Exec=$INSTALL_DIR/StopWatch
StartupNotify=false
StartupWMClass=StopWatch
OnlyShowIn=Unity;
X-UnityGenerated=true
EOF
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.