blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
b47b3761fdb553b2bc706f3f4cccbbbc6844578b | Shell | goalgorilla/open_social_scripts | /trigger_dockerhub.sh | UTF-8 | 460 | 2.625 | 3 | [] | no_license | #!/usr/bin/env bash
# This script is used in Travis in order to trigger an automated build in the DockerHub.
# Key is secure so it can not be used in forks. Currently we only trigger on the 8.x-1.x branch.
if [[ $TRAVIS_BRANCH == "8.x-4.x" ]] && [[ $TRAVIS_PULL_REQUEST == "false" ]]
then
curl -H "Content-Type: application/json" --data '{"build": true}' -X POST https://registry.hub.docker.com/u/goalgorilla/open_social_docker/trigger/$DOCKERHUB_TOKEN/
fi
| true |
c315bc72e67e2ea8070b27e87f319172e1e4910e | Shell | ofirnash/CloudComputingParkingLot | /deployScript.sh | UTF-8 | 2,302 | 3.515625 | 4 | [] | no_license | KEY_NAME="cloud-course-`date +'%N'`"
KEY_PEM="$KEY_NAME.pem"
echo "create key pair $KEY_PEM to connect to instances and save locally"
aws ec2 create-key-pair --key-name $KEY_NAME \
| jq -r ".KeyMaterial" > $KEY_PEM
# secure the key pair
chmod 400 $KEY_PEM
SEC_GRP="my-sg-`date +'%N'`"
echo "setup firewall $SEC_GRP"
aws ec2 create-security-group \
--group-name $SEC_GRP \
--description "Access my instances"
# figure out my ip
MY_IP=$(curl ipinfo.io/ip)
echo "My IP: $MY_IP"
echo "setup rule allowing SSH access to $MY_IP only"
aws ec2 authorize-security-group-ingress \
--group-name $SEC_GRP --port 22 --protocol tcp \
--cidr $MY_IP/32
echo "setup rule allowing SSH access to $MY_IP only for NodeJS"
aws ec2 authorize-security-group-ingress \
--group-name $SEC_GRP --port 8080 --protocol tcp \
--cidr $MY_IP/32
echo "setup rule allowing SSH access to $MY_IP only for Redis"
aws ec2 authorize-security-group-ingress \
--group-name $SEC_GRP --port 6379 --protocol tcp \
--cidr $MY_IP/32
UBUNTU_20_04_AMI="ami-042e8287309f5df03"
echo "Creating Ubuntu 20.04 instance..."
RUN_INSTANCES=$(aws ec2 run-instances \
--image-id $UBUNTU_20_04_AMI \
--instance-type t3.micro \
--key-name $KEY_NAME \
--security-groups $SEC_GRP)
INSTANCE_ID=$(echo $RUN_INSTANCES | jq -r '.Instances[0].InstanceId')
echo "Waiting for instance creation..."
aws ec2 wait instance-running --instance-ids $INSTANCE_ID
PUBLIC_IP=$(aws ec2 describe-instances --instance-ids $INSTANCE_ID |
jq -r '.Reservations[0].Instances[0].PublicIpAddress'
)
echo "New instance $INSTANCE_ID @ $PUBLIC_IP"
echo "setup production environment"
ssh -i $KEY_PEM -o "StrictHostKeyChecking=no" -o "ConnectionAttempts=10" ubuntu@$PUBLIC_IP <<EOF
sudo apt update
sudo apt install curl -y
curl -fsSL https://deb.nodesource.com/setup_16.x | sudo -E bash -
sudo apt install nodejs -y
sudo apt install redis -y
sudo apt install git -y
git clone https://github.com/ofirnash/CloudComputingParkingLot.git
cd CloudComputingParkingLot
redis-server &
npm install
npm start
exit
EOF
echo "test that it all worked"
curl --retry-connrefused --retry 10 --retry-delay 1 http://$PUBLIC_IP:8080 | true |
8a95b7146d3daaebfaf852d91ce49e802817d64c | Shell | suspectpart/neo4j-learning | /run.sh | UTF-8 | 545 | 2.640625 | 3 | [] | no_license | #!/bin/sh
echo "Stop running instances..."
docker kill neo4j > /dev/null 2>&1
docker rm neo4j > /dev/null 2>&1
echo "Start neo4j..."
docker run -d \
--name neo4j \
--publish=7474:7474 \
--publish=7687:7687 \
--volume=$(pwd)/conf:/conf \
--volume=$(pwd)/plugins:/plugins \
--volume=$(pwd)/logs:/logs \
--volume=$(pwd)/import:/var/lib/neo4j/import \
neo4j:3.3.2
echo "Wait a while to make sure it is started..."
sleep 5
echo "Prepopulate data..."
docker exec -i neo4j bin/cypher-shell < ./data.cql
echo "Done."
| true |
173679440f1652cc15cb53d9d3f60bed39a440d5 | Shell | jasonmorganson/zsh | /opts.zsh | UTF-8 | 3,677 | 3.3125 | 3 | [] | no_license | # ===== Basics
setopt no_beep # don't beep on error
setopt interactive_comments # Allow comments even in interactive shells (especially for Muness)
# ===== Changing Directories
setopt auto_cd # If you type foo, and it isn't a command, and it is a directory in your cdpath, go there
setopt cdablevarS # if argument to cd is the name of a parameter whose value is a valid directory, it will become the current directory
setopt pushd_ignore_dups # don't push multiple copies of the same directory onto the directory stack
# ===== Expansion and Globbing
setopt extended_glob # treat #, ~, and ^ as part of patterns for filename generation
# ===== History
setopt append_history # Allow multiple terminal sessions to all append to one zsh command history
setopt extended_history # save timestamp of command and duration
setopt inc_append_history # Add comamnds as they are typed, don't wait until shell exit
setopt hist_expire_dups_first # when trimming history, lose oldest duplicates first
setopt hist_ignore_dups # Do not write events to history that are duplicates of previous events
setopt hist_ignore_all_dups # Remove older duplicate entries from history
setopt hist_ignore_space # remove command line from history list when first character on the line is a space
setopt hist_find_no_dups # When searching history don't display results already cycled through twice
setopt hist_reduce_blanks # Remove extra blanks from each command line being added to history
setopt hist_verify # don't execute, just expand history
setopt share_history # imports new commands and appends typed commands to history
# ===== Completion
setopt always_to_end # When completing from the middle of a word, move the cursor to the end of the word
setopt auto_menu # show completion menu on successive tab press. needs unsetop menu_complete to work
setopt auto_name_dirs # any parameter that is set to the absolute name of a directory immediately becomes a name for that directory
setopt complete_in_word # Allow completion from within a word/phrase
unsetopt menu_complete # do not autoselect the first completion entry
# ===== Correction
unsetopt correct_all # spelling correction for arguments
setopt correct # spelling correction for commands
# ===== Prompt
setopt prompt_subst # Enable parameter expansion, command substitution, and arithmetic expansion in the prompt
# setopt transient_rprompt # only show the rprompt on the current prompt
# ===== Scripts and Functions
setopt multios # perform implicit tees or cats when multiple redirections are attempted
# why would you type 'cd dir' if you could just type 'dir'?
setopt AUTO_CD
# Now we can pipe to multiple outputs!
setopt MULTIOS
# This makes cd=pushd
setopt AUTO_PUSHD
# This will use named dirs when possible
setopt AUTO_NAME_DIRS
# If we have a glob this will expand it
setopt GLOB_COMPLETE
setopt PUSHD_MINUS
# No more annoying pushd messages...
# setopt PUSHD_SILENT
# blank pushd goes to home
setopt PUSHD_TO_HOME
# this will ignore multiple directories for the stack. Useful? I dunno.
setopt PUSHD_IGNORE_DUPS
# 10 second wait if you do something that will delete everything. I wish I'd had this before...
setopt RM_STAR_WAIT
setopt NO_HUP
setopt IGNORE_EOF
# If I could disable Ctrl-s completely I would!
setopt NO_FLOW_CONTROL
# Keep echo "station" > station from clobbering station
# setopt NO_CLOBBER
# Case insensitive globbing
setopt NO_CASE_GLOB
# Be Reasonable!
setopt NUMERIC_GLOB_SORT
# I don't know why I never set this before.
setopt EXTENDED_GLOB
# hows about arrays be awesome? (that is, frew${cool}frew has frew surrounding all the variables, not just first and last
setopt RC_EXPAND_PARAM
| true |
7fa32a3dfff7f8c20dbd109802df5154af4feac1 | Shell | bylam/QIIME2_16SV4_Pipeline | /renaming_seq_names.sh | UTF-8 | 485 | 3.03125 | 3 | [] | no_license | #!/bin/bash
## MC69 Sequences have '_' which plugs up Deblur in QIIME2
## This script will reduce down the '_' thats in the names
# Created: 8/15/2019
# Last Edited: 8/16/2019
# Hinako Terauchi
######################################################################3
# mv ${name} ${name:0:5}${name:6};
for name in $(ls *.fastq.gz); do
echo $name
a=$(echo ${name%_L001*} | tr -d \_)
mv ${name} $a${name:(-21)}
done
### trying to put the replaced first part into a variable
| true |
e4dc84947a96c25ceb4a6d3ed39922fb3eb7e53c | Shell | Pinoinha/dotfiles | /bin/temp | UTF-8 | 164 | 3.046875 | 3 | [] | no_license | #!/bin/env bash
# prints temperature
# Print the CPU package temperature as an icon plus whole degrees C.
# Reads lm-sensors output; field 4 of the "Package" line looks like
# "+45.0°C", so the leading '+' and everything after the integer part
# must be stripped before printf's %d conversion (a fractional value
# would make bash printf emit an "invalid number" warning).
temp() {
    local t
    t="$(sensors | grep -i package | awk '{print $4}' | sed -e 's/+//g')"
    t=${t%%[!0-9]*}   # keep only the leading integer digits
    printf "%s %dºC" "" "${t:-0}"
}
temp
| true |
b140e2cad7314aeb37a968c6ffa64cdf0e780cf9 | Shell | s3688102/assignment2 | /performance_monitor_script | UTF-8 | 488 | 3.15625 | 3 | [] | no_license | #!/bin/bash
# Sample Raspberry Pi vitals (temperature, ARM clock frequency, core
# voltage) via vcgencmd every 5 seconds and append tab-separated rows
# (elapsed-seconds, temp, clock, volts) to kernel_performance_data.
# Exit cleanly when the process receives SIGUSR1.
trap "exit" SIGUSR1
# Wall-clock start time in epoch seconds; used to report elapsed time.
BEGIN_TIME=$(date "+%s")
while true
do
# vcgencmd prints "temp=45.0'C"; strip the "temp=" prefix and "'C" suffix.
TEMP=$(vcgencmd measure_temp)
TEMP=${TEMP#temp=}
TEMP=${TEMP%\'C}
# vcgencmd prints "frequency(48)=600000000"; keep only the Hz value.
CLOCK_ARM=$(vcgencmd measure_clock arm)
CLOCK_ARM=${CLOCK_ARM#frequency(*)=}
# vcgencmd prints "volt=1.2000V"; strip the "volt=" prefix and "V" suffix.
VOLT=$(vcgencmd measure_volts)
VOLT=${VOLT#volt=}
VOLT=${VOLT%V}
NOW_TIME=$(date "+%s")
let USED_TIME=NOW_TIME-BEGIN_TIME
# &>> appends both stdout and stderr to the data file.
printf "%s\t%s\t%s\t%s\n" $USED_TIME $TEMP $CLOCK_ARM $VOLT &>> kernel_performance_data
sleep 5s
done
| true |
b67088029b06c497ceff096800dd4a48e0c2f6bf | Shell | hpoul/codeux.design | /_tools/build_and_deploy.sh | UTF-8 | 633 | 2.625 | 3 | [] | no_license | #!/bin/bash
set -xeu
dir="${0%/*}"
basedir="$dir/.."
cd $basedir
DC2F_ENV=production ./dc2f.sh build
#time find ./public -type f -name '*.png' -o -name '*.jpg' | xargs -P 1 -I {} sh -c 'echo $1 ; cwebp -m 6 -af -short -mt -q 75 $1 -o "${1%.*}.webp"' _ {} \;
#
#cat _tools/_htaccess_append >> public/.htaccess
./_tools/_deploy_web_sphene_net.sh
rsync --progress -a --delete public/ docker-host.tapo.at:dev/web.poul.at/data/sites/newpage.codeux.design/
#echo WARNING WARNING
#echo WARNING WARNING
#echo WARNING WARNING
echo
echo "purge cloudflare cachedisabled"
./_tools/purge_cloudflare_cache.sh
./_tools/gh-pages-deploy.sh
| true |
e6c0edf4de8e664314def403b407eb6abc320723 | Shell | ludekvesely/proxy | /run.sh | UTF-8 | 500 | 2.796875 | 3 | [] | no_license | #!/usr/bin/env bash
set -o nounset
set -o errexit
set -o pipefail
# make sure it doesn't daemonize
grep -v "daemon off" /etc/nginx/nginx.conf > /tmp/nginx.tmp && mv /tmp/nginx.tmp /etc/nginx/nginx.conf
echo "daemon off;" >> /etc/nginx/nginx.conf
# replace variables at runtime
sed -i "s/UPSTREAM_PORT/${UPSTREAM_PORT}/g" /etc/nginx/conf.d/proxy.conf
sed -i "s/UPSTREAM_ADDRESS/${UPSTREAM_ADDRESS}/g" /etc/nginx/conf.d/proxy.conf
cat /etc/nginx/conf.d/proxy.conf
# start the thing
/usr/sbin/nginx
| true |
def1b7185fc8423ee3ae1883e37f2d8e89d9dfe8 | Shell | miroso/tooling | /installer/install_opencv_4_pi.sh | UTF-8 | 6,682 | 3.53125 | 4 | [
"MIT"
] | permissive | ###################################################################################################
# Step #0: Setup
###################################################################################################
# Path to OpenCV4 download directory
cv_download_dir=/home/pi/opencv_install
# Path to OpenCV4 install directory
# For system wide installation of OpenCV, change cv_install_dir to /usr/local
cv_install_dir=/home/pi/workspace/packages/opencv4
# Path, where you want to create your python virtual environment
py3_virtualenv_dir=/home/pi/virtualenv
# Name of your python3 virtual environment
virtualenv_name=env_cv4
# Current directory
curr_dir=`pwd`
##### OPTIONAL #####
# Path to your list with pip packages
# If a package list is not available, leave empy --> packages=""
packages=/home/pi/workspace/requirements/env_cv4
###################################################################################################
# Step #1: Expand filesystem on your Raspberry Pi
###################################################################################################
#
# sudo raspi-config
# advanced options
# expand filesystem
# sudo reboot
###################################################################################################
# Step #2: Install OpenCV 4 dependencies on your Raspberry Pi
###################################################################################################
echo -e "\n\033[91mStep #2: Install OpenCV 4 dependencies on your Raspberry Pi\033[0m"
# System update
echo -e "\n\033[91m... System update\033[0m"
sudo apt -y update
sudo apt -y upgrade
# Install developer tools:
echo -e "\n\033[91m... Install developer tools\033[0m"
sudo apt -y install build-essential cmake ninja unzip pkg-config
# Install image and video libraries:
echo -e "\n\033[91m... Install image and video libraries\033[0m"
sudo apt -y install libjpeg-dev libpng-dev libtiff-dev
sudo apt -y install libavcodec-dev libavformat-dev libswscale-dev libv4l-dev
sudo apt -y install libxvidcore-dev libx264-dev
# install GTK, our GUI backend
echo -e "\n\033[91m... Install GTK, our GUI backend\033[0m"
sudo apt -y install libgtk-3-dev
# Install a package which may reduce pesky GTK warnings
# the asterisk will grab the ARM specific GTK
echo -e "\n\033[91m... Install a package which may reduce pesky GTK warnings\033[0m"
sudo apt -y install libcanberra-gtk*
# Install numerical optimizations for OpenCV
echo -e "\n\033[91m... Install numerical optimizations for OpenCV\033[0m"
sudo apt -y install libatlas-base-dev gfortran
# install the Python 3 development headers
echo -e "\n\033[91m... Install the Python 3 development headers\033[0m"
sudo apt -y install python3-dev
###################################################################################################
# Step #3: Download OpenCV 4 for your Raspberry Pi
###################################################################################################
echo -e "\n\033[91mDownload OpenCV 4 for your Raspberry Pi\033[0m"
if [ ! -d $cv_download_dir ]; then
mkdir -p $cv_download_dir
fi
cd $cv_download_dir
# Change version if necessary
wget -O opencv.zip https://github.com/opencv/opencv/archive/4.0.1.zip
wget -O opencv_contrib.zip https://github.com/opencv/opencv_contrib/archive/4.0.1.zip
unzip opencv.zip
unzip opencv_contrib.zip
mv opencv-4.0.1 opencv
mv opencv_contrib-4.0.1 opencv_contrib
rm opencv.zip
rm opencv_contrib.zip
###################################################################################################
# Step #4: Configure your Python 3 virtual environment for OpenCV 4
###################################################################################################
echo -e "\n\033[91mConfigure your Python 3 virtual environment for OpenCV 4\033[0m"
# Create python 3 virtual environment
virtualenv_path=$py3_virtualenv_dir/$virtualenv_name
virtualenv -p python3 $virtualenv_path
source $virtualenv_path/bin/activate
# Install python 3 packages
if [ -z $packages ]; then
pip install numpy
else
pip install -r $packages
fi
###################################################################################################
# Step #5: CMake and compile OpenCV 4 for your Raspberry Pi
###################################################################################################
echo -e "\n\033[91mStep #5: CMake and compile OpenCV 4 for your Raspberry Pi\033[0m"
cd opencv
if [ ! -d "build" ]; then
mkdir build
fi
cd build
# Run cmake for OpenCV4
cmake -D CMAKE_BUILD_TYPE=RELEASE \
-D CMAKE_INSTALL_PREFIX=$cv_install_dir \
-D OPENCV_EXTRA_MODULES_PATH=$cv_download_dir/opencv_contrib/modules \
-D ENABLE_NEON=ON \
-D ENABLE_VFPV3=ON \
-D BUILD_TESTS=OFF \
-D OPENCV_ENABLE_NONFREE=ON \
-D INSTALL_PYTHON_EXAMPLES=OFF \
-D INSTALL_C_EXAMPLES=OFF \
-D PYTHON_EXECUTABLE=$virtualenv_path/bin/python \
-D BUILD_EXAMPLES=OFF ..
echo -e "\n\033[91mIncrease the SWAP on the Raspberry Pi\033[0m"
# Increasing the SWAP size will enable to compile OpenCV with all four cores of the Raspberry Pi.
# without the install hanging due to memory exhausting.
sudo cp /etc/dphys-swapfile /etc/dphys-swapfile_copy
sudo rm /etc/dphys-swapfile
echo "CONF_SWAPSIZE=2048" > dphys-swapfile
sudo mv dphys-swapfile /etc/
sudo /etc/init.d/dphys-swapfile stop
sudo /etc/init.d/dphys-swapfile start
# Compile OpenCV4
make -j4
sudo make install
sudo ldconfig
echo -e "\n\033[91mDecrease the SWAP on the Raspberry Pi\033[0m"
sudo rm /etc/dphys-swapfile
sudo mv /etc/dphys-swapfile_copy /etc/dphys-swapfile
sudo /etc/init.d/dphys-swapfile stop
sudo /etc/init.d/dphys-swapfile start
###################################################################################################
Step #6: Link OpenCV 4 into your Python 3 virtual environment
###################################################################################################
echo -e "\n\033[91mStep #5: Link OpenCV 4 into your Python 3 virtual environment\033[0m"
# Rename python bidings
py3_bindings_path=`find $cv_install_dir -name "cv2.cpython-*-arm-linux-gnueabihf.so"`
dir_name=`dirname $py3_bindings_path`
file_name=`basename $py3_bindings_path`
sudo mv $dir_name/$file_name $dir_name/cv2.so
# Link the bindings to your virtual environment
cd `find $py3_virtualenv_dir/$virtualenv_name -name "site-packages"`
ln -s $dir_name/cv2.so cv2.so
###################################################################################################
echo -e "\n\033[91mDone\033[0m"
cd $curr_dir
# to make the file executable, run
# sudo chmod +x install_opencv_4_pi.sh
| true |
6394aacefdaa2ee8d1cc806bb697986693855d87 | Shell | chrispap95/deadCellRegression | /regressionScripts/prepareCondor.sh | UTF-8 | 393 | 3.234375 | 3 | [] | no_license | #!/bin/sh
USERBASE=`pwd`
rm ${CMSSW_VERSION}.tgz
cd ../../../../
echo -n "Creating tarball..."
tar --exclude="*.root" --exclude="*.nfs*" --exclude=${CMSSW_BASE}/src/ResolutionAnalyzer --exclude-vcs -zcf ${CMSSW_VERSION}.tgz ${CMSSW_VERSION}
mv ${CMSSW_VERSION}.tgz ${USERBASE}
cd $USERBASE
if [ ! -f ${CMSSW_VERSION}.tgz ]; then
echo -e "\nError: tarball doesn't exist!"
else
echo " Done!"
fi
| true |
4341da7c67ffb012dcba13aaeac99fba8d1ac86a | Shell | Shump/.dotfiles | /.bash_profile | UTF-8 | 1,413 | 3.234375 | 3 | [] | no_license |
if [[ `uname` == 'Linux' ]]; then
# ARCH:
# ~/.bashrc
#
# If not running interactively, don't do anything
[[ $- != *i* ]] && return
eval "`dircolors`"
alias ls='ls --color=auto'
PS1='[\u@\h \W]\$ '
alias reboot='sudo reboot'
alias powerdown='sudo shutdown -h 0'
alias grep='grep --color=auto'
fi
if [[ `uname` == 'Darwin' ]]; then
eval "`gdircolors`"
alias ls='gls -h --color'
alias vim='mvim -v'
alias gvim='mvim'
fi
alias ll='ls -lah'
alias la='ls -ah'
# Create a directory and change into it in one step.
# $1 - directory name to create (quoted so names with spaces work;
#      the original unquoted expansion broke on whitespace and globs).
function mk
{
    command mkdir -- "$1" && cd -- "$1"
}
# Make a ".copy" backup of each file given as an argument.
# Uses "$@" instead of the original unquoted $*, which word-split
# filenames containing spaces into separate (wrong) arguments.
function copy
{
    local name
    for name in "$@"
    do
        cp -- "$name" "$name.copy"
    done
}
# Print the absolute path of the given path.
# Fixes the original, which resolved $0 (the shell/script itself)
# instead of its argument, and removes the perl dependency.
# $1 - file or directory path (required)
function realpath
{
    local p=${1:?usage: realpath PATH}
    if [ -d "$p" ]; then
        (cd -- "$p" && pwd)
    else
        (cd -- "$(dirname -- "$p")" && printf '%s/%s\n' "$(pwd)" "$(basename -- "$p")")
    fi
}
#screenshot - takes a screenshot of your current window
# screenshot - capture the current screen with ImageMagick's `import`
# and save it under ~/screenshots with a timestamped filename.
screenshot ()
{
    local outfile
    outfile="$HOME/screenshots/screen_$(date +%Y%m%d_%H%M%S).png"
    import -frame -strip -quality 85 "$outfile"
}
if [[ `uname` == 'Darwin' ]]; then
# homebrew bash completion
source `brew --prefix`/Library/Contributions/brew_bash_completion.sh
[[ -s `brew --prefix`/etc/autojump.sh ]] && . `brew --prefix`/etc/autojump.sh
if [ -f $(brew --prefix)/etc/bash_completion ]; then
. $(brew --prefix)/etc/bash_completion
fi
fi
# Move file(s) to the macOS Trash instead of deleting them outright.
# Generalized from a single unquoted argument to any number of quoted
# arguments (backward compatible with the original one-file usage).
function de
{
    mv -- "$@" ~/.Trash
}
export PATH=$HOME/Library/Haskell/bin:$PATH
export PATH=/Users/julian/Documents/skola/proglang/bin:$PATH
export CC=/usr/bin/clang
export CXX=/usr/bin/clang++
setenv GL_ENABLE_DEBUG_ATTACH YES
| true |
806cff3fc7d6c407d6c6b3be9c78e1e8047c9f17 | Shell | giomarc/SABD-2nd-Project | /docker-compose/metrics/save-metrics.sh | UTF-8 | 1,124 | 3.1875 | 3 | [] | no_license | #!/bin/bash
# Strip the Flink metrics JSON wrapper from $1 and print the bare value.
# e.g. '[{"id":"0.numRecordsOutPerSecond","value":"42.5"}]' -> '42.5'
# The patterns are quoted inside ${..} so '[', '{' etc. are matched
# literally; the original unquoted ${1/$toFind/} treated the prefix as
# a glob pattern (with '[{...]' as a bracket expression) and never
# removed it as intended.
function cleanstring() {
    local prefix='[{"id":"0.numRecordsOutPerSecond","value":"'
    local suffix='"}]'
    local result="${1/"$prefix"/}"
    result="${result/"$suffix"/}"
    echo "$result"
}
# --- Flink throughput polling loop -------------------------------------
# Poll the Flink REST API for source/sink per-second record counts every
# 10 seconds and append "in;out;datetime" rows to a timestamped file.
JOB_ID="126a3b53cd7f38bbdb06d42aead2652d" #REMEMBER THAT IT CHANGES AT EACH CREATION OF THE PLAN
# Vertex IDs also change per job plan; update alongside JOB_ID.
SOURCE_THR_VTX_ID="cbc357ccb763df2852fee8c4fc7d55f2" # SOURCE OPERATOR
SINK_THR_VTX_ID="f1c4789f15c75d0d32f107a9595d229d" # SINK OPERATOR
# REST endpoints returning JSON like
# [{"id":"0.numRecordsOutPerSecond","value":"<rate>"}]
FLINK_THR_IN_METRIC_URL="http://localhost:8081/jobs/$JOB_ID/vertices/$SOURCE_THR_VTX_ID/metrics?get=0.numRecordsOutPerSecond"
FLINK_THR_OUT_METRIC_URL="http://localhost:8081/jobs/$JOB_ID/vertices/$SINK_THR_VTX_ID/metrics?get=0.numRecordsInPerSecond"
# One output file per script invocation, named by epoch seconds.
FILE_OUTPUT="metrics_$(date +'%s').txt";
echo $FLINK_THR_IN_METRIC_URL;
echo $FLINK_THR_OUT_METRIC_URL;
while true
do
# Raw JSON responses; the cleanstring post-processing below is disabled.
THR_IN=$(curl $FLINK_THR_IN_METRIC_URL);
THR_OUT=$(curl $FLINK_THR_OUT_METRIC_URL);
#THR_IN=cleanstring $THR_IN;
#THR_OUT=cleanstring $THR_OUT;
DATETIME=$(date);
# Semicolon-separated row: throughput-in;throughput-out;timestamp.
echo $THR_IN";"$THR_OUT";"$DATETIME>>$FILE_OUTPUT;
sleep 10
done
| true |
9af9a5633b47272ff3784ab397ec868762973a5f | Shell | yeloer/socblox | /units_ve/axi4_a23/axi4_a23_svf/tests/interconnects/axi4_2x2_uvm/sim/scripts/build.sh | UTF-8 | 923 | 2.625 | 3 | [] | no_license | #!/bin/sh
#****************************************************************************
#* build.sh
#****************************************************************************
# Run a command and abort the whole script with its exit status if it
# fails. Uses "$@" so arguments that contain whitespace are passed
# through intact (the original $* re-split previously-quoted words).
try()
{
    "$@"
    status=$?
    if test $status -ne 0; then
        exit $status
    fi
}
if test "x$SIM_DIR" = "x"; then
SIM_DIR=`pwd`
fi
if test "x$BUILD_DIR" = "x"; then
BUILD_DIR=`pwd`
fi
export PROJECT_LOC=`dirname $SIM_DIR`
export SOCBLOX=`cd $SIM_DIR/../../../../ ; pwd`
VLOG_FLAGS="-sv"
vlib work
# Build design and core testbench first
try vlog $VLOG_FLAGS -f ${SIM_DIR}/scripts/rtl_env_types.f
# Now, compile the bench (including the inFact sequences)
try vlog $VLOG_FLAGS \
-f ${SIM_DIR}/scripts/seqs_tb_tests.f
# try vopt ethmac_tb eth_dut_binds -o ethmac_tb_opt +cover+/ethmac_tb/dut.
try vopt -debugdb axi4_2x2_uvm_tb -o axi4_2x2_uvm_tb_opt -time
try vopt +acc -debugdb axi4_2x2_uvm_tb -o axi4_2x2_uvm_tb_opt_dbg
exit 0
| true |
e2b861e2c6e3c98f52ca78ac68ed1a8d852fd36c | Shell | chaadow/dotfiles | /bin/travis-encrypt | UTF-8 | 1,468 | 4.125 | 4 | [
"MIT"
] | permissive | #!/bin/bash
if [[ $# < 2 ]]; then
p="$(basename $0)"
here=$(mktemp)
git remote -v 2>/dev/null | grep -oP '(?<=github.com[:/])([^/]+/[^/]+?)(?=\.git| )' > "$here"
IFS=/ read user repo < "$here"
else
user="$1"
repo="$2"
shift 2
fi
if [[ -z "$user" || -z "$repo" ]]; then
echo "usage: $p [user] [repository] [value to encrypt]"
echo
echo " e.g.: $p 'P@ssw0rd' (only inside a repo with a github remote)"
echo " or $p ${user:-jsmith} ${repo:-MyRepo} 'VAR=\"s3cret\"'"
echo " or $p ${user:-jsmith} ${repo:-MyRepo} 'P@ssw0rd'"
exit 1
fi >&2
value="$1"
# Fetch key
keyurl="https://api.travis-ci.org/repos/$user/$repo/key"
echo "Fetching key from $keyurl ..." >&2
keyfile=$(mktemp)
curl -s "$keyurl" > "$keyfile" || {
echo "Couldn't fetch key from $keyurl!" >&2
exit 1
}
# (Exceptionally poor)-man's JSON-to-PEM
# Some Travis-CI pubkeys have " RSA PUBLIC KEY", where others have the standard " PUBLIC KEY".
sed -i 's|\\n|\n|g; s|"|\n|g; s/ RSA PUBLIC KEY/ PUBLIC KEY/g' "$keyfile"
grep -q "BEGIN PUBLIC KEY" "$keyfile" || {
echo "Key file from $keyurl seems malformed: $keyfile" >&2
exit 1
}
if [[ -z "$value" ]]; then
read -p "Value to encrypt? " value
fi
echo "Encrypting with openssl rsautl ..." >&2
set -o pipefail
echo -n "$value" | openssl rsautl -encrypt -inkey "$keyfile" -pubin -pkcs | base64 -w0 || {
echo "Error in openssl rsautl." >&2
exit 1
}
echo $'\nSuccess.' >&2
| true |
e81b0083a1655f628dbd4d62b3e4a0e28856bb67 | Shell | rubmary/cfr | /solve.sh | UTF-8 | 542 | 3.765625 | 4 | [] | no_license | #!/bin/bash
# Run the CFR and GEBR solver binaries with identical arguments.
# Arguments are forwarded with "$@" so values containing whitespace
# survive (the original unquoted $@ word-split them).
function solve_game() {
    echo "Running cfr"
    ./targets/cfr "$@"
    echo "Running gebr"
    ./targets/gebr "$@"
}
# Create the output directory tree for one experiment run.
# $1 - game name (graphics subdirectory)
# $2 - run folder name (regret/ and results/ subdirectories)
# Paths are quoted so names with spaces work; mkdir -p is idempotent.
function make_folders() {
    echo "Making dirs"
    mkdir -p -- "graphics/$1"
    mkdir -p -- "regret/$2"
    mkdir -p -- "results/$2"
}
GAME=$1
FOLDER=$1
shift 1
FIRST=true
for var in "$@"
do
if "$FIRST" = true; then
FOLDER="${FOLDER}/${var}"
else
FOLDER="${FOLDER}_${var}"
fi
FIRST=false
done
echo "GAME: ${GAME}"
echo "FOLDER: ${FOLDER}"
make_folders $GAME $FOLDER
solve_game $GAME $@
echo "Creating graphics"
python src/graph.py $FOLDER
| true |
0b405aa9b127eaa5a1c2af6bc2b80c874015b175 | Shell | darwinproject/darwin3 | /tools/run_cpl_test | UTF-8 | 13,219 | 3.46875 | 3 | [
"MIT"
] | permissive | #! /usr/bin/env bash
#---
# Description:
# script to compile and run MITgcm Atmosphere-Ocean coupled set-up
# see, e.g., verification/cpl_aim+ocn/README.md
# or verification_other/cpl_gray+ocn/
# Note: currently these coupled set-up are not covered by "testreport"
# 1) running "./run_cpl_test" show the different steps (+ some options) available
# 2) some options are not available as argument and require editing this script, e.g.,
# a) to run Ocean or Atmos component on more that 1 MPI process (edit: NpOc and Npr)
# b) to use compiler Optimisation (edit: GMKopt)
#---
bdPfx='build'; # build-dir prefix
# Npr :: total number of MPI procs (including 1 for coupler)
# NpOc :: number of MPI procs for Ocean component
# Npr - NpOc - 1 :: number of MPI procs for Atmos. component
#- default:
Npr=3 ; NpOc=1 ;
#Npr=25; NpOc=12;
# MTH :: genmake2 option for multi-threading compilation
MTH=
# GMKopt :: other genmake2 option (empty --> use compiler optimisation)
GMKopt='-devel'
#GMKopt='-ieee'
#GMKopt=
rnkO=1 ; rnkA=`expr $rnkO + $NpOc`
MTHo=
MTHa=
#- parse options:
if [ $# -ge 1 ] ; then if test $1 = '-mth' ; then
MTH='-omp' ; shift
if test -f input_ocn/eedata.mth ; then MTHo=$MTH ; fi
if test -f input_atm/eedata.mth ; then MTHa=$MTH ; fi
fi ; fi
sfx=''; chkArg=$# ;
if [ $chkArg -eq 2 ]; then
sfx=$2 ; nInpAlt=`ls -1 -d input_???.$sfx 2> /dev/null | wc -l`
if [ $nInpAlt -eq 0 ]; then chkArg=0
echo " no second set of input-dir matching suffix '.$sfx'"
else chkArg=1 ; fi
fi
if [ $chkArg -ge 1 ]; then
if [ $1 -lt 0 -o $1 -gt 5 ]; then chkArg=0 ; fi
#- allows more argument for building step (step=1)
if [ $1 -eq 1 ]; then chkArg=1 ; fi
fi
if [ $chkArg -ne 1 ]; then
echo 'Usage: '`basename $0`' [typ] step [opt-arg]'
echo ' => test coupled set-up on linux box (1.cpu)'
echo ' typ = -mth : compile and run (if eedata.mth) 2-threads for ocn & atm'
echo ' step = 0 : clean all directories'
echo ' step = 1 : compile the 3 executables (cpl,ocn,atm);'
echo ' opt-arg: -of Optfile_Name : using option-file "Optfile_Name"'
echo ' step = 2 : copy input files and dir(s);'
echo " step = 3 : run with $Npr mpi processes"
echo ' step = 4 : check the results'
echo ' step = 5 : remove output files in rank_0,1,2 dir.'
echo ' opt-arg (for step 2 & 4): suffix of second set of input-dir to use'
exit
fi
kpr=$1
curDir=`pwd`
#============================================================================
if test $kpr = 0 ; then
rm -f pr_group std_outp comp_res.{ocn,atm,land,icTh,icDy,pTr}
rm -f ${bdPfx}_???/TTT.*make.* ${bdPfx}_???/TTT.mkdepend.*
/bin/rm -r -f rank_? rank_1? rank_2?
if test -f ${bdPfx}_cpl/Makefile ; then cd ${bdPfx}_cpl ; make Clean ; cd .. ; fi
if test -f ${bdPfx}_ocn/Makefile ; then cd ${bdPfx}_ocn ; make Clean ; cd .. ; fi
if test -f ${bdPfx}_atm/Makefile ; then cd ${bdPfx}_atm ; make Clean ; cd .. ; fi
fi
if test $kpr = 5 ; then
echo 'remove output files in rank_0,1,2 dir.'
rm -f pr_group std_outp comp_res.{ocn,atm,land,icTh,icDy,pTr}
test -f rank_0/Coupler.0000.clog && rm -f rank_0/Coupler.0000.clog
if test -d rank_$rnkO ; then
( cd rank_$rnkO ; rm -f *.txt *.log STD???.00?? UV-*.00??.clog
mkdir tmp_trash ; mv *.data *.meta tmp_trash
listLNK=`find tmp_trash -type l`
if test "x$listLNK" != x ; then mv $listLNK .
echo -n " move back to rank_$rnkO : " ; echo $listLNK | sed "s|tmp_trash/||g"
fi
/bin/rm -rf tmp_trash )
fi
if test -d rank_$rnkA ; then
( cd rank_$rnkA ; rm -f *.txt *.log STD???.00?? UV-*.00??.clog
mkdir tmp_trash ; mv *.data *.meta tmp_trash
listLNK=`find tmp_trash -type l`
if test "x$listLNK" != x ; then mv $listLNK .
echo -n " move back to rank_$rnkA : " ; echo $listLNK | sed "s|tmp_trash/||g"
fi
/bin/rm -rf tmp_trash )
fi
fi
if test $kpr = 1 ; then
#- choice of the optfile:
# default: take a local one in dir verification with sufix '+mpi'
nbOpF=`ls ../linux_* | grep '+mpi' 2> /dev/null | wc -l`
# or take the one given as argument:
if [ $# -ge 3 ]; then
if test $2 = '-of' -a -f $3 ; then nbOpF=-1 ; OPTFILE=$3 ; fi
fi
if test $nbOpF = 1 ; then
OPTFILE=`ls ../linux_* | grep '+mpi'`
elif [ $nbOpF -ge 2 ] ; then
echo "pick the 1rst of these ( $nbOpF ) optfiles:"
ls ../linux_* | grep '+mpi'
OPTFILE=`ls ../linux_* | grep '+mpi' | head -1`
elif [ $nbOpF -ne -1 ] ; then
echo "Pb in finding optfile: found $nbOpF :"
ls ../linux_* | grep '+mpi' ; exit
fi
zz=`grep '^ *FC=' $OPTFILE | tail -1`
echo " Using optfile: $OPTFILE (compiler: $zz) $MTH"
zz=`echo $OPTFILE | grep -c '^\/'`
if test $zz = 0 ; then OPTFILE="../$OPTFILE" ; fi
#---
echo '==== compile coupler:'
cd ${bdPfx}_cpl
echo ' --- genmake2 (cpl):'
../../../tools/genmake2 -of $OPTFILE -mpi $GMKopt > TTT.genmake.$$ 2>&1
RetVal=$? ; tail -5 TTT.genmake.$$
if test "x$RetVal" != x0 ; then
echo "Error in genmake2 (cpl)" ; exit 11
fi
echo ' --- make depend (cpl):'
make depend > TTT.mkdepend.$$ 2>&1
RetVal=$? ; tail -5 TTT.mkdepend.$$
if test "x$RetVal" != x0 ; then
echo "Error in mkdepend (cpl)" ; exit 12
fi
echo ' --- make (cpl):' ; touch TTT.make.$$
#do_make_syntax.sh obj > TTT.make.$$ 2>&1
make >> TTT.make.$$ 2>&1
RetVal=$? ; tail -10 TTT.make.$$
if test "x$RetVal" != x0 ; then
echo "Error in make (cpl)" ; exit 13
fi
echo ' ' ; cd $curDir
echo '==== compile OGCM:'
cd ${bdPfx}_ocn
echo ' --- genmake2 (ocn):'
../../../tools/genmake2 -of $OPTFILE -mpi $MTHo $GMKopt > TTT.genmake.$$ 2>&1
RetVal=$? ; tail -5 TTT.genmake.$$
if test "x$RetVal" != x0 ; then
echo "Error in genmake2 (ocn)" ; exit 21
fi
echo ' --- make depend (ocn):'
make depend > TTT.mkdepend.$$ 2>&1
RetVal=$? ; tail -10 TTT.mkdepend.$$
if test "x$RetVal" != x0 ; then
echo "Error in mkdepend (ocn)" ; exit 22
fi
echo ' --- make (ocn):' ; touch TTT.make.$$
#do_make_syntax.sh obj > TTT.make.$$ 2>&1
make >> TTT.make.$$ 2>&1
RetVal=$? ; tail -10 TTT.make.$$
if test "x$RetVal" != x0 ; then
echo "Error in make (ocn)" ; exit 23
fi
echo ' ' ; cd $curDir
echo '==== compile AGCM:'
cd ${bdPfx}_atm
echo ' --- genmake2 (atm):'
../../../tools/genmake2 -of $OPTFILE -mpi $MTHa $GMKopt > TTT.genmake.$$ 2>&1
RetVal=$? ; tail -5 TTT.genmake.$$
if test "x$RetVal" != x0 ; then
echo "Error in genmake2 (atm)" ; exit 31
fi
echo ' --- make depend (atm):'
make depend > TTT.mkdepend.$$ 2>&1
RetVal=$? ; tail -10 TTT.mkdepend.$$
if test "x$RetVal" != x0 ; then
echo "Error in mkdepend (atm)" ; exit 32
fi
echo ' --- make (atm):' ; touch TTT.make.$$
#do_make_syntax.sh obj > TTT.make.$$ 2>&1
make >> TTT.make.$$ 2>&1
RetVal=$? ; tail -10 TTT.make.$$
if test "x$RetVal" != x0 ; then
echo "Error in make (atm)" ; exit 33
fi
echo ' ' ; cd $curDir
ls -l ${bdPfx}_???/mitgcmuv
fi
if test $kpr = 2 ; then
echo 'rm dir:' rank_? rank_1? rank_2?
/bin/rm -r -f rank_? rank_1? rank_2?
n=0 ; inpDr='input_cpl';
mkdir rank_$n
( cd rank_$n
if test -d ../$inpDr.$sfx ; then
echo 'Link files from dir:' $inpDr.$sfx '->' rank_$n
ln -s ../$inpDr.$sfx/* .
fi
echo 'Link files from dir:' $inpDr '->' rank_$n
ln -s ../$inpDr/* .
if test -x prepare_run ; then ./prepare_run ; fi
)
n=$rnkO ; inpDr='input_ocn';
mkdir rank_$n
( cd rank_$n
if test -d ../$inpDr.$sfx ; then
echo 'Link files from dir:' $inpDr.$sfx '->' rank_$n
ln -s ../$inpDr.$sfx/* .
fi
echo 'Link files from dir:' $inpDr '->' rank_$n
ln -s ../$inpDr/* .
if test -x prepare_run ; then ./prepare_run ; fi
if test "x$MTHo" != x ; then
echo " MTH run: mv -f eedata.mth eedata"
if test -h eedata ; then rm -f eedata ; fi
mv -f eedata.mth eedata
fi
)
n=`expr $n + 1`
while [ $n -le $NpOc ] ; do
ln -s rank_$rnkO rank_$n
n=`expr $n + 1`
done
n=$rnkA ; inpDr='input_atm';
mkdir rank_$n
( cd rank_$n
if test -d ../$inpDr.$sfx ; then
echo 'Link files from dir:' $inpDr.$sfx '->' rank_$n
ln -s ../$inpDr.$sfx/* .
fi
echo 'Link files from dir:' $inpDr '->' rank_$n
ln -s ../$inpDr/* .
if test -x prepare_run ; then ./prepare_run ; fi
if test "x$MTHa" != x ; then
echo " MTH run: mv -f eedata.mth eedata"
if test -h eedata ; then rm -f eedata ; fi
mv -f eedata.mth eedata
fi
)
n=`expr $n + 1`
while [ $n -lt $Npr ] ; do
ln -s rank_$rnkA rank_$n
n=`expr $n + 1`
done
fi
# Step kpr=3: build the MPMD process-group file (one line per MPI rank) and
# launch the coupled run (1 coupler + NpOc ocean + NpAt atmosphere ranks),
# picking the mpirun invocation style that matches the installed MPI flavour.
if test $kpr = 3 ; then
  runDir=$curDir
# rm -f rank_?/pickup*.ckptA.00?.00?.??ta
  echo $runDir
  tmpfil=TTT.$$
  #--- running on the same node:
  # Fill "list" with $Npr copies of this hostname (all ranks on one node).
  list='' ; nc=0; xx=`hostname`
  while [ $nc -lt $Npr ] ; do list="$list $xx" ; nc=`expr $nc + 1` ; done
  #-- On darwin cluster node (from qrsh session):
# JOB_ID=`qstat | sed -n '3,$ p' | grep " $USER " | awk '{print $1}'`
# NODEFILE="/tmp/$JOB_ID.1.darwin/machines"
# echo " JOB_ID = '$JOB_ID' ; NODEFILE = '$NODEFILE'"
  #-- On ACES cluster (in PBS batch job):
# NODEFILE=$PBS_NODEFILE
  #--- running on different nodes:
# ls -l $NODEFILE
# nprc=`cat $NODEFILE | uniq | wc -l`
# if [ $nprc -ge $Npr ] ; then
#  list=`cat $NODEFILE | uniq | head -$Npr`
# else
#  list=`cat $NODEFILE | head -$Npr`
# fi
  # pr_group format: "<host> <new-group-flag> <executable>"; rank 0 runs the
  # coupler, the next NpOc ranks the ocean, remaining ranks the atmosphere.
  nc=0; nn=0; dd1=cpl ;
  rm -f pr_group ; touch pr_group
  for xx in $list
  do
    echo $xx $nn $curDir/${bdPfx}_$dd1/mitgcmuv >> pr_group
    nc=`expr $nc + 1`
    if [ $nc -le $NpOc ] ; then dd1=ocn ; else dd1=atm ; fi
    nn=1
  done
  NpAt=`expr $Npr - 1 - $NpOc`
  # MPMD command line used by Hydra-style launchers (mpich2 / openmpi).
  RunOpt="-np 1 ./${bdPfx}_cpl/mitgcmuv"
  RunOpt="$RunOpt : -np $NpOc ./${bdPfx}_ocn/mitgcmuv"
  RunOpt="$RunOpt : -np $NpAt ./${bdPfx}_atm/mitgcmuv"
  cd $runDir
  if test "x$MTH" != x ; then
    export OMP_NUM_THREADS=2 ; export KMP_STACKSIZE=400m
    if test "x$MTHo" != x ; then
      echo -n " run OCN ($MTHo) with $OMP_NUM_THREADS threads ;"
    fi
    if test "x$MTHa" != x ; then
      echo -n " run ATM ($MTHa) with $OMP_NUM_THREADS threads ;"
    fi
    echo ""
  fi
  # Detect the MPI flavour from the path of the mpirun in $PATH.
  mpich=`which mpirun`
  echo $mpich | grep 'mpich-mx' > /dev/null 2>&1
  mpichmx=$?
  echo $mpich | grep 'mpich-1' > /dev/null 2>&1
  mpich1=$?
  echo $mpich | grep 'mpich2' > /dev/null 2>&1
  mpich2=$?
  echo $mpich | grep 'openmpi' > /dev/null 2>&1
  opnmpi=$?
  if test $mpich1 == 0 ; then
#  /usr/local/pkg/mpi/mpi-1.2.4..8a-gm-1.5/pgi/bin/mpirun.ch_gm -pg pr_group -wd $runDir --gm-kill 5 -v ./${bdPfx}_cpl/mitgcmuv > std_outp 2>&1
    #- with mpich-1 (on danton, old aces: ao, geo, itrda):
    echo "execute 'mpirun -p4pg pr_group -v ./${bdPfx}_cpl/mitgcmuv' :"
    mpirun -p4pg pr_group -v ./${bdPfx}_cpl/mitgcmuv > std_outp 2>&1
  elif test $mpichmx == 0 ; then
    #- with mpich-mx (on beagle):
    echo "execute 'mpirun -pg pr_group -v ./${bdPfx}_cpl/mitgcmuv' :"
    mpirun -pg pr_group -v ./${bdPfx}_cpl/mitgcmuv > std_outp 2>&1
  elif test $mpich2 == 0 -o $opnmpi == 0 ; then
    #- with Hydra mpich2 (on baudelaire) or with openmpi:
    echo "execute 'mpirun $RunOpt' :"
    mpirun $RunOpt > std_outp 2>&1
  else
    #- new mpich (mpich2) installation often just put in "mpich" dir
    echo "execute 'mpirun $RunOpt' :"
    mpirun $RunOpt > std_outp 2>&1
  fi
  # Show the end of the run log and the most recent pickup (restart) files.
  tail -20 std_outp
  ls -l rank_$rnkO/pickup.ckpt?.*data | tail -1
  ls -l rank_$rnkA/pickup.ckpt?.*data | tail -1
fi
# Step kpr=4: compare the run's STDOUT against the reference results with the
# external $HOME/bin/comp_res tool, once per active package (seaice, ptracers,
# land, thSIce), renaming comp_res.log after each comparison.
if test $kpr = 4 ; then
  CompRes="$HOME/bin/comp_res"
  if test -x $CompRes ; then
    if test "x$sfx" = x ; then rfx='0000' ; else rfx=$sfx ; fi
    if test -f rank_$rnkO/STDOUT.0000 ; then
      echo '==> check Ocean output:'
      $CompRes rank_$rnkO/STDOUT.0000 results/ocnSTDOUT.$rfx
      mv -f comp_res.log comp_res.ocn
      # usePkg = 1 when the package flag is set to .TRUE. in data.pkg
      usePkg=`grep -i '^ *useSEAICE *=' rank_$rnkO/data.pkg | tail -n 1 | grep -i -c '= *\.TRUE\.'`
      if [ $usePkg -ge 1 ] ; then
        echo '==> check Seaice output:'
        $CompRes rank_$rnkO/STDOUT.0000 results/ocnSTDOUT.$rfx S
        mv -f comp_res.log comp_res.icDy
      fi
      usePkg=`grep -i '^ *usePTRACERS *=' rank_$rnkO/data.pkg | tail -n 1 | grep -i -c '= *\.TRUE\.'`
      if [ $usePkg -ge 1 ] ; then
        echo '==> check pTracers output:'
        # nTr = number of passive tracers declared in data.ptracers
        nTr=`grep -i '^ *PTRACERS_numInUse *=' rank_$rnkO/data.ptracers \
           | tail -n 1 | sed 's/^.*=//' | sed 's/,.*$//'`
        $CompRes rank_$rnkO/STDOUT.0000 results/ocnSTDOUT.$rfx $nTr
        mv -f comp_res.log comp_res.pTr
      fi
      echo ' '
    else echo "No Ocean output file in rank_$rnkO" ; fi
    if test -f rank_$rnkA/STDOUT.0000 ; then
      echo '==> check Atmos output:'
      $CompRes rank_$rnkA/STDOUT.0000 results/atmSTDOUT.$rfx
      mv -f comp_res.log comp_res.atm
      usePkg=`grep -i '^ *useLand *=' rank_$rnkA/data.pkg | tail -n 1 | grep -i -c '= *\.TRUE\.'`
      if [ $usePkg -ge 1 ] ; then
        echo '==> check Land output:'
        $CompRes rank_$rnkA/STDOUT.0000 results/atmSTDOUT.$rfx L
        mv -f comp_res.log comp_res.land
      fi
      usePkg=`grep -i '^ *useThSIce *=' rank_$rnkA/data.pkg | tail -n 1 | grep -i -c '= *\.TRUE\.'`
      if [ $usePkg -ge 1 ] ; then
        echo '==> check thSIce output:'
        $CompRes rank_$rnkA/STDOUT.0000 results/atmSTDOUT.$rfx I
        mv -f comp_res.log comp_res.icTh
      fi
      echo ' '
    else echo "No Atmos output file in rank_$rnkA" ; fi
  else
    echo "No file '$CompRes' to run ==> skip step: $kpr "
  fi
fi
exit 0
| true |
ac3242888f93075d2b1ff6eb6dbb7a11c7815e17 | Shell | TBrade-hub/AAD-Group-Project | /build-customer | UTF-8 | 190 | 2.65625 | 3 | [] | no_license | #!/bin/sh
# Build the CustomerSite front end. Requires nodejs/npm to be installed.
# NOTE(review): pushd/popd are bash-only builtins; the #!/bin/sh shebang
# should really be bash — confirm how this script is invoked.
if command -v npm > /dev/null 2> /dev/null; then
    # Guard the pushd: never run npm in the wrong directory.
    pushd CustomerSite || exit 1
    npm i
    npm run build
    popd
else
    # Report the missing toolchain on stderr and fail, so CI notices.
    echo "Failed to find npm, please install nodejs on your system" >&2
    exit 1
fi
| true |
bd75877d97e313df9906a1eeb54201092d09e1f1 | Shell | kkolberg/pypytest | /splitter/run.sh | UTF-8 | 555 | 2.96875 | 3 | [] | no_license | #!/bin/bash
# Run the splitter app under PyPy inside a dedicated virtualenv.
# Usage: run.sh <config-s3-uri>
CONFIG_S3=$1
if [ -z "$CONFIG_S3" ]; then
    echo "Usage: $0 <config-s3-uri>" >&2
    exit 1
fi
rm -rf './temp'
mkdir './temp'
echo "----- starting virtualenv -----"
sudo -H pip install virtualenv
virtualenv -p /usr/bin/pypy ./pythonScript/.venv
echo "----- finished virtualenv -----"
echo "----- activating virtualenv and install pip-----"
. ./pythonScript/.venv/bin/activate
pip install -r ./pythonScript/requirements.txt
echo "----- activated virtualenv and installed pip -----"
echo "----- starting app.py with config $CONFIG_S3 -----"
# Quote the config argument so S3 URIs with shell metacharacters survive.
pypy ./pythonScript/app.py --config "$CONFIG_S3"
echo "----- app.py finished -----" | true |
2ccfe8dba52fd4b75b304edacabc8277baaeecd3 | Shell | mono/release | /rpmvercmp/test | UTF-8 | 620 | 3.09375 | 3 | [] | no_license | #!/bin/bash
# Why something had to be used besides ls -v : fails sorting the files mono source tarball versions
# rpmvercmp fixed the below version problem, but introduced some problems in that 1.1.10.1 and 1.1.10-1 were equivalent, thus had to come up with currently in rpmvercmp.c
# This is what fails with ls -v *
versions=(
mono-1.1.9.2.tar.gz
mono-1.1.10.1.tar.gz
mono-1.1.10.tar.gz
hey
mono-1.1.9.tar.gz
mono-1.1.9.1.tar.gz
)
# Quote the array expansions so elements are passed verbatim (no word
# splitting or accidental glob expansion against files in the cwd).
echo "${versions[@]}"
./rpmvercmp "${versions[@]}"
versions=(
1.1.9.2
1.1.10-1
1.1.10.1
1.1.10
1.1.9
1.1.9.1
)
echo "${versions[@]}"
./rpmvercmp "${versions[@]}"
| true |
c9ab7ab18349834fffc523f2d3906f16f1075af9 | Shell | brisbane/azure-hpc | /pbs-scripts/mpipingtest-pbs.sh | UTF-8 | 939 | 2.65625 | 3 | [] | no_license | #!/bin/bash -x
# For a SLES 12 SP1 HPC cluster
#source /opt/intel/impi/5.0.3.048/bin64/mpivars.sh
# For a CentOS-based HPC cluster
source /opt/intel/impi/2017.2.174/bin64/mpivars.sh
export I_MPI_FABRICS=shm:dapl
# THIS IS A MANDATORY ENVIRONMENT VARIABLE AND MUST BE SET BEFORE RUNNING ANY JOB
# Setting the variable to shm:dapl gives best performance for some applications
# If your application doesn’t take advantage of shared memory and MPI together, then set only dapl
export I_MPI_DAPL_PROVIDER=ofa-v2-ib0
# THIS IS A MANDATORY ENVIRONMENT VARIABLE AND MUST BE SET BEFORE RUNNING ANY JOB
export I_MPI_DYNAMIC_CONNECTION=0
# THIS IS A MANDATORY ENVIRONMENT VARIABLE AND MUST BE SET BEFORE RUNNING ANY JOB
# Number of MPI ranks = number of lines in the PBS host file (quote the path;
# read via redirection instead of the needless `cat | wc`).
NP=$(wc -l < "$PBS_NODEFILE")
# Command line to run the job
mpirun -np "$NP" -machinefile "$PBS_NODEFILE" -env I_MPI_FABRICS=dapl -env I_MPI_DAPL_PROVIDER=ofa-v2-ib0 -env I_MPI_DYNAMIC_CONNECTION=0 IMB-MPI1 pingpong
| true |
7d430d2deebd0599c918e010eb1d9726eb7a87a6 | Shell | jaypipes/placement-bench | /all_benchmarks.sh | UTF-8 | 2,266 | 3.171875 | 3 | [] | no_license | #! /usr/bin/env bash
# Run the full placement-benchmark matrix, appending every result row to one
# dated CSV file.
results_file="$(date --utc +"%Y-%m-%d")-results.csv"
rm -f "$results_file"
echo "Results file: $results_file"
echo "Running benchmarks..."
# The CSV header flag is passed only to the very first benchmark run; holding
# it in an array lets it vanish cleanly (no stray empty argument) once cleared.
header_row_args=(--results-csv-print-header-row)
for rows in 1 2 4 8 16; do
    for schema in placement; do
        for placement_strategy in pack spread random random-pack random-spread; do
            for filter_strategy in db python; do
                for partition_strategy in none modulo; do
                    echo -n "  $schema | $placement_strategy | $filter_strategy | $partition_strategy | $rows rows | 1 worker ... "
                    # We do a serial run, saving the instance requests
                    # to an output file that we then read in to perform
                    # the same scenario tests for the multi-worker variants
                    python benchmark.py --workers=1 \
                        --quiet --results-format=csv "${header_row_args[@]}" \
                        --rows="$rows" \
                        --out-requests-file=requests.yaml \
                        --schema="$schema" \
                        --filter-strategy="$filter_strategy" \
                        --placement-strategy="$placement_strategy" \
                        --partition-strategy="$partition_strategy" \
                        --results-file="$results_file"
                    echo "OK"
                    header_row_args=()
                    for workers in 2 4 8; do
                        echo -n "  $schema | $placement_strategy | $filter_strategy | $partition_strategy | $rows rows | $workers workers ... "
                        python benchmark.py --workers="$workers" \
                            --quiet --results-format=csv "${header_row_args[@]}" \
                            --rows="$rows" \
                            --in-requests-file=requests.yaml \
                            --schema="$schema" \
                            --filter-strategy="$filter_strategy" \
                            --placement-strategy="$placement_strategy" \
                            --partition-strategy="$partition_strategy" \
                            --results-file="$results_file"
                        echo "OK"
                    done
                done
            done
        done
    done
done
| true |
6039d2cea653240157aabfa671f1a0e07ab4acdb | Shell | etrieschman/template-repo | /tools/bootstrap.sh | UTF-8 | 1,011 | 3.625 | 4 | [] | no_license | #!/bin/bash
# Bootstrap the pyenv-based Python environment for this project, then install
# its Python and JS dependencies.
# Expects PYTHON_VER and VIRTUAL_PY to be provided by script_setup.sh.
. "$(dirname "$0")/script_setup.sh"
echo "BOOTSTRAPPING PYTHON ENVIRONMENT"
# NOTE(review): captured but never used — kept only for the pending TODO below.
installed_version=$(pyenv versions | grep "$PYTHON_VER")
# TODO ask Devops about the python SSL missing if python is not reinstalled from scratch
echo "Installing Python version '$PYTHON_VER'..."
if ! pyenv install "$PYTHON_VER"; then
    echo "ERROR: Unable to install required python version" >&2
    exit 1
fi
virtual_env=$(pyenv versions | grep "$VIRTUAL_PY")
case "$virtual_env" in
    *"$VIRTUAL_PY"*)
        echo "Python virtual env '$VIRTUAL_PY' is already created"
        ;; # the PYTHON_VER is already installed - nothing to do
    *)
        echo "Python virtual env '$VIRTUAL_PY' is not created - creating now..."
        if ! pyenv virtualenv "$PYTHON_VER" "$VIRTUAL_PY"; then
            echo "ERROR: Unable to create python virtual env" >&2
            exit 1
        fi
        ;;
esac
echo "Setting local python virtual env"
pyenv local "$VIRTUAL_PY"
echo "INSTALL DEPENDENCIES"
pip install -r requirements.txt
pip install -r requirements-dev.txt
yarn install
| true |
977c7ef045fa6ae6fd9cd678076435feb5599d50 | Shell | dsedivec/macos_ansible | /roles/software_emacs/files/rebase_org_mode.sh | UTF-8 | 556 | 3.390625 | 3 | [] | no_license | #!/usr/bin/env bash
set -euo pipefail

UPSTREAM_REMOTE=upstream
UPSTREAM_BRANCH=master
UPSTREAM_URL=https://code.orgmode.org/bzg/org-mode.git

# Make sure the upstream remote exists and points at the canonical URL.
if current_url=$(git config remote."$UPSTREAM_REMOTE".url); then
    [ "$current_url" = "$UPSTREAM_URL" ] \
        || git remote set-url "$UPSTREAM_REMOTE" "$UPSTREAM_URL"
else
    git remote add "$UPSTREAM_REMOTE" "$UPSTREAM_URL"
fi

git fetch "$UPSTREAM_REMOTE"

# Rebase onto upstream. Exit 111 when HEAD moved, 0 when already up to date,
# so callers can distinguish "something changed" from "nothing to do".
head_before=$(git rev-parse HEAD)
git rebase "$UPSTREAM_REMOTE/$UPSTREAM_BRANCH"
head_after=$(git rev-parse HEAD)

if [ "$head_after" = "$head_before" ]; then
    exit 0
else
    exit 111
fi
| true |
c98cbf4b5d2234a5e17e075e64b1b2273dccee45 | Shell | braindeaead/OS | /Lab 1/help.sh | UTF-8 | 2,901 | 3.234375 | 3 | [] | no_license | #!/bin/zsh
# Print the application's help text (user-facing text is in Russian):
# first the list of supported actions, then the meaning of each exit code.
# NOTE(review): in the text below, the "strlen" line repeats the "reverse"
# description and "риложение" in the "exit" line drops a leading "п" —
# fix this user-facing text in a separate change (it is runtime output).
help() {
# Supported actions: calc / search / reverse / strlen / log / exit / help /
# interactive.
echo "
Функции данного приложения:
calc - приложение выполняет функции калькулятора. Если вторым аргументом передан ключ sum/sub/mul/div, приложение выводит на экран сумму/разность/произведение/частное третьего и четвертого аргумента, являющихся целыми числами.
search - приложение производит рекурсивный поиск по содержимому файлов в директории, указанной вторым аргументом, и выводит на экран строки в соответствии с регулярным выражением, заданным третьим аргументом.
reverse - приложение в обратном порядке записывает содержимое файла, имя которого задано вторым аргументом, в файл с именем, переданным третьим аргументом.
strlen - приложение в обратном порядке записывает содержимое файла, имя которого задано вторым аргументом, в файл с именем, переданным третьим аргументом.
log - приложение выводит строки файла /var/log/anaconda/X.log, содержащие предупреждения и информационные сообщения.
exit - риложение завершает свою работу с кодом возврата, заданным вторым параметром. Если код возврата не задан, по умолчанию используется 0.
help - приложение выводит справку по использованию, в которой перечислены все действия и их аргументы.
interactive - приложение переходит в интерактивный режим работы, предоставляя пользователю интерактивное меню с выбором действий. "
# Exit codes: 0 = OK; -1..-5 are specific argument / file errors (see text).
echo "
Значение кодов завершения приложения:
0 - ошибок нет.
-1 - вызываемый скрипт не существует в данной директории.
-2 - указано недопустимое количество аргументов.
-3 - указано недопустимое действие (калькулятор).
-4 - обращение к несуществующему файлу/директории.
-5 - недопустимый вид (тип) аргументов. "
}
| true |
9f35d9e9811936c11b86b8b73a60b129e88cd4a7 | Shell | kisskillkiss/boot2docker-xhyve | /xhyveexec.sh | UTF-8 | 576 | 3.265625 | 3 | [] | no_license | #!/bin/sh
# Launch the xhyve VM runner (./xhyverun.sh) in a NEW terminal window/pane of
# whichever macOS terminal app this script was started from ($TERM_PROGRAM).
# The command cd's back into the current directory first because a freshly
# opened terminal starts in $HOME; "&& exit" closes the window when done.
CMD="cd '$(pwd)'; sudo ./xhyverun.sh && exit"
if [ "${TERM_PROGRAM}" = "Apple_Terminal" ] ; then
# Apple Terminal: "do script" opens a new window running CMD.
osascript <<END
tell application "Terminal"
do script "${CMD}"
end tell
END
elif [ "${TERM_PROGRAM}" = "iTerm.app" ] ; then
# iTerm: Cmd-Shift-D splits the current tab, CMD runs in the new pane,
# then Cmd-[ switches focus back to the original pane.
osascript <<END
tell application "iTerm"
tell application "System Events" to keystroke "d" using {shift down, command down}
tell the current session of current terminal
write text "${CMD}"
end tell
tell application "System Events" to keystroke "[" using {command down}
end tell
END
fi
| true |
1024116f9187f5ea614888ef26f4bbc3fd603516 | Shell | ericmur/Angular4 | /config/servers/staging/resque-scheduler.sh | UTF-8 | 1,405 | 3.5 | 4 | [] | no_license | #!/bin/sh -e
# SysV-style init script for the Resque scheduler of the docyt-staging app.
app_name="docyt-staging"
app_dir="/home/deploy/${app_name}/current"
pidfile="${app_dir}/tmp/pids/resque-scheduler.pid"
run_as_user="deploy"
# Seconds to wait between "stop" and "start" when restarting.
sleep_time_during_restart=5
# Signal escalation for --retry: QUIT, wait 30s; INT, wait 10s; KILL, wait 5s.
stop_schedule="QUIT/30/INT/10/KILL/5"
bundler="/usr/local/bin/bundle"
rails_environment="staging"
stdout_log="${app_dir}/log/resque-scheduler.log"
# Pull in deployment-specific environment (DB credentials etc.).
. /etc/docyt/env.sh
case "$1" in
  start)
    # I'm assuming that you are using bundler. If you are using rip or
    # something else, you'll need to change this. Remember to
    # keep the double-dash; e.g.: --startas CMD -- ARGS
    # Everything after "--" is the rake task plus its environment settings;
    # BACKGROUND=yes makes resque-scheduler daemonize and write $pidfile.
    start-stop-daemon --start --pidfile ${pidfile} \
      --chuid ${run_as_user} --chdir ${app_dir} \
      --startas ${bundler} exec rake environment resque:scheduler -- \
      PIDFILE=${pidfile} BACKGROUND=yes DYNAMIC_SCHEDULE=yes APP_NAME=${app_name} RAILS_ENV=${rails_environment} LOGFILE=${stdout_log}
    ;;
  reload)
    start-stop-daemon --stop --pidfile ${pidfile} --signal HUP
    ;;
  graceful-stop)
    start-stop-daemon --stop --pidfile ${pidfile} --signal QUIT
    ;;
  quick-stop)
    start-stop-daemon --stop --pidfile ${pidfile} --signal INT
    ;;
  stop)
    start-stop-daemon --stop --pidfile ${pidfile} --retry=${stop_schedule}
    ;;
  restart)
    # Re-invoke this script so stop/start stay in one place.
    $0 stop
    sleep ${sleep_time_during_restart}
    $0 start
    ;;
  *)
    echo "Usage: $0 {start|stop|graceful-stop|quick-stop|restart|reload}"
    exit 1
    ;;
esac
5f13446573cc2ac7449c329ef6f171d3b8cba6b6 | Shell | beeverycreative/BEEwebPi | /src/filesystem/home/root/bin/map_iface | UTF-8 | 347 | 3.484375 | 3 | [] | no_license | #!/bin/sh
set -e

IFACE=$1

# Print the mapped interface name on stdout and record the decision in the
# log file. A missing /boot/beewebpi-network.txt (grep failure) falls through
# to the stock Raspbian configuration.
if ! grep -q "^\s*iface $IFACE-beewebpi " /boot/beewebpi-network.txt
then
    echo "$IFACE-raspbian"
    echo "Using original Raspbian configuration for configuring $IFACE..." >> /var/log/map_iface.log
else
    echo "$IFACE-beewebpi"
    echo "Using /boot/beewebpi-network.txt for configuring $IFACE..." >> /var/log/map_iface.log
fi
| true |
c1ca6c047d95a5abbf1bdd58ad6076d8917a7ce9 | Shell | kalisio/kalisioscope | /generate.sh | UTF-8 | 4,573 | 2.8125 | 3 | [] | no_license | #!/bin/bash
# Generate every icon / logo size variant for a product.
#
# Usage: generate.sh <product> <output-dir>
#   Reads the 2048px master images from <product>/ and writes all derived
#   sizes to <output-dir>/<product>/.
PRODUCT=$1
OUTPUT=$2

mkdir -p "$OUTPUT/$PRODUCT"

# resize_variants <source-image> <output-name-prefix> <WxH>...
# Writes one resized copy of <source-image> per requested geometry as
# $OUTPUT/$PRODUCT/<prefix>-<WxH>.png, mirroring the original per-size
# mogrify calls (same descending order, same source file each time).
resize_variants() {
    local src=$1 prefix=$2 size
    shift 2
    for size in "$@"; do
        mogrify -resize "$size" -write "$OUTPUT/$PRODUCT/$prefix-$size.png" "$src"
    done
}

square_sizes=(1024x1024 512x512 256x256 192x192 128x128 64x64)
icon_sizes=("${square_sizes[@]}" 48x48 32x32 24x24)
banner_sizes=(1024x336 512x168 256x84 192x64 128x42)

# Generate icons
cp "$PRODUCT/$PRODUCT-icon-2048x2048.png" "$OUTPUT/$PRODUCT/$PRODUCT-icon-2048x2048.png"
resize_variants "$PRODUCT/$PRODUCT-icon-2048x2048.png" "$PRODUCT-icon" "${icon_sizes[@]}"

# Generate square logos with black text
cp "$PRODUCT/$PRODUCT-logo-black-2048x2048.png" "$OUTPUT/$PRODUCT/$PRODUCT-logo-black-2048x2048.png"
resize_variants "$PRODUCT/$PRODUCT-logo-black-2048x2048.png" "$PRODUCT-logo-black" "${square_sizes[@]}"

# Generate square logos with white text (recolor the black master, then resize)
convert "$OUTPUT/$PRODUCT/$PRODUCT-logo-black-2048x2048.png" -fuzz 1% -fill white -opaque black "$OUTPUT/$PRODUCT/$PRODUCT-logo-white-2048x2048.png"
resize_variants "$OUTPUT/$PRODUCT/$PRODUCT-logo-white-2048x2048.png" "$PRODUCT-logo-white" "${square_sizes[@]}"

# Generate banner logos with black text
cp "$PRODUCT/$PRODUCT-logo-black-2048x672.png" "$OUTPUT/$PRODUCT/$PRODUCT-logo-black-2048x672.png"
resize_variants "$PRODUCT/$PRODUCT-logo-black-2048x672.png" "$PRODUCT-logo-black" "${banner_sizes[@]}"

# Generate banner logos with white text
convert "$OUTPUT/$PRODUCT/$PRODUCT-logo-black-2048x672.png" -fuzz 1% -fill white -opaque black "$OUTPUT/$PRODUCT/$PRODUCT-logo-white-2048x672.png"
resize_variants "$OUTPUT/$PRODUCT/$PRODUCT-logo-white-2048x672.png" "$PRODUCT-logo-white" "${banner_sizes[@]}"
| true |
26c1cce4b3695bc625efe36602268956b22833d7 | Shell | WhatEason/Aurora_AWTK | /resource/scripts/create_assets_zip.sh | UTF-8 | 470 | 3.390625 | 3 | [] | no_license | #!/bin/bash
# Package an AWTK assets directory into assets.zip, stripping generated
# artifacts (inc/ trees and source .xml files) from the copy first.
SRC=$1
if [ -z "$SRC" ]
then
    echo "Usage: $0 assets_dir"
    # NOTE(review): historical behavior exits 0 on a usage error; kept so
    # existing callers that ignore the status keep working.
    exit 0
fi
rm -rf temp
rm -f assets.zip
mkdir temp
cp -rf "$SRC" temp
# Guard the cd: if the copy failed, the rm -rvf calls below must NOT run
# against whatever directory we happen to be in.
# NOTE(review): assumes the copied directory is literally named "assets" —
# confirm callers always pass a path whose basename is "assets".
cd temp/assets || exit 1
for f in *;
do
    if [ -d "$f" ]
    then
        echo "clean " "$f";
        rm -rvf "$f"/inc
        rm -rvf "$f"/raw/ui/*.xml
        rm -rvf "$f"/raw/styles/*.xml
        rm -rvf "$f"/raw/strings/*.xml
    fi
done
cd ..
zip -r ../assets.zip assets
cd ..
rm -rf temp
ls -l assets.zip
| true |
62ebfad42d5e79702410fb25ceee10f5463ce0e6 | Shell | egabancho/dotfiles | /functions | UTF-8 | 2,241 | 4.3125 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Create a new directory (including any missing parents) and enter it.
# The last argument is the directory we end up in.
mkd() {
	mkdir -p "$@" && cd "${@: -1}"
}
# One command to extract them all...
# Dispatches on the archive's extension and extracts it into the current
# directory with the matching tool. All expansions of $1 are quoted so
# paths containing spaces or glob characters work.
function extract () {
	if [ $# -ne 1 ] ; then
		echo "Error: No file specified."
		return 1;
	fi
	if [ -f "$1" ] ; then
		case "$1" in
			*.tar.bz2) tar xvjf "$1" ;;
			*.tar.gz)  tar xvzf "$1" ;;
			*.tar.xz)  tar xvJf "$1" ;;   # xz tarballs (new)
			*.bz2)     bunzip2 "$1" ;;
			*.rar)     unrar x "$1" ;;
			*.gz)      gunzip "$1" ;;
			*.tar)     tar xvf "$1" ;;
			*.tbz2)    tar xvjf "$1" ;;
			*.tgz)     tar xvzf "$1" ;;
			*.xz)      unxz "$1" ;;       # bare .xz files (new)
			*.zip)     unzip "$1" ;;
			*.Z)       uncompress "$1" ;;
			*.7z)      7z x "$1" ;;
			*) echo "'$1' cannot be extracted via extract" ;;
		esac
	else
		echo "'$1' is not a valid file";
	fi
}
# Determine size of a file or total size of a directory.
# Uses apparent size in human units (-sbh) when GNU du supports -b,
# otherwise falls back to plain -sh (BSD du).
function fs() {
	local arg
	if du -b /dev/null > /dev/null 2>&1; then
		arg=-sbh
	else
		arg=-sh
	fi
	# Check the argument COUNT; the original `[[ -n "$@" ]]` implicitly
	# concatenated the arguments (ShellCheck SC2199).
	if [ "$#" -gt 0 ]; then
		du "$arg" -- "$@"
	else
		# No args: report everything in the current directory, dotfiles too.
		du "$arg" .[^.]* ./*
	fi
}
# Use Git’s colored diff when available.
# (Test the command directly instead of the `cmd; if [ $? -eq 0 ]` pattern.)
if hash git &>/dev/null; then
	function diff() {
		git diff --no-index --color-words "$@";
	}
fi;
# Normalize `open` across Linux, macOS, and Windows.
# This is needed to make the `o` function (see below) cross-platform.
# On macOS the native `open` is left untouched; elsewhere an alias is set.
# NOTE(review): $(uname -s) is unquoted (SC2046) — harmless for uname's
# single-word output, but quoting would be safer.
if [ ! $(uname -s) = 'Darwin' ]; then
	# /proc/version mentions "Microsoft" only under WSL.
	if grep -q Microsoft /proc/version; then
		# Ubuntu on Windows using the Linux subsystem
		alias open='explorer.exe';
	else
		alias open='xdg-open';
	fi
fi
# `o` opens the given locations with the platform's `open`; with no
# arguments it opens the current directory instead.
o() {
	if (( $# == 0 )); then
		open .
	else
		open "$@"
	fi
}
# `tre`: `tree` with hidden files and color enabled, skipping VCS and JS
# dependency directories and listing directories first. Output is paged
# through `less` (which exits immediately when it fits on one screen,
# keeping color and line numbers).
function tre() {
	local ignore_dirs='.git|node_modules|bower_components'
	tree -aC -I "$ignore_dirs" --dirsfirst "$@" | less -FRNX
}
# Allow installation of python packages globally with pip by clearing the
# PIP_REQUIRE_VIRTUALENV guard for this single invocation only.
gpip() {
	PIP_REQUIRE_VIRTUALENV= pip "$@"
}
| true |
b43979823908858a0014f22d6b3c841cb0de4d43 | Shell | ajashton/gis-scripts | /shpmerge.sh | UTF-8 | 474 | 3.8125 | 4 | [] | no_license | #!/bin/bash
set -e -u
# shpmerge.sh <outfile> <infile> ...
# Merges every input shapefile into <outfile>: the first input creates the
# layer, subsequent inputs are appended to it.
OUTFILE="$1"
shift
for INFILE in "$@"; do
  if test -e "$OUTFILE"; then
    echo -n "Merging $INFILE ... "
    # Layer name must match the output's basename; $() + quoting replaces
    # the old unquoted backtick substitution.
    ogr2ogr -f "ESRI Shapefile" -update -append \
      "$OUTFILE" "$INFILE" -nln "$(basename "$OUTFILE" .shp)" && \
      echo "OK" || exit 1
  else
    echo -n "Creating $OUTFILE from $INFILE ... "
    ogr2ogr -f "ESRI Shapefile" "$OUTFILE" "$INFILE" && \
      echo "OK" || exit 1
  fi
done
echo "DONE!"
| true |
b6112a4d4e6774410c26d0546e9360a574a753c4 | Shell | uwplse/oeuf | /make_metrics.sh | UTF-8 | 346 | 3.328125 | 3 | [] | no_license | #!/usr/bin/env bash
# Count the "admit"s left in each Coq source file and emit the result as a
# JSON array of {name, count} objects in metrics.json.
set -e
METRICS="metrics.json"
rm -f "$METRICS"
touch "$METRICS"
echo "[" >> "$METRICS"
FIRST="1"
# NOTE(review): with no src/*.v files the literal pattern "src/*.v" is
# processed once (nullglob is off) and produces a bogus entry — confirm the
# script is only run from a repo root where sources exist.
for f in src/*.v ; do
    # Emit a comma separator before every entry except the first.
    if [[ $FIRST == "1" ]] ; then
        FIRST=0
    else
        echo "," >> "$METRICS"
    fi
    cat >> "$METRICS" <<EOF
{ "name" : "$f"
, "count" : $(grep -i "admit" "$f" | wc -l)
}
EOF
done
echo "]" >> "$METRICS"
| true |
e61844c063e3ecff15a2aee956f3927824a9764a | Shell | friscoMad/HMR600.com-Firmware | /patches/Servicios/sbin/service | UTF-8 | 10,119 | 4.03125 | 4 | [] | no_license | #!/bin/sh
#
# description: Starts and stops the Samba smbd daemon\
# used to provide SMB network services.
#
# config: /usr/local/daemon/samba/lib/smb.conf
# Check that smb.conf exists.
#[ -f /tmp/Package/samba/lib/smb.conf ] || exit 0
#RETVAL=0
# ====== UTILITY FUNCTION BEGINNING ======
# Set up a default search path.
PATH="/sbin:/usr/sbin:/bin:/usr/bin"
export PATH
# checkpid pid...
# Returns 0 if ANY of the given PIDs has a live /proc entry, 1 otherwise
# (including when called with no arguments).
checkpid() {
	local i
	# "$@" instead of unquoted $*: same behavior for this file's unquoted
	# callers, but correct if a caller ever quotes its argument list.
	for i in "$@" ; do
		[ -d "/proc/$i" ] && return 0
	done
	return 1
}
# __pids_var_run {program} [pidfile]
# Set $pid to the live pids recorded in the pidfile for {program}
# (default /var/run/{program}.pid). $pid should be declared local in the
# caller. Returns LSB exit code for the 'status' action:
#   0 - at least one recorded pid is alive
#   1 - pidfile exists but no recorded pid is alive
#   3 - no pidfile (program is not running)
__pids_var_run() {
	local base=${1##*/}
	local pid_file=${2:-/var/run/$base.pid}
	pid=
	if [ -f "$pid_file" ] ; then
		local line p
		# -r keeps any backslashes in the pidfile literal (SC2162)
		read -r line < "$pid_file"
		for p in $line ; do
			[ -d "/proc/$p" ] && pid="$pid $p"
		done
		if [ -n "$pid" ]; then
			return 0
		fi
		return 1 # "Program is dead and /var/run pid file exists"
	fi
	return 3 # "Program is not running"
}
# Output PIDs of matching processes, found using pidof
__pids_pidof() {
	pidof "$1"
}
# A function to stop a program.
# Usage: killproc [-p pidfile] [-d delay] {program} [-signal]
# Without -signal: sends TERM, waits (1s, then $delay s), escalates to KILL
# if the process survives, then removes the pidfile.
# With -signal: sends only that signal and leaves the pidfile alone.
# Returns 0 on success; 7 ("not running") only in LSB mode.
killproc() {
local RC killlevel= base pid pid_file= delay
RC=0; delay=3
# Test syntax.
if [ "$#" -eq 0 ]; then
echo $"Usage: killproc [-p pidfile] [ -d delay] {program} [-signal]"
return 1
fi
if [ "$1" = "-p" ]; then
pid_file=$2
shift 2
fi
if [ "$1" = "-d" ]; then
delay=$2
shift 2
fi
# check for second arg to be kill level
[ -n "${2:-}" ] && killlevel=$2
# Save basename.
base=${1##*/}
# Find pid.
__pids_var_run "$1" "$pid_file"
if [ -z "$pid_file" -a -z "$pid" ]; then
pid="$(__pids_pidof "$1")"
fi
# Kill it.
if [ -n "$pid" ] ; then
[ "$BOOTUP" = "verbose" -a -z "${LSB:-}" ] && echo -n "$base "
if [ -z "$killlevel" ] ; then
if checkpid $pid 2>&1; then
# TERM first, then KILL if not dead
kill -TERM $pid >/dev/null 2>&1
usleep 100000
if checkpid $pid && sleep 1 &&
checkpid $pid && sleep $delay &&
checkpid $pid ; then
kill -KILL $pid >/dev/null 2>&1
usleep 100000
fi
fi
# checkpid returns 0 while the process is still ALIVE, so RC=0 here means
# the shutdown failed; the $((! $RC)) below inverts it into an exit code.
checkpid $pid
RC=$?
[ "$RC" -eq 0 ] && failure $"$base shutdown" || success $"$base shutdown"
RC=$((! $RC))
# use specified level only
else
if checkpid $pid; then
kill $killlevel $pid >/dev/null 2>&1
RC=$?
[ "$RC" -eq 0 ] && success $"$base $killlevel" || failure $"$base $killlevel"
elif [ -n "${LSB:-}" ]; then
RC=7 # Program is not running
fi
fi
else
if [ -n "${LSB:-}" -a -n "$killlevel" ]; then
RC=7 # Program is not running
else
failure $"$base shutdown"
RC=0
fi
fi
# Remove pid file if any.
if [ -z "$killlevel" ]; then
rm -f "${pid_file:-/var/run/$base.pid}"
fi
return $RC
}
# Log that something succeeded (logging itself is a no-op on this firmware).
success() {
	:
}
# Log that something failed; hand the caller's exit status back unchanged.
failure() {
	return $?
}
# ====== UTILITY FUNCTION END ======
# ====== Samba ======
# Start the smbd and nmbd daemons; succeeds only if BOTH started.
smb_start() {
	KIND="SMB"
	echo -n $"Starting $KIND services: "
	nice -n 10 /usr/local/daemon/samba/sbin/smbd -D
	RETVAL=$?
	KIND="NMB"
	echo -n $"Starting $KIND services: "
	nice -n 10 /usr/local/daemon/samba/sbin/nmbd -D
	RETVAL2=$?
	# && instead of the obsolescent `[ ... -a ... ]`; quoted expansions.
	if [ "$RETVAL" -eq 0 ] && [ "$RETVAL2" -eq 0 ]
	then
		return 0
	else
		return 1
	fi
}
# Stop both Samba daemons.
smb_stop() {
	echo -n $"Shutting down SMB services: "
	killproc smbd
	echo -n $"Shutting down NMB services: "
	killproc nmbd
	return 0;
}
smb_restart() {
	smb_stop
	smb_start
}
# Report whether smbd is running, based on its pid file and /proc.
# Prints a status line; returns 0 when running, 1 when stopped.
smb_status() {
	if [ -e /usr/local/daemon/samba/var/locks/smbd.pid ]
	then
		PID=$(cat /usr/local/daemon/samba/var/locks/smbd.pid)
		if [ -e "/proc/$PID" ]
		then
			echo $"SMB Service is running"
			RETVAL=0
		else
			# "Serice" typo in the original message fixed.
			echo $"SMB Service is stopped"
			RETVAL=1
		fi
	else
		echo $"SMB Service is stopped"
		RETVAL=1
	fi
	return $RETVAL
}
# ====== UPnP ======
# Start the uShare UPnP media server daemon.
upnp_start() {
	KIND="UPnP"
	echo -n $"Starting $KIND services: "
	nice -n 10 /sbin/www/ushare -f /sbin/www/ushare.conf -D
	RETVAL=$?
	if [ "$RETVAL" -eq 0 ]
	then
		return 0
	else
		return 1
	fi
}
upnp_stop() {
	echo -n $"Shutting down UPnP services: "
	killproc ushare
	return 0;
}
upnp_restart() {
	upnp_stop
	upnp_start
}
# Report whether uShare is running by scanning the process list.
upnp_status() {
	PID=$(ps ax | grep -v "ps ax" | grep -v grep | grep ushare | awk '{ print $1 }')
	# $PID may hold SEVERAL pids (one per line); quoting keeps the test
	# valid (the original unquoted `[ -z $PID ]` broke in that case).
	if [ -z "$PID" ]
	then
		echo $"UPnP Service is stopped"
		RETVAL=1
	else
		# "Serice" typo in the original message fixed.
		echo $"UPnP Service is running"
		RETVAL=0
	fi
	return $RETVAL
}
# ====== HTTP ======
# Start lighttpd with the public configuration and drop a lock marker.
http_start() {
	KIND="HTTP"
	echo -n $"Starting $KIND services: "
	nice -n 10 /sbin/www/lighttpd -f /sbin/www/lighttpd.conf
	RETVAL=$?
	if [ "$RETVAL" -eq 0 ]
	then
		mkdir -p /tmp/locks
		echo -n run > /tmp/locks/http
		return 0
	else
		return 1
	fi
}
http_stop() {
	echo -n $"Shutting down HTTP services: "
	killproc lighttpd
	# -f: don't error when the lock marker was never created.
	rm -f /tmp/locks/http
	return 0;
}
http_restart() {
	http_stop
	http_start
}
# Report whether lighttpd is running by scanning the process list.
http_status() {
	PID=$(ps ax | grep -v "ps ax" | grep -v grep | grep lighttpd | awk '{ print $1 }')
	# Quoted: $PID may hold several pids (one per line).
	if [ -z "$PID" ]
	then
		echo $"HTTP Service is stopped"
		RETVAL=1
	else
		# "Serice" typo in the original message fixed.
		echo $"HTTP Service is running"
		RETVAL=0
	fi
	return $RETVAL
}
# ====== HTTP Local======
# Start lighttpd with the local-only configuration and drop a lock marker.
http_local_start() {
	KIND="HTTP_Local"
	echo -n $"Starting $KIND services: "
	nice -n 10 /sbin/www/lighttpd -f /sbin/www/lighttpd_local.conf
	RETVAL=$?
	if [ "$RETVAL" -eq 0 ]
	then
		mkdir -p /tmp/locks
		echo -n run > /tmp/locks/http
		return 0
	else
		return 1
	fi
}
http_local_stop() {
	echo -n $"Shutting down HTTP Local services: "
	killproc lighttpd
	# -f: don't error when the lock marker was never created.
	rm -f /tmp/locks/http
	return 0;
}
http_local_restart() {
	http_local_stop
	http_local_start
}
# Report whether the local lighttpd instance is running.
# NOTE(review): this matches ANY lighttpd process, not just the _local one —
# same as the public http_status; confirm both modes are mutually exclusive.
http_local_status() {
	PID=$(ps ax | grep -v "ps ax" | grep -v grep | grep lighttpd | awk '{ print $1 }')
	# Quoted: $PID may hold several pids (one per line).
	if [ -z "$PID" ]
	then
		echo $"HTTP Local Service is stopped"
		RETVAL=1
	else
		# "Serice" typo in the original message fixed.
		echo $"HTTP Local Service is running"
		RETVAL=0
	fi
	return $RETVAL
}
# ====== FTP ======
# Start the stupid-ftpd FTP server daemon.
ftp_start() {
	KIND="FTP"
	echo -n $"Starting $KIND services: "
	nice -n 10 /sbin/www/stupid-ftpd -f /usr/local/etc/stupid-ftpd.conf
	RETVAL=$?
	if [ "$RETVAL" -eq 0 ]
	then
		return 0
	else
		return 1
	fi
}
ftp_stop() {
	echo -n $"Shutting down FTP services: "
	killproc stupid-ftpd
	return 0;
}
ftp_restart() {
	ftp_stop
	ftp_start
}
# Report whether stupid-ftpd is running by scanning the process list.
ftp_status() {
	PID=$(ps ax | grep -v "ps ax" | grep -v grep | grep stupid-ftpd | awk '{ print $1 }')
	# Quoted: $PID may hold several pids (one per line).
	if [ -z "$PID" ]
	then
		# "Serice" typo in the original message fixed.
		echo $"FTP Service is stopped"
		RETVAL=1
	else
		echo $"FTP Service is running"
		RETVAL=0
	fi
	return $RETVAL
}
# ====== Disk_Spindown ======
# Start the disk spin-down daemon.
spin_start() {
	KIND="spin"
	echo -n $"Starting $KIND services: "
	nice -n 10 spindownd -d -f /tmp/spindown.fifo -p /dev/null -c /sbin/spindown.conf
	RETVAL=$?
	if [ "$RETVAL" -eq 0 ]
	then
		return 0
	else
		return 1
	fi
}
spin_stop() {
	echo -n $"Shutting down spin services: "
	killproc spindownd
	return 0;
}
spin_restart() {
	spin_stop
	spin_start
}
# Report whether spindownd is running by scanning the process list.
spin_status() {
	PID=$(ps ax | grep -v "ps ax" | grep -v grep | grep spindownd | awk '{ print $1 }')
	# Quoted: $PID may hold several pids (one per line).
	if [ -z "$PID" ]
	then
		# "Serice" typo in the original message fixed.
		echo $"spin Service is stopped"
		RETVAL=1
	else
		echo $"spin Service is running"
		RETVAL=0
	fi
	return $RETVAL
}
# Dispatch: $1 selects the service family, $2 the action. Unknown actions
# print a per-service usage line; unknown services print the global usage.
# NOTE(review): the inner and final `exit 1` lines are commented out, so a
# bad action still exits via `exit $?` below — confirm that is intended.
# NOTE(review): the global usage string omits http_local and spin.
case "$1" in
samba)
case "$2" in
start)
smb_start
;;
stop)
smb_stop
;;
restart)
smb_restart
;;
status)
smb_status
;;
*)
echo $"Usage: $0 samba {start|stop|restart|status}"
#exit 1
esac
;;
upnp)
case "$2" in
start)
upnp_start
;;
stop)
upnp_stop
;;
restart)
upnp_restart
;;
status)
upnp_status
;;
*)
echo $"Usage: $0 upnp {start|stop|restart|status}"
#exit 1
esac
;;
http)
case "$2" in
start)
http_start
;;
stop)
http_stop
;;
restart)
http_restart
;;
status)
http_status
;;
*)
echo $"Usage: $0 http {start|stop|restart|status}"
#exit 1
esac
;;
http_local)
case "$2" in
start)
http_local_start
;;
stop)
http_local_stop
;;
restart)
http_local_restart
;;
status)
http_local_status
;;
*)
echo $"Usage: $0 http_local {start|stop|restart|status}"
#exit 1
esac
;;
ftp)
case "$2" in
start)
ftp_start
;;
stop)
ftp_stop
;;
restart)
ftp_restart
;;
status)
ftp_status
;;
*)
echo $"Usage: $0 ftp {start|stop|restart|status}"
#exit 1
esac
;;
spin)
case "$2" in
start)
spin_start
;;
stop)
spin_stop
;;
restart)
spin_restart
;;
status)
spin_status
;;
*)
echo $"Usage: $0 spin {start|stop|restart|status}"
#exit 1
esac
;;
*)
echo $"Usage: $0 {samba|upnp|http|ftp} {start|stop|restart|status}"
#exit 1
esac
# Propagate the last action's exit status as the script's exit status.
exit $?
| true |
5aed4abf186f3aef0dac7bc37fc95ae86e6e2d67 | Shell | terrymandin/QuickReference | /Arc/Environment/ubuntu.sh | UTF-8 | 736 | 3.4375 | 3 | [
"MIT"
] | permissive | c# Get Environment Variables
# Generate a ONE-SHOT ~/.bash_profile for the given user that prepares an
# Ubuntu VM for Azure Arc agent installation on the user's next login.
# $1 = target user name, $2 = VM/host name to apply.
USER=$1
VMNAME=$2
# Create .profile script
touch /home/$USER/.bash_profile
chmod +x /home/$USER/.bash_profile
# The EOT delimiter below is UNQUOTED, so $USER and $VMNAME are expanded NOW,
# while this script runs — including inside the single-quoted sh -c command —
# baking the concrete values into the generated profile. The profile deletes
# itself on its last line, so it runs exactly once.
cat <<EOT > /home/$USER/.bash_profile
#!/bin/bash
##Environment Variables
export VMNAME=$VMNAME
## Configure Ubuntu to allow Azure Arc Connected Machine Agent Installation
echo "Configuring walinux agent"
sudo service walinuxagent stop
sudo waagent -deprovision -force
sudo rm -rf /var/lib/waagent
echo "Configuring Firewall"
sudo ufw --force enable
sudo ufw deny out from any to 169.254.169.254
sudo ufw default allow incoming
sudo apt-get update
echo "Reconfiguring Hostname"
sudo hostname $VMNAME
sudo -E /bin/sh -c 'echo $VMNAME > /etc/hostname'
rm -f /home/$USER/.bash_profile
EOT
| true |
546e2e430bd325121fb7b05c78e93ef07f2b4927 | Shell | rodricifuentes1/authentication-manager | /docker/postgres/01_createSchema.sh | UTF-8 | 298 | 3.109375 | 3 | [
"MIT"
] | permissive | #!/bin/bash
set -e
# Replace sql tables owner occurrences from .bak file
# (rewrites "OWNER TO postgres" to the container's $POSTGRES_USER; the whole
# dump is held in a shell variable and piped into psql on the next line).
# NOTE(review): assumes $POSTGRES_USER contains no "/" or "&" characters —
# they would corrupt the sed replacement; confirm against the compose config.
TEMPORAL=$(sed -e "s/OWNER TO postgres/OWNER TO ${POSTGRES_USER}/g" /docker-entrypoint-initdb.d/bak/schema.sql.bak)
# Load sql file into database
echo "$TEMPORAL" | psql --username "$POSTGRES_USER" --dbname "$POSTGRES_USER" | true |
15bdaf3a999b6ca9ecae9f74cb0ea1aecc089b3f | Shell | opyate/there-goes-my-social-life | /prepare-solr.sh | UTF-8 | 641 | 2.65625 | 3 | [] | no_license | #!/bin/bash
# Push the Solr SysV init script to the remote host and register it with
# chkconfig, then install the host-specific properties file.
echo "Setting up SYSV service"
scp scripts/solr root@bopango.net:/etc/init.d/
ssh root@bopango.net "chmod +x /etc/init.d/solr"
ssh root@bopango.net "chkconfig --add solr"
echo "Setting up SOLR local properties"
ssh root@bopango.net "mkdir -p /etc/solr-bopango"
scp solr-config/bopango.net.properties root@bopango.net:/etc/solr-bopango/props.properties
exit 0
# The below is not necessary if we run chkconfig
# (kept for reference: manual rc3.d symlink management — unreachable because
# of the exit 0 above)
ssh root@bopango.net "rm /etc/rc3.d/S99solr"
ssh root@bopango.net "ln -s /etc/init.d/solr /etc/rc3.d/S99solr"
ssh root@bopango.net "rm /etc/rc3.d/K01solr"
ssh root@bopango.net "ln -s /etc/init.d/solr /etc/rc3.d/K01solr"
| true |
a1f5e72398a37d833df4ee8182988735d9420a1b | Shell | cankutcubuk/TCGA-Germline-Paired-Tumor-Normal | /germline/germlineCalling.sh | UTF-8 | 1,212 | 3.328125 | 3 | [] | no_license | #!/bin/bash
############################################################
# Index bams and call germline SNPs and indels             #
#                                                          #
# cankutcubuk [at] {gmail} [dot] {com}                     #
# cankut.cubuk [at] {icr} [dot] {ac} [dot] {uk}            #
# 2019-2021                                                #
# @ ICR London                                             #
############################################################

# Usage: germlineCalling.sh <bam-file-name> <sample-type> <output-dir> \
#                           <reference-fasta> <target-intervals>
sampleID=$1
sampletype=$2
outputDir=$3
referenceGenome=$4
target=$5

bamDir="$outputDir/$sampletype/"
varDir="$outputDir/germline/"
# -p makes reruns idempotent (the original bare mkdir errored if it existed)
mkdir -p "$varDir"

suffix_vcf='.g.vcf.gz'
fname=$(basename "$sampleID")
# strip the trailing extension (e.g. ".bam") to build the output name
fbname=${fname%.*}
# -L restricts calling to the capture target regions; an array keeps the
# flag and its value as two cleanly separated arguments
target_args=(-L "$target")

module load samtools/1.5
module load java/sun8/1.8.0u66
module load gatk/4.1.0.0

## 1. Index bam file
samtools index "$bamDir/$sampleID"

echo >&2 '
************
*** DONE Bam Indexing ***
************
'

## 2. Call germline SNPs and indels
gatk HaplotypeCaller \
	-R "$referenceGenome" \
	-ERC GVCF \
	"${target_args[@]}" -I "$bamDir/$sampleID" \
	--output "$varDir/$fbname$suffix_vcf"

echo >&2 '
************
*** DONE Variant Calling ***
************
'
| true |
f7403d640b2e1fb3c2ce572ab2f17f9d1fa66fd7 | Shell | grpc/grpc | /tools/distrib/buildozer.sh | UTF-8 | 3,494 | 3.890625 | 4 | [
"BSD-3-Clause",
"MPL-2.0",
"Apache-2.0"
] | permissive | #! /bin/bash
# Copyright 2019 The gRPC Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e

# Pinned buildozer release and the path where a downloaded copy is cached.
BUILDOZER_VERSION="4.2.2"
TEMP_BUILDOZER_PATH="/tmp/buildozer-for-grpc"
# Download retry policy: attempts and wait between attempts.
MAX_DOWNLOAD_RETRY=5
DOWNLOAD_WAITING_INTERVAL_SECS=10
# Abort the whole script with the given message; a no-op when the
# message is empty (so callers can pass through an optional error).
function error_handling() {
  local message="$1"
  if [[ -z "$message" ]]; then
    return 0
  fi
  echo "$message"
  exit 1
}
# Download the pinned buildozer release for the current OS/arch into
# TEMP_BUILDOZER_PATH, retrying up to MAX_DOWNLOAD_RETRY times, and make
# it executable. Aborts the script (via error_handling) on unsupported
# platforms, missing download tools, or persistent download failure.
function download_buildozer() {
  # Map `uname -sm` output to the matching release asset URL.
  platform="$(uname -sm)"
  case "${platform}" in
    "Linux x86_64") download_link="https://github.com/bazelbuild/buildtools/releases/download/${BUILDOZER_VERSION}/buildozer-linux-amd64";;
    "Linux aarch64") download_link="https://github.com/bazelbuild/buildtools/releases/download/${BUILDOZER_VERSION}/buildozer-linux-arm64";;
    "Darwin x86_64") download_link="https://github.com/bazelbuild/buildtools/releases/download/${BUILDOZER_VERSION}/buildozer-darwin-amd64";;
    "Darwin arm64") download_link="https://github.com/bazelbuild/buildtools/releases/download/${BUILDOZER_VERSION}/buildozer-darwin-arm64";;
    *) error_handling "Unsupported platform: ${platform}";;
  esac

  download_success=0
  for i in $(seq 1 $MAX_DOWNLOAD_RETRY); do
    # Prefer curl; fall back to wget. Success is detected via the HTTP
    # status (curl -w "%{http_code}") or wget's "200 OK" header line.
    if [ -x "$(command -v curl)" ]; then
      http_code=`curl -L -o ${TEMP_BUILDOZER_PATH} -w "%{http_code}" ${download_link}`
      if [ $http_code -eq "200" ]; then
        download_success=1
      fi
    elif [ -x "$(command -v wget)" ]; then
      wget -S -O ${TEMP_BUILDOZER_PATH} ${download_link} 2>&1 | grep "200 OK" && download_success=1
    else
      error_handling "Download failed: curl and wget not available"
    fi

    if [ $download_success -eq 1 ]; then
      break
    elif [ $i -lt $MAX_DOWNLOAD_RETRY ]; then
      echo "Failed to download buildozer: retrying in $DOWNLOAD_WAITING_INTERVAL_SECS secs"
      sleep $DOWNLOAD_WAITING_INTERVAL_SECS
    fi
  done
  if [ $download_success -ne 1 ]; then
    error_handling "Failed to download buildozer after $MAX_DOWNLOAD_RETRY tries"
  fi

  chmod +x ${TEMP_BUILDOZER_PATH}
}
# Get the correct version of buildozer: use a PATH-installed binary only
# if its reported version matches the pinned one; otherwise (re)use or
# (re)download the cached copy at TEMP_BUILDOZER_PATH.
if [ -x "$(command -v buildozer)" ]; then
  existing_buildozer_version="$(buildozer -version 2>&1 | head -n1 | cut -d" " -f3)"
  if [[ "${existing_buildozer_version}" != "${BUILDOZER_VERSION}" ]]; then
    download_buildozer
    buildozer_bin="${TEMP_BUILDOZER_PATH}"
  else
    buildozer_bin="buildozer"
  fi
else
  # No buildozer on PATH: check the cached copy's version before
  # deciding to download again.
  if [ -x ${TEMP_BUILDOZER_PATH} ]; then
    existing_buildozer_version="$(${TEMP_BUILDOZER_PATH} -version 2>&1 | head -n1 | cut -d" " -f3)"
    if [[ "${existing_buildozer_version}" != "${BUILDOZER_VERSION}" ]]; then
      download_buildozer
    fi
  else
    download_buildozer
  fi
  buildozer_bin="${TEMP_BUILDOZER_PATH}"
fi

# cd to repo root (this script lives in tools/distrib/).
dir=$(dirname "${0}")
cd "${dir}/../.."

set -ex

# Forward all arguments to buildozer unchanged.
# shellcheck disable=SC2086,SC2068
${buildozer_bin} "$@"
| true |
ba7aeece5f249a8a494a3bd4f859c49a24310644 | Shell | delkyd/alfheim_linux-PKGBUILDS | /doom3bfg-data/PKGBUILD | UTF-8 | 1,508 | 3.203125 | 3 | [] | no_license | # Maintainer: Mike Swanson <mikeonthecomputer@gmail.com>
# This is intended as a generic data package for open source Doom 3:
# BFG Edition engines.
#
# You must copy the base directory from Doom 3 BFG to the same
# directory as this PKGBUILD.
#
# The pkgver is from the ProductVersion of Doom3BFG.exe. I don't know
# if it is the official version number or if there even is one.
#
# Due to file differences between the Steam and GOG.com releases in
# the *.lang files, the checksums are not compared. We'll just assume
# if the rest of the files are OK, these are too.
pkgname=doom3bfg-data
pkgver=1.0.34.6456
pkgrel=2
pkgdesc="Doom 3: BFG Edition game files"
url="http://www.idsoftware.com/"
arch=('any')
# Can't find any license in the game files. Presumably your standard EULA stuff.
license=('custom')
source=(base.sha512 doom3bfg.png)
sha512sums=('b8b93ac414520c9f3cf34d412fa2ee0496887c1b487a6b21550a5624bd1a698b551db596b06508bc8b0ec6fa36ecae2f0e0c17d6ff8f3fd8439cbd3542b479f8'
            '7fd894b4f962b14798eb8802a0d7bb20e087958d3eff63aea743c2d205604614f4bd07911cfa32652198d80bf5859c4d212e5abf548fd09e664de842cc3dd886')

# Verify the user-supplied base/ directory against the shipped checksum
# list (*.lang files are intentionally absent from it -- see header note).
prepare() {
  ln -s "$startdir/base"
  sha512sum -c base.sha512 --quiet

  # Sanitizing if it was copied from NTFS or wherever.
  find -L base -type d -exec chmod 755 {} +
  find -L base -type f -exec chmod 644 {} +
}

# Install the icon and copy the verified game data under /usr/share.
package() {
  install -d "$pkgdir"/usr/share/games/doom3bfg
  install -Dm 644 doom3bfg.png "$pkgdir"/usr/share/icons/doom3bfg.png
  cp -a base/ "$pkgdir"/usr/share/games/doom3bfg
}
| true |
052636d5506c8d096a1bab1294b39b84e4a1e4ea | Shell | tyll/tillconf | /bashrc | UTF-8 | 3,500 | 3.1875 | 3 | [] | no_license | if command -v powerline-daemon >/dev/null 2>&1; then
powerline-daemon -q
POWERLINE_BASH_CONTINUATION=1
POWERLINE_BASH_SELECT=1
test -f /usr/share/powerline/bash/powerline.sh && . /usr/share/powerline/bash/powerline.sh
fi
# Start keychain (ssh/gpg agent manager) and source its per-host
# environment files so agents persist across logins.
if command -v keychain >/dev/null 2>&1; then
    keychain
    [ -z "$HOSTNAME" ] && HOSTNAME=`uname -n`
    [ -f $HOME/.keychain/$HOSTNAME-sh ] && \
        . $HOME/.keychain/$HOSTNAME-sh
    [ -f $HOME/.keychain/$HOSTNAME-sh-gpg ] && \
        . $HOME/.keychain/$HOSTNAME-sh-gpg
fi

# If set, bash checks the window size after each command and,
# if necessary, updates the values of LINES and COLUMNS.
shopt -s checkwinsize

# If set, the history list is appended to the file named by the
# value of the HISTFILE variable when the shell exits, rather than overwriting
# the file.
shopt -s histappend

# If set, minor errors in the spelling of a directory component in a cd
# command will be corrected.  The errors checked for are transposed
# characters, a missing character, and one character too many.  If a
# correction is found, the corrected file name is printed, and the command
# proceeds.  This option is only used by interactive shells.
shopt -s cdspell

# History: drop duplicates, keep a very large history.
export HISTCONTROL=ignoredups:erasedups
export HISTSIZE=2048000
export HISTFILESIZE="${HISTSIZE}"

command -v direnv &>/dev/null && eval "$(direnv hook bash)"

# Editor: prefer vimx (clipboard-enabled vim) when available.
export EDITOR=vim
command -v vimx >/dev/null && alias vim="vimx"
alias vi=vim
alias vispec="vim *.spec"

# RPM / tmux / download helpers.
alias __list_rpm_gpg='rpm --qf "%{name}-%{version}-%{release} %{summary}\n" -q gpg-pubkey'
alias tma="tmux attach"
alias nvr="/bin/rpm --qf '%{name}-%{version}-%{release}\n' -q"
alias youtube-dl='youtube-dl --output "%(title)s-%(extractor)s:%(id)s.%(ext)s"'
alias kvm_iso="qemu-kvm -boot d -k de -m 1024 -usbdevice tablet -cdrom"

# Colorized ls/grep plus short listing aliases.
eval `dircolors -b /etc/DIR_COLORS`
alias d="ls --color=auto"
alias ls="ls --color=auto"
alias ll="ls --color=auto -lA"
alias l="ls --color=auto -l"
alias grep='grep --color=auto'
alias egrep='egrep --color=auto'
alias cclive='cclive --format best --filename-format="%t-%h:%i.%s"'
alias clive='clive --format best --filename-format="%t-%h:%i.%s"'

# Quick directory navigation.
alias ..="cd .."
alias ...="cd ../.."
alias ....="cd ../../.."
alias .....="cd ../../../.."
alias md="mkdir -p"

# Git shortcuts.
alias g="git"
alias gc="git commit -v"
alias gcv="gc"
alias ga="gc --amend"
alias gcva="gcv -a"
alias gst="git status"
alias grbm="git rebase -i master"

alias a="ansible"
alias ap="ansible-playbook"
alias ytd="youtube-dl"
# "insecure" ssh: skip host key checks (for throwaway machines).
alias iSSH="ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null"
alias dos2unix="recode ibmpc..lat1"
alias unix2dos="recode lat1..ibmpc"

#unset SSH_ASKPASS;

# Convert an RFC 4716 (SSH2) public key file to OpenSSH format.
ssh_convert () {
    ssh-keygen -f "${1}" -i
}

MAIL=$HOME/Maildir/
PATH=${PATH}:${HOME}/go/bin

export _JAVA_OPTIONS='-Dawt.useSystemAAFontSettings=on -Dswing.aatext=true -Dswing.defaultlaf=com.sun.java.swing.plaf.gtk.GTKLookAndFeel'

# Settings for X and non X
if [[ "${DISPLAY}" != "" ]] && xset q &>/dev/null
then
    # delay, rate in 1/s
    xset r rate 220 45
    gsettings set org.gnome.desktop.peripherals.keyboard delay 220
    # Milliseconds between repeat keypresses
    gsettings set org.gnome.desktop.peripherals.keyboard repeat-interval 22
    xkbset q &>/dev/null && xkbset sticky -twokey # latchlock
    xkbset q &>/dev/null && xkbset exp 3600 sticky # latchlock
    xinput set-button-map "Logitech USB-PS/2 Optical Mouse" 1 2 3 4 5 6 7 2 2>/dev/null
fi

if [[ "${TERM}" != "" ]]
then
    # Disable the terminal bell.
    setterm -blength 0 -bfreq 0 &> /dev/null
fi
| true |
7a9a93a486aee7a9e8316863610f6766c3436c69 | Shell | jens-maus/RaspberryMatic | /buildroot-external/overlay/base-raspmatic_oci/etc/init.d/S00DataMount | UTF-8 | 832 | 3.75 | 4 | [
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-free-unknown",
"GPL-2.0-only",
"Apache-2.0"
] | permissive | #!/bin/sh
# shellcheck shell=dash disable=SC2169 source=/dev/null
#
# Startup script to bind mount /data to /usr/local
# (required for Home Assistant Add-on only)
#
# Only relevant when running as a Home Assistant add-on; otherwise no-op.
[[ -z "${HM_RUNNING_IN_HA}" ]] && exit 0

# Bind-mount the add-on's persistent /data volume over /usr/local so the
# rest of the system keeps using its usual paths.
start() {
  echo -n "Mounting /data as /usr/local (Home Assistant Add-On): "

  # Home Assistant Add-on expects persistent data to be stored in /data
  mount -o bind /data /usr/local

  # remount /dev as rw to allow multimacd to create mmd_bidcos/mmd_hmip
  # later on
  mount -o rw,remount /dev

  echo "OK"
}

stop() {
  echo -n "Unmounting /data from /usr/local (Home Assistant Add-On): "
  umount /usr/local
  echo "OK"
}

restart() {
  stop
  start
}

# Standard SysV-style dispatch on the first argument.
case "$1" in
  start)
    start
    ;;
  stop)
    stop
    ;;
  restart|reload)
    restart
    ;;
  *)
    echo "Usage: $0 {start|stop|restart}"
    exit 1
esac

exit 0
| true |
c93e07675c6eb7837f8ba75ce71c58592966d618 | Shell | alkorgun/HighloadCup2018 | /hlcup2018.sh | UTF-8 | 690 | 2.609375 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Boot sequence for the HighloadCup 2018 container: start ClickHouse,
# unpack the competition data, create the schema, load accounts, then
# start the HTTP service on port 80.
service clickhouse-server start
# sleep 10

export PYTHONIOENCODING=utf-8

echo -e "\nfiles found:\n$( ls -1 /tmp/data )\n"
unzip -o /tmp/data/data.zip

echo -e "\ncreating schema..."
cat /var/lib/hlc2018/schema/db.sql | clickhouse-client --multiline
cat /var/lib/hlc2018/schema/accounts.sql | clickhouse-client --multiline
cat /var/lib/hlc2018/schema/likes.sql | clickhouse-client --multiline

echo -e "\nloading data..."
# loader.py converts the unpacked JSON into CSV consumed by ClickHouse.
loader.py accounts_*.json | clickhouse-client --query="INSERT INTO hlcup2018.accounts FORMAT CSV"
# likes_loader.py likes_*.json | clickhouse-client --query="INSERT INTO hlcup2018.likes FORMAT CSV"

echo -e "\nclearing...\n"
/bin/hlcup2018 -port 80
| true |
001160d699b419be1933661c7bcb802da0e17434 | Shell | kapeels/mobilize-in-a-box | /ocpu/docker_entrypoint.sh | UTF-8 | 334 | 2.671875 | 3 | [] | no_license | #!/bin/bash
set -e

# hack to ensure that ohmage is accessible without leaving the docker network:
# when an "ohmage" host is reachable, rewrite the app's server URL from the
# browser-derived location to the in-network http://ohmage:8080/app endpoint.
if ping -c 1 ohmage > /dev/null 2>&1; then
  sed -i 's|var serverurl = location.protocol + "//" + location.host + "/app";|var serverurl = "http://ohmage:8080/app";|g' /usr/local/lib/R/site-library/plotbuilder/www/js/app.js
fi

# Hand off to the container's CMD.
exec "$@"
3a2d87cafca7ed73ae62904039228f4e077b5707 | Shell | ChristianKienle/highway | /scripts/generate-project.sh | UTF-8 | 601 | 2.96875 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Debug support: enable shell tracing when HIGHWAY_DEBUG_ENABLED is set
# (to any value, including empty).
if [ -n "${HIGHWAY_DEBUG_ENABLED+x}" ]; then
    echo "Debugging enabled"
    set -x # => enable tracing
else
    echo "Debugging disabled"
    echo "To enable debugging: 'export HIGHWAY_DEBUG_ENABLED=1'"
fi

# Fail fast: abort on errors, on unset variables, and on failures
# anywhere in a pipeline.
set -euo pipefail

# Regenerate the Xcode project from the Swift package and open it.
# rm -f -rf highway.xcodeproj || true
swift package generate-xcodeproj --enable-code-coverage --xcconfig-overrides ./config/config.xcconfig --output .
open highway.xcodeproj -a Xcode
| true |
7fdfb596e728e4b9574590a096a6d0ff6f1624ca | Shell | NewRelic-Python-Plugins/newrelic-python-agent | /docker/riak/bin/start-cluster.sh | UTF-8 | 1,875 | 3.90625 | 4 | [] | permissive | #! /bin/bash
set -e

# Verbose tracing when DOCKER_RIAK_DEBUG is present in the environment.
if env | grep -q "DOCKER_RIAK_DEBUG"; then
  set -x
fi

# Cluster parameters, overridable via DOCKER_RIAK_* env vars.
RIAK_CLUSTER_SIZE=${DOCKER_RIAK_CLUSTER_SIZE:-5}
RIAK_AUTOMATIC_CLUSTERING=${DOCKER_RIAK_AUTOMATIC_CLUSTERING:-1}

# Refuse to start a second cluster while nrpa/riak containers exist.
if docker ps -a | grep "nrpa/riak" >/dev/null; then
  echo ""
  echo "It looks like you already have some Riak containers running."
  echo "Please take them down before attempting to bring up another"
  echo "cluster with the following command:"
  echo ""
  echo "  make stop-cluster"
  echo ""

  exit 1
fi

echo
echo "Bringing up ${RIAK_CLUSTER_SIZE} cluster nodes:"
echo

# Node names are zero-padded (riak01, riak02, ...). Every node after the
# first is linked to riak01 so it can join the cluster.
for index in $(seq -f "%02g" "1" "${RIAK_CLUSTER_SIZE}");
do
  echo "  Starting [riak${index}]"

  if [ "${index}" -gt "1" ] ; then
    docker run -e DOCKER_RIAK_CLUSTER_SIZE=${RIAK_CLUSTER_SIZE} \
               -e DOCKER_RIAK_AUTOMATIC_CLUSTERING=${RIAK_AUTOMATIC_CLUSTERING} \
               -e NEWRELIC_KEY=${NEWRELIC_KEY} \
               -h riak${index} \
               -P \
               --link=riak01:8098 \
               --name riak${index} \
               --volumes-from SOURCE \
               -d nrpa/riak > /dev/null 2>&1
  else
    docker run -e DOCKER_RIAK_CLUSTER_SIZE=${RIAK_CLUSTER_SIZE} \
               -e DOCKER_RIAK_AUTOMATIC_CLUSTERING=${RIAK_AUTOMATIC_CLUSTERING} \
               -e NEWRELIC_KEY=${NEWRELIC_KEY} \
               -h riak${index} \
               -P \
               --name riak${index} \
               --volumes-from SOURCE \
               -d nrpa/riak > /dev/null 2>&1
  fi

  # Find the node's published 8098 port and block until /ping answers OK.
  CONTAINER_ID=$(docker ps | egrep "riak${index}[^/]" | cut -d" " -f1)
  CONTAINER_PORT=$(docker port "${CONTAINER_ID}" 8098 | cut -d ":" -f2)

  until curl -s "http://127.0.0.1:${CONTAINER_PORT}/ping" | grep "OK" > /dev/null 2>&1;
  do
    sleep 1
  done

  echo "  Started [riak${index}]"
done

echo
echo "Please wait approximately 30 seconds for the cluster to stabilize."
echo
| true |
e24964ec40b5fcb4fac5167612f502ea0cf12194 | Shell | Bugajska/TEST | /put_date.sh | UTF-8 | 125 | 2.734375 | 3 | [] | no_license | #!/bin/bash
# Append the current date/time to the file named by $1 (created if
# missing) or to ./data.txt when no argument is given.
#
# Bug fixes vs. the original:
#  - now='date' stored the literal string "date"; use command substitution.
#  - the ">> file" redirection sat on its own line, so nothing was ever
#    written (and the if-branch targeted the hidden file ."$1").
now=$(date)
if [ "$1" ]; then
    touch ./"$1"
    echo "$now" >> ./"$1"
else
    touch ./data.txt
    echo "$now" >> ./data.txt
fi
f03f7719bc070b0665087ee9dd840c528e26377f | Shell | bossjones/boss-pipenv-zsh-plugin | /boss-pipenv.plugin.zsh | UTF-8 | 366 | 3.28125 | 3 | [] | no_license | # Try to find pipenv, if it's not on the path
# Warn (on stderr) when pipenv is not installed; the plugin then does nothing.
if [ ! "$(command -v pipenv)" ]; then
    echo "Install http://docs.pipenv.org/en/latest/ to use this plugin." > /dev/stderr
fi

if [ "$(command -v pipenv)" ]; then
    # Add activate to change pwd functions: zsh runs these hooks on every
    # directory change, auto-activating a project's pipenv shell.
    chpwd_functions+=(zsh-pipenv-shell-activate)

    # enable pipenv tab completion
    eval "$(pipenv --completion)"
fi
| true |
94d164b04791b78e1d346dbcd5440ec498b59a01 | Shell | ln3942/libreoffice-help | /help3xsl/get_url.sh | UTF-8 | 754 | 2.921875 | 3 | [
"MIT"
] | permissive | #/bin/bash
# This file is part of the LibreOffice project.
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# NOTE(review): the comments below describe $1/$2/$3 arguments, but the
# script never reads them -- confirm whether they are leftovers.
# $1 is html/productversion/
# $2 is Language
# $3 is productversion
outdir=html/
# mkdir -p $outdir
sourcedir=$(pwd)/

# Output: a JS bookmark map (filemap.js) built from all *.xhp help files.
ffile="${outdir}filemap.js"
rm -f "$ffile"
ffile2=temp.html
stub2='};'
xslfile=get_url.xsl

# bookmarks branch -> path/to/helpfile.html
stub1='var map={'
sfind="${sourcedir}text/"
rm -f "$ffile2"
# Run the XSLT over every help source file and collect the raw entries.
find "$sfind" -type f -name "*.xhp" -exec xsltproc "$xslfile" {} + > "$ffile2"
echo "$stub1" >> "$ffile"
# sort -k3b -t\> -s -o $ffile2 $ffile2
# awk 'NF' drops empty lines while appending the generated map entries.
awk 'NF' "$ffile2" >> "$ffile"
echo "$stub2" >> "$ffile"
| true |
e4963e6411c29bb3e3b6d9957b8787dabf9e7bcb | Shell | m4rkl4r/bl_tools | /py3/sbin/bl_cpm_pullHeaders | UTF-8 | 517 | 2.921875 | 3 | [
"BSD-3-Clause"
] | permissive | #!/bin/bash
# Load the bl_tools environment (defines bl_conf_auto, bl_getDepotFile,
# BL_ENG_BASE, BL_LOCAL_ENG, ...).
. /unixworks/bl_tools/etc/bl_tools.sh
. /unixworks/bl_tools/etc/promotion.sh

bl_conf_auto "BL_ENG_PROFILE" "$BL_ENG_PROFILE"

# For every depot header path listed, strip the depot base prefix and
# fetch the file into the matching local engineering directory.
cat /unixworks/bl_tools/etc/headerlist.txt | while read DEPOT_HEADER
do
	HEADER="$(echo $DEPOT_HEADER | sed -e "s#$BL_ENG_BASE/##")"
	LOCAL_HEADER="$BL_LOCAL_ENG/$HEADER"
	LOCAL_DIR=$(dirname "$LOCAL_HEADER")
	echo RUNNING: bl_getDepotFile --depotfile "$DEPOT_HEADER" --dstdir "$LOCAL_DIR"
	bl_getDepotFile --depotfile "$DEPOT_HEADER" --dstdir "$LOCAL_DIR"
done
| true |
c6058641f812f4596ce91ec7b46cabe50824f924 | Shell | Tiago-Maessi/Freechains | /dia1.sh | UTF-8 | 2,411 | 2.84375 | 3 | [] | no_license | #!/bin/bash
# Day-1 Freechains demo: three users (Fla, Int, Sao) join the public
# "#Brasileirao" forum, post, like and watch reputations evolve.
# (User-facing messages are intentionally in Portuguese.)
echo "Iniciando o dia 1..."
date +%d/%m/%y

echo "Conectando no forum publico #Brasileirao:"
freechains chains join "#Brasileirao"

# Key pairs come back as "pubkey privkey" on one line; cut splits them.
echo "Criacao das credenciais do usuario Fla..."
flapubprivkey=`freechains crypto pubpvt "Fla"`
flaprivkey=`echo $flapubprivkey | cut -d" " -f2`
flapubkey=`echo $flapubprivkey | cut -d" " -f1`
#echo "A chave privada do usuario Fla: $flaprivkey";
#echo "A chave publica do usuario Fla: $flapubkey";

echo "Postando na cadeia #Brasileirao com o usuario Fla a mensagem: Curtiram o VAR?"
freechains chain "#Brasileirao" post inline "Curtiram o VAR?" --sign="$flaprivkey"

echo "Criacao das credenciais do usuario Int..."
intpubprivkey=`freechains crypto pubpvt "Int"`
intprivkey=`echo $intpubprivkey | cut -d" " -f2`
intpubkey=`echo $intpubprivkey | cut -d" " -f1`

echo "Postando na cadeia #Brasileirao com o usuario Int a mensagem: Time pilantra!"
freechains chain "#Brasileirao" post inline "Time pilantra!" --sign="$intprivkey"

echo "Criacao das credenciais do usuario Sao..."
saopubprivkey=`freechains crypto pubpvt "Sao"`
saoprivkey=`echo $saopubprivkey | cut -d" " -f2`
saopubkey=`echo $saopubprivkey | cut -d" " -f1`

# Keep the post hash so it can be liked later.
echo "Postando na cadeia #Brasileirao com o usuario Sao a mensagem: Libertadores!"
saopost=`freechains chain "#Brasileirao" post inline "Libertadores!" --sign="$saoprivkey"`
echo "$saopost"

# Initial reputations.
echo "Reps do usuario Sao:"
freechains chain "#Brasileirao" reps "$saopubkey"
echo "Reps do usuario Int:"
freechains chain "#Brasileirao" reps "$intpubkey"
echo "Reps do usuario Fla:"
freechains chain "#Brasileirao" reps "$flapubkey"

echo "Usuario Fla da um like no post do usuario Sao:"
freechains chain "#Brasileirao" like "$saopost" --sign="$flaprivkey"

# Reputations after the like.
echo "Reps atualizados do usuario Sao:"
freechains chain "#Brasileirao" reps "$saopubkey"
echo "Reps atualizados do usuario Int:"
freechains chain "#Brasileirao" reps "$intpubkey"
echo "Reps atualizados do usuario Fla:"
freechains chain "#Brasileirao" reps "$flapubkey"

echo "Usuario Fla posta a mentira Octacampeao:"
freechains chain "#Brasileirao" post inline "Octacampeao!" --sign="$flaprivkey"

# Final reputations for day 1.
echo "Reps finais do dia 1 do usuario Sao:"
freechains chain "#Brasileirao" reps "$saopubkey"
echo "Reps finais do dia 1 do usuario Int:"
freechains chain "#Brasileirao" reps "$intpubkey"
echo "Reps finais do dia 1 do usuario Fla:"
freechains chain "#Brasileirao" reps "$flapubkey"

echo "Fim do dia 1!"
| true |
156f3044aaecda87f6f88ca2d700b0b74e34b6ce | Shell | petronny/aur3-mirror | /natrix-calc/PKGBUILD | UTF-8 | 1,196 | 2.90625 | 3 | [] | no_license | # Maintainer: Mijo Medvedec <mijo dot medvedec at gmail dot com>
pkgname=natrix-calc
pkgver=2.3
pkgrel=1
pkgdesc="Generic IP calculator with ability to calculate parameters of IPv4/IPv6 networks"
arch=('i686' 'x86_64')
url="http://sourceforge.net/projects/natrix/"
license=('GPL3')
depends=('qt4')
makedepends=('gendesk')
source=("http://sourceforge.net/projects/natrix/files/${pkgver}/${pkgname}_${pkgver}.zip")
md5sums=('f8c9be76329850a037d998d5f28487b9')
sha256sums=('24ad3d26745112fd622e938f153797dd53809bfff4e43926be6f82ff7d156ed7')

# Generate a .desktop file for the application menu entry.
prepare() {
  gendesk -n --pkgname "$pkgname" --pkgdesc "$pkgdesc"
}

# Standard Qt4 qmake build.
build() {
  cd "$srcdir/$pkgname"
  qmake-qt4 Natrix.pro
  make
}

package() {
  cd "$srcdir/$pkgname"
  install -Dm 755 "$pkgname" "$pkgdir/usr/bin/$pkgname"
  install -dm 755 "$pkgdir/usr/share/$pkgname/locale"
  install -Dm 644 *.qm "$pkgdir/usr/share/$pkgname/locale"
  install -Dm 644 "$pkgname.png" "$pkgdir/usr/share/pixmaps/$pkgname.png"
  install -Dm 644 "$srcdir/$pkgname.desktop" "$pkgdir/usr/share/applications/$pkgname.desktop"
  # Polish the generated desktop entry: friendlier name, better categories.
  sed -i 's/Natrix-calc/Natrix Calculator/g' "$pkgdir/usr/share/applications/$pkgname.desktop"
  sed -i 's/Office;/Network;Internet;/g' "$pkgdir/usr/share/applications/$pkgname.desktop"
}
| true |
e649f93b183ad74ae184e9ab8ff607f236f70971 | Shell | gomba66/holberton-system_engineering-devops | /0x04-loops_conditions_and_parsing/7-clock | UTF-8 | 256 | 3.390625 | 3 | [] | no_license | #!/usr/bin/env bash
# Print a simple "clock": for each hour 0..12, show "Hour: N" followed
# by the minutes 1..59, one per line.
hour=0
while [ "$hour" -le 12 ]; do
    echo Hour: "$hour"
    minute=1
    while [ "$minute" -le 59 ]; do
        echo "$minute"
        minute=$((minute + 1))
    done
    hour=$((hour + 1))
done
| true |
fa1e8a64580418cb6439e10e3178ee18c7241abb | Shell | mriedmann/humhub-docker | /nginx/docker-entrypoint.d/60-nginx-config.sh | UTF-8 | 460 | 2.921875 | 3 | [
"MIT"
] | permissive | #!/bin/sh
set -e

# Defaults for the template variables consumed by nginx.conf.
export NGINX_UPSTREAM="${NGINX_UPSTREAM:-unix:/run/php-fpm.sock}"
export NGINX_CLIENT_MAX_BODY_SIZE="${NGINX_CLIENT_MAX_BODY_SIZE:-10m}"
export NGINX_KEEPALIVE_TIMEOUT="${NGINX_KEEPALIVE_TIMEOUT:-65}"

# Restrict envsubst to NGINX_* variables so nginx's own $vars in the
# config file are left untouched.
# shellcheck disable=SC2046
defined_envs=$(printf "\${%s} " $(env | grep -E "^NGINX_.*" | cut -d= -f1))

# Render into an unpredictable temp file (not a fixed /tmp path), then
# copy the result back so nginx.conf is rewritten in place.
tmpconf=$(mktemp)
envsubst "$defined_envs" </etc/nginx/nginx.conf >"$tmpconf"
cat "$tmpconf" >/etc/nginx/nginx.conf
rm -f "$tmpconf"

exit 0
| true |
300e234d9b8028f3bb3163f097310306a8949b48 | Shell | akuz/readrz-deploy-srv | /bin/feedsget_live_sync.sh | UTF-8 | 338 | 2.71875 | 3 | [] | no_license | #!/bin/bash
# Load the Mongo connection settings (MNG_SERVER / MNG_PORT / MNG_DB).
. ./bin/ENV.sh

echo START: feedsget_live...

# NOTE(review): the original had a trailing backslash after
# "-threadCount 10" that folded the following "if" line into the java
# command and made the script a shell syntax error; that continuation
# has been removed so the exit-status check actually runs.
# NOTE(review): "--------verbose" looks like a hand-disabled flag --
# confirm whether "-verbose" was intended.
java -Dfile.encoding=UTF-8 \
	-jar ./bin/feedsget.jar \
	-mongoServer "$MNG_SERVER" \
	-mongoPort "$MNG_PORT" \
	-mongoDb "$MNG_DB" \
	--------verbose \
	-threadCount 10

if [ "$?" -ne "0" ]; then
	echo "ERROR: feedsget_live"
	exit 1
else
	echo "DONE: feedsget_live"
	exit 0
fi
| true |
77332ceefc10484c82e56f6920e545edb147dc43 | Shell | Mozilla-GitHub-Standards/08adbe97182dccd9ecd90506e964102130f06c33ce45fcb7afd7ea518eb913ad | /check_puppet_agent | UTF-8 | 4,226 | 4.03125 | 4 | [] | no_license | #!/bin/bash
#
# A nrpe plugin that will poll the state of puppet on a machine. We want
# to have different notification intervals for different states, so this
# script operates in two different modes.
#
# -c: Check for catalog compile failures only
# -t: Check the last time puppet ran and check for run time errors
#
# Based on code by Alexander Swen <a@swen.nu>
#
# CHANGELOG
# 5/15/2013 bhourigan Initial commit
#
# NOTE: Puppet 3.x changed configprint syntax -- ask the installed agent
# for its state directory using the syntax matching its major version.
puppetversion=$(puppet -V)
case $puppetversion in
  2.*)
    statedir=$(puppet --configprint statedir)
    ;;
  3.*)
    statedir=$(puppet config print statedir)
    ;;
  *)
    echo "Sorry, puppet version ${puppetversion:-UNKNOWN} is unsupported by $0"
    exit 1
    ;;
esac

# Default file locations (overridable via -s / -r / -d below).
statefile=${statedir}/last_run_summary.yaml
reportfile=${statedir}/last_run_report.yaml
disablefile=${statedir}/disabled.at
#######################################
# Print usage information (optionally prefixed with an error message)
# and exit with status 1.
# Globals:   statedir (read, for the default paths)
# Arguments: $* - optional error text shown before the usage summary
#######################################
usage(){
   if [ $# -gt 0 ]; then
      echo "ERROR: $*"
      echo
   fi

   echo -e "Usage: $0 [-c] [-t <threshold>] [-s <statefile>] [-r <reportfile>] [-d <disablefile>]"
   echo
   # Fix: this option is -t (see getopts below); it was documented as -w.
   echo -e "\t-t last run alert threshold"
   echo -e "\t-c check for catalog compilation failures only"
   echo -e "\t-d disable file location (default: ${statedir}/disabled.at)"
   echo -e "\t-r report file location (default: ${statedir}/last_run_report.yaml)"
   echo -e "\t-s state file location (default: ${statedir}/last_run_summary.yaml)"
   echo

   exit 1
}
# Emit a single Nagios status line and terminate with the given code
# ($1 = message, $2 = exit status: 0 OK, 1 WARN, 2 CRIT, 3 UNKNOWN).
result(){
   local message="$1" status="$2"
   printf '%s\n' "$message"
   exit "$status"
}
#######################################
# Alert (UNKNOWN, exit 3) when the agent's last run is older than the
# threshold. When puppet is administratively disabled (disablefile
# exists), report the human-readable reason from /etc/motd as OK instead.
# Globals: disablefile, statefile, threshold (read)
#######################################
check_last_run_time(){
   if [ ! -f $disablefile ]; then
      # Parse last_run (epoch seconds) from the summary file
      last_run_time=$(awk '/\s*last_run:/ {print $2}' $statefile)

      if [ ${last_run_time:-0} -eq 0 ]; then
         result "Can't get last_run from $statefile" 3
      fi

      now=$(date "+%s")
      time_since_last=$((now-last_run_time))

      if [ $time_since_last -gt $threshold ]; then
         result "Last run was ${time_since_last} seconds ago" 3
      fi
   else
      # Puppet is disabled: surface the motd notice, with the sed
      # expression stripping ANSI color escape sequences.
      comment=$(grep 'Puppet has been disabled' /etc/motd | sed -r "s/\x1B\[([0-9]{1,3}((;[0-9]{1,3})*)?)?[m|K]//g")

      result "${comment}" 0
   fi
}
#######################################
# Raise a WARNING (exit 1 via result) when the last run summary reports
# any failures. Silent when the run was clean.
# Globals:  statefile (read)
#
# Bug fixes vs. the original: all three awk patterns were the identical
# /\s*failed:/ (so "failure" and restart failures were never parsed),
# and the sum referenced the never-set variable "failure_to_restart".
#######################################
check_last_run_errors(){
   # last_run_summary.yaml carries "failed"/"failed_to_restart" under
   # resources: and "failure" under events: -- parse each key separately.
   failed=$(awk '/^[[:space:]]*failed:/ {print $2}' "$statefile")
   failure=$(awk '/^[[:space:]]*failure:/ {print $2}' "$statefile")
   failed_to_restart=$(awk '/^[[:space:]]*failed_to_restart:/ {print $2}' "$statefile")
   # Empty (missing) values evaluate to 0 inside arithmetic expansion.
   sum_of_fail=$((failed+failure+failed_to_restart))

   s=""
   if [ ${sum_of_fail:-0} -gt 0 ]; then
      if [ ${sum_of_fail:-0} -gt 1 ]; then
         s="s"
      fi

      result "Last run had ${sum_of_fail} error${s:-}" 1
   fi
}
#######################################
# Raise a CRITICAL (exit 2 via result) when the last run report contains
# a catalog retrieval/apply failure; silent otherwise.
# Globals: reportfile (read)
#######################################
check_catalog_compile(){
   # Pull the quoted failure message out of the YAML report, dropping
   # the trailing "on node ..." suffix so alerts stay stable per-error.
   failed_catalog=$(awk -F '"' '/\s*Could not retrieve catalog from remote server:|\s*Failed to apply catalog/ {gsub(/on node .*/,"",$2); print $2}' $reportfile)

   if [ ! -z "${failed_catalog:-}" ]; then
      result "${failed_catalog}" 2
   fi
}
# Report OK (exit 0) with the agent version and the catalog version
# parsed from the last run report ("UNKNOWN" when it cannot be found).
result_ok(){
   local catalog
   catalog=$(awk -F '"' '/configuration_version:/ {print $2}' "$reportfile")
   result "Puppet agent ${puppetversion} running catalog ${catalog:-UNKNOWN}" 0
}
# Parse command-line options. Fixes vs. the original: the duplicate
# second "t)" case (unreachable; clearly meant to be "s)" so -s never
# worked) and the missing "r:" in the optstring (so -r, although handled
# below and documented in usage, was rejected as an invalid option).
while getopts ":t:s:r:d:c" opt; do
   case $opt in
      t)
         threshold=$OPTARG
         ;;
      s)
         statefile=$OPTARG
         ;;
      d)
         disablefile=$OPTARG
         ;;
      r)
         reportfile=$OPTARG
         ;;
      c)
         check_catalog=1
         ;;
      \?)
         usage "Invalid option: -$OPTARG"
         ;;
      :)
         usage "Option -$OPTARG requires an argument."
         ;;
   esac
done
# Both files must exist before any check; otherwise report UNKNOWN.
if [ ! -f $statefile ]; then
   result "State file $statefile doesn't exist" 3
fi

if [ ! -f $reportfile ]; then
   result "Report file $reportfile doesn't exist" 3
fi

# Nagios won't allow us to have different alerting timers for different
# states (we want to see catalog compile failures and recoveries
# immediately, and run time errors daily), so this script will operate
# in two modes which will be defined with two different notification
# intervals in Nagios

if [ ${check_catalog:-0} -eq 1 ]; then
   # -c mode: catalog compile failures only; -t is rejected here.
   if [ ${threshold:-0} -gt 1 ]; then
      usage "Threshold argument not allowed with -c"
   fi

   check_catalog_compile
else
   # default mode: requires a positive -t threshold.
   # NOTE(review): the error message interpolates $OPTARG, which is stale
   # after the getopts loop -- confirm whether $threshold was intended.
   if [ ${threshold:-0} -lt 1 ]; then
      usage "Invalid time threshold $OPTARG"
   fi

   check_last_run_time
   check_last_run_errors
fi

# Reaching this point means no check tripped; report OK.
result_ok
| true |
0f636cbe4ffdc1d07d5c434a8e928db06ebb37e8 | Shell | Xennis/debian_bash_install | /general_functions.sh | UTF-8 | 1,749 | 4.1875 | 4 | [] | no_license | #
# Function: ask
# Source: https://gist.github.com/davejamesmiller/1965569
#
# Prompt the user with a yes/no question until a valid answer is given.
#   $1 - question text
#   $2 - optional default: "Y" (Enter means yes) or "N" (Enter means no)
# Returns 0 for yes, 1 for no.
function ask {
    local prompt default
    while true; do
        case "${2:-}" in
            Y) prompt="Y/n"; default=Y ;;
            N) prompt="y/N"; default=N ;;
            *) prompt="y/n"; default= ;;
        esac

        # Ask the question (REPLY stays global, as read fills it).
        read -p "$1 [$prompt] " REPLY

        # Empty answer falls back to the default (if any).
        [ -z "$REPLY" ] && REPLY=$default

        # Accept anything starting with y/Y or n/N; otherwise re-ask.
        case "$REPLY" in
            Y*|y*) return 0 ;;
            N*|n*) return 1 ;;
        esac
    done
}
#
# Check if directory exits
#
# Succeed (return 0) when the given directory exists, fail (return 1)
# otherwise. With no argument, print usage and terminate the shell
# with status 1.
function check_dir_exits(){
    if [ "$#" -lt 1 ]; then
        echo "Usage check_dir_exits DIR"
        echo "Example: check_dir_exits dir/"
        echo ""
        exit 1
    fi
    local dir="$1"
    # The test's own status is the function's return value.
    [ -d "${dir}" ]
}
#
# Check if file exits
#
# Succeed (return 0) when the given regular file exists, fail (return 1)
# otherwise. With no argument, print usage and terminate the shell with
# status 1.
# Fix: the usage example previously said "check_dir_exits example.txt"
# (copy-paste typo from the directory helper).
function check_file_exits(){
    if [ "$#" -lt 1 ]; then
        echo "Usage check_file_exits FILE"
        echo "Example: check_file_exits example.txt"
        echo ""
        exit 1
    fi
    local file="$1"
    # The test's own status is the function's return value.
    [ -f "${file}" ]
}
#
# Uses dpkg to check if a package exits / is installed.
#
# Uses dpkg to check whether a package is installed: returns 0 when the
# name appears in `dpkg -l` output, 1 otherwise. With no argument,
# prints usage and terminates the shell with status 1.
# NOTE(review): grep -qw over the full `dpkg -l` listing also matches
# the version/description columns, so unrelated packages mentioning the
# word can cause false positives -- `dpkg -s` would be stricter; confirm.
function check_package_exits() {
  if [ "$#" -lt 1 ]
  then
    echo "Usage check_package_exits PACKAGE-NAME"
    echo "Example: check_package_exits vim"
    echo ""
    exit 1
  else
    local package_name=$1
  fi
  if dpkg -l | grep -qw ${package_name};
  then
    return 0
  else
    return 1
  fi
}
dfa76294b3bf0c88a410c2f83249c511553f08ef | Shell | ymxl85/MRs-based-test-suite-for-APR | /original/GenProg/replace/rds/v20/4/test.sh | UTF-8 | 12,775 | 2.71875 | 3 | [] | no_license | ulimit -t 1
# Log every invocation's arguments so later analysis can see which
# test cases were exercised.
echo $1 $2 $3 $4 $5 >> testruns.txt
case $2 in p1) $1 'Re' '2RO' 'YJz-R7' | diff outputP/O1 - && exit 0 ;;
p2) $1 '60oB' '' 'QI6[r' | diff outputP/O2 - && exit 0 ;;
p3) $1 '?3$' '' '_' | diff outputP/O3 - && exit 0 ;;
p4) $1 'lTk*' '' '*' | diff outputP/O4 - && exit 0 ;;
p5) $1 '+' 'J.:' 'g2*SlHwvh!y+iwy' | diff outputP/O5 - && exit 0 ;;
p6) $1 '&' 'SS]' '# Bn?]hf[,4b]' | diff outputP/O6 - && exit 0 ;;
p7) $1 '%n' 'P' 'q?Ik.b%h*]t' | diff outputP/O7 - && exit 0 ;;
p8) $1 'Q[qP' ',uUX' 'LrBg&q' | diff outputP/O8 - && exit 0 ;;
p9) $1 'AgL' '' 'AE2A*?h$Y$NZ G*6H{' | diff outputP/O9 - && exit 0 ;;
p10) $1 '' '' 'v@S' | diff outputP/O10 - && exit 0 ;;
p11) $1 '}' '}UG' '@CB' | diff outputP/O11 - && exit 0 ;;
p12) $1 '#' 'f8r' '{.,i$jd&lY' | diff outputP/O12 - && exit 0 ;;
p13) $1 '' '{Oo' 'bO=' | diff outputP/O13 - && exit 0 ;;
p14) $1 'yQl' 'I ' 'ML^PKa_Hv>+:\Y#j1' | diff outputP/O14 - && exit 0 ;;
p15) $1 '$8' 'xo' 'X-6fu>5|!aR[z' | diff outputP/O15 - && exit 0 ;;
p16) $1 'ev$' 'k' 'UC^oX' | diff outputP/O16 - && exit 0 ;;
p17) $1 '' '' '>`A3m8.a5AKi]c35#u|' | diff outputP/O17 - && exit 0 ;;
p18) $1 '' '+y' 'F_xz)' | diff outputP/O18 - && exit 0 ;;
p19) $1 'c<{F' '}PbI' '8{Sv"z' | diff outputP/O19 - && exit 0 ;;
p20) $1 'v' 'i<,/' '`U}THChXp}n<-zXz";@<' | diff outputP/O20 - && exit 0 ;;
p21) $1 '{y/ ' '}' 'u;' | diff outputP/O21 - && exit 0 ;;
p22) $1 '' 'T' 'zE\#oZ&Vm>Co-pj' | diff outputP/O22 - && exit 0 ;;
p23) $1 '' 'a6B' ')n,!(UY=' | diff outputP/O23 - && exit 0 ;;
p24) $1 '<*W' '+U3s' 's^=wl ggR<lGg9pq28' | diff outputP/O24 - && exit 0 ;;
p25) $1 'R' '0cB' 'EqZ$>N7t4>&\`n2<' | diff outputP/O25 - && exit 0 ;;
p26) $1 '' 'h^$X' '3`m_*ixOJEsdM?r:f' | diff outputP/O26 - && exit 0 ;;
p27) $1 'p#' '' 'Ni!_f{P}T }I5Wn' | diff outputP/O27 - && exit 0 ;;
p28) $1 'rX' 'D)]T' ' ca$+p~bbG6DcGJgF_*' | diff outputP/O28 - && exit 0 ;;
p29) $1 'zuZ' 'rKC' '2HTz' | diff outputP/O29 - && exit 0 ;;
p30) $1 '' 'X$1X' 'hGN:v6U~11' | diff outputP/O30 - && exit 0 ;;
p31) $1 '_' 'J' 'M[]:>IC{RTg$Px|6R' | diff outputP/O31 - && exit 0 ;;
p32) $1 'iw4' ' lC' 'B[%)D2I>Et?d6l`HdQ7' | diff outputP/O32 - && exit 0 ;;
p33) $1 ']<>' '^K_' '9' | diff outputP/O33 - && exit 0 ;;
p34) $1 'J<|' '<' 'FP]' | diff outputP/O34 - && exit 0 ;;
p35) $1 'G' '}5' 'c>i>;' | diff outputP/O35 - && exit 0 ;;
p36) $1 'd$' 'HFj' 't#' | diff outputP/O36 - && exit 0 ;;
p37) $1 '' 'h:7n' 'y21 ~d+!\TW.]y3pWz' | diff outputP/O37 - && exit 0 ;;
p38) $1 '&' '"F%' ')\:bU+2oVC6s' | diff outputP/O38 - && exit 0 ;;
p39) $1 'D' '' 'g' | diff outputP/O39 - && exit 0 ;;
p40) $1 'a' 'T2+Z' 'GoyBzY-+&Lif\-#x59' | diff outputP/O40 - && exit 0 ;;
p41) $1 '^a' '-<=t' ']^GJwWY2$(sjwnQgK' | diff outputP/O41 - && exit 0 ;;
p42) $1 '' '' 'o>hM\:#eW' | diff outputP/O42 - && exit 0 ;;
p43) $1 ')C9' '' 'F@mm!"c}zXQx{~>J' | diff outputP/O43 - && exit 0 ;;
p44) $1 'w' '!' 'LA/=*,-|KE}!' | diff outputP/O44 - && exit 0 ;;
p45) $1 'mtV' 'ty' 'Jw9}7q.{%2!@~E*({Qk_' | diff outputP/O45 - && exit 0 ;;
p46) $1 '7`a' '' '{A?x"K E--yd' | diff outputP/O46 - && exit 0 ;;
p47) $1 '9' 'zl6' 'j1/MOE6' | diff outputP/O47 - && exit 0 ;;
p48) $1 '' 'U' 'M8q$8RF>FRV5)vp!P' | diff outputP/O48 - && exit 0 ;;
p49) $1 '"%' 'bD' '4CfM6j#~\1;j8A~by"L' | diff outputP/O49 - && exit 0 ;;
p50) $1 'it' 'P' '=nk[2ps' | diff outputP/O50 - && exit 0 ;;
p51) $1 '' '9q8' '~M;8?Lh`Z<urDk&(' | diff outputP/O51 - && exit 0 ;;
p52) $1 'a9*u' '3V\' '/DJ?,&p~' | diff outputP/O52 - && exit 0 ;;
p53) $1 'R,L\' '> ' '_Y3=g]U!FpLS ' | diff outputP/O53 - && exit 0 ;;
p54) $1 'x\y' 'K#' 'te }uQD[$' | diff outputP/O54 - && exit 0 ;;
p55) $1 'jED' 'Wv^' '[fhy^E6gPT|c[!lWo' | diff outputP/O55 - && exit 0 ;;
p56) $1 '(xR' 'O' '-/N' | diff outputP/O56 - && exit 0 ;;
p57) $1 'Hh2' '9O(' ':N{7I2R+>`QbL%s' | diff outputP/O57 - && exit 0 ;;
p58) $1 '?y5Z' '' '{Gm3*,Ml_B' | diff outputP/O58 - && exit 0 ;;
p59) $1 '' '&' '7L' | diff outputP/O59 - && exit 0 ;;
p60) $1 ',|o' '-' 'OtA^x"' | diff outputP/O60 - && exit 0 ;;
p61) $1 '.' '1v' 'WQ-Jj-MUJ' | diff outputP/O61 - && exit 0 ;;
p62) $1 '' 'Z' '#[b#g x3.V1*W*' | diff outputP/O62 - && exit 0 ;;
p63) $1 ')o(' 'kGg|' 'H0UH"%ySAz' | diff outputP/O63 - && exit 0 ;;
p64) $1 ' Xp' '>' 'j-<`|tj P`_7/' | diff outputP/O64 - && exit 0 ;;
p65) $1 '' '%"q8' '*~4O{avyOv+^' | diff outputP/O65 - && exit 0 ;;
p66) $1 '7' 'n}' '6^;<d[m' | diff outputP/O66 - && exit 0 ;;
p67) $1 'nepi' '#h`>' '7,EY9;*T/4M' | diff outputP/O67 - && exit 0 ;;
p68) $1 '' 'Z' '~*#&]ZGE' | diff outputP/O68 - && exit 0 ;;
p69) $1 'YQ' '*1' '~;R' | diff outputP/O69 - && exit 0 ;;
p70) $1 'd' 'M)X' '%^kI3@9r7N7)qZw-<{' | diff outputP/O70 - && exit 0 ;;
p71) $1 '[' 'v3@' '7"eYhs tLJN#Pld>m' | diff outputP/O71 - && exit 0 ;;
p72) $1 'E)' 'l~~' 'B8kgsZpB0%ogbU^' | diff outputP/O72 - && exit 0 ;;
p73) $1 'Cc' '\' 's46 _=b' | diff outputP/O73 - && exit 0 ;;
p74) $1 'y)' 'iI' '(?1MqiZk2~&A|b$`-i|A' | diff outputP/O74 - && exit 0 ;;
p75) $1 's.<g' '4' 'v9U$4rFiPOOV[kp*oHc.' | diff outputP/O75 - && exit 0 ;;
p76) $1 '' '_0!l' '9I!#[u,I9Ph9~?_$!' | diff outputP/O76 - && exit 0 ;;
p77) $1 '' '2Z' '78g]' | diff outputP/O77 - && exit 0 ;;
p78) $1 'arp' '>' '&pCr=fyT}@~~J G0' | diff outputP/O78 - && exit 0 ;;
p79) $1 'BjnF' 'wh:' '?YRzrbo!P"E#o~Irl;i' | diff outputP/O79 - && exit 0 ;;
p80) $1 '+' '' 'Bn' | diff outputP/O80 - && exit 0 ;;
p81) $1 'N,' '=3*' '&-2DEErzZ' | diff outputP/O81 - && exit 0 ;;
p82) $1 '' 'qA}?' '! eyPwW$/7' | diff outputP/O82 - && exit 0 ;;
p83) $1 'o}' '' 'Jc%<cY' | diff outputP/O83 - && exit 0 ;;
p84) $1 'G 4B' 'Q' 'D' | diff outputP/O84 - && exit 0 ;;
p85) $1 '2' '' ' Gn7]53@A9xysy5,Wx' | diff outputP/O85 - && exit 0 ;;
p86) $1 'Yq' 'iRr' 'a(ie' | diff outputP/O86 - && exit 0 ;;
p87) $1 '}MU' '_2QN' 'X |8>a.@8)5lIbY&}tnS' | diff outputP/O87 - && exit 0 ;;
p88) $1 '$aB%' '' 'BYmCCx' | diff outputP/O88 - && exit 0 ;;
p89) $1 '' 'Nj' 'Ucw(@' | diff outputP/O89 - && exit 0 ;;
p90) $1 '' 'wGy5' 'KpSN>H#' | diff outputP/O90 - && exit 0 ;;
p91) $1 'c' ';%}B' '6}c' | diff outputP/O91 - && exit 0 ;;
p92) $1 '' '' 'm9_wyV<A' | diff outputP/O92 - && exit 0 ;;
n1) $1 '@' 'd^#' 'z@/8pR=r' | diff outputF/O1 - && exit 0 ;;
s) # single-valued fitness
let fit=0
$1 'Re' '2RO' 'YJz-R7' | diff outputP/O1 - && let fit=$fit+1
$1 '60oB' '' 'QI6[r' | diff outputP/O2 - && let fit=$fit+1
$1 '?3$' '' '_' | diff outputP/O3 - && let fit=$fit+1
$1 'lTk*' '' '*' | diff outputP/O4 - && let fit=$fit+1
$1 '+' 'J.:' 'g2*SlHwvh!y+iwy' | diff outputP/O5 - && let fit=$fit+1
$1 '&' 'SS]' '# Bn?]hf[,4b]' | diff outputP/O6 - && let fit=$fit+1
$1 '%n' 'P' 'q?Ik.b%h*]t' | diff outputP/O7 - && let fit=$fit+1
$1 'Q[qP' ',uUX' 'LrBg&q' | diff outputP/O8 - && let fit=$fit+1
$1 'AgL' '' 'AE2A*?h$Y$NZ G*6H{' | diff outputP/O9 - && let fit=$fit+1
$1 '' '' 'v@S' | diff outputP/O10 - && let fit=$fit+1
$1 '}' '}UG' '@CB' | diff outputP/O11 - && let fit=$fit+1
$1 '#' 'f8r' '{.,i$jd&lY' | diff outputP/O12 - && let fit=$fit+1
$1 '' '{Oo' 'bO=' | diff outputP/O13 - && let fit=$fit+1
$1 'yQl' 'I ' 'ML^PKa_Hv>+:\Y#j1' | diff outputP/O14 - && let fit=$fit+1
$1 '$8' 'xo' 'X-6fu>5|!aR[z' | diff outputP/O15 - && let fit=$fit+1
$1 'ev$' 'k' 'UC^oX' | diff outputP/O16 - && let fit=$fit+1
$1 '' '' '>`A3m8.a5AKi]c35#u|' | diff outputP/O17 - && let fit=$fit+1
$1 '' '+y' 'F_xz)' | diff outputP/O18 - && let fit=$fit+1
$1 'c<{F' '}PbI' '8{Sv"z' | diff outputP/O19 - && let fit=$fit+1
$1 'v' 'i<,/' '`U}THChXp}n<-zXz";@<' | diff outputP/O20 - && let fit=$fit+1
$1 '{y/ ' '}' 'u;' | diff outputP/O21 - && let fit=$fit+1
$1 '' 'T' 'zE\#oZ&Vm>Co-pj' | diff outputP/O22 - && let fit=$fit+1
$1 '' 'a6B' ')n,!(UY=' | diff outputP/O23 - && let fit=$fit+1
$1 '<*W' '+U3s' 's^=wl ggR<lGg9pq28' | diff outputP/O24 - && let fit=$fit+1
$1 'R' '0cB' 'EqZ$>N7t4>&\`n2<' | diff outputP/O25 - && let fit=$fit+1
$1 '' 'h^$X' '3`m_*ixOJEsdM?r:f' | diff outputP/O26 - && let fit=$fit+1
$1 'p#' '' 'Ni!_f{P}T }I5Wn' | diff outputP/O27 - && let fit=$fit+1
$1 'rX' 'D)]T' ' ca$+p~bbG6DcGJgF_*' | diff outputP/O28 - && let fit=$fit+1
$1 'zuZ' 'rKC' '2HTz' | diff outputP/O29 - && let fit=$fit+1
$1 '' 'X$1X' 'hGN:v6U~11' | diff outputP/O30 - && let fit=$fit+1
$1 '_' 'J' 'M[]:>IC{RTg$Px|6R' | diff outputP/O31 - && let fit=$fit+1
$1 'iw4' ' lC' 'B[%)D2I>Et?d6l`HdQ7' | diff outputP/O32 - && let fit=$fit+1
$1 ']<>' '^K_' '9' | diff outputP/O33 - && let fit=$fit+1
$1 'J<|' '<' 'FP]' | diff outputP/O34 - && let fit=$fit+1
$1 'G' '}5' 'c>i>;' | diff outputP/O35 - && let fit=$fit+1
$1 'd$' 'HFj' 't#' | diff outputP/O36 - && let fit=$fit+1
$1 '' 'h:7n' 'y21 ~d+!\TW.]y3pWz' | diff outputP/O37 - && let fit=$fit+1
$1 '&' '"F%' ')\:bU+2oVC6s' | diff outputP/O38 - && let fit=$fit+1
$1 'D' '' 'g' | diff outputP/O39 - && let fit=$fit+1
$1 'a' 'T2+Z' 'GoyBzY-+&Lif\-#x59' | diff outputP/O40 - && let fit=$fit+1
$1 '^a' '-<=t' ']^GJwWY2$(sjwnQgK' | diff outputP/O41 - && let fit=$fit+1
$1 '' '' 'o>hM\:#eW' | diff outputP/O42 - && let fit=$fit+1
$1 ')C9' '' 'F@mm!"c}zXQx{~>J' | diff outputP/O43 - && let fit=$fit+1
$1 'w' '!' 'LA/=*,-|KE}!' | diff outputP/O44 - && let fit=$fit+1
$1 'mtV' 'ty' 'Jw9}7q.{%2!@~E*({Qk_' | diff outputP/O45 - && let fit=$fit+1
$1 '7`a' '' '{A?x"K E--yd' | diff outputP/O46 - && let fit=$fit+1
$1 '9' 'zl6' 'j1/MOE6' | diff outputP/O47 - && let fit=$fit+1
$1 '' 'U' 'M8q$8RF>FRV5)vp!P' | diff outputP/O48 - && let fit=$fit+1
$1 '"%' 'bD' '4CfM6j#~\1;j8A~by"L' | diff outputP/O49 - && let fit=$fit+1
$1 'it' 'P' '=nk[2ps' | diff outputP/O50 - && let fit=$fit+1
$1 '' '9q8' '~M;8?Lh`Z<urDk&(' | diff outputP/O51 - && let fit=$fit+1
$1 'a9*u' '3V\' '/DJ?,&p~' | diff outputP/O52 - && let fit=$fit+1
$1 'R,L\' '> ' '_Y3=g]U!FpLS ' | diff outputP/O53 - && let fit=$fit+1
$1 'x\y' 'K#' 'te }uQD[$' | diff outputP/O54 - && let fit=$fit+1
$1 'jED' 'Wv^' '[fhy^E6gPT|c[!lWo' | diff outputP/O55 - && let fit=$fit+1
$1 '(xR' 'O' '-/N' | diff outputP/O56 - && let fit=$fit+1
$1 'Hh2' '9O(' ':N{7I2R+>`QbL%s' | diff outputP/O57 - && let fit=$fit+1
$1 '?y5Z' '' '{Gm3*,Ml_B' | diff outputP/O58 - && let fit=$fit+1
$1 '' '&' '7L' | diff outputP/O59 - && let fit=$fit+1
$1 ',|o' '-' 'OtA^x"' | diff outputP/O60 - && let fit=$fit+1
$1 '.' '1v' 'WQ-Jj-MUJ' | diff outputP/O61 - && let fit=$fit+1
$1 '' 'Z' '#[b#g x3.V1*W*' | diff outputP/O62 - && let fit=$fit+1
$1 ')o(' 'kGg|' 'H0UH"%ySAz' | diff outputP/O63 - && let fit=$fit+1
$1 ' Xp' '>' 'j-<`|tj P`_7/' | diff outputP/O64 - && let fit=$fit+1
$1 '' '%"q8' '*~4O{avyOv+^' | diff outputP/O65 - && let fit=$fit+1
$1 '7' 'n}' '6^;<d[m' | diff outputP/O66 - && let fit=$fit+1
$1 'nepi' '#h`>' '7,EY9;*T/4M' | diff outputP/O67 - && let fit=$fit+1
$1 '' 'Z' '~*#&]ZGE' | diff outputP/O68 - && let fit=$fit+1
$1 'YQ' '*1' '~;R' | diff outputP/O69 - && let fit=$fit+1
$1 'd' 'M)X' '%^kI3@9r7N7)qZw-<{' | diff outputP/O70 - && let fit=$fit+1
$1 '[' 'v3@' '7"eYhs tLJN#Pld>m' | diff outputP/O71 - && let fit=$fit+1
$1 'E)' 'l~~' 'B8kgsZpB0%ogbU^' | diff outputP/O72 - && let fit=$fit+1
$1 'Cc' '\' 's46 _=b' | diff outputP/O73 - && let fit=$fit+1
$1 'y)' 'iI' '(?1MqiZk2~&A|b$`-i|A' | diff outputP/O74 - && let fit=$fit+1
$1 's.<g' '4' 'v9U$4rFiPOOV[kp*oHc.' | diff outputP/O75 - && let fit=$fit+1
$1 '' '_0!l' '9I!#[u,I9Ph9~?_$!' | diff outputP/O76 - && let fit=$fit+1
$1 '' '2Z' '78g]' | diff outputP/O77 - && let fit=$fit+1
$1 'arp' '>' '&pCr=fyT}@~~J G0' | diff outputP/O78 - && let fit=$fit+1
$1 'BjnF' 'wh:' '?YRzrbo!P"E#o~Irl;i' | diff outputP/O79 - && let fit=$fit+1
$1 '+' '' 'Bn' | diff outputP/O80 - && let fit=$fit+1
$1 'N,' '=3*' '&-2DEErzZ' | diff outputP/O81 - && let fit=$fit+1
$1 '' 'qA}?' '! eyPwW$/7' | diff outputP/O82 - && let fit=$fit+1
$1 'o}' '' 'Jc%<cY' | diff outputP/O83 - && let fit=$fit+1
$1 'G 4B' 'Q' 'D' | diff outputP/O84 - && let fit=$fit+1
$1 '2' '' ' Gn7]53@A9xysy5,Wx' | diff outputP/O85 - && let fit=$fit+1
$1 'Yq' 'iRr' 'a(ie' | diff outputP/O86 - && let fit=$fit+1
$1 '}MU' '_2QN' 'X |8>a.@8)5lIbY&}tnS' | diff outputP/O87 - && let fit=$fit+1
$1 '$aB%' '' 'BYmCCx' | diff outputP/O88 - && let fit=$fit+1
$1 '' 'Nj' 'Ucw(@' | diff outputP/O89 - && let fit=$fit+1
$1 '' 'wGy5' 'KpSN>H#' | diff outputP/O90 - && let fit=$fit+1
$1 'c' ';%}B' '6}c' | diff outputP/O91 - && let fit=$fit+1
$1 '' '' 'm9_wyV<A' | diff outputP/O92 - && let fit=$fit+1
$1 '@' 'd^#' 'z@/8pR=r' | diff outputF/O1 - && let fit=$fit+1
let passed_all_so_stop_search="$fit >=93 "
echo $fit > $5
if [ $passed_all_so_stop_search -eq 1 ] ; then
exit 0
else
exit 1
fi;;
esac
exit 1
| true |
14c1a15bbcb239a3767513893e4def70376abc82 | Shell | krzysieqq/new_random_project | /run.sh | UTF-8 | 6,319 | 3.5625 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Credits for: Krzysieqq (https://github.com/krzysieqq/DUNP)
# Abort immediately if any command fails.
set -e
# Setup Main Container for Project
# docker-compose service name that most subcommands (bash/shell/test/...) target.
MAIN_CONTAINER=backend
# Filled in by the 'init|-i' subcommand via sed; empty until the project is created.
PROJECT_NAME=
# Uncommented by the 'setup|-stp' subcommand to overlay the local compose file.
#ADDITIONAL_DOCKER_COMPOSE_PARAMS="-f ./docker/docker-compose.local.yml"
# Template files copied (minus the trailing ".example") by 'setup|-stp'.
# This is a single newline-separated string iterated unquoted in that arm.
LOCAL_FILES="
./envs/.env.local.example
./docker/docker-compose.local.yml.example
./docker/entrypoint.local.sh.example
./docker/requirements.local.txt.example"
# Run docker-compose against the project's base compose file (plus the
# optional local overlay) with pinned registry/release env vars, forwarding
# all arguments verbatim.
# Globals: PROJECT_NAME (compose project name),
#          ADDITIONAL_DOCKER_COMPOSE_PARAMS (intentionally unquoted so it
#          word-splits into separate "-f file" flags when set).
# Fix: "$@" is quoted so forwarded arguments that contain spaces (e.g. test
# selectors, commit messages) reach docker-compose as single words.
function compose() {
    CI_REGISTRY=localhost RELEASE_VERSION=local docker-compose -f ./docker/docker-compose.yml $ADDITIONAL_DOCKER_COMPOSE_PARAMS -p $PROJECT_NAME "$@"
}
# Subcommand dispatch: the first CLI argument selects the action; the
# remaining arguments (${@:2}) are forwarded to docker-compose where noted.
case $1 in
    help|-h|--help)
        echo "Usage (params with '*' are optional):"
        echo "./run.sh                                 -> UP containers in detach mode"
        echo "./run.sh bash|-sh                        -> Open bash in main container"
        echo "./run.sh build|-b <params*>              -> BUILD containers"
        echo "./run.sh build-force|-bf <params*>       -> Force build containers (with params no-cache, pull)"
        echo "./run.sh custom_command|-cc              -> Custom docker-compose command"
        echo "./run.sh create_django_secret|-crs       -> Create Django Secret Key"
        echo "./run.sh create_superuser|-csu <password> -> Create default super user"
        echo "./run.sh down|-dn                        -> DOWN (stop and remove) containers"
        echo "./run.sh downv|-dnv                      -> DOWN (stop and remove with volumes) containers"
        echo "./run.sh init|-i <project name> <django version*> -> Initial setup and config development environment with creating new django project"
        echo "./run.sh help|-h                         -> Show this help message"
        echo "./run.sh logs|-l <params*>               -> LOGS from ALL containers"
        echo "./run.sh logsf|-lf <params*>             -> LOGS from ALL containers with follow option"
        echo "./run.sh shell|-sl                       -> Open shell in main container"
        echo "./run.sh shell_plus|-sp                  -> Open shell plus (only if django_extensions installed) in main container"
        echo "./run.sh makemigrate|-mm <params*>       -> Make migrations and migrate inside main container"
        echo "./run.sh notebook|-nb                    -> Run notebook (only if django_extensions installed)"
        echo "./run.sh recreate|-rec <params*>         -> Up and recreate containers"
        echo "./run.sh recreated|-recd <params*>       -> Up and recreate containers in detach mode"
        echo "./run.sh restart|-r <params*>            -> Restart containers"
        echo "./run.sh rm|-rm <params*>                -> Remove force container"
        echo "./run.sh setup|-stp                      -> Setup project for local development"
        echo "./run.sh stop|-s <params*>               -> Stop containers"
        echo "./run.sh test|-t <params*>               -> Run tests"
        echo "./run.sh up|-u <params*>                 -> UP containers with output"
        ;;
    bash|-sh)
        compose exec $MAIN_CONTAINER bash
        exit
        ;;
    build|-b)
        compose build ${@:2}
        exit
        ;;
    build-force|-bf)
        compose build --no-cache --pull ${@:2}
        exit
        ;;
    custom_command|-cc)
        compose ${@:2}
        exit
        ;;
    # NOTE(review): identical to custom_command|-cc; the secret key itself is
    # generated by the 'setup|-stp' flow below -- confirm this arm is intentional.
    create_django_secret|-crs)
        compose ${@:2}
        exit
        ;;
    down|-dn)
        compose down
        exit
        ;;
    downv|-dnv)
        compose down -v
        exit
        ;;
    # One-time project creation: stamp the project name ($2) into this script,
    # the uwsgi config and the env template, then scaffold a Django project in
    # ./app via a throwaway python container, and finish with the setup flow.
    # Each sed uses the hold-space trick so only the FIRST match is rewritten.
    init|-i)
        # Set project name in ./run.sh
        sed -i '1,/PROJECT_NAME=/{x;/first/s///;x;s/PROJECT_NAME=/PROJECT_NAME='$2'/;}' ./run.sh
        sed -i '1,/module=/{x;/first/s///;x;s/module=/module='$2'.wsgi:application/;}' ./configs/uwsgi.ini
        sed -i '1,/DJANGO_SETTINGS_MODULE=/{x;/first/s///;x;s/DJANGO_SETTINGS_MODULE=/DJANGO_SETTINGS_MODULE='$2'.settings/;}' ./envs/.env.local.example
        if [ -n "$3" ]; then
          DJANGO_VERSION="==$3"
          # NOTE(review): this sed targets the directory './' -- the requirements
          # file path appears to be missing from the command; confirm.
          sed -i '1,/Django>=3.1,<3.2/{x;/first/s///;x;s/Django>=3.1,<3.2/Django=='$3'/;}' ./
        fi
        docker run --rm -v $(pwd)/app:/code -w /code -e DEFAULT_PERMS=$(id -u):$(id -g) python:3-slim /bin/bash -c "pip install Django${DJANGO_VERSION} && django-admin startproject ${2} .&& chown -R \$DEFAULT_PERMS /code"
        ./run.sh -stp
        exit
        ;;
    create_superuser|-csu)
        if [ -z "$2" ]; then
          echo -e "You must provide an admin password as param f.g. \n$ ./run.sh -csu admin"
          exit
        fi
        # Pipe a one-liner into the Django shell; -T disables the TTY so the
        # heredoc-style stdin reaches the container.
        echo "import os; from django.contrib.auth import get_user_model; User = get_user_model(); User.objects.create_superuser('admin', 'admin@email.com', '$2') if not User.objects.filter(username='admin').exists() else print('Admin account exist.')" | compose exec -T $MAIN_CONTAINER "python manage.py shell"
        exit
        ;;
    logs|-l)
        compose logs ${@:2}
        exit
        ;;
    logsf|-lf)
        compose logs -f ${@:2}
        exit
        ;;
    # NOTE(review): the quoted string is handed to docker-compose exec as ONE
    # argv entry ("python manage.py shell"), not split into a program plus
    # arguments -- confirm the container's entrypoint resolves it.
    shell|-sl)
        compose exec $MAIN_CONTAINER "python manage.py shell"
        exit
        ;;
    shell_plus|-sp)
        compose exec $MAIN_CONTAINER "python manage.py shell_plus"
        exit
        ;;
    makemigrate|-mm)
        compose exec $MAIN_CONTAINER django-admin makemigrations
        compose exec $MAIN_CONTAINER django-admin migrate
        exit
        ;;
    notebook|-nb)
        compose exec $MAIN_CONTAINER django-admin shell_plus --notebook
        exit
        ;;
    recreate|-rec)
        compose up --force-recreate ${@:2}
        exit
        ;;
    recreated|-recd)
        compose up --force-recreate -d ${@:2}
        exit
        ;;
    restart|-r)
        compose restart ${@:2}
        exit
        ;;
    rm|-rm)
        compose rm -fv ${@:2}
        exit
        ;;
    # Local-dev bootstrap: materialize the *.example templates (${f::-8} drops
    # the 8-char ".example" suffix), enable the compose overlay, build images,
    # generate a Django secret key and write it into the local env file.
    setup|-stp)
        for f in $LOCAL_FILES
        do
            cp "$f" "${f::-8}"
        done
        sed -i '1,/ADDITIONAL_DOCKER_COMPOSE_PARAMS/{x;/first/s///;x;s/#ADDITIONAL_DOCKER_COMPOSE_PARAMS/ADDITIONAL_DOCKER_COMPOSE_PARAMS/;}' ./run.sh
        compose build
        DJANGO_SECRET_KEY=$(echo "from django.core.management.utils import get_random_secret_key;print(get_random_secret_key())" | ./run.sh -cc run --no-deps --rm backend django-admin shell)
        sed -i 's|DJANGO_SECRET_KEY=""|DJANGO_SECRET_KEY="'"$DJANGO_SECRET_KEY"'"|' ./envs/.env.local
        echo "Project setup completed. Now you can run containers with './run.sh' or './run.sh -u' (with live output)."
        exit
        ;;
    stop|-s)
        compose stop ${@:2}
        exit
        ;;
    test|-t)
        compose exec $MAIN_CONTAINER "python manage.py test ${@:2}"
        exit
        ;;
    up|-u)
        compose up ${@:2}
        exit
        ;;
    # Default (no or unknown subcommand): start the stack detached.
    *)
        compose up -d ${@:2}
        exit
        ;;
esac
| true |
ac05ccf6fb50b6a56b2f98c8b34fa7d139db67b5 | Shell | fenix-soft/unlock-archiver | /unlock-zip.sh | UTF-8 | 591 | 3.296875 | 3 | [] | no_license | #!/bin/bash
# unlock archive zip without password, the script use the dictionary attack (use file dic-pass.txt), the script find all zip file in the directory to run and check it all, and if possible extract the archive file.#
#Copyright (c) 2014 fenix-soft
# Try every candidate password from dic-pass.txt against the archive named by
# the global $j; on the first password that extracts cleanly, report it and
# return 0. Falls off the loop (non-zero) when no password matches.
# Fix: read the dictionary line-by-line instead of word-splitting `cat`, so
# passwords containing spaces or glob characters are tried intact, and quote
# the archive name. User-facing messages are kept verbatim (Italian).
checkpass() {
    local pass out
    while IFS= read -r pass; do
        out="$(7za x -y -p"$pass" "$j" | grep 'Everything is Ok')"
        if [ "$out" = "Everything is Ok" ]; then
            echo; echo; echo "file: $j pass trovata: $pass"; echo ; echo "ho estratto i file...con sucesso."; echo
            return 0
        fi
    done < dic-pass.txt
}
# Iterate over every zip archive in the current directory and attempt a
# dictionary attack on each. Fix: use the shell glob instead of parsing `ls`
# so filenames with spaces survive; skip the literal pattern when nothing
# matches. $j stays global because checkpass reads it.
for j in *.zip; do
    [ -e "$j" ] || continue
    echo; echo; echo "archivio $j"
    checkpass
done
| true |
375696c085b2ba8d792c93dad3e75a39130d08d0 | Shell | janejpark/niehs | /final_scripts/reheader.sh | UTF-8 | 647 | 2.65625 | 3 | [] | no_license | #!/bin/bash -l
#SBATCH --array=1-164%50
#SBATCH -D /home/jajpark/niehs/final_scripts/
#SBATCH -o /home/jajpark/niehs/slurm-log/191211-reheader-stout-%A-%a.txt
#SBATCH -e /home/jajpark/niehs/slurm-log/191211-reheader-stderr-%A-%a.txt
#SBATCH -J reheader
#SBATCH -p high
#SBATCH -t 24:00:00
# SLURM array job: for the Nth .bai index file (N = the array task id),
# derive the sample name and (below) rewrite the matching BAM's header so
# every read-group ID is prefixed with that sample name.
module load samtools
DIR=~/niehs/Data/mergedalignments
cd $DIR
# Select the task-id'th .bai from the listing; sed -n "<N>p" prints only
# line N of find's output.
f=$(find $DIR -name "*.bai" | sed -n $(echo $SLURM_ARRAY_TASK_ID)p)
# Path field 7 is the file name; keep the first four '_'-separated tokens of
# its basename as the sample name. NOTE(review): the field positions assume
# this exact directory depth -- confirm if DIR ever changes.
name=$(echo $f | cut -d "/" -f 7 | cut -d "." -f 1 | cut -d "_" -f 1-4 )
echo "Processing sample ${name}"
samtools view -H $name.bam | sed -e "s/ID:/ID:${name}./" | samtools reheader -P - ${name}.bam > ${name}.rehead.bam | true |
380d08ebca4345d0517459aeefbdb06593eac4e6 | Shell | davidschlegel/dotfiles | /.xinitrc | UTF-8 | 804 | 2.609375 | 3 | [] | no_license | #!/bin/sh
# start some nice programs
# X session startup: source the system-wide xinitrc drop-ins, configure the
# environment (keyboard, wallpaper hooks, Xresources), launch background
# helpers, and finally hand the session over to the i3 window manager.
if [ -d /etc/X11/xinit/xinitrc.d ] ; then
 for f in /etc/X11/xinit/xinitrc.d/?*.sh ; do
  [ -x "$f" ] && . "$f"
 done
 unset f
fi
export TERM="urxvt"
#Set background image
#feh --randomize --no-fehbg --bg-fill $HOME/Pictures/EarthPorn/* --randomize --no-fehbg --bg-fill $HOME/Pictures/EarthPorn/*
#feh --no-fehbg --bg-fill $HOME/Pictures/arch_linux_bg.jpg
#use Xresources
xrdb $HOME/.Xresources
# hide mouse cursor when it isn't used
unclutter -root -visible &
# pulse audio session
start-pulseaudio-x11
# keyboard layout
setxkbmap de #This can also be done using xmodmap for custom keyboard layout
# disks automounting with system tray icon
udiskie -t &
#start i3 with 25MiB of RAM for debug logs
# exec replaces this shell, making i3 the session's controlling process.
exec i3 --shmlog-size=2621440 -c $HOME/.config/i3/config
| true |
1e7bc747cf20951fdecca2d0cc9901bcc963595b | Shell | apache/impala | /testdata/bin/kill-hive-server.sh | UTF-8 | 1,870 | 3.484375 | 3 | [
"Apache-2.0",
"OpenSSL",
"bzip2-1.0.6",
"LicenseRef-scancode-openssl",
"LicenseRef-scancode-ssleay-windows",
"LicenseRef-scancode-google-patent-license-webrtc",
"PSF-2.0",
"BSD-3-Clause",
"dtoa",
"MIT",
"LicenseRef-scancode-mit-modification-obligations",
"Minpack",
"BSL-1.0",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-unknown",
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/bin/bash
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
set -euo pipefail
# Route failures through Impala's standard build-error reporting helpers.
. $IMPALA_HOME/bin/report_build_error.sh
setup_report_build_error
DIR=$(dirname "$0")
# Which services to stop; the flags below narrow this to one of the two.
KILL_HIVESERVER=1
KILL_METASTORE=1
while [ -n "$*" ]
do
  case $1 in
    -only_hiveserver)
      KILL_METASTORE=0
      ;;
    -only_metastore)
      KILL_HIVESERVER=0
      ;;
    # The trailing '*' means any unrecognized argument also prints usage.
    -help|-h|*)
      echo "kill-hive-server.sh : Kills the hive server and the metastore."
      echo "[-only_metastore] : Only kills the hive metastore."
      echo "[-only_hiveserver] : Only kills the hive server."
      exit 1;
      ;;
  esac
  shift;
done
if [[ $KILL_HIVESERVER -eq 1 ]]; then
  echo Stopping Hive server.
  "$DIR"/kill-java-service.sh -c HiveServer
  # The kill-java-service.sh command would fail if it did not succeed in
  # stopping HiveServer2. Remove the pid file so that a reuse of the pid cannot
  # interfere with starting HiveServer2. By default, the pid is written to
  # $HIVE_CONF_DIR.
  rm -f "$HIVE_CONF_DIR"/hiveserver2.pid
fi
if [[ $KILL_METASTORE -eq 1 ]]; then
  echo Stopping Hive metastore.
  "$DIR"/kill-java-service.sh -c HiveMetaStore
fi
| true |
d3472e89876bb6421e57fe820e8da53c6a2f17ea | Shell | petronny/aur3-mirror | /pyside-tools/PKGBUILD | UTF-8 | 938 | 2.828125 | 3 | [] | no_license | # Maintainer: Matthias Maennich <arch@maennich.net>
# Contributor: Massimiliano Torromeo <massimiliano.torromeo@gmail.com>
pkgname=pyside-tools
pkgver=0.2.13
pkgrel=5
# Python 2 interpreter series the tools are built against (consumed by the
# cmake flags in build()).
_pyver=2.7
pkgdesc="UI Compiler (pyside-uic) plus Qt Resource Compiler (pyside-rcc4) for PySide."
arch=('i686' 'x86_64')
license=('LGPL')
url="http://www.pyside.org"
depends=('pyside>=1.1.0' 'python2')
makedepends=('cmake' 'automoc4' 'shibokengenerator>=1.1.0')
source=("http://www.pyside.org/files/$pkgname-$pkgver.tar.bz2")
md5sums=('14d3a36df06d680357d7bc1960f19a6d')
# Build step: patch the pyside-uic launcher to invoke python2, then configure
# an out-of-tree CMake build against the python2 Shiboken and compile.
# Fix: the $srcdir path is quoted so builds in directories containing spaces
# do not break the cd.
build(){
  cd "$srcdir/$pkgname-$pkgver"
  # Rewrite interpreter references to python2 and restore the executable bit
  # after the sed round-trip through a temp file.
  sed -e "s/python/python2/g" pyside-uic > pyside-uic_mod && mv pyside-uic{_mod,} && chmod 755 pyside-uic
  mkdir -p build && cd build
  cmake ../ -DCMAKE_INSTALL_PREFIX=/usr \
            -DCMAKE_BUILD_TYPE=Release \
            -DSHIBOKEN_PYTHON_SUFFIX=-python$_pyver \
            -DPYTHON_EXECUTABLE=/usr/bin/python$_pyver
  make
}
# Package step: stage the compiled files into the package root via DESTDIR.
# Fix: $srcdir and $pkgdir are quoted to survive paths containing spaces.
package(){
  cd "$srcdir/$pkgname-$pkgver/build"
  make DESTDIR="$pkgdir" install
}
| true |
cb86612e96a336b9e0738a713184350506b20af5 | Shell | charlottelambert/DETM | /run_detm.sh | UTF-8 | 1,104 | 3 | 3 | [] | no_license | #!/bin/bash
set -e
# First run scripts/make_data.sh
# Possible alternative options:
# --rho_size 300 (that's the default)
# Pipeline: (1) train skip-gram word embeddings for the corpus, then
# (2) train the dynamic embedded topic model (DETM) using them.
# Corpus selection: the first CLI argument overrides the default dataset.
data_path=${1:-sessionsAndOrdinarys-txt-tok.tsv-decades}
echo STARTING: "$(date)"
echo ++ Training embeddings for Old Bailey.
# NOTE(review): CUDA_DEVICES is not an environment variable PyTorch reads --
# CUDA_VISIBLE_DEVICES was probably intended; confirm.
CUDA_DEVICES=3 python3 skipgram.py \
    --data_file /data/clambert/$data_path-proc \
    --emb_file data/$data_path-embed \
    --dim_rho 100 --iters 50 --window_size 4
echo Done with embeddings: "$(date)"
echo ++ Learning topics for Old Bailey.
mkdir -p results/$data_path
# Initialize values for training:
min_df=50
max_df=0.9
num_topics=70
epochs=100
lr=0.004
echo ++ min_df $min_df max_df $max_df
# Train the topic model; stdout becomes the run log, named after the
# hyperparameter settings so runs are distinguishable.
CUDA_DEVICES=3 python3 main.py \
    --mode train \
    --dataset ob \
    --data_path scripts/$data_path/min_df_$min_df\_max_df_$max_df \
    --num_topics $num_topics \
    --emb_path data/$data_path-embed \
    --epochs $epochs \
    --min_df $min_df \
    --emb_size 100 \
    --rho_size 100 \
    --lr $lr \
    --tc 1 \
    > results/$data_path/min_df_$min_df\_max_df_$max_df\_t_$num_topics\_epochs_$epochs\_lr_$lr.log
echo ++ Done with training on $data_path: "$(date)"
| true |
3270547a57437a8eb0c4070cae50973fc390603d | Shell | woyizhidouzai/dotfiles | /home/shell/functions/string/json_highlight.sh | UTF-8 | 539 | 3.71875 | 4 | [
"MIT"
] | permissive | #/
## A shell function to syntax-highlight JSON strings or files.
#\
## Syntax-highlight JSON strings or files.
##
## @param [optional, List] JSON data. If not specified, then data is read from
## standard input.
function json_highlight() {
  # Pretty-print JSON and colorize it with Pygments. Input comes either from
  # the function's arguments (joined into one string) or from a pipe on stdin.
  if [[ ! -p /dev/stdin ]]; then
    # Direct invocation, e.g. `json_highlight '{"foo":42}'`.
    python -mjson.tool <<< "$*" | pygmentize -l javascript
  else
    # Piped invocation, e.g. `echo '{"foo":42}' | json_highlight`.
    python -mjson.tool | pygmentize -l javascript
  fi
}
| true |
038ae4f9414c08f78063e1f857e4bc8ff3acd628 | Shell | rcedwards/IdiotBoxReprise | /curl_scripts/base_curl.sh | UTF-8 | 1,315 | 3.609375 | 4 | [] | no_license | #!/bin/bash
source ./load_env.sh
set -x
# Generic curl wrapper for the TheTVDB REST API.
# Usage: base_curl.sh <api-path> <http-verb> [data] [content-type]
API_ROOT="https://api.thetvdb.com/"
if [ -z "$SESSION_TOKEN" ]
then
    echo "Missing SESSION_TOKEN"
    echo "Not needed for an authenticate call but all others will need one."
    echo "Set in .env"
fi
#Store the passed in arguments
URL="$API_ROOT$1"
VERB=$2
DATA_FILE=$3
CONTENT_TYPE=$4
# NOTE(review): URL always starts with API_ROOT, so this emptiness check can
# never trigger; testing "$1" was probably intended -- confirm.
if [ -z "$URL" ]
then
    echo "Missing URL."
    exit
fi
if [ -z "$VERB" ]
then
    echo "Missing Verb."
    exit
fi
# Set JSON content type as default
if [ -z "$CONTENT_TYPE" ]
then
    CONTENT_TYPE="application/json"
fi
# Add HTTP body
# Map the content type onto the matching curl upload flag.
DATA_COMMAND=""
if [ "$DATA_FILE" ]
then
    if [[ "$CONTENT_TYPE" == "multipart/form-data" ]]
    then
        DATA_COMMAND="--form"
    elif [[ "$CONTENT_TYPE" == "application/x-www-form-urlencoded" ]]
    then
        DATA_COMMAND="--data"
    else
        DATA_COMMAND="--data-binary"
    fi
fi
echo "Calling $URL"
echo "$VERB: $DATA_COMMAND $DATA_FILE"
# NOTE(review): curl only reads a file's contents when the value carries an
# '@' prefix (e.g. @payload.json); as written, the literal value of DATA_FILE
# is sent as the body -- confirm the callers pass the data inline.
if [ -z "$DATA_COMMAND" ]
then
    curl -v \
    -H "Content-Type: $CONTENT_TYPE" \
    -H "Accept: application/json" \
    -H "Authorization: Bearer $SESSION_TOKEN" \
    -X "$VERB" \
    "$URL" | python -m json.tool
else
    curl -v \
    -H "Content-Type: $CONTENT_TYPE" \
    -H "Accept: application/json" \
    -H "Authorization: Bearer $SESSION_TOKEN" \
    -X "$VERB" "$DATA_COMMAND" "$DATA_FILE" \
    "$URL" | python -m json.tool
fi
| true |
d6cbbbf600d7f5a6415697de60cdb92dc1c53bb1 | Shell | ikechuku/yeoman-portfolio-generator | /app/src/common/_deploy.sh | UTF-8 | 775 | 3.421875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
#
#
# Build the site and publish it to the gh-pages branch of the configured
# GitHub repository. Rendered by the Yeoman generator; the <%= ... %>
# placeholders are filled in at generation time.
# Usage: deploy init           -- first-time build, branch setup and push
#        deploy push [message] -- rebuild and push (default message: deploy)
CWD="<%= cwd %>"
DATE=$(date)
echo $DATE

# Fix: "$1" is quoted so invoking the script with no arguments no longer
# triggers a "unary operator expected" error from test.
if [ "$1" == "init" ]
then
  cd "$CWD"
  gulp codejam
  gulp sweetcodejam
  gulp github
  gulp build
  cd dist
  git init
  git remote add origin https://github.com/<%= username %>/<%= repository %>.git
  git checkout -b gh-pages
  git add .
  git commit -m 'first deploy'
  git status
  git push -u origin gh-pages
  rm -rf .git
  exit 0
fi

if [ "$1" == "push" ]
then
  COMMENT='deploy'
  if [ $# -eq 2 ]
  then
    COMMENT=$2
    echo $COMMENT
  fi
  cd "$CWD"
  rm -rf dist
  git clone https://github.com/<%= username %>/<%= repository %>.git
  # NOTE(review): the clone is created under the repository's own directory
  # name; this rename assumes that name is 'codepolio' -- confirm.
  mv codepolio dist
  gulp codejam
  gulp sweetcodejam
  gulp build
  cd dist
  git add .
  # Fix: commit with the message collected above instead of the hard-coded
  # 'deploy' (the default value keeps the old behavior when none is given).
  git commit -m "$COMMENT"
  git push
  exit 0
fi

if [ -n "$1" ]
then
  echo "usage: deploy [init or push]"
  exit 65
fi
80283ab775a26a0a774d742c3d8891c3b099c7b8 | Shell | Tradesparq/sparqueue-examples | /submit.sh | UTF-8 | 287 | 2.921875 | 3 | [
"MIT"
] | permissive | INPUT=`dirname $0`/json/example.json
JOBID=`sparqueue-cli submit $INPUT`
while [ "$STATUS" != "SUCCESS" -a "$STATUS" != "FAILED" ]
do
STATUS=`sparqueue-cli status -text $JOBID`
echo "$JOBID=$STATUS"
sleep 1
done
sparqueue-cli job $JOBID
echo "$JOBID finished with status $STATUS" | true |
3a34225c050f52e4fe9cbbeeeed148db150fc81c | Shell | orhanf/mtUtils | /src/translate_and_calculate_bleu_shallow.sh | UTF-8 | 2,408 | 3.28125 | 3 | [] | no_license | #!/bin/bash
# arguments to script
PREFIX=$1
BASE_DIR=$2
DEVICE=$3
# unique id, time-stamp
CODEWORD=$(date +%s)
# script needs a directory structure
# like ./trainedModels ./tst ./dev
TRAINEDMODELS_PATH=${BASE_DIR}/trainedModels
# Groundhog path for sample.py
SAMPLE_PY=/u/firatorh/git/GroundHog/experiments/nmt/sampleShallowLM.py
# input and output files for test set
TST_SOURCE=${BASE_DIR}/tst/$4
TST_GOLD=${BASE_DIR}/tst/$5
TST_OUT=${BASE_DIR}/${PREFIX}_${CODEWORD}.IWSLT14.TED.tst2010.zh-en.TRANSLATION
# input and output files for development set
DEV_SOURCE=${BASE_DIR}/dev/$6
DEV_GOLD=${BASE_DIR}/dev/$7
DEV_OUT=${BASE_DIR}/${PREFIX}_${CODEWORD}.IWSLT14.TED.dev2010.zh-en.TRANSLATION
# joint input and output files
INP_FILE=${BASE_DIR}/${CODEWORD}_INPUT
OUT_FILE=${BASE_DIR}/${CODEWORD}_OUTPUT
cat $TST_SOURCE $DEV_SOURCE > $INP_FILE
# get line numbers of test
NUMLINES_TST=$(cat $TST_SOURCE | wc -l )
# these are usually same
REF_STATE=${TRAINEDMODELS_PATH}/${PREFIX}_state.pkl
REF_MODEL=${TRAINEDMODELS_PATH}/${PREFIX}_model.npz
STATE=${BASE_DIR}/${PREFIX}_${CODEWORD}_state.pkl
MODEL=${BASE_DIR}/${PREFIX}_${CODEWORD}_model.npz
LM_STATE=/data/lisatmp3/firatorh/nmt/en_lm/lm_state.pkl
LM_MODEL=/data/lisatmp3/firatorh/nmt/en_lm/lm_model.npz
ETA=0.5
# path to bleu score function
EVAL_BLEU=/data/lisatmp3/firatorh/turkishParallelCorpora/iwslt14/scripts/multi-bleu.perl
# copy original state and model file first
echo 'copying state and model files'
cp $REF_STATE $STATE
cp $REF_MODEL $MODEL
# next get the translations
echo 'translating from chinese to english...'
THEANO_FLAGS="floatX=float32, device=${DEVICE}" python $SAMPLE_PY --beam-search --state $STATE $MODEL --source $INP_FILE --trans $OUT_FILE --beam-size 20 --lm-model $LM_MODEL --lm-state $LM_STATE --eta $ETA
# split output file back to test and dev files
split -l $NUMLINES_TST $OUT_FILE $OUT_FILE
mv ${OUT_FILE}aa $TST_OUT
mv ${OUT_FILE}ab $DEV_OUT
rm $OUT_FILE
rm $INP_FILE
rm $STATE
rm $MODEL
# calculate bleu score
TST_BLEU=$(perl $EVAL_BLEU $TST_GOLD < $TST_OUT | grep -oP '(?<=BLEU = )[.0-9]+')
echo 'Tst BLEU =' $TST_BLEU
# calculate bleu score
DEV_BLEU=$(perl $EVAL_BLEU $DEV_GOLD < $DEV_OUT | grep -oP '(?<=BLEU = )[.0-9]+')
echo 'Dev BLEU =' $DEV_BLEU
# append scores to translation files
echo 'CODEWORD =' ${CODEWORD}
mv ${TST_OUT} ${TST_OUT}-BLEU${TST_BLEU}
mv ${DEV_OUT} ${DEV_OUT}-BLEU${DEV_BLEU}
| true |
5d6876434787a6d264172a6d89ffe607364be8b0 | Shell | giappi/heroku-buildpack-bash | /bin/detect | UTF-8 | 176 | 2.984375 | 3 | [] | no_license | #!/bin/sh
# this pack is valid for apps with a autorun.sh in the root
# Heroku buildpack "detect" hook: $1 is the app's build directory. Prints the
# buildpack banner and exits 0 when the app ships an autorun.sh, exits 1
# otherwise. Fix: "$1" is quoted so build paths containing spaces work.
if [ -f "$1/autorun.sh" ]; then
    echo ">>> Bash buildpack detected: autorun.sh"
    exit 0
else
    exit 1
fi
| true |
df7e7e5278efc2d63f591786348940257c2fb5d9 | Shell | jorgenbele/sh | /statusbar/statusbar.sh | UTF-8 | 2,405 | 4.03125 | 4 | [] | no_license | #!/bin/sh
# Author: Jørgen Bele Reinfjell
# Date: xx.07.2017 [dd.mm.yyyy]
# Modified: 13.11.2017 [dd.mm.yyyy]
# File: statusbar.sh
# Description:
#   Starts and runs the statusline generator status
#   and pipes it's output into 'lemonbar'.
# Dependencies: status, lemonbar
### Setup
# Environment-overridable paths; each default applies only when unset.
[ -z "$FONT_SIZE_FILE" ] && FONT_SIZE_FILE="/tmp/fontsize"
# Fix: the guard previously tested STATUS_PIPE_FILE (a variable never used
# elsewhere), so exporting it left STATUS_FIFO_PATH empty and broke mkfifo.
# Test the variable that is actually assigned and consumed below.
[ -z "$STATUS_FIFO_PATH" ] && STATUS_FIFO_PATH="/tmp/status.pipe"
[ -z "$STATUS_PID_FILE" ] && STATUS_PID_FILE="/tmp/status_pid"
[ -n "$LEMONBAR_CMD" ] && lemonbar_cmd="$LEMONBAR_CMD" || lemonbar_cmd="lemonbar"
[ -n "$LEMONBAR_ARGS" ] && lemonbar_args="$LEMONBAR_ARGS"
# Fontsize
# Resolution order: $FONTSIZE env var, then $1, then the persisted file,
# then the hard-coded default of 23.
if [ -z "$FONTSIZE" ]; then
    case $# in
        0)
            # Restore font size
            [ -f "$FONT_SIZE_FILE" ] && NEWFONTSIZE="$(cat "$FONT_SIZE_FILE")"
            if [ -n "$NEWFONTSIZE" ]; then
                FONTSIZE="$NEWFONTSIZE"
                echo "Restoring font size: $FONTSIZE" 1>&2
            else
                FONTSIZE="23"
                echo "Using default font size: $FONTSIZE" 1>&2
            fi
            ;;
        *) FONTSIZE="$1";;
    esac
fi
[ -z "$STATUSBAR_FONT" ] && STATUSBAR_FONT="Source Code Pro:pixelsize=$FONTSIZE:antialias=true"
### Main
echo "FONT_SIZE_FILE=$FONT_SIZE_FILE" 1>&2
echo "FONTSIZE=$FONTSIZE" 1>&2
echo "STATUSBAR_FONT=$STATUSBAR_FONT" 1>&2
# Save font size
echo "$FONTSIZE" > "$FONT_SIZE_FILE"
# Create named pipe if not already existing.
if ! [ -p "$STATUS_FIFO_PATH" ]; then
    echo "No FIFO file, creating it now..." 1>&2
    if ! mkfifo "$STATUS_FIFO_PATH"; then
        echo "Unable to make fifo: $STATUS_FIFO_PATH\nEXITING!" 1>&2
        exit 1
    fi
fi
# Make sure no process is already running.
if [ -f "$STATUS_PID_FILE" ]; then
    echo "Process already running..." 1>&2
    echo "Killing process..." 1>&2
    kill "$(cat "$STATUS_PID_FILE")"
fi
# Start status and pipe it's output to a named pipe.
# Also set it's priority low.
echo "Starting status..." 1>&2
status 2>/dev/null > "$STATUS_FIFO_PATH" &
status_pid="$!"
#renice 19 -p "$status_pid"
# Write pid to file.
echo "$status_pid" > "$STATUS_PID_FILE"
# Start 'lemonbar' with a low priority.
# - 'lemonbar' will be killed when status is killed.
echo "Starting lemonbar..." 1>&2
nice -19 $lemonbar_cmd $lemonbar_args -f "$STATUSBAR_FONT" < "$STATUS_FIFO_PATH"
# Start new process.
#nice status 2>/dev/null | lemonbar -f "$STATUSBAR_FONT"
| true |
c25aeab5b203911ac7bff8c3eef7c46b0202da12 | Shell | nateurope/eldk | /ppc_85xx/usr/lib/ltp/testcases/bin/nfs03 | UTF-8 | 11,504 | 3.65625 | 4 | [] | no_license | #! /bin/sh
#
# Copyright (c) International Business Machines Corp., 2001
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implie; warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
# the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
#
#
# FILE : nfs03
#
# DESCRIPTION: This script sets up the NFS directories in the remote machine
# and runs the LTP's filesystem test: fs_inod.
#
# SETUP: The home directory of root on the machine exported as "RHOST"
# MUST have a ".rhosts" file with the hostname of the machine
# where the test is executed.
#
#
# HISTORY:
# 11/2/01 Robbie Williamson (robbiew@us.ibm.com)
# -Created
#
#***********************************************************************
#Uncomment line below for debug output.
#trace_logic=${trace_logic:-"set -x"}
$trace_logic
#-----------------------------------------------------------------------
# Initialize local variables
#-----------------------------------------------------------------------
# Test-case identity and directory layout; every value below may be preset in
# the environment (:= only assigns the default when unset).
TC=${TC:=nfs03}
TCbin=${TCbin:=`pwd`}
TCdat=${TCdat:=$TCbin}
TCsrc=${TCsrc:=$TCbin}
TCtmp=${TCtmp:=$TCbin/$TC$$}
TCdump=${TCdump:=$TCbin}
# Remote NFS host; defaults to this machine's own hostname.
RHOST=${RHOST:=`hostname | awk {'print $1'}`}
PID=$$
# Setting the NFS to version 2 by default
VERSION=${VERSION:=2}
SOCKET_TYPE=${SOCKET_TYPE:=udp}
TESTDIR=${TESTDIR:=/tmp/$TC$PID.testdir}
NFS_TYPE=${NFS_TYPE:=nfs}
# If CLEANUP is not set; set it to "ON"
CLEANUP=${CLEANUP:="ON"}
# Positional overrides: <directories> <files> <nfsd count>.
DIR_NUM=$1
FILE_NUM=$2
NFSD_NUM=$3
DIR_NUM=${DIR_NUM:=100}
# NOTE(review): the default below embeds a leading space (" 100") because of
# the blank after ':=' -- confirm downstream arithmetic tolerates it.
FILE_NUM=${FILE_NUM:= 100}
# NOTE(review): NFSD_NUM gets no default while THREAD_NUM is never read from
# the arguments -- possible mix-up; confirm intended mapping.
THREAD_NUM=${THREAD_NUM:=1}
#============================================================================
# FUNCTION NAME: fs_inod
#
# FUNCTION DESCRIPTION: Filesystems test from LTP.
#
# PARAMETERS: Filesystem, number of directories, number of files,
# and loops
#
# RETURNS: 0 for PASS and Error Code for FAIL.
#============================================================================
fs_inod()
{
#=============================================================================
# FUNCTION NAME: err_log
#
# FUNCTION DESCRIPTION: Log error
#
# PARAMETERS: None.
#
# RETURNS: None.
#=============================================================================
function err_log
{
    # Report the failure message via the harness's error() helper and bump
    # the per-step error counter used by step1's return value.
    error "$1"
    step_errors=$((step_errors + 1))
}
#=============================================================================
# FUNCTION NAME: make_subdirs
#
# FUNCTION DESCRIPTION: Creates $numsubdirs subdirectories
#
# PARAMETERS: None.
#
# RETURNS: None.
#=============================================================================
function make_subdirs
{
    # Ensure dir0 .. dir(numsubdirs-1) exist under the current directory,
    # announcing each directory it has to create and reporting any failure.
    for (( i = 0; i < numsubdirs; i++ )); do
        if [ ! -d "dir$i" ]; then
            echo "$0: mkdir dir$i"
            mkdir -p dir$i || echo "mkdir dir$i FAILED"
        fi
    done
}
#=============================================================================
# FUNCTION NAME: touch_files
#
# FUNCTION DESCRIPTION: Creates $numfiles in each of $numsubdirs directories
#
# PARAMETERS: None.
#
# RETURNS: None.
#=============================================================================
function touch_files
{
    # Populate each dir<j> with $numfiles empty files named file<j><k>,
    # reporting any creation failure through err_log.
    echo "$0: touch files [0-$numsubdirs]/file$numsubdirs[0-$numfiles]"
    for (( j = 0; j < numsubdirs; j++ )); do
        cd dir$j
        for (( k = 0; k < numfiles; k++ )); do
            >file$j$k || err_log ">file$j$k FAILED"
        done
        cd ..
    done
}
#=============================================================================
# FUNCTION NAME: rm_files
#
# FUNCTION DESCRIPTION: Removes $numfiles in each $numsubdir directory
#
# PARAMETERS: None.
#
# RETURNS: None.
#=============================================================================
function rm_files
{
    # Delete file<j><k> from each dir<j> (the files touch_files creates),
    # reporting any removal failure through err_log.
    echo "$0: rm files [0-$numsubdirs]/file$numsubdirs[0-$numfiles]"
    for (( j = 0; j < numsubdirs; j++ )); do
        cd dir$j
        for (( k = 0; k < numfiles; k++ )); do
            rm -f file$j$k || err_log "rm -f file$j$k FAILED"
        done
        cd ..
    done
}
#=============================================================================
# FUNCTION NAME: step1
#
# FUNCTION DESCRIPTION: multiple processes creating and deleting files
#
# PARAMETERS: None.
#
# RETURNS: None.
#=============================================================================
function step1
{
	# Run $numloops rounds of parallel file creation/deletion in dir1 and
	# dir2, alternating background jobs, then wait for all of them.
	echo "=============================================="
	echo "MULTIPLE PROCESSES CREATING AND DELETING FILES"
	echo "=============================================="
	echo "$0: creating dir2 subdirectories"
	[ -d dir2 ] || { \
	mkdir -p dir2 || end_testcase "mkdir dir2 failed"
	}
	cd dir2 || err_log "cd dir2 FAILED"
	make_subdirs || err_log "make_subdirs on dir2 FAILED"
	cd ..
	echo "$0: creating dir1 subdirectories & files"
	[ -d dir1 ] || { \
	mkdir dir1 || abort "mkdir dir1 FAILED"
	}
	cd dir1 || err_log "cd dir1 FAILED"
	make_subdirs || err_log "make_subdirs on dir1 FAILED"
	touch_files
	# NOTE(review): touch_files above runs in the foreground, so $! does
	# not refer to it (it is empty or a stale job id); the first
	# 'wait $pid1' in the loop therefore waits on nothing. Confirm whether
	# this line was meant to read 'touch_files &'.
	pid1=$!
	i=1;
	while [ "$i" -le "$numloops" ]; do
		echo "Executing loop $i of $numloops..."
		# Added date stamps to track execution time and duration
		echo "$0: cd ../dir1 & creating files"
		cd ../dir1
		wait $pid1
		touch_files &
		pid1=$!
		echo "$0: cd ../dir1 & removing files"
		cd ../dir1
		wait $pid1
		rm_files &
		pid1=$!
		echo "$0: cd ../dir2 & creating files"
		cd ../dir2
		# NOTE(review): pid2 is unset on the first iteration, so this
		# expands to a bare 'wait' (all background jobs) -- confirm that
		# is intended.
		wait $pid2
		touch_files &
		pid2=$!
		echo "$0: cd ../dir2 & removing files"
		cd ../dir2
		wait $pid2
		rm_files &
		pid2=$!
		let i="$i + 1"
	done
	# wait for all background processes to complete execution
	wait
	return $step_errors
}
#=============================================================================
# MAIN
# See the description, purpose, and design of this test under TEST
# in this test's prolog.
#=============================================================================
# Argument handling: $#/$1..$4 here are fs_inod's own arguments, not the
# enclosing script's.
USAGE="Usage: ./fs_inod [volumename] [numsubdirectories] [numfiles] [numloops]"
if [ $# -ne 4 ]
then
	echo $USAGE
	exit 2
fi
testvol=$1
numsubdirs=$2
numfiles=$3
numloops=$4
cd $testvol || exit 2
echo "FS_INODE: File system stress - inode allocation/deallocation"
echo "Volume under test: $testvol"
echo "Number of subdirectories: $numsubdirs"
echo "Number of files: $numfiles"
echo "Number of loops: $numloops"
echo "Execution begins "
date
STEPS="1"
for I in $STEPS
do
	step_errors=0
	step$I
	if [ $? != 0 ]; then
		error "step$I failed - see above errors"
	fi
done
# Clean up and timestamp
rm -rf $testvol/dir*
echo "Execution completed"
date
# NOTE(review): ERRORS is never assigned in this function, so this
# 'return' actually propagates the status of the preceding 'date'
# command -- confirm whether $step_errors was meant here.
return $ERRORS
}
#=============================================================================
# FUNCTION NAME: setup_testcase
#
# FUNCTION DESCRIPTION: Perform the setup function for the testcase.
#
# PARAMETERS: None.
#
# RETURNS: None.
#=============================================================================
setup_testcase()
{
    # Prepare both ends of the NFS test: create the local mount point,
    # create and export the remote directory over rsh, mount it locally,
    # and set the requested number of server-side nfsd threads.
    $trace_logic
    echo ""
    echo "Test Options:"
    echo " VERSION: $VERSION"
    echo " SOCKET_TYPE: $SOCKET_TYPE"
    echo " TESTDIR: $TESTDIR"
    echo " RHOST: $RHOST"
    echo " NFS_TYPE: $NFS_TYPE"
    echo ""
    echo "Test Parameters:"
    echo " Number of Directories: $DIR_NUM"
    echo " Number of Files per Directory: $FILE_NUM"
    echo " Number of nfsds tested: $THREAD_NUM"
    echo ""
    # NFSv4 mounts take no vers/proto options here; other versions default
    # OPTS unless the caller already set it.
    if [ "x$NFS_TYPE" != "xnfs4" ]; then
        OPTS=${OPTS:="-o vers=$VERSION,proto=$SOCKET_TYPE "}
    fi
    REMOTE_DIR=${RHOST}:$TESTDIR
    mkdir -p $TCtmp || end_testcase "Could not create $TCtmp"
    chmod 777 $TCtmp
    echo "Setting up remote machine: $RHOST"
    rsh -n $RHOST "mkdir -p $TESTDIR"
    [ $? = 0 ] || end_testcase "Could not create remote directory"
    # NFSv4 exports go through a pseudo-root: bind-mount the test dir
    # under /export before exporting it.
    if [ "x$NFS_TYPE" = "xnfs4" ]; then
        rsh -n $RHOST "mkdir -p /export$TESTDIR"
        [ $? = 0 ] || end_testcase "Could not create /export$TESTDIR on server"
        echo "rsh -n $RHOST 'mount --bind $TESTDIR /export$TESTDIR'"
        rsh -n $RHOST "mount --bind $TESTDIR /export$TESTDIR"
        [ $? = 0 ] || end_testcase "Could not bind $TESTDIR to /export"
        rsh -n $RHOST "/usr/sbin/exportfs -i -o no_root_squash,rw *:$TESTDIR"
        [ $? = 0 ] || end_testcase "Could not export remote directory"
    else
        rsh -n $RHOST "/usr/sbin/exportfs -i -o no_root_squash,rw *:$TESTDIR"
        [ $? = 0 ] || end_testcase "Could not export remote directory"
    fi
    echo "Mounting NFS filesystem $REMOTE_DIR on $TCtmp with options '$OPTS'"
    mount -t $NFS_TYPE $OPTS $REMOTE_DIR $TCtmp || end_testcase "Cannot mount $TCtmp"
    # NOTE(review): $? here is always 0 -- a mount failure already ended
    # the testcase on the line above -- so this check is redundant.
    [ $? = 0 ] || end_testcase "Could not mount $REMOTE_DIR"
    echo "Setting server side nfsd count to $THREAD_NUM"
    # Remember the original nfsd count so end_testcase can restore it.
    ORIG_NFSD=`rsh -n $RHOST "ps -ef" | grep nfsd | grep -v grep | wc -l`
    rsh -n $RHOST "/usr/sbin/rpc.nfsd $THREAD_NUM"
    [ $? = 0 ] || end_testcase "Could not set the number of nfsds on $RHOST"
}
#=============================================================================
# FUNCTION NAME: do_test
#
# FUNCTION DESCRIPTION: Perform the test
#
# PARAMETERS: None.
#
# RETURNS: None.
#=============================================================================
do_test()
{
    # Run the fs_inod inode stress test against the mounted NFS
    # directory and abort the testcase if it reports any errors.
    $trace_logic
    echo "fs_inod starting on $TCtmp."
    fs_inod $TCtmp $DIR_NUM $FILE_NUM 1
    retval=$?
    echo $"fs_inod on $TCtmp finished."
    [ "$retval" = 0 ] || end_testcase "Errors have resulted from this test: fs_inod returned $retval."
    cd /
}
#=============================================================================
# FUNCTION NAME: end_testcase
#
# FUNCTION DESCRIPTION: Clean up
#
# PARAMETERS: None.
#
# RETURNS: None.
#=============================================================================
end_testcase()
{
    # Clean up the NFS mount and remote export, then report the result:
    # with no arguments the testcase passes (exit 0); any arguments are
    # printed as the failure reason (exit 1).
    $trace_logic
    if [ "$CLEANUP" = "ON" ]; then
        # BUG FIX: this line used to read 'cd \', whose trailing backslash
        # line-continued into the echo below, producing the broken command
        # 'cd echo "Cleaning up testcase"' -- the cd failed and the message
        # was never printed. Leave the mount point before unmounting it.
        cd /
        echo "Cleaning up testcase"
        /bin/umount $TCtmp || echo "Cannot umount $TCtmp"
        sleep 2
        rmdir $TCtmp || echo "Cannot remove $TCtmp"
        rsh -n $RHOST "/usr/sbin/exportfs -u *:$TESTDIR"
        rsh -n $RHOST "rm -rf $TESTDIR"
        # Restore the server's original nfsd thread count.
        rsh -n $RHOST "/usr/sbin/rpc.nfsd $ORIG_NFSD"
    fi
    [ $# = 0 ] && { echo "Test Successful"; exit 0; }
    echo "Test Failed: $@"
    exit 1
}
#=============================================================================
# MAIN PROCEDURE
#=============================================================================
# Mount the remote export, run the stress test, then clean up and report
# the overall result.
setup_testcase
do_test
end_testcase
| true |
23b751618673e700b669feb104c28a9cc728ea9c | Shell | rikeshi/stuff | /scripts/output_connected | UTF-8 | 225 | 2.703125 | 3 | [] | no_license | #!/bin/bash
#
# Enable every monitor xrandr reports, then restart jwm to pick up the
# new layout.
#
args=()
while read -r line; do
	monitor="$(cut -f 5 -d ' ' <<< "$line")"
	# BUG FIX: the "Monitors: N" header line (and any malformed line)
	# yields an empty name here; the old code still appended "--output"
	# for it, breaking the xrandr invocation. Skip such lines instead.
	[ -n "$monitor" ] || continue
	args+=(--output "$monitor" --auto)
done <<< "$(xrandr --listmonitors)"
xrandr "${args[@]}"
sleep 1
jwm -restart
| true |
0b6b517e9d28db27259cf07157953d854f504af2 | Shell | opena11y/fae1-django | /fae/scripts/cleanup_sites | UTF-8 | 861 | 3.984375 | 4 | [] | no_license | #! /bin/bash
# This script deletes directory trees under 'sites'
# with modification dates of at least 30 days past.
# If no argument is specified, 90 days is assumed.
# Note: the install path is derived from $0, so the
# script must be invoked with its full pathname.

# Map the invocation path onto the matching install tree.
case "$0" in
    /services/faedev*)  INSTALL_PATH=/services/faedev ;;
    /services/faetest*) INSTALL_PATH=/services/faetest ;;
    /services/faedata*) INSTALL_PATH=/services/faedata ;;
    *)
        echo "ERROR: Unable to set INSTALL_PATH to a known value."
        exit 1
        ;;
esac

# Age threshold in days: default 90, with a hard floor of 30.
if [ -z $1 ]; then
    DAYS=90
elif [ $1 -lt 30 ]; then
    echo "WARNING: Using minimum value of 30 for DAYS variable."
    DAYS=30
else
    DAYS=$1
fi

# Remove every top-level directory under sites/ older than $DAYS days.
find ${INSTALL_PATH}/sites -maxdepth 1 -mindepth 1 -type d -mtime +$DAYS -exec /bin/rm -r '{}' \;
| true |
dec99afe2e58739ec57985b9c116cbd83b2868ba | Shell | waterhd/wcc-on-docker | /images/database/runOracle.sh | UTF-8 | 5,650 | 3.90625 | 4 | [
"UPL-1.0",
"MIT"
] | permissive | #!/bin/bash
# LICENSE UPL 1.0
#
# Copyright (c) 1982-2016 Oracle and/or its affiliates. All rights reserved.
#
# Since: November, 2016
# Author: gerald.venzl@oracle.com
# Description: Runs the Oracle Database inside the container
#
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER.
#
# Scripts modified in April 2019 by dennis.waterham@oracle.com
# Exit immediately if any command exits non-zero
set -eo pipefail
# Die function
# die MESSAGE: print the message to stderr (note: no trailing newline)
# and terminate the container with status 1.
die() { printf "ERROR: %s" "$1" >&2; exit 1; }
########### Move DB files ############
function moveFiles {
   # One-time migration: move the instance's configuration files from
   # ORACLE_HOME into the persistent oradata volume, then symlink them
   # back so the database configuration survives container re-creation.
   # Create directory
   [[ -d $ORACLE_BASE/oradata/dbconfig/$ORACLE_SID ]] || mkdir -pv $ORACLE_BASE/oradata/dbconfig/$ORACLE_SID
   # Move files
   mv $ORACLE_HOME/dbs/spfile$ORACLE_SID.ora \
      $ORACLE_HOME/dbs/orapw$ORACLE_SID \
      $ORACLE_HOME/network/admin/sqlnet.ora \
      $ORACLE_HOME/network/admin/listener.ora \
      $ORACLE_HOME/network/admin/tnsnames.ora \
      $ORACLE_BASE/oradata/dbconfig/$ORACLE_SID/
   # oracle user does not have permissions in /etc, hence cp and not mv
   cp /etc/oratab $ORACLE_BASE/oradata/dbconfig/$ORACLE_SID/
   symLinkFiles;
}
########### Symbolic link DB files ############
function symLinkFiles {
   # Recreate symlinks from the Oracle homes to the persisted copies in
   # the oradata volume; each link is created only if it does not exist.
   [[ -L $ORACLE_HOME/dbs/spfile$ORACLE_SID.ora ]] ||
      ln -s $ORACLE_BASE/oradata/dbconfig/$ORACLE_SID/spfile$ORACLE_SID.ora $ORACLE_HOME/dbs/spfile$ORACLE_SID.ora
   [[ -L $ORACLE_HOME/dbs/orapw$ORACLE_SID ]] ||
      ln -s $ORACLE_BASE/oradata/dbconfig/$ORACLE_SID/orapw$ORACLE_SID $ORACLE_HOME/dbs/orapw$ORACLE_SID
   [[ -L $ORACLE_HOME/network/admin/sqlnet.ora ]] ||
      ln -s $ORACLE_BASE/oradata/dbconfig/$ORACLE_SID/sqlnet.ora $ORACLE_HOME/network/admin/sqlnet.ora
   [[ -L $ORACLE_HOME/network/admin/listener.ora ]] ||
      ln -s $ORACLE_BASE/oradata/dbconfig/$ORACLE_SID/listener.ora $ORACLE_HOME/network/admin/listener.ora
   [[ -L $ORACLE_HOME/network/admin/tnsnames.ora ]] ||
      ln -s $ORACLE_BASE/oradata/dbconfig/$ORACLE_SID/tnsnames.ora $ORACLE_HOME/network/admin/tnsnames.ora
   # oracle user does not have permissions in /etc, hence cp and not ln
   cp $ORACLE_BASE/oradata/dbconfig/$ORACLE_SID/oratab /etc/oratab
}
########### SIGINT handler ############
function _term() {
   # Signal/exit handler; $1 is the name passed by the trap registration
   # ("SIGINT", "SIGTERM", "SIGKILL" or "EXIT").
   printf 'Stopping container.\n%s received, shutting down database!\n' $1
   # Stop database
   if [[ $1 = 'SIGKILL' ]]; then
      # Kill the database
      # NOTE(review): bash cannot actually trap SIGKILL, so this branch
      # appears unreachable via a real signal -- confirm.
      sqlplus / as sysdba <<< "SHUTDOWN ABORT"
   else
      # Shut down cleanly
      sqlplus / as sysdba <<< "SHUTDOWN IMMEDIATE"
   fi
   # Stop listener
   lsnrctl stop
   # Kill any tail processes
   pkill -9 tail &>/dev/null
}
###################################
############# MAIN ################
###################################
# Check whether container has enough memory
# NOTE(review): this reads the cgroup-v1 memory limit file; it does not
# exist on cgroup-v2 hosts and 'set -e' would abort here -- confirm.
container_mem=$(cat /sys/fs/cgroup/memory/memory.limit_in_bytes)
# Check for at least 2 GiB (2147483648 bytes) of container memory.
[[ ${container_mem} -lt 2147483648 ]] && die "The container doesn't have enough memory allocated. At least 2GB is required."
# Check that hostname doesn't container any "_" (Github issue #711)
[[ ${HOSTNAME} =~ _ ]] && die "Hostname is set to $HOSTNAME. Underscores are not allowed!"
# Shutdown database when receiving a signal
trap "_term SIGINT" SIGINT
trap "_term SIGTERM" SIGTERM
# NOTE(review): SIGKILL cannot be trapped; this registration appears to
# have no effect -- confirm.
trap "_term SIGKILL" SIGKILL
# Validate Oracle SID
if [[ ! $ORACLE_SID ]]; then
   # Set default value
   export ORACLE_SID=ORCLCDB
else
   # Make ORACLE_SID upper case (Github issue # 984)
   export ORACLE_SID=${ORACLE_SID^^}
   # Check whether SID is no longer than 12 bytes (Github issue #246: Cannot start OracleDB image)
   [[ "${#ORACLE_SID}" -gt 12 ]] && die "The ORACLE_SID must only be up to 12 characters long."
   # Check whether SID is alphanumeric (Github issue #246: Cannot start OracleDB image)
   [[ "$ORACLE_SID" =~ [^a-zA-Z0-9] ]] && die "The ORACLE_SID must be alphanumeric."
fi;
# Default for ORACLE PDB
export ORACLE_PDB=${ORACLE_PDB:-ORCLPDB1}
# Make ORACLE_PDB upper case (github issue # 984)
export ORACLE_PDB=${ORACLE_PDB^^}
# Check whether database already exists
if [[ -d $ORACLE_BASE/oradata/$ORACLE_SID ]]; then
   symLinkFiles;
   # Make sure audit file destination exists
   [[ -d $ORACLE_BASE/admin/$ORACLE_SID/adump ]] || mkdir -p $ORACLE_BASE/admin/$ORACLE_SID/adump
   # Start database
   $ORACLE_BASE/startDB.sh
else
   # Remove database config files, if they exist
   rm -fv \
      $ORACLE_HOME/dbs/spfile$ORACLE_SID.ora \
      $ORACLE_HOME/dbs/orapw$ORACLE_SID \
      $ORACLE_HOME/network/admin/sqlnet.ora \
      $ORACLE_HOME/network/admin/listener.ora \
      $ORACLE_HOME/network/admin/tnsnames.ora
   # Create database
   $ORACLE_BASE/createDB.sh $ORACLE_SID $ORACLE_PDB $ORACLE_PWD
   # Move database operational files to oradata
   moveFiles;
   # Execute custom provided setup scripts
   $ORACLE_BASE/runUserScripts.sh $ORACLE_BASE/scripts/setup
   # Start listener
   lsnrctl start
   # Register with listener
   sqlplus / as sysdba <<< "ALTER SYSTEM REGISTER;"
   # Sleep a few seconds
   sleep 5
fi;
# Check whether database is up and running
$ORACLE_BASE/checkDBStatus.sh || die "DATABASE SETUP WAS NOT SUCCESSFUL! Please check output for further info."
# Shutdown database on script exit (calls _term with "EXIT", which takes
# the clean SHUTDOWN IMMEDIATE path).
trap "_term EXIT" EXIT
cat >&1 <<EOF
###########################
DATABASE IS READY TO USE!
###########################
EOF
# Execute custom provided startup scripts
$ORACLE_BASE/runUserScripts.sh $ORACLE_BASE/scripts/startup
# Tail on alert log and wait (otherwise container will exit)
echo "The following output is now a tail of the database alert log:"
echo "============================================================="
tail -f $ORACLE_BASE/diag/rdbms/*/*/trace/alert*.log &
# Wait for termination signal
childPID=$!
wait $childPID
| true |
95d659bbc4c3ab2e2043b69c27f16f0973893a83 | Shell | kaburrell/bash | /bash_functions.sh | UTF-8 | 2,592 | 4.125 | 4 | [] | no_license | #!/bin/bash
# Exploring Bash functions with short code examples
# 12/18/20 Updated
# 12/09/20 Created
# function exploration examples
# developed on-line Bash REPL
# https://repl.it/languages/bash
# great overview
# https://linuxize.com/post/bash-functions/
# define a function
function foo()
{
  echo "foo() - SUCCESS"
}
# call a function 2 ways
# foo call directly, name only, no parens e.g. foo()
# $(function) or `function`
# foo writes to stdout
foo
# capture stdout in a variable
msg=$(foo)
echo "$msg"
# pass arguments
# parameters are not specified in the function definition
# each passed parameter gets assigned $1 .. $9
function foo_with_args()
{
  echo "$0"
  echo "${FUNCNAME[0]}"
  echo "$1"
  echo "$2"
}
# call - no parens, space separated
foo_with_args "Hello" "World"
# what can bash functions do
# execute a sequence of commands
# write text to stdout
# return a string .. if capture stdout with $(function)
# return a status code
# bash functions CAN'T return numeric values, like 10356
# what can a bash function return
# 1) a string value
# 2) a return code, an int 0 to 255
# 3) write a value to a global variable
# return a value
function today()
{
  echo $(date +"%m-%d-%y")
}
# this is not useful if need the return value in a script
today
# get the return value
today=$(today)
echo "Today's date is $today"
# get the return code, must get it right after the call
today
echo "return code is $?"
# function can explicitly set its return code
# return an int value
# 0 is success
# 1 is failure
# all other values indicate a specific error code
function greeting()
{
  # Print a personalized greeting; the printf status (0 on success)
  # becomes the function's return code, matching the old 'return 0'.
  printf 'Hello %s!\n' "$1"
}
# Call the function, then show its return code via $?.
greeting "Ken"
echo "greeting() return status is $?"
function favorite_food()
{
  # Status 0 only when the argument is "pizza"; anything else yields 1
  # (the status of the failed test), matching the original if/else.
  [ "$1" = "pizza" ]
}
# return success
favorite_food "pizza"
echo "favorite_food('pizza') return status is $?"
# return failure
favorite_food "broccoli"
echo "favorite_food('broccoli') return status is $?"
# testing the return status code from the function
# 1) examine $? immediately
# 2) if test .. if cmd | function; then
if favorite_food 'pizza'; then
  echo "everyone loves pizza"
fi
if ! favorite_food 'broccoli'; then
  echo "you don't love pizza?"
fi
# local variables
# all variables are global in bash
# use local in a function to keep variable scope
# is_weekday: status 0 Monday-Friday, 1 on weekends (date +%u gives the
# ISO day of week, 1 = Monday .. 7 = Sunday).
function is_weekday()
{
  local _dow
  _dow=$(date +%u)
  if [ $_dow -ge 1 ] && [ $_dow -le 5 ]; then
    return 0
  else
    return 1
  fi
} | true |
284673db37ca244c98fdc580a58eec0b5406e7f7 | Shell | run2death/Sunix | /.Sunix/bin/about | UTF-8 | 334 | 2.96875 | 3 | [] | no_license | #!/bin/bash
# This command is a quick way to search something with your web browser.
# e.g.	$? key word
#
# All arguments are joined into one query string with '+', and literal
# '+' characters are escaped as %2b so they survive URL encoding.
q=${1//+/%2b}
shift
# BUG FIX: quote "$1" so arguments that contain spaces or look like test
# operators (e.g. "-n") cannot break or fool the loop condition.
while [ -n "${1-}" ]
do
	q=$q+${1//+/%2b}
	shift
done
$browser "http://www.google.com/search?q=$q" >/dev/null 2>/dev/null &
# I feel lucky!
# $browser "http://www.google.com/search?q=$q&btnI" >/dev/null 2>/dev/null &
| true |
32e09d3fbc03ac8c909a752a0001b26bed8a1ce7 | Shell | dev-rc-build-2019/working-dir | /playbooks/roles/oracleInstall/files/mod_code/run_binary_ins.sh | UTF-8 | 2,142 | 3.71875 | 4 | [] | no_license | #!/bin/bash
#
# * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
# NAME: run_binary_ins.sh
# AUTHOR: R Cutler
# DESCRIPTION: Script combines install and patch of database binaries
# VARIABLES:
#
# * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
# DATE NAME DESCRIPTION
# ---------- --------------- --------------------------------------------------
# 08.05.2020 R Cutler Add opatch update code
# 01.09.2020 R Cutler Add comments on code logic
# 01.09.2020 R Cutler Add comments on code logic
# 12.04.2019 R Cutler Code for automation
# * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
##########################################
# Initialize global variables
##########################################
stage_loc="/app/oradba/automation/oracle19"
stage_dir="/tmp/code"
upd_opatch="/app/vendor/oracle/OPatch/p6880880_190000_Linux-x86-64.zip"
tarfile=code.tar
##########################################
# If directory exists delete and create
# If not create the directory
# download and untar file in new directory
##########################################
# NOTE(review): the two branches are identical except for the leading
# 'rm -rf'; they could be collapsed into an unconditional recreate.
if [ ! -d ${stage_dir} ]; then
   mkdir -p ${stage_dir}
   cp ${stage_loc}/${tarfile} ${stage_dir}/.
   cd ${stage_dir}
   tar -xvf ${tarfile}
else
   rm -rf ${stage_dir}
   mkdir -p ${stage_dir}
   cp ${stage_loc}/${tarfile} ${stage_dir}/.
   cd ${stage_dir}
   tar -xvf ${tarfile}
fi
##########################################
# Set environment variables
##########################################
. ${stage_dir}/set_env_19.sh
##########################################
# Install binaries
# if successful patch binaries
# if not successful send failure message
##########################################
${stage_dir}/install_binaries_19.sh
if [ "$?" = "0" ]; then
   # $orahome is presumably exported by set_env_19.sh above -- confirm.
   cd $orahome
   # Replace the shipped OPatch with the updated one before patching.
   mv OPatch OPatch.orig
   unzip ${upd_opatch}
   ${stage_dir}/patch_binary_only_19.sh
else
   echo -e "STATUS:FAILURE\n"
   echo -e "ERRORMESSAGE:Failure running install_binaries_19.sh\n"
fi
##########################################
# end of script
| true |
77aa3656bb3401b0eb88950442060e5e76cbc399 | Shell | theoriginalgri/arm-cross-sysroot | /formula/json-glib.sh | UTF-8 | 826 | 2.890625 | 3 | [] | no_license | #!/bin/bash
# Formula: download, configure and cross-build json-glib 0.14.2.
URL="ftp://ftp.gnome.org/pub/gnome/sources/json-glib//0.14/json-glib-0.14.2.tar.xz"
# No extra formula dependencies.
DEPEND=()
# Arguments handed to ./configure by the framework's build step.
ARGS=(
	"--host=${HOST}"
	"--sbindir=${BASE_DIR}/tmp/sbin"
	"--libexecdir=${BASE_DIR}/tmp/libexec"
	"--sysconfdir=${BASE_DIR}/tmp/etc"
	"--sharedstatedir=${BASE_DIR}/tmp/com"
	"--localstatedir=${BASE_DIR}/tmp/var"
	"--datarootdir=${BASE_DIR}/tmp/share"
	"--disable-glibtest"
	"--disable-gtk-doc"
	"--disable-nls"
)
# Helper functions (get_names_from_url, installed, get_download,
# extract_tar, build) presumably come from the framework that sources
# this formula -- confirm.
get_names_from_url
# 'installed' appears to return 1 when the pkg-config file is missing,
# i.e. the library still needs to be built -- confirm.
installed "json-glib-1.0.pc"
if [ $? == 1 ]; then
	# Temporarily hide the sysroot's glib-genmarshal so the build picks
	# up the host tool; restore it once the build is done.
	if [ -f "${SYSROOT_DIR}/bin/glib-genmarshal" ]; then
		mv "${SYSROOT_DIR}/bin/glib-genmarshal" "${SYSROOT_DIR}/bin/glib-genmarshal_bak"
	fi
	get_download
	extract_tar
	build
	if [ -f "${SYSROOT_DIR}/bin/glib-genmarshal_bak" ]; then
		mv "${SYSROOT_DIR}/bin/glib-genmarshal_bak" "${SYSROOT_DIR}/bin/glib-genmarshal"
	fi
fi
| true |
0191d3cbc662dfde127f0d7f0c7f750e6f947e5e | Shell | deltafront/scripts | /shell/speedtest.sh | UTF-8 | 216 | 2.796875 | 3 | [] | no_license | #!/usr/bin/env bash
# Run a speedtest and archive the results under ~/logs/speedtest, one
# timestamped log file per run.
timestamp=$(date +"%Y-%m-%d_%H_%M")
logs_dir="$HOME/logs/speedtest"
file_name=$logs_dir/$timestamp.log
# BUG FIX: create the log directory first -- on a fresh machine the
# redirection below failed with "No such file or directory".
mkdir -p "$logs_dir"
echo "$file_name"
speedtest --simple --share > "$file_name"
echo "Results stored in $file_name" | true |
0478de80dea3490eebc9b478af2af8e10314fca7 | Shell | AnthonyZi/linux_startup_installations | /old_installations/PI/startinstallation.sh | UTF-8 | 210 | 2.625 | 3 | [] | no_license | echo "[SCRIPT] deactivate swapping consistent"
# Stop the swap service, wait until the kernel reports no swap in use,
# then remove dphys-swapfile for good.
sudo service dphys-swapfile stop
# The Swap line of 'free' is reduced to its digits; the sed collapses an
# all-zero string down to "0", so the loop ends once total/used/free are
# all zero. BUG FIX: quote '[:digit:]' and 's/0*0/0/g' -- unquoted they
# are glob patterns and could expand against files in the current
# directory (e.g. a file literally named "d").
until [ "$(free | grep Swap | tr -cd '[:digit:]' | sed 's/0*0/0/g')" -le 0 ]
do
	sleep 2s
done
sudo apt-get purge dphys-swapfile
| true |
19acf6cd1ed8336155544634239c23bf7b3c567b | Shell | eleksbai/stress-testing | /quick_image.sh | UTF-8 | 571 | 3.15625 | 3 | [] | no_license | #!/usr/bin/env bash
read -p "Please set image tag:" tag
echo create docker image hyman-web:${tag}
docker build -t hyman-web:${tag} /opt/case/stress-test/web
docker tag hyman-web:${tag} 10.10.65.200/hyman/hyman-web:${tag}
docker push 10.10.65.200/hyman/hyman-web:${tag}
typeset -u yn
read -p "Please set tag latest and push registry(y/N)?" yn
if [[ ${yn} == Y ]]; then
echo push latest registry!
docker tag hyman-web:${tag} hyman-web:latest
docker tag hyman-web:${tag} 10.10.65.200/hyman/hyman-web:latest
docker push 10.10.65.200/hyman/hyman-web:latest
fi
| true |
ef9a4c019cbc778954d62cb67366219260364744 | Shell | riverosm/scripts | /envia_pagos.sh | UTF-8 | 1,214 | 3.796875 | 4 | [] | no_license | #!/bin/sh
# Compare the two most recent transfer listings and generate a PDF for
# every payment order (OP) that is new since the previous run.
SCRIPT=`basename $0 .sh`
FECHA=`date "+%d_%m_%Y"`
FECHA_BARRAS=`date "+%d/%m/%Y"`
ARCH_TMP=~/scripts/logs/${SCRIPT}.tmp
ARCH_LOG=~/scripts/logs/${SCRIPT}_${FECHA}.log
USER=xxxx
# Prune logs older than 30 days and start a fresh log file for today.
find ~/scripts/logs/ -name "${SCRIPT}*.log" -mtime +30 -exec rm {} \; >/dev/null 2>/dev/null
cat /dev/null >${ARCH_LOG}
DIR_TRANSFERENCIAS=/home/${USER}/scripts/transferencias
ls -l $DIR_TRANSFERENCIAS/*$FECHA* >/dev/null 2>/dev/null
# Get the two most recently transferred files
ULTIMO_ARCHIVO=`ls -ltr $DIR_TRANSFERENCIAS/transferencias_* | tail -1 | awk '{print $9}'`
ANTULT_ARCHIVO=`ls -ltr $DIR_TRANSFERENCIAS/transferencias_* | tail -2 | head -1 | awk '{print $9}'`
# Check that neither file is empty
if [ ! -s $ULTIMO_ARCHIVO ];
then
	echo "El archivo $ULTIMO_ARCHIVO esta vacio" >>${ARCH_LOG}
	exit 1
fi
if [ ! -s $ANTULT_ARCHIVO ];
then
	echo "El archivo $ANTULT_ARCHIVO esta vacio" >>${ARCH_LOG}
	exit 1
fi
# The difference between the two listings is what is new to send.
# NOTE(review): the second sed expression deliberately spans two lines
# (its pattern contains a literal newline); confirm it behaves as
# intended -- some sed implementations reject it as unterminated.
diff $ULTIMO_ARCHIVO $ANTULT_ARCHIVO | grep "OP_" | awk '{print $6}' | awk -F_ '{print $2}' | sed "s/\.pdf//g" | sed "s/
//g" | sort -u | while read OP
do
	echo "$FECHA_BARRAS: OP #$OP" >>${ARCH_LOG}
	/home/${USER}/scripts/genera_pdf.php $OP >>${ARCH_LOG}
done
| true |
3fd90de41519d274e0d0cc497f5208ad1579690a | Shell | hexiu/Shell_Scripts | /lamp/domaindel.sh | UTF-8 | 584 | 3.09375 | 3 | [] | no_license | echo -e "Now have :\n"
# List the zones currently configured in named.conf; the first five
# entries are the stock zones, so skip them.
grep -o "zone .*.*" /etc/named.conf |cut -d'"' -f2|sed "1,5d"
echo
read -p "You want to delete(eg:piwik.com):" DOMAIN
# BUG FIX: refuse an empty answer -- with DOMAIN empty, the old code
# passed the '! -d /www/' check (it is a directory) and then ran
# 'rm -fr /www/', deleting every site.
[ -z "$DOMAIN" ] && echo " Domain Error ..." && exit 2
[ ! -d "/www/$DOMAIN" ] && echo " Domain Error ..." && exit 2
# BUG FIX: the old '[ $? -eq 0 ]&& ... || ...' chains tested the status
# of the preceding echo (always 0) and misspelled the error message
# ("Tnput"); use explicit if/else and only edit named.conf when the
# site tree was actually removed.
if rm -fr "/www/$DOMAIN"; then
	echo "Input sure ! delete ing ..."
	# Drop the 4-line zone stanza for this domain from named.conf.
	sed -i "/zone[[:space:]]\?\"$DOMAIN\"/,+3d" /etc/named.conf
else
	echo "Input Error!"
fi
rm -fr "/var/named/$DOMAIN.zone"
# Drop the 5-line virtual-host block for this domain from Apache.
sed -i "/#$DOMAIN/,+4d" /etc/httpd/conf.d/virtualhost.conf
service httpd reload
if ! service named reload; then
	echo "Starting named failure..."
fi
echo "delete finished ."
| true |
e32b4f50b5e192f9ec47fd3174fd0161a98d1c9c | Shell | makker-nl/ReleaseDeploy | /Source/ReleaseDeploy/Deploy/deployAll.sh | UTF-8 | 3,543 | 3.671875 | 4 | [] | no_license | #!/bin/sh
###################################################################################################
# deployAll
# Script to deploy OSB, SOA or BPM
#
# author: Martien van den Akker
# (C) october 2017
# Darwin-IT Professionals
###################################################################################################
# Positional arguments: the target environment (osoa|obpm|tsoa|tbpm) and
# the directory holding <environment>.properties.
DEPLOYMENT_ENVIRONMENT=$1
ENV_PROP_DIR=$2
ENV_PROP_FILE=$ENV_PROP_DIR/$DEPLOYMENT_ENVIRONMENT.properties
#
#Usage
usage(){
  # Show how to invoke the script, with an example, then stop.
  printf '%s\n' "[INFO] Run script $0.sh <osoa|obpm|tsoa|tbpm> <Environment property location>"
  printf '%s\n' "$0 obpm $HOME/conf"
  printf '\n'
  exit
}
#
#Usage
help(){
  # Print an example <environment>.properties file and exit.
  # BUG FIX: three lines below used unescaped ${deploy.*} expansions;
  # parameter names cannot contain '.', so the shell aborted them with
  # "bad substitution" instead of printing the literal placeholders.
  # They are now escaped (\$) like the deploy.serverURL line always was.
  echo "."
  echo "[INFO]Create script $ENV_PROP_FILE, with the following (example) content"
  echo "overwrite=true"
  echo "user=wls_admin"
  echo "password=<password you know>"
  echo "forceDefault=true"
  echo "deploy.keepInstancesOnRedeploy=true"
  echo "deploy.server=<your SOA server>"
  echo "deploy.port=8001"
  echo "deploy.serverURL=http://\${server}:\${port}"
  echo "deploy.admin.server=o-bpm-1-admin-vhn.ont.org.darwin-it.local"
  echo "deploy.admin.port=7001"
  echo "deploy.adminServerURL=t3\://\${deploy.admin.server}\:\${deploy.admin.port}"
  echo "# Config plan replacement properties"
  echo "soasuite.URL=\${deploy.serverURL}"
  echo "soaserver.endpoint=\${deploy.server}\:\${deploy.port}"
  echo "bpm.URL=o-bpm-1.ont.org.darwin-it.local"
  echo "soa.URL=o-soa-1.ont.org.darwin-it.local"
  echo "osb.URL=o-osb-1.ont.org.darwin-it.local"
  echo
  exit
}
#
# Check if FMW_HOME is set
#
# if not set default it to $ORACLE_HOME
# NOTE(review): the [[ ]] tests below are bashisms; under the #!/bin/sh
# shebang they fail on shells like dash -- confirm /bin/sh is bash here.
if [[ -z $FMW_HOME ]]; then
  if [[ ! -z $ORACLE_HOME ]]; then
    echo "[INFO] FMW_HOME not set, it will be defaulted to ORACLE_HOME: $ORACLE_HOME"
    export FMW_HOME=$ORACLE_HOME
  fi
fi
#
# Set WLS environment
#
if [[ ! -z $FMW_HOME ]]; then
  export WL_HOME=$FMW_HOME/wlserver
  . $WL_HOME/server/bin/setWLSEnv.sh
  echo "FMW_HOME: $FMW_HOME"
  echo "WL_HOME: $WL_HOME"
  echo
fi
#
# Validation chain: each failed precondition prints an error and shows
# either the usage or the example-properties help, which both exit.
if [[ -z $FMW_HOME ]]; then
  echo "[ERROR] FMW_HOME not set."
  echo "set FMW_HOME environment "
  usage
#
# Check if environment is specified on command line
#
elif [[ -z $DEPLOYMENT_ENVIRONMENT ]]; then
  echo "[ERROR] The deployment environment is not specified."
  usage
#
# Check if environment property directory is specified on command line
#
elif [[ -z $ENV_PROP_DIR ]]; then
  echo "[ERROR] The directory with 'properties' file is not specified."
  usage
#
# Check if property directory exists
#
elif [[ ! -d "$ENV_PROP_DIR" ]]; then
  echo "[ERROR] The deployment environment configuration directory $ENV_PROP_DIR does not exists."
  help
#
# Check if property file itself exists
#
elif [[ ! -f "$ENV_PROP_FILE" ]]; then
  echo "[ERROR] The deployment environment configuration file $ENV_PROP_FILE does not exists."
  help
#
# Check if property file contains proprties matching environment
#
elif ! grep -q "server" "$ENV_PROP_FILE"; then
  echo "[ERROR] Properties for $DEPLOYMENT_ENVIRONMENT environment not found."
  help
else
  echo "[OK] The deployment environment is specified: $DEPLOYMENT_ENVIRONMENT."
  echo "[OK] The directory with 'properties' file is specified: $ENV_PROP_DIR."
  echo "[OK] The deployment environment configuration directory $ENV_PROP_DIR exists."
  echo "[OK] The deployment environment environment configuration file $ENV_PROP_FILE exists."
  echo "[OK] Properties for $DEPLOYMENT_ENVIRONMENT environment found."
fi
# Start ant deploy.
echo
echo "Start ant deploy"
echo "."
ant -f build.xml deployAll -Denv.prop.dir=$ENV_PROP_DIR -Ddeployment.plan.environment=$DEPLOYMENT_ENVIRONMENT | true |
0f78e8ba0df4737b07f4fb251323f3be0d0a1a4d | Shell | lanwan/PKGBUILDs | /alarm/fake-hwclock/PKGBUILD | UTF-8 | 738 | 2.703125 | 3 | [] | no_license | # Maintainer: Oleg Rakhmanov <orakhmanov [at] gmail [dot] com>
#
# Reworked Alexander Manning's rc.d script for systemd
# Reference: http://archplusplus.co.uk/post/31401843803/fake-hwclock-for-arch-linux-arm-on-raspberry-pi
# Arch Linux ARM PKGBUILD: installs a systemd unit plus helper script
# that save the clock on shutdown and restore it on boot (for boards
# without a battery-backed RTC, e.g. Raspberry Pi).
pkgname=fake-hwclock
pkgver=0.2
pkgrel=1
pkgdesc="Saves time on shutdown and restores it on boot from a file"
arch=('arm')
license=('GPL')
install=fake-hwclock.install
source=('fake-hwclock.sh'
        'fake-hwclock.service')
md5sums=('09cea0ee86071fb86d3cdbc52feabe69'
         '8a328ff872a092dcdf86088ae2c20fd3')
# Copy the helper script and unit file into the package staging tree.
package() {
  mkdir -p "${pkgdir}/usr/lib/systemd/"{scripts,system}
  cp "${srcdir}/fake-hwclock.sh" "${pkgdir}/usr/lib/systemd/scripts/"
  cp "${srcdir}/fake-hwclock.service" "${pkgdir}/usr/lib/systemd/system/"
}
| true |
c42a9cac312ef31745beac6a3bea83738e73476e | Shell | newsboat/newsboat | /contrib/bookmark-buku.sh | UTF-8 | 749 | 3.34375 | 3 | [
"MIT",
"CC-BY-4.0"
] | permissive | #!/usr/bin/env bash
# newsboat bookmarking plugin for buku
# (c) 2021 Greg Fitzgerald <gregf@beastie.tech>
#
# Heavily inspired by bookmark-pinboard.sh
#
# Add the following to your newsboat config, and adjust the path as needed.
# bookmark-cmd ~/bin/bookmark-buku.sh
# bookmark-interactive yes
url="$1"
title="$2" #you can comment this out and rely on buku to get the title if you prefer.
#desc="$3" # not used by buku, buku will get this information for you.
#feed_title="$4" don't think this is of any use to us either?
buku=$(command -v buku)
# BUG FIX: "seperated" typo corrected in the prompt.
echo "Enter some tags comma separated: "
# BUG FIX: read -r so tags containing backslashes are taken literally.
read -r tags
if [ ! "$tags" ]; then
	# You can set default tags here
	tags="newsboat"
fi
# Only try to bookmark when buku is actually installed.
if [ "$buku" ]; then
	buku --nostdin -a "$url" "$tags" --title "$title" >/dev/null
fi
| true |
877b25c9110c032b5ea0417641a9e07dbe82c85e | Shell | DriveClutch/circleci-python | /tools/canary_deploy.sh | UTF-8 | 772 | 3.40625 | 3 | [] | no_license | #!/usr/bin/env bash
# Post a canary-deploy webhook for images built from the develop branch.
# Arguments: 1=CIRCLE_BRANCH 2=CIRCLE_SHA1 3=CIRCLE_PROJECT_REPONAME 4=API_KEY
if [[ -z "$1" ]]
then
    echo "\$CIRCLE_BRANCH is empty"
    # BUG FIX: every other missing-argument check aborts the script; this
    # one used to fall through and continue with REPO_BRANCH empty.
    exit 1
fi
if [[ -z "$2" ]]
then
    echo "\$CIRCLE_SHA1 is empty"
    exit 1
fi
if [[ -z "$3" ]]
then
    echo "\$CIRCLE_PROJECT_REPONAME is empty"
    exit 1
fi
if [[ -z "$4" ]]
then
    echo "\$API_KEY is empty"
    exit 1
fi
REPO_BRANCH=$1
IMAGE_ID=$2
REPO_NAME=$3
API_KEY=$4
# Build the JSON payload for the canary service.
generate_post_data()
{
  cat <<EOF
{
  "imageId": "$IMAGE_ID",
  "target": "$REPO_NAME"
}
EOF
}
# Only builds from the develop branch trigger a canary deployment.
if [[ $REPO_BRANCH == "develop" ]]
then
    WEBHOOK_URL="https://api.dev1.clutchtech.io/canary-service/webhook"
    curl --location --request POST $WEBHOOK_URL \
    --header 'Content-Type: application/json' \
    --header "X-Github-Webhook-API-Key: $API_KEY" \
    --data "$(generate_post_data)"
fi
18e04257e13a6e336292d5fd17a08088a2d12efe | Shell | Totchino/Hyrule-Castle | /hyrule_castle.sh | UTF-8 | 322 | 2.828125 | 3 | [] | no_license | #!/bin/bash
# Hyrule Castle entry point: load the game's helper functions, then show
# the main menu and dispatch on the chosen mode.
source ./fonctions.sh
clear
coin=0
# Display the menu and the different game modes
# (presumably 'menu' in fonctions.sh sets $m from the player's choice --
# confirm)
menu
win=0
lose=0
if [[ $m == 1 ]]; then
    menu_versus
elif [[ $m == 2 ]]; then
    clear
    alerte_maintenance
else
    echo "invalid option $REPLY"
    read -n 1 -s -r -p " === Press any key to return to the menu ==="
    clear
    menu
fi
| true |
4b5d33e9725deb388a87b5c62633b282d859a6e5 | Shell | troywill/shiloh-build | /usr/bin/lfs-commands/chapter06/1130-patch | UTF-8 | 172 | 2.546875 | 3 | [] | no_license | #!/bin/bash
set +h
set -e
# BUG FIX: guard and quote PKGDIR -- with the variable unset the old
# unquoted 'cd $PKGDIR' silently changed to $HOME and the patch/build
# ran in the wrong directory despite 'set -e'.
cd "${PKGDIR:?PKGDIR is not set}"
patch -Np1 -i ../patch-2.6.1-test_fix-1.patch
./configure --prefix=/usr
make
make install
echo -e "\n\nTotalseconds: $SECONDS\n"
exit
| true |
56b0bb501c60738735275e35e93d543a70f48163 | Shell | cloux/sin | /modules/ec2-tools/install | UTF-8 | 849 | 3.890625 | 4 | [
"WTFPL",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/bin/sh
#
# Install/Update AWS tools from 'inst' and ec2-metadata
#
# need to be root
if [ $(id -u) -ne 0 ]; then printf 'Need to be root!\n'; exit 1; fi
# Install all scripts from 'inst' into /usr/local/bin/
# (${0%/*} is the directory this script was invoked from)
find ${0%/*}/inst -maxdepth 1 -type f -perm /111 -exec cp -uvpP -t /usr/local/bin/ '{}' \;
# Install ec2-metadata from Amazon
if [ -x /usr/local/bin/ec2-metadata ]; then
	printf 'Update '
else
	printf 'Install '
fi
printf 'ec2-metadata tool ... '
cd /tmp || exit
rm -f ec2-metadata
# NOTE(review): plain-HTTP download with no checksum verification --
# consider https and an integrity check.
wget -N -q http://s3.amazonaws.com/ec2metadata/ec2-metadata
if [ $? -ne 0 ] || [ ! -f ec2-metadata ]; then
	printf 'download FAILED\n'
	exit 1
fi
# Sanity check: the first line must be a shebang; otherwise we probably
# fetched an error page instead of the script.
if head -n 1 ec2-metadata | grep -vq '^#!'; then
	printf 'FAILED: downloaded file is not a script!\n'
	rm -f ec2-metadata
	exit 1
fi
chmod 755 ec2-metadata
mv -f ec2-metadata /usr/local/bin/
printf 'DONE\n'
| true |
5ad57b7183ad0439e6fa35e9c83d8db4c0b08a50 | Shell | habzy/profile | /bash_profile.d/ssh-agent.sh | UTF-8 | 719 | 3.5625 | 4 | [] | no_license | _SSH_AUTH_SOCK=/tmp/ssh-agent.sock
_SSH_PID_FILE=/tmp/ssh-agent.pid
_SSH_AGENT_PID=$(cat "$_SSH_PID_FILE" 2>/dev/null)
# Validate the saved SSH agent socket and PID.
# BUG FIX: the /proc check used the misspelled _SSH_AGNET_PID, which is
# always empty, so '-d /proc/' tested the /proc directory itself and a
# dead agent was never detected.
if [[ -S "$_SSH_AUTH_SOCK" && -n $_SSH_AGENT_PID
            && -d /proc/$_SSH_AGENT_PID ]]; then
    # ssh-agent is already running now.
    export SSH_AUTH_SOCK=$_SSH_AUTH_SOCK
    export SSH_AGENT_PID=$_SSH_AGENT_PID
else
    # Spawn a new ssh-agent bound to the well-known socket path.
    eval `ssh-agent -a $_SSH_AUTH_SOCK` >/dev/null
    if [[ -z $SSH_AGENT_PID ]]; then
        echo Failed to start ssh-agent.
    else
        # Save pid so future shells can reuse this agent.
        echo $SSH_AGENT_PID >$_SSH_PID_FILE
        echo Trying to add keys.
        ssh-add
    fi
fi
unset _SSH_AUTH_SOCK
unset _SSH_AGENT_PID
unset _SSH_PID_FILE
| true |
87c264550f445ce312d809f41bfe84fac7e76143 | Shell | kdu2/mac-config | /packages/printer-template/build.sh | UTF-8 | 500 | 3.09375 | 3 | [] | no_license | #!/bin/bash
# Name of the package.
NAME="printer"
# Once installed the identifier is used as the filename for a receipt files in /var/db/receipts/.
IDENTIFIER="com.github.kdu2.$NAME"
# Package version number.
VERSION="1.0"
# The location to copy the contents of files.
INSTALL_LOCATION="/Library/Printers/PPDs/Contents/Resources"
# Build package.
pkgbuild --root files --install-location "$INSTALL_LOCATION" --scripts scripts --identifier "$IDENTIFIER" --version "$VERSION" "$NAME-$VERSION.pkg"
| true |
2ad03a3adef7abd482eb204fe6887cdb6e340c6a | Shell | Zacharias030/ProGraML | /deeplearning/ml4pl/graphs/labelled/dataflow/flaky_make_data_flow_analysis_dataset.sh | UTF-8 | 1,494 | 3.078125 | 3 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/usr/bin/env bash
#
# A wrapper around
# //deeplearning/ml4pl/graphs/labelled/dataflow:make_data_flow_analysis_dataset
# for running on flaky, heavily loaded systems.
#
# Usage:
#
# bazel run //deeplearning/ml4pl/graphs/labelled/dataflow:flaky_make_data_flow_analysis -- \
# <make_data_flow_analysis_args...>
#
# Copyright 2019 the ProGraML authors.
#
# Contact Chris Cummins <chrisc.101@gmail.com>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
set -u

# Run the dataset generator repeatedly in 30-minute slices until it
# terminates gracefully, so a crash/hang on a flaky machine just
# restarts it instead of killing the whole job.
main() {
  i=0
  while true; do
    i=$((i+1))
    echo "Beginning run $i of dataset generator"
    # Run the dataset generator for at most 30 minutes (SIGKILL after).
    # Quote "$@" so caller flags containing spaces survive intact.
    if timeout -s9 1800 deeplearning/ml4pl/graphs/labelled/dataflow/make_data_flow_analysis_dataset \
        --max_instances=10000 \
        --vmodule='*'=3 \
        "$@" ; then
      echo "Dataset generator terminated gracefully after $i iterations"
      break
    fi
    # Pause for a second so that the user can C-c twice to break the loop.
    if ! sleep 1 ; then
      break
    fi
  done
}
main "$@"
| true |
c92ac1a6348eac9ca88279bed72378147d4d0c56 | Shell | devYaoYH/cogPsychLab_toolkit | /bin/fix_wordpairs.sh | UTF-8 | 151 | 2.515625 | 3 | [] | no_license | #!/bin/bash
# Prompt for a (prime,target) word-pair CSV and run the filter over it.
# Abort if the parent directory (where filter_words.py lives) is missing.
cd .. || exit 1
echo "Word pair .csv file (prime,target) to fix:"
# -r keeps backslashes in the typed path literal.
read -r word_file
echo "Reading from... $word_file"
# Quote the filename so a path with spaces reaches python as one argument.
python filter_words.py "$word_file"
| true |
9e7dc5e0d40d43deef0395dc1be13be97915c6ea | Shell | kbgoda/bitcoin-prices-analysis | /Project Stage 2/Unix/analysis_unix.sh | UTF-8 | 1,079 | 3.265625 | 3 | [
"BSD-3-Clause"
] | permissive | #! /usr/bin/env bash
# Price analysis queries over the merged LTCBTC / BTCAUD CSV
# (columns: 1=date, 2=LTCBTC weighted price, 3=open, 4=high, 5=low,
#  6=close, 9=BTCAUD weighted price — per the queries below).

# The number of lines in the file including the header
wc -l mergeLBAUD.csv
# The number of rows do not have a N/A value for BTC
tail -n+2 mergeLBAUD.csv | egrep -v 'N/A' | wc -l
# The highest weighted price of BTCAUD and the date it was in
cut -d',' -f'1,9' mergeLBAUD.csv | sort -t',' -k'2n' | tail -1
# The total number of rows where the opening price of BTCAUD is greater than $700
# BUG FIX: the filename was misspelled "mergeLBaud.csv", which fails on
# case-sensitive filesystems (e.g. Linux).
cut -d',' -f'3' mergeLBAUD.csv | awk '$0 > 700 {print $0}' | wc -l
# Gets the date where LTC was worth 0.008BTC and the BTC high (4th column)
# at the exchanges was over $315
grep '0.00800' mergeLBAUD.csv | awk -F',' '$4 > 315 {print $1}'
# Gets the largest 'close' value recorded for Bitcoin where
# the 'high' is greater than $350 and the 'low' is less than $300
cut -d',' -f'4,5,6' mergeLBAUD.csv | awk -F',' '$1 > 350 && $2 < 300 { print $3 }' |\
sort -n | tail -1
# The date where both BTCAUD's weighted_price and LTCBTC's were at their lowest together overall
cut -d',' -f'1,2,9' mergeLBAUD.csv | tail -n+2 |\
sort -t',' -k'3,3n' -k'2,2n' | head -1 | cut -d',' -f'1'
| true |
63511cf62186107775edc0973169059e8db5342d | Shell | houyi-tracing/ms-http-demo | /examples/kube/apply-all.sh | UTF-8 | 243 | 3.125 | 3 | [] | no_license | #!/bin/bash
# Substitute the desired log level into every ms-*.yaml manifest and
# apply them to the cluster. The LOG_LEVEL env var overrides the
# default of "info".
log_level=$LOG_LEVEL
if [[ -z $LOG_LEVEL ]]; then
    log_level=info
fi
# BUG FIX: report the effective level (after the default is applied),
# not the raw — possibly empty — LOG_LEVEL environment variable.
echo "LOG_LEVEL=${log_level}"
files=$(ls ./ | grep "^ms-.*\.yaml$")
for f in $files; do
    # Replace the literal "log_level" placeholder in the manifest, then
    # stream the result straight into kubectl. The expansion is quoted
    # so an unusual level string cannot word-split the sed program.
    sed 's/log_level/'"${log_level}"'/g' "$f" | \
        kubectl apply -f -
done
| true |
f812a283e6da932711f1502f8b62ec8517e2b063 | Shell | jd28/dotfiles | /zsh/zshrc | UTF-8 | 1,799 | 2.796875 | 3 | [] | no_license | # Set name of the theme to load. Optionally, if you set this to "random"
# it'll load a random theme each time that oh-my-zsh is loaded.
# See https://github.com/robbyrussell/oh-my-zsh/wiki/Themes
ZSH_THEME="agnoster"
# Uncomment the following line to use case-sensitive completion.
# CASE_SENSITIVE="true"
# Uncomment the following line to use hyphen-insensitive completion. Case
# sensitive completion must be off. _ and - will be interchangeable.
# HYPHEN_INSENSITIVE="true"
# Uncomment the following line to disable bi-weekly auto-update checks.
# DISABLE_AUTO_UPDATE="true"
# Uncomment the following line to change how often to auto-update (in days).
# export UPDATE_ZSH_DAYS=13
# Uncomment the following line to disable auto-setting terminal title.
# DISABLE_AUTO_TITLE="true"
# Uncomment the following line to enable command auto-correction.
# ENABLE_CORRECTION="true"
# Uncomment the following line to display red dots whilst waiting for completion.
# COMPLETION_WAITING_DOTS="true"
# Uncomment the following line if you want to disable marking untracked files
# under VCS as dirty. This makes repository status check for large repositories
# much, much faster.
DISABLE_UNTRACKED_FILES_DIRTY="true"
# Uncomment the following line if you want to change the command execution time
# stamp shown in the history command output.
# The optional three formats: "mm/dd/yyyy"|"dd.mm.yyyy"|"yyyy-mm-dd"
# HIST_STAMPS="mm/dd/yyyy"
source $HOME/dotfiles/zsh/antigen.zsh
source $HOME/dotfiles/zsh/aliases.zsh
source $HOME/dotfiles/zsh/shell.zsh
antigen use oh-my-zsh
# Bundles
antigen bundle emacs git pip
antigen bundle "zsh-users/zsh-syntax-highlighting"
antigen bundle "zsh-users/zsh-completions"
antigen bundle fzf
antigen bundle ripgrep
# Theme
antigen theme "$ZSH_THEME"
antigen apply
| true |
88bbd72193c2b7ebb88d5a070327c55cb19c68cf | Shell | tomitribe/docker-tomee | /automation.inprogress/bump_version.sh | UTF-8 | 198 | 2.953125 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Bump the image version across all TomEE flavors.
#   $1 - the new version string
NEW_VERSION="$1"
# $(...) replaces the legacy backtick command substitution.
KEY_SECTION=$(./create_key_section.sh)
flavors=(webprofile plus plume microprofile)
# Quote "${flavors[@]}" so each flavor expands as a single word.
# NOTE(review): the loop body never uses $flavor — it prints the same
# version/key section once per flavor; presumably a work in progress.
for flavor in "${flavors[@]}"
do
    echo "$NEW_VERSION"
    echo "$KEY_SECTION"
done
done
| true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.