blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
e2d9502bcbd5e9aea29a1389f121558fc2467315
|
Shell
|
shifi/git-aliae
|
/wip/git-backward
|
UTF-8
| 180
| 2.71875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#
# Move back one step in history (towards older commits)
#
# If this commit has multiple parents, the first is chosen.
# Quote the substitution so a ref name can never be word-split or globbed,
# and so an empty result fails loudly instead of becoming a bare `git checkout`.
git checkout "$(git name-rev --name-only HEAD^)"
| true
|
e75f20a9e3bce1e1a1c366e3c6edbec47d1416bc
|
Shell
|
espnet/espnet
|
/egs/jnas/asr1/local/jnas_train_prep.sh
|
UTF-8
| 4,457
| 3.75
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Copyright 2019 Nagoya University (Someki Masao) and Tomoki Hayashi
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
# Prepare JNAS dataset
#
# Usage: $0 <data-directory> <speaker_text> [<trans_type>]
#   <data-directory>  JNAS database root (must contain WAVES_HS/ and Transcription/)
#   <speaker_text>    file listing one speaker name per line
#   <trans_type>      transcription type used for TTS text cleaning (default: kanji)
. ./path.sh || exit 1;
# Require exactly 2 or 3 positional arguments.
if [ $# -lt 2 ] || [ $# -gt 3 ]; then
echo "Usage: $0 <data-directory> <speaker_text> [<trans_type>]";
exit 1;
fi
DATA=$1 # database root directory
speaker_list=$2
trans_type=${3:-kanji}
echo "=== Starting initial JNAS training_data preparation ..."
wavdir=${DATA}/WAVES_HS
trans=Transcription # transcription dir name
type=KANJI # transcription type
ifNP=NP
# Prepared files land under ./data/train; tmp/ holds unsorted intermediates.
locdata=${PWD}/data
loctmp=$locdata/train/tmp
rm -rf $loctmp >/dev/null 2>&1
mkdir -p ${locdata}/train/tmp
# extract speakers names
logdir=exp/train_prep
mkdir -p $logdir
# Truncate the transcript-conversion log for this run.
echo -n > $logdir/make_trans.log
echo "--- Preparing train/wav.scp, train/trans.txt and train/utt2spk ..."
# Per-speaker loop: collect wav entries, convert the speaker transcript to
# UTF-8, and append unsorted wav.scp / utt2spk / trans entries under ${loctmp}.
# NOTE(review): word-splitting $(cat ...) assumes speaker names contain no
# whitespace or glob characters.
for spkname in $(cat ${speaker_list}); do
scrdir=${DATA}/${trans}/${type}/${ifNP}
spkwav_dir=${wavdir}/${spkname}/${ifNP}
if [ ! -f ${scrdir}/${spkname}_${type:0:3}.txt ]; then
echo "No ${spkname}_${type:0:3}.txt file exists in $scrdir - skipping the dir ..." \
>> $logdir/make_trans.log
fi
if ls ${spkwav_dir}/*.wav > /dev/null 2>&1; then
wavtype=wav
else
echo "No 'wav' dir in $spkwav_dir - skipping ..."
continue
fi
train_wavs=()
train_utt2spk_entries=()
# Inner loop output (one "id path" line per wav) is appended to wav.scp.
for w in ${spkwav_dir}/*${wavtype}; do
bw=`basename $w`
wavname=${bw%.$wavtype}
train_wavs+=("${wavname:0:-3}")
id="${spkname}_${ifNP}_${wavname:0:-3}"
if [ ! -s $w ]; then
echo "$w is zero-size - skipping ..." 1>&2
continue
fi
echo "$id $w"
train_utt2spk_entries+=("$id $spkname")
done >> ${loctmp}/train_wav.scp.unsorted
for a in "${train_utt2spk_entries[@]}"; do echo $a; done >> $loctmp/train_utt2spk.unsorted
if [ ! -f ${loctmp}/train_wav.scp.unsorted ]; then
echo "$0: processed no data: error: pattern ${dir}/${wavtype}/*${wavtype} might match nothing"
exit 1;
fi
# check character set, and convert to utf-8
# NOTE(review): $id below is the last utterance id left over from the inner
# loop; it only serves as a unique temp-file name for this speaker.
mkdir -p ${loctmp}/char_tmp
CHARSET=`file -bi ${scrdir}/${spkname}_${type:0:3}.txt |awk -F "=" '{print $2}'`
if [ "$CHARSET" != 'utf-8' ] && [ "$CHARSET" != 'us-ascii' ] ; then
echo "iconv -f "$CHARSET" -t UTF-8 ${scrdir}/${spkname}_${type:0:3}.txt |
sed 's/\r//' > ${loctmp}/char_tmp/$id.utf8"
# BUGFIX: the pipeline previously piped iconv into a bare redirection
# ("|\" followed by "> file"), which discarded the converted text and left
# the .utf8 file empty. Route it through sed (stripping CRs), exactly as
# announced by the echo above.
iconv -f "$CHARSET" -t UTF-8 ${scrdir}/${spkname}_${type:0:3}.txt |\
sed 's/\r//' > ${loctmp}/char_tmp/$id.utf8
nkf --overwrite -Lu ${loctmp}/char_tmp/$id.utf8
else
cp ${scrdir}/${spkname}_${type:0:3}.txt ${loctmp}/char_tmp/$id.utf8
nkf --overwrite -Lu ${loctmp}/char_tmp/$id.utf8
fi
# Emit "<utt-id> <transcript>" lines for every utterance of this speaker.
local/make_train_trans.py \
${loctmp}/char_tmp/$id.utf8 \
${spkname}_${ifNP} \
"${train_wavs[@]}" \
2>>${logdir}/make_trans.log >> ${loctmp}/train_trans.txt.unsorted
done
# filter out the audio for which there is no proper transcript
awk 'NR==FNR{trans[$1]; next} ($1 in trans)' FS=" " \
${loctmp}/train_trans.txt.unsorted ${loctmp}/train_wav.scp.unsorted |\
sort -k1 > ${locdata}/train/wav.scp
# same filter for utt2spk: keep only utterances that have a transcript
awk 'NR==FNR{trans[$1]; next} ($1 in trans)' FS=" " \
${loctmp}/train_trans.txt.unsorted $loctmp/train_utt2spk.unsorted |\
sort -k1 > ${locdata}/train/utt2spk
sort -k1 < ${loctmp}/train_trans.txt.unsorted > ${locdata}/train/text.tmp
# remove spaces (from the transcript field only; the utt-id column is kept)
paste -d " " <(cut -f 1 -d" " ${locdata}/train/text.tmp) <(cut -f 2- -d" " ${locdata}/train/text.tmp | tr -d " ") > ${locdata}/train/text
rm ${locdata}/train/text.tmp
echo "--- Preparing train/spk2utt ..."
# Group utterance ids by the speaker prefix (text before the first "_").
cat $locdata/train/text |\
cut -f1 -d' ' |\
awk 'BEGIN {FS="_"}
{names[$1]=names[$1] " " $0;}
END {for (k in names) {print k, names[k];}}' | sort -k1 > $locdata/train/spk2utt
# Warn (but do not fail) when the conversion log recorded any problems.
trans_err=$(wc -l <${logdir}/make_trans.log)
if [ "${trans_err}" -ge 1 ]; then
echo -n "$trans_err errors detected in the transcripts."
echo " Check ${logdir}/make_trans.log for details!"
fi
# check the structure of prepared data directory
utils/fix_data_dir.sh ${locdata}/train
rm -rf ${loctmp}
# convert text type (only for tts)
if [ ${trans_type} != "kanji" ]; then
mv ${locdata}/train/text ${locdata}/train/rawtext
local/clean_text.py ${locdata}/train/rawtext ${locdata}/train/text ${trans_type}
rm ${locdata}/train/rawtext
fi
echo "*** Initial JNAS training_data preparation finished!"
| true
|
cb7ddb8f021bdccb8a802f77012f045d27618b5a
|
Shell
|
sebastian-king/UT-Region5-Robotics-2017
|
/install-a-pi.sh
|
UTF-8
| 1,129
| 3.03125
| 3
|
[] |
no_license
|
#!/bin/bash
# Provision a Raspberry Pi for the UTR5 robotics project, then reboot.
echo "THE PI WILL REBOOT ONCE THIS SCRIPT HAS FINISHED RUNNING!";
read -r -p "Press X to exit the script now if you do not want to reboot " response;
if [[ "$response" =~ ^([xX])+$ ]]; then
exit;
fi
# BUGFIX: raspi-config must run as root like every other privileged step.
sudo raspi-config --expand-rootfs
sudo apt-get update
# BUGFIX: -y keeps the run unattended; without it apt-get stops to prompt.
sudo apt-get -y upgrade
sudo apt-get -y dist-upgrade
sudo apt-get -y install git runit
cd ~
git clone https://github.com/afloresescarcega/UTR5.git # perhaps ignore this file
sudo ln -s /home/pi/UTR5/runit/start /etc/sv/
sudo ln -s /home/pi/UTR5/runit/stop /etc/sv/
sudo ln -s /home/pi/UTR5/runit/start /etc/service/
sudo ln -s /home/pi/UTR5/runit/stop /etc/service/
# BUGFIX: systemctl disable also needs root.
sudo systemctl disable hciuart.service # fix later in the image
awk '/\/dev\/mmcblk0p1/{$4="defaults,noauto"}{print}' /etc/fstab > /tmp/fstab # noauto
sudo -i << EOF
cat /tmp/fstab > /etc/fstab
EOF
sudo rm /tmp/fstab
# BUGFIX: mount -a needs root.
sudo mount -a
#adduser r5 --home /home/r5 -q --disabled-password --gecos GECOS
#su r5
#cd ~
#sudo sudo apt-get -y install python python-pip python-dev python3 python-pip3 python3-dev #3/3 removed for switch to soft PWM
#sudo pip install wiringpi
#sudo pip install wiringpi2
sudo reboot
| true
|
0ca30f09b7b66081b1d59bed6aa8f0022d1ddde7
|
Shell
|
openhpc/ohpc
|
/components/admin/ohpc-filesystem/SOURCES/ohpc-find-requires
|
UTF-8
| 2,089
| 3.953125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#
# ohpc-find-requires
#-----------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You may
# obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#-----------------------------------------------------------------------
# RPM dependency generator: reads a buildroot file list on stdin and emits
# "Requires" entries, tagging deps provided by OpenHPC packages (or living
# under ${buildroot}/${searchPath}) with an "(ohpc)" color suffix.
# Split only on newlines so file paths containing spaces survive the
# unquoted expansions below.
IFS=$'\n'
# First argument is buildroot
buildroot="$1"
if [ ! -d "$buildroot" ]; then
>&2 echo "Invalid buildroot"
exit 1
fi
# Second argument is default search path.
searchPath="$2"
if [ -z "$searchPath" ];then
>&2 echo "Required search path argument not provided"
exit 1
fi
if [ ! -x /usr/lib/rpm/elfdeps ]; then
>&2 echo "Required /usr/lib/rpm/elfdeps binary not available locally"
exit 1
fi
# Get the list of files.
# sed reads the list from stdin (rpm pipes it in) and backslash-escapes
# shell glob/quote metacharacters in each path.
filelist=`sed "s/[]['\"*?{}]/\\\\\&/g"`
if [ -z "$filelist" ]; then exit 0; fi
# Step 1: use standard elfdeps analysis and cache results
# NOTE(review): ${filelist} is deliberately unquoted - with IFS=\n it splits
# per line before echo space-joins it; confirm elfdeps tolerates this before
# changing the quoting.
requireList=$(echo ${filelist} | /usr/lib/rpm/elfdeps -R)
# Step 2: append additional color delimiter for ohpc provided packages (that
# install into $searchPath)
for require in ${requireList}; do
# Check if this is owned by ohpc pre-requisite
# (only rpm's exit status matters here; $package itself is never read)
package=$(rpm -q --queryformat '%{NAME}\n' --whatprovides "$require(ohpc)")
if [ $? -eq 0 ];then
echo "$require(ohpc)"
else
# check if this requirement is housed in ${buildroot}/opt/ohpc.
# If so, we append an (ohpc) color designation, otherwise we
# leave the requirement as is.
libname=${require%%(*} # strip off () to get libname
match=$(find ${buildroot}/${searchPath} -name ${libname})
if [ -n "${match}" ];then
echo "$require(ohpc)"
else
echo "$require"
fi
fi
done
| true
|
4f6343f54390cc63af9a536c97ff40660bf905e1
|
Shell
|
petronny/aur3-mirror
|
/gendef/PKGBUILD
|
UTF-8
| 720
| 2.90625
| 3
|
[] |
no_license
|
# Maintainer: Daniel Kirchner <daniel at ekpyron dot org>
# PKGBUILD for gendef - builds the MinGW .def-file generator from source.
pkgname=gendef
pkgver=1.0.1346
pkgrel=1
pkgdesc="Tool to generate .def files from dlls."
arch=('i686' 'x86_64')
url="http://www.mingw.org/"
license=('custom')
source=("https://downloads.sourceforge.net/project/mingw/MinGW/Extension/gendef/gendef-1.0.1346/gendef-1.0.1346-1-mingw32-src.tar.lzma")
md5sums=('44761bf08b1fa2c2ced3096b8d2ac7cb')
build() {
cd "$srcdir"
# The downloaded archive contains a nested .tar.lzma; unpack it manually.
tar -xf $pkgname-$pkgver.tar.lzma
cd $pkgname-$pkgver
# Inline patches: fix a printf length modifier and use fread's return value.
sed -i -e 's/unable to allocate %Iu bytes/unable to allocate %Ilu bytes/' src/gendef.c
sed -i -e 's/fread (gDta, 1, gDta_size, fp);/gDta_size = fread (gDta, 1, gDta_size, fp);/' src/gendef.c
./configure --prefix=/usr
make
# NOTE(review): installing inside build() is legacy AUR3 style; modern
# PKGBUILDs perform `make DESTDIR=... install` in a package() function.
make DESTDIR=$pkgdir install
}
| true
|
da347626166a468ba4fe2993a10bce2cfcd3d6a0
|
Shell
|
jaikejennison/pure-aptRGB
|
/pure-aptRGB-install
|
UTF-8
| 652
| 3.4375
| 3
|
[] |
no_license
|
#! /bin/bash
# pure-aptRGB-install: install packages via apt.
#   With arguments : install them straight away.
#   Without        : prompt interactively for a package name ("quit" exits).
#
# BUGFIX: the original tested `[ -d $1 ]`, which only "worked" because an
# empty unquoted $1 collapses to `[ -d ]` (always true); a package name that
# happened to match a directory would wrongly trigger the interactive path.
# Test the argument count directly, which is the documented intent.
if [ $# -eq 0 ]; then
echo '';
echo -e "\e[1;31m------------\e[0m"
echo -e "\e[0;37mInstall Tool:\e[0m"
echo -e "\e[1;31m--------------------------------------------------------------\e[0m"
read INSTALL_QUERY
QUIT="quit"
if [ "$QUIT" = "$INSTALL_QUERY" ]; then
exit
else
sudo apt -y install "$INSTALL_QUERY"
exit
fi
else
echo -e "\e[1;31m----------\e[0m"
echo -e "\e[0;37mInstalling:\e[0m"
echo -e "\e[1;31m--------------------------------------------------------------\e[0m"
# "$@" keeps each package argument intact (was unquoted $*).
sudo apt -y install "$@"
exit
fi
#---------------------------------------------------------------------------
# Written By: Jaike Howard copyleft 2009
| true
|
cfa384fa182d8e6c74f6cd77f20279f33adfd4e9
|
Shell
|
bikash/ScriptsDebian
|
/GPUec2.sh
|
UTF-8
| 1,908
| 2.828125
| 3
|
[] |
no_license
|
#!/bin/bash
## reference http://tleyden.github.io/blog/2015/11/22/cuda-7-dot-5-on-aws-gpu-instance-running-ubuntu-14-dot-04/
# Set up CUDA 7.5 + TensorFlow 0.9 (GPU) on an Ubuntu 14.04 EC2 instance.
#Install CUDA repository
wget http://developer.download.nvidia.com/compute/cuda/repos/ubuntu1404/x86_64/cuda-repo-ubuntu1404_7.5-18_amd64.deb
sudo dpkg -i cuda-repo-ubuntu1404_7.5-18_amd64.deb
#Update APT
sudo apt-get update
sudo apt-get upgrade -y
sudo apt-get install -y opencl-headers build-essential protobuf-compiler \
libprotoc-dev libboost-all-dev libleveldb-dev hdf5-tools libhdf5-serial-dev \
libopencv-core-dev libopencv-highgui-dev libsnappy-dev libsnappy1 \
libatlas-base-dev cmake libstdc++6-4.8-dbg libgoogle-glog0 libgoogle-glog-dev \
libgflags-dev liblmdb-dev git python-pip gfortran
#cleanup
sudo apt-get clean
#DRM module workaround
sudo apt-get install -y linux-image-extra-`uname -r` linux-headers-`uname -r` linux-image-`uname -r`
#Install CUDA
sudo apt-get install -y cuda
sudo apt-get clean
#install python-pip (BUGFIX: -y keeps this step unattended like the others)
sudo apt-get install -y python-pip python-dev
# Requires CUDA toolkit 7.5 and CuDNN v4. For other versions, see "Install from sources" below.
export TF_BINARY_URL=https://storage.googleapis.com/tensorflow/linux/gpu/tensorflow-0.9.0rc0-cp27-none-linux_x86_64.whl
# Python 2
sudo pip install --upgrade $TF_BINARY_URL
## check gpu
nvidia-smi
## check tensorflow
# python
# import tensorflow as tf
## write global path in bashrc file
# BUGFIX: single quotes defer $PATH/$LD_LIBRARY_PATH expansion to shell
# startup; the old broken nested double quotes baked the *build-time* values
# into ~/.bashrc (and risked word-splitting/globbing of the expanded PATH).
echo 'export PATH=/usr/local/cuda/bin:$PATH' >> ~/.bashrc
echo 'export LD_LIBRARY_PATH=/usr/local/cuda/lib64:$LD_LIBRARY_PATH' >> ~/.bashrc
## install nltk
sudo pip install -U nltk
## install mumpy
sudo pip install -U numpy
## install screen for maintaining session
sudo apt-get install -y screen
## install punkt for nltk type d to choose download and type punkt package and then type q to quit
python nltkPunkt.py
#d punkt q
## restart your bash profile
source ~/.bashrc
| true
|
78cba443b3794e40b9ebaf8ec5aae64077cd36d0
|
Shell
|
dwharve/rock
|
/bin/deploy_rock.sh
|
UTF-8
| 8,036
| 3.828125
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
#
#Info
#########################
# file: deploy_rock.sh
# name: Deploy Rock Script
#
#
# Description
#########################
# Deploys ROCKNSM using the playbook associated based of option of playbooks.
#
# BUGFIX: shebang corrected from /bin/sh to /bin/bash - this script relies on
# bash-only features ([[ ]], BASH_SOURCE, read -p/-n 1 -r).
#
# Notes
########################
#
#
# Functions
#########################
# Main function to call the deploy_rock.yml playbook
# Entry point: resolves install paths, makes sure /etc/rocknsm/config.yml
# exists (offering to generate it), then shows the menu and prints the
# thank-you banner on success.
Main() {
# Get the current directory of deploy_rock.sh (Ex: if deploy rock is in /root/rock/bin/deploy_rock.sh,
# this will return /root/rock/bin
SCRIPT_PATH=$(dirname $(readlink -f $0))
# Returns rock's directory. Ex: If deploy_rock.sh is in /root/rock/bin then this returns /root/rock
TOPLEVEL=$(dirname ${SCRIPT_PATH})
VERBOSE_FLAGS=
# The DEBUG environment variable switches ansible to -vvv output.
if [ "x${DEBUG}" != "x" ]; then
VERBOSE_FLAGS="-vvv"
fi
# The purpose of the following conditional block is to ensure the user has run generate_defaults before running
# deploy_rock. If not, it will prompt them to do so.
# The bash option -e checks if a file exists. This line checks to see if config.yml has already been generated.
if [[ ! -e /etc/rocknsm/config.yml ]]; then
# This gets the name of the running script. In this case it is deploy_rock.sh
SOURCE="${BASH_SOURCE[0]}"
# The -h option checks to see if a file exists and is a symbolic link.
# The purpose of this code is to resolve deploy_rock.sh in case it is a symlink. At the end, the DIR
# variable will contain deploy_rock.sh's current directory. So if deploy_rock.sh is in /root/rock/bin
# that's what will be returned. If it has been symlinked, it will return the actual file path.
while [ -h "$SOURCE" ]; do # resolve $SOURCE until the file is no longer a symlink
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
SOURCE="$(readlink "$SOURCE")"
# If $SOURCE was a relative symlink, we need to resolve it relative to the path where the symlink file was located.
[[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE"
done
# Contains deploy_rock.sh's directory
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
Generate_config
fi
cd "${TOPLEVEL}/playbooks"
# Mainmenu's return status decides whether the banner below is shown.
Mainmenu
if [ $? -eq 0 ]; then
# Quoted 'EOF' delimiter: banner text is emitted literally, no expansion.
cat << 'EOF'
โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ
โ โ
โ โ
โ โ
โ โ
โ โ
โ Thank you for installing: โ
โ โ
โ โ
โ :::::::.. ... .,-::::: ::: . โ
โ ;;;;``;;;; .;;;;;;;. ,;;;'````' ;;; .;;,. โ
โ [[[,/[[[' ,[[ [[,[[[ [[[[[/' โ
โ $$$$$$c $$$, $$$$$$ _$$$$, โ
โ 888b "88bo,"888,_ _,88P`88bo,__,o,"888"88o, โ
โ MMMM "W" "YMMMMMP" "YUMMMMMP"MMM "MMP" โ
โ :::. :::. .::::::. . : โ
โ `;;;;, `;;;;;;` ` ;;,. ;;; โ
โ [[[[[. '[['[==/[[[[,[[[[, ,[[[[, โ
โ $$$ "Y$c$$ ''' $$$$$$$$$"$$$ โ
โ 888 Y88 88b dP888 Y88" 888o โ
โ MMM YM "YMmMY" MMM M' "MMM โ
โ โ
| โ
โ โ
โ โ
โ โ
โ โ
โ โ
โ โ
โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ
EOF
fi
}
#=======================
# Deploy everything (server + sensor) on this single host.
Stand_alone() {
local playbook="${TOPLEVEL}/playbooks/all-in-one.yml"
ansible-playbook "${playbook}" ${VERBOSE_FLAGS}
}
#=======================
# Deploy only the server-side services.
Server() {
local playbook="${TOPLEVEL}/playbooks/server.yml"
ansible-playbook "${playbook}" ${VERBOSE_FLAGS}
}
#=======================
# Deploy only the sensor-side services.
Sensor() {
local playbook="${TOPLEVEL}/playbooks/sensor.yml"
ansible-playbook "${playbook}" ${VERBOSE_FLAGS}
}
#=======================
# Generate the /etc/rocknsm/config.yml
Generate_config() {
# Warn, then offer to run generate_defaults.sh before the first deploy.
echo "[-] You must run generate_defaults.sh prior to deploying for the first time. "
read -p "Would you like to generate the defaults now? [y/n] " -n 1 -r
if [[ ! $REPLY =~ ^[Yy]$ ]]; then
# Declined: leave so the user can create the config manually.
echo ''
exit
fi
echo ''
/bin/bash $DIR/generate_defaults.sh
echo "**** Please verify configuration settings in /etc/rocknsm/config.yml before re-running the deploy script."
sleep 3
exit
}
#=======================
# Main menu to call all available install options be it a stand alone system or just a sensor.
# NOTE: Main() checks this function's return status to decide whether to print
# the thank-you banner; invalid input recurses back into Mainmenu.
Mainmenu() {
clear
Header
echo "+ [ 1 ] Install a Stand alone system (everything on this box) +"
echo "+ +"
echo "+ [ 2 ] Server Install: only the services for a Server +"
echo "+ +"
echo "+ [ 3 ] Sensor Install: only the services for a Sensor +"
echo "+ +"
echo "+ +"
echo "+ +"
echo "+ [ X ] Exit Script +"
echo "+ +"
echo "+ +"
Footer
read -p "Please make a Selection: " mainmenu_option
# Dispatch on the selection; x/X clears the screen and exits the script.
case $mainmenu_option in
1) clear && Stand_alone;;
2) clear && Server;;
3) clear && Sensor;;
x|X) clear && exit ;;
*) echo "Invalid input" && sleep 1 && Mainmenu;;
esac
}
#=======================
# Banner printed above the menu options.
Header() {
printf '%s\n' \
"++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++" \
"+ +" \
"+ Deployment Configuration Options +" \
"+ +" \
"++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++" \
"+ +"
}
#=======================
# Bottom border printed below the menu options.
Footer() {
printf '%s\n' \
"+ +" \
"+ +" \
"++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++" \
""
}
#
#Script Execution:
########################
# Entry point - all top-level behavior lives inside Main().
Main
| true
|
479db56778097793448d863878b36e4e29f91ed2
|
Shell
|
stefansundin/dotfiles
|
/snippets-ubuntu.sh
|
UTF-8
| 1,631
| 3.1875
| 3
|
[] |
no_license
|
# snippets-ubuntu.sh - grab bag of Ubuntu/multipass command snippets.
# Not meant to be executed top-to-bottom; copy individual lines as needed.
multipass launch --name primary focal
# make multipass use iTerm2 on macOS:
duti -s com.googlecode.iTerm2 com.apple.terminal.shell-script shell
# screenshot from terminal
sudo apt-get install scrot
sleep 5 && scrot screenshot.png
# sudo forget password
sudo -K
# search for a package
apt-cache search vim
# show package information
apt-cache show vim
dpkg -s vim
# list files in a package
dpkg -L nginx
# show package containing path
dpkg -S /usr/lib/evolution/
# see available versions of package
apt-cache policy nginx
# clean up dpkg stuff
sudo apt-get autoremove
sudo apt-get clean
sudo apt-get autoclean
# get list of installed and uninstalled packages
dpkg --get-selections
# see progress of unattended-upgrades
tail -f /var/log/unattended-upgrades/unattended-upgrades-dpkg.log
# mount encrypted home directory on another disk
# get mount passphrase (this is what it tells you to save after you encrypt your home directory):
ecryptfs-unwrap-passphrase /media/ubuntu/long-uuid/home/.ecryptfs/username/.ecryptfs/wrapped-passphrase
# add mount passphrase to keyring:
sudo ecryptfs-add-passphrase --fnek
# this will print two tokens, you need to use the second one as the "Filename Encryption Key (FNEK) Signature"
mkdir ecryptfs
sudo mount -t ecryptfs /media/ubuntu/long-uuid/home/.ecryptfs/username/.Private ecryptfs
# Selection: aes
# Selection: 16
# Enable plaintext passthrough: n
# Enable filename encryption: y
# Filename Encryption Key (FNEK) Signature: <the second key you got from ecryptfs-add-passphrase>
# If you get a warning that you have never mounted with this key before, that is fine, just continue.
| true
|
a9769f274dd3c2d14273585e750e1b75fa365b7b
|
Shell
|
crgk/dotfiles
|
/findreplace.zsh
|
UTF-8
| 355
| 3.34375
| 3
|
[] |
no_license
|
# Recursive Find and Replace
# Usage:
#
# Replace all "bad" with "good" in files ending in ".txt"
# findreplace bad good txt
#
# Replace all "bad" with "good" in .java files in the src/main directory
# findreplace bad good java src/main
#
# $1 - search pattern (sed BRE)   $2 - replacement
# $3 - file extension (default: java)   $4 - root directory (default: .)
function findreplace () {
# BUGFIX: quote the sed script (and the root dir) so arguments containing
# spaces survive; add the 'g' flag so every occurrence on a line is
# replaced, matching the "Replace all" documentation above.
# NOTE: `sed -i ""` is the BSD/macOS form; GNU sed would need plain `-i`.
find "${4:-.}" -type f -name "*.${3:-java}" -exec sed -i "" "s/$1/$2/g" {} +
}
| true
|
97dfe09b18153649b476170c2baded521cad373f
|
Shell
|
caffeinum/pravda.contracts
|
/tests/balance_check.sh
|
UTF-8
| 230
| 2.984375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# balance_check: query the node's balance endpoint for ADDRESS and report.
# BUGFIX: shebang corrected from /bin/sh - `source` and `[[ ]]` below are
# bashisms that POSIX sh implementations (e.g. dash) reject.
CURDIR="$(pwd)/$(dirname "$0")"
source "$CURDIR/../setup_env"
# test 1
# Quote the URL so ? and & in the query string can never glob or split.
BALANCE=$(curl -s "$NODE/balance?address=$ADDRESS")
echo "$BALANCE"
# A non-zero balance counts as a pass for this smoke test.
if [[ $BALANCE != 0 ]]; then
echo '+ balance passed'
else
echo '- balance failed'
fi
| true
|
c2824edfd8a9cc292db9f9577c716aff60db088c
|
Shell
|
tribusonz/jburkardt-c
|
/c/linked_list_vector.sh
|
UTF-8
| 503
| 3.015625
| 3
|
[] |
no_license
|
#!/bin/bash
#
# Compile, link, run and clean up the linked_list_vector demo.
# BUGFIX: every failure branch now exits with status 1 - a bare `exit`
# returned the status of the preceding echo (0), masking build/run errors
# from any calling script.
gcc -c linked_list_vector.c
if [ $? -ne 0 ]; then
echo "Errors compiling linked_list_vector.c."
exit 1
fi
#
gcc linked_list_vector.o -lm
if [ $? -ne 0 ]; then
echo "Errors linking linked_list_vector.o."
exit 1
fi
#
rm linked_list_vector.o
#
mv a.out linked_list_vector
./linked_list_vector > linked_list_vector_output.txt
if [ $? -ne 0 ]; then
echo "Errors running linked_list_vector."
exit 1
fi
rm linked_list_vector
#
echo "Program output written to linked_list_vector_output.txt"
| true
|
b71bbb839ddd87728643002dea8c6d0121221c59
|
Shell
|
targetbench/redis
|
/run_server.sh
|
UTF-8
| 170
| 2.796875
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# Start redis server instances pinned to consecutive CPUs.
TOPDIR=${0%/*}
# BUGFIX: abort if we cannot reach the script's own directory (a failed cd
# previously let the script run start_server.sh from the wrong cwd).
cd "$TOPDIR" || exit 1
#Start
start_cpu_num=1
inst_num=2
# Modernized: $[ ... ] arithmetic is deprecated; use POSIX $(( ... )).
end_cpu_num=$((start_cpu_num + inst_num - 1))
scripts/start_server.sh $start_cpu_num $end_cpu_num
| true
|
e1aaab384aff81c8a72be54dea01bdfac427d7d0
|
Shell
|
bdzim/netkes
|
/bin/backup_omva.sh
|
UTF-8
| 803
| 3.375
| 3
|
[] |
no_license
|
#!/bin/bash
# Backup script for the OpenManage Virtual Appliance.
# Today: Generates a tarball with the important-to-backup data.
# Tomorrow: This will eventually send the tarball automatically for secure offsite backup.
. /etc/default/openmanage
backup_workspace=$HOME/omva-backup
backup_date=$(date -u +%Y%m%d_%H%M)
# Stage one: prepare the destination
mkdir -p $backup_workspace
# Stage two: Collect the trivial stuff.
cp $OPENMANAGE_CONFIGDIR/agent_config.json $backup_workspace
cp -r $SPIDEROAK_ESCROW_KEYS_PATH $backup_workspace
cp -r $SPIDEROAK_ESCROW_LAYERS_PATH $backup_workspace
# Stage three: collect the DB contents.
su postgres -c "pg_dump openmanage" > $backup_workspace/db_dump.sql
# Pack from $HOME so the tarball has a stable ./omva-backup root, then
# drop the staging directory.
pushd $HOME
tar czf $HOME/omva-backup-$backup_date.tar.gz ./omva-backup
rm -r $backup_workspace
popd
| true
|
2d9629eea3965728aeba194d9884749ccb09e9b5
|
Shell
|
codelurker/home
|
/scripts/nzbmatrix-postdetail.sh
|
UTF-8
| 2,798
| 3.40625
| 3
|
[] |
no_license
|
#!/bin/bash
#
# NZB Post Details from NZBMatrix
# by: MreDD
#
# Fetches post metadata for a given NZB id via the NZBMatrix API, maps API
# error tokens to friendly messages, and pretty-prints the field values.
##
# NZBMatrix API Download Link
MATRIX="http://nzbmatrix.com/api-nzb-details.php?"
# Variables
# Curl - Path to Curl
CURL="/usr/bin/curl"
# Elinks - Path to Elinks
ELINKS="/usr/bin/elinks"
# Cat - Path to Cat
CAT="/bin/cat"
# UserID - Add it Here
# NOTE(review): credentials are read positionally from ~/.nzbmatrixrc
# (fields 6 and 9) - assumes a fixed file layout; confirm the rc format.
USERNAME=`cat $HOME/.nzbmatrixrc | awk '{print $6}'`
# APIKey - Add it Here
APIKEY=`cat $HOME/.nzbmatrixrc | awk '{print $9}'`
# Temp Folder Path
TMP="$HOME/tmp"
# Max Results
MAXRESULTS="3"
# Fetch Info
NZBID="$@"
if [ -z "$NZBID" ]; then
echo "Post Details needs the NZBid... "
echo "User NZB-Search or NZB-Latest to retrieve ID"
echo "Example: NZBID: 596240"
else
unset response
sleep 2
# Search
$CURL "${MATRIX}id=${NZBID}&username=${USERNAME}&apikey=${APIKEY}" > $TMP/nzbpostdetail.txt
# $ELINKS "${MATRIX}id=${NZBID}&username=${USERNAME}&apikey=${APIKEY}" > $TMP/nzbpostdetail.txt
# Strip trailing semicolons so the API's error tokens compare cleanly.
SPLAT=$($CAT $TMP/nzbpostdetail.txt | tr -d ";")
if [ "$SPLAT" = "error:nothing_found" ]; then
echo "NoFiles: No Files Found.."
elif [ "$SPLAT" = "error:invalid_login" ]; then
echo "Username: There is a problem with the username you have provided."
elif [ "$SPLAT" = "error:invalid_api" ]; then
echo "APIKey: There is a problem with the API Key you have provided."
elif [ "$SPLAT" = "error:invalid_nzbid" ]; then
echo "NZBid: There is a problem with the NZBid supplied."
elif [ "$SPLAT" = "error:vip_only" ]; then
echo "VIP Only: You need to be VIP or higher to access."
elif [ "$SPLAT" = "error:disabled_account" ]; then
echo "Account: User Account Disabled."
elif [ "$SPLAT" = "error:no_nzb_found" ]; then
echo "NoNZB: No NZB found."
else
# No error token: extract each KEY:value field from the cached response.
# echo -e "$SPLAT"
echo -e "[ NZB Post Details ]"
sleep 2
echo -e "NZBid: $NZBID"
echo -e "NZBName: `$CAT $TMP/nzbpostdetail.txt | grep "NZBNAME:" | sed 's/^NZBNAME://' | tr -d ";"`"
echo -e "Subject: `$CAT $TMP/nzbpostdetail.txt | grep "USENET_SUBJECT:" | sed 's/^USENET_SUBJECT://' | tr -d ";"`"
echo -e "Group: `$CAT $TMP/nzbpostdetail.txt | grep "GROUP:" | sed 's/^GROUP://' | tr -d ";"`"
echo -e "IndexDate: `$CAT $TMP/nzbpostdetail.txt | grep "INDEX_DATE:" | sed 's/^INDEX_DATE://' | tr -d ";"`"
echo -e "Link: `$CAT $TMP/nzbpostdetail.txt | grep "LINK:" | sed 's/^LINK://' | tr -d ";"`"
echo -e "Comments: `$CAT $TMP/nzbpostdetail.txt | grep "COMMENTS:" | sed 's/^COMMENTS://' | tr -d ";"`"
echo -e "Hits: `$CAT $TMP/nzbpostdetail.txt | grep "HITS:" | sed 's/^HITS://' | tr -d ";"`"
echo -e "Parts: `$CAT $TMP/nzbpostdetail.txt | grep "PARTS:" | sed 's/^PARTS://' | tr -d ";"`"
# Convert SIZE (bytes, integer part only) to MiB via a 20-bit right shift.
CONVERTONE=$($CAT $TMP/nzbpostdetail.txt | grep "SIZE:" | sed 's/^SIZE://' | cut -f 1 -d ".")
echo -ne "FileSize: $(($CONVERTONE >> 20)) Mb"
echo " "
fi
fi
exit 0
| true
|
8fd23a27d80412a15523b3ac293729cb20238f3e
|
Shell
|
phoreproject/obp-search-engine
|
/apiserver/start_db_in_docker.sh
|
UTF-8
| 1,548
| 3.6875
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Start (or reuse) a MySQL 5.7 docker container for the apiserver, generating
# a random root password on first launch and echoing connection recipes.
PORT=33060
DB_NAME='apiserver_db'
PASS_LEN=20
# base64 expands by ~4/3, so request enough raw bytes to cover PASS_LEN chars.
NUMBYTES=$(echo ${PASS_LEN} | awk '{print int($1*1.16)+1}')
NEW_PASSWORD="$(openssl rand -base64 ${NUMBYTES} | tr -d "=+/" | cut -c1-${PASS_LEN})"
DOCKER_INSTANCE_NAME='apiserver_mysql'
ALREADY_RUNNING=$(docker inspect -f '{{.State.Running}}' ${DOCKER_INSTANCE_NAME})
if [[ ${ALREADY_RUNNING} = true ]]; then
# Container already up: reuse the password stashed inside it.
NEW_PASSWORD=$(docker exec -it ${DOCKER_INSTANCE_NAME} sh -c "cat /root/mysql_pass.txt | tr -d '\n'")
echo "Container is already running with password ${NEW_PASSWORD}"
else
docker pull mysql/mysql-server:5.7
echo "Starting mysql with password ${NEW_PASSWORD}"
docker run --name=${DOCKER_INSTANCE_NAME} -d -p ${PORT}:3306 -e MYSQL_ROOT_PASSWORD=${NEW_PASSWORD} -e MYSQL_DATABASE=${DB_NAME} -e MYSQL_ROOT_HOST=% mysql/mysql-server:5.7
# Stash the generated password inside the container for future runs.
docker exec -it ${DOCKER_INSTANCE_NAME} sh -c "echo ${NEW_PASSWORD} > /root/mysql_pass.txt"
echo "Started mysql instance on localhost:${PORT} with root user and ${NEW_PASSWORD}"
fi
echo ""
echo "Now run npm init with DATABASE_URI set to: "
echo "mysql://root:${NEW_PASSWORD}@127.0.0.1:${PORT}/${DB_NAME}"
echo "OR connect to mysql from host"
echo "mysql -h 127.0.0.1 -P ${PORT} -u root -p${NEW_PASSWORD}"
echo "OR connect to docker bash"
echo "docker exec -it ${DOCKER_INSTANCE_NAME} bash"
echo "OR connect to mysql inside docker"
echo "docker exec -it ${DOCKER_INSTANCE_NAME} sh -c 'mysql -p${NEW_PASSWORD}'"
echo "OR crawler mysql cmd parameter"
echo "root:${NEW_PASSWORD}@tcp(127.0.0.1:${PORT})/${DB_NAME}"
| true
|
3aecdaa20e85cc641adbaaa22592384d91a2e8fb
|
Shell
|
m4saosw/pkg_installer
|
/scripts/libs/common_lib.sh
|
UTF-8
| 27,728
| 3.671875
| 4
|
[] |
no_license
|
#!/bin/bash
#############################################################################
# INSTALLER PACKAGE
# COMMONS LIBRARY
#
# BUGFIX: shebang corrected from /bin/sh to /bin/bash - the functions in this
# library use bash-only features (local, [[ ]], ${var:off:len} slicing).
#############################################################################
# This lib relies on the following variables, which must be initialized by the
# sourcing script:
# GLO_SYSTEM_FILE
# GLO_GLO_BACKUPDIR
# GLO_GLO_UNINSTALL_FILE
# GLO_TIMESTAMP
# GLO_NUM_GLO_STATUSCMDERROR
# GLO_STATUSCMD
#############################################################################
# ######################################################################
# Inspects the exit status of the last command/process ($GLO_STATUSCMD) and,
# on error, bumps the error counter and evals the requested flow-control
# statement.
# $1 - flow-control statement to run on error (break, continue, exit)
exitStatusHandler() {
local statement="$1"
# ****** if the previous command/process failed
if [ $GLO_STATUSCMD -ne 0 ]; then
# modernized: $(( )) replaces the deprecated backtick-`expr` fork
GLO_NUM_GLO_STATUSCMDERROR=$((GLO_NUM_GLO_STATUSCMDERROR + 1))
myEcho 0 error S "Ocorreu um Erro durante a execucao do Comando ou Processo anterior. Foi tomada a acao do tipo: $statement"
eval "$statement"
else
myEcho 0 debug S "OK"
fi
}
# ######################################################################
# TODO - THIS FUNCTION IS NOT YET IN USE
# Inspects the exit status of the last command and takes a stage-dependent
# action: "backup" and "install" honor their CONTINUEONERROR flags; any other
# stage always continues after counting the error.
# $1 - stage name (backup, install, or other)
# $2 - flow-control statement on continue ("-" means the default: continue)
# $3 - flow-control statement on stop ("-" means the default: break)
exitStatusHandler2() {
local etapa="$1"
local statementOnContinue="$2"
local statementOnBreak="$3"
if [ "$statementOnContinue" == "-" ]; then
statementOnContinue="continue"
fi
if [ "$statementOnBreak" == "-" ]; then
statementOnBreak="break"
fi
# ****** if the previous command/process failed
if [ $GLO_STATUSCMD -ne 0 ]; then
if [ "$etapa" == "backup" ]; then
if [ "$GLO_PROCESS_BACKUP_CONTINUEONERROR" == "S" ]; then
# modernized: $(( )) replaces the deprecated backtick-`expr` fork
GLO_NUM_GLO_STATUSCMDERROR=$((GLO_NUM_GLO_STATUSCMDERROR + 1))
myEcho 0 error S "Ocorreu um erro. O processo prosseguira."
eval "$statementOnContinue"
else
myEcho 0 error S "Ocorreu um erro. Processo interrompido."
eval "$statementOnBreak"
fi
elif [ "$etapa" == "install" ]; then
if [ "$GLO_PROCESS_INSTALL_CONTINUEONERROR" == "S" ]; then
GLO_NUM_GLO_STATUSCMDERROR=$((GLO_NUM_GLO_STATUSCMDERROR + 1))
myEcho 0 error S "Ocorreu um erro. O processo prosseguira."
eval "$statementOnContinue"
else
myEcho 0 error S "Ocorreu um erro. Processo interrompido."
eval "$statementOnBreak"
fi
else
GLO_NUM_GLO_STATUSCMDERROR=$((GLO_NUM_GLO_STATUSCMDERROR + 1))
myEcho 0 error S "Ocorreu um erro. Acao tomada: $statementOnContinue"
eval "$statementOnContinue"
fi
fi
}
# ######################################################################
# TODO - THIS FUNCTION IS NOT YET IN USE
# If any errors were counted during the process so far, shows the given user
# message and aborts the whole script.
# $1 - extra message appended to the abort warning
exitOnFoundStatusCmdError() {
# Nothing recorded: carry on silently.
if [ $GLO_NUM_GLO_STATUSCMDERROR -le 0 ]; then
return 0
fi
myEcho 0 warn S "Devido a erros ocorridos o processo sera abortado. $1"
myEcho 0 info S "Processo abortado"
exit 1
}
# ######################################################################
# TODO - THIS FUNCTION IS NOT YET IN USE
# Aborts the whole script with a generic error message if the last command
# (as recorded in GLO_STATUSCMD) failed.
exitOnStatusCmdError() {
# Success: nothing to do.
if [[ $GLO_STATUSCMD -eq 0 ]]; then
return 0
fi
myEcho 0 error S "Ocorreu um erro na operacao anterior. Processo abortado"
exit 1
}
# ######################################################################
# TODO - THIS FUNCTION IS NOT YET IN USE
# Shows a generic error message, bumps the error counter and returns 1 so the
# caller can decide to keep going.
continueOnStatusCmdError() {
# ****** if the previous command failed
if [[ $GLO_STATUSCMD -ne 0 ]]; then
myEcho 0 error S "Ocorreu um erro na operacao anterior. Processo abortado"
# modernized: $(( )) replaces the deprecated backtick-`expr` fork
GLO_NUM_GLO_STATUSCMDERROR=$((GLO_NUM_GLO_STATUSCMDERROR + 1))
return 1
fi
}
# ######################################################################
# Validates that the environment configuration file pointed to by
# GLO_SYSTEM_FILE exists; returns GLO_ERROR otherwise.
isValid_Var_SystemFile() {
# File present: environment is usable.
if [ -f "$GLO_SYSTEM_FILE" ]; then
return 0
fi
myEcho 0 error S "Arquivo de configuracao de ambiente '$GLO_SYSTEM_FILE' nao encontrado."
myEcho 0 info S "Verifique se a variavel 'SYSTEM_PATH' esta definida e esta executando com usuario correto."
myEcho 0 info S "Processo abortado"
return $GLO_ERROR
}
# ######################################################################
# PRINTS USEFUL ENVIRONMENT INFO AS THE (UN)INSTALL HEADER - INTERNAL USE ONLY
headerInfo() {
echo ""
myEcho 0 info S "#####################################################################"
myEcho 0 info S "PACKAGE INSTALLER 2.0.0.4 beta"
myEcho 0 info S " "
myEcho 0 info S "Script Executado: $(echo $0 $GLO_SCRIPT_ARGUMENTS)" # running script name + its arguments
myEcho 0 info S "Diretorio: $(pwd)"
myEcho 0 info S "Host: $(hostname -s)"
myEcho 0 info S "User: $(whoami)"
myEcho 0 info S "PID: $$"
myEcho 0 info S "Hora: $(date +"%d-%m-%Y %H:%M:%S")"
echo ""
}
# ######################################################################
# CREATES THE BACKUP DIRECTORY; refuses to run over a previous installation.
createBackupDir() {
# A leftover backup dir means a previous install was never uninstalled.
if [ -d "$GLO_BACKUPDIR" ]; then
myEcho 0 error S "Ja existe uma instalacao efetuada anteriormente. Por favor desinstale para poder instalar novamente (para instalar forcadamente renomeie o diretorio '$GLO_BACKUPDIR' para outro nome)"
myEcho 0 info S "Instalacao abortada"
exit 1
fi
myEcho 0 info S "Criando diretorio de backup '$GLO_BACKUPDIR'"
mkdir $GLO_BACKUPDIR
}
# ######################################################################
# CHECKS THAT THE BACKUP DIRECTORY FROM A PREVIOUS INSTALLATION EXISTS
checkBackupDir() {
# Backup present: uninstalling is possible.
if [ -d "$GLO_BACKUPDIR" ]; then
return 0
fi
myEcho 0 error S "Nao e possivel desinstalar pois nao ha uma instalacao anterior ou o diretorio do backup '$GLO_BACKUPDIR' foi deletado."
myEcho 0 info S "Desinstalacao abortada"
exit 1
}
# ######################################################################
# (RE)CREATES THE AUXILIARY UNINSTALL FILE (list of backups to restore).
createUninstallFile() {
if [ -f "$GLO_UNINSTALL_FILE" ]; then
myEcho 0 info S "Removendo arquivo auxiliar pre-existente '$GLO_UNINSTALL_FILE'"
rm -f "$GLO_UNINSTALL_FILE"
fi
myEcho 0 info S "Criando arquivo auxiliar '$GLO_UNINSTALL_FILE'"
touch "$GLO_UNINSTALL_FILE"
# Header lines explaining the file format to future readers.
{
echo "# Lista de arquivos para o script de desinstalacao - nao remova este arquivo"
echo "# Origem refere-se ao arquivo backup. Destino refere-se ao local para onde deve ser restaurado"
} >> "$GLO_UNINSTALL_FILE"
}
# ######################################################################
# FINISHES THE UNINSTALL PROCESS
# Removes the backup artifacts so a future installation is allowed again.
endUninstallSO() {
echo ""
#remove backup files generated by installation process (to permit a new installation in the future)
myEcho 0 info S "Removendo pasta de backup '$GLO_BACKUPDIR'"
rm -rf $GLO_BACKUPDIR
myEcho 0 info S "Removendo arquivo auxiliar de desinstalacao '$GLO_UNINSTALL_FILE'"
rm -rf $GLO_UNINSTALL_FILE
}
# ######################################################################
# ANALISA O ARQUIVO DE LOG PROCURANDO POR POSSIVEIS ERROS
# $1 - arquivo de log
checkLog() {
    # Scans the log file ($1) for error markers and warns when any appear.
    # Returns 1 when at least one suspicious line exists, 0 otherwise.
    local logFile="$1"
    # Installer error tag or Oracle error code prefix.
    local pattern="\[error\]|ORA-"
    local num
    # grep reads the file directly (no useless `cat |`) and -iEc counts
    # matching lines case-insensitively. Locals replace the globals the
    # original leaked (PATTERN/NUM).
    num=$(grep -iEc "$pattern" "$logFile")
    if [ "$num" != "0" ]; then
        myEcho 0 warn S "Ocorreram $num mensagens de erros, pelo menos."
        return 1
    fi
}
# ######################################################################
# remove cache in the instances
removeCache(){
    # Removes the cache directory of every engine instance whose path is
    # configured in $GLO_SYSTEM_FILE (keys matching engine.inst.*.path).
    echo ""
    myEcho 0 info S "Removendo cache da instancias..."
    local instance_folder
    # Read one path per line so instance folders are never word-split
    # (the original for-loop over a backtick substitution broke on spaces
    # and passed an unquoted path to `rm -rf`).
    while IFS= read -r instance_folder; do
        [ -n "$instance_folder" ] || continue
        myEcho 1 info S "Removendo cache da instancia ${instance_folder:30:30}..."
        rm -rf -- "$instance_folder/cache/"
    done < <(grep "engine.inst.*.path" "$GLO_SYSTEM_FILE" | awk -F "=" '{print $2}')
}
# ######################################################################
# upgrade solution configs
upgradeVersion(){
    # Runs the solution's upgrade_version.sh, located via the
    # acm.scripts.path key of the system file.
    echo ""
    myEcho 1 info S "Atualizando as configuracoes da solucao (upgrade_version.sh)..."
    echo ""
    # Declaration split from assignment so a failed grep/awk pipeline is
    # not masked by the always-zero exit status of `local`.
    local ACM_SCRIPTS_PATH
    ACM_SCRIPTS_PATH=$(grep "acm.scripts.path" "$GLO_SYSTEM_FILE" | awk -F "=" '{print $2}')
    "$ACM_SCRIPTS_PATH/upgrade_version.sh"
}
# ######################################################################
# RETORNA TRUE CASO O HOST INFORMADO SEJA O HOST CORRENTE
# modo de uso1: if isCurrentHost "1tokyo" ; then
# modo de uso2: if isCurrentHost "1tokyo" || isCurrentHost "1tokyo" ; then
isCurrentHost() {
    # Returns 0 when the current hostname contains the pattern in $1,
    # 1 otherwise.
    # usage 1: if isCurrentHost "1tokyo" ; then
    # usage 2: if isCurrentHost "1tokyo" || isCurrentHost "2tokyo" ; then
    # grep -q replaces the fragile `| wc -l` count-comparison of the
    # original; `--` and quoting keep unusual patterns intact.
    hostname | grep -q -- "$1"
}
# ######################################################################
# REMOVE QUALQUER CODIGO DE CORES
removeColorCodes() {
    # Strips ANSI escape sequences (colors, cursor movement) from the file
    # named in $1, editing it in place. Useful after `tee`-ing colored logs.
    local targetFile="$1"
    local ansiPattern='s,\x1B\[[0-9;]*[a-zA-Z],,g'
    sed -i "$ansiPattern" "$targetFile"
}
# ######################################################################
# OBTEM UM TIMESTAMP (MILISEGUNDOS)
getTimestamp() {
    # Prints the current time as a 13-digit epoch-milliseconds value
    # (first 13 digits of seconds+nanoseconds).
    local nanos
    nanos=$(date +%s%N)
    echo "${nanos:0:13}"
}
# ######################################################################
# LE A ENTRADA E DEVOLVE A SAIDA COM ECHOS DO TIPO ERROR
listEchoError() {
    # Reads stdin line by line and re-emits each line through myEcho at
    # error level, prefixed with the optional message in $1.
    local prefix="$1"
    local currentLine  # local so concurrent callers do not share state
    while read currentLine; do
        myEcho 0 error S "$prefix $currentLine"
    done
}
# ######################################################################
# LE A ENTRADA E DEVOLVE A SAIDA COM ECHOS DO TIPO INFO
listEchoInfo() {
    # Buffers all of stdin first, then re-emits each line through myEcho at
    # info level, prefixed with the optional message in $1.
    local bufferedInput="$(</dev/stdin)"
    local prefix="$1"
    local currentLine  # local so concurrent callers do not share state
    while read currentLine; do
        myEcho 0 info S "$prefix $currentLine"
    done <<< "$bufferedInput"
}
# ######################################################################
# ELIMINA LINHAS DE COMENTARIOS DA STDIN, E LINHAS EM BRANCO. O RESULTADO SAI NA STDOUT
# Os seguintes casos sao cobertos:
# emulate.root_path=my root path certo # certo com comentario - NรฃO apaga esta linha (cuidado: evite comentarios ao final da linha)
# # emulate.root_path=sujeira espaco - ok
# # emulate.root_path=sujeira tab - ok
# #emulate.root_path=sujeira - ok
cleanComments() {
    # Filters out comment lines (optional leading blanks + '#') and empty
    # lines. Covered cases:
    #   key=value            # kept (avoid trailing comments, they are kept too)
    #   "  # comment"        # dropped (space- or tab-indented)
    #   "#comment"           # dropped
    # Input comes from the file named in $1 or, when no argument is given,
    # from stdin.
    local sourceFile="$1"
    if [ -n "$sourceFile" ]; then
        # Fast path: one grep pipeline over the whole file.
        grep -Ev "^[[:blank:]]*#" "$sourceFile" | grep -v ^$
    else
        # Slow path (kept for pipe-style callers): line-by-line filtering.
        local currentLine
        while read currentLine; do
            echo "$currentLine" | grep -Ev "^[[:blank:]]*#" | grep -v ^$
        done
    fi
}
# ######################################################################
# LIMPA PATHS COM BARRAS DUPLICADAS
cleanPath() {
    # Collapses duplicated slashes ("//") in every path read from stdin.
    local dedupeExpr="s#//#/#g"
    local currentLine  # local so concurrent callers do not share state
    while read currentLine; do
        echo $currentLine | sed "$dedupeExpr"
    done
}
# ######################################################################
# RETORNA UM ARQUIVO TEMPORARIO
# $1 - prefixo desejado (opcional)
# retorno: imprime o path do arquivo na stdout
# exit status: erro caso nao consiga criar arquivo
getTmpFilename() {
    # Builds a unique temp-file path under $GLO_TMPDIR (optionally tagged
    # with the prefix in $1), creates the file and prints its path.
    # All log messages go to stderr because callers capture stdout
    # (typically via command substitution) expecting only the path.
    # Exit status: non-zero when the computed file already exists.
    local prefix="$1"
    local tmpFile="$GLO_TMPDIR/tmp_file_$(getTimestamp)_$prefix.tmp"
    myEcho 0 debug S "Criando arquivo temporario... $tmpFile" >&2
    if [ -f "$tmpFile" ]; then
        myEcho 0 error S "[getTmpFilename] Arquivo temporario $tmpFile ja existe!" >&2
        return 1
    fi
    # Create the parent directory on demand.
    local parentDir
    parentDir=$(dirname "$tmpFile")
    [ -d "$parentDir" ] || mkdir -p "$parentDir"
    touch "$tmpFile"
    echo "$tmpFile"
}
# ######################################################################
# FAZ UM ECHO PERSONALIZADO (INDENTACAO, NIVEL DE LOG E COLORIDO)
# se for utilizar em conjunto com o tee, utilize ao final o removeColorCodes
# $1 - nivel (0 a n)
# $2 - tipo (info debug error warn)
# $3 - tipo de texto (S - simples, T1 - titulo1, T2 - titulo2, P1 - paragrafo, B1 - box1)
# $4 - a mensagem
myEcho() {
    # Central logging helper: prints an optionally indented, colored and
    # decorated message with a level tag and timestamp.
    #   $1 - indentation level (0..n; one TABSPACE per level)
    #   $2 - level: info | debug | error | warn | none (case-insensitive)
    #   $3 - text type: S simple, T1/T2 titles, P1 paragraph, B1 box
    #   $4 - message (may be multi-line; each line is printed separately)
    # Globals read: GLO_SHOW_DEBUG_MESSAGES ("S" enables debug output),
    #               GLO_SHOW_COLOR_MESSAGES ("S" enables ANSI colors).
    local CONT=0
    local S=""
    local TABSPACE=" "
    # --- ANSI color escape codes
    local GRAY='\e[1;30m'
    local RED='\e[0;31m'
    local YELLOW='\e[0;33m'
    local BLUE='\e[1;34m'
    local GREEN='\e[0;32m'
    local CYAN='\e[0;36m'
    local NOCOLOR='\e[0m'
    local COLOR=$NOCOLOR
    local LEVEL=$( echo $2 | tr '[a-z]' '[A-Z]' )
    local textType=$( echo $3 | tr '[a-z]' '[A-Z]' )
    local message=$4
    local timeNow=$( date +"%T" )
    local strLevel="[$LEVEL][$timeNow] "
    # Empty message: nothing to print.
    if [ "$message" == "" ]; then
        return 0
    fi
    # A single-space message means "print a blank line" (still subject to
    # the debug-visibility filter below).
    if [ "$message" == " " ]; then
        if [[ ("$LEVEL" == "DEBUG" && $GLO_SHOW_DEBUG_MESSAGES == "S") || ("$LEVEL" != "DEBUG") ]]; then
            echo ""
            return 0
        fi
    fi
    # Map level to color; NONE additionally drops the "[LEVEL][time]" tag.
    if [ "$LEVEL" == 'ERROR' ]; then
        COLOR=$RED
    elif [ "$LEVEL" == 'WARN' ]; then
        COLOR=$YELLOW
    elif [ "$LEVEL" == 'DEBUG' ]; then
        #COLOR=$BLUE
        COLOR=$GRAY
    elif [ "$LEVEL" == 'INFO' ]; then
        COLOR=$NOCOLOR
        #COLOR=$GREEN
    elif [ "$LEVEL" == 'NONE' ]; then
        strLevel=""
    fi
    # Build the indentation prefix: one TABSPACE per requested level.
    while [ $CONT -lt $1 ]; do
        S+="$TABSPACE"
        CONT=$( expr $CONT + 1 )
    done
    # Debug messages are only printed when GLO_SHOW_DEBUG_MESSAGES == "S".
    if [[ ("$LEVEL" == "DEBUG" && $GLO_SHOW_DEBUG_MESSAGES == "S") || ("$LEVEL" != "DEBUG") ]]; then
        # --- text-type decoration printed BEFORE the message
        case $textType in
        "T1")
            echo ""
            echo ""
            echo ""
            echo ""
            echo -e "$S$CYAN$strLevel####################################################################################################$NOCOLOR"
            ;;
        "T2")
            echo ""
            echo ""
            echo -e "$S$CYAN$strLevel*************************************************************************************$NOCOLOR"
            ;;
        "B1")
            echo ""
            echo ""
            echo -e "$S$CYAN$strLevel=========================================================================$NOCOLOR"
            ;;
        "P1")
            echo ""
            ;;
        *)
        esac
        # Colored or plain output of the message body.
        if [[ $GLO_SHOW_COLOR_MESSAGES == "S" ]]; then
            # Iterate in case the message spans multiple lines.
            local line
            while read line; do
                echo -e "$S$COLOR$strLevel$line$NOCOLOR"
            done <<< "$message"
        else
            # Iterate in case the message spans multiple lines.
            local line
            while read line; do
                echo "$S$strLevel$line"
            done <<< "$message"
        fi
        # --- text-type decoration printed AFTER the message
        case $textType in
        "T1"|"T2")
            echo ""
            ;;
        "B1")
            echo -e "$S$CYAN$strLevel=========================================================================$NOCOLOR"
            echo ""
            echo ""
            ;;
        *)
        esac
    fi
}
# ######################################################################
# ESTA FUNCAO NAO ESTA SENDO USADA. ESTA EM DESENVOLVIMENTO - Tentando INTEGRAR com a funcao myEchoColor
# FAZ UM ECHO PERSONALIZADO (INDENTACAO, NIVEL DE LOG E COLORIDO)
# se for utilizar em conjunto com o tee, utilize ao final o removeColorCodes
# $1 - nivel (0 a n)
# $2 - tipo (info debug error warn)
# $3 - tipo de texto (S - simples, T1 - titulo1, T2 - titulo2, P1 - paragrafo, B1 - box1)
# $4 - a mensagem
myEcho_() {
    # NOT USED YET — work in progress: an attempted refactor of myEcho that
    # delegates all coloring to myEchoColor (note COLOR holds the color
    # *name* here, not the escape code).
    #   $1 - indentation level (0..n)
    #   $2 - level: info | debug | error | warn | none
    #   $3 - text type: S simple, T1/T2 titles, P1 paragraph, B1 box
    #   $4 - message (may be multi-line)
    # Globals read: GLO_SHOW_DEBUG_MESSAGES, GLO_SHOW_COLOR_MESSAGES.
    local CONT=0
    local S=""
    local TABSPACE=" "
    local COLOR
    local LEVEL=$( echo $2 | tr '[a-z]' '[A-Z]' )
    local textType=$( echo $3 | tr '[a-z]' '[A-Z]' )
    local message=$4
    local timeNow=$( date +"%T" )
    local strLevel="[$LEVEL][$timeNow] "
    # Empty message: nothing to print.
    if [ "$message" == "" ]; then
        return 0
    fi
    # A single-space message means "print a blank line".
    if [ "$message" == " " ]; then
        if [[ ("$LEVEL" == "DEBUG" && $GLO_SHOW_DEBUG_MESSAGES == "S") || ("$LEVEL" != "DEBUG") ]]; then
            echo ""
            return 0
        fi
    fi
    # Map level to a color NAME consumed later by myEchoColor.
    if [ "$LEVEL" == 'ERROR' ]; then
        COLOR=RED
    elif [ "$LEVEL" == 'WARN' ]; then
        COLOR=YELLOW
    elif [ "$LEVEL" == 'DEBUG' ]; then
        #COLOR=BLUE
        COLOR=GRAY
    elif [ "$LEVEL" == 'INFO' ]; then
        COLOR=NOCOLOR
        #COLOR=$GREEN
    elif [ "$LEVEL" == 'NONE' ]; then
        strLevel=""
    fi
    # Build the indentation prefix.
    while [ $CONT -lt $1 ]; do
        S+="$TABSPACE"
        CONT=$( expr $CONT + 1 )
    done
    # Debug messages require GLO_SHOW_DEBUG_MESSAGES == "S".
    if [[ ("$LEVEL" == "DEBUG" && $GLO_SHOW_DEBUG_MESSAGES == "S") || ("$LEVEL" != "DEBUG") ]]; then
        # --- text-type decoration printed BEFORE the message
        case $textType in
        "T1")
            echo ""
            echo ""
            echo ""
            echo ""
            myEchoColor "CYAN" "$S$strLevel####################################################################################################"
            ;;
        "T2")
            echo ""
            echo ""
            myEchoColor "CYAN" "$S$strLevel*************************************************************************************"
            ;;
        "B1")
            echo ""
            echo ""
            myEchoColor "CYAN" "$S$strLevel========================================================================="
            ;;
        "P1")
            echo ""
            ;;
        *)
        esac
        # Colored or plain output of the message body.
        if [[ $GLO_SHOW_COLOR_MESSAGES == "S" ]]; then
            # Iterate in case the message spans multiple lines.
            local line
            while read line; do
                local mymsg=$( echo "$S$strLevel$line" )
                myEchoColor "$COLOR" "$mymsg"
            done <<< "$message"
        else
            # Iterate in case the message spans multiple lines.
            local line
            while read line; do
                echo "$S$strLevel$line"
            done <<< "$message"
        fi
        # --- text-type decoration printed AFTER the message
        case $textType in
        "T1"|"T2")
            echo ""
            ;;
        "B1")
            myEchoColor "CYAN" "$S$strLevel========================================================================="
            echo ""
            echo ""
            ;;
        *)
        esac
    fi
}
# ######################################################################
# FAZ UM ECHO COLORIDO
# $1 - cor (RED, YELLOW, BLUE, GREEN, CYAN, NOCOLOR)
# $2 - mensagem (pode ser single ou multi-line)
myEchoColor() {
    # Prints a (possibly multi-line) message in the requested color.
    #   $1 - color name: RED, YELLOW, BLUE, GREEN, CYAN, GRAY, NOCOLOR
    #        (case-insensitive)
    #   $2 - message; each line is emitted separately
    # Colors are applied only when GLO_SHOW_COLOR_MESSAGES == "S";
    # otherwise the text is echoed uncolored.
    local color=$( echo $1 | tr '[a-z]' '[A-Z]' )
    local listMessage="$2"
    [[ "$listMessage" != "" ]] || return 0
    # --- ANSI color escape codes
    local GRAY='\e[1;30m'
    local RED='\e[0;31m'
    local YELLOW='\e[0;33m'
    local BLUE='\e[1;34m'
    local GREEN='\e[0;32m'
    local CYAN='\e[0;36m'
    local NOCOLOR='\e[0m'
    # Bash indirect expansion replaces the original `eval`, which would
    # have executed arbitrary text if $1 were ever attacker-controlled.
    # Unknown color names resolve to "" (no coloring), as before.
    local colorSelected=""
    if [[ -n "$color" ]]; then
        colorSelected=${!color}
    fi
    local line
    if [[ $GLO_SHOW_COLOR_MESSAGES == "S" ]]; then
        while read line; do
            echo -e "$colorSelected$line$NOCOLOR"
        done <<< "$listMessage"
    else
        while read line; do
            echo "$line"
        done <<< "$listMessage"
    fi
}
# ######################################################################
convertFiles() {
    # Normalizes line endings (dos2unix -k keeps timestamps) of every
    # text-like file under the directory given in $1.
    local dir="$1"
    local ext
    echo ""
    myEcho 0 info N "Convertendo arquivos (dos2unix) do diretorio '$dir'..."
    # One loop instead of six copy-pasted find invocations; add an
    # extension here to cover more file types. "$dir" is quoted so paths
    # containing spaces work.
    for ext in sh txt sql properties xml cfg; do
        find "$dir" -name "*.$ext" -type f -exec dos2unix -k {} \;
    done
}
# ######################################################################
allowFiles() {
    # Grants execute permission (chmod +x) to every *.sh file under the
    # directory given in $1.
    local dir="$1"
    echo ""
    myEcho 0 info N "Concendendo provilegios de execucao (chmod +x) a arquivos sh do diretorio '$dir'..."
    # "$dir" is quoted so paths containing spaces are handled correctly
    # (the original passed it unquoted to find).
    find "$dir" -name '*.sh' -type f -exec chmod +x {} \;
}
# ######################################################################
doSpecificOperations() {
    # Runs the command in $2 only when the current host matches $1.
    #   $1 - either a literal hostname, or a group token such as {ALL},
    #        {ENGINE}, {PRD}, ... referring to the host lists below.
    #   $2 - command line, executed through eval on a matching host.
    # The original repeated the same if/grep/eval block ~20 times, once per
    # group; this version keeps identical behavior using one generic lookup
    # via bash indirect expansion.
    #
    # Host groups (space-separated hostname substrings; empty = no hosts).
    ENGINE="prod1 prod2 local1"
    ENGINE_CLI=""
    ENGINE_PRD=""
    ENGINE_LOCAL=""
    ENGINE_RC=""
    INTEGRATION=""
    INTEGRATION_CLI=""
    INTEGRATION_PRD=""
    INTEGRATION_LOCAL=""
    INTEGRATION_RC=""
    APRESENTATION=""
    APRESENTATION_CLI=""
    APRESENTATION_PRD=""
    APRESENTATION_LOCAL=""
    APRESENTATION_RC=""
    PRD=""
    HML=""
    LOCAL=""
    DEV=""
    RC=""
    HOST_INFORMED="$1"
    OPERATION=$2
    HOSTNAME=$(hostname -s)
    # {ALL}: run unconditionally on every host.
    if [ "$HOST_INFORMED" == "{ALL}" ]; then
        echo "[info] Executando o comando \"$OPERATION\" no host '$(hostname -s)'..."
        eval $OPERATION
        return 0
    fi
    case "$HOST_INFORMED" in
        "{"*"}")
            # Group token: strip the braces and look up the variable of the
            # same name; run when the current hostname appears in its list.
            local groupName=${HOST_INFORMED#\{}
            groupName=${groupName%\}}
            local groupHosts=""
            # Only dereference valid identifier names; unknown/invalid
            # tokens behave like an empty group (no-op), as before.
            if [[ $groupName =~ ^[A-Za-z_][A-Za-z0-9_]*$ ]]; then
                groupHosts=${!groupName}
            fi
            if echo "$groupHosts" | grep -q -- "$HOSTNAME"; then
                echo "[info] Executando o comando \"$OPERATION\" no host '$(hostname -s)'..."
                eval $OPERATION
            fi
            return 0
            ;;
        *)
            # Literal hostname: run only on that exact host.
            if [ "$HOSTNAME" == "$HOST_INFORMED" ]; then
                echo "[info] Executando o comando \"$OPERATION\" no host '$(hostname -s)'..."
                eval $OPERATION
            fi
            return 0
            ;;
    esac
}
# ######################################################################
# TODO - ESTA FUNCAO NAO ESTA FINALIZADA
# realiza o backup da origem informada para o destino informado
# $1 - operacao (copy, remove)
# $2 - origem path
# $3 - destino path
# $4 - backup path
myBackup() {
    # TODO — function is UNFINISHED (per the original note). Today it only
    # copies the file at $2 into the backup tree and registers one
    # uninstall entry; $3/$4 are accepted but never used.
    #   $1 - operation (copy, remove)
    #   $2 - source path (file to back up)
    #   $3 - target path (unused)
    #   $4 - backup path (unused)
    local operation=$1
    local sourcePath=$2
    local targetPath=$3
    local backupPath=$4
    # Legacy aliases kept from an earlier version of the function.
    local _=$1
    local _PATH=$2
    local _FILE=$( basename "$_PATH" )
    local DIR=$( dirname "$_PATH" )
    echo ""
    myEcho 0 info S "[myBackup] Criando backup de '$_PATH'"
    # Mirror the source directory structure under the backup root.
    if [ ! -d "$GLO_BACKUPDIR/$DIR" ]; then
        mkdir -p "$GLO_BACKUPDIR/$DIR"
    fi
    myEcho 0 debug S "cp -fp $DIR/$_FILE $GLO_BACKUPDIR/$DIR"
    cp -fp "$DIR/$_FILE" "$GLO_BACKUPDIR/$DIR"
    addToUninstallList "$operation" "$GLO_BACKUPDIR/$_PATH" "$DIR"
    # Planned call shapes, kept for reference while the function is finished:
    # addToUninstallList copy "$sourceCommandLine" "$targetPath" "$GLO_BACKUPDIR/$targetPath" || { GLO_STATUSCMD=$?; result=$GLO_ERROR; exitStatusHandler "break 2"; }
    # addToUninstallList remove "$sourceCommandLine" "$removePath"
}
# ######################################################################
# EXCLUI ARQUIVOS DO DIRETORIO TEMPORARIO
removeTmpFiles() {
    # Deletes the temporary files created under $GLO_TMPDIR.
    echo ""
    myEcho 0 info S "Excluindo arquivos temporarios ($GLO_TMPDIR)..."
    # ${VAR:?} aborts when GLO_TMPDIR is unset/empty, preventing the glob
    # from expanding to "/*.*" and deleting files outside the temp area.
    # NOTE(review): the *.* glob skips extension-less files; kept as-is
    # because getTmpFilename always produces *.tmp names.
    rm -rf -- "${GLO_TMPDIR:?GLO_TMPDIR nao definido}"/*.*
    return $?
}
| true
|
cb4e0ecc42865a640a48c6c7ca7e404893d3ff8d
|
Shell
|
pawan3103/dockerizedDjango
|
/dockerconfig/postgres/backup.sh
|
UTF-8
| 468
| 3.671875
| 4
|
[] |
no_license
|
#!/bin/bash
# Creates a timestamped custom-format dump of the database reachable at
# host "postgres", writing it to /backups/.
# Required env vars: POSTGRES_USER, POSTGRES_PASSWORD.
# stop on errors
set -e

# Refuse to run against the default superuser account; an application user
# must be configured explicitly.
# Fix: the original printed "creating backup" here before exit 1, which
# made the refusal look like a successful start.
if [ "$POSTGRES_USER" == "postgres" ]
then
  echo "backup aborted: POSTGRES_USER must not be the default 'postgres' user" >&2
  exit 1
fi

# export the postgres password so that subsequent commands don't ask for it
export PGPASSWORD=$POSTGRES_PASSWORD

echo "creating backup"
echo "==============="

FILENAME=backup_$(date +'%Y_%m_%dT%H_%M_%S').dump
pg_dump -Fc --no-acl --no-owner -h postgres -U "$POSTGRES_USER" > "/backups/$FILENAME"

echo "backup taken successfully - $FILENAME"
| true
|
7d68d3503acd88e58b00b668d57507bfb134c399
|
Shell
|
0xNebiros/Red_Team_Class
|
/Carlos/Bash Scripting/Ejercicios/1. Backup/backup_3
|
UTF-8
| 2,581
| 4.1875
| 4
|
[] |
no_license
|
#!/bin/bash
# This bash scripts is used to backup a user's home directory to /tmp/

# The current user's home is archived into a timestamped tar.gz under /tmp.
user=$(whoami)
input=/home/$user
output=/tmp/${user}_home_$(date +%Y-%m-%d_%H%M%S).tar.gz
function total_files {
        # Counts regular files under the directory given in $1.
        # Fix: the original ran `find $1 . -type f`, where the stray "."
        # also counted the *current* directory's files, inflating the total
        # and breaking the source-vs-archive comparison done by the caller.
        find "$1" -type f | wc -l
}
function total_directories {
        # Counts directories under (and including) the path given in $1.
        local dir_count
        dir_count=$(find "$1" -type d | wc -l)
        echo "$dir_count"
}
# These helpers inspect a compressed archive without extracting it:
# `tar -tzf` lists the stored entry names, and directory entries always end
# with "/". Grepping for (or against) that trailing slash therefore
# separates directories from regular files; `wc -l` counts the result.
function total_archived_directories {
        # Counts directory entries inside the .tar.gz archive given in $1.
        local archive="$1"
        tar -tzf "$archive" | grep /$ | wc -l
}
function total_archived_files {
        # Counts non-directory entries (names without a trailing "/")
        # inside the .tar.gz archive given in $1.
        local archive="$1"
        tar -tzf "$archive" | grep -v /$ | wc -l
}
# Create the archive; tar's stderr (permission warnings, "file changed as
# we read it", etc.) is discarded so the summary below stays readable.
tar -czf $output $input 2> /dev/null

# Counts taken from the source tree...
src_files=$( total_files $input )
src_directories=$( total_directories $input )

# ...and from the archive itself, so the two can be compared and a
# success/failure message printed.
arch_files=$( total_archived_files $output )
arch_directories=$( total_archived_directories $output )

echo "Files to be included: $src_files"
echo "Directories to be included: $src_directories"
echo "Files archived: $arch_files"
echo "Directories archived: $arch_directories"

# Success criterion: the number of files found on disk must equal the
# number of file entries stored in the archive. On success the archive's
# details are listed; otherwise a failure message is printed.
if [ $src_files -eq $arch_files ]; then
        echo "Backup of $input completed!"
        echo "Details about the output backup file:"
        ls -l $output
else
        echo "Backup of $input failed!"
fi

# NOTE(review): the two lines below repeat the "Details..." output already
# printed on the success path above — they look like an editing leftover;
# confirm before removing.
echo "Details about the output backup file:"
ls -l $output
| true
|
cf75c229524f2e0dcfd4d794fc8547ff7b4c8a09
|
Shell
|
dj0nz/lxwrx
|
/iperf-server.sh
|
UTF-8
| 865
| 3.84375
| 4
|
[] |
no_license
|
#!/bin/bash
# purpose:
# quick and dirty solution to start/stop an iperf3 server on default port 5201/tcp
# adjust permissions and create symlink in /usr/local/bin to run it from everywhere
# dont forget to add firewall rules allowing 5201 in the input chain
#
# requirements:
# linux, iperf3, lsof, bash
#
# dj0Nz Feb 2022

IPERFBIN=/usr/bin/iperf3
LOGFILE=/var/log/iperf.log
PIDFILE=/var/run/iperf.pid
# Port iperf is currently listening on (empty when not running).
# $( ) replaces the legacy backtick substitutions; paths are quoted.
PORT=$(lsof -Pni | grep iperf | awk '{print $9}' | tr -d '*:')
ME=$(basename "$0")

case $1 in
   start)
      # -D daemonizes; -I records the daemon's PID for the stop action.
      "$IPERFBIN" --logfile "$LOGFILE" --timestamps -s -D -I "$PIDFILE"
      ;;
   stop)
      # The pidfile may contain a trailing NUL byte; strip it before kill.
      kill $(tr -d '\0' < "$PIDFILE")
      ;;
   status)
      if [[ $PORT = "" ]]; then
         echo "iperf server not running"
      else
         echo "iperf server listening on port $PORT"
      fi
      ;;
   *)
      echo "Usage: $ME start|stop|status"
      ;;
esac
| true
|
91bacc3598308afcd6b8a13a42c239a67f979336
|
Shell
|
aigarskadikis/spacewalk-scripts
|
/spacewalk-2.8-install-centos-7.5.sh
|
UTF-8
| 2,191
| 2.5625
| 3
|
[] |
no_license
|
#!/bin/bash
# Unattended Spacewalk 2.8 installation on CentOS 7.5:
# opens web ports, imports the repository signing keys, wires up the
# Spacewalk / EPEL / Java-packages repositories, installs Spacewalk with a
# local PostgreSQL backend and runs the silent setup.
# Must be run as root; requires network access to the listed mirrors.
#open 80 and 443 into firewall
firewall-cmd --permanent --add-service=http
firewall-cmd --permanent --add-service=https
firewall-cmd --reload
#update system
yum update -y
#install GPG key for Spacewalk repository
cd /etc/pki/rpm-gpg
curl -s -O http://yum.spacewalkproject.org/RPM-GPG-KEY-spacewalk-2015
rpm --import RPM-GPG-KEY-spacewalk-2015
#install GPG key for EPEL repository
cd /etc/pki/rpm-gpg
curl -s -O https://dl.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-7
rpm --import RPM-GPG-KEY-EPEL-7
#install GPG key for Java packages repository
cd /etc/pki/rpm-gpg
curl -s https://copr-be.cloud.fedoraproject.org/results/@spacewalkproject/java-packages/pubkey.gpg > java-packages.gpg
rpm --import java-packages.gpg
#install Spacewalk repository
rpm -Uvh https://copr-be.cloud.fedoraproject.org/results/@spacewalkproject/spacewalk-2.8/epel-7-x86_64/00736372-spacewalk-repo/spacewalk-repo-2.8-11.el7.centos.noarch.rpm
#install EPEL repository
rpm -Uvh https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
#install Java packages repository
cd /etc/yum.repos.d
curl -s -O https://copr.fedorainfracloud.org/coprs/g/spacewalkproject/java-packages/repo/epel-7/group_spacewalkproject-java-packages-epel-7.repo
#install postgresql database server
yum -y install spacewalk-setup-postgresql
#install spacewalk using postgresql as database server
yum -y install spacewalk-postgresql
#create spacewalk unattanded installation file into root home direcotry
# NOTE(review): the answer file below stores database/SSL passwords in
# plain text under /root — acceptable for a lab box, review for production.
cat > /root/spacewalk-answer-file << EOF
admin-email = root@localhost
ssl-set-cnames = spacewalk2
ssl-set-org = Spacewalk Org
ssl-set-org-unit = spacewalk
ssl-set-city = My City
ssl-set-state = My State
ssl-set-country = US
ssl-password = spacewalk
ssl-set-email = root@localhost
ssl-config-sslvhost = Y
db-backend=postgresql
db-name=spaceschema
db-user=spaceuser
db-password=spacepw
db-host=localhost
db-port=5432
enable-tftp=Y
EOF
#enable postgresql at startup
systemctl enable postgresql
#create first postgresql contend.. directories and stuff
postgresql-setup initdb
#start postgresql
systemctl start postgresql
#spacewalk silent install
spacewalk-setup --answer-file=/root/spacewalk-answer-file
| true
|
ed8bf3c41323452d624c7dcfe99be6ed0c31a524
|
Shell
|
cyber-dojo-retired/porter
|
/client_shell/test_001_storer_data_container_missing.sh
|
UTF-8
| 460
| 2.734375
| 3
|
[] |
no_license
|
#!/bin/bash
# Shell test: when the storer's data container is missing, `port` must
# print exactly the two "installed" info lines on stdout, report the
# missing container on stderr, and exit with status 3.
readonly my_dir="$( cd "$( dirname "${0}" )" && pwd )"

# Shared helpers: the `port` wrapper and the assert_* functions used below.
. ${my_dir}/porter_helpers.sh

test_001_storer_data_container_missing_is_error_status_3()
{
  export SHOW_PORTER_INFO=true
  # NOTE(review): "--id10" may be missing a separator (--id 10) — confirm
  # against porter_helpers.sh before changing.
  port --id10
  assert_stdout_includes_installed docker
  assert_stdout_includes_installed curl
  assert_stdout_line_count_equals 2
  assert_stderr_equals_cant_find_storers_data_container
  assert_status_equals 3
}

# shunit2 discovers and runs the test_* functions defined above.
. ${my_dir}/shunit2_helpers.sh
. ${my_dir}/shunit2
| true
|
79c95e2c8e8319263aacc157d1e7fa2a3642bbaa
|
Shell
|
darwinz/hackerrank
|
/bash/cut-6.sh
|
UTF-8
| 1,187
| 3.140625
| 3
|
[] |
no_license
|
#!/bin/bash
: '
Print the characters from thirteenth position to the end.
Input Format
A text file with lines of ASCII text only.
Constraints
* 1 <= N <= 100
(N is the number of lines of text in the input file)
Output Format
The output should contain N lines. For each input line, print the characters from thirteenth position to the end.
Sample Input
New York is a state in the Northeastern and Mid-Atlantic regions of the United States.
New York is the 27th-most extensive, the third-most populous populated of the 50 United States.
New York is bordered by New Jersey and Pennsylvania to the south.
About one third of all the battles of the Revolutionary War took place in New York.
Henry Hudson''s 1609 voyage marked the beginning of European involvement with the area.
Sample Output
a state in the Northeastern and Mid-Atlantic regions of the United States.
the 27th-most extensive, the third-most populous populated of the 50 United States.
bordered by New Jersey and Pennsylvania to the south.
ird of all the battles of the Revolutionary War took place in New York.
''s 1609 voyage marked the beginning of European involvement with the area.
'
# Portable form: the GNU-only long option --characters= and the explicit
# /dev/stdin are replaced by POSIX `cut -c`, which reads stdin by default.
cut -c13-
| true
|
4d71001f87715311c245cf04fab6e6ffc7a6ff55
|
Shell
|
herder/dotfiles
|
/home/.zsh.after/circleci.zsh
|
UTF-8
| 251
| 2.578125
| 3
|
[] |
no_license
|
# Installs the CircleCI CLI (plus its zsh completion) on shell startup if
# the binary is not already on PATH.
if ! type circleci >/dev/null 2>&1 ; then
  echo "Installing CircleCI..."
  curl -fLSs https://raw.githubusercontent.com/CircleCI-Public/circleci-cli/master/install.sh | DESTDIR=$HOME/.local/bin bash
  # Fix: ~/.zfunc may not exist on a fresh machine, which made the
  # completion redirect below fail.
  mkdir -p ~/.zfunc
  circleci completion zsh > ~/.zfunc/_circleci
fi
| true
|
2e86372fe2c16ccbe81bf614d43a9fa251263d68
|
Shell
|
ringcrl/cs-notes
|
/playground/22-06-12-node-startup/launch.sh
|
UTF-8
| 419
| 3.21875
| 3
|
[] |
no_license
|
# 1. Create the FIFOs if they do not exist yet (-p tests for a named pipe).
[ -p "/tmp/node_start_fifo" ] || mkfifo /tmp/node_start_fifo
[ -p "/tmp/node_finish_fifo" ] || mkfifo /tmp/node_finish_fifo
# 2. Launch the background FIFO manager script unless one is already
#    running (pgrep -c -f counts processes whose command line matches).
MANAGER_RUNNING_CNT=`pgrep -c -f "fifo-manager.sh"`
if [ "$MANAGER_RUNNING_CNT" -eq 0 ]
then
    nohup ./fifo-manager.sh > /dev/null 2>&1 &
fi
# 3. Start the worker proxy process, rooted at the current directory and
#    forwarding all remaining CLI arguments.
node proxy.js -r $PWD $*
| true
|
e960f121ce94c5629bbcd7c63c33f238bf835ea2
|
Shell
|
mikkogozalo/sydney39
|
/docker-entrypoint.sh
|
UTF-8
| 1,704
| 2.546875
| 3
|
[] |
no_license
|
#!/bin/bash
# Entrypoint for a local EOS development node.
# First run: starts nodeos in the background, waits for its HTTP API,
# creates the wallets/keys and the contract account, builds and deploys
# the farmville contract, then marks the data dir initialized.
# Later runs: simply restarts nodeos with --hard-replay.
set -m   # enable job control: needed for the `fg` that re-foregrounds nodeos
if [ -e "/mnt/dev/data/initialized" ]
then
    nodeos -e -p eosio -d /mnt/dev/data --config-dir /mnt/dev/config --hard-replay --http-validate-host=false --plugin eosio::wallet_api_plugin --plugin eosio::wallet_plugin --plugin eosio::producer_plugin --plugin eosio::history_plugin --plugin eosio::chain_api_plugin --plugin eosio::history_api_plugin --plugin eosio::http_plugin --http-server-address=0.0.0.0:8888 --access-control-allow-origin=* --contracts-console
else
    # Background start so the bootstrap steps below can talk to the node.
    nodeos -e -p eosio -d /mnt/dev/data --config-dir /mnt/dev/config --http-validate-host=false --plugin eosio::wallet_api_plugin --plugin eosio::wallet_plugin --plugin eosio::producer_plugin --plugin eosio::history_plugin --plugin eosio::chain_api_plugin --plugin eosio::history_api_plugin --plugin eosio::http_plugin --http-server-address=0.0.0.0:8888 --access-control-allow-origin=* --contracts-console &
    sleep 5s
    # Poll the chain API until nodeos answers.
    until curl localhost:8888/v1/chain/get_info
    do
        sleep 5s
    done

    touch /mnt/dev/data/initialized

    cleos wallet list
    # Wallet passwords are saved to files in the working directory.
    # NOTE(review): the private keys below appear to be the well-known
    # EOSIO development keys (not production secrets) — confirm.
    cleos wallet create -n eosiomain | tail -1 | sed -e 's/^"//' -e 's/"$//' > eosiomain_wallet_password.txt
    cleos wallet import -n eosiomain --private-key 5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3
    cleos wallet list
    cleos wallet list
    sleep 5s
    cleos wallet list
    cleos wallet create | tail -1 | sed -e 's/^"//' -e 's/"$//' > default_wallet_password.txt
    cleos wallet import --private-key 5KPMYCJoskqaaniDiBA4x5ZHCMPFAggE3rLa4jytBByBhc2jYTZ
    cleos wallet import --private-key 5KgUkmgSy4XGqjsL7Uc6XjeZUau1LdDqfinKsvjY8LfeftCuenM
    # Account that will own the deployed contract.
    cleos create account eosio fadmin EOS7dD59Df7y5S9kbMFPD4bwGNCWq9dv7GTqcBRjf83K7JkXwx2zd EOS57MDZYUBGfDTBYeCJ8qPsJ7NXQ68ur2uw22ksP7NpoAp9FTrYZ
    # Build (wast + abi) and deploy the contract.
    cd /work/farmville
    eosiocpp -o farmville.wast farmville.cpp
    eosiocpp -g farmville.abi farmville.cpp
    cd ..
    cleos set contract fadmin farmville
    # Bring the background nodeos back to the foreground so the container
    # keeps running on PID 1's job.
    fg
fi
| true
|
b60d7639915c1b23c2e847ad9aaa738f3fe8f673
|
Shell
|
AmyGuo/Linux
|
/shellTest/9.sh
|
UTF-8
| 911
| 3.875
| 4
|
[] |
no_license
|
#!/bin/bash
# Human-vs-computer rock-paper-scissors (石头剪刀布) game.
# Fix: the original file's Chinese string literals were mojibake (UTF-8
# text decoded with the wrong codepage, some broken across source lines);
# they are restored here from the intact game logic.
game=(石头 剪刀 布)   # index 0 = rock, 1 = scissors, 2 = paper
# $(( )) replaces the deprecated $[ ] arithmetic form.
num=$((RANDOM % 3))
computer=${game[$num]}
echo "请根据以下提示选择您的出拳手势："
echo "1. 石头"
echo "2. 剪刀"
echo "3. 布"
read -p "请输入1-3：" person
echo $computer
case $person in
1)
  echo "您出了石头"
  if [ $num -eq 0 ];then
    echo "平局"
  elif [ $num -eq 1 ];then
    echo "对方出了剪刀，您赢了"
  else
    echo "对方出了布，您输了"
  fi
  ;;
2)
  echo "您出了剪刀"
  if [ $num -eq 0 ];then
    echo "对方出了石头，您输了"
  elif [ $num -eq 1 ];then
    echo "平局"
  else
    echo "对方出了布，您赢了"
  fi
  ;;
3)
  echo "您出了布"
  if [ $num -eq 0 ];then
    echo "对方出了石头，您赢了"
  elif [ $num -eq 1 ];then
    echo "对方出了剪刀，您输了"
  else
    echo "平局"
  fi
  ;;
*)
  echo "必须输入1-3之间的数字"
esac
| true
|
1344370519ae376a8eb1d1af4197627b9045b68a
|
Shell
|
tal-moshayov/step-elastic-beanstalk-deploy
|
/run.sh
|
UTF-8
| 5,325
| 3.3125
| 3
|
[] |
no_license
|
#!/bin/bash
# Wercker deploy step: pushes the current build to AWS Elastic Beanstalk
# using the legacy eb CLI plus the "git aws.push" workflow.
# NOTE(review): fail/warn/debug/success and the WERCKER_* variables are
# presumably provided by the wercker step runner, which sources this file
# (that is why the top-level `return 0` below is valid) -- confirm.
set +e
cd $HOME
# Allow the whole step to be skipped via either env toggle.
if [ -n "$WERCKER_ELASTIC_BEANSTALK_DEPLOY_ELASTIC_BEANSTALK_DEPLOY_SKIP" ] || [ -n "$ELASTIC_BEANSTALK_DEPLOY_SKIP" ]; then
echo "Skipping AWS Elastic beanstalk deployment step!"
return 0;
fi
# --- Validate required step options -------------------------------------
if [ ! -n "$WERCKER_ELASTIC_BEANSTALK_DEPLOY_APP_NAME" ]
then
fail "Missing or empty option APP_NAME, please check wercker.yml"
fi
if [ ! -n "$WERCKER_ELASTIC_BEANSTALK_DEPLOY_ENV_NAME" ]
then
fail "Missing or empty option ENV_NAME, please check wercker.yml"
fi
if [ ! -n "$WERCKER_ELASTIC_BEANSTALK_DEPLOY_KEY" ]
then
fail "Missing or empty option KEY, please check wercker.yml"
fi
if [ ! -n "$WERCKER_ELASTIC_BEANSTALK_DEPLOY_SECRET" ]
then
fail "Missing or empty option SECRET, please check wercker.yml"
fi
if [ ! -n "$WERCKER_ELASTIC_BEANSTALK_DEPLOY_REGION" ]
then
warn "Missing or empty option REGION, defaulting to us-west-2"
WERCKER_ELASTIC_BEANSTALK_DEPLOY_REGION="us-west-2"
fi
if [ -n "$WERCKER_ELASTIC_BEANSTALK_DEPLOY_DEBUG" ]
then
warn "Debug mode turned on, this can dump potentially dangerous information to log files."
fi
if [ -z "$WERCKER_ELASTIC_BEANSTALK_DEPLOY_BRANCH" ]
then
fail "Missing or empty option BRANCH, please check wercker.yml"
fi
# --- Paths to the bundled eb CLI and the files it reads ------------------
AWSEB_ROOT="$WERCKER_STEP_ROOT/eb-tools"
AWSEB_TOOL="$AWSEB_ROOT/eb/linux/python2.7/eb"
mkdir -p "/home/ubuntu/.elasticbeanstalk/"
mkdir -p "$WERCKER_SOURCE_DIR/.elasticbeanstalk/"
if [ $? -ne "0" ]
then
fail "Unable to make directory.";
fi
debug "Change back to the source dir.";
cd $WERCKER_SOURCE_DIR
AWSEB_CREDENTIAL_FILE="/home/ubuntu/.elasticbeanstalk/aws_credential_file"
AWSEB_CONFIG_FILE="$WERCKER_SOURCE_DIR/.elasticbeanstalk/config"
AWSEB_DEVTOOLS_ENDPOINT="git.elasticbeanstalk.$WERCKER_ELASTIC_BEANSTALK_DEPLOY_REGION.amazonaws.com"
AWSEB_SERVICE_ENDPOINT="https://elasticbeanstalk.$WERCKER_ELASTIC_BEANSTALK_DEPLOY_REGION.amazonaws.com"
# Write the AWS credential file consumed by the eb tooling.
debug "Setting up credentials."
cat <<EOT >> $AWSEB_CREDENTIAL_FILE
AWSAccessKeyId=$WERCKER_ELASTIC_BEANSTALK_DEPLOY_KEY
AWSSecretKey=$WERCKER_ELASTIC_BEANSTALK_DEPLOY_SECRET
EOT
if [ -n "$WERCKER_ELASTIC_BEANSTALK_DEPLOY_DEBUG" ]
then
debug "Dumping Credential file."
cat $AWSEB_CREDENTIAL_FILE
fi
# Regenerate the .elasticbeanstalk/config that maps this git branch to the
# target application/environment.
debug "Setting up config file ($AWSEB_CONFIG_FILE)."
rm -f $AWSEB_CONFIG_FILE
cat <<EOT >> $AWSEB_CONFIG_FILE
[global]
ApplicationName=$WERCKER_ELASTIC_BEANSTALK_DEPLOY_APP_NAME
DevToolsEndpoint=$AWSEB_DEVTOOLS_ENDPOINT
Region=$WERCKER_ELASTIC_BEANSTALK_DEPLOY_REGION
ServiceEndpoint=$AWSEB_SERVICE_ENDPOINT
AwsCredentialFile=$AWSEB_CREDENTIAL_FILE
EnvironmentName=$WERCKER_ELASTIC_BEANSTALK_DEPLOY_ENV_NAME
[branches]
$WERCKER_GIT_BRANCH=$WERCKER_ELASTIC_BEANSTALK_DEPLOY_ENV_NAME
[branch:$WERCKER_GIT_BRANCH]
ApplicationVersionName=$WERCKER_GIT_BRANCH
EnvironmentName=$WERCKER_ELASTIC_BEANSTALK_DEPLOY_ENV_NAME
InstanceProfileName=aws-elasticbeanstalk-ec2-role
EOT
if [ $? -ne "0" ]
then
fail "Unable to set up config file."
fi
if [ -n "$WERCKER_ELASTIC_BEANSTALK_DEPLOY_DEBUG" ]
then
debug "Dumping config file."
cat $AWSEB_CONFIG_FILE
fi
# boto is a runtime dependency of the python2.7 eb CLI.
sudo pip install boto
if [ $? -ne "0" ]
then
fail "Could not install boto, which is needed for eb"
fi
# Installs the "git aws.*" subcommands used for the final push.
debug "Setting up AWS tools repository (git commands)"
bash $AWSEB_ROOT/AWSDevTools/Linux/AWSDevTools-RepositorySetup.sh
if [ $? -ne "0" ]
then
fail "Unknown error with EB tools."
fi
# debug "git aws.config"
# if [ -n "$WERCKER_ELASTIC_BEANSTALK_DEPLOY_DEBUG" ]
# then
# echo "echo -e \"$WERCKER_ELASTIC_BEANSTALK_DEPLOY_REGION\n$WERCKER_ELASTIC_BEANSTALK_DEPLOY_APP_NAME\n$WERCKER_ELASTIC_BEANSTALK_DEPLOY_ENV_NAME\n\" | git aws.config"
# fi
# echo -e "$WERCKER_ELASTIC_BEANSTALK_DEPLOY_REGION\n$WERCKER_ELASTIC_BEANSTALK_DEPLOY_APP_NAME\n$WERCKER_ELASTIC_BEANSTALK_DEPLOY_ENV_NAME\n" | git aws.config
# if [ $? -ne "0" ]
# then
# fail "Failed configurating git"
# fi
# Drive "eb init"'s interactive prompts by piping canned answers.
# NOTE(review): the numeric answers (1, 47, 2, N, 1) correspond to specific
# menu positions of this eb version -- fragile if the CLI changes.
debug "eb init"
debug "echo -e $WERCKER_ELASTIC_BEANSTALK_DEPLOY_KEY\n$WERCKER_ELASTIC_BEANSTALK_DEPLOY_SECRET\n1\n$WERCKER_ELASTIC_BEANSTALK_DEPLOY_APP_NAME\n$WERCKER_ELASTIC_BEANSTALK_DEPLOY_ENV_NAME\n1\n47\n2\nN\n1\n | $AWSEB_TOOL init"
echo -e "$WERCKER_ELASTIC_BEANSTALK_DEPLOY_KEY\n$WERCKER_ELASTIC_BEANSTALK_DEPLOY_SECRET\n1\n$WERCKER_ELASTIC_BEANSTALK_DEPLOY_APP_NAME\n$WERCKER_ELASTIC_BEANSTALK_DEPLOY_ENV_NAME\n1\n47\n2\nN\n1\n" | $AWSEB_TOOL init
if [ $? -ne "0" ]
then
fail "Failed initializing EB"
fi
debug "Checking if eb exists and can connect. $AWSEB_TOOL status"
$AWSEB_TOOL --verbose status
if [ $? -ne "0" ]
then
fail "EB is not working or is not set up correctly"
fi
if [ -n "$WERCKER_ELASTIC_BEANSTALK_DEPLOY_DEBUG" ]
then
debug "git status: `git status`"
debug "git branch: `git branch`"
debug "aws version: `aws --version`"
debug "eb version: `$AWSEB_TOOL --version`"
debug "PWD=`pwd`"
debug "AWSEB_CONFIG_FILE=$AWSEB_CONFIG_FILE"
export GIT_TRACE=1
export GIT_CURL_VERBOSE=1
fi
# Deploy: check out the configured branch and push it to EB's git remote.
debug "Checking out the source from $WERCKER_ELASTIC_BEANSTALK_DEPLOY_BRANCH"
git checkout $WERCKER_ELASTIC_BEANSTALK_DEPLOY_BRANCH
if [ $? -ne "0" ]
then
fail "git checkout failed for branch $WERCKER_ELASTIC_BEANSTALK_DEPLOY_BRANCH"
fi
debug "Pushing to AWS eb servers."
git aws.push
if [ $? -ne "0" ]
then
fail "Unable to push to Amazon Elastic Beanstalk"
fi
success 'Successfully pushed to Amazon Elastic Beanstalk'
| true
|
076e301b39824c8f4853c542a33a433bd922fc50
|
Shell
|
nhtlongcs/AIC2021-TheFirstSwans
|
/run.sh
|
UTF-8
| 685
| 2.546875
| 3
|
[] |
no_license
|
#!/bin/bash --login
# End-to-end scene-text inference pipeline:
# detection (maskrcnn) -> recognition -> postprocess -> rename.
printf '%s\n' "Running inference script"

in_dir=/data/test_data
out_dir=/data/submission_output
printf '%s\n' "INP_IMG_DIR=$in_dir"
printf '%s\n' "OUT_IMG_DIR=$out_dir"

# Stage 1: detection; writes raw, thresholded and sorted boxes.
printf '%s\n' "Running maskrcnn..."
mkdir -p /workspace/output/det/
bash scripts/run_det.sh "$in_dir" /workspace/output/det/ /workspace/output/det/thrs/ /workspace/output/det/sort/

# Stage 2: recognition over the sorted detections.
printf '%s\n' "Running text recognition..."
bash scripts/run_reg.sh "$in_dir" /workspace/output/det/sort/ /workspace/output/predicted/

# Stage 3: postprocess predictions into the submission directory.
printf '%s\n' "Running postprocess..."
bash scripts/run_post.sh /workspace/output/predicted/ "$out_dir"

printf '%s\n' "rename files"
python scripts/rename.py
printf '%s\n' "Done"

# Open up permissions on outputs (globs intentionally unquoted).
chmod a+rwx */*
chmod a+rwx /data/*/*
| true
|
403e752521e1db8c17ffb89e275f235ac1d50b73
|
Shell
|
leroyron-zz/timeline-jscode
|
/bin/install_githooks.sh
|
UTF-8
| 263
| 3.015625
| 3
|
[] |
no_license
|
#!/bin/sh
# Creates symlinks inside .git/hooks
# This will clobber any existing hooks in .git/hooks
repo_dir="$(git rev-parse --show-toplevel)"
src_dir="$repo_dir/githooks"
hook_dir="$repo_dir/.git/hooks"
# Use short options: --verbose/--symbolic/--force are GNU extensions and
# fail under BSD/macOS ln, which this #!/bin/sh script may run with.
ln -vsf "$src_dir"/* "$hook_dir"
| true
|
a449a167a56e12f72f0727a62617d48a95dbc441
|
Shell
|
null267/danorz.com
|
/tools/bootstrap-aws.sh
|
UTF-8
| 451
| 3.25
| 3
|
[] |
no_license
|
#!/bin/bash
# Build the danorz-aws Docker image (matching the host user's UID/GID)
# and run `aws configure` inside it so credentials land in ~/.aws.
DOCKER_IMAGE=danorz-aws
DOCKER_AWS_USER=awsuser

# Helper for manual cleanup; not invoked automatically below.
cleanup () {
    docker image rm -f "$DOCKER_IMAGE"
}

docker build -t "$DOCKER_IMAGE" \
    --build-arg "UID=$(id -u)" \
    --build-arg "GID=$(id -g)" \
    --build-arg "USER=$DOCKER_AWS_USER" \
    aws

# use docker image to configure AWS creds
printf '\nPlease configure your AWS credentials...\n'
docker run --rm -itv "$HOME/.aws:/home/$DOCKER_AWS_USER/.aws" "$DOCKER_IMAGE" aws configure
| true
|
a49166b40bf4ab360e213a0d1bc5c87481bb90ab
|
Shell
|
hkoba/hktools
|
/nsenter-docker-image.zsh
|
UTF-8
| 683
| 3.609375
| 4
|
[] |
no_license
|
#!/bin/zsh
# Enter the namespaces of the running container started from IMAGE_NAME,
# using nsenter on the container's init PID (a docker-exec alternative).
emulate -L zsh
scriptFn=$0
function usage {
cat 1>&2 <<EOF
Usage: ${scriptFn:t} IMAGE_NAME [COMMAND...]
EOF
exit 1
}
function die { echo 1>&2 $*; exit 1 }
#----------------------------------------
# First arg is the image; remaining args are the command (default /bin/bash).
((ARGC)) || usage
imageName=$1; shift
((ARGC)) || argv=(/bin/bash)
# Resolve image -> running container id -> host PID of the container.
cid=$(docker container ls -q --filter ancestor=$imageName) || return 1
[[ -n $cid ]] || die "Can't find container for image $imageName"
pid=$(docker inspect --format '{{.State.Pid}}' $cid) || return 1
[[ -n $pid ]] || die "Can't find pid for container $cid"
# Escalate only when /proc/<pid>/ns is not readable by the current user.
sudo=()
[[ -r /proc/$pid/ns ]] || sudo=(sudo)
$sudo nsenter --target $pid --mount --uts --ipc --net --pid "$argv[@]" || return 1
| true
|
6282a23e079adad8e052dbae4d6fafa93d8f295e
|
Shell
|
Brambler/CSGO-Automation
|
/scripts/AutoDeploy/csgoDeploy.sh
|
UTF-8
| 3,027
| 3.21875
| 3
|
[] |
no_license
|
#!/bin/bash
# Install and configure a CS:GO dedicated server via steamcmd, then
# generate server.cfg interactively and create start/update scripts.
# FIX(review): this file contained an unresolved git merge conflict
# (<<<<<<< / ======= / >>>>>>> markers), which is a shell syntax error.
# Resolved by keeping the "Updated upstream" branch, and the earlier
# server.cfg appends were aligned with it (the script has already cd'd
# into CSGO-Automation/scripts/AutoDeploy, so the file is written with a
# plain relative path; the old CSGO-AUTOMATION/... paths did not exist).
clear
# Deps and screen install & update
dpkg --add-architecture i386
apt-get install gcc-multilib libstdc++6:i386 libgcc1:i386 zlib1g:i386 libncurses5:i386 libc6:i386 lib32stdc++6 -y
apt-get install lib32gcc1 -y
apt-get install screen -y
apt-get update && apt-get upgrade -y
clear
cd
read -p "Name your install dir: " installDir
clear
# Install Server Files
mkdir steam
cd steam/
wget http://media.steampowered.com/client/steamcmd_linux.tar.gz
tar xfvz steamcmd_linux.tar.gz
./steamcmd.sh +login anonymous +force_install_dir ../$installDir +app_update 740 validate +quit
cd ../
#hostname
cd CSGO-Automation/scripts/AutoDeploy/
clear
read -p "Enter the hostname (Name of your server): " hostnameVar
echo "hostname" "\"$hostnameVar\"" >> server.cfg
#ServerToken
clear
read -p "Enter your SteamGameServer TOKEN: " steamidVar
echo "sv_setsteamaccount" "\"$steamidVar\"" >> server.cfg
#Password (Yes or No)
clear
while true; do
    read -p "Do you wish to Password Protect your server? (Y or N): " yn
    case $yn in
        [Yy]* ) read -p "Enter your desired password: " passwordVar; echo "sv_password" "\"$passwordVar\"" >> server.cfg; break;;
        [Nn]* ) break;;
        * ) echo "Please answer yes or no.";;
    esac
done
#Rcon Password
clear
read -p "Enter your desired rcon password: " rconVar
echo "rcon_password" "\"$rconVar\"" >> server.cfg
cd
cp -r CSGO-Automation/scripts/AutoDeploy/server.cfg $installDir/csgo/cfg/
#Prac/Scrim Files
cp -r CSGO-Automation/scripts/AutoDeploy/csgo/addons $installDir/csgo/
cp -r CSGO-Automation/scripts/AutoDeploy/csgo/cfg/get5 $installDir/csgo/cfg/
cp -r CSGO-Automation/scripts/AutoDeploy/csgo/cfg/sourcemod $installDir/csgo/cfg/
clear
cd
rm -r CSGO-Automation/scripts/AutoDeploy
#Create Start and Update scripts
# Start
echo "#!/bin/bash" >> startServer.sh
echo "cd $installDir/" >> startServer.sh
echo "screen -A -m -d -S $installDir ./srcds_run -game csgo -console -usercon +game_type 0 +game_mode 1 +mapgroup mg_active -tickrate 128 +map de_cache -maxplayers_override 12 +sv_setsteamaccount $steamidVar -port 27015" >> startServer.sh
chmod +x startServer.sh
# Update
echo "#!/bin/bash" >> updateServer.sh
echo "killall screen" >> updateServer.sh
echo "cd steam/" >> updateServer.sh
echo "./steamcmd.sh +login anonymous +force_install_dir ./$installDir/ +app_update 740 validate +quit" >> updateServer.sh
chmod +x updateServer.sh
#Runs start script
./startServer.sh
echo "Server Install Complete"
sleep 2
clear
| true
|
3ae84b8ad073ecaefdac3022b64887a63ca94357
|
Shell
|
hmcts/cmc-citizen-frontend
|
/bin/ccd-import-definition.sh
|
UTF-8
| 1,698
| 3.640625
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Import a CCD case definition file into the definition store, retrying via
# the import-audit endpoint when the initial upload times out (HTTP 504).
# FIX(review): the script contained literal `$(unknown)` placeholders, which
# would execute a nonexistent `unknown` command; the otherwise-unused
# `filename` variable (basename of the input) is clearly what was intended.
set -eu
dir=$(dirname ${0})
filepath=${1}
filename=$(basename ${filepath})
# Timestamped name so each upload attempt is uniquely identifiable in audits.
uploadFilename="$(date +"%Y%m%d-%H%M%S")-${filename}"
userToken=$(${dir}/idam-lease-user-token.sh ${CCD_CONFIGURER_IMPORTER_USERNAME:-ccd.docker.default@hmcts.net} ${CCD_CONFIGURER_IMPORTER_PASSWORD:-Password12!})
serviceToken=$(${dir}/idam-lease-service-token.sh ccd_gw $(docker run --rm toolbelt/oathtool --totp -b ${CCD_API_GATEWAY_S2S_SECRET:-AAAAAAAAAAAAAAAC}))
# Upload; capture both the response body and the HTTP status code.
uploadResponse=$(curl --insecure --silent -w "\n%{http_code}" --show-error -X POST \
${CCD_DEFINITION_STORE_API_BASE_URL:-http://localhost:4451}/import \
-H "Authorization: Bearer ${userToken}" \
-H "ServiceAuthorization: Bearer ${serviceToken}" \
-F "file=@${filepath};filename=${uploadFilename}")
upload_http_code=$(echo "$uploadResponse" | tail -n1)
upload_response_content=$(echo "$uploadResponse" | sed '$d')
if [[ "${upload_http_code}" == '504' ]]; then
# Gateway timeout: the import may still complete server-side, so poll the
# audit log for our unique filename before declaring failure.
for try in {1..10}
do
sleep 5
echo "Checking status of ${filename} (${uploadFilename}) upload (Try ${try})"
audit_response=$(curl --insecure --silent --show-error -X GET \
${CCD_DEFINITION_STORE_API_BASE_URL:-http://localhost:4451}/api/import-audits \
-H "Authorization: Bearer ${userToken}" \
-H "ServiceAuthorization: Bearer ${serviceToken}")
if [[ ${audit_response} == *"${uploadFilename}"* ]]; then
echo "${filename} (${uploadFilename}) uploaded"
exit 0
fi
done
else
if [[ "${upload_response_content}" == 'Case Definition data successfully imported' ]]; then
echo "${filename} (${uploadFilename}) uploaded"
exit 0
fi
fi
echo "${filename} (${uploadFilename}) upload failed (${upload_response_content})"
exit 1;
| true
|
e5d0ed721f7780145121f915d85011162e2a68b2
|
Shell
|
MINAMISAMA/Castle-X
|
/ch6 slam&navigation/turtlebot/kobuki/kobuki_testsuite/scripts/batch_test.sh
|
UTF-8
| 775
| 2.734375
| 3
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#AUTHOR: Younghun Ju <yhju@yujinrobot.com>, <yhju83@gmail.com>
# Gyro performance sweep for the Kobuki base: for each total test angle
# (1..4 full revolutions) run gyro_perf.py across a grid of angular rates.
test_angle=360.0
angle=-150.0
# Outer loop: total rotation to test (360, 720, 1080, 1440 degrees).
for test_angle in `seq 360 360.0 $((360*4))`; do
# Inner loop: commanded angular rate, -150..150 deg/s in 5-degree steps.
for angle in `seq -150 5.0 150`; do
# Skip zero rate (no rotation to measure).
# NOTE(review): string comparison against "0.0" assumes seq emits one
# decimal place here -- confirm with the target seq implementation.
if [ $angle == 0.0 ]; then continue; fi
rosrun kobuki_testsuite gyro_perf.py\
cmd_vel:=/mobile_base/commands/velocity\
imu_data:=/mobile_base/sensors/imu_data\
angle_abs:=/scan_angle\
sound:=/mobile_base/commands/sound\
button:=/mobile_base/events/button\
_command_vx:=0.0\
_command_wz:=$angle\
_max_sample:=100\
_test_angle:=$test_angle
echo '--------------------------------------------------------------------------------'
# Pause between runs to let the robot settle.
sleep 10
done
echo '================================================================================'
done
| true
|
0336bd050c7d450cf2ca28054c8f43f4520807a7
|
Shell
|
dwladdimiroc/s4-class1
|
/apache-s4-0.6.0/subprojects/s4-benchmarks/bench-cluster.sh
|
UTF-8
| 3,473
| 3.359375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash -x
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements.  See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License.  You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Benchmark driver: cleans up remote hosts, starts a ZooKeeper-backed S4
# cluster on $HOSTS, then fans out injector processes on $INJECTOR_NODES.
HOSTS=$1
INJECTOR_CONFIGS=$2 # 1 injector injects to 1 stream. Use more injector configs for injecting to more streams
NODE_CONFIG=$3
NB_INJECTORS_PER_NODE=$4
INJECTOR_NODES=$5
BENCH_ROOTDIR=`pwd`
echo "hosts = $HOSTS"
# FIX(review): was "$INJECTOR_CONFIG" (singular), which is never set and
# printed an empty string; the parameter is $INJECTOR_CONFIGS.
echo "injector config file = $INJECTOR_CONFIGS"
echo "node config file = $NODE_CONFIG"
echo "bench root dir = $BENCH_ROOTDIR"
#########################################################
#### cleanup files and processes, and build platform
#########################################################
killall -9 java
cd $BENCH_ROOTDIR
rm -Rf measurements/*
$BENCH_ROOTDIR/../../gradlew -b=s4-benchmarks.gradle compileJava
$BENCH_ROOTDIR/../../gradlew -b=s4-benchmarks.gradle cp
# Count worker nodes and kill stale JVMs on each host.
NB_NODES=0
for host in $HOSTS
do
((NB_NODES++))
ssh $host "killall -9 java"
done
# Total injector task count = nodes x configs x injectors-per-node.
NB_INJECTORS=0
for injectorNode in $INJECTOR_NODES ; do
for INJECTOR_CONFIG in $INJECTOR_CONFIGS ; do
NB_INJECTORS=$(($NB_INJECTORS + $NB_INJECTORS_PER_NODE))
done
ssh $injectorNode "killall -9 java"
done
# must run from where ZooKeeper server is running (as specified in injector config file)
(cd $BENCH_ROOTDIR/../../ && ./s4 zkServer -clusters=c=testCluster1:flp=12000:nbTasks=$NB_INJECTORS,c=testCluster2:flp=13000:nbTasks=$NB_NODES &)
sleep 6
# Per-run results directory, named by timestamp; record the configuration.
BENCH=`date +"%Y-%m-%d--%H-%M-%S"`
BENCH_DIR=$BENCH_ROOTDIR/$BENCH
echo "bench dir is: $BENCH_DIR"
mkdir $BENCH
echo "nb nodes = $NB_NODES\n" > $BENCH/benchConf.txt
echo "hosts = $HOSTS" >> $BENCH/benchConf.txt
echo "injector config ">> $BENCH/benchConf.txt
for INJECTOR_CONFIG in $INJECTOR_CONFIGS ; do
cat $INJECTOR_CONFIG >> $BENCH/benchConf.txt
done
#########################################################
#### start S4 nodes
#########################################################
i=0
for host in $HOSTS
do
((i++))
if [ $host == "localhost" ] || [ $host == "127.0.0.1" ] ; then
$BENCH_ROOTDIR/startNode.sh $BENCH_ROOTDIR $NODE_CONFIG "localhost" > $BENCH_DIR/output_$i.log 2>$BENCH_DIR/s4err_$i.err < /dev/null &
else
ssh $host "$BENCH_ROOTDIR/startNode.sh $BENCH_ROOTDIR $NODE_CONFIG $host > $BENCH_DIR/output_$host-$i.log 2>$BENCH_DIR/s4err_$host-$i.err < /dev/null &"
fi
done
sleep 15
PROFILING_OPTS=""
#########################################################
#### start injectors
#########################################################
# NOTE(review): $ZK_SERVER is not set anywhere in this script -- presumably
# expected from the environment; confirm before relying on it.
for INJECTOR_NODE in $INJECTOR_NODES ; do
for INJECTOR_CONFIG in $INJECTOR_CONFIGS ; do
ssh $INJECTOR_NODE "cd $BENCH_ROOTDIR ; $BENCH_ROOTDIR/startInjector.sh $NB_INJECTORS_PER_NODE $INJECTOR_CONFIG $ZK_SERVER > $BENCH_DIR/out.injector_$INJECTOR_NODE.log 2>$BENCH_DIR/err.injector_$INJECTOR_NODE.log < /dev/null &"
done
done
| true
|
6bbb6d0c7148950b52ca860a44ee4a14559f0295
|
Shell
|
cwt1/scripts-1
|
/projFocus/ceRNA/runs/run05ResultCandireg.sh
|
UTF-8
| 10,156
| 3
| 3
|
[] |
no_license
|
#!/bin/bash
#$ -cwd
#By: J.He
#Desp: this is the running file for all coding testing in this folder
##run on selected know BRCA genes
# Shared environment and input files for the candidate-regulator runs below.
# geneUtilsRuns.sh presumably provides $PYTHON and $CDT -- confirm.
source /ifs/home/c2b2/ac_lab/jh3283/scripts/projFocus/ceRNA/geneUtilsRuns.sh
srcDir=/ifs/home/c2b2/ac_lab/jh3283/scripts/projFocus/ceRNA
##----pickle dump data
# ceRNA network and reference annotation used by every job.
cernet=/ifs/data/c2b2/ac_lab/jh3283/projFocus/other/brca_ceRNA_network.txt
refseqTsstse=/ifs/data/c2b2/ac_lab/jh3283/database/refseq/refseq_gene_hg19_selected_Mar22_Tsstse.tsv.single.tsv
# Voom-normalized tumor/normal expression matrices.
expTum=/ifs/data/c2b2/ac_lab/jh3283/projFocus/result/03102014/exp/brca_exp_l3_tumor_Mar-21-2014.matrix_Mar-26-2014.voomNormed.matrix
expNorm=/ifs/data/c2b2/ac_lab/jh3283/projFocus/result/03102014/exp/brca_exp_l3_normal_Mar-21-2014.matrix_Mar-26-2014.voomNormed.matrix
# $PYTHON /ifs/home/c2b2/ac_lab/jh3283/scripts/projFocus/ceRNA/processData/pickleDumpExp.py $expTum $refseqTsstse
# $PYTHON /ifs/home/c2b2/ac_lab/jh3283/scripts/projFocus/ceRNA/processData/pickleDumpExp.py $expNorm $refseqTsstse
gslist=/ifs/data/c2b2/ac_lab/jh3283/projFocus/result/05012014/gslist/gslist_CnvMethSomFree.10smapMore.deg_20140430.txt.hasReg.list
geneAnnofile=/ifs/data/c2b2/ac_lab/jh3283/database/refseq/refseq_gene_hg19_selected_Mar22_Tsstse.tsv.single.tsv
# Submit one SGE job running the key-regulator analysis (1000 permutations)
# for a single gene; skips submission if the output file already exists.
# Args: $1 - gene symbol. Uses globals: candiRegDir, CDT, PYTHON, cernet,
# expTum, expNorm, geneAnnofile, gslist.
qsubgKR_gene() {
gene=$1
if [ ! -d $candiRegDir/log ] ; then mkdir $candiRegDir/log ; fi
out=$candiRegDir/${gene}_candidateRegs_${CDT}.txt
nperm=1000
if [ ! -f $out ]; then
cmd="$PYTHON /ifs/home/c2b2/ac_lab/jh3283/scripts/projFocus/ceRNA/model/step2-1_getKeyReg_v4.py -c $cernet -p $nperm -g $gene -t $expTum -n $expNorm -a $geneAnnofile -l $gslist -o $out"
# Log the qsub job id and echo the most recent one for visibility.
echo $cmd |qsub -l mem=8g,time=6:: -N ${gene}.KeyReg -e $candiRegDir/log -o $candiRegDir/log -cwd >> $candiRegDir/qsubGKR.logs
tail -1 $candiRegDir/qsubGKR.logs
else
echo $out" existed "
fi
}
# Submit SGE jobs (1000 permutations, 160h wall time) for every gene listed
# in file $1, writing results under a temp directory named after the list.
# Skips genes whose output exists or which appear in pid_running.txt.
qsubGetKeyRegsSmall() {
dirName=`echo $1|awk -F "/" '{print $NF}'`
CWD=$candiRegDir/temp-$dirName
echo $CWD
if [ ! -d $CWD ] ; then mkdir $CWD ; fi
if [ ! -d $CWD/log ] ; then mkdir $CWD/log ; fi
cnt=0
while read gene
do
if [ ! -d $candiRegDir/log ] ; then mkdir $candiRegDir/log ; fi
# runFlag > 0 means a job for this gene is already running.
runFlag=0
if [ -f $candiRegDir/pid_running.txt ] ; then
runFlag=`grep -w $gene $candiRegDir/pid_running.txt|awk 'END{print NR}'`
fi
gene=$gene
out=$CWD/${gene}_candidateRegs.txt
nperm=1000
if [ ! -f $out ] && [ $runFlag -eq 0 ] ; then
cmd="$PYTHON /ifs/home/c2b2/ac_lab/jh3283/scripts/projFocus/ceRNA/model/step2-1_getKeyReg_v4.py -c $cernet -p $nperm -g $gene -t $expTum -n $expNorm -a $geneAnnofile -l $gslist -o $out"
echo $cmd |qsub -l mem=8g,time=160:: -N ${gene}.KeyReg -e $CWD/log -o $CWD/log -cwd >> $CWD/qsubGKR.logs
tail -1 $CWD/qsubGKR.logs
((cnt=cnt+1))
fi
done < $1
echo $cnt "submitted!"
}
# Same as qsubGetKeyRegsSmall but for genes with many regulators: only 100
# permutations per job to keep runtime tractable. Args: $1 - gene list file.
qsubGetKeyRegsBig() {
dirName=`echo $1|awk -F "/" '{print $NF}'`
CWD=$candiRegDir/temp-$dirName
echo $CWD
if [ ! -d $CWD ] ; then mkdir $CWD ; fi
if [ ! -d $CWD/log ] ; then mkdir $CWD/log ; fi
cnt=0
while read gene
do
if [ ! -d $candiRegDir/log ] ; then mkdir $candiRegDir/log ; fi
# Skip genes that already have a running job recorded.
runFlag=0
if [ -f $candiRegDir/pid_running.txt ] ; then
runFlag=`grep -w $gene $candiRegDir/pid_running.txt|awk 'END{print NR}'`
fi
gene=$gene
out=$CWD/${gene}_candidateRegs.txt
nperm=100
if [ ! -f $out ] && [ $runFlag -eq 0 ] ; then
cmd="$PYTHON /ifs/home/c2b2/ac_lab/jh3283/scripts/projFocus/ceRNA/model/step2-1_getKeyReg_v4.py -c $cernet -p $nperm -g $gene -t $expTum -n $expNorm -a $geneAnnofile -l $gslist -o $out"
echo $cmd |qsub -l mem=8g,time=160:: -N ${gene}.KeyReg -e $CWD/log -o $CWD/log -cwd >> $CWD/qsubGKR.logs
tail -1 $CWD/qsubGKR.logs
((cnt=cnt+1))
fi
done < $1
echo $cnt "submitted!"
}
# Run the key-regulator analysis for one gene locally (no qsub), with 100
# permutations. Args: $1 - gene symbol. Skips if output already exists.
localgKR_gene() {
gene=$1
if [ ! -d $candiRegDir/log ] ; then mkdir $candiRegDir/log ; fi
out=$candiRegDir/${gene}_candidateRegs_${CDT}.txt
if [ ! -f $out ]; then
cmd="$PYTHON /ifs/home/c2b2/ac_lab/jh3283/scripts/projFocus/ceRNA/model/step2-1_getKeyReg_v4.py -p 100 -c $cernet -g $gene -t $expTum -n $expNorm -a $geneAnnofile -l $gslist -o $out"
$cmd
else
echo $out" existed "
fi
}
# Run the (older v1) key-regulator script locally for each gene in file $1.
# NOTE(review): the assignment of $out is commented out below, so the
# [ ! -f $out ] test and the -o argument use whatever $out was previously
# set to (or empty) -- this looks like a latent bug; confirm before use.
localGetKeyRegs() {
CWD=$candiRegDir/temp-$1
if [ ! -d $CWD ] ; then mkdir $CWD ; fi
if [ ! -d $CWD/log ] ; then mkdir $CWD/log ; fi
while read gene
do
gene=$gene
echo "#-----------$gene-------------------"
# gslist=/ifs/data/c2b2/ac_lab/jh3283/projFocus/result/03102014/gslist/gslist_Mar-24-2014_CnvMethSomFree.10smapMore.deg_20140325.txt.10more.hasReg.list
# expTum=/ifs/data/c2b2/ac_lab/jh3283/projFocus/result/03102014/exp/brca_exp_l3_tumor_Mar-21-2014.matrix_Mar-26-2014.voomNormed.matrix
# expNorm=/ifs/data/c2b2/ac_lab/jh3283/projFocus/result/03102014/exp/brca_exp_l3_normal_Mar-21-2014.matrix_Mar-26-2014.voomNormed.matrix
# geneAnnofile=/ifs/data/c2b2/ac_lab/jh3283/database/refseq/refseq_gene_hg19_selected_Mar22_Tsstse.tsv.single.tsv
# cernet=/ifs/data/c2b2/ac_lab/jh3283/projFocus/other/brca_ceRNA_network.txt
# out=$CWD/${gene}_candidateRegs_${CDT}.txt
if [ ! -f $out ]; then
cmd="$PYTHON /ifs/home/c2b2/ac_lab/jh3283/scripts/projFocus/ceRNA/model/step2-1_getKeyReg.py -c $cernet -g $gene -t $expTum -n $expNorm -a $geneAnnofile -l $gslist -o $out"
$cmd > $CWD/${gene}.local_stdout
else
echo -e $out" existed\n please remove at first to redo!"
fi
done < $1
}
# List qsub stderr files whose size differs from the two known "clean" sizes
# (514/582 bytes), i.e. jobs that likely errored.
# NOTE(review): `print $` looks like a truncated field reference (perhaps
# $0 or $NF); as written awk will reject it -- confirm the intended field.
getErrGene(){
ls -alt temp-gslist.Gintset_Mar31.txt_*/log/*.e*|awk '$5!="514"&&$5!="582"{print $}'
}
###----------
# Step 1: split target genes into "small" (<=200 regulators) and "big"
# (>200 regulators) lists so they can be scheduled with different resources.
candiRegDir=/ifs/data/c2b2/ac_lab/jh3283/projFocus/result/05012014/candiReg/runApr30/
##step-1 devide target into small and large
gslistStat=/ifs/data/c2b2/ac_lab/jh3283/projFocus/result/05012014/gslist/gslist_CnvMethSomFree.10smapMore.deg_20140430.txt_stat.GintRegCount
tgSmall=$candiRegDir/tgene_small.txt
tgBig=$candiRegDir/tgene_big.txt
# awk '$2>200{print $1}' $gslistStat > $tgBig
# awk '$2<=200{print $1}' $gslistStat > $tgSmall
gslist=/ifs/data/c2b2/ac_lab/jh3283/projFocus/result/05012014/gslist/gslist_CnvMethSomFree.10smapMore.deg_20140430.txt.hasReg.list
# ~/bin/splitByN $tgSmall 150
# ~/bin/splitByN $tgBig 150
##step-2 run grplasso to both group
## test one gene
# qsubgKR_gene BCL9
# qsubgKR_gene ACTA2
# qsubGetKeyRegsSmall ${tgSmall}_1
# for i in `seq 8 20 `
# do
# qsubGetKeyRegsSmall ${tgSmall}_${i}
# sleep 280m
# done
# for i in `seq 1 3 `
# do
# qsubGetKeyRegsSmall ${tgBig}_${i}
# sleep 280m
# done
# qsubGetKeyRegsBig $candiRegDir/jobs.fail.05162014
##----debugging for genes without expression data
# Debug helper: for each *tumor.temp file in batch $1, compare the gene name
# embedded in the filename (4th "/_"-separated token) with the gene on line 2
# of the file; mismatches (genes lacking expression data) are appended to
# tgene_noExpdata.txt_$1.
checksubFolder() {
# for file in `ls -1 temp-tgene_small.txt_$1/*tumor.temp`
for file in `ls -1 temp-tgene_big.txt_$1/*tumor.temp`
do awk 'NR==2{split(FILENAME,a,"/|_"); if (a[4]!= $1) print a[4]}' $file >> tgene_noExpdata.txt_$1
done
}
# for i in `seq 1 20`
# for i in `seq 1 3`
# do
# checksubFolder $i &
# done
# cat tgene_noExpdata.txt_* > tgene_noExpdata.txt
##step2.4 check job running
#ls -1 temp-tgene_*/*txt|awk -F"-|_|/" '{print $5}' > jobs.done
# grep -v -w -f jobs.done tgene_small.txt > jobs.small.fail
# grep -w -f jobs.small.fail /ifs/data/c2b2/ac_lab/jh3283/projFocus/result/05012014/gslist/gslist_CnvMethSomFree.10smapMore.deg_20140430.txt_stat.GintRegCount |awk '$2 ==1{print $1}' > jobs.small.fail.1reg
# grep -w -f jobs.small.fail /ifs/data/c2b2/ac_lab/jh3283/projFocus/result/05012014/gslist/gslist_CnvMethSomFree.10smapMore.deg_20140430.txt_stat.GintRegCount |awk '$2>1{print $1}' > jobs.small.fail.gt1reg
# qsubGetKeyRegsSmall jobs.small.fail.gt1reg
##step-3 calculate summary
##-------runing the 47 remaining ones with large memory and long time
# Resubmit the remaining heavy genes with large memory (20g) and long wall
# time (140h), using the v5 analysis script and the older (March) gene list.
# Skips genes that already have any dated output file. Args: $1 - gene list.
qsubGetKeyRegs() {
# CWD=$candiRegDir/temp-bigTarget
CWD=$candiRegDir/temp-$1
if [ ! -d $CWD ] ; then mkdir $CWD ; fi
if [ ! -d $CWD/log ] ; then mkdir $CWD/log ; fi
cnt=0
while read gene
do
if [ ! -d $candiRegDir/log ] ; then mkdir $candiRegDir/log ; fi
gene=$gene
# NOTE(review): these reassign the globals set at the top of the script
# to the older 03102014 inputs for this rerun.
gslist=/ifs/data/c2b2/ac_lab/jh3283/projFocus/result/03102014/gslist/gslist_Mar-24-2014_CnvMethSomFree.10smapMore.deg_20140325.txt.10more.hasReg.list
geneAnnofile=/ifs/data/c2b2/ac_lab/jh3283/database/refseq/refseq_gene_hg19_selected_Mar22_Tsstse.tsv.single.tsv
out=$CWD/${gene}_candidateRegs_${CDT}.txt
oldout=$CWD/${gene}_candidateRegs_*2014.txt
if [ ! -f $oldout ] ; then
cmd="$PYTHON /ifs/home/c2b2/ac_lab/jh3283/scripts/projFocus/ceRNA/model/step2-1_getKeyReg_v5.py -c $cernet -g $gene -t $expTum -n $expNorm -a $geneAnnofile -l $gslist -o $out"
# cmd="$PYTHON /ifs/home/c2b2/ac_lab/jh3283/scripts/projFocus/ceRNA/model/step2-1_getKeyReg_v4.py -c $cernet -g $gene -t $expTum -n $expNorm -a $geneAnnofile -l $gslist -o $out"
echo $cmd |qsub -l mem=20g,time=140:: -N ${gene}.KeyReg -e $CWD/log -o $CWD/log -cwd >> $CWD/qsubGKR.logs
tail -1 $CWD/qsubGKR.logs
((cnt=cnt+1))
fi
done < $1
echo $cnt "submitted!"
}
## get all cancer gene
# grep -w -f $candiRegDir/tgene_small.txt /ifs/data/c2b2/ac_lab/jh3283/projFocus/result/03102014/gslist/CG_target_Mar-23-2014.list > /ifs/data/c2b2/ac_lab/jh3283/projFocus/result/05012014/candiReg/runApr30/summaryCG/cancergene.list
# grep -w -f $candiRegDir/tgene_big.txt /ifs/data/c2b2/ac_lab/jh3283/projFocus/result/03102014/gslist/CG_target_Mar-23-2014.list >> /ifs/data/c2b2/ac_lab/jh3283/projFocus/result/05012014/candiReg/runApr30/summaryCG/cancergene.list
# CWD=/ifs/data/c2b2/ac_lab/jh3283/projFocus/result/03102014/candiReg
# resDir=$CWD/run-Apr-1-2014
# # mkdir $resDir
# cd $resDir
# # cp $CWD/temp-*/*txt .
# $PYTHON /ifs/home/c2b2/ac_lab/jh3283/scripts/projFocus/ceRNA/model/getKeyRegStats.py -d $resDir -o $resDir/kegRegs_${CDT}.summary
# sort $resDir/kegRegs_${CDT}.summary.driverRegs.list |uniq > $resDir/kegRegs_${CDT}.summary.driverRegs.list.uniq
##step4---get some summary stats
# awk 'NR==FNR{a[$1]=$2;next}{print $1,a[$1],$2}' /ifs/data/c2b2/ac_lab/jh3283/projFocus/result/05012014/gslist/gslist_CnvMethSomFree.10smapMore.deg_20140430.txt_stat.GintRegCount candiReg_summary_05072014_v2.txt_0.01.regCount > candiReg_summary_05072014_v2.txt_0.01.regCount_GintReg_keyReg_05112014
| true
|
0bf895a7decf78304288d02b4bbba39d813d9911
|
Shell
|
ericnchen/fenics-tue
|
/recipes/mumps/build.sh
|
UTF-8
| 881
| 2.875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Conda recipe build script for MUMPS: build the static libraries, install
# them into $PREFIX, then build and run the bundled examples as smoke tests.
set -e
# Use the recipe-provided Makefile configuration.
cp "${RECIPE_DIR}/Makefile.inc" Makefile.inc
cp "${RECIPE_DIR}/examples_Makefile" examples/Makefile
export AR="${AR} vr "
# I don't think passing the -j NP flag results in successful builds 100%.
make alllib
# Manually install.
mkdir -p "${PREFIX}/lib" "${PREFIX}/include"
cp lib/*.a "${PREFIX}/lib/."
cp include/*.h "${PREFIX}/include/."
cd examples
make all
# Test the Fortran programs (single/double, real/complex precisions).
export LD_LIBRARY_PATH="${PREFIX}/lib"
mpirun -np "${CPU_COUNT}" ssimpletest < input_simpletest_real
mpirun -np "${CPU_COUNT}" dsimpletest < input_simpletest_real
mpirun -np "${CPU_COUNT}" csimpletest < input_simpletest_cmplx
mpirun -np "${CPU_COUNT}" zsimpletest < input_simpletest_cmplx
# Test the C programs.
mpirun -np "${CPU_COUNT}" c_example
# Test multiple precisions with the Fortran backend.
mpirun -np "${CPU_COUNT}" multiple_arithmetics_example
| true
|
461a641af46ea921ec54dfdb59230ecdc87a5da8
|
Shell
|
pinoylinuxguru/psec
|
/lynis-cron.sh
|
UTF-8
| 673
| 3.390625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Scheduled Lynis audit: run the scan non-interactively, archive the
# machine-readable report data, and mail the human-readable report.
#Be sure mailx is install -> rpm -qi mailx
#if not installed run -> yum install mailx -y
#Aeonmike - PinoyLinux - PCG
AUDITOR="automated"
DATE=$(date +%Y%m%d)
HOST=$(hostname -s)
LOG_DIR="/var/log/lynis"
REPORT="$LOG_DIR/report-${HOST}.${DATE}"
DATA="$LOG_DIR/report-data-${HOST}.${DATE}.txt"
LYNIS=/usr/bin/lynis

# Run Lynis, capturing its full console report.
"$LYNIS" audit system --auditor "$AUDITOR" --cronjob > "$REPORT"

# Optional step: move report file if it exists.
if [ -f /var/log/lynis-report.dat ]; then
    mv /var/log/lynis-report.dat "$DATA"
fi

# Send report via email.
MAIL=/usr/bin/mail
EMAILTO=mike.cabalin@gmail.com
"$MAIL" -s "Lynis Report for ${HOST}" "$EMAILTO" < "$REPORT"
# The End
| true
|
e34912fbac08b465332ca451c9d60a60a84ab3e0
|
Shell
|
jongwony/demos
|
/youtube-streamer/youtube-streamer
|
UTF-8
| 1,183
| 3.328125
| 3
|
[] |
no_license
|
#!/bin/bash
# Snap entry point: wait until the user has configured YouTube credentials,
# then stream the local webcam to YouTube Live via ffmpeg.
# Add pulseaudio subdirectory for ffmepg access
ARCH_TRIPLET=x86_64-linux-gnu
[ "$SNAP_ARCH" = "armhf" ] && ARCH_TRIPLET=arm-linux-gnueabihf
[ "$SNAP_ARCH" = "arm64" ] && ARCH_TRIPLET=aarch64-linux-gnu
export LD_LIBRARY_PATH=$SNAP/usr/lib/$ARCH_TRIPLET/pulseaudio/:$LD_LIBRARY_PATH
# Poll for the credentials file, printing setup instructions once.
CONFIG_DISPLAYED=false
cred_path=$SNAP_DATA/credentials
while [ ! -f $cred_path ]; do
if [ "$CONFIG_DISPLAYED" = "false" ]; then
# NOTE(review): the """...""" form concatenates alternating quoted and
# unquoted segments; the embedded quotes around the YAML values end and
# reopen the outer string -- fragile but apparently intentional.
echo """You need to configure this snap with your youtube credentials
run $ sudo snappy config $SNAP_NAME <cred_file>
<cred_file> should be of form:
config:
youtube-streamer:
YOUTUBE_URL: "rtmp://a.rtmp.youtube.com/live2/"
YOUTUBE_KEY: "yourcreds"
-------------------------------------------------------------------------
"""
CONFIG_DISPLAYED=true
fi
sleep 5
done
# source the actual file (provides YOUTUBE_URL and YOUTUBE_KEY)
. $cred_path
ffmpeg -thread_queue_size 1024 -re -ar 44100 -ac 2 -acodec pcm_s16le -f s16le -ac 2 -i /dev/zero -f v4l2 -s 1280x720 -r 10 -i /dev/video0 -vcodec libx264 -pix_fmt yuv420p -preset ultrafast -r 25 -g 300 -b:v 2500k -codec:a libmp3lame -ar 44100 -threads 6 -b:a 256K -bufsize 512k -f flv "$YOUTUBE_URL/$YOUTUBE_KEY"
| true
|
217288abeef78a0e3dcda9b714fe1fe908e3593e
|
Shell
|
jmsalter/harness-demo
|
/harness/hq/scripts/runPL.sh
|
UTF-8
| 1,099
| 2.96875
| 3
|
[] |
no_license
|
#!/bin/sh
# Trigger a Harness pipeline execution through the GraphQL API.
# Requires HARNESS_API_KEY and HARNESS_ACCOUNT_ID in the environment and the
# external `hq` tool to resolve application/pipeline names to IDs.
# POST a GraphQL payload (read from stdin) to the Harness gateway.
fn_run_query () {
curl -s \
-H 'x-api-key: '$HARNESS_API_KEY \
-X POST \
-H 'Content-Type: application/json' \
--data @- \
'https://app.harness.io/gateway/api/graphql?accountId='$HARNESS_ACCOUNT_ID
}
appName="Basic"
plName="simple-pipeline"
APP_ID=$(hq id/App.hql $appName)
PL_ID=$(hq id/Pipeline.hql $plName $APP_ID)
echo $APP_ID
echo $PL_ID
# Build the startExecution mutation; \u0021 is the JSON escape for "!"
# (GraphQL non-null marker), avoiding shell history-expansion issues.
cat <<_EOF_ | fn_run_query
{"query":"
mutation(\$startExecution: StartExecutionInput\u0021){
startExecution(input: \$startExecution){
clientMutationId
execution {
notes
status
id
}
}
}",
"variables":{
"startExecution": {
"notes": "Test GraphQL using runPL script",
"executionType": "PIPELINE",
"applicationId": "$APP_ID",
"entityId": "$PL_ID",
"variableInputs": [
{
"name": "myinfra1",
"variableValue": {
"type": "NAME",
"value": "tmp-infra"
}
},
{
"name": "myinfra2",
"variableValue": {
"type": "NAME",
"value": "tmp-infra"
}
}
]
}
}
}
_EOF_
| true
|
d0533903b6d4e5ad3ed9bf751b6553c20bcb46cc
|
Shell
|
ewon/efive
|
/config/busybox/default.script
|
UTF-8
| 1,343
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/sh
#
# udhcpc script
#
# Based on simple.script by Tim Riker <Tim@Rikers.org>
# Adapted by the IPCop team for use with IPCop installation
#
# $Id: default.script 5583 2011-04-01 06:49:06Z gespinasse $
#
# Invoked by udhcpc with $1 = event (deconfig/renew/bound) and lease details
# ($interface, $ip, $subnet, $broadcast, $router, $dns, ...) in the env.
[ -z "$1" ] && echo "Error: should be called from udhcpc" && exit 1
RESOLV_CONF="/etc/resolv.conf"
[ -n "$broadcast" ] && BROADCAST="broadcast $broadcast"
[ -n "$subnet" ] && NETMASK="netmask $subnet"
case "$1" in
deconfig)
# Lease lost: clear the interface address.
/sbin/ifconfig $interface 0.0.0.0
;;
renew|bound)
/sbin/ifconfig $interface $ip $BROADCAST $NETMASK
if [ -n "$router" ] ; then
# Remove every existing default route, then add one per offered
# router with increasing metric (first router preferred).
echo "deleting routers"
while route del default gw 0.0.0.0 dev $interface ; do
:
done
metric=0
for i in $router ; do
route add default gw $i dev $interface metric $((metric++))
done
fi
# Rewrite resolv.conf from the lease's search domain and DNS servers.
echo -n > $RESOLV_CONF
[ -n "$domain" ] && echo search $domain >> $RESOLV_CONF
for i in $dns ; do
echo adding dns $i
echo nameserver $i >> $RESOLV_CONF
done
# Persist selected lease parameters for other IPCop components.
DHCP_PARAMS="/etc/dhcp-$interface.params"
echo -n > $DHCP_PARAMS
[ -n "$serverid" ] && echo "SERVERID=$serverid" >> $DHCP_PARAMS
[ -n "$ip" ] && echo "IP=$ip" >> $DHCP_PARAMS
[ -n "$subnet" ] && echo "NETMASK=$subnet" >> $DHCP_PARAMS
[ -n "$hostname" ] && echo "HOSTNAME=$hostname" >> $DHCP_PARAMS
[ -n "$domain" ] && echo "DOMAIN=$domain" >> $DHCP_PARAMS
;;
esac
exit 0
| true
|
980ac7110afd26c69695f0d7f7065c0eecf1fc1d
|
Shell
|
kalenpw/DotFiles
|
/bin/install_desktop_files.sh
|
UTF-8
| 266
| 3.140625
| 3
|
[] |
no_license
|
#!/bin/bash
# Symlink every .desktop file from the repo's DesktopFiles directory into
# the user's local application launcher directory.
# NOTE(review): _check_proper_dir.sh's exit status is not checked here --
# presumably it only warns; confirm whether it should abort on failure.
./_check_proper_dir.sh
cd ..
configDir=$(pwd)
# not a guarantee this is being executed in right directory but should catch it most times
# Quote expansions so paths containing spaces survive intact.
for file in "$configDir"/DesktopFiles/*.desktop; do
    echo "$file"
    ln -s "$file" ~/.local/share/applications
done
| true
|
302c0b5d03003bbc242ad148c78ab88fb8aa6831
|
Shell
|
mantissaman/authentication-fast-api
|
/setvenv.sh
|
UTF-8
| 344
| 3.109375
| 3
|
[] |
no_license
|
#!/bin/bash
# Create (if needed) and activate a project-local virtualenv next to this
# script, then install the merged runtime + dev requirements into it.
# Resolve the directory containing this script.
pushd "$(dirname "$0")" > /dev/null
SCRIPTPATH=$(pwd -P)
popd > /dev/null
cd "$SCRIPTPATH"
echo "$SCRIPTPATH"
if [ ! -d "$SCRIPTPATH/env" ]
then
    echo "Creating Virtual Environment..."
    python3 -m venv env
fi
# FIX(review): activate unconditionally. The original only sourced the venv
# when it was freshly created, so subsequent runs installed packages with
# whatever pip3 was on PATH (typically the system Python) instead.
source env/bin/activate
# Merge app and dev requirements (sorted, de-duplicated) and install.
sort -um api/requirements.txt requirements-dev.txt > requirements.txt
pip3 install -r requirements.txt
| true
|
3d98a286b63218d5f4207bd86d26ddf5227bf7a7
|
Shell
|
nomlab/tools
|
/bin/matplot
|
UTF-8
| 1,855
| 3.921875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Wrapper that runs matplotlib-based Python scripts inside a self-built
# Docker image whose user matches the host user's UID, so files written to
# the bind-mounted work directory are owned by the invoker.
set -e
HOST_USER_ID=$(id -u)
HOST_USER_NAME=$(id -un)
IMG_NAME="matplot"
IMG_TAG="latest"
IMG_NAME_TAG="${IMG_NAME}:${IMG_TAG}"
function dockerfile() {
cat <<EOF
FROM ubuntu:22.04
RUN apt-get update
RUN apt-get install -y sudo vim python3 python3-pip
RUN apt-get install -y fonts-ipaexfont
RUN pip3 install pandas matplotlib scipy
RUN useradd -m -s /bin/bash -u $HOST_USER_ID $HOST_USER_NAME
RUN usermod -aG sudo $HOST_USER_NAME
RUN sed -i 's/^%sudo.*$/%sudo ALL=(ALL:ALL) NOPASSWD: ALL/' /etc/sudoers
RUN mkdir /home/$HOST_USER_NAME/work
RUN touch /home/$HOST_USER_NAME/.sudo_as_admin_successful
RUN chown -R ${HOST_USER_NAME}:${HOST_USER_NAME} /home/$HOST_USER_NAME
RUN sed -i "s/^#font\.family.*/font.family: IPAexGothic/g" /usr/local/lib/python*/dist-packages/matplotlib/mpl-data/matplotlibrc
USER $HOST_USER_NAME
WORKDIR /home/$HOST_USER_NAME/work
CMD ["bash"]
EOF
}
function dockerfile_signature() {
dockerfile | md5sum | sed 's/ .*//'
}
function img_signature() {
local name_and_tag="$1"
docker inspect "$name_and_tag" \
--format='{{.Config.Labels.dockerfile_signature}}' 2>/dev/null
}
function img_is_uptodate() {
local name_and_tag="$1"
test "$(dockerfile_signature)" = "$(img_signature "$name_and_tag")"
}
# Build Docker image locally.
# Usage: build_img "matplot:latest"
#
function build_img() {
local name_and_tag="$1"
dockerfile | \
docker build \
--label dockerfile_signature=$(dockerfile_signature) \
-t "$name_and_tag" -f - .
}
################################################################
## main
if ! img_is_uptodate "$IMG_NAME_TAG"; then
build_img "${IMG_NAME_TAG}"
fi
if [ $# -eq 0 ]; then
command="bash"
else
command="python3"
fi
docker run -t -i --rm -v .:/home/$HOST_USER_NAME/work \
--name "$IMG_NAME" "$IMG_NAME_TAG" "$command" "$@"
| true
|
1ed6b9f95ab6df8a4d5a78ca05b0f4addb1469d5
|
Shell
|
worldwide-asset-exchange/wax-boot-testnet
|
/ansible/roles/eos-node/scripts/get_wax-system-contracts.sh
|
UTF-8
| 829
| 3.6875
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Fetch, build, and test the WAX system contracts at a given version, then
# stage the built eosio.* contract artifacts for deployment.
#
# Usage: get_wax-system-contracts.sh <WAX System Contracts Version>
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
WAX_SYSTEM_CONTRACTS_VERSION=$1
DOWNLOAD_DIR="$SCRIPT_DIR/tmp"
RESULT_DIR="$SCRIPT_DIR/../files/contracts"
if [ -z "$WAX_SYSTEM_CONTRACTS_VERSION" ]; then
    echo "Syntax: $0 <WAX System Contracts Version>"
    exit 1
fi
# Prepare & download a fresh checkout of the requested branch/tag.
mkdir -p "$DOWNLOAD_DIR"
rm -rf "$DOWNLOAD_DIR/wax-system-contracts"
pushd "$DOWNLOAD_DIR"
git clone -b "$WAX_SYSTEM_CONTRACTS_VERSION" https://github.com/worldwide-asset-exchange/wax-system-contracts.git
# Build and test contracts inside the project's dev Docker image.
cd wax-system-contracts
make dev-docker-all
rc=$?
# Capture the status immediately: the original tested $? and then printed $?
# again in the message, by which point it held the [ test's own status.
if [ $rc -ne 0 ]; then
    echo "WAX System Contracts building/testing has failed. Code = $rc"
    exit 2
fi
# Ready for deployment: start from an empty staging directory.  (The original
# "rm -rf $RESULT_DIR/.*" only targeted dotfiles and expanded to "." / "..".)
rm -rf "$RESULT_DIR"
mkdir -p "$RESULT_DIR"
cp -r build/contracts/eosio.* "$RESULT_DIR"
popd
| true
|
5001c50b4638f4b373e072cd9738f7c89c2a932a
|
Shell
|
ArangoGutierrez/gitops-training-lab
|
/flux/setup.sh
|
UTF-8
| 772
| 2.59375
| 3
|
[] |
no_license
|
# Install Flux and the Helm Operator into the current cluster and point Flux
# at the user's fork of flux-get-started (GitOps training lab bootstrap).
#
# Pre-reqs:
# helm - https://helm.sh/docs/intro/install/
# fluxctl - https://docs.fluxcd.io/en/1.18.0/references/fluxctl.html (optional)
echo 'Now input your GitHub username: '
read GHUSER
# Register the Flux chart repo and install the Helm Operator CRDs first.
helm repo add fluxcd https://charts.fluxcd.io
kubectl apply -f https://raw.githubusercontent.com/fluxcd/helm-operator/master/deploy/crds.yaml
kubectl create namespace flux
# Install Flux, watching the user's fork over SSH.
helm upgrade -i flux fluxcd/flux --set git.url=git@github.com:${GHUSER}/flux-get-started --namespace flux
# The Helm Operator reuses Flux's deploy-key secret; Helm v3 charts only.
helm upgrade -i helm-operator fluxcd/helm-operator --set git.ssh.secretName=flux-git-deploy --namespace flux --set helm.versions=v3
# Block until the Flux deployment is ready.
kubectl -n flux rollout status deployment/flux
# Print the deploy key the user must add to the GitHub repo; fall back to
# scraping the pod log when fluxctl is not installed.
echo SSH KEY:
fluxctl identity --k8s-fwd-ns flux || kubectl -n flux logs deployment/flux | grep identity.pub | cut -d '"' -f2
| true
|
3e6d0c020e625e0be77bc56ef0d9c92ee5d7f0d5
|
Shell
|
garrettheath4/dotfiles-mac
|
/bin/minus
|
UTF-8
| 129
| 2.984375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Set difference on text files: print every line of AllLinesFile that does
# not match any pattern line in LinesToFilterFile.
if [ "$#" -eq 2 ]; then
    grep -vf "$2" "$1"
else
    echo "Usage: minus AllLinesFile LinesToFilterFile"
    exit 1
fi
| true
|
cc9241f60d624c9dd19144a521c6160ad9e0b312
|
Shell
|
instruct-br/puppet-toolkit
|
/git/puppet-git-hooks/commit_hooks/puppet_manifest_syntax_check.sh
|
UTF-8
| 1,220
| 3.984375
| 4
|
[
"GPL-2.0-only",
"MIT"
] |
permissive
|
#!/bin/bash
# Pre-commit hook helper: run `puppet parser validate` on one manifest and
# report (colourised) syntax errors; exits non-zero when errors were found so
# the commit is aborted.
#
# This script expects $1 to be passed and for $1 to be the filesystem location
# to a puppet manifest file for which it will run syntax checks against.
manifest_path="$1"
# "enabled" switches validation to Puppet's future parser.
USE_PUPPET_FUTURE_PARSER="$2"
syntax_errors=0
error_msg=$(mktemp /tmp/error_msg_puppet-syntax.XXXXX)
manifest_name="$manifest_path"
error_msg_filter="sed"
# NOTE(review): $ERRORS_ONLY is expected to be exported by the calling hook;
# when unset it expands empty and the `||` branch runs - confirm with caller.
# Get list of new/modified manifest and template files to check (in git index)
# Check puppet manifest syntax
$ERRORS_ONLY || echo -e "$(tput setaf 6)Checking puppet manifest syntax for $manifest_name...$(tput sgr0)"
if [[ $USE_PUPPET_FUTURE_PARSER != "enabled" ]]; then
puppet parser validate --color=false "$1" > "$error_msg"
else
puppet parser validate --parser future --color=false "$1" > "$error_msg"
fi
# $? here is the status of whichever `puppet parser validate` ran above.
if [[ $? -ne 0 ]]; then
syntax_errors=$((syntax_errors + 1))
# Re-emit puppet's captured message wrapped in red escape codes.
$error_msg_filter -e "s/^/$(tput setaf 1)/" -e "s/$/$(tput sgr0)/" < "$error_msg"
echo -e "$(tput setaf 1)Error: puppet syntax error in $manifest_name (see above)$(tput sgr0)"
fi
rm -f "$error_msg"
if [[ $syntax_errors -ne 0 ]]; then
echo -e "$(tput setaf 1)Error: $syntax_errors syntax error(s) found in puppet manifests. Commit will be aborted.$(tput sgr0)"
exit 1
fi
exit 0
| true
|
9e9f028757ff5c0caf2389bfa2d620082cd37d30
|
Shell
|
unixb0y/prjxray
|
/gridinfo/runme.sh
|
UTF-8
| 1,975
| 3.078125
| 3
|
[
"ISC",
"LicenseRef-scancode-dco-1.1"
] |
permissive
|
#!/bin/bash
# Generate a one-LUT Vivado design, have the sourced TCL scripts emit
# per-SLICE bitstream pairs, diff each pair, and assemble a grid database
# mapping SLICE names to their configuration bits.
set -ex
# Constraints: pin the six LUT inputs and the output, lock the cell to
# SLICEL.A6LUT, and enable per-frame CRC.  Unquoted EOT: $XRAY_* expand now.
cat > design.xdc << EOT
set_property -dict {PACKAGE_PIN $XRAY_PIN_00 IOSTANDARD LVCMOS33} [get_ports I[0]]
set_property -dict {PACKAGE_PIN $XRAY_PIN_01 IOSTANDARD LVCMOS33} [get_ports I[1]]
set_property -dict {PACKAGE_PIN $XRAY_PIN_02 IOSTANDARD LVCMOS33} [get_ports I[2]]
set_property -dict {PACKAGE_PIN $XRAY_PIN_03 IOSTANDARD LVCMOS33} [get_ports I[3]]
set_property -dict {PACKAGE_PIN $XRAY_PIN_04 IOSTANDARD LVCMOS33} [get_ports I[4]]
set_property -dict {PACKAGE_PIN $XRAY_PIN_05 IOSTANDARD LVCMOS33} [get_ports I[5]]
set_property -dict {PACKAGE_PIN $XRAY_PIN_06 IOSTANDARD LVCMOS33} [get_ports O]
set_property LOCK_PINS {I0:A1 I1:A2 I2:A3 I3:A4 I4:A5 I5:A6} [get_cells lut]
set_property -dict {IS_LOC_FIXED 1 IS_BEL_FIXED 1 BEL SLICEL.A6LUT} [get_cells lut]
set_property CFGBVS VCCO [current_design]
set_property CONFIG_VOLTAGE 3.3 [current_design]
set_property BITSTREAM.GENERAL.PERFRAMECRC YES [current_design]
EOT
# Design: a single LUT6 (AND of all six inputs, per the INIT value).
cat > design.v << EOT
module top(input [5:0] I, output O);
LUT6 #(.INIT(64'h8000000000000000)) lut (
.I0(I[0]),
.I1(I[1]),
.I2(I[2]),
.I3(I[3]),
.I4(I[4]),
.I5(I[5]),
.O(O)
);
endmodule
EOT
# Batch flow: synth/place/route, then the sourced scripts write the
# logicframes_SLICE_*_{0,1}.bit files and "--tiledata--" log lines used below.
cat > design.tcl << EOT
create_project -force -part $XRAY_PART design design
read_xdc design.xdc
read_verilog design.v
synth_design -top top
place_design
route_design
write_checkpoint -force design.dcp
source logicframes.tcl
source tiledata.tcl
EOT
rm -f design.log
vivado -nojournal -log design.log -mode batch -source design.tcl
# Collect tile data from the log plus, per SLICE, the set of ASCII-bit lines
# that differ between its two bitstream variants.
{
sed -e '/^--tiledata--/ { s/[^ ]* //; p; }; d;' design.log
for f0 in logicframes_SLICE_*_0.bit; do
f1=${f0%_0.bit}_1.bit
# Convert both bitstreams to ASCII bit lists with bitread.
${XRAY_BITREAD} -x -o ${f0%.bit}.asc $f0 > /dev/null
${XRAY_BITREAD} -x -o ${f1%.bit}.asc $f1 > /dev/null
f0=${f0%.bit}.asc
f1=${f1%.bit}.asc
# Derive the SLICE name from the file name.
n=${f0%_0.asc}
n=${n#logicframes_}
# Bits present only in the "_1" variant identify this SLICE's config bits.
echo SLICEBIT $n $( diff $f0 $f1 | grep '^>' | cut -c3-; )
done
} > grid-${XRAY_PART}-db.txt
python3 gridinfo-txt2json.py grid-${XRAY_PART}-db ${XRAY_PART}
| true
|
dfb7d15ac3b8ae2727d42d2a32a5a5c30b0b736c
|
Shell
|
sighttviewliu/ultrain-core-production
|
/vendor/pbc/release
|
UTF-8
| 2,150
| 3.640625
| 4
|
[
"GPL-1.0-or-later",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"LGPL-3.0-only",
"MIT"
] |
permissive
|
#!/bin/bash
# Release script for PBC: sanity-check the tree (clean git state, version
# mentioned in log/NEWS/docs), export a pristine copy, build a source
# tarball and a win32 binary zip, and test-build both.  Pass "test" as $1
# for a dry run that warns instead of aborting and skips packaging.
# perform sanity checks, make packages
# Extract x.y.z from the AC_INIT line of configure.ac.
VER=`grep AC_INIT configure.ac | sed 's/.*\[\([0-9]*\.[0-9]*\.[0-9]*\)\].*/\1/'`
echo Preparing new release: pbc-$VER
# Escape the dots so the version can be used as a literal grep pattern.
GREPVER=${VER//\./\\.}
if [[ $1 == "test" ]]; then
echo test run
TESTRUN=1
fi
# Refuse to release from a dirty working tree.
if [[ ! -z $(git diff) ]]; then
echo Uncommitted changes detected. Commit them first.
exit 1
fi
git log > ChangeLog
# The release must be mentioned near the top of the log.
cat ChangeLog | head -20 | grep pbc-$GREPVER > /dev/null || {
echo git log does not mention release
if [[ $TESTRUN ]]; then
echo test run: continuing anyway...
else
exit 1
fi
}
TMPDIR=`mktemp -d` || {
echo Error creating temp directory
exit 1
}
PBCDIR=$TMPDIR/pbc-$VER
echo Running setup...
# Export a pristine copy of HEAD into the temp dir.
git archive --format=tar --prefix=pbc-$VER/ HEAD | tar xvC $TMPDIR
HERE=`pwd`
# Pre-generate the flex/bison outputs so end users don't need those tools.
make -f simple.make pbc/parser.tab.c pbc/lex.yy.c
cp pbc/parser.tab.[ch] pbc/lex.yy.[ch] $PBCDIR/pbc
cp ChangeLog $PBCDIR
cd $PBCDIR
grep $GREPVER NEWS > /dev/null || {
echo NEWS does not mention release
if [[ $TESTRUN ]]; then
echo test run: continuing anyway...
else
cd $HERE
rm -rf $TMPDIR
exit 1
fi
}
grep $GREPVER doc/preface.txt > /dev/null || {
echo Error: cannot find $GREPVER in doc/preface.txt.
if [[ $TESTRUN ]]; then
echo test run: continuing anyway...
else
cd $HERE
rm -rf $TMPDIR
exit 1
fi
}
./setup || {
echo ./setup error
rm -rf $TMPDIR
exit 1
}
cd $TMPDIR
echo Creating tarball...
rm -rf $PBCDIR/autom4te.cache
if [[ $TESTRUN ]]; then
echo test run: not building tarball...
else
tar cvfz $HERE/pbc-$VER.tar.gz pbc-$VER
fi
cd $PBCDIR
# Smoke-test the exported tree: configure + make must succeed.
./configure || {
echo ./configure error
rm -rf $TMPDIR
exit 1
}
echo Testing make...
make || {
echo make error
rm -rf $TMPDIR
exit 1
}
make clean
# Cross-compile the win32 binaries via the simple makefile.
echo Cross compiling with simple.make...
PLATFORM=win32 colormake -f simple.make || {
echo mingw cross compile error
rm -rf $TMPDIR
exit 1
}
if [[ $TESTRUN ]]; then
echo test run: not building zip...
else
mkdir pbc-$VER
mv out/* param/* pbc-$VER
cp benchmark/REPORT.BAT pbc-$VER
zip -r $HERE/pbc-$VER-win32-bin.zip pbc-$VER
fi
rm -rf $TMPDIR
echo 'Now run ./publish!'
| true
|
d8c0503b4ac7f8617df4c8820ac3157fed723522
|
Shell
|
olimpiada-informatica/oie-cms-boxes
|
/TestBox/provisioning/scripts/73-configure-ranking.sh
|
UTF-8
| 706
| 3.484375
| 3
|
[] |
no_license
|
#!/bin/bash
# Add participant photos and team "flags" to the CMS ranking directory.
# -e: abort if any pipeline fails; -x: trace commands as they run.
# (Set via `set` rather than the shebang so the options survive being
# invoked as `bash script.sh`.)
set -ex
readonly user=${DEFAULT_USER-$(whoami)}
readonly SIM_DIR=/home/$user/datos-simulacro
# NOTE(review): CONTESTDIR appears unused here; kept for parity with the
# sibling provisioning scripts - confirm before removing.
readonly CONTESTDIR=/home/$user/cms-ejemplo-concurso
# Load the configuration (for the DB password, $DB_PASSWD).  Tracing is
# suspended so secrets are not echoed.  ([ -n ] && [ -f ] replaces the
# original's deprecated `-a` and unquoted expansions.)
if [ -n "$CONFIG_FILE" ] && [ -f "$CONFIG_FILE" ]; then
    set +x
    source "$CONFIG_FILE"
    set -x
fi
# Nothing to do unless this deployment is the test contest.
if [ -z "$CONCURSO_PRUEBA" ]; then
    exit 0
fi
# Finally, add the photos to the ranking directory.
addRankingImages.sh /var/local/lib/cms/ranking "$SIM_DIR/usuarios"
| true
|
319ce2d8f5ec9f66dadf26e0f2eea7b013335416
|
Shell
|
antigenius0910/home_migration
|
/final_rsync.sh
|
UTF-8
| 2,245
| 3.34375
| 3
|
[] |
no_license
|
#!/bin/sh
# Final pass of a home-directory migration: rsync each local user's home into
# /home.new, chown the copy to the matching Active Directory ("AD") account
# when one exists, and comment the user out of the local passwd/shadow/group
# files.  Service accounts are re-pointed at /home.old instead of copied.
# NOTE(review): uses [[ =~ ]] and other bashisms under #!/bin/sh - only
# works where /bin/sh is bash; confirm on the target hosts.
set -x
# Users whose passwd line mentions "home" (substring match - may also catch
# unrelated entries; verify against the fleet's passwd files).
LOCAL_USERS=$(cat /etc/passwd | grep -i home | grep "^[^#;]" | sed s"/:..*//")
for LOCAL_USER in $LOCAL_USERS
do
if [[ $LOCAL_USER =~ ^(git|gitlab|oracle)$ ]] ; then
#echo "service account found! $LOCAL_USER. don't rsync directories"
echo "service account found! $LOCAL_USER. point home directory to /home.old/$LOCAL_USER"
sed -i -e "s/home\/$LOCAL_USER/home.old\/$LOCAL_USER/" /etc/passwd
else
#do final rsync for all user
rsync -avz /home/$LOCAL_USER/ /home.new/home/$LOCAL_USER/$LOCAL_USER@$HOSTNAME/
# Look up the user's AD uid and the AD admin account's gid; empty when the
# account does not exist in AD.
AD_USER_UID=$(id $LOCAL_USER@sparkcognition.com | sed s'/uid=//' | sed s'/(..*//')
AD_USER_GID=$(id admin@sparkcognition.com | sed s'/uid..*gid=//' | sed s'/(..*//')
LOCAL_USER_UID=$(id $LOCAL_USER | sed s'/uid=//' | sed s'/(..*//')
LOCAL_USER_GID=$(id $LOCAL_USER | sed s'/uid..*gid=//' | sed s'/(..*//')
cd /home.new/home/$LOCAL_USER
#if no AD user UID then keep orignial else apply AD UID and GID
if [[ -z "$AD_USER_UID" ]]; then
echo "No AD account!"
chown -R $LOCAL_USER_UID:$LOCAL_USER_GID $LOCAL_USER@$HOSTNAME
cd /home.new/home/
chown $LOCAL_USER_UID:$LOCAL_USER_GID $LOCAL_USER; chmod 700 $LOCAL_USER
#cut off local users who has no record in AD
# NOTE(review): the sed pattern is an unanchored substring - a user name
# that is a substring of another entry comments out unrelated lines; confirm.
sed -e "/$LOCAL_USER/ s/^#*/#/" -i /etc/passwd
sed -e "/$LOCAL_USER/ s/^#*/#/" -i /etc/shadow
sed -e "/$LOCAL_USER/ s/^#*/#/" -i /etc/group
else
echo 'AD account exsit!'
chown -R $AD_USER_UID:$AD_USER_GID $LOCAL_USER@$HOSTNAME
cd /home.new/home/
chown $AD_USER_UID:$AD_USER_GID $LOCAL_USER; chmod 700 $LOCAL_USER
#cut off user from local to AD
sed -e "/$LOCAL_USER/ s/^#*/#/" -i /etc/passwd
sed -e "/$LOCAL_USER/ s/^#*/#/" -i /etc/shadow
sed -e "/$LOCAL_USER/ s/^#*/#/" -i /etc/group
fi
fi
done
#echo 'then for system'
#echo '!!!!!!!!!!!!!!!'
exit 0
| true
|
77372fa0b92fe47de40644814abf1cfadbb2d310
|
Shell
|
cms-sw/cmssw
|
/OnlineDB/SiStripO2O/test/O2O_Validation/fedcabling_validation.sh
|
UTF-8
| 3,779
| 3.25
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# start the Validation of FEDCabling
# Compares the SiStripFedCabling payload in a sqlite file against the ORCOFF
# tag for a given run by dumping both via cmsRun and diffing the DcuId lines.
# Set up the CMSSW runtime environment.
eval `scramv1 runtime -sh`
### check if everything needed to connect to DB is there
# NOTE(review): precondition failures exit 0, not non-zero - callers cannot
# distinguish "skipped" from "succeeded"; confirm this is intended.
if [ "$CONFDB" == "" ];
then echo "\$CONFDB not set, please set it before You continue"; exit 0;
else echo "\$CONFDB="$CONFDB;
fi
if [ "$TNS_ADMIN" == "" ];
then echo "\$TNS_ADMIN not set, please set it before You continue"; exit 0;
else echo "\$TNS_ADMIN="$TNS_ADMIN;
fi
# An SSH tunnel on port 10121 must exist (checked by grepping ps output).
if [ `ps aux |grep 10121|wc -l` -lt 2 ];
then echo "No Tunnel to cmsusr active, please activate before starting!"; exit 0;
fi
#describe what this script does
echo -e "\n-------------------------------------------------------------------------------------"
echo "#This scripts validates the SiStripFedCabling O2O";
echo "#It awaits a sqlite.db file and assumes that only one Tag is there for the FedCabling";
echo "#If this is not the case please change inline the sqlite_partition variable!";
echo -e "-------------------------------------------------------------------------------------\n"
#needed infos to run script
if [ $# -lt 3 ];
then echo "Usage: "
echo "./Cabling_Validation.sh \"dbfile\" runnr \"tag_orcoff\""
exit 0;
fi
#set input variables to script variables
dbfile_name=$1;
runnr=$2
tag_orcoff=$3;
echo -e "Sqlite Tag for run "$runnr" is retrieved from "$dbfile_name" !\n";
# Assumes exactly one FedCabling tag exists in the sqlite file (see banner).
sqlite_tag=`cmscond_list_iov -c sqlite_file:$dbfile_name | grep FedCabling`;
# create .py files
# Instantiate the config template for both the sqlite and the ORCOFF source.
cat template_Validate_FEDCabling_O2O_cfg.py | sed -e "s@template_runnr@$runnr@g" | sed -e "s@template_database@sqlite_file:$dbfile_name@g" | sed -e "s@template_tag@$sqlite_tag@g">> validate_sqlite_cfg.py
cat template_Validate_FEDCabling_O2O_cfg.py | sed -e "s@template_runnr@$runnr@g" | sed -e "s@template_database@oracle://cms_orcoff_prod/CMS_COND_21X_STRIP@g" | sed -e "s@template_tag@$tag_orcoff@g">> validate_orcoff_cfg.py
#cmsRun
cmsRun validate_sqlite_cfg.py > "Reader_"$runnr"_sqlite.txt"
cmsRun validate_orcoff_cfg.py > "Reader_"$runnr"_orcoff.txt"
#check if cmsRun was ok
# A successful dump contains the reader's VERBOSE DEBUG marker line.
if [ `cat Reader_"$runnr"_sqlite.txt | grep "\[SiStripFedCablingReader::beginRun\] VERBOSE DEBUG" | wc -l` -lt 1 ]
then echo "There is a problem with cmsRun for the sqlite file: validate_sqlite_cfg.py! Please check the file";
exit 0;
fi
if [ `cat Reader_"$runnr"_orcoff.txt | grep "\[SiStripFedCablingReader::beginRun\] VERBOSE DEBUG" | wc -l` -lt 1 ]
then echo "There is a problem with cmsRun for the orcoff file: validate_orcoff_cfg.py! Please check the file ";
exit 0;
fi
#Validation procedure
# No DcuId lines unique to either dump means the O2O transfer matched.
if [ `diff "Reader_"$runnr"_sqlite.txt" "Reader_"$runnr"_orcoff.txt" | grep "> DcuId"| sort -u | wc -l` -lt 1 ];
then if [ `diff "Reader_"$runnr"_sqlite.txt" "Reader_"$runnr"_orcoff.txt" | grep "< DcuId"| sort -u |wc -l` -lt 1 ];
then echo -e '\033[1;32m'"No Difference between OrcOff FEDCabling and sqlite FEDCabling, O2O was successful!!!"`tput sgr0`;
fi;
else echo -n -e '\033[1;31m'"File Reader_"$runnr"_orcoff.txt contains ";
echo -n `diff Reader_"$runnr"_orcoff.txt Reader_"$runnr"_sqlite.txt | grep "> DcuId"| sort -u | wc -l`;
echo " differing lines! Check Your O2O !!!"`tput sgr0`;
echo -n -e '\033[1;31m'"File Reader_"$runnr"_sqlite.txt contains ";
echo -n `diff Reader_"$runnr"_sqlite.txt Reader_"$runnr"_orcoff.txt | grep "< DcuId"| sort -u | wc -l`;
echo " differing lines! Check Your O2O !!!"`tput sgr0`;
# Persist the differing DcuId tokens for later inspection.
echo "Attaching diff to File: dcudetid_diff_"$runnr".txt!!!" ;
touch dcudetid_diff_$runnr.txt;
for i in `diff Reader_"$runnr"_orcoff.txt Reader_"$runnr"_sqlite.txt | grep DcuId| sort -u`;
do echo $i >> dcudetid_diff_$runnr.txt;
done;
fi;
#clean up
rm "Reader_"$runnr"_sqlite.txt";
rm "Reader_"$runnr"_orcoff.txt";
rm validate_sqlite_cfg.py;
rm validate_orcoff_cfg.py;
| true
|
6f46ca74710b409f504468f33b5035ec0b852992
|
Shell
|
ljukas/dotfiles
|
/bkup/.zshrc
|
UTF-8
| 747
| 2.84375
| 3
|
[] |
no_license
|
#
# Executes commands at the start of an interactive session.
#
# Authors:
# Sorin Ionescu <sorin.ionescu@gmail.com>
#
# Source Prezto.
if [[ -s "${ZDOTDIR:-$HOME}/.zprezto/init.zsh" ]]; then
source "${ZDOTDIR:-$HOME}/.zprezto/init.zsh"
fi
# Customize to your needs...
# Python virtualenv
source /usr/bin/virtualenvwrapper.sh
# Aliases
alias update="sudo pacman -Syu"
alias ya="yaourt"
alias vim="nvim"
alias vi="nvim"
# Leave the active virtualenv and return to $HOME.
alias workoff="deactivate; cd"
# This loads RVM into a shell session.
[[ -s "$HOME/.rvm/scripts/rvm" ]] && . "$HOME/.rvm/scripts/rvm"
# Add electron to path, for linter in atom
# NOTE(review): this appends the electron *binary* path, not its directory -
# looks unintended but is kept as-is; confirm before changing.
export PATH="$PATH:/usr/lib/electron/electron"
# Add RVM to PATH for scripting. Make sure this is the last PATH variable change.
export PATH="$PATH:$HOME/.rvm/bin"
| true
|
3c3095e428549c3c3f78a5fddd41ce962d3e6e80
|
Shell
|
waltflanagan/dotfiles-old
|
/script/install_stephen.sh
|
UTF-8
| 311
| 2.59375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Download and install the QLStephen Quick Look generator, then refresh the
# Quick Look daemon so it is picked up.
# Abort on any failure: the original carried on after a failed download and
# would unzip/install garbage while still restarting Quick Look.
set -e
# Private work dir (not named TMPDIR, which mktemp itself honours).
workdir=$(mktemp -d .XXXXXXXXX)
# Clean up the work dir and downloaded zip on every exit path.
cleanup() { rm -rf "$workdir" QLStephen.qlgenerator.zip; }
trap cleanup EXIT
# -f: treat HTTP errors as failures instead of saving the error page.
curl -f -O http://cloud.github.com/downloads/whomwah/qlstephen/QLStephen.qlgenerator.zip
unzip -d "$workdir" QLStephen.qlgenerator.zip
mv "$workdir/QLStephen.qlgenerator" ~/Library/QuickLook/
# Reload the Quick Look generator list.
qlmanage -r
| true
|
ee1c1a279f2c3bf13ea80b76b91b709e0fc4f1fe
|
Shell
|
mudasobwa/bash-dropbox
|
/ff_synch_up
|
UTF-8
| 1,497
| 3.625
| 4
|
[] |
no_license
|
#!/bin/bash
# shell script to upload watched items (incron handler): mirror a changed
# local file into the encrypted remote tree, or remove it on delete.
# @param $1 - the file to be uploaded (fully qualified)
# @param $2 - an action to be done on file (an incron IN_* event name)
# load default configuration
source /etc/forkforge.conf
# The ff_* helpers (ff_log, ff_compare, ff_encrypt, ff_get_remote_dir and the
# incron entry management) come from here; their semantics live in that file.
source /usr/bin/ff_functions
# load user-specific config
if [ -f $HOME_DIR/.forkforgerc ] ; then
source $HOME_DIR/.forkforgerc
fi
# Online check is currently disabled; the remote is always treated as mounted.
#IS_ONLINE=$(mount | grep $REMOTE_DIR)
#IS_OL=$?
IS_OL=0
# get the full path (/home/am/blah/foo/bah/zee.zo)
W_LOCAL=$(readlink -e -s -n $1)
shift
# Bail out if either the event name or the resolved path is missing.
if [ x$1 == x -o x$W_LOCAL == x ] ; then
# shit happens
exit 201
fi
W_REMOTE=$(ff_get_remote_dir $W_LOCAL)
case $1 in
IN_CREATE|IN_MOVED_TO|IN_MODIFY)
if [ $IS_OL -eq 0 ] ; then
if [ -f $W_LOCAL ] ; then
ff_log "Is file"
mkdir -p $(dirname $W_REMOTE)
ff_log "Made $(dirname $W_REMOTE)"
# ff_compare presumably returns non-zero when local and remote differ;
# confirm against ff_functions.
ff_compare $W_LOCAL
if [ $? -gt 0 ] ; then
ff_log "Not compared"
# The incron watch is dropped while processing and re-added afterwards -
# presumably to avoid re-triggering this handler; confirm.
ff_rem_incron_entry $W_LOCAL
ff_log "Removed incron entry"
ff_encrypt $W_LOCAL > $W_REMOTE
ff_log "Encrypted"
ff_add_incron_entry $W_LOCAL
ff_log "Added incron entry"
fi
else
# Directory event: mirror the directory itself.
mkdir -p $W_REMOTE
fi
fi
;;
IN_DELETE|IN_MOVED_FROM)
# FIXME PREVENT LOOP HERE !!!
if [ $IS_OL -eq 0 ] ; then
rm -rf $W_REMOTE
fi
;;
IN_DELETE_SELF|IN_MOVE_SELF)
# The watched item itself went away: drop its watch and its remote copy.
ff_rem_incron_entry $W_LOCAL
if [ $IS_OL -eq 0 ] ; then
rm -rf $W_REMOTE
fi
;;
*)
exit 103
;;
esac
| true
|
4df3032d595ecfafef14cedf8b17f74246eb8094
|
Shell
|
TryKatChup/SistemiOperativi_Template
|
/Bash/rec_search.sh
|
UTF-8
| 2,218
| 3.5625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Template of recursive directory-walking idioms (shell exercise notes).
# Placeholder bodies use ':' no-ops so the file parses as valid bash; the
# original left comment-only bodies, which is a bash syntax error.

# Recurse over the entries of $1: handle files, descend into directories.
for file in "$1"/*; do
    if [[ -f "$file" ]]; then
        : # do things with the file here
    elif [[ -d "$file" ]]; then
        echo "Ricorsione nella directory $file"
        # Recursive call, forwarding the remaining arguments.
        "$0" "$file" "$2" "$3"
    fi
done

##############################################
#############      NOTE WELL      #############
##############################################
# Without `cd $1`, the files examined one by one carry only their bare
# name, not an absolute path - so be careful with pwd.
# Use pwd only when required, or after `cd $1` to refer to the current dir.

## Check that a file (bare name, no absolute path) starts with a given string
if [[ $(basename "$file") == "$2"* ]]; then
    : # Insert code here
fi

# Iterate over all subdirectories
directories=()
while IFS= read -r -d $'\0'; do
    directories+=("$REPLY")
done < <(find "$1" -type d -print0)
for dir in "${directories[@]}"; do
    echo "$dir"
done

# Iterate over all files in a directory BUT ONLY THE CURRENT ONE
files=()
while IFS= read -r -d $'\0'; do
    files+=("$REPLY")
done < <(find "$1" -maxdepth 1 -type f -print0)
for file in "${files[@]}"; do
    echo "$file"
done

# Iterate over all files in a directory AND SUBDIRECTORIES
files=()
while IFS= read -r -d $'\0'; do
    files+=("$REPLY")
done < <(find "$1" -type f -print0)
for file in "${files[@]}"; do
    echo "$file"
done

# Iterate over all subdirectories and ALL THE FILES in those subdirectories
directories=()
while IFS= read -r -d $'\0'; do
    directories+=("$REPLY")
done < <(find "$1" -type d -print0)
for dir in "${directories[@]}"; do
    files=()
    while IFS= read -r -d $'\0'; do
        files+=("$REPLY")
    done < <(find "$dir" -type f -print0)
    for file in "${files[@]}"; do
        echo "$file"
    done
done

# Operate on all subdirectories and "save" them based on a condition
# (variation of the pattern above with code inside).
directories=()
while IFS= read -r -d $'\0'; do
    directories+=("$REPLY")
done < <(find "$1" -type d -print0)
for dir in "${directories[@]}"; do
    conta_files=$(find "$dir" -maxdepth 1 -type f -name "$2*" 2>/dev/null | wc -l)
    if [ "$conta_files" -gt "$3" ]; then
        echo "$dir" "$conta_files" >>"$PWD"/esito.out
    fi
done
| true
|
66e88ff0dbab2a9f4a0cba15b5b757a1a7ffdd28
|
Shell
|
christian-tl/monitor-shell
|
/restartApi_all.sh
|
UTF-8
| 685
| 3.921875
| 4
|
[] |
no_license
|
#!/bin/bash
# Restart the API on every host listed in a file, pausing between hosts.
# (The original shebang was "#/bin/bash" - missing '!' - so the script ran
# under the caller's shell instead of bash.)
#
# Usage: restartApi_all.sh -h <host-ip-file> [interval-seconds]

# Print usage to stdout.
helpInfo(){
    echo "Usage: $(basename "$0") [options] [ <interval> ]"
    echo "options are:"
    echo " -h : host ip file"
}

# Numeric comparison: the original's [[ $# < 2 ]] compared lexicographically.
if [[ $# -lt 2 ]]; then
    helpInfo
    exit 1
fi

while getopts 'h:' OPT; do
    case $OPT in
    h)
        CONF_FILE="$OPTARG";;
    ?)
        helpInfo
        exit 1
    esac
done

# Optional third argument: seconds to wait between hosts, minimum 10.
INTERVAL=$3
INTERVAL=${INTERVAL:=10}
if [ "$INTERVAL" -lt 10 ]; then
    INTERVAL=10
fi

start1=$(date +%s)
# Iterate over non-comment, non-empty lines of the host file.
for CUR_HOST in $(cat "$CONF_FILE" | egrep -v ^# | egrep -v ^$)
do
    printf "restarting: %-16s ...... " "$CUR_HOST"
    ./restartApi.sh "$CUR_HOST"
    sleep "$INTERVAL"
    printf "OK\n"
done
end1=$(date +%s)
echo "All Finished. elapse: $(( end1 - start1 ))s"
| true
|
c59600119c110572e0f50eaa7068864b18c8a780
|
Shell
|
jennafin/flic-lifx
|
/start.sh
|
UTF-8
| 2,958
| 4.125
| 4
|
[] |
no_license
|
#!/bin/bash
################################################################################
#
# This script is just an easy way to start the server and client in one command
#
# 1. Kill any running server processes
# 2. Kill any running client processes
# 3. Sleep for 3 seconds to give the kill commands a chance to finish
# 4. Start up the server process
# 5. Wait 15 seconds for user to input credentials (only really required for Ubuntu I think)
# If the user fails to enter credentials within the 15 second timeframe this script
# will exit.. you'll have to start it up again and be faster next time ;)
# 6. Start up client process with LIFX light type - Hue support to come later
#
################################################################################
print_help() {
cat << EOF
Usage: ${0##*/} [-hc]
Starts flic server and starts up button client
-h display this help and exit
-c start client in config mode. Config mode retrieves light data and prints to console
EOF
}
config_mode_on=false
while getopts "hc" opt; do
case $opt in
h)
print_help
exit 0
;;
c)
config_mode_on=true
;;
*)
print_help
exit 1
;;
esac
done
# Kill any running server or client processes
# The [f]licd bracket trick keeps the grep from matching itself in ps output.
found_server=$(ps aux | grep '[f]licd')
found_client=$(ps aux | grep '[c]lientlib/client.py')
if [ "$found_server" ]; then
sudo kill $(ps aux | grep '[f]licd' | awk '{print $2}');
fi
if [ "$found_client" ]; then
sudo kill $(ps aux | grep '[c]lientlib/client.py' | awk '{print $2}');
fi
# Need to sleep here before starting up the server process so it isn't killed by commands above
sleep 3
pushd /home/pi/Documents/flic-lifx
# Start server in separate terminal - supports ubuntu for testing purposes
# Architecture picks both the binary directory and the terminal emulator.
if [[ $(arch) == "x86_64" ]]; then
pushd bin/x86_64 &> /dev/null
gnome-terminal -e 'sudo ./flicd -f flic.sqlite3'
popd &> /dev/null
elif [[ $(arch) == "armv7l" ]]; then
pushd bin/armv6l &> /dev/null
lxterminal -e 'sudo ./flicd -f flic.sqlite3'
popd &> /dev/null
else
echo "Unsupported OS"
exit
fi
echo "The server has been launched in another terminal."
echo "It may require your credentials, please check..."
echo
# Wait 15 seconds so user can enter credentials - spin from http://stackoverflow.com/questions/12498304/using-bash-to-display-a-progress-working-indicator
# 150 iterations x 0.1s sleep = the 15-second credential window.
spin='-\|/'
j="0"
while [ $j -lt 150 ]
do
i=$(( (i+1) %4 ))
printf "\r${spin:$i:1}"
sleep .1
j=$[$j+1]
done
# Start up light control client using voltos LIFXToken bundle.
# Make sure you have set TOKEN in your voltos bundle to
# your LIFX cloud token from cloud.lifx.com.
# Instructions for using voltos found at voltos.io
voltos use LIFXToken
if $config_mode_on; then
voltos run "python3 clientlib/client.py -c LIFX"
else
voltos run "python3 clientlib/client.py LIFX"
fi
| true
|
1e676102753cb1252af3e7c559e46ac0c6a4ccc8
|
Shell
|
bob-token/git-create
|
/git-create.sh
|
UTF-8
| 343
| 3.078125
| 3
|
[] |
no_license
|
#!/bin/sh
# Create a GitHub repository with the given name, initialise the current
# directory as a git repo, and push the first commit to it.
#
# Usage: git-create.sh <repo-name>
repo_name=$1
# Quoted: the original's unquoted `test -z $repo_name` only "worked" because
# a bare `test -z` happens to be true; multi-word names made it error.
test -z "$repo_name" && echo "Repo name required." 1>&2 && exit 1
# Creates the repo via the GitHub API (prompts for the account's password).
curl -u 'xhban520@gmail.com' https://api.github.com/user/repos -d "{\"name\":\"$repo_name\"}"
git init
# Keep this helper script itself out of the new repository.
echo "git-create.sh" >.gitignore
git add .
git commit -am'first init'
git remote add origin "https://github.com/bob-token/$repo_name.git"
git push origin master
| true
|
dc2ae4a6e77935ce74c3186cf7ad53430065a043
|
Shell
|
GeekOncall-Workflow/test-environment
|
/box-scripts/puppet-master
|
UTF-8
| 322
| 2.53125
| 3
|
[] |
no_license
|
#!/bin/bash
# Pull the puppet-r10k repo (running its installer when this box has never
# had it), then apply the site manifest for the configured environment.
fresh_install=false
[ -d "${gitDir}/puppet-r10k" ] || fresh_install=true
PullProjectRepo "puppet-r10k"
if $fresh_install
then
    ${gitDir}/puppet-r10k/install.sh
fi
puppet apply /etc/puppet/environments/${puppetEnvironment}/manifests/site.pp "--modulepath=/etc/puppet/environments/${puppetEnvironment}/modules"
| true
|
4bcf63bd97151b5a144f05333aae22b906e18ecd
|
Shell
|
paulfryzel/linux-dotfiles
|
/bash_profile
|
UTF-8
| 634
| 2.78125
| 3
|
[] |
no_license
|
# Login-shell setup: append tool directories to PATH, source optional SDK
# hooks when present, then fall through to .bashrc for interactive config.
PATH=$PATH:$HOME/bin
PATH=$PATH:$HOME/workspace/google/depot_tools
PATH=$PATH:/opt/dart/dart-sdk/bin
# Yarn global bin
PATH=$PATH:$HOME/.yarn/bin
# Idris
PATH=$PATH:$HOME/.cabal/bin
PATH=$PATH:$HOME/Workspace/idris-sandbox/.cabal-sandbox/bin
# Miniconda3
PATH=$PATH:$HOME/miniconda3/bin
# Google Cloud
# Adds gcloud to PATH and enables its bash completion when the SDK exists.
if [ -d $HOME/google-cloud-sdk ]; then
PATH=$PATH:$HOME/google-cloud-sdk/bin
. $HOME/google-cloud-sdk/completion.bash.inc
. $HOME/google-cloud-sdk/path.bash.inc
fi
# OPAM
# `|| true` keeps a broken opam init from failing the login shell.
if [ -d $HOME/.opam ]; then
. $HOME/.opam/opam-init/init.sh > /dev/null 2> /dev/null || true
fi
if [ -f $HOME/.bashrc ]; then
. $HOME/.bashrc
fi
| true
|
ac4b03c8c69f1caaf2885ee684ca4528b081f018
|
Shell
|
iodar/confluence-backup-cleanup
|
/test-prep.sh
|
UTF-8
| 839
| 3.984375
| 4
|
[] |
no_license
|
#!/bin/bash
# Create ten fake Confluence backup archives (empty files) next to this
# script, as fixtures for testing clean-up.sh.

# Directory containing this script; all files are created there.
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
cd "$DIR"

# Fixed timestamps embedded in the generated file names.
DATES=("20190918140810" "20190918140811" "20190918140812" "20190918140814" "20190918140815" "20190918140816" "20190918140817" "20190918140818" "20190918140819" "20190918140820")

# Current wall-clock time as YYYYmmddHHMMSS, without a trailing newline.
function get_date_string {
    printf '%s' "$(date +"%Y%m%d%H%M%S")"
}

# Print a backup-style file name built from the DATES entry at index $1.
function create_fake_file_name {
    local idx=$1
    printf 'confluence-%s-backup.zip' "${DATES[idx]}"
}

# Touch one fixture file per DATES entry.
function main {
    local i
    for ((i = 0; i < 10; i++)); do
        touch "$(create_fake_file_name "$i")"
    done
}

echo "creating test folders in $DIR"
main
echo "... done"
| true
|
61f66dd8de1a1fcd1408fb106dae0ad0d34a916e
|
Shell
|
katsifolis/bin
|
/mikrine.sh
|
UTF-8
| 262
| 3.6875
| 4
|
[
"Unlicense"
] |
permissive
|
#!/bin/bash
# Naive CSS "minifier": strips all newlines, carriage returns, tabs and
# spaces from $1 and writes the result to $2.  NOTE: this also removes
# meaningful spaces (e.g. descendant selectors) - intentionally crude.
if [[ -n $1 ]]; then
    # Read via redirection instead of a useless cat; quote the path.
    f=$(tr -d "\n\r\t " < "$1")
    if [[ -n $2 ]]; then
        # printf '%s\n' with quoting: the original's unquoted `echo $f`
        # glob-expanded patterns like the universal selector '*' against
        # files in the current directory, corrupting the output.
        printf '%s\n' "$f" > "$2"
    else
        echo "NOT DESTINATION FILE GIVEN :)"
    fi
else
    echo "NO FILE GIVEN"
fi
| true
|
5b9443a8699c2f84385df3cd453ba49b2e527375
|
Shell
|
jpka/dotfiles
|
/install.zsh
|
UTF-8
| 469
| 2.765625
| 3
|
[] |
no_license
|
#!/bin/zsh
# Bootstrap dotfiles: install oh-my-zsh when missing, then link/copy the
# repo's zsh, vim, and Sublime Text 2 configs into the home directory.
local OHMY=$HOME/.oh-my-zsh
# Install oh-my-zsh on first run only.
if ! test -d $OHMY
then wget --no-check-certificate https://github.com/robbyrussell/oh-my-zsh/raw/master/tools/install.sh -O - | sh
fi
local CUSTOM=$OHMY/custom
# Replace the custom dir with a symlink into this repo.
rm -rf $CUSTOM
ln -s -f $PWD/oh-my-zsh $CUSTOM
# Hard links for the rc files, so edits propagate both ways.
ln -f .zshrc $HOME/.zshrc
ln -f .vimrc $HOME/.vimrc
cp z.sh $HOME/.
# Sublime Text 2 config: replace with a symlink into this repo.
local ST2C=$HOME/.config/sublime-text-2
rm -rf $ST2C
ln -s -f $PWD/sublime-text-2 $ST2C
# Vim config: likewise.
local VIM=$HOME/.vim
rm -rf $VIM
ln -s -f $PWD/.vim $VIM
|
ff347ade419275dd1d2c31d12f3e67b6984ef3fd
|
Shell
|
miketheprogrammer/dkxyz14-maestro
|
/deploy/python.sh
|
UTF-8
| 1,529
| 3.15625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#!/bin/bash
# Deploy the "hello-python" app to a maestro controller on docker-machine
# host "c1" by POSTing a JSON application definition, then (optionally)
# scale the previous version down to zero.
# $1 - scale (instance count) for the new version
# $2 - host port to bind the container's 9090/tcp to
# $3 - name suffix of the new version
# $4 - (optional) name suffix of the old version to bring down
echo "$1"
echo '$1'
# JSON payload: a single-quoted template with the shell args spliced in.
json='{
"name": "hello-python'$3'",
"image": "miketheprogrammer/hello-python",
"command": "",
"scale": '$1',
"docker_options": {
"ExposedPorts": {
"9090/tcp": {
}
},
"Env": [
"MSG=hello-from-mike"
],
"HostConfig": {
"PortBindings": {
"9090/tcp": [
{
"HostPort": "'$2'"
}
]
}
}
},
"options": {}
}'
# The command is echoed for logging, then re-evaluated to actually run it.
echo curl -H \"Content-Type: application/json\" -X POST -d "'$json'" http://$(docker-machine ip c1):8080/application
eval $(echo curl -H \"Content-Type: application/json\" -X POST -d "'$json'" http://$(docker-machine ip c1):8080/application)
if [ -z "$4" ]
then
echo "There is no old version"
else
echo "bringing down old version in 10 seconds"
sleep 0
# Same payload shape, but for the old version name with scale 0.
json='{
"name": "hello-python'$4'",
"image": "miketheprogrammer/hello-python",
"command": "",
"scale": 0,
"docker_options": {
"ExposedPorts": {
"9090/tcp": {
}
},
"Env": [
"MSG=hello-from-mike"
],
"HostConfig": {
"PortBindings": {
"9090/tcp": [
{
"HostPort": "'$2'"
}
]
}
}
},
"options": {}
}'
echo curl -H \"Content-Type: application/json\" -X POST -d "'$json'" http://$(docker-machine ip c1):8080/application
eval $(echo curl -H \"Content-Type: application/json\" -X POST -d "'$json'" http://$(docker-machine ip c1):8080/application)
fi
| true
|
7e916ddd29ffca60b5ed76620a263d76e6bf27b3
|
Shell
|
mompccc/COMP9041-Software-Construction-Techniques-and-Tools
|
/test1/htm2html.sh
|
UTF-8
| 180
| 3.328125
| 3
|
[] |
no_license
|
#!/bin/sh
# Rename every *.htm file in the current directory to *.html, aborting if a
# rename would overwrite an existing .html file.
for file in *.htm
do
    # With no matches the glob stays literal; skip that placeholder.
    [ -e "$file" ] || continue
    temp="${file%.htm}.html"
    if [ -f "$temp" ]; then
        echo "${file%.htm}.html exists"
        exit 1
    fi
    # mv is portable; the original used `rename`, whose syntax differs
    # between the perl and util-linux implementations, and hid its errors
    # with 2>/dev/null.
    mv -- "$file" "$temp"
done
| true
|
0ad9fe6e10360409978cfbd3c7900db59faba138
|
Shell
|
rcv-legado/Gravitation
|
/launch.sh
|
UTF-8
| 177
| 2.734375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Run the Monte Carlo simulation for every combination of the first
# parameter (0.5, 1.5, ..., 59.5) and the second parameter (1..5).
# LC_NUMERIC=C makes seq print dot decimals directly; the original piped
# every value through `sed 's/,/./'` to undo comma decimal separators
# (and pointlessly did the same to the integer j).
for i in $(LC_NUMERIC=C seq 0.5 59.5); do
    for j in 1 2 3 4 5; do
        python Monte_Carlo.py "$i" "$j"
    done
done
| true
|
ab4082b53dd9d1477d505002b3e50c9154850eab
|
Shell
|
mamontp/CheckTwoMasternodesBot
|
/stop_start_Bot.sh
|
UTF-8
| 1,117
| 3.609375
| 4
|
[] |
no_license
|
#!/bin/bash
# init-style control script for the CheckTwoMasternodesBot.py process:
# start | stop | restart | status.
# Report whether the bot is running.  The ps|grep|grep -v grep pipeline is
# both the test and, as a side effect, prints the matching ps line.
status() {
if ps -ef |grep CheckTwoMasternodesBot.py |grep -v grep
then
echo "CheckTwoMasternodesBot.py start"
else
echo "CheckTwoMasternodesBot.py not start"
fi
}
# Start the bot in the background unless it is already running; its stdout
# goes to CheckTwoMasternodesBot.out.
start() {
echo "CheckTwoMasternodesBot.py starting"
if ps -ef |grep CheckTwoMasternodesBot.py |grep -v grep
then
echo "CheckTwoMasternodesBot.py already running"
else
nohup ./CheckTwoMasternodesBot.py > CheckTwoMasternodesBot.out &
fi
}
# Stop the bot: kill matching PIDs, wait 5s, then kill again - presumably
# to catch stragglers; the second xargs kill errors harmlessly when
# nothing is left (confirm intent).
stop() {
echo "CheckTwoMasternodesBot.py stoping"
if ps -ef |grep CheckTwoMasternodesBot.py |grep -v grep
then
ps -ef |grep CheckTwoMasternodesBot.py |grep -v grep | awk '{print $2}' | xargs kill
sleep 5
ps -ef |grep CheckTwoMasternodesBot.py |grep -v grep | awk '{print $2}' | xargs kill
else
echo "Not start CheckTwoMasternodesBot.py"
fi
}
# Dispatch on the first argument.
case "$1" in
'start') start;;
'stop') stop;;
'restart') stop ; echo "Sleeping..."; sleep 1 ;
start;;
'status') status;;
*) echo
echo "Usage: $0 { start | stop | restart | status }"
echo
exit 1
;;
esac
exit 0
| true
|
5e8b2b13561467ac554926e4f7d368a81a03b979
|
Shell
|
georgemarshall/python-plans
|
/python2/ipaddress/plan.sh
|
UTF-8
| 783
| 2.765625
| 3
|
[] |
no_license
|
# Habitat plan for the Python 2 backport of the `ipaddress` module.
pkg_name=ipaddress
pkg_distname=${pkg_name}
pkg_version=1.0.18
pkg_origin=python2
pkg_license=('Python-2.0')
pkg_maintainer="George Marshall <george@georgemarshall.name>"
pkg_description="IPv4/IPv6 manipulation library"
pkg_upstream_url=https://github.com/phihag/ipaddress
pkg_dirname=${pkg_distname}-${pkg_version}
pkg_source=https://pypi.org/packages/source/i/ipaddress/${pkg_dirname}.tar.gz
pkg_shasum=5d8534c8e185f2d8a1fda1ef73f2c8f4b23264e8e30063feeb9511d492a413e1
pkg_deps=(
python2/python
)
pkg_build_deps=(
python2/setuptools
)
# Join PYTHONPATH entries with ':' when environments are composed.
pkg_env_sep=(
['PYTHONPATH']=':'
)
# Build callback: compile the unpacked sdist in place.
do_build() {
python setup.py build
}
# Install callback: install under the package prefix and expose its
# site-packages directory on PYTHONPATH.
do_install() {
add_path_env 'PYTHONPATH' "$PYTHON_SITE_PACKAGES"
python setup.py install \
--prefix="$pkg_prefix" \
--no-compile \
--old-and-unmanageable # bypass egg install
}
| true
|
9aad2c4a7239bf30a7c6028653627c23cb89f324
|
Shell
|
madworx/docker-remoteswinglibrary
|
/docker-entrypoint.sh
|
UTF-8
| 1,302
| 3.609375
| 4
|
[] |
no_license
|
#!/bin/bash
# Container entrypoint: run Robot Framework tests (or, with -c, an arbitrary
# command) under a virtual X display while recording the screen with ffmpeg
# and exposing it over VNC.
# Append every jar in /usr/local/lib to PYTHONPATH, ':'-joined.
[ -z "${PYTHONPATH}" ] || PYTHONPATH="${PYTHONPATH}:"
export PYTHONPATH=${PYTHONPATH}$(JARS=(/usr/local/lib/*.jar) ; IFS=:; echo "${JARS[*]}")
: ${DEFAULT_DEPTH:=24}
# $RESOLUTION must be <width>x<height>[x<depth>]; depth defaults to 24.
if [[ $RESOLUTION =~ ^([0-9]+)x([0-9]+)(x([0-9]+))?$ ]] ; then
RESOLUTION=${BASH_REMATCH[1]}x${BASH_REMATCH[2]}x${BASH_REMATCH[4]:-${DEFAULT_DEPTH}}
FFMPEGRES=${BASH_REMATCH[1]}x${BASH_REMATCH[2]}
else
cat 1>&2 <<EOF
ERROR: Incorrect format for \$RESOLUTION variable, should be <width>x<height>[x<depth>].
If <depth> is not specified, it will default to ${DEFAULT_DEPTH}.
E.g. 1024x768x16
512x512
640x480x8
EOF
exit 1
fi
: ${VIDCAP_FPS:=30}
echo "Starting with resolution: ${RESOLUTION}."
echo "Recording video at ${VIDCAP_FPS} fps."
cd /home/robot
# -c: run the remaining args verbatim; otherwise pass them to robot.
if [ "$1" == "-c" ] ; then
shift
CMD="$*"
else
CMD="/usr/local/bin/robot -d output $*"
fi
# Inside xvfb-run: window manager, screen recorder, and VNC server in the
# background, then the command itself; its exit status is captured and
# propagated after the background jobs are sent SIGINT.  Escaped \$ forms
# are expanded by the inner bash, unescaped ones by this script.
xvfb-run -s "-screen 0 ${RESOLUTION}" -a bash -c "
twm &
ffmpeg -framerate ${VIDCAP_FPS} -video_size ${FFMPEGRES} -f x11grab -i \${DISPLAY} -vcodec libx264 -preset ultrafast -qp 0 -pix_fmt yuv444p -r 25 -filter:v \"setpts=4.0*PTS\" output/video-capture.mkv > output/ffmpeg.log 2>&1 &
x11vnc -nolookup -forever -usepw </dev/null >/dev/null 2>&1 &
eval ${CMD}
ROBOT_STATUS=\$?
kill -INT \$(jobs -p)
exit \$ROBOT_STATUS
"
| true
|
57330f4e2cf7a679f1945e37564f60c2a37c8568
|
Shell
|
b115030/Codin
|
/src/com/loopShellScripts/whileloop/MagicNumber.sh
|
UTF-8
| 855
| 3.921875
| 4
|
[] |
no_license
|
#!/bin/bash -x
# Interactive number-guessing game (binary search): the user thinks of a
# number 1..100, the script guesses, and the user answers greater (1),
# less (2), or equal (3) after each guess.
read -p "think of a number between 1 to 100"
n=50
ul=100 # Initializing upper limit to 100
ll=1 # Initializing lower limit to 1
while [ $n -ge 1 -a $n -le 100 ] # loop while the guess stays in range
do
echo " greater than or less than or equal to [ g-1 /l-2 /e-3 ? ]" $n # Choose an option
read p
if [ $p -eq 3 ] # If we found the number then stop
then
break
elif [ $p -eq 1 ] # user's number is greater than the guess
then
ll=$n # new lower limit is the current guess
num=$((($ul-$n)/2)) # half of the distance from the guess to the upper limit
n=$(($ul-$num)) # next guess: midpoint of (n, ul), biased upward
elif [ $p -eq 2 ] # user's number is less than the guess
then
ul=$n # new upper limit is the current guess
num=$((($n-$ll) / 2)) # half of the distance from the lower limit to the guess
n=$(($ll+$num)) # next guess: midpoint of (ll, n), biased downward
fi
done
| true
|
fa784ee19d3664f07a55f2ed245c640e105d5a4c
|
Shell
|
echasseriaud/bump-version-bash
|
/main.sh
|
UTF-8
| 851
| 3.828125
| 4
|
[] |
no_license
|
#!/bin/bash -l
set -euo pipefail
main() {
  # Bump one component of a semantic version stored in a file.
  #
  # Arguments:
  #   $1 - path to the file holding the current version, whose content may
  #        be the literal string "first" for the very first build
  #   $2 - component to bump: major | minor | patch (anything else leaves
  #        the version unchanged, matching the original behaviour)
  # Outputs:
  #   progress messages and a GitHub Actions "::set-output" line carrying
  #   the new version, all on stdout
  local path result step re
  path="$1"
  step="$2"
  echo "path to version-file: $path"
  # Quote the path so file names containing spaces do not word-split.
  result=$(cat "$path")
  echo "current version: $result"
  if [ "$result" != "first" ]
  then
    # Capture MAJOR.MINOR.PATCH; a trailing pre-release suffix is tolerated
    # by the fourth group but otherwise ignored.
    re='[^0-9]*\([0-9]*\)[.]\([0-9]*\)[.]\([0-9]*\)\([0-9A-Za-z-]*\)'
    MAJOR=$(echo "$result" | sed -e "s#$re#\1#")
    MINOR=$(echo "$result" | sed -e "s#$re#\2#")
    PATCH=$(echo "$result" | sed -e "s#$re#\3#")
    # NOTE: a major/minor bump intentionally does NOT reset the lower
    # components, preserving the original script's behaviour.
    case "$step" in
      major)
        MAJOR=$((MAJOR + 1))
        ;;
      minor)
        MINOR=$((MINOR + 1))
        ;;
      patch)
        PATCH=$((PATCH + 1))
        ;;
    esac
  else
    echo "that is the first build !!!"
    MAJOR=1
    MINOR=0
    PATCH=0
  fi
  echo "new version: $MAJOR.$MINOR.$PATCH"
  echo ::set-output name=value::"$MAJOR.$MINOR.$PATCH"
}
main ${INPUT_PATH} ${INPUT_STEP}
| true
|
4feac76aa3c885b8cfac1dddc3d990e929e8973b
|
Shell
|
rid9/r1-linux
|
/pcre/pkg.sh
|
UTF-8
| 676
| 3.0625
| 3
|
[] |
no_license
|
#!/bin/bash
# Build recipe for PCRE 8.41; the shared functions.sh in the parent
# directory supplies the build driver that calls the hooks below.
PKG_DIR=$(realpath "${BASH_SOURCE[0]%/*}")
source "$(realpath "$PKG_DIR/..")/functions.sh"
SOURCE_URLS=(
"https://downloads.sourceforge.net/pcre/pcre-8.41.tar.bz2"
)
# Hook: extend the configure flags consumed by the shared build driver.
before_configure() {
CONFIGURE_FLAGS+=(
"--docdir=/usr/share/doc/pcre-8.41"
"--enable-unicode-properties"
"--enable-pcre16"
"--enable-pcre32"
"--enable-pcregrep-libz"
"--enable-pcregrep-libbz2"
"--enable-pcretest-libreadline"
"--disable-static"
"--enable-jit"
)
}
# Hook: move the runtime library to /lib (needed early at boot) and
# re-point the /usr/lib symlink at it.
after_install() {
$SUDO mv -v /usr/lib/libpcre.so.* /lib &&
$SUDO ln -sfv ../../lib/$(readlink /usr/lib/libpcre.so) /usr/lib/libpcre.so
}
| true
|
ebce51ba820f55abcb11a5be303be1bd91dd5ddc
|
Shell
|
onknows/terraform-azure-openshift
|
/scripts/install.sh
|
UTF-8
| 1,508
| 3.375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Prepare and (in principle) deploy an OpenShift cluster on Azure:
# clone the terraform repo, template the ansible inventory, then run the
# openshift-ansible playbooks.
# Arguments: $1 node count, $2 admin user, $3 master domain.
set -e
NODE_COUNT=$1
ADMIN_USER=$2
MASTER_DOMAIN=$3
if [ ! -d "terraform-azure-openshift" ]; then
echo "Cloning terraform-azure-openshift Github repo..."
git clone https://github.com/onknows/terraform-azure-openshift.git
fi
cd terraform-azure-openshift
git pull
chmod 600 certs/*
cp -f certs/openshift.key ansible/openshift.key
cp -f templates/host-preparation-inventory ansible/inventory/hosts
# Inventory template uses a zero-based node index upper bound.
NODE_MAX_INDEX=$((NODE_COUNT-1))
sed -i "s/###NODE_COUNT###/$NODE_MAX_INDEX/g" ansible/inventory/hosts
sed -i "s/###ADMIN_USER###/$ADMIN_USER/g" ansible/inventory/hosts
#### EXIT
# NOTE(review): this unconditional exit makes everything below dead code —
# presumably a temporary debugging measure; confirm before relying on the
# deployment steps that follow.
exit
#### EXIT
cd ansible
ansible-playbook -i inventory/hosts host-preparation.yml
cd ../..
if [ ! -d "openshift-ansible" ]; then
echo "Cloning openshift-ansible Github repo..."
git clone https://github.com/openshift/openshift-ansible.git
fi
cd openshift-ansible
git pull
cp -f ../terraform-azure-openshift/certs/openshift.key openshift.key
cp -f ../terraform-azure-openshift/templates/openshift-inventory openshift-inventory
# Append one inventory line per app node.
INDEX=0
while [ $INDEX -lt $NODE_COUNT ]; do
printf "node$INDEX openshift_hostname=node$INDEX openshift_node_labels=\"{'role':'app','zone':'default','logging':'true'}\"\n" >> openshift-inventory
let INDEX=INDEX+1
done
sed -i "s/###ADMIN_USER###/$ADMIN_USER/g" openshift-inventory
sed -i "s/###MASTER_DOMAIN###/$MASTER_DOMAIN/g" openshift-inventory
ansible-playbook --private-key=openshift.key -i openshift-inventory playbooks/deploy_cluster.yml
cd ..
# Self-delete the bootstrap copy of this script.
rm install.sh
| true
|
551d8b9e37ef67443230df8c2c0a1fcba5ef09c5
|
Shell
|
eric-tang/ELL-Docker
|
/scripts/run-ell.sh
|
UTF-8
| 1,350
| 4.125
| 4
|
[] |
no_license
|
#!/bin/bash -e
#
# Run an ELL tutorial inside the rpi0-ell-base Docker image, installing
# Docker and pulling the image first if necessary.
#
# Usage: run-ell.sh -i|--input-folder <dir-with-built-model>

POSITIONAL=()
while [[ $# -gt 0 ]]
do
key="$1"
case "$key" in
    # fixed: the long option name was mojibake ("โinput-folder") and could
    # never match; restored to --input-folder.
    -i|--input-folder)
    INPUT_FOLDER="$2"
    shift # past argument
    shift # past value
    ;;
    --default)
    DEFAULT=YES
    shift # past argument
    ;;
    *) # unknown option
    POSITIONAL+=("$1") # save it in an array for later
    shift # past argument
    ;;
esac
done
set -- "${POSITIONAL[@]}" # restore positional parameters

# Fail early with a clear message instead of cd-ing to nowhere later.
: "${INPUT_FOLDER:?usage: $0 -i <input-folder>}"

red=$(tput setaf 1)
green=$(tput setaf 2)
reset=$(tput sgr0)

# first check if docker installed
if hash docker 2>/dev/null; then
    echo "${green}You have docker installed. Proceed to next step.${reset}"
else
    echo "${red}You don't have docker installed. Install docker first.${reset}"
    curl -sSL https://get.docker.com | sh
fi

# now check if image already exists
if docker inspect "erict2017/rpi0-ell-base:v1.1">/dev/null; then
    echo "${green}ELL base image already exists. Proceed to next step...${reset}"
else
    echo "${red}ELL base image does not exist. Pull the image first...${reset}"
    docker pull erict2017/rpi0-ell-base:v1.1
fi

# now run tutorial from input source folder with built models
echo "${green}Geting into ${INPUT_FOLDER}. Making sure you have built in model ready in this folder!${reset}"
# Quote the folder so paths with spaces survive.
cd "${INPUT_FOLDER}"
docker run --rm -v "$(pwd)":/home/sources/ell-python erict2017/rpi0-ell-base:v1.1
| true
|
5ca6d8a6ffe332c163f1e599ca341135363077e3
|
Shell
|
ProfessorTao/MyUtilCodes
|
/supervisord-config/MultipleTasks/do-with-default-supervisorctl.sh
|
UTF-8
| 141
| 2.65625
| 3
|
[] |
no_license
|
#!/bin/bash
# Run a supervisorctl action against the global supervisord config.
# Usage: do-with-default-supervisorctl.sh <action>
config_file=supervisord-global.conf
action=$1
# Echo what will run, then invoke supervisorctl directly.  The original
# built a string and passed it through `eval`, which would re-parse any
# shell metacharacters in $action (an injection hazard) for no benefit.
# $action is deliberately left unquoted so a multi-word action such as
# "status all" still splits into separate arguments, as eval did.
echo "supervisorctl -c ${config_file} ${action}"
supervisorctl -c "${config_file}" ${action}
| true
|
d656f3aec5a2654572583af1c3a08ae6c15464bf
|
Shell
|
guidocecilio/neologism
|
/make-archive.sh
|
UTF-8
| 1,405
| 2.703125
| 3
|
[] |
no_license
|
# Build a ready-to-install Neologism distribution: Drupal 6 core plus the
# required contrib modules, vendored libraries, and the Neologism code from
# SVN, zipped up as neologism.zip.
# Download Drupal Core
drush dl drupal
# Rename directory created by above command; use wildcard because version can change
mv drupal-6.* neologism
cd neologism
# Download required modules
drush dl cck rdf ext rules
# Download and extract ARC, which is required as part of the RDF module
mkdir sites/all/modules/rdf/vendor
curl -o sites/all/modules/rdf/vendor/arc.tar.gz http://code.semsol.org/source/arc.tar.gz
tar xzf sites/all/modules/rdf/vendor/arc.tar.gz -C sites/all/modules/rdf/vendor/
rm sites/all/modules/rdf/vendor/arc.tar.gz
# Download and extract ExtJS-3, which is required for the evoc module
curl -O http://extjs.cachefly.net/ext-3.0.0.zip
unzip ext-3.0.0.zip
mv ext-3.0.0 sites/all/modules/ext/
rm ext-3.0.0.zip
# Check out Neologism and evoc modules from Google Code SVN
# @@@ use export instead???  (svn export would avoid shipping .svn dirs)
svn co https://neologism.googlecode.com/svn/trunk/neologism sites/all/modules/neologism --username richard@cyganiak.de
svn co https://neologism.googlecode.com/svn/trunk/evoc sites/all/modules/evoc --username richard@cyganiak.de
# Check out Neologism installation profile from Google Code SVN
svn co https://neologism.googlecode.com/svn/trunk/profile profiles/neologism --username richard@cyganiak.de
# Delete the Drupal default installation profile, we only support the Neologism one
rm -rf profiles/default/
# Create archive of the entire thing, ready for installation by users
cd ..
zip -r neologism.zip neologism
| true
|
2da7dfabc34ed1e88f1311570970d3fa1c654b8c
|
Shell
|
manoelhc/restafari
|
/test/exceptions_test/empty_expected_data/run.sh
|
UTF-8
| 356
| 3
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Regression test: running restafari with an empty expected-data file must
# not produce a Python traceback.  Exits 1 if one is detected.
cd $(dirname ${0})
_SCRIPT_DIR=$(pwd)
# Unique-ish temp file next to the script (epoch-second suffix).
_TMP="${_SCRIPT_DIR}/.tmp$(date +%s)"
. ${_SCRIPT_DIR}/../../../ci/common.sh
cd ${_SCRIPT_DIR}
echo "-- TESTING EMPTY EXPECTED DATA --"
restafari -s localhost --port ${TEST_SERVER_PORT} file.rest > ${_TMP}
cat ${_TMP}
# NOTE(review): the pattern is "Trackback" — presumably meant to match a
# Python "Traceback"; confirm against restafari's actual output before
# changing it.
[[ -n "$(grep Trackback $_TMP)" ]] && rm -f ${_TMP} && exit 1
rm -f ${_TMP}
exit 0
| true
|
072b6a452b5d0aa50ac1677f852257ee6aed0cdf
|
Shell
|
huntergps/trytond-scripts
|
/docker_entry_script.sh
|
UTF-8
| 575
| 3.15625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
#
# Container entrypoint for Trytond: generate the config, initialise the
# database, optionally install all modules, then exec the server in the
# foreground.
#
# Expects TRYTONPASS, TRYTONPASSFILE, TRYTOND_CONFIG and DATABASE_NAME in
# the environment (e.g. set via docker run -e).

echo "Generating config..."
python /tryton/trytond-scripts/generate_config.py
echo "First run."
echo "Generating password file..."
echo "$TRYTONPASS" > "$TRYTONPASSFILE"
echo "Initializing the DB..."
/usr/local/bin/trytond -c "$TRYTOND_CONFIG" -d "$DATABASE_NAME" -v --all
echo "Removing password file..."
rm "$TRYTONPASSFILE"
# fixed: the original `[ $1 == 'first-run' ]` errored with "unary operator
# expected" whenever the script was started without arguments; default the
# parameter and use the portable "=" comparison.
if [ "${1:-}" = 'first-run' ]; then
    echo "Install ALL the modules..."
    python /tryton/trytond-scripts/install_modules.py
fi
echo "Launching Trytond Server..."
# exec so trytond becomes PID 1 and receives container signals directly.
exec /usr/local/bin/trytond -c "$TRYTOND_CONFIG" -d "$DATABASE_NAME" -v
| true
|
c957315fbd3345036a915b97ec5a3a8beec3e213
|
Shell
|
lmiglior/alidist
|
/sherpa.sh
|
UTF-8
| 2,315
| 3.015625
| 3
|
[] |
no_license
|
# alidist build recipe: YAML metadata header, then the build script after
# the "---" separator.
package: SHERPA
version: "%(tag_basename)s"
tag: "v2.2.8-alice1"
source: https://github.com/alisw/SHERPA
requires:
- "GCC-Toolchain:(?!osx)"
- Openloops
- HepMC
- lhapdf-pdfsets
- fastjet
build_requires:
- curl
- autotools
- cgal
- GMP
---
#!/bin/bash -e
# Work on a clean copy of the sources (git metadata excluded).
rsync -a --delete --exclude '**/.git' --delete-excluded $SOURCEDIR/ ./
autoreconf -ivf
# SHERPA's configure uses wget which might not be there
# Provide a curl-backed `wget` shim on PATH for configure's downloads.
mkdir -p fakewget && [[ -d fakewget ]]
printf '#!/bin/bash\nexec curl -fO $1' > fakewget/wget && chmod +x fakewget/wget
PATH=$PATH:fakewget
export LDFLAGS="$LDFLAGS -L$CGAL_ROOT/lib -L$GMP_ROOT/lib"
./configure --prefix=$INSTALLROOT \
--with-sqlite3=install \
--enable-hepmc2=$HEPMC_ROOT \
--enable-lhapdf=$LHAPDF_ROOT \
--enable-openloops=$OPENLOOPS_ROOT \
--enable-fastjet=$FASTJET_ROOT
make ${JOBS+-j $JOBS}
make install
# Modulefile
# Generate the environment-modules file.  Inside the heredoc, escaped \$
# survive into the modulefile (Tcl variables); unescaped ${...} expand now.
MODULEDIR="$INSTALLROOT/etc/modulefiles"
MODULEFILE="$MODULEDIR/$PKGNAME"
mkdir -p "$MODULEDIR"
cat > "$MODULEFILE" <<EoF
#%Module1.0
proc ModulesHelp { } {
global version
puts stderr "ALICE Modulefile for $PKGNAME $PKGVERSION-@@PKGREVISION@$PKGHASH@@"
}
set version $PKGVERSION-@@PKGREVISION@$PKGHASH@@
module-whatis "ALICE Modulefile for $PKGNAME $PKGVERSION-@@PKGREVISION@$PKGHASH@@"
# Dependencies
module load BASE/1.0 ${GCC_TOOLCHAIN_REVISION:+GCC-Toolchain/$GCC_TOOLCHAIN_VERSION-$GCC_TOOLCHAIN_REVISION} \\
${LHAPDF_PDFSETS_REVISION:+lhapdf-pdfsets/$LHAPDF_PDFSETS_VERSION-$LHAPDF_PDFSETS_REVISION} \\
${FASTJET_REVISION:+fastjet/$FASTJET_VERSION-$FASTJET_REVISION} \\
${HEPMC_REVISION:+HepMC/$HEPMC_VERSION-$HEPMC_REVISION} \\
${OPENLOOPS_REVISION:+Openloops/$OPENLOOPS_VERSION-$OPENLOOPS_REVISION}
# Our environment
set SHERPA_ROOT \$::env(BASEDIR)/$PKGNAME/\$version
setenv SHERPA_ROOT \$SHERPA_ROOT
setenv SHERPA_INSTALL_PATH \$::env(SHERPA_ROOT)/lib/SHERPA
setenv SHERPA_SHARE_PATH \$::env(SHERPA_ROOT)/share/SHERPA-MC
prepend-path PATH \$SHERPA_ROOT/bin
prepend-path LD_LIBRARY_PATH \$SHERPA_ROOT/lib
prepend-path LD_LIBRARY_PATH \$SHERPA_ROOT/lib/SHERPA-MC
EoF
| true
|
0c8bf2abe8fe918d1f98db1538705908a26e7844
|
Shell
|
jpsuter/boemb-tl
|
/recipes-core/config-boter/files/waitconfig.sh
|
UTF-8
| 886
| 2.921875
| 3
|
[
"MIT"
] |
permissive
|
# Boot-time configuration for a BOEMB display unit: set timezone and DNS,
# fetch this unit's config (IP + name) over MQTT keyed by the eth0 MAC
# address, write it out, and reboot if the values changed.
rm /etc/localtime
ln -s /usr/share/zoneinfo/Europe/Paris /etc/localtime
echo 'search t-l.ch' > /etc/resolv.conf
echo 'nameserver 10.6.10.10' >> /etc/resolv.conf
# fixed: the second entry was written as "nameserver10.6.10.9" (missing
# space), which the resolver silently ignores — the backup DNS server was
# never used.
echo 'nameserver 10.6.10.9' >> /etc/resolv.conf
# Serial number = permanent MAC address of eth0.
sn=$(ethtool -P eth0 | cut -d " " -f 3)
# Block until exactly one config message arrives for this unit.
config=$(/usr/bin/mosquitto_sub -h tara.t-l.ch -C 1 -t "TL/BOTER/$sn/config")
ip=$(echo "$config" | jq '.IP' | tr --delete \")
name=$(echo "$config" | jq '.NAME' | tr --delete \")
echo "$name" > /etc/hostname
echo "URL=http://127.0.0.1/player/#/boemb/landscape/standard" > /opt/config/boemb.cfg
echo "Screen_L=1920" >> /opt/config/boemb.cfg
echo "Screen_H=630" >> /opt/config/boemb.cfg
echo "IP=$ip" >> /opt/config/boemb.cfg
echo "NAME=$name" >> /opt/config/boemb.cfg
# Rewrite the static address line in the interfaces file.
sed -i "s/.*address.10.*/ address $ip/" /etc/network/interfaces
sleep 1
# NOTE(review): $IP and $NAME are never assigned in this script —
# presumably they arrive via the environment (e.g. a sourced boemb.cfg);
# confirm, otherwise this comparison always triggers a reboot.
if [ "$ip" != "$IP" ] || [ "$name" != "$NAME" ]; then
/sbin/reboot
fi
| true
|
8dc8a676efc82c5b5423a6c6569196a1a81b8187
|
Shell
|
btison/docker-images
|
/run-java/bin/run-java.sh
|
UTF-8
| 2,688
| 3.78125
| 4
|
[] |
no_license
|
#! /bin/bash
# Derive a JVM max-heap value (in MB) from the container's cgroup memory
# limit.  Prints nothing when the limit is at or above the 2^60-1 sentinel
# (i.e. the container is effectively unconstrained).
# CONTAINER_HEAP_PERCENT (default 0.50) chooses the fraction of container
# memory to hand to the heap.
# NOTE(review): reads the cgroup v1 path only — this file does not exist on
# cgroup v2 hosts; confirm the target runtime.
function get_heap_size {
CONTAINER_MEMORY_IN_BYTES=`cat /sys/fs/cgroup/memory/memory.limit_in_bytes`
DEFAULT_MEMORY_CEILING=$((2**60-1))
if [ "${CONTAINER_MEMORY_IN_BYTES}" -lt "${DEFAULT_MEMORY_CEILING}" ]; then
if [ -z $CONTAINER_HEAP_PERCENT ]; then
CONTAINER_HEAP_PERCENT=0.50
fi
CONTAINER_MEMORY_IN_MB=$((${CONTAINER_MEMORY_IN_BYTES}/1024**2))
# awk does the fractional multiply; printf %d truncates to whole MB.
CONTAINER_HEAP_MAX=$(echo "${CONTAINER_MEMORY_IN_MB} ${CONTAINER_HEAP_PERCENT}" | awk '{ printf "%d", $1 * $2 }')
echo "${CONTAINER_HEAP_MAX}"
fi
}
JAVA_OPTS=${JAVA_OPTS:-""}
# Nexus
NEXUS_IP=$(ping -q -c 1 -t 1 ${NEXUS_HOST} | grep -m 1 PING | cut -d "(" -f2 | cut -d ")" -f1)
NEXUS_PORT=8080
NEXUS_URL=$NEXUS_IP:$NEXUS_PORT
# debug options
DEBUG_MODE=${DEBUG_MODE:-false}
DEBUG_PORT=${DEBUG_PORT:-8787}
if [ -n "$JAVA_APP_GAV" ]; then
IFS=':' read -a gav <<< "${JAVA_APP_GAV}"
if [ "${#gav[@]}" = "5" ]; then
JAVA_APP_LIB=${gav[1]}-${gav[2]}-${gav[3]}.${gav[4]}
gav_url="$NEXUS_URL/nexus/service/local/artifact/maven/redirect?r=public&g=${gav[0]}&a=${gav[1]}&v=${gav[2]}&c=${gav[3]}&e=${gav[4]}"
elif [ "${#gav[@]}" = "4" ]; then
JAVA_APP_LIB=${gav[1]}-${gav[2]}.${gav[3]}
gav_url="$NEXUS_URL/nexus/service/local/artifact/maven/redirect?r=public&g=${gav[0]}&a=${gav[1]}&v=${gav[2]}&e=${gav[3]}"
elif [ "${#gav[@]}" = "3" ]; then
JAVA_APP_LIB=${gav[1]}-${gav[2]}.jar
gav_url="$NEXUS_URL/nexus/service/local/artifact/maven/redirect?r=public&g=${gav[0]}&a=${gav[1]}&v=${gav[2]}&e=jar"
fi
if [ ! -f $JAVA_APP_DIR/$JAVA_APP_LIB ]; then
echo "Installing library ${JAVA_APP_LIB} in ${JAVA_APP_DIR}"
curl --insecure -s -L -o $JAVA_APP_DIR/$JAVA_APP_LIB "$gav_url"
fi
fi
if [ "$DEBUG_MODE" = "true" ]; then
echo "Debug mode = true"
JAVA_OPTS="$JAVA_OPTS -agentlib:jdwp=transport=dt_socket,address=$DEBUG_PORT,server=y,suspend=n"
fi
# max memory
# Check whether -Xmx is already given in JAVA_OPTS. Then we dont
# do anything here
if ! echo "${JAVA_OPTS}" | grep -q -- "-Xmx"; then
MAX_HEAP=`get_heap_size`
if [ -n "$MAX_HEAP" ]; then
JAVA_OPTS="$JAVA_OPTS -Xmx${MAX_HEAP}m"
fi
fi
# Make sure that we use /dev/urandom
JAVA_OPTS="${JAVA_OPTS} -Djava.security.egd=file:/dev/./urandom"
# system properties
for i in $(compgen -A variable | grep "^SYSTEM_PROP_"); do
prop="${!i}"
prop_resolved=$(eval echo $prop)
echo "Adding property ${prop_resolved} to the system properties"
JAVA_OPTS="$JAVA_OPTS ${prop_resolved}"
done
if [ -n "$JAVA_APP_GAV" ]; then
cd $JAVA_APP_DIR
echo "exec java $JAVA_OPTS -jar $JAVA_APP_LIB $@"
exec java $JAVA_OPTS -jar $JAVA_APP_LIB "$@"
else
echo "ERROR: No Java app specified"
fi
| true
|
dff27d295f4b0581b5e790c3a9c1e4e13eb64dda
|
Shell
|
pksung2/ATMS-305
|
/Week3Answers/simplevariables.sh
|
UTF-8
| 212
| 3.390625
| 3
|
[] |
no_license
|
#!/bin/bash
# Read two integers from stdin; if the first is smaller than the second,
# print their sum, otherwise print a taunt.

# fixed: `declare -f var1 var2` declares *function* names (and simply
# failed here); `-i` is the flag that marks variables as integers.
declare -i var1 var2
echo Input variable 1
read -r var1
echo
echo Input variable 2
read -r var2
echo
if [[ $var1 -lt $var2 ]]; then
    echo "Your output is $((var1 + var2))"
else
    echo You stink.
fi
echo
| true
|
8c4b7111947318fb79dea98a8059b6618014811f
|
Shell
|
mirokuxy/471
|
/A1.sh
|
UTF-8
| 3,356
| 3.984375
| 4
|
[] |
no_license
|
#!/bin/bash
#####
# This script is written and supposed to be run under host "may"
# Author: Yu Xiao
# StudentID: 301267080
#####
# Identity of the machine the probes originate from (used in output only).
thisMachineName=may
thisMachineIP4=192.168.0.5
thisMachineIP6=fdd0:8184:d967:118:250:56ff:fe85:d1d8
# Switched to the IPv6 tools just before the IPv6 test at the bottom.
myPing=ping #will be modified to ping6 when testing IPv6
myTracepath=tracepath # will be modified to tracepath6 when testing IPv6
#Test a host with either hostname or IP address
# $1: target host (name or IP); $2: label for this machine in the output.
# Sends one ping with a 1-second timeout; on success prints the ARP-cached
# Ethernet address (when present) and runs tracepath to the target.
function testHost { # $1: target host; $2: this machine
echo
status=false
$myPing -c 1 -W 1 $1 &> /dev/null && status=true || status=false
if [ $status = 'true' ]; then
echo $1 is reachable from $2
# Second line of `arp` output, third column = hardware address.
etherAddr=`arp $1 2> /dev/null | sed -n '2p' | awk '{ print $3}'`
if [ $etherAddr ]; then
echo Ethernet address is $etherAddr
fi
$myTracepath $1
else
echo $1 is not reachable from $2
fi
}
#Test all hosts with their names
# Walks every network's host list and probes each host as <host>.<network>.
function testWithName {
#Hosts names
adminHosts=( january february march april may june july august september october november december spring summer autumn fall winter solstice equinox seansons year )
net16Hosts=( january april summer june fall september equinox december )
net17Hosts=( january november spring august autumn february )
net18Hosts=( december may july winter march )
net19Hosts=( february october solstice year march )
#Network names
nets=( admin net16 net17 net18 net19 )
#Test hosts in a network
function testNet { # $1: network name
#Get the array of host names
# Indirect expansion: "<net>Hosts[@]" is built as a string and then
# dereferenced with ${!...} to obtain that network's host array.
netName=$1
net=${netName}Hosts
hosts=$net[@]
hosts=( "${!hosts}" )
#echo ${hosts[@]}
for host in "${hosts[@]}"; do
#Test each host in the network
host=${host}.${netName}
testHost $host $thisMachineName
done
}
echo
echo -------------------------------------------
echo --------- Testing With Host Names ---------
echo
for net in "${nets[@]}"; do #Test each network
echo
echo $net
testNet $net
done
}
#Test all hosts with their IPv4 address (Scan through a range of IPs)
# For each network prefix, probes every address whose last octet lies in
# [lowerBound, upperBound] for that network.
function testWithIP4 {
index=4 #index of last entry in the array below
#First three bytes of each network address
nets=( 192.168.0 172.16.1 172.17.1 172.18.1 172.19.1 )
lowerBound=( 1 1 1 3 2 ) #lower bound of the last byte for each network for scan
upperBound=( 20 16 20 15 18 ) #upper bound of the last byte for each network for scan
echo
echo ------------------------------------------
echo ---------- Testing With IPv4 -------------
echo
for i in `seq 0 $index`; do #Test each network
net=${nets[$i]}
lB=${lowerBound[$i]}
uB=${upperBound[$i]}
lByte=$lB
while [ $lByte -le $uB ]; do #Scan each IP in the range and test
IP=${net}.${lByte}
testHost $IP $thisMachineIP4
let lByte+=1
done
done
}
#Test just one host with its IPv6 address
# Relies on the caller having pointed myPing/myTracepath at the v6 tools.
function testWithIP6 {
targetIP6=fdd0:8184:d967:118:250:56ff:fe85:f802 #march
echo
echo -----------------------------------------
echo ---------- Testing With IPv6 ------------
echo
testHost $targetIP6 $thisMachineIP6
}
#Reset global commands for test with host names and test with IPv4 address
# Main sequence: name-based sweep, IPv4 sweep, then one IPv6 probe.
myPing=ping
myTracepath=tracepath
testWithName
testWithIP4
#Reset global commands for test with IPv6 address
myPing=ping6
myTracepath=tracepath6
testWithIP6
| true
|
5451c31a5d72f0a8f4aa856c548efaab36cd15fb
|
Shell
|
atweiden/voidpkgs
|
/srcpkgs/mit-krb5/template
|
UTF-8
| 2,955
| 2.5625
| 3
|
[
"Unlicense",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-public-domain"
] |
permissive
|
# Void Linux (xbps-src) template metadata for MIT Kerberos 5.
maintainer="nox"
pkgname="mit-krb5"
version=1.21.1
revision=1
# Upstream publishes tarballs under the major.minor directory.
_distver="$(echo "$version" | cut -d. -f-2)"
short_desc="MIT Kerberos 5 implementation"
makedepends+=" db-devel"
makedepends+=" e2fsprogs-devel"
makedepends+=" $(vopt_if ldap libldap-devel)"
makedepends+=" $(vopt_if lmdb lmdb-devel)"
hostmakedepends+=" e2fsprogs-devel"
hostmakedepends+=" flex"
hostmakedepends+=" perl"
hostmakedepends+=" pkg-config"
homepage="http://web.mit.edu/kerberos"
license="MIT"
distfiles="http://kerberos.org/dist/krb5/$_distver/krb5-$version.tar.gz"
checksum="7881c3aaaa1b329bd27dbc6bf2bf1c85c5d0b6c7358aff2b35d513ec2d50fa1f"
build_style="gnu-configure"
configure_args+=" --disable-rpath"
configure_args+=" --enable-shared"
configure_args+=" --sbindir=/usr/bin"
configure_args+=" --with-system-db"
configure_args+=" --with-system-et"
configure_args+=" --with-system-ss"
configure_args+=" --without-system-verto"
configure_args+=" $(vopt_with ldap)"
configure_args+=" $(vopt_with lmdb)"
# Optional features togglable at build time; LDAP backend is on by default.
build_options+=" ldap"
build_options+=" lmdb"
build_options_default="ldap"
desc_option_lmdb="Enable LMDB database backend"
# Point the db2 KDB plugin at the Berkeley DB 1.85 compatibility header
# provided by the system db package.
post_patch() {
# fix db plugin
vsed \
-i \
-e "s|<db.h>|<db_185.h>|" \
src/plugins/kdb/db2/{adb_openclose.c,db2_exp.c,kdb_db2.c,policy_db.h}
}
# Run upstream's configure from src/, pre-seeding autoconf cache values
# that cannot be probed when cross-compiling.
do_configure() {
./src/configure \
$configure_args \
ac_cv_func_pthread_once="yes" \
ac_cv_func_pthread_rwlock_init="yes" \
ac_cv_func_regcomp="yes" \
ac_cv_printf_positional="yes" \
acx_pthread_ok="yes" \
krb5_cv_attr_constructor_destructor="yes,yes"
}
# Ship the license, runit service files, and — when the LDAP backend is
# enabled — the LDAP schema files as documentation.
post_install() {
vlicense NOTICE
vsv krb5kdc
vsv kadmind
# install schema files for LDAP database backend
if [[ -n "$(vopt_if ldap present)" ]]; then
for schema in kerberos.{schema,ldif,openldap.ldif}; do
vdoc "src/plugins/kdb/ldap/libkdb_ldap/$schema"
done
fi
}
# Subpackage: user-facing Kerberos client binaries (and their man pages,
# where present) split out of the main package.
mit-krb5-client_package() {
	short_desc+=" - client programs"
	pkg_install() {
		local _clients=(
			gss-client k5srvutil kadmin kdestroy kinit klist kpasswd
			ksu kswitch ktutil kvno sclient sim_client uuclient
		)
		local _bin
		for _bin in "${_clients[@]}"; do
			vmove "usr/bin/$_bin"
			# Not every client ships a man page; move it only if it exists.
			if [[ -f "$DESTDIR/usr/share/man/man1/$_bin.1" ]]; then
				vmove "usr/share/man/man1/$_bin.1"
			fi
		done
	}
}
# Subpackage: headers, pkg-config files, krb5-config and .so dev symlinks.
mit-krb5-devel_package() {
unset depends
depends+=" $makedepends"
depends+=" mit-krb5-libs>=${version}_$revision"
short_desc+=" - development files"
pkg_install() {
vmove usr/include
vmove usr/bin/krb5-config
vmove usr/lib/pkgconfig
vmove "usr/lib/*.so"
}
}
# Subpackage: versioned runtime shared libraries only.
mit-krb5-libs_package() {
short_desc+=" - runtime libraries"
pkg_install() {
vmove "usr/lib/*.so.*"
}
}
# REMARKS:
#
# if there is a bump in .so version, also update
# srcpkgs/libgssglue/files/gssapi_mech.conf
# vim: set filetype=sh foldmethod=marker foldlevel=0 nowrap:
| true
|
a301e715580a8d673628be84ec7b151b7a443ba6
|
Shell
|
Senso-Care/SensoApi
|
/scripts/build.sh
|
UTF-8
| 584
| 3.8125
| 4
|
[] |
no_license
|
#!/bin/bash
# Cross-compile one of the commands under ./cmd for a target OS/arch.
# Usage: build.sh CMDNAME [OS/PLATFORM]   (platform as printed by
# `go tool dist list`); output lands in bin/<platform>/<cmd>.
if [ "$#" -lt 1 ] || ! [ -d "./cmd/$1" ] > /dev/null; then
echo "Usage: $0 CMDNAME [OS/PLATFORM]" >&2
echo "Check os/platform available by running: \"go tool dist list\""
exit 1
fi
# Defaults when no platform argument is supplied.
os="linux"
platform="amd64"
# NOTE(review): the grep is unanchored, so "$2" matching any substring of
# the dist list is accepted — confirm that looseness is intended.
if [ ! -z "$2" ] && go tool dist list | grep "$2" > /dev/null; then
os=$(echo "$2" | cut -d"/" -f1)
platform=$(echo "$2" | cut -d"/" -f2)
fi
# Map the docker-style "linux/arm/v7" triple onto plain GOARCH=arm.
if [ "$2" == "linux/arm/v7" ]; then
os="linux"
platform="arm"
fi
cmd_path="./cmd/$1"
echo "Compiling $1 on $os/$platform"
# CGO off for fully static cross-builds; -x prints build commands.
CGO_ENABLED=0 GOOS=$os GOARCH=$platform go build -o="bin/$2/$1" -x "./$cmd_path"
| true
|
67ebe0aa20b496194d2712d90a13296747af7cc6
|
Shell
|
smashism/jamfpro-extension-attributes
|
/detect_chrome_extension.sh
|
UTF-8
| 496
| 3.015625
| 3
|
[] |
no_license
|
#!/bin/sh
#
# script by emily kw 2014-07-29
# to detect if the Awesome Screenshot Extension is installed on Google Chrome
# updated 2022-01-29
#
# Jamf Pro extension attribute: reports whether the Awesome Screenshot
# Chrome extension directory exists in the console user's profile.

# Ask configd for the user currently at the console.
console_user=$(scutil <<'EOF' | awk '/Name :/ { print $3 }'
show State:/Users/ConsoleUser
EOF
)

ext_dir="/Users/$console_user/Library/Application Support/Google/Chrome/Default/Extensions/alelhddbbhepgpmgidjdcjakblofbmce"
if [ -d "$ext_dir" ] ; then
    result="Awesome Screenshot Installed."
else
    result="Not installed."
fi
echo "<result>$result</result>"
| true
|
d1e3aa822e7181c903b37479029e9f08f10b2db7
|
Shell
|
zhengjiexu123/AEP_TEST
|
/dram_test/run_numa.sh
|
UTF-8
| 814
| 3.046875
| 3
|
[] |
no_license
|
# Sweep the ./../main benchmark over every (test type, block size, thread
# count) combination; results are written to numa/<type>/<size>/<threads>.result.
# block_name and block_size are parallel arrays (label vs byte count).
block_name=(01_8B 02_16B 03_32B 04_64B 05_128B 06_256B 07_512B 08_1KB 09_4KB 10_16KB 11_64KB 12_256KB 13_1MB 14_4MB 15_16MB)
block_size=(8 16 32 64 128 256 512 1024 4096 16384 65536 262144 1048576 4194304 16777216)
# NOTE(review): rr/rw/sr/sw presumably mean random/sequential read/write —
# confirm against the benchmark binary's --benchmark option.
test_type=(rr rw sr sw)
num_thread=(1 2 4 6 8 10 12 14 16)
DIR=numa
# Total data moved per run: 2 GiB expressed in MiB.
data_amount=$((2*1024))
for ((i=0; i<${#test_type[*]}; i+=1))
do
for ((j=0; j<${#block_size[*]}; j+=1))
do
OUTPUT=$DIR/${test_type[$i]}/${block_name[$j]}
mkdir -p $OUTPUT
for ((k=0; k<${#num_thread[*]}; k+=1))
do
echo "running $OUTPUT/${num_thread[$k]}.result"
./../main --numa=0 --sync=1 --ntstore=1 --verify=0 --pmem_file_path=/home/pmem0/pm --align_size=256 --benchmark=${test_type[$i]} --block_size=${block_size[$j]} --num_thread=${num_thread[$k]} --data_amount=$data_amount > $OUTPUT/${num_thread[$k]}.result
done
done
done
| true
|
c5effe2e1cea090a32a22843547379e872e81cec
|
Shell
|
christiangda/PuppetMaster
|
/Vagrant/scripts/bootstrap-redhat-family.sh
|
UTF-8
| 1,547
| 2.890625
| 3
|
[] |
no_license
|
#!/bin/bash
# Vagrant provisioner for RHEL-family guests in a Puppet lab: install
# tooling and the Puppet repo, disable SELinux, auto-login vagrant as
# root, and pin hostnames/DNS for the lab network.
################################################################################
# Prepare the system
#yum -y install deltarpm
#yum -y install kernel-devel kernel-headers dkms
#yum -y update
yum -y install vim-enhanced htop elinks mlocate nmap telnet
rpm -ivh https://yum.puppetlabs.com/puppetlabs-release-pc1-el-7.noarch.rpm
################################################################################
# Disable selinux
# (config edit makes it permanent; setenforce 0 applies immediately)
sed -i 's/SELINUX=permissive/SELINUX=disabled/g' /etc/selinux/config
sed -i 's/SELINUXTYPE=targeted/SELINUXTYPE=minimum/g' /etc/selinux/config
setenforce 0
################################################################################
# Enter like root all time
cat << __EOF__ >> /home/vagrant/.profile
#
# Autologin as root
# Added by vagrant-agent.sh script
sudo su -
__EOF__
chown vagrant.vagrant /home/vagrant/.profile
echo "" >> /home/vagrant/.bash_profile
echo "source ~/.profile" >> /home/vagrant/.bash_profile
################################################################################
# Set hostname file (replaced)
cat << __EOF__ > /etc/hosts
# MANAGED BY boostrap.sh script in Vagrant
127.0.0.1 localhost
192.168.33.100 ps.puppet.local master
192.168.33.101 pa-01.puppet.local pa-01
192.168.33.102 pa-02.puppet.local pa-02
__EOF__
################################################################################
# Set hostname file (add lines)
cat << __EOF__ >> /etc/resolv.conf
# MANAGED BY boostrap.sh script in Vagrant
domain puppet.local
search puppet.local
__EOF__
| true
|
6f65d6c171c7ac210b67576e008a8e4a8d6b610b
|
Shell
|
skjq/Hannigan_CRCVirome_mBio_2018
|
/bin/getOrfAbundance.sh
|
UTF-8
| 1,207
| 3.34375
| 3
|
[
"MIT"
] |
permissive
|
#! /bin/bash
# getOrfAbundance.sh
# Geoffrey Hannigan
# Pat Schloss Lab
# University of Michigan
# Build a DIAMOND database from an ORF fasta ($1) and, for every *_R2.fastq
# under $2, align reads against it (blastx) in parallel; tabular hits are
# written next to each fastq as <name>.diamondout.
#######################
# Set the Environment #
#######################
export OrfFile=$1
export FastaSequences=$2
export diamondpath=/mnt/EXT/Schloss-data/bin/
export tmpfile=./data/tmporfabund
export WorkingDirectory=$(pwd)
###################
# Set Subroutines #
###################
# $1: fastq file name (relative to $FastaSequences).  Runs diamond blastx
# with a strict cutoff (best hit only, e<=1e-15, >=90% identity) and
# converts the binary .daa output to tab-separated text.
GetOrfHits () {
sampleid=$(echo ${1} | sed 's/_2.fastq//')
echo Samplie ID is ${sampleid}
# Use blast to get hits of ORFs to Uniprot genes
echo Running Phage ORFs...
${diamondpath}diamond blastx \
-q ${FastaSequences}/${1} \
-d ${tmpfile}/diamonddatabase \
-a ${tmpfile}/${sampleid}-output.daa \
-t ./ \
--max-target-seqs 1 \
--evalue 1e-15 \
--id 0.90
${diamondpath}diamond view \
-a ${tmpfile}/${sampleid}-output.daa \
-o ${FastaSequences}/${1}.diamondout
}
# Exported so the xargs-spawned bash subshells below can call it.
export -f GetOrfHits
################
# Run Analysis #
################
mkdir -p ./data/tmporfabund
# Create diamond database
echo Creating Database...
${diamondpath}diamond makedb \
--in "${OrfFile}" \
-d ${tmpfile}/diamonddatabase
# Fan out over all R2 fastq files, up to 32 concurrent diamond runs.
ls ${FastaSequences}/*_R2.fastq | sed "s/.*\///g" | xargs -I {} --max-procs=32 bash -c 'GetOrfHits "$@"' _ {}
| true
|
2787911b7ffac2f005f069c7b841c018dc7d24ee
|
Shell
|
sfitpro/pi-hole
|
/setup.cloudflared.doh.for.pi-hole.sh
|
UTF-8
| 1,793
| 3.421875
| 3
|
[] |
no_license
|
#!/bin/bash
# Author: Eddie Lu
# Description: Bash script to automate set up Cloudfared DoH for Pi-hole on Raspbian
# Reference:
# https://docs.pi-hole.net/guides/dns-over-https/
# https://www.cyberciti.biz/faq/configure-ubuntu-pi-hole-for-cloudflare-dns-over-https/
# NOTE(review): some commands below use sudo while the /etc heredoc writes
# do not — the script presumably runs as root already (making the sudos
# redundant); confirm, otherwise the `cat > /etc/...` steps will fail.
# 1. download the precompiled binary and copy it to the /usr/local/bin/ directory
wget https://bin.equinox.io/c/VdrWdbjqyF/cloudflared-stable-linux-arm.tgz
tar -xvzf cloudflared-stable-linux-arm.tgz
sudo cp ./cloudflared /usr/local/bin
sudo chmod +x /usr/local/bin/cloudflared
cloudflared -v
# 2. configure cloudflared to run on startup
# Dedicated no-login system user for the daemon.
sudo useradd -s /usr/sbin/nologin -r -M cloudflared
# lock down the cloudflared user
sudo passwd -l cloudflared
sudo chage -E 0 cloudflared
# Daemon listens on local port 5053 and forwards to Cloudflare DoH.
cat > /etc/default/cloudflared <<'EOM'
# Commandline args for cloudflared
CLOUDFLARED_OPTS=--port 5053 --upstream https://1.1.1.1/dns-query --upstream https://1.0.0.1/dns-query
EOM
sudo chown cloudflared:cloudflared /etc/default/cloudflared
sudo chown cloudflared:cloudflared /usr/local/bin/cloudflared
cat > /etc/systemd/system/cloudflared.service <<'EOM'
[Unit]
Description=cloudflared DNS over HTTPS proxy
After=syslog.target network-online.target
[Service]
Type=simple
User=cloudflared
EnvironmentFile=/etc/default/cloudflared
ExecStart=/usr/local/bin/cloudflared proxy-dns $CLOUDFLARED_OPTS
Restart=on-failure
RestartSec=10
KillMode=process
[Install]
WantedBy=multi-user.target
EOM
sudo systemctl enable cloudflared
sudo systemctl start cloudflared
sudo systemctl status cloudflared
# 3. verify
dig @127.0.0.1 -p 5053 www.google.com
# 4. configure Pi-hole to use Custom Upstream DNS Server 127.0.0.1#5053
echo "Logon http://<pihole.ip.address>/admin"
echo "Setting, DNS, Upstream DNS Servers"
echo "Custom 1 (IPv4): 127.0.0.1#5053"
|
2047e34a57d11930fddd3ae575bc5f7c27cd6e16
|
Shell
|
iT-Boyer/Hexo-Templates-For-Xcode
|
/lntemplates.sh
|
UTF-8
| 719
| 2.71875
| 3
|
[] |
no_license
|
#!/bin/sh
# lntemplates.sh
# HexoDeploy
#
# Created by pengyucheng on 28/04/2017.
# Copyright © 2017 hexo theme. All rights reserved.
#
# Symlink the bundled Hexo file/project templates into Xcode's user
# template directories so they appear in Xcode's New File / New Project
# dialogs.

SRC_HOME=$(pwd)
# Used only in the progress messages below; the actual commands spell the
# path out, because "~" inside double quotes would not expand.
XcodePATH="~/Library/Developer/Xcode/Templates/File\ Templates/HexoMD"
echo "mkdir -p ${XcodePATH}"
mkdir -p ~/Library/Developer/Xcode/Templates/File\ Templates/HexoMD
mkdir -p ~/Library/Developer/Xcode/Templates/Project\ Templates/ProjectTemplates
echo "ln -fs ${SRC_HOME}/Util/HexoTemplates ${XcodePATH}"
# -f replaces an existing link, -s makes the link symbolic; the source is
# quoted so a checkout path containing spaces still works.
ln -fs "${SRC_HOME}/Util/HexoTemplates" ~/Library/Developer/Xcode/Templates/File\ Templates/HexoMD
ln -fs "${SRC_HOME}/Util/HexoTemplates/ProjectTemplates" ~/Library/Developer/Xcode/Templates/Project\ Templates/ProjectTemplates
# fixed: the completion message was mojibake-garbled CJK text (roughly
# "Hexo template installation complete"); replaced with English.
echo "Hexo templates installed."
|
46a3d4fed870710376fff25b7f8957db87363144
|
Shell
|
mcoughlin/fisheye
|
/chrisbash/makefits.sh
|
UTF-8
| 1,924
| 3.671875
| 4
|
[] |
no_license
|
#!/bin/bash
# Convert CR2 to FITS (cr2fits)
# hacked to fix error in image plane assumptions, Jan 14, 2014, CWS
# For one night's directory: sort Canon CR2 raws into CR2/, extract one
# FITS per Bayer plane (M/B/G/R) via cr2fits, then rename the outputs to
# <name>.<filter>.fits.
# Assumptions:
# Where are the extra binaries, bash scripts and monsta scripts?
export PRODIR=/usr/local/bin
# What is the from-root directory where these files should go?
dirto=$dirpath/$dirname
dirfrom=$dirto
# What filter are we extracting into a FITS file? B, G, R, M (onochrome)
filter=M
# Evaluate key=value arguments so callers can override the defaults above.
eval $@
export PATH=$PRODIR:$PATH
# Load OS specific functions
source stepOS.sh
yymmdd=`echo $dirname | sed s/ut//`
#
# assume canon format will have prefix.nnnn.imtype.cr2
cd $dirto
prefix=$dirname
# Append a provenance record for this step to the night's log.
echo " " >> $prefix.log
echo "# Step1:" >> $prefix.log
echo "datestep1=\"`date`\" # Date step1 was run" >> $prefix.log
echo "dirfrom=$dirfrom # source directory of CR2" >> $prefix.log
echo "dirto=$dirto # root directory of results" >> $prefix.log
echo "obsdate=$dirname # UT date of observations" >> $prefix.log
echo Separating files into CR2 and color subdirectories
mkdir CR2 M B G R
rm M/*.fits
rm B/*.fits
rm G/*.fits
rm R/*.fits
chmod 644 *.CR2 *.cr2
mv -f *.CR2 *.cr2 CR2
cd $dirto/CR2
# One cr2fits pass per output plane; -bw/-g1/-b/-g2 select the Bayer
# channel written into each filter directory.
filter=M
color=bw
echo "Extracting $filter FITS from CR2, raw2fits -$color ..."
cr2fits -dir ../$filter -$color *.cr2
filter=B
color=g1
echo "Extracting $filter FITS from CR2, raw2fits -$color ..."
cr2fits -dir ../$filter -$color *.cr2
filter=G
color=b
echo "Extracting $filter FITS from CR2, raw2fits -$color ..."
cr2fits -dir ../$filter -$color *.cr2
filter=R
color=g2
echo "Extracting $filter FITS from CR2, raw2fits -$color ..."
cr2fits -dir ../$filter -$color *.cr2
echo "fixing up fits file names"
# ${i/.fits} strips the first ".fits" so the filter tag can be inserted.
cd $dirto/M
for i in *.fits; do mv "$i" "${i/.fits}".M.fits; done
cd $dirto/B
for i in *.fits; do mv "$i" "${i/.fits}".B.fits; done
cd $dirto/G
for i in *.fits; do mv "$i" "${i/.fits}".G.fits; done
cd $dirto/R
for i in *.fits; do mv "$i" "${i/.fits}".R.fits; done
| true
|
e751bd490d2338c8c472c2c280999323c17fac0b
|
Shell
|
Vikutorika/Cytus-II-DB
|
/longyuan.sh
|
UTF-8
| 1,222
| 3
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Cytus II DB migrate tool: unpack game assets from ./apk/cytus.apk into a
# fresh ./res/ layout (Unity data, asset bundles, raw audio/video assets).
echo "================================="
echo " Cytus II DB Migrate Tool V1.0 "
echo " A.R.C. Tech. "
echo "================================="
# clean web build
rm -rf ./res/*
# Recreate the output tree. BUG FIX: use -p so this also works on a fresh
# checkout where ./res (and intermediate dirs) do not exist yet — plain
# `mkdir ./res/unity` would fail there. -p creates parents such as
# ./res/export and ./res/export/videos implicitly.
mkdir -p ./res/unity
mkdir -p ./res/bundles
mkdir -p ./res/export/audios
mkdir -p ./res/export/images
mkdir -p ./res/export/videos/extra
mkdir -p ./res/export/videos/titles
mkdir -p ./res/export/videos/song_select
echo "Cleaned cache."
# unzip files
unzip -q ./apk/cytus.apk -d ./res/apk
echo "Unziped APK."
# version check — an APK without assets/bin/ is the old Longyuan layout,
# which this tool no longer handles.
if [ ! -d "./res/apk/assets/bin/" ];then
echo "Longyuan Version is no longer supported!"
exit
fi
# move unity
mv ./res/apk/assets/bin/Data/* ./res/unity
echo "Migrated Unity."
# move bundles
mv ./res/apk/assets/AssetBundles/* ./res/bundles
# raw assets — order matters: the specific *_song_select.mp4 pattern is
# moved before the catch-all *.mp4 sweep into videos/extra.
mv ./res/apk/assets/RawAssets/* ./res/export/videos
mv ./res/apk/assets/Titles/* ./res/export/videos/titles
mv ./res/apk/assets/*_song_select.mp4 ./res/export/videos/song_select
mv ./res/apk/assets/*.mp4 ./res/export/videos/extra
echo "Migrated Raw Assets."
# clean source
rm -rf ./res/apk
echo "Finished."
| true
|
d4858a2dde891db7e696a903c6593154e18ea741
|
Shell
|
bbary/utils
|
/shell/global_functions.sh
|
UTF-8
| 927
| 4
| 4
|
[] |
no_license
|
#!/bin/bash
# Abort the script as soon as any command returns a non-zero status.
set -e
# pulls the first occurence of a char from a list of chars
#Params:
# 1: list of chars
# 2: the char to pull
# 3: the result variable
# Returns a list of sorted floats in the second parameter
function pull_first_occurence_from_list()
{
  # Remove the first occurrence of $2 from the whitespace-separated list $1
  # and store the remaining items in the variable named by $3. Output keeps
  # the original format: each retained item is prefixed by a single space.
  # BUG FIX: operands are now quoted and the deprecated/ambiguous `-o` test
  # operator is replaced with `||`, so empty values or items containing glob
  # characters no longer break the comparison.
  # `result`/`found` are deliberately left non-local, matching the original.
  result=""
  found="false"
  for i in $1; do
    if [ "$i" != "$2" ] || [ "$found" = "true" ]; then
      result="$result $i"
    else
      found="true"
    fi
  done
  eval "$3=\$result"
}
# Sorts floats
#Params:
# 1: list of floats
# 2: sorted floats
# Returns a list of sorted floats in the parameter 2
function sort_floats()
{
  # Selection-sort the whitespace-separated floats in $1 (ascending) and
  # store the result in the variable named by $2. Output keeps the original
  # format: each element is prefixed by a single space.
  # Uses pull_first_occurence_from_list (defined above) to remove each
  # selected minimum from the working list.
  list="$1"
  sorted_list=""
  for i in $list; do
    # Current minimum starts as the first remaining element; the unquoted
    # echo deliberately collapses the leading space before cut.
    min=`echo $list | cut -d ' ' -f 1`
    for j in $list; do
      # BUG FIX: the original inlined the values as quoted awk STRING
      # constants ("10" <= "9" is TRUE lexically), so multi-digit numbers
      # sorted incorrectly. Pass them via -v and force numeric comparison
      # with +0.
      var=$(awk -v a="$j" -v b="$min" 'BEGIN{ print (a+0 <= b+0) }')
      if [ "$var" -eq 1 ]; then
        min=$j
      fi
    done
    pull_first_occurence_from_list "$list" "$min" list
    sorted_list="$sorted_list $min"
  done
  eval "$2=\$sorted_list"
}
| true
|
4e45793a91c815cbabbfe4e85e6b7a3ce56b20ba
|
Shell
|
DataDog/Miscellany
|
/dd_public_ip.sh
|
UTF-8
| 1,348
| 3.671875
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# Print a one-line usage synopsis for this script.
usage() {
  printf '%s\n' 'usage dd-public-ip-tag.sh --api_key "<key>" --app_key "<key>"'
}
#### Main
api_key=""
app_key=""
# Parse command-line flags; both keys are required to call the Datadog API.
while [ "$1" != "" ]; do
case $1 in
-i | --api_key ) shift
api_key=$1
;;
-p | --app_key ) shift
app_key=$1
;;
-h | --help ) usage
exit
;;
* ) usage
exit 1
esac
shift
done
# Both credentials are mandatory.
if [ "$api_key" = "" -o "$app_key" = "" ]; then
usage
exit 1
fi
# Query the EC2-style instance metadata service for this host's identity.
private_ip=`wget -q -O - http://instance-data/latest/meta-data/local-ipv4`
# Turn "10.0.0.5" into hostname form "ip-10-0-0-5".
# NOTE(review): private_ip is never used after this point — dead code or a
# leftover; confirm before removing.
private_ip='ip-'`echo $private_ip | sed 's/\./-/g'`
instance_id=`wget -q -O - http://instance-data/latest/meta-data/instance-id`
public_ip=`wget -qO- http://instance-data/latest/meta-data/public-ipv4`
# NOTE(review): `tag` is unused — the JSON payload is rebuilt inline in the
# curl call below.
tag='"tags" : ["public_ip:'$public_ip'"]'
# WARNING(review): echoing api_key/app_key exposes secrets in logs/console.
echo 'api_key: '$api_key
echo 'app_key: '$app_key
echo 'instance_id: '$instance_id
echo 'public_ip: '$public_ip
# Attach a public_ip:<addr> host tag to this instance via the Datadog API.
curl -X POST -H "Content-type: application/json" \
-d '{
"tags" : ["public_ip:'$public_ip'"]
}' \
"https://app.datadoghq.com/api/v1/tags/hosts/${instance_id}?api_key=${api_key}&application_key=${app_key}"
| true
|
7258c9645acc145445ea32add728a58e640dbf95
|
Shell
|
rundeck/rundeck
|
/test/run-docker-tests.sh
|
UTF-8
| 902
| 3.921875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#/ trigger local ci test run
# Strict mode: exit on error, error on unset vars, fail pipelines on any
# stage. (The "#/" line above is runtime-read by usage() — do not edit.)
set -euo pipefail
# Word-split expansions only on newlines/tabs, not spaces.
IFS=$'\n\t'
# Script arguments, frozen; an optional first argument overrides DOCKER_DIR.
readonly ARGS=("$@")
DOCKER_DIR=$PWD/test/docker
usage() {
  # Emit this script's "#/" usage header lines with the marker stripped.
  grep '^#/' "$0" | cut -c4-
}
die(){
  # Print all arguments (space-joined) to stderr and abort with status 2.
  local IFS=' '
  printf '%s\n' "$*" >&2
  exit 2
}
check_args(){
  # If the script was given arguments, the first one overrides DOCKER_DIR.
  # BUG FIX: the original read "$1", but main() invokes check_args with no
  # arguments — the script's arguments live in the global ARGS array, so $1
  # was always unset (an error under `set -u`).
  if [ ${#ARGS[@]} -gt 0 ] ; then
    DOCKER_DIR=${ARGS[0]}
  fi
}
copy_jar(){
# Copy the freshly built Rundeck war into the given directory as
# rundeck-launcher.war, creating the directory if needed.
#   $1 - destination directory
# Prints the war path being used to stdout.
local FARGS=("$@")
local DIR=${FARGS[0]}
# Glob the build output; only the first match is used — assumes exactly one
# war exists under rundeckapp/build/libs (TODO confirm).
local buildJar=( $PWD/rundeckapp/build/libs/*.war )
echo "Testing against ${buildJar[0]}"
test -d $DIR || mkdir -p $DIR
cp ${buildJar[0]} $DIR/rundeck-launcher.war
}
run_tests(){
# Run the docker test suite in the given directory.
#   $1 - docker test directory containing test.sh
# NOTE: the cd happens in the current shell, so the caller's working
# directory changes as a side effect.
local FARGS=("$@")
local DIR=${FARGS[0]}
cd $DIR
bash $DIR/test.sh
}
run_docker_test(){
  # Stage the built war into the given directory and run its test suite.
  #   $1 - docker test directory
  local FARGS=("$@")
  local DIR=${FARGS[0]}
  # BUG FIX: the original `local launcherJar=$( copy_jar $DIR ) || die ...`
  # could never die — `local` returns 0 regardless of the command
  # substitution's exit status (ShellCheck SC2155). Declare first, then
  # assign, so the || branch can actually fire on failure.
  local launcherJar
  launcherJar=$( copy_jar "$DIR" ) || die "Failed to copy jar"
  run_tests "$DIR"
}
main() {
# Entry point: resolve DOCKER_DIR (optionally overridden by script args)
# and run the docker-based test suite there.
check_args
run_docker_test $DOCKER_DIR
}
# Script args are consumed via the global ARGS array captured at the top of
# the file, which is why main is invoked without "$@".
main
| true
|
817a11470cce0b94edb4925130efcfd421de9a3a
|
Shell
|
t0mk/dosk
|
/henter
|
UTF-8
| 540
| 3.46875
| 3
|
[] |
no_license
|
#!/bin/bash
# henter — open an interactive shell inside a hyper.sh container.
# Usage: henter [container-id-or-substring]
#   - no argument: use the most recently created container; if exactly one
#     container is running, use that one.
#   - substring: resolved against `hyper ps` output; must match exactly one.
CID=$1
[ -z "$CID" ] && CID=$(hyper ps -ql)
[ "$(hyper ps -q | wc -l)" -eq 1 ] && CID=$(hyper ps -q)
# If CID is not already a (partial) hex container id, treat it as a
# substring of the `hyper ps` listing and resolve it to a real id.
# BUG FIX: the regex RHS of =~ must be UNQUOTED — the original quoted it,
# which made bash match it as a literal string, so this branch always ran.
if [[ ! "$CID" =~ ^[a-f0-9]{2,64}$ ]]; then
    NEW_ID=$(hyper ps | grep -- "$CID" | grep -v grep | awk '{print $1;}')
    # BUG FIX: quote "$NEW_ID" so embedded newlines survive; the original
    # unquoted echo collapsed them and wc -l always reported 1 line, which
    # disabled the ambiguity check below.
    NUM_LINES=$(printf '%s\n' "$NEW_ID" | wc -l)
    if [ -z "$NEW_ID" ]; then
        echo "no container like $CID"
        # BUG FIX: `return` is invalid outside a function in an executed
        # script (it printed an error and fell through); use exit.
        exit 1
    fi
    if [ "$NUM_LINES" -ge 2 ]; then
        echo "$CID is ambiguous substring"
        exit 1
    else
        CID=$NEW_ID
    fi
fi
# Prefer bash; fall back to sh for minimal images without bash.
hyper exec -it "$CID" bash || hyper exec -it "$CID" sh
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.