blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
ad8d0c1ba600f474eec02756037d068d57b39600 | Shell | carefreetime/cot-ctrlr-gpio | /socket_gpio | UTF-8 | 489 | 3.359375 | 3 | [
"MIT"
] | permissive | #!/bin/sh
# /etc/init.d/socket_gpio
case "$1" in
start)
echo "Starting socket_gpio"
# run application you want to start
python /usr/local/sbin/socket_gpio.pyc &
;;
stop)
echo "Stopping example"
# kill application you want to stop
# killall python
kill -SIGKILL $(ps aux | grep 'socket_daemon.py' | grep -v 'grep'| awk '{print $2}')
;;
*)
echo "Usage: /etc/init.d/socket_gpio {start|stop}"
exit 1
;;
esac
exit 0
| true |
25b1321067c5989fa2e6c982b77a52e8b49c0c6f | Shell | camptocamp/docker-prometheus-server | /docker-entrypoint.d/10-wait-for-configfile.sh | UTF-8 | 134 | 2.96875 | 3 | [] | no_license | #!/bin/sh -e
FILE="/etc/prometheus-config/prometheus.yml"
while [ ! -f "${FILE}" ]; do
echo "waiting for ${FILE}"
sleep 1
done
| true |
d934f201a48d36d291003e56b90a7611f715b822 | Shell | MLDL/DARTS- | /scripts/run_darts_minus_fulltrain.sh | UTF-8 | 970 | 2.796875 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/sh
# RDARTS Evaluation
SLURM_ARRAY_JOB_ID=0
SPACE='s5'
DATASET='cifar100'
DROP_PROB=0.0
WEIGHT_DECAY=0.0003
#CONFIG="--layers 20 --init_channels 36"
CONFIG=""
for i in $(seq 0 2);do
let j=$i
let SLURM_ARRAY_TASK_ID=$i
echo $i $j
python src/evaluation/train.py --data /home/work/dataset/cifar $CONFIG --gpu $j --cutout --auxiliary --job_id $SLURM_ARRAY_JOB_ID --task_id $i --seed 1 --space $SPACE --dataset $DATASET --search_dp $DROP_PROB --search_wd $WEIGHT_DECAY --search_task_id $i --archs_config_file ./experiments/search_logs/darts_minus_arch.yaml > train_darts_minus_$DATASET-$SPACE-$DROP_PROB-$WEIGHT_DECAY-task-$i.log 2>&1 &
done
#SGAS Evaluation
# gpu=5
# for i in $(seq 0 1);do
# echo $i $gpu
# export CUDA_VISIBLE_DEVICES=$gpu && python src/eval/train.py --data /home/work/dataset/cifar --cutout --auxiliary --arch DARTS_MINUS_C10_LINEAR_S3_$i >& DARTS_MINUS_C10_LINEAR_S3_${i}_fulltrain.log &
# let gpu=($gpu+1)%8
# done | true |
e10b45d89263868efb05cdbef27448a191925452 | Shell | delkyd/alfheim_linux-PKGBUILDS | /opensubtitles-uploader/PKGBUILD | UTF-8 | 1,088 | 2.875 | 3 | [] | no_license | # Maintainer: Michał Lisowski <lisu@riseup.net>
pkgname=opensubtitles-uploader
pkgver=2.2.0
pkgrel=1
pkgdesc="Desktop app to upload subtitles to OpenSubtitles, using Node.JS and NWjs"
arch=('i686' 'x86_64')
url="http://blog.opensubtitles.eu/opensubtitlesorg/web/opensubtitles-uploader/"
license=('GPL3')
makedepends=('npm' 'gulp')
source=("${pkgname}::https://github.com/vankasteelj/${pkgname}/archive/${pkgver}.tar.gz"
"${pkgname}.desktop"
"${pkgname}.png")
md5sums=('08ea6d624cd8f2b10b5de8bd7d5613e5'
'f38a85d864e5aca6706a76adec1c5e86'
'f8e90af7251e2c76a9d863e283aa3804')
prepare() {
cd "$pkgname-$pkgver"
}
build() {
cd "$pkgname-$pkgver"
npm install
gulp build
}
package() {
cd "$pkgname-$pkgver"
install -m 0755 -d ${pkgdir}/opt/${pkgname}
install -m 0755 -d ${pkgdir}/usr/share/applications
install -m 0755 -d ${pkgdir}/usr/share/icons
install -m 0644 ${srcdir}/${pkgname}.desktop ${pkgdir}/usr/share/applications
install -m 0644 ${srcdir}/${pkgname}.png ${pkgdir}/usr/share/icons
cp -R build/${pkgname}/linux*/* ${pkgdir}/opt/${pkgname}
}
| true |
47de24edab4d55b06878bfd499af7e610000796c | Shell | louvelaz/jaguaos | /software/ASMInstructions/A7.sh | UTF-8 | 1,032 | 3.625 | 4 | [] | no_license | #!/bin/bash
# A7.sh
#
# Execute an interrupt
#
# @author Sergio Pohlmann <sergio@ycube.net>
# @date May, 20 of 2016
#
####################################################
baseDir="/opt";
. ${baseDir}/jaguaOs/config/config.sh
. ${baseDir}/jaguaOs/config/ASMfunctions.sh
. ${baseDir}/jaguaOs/config/Basefunctions.sh
function interrupt
{
function=$(readRegister RA);
case $function in
"00")
;;
"01") # Print a string started in a DR address
address=$(readRegister DR);
address=`echo $((16#${address}))`;
char="";
escape=0;
backslash="\\";
while [ "${char}" != "$" ];
do
readMemoryPosition ${address};
if [ "${char}" == "${backslash}" ] ; then
escape=1;
else
if [ "${escape}" == "1" ] ; then
escape=0;
case ${char} in
"t")
echo -n " ";
;;
"n")
echo
;;
esac
else
if [ "${char}" != "$" ] ; then
echo -n $char;
fi
fi
fi
address=$(( ${address} + 1 ));
done
;;
esac
}
interrupt;
| true |
817cba35e690e4b1077a75ac025335476791dfec | Shell | nrxr/quickconfig | /pre-setup.sh | UTF-8 | 5,044 | 3.8125 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env sh
#
# Copyright © 2020 nrxr <nrxr@disroot.org>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
help='
Usage:
pre-setup.sh [-h|--help] # default
pre-setup.sh setup # pre-install of rcm, git and curl
pre-setup.sh pkm # print the package manager used in this machine
pre-setup.sh os # print what os is being used on the machine
Options:
-h --help Show this help message.
--version Show the version.
'
version='
Version: 1.1.0
© 2020, nrxr <nrxr@disroot.org>
Released under the MIT license terms.
'
print_version() {
echo "$version"
}
print_help() {
echo "$help"
}
guessOS() {
if uname -a | grep -q "Darwin"; then
OS='mac'
elif uname -a | grep -q -i linux; then
OS='linux'
fi
}
printGuessOS() {
guessOS "$@"
printf "%s" "$OS"
}
guessPKM() {
if command -v xbps-install > /dev/null; then
PM='xbps-install'
elif command -v brew > /dev/null; then
PM='brew'
elif command -v apk > /dev/null; then
PM='apk'
elif command -v yay > /dev/null; then
PM='yay'
elif command -v pacman > /dev/null; then
PM='pacman'
elif command -v apt > /dev/null; then
PM='apt'
fi
}
printGuessPKM() {
guessPKM "$@"
printf "%s" "$PM"
}
installYay() {
sudo pacman -S base-devel git
mkdir -p ~/code/src/aur.archlinux.org/
git clone https://aur.archlinux.org/yay.git ~/code/src/aur.archlinux.org/yay
cd ~/code/src/aur.archlinux.org/yay && makepkg -risc && cd - || exit
}
installRcm() {
guessOS
guessPKM
PKGS='rcm curl git'
isinstalled=$(command -v rcup)
if [ "$isinstalled" = "" ]; then
if [ "${PM}" = "brew" ] && [ "${OS}" = "mac" ]; then
printf "will install rcm with brew...\n"
{
brew tap thoughtbot/formulae
brew install ${PKGS}
}
elif [ "${PM}" = "xbps" ]; then
printf "will install rcm with xbps...\n"
sudo xbps-install -S ${PKGS}
elif [ "${PM}" = "apk" ]; then
printf "will install rcm with apk...\n"
sudo apk add ${PKGS}
elif [ "${PM}" = "apt" ]; then
printf "will install rcm with apt...\n"
sudo apt install ${PKGS}
elif [ "${PM}" = "pacman" ]; then
printf "will install yay with pacman and then install rcm...\n"
installYay "$@"
printf "yay installed; installing rcm...\n"
yay -S ${PKGS}
elif [ "${PM}" = "yay" ]; then
yay -S ${PKGS}
else
printf "please check in https://github.com/thoughtbot/rcm how to install rcm in your system\n"
exit
fi
fi
}
installer() {
guessOS
guessPKM
# PKGS="$@"
for pkg in "$@"; do
if command -v "${pkg}" > /dev/null; then
echo "${pkg} is already installed"
continue
fi
printf "On %s and using %s as pm...\n" "${OS}" "${PM}"
if [ "${PM}" = "brew" ] && [ "${OS}" = "mac" ]; then
printf "will install %s with brew...\n" "${pkg}"
brew install "${pkg}"
elif [ "${PM}" = "xbps" ]; then
printf "will install %s with xbps...\n" "${pkg}"
sudo xbps-install -S "${pkg}"
elif [ "${PM}" = "apk" ]; then
printf "will install %s with apk...\n" "${pkg}"
sudo apk add "${pkg}"
elif [ "${PM}" = "apt" ]; then
printf "will install %s with apt...\n" "${pkg}"
sudo apt install "${pkg}"
elif [ "${PM}" = "pacman" ]; then
printf "will install yay with pacman and then install %s...\n" "${pkg}"
installYay "$@"
printf "yay installed; installing %s...\n" "${pkg}"
yay -S "${pkg}"
elif [ "${PM}" = "yay" ]; then
printf "will install %s with yay...\n" "${pkg}"
yay -S "${pkg}"
else
printf "can't handle your package manager, please install %s\n" "${pkg}"
exit
fi
done
}
main() {
cmd="$1"
case "$cmd" in
-h|--help) shift; print_help "$@";;
--version) shift; print_version "$@";;
setup) shift; installRcm "$@";;
pkm) shift; printGuessPKM "$@";;
os) shift; printGuessOS "$@";;
install) shift; installer "$@";;
*) print_help "$@";;
esac
}
main "$@"
| true |
426d993419ddb5ce66d9cd9f1c608dd4d1aee33e | Shell | kloudsio/server-2 | /test.sh | UTF-8 | 888 | 2.6875 | 3 | [] | no_license | #!/bin/bash
BASE=localhost:1999
apps=`curl -s \
-H "Content-Type: application/json" \
localhost:1999/apps`
echo 'Apps:'
iojs -e "console.log($apps.map(function(v){ return v.name } ).join(', '))"
echo
disabled=`curl -s \
-H "Content-Type: application/json" \
localhost:1999/disabled`
echo 'Disabled Apps:'
iojs -e "console.log($disabled.map(function(v){ return v.name } ).join(', '))"
echo
curl -i \
-H "Content-Type: application/json" \
-d '{
"email": "'$(date +%s)'@klouds.io",
"password": "TEST123"
}' \
http://localhost:1999/register \
http://localhost:1999/login
# echo -n -e "\nPOST /login: \t--\t"
# curl --silent -i localhost:1999/login \
# -d email=test@klouds.io \
# -d password=test \
# --header "Content-Type:application/json"
# # | head -1
#
# echo -n -e "\nGET /subscribe: \t--\t"
# curl --silent -i localhost:1999/subscribe\
# | head -1
| true |
894bf5ab3eeef7d022dc63a35c7a3e8e9967742a | Shell | hollasch/ray4 | /ray4/craig/convert | UTF-8 | 191 | 3.1875 | 3 | [
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] | permissive | #!/bin/sh
files=`ls slice_*`
for i in $files
do
echo
echo $i:
mtvtorast $i
done
echo "Delete data files: (y/n): "
read del
if test "$del" = y
then
for i in $files
do
rm -i $i
done
fi
| true |
c35ed7397898493422f721c9105f2f940336f67e | Shell | git2samus/rcfiles | /symlink_dotfiles.sh | UTF-8 | 264 | 3.59375 | 4 | [] | no_license | #!/bin/bash
# ensure we're on the base of the dotfiles repo
toplevel="$(git rev-parse --show-toplevel)" && cd "$toplevel" || exit
for f in .[!.]*; do
[[ $f = .git ]] && continue
[[ -f ~/$f || -L ~/$f ]] && mv ~/"$f"{,.bak}
ln -s "$toplevel/$f" ~/"$f"
done
| true |
1aa0b2387a47f2a25999b2c1c215af6503c13ef2 | Shell | tmbuza/iMAP | /code/requirements/00_get_raw_data.bash | UTF-8 | 1,282 | 2.75 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
## Author: TMB, April, 2018
## Prepare rawdata folders
mkdir data
mkdir data/metadata
mkdir data/raw
mkdir data/raw/qced
# <br>
## Obtain the raw data and put the files into the data/raw directory
# unzip MiSeqSOPData.zip
cp ./data/demo/raw.zip ./
unzip raw.zip
cp raw/* ./data/raw
rm -rf raw* __MACOSX
# Copy demo metadata and put the files in the data/metadata directory
cp ./data/demo/metadata/* ./data/metadata/
# Copy mapping fastq file and put it in the data/raw/qced directory
# cp ./data/demo/mapping.files ./data/raw/qced/qced.files
# #Raw data for the iMAP test package (19 murine gut samples + 1 Mock sample)
# wget --no-check-certificate https://www.mothur.org/w/images/d/d6/MiSeqSOPData.zip
# unzip MiSeqSOPData.zip
# gzip MiSeq_SOP/*.fastq
# cp MiSeq_SOP/*.fastq.gz data/raw
# rm MiSeqSOPData.zip
# rm -rf MiSeq_SOP __MACOSX
# #Raw data for a reproducible manuscript (360 murine gut samples + 2 Mock samples)
# # * Download raw data from https://www.mothur.org/MiSeqDevelopmentData/StabilityWMetaG.tar
# # * Ran the following from the project's root directory
# wget --no-check-certificate https://www.mothur.org/MiSeqDevelopmentData/StabilityWMetaG.tar
# tar xvf StabilityWMetaG.tar -C ./data/raw/
# rm StabilityWMetaG.tar
# # ```
| true |
60c66f050fa7b28d100fb1111f1f85ca81819556 | Shell | pinnokio/scs-test-env | /helper_scripts/pg_group2_up | UTF-8 | 499 | 2.734375 | 3 | [] | no_license | #!/bin/bash
PG_IMG=pinnokio/postgresql:base
PIPE=helper_scripts/pipework
sudo $PIPE bush2 $(docker run -d -expose 22 -expose 5432 -e ROLE=MASTER2 $PG_IMG) 192.168.20.20/24
echo "Master of AUTH group started"
sleep 30
sudo $PIPE bush2 $(docker run -d -expose 22 -expose 5432 -e ROLE=SLAVE2 $PG_IMG) 192.168.20.21/24
echo "Slave 1 of AUTH group started"
sleep 60
sudo $PIPE bush2 $(docker run -d -expose 22 -expose 5432 -e ROLE=SLAVE2 $PG_IMG) 192.168.20.22/24
echo "Slave 2 of AUTH group started"
| true |
5e8bfe594b4b8dfde4a7eaa41db1ef17c605e2e1 | Shell | bttf/snes_dev | /snes/asm/datagen | UTF-8 | 613 | 2.953125 | 3 | [] | no_license | #!/usr/bin/rc
cat <<!
typedef struct {
char *mnemonic;
short addr_m[22];
} Instruction;
#define ADDR_M_COUNT 22
!
awk -F' ' '
BEGIN { print "enum" }
{
if (NR==1) printf("{ "); else printf(", ")
printf("%4s_ // %2d | %7s | %s\n",tolower($1),NR-1,$2,$3) }
END { print "};" }
' <modes
echo;
awk '
BEGIN {
print "#define MNEMONIC_COUNT 99"
print "Instruction Set[] = {" }
{
printf("{\"%s\",{", $1)
for (i=2; i<=NF; i++) {
if (i==NF) fmt="%2s}"; else fmt="%s,"
if ($i=="--") { printf(fmt," -1 ") }
else { printf("0x"); printf(fmt,$i) }}
printf("},\n") }
END { print "};" }
' <instr
| true |
2ed9e110bfcd5fa17cbe2fd97e4d5e5672226675 | Shell | mortaromarcello/a10-rescue-scripts | /init-display.sh | UTF-8 | 248 | 2.828125 | 3 | [] | no_license | #!/bin/sh
if [ ! -f /mnt/sysconfig/system.bin ]; then
echo "system.bin has not yet been extracted, exiting"
exit 1
fi
# Options for overriding the default output display
# VGA
#/sbin/a10_display vga mode 2
# TV
#/sbin/a10_display tv mode 0
| true |
c93c92df98d87129dcb94cc283aeecb8c806925b | Shell | leimao/Chicago_Community_Crime_App | /backend/hdfs/getWeather_cloud.sh | UTF-8 | 472 | 3.8125 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Pass target data directory as argument
if [ -z "$1" ]
then
dataDir="../data/weatherData"
else
dataDir="$1"
fi
mkdir -p $dataDir
# cd ./data/weatherData
# Change year here to add more data
# 2001 is an appropriate starting year
year=2001
while [ $year -le 2017 ]
do
wget ftp://ftp.ncdc.noaa.gov/pub/data/gsod/$year/gsod_$year.tar -P $dataDir
(( year++ ))
done
for f in $dataDir/*.tar;
do
tar xf $f -C $dataDir
rm $f
done
| true |
ce49f6a7ccb625d1a3c33bffa3f5473778079ab6 | Shell | skraynev/murano-docker-suite | /Kubernetes/KubernetesCluster/package/Resources/scripts/member-etcd-setup.sh | UTF-8 | 1,265 | 3.640625 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# $1 - NAME
# $2 - IP
# $3 - ETCD_INITIAL_CLUSTER
mkdir /var/lib/etcd
if [[ $(which systemctl) ]]; then
systemctl stop etcd
sed -i.bak "s/%%NAME%%/$1/g" environ/etcd
sed -i.bak "s/%%IP%%/$2/g" environ/etcd
sed -i.bak "s/%%STATE%%/existing/g" environ/etcd
sed -i.bak "s#%%CLUSTER_CONFIG%%#$3#g" environ/etcd
cp -f environ/etcd /etc/default/
cp -f systemd/etcd.service /etc/systemd/system/
systemctl daemon-reload
systemctl enable etcd
systemctl start etcd
else
service etcd stop
sed -i.bak "s/%%NAME%%/$1/g" default_scripts/etcd-member
sed -i.bak "s/%%IP%%/$2/g" default_scripts/etcd-member
sed -i.bak "s#%%CLUSTER_CONFIG%%#$3#g" default_scripts/etcd-member
cp -f default_scripts/etcd-member /etc/default/etcd
cp init_conf/etcd.conf /etc/init/
chmod +x initd_scripts/etcd
cp initd_scripts/etcd /etc/init.d/
service etcd start
fi
#check if cluster works well after member adding
count=30
echo "Registration member $1 in etcd cluster" >> /tmp/etcd.log
while [ $count -gt 0 ]; do
/opt/bin/etcdctl cluster-health >> /tmp/etcd.log
if [ $? -eq 0 ]; then
echo "Member $1 started" >> /tmp/etcd.log
sleep 10
exit 0
fi
((count-- ))
sleep 5
done
echo "Member $1 is not started" >> /tmp/etcd.log
exit 1 | true |
d0ac0d4b0edc6a77d086d123659e58c8ddeb1367 | Shell | bobyla1/arduinosketchbook | /hardwareTests/xbee/prog.out.sh | UTF-8 | 1,404 | 2.953125 | 3 | [] | no_license | #!/bin/bash
port=/dev/ttyUSB0
#line passing
#commands="ATRE,ID3456,MY2,DL1,D25,P02,P12,IU1,IA1,WR" #for output module
#commands="ATRE,ID3456,MY1,DL2,D13,IR14,IT1,WR" #for input module, sends to output
# commands="ATRE,ID3456,MY1,DL2,D02,D12,D23,IRC8,IT1,SM5,SP28,ST1,WR" # low power (sleeping) input module. gas pulse on d2(p18), current on d1(19) and battv on d0(20)
#simple
#commands="ATRE,ID3456,MY1,DL2,SM1,WR" #for remote
commands="ATRE,ID3456,MY2,DL3,IU1,WR" #for nanode
#commands="ATRE,ID3456,MY3,DL2,WR" #for robot
# RE reload defaults
# MY my address
# DL which ID to send to
# ID pan address
# D0 - IO line 0 (0 disabled, 2 adc, 3 di, 4 do low, 5 do high
# D1 - IO line 1
# IR sample rate x ms
# IT samples to buffer
# IU enable/disable sending data out the uart
# IA (to do with line passing)
# SM - sleep mode 4 = cyclic, 5 cyclic with wake
# ST - time before slep x 1ms
# SP - cyclic sleep period x 10ms
echo -n +++ > $port
sleep 4
echo -ne $commands\\r > $port
#for i in $commands ; do
# echo $i
# echo -ne AT$i\\r > $port
# sleep 1
#done
#> /dev/ttyUSB1 ; sleep 2; echo -ne ATIR14\\r > /dev/ttyUSB1
#ATID3456 –> PAN ID
#ATMY2 –> my address 2
#ATDL1 –> destination address 1
#ATP02 –> PWM 0 in PWM mode
#ATD15 –> output 1 in digital out high mode
#ATIU1 –> I/O output enabled
#ATIA1 –> I/O input from address 1
#ATWR –> write settings to firmware
| true |
0b2e554ced244ed73f3f2ea6d529e334c02dbb46 | Shell | menardorama/cert-manager | /hack/update-deps.sh | UTF-8 | 436 | 2.78125 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
set -o errexit
set -o nounset
set -o pipefail
SCRIPT_ROOT=$(dirname "${BASH_SOURCE}")
REPO_ROOT="${SCRIPT_ROOT}/.."
pushd "${REPO_ROOT}"
echo "+++ Running dep ensure"
dep ensure -v "$@"
echo "+++ Cleaning up erroneous vendored testdata symlinks"
rm -Rf vendor/github.com/prometheus/procfs/fixtures \
vendor/github.com/hashicorp/go-rootcerts/test-fixtures \
vendor/github.com/json-iterator/go/skip_tests
popd
| true |
8d18f581a88e7bc79f58aaeb0b0a5c720b0adf86 | Shell | MarksVoroncovs/DMI | /shell_relational_operators_commants.sh | UTF-8 | 523 | 3.09375 | 3 | [] | no_license | #!/bin/sh
a=10
b=20
# 2.piemers
echo"Vai skaitlis $a ir vienāds ār skaitli $b?"
if [ $a -eq $b ]
then
echo "$Jā,a -eq $b ir vienādi skaitļš"
else
echo"Nē,$a un $b ir dazadi skaitli"
fi
# 1.piemers - salidzināsana - vai ir vienads (atslēgo -eq)
a=10
b=20
echo"-------------------- 1.pimērs -----------"
echo"Vai skaitlis $a ir vienāds ār skaitli $b"
if [ $a -eq $b ]
then
echo "Jā"
echo "$a -eq $b ir vienādi skaitļš"
else
echo"Nē"
echo"$a un $b ir dazadi skaitli"
fi
echo"----------------------------"
| true |
1a62cc314ee22d2c15e0daa244f31c370757c8a9 | Shell | asoroosh/xDF_Paper18 | /Sim/FPR/AUC/Sub_SenSpc_Alt.sh | UTF-8 | 474 | 2.59375 | 3 | [] | no_license | for t_cnt in `seq 1 4`
do
FileName="FPR_t${t_cnt}_Alt.sh"
cat > $FileName << EOF
#!/bin/bash
#$ -o /home/wmrnaq/ACAnal/Sim/FPR/AUC/logs
#$ -e /home/wmrnaq/ACAnal/Sim/FPR/AUC/logs
#$ -l h_rt=02:00:00
#$ -l h_vmem=6G
#$ -N SenSpc_AUC_${t_cnt}_Alt
#$ -r y
#$ -t 1-500
cd /home/wmrnaq/ACAnal/Sim/FPR/AUC
. /etc/profile
module add matlab
matlab -nodisplay -nosplash -nodesktop -nojvm -r "t_cnt=${t_cnt};SenSpc_Alt"
EOF
qsub $FileName
done
| true |
004bbf9faf4fbdbc35674e0ad37ea2e05acf6026 | Shell | BangJagu/on | /bun | UTF-8 | 21,938 | 3.015625 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
#proveedor=$(curl -s https://www.whoismyisp.org | grep -oP -m1 '(?<=isp">).*(?=</p)')
#if [[ $proveedor == *Amazon* ]]; then
#sudo su
#fi
#if [[ $proveedor == *Microsoft* ]]; then
#sudo su
#fi
if [[ "$USER" != 'root' ]]; then
echo "Este Script Solo Funciona Para Usuarios root"
exit
fi
sistema_operativo=$(cat /etc/os-release)
MYIP=$(wget -qO- ipv4.icanhazip.com);
MYIP2="s/xxxxxxxxx/$MYIP/g";
Plugin_autent='';
if [ -f /usr/lib/x86_64-linux-gnu/openvpn/plugins/openvpn-plugin-auth-pam.so ]; then
Plugin_autent='/usr/lib/x86_64-linux-gnu/openvpn/plugins/openvpn-plugin-auth-pam.so';
else
Plugin_autent='/usr/lib/openvpn/openvpn-plugin-auth-pam.so';
fi
function ubuntu_16(){
echo "INSTALANDO OPENVPN EN UBUNTU 16...\n"
apt-get -y install openvpn easy-rsa openssl iptables > /dev/null 2>&1
cp -r /usr/share/easy-rsa/ /etc/openvpn
mkdir /etc/openvpn/easy-rsa/keys > /dev/null 2>&1
sed -i 's|export KEY_COUNTRY="mx"|export KEY_COUNTRY="mx"|' /etc/openvpn/easy-rsa/vars
sed -i 's|export KEY_PROVINCE="mx"|export KEY_PROVINCE="mx"|' /etc/openvpn/easy-rsa/vars
sed -i 's|export KEY_CITY="mx"|export KEY_CITY="mx"|' /etc/openvpn/easy-rsa/vars
sed -i 's|export KEY_ORG="mx"|export KEY_ORG="mx"|' /etc/openvpn/easy-rsa/vars
sed -i 's|export KEY_EMAIL="@mx"|export KEY_EMAIL="@mx"|' /etc/openvpn/easy-rsa/vars
sed -i 's|export KEY_OU="mx"|export KEY_OU="mx"|' /etc/openvpn/easy-rsa/vars
sed -i 's|export KEY_NAME="cod3err0r"|export KEY_NAME="cod3err0r"|' /etc/openvpn/easy-rsa/vars
sed -i 's|export KEY_OU=cod3err0r|export KEY_OU=cod3err0r|' /etc/openvpn/easy-rsa/vars
# Create Diffie-Helman Pem
openssl dhparam -out /etc/openvpn/dh2048.pem 2048 > /dev/null 2>&1
# Create PKI
cd /etc/openvpn/easy-rsa
. ./vars
./clean-all
export EASY_RSA="${EASY_RSA:-.}"
"$EASY_RSA/pkitool" --initca $*
# Create key server
export EASY_RSA="${EASY_RSA:-.}"
"$EASY_RSA/pkitool" --server server
# Setting KEY CN
export EASY_RSA="${EASY_RSA:-.}"
"$EASY_RSA/pkitool" client
# cp /etc/openvpn/easy-rsa/keys/{server.crt,server.key,ca.crt} /etc/openvpn
cd
cp /etc/openvpn/easy-rsa/keys/server.crt /etc/openvpn/server.crt
cp /etc/openvpn/easy-rsa/keys/server.key /etc/openvpn/server.key
cp /etc/openvpn/easy-rsa/keys/ca.crt /etc/openvpn/ca.crt
# Setting Server
cd /etc/openvpn/
rm server.conf > /dev/null 2>&1
wget "https://github.com/egrojlive/codeerror/raw/master/server.conf" > /dev/null 2>&1
#Create OpenVPN Config
cd
rm client.ovpn > /dev/null 2>&1
wget "https://github.com/egrojlive/codeerror/raw/master/client.ovpn" > /dev/null 2>&1
cp client.ovpn clienttcp.ovpn
sed -i $MYIP2 clienttcp.ovpn;
echo '<ca>' >> clienttcp.ovpn
cat /etc/openvpn/ca.crt >> clienttcp.ovpn
echo '</ca>' >> clienttcp.ovpn
# Restart OpenVPN
/etc/init.d/openvpn restart > /dev/null 2>&1
service openvpn start > /dev/null 2>&1
service openvpn status
# Setting USW
apt-get install ufw -y > /dev/null 2>&1
ufw allow ssh
ufw allow 80/tcp
ufw allow 81/tcp
ufw allow 3128/tcp
ufw allow 8080/tcp
ufw allow 107/tcp
ufw allow 108/tcp
ufw allow 109/tcp
ufw allow 110/tcp
ufw allow 111/tcp
ufw allow 442/tcp
ufw allow 443/tcp
ufw allow 666/tcp
ufw allow 8181/tcp
ufw allow 4444/tcp
sed -i 's|DEFAULT_INPUT_POLICY="DROP"|DEFAULT_INPUT_POLICY="ACCEPT"|' /etc/default/ufw
sed -i 's|DEFAULT_FORWARD_POLICY="DROP"|DEFAULT_FORWARD_POLICY="ACCEPT"|' /etc/default/ufw
cd /etc/ufw/
rm beore.rules > /dev/null 2>&1
wget "https://github.com/egrojlive/codeerror/raw/master/before.rules" > /dev/null 2>&1
cd
DEBIAN_FRONTEND=noninteractive | echo "y" | ufw enable > /dev/null 2>&1
ufw status
#ufw disable
# set ipv4 forward
echo 1 > /proc/sys/net/ipv4/ip_forward
sed -i 's|#net.ipv4.ip_forward=1|net.ipv4.ip_forward=1|' /etc/sysctl.conf
echo "PROCESO TERMINADO CORREACTAMENTE"
}
function ubuntu(){
echo "INSTALANDO OPENVPN EN UBUNTU 16+...\n"
echo "VERIFICANDO REQUERIMIENTOS"
TCP_SERVICE_AND_CONFIG_NAME="openvpn_tcp"
UDP_SERVICE_AND_CONFIG_NAME="openvpn_udp"
###############################################################
if [[ "$USER" != 'root' ]]; then
echo "LO SENTIMOS ESTE SCRIPT SOLO SE PUEDE EJECUTAR COMO ROOT"
exit
fi
###############################################################
if [[ ! -e /dev/net/tun ]]; then
echo "TUN/TAP is not available"
exit
fi
###############################################################
if grep -qs "CentOS release 5" "/etc/redhat-release"; then
echo "CentOS 5 NO ES SOPORTADO"
exit
fi
###############################################################
if [[ -e /etc/debian_version ]]; then
OS=debian
RCLOCAL='/etc/rc.local'
elif [[ -e /etc/centos-release || -e /etc/redhat-release ]]; then
OS=centos
RCLOCAL='/etc/rc.d/rc.local'
# Needed for CentOS 7
chmod +x /etc/rc.d/rc.local
else
echo "ESTE SCRIPT SOLO FUNCIONA EN : Debian Y Ubuntu"
exit
fi
###############################################################
newclienttcp () {
# This function is used to create tcp client .ovpn file
cp /etc/openvpn/clienttcp-common.txt ~/"$1tcp.ovpn"
echo "<ca>" >> ~/"$1tcp.ovpn"
cat /etc/openvpn/easy-rsa/pki/ca.crt >> ~/"$1tcp.ovpn"
echo "</ca>" >> ~/"$1tcp.ovpn"
echo "<cert>" >> ~/"$1tcp.ovpn"
cat /etc/openvpn/easy-rsa/pki/issued/"$1.crt" >> ~/"$1tcp.ovpn"
echo "</cert>" >> ~/"$1tcp.ovpn"
echo "<key>" >> ~/"$1tcp.ovpn"
cat /etc/openvpn/easy-rsa/pki/private/"$1.key" >> ~/"$1tcp.ovpn"
echo "</key>" >> ~/"$1tcp.ovpn"
if [ "$TLS" = "1" ]; then #check if TLS is selected to add a TLS static key
echo "key-direction 1" >> ~/"$1tcp.ovpn"
echo "<tls-auth>" >> ~/"$1tcp.ovpn"
cat /etc/openvpn/easy-rsa/pki/private/ta.key >> ~/"$1tcp.ovpn"
echo "</tls-auth>" >> ~/"$1tcp.ovpn"
fi
if [ $TLSNEW = 1 ]; then
echo "--tls-version-min 1.2" >> ~/"$1.ovpn"
fi
}
###############################################################
function version_gt() {
test "$(echo "$@" | tr " " "\n" | sort -V | head -n 1)" != "$1";
}
###############################################################
IP=$(wget -qO- ipv4.icanhazip.com)
###############################################################
clear
echo "listening to. " $IP
#read -p "IP address: " -e -i $IP IP
echo ""
#read -p "Do you want to run a UDP server [y/n]: " -e -i y UDP
TCP=1
#read -p "Do you want to run a TCP server [y/n]: " -e -i n TCP
###############################################################
clear
#read -p "What UDP port do you want to run OpenVPN on?: " -e -i 1194 PORT
echo "PUERTO DE ESCUCHA : 4444"
PORTTCP=4444
#read -p "What TCP port do you want to run OpenVPN on?: " -e -i 443 PORTTCP
echo "USANDO KEY 2048 BITS"
KEYSIZE=2048
DIGEST=SHA256
#read -p "Digest Size [1-2]: " -e -i 1 DIGEST
###############################################################
AES=0
grep -q aes /proc/cpuinfo #Check for AES-NI availability
if [[ "$?" -eq 0 ]]; then
AES=1
fi
if [[ "$AES" -eq 1 ]]; then
echo "Your CPU supports AES-NI instruction set."
fi
echo "USANDO CIRADO : AES-256-CBC"
CIPHER=AES-256-CBC
echo "USANDO TLS-AUTH"
TLS=1
#read -p "Do you want to use additional TLS authentication [y/n]: " -e -i y TLS
INTERNALNETWORK=1
echo "USANDO DNS 1.1.1.1 - 9.9.9.9"
#read -p "Allow internal networking [y/n]: " -e -i y INTERNALNETWORK
DNSRESOLVER=0
ANTIVIR=0
###############################################################
if [ "$DNSRESOLVER" = 0 ]; then
DNS=1
#read -p "DNS [1-6]: " -e -i 1 DNS
CLIENT='client'
#read -p "Client name: " -e -i client CLIENT
if [[ "$OS" = 'debian' ]]; then
apt-get update -qq -y > /dev/null 2>&1
apt-get install openvpn iptables openssl -y -qq > /dev/null 2>&1
apt-get install build-essential libssl-dev liblzo2-dev libpam0g-dev easy-rsa -y > /dev/null 2>&1
ovpnversion=$(openvpn --status-version | grep -o "([0-9].*)" | sed 's/[^0-9.]//g')
if version_gt $ovpnversion "2.3.3"; then
echo "Your OpenVPN version is $ovpnversion and it supports"
echo "NOTE: Your client also must use version 2.3.3 or newer"
TLSNEW=1
#read -p "Force TLS 1.2 [y/n]: " -e -i n TLSNEW
fi
###############################################################
if [[ -d /etc/openvpn/easy-rsa/ ]]; then
rm -rf /etc/openvpn/easy-rsa/
fi
# Get easy-rsa
wget --no-check-certificate -O ~/EasyRSA-3.0.1.tgz https://github.com/OpenVPN/easy-rsa/releases/download/3.0.1/EasyRSA-3.0.1.tgz > /dev/null 2>&1
tar xzf ~/EasyRSA-3.0.1.tgz -C ~/
mkdir /etc/openvpn
mv ~/EasyRSA-3.0.1/ /etc/openvpn/EasyRSA-3.0.1
mv /etc/openvpn/EasyRSA-3.0.1/ /etc/openvpn/easy-rsa/
chown -R root:root /etc/openvpn/easy-rsa/
rm -rf ~/EasyRSA-3.0.1.tgz
cd /etc/openvpn/easy-rsa/
# Create the PKI, set up the CA, the DH params and the server + client certificates
./easyrsa init-pki
cp vars.example vars
sed -i 's/#set_var EASYRSA_KEY_SIZE 2048/set_var EASYRSA_KEY_SIZE '$KEYSIZE'/' vars
./easyrsa --batch build-ca nopass
./easyrsa gen-dh
./easyrsa build-server-full server nopass
./easyrsa build-client-full "$CLIENT" nopass
./easyrsa gen-crl
openvpn --genkey --secret /etc/openvpn/easy-rsa/pki/private/ta.key
cp pki/ca.crt pki/private/ca.key pki/dh.pem pki/issued/server.crt pki/private/server.key /etc/openvpn
echo "GENERANDO CERTIFICADO"
echo "port $PORTTCP
proto tcp
dev tun
ca ca.crt
cert server.crt
key server.key
dh dh.pem
push \"register-dns\"
topology subnet
server 10.9.0.0 255.255.255.0
ifconfig-pool-persist ipp.txt
cipher AES-256-CBC
user nobody
group nogroup
client-cert-not-required
username-as-common-name
plugin $Plugin_autent login
sndbuf 0
rcvbuf 0
push \"redirect-gateway def1 bypass-dhcp\"
--tls-auth /etc/openvpn/easy-rsa/pki/private/ta.key 0
push \"dhcp-option DNS 1.1.1.1\"
push \"dhcp-option DNS 9.9.9.9\"
keepalive 10 120
comp-lzo
persist-key
persist-tun
status openvpn-status.log
verb 3
crl-verify /etc/openvpn/easy-rsa/pki/crl.pem
client-to-client
" > /etc/openvpn/$TCP_SERVICE_AND_CONFIG_NAME.conf
sed -i 's|#net.ipv4.ip_forward=1|net.ipv4.ip_forward=1|' /etc/sysctl.conf
sed -i " 5 a\echo 1 > /proc/sys/net/ipv4/ip_forward" $RCLOCAL # Added for servers that don't read from sysctl at startup
echo 1 > /proc/sys/net/ipv4/ip_forward
# Set NAT for the VPN subnet
if [ "$INTERNALNETWORK" = 1 ]; then
if [ "$TCP" = 1 ]; then
iptables -t nat -A POSTROUTING -s 10.9.0.0/24 ! -d 10.9.0.0/24 -j SNAT --to $IP
sed -i "1 a\iptables -t nat -A POSTROUTING -s 10.9.0.0/24 ! -d 10.9.0.0/24 -j SNAT --to $IP" $RCLOCAL
fi
else
if [ "$TCP" = 1 ]; then
iptables -t nat -A POSTROUTING -s 10.9.0.0/24 ! -d 10.9.0.1 -j SNAT --to $IP #This line and the next one are added for tcp server instance
sed -i "1 a\iptables -t nat -A POSTROUTING -s 10.9.0.0/24 -j SNAT --to $IP" $RCLOCAL
fi
fi
if iptables -L | grep -q REJECT; then
if [ "$TCP" = 1 ]; then
iptables -I INPUT -p udp --dport $PORTTCP -j ACCEPT #This line and next 5 lines have been added for tcp support
iptables -I FORWARD -s 10.9.0.0/24 -j ACCEPT
iptables -I FORWARD -m state --state RELATED,ESTABLISHED -j ACCEPT
sed -i "1 a\iptables -I INPUT -p tcp --dport $PORTTCP -j ACCEPT" $RCLOCAL
sed -i "1 a\iptables -I FORWARD -s 10.9.0.0/24 -j ACCEPT" $RCLOCAL
sed -i "1 a\iptables -I FORWARD -m state --state RELATED,ESTABLISHED -j ACCEPT" $RCLOCAL
fi
fi
if [ "$TCP" = 1 ]; then
echo "[Unit]
#Created by openvpn-install-advanced (https://github.com/pl48415/openvpn-install-advanced)
Description=OpenVPN Robust And Highly Flexible Tunneling Application On <server>
After=syslog.target network.target
[Service]
Type=forking
PIDFile=/var/run/openvpn/$TCP_SERVICE_AND_CONFIG_NAME.pid
ExecStart=/usr/sbin/openvpn --daemon --writepid /var/run/openvpn/$TCP_SERVICE_AND_CONFIG_NAME.pid --cd /etc/openvpn/ --config $TCP_SERVICE_AND_CONFIG_NAME.conf
[Install]
WantedBy=multi-user.target" > /etc/systemd/system/$TCP_SERVICE_AND_CONFIG_NAME.service
if pgrep systemd-journal; then
sudo systemctl enable $TCP_SERVICE_AND_CONFIG_NAME.service
fi
fi
if pgrep systemd-journal; then
sudo systemctl start openvpn.service
else
if [[ "$OS" = 'debian' ]]; then
/etc/init.d/openvpn start
else
service openvpn start
fi
fi
service openvpn_tcp restart
EXTERNALIP=$(wget -qO- ipv4.icanhazip.com)
if [ "$TCP" = 1 ]; then
echo "client
cipher $CIPHER
auth-user-pass
dev tun
proto tcp
remote $IP $PORTTCP tcp-client
resolv-retry infinite
nobind
persist-key
persist-tun
remote-cert-tls server
comp-lzo
verb 3
sndbuf 0
rcvbuf 0
" > /etc/openvpn/clienttcp-common.txt
newclienttcp "$CLIENT"
fi
if [ "$TCP" = 1 ]; then
echo "Your TCP client config is available at ~/${CLIENT}tcp.ovpn"
fi
fi
if [ "$DNSRESOLVER" = 1 ]; then
service unbound restart
service openvpn_tcp restart
fi
fi
service openvpn_tcp restart
}
function debian(){
echo "INSTALANDO OPENVPN DEBIAN...\n"
echo "VERIFICANDO REQUERIMIENTOS"
TCP_SERVICE_AND_CONFIG_NAME="openvpn_tcp"
UDP_SERVICE_AND_CONFIG_NAME="openvpn_udp"
###############################################################
if [[ "$USER" != 'root' ]]; then
echo "LO SENTIMOS ESTE SCRIPT SOLO SE PUEDE EJECUTAR COMO ROOT"
exit
fi
###############################################################
if [[ ! -e /dev/net/tun ]]; then
echo "TUN/TAP is not available"
exit
fi
###############################################################
if grep -qs "CentOS release 5" "/etc/redhat-release"; then
echo "CentOS 5 NO ES SOPORTADO"
exit
fi
###############################################################
if [[ -e /etc/debian_version ]]; then
OS=debian
RCLOCAL='/etc/rc.local'
elif [[ -e /etc/centos-release || -e /etc/redhat-release ]]; then
OS=centos
RCLOCAL='/etc/rc.d/rc.local'
# Needed for CentOS 7
chmod +x /etc/rc.d/rc.local
else
echo "ESTE SCRIPT SOLO FUNCIONA EN : Debian Y Ubuntu"
exit
fi
###############################################################
newclienttcp () {
# This function is used to create tcp client .ovpn file
cp /etc/openvpn/clienttcp-common.txt ~/"$1tcp.ovpn"
echo "<ca>" >> ~/"$1tcp.ovpn"
cat /etc/openvpn/easy-rsa/pki/ca.crt >> ~/"$1tcp.ovpn"
echo "</ca>" >> ~/"$1tcp.ovpn"
echo "<cert>" >> ~/"$1tcp.ovpn"
cat /etc/openvpn/easy-rsa/pki/issued/"$1.crt" >> ~/"$1tcp.ovpn"
echo "</cert>" >> ~/"$1tcp.ovpn"
echo "<key>" >> ~/"$1tcp.ovpn"
cat /etc/openvpn/easy-rsa/pki/private/"$1.key" >> ~/"$1tcp.ovpn"
echo "</key>" >> ~/"$1tcp.ovpn"
if [ "$TLS" = "1" ]; then #check if TLS is selected to add a TLS static key
echo "key-direction 1" >> ~/"$1tcp.ovpn"
echo "<tls-auth>" >> ~/"$1tcp.ovpn"
cat /etc/openvpn/easy-rsa/pki/private/ta.key >> ~/"$1tcp.ovpn"
echo "</tls-auth>" >> ~/"$1tcp.ovpn"
fi
if [ $TLSNEW = 1 ]; then
echo "--tls-version-min 1.2" >> ~/"$1.ovpn"
fi
}
###############################################################
function version_gt() {
test "$(echo "$@" | tr " " "\n" | sort -V | head -n 1)" != "$1";
}
###############################################################
IP=$(wget -qO- ipv4.icanhazip.com)
###############################################################
clear
echo "listening to. " $IP
#read -p "IP address: " -e -i $IP IP
echo ""
#read -p "Do you want to run a UDP server [y/n]: " -e -i y UDP
TCP=1
#read -p "Do you want to run a TCP server [y/n]: " -e -i n TCP
###############################################################
clear
#read -p "What UDP port do you want to run OpenVPN on?: " -e -i 1194 PORT
echo "PUERTO DE ESCUCHA : 4444"
PORTTCP=4444
#read -p "What TCP port do you want to run OpenVPN on?: " -e -i 443 PORTTCP
echo "USANDO KEY 2048 BITS"
KEYSIZE=2048
DIGEST=SHA256
#read -p "Digest Size [1-2]: " -e -i 1 DIGEST
###############################################################
AES=0
grep -q aes /proc/cpuinfo #Check for AES-NI availability
if [[ "$?" -eq 0 ]]; then
AES=1
fi
if [[ "$AES" -eq 1 ]]; then
echo "Your CPU supports AES-NI instruction set."
fi
echo "USANDO CIRADO : AES-256-CBC"
CIPHER=AES-256-CBC
echo "USANDO TLS-AUTH"
TLS=1
#read -p "Do you want to use additional TLS authentication [y/n]: " -e -i y TLS
INTERNALNETWORK=1
echo "USANDO DNS 1.1.1.1 - 9.9.9.9"
#read -p "Allow internal networking [y/n]: " -e -i y INTERNALNETWORK
DNSRESOLVER=0
ANTIVIR=0
###############################################################
if [ "$DNSRESOLVER" = 0 ]; then
DNS=1
#read -p "DNS [1-6]: " -e -i 1 DNS
CLIENT='client'
#read -p "Client name: " -e -i client CLIENT
if [[ "$OS" = 'debian' ]]; then
apt-get update -qq -y > /dev/null 2>&1
apt-get install openvpn iptables openssl -y -qq > /dev/null 2>&1
apt-get install build-essential libssl-dev liblzo2-dev libpam0g-dev easy-rsa -y > /dev/null 2>&1
ovpnversion=$(openvpn --status-version | grep -o "([0-9].*)" | sed 's/[^0-9.]//g')
if version_gt $ovpnversion "2.3.3"; then
echo "Your OpenVPN version is $ovpnversion and it supports"
echo "NOTE: Your client also must use version 2.3.3 or newer"
TLSNEW=1
#read -p "Force TLS 1.2 [y/n]: " -e -i n TLSNEW
fi
###############################################################
if [[ -d /etc/openvpn/easy-rsa/ ]]; then
rm -rf /etc/openvpn/easy-rsa/
fi
# Get easy-rsa
wget --no-check-certificate -O ~/EasyRSA-3.0.1.tgz https://github.com/OpenVPN/easy-rsa/releases/download/3.0.1/EasyRSA-3.0.1.tgz > /dev/null 2>&1
tar xzf ~/EasyRSA-3.0.1.tgz -C ~/
mkdir /etc/openvpn
mv ~/EasyRSA-3.0.1/ /etc/openvpn/EasyRSA-3.0.1
mv /etc/openvpn/EasyRSA-3.0.1/ /etc/openvpn/easy-rsa/
chown -R root:root /etc/openvpn/easy-rsa/
rm -rf ~/EasyRSA-3.0.1.tgz
cd /etc/openvpn/easy-rsa/
# Create the PKI, set up the CA, the DH params and the server + client certificates
./easyrsa init-pki
cp vars.example vars
sed -i 's/#set_var EASYRSA_KEY_SIZE 2048/set_var EASYRSA_KEY_SIZE '$KEYSIZE'/' vars
./easyrsa --batch build-ca nopass
./easyrsa gen-dh
./easyrsa build-server-full server nopass
./easyrsa build-client-full "$CLIENT" nopass
./easyrsa gen-crl
openvpn --genkey --secret /etc/openvpn/easy-rsa/pki/private/ta.key
cp pki/ca.crt pki/private/ca.key pki/dh.pem pki/issued/server.crt pki/private/server.key /etc/openvpn
echo "GENERANDO CERTIFICADO"
echo "port $PORTTCP
proto tcp
dev tun
ca ca.crt
cert server.crt
key server.key
dh dh.pem
push \"register-dns\"
topology subnet
server 10.9.0.0 255.255.255.0
ifconfig-pool-persist ipp.txt
cipher AES-256-CBC
user nobody
group nogroup
client-cert-not-required
username-as-common-name
plugin $Plugin_autent login
sndbuf 0
rcvbuf 0
push \"redirect-gateway def1 bypass-dhcp\"
--tls-auth /etc/openvpn/easy-rsa/pki/private/ta.key 0
push \"dhcp-option DNS 1.1.1.1\"
push \"dhcp-option DNS 9.9.9.9\"
keepalive 10 120
comp-lzo
persist-key
persist-tun
status openvpn-status.log
verb 3
crl-verify /etc/openvpn/easy-rsa/pki/crl.pem
client-to-client
" > /etc/openvpn/$TCP_SERVICE_AND_CONFIG_NAME.conf
sed -i 's|#net.ipv4.ip_forward=1|net.ipv4.ip_forward=1|' /etc/sysctl.conf
sed -i " 5 a\echo 1 > /proc/sys/net/ipv4/ip_forward" $RCLOCAL # Added for servers that don't read from sysctl at startup
echo 1 > /proc/sys/net/ipv4/ip_forward
# Set NAT for the VPN subnet
if [ "$INTERNALNETWORK" = 1 ]; then
if [ "$TCP" = 1 ]; then
iptables -t nat -A POSTROUTING -s 10.9.0.0/24 ! -d 10.9.0.0/24 -j SNAT --to $IP
sed -i "1 a\iptables -t nat -A POSTROUTING -s 10.9.0.0/24 ! -d 10.9.0.0/24 -j SNAT --to $IP" $RCLOCAL
fi
else
if [ "$TCP" = 1 ]; then
iptables -t nat -A POSTROUTING -s 10.9.0.0/24 ! -d 10.9.0.1 -j SNAT --to $IP #This line and the next one are added for tcp server instance
sed -i "1 a\iptables -t nat -A POSTROUTING -s 10.9.0.0/24 -j SNAT --to $IP" $RCLOCAL
fi
fi
if iptables -L | grep -q REJECT; then
if [ "$TCP" = 1 ]; then
iptables -I INPUT -p udp --dport $PORTTCP -j ACCEPT #This line and next 5 lines have been added for tcp support
iptables -I FORWARD -s 10.9.0.0/24 -j ACCEPT
iptables -I FORWARD -m state --state RELATED,ESTABLISHED -j ACCEPT
sed -i "1 a\iptables -I INPUT -p tcp --dport $PORTTCP -j ACCEPT" $RCLOCAL
sed -i "1 a\iptables -I FORWARD -s 10.9.0.0/24 -j ACCEPT" $RCLOCAL
sed -i "1 a\iptables -I FORWARD -m state --state RELATED,ESTABLISHED -j ACCEPT" $RCLOCAL
fi
fi
if [ "$TCP" = 1 ]; then
echo "[Unit]
#Created by openvpn-install-advanced (https://github.com/pl48415/openvpn-install-advanced)
Description=OpenVPN Robust And Highly Flexible Tunneling Application On <server>
After=syslog.target network.target
[Service]
Type=forking
PIDFile=/var/run/openvpn/$TCP_SERVICE_AND_CONFIG_NAME.pid
ExecStart=/usr/sbin/openvpn --daemon --writepid /var/run/openvpn/$TCP_SERVICE_AND_CONFIG_NAME.pid --cd /etc/openvpn/ --config $TCP_SERVICE_AND_CONFIG_NAME.conf
[Install]
WantedBy=multi-user.target" > /etc/systemd/system/$TCP_SERVICE_AND_CONFIG_NAME.service
if pgrep systemd-journal; then
sudo systemctl enable $TCP_SERVICE_AND_CONFIG_NAME.service
fi
fi
if pgrep systemd-journal; then
sudo systemctl start openvpn.service
else
if [[ "$OS" = 'debian' ]]; then
/etc/init.d/openvpn start
else
service openvpn start
fi
fi
service openvpn_tcp restart
EXTERNALIP=$(wget -qO- ipv4.icanhazip.com)
if [ "$TCP" = 1 ]; then
echo "client
cipher $CIPHER
auth-user-pass
dev tun
proto tcp
remote $IP $PORTTCP tcp-client
resolv-retry infinite
nobind
persist-key
persist-tun
remote-cert-tls server
comp-lzo
verb 3
sndbuf 0
rcvbuf 0
" > /etc/openvpn/clienttcp-common.txt
newclienttcp "$CLIENT"
fi
if [ "$TCP" = 1 ]; then
echo "Your TCP client config is available at ~/${CLIENT}tcp.ovpn"
fi
fi
if [ "$DNSRESOLVER" = 1 ]; then
service unbound restart
service openvpn_tcp restart
fi
fi
service openvpn_tcp restart
}
if cat /etc/*release | grep DISTRIB_DESCRIPTION | grep "Ubuntu 14.04" > /dev/null 2>&1; then
ubuntu_14
elif [[ $sistema_operativo == *Ubuntu* ]]; then
ubuntu
elif [[ $sistema_operativo == *Debian* ]]; then
debian
else
echo "este script no es compatible con este sistema operativo\n" $sistema_operativo
fi
| true |
ceeea20c246f1462dffbb8e530e8bbe79af778ae | Shell | estudeplus/api-gateway | /deploy/deploy.sh | UTF-8 | 707 | 2.796875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
echo "Setting up GCLOUD auth"
gcloud auth activate-service-account --key-file ${TRAVIS_BUILD_DIR}/deploy/gcloud-key.json
gcloud --quiet config set project $PROJECT_ID
gcloud --quiet config set container/cluster $CLUSTER
gcloud --quiet config set compute/zone $ZONE
echo "Getting cluster credentials"
gcloud --quiet container clusters get-credentials $CLUSTER
echo "Authenticating on DockerHub"
echo "$DOCKER_PASSWORD" | docker login -u "$DOCKER_USERNAME" --password-stdin
echo "Pushing new image"
docker push ${ORG_NAME}/${IMAGE_NAME}:$TRAVIS_COMMIT
echo "Setting new image on deployment"
kubectl set image deployment/${DEPLOYMENT} ${CONTAINER}=${ORG_NAME}/${IMAGE_NAME}:$TRAVIS_COMMIT
| true |
1abe7570071625edbace7b9beca22420e28c27be | Shell | ramirofd/so2 | /tp2/run_ntimes.sh | UTF-8 | 129 | 2.6875 | 3 | [] | no_license | #!/bin/bash
max_threads=$1
n=$2
for ((j=1;j<=max_threads;j=j*2));do
for ((i=1;i<=n;i++));do
./main $j
done
done | true |
bf67ead748e92a89bb30fdcc97f0133e12501611 | Shell | N-Medvedev/Slater-Koster-parameters-no-repulsion_v1 | /2elements/hotcent_condition/run_seq.sh | UTF-8 | 629 | 2.96875 | 3 | [] | no_license | #!/bin/bash
chmod +x run.sh
elements1="C H O N P S F Cl Li Na"
elements2="H He Li Be B C N O F Ne Na Mg Al Si P S Cl Ar K Ca Sc Ti V Cr Mn Fe Co Ni Cu Zn Ga Ge As Se Br Kr Rb Sr Y Zr Nb Mo Tc Ru Rh Pd Ag Cd In Sn Sb Te I Xe Cs Ba La Ce Pr Nd Pm Sm Eu Gd Tb Dy Ho Er Tm Yb Lu Hf Ta W Re Os Ir Pt Au Hg Tl Pb Bi"
mkdir results_v1
for elm1 in ${elements1} ;do
cd ./results_v1
mkdir ${elm1}-X
cd ..
for elm2 in ${elements2} ;do
./run.sh ${elm1} ${elm2}
cp ./*_no_repulsion.skf ./results_v1/${elm1}-X/
rm -f -r *_no_repulsion.skf
rm -f -r generate_${elm1}-${elm2}_skf.py
done
done
ls -ltr ./results_v1
| true |
9a86c6d7fdcbe98e5e13f7cc36ded520095d8915 | Shell | HLFH/pkgbuilds | /rs-serve-git/PKGBUILD | UTF-8 | 1,358 | 2.875 | 3 | [
"LicenseRef-scancode-public-domain",
"Unlicense"
] | permissive | # Maintainer: Andy Weidenbaum <archbaum@gmail.com>
pkgname=rs-serve-git
pkgver=20130927
pkgrel=1
pkgdesc="a remotestorage server for POSIX systems"
arch=('i686' 'x86_64')
depends=('nodejs')
makedepends=('cmake' 'gcc' 'git' 'make' 'nodejs-node-gyp' 'pkg-config')
url="https://github.com/remotestorage/rs-serve"
license=('AGPL3')
source=(git+https://github.com/remotestorage/rs-serve
Makefile.patch)
sha256sums=('SKIP'
'4d7d36b950b8a4a938d4fd97ef8991bc2f377f0526845be7999b6259016b26c2')
provides=('rs-serve')
conflicts=('rs-serve')
pkgver() {
cd ${pkgname%-git}
git log -1 --format="%cd" --date=short | sed "s|-||g"
}
prepare() {
cd ${pkgname%-git}
msg 'Patching Makefile...'
patch -p1 < ${srcdir}/Makefile.patch
}
build() {
cd ${pkgname%-git}
export PYTHON=python2
msg 'Building bindings...'
make bindings # Authorization server backend
msg 'Building...'
make
}
package() {
cd ${pkgname%-git}
mkdir -p "$pkgdir"/usr/bin
mkdir -p "$pkgdir"/var/lib/rs-serve/authorizations
mkdir -p "$pkgdir"/etc/init.d/rs-serve
mkdir -p "$pkgdir"/etc/default/rs-serve
msg 'Installing...'
make DESTDIR="$pkgdir" install
msg 'Cleaning up pkgdir...'
find "$pkgdir" -type d -name .git -exec rm -r '{}' +
find "$pkgdir" -type f -name .gitignore -exec rm -r '{}' +
find "$pkgdir" -type f -name .gitmodules -exec rm -r '{}' +
}
| true |
4fc117b5a490404838f587208c565d55fd69f2a8 | Shell | Gogistics/prjNodeRF | /infra/scripts/spin_up_app.sh | UTF-8 | 1,001 | 3.59375 | 4 | [] | no_license | #!/usr/local/bin/bash
# Author:
# Alan Tai
# Program:
# Spin up the fan systems
# Date:
# 08/02/2019
set -e
# export variables
finish() {
local existcode=$?
cd $CWD
exit $existcode
}
trap "finish" INT TERM
while :; do
case $1 in
-d|--docker)
echo "Spin up Docker container for running the application"
# build base img
docker build -t alantai/node_app_ef:0.0.0 \
-f ./infra/Dockerfiles/Dockerfile.development.node .
# run docker conatiner
docker run -it --rm --name node_app_ef \
--log-opt mode=non-blocking \
--log-opt max-buffer-size=4m \
--log-opt max-size=100m \
--log-opt max-file=5 \
alantai/node_app_ef:0.0.0 \
/bin/sh
;;
-l|--local)
echo "Install npm modules and then run the application"
# install npm modules
npm i --save request-promise \
request \
readline \
moment && \
node app.js
;;
*) break
esac
shift
done
| true |
d407a00b1e7316424e84d6f6c364d8646dad9258 | Shell | silky/HymHub | /species/Am11/data.sh | UTF-8 | 918 | 3.140625 | 3 | [
"CC-BY-4.0"
] | permissive | #!/usr/bin/env bash
set -eo pipefail
# Contributed 2015
# Daniel Standage <daniel.standage@gmail.com>
# Configuration
#-------------------------------------------------------------------------------
FULLSPEC="Apis mellifera OGS 1.1"
SPEC=Am11
URLGENUS="beebase"
ORIGFASTA=Amel_2.0_scaffolds.fa.gz
ORIGGFF3=amel_OGSv1.1.gff.gz
# Procedure
#-------------------------------------------------------------------------------
source src/data-cli.sh
source src/filenames.sh
if [ "$DODOWNLOAD" != "0" ]; then
source src/hymbase-download.sh
hymbase_download
fi
if [ "$DOFORMAT" != "0" ]; then
source src/hymbase-format.sh
hymbase_format_gtf 'GB30545|GB30541|GB30085'
fi
if [ "$DODATATYPES" != "0" ]; then
source src/datatypes.sh
get_datatypes $SPEC
fi
if [ "$DOSTATS" != "0" ]; then
source src/stats.sh
get_stats $SPEC
fi
if [ "$DOCLEANUP" != "0" ]; then
source src/cleanup.sh
data_cleanup
fi
echo "[HymHub: $FULLSPEC] complete!"
| true |
8eebe40b86a1150c30cef8fa8f19beb7f40bccdc | Shell | jrnp97/customer | /entrypoint.sh | UTF-8 | 556 | 3.171875 | 3 | [] | no_license | #!/usr/bin/env bash
WORK_DIR=/src/
if python $WORK_DIR/manage.py test; then
echo 'Test Passed!!'
else
echo 'Tests Failed'
exit 1
fi
if python $WORK_DIR/manage.py migrate --no-input; then
echo 'Migrations executed!!'
else
echo 'Error migration'
exit 1
fi
echo 'Filling database with customer data....'
if python $WORK_DIR/manage.py fill_customer_data $WORK_DIR/data/customers.csv; then
echo 'Customer data on database!!'
else
echo 'Error importing customer data on database :('
exit 1
fi
python $WORK_DIR/manage.py runserver 0.0.0.0:8000
| true |
42b6b6439a4549847c7f89a04f458425bcae3a23 | Shell | NeoResearch/neo-tests | /docker-build-neo-cli/building_scripts/2x/buildAllList_Plugins_2x.sh | UTF-8 | 270 | 3.3125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
source /opt/env-repositories.sh
for plugin in $PLUGINS_LIST
do
echo "Going to build plugin $plugin"
/opt/build_plugin_2x.sh --plugin-name $plugin
res=$?
if [ $res = 1 ]; then
echo "Going to exit because last return is $res"
exit 1
fi
done
| true |
d181fc18abd3f0d808b53b5b02c8d29ec0e07c8c | Shell | parsa/ReadyBuildScripts | /m4-1.4.17.sh | UTF-8 | 292 | 3.53125 | 4 | [] | no_license | #/bin/bash
if [ ! -d $LOCAL_PATH ]; then
echo "Install directory doesn't exist"
exit 1
fi
mkdir -p m4/{build,tmp}
pushd m4
curl -O http://ftp.gnu.org/gnu/m4/m4-1.4.17.tar.xz
tar xf m4-1.4.17.tar.xz
pushd tmp
../m4-1.4.17/configure --prefix=$LOCAL_PATH
make $LOCAL_MAKEFLAGS install
| true |
0b97c663f856d87c9fe96a2b1a9188522b564b46 | Shell | rsling/cow | /src/en/slurm/en16-ner-marmot-malt.sh | UTF-8 | 1,043 | 3.046875 | 3 | [
"BSD-2-Clause"
] | permissive | #!/bin/bash
# FOR ARRAY JOB!!!
# Pass: dir (absolut corpus root, like "encow16"), LIST FILE, offset in file list
#SBATCH --mem=18000M
#SBATCH --time=00:50:00
set -e
set -u
# Create true in file name from
input="`cat ${2} | tail -n +${3} | sed -n ${SLURM_ARRAY_TASK_ID},${SLURM_ARRAY_TASK_ID}p`"
inf="${1}/05divide/${input}"
odn_ner="${1}06ner/$(dirname ${input})"
odn_marmot="${1}07marmot/$(dirname ${input})"
odn_malt="${1}08malt/$(dirname ${input})"
ofn_ner="${1}/06ner/${input}"
ofn_marmot="${1}/07marmot/$(echo ${input} | sed 's/\.gz$//')"
ofn_malt="${1}/08malt/$(echo ${input} | sed 's/\.gz$//')"
mkdir -p ${odn_ner}
\rm -f ${ofn_ner}
mkdir -p ${odn_marmot}
\rm -f ${ofn_marmot}
\rm -f "${ofn_marmot}.gz"
mkdir -p ${odn_malt}
\rm -f ${ofn_malt}
\rm -f "${ofn_malt}.gz"
cow16-ner ${inf} ${ofn_ner} ${NER_EN}
cow16-marmot ${inf} ${ofn_marmot} en.marmot
python /home/rsling/usr/local/cow/src/en/cow16-multextconv-en.py ${ofn_marmot} "${ofn_marmot}.gz"
\rm -f ${ofn_marmot}
cow16-malt-en ${inf} ${ofn_malt}
gzip ${ofn_malt}
| true |
75909439404c2c1f96a50e0b65d1219c9324158c | Shell | chnhkk/dotfiles-blocky | /config/polybar/scripts/zscroller.sh | UTF-8 | 374 | 2.828125 | 3 | [] | no_license | #!/bin/sh
if ! mpc >/dev/null 2>&1; then
echo Server offline
exit 1
elif mpc status | grep -qE 'playing|paused'; then
#( mpc current | zscroll -l 24 -d 0.4 -n true) &
zscroll -n true -d 0.6 -l 20 -p " " -b " " -u true "mpc current" &
else
echo Not playing
fi
# Block until an event is emitted
mpc idle >/dev/null
| true |
bc09ba8a2b697c32176f3ef467d0124ad75f4b30 | Shell | prasadpalkar94/SnakeAndLadder | /snakeandladder.sh | UTF-8 | 962 | 3.625 | 4 | [] | no_license | #!/bin/bash -x
echo "------------------Welcome To SnakeAndLadder-----------------------"
PLAYER_START_POS=0
WIN_POS=100
NO_PLAY=1
LADDER=2
SNAKE=3
checkvalue=1
pos=$PLAYER_START_POS
function roll(){
randomCheck=$((RANDOM%6 + 1))
echo $randomCheck
((totalrandomCheck++))
}
function checkOptions(){
options=$((RANDOM%3 + 1))
case $options in
$NO_PLAY)pos=$pos ;;
$LADDER)pos=$(( $pos + $randomCheck ))
if [ $pos -gt $WIN_POS ]
then
pos=$(( $pos - $randomCheck ))
fi
;;
$SNAKE)pos=$(( $pos - $randomCheck ))
if [ $pos -lt $PLAYER_START_POS ]
then
pos=$PLAYER_START_POS
fi
;;
esac
echo $pos
}
function playerTurn()
{
if [[ $checkvalue -eq 1 ]]
then
checkvalue=2
else
checkvalue=1
fi
}
while [ $pos -ne $WIN_POS ]
do
roll
checkOptions
playerTurn
done
echo "Number Of Time randomCheck Tossed: "$totalrandomCheck
echo Player $checkvalue won
| true |
098f82acba10a7b1cfff0f4d44d48bd6adb454c6 | Shell | eighttails/ProgramListOCR | /setup/MSYS2Private/qt5-static-angle/qt.sh | UTF-8 | 4,859 | 3.1875 | 3 | [
"Apache-2.0",
"GPL-3.0-only"
] | permissive | #!/bin/bash
function prerequisite(){
#必要ライブラリ
pacman "${PACMAN_INSTALL_OPTS[@]}" \
$MINGW_PACKAGE_PREFIX-ntldd \
$MINGW_PACKAGE_PREFIX-clang \
$MINGW_PACKAGE_PREFIX-clang-tools-extra \
$MINGW_PACKAGE_PREFIX-SDL2 \
$MINGW_PACKAGE_PREFIX-dbus \
$MINGW_PACKAGE_PREFIX-openssl \
2> /dev/null
exitOnError
mkdir -p $PREFIX/bin 2> /dev/null
mkdir -p $QT5_STATIC_PREFIX/bin 2> /dev/null
pushd $MINGW_PREFIX/bin
cp -f $NEEDED_DLLS $QT5_STATIC_PREFIX/bin
popd
}
function makeQtSourceTree(){
#Qt
QT_MAJOR_VERSION=5.15
QT_MINOR_VERSION=.2
QT_VERSION=$QT_MAJOR_VERSION$QT_MINOR_VERSION
QT_ARCHIVE_DIR=qt-everywhere-src-$QT_VERSION
QT_ARCHIVE=$QT_ARCHIVE_DIR.tar.xz
QT_SOURCE_DIR=qt5-src-$1
#QT_RELEASE=development_releases
QT_RELEASE=official_releases
if [ -e $QT_SOURCE_DIR ]; then
# 存在する場合
echo "$QT_SOURCE_DIR already exists."
else
# 存在しない場合
if [ ! -e $QT_ARCHIVE ]; then
wget -c http://download.qt.io/$QT_RELEASE/qt/$QT_MAJOR_VERSION/$QT_VERSION/single/$QT_ARCHIVE
fi
tar xf $QT_ARCHIVE
mv $QT_ARCHIVE_DIR $QT_SOURCE_DIR
pushd $QT_SOURCE_DIR
#qdocのビルドが通らないので暫定パッチ
if [ "$1" == "static" ]; then
patch -p1 -i $SCRIPT_DIR/0302-ugly-hack-disable-qdoc-build.patch
fi
#MSYSで引数のパス変換が勝手に走ってビルドが通らない問題への対策パッチ
sed -i -e "s|load(qt_tool)|msysargconv.name = MSYS2_ARG_CONV_EXCL\nmsysargconv.value = *\nQT_TOOL_ENV += msysargconv\nload(qt_tool)|" qtdeclarative/src/qmltyperegistrar/qmltyperegistrar.pro
#64bit環境で生成されるオブジェクトファイルが巨大すぎでビルドが通らない問題へのパッチ
sed -i -e "s|QMAKE_CFLAGS = |QMAKE_CFLAGS = -Wa,-mbig-obj |g" qtbase/mkspecs/win32-g++/qmake.conf
#gcc11対応パッチ
GCC_CXXFLAGS="-include $(cygpath -am $MINGW_PREFIX/include/c++/*/limits)"
sed -i -e "s|QMAKE_CXXFLAGS += |QMAKE_CXXFLAGS += $GCC_CXXFLAGS |g" qtbase/mkspecs/win32-g++/qmake.conf
popd
fi
#共通ビルドオプション
QT_COMMON_CONF_OPTS=()
QT_COMMON_CONF_OPTS+=("-opensource")
QT_COMMON_CONF_OPTS+=("-confirm-license")
QT_COMMON_CONF_OPTS+=("-silent")
QT_COMMON_CONF_OPTS+=("-platform" "win32-g++")
QT_COMMON_CONF_OPTS+=("-optimize-size")
QT_COMMON_CONF_OPTS+=("-pkg-config")
QT_COMMON_CONF_OPTS+=("QMAKE_CXXFLAGS+=-Wno-deprecated-declarations")
QT_COMMON_CONF_OPTS+=("-no-direct2d")
QT_COMMON_CONF_OPTS+=("-no-wmf")
QT_COMMON_CONF_OPTS+=("-no-mng")
QT_COMMON_CONF_OPTS+=("-no-fontconfig")
QT_COMMON_CONF_OPTS+=("-qt-zlib")
QT_COMMON_CONF_OPTS+=("-qt-libjpeg")
QT_COMMON_CONF_OPTS+=("-qt-libpng")
QT_COMMON_CONF_OPTS+=("-qt-tiff")
QT_COMMON_CONF_OPTS+=("-no-jasper")
QT_COMMON_CONF_OPTS+=("-qt-webp")
QT_COMMON_CONF_OPTS+=("-qt-freetype")
QT_COMMON_CONF_OPTS+=("-qt-pcre")
QT_COMMON_CONF_OPTS+=("-qt-harfbuzz")
QT_COMMON_CONF_OPTS+=("-nomake" "tests")
QT_COMMON_CONF_OPTS+=("-no-feature-openal")
QT_COMMON_CONF_OPTS+=("-no-feature-d3d12")
}
function buildQtStatic(){
if [ -e $QT5_STATIC_PREFIX/bin/qmake.exe -a $((FORCE_INSTALL)) == 0 ]; then
echo "Qt5 Static Libs are already installed."
return 0
fi
#Qtのソースコードを展開
makeQtSourceTree static
exitOnError
#static版
QT5_STATIC_BUILD=qt5-static-$BIT
rm -rf $QT5_STATIC_BUILD
mkdir $QT5_STATIC_BUILD
pushd $QT5_STATIC_BUILD
QT_STATIC_CONF_OPTS=()
# QT_STATIC_CONF_OPTS+=("-verbose")
QT_STATIC_CONF_OPTS+=("-prefix" "$(cygpath -am $QT5_STATIC_PREFIX)")
QT_STATIC_CONF_OPTS+=("-angle")
QT_STATIC_CONF_OPTS+=("-static")
QT_STATIC_CONF_OPTS+=("-static-runtime")
QT_STATIC_CONF_OPTS+=("-nomake" "examples")
QT_STATIC_CONF_OPTS+=("-D" "JAS_DLL=0")
QT_STATIC_CONF_OPTS+=("-openssl-linked")
QT_STATIC_CONF_OPTS+=("-no-dbus")
export QDOC_SKIP_BUILD=1
export QDOC_USE_STATIC_LIBCLANG=1
OPENSSL_LIBS="$(pkg-config --static --libs openssl)" \
../$QT_SOURCE_DIR/configure "${QT_COMMON_CONF_OPTS[@]}" "${QT_STATIC_CONF_OPTS[@]}" &> ../qt5-static-$BIT-config.status
exitOnError
makeParallel && make install
exitOnError
popd
unset QDOC_SKIP_BUILD
unset QDOC_USE_STATIC_LIBCLANG
rm -rf $QT5_STATIC_BUILD
}
#----------------------------------------------------
SCRIPT_DIR=$(dirname $(readlink -f ${BASH_SOURCE:-$0}))
source $SCRIPT_DIR/../common/common.sh
commonSetup
#必要ライブラリ
prerequisite
#ANGLEをビルドするために必要なfxc.exeにパスを通す
export WindowsSdkVerBinPath=$(cygpath -am "C:/Program Files (x86)/Windows Kits/10/bin/10.0.22000.0")
export PATH=$(cygpath "$WindowsSdkVerBinPath/$ARCH"):$PATH
export PKG_CONFIG="$(cygpath -am $MINGW_PREFIX/bin/pkg-config.exe)"
export LLVM_INSTALL_DIR=$(cygpath -am $MINGW_PREFIX)
#Qtのインストール場所
QT5_STATIC_PREFIX=$PREFIX/qt5-static-angle
cd $EXTLIB
#static版Qtをビルド
buildQtStatic
exitOnError
| true |
d9e02cb9c221c80ec39926b4f87a9fb03b6eeaa4 | Shell | dffischer/gnome-shell-extensions | /makepkg-templates/adjust-version-1.template | UTF-8 | 512 | 3.109375 | 3 | [] | no_license | prepare() {
# adjust for shell versions that are not officially supported.
local min=$(echo ${depends[@]} | grep -Po '(?<=gnome-shell>=3\.)[[:digit:]]+')
local max=$(echo ${depends[@]} | grep -Po '(?<=gnome-shell<3\.)[[:digit:]]+')
if [ -z "$max" ]
then max=$(
# template input; name=gnome-shell-version
); fi
find -name 'metadata.json' -exec sed -i 'H;1h;$!d;x;
s/"shell-version": \[.*\]/"shell-version": [ '"$(seq -s ', ' -f '"3.%g"' $min 2 $max)"' ]/' \
'{}' +
}
# vim: filetype=sh
| true |
b107f2ed706cf328a2a1d1558405698cc9055ba4 | Shell | yongmingcode/yl-csdn | /linux_start_stop.sh | UTF-8 | 1,836 | 4.15625 | 4 | [] | no_license |
packageName="yl-csdn" #包名 xx.jar
baseDirPath="/data/yl_csdn" #包名 xx.jar
#检测pid
function getPid()
{
echo "检测状态---------------------------------------------"
pid=`ps -ef | grep -n ${packageName} | grep -v grep | awk '{print $2}'`
if [ ${pid} ]
then
echo "运行pid:${pid}"
else
echo "未运行"
fi
}
#启动程序
function start()
{
#启动前,先停止之前的
stop
if [ ${pid} ]
then
echo "停止程序失败,无法启动"
else
echo "启动程序---------------------------------------------"
nohup java -jar ${baseDirPath}/${packageName}.jar >/dev/null 2>&1 &
#查询是否有启动进程
getPid
if [ ${pid} ]
then
echo "已启动"
else
echo "启动失败"
fi
fi
}
#停止程序
function stop()
{
getPid
if [ ${pid} ]
then
echo "停止程序---------------------------------------------"
kill -9 ${pid}
getPid
if [ ${pid} ]
then
#stop
echo "停止失败"
else
echo "停止成功"
fi
fi
}
#启动时带参数,根据参数执行
if [ ${#} -ge 1 ]
then
case ${1} in
"start")
start
;;
"restart")
start
;;
"stop")
stop
;;
*)
echo "${1}无任何操作 注:项目jar包需放在/data/yl_csdn目录下执行,或者修改该脚本的baseDirPath"
;;
esac
else
echo "
command如下命令:
start:启动
stop:停止进程
restart:重启
示例命令如:./run.sh start
注:项目jar包需放在/data/yl_csdn目录下执行,或者修改该脚本的baseDirPath
"
fi | true |
e66b3091d5bbbc55c4d547f0064f03dacc05da3b | Shell | jose-lpa/dotfiles | /default/bash_profile | UTF-8 | 2,766 | 3.03125 | 3 | [] | no_license | # Colorize the prompt.
if tty -s; then
yellow=$(tput setaf 3)
green=$(tput setaf 2)
blue=$(tput setaf 104)
bold=$(tput bold)
reset=$(tput sgr0)
fi
PS1="\[$yellow\]\u\[$reset\]@\[$green\]\h\[$reset\]:\[$blue$bold\]\w\[$reset\]\$ "
# Don't put duplicate lines or lines starting with space in the history.
# See bash(1) for more options.
export HISTCONTROL=ignoreboth
# Append to the history file, don't overwrite it.
shopt -s histappend
# History size up to 2000 commands.
export HISTSIZE=2000
# History format is better with timestamps.
export HISTTIMEFORMAT="%d/%m/%y %T "
# Make less more friendly for non-text input files, see lesspipe(1).
[ -x /usr/bin/lesspipe ] && eval "$(SHELL=/bin/sh lesspipe)"
# Enable color support of ls and also add handy aliases.
export CLICOLOR=1
export LSCOLORS=ExFxCxDxBxegedabagacad
if [ -x /usr/bin/dircolors ]; then
test -r ~/.dircolors && eval "$(dircolors -b ~/.dircolors)" || eval "$(dircolors -b)"
alias ls='ls --color=auto'
alias grep='grep --color=auto'
alias fgrep='fgrep --color=auto'
alias egrep='egrep --color=auto'
fi
# Enable programmable completion features (you don't need to enable
# this, if it's already enabled in /etc/bash.bashrc and /etc/profile
# sources /etc/bash.bashrc).
if [ -f /etc/bash_completion ] && ! shopt -oq posix; then
. /etc/bash_completion
fi
# Check for rbenv.
if command -v rbenv > /dev/null 2>&1; then eval "$(rbenv init -)"; fi
# Set your favorite editor here.
VISUAL=vim; export VISUAL
EDITOR=vim; export EDITOR
# GOPATH configuration.
export PATH=$PATH:/usr/local/opt/go/libexec/bin
# UTF-8 terminal support.
export LC_CTYPE=en_US.UTF-8
export LC_ALL=en_US.UTF-8
export LANG=en_US.UTF-8
# Point to pyenv repo.
if [ -d $HOME/.pyenv ]; then
export PYENV_ROOT="$HOME/.pyenv"
# Add path for `pyenv` command line utility.
export PATH="$PYENV_ROOT/bin:$PATH"
fi
# Check for pyenv.
if which pyenv > /dev/null; then eval "$(pyenv init -)"; fi
# Virtualenvwrapper artifacts.
if [ -f $HOME/.pyenv/plugins/pyenv-virtualenvwrapper/bin/pyenv-virtualenvwrapper ]; then
pyenv virtualenvwrapper
elif [ -f /usr/local/share/python/virtualenvwrapper.sh ]; then
source /usr/local/share/python/virtualenvwrapper.sh
elif [ -f /usr/local/bin/virtualenvwrapper.sh ]; then
source /usr/local/bin/virtualenvwrapper.sh
fi
export VIRTUALENVWRAPPER_PYTHON=`which python`
export VIRTUALENVWRAPPER_VIRTUALENV=`which virtualenv`
export WORKON_HOME=$HOME/.virtualenvs
export VIRTUALENVWRAPPER_VIRTUALENV_ARGS='--no-site-packages'
if [ -f /usr/local/bin/pyenv-virtualenvwrapper ]; then
pyenv virtualenvwrapper
fi
# Poetry - Python dependencies management.
if [ -d $HOME/.poetry ]; then
export PATH="$HOME/.poetry/bin:$PATH"
fi
| true |
295b19a4a02306e35f1af7edc6559e80643e4a04 | Shell | DiamondBond/bin | /pamute | UTF-8 | 728 | 3.6875 | 4 | [] | no_license | #!/bin/bash
operation="list"
list() {
for i in $(pactl list sink-inputs short | awk '{ print $3 }'); do
client_name=$(pactl list clients short | grep -E "^$i[^0-9]" | awk '{ print $3 }')
echo "$i $client_name "
done
}
mute() {
client_id_target=$(list | grep -E "(^| )$client_target " | cut -d ' ' -f 1)
[ -z "$client_id_target" ] && exit 1
pactl set-sink-input-mute $(pactl list sink-inputs short | grep -E "[^0-9]$client_id_target[^0-9]" | awk '{ print $1 }') toggle
}
main() {
case "$operation" in
"mute")
mute
exit 0
;;
"list")
list
exit 0
;;
*)
exit 0
;;
esac
}
while [ $# -gt 0 ]; do
case "$1" in
*)
operation="mute"
client_target="$1"
;;
esac
shift
done
main
| true |
c6a9dc231faab3c9edd17cec9df1c622372affae | Shell | WeAreChampion/notes | /shell/common/sort.sh | UTF-8 | 1,095 | 3.203125 | 3 | [
"Apache-2.0"
] | permissive | # sort [-fbMnrtuk] [file or stdin]
# 选项与参数:
# -f :忽略大小写的差异,例如 A 与 a 视为编码相同;
# -b :忽略最前面的空格符部分;
# -M :以月份的名字来排序,例如 JAN, DEC 等等的排序方法;
# -n :使用『纯数字』进行排序(默认是以文字型态来排序的);
# -r :反向排序;
# -u :就是 uniq ,相同的数据中,仅出现一行代表;
# -t :分隔符,默认是用 [tab] 键来分隔;
# -k :以那个区间 (field) 来进行排序的意思
# sort file
sort file
# 内容是以 : 来分隔的,以第三栏来排序
cat file | sort -t ':' -k 3
# 默认是以字符串来排序的,如果想要使用数字排序
cat file | sort -t ':' -k 3n
# 默认是升序排序,如果要倒序排序
cat file | sort -t ':' -k 3nr
# 先以第六个域的第2个字符到第4个字符进行正向排序,再基于第一个域进行反向排序
cat file | sort -t':' -k 6.2,6.4 -k 1r
# 查看有多少个shell:对文件的第七个域进行排序,然后去重
cat file | sort -t':' -k 7 -u
| true |
0014ccb625fa87e6f3ca3f58b656287a170d2ee2 | Shell | markoangelovski/markMe | /commit-deploy.sh | UTF-8 | 371 | 2.8125 | 3 | [] | no_license | #!/bin/bash
echo "Running git add ."
git add .
echo "Running git commit..."
read -p "Enter commit description: " description
echo "Pushing to Azure DevOps remote repository..."
git commit -m "$description"
echo "Pushing to Azure DevOps remote repository..."
git push -u azure-devops master
echo "Pushing to Github remote repository..."
git push -u github-prod master | true |
cf7b4a06a60652608a90903788074807cb3f9c20 | Shell | raychorn/svn_Cargo_Chief | /trunk/@ubuntu/etc/munin/plugins/vmstat | UTF-8 | 1,115 | 3.59375 | 4 | [
"CC0-1.0"
] | permissive | #!/bin/sh
# -*- sh -*-
: << =cut
=head1 NAME
vmstat - Plugin to monitor the number of processes in io-sleep and
other wait states.
=head1 CONFIGURATION
No configuration
=head1 NOTES
Uses the command "vmstat"
=head1 AUTHOR
Unknown author
=head1 LICENSE
Unknown license
=head1 MAGIC MARKERS
#%# family=auto
#%# capabilities=autoconf
=cut
. $MUNIN_LIBDIR/plugins/plugin.sh
if [ "$1" = "autoconf" ]; then
if ( vmstat 1 1 >/dev/null 2>&1 ); then
echo yes
exit 0
else
if [ $? -eq 127 ]; then
echo "no (could not run \"vmstat\")"
exit 0
else
echo no
exit 0
fi
fi
fi
if [ "$1" = "config" ]; then
echo 'graph_title VMstat'
echo 'graph_args --base 1000 -l 0'
echo 'graph_vlabel process states'
echo 'graph_category processes'
echo 'wait.label running'
echo 'wait.type GAUGE'
echo 'wait.max 500000'
echo 'sleep.label I/O sleep'
echo 'sleep.type GAUGE'
echo 'sleep.max 500000'
print_warning wait
print_warning sleep
print_critical wait
print_critical sleep
exit 0
fi
vmstat 1 2 | awk '{wait = $1; sleep = $2} END { print "wait.value " wait "\nsleep.value " sleep }'
| true |
35fce6df6a07c6e6fe5e22368cb2370c16708e80 | Shell | cloudmesh-community/hid-sp18-405 | /hadoop/hadoop-python-docker-sentiment/hadoop-master/bootstrap.sh | UTF-8 | 2,354 | 3.046875 | 3 | [] | no_license | #!/bin/bash
# Default the Hadoop install location when the environment does not set it.
: ${HADOOP_PREFIX:=/usr/local/hadoop}
# NOTE(review): this line *executes* hadoop-env.sh in a child process;
# it looks like it was meant to be sourced (". $HADOOP_PREFIX/...") so
# its environment variables take effect here -- verify.
$HADOOP_PREFIX/etc/hadoop/hadoop-env.sh
# Remove stale PID files left over from a previous container run.
rm /tmp/*.pid
# installing libraries if any - (resource urls added comma separated to the ACP system variable)
cd $HADOOP_PREFIX/share/hadoop/common ; for cp in ${ACP//,/ }; do  echo == $cp; curl -LO $cp ; done; cd -
# altering the core-site configuration
#sed s/NAMENODE/$HOSTNAME/ /usr/local/hadoop/etc/hadoop/core-site.xml.template > /usr/local/hadoop/etc/hadoop/core-site.xml
#sed s/NAMENODE/$HOSTNAME/ /usr/local/hadoop/etc/hadoop/hdfs-site.xml.template > /usr/local/hadoop/etc/hadoop/hdfs-site.xml
#sed s/RESOURCEMANAGER/$HOSTNAME/ /usr/local/hadoop/etc/hadoop/yarn-site.xml.template.template > /usr/local/hadoop/etc/hadoop/yarn-site.xml.template
# Start sshd and the Hadoop master daemons in the background.
service sshd start
nohup $HADOOP_PREFIX/bin/hdfs namenode &
nohup $HADOOP_PREFIX/bin/yarn resourcemanager &
nohup $HADOOP_PREFIX/bin/yarn timelineserver &
nohup $HADOOP_PREFIX/bin/mapred historyserver &
# World-writable log drop-box; several steps below copy results here.
mkdir -p $HADOOP_PREFIX/logs
chmod 777 $HADOOP_PREFIX/logs
date > $HADOOP_PREFIX/logs/temp.txt
# -d: daemon mode, just keep the container alive.
if [[ $1 == "-d" ]]; then
  while true; do sleep 1000; done
fi
# -bash: drop into an interactive shell instead.
if [[ $1 == "-bash" ]]; then
  /bin/bash
fi
# -run: wait for the daemons to come up, run the Python MapReduce job
# under 'time', archive its logs and outputs, then stay alive.
if [[ $1 == "-run" ]]; then
  sleep 45
  (time /cloudmesh/python/runPythonMapReduce.sh) 2>&1 | tee -a /cloudmesh/python/log.txt
  export PATH=$PATH:/$HADOOP_PREFIX/bin
  tail -3 /cloudmesh/python/log.txt |head -1>> /cloudmesh/python/time.txt
  cp /cloudmesh/python/time.txt $HADOOP_PREFIX/logs/time.txt
  cp /cloudmesh/python/log.txt $HADOOP_PREFIX/logs/log.txt
  cp -r /cloudmesh/python/output_pos_tagged $HADOOP_PREFIX/logs/output_pos_tagged
  cp -r /cloudmesh/python/output_neg_tagged $HADOOP_PREFIX/logs/output_neg_tagged
  while true; do sleep 1000; done
fi
#if [[ $1 == "-benchmark" ]]; then
#  sleep 30
#  export PATH=$PATH:/$HADOOP_PREFIX/bin
#  for i in $(seq 1 $2)
#  do
#    hadoop fs -rm -R /nlp
#    (time /cloudmesh/python/runPythonMapReduce.sh) 2>&1 | tee -a /cloudmesh/python/log.txt
#    tail -3 /cloudmesh/python/log.txt |head -1>>/cloudmesh/python/$3_worker.txt
#    mkdir -p $HADOOP_PREFIX/logs
#    chmod 777 $HADOOP_PREFIX/logs
#    cp /cloudmesh/python/log.txt $HADOOP_PREFIX/logs/log.txt
#    cp /cloudmesh/python/$3_worker.txt $HADOOP_PREFIX/logs/$3_worker.txt
#  done
#  while true; do sleep 1000; done
#fi
| true |
aa2887629ccad4e7a6cd0b1181fdea4f90a3e5d1 | Shell | Spottybadrabbit/blobio | /biod/blobio.rc.template | UTF-8 | 858 | 3.234375 | 3 | [] | no_license | #!/bin/sh
########################################################################
# Begin $rc_base/init.d/
#
# Description : LFS-style init script controlling the BlobIO "biod"
#               daemon through the distribution's loadproc/killproc
#               helper functions.
#
########################################################################
BLOBIO_ROOT=${BLOBIO_ROOT:=/usr/local/blobio}
export BLOBIO_ROOT
# Shared rc helpers and the BlobIO environment.  These define
# boot_mesg/loadproc/killproc/statusproc and the BLOBIO_RC_BIOD*
# variables referenced below.
. /etc/sysconfig/rc
. ${rc_functions}
. $BLOBIO_ROOT/etc/profile.rc
# Start the daemon, recording its PID file for later control.
biod_start() {
	boot_mesg "Starting BlobIO biod daemon..."
	loadproc -p $BLOBIO_RC_BIOD_PID_FILE $BLOBIO_RC_BIOD
}
# Stop the daemon identified by the recorded PID file.
biod_stop() {
	boot_mesg "Stopping BlobIO biod daemon..."
	killproc -p $BLOBIO_RC_BIOD_PID_FILE $BLOBIO_RC_BIOD
}
case "${1}" in
	start)
		biod_start
		;;
	stop)
		biod_stop
		;;
	restart)
		# Re-invoke this script so start/stop stay a single code path.
		${0} stop
		sleep 1
		${0} start
		;;
	status)
		statusproc -p $BLOBIO_RC_BIOD_PID_FILE $BLOBIO_RC_BIOD
		;;
	*)
		echo "Usage: ${0} {start|stop|restart|status}"
		exit 1
		;;
esac
# End $rc_base/init.d/
| true |
f3e7cac22f33d78a93ff47b97d921be4ca05cbf6 | Shell | m2atal/notebook-distant | /jupyter.sh | UTF-8 | 631 | 3.828125 | 4 | [] | no_license | #!/bin/bash
# Start or stop a background Jupyter notebook server.
#
# Usage:
#   ./jupyter.sh <port>   start a notebook on <port> (if not already running)
#   ./jupyter.sh stop     kill the notebook recorded in save_pid.txt
if [ "$1" != "" ]; then
    if [ "$1" == "stop" ]; then
        # Stop the instance whose PID was saved when it was started.
        kill -9 "$(cat save_pid.txt)"
        rm save_pid.txt
    else
        # Look for an already-running instance on this port.
        # 'grep -v grep' excludes the grep process itself, which would
        # otherwise always match and make this check useless.
        RESULT=$(ps -aux | grep "jupyter-notebook --no-browser --port $1 --ip 0.0.0.0" | grep -v grep)
        # BUG FIX: the original branches were inverted -- an empty
        # result means no notebook is running, so one must be started.
        if [ -n "$RESULT" ]; then
            echo "Jupyter notebook already running"
        else
            echo "Jupyter notebook not found. Starting it"
            nohup jupyter notebook --no-browser --port "$1" --ip 0.0.0.0 &
            echo $! > save_pid.txt
            # Print the login token from the server's startup output.
            # NOTE(review): nohup.out may not contain the token yet if
            # the server is still starting -- consider a short retry.
            TOKEN=$(grep -o "token=[a-z0-9]*" nohup.out | sed -n 1p)
            echo "$TOKEN"
            exit 0
        fi
    fi
else
    echo "Missing argument port: ./jupyter.sh <port>"
    exit 1
fi
6cd3069948d5f23dfa2cb45e1ec4a0f80aebfcbc | Shell | yangbinnnn/docker-example | /entrypoint.sh | UTF-8 | 329 | 3.359375 | 3 | [] | no_license | #!/usr/bin/env bash
APP=app
# Start the application in the background and record its PID.
chmod +x ${APP}
./${APP} &
pid=${!}
# Clean-up handler: terminate the application, then exit successfully
# so the container stops cleanly.
function safeShutdown {
    echo "do something..."
    kill -9 ${pid}
    echo "shutdown..."
    exit 0
}
# Catch termination signals with trap so the handler above runs.
trap safeShutdown SIGTERM
trap safeShutdown SIGINT
# Keep the container alive.  BUG FIX: the original "while true; do :;
# done" busy-loop pinned a full CPU core.  Sleeping in the background
# and using 'wait' is idle, and 'wait' returns as soon as a trapped
# signal arrives, so safeShutdown still runs promptly.
while true; do
    sleep 1 &
    wait ${!}
done
| true |
0b954c772ca06d10f2e625debe31de2df1d9f40e | Shell | ohmyzsh/ohmyzsh | /plugins/chucknorris/chucknorris.plugin.zsh | UTF-8 | 862 | 3.890625 | 4 | [
"MIT"
] | permissive | () {
  # NOTE: this file body runs inside a zsh anonymous function, so
  # 'local' scopes the variables to this block only.
  # %x: name of file containing code being executed
  # (${(%):-%x} prompt-expands %x; the :h modifier takes its directory)
  local fortunes_dir="${${(%):-%x}:h}/fortunes"
  # Aliases
  alias chuck="fortune -a $fortunes_dir"
  alias chuck_cow="chuck | cowthink"
  # Automatically generate or update Chuck's compiled fortune data file
  # (-ot: skip when the fortune source is older than the existing .dat)
  if [[ "$fortunes_dir/chucknorris" -ot "$fortunes_dir/chucknorris.dat" ]]; then
    return
  fi
  # For some reason, Cygwin puts strfile in /usr/sbin, which is not on the path by default
  local strfile="${commands[strfile]:-/usr/sbin/strfile}"
  if [[ ! -x "$strfile" ]]; then
    echo "[oh-my-zsh] chucknorris depends on strfile, which is not installed" >&2
    echo "[oh-my-zsh] strfile is often provided as part of the 'fortune' package" >&2
    return
  fi
  # Generate the compiled fortune data file
  $strfile "$fortunes_dir/chucknorris" "$fortunes_dir/chucknorris.dat" >/dev/null
}
| true |
68e189afab1a4ec3d0bfa2c03e0958e7ce87bcfc | Shell | rushioda/PIXELVALID_athena | /athena/Trigger/TrigConfiguration/TrigConfStorage/scripts/TestCoolOnlineWriting.sh | UTF-8 | 885 | 3.078125 | 3 | [] | no_license | #!/bin/zsh
# Driver for writing ATLAS trigger configurations into a local COOL
# SQLite file.  With no argument (or "help") it prints usage; with
# "dbcool" it recreates trigconfonltest.db and fills it from the
# trigger database.
testname=help
if [[ $# -gt 0 ]]; then
    testname=$1
fi
case $testname in
    help)
        echo "Usage:"
        echo "  $0 <option>\n"
        echo "<option> can be"
        echo "  dbcool ..... TriggerDB(ATLR) -> Cool"
        ;;
    dbcool)
        # Start from a clean SQLite file.
        rm -f trigconfonltest.db
        echo "Creating trigconfonltest.db"
        # ">&!" is zsh for "redirect stdout and stderr, clobbering the
        # target file even under NO_CLOBBER".
        TrigConf2COOLApp -e create --cooldb 'sqlite://;schema=trigconfonltest.db;dbname=TRIGCONF' >&! cooldbcreation.log
        echo "Filling trigconfonltest.db, log: db2coolonltest.log"
        #valgrind --trace-children=yes --num-callers=8 --show-reachable=yes \
        TrigConf2COOLOnlineTestApp -e write --cooldb 'sqlite://;schema=trigconfonltest.db;dbname=TRIGCONF' \
            --run 52290 --trigdb TRIGGERDB --configkey 634 --prescalekeyhlt 818 820 821 --prescalekeylvl1 1 \
            >&! db2coolonltest.log
        ;;
esac
# AtlCoolConsole.py 'sqlite://;schema=trigconfonltest.db;dbname=TRIGCONF'
| true |
9ad07daa149ffa62a0a9a0eec90d7c4103d9b6e5 | Shell | maltebp/ComputerGraphics | /deploy.sh | UTF-8 | 301 | 2.6875 | 3 | [] | no_license | #!/usr/bin/env bash
# Deploys the solutions onto my personal DTU student webpage.
# Requires the SSH_KEY environment variable to point at a private key.
remote_user=s185139
remote_host=login.gbar.dtu.dk
ssh_key="$SSH_KEY"
remote="${remote_user}@${remote_host}"
# Wipe the previous deployment, then upload the freshly built site.
ssh -i "$ssh_key" "$remote" "rm -rf ~/public_html/*" &&
    scp -r -i "$ssh_key" out/ "$remote":./public_html/thisisaveryhiddenfolder
91e27187669334d5f3400e11d9d964f1f7797bd0 | Shell | leo-hsk/bash_process_scheduler | /common/printHelp.sh | UTF-8 | 1,436 | 3.484375 | 3 | [] | no_license | #!/bin/bash
#############################################################################################
#                                                                                           #
# This shell script prints the usage of the Process Scheduler Simulation.                   #
# Authors: Anton Rösler (anton.roesler@stud.fra-uas.de)                                     #
#          Leonard Hußke (leonard.husske@stud.fra-uas.de)                                   #
#          Patrick Frech (patrick.frech@stud.fra-uas.de)                                    #
#                                                                                           #
# Copyright (c) 2020-2020 Anton Rösler, Leonard Hußke, Patrick Frech. All Rights Reserved.  #
#                                                                                           #
#############################################################################################
# Emit the static part of the help text in one heredoc instead of a
# long series of echo calls.  The quoted delimiter suppresses all
# expansion so the text is printed verbatim.
cat <<'USAGE'

Usage:
Start_Process_Scheduler_Simulation.sh [opt:param1] [opt:param2]

param1 is optional and can be one of the described commands.
param2 is only used with param1=help and has to be a valid string.

--resume - Starts the program with the processes from the last session.
--help - Prints out this help message.
--help [param2] - Starts the help with the given param2 for specific algorithm.

Valid param2's are: 

USAGE
# The list of valid param2 values is produced by a helper script.
source ${processSchedulerWorkingDir}/common/printValidParam2.sh
cce84eb33031f8cc407427b66eadf88b27683612 | Shell | eosswedenorg/haproxy-exporter-systemd | /install.sh | UTF-8 | 1,293 | 3.859375 | 4 | [] | no_license | #!/usr/bin/env bash
ARCH=$(dpkg --print-architecture)
VERSION=0.9.0
INSTALLDIR=/usr/local/bin
CONFIGDIR=/etc/prometheus
SRC_URL=https://github.com/prometheus/haproxy_exporter/releases/download/v${VERSION}/haproxy_exporter-${VERSION}.linux-${ARCH}.tar.gz
# Download and install the binary.
echo " - Download source from: ${SRC_URL}"
wget -q --show-progress -O- ${SRC_URL} | tar zxf -
sudo mv haproxy_exporter-${VERSION}.linux-${ARCH}/haproxy_exporter ${INSTALLDIR}/haproxy_exporter-${VERSION}
# Create User/Group
echo " - Adding user: haproxy_exporter"
sudo useradd -M -s /bin/false haproxy_exporter
# Write config
echo " - Write config: ${CONFIGDIR}/haproxy_exporter.conf"
sudo mkdir -p ${CONFIGDIR}
sudo cp ./haproxy_exporter.conf ${CONFIGDIR}/
# Write system service file.
echo " - Write systemd service file: /etc/systemd/system/haproxy_exporter.service"
echo "[Unit]
Description=HAProxy Prometheus Exporter
Wants=network-online.target
After=network-online.target
[Service]
EnvironmentFile=-${CONFIGDIR}/haproxy_exporter.conf
User=haproxy_exporter
Group=haproxy_exporter
Type=simple
ExecStart=/usr/local/bin/haproxy_exporter-${VERSION} \$HAPROXY_EXPORTER_OPTS
[Install]
WantedBy=multi-user.target" | sudo tee /etc/systemd/system/haproxy_exporter.service > /dev/null
echo " - Done"
| true |
f097fb0327d70b93f0d2cebee646950421b640ab | Shell | markusklems/KCSD | /scripts/start_chef_server.sh | UTF-8 | 449 | 3 | 3 | [] | no_license | #!/bin/bash
# DESC
# Start the Chef-Server EC2 instance and attach its elastic IP.
# Requires CHEF_HOME to be set; config.cfg must define
# chef_server_instance_id and elastic_ip_1.
# read the config file (quoted so a CHEF_HOME containing spaces or
# glob characters cannot break the path -- SC2086)
source "$CHEF_HOME/config/config.cfg"
# start Chef-Server via EC2 API Tools
echo ":::::::::::::::::: starting Chef Server"
ec2-start-instances "$chef_server_instance_id"
# make a pause so that the instance has enough time
sleep 10
# assign the elastic IP
echo ":::::::::::::::::: assigning the elastic IP to Chef Server"
ec2-associate-address -i "$chef_server_instance_id" "$elastic_ip_1"
| true |
f96a1d412a445d0c655e91dd02db4ae4fe470f05 | Shell | qshao/AnalyzeToolsforMD | /runputfile.sh | UTF-8 | 127 | 2.9375 | 3 | [] | no_license | for file in *.top
do
	# Guard against the literal pattern "*.top" remaining when no
	# files match the glob.
	[ -e "$file" ] || continue
	# Group each topology file's related files under a two-level tree:
	# <first 3 chars of name>/<first 7 chars of name>/.
	file1=${file:0:3}
	file2=${file:0:7}
	# mkdir -p creates both levels at once and is a no-op when they
	# already exist (the original bare mkdir calls printed an error on
	# every re-run).  Expansions are quoted against odd file names.
	mkdir -p "$file1/$file2"
	cp "$file2"*.* "$file1/$file2"
done
| true |
3fb6d6c446cdaf637e872e64178e6a04629ed53f | Shell | GovanifY/crossdev-gentoo | /crossdev-boost.sh | UTF-8 | 2,532 | 4.09375 | 4 | [] | no_license | #!/bin/sh
#
# Run from the boost source directory.
#
# Interactively selects an installed crossdev toolchain (and, when
# several are installed, a gcc version) and cross-builds Boost for it.
# NOTE(review): the file's shebang is /bin/sh but the code relies on
# bash features (arrays, [[ ]], $[ ]); run it with bash.
if [ ! -d tools/build ]; then
    echo "Can not find tools/build directory!";
    echo "Make sure you are at the root of the boost source tree";
    exit
fi
USERCFG=$(find ./tools/build -name user-config.jam)
# Copy the example user-config.jam to the boost build root directory.
[[ -f "$USERCFG" ]] && cp "$USERCFG" .
export BOOST_BUILD_PATH=$(pwd)
echo -n "Determining python root... "
PYTHON_ROOT=`python -c "import sys; print(sys.prefix)"`
echo $PYTHON_ROOT
# Find installed crossdev toolchains.
echo "Finding installed crossdev toolchains..."
TARGET=""
P=/etc/portage/package.use/cross-
B=( $(ls $P*|sed -e "s:$P::") )
if [ ${#B[@]} -gt 1 ]; then
    echo "Installed crossdev toolchains:"
    echo
    for i in $(seq 1 ${#B[@]}); do
        echo "$i) ${B[$[i-1]]}"
    done
    echo
    echo -n "Which toolchain should we install boost for [1-${#B[@]}]? "
    read SELECTION
    [[ $SELECTION -lt 1 ]] || [[ $SELECTION -gt ${#B[@]} ]] && exit
    TARGET=${B[$[SELECTION-1]]}
else
    TARGET=$B
fi
# Find installed gcc versions
echo "Finding gcc versions (MAJOR.MINOR) installed in $TARGET..."
GCCVER=""
# BUG FIX: G must be an array (exactly like B above).  The original
# assigned the command output to a plain string, so ${#G[@]} was
# always 1, the version menu below could never appear, and with
# several gcc versions GCCVER became all of them joined together.
G=( $(equery -q list --format=\$version cross-$TARGET/gcc|sed -e 's/\([0-9]*\.[0-9]\)*\..*/\1/') )
if [ ${#G[@]} -gt 1 ]; then
    echo "Installed gcc versions in $TARGET"
    echo
    for i in $(seq 1 ${#G[@]}); do
        echo "$i) ${G[$[i-1]]}"
    done
    echo
    echo -n "Which gcc version should we install boost for [1-${#G[@]}]? "
    read SELECTION
    [[ $SELECTION -lt 1 ]] || [[ $SELECTION -gt ${#G[@]} ]] && exit
    GCCVER=${G[$[SELECTION-1]]}
else
    GCCVER=$G
fi
echo
echo "Setting gcc version to ${GCCVER}, and target to ${TARGET}."
echo
echo "using gcc : ${GCCVER} : ${TARGET}-g++ ;" >> user-config.jam
#
# Now run the bootstrap
echo "Running the bootstrap..."
#
./bootstrap.sh --prefix=/usr/${TARGET}/usr --with-python-root=$PYTHON_ROOT
#
# Now build it.
echo "Building boost..."
#
./b2 -a -j3 --prefix=/usr/${TARGET}/usr --build-dir=build --layout=versioned --ignore-site-config target-os=windows threadapi=win32
#
# in stage/lib/, remove _win32 from the thread files.
#
echo "Renaming *_win32* files..."
for x in stage/lib/*_win32*; do mv -v "${x}" "${x/_win32/}"; done
#
# Now copy the library and include files to the crossdev
# system.
#
echo "Copying files to /usr/${TARGET}/usr/lib/"
cp stage/lib/libboost_* /usr/${TARGET}/usr/lib/
echo "Copying files to /usr/${TARGET}/usr/include/"
cp -R boost /usr/${TARGET}/usr/include/
#
#
# Done - Manual install of boost
echo "Done installing of boost for ${TARGET}."
| true |
cd14b3bfc568d444d03846f5c9de01cae446bebf | Shell | kaspermeerts/ManyParticles | /src/bench/fixedPartnum/fixedPartnum.sh | UTF-8 | 257 | 2.765625 | 3 | [] | no_license | maxtime=200
maxiter=1000
radius=0.1
worldsize=50
maxboxnum=50
npart=10000
for nbox in $(seq 1 1 200)
do
boxsize=$(echo "$worldsize / $nbox" | bc -l)
output=$(../../main $boxsize $nbox $npart $radius -i $maxiter -b $maxtime)
echo "$nbox $output";
done
| true |
04e52324bb1f6ab1b5ed8ebdad1c918be4b69ca3 | Shell | ultraelephant/ASA_Users_Zabbix_Monitoring | /asa_vpn_usr.sh | UTF-8 | 1,989 | 3.5625 | 4 | [] | no_license | #!/bin/bash
# Zabbix external script: report Cisco ASA remote-access VPN users.
#
# Arguments (as used below):
#   $1 - ASA host/IP to poll via SNMP
#   $2 - SNMPv2c community string
#   $3 - mode: "discover" (emit Zabbix LLD JSON) or "check"
#   $4 - user name to look up (check mode only)
#
# State file asa_usr_lst holds "name|flag|" pairs: flag 1 = currently
# connected, 0 = seen previously but not connected now.
script_directory="/etc/zabbix/ext_scripts/"
if [[ $3 == "discover" ]]
then
	# Walk the ASA session OID; each index encodes the user name as a
	# dotted list of ASCII codes, which the awk loop decodes with %c.
	act_usr=`snmpwalk -v 2c -c $2 $1 1.3.6.1.4.1.9.9.392.1.3.21.1.10 | awk -F "." '{for(i=11;i<(NF-3);++i) printf "%c", $i; printf "\n"}' | uniq`$'\n'
	if [ ! -f "$script_directory/asa_usr_lst" ]; then
		# First run: create the state file with every active user
		# marked as connected.
		# NOTE(review): "IFC=" looks like a typo for "IFS=" (it only
		# sets an unused variable) -- verify with names containing
		# leading spaces.
		printf '%s' "$act_usr" | while IFC= read -r name
		do
			echo -n "$name|1|" >> "$script_directory/asa_usr_lst"
		done
	else
		# Load the existing state as alternating name/flag entries...
		IFS='| ' read -r -a usr_all <<< `cat $script_directory/asa_usr_lst`
		# ...and clear every "connected" flag before re-marking below.
		for (( c=0; c<=${#usr_all[@]}; c++ ))
		do
			if [ "${usr_all[$c]}" == "1" ];
			then usr_all[$c]="0"
			fi
		done
		# Re-mark users that are still connected and append users never
		# seen before.  This runs in an explicit subshell, so the
		# updated array must be persisted before the subshell exits.
		printf '%s\n' "$act_usr" | ( while IFC= read -r name_act
		do
			count=0;
			for (( c=0; c<${#usr_all[@]}; c++ ))
			do
				if [ "${usr_all[$c]}" != "0" ] && [ "$name_act" != "" ]; then
					if [ "${usr_all[$c]}" == "$name_act" ];
					then
						usr_all[(($c+1))]="1";
						count=0;
						break;
					else
						((count++));
					fi
				fi
			done
			if [ "$count" != "0" ]; then
				echo $name_act
				usr_all+=($name_act);
				usr_all+=('1');
			fi
		done
		# Persist the updated state joined with "|" separators
		# (prepending | to IFS makes ${usr_all[*]} join with |).
		IFS="|$IFS";
		printf '%s' "${usr_all[*]}" > "$script_directory/asa_usr_lst";
		# NOTE(review): probably meant IFS="${IFS:1}" to restore IFS;
		# harmless here only because the subshell ends immediately.
		IFS="{IFS:1}");
		echo "|" >> "$script_directory/asa_usr_lst"
	fi
	# Build the Zabbix low-level-discovery JSON from the state file,
	# skipping entries that are just flags/commas (the regex below).
	IFS='| ' read -r -a usr_dvr <<< `cat $script_directory/asa_usr_lst`;
	json="{\"data\":[";
	for (( c=0; c<${#usr_dvr[@]}; c++ ))
	do
		if ! [[ "${usr_dvr[$c]}" =~ ^[0-1,]+$ ]];
		then
			json="$json{\"{#USERNAME}\":\"";
			json1="$(echo -n ${usr_dvr[$c]} | sed 's,\\,\\\\,g')"
			json="$json$json1"
			if [[ "$c" == "$((${#usr_dvr[@]}-2))" ]]
			then json="$json\"}";
			else json="$json\"},";
			fi
		fi
	done
	json="$json]}"
	echo "$json";
fi
if [[ $3 == "check" ]];
then
	# Report a single user's connected flag: the array entry directly
	# after the matching name.
	if [[ -n $4 ]];
	then
		IFS='| ' read -r -a usr_chk <<< `cat $script_directory/asa_usr_lst`;
		for (( c=0; c<${#usr_chk[@]}; c++ ))
		do
			if [[ "${usr_chk[$c]}" == "$4" ]];
			then
				echo "${usr_chk[(($c+1))]}";
				break;
			fi
		done
	else
		echo "name is not defined";
	fi
fi
| true |
f9de0f3c50f734021022898ac152bc0f130785bd | Shell | gbsf/archlinux-packages | /texinfo/repos/core-x86_64/PKGBUILD | UTF-8 | 579 | 2.65625 | 3 | [] | no_license | # $Id$
# Maintainer: Jason Chu <jason@archlinux.org>
# Contributor: Tom Newsom <Jeepster@gmx.co.uk>
#
# Arch Linux PKGBUILD for GNU Texinfo.
pkgname=texinfo
pkgver=4.11
pkgrel=2
pkgdesc="Utilities to work with and produce manuals, ASCII text, and on-line documentation from a single source file"
arch=('i686' 'x86_64')
url="http://www.gnu.org/software/texinfo/"
license=('GPL')
depends=('ncurses')
source=(ftp://ftp.gnu.org/pub/gnu/$pkgname/$pkgname-$pkgver.tar.gz)
md5sums=('0c652adddc75b385ee1509fc55ff2837')
# Configure, build, and stage the package into $startdir/pkg.
build() {
  cd $startdir/src/$pkgname-$pkgver
  ./configure --prefix=/usr
  make || return 1
  make DESTDIR=$startdir/pkg install
}
| true |
496348f6aef915d34be7fcc0da4fbfe2a8d4545f | Shell | ludamad/lanarts | /run.sh | UTF-8 | 6,125 | 4.125 | 4 | [] | no_license | cd `dirname $0`
# Good practice -- exit completely on any bad exit code:
set -e
###############################################################################
# Helper functions for conditionally coloring text.
###############################################################################
# True (exit status 0) on macOS, where the ANSI trick below is skipped.
function is_mac() {
    if [ "$(uname)" == "Darwin" ]; then
        return 0 # True!
    else
        return 1 # False!
    fi
}
# Bash function to apply a color to a piece of text.
# $1 is an ANSI SGR attribute string (e.g. '1;33'); the text to color
# is read from stdin.  On macOS the text is passed through unchanged.
function colorify() {
    if is_mac ; then
        cat
    else
        local words;
        words=$(cat)
        echo -e "\e[$1m$words\e[0m"
    fi
}
##############################################################################
# Bash function to check for a flag in 'args' and remove it.
# Treats 'args' as one long string.
# Returns true (status 0) when the flag was present and removed.
###############################################################################
args="$@" # Create a mutable copy of the program arguments
function handle_flag(){
    flag=$1
    local kept=""
    local present=1 # shell "false" until the flag is seen
    for arg in $args ; do
        if [ "$arg" = "$flag" ] ; then
            present=0 # found it; drop it from the kept list
        else
            kept="$kept $arg"
        fi
    done
    # Rebuild 'args' from every argument except the removed flag.
    args="$kept"
    return $present
}
##############################################################################
# Eclipse options
#   --eclipse/-e: Create eclipse project files
###############################################################################
# Create eclipse-project-files
if handle_flag "--eclipse" || handle_flag "-e" ; then
    src=$(pwd)
    rm -f CMakeCache.txt
    mkdir ../LanartsEclipse -p
    cd ../LanartsEclipse
    # Eclipse project creation
    cmake -Wno-dev -G"Eclipse CDT4 - Unix Makefiles" $src
    exit
fi
###############################################################################
# Compiling and setting up runtime directory structure
###############################################################################
# Handle environment-variable setting convenience flags
# These are used to communicate with CMake
# Each flag has an optional shortform, use whichever is preferred.
if handle_flag "--mingw" ; then
    export BUILD_MINGW=1
fi
if handle_flag "--headless" ; then
    export BUILD_HEADLESS=1
fi
if handle_flag "--small" || handle_flag "-s" ; then
    export LANARTS_SMALL=1
fi
if handle_flag "--luajit" || handle_flag "-lj" ; then
    export BUILD_LUAJIT=1
fi
if handle_flag "--optimize" || handle_flag "-O" ; then
    export BUILD_OPTIMIZE=1
fi
if handle_flag "--sanitize" ; then
    export BUILD_SANITIZE=1
fi
if handle_flag "--emscripten" ; then
    export BUILD_EMSCRIPTEN=1
fi
# Profile-guided optimization, step 1: generate profiling data.
if handle_flag "--profile-gen" || handle_flag "--pgen" ; then
    export BUILD_OPTIMIZE=1
    export BUILD_PROF_GEN=1
fi
# Use --pgen, and then this flag, for optimal performance
if handle_flag "--profile-use" || handle_flag "--puse" ; then
    export BUILD_OPTIMIZE=1
    export BUILD_PROF_USE=1
fi
# Pick whether to use debug std data-structures for eg std::vector
if handle_flag "--debug-std" ; then
    export BUILD_FLAGS="$BUILD_FLAGS -D_GLIBCXX_DEBUG"
fi
# Configure amount of cores used
# (/proc/cpuinfo only exists on Linux; otherwise fall back to a guess.)
if [[ -e /proc/cpuinfo ]] ; then
    cores=$(grep -c ^processor /proc/cpuinfo)
else
    cores=4 # Guess -- may want to manually edit if above fails.
fi
# Helper for managing build directories:
# remove 'build' only when it is a symlink (a real directory is kept so
# the migration error below can fire).
function rm_if_link(){ [ ! -L "$1" ] || rm -f "$1"; }
# Compose the build directory name from the feature flags exported
# above, point the 'build' symlink at it, run the matching CMake
# variant there, build the 'lanarts' target, and regenerate the
# compiled resource listing.
function build_lanarts(){
    BUILD_DIR="build_debug"
    if [ $BUILD_OPTIMIZE ] ; then
        BUILD_DIR="build_release"
    fi
    # Specialize build dirs
    if [ $BUILD_HEADLESS ] ; then
        BUILD_DIR="${BUILD_DIR}_headless"
    fi
    if [ $BUILD_LUAJIT ] ; then
        BUILD_DIR="${BUILD_DIR}_luajit"
    fi
    if [ $BUILD_SANITIZE ] ; then
        BUILD_DIR="${BUILD_DIR}_asan"
    fi
    if [ $BUILD_PROF_GEN ] ; then
        BUILD_DIR="${BUILD_DIR}_profgen"
    fi
    if [ $BUILD_EMSCRIPTEN ] ; then
        BUILD_DIR="${BUILD_DIR}_emscripten"
    fi
    if [ $BUILD_PROF_USE ] ; then
        BUILD_DIR="${BUILD_DIR}_profuse"
    fi
    if [ $BUILD_MINGW ] ; then
        BUILD_DIR="${BUILD_DIR}_mingw"
    fi
    rm_if_link build
    if [ -d build ] ; then
        echo "You have a non-symlink build directory. Lanarts has moved to symlinking 'build' to 'build_release' or 'build_debug'. Please rename the build directory to the appropriate one of those." >&2
        exit 1
    fi
    mkdir -p $BUILD_DIR
    ln -s $BUILD_DIR build
    cd $BUILD_DIR
    # Pick the CMake front-end matching the requested toolchain.
    if [ $BUILD_EMSCRIPTEN ] ; then
        emcmake cmake ..
    elif [ $BUILD_MINGW ] ; then
        if python -mplatform | grep fedora ; then
            export BUILD_FEDORA_CROSS=1
            mingw32-cmake -Wno-dev .. | colorify '1;33'
        else
            cmake -DCMAKE_TOOLCHAIN_FILE=mingw-toolchain.cmake -Wno-dev .. | colorify '1;33'
        fi
    else
        cmake -Wno-dev .. | colorify '1;33'
    fi
    if handle_flag "--clean" ; then
        make clean
    fi
    make -j$((cores+1)) lanarts
    cd ../runtime && python2 compile_images.py > compiled/Resources.lua
    cd ..
}
# --force/-f: Do not build (use last successful compiled binary)
if ! handle_flag "-f" && ! handle_flag "--force" ; then
    if handle_flag "--verbose" || handle_flag "-v" ; then
        build_lanarts
    else
        build_lanarts > /dev/null
    fi
fi
# --build/-b: Do not run (build only)
if handle_flag "-b" || handle_flag "--build" ; then
    exit
fi
###############################################################################
# Running the game.
###############################################################################
# Launch the game from the runtime directory, optionally under gdb.
# Any flags still left in $args are forwarded to the lanarts binary.
function run_lanarts(){
    cd runtime
    export vblank_mode=0
    if handle_flag "--gdb" || handle_flag "-g" ; then
        echo "Wrapping in GDB:" | colorify '1;35'
        gdb -silent -x ../debug.gdb --args ../build/src/lanarts $args
    else
        # exec replaces this shell, so 'cd ..' below only runs on the
        # gdb path.
        exec ../build/src/lanarts $args
    fi
    cd ..
}
# TODO add gdb macro with this line:
#print luaL_loadstring(L, "return debug.traceback()") || lua_pcall(L, 0, 1, 0) || printf(lua_tolstring(L, -1, 0))
run_lanarts
| true |
bf47ebd7f23694884cad454c05d55ab21cc1edee | Shell | qianfan1996/MultiplicativeMultimodal | /imagerecognition/tools/download_cifar.sh | UTF-8 | 442 | 3.03125 | 3 | [] | no_license | OUTPUT_DIR="${1%/}"
# CIFAR download locations.  Only CIFAR-100 is actually fetched below;
# the CIFAR-10 variables are kept for symmetry/future use.
CIFAR10_URL="https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz"
CIFAR100_URL="https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz"
CIFAR10_DIR="$OUTPUT_DIR/cifar-10"
CIFAR100_DIR="$OUTPUT_DIR/cifar-100"
echo "Downloading CIFAR-100"
# Quote every expansion so a user-supplied OUTPUT_DIR containing
# spaces or glob characters cannot word-split the paths (SC2086).
mkdir -p "$CIFAR100_DIR"
wget "$CIFAR100_URL" -O "$CIFAR100_DIR/cifar-100.tar.gz"
tar xzf "$CIFAR100_DIR/cifar-100.tar.gz" -C "$CIFAR100_DIR"
#rm -rf "$CIFAR100_DIR/cifar-100.tar.gz"
| true |
507438ee4bcdba27206f59297d166a32a472e6a4 | Shell | RobertAldrich/ruby-on-rails-mysql | /devspace/entrypoint.sh | UTF-8 | 209 | 2.65625 | 3 | [] | no_license | #!/bin/bash
data_dir="/data/.mysql_data"
# On first run only, seed the persistent data directory from the MySQL
# files shipped inside the image.
if [ ! -d "$data_dir" ]; then
    rsync -av /var/lib/mysql/ "$data_dir"
fi
# Bring the server up and open it to remote connections.
/etc/init.d/mysql start
mysql < /tmp/open-remote-connections.sql
# Replace this shell with the command the container was asked to run.
exec "$@"
| true |
af7851b0d934f9b3f89107425c2d5132c0ae147f | Shell | dbernstein/fcrepo-performance-test-scripts | /n-uris.sh | UTF-8 | 572 | 3.453125 | 3 | [] | no_license | #!/bin/bash
# Repository performance probe: create a container resource under a
# random path, PATCH $MAX dc:relation URI triples into it one by one,
# then time a full n-triples retrieval and count the returned triples.
BASE=http://localhost:8080/rest/$RANDOM
COL=$BASE/uriCollection
# create a collection
curl -X PUT $COL && echo
N=0
# number of URI properties to add (first script argument)
MAX=$1
while [ $N -lt $MAX ]; do
	# add a property
	curl -X PATCH -H "Content-Type: application/sparql-update" -d "
	prefix dc: <http://purl.org/dc/elements/1.1/>
	insert { <> dc:relation <http://example.org/uri/$N> } where { }" $COL
	N=$(( $N + 1 ))
	# progress marker every 500 inserts
	if [ $(( $N % 500 )) == 0 ]; then
		echo $N properties
	fi
done
echo retrieving $COL
time curl -H "Accept: application/n-triples" $COL > n-uris.nt
grep -c relation n-uris.nt
| true |
90439c35a1150c9a422fe24a14e698bfb0ad8a72 | Shell | bittorrent3389/libfuzzer-bot | /dockerfiles/libxml/build.sh | UTF-8 | 250 | 2.640625 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
set -e
# Build libxml2 (regenerating the autotools files on first run) and
# link the fuzz target against the freshly built libraries.
cd /src/libxml2/
if [ ! -f configure ]
then
	./autogen.sh
fi
echo =========== MAKE
make -j 16
# CXX, CXXFLAGS and LIBFUZZER_OBJS are supplied by the surrounding
# Docker image environment.
$CXX $CXXFLAGS -std=c++11 libxml2_fuzzer.cc \
	-Iinclude -L.libs -lxml2 -llzma $LIBFUZZER_OBJS \
	-o /work/libxml2/libxml2_fuzzer
| true |
f2ec57203d29b230f3a0c256a9873d523aa58fa4 | Shell | nerdsupremacist/RandomStuff | /Bash/DeleteAll.sh | UTF-8 | 87 | 2.515625 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Recursively delete every compiled Python bytecode file (*.pyc) under
# the current directory.  'find -delete' replaces the original
# cd-into-every-directory subshell loop, which errored in directories
# containing no *.pyc files and silently skipped unreadable ones.
find "$PWD" -type f -name '*.pyc' -delete
| true |
aae26e3cd37f2496369c46001dfb6aba6981f036 | Shell | Backup-Gits/0019-scripts | /bin/import-model-from-244.sh | UTF-8 | 1,479 | 3.609375 | 4 | [] | no_license | #!/bin/sh
# Import per-model packages (function.pkg, conf.pkg) from host
# 192.168.5.244 into the local build spec tree and unpack them into
# their *.pkg.d directories.
QB_DIR=/u1/Project.Q-Balancer
if [ $# -lt 3 ]; then
    echo
    echo "Usage : $0 <VERSION> <BRAND> <MODEL1> <MODEL2> ..."
    echo
    echo "        <VERSION> : 2.5.0"
    echo "        <BRAND>   : Deansoft, XRIO, ZeroOneTech"
    echo "        <MODEL>   : 1610, 1611, ..."
    exit 1
fi
VERSION=$1
BRAND=$2
# Every remaining argument is a model number.
shift; shift
MODEL_LIST=$*
echo
echo " NOTE : Only function.pkg and conf.pkg will be imported (as spec) !!!"
echo
for MODEL in $MODEL_LIST; do
    FROM_DIR=192.168.5.244:/home/QBPL/DOM/$VERSION/$MODEL
    SPEC_DIR=$QB_DIR/build/$VERSION/spec/$BRAND/$MODEL
    # Skip models that already have a local spec directory.
    if [ -d $SPEC_DIR ]; then
        echo
        echo "INFO : Model ($MODEL) was already imported from 244 host."
        continue
    fi
    # NOTE(review): bare mkdir fails unless all parent directories
    # already exist; mkdir -p would be more robust -- verify intent.
    mkdir $SPEC_DIR
    #
    # To transfer model-dependent PKG (function.pkg, conf.pkg).
    #
    echo
    echo "... going to transfer files of model ($MODEL) from host 244 "
    echo
    # [cf]*.pkg matches conf.pkg and function.pkg on the remote side.
    scp -pr $FROM_DIR/[cf]*.pkg $SPEC_DIR
    cd $SPEC_DIR
    #
    # Import/create :
    #   .../products/brands/imported-from-244/models/$MODEL/function.pkg.d
    #
    (tar xfz ./function.pkg; \
     mkdir function.pkg.d; \
     mv conf function.pkg.d; \
     rm -rf ./function.pkg)
    #
    # Import/create :
    #   .../products/brands/imported-from-244/models/$MODEL/conf.pkg.d
    #
    (tar xfz ./conf.pkg; \
     mkdir conf.pkg.d; \
     mv conf conf.pkg.d; \
     rm -rf ./conf.pkg)
    echo
    echo "... complete to transfer files of model ($MODEL) from host 244 "
    echo
    echo " NOTE : Remember to add 'BRAND $BRAND' into the registry file."
    echo
done
| true |
d371f75c13a1318887f6bc7518b78e58d33efc4e | Shell | NeonMan/shellUnit | /shunit.d/string-asserts.sh | UTF-8 | 3,266 | 3.515625 | 4 | [
"BSD-2-Clause"
] | permissive | #Tests if param 1 (string) matches the param 2 regexp
#
# Params:
#  $1 <-- A regular expression
#  $2 <-- A string
#
# Passes when string $2 contains a match for regular expression $1.
assertMatches () {
	SHU_TMP=`echo "$2" | grep "$1"`
	if [[ "$SHU_TMP" != "" ]]
	then
		pass
	else
		fail "expected '$2' to match regular expression '$1'"
	fi
}
#Tests if the string in param 2 does NOT match the param 1 regexp
#
# Params:
#  $1 <-- A regular expression
#  $2 <-- A string
# (The original header listed the parameters swapped; the code greps
# string $2 for pattern $1, exactly like assertMatches above.)
assertNotMatches () {
	SHU_TMP=`echo "$2" | grep "$1"`
	if [[ "$SHU_TMP" != "" ]]
	then
		fail "expected '$2' to NOT match regular expression '$1'"
	else
		pass
	fi
}
#Test if a string is contained onto another
#
# Params:
#  $1 <-- A string
#  $2 <-- A substring
#
# BUG FIX: the substring is now quoted inside the pattern so glob
# metacharacters (*, ?, [) in $2 are matched literally; unquoted, the
# original treated $2 as a glob pattern instead of a plain substring.
assertStringContains () {
	if [[ "$1" == *"$2"* ]]
	then
		pass
	else
		fail "'$1' does not contain '$2'"
	fi
}
#Test if a string is NOT contained in another
#
# Params:
#  $1 <-- A string
#  $2 <-- A substring
assertStringNotContains () {
	if [[ "$1" == *"$2"* ]]
	then
		fail "'$1' contains '$2'"
	else
		pass
	fi
}
#Test equality of two strings, ignoring case
#
# Params:
#  $1 <-- String
#  $2 <-- String
assertEqualsIgnoringCase () {
	#Convert parameters to lower case
	SHU_TMPSTR1=`echo "$1" | tr '[:upper:]' '[:lower:]'`
	SHU_TMPSTR2=`echo "$2" | tr '[:upper:]' '[:lower:]'`
	#Test equality
	if [ "$SHU_TMPSTR1" == "$SHU_TMPSTR2" ]
	then
		pass
	else
		fail "'$SHU_TMPSTR1'('$1') does not equal '$SHU_TMPSTR2'('$2')"
	fi
}
#Test inequality of two strings, ignoring case
#
# Params:
#  $1 <-- String
#  $2 <-- String
assertNotEqualsIgnoringCase () {
	#Convert parameters to lower case
	SHU_TMPSTR1=`echo "$1" | tr '[:upper:]' '[:lower:]'`
	SHU_TMPSTR2=`echo "$2" | tr '[:upper:]' '[:lower:]'`
	#Test equality (failing the assertion when the strings ARE equal)
	if [ "$SHU_TMPSTR1" == "$SHU_TMPSTR2" ]
	then
		fail "'$SHU_TMPSTR1'('$1') equals '$SHU_TMPSTR2'('$2')"
	else
		pass
	fi
}
#Test if two strings are equal ignoring whitespace
#
# Params:
#  $1 <-- String
#  $2 <-- String
assertEqualsIgnoringWhitespace () {
	#Remove whitespace (both vertical and horizontal)
	SHU_TMPSTR1=`echo "$1" | tr -d '[:space:]'`
	SHU_TMPSTR2=`echo "$2" | tr -d '[:space:]'`
	#Test equality
	if [ "$SHU_TMPSTR1" == "$SHU_TMPSTR2" ]
	then
		pass
	else
		fail "'$SHU_TMPSTR1'('$1') does not equal '$SHU_TMPSTR2'('$2')"
	fi
}
#Test if two strings are not equal ignoring whitespace
#
# Params:
#  $1 <-- String
#  $2 <-- String
assertNotEqualsIgnoringWhitespace () {
	#Remove whitespace (both vertical and horizontal)
	SHU_TMPSTR1=`echo "$1" | tr -d '[:space:]'`
	SHU_TMPSTR2=`echo "$2" | tr -d '[:space:]'`
	#Test equality (failing the assertion when the strings ARE equal)
	if [ "$SHU_TMPSTR1" == "$SHU_TMPSTR2" ]
	then
		fail "'$SHU_TMPSTR1'('$1') equals '$SHU_TMPSTR2'('$2')"
	else
		pass
	fi
}
#Test if a string starts with another
#
# Params:
#  $1 <-- A string
#  $2 <-- Expected prefix
#
# BUG FIX: the prefix is quoted inside the pattern so glob
# metacharacters in $2 are compared literally; unquoted, the original
# expanded $2 as a glob pattern rather than a plain prefix.
assertStringStartsWith () {
	if [[ "$1" == "$2"* ]]
	then
		pass
	else
		fail "'$1' does not start with '$2'"
	fi
}
#Test if a string does not start with another
#
# Params:
#  $1 <-- A string
#  $2 <-- Expected prefix
assertStringNotStartsWith () {
	if [[ "$1" == "$2"* ]]
	then
		fail "'$1' starts with '$2'"
	else
		pass
	fi
}
| true |
9d4c06901d34f10c01b749d1f6fc278c1e78d03a | Shell | JinKanai/dotfiles | /preparator/installers/install_neovim_source.sh | UTF-8 | 570 | 3.125 | 3 | [] | no_license | #!/bin/sh
# Build and install Neovim from source unless an 'nvim' binary is
# already on PATH.  'command -v' is the POSIX replacement for the
# external (and sometimes absent) 'which' utility (SC2230).
command -v nvim > /dev/null 2>&1
ret=$?
if [ $ret -eq 0 ];then
    echo 'nvim is installed.nothing to do.'
else
    git clone --depth 1 https://github.com/neovim/neovim.git
    # Abort if the clone failed; otherwise make/sudo would run in the
    # wrong directory (SC2164).
    cd neovim || exit 1
    make CMAKE_BUILD_TYPE=RelWithDebInfo
    sudo make install
    cd -
fi
| true |
ce6cdde6fd436168ac6d622c5311b8bfddf48d72 | Shell | miekuskamil/Ansible_ACI_Faults_Extract | /implementation/main.sh | UTF-8 | 5,418 | 3.625 | 4 | [
"MIT"
] | permissive | #!/bin/bash
###FUNCTIONS
# Print a single newline (used as a final cosmetic flush).
newline () {
echo -ne "\n"
}
###GLOBAL
# Timestamps bounding the reporting window (now and 24h ago), in the
# formats expected by the playbooks and by the report text.
today=$(date +%Y-%m-%dT%H:%M:%S )
yesterday=$(date +%Y-%m-%dT%H:%M:%S --date="1 day ago")
today_short=$(date +%Y-%m-%d)
today_line=$(date +%Y-%m-%d-%H:%M)
yesterday_line=$(date +%Y-%m-%d-%H:%M --date="1 day ago")
###REACHIBILITY CHECK
# Populate 'arr' with the APIC addresses written to apics.txt by the
# reachability playbook (run twice below).
declare -a arr
ansible-playbook nested_list_iteration.yml
ansible-playbook nested_list_iteration.yml
arr+=($(cat 'apics.txt'))
echo "${arr[@]}"
###LIST FOR LOOP
# One HTML report file per APIC, per run.
for ip in "${arr[@]}"
do
file=$(echo ${ip}_faults_daily_report-$(date +%Y-%m-%d-%H-%M).html)
echo -ne "\nWorking on: $ip ..."
###API CALLS
# Pull each fault category for the window, then trim the raw playbook
# output: keep 20 lines after every "ack" hit, drop commas, strip
# leading whitespace.
operational_ansible=$(ansible-playbook faults_playbook.yml -i ../inventories/inventory.txt --vault-password-file ../vault.txt --limit "sandboxapicdc.cisco.com," --tags="operational_faults" --extra-vars "today=$today yesterday=$yesterday")
operational_ansible_cut=$(echo -ne "$operational_ansible" | egrep -i -A20 --group-separator=$'\n----------\n' "ack" | tr -d \, | sed -e 's/^[ \t]*//')
sleep 1
config_ansible=$(ansible-playbook faults_playbook.yml -i ../inventories/inventory.txt --vault-password-file ../vault.txt --limit "sandboxapicdc.cisco.com," --tags="config_faults" --extra-vars "today=$today yesterday=$yesterday")
config_ansible_cut=$(echo -ne "$config_ansible" | egrep -i -A20 --group-separator=$'\n----------\n' "\"ack\"" | tr -d \, | sed -e 's/^[ \t]*//')
sleep 1
communications_ansible=$(ansible-playbook faults_playbook.yml -i ../inventories/inventory.txt --vault-password-file ../vault.txt --limit "sandboxapicdc.cisco.com," --tags="communications_faults" --extra-vars "today=$today yesterday=$yesterday")
communications_ansible_cut=$(echo -ne "$communications_ansible" | egrep -i -A20 --group-separator=$'\n----------\n' "\"ack\"" | tr -d \, | sed -e 's/^[ \t]*//')
sleep 1
environmental_ansible=$(ansible-playbook faults_playbook.yml -i ../inventories/inventory.txt --vault-password-file ../vault.txt --limit "sandboxapicdc.cisco.com," --tags="environmental_faults" --extra-vars "today=$today yesterday=$yesterday")
environmental_ansible_cut=$(echo -ne "$environmental_ansible" | egrep -i -A20 --group-separator=$'\n----------\n' "\"ack\"" | tr -d \, | sed -e 's/^[ \t]*//')
sleep 1
###CONDITIONALS ON JSON EXTRACTS
# A '"totalCount": 0' substring in the playbook output means no faults
# in that category.  NOTE(review): the literal "echo " prefix inside
# the quotes is a quirk; it is harmless only because this is a
# contains-match, not an equality test.
if [[ "echo $operational_ansible" == *'"totalCount": 0'* ]]; then
operational=$(echo -ne "\nNO FAULTS BETWEEN $yesterday_line and $today_line\n")
sleep 1
else
operational=$(echo -ne "\nFAULTS BETWEEN $yesterday_line and $today_line:\n$operational_ansible_cut\n")
sleep 1
fi
if [[ "echo $config_ansible" == *'"totalCount": 0'* ]]; then
config=$(echo -ne "\nNO FAULTS BETWEEN $yesterday_line and $today_line\n\n")
sleep 1
else
config=$(echo -ne "\nFAULTS BETWEEN $yesterday_line and $today_line:\n\n$config_ansible_cut\n")
sleep 1
fi
if [[ "echo $communications_ansible" == *'"totalCount": 0'* ]]; then
communications=$(echo -ne "\nNO FAULTS BETWEEN $yesterday_line and $today_line\n\n")
sleep 1
else
communications=$(echo -ne "\nFAULTS BETWEEN $yesterday_line and $today_line:\n\n$communications_ansible_cut\n")
sleep 1
fi
if [[ "echo $environmental_ansible" == *'"totalCount": 0'* ]]; then
environmental=$(echo -ne "\nNO FAULTS BETWEEN $yesterday_line and $today_line\n\n")
sleep 1
else
environmental=$(echo -ne "\nFAULTS BETWEEN $yesterday_line and $today_line:\n\n$environmental_ansible_cut\n")
sleep 1
fi
###HTML FORM
# Render the four category sections into the per-APIC HTML report.
echo -ne "
<pre><strong>OPERATIONAL</strong> - The system has detected an operational issue, such as a log capacity limit or a failed component discovery</pre>
<pre><strong>CONFIG</strong> - The system is unable to successfully configure a specific component</pre>
<pre><strong>COMMUNICATIONS</strong> - This fault happens when the system has detected a network issue such as a link down</pre>
<pre><strong>ENVIRONMENTAL</strong> - The system has detected a power problem, thermal problem, voltage problem, or a loss of CMOS settings</pre>
<table valign='top' style='width: 1000px; float: left; background-color: #f5b286;' border='1'>
<tbody>
<tr>
<td valign='top' style='width: 500px; text-align: center;'><span style='color: #000000;'><strong>OPERATIONAL</strong></span></td>
<td valign='top' style='width: 500px;'><pre>$operational</pre></td>
</tr>
<tr>
<td valign='top' style='width: 500px; text-align: center;'><span style='color: #000000;'><strong>CONFIG</strong></span></td>
<td valign='top' style='width: 500px;'><pre>$config</pre></td>
</tr>
<tr>
<td valign='top' style='width: 500px; text-align: center;'><span style='color: #000000;'><strong>COMMUNICATIONS</strong></span></td>
<td valign='top' style='width: 500px;'><pre>$communications</pre></td>
</tr>
<tr>
<td valign='top' style='width: 500px; text-align: center;'><span style='color: #000000;'><strong>ENVIRONMENTAL</strong></span></td>
<td valign='top' style='width: 500px;'><pre>$environmental</pre></td>
</tr>
</tbody>
</table>" > $file
done
###MAIL CALL
# Send the generated reports, then remove the local HTML copies.
ansible-playbook mail_playbook.yml
rm -r *.html
newline
| true |
718f81f782bbf1aa18e6c4935b0fa8ac511361e1 | Shell | AbhilashG97/CrimsonBeauty | /Basics/Variables/CreateAFile.sh | UTF-8 | 145 | 3.109375 | 3 | [
"MIT"
] | permissive | #!bin/bash
# Prompt for a user name and create an empty file named "<name>File",
# then list the directory contents.
echo Enter your username
# -r: keep backslashes in the typed name literal.
read -r userName
echo A file with your user name has been created
touch "${userName}File"
# printf reliably emits two blank lines; the original 'echo "\n\n"'
# printed the literal characters \n\n under bash's default echo.
printf '\n\n'
ls -AF
| true |
850ec6b79a4dfe311406bf7c31f784f1c0c6febd | Shell | 844196/shellscripts | /img2aesc | UTF-8 | 1,990 | 4 | 4 | [] | no_license | #!/bin/bash -e
#
# @(#) ターミナルで画像表示
#
# Usage:
# img2aesc.sh [file]
# command | img2aesc.sh
#
# Author:
# Original sasairc (@sasairc_2)
# Modified 844196 (@84____)
#
# License:
# MIT
#
# Report an error: the message plus a usage summary on stderr, then
# abort the script with status 1.
_Error() {
    {
        echo "${0##*/}: $@"
        echo "Usage: ${0##*/} [file]"
        echo "       command | ${0##*/}"
    } >&2
    exit 1
}
# Exit with an error unless ImageMagick is installed.
# (`command -v` replaces the old `if $(type ...)` trick, which only
# worked because an empty expansion makes `if` test the command
# substitution's exit status.)
if ! command -v convert >/dev/null 2>&1; then
    _Error "Require ImageMagick"
fi
# Read the image either from a pipe or from the first argument; the
# first argument wins. Error out when neither is supplied.
[ -p /dev/stdin ] && img=$(cat -)
[ -n "${1}" ] && img=${1}
[ -z "${img}" ] && _Error "Invalid argument"
# Only JPEG/GIF/PNG file names are accepted.
# ("Invalid" fixes the misspelled "Invaild" in the original messages.)
if ! [[ ${img##*.} =~ JPE?G|jpe?g|GIF|gif|PNG|png ]]; then
    _Error "Invalid argument"
fi
# Create the temporary work file. mktemp replaces the trailing X's with
# a random, unpredictable suffix; the previous "$[RANDOM*RANDOM]" name
# was guessable and racy (a fixed, predictable path under /tmp).
tmpfile=$(mktemp "/tmp/tmp.XXXXXXXX")

# Remove the temporary file, if one was created.
function _DeleteTmp() {
    [[ -n ${tmpfile} ]] && rm -f "${tmpfile}"
}

# Clean up on normal exit, and on interrupt/error (propagating failure).
trap '_DeleteTmp;' EXIT
trap '_DeleteTmp; exit 1;' INT ERR
# Scale the image to (terminal height - 5) rows, preserving aspect
# ratio, and store the result in the temporary file.
convert -resize x$[$(tput lines)-5] "${img}" "${tmpfile}"
# Extract the pixel width of the resized image from `identify` output.
img_width=$(identify "${tmpfile}" | sed 's/^.* \([0-9]*\)x[0-9]* .*$/\1/g')
# 1. Read the image one pixel at a time and obtain its RGB values,
#    massaged into space-separated fields by awk and sed, then consumed
#    line by line in the while loop below.
# 2. Convert each RGB triple into an ANSI 6x6x6 cube color:
#    rgb in [0..5] -> (r*36)+(g*6)+b+16
#    (each 0-255 channel is reduced to 6 levels: multiply by 5,
#    divide by 255).
# 3. Draw: increment i per pixel, and emit a newline every time a full
#    image row (img_width pixels) has been printed.
i=0
convert "${tmpfile}" -crop 1x1+${img_width} txt:- 2>/dev/null |
awk 'NR >= 2 {print $2}' | sed -e 's/[()]//g' -e 's/,/ /g' |
while read R G B _;
do
    # Map the 0-255 channels onto the 216-color ANSI cube.
    color=$[(R*5/255*36)+(G*5/255*6)+(B*5/255)+16]
    # Paint one "pixel": a space with the computed background color.
    echo -en "\033[48;5;${color}m \033[m"
    i=$(( ${i} + 1 ))
    # Break the line after every img_width pixels.
    [ 0 -eq $(( ${i} % ${img_width} )) ] && echo
done | true |
c302bd3f14d0efb2f4094ebfb4d98b4ff1624cb0 | Shell | TosinJia/shell-test | /src/main/shell/sh/predefined-var-1.sh | UTF-8 | 408 | 3.140625 | 3 | [] | no_license | #!/bin/bash
# Author: TosinJia
# $$ expands to the PID of the current process, i.e. the shell running
# this script.
echo "The current process is $$"
# Search /root for files whose names contain "var"; the trailing &
# runs the command in the background. The pattern is quoted so the
# shell passes it to find literally instead of expanding it against
# the current directory (the unquoted glob could match local files and
# change the arguments handed to find).
find /root -name '*var*' &
# $! (used on the next line) is the PID of the last background job.
echo "The last one Daemon process is $!" | true |
90155b531261da8936afff1fbc49393b976d12d6 | Shell | sgoodrow/dotfiles | /src/bash_profile | UTF-8 | 316 | 2.96875 | 3 | [] | no_license | if [ -n "$BASH_VERSION" ]; then
# include .bashrc if it exists
if [ -f "$HOME/.bashrc" ]; then
. "$HOME/.bashrc"
fi
fi
# Re-read this profile into the current shell session.
reload() {
  . ~/.bash_profile
}
# Root directory of git checkouts, and the personal working tree in it.
export GIT_PATH=~/projects/github
export DEV_PATH=$GIT_PATH/sgoodrow
# Shortcuts for jumping to those directories.
alias cdgit='cd ${GIT_PATH}'
alias cddev='cd ${DEV_PATH}'
# Start each new login shell with a blank screen.
clear
| true |
45cbb7e9bc34a3ab20369901daf30af6f42bd065 | Shell | ravasthi/dotfiles | /shellrc/groovy.bash | UTF-8 | 326 | 2.703125 | 3 | [] | no_license | # --------------------------------------------------------------------------------------------------
# Groovy: point GROOVY_HOME at Homebrew's keg when Groovy is installed.
if [[ -d "${HOMEBREW_PREFIX}/opt/groovy/libexec" ]]; then
  export GROOVY_HOME="${HOMEBREW_PREFIX}/opt/groovy/libexec"
fi
| true |
9e9c2da6a2498a63c1b356e60615c7c48c99f16f | Shell | openxla/iree | /build_tools/cmake/run_android_test.sh | UTF-8 | 2,088 | 3.859375 | 4 | [
"Apache-2.0",
"LLVM-exception",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/bin/bash
# Copyright 2020 The IREE Authors
#
# Licensed under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
# Wrapper script to push build artifacts and run tests on an Android device.
#
# This script expects the following arguments:
# <test-binary> [<test-args>]..
# Where <test-binary> should be a path relative to /data/local/tmp/ on device.
#
# This script reads the following environment variables:
# - TEST_ANDROID_ABS_DIR: the absolute path on Android device for the build
# artifacts.
# - TEST_DATA: optional; the files to push to the Android device. Space-separated.
# - TEST_EXECUTABLE: the executable file to push to the Android device.
# - TEST_TMPDIR: optional; temporary directory on the Android device for
# running tests.
#
# This script pushes $TEST_EXECUTABLE and $TEST_DATA onto the device
# under $TEST_ANDROID_ABS_DIR/ before running <test-binary> with all
# <test-args> under /data/local/tmp.
set -x
set -e
adb push $TEST_EXECUTABLE $TEST_ANDROID_ABS_DIR/$(basename $TEST_EXECUTABLE)
if [ -n "$TEST_DATA" ]; then
for datafile in $TEST_DATA
do
adb push "$datafile" "$TEST_ANDROID_ABS_DIR/$(basename "$datafile")"
done
fi
if [ -n "$TEST_TMPDIR" ]; then
adb shell "mkdir -p $TEST_TMPDIR"
tmpdir="TEST_TMPDIR=$TEST_TMPDIR"
else
tmpdir=""
fi
# Execute the command with `adb shell` under `/data/local/tmp`.
# We set LD_LIBRARY_PATH for the command so that it can use libvulkan.so under
# /data/local/tmp when running Vulkan tests. This is to workaround an Android
# issue where linking to libvulkan.so is broken under /data/local/tmp.
# See https://android.googlesource.com/platform/system/linkerconfig/+/296da5b1eb88a3527ee76352c2d987f82f3252eb.
# This requires copying the vendor vulkan implementation under
# /vendor/lib[64]/hw/vulkan.*.so to /data/local/tmp/libvulkan.so.
adb shell "cd /data/local/tmp && LD_LIBRARY_PATH=/data/local/tmp $tmpdir $*"
if [ -n "$TEST_TMPDIR" ]; then
adb shell "rm -rf $TEST_TMPDIR"
fi
| true |
57c02089edbacc24100820ab15b554a14b595ca5 | Shell | bbrowning/heroku-buildpack-jruby | /bin/compile | UTF-8 | 4,125 | 3.40625 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# bin/compile <build-dir> <cache-dir>
# fail fast
set -e
# debug
#set -x
logger -p user.notice -t "slugc[$$]" "language_pack_jruby jruby_compile_start"
# clean up leaking environment
unset GIT_DIR
# Positional arguments supplied by the Heroku slug compiler.
BUILD_DIR=$1
CACHE_DIR=$2
LOGGER_FLAGS=""
# Fetch the shared JVM buildpack helpers (provides install_java below).
curl --silent --location http://heroku-jvm-common.s3.amazonaws.com/jvm-buildpack-common.tar.gz | tar xz
. bin/java
#create the cache dir if it doesn't exist
mkdir -p $CACHE_DIR
# install JDK
logger -p user.notice -t "slugc[$$]" "language_pack_jruby download_jdk"
LOGGER_FLAGS="$LOGGER_FLAGS download_jdk"
javaVersion=1.7
echo -n "-----> Installing OpenJDK ${javaVersion}..."
install_java ${BUILD_DIR} ${javaVersion}
echo "done"
# Remove JDK's logging.properties since it interferes with JBoss
# Logging finding the appropriate one
rm $BUILD_DIR/.jdk/jre/lib/logging.properties
# Prefix every line of stdin with the standard build-log indentation.
# BSD sed (Darwin) needs -l for line buffering; GNU sed uses -u.
indent() {
  local prefix='s/^/       /'
  if [ "$(uname)" = "Darwin" ]; then
    sed -l "$prefix"
  else
    sed -u "$prefix"
  fi
}
# Download JRuby into the build cache (first run only) and install the
# baseline gems every app needs: jruby-openssl, bundler and rake.
function download_jruby_and_install_default_gems {
  if [ ! -d "$CACHE_DIR/jruby-$VERSION" ]; then
    echo "-----> Downloading and unpacking JRuby ${VERSION}"
    mkdir -p $CACHE_DIR
    JRUBY_TAR=http://jruby.org.s3.amazonaws.com/downloads/$VERSION/jruby-bin-$VERSION.tar.gz
    curl $JRUBY_TAR -s -o - | tar xzf - -C $CACHE_DIR/
    # Put the freshly unpacked jruby on PATH for the gem installs.
    PATH=$CACHE_DIR/jruby-$VERSION/bin:$PATH
    echo "-----> Installing JRuby-OpenSSL, Bundler and Rake"
    jruby $JRUBY_OPTS -S jgem install jruby-openssl bundler rake | indent
  fi
}
# Copy the cached JRuby into the slug so it ships with the app.
function copy_jruby_to_build {
  echo "-----> Vendoring JRuby ${VERSION} into slug"
  cp -ru $CACHE_DIR/jruby-$VERSION $BUILD_DIR/jruby
  PATH=$BUILD_DIR/jruby/bin:$PATH
}
# Restore previously bundled gems from the build cache, if any.
function get_bundle_cache {
  if [ -d $BUNDLE_CACHE ]; then
    mkdir -p $BUNDLE_DIR
    cp -ru $BUNDLE_CACHE/* $BUNDLE_DIR
  fi
}
# Save the bundled gems back into the build cache for the next build.
function store_bundle_cache {
  mkdir -p $BUNDLE_CACHE
  cp -ru $BUNDLE_DIR/* $BUNDLE_CACHE
}
# Resolve and vendor the app's gems (production groups only).
function bundle_install {
  echo "-----> Installing dependencies with Bundler"
  cd $BUILD_DIR
  jruby $JRUBY_OPTS -r openssl -S bundle install --without development:test --binstubs --deployment | indent
  jruby $JRUBY_OPTS -r openssl -S bundle clean
  echo "Dependencies installed" | indent
}
# bundle_install wrapped with cache restore/save around it.
function bundle_install_with_cache {
  BUNDLE_CACHE=$CACHE_DIR/vendor/bundle
  BUNDLE_DIR=$BUILD_DIR/vendor/bundle
  get_bundle_cache
  bundle_install
  store_bundle_cache
  # Keep cached gems out of the slug
  rm -r $BUNDLE_DIR/jruby/1.9/cache
}
function create_database_yml {
echo "-----> Writing config/database.yml to read from DATABASE_URL"
mkdir -p $BUILD_DIR/config
cat > $BUILD_DIR/config/database.yml << EOF
<%
require 'cgi'
require 'uri'
begin
uri = URI.parse(ENV["DATABASE_URL"])
rescue URI::InvalidURIError
raise "Invalid DATABASE_URL"
end
raise "No RACK_ENV or RAILS_ENV found" unless ENV["RAILS_ENV"] || ENV["RACK_ENV"]
def attribute(name, value, force_string = false)
if value
value_string =
if force_string
'"' + value + '"'
else
value
end
"#{name}: #{value_string}"
else
""
end
end
adapter = uri.scheme
adapter = "jdbcpostgresql" if adapter == "postgres"
database = (uri.path || "").split("/")[1]
username = uri.user
password = uri.password
host = uri.host
port = uri.port
params = CGI.parse(uri.query || "")
%>
<%= ENV["RAILS_ENV"] || ENV["RACK_ENV"] %>:
<%= attribute "adapter", adapter %>
<%= attribute "database", database %>
<%= attribute "username", username %>
<%= attribute "password", password, true %>
<%= attribute "host", host %>
<%= attribute "port", port %>
<% params.each do |key, value| %>
<%= key %>: <%= value.first %>
<% end %>
EOF
}
# Precompile the Rails asset pipeline inside the build directory.
# A dummy DATABASE_URL is supplied because config/database.yml (written
# above) parses it at load time even though no database is contacted.
function precompile_assets {
  echo "-----> Precompiling assets"
  cd $BUILD_DIR
  env DATABASE_URL=postgres://user:pass@127.0.0.1/dbname jruby $JRUBY_OPTS -S bin/rake assets:precompile 2>&1 | indent
}
# --- main build sequence ---
# Force JRuby 1.9 mode and cap the JVM heap during the build.
JRUBY_OPTS="--1.9 -J-Xmx400m"
VERSION="1.7.0.preview2"
download_jruby_and_install_default_gems
copy_jruby_to_build
bundle_install_with_cache
create_database_yml
precompile_assets
logger -p user.notice -t "slugc[$$]" "language_pack_jruby jruby_compile_end $LOGGER_FLAGS"
| true |
3bdb2bfab7c07fbd504688682c989275bf4cb358 | Shell | pixelc-linux/pixelc-kernel-scripts | /make_image.sh | UTF-8 | 570 | 3.796875 | 4 | [
"MIT"
] | permissive | #!/bin/sh
# Allow the caller to override the mkbootimg binary via $MKBOOTIMG.
if [ -z "$MKBOOTIMG" ]; then
    MKBOOTIMG="mkbootimg"
fi

if [ ! -x "$(command -v $MKBOOTIMG)" ]; then
    echo "mkbootimg not found (install android-tools-mkbootimg), exitting..."
    exit 1
fi

if [ -z "$1" ] || [ -z "$2" ]; then
    echo "Usage: $0 Image.fit initramfs.cpio.lz4"
    exit 1
fi

IMAGEFIT="$1"
RAMDISK="$2"

# The third argument optionally overrides the output file name.
OUTPUT="boot.img.unsigned"
if [ -n "$3" ]; then
    OUTPUT="$3"
fi

rm -f "$OUTPUT"
# Use the named variables rather than repeating the positionals.
"$MKBOOTIMG" --kernel "$IMAGEFIT" --ramdisk "$RAMDISK" -o "$OUTPUT"
if [ $? -ne 0 ]; then
    echo "mkbootimg failed, exitting..."
    # BUG FIX: the script previously fell through here, printed
    # "Created ..." and exited 0 even though mkbootimg had failed.
    exit 1
fi

echo "Created ${OUTPUT}."
| true |
28d0c25214b9e3999243f2757f608bc61e47d545 | Shell | arashpath/Scripts | /Linux/fssai/Linux_Setup/final/apache-tomcat/manager.sh | UTF-8 | 606 | 3.265625 | 3 | [] | no_license | #!/bin/bash
set -e
# Tomcat instance number, passed as the first argument.
ID=$1
# Shared tomcat template installation used as the copy source.
# NOTE(review): this reassigns $HOME for the remainder of the script.
DEVENV=/opt/DevEnv ; HOME=$DEVENV/tomcat8-HOME
# Resolve the instance directory /opt/APPS/*-tom<ID> by matching the
# suffix in the directory listing.
BASE=/opt/APPS/$(ls -lrth /opt/APPS/ | awk "/-tom"$ID"/ "'{print $9}')
# ---------------------------------------------------------------------------#
# NOTE(review): the guard runs the setup only when $BASE does NOT exist,
# yet the commands below copy INTO $BASE -- confirm the condition is not
# inverted.
if [ ! -d "$BASE" ]
then
# Install the manager webapp and comment out its access-restricting
# <Context> body so the GUI is reachable remotely.
cp -a $HOME/webapps/manager $BASE/webapps/
sed -i '/<Context/a <!--
/<\/Context/i -->' $BASE/webapps/manager/META-INF/context.xml
# Copy the user database and grant USER the manager/admin GUI roles.
cp -a $HOME/conf/tomcat-users.* $BASE/conf/
sed -i '/<\/tomcat-users>/i <user username="USER" password="PASSWD" roles="manager-gui,admin-gui"/>' $BASE/conf/tomcat-users.xml
systemctl restart tomcat$ID
fi
| true |
8085172b326b5d541a6e6a0c6b027ecf3005d019 | Shell | DistributedDesigns/docs | /data/quoteserver-times/qs-timer.sh | UTF-8 | 227 | 3.125 | 3 | [] | no_license | #!/bin/sh
# Require the number of attempts as the first argument.
# ($1 is quoted: the unquoted test broke on arguments containing
# spaces and relied on `[ -z ]` happening to be true when absent.)
if [ -z "$1" ]; then
  echo "Usage: $0 [num attempts] 2> <file>"
  exit 1
else
  MAX_ITER=$1
fi

# Report only the real (wall-clock) time for each timed pipeline.
TIMEFORMAT=%R

# Time one quote-server round trip per iteration; `time` writes to
# stderr, which the caller redirects to a file (see usage above).
for i in $(seq 1 "${MAX_ITER}"); do
  time echo "hello,friend" | nc quoteserve.seng.uvic.ca 4443 > /dev/null
done
| true |
63ada80de4d8fbb4f76c105911750e025e9ebadc | Shell | hyperledger/indy-test-automation | /system_payments_only/docker/prepare.sh | UTF-8 | 3,498 | 3.609375 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Abort on errors and on failed pipeline stages; trace each command.
set -o errexit
set -o pipefail
#set -o nounset
set -o xtrace
# Stop Git-for-Windows (MSYS) from rewriting POSIX-style paths in args.
export MSYS_NO_PATHCONV=1
# Built-in defaults for the Docker test network (see usage below).
DEF_TEST_NETWORK_NAME="indy-test-automation-network"
# TODO limit default subnet range to reduce risk of overlapping with system resources
DEF_TEST_NETWORK_SUBNET="10.0.0.0/24"
# Print a short usage summary, including the built-in defaults.
function usage {
    cat <<EOF
Usage: $0 [test-network-name] [test-network-subnet]
defaults:
    - test-network-name: '${DEF_TEST_NETWORK_NAME}'
    - test-network-subnet: '${DEF_TEST_NETWORK_SUBNET}'
EOF
}
if [ "$1" = "--help" ] ; then
usage
exit 0
fi
test_network_name="${1:-$DEF_TEST_NETWORK_NAME}"
test_network_subnet="${2:-$DEF_TEST_NETWORK_SUBNET}"
user_id=$(id -u)
repo_path=$(git rev-parse --show-toplevel)
docker_routine_path="$repo_path/system_payments_only/docker"
# Set the following variables based on the OS:
# - docker_socket_path
# - docker_socket_mount_path
# - $docker_socket_user_group
. set_docker_socket_path.sh
workdir_path="/tmp/indy-test-automation"
image_repository="hyperledger/indy-test-automation"
docker_compose_image_name="${image_repository}:docker-compose"
node_env_variables=" \
INDY_PLENUM_VERSION \
INDY_NODE_VERSION \
UBUNTU_VERSION \
PYTHON3_PYZMQ_VERSION \
SOVRIN_INSTALL \
SOVRIN_VERSION \
SOVTOKEN_VERSION \
SOVTOKENFEES_VERSION \
TOKEN_PLUGINS_INSTALL \
URSA_VERSION \
"
client_env_variables=" \
LIBINDY_CRYPTO_VERSION \
LIBSOVTOKEN_INSTALL \
LIBSOVTOKEN_VERSION \
DIND_CONTAINER_REGISTRY \
DIND_IMAGE_NAME\
UBUNTU_VERSION \
"
echo "Docker version..."
docker version
set +x
echo "Environment env variables..."
for i in $node_env_variables $client_env_variables
do
echo "$i=${!i}"
done
set -x
# 1. build docker-compose image
# TODO pass optional docker composer version
docker build -t "$docker_compose_image_name" "$docker_routine_path/docker-compose"
# 2. build client image
docker run -t --rm \
--group-add $docker_socket_user_group \
-v "$docker_socket_path:"$docker_socket_mount_path \
-v "$repo_path:$workdir_path" \
-w "$workdir_path" \
-u "$user_id" \
-e "IMAGE_REPOSITORY=$image_repository" \
-e u_id="$user_id" \
-e LIBINDY_VERSION \
-e LIBSOVTOKEN_INSTALL \
-e LIBSOVTOKEN_VERSION \
-e DIND_CONTAINER_REGISTRY \
-e DIND_IMAGE_NAME \
-e UBUNTU_VERSION \
"$docker_compose_image_name" docker-compose -f system_payments_only/docker/docker-compose.yml build client
# 3. build node image
docker run -t --rm \
--group-add $docker_socket_user_group \
-v "$docker_socket_path:"$docker_socket_mount_path \
-v "$repo_path:$workdir_path" \
-w "$workdir_path" \
-u "$user_id" \
-e "IMAGE_REPOSITORY=$image_repository" \
-e u_id="$user_id" \
-e INDY_NODE_VERSION \
-e INDY_PLENUM_VERSION \
-e TOKEN_PLUGINS_INSTALL \
-e SOVRIN_VERSION \
-e SOVRIN_INSTALL \
-e SOVTOKEN_VERSION \
-e SOVTOKENFEES_VERSION \
-e URSA_VERSION \
-e PYTHON3_PYZMQ_VERSION \
-e UBUNTU_VERSION \
"$docker_compose_image_name" docker-compose -f system_payments_only/docker/docker-compose.yml build node
docker images "$image_repository"
# 4. clean existing environment
$docker_routine_path/clean.sh "$test_network_name"
# 5. remove test network if exists
docker network ls -q --filter name="$test_network_name" | xargs -r docker network rm
# 6. create test network
docker network create --subnet="$test_network_subnet" "$test_network_name"
docker network ls
docker inspect "$test_network_name" | true |
990f10ceee39363a6335f533002759caad96611a | Shell | SirWumpus/ioccc-hibachi | /hibachi-start.sh.in | UTF-8 | 5,077 | 3.3125 | 3 | [
"0BSD"
] | permissive | #!/bin/bash
#!/usr/bin/env -i /bin/sh
#!/bin/ksh
#!/bin/sh
#
# hibachi-start.sh
#
# Limited implementation of RFC 2616 and CGI/1.1.
#
# Public Domain 2002, 2004 by Anthony Howe. All rights released.
#
# usage:
#
# hibachi-start.sh &
#
#######################################################################
# Do NOT modify the following section if you are security conscious.
#######################################################################
#
# Reset internal field separator to space, tab, newline.
# Not 100% effective if the environment we inherited has
# already played silly buggers with IFS like "IFS='S'".
#
# Use #!/usr/bin/env -i /bin/sh on shells that support
# option passing and #! lines longer than 32 bytes to
# guarantee a clean environment.
#
IFS='
'
# Locate sed once; `command -v` is the POSIX-specified replacement for
# the non-standard `which`.
SED=$(command -v sed)
#
# Get a clean environment to work with. This works for all
# Bourne like shells under most Unix like environments.
#
# Cleaning out the environment on Cygwin prevents hibachi from
# starting and would appear there is some Windows/Cygwin specific
# information maintained and required here.
#
if ! expr `uname` : 'CYGWIN.*' >/dev/null; then
unset $(env | $SED -e '/!::/d' -e 's/^\([^=]*\).*/\1/')
fi
#
# Path of safe executables.
#
export PATH='/usr/local/bin:/usr/bin:/bin'
#
# Disable any environment file.
#
unset ENV
#
# Disable cd shortcuts.
#
unset CDPATH
#######################################################################
# Minimum required for serving static files.
#######################################################################
prefix=@prefix@
exec_prefix=@exec_prefix@
datadir=@datadir@
sysconfdir=@sysconfdir@
sharedstatedir=@sharedstatedir@
localstatedir=@localstatedir@
sbindir=@sbindir@
bindir=@bindir@
#
# The port to be used by the Hibachi web server.
#
# This is NOT normally a server parameter, but information about the
# client connection, but since Hibachi only listens on one port, we
# assume to know what the incoming port is and listen on that. Also
# for the purpose of demonstration, we can't bind to a privileged
# port.
#
export SERVER_PORT=@enable_port@
#
# The root of the Hibachi document tree. Virtual hosts
# are implemented as subdirectories from this location.
#
# For example, to setup virtual hosts by name or IP:
#
# /usr/local/share/hibachi/
# localhost/
# index.html
# ...
# 127.0.0.1 -> localhost/
#
# www.ioccc.org/
# index.html
# ...
# 64.81.251.233 -> www.ioccc.org/
#
#export DOCUMENT_ROOT=${datadir}/hibachi
export DOCUMENT_ROOT=$(pwd)
#
# The executable.
#
#hibachi=${sbindir}/@PACKAGE_NAME@
# Path of the hibachi executable, next to this script. "$0" is quoted
# (and $(...) replaces backticks) so paths with spaces do not break.
hibachi=$(dirname "$0")/@PACKAGE_NAME@
#######################################################################
# Required for most CGI scripts.
#######################################################################
#
# Your host name or IP address. Ideally this should be the server-name
# used in self-referencing URLs.
#
export SERVER_NAME=127.0.0.1
#
# My web server name and version
#
export SERVER_SOFTWARE='@PACKAGE_NAME@/@PACKAGE_VERSION@'
#
# Predefined environment space for the request method.
#
export REQUEST_METHOD='1234'
#
# Predefined environment space for remote address of a request.
#
export REMOTE_ADDR='123.123.123.123'
#
# Predefined environment space for upload Content-Length.
#
export CONTENT_LENGTH='123456789.123456789.'
#
# Predefined environment space for upload Content-Type.
# Look at the Apache mime.types file, there are some long ones.
#
export CONTENT_TYPE='123456789.123456789.123456789.123456789.123456789.123456789.'
#
# Predefined environment space for cookies. A single cookie can actually
# be a maximum of 4096 bytes in size, but here we only handle shorter ones.
#
export HTTP_COOKIE='123456789.123456789.123456789.123456789.123456789.123456789.123456789.123456789.'
#
# Predefined environment space for the absolute path of the script
# to be processed. Required by PHP/CGI in order to find the script.
# This variable is not part of the CGI/1.1 Specification.
#
export SCRIPT_FILENAME='123456789.123456789.123456789.123456789.123456789.123456789.123456789.123456789.'
#
# Hibachi does support the entire CGI/1.1 specification.
#
export GATEWAY_INTERFACE='CGI/0.0'
#
# We cannot support the following common CGI environment variables
# as specified in the draft 3 of a proposed Internet Standard
# due to space limitations, but since the draft has expired and
# on hold, there is a lot of lee way:
#
# AUTH_TYPE (MUST) HTTP_ACCEPT (MAY)
# HTTP_REFERER (MAY) PATH_INFO (MUST)
# PATH_TRANSLATED (SHOULD) REMOTE_HOST (SHOULD)
# REMOTE_IDENT (MAY) REMOTE_USER (SHOULD)
# SCRIPT_NAME (MUST) SERVER_PROTOCOL (MUST)
#
#######################################################################
# Nothing to configure below this point.
#######################################################################
#
# Start the web server.
#
exec ${hibachi}
#######################################################################
# Beyond here be dragons...
#######################################################################
| true |
537e6d5a4ca0c1358200892dad0a39dc449b94bc | Shell | laiafr/SegCatSpa | /Analysis_Pipeline/Commonscripts/5_analyze.sh | UTF-8 | 2,251 | 3.5625 | 4 | [] | no_license | #!/usr/bin/env bash
# Script for analyzing mono and bilingual corpus with diferent mixings - M2 SegCatSpa -
# Alex Cristia alecristia@gmail.com 2016-11
# Adapted by Laia Fibla 2017-03-15 laia.fibla.reixachs@gmail.com
# minor changes Alex Cristia 2018-10-12
######### VARIABLES ###############
input_dir=$1
output_dir=$2
##################################
# wordseg tool to launch segmentation pipelines on the cluster
wordseg_slurm="/shared/apps/wordseg/tools/wordseg-slurm.sh"
# the token separators in the tags files
separator="-p' ' -s';esyll' -w';eword'"
# get the list of tags files in the input_dir, assuming that what gets passed is a folder containing tags (i.e. no embedding)
all_tags="$input_dir/*tags.txt"
ntags=$(echo $all_tags | wc -w)
echo "found $ntags tags files in $input_dir"
# temporary jobs file to list all the wordseg jobs to execute
jobs=$(mktemp)
trap "rm -rf $jobs" EXIT
# build the list of wordseg jobs from the list of tags files
counter=1
for tags in $all_tags
do
name=$(basename $tags | cut -d- -f1)
echo -n "[$counter/$ntags] building jobs for $name ..."
# defines segmentation jobs
echo "$name-syllable-baseline-00 $tags syllable $separator wordseg-baseline -v -P 0" >> $jobs
echo "$name-syllable-baseline-10 $tags syllable $separator wordseg-baseline -v -P 1" >> $jobs
echo "$name-syllable-tprel $tags syllable $separator wordseg-tp -v -t relative" >> $jobs
echo "$name-syllable-tpabs $tags syllable $separator wordseg-tp -v -t absolute" >> $jobs
echo "$name-phone-dibs $tags phone $separator wordseg-dibs -v -t phrasal -u phone $tags" >> $jobs
echo "$name-phone-puddle $tags phone $separator wordseg-puddle -v -j 5 -w 2" >> $jobs
echo "$name-syllable-ag $tags syllable $separator wordseg-ag -vv -j 8" >> $jobs
((counter++))
echo " done"
# # for testing, process only some tags
# [ $counter -eq 4 ] && break
done
# load the wordseg python environment
module load anaconda/3
source activate /shared/apps/anaconda3/envs/wordseg
# launching all the jobs
echo -n "submitting $(cat $jobs | wc -l) jobs ..."
$wordseg_slurm $jobs $output_dir > /dev/null
echo " done"
echo "all jobs submitted, writing to $output_dir"
echo "view status with 'squeue -u $USER'"
# unload the environment
source deactivate
| true |
51afc8935ca33093a64a3c4a6a943228e0efe9de | Shell | arm64-gentoo-images/Build.Dist | /chroot.sh | UTF-8 | 208 | 2.578125 | 3 | [
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-other-permissive"
] | permissive | #! /bin/bash
if [[ -z "$PROJECT" ]]; then
PROJECT="gentoo-arm"
fi
BASEDIR=$( dirname $0 )
sudo pychroot -B "${BASEDIR}/build/${PROJECT}/packages":/var/cache/binpkgs "${BASEDIR}/build/${PROJECT}/chroot"
| true |
2461efb9899a982294742d2dc586342d87fdb361 | Shell | AnthonyArmour/holberton-system_engineering-devops | /0x10-https_ssl/0-world_wide_web | UTF-8 | 693 | 3.484375 | 3 | [] | no_license | #!/usr/bin/env bash
# gets domain info
printinfo() {
record=$(dig $var | grep -A1 'ANSWER SECTION:' | awk '{print $1, $4, $5}')
domain=$(echo $record | awk '{print $2}')
rec=$(echo $record | awk '{print $3}')
ip=$(echo $record | awk '{print $4}')
echo "The subdomain ${sub} is a ${rec} record and points to ${ip}"
}
if [ $# -eq 3 ]; then
var="$2.$1"
sub="$2"
printinfo
elif [ $# -eq 2 ]; then
var="$2.$1"
sub="$2"
printinfo
elif [ $# -eq 1 ]; then
var="www.$1"
sub="www"
printinfo
var="lb-01.$1"
sub="lb-01"
printinfo
var="web-01.$1"
sub="web-01"
printinfo
var="web-02.$1"
sub="web-02"
printinfo
fi
| true |
3e57aa219e6ce972d19d8234c89c15d478fd60b6 | Shell | EricZBL/RealTimeFaceCompare | /Service/bin/start-check-dubbo.sh | UTF-8 | 3,233 | 3.71875 | 4 | [] | no_license | #!/bin/bash
################################################################################
## Copyright: HZGOSUN Tech. Co, BigData
## Filename: start-check-dubbo.sh
## Description: 大数据dubbo ftp 守护脚本
## Author: liushanbin
## Created: 2018-01-08
################################################################################
#set -x
#crontab 里面不会读取jdk环境变量的值
source /etc/profile
#set -x
#---------------------------------------------------------------------#
# 定义变量 #
#---------------------------------------------------------------------#
cd `dirname $0`
declare -r BIN_DIR=`pwd` #bin 目录
cd ..
declare -r DEPLOY_DIR=`pwd` #项目根目录
declare -r LOG_DIR=${DEPLOY_DIR}/logs ## log 日记目录
declare -r CHECK_LOG_FILE=${LOG_DIR}/check_dubbo.log
#####################################################################
# 函数名: check_dubbo
# 描述: 把脚本定时执行,定时监控dubbo 服务是否挂掉,如果挂掉则重启。
# 参数: N/A
# 返回值: N/A
# 其他: N/A
#####################################################################
function check_dubbo()
{
echo "" | tee -a $CHECK_LOG_FILE
echo "****************************************************" | tee -a $CHECK_LOG_FILE
echo "dubbo procceding ing......................." | tee -a $CHECK_LOG_FILE
dubbo_pid=$(lsof -i | grep 20881 | awk '{print $2}' | uniq)
echo "dubbo's pid is: ${dubbo_pid}" | tee -a $CHECK_LOG_FILE
if [ -n "${dubbo_pid}" ];then
echo "dubbo process is exit,do not need to do anything. exit with 0 " | tee -a $CHECK_LOG_FILE
else
echo "dubbo process is not exit, just to restart dubbo." | tee -a $CHECK_LOG_FILE
sh ${BIN_DIR}/start-dubbo.sh
echo "starting, please wait........" | tee -a $CHECK_LOG_FILE
sleep 1m
dubbo_pid_restart=$(lsof -i | grep 20881 | awk '{print $2}' | uniq)
if [ -z "${dubbo_pid_restart}" ];then
echo "start dubbo failed.....,retrying to start it second time" | tee -a $CHECK_LOG_FILE
sh ${BIN_DIR}/start-dubbo.sh
echo "second try starting, please wait........" | tee -a $CHECK_LOG_FILE
sleep 1m
dubbo_pid_retry=$(lsof -i | grep 20881 | awk '{print $2}' | uniq)
if [ -z "${dubbo_pid_retry}" ];then
echo "retry start dubbo failed, please check the config......exit with 1" | tee -a $CHECK_LOG_FILE
else
echo "secondary try start ftp sucess. exit with 0." | tee -a $CHECK_LOG_FILE
fi
else
echo "trying to restart dubbo sucess. exit with 0." | tee -a $CHECK_LOG_FILE
fi
fi
}
#####################################################################
# 函数名: main
# 描述: 模块功能main 入口,即程序入口, 用来监听整个大数据服务的情况。
# 参数: N/A
# 返回值: N/A
# 其他: N/A
#####################################################################
function main()
{
while true
do
check_dubbo
sleep 5m
done
}
# 主程序入口
main
| true |
ad4eb5cbf4e388dc47e860379be229c54ff53ea9 | Shell | openshift-psap/special-resource-operator | /scripts/rename.sh | UTF-8 | 223 | 3.171875 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
set -e

# Rename NNNN.yaml manifests in the current directory to
# nnnn_<kind>_<name>.yaml, where <kind> and <name> come from each
# manifest's `kind:` line and first `  name:` line, lower-cased.
rename_manifests() {
    local i K N F
    for i in [0-9][0-9][0-9][0-9].yaml
    do
        # When no file matches, the glob stays unexpanded; skip it
        # instead of letting grep fail on the literal pattern (which
        # would abort the whole script under `set -e`).
        [ -e "$i" ] || continue
        K=$(grep ^kind "$i")
        N=$(grep -e '^  name: ' "$i" | head -n1)
        F=$(echo "${i%.*}"_"${K##* }"_"${N##* }".yaml | tr '[:upper:]' '[:lower:]')
        mv "$i" "$F"
    done
}

rename_manifests
| true |
02a7fcdda7d4109d51c424ca7c0aca58e76c57d5 | Shell | tinnnysu/FFmpeg-Android | /build.sh | UTF-8 | 6,693 | 4.1875 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
###############################################################################
#
# This script will build FFMPEG for android.
#
# Prerequisits:
# - FFMPEG source checked out / copied to FFmpeg subfolder.
#
# Build steps:
# - Patch the FFMPEG configure script to fix missing support for shared
# library versioning on android.
# - Configure FFMPEG
# - Build FFMPEG
# Requirement:
# - make
# - patch
# - bash
# - diffutils
#
###############################################################################
SCRIPT=$(readlink -f $0)
BASE=$(dirname $SCRIPT)
NPROC=$(grep -c ^processor /proc/cpuinfo)
###############################################################################
#
# Argument parsing.
# Allow some components to be overwritten by command line arguments.
#
###############################################################################
# Fill in defaults for anything not supplied via environment or flags.
# (The tests are quoted: the original `[ -z $HOST_ARCH]` was missing a
# space before `]`, so whenever HOST_ARCH was preset the test printed
# "[: missing ]" to stderr and only worked by accident of the error's
# false status.)
if [ -z "$HOST_ARCH" ]; then
    HOST_ARCH=$(uname -m)
fi

if [ -z "$PLATFORM" ]; then
    PLATFORM=14
fi

if [ -z "$MAKE_OPTS" ]; then
    # Parallel make: one job per CPU core plus one.
    MAKE_OPTS="-j$(($NPROC+1))"
fi
# Print command-line help (including the current defaults) and abort.
usage() {
    printf '%s [-a <ndk>] [-h <host arch>] [-m <make opts>] [-p <android platform>]\n' "$0"
    printf '\tdefaults:\n'
    printf '\tHOST_ARCH=%s\n' "$HOST_ARCH"
    printf '\tPLATFORM=%s\n' "$PLATFORM"
    printf '\tMAKE_OPTS=%s\n' "$MAKE_OPTS"
    printf '\tANDROID_NDK must be set manually.\n'
    printf '\n'
    printf '\tAll arguments can also be set as environment variables.\n'
    exit -3
}
# Parse command-line overrides; each flag mirrors an environment
# variable of the same meaning (see usage above).
while getopts "a:h:m:p:" opt; do
    case $opt in
        a)
            ANDROID_NDK=$OPTARG
            ;;
        h)
            HOST_ARCH=$OPTARG
            ;;
        m)
            MAKE_OPTS=$OPTARG
            ;;
        p)
            PLATFORM=$OPTARG
            ;;
        \?)
            echo "Invalid option $OPTARG" >&2
            usage
            ;;
    esac
done
if [ -z $HOST_ARCH ]; then
HOST_ARCH=$(uname -m)
fi
if [ -z $PLATFORM ]; then
PLATFORM=14
fi
if [ -z $MAKE_OPTS ]; then
MAKE_OPTS="-j3"
fi
if [ -z $ANDROID_NDK ]; then
echo "ANDROID_NDK not set. Set it to the directory of your NDK installation."
exit -1
fi
if [ ! -d $BASE/FFmpeg ]; then
echo "Please copy or check out FFMPEG source to folder FFmpeg!"
exit -2
fi
echo "Building with:"
echo "HOST_ARCH=$HOST_ARCH"
echo "PLATFORM=$PLATFORM"
echo "MAKE_OPTS=$MAKE_OPTS"
echo "ANDROID_NDK=$ANDROID_NDK"
cd $BASE/FFmpeg
## Save original configuration file
## or restore original before applying patches.
if [ ! -f configure.bak ]; then
echo "Saving original configure file to configure.bak"
cp configure configure.bak
else
echo "Restoring original configure file from configure.bak"
cp configure.bak configure
fi
patch -p1 < $BASE/patches/configure.patch
#if [ ! -f library.mak.bak ]; then
# echo "Saving original library.mak file to library.mak.bak"
# cp library.mak library.mak.bak
#else
# echo "Restoring original library.mak file from library.mak.bak"
# cp library.mak.bak library.mak
#fi
#
#patch -p1 < $BASE/patches/library.mak.patch
# Remove old build and installation files.
if [ -d $BASE/output ]; then
rm -rf $BASE/output
fi
if [ -d $BASE/build ]; then
rm -rf $BASE/build
fi
###############################################################################
#
# build_one ... builds FFMPEG with provided arguments.
#
# Calling convention:
#
# build_one <PREFIX> <CROSS_PREFIX> <ARCH> <SYSROOT> <CFLAGS> <LDFLAGS> <EXTRA>
#
# PREFIX ... Installation directory
# CROSS_PREFIX ... Full path with toolchain prefix
# ARCH ... Architecture to build for (arm, x86, mips)
# SYSROOT ... Android platform to build for, full path.
# CFLAGS ... Additional CFLAGS for building.
# LDFLAGS ... Additional LDFLAGS for linking
# EXTRA ... Any additional configuration flags, e.g. --cpu=XXX
#
###############################################################################
# Configure, build and install FFMPEG once for a single target.
#
# NOTE(review): unlike the banner comment above, the actual calling
# convention (see the call sites below) includes the build directory:
#   build_one <BUILD_ROOT> <PREFIX> <CROSS_PREFIX> <ARCH> <SYSROOT> \
#             <CFLAGS> <LDFLAGS> <EXTRA>
#   $1 build dir (created, becomes cwd)   $2 install prefix
#   $3 toolchain cross prefix             $4 target architecture
#   $5 Android platform sysroot           $6 extra CFLAGS
#   $7 extra LDFLAGS                      $8 extra configure flags
function build_one
{
    # Out-of-tree build: work inside the per-ABI build directory.
    mkdir -p $1
    cd $1
    $BASE/FFmpeg/configure \
        --prefix=$2 \
        --enable-shared \
        --enable-pic \
        --enable-runtime-cpudetect \
        --enable-cross-compile \
        --disable-symver \
        --disable-static \
        --disable-programs \
        --disable-avdevice \
        --disable-doc \
        --cross-prefix=$3 \
        --target-os=linux \
        --arch=$4 \
        --sysroot=$5 \
        --extra-cflags="-Os $6" \
        --extra-ldflags="$7" \
        --disable-linux-perf \
        $8
    make clean
    make $MAKE_OPTS
    make install
}
NDK=$ANDROID_NDK
###############################################################################
#
# x86 build configuration
#
###############################################################################
PREFIX=$BASE/output/x86
BUILD_ROOT=$BASE/build/x86
SYSROOT=$NDK/platforms/android-$PLATFORM/arch-x86/
TOOLCHAIN=$NDK/toolchains/x86-4.8/prebuilt/linux-$HOST_ARCH
CROSS_PREFIX=$TOOLCHAIN/bin/i686-linux-android-
ARCH=x86
E_CFLAGS=
E_LDFLAGS=
EXTRA="--disable-asm"
build_one "$BUILD_ROOT" "$PREFIX" "$CROSS_PREFIX" "$ARCH" "$SYSROOT" \
"$E_CFLAGS" "$E_LDFLAGS" "$EXTRA"
###############################################################################
#
# ARM build configuration
#
###############################################################################
PREFIX=$BASE/output/armeabi
BUILD_ROOT=$BASE/build/armeabi
SYSROOT=$NDK/platforms/android-$PLATFORM/arch-arm/
TOOLCHAIN=$NDK/toolchains/arm-linux-androideabi-4.8/prebuilt/linux-$HOST_ARCH
CROSS_PREFIX=$TOOLCHAIN/bin/arm-linux-androideabi-
ARCH=arm
E_CFLAGS=
E_LDFLAGS=
EXTRA=
build_one "$BUILD_ROOT" "$PREFIX" "$CROSS_PREFIX" "$ARCH" "$SYSROOT" \
"$E_CFLAGS" "$E_LDFLAGS" "$EXTRA"
###############################################################################
#
# ARM-v7a build configuration
#
###############################################################################
PREFIX=$BASE/output/armeabi-v7a
BUILD_ROOT=$BASE/build/armeabi-v7a
SYSROOT=$NDK/platforms/android-$PLATFORM/arch-arm/
TOOLCHAIN=$NDK/toolchains/arm-linux-androideabi-4.8/prebuilt/linux-$HOST_ARCH
CROSS_PREFIX=$TOOLCHAIN/bin/arm-linux-androideabi-
ARCH=arm
E_CFLAGS="-march=armv7-a -mfloat-abi=softfp"
E_LDFLAGS=
EXTRA=
build_one "$BUILD_ROOT" "$PREFIX" "$CROSS_PREFIX" "$ARCH" "$SYSROOT" \
"$E_CFLAGS" "$E_LDFLAGS" "$EXTRA"
###############################################################################
#
# MIPS build configuration
#
###############################################################################
##PREFIX=$BASE/output/mips
##BUILD_ROOT=$BASE/build/mips
##SYSROOT=$NDK/platforms/android-$PLATFORM/arch-mips/
##TOOLCHAIN=$NDK/toolchains/mipsel-linux-android-4.8/prebuilt/linux-$HOST_ARCH
##CROSS_PREFIX=$TOOLCHAIN/bin/mipsel-linux-android-
##ARCH=mips32
##E_CFLAGS=
##E_LDFLAGS=
##EXTRA=""
##
##build_one "$BUILD_ROOT" "$PREFIX" "$CROSS_PREFIX" "$ARCH" "$SYSROOT" \
## "$E_CFLAGS" "$E_LDFLAGS" "$EXTRA"
##
| true |
27fe7d4a7b66ac9315af95176f26cfb9a9bd25f1 | Shell | toddhodes/bin | /rxterm | UTF-8 | 839 | 3.328125 | 3 | [] | no_license | #!/bin/sh
#set -o xtrace
# rxterm - open an xterm running a remote login session.
#
# Usage:
#   rxterm NOW|u          xterm running the external `nowlogin` helper
#   rxterm -s HOST        xterm running `ssh HOST`
#   rxterm HOST [USER]    xterm running rlogin (encrypted when possible)
#
# Relies on external helpers `getColor` (picks a background colour) and,
# for the first form, `nowlogin`; both must be on PATH.

# Special target "NOW"/"u": hand off to the nowlogin helper and stop.
case $1 in
 "NOW"|"u") \
	xterm -sb -sl 1200 -title $1 -sb -bg `getColor` -e nowlogin ;
	exit ;;
esac
# "-s HOST": use ssh instead of rlogin.
case $1 in
 "-s") \
	xterm -sb -sl 1200 -title $2 -sb -bg `getColor` -e ssh $2 ;
	exit ;;
esac
# Probe the host once with encrypted rlogin (-x); if the first stderr line
# mentions Kerberos/ticket/other failures, fall back to plain rlogin.
# The `...X = X` idiom tests whether the grep produced any output.
tmpfile=rxterm.$RANDOM
rlogin -x $1 </dev/null >/dev/null 2> /tmp/$tmpfile
if [ `head -1 /tmp/$tmpfile | \
	egrep "(ticket|failed|standard|error|Kerberos)" | cut -d" " -f1`X = X ]
then
  rlcmd="rlogin -x"
else
  rlcmd=rlogin
fi
# provide monitoring
echo "`date` : $rlcmd $1" >> $HOME/var/rlogin-results
# --- xhost + the host, assuming it will set its display back
# NOTE(review): `xhost +host` grants that host unrestricted access to the
# local X display -- presumably acceptable on a trusted LAN; confirm.
xhost +$1
#echo "----"
#head -1 /tmp/$tmpfile
#echo "----"
/bin/rm /tmp/$tmpfile
# No user given: log in as the current user; otherwise pass -l USER.
case $2 in
   "") xterm -sb -sl 1200 -title $1 -sb -bg `getColor` -e $rlcmd $1 ; exit ;;
esac
xterm -sb -sl 1200 -title $1 -sb -bg `getColor` -e $rlcmd -l $2 $1
| true |
cace3930a49375a972302c302a6405a15a1f2405 | Shell | SamuelZimmer/Mutect2_pipeline | /cleanup.sh | UTF-8 | 1,494 | 3.3125 | 3 | [] | no_license | #!/bin/bash
# Exit immediately on error
#set -eu -o pipefail

# Usage: cleanup.sh <sample.bam> <previous-step-name>
# Generates and submits a Slurm cleanup job that runs only after the
# previous pipeline step (identified by its .JOBID file) finishes
# successfully: it gathers *.out logs and job scripts, archives the
# recalibrated BAMs and removes intermediate step directories.
export BAM=$1
export NAME=${BAM%.bam}         # input path without the .bam suffix
export NOPATHNAME=${NAME##*/}   # sample name (basename, no extension)
PREVIOUS=$2                     # job_output/ subdirectory of the previous step

OUTPUT_DIR=$(pwd)
JOB_OUTPUT_DIR=$OUTPUT_DIR/job_output
mkdir -p "$JOB_OUTPUT_DIR"
cd "$JOB_OUTPUT_DIR" || exit 1
mkdir -p "$OUTPUT_DIR/jobs"

# Slurm job id(s) the cleanup must wait for (afterok dependency).
JOB_DEPENDENCIES=$(cat "${JOB_OUTPUT_DIR}/${PREVIOUS}/${NOPATHNAME}.JOBID")

mkdir -p "$OUTPUT_DIR/logs"
LOG=$OUTPUT_DIR/logs/cleanup.log

# Define a timestamp function
timestamp() {
  date +"%Y-%m-%d %H:%M:%S"
}

# Body of the generated batch script.  The $'...' sequences sit inside
# double quotes on purpose: they are written out literally and only
# expanded (to ANSI colour codes) when the generated script runs.
COMMAND="timestamp() {
date +\"%Y-%m-%d %H:%M:%S\"
}
echo \"Started:\" | sed $'s,.*,\e[96m&\e[m,' >> $LOG
timestamp >> $LOG
mkdir -p $OUTPUT_DIR/logs
mv $JOB_OUTPUT_DIR/*.out $OUTPUT_DIR/logs/
mv $OUTPUT_DIR/*.out $OUTPUT_DIR/logs/
mv $JOB_OUTPUT_DIR/*/*.sh $OUTPUT_DIR/jobs/
rm $JOB_OUTPUT_DIR/*/*.JOBID
rm $JOB_OUTPUT_DIR/*/*/*.JOBID
mv $JOB_OUTPUT_DIR/Recalibration/*.ba* /netmount/ip29_home/zimmers/Mutect2/recalibrated/
rm -fr $JOB_OUTPUT_DIR/ReplaceReadGroup $JOB_OUTPUT_DIR/Sambamba_markDuplicates $JOB_OUTPUT_DIR/FixMate
echo \"Ended:\" | sed $'s,.*,\e[96m&\e[m,' >> $LOG
timestamp >> $LOG"

#Write .sh script to be submitted with sbatch
echo "#!/bin/bash" > "$OUTPUT_DIR/jobs/cleanup.sh"
echo "$COMMAND" >> "$OUTPUT_DIR/jobs/cleanup.sh"

# BUG FIX: the original invocation ended with `cleanup.sh \` -- the stray
# line continuation glued the following `echo $COMMAND >> $LOG` onto the
# sbatch command, passing `echo` plus the unquoted script body as extra
# sbatch arguments and redirecting sbatch's own output into the log.
sbatch --job-name=Cleanup_${NOPATHNAME} --output=%x-%j.out --time=24:00:00 --mem=2G \
  --dependency=afterok:$JOB_DEPENDENCIES "$OUTPUT_DIR/jobs/cleanup.sh"

# Record the generated script and the submission time in the log.
echo "$COMMAND" >> "$LOG"
echo "Submitted:" | sed $'s,.*,\e[96m&\e[m,' >> "$LOG"
echo "$(timestamp)" >> "$LOG"
bc4865580df3682b2484fa04b14df27e71ed9e58 | Shell | kmonticolo/scripts | /fullbackup.sh | UTF-8 | 1,428 | 3.21875 | 3 | [] | no_license | #!/usr/bin/ksh
# Tivoli Storage Manager (TSM) client backup driver (ksh).
# Strategy: on one randomly chosen day of the month (1-29, drawn once and
# persisted in $SEEDFILE) run FULL ("selective") backups under both the
# 14-day and 3-month retention option files; on every other day run an
# incremental backup under the 14-day policy only.
export DSM_DIR=/usr/tivoli/tsm/client/ba/bin64
export DSM_LOG=/usr/tivoli/tsm/client/LOG
SEEDFILE=/usr/tivoli/tsm/client/CFG/seedfile
LOGFILE=/usr/tivoli/tsm/client/LOG/file_backup.log
OPT14D=/usr/tivoli/tsm/client/CFG/dsm_14D.opt
OPT3M=/usr/tivoli/tsm/client/CFG/dsm_3M.opt
# Log messages are in Polish ("Backup uruchomiony" = "Backup started").
echo "Backup uruchomiony: " > $LOGFILE
date >> $LOGFILE
# First run only: draw a day-of-month in 1..29 and persist it.
# NOTE(review): `]] then` without a `;` or newline before `then` is a
# syntax error in bash and in common ksh builds -- confirm the deployed
# file really parses, or add the missing semicolons.
if [[ ! -f $SEEDFILE ]] then
RAND=$(($RANDOM%29+1))
echo $RAND > $SEEDFILE
echo Dopisalem SEED: $RAND >> $LOGFILE
fi
SEED=`cat $SEEDFILE`
DAY=`date +%d`
echo Uzyskany seed to: $SEED >> $LOGFILE
# Full backups on the chosen day, incremental otherwise.  `date +%d` is
# zero-padded ("05") while the seed is not; -eq compares numerically, so
# this still matches.
if [[ $DAY -eq $SEED ]] then
echo SEED:$SEED zgadza sie z DAY:$DAY >> $LOGFILE
echo Wykonuje backup pelny 14d >> $LOGFILE
# Full ("selective") backup of all listed filesystems, 14-day policy.
dsmc selective -optfile=$OPT14D -subdir=yes / /usr/ /var/ /home/ /admin/ /opt/ /opt/oracle/ /opt/ctmuser/ /opt/emuser/ /vol/ctm01/ /vol/ctm02/ /vol/ctm03/ >> $LOGFILE 2>&1
echo Wykonuje backup pelny 3m >> $LOGFILE
# Same full backup again, under the 3-month retention policy.
dsmc selective -optfile=$OPT3M -subdir=yes / /usr/ /var/ /home/ /admin/ /opt/ /opt/oracle/ /opt/ctmuser/ /opt/emuser/ /vol/ctm01/ /vol/ctm02/ /vol/ctm03/ >> $LOGFILE 2>&1
else
echo SEED:$SEED sie nie zgadza z DAY:$DAY >> $LOGFILE
echo Wykonuje backup incrementalny 14d >> $LOGFILE
# Incremental backup, 14-day policy only.
dsmc incr -optfile=$OPT14D -subdir=yes / /usr/ /var/ /home/ /admin/ /opt/ /opt/oracle/ /opt/ctmuser/ /opt/emuser/ /vol/ctm01/ /vol/ctm02/ /vol/ctm03/ >> $LOGFILE 2>&1
# NOTE(review): the closing `fi` of this if/else appears to be missing
# from this copy (possibly truncated) -- verify the deployed file.
| true |
9a756ab9c4c49687199ba1668f1485146911cf45 | Shell | melindam/chef | /cookbooks/jmh-operations/templates/default/archive_weekly_mysql_sh.erb | UTF-8 | 460 | 3.5 | 4 | [] | no_license | #!/bin/sh
#set -x
# ERB template: expands to one copy/prune stanza per server in @server_list.
# Copies week-old daily MySQL dump archives into each server's weekly
# directory, then prunes weekly files older than the retention window.
WEEKAGO=`date --date "now -1week" +"%Y%m%d"`
DAILYFILE="<%= @dailyfile %>"
ARCHIVEDIR="<%= @backup_dir %>"
<% @server_list.each do |n| %>
#<%= n['name']%>
# Abort instead of copying/pruning in the wrong directory if the cd fails.
cd "$ARCHIVEDIR/weekly/<%= n['name'] %>" || exit 1
for x in `find $ARCHIVEDIR/<%= n['name'] %> -name "*${WEEKAGO}${DAILYFILE}*"`
do
  echo "Copying $x"
  # BUG FIX: the error check used to sit after the prune `find` below, so
  # $? reflected the prune command, not the copy it claimed to report on.
  if ! cp -rp "$x" ./
  then
    echo "Error with the copy"
    exit 1
  fi
done
# Prune weekly archives older than the retention window.
find . -mtime <%= @retention %> -exec rm -f {} \;
<% end %>
| true |
468ecb14e4f0864f007c2eb9c88a4f46a0bae747 | Shell | clstefi/ignite | /Test/ROBOTCs/support_utilities/run.sh | UTF-8 | 3,251 | 3.625 | 4 | [
"Apache-2.0"
] | permissive | #! /bin/bash
#
# Copyright (c) 2019, Infosys Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# File: run.sh Usage: ./run.sh Description: To execute the Robo Suites
# Change History
# dd-mm-yyyy : Author - Description
# 12-08-2019 : Vidhya Lakshmi Shankarlal - Initial Version
# Per-run logs are collected under $HOME/LOGS/<suite>/<suite>_<timestamp>.
logDir=$HOME/LOGS
# Create $HOME/LOGS directory,if LOGS folder does not exist
if [ ! -d "$logDir" ]; then
mkdir $HOME/LOGS
fi
echo "*********************************************"
echo
# NOTE(review): both passwords are later passed on the robot command line
# (--variable ...Password:...) and piped to `sudo -S`, so they are visible
# in `ps` output and in Robot Framework logs -- confirm this is acceptable.
read -s -p "Enter ignite sudo password: " pswd
echo
read -s -p "Enter mme sudo password: " mmePswd
echo
read -p "Enter Option: s (for single tc execution) or Option: p(for package execution):" execSinglePackOption
echo
# ${var,,}: bash lower-casing, so "S"/"P" are accepted as well.
execSinglePackOption=${execSinglePackOption,,}
if [ "$execSinglePackOption" == "s" ] ; then
echo "*****************Suite Names******************"
echo
# Find all the suites under testsuites directory and display in console
# (rev|cut|rev extracts the filename, the second cut drops ".robot").
find ../testsuites -type f -name "*.robot" | rev | cut -d '/' -f1 | rev | cut -d '.' -f1 | sort
echo
echo "**********************************************"
echo
read -p "Enter Suite Name from above available option: " suiteName
echo
echo "**********************************************"
echo
# Create $suiteName directory, if it folder does not exist
if [ ! -d "$logDir/$suiteName" ]; then
mkdir $logDir/$suiteName
fi
now=$(date +%Y%m%d_%H%M%S)
# Execute the testsuites
echo $pswd | sudo -S python3 -m robot --variable suitenamecmdline:$suiteName\_$now --variable mmePassword:$mmePswd --variable ignitePassword:$pswd --outputdir $HOME/LOGS/$suiteName/$suiteName\_$now --timestampoutput ../testsuites/$suiteName.robot
elif [ "$execSinglePackOption" == "p" ] ; then
echo "*****************Package Name******************"
echo
#Find all the available packages under testsuites directory
# (splits matching lines into words and keeps the Pkg* tokens).
grep -Ri "Pkg*" ../testsuites/ | sed s/' '/'\n'/g | grep -E 'Pkg' | uniq
echo
echo "**********************************************"
echo
read -p "Enter the Package Name from above available option:" pkgName
echo
echo "**********************************************"
echo
# Suites tagged with the chosen package; path component 3 is the filename.
# NOTE(review): `echo "$a"` is a no-op here ($a is never set and grep with
# file arguments ignores stdin) -- presumably a leftover; confirm.
testSuites=$(echo "$a"|grep -Ril "$pkgName" ../testsuites/ | cut -d '/' -f3 | cut -d '.' -f1)
for eachSuiteName in $testSuites
do
# Create $suiteName directory, if it folder does not exist
if [ ! -d "$logDir/$eachSuiteName" ]; then
mkdir $logDir/$eachSuiteName
fi
now=$(date +%Y%m%d_%H%M%S)
echo $pswd | sudo -S python3 -m robot --variable suitenamecmdline:$eachSuiteName\_$now --variable mmePassword:$mmePswd --variable ignitePassword:$pswd --outputdir $HOME/LOGS/$eachSuiteName/$eachSuiteName\_$now --timestampoutput ../testsuites/$eachSuiteName.robot
done
fi
b832f70e94e30601236f0dde9f4b31f0880c8a26 | Shell | sashetov/seshu | /utils/ssh-tunnel-twohop | UTF-8 | 560 | 3.4375 | 3 | [
"WTFPL"
] | permissive | #!/bin/bash
# "echo and execute": print the command line, then run it through eval so
# the user can see the exact ssh invocation being launched.
# NOTE(review): eval re-splits its arguments -- fine for the fixed command
# built below, unsafe for untrusted input.
function ee(){ echo $*; eval $*; }
# Build an SSH tunnel that hops through HOST1 to reach HOST2:
#   localhost:LOCAL_PORT -> HOST1:MID_PORT -> HOST2:REMOTE_PORT
# -A forwards the ssh agent across both hops; -t forces a tty so the
# nested ssh command runs interactively.
# Arguments: LOCAL_PORT REMOTE_PORT MID_PORT USER1 HOST1 USER2 HOST2
# Returns 1 (after printing usage) unless exactly 7 arguments are given.
function two_hop_ssh_tunnel(){
  export LOCAL_PORT=$1
  export REMOTE_PORT=$2
  export MID_PORT=$3
  export USER1=$4
  export HOST1=$5
  export USER2=$6
  export HOST2=$7
  if [ $# -ne 7 ]; then
    echo -e "Usage:\n$0 LOCAL_PORT REMOTE_PORT MID_PORT USER1 HOST1 USER2 HOST2'";
    return 1
  fi;
  ee ssh -A -t -l $USER1 $HOST1 -L $LOCAL_PORT:localhost:$MID_PORT \
  ssh -A -t -l $USER2 $HOST2 -L $MID_PORT:localhost:$REMOTE_PORT;
}
# Entry point: forward all command-line arguments to the tunnel builder.
function __main__(){
  two_hop_ssh_tunnel $*;
}
__main__ $*
| true |
a3bfffd56d19d8dad0cc9b2009e882218c0e348a | Shell | sun-mir/home | /bin/start_tor_socks_proxy.sh | UTF-8 | 3,675 | 3.84375 | 4 | [] | no_license | #!/bin/bash
#Sam Gleske
#Ubuntu 18.04.5 LTS
#Linux 5.4.0-42-generic x86_64
#Sat Sep 12 18:23:10 EDT 2020
#DESCRIPTION
#  Start a SOCKS5 proxy on localhost:9150 which routes through the TOR onion
#  network. Also starts a DNS server on localhost:53 (UDP).
#
#  https://github.com/PeterDaveHello/tor-socks-proxy
#
#  The following is a recommended crontab to start TOR automatically and
#  change the TOR endpoint every 5 minutes.
#CRONTAB(5)
#  */5 * * * * start_tor_socks_proxy.sh restart
#  @reboot start_tor_socks_proxy.sh

# Print the usage text to stdout and terminate the whole script with
# status 1.  Called for any unrecognised command-line argument.
function helpdoc() {
cat <<EOF
SYNOPSIS
  ${0##*/} [-f] [-c CODE] [restart|stop]

BASIC OPTIONS
  -f or --strict-firewall
    TOR will only make outbound connections over ports 443 or 80 when
    connecting to the onion network.
  restart
    Will kill an already running TOR docker container before starting. Without
    this option, an already running TOR docker container will make this command
    a no-op.
  stop
    Permanently stops the TOR docker container.

OPTIONS WITH ARGUMENTS
  -c CODE or --country CODE
    Provide a country CODE to restrict TOR exit notes just to that country.
    Useful if you want to restrict your traffic to be coming from a specific
    country. If you're looking for a compromise between anonymity and speed,
    then restricting exit nodes to your own country is more performant than no
    restriction.
  -e CODE or --country-entry CODE
    Same as -c but allows you to specify a specific entry point into the TOR
    network.
EOF
exit 1
}
country=""
strict_firewall=false
# Parse command-line arguments.  -c/-e may be repeated; repeated codes are
# accumulated into comma-separated lists for later torrc generation.
while [ "$#" -gt 0 ]; do
  case "$1" in
    restart)
      # Re-invoke this same script with "stop" to kill any running
      # container, then fall through to the normal start path.
      "$0" stop
      shift
      ;;
    --country-entry|-e)
      if [ -z "${country_entry:-}" ]; then
        country_entry="$2"
      else
        country_entry+=",$2"
      fi
      shift
      shift
      ;;
    --country|-c)
      if [ -z "${country:-}" ]; then
        country="$2"
      else
        country+=",$2"
      fi
      shift
      shift
      ;;
    --strict-firewall|-f)
      strict_firewall=true
      shift
      ;;
    stop)
      # Remove the container (force-stops it) and exit immediately.
      docker rm -f tor-socks-proxy
      exit
      ;;
    *)
      # Unknown argument: print usage and exit 1 (helpdoc never returns).
      helpdoc
      ;;
  esac
done
# Convert a comma-separated list of country codes ("US,DE") into torrc
# node-selection syntax ("{us},{de}").  Prints nothing and returns
# non-zero when the list is empty.
function get_country_config() {
  local list=$1
  local config="" code
  [ -n "$list" ] || return 1
  # read -r: keep backslashes literal.  `code` is declared local so the
  # loop variable no longer leaks into the caller's scope (the original
  # used an undeclared global `c`).
  while IFS= read -r code; do
    # ${config:+,}: prepend a comma only after the first element.
    config+="${config:+,}{${code}}"
  done <<< "$(tr ',' '\n' <<< "$list")"
  echo "$config" | tr 'A-Z' 'a-z'
}
# Normalise the accumulated code lists into torrc syntax.  If no explicit
# entry list was given, entry nodes follow the -c country restriction.
country_config="$(get_country_config "$country")"
if [ -z "${country_entry:-}" ]; then
  country_entry="$country_config"
else
  country_entry="$(get_country_config "$country_entry")"
fi

# Reuse an existing container (running or stopped) if one exists.
# NOTE(review): this greps the whole `docker ps -a` output, so any
# substring match (e.g. a similarly named container) counts -- confirm.
if docker ps -a | grep tor-socks-proxy; then
  docker start tor-socks-proxy
  echo 'Started existing proxy.'
  exit
fi

# https://2019.www.torproject.org/docs/tor-manual.html.en
# MiddleNodes is an experimental option and may be removed.
# The shell variables inside the quoted /bin/sh script below are expanded
# HERE, baking the chosen firewall/country settings into a torrc fragment
# that is generated inside the container before tor starts.
docker run -d --restart=always --name tor-socks-proxy \
  -p 127.0.0.1:9150:9150/tcp \
  -p 127.0.0.1:53:53/udp \
  --init \
  peterdavehello/tor-socks-proxy:latest \
  /bin/sh -exc "
    echo > /var/lib/tor/torrc2
    if [ '${strict_firewall}' = true ]; then
      echo 'FascistFirewall 1' >> /var/lib/tor/torrc2
      echo 'ReachableAddresses *:80,*:443' >> /var/lib/tor/torrc2
    fi
    if [ -n '${country_config}' ]; then
      echo 'GeoIPExcludeUnknown 1' >> /var/lib/tor/torrc2
      echo 'EntryNodes ${country_entry}' >> /var/lib/tor/torrc2
      echo 'MiddleNodes ${country_config}' >> /var/lib/tor/torrc2
      echo 'ExitNodes ${country_config}' >> /var/lib/tor/torrc2
      echo 'StrictNodes 1' >> /var/lib/tor/torrc2
    fi
    /usr/bin/tor --defaults-torrc /etc/tor/torrc -f /var/lib/tor/torrc2
  "
echo 'Started a new proxy.'
| true |
2a22d47b6f2cf7cd65d8e2238169d96f0abf567f | Shell | Homas/ATC_DOH | /run_doh.sh | UTF-8 | 1,717 | 3.078125 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
#Copyright 2018 Vadim Pavlov ioc2rpz[at]gmail[.]com
#ATC DOH start script
# Container entrypoint: obtains/restores a Let's Encrypt certificate for
# $HOST, generates the doh-server config on first run, installs a cert
# renewal crontab and finally starts doh-server in the foreground.
# Requires $HOST and $EMAIL in the environment; $PREFIX is optional.
SYSUSER=`whoami | awk '{print $1}'`
DOH_ROOT="/opt/doh"
if [ -z "$PREFIX" ]; then
	PREFIX="atc"
fi
# Certificate bootstrap:
#  - no cert anywhere: issue a fresh one with certbot and cache a copy
#    under $DOH_ROOT/ssl (which is expected to persist across restarts);
#  - cert only in the $DOH_ROOT cache: restore it into /etc/letsencrypt.
if [ ! -f /etc/letsencrypt/live/$HOST/fullchain.pem ] && [ ! -f $DOH_ROOT/ssl/letsencrypt/live/$HOST/fullchain.pem ]; then
	certbot -n certonly --standalone -d $HOST --agree-tos --email $EMAIL
	cp /etc/letsencrypt/live/$HOST/fullchain.pem $DOH_ROOT/ssl/doh.crt
	cp /etc/letsencrypt/live/$HOST/privkey.pem $DOH_ROOT/ssl/doh.key
	cp -Rf /etc/letsencrypt $DOH_ROOT/ssl/
elif [ ! -f /etc/letsencrypt/live/$HOST/fullchain.pem ] && [ -f $DOH_ROOT/ssl/letsencrypt/live/$HOST/fullchain.pem ]; then
	cp -Rf $DOH_ROOT/ssl/letsencrypt /etc/
fi
# Make sure the cert/key pair doh-server reads actually exists.
if [ ! -f $DOH_ROOT/ssl/doh.crt ]; then
	cp /etc/letsencrypt/live/$HOST/fullchain.pem $DOH_ROOT/ssl/doh.crt
	cp /etc/letsencrypt/live/$HOST/privkey.pem $DOH_ROOT/ssl/doh.key
fi
# First run only: write the doh-server config and the renewal crontab.
# NOTE(review): the listen address and DNS upstream are hard-coded
# (172.17.0.2 = default docker bridge IP) -- confirm for your deployment.
if [ ! -f $DOH_ROOT/etc/doh-server.conf ]; then
cat >> $DOH_ROOT/etc/doh-server.conf << EOF
listen = ["172.17.0.2:443",]
cert = "$DOH_ROOT/ssl/doh.crt"
key = "$DOH_ROOT/ssl/doh.key"
path = "/$PREFIX"
upstream = ["52.119.40.100:53",]
timeout = 60
tries = 10
tcp_only = false
verbose = false
EOF
cat >> /tmp/$SYSUSER << EOF
### Renew certificates
42 0,12 * * * /usr/bin/python -c 'import random; import time; time.sleep(random.random() * 3600)' && /usr/bin/certbot renew && if ! cmp -s /etc/letsencrypt/live/$HOST/fullchain.pem $DOH_ROOT/ssl/doh.crt; then cp /etc/letsencrypt/live/$HOST/fullchain.pem $DOH_ROOT/ssl/doh.crt && cp /etc/letsencrypt/live/$HOST/privkey.pem $DOH_ROOT/ssl/doh.key && killall -9 doh-server; fi
EOF
fi
# Install the renewal crontab, start cron, then run doh-server (blocks).
crontab /tmp/$SYSUSER
crond
$DOH_ROOT/bin/doh-server -conf $DOH_ROOT/etc/doh-server.conf
| true |
b243d73d4ead9e39ec2a9dcc49df5135f39ccbc7 | Shell | fdavalo/dbaas | /databases/snapshot.sh | UTF-8 | 524 | 3.078125 | 3 | [] | no_license | NS=$1
# Snapshot the PVC backing an application's deployment in OpenShift.
#   $1 -> NS (namespace, assigned above), $2 -> APP, $3 -> snapshot ID.
# NOTE(review): $TYPE and $VSC are never set in this script -- presumably
# exported by a calling wrapper (deployment suffix and
# VolumeSnapshotClass); confirm before running standalone.
APP=$2
ID=$3
DIR=data/$NS-$APP
# Create the project if missing (errors ignored), then switch to it.
oc new-project $NS 2>/dev/null
oc project $NS >/dev/null
ret=$?
if [[ $ret -ne 0 ]]; then echo "Could not use namespace $NS"; exit 1; fi
# PVC claimed by the <app>-<type> deployment's first volume (tail -1
# skips the column header printed by `oc get`).
PVC=`oc get deployment/$APP-$TYPE -o custom-columns=PVC:.spec.template.spec.volumes[].persistentVolumeClaim.claimName | tail -1`
mkdir -p $DIR
# Instantiate the snapshot manifest template and apply it; all oc output
# is appended to the per-app log.
sed -e "s/\${app}/$APP/g" -e "s/\${type}/$TYPE/g" -e "s/\${id}/$ID/g" -e "s/\${vsc}/$VSC/g" -e "s/\${pvc}/$PVC/g" snapshot.yaml > $DIR/snapshot.yaml
oc apply -f $DIR/snapshot.yaml >> $DIR/out.log 2>&1
| true |
49eba0193c2888f166a94e485865c4d7bde1938e | Shell | toumorokoshi/ytlaces | /bin/install_yay | UTF-8 | 173 | 2.546875 | 3 | [] | no_license | #!/bin/sh
# Build and install yay (the AUR helper) from a throwaway checkout.
# set -e: abort on any failed step -- previously a failed mktemp/cd meant
# the clone and build silently ran in whatever directory we were left in.
set -e
TMP_DIR=$(mktemp -d)
cd "$TMP_DIR"
sudo pacman -S make fakeroot --noconfirm
git clone https://aur.archlinux.org/yay.git
cd yay
makepkg -si
# Leave the build tree before deleting it.
cd /
rm -r "$TMP_DIR"
| true |
a09a9d2eb380ae5750fad16b118234250a4a3725 | Shell | swordhui/xglibs | /dev-lang/undro-tcl/undro-tcl-2019.6.22.xgb | UTF-8 | 3,179 | 3.578125 | 4 | [] | no_license | #!/bin/bash
#
# Xiange Linux build scripts
# Short one-line description of this package.
DESCRIPTION="Tool Command Language inside AndroWish/UndroWish"
# Homepage, not used by Portage directly but handy for developer reference
HOMEPAGE="http://www.androwish.org"
# Point to any required sources; these will be automatically downloaded by
# gpkg.
# $N = package name, such as autoconf, x-org
# $V = package version, such as 2.6.10
#SRC_URI="http://foo.bar.com/$N-$V.tar.bz2"
SRC_URI="http://www.androwish.org/download/androwish-6e2085e6e4.tar.gz"
# Binary package URI.
BIN_URI=""
# Runtime Depend
RDEPEND=""
# Build time depend
DEPEND="rsync ${RDEPEND}"
#init
xgb_init()
{
XGPATH_SOURCE=$XGPATH_SOURCE_RAW/androwish-$V
}
#unpack
xgb_unpack()
{
#unpard file from $XGPATH_SOURCE to current directory.
echo "Unpacking to `pwd`"
tar xf $XGPATH_SOURCE/$(basename $SRC_URI)
}
#config
xgb_config()
{
echo "config $N-$V$R..."
#fist, cd build directory
cd androwish-6e2085e6e4/undroid
err_check "enter directory failed."
./build-undroidwish-wayland.sh init
err_check "script init failed"
cd tcl
err_check "enter tcl failed"
#patch for sdl2tk: TclCreateLatedExitHandler
cd generic
patch -p1 < $XGPATH_SCRIPT/sdl2tk.diff
err_check "patch failed"
cd ..
export SRCDIR=`pwd`
cd unix
err_check "enter unix failed."
#second, add package specified config params to XGB_CONFIG
XGB_CONFIG+=" --without-tzdata --with-zipfs "
if [ "$XGB_ARCH" == "x86_64" ]; then
XGB_CONFIG+=" --enable-64bit "
fi
#Third, call configure with $XGB_CONFIG
./configure $XGB_CONFIG
}
#build
xgb_build()
{
echo "make $N-$V$R..."
#run make in current directory
make $XGPARA_MAKE
}
#check
xgb_check()
{
echo "checking $N-$V$R.."
#make check
}
#install
xgb_install()
{
echo "install to $XGPATH_DEST"
sed -e "s#$SRCDIR/unix#/usr/lib#" \
-e "s#$SRCDIR#/usr/include#" \
-i tclConfig.sh
err_check "install 1 failed."
sed -e "s#$SRCDIR/unix/pkgs/tdbc1.1.0#/usr/lib/tdbc1.1.0#" \
-e "s#$SRCDIR/pkgs/tdbc1.1.0/generic#/usr/include#" \
-e "s#$SRCDIR/pkgs/tdbc1.1.0/library#/usr/lib/tcl8.6#" \
-e "s#$SRCDIR/pkgs/tdbc1.1.0#/usr/include#" \
-i pkgs/tdbc1.1.0/tdbcConfig.sh
err_check "install 2 failed."
sed -e "s#$SRCDIR/unix/pkgs/itcl4.1.2#/usr/lib/itcl4.1.2#" \
-e "s#$SRCDIR/pkgs/itcl4.1.2/generic#/usr/include#" \
-e "s#$SRCDIR/pkgs/itcl4.1.2#/usr/include#" \
-i pkgs/itcl4.1.2/itclConfig.sh
err_check "install 3 failed."
unset SRCDIR
#install everything to $XGPATH_DEST
make DESTDIR=$XGPATH_DEST install &&
make DESTDIR=$XGPATH_DEST install-private-headers
err_check "install $V-$N failed."
#zipfs
install -m 0644 ../generic/zipfs.h $XGPATH_DEST/usr/include
err_check "install zipfs failed"
#create symbol link
ln -v -sf tclsh8.6 $XGPATH_DEST/usr/bin/tclsh &&
chmod -v 755 $XGPATH_DEST/usr/lib/libtcl8.6.so
err_check "install $V-$N 4 failed."
}
#post install
xgb_postinst()
{
echo "running after package installed..."
}
#pre remove
xgb_prerm()
{
echo "running before package delete..."
}
#post remove
xgb_postrm()
{
echo "running after package delete..."
}
| true |
12d15521c3668d37a220b586bdbd4272fe9ec873 | Shell | patmorin/bin | /cgbackup | UTF-8 | 176 | 3.359375 | 3 | [] | no_license | #!/bin/bash
# Usage: cgbackup <directory>
# Stream <directory> as a gzipped tarball to host "cg" over ssh, saved
# remotely as <basename>.tgz in the remote user's home directory.
dir=$1
if [[ -d $dir ]]; then
  dirname=$(basename "$dir")
  # Quote both the local path and the remote target so directory names
  # containing spaces survive local and remote word splitting.
  tar czvf - "$dir" | ssh cg "cat > '$dirname.tgz'"
else
  echo "Usage $0 <directory>"
  # BUG FIX: `exit -1` is not a valid exit status (it only showed up as
  # 255 by wrap-around, and some shells reject it outright).
  exit 1
fi
| true |
a622daa1e65af7b9735de1a2eeecf6eceae5ff37 | Shell | ethancaballero/ib_irm | /InvarianceUnitTests/old_job_scripts/job_node.sh | UTF-8 | 1,537 | 2.515625 | 3 | [
"MIT"
] | permissive | #!/bin/bash
#SBATCH --account=rrg-bengioy-ad # Yoshua pays for your job
#SBATCH --cpus-per-task=6 # Ask for 40 CPUs
#SBATCH --gres=gpu:0 # Ask for 0 GPU
#SBATCH --mem=32G # Ask for 752 GB of RAM
#SBATCH --time=1:00:00 # The job will run for 3 hours
#SBATCH -o /scratch/ethancab/slurm-%j.out # Write the log in $SCRATCH
# 1. Create your environement locally
: '
cd $SLURM_TMPDIR
echo $1 >> myfile1.txt
cp $SLURM_TMPDIR/myfile.txt $SCRATCH
'
module load python/3.8
cd /home/ethancab
source invariance_env/bin/activate
cd /home/ethancab/research/invariance_unit_test/ib_irm/InvarianceUnitTests
: '
echo $2 >> myfile2.txt
cp $SLURM_TMPDIR/myfile.txt $SCRATCH
'
#python scripts/sweep.py --models ERM IRMv1 IB_ERM IB_IRM --datasets Example2 --num_samples 2 --num_data_seeds 2 --num_model_seeds 2
#python scripts/sweep.py --skip_confirmation True --models ERM --datasets Example2 --num_samples 2 --num_data_seeds 2 --num_model_seeds 2
#python scripts/sweep.py --skip_confirmation True --models ERM --datasets Example2 --num_samples 2 --m_start 0 --m_end 1 --d_start 0 --d_end 1
python scripts/sweep.py --skip_confirmation True --models ERM --datasets Example2 --num_samples 2 --m_start $1 --m_end $2 --d_start 0 --d_end 1
#echo $2 >> myfile3.txt
# 5. Copy whatever you want to save on $SCRATCH
# cp $SLURM_TMPDIR/<to_save> $SCRATCH
cp -R /home/ethancab/research/invariance_unit_test/ib_irm/InvarianceUnitTests/results $SCRATCH
#echo $1 >> myfile4.txt | true |
8088ef502876b7b17391a2bbee01e10ab13c0f72 | Shell | AmitSisodiya275/ShellScriptPrograms | /weekdays.sh | UTF-8 | 518 | 3.40625 | 3 | [] | no_license | #!/bin/bash
# Map a number 1-7 to its weekday name (1=MONDAY ... 7=SUNDAY).
# Prints the name and returns 0, or returns 1 for any other input.
day_name() {
  case "$1" in
    1) echo "MONDAY" ;;
    2) echo "TUESDAY" ;;
    3) echo "WEDNESDAY" ;;
    4) echo "THURSDAY" ;;
    5) echo "FRIDAY" ;;
    6) echo "SATURDAY" ;;
    7) echo "SUNDAY" ;;
    *) return 1 ;;
  esac
}

echo "Enter any number from 1 to 7 to get the weekdays."
read -r -p "Enter your choice :" val
# BUG FIX: the original `[ $val -eq 7 ]` ladder raised "integer expression
# expected" for empty or non-numeric input; the case lookup handles any
# input gracefully and falls through to the error message.
if ! day_name "$val"; then
  echo "Please enter correct number from 1 to 7 to get the weekday."
fi
579b078bd6c0cb420edd3573c39a1bb5e65c4830 | Shell | krishnaindani/contour-operator | /hack/test-examples.sh | UTF-8 | 1,194 | 3.578125 | 4 | [
"Apache-2.0"
] | permissive | #! /usr/bin/env bash
# test-examples.sh: An e2e test using the examples from
# https://projectcontour.io/
# Both commands can be overridden via environment (e.g. KUBECTL=oc).
readonly KUBECTL=${KUBECTL:-kubectl}
readonly CURL=${CURL:-curl}
# Run kubectl with the caller's arguments.  ${KUBECTL} is intentionally
# unquoted so a value containing flags ("kubectl --context=ci") still
# word-splits into command + arguments.
kubectl::do() {
    ${KUBECTL} "$@"
}
# Apply the manifest URL/path(s) given as arguments.
kubectl::apply() {
    kubectl::do apply -f "$@"
}
# Delete the resources defined by the given manifest URL/path(s).
kubectl::delete() {
    kubectl::do delete -f "$@"
}
# Poll a URL until it answers HTTP 200 or the retry budget is exhausted.
#   $1 - URL to probe
#   $2 - seconds to sleep between attempts
#   $3 - maximum number of extra attempts (so timeout+1 requests total)
# Returns 0 once a 200 response is seen, 1 on timeout.
waitForHttpResponse() {
  local -r url="$1"
  local delay=$2
  local timeout=$3
  local n=0
  while [ "$n" -le "$timeout" ]; do
    echo "Sending http request to $url"
    # ${CURL:-curl} honours the overridable CURL variable declared at the
    # top of the script (this line previously hard-coded `curl`).
    resp=$(${CURL:-curl} -w '%{http_code}' -s -o /dev/null "$url")
    if [ "$resp" = "200" ]; then
      echo "Received http response from $url"
      return 0
    fi
    sleep "$delay"
    # BUG FIX: the original `n=($n + 1)` created a three-element array
    # instead of incrementing, so the counter never advanced and the loop
    # could only terminate on success -- an effectively infinite wait.
    n=$((n + 1))
  done
  echo "Timed out waiting for $url" >&2
  return 1
}
# Deploy the Contour operator, a Contour custom resource and the kuard
# sample app, then wait (up to 300 attempts, 1s apart) for the sample to
# serve HTTP 200 through the ingress.
kubectl::apply https://projectcontour.io/quickstart/operator.yaml
kubectl::apply https://projectcontour.io/quickstart/contour-custom-resource.yaml
kubectl::apply https://projectcontour.io/examples/kuard.yaml
waitForHttpResponse http://local.projectcontour.io 1 300
# Tear everything down in reverse order of creation.
kubectl::delete https://projectcontour.io/examples/kuard.yaml
kubectl::delete https://projectcontour.io/quickstart/contour-custom-resource.yaml
kubectl::delete https://projectcontour.io/quickstart/operator.yaml
| true |
fbebd61030f443724e7e2b00881525dd5de3bac9 | Shell | simonhughxyz/tools-bin | /src/random_number.sh | UTF-8 | 183 | 3.25 | 3 | [
"MIT"
] | permissive | #!/bin/sh
# RANDNR
# Simon Hugh Moore
#
# Simple script to generate a random number of set length

# Print $1 random decimal digits (default 6) followed by a newline.
# ${1:-6} replaces the fragile `[ $1 ] && length="$1" || length=6` idiom.
rand_digits() {
  tr -cd "[:digit:]" < /dev/urandom | head -c "${1:-6}"
  echo
}

rand_digits "$@"
| true |
8f611e5d1d7d6b0325984374e6c91acb2fbf2166 | Shell | ktbartholomew/teambicyclesinc | /scripts/build_templates.sh | UTF-8 | 680 | 3.484375 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
set -ueo pipefail

# Absolute path of the directory containing this script.
scriptroot="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"

# Lint every PHP source file (vendor/ excluded).  All failures are
# reported before exiting so one broken file does not hide the others.
php_errors=0
# -print0 / read -d '' keeps paths with spaces intact; the original
# `for file in $(find ...)` word-split such paths.  The `if !` capture
# also replaces the old set +e / set -e dance.
while IFS= read -r -d '' file; do
  if ! output=$(php -l "$file"); then
    php_errors=1
    echo "$output"
  fi
done < <(find ./src -type f -name '*.php' -not -path "./src/vendor/*" -print0)

if [ "$php_errors" == "1" ]; then
  exit 1
fi

# Create the dist layout on first run.
if [ ! -d "${scriptroot}/../dist" ]; then
  mkdir -p "${scriptroot}/../dist"
  mkdir -p "${scriptroot}/../dist/img"
  mkdir -p "${scriptroot}/../dist/templates"
fi

# Install/refresh PHP dependencies and build the optimized autoloader.
pushd src
composer install
composer update
composer dump-autoload --optimize
popd

# Publish sources into dist/, then drop the build-only asset sources.
cp -a "${scriptroot}/../src/." "${scriptroot}/../dist/"
rm -r "${scriptroot}"/../dist/{scss,js}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.