blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
ed96749701197a514e7aed1996d69cd1b3f11e3c
|
Shell
|
raydennis/docker-selenium-grid
|
/.docker/docker-test.sh
|
UTF-8
| 1,514
| 3.8125
| 4
|
[] |
no_license
|
#!/bin/bash
# Smoke tests for the selenium-grid docker-compose stack.
# NOTE: this script uses bashisms ("function", [[ ]]), so the shebang must be
# bash rather than the original plain /bin/sh.
COLOR_REST="$(tput sgr0)"
COLOR_GREEN="$(tput setaf 2)"
COLOR_RED="$(tput setaf 1)"
function service_info(){
  # Print a banner announcing which service is about to be tested.
  # $1 - docker-compose service name
  service=$1
  echo ""
  printf 'Testing service: '
  # Quote the expansions: unquoted empty color codes would collapse the
  # printf arguments and shift the '%s' slots.
  printf '%s%s%s' "$COLOR_GREEN" "$service" "$COLOR_REST"
  echo ""
  echo "======="
}
function assert_result(){
  # Print a green OK when $1 is the literal "true", otherwise a red ERROR.
  if [[ "$1" == true ]];
  then
    printf '%s%s%s\n' "$COLOR_GREEN" 'OK' "$COLOR_REST"
  else
    printf '%s%s%s\n' "$COLOR_RED" 'ERROR' "$COLOR_REST"
  fi;
}
function docker_exec(){
  # Run a command inside the first running container whose name matches $1.
  # Remaining arguments form the command line to execute.
  service=$1
  shift;
  # Quote the substitution so an empty result (no matching container) does
  # not silently make "$@"'s first word the container id.
  docker exec "$(docker ps --filter name=${service} -q | head -1)" "$@"
}
function test_container_is_running(){
  # Pass when `docker ps` lists a container whose name matches $1.
  service=$1
  result=false
  echo "Checking if '${service}' has a running container"
  # Pipe docker ps straight into grep — no need to echo a substitution.
  docker ps --filter name=${service} | grep -q "${service}" && result=true
  assert_result ${result}
}
function test_host_docker_internal(){
# Check that 'host.docker.internal' resolves from inside the container.
service=$1
result=false
echo "Checking 'host.docker.internal' on '${service}'"
# NOTE(review): `grep -vq NXDOMAIN` succeeds if ANY output line lacks
# NXDOMAIN, which dig's multi-line output almost always satisfies — confirm
# this actually detects resolution failures.
docker_exec ${service} dig host.docker.internal | grep -vq NXDOMAIN && result=true
assert_result ${result}
}
# Run the container + DNS checks for every service in the grid.
for service in node-chrome node-firefox hub; do
  service_info ${service}
  test_container_is_running ${service}
  test_host_docker_internal ${service}
done
# The hub additionally exposes the Selenium status endpoint.
echo "Checking 'status' on '${service}'"
curl 127.0.0.1:4444/wd/hub/status
| true
|
37fe49a98fa6fec34a35c37529780232d75d6959
|
Shell
|
dk-dev/osv
|
/scripts/gen-drivers-config-header
|
UTF-8
| 650
| 3.75
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/sh
# Generate a C header with one "#define CONF_<name> <value>" per
# "export conf_*" variable found in the per-arch base.mk profile.
# The OUTPUT file is only rewritten when its content actually changes, so
# build rules depending on the header are not invalidated needlessly.
#
# usage: gen-drivers-config-header ARCH OUTPUT
if [ "$#" -ne 2 ]; then
	echo "usage: $(basename $0) ARCH OUTPUT" >&2
	exit 1
fi
arch=$1
output=$2
# $(...) instead of legacy backticks; quote paths that may contain spaces.
drivers_config_base_file="$(dirname "$0")/../conf/profiles/$arch/base.mk"
tmp=$(mktemp)
cat >"$tmp" <<EOL
/* This file is generated automatically. */
#ifndef OSV_DRIVERS_CONFIG_H
#define OSV_DRIVERS_CONFIG_H
EOL
# Pull the conf_* names out of base.mk; awk resolves each one's value from
# the environment. (grep reads the file directly — no useless cat.)
grep "export conf" "$drivers_config_base_file" | cut --delimiter=_ -f 2- | cut --delimiter=? -f 1 | \
sort | uniq | awk '{ printf("#define CONF_%s %s\n", $0, ENVIRON["conf_"$0]) }' >> "$tmp"
cat >>"$tmp" <<EOL
#endif
EOL
if cmp -s "$tmp" "$output"
then
	rm "$tmp"
else
	mkdir -p "$(dirname "$output")"
	mv "$tmp" "$output"
fi
| true
|
1dabe7e9e870ef67ef5c2c03d36f6f7ea50a983e
|
Shell
|
crsantos/dotfiles
|
/scripts/bootstrap.sh
|
UTF-8
| 874
| 3.5
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# git pull origin master;
# Sync the local ./fish config into ~/.config/fish, excluding repo metadata.
# NOTE(review): the trailing `exec fish` REPLACES the current shell process,
# so nothing after a call to syncFishFolder ever runs (including the
# copyIndividualConfigFiles call in the confirmation branch below).
function syncFishFolder() {
rsync --exclude ".git/" \
--exclude ".DS_Store" \
--exclude ".osx" \
--exclude "scripts" \
--exclude "README.md" \
--exclude "LICENSE-MIT.txt" \
-avh --no-perms ./fish/ ~/.config/fish;
# Reload fish shell after that
exec /usr/local/bin/fish -l;
}
function copyIndividualConfigFiles() {
  # Copy the stand-alone dotfiles (asdf, git, ruby/gem, curl) into $HOME.
  local dotfile
  for dotfile in .asdfrc .gitconfig .gitignore_global .gemrc .curlrc; do
    cp "$dotfile" ~/
  done
}
# TODO: use it
function brewBundle() {
# Install everything listed in ~/Brewfile via Homebrew bundle.
cd ~/
# Run bundle command with Brewfile
brew bundle
}
# Entry point: sync immediately with -f/--force, otherwise ask first.
# ([[ ... || ... ]] replaces the deprecated/ambiguous `[ ... -o ... ]`.)
if [[ "$1" == "--force" || "$1" == "-f" ]]; then
  syncFishFolder;
else
  read -p "This may overwrite existing files in your home directory. Are you sure? (y/n) " -n 1;
  echo "";
  if [[ $REPLY =~ ^[Yy]$ ]]; then
    syncFishFolder;
    # NOTE(review): unreachable today — syncFishFolder ends in `exec fish`.
    copyIndividualConfigFiles;
  fi;
fi;
# `unset -f` explicitly targets the function name (plain `unset` tries a
# variable of that name first).
unset -f syncFishFolder;
| true
|
58e88f28d3df99f660fcd60f6cca892e07ec6537
|
Shell
|
adis300/mxnet-build
|
/build-ios.sh
|
UTF-8
| 564
| 3.125
| 3
|
[] |
no_license
|
#!/bin/bash
# Build mxnet's amalgamation for iOS and collect the static library in dist/.
# Remove previous build
rm -rf dist/
# Build mxnet in-case anything is not ready
./build-mxnet.sh
# Build amalgamation
AMALGAMATION_PATH=mxnet/amalgamation
SOURCE_FNAME=mxnet_predict-all.cc
BKUP_FNAME=${SOURCE_FNAME}.original
cd "${AMALGAMATION_PATH}"
echo Path changed
make
# Adapt source to iOS
# Keep a pristine copy so the iOS-adapted source can be reverted afterwards.
cp ${SOURCE_FNAME} ${BKUP_FNAME}
cd ../..
# ios_adapt.py rewrites the amalgamated source in place for iOS; the second
# `make` then rebuilds the library from the adapted source.
python ios_adapt.py
cd ${AMALGAMATION_PATH}
make
# Restore the original (pre-adaptation) source.
rm -f ${SOURCE_FNAME}
mv ${BKUP_FNAME} ${SOURCE_FNAME}
cd ../..
LIB_NAME=libmxnet_predict.a
mkdir dist
cp ${AMALGAMATION_PATH}/${LIB_NAME} dist/${LIB_NAME}
| true
|
c1e433d18cd37962472b961c31f3b6bfb0e22920
|
Shell
|
MasahiroSakoda/dotfiles
|
/home/.chezmoiscripts/run_onchange_after_21-python.sh.tmpl
|
UTF-8
| 406
| 2.75
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# -*-mode:sh-*- vim:ft=sh
# Configure Python tooling (pip + Poetry) against the asdf-managed Python.
set -eo pipefail
. "$(brew --prefix asdf)/libexec/asdf.sh"
# Show which Python asdf resolves. Running the command directly avoids the
# useless `echo $(cmd)` wrapper (which also mangled whitespace).
asdf current python
# Upgrade pip
python3 -m pip install --upgrade pip
# Poetry
## create .venv in your project
poetry config virtualenvs.create true
poetry config virtualenvs.in-project true
## use python via asdf
poetry config virtualenvs.prefer-active-python true
asdf reshim python
| true
|
96e26184025ebef9830bec4aff47b963cf95bb6c
|
Shell
|
brettjrea/Armhf_ChromeOS_Crouton_Ubuntu_Wordpress_Apache_MySql
|
/wp
|
UTF-8
| 1,836
| 3.5
| 4
|
[] |
no_license
|
#!/bin/sh -e
# Copyright (c) 2016 The crouton Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# crouton target: install WordPress via WP-CLI on a LAMP stack inside the
# chroot — database, theme, and an Apache site config included.
REQUIRES=''
DESCRIPTION='Basic installation of WordPress on a LAMP stack'
HOSTBIN='wp'
CHROOTBIN='croutonpowerd'
. "${TARGETSDIR:="$PWD"}/common"
###Download & Install WP-CLI.
if [ ! -f '/usr/local/bin/wp' ]; then
wget -O /tmp/wp-cli.phar https://raw.githubusercontent.com/wp-cli/builds/gh-pages/phar/wp-cli.phar
chmod +x /tmp/wp-cli.phar
mv /tmp/wp-cli.phar /usr/local/bin/wp
fi
###Start Mysql
service mysql start
###Download WP.
wp core download --path=/var/www/html/wordpress/ --allow-root
###Create wp-config.php
# NOTE(review): hard-coded root/password credentials — acceptable only for a
# throwaway local chroot, never for a network-reachable install.
wp config create --path=/var/www/html/wordpress/ --dbhost=localhost --dbname=wordpress --dbuser=root --dbpass=password --allow-root
###Create MySql DB.
wp db create --path=/var/www/html/wordpress/ --allow-root
###Install WP.
wp core install --path=/var/www/html/wordpress/ --url=http://localhost/wordpress/ --title=Example --admin_user=root --admin_password=password --admin_email=info@example.com --allow-root
###Install WP Theme Understrap
wp theme install --path=/var/www/html/wordpress/ understrap --allow-root
###Move to directory themes.
cd /var/www/html/wordpress/wp-content/themes/
###Download and Unzip Understrap-child.
wget https://github.com/understrap/understrap-child/archive/master.zip -O temp.zip;
unzip temp.zip;
rm temp.zip
###Fix permissions
chown -R www-data /var/www/html/wordpress/
###Create wordpress.conf to configure access to directory /var/www/html/wordpress/, enable & reload.
cat << EOF > /etc/apache2/sites-available/wordpress.conf
<Directory /var/www/html/wordpress/>
AllowOverride All
Require all granted
</Directory>
EOF
chmod u+x /etc/apache2/sites-available/wordpress.conf
a2ensite wordpress.conf
| true
|
33f41262d0ca29665247bc6b25dc3e0a5e834eb5
|
Shell
|
colonelpanic8/nixos-config
|
/home/config/scripts/wallpaper
|
UTF-8
| 1,622
| 3.9375
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
set -euo pipefail
# Resolve the directory containing this script so ./config (which defines
# setbg_dir and DMENU) can be sourced from any working directory.
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
source "$SCRIPT_DIR/config"
# Print CLI usage; ${setbg_dir} comes from the sourced config file.
help() {
# shellcheck disable=SC2154
printf '
## Usage
run dmwall without any argument to select a wallpaper from %s\n
dmwall [optional arguments]
-h\tDisplays this help menu
-d\tdraws the wallpaper at startx. Add "dmwall -d" in your xinitrc
-r\trandomly selects a wallpaper
-i\tGo insane
' "${setbg_dir}"
}
change() {
# Apply $1 as the stretched wallpaper; xwallpaper errors are discarded.
xwallpaper --stretch "$1" 2>/dev/null
}
shuffle() {
  # Print the path of one file chosen uniformly at random from ${setbg_dir}.
  wallpaper="$(find "${setbg_dir}" -type f | shuf -n 1)"
  printf '%s\n' "$wallpaper"
}
setbg() {
# Interactively pick a wallpaper with sxiv (thumbnail mode, -o prints the
# marked file), cache the chosen path, and apply it.
wallpaper="$(sxiv -t -o "${setbg_dir}")"
echo "$wallpaper" > "$HOME"/.cache/wall
change "$wallpaper"
}
# draw the wallpaper at startx
draw() {
  # Re-apply whichever wallpaper path was cached last.
  local cached
  cached="$(cat "$HOME"/.cache/wall)"
  change "$cached"
}
random() {
  # Pick a random wallpaper, persist it to the cache file, then apply it.
  wallpaper=$(shuffle)
  printf '%s\n' "$wallpaper" > "$HOME"/.cache/wall
  change "$wallpaper"
}
goInsane() {
# Cycle to a new random wallpaper every second, forever (until killed).
while true; do
wallpaper=$(shuffle)
change "$wallpaper"
sleep 1s
done
}
# dmenu menu
menu() {
# Present the three actions via dmenu ($DMENU from config); abort on escape.
# options="set background\nrandom\ngo insane"
input=$(printf "set wallpaper\nrandom\ngo insane" | ${DMENU} -i -l 3) || exit 1
case "$input" in
"set wallpaper") setbg ;;
"random") random ;;
"go insane") goInsane ;;
esac
}
# Parse short options; if none were given at all, fall back to the dmenu menu.
noOpt=1
while getopts "hdrim" arg 2>/dev/null; do
  case "${arg}" in
    h) help ;;
    d) draw ;;
    r) random ;;
    i) goInsane ;;
    # End the message with a newline so the shell prompt isn't glued to it.
    *) printf "invalid option\nType dmwall -h for help\n" ;;
  esac
  noOpt=0
done
# Use an explicit `if`: with `set -e`, the old `[ ... ] && menu` as the final
# command made the script exit with status 1 whenever options WERE given.
if [ "$noOpt" = 1 ]; then
  menu
fi
| true
|
f230111c591dac4817a45361a0893ac67d341634
|
Shell
|
harrisonlab/fusarium_ex_strawberry
|
/gene_pred_Scripts/busco_s11.sh
|
UTF-8
| 852
| 2.515625
| 3
|
[] |
no_license
|
#Edited paths from Andy's direc to mine
#Updated entire script using https://github.com/harrisonlab/bioinformatics_tools/blob/master/Gene_prediction/README.md
#Does not have quast segment in script like Andy's pipeline
#Look into BuscoDB direc - directory exists
#Run in conda env - BUSCOenv
#Ran on genome(softmasked) and gene models (final_genes_appended_renamed.gene.fasta)
# Submit a BUSCO completeness job (sordariomycetes lineage) for each matching
# assembly; organism/strain names are derived from the assembly path.
for Assembly in $(ls assembly/SMARTdenovo/F.oxysporum_fsp_lactucae/race_1/Pilon_SDen/FolR1_SDen_pilon.fasta); do
# Path layout: .../<Organism>/<Strain>/<dir>/<file>; rev|cut|rev selects
# components counted from the right-hand end of the path.
Strain=$(echo $Assembly| rev | cut -d '/' -f3 | rev)
Organism=$(echo $Assembly | rev | cut -d '/' -f4 | rev)
echo "$Organism - $Strain"
ProgDir=~/git_repos/fusarium_ex_strawberry/ProgScripts
BuscoDB=$(ls -d /projects/dbBusco/sordariomycetes_odb10)
# NOTE(review): output dir says "obd10" while the DB dir is "odb10" — likely
# a typo, but downstream paths may depend on it; confirm before renaming.
OutDir=$(dirname $Assembly)/busco_sordariomycetes_obd10
sbatch $ProgDir/busco.sh $Assembly $BuscoDB $OutDir
done
| true
|
2043f27286e3fa3f0bb6423b105c31f8b1b9ec76
|
Shell
|
zhh518/AppleHDAPatcher
|
/AppleHDAPatcher.app/Contents/Resources/ListCodec.command
|
UTF-8
| 7,580
| 3.171875
| 3
|
[] |
no_license
|
#!/bin/bash
# Maintained by: Mironeⓒ
#
# PCI ids : https://github.com/pciutils/pciids
#
# Using in AppleHAPatcher.app to identify installed audio codecs
#
# Mandatory Requirements:
# 1. AppleHDA.kext
#
printf "Codecs Detecteds:\n\n"
#
#
# detect installed codecs for Vendor id.
# ioreg lists IOHDACodecDevice entries; strip the leading ffffffff so only
# the 32-bit vendor/device id remains, then split out per-vendor HDMI lists
# (0x10de = NVidia, 0x1002 = AMD, 0x8086 = Intel).
ListCodec=$(ioreg -rxn IOHDACodecDevice | grep VendorID | awk '{ print $4 }' | sed -e 's/ffffffff//')
HDMInvidia=$(ioreg -rxn IOHDACodecDevice | grep VendorID | awk '{ print $4 }' | sed 's/ffffffff//' | grep '0x10de')
HDMIamd=$(ioreg -rxn IOHDACodecDevice | grep VendorID | awk '{ print $4 }' | sed 's/ffffffff//' | grep '0x1002')
HDMIintel=$(ioreg -rxn IOHDACodecDevice | grep VendorID | awk '{ print $4 }' | sed 's/ffffffff//' | grep '0x8086')
# no codecs detected.
if [[ -z "${ListCodec}" ]]; then
printf "No audio codec detected!!"
exit 1
fi
# Map each detected onboard codec id to a human-readable name.
# Only the LAST id in $ListCodec survives into the final printf below.
for Codec in $ListCodec
do
case ${Codec} in
#Desktop's.
0x10ec0885) Codec="Codec Name: Realtek ALC885\nVendor Id: $Codec";;
0x10ec0887) Codec="Codec Name: Realtek ALC887\nVendor Id: $Codec";;
0x10ec0888) Codec="Codec Name: Realtek ALC888\nVendor Id: $Codec";;
0x10ec0889) Codec="Codec Name: Realtek ALC889\nVendor Id: $Codec";;
0x10ec0892) Codec="Codec Name: Realtek ALC892\nVendor Id: $Codec";;
0x10ec0899) Codec="Codec Name: Realtek ALC898\nVendor Id: $Codec";;
0x10ec0900) Codec="Codec Name: Realtek ALC1150\nVendor Id: $Codec";;
0x11060441) Codec="Codec Name: VT2021\nVendor Id: $Codec";;
#Laptop's.
0x10ec0233) Codec="Codec Name: Realtek ALC233\nVendor Id: $Codec";;
0x10ec0235) Codec="Codec Name: Realtek ALC235\nVendor Id: $Codec";;
0x10ec0255) Codec="Codec Name: Realtek ALC255\nVendor Id: $Codec";;
0x10ec0268) Codec="Codec Name: Realtek ALC268\nVendor Id: $Codec";;
0x10ec0269) Codec="Codec Name: Realtek ALC269\nVendor Id: $Codec";;
0x10ec0270) Codec="Codec Name: Realtek ALC270\nVendor Id: $Codec";;
0x10ec0272) Codec="Codec Name: Realtek ALC272\nVendor Id: $Codec";;
0x10ec0275) Codec="Codec Name: Realtek ALC275\nVendor Id: $Codec";;
0x10ec0280) Codec="Codec Name: Realtek ALC280\nVendor Id: $Codec";;
0x10ec0282) Codec="Codec Name: Realtek ALC282\nVendor Id: $Codec";;
0x10ec0283) Codec="Codec Name: Realtek ALC283\nVendor Id: $Codec";;
0x10ec0284) Codec="Codec Name: Realtek ALC284\nVendor Id: $Codec";;
0x10ec0288) Codec="Codec Name: Realtek ALC288\nVendor Id: $Codec";;
0x10ec0290) Codec="Codec Name: Realtek ALC290\nVendor Id: $Codec";;
0x10ec0663) Codec="Codec Name: Realtek ALC663\nVendor Id: $Codec";;
0x10ec0668) Codec="Codec Name: Realtek ALC668\nVendor Id: $Codec";;
0x14f15067) Codec="Codec Name: Conexant 20583\nVendor Id: $Codec";;
0x14f15069) Codec="Codec Name: Conexant 20585\nVendor Id: $Codec";;
0x14F1506C) Codec="Codec Name: Conexant 20588\nVendor Id: $Codec";;
0x14F1506E) Codec="Codec Name: Conexant 20590\nVendor Id: $Codec";;
0x14F1510F) Codec="Codec Name: Conexant 20752\nVendor Id: $Codec";;
0x14f15114) Codec="Codec Name: Conexant 20756\nVendor Id: $Codec";;
0x14f15115) Codec="Codec Name: Conexant 20757\nVendor Id: $Codec";;
0x111d76f3) Codec="Codec Name: IDT 92HD66C3/65\nVendor Id: $Codec";;
0x111d76b2) Codec="Codec Name: IDT 92HD71B7X\nVendor Id: $Codec";;
0x111d7608) Codec="Codec Name: IDT 92HD75B2X5\nVendor Id: $Codec";;
0x111D7603) Codec="Codec Name: IDT 992HD75B3X5\nVendor Id: $Codec";;
0x111d7605) Codec="Codec Name: IDT 92HD87B1/92HD81B1X5\nVendor Id: $Codec";;
0x111d76e0) Codec="Codec Name: IDT 92HD91BXX\nVendor Id: $Codec";;
0x111d76e5) Codec="Codec Name: IDT 92HD99BXX\nVendor Id: $Codec";;
0x11068446) Codec="Codec Name: VIA VT1802\nVendor Id: $Codec";;
#Mac Codecs
0x10134206) Codec="Codec Name: Cirrus Logic CS4206\nVendor id: $Codec";;
0x10134208) Codec="Codec Name: Cirrus Logic CS4208\nVendor id: $Codec";;
# NOTE(review): 0x1AEC8800A is 9 hex digits (36 bits); a 32-bit HDA vendor
# id can never match it — verify this entry against the pciids list.
0x1AEC8800A) Codec="Codec Name: Cirrus Logic CS8409\nVendor id: $Codec";;
0x10138409) Codec="Codec Name: Wolfson WM8800\nVendor id: $Codec";;
# Typo fix in the user-visible fallback message: "Unknow" -> "Unknown".
*) Codec="Unknown onboard audio codec!!\nVendor id: $Codec";;
esac
done
printf "Onboard Audio Codec:\n$Codec\n\n"
# Map NVidia HDMI codec ids to GPU names.
for hdmi in $HDMInvidia
do
case ${hdmi} in
#NVidia HDMI Codecs.
0x10de000a) hdmi="Codec Name: NVidia GT216\nVendor id: $hdmi";;
0x10de000b) hdmi="Codec Name: NVidia GT216\nVendor id: $hdmi";;
0x10de0e08) hdmi="Codec Name: NVidia GF119\nVendor id: $hdmi";;
0x10de0e0a) hdmi="Codec Name: NVidia GK104\nVendor id: $hdmi";;
0x10de0e0b) hdmi="Codec Name: NVidia GK106\nVendor id: $hdmi";;
0x10de0e0c) hdmi="Codec Name: NVidia GF114\nVendor id: $hdmi";;
0x10de0e1a) hdmi="Codec Name: NVidia GK110\nVendor id: $hdmi";;
0x10de0e1b) hdmi="Codec Name: NVidia GK107\nVendor id: $hdmi";;
# (removed a second, identical 0x10de0e08 arm — `case` stops at the first
# match, so the duplicate was unreachable dead code)
0x10de0be2) hdmi="Codec Name: NVidia GT216\nVendor id: $hdmi";;
esac
done
# Map AMD HDMI codec ids to GPU names.
for hdmi in $HDMIamd
do
case ${hdmi} in
#AMD HDMI Codecs.
0x10021308) hdmi="Codec Name: AMD Kaveri HDMI/DP Audio Controller\nVendor id: $hdmi";;
0x10021314) hdmi="Codec Name: AMD Wrestler HDMI Audio\nVendor id: $hdmi";;
0x10021714) hdmi="Codec Name: AMD BeaverCreek HDMI Audio\nVendor id: $hdmi";;
0x1002793b) hdmi="Codec Name: AMD RS600 HDMI Audio\nVendor id: $hdmi";;
0x1002960f) hdmi="Codec Name: AMD RS780 HDMI Audio\nVendor id: $hdmi";;
0x1002970f) hdmi="Codec Name: AMD RS880 HDMI Audio\nVendor id: $hdmi";;
0x10029840) hdmi="Codec Name: AMD Kabini HDMI/DP Audio\nVendor id: $hdmi";;
0x10029902) hdmi="Codec Name: AMD Trinity HDMI Audio\nVendor id: $hdmi";;
0x1002aa00) hdmi="Codec Name: AMD R600 HDMI Audio\nVendor id: $hdmi";;
0x1002aa08) hdmi="Codec Name: AMD RV630 HDMI Audio\nVendor id: $hdmi";;
0x1002aa10) hdmi="Codec Name: AMD RV610 HDMI Audio\nVendor id: $hdmi";;
0x1002aa18) hdmi="Codec Name: AMD RV670/680 HDMI Audio\nVendor id: $hdmi";;
0x1002aa20) hdmi="Codec Name: AMD RV635 HDMI Audio\nVendor id: $hdmi";;
0x1002aa28) hdmi="Codec Name: AMD RV620 HDMI Audio\nVendor id: $hdmi";;
# Output-string cleanups below: removed a stray "\n " split in RV770,
# restored the missing "Vendor" in Turks/Whistler, dropped the doubled
# "AMD Name: AMD" in Tobago, and a trailing space after the Barts id.
0x1002aa30) hdmi="Codec Name: AMD RV770 HDMI Audio\nVendor id: $hdmi";;
0x1002aa38) hdmi="Codec Name: AMD RV710/730 HDMI Audio\nVendor id: $hdmi";;
0x1002aa50) hdmi="Codec Name: AMD Cypress HDMI Audio\nVendor id: $hdmi";;
0x1002aa58) hdmi="Codec Name: AMD Juniper HDMI Audio\nVendor id: $hdmi";;
0x1002aa60) hdmi="Codec Name: AMD Redwood HDMI Audio\nVendor id: $hdmi";;
0x1002aa68) hdmi="Codec Name: AMD Cedar HDMI Audio\nVendor id: $hdmi";;
0x1002aa80) hdmi="Codec Name: AMD Cayman/Antilles HDMI Audio\nVendor id: $hdmi";;
0x1002aa88) hdmi="Codec Name: AMD Barts HDMI Audio\nVendor id: $hdmi";;
0x1002aa90) hdmi="Codec Name: AMD Turks/Whistler HDMI Audio\nVendor id: $hdmi";;
0x1002aa98) hdmi="Codec Name: AMD Caicos HDMI Audio\nVendor id: $hdmi";;
0x1002aaa0) hdmi="Codec Name: AMD Tahiti XT HDMI Audio\nVendor id: $hdmi";;
0x1002aac0) hdmi="Codec Name: AMD Tobago HDMI Audio\nVendor id: $hdmi";;
0x1002aac8) hdmi="Codec Name: AMD Hawaii HDMI Audio\nVendor id: $hdmi";;
esac
done
for hdmi in $HDMIintel
do
case ${hdmi} in
#Intel HDMI Codecs.
0x80862807) hdmi="Codec Name: Intel Haswell HDMI\nVendor id: $hdmi";;
0x80862806) hdmi="Codec Name: Intel Haswell HDMI\nVendor id: $hdmi";;
*) hdmi="Codec Name: Unknow HDMI audio codec\nVendor id: $hdmi";;
esac
done
# $hdmi is reused by all three vendor loops above, so only the last HDMI
# codec processed is printed — even when several were detected.
printf "HDMI Audio Codec(s):\n$hdmi\n\n"
| true
|
fe2c9de3038fcfef2642e78c035c43c78d27aead
|
Shell
|
Tubbz-alt/dockerbuild
|
/dockerbuild/containerscripts/common.sh
|
UTF-8
| 1,536
| 3.703125
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Shared helpers for the container build scripts: common env vars plus the
# x-prefixed utility functions (xdie/xcleanup/xinstall/xinit/...).
set -x
set -e
export PKGDIR=/mnt/package
export OUTDIR=/mnt/output
export DEBIAN_FRONTEND=noninteractive
# Provides VERSION, VERSION_ID and (on newer releases) VERSION_CODENAME.
source /etc/os-release
xdie () {
  # Print an error message and abort the script with status 2.
  local msg=$1
  # Diagnostics belong on stderr, not stdout (the original wrote to stdout).
  printf "ERROR: %s\nABORT.\n" "$msg" >&2
  exit 2
}
xgitclean () {
# Remove untracked files and directories from the git work tree.
git clean -d -f
}
xarch () {
# Print this container's dpkg architecture (e.g. amd64).
dpkg --print-architecture
}
xupstreamversion () {
  # Print the most recent git tag with any trailing "-suffix" stripped,
  # e.g. "1.2.3-rc1" -> "1.2.3".
  local tag
  tag=$(git describe --tags --abbrev=0)
  echo "${tag%-*}"
}
xcurrentbranch () {
# Print the name of the currently checked-out git branch.
git rev-parse --abbrev-ref HEAD
return $?
}
xcleanup () {
# Remove the temporary apt source list and clear the package cache.
# Registered as an EXIT trap by xinit.
rm -f -- /etc/apt/sources.list.d/sources.list
apt-get clean
return $?
}
xinstall () {
# Install the given packages non-interactively.
apt-get install -y "$@"
return $?
}
xinit () {
# Configure apt deb-src entries appropriate for this Debian release, apply
# dpkg/debconf speed-ups, then update & upgrade. Registers xcleanup on exit.
trap xcleanup EXIT
# Debian <= 8 lives on archive.debian.org; its os-release lacks
# VERSION_CODENAME, so derive the codename from $VERSION.
if (( VERSION_ID <= 8 )); then
VERSION_CODENAME=$(<<<"$VERSION" tr -dc 'a-z')
cat >/etc/apt/sources.list.d/sources.list <<<"
deb-src http://archive.debian.org/debian/ ${VERSION_CODENAME} main contrib non-free";
else
cat >/etc/apt/sources.list.d/sources.list <<<"
deb-src http://deb.debian.org/debian/ ${VERSION_CODENAME} main contrib non-free
deb-src http://security.debian.org/ ${VERSION_CODENAME}/updates main contrib non-free";
fi
# Prevent automatic building of man pages
echo "man-db man-db/auto-update boolean false" | debconf-set-selections
# Speed up dpkg. When running containers in parallel on our build machine,
# this delivers a huge speed up because the available disk I/O bandwidth is so
# low.
echo "force-unsafe-io" > /etc/dpkg/dpkg.cfg.d/02speedup
apt-get update && apt-get upgrade -y
return $?
}
xinit
| true
|
e1251c68bc3e961731edb107c00a9494b6e2ec77
|
Shell
|
wdicarlo/wdc-do-commands
|
/list/do-list-netstatus
|
UTF-8
| 264
| 2.859375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Show listening sockets / network status on this host, elevated via do-super.
# Falls back to netstat+ss only when lsof is unavailable.
cmd="do-super"
# `command -v` is the portable replacement for `which`; the old unquoted
# backtick test also broke if `which` printed more than one word.
if ! command -v lsof >/dev/null 2>&1; then
  echo "Missing lsof command"
  cmd="$cmd sh -c \"netstat -lptu&& netstat -tulpn && ss\""
else
  cmd="$cmd sh -c \"lsof -i&& netstat -lptu&& netstat -tulpn && ss\""
fi
echo "> $cmd"
eval $cmd
| true
|
fd987d7d888f11a28f4052bebe44fbbdcff472f8
|
Shell
|
AdamStawarz/scripts
|
/stress_tools/cassandra-stress/test-cassandra-gaussian.sh
|
UTF-8
| 10,192
| 2.8125
| 3
|
[] |
no_license
|
#!/bin/bash
# Drive cassandra-stress from multiple loader hosts against a scylla/cassandra
# server using a gaussian key distribution; collects per-iteration client and
# server-side latency reports into $PWD.
# usage: $0 USER SCYLLA_HOST MODE   (MODE: r|rd|w|wr|mx|mx-no-wr|mxd|mxd-no-wr)
USER=$1
SCYLLA_HOST=$2
test_mode=$3
#USER=root
#SCYLLA_HOST_INTERNAL=10.100.53.133
# SCYLLA_HOST=( 147.75.107.46 147.75.107.30 147.75.193.234 )
# SCYLLA_HOST_INTERNAL=( 10.100.53.133 10.100.53.131 10.100.53.129 )
#SCYLLA_HOST=147.75.107.46
#test_mode=$2
STRESS_NUM=14
#NUM_KEYS=18000000
NUM_KEYS=9000000
GAUS_STDEV=10000
#NUM_KEYS=400000
#NUM_KEYS=250000
#SCYLLA_HOST=192.168.129.14
#SCYLLA_HOST=10.13.104.5
#SCYLLA_HOST=10.13.100.22
NUM_THREADS=100
OUT_BASE=$PWD
# Loader CPU layout: each stress instance gets a disjoint core range
# starting at CORE_START (pinned with taskset by the test_* functions).
NUM_CORES=152
CORE_START=8
CORES_PER_INST=$((NUM_CORES / STRESS_NUM))
LOADERS=( 10.13.104.9 10.13.104.8 )
#LOADERS=( 10.13.104.9 )
ITERATIONS=1
POP_WIDTH=$NUM_KEYS
#RATE_LIMIT="fixed=80000/s"
RATE_LIMIT=""
#POP_WIDTH=$NUM_KEYS
SSH_CMD="ssh $USER"
SSH_LOADER_CMD="ssh user1"
#SCYLLA_START_CMD=`$SSH_CMD@$SCYLLA_HOST cat /home/$USER/scylla-ccm/start_cmd.txt`
CS_CMD="/home/user1/scylla-tools-java/tools/bin/cassandra-stress"
#CS_CMD="/home/user1/cassandra/tools/bin/cassandra-stress"
get_server_latencies()
{
# Dump server-side cfhistograms, one file per loader keyspace, tagged with
# the test type ($1) and iteration number ($2).
local test_type=$1
local itn=$2
local i
for ((i = 0; i < ${#LOADERS[@]}; i++))
do
$SSH_CMD@$SCYLLA_HOST /home/$USER/scylla-tools-java/bin/nodetool cfhistograms ks$i standard1 > $OUT_BASE/$test_type-server-latencies-$itn-$i.txt 2>&1
done
}
CS_PARAMS()
{
# Emit the common cassandra-stress arguments using a *gaussian* key
# distribution over this instance's slice of the key space.
# $1 keyspace id, $2 instance id (selects the key slice),
# $3 thread count, $4 number of operations.
local ks_id=$1
local it_id=$2
local nthreads=$3
local num_itarations=$4
local min=$((1+POP_WIDTH*it_id))
local max=$((POP_WIDTH*(it_id+1)))
local mean=$(( (min + max) / 2))
#echo "no-warmup n=$num_itarations -node $SCYLLA_HOST -rate threads=$nthreads $RATE_LIMIT -mode native cql3 -pop seq=$((1+POP_WIDTH*it_id))..$((POP_WIDTH*(it_id+1))) -schema keyspace=ks$ks_id replication\(strategy=NetworkTopologyStrategy, datacenter1=1\)"
echo "no-warmup n=$num_itarations -node $SCYLLA_HOST -rate threads=$nthreads $RATE_LIMIT -mode native cql3 -pop dist=gaussian\($min..$max,$mean,$GAUS_STDEV\) -schema keyspace=ks$ks_id replication\(strategy=NetworkTopologyStrategy, datacenter1=1\)"
}
CS_PARAMS_UNI()
{
# Same as CS_PARAMS but with a *sequential* (uniform) key population.
# $1 keyspace id, $2 instance id, $3 thread count, $4 number of operations.
local ks_id=$1
local it_id=$2
local nthreads=$3
local num_itarations=$4
echo "no-warmup n=$num_itarations -node $SCYLLA_HOST -rate threads=$nthreads $RATE_LIMIT -mode native cql3 -pop seq=$((1+POP_WIDTH*it_id))..$((POP_WIDTH*(it_id+1))) -schema keyspace=ks$ks_id replication\(strategy=NetworkTopologyStrategy, datacenter1=1\)"
}
stress_write_cmd()
{
# Build a gaussian-distribution WRITE command line for one stress instance.
local ks_id=$1
local it_id=$2
local nthreads=$3
local num_itarations=$4
echo "$CS_CMD write $(CS_PARAMS $ks_id $it_id $nthreads $num_itarations)"
}
stress_write_uni_cmd()
{
# Build a uniform/sequential WRITE command line for one stress instance.
local ks_id=$1
local it_id=$2
local nthreads=$3
local num_itarations=$4
echo "$CS_CMD write $(CS_PARAMS_UNI $ks_id $it_id $nthreads $num_itarations)"
}
stress_read_cmd()
{
# Build a READ command line (gaussian distribution, default thread count).
local ks_id=$1
local it_id=$2
echo "$CS_CMD read $(CS_PARAMS $ks_id $it_id $NUM_THREADS $NUM_KEYS)"
}
stress_mixed_cmd()
{
# Build a MIXED (2 writes : 8 reads) command line, gaussian distribution.
local ks_id=$1
local it_id=$2
echo "$CS_CMD mixed ratio\(write=2,read=8\) $(CS_PARAMS $ks_id $it_id $NUM_THREADS $NUM_KEYS)"
}
clear_and_restart()
{
# Stop cassandra on the server, wait until the JVM is gone, wipe the ccm
# data, then start it again (waiting for the binary protocol port).
local i=1
echo "$SCYLLA_HOST: Stopping cassandra..."
$SSH_CMD@$SCYLLA_HOST "pkill java" &> /dev/null
# BUG FIX: the old loop wrapped this pipeline in backticks, which made the
# shell EXECUTE the grep output as a command — the loop condition never
# reflected whether java was still running. Run the whole check remotely
# and test its exit status instead.
while $SSH_CMD@$SCYLLA_HOST "ps -elf | grep java | grep cassandra | grep -v grep" &> /dev/null
do
echo "Waiting $i..."
sleep 1
let "i++"
done
echo "$SCYLLA_HOST: Clearing data..."
$SSH_CMD@$SCYLLA_HOST "cd /home/$USER/ccm; ccm clear" &> /dev/null
echo "$SCYLLA_HOST: Starting cassandra..."
$SSH_CMD@$SCYLLA_HOST "cd /home/$USER/ccm; ccm start --wait-for-binary-proto" &> /dev/null
}
restart_scylla()
{
# Stop cassandra via ccm, wait for the JVM to exit, then start it again
# (used to drop the page cache so reads come from disk).
local i=1
echo "$SCYLLA_HOST: Stopping cassandra..."
$SSH_CMD@$SCYLLA_HOST "cd /home/$USER/ccm; ccm stop" &> /dev/null
# BUG FIX: same backtick-condition defect as clear_and_restart — the grep
# output was being executed as a command. Test the remote pipeline's exit
# status directly.
while $SSH_CMD@$SCYLLA_HOST "ps -elf | grep java | grep cassandra | grep -v grep" &> /dev/null
do
echo "Waiting $i..."
sleep 1
let "i++"
done
echo "$SCYLLA_HOST: Starting cassandra..."
$SSH_CMD@$SCYLLA_HOST "cd /home/$USER/ccm; ccm start --wait-for-binary-proto" &> /dev/null
}
test_write()
{
# Write test ($1 iterations): clear/restart the server, create each loader's
# keyspace with a short single-threaded write, then fan out STRESS_NUM
# core-pinned stress instances per loader and collect server latencies.
local iterations=$1
echo "Write test. $iterations iterations..."
local i
local itn
local loader
local ld
# local total_stress_inst=$((STRESS_NUM*${#LOADERS[@]}))
for ((itn=0; itn < iterations; itn++))
do
echo -e "Iteration $itn..."
clear_and_restart
# Create the KS with a short single thread WRITE
for ((ld = 0; ld < ${#LOADERS[@]}; ld++))
do
echo "${LOADERS[$ld]}: $(stress_write_cmd $ld 0 1 1000)"
$SSH_LOADER_CMD@${LOADERS[$ld]} "$(stress_write_cmd $ld 0 1 1000)"
done
local j=0
for ((ld = 0; ld < ${#LOADERS[@]}; ld++))
do
for ((i = 0; i < STRESS_NUM; i++))
do
# Pin each instance to its own core range; run in background, log per-instance.
echo "${LOADERS[$ld]}: taskset -c $((CORE_START + i * CORES_PER_INST))-$((CORE_START + (i+1) * CORES_PER_INST - 1)) $(stress_write_cmd $ld $i $NUM_THREADS $NUM_KEYS)" > $OUT_BASE/write-out-$itn-$j.txt
$SSH_LOADER_CMD@${LOADERS[$ld]} "taskset -c $((CORE_START + i * CORES_PER_INST))-$((CORE_START + (i+1) * CORES_PER_INST - 1)) $(stress_write_cmd $ld $i $NUM_THREADS $NUM_KEYS)" >> $OUT_BASE/write-out-$itn-$j.txt 2>&1 &
echo "starting write test instance $j..."
sleep 0.1
j=$((j+1))
done
done
wait
get_server_latencies write $itn
done
}
test_write_uni()
{
# Same as test_write, but the bulk writes use a UNIFORM key distribution.
local iterations=$1
echo "Write test with a UNIFORM distribution. $iterations iterations..."
local i
local itn
local loader
local ld
# local total_stress_inst=$((STRESS_NUM*${#LOADERS[@]}))
for ((itn=0; itn < iterations; itn++))
do
echo -e "Iteration $itn..."
clear_and_restart
# Create the KS with a short single thread WRITE
# NOTE(review): keyspace creation still uses the gaussian stress_write_cmd
# here (not the _uni variant) — confirm that is intentional.
for ((ld = 0; ld < ${#LOADERS[@]}; ld++))
do
echo "${LOADERS[$ld]}: $(stress_write_cmd $ld 0 1 1000)"
$SSH_LOADER_CMD@${LOADERS[$ld]} "$(stress_write_cmd $ld 0 1 1000)"
done
local j=0
for ((ld = 0; ld < ${#LOADERS[@]}; ld++))
do
for ((i = 0; i < STRESS_NUM; i++))
do
echo "${LOADERS[$ld]}: taskset -c $((CORE_START + i * CORES_PER_INST))-$((CORE_START + (i+1) * CORES_PER_INST - 1)) $(stress_write_uni_cmd $ld $i $NUM_THREADS $NUM_KEYS)" > $OUT_BASE/write-out-$itn-$j.txt
$SSH_LOADER_CMD@${LOADERS[$ld]} "taskset -c $((CORE_START + i * CORES_PER_INST))-$((CORE_START + (i+1) * CORES_PER_INST - 1)) $(stress_write_uni_cmd $ld $i $NUM_THREADS $NUM_KEYS)" >> $OUT_BASE/write-out-$itn-$j.txt 2>&1 &
echo "starting write test instance $j..."
sleep 0.1
j=$((j+1))
done
done
wait
get_server_latencies write $itn
done
}
test_read()
{
# Read test over ITERATIONS iterations. Pass any non-empty $1 to restart
# the server first so reads hit disk instead of cache.
echo "Read test. $ITERATIONS iterations..."
local rd_from_disk="$1"
local i
local j
local itn
local ld
for ((itn=0; itn < ITERATIONS; itn++))
do
j=0
echo -e "Iteration $itn..."
[[ -n "$rd_from_disk" ]] && restart_scylla
for ((ld = 0; ld < ${#LOADERS[@]}; ld++))
do
for ((i = 0; i < STRESS_NUM; i++))
do
echo "${LOADERS[$ld]}: taskset -c $((CORE_START + i * CORES_PER_INST))-$((CORE_START + (i+1) * CORES_PER_INST - 1)) $(stress_read_cmd $ld $i)" > $OUT_BASE/read-out-$itn-$j.txt
$SSH_LOADER_CMD@${LOADERS[$ld]} "taskset -c $((CORE_START + i * CORES_PER_INST))-$((CORE_START + (i+1) * CORES_PER_INST - 1)) $(stress_read_cmd $ld $i)" >> $OUT_BASE/read-out-$itn-$j.txt 2>&1 &
echo "starting read test instance $j..."
sleep 0.1
j=$((j+1))
done
done
wait
get_server_latencies read $itn
done
}
test_mixed()
{
# Mixed workload (2 writes : 8 reads). Optional $1 modifiers:
#   "rd"        - restart the server first (read from disk)
#   "no-wr"     - skip the preparatory uniform write
#   "rd-no-wr"  - both of the above
echo "Mixed (2 writes 8 read) test. $ITERATIONS iterations..."
local arg="$1"
local rd_from_disk=""
local no_write=""
local rate_limit="$RATE_LIMIT"
case "$arg" in
"rd")
rd_from_disk="1"
;;
"rd-no-wr")
rd_from_disk="1"
no_write="1"
;;
"no-wr")
no_write="1"
;;
esac
local i
local j
local itn
local ld
for ((itn=0; itn < ITERATIONS; itn++))
do
j=0
echo -e "Iteration $itn..."
# Populate without rate limiting, then restore the limit for the mixed run.
RATE_LIMIT=""
[[ -z "$no_write" ]] && test_write_uni 1
RATE_LIMIT="$rate_limit"
[[ -n "$rd_from_disk" ]] && restart_scylla
for ((ld = 0; ld < ${#LOADERS[@]}; ld++))
do
for ((i = 0; i < STRESS_NUM; i++))
do
echo "${LOADERS[$ld]}: taskset -c $((CORE_START + i * CORES_PER_INST))-$((CORE_START + (i+1) * CORES_PER_INST - 1)) $(stress_mixed_cmd $ld $i)" > $OUT_BASE/mixed-out-$itn-$j.txt
$SSH_LOADER_CMD@${LOADERS[$ld]} "taskset -c $((CORE_START + i * CORES_PER_INST))-$((CORE_START + (i+1) * CORES_PER_INST - 1)) $(stress_mixed_cmd $ld $i)" >> $OUT_BASE/mixed-out-$itn-$j.txt 2>&1 &
echo "starting mixed test instance $j..."
sleep 0.1
j=$((j+1))
done
done
wait
get_server_latencies mixed $itn
done
}
test_read_from_disk()
{
# Read test variant that restarts the server first so reads come from disk.
test_read rd
}
intr_handler()
{
# On SIGINT/SIGTERM: kill the stress JVMs on every loader, then exit 1.
local loader
# Quote the array expansion so loader entries can never be word-split.
for loader in "${LOADERS[@]}"
do
echo "$loader: stopping java..."
$SSH_LOADER_CMD@$loader "pkill java"
done
exit 1
}
# Clean up remote stress processes if the driver is interrupted.
trap 'intr_handler' INT TERM
# Dispatch on the requested test mode (third CLI argument).
case "$test_mode" in
"r")
test_read
;;
"rd")
test_read_from_disk
;;
"w")
test_write $ITERATIONS
;;
"wr")
test_write $ITERATIONS
test_read
;;
"mx")
test_mixed
;;
"mx-no-wr")
test_mixed "no-wr"
;;
"mxd")
test_mixed rd
;;
"mxd-no-wr")
test_mixed "rd-no-wr"
;;
*)
echo "Bad test mode: $test_mode"
;;
esac
| true
|
e76370f4a496fea2c41bfbfc84830fd6b7b049ce
|
Shell
|
encryptme/private-end-points-docker
|
/test.sh
|
UTF-8
| 3,646
| 3.6875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash -ux
# Test driver for the private end-point container: wraps go.sh / build.sh to
# clean, build, init, run, reset, or full-cycle a VPN either locally or on a
# remote host. All configuration comes from environment variables (see usage).
REG_KEY="${REG_KEY:-}"
API_URL="${API_URL:-}"
SSL_EMAIL="${SSL_EMAIL:-}"
PEP_IMAGE="${PEP_IMAGE:-}"
BRANCH="${BRANCH:-}"
STATS_SERVER="${STATS_SERVER:-}"
REMOTE_USER="${REMOTE_USER:-}"
REMOTE_HOST="${REMOTE_HOST:-}"
REMOTE_HOST_IP="${REMOTE_HOST_IP:-}"
fail() {
    # Print an error to stderr and exit ($2 = status, default 1).
    # BUG FIX: the message was sent to >&1 (stdout); diagnostics belong on
    # stderr so they are not captured by command substitution.
    echo "! ${1:-error}" >&2
    exit ${2:-1}
}
usage() {
    # Print CLI usage plus the environment variables the script reads.
    cat <<EOI
usage: $0 init|run|reset|clean|build|cycle [remote]
ENV VARS:
BRANCH
REG_KEY
API_URL
SSL_EMAIL
PEP_IMAGE
REMOTE_USER (optional)
REMOTE_HOST (optional)
REMOTE_HOST_IP (optional)
EOI
}
# Require a command; the optional second arg selects local vs remote mode.
[ $# -ge 1 ] || {
    usage
    fail "no command given"
}
cd $(dirname "$0")
action="$1"
where="${2:-local}"
shift
shift
[ -n "$API_URL" ] || fail "env var API_URL not set"
[ -n "$SSL_EMAIL" ] || fail "env var SSL_EMAIL not set"
[ -n "$PEP_IMAGE" ] || fail "env var PEP_IMAGE not set"
[ -n "$BRANCH" ] || fail "env var BRANCH not set"
# BUG FIX: the REMOTE_* vars are only used on the remote code paths, but the
# original condition was inverted ("= 'remote' ||") and demanded them for
# *local* runs while skipping the check for remote ones.
[ "$where" != 'remote' ] || {
    [ -n "$REMOTE_USER" ] || fail "env var REMOTE_USER not set"
    [ -n "$REMOTE_HOST" ] || fail "env var REMOTE_HOST not set"
    [ -n "$REMOTE_HOST_IP" ] || fail "env var REMOTE_HOST_IP not set"
}
# clean: remove any existing VPN container/image (locally or via --remote).
[ $action = "clean" -o $action = "cycle" ] && {
if [ "$where" = 'remote' ]; then
./go.sh --remote "$REMOTE_USER@$REMOTE_HOST" \
clean \
-v -i $PEP_IMAGE "$@" \
|| fail "Failed to perform --remote clean"
else
./go.sh clean \
-v -i $PEP_IMAGE "$@" \
|| fail "Failed to perform clean"
fi
}
# build: produce a dev image for the requested branch and tag it.
[ $action = "build" -o $action = "cycle" ] && {
./build.sh -e dev -b $BRANCH -p -t "$PEP_IMAGE"
}
# init: register the end-point; prompts for a registration key unless
# REG_KEY was provided in the environment.
[ $action = "init" -o $action = "cycle" ] && {
reg_key=${REG_KEY:-}
while [ -z "$reg_key" ]; do
read -p "Server registration key: " reg_key
done
if [ "$where" = 'remote' ]; then
./go.sh \
--remote $REMOTE_USER@$REMOTE_HOST \
init \
--non-interactive \
-e $SSL_EMAIL \
--api-url "$API_URL" \
--pull-image \
-i $PEP_IMAGE \
--dns-test-ip "$REMOTE_HOST_IP" \
--slot-key "$reg_key" \
--server-name "$BRANCH-testing.$$" \
-v \
"$@" || fail "Failed to init VPN"
else
./go.sh \
init \
-e $SSL_EMAIL \
--non-interactive \
--api-url "$API_URL" \
-i $PEP_IMAGE \
--slot-key "$reg_key" \
--server-name "$BRANCH-testing.$$" \
-v \
"$@" || fail "Failed to init VPN"
fi
}
# run: start the VPN with stats reporting.
# NOTE(review): STATS_KEY is used here but never defaulted above; with the
# shebang's `-u`, an unset STATS_KEY aborts the script at this point.
[ $action = "run" -o $action = "cycle" ] && {
if [ "$where" = 'remote' ]; then
./go.sh \
--remote $REMOTE_USER@$REMOTE_HOST \
run \
--api-url "$API_URL" \
--stats \
--stats-extra \
--stats-server "$STATS_SERVER" \
--stats-key "$STATS_KEY" \
-e $SSL_EMAIL \
-i $PEP_IMAGE \
-v \
"$@" || fail "Failed to run VPN"
else
./go.sh run \
--api-url "$API_URL" \
--stats \
--stats-extra \
--stats-server "$STATS_SERVER" \
--stats-key "$STATS_KEY" \
-e $SSL_EMAIL \
-i $PEP_IMAGE \
-v \
"$@" || fail "Failed to run VPN"
fi
}
# reset: wipe the VPN state.
[ $action = "reset" ] && {
if [ "$where" = 'remote' ]; then
./go.sh \
--remote $REMOTE_USER@$REMOTE_HOST \
reset \
-v -i $PEP_IMAGE "$@"
else
./go.sh reset \
-v -i $PEP_IMAGE "$@"
fi
}
echo "PID: $$"
| true
|
8dd83a9389e9e8a941e50b9e97398dc122a353ef
|
Shell
|
GavinNL/linux_fun
|
/file_image_mount/run
|
UTF-8
| 524
| 3.578125
| 4
|
[] |
no_license
|
#!/bin/bash
# Demo: create a 10MB file, format it as ext4, loop-mount it, and seed it
# with a few files.
#create a blank file which is 10Mb in size
dd if=/dev/zero of=$PWD/file.img bs=1M count=10
#format the file as an ext4 file system
mkfs.ext4 file.img
# make the mount point (-p: don't fail if it already exists on a re-run)
mkdir -p mnt
# mount it in the folder
sudo mount -o loop file.img $PWD/mnt
# We need to change the permissions of the folder otherwise only Root can write files
# into the filesystem
sudo chmod 777 $PWD/mnt
# Add some files into the folder
touch $PWD/mnt/file_{1,2,3,4}.txt
echo Unmount the folder by typing \"sudo umount $PWD/mnt\"
| true
|
59e5686d1a4c93be6d41ebbcba043117127c5c8d
|
Shell
|
leam18/dockerS1
|
/EntornoDockerS1.sh
|
UTF-8
| 622
| 3.15625
| 3
|
[] |
no_license
|
#!/bin/bash
# Build the throwaway Composer image, install the project's PHP dependencies,
# remove the image, fix ownership, and bring up the docker-compose stack.
echo "Creando imagen de composer e instalando dependencias..."
# BUG FIX: redirections apply left to right — ">/dev/null 2>&1" silences
# BOTH streams, whereas the original "2>&1 >/dev/null" pointed stderr at the
# terminal and only hid stdout.
docker build -f DockerfileComposer . -t alpinecomposer:latest >/dev/null 2>&1 && docker run --rm -v $(pwd):/var/www alpinecomposer "composer install" >/dev/null 2>&1
docker rmi alpinecomposer >/dev/null 2>&1
echo "Instaladas dependencias del proyecto"
# Re-own the project tree for the current non-root user.
sudo chown -R $USER:$USER $(pwd)
# Bring up docker-compose with php-fpm and nginx.
echo "Levantando entorno"
docker-compose up -d >/dev/null 2>&1
echo "Done"
| true
|
c1996cdc9e39a8b84687bcd1606e199b00431890
|
Shell
|
theos/templates
|
/build.sh
|
UTF-8
| 716
| 3.984375
| 4
|
[
"CC0-1.0"
] |
permissive
|
#!/usr/bin/env bash
set -e
if [[ "$#" -gt 1 || "$1" == "--help" || "$1" == "-h" ]]; then
echo "Usage: $0 [template_name|-h|--help]"
exit 1
fi
if [[ "$#" == 1 ]]; then
templateName="$1"
fi
# For each directory containing one or more valid NIC templates' source
templateDirs=$(find . -type d -name NIC -exec dirname {} \; | xargs -n1 dirname | sort -u)
for d in $templateDirs; do
pushd $d &> /dev/null
# For each valid NIC template source
templateSubDirs=$(find . -type d -name NIC -exec dirname {} \;)
for f in $templateSubDirs; do
# Build the template
if [[ -z $templateName || $templateName == $(basename $f) ]]; then
$THEOS/bin/nicify.pl $f
mv *.nic.tar ../
fi
done
popd &> /dev/null
done
| true
|
aaea10f716fae88442d486dfb4546328d78981b7
|
Shell
|
PoCta/PCF-demo
|
/ci/tasks/ft.sh
|
UTF-8
| 1,767
| 3.890625
| 4
|
[] |
no_license
|
#!/bin/sh
inputDir= outputDir= artifactId= packaging= inputManifest=
# optional
hostname=$CF_MANIFEST_HOST # default to env variable from pipeline
echo "hello ut test"
while [ $# -gt 0 ]; do
case $1 in
-i | --input-dir )
inputDir=$2
shift
;;
-o | --output-dir )
outputDir=$2
shift
;;
-a | --artifactId )
artifactId=$2
shift
;;
-p | --packaging )
packaging=$2
shift
;;
f | --input-manifest )
inputManifest=$2
shift
;;
-n | --hostname )
hostname=$2
shift
;;
* )
echo "Unrecognized option: $1" 1>&2
exit 1
;;
esac
shift
done
error_and_exit() {
echo $1 >&2
exit 1
}
if [ ! -d "$inputDir" ]; then
error_and_exit "missing input directory: $inputDir"
fi
if [ ! -d "$outputDir" ]; then
error_and_exit "missing output directory: $outputDir"
fi
if [ -z "$artifactId" ]; then
error_and_exit "missing artifactId!"
fi
if [ -z "$packaging" ]; then
error_and_exit "missing packaging!"
fi
if [ ! -f "$inputManifest" ]; then
error_and_exit "missing input manifest: $inputManifest"
fi
version="0.1"
artifactName="${artifactId}-${version}.${packaging}"
cd $inputDir
./mvnw clean package -Pci -DversionNumber=$version
# Copy war file to concourse output folder
cd ..
cp $inputDir/target/$artifactName $outputDir/$artifactName
# copy the manifest to the output directory and process it
outputManifest=$outputDir/manifest.yml
cp $inputManifest $outputManifest
# the path in the manifest is always relative to the manifest itself
sed -i -- "s|path: .*$|path: $artifactName|g" $outputManifest
if [ ! -z "$hostname" ]; then
sed -i "s|host: .*$|host: ${hostname}|g" $outputManifest
fi
cat $outputManifest
| true
|
b9ed2ad9cbb44c86553ca000629cb83dd6eda0d2
|
Shell
|
evoltech/sympa
|
/tools/configfixin/topicfix
|
UTF-8
| 621
| 3.46875
| 3
|
[] |
no_license
|
#!/bin/sh
# Written by micah to deal with topics having commas at the end
# 07/15/03
# Updated by micah to deal with "Argument list too long" error - 07/27/03
listsdir=/crypt/sympa/expl
for list in `find $listsdir -maxdepth 3 -type f -name config \
| xargs egrep "topics .*,$" | cut -d/ -f6`
do
topic=`cat $listsdir/$list/config |grep topics`
echo "Correcting $list:$topic"
cp $listsdir/$list/config $listsdir/$list/config_bak.topicfix
cat $listsdir/$list/config_bak.topicfix | sed 's/,*$//' > $listsdir/$list/config
topic=`cat $listsdir/$list/config |grep topics`
echo "Fixed topic: $topic"
echo ""
done
| true
|
662decda2269f998f92a96b7bf501930a1b7c5f8
|
Shell
|
Qarik-Group/buildkite-cloudfoundry-demo-app
|
/ci/agent/linode-stackscript.sh
|
UTF-8
| 3,198
| 3.5625
| 4
|
[] |
no_license
|
#!/bin/sh
# This script is the StackScript for Linode
exec >/var/log/stackscript.log 2>&1
set -eux
# <UDF name="buildkite_token" Label="Buildkite account token" />
# <UDF name="buildkite_spawn" Label="The number of agents to spawn in parallel" default="5" />
# <UDF name="buildkite_secrets_bucket" Label="[optional] AWS S3 bucket containing secrets" default="" />
# <UDF name="aws_access_key" Label="[optional] AWS access key for S3 buckets" default="" />
# <UDF name="aws_secret_password" Label="[optional] AWS access secret key for S3 buckets" default="" />
LINODE_STACK=${LINODE_STACK:-633367}
BUILDKITE_QUEUE=${BUILDKITE_QUEUE:-default}
# explicit aws installation to support alpine
install_aws() {
apk add openssh-client groff less -uUv --force-overwrite
apk --update add --virtual .build-dependencies python3-dev libffi-dev openssl-dev build-base
pip3 install --no-cache --upgrade \
requests \
awscli \
awsebcli \
boto3 \
cfn-flip \
cfn-lint \
PyYAML \
sceptre
mkdir ~buildkite/.aws
cat > ~buildkite/.aws/config <<CONFIG
[default]
region = us-east-1
CONFIG
cat > ~buildkite/.aws/credentials <<CREDS
[default]
aws_access_key_id = ${AWS_ACCESS_KEY}
aws_secret_access_key = ${AWS_SECRET_PASSWORD}
CREDS
chown -Rh buildkite:buildkite ~buildkite/.aws
chmod 700 ~buildkite/.aws
chmod 600 ~buildkite/.aws/*
}
install_s3_plugin() {
S3_SECRETS_DIR=~buildkite/.buildkite-agent/plugins/elastic-ci-stack-s3-secrets-hooks
git clone \
https://github.com/buildkite/elastic-ci-stack-s3-secrets-hooks \
$S3_SECRETS_DIR
cat > ~buildkite/.buildkite-agent/hooks/environment <<SHELL
export BUILDKITE_PLUGIN_S3_SECRETS_BUCKET="$BUILDKITE_SECRETS_BUCKET"
source $S3_SECRETS_DIR/hooks/environment
SHELL
}
apk add curl docker bash git ca-certificates
rc-update add docker boot
service docker start
# Create buildkite user/group
addgroup -g 100000 buildkite
adduser -G buildkite -u 100000 -D buildkite
addgroup buildkite docker
TOKEN="$BUILDKITE_TOKEN" bash -c "`curl -sL https://raw.githubusercontent.com/buildkite/agent/master/install.sh`"
BUILDKITE_DIR=/home/buildkite/.buildkite-agent
mv /root/.buildkite-agent $BUILDKITE_DIR
DOCKER_VERSION=$(docker --version | cut -f3 -d' ' | sed 's/,//')
export BUILDKITE_AGENT_NAME="linode-$LINODE_ID-dc-$LINODE_DATACENTERID"
sed -i "s/name=.*$/name=\"$BUILDKITE_AGENT_NAME\"/g" $BUILDKITE_DIR/buildkite-agent.cfg
cat <<CFG >> $BUILDKITE_DIR/buildkite-agent.cfg
spawn="$BUILDKITE_SPAWN"
tags=queue=${BUILDKITE_QUEUE},docker=${DOCKER_VERSION},linode-stack=${LINODE_STACK},linode-id=${LINODE_ID},linode-ram=${LINODE_RAM},linode-dc-id=${LINODE_DATACENTERID}
tags-from-host=true
CFG
[[ -n "${BUILDKITE_SECRETS_BUCKET:-}" && -n "${AWS_ACCESS_KEY:-}" && -n "${AWS_SECRET_PASSWORD:-}" ]] && {
echo "--> Setup AWS S3 buckets"
install_aws
echo "--> Install S3 plugin"
install_s3_plugin
}
chown -Rh buildkite:buildkite $BUILDKITE_DIR
curl -L https://raw.githubusercontent.com/starkandwayne/buildkite-cloudfoundry-demo-app/master/ci/agent/buildkite-agent.openrc.sh > /etc/init.d/buildkite-agent
chmod +x /etc/init.d/buildkite-agent
rc-update add buildkite-agent
service buildkite-agent start
| true
|
08a6065b6bc588281b5d8a3f581e8047977862e4
|
Shell
|
lukschwalb/Dotfiles
|
/scripts/o
|
UTF-8
| 456
| 3.25
| 3
|
[] |
no_license
|
#!/bin/bash
#########################################################
#Script Name : o (stands for open)
#Description : Executes the command you enter and
# detaches it from the current shell
#Args : No limit, just the command e.g. : o echo test
#Author : Luk Schwalb
#Email : schwalb@luk.im
#########################################################
sum=""
for arg in "$@"
do
sum="${sum} ${arg}"
done
eval "( $sum & )"
| true
|
beaac77e75256a8dba1911f8791d78b64ce6ad21
|
Shell
|
Qwaz/ctf-env-setup
|
/optional/qemu.sh
|
UTF-8
| 634
| 2.65625
| 3
|
[] |
no_license
|
$INSTALL gdb-multiarch qemu binfmt-support qemu-user-static
update-binfmts --display
if [ $KERNEL_OPT -eq 1 ]; then
echo 0 | $SUDO tee /proc/sys/vm/mmap_min_addr
echo "vm.mmap_min_addr = 0" | $SUDO tee /etc/sysctl.d/mmap_min_addr.conf
fi
$SUDO mkdir -p /etc/qemu-binfmt
# apt-cache search '^libc6-[^-]+-cross'
$INSTALL libc6-arm64-cross libc6-armhf-cross libc6-mips-cross libc6-mipsel-cross
$SUDO ln -s /usr/aarch64-linux-gnu /etc/qemu-binfmt/aarch64
$SUDO ln -s /usr/arm-linux-gnueabihf /etc/qemu-binfmt/arm
$SUDO ln -s /usr/mips-linux-gnu /etc/qemu-binfmt/mips
$SUDO ln -s /usr/mipsel-linux-gnu /etc/qemu-binfmt/mipsel
| true
|
6b97ee53782f22a2daff0822fed52e63b4bbe2d1
|
Shell
|
google/filament
|
/third_party/libsdl2/build-scripts/strip_fPIC.sh
|
UTF-8
| 454
| 3.453125
| 3
|
[
"Zlib",
"Apache-2.0",
"CC0-1.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/sh
#
# libtool assumes that the compiler can handle the -fPIC flag
# This isn't always true (for example, nasm can't handle it)
command=""
while [ $# -gt 0 ]; do
case "$1" in
-?PIC)
# Ignore -fPIC and -DPIC options
;;
-fno-common)
# Ignore -fPIC and -DPIC options
;;
*)
command="$command $1"
;;
esac
shift
done
echo $command
exec $command
| true
|
8b60cd624f26aa2f7da955085d730915cc7425c1
|
Shell
|
PhilippMolitor/server-dotfiles
|
/.bashrc
|
UTF-8
| 3,182
| 3.640625
| 4
|
[] |
no_license
|
#################
# Bash settings #
#################
# stop if non-interactive
case $- in
*i*) ;;
*) return;;
esac
# load bash completion
if ! shopt -oq posix; then
if [ -f /usr/share/bash-completion/bash_completion ]; then
source /usr/share/bash-completion/bash_completion
elif [ -f /etc/bash_completion ]; then
source /etc/bash_completion
fi
fi
# shopt
shopt -s autocd
shopt -s checkwinsize
shopt -s histappend
shopt -s dotglob
shopt -s globstar
# history settings
HISTCONTROL=ignoreboth
HISTSIZE=10000
SAVEHIST=10000
#################
# PATH variable #
#################
PRIVATE_BIN_PATH="${HOME}/.bin"
if [[ -d $PRIVATE_BIN_PATH ]] && [[ ":$PATH:" != *":$PRIVATE_BIN_PATH:"* ]]; then
export PATH="${PRIVATE_BIN_PATH}:${PATH}"
fi
#################
# Custom prompt #
#################
PROMPT_COMMAND=__render_prompt
__render_prompt () {
local ecode="$?"
local fmtcode="$(printf "%03d" "$ecode")"
# reset
PS1='\[\e[0m\]'
# exit code
PS1+='['
if [[ $ecode == "0" ]]; then
PS1+='\[\033[32m\]'
PS1+=$' \xe2\x9c\x93 '
else
PS1+='\[\033[31m\]'
PS1+="$fmtcode"
fi
PS1+='\[\033[0m\]] '
# hostname/user:
PS1+='\[\e[32m\]\h\[\e[0m\]'
PS1+='/'
PS1+='\[\e[96m\]\u\[\e[0m\]'
PS1+=':'
# pwd (abbreviated)
PS1+="\[\e[95m\]"
PS1+="$(
if [[ $PWD = / ]]; then
printf '/'
else
p="${PWD#${HOME}}"
[[ $PWD != $p ]] && printf "~"
IFS=/
for d in ${p:1}; do
[[ ${d:0:1} == "." ]] && printf "/${d:0:2}" || printf "/${d:0:1}"
done
[[ ${d:0:1} == "." ]] && printf "${d:2}" || printf "${d:1}"
fi
)"
PS1+='\[\e[00m\]'
# privilege
if [[ $UID == "0" ]]; then
PS1+='# '
else
PS1+='$ '
fi
}
###########
# Aliases #
###########
# vim all the things
if [[ -x "$(command -v vim)" ]]; then
alias vi='vim'
export EDITOR="vim"
export VISUAL="vim"
fi
# ls shortcuts
if [[ -x "$(command -v exa)" ]]; then
alias ls="exa --header --git --group --group-directories-first --color-scale --color=always"
alias lm="exa --header --long --group --sort=modified --reverse --color always --color-scale"
alias lt="exa --long --tree --git-ignore"
else
alias ls='ls --group-directories-first -h --color=auto'
fi
alias ll='ls -l'
alias la='ls -la'
# docker-compose
alias dc="docker-compose"
alias dcr="docker-compose down && docker-compose up -d"
alias dcpu="docker-compose pull && docker-compose up -d"
# git
alias gitlog="git log --graph --oneline --decorate --all"
# in case someone fucked up again... (me)
alias fuck='sudo env "PATH=$PATH" $(fc -ln -1)'
# list all currently listening tcp sockets
alias lssockets='ss -nrlpt'
# pretty mount table
alias mountfmt="mount | column -t | sort"
# upload to https://0x0.st
0x0 () {
echo ">> $(curl -s --fail -F "file=@$1" "https://0x0.st" || echo "error uploading $1")"
}
# config management with git
dotconf () {
local cdir="$HOME/.dotconf"
[[ -d $cdir ]] || mkdir -p $cdir
[[ -f $cdir/HEAD ]] || git init --bare $cdir
git --git-dir=$cdir --work-tree=$HOME/ "$@"
}
###################
# Update dotfiles #
###################
(dotconf pull >/dev/null 2>&1 &)
| true
|
67f179674d50f250a7d4c9b18ed065abad494a65
|
Shell
|
yangcs1596/ycs_test
|
/脚本/sdelasticsearch
|
UTF-8
| 2,653
| 3.734375
| 4
|
[] |
no_license
|
#!/bin/bash
# Copyright www.safedog.cn @ 2017
# by 1057 (lvxj@safedog.cn)
# Check if user is mybk
[ $(whoami) != "mybk" ] && { echo "ERROR: This script must be run as mybk"; exit 1; }
# Default
es_instances="elasticsearch1 elasticsearch2"
# Config File
. /home/mybk/mybk.cfg &> /dev/null
[[ $es_enable != 1 ]] || [ "$loganalyze" == "n" ] && exit
start() {
for i in $es_instances; do
if [[ -d /opt/$i ]]; then
if ps aux | grep -v grep | grep "=/opt/$i" &> /dev/null; then
echo -e "$i\t\trunning!"
else
echo "Starting $i..."
export node_name=$i
nohup /opt/$i/bin/elasticsearch &>> /opt/$i/$i.log &
logexpect /opt/$i/$i.log "started"
sleep 1
fi
else
echo "/opt/$i not exists!"
fi
done
if [[ ${elasticsearch_head_enable:-0} == 1 ]]; then
if [[ -d /opt/nodejs/elasticsearch-head/ ]]; then
echo "Starting elasticsearch-head..."
cd /opt/nodejs/elasticsearch-head
nohup npm run start &> /dev/null &
fi
fi
}
stop() {
for i in $es_instances; do
if [[ -d /opt/$i ]]; then
pid=$(ps aux | grep -v grep | grep "=/opt/$i" | awk '{print $2}')
if [[ $pid -gt 0 ]]; then
echo "Stopping $i..."
kill -9 $pid
else
echo -e "$i\t\tstopped!"
fi
fi
done
if [[ -d /opt/nodejs/elasticsearch-head/ ]]; then
for i in npm grunt; do
pid=$(ps aux | grep -v grep | grep "$i" | awk '{print $2}')
if [[ $pid -gt 0 ]]; then
echo "Shutting down elasticsearch-head ($pid)"
kill -9 $pid
fi
done
fi
}
restart() {
stop
start
}
status() {
jps -v | awk '$2 == "Elasticsearch" {for(i=1;i<=NF;i++) if($i ~ /-Des.path.home=/) {sub(/.*\//,"",$i); print $i,$1}}'
if [[ -d /opt/nodejs/elasticsearch-head/ ]]; then
for i in npm grunt; do
pid=$(ps aux | grep -v grep | grep "$i" | awk '{print $2}')
if [[ $pid -gt 0 ]]; then
echo "elasticsearch-head is running ($(pgrep -l $i))"
else
echo "elasticsearch-head is stopped"
fi
done
fi
}
usage() {
echo "Usage: $(basename $0) {start|stop|restart|status}"
}
[ $# -eq 0 ] && usage
case "$1" in
"start")
start
;;
"stop")
stop
;;
"restart")
restart
;;
"status")
status
;;
"-h")
usage
;;
esac
| true
|
751a398560960c6e12ed27090b21381eaed94d4d
|
Shell
|
Asmageddon/dotfiles
|
/bin/screenshot
|
UTF-8
| 626
| 3.515625
| 4
|
[] |
no_license
|
#!/bin/bash
# TODO: Check in order: flameshot, scrot, imagemagick
# ImageMagick:
#timestamp="$(date +%d.%m.%Y"_shot_"%H:%M:%S)"
#targetbase="$HOME/Pictures/screenshots"
#mkdir -p $targetbase
#[ -d $targetbase ] || exit 1
#import -window root -quality 98 $targetbase/$timestamp.png
if [[ $(command -v flameshot) != "" ]] ; then
flameshot gui --delay 200
elif [[ $(command -v scrot) != "" ]] ; then
CMDS='nice -n 20 optipng "$f"'
CMDS=$CMDS';mv "$f" ~/Pictures/screenshots'
scrot '%Y.%m.%d @ %H:%M:%S ($wx$h).png' -e "$CMDS"
else
echo "You need to install either flameshot or scrot to use this script"
fi
| true
|
94ff699d1879c90e72a14127bcb16e0acb90c48f
|
Shell
|
nivyaal/OS-HW1
|
/tests/alice/drive-download-20210420T150016Z-001/t.sh
|
UTF-8
| 189
| 2.890625
| 3
|
[] |
no_license
|
#!/bin/bash
for i in {1..7}; do
diff test${i}.out test${i}.exp > diff${i}
if ! [[ -s diff${i} ]]; then
echo "test $i PASSED! yayyy"
else
echo "test $i failed"
fi
done
| true
|
c638ee93e4ada8bd24be875e973eb28483bf9d59
|
Shell
|
alex-shumilov/dotfiles
|
/.bash_it_custom/nvm.bash
|
UTF-8
| 300
| 2.875
| 3
|
[] |
no_license
|
# shellcheck shell=bash
# vi: set ft=sh:
if [ -d "$HOME/.config/nvm" ]; then
export NVM_DIR="$HOME/.config/nvm"
elif [ -d "$HOME/.nvm" ]; then
export NVM_DIR="$HOME/.nvm"
fi
[ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh"
[ -s "$NVM_DIR/bash_completion" ] && source "$NVM_DIR/bash_completion"
| true
|
ec2f48f3312a555da693b8d10cfc9a157c9d3398
|
Shell
|
sjawhar/nts-rss-feed
|
/deploy.sh
|
UTF-8
| 181
| 2.9375
| 3
|
[] |
no_license
|
#!/bin/sh
set -e
docker-compose run --rm generator
read -p "Commit and push? " commit
if [ "${commit}" = "y" ]; then
git add -p
git commit -m "Update feed.xml"
git push
fi
| true
|
eb993bad7b757a790acd0743aa004041d0b42a7a
|
Shell
|
RichardBronosky/dotfiles
|
/.bash_quick-setup.sh
|
UTF-8
| 334
| 2.71875
| 3
|
[] |
no_license
|
cat >> ~/.bashrc << EOF
for f in .bash_aliases .bashrc.env.ext .bashrc.man.ext .bashrc.hist.ext .bashrc.tmux.ext; do
if [ -f $(cd "$(dirname "$BASH_SOURCE")"; pwd)/\$f ]; then
source $(cd "$(dirname "$BASH_SOURCE")"; pwd)/\$f
fi
done
EOF
mkdir ~/.vim
ln -s $(cd "$(dirname "$BASH_SOURCE")"; pwd)/.vim/vimrc.basic ~/.vim/
| true
|
bb1a5e926029c174cf81e909bf2ca8ed8bbbf8d4
|
Shell
|
Prashu94/Learnings
|
/LinuxP/ost-exam/Decimal to Binary.sh
|
UTF-8
| 175
| 3.109375
| 3
|
[] |
no_license
|
#Decimal to Binary
clear
echo " enter the decimal number"
read b
bin=0
while [ $b -ne 0 ]
do
r=`expr $b%2|bc`
b=`expr $b/2|bc`
bin=$r$bin
done
bin=`expr $bin/10|bc`
echo $bin
| true
|
77f5d16fdd335427802fbd90469f173f890b51ac
|
Shell
|
SussexUCU/ogustools
|
/search_for.sh
|
UTF-8
| 765
| 4.46875
| 4
|
[] |
no_license
|
#/bin/bash
#
# Search for string "foo" in PDF files, writing result to file foo.txt
usage() { echo "Usage: $0 [-h] [-i] [-o <output_directory>] <string> " 1>&2; }
CASE=""
OUT_DIR=""
DATE_TIME=`date -u "+%Y-%m-%d_%H-%M"`
while getopts ':hio:' option
do
case "${option}"
in
i) CASE=-i
;;
o) OUT_DIR=${OPTARG}/
;;
h) usage; exit 0
;;
:) echo "foo bar"; exit 1
;;
*) usage; exit 1
;;
esac
done
shift $((OPTIND - 1))
if (($# == 0))
then
echo "search_for.sh: error: no search string given"
exit 1
else
string="$1"
fi
results_file="${OUT_DIR}${string}_${DATE_TIME}.txt"
echo "Searching for string ${string}"
echo "Writing results to $results_file"
pdfgrep ${CASE} -R -c -H "$string" --match-prefix-separator / . | sed '/0$/d' > "${results_file}"
exit 0
| true
|
3e57de622b37e254d01f64fddbfdbea2cad9dc6b
|
Shell
|
maxxwave/dipole
|
/submit_gpu
|
UTF-8
| 766
| 2.671875
| 3
|
[] |
no_license
|
#!/bin/bash
#$ -notify
# Job name
#$ -N test
# Request Queue
#$ -q tesla.q
#$ -l gpu=1
#$ -l proc_type=intel*
#$ -l h_rt=48:00:00
# Export current environment
#$ -V -cwd
#$ -j y
user=`id -un`
nodes=$HOSTNAME
scratch=/scratch/$user/$JOB_ID
#====================================================
# Copy data to nodes
#====================================================
mkdir run.$JOB_ID
cd run.$JOB_ID
cp ../a.out .
mkdir $scratch
cp * $scratch
cd $scratch
#====================================================
# Executable
#====================================================
./CUDA >aici
#====================================================
# Copy back results
#====================================================
cp -rpv * $SGE_O_WORKDIR;
| true
|
7a23bc290ac3272106f21de236c2ac349bc43b04
|
Shell
|
fnareoh/jellyfish
|
/scripts/remote.sh
|
UTF-8
| 1,336
| 3.65625
| 4
|
[] |
no_license
|
#!/bin/bash
# number of frames to render
NB_FRAMES=120
nb_hosts=0
while read host ; do
nb_hosts=$((nb_hosts + 1))
done < scripts/hosts.list
echo "---" $nb_hosts "hosts found"
echo "--- Update remote sources"
while read host ; do
ssh -n $host 'rm -rf /tmp/jelly && mkdir -p /tmp/jelly' &
done < scripts/hosts.list
wait
while read host ; do
echo $host
rsync --exclude-from '.gitignore' --exclude '.git' -au ./ $host:/tmp/jelly &
done < scripts/hosts.list
wait
echo "--- Build frames"
host_id=0
while read host ; do
start=$((host_id * (NB_FRAMES / nb_hosts)))
end=$(((host_id + 1) * (NB_FRAMES / nb_hosts) - 1))
echo $host ":" $start "to" $end
command='cd /tmp/jelly && make clean && make release'
for i in `seq $start $end` ; do
i=$(printf "%03d" $i)
command=$command" && nohup make POV_ARGS=+WT7 FRAME=${i} scene > /dev/null"
done
echo $command
ssh -n $host "${command}" &
host_id=$((host_id + 1))
done < scripts/hosts.list
wait
clear
echo "--- Collect frames"
rm -r frames
while read host ; do
mkdir -p frames/$host
scp $host:/tmp/jelly/scene_frame*.png frames/$host &
done < scripts/hosts.list
wait
echo "--- Building .gif"
convert frames/*/scene_frame_*.png scene.gif
# ssh info 'cd /tmp/jelly && make FRAME=1 scene && make FRAME=2 scene'
| true
|
09224bd51a89cc186bf3f881073f335d6c93fce9
|
Shell
|
nvm-sh/nvm
|
/test/fast/Aliases/lts/'nvm alias' should ensure LTS alias dir exists
|
UTF-8
| 355
| 3.265625
| 3
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/sh
set -ex
\. ../../../../nvm.sh
\. ../../../common.sh
LTS_ALIAS_PATH="$(nvm_alias_path)/lts"
rm -rf "${LTS_ALIAS_PATH}"
die () { echo "$@" ; exit 1; }
[ ! -d "${LTS_ALIAS_PATH}" ] || die "'${LTS_ALIAS_PATH}' exists and should not"
nvm alias >/dev/null 2>&1
[ -d "${LTS_ALIAS_PATH}" ] || die "'${LTS_ALIAS_PATH}' does not exist and should"
| true
|
0018e1316d41cd4b7d4f89cb5c249c679f08e1eb
|
Shell
|
xlhyy/Zero_book
|
/kv/超一线上布代码相关/deploy_pub_android.sh
|
UTF-8
| 2,590
| 3.390625
| 3
|
[] |
no_license
|
#!/bin/sh
# main
if [ $# -ne 1 ]; then
echo "Usage: `basename $0` <version_tag>"
exit 1
fi
tag=$1
# example r0.00.00.00
reg="^r[0-9]\.[0-9]{1,2}\.[0-9]{1,2}\.[0-9]{1,2}$"
if [ ! `echo $tag | grep -E $reg` ]; then
echo "version error example r0.0.0.0"
exit 1
fi
WORK_DIR=$(cd "$(dirname "$0")"; pwd)
GIT_REPO_PATH="/data/repo/backend"
APP_SRV_LIST="${WORK_DIR}/pub_android_srv.txt"
#ROOT_DIR="/home/madajie/release/cmge"
#repo_dir="/home/madajie/s/genesis/backend"
temp_dir="${WORK_DIR}/temp_android/"
#HOST_LIST="${ROOT_DIR}/server_list.txt"
rm -rf $temp_dir/*
echo $tag > $temp_dir/version
cd $GIT_REPO_PATH
/usr/bin/git fetch --all
/usr/bin/git archive --format=tar $tag | tar -xv -C $temp_dir
HOST=(`cat ${APP_SRV_LIST} | grep -v '^#' `)
echo "Deploy to the hosts:"
for i in ${HOST[@]}
do
echo $i
done
read -p "Are you sure you want to continue (y/n)?" MFLAG
if [ "x${MFLAG}" != "xy" ]; then
exit 1
fi
# 校验下需要更新多少文件
THOST=${HOST[0]}
/usr/bin/rsync --dry-run -aI --progress --recursive --exclude='upload_xls' --exclude='.git' --exclude='logs' --exclude='test/local_config.py' $temp_dir "admin@$THOST:/data/sites/genesis_backend/"
read -p "Are you sure you want to continue (y/n)?" CFLAG
if [ "x${CFLAG}" != "xy" ]; then
exit 1
fi
echo 'backend pub deployed'
for host in ${HOST[@]}
do
echo $host
/usr/bin/rsync -aI --progress --recursive --exclude='upload_xls' --exclude='.git' --exclude='logs' --exclude='test/local_config.py' $temp_dir "admin@${host}:/data/sites/genesis_backend/"
done
# 另外2台nginx
for host in 10.10.30.240
do
echo $host
/usr/bin/rsync -aI --progress --recursive --exclude='upload_xls' --exclude='.git' --exclude='logs' --exclude='test/local_config.py' $temp_dir "admin@${host}:/data/sites/genesis_backend/"
done
echo $?
cd $WORK_DIR
d=`date +"%F %T"`
echo $d $0 $tag
echo $d $0 $tag >> release.log
| true
|
c0def85faf29526deef717529e5a78333ec5f012
|
Shell
|
dmnur/tupe
|
/ch3/ex3-18/bundle
|
UTF-8
| 324
| 3.765625
| 4
|
[] |
no_license
|
#!/bin/sh
# bundle: group files into distribution package
# (from section 3.9)
# Modified for exercise 3-18: include with each file the information
# garnered from `ls -l`.
echo '# To unbundle, sh this file'
for i
do
echo "cat >&2 <<'EOF'"
ls -l $i
echo EOF
echo "cat >$i <<'End of $i'"
cat $i
echo "End of $i"
done
| true
|
9ba6a3e73310f95a87443527a2982a07cf57cfce
|
Shell
|
kumattau/gitapps
|
/git-restore-timestamp
|
UTF-8
| 1,779
| 3.703125
| 4
|
[] |
no_license
|
#!/bin/sh
LANG=C
IFS='
'
if [ $# -eq 0 ]
then
set .
fi
while [ 0 -lt $# ]
do
obj=$1
for sub in `find "${obj}" -print`
do
# check "not" git metadata directory
if echo "${sub}" | grep -q -E '(^\.git|/.git/|/.git$)'
then
continue
fi
if [ ! -d "${sub}" ]
then
# check "not" outside repository
if ! git log -- "${sub}" 2>/dev/null 1>&2
then
continue
fi
if [ `git log -- "${sub}" | wc -c` = 0 ]
then
echo "- ${sub} : this file has not been commited yet"
continue
fi
if ! git diff --quiet -- "${sub}" || ! git diff --quiet --staged -- "${sub}"
then
echo "x ${sub} : this file is modified after last commit"
continue
fi
modify_time=`stat --format='%Y' "${sub}"`
commit_time=`git log --date=raw --max-count=1 -- "${sub}" | grep ^Date: | awk '{print $2}'`
modify_text=`date +'%Y-%m-%d %X' --date=@${modify_time}`
commit_text=`date +'%Y-%m-%d %X' --date=@${commit_time}`
if [ ${modify_time} -eq ${commit_time} ]
then
echo "- ${sub} : modify and commit is same time ${commit_text}"
continue
fi
if [ ${modify_time} -lt ${commit_time} ]
then
echo "x ${sub} : modify ${modify_text} is older than last commit ${commit_text}"
continue
fi
echo "o ${sub} : APPLY ${modify_text} to ${commit_text}"
touch --time=modify --date="${commit_text}" "${sub}"
fi
done
shift
done
| true
|
43e0d8d22afeb246380e4379ae8474a452982ff2
|
Shell
|
lanceshelton/Avere
|
/src/tutorials/ArtistAnywhere/ArtistWorkstation/Linux/15.Machine.sh
|
UTF-8
| 449
| 2.6875
| 3
|
[
"CC-BY-4.0",
"MIT",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
#!/bin/bash
set -ex
cd /usr/local/bin
echo "export CUEBOT_HOSTS=$renderManagerHost" > /etc/profile.d/opencue.sh
if [ "$teradiciLicenseKey" != "" ]; then
yum -y install https://downloads.teradici.com/rhel/teradici-repo-latest.noarch.rpm
yum -y install epel-release
yum -y install usb-vhci
yum -y install pcoip-agent-graphics
pcoip-register-host --registration-code="$teradiciLicenseKey"
systemctl restart 'pcoip-agent'
fi
| true
|
d28173b7341180c6048c559845c12d5f90c91ad6
|
Shell
|
labaneilers/dk
|
/install.sh
|
UTF-8
| 557
| 3.609375
| 4
|
[] |
no_license
|
#!/bin/bash
cd $(dirname "$0")
if [ "$DK_DIR" = "" ]; then
DK_DIR="$HOME/bin"
fi
DK_BIN_PATH="$DK_DIR/dk"
echo "Installing dk to $DK_BIN_PATH..."
mkdir -p ~/bin
curl -s -f -S -o "$DK_BIN_PATH" https://raw.githubusercontent.com/labaneilers/dk/master/dk
ERROR="$?"
if [ ! "$ERROR" = "0" ]; then
echo "Failed to download"
exit 1
fi
chmod +x "$DK_BIN_PATH"
TESTED=$(which dk)
if [[ "$TESTED" = "" ]]; then
echo "dk was installed at $DK_BIN_PATH, but it wasn't on your PATH."
exit 1
fi
echo "dk installed successfully at $DK_BIN_PATH"
| true
|
917988a10a7d09f5e598c88c1cbd08a4680e06ee
|
Shell
|
WhitewaterFoundry/pengwin-setup
|
/rpm/pengwin-setup.d/terminal.sh
|
UTF-8
| 4,918
| 3.703125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# shellcheck source=./common.sh
source "$(dirname "$0")/common.sh" "$@"
declare wHome
declare SetupDir
function main() {
# shellcheck disable=SC2155
local menu_choice=$(
menu --title "Terminal Menu" --checklist --separate-output "Select the terminals you want to install\n[SPACE to select, ENTER to confirm]:" 14 60 7 \
"WINTERM" "Windows Terminal" off \
"WSLTTY" "WSLtty" off \
"TILIX" "Tilix (requires X Server)" off \
"GTERM" "Gnome Terminal (requires X Server)" off \
"XFTERM" "Xfce Terminal (requires X Server)" off \
"TERMINATOR" "Terminator (requires X Server)" off \
"KONSO" "Konsole (requires X Server)" off \
3>&1 1>&2 2>&3)
if [[ ${menu_choice} == "CANCELLED" ]]; then
return 1
fi
if [[ ${menu_choice} == *"WINTERM"* ]]; then
echo "WINTERM"
if (confirm --title "Windows Terminal" --yesno "Would you like to install Windows Terminal?" 8 40); then
tmp_win_version=$(wslsys -B -s)
if [ "$tmp_win_version" -lt 18362 ]; then
message --title "Unsupported Windows 10 Build" --msgbox "Windows Terminal requires Windows 10 Build 18362, but you are using $tmp_win_version. Skipping Windows Terminal." 8 56
return
fi
wslview "ms-windows-store://pdp/?ProductId=9n0dx20hk701"
else
echo "Skipping Windows Terminal"
fi
fi
if [[ ${menu_choice} == *"WSLTTY"* ]]; then
echo "WSLTTY"
if (confirm --title "WSLtty" --yesno "Would you like to install WSLtty?" 8 40); then
createtmp
echo "Installing required install dependencies"
install_packages wget p7zip-full
[ -d "${wHome}/Pengwin/.wsltty" ] || mkdir -p "${wHome}/Pengwin/.wsltty"
wsltty_url="$(curl -s https://api.github.com/repos/mintty/wsltty/releases | grep 'browser_' | head -1 | cut -d\" -f4)"
wget --progress=dot "$wsltty_url" -O "wsltty.7z" 2>&1 | sed -un 's/.* \([0-9]\+\)% .*/\1/p' | whiptail --title "WSLtty" --gauge "Downloading WSLtty..." 7 50 0
7z x wsltty.7z -o"${wHome}"/Pengwin/.wsltty/
echo "Installing WSLtty.... Please wait patiently"
tmp_f="$(pwd)"
# shellcheck disable=SC2164
cd "${wHome}/Pengwin/.wsltty"
cmd.exe /C "install.bat"
# shellcheck disable=SC2164
cd "$tmp_f"
unset tmp_f
message --title "WSLtty" --msgbox "Installation complete. You can find the shortcuts in your start menu.\nNote: use the Terminal unisntall to uninstall cleanly" 8 56
else
echo "Skipping WSLtty"
fi
fi
if [[ ${menu_choice} == *"TILIX"* ]]; then
echo "TILIX"
if (confirm --title "Tilix" --yesno "Would you like to install Tilix?" 8 40); then
install_packages tilix libsecret-1-0
message --title "Tilix" --msgbox "Installation complete. You can start it by running $ tilix" 8 56
INSTALLED=true
else
echo "skipping TILIX"
fi
fi
if [[ ${menu_choice} == *"GTERM"* ]]; then
echo "GTERM"
if (confirm --title "GNOME Terminal" --yesno "Would you like to install GNOME Terminal?" 8 40); then
echo "Install dependencies..."
bash "${SetupDir}"/guilib.sh --yes "$@"
install_packages gnome-terminal
message --title "GNOME Terminal" --msgbox "Installation complete. You can start it by running $ gnome-terminal" 8 56
INSTALLED=true
else
echo "skipping GTERM"
fi
fi
if [[ ${menu_choice} == *"XFTERM"* ]]; then
echo "XFTERM"
if (confirm --title "Xfce Terminal" --yesno "Would you like to install Xfce Terminal?" 8 40); then
install_packages xfce4-terminal
message --title "Xfce Terminal" --msgbox "Installation complete. You can start it by running $ xfce4-terminal" 8 56
INSTALLED=true
else
echo "Skipping XFTERM"
fi
fi
if [[ ${menu_choice} == *"TERMINATOR"* ]]; then
echo "TERMINATOR"
if (confirm --title "Terminator" --yesno "Would you like to install Terminator?" 8 40); then
echo "Install dependencies..."
bash "${SetupDir}"/guilib.sh --yes "$@"
install_packages terminator
message --title "Terminator" --msgbox "Installation complete. You can start it by running $ terminator" 8 56
INSTALLED=true
else
echo "Skipping TERMINATOR"
fi
fi
if [[ ${menu_choice} == *"KONSO"* ]]; then
echo "KONSO"
if (confirm --title "Konsole" --yesno "Would you like to install Konsole?" 8 40); then
echo "Install dependencies..."
bash "${SetupDir}"/guilib.sh --yes "$@"
install_packages konsole breeze
sudo tee "/etc/profile.d/kde.sh" <<EOF
#!/bin/bash
export QT_STYLE_OVERRIDE=Breeze
EOF
message --title "Konsole" --msgbox "Installation complete.\n\nYou can start it by running: $ konsole" 10 56
INSTALLED=true
else
echo "Skipping KONSO"
fi
fi
if [[ "${INSTALLED}" == true ]]; then
bash "${SetupDir}"/shortcut.sh --yes "$@"
fi
}
main "$@"
| true
|
ce95d4008b201f810b62037fffee4f4251b731be
|
Shell
|
housel/catkin
|
/bin/catkin_util.sh
|
UTF-8
| 1,685
| 3.4375
| 3
|
[] |
no_license
|
TMPDIR=$PWD/.tmp/$$
# /bin/echo "$0 will be working in temporary dir $TMPDIR"
initializeANSI()
{
esc=""
blackf="${esc}[30m"; redf="${esc}[31m"; greenf="${esc}[32m"
yellowf="${esc}[33m" bluef="${esc}[34m"; purplef="${esc}[35m"
cyanf="${esc}[36m"; whitef="${esc}[37m"
blackb="${esc}[40m"; redb="${esc}[41m"; greenb="${esc}[42m"
yellowb="${esc}[43m" blueb="${esc}[44m"; purpleb="${esc}[45m"
cyanb="${esc}[46m"; whiteb="${esc}[47m"
boldon="${esc}[1m"; boldoff="${esc}[22m"
italicson="${esc}[3m"; italicsoff="${esc}[23m"
ulon="${esc}[4m"; uloff="${esc}[24m"
invon="${esc}[7m"; invoff="${esc}[27m"
reset="${esc}[0m"
}
initializeANSI
get_version_component()
{
REGEX='/.*\((\d+)\.(\d+)\.(\d+)\-(\d+)(\w+)\)/'
NUM=$1
REV=$2
VALUE=$(perl -e "\"$REV\" =~ $REGEX && print \$$NUM")
}
get_upstream_version_component()
{
REGEX='/upstream\/(\d+)\.(\d+)\.(\d+)/'
NUM=$1
REV=$2
VALUE=$(perl -e "\"$REV\" =~ $REGEX && print \$$NUM")
}
bailout()
{
/bin/echo "${redf}${boldon}$*${reset}"
exit 1
}
checking()
{
/bin/echo "${yellowf}$*${reset}"
}
status()
{
/bin/echo "$*"
}
okay()
{
/bin/echo "${greenf}$*${reset}"
}
github_api()
{
VARNAME=$1
shift
CALL=$1
shift
URLS=$(curl -s https://api.github.com/$CALL | $TOP/json-extract $*)
eval $VARNAME="\"$URLS\""
}
_track_all()
{
for x in catkin upstream
do
if git branch | grep $x >/dev/null
then
status "$(basename `pwd`) has branch $x."
elif git branch -r | grep origin/$x >/dev/null
then
git branch --track $x origin/$x
fi
done
}
| true
|
10de4c4675dfc295243a3b0b642d6b77c860c4f8
|
Shell
|
yu-ichiro/.settings
|
/defaults.sh
|
UTF-8
| 5,125
| 2.546875
| 3
|
[] |
no_license
|
#! /bin/zsh
# ================================================================================
# 【For Mac】MacOS.sh : Setup MacOS
# ================================================================================
if [[ "$(uname)" != "Darwin" ]] ; then
echo 'Not macOS!'
exit 1
fi
echo 'Setup MacOS'
# ================================================================================
# System
# ================================================================================
sudo nvram SystemAudioVolume=" " # ブート時のサウンドを無効化する
sudo defaults write /Library/Preferences/com.apple.loginwindow AdminHostInfo HostName # 時計アイコンクリック時に OS やホスト名 IP を表示する
defaults write -g NSAutomaticWindowAnimationsEnabled -bool false # ファイルを開くときのアニメーションを無効にする
defaults write -g NSInitialToolTipDelay -integer 0.5 # ツールチップ表示までのタイムラグをなくす
defaults write -g NSWindowResizeTime 0.1 # ダイアログ表示やウィンドウリサイズ速度を高速化する
defaults write NSGlobalDomain KeyRepeat -int 2 # キーリピートの速度
defaults write NSGlobalDomain InitialKeyRepeat -int 15 # キーリピート開始までのタイミング
defaults write com.apple.LaunchServices LSQuarantine -bool false # 未確認のアプリケーションを実行する際のダイアログを無効にする
# ================================================================================
# Finder
# ================================================================================
chflags nohidden ~/Library # ~/Library ディレクトリを見えるようにする
sudo chflags nohidden /Volumes # /Volumes ディレクトリを見えるようにする
defaults write NSGlobalDomain AppleShowAllExtensions -bool true # 全ての拡張子のファイルを表示する
defaults write NSGlobalDomain com.apple.springing.enabled -bool true # ディレクトリのスプリングロードを有効にする
defaults write com.apple.desktopservices DSDontWriteNetworkStores -bool true # USB やネットワークストレージに .DS_Store ファイルを作成しない
defaults write com.apple.desktopservices DSDontWriteUSBStores -bool true
defaults write com.apple.finder _FXShowPosixPathInTitle -bool true # Finder のタイトルバーにフルパスを表示する
defaults write com.apple.finder _FXSortFoldersFirst -bool true # 名前で並べ替えを選択時にディレクトリを前に置くようにする
defaults write com.apple.finder AppleShowAllFiles YES # 不可視ファイルを表示する
defaults write com.apple.finder FXDefaultSearchScope -string "SCcf" # 検索時にデフォルトでカレントディレクトリを検索する
defaults write com.apple.finder FXEnableExtensionChangeWarning -bool false # 拡張子変更時の警告を無効化する
defaults write com.apple.finder QLEnableTextSelection -bool true # クイックルックでテキストを選択可能にする
defaults write com.apple.Finder QuitMenuItem -bool true # Finder を終了させる項目を追加する
defaults write com.apple.finder ShowStatusBar -bool true # ステータスバーを表示する
defaults write com.apple.finder WarnOnEmptyTrash -bool false # ゴミ箱を空にする前の警告を無効化する
# ================================================================================
# Safari
# ================================================================================
defaults write NSGlobalDomain WebKitDeveloperExtras -bool true # Safari のコンテキストメニューに Web インスペクタを追加する
defaults write com.apple.Safari com.apple.Safari.ContentPageGroupIdentifier.WebKit2DeveloperExtrasEnabled -bool true # Safari の開発・デバッグメニューを有効にする
defaults write com.apple.Safari IncludeDevelopMenu -bool true # Safari の開発・デバッグメニューを有効にする
defaults write com.apple.Safari IncludeInternalDebugMenu -bool true # Safari の開発・デバッグメニューを有効にする
defaults write com.apple.Safari ShowFullURLInSmartSearchField -bool true # アドレスバーに完全な URL を表示する
defaults write com.apple.Safari ShowStatusBar -bool true # ステータスバーを表示する
defaults write com.apple.Safari WebKitDeveloperExtrasEnabledPreferenceKey -bool true # Safari の開発・デバッグメニューを有効にする
# ================================================================================
# Others
# ================================================================================
defaults write com.apple.screencapture disable-shadow -bool true # スクリーンキャプチャの影をなくす
defaults write com.apple.screencapture type -string "png" # スクリーンショットの保存形式を PNG にする
defaults write com.apple.terminal StringEncodings -array 4 # UTF-8 のみを使用する
echo 'Finished'
| true
|
63dd084d8e682402e87cb9b81d561b3fd870d4d4
|
Shell
|
s6088/Local-Search
|
/plse/all.sh
|
UTF-8
| 394
| 2.625
| 3
|
[] |
no_license
|
# Build the solver and run it over the PLSE benchmark instances for each
# grid size / fixed-percent combination, then report per-directory averages.
make
#g++ qwh.cpp -o a
# grid size
for i in 70
do
    # fixed percent
    for j in 70
    do
        truncate -s 0 o.txt
        # never pass variable data as the printf FORMAT string
        printf '%s sq grid with %s percent fixed \n' "$i" "$j"
        for eachfile in "in/PLSE-$i-$j-0"/*
        do
            ./a "$eachfile" "$i" >> o.txt
        done
        # o.txt holds one "<violations> <seconds>" line per instance.
        # NOTE(review): the averages divide by a hard-coded 100 — assumes
        # exactly 100 instances per directory; confirm.
        echo -n "avrg time : "
        awk '{ total1 += $2 } END { print total1/100 }' o.txt
        echo -n "avrg vio : "
        awk '{ total2 += $1 } END { print total2/100 }' o.txt
    done
done
| true
|
f8dbcfa034362e2df49719ea2efb38287bd27483
|
Shell
|
qq944463782/InjectDylib
|
/resign_script.sh
|
UTF-8
| 1,328
| 2.59375
| 3
|
[] |
no_license
|
# Re-sign SampleNOF.ipa after injecting the LocationSpoofing framework and
# libLocationFaker dylib, then deploy the signed ipa to a connected device.
# NOTE(review): destructive — wipes Payload/ and any previous signed ipa
# in the current directory before starting.
rm -rf Payload
rm -rf SampleNOF-Signed.ipa
# unpack the original ipa and strip macOS metadata / Swift support dirs
unzip -q SampleNOF.ipa
rm -rf __MACOSX
rm -rf Payload/SwiftSupport
find . -name '.DS_Store' -type f -delete
#read -p "enter to continue"
# drop the old signature; it will be recreated below
rm -rf Payload/SampleNOF.app/_CodeSignature
#read -p "enter to continue"
# install the new provisioning profile and the injected libraries
cp embedded.mobileprovision Payload/SampleNOF.app/embedded.mobileprovision
cp ./SwiftSupport/iphoneos/**.dylib Payload/SampleNOF.app/Frameworks/
cp -a ./SwiftSupport/iphoneos/LocationSpoofing.framework Payload/SampleNOF.app/Frameworks/
# patch the binary's load commands so the dylib is loaded at launch
../template/optool install -c load -p "@executable_path/Frameworks/libLocationFaker.dylib" -t "./Payload/SampleNOF.app/SampleNOF" >& /dev/null
read -p "enter to continue"
# sign the frameworks first, then the app bundle with its entitlements
codesign -f -s "iPhone Distribution: ENETS PTE. LTD. (2YU75YNV6V)" Payload/SampleNOF.app/Frameworks/*
rm -rf Payload/SampleNOF.app/libswiftRemoteMirror.dylib
codesign -d -vv --entitlements entitlements.plist Payload/SampleNOF.app/SampleNOF
#read -p "enter to continue"
#cp ../template/UAT/entitlements.plist entitlements.plist
codesign -vfs "iPhone Distribution: ENETS PTE. LTD. (2YU75YNV6V)" --entitlements entitlements.plist Payload/SampleNOF.app
find . -name '.DS_Store' -type f -delete
rm entitlements.plist
# repackage, excluding scripts / profiles / the source ipa itself
zip -r --symlinks SampleNOF-Signed.ipa * -x *.sh* -x *.mobileprovision* -x *.ipa*
#Deploy to device
ios-deploy --bundle ./SampleNOF-Signed.ipa
| true
|
d298ac4bfa8df722fa3191806819af20e19ed47a
|
Shell
|
orcasound/orca-noise-analysis
|
/setup-data-dirs.sh
|
UTF-8
| 443
| 2.578125
| 3
|
[
"MIT"
] |
permissive
|
# setup-data-dirs.sh
#
# Set up data directory structure in the local root directory
# (explicitly not in the repository, to avoid hitting the 100MB Github limit)
# for temporary storage of audio and ais data during processing
#
# mkdir -p creates missing parents and is a no-op for directories that
# already exist, so re-running this script never errors out or clobbers
# existing data (resolves the original "should add some logic" TODO).
mkdir -p ~/shipnoise-data/audio/test
mkdir -p ~/shipnoise-data/ais
| true
|
48bf680d2ca3c604216b009b089b7abf25372d16
|
Shell
|
Ste74/manjaro-tools
|
/lib/util-iso-image.sh
|
UTF-8
| 10,887
| 3.703125
| 4
|
[] |
no_license
|
#!/bin/bash
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# Copy the contents of an overlay directory ($1) into a target tree ($2).
# A symlinked overlay is copied preserving attributes (minus ownership);
# a regular overlay is copied recursively, following symlinks (-L).
# No-op when the overlay does not exist.
copy_overlay(){
	if [[ -e $1 ]];then
		msg2 "Copying [%s] ..." "${1##*/}"
		if [[ -L $1 ]];then
			cp -a --no-preserve=ownership $1/* $2
		else
			cp -LR $1/* $2
		fi
	fi
}
# Mount with the given args and prepend the mountpoint ($2) to
# IMG_ACTIVE_MOUNTS so umount_img can tear everything down later
# (newest mount first).
track_img() {
	info "mount: [%s]" "$2"
	mount "$@" && IMG_ACTIVE_MOUNTS=("$2" "${IMG_ACTIVE_MOUNTS[@]}")
}
# Reset the mount bookkeeping, create the mountpoint and mount $1 on $2.
mount_img() {
	IMG_ACTIVE_MOUNTS=()
	mkdir -p "$2"
	track_img "$1" "$2"
}
# Unmount every recorded mountpoint and remove the directory $1.
umount_img() {
	if [[ -n ${IMG_ACTIVE_MOUNTS[@]} ]];then
		info "umount: [%s]" "${IMG_ACTIVE_MOUNTS[@]}"
		umount "${IMG_ACTIVE_MOUNTS[@]}"
		unset IMG_ACTIVE_MOUNTS
		rm -r "$1"
	fi
}
# Enable an OpenRC service ($2) in the default runlevel inside the
# chroot ($1), but only if its init script exists there.
add_svc_rc(){
	if [[ -f $1/etc/init.d/$2 ]];then
		msg2 "Setting %s ..." "$2"
		chroot $1 rc-update add $2 default &>/dev/null
	fi
}
# Enable a systemd unit ($2) inside the chroot ($1); checks both the
# /etc and /usr/lib unit directories before calling systemctl.
add_svc_sd(){
	if [[ -f $1/etc/systemd/system/$2.service ]] || \
	[[ -f $1/usr/lib/systemd/system/$2.service ]];then
		msg2 "Setting %s ..." "$2"
		chroot $1 systemctl enable $2 &>/dev/null
	fi
}
# Point the OpenRC xdm config in chroot $1 at the configured display
# manager (global ${displaymanager}); no-op when the config is absent.
set_xdm(){
	local cfg="$1/etc/conf.d/xdm"
	[[ -f $cfg ]] || return 0
	sed -i -e "s|^.*DISPLAYMANAGER=.*|DISPLAYMANAGER=\"${displaymanager}\"|" "$cfg"
}
# Seed mhwd's graphics-driver database inside the image ($1): for every
# proprietary driver whose *-utils package is absent from the image's
# local mhwd package repo, create an empty MHWDCONFIG marker so mhwd
# treats that driver as unavailable on the installed system.
configure_mhwd_drivers(){
	local path=$1${mhwd_repo}/ \
		drv_path=$1/var/lib/mhwd/db/pci/graphic_drivers
	info "Configuring mwwd db ..."
	if [ -z "$(ls $path | grep catalyst-utils 2> /dev/null)" ]; then
		msg2 "Disabling Catalyst driver"
		mkdir -p $drv_path/catalyst/
		touch $drv_path/catalyst/MHWDCONFIG
	fi
	if [ -z "$(ls $path | grep nvidia-utils 2> /dev/null)" ]; then
		msg2 "Disabling Nvidia driver"
		mkdir -p $drv_path/nvidia/
		touch $drv_path/nvidia/MHWDCONFIG
		# Bumblebee depends on the nvidia driver, so it is disabled too
		msg2 "Disabling Nvidia Bumblebee driver"
		mkdir -p $drv_path/hybrid-intel-nvidia-bumblebee/
		touch $drv_path/hybrid-intel-nvidia-bumblebee/MHWDCONFIG
	fi
	if [ -z "$(ls $path | grep nvidia-304xx-utils 2> /dev/null)" ]; then
		msg2 "Disabling Nvidia 304xx driver"
		mkdir -p $drv_path/nvidia-304xx/
		touch $drv_path/nvidia-304xx/MHWDCONFIG
	fi
	if [ -z "$(ls $path | grep nvidia-340xx-utils 2> /dev/null)" ]; then
		msg2 "Disabling Nvidia 340xx driver"
		mkdir -p $drv_path/nvidia-340xx/
		touch $drv_path/nvidia-340xx/MHWDCONFIG
	fi
	if [ -z "$(ls $path | grep xf86-video-amdgpu 2> /dev/null)" ]; then
		msg2 "Disabling AMD gpu driver"
		mkdir -p $drv_path/xf86-video-amdgpu/
		touch $drv_path/xf86-video-amdgpu/MHWDCONFIG
	fi
}
# Stamp the distro release and codename into the image's /etc/lsb-release.
configure_lsb(){
	if [ -e $1/etc/lsb-release ] ; then
		msg2 "Configuring lsb-release"
		sed -i -e "s/^.*DISTRIB_RELEASE.*/DISTRIB_RELEASE=${dist_release}/" $1/etc/lsb-release
		sed -i -e "s/^.*DISTRIB_CODENAME.*/DISTRIB_CODENAME=${dist_codename}/" $1/etc/lsb-release
	fi
}
# On x86_64 images built without multilib, tell mhwd not to expect lib32.
configure_mhwd(){
	if [[ ${target_arch} == "x86_64" ]];then
		if ! ${multilib};then
			msg2 "Disable mhwd lib32 support"
			echo 'MHWD64_IS_LIB32="false"' > $1/etc/mhwd-x86_64.conf
		fi
	fi
}
# Make the live session ignore suspend/lid/hibernate keys so the livecd
# does not power-manage itself away mid-session.
configure_logind(){
	msg2 "Configuring logind ..."
	local conf=$1/etc/systemd/logind.conf
	sed -i 's/#\(HandleSuspendKey=\)suspend/\1ignore/' "$conf"
	sed -i 's/#\(HandleLidSwitch=\)suspend/\1ignore/' "$conf"
	sed -i 's/#\(HandleHibernateKey=\)hibernate/\1ignore/' "$conf"
}
# Keep the journal in RAM only (volatile storage) for the live session.
configure_journald(){
	msg2 "Configuring journald ..."
	local conf=$1/etc/systemd/journald.conf
	sed -i 's/#\(Storage=\)auto/\1volatile/' "$conf"
}
# Enable the profile's configured services inside the chroot ($1) for the
# selected init system. The enable_* arrays come from the profile config;
# the *_live variants are services enabled only for the live session.
configure_services(){
	info "Configuring [%s]" "${initsys}"
	case ${initsys} in
		'openrc')
			for svc in ${enable_openrc[@]}; do
				# xdm needs its display manager written before enabling
				[[ $svc == "xdm" ]] && set_xdm "$1"
				add_svc_rc "$1" "$svc"
			done
			for svc in ${enable_openrc_live[@]}; do
				add_svc_rc "$1" "$svc"
			done
		;;
		'systemd')
			for svc in ${enable_systemd[@]}; do
				add_svc_sd "$1" "$svc"
			done
			for svc in ${enable_systemd_live[@]}; do
				add_svc_sd "$1" "$svc"
			done
		;;
	esac
	info "Done configuring [%s]" "${initsys}"
}
# Write ${SYSCONFDIR}/live.conf inside the image ($1) from the live
# session globals (autologin, login_shell, username, password, addgroups,
# and optionally smb_workgroup). The heredocs produce output identical to
# the original echo chain, just expressed in one place.
write_live_session_conf(){
	local cfg_dir=$1${SYSCONFDIR}
	[[ -d $cfg_dir ]] || mkdir -p $cfg_dir
	local conf=$cfg_dir/live.conf
	msg2 "Writing %s" "${conf##*/}"
	cat > ${conf} <<EOF
# live session configuration

# autologin
autologin=${autologin}

# login shell
login_shell=${login_shell}

# live username
username=${username}

# live password
password=${password}

# live group membership
addgroups='${addgroups}'
EOF
	if [[ -n ${smb_workgroup} ]];then
		cat >> ${conf} <<EOF

# samba workgroup
smb_workgroup=${smb_workgroup}
EOF
	fi
}
# Append the live hostname (global ${hostname}) to the
# localhost.localdomain entry in the chroot's /etc/hosts.
configure_hosts(){
	local hosts_file="$1/etc/hosts"
	sed -i -e "s|localhost.localdomain|localhost.localdomain ${hostname}|" "$hosts_file"
}
# Per-init-system tweaks inside the chroot ($1): for systemd, mark the
# tree as up-to-date so condition-gated units stay quiet on the livecd,
# disable the gpt auto-generator and write /etc/hostname; for openrc,
# set the hostname in /etc/conf.d/hostname.
configure_system(){
	case ${initsys} in
		'systemd')
			configure_journald "$1"
			configure_logind "$1"
			# Prevent some services to be started in the livecd.
			# BUG FIX: the next three paths used the stale global ${path}
			# (typically empty, so they touched the build host's /etc and
			# /var); every path must be rooted at the chroot "$1" like the
			# rest of this function.
			echo 'File created by manjaro-tools. See systemd-update-done.service(8).' \
			| tee "$1/etc/.updated" >"$1/var/.updated"
			msg2 "Disable systemd-gpt-auto-generator"
			ln -sf /dev/null "$1/usr/lib/systemd/system-generators/systemd-gpt-auto-generator"
			echo ${hostname} > $1/etc/hostname
		;;
		'openrc')
			local hn='hostname="'${hostname}'"'
			sed -i -e "s|^.*hostname=.*|${hn}|" $1/etc/conf.d/hostname
		;;
	esac
}
# Write /etc/thus.conf inside the image so the Thus installer knows the
# distro identity, the live media layout, and the kernel/initramfs file
# names (taken from the image's mkinitcpio preset). Also switches the
# Thus .desktop launcher from sudo to kdesu when kdesu is available.
configure_thus(){
	msg2 "Configuring Thus ..."
	source "$1/etc/mkinitcpio.d/${kernel}.preset"
	local conf="$1/etc/thus.conf"
	echo "[distribution]" > "$conf"
	echo "DISTRIBUTION_NAME = \"${dist_name} Linux\"" >> "$conf"
	echo "DISTRIBUTION_VERSION = \"${dist_release}\"" >> "$conf"
	echo "SHORT_NAME = \"${dist_name}\"" >> "$conf"
	echo "[install]" >> "$conf"
	echo "LIVE_MEDIA_SOURCE = \"/bootmnt/${iso_name}/${target_arch}/root-image.sfs\"" >> "$conf"
	echo "LIVE_MEDIA_DESKTOP = \"/bootmnt/${iso_name}/${target_arch}/desktop-image.sfs\"" >> "$conf"
	echo "LIVE_MEDIA_TYPE = \"squashfs\"" >> "$conf"
	echo "LIVE_USER_NAME = \"${username}\"" >> "$conf"
	echo "KERNEL = \"${kernel}\"" >> "$conf"
	echo "VMLINUZ = \"$(echo ${ALL_kver} | sed s'|/boot/||')\"" >> "$conf"
	echo "INITRAMFS = \"$(echo ${default_image} | sed s'|/boot/||')\"" >> "$conf"
	echo "FALLBACK = \"$(echo ${fallback_image} | sed s'|/boot/||')\"" >> "$conf"
	if [[ -f $1/usr/share/applications/thus.desktop && -f $1/usr/bin/kdesu ]];then
		sed -i -e 's|sudo|kdesu|g' $1/usr/share/applications/thus.desktop
	fi
}
# Top-level livefs configuration entry point: run every configure_* step
# against the image root ($1) and write the live session config.
configure_live_image(){
	msg "Configuring [livefs]"
	configure_hosts "$1"
	configure_mhwd "$1"
	configure_system "$1"
	configure_services "$1"
	configure_calamares "$1"
	# the sonar edition ships the Thus installer as well
	[[ ${edition} == "sonar" ]] && configure_thus "$1"
	write_live_session_conf "$1"
	msg "Done configuring [livefs]"
}
# (Re)build the pacman repository database for the image's mhwd repo.
make_repo(){
	repo-add $1${mhwd_repo}/mhwd.db.tar.gz $1${mhwd_repo}/*pkg*z
}
# Download the mhwd driver packages ($2) through the build mirror into
# the chroot's pacman cache, then rsync exactly those package files into
# the image's local mhwd repo directory. Returns 1 if the download fails.
copy_from_cache(){
	local list="${tmp_dir}"/mhwd-cache.list
	chroot-run \
		-r "${mountargs_ro}" \
		-w "${mountargs_rw}" \
		-B "${build_mirror}/${target_branch}" \
		"$1" \
		pacman -v -Syw $2 --noconfirm || return 1
	# resolve the same package set to URLs, then reduce to bare file names
	chroot-run \
		-r "${mountargs_ro}" \
		-w "${mountargs_rw}" \
		-B "${build_mirror}/${target_branch}" \
		"$1" \
		pacman -v -Sp $2 --noconfirm > "$list"
	sed -ni '/.pkg.tar.xz/p' "$list"
	sed -i "s/.*\///" "$list"
	msg2 "Copying mhwd package cache ..."
	rsync -v --files-from="$list" /var/cache/pacman/pkg "$1${mhwd_repo}"
}
# Create a chroot at the given path via mkchroot for the target arch;
# the rootfs image additionally gets the -L flag.
chroot_create(){
	[[ "${1##*/}" == "rootfs" ]] && local flag="-L"
	setarch "${target_arch}" \
		mkchroot ${mkchroot_args[*]} ${flag} $@
}
# Delete every *fs chroot under $1 (except mhwdfs) plus the isoroot $2.
# Each chroot is locked (fd 9) before deletion, and btrfs-backed chroots
# are removed via subvolume delete first.
chroot_clean(){
	msg "Cleaning up ..."
	for image in "$1"/*fs; do
		[[ -d ${image} ]] || continue
		local name=${image##*/}
		if [[ $name != "mhwdfs" ]];then
			msg2 "Deleting chroot [%s] ..." "$name"
			lock 9 "${image}.lock" "Locking chroot '${image}'"
			if [[ "$(stat -f -c %T "${image}")" == btrfs ]]; then
				{ type -P btrfs && btrfs subvolume delete "${image}"; } #&> /dev/null
			fi
			rm -rf --one-file-system "${image}"
		fi
	done
	exec 9>&-
	rm -rf --one-file-system "$1"
	msg2 "Deleting isoroot [%s] ..." "${2##*/}"
	rm -rf --one-file-system "$2"
}
# Strip volatile and cache data from an image tree ($1) before squashing.
# The mhwd overlay (mhwdfs) only has /var and /etc emptied; for all other
# images, initramfs images, pacman sync dbs, the package cache, logs and
# temp files are removed and backed-up locale files are restored.
clean_up_image(){
	msg2 "Cleaning [%s]" "${1##*/}"
	local path
	if [[ ${1##*/} == 'mhwdfs' ]];then
		path=$1/var
		if [[ -d $path ]];then
			find "$path" -mindepth 0 -delete &> /dev/null
		fi
		path=$1/etc
		if [[ -d $path ]];then
			find "$path" -mindepth 0 -delete &> /dev/null
		fi
	else
		[[ -f "$1/etc/locale.gen.bak" ]] && mv "$1/etc/locale.gen.bak" "$1/etc/locale.gen"
		[[ -f "$1/etc/locale.conf.bak" ]] && mv "$1/etc/locale.conf.bak" "$1/etc/locale.conf"
		path=$1/boot
		if [[ -d "$path" ]]; then
			find "$path" -name 'initramfs*.img' -delete &> /dev/null
		fi
		path=$1/var/lib/pacman/sync
		if [[ -d $path ]];then
			find "$path" -type f -delete &> /dev/null
		fi
		path=$1/var/cache/pacman/pkg
		if [[ -d $path ]]; then
			find "$path" -type f -delete &> /dev/null
		fi
		path=$1/var/log
		if [[ -d $path ]]; then
			find "$path" -type f -delete &> /dev/null
		fi
		path=$1/var/tmp
		if [[ -d $path ]];then
			find "$path" -mindepth 1 -delete &> /dev/null
		fi
		path=$1/tmp
		if [[ -d $path ]];then
			find "$path" -mindepth 1 -delete &> /dev/null
		fi
	fi
	# Remove pacman transaction leftovers. BUG FIX: the original chained
	# three unquoted -name tests, which AND together — no file name can
	# match all three patterns, so nothing was ever deleted (and the
	# unquoted globs could expand in the cwd). They must be OR-ed.
	find "$1" \( -name '*.pacnew' -o -name '*.pacsave' -o -name '*.pacorig' \) -delete
	local file=$1/boot/grub/grub.cfg
	if [[ -f "$file" ]]; then
		rm $file
	fi
}
| true
|
4a866107a5f29782a5a5e8e378031571e432e246
|
Shell
|
lpf7551321/script_shell
|
/quota_check_mini.sh
|
UTF-8
| 1,278
| 3.5
| 4
|
[] |
no_license
|
#!/bin/sh
# Find the *least* used filesystem among / and every mount whose path
# contains "mnt", print a status line, and exit 0 when it is below the
# warning thresholds (85% / 95%), or 1 when even the least used disk is
# above 95% ("overload range").
cd
#used=`df -h . | awk '{print $5}' | sed -n '2p' | sed 's/%//g'`
exit_code=1
# accumulators for the least-used disk found so far
minFS=""
minSize=""
minUsed=""
minAvail=""
minUsage=100
minMounted=""
# candidate mount points: every df line containing "mnt", plus / itself
for disk in `df -h | awk '{print $6}' | awk '/mnt/'|sed '1i /'`
do
	#echo '***********'
	cd "$disk"
	usage=`df -h . | awk '{print $5}' | sed -n '2p' | sed 's/%//g'`
	if [ "$usage" -lt "$minUsage" ]; then
		minUsage=$usage
		minFS=`df -h . | awk '{print $1}' | sed -n '2p'`
		minSize=`df -h . | awk '{print $2}' | sed -n '2p'`
		minUsed=`df -h . | awk '{print $3}' | sed -n '2p'`
		minAvail=`df -h . | awk '{print $4}' | sed -n '2p'`
		minMounted=$disk
	fi
done
#echo "Max Usage: $maxUsage"
if [ "$minUsage" -lt 85 ]; then
	echo ">>> The least used DISK $minFS on $minMounted is $minUsage% used (Size:$minSize, Used:$minUsed, Avail:$minAvail)
>>> safe regression "
	exit_code=0;
# BUG FIX: this branch read $miniUsage (typo) — an empty value that made
# the numeric test error out and the warning branch unreachable.
elif [ "$minUsage" -lt 95 ];then
	echo ">>>The least used DISK $minFS on $minMounted is $minUsage% used (Size:$minSize, Used:$minUsed, Avail:$minAvail)
>>> WARNING! could be overload "
	exit_code=0;
else
	echo ">>> The least used DISK $minFS on $minMounted is $minUsage% used (Size:$minSize, Used:$minUsed, Avail:$minAvail)
>>> overload range."
fi
echo "------- Full Usage Report -------"
df -h
exit $exit_code
| true
|
9a685411cc2c8f7cb00dc8acedd6425273ce1d67
|
Shell
|
caguerra/Burkardt-Fortran-90
|
/f_src/wave_mpi/wave_mpi.sh
|
UTF-8
| 252
| 2.703125
| 3
|
[] |
no_license
|
#! /bin/bash
#
# Compile, link and install the wave_mpi example into $HOME/bin.
# BUG FIX: the error branches used a bare `exit`, which exits with the
# status of the preceding `echo` (0) and so masked build failures from
# any caller; they now exit 1.
mpifort -c -Wall wave_mpi.f90
if [ $? -ne 0 ]; then
  echo "Compile error."
  exit 1
fi
#
mpifort wave_mpi.o
if [ $? -ne 0 ]; then
  echo "Load error."
  exit 1
fi
rm wave_mpi.o
mv a.out $HOME/bin/wave_mpi
#
echo "Normal end of execution."
| true
|
0bc9f812ef8fa4687ce41fa80bb1b14a8e524820
|
Shell
|
jacob975/deep_learning
|
/get_2mass_GCQE.sh
|
UTF-8
| 990
| 3.3125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#
# Abstract:
#	This is a program for retrieve data from the GCQE 2MASS table.
# Usage:
#	get_2mass_GCQE.sh [input wise table]
# Output:
#	1. The SEDs of sources.
#	2. The coordinates of sources.
#	3. The Quality label of sources.
# Editor:
#	Jacob975
#
# 20190527
# ####################################
# update log
# 20190527 version alpha 1
#	The code works
#-------------------
if [ "$#" -ne 1 ]; then
    echo "Illegal number of parameters"
    echo "Usage: ${0##*/} [file name]"
    exit 1
fi

# Every $1 expansion is now quoted so table names with spaces work.
# ${1::-4} strips the 4-character extension (e.g. ".dat") from the name.
awk '{print $2}' "${1}" > "${1::-4}_dist.dat"
awk '{print $12" " $16" " $20" " \
	$13" " $17" " $21}' "${1}" > "${1::-4}_mag_sed.dat"
awk '{print $24}' "${1}" > "${1::-4}_Q.dat"
awk '{print $4" "$5 }' "${1}" > "${1::-4}_coord.dat"
#awk -F "|" '{print FNR }' ${1} > wise_tracer.dat
exit 0
| true
|
c5bf553f507a33b6043f3ea0acde39ca072343c3
|
Shell
|
Rotuladores/twitsim
|
/mp
|
UTF-8
| 261
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/bash
# Make Push — preview what would be staged, then stage, commit with a
# user-supplied message and push to origin/master on confirmation.
git add . -n
echo -n "Confirm? [y/n]: "
read answer
if [ "$answer" != "y" ]; then
	echo "Abort."
	exit 0
fi
git add .
echo -n "Write commit message and press [ENTER]: "
read commit_msg
git commit -m "$commit_msg"
git push -u origin master
| true
|
1a4f461a96c654ffb78d8cf35bf206ef28044d97
|
Shell
|
lamiru/reactor
|
/deploy/_release
|
UTF-8
| 676
| 2.84375
| 3
|
[] |
no_license
|
#!/bin/bash
# Release step: as root, unpack the uploaded project tarball into
# $PROJECT_DIR (preserving the existing media/ directory across the
# wipe) and install the nginx/redis/uwsgi/celery config files.
# Expects ./.common to define PROJECT_DIR, PROJECT_NAME and PIP_VERSION,
# and the tarball to already be at /home/ec2-user/$PROJECT_NAME.tar.gz.
set -e

source ./.common

# everything inside the heredoc runs as root (-p preserves the env)
sudo su -p <<HERE
set -e
source ~/.bash_profile

# Python version
pip install --upgrade pip==$PIP_VERSION

# Extract project
mv $PROJECT_DIR/media /home/ec2-user
rm -rf $PROJECT_DIR
mkdir $PROJECT_DIR
tar -xzf /home/ec2-user/$PROJECT_NAME.tar.gz -C $PROJECT_DIR
mv /home/ec2-user/media $PROJECT_DIR

# Copy files
yes | cp -rf $PROJECT_DIR/deploy/nginx.conf /etc/nginx
yes | cp -rf $PROJECT_DIR/deploy/redis.conf /etc
yes | cp -rf $PROJECT_DIR/deploy/uwsgi.ini /etc/uwsgi
yes | cp -rf $PROJECT_DIR/deploy/uwsgi /etc/init.d
yes | cp -rf $PROJECT_DIR/deploy/celeryd.conf /etc/celeryd
yes | cp -rf $PROJECT_DIR/deploy/celeryd /etc/init.d
HERE
| true
|
18cffc7f23d2b221f6422b0dc7be3954b805d87c
|
Shell
|
stnmrshx/bakufu
|
/etc/init.d/bakufu.bash
|
UTF-8
| 1,575
| 3.84375
| 4
|
[
"WTFPL"
] |
permissive
|
#!/bin/bash
# bakufu daemon
# chkconfig: 345 20 80
# description: bakufu daemon
# processname: bakufu
#
# Classic SysV init script for bakufu: `start` backgrounds the daemon and
# records its PID in /var/run/bakufu.pid; status/stop/reload all work off
# that pidfile.
#
DAEMON_PATH="/usr/local/bakufu"
DAEMON=bakufu
DAEMONOPTS="--verbose http"
NAME=bakufu
DESC="bakufu: MySQL replication management and visualization"
PIDFILE=/var/run/$NAME.pid
SCRIPTNAME=/etc/init.d/$NAME
# raise the open-file limit for the daemon
ulimit -n 16384
# optional environment overrides
[ -f /etc/bakufu_profile ] && . /etc/bakufu_profile
case "$1" in
start)
	printf "%-50s" "Starting $NAME..."
	cd $DAEMON_PATH
	# launch in the background, append output to the log, capture the PID
	PID=$(./$DAEMON $DAEMONOPTS >> /var/log/${NAME}.log 2>&1 & echo $!)
	#echo "Saving PID" $PID " to " $PIDFILE
	if [ -z $PID ]; then
		printf "%s\n" "Fail"
		exit 1
	elif [ -z "$(ps axf | awk '{print $1}' | grep ${PID})" ]; then
		# process died immediately after forking
		printf "%s\n" "Fail"
		exit 1
	else
		echo $PID > $PIDFILE
		printf "%s\n" "Ok"
	fi
;;
status)
	printf "%-50s" "Checking $NAME..."
	if [ -f $PIDFILE ]; then
		PID=$(cat $PIDFILE)
		if [ -z "$(ps axf | awk '{print $1}' | grep ${PID})" ]; then
			printf "%s\n" "Process dead but pidfile exists"
			exit 1
		else
			echo "Running"
		fi
	else
		printf "%s\n" "Service not running"
		exit 1
	fi
;;
stop)
	printf "%-50s" "Stopping $NAME"
	PID=$(cat $PIDFILE)
	cd $DAEMON_PATH
	if [ -f $PIDFILE ]; then
		kill -TERM $PID
		printf "%s\n" "Ok"
		rm -f $PIDFILE
	else
		printf "%s\n" "pidfile not found"
		exit 1
	fi
;;
restart)
	$0 stop
	$0 start
;;
reload)
	# SIGHUP asks the running daemon to reload its configuration
	PID=$(cat $PIDFILE)
	cd $DAEMON_PATH
	if [ -f $PIDFILE ]; then
		kill -HUP $PID
		printf "%s\n" "Ok"
	else
		printf "%s\n" "pidfile not found"
		exit 1
	fi
;;
*)
	echo "Usage: $0 {status|start|stop|restart|reload}"
	exit 1
esac
| true
|
6401a649a94e0ddf46035a682013583498256a5a
|
Shell
|
fcandido/docker-day2
|
/appserver/monitoring/graph/get_cpu_info.sh
|
UTF-8
| 2,567
| 3.8125
| 4
|
[] |
no_license
|
#!/bin/bash
#
# get_cpu_info	This shell script gets cpu information from a linux server
#		and pushes each metric into memcache via mcacheconnect.
#
SERVER=`hostname`
MCONNECT="/usr/bin/mcacheconnect"
STAT="/proc/stat"
GREP='/bin/grep'
EXPR=`which expr`
COUNT=0
LOCKFILE="/tmp/get_cpu_info.lock"
if [ -f $LOCKFILE ]; then
	echo "Script $0 is already running (lockfile exists)"
	exit 1
else
	touch $LOCKFILE
	# BUG FIX: remove the lockfile on ANY exit (errors/signals included).
	# The original only removed it on the success path, so a crash left a
	# stale lock that blocked every later run until cleaned by hand.
	trap 'rm -f "$LOCKFILE"' EXIT
	# vmstat -a 2 5: take the last (5th) 2-second sample; COUNT tracks
	# which selected column we are on.
	# NOTE(review): columns $9/$10 of `vmstat -a` are bi/bo (blocks
	# in/out) yet are published as Cpu_SwapIn/Cpu_SwapOut — the cache
	# keys are kept unchanged to avoid breaking consumers; confirm
	# whether the labels or the column choice is wrong.
	for LINE in `vmstat -a 2 5 | tail -1 | awk '{print $1, $2, $9, $10, $11, $12, $13, $14, $15, $16, $17}'`
	do
	case "$COUNT" in
	0)
		echo -n "RunTime:$LINE "
		$MCONNECT -c set -s $SERVER -k Cpu_RunTime -n $LINE
		;;
	1)
		echo -n "Uninterruptible:$LINE "
		$MCONNECT -c set -s $SERVER -k Cpu_Uninterruptible -n $LINE
		;;
	2)
		echo -n "SwapIn:$LINE "
		$MCONNECT -c set -s $SERVER -k Cpu_SwapIn -n $LINE
		;;
	3)
		echo -n "SwapOut:$LINE "
		$MCONNECT -c set -s $SERVER -k Cpu_SwapOut -n $LINE
		;;
	4)
		echo -n "Interrupts:$LINE "
		$MCONNECT -c set -s $SERVER -k Cpu_Interrupts -n $LINE
		;;
	5)
		echo -n "ContextSwitches:$LINE "
		$MCONNECT -c set -s $SERVER -k Cpu_ContextSwitches -n $LINE
		;;
	6)
		echo -n "User:$LINE "
		$MCONNECT -c set -s $SERVER -k Cpu_User -n $LINE
		;;
	7)
		echo -n "System:$LINE "
		$MCONNECT -c set -s $SERVER -k Cpu_System -n $LINE
		;;
	8)
		echo -n "Idle:$LINE "
		$MCONNECT -c set -s $SERVER -k Cpu_Idle -n $LINE
		;;
	9)
		echo -n "Waiting_IO:$LINE "
		$MCONNECT -c set -s $SERVER -k Cpu_Waiting_io -n $LINE
		;;
	10)
		echo -n "Stollen:$LINE "
		$MCONNECT -c set -s $SERVER -k Cpu_Stollen -n $LINE
		;;
	esac
	COUNT=`$EXPR ${COUNT} + 1`
	done
	# lifetime process counters from /proc/stat
	PROCESSES=`$GREP -w processes $STAT | awk '{print $2}'`
	$MCONNECT -c set -s $SERVER -k Cpu_Processes -n $PROCESSES
	PROCS_RUNNING=`$GREP -w procs_running $STAT | awk '{print $2}'`
	$MCONNECT -c set -s $SERVER -k Cpu_Procs_Running -n $PROCS_RUNNING
	PROCS_BLOCKED=`$GREP -w procs_blocked $STAT | awk '{print $2}'`
	$MCONNECT -c set -s $SERVER -k Cpu_Procs_Blocked -n $PROCS_BLOCKED
	rm -f $LOCKFILE # redundant with the EXIT trap, kept for clarity
	exit 0
fi
exit 0
| true
|
a797faa429a6296b6283848bcd516f754e6638d9
|
Shell
|
emryan1/eosio-web-ide
|
/setup_hokietoken.sh
|
UTF-8
| 319
| 2.65625
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# First-time setup: fetch the EOSIO system contracts next to this script
# and bootstrap the local hokietoken blockchain.
echo "=== start of first time setup ==="

# change to script's directory
cd "$(dirname "$0")"
SCRIPTPATH="$( pwd -P )" # NOTE(review): captured but unused here — possibly consumed by the init script; confirm
echo "=== clone system contracts ==="
git clone https://github.com/EOSIO/eosio.contracts --branch v1.7.0 --single-branch
echo "=== setup hokietok ==="
scripts/init_blockchain.sh
| true
|
9794db6490006e3a0beb4a74a73b00b07bb05c59
|
Shell
|
LiQingLeo/java-cef-build
|
/make-release-version.sh
|
UTF-8
| 522
| 3.453125
| 3
|
[] |
no_license
|
#!/bin/bash
# Extract the CEF version string from $1/CMakeLists.txt and print it on
# stdout. Everything inside the { ... } group is silenced by the
# trailing `> /dev/null`; only the final echo (outside the group)
# reaches stdout, so callers can capture exactly the version.
{
	# Pattern taken from https://bitbucket.org/chromiumembedded/cef/issues/2596/improve-cef-version-number-format#comment-50679036
	export CEF_VERSION=$(perl -n -e '/^\s*set\s*\(CEF_VERSION\s+"((?:\d+\.?){3}\+g\w+\+chromium-(?:\d+\.?){4})"\s*\)/i && print "$1"' "$1/CMakeLists.txt")
	# reminder of how to build the changelog between releases (suppressed)
	echo -e '\n\nChanges'
	echo "git log --pretty=format:'%h - %s <%aN>' <after_commit>...<latest_commit>"
	if [ -z $CEF_VERSION ]; then
		echo "Failed to retrieve cef version"
		exit 1
	fi
} > /dev/null
echo "$CEF_VERSION"
|
2d4b3630bc82145816af5b4f1173fb22afecb9fb
|
Shell
|
krlmlr-archive/r-snap
|
/install.sh
|
UTF-8
| 520
| 3.359375
| 3
|
[] |
no_license
|
#!/bin/sh
# Install/update the r-snap distribution: clone (or refresh) the repo in
# $SNAP_CACHE_DIR, symlink its R binaries into /usr/local/bin, and
# install the Fortran/texinfo build dependencies.
# NOTE(review): $SNAP_CACHE_DIR is not defined here — it must come from
# the environment; confirm with the caller.
set -e
set -x

APPNAME=r-snap
APPDESC=R
BINPATH=R/bin

# log to stderr so stdout stays clean
log() {
	echo $* >> /dev/stderr
}

# refresh an existing checkout, discarding any local changes
clone() {
	( cd $SNAP_CACHE_DIR/$APPNAME && git pull && git checkout . && git clean -fdx )
}

# pull if a checkout exists; otherwise wipe and clone from scratch
clone_or_pull() {
	if ! clone; then
		rm -rf $SNAP_CACHE_DIR/$APPNAME
		git clone https://github.com/krlmlr/${APPNAME}.git $SNAP_CACHE_DIR/$APPNAME
	fi
}

set_symlinks() {
	sudo ln -s -f $SNAP_CACHE_DIR/$APPNAME/$BINPATH/* /usr/local/bin
}

clone_or_pull
set_symlinks
sudo yum install -y gcc-gfortran.x86_64 texinfo
|
2da4349fc85b9138f75bc16fb6a0e8aabf82f538
|
Shell
|
FTeichmann/soundex_metric_search
|
/benchmark.sh
|
UTF-8
| 753
| 3.21875
| 3
|
[] |
no_license
|
#!/bin/bash
###############################################
# Run intensive benchmark tests from here     #
###############################################
# Sweep line counts (10, 100, 1000) against similarity thresholds
# (0.0 .. 1.0 in 0.1 steps) over the DBpedia labels dump, running the
# gradle test task once per combination.
declare -i lines=10;
declare -i thresholds=0;
threshold=0.0;
# (removed: a dead, misspelled `threshholds=0` assignment that shadowed
# nothing and was never read)
testData="./labels_en.nt"
testDataUrl="http://downloads.dbpedia.org/3.9/en/labels_en.nt.bz2"

./gradlew downloadTestData -P testData=${testData} -P testDataUrl=${testDataUrl};

while [ $lines -le 1000 ]
do
    while [ $thresholds -le 10 ]
    do
        ./gradlew clean;
        ./gradlew test -P threshold=${threshold} -P lines=${lines} -P testData=${testData}
        # bc handles the float step; the integer counter tracks iterations
        threshold=`echo "$threshold + 0.1" | bc`
        let "thresholds+=1";
    done
    lines=`echo "$lines * 10" | bc`
    threshold=0;
    thresholds=0;
done
| true
|
7586fd0190a4911d5b3d5589d61b0dd674de8db5
|
Shell
|
fossabot/kevlar-repo
|
/salt/apply-state.sh
|
UTF-8
| 1,035
| 3.0625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Apply the local salt states in masterless (salt-call) mode: seed a few
# grains describing this user/checkout, then run a highstate against the
# states shipped next to this script. Uncomment the debug block to trace
# or dry-run.
# TBD trap on error
# for debug
# showgrains=1
# echo=echo
# dryrun="test=True"
# loglevel=debug
loglevel=info
# absolute directory this script lives in
saltdir=$(cd "$( dirname "${BASH_SOURCE[0]}" )" &>/dev/null && pwd)
# ${echo} is empty normally; setting echo=echo above turns every salt
# invocation into a dry-run print
${echo} salt-call \
	--config-dir "${saltdir}/config/" \
	grains.setval username $(whoami)
${echo} salt-call \
	--config-dir "${saltdir}/config/" \
	grains.setval homedir $HOME
${echo} salt-call \
	--config-dir "${saltdir}/config/" \
	grains.setval stateroot ${saltdir}/states
${echo} salt-call \
	--config-dir "${saltdir}/config/" \
	grains.setval saltenv dev
# apply the highstate (optionally as a dry run via ${dryrun})
${echo} salt-call \
	--config-dir "${saltdir}/config/" \
	--file-root "${saltdir}/states/" \
	--log-level $loglevel \
	state.highstate ${dryrun}
if [ $showgrains ]; then
	salt-call \
		--config-dir "${saltdir}/config/" \
		--file-root "${saltdir}/states/" \
		grains.items
fi
| true
|
f6ecb6473fefc71142dd1ebcffbd9f32faf11fbc
|
Shell
|
akauppi/just-solve
|
/tools/port-is-free.sh
|
UTF-8
| 342
| 3.84375
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Usage:
#   <<
#   $ tools/port-is-free.sh <port>
#   <<
#
# Returns as success, if 'port' is available; with non-zero if taken
#
# Requires:
#   - curl
#
if [ $# -eq 0 ]; then
  echo "Usage: $0 port"
  exit 1
fi

_PORT=$1

curl -o /dev/null --silent --fail http://localhost:$_PORT
RC=$?

# curl exit 0 means a 2xx/3xx answer; with --fail, exit 22 means the
# server answered with an HTTP error — in BOTH cases something is
# listening, so the port is taken. Other codes (e.g. 7 = connection
# refused) mean nothing is listening. BUG FIX: the original only
# treated exit 0 as "taken" and reported a misbehaving server's port
# as free.
if [[ $RC -eq 0 || $RC -eq 22 ]]; then
  exit 5
fi
| true
|
61229c1b62c24f108cd3122300ce20886ffe5e5c
|
Shell
|
sodeon/dotfiles
|
/ubuntu/bin/hardware/toggle-tv-power
|
UTF-8
| 486
| 3.5
| 4
|
[] |
no_license
|
#!/usr/bin/bash -ue
# Turn the Panasonic Viera TV on or off.
# Usage: toggle-tv-power on|off
tv=192.168.0.106

# ${1:-} keeps `set -u` from aborting with an unhelpful "unbound
# variable" error when the argument is missing; anything other than
# on/off is rejected with exit 1 as before.
case "${1:-}" in
    on)  cmd='turn_on' ;;
    off) cmd='turn_off' ;;
    *)   exit 1 ;; # Error. Support on/off arguments only
esac

for i in 0 1 2 3 4 5; do
    # When triggered by resuming from sleep, network may not be available. Add service polling to alleviate.
    if ip -brief addr | grep -q UP; then
        python3 -m panasonic_viera $tv $cmd
        # python3 -m panasonic_viera $tv $@
        exit
    fi
    sleep 1
done
| true
|
3ecf8c33c627e978c3b906ed3b11e926e356c379
|
Shell
|
fidian/bin
|
/wavtomp3
|
UTF-8
| 411
| 3.53125
| 4
|
[] |
no_license
|
#!/bin/bash
# Convert a WAV file to MP3 with lame (VBR), falling back to ffmpeg for
# WAV files lame cannot handle.
#   $1 - input .wav (required)
#   $2 - output .mp3 (default: input name with the extension swapped)
IN="$1"
OUT="$2"

if [ -z "$IN" ]; then
	echo "Specify a wav to convert"
	exit 1 # was a bare `exit` (status 0), which hid the usage error
fi

if [ -z "$OUT" ]; then
	OUT="`echo "$IN"|rev|cut -d '.' -f 2-|rev`.mp3"
fi

lame -Sh --vbr-new -V 2 "${IN}" "${OUT}"

# was `-eq 1`: fall back on ANY lame failure, not just exit code 1
if [ $? -ne 0 ]; then
	# ffmpeg: While I don't like it (can't set good VBR), it does convert
	# troublesome WAV files very nicely
	ffmpeg -y -i "${IN}" -vn -ac 2 -ar 44100 "${OUT}" 2>/dev/null
fi
|
6b234e3ef52cfb5418a81184c3c2945b6102f329
|
Shell
|
zonginator/github-contributors
|
/deploy.sh
|
UTF-8
| 304
| 2.78125
| 3
|
[] |
no_license
|
#!/usr/bin/env sh
# Publish the built site (dist/) to the zonginator.github.io repository.
# When DEPLOY_KEY points at an SSH key, git uses it for the push.

# FIXES: [[ ]] is a bashism and this script declares /bin/sh, so use the
# POSIX [ ] test; GIT_SSH_COMMAND must be EXPORTED (and the key path
# actually expanded) for the git processes below to see it — the original
# set a single-quoted, unexported value that git never used.
if [ -n "${DEPLOY_KEY:-}" ]; then
    GIT_SSH_COMMAND="ssh -i $DEPLOY_KEY"
    export GIT_SSH_COMMAND
fi

BUILD_NUMBER=${SNAP_PIPELINE_COUNTER:-DEV}

rm -rf deploy
git clone git@github.com:zonginator/zonginator.github.io.git deploy
cp -r dist/* deploy
cd deploy && git commit -am "Deploying build $BUILD_NUMBER" && git push
|
a47bc9527f52f9be72e957575ec20d5b3eb391a0
|
Shell
|
gecos-team/gecosws-installation-disk-generator
|
/config/var/gensys/live-build/gecosv2-14.04/scripts/build/rebuild_mount_squashfs_chroot
|
UTF-8
| 1,350
| 3.734375
| 4
|
[] |
no_license
|
#!/bin/sh

## live-build(7) - System Build Scripts
## Copyright (C) 2006-2013 Daniel Baumann <daniel@debian.org>
##
## This program comes with ABSOLUTELY NO WARRANTY; for details see COPYING.
## This is free software, and you are welcome to redistribute it
## under certain conditions; see COPYING for details.

# Remount ./chroot from the squashfs produced by the binary stage: the
# squashfs is loop-mounted read-only (chroot_ro) and overlaid with a
# writable chroot_changes branch via aufs.

# Including common functions
[ -e "${LIVE_BUILD}/scripts/build.sh" ] && . "${LIVE_BUILD}/scripts/build.sh" || . /usr/lib/live/build.sh

# Setting static variables
DESCRIPTION="$(Echo 'mount chroot from isos squashfs stage')"
HELP=""
USAGE="${PROGRAM} [--force]"
SQUASHFS_FILE=$(find binary/ -name '*.squashfs')

# Reading configuration files
Read_conffiles config/all config/common config/bootstrap config/chroot config/binary config/source
Set_defaults

#put code to remount here
Echo_message "Mounting squashfs file $1"

# tolerate failures while tearing down/recreating the mount directories
set +e
umount chroot
rm -rf chroot
mkdir chroot
umount chroot_ro
rm -rf chroot_ro
mkdir chroot_ro
umount chroot_changes
rm -rf chroot_changes
mkdir chroot_changes

mount -o loop -t squashfs $SQUASHFS_FILE chroot_ro
if [ $? -ne 0 ]
then
	Echo_error "Cant mount iso file $SQUASHFS_FILE"
	exit 1
fi

# writable branch (chroot_changes) layered over the read-only squashfs
mount -t aufs -o dirs=$(pwd)/chroot_changes=rw:$(pwd)/chroot_ro=ro: aufs chroot
if [ $? -ne 0 ]
then
	Echo_error "Cant mount aufs for $SQUASHFS_FILE"
	exit 1
fi

Create_stagefile .build/bootstrap
Create_stagefile .build/config
| true
|
38fc9212251d778530d7f6264f26028987bf4a96
|
Shell
|
openhdf/hdftoolbox
|
/usr/lib/enigma2/python/Plugins/Extensions/HDF-Toolbox/scripts/all-hbbtv.sh
|
UTF-8
| 2,810
| 3.3125
| 3
|
[] |
no_license
|
#!/bin/sh
# Toggle the HbbTV browser on HDF receivers: if the browser is installed,
# remove it; otherwise install it (when enough flash space is free).
# et4000/et8000/et10000 boxes use the NXtrend browser packages; every
# other box uses the Opera/hbb-browser packages. The two original
# branches were ~40 identical lines each; the shared logic now lives in
# two helpers. Also replaces the [[ ]] bashisms — this runs under /bin/sh.

line=$(grep -e et4000 -e et8000 -e et10000 /etc/enigma2/boxinformations)
boxtype=$(grep getBoxType /etc/enigma2/boxinformations | cut -d "=" -f 2)
echo Boxtype =$boxtype

# Remove the given HbbTV package(s) and show the resulting disk usage.
remove_hbbtv() {
	echo
	echo -n "HbbTV Browser found ... remove from$boxtype"
	opkg remove --force-depends "$@" > /dev/null 2>&1
	echo " ... done"
	echo "please reboot your$boxtype now"
	sync
	df -h | grep /usr
	echo
}

# Install the given HbbTV package ($1) if at least 10000 kB are free on
# either the rootfs or the /usr filesystem; otherwise report the shortage.
install_hbbtv() {
	echo
	echo "HbbTV Browser not found ... install HbbTV for$boxtype"
	freespace=`df | awk '/rootfs/ {print $4}'`
	freespace2=`df | awk '/usr/ {print $4}'`
	freeneeded=10000
	echo
	if [ "$freespace" -ge "$freeneeded" ]; then
		echo "$freespace kB available on ubi0:rootfs. $freeneeded kB are needed"
		echo
		echo -n "please wait"
		opkg install "$1" > /dev/null 2>&1
		echo " ... done"
		echo "please reboot your$boxtype now"
	elif [ "$freespace2" -ge "$freeneeded" ]; then
		echo "$freespace2 kB available on /usr. $freeneeded kB are needed"
		echo
		echo -n "please wait"
		opkg install "$1" > /dev/null 2>&1
		echo " ... done"
		echo "please reboot your$boxtype now"
	else
		echo "You need $freeneeded kB in your Flash available. But it is only $freespace kB free"
	fi
	sync
	echo
	df -h | grep /usr
	echo
}

if [ -n "$line" ]
then
	if [ -f /usr/local/NXBrowser/launcher ]; then
		remove_hbbtv enigma2-plugin-extensions-newxtrend-hbbtv
	else
		install_hbbtv enigma2-plugin-extensions-newxtrend-hbbtv
	fi
else
	if [ -f /usr/local/hbb-browser/launcher ]; then
		remove_hbbtv enigma2-plugin-extensions-hbbtv vuplus-opera-browser-util vuplus-opera-dumpait enigma2-hbbtv-util
	else
		install_hbbtv enigma2-plugin-extensions-hbbtv
	fi
fi
exit 0
| true
|
3d0d41a1a22f77006521702a2f80971c74a2ec34
|
Shell
|
emeentag/followermaze-realtime-com-server
|
/run.sh
|
UTF-8
| 588
| 3.03125
| 3
|
[] |
no_license
|
#!/bin/bash
# Run the followermaze server.
#   ./run.sh --prod  -> run the packaged jar (building it first if missing)
#   anything else    -> gradle run
export CONCURRENCY_LEVEL=0 \
       LOG_LEVEL="off" \
       REGISTRATION_SERVER_PORT=9099 \
       EVENT_RECEIVER_SERVER_PORT=9090

PROD="--prod"
JAR_PATH=build/libs/followermaze-server-all-1.0.jar

# FIX: the original combined the deprecated, ambiguous `-a` operator with
# a redundant `$# -ne 0` check ($1 = --prod already implies $# != 0).
if [ "$1" = "$PROD" ]
then
    if [ -f "$JAR_PATH" ]
    then
        echo "Application jar file is exist."
    else
        echo "Application jar file is not exist."
        echo "Building application jar file."
        gradle test
        gradle integrationTest
        gradle createJar
    fi
    echo "Running application Jar file."
    java -jar "$JAR_PATH"
else
    gradle run
fi
| true
|
dc5565c8cb5c1ad688eb66ab92ca14d87ff20095
|
Shell
|
l3nz/toonverter
|
/convert_folder.sh
|
UTF-8
| 141
| 3.171875
| 3
|
[
"MIT"
] |
permissive
|
#! /bin/bash
# Convert everything inside the given folder with convert_xvid.sh.
if [ -z "$1" ]; then
	echo "Please enter a folder to convert"
	exit 1;
fi
# FIX: `find $1/*` word-split unquoted paths (broken for folders with
# spaces); quoting "$1" and using -mindepth 1 keeps the "contents of the
# folder" semantics while handling arbitrary path names.
find "$1" -mindepth 1 -exec ./convert_xvid.sh '{}' \;
| true
|
b7dab901e791a4d7ada69ca14e93eddc064cf7b5
|
Shell
|
cjalmeida/test-batch
|
/21-create-batch-resources.sh
|
UTF-8
| 730
| 3.234375
| 3
|
[] |
no_license
|
#!/bin/bash
# Create the Azure Batch pool described by pool_configuration.json.
# Requires variables.sh / password.sh to define RESOURCE_GROUP,
# BATCH_ACCOUNT, POOL_ID, CLIENT_ID, CLIENT_SECRET and ACR_REGISTRY.
set -e
. ./variables.sh
. ./password.sh

echo "loging to batch service"
az batch account login -g $RESOURCE_GROUP -n $BATCH_ACCOUNT

echo "create batch pool. We'll use a JSON file to specify advanced"
echo "container configurations"
# envsubst gets an explicit variable list so any other $ tokens in the
# JSON template survive untouched
envsubst '$POOL_ID $CLIENT_ID $CLIENT_SECRET $ACR_REGISTRY' < ./pool_configuration.json > /tmp/pool.json
az batch pool create --account-name $BATCH_ACCOUNT --json-file /tmp/pool.json

# enable pool autoscaling
# formula=`cat ./autoscale-formula.txt`
# az batch pool autoscale enable --pool-id $POOL_ID --auto-scale-formula "$formula"

# print current pool state
state=`az batch pool show --pool-id $POOL_ID --query allocationState`
echo "Pool '$POOL_ID' is currently in state: $state"
| true
|
a1182a0da8b13aeef129b53e542cf40d6f799b17
|
Shell
|
bluesquall/lcm-syslog
|
/java/run-lcm-spy.sh
|
UTF-8
| 316
| 2.59375
| 3
|
[] |
no_license
|
#!/bin/bash
# Regenerate, compile and browse LCM types with lcm-spy.
#   $1 - directory with .lcm definitions (default ../process-management-lcmtypes)
#   $2 - output directory for generated Java  (default /tmp/lcmtypes)
LPATH=${1:-../process-management-lcmtypes}
JPATH=${2:-/tmp/lcmtypes}

# user-supplied paths are now quoted so directories with spaces work
find "$JPATH" -type f \( -name "*.java" -o -name "*.class" \) -delete
find "$LPATH" -type f -name "*.lcm" -exec lcm-gen --java --jpath "$JPATH" {} \;
find "$JPATH" -type f -name "*.java" -exec javac -cp "$JPATH" {} \;
CLASSPATH=$JPATH lcm-spy
| true
|
c1a4ab842fa9a4d2318d195836b0d12bbd50f5cc
|
Shell
|
jantomec/BEAM-App
|
/scripts/build_lin_gnu.sh
|
UTF-8
| 381
| 3.640625
| 4
|
[
"MIT"
] |
permissive
|
# Build a BEAM-App program: link the given .f90 source against the
# prebuilt object files under lib/ and lapack/lib/ with gfortran.
if [ -z "$1" ]
then
	echo "No filename. Correct usage: bash scripts/build_lin_gnu.sh filename.f90"
	exit 1 # was a bare `exit` (status 0), which hid the usage error
fi
file="${1##*/}"
filename="${file%.f90}"

# Collect the prebuilt object files. Plain string accumulation is kept
# on purpose: the .o paths contain no spaces, and $LIST1/$LIST2 are
# expanded unquoted below so they word-split back into separate args.
LIST1=""
for file in lib/*.o
do
	LIST1=$LIST1" "$file
done
LIST2=""
for file in lapack/lib/*.o
do
	LIST2=$LIST2" "$file
done

gfortran -Ilib -Ilapack/lib "$1" $LIST2 $LIST1 -o "$filename.exe"
echo "Finished building program"
| true
|
7475b4b8e565d43371ee93fae107f2e0f89112d2
|
Shell
|
garage-env/garage-env
|
/install
|
UTF-8
| 312
| 3.53125
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Download and unpack garage-env into ~/.garage (updating any previous
# install in place), then remind the user to extend PATH.
set -eu

INSTALL_DIR="$HOME/.garage"

garage_install() {
	[ -e "$INSTALL_DIR" ] || mkdir "$INSTALL_DIR"
	# --strip-components 1 drops the top-level "garage-env-master/" dir
	curl -L https://github.com/juniorz/garage-env/archive/master.tar.gz | tar -C $INSTALL_DIR --strip-components 1 -xzvf -
	echo "Add ~/.garage/cli/ to you PATH"
}

garage_install "$@"
| true
|
e48309131c85fd77693e8024871c49ee3aa50ebc
|
Shell
|
vncloudsco/bitnami-docker-ghost
|
/3/debian-10/rootfs/run.sh
|
UTF-8
| 375
| 3.453125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# shellcheck disable=SC1091

. /opt/bitnami/base/functions
. /opt/bitnami/base/helpers

# Launch Ghost (and tail its log) — dropped to the unprivileged "ghost"
# user via gosu when the container was started as root, otherwise run
# as the current user.
USER=ghost
START_COMMAND="ghost start && ghost log -f"
export NODE_ENV=production

cd /opt/bitnami/ghost || exit 1

# If container is started as `root` user
if [ $EUID -eq 0 ]; then
    exec gosu ${USER} bash -c "${START_COMMAND}"
else
    exec bash -c "$START_COMMAND"
fi
| true
|
2a040a3cd21979435a09895c19859f5233c561d7
|
Shell
|
netarchivesuite/netsearch
|
/netarchive-arctika/src/test/resources/ssh_test_script.sh
|
UTF-8
| 199
| 2.84375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
#
# Dummy script that ignores all processing and reports that all jobs succeds
#
# Input: solr warc*
echo "Solr: $1"
shift # Ignore Solr
for W in "$@"; do
echo "0 $W"
done
| true
|
9215ed89b5b46d63c98235b26b080f10af520815
|
Shell
|
veip007/hj
|
/hj.sh
|
UTF-8
| 7,945
| 2.828125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
PATH=/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin:/usr/local/sbin:~/bin
export PATH
sh_ver="2.1.3"
#0升级脚本
Update_Shell(){
sh_new_ver=$(wget --no-check-certificate -qO- -t1 -T3 "https://raw.githubusercontent.com/veip007/hj/master/hj.sh"|grep 'sh_ver="'|awk -F "=" '{print $NF}'|sed 's/\"//g'|head -1) && sh_new_type="github"
[[ -z ${sh_new_ver} ]] && echo -e "${Error} 无法链接到 Github !" && exit 0
wget -N --no-check-certificate "https://raw.githubusercontent.com/veip007/hj/master/hj.sh" && chmod +x hj.sh
echo -e "脚本已更新为最新版本[ ${sh_new_ver} ] !(注意:因为更新方式为直接覆盖当前运行的脚本,所以可能下面会提示一些报错,无视即可)" && exit 0
}
#—————————系统类—————————
#1改ls颜色(debian)
ls_color(){
wget https://raw.githubusercontent.com/veip007/hj/master/bashrc && mv /root/bashrc /root/.bashrc
}
#2更改为中国时区(24h制,重启生效)
timezone(){
cp /usr/share/zoneinfo/Asia/Shanghai /etc/localtime && hwclock -w && echo $(curl -sSL "https://github.com/veip007/hj/raw/main/time") >> ~/.bashrc
}
#3安装系统依赖
yl(){
apt update && apt list --upgradable && apt install -y wget && apt install -y curl && apt install -y vim
}
#4 Nginx进程守护
Nginx(){
wget -N --no-check-certificate https://raw.githubusercontent.com/veip007/hj/master/ng.sh && chmod +x ng.sh && crontab -l > conf && echo "*/1 * * * * ./ng.sh" >> conf && crontab conf && rm -f conf
}
#5小鸡性能测试
View_superbench(){
bash <(curl -s -L https://raw.githubusercontent.com/veip007/cesu/master/superbench.sh)
}
#6回程线路测试
View_huicheng(){
wget -N --no-check-certificate https://raw.githubusercontent.com/veip007/huicheng/master/huicheng && chmod +x huicheng
}
#7docker安装
docker(){
wget -qO- get.docker.com | sh
}
#8screen安装
screen(){
apt install screen -y
}
#—————————代理类—————————
#11安装V2ary_233一键
Install_V2ray(){
bash <(curl -s -L https://raw.githubusercontent.com/veip007/v2ray/master/v2.sh)
}
#12 V2八合一脚本
v2_8(){
bash <(curl -s -L https://raw.githubusercontent.com/veip007/v2ray-agent/master/install.sh)
}
#13 xray
xray(){
bash <(curl -sSL "https://raw.githubusercontent.com/veip007/scripts/master/xray.sh")
}
#14安装SSR多用户版
Install_ssr(){
bash <(curl -s -L https://raw.githubusercontent.com/veip007/doubi/master/ssrmu.sh)
}
#15 trojan-go
trojan-go(){
bash <(curl -sSL "https://raw.githubusercontent.com/veip007/hj/main/trojan-go.sh")
}
#16安装Tg专用代理
Tg_socks(){
bash <(curl -s -L https://raw.githubusercontent.com/veip007/mtg-dist/master/install.sh)
}
#17安装Goflyway
Install_goflyway(){
bash <(curl -s -L https://git.io/goflyway.sh && chmod +x goflyway.sh)
}
#18 Hysteria安装
Hysteria(){
bash <(curl -sSL "https://raw.githubusercontent.com/emptysuns/Hi_Hysteria/main/server/install.sh")
}
#19 安装warp
warp(){
bash <(curl -sSL "https://raw.githubusercontent.com/fscarmen/warp/main/menu.sh")
}
#—————————加速类—————————
#31一键开启默认bbr
open_bbr(){
modprobe tcp_bbr && echo "tcp_bbr" | tee --append /etc/modules-load.d/modules.conf && echo "net.core.default_qdisc=fq" | tee --append /etc/sysctl.conf && echo "net.ipv4.tcp_congestion_control=bbr" | tee --append /etc/sysctl.conf && sysctl -p && sysctl net.ipv4.tcp_available_congestion_control && sysctl net.ipv4.tcp_congestion_control && lsmod | grep bbr
}
#32安装BBR 锐速
bbr_ruisu(){
bash <(curl -s -L https://raw.githubusercontent.com/veip007/Linux-NetSpeed/master/tcp.sh)
}
#33谷歌 BBR2 BBRV2
Google_bbr2(){
bash <(curl -s -L https://raw.githubusercontent.com/yeyingorg/bbr2.sh/master/bbr2.sh)
}
#—————————辅助类—————————
#41安装Aria2
Aria2(){
bash <(curl -s -L https://raw.githubusercontent.com/veip007/doubi/master/aria2.sh)
}
#42安装云监控
Install_status(){
bash <(curl -s -L https://raw.githubusercontent.com/veip007/doubi/master/status.sh)
}
#43一键DD包(OD源)
DD_OD(){
bash <(curl -s -L https://raw.githubusercontent.com/veip007/dd/master/dd-od.sh)
}
#44一键DD包(GD源)
DD_GD(){
bash <(curl -s -L https://raw.githubusercontent.com/veip007/dd/master/dd-gd.sh)
}
#45 Netflix解锁检测
netflix(){
bash <(curl -sSL "https://github.com/veip007/Netflix_Unlock_Information/raw/main/netflix.sh")
}
action=$1
if [[ "${action}" == "monitor" ]]; then
crontab_monitor_goflyway
else
echo && echo -e "
+-------------------------------------------------------------+
| 懒人专用 |
| 小鸡一键管理脚本 ${Red_font_prefix}[v${sh_ver}]${Font_color_suffix} |
| 一键在手小鸡无忧 |
| 欢迎提交一键脚本 |
+-------------------------------------------------------------+
${Green_font_prefix} 0.${Font_color_suffix} 升级脚本
——————————————————1.系统类——————————————————
${Green_font_prefix} 1.${Font_color_suffix} 改ls颜色(debian) ${Green_font_prefix} 2.${Font_color_suffix} 更改为中国时区(24h制,重启生效)
${Green_font_prefix} 3.${Font_color_suffix} 安装系统依赖 ${Green_font_prefix} 4.${Font_color_suffix} Nginx进程守护
${Green_font_prefix} 5.${Font_color_suffix} 小鸡性能测试 ${Green_font_prefix} 6.${Font_color_suffix} 回程线路测试:命令:./huicheng 您的IP
${Green_font_prefix} 7.${Font_color_suffix} docker安装 ${Green_font_prefix} 8.${Font_color_suffix} screen安装
——————————————————2.代理类——————————————————
${Green_font_prefix} 11.${Font_color_suffix} 安装V2ary_233一键 ${Green_font_prefix} 12.${Font_color_suffix} V2八合一脚本快捷命令:vasma
${Green_font_prefix} 13.${Font_color_suffix} xray安装 ${Green_font_prefix} 14.${Font_color_suffix} 安装SSR多用户版
${Green_font_prefix} 15.${Font_color_suffix} trojan-go安装 ${Green_font_prefix} 16.${Font_color_suffix} Tg专用代理(Go版)
${Green_font_prefix} 17.${Font_color_suffix} 安装Goflyway ${Green_font_prefix} 18.${Font_color_suffix} Hysteria安装
${Green_font_prefix} 19.${Font_color_suffix} 安装warp
——————————————————3.加速类——————————————————
${Green_font_prefix} 31.${Font_color_suffix} 一键开启默认bbr ${Green_font_prefix} 32.${Font_color_suffix} 加速系列:Bbr系列、锐速
${Green_font_prefix} 33.${Font_color_suffix} 安装谷歌 BBR2 BBRV2
——————————————————4.辅助类——————————————————
${Green_font_prefix} 41.${Font_color_suffix} 安装Aria2 ${Green_font_prefix} 42.${Font_color_suffix} 云监控
${Green_font_prefix} 43.${Font_color_suffix} 傻瓜式一键DD包(OD源) ${Green_font_prefix} 44.${Font_color_suffix} 傻瓜式一键DD包(GD源)
${Green_font_prefix} 45.${Font_color_suffix} Netflix解锁检测
" && echo
fi
echo
read -e -p " 请输入数字 [0-45]:" num
case "$num" in
0)
Update_Shell
;;
1)
ls_color
;;
2)
timezone
;;
3)
yl
;;
4)
Nginx
;;
5)
View_superbench
;;
6)
View_huicheng
;;
7)
docker
;;
8)
screen
;;
11)
Install_V2ray
;;
12)
v2_8
;;
13)
xray
;;
14)
Install_ssr
;;
15)
trojan-go
;;
16)
Tg_socks
;;
17)
Install_goflyway
;;
18)
Hysteria
;;
19)
warp
;;
31)
open_bbr
;;
32)
bbr_ruisu
;;
33)
Google_bbr2
;;
41)
Aria2
;;
42)
Install_status
;;
43)
DD_OD
;;
44)
DD_GD
;;
45)
netflix
;;
*)
echo "请输入正确数字 [0-45]"
;;
esac
| true
|
044c4201067983e03351ea8306e5ec5cd8b587f5
|
Shell
|
HumboldtWirelessLab/brn-testbed
|
/brn-testbed-driver.sh
|
UTF-8
| 3,222
| 3.40625
| 3
|
[] |
no_license
|
#!/bin/sh
PATH=$PATH:/testbedhome/testbed/software/openwrt/backfire-x86/staging_dir/toolchain-i386_gcc-4.1.2_uClibc-0.9.30.1/usr/bin
PATH=$PATH:/testbedhome/testbed/software/openwrt/backfire-mips/staging_dir/toolchain-mipsel_gcc-4.1.2_uClibc-0.9.30.1/usr/bin
PATH=$PATH:/testbedhome/testbed/software/openwrt/backfire-wndr3700/staging_dir/toolchain-mips_r2_gcc-4.1.2_uClibc-0.9.30.1/usr/bin/
if [ "x$CPUS" = "x" ]; then
if [ -f /proc/cpuinfo ]; then
CPUS=`grep -e "^processor" /proc/cpuinfo | wc -l`
else
CPUS=1
fi
fi
dir=$(dirname "$0")
pwd=$(pwd)
SIGN=`echo $dir | cut -b 1`
case "$SIGN" in
"/")
DIR=$dir
ABS=`echo $0 | cut -b 1`
if [ "$ABS" = "/" ]; then
FULLNAME=$0
else
FULLNAME=$DIR/$0
fi
;;
".")
DIR=$pwd/$dir
FULLNAME=$pwd/$0
;;
*)
echo "Error while getting directory"
exit -1
;;
esac
KERNELBASE="/testbedhome/testbed/software/kernel-"
if [ "x$BRN_TOOLS_PATH/helper" = "x" ]; then
echo "Set BRN_TOOLS_PATH or helper is missing"
exit 1
fi
if [ "x$ARCHS" = "x" ]; then
ARCHS="mips mipsel i386"
fi
if [ "x$KERNELDIRS" != "x" ]; then
SETKERNELDIRS=1
else
SETKERNELDIRS=0
fi
for i in $ARCHS; do
which $i-linux-uclibc-gcc > /dev/null
if [ $? -eq 0 ]; then
echo "Found $i-linux-uclibc-gcc"
fi
ARCHALIAS=`cat $FULLNAME | grep -e "^#alias $i " | awk '{print $3}'`
BUILDALIAS=`cat $FULLNAME | grep -e "^#build $i " | awk '{print $3}'`
DRIVER=`cat $FULLNAME | grep -e "^#driver $ARCHALIAS " | awk '{print $3}'`
LINK=`cat $FULLNAME | grep -e "^#link $ARCHALIAS " | awk '{print $3}'`
if [ $SETKERNELDIRS -eq 0 ]; then
KERNELDIRS=`(cd $KERNELBASE$ARCHALIAS; ls -d linux*)`
fi
for k in $KERNELDIRS; do
pure_k_version=`echo $k | sed "s#linux-headers-##g" | sed "s#linux-##g"`
if [ -f $KERNELBASE$ARCHALIAS/$k/.config ]; then
for d in $DRIVER; do
echo "Build $d for $ARCHALIAS $KERNELBASE$ARCHALIAS/$k -> $BRN_TOOLS_PATH/helper/nodes/lib/modules/$ARCHALIAS/$pure_k_version"
echo "Build $d for $ARCHALIAS $KERNELBASE$ARCHALIAS/$k -> $BRN_TOOLS_PATH/helper/nodes/lib/modules/$ARCHALIAS/$pure_k_version" >> build.log
echo "(cd $DIR/../brn-driver; KERNELPATH=$KERNELBASE$ARCHALIAS/$k/ ARCH=$BUILDALIAS COMPILER_PREFIX=$i-linux-uclibc- TARGETDIR=$BRN_TOOLS_PATH/helper/nodes/lib/modules/$ARCHALIAS/$pure_k_version sh ./brn-driver.sh build-modules $d)" >> build.log
(cd $DIR/../brn-driver; KERNELPATH=$KERNELBASE$ARCHALIAS/$k/ ARCH=$BUILDALIAS COMPILER_PREFIX=$i-linux-uclibc- TARGETDIR=$BRN_TOOLS_PATH/helper/nodes/lib/modules/$ARCHALIAS/$pure_k_version sh ./brn-driver.sh build-modules $d)
done
fi
done
if [ -e $BRN_TOOLS_PATH/helper/nodes/lib/modules/$ARCHALIAS/ ]; then
for l in $LINK; do
if [ ! -e $BRN_TOOLS_PATH/helper/nodes/lib/modules/$l ]; then
(cd $BRN_TOOLS_PATH/helper/nodes/lib/modules/; ln -s $ARCHALIAS $l)
fi
done
fi
done
#
# C O N F I G P A R T
#
#alias mips mips-wndr3700
#alias mipsel mips
#alias i386 x86
#build mips mips
#build mipsel mips
#build i386 i386
#link x86 i386
#link x86 i386
#link x86 i486
#link x86 i586
#link x86 i686
#driver x86 madwifi
#driver mips madwifi
#driver mips-wndr3700 ath
| true
|
a2666771f8d3b847157f8593d1ae9fa57349f3d8
|
Shell
|
ECP-WarpX/WarpX
|
/.github/workflows/source/inputsNotTested
|
UTF-8
| 1,003
| 4.09375
| 4
|
[
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause-LBNL"
] |
permissive
|
#!/usr/bin/env bash
# Search input files in Examples/ and verify if all input files are tested
set -eu -o pipefail
ok=0
for file in $(find Examples -type f)
do
# Name of file without path
filename=$(basename $file)
# If file is an input file
if [[ ${filename:0:6 } =~ inputs ]] ||
[[ ${filename:0:12} =~ PICMI_inputs ]]
then
cr=$'$'
file_cr="$file$cr"
# Search file name in test list
string_match=$(grep -m1 "$file_cr" Regression/WarpX-tests.ini || echo "")
# If match is empty, inputs examples is not tested
if [[ -z $string_match ]]
then
echo "$file is not tested!"
ok=1
fi
fi
done
if [ $ok -ne 0 ]
then
echo ""
echo "All files in Examples that start with one of"
echo " - inputs"
echo " - PICMI_inputs"
echo "must have an automated test."
echo "Please add a test in Regression/WarpX-tests.ini"
echo "for all files listed above."
fi
exit $ok
| true
|
58cd325c5bcaf56d2821966ecaf4450b7fca3802
|
Shell
|
pkafma-aon/subnet
|
/build_deb.sh
|
UTF-8
| 1,162
| 3.625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
set -e
SCRIPT_BASE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"
BUILD_DIR=${1%/}
if [ -f "${BUILD_DIR}" ]; then
echo "Cannot build into '${BUILD_DIR}': it is a file."
exit 1
fi
if [ -d "${BUILD_DIR}" ]; then
rm -rfv ${BUILD_DIR}/*
fi
mkdir -pv ${BUILD_DIR}/src/github.com/twitchyliquid64/subnet
cp -rv DEBIAN/ subnet/ vendor/ *.go ${BUILD_DIR}/src/github.com/twitchyliquid64/subnet
export GOPATH="${BUILD_DIR}"
mkdir -pv "${BUILD_DIR}/usr/bin"
go build -v -o "${BUILD_DIR}/usr/bin/subnet" github.com/twitchyliquid64/subnet
rm -rf "${BUILD_DIR}/src"
mkdir -pv "${BUILD_DIR}/DEBIAN"
cp -rv ${SCRIPT_BASE_DIR}/DEBIAN/* "${BUILD_DIR}/DEBIAN"
ARCH=`dpkg --print-architecture`
sed -i "s/ARCH/${ARCH}/g" "${BUILD_DIR}/DEBIAN/control"
cat > ${BUILD_DIR}/usr/bin/subnet-make-certs << "EOF"
#!/bin/bash
set -e
subnet --mode init-server-certs --cert server.certPEM --key server.keyPEM --ca ca.certPEM --ca_key ca.keyPEM && \
echo "" && \
echo "Wrote: server.certPEM, server.keyPEM, ca.certPEM, ca.keyPEM." && \
echo "Keep them safe."
EOF
chmod +x ${BUILD_DIR}/usr/bin/subnet-make-certs
dpkg-deb --build "${BUILD_DIR}" ./
| true
|
d4af7e8b9c7637be79157ea295802b5e9219f732
|
Shell
|
ashok24r/miniproject
|
/component/gerrit_tracker/cleanup.sh
|
UTF-8
| 246
| 2.625
| 3
|
[] |
no_license
|
cleanup_location=$1
echo "Cleaning up requests"
find $cleanup_location -iname "*.txt" -mmin +5 -exec rm -f {} \;
find $cleanup_location -iname "*.json" -mmin +5 -exec rm -f {} \;
find $cleanup_location -iname "*.html" -mmin +5 -exec rm -f {} \;
| true
|
245857ccc5d11421878d7a405382a63678d9808a
|
Shell
|
openstack-yak/dev-multinode
|
/multinode.sh
|
UTF-8
| 5,412
| 2.78125
| 3
|
[] |
no_license
|
#!/bin/bash
source ~ubuntu/counts.sh
GLOBALS_FILE=/etc/kolla/globals.yml
DOCKER_SERVICE=/etc/systemd/system/docker.service
INVENTORY=~ubuntu/kolla/ansible/inventory/multinode
DISK=/dev/vdc
PRIMARY_IP=$(hostname -I | awk '{ print $1 }')
# echo "${PRIMARY_IP} $(hostname)" | sudo tee -a /etc/hosts %>/dev/null
chmod 0600 ~ubuntu/.ssh/id_rsa
sudo apt-get update
# sudo apt-get install -y linux-image-generic-lts-wily
sudo apt-get install -y python-dev libffi-dev gcc libssl-dev ntp python-pip
sudo pip install -U pip
curl -sSL https://get.docker.io | bash
sudo cp /lib/systemd/system/docker.service ${DOCKER_SERVICE}
sudo sed -i 's/process$/process\nMountFlags=shared/' ${DOCKER_SERVICE}
# Prepare docker registry
sudo docker run -d -p 4000:5000 --restart=always --name registry registry:2
echo "DOCKER_OPTS=\"--insecure-registry ${PRIMARY_IP}:4000\"" | sudo tee -a /etc/default/docker %>/dev/null
sudo sed -i 's|Service\]|Service\]\nEnvironmentFile=/etc/default/docker/|' ${DOCKER_SERVICE}
sudo sed -i 's|ExecStart=.*|ExecStart=/usr/bin/dockerd -H fd:// $DOCKER_OPTS|' ${DOCKER_SERVICE}
sudo systemctl daemon-reload
sudo systemctl restart docker
sudo usermod -aG docker ubuntu
sudo pip install -U docker-py ansible
git clone https://git.openstack.org/openstack/kolla
sudo pip install -r kolla/requirements.txt -r kolla/test-requirements.txt
sudo cp -r etc/kolla /etc/
sudo pip install -U python-openstackclient python-neutronclient
sudo modprobe configfs
sudo systemctl start sys-kernel-config.mount
sudo sed -i 's/^#kolla_base_distro.*/kolla_base_distro: "ubuntu"/' $GLOBALS_FILE
sudo sed -i 's/^#kolla_install_type.*/kolla_install_type: "source"/' $GLOBALS_FILE
sudo sed -i 's/^kolla_internal_vip_address.*/kolla_internal_vip_address: "192.168.50.254"/' $GLOBALS_FILE
#sudo sed -i 's/^kolla_external_vip_address.*/kolla_external_vip_address: "172.99.106.249"/' $GLOBALS_FILE
sudo sed -i 's/^#network_interface.*/network_interface: "ens3"/g' $GLOBALS_FILE
sudo sed -i 's/^#neutron_external_interface.*/neutron_external_interface: "ens4"/g' $GLOBALS_FILE
# Enable required services
#sudo sed -i 's/#enable_barbican:.*/enable_barbican: "yes"/' $GLOBALS_FILE
sudo sed -i 's/#enable_cinder:.*/enable_cinder: "yes"/' $GLOBALS_FILE
# Cinder LVM backend
#sudo sed -i 's/#enable_cinder_backend_lvm:.*/enable_cinder_backend_lvm: "yes"/' $GLOBALS_FILE
sudo sed -i 's/#enable_heat:.*/enable_heat: "yes"/' $GLOBALS_FILE
sudo sed -i 's/#enable_horizon:.*/enable_horizon: "yes"/' $GLOBALS_FILE
#sudo sed -i 's/#enable_sahara:.*/enable_sahara: "yes"/' $GLOBALS_FILE
#sudo sed -i 's/#enable_murano:.*/enable_murano: "yes"/' $GLOBALS_FILE
#sudo sed -i 's/#enable_magnum:.*/enable_magnum: "yes"/' $GLOBALS_FILE
#sudo sed -i 's/#enable_manila:.*/enable_manila: "yes"/' $GLOBALS_FILE
#sudo sed -i 's/#enable_manila_backend_generic:.*/enable_manila_backend_generic: "yes"/' $GLOBALS_FILE
#sudo sed -i 's/#enable_neutron_lbaas:.*/enable_neutron_lbaas: "yes"/' $GLOBALS_FILE
sudo sed -i 's/#enable_ceph:.*/enable_ceph: "yes"/' $GLOBALS_FILE
sudo sed -i 's/#enable_ceph_rgw:.*/enable_ceph_rgw: "yes"/' $GLOBALS_FILE
# Ceilometer
sudo sed -i 's/#enable_aodh:.*/enable_aodh: "yes"/' $GLOBALS_FILE
sudo sed -i 's/#enable_ceilometer:.*/enable_ceilometer: "yes"/' $GLOBALS_FILE
# To use Gnocchi as DB in Ceilometer
sudo sed -i 's/#enable_gnocchi:.*/enable_gnocchi: "yes"/' $GLOBALS_FILE
sudo sed -i 's/#ceilometer_database_type:.*/ceilometer_database_type: "gnocchi"/' $GLOBALS_FILE
# To use MongDB as DB in Ceilometer
#sudo sed -i 's/#enable_mongodb:.*/enable_mongodb: "yes"/' $GLOBALS_FILE
sudo mkdir -p /etc/kolla/config
# Reconfigure Manila to use different Flavor ID
#cat <<-EOF | sudo tee /etc/kolla/config/manila-share.conf
#[global]
#service_instance_flavor_id = 2
#EOF
# Reconfigure CEPH to use just 1 drive
#cat <<-EOF | sudo tee /etc/kolla/config/ceph.conf
#[global]
#osd pool default size = 1
#osd pool default min size = 1
#EOF
# Configure inventory
sudo mkdir -p /etc/ansible
echo -e "[defaults]\nhost_key_checking = False" | sudo tee /etc/ansible/ansible.cfg %>/dev/null
sed -i "s|control01|192.168.50.[5:$(( 4 + CONTROLLER_COUNT ))] ansible_become=True|" $INVENTORY
sed -i "s|control0.||g" $INVENTORY
sed -i "s|network01|192.168.50.[5:$(( 4 + CONTROLLER_COUNT ))] ansible_become=True|g" $INVENTORY
sed -i "s|monitoring01|192.168.50.[5:$(( 4 + CONTROLLER_COUNT ))] ansible_become=True|g" $INVENTORY
sed -i "s|compute01|192.168.50.[10:$(( 9 + COMPUTE_COUNT ))] ansible_become=True|g" $INVENTORY
sed -i "s|storage01|192.168.50.[10:$(( 9 + COMPUTE_COUNT ))] ansible_become=True|g" $INVENTORY
# Install python2 required by ansible <2.2.0
ansible -m raw -i ~ubuntu/kolla/ansible/inventory/multinode -a "apt-get install -y python" all
# Configure disk to be used for Ceph
ansible -m shell -i ~ubuntu/kolla/ansible/inventory/multinode -a "parted $DISK -s -- mklabel gpt mkpart KOLLA_CEPH_OSD_BOOTSTRAP 1 -1" storage
# The rest of the commands execute from kolla dir
cd kolla
# Bootstrap servers
tools/kolla-ansible -i $INVENTORY bootstrap-servers
# Change user to Kolla
# sed -i "s|become=True|become=True ansible_user=kolla|g" $INVENTORY
# Build all images in registry
# tools/build.py -b ubuntu -t source
sudo tools/generate_passwords.py
tools/kolla-ansible -i $INVENTORY prechecks
tools/kolla-ansible -i $INVENTORY pull
tools/kolla-ansible -i $INVENTORY deploy
sudo tools/kolla-ansible post-deploy
| true
|
bae1e990433170592bff7896ee825fc3745396c9
|
Shell
|
stayhsfLee/tc-all
|
/bin/webapp/server-stop.sh
|
UTF-8
| 1,236
| 3.9375
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
#if [[ "admin" != $(/usr/bin/whoami) ]]; then
# echo "only executable under admin user, exiting";
# exit 1;
#fi
SCRIPT_NAME=$0;
BIN_DIR=`dirname ${SCRIPT_NAME}`;
MS_HOME=$(cd ${BIN_DIR}/..; pwd);
MAIN_CLASS="com.thenorthw.tc.web.Main"
PID_FILE="${MS_HOME}/.tcserver.pid"
function is_alive()
{
old_pid=`cat ${PID_FILE}`;
pids=`ps aux | grep ${MAIN_CLASS} | grep java | grep -v grep | awk '{print $2}'`;
for pid in ${pids}; do
if [ "${pid}" == "${old_pid}" ]; then
return 1
fi
done
return 0
}
function stop()
{
(is_alive)
if [ $? == 0 ] ; then
return 0
fi
old_pid=`cat ${PID_FILE}`;
echo "discover pid:${PID_FILE}";
kill $1 $old_pid;
(is_alive)
if [ $? == 0 ] ; then
return 0
fi
return 1
}
# 不存在pidFile,表明已经停止
if [ ! -f ${PID_FILE} ]; then
echo "pid file ${PID_FILE} not found";
exit 0;
fi
# 循环3次kill
iter=0
while [[ $iter -lt 3 ]]; do
stop
if [ $? == 0 ] ; then
echo "TeamCoding Server Process Stopped.";
rm -rf $PID_FILE;
exit 0;
fi
sleep 1
(( iter=iter+1 ))
done
echo "use \"kill -9\" to kill"
(stop -9)
sleep 1
| true
|
dfdfbcad541a751b2749a22a1b24c3ab41f121c1
|
Shell
|
anikchowdhury440/EmployeeWageComputation
|
/empWageComputation.sh
|
UTF-8
| 1,009
| 3.71875
| 4
|
[] |
no_license
|
#!/bin/bash -x
# CONSTANTS FOR THE PROGRAM
IS_PART_TIME=1
IS_FULL_TIME=2
MAX_HRS_IN_MONTH=100
EMP_RATE_PER_HR=20
NUM_WORKING_DAYS=20
#VARIABLES
totalEmpHrs=0
totalWorkingDays=0
function getWorkingHours(){
case $1 in
$IS_FULL_TIME)
empHrs=8
;;
$IS_PART_TIME)
empHrs=4
;;
*)
empHrs=0
;;
esac
echo $empHrs
}
function calcDailyWage() {
local workHrs=$1
wage=$(( $workHrs*$EMP_RATE_PER_HR ))
echo $wage
}
while [[ $totalEmpHrs -lt $MAX_HRS_IN_MONTH && $totalWorkingDays -lt $NUM_WORKING_DAYS ]]
do
((totalWorkingDays++))
workHours="$( getWorkingHours $((RANDOM%3)) )"
totalEmpHrs=$(($totalEmpHrs+$workHours))
empDailyWage[$totalWorkingDays]="$( calcDailyWage $workHours )"
done
totalSalary="$( calcDailyWage $totalEmpHrs )"
echo "Daily wage " ${empDailyWage[@]}
echo "All Days " ${!empDailyWage[@]}
| true
|
ba403fc079deb240760fe254b6a6e6e0074b015f
|
Shell
|
strengthen8/python_tools
|
/API状态监控/api_request_of_last_hour_statics.sh
|
UTF-8
| 671
| 2.890625
| 3
|
[] |
no_license
|
#!/usr/bin/bash
host=`/usr/local/zabbix/sbin/zabbix_agentd -t agent.hostname | sed -n 's/.*|\(.*\)]/\1/p'`
demo_log_dir='/data/logs/nginx/demo/'
day=`date +%Y%m%d_`
h=`date +%H`
file_time=`date +%Y%m%d_%H -d '-1 hours'`
last_h=`expr $h - 1`
file_date=${day}'*'${last_h}
demo_file=`find ${demo_log_dir} -name access_${file_time}*.log`
demo_words='/api/recommend/demo'
demo_sum=`grep "$demo_words" ${demo_file}|wc -l`
echo '************************************************'
date +"%Y%m%d %H:%M:%S"
echo $demo_sum
/usr/local/zabbix/bin/zabbix_sender -z 192.168.1.10 -s $host -k demo_api_requests -o $demo_sum
echo '************************************************'
| true
|
1da11a6bd446fdb252bcbfb90a3c6bab662b48ea
|
Shell
|
GNU-Pony/netcfg
|
/src/globals
|
UTF-8
| 3,009
| 4.21875
| 4
|
[] |
no_license
|
#!/bin/bash
# /usr/lib/networks/globals
#
# All +x files in /usr/lib/network/hooks will be sourced when this file is.
# Hook files can override any of the utility functions defined here for custom
# behavior (such as logging error messages to syslog). This lets us keep netcfg
# simple but gives it the flexibility for users to make modular use of it to do
# more complex things
### Globals
PROFILE_DIR="/etc/network.d"
IFACE_DIR="$PROFILE_DIR/interfaces"
SUBR_DIR="/usr/lib/network"
HOOKS_DIR="$SUBR_DIR/hooks"
CONN_DIR="$SUBR_DIR/connections"
STATE_DIR="/run/network"
### Logging/Error reporting
function report_err {
echo "$*"
}
function report_notice {
echo "$*"
}
function report_debug {
checkyesno "$NETCFG_DEBUG" && echo "DEBUG: $*" >&2
}
function report_try {
# This needs -n and a trailing space.
echo -n ":: $* "
REPORT_TRYING=1
}
function report_fail {
if [[ -n "$*" ]]; then
if (( REPORT_TRYING )); then
echo "- $* [fail]"
REPORT_TRYING=
else
echo "$*"
fi
elif (( REPORT_TRYING )); then
echo "[fail]"
REPORT_TRYING=
fi
}
function report_success {
if [[ -n "$*" ]]; then
# This needs -n and a trailing space.
echo -n "- $* "
fi
echo "[done]"
REPORT_TRYING=
}
### For calling scripts only; don't use in library functions
function exit_stderr { echo "$*" >&2; exit 1; }
function exit_err { report_err "$*"; exit 1; }
function exit_fail { report_fail "$*"; exit 1; }
### From FreeBSD's /etc/rc.subr
##
# checkyesno var
# Test $1 variable, and warn if not set to YES or NO.
# Return 0 if it's "yes" (et al), nonzero otherwise.
# To default to yes, do: "checkyesno ${VAR:-yes}".
#
function checkyesno() {
local _value="$1"
#debug "checkyesno: $1 is set to $_value."
case "$_value" in
# "yes", "true", "on", or "1"
[Yy][Ee][Ss]|[Tt][Rr][Uu][Ee]|[Oo][Nn]|1)
return 0
;;
# "no", "false", "off", or "0"
[Nn][Oo]|[Ff][Aa][Ll][Ss][Ee]|[Oo][Ff][Ff]|0)
return 1
;;
*)
#warn "\$${1} is not set properly - see rc.conf(5)."
return 1
;;
esac
}
## Check if variable is a member of an array
# $1: the variable to find
# $2...: the array elements
function inarray() {
local item search="$1"
shift
for item in "$@"; do
if [[ "$item" == "$search" ]]; then
return 0
fi
done
return 1
}
## Waits until a statement succeeds or a timeout occurs
# $1: timeout in seconds
# $2...: condition command
function timeout_wait() {
local timeout="$1"
(( timeout *= 10 ))
shift
while ! eval "$*"; do
(( timeout-- > 0 )) || return 1
sleep 0.1
done
return 0
}
### Load all +x files in $HOOKS_DIR
function load_hooks() {
local hook
for hook in $(find -L "$HOOKS_DIR/" -maxdepth 1 -type f -executable | sort -u); do
source "$hook"
done
}
load_hooks
| true
|
d52cdaf3ae868fd09aab7dbe1182630b94e9be6a
|
Shell
|
WormBase/website-classic
|
/cgi-perl/ontology/browser_lib/launch_ontology_sockets.sh
|
UTF-8
| 1,025
| 3.125
| 3
|
[] |
no_license
|
#!/bin/bash
# This shell script launches socket servers that support
# browsing of the various ontologies at WormBase
# Usage:
# launch_ontolgy_sockets.sh /path/to/obo/dir /path/to/socket/dir WSVersion
BINDIR=/usr/local/wormbase/cgi-perl/ontology/browser_lib
SOCKET=/usr/local/wormbase/sockets
WSVERS=$1
DBDIR=/usr/local/wormbase/databases/${WSVER}/ontology
# GO
sudo -u nobody ${BINDIR}/browser.initd -o ${DBDIR}/gene_ontology.${WSVERS}.obo \
-a ${DBDIR}/${WSVERS}/gene_association.${WSVERS}.wb.ce \
-t go \
-v ${WSVERS} &
# AO
sudo -u nobody ${BINDIR}/browser.initd -o ${DBDIR}/anatomy_ontology.${WSVERS}.obo \
-a ${DBDIR}/${WSVERS}/anatomy_association.${WSVERS}.wb \
-t ao \
-v ${WSVERS}
# PO
sudo -u nobody ${BINDIR}/browser.initd -o ${DBDIR}/phenotype_ontology.${WSVERS}.obo \
-a ${DBDIR}/${WSVERS}/phenotype_association.${WSVERS}.wb \
-t po \
-v ${WSVERS}
| true
|
5ded012badd9a97eacd48e2219bd68bc42dbf6d7
|
Shell
|
Kingsford-Group/lrassemblyanalysis
|
/src/biosample_isoseq.sh
|
UTF-8
| 3,215
| 3.859375
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
#
# Laura Tung
#
# Perform isoseq full analysis for a dataset corresponding to a BioSample.
# First, create the merged subreads dataset from all the SRA Runs of current BioSample before performing the full analysis.
#
# Usage: biosample_isoseq.sh <BioSample_ID> <Top_dir> <Organism>
#
# <BioSample_ID>: BioSample ID. Under the BioSamples/ directory of the SRA Study directory, each BioSample has a directory named by the BioSample ID.
# The directory for this BioSample ID should have a 'SRA_Runs' file that contains all the SRA Run ID's of this BioSample, and each line is one SRA Run ID.
# <Top_dir>: Top-level directory of SRA Study (all the SRA Runs of this SRA Study are under this directory).
# <Organism>: human or mouse
#
# Run this script under the directory for this BioSample ID.
if [ "$#" != "3" ]; then
echo "Usage: biosample_isoseq.sh <BioSample_ID> <Top_dir> <Organism>"
echo "<BioSample_ID>: BioSample ID. Under the BioSamples/ directory of the SRA Study directory, each BioSample has a directory named by the BioSample ID."
echo " The directory for this BioSample ID should have a 'SRA_Runs' file that contains all the SRA Run ID's of this BioSample, and each line is one SRA Run ID."
echo "<Top_dir>: Top-level directory of SRA Study (all the SRA Runs of this SRA Study are under this directory)"
echo "<Organism>: human or mouse"
echo "Run this script under the directory for this BioSample ID"
exit
fi
run_id=$1
# note: run_id here is the BioSample_ID.
top_dir=$2
organism=$3
curr_dir="$PWD"
# GMAP reference sets (pre-generated) locations for human and mouse
# REPLACE WITH YOUR ACTUAL PATH TO THE REFERENCE DATA (for the following 2 lines)
human_gmap_refset="/mnt/disk27/user/ltung/longreadscallop/data/datasets/PacBio/human/ERX1468898/ERR1397639/GMAP_Ref_GRCh38/GmapReferenceSet_GRCh38"
mouse_gmap_refset="/mnt/disk27/user/ltung/longreadscallop/data/datasets/PacBio/mouse/GMAP_Ref_GRCm38/GmapReferenceSet_GRCm38"
# make soft links to the bam files in all the SRA Runs of current BioSample
filename="SRA_Runs"
while read -r line
do
sra_run_id=$line
ln -f -s $top_dir/$sra_run_id/*.bam* .
done < $filename
# create dataset
if [ -f ${run_id}.subreadset.xml ]
then
rm ${run_id}.subreadset.xml
fi
dataset create --type SubreadSet ${run_id}.subreadset.xml *.subreads.bam
# perform full analysis
analysis_dir=${run_id}_full_analysis
if [ ! -d $analysis_dir ]
then
mkdir $analysis_dir
else
echo "Cleaning the existing full analysis output directory..."
rm -r $analysis_dir
mkdir $analysis_dir
fi
if [ $organism == 'mouse' ]
then
echo "mouse"
pbsmrtpipe pipeline-id pbsmrtpipe.pipelines.sa3_ds_isoseq2_with_genome -e eid_subread:$curr_dir/${run_id}.subreadset.xml eid_gmapref_dataset:$mouse_gmap_refset/gmapreferenceset.xml -o $analysis_dir --local-only --force-chunk-mode > full_analysis.log
else
echo "human"
pbsmrtpipe pipeline-id pbsmrtpipe.pipelines.sa3_ds_isoseq2_with_genome -e eid_subread:$curr_dir/${run_id}.subreadset.xml eid_gmapref_dataset:$human_gmap_refset/gmapreferenceset.xml -o $analysis_dir --local-only --force-chunk-mode > full_analysis.log
fi
| true
|
df11ee11e9c1b648e10f4829c7823ee32d3198ff
|
Shell
|
dkirker/modeswitcher
|
/control/prerm
|
UTF-8
| 1,046
| 2.6875
| 3
|
[] |
no_license
|
#!/bin/sh
# Handle execution as pmPostInstall.script
if [ -z "$IPKG_OFFLINE_ROOT" ]; then
IPKG_OFFLINE_ROOT=/media/cryptofs/apps
mount -o remount,rw /
fi
SRV_ID=org.e.lnx.wee.modeswitcher.srv
SRV_DIR=/media/cryptofs/apps/usr/palm/services/${SRV_ID}
SYS_ID=org.e.lnx.wee.modeswitcher.sys
SYS_DIR=/media/cryptofs/apps/usr/palm/services/${SYS_ID}
rm -f /var/cache/configurator/_media_cryptofs_apps_usr_palm_services_${SRV_ID}_configuration_db_permissions_${SRV_ID}
rm -f /var/palm/event.d/${SRV_ID}
/usr/bin/killall -9 ${SYS_ID} || true
rm -f /var/usr/sbin/${SYS_ID}
rm -f /var/palm/ls2/roles/prv/${SRV_ID}.json
rm -f /var/palm/ls2/roles/pub/${SRV_ID}.json
rm -f /var/palm/ls2/services/prv/${SRV_ID}.service
rm -f /var/palm/ls2/services/pub/${SRV_ID}.service
rm -f /var/palm/ls2/roles/prv/${SYS_ID}.json
rm -f /var/palm/ls2/roles/pub/${SYS_ID}.json
rm -f /var/palm/ls2/services/prv/${SYS_ID}.service
rm -f /var/palm/ls2/services/pub/${SYS_ID}.service
/usr/bin/pkill switcher.srv.js || true
/usr/bin/ls-control scan-services || true
| true
|
581da7a3c2c84e6571695ac459f0f2bb928960ba
|
Shell
|
ossimlabs/o2-pushbutton
|
/openshift/disconnected/containers/bundle-images.sh
|
UTF-8
| 388
| 2.640625
| 3
|
[] |
no_license
|
#!/bin/bash
pushd `dirname ${BASH_SOURCE[0]}` >/dev/null
BUNDLE_IMAGES_SCRIPT_DIR=`pwd -P`
popd >/dev/null
DESTINATION_DIR=$1
mkdir -p ${DESTINATION_DIR}/docker-images
docker save ansible|gzip -c>${DESTINATION_DIR}/docker-images/ansible.tgz
docker save httpd|gzip -c>${DESTINATION_DIR}/docker-images/httpd.tgz
docker save registry|gzip -c>${DESTINATION_DIR}/docker-images/registry.tgz
| true
|
17555ed0fe03383a8b29fcd1da11bb93230a2b87
|
Shell
|
johnchaussard/skeletor
|
/scripts/scan_all_for_prepare.bash
|
UTF-8
| 420
| 3.140625
| 3
|
[] |
no_license
|
#!/bin/bash
if [ ! $# -eq 2 ]
then
echo "usage: " $0 "shape skeleton_directory"
exit 1
fi
List="$(ls $2/*.pgm)"
~/Progs/collapse/bin/div2 $1 4 temp_script
sub $1 temp_script temp_script
inverse $1 temp_inv
for i in ${List[*]};
do
name=`echo $i | sed s/".pgm"//g`
sub temp_script $i temp_res.pgm
add temp_res.pgm temp_inv temp_res.pgm
convert temp_res.pgm $name.eps
done
rm temp_inv temp_res.pgm temp_script
| true
|
c7dcdab11b09189626d0125a1206e7dd99e4c734
|
Shell
|
amazurenko/nexus-scripts
|
/provision.sh
|
UTF-8
| 1,650
| 3.984375
| 4
|
[] |
no_license
|
#!/bin/bash
# A simple example script that publishes a number of scripts to the Nexus Repository Manager
# and executes them.
# Usage: provision.sh -u <username> -p <password> -h <host>
# fail if anything errors
set -e
# fail if a function call is missing an argument
set -u
# Parse credentials and target host; set -u aborts later if any is missing.
while getopts u:p:h: option
do
  case "${option}"
  in
    u) username=${OPTARG};;
    p) password=${OPTARG};;
    h) host=${OPTARG};;
  esac
done
echo "$host"
# Publish a Groovy script to the repository manager under a given name.
#   $1 - name to register the script as
#   $2 - path to the Groovy source file
function addAndRunScript {
  local name=$1
  local file=$2
  # using grape config that points to local Maven repo and Central Repository , default grape config fails on some downloads although artifacts are in Central
  # change the grapeConfig file to point to your repository manager, if you are already running one in your organization
  groovy -Dgroovy.grape.report.downloads=true -Dgrape.config=grapeConfig.xml addUpdateScript.groovy -u "$username" -p "$password" -n "$name" -f "$file" -h "$host"
  printf "\nPublished $file as $name\n\n"
  #curl -v -X POST -u "$username:$password" --header "Content-Type: text/plain" "$host/service/rest/v1/script/$name/run"
  #printf "\nSuccessfully executed $name script\n\n\n"
}
printf "Provisioning Integration API Scripts Starting \n\n"
printf "Publishing and executing on $host\n"
addAndRunScript listAssets src/main/groovy/listAssets.groovy
addAndRunScript deleteAssets src/main/groovy/deleteAssets.groovy
addAndRunScript deleteDockerReleasedSnapshots src/main/groovy/deleteDockerReleasedSnapshots.groovy
addAndRunScript listComponents src/main/groovy/listComponents.groovy
addAndRunScript deleteComponents src/main/groovy/deleteComponents.groovy
printf "\nProvisioning Scripts Completed\n\n"
| true
|
d3372c6ad38215534250c8bde688057976980f23
|
Shell
|
extremevn/gradledeputil
|
/tools/git/config.sh
|
UTF-8
| 258
| 2.8125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Prefix pattern for valid branch-type names. A branch name must begin with
# one of these types: "feature/task_123" is accepted, while "task_123" or
# "task/123" is rejected.
export BRANCH_TYPE_NAME="feature|bug|hotfix"
| true
|
591660026b5031f6d0913ce1d6d8fa008f5b9c5f
|
Shell
|
xpjiang/physics-recon
|
/simulations/moba_T1_simu.sh
|
UTF-8
| 2,412
| 2.984375
| 3
|
[] |
no_license
|
#!/bin/bash
#
# Copyright 2020. Uecker Lab, University Medical Center Göttingen.
#
# Authors: Xiaoqing Wang and Nick Scholand, 2020
# nick.scholand@med.uni-goettingen.de
# xiaoqing.wang@med.uni-goettingen.de
#
# Simulate a radial inversion-recovery acquisition of a numerical phantom
# with BART, then reconstruct a T1 map using nonlinear model-based
# reconstruction (bart moba). Requires the BART toolbox via $TOOLBOX_PATH
# and (for `moba -g`) a GPU.
set -e
export PATH=$TOOLBOX_PATH:$PATH
if [ ! -e $TOOLBOX_PATH/bart ] ; then
echo "\$TOOLBOX_PATH is not set correctly!" >&2
exit 1
fi
# generating a numerical phantom using BART
# Simulation parameters
TR=0.0041
DIM=384
SPOKES=1
REP=1020
NC=8
# reconstructed image size: half of the (oversampled) readout dimension
NBR=$(( $DIM / 2 ))
# create trajectory
bart traj -x $DIM -y $SPOKES -t $REP -c -r -G _traj
bart transpose 5 10 {_,}traj
# halve the coordinates to account for the two-fold readout oversampling
bart scale 0.5 traj _traj1
# create geometry basis functions
bart phantom -s$NC -T -k -b -t _traj1 _basis_geom
# create simulation basis functions
# one signal for the water background, ten for the tubes; joined along dim 6
bart signal -F -I -n$REP -r$TR -1 3:3:1 -2 1:1:1 _basis_simu_water
bart signal -F -I -n$REP -r$TR -1 0.2:2.2:10 -2 0.045:0.045:1 _basis_simu_tubes
bart scale 1. _basis_simu_tubes _basis_simu_sdim_tubes
bart join 6 _basis_simu_water _basis_simu_sdim_tubes _basis_simu
# create simulated dataset
# multiply geometry and signal bases and sum over dim 6 -> simulated k-space
bart fmac -s $(bart bitmask 6) _basis_geom _basis_simu _phantom_ksp
bart phantom -x$NBR -T mask
# add noise to the simulated dataset
# noise is added per coil: slice coil dim 3, add noise, re-join below
for (( i=0; i <= 7; i++ )) ; do
bart slice 3 $i _phantom_ksp _phantom_ksp$i
bart noise -n200 _phantom_ksp$i tmp_ksp_$i.coo
done
bart join 3 tmp_ksp_*.coo _phantom_ksp2
rm tmp_ksp_*.coo
#------------------------------------------------------
#------------- Nonlinear model-based reco -------------
#------------------------------------------------------
# bin SPOKES_BIN consecutive acquisitions into one k-space frame
SPOKES_BIN=20
REP_BIN=$(($REP / $SPOKES_BIN))
bart reshape $(bart bitmask 4 5) $SPOKES_BIN $REP_BIN _phantom_ksp2 _phantom_ksp3
bart transpose 4 2 _phantom_ksp3 phantom_ksp
bart reshape $(bart bitmask 4 5) $SPOKES_BIN $REP_BIN traj _traj1
bart transpose 4 2 _traj1 traj
#scale1=$(($TR * $SPOKES_BIN))
#scale2=$(($TR * $SPOKES_BIN / 2))
# NOTE: shell arithmetic is integer-only, so the float products are
# hard-coded: scale1 = TR*SPOKES_BIN, scale2 = TR*SPOKES_BIN/2.
scale1=0.082
scale2=0.041
# build the inversion-time vector TI[k] = scale2 + k*scale1 along dim 5
bart index 5 $REP_BIN tmp1.coo
bart scale $scale1 tmp1.coo tmp2.coo
bart ones 6 1 1 1 1 1 $REP_BIN tmp1.coo
bart saxpy $scale2 tmp1.coo tmp2.coo TI
ITER=12
REG=0.05
# model-based reconstruction (-L Look-Locker model, -g GPU)
bart moba -L -l1 -i$ITER -g -C300 -d4 -j$REG -o1.0 -n -R3 -t traj phantom_ksp TI moba_simu_T1 sens
# crop to the nominal FOV and mask out the background
bart resize -c 0 $NBR 1 $NBR moba_simu_T1 moba_simu_T1_${NBR}
bart fmac mask moba_simu_T1_${NBR} moba_simu_T1_masked
# convert fitted model parameters into a T1 map
bart looklocker -t0. -D0. moba_simu_T1_masked tmp
bart transpose 0 1 tmp T1
rm tmp*.{cfl,hdr} _*.{cfl,hdr} phantom*.{cfl,hdr} sens*.{cfl,hdr} traj*.{cfl,hdr} mask*.{cfl,hdr} TI*.{cfl,hdr}
| true
|
d0976d44ebee911dc64547e9c3029db0ac8a7526
|
Shell
|
ee12lmb/TextureStrength
|
/shell/run_IR.sh
|
UTF-8
| 3,477
| 3.5625
| 4
|
[] |
no_license
|
#!/bin/bash
# Script runs index_repeat from command line
# See relevant documentation in function (/project/souce/dev/...)
# Output filename is always placed in: /project/analysis/outputs/IR
#
# Two invocation modes:
#   * no arguments -> prompt interactively for every parameter
#   * positional   -> 8 args for index "j"/"m continuous", 10 args for "md"
#     (the discrete m-index additionally needs bin size and binning type)
function usage()
{
echo "Usage: run_IR.sh [ infile ] [ step ] [ no. grains ] [ repeat ] [ seed ] [ crystal ] [ index ] [ bin size (md only) ] [ binning type (md only) [ output name ]"
echo "Usage: alternatively, will run interactively if no arguments given"
}
#------------------------------------------------------------------------
# check if running quietly or accepting inputs
if [[ $# -eq 0 ]]
then
echo
printf "Input file:........ "
read infile
printf "Step:.............. "
read step
printf "No. grains:........ "
read n
printf "Repeat:............ "
read repeat
printf "Seed:.............. "
read seed
printf "Crystal............ "
read crystal
printf "Index:............. "
read index
# if we're running m_indexDisc, need to know bin size
case $index in
md|MD) printf "Bin size (deg)..... "
read bin
printf "Binning type....... "
read binning
needBin=1 # we do have bin size
;;
*) needBin=0 # we don't need bin size
;;
esac
printf "Output file name:.. "
read outname
echo "Running function with user inputs..."
echo
else
# check if discrete m index (so we need bin size)
# $7 is the index type; it decides how many positional args are expected
case $7 in
md|MD) [[ $# -ne 10 ]] && usage && exit 1
infile=$1
step=$2
n=$3
repeat=$4
seed=$5
crystal=$6
index=$7
bin=$8
binning=$9
outname=${10}
needBin=1 # we do need bin
;;
# index must be either j or m cont.
*) [[ $# -ne 8 ]] && usage && exit 1
infile=$1
step=$2
n=$3
repeat=$4
seed=$5
crystal=$6
index=$7
outname=$8
needBin=0 # we dont need bin
;;
esac
fi
#------------------------------------------------------------------------
# setup important dirs
#### CHANGE TO RELEVANT DIRECTORIES ####
outdir="/nfs/see-fs-01_teaching/ee12lmb/project/analysis/outputs/IR"
devdir="/nfs/see-fs-01_teaching/ee12lmb/project/source/dev"
outfile=$outdir/$outname
#echo "OUTNAME: $outname"
#echo "OUTPATH: $outfile"
#------------------------------------------------------------------------
# input checks
# refuse to clobber an existing output file; require the input to exist
[[ ! -f $infile ]] && echo "Input file not found!" && usage && exit 1
[[ -f $outfile ]] && echo "Output file already exists!" && usage && exit 1
#------------------------------------------------------------------------
# Run matlab function
##### CHANGE PATH IN ADDPATH COMMAND TO THE TEXTURE STRENGTH PACKAGE LOACTION IN BOTH COMMANDS BELOW #####
# needBin selects the MATLAB call signature: with or without bin arguments.
if [[ $needBin -eq 0 ]]
then
matlab -nodesktop -nodisplay -nosplash -r "addpath('/nfs/see-fs-01_teaching/ee12lmb/project/source/dev/'); setup_env; index_repeat('$infile',$step,$n,$repeat,$seed,'crystal','$crystal','index','$index','outfile','$outfile','-v'); exit;"
elif [[ $needBin -eq 1 ]]
then
matlab -nodesktop -nodisplay -nosplash -r "addpath('/nfs/see-fs-01_teaching/ee12lmb/project/source/dev/'); setup_env; index_repeat('$infile',$step,$n,$repeat,$seed,'crystal','$crystal','index','$index','bin',$bin,'binning','$binning','outfile','$outfile','-v'); exit;"
fi
exit 0
| true
|
7f2edd5e2a5cc50c28ef90b92605e0a5826ae634
|
Shell
|
pcopfer/ansible-duplicity
|
/templates/backup_dup.sh
|
UTF-8
| 1,093
| 3.859375
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# Backup with Duplicity
#
# Runs an incremental duplicity backup for every path listed in
# /root/backuppaths/*.conf, then prunes old incremental sets and full chains.
# Usage: backup_dup.sh <path-to-.backuprc>
# (This file is an Ansible/Jinja2 template: {% ... %} blocks are rendered
# before deployment.)
if [ "$(whoami)" != "root" ]; then
  echo 'Only root can run "backup"!' >&2
  exit 1
fi
#
# read the configuration file, it defines
# * $BACKUP_MOUNT
# * $BACKUP_CREDENTIALS
if [ -z "$1" ] || [ "$1" == '-h' ];
then
  echo "Usage: backup_mount.sh PATH \n PATH is PATH for .backuprc "
  exit 2
fi
CONFIG=$1
[ -f "$CONFIG" ] && . "$CONFIG" || {
  echo "Missing configuration file $CONFIG!" >&2
  exit 2
}
#
# run scripts in /root/pre-backup-scripts when directory exists
#
if [ -d "/root/pre-backup-scripts" ]; then
  run-parts -v --regex '.sh$' /root/pre-backup-scripts
fi
export PASSPHRASE=$BACKUP_PASSWORD
HN=$(hostname)
# BUGFIX: the original piped `cat` into the while loop, so the loop body ran
# in a subshell and the final `exit $rc` saw an *unset* rc. Feeding the loop
# via process substitution keeps rc in the current shell; rc holds the exit
# status of the last `duplicity incremental` run, as originally intended.
rc=0
while read line
do
  echo "$line"
  BN=$(basename "$line")
{% if duplicity_scheme == "rsync" %}
  ssh -n $BACKUP_SSH mkdir -p $BACKUP_PATH$HN/$BN
{% endif %}
  duplicity incremental --full-if-older-than 1M "$line" $BACKUP_CREDENTIALS$HN/$BN
  rc=$?
  # keep at most 2 incremental sets per full, and at most 4 full chains
  duplicity remove-all-inc-of-but-n-full 2 --force $BACKUP_CREDENTIALS$HN/$BN
  duplicity remove-all-but-n-full 4 --force $BACKUP_CREDENTIALS$HN/$BN
done < <(cat /root/backuppaths/*.conf)
unset PASSPHRASE
exit $rc
| true
|
f5b223aeddb4a6ecc681379c21cb4a660751fd90
|
Shell
|
seema1611/Shell_Script
|
/Functions/isPrimePalindrome.sh
|
UTF-8
| 1,382
| 4.375
| 4
|
[] |
no_license
|
#!/bin/bash
#Program to findout first number is prime and then palindrome and last palindrome is also a prime
#Check here number is Prime or NOT
# Print 1 if $1 is prime, 0 otherwise.
# Fixes over the original: 0 and 1 are no longer reported as prime (the
# trial-division loop never ran for them), trial division stops at sqrt(n),
# and the loop exits as soon as one divisor is found.
function isPrime() {
  local number=$1
  # no integer below 2 is prime
  if [ $number -lt 2 ]
  then
    echo 0
    return
  fi
  local check=1
  local i
  for (( i=2; i*i<=number; i++ ))
  do
    if [ $(($number % $i)) -eq 0 ]
    then
      check=0
      break   # one divisor is enough
    fi
  done
  echo $check
}
#Check here Number is Palindrome Or Not
# Echo the decimal reversal of $1 (e.g. 123 -> 321). A number is a
# palindrome exactly when it equals its own reversal.
function isPalindrome() {
  local n=$1
  local flipped=0
  while [ "$n" -gt 0 ]
  do
    # peel the last digit off n and append it to flipped
    flipped=$(( flipped * 10 + n % 10 ))
    n=$(( n / 10 ))
  done
  echo $flipped
}
#Check here number is Palindrome and OR a Prime both condition
# Classify $1 and print exactly one message: prime & palindrome, prime only,
# or not prime. Delegates primality to isPrime and reversal to isPalindrome.
function isPrimePalindrome() {
  local candidate=$1
  # not prime: report and bail out early
  if [ "$(isPrime $candidate)" -ne 1 ]
  then
    echo $candidate ":is NOT a Prime number"
    return
  fi
  # prime: a palindrome equals its own decimal reversal
  if [ $candidate -eq "$(isPalindrome $candidate)" ]
  then
    echo $candidate ":is a prime and palindrome both"
  else
    echo $candidate ":is a Prime but NOT Palindrome"
  fi
}
echo "Enter the numbers: "
read number
isPrimePalindrome $number
| true
|
c98fea633ab53aa5978fc0ae2cd7927534694817
|
Shell
|
Facj/Update_sw
|
/Tests/reset_for_test.sh
|
UTF-8
| 3,069
| 3.546875
| 4
|
[] |
no_license
|
#!/bin/bash
cd /home/fatima/Raspi_sw/Update_sw/Tests
#----------------------------------------------------------------------------------------------
#
# This script restores the conditions to start the update system testing.
# It restores v 1.0.0 in the repo folder of the both users whose name is received as a parameter and
# deletes every tag in the repository
#
# Parameters: No parameters
#
#-----------------------------------------------------------------------------------------------
USER1=Facj
USER2=Facj2
#Change to repo folder and commit the changes
#cd /home/fatima/Raspi_sw/$USER1/Repo1
#Restore v 1.0.0 files in the repo folder of the first user
cd /home/fatima/Raspi_sw/Update_sw/Tests
cp ../start_files/loop.c /home/fatima/Raspi_sw/$USER1/Repo1/loop.c
cp ../start_files/loop_2.c /home/fatima/Raspi_sw/$USER1/Repo1/loop_2.c
cp ../start_files/loop_d.c /home/fatima/Raspi_sw/$USER1/Repo1/loop_d.c
cp ../start_files/loop_d_2.c /home/fatima/Raspi_sw/$USER1/Repo1/loop_d_2.c
cp ../start_files/Makefile /home/fatima/Raspi_sw/$USER1/Repo1/Makefile
cp ../start_files/dynamic_c.h /home/fatima/Raspi_sw/$USER1/Repo1/dynamic_c.h
cd /home/fatima/Raspi_sw/$USER1/Repo1
# recreate empty work files so the commit always captures a known state
rm loop_3.c
touch loop_3.c
rm new_doc.txt
touch new_doc.txt
git add loop.c loop_2.c loop_3.c new_doc.txt
git commit -m "Restart" >/dev/null 2>&1
git push origin master >/dev/null 2>&1
# If USER1's push was rejected, remember that USER2 must commit and push
# instead; `push` drives the branch taken further below.
if [ $? -ne 0 ]
then
#echo "User2 will push"
push=false
else
#echo "Pushed"
push=true
fi
#Delete all tags
# (delete each tag on the remote first, then the local copy)
for t in `git tag`
do
git push origin :$t >/dev/null 2>&1
git tag -d $t >/dev/null 2>&1
done
cd /home/fatima/Raspi_sw/$USER2/Repo1
for t in `git tag`
do
git push origin :$t >/dev/null 2>&1
git tag -d $t >/dev/null 2>&1
done
#Apply same changes to USER2
cd /home/fatima/Raspi_sw/Update_sw/Tests
cp ../start_files/loop.c /home/fatima/Raspi_sw/$USER2/Repo1/loop.c
cp ../start_files/loop_2.c /home/fatima/Raspi_sw/$USER2/Repo1/loop_2.c
cp ../start_files/loop_d.c /home/fatima/Raspi_sw/$USER2/Repo1/loop_d.c
cp ../start_files/loop_d_2.c /home/fatima/Raspi_sw/$USER2/Repo1/loop_d_2.c
cp ../start_files/Makefile /home/fatima/Raspi_sw/$USER2/Repo1/Makefile
cp ../start_files/dynamic_c.h /home/fatima/Raspi_sw/$USER2/Repo1/dynamic_c.h
# Whoever did NOT push now commits and pushes; the other side merges.
if $push; then
cd /home/fatima/Raspi_sw/$USER2/Repo1
git fetch origin master >/dev/null 2>&1
git merge FETCH_HEAD --no-edit >/dev/null 2>&1
else
cd /home/fatima/Raspi_sw/$USER2/Repo1
rm loop_3.c
touch loop_3.c
git add loop.c loop_2.c loop_3.c loop_d.c loop_d_2.c Makefile dynamic_c.h
git commit -m "Restart" >/dev/null 2>&1
git push origin master >/dev/null 2>&1
cd /home/fatima/Raspi_sw/$USER1/Repo1
git fetch origin master >/dev/null 2>&1
git merge FETCH_HEAD --no-edit >/dev/null 2>&1
fi
#Check if the reset has finished correctly
# both working trees must now be identical (ignoring .git metadata)
cd /home/fatima/Raspi_sw
diff -rq --exclude '.git' Facj/Repo1 Facj2/Repo1 #>/dev/null 2>&1
if [ $? -ne 0 ]
then
echo "Error while reseting."
else
echo "Reset performed correctly."
fi
| true
|
ce18c1fc227a847dc84aee7ec1e34e3d5d0e40e7
|
Shell
|
amaurer/patagonia-image-processing
|
/createCatalog
|
UTF-8
| 1,570
| 3.15625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Rebuild the product-image catalog:
#  1. pull the original images from the Samba share,
#  2. lower-case file names and convert tiff/png to jpg,
#  3. generate responsive sizes with Grunt (one background job per folder).
# NOTE: the script uses bash arrays, so the shebang must be bash (was /bin/sh).
cd /Users/andrew/webfiles/patagonia-image-processing
# secrets.txt holds the user:password used for the SMB mount URL
secrets=$(cat secrets.txt)
echo "$secrets"
# Remove existing files
echo Remove existing files
rm -rf ./dest/*
rm -rf ./todo/*
# Copy files from remote server to local
echo Mount...
mount_smbfs "//$secrets@v33/storage/Dropbox/Patagonia/Catalog" ./tmpmnt
declare -a foldersarr=("beds" "sofas" "vinyls" "fabrics" "finishes" "casegoods" "ottomans" "sectionals" "nail-heads" "dining-chairs" "occasional-chairs" "occasional-tables" "custom-creations")
echo Copying files...
for i in "${foldersarr[@]}"
do
  echo "$i"
  cp -Rf "./tmpmnt/$i" ./todo
  echo "Done!"
done
# Unmount share
echo Unmount...
umount ./tmpmnt
# Lower-case every file name (perl rename; the pattern targets the basename).
find ./todo -depth -type f -exec rename -f 's!/([^/]*/?)$!\L/$1!' {} +
# convert types
echo Convert image types...
find ./todo -name "*.tiff" -exec mogrify -quiet -format jpg {} \;
find ./todo -name "*.tif" -exec mogrify -quiet -format jpg {} \;
# BUGFIX: option was misspelled "-queit", which made mogrify reject the
# argument instead of converting PNGs quietly.
find ./todo -name "*.png" -exec mogrify -quiet -format jpg {} \;
# Remove any windows files or old tiff
echo Delete old files...
find ./todo -name "*.ini" -type f -delete
find ./todo -name "*.tiff" -type f -delete
find ./todo -name "*.tif" -type f -delete
find ./todo -name "*.png" -type f -delete
# Catalog is in in place, now use Grunt to make responsive images
# grunt responsive_images
# fan out one Grunt job per folder, then wait for all of them to finish
for i in "${foldersarr[@]}"
do
  echo "$i"
  grunt responsive_images --targetdir="$i" &
  echo "Done!"
done
wait
# Remove remaining files
echo Remove remaining files
rm -rf ./todo/*
open ./dest/
open ../patagonia/public/catalog/
| true
|
bbda2f9b720b225b1bd755f13f7677d86a8943dc
|
Shell
|
halhenke/gist-bin-pub
|
/pdf-metadaten-leeren-exif-pdftk/pdf-metadaten-leeren-exif-pdftk.sh
|
UTF-8
| 652
| 3.5625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Remove PDF metadata with pdftk and exiftool
# Copies every *.pdf in the current directory to ./tmp, strips XMP metadata
# with exiftool, blanks the Info dictionary with pdftk, and writes the clean
# files to ./pdf_new under their original basenames.
shopt -s nocaseglob
[[ -e ./tmp ]] && { echo "There is already a tmp dir." 1>&2 ; exit 1 ; }
[[ -e ./pdf_new ]] && { echo "There is already a pdf_new dir." 1>&2 ; exit 1 ; }
mkdir -- ./tmp ./pdf_new
cp -- ./*.pdf ./tmp
while IFS= read -d '' -r
do
  filename=$(basename "$REPLY")
  # remove XMP-Metadata incrementell
  exiftool -all= "$REPLY"
  # then rewrite PDF
  # BUGFIX: the output name was garbled ("$(unknown)"); $filename is computed
  # above and otherwise unused -- it is the intended target name.
  pdftk "$REPLY" dump_data | sed -r -e 's/^(InfoValue:).*/\1/g' | pdftk "$REPLY" update_info - output "./pdf_new/$filename" 2>/dev/null
done < <(find ./tmp -type f -iname "*.pdf" -print0)
rm -r -- ./tmp
| true
|
9b035e9e6d53c6ba48ef5c68d02c8ae8c2c55289
|
Shell
|
EdwardOst/bash-explore
|
/array/array_scope.sh
|
UTF-8
| 699
| 3.125
| 3
|
[] |
no_license
|
# Demonstrates bash array scoping with `declare`:
#  - `declare -g` inside a function creates/assigns a *global* array;
#  - plain `declare` inside a function is implicitly local;
#  - a function-local can shadow a same-named global for the function and
#    everything it calls (bash uses dynamic scoping).
declare -a myarr_outside=( x y )
function myfunc() {
declare -a -g myarr=( value1 value2 )
# implicit local scope
declare -a myarr_local=( value1 value2 )
# shadows the global myarr_outside while myfunc (and its callees) run
declare -a myarr_outside=( value1 value2 )
echo "myarr inside: ${myarr[@]}"
echo "myarr_local inside: ${myarr_local[@]}"
echo "myarr_outside inside: ${myarr_outside[@]}"
# nested function: sees myfunc's locals thanks to dynamic scoping
function mycommand() {
echo "myarr nested: ${myarr[@]}"
echo "myarr_local nested: ${myarr_local[@]}"
echo "myarr_outside nested: ${myarr_outside[@]}"
}
mycommand
}
myfunc
# after myfunc returns: myarr persists (declared -g), the locals are gone,
# and myarr_outside reverts to its original global value
echo "myarr outside: ${myarr[@]}"
echo "myarr_local outside: ${myarr_local[@]}"
echo "myarr_outside outside: ${myarr_outside[@]}"
| true
|
a0271c0e4b7b1f233bd2e965a0c865cce0bf5983
|
Shell
|
Lars139/Archive
|
/CS344/HW4/Homework4/time_bash
|
UTF-8
| 142
| 2.71875
| 3
|
[] |
no_license
|
#!/bin/bash
# Run the sieve benchmark four times, scaling the -p argument by nt each
# iteration (10, 20, 30, 40); output is paged through less.
nt=10
for (( x = 1; x <= 4; x++ ))
do
  ./sieve -q -t 1000 -n 100 -p $(( nt * x )) --color=always | less -r
done
| true
|
7795e755e1072b59b263d24ccc9463969d37dc65
|
Shell
|
shanedenecke/ABC_scan
|
/SLC_scan/SLC_id_scripts/SLC_HMM_Search.sh
|
UTF-8
| 4,531
| 3.625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Pipeline: HMM-search SLC transporter family profiles from a reference
# database against a target proteome, confirm hits by reciprocal BLAST,
# filter by gene length, and emit a final SLC fasta plus summary tables.
# External tools required: hmmsearch, blastp, Rscript, plus the helper
# scripts shipped next to this file (unigene_fa_sub.sh, *.R).
### add help
if [ "$1" == "-h" ]; then
echo "
Welcome ! This shell script is designed to search SLC transporters in non-model arthropods
Arguments:
-database: starting database. Pathw to folder of model species you will use to search target species
-target: path to target species proteome
-out: Name of output folder. Will create a new SLC database in this folder for the target species.
-threads: threads"
exit 0
fi
### add arguments
while [ "$#" -gt 0 ]; do
case "$1" in
-database) DATABASE="$2"; shift 2;;
-target) TARGET="$2"; shift 2;;
-threads) THREADS="$2"; shift 2;;
-out) OUT="$2"; shift 2;;
esac
done
### Set scripts directory
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
SOURCE_DIR="$(dirname "$SCRIPT_DIR")"
#For Debugging
#TARGET=/mnt/disk/shane/Transporter_ID/SLC_test/proteomes/HelZea_unigene.faa
#DATABASE=/home/sdenecke/Applications/Custom_Applications/SLC_scan/SLC_id_reference/HomSap_Database
#THREADS=14
#OUT=/mnt/disk/shane/Transporter_ID/SLC_test/Human_search/Human_HelZea
#SCRIPT_DIR=/home/sdenecke/Applications/Custom_Applications/SLC_scan/SLC_id_scripts
#SOURCE_DIR=/home/sdenecke/Applications/Custom_Applications/SLC_scan/SLC_id_scripts
#echo 'Target is '$TARGET
#echo 'Database is '$DATABASE
#echo 'Out is '$OUT
#echo 'Script dir '$SCRIPT_DIR
#echo 'Source dir '$SOUCRE_DIR
## Create new output directory
mkdir $OUT
cd $OUT
echo 'Performing HMM search'
## search HMM profiles against target proteome using hmm search
# One hmmsearch per family profile. The E-value cutoff (20) is deliberately
# loose; hits are filtered later by reciprocal BLAST and length.
rm -rf ./hmm_outputs
mkdir ./hmm_outputs
for i in $DATABASE/hmm_profiles/*; do
#echo $i
base=$(echo $(basename $i))
hmmsearch --notextw -E 20 $i $TARGET > ./hmm_outputs/$base.hmmoutput
done
## parse hmm outputs into usable format
# keep only the per-sequence score table above the inclusion threshold and
# collapse runs of whitespace into single tabs
rm -rf ./hmm_clean
mkdir ./hmm_clean
for i in ./hmm_outputs/*; do
base=$(echo $(basename $i))
cat $i | sed -n '/Scores for complete sequences/,/------ inclusion threshold/p' | sed '$ d' | awk 'NR > 4 { print }' | awk '/^$/{exit} {print $0}' | sed -e "s/\s\{1,\}/\t/g" | cut -f 2- > ./hmm_clean/$base.table
done
## extract fasta sequences from All HMM hits
rm -rf ./SLC_fa
mkdir ./SLC_fa
for i in ./hmm_clean/*.table; do
base=$(echo $(basename $i))
cut -f 9 $i | sed 's/\s+//g'| $SCRIPT_DIR/unigene_fa_sub.sh $TARGET - > ./SLC_fa/$base'.fa'
done
# drop families that produced no hits
find ./SLC_fa/* -size 0 -delete
##perform blast with HMM hits as queries and the source genomes SLC_mark.fa proteome as a target
echo 'Blast away'
rm -rf ./recip_blast
mkdir ./recip_blast
for i in ./SLC_fa/*.fa; do
base=$(echo $(basename $i))
blastp -query $i -db $DATABASE/reference_proteome/proteome_SLC_mark.fa -outfmt "6 qseqid sseqid pident evalue qcovs" -evalue 1e-3 -max_target_seqs 6 -max_hsps 1 -num_threads $THREADS > ./recip_blast/$base'_blast.tsv'
done
find ./recip_blast/* -size 0 -delete
## Run R script to output table
mkdir ./prelim_summary
Rscript $SCRIPT_DIR/SLC_Family_Sort.R $DATABASE'/SLC_source_dict.csv' > ./prelim_summary/Family_sort_preliminary.csv
## Filter based on lengths of human SLC gene
mkdir length_analysis
### make fasta from reciprocal blast results
cut -d ',' -f 1 ./prelim_summary/Family_sort_preliminary.csv | $SCRIPT_DIR/unigene_fa_sub.sh $TARGET - > ./length_analysis/preliminary_SLC.fa
cut -d ',' -f 2 ./prelim_summary/Family_sort_preliminary.csv | sed '1d' > ./length_analysis/length_families.txt
# compute sequence lengths, then split headers and lengths into two columns
awk '/^>/ {if (seqlen) print seqlen;print;seqlen=0;next} {seqlen+=length($0)}END{print seqlen}' ./length_analysis/preliminary_SLC.fa > ./length_analysis/names_lengths.txt
grep ">" ./length_analysis/names_lengths.txt | perl -pe 's/^>(.+$)/$1/;'| cut -d ' ' -f 1 > ./length_analysis/all_proteins.txt
grep -E "^[0-9]" ./length_analysis/names_lengths.txt > ./length_analysis/all_lengths.txt
paste -d',' ./length_analysis/all_proteins.txt ./length_analysis/all_lengths.txt ./length_analysis/length_families.txt > ./length_analysis/gene_lengths.txt
Rscript $SCRIPT_DIR/SLC_length_filter.R $SOURCE_DIR > ./length_analysis/total_slc_table.csv
#### Produce final fasta file
# NOTE(review): `grep -A 1` assumes single-line fasta records -- confirm that
# unigene_fa_sub.sh always emits unwrapped sequences.
rm -f ./length_analysis/SLC_final.faa
for i in $(cat ./length_analysis/total_slc_table.csv | cut -d ',' -f 1)
do
grep -A 1 $i ./length_analysis/preliminary_SLC.fa >> ./length_analysis/SLC_final.faa
done
## final output
mkdir final_output
cp ./length_analysis/total_slc_table.csv ./final_output/total_slc_table.csv
cp ./length_analysis/SLC_final.faa ./final_output/SLC_final.faa
Rscript $SCRIPT_DIR/SLC_dictionary_format.R > ./final_output/SLC_final_output.csv
cd -
| true
|
3839de1c4acf45e344e79c05b4a7fcbda197f248
|
Shell
|
Jeanhwea/spring-docs-builder
|
/assets/setup.sh
|
UTF-8
| 694
| 3.1875
| 3
|
[] |
no_license
|
# Clone the Spring Framework sources, build the reference docs with Gradle,
# archive them as a timestamped tarball under /assets, and clean up.
# Repository URL can be overridden via the SPRING_GIT_REPO env var.
SPRING_GIT_REPO=${SPRING_GIT_REPO:="https://github.com/spring-projects/spring-framework.git"}
set -e
# timestamped logger (Asia/Shanghai local time)
log() {
echo "$(TZ='Asia/Shanghai' date +'%F %T'): $*"
}
chmod +x /assets/entrypoint.sh
log "Cloning source code ..."
git clone -q $SPRING_GIT_REPO /assets/source
# pin the build to a known release tag
cd /assets/source && \
git reset --hard v5.2.5.RELEASE
log "Building source code ..."
cd /assets/source && \
./gradlew build asciidoctor #> /assets/gradle.log 2>&1
log "Archiving build docs ..."
# archive name embeds the build timestamp, e.g. 20200101_120000_docs.tar.gz
ARCNAME=$(TZ='Asia/Shanghai' date +'%Y%m%d_%H%M%S_docs')
ARCFILE=/assets/${ARCNAME}.tar.gz
tar czvf $ARCFILE -C /assets/source/build docs
log "Cleanup caches ..."
rm -rf ~root/.gradle && rm -rf /assets/source
log "Done!"
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.