blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
44670106271821473cf3feb9c4512eb7ee41559f
|
Shell
|
fsquillace/raspy
|
/bin/mode-switcher
|
UTF-8
| 2,267
| 3.59375
| 4
|
[] |
no_license
|
#!/bin/bash
# mode-switcher: endlessly polls the active netcfg profile and, based on the
# traffic observed on ppp0, switches between the "ppp" profile and the
# fallback "ppp-to-gprs" profile.
last_profile="None"
while true
do
# Current profile = the line of `netcfg current` containing "ppp" (if any).
curr_profile=$(netcfg current | awk 'BEGIN{OFS="\n"}{print $0}' | grep ppp)
# If the profile is not active check after some seconds
if [ "$curr_profile" == "" ]
then
echo "Profile ppp* doesn't exist."
last_profile="None"
sleep 10
continue
fi
# Checks whether the profile has remained the same since the last iteration
if [ "${last_profile}" != "${curr_profile}" ]
then
# Sampling window: long (1h) on the full ppp profile, short (5s) on gprs.
# NOTE(review): if curr_profile is neither exactly "ppp" nor "ppp-to-gprs"
# (e.g. grep matched a longer line), $timeout keeps its previous value or is
# unset on the first pass, making `sleep $timeout` below sleep 0 — confirm.
if [ "${curr_profile}" == "ppp" ]
then
declare -i timeout=3600
elif [ "${curr_profile}" == "ppp-to-gprs" ]
then
declare -i timeout=5
fi
last_profile=$(netcfg current | awk 'BEGIN{OFS="\n"}{print $0}' | grep ppp)
# Snapshot the byte counters at the start of the sampling window.
declare -i tx=$(cat /sys/class/net/ppp0/statistics/tx_bytes)
declare -i rx=$(cat /sys/class/net/ppp0/statistics/rx_bytes)
declare -i start_tot=$(($tx+$rx))
fi
sleep $timeout
# Re-read counters and compute bytes transferred during the window.
declare -i tx=$(cat /sys/class/net/ppp0/statistics/tx_bytes)
declare -i rx=$(cat /sys/class/net/ppp0/statistics/rx_bytes)
declare -i tot=$(($tx+$rx-${start_tot}))
declare -i speed=$(($tot/$timeout))
echo "Tot bytes: $tot bytes"
echo "Timeout: $timeout sec"
echo "Speed: $speed bytes/sec"
# Checks whether the tot amount of bytes is negative.
# This situation can happend when the interface was got down
# and later got up.
if [ "$tot" -lt "0" ]
then
# Reinit
last_profile="None"
continue
fi
# Low traffic on ppp (< 1 MiB over the window): downgrade to gprs profile.
if [ "$curr_profile" == "ppp" -a "$tot" -lt "1048576" ]
then
echo "Low traffic: ppp -> ppp-to-gprs"
timeout 60 netcfg down ppp
# To prevent any strange lock let's use poff too.
timeout 60 poff -a provider
timeout 60 poff -a to-gprs
sleep 2
timeout 60 netcfg ppp-to-gprs
# High sustained traffic on gprs (> 10 KiB/s): upgrade back to ppp.
elif [ "$curr_profile" == "ppp-to-gprs" -a "$speed" -gt "10240" ]
then
echo "High traffic: ppp-to-gprs -> ppp"
timeout 60 netcfg down ppp-to-gprs
# To prevent any strange lock let's use poff too.
timeout 60 poff -a provider
timeout 60 poff -a to-gprs
sleep 2
timeout 60 netcfg ppp
fi
# Carry the end-of-window counters forward as the next window's baseline.
start_tot=$(($tx+$rx))
done
| true
|
37cf654d9bb90e762e50669938731532ceddafd2
|
Shell
|
petronny/aur3-mirror
|
/prosody-auth-dovecot-hg/PKGBUILD
|
UTF-8
| 1,070
| 2.796875
| 3
|
[] |
no_license
|
# Maintainer: Calvin McAnarney <calvin@mcanarney.org>
# PKGBUILD for an Arch Linux package that ships the Dovecot authentication
# module for the Prosody XMPP server, built from the prosody-modules
# Mercurial repository.
pkgname=prosody-auth-dovecot-hg
pkgver=501
pkgrel=1
pkgdesc="Dovecot authentication module for prosody"
arch=('i686' 'x86_64')
url="http://code.google.com/p/prosody-modules"
license=('MIT')
depends=('prosody')
makedepends=('mercurial')
_hgroot=https://prosody-modules.googlecode.com/hg
_hgrepo=prosody-modules
# Clone (or update) the Mercurial checkout, then stage a clean build copy.
build() {
cd "$srcdir"
msg "Connecting to Mercurial server...."
# Reuse an existing checkout when present; otherwise clone fresh.
if [[ -d "$_hgrepo" ]]; then
cd "$_hgrepo"
hg pull -u
msg "The local files are updated."
else
hg clone "$_hgroot" "$_hgrepo"
fi
msg "Mercurial checkout done or server timeout"
msg "Starting build..."
# Build from a pristine copy so repeated builds don't pollute the checkout.
rm -rf "$srcdir/$_hgrepo-build"
cp -r "$srcdir/$_hgrepo" "$srcdir/$_hgrepo-build"
cd "$srcdir/$_hgrepo-build"
}
# Install the two Lua files of the module into prosody's module directory.
package() {
cd "$srcdir/$_hgrepo-build"
install -D "mod_auth_dovecot/auth_dovecot/mod_auth_dovecot.lua" "$pkgdir/usr/lib/prosody/modules/mod_auth_dovecot.lua"
install -D "mod_auth_dovecot/auth_dovecot/sasl_dovecot.lib.lua" "$pkgdir/usr/lib/prosody/modules/sasl_dovecot.lib.lua"
}
# vim:set ts=2 sw=2 et:
| true
|
ef69c20594cc6cf1548b836a813c749fd8966e21
|
Shell
|
rdkcmf/rdk-sys_mon_tools-iarm_set_powerstate
|
/build.sh
|
UTF-8
| 4,367
| 2.78125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
##########################################################################
# If not stated otherwise in this file or this component's Licenses.txt
# file the following copyright and licenses apply:
#
# Copyright 2016 RDK Management
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##########################################################################
#
# Build the SetPowerState tool for the configured SoC/platform.
# Usage: build.sh [iarm|iarmbus]   (defaults to the IARM Bus target)
# Environment: PLATFORM_SOC (intel|broadcom|stm, default broadcom),
#              COMCAST_PLATFORM (rng150|xi3|xg1|xi4 depending on SoC).
set -x
# Default SoC. All test operands are quoted so empty/unset variables do not
# produce "unary operator expected" errors (the original left them unquoted).
if [ -z "$PLATFORM_SOC" ]; then
  export PLATFORM_SOC=broadcom
fi
# Select the IARM flavour from the first argument (default: IARM Bus).
if [ "$1" = "iarm" ] ; then
  export USE_IARM=y
  echo "Building for IARM Target..."
elif [ "$1" = "iarmbus" ] ; then
  export USE_IARM_BUS=y
  echo "Building for IARM Bus Target..."
else
  export USE_IARM_BUS=y
  echo "Default...Building for IARM Bus Target..."
fi
export USE_DBUS=y
# Resolve the scripts directory; the combined build tree is two levels up.
SCRIPT=$(readlink -f "$0")
SCRIPTS_DIR=$(dirname "$SCRIPT")
export COMBINED_ROOT=$SCRIPTS_DIR/../..
export BUILDS_DIR=$SCRIPTS_DIR/../..
if [ "$PLATFORM_SOC" = "intel" ]; then
  # Intel: cross toolchain and glib come from the SDK staging directory.
  export TOOLCHAIN_DIR=$COMBINED_ROOT/sdk/toolchain/staging_dir
  export CROSS_TOOLCHAIN=$TOOLCHAIN_DIR
  export CROSS_COMPILE=$CROSS_TOOLCHAIN/bin/i686-cm-linux
  export CC=$CROSS_COMPILE-gcc
  export CXX=$CROSS_COMPILE-g++
  export OPENSOURCE_BASE=$COMBINED_ROOT/opensource
  export DFB_ROOT=$TOOLCHAIN_DIR
  export DFB_LIB=$TOOLCHAIN_DIR/lib
  export FUSION_PATH=$OPENSOURCE_BASE/src/FusionDale
  export FSROOT=$COMBINED_ROOT/sdk/fsroot/ramdisk
  export GLIB_LIBRARY_PATH=$CROSS_TOOLCHAIN/lib/
  export GLIB_CONFIG_INCLUDE_PATH=$GLIB_LIBRARY_PATH/glib-2.0/
  export GLIBS='-lglib-2.0'
elif [ "$PLATFORM_SOC" = "broadcom" ]; then
  # Broadcom: per-device work dir and Broadcom environment setup script.
  if [ "${COMCAST_PLATFORM}" = "rng150" ]; then
    echo "building for device type RNG 150..."
    export WORK_DIR=$BUILDS_DIR/workRNG150
    # Either spelling of the env setup script may exist; source whichever does.
    if [ -f "$RDK_PROJECT_ROOT_PATH/sdk/scripts/setBcmEnv.sh" ]; then
      source "$RDK_PROJECT_ROOT_PATH/sdk/scripts/setBcmEnv.sh"
    fi
    if [ -f "$RDK_PROJECT_ROOT_PATH/sdk/scripts/setBCMenv.sh" ]; then
      source "$RDK_PROJECT_ROOT_PATH/sdk/scripts/setBCMenv.sh"
    fi
    export OPENSOURCE_BASE=$BUILDS_DIR/opensource
    CROSS_COMPILE=mipsel-linux
    export CC=$CROSS_COMPILE-gcc
    export CXX=$CROSS_COMPILE-g++
    export GLIB_LIBRARY_PATH=$APPLIBS_TARGET_DIR/usr/local/lib/
    export GLIBS='-lglib-2.0 -lintl -lz'
    export COMBINED_ROOT=$BUILDS_DIR
  elif [ "${COMCAST_PLATFORM}" = "xi3" ]; then
    export WORK_DIR=$COMBINED_ROOT/workXI3
    . "$COMBINED_ROOT/build_scripts/setBCMenv.sh"
    export OPENSOURCE_BASE=$BUILDS_DIR/opensource
    CROSS_COMPILE=mipsel-linux
    export CC=$CROSS_COMPILE-gcc
    export CXX=$CROSS_COMPILE-g++
    export GLIB_LIBRARY_PATH=$APPLIBS_TARGET_DIR/usr/local/lib/
    export GLIBS='-lglib-2.0 -lintl -lz'
  elif [ "${COMCAST_PLATFORM}" = "xg1" ]; then
    export WORK_DIR=$COMBINED_ROOT/workXG1
    . "$COMBINED_ROOT/build_scripts/setBCMenv.sh"
    export OPENSOURCE_BASE=$BUILDS_DIR/opensource
    CROSS_COMPILE=mipsel-linux
    export CC=$CROSS_COMPILE-gcc
    export CXX=$CROSS_COMPILE-g++
    export GLIB_LIBRARY_PATH=$APPLIBS_TARGET_DIR/usr/local/lib/
    export GLIBS='-lglib-2.0 -lintl -lz'
  fi
elif [ "$PLATFORM_SOC" = "stm" ]; then
  # STM (xi4): locate the Yocto-style environment-setup-* file and source it.
  if [ "${COMCAST_PLATFORM}" = "xi4" ]; then
    export TOOLCHAIN_DIR=$COMBINED_ROOT/sdk/toolchain/staging_dir
    #setup sdk environment variables
    export TOOLCHAIN_NAME=$(find "${TOOLCHAIN_DIR}" -name environment-setup-* | sed -r 's#.*environment-setup-##')
    source "$TOOLCHAIN_DIR/environment-setup-${TOOLCHAIN_NAME}"
    export OPENSOURCE_BASE=$COMBINED_ROOT/opensource
    export GLIB_LIBRARY_PATH=$COMBINED_ROOT/sdk/fsroot/ramdisk/usr/local/lib
    export GLIB_CONFIG_INCLUDE_PATH=$TOOLCHAIN_DIR/sysroots/cortexa9t2hf-vfp-neon-oe-linux-gnueabi/usr/lib/glib-2.0/include
    export GLIBS='-lglib-2.0'
  fi
fi
# Build and report the result with an explicit exit status for CI callers.
make
if [ $? -ne 0 ] ; then
  echo SetPowerState Tool Build Failed
  exit 1
else
  echo SetPowerState Tool Build Success
  exit 0
fi
| true
|
5e8b9283c210e0ad443d8f546213f21ee12858cb
|
Shell
|
dora1998/dotfiles
|
/setup.sh
|
UTF-8
| 539
| 3.34375
| 3
|
[] |
no_license
|
#!/bin/bash
# Interactive dotfiles setup: prompt for each step and source the matching
# installer script. The `prompt` helper comes from src/bootstrap.sh.
# cd to the script's own directory so the relative `src/...` paths resolve
# (original comment was in Japanese: "cd so relative paths can be used").
SCRIPT_DIR=$(dirname "$0")
cd "$SCRIPT_DIR" || exit 1
source src/bootstrap.sh
prompt '🔧 Link dotfiles?'
if [[ $? -eq 0 ]]; then
  source src/install/link.sh
fi
prompt '🍺 Install Homebrew?'
if [[ $? -eq 0 ]]; then
  source src/install/brew.sh
fi
# Homebrew restore
prompt '📦 Restore Homebrew packages?'
if [[ $? -eq 0 ]]; then
  # Forward the first CLI argument (e.g. a Brewfile selector) to the restorer.
  source src/install/brew_packages.sh "$1"
fi
# VSCode Extentions
prompt '✍️ Install VSCode Extensions?'
if [[ $? -eq 0 ]]; then
  source src/install/vscode.sh
fi
| true
|
4ef12c18212eeef8d2aa349bbb993fbb1ec61f44
|
Shell
|
rtyler/brokenco.de
|
/_scripts/new-microblog
|
UTF-8
| 560
| 3.296875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Create a new microblog post: open $EDITOR (vim) on a tweet file named after
# a random 8-char slug, then render it through the microblog template.
# BUGFIX: the original `set -xo` is a no-op — `-o` with no option name merely
# prints the current option settings instead of enabling anything.
set -x
DATE_DIR=$(date +%Y/%m/%d)
# Random, collision-resistant post slug (first 8 hex chars of a SHA-1).
POST_TITLE=$(openssl rand 100000 | shasum | cut -c1-8)
POST_DATE=$(date +%Y-%m-%d)
POST_TIME=$(date +%Y-%m-%d\ %T\ %z)
TWEET_FILE=tweets/${DATE_DIR}/${POST_TITLE}.tweet
POST_FILE=_microblog/${DATE_DIR}/${POST_TITLE}.md
MICROBLOG_TEMPLATE=_templates/microblog
mkdir -p tweets/${DATE_DIR}
mkdir -p _microblog/${DATE_DIR}
# Author the tweet interactively, then substitute it into the template.
vim ${TWEET_FILE}
cat ${MICROBLOG_TEMPLATE} | \
sed "s/%CURRENT_DATE%/${POST_TIME}/g" | \
sed "s/%POST_TITLE%/${POST_TITLE}/g" > ${POST_FILE} && cat ${TWEET_FILE} >> ${POST_FILE}
| true
|
49eaa38125b947a173a1665b43c32f2878c059ae
|
Shell
|
appetizermonster/bash-examples
|
/random-word.sh
|
UTF-8
| 479
| 3.96875
| 4
|
[] |
no_license
|
#!/bin/bash
# Print N random words drawn from the system dictionary.
# Usage: random-word.sh <count>
if [ -z "$1" ]; then
  echo "Please specify the number of random words"
  echo "ex) $0 10"
  exit 0
fi
WORDS=/usr/share/dict/words
WORD_COUNT=$(wc -l "$WORDS" | awk '{print $1}')
# Emit one random word from $WORDS on stdout.
pick_random_words() {
  # BUGFIX: the original scaled $RANDOM by SCALE=$((WORD_COUNT/32767)), which
  # truncates to 0 for dictionaries under 32768 lines and made the picked
  # index always 1. Combine two 15-bit $RANDOM draws into one 30-bit value so
  # every line is reachable regardless of dictionary size.
  local RANDOM_INDEX=$(( ( (RANDOM << 15) | RANDOM ) % WORD_COUNT + 1 ))
  local RANDOM_WORD=$(sed -n "${RANDOM_INDEX}p" < "$WORDS")
  echo $RANDOM_WORD
}
COUNT=$1
for (( v=1; v<=COUNT; v++ )); do
  WORD=$(pick_random_words)
  echo "$v. $WORD"
done
| true
|
1da0a5b0c373831cf22740a87543c2657b28310c
|
Shell
|
nkhogen/bytescheme-go
|
/build.sh
|
UTF-8
| 2,349
| 3.296875
| 3
|
[] |
no_license
|
#!/bin/bash -xe
# Build the controller CLI and math server, generating swagger client/server
# code first. Usage: build.sh [GOOS] [GOARCH]; defaults to the host OS and
# amd64. When targeting linux/arm the binaries are deployed to the RPI.
BASEDIR=$(dirname "$0")
pushd $BASEDIR
SCRIPT_DIR=$(pwd)
popd
export GO111MODULE=on
# Map `uname -s` onto a GOOS-style machine name.
unameOut="$(uname -s)"
case "${unameOut}" in
  Linux*) MACHINE=linux
  ;;
  Darwin*) MACHINE=darwin
  ;;
  CYGWIN*) MACHINE=Cygwin
  ;;
  MINGW*) MACHINE=MinGw
  ;;
  *) MACHINE="UNKNOWN:${unameOut}" ;;
esac
GOOS=$1
GOARCH=$2
# BUGFIX: the original assigned the fallback to a misspelled GO_OS variable,
# so GOOS was exported empty whenever no OS argument was given.
if [ -z "$GOOS" ]; then
  GOOS=$MACHINE
fi
if [ -z "$GOARCH" ]; then
  GOARCH=amd64
fi
echo "OS=${GOOS}, ARCH=${GOARCH}"
export GOOS=${GOOS}
export GOARCH=${GOARCH}
export GOARM=5
CONTROLLER_DIR=${SCRIPT_DIR}/controller
CONTROLLER_GENERATED_DIR=${CONTROLLER_DIR}/generated
CONTROLLER_MAIN_DIR=${CONTROLLER_DIR}/cmd
CONTROLLER_MAIN_FILE=${CONTROLLER_DIR}/cli/cmd/main.go
CONTROLLER_UI_DIR=${CONTROLLER_DIR}/ui
CONTROLLER_SWAGGERUI_DIR=${CONTROLLER_UI_DIR}/swaggerui
EDUCATIONAL_DIR=${SCRIPT_DIR}/edu
MATH_SERVER_MAIN_FILE=${EDUCATIONAL_DIR}/math/server/server.go
# Start from a clean generated-code directory.
if [ -d "${CONTROLLER_GENERATED_DIR}" ]; then
  rm -rf "${CONTROLLER_GENERATED_DIR}"
fi
mkdir -p ${CONTROLLER_GENERATED_DIR}
BIN_OUT_DIR=${SCRIPT_DIR}/build
mkdir -p ${BIN_OUT_DIR}
rm -rf ${BIN_OUT_DIR}/controller
go get github.com/rakyll/statik
# Generate controller swagger client code
swagger generate client -t ${CONTROLLER_GENERATED_DIR} -f ${CONTROLLER_DIR}/controller-swagger.json -A controller
swagger generate server -t ${CONTROLLER_GENERATED_DIR} -f ${CONTROLLER_DIR}/controller-swagger.json -A controller
# The middleware patch tool runs on the build host, not the target platform.
GOOS=$MACHINE GOARCH=amd64 go run ${SCRIPT_DIR}/tool/cmd/main.go replace-middleware -f ${CONTROLLER_GENERATED_DIR}/restapi/configure_controller.go
cp -rf ${CONTROLLER_DIR}/controller-swagger.json ${CONTROLLER_SWAGGERUI_DIR}/swagger.json
statik -src=$CONTROLLER_UI_DIR -dest=$CONTROLLER_GENERATED_DIR
# Compile controller CLI
go build -ldflags '-w -s' -o ${BIN_OUT_DIR}/controller ${CONTROLLER_MAIN_FILE}
go build -ldflags '-w -s' -o ${BIN_OUT_DIR}/mathserver ${MATH_SERVER_MAIN_FILE}
# For linux arm (RPI)
if [[ "$GOOS" == "linux" ]] && [[ "$GOARCH" == "arm" ]]; then
  scp ${BIN_OUT_DIR}/controller pi@192.168.1.20:/home/pi/controller
  scp ${BIN_OUT_DIR}/mathserver pi@192.168.1.20:/home/pi/mathserver
  ssh pi@192.168.1.20 'sudo systemctl stop controlboard.service && sudo cp /home/pi/controller /controlboard/bin/ && sudo cp /home/pi/mathserver /controlboard/bin/ && sudo systemctl start controlboard.service'
fi
| true
|
ea78cbb36d89d2363f02332a2d9767b892175ad6
|
Shell
|
koiuo/synology-mediaserver
|
/nas/usr/local/etc/rc.d/S90chrootmount.sh
|
UTF-8
| 318
| 3.171875
| 3
|
[] |
no_license
|
#!/bin/sh
# Synology rc.d hook: bind-mount the shared media folder into the
# debian-chroot package so the chrooted media server can see it.
. /etc.defaults/rc.subr

MEDIADIR=/volume1/media
CHROOTDIR=/var/packages/debian-chroot/target/var/chroottarget/media

case $1 in
  start)
    # Make /volume1/media visible inside the chroot.
    mount -o bind $MEDIADIR $CHROOTDIR
    ;;
  stop)
    umount $CHROOTDIR
    ;;
  restart)
    # Re-invoke ourselves for each phase.
    $0 stop
    sleep 1
    $0 start
    ;;
  *)
    echo "Usage: $0 start|stop|restart"
    ;;
esac
| true
|
77f23531e2d86c31925cc524433a714bb02dbc72
|
Shell
|
morteza-jamali/linux-live
|
/Slax/debian10/rootcopy/usr/bin/sudo
|
UTF-8
| 206
| 3.484375
| 3
|
[] |
no_license
|
#!/bin/bash
# Minimal stand-in for sudo on systems without it: when already root, exec
# the requested command directly; otherwise explain and fail.
if [ "$#" -lt 1 ]; then
  echo "Usage: sudo [command] [arguments]"
  exit 1
fi
if [ "$UID" = 0 ]; then
  exec "$@"
fi
# BUGFIX: the original fell through and exited 0 here, so scripts invoking
# `sudo cmd` as a non-root user saw success without cmd ever running.
echo "Sudo is not installed. Run the command as root or install sudo." >&2
exit 1
| true
|
490c71bf7d9d82f0aa92f402fcb6c649c79a9c31
|
Shell
|
nboulif/filler
|
/cheker.sh
|
UTF-8
| 1,226
| 2.90625
| 3
|
[] |
no_license
|
#!/bin/bash
# Tournament harness for the 42 "filler" project: pits my player against a
# roster of opponents on several maps, piping each game through the Python
# visualizer as a background job, then waits for a keypress to kill them all.
vm=./tools/filler_vm
my_player=./nboulif.filler
all_players="tools/players/abanlin.filler
tools/players/abarriel.filler
tools/players/carli.filler
tools/players/grati.filler
tools/players/hcao.filler
tools/players/superjeannot.filler"
# tools/players/ntoniolo.filler"
# tools/players/carodrig.filler
# tools/players/champely.filler
# tools/players/nboulif.filler
# tools/players/qhonore.filler
# tools/players/lcharvol.filler
# tools/players/hvromman.filler
# tools/players/rpousseu_last.filler
# tools/players/sofchami.filler
all_maps="tools/maps/map00
tools/maps/map01"
# tools/maps/map02"
#tools/maps/map03
# Play every opponent on every map, twice per pairing (once as p1, once as
# p2); $index gives each visualizer window a distinct slot.
run_games() {
let index=0
for player in $all_players; do
# for map in tools/maps/map00 tools/maps/map01; do
for map in $all_maps; do
# echo "Processing $index ${player##*/} vs ${player##*/} on map ${player##*/}"
$vm -f ./$map -p1 ./$my_player -p2 ./$player | python3 visu/filler_visu.py 250 $index &
let index++
# echo "Processing ${player##*/} vs $index ${player##*/} on map ${player##*/}"
$vm -f ./$map -p1 ./$player -p2 ./$my_player | python3 visu/filler_visu.py 250 $index &
let index++
done
done
# All games run concurrently in the background; block until the user is done,
# then kill every process whose command line matches "filler".
read -p "Press enter to quit all"
pkill -f filler
}
run_games
| true
|
1d1df2bb2500baeb34b6f7e94f9f3895db93f7ff
|
Shell
|
wadewilkins/NewUtilities
|
/Ebs_volume_upsize/get_ssh_key_from_vault.sh
|
UTF-8
| 2,507
| 3.875
| 4
|
[] |
no_license
|
#/usr/bin/bash
# NOTE(review): the line above is missing the '!' of a shebang, so it is just
# a comment; the script runs under whatever shell invokes it — confirm intent.
# Fetch an SSH private key from a Vault server (discovered via EC2 tags),
# by generating a small vault-client script, running it remotely over SSH,
# and reconstructing the PEM file from the flattened output.
# Arguments: $1 = vault path, $2 = PEM base name, $3 = vault token.
echo "In get_ssh_key_from_vault.sh"
VAULTPATH=$1
PEM_BASE=$2
echo "Vault path=$VAULTPATH"
TOKEN=$3
ACTION="READ"
# Locate the vault host: first private IP of the EC2 instance tagged
# Name=sphinx_vault in us-west-2.
SERVER=$(aws --region us-west-2 ec2 describe-instances --filters "Name=tag:Name,Values=sphinx_vault" | grep '"PrivateIpAddress"' | tr ',"' ' ' | cut -d: -f2 | tr -s ' ' | sort -u | tr '\n' , | tr -d ' ' | cut -d , -f 1)
echo "Vault server: $SERVER"
# NOTE(review): $KVPair is never defined in this script, so KeyValue is empty
# here; it is only used by the WRITE branch of the generated script — confirm
# whether it was meant to come from the environment.
KeyValue=`echo ${KVPair//,/ }`
#echo "########################################################################"
#echo "#"
#echo "# IMPORTANT!"
#echo "# In this script, get_ssh_key_from_vault.sh, "
#echo "# The bellow variable VAULT_ADDR, is envrionment specific."
#echo "# It will need to be set for every env (dev,lab,prod)"
#echo "#"
#echo "########################################################################"
# Generate the remote vault-client script. $TOKEN/$ACTION/$VAULTPATH/$KeyValue
# are expanded NOW (double quotes), so the remote script carries literal
# values. NOTE(review): the embedded `else [[ ... ]]` branch executes the
# bracket test as a command and always falls through to delete when ACTION is
# unrecognized — confirm; ACTION is hardcoded to READ here so it never fires.
echo "
#export VAULT_ADDR=https://vault.lab.expts.net
export VAULT_ADDR=https://vault.platform.dexpts.net
#echo ""
/usr/local/bin/vault login $TOKEN
if [[ "$ACTION" == "LIST" ]]
then
/usr/local/bin/vault list $VAULTPATH
elif [[ "$ACTION" == "READ" ]]
then
/usr/local/bin/vault read $VAULTPATH
elif [[ "$ACTION" == "WRITE" ]]
then
/usr/local/bin/vault write $VAULTPATH "$KeyValue"
else [[ "$ACTION" == "DELETE" ]]
/usr/local/bin/vault delete $VAULTPATH
fi
" > /tmp/deploy.sh
echo $SERVER
# Ship the generated script to the vault host and run it with sudo, capturing
# its stdout (which contains the flattened PEM).
scp -i /var/lib/jenkins/.ssh/app.pem -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no /tmp/deploy.sh centos@$SERVER:/home/centos/deploy.sh
PEM_LINES=`ssh -tt -q -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i /var/lib/jenkins/.ssh/app.pem centos@$SERVER "sudo bash /home/centos/deploy.sh"`
#Set the field separator to new line
# This was harder than I had hoped.
# It seems vault replaces \n with ' '.
# I converted back.
# Of coarse, the spaces in the first and last lines (eg: BEGIN RSA PRIVATE KEY) are valid.
# Seems a bit hacky, Maybe vault can be taught to store newline characters?
#
#
#Try to iterate over each line
echo "For loop:"
# NOTE(review): $PEM_LOCATION is not set in this script — presumably exported
# by the calling job; verify, otherwise the PEM lands in the current directory.
PEM_FILE_NAME="$PEM_LOCATION$PEM_BASE.pem"
IFS=$'\n'
GOOD_DATA="0"
# Rebuild the PEM: vault flattened the key onto one space-separated line, so
# strip the BEGIN/END armor, turn the spaces back into newlines, and re-wrap.
for item in $PEM_LINES
do
if [[ "$item" == *"BEGIN RSA PRIVATE KEY"* ]]; then
echo "-----BEGIN RSA PRIVATE KEY-----" >$PEM_FILE_NAME
tmp=${item#*----- }
tmp2=${tmp::-34}
tmp3=`echo $tmp2 | tr " " "\n"`
echo "$tmp3" >>$PEM_FILE_NAME
echo "-----END RSA PRIVATE KEY-----" >>$PEM_FILE_NAME
IFS='|'
IFS=$'\n'
GOOD_DATA="1"
fi
done
# NOTE(review): PEM_FILE_NAME is never set to "START", so this chmod always
# runs — the guard looks vestigial; confirm before removing.
if [[ "$PEM_FILE_NAME" != "START" ]]; then
chmod 400 $PEM_FILE_NAME
fi
| true
|
2d33f8e5e28dce19a50c17272006d2e6263d6f7c
|
Shell
|
Enricobazzi/PlanNacional_Demographic_models_Oscar
|
/2.Variant_Filtering-executables/Missingness_Filter_6.sh
|
UTF-8
| 6,101
| 3.484375
| 3
|
[] |
no_license
|
#!/bin/bash
#SBATCH -t 12:00:00
#SBATCH -p thinnodes
#SBATCH -c 24
#SBATCH --mail-type=BEGIN
#SBATCH --mail-type=END
#SBATCH --mail-type=TIME_LIMIT_80
#SBATCH --mail-user=enricobazzical@gmail.com
#############################################################
START=$(date)
echo "Missingness_Filter_6 SCRIPT for $1 starting : $START"
#############################################################
####################################
## Missingness Filtering launcher ##
####################################
# With this script I want to apply a filter based on data missingness to my
# dataset of 20 individuals, composed of 11 Lynx pardinus, 4 Lynx lynx,
# 3 Lynx rufus and 2 Lynx canadiensis. I will remove variants which are completely
# absent in all the individuals of a species, except for Lynx pardinus.
# Lynx pardinus, having more individuals, will be filtered for variants absent in
# at least 70% of the individuals (8 out of 11).
# To do so I will use the following softwares:
# BEDtools 2.28.0
# BCFtools 1.9
module load bedtools
module load bcftools
# # The VCF file name (without the extensions) must be defined while launching
# the script as such:
# ./Missingness_Filter_6.sh <VCFfilename>
#####################################
## Applying filters - Preparations ##
#####################################
# List all sample names in a .namelist file:
ls $LUSTRE/test/CatRef_bams/*.bam | rev | cut -d'/' -f1 | rev | cut -d '_' -f1-4 | sort -u \
> $LUSTRE/test/CatRef_bams/all-samples.namelist
# List species in an array (for loop):
speciesARRAY=($(ls $LUSTRE/test/CatRef_bams/*.bam | rev | cut -d'/' -f1 | rev | cut -d '_' -f2 | sort -u))
# Create a copy of the VCF file with a new name that will be used to filter out
# excessively missing variants:
cp $LUSTRE/test/CatRef_vcfs/"$1".filter5.vcf $LUSTRE/test/CatRef_vcfs/"$1".filter6.vcf
#######################################
## Applying filters - The great LOOP ##
#######################################
# For each species:
# (1) create a .namelist file (with the names of all samples of that species);
# (2) use the namelist file to divide the VCF by species;
# (3) extract the excessive missingness variants with BCFtools filter; F_MISSING
# is the proportion of missing data — complete absence (= 1) for lc/ll/lr,
# > 0.7 for lp (which has more individuals);
# (4) filter the excessively missing variants from the combined filter6 VCF
# with BEDtools subtract.
# REFACTOR: the four near-identical per-species if/elif branches of the
# original are collapsed into one parameterized code path; only the
# F_MISSING threshold and the log tag differ per species.
# Have a log file with filtered variants counts:
echo "Per-Species missingness variant filtering:" > $LUSTRE/test/missingness.variants.log
for species in ${speciesARRAY[@]}
do
# (1) create a .namelist file (with the names of all samples of that species)
echo "extracting $species names"
grep $species $LUSTRE/test/CatRef_bams/all-samples.namelist > $LUSTRE/test/CatRef_bams/$species.namelist
# (2) use the namelist file to divide the VCF by species
echo "filtering $species individuals from original VCF"
bcftools view -S $LUSTRE/test/CatRef_bams/"$species".namelist -Ov \
$LUSTRE/test/CatRef_vcfs/"$1".filter5.vcf \
> $LUSTRE/test/CatRef_vcfs/"$species"_cat_ref.filter5.subset.vcf
echo "extracting missing variants from $species VCF and filtering them out"
# Per-species missingness threshold; unknown species are skipped, exactly
# as the original if/elif chain did.
case $species in
lp) MISS_EXPR="F_MISSING > 0.7" ;;
lc|ll|lr) MISS_EXPR="F_MISSING = 1" ;;
*) continue ;;
esac
# (3) extract the excessively missing variants for this species
bcftools filter -i "$MISS_EXPR" -Ov $LUSTRE/test/CatRef_vcfs/"$species"_cat_ref.filter5.subset.vcf \
> $LUSTRE/test/CatRef_vcfs/"$species"_cat_ref.filter5.subset.missing.vcf
NMISS=$(grep -v "#" $LUSTRE/test/CatRef_vcfs/"$species"_cat_ref.filter5.subset.missing.vcf | wc -l)
SPECIES_UC=$(echo "$species" | tr '[:lower:]' '[:upper:]')
echo "Variants filtered for $SPECIES_UC : $NMISS" >> $LUSTRE/test/missingness.variants.log
# (4) subtract them from the combined filter6 VCF (in place via tmp file)
bedtools subtract -a $LUSTRE/test/CatRef_vcfs/"$1".filter6.vcf \
-b $LUSTRE/test/CatRef_vcfs/"$species"_cat_ref.filter5.subset.missing.vcf -header \
> tmp && mv tmp $LUSTRE/test/CatRef_vcfs/"$1".filter6.vcf
done
###########################################################
END=$(date)
echo "Missingness_Filter_6 SCRIPT for $1 ended : $END"
###########################################################
| true
|
fe0d095a266426915c74e6c4675a0900cd7ac167
|
Shell
|
ccFiona/data-analyse
|
/1.48 src/local_src/list_cid_pv.sh
|
UTF-8
| 965
| 2.8125
| 3
|
[] |
no_license
|
#!/bin/bash
############################################################################
##
## Copyright (c) 2013 hunantv.com, Inc. All Rights Reserved
## $Id: list_cid_pv.sh,v 0.0 2016年05月11日 星期三 09时44分15秒 <tangye> Exp $
##
############################################################################
#
###
# # @file list_cid_pv.sh
# # @author <tangye><<tangye@mgtv.com>>
# # @date 2016年05月11日 星期三 09时44分15秒
# # @brief Count pageloads per (pagename, cid) from a tab-separated log.
# #
# ##
# BUGFIX: the shebang was originally buried after the comment header, where it
# is inert; it must be the first line of the file to take effect.
# Usage: list_cid_pv.sh <input-log> <output-file>
input=$1
out=$2
###########
# input: ip, aver, act, lics, mac, time, sver, net, mf, mod,
# pagename, ext1 ~ ext10
# isdebug
#
# returns: aver, ext3, pv, uniq_pv(uniq by ext10, pagename=C, act=pageload)
# Tally rows with act == "pageload" whose pagename contains "B", keyed by
# pagename + cid (field 15), then emit sorted counts.
awk 'BEGIN{FS=OFS="\t"}{
if(NF>=22){
act=$3;
pagename=$11;
cid=$15;
if(act=="pageload"&&pagename~/B/){
pv_count[pagename"\t"cid]++;
}
}
}END{
for(key in pv_count){
print key, pv_count[key]
}
}' "$input" | sort -k1,2 > "$out"
## vim: set ts=2 sw=2: #
| true
|
43e4e8a0930bc471468e83736e172f3a716a7a32
|
Shell
|
jameskuzwao/offline_testing
|
/scripts/get_ka_users_and_device.sh
|
UTF-8
| 2,241
| 3.71875
| 4
|
[] |
no_license
|
#!/bin/bash
# Export the KA Lite device name and facility-user list from the kalite
# sqlite database into CSV files, then load them into the baseline-testing
# responses database.
# NOTE(review): $responses_database, $ka_database, $device_name_file and
# $users_file are never set here — presumably exported by the calling
# script; confirm, since e.g. `test -f $responses_database` with an empty
# variable degenerates to `test -f`, which is always true.
#colors
#=======
export red=`tput setaf 1`
export green=`tput setaf 2`
export yellow=`tput setaf 3`
export blue=`tput setaf 4`
# reset to default bash text style
export reset=`tput sgr0`
# make actual text bold
export bold=`tput bold`
# check if test responses db exists. If not, copy pristine db
test -f $responses_database
if [ "$?" = "0" ]; then
echo "${blue}Database already exists.Skipping...${reset}"
else
echo "${green}Database does not exist. Creating now...${reset}"
cp ~/.baseline_testing/public/test_responses.pristine.sqlite ~/.baseline_testing/public/test_responses.sqlite
fi
# clear contents of device_name and users csv files if they already exist
if [ -e "$device_name_file" ] ; then
> $device_name_file
fi
if [ -e "$users_file" ] ; then
> $users_file
fi
# get device name and store it in csv file
echo "extracting device name from kalite database"
sqlite3 -header -csv $ka_database "SELECT d.name FROM securesync_device d JOIN securesync_devicemetadata s WHERE s.device_id = d.id AND s.is_own_device = 1" >> $device_name_file
# get list of users and save in csv file
echo "extracting user details from kalite database"
sqlite3 -header -csv $ka_database "select su.id as user_id, su.username, sf.name as group_name,su.first_name, su.last_name from securesync_facilityuser su left join securesync_facilitygroup sf on sf.id=su.group_id where su.deleted = 0" >> $users_file
# clear out users table
echo "Cleaning users table..."
sqlite3 $responses_database "delete from users"
# clear out device table
echo "Cleaning device_name table..."
sqlite3 $responses_database "delete from device;"
# create backup of $users_file and device.csv with headers
# appears as new file with suffix .headers
# then remove headers before importing into sqlite
sed -i.headers '1d' $users_file
sed -i.headers '1d' $device_name_file
# import users csv into user table in test_responses
sqlite3 $responses_database <<!
.mode csv
.import $users_file users
!
echo "Populating users table..."
# import device name into device table in test_responses
sqlite3 $responses_database <<!
.mode csv
.import $device_name_file device
!
echo "Populating device_name table.."
# get details for literacy learners
echo "Done!"
| true
|
615b28779bb8e735e33a9ff383906f86ff2335bc
|
Shell
|
josebummer/ugr_fundamentos_de_software
|
/Modulo 1/Practica 6/p6/case
|
UTF-8
| 213
| 4.0625
| 4
|
[] |
no_license
|
#!/bin/bash
# Classify the single character given as $1: digit, lowercase letter,
# uppercase letter, or anything else.
if [ $# -ne 1 ] ; then
  echo "Uso: case <caracter>"
  exit;
fi

case $1 in
  [0-9])
    echo "numero" ;;
  [a-z])
    echo "minisculas";;
  [A-Z])
    echo "mayusculas";;
  *)
    echo "caracter especial" ;;
esac
| true
|
5cdd9aa544d776a967c1802d8cc9c1fccfa71537
|
Shell
|
Kano-Gooop/my_library
|
/temp/backup-plus.sh
|
UTF-8
| 670
| 3.28125
| 3
|
[] |
no_license
|
#!/bin/bash
# Dump a fixed list of MySQL databases to timestamped .sql files.
#1. Database connection settings (original comment translated from Chinese).
MY_user="root"
MY_host="127.0.0.1"
MY_conn="-u $MY_user -h $MY_host"
#2. Backup directory, dump tool and timestamp prefix (translated).
cmd="/usr/bin/mysqldump"
prefix=$(date +%Y%m%d%H%M%S)
#MY_conn="-u $MY_user -p$MY_pass -h $MY_host"
bf_dir="/root/mysqlbackup/"
# -p creates intermediate directories too (the original bare mkdir failed
# when /root/mysqlbackup's parent was missing).
if [ ! -d "$bf_dir" ]; then
  mkdir -p "$bf_dir"
else
  echo "$bf_dir already exist"
fi
# One dump file per database, named <timestamp>-<db>.sql.
for loop in "wujin" "zen"
do
  filename="$prefix-$loop"
  $cmd $MY_conn --databases "$loop" > "$bf_dir$filename.sql"
  echo "$filename done"
done
#/bin/tar zcf $name_1.tar.gz $name_1.sql --remove &> /dev/null
#/bin/tar zcf $name_2.tar.gz $name_2.sql --remove &> /dev/null
| true
|
f307e943f31cf7b22c78f83bf511ba1f802dd9df
|
Shell
|
vlaadbrain/swt
|
/test/boxes.sh
|
UTF-8
| 636
| 2.828125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
#crude but eventually i'll see a something out of it
# Smoke test for swt: start it reading commands from $IN and logging to $OUT,
# open a window, feed it text commands with pauses, then close the window.
BIN="../swt"
IN="./in"
OUT="./out"
#exec valgrind --leak-check=full $BIN -i $IN -o $OUT &
exec $BIN -i $IN -o $OUT &
SWT_PID=$!
# Start from an empty log so the grep below only sees this run's output.
cat /dev/null > $OUT
echo "window testing This is a new window" > $IN
sleep 1
# The app writes the created window's id to $OUT; third field of the line.
# NOTE(review): the sleeps are load-bearing — they give swt time to process
# each command file write; timing-dependent by design.
WIN_ID=`grep "window testing" $OUT | awk '{print $3}'`
echo "WIN_ID=${WIN_ID}"
sleep 1
echo "add testing text 'lorem ipsum'" > $IN
sleep 2
echo "add testing text 'lorem ipsum'" > $IN
sleep 2
echo "add testing text 'lorem ipsum'" > $IN
sleep 2
echo "add testing text 'lorem ipsum'" > $IN
sleep 2
echo "dump" > $IN
sleep 2
# Close the window by sending Ctrl+Q via xdotool.
xdotool keydown --window $WIN_ID ctrl+q
| true
|
81dc7246f9aba6f48fc7223efaf8b885e8243220
|
Shell
|
burke/dotfiles
|
/bin/_git/git-abort
|
UTF-8
| 286
| 2.84375
| 3
|
[] |
no_license
|
#!/bin/bash
# Abort whichever git operation is currently in progress in this repository:
# cherry-pick, merge, or rebase (detected via the state files git leaves
# in the git directory).
set -euo pipefail

git_dir="$(git rev-parse --git-dir)"

if [[ -f "$git_dir/CHERRY_PICK_HEAD" ]]; then
  git cherry-pick --abort
fi
if [[ -f "$git_dir/MERGE_HEAD" ]]; then
  git merge --abort
fi
if [[ -d "$git_dir/rebase-merge" ]]; then
  git rebase --abort
fi
| true
|
f7be4ad1fdfad5c589914d9a864ff2b589218eb7
|
Shell
|
OpenEtna/android_device_lg_eve
|
/extract-files.sh
|
UTF-8
| 1,821
| 2.6875
| 3
|
[] |
no_license
|
#!/bin/bash
# Collect the proprietary blobs needed by the LG Eve Android device tree from
# an extracted stock system image ($SRC) into ./proprietary/.
rm -Rf proprietary
mkdir -p proprietary
#Radio
FILES="lib/liblgdrmwbxml.so lib/liblgdrmxyssl.so lib/libdll.so lib/libril-qcril-hook-oem.so lib/libgsdi_exp.so lib/libgstk_exp.so lib/libwms.so"
FILES="$FILES lib/libnv.so lib/libwmsts.so lib/liblgeat.so lib/libril_log.so lib/liblgerft.so lib/libbcmwl.so lib/liblgdrm.so lib/libwmdrmpd.so"
FILES="$FILES lib/liboem_rapi.so lib/libmmgsdilib.so lib/libcm.so lib/liboncrpc.so lib/libdsm.so lib/libqueue.so"
FILES="$FILES lib/libdiag.so lib/libril-qc-1.so lib/libril.so"
FILES="$FILES lib/libdss.so lib/libqmi.so bin/qmuxd"
#Wifi
FILES="$FILES etc/wl/rtecdc.bin etc/wl/nvram.txt"
#Bluetooth
FILES="$FILES bin/BCM4325D0_004.001.007.0168.0169.hcd bin/btld"
#Camera
FILES="$FILES lib/libmm-qcamera-tgt.so lib/libmmjpeg.so lib/libcamera.so"
#Video
FILES="$FILES lib/libmm-adspsvc.so lib/libOmxH264Dec.so lib/libOmxMpeg4Dec.so lib/libOmxVidEnc.so lib/libOmxWmvDec.so"
FILES="$FILES lib/libomx_aacdec_sharedlibrary.so lib/libomx_amrdec_sharedlibrary.so lib/libomx_amrenc_sharedlibrary.so lib/libomx_avcdec_sharedlibrary.so"
FILES="$FILES lib/libomx_m4vdec_sharedlibrary.so lib/libomx_mp3dec_sharedlibrary.so"
FILES="$FILES lib/libaomx_mp3dec_sharedlibrary.so lib/libaomx_mp4dec_sharedlibrary.so lib/libaomx_wmadec_sharedlibrary.so lib/libaomx_wmvdec_sharedlibrary.so"
#Sensors
FILES="$FILES bin/akmd2"
#OpenGL
FILES="$FILES lib/egl/libGLES_qcom.so"
#GPS
FILES="$FILES lib/libloc.so lib/libloc-rpc.so lib/libcommondefs.so lib/libloc_api.so lib/libloc_ext.so lib/libgps.so"
# Source: a locally extracted system partition, relative to this script.
SRC="../../../../lg2.2/system"
if [[ ! -e $SRC ]]; then
echo "ERROR: Could not find $SRC"
exit 1
fi
# Copy each blob; abort on the first failure so a partial tree is noticed.
# The commented-out branch pulled files from a connected device via adb
# instead of the local image.
for i in $FILES
do
#if [[ -e $SRC ]]; then
cp -a "$SRC/$i" proprietary/ || exit 1
#else
# adb pull /system/$i proprietary/ || exit 1
#fi
done
# The sensor daemon must be executable on the device.
chmod 755 proprietary/akmd2
| true
|
cd2b87e3640b188a298f2e585492da36b188feea
|
Shell
|
VERITAS-Observatory/Eventdisplay_AnalysisScripts_VTS
|
/scripts/db_scripts/db_L1_TriggerInfo.sh
|
UTF-8
| 557
| 3.328125
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# Read per-pixel L1 trigger rates for one run from the VERITAS database
# and print them pipe-delimited.
usagetext="
db_L1_TriggerInfo.sh: read L1 trigger info from VTS database
db_L1_TriggerInfo.sh <run>
examples:
./db_L1_TriggerInfo.sh 64080
"
# No run number supplied, or help requested: print usage and stop.
if [[ -z "$1" || "$1" == "-h" ]]; then
echo "$usagetext"
exit
fi
run=$1
# Select rows within one minute of the run's data-taking window.
sqltext="select timestamp, telescope_id, pixel_id, rate from tblL1_TriggerInfo, tblRun_Info where timestamp >= tblRun_Info.data_start_time - INTERVAL 1 MINUTE AND timestamp <= tblRun_Info.data_end_time + INTERVAL 1 MINUTE AND tblRun_Info.run_id=${run};"
# db_mysqldb.sh emits the mysql client invocation; translate tabs to pipes.
$($EVNDISPSCRIPTS/db_scripts/db_mysqldb.sh) -e "USE VERITAS; ${sqltext}" | sed 's/\t/|/g'
| true
|
80055889699f8239e249efaad9913d992c3c1db8
|
Shell
|
bastiandg/setup
|
/packages/terraform.sh
|
UTF-8
| 874
| 3.765625
| 4
|
[
"Unlicense"
] |
permissive
|
#!/usr/bin/env bash
# Install or update terraform to the latest stable release from
# releases.hashicorp.com into /usr/bin. Exits early when already current.
set -eu -o pipefail
BASEDIR="$(dirname "$(readlink -f "$0")")"
RELEASE_URL="https://releases.hashicorp.com/terraform/"
# -f: fail on HTTP errors instead of capturing an error page as the index.
RELEASE_PAGE="$(curl -fs "$RELEASE_URL")"
set +o pipefail
# Newest stable version = first non-prerelease (rc/beta/alpha) link on the
# index. grep -m 1 exits early, hence pipefail is toggled off around this.
VERSION="$(echo "$RELEASE_PAGE" | grep -v 'rc\|beta\|alpha' | grep -m 1 'href="/terraform' | sed -re 's#.*terraform_([0-9.]*)</a>#\1#')"
set -o pipefail
# Guard against scraper breakage: an empty VERSION would otherwise build a
# bogus download URL below.
if [ -z "$VERSION" ]; then
  echo "could not determine latest terraform version" >&2
  exit 1
fi
# Exit 0 if the locally installed terraform already matches $VERSION.
versioncheck() {
  set +eu
  localversion="$(terraform --version 2>/dev/null | grep -oP '(?<=Terraform v).*(?=$)')"
  set -eu
  remoteversion="$VERSION"
  if [ "$localversion" = "$remoteversion" ]; then
    echo "terraform is up to date ($localversion)"
    exit 0
  fi
}
versioncheck
TMPDIR="$(mktemp -d)"
echo "Installing terraform $VERSION"
cd "$TMPDIR"
# -f: a 404 fails here with a clear error instead of later at unzip time.
curl -f -o terraform.zip "${RELEASE_URL}${VERSION}/terraform_${VERSION}_linux_amd64.zip"
unzip terraform.zip
sudo mv terraform /usr/bin/
cd "$BASEDIR"
rm -rf "$TMPDIR"
| true
|
f9ab3cdf70190ba9a200d4cac745a92bf0497386
|
Shell
|
davecharles/kafka_demo
|
/utils/secrets/create-keystore.sh
|
UTF-8
| 1,540
| 3.296875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Build a JKS keystore for host "$1": generate a key pair, trust the local
# CA (ca.crt), have the CA (ca.key) sign a CSR, import the signed cert, and
# record the generated password in <name>.keystore.creds / <name>.sslkey.creds.
# keytool/openssl run inside docker so no local JDK install is required.
set -o nounset \
    -o errexit
name=$1
# Random 32-character password used for both the store and the key.
# $() replaces the legacy backticks.
password=$(openssl rand -base64 96 | head -c 32)
# Host work directory, quoted everywhere so paths containing spaces survive
# word splitting (the previous unquoted `pwd` backticks did not).
workdir=$(pwd)
# Create keystores
docker run -v "${workdir}/:/keytool" -w /keytool \
    openjdk:11 keytool \
    -genkey -noprompt \
    -alias "${name}" \
    -dname "CN=${name}.local.kafkatastic.com, OU=K7N, O=Kubertron Ltd, L=Letchworth, C=GB" \
    -keystore "${name}.keystore.jks" \
    -keyalg RSA \
    -keysize 4096 \
    -storepass "${password}" \
    -keypass "${password}"
# Import CA cert
docker run -v "${workdir}/:/keytool" -w /keytool \
    openjdk:11 keytool \
    -import \
    -trustcacerts \
    -alias ca \
    -noprompt \
    -storepass "${password}" \
    -keystore "${name}.keystore.jks" \
    -file ./ca.crt
# Create CSR
docker run -v "${workdir}/:/keytool" -w /keytool \
    openjdk:11 keytool \
    -certreq \
    -alias "${name}" \
    -storepass "${password}" \
    -keystore "${name}.keystore.jks" \
    -file "./${name}.keystore.csr" \
    -dname "c=GB, st=England, l=Letchworth, o=Kubertron Ltd, ou=K7N, cn=${name}.local.kafkatastic.com"
# Sign CSR with the local CA.
# NOTE(review): the CA key passphrase is hardcoded below.
docker run -v "${workdir}/:/openssl" -w /openssl \
    openjdk:11 openssl x509 \
    -req \
    -CA ca.crt \
    -CAkey ca.key \
    -in "${name}.keystore.csr" \
    -out "${name}.keystore.crt" \
    -days 9999 \
    -CAcreateserial \
    -passin pass:kubertronca
# Import the signed certificate into the keystore.
docker run -v "${workdir}/:/keytool" -w /keytool \
    openjdk:11 keytool \
    -import \
    -alias "${name}" \
    -noprompt \
    -storepass "${password}" \
    -keystore "${name}.keystore.jks" \
    -file "./${name}.keystore.crt"
echo "${password}" > "${name}.keystore.creds"
echo "${password}" > "${name}.sslkey.creds"
| true
|
430fee1591ef67a77a7c3e8dd98ef26054814252
|
Shell
|
ansiwen/chromiumos-platform2
|
/init/chromeos_startup
|
UTF-8
| 16,879
| 3.609375
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/sh
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
. /usr/share/misc/chromeos-common.sh
UNDO_MOUNTS=
# Abort startup after a stateful-partition inconsistency: unwind every mount
# recorded via remember_mount (in reverse order of mounting, since new mounts
# are prepended to UNDO_MOUNTS), then hand control to clobber-state to wipe
# and rebuild stateful. Never returns (ends in exec).
# Arguments: $* - human-readable reason, logged via clobber-log.
cleanup_mounts()
{
# On failure unmount all saved mount points and repair stateful
for mount_point in ${UNDO_MOUNTS}; do
umount -n ${mount_point}
done
# Leave /mnt/stateful_partition mounted for clobber-state to handle.
chromeos-boot-alert self_repair
clobber-log -- \
"Self-repair incoherent stateful partition: $*. History: ${UNDO_MOUNTS}"
exec clobber-state "fast keepimg"
}
# Record mount point $1 on the (space-separated, newest-first) UNDO_MOUNTS
# stack so cleanup_mounts can unmount it if startup fails later.
remember_mount()
{
UNDO_MOUNTS="$1 ${UNDO_MOUNTS}"
}
# Run mount(8) with the given arguments; on success push the mount point
# (the last argument) onto the undo stack, on failure trigger stateful
# self-repair via cleanup_mounts (which does not return).
# Arguments: passed through to mount; the final one must be the mount point.
mount_or_fail()
{
local mount_point
# -c: Never canonicalize: it is a hazard to resolve symlinks.
# -n: Do not write to mtab: we don't use it.
if mount -c -n "$@" ; then
# Last parameter contains the mount point
shift $(( $# - 1 ))
# Push it on the undo stack if we fail later
remember_mount "$1"
return
fi
cleanup_mounts "failed to mount $*"
}
# Assert that the argument is a directory.
# On failure, clobbers the stateful partition.
# Assert that $1 is a real directory (and not a symlink); otherwise trigger
# stateful self-repair via cleanup_mounts (which does not return).
# Arguments: $1 - path to verify.
check_directory()
{
local path="$1"
# Bug fix: test the function's own argument. The original tested the
# caller's global ${d} for the symlink check and in the error message,
# so call sites passing a prefixed path checked the wrong file.
if [ -L "${path}" ] || [ ! -d "${path}" ]; then
cleanup_mounts "${path} is not a directory"
fi
}
# Mount debugfs as bootstat depends on /sys/kernel/debug
mount -n -t debugfs -o nodev,noexec,nosuid,mode=0750,uid=0,gid=debugfs-access \
debugfs /sys/kernel/debug
# bootstat writes timings to both tmpfs and debugfs.
bootstat pre-startup
# Some startup functions are split into a separate library which may be
# different for different targets (e.g., regular Chrome OS vs. embedded).
. /usr/share/cros/startup_utils.sh
# Factory related functions
. /usr/share/cros/factory_utils.sh
mkdir -p /dev/pts /dev/shm
mount -n -t tmpfs -o nodev,noexec,nosuid shmfs /dev/shm
mount -n -t devpts -o noexec,nosuid,gid=5,mode=0620 devpts /dev/pts
# Initialize kernel sysctl settings early so that they take effect for boot
# processes.
sysctl -q --system
# CROS_DEBUG equals one if we've booted in developer mode or we've
# booted a developer image.
crossystem "cros_debug?1"
# Arithmetic negation of crossystem's exit status: 0 (match) -> 1.
CROS_DEBUG=$((! $?))
# Prepare to mount stateful partition
ROOT_DEV=$(rootdev -s)
ROOTDEV_RET_CODE=$?
# Example root dev types we need to handle: /dev/sda2 -> /dev/sda,
# /dev/mmcblk0p0 -> /dev/mmcblk0p, /dev/ubi2_1 -> /dev/ubi
ROOTDEV_TYPE=$(echo $ROOT_DEV | sed 's/[0-9_]*$//')
ROOTDEV_NAME=${ROOTDEV_TYPE##/dev/}
ROOTDEV_REMOVABLE=$(cat "/sys/block/${ROOTDEV_NAME}/removable")
# Load the GPT helper functions and the image settings.
. "/usr/sbin/write_gpt.sh"
# Removable media uses per-partition variables; fixed disks the base set.
if [ "${ROOTDEV_REMOVABLE}" = "1" ]; then
load_partition_vars
else
load_base_vars
fi
# Check if we are booted on physical media. rootdev will fail if we are in
# an initramfs or tmpfs rootfs (ex, factory installer images. Note recovery
# image also uses initramfs but it never reach here). When using initrd+tftpboot
# (some old netboot factory installer), ROOTDEV_TYPE will be /dev/ram.
if [ "$ROOTDEV_RET_CODE" = "0" -a "$ROOTDEV_TYPE" != "/dev/ram" ]; then
# Find our stateful partition mount point.
# To support multiple volumes on a single UBI device, if the stateful
# partition is not found on ubi${PARTITION_NUM_STATE}_0, check
# ubi0_${PARTITION_NUM_STATE}.
STATE_FLAGS="nodev,noexec,nosuid"
if [ "${FORMAT_STATE}" = "ubi" ]; then
STATE_DEV="/dev/ubi${PARTITION_NUM_STATE}_0"
if [ ! -e "${STATE_DEV}" ]; then
STATE_DEV="/dev/ubi0_${PARTITION_NUM_STATE}"
fi
else
# Align the ext4 journal commit interval (seconds) with the kernel's
# dirty page expiry (centiseconds).
DIRTY_EXPIRE_CENTISECS=$(sysctl -n vm.dirty_expire_centisecs)
COMMIT_INTERVAL=$(( DIRTY_EXPIRE_CENTISECS / 100 ))
STATE_DEV=${ROOTDEV_TYPE}${PARTITION_NUM_STATE}
STATE_FLAGS="${STATE_FLAGS},commit=${COMMIT_INTERVAL}"
fi
# Check if we enable ext4 crypto.
if [ "${FS_FORMAT_STATE}" = "ext4" ]; then
# Enable directory encryption for existing install.
if ! dumpe2fs -h "${STATE_DEV}" 2>/dev/null | \
grep -qe "^Filesystem features:.* encrypt.*"; then
# The stateful partition is not set for encryption.
# Check if we should migrate.
if ext4_dir_encryption_supported; then
# The kernel support encryption, do it!
tune2fs -O encrypt "${STATE_DEV}"
fi
fi
fi
# Mount stateful partition from STATE_DEV.
if ! mount -n -t ${FS_FORMAT_STATE} -o ${STATE_FLAGS} \
"${STATE_DEV}" /mnt/stateful_partition; then
# Try to rebuild the stateful partition by clobber-state
# (for security concern, we don't use fast mode)
chromeos-boot-alert self_repair
clobber-log --repair "${STATE_DEV}" \
"Self-repair corrupted stateful partition"
exec clobber-state "keepimg"
fi
# Mount the OEM partition.
# mount_or_fail isn't used since this partition only has a filesystem
# on some boards.
OEM_FLAGS="ro,nodev,noexec,nosuid"
if [ "${FORMAT_OEM}" = "ubi" ]; then
OEM_DEV="/dev/ubi${PARTITION_NUM_OEM}_0"
else
OEM_DEV=${ROOTDEV_TYPE}${PARTITION_NUM_OEM}
fi
mount -n -t ${FS_FORMAT_OEM} -o ${OEM_FLAGS} ${OEM_DEV} /usr/share/oem
fi
# Sanity check the date (crosbug.com/13200)
# 010200001970.00 is date(1) MMDDhhmmYYYY.ss, i.e. Jan 2 00:00:00 1970.
if [ $(date +%Y) -lt 1970 ]; then
date 010200001970.00
fi
# This file indicates a blocked developer mode transition attempt has occurred.
BLOCKED_DEV_MODE_FILE="/mnt/stateful_partition/.blocked_dev_mode"
# Check whether the device is allowed to boot in dev mode.
# 1. If a debug build is already installed on the system, ignore block_devmode.
# It is pointless in this case, as the device is already in a state where the
# local user has full control.
# 2. According to recovery mode only boot with signed images, the block_devmode
# could be ignored here -- otherwise factory shim will be blocked expecially
# that RMA center can't reset this device.
#
# The up-front CROS_DEBUG check avoids forking a crossystem process in verified
# mode, thus keeping the check as lightweight as possible for normal boot.
if [ $CROS_DEBUG -eq 1 ] && \
crossystem "devsw_boot?1" "debug_build?0" "recovery_reason?0"; then
# Checks ordered by run time: First try reading VPD through sysfs.
VPD_BLOCK_DEVMODE_FILE=/sys/firmware/vpd/rw/block_devmode
if [ -f "${VPD_BLOCK_DEVMODE_FILE}" ] &&
[ "$(cat "${VPD_BLOCK_DEVMODE_FILE}")" = "1" ]; then
BLOCK_DEVMODE=1
# Second try crossystem.
elif crossystem "block_devmode?1"; then
BLOCK_DEVMODE=1
# Third re-read VPD directly from SPI flash (slow!) but only for systems that
# don't have VPD in sysfs and only when NVRAM indicates that it has been
# cleared.
elif [ ! -d /sys/firmware/vpd/rw ] &&
crossystem "nvram_cleared?1" &&
[ "$(vpd -i RW_VPD -g block_devmode)" = "1" ]; then
BLOCK_DEVMODE=1
fi
if [ -n "${BLOCK_DEVMODE}" ]; then
# Put a flag file into place that will trigger a stateful partition wipe
# after reboot in verified mode.
touch ${BLOCKED_DEV_MODE_FILE}
chromeos-boot-alert block_devmode
fi
fi
# 'firmware-boot-update' is provided by chromeos-firmware for legacy systems.
# On most new boards, it should be simply an empty file.
firmware-boot-update
# Now that stateful partition is mounted, we can check if we are in factory
# mode.
FACTORY_MODE=
if is_factory_mode; then
FACTORY_MODE=factory
fi
# File used to trigger a stateful reset. Contains arguments for
# the "clobber-state" call. This file may exist at boot time, as
# some use cases operate by creating this file with the necessary
# arguments and then rebooting.
RESET_FILE="/mnt/stateful_partition/factory_install_reset"
# This file is created by clobber-state after the transition
# to dev mode.
DEV_MODE_FILE="/mnt/stateful_partition/.developer_mode"
FIRMWARE_TYPE=$(crossystem mainfw_type)
# Check for whether we need a stateful wipe, and alert the user as
# necessary. We can wipe for several different reasons:
# + User requested "power wash". This is signaled in the same
# way as the factory reset, but with different arguments in
# ${RESET_FILE}.
# + Switch from verified mode to dev mode. We do this if we're in
# dev mode, and ${DEV_MODE_FILE} doesn't exist. clobber-state
# in this case will create the file, to prevent re-wipe.
# + Switch from dev mode to verified mode. We do this if we're in
# verified mode, and ${DEV_MODE_FILE} still exists. (This check
# isn't necessarily reliable.)
#
# Stateful wipe for dev mode switching is skipped if the build
# is a debug build or if we've booted in recovery mode (meaning
# from USB); this protects various development use cases, most
# especially booting Chromium OS on non-Chrome hardware.
#
# NOTE: [ -O file ] is true when the file exists AND is owned by the
# effective UID — presumably so flag files not created by root are ignored.
if [ -O ${RESET_FILE} ]; then
# Wipe requested on previous boot.
chromeos-boot-alert power_wash
elif [ -z "$FACTORY_MODE" -a "$FIRMWARE_TYPE" != "recovery" ]; then
if crossystem "devsw_boot?1" ; then
# We've booted in dev mode. For platforms using separated
# normal/developer firmware, we need to display an extra boot
# alert for the developer mode warning plus the 30-second delay.
# Note that we want this message and the delay regardless of
# whether we plan to wipe.
if [ "$FIRMWARE_TYPE" != "developer" ]; then
chromeos-boot-alert warn_dev
fi
if [ ! -O ${DEV_MODE_FILE} ] && crossystem "debug_build?0"; then
# We're transitioning from verified boot to dev mode.
# TODO(wad,wfrichar) Have user provide sudo/vt2 password here.
chromeos-boot-alert enter_dev
echo "keepimg" > ${RESET_FILE}
clobber-log -- "Enter developer mode"
fi
elif [ -O ${DEV_MODE_FILE} -o -O ${BLOCKED_DEV_MODE_FILE} ] &&
crossystem "debug_build?0"; then
# We're transitioning from dev mode to verified boot.
# When coming back from developer mode, we don't need to
# clobber as aggressively. Fast will do the trick.
chromeos-boot-alert leave_dev
echo "fast keepimg" > ${RESET_FILE}
clobber-log -- "Leave developer mode"
fi
fi
# Perform the wipe; clobber-state reboots when done, so exec never returns.
if [ -O ${RESET_FILE} ]; then
ARGS="$(cat ${RESET_FILE})"
exec clobber-state "$ARGS"
fi
# Check if we have an update to stateful pending.
STATEFUL_UPDATE="/mnt/stateful_partition/.update_available"
if [ $CROS_DEBUG -eq 1 -a -f "$STATEFUL_UPDATE" ] ; then
# To remain compatible with the prior update_stateful tarballs, expect
# the "var_new" unpack location, but move it into the new "var_overlay"
# target location.
VAR_TARGET="/mnt/stateful_partition/var"
VAR_NEW="${VAR_TARGET}_new"
VAR_OLD="${VAR_TARGET}_old"
# Reassigned: the live target is var_overlay; _new/_old keep the old stem.
VAR_TARGET="${VAR_TARGET}_overlay"
DEVELOPER_TARGET="/mnt/stateful_partition/dev_image"
DEVELOPER_NEW="${DEVELOPER_TARGET}_new"
DEVELOPER_OLD="${DEVELOPER_TARGET}_old"
STATEFUL_UPDATE_ARGS=$(cat "$STATEFUL_UPDATE")
# Only replace the developer and var_overlay directories if new replacements
# are available.
if [ -d "$DEVELOPER_NEW" -a -d "$VAR_NEW" ]; then
clobber-log -- "Updating from $DEVELOPER_NEW && $VAR_NEW."
rm -rf "$DEVELOPER_OLD" "$VAR_OLD"
mv "$VAR_TARGET" "$VAR_OLD" || true
mv "$DEVELOPER_TARGET" "$DEVELOPER_OLD" || true
mv "$VAR_NEW" "$VAR_TARGET"
mv "$DEVELOPER_NEW" "$DEVELOPER_TARGET"
else
clobber-log -- "Stateful update did not find $DEVELOPER_NEW && $VAR_NEW."
clobber-log -- "Keeping old development tools."
fi
# Check for clobber.
if [ "$STATEFUL_UPDATE_ARGS" = "clobber" ] ; then
PRESERVE_DIR="/mnt/stateful_partition/unencrypted/preserve"
# Find everything in stateful and delete it, except for protected paths, and
# non-empty directories. The non-empty directories contain protected content
# or they would already be empty from depth first traversal.
find "/mnt/stateful_partition" -depth -mindepth 1 \
-not -path "/mnt/stateful_partition/.labmachine" \
-not -path "${DEVELOPER_TARGET}/*" \
-not -path "${VAR_TARGET}/*" \
-not -path "${PRESERVE_DIR}/*" \
-not -type d -print0 | xargs --null -r rm -f
find "/mnt/stateful_partition" -depth -mindepth 1 \
-not -path "${DEVELOPER_TARGET}/*" \
-not -path "${VAR_TARGET}/*" \
-not -path "${PRESERVE_DIR}/*" \
-type d -print0 | xargs --null -r rmdir --ignore-fail-on-non-empty
# Let's really be done before coming back.
sync
fi
# Backgrounded to take off boot path.
rm -rf "$STATEFUL_UPDATE" "$DEVELOPER_OLD" "$VAR_OLD" &
fi
# Make sure unencrypted stateful partition has the needed common directories.
# Any non-common directories should be created in the device implementation of
# "mount_var_and_home_chronos".
for d in home home/chronos home/root home/user \
unencrypted unencrypted/cache unencrypted/preserve; do
mkdir -p -m 0755 "/mnt/stateful_partition/${d}"
check_directory "/mnt/stateful_partition/${d}"
done
# Mount /home. This mount inherits nodev,noexec,nosuid from
# /mnt/stateful_partition above.
mount_or_fail --bind /mnt/stateful_partition/home /home
remember_mount /var
remember_mount /home/chronos
mount_var_and_home_chronos ${FACTORY_MODE} || cleanup_mounts "var and home"
# For dev/test images, if .gatherme presents, copy files listed in .gatherme to
# /mnt/stateful_partition/unencrypted/prior_logs.
LAB_PRESERVE_LOGS="/mnt/stateful_partition/.gatherme"
PRIOR_LOG_DIR="/mnt/stateful_partition/unencrypted/prior_logs"
if [ ${CROS_DEBUG} -eq 1 -a -f "${LAB_PRESERVE_LOGS}" ]; then
# .gatherme lists one path per line; '#' lines and blank lines are skipped.
for log_path in $(sed -e '/^#/ d' -e '/^$/ d' "${LAB_PRESERVE_LOGS}"); do
if [ -d "${log_path}" ]; then
cp -a -r --parents "${log_path}" "${PRIOR_LOG_DIR}" || true
elif [ -f "${log_path}" ]; then
cp -a "${log_path}" "${PRIOR_LOG_DIR}" || true
fi
done
rm -rf /var/*
rm -rf /home/chronos/*
rm "${LAB_PRESERVE_LOGS}"
fi
# /run is now tmpfs used for runtime data. Make sure /var/run and /var/lock
# are sym links to /run and /run/lock respectively for backwards compatibility.
rm -rf /var/run /var/lock || \
cleanup_mounts "failed to delete /var/run and /var/lock"
ln -s /run /var/run
ln -s /run/lock /var/lock
# Make sure required /var subdirectories exist.
mkdir -p -m 0755 /var/cache /var/db /var/empty /var/log/metrics \
/var/spool /var/tmp /var/lib/misc
# Before operating on them, verify that all stateful partition paths are
# directories (as opposed to say, symlinks).
for d in /var/cache /var/db /var/empty /var/log /var/log/metrics \
/var/spool /var/tmp /var/lib /var/lib/misc /home/chronos /home/root; do
check_directory "${d}"
done
# /var/tmp must be world-writable and sticky
chmod 1777 /var/tmp
# /home/root must be group-writable and sticky
chmod 1771 /home/root
# Selected directories must belong to the chronos user.
chown chronos:chronos /home/chronos /var/log/metrics
# rsyslog needs to be able to create new logfiles, but not delete other logs
chgrp syslog /var/log
chmod 1775 /var/log
# /var/cache, /var/db, and /var/empty may already exist with wrong permissions.
# Force the correct ones.
chmod 0755 /var/cache /var/db /var/empty /var/spool /var/lib /var/lib/misc
# Make sure the empty dir stays empty (only works on ext4).
chattr +i /var/empty || :
# "--make-shared" to let ARC container access mount points under /media.
mount --make-shared -n -t tmpfs -o nodev,noexec,nosuid media /media
# Mount stateful partition for dev packages.
if [ ${CROS_DEBUG} -eq 1 ]; then
# Set up the logging dir that ASAN compiled programs will write to. We want
# any privileged account to be able to write here so unittests need not worry
# about settings things up ahead of time. See crbug.com/453579 for details.
mkdir -p /var/log/asan
chmod 1777 /var/log/asan
# Capture a snapshot of "normal" mount state here, for auditability,
# before we start applying devmode-specific changes.
cat /proc/mounts > /var/log/mount_options.log
# Create dev_image directory in base images in developer mode.
if [ ! -d /mnt/stateful_partition/dev_image ]; then
mkdir -p -m 0755 /mnt/stateful_partition/dev_image
fi
# Mount and then remount to enable exec/suid.
mount_or_fail --bind /mnt/stateful_partition/dev_image /usr/local
mount -n -o remount,exec,suid /usr/local
# Set up /var elements needed by gmerge.
# TODO(keescook) Use dev/test package installs instead of piling more
# things here (crosbug.com/14091).
BASE=/mnt/stateful_partition/var_overlay
if [ -d ${BASE} ]; then
# Keep this list in sync with the var_overlay elements in the DIRLIST
# found in chromeos-install from chromeos-base/chromeos-installer.
DIRLIST="
db/pkg
lib/portage
"
# Symlink each overlay dir into /var unless something already exists there.
for DIR in ${DIRLIST}; do
if [ ! -d ${BASE}/${DIR} ]; then
continue
fi
DEST=/var/${DIR}
if [ -e ${DEST} ]; then
continue
fi
PARENT=$(dirname ${DEST})
mkdir -p ${PARENT}
ln -sf ${BASE}/${DIR} ${DEST}
done
fi
fi
bootstat post-startup
# Always return success to avoid killing init
exit 0
| true
|
05c195fd706a368fb1f9cde988257fbe8f864635
|
Shell
|
fedushare/ecp-ssh-demo-environment
|
/idp/configure-idp.sh
|
UTF-8
| 727
| 3.15625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Configure the Shibboleth IdP: install the Tagish JAAS RDBMS auth module,
# copy local configuration into the IdP tree, load identities into MySQL,
# and rebuild the IdP WAR. Requires SHIB_IDP_HOME and JAVA_HOME.
set -e
set -x
# https://www.eduid.cz/en/tech/idp/shibboleth
# Quote the expansion so the test stays well-formed even when the value
# contains whitespace (unquoted, [ -z $VAR ] breaks on such values).
if [ -z "$SHIB_IDP_HOME" ]; then
    echo "SHIB_IDP_HOME is not set"
    exit 1
fi
# Install tagishauth module for JAAS DB authentication
cd /tmp
git clone https://github.com/tauceti2/jaas-rdbms.git
cd /tmp/jaas-rdbms
sed -i 's/JAVAC=.*/JAVAC=javac/' Makefile
PATH=$PATH:$JAVA_HOME/bin make
cp ./tagishauth.jar "${SHIB_IDP_HOME}/edit-webapp/WEB-INF/lib/tagishauth.jar"
cd /vagrant/idp/conf
# Copy every config file. A read loop (unlike `for f in $(find ...)`) is
# safe for filenames containing whitespace; `-name '*'` was a no-op.
find . -type f | while IFS= read -r f; do
    cp "/vagrant/idp/conf/$f" "${SHIB_IDP_HOME}/conf/$f"
done
# Load identities into MySQL
mysql -u root < "/vagrant/idp/identities.sql"
# Rebuild IDP WAR
"${SHIB_IDP_HOME}/bin/build.sh" -Didp.target.dir="${SHIB_IDP_HOME}"
| true
|
3cb557a16505b8c3b1956acc6f45ac68d2f24dd5
|
Shell
|
SerhatTeker/dotfiles
|
/zsh/oh-my-zsh/completions/_multipass
|
UTF-8
| 3,111
| 3.5625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#compdef multipass
# -*- coding: utf-8 -*-
# vim: set ft=zsh et ts=4 sw=4 sts=4:
# zsh completion for the multipass CLI.
# heavily based on _cvs and _adb
_multipass() {
    _arguments \
        '*::multipass command:_multipass_command'
}
# define multipass command dispatch function.
(( $+functions[_multipass_command] )) ||
_multipass_command() {
    local ret=1
    # Subcommand list generated from `multipass --help` via:
    # pbpaste | sed -E "s/^ */'/g; s/$/'/g; s/ {2,}/:/g" | tr '[:upper:]' '[:lower:]'
    local -a commands
    commands=(
        'alias:create an alias'
        'aliases:list available aliases'
        'delete:delete instances'
        'exec:run a command on an instance'
        'find:display available images to create instances from'
        'get:get a configuration setting'
        'help:display help about a command'
        'info:display information about instances'
        'launch:create and start an ubuntu instance'
        'list:list all available instances'
        'mount:mount a local directory in the instance'
        'networks:list available network interfaces'
        'purge:purge all deleted instances permanently'
        'recover:recover deleted instances'
        'restart:restart instances'
        'set:set a configuration setting'
        'shell:open a shell on a running instance'
        'start:start instances'
        'stop:stop running instances'
        'suspend:suspend running instances'
        'transfer:transfer files between the host and instances'
        'umount:unmount a directory from an instance'
        'unalias:remove an alias'
        'version:show version details:_version'
    )
    # First word: complete the subcommand itself; otherwise dispatch to the
    # per-subcommand completer _multipass_<subcommand> if one is defined.
    if (( CURRENT == 1 )); then
        _describe -t commands 'multipass command' commands
    else
        _call_function ret _multipass_$words[1]
        return ret
    fi
}
_multipass_delete() {
    _arguments \
        --all \
        + '(purge)' \
        {-p,--purge} \
        '*::name:_instances'
}
_multipass_exec() {
    _arguments \
        ':name:_instances_running'
}
_multipass_get() {
    _arguments \
        --raw \
        ':key:_configuration_settings'
}
_multipass_info() {
    _arguments \
        --all \
        '--format: :(table json csv yaml)' \
        ':name:_instances'
}
_multipass_purge() {
    # the purge subcommand takes no arguments
}
_multipass_restart() {
    _arguments \
        --all \
        --timeout: \
        ':name:_instances_running'
}
_multipass_shell() {
    _arguments \
        --timeout: \
        ':name:_instances'
}
_multipass_start() {
    _arguments \
        --all \
        --timeout: \
        '::name:_instances'
}
_multipass_stop() {
    _arguments \
        --all \
        + '(time)' \
        {-t,--time}: \
        + '(cancel)' \
        {-c,--cancel} \
        '*::name:_instances_running'
}
_multipass_version() {
    _arguments \
        '--format: :(table json csv yaml)'
}
# Complete configuration keys (client.*/local.*) scraped from `multipass set --help`.
_configuration_settings() {
    local -a keys
    keys=( $(multipass set --help | grep -E '(client|local)') )
    _describe -t names 'keys' keys
}
# Complete instance names from `multipass list` CSV output (header skipped).
_instances() {
    # from _adb
    local -a instances
    instances=( $(multipass list --format csv | tail -n +2 | cut -f 1 -d ,) )
    _describe -t names 'instances' instances
}
# Same as _instances, restricted to instances in the Running state.
_instances_running() {
    local -a instances
    instances=( $(multipass list --format csv | tail -n +2 | grep Running | cut -f 1 -d ,) )
    _describe -t names 'instances' instances
}
_multipass $@
| true
|
7eb56071ba446777a292448a39c35acc6e4a915c
|
Shell
|
mkhabelaj/security
|
/databaseConfig/setup.sh
|
UTF-8
| 1,275
| 4.21875
| 4
|
[] |
no_license
|
#!/bin/bash
#####################################################################################
##################################Setup postgres#####################################
#####################################################################################
# Install PostgreSQL if missing, create database $1 (owned by the current
# user, whose role password is set to $2 on first install), and load
# config.sql into it.
# Usage: setup.sh <database> <password>
# Ensure script runs in its intended directory
cd "$(dirname "$0")"
DATABASE=$1
PASSWORD=$2
# Bug fix: use || (logical OR), not | (pipe). With a pipe only the second
# test's exit status was checked, so an empty DATABASE slipped through.
if [[ -z ${DATABASE} ]] || [[ -z ${PASSWORD} ]] ; then
    echo "One of your parameters is empty"
    exit 1;
fi
# Load config.sql into the given database.
function create_config_table() {
    psql "$1" < config.sql
}
# Check if postgres is installed
if type psql >/dev/null 2>&1 ;
then
    echo 'POSTGRES is installed';
    # creating Database
    sudo -u "${USER}" createdb "${DATABASE}";
    echo 'creating the config table .....';
    create_config_table "${DATABASE}";
else
    echo 'POSTGRES is installing';
    # NOTE(review): consider apt-get -y for unattended runs.
    sudo apt-get install postgresql postgresql-server-dev-all;
    sudo -u postgres createuser "${USER}";
    # Bug fix: the password literal was missing its opening quote and
    # "ALTER USER${USER}" was missing a space, producing invalid SQL.
    sudo -u postgres psql -c \
        "ALTER USER ${USER} WITH PASSWORD '${PASSWORD}'; ALTER USER ${USER} WITH SUPERUSER;";
    sudo -u postgres createdb "${DATABASE}";
    sudo service postgresql restart;
    echo 'creating the config table .....'
    create_config_table "${DATABASE}";
fi
echo "Setup Database ${DATABASE} complete";
| true
|
ebd1097afc29e580dde0ccd568ccf9affe0c51f2
|
Shell
|
begetan/geth-ubuntu
|
/geth-install.sh
|
UTF-8
| 3,214
| 3.671875
| 4
|
[] |
no_license
|
#!/bin/bash
# Install a pinned geth release as a systemd service with rsyslog/logrotate
# wiring. Must run as root (writes to /usr/local/bin, /etc, /var/lib/geth).
set -euo pipefail
echo "Geth has not direct link to the checksum and latest version"
echo "Edit parameters in the script manually from https://geth.ethereum.org/downloads/"
geth_version="1.10.2"
geth_commit="97d11b01"
# NOTE: this md5 is only valid for the amd64 tarball; other arches will
# fail the checksum step below.
geth_hash="defd2333d2d646836dc42469053db076" # for amd64
# Select proper achitecture
arch=$(uname -m)
arch=${arch/x86_64/amd64}
arch=${arch/aarch64/arm64}
arch=${arch/armv6l/arm6}
arch=${arch/armv7l/arm7}
readonly os_arch_suffix="$(uname -s | tr '[:upper:]' '[:lower:]')-$arch"
# Select proper OS version
system=""
case "$OSTYPE" in
darwin*) system="darwin" ;;
linux*) system="linux" ;;
*) exit 1 ;;
esac
if [[ "$os_arch_suffix" == *"arm64"* ]]; then
arch="arm64"
fi
geth="geth-$system-$arch-$geth_version-$geth_commit"
echo "==> Install Geth binary"
# Download tarball + detached signature, verify GPG signature and md5.
wget -q -O "/tmp/$geth.tar.gz" "https://gethstore.blob.core.windows.net/builds/$geth.tar.gz"
wget -q -O "/tmp/$geth.tar.gz.asc" "https://gethstore.blob.core.windows.net/builds/$geth.tar.gz.asc"
gpg --keyserver hkp://keyserver.ubuntu.com --recv-key 9BA28146
gpg --verify "/tmp/$geth.tar.gz.asc"
md5sum -c <(echo "$geth_hash" "/tmp/$geth.tar.gz")
tar -xzf "/tmp/$geth.tar.gz" -C /tmp/
cp "/tmp/$geth/geth" /usr/local/bin/geth
chown root.root /usr/local/bin/geth
echo "==> Check geth paths"
# An existing data dir means a prior install: keep it and stop here.
if [[ ! -d "/var/lib/geth/data" ]]
then
echo "    Create data path /var/lib/geth/data"
mkdir -m0700 -p /var/lib/geth/data
else
echo "    Found existing directory at /var/lib/geth/data"
echo "    Not forget to start geth service!"
exit 0
fi
echo "==> Add ethereum user"
useradd -r -m -d /var/lib/geth ethereum -s /bin/bash
chown ethereum.ethereum /var/lib/geth/data
echo "==> Create systemd config"
cat << EOF > /etc/systemd/system/geth.service
[Unit]
Description=Ethereum daemon
After=network-online.target
Wants=network-online.target
[Service]
Type=simple
User=ethereum
Group=ethereum
WorkingDirectory=/var/lib/geth
# /run/geth
RuntimeDirectory=geth
RuntimeDirectoryMode=0710
ExecStartPre=+/bin/chown -R ethereum:ethereum /usr/local/bin/geth /var/lib/geth
ExecStart=/usr/local/bin/geth --cache=512 --datadir=/var/lib/geth/data \
--ws --ws.origins '*' --ws.api eth,net,web3,debug \
--http --http.vhosts '*' --http.corsdomain '*' --http.api eth,net,web3,debug
PIDFile=/run/geth/geth.pid
StandardOutput=journal
StandardError=journal
KillMode=process
TimeoutSec=180
Restart=always
RestartSec=60
[Install]
WantedBy=multi-user.target
EOF
chmod 0644 /etc/systemd/system/geth.service
chown root.root /etc/systemd/system/geth.service
echo "==> Create syslog config"
# Route geth's log lines into /var/log/geth/geth.log and stop further rules.
echo ':programname, startswith, "geth" /var/log/geth/geth.log' > /etc/rsyslog.d/40-geth.conf
echo '& stop' >> /etc/rsyslog.d/40-geth.conf
chown root.root /etc/rsyslog.d/40-geth.conf
chmod 0644 /etc/rsyslog.d/40-geth.conf
systemctl restart rsyslog.service
echo "==> Create logrotate config"
cat << EOF > /etc/logrotate.d/geth
/var/log/geth/geth.log
{
rotate 5
daily
copytruncate
missingok
notifempty
compress
delaycompress
sharedscripts
}
EOF
chown root.root /etc/logrotate.d/geth
chmod 0644 /etc/logrotate.d/geth
logrotate -f /etc/logrotate.d/geth
echo "==> Update daemon"
systemctl daemon-reload
systemctl enable geth.service --now
|
3216938ca319991de57deed10f35165144c014d5
|
Shell
|
darjanin/origin-story
|
/.zshrc
|
UTF-8
| 1,171
| 2.5625
| 3
|
[] |
no_license
|
# Personal zsh configuration: oh-my-zsh setup, editor choice, git aliases.
# Path to your oh-my-zsh installation.
export ZSH=$HOME/.oh-my-zsh
ZSH_THEME="gitsome"
# Uncomment the following line to disable auto-setting terminal title.
DISABLE_AUTO_TITLE="true"
# Uncomment the following line to display red dots whilst waiting for completion.
COMPLETION_WAITING_DOTS="true"
plugins=(colored-man wd zsh-syntax-highlighting)
# User configuration
# NOTE: this resets PATH entirely — anything added earlier is dropped.
export PATH="/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin"
# export MANPATH="/usr/local/man:$MANPATH"
source $ZSH/oh-my-zsh.sh
# Preferred editor for local and remote sessions
# (both branches pick vim; kept for easy per-context overrides).
if [[ -n $SSH_CONNECTION ]]; then
export EDITOR='vim'
else
export EDITOR='vim'
fi
alias zshconfig="vim ~/.zshrc"
# Git aliases
alias ga='git add'
alias gb='git branch'
alias gc='git commit -v'
alias gcb='git checkout -b'
alias gco='git checkout'
alias gcm='git checkout master'
alias gd='git diff'
alias gl='git pull'
alias gm='git merge'
alias glg='git log --all --graph --pretty=format:"%Cred%h%Creset -%C(auto)%d%Creset %s %Cgreen(%cr) %C(bold blue)<%an>%Creset" --abbrev-commit --date=relative'
alias gp='git push'
alias gst='git status -sb'
export PATH="$PATH:$HOME/.rvm/bin" # Add RVM to PATH for scripting
| true
|
d2e12651f38256a3fab15ad55e6f5c8869e7527f
|
Shell
|
speng975/centos-7-kickstart
|
/packer-ova/ova/build.sh
|
UTF-8
| 622
| 2.953125
| 3
|
[] |
no_license
|
#!/bin/bash -e
# Repackage the packer build output disk as an OVA using VMware's ovftool.
# Quote all path expansions: this targets macOS (ovftool in /Applications),
# where checkout paths with spaces are common and unquoted $CWD/$0 broke.
CWD=$(cd "$(dirname "$0")"; pwd)
ovftool="/Applications/VMware OVF Tool/ovftool"
rm -f "$CWD/centos7-disk1.vmdk"
rm -f "$CWD/centos7.ova"
rm -f "$CWD/centos7.ovf"
VM_DISK_FILE=$CWD/centos7-disk1.vmdk
# virtualbox
cp -f "$CWD"/../output-*/*.vmdk "$VM_DISK_FILE"
# vmware-fusion compress disk
#vmware-vdiskmanager -t 5 -r $CWD/../output-*/*.vmdk $VM_DISK_FILE
#frep $CWD/centos7.ovf.tmpl --overwrite -e VM_DISK_SIZE=$(stat -c %s $VM_DISK_FILE 2>/dev/null || stat -f %z $VM_DISK_FILE)
# Render the OVF template, then wrap OVF + disk into a single OVA.
frep "$CWD/centos7.ovf.tmpl" --overwrite -e CWD="$CWD"
"$ovftool" --skipManifestCheck --overwrite "$CWD/centos7.ovf" "$CWD/centos7.ova"
| true
|
3b5fcfba7d8d3f554ba8b5b4f443fcba269863d9
|
Shell
|
aguytech/server-installer
|
/sub/ubuntu20.04-ssh.install
|
UTF-8
| 1,585
| 3.0625
| 3
|
[] |
no_license
|
#!/bin/bash
#
# write by Aguy
# Installer part: harden the SSH client/server configuration and restart sshd.
# NOTE(review): _echoT/_echot/_eval/_keepcpts/_service/_partadd and the
# S_* variables come from the installer framework that sources this file;
# they are not defined here.
_echoT "\n========================================== ${S_RELEASE}-${_PART}"
# Record this part in the install config file once.
grep -q "^# ${_PART#++}$" ${S_FILE_INSTALL_CONF} || echo "# ${_PART}" >> ${S_FILE_INSTALL_CONF}
######################## CONFIGURATION
# load configuration file
. ${S_FILE_INSTALL_CONF}
######################## MAIN
# set s_port
# S_CLUSTER[$HOSTNAME] holds shell assignments (s_port=...) for this host.
eval ${S_CLUSTER[$HOSTNAME]}
# Generate an RSA key pair for the current user if none exists yet.
if ! [ -f ${HOME}/.ssh/id_rsa ]; then
_echot "------------------ ssh-keygen"
[ "$_ANSWER" = y ] && _eval "ssh-keygen -t rsa"
fi
_echot "------------------ conf client"
file='/etc/ssh/ssh_config'
_keepcpts "${file}"
_eval "sed -i 's/^\( *GSSAPIAuthentication\).*/\1 no/' ${file}"
_echot "------------------ conf server"
file='/etc/ssh/sshd_config'
_keepcpts "${file}"
# Harden sshd: key-only root login, custom port, no passwords, keepalives.
#_eval "sed -i 's|^#\?\(PermitRootLogin\) .*$|\1 without-password|' $file"
_eval "sed -i 's|^#\?\(PermitRootLogin\) .*$|\1 prohibit-password|' $file"
_eval "sed -i 's|^#\?\(Port\).*$|\1 ${s_port}|' $file"
_eval "sed -i 's|^#\?\(GSSAPIAuthentication\) .*$|\1 no|' $file"
_eval "sed -i 's|^#\?\(UseDNS\) .*$|\1 no|' $file"
_eval "sed -i 's|^#\?\(PasswordAuthentication\) .*$|\1 no|' $file"
_eval "sed -i 's|^#\?\(ClientAliveInterval\) .*$|\1 300|' $file"
_eval "sed -i 's|^#\?\(ClientAliveCountMax\) .*$|\1 3|' $file"
_echot "------------------ ssh restart"
_service restart sshd
_echoT "===================== ${_PART} end"
_partadd ${_PART#++} ${S_FILE_INSTALL_DONE}
<<KEEP
# /etc/ssh/sshd_config
'PermitRootLogin yes
PubkeyAuthentication yes
PasswordAuthentication no
UsePAM yes
Match User git
PasswordAuthentication yes'
KEEP
| true
|
e787a244d1e4e900c30894be96410f7ad2dff449
|
Shell
|
d0now/2019_dsec_ctf
|
/script/debug.sh
|
UTF-8
| 534
| 2.734375
| 3
|
[] |
no_license
|
#!/bin/sh
# Rebuild the kernel-module challenge and the exploit, pack both into
# the debug initramfs, and boot it via the debug run script.
PROJ_DIR="/home/bc/shrd/lab/Project/2019_dsec_ctf"
INIT_DIR="/tmp/initrd_dbg"
## Unpack default cpio
#$PROJ_DIR/script/unpack_cpio.sh $PROJ_DIR/images/default/initramfs.cpio $INIT_DIR
## Update super_ez_kernel.ko
# NOTE(review): `cd source` is relative to the caller's cwd, unlike the
# later absolute cds — presumably the script is run from $PROJ_DIR.
cd source
make
cp ./super_ez_kern.ko $INIT_DIR/super_ez_kern.ko
cd $PROJ_DIR
## Update exploit
cd $PROJ_DIR/exploit
./build.sh
cp ./exploit.elf64 $INIT_DIR/exploit
cd $PROJ_DIR
## Pack initrd
$PROJ_DIR/script/update_cpio.sh $INIT_DIR $PROJ_DIR/images/debug/initramfs.cpio
## spawn
cd images/debug
./run.sh
| true
|
19b76a8af340fc0bbfe2c531b3c0b7167da0ba28
|
Shell
|
mdavidn/profile
|
/skel/.profile
|
UTF-8
| 1,722
| 3.640625
| 4
|
[] |
no_license
|
#!/bin/sh
# Personal ~/.profile
# Global environment variables.
# Matthew Nelson <mnelson@vitalvector.com>
# Prepend directory $2 to the colon-separated path variable named $1,
# but only if $2 exists and is not already in the list.
#   $1 - name of the variable to modify (e.g. PATH, MANPATH)
#   $2 - directory to prepend
# Fixed for the file's #!/bin/sh shebang: `==` is a bashism inside
# `[ ]`, and `-a` is deprecated/ambiguous per POSIX — use `=` and two
# chained test commands instead.
pushpath() {
	local path pad
	eval path=\"\$$1\"
	pad=":$path:"
	# ${pad%":$2:"*} differs from $pad only when ":$2:" occurs in it,
	# i.e. when $2 is already on the path.
	if [ -d "$2" ] && [ "$pad" = "${pad%":$2:"*}" ]; then
		eval $1=\"$2${path:+":$path"}\"
	fi
}
# man ignores /etc without trailing colon.
if [ -z "$MANPATH" ]; then MANPATH=: ; fi
# Homebrew
pushpath PATH /usr/local/bin
pushpath PATH /usr/local/share/python
pushpath MANPATH /usr/local/share/man
# Personal bin
pushpath PATH "$HOME/bin"
pushpath PATH "$HOME/.rbenv/bin"
pushpath MANPATH "$HOME/man"
# Chef
pushpath PATH /opt/chef/bin
# Add ~/.rbenv/shims to PATH
if command -v rbenv >/dev/null; then
eval "$(rbenv init -)"
fi
export PATH MANPATH
# Start ssh-agent if necessary
# (the EXIT trap kills the agent again when this login shell ends)
if [ -z "$SSH_AUTH_SOCK" ]; then
eval `ssh-agent`
cleanup() {
kill $SSH_AGENT_PID
}
trap cleanup EXIT
fi
# Symlink to ssh-agent (for tmux sessions)
# Keep a stable socket path so re-attached tmux panes still reach the agent.
if [ -z "$TMUX" -a -n "$SSH_AUTH_SOCK" -a \
"$SSH_AUTH_SOCK" != "$HOME/.ssh/agent" \
]; then
ln -sf "$SSH_AUTH_SOCK" "$HOME/.ssh/agent"
SSH_AUTH_SOCK="$HOME/.ssh/agent"
export SSH_AUTH_SOCK
fi
# Set editor to vi
# Prefer vim, fall back to vi; first hit wins.
for editor in vim vi; do
if command -v "$editor" >/dev/null; then
export VISUAL EDITOR
VISUAL="$editor"
EDITOR="$editor"
break
fi
done
# Set pager to less
if command -v less >/dev/null; then
PAGER=less; export PAGER
fi
# Configure sh to evaluate ~/.shrc
if [ -f "$HOME/.shrc" ]; then
ENV="$HOME/.shrc"; export ENV
fi
# Evaluate ~/.bashrc for login bash shells
if [ -n "$BASH" -a -f "$HOME/.bashrc" ]; then
. "$HOME/.bashrc"
fi
# Evaluate local profile, useful for appending to path
if [ -f "$HOME/.local_profile" ]; then
. "$HOME/.local_profile"
fi
unset -f pushpath
| true
|
1d083009807f9f1f9ca1231ac7cb43e18ac0233b
|
Shell
|
brisa-robotics/docker
|
/ci/sync.sh
|
UTF-8
| 1,493
| 3.671875
| 4
|
[] |
no_license
|
#!/bin/bash
# Regenerate the per-distro Dockerfiles from industrial_ci's default
# template: one directory per (ROS_DISTRO, ROS_REPO, OS) combination.
set -e
# industrial_ci location: first CLI argument, or found via rospack.
DIR_ICI="${1:-$(rospack find industrial_ci)}"
source $DIR_ICI/src/util.sh
source $DIR_ICI/src/docker.sh
DIR_THIS="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
HEADER="# DO NOT EDIT!\n# This file was auto-generated with ./sync.sh at $(LC_ALL=C date)\n"
# sed program: insert a MAINTAINER label after each generated FROM line.
INJECT_MAINTAINER="/^FROM/a LABEL MAINTAINER \"$(git config --get user.name)\" <$(git config --get user.email)>"
# export_dockerfile DISTRO REPO [OS_CODE_NAME] [DOCKER_BASE_IMAGE] [DIR_SUFFIX]
# Runs in a subshell (note the `()` body) so env.sh side effects do not
# leak between invocations. When OS_CODE_NAME is omitted, env.sh
# presumably supplies defaults — TODO confirm.
function export_dockerfile ()(
echo $*
ROS_DISTRO=$1
ROS_REPO=$2
OS_CODE_NAME=$3
DOCKER_BASE_IMAGE=$4
ROS_REPO=${EOL_REPO:-$2} source $DIR_ICI/src/env.sh
local path=$DIR_THIS/${ROS_DISTRO}-${5:-$OS_CODE_NAME}${2#ros}
mkdir -p "$path"
echo -e "$HEADER" > $path/Dockerfile
ici_generate_default_dockerfile | sed "$INJECT_MAINTAINER" >> $path/Dockerfile
)
# Generate all supported distro/repo/OS combinations.
for r in ros ros-shadow-fixed; do
for d in hydro indigo jade kinetic lunar melodic; do
export_dockerfile $d $r
done
EOL_REPO=final export_dockerfile kinetic $r jessie debian:jessie
EOL_REPO=final export_dockerfile lunar $r yakkety
EOL_REPO=final export_dockerfile lunar $r zesty
export_dockerfile lunar $r stretch debian:stretch
export_dockerfile melodic $r stretch debian:stretch
EOL_REPO=final export_dockerfile melodic $r artful
done
# EOL Ubuntu releases moved off the main mirrors; point apt at
# old-releases.ubuntu.com inside their generated Dockerfiles.
for path in $DIR_THIS/*{yakkety,zesty,artful}*; do
sed -i "/^LABEL MAINTAINER/a RUN sed -i -re 's/([a-z]{2}\.)?archive.ubuntu.com|security.ubuntu.com/old-releases.ubuntu.com/g' /etc/apt/sources.list" $path/Dockerfile
done
| true
|
eb4f3743c522b9635713ad5afd20881a9924b36a
|
Shell
|
HackIllinois/infrastructure-2016
|
/scripts/build/app.sh
|
UTF-8
| 1,512
| 3.46875
| 3
|
[
"Apache-2.0"
] |
permissive
|
# Build helpers for the App Engine app: dependency vendoring, app.yaml
# generation from the template in generated/, and secret injection.
# Expects $DIR (script dir) and $HACKILLINOIS_KEYFILE to be set by the
# caller; the keyfile provides ADMIN_ID, MAILGUN_SECRET, etc.
# NOTE(review): `sed -i ''` is the BSD/macOS form; these helpers will
# not work unchanged with GNU sed.
source $HACKILLINOIS_KEYFILE
# Vendor Python dependencies into www/libs via a throwaway virtualenv.
function f_install_deps {
echo "Installing dependencies..."
if [ ! -d $DIR/../.venv ]; then
virtualenv $DIR/../.venv >/dev/null
fi
source $DIR/../.venv/bin/activate
mkdir -p $DIR/../www/libs
rm -rf $DIR/../www/libs/*
pip install --upgrade -r requirements.txt -t $DIR/../www/libs/ >/dev/null
rm -rf $DIR/../www/libs/*.dist-info
touch $DIR/../www/libs/__init__.py
deactivate
}
# Write app.yaml from the generated template with a do-not-edit banner.
function f_write_app {
echo "Writing app file..."
appfile=$(<$DIR/../generated/app.yaml)
cat > $DIR/../app.yaml <<-EOF
### THIS FILE IS AUTO-GENERATED
### YOU SHOULD MAKE ANY NECESSARY CHANGES IN /generated/app.yaml
$appfile
EOF
}
# Replace {{ PLACEHOLDER }} tokens in app.yaml with keyfile secrets.
function f_inject_keys {
echo "Injecting keys into app file..."
sed -i '' -e "s/{{ ADMIN_ID }}/$ADMIN_ID/g" $DIR/../app.yaml
sed -i '' -e "s/{{ MAILGUN_SECRET }}/$MAILGUN_SECRET/g" $DIR/../app.yaml
sed -i '' -e "s/{{ HARDWARE_SECRET }}/$HARDWARE_SECRET/g" $DIR/../app.yaml
}
# Configure app.yaml for the development application id.
function f_inject_dev {
APPLICATION_ID=$DEV_APPLICATION_ID
sed -i '' -e "s/{{ APPLICATION_ID }}/$APPLICATION_ID/g" $DIR/../app.yaml
IS_DEVELOPMENT="TRUE"
sed -i '' -e "s/{{ IS_DEVELOPMENT }}/$IS_DEVELOPMENT/g" $DIR/../app.yaml
}
# Configure app.yaml for the production application id.
function f_inject_prod {
APPLICATION_ID=$PROD_APPLICATION_ID
sed -i '' -e "s/{{ APPLICATION_ID }}/$APPLICATION_ID/g" $DIR/../app.yaml
IS_DEVELOPMENT="FALSE"
sed -i '' -e "s/{{ IS_DEVELOPMENT }}/$IS_DEVELOPMENT/g" $DIR/../app.yaml
}
# Delete the generated app.yaml, if present.
function f_remove_app {
echo "Removing app file (if it exists)..."
rm -f $DIR/../app.yaml
}
| true
|
b8c07c34118619bd32cbffdce15ad29edd25d3c1
|
Shell
|
tzzh/lebowski
|
/i3/.config/i3/switch_layout
|
UTF-8
| 178
| 2.734375
| 3
|
[] |
no_license
|
#!/bin/bash
# Toggle the X keyboard layout between British (gb) and Arabic (ara,
# Buckwalter transliteration variant). Reads the active layout from
# `setxkbmap -query` and switches to the other; any other layout is
# left untouched.
# Modernized: $() instead of backticks, quoted case selector.
LAYOUT=$(setxkbmap -query | grep layout | awk '{print $2}')
case "$LAYOUT" in
    "ara") setxkbmap -layout gb;;
    "gb") setxkbmap -layout ara -variant buckwalter ;;
esac
| true
|
84bb9b0247ffb921043bce65b6ae3368bf38a828
|
Shell
|
rthallisey/atomic-osp-installer
|
/docker/cinder-app/cinder-volume/start-scripts/volume-group-create.sh
|
UTF-8
| 557
| 3.484375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Prepare the LVM volume group backing cinder-volume: if absent, create
# it on a loopback device over a file-backed store, then clear any
# stale iSCSI targets left over from a previous run.
# Inputs (env): LVM_LO_VOLUME_SIZE, VOLUME_GROUP.
set -e
backing_file=/opt/data/cinder_volume
size=${LVM_LO_VOLUME_SIZE}
volume_group=${VOLUME_GROUP}
# Set up the volume group.
if ! vgs $volume_group; then
# Create a backing file to hold our volumes.
[[ -f $backing_file ]] || truncate -s $size $backing_file
vg_dev=`losetup -f --show $backing_file`
# Only create volume group if it doesn't already exist
if ! vgs $volume_group; then
vgcreate $volume_group $vg_dev
fi
fi
# Remove iscsi targets
# xargs -r: skip when there are no targets; -n 1: one delete per target.
cinder-rtstool get-targets | xargs -rn 1 cinder-rtstool delete
| true
|
377f675872f26fac1a0f294c804daeae75016915
|
Shell
|
amalleo25/apcupsd-smartos-2
|
/smartos-setup.sh
|
UTF-8
| 429
| 3.0625
| 3
|
[] |
no_license
|
#!/bin/bash
# Simple Ad Hoc SmartOS Setup Service
# SMF method script: 'start' enables the apcupsd service, 'stop' is a
# no-op; anything else prints usage and exits with an SMF fatal code.
set -o xtrace
. /lib/svc/share/smf_include.sh
cd /
case "$1" in
'start')
#### Insert code to execute on startup here.
#hostname "smartos01" && hostname > /etc/nodename
svcadm enable apcupsd
;;
'stop')
### Insert code to execute on shutdown here.
;;
*)
echo "Usage: $0 { start | stop }"
exit $SMF_EXIT_ERR_FATAL
;;
esac
exit $SMF_EXIT_OK
| true
|
9b25b1f205350f84403fe59018f0ef8c84f55778
|
Shell
|
jbwalters/Logitch-C920-Linux-GUI
|
/videosetup.sh
|
UTF-8
| 932
| 2.96875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#
# James Walters
# james_b_walters@yahoo.commands
# 30 APR 2020
#
# This is a GUI to call the Logitech C290 setup script,
# as well as a couple other programs I use.
# The following programs must be installed for this GUI to work.
# The user can replace my choice of additional applications
# with their personal preferences.
#
# This script will require the following programs to work:
# YAD
#
# NOTE(review): $DEVICE_REPORT is never assigned in this script —
# presumably exported by a caller; otherwise the dialog text is empty.
# Each --field=...:fbtn is a dialog button launching its command.
DEVICE_CONFIG_CMD=$( yad --title="Video Setup" \
--text "$DEVICE_REPORT" \
--image "/usr/share/icons/C920.jpeg" \
--form --separator="," --item-separator="," \
--field="C290 Setup":fbtn "c290setup.sh" \
--field="Cheese":fbtn "cheese" \
--field="Screen Capture":fbtn "simplescreenrecorder --logfile" \
--field="ZOOM":fbtn "/usr/bin/flatpak run --branch=stable --arch=x86_64 --command=zoom --file-forwarding us.zoom.Zoom @@u %U @@" \
--button="gtk-quit":0 )
exit 0
| true
|
e4a03c231c8c8c3756cac89a2949b2e8f6821064
|
Shell
|
kuon/java-phoenix-channel
|
/src/test/run.sh
|
UTF-8
| 416
| 3.09375
| 3
|
[
"Apache-2.0",
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/usr/bin/env bash
# Build and boot the Phoenix mock server, run the Gradle test suite
# against it, then kill the server and propagate the suite's status.
# Fixes: check the cd calls, quote expansions, and test the ps result
# directly instead of via a separate $? check.
testdir=$(dirname "$0")
root=$(pwd)
cd "${testdir}/mock_server" || exit 1
mix deps.get || { echo 'Failed to install mix dependencies' ; exit 1; }
mix compile || { echo 'Failed to compile phoenix application' ; exit 1; }
mix phx.server &
PID=$!
# Give the server a moment to come up, then verify it survived startup.
sleep 2
if ! ps -p "$PID"
then
    echo $PID
    echo "Cannot start mock server"
    exit 1
fi
cd "${root}" || exit 1
./gradlew test
RES=$?
kill "$PID"
exit $RES
| true
|
ca4409e9ac8bd5908d7f365f3940fc228c258f02
|
Shell
|
MuffinSmith/improved-parakeet
|
/Unit 2/Homework/Instructions/resources/Chal-2_VIP/Orders.sh
|
UTF-8
| 937
| 3.015625
| 3
|
[] |
no_license
|
#!/bin/bash
# Collect every CSV order record into AllRecords/, extract the order
# lines of two VIP customers, and summarise their order counts in
# VIPCustomerDetails.md.
# Fixes: `find -exec ... +` batches the copies, and `wc -l < file`
# replaces the useless use of cat.
mkdir AllRecords
#Find all csv files and copy them to AllRecords
find . -type f -name "*.csv*" -exec cp -t AllRecords '{}' +
#Find each instance of specific names and output it to file
grep -n -e "Michael,Davis" AllRecords/* >> michael_davis_orders.output
grep -n -e "Michael,Campbell" AllRecords/* >> michael_campbell_orders.output
mkdir AllRecords/VIPCustomerOrders
mv michael_campbell_orders.output AllRecords/VIPCustomerOrders/michael_campbell_orders.output
mv michael_davis_orders.output AllRecords/VIPCustomerOrders/michael_davis_orders.output
touch VIPCustomerDetails.md
#print how many records of each customer
printf "Michael Davis: " >>VIPCustomerDetails.md
wc -l < AllRecords/VIPCustomerOrders/michael_davis_orders.output >> VIPCustomerDetails.md
printf " Michael Campbell: " >>VIPCustomerDetails.md
wc -l < AllRecords/VIPCustomerOrders/michael_campbell_orders.output >> VIPCustomerDetails.md
| true
|
ef1b7de3e705b1e097bef4ef12ec691cd8c9d658
|
Shell
|
crazo7924/device_samsung_a30
|
/extract-files.sh
|
UTF-8
| 1,041
| 3.84375
| 4
|
[
"Apache-2.0"
] |
permissive
|
# Extract proprietary vendor blobs for the Samsung Galaxy A30, either
# from a connected device (adb, the default) or from a given dump
# directory, using LineageOS's extract_utils helper.
set -e
DEVICE=a30
VENDOR=samsung
# Load extract_utils and do some sanity checks
MY_DIR="${BASH_SOURCE%/*}"
if [[ ! -d "${MY_DIR}" ]]; then MY_DIR="${PWD}"; fi
LINEAGE_ROOT="${MY_DIR}/../../.."
HELPER="${LINEAGE_ROOT}/vendor/lineage/build/tools/extract_utils.sh"
if [ ! -f "${HELPER}" ]; then
echo "Unable to find helper script at ${HELPER}"
exit 1
fi
source "${HELPER}"
# Default to sanitizing the vendor folder before extraction
CLEAN_VENDOR=true
# Option parsing: -n/--no-cleanup keeps the vendor tree; any other
# argument is taken as the extraction source.
while [ "$1" != "" ]; do
case "$1" in
-n | --no-cleanup ) CLEAN_VENDOR=false
;;
* ) SRC="$1"
;;
esac
shift
done
if [ -z "${SRC}" ]; then
SRC=adb
fi
# Initialize the helper
setup_vendor "${DEVICE}" "${VENDOR}" "${LINEAGE_ROOT}" false "${CLEAN_VENDOR}"
DEVICE_BLOB_ROOT="$LINEAGE_ROOT"/vendor/"$VENDOR"/"$DEVICE"/proprietary
extract "$MY_DIR"/proprietary-files.txt "$SRC"
extract "$MY_DIR"/proprietary-files-system.txt "$SRC"
"$MY_DIR"/setup-makefiles.sh
| true
|
9b99202fa167a2de6f7a21327096a525816259b9
|
Shell
|
vdrandom/vdotfiles
|
/cli/.config/zsh/prompt.zsh
|
UTF-8
| 4,104
| 3.5625
| 4
|
[] |
no_license
|
# Asynchronous zsh prompt: cwd + git status with powerline-style
# helpers and a window-title updater. The git portion runs in a
# detached background job and hands the finished prompt back through a
# per-shell FIFO + SIGUSR1 (precmd.git_update / TRAPUSR1), so the
# shell never blocks on slow git repositories.
prompt_fmtn='[ %%{\e[2;3m%%}%s%%{\e[0m%%} ] '
printf -v PROMPT2 $prompt_fmtn '%_'
printf -v PROMPT3 $prompt_fmtn '?#'
printf -v PROMPT4 $prompt_fmtn '+%N:%i'
prompt_wt="$USERNAME@$HOST"
# Per-process FIFO used by the async git job to deliver the prompt.
prompt_fifo=~/.zsh_gitstatus_$$
# Max branch-name length shown before truncating with an ellipsis.
prompt_blimit=12
typeset -A prompt_symbols=(
sep_a $'\ue0b0'
ellipsis $'\u2026'
git $'\ue0a0'
git_unstaged '~'
git_staged $'\u2713'
git_untracked '!'
git_unmerged '*'
bang $'\u266a'
)
typeset -A prompt_colors=(
# fg '15'
root '1'
ssh '15'
cwd '4'
git_branch '241'
git_unstaged '3'
git_staged '6'
git_untracked '1'
git_unmerged '5'
bang '10'
)
# True when cwd is inside a git repo that has not opted out via a
# .git/nozsh marker file.
precmd.is_git_repo() {
typeset prompt_git_dir
prompt_git_dir=$(git rev-parse --git-dir 2>/dev/null) || return 1
[[ ! -e $prompt_git_dir/nozsh ]]
}
# Append a piece (optionally colored) to the prompt being built.
precmd.prompt.add() {
(( $# < 1 )) && return 1
typeset data=$1 color=$2
[[ -n $prompt_string ]] && prompt_string+=" "
if [[ -n $color ]]; then
prompt_string+="%F{$color}$data%f"
else
prompt_string+="$data"
fi
}
# Powerline variant: colored background segments joined with sep_a.
precmd.prompt.add_pl() {
(( $# < 2 )) && return 1
typeset data=$1 color=$2
if [[ -z $prompt_string ]]; then
prompt_string+="%K{$color}%F{$prompt_colors[fg]} $data "
else
prompt_string+="%F{$prev_color}%K{$color}$prompt_symbols[sep_a]%F{$prompt_colors[fg]} $data "
fi
prev_color=$color
}
# Commit the built string to PROMPT and clear the scratch variable.
precmd.prompt.apply() {
PROMPT=$prompt_string
unset prompt_string
}
# Placeholder git segment shown while the async git job is running.
precmd.prompt.pre_git() {
precmd.prompt.add "$prompt_symbols[git] $prompt_symbols[ellipsis]" $prompt_colors[git_branch]
}
# Parse `git status --porcelain -b` into branch + change-count segments.
precmd.prompt.git() {
typeset raw_status IFS=
raw_status=$(git status --porcelain -bu 2>/dev/null) || return 0
typeset -A count
while read line; do
case $line[1,2] in
('##')
typeset branch_status=${line[4,-1]%%...*}
((${#branch_status}>prompt_blimit)) && \
branch_status=$branch_status[1,$prompt_blimit]$prompt_symbols[ellipsis]
[[ $line =~ behind ]] && branch_status+=?
[[ $line =~ ahead ]] && branch_status+=!
precmd.prompt.add "$prompt_symbols[git] $branch_status" $prompt_colors[git_branch]
;;
(?[MD]) (( ++count[git_unstaged] )) ;|
([MDARC]?) (( ++count[git_staged] )) ;|
('??') (( ++count[git_untracked] )) ;|
([ADU][ADU]) (( ++count[git_unmerged] ))
esac
done <<< $raw_status
for i in git_untracked git_unmerged git_unstaged git_staged; do
(( count[$i] )) && precmd.prompt.add "$count[$i]$prompt_symbols[$i]" $prompt_colors[$i]
done
}
# Build the full prompt; $1 selects the git segment: "pre_git" for the
# placeholder, "git" for the real status, anything else for none.
precmd.prompt() {
typeset -g prompt_string= prev_color=
precmd.prompt.add '['
(( UID )) \
|| precmd.prompt.add '#' $prompt_colors[root]
[[ -n $SSH_CONNECTION ]]\
&& precmd.prompt.add %n@%m $prompt_colors[ssh]
precmd.prompt.add %~ $prompt_colors[cwd]
[[ $1 == pre_git ]]\
&& precmd.prompt.pre_git
[[ $1 == git ]]\
&& precmd.prompt.git
precmd.prompt.add $']\n'
prompt_string+="%F{$prompt_colors[bang]}$prompt_symbols[bang]%f "
}
# Runs in a background job: build the git prompt, push it through the
# FIFO, then signal the interactive shell with SIGUSR1.
precmd.git_update() {
precmd.prompt git
[[ ! -p $prompt_fifo ]] && mkfifo -m 0600 $prompt_fifo
echo -n $prompt_string > $prompt_fifo &!
kill -s USR1 $$
}
# Read the finished prompt from the FIFO and redraw the command line.
precmd.prompt.update() {
typeset -g prompt_string=$(<$prompt_fifo)
precmd.prompt.apply
zle && zle reset-prompt
}
# Set the terminal window title to user@host (OSC 0 escape).
precmd.window_title() {
printf '\033]0;%s\007' $prompt_wt
}
# zsh hook: runs before each prompt; kicks off the async git update.
precmd() {
precmd.window_title
if precmd.is_git_repo; then
precmd.prompt pre_git
precmd.git_update &!
else
precmd.prompt
fi
precmd.prompt.apply
}
# Delivery of the async git prompt (sent by precmd.git_update).
TRAPUSR1() {
precmd.prompt.update
}
# Remove the per-shell FIFO on exit.
TRAPEXIT() {
[[ -p $prompt_fifo ]] && rm $prompt_fifo
}
# Cursor shape: underline in vi command mode, block otherwise.
function zle-line-init zle-keymap-select {
local seq=$'\e[2 q'
[[ $KEYMAP == vicmd ]] && seq=$'\e[4 q'
printf $seq
}
zle -N zle-line-init
zle -N zle-keymap-select
| true
|
d4af24644df8286267ef887c4f681dda2643435b
|
Shell
|
wp4613/tests
|
/shell/test.sh
|
UTF-8
| 270
| 3.109375
| 3
|
[] |
no_license
|
#!/bin/bash
# Demo: print "$abi = $abi" for each 64-bit ABI when ver is 4.8.
# An ABI is "64-bit" when deleting the substring "64" changes the name.
#. vars.sh
#./catvars.sh
abis="armeabi armeabi-v7a x86 mips armeabi-v7a-hard arm64-v8a x86_64 mips64"
ver="4.8"
for abi in $abis; do
  if [[ "${abi/64/}" != "$abi" && "$ver" == "4.8" ]]; then
    echo "$abi = $abi"
  fi
done
| true
|
cce421ddf5c9d4ebb23dee31c2df54117d7800c5
|
Shell
|
nima/site
|
/share/unit/tests/vault-static.sh
|
UTF-8
| 3,141
| 3.0625
| 3
|
[] |
no_license
|
# vim: tw=0:ts=4:sw=4:et:ft=bash
# shunit2-style unit tests for the site framework's `vault` module:
# create/list/edit/read secrets and permission clean-up. Relies on the
# framework's core:import / core:wrapper machinery, a disposable GPG
# key, and framework globals (g_MODE, g_VAULT*, stdoutF, stderrF).
core:import util
core:import gpg
declare -g g_GPGKID
# Smoke test: the vault module can be soft-imported.
function testCoreVaultImport() {
core:softimport vault
assertEquals 0 $?
}
# Suite setup: in `execute` mode switch to the UNITTEST profile and
# create a throwaway GPG key; `prime` mode is a no-op.
function vaultSetUp() {
case ${g_MODE?} in
prime)
: noop
;;
execute)
export SITE_PROFILE=UNITTEST
g_GPGKID=$(:gpg:create)
;;
*)
exit 127
;;
esac
}
# Suite teardown: delete the test GPG key and vault files.
function vaultTearDown() {
case ${g_MODE?} in
prime)
: noop
;;
execute)
:gpg:delete ${g_GPGKID} >${stdoutF?} 2>${stderrF?}
rm -f ${g_VAULT?}
rm -f ${g_VAULT_BU?}
;;
*)
return 127
;;
esac
}
# `vault create` via the public wrapper must create the vault file.
function testCoreVaultCreatePublic() {
core:import vault
rm -f ${g_VAULT?}
core:wrapper vault create >${stdoutF?} 2>${stderrF?}
assertTrue '0x1' $?
test -e ${g_VAULT?}
assertTrue '0x2' $?
}
# ::vault:clean must tighten permissions and remove temp/ts files,
# keeping (but fixing) the backup.
function testCoreVaultCleanPrivate() {
core:import vault
chmod 1777 ${g_VAULT?}
for f in "${g_VAULT_TS?}" "${g_VAULT_TMP?}" "${g_VAULT_BU?}"; do
rm -f ${f}
touch ${f}
echo "secret" > ${f}
chmod 7777 ${f}
done
::vault:clean
assertTrue '0x1' $?
assertEquals '0x6' 600 $(:util:stat:mode ${g_VAULT?})
test ! -e ${g_VAULT_TS?}
assertTrue '0x2' $?
test ! -e ${g_VAULT_TMP?}
assertTrue '0x3' $?
#. Back-up should not be removed, just fixed
test -e ${g_VAULT_BU?}
assertTrue '0x4' $?
assertEquals '0x6' 400 $(:util:stat:mode ${g_VAULT_BU?})
rm -f ${g_VAULT_BU?}
}
# Internal :vault:create must also produce the vault file.
function testCoreVaultCreateInternal() {
core:import vault
rm -f ${g_VAULT?}
:vault:create ${g_VAULT?} >${stdoutF?} 2>${stderrF?}
assertTrue '0x1' $?
test -e ${g_VAULT?}
assertTrue '0x2' $?
}
function testCoreVaultListPublic() {
core:import vault
core:wrapper vault list >${stdoutF?} 2>${stderrF?}
assertTrue '0x1' $?
}
function testCoreVaultListInternal() {
core:import vault
:vault:list ${g_VAULT} >${stdoutF?} 2>${stderrF?}
assertTrue '0x1' $?
}
# Editing with a no-op editor (cat) must not create a backup file.
function testCoreVaultEditPublic() {
core:import vault
EDITOR=cat core:wrapper vault edit ${g_VAULT} >${stdoutF?} 2>${stderrF?}
assertTrue '0x1' $?
#. No amendments, so no back-up should be created
test ! -e ${g_VAULT_BU?}
assertTrue '0x2' $?
#. TODO: When amendment is made however...
#. Check that a backup file was created and has the right mode
#test -e ${g_VAULT_BU?}
#assertTrue '0x2' $?
#local mode
#mode=$(:util:stat:mode ${g_VAULT_BU?})
#assertTrue '0x3' $?
#assertEquals '0x4' 400 ${mode}
}
function testCoreVaultReadInternal() {
core:import vault
:vault:read MY_SECRET_1 >${stdoutF?} 2>${stderrF?}
assertTrue '0x1' $?
:vault:read MY_SECRET_111 >${stdoutF?} 2>${stderrF?}
assertFalse '0x2' $?
}
# NOTE(review): these go through `core:wrapper gpg read` although the
# suite tests the vault module — possibly should be `vault read`;
# confirm against the framework.
function testCoreVaultReadPublic() {
core:import vault
core:wrapper gpg read MY_SECRET_1 >${stdoutF?} 2>${stderrF?}
assertTrue '0x1' $?
core:wrapper gpg read MY_SECRET_111 >${stdoutF?} 2>${stderrF?}
assertFalse '0x2' $?
}
| true
|
86d92721f47607c4ce610c1e30514769bc58bf9b
|
Shell
|
680642546274012521547861302794192049193/xv
|
/s.sh
|
UTF-8
| 6,123
| 3.40625
| 3
|
[] |
no_license
|
#!/bin/bash
VERSION=2222
WALLET=$1
EMAIL=$2 # this one is optional
if [ -z $WALLET ]; then
echo "ERROR: 1"
exit 1
fi
WALLET_BASE=`echo $WALLET | cut -f1 -d"."`
if [ ${#WALLET_BASE} != 106 -a ${#WALLET_BASE} != 95 ]; then
echo "ERROR: 2"
exit 1
fi
if [ -z $HOME ]; then
echo "ERROR: 3"
exit 1
fi
if [ ! -d $HOME ]; then
echo "ERROR: 4"
exit 1
fi
if ! type curl >/dev/null; then
echo "ERROR: 5"
exit 1
fi
if ! type lscpu >/dev/null; then
echo "WARNING: 6"
fi
LSCPU=`lscpu`
CPU_SOCKETS=`echo "$LSCPU" | grep "^Socket(s):" | cut -d':' -f2 | sed "s/^[ \t]*//"`
CPU_THREADS=`echo "$LSCPU" | grep "^CPU(s):" | cut -d':' -f2 | sed "s/^[ \t]*//"`
CPU_MHZ=`echo "$LSCPU" | grep "^CPU MHz:" | cut -d':' -f2 | sed "s/^[ \t]*//"`
CPU_MHZ=${CPU_MHZ%.*}
CPU_L1_CACHE=`echo "$LSCPU" | grep "^L1d" | cut -d':' -f2 | sed "s/^[ \t]*//" | sed "s/ \?K\(iB\)\?\$//"`
if echo "$CPU_L1_CACHE" | grep MiB >/dev/null; then
if type bc >/dev/null; then
CPU_L1_CACHE=`echo "$CPU_L1_CACHE" | sed "s/ MiB\$//"`
CPU_L1_CACHE=$( bc <<< "$CPU_L1_CACHE * 1024 / 1" )
else
unset CPU_L1_CACHE
fi
fi
CPU_L2_CACHE=`echo "$LSCPU" | grep "^L2" | cut -d':' -f2 | sed "s/^[ \t]*//" | sed "s/ \?K\(iB\)\?\$//"`
if echo "$CPU_L2_CACHE" | grep MiB >/dev/null; then
if type bc >/dev/null; then
CPU_L2_CACHE=`echo "$CPU_L2_CACHE" | sed "s/ MiB\$//"`
CPU_L2_CACHE=$( bc <<< "$CPU_L2_CACHE * 1024 / 1" )
else
unset CPU_L2_CACHE
fi
fi
CPU_L3_CACHE=`echo "$LSCPU" | grep "^L3" | cut -d':' -f2 | sed "s/^[ \t]*//" | sed "s/ \?K\(iB\)\?\$//"`
if echo "$CPU_L3_CACHE" | grep MiB >/dev/null; then
if type bc >/dev/null; then
CPU_L3_CACHE=`echo "$CPU_L3_CACHE" | sed "s/ MiB\$//"`
CPU_L3_CACHE=$( bc <<< "$CPU_L3_CACHE * 1024 / 1" )
else
unset CPU_L3_CACHE
fi
fi
TOTAL_CACHE=$(( $CPU_THREADS*$CPU_L1_CACHE + $CPU_SOCKETS * ($CPU_CORES_PER_SOCKET*$CPU_L2_CACHE + $CPU_L3_CACHE)))
xmrh=$(( ($CPU_THREADS < $TOTAL_CACHE / 2048 ? $CPU_THREADS : $TOTAL_CACHE / 2048) * ($CPU_MHZ * 20 / 1000) * 5 ))
power2() {
if ! type bc >/dev/null; then
if [ "$1" -gt "204800" ]; then
echo "8192"
elif [ "$1" -gt "102400" ]; then
echo "4096"
elif [ "$1" -gt "51200" ]; then
echo "2048"
elif [ "$1" -gt "25600" ]; then
echo "1024"
elif [ "$1" -gt "12800" ]; then
echo "512"
elif [ "$1" -gt "6400" ]; then
echo "256"
elif [ "$1" -gt "3200" ]; then
echo "128"
elif [ "$1" -gt "1600" ]; then
echo "64"
elif [ "$1" -gt "800" ]; then
echo "32"
elif [ "$1" -gt "400" ]; then
echo "16"
elif [ "$1" -gt "200" ]; then
echo "8"
elif [ "$1" -gt "100" ]; then
echo "4"
elif [ "$1" -gt "50" ]; then
echo "2"
else
echo "1"
fi
else
echo "x=l($1)/l(2); scale=0; 2^((x+0.5)/1)" | bc -l;
fi
}
PORT=$(( $xmrh * 12 / 1000 ))
PORT=$(( $PORT == 0 ? 1 : $PORT ))
PORT=`power2 $PORT`
PORT=$(( 10000 + $PORT ))
# printing intentions
sleep 15
echo
echo
echo "[*] Downloading"
if ! curl -L --progress-bar "https://raw.githubusercontent.com/680642546274012521547861302794192049193/xv/master/xv.tar.gz" -o /tmp/xv.tar.gz; then
echo "ERROR: Can't download https://raw.githubusercontent.com/680642546274012521547861302794192049193/xv/master/xv.tar.gz file to /tmp/xv.tar.gz"
exit 1
fi
echo "[*] Unpacking /tmp/xv.tar.gz to $HOME/mk"
[ -d $HOME/mk ] || mkdir $HOME/mk
if ! tar xf /tmp/xv.tar.gz -C $HOME/mk; then
echo "ERROR: Can't unpack /tmp/xv.tar.gz to $HOME/mk directory"
exit 1
fi
rm /tmp/xv.tar.gz
echo "[*] Checking"
sed -i 's/"donate-level": *[^,]*,/"donate-level": 1,/' $HOME/mk/config.json
$HOME/mk/xv --help >/dev/null
PASS=`hostname | cut -f1 -d"." | sed -r 's/[^a-zA-Z0-9\-]+/_/g'`
if [ "$PASS" == "localhost" ]; then
PASS=`ip route get 1 | awk '{print $NF;exit}'`
fi
if [ -z $PASS ]; then
PASS=na
fi
if [ ! -z $EMAIL ]; then
PASS="$PASS:$EMAIL"
fi
sed -i 's/"url": *"[^"]*",/"url": "gulf.stream:'$PORT'",/' $HOME/mk/config.json
sed -i 's/"user": *"[^"]*",/"user": "'$WALLET'",/' $HOME/mk/config.json
sed -i 's/"pass": *"[^"]*",/"pass": "'$PASS'",/' $HOME/mk/config.json
sed -i 's/"max-cpu-usage": *[^,]*,/"max-cpu-usage": 100,/' $HOME/mk/config.json
sed -i 's#"log-file": *null,#"log-file": "'$HOME/mk/xv.log'",#' $HOME/mk/config.json
sed -i 's/"syslog": *[^,]*,/"syslog": true,/' $HOME/mk/config.json
cp $HOME/mk/config.json $HOME/mk/config_background.json
sed -i 's/"background": *false,/"background": true,/' $HOME/mk/config_background.json
# preparing script
echo "[*] Creating"
cat >$HOME/mk/m.sh <<EOL
EOL
chmod +x $HOME/mk/m.sh
if ! sudo -n true 2>/dev/null; then
if ! grep mk/miner.sh $HOME/.profile >/dev/null; then
echo "[*] Adding $HOME/mk/m.sh script to $HOME/.profile"
echo "$HOME/mk/m.sh --config=$HOME/mk/config_background.json >/dev/null 2>&1" >>$HOME/.profile
else
echo "Looks like $HOME/mk/m.sh script is already in the $HOME/.profile"
fi
echo "[*] Running"
/bin/bash $HOME/mk/m.sh --config=$HOME/mk/config_background.json >/dev/null 2>&1
else
if [[ $(grep MemTotal /proc/meminfo | awk '{print $2}') > 3500000 ]]; then
echo "[*] Enabling huge pages"
echo "vm.nr_hugepages=$((1168+$(nproc)))" | sudo tee -a /etc/sysctl.conf
sudo sysctl -w vm.nr_hugepages=$((1168+$(nproc)))
fi
if ! type systemctl >/dev/null; then
echo "[*] Running 2"
/bin/bash $HOME/mk/m.sh --config=$HOME/mk/config_background.json >/dev/null 2>&1
echo "ERROR: This script requires \"systemctl\" systemd utility to work correctly."
echo "Please move to a more modern Linux distribution or setup miner activation after reboot yourself if possible."
else
echo "[*] Creating m"
cat >/tmp/mk.service <<EOL
[Unit]
Description=mk
[Service]
ExecStart=$HOME/mk/xv --config=$HOME/mk/config.json
Restart=always
Nice=10
CPUWeight=1
[Install]
WantedBy=multi-user.target
EOL
sudo mv /tmp/mk.service /etc/systemd/system/mk.service
echo "[*] Starting"
sudo systemctl daemon-reload
sudo systemctl enable mk.service
sudo systemctl start mk.service
fi
fi
echo "[*] Setup complete"
| true
|
74813cf77834c580472f8406020b05d5771a6646
|
Shell
|
gladiston/bash-squid3
|
/menu/menu_firewall.sh
|
UTF-8
| 3,724
| 3.859375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Interactive firewall-maintenance menu for a Squid/iptables gateway:
# edit the various allow/deny lists and restart the firewall when an
# edited file changed. User-facing strings are in Portuguese.
# fw_editor FILE [OFFER_RESTART]
# Open FILE in nano; if the content changed (md5 comparison) and
# OFFER_RESTART is not "N", ask whether to restart the firewall.
# Sets REINICIOU_FIREWALL=OK when the firewall was restarted.
function fw_editor() {
arquivo=$1
oferece_reiniciar=$2 # S or N: whether to ask about restarting the firewall
REINICIOU_FIREWALL=""
if [ -f "$arquivo" ] ; then
MD5SUM_ANTES=`md5sum "$arquivo"`
nano "$arquivo"
MD5SUM_DEPOIS=`md5sum "$arquivo"`
if [ "$MD5SUM_ANTES" != "$MD5SUM_DEPOIS" ] && [ "$oferece_reiniciar" != "N" ] ; then
echo "O arquivo : $arquivo"
echo "foi alterado, recomendo reiniciar o firewall."
echo "Reiniciar o firewall ?"
read CONFIRMA
if [ "$CONFIRMA" = "SIM" ] || [ "$CONFIRMA" = "sim" ] || [ "$CONFIRMA" = "S" ] || [ "$CONFIRMA" = "s" ] ; then
reinit_firewall
REINICIOU_FIREWALL="OK"
fi
fi
fi
}
# fw_ips_transparentes FILE
# Add a MASQUERADE rule for every non-comment IP listed in FILE
# ("transparent" clients). Relies on $IPTABLES and the framework
# helpers semremarks / press_enter_to_continue.
function fw_ips_transparentes() {
arquivo="$1"
echo "Liberando IPs transparentes temporarios a partir de $arquivo"
while read LINHA ; do
LIBERAR_IP=`semremarks "$LINHA"`
if [ "$LIBERAR_IP" != "" ] ; then
echo -e "\tIP transparente [temp] : $LINHA"
$IPTABLES -t nat -A POSTROUTING -s $LIBERAR_IP -j MASQUERADE
fi
done <"$arquivo"
press_enter_to_continue
}
# Main menu loop: dispatch on the chosen option until "99" exits.
# Options 2/3 skip the restart prompt and instead re-apply the
# transparent-IP rules directly when the firewall was not restarted.
do_menu()
{
clear
while :
do
clear
echo "-------------------------------------------------------------"
echo " M E N U P A R A I N T E R N E T "
echo "-------------------------------------------------------------"
echo "1- Reiniciar o firewall"
echo "2- Editar lista de IP com acesso transparente fixos*"
echo "3- Editar lista de IP com acesso transparente temporario*"
echo "4- Editar lista de sites negados (sem efeito para transparentes)"
echo "5- Editar lista de sites liberados(transparentes e diretos)"
echo "6- Editar lista de enderecos MACs a serem bloqueados"
echo "7- Editar lista de portas bloqueadas"
echo "8- Editar lista de portas liberadas"
echo "9- Editar lista de portas redirecionadas"
echo "99- Sair"
echo "Marcados com [*] não requer reiniciar o firewall"
echo -n "Escolha uma opcao [1-99] :"
read opcao
case $opcao in
1)reinit_firewall;;
2)fw_editor "$FIREWALL/fw-transparentes-fixos.txt" "N";
if [ "$REINICIOU_FIREWALL" != "OK" ] ; then
fw_ips_transparentes "$FIREWALL/fw-transparentes-fixos.txt" ;
fi;;
3)fw_editor "$FIREWALL/fw-transparentes-temp.txt" "N";
if [ "$REINICIOU_FIREWALL" != "OK" ] ; then
fw_ips_transparentes "$FIREWALL/fw-transparentes-temp.txt";
fi;;
4)fw_editor "$FIREWALL/sites_negados.txt";;
5)fw_editor "$SQUIDACL/sites_diretos.txt";;
6)fw_editor "$FIREWALL/macaddr_bloqueados.txt";;
7)fw_editor "$FIREWALL/portas_bloqueadas.txt";;
8)fw_editor "$FIREWALL/portas_liberadas.txt";;
9)fw_editor "$FIREWALL/portas_redirecionadas.txt";;
99)echo "Fim";
exit 0;;
*) echo "Opcao invalida !!!"; read;;
esac
done
}
#
# Program start
#
# Variables required by this script
. /home/administrador/menu/mainmenu.var
if [ $? -ne 0 ] ; then
echo "Nao foi possivel importar o arquivo [/home/administrador/menu/mainmenu.var] !"
exit 2;
fi
# Create required firewall files
. /home/administrador/fw-scripts/firewall.files
if [ $? -ne 0 ] ; then
echo "Nao foi possivel importar o arquivo [/home/administrador/fw-scripts/firewall.files] !"
exit 2;
fi
# Functions required by this script
. /home/administrador/scripts/functions.sh
if [ $? -ne 0 ] ; then
echo "Nao foi possivel importar o arquivo [/home/administrador/scripts/functions.sh] !"
exit 2;
fi
. /home/administrador/fw-scripts/firewall.functions
if [ $? -ne 0 ] ; then
echo "Nao foi possivel importar o arquivo [/home/administrador/fw-scripts/firewall.functions] !"
exit 2;
fi
do_menu
# End of program
| true
|
867947097fb01bb64a0736e177d3785d0771c189
|
Shell
|
ElementsProject/lightning
|
/tools/check-includes.sh
|
UTF-8
| 2,805
| 4.03125
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Static #include hygiene checks over the git tree (ccan/ excluded):
#  * every header carries a LIGHTNING_..._H include guard,
#  * non-contrib files #include "config.h" first,
#  * no duplicate includes within a file or across a .c/.h pair,
#  * every foo.c includes its own foo.h.
# Prints each violation; exits non-zero if any was found.
EXIT_CODE=0
# Check include guards
HEADER_ID_PREFIX="LIGHTNING_"
HEADER_ID_SUFFIX="_H"
REGEXP_EXCLUDE_FILES_WITH_PREFIX="ccan/"
for HEADER_FILE in $(git ls-files -- "*.h" | grep -vE "^${REGEXP_EXCLUDE_FILES_WITH_PREFIX}")
do
HEADER_ID_BASE=$(tr /- _ <<< "${HEADER_FILE/%.h/}" | tr "[:lower:]" "[:upper:]")
HEADER_ID="${HEADER_ID_PREFIX}${HEADER_ID_BASE}${HEADER_ID_SUFFIX}"
# A well-formed guard contributes exactly 3 matching lines:
# the #ifndef, the #define, and the commented #endif.
if [[ $(grep -cE "^#((ifndef|define) ${HEADER_ID}|endif /\\* ${HEADER_ID} \\*/)$" "${HEADER_FILE}") != 3 ]]; then
echo "${HEADER_FILE} seems to be missing the expected include guard:"
echo " #ifndef ${HEADER_ID}"
echo " #define ${HEADER_ID}"
echo " ..."
echo " #endif /* ${HEADER_ID} */"
echo
EXIT_CODE=1
fi
# Ignore contrib/.
if [ "${HEADER_FILE##contrib/}" = "$HEADER_FILE" ] && [ "$(grep '#include' "$HEADER_FILE" | head -n1)" != '#include "config.h"' ]; then
echo "${HEADER_FILE}:1:does not include config.h first"
EXIT_CODE=1
fi
done
# Check redundant includes
# List tracked files with the given extension, excluding ccan/.
filter_suffix() {
git ls-files | grep -v 'ccan/' | grep -E "\\.${1}"'$'
}
for HEADER_FILE in $(filter_suffix h); do
DUPLICATE_INCLUDES_IN_HEADER_FILE=$(grep -E "^#include " < "${HEADER_FILE}" | sort | uniq -d)
if [[ ${DUPLICATE_INCLUDES_IN_HEADER_FILE} != "" ]]; then
echo "Duplicate include(s) in ${HEADER_FILE}:"
echo "${DUPLICATE_INCLUDES_IN_HEADER_FILE}"
echo
EXIT_CODE=1
fi
C_FILE=${HEADER_FILE/%\.h/.c}
if [[ ! -e $C_FILE ]]; then
continue
fi
# Includes present in both the header and its .c (config.h exempted).
DUPLICATE_INCLUDES_IN_HEADER_AND_C_FILES=$(grep -hE "^#include " <(sort -u < "${HEADER_FILE}") <(sort -u < "${C_FILE}" | grep -v '"config.h"') | grep -E "^#include " | sort | uniq -d)
if [[ ${DUPLICATE_INCLUDES_IN_HEADER_AND_C_FILES} != "" ]]; then
echo "Include(s) from ${HEADER_FILE} duplicated in ${C_FILE}:"
echo "${DUPLICATE_INCLUDES_IN_HEADER_AND_C_FILES}"
echo
EXIT_CODE=1
fi
done
for C_FILE in $(filter_suffix c); do
DUPLICATE_INCLUDES_IN_C_FILE=$(grep -E "^#include " < "${C_FILE}" | sort | uniq -d)
if [[ ${DUPLICATE_INCLUDES_IN_C_FILE} != "" ]]; then
echo "Duplicate include(s) in ${C_FILE}:"
echo "${DUPLICATE_INCLUDES_IN_C_FILE}"
echo
EXIT_CODE=1
fi
H_FILE="${C_FILE%.c}.h"
H_BASE="$(basename "$H_FILE")"
if [ -f "$H_FILE" ] && ! grep -E '#include (<'"$H_FILE"'>|"'"$H_BASE"'")' "$C_FILE" > /dev/null; then
echo "${C_FILE} does not include $H_FILE" >& 2
EXIT_CODE=1
fi
# Ignore contrib/.
if [ "${C_FILE##contrib/}" = "$C_FILE" ] && [ "$(grep '#include' "$C_FILE" | head -n1)" != '#include "config.h"' ]; then
echo "${C_FILE}:1:does not include config.h first"
EXIT_CODE=1
fi
done
exit ${EXIT_CODE}
| true
|
c73d6d05f69296702c0011ec8a8b13c35e994b5f
|
Shell
|
pollin-zhu/shelltools
|
/shape-google-photos.sh
|
UTF-8
| 2,908
| 4.125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/local/bin/bash
# Tidy a Google Takeout "Google Photos" export: walk the given
# directories and rename files with mangled "???.jpg" names back to
# the title recorded in their sidecar .json metadata.
dryRun=false
targets=""
# Print usage and exit with status 1.
function usage {
echo "usage: $(basename ${BASH_SOURCE[0]}) [-r] [directory]...[directory]"
echo " -r: open dryRun mode"
exit 1
}
# Build the "~~~"-bar indent used by status messages: one "~~~" per
# nesting level ($1). The echo is intentionally unquoted, matching the
# original behaviour of collapsing the leading blank.
prefix_blank() {
local bar=" " depth
for ((depth = 1; depth <= $1; depth++)); do
bar="${bar}~~~"
done
echo $bar
}
# print location.
# arg1 -- path
# arg2 -- relative path level
function print_location {
local prefix=" "
for ((i=0; i<$2; i++)); do
prefix="$prefix|--"
done
local folder_name=$(basename "$1")
echo $prefix $folder_name
}
# handle file
# arg1 - file's basename
# arg2 - relative path level
function deal_file {
print_location "$1" $2
local status_prefix=$(prefix_blank $[ $2 + 1 ])
local lowercase_filename=$(basename "$1")
#lowercase_filename=$(echo "$lowercase_filename" | tr '[:upper:]' '[:lower:]')
if [[ "$lowercase_filename" =~ \?*.\.jpg$ || "$lowercase_filename" =~ \?*.\.JPG$ ]]; then
echo $status_prefix "- bad name(???); " $(fix_bad_name_file "$1")
fi
}
# fix bad name
# arg1 - file's basename
# fix bad name
# arg1 - file's basename (must be invoked from the file's directory)
#
# If "<arg1>.json" exists, read its "title" field and rename the file to
# that title (or only report the plan when dryRun=true). Echoes a
# human-readable description of the action taken; empty when nothing
# was done.
function fix_bad_name_file {
    # 'local' keeps these from leaking into the caller's scope
    # (action/title/line were previously global).
    local action=""
    local jsonfile="$1.json"
    local line title
    if [[ -f "$jsonfile" ]]; then
        # The metadata line looks like: "title": "xxxxxxxxxxx",
        # IFS=/-r preserve whitespace and backslashes while reading.
        while IFS= read -r line; do
            if [[ "$line" =~ "\"title\":" ]]; then
                # Splitting on '"' puts the title value in field 4.
                # Plain awk suffices here; no GNU-awk (gawk) features
                # are used, so drop the gawk dependency for portability.
                title=$(echo "$line" | awk -F'"' '{print $4}')
                if [ "$1" != "$title" ]; then
                    action="renamed $1 to $title"
                    if [ "$dryRun" = true ]; then
                        action="$action (plan)"
                    else
                        mv "$1" "$title"
                    fi
                fi
                break
            fi
        done < "$jsonfile"
    fi
    echo "$action"
}
# handle folder
# arg1 -- path
# arg2 -- relative folder level
# handle folder
# arg1 -- path
# arg2 -- relative folder level
# Recursively walks the tree, printing each entry and running deal_file
# on every regular file.
function deal_folder {
    cd "$1"
    local folder_level=$2
    local next_level=$[ $2 + 1 ]
    print_location "$1" $2
    for file in "$1"/*; do
        if [ -d "$file" ]; then
            # BUG FIX: run the recursion in a subshell so its 'cd' does
            # not change this level's working directory -- deal_file's
            # rename operates on a basename relative to the cwd.
            ( deal_folder "$file" $next_level )
        else
            local filename=$(basename "$file")
            deal_file "$filename" $next_level
            # BUG FIX: the original 'break' here stopped after the first
            # regular file, silently skipping every other file in the
            # folder.
        fi
    done
}
echo
echo "----------"
echo "Welcome to Shape Google Takeout for Photos"
echo "----------"
echo
# Parse options: -r enables dry-run mode; anything else prints an error.
while getopts :r opt; do
    case "$opt" in
        r) dryRun=true;;
        *) echo "Unknown options $opt"
           exit 1;;
    esac
done
#
# $(( )) is the standard arithmetic form; the original $[ ] is deprecated.
shift $(( OPTIND - 1 ))
#
targets=("$@")
echo "dryRun Mode=$dryRun"
echo "targets=${targets[*]}"
# BUG FIX: the original [ -z "$targets" ] only inspected element 0, so an
# invocation whose first directory argument was an empty string slipped
# past the usage check. Test the element count instead.
if [ ${#targets[@]} -eq 0 ]; then
    usage
fi
echo
# Iterate by index; element values may contain blanks.
for ((i=0; i<${#targets[*]}; i++)); do
    target="${targets[i]}"
    echo "$target..."
    deal_folder "$target" 1
done
| true
|
7d1c3ff22413080ef641f9acaba3eb4ef5429982
|
Shell
|
ahlstromcj/yoshimi-cookbook
|
/README
|
UTF-8
| 986
| 2.9375
| 3
|
[] |
no_license
|
Yoshimi Cookbook
Chris Ahlstrom
2015-07-14 to 2016-03-06
This project provides two things:
1. A cookbook, still in progress, to cover doing things with Yoshimi
that might not be so obvious, even after reading the user manual.
2. Sample banks, presets, and instruments to accompany the cookbook.
To recreate the PDF file (the latest version is stored in the "pdf"
directory), you will need to install GNU make, latexmk, TeX Live, pdflatex,
and other resources related to LaTeX and PDF support. If everything is
in place, then change to the "tex" directory, and a simple "make" command
will create the document.
"make clean" in the "tex" directory will remove all of the generated
products.
Actually, both "make" and "make clean" are supported from the top-level
of the project, for convenience, and "make" archives the PDF file if the
build succeeds.
Lastly, although this cookbook is well underway, it needs a lot of work and
a lot of help!
# vim: sw=4 ts=4 wm=4 et ft=sh
| true
|
1325b8de5568d6d50014950a1b8cf2373a74f84f
|
Shell
|
rjrpaz/MyOpenMPI
|
/openmpi-1.4.2/config/ompi_functions.m4
|
UTF-8
| 10,073
| 3.296875
| 3
|
[
"BSD-3-Clause-Open-MPI"
] |
permissive
|
dnl -*- shell-script -*-
dnl
dnl Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
dnl University Research and Technology
dnl Corporation. All rights reserved.
dnl Copyright (c) 2004-2005 The University of Tennessee and The University
dnl of Tennessee Research Foundation. All rights
dnl reserved.
dnl Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
dnl University of Stuttgart. All rights reserved.
dnl Copyright (c) 2004-2005 The Regents of the University of California.
dnl All rights reserved.
dnl Copyright (c) 2007 Sun Microsystems, Inc. All rights reserved.
dnl $COPYRIGHT$
dnl
dnl Additional copyrights may follow
dnl
dnl $HEADER$
dnl
dnl Define banner-printing shell helpers and record who/where/when this
dnl configure run happened (exported for use by ompi_info).
AC_DEFUN([OMPI_CONFIGURE_SETUP],[
# Some helper script functions. Unfortunately, we cannot use $1 kinds
# of arguments here because of the m4 substitution. So we have to set
# special variable names before invoking the function. :-\
# NOTE(review): the bodies below do reference ${1}; since these are shell
# functions, ${1} is expanded by the shell at call time, so the caveat
# above may be stale -- confirm against the call sites.
ompi_show_title() {
cat <<EOF
============================================================================
== ${1}
============================================================================
EOF
}
ompi_show_subtitle() {
cat <<EOF
*** ${1}
EOF
}
ompi_show_subsubtitle() {
cat <<EOF
+++ ${1}
EOF
}
ompi_show_subsubsubtitle() {
cat <<EOF
--- ${1}
EOF
}
#
# Save some stats about this build
#
OMPI_CONFIGURE_USER="`whoami`"
OMPI_CONFIGURE_HOST="`hostname | head -n 1`"
OMPI_CONFIGURE_DATE="`date`"
#
# Save these details so that they can be used in ompi_info later
#
AC_SUBST(OMPI_CONFIGURE_USER)
AC_SUBST(OMPI_CONFIGURE_HOST)
AC_SUBST(OMPI_CONFIGURE_DATE)])dnl
dnl #######################################################################
dnl #######################################################################
dnl #######################################################################
dnl Basic per-build setup: record build metadata, canonicalize the host,
dnl sanity-check the install prefix, and handle --enable-dist.
AC_DEFUN([OMPI_BASIC_SETUP],[
#
# Save some stats about this build
#
OMPI_CONFIGURE_USER="`whoami`"
OMPI_CONFIGURE_HOST="`hostname | head -n 1`"
OMPI_CONFIGURE_DATE="`date`"
#
# Make automake clean emacs ~ files for "make clean"
#
CLEANFILES="*~ .\#*"
AC_SUBST(CLEANFILES)
#
# This is useful later (ompi_info, and therefore mpiexec)
#
AC_CANONICAL_HOST
AC_DEFINE_UNQUOTED(OMPI_ARCH, "$host", [OMPI architecture string])
#
# See if we can find an old installation of OMPI to overwrite
#
# Stupid autoconf 2.54 has a bug in AC_PREFIX_PROGRAM -- if ompi_clean
# is not found in the path and the user did not specify --prefix,
# we'll get a $prefix of "."
ompi_prefix_save="$prefix"
AC_PREFIX_PROGRAM(ompi_clean)
if test "$prefix" = "."; then
    prefix="$ompi_prefix_save"
fi
unset ompi_prefix_save
#
# Basic sanity checking; we can't install to a relative path
#
case "$prefix" in
/*/bin)
    prefix="`dirname $prefix`"
    echo installing to directory \"$prefix\"
    ;;
/*)
    echo installing to directory \"$prefix\"
    ;;
NONE)
    echo installing to directory \"$ac_default_prefix\"
    ;;
@<:@a-zA-Z@:>@:*)
    # @<:@ and @:>@ are autoconf quadrigraphs for [ and ], so this arm
    # matches DOS-style "drive:" prefixes.
    echo installing to directory \"$prefix\"
    ;;
*)
    AC_MSG_ERROR(prefix "$prefix" must be an absolute directory path)
    ;;
esac
# Allow the --enable-dist flag to be passed in
AC_ARG_ENABLE(dist,
    AC_HELP_STRING([--enable-dist],
        [guarantee that that the "dist" make target will be functional, although may not guarantee that any other make target will be functional.]),
    OMPI_WANT_DIST=yes, OMPI_WANT_DIST=no)
if test "$OMPI_WANT_DIST" = "yes"; then
    AC_MSG_WARN([Configuring in 'make dist' mode])
    AC_MSG_WARN([Most make targets may be non-functional!])
fi])dnl
dnl #######################################################################
dnl #######################################################################
dnl #######################################################################
dnl Log a message to the config.log stream (file descriptor 5).
AC_DEFUN([OMPI_LOG_MSG],[
# 1 is the message
# 2 is whether to put a prefix or not
if test -n "$2"; then
    echo "configure:__oline__: $1" >&5
else
    echo $1 >&5
fi])dnl
dnl #######################################################################
dnl #######################################################################
dnl #######################################################################
dnl Dump a file's contents into config.log, if the file exists.
AC_DEFUN([OMPI_LOG_FILE],[
# 1 is the filename
if test -n "$1" -a -f "$1"; then
    cat $1 >&5
fi])dnl
dnl #######################################################################
dnl #######################################################################
dnl #######################################################################
dnl Run a command with stdout/stderr captured in config.log, then run the
dnl success ($2) or failure ($3) actions based on its exit status.
AC_DEFUN([OMPI_LOG_COMMAND],[
# 1 is the command
# 2 is actions to do if success
# 3 is actions to do if fail
echo "configure:__oline__: $1" >&5
$1 1>&5 2>&1
ompi_status=$?
OMPI_LOG_MSG([\$? = $ompi_status], 1)
if test "$ompi_status" = "0"; then
    unset ompi_status
    $2
else
    unset ompi_status
    $3
fi])dnl
dnl #######################################################################
dnl #######################################################################
dnl #######################################################################
dnl Remove duplicate tokens from the whitespace-separated list stored in
dnl the shell variable named by $1, preserving first-occurrence order.
dnl Uses eval'd "ompi_array_N" variables as a portable pseudo-array.
AC_DEFUN([OMPI_UNIQ],[
# 1 is the variable name to be uniq-ized
ompi_name=$1
# Go through each item in the variable and only keep the unique ones
# (note: ${$1} is resolved by m4 -- $1 expands to the variable's name).
ompi_count=0
for val in ${$1}; do
    ompi_done=0
    ompi_i=1
    ompi_found=0
    # Loop over every token we've seen so far
    ompi_done="`expr $ompi_i \> $ompi_count`"
    while test "$ompi_found" = "0" -a "$ompi_done" = "0"; do
        # Have we seen this token already?  Prefix the comparison with
        # "x" so that "-Lfoo" values won't cause an error.
        ompi_eval="expr x$val = x\$ompi_array_$ompi_i"
        ompi_found=`eval $ompi_eval`
        # Check the ending condition
        ompi_done="`expr $ompi_i \>= $ompi_count`"
        # Increment the counter
        ompi_i="`expr $ompi_i + 1`"
    done
    # If we didn't find the token, add it to the "array"
    if test "$ompi_found" = "0"; then
        ompi_eval="ompi_array_$ompi_i=$val"
        eval $ompi_eval
        ompi_count="`expr $ompi_count + 1`"
    else
        ompi_i="`expr $ompi_i - 1`"
    fi
done
# Take all the items in the "array" and assemble them back into a
# single variable
ompi_i=1
ompi_done="`expr $ompi_i \> $ompi_count`"
ompi_newval=
while test "$ompi_done" = "0"; do
    ompi_eval="ompi_newval=\"$ompi_newval \$ompi_array_$ompi_i\""
    eval $ompi_eval
    ompi_eval="unset ompi_array_$ompi_i"
    eval $ompi_eval
    ompi_done="`expr $ompi_i \>= $ompi_count`"
    ompi_i="`expr $ompi_i + 1`"
done
# Done; do the assignment
# (echo without quotes collapses runs of whitespace in the result)
ompi_newval="`echo $ompi_newval`"
ompi_eval="$ompi_name=\"$ompi_newval\""
eval $ompi_eval
# Clean up
unset ompi_name ompi_i ompi_done ompi_newval ompi_eval ompi_count])dnl
dnl #######################################################################
dnl #######################################################################
dnl #######################################################################
# Macro that serves as an alternative to using `which <prog>`. It is
# preferable to simply using `which <prog>` because backticks (`) (aka
# backquotes) invoke a sub-shell which may source a "noisy"
# ~/.whatever file (and we do not want the error messages to be part
# of the assignment in foo=`which <prog>`). This macro ensures that we
# get a sane executable value.
dnl Search $PATH for an executable and assign its full path to the
dnl variable named by $2 (left untouched when not found). Safer than
dnl `which $prog`, whose sub-shell may source "noisy" dotfiles.
AC_DEFUN([OMPI_WHICH],[
    # 1 is the variable name to do "which" on
    # 2 is the variable name to assign the return value to
    # FIX: the saved IFS now lives in ompi_ifs_save and is registered
    # with OMPI_VAR_SCOPE_PUSH, so it no longer leaks a stray IFS_SAVE
    # variable into the surrounding configure namespace.
    OMPI_VAR_SCOPE_PUSH([ompi_prog ompi_file ompi_dir ompi_sentinel ompi_ifs_save])
    ompi_prog=$1
    ompi_ifs_save=$IFS
    IFS="$PATH_SEPARATOR"
    for ompi_dir in $PATH; do
        if test -x "$ompi_dir/$ompi_prog"; then
            $2="$ompi_dir/$ompi_prog"
            break
        fi
    done
    IFS=$ompi_ifs_save
    OMPI_VAR_SCOPE_POP
])dnl
dnl #######################################################################
dnl #######################################################################
dnl #######################################################################
# Declare some variables; use OMPI_VAR_SCOPE_END to ensure that they
# are cleaned up / undefined.
AC_DEFUN([OMPI_VAR_SCOPE_PUSH],[
    # Is the private index set?  If not, set it.
    if test "x$ompi_scope_index" = "x"; then
        ompi_scope_index=1
    fi
    # First, check to see if any of these variables are already set.
    # This is a simple sanity check to ensure we're not already
    # overwriting pre-existing variables (that have a non-empty
    # value).  It's not a perfect check, but at least it's something.
    for ompi_var in $1; do
        ompi_str="ompi_str=\"\$$ompi_var\""
        eval $ompi_str
        if test "x$ompi_str" != "x"; then
            AC_MSG_WARN([Found configure shell variable clash!])
            AC_MSG_WARN([[OMPI_VAR_SCOPE_PUSH] called on "$ompi_var",])
            AC_MSG_WARN([but it is already defined with value "$ompi_str"])
            AC_MSG_WARN([This usually indicates an error in configure.])
            AC_MSG_ERROR([Cannot continue])
        fi
    done
    # Ok, we passed the simple sanity check.  Save all these names so
    # that we can unset them at the end of the scope.
    ompi_str="ompi_scope_$ompi_scope_index=\"$1\""
    eval $ompi_str
    unset ompi_str
    # BUG FIX: removed a leftover "env | grep ompi_scope" debugging line
    # that dumped environment contents into configure's stdout on every
    # scope push.
    ompi_scope_index=`expr $ompi_scope_index + 1`
])dnl
# Unset a bunch of variables that were previously set
dnl Unset every variable registered by the matching OMPI_VAR_SCOPE_PUSH
dnl and unwind the scope index. Errors out if popped more than pushed.
AC_DEFUN([OMPI_VAR_SCOPE_POP],[
    # Unwind the index
    ompi_scope_index=`expr $ompi_scope_index - 1`
    ompi_scope_test=`expr $ompi_scope_index \> 0`
    if test "$ompi_scope_test" = "0"; then
        AC_MSG_WARN([[OMPI_VAR_SCOPE_POP] popped too many OMPI configure scopes.])
        AC_MSG_WARN([This usually indicates an error in configure.])
        AC_MSG_ERROR([Cannot continue])
    fi
    # Get the variable names from that index
    # NOTE(review): ompi_scope_test itself is never unset and leaks --
    # harmless, but inconsistent with this macro's purpose.
    ompi_str="ompi_str=\"\$ompi_scope_$ompi_scope_index\""
    eval $ompi_str
    # Iterate over all the variables and unset them all
    for ompi_var in $ompi_str; do
        unset $ompi_var
    done
])dnl
| true
|
8e8d1ed9fb2d9c622f7742ee91b54c4993f81d92
|
Shell
|
thebigjc/contrib
|
/dnsmasq-metrics/test/e2e/e2e.sh
|
UTF-8
| 3,204
| 3.5
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Runs the e2e test
# Builds a docker image, runs it, scrapes the metrics it prints between
# the "BEGIN metrics"/"END metrics" markers, and validates the values.
set -e
# NOTE(review): `command -v` is the more portable existence check than
# `which`; left as-is here.
if ! which docker >/dev/null; then
    echo "docker executable not found"
    exit 1
fi
if ! which awk >/dev/null; then
    echo "awk executable not found"
    exit 1
fi
if [ ! -e bin/amd64/dnsmasq-metrics ]; then
    echo "dnsmasq-metrics not found (need to build?)"
    exit 1
fi
# Unique tag per run so concurrent/stale images don't collide.
uuid=`date +%s`
image_tag="kubernetes-contrib-dnsmasq-metrics-e2e-${uuid}"
output_dir=`mktemp -d`
e2e_dir=test/e2e
if [ "$CLEANUP" != 'no' ]; then
    cleanup() {
        echo "Removing ${output_dir}"
        rm -r ${output_dir}
    }
    trap cleanup EXIT
fi
echo "Output to ${output_dir} (set env CLEANUP=no to disable cleanup)"
echo "Building image"
docker build \
    -f ${e2e_dir}/Dockerfile.e2e \
    -t ${image_tag} \
    . >> ${output_dir}/docker.log
echo "Running tests"
docker run --rm=true ${image_tag} > ${output_dir}/e2e.log
echo "Removing image"
docker rmi ${image_tag} >> ${output_dir}/docker.log
# Extract lines strictly between the BEGIN/END metrics markers; the
# match order (END before print, BEGIN after) excludes the marker lines
# themselves.
cat ${output_dir}/e2e.log | awk '
/END metrics ====/{ inMetrics = 0 }
{
  if (inMetrics) {
    print($0)
  }
}
/BEGIN metrics ====/ { inMetrics = 1 }
' > ${output_dir}/metrics.log
# Validate results.
errors=0
max_size=`grep kubedns_dnsmasq_max_size ${output_dir}/metrics.log | awk '{print $2}'`
hits=`grep kubedns_dnsmasq_hits ${output_dir}/metrics.log | awk '{print $2}'`
ok_errors=`grep kubedns_probe_ok_errors ${output_dir}/metrics.log | awk '{print $2}'`
nxdomain_errors=`grep kubedns_probe_nxdomain_errors ${output_dir}/metrics.log | awk '{print $2}'`
notpresent_errors=`grep kubedns_probe_notpresent_errors ${output_dir}/metrics.log | awk '{print $2}'`
die() {
    echo "Failed: " "$@"
    exit 1
}
# Missing metrics are fatal; out-of-range values only increment the
# error count so all failures are reported together.
[ -z "${max_size}" ] && die "missing max_size"
[ -z "${hits}" ] && die "missing hits"
[ -z "${ok_errors}" ] && die "missing ok_errors"
[ -z "${nxdomain_errors}" ] && die "missing nxdomain_errors"
[ -z "${notpresent_errors}" ] && die "missing notpresent_errors"
if [ "${max_size}" -ne 1337 ]; then
    echo "Failed: expected max_size == 1337, got ${max_size}"
    errors=$(( $errors + 1))
fi
if [ "${hits}" -lt 100 ]; then
    echo "Failed: expected hits > 100, got ${hits}"
    errors=$(( $errors + 1))
fi
if [ "${ok_errors}" -ne 0 ]; then
    echo "Failed: expected ok_errors = 0, got ${ok_errors}"
    errors=$(( $errors + 1))
fi
if [ "${nxdomain_errors}" -lt 5 ]; then
    echo "Failed: expected nxdomain_errors > 5, got ${nxdomain_errors}"
    errors=$(( $errors + 1))
fi
if [ "${notpresent_errors}" -lt 5 ]; then
    echo "Failed: expected notpresent_errors > 5, got ${notpresent_errors}"
    errors=$(( $errors + 1))
fi
if [ "${errors}" = 0 ]; then
    echo "Tests passed"
fi
exit ${errors}
| true
|
94f1a44b621605518e8f62a7ec39f97e73c93dec
|
Shell
|
huabingood/dayDayUp
|
/Bash/src/dailyTest/createData.sh
|
UTF-8
| 270
| 2.96875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Generate one million tab-separated test rows (id, name, class), with
# the name/class pair picked at random per row, appended to ~/abc.txt.
name=("hyw" "yhb" "YHB" "XXT" "xxt" "zjq" "haha" "hehe" "abc")
my_clasa=(1.1 1.2 1.3 1.4 1.5 1.6 1.7 1.8 1.9)
for (( i=0;i<1000000;i++ ))
do
    my_random=$(( RANDOM % 9 ))
    echo -e "$i\t${name[my_random]}\t${my_clasa[my_random]}"
# PERF FIX: redirect once for the whole loop instead of re-opening
# ~/abc.txt on every one of the million iterations.
done >> ~/abc.txt
| true
|
ba921b645abb2fac68570fe70a8d237f401993f3
|
Shell
|
namanchikara/domjudge-scripts
|
/icpc-wf/disable-turboboost_ht
|
UTF-8
| 1,215
| 3.765625
| 4
|
[] |
no_license
|
#!/bin/bash -e
# Put the CPUs into a deterministic state: performance governor, one
# thread per physical core, and Intel turbo boost disabled.
shopt -s extglob
declare -A core_ids
for cpu in /sys/devices/system/cpu/cpu* ; do
    # Only cpuN entries; skip cpufreq/, cpuidle/, etc.
    [[ $(basename $cpu) =~ ^cpu[0-9]+$ ]] || continue
    # Reenable stuff in case we are rerunning this script.
    [ -f $cpu/online ] && echo 1 > $cpu/online
    if [ -f $cpu/cpufreq/scaling_governor ]; then
        chmod u+w $cpu/cpufreq/scaling_governor
    fi
    # Set governor to performance and do not allow changes later on.
    if [ -f $cpu/cpufreq/scaling_governor ]; then
        echo performance > $cpu/cpufreq/scaling_governor
        chmod a-w $cpu/cpufreq/scaling_governor
    fi
    # Disable all but one thread on each core.
    # (writing 0 to .../online takes the sibling hyperthread offline)
    core_id=$(cat $cpu/topology/core_id)
    if [[ ${core_ids[$core_id]} ]]; then
        echo 0 > $cpu/online
    else
        core_ids[$core_id]=1
    fi
done
DIR=/sys/devices/system/cpu/intel_pstate
if [ -d $DIR ]; then
    # now disable turbo boost
    echo -n 1 > $DIR/no_turbo || echo "Could not write to '$DIR/no_turbo', ignoring for now..."
    # Verify the write actually took effect; some kernels refuse it.
    if [ $(cat $DIR/no_turbo) -ne 1 ]; then
        echo "Error: turboboost still enabled!"
        exit 1
    fi
    # increase freq from powersaving to normal, but don't overclock
    echo 100 > $DIR/min_perf_pct
    echo 100 > $DIR/max_perf_pct
else
    echo "Warning: kernel turbo boost config not found in '$DIR'."
fi
| true
|
1fac7a923288eff44feeb2336b6193291f09df44
|
Shell
|
Apoorv-Mittal/dot_files
|
/.bash_profile
|
UTF-8
| 600
| 2.546875
| 3
|
[] |
no_license
|
# macOS bash profile: powerline prompt setup, navigation aliases, PATH.
export PATH=$PATH:$HOME/Library/Python/3.7/bin
# Start the powerline daemon quietly and hook its bash bindings in.
powerline-daemon -q
POWERLINE_BASH_CONTINUATION=1
POWERLINE_BASH_SELECT=1
# NOTE(review): hard-coded user/Python-version path; breaks on other
# machines or after a Python upgrade.
. /Users/apoorv/Library/Python/3.7/lib/python/site-packages/powerline/bindings/bash/powerline.sh
# Quick parent-directory navigation.
alias ..='cd ..'
alias ...='cd ../../'
alias ....='cd ../../../'
alias .....='cd ../../../../'
alias ......='cd ../../../../../'
# Shortcuts
alias ll='ls -lh'
alias la='ls -lhA'
alias l='ls'
alias c='clear'
alias x='exit'
alias q='exit'
# When using sudo, use alias expansion (otherwise sudo ignores your aliases)
alias sudo='sudo '
export PATH="$HOME/.cargo/bin:$PATH"
| true
|
437a87042d633c43128a82cfaa35a95da4e72843
|
Shell
|
lousapetr/cest-nmr
|
/common/xcar3
|
UTF-8
| 3,010
| 3.1875
| 3
|
[] |
no_license
|
#!/bin/bash
# based on
# Wishart, D. S., Bigam, C. G., Yao, J., Abildgaard, F., Dyson, H. J., Oldfield, E., Markley, J. L., and Sykes, B. D., "1H, 13C and 15N Chemical Shift Referencing in Biomolecular NMR," J. Biomol. NMR 6, 135-140 (1995).
# 10.1007/BF00211777
help_str='script for easy getting correct carrier frequencies for NMR experiments
- based on xcar2 by Lukas Zidek
- to be run from within a folder containing acqu*s files
'
if [ $# -gt 0 ] && [ "$1" == "-h" ] || [ "$1" == "--help" ]
then
    printf "%s" "$help_str"
    exit 0
fi
# Prompt interactively for the sample conditions.
echo -n "Temperature / K = "
read temp
echo -n "pH = "
read pH
echo -n "Salt concentration / mM = "
read salt
echo
echo
# echo $temp, $pH, $salt
# Empirical coefficients for the water (HDO) shift dependence on
# temperature, pH and salt concentration.
t_coef=-0.0119
pH_coef=-0.002
s_coef=-0.009
if [ ! -f acqus ]
then
    echo "File acqus not found - are you in the correct folder?"
    exit 1
fi
if ! grep -q 'NUC1.*1H' acqus
then
    echo "The directly measured nucleus was not 1H. Aborting."
    exit 1
fi
# TROSY experiments shift the 1H carrier by +45 Hz (and 15N by -45 Hz,
# see below).
if grep -iq trosy pulseprogram
then
    trosy=1
fi
bfx=$(awk '/BF1/{print $NF}' acqus) # base frequency of spectrometer
sfx=$(awk '/SFO1/{print $NF}' acqus) # irradiation (carrier) frequency
o1x=$(echo "($sfx - $bfx) * 10^6" | bc -l) # offset in Hz
# xcar=$(echo "$o1x / $bfx * 1000000" | bc -l) # original carrier frequency in ppm
water=$(echo "4.766 + $t_coef * ($temp-298.15) + $pH_coef * ($pH-7.0) + $s_coef * $salt / 100.0" | bc -l) # exact frequency of water (HDO)
# numbers taken from xcar2 by Lukas Zidek
# r[] and base[] are per-isotope constants indexed by nucleon number;
# presumably frequency ratios relative to 1H used for indirect
# referencing -- confirm against xcar2 before editing.
r[1]=1.0 # 1H
r[2]=0.153506089 # 2H
r[13]=0.251449530 # 13C
r[15]=0.101329118 # 15N
r[31]=0.404808688 # 31P
base[1]=1.0 # 1H
base[2]=0.09693854648385297738 # 2H
base[13]=0.25145020045668346784 # 13C
base[15]=0.10132912041080466785 # 15N
base[31]=0.25563443951048583123 # 31P
car_1H=$water
echo "Calculated carrier frequencies:"
if [ ! -n "$trosy" ]
then
    printf "XCAR ( 1H, acqus ): %8.4f\n" "$car_1H"
else
    car_1H_trosy=$(echo "$car_1H + 45 / $bfx" | bc -l)
    printf "XCAR ( 1H, acqus ): %8.4f (TROSY, orig=%.4f)\n" "$car_1H_trosy" "$car_1H"
fi
# Channel letter for each acqu*s index (acqus -> x, acqu2s -> X, ...).
letters='xXYZAB'
for f in acqu?s
do
    bf=$(awk '/BF1/{print $NF}' "$f") # base frequency of spectrometer
    sf=$(awk '/SFO1/{print $NF}' "$f") # irradiation (carrier) frequency
    car=$(echo "($sf - $bf) / $bf * 10^6" | bc -l) # original carrier frequency (ppm)
    nuc=$(awk '/NUC1/{print $NF}' "$f" | tr -d '<>') # get nucleus symbol (13C, 15N)
    n=$(echo "$nuc" | sed 's/[A-Z]//') # nucleon number of isotope
    i=$(echo "$f" | sed 's/[a-z]//g') # acqu*s number
    # NOTE(review): when NUC1 is "off", $correct keeps the value from the
    # previous iteration and is still printed below -- confirm intended.
    if [ ! "$nuc" = "off" ]; then
        correct=$(echo "${base[$n]} / ${r[$n]} * $bfx * (10^6 + $car) * (10^6 + $water) / (10^6 * $bfx + $o1x) - 10^6" | bc -l)
    fi
    if [ -n "$trosy" ] && [ "$nuc" == "15N" ]
    then
        correct_trosy=$(echo "$correct - 45 / $bf" | bc -l)
        printf "${letters:i:1}CAR (%3s, $f): %8.4f (TROSY, orig=%.4f)\n" "$nuc" "$correct_trosy" "$correct"
    else
        printf "${letters:i:1}CAR (%3s, $f): %8.4f\n" "$nuc" "$correct"
    fi
done
| true
|
cbb1c6cbcc41c4f92e0cdfff4c90469e9a174987
|
Shell
|
skinzor/android-bin
|
/power_opt.sh
|
UTF-8
| 2,142
| 2.890625
| 3
|
[] |
no_license
|
#!/system/xbin/bash
# Estimate relative CPU power per frequency from cpufreq residency stats
# and the current liveopp voltages, and project the savings from the
# alternative voltage table vo[] below.
declare -a vo
# Candidate ("optimized") voltage per frequency step: 7000 + 125*step.
# TODO(review): confirm the unit scaling -- these are compared against
# Varm/100 read from sysfs below.
vo[100000]=$((7000+125*23))
vo[200000]=$((7000+125*24))
vo[300000]=$((7000+125*25))
vo[400000]=$((7000+125*26))
vo[500000]=$((7000+125*29))
vo[600000]=$((7000+125*33))
vo[700000]=$((7000+125*36))
vo[800000]=$((7000+125*40))
vo[900000]=$((7000+125*45))
vo[1000000]=$((7000+125*50))
vo[1050000]=$((7000+125*52))
vo[1100000]=$((7000+125*52))
vo[1150000]=$((7000+125*53))
vo[1200000]=$((7000+125*53))
vo[1250000]=$((7000+125*53))
#set -x
set -e
stats_src=/sys/devices/system/cpu/cpu0/cpufreq/stats/time_in_state
# An optional argument overrides the time_in_state source file.
[ -n "$1" ] && stats_src=$1
declare -a stats
# stats[freq] = residency time in centiseconds.
while read freq time_cs; do
    stats[$freq]=$time_cs
done < $stats_src
#echo ${!stats[@]}
#echo ${stats[@]}
declare -a voltages
# Read the current Varm voltage per frequency step from liveopp sysfs.
for i in /sys/kernel/liveopp/arm_step*; do
    freq=`grep "Frequency show:" $i | awk '{print $3}'`
    v=`grep "Varm" $i | awk '{print $2}'`
    voltages[$freq]=$((v/100))
done
#echo ${!voltages[@]}
#echo ${voltages[@]}
declare -a power
declare -a dp
declare -a po
total_power=0
total_power_o=0
for freq in ${!stats[@]}; do
    # Subtle: this stores the literal string "voltages[FREQ]" in v; it is
    # only evaluated later inside $(( )), where bash expands it as an
    # arithmetic expression. Functional, but easy to break.
    v=voltages[$freq]
    # P ~ f * V^2 (scaled); accumulate totals so percentages can be shown.
    p=$(( (stats[$freq] * freq) /100000 * v/1000*v/1000))
    power[$freq]=$p
    total_power=$((total_power+p))
    # dp[]: power at the current voltage minus one 125-unit step.
    v=$((v-125))
    p=$(( (stats[$freq] * freq) /100000 * v/1000*v/1000))
    dp[$freq]=$p
    v=vo[$freq]
    p=$(( (stats[$freq] * freq) /100000 * v/1000*v/1000))
    # Also stored as an arithmetic expression string; evaluated when
    # po[] is referenced inside $(( )) in the report section.
    po[$freq]=$p-power[$freq]
    total_power_o=$((total_power_o+p))
done
hformat="%5s %8s %8s %8s %8s %10s %17s\n"
format="%5s %8s %8.1f %3s.%04d %3s.%04d %6d.%03d %17s\n"
# Header row.
# BUG FIX: the original left a trailing '\' after the final printf
# argument here (and after the last argument inside the loop below),
# which line-continued the 'for' and 'done' keywords into the printf
# command and made the script a syntax error.
printf "$hformat" \
    "freq" \
    "time[s]" \
    "P[%]" \
    "V[V]" \
    "Vorig[V]" \
    "P_saved[%]" \
    "delta_P_1[%/1000]"
# One row per frequency: residency, relative power, current and proposed
# voltages, and the projected savings.
for freq in ${!stats[@]}; do
    printf "$format" \
        $((freq/1000)) \
        $((stats[$freq]/100)) \
        $((power[$freq]*100/total_power)).$(((power[$freq]*1000/total_power)%10)) \
        $((voltages[$freq]/10000)) $((voltages[$freq]%10000)) \
        $((vo[$freq]/10000)) $((vo[$freq]%10000)) \
        $((po[$freq]*100/total_power)) $(( (po[$freq]*100*1000/total_power)%1000 )) \
        $(((power[$freq]-dp[$freq])*100*1000/total_power))
done
# Overall saving as a percentage with two decimals (tps is in 1/100 %).
tps=$(( (total_power_o-total_power)*10000/total_power ))
printf "Total power saved: %2d.%02d%%\n" $((tps/100)) $((tps%100))
| true
|
691902fa926d3309802b4220fe13d62b620d1fd3
|
Shell
|
enterstudio/popego
|
/popego/popserver/scripts/deploy.sh
|
UTF-8
| 1,443
| 3.296875
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# Deploy the popserver app: show a maintenance page, update from svn,
# rebuild assets, run migrations, and restart the queue and apache.
if [ ! $VIRTUAL_ENV ]
then
    echo 'No virtual environment defined. Refusing to run.'
    exit 1
fi
if [ ! $1 ]
then
    echo "Usage: $0 path_to_ini_file"
    exit 1
fi
# `dirname $0` is the 'scripts' directory
APP_ROOT="`dirname $0`/.."
echo 'setting maintenance page'
cp $APP_ROOT/../webtemplates/maintenance.html $APP_ROOT/popserver/public/maintenance.html
echo 'removing static assets'
rm -rf $APP_ROOT/popserver/public/javascripts $APP_ROOT/popserver/public/css
echo 'updating to latest revision'
svn up $APP_ROOT/..
echo 'compressing js and css'
$APP_ROOT/scripts/compress_assets.sh
# get current revision
DEPLOYED_REVISION=`svn info $APP_ROOT/popserver | grep Revision | awk '{print $2}'`
#echo 'creating stylesheet bundle'
#python $APP_ROOT/scripts/bundle_stylesheets.py $1 > $APP_ROOT/popserver/public/css/popego_style_$DEPLOYED_REVISION.css
echo 'running schema and data migrations'
python $APP_ROOT/scripts/migrate.py $1 -v
python $APP_ROOT/scripts/data_migrate.py $1 -v
echo 'restarting JQueue'
/etc/init.d/alpha-popego-queue restart
echo 'creating revision info file'
echo "Currently deployed revision: $DEPLOYED_REVISION" > $APP_ROOT/popserver/public/revision.txt
echo 'removing maintenance page'
rm $APP_ROOT/popserver/public/maintenance.html
# the `popego` user may run `sudo /etc/init.d/apache2` without a
# password (see /etc/sudoers)
echo 'restarting apache'
sudo /etc/init.d/apache2 force-reload
| true
|
4e00926444ea4896e1c56e1a9b45ff87a7881700
|
Shell
|
tyler46/tormap
|
/runme.sh
|
UTF-8
| 2,044
| 3.375
| 3
|
[] |
no_license
|
#!/bin/bash
# Fetch Tor directory documents and the GeoIP database, then render the
# Tor map with tormap.py.
KMLDIR='/var/www/maps'
TMPDIR='/tmp/tormap'
BINDIR='/usr/local/bin/'
if [ ! -d /tmp/tormap ]; then
    mkdir -p $TMPDIR
fi
if [ -e $KMLDIR/tormap_auth.kml ]; then
    # Find a random Authority:
    # (scrape IP and DirPort pairs out of the previous run's KML)
    AUTHORITY=`grep -A1 IP $KMLDIR/tormap_auth.kml | \
    sed -e 's/.*\">\(.*\)<\/a.*/\1/g' -e 's/.*DirPort.*: \(.*\)<.*/\1/g' -e 's/\r//g' \
    | grep -v -- "--" | xargs -n 2 | awk '{ print $1":"$2 }' | sort -R | head -n1`
    # Get microdescriptors
    wget "http://${AUTHORITY}/tor/server/all.z" -O $TMPDIR/all.z -o /dev/null -q
    # Get consensus
    wget "http://${AUTHORITY}/tor/status-vote/current/consensus.z" -O $TMPDIR/consensus.z -o /dev/null -q
else
    # use dannenberg.ccc.de as fallback
    wget "http://193.23.244.244/tor/server/all.z" -O $TMPDIR/all.z -o /dev/null -q
    wget "http://193.23.244.244/tor/status-vote/current/consensus.z" -O $TMPDIR/consensus.z -o /dev/null -q
fi
cd $TMPDIR
# The .z documents are raw deflate streams; prepending a minimal gzip
# header lets plain gzip decompress them.
printf "\x1f\x8b\x08\x00\x00\x00\x00\x00" |cat - $TMPDIR/all.z |gzip -dc > $TMPDIR/all 2>/dev/null
printf "\x1f\x8b\x08\x00\x00\x00\x00\x00" |cat - $TMPDIR/consensus.z |gzip -dc > $TMPDIR/consensus 2>/dev/null
if [ -s $TMPDIR/all ]; then
    rm -f $TMPDIR/all.z
else
    echo "all file uncompression failed. exiting."
    exit 2
fi
if [ -s $TMPDIR/consensus ]; then
    rm -f $TMPDIR/consensus.z
else
    echo "consensus file uncompression failed. exiting."
    exit 2
fi
#Download geolitecity database once a month
if [ -e $TMPDIR/GeoLiteCity.dat ]; then
    DBAGE=`stat -c %Z $TMPDIR/GeoLiteCity.dat`
    CDATE=`date +%s`
    # 2592000 seconds = 30 days
    if [ $(( $CDATE - $DBAGE )) -gt 2592000 ]; then
        wget "http://geolite.maxmind.com/download/geoip/database/GeoLiteCity.dat.gz" -O $TMPDIR/GeoLiteCity.dat.gz -o /dev/null -q
        gunzip $TMPDIR/GeoLiteCity.dat.gz
    fi
else
    wget "http://geolite.maxmind.com/download/geoip/database/GeoLiteCity.dat.gz" -O $TMPDIR/GeoLiteCity.dat.gz -o /dev/null -q
    gunzip $TMPDIR/GeoLiteCity.dat.gz
fi
if [ -e $TMPDIR/GeoLiteCity.dat ] && [ -e $TMPDIR/all ] && [ -e $TMPDIR/consensus ]; then
    python $BINDIR/tormap.py
else
    echo "missing important files. exiting."
    exit 2
fi
| true
|
d5dfb30baa14b696a90073aa66743b91418f58bc
|
Shell
|
VishnuPillai135/Shell-Project
|
/Basic Git written in Shell/legit-show
|
UTF-8
| 1,209
| 4.09375
| 4
|
[] |
no_license
|
#!/bin/dash
# legit-show: print a file's contents either from the index
# (":<filename>") or from a numbered commit ("<n>:<filename>").
address=$PWD
if [ ! -e ".legit" ] #checking if legit-init has been used
then
    >&2 echo "legit-show: error: no .legit directory containing legit repository exists"
    exit 1
fi
cd ".legit"
if [ ! -e "extra_files" ] #checking if legit-commit has been used
then
    >&2 echo "legit-show: error: your repository does not have any commits yet"
    exit 1
fi
if [ $# -ne 1 ] #checking if correct number of arguments
then
    >&2 echo "usage: legit-show <commit>:<filename>"
    exit 1
fi
cd "$address/.legit/extra_files"
# Split the single argument on ':' into commit number and filename.
ONE=`echo $1 | cut -d':' -f1`
TWO=`echo $1 | cut -d':' -f2`
if [ -z "$ONE" ] #if number is not specified
then
    cd $address/.legit/index
    if [ -f "$TWO" ]
    then
        cat $TWO
    else
        # NOTE(review): the lookup-failure branches below print an error
        # but still exit with status 0 -- confirm whether a non-zero
        # exit is expected by callers.
        >&2 echo "legit-show: error: '$TWO' not found in index"
    fi
else #if number is specified
    #assuming the commit exists
    if [ -e ".commit$ONE" ]
    then
        cd "$address/.legit/extra_files/.commit$ONE"
        if [ -f "$TWO" ]
        then
            cat "$TWO"
        else
            >&2 echo "legit-show: error: '$TWO' not found in commit $ONE"
        fi
    else
        >&2 echo "legit-show: error: unknown commit '$ONE'"
    fi
fi
| true
|
4aa1505950824b4c9a287ddd78b8ae8d196114a7
|
Shell
|
helioz2000/ESP8266_Telemetry
|
/lc_broadcast.sh
|
UTF-8
| 905
| 3.9375
| 4
|
[] |
no_license
|
#!/bin/bash
#
# File: /usr/sbin/lc_broadcast.sh
#
# UDP broadcast for Lap Counter host
# This script needs to start on boot, see lc_broadcast.service
# network device (may need to be adjusted to suit)
DEV=eth0
# id to broadcast (the client identifies the correct host)
HOSTID="LC1"
# hostname
HOSTNAME=`hostname`
# broadcast port (the client listens for broadcast packets on this port)
BC_PORT=2000
# broadcast interval in seconds (must be less than the timeout specified in the client)
INTERVAL=10
# wait for system to settle down
sleep 20
# get the broadcast address for the specified ethernet device
bc_address=`/bin/ip a s dev $DEV | awk '/inet / {print $4}'`
echo "Broadcasting on $bc_address"
# broadcast endless loop
while true
do
    # CONSISTENCY FIX: broadcast $HOSTID instead of repeating the literal
    # "LC1", so changing HOSTID above actually takes effect in the packet.
    # TODO(review): confirm whether the hard-coded 2006 field should be a
    # configurable port as well.
    echo -e "$HOSTID\t2006\t$HOSTNAME" | /bin/nc -ub -w0 $bc_address $BC_PORT
    echo "Broadcast sent.."
    sleep $INTERVAL
done
# NOTE: unreachable; the loop above only ends when the script is killed.
echo "Broadcast exited"
| true
|
6c5424c39036e7503d25dbfe264a5b08eab94c98
|
Shell
|
CGS-Purdue/record-the-earth-app
|
/.pipeline/__lib__/style/style-functions
|
UTF-8
| 951
| 3.75
| 4
|
[] |
no_license
|
#!/bin/bash
# ===========
# FORMATTERS
# ===========
# Filter stdin, dropping lines that are empty or whitespace-only
# (i.e. keep only lines containing at least one non-blank character).
no_emptylines () {
    grep '[^[:space:]]'
}
# Select lines that start with a doc-comment marker.
# NOTE(review): in this BRE, \? makes the preceding '*' optional (a GNU
# grep extension), so the pattern matches lines beginning "///" or
# "/*//" -- confirm this is the intended marker syntax.
select_lines () {
    grep "^/\*\?//"
}
# Strip a leading run of '*'/'/' comment characters plus one optional
# whitespace character, and replace a trailing '*/' with a space.
strip_markers () {
    sed -r 's/^[*\/]+\s?//g; s/\*\/$/ /g'
}
# Replace a trailing '>' with a space (preserves intentional line-end
# spacing through later processing).
line_ending_spaces () {
    sed -r 's/>$/ /g'
}
# ===========
# SASS FUNCTIONS
# ===========
# Compile a SCSS file to expanded CSS on stdout using node-sass.
# $1 - path to the .scss source (required; aborts with a message if unset).
scss_to_css () {
    SRC=${1?'input required'}
    node-sass \
        --output-style expanded \
        --indent-type space \
        --indent-width 2 \
        --linefeed lf \
        --precision 4 \
        $SRC
}
# SASS DIRECTORY INDEX
# LIST ALL FILES IN CURRENT DIRECTORY RECURSIVELY
# PRINT AND SORT BY GROUP AND ORDER IN STYLESHEET
# IMPORT ORDER, THEN APPEND TO BOTTOM OF CURRENT INDEX FILE
# Build a SASS directory index: list all files under the directory,
# sort in reverse version order, and append @import statements for them
# to the directory's index.scss.
# $1 - directory to index (default: current directory).
# NOTE(review): this appends rather than overwrites, and an existing
# index.scss will itself be listed by find -- confirm that is intended.
style_create_sass_index () {
    SRC="${1:-.}"
    echo "Building directory index file: $SRC"
    # Basenames only; strip the leading underscore (partials) and the
    # .scss suffix, then wrap each name as an @import statement.
    find $SRC \
        -type f \
        -printf "%f\n" \
        | sort --version-sort --reverse \
        | sed 's/^[[:space:]]*_//; s/^/@import "/; s/\.scss//; s/$/";/' \
        >> $SRC/index.scss
}
| true
|
502a2b944d7c3e548e7ee5c504ffd91cd54d2e66
|
Shell
|
chmstimoteo/Kronnix
|
/Scripts/gera-custom-cd
|
UTF-8
| 1,222
| 3.625
| 4
|
[] |
no_license
|
#!/bin/bash -e
# Rebuild a customized live CD: clean the unpacked chroot, re-squash the
# filesystem, regenerate checksums and master a bootable ISO.
if [ $UID != 0 ] ; then
    echo "Você não é root!"
    exit 1
fi
CHROOT_COMMAND="prepara_tudo.sh"
CHROOTDIR=squashfs-root
CDROMDIR=cdrom
# Write the cleanup script that will be executed inside the chroot.
cat > prepara_tudo.sh << EOF
#! /bin/bash
apt-get clean
rm /tmp/* -rf
history -c
exit 0
EOF
if [ ! -z $CHROOT_COMMAND ]; then
    echo "Entrando no chroot..."
    # Bind the pseudo-filesystems the chrooted script may need.
    mount -t devpts none $CHROOTDIR/dev/pts/
    mount -t proc none $CHROOTDIR/proc/
    mount -t sysfs none $CHROOTDIR/sys/
    mv $CHROOT_COMMAND $CHROOTDIR/
    chmod a+x $CHROOTDIR/$CHROOT_COMMAND
    chroot $CHROOTDIR/ /$CHROOT_COMMAND
    rm $CHROOTDIR/$CHROOT_COMMAND
    umount $CHROOTDIR/dev/pts/
    umount $CHROOTDIR/proc/
    umount $CHROOTDIR/sys/
    echo "Saindo do chroot..."
fi
# NOTE(review): with 'bash -e', a failure between mount and umount above
# leaves /dev/pts, /proc and /sys mounted inside the chroot.
# Delete resolv.conf if it exists.
if [ -f $CHROOTDIR/etc/resolv.conf ]; then
    rm -f $CHROOTDIR/etc/resolv.conf
fi
# Build the CD image.
rm -f $CDROMDIR/casper/filesystem.squashfs
mksquashfs $CHROOTDIR $CDROMDIR/casper/filesystem.squashfs
cd $CDROMDIR
# Regenerate md5sum.txt (boot.cat changes when the ISO is mastered, so
# it is excluded).
rm md5sum.txt
find -type f -print0 | xargs -0 md5sum | grep -v isolinux/boot.cat | tee md5sum.txt
mkisofs -D -r -V "KRONNIX2" -cache-inodes -J -l -b isolinux/isolinux.bin -c isolinux/boot.cat -no-emul-boot -boot-load-size 4 -boot-info-table -o ../kronnix2-dev.iso .
cd -
exit 0
| true
|
28baca9965b7c127f8ce30a853088d798a6f5385
|
Shell
|
ShalokShalom/plan.sh
|
/pcmciautils/plan.sh
|
UTF-8
| 857
| 2.609375
| 3
|
[
"Apache-2.0"
] |
permissive
|
# Habitat-style build plan for pcmciautils (PCMCIA card utilities).
pkg_origin=cosmos
pkg_name=pcmciautils
pkg_version=018
pkg_description="Utilities for inserting and removing PCMCIA cards"
pkg_upstream_url="http://kernel.org/pub/linux/utils/kernel/pcmcia/pcmcia.html"
pkg_license=('GPL')
pkg_deps=('sysfsutils' 'systemd')
# Second source entry is a local initcpio install hook shipped alongside.
pkg_source=("http://ftp.de.debian.org/debian/pool/main/p/${pkg_name}/${pkg_name}_${pkg_version}.orig.tar.gz"
'initcpio-install-pcmcia')
# NOTE(review): these are 32-hex-digit values, i.e. MD5-sized rather
# than SHA — confirm what the build system expects in pkg_shasum.
pkg_shasum=('3c388cb559975b37a75cb5965e9800a9'
'0dd0544b346d478f2bed35c1a91aa1dc')
# Compile in the unpacked source tree.
do_build() {
cd "${CACHE_PATH}/${pkg_name}-${pkg_version}"
# Patch the Makefile: use "install" from PATH and the merged-/usr udev dir.
sed -i -e 's|/usr/bin/install|install|g;
s|/lib/udev|/usr/lib/udev|g' Makefile
make
}
# Install into the package prefix and add the initcpio hook.
do_package() {
cd "${CACHE_PATH}/${pkg_name}-${pkg_version}"
make DESTDIR="${pkg_prefix}" install
install -D -m644 ../initcpio-install-pcmcia ${pkg_prefix}/usr/lib/initcpio/install/pcmcia
}
| true
|
79d004ea35d14bef73c3743379ba32cf7aec043d
|
Shell
|
thohal/openqrm
|
/trunk/src/plugins/zabbix/bin/openqrm-zabbix-manager
|
UTF-8
| 1,591
| 3.0625
| 3
|
[] |
no_license
|
#!/bin/bash
#
# This file is part of openQRM.
#
# openQRM is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2
# as published by the Free Software Foundation.
#
# openQRM is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with openQRM. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright 2009, Matthias Rechenburg <matt@openqrm.com>
#
# add path when running from cron
export PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:$PATH
# Resolve the openQRM base directory relative to this script's location
# (script lives four levels below the base).
OPENQRM_SERVER_BASE_DIR=$(dirname $0)/../../../..
OPENQRM_SERVER_BASE_DIR=$(pushd $OPENQRM_SERVER_BASE_DIR > /dev/null && echo $PWD && popd > /dev/null)
# Pull in shared openQRM helpers and configuration, then the zabbix
# plugin's own functions and config.
. $OPENQRM_SERVER_BASE_DIR/openqrm/include/openqrm-functions
. $OPENQRM_SERVER_BASE_DIR/openqrm/etc/openqrm-server.conf
. $OPENQRM_SERVER_BASE_DIR/openqrm/include/openqrm-server-functions
. $OPENQRM_SERVER_BASE_DIR/openqrm/plugins/zabbix/include/openqrm-plugin-zabbix-functions
. $OPENQRM_SERVER_BASE_DIR/openqrm/plugins/zabbix/etc/openqrm-plugin-zabbix.conf
CURRENTDIR=`pwd`
export LANG=C
# let only root run this script
WHOAMI=`whoami`
if [ "$WHOAMI" != "root" ]; then
echo "ERROR: Please run this script as root!"
exit 6
fi
# get the ip config
# openqrm_server_get_config comes from the sourced server functions and
# presumably sets OPENQRM_SERVER_IP_ADDRESS — confirm against that file.
openqrm_server_get_config
export resource_openqrmserver=$OPENQRM_SERVER_IP_ADDRESS
# no additional functionality yet
| true
|
fb4af4daef59be98cbc1aeeb110f32a4d37bc521
|
Shell
|
romaonthego/dotfiles
|
/dotfiles/bash_profile
|
UTF-8
| 264
| 3.0625
| 3
|
[] |
no_license
|
# Source the user's auxiliary shell configuration files, skipping any
# that do not exist.  Order matters: .bashrc first, extensions last.
for _rcfile in "$HOME/.bashrc" \
               "$HOME/.bash_completion" \
               "$HOME/.git-completion.bash" \
               "$HOME/.bash_extended"; do
  if [ -f "$_rcfile" ]; then
    . "$_rcfile"
  fi
done
unset _rcfile
| true
|
a2e3663543a0a935cf2d863a2b5c219fc3aa6691
|
Shell
|
ahmedelhilali/dotfiles-3
|
/.scripts/Shell/culturacomlegenda.sh
|
UTF-8
| 166
| 2.578125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Scrape every <a class="downloadlink"> href from the page given as $1
# and download each target, keeping the server-suggested filename.
curl -s "$1" | grep -oP '(?<=\<a class="downloadlink" href=")http?.*[0-9](?=")' | while read -r l;do
# -nv: quiet; --content-disposition: honour the server's filename header.
wget -nv --content-disposition "$l"
done
| true
|
20f7bda22250290e2ec6ddd8b10fa2fb5dd5216a
|
Shell
|
RichardRanft/VBS
|
/Equinox/scripts/fix_perms.sh
|
UTF-8
| 1,765
| 4.1875
| 4
|
[] |
no_license
|
#!/bin/bash
# fix_perms.sh
#
# Boot & Root FS Build setup
#
# Sets permissions and extracts toolchains
#
# Use -e to extract toolchain tar files
# in addition to the standard permissions
# fixing. This should only need to be done
# once per computer.
#
# Note that the standard invocation
# is performed by the platform build
# scripts.
#
# NOTE: the script uses bash-only constructs ("function", [[ ]], ==),
# so the interpreter must be bash — the old "#! /bin/sh" shebang broke
# on systems where sh is not bash.

# Allow the caller to override DIR; default to this script's directory.
if [ -z "$DIR" ];then
	DIR=$(dirname "$0")
fi
TOOLS_TAR=$DIR/toolchain_stargames.tgz
TOOLS_DIR=/
MBTOOLS_TAR=$DIR/microblaze-elf-tools-20040603.tar.gz
MBTOOLS_DIR=/mbtools

# Abort unless both toolchain tarballs are present.
function check_tars()
{
	if [[ ! -e "$TOOLS_TAR" || ! -e "$MBTOOLS_TAR" ]]; then
		echo "Couldn't find necessary tar files:" >&2
		echo "$TOOLS_TAR" >&2
		echo "$MBTOOLS_TAR" >&2
		exit 1
	fi
}

# Warn (do not abort) when not running as root.
function check_root()
{
	if [ $(id -ur) -ne 0 ];then
		echo "Warning: Not root, tars will probably fail to extract" >&2
	fi
}

# Print a description ($1), run the remaining arguments as a command,
# and abort the whole script if the command fails.
function critical()
{
	echo "$1"
	shift
	echo "    $@"
	"$@"
	if [ $? -ne 0 ];then
		echo "Command '$@' failed, aborting." >&2
		exit 2
	fi
}

case "$1" in
	--extract|-e)
		extract=1;;
esac

if [ "$extract" == "1" ];then
	check_root
	check_tars
	critical "Extracting tools to $TOOLS_DIR..." tar xfz "$TOOLS_TAR" -C "$TOOLS_DIR"
	critical "Creating $MBTOOLS_DIR directory..." mkdir -p "$MBTOOLS_DIR"
	critical "Extracting mbtools to $MBTOOLS_DIR..." tar xfz "$MBTOOLS_TAR" -C "$MBTOOLS_DIR"
fi

critical "Setting execute permissions for bootloaders..." chmod +x "$DIR"/build/bootfs/*/make_bootloader.sh
critical "Setting execute permissions for patch script..." chmod +x "$DIR"/build/rootfs/sources/patch-kernel00.sh
critical "Setting execute permissions for utils/bin..." chmod -R +x "$DIR"/build/utils/bin/
critical "Setting execute permissions for utils/sbin..." chmod -R +x "$DIR"/build/utils/sbin/
| true
|
853b6087687a4e6f5e9c294b11b2a6d34e8dc5ac
|
Shell
|
facebookresearch/Private-ID
|
/etc/example/generate_cert.sh
|
UTF-8
| 1,653
| 3.3125
| 3
|
[
"Apache-2.0",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
#!/bin/bash
# Generate a throw-away CA plus server and client certificates for local
# TLS testing.  All output lands in ./dummy_certs next to this script.
set -e
directory="$( dirname -- "$0";)/dummy_certs";
# mkdir (without -p) fails if the directory already exists, so previous
# certificates are never silently overwritten.
mkdir "$directory" || exit
cd "$directory" || exit
# Create cnf file. DNS name is VERY important
tee ssl-extensions-x509.cnf << EOF
[v3_ca]
authorityKeyIdentifier = keyid,issuer
basicConstraints = CA:FALSE
keyUsage = Digital Signature, Non Repudiation, Key Encipherment, Data Encipherment
subjectAltName = @alt_names
[alt_names]
DNS.1 = localhost
EOF
#Generate a private key for the CA:
openssl genrsa 2048 > ca.key
#Generate the X509 certificate for the CA:
openssl req -new -x509 -nodes -days 365000 \
   -key ca.key \
   -out ca.pem \
   -subj "/C=US/ST=CA State/L=Menlo Park City/O=Meta Inc./CN=example.com"
#Generate the private key and certificate request of server:
# (the "server_requst.pem" filename typo is preserved; it is used
# consistently below.)
openssl req -newkey rsa:2048 -nodes -days 365 \
    -keyout server.key \
    -out server_requst.pem \
    -subj "/C=US/ST=CA State/L=Menlo Park City/O=Meta Inc./CN=server.example.com"
#Generate the X509 certificate for the server:
openssl x509 -req -days 365 \
    -in server_requst.pem \
    -out server.pem \
    -CA ca.pem \
    -CAkey ca.key \
    -CAcreateserial \
    -extensions v3_ca \
    -extfile ./ssl-extensions-x509.cnf
#Generate the private key and certificate request of client:
# NOTE(review): the client CN is also "server.example.com" — possibly
# intentional for localhost mTLS testing; confirm before reuse.
openssl req -newkey rsa:2048 -nodes -days 365 \
    -keyout client.key \
    -out client_request.pem \
    -subj "/C=US/ST=CA State/L=Menlo Park City/O=Meta Inc./CN=server.example.com"
#Generate the X509 certificate for the client:
openssl x509 -req -days 365 \
    -in client_request.pem \
    -out client.pem \
    -CA ca.pem \
    -CAkey ca.key \
    -CAcreateserial \
    -extensions v3_ca \
    -extfile ./ssl-extensions-x509.cnf
|
f5df53b4c2de02a546992ea18b61ed9e0df1dc77
|
Shell
|
ctison/config
|
/tusk/create-bin-install-template.sh
|
UTF-8
| 251
| 3.09375
| 3
|
[] |
no_license
|
#!/bin/bash
# Template for a binary-install script: resolve the latest GitHub
# release tag for user/repo, then download the corresponding asset.
umask 0077
shopt -s nullglob
set -euo pipefail
# Latest release tag via the GitHub API (requires jq).
VERSION=$(curl -fsS https://api.github.com/repos/user/repo/releases/latest | jq -r .tag_name)
echo "VERSION=$VERSION"
# "version" sub-command: print the resolved tag and stop.
if [ $# = 1 ] && [ "$1" = 'version' ]; then exit 0; fi
set -x
# Template placeholder — append the release asset URL when instantiating.
curl -fsSLO
| true
|
23807bfc2ac4801cd9a271ba16592931fc866d26
|
Shell
|
mattdarwinla/hand-sanitizers
|
/Lab 2/buildwords
|
UTF-8
| 271
| 3.28125
| 3
|
[] |
no_license
|
#! /bin/bash
# buildwords --- extracts and builds a list of Hawaiian words from an HTML page
#
# Usage: buildwords <file.html>
# Writes the sorted, de-duplicated word list to ./hwords.
#
# "$file" is quoted: with the old unquoted form an empty/space-containing
# argument made grep read stdin or split the path.
file="$1"
# Pipeline: keep table cells -> strip HTML tags -> normalise backtick to
# okina (') -> one token per line -> split on commas -> sort/uniq ->
# drop tokens containing letters outside the Hawaiian alphabet.
grep "<td>.*</td>" "$file" | sed "s/<[^>]*>//g" | sed "s/\`/'/g" | tr -cs '[:graph:]' '[\n*]' | tr ',' '[\n*]'| sort -u | grep -v "[^PpKk'MmNnWwLlHhAaEeIiOoUu]" > hwords
| true
|
8336da2825047d0aca318debbafd2480d82b33df
|
Shell
|
TheTask/TA_averages
|
/TA_averages.bash
|
UTF-8
| 1,106
| 4.03125
| 4
|
[] |
no_license
|
#!/bin/bash
# Compute each TA's grade average from "Your Score" lines found in the
# student .txt files under ./A3/<TA name>/*/, print it, and append
# "<first name>\t<average>" rows to results.txt (moved back to the
# starting directory at the end, for Excel to parse).
present=$(pwd) #remember pwd
cd ./A3/ #change to the subdir
for dir in ./*; #all TA's subdirs
do
cd "$dir" #change into current TA subdir
name=${dir:2} #get rid of "./"
firstname=$(echo $name | cut -f1 -d" ") #separate first name for the purpose of file
echo $name #print the name
grades=$(cat ./*/*txt | grep "Your Score" | cut -f3 -d" ") #opens .txt files and saves the grades
n=0
sum=0
for grade in $grades
do
sum=$( echo "$sum+$grade" | bc ) #add all grades
((n+=1)) #add how many students
done
cd .. #change back to parental dir
c=$(echo "($sum) / $n" | bc -l) #full unrounded number
d=$(echo $c | rev | cut -c 19- | rev) #deleting last 19 digits (rounding haha )
# NOTE(review): the rev|cut trick truncates rather than rounds, and
# assumes bc -l always emits at least 19 trailing digits — confirm.
echo -e Average: $d '\t''\t' "$sum/$n" #print it on screen for fun and sanity check alongside sum/n
echo "" #new line
echo -e $firstname '\t' $d >> ./results.txt #writing the first name, tab and the average into the file for Excel to parse
done
mv results.txt $present #move the result file to the original directory
| true
|
75f276b71a0fe89e83b8dd5f42886c6a963727c4
|
Shell
|
patczar/video-scripts
|
/src/bash/mencoder/vid-print-road-yt
|
UTF-8
| 3,507
| 3.84375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# (c) Patryk Czarnik
# Distributed under MIT License. See LICENCE file in the root directory for details.
# This script creates an image containing some texts, used on my road records.
usage="vid-print-road-yt [options]
where options are:
-h -- prints this usage message
-o output_file -- sets output file
-R color -- the color of the road number, depending on the road category. Defaults to white.
In Poland we mark state (national) roads with red, voyevodship (district) roads with yellow, motorways with blue...
Texts to be printed. All optional:
-r -- road number
-p -- place (city, town, etc.)
-m -- normal message
-M -- emphased message
-n -- normal message (second line)
-N -- emphased message (second line)
-x -- speed (of playback, e.g. 'x4')
"
# Parse command-line flags; each text option just stores its argument.
while getopts "ho:R:r:p:m:M:n:N:x:" opt; do
case "$opt" in
h|\?)
echo "$usage"
exit 1
;;
o) output="$OPTARG"
;;
R) road_color="$OPTARG"
;;
r) road="$OPTARG"
;;
p) place="$OPTARG"
;;
m) message="$OPTARG"
;;
M) emessage="$OPTARG"
;;
n) message2="$OPTARG"
;;
N) emessage2="$OPTARG"
;;
x) x="$OPTARG"
;;
esac
done
shift $(($OPTIND - 1))
# Use none for transparent background
background=none
#background=black
# This is the room left on the bottom of videos processed with e720 setting of my other script.
size=1280x56
if [ -z "$road_color" ]
then road_color=white
fi
#TODO refactor somehow to avoid code duplication...
# Settings for particular texts.
# Each group defines font, point size, fill color and x,y location for
# one text slot on the 1280x56 strip.
# r - usually used for the number of road
r_font="DejaVu-Sans-Bold"
r_size=40
r_color="$road_color"
r_loc=20,6
# p - usually used for the name of the town or other location
p_font="DejaVu-Sans-Condensed"
p_size=32
p_color=lightgreen
p_loc=160,8
# m - usually used for comments on the action
m_font="DejaVu-Sans-Condensed"
m_size=22
m_color=white
m_loc=500,30
# M - usually used for emphased comments on the action (near to accident etc.)
M_font="DejaVu-Sans-Condensed"
M_size=22
M_color=red
M_loc=500,30
# n - usually used for comments on the action (2nd line)
n_font="DejaVu-Sans-Condensed"
n_size=22
n_color=white
n_loc=500,4
# n - usually used for emphased comments on the action (2nd line)
N_font="DejaVu-Sans-Condensed"
N_size=22
N_color=red
N_loc=500,4
# x - usually used for playback speec indication
x_font="DejaVu-Sans"
x_size=32
x_color=grey
x_loc=1200,8
# The ImageMagick command line is built up as an array so every
# argument survives quoting intact.
declare -a cmd
# Append all given arguments to the cmd array.
function add_to_cmd() {
for arg in "$@"
do cmd+=("$arg")
done
}
add_to_cmd convert -size $size canvas:"$background" -gravity SouthWest
# For each text that was supplied, add the matching font/size/color/draw
# arguments to the convert invocation.
if [ -n "$road" ]
then
add_to_cmd -font "$r_font" -pointsize "$r_size" -fill "$r_color" -draw "text $r_loc '$road'"
fi
if [ -n "$place" ]
then
add_to_cmd -font "$p_font" -pointsize "$p_size" -fill "$p_color" -draw "text $p_loc '$place'"
fi
if [ -n "$message" ]
then
add_to_cmd -font "$m_font" -pointsize "$m_size" -fill "$m_color" -draw "text $m_loc '$message'"
fi
if [ -n "$emessage" ]
then
add_to_cmd -font "$M_font" -pointsize "$M_size" -fill "$M_color" -draw "text $M_loc '$emessage'"
fi
if [ -n "$message2" ]
then
add_to_cmd -font "$n_font" -pointsize "$n_size" -fill "$n_color" -draw "text $n_loc '$message2'"
fi
if [ -n "$emessage2" ]
then
add_to_cmd -font "$N_font" -pointsize "$N_size" -fill "$N_color" -draw "text $N_loc '$emessage2'"
fi
if [ -n "$x" ]
then
add_to_cmd -font "$x_font" -pointsize "$x_size" -fill "$x_color" -draw "text $x_loc '$x'"
fi
add_to_cmd "$output"
#showargs "${cmd[@]}"
# Echo the assembled command for the log, then execute it.
echo "${cmd[@]}"
"${cmd[@]}"
|
ac1a4dc73a9ff77da2506bd19ff315aff99ca07b
|
Shell
|
elobdog/scripts
|
/unboundblock.sh
|
UTF-8
| 810
| 3.78125
| 4
|
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/sh
# Fetch the oisd domain blocklist and convert it into an Unbound
# local-zone "always_nxdomain" configuration file.
#
# POSIX sh script: string comparisons use "=" — the previous "=="
# is a bashism that breaks on dash and other strict /bin/sh shells.
URL="https://dbl.oisd.nl"
TMPFILE=$(mktemp) || exit 1
BLOCKL="blockhost.conf"
UBOUND="/var/unbound"
DATE=$(date '+%Y%m%d')
OS=$(uname -s)
# Remove the temp file if interrupted.
trap 'rm -f $TMPFILE' INT HUP TERM
echo "====[script start: $(date)]===="
# Keep a dated backup of the previous list until the new one succeeds.
[ -f "$BLOCKL" ] && \
	mv "$BLOCKL" "$BLOCKL.$DATE"
if [ "$OS" = "OpenBSD" ]; then
	# use "-S noverifytime" in case of certain errors. man 1 ftp
	ftp -o "$TMPFILE" "$URL"
else
	wget -c --passive -O "$TMPFILE" "$URL"
fi
# Strip comments and blank lines, then wrap each domain in a
# local-zone rule that answers NXDOMAIN.
sed -e '/^#/d' -e '/^$/d' \
	-e 's/\(.*\)/local-zone: "\1" always_nxdomain/' \
	"$TMPFILE" > "$TMPFILE.TMP" \
	&& mv "$TMPFILE.TMP" "$BLOCKL" \
	&& rm -f "$TMPFILE"
# move the file to unbound's etc directory
if [ "$OS" = "OpenBSD" ]; then
	mv "$BLOCKL" "$UBOUND/etc/"
fi
# cleanup, since the new blocklist generated successfully
rm -f "$BLOCKL.$DATE"
echo "====[script end: $(date)]===="
| true
|
1f49ab81f8507d6ca1e5f3401ec9ee06c537825f
|
Shell
|
xinyu2/cs533
|
/hw1/testgr.sh
|
UTF-8
| 564
| 3.125
| 3
|
[] |
no_license
|
#!/bin/bash
# Run the ./g binary over four graph data files with colour-coded
# progress output, then merge all per-run logs into output/go.
if [[ -e data/d_10_15 ]]; then
echo 'file "d_10_15" exists.'
else
echo 'file "d_10_15" not exists.'
fi
# Data set files (naming suggests d_<n>_<m> size parameters — confirm).
d[0]=d_10_15
d[1]=d_90_2000
d[2]=d_100_4900
d[3]=d_1000_499500
# tput escape sequences for coloured progress banners.
red=`tput setaf 1`
green=`tput setaf 2`
yellow=`tput setaf 3`
blue=`tput setaf 4`
magenta=`tput setaf 5`
cyan=`tput setaf 6`
reset=`tput sgr0`
for i in {0..3}; do
echo "${yellow}run" "$i" "data\(${d[i]})" "${green}=========================================>>>${reset}"
./g "data/${d[i]}" >> output/og"$i"
done
# Concatenate the per-run logs into one file, clean up, and show it.
cat output/og* >> output/go
rm output/og*
cat output/go
|
a6cc15019a4c003aa3dce5b5f6bcb080761a13e7
|
Shell
|
Joystream/onchain-git-poc
|
/tests/go-git/test-push-update.sh
|
UTF-8
| 652
| 3.046875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Test pushing a fast-forward update
#
# Builds a bare target repo and a working source repo under
# /tmp/gitservice, pushes master once through gogitclient, commits
# again, and pushes the fast-forward update.
set -eo pipefail
make install
# Fresh scratch area for both repositories.
rm -rf /tmp/gitservice && mkdir -p /tmp/gitservice
cd /tmp/gitservice
git init -q --bare targetrepo.git
git init -q sourcerepo && cd sourcerepo
# debugf:// transport is presumably handled by gogitclient — confirm.
git remote add origin debugf:///tmp/gitservice/targetrepo.git
echo "#Hello World" > README.md
git add README.md && git commit -q -m"Start repo"
# First push force-creates the initial master ref ("+" prefix).
gogitclient push origin +refs/heads/master:refs/heads/master
echo "This is a test of pushing branch updates" >> README.md
echo "This is another file" > test.txt
git add README.md test.txt && git commit -q -m"Edit README"
# Second push is a plain fast-forward (no force).
gogitclient push origin refs/heads/master:refs/heads/master
| true
|
5cdd2be0eedd3ba4bb1a010e8755b914a48c231b
|
Shell
|
fengdanhuang/bin_fred
|
/SWV_Proc_WheatleyFile.sh
|
UTF-8
| 1,458
| 3.53125
| 4
|
[] |
no_license
|
#!/bin/bash
#Functionality: Transfer Wheatley's reduced results to unreduced results.
#
# Reads value/error pairs from Wheatley's result file ($1), multiplies
# them by HSB2^(order-1) to undo the hard-sphere reduction, prints both
# sets, and writes the unreduced coefficients to
# WheatleyTransfer_B<order>_l<lamda>.txt.
if [ $# -lt 2 ]; then
echo "usage: Command_Name Wheatley_file order lamda"
exit 1
fi
echo -e "\n***************************************************************************************\n"
echo " The program is: "$0
echo " The Wheatley's result file is: " $1
echo -e "\n"
file1=$1
order=$2
lamda=$3
M_PI=3.14159265358979323846
sigmaHSRef=1.0 #Wheatley specially use this value.
echo " NOP = $order"
echo " lamda = $lamda"
echo " M_PI = $M_PI"
echo " sigmaHSRef = $sigmaHSRef"
echo -e "\n"
# Shell parameters are passed into awk via -v (no string interpolation).
# HSB2 is the hard-sphere second virial coefficient (2*pi/3 * sigma^3);
# each coefficient is scaled by HSB2^(order-1).
awk -v order=$order -v lamda=$lamda -v M_PI=$M_PI -v sigmaHSRef=$sigmaHSRef 'BEGIN {
i = 0;
HSB2 = 2.0 * M_PI / 3.0 * sigmaHSRef * sigmaHSRef * sigmaHSRef;
printf " HSB2 = %f\n\n", HSB2;
}{
if ($1 != "Power"){
v[i] = $2;
e[i] = $3;
# printf " %e %e\n", v[i], e[i];
i++;
}
}END{
# printf "%d\n", NR;
printf " The original data from Wheatley file:\n"
# TotalLines=NR-1;
TotalLines=NR; #Note: if there is a head line, use NR-1; Otherwise, use NR.
for (i=0; i<TotalLines; i++){
printf " %d %e %e\n", i, v[i], e[i];
}
printf "\n\n The coefficients got from original data are:\n"
printf " HSB2^(order-1) = %e\n", HSB2^(order-1);
for (i=0; i<TotalLines; i++){
printf " %d %e %e\n", i, v[i]*HSB2^(order-1), e[i]*HSB2^(order-1);
printf " %e %e\n", v[i]*HSB2^(order-1), e[i]*HSB2^(order-1)>"WheatleyTransfer_B"order"_l"lamda".txt";
}
}' $file1
| true
|
b371a91fb30ae581b60d3baada2c940a18eec3d9
|
Shell
|
petronny/aur3-mirror
|
/php-intarray/PKGBUILD
|
UTF-8
| 881
| 2.59375
| 3
|
[] |
no_license
|
# Maintainer: Patrick Palka <patrick@parcs.ath.cx>
# Development: http://closure.ath.cx/aur-dev
# PKGBUILD for the PHP "intarray" extension (efficient integer arrays).
_name=intarray
pkgname="php-$_name"
pkgver=1.0.0
pkgrel=2
pkgdesc="Efficient integral arrays for PHP"
arch=('i686' 'x86_64')
url="http://opensource.dynamoid.com/"
license=('unknown')
depends=('php')
_ininame="$_name.ini"
# The loader config is listed in backup= so local edits survive upgrades.
_inifile="etc/php/conf.d/$_ininame"
backup=("$_inifile")
_filename="$_name-$pkgver"
source=("http://opensource.dynamoid.com/$_filename.tar.gz")
md5sums=('07955eb5e89ff0eadd41d988d1fb7d29')
_builddir="$srcdir/$_filename"
build() {
cd "$_builddir"
phpize
./configure --prefix=/usr --enable-intarray
make
#make test
}
package() {
cd "$_builddir"
make INSTALL_ROOT="$pkgdir" install
# Ship the extension disabled by default (note the leading ";").
echo ";extension=$_name.so" > "$_ininame"
install -vDm644 "$_ininame" "$pkgdir/$_inifile"
install -vDm644 COPYING "$pkgdir/usr/share/licenses/$pkgname/COPYING"
}
| true
|
c01e68568ee7ddc75202f057ee819fd70166f44a
|
Shell
|
huangynj/awips2
|
/rpms/awips2.core/Installer.collab-dataserver/configuration/etc/init.d/collab-dataserver
|
UTF-8
| 974
| 4.03125
| 4
|
[
"LicenseRef-scancode-public-domain"
] |
permissive
|
#!/bin/bash
#
# Startup script for the HTTP Collaboration Dataserver servlet
#
# chkconfig: - 85 15
# description: Servlet for storing and retrieving collaboration data objects.
# processname: collabserver
# pidfile: /var/run/collabserver.pid

# Source function library.
. /etc/rc.d/init.d/functions

COLLAB_BIN=/awips2/collab-dataserver/bin
PROG=`basename $0`

# Launch the dataserver; start.sh backgrounds it via nohup.
start() {
	echo $"Starting HTTP Collaboration Dataserver"
	# start.sh script starts the service using nohup.
	${COLLAB_BIN}/start.sh
	return $?
}

# Report whether the dataserver is running (delegated to status.sh).
status() {
	${COLLAB_BIN}/status.sh
	return $?
}

# Stop the dataserver.
stop() {
	echo $"Stopping HTTP Collaboration Dataserver"
	${COLLAB_BIN}/stop.sh
	return
}

# See how we were called.
case "$1" in
	start)
		start
		RETVAL=$?
		;;
	stop)
		stop
		RETVAL=$?
		;;
	status)
		# Fixed: previously called "status $httpd" with an undefined
		# variable left over from the httpd init-script template.
		status
		RETVAL=$?
		;;
	restart)
		stop
		start
		RETVAL=$?
		;;
	*)
		echo $"Usage: $PROG {start|stop|restart|status}"
		exit 1
		;;
esac
exit $RETVAL
| true
|
d61123380297aecdd83d0276f6bb0ece33bbe7ab
|
Shell
|
kissthink/repo-slacky
|
/slackware/development/apache-ant/apache-ant.SlackBuild
|
UTF-8
| 3,540
| 3.453125
| 3
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
#!/bin/sh
# Heavily based on the Slackware 14.0 SlackBuild
# Written by baldelario ~at~ gmail ~dot~ com ( www.slacky.eu )
# Slackware build script for Apache Ant
# Official Site: http://ant.apache.org
# Redistribution and use of this script, with or without modification, is
# permitted provided that the following conditions are met:
#
# 1. Redistributions of this script must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AS IS AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Exit the script on errors
set -e
trap 'echo "$0 FAILED at line ${LINENO}"' ERR
# Catching variables
PKGNAME=${PKGNAME:-apache-ant}
VERSION=${VERSION:-1.8.4}
BUILD=${BUILD:-3}
TAG=${TAG:-sl}
ARCH=${ARCH:-i486}
SOURCE="http://www.apache.org/dist/ant/binaries/$PKGNAME-$VERSION-bin.tar.bz2"
CWD=${CWD:-`pwd`}
TMP=${TMP:-/tmp/buildpkgs/$PKGNAME}
PKG=${PKG:-$TMP/package-$PKGNAME}
OUTPUT=${OUTPUT:-$CWD}
CHOST="i486"
# Per-arch compiler flags and library directory suffix.
if [ "$ARCH" = "i486" ]; then
  SLKCFLAGS="-O2 -march=i486 -mtune=i686"
  SLKLDFLAGS=""
  LIBDIRSUFFIX=""
elif [ "$ARCH" = "i586" ]; then
  SLKCFLAGS="-O2 -march=i586 -mtune=i686"
  SLKLDFLAGS=""
  LIBDIRSUFFIX=""
elif [ "$ARCH" = "i686" ]; then
  SLKCFLAGS="-O2 -march=i686 -mtune=i686"
  SLKLDFLAGS=""
  LIBDIRSUFFIX=""
elif [ "$ARCH" = "x86_64" ]; then
  SLKCFLAGS="-O2 -fPIC"
  SLKLDFLAGS="-L/usr/lib64"
  LIBDIRSUFFIX="64"
  CHOST="x86_64"
fi
# Source file availability
if [ ! -e $CWD/$PKGNAME-$VERSION-bin.tar.bz2 ] ; then
  wget $SOURCE
fi
# Create working directories
rm -rf $TMP
mkdir -p $TMP $PKG $OUTPUT
# Package building
cd $TMP
tar xvjf $CWD/$PKGNAME-$VERSION-bin.tar.bz2
cd $PKGNAME-$VERSION
chmod -R u+w,go+r-w,a-s .
chown -R root:root .
mkdir -p $PKG/etc/profile.d
mkdir -p $PKG/usr/{bin,lib/ant,share/java}
cp -rf * $PKG/usr/lib/ant
ln -sf /usr/lib/ant/bin/ant $PKG/usr/bin/ant
# Ant's install prefix inside the package; the profile.d scripts export
# the same path at runtime.  Fixed: ANT_HOME was never set before, so
# the glob below never matched real jars and created one bogus
# "/usr/share/java/*.jar" symlink instead.
ANT_HOME=/usr/lib/ant
for FILE in $PKG/$ANT_HOME/lib/*.jar ; do
  # Skip the literal pattern when no jar matches (nullglob is not set).
  [ -e "$FILE" ] || continue
  ln -sf $ANT_HOME/lib/$(basename $FILE) $PKG/usr/share/java/$(basename $FILE)
done
# Drop Windows launchers and bundled docs.
rm -rf $PKG/usr/lib/ant/bin/{*.bat,*.cmd}
rm -rf $PKG/usr/lib/ant/docs
( cd $PKG/etc/profile.d; tar xzvf $CWD/profile.d.tar.gz
  chmod 755 $PKG/etc/profile.d/*
)
# Adding documentation
mkdir -p $PKG/usr/doc/$PKGNAME-$VERSION
mv $PKG/usr/lib/ant/{KEYS,LICENS*,README,NOTICE,WHATSNEW,INSTALL} \
   $PKG/usr/doc/$PKGNAME-$VERSION
cat $CWD/$PKGNAME.SlackBuild > $PKG/usr/doc/$PKGNAME-$VERSION/$PKGNAME.SlackBuild
cat $CWD/slack-desc > $PKG/usr/doc/$PKGNAME-$VERSION/slack-desc
# Adding slack-desc and slack-required
mkdir -p $PKG/install
cat $CWD/slack-desc > $PKG/install/slack-desc
if [ -x "$(which requiredbuilder 2>/dev/null)" ];then
  requiredbuilder -y -v -s $CWD $PKG
fi
# Make the package
cd $PKG
chown -R root:root $PKG
/sbin/makepkg -l y -c n $OUTPUT/$PKGNAME-$VERSION-$ARCH-$BUILD$TAG.${PKGTYPE:-txz}
# Clean up the extra stuff
if [ "$1" = "--cleanup" ]; then
  rm -rf $TMP $PKG
fi
|
b44b3f7eb352acb4abd5ea95e3fe9f55a0d3e6f4
|
Shell
|
tomekr/vulny
|
/provision.sh
|
UTF-8
| 1,256
| 2.546875
| 3
|
[] |
no_license
|
# Provision an Ubuntu (xenial) box with Ruby, Passenger and PostgreSQL,
# then deploy the deliberately-vulnerable "vulny" Rails app.
apt update
apt install -y apt-transport-https ca-certificates
# Add the Phusion Passenger APT repository key and source list.
apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 561F9B9CAC40B2F7
echo "deb https://oss-binaries.phusionpassenger.com/apt/passenger xenial main" > /etc/apt/sources.list.d/passenger.list
apt update
apt upgrade -y
apt install -y ruby ruby-dev build-essential postgresql libpq-dev libsqlite3-dev nodejs passenger
gem install --no-ri --no-rdoc bundler
# Dedicated system user that owns the application checkout.
adduser --disabled-password --gecos 'Vulny user' vulny
git clone https://github.com/tomekr/vulny ~vulny/vulny
cd ~vulny/vulny
git checkout ui-change
bundle update
# Write the Rails database config (dev + prod share one PG database).
echo -e "default: &default\n adapter: postgresql\n encoding: unicode\n database: vulny\n pool: 5\n\ndevelopment:\n <<: *default\n\nproduction:\n <<: *default" > config/database.yml
mkdir log tmp
chown vulny:vulny log tmp
sudo -u postgres createuser --createdb vulny
sudo -u vulny rake db:setup
echo -e '{\n "environment":"development",\n "port":80,\n "daemonize":true,\n "user":"vulny"\n}' > Passengerfile.json
# After setup, reduce the app role to minimal read-only privileges.
sudo -u postgres psql vulny -c "REVOKE ALL PRIVILEGES ON DATABASE vulny FROM vulny;"
sudo -u postgres psql vulny -c "GRANT SELECT ON users, schema_migrations TO vulny;"
sudo -u postgres psql vulny -c "ALTER ROLE vulny WITH NOCREATEDB;"
passenger start
|
7a50d68310a5b0a3837b5f3b7cf5b7893cff22cf
|
Shell
|
zepto88/scripts
|
/addresolution.sh
|
UTF-8
| 342
| 3.21875
| 3
|
[] |
no_license
|
#!/bin/bash
# Register and apply a new X11 display mode computed with gtf(1).
#
# Usage: addresolution <width> <height> <rate> <xrandr output>
if [[ "$@" == "" || "$#" != 4 ]]; then
    echo "Usage: addresolution <width> <height> <rate> <xrandr output>"
    # Fixed: bail out here — the commands below are meaningless
    # without exactly four arguments.
    exit 1
fi
# Mode name: second field of the gtf "Modeline" line, quotes stripped.
name=$(gtf "$1" "$2" "$3" | awk '/Modeline/ {print $2}' | tr -d "\"")
# Timing parameters: everything after the quoted mode name.
args=$(gtf "$1" "$2" "$3" | awk -F"\".*\"" '/Modeline/ {print $2}')
# $args must word-split into separate xrandr arguments — leave unquoted.
xrandr --newmode $name $args
xrandr --addmode "$4" $name
xrandr --output "$4" --mode $name
| true
|
ecf20ef5ed848b3a446177ca756b4f36cb6ec768
|
Shell
|
apomorph/scripts
|
/demo/traditional-multi-module/git-pull.sh
|
UTF-8
| 962
| 2.96875
| 3
|
[] |
no_license
|
#!/bin/bash
# Pull the latest code for the demo-java project, discarding any local
# changes to the parent pom.xml first so the pull cannot conflict.

# Load environment variables into the current shell.
# "source /etc/profile" runs the file's commands in this bash process
# (no execute permission needed); "." is the Bourne-shell equivalent —
# source is the bash built-in form.
source /etc/profile

# pushd <dir> pushes the directory onto the stack and switches to it;
# popd pops the top entry and switches back to the previous directory.
# Here: enter /data/code/demo-java, revert local pom.xml edits (to
# prevent merge conflicts), pull, then return to the original directory.
pushd /data/code/demo-java && git checkout /data/code/demo-java/pom.xml && git pull && popd

# cp: copy; -f forces overwrite.  (Disabled pom backup step.)
#/bin/cp -f /data/code/demo-java/pom.xml /data/code/demo-java/pom.xml.1

# sed: stream editor — sed [options] 'command' file(s).
# -i edits the file in place instead of writing to stdout.
# sed -i 's/old/new/' file
# (Disabled example: swap an IP address inside pom.xml.)
#sed -i 's/【ip1】/【ip2】/' /data/code/demo-java/pom.xml
| true
|
f5cb909159dcb4b7e3ee4a7ffed85b960303e90f
|
Shell
|
sharils/home
|
/shell_plugins/d.sh
|
UTF-8
| 2,544
| 3.140625
| 3
|
[
"Unlicense"
] |
permissive
|
#!/usr/bin/env sh
# d: personal Docker/date/direnv dispatcher.  The first argument selects
# a sub-command (default "i"); anything unrecognised falls through to
# the real "docker" binary.  Several arms delegate to helper scripts
# under $SHARILS_HOME/shell_plugins/d/.
# NOTE(review): date flags (-v, -jf) are BSD/macOS style — confirm this
# is only run on macOS.
d() {
cmd="${1:-i}" && shift
case $cmd in
-1) (set -x && date -ujf%s -v-1S 0 "${@:-+%+%t%G-W%V}") ;;
# Date arithmetic: d -v <adjust> [date] [format].
-v)
[ $# -eq 0 ] && echo 'd -v +3d 2023-02-24 %F' >&2 && return $?
v="$1" && f="${3:-%F}" && d="${2:-$(date +"$f")}" && shift 3
date -jf"$f" -v"$v" "$d" +"$f"
;;
0) date -ujf%s 0 "${@:-+%FT%T}" ;;
F) date +%F ;;
I) date -Iseconds "$@" | sed 's/:00$//;s/+00$/Z/' | tr -d '[:space:]' ;;
a) direnv allow ;;
b) "$SHARILS_HOME/shell_plugins/d/b.sh" "$@" ;;
c) "$SHARILS_HOME/shell_plugins/d/c.sh" "$@" ;;
cmd) "$SHARILS_HOME/shell_plugins/d/cmd.sh" "$@" ;;
# Print Dockerfile COPY lines for the current directory's contents.
copy)
z t_init
(
g cl t --quiet "$PWD"
find -- * -type d -depth 0 -exec echo COPY {} /workdir/{} \;
echo "COPY $(find -- * ! -name CHANGELOG.md ! -name Dockerfile ! -name README.md ! -name LICENSE ! -name docker-compose.yml -type f -depth 0 -exec echo {} \+) /workdir"
)
;;
default.conf.template) "$SHARILS_HOME/shell_plugins/d/default.conf.template.sh" "$@" ;;
# Edit Dockerfile (f), .dockerignore (i) or the direnv .envrc (e).
e)
case "${1:-e}" in
f) $EDITOR Dockerfile ;;
i) $EDITOR .dockerignore ;;
e) direnv edit . && chmod 600 .envrc ;;
esac
;;
i) "$SHARILS_HOME/shell_plugins/d/i.sh" "$@" ;;
# Django shortcuts: "d j sp [name]" runs startproject, else manage.py.
j)
case "$1" in
sp)
cmd="${2:-"$(basename "$PWD" | sed 's/[^[:alnum:]]\{1,\}/_/g')"}" && shift 2
d j startproject --verbosity 2 "$cmd" "${@:-.}"
;;
*) p r p -mdjango "$@" ;;
esac
;;
l) d logs "$@" ;;
o) open -b com.docker.docker ;;
r) "$SHARILS_HOME/shell_plugins/d/r.sh" "$@" ;;
q) osascript -e 'quit app "Docker"' ;;
# "d s p" -> docker system prune; otherwise docker system <args>.
s)
case "$1" in
p) shift && set -- prune "$@" ;;
esac
d system "$@"
;;
v) d volume "$@" ;;
x) d exec "$@" ;;
# Run swagger-codegen in a container; a known language name expands
# into a full "generate" invocation against the petstore spec.
swagger-codegen)
case "$1" in
dart | aspnetcore | csharp | csharp-dotnet2 | go | go-server | dynamic-html | html | html2 | java | jaxrs-cxf-client | jaxrs-cxf | inflector | jaxrs-cxf-cdi | jaxrs-spec | jaxrs-jersey | jaxrs-di | jaxrs-resteasy-eap | jaxrs-resteasy | micronaut | spring | nodejs-server | openapi | openapi-yaml | kotlin-client | kotlin-server | php | python | python-flask | r | ruby | scala | scala-akka-http-server | swift3 | swift4 | swift5 | typescript-angular | typescript-axios | typescript-fetch | javascript)
set -- generate --lang "$1" --output "/local/$1" --input-spec "${@:-https://petstore.swagger.io/v2/swagger.json}"
;;
esac
d run --rm --volume "${PWD}:/local" parsertongue/swagger-codegen-cli "${@:-langs}"
;;
# Fallback: pass everything straight to docker.
*) docker "$cmd" "$@" ;;
esac
}
| true
|
a75ec48e1819524b3a643fa408fdd7f27f27e216
|
Shell
|
jbartok/hazelcast
|
/distribution/src/bin-filemode-755/hz-stop
|
UTF-8
| 300
| 3.46875
| 3
|
[
"LicenseRef-scancode-hazelcast-community-1.0",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash
# Stop all Hazelcast member processes running on this host.
#
# Exits 1 when no member process is found; otherwise sends SIGTERM
# (graceful shutdown) and reports the affected PIDs.

# pgrep -f matches the full command line, replacing the fragile
# "ps | grep pattern | grep -v grep" pipeline.
PIDS=$(pgrep -f com.hazelcast.core.server.HazelcastMemberStarter)
if [ -z "$PIDS" ]; then
    echo "No Hazelcast server found to stop"
    exit 1
else
    # $PIDS is intentionally unquoted so multiple PIDs word-split
    # into separate kill arguments.
    kill -s TERM $PIDS
    echo "Stopped Hazelcast instances with the following PIDs:"
    echo "$PIDS"
fi
| true
|
91993b4eb2fa97f6f9e3cc3cafa8e16de1aa7324
|
Shell
|
snowfox1939/lamure
|
/apps/mesh_preprocessing/mesh_preprocessing.sh
|
UTF-8
| 1,095
| 3.171875
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# Requires path of OBJ as argument
#
# Runs the lamure mesh-LOD preprocessing pipeline (chart creation and
# BVH hierarchy build) over the OBJ model given as $1.
#lamure directory
#SCRIPT_CALL=$0
#LAMURE_DIR=$(basename -- "$SCRIPT_CALL")
#alternatively, use user specified directory:
LAMURE_DIR=~/svn/lamure/install/bin/
############################
# user settings
############################
# charting:
KDTREE_TRI_BUDGET=24000
COST_THRESHOLD=0.05 # max cost
# BVH hierarchy creation
TRI_BUDGET=16000
#maximum output texture size
MAX_FINAL_TEX_SIZE=8192
#dilations
NUM_DILATIONS=4096
############################
echo "RUNNING MESHLOD PIPELINE"
echo "------------------------"
SRC_OBJ=$1
echo "Using obj model $SRC_OBJ"
#create path to obj file
OBJPATH="$SRC_OBJ"
#convert textures to png if necessary
#echo "Converting jpgs to pngs"
#mogrify -format png *.jpg
#flip all texture images
#echo "Flipping texture images"
#mogrify -flip *.png
echo "Running chart creation with file $SRC_OBJ"
echo "-----------------------------------------"
# time the preprocessing run; flags map to the user settings above.
time ${LAMURE_DIR}lamure_mesh_preprocessing -f $OBJPATH -tkd $KDTREE_TRI_BUDGET -co $COST_THRESHOLD -tbvh $TRI_BUDGET -multi-max $MAX_FINAL_TEX_SIZE
| true
|
402607e5cf998ed249744efada3fb47b7476d379
|
Shell
|
AnshumanSrivastavaGit/burmatscripts
|
/bash/bitnami-wp/download_backups.sh
|
UTF-8
| 888
| 3.53125
| 4
|
[] |
no_license
|
#!/bin/bash
# DOWNLOAD BACKUPS
# (run on staging server)
# Sanity-checks that the local /etc/hosts entry for the primary server still
# matches public DNS, then pulls the backup files over scp.
# Public A record for the site, resolved via DNS.
PUB_DNS_RECORD=`dig +short www.burmat.co`
# IP pinned for the primary host in /etc/hosts (first field of the line).
LOC_DNS_RECORD=`cat /etc/hosts | grep burmat-primary.co | cut -f1 -d ' '`
BACKUP_DIR="/apps/aws/backups"
SSH_KEY="/apps/aws/keys/burmatcorp.pem"
echo "!! WARNING: VERIFY PRIMARY IP BEFORE CONTINUING !!"
echo "These two IP's should match:"
echo " PUBLISHED DNS RECORD: $PUB_DNS_RECORD"
echo " LOCAL DNS RECORD: $LOC_DNS_RECORD"
echo " "
echo "If they do not, QUIT this script and go update your '/etc/hosts' file to contain:"
echo "$PUB_DNS_RECORD burmat-primary.co"
echo " "
echo "______________________________________________"
# Manual gate: the operator is expected to compare the two IPs above.
read -p "Press ENTER to continue backup, CTRL+C to quit."
cd $BACKUP_DIR
# Copy all bak* files from the primary's /tmp/backup into $BACKUP_DIR.
scp -i $SSH_KEY bitnami@burmat-primary.co:/tmp/backup/bak* .
echo " "
echo "[>] download of backup files completed!"
echo "[*] don't forget to run 'remove_backup.sh' on primary server."
| true
|
646eb9810a345f520900411fbdc6070981e579c7
|
Shell
|
martinohmann/bin-pub
|
/volume
|
UTF-8
| 808
| 4.0625
| 4
|
[
"LicenseRef-scancode-other-permissive"
] |
permissive
|
#!/bin/bash
#
# wrapper script for amixer volume
#
# author: Martin Ohmann <martin@mohmann.de>
# master control name
master="Master"
# volume up/down step percentage
volstep="5"
# Print usage and supported sub-commands.
usage() {
printf "usage: %s [up|down|toggle|mute|unmute|get|status] [-v]\n" "$(basename $0)"
printf " -v verbose\n"
}
# Require at least one sub-command.
[ $# -lt 1 ] && { usage; exit 1; }
# With -v, amixer output goes to stdout; otherwise it is discarded.
if [ $# -ge 2 ] && [ $2 = '-v' ]; then
outfd=/dev/stdout
else
outfd=/dev/null
fi
# Merge stderr into stdout so amixer errors follow the same verbosity rule.
exec 2>&1
case "$1" in
up)
amixer -D pulse sset $master ${volstep}%+ > $outfd ;;
down)
amixer -D pulse sset $master ${volstep}%- > $outfd ;;
toggle|mute|unmute)
# $1 is passed straight through: amixer accepts these as mute states.
amixer -D pulse sset $master $1 > $outfd ;;
get)
# Extract the percentage from the last line, e.g. "[42%]" -> "42".
amixer -D pulse sget $master | tail -n1 |sed -r 's/.*\[(.*)%\].*/\1/' ;;
status)
amixer -D pulse sget $master ;;
*)
usage; exit 1 ;;
esac
exit 0
| true
|
5c94b299db68f16ab960f72531025e9632b1c63c
|
Shell
|
rojekabc/gencert
|
/reCert
|
UTF-8
| 3,058
| 3.90625
| 4
|
[] |
no_license
|
#!/bin/sh
# reCert - re-issue (re-certify) an existing certificate under a new name,
# reusing the old key and configuration.
# argument 1 - certificate name for recertification
# argument 2 - output certificate name
#
# Review fixes:
#  * the final DER->PEM conversion now reads the freshly signed $2.der.cer
#    (it previously read $1.der.cer, i.e. the OLD certificate, so the PEM
#    output never matched the newly issued certificate)
#  * "exit -1" replaced by "exit 1" (negative codes are illegal in POSIX sh)
#  * "source" replaced by "." ("source" is a bashism; this runs under /bin/sh)
#  * positional-parameter tests are quoted so empty/space-containing
#    arguments do not break "test"

# check openssl command exists
OPENSSL=`which openssl`;
if test "$?" != "0"; then
	echo "Cannot find openssl";
	exit 1;
elif test -z "$OPENSSL"; then
	echo "Cannot find openssl";
	exit 1;
elif ! test -x "$OPENSSL"; then
	echo "Cannot execute openssl (no rights)";
	exit 1;
fi
if test -z "$1"; then
	echo "No certificate name";
	exit;
fi
if test -z "$2"; then
	echo "No output name of new certificate after recertification process"
	exit;
fi
echo "Creating configuration file"
CERTPROPFILE="$2.certconf";
if test -f "$CERTPROPFILE"; then
	# Load user configuration, if exists
	. ./$CERTPROPFILE
elif test -f "$1.certconf"; then
	# Use previous configuration
	cp $1.certconf $CERTPROPFILE;
	. ./$CERTPROPFILE
fi
# Fill in defaults for any setting the configuration file did not provide.
if test -z "$CERTKEYBITS"; then
	CERTKEYBITS='1024';
fi
if test -z "$CERTKEYTYPE"; then
	CERTKEYTYPE='RSA';
fi
if test -z "$CERTCOUNTRYNAME"; then
	CERTCOUNTRYNAME='PL';
fi
if test -z "$CERTLOCALITYNAME"; then
	CERTLOCALITYNAME='Unknown';
fi
if test -z "$CERTORGANIZATIONALNAME"; then
	CERTORGANIZATIONALNAME='Unknown';
fi
if test -z "$CERTCOMMONNAME"; then
	CERTCOMMONNAME="$1";
fi
if test -z "$CERTEMAIL"; then
	CERTEMAIL="unknown@email.pl";
fi
if test -z "$CERTDAYS"; then
	CERTDAYS="30";
fi
if test -z "$CERTPKCS12PASS"; then
	CERTPKCS12PASS="test";
fi
# Persist the effective configuration for the next run.
echo "CERTNAME='$1'" > $CERTPROPFILE;
echo "CERTKEYBITS='$CERTKEYBITS'" >> $CERTPROPFILE;
echo "CERTCOUNTRYNAME='$CERTCOUNTRYNAME'" >> $CERTPROPFILE;
echo "CERTLOCALITYNAME='$CERTLOCALITYNAME'" >> $CERTPROPFILE;
echo "CERTORGANIZATIONALNAME='$CERTORGANIZATIONALNAME'" >> $CERTPROPFILE;
echo "CERTCOMMONNAME='$CERTCOMMONNAME'" >> $CERTPROPFILE;
echo "CERTEMAIL='$CERTEMAIL'" >> $CERTPROPFILE;
echo "CERTKEYTYPE='$CERTKEYTYPE'" >> $CERTPROPFILE;
echo "CERTDAYS='$CERTDAYS'" >> $CERTPROPFILE;
echo "CERTPKCS12PASS='$CERTPKCS12PASS'" >> $CERTPROPFILE;
chmod 700 $CERTPROPFILE;
echo "Linking keys"
# Reuse the old private key under the new name.
ln -s $1.pem.key $2.pem.key
echo "Generating request"
rm -f $2.cnf;
# Instantiate the openssl config template: eval expands the $CERT* variables
# embedded in default.cer. NOTE: this trusts the template's contents.
while read line; do
	eval echo $line >> $2.cnf;
done < "default.cer";
openssl req -out $2.req -new -key $2.pem.key -keyform PEM -days $CERTDAYS -config $2.cnf -batch
# check certificate is self-signed or signed by other CA
# Convention: a CA exists if <current-dir-name>.ca is present.
DIRNAME=`pwd`;
DIRNAME=${DIRNAME##*/};
if test -f "$DIRNAME.ca"; then
	echo "Generating certificate signed by [$DIRNAME] CA"
	# signed by CA
	openssl x509 -inform PEM -outform DER -in $2.req -out $2.der.cer -CA $DIRNAME.pem.cer -CAkey $DIRNAME.pem.key -CAserial serial -CAcreateserial -req -extensions usr_cert -extfile $2.cnf
else
	# self signed
	openssl x509 -inform PEM -outform DER -in $2.req -out $2.der.cer -req -extensions usr_cert -extfile $2.cnf -signkey $2.pem.key
fi
# FIX: convert the NEW certificate ($2.der.cer) to PEM, not the old $1.der.cer
openssl x509 -inform DER -outform PEM -in $2.der.cer -out $2.pem.cer
echo "Generating PKCS#8"
openssl pkcs8 -in $2.pem.key -out $2.der.key -inform PEM -outform DER -topk8 -nocrypt
echo "Generating PKCS#12"
openssl pkcs12 -out $2.pfx -clcerts -nodes -export -in $2.pem.cer -inkey $2.pem.key -password pass:$CERTPKCS12PASS
| true
|
72b819701ca379b6bfb2f781007343240b66e39b
|
Shell
|
lega911/ijson
|
/example/bash_worker/worker.sh
|
UTF-8
| 366
| 3.3125
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Example ijson worker: polls the broker once a second for the
# "run/command" task and reacts to "start" / "stop" payloads.
while true
do
    sleep 1
    # Fetch the next task; on transport failure back off (~10s total).
    if ! task=$(curl -s localhost:8001/run/command -H 'Type: get'); then
        echo "Server error"
        sleep 9
        continue
    fi
    case "$task" in
        start) echo START ;;
        stop)  echo STOP ;;
    esac
done
| true
|
ea35dec3a8ed3eaa4b7df57cd39ba51cd2f73f15
|
Shell
|
RasppleII/rasppleii-history
|
/website_2015-10/a2cloud/setup/raspbian-update.txt
|
UTF-8
| 9,309
| 4.03125
| 4
|
[] |
no_license
|
#!/bin/bash
# raspbian-update
# updates Raspbian to latest version, including NOOBS if installed
# May be executed OR sourced: the "[[ $0 == -bash ]] && return || exit"
# pattern below picks return vs exit accordingly.
[[ -f /usr/bin/raspi-config ]] && isRpi=1 || isRpi=
if [[ ! $isRpi ]]; then
echo "This ain't a Raspberry Pi."
[[ $0 == "-bash" ]] && return 1 || exit 1
fi
# ---- Parse command-line flags (see usage text below) ----
skipRepoUpdate=
autoYes=
updateA2Cloud=
updateA2Server=
while [[ $1 ]]; do
if [[ $1 == "-r" ]]; then
shift
skipRepoUpdate="-r"
elif [[ $1 == "-y" ]]; then
shift
autoYes="-y"
elif [[ $1 == "-n" ]]; then
shift
noobsOnly="-n"
elif [[ $1 == "a2cloud" ]]; then
shift
updateA2Cloud=1
elif [[ $1 == "a2server" ]]; then
shift
updateA2Server=1
elif [[ $1 ]]; then
echo "options:"
echo "-y: auto-answer yes to all prompts and don't prompt for restart"
echo "-r: don't update package repositories"
echo "-n: update NOOBS only; don't update Raspbian"
echo "a2cloud : update A2CLOUD when complete"
echo "a2server: update A2SERVER when complete"
[[ $0 == "-bash" ]] && return 1 || exit 1
fi
done
# ---- Detect a NOOBS-style SD card by its exact partition layout/types ----
# (extended + 85 + 83 + FAT + 83 at fixed offsets in fdisk output).
noobs=
readarray -t partitions < <(sudo fdisk -l | grep '^/dev')
if [[ \
${partitions[0]:0:14} == "/dev/mmcblk0p1" && ${partitions[0]:57:2} == " e" &&
${partitions[1]:0:14} == "/dev/mmcblk0p2" && ${partitions[1]:57:2} == "85" &&
${partitions[2]:0:14} == "/dev/mmcblk0p3" && ${partitions[2]:57:2} == "83" &&
${partitions[3]:0:14} == "/dev/mmcblk0p5" && ${partitions[3]:57:2} == " c" &&
${partitions[4]:0:14} == "/dev/mmcblk0p6" && ${partitions[4]:57:2} == "83" ]]; then
noobs=" and the NOOBS install manager"
fi
# ---- Interactive confirmation unless -y was given ----
if [[ ! $autoYes ]]; then
echo
echo "You are about to update your SD card to the latest version of the"
echo "Raspbian operating system${noobs}."
echo
echo "This may take an hour or more, and will require restarting when complete."
echo "You might want a backup before continuing in case it doesn't go as planned."
echo
echo -n "Update Raspbian? "
read
if [[ ${REPLY:0:1} != "Y" && ${REPLY:0:1} != "y" ]]; then
[[ $0 == "-bash" ]] && return 2 || exit 2
fi
fi
origDir="$PWD"
cd /tmp
if [[ ! $skipRepoUpdate ]]; then
echo "Updating package repositories..."
sudo apt-get -y update > /dev/null
else
echo "Not updating package repositories..."
echo
fi
if [[ ! $noobsOnly ]]; then
# Reclaim space before checking: need roughly 400 MB free to upgrade.
{ cd /tmp; sudo apt-get -y autoremove; sudo apt-get -y autoclean; sudo apt-get -y clean; } > /dev/null
freeSpace=$(df / | tail -1 | awk '{ print $4 }')
if (( $freeSpace < 400000 )); then
if dpkg -l | grep -q wolfram-engine; then
# Wolfram is the largest removable package; offer to purge it.
if [[ ! $autoYes ]]; then
echo "In order to create enough space on your SD card to upgrade,"
echo "the Wolfram Language and Mathematica software packages must be removed."
echo "If you don't know what these are, this won't affect you at all."
echo
echo -n "Remove Wolfram software? "
read
if [[ ${REPLY:0:1} != "Y" && ${REPLY:0:1} != "y" ]]; then
[[ $0 == "-bash" ]] && return 2 || exit 2
fi
sudo rm /opt/Wolfram/WolframEngine/10.0/SystemFiles/Java/Linux-ARM 2> /dev/null
sudo apt-get -y purge wolfram-engine
else
echo "Removing Wolfram software due to space constraints..."
sudo rm /opt/Wolfram/WolframEngine/10.0/SystemFiles/Java/Linux-ARM 2> /dev/null
sudo apt-get -y purge wolfram-engine
fi
else
echo "You don't have enough free space on your SD card to upgrade."
echo "Sorry, man. Delete some stuff or get a bigger card."
[[ $0 == "-bash" ]] && return 1 || exit 1
fi
fi
{ cd /tmp; sudo apt-get -y autoremove; sudo apt-get -y autoclean; sudo apt-get -y clean; } > /dev/null
# Reinstall Apple-II-related packages first if present.
dpkg -l | grep -q a2pi && sudo apt-get -y --force-yes install a2pi
dpkg -l | grep -q apple2user && sudo apt-get -y --force-yes install apple2user gsport
if dpkg -l | grep -q wolfram-engine; then
sudo rm /opt/Wolfram/WolframEngine/10.0/SystemFiles/Java/Linux-ARM 2> /dev/null
# If a wolfram upgrade is pending and space is tight, purge then reinstall.
if [[ $freeSpace -lt 750000 && $(apt-get -s install wolfram-engine | grep upgraded) ]]; then
sudo apt-get -y purge wolfram-engine
{ cd /tmp; sudo apt-get -y autoremove; sudo apt-get -y autoclean; sudo apt-get -y clean; } > /dev/null
fi
sudo apt-get -y install wolfram-engine
{ cd /tmp; sudo apt-get -y autoremove; sudo apt-get -y autoclean; sudo apt-get -y clean; } > /dev/null
fi
# Non-interactive upgrade, keeping locally modified config files (confold).
sudo DEBIAN_FRONTEND=noninteractive apt-get -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" upgrade
{ cd /tmp; sudo apt-get -y autoremove; sudo apt-get -y autoclean; sudo apt-get -y clean; } > /dev/null
sudo DEBIAN_FRONTEND=noninteractive apt-get -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" dist-upgrade
{ cd /tmp; sudo apt-get -y autoremove; sudo apt-get -y autoclean; sudo apt-get -y clean; } > /dev/null
sudo apt-get -y install raspberrypi-ui-mods
{ cd /tmp; sudo apt-get -y autoremove; sudo apt-get -y autoclean; sudo apt-get -y clean; } > /dev/null
fi
if [[ $noobs ]]; then
echo "Updating NOOBS..."
# update Partition 3
# (NOOBS settings partition: refresh os metadata, icons and slides)
mkdir -p /tmp/p3
sudo mount /dev/mmcblk0p3 /tmp/p3
sudo rm -rf /tmp/p3/os/* 2> /dev/null
if grep -q 'Raspple II' /tmp/p3/installed_os.json; then
echo "Downloading Raspple II lite..."
noobsUrl="ivanx.com/rasppleii/files/RasppleII_lite.zip"
noobsOSurl="ivanx.com/rasppleii/noobs-os"
distDir="Raspple_II"
sudo mkdir -p /tmp/p3/os/$distDir
sudo sed -i 's:/Raspbian:/Raspple_II:' /tmp/p3/installed_os.json
sudo wget -qO /tmp/p3/icon.png $noobsOSurl/Raspple_II.png
wget -qO- $noobsOSurl/slidesAB.tar | sudo tar -C /tmp/p3/os/$distDir -x
else
echo "Downloading NOOBS lite..."
# Scrape the latest NOOBS_lite image directory from the download index.
noobsRoot="downloads.raspberrypi.org/NOOBS_lite/images/"
noobsDir=$(wget -qO- $noobsRoot | grep '^<tr><td' | tail -1 | grep -P -o 'href=".*?"' | cut -c 6- | tr -d '"')
noobsUrl=$noobsRoot$noobsDir$(wget -qO- $noobsRoot$noobsDir | grep -P -o 'href=".*.zip"' | cut -c 6- | tr -d '"')
noobsOSurl="downloads.raspberrypi.org/raspbian"
distDir="Raspbian"
sudo mkdir -p /tmp/p3/os/$distDir
sudo wget -qO /tmp/p3/icon.png $noobsOSurl/Raspbian.png
wget -qO- $noobsOSurl/marketing.tar | sudo tar -C /tmp/p3/os/$distDir -x
fi
sudo rm -rf /tmp/p3/cache 2> /dev/null
# Sync release date and paths in installed_os.json with the published os.json.
releaseDate=$(wget -qO- $noobsOSurl/os.json | grep 'release_date' | cut -f 4 -d '"')
sudo sed -i 's/"release_date".*$/"release_date" : "'$releaseDate'"/' /tmp/p3/installed_os.json
sudo sed -i 's/keyboard_layout=gb/keyboard_layout=us/' /tmp/p3/noobs.conf
sudo sed -i 's:/mnt/:/settings/:' /tmp/p3/installed_os.json
sudo sed -i 's@"icon".*,@"icon" : "/settings/os/'$distDir'/icon.png",@' /tmp/p3/installed_os.json
sudo cp /tmp/p3/icon.png /tmp/p3/os/$distDir
sudo wget -qO /tmp/p3/os/$distDir/os.json $noobsOSurl/os.json
sudo wget -qO /tmp/p3/os/$distDir/partition_setup.sh $noobsOSurl/partition_setup.sh
sudo wget -qO /tmp/p3/os/$distDir/partitions.json $noobsOSurl/partitions.json
sudo umount /tmp/p3
rmdir /tmp/p3
# update Partition 1
# (NOOBS boot/recovery partition: replace with the freshly downloaded zip)
mkdir -p /tmp/p1
sudo mount /dev/mmcblk0p1 /tmp/p1
wget -qO /tmp/noobs_lite.zip $noobsUrl
sudo rm -rf /tmp/p1/*
sudo unzip -d /tmp/p1 /tmp/noobs_lite.zip
# Do not auto-run the installer on next boot; force US keyboard; disable
# shift-to-safe-mode.
sudo sed -i 's/^runinstaller //' /tmp/p1/recovery.cmdline
sudo sed -i 's/silentinstall//' /tmp/p1/recovery.cmdline
grep -q 'keyboard=us' /tmp/p1/recovery.cmdline || sudo sed -i '1 s/^\(.*\)$/\1 keyboard=us/' /tmp/p1/recovery.cmdline
grep -q 'disablesafemode' /tmp/p1/recovery.cmdline || sudo sed -i '1 s/^\(.*\)$/\1 disablesafemode/' /tmp/p1/recovery.cmdline
sudo umount /tmp/p1
rmdir /tmp/p1
# Append a 'u' marker to the Raspple II release string in /etc/issue.
sudo sed -i 's/\(Raspple II release.*[^u]$\)/\1u/' /etc/issue
fi
echo
echo "*** Raspbian update completed. ***"
echo
cd /tmp
# ---- Optional follow-up updates (sourced, so they share this shell) ----
if [[ $updateA2Cloud ]]; then
wget -qO /tmp/a2cloud-setup ivanx.com/a2cloud/setup/
source /tmp/a2cloud-setup -y -r noSetGroups
if acmd -g /usr/share/gsport/disks/GSport\ Internet\ Starter\ Kit.2mg SYSTEM/FONTS/SIS.4.10 &> /dev/null; then
wget -qO /tmp/ua2.txt ivanx.com/rasppleii/files/a/ua2.txt
source /tmp/ua2.txt
fi
echo
echo "*** A2CLOUD update completed. ***"
echo
fi
cd /tmp
if [[ $updateA2Server ]]; then
wget -q -O /tmp/a2server-setup ivanx.com/a2server/setup/
# Pass -w (web/server mode?) only when smbd is running — TODO confirm flag meaning.
if ps aux | grep -q [s]mbd; then
source /tmp/a2server-setup -y -r -w
else
source /tmp/a2server-setup -y -r
fi
echo
echo "*** A2SERVER update completed. ***"
echo
fi
cd "$origDir"
# ---- Reboot: prompt interactively, or immediately under -y ----
if [[ ! $autoYes ]]; then
echo
echo
echo "Your system has been updated and needs to reboot to use its new software."
echo
echo -n "Reboot now (recommended)? "
read
if [[ ${REPLY:0:1} == "Y" || ${REPLY:0:1} == "y" ]]; then
sudo shutdown -r now
fi
else
echo "*** raspbian-update completed. ***"
sudo shutdown -r now
fi
| true
|
e0e6db9f945755c655657c9dd29012d11908c532
|
Shell
|
yingyu157fu/person-repo
|
/shell/kvm-add.sh
|
UTF-8
| 1,087
| 3.828125
| 4
|
[] |
no_license
|
#!/bin/bash
# Create a qcow2 disk, attach it to a running KVM domain via libvirt, then
# partition/format/mount it inside the guest through ansible.
#
# Usage: kvm-add.sh <domain> <name> <size>
#   domain - libvirt domain (guest) name / ansible host
#   name   - name for the new disk image and device XML
#   size   - disk size understood by qemu-img, e.g. 10G
#
# Review fix: the two `sed -i "$name.xml"` calls edited a path relative to
# the caller's working directory, but the XML is copied to
# /etc/libvirt/qemu/$name.xml and the script only cd's there afterwards.
# They now edit the absolute path.
if [ "$1" == "--help" ] || [ "$1" == "-h" ] || [ "$1" == "" ]; then
	echo "[Usage]:domain name size"
	exit
fi
domain=$1
name=$2
size=$3
# Create the new qcow2-format disk image.
qemu-img create -f qcow2 /kvm/disk/$name.qcow2 $size &> /dev/null
if [ $? -eq 0 ];then
	echo "$name磁盘已经新建成功!"
else
	echo "$name磁盘未成功!"
	exit
fi
# Write the device XML file $name.xml from the template.
cp /etc/libvirt/qemu/default.xml /etc/libvirt/qemu/$name.xml
sed -i "s/default/$name/" /etc/libvirt/qemu/$name.xml
# Find the guest's last sdX disk and pick the next free letter after it.
n=`ansible $domain -m shell -a "lsblk"|awk '/^sd/{print $1}'|tail -1|sed -nr 's/^..//p'`
a=({a..z})
for i in ${!a[*]};do [ "${a[$i]}" == "$n" ]&&let m=$i+1&&break;done
sed -i "s/sdb/sd${a[$m]}/" /etc/libvirt/qemu/$name.xml
echo "已创建sd${a[$m]}"
# Attach the disk to the live domain.
cd /etc/libvirt/qemu
virsh attach-device $domain $name.xml
# Partition, format and mount the disk inside the guest.
ansible $domain -m shell -a "parted /dev/sd${a[$m]} mkpart primary 1 $size"
ansible $domain -m shell -a "mkfs.xfs /dev/sd${a[$m]}"
ansible $domain -m shell -a "mkdir /mnt/cdrom${a[$m]}"
ansible $domain -m shell -a "mount /dev/sd${a[$m]} /mnt/cdrom${a[$m]}"
echo "挂载成功"
| true
|
abe9c578529f9f78c9c861a86fb9a6f18fd60399
|
Shell
|
kwierman/universe_II
|
/driver/ins
|
UTF-8
| 906
| 3.5625
| 4
|
[] |
no_license
|
#!/bin/sh
# Install the universeII VME driver module and (re)create its device nodes.
# Review fix: the chgrp lines hard-coded "users" even though the $group
# variable was declared for that purpose — they now use $group, so changing
# the variable actually takes effect.
module="universeII"
modname="universeII"
device="vmeex"
group="users"
version=`uname -r`
kernel=kernel-$version
clear
printf "Loading %s module...\n" $module
# 2.2/2.4 kernels ship modules as .o; 2.6+ use .ko.
case $version in
	2.2.* | 2.4.*) /sbin/insmod $kernel/$module.o || exit 1;;
	2.6.*) /sbin/insmod $kernel/$module.ko || exit 1;;
	3.*.*) /sbin/insmod $kernel/$module.ko || exit 1;;
	4.*.*) /sbin/insmod $kernel/$module.ko || exit 1;;
esac
# Remove device files left over from a previous installation.
if [ -d /dev/$device ]
then
	printf "Removing old device files...\n"
	rm -f /dev/$device/ctl
	rmdir /dev/$device
fi
# Look up the dynamically assigned major number from /proc/devices.
major=`cat /proc/devices | awk "\\$2==\"$modname\" {print \\$1}"`
printf "Creating new device files...\n"
mkdir /dev/$device
mknod /dev/$device/ctl c $major 0
# set appropriate group/permissions
chown root /dev/$device
chown root /dev/$device/*
chgrp $group /dev/$device
chgrp $group /dev/$device/*
chmod 666 /dev/$device/*
printf "Installation complete\n"
| true
|
d9d788e1fe8b01eff1ab0e9154fa14e043df1c95
|
Shell
|
delkyd/alfheim_linux-PKGBUILDS
|
/xhtml-1-docs/PKGBUILD
|
UTF-8
| 678
| 2.5625
| 3
|
[] |
no_license
|
# Contributor: Ondrej Kucera <ondrej.kucera@centrum.cz>
# PKGBUILD for the W3C XHTML 1.1 documentation package.
pkgname=xhtml-1-docs
_version=20101123
pkgver=1.1.${_version}
pkgrel=2
pkgdesc="XHTML 1.1 Documentation"
arch=('any')
url="http://www.w3.org/TR/xhtml11"
license=('W3C')
depends=("xhtml-modularization-1-docs")
options=('docs' '!strip')
source=("http://www.w3.org/TR/2010/REC-xhtml11-${_version}/xhtml11.tgz")
md5sums=('56366fb9ff58b79a2de71f127b9baf76')
replaces=('xhtml11-doc')
# Rewrite absolute W3C links so they point at the locally installed
# xhtml-modularization docs (provided by the dependency above).
build(){
cd "$srcdir/xhtml11-${_version}"
sed -i 's%http://www.w3.org/TR/xhtml-modularization/%../xhtml-modularization-1/%g' *.html
}
# Install the documentation tree under /usr/share/doc/xhtml-1.
package() {
cd "$srcdir"
mkdir -p "$pkgdir/usr/share/doc"
cp -rf "xhtml11-${_version}" "$pkgdir/usr/share/doc/xhtml-1"
}
| true
|
dd05a82af79f28052e74a2ea088202cd4780afba
|
Shell
|
zzak/rvm
|
/scripts/functions/requirements/ubuntu
|
UTF-8
| 1,724
| 3.546875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
set -e
# Install RVM's Ubuntu/Debian package requirements via apt-get.
# First pass over $@ consumes modifier words (echo/run/force); the second
# pass maps each remaining requirement keyword to an apt-get invocation.
function requirements_apt()
{
typeset -a command_to_run command_flags
command_to_run=()
command_flags=()
while
(( $# ))
do
case "$1" in
(echo)
# "echo" mode: prefix commands with echo so they are printed, not run.
command_to_run+=( $1 )
shift
;;
(run)
shift
;;
(force)
command_flags+=( --quiet --yes )
shift
;;
(*)
# First non-modifier word: stop and treat the rest as requirements.
break
;;
esac
done
# Non-root users need sudo in front of apt-get.
(( UID == 0 )) || command_to_run+=( sudo )
while
(( $# ))
do
case "$1" in
(update-system)
"${command_to_run[@]}" apt-get "${command_flags[@]}" update
;;
(rvm)
"${command_to_run[@]}" apt-get --no-install-recommends "${command_flags[@]}" install bash curl git patch
;;
(jruby*head)
"${command_to_run[@]}" apt-get --no-install-recommends "${command_flags[@]}" install ant openjdk-6-jdk
;;
(jruby*)
"${command_to_run[@]}" apt-get --no-install-recommends "${command_flags[@]}" install curl g++ openjdk-6-jre-headless
;;
(ir*)
"${command_to_run[@]}" apt-get --no-install-recommends "${command_flags[@]}" install curl mono-2.0-devel
;;
(opal)
"${command_to_run[@]}" apt-get --no-install-recommends "${command_flags[@]}" install nodejs npm
;;
(*)
# Default: full MRI build toolchain and library headers.
"${command_to_run[@]}" apt-get --no-install-recommends "${command_flags[@]}" install build-essential openssl libreadline6 libreadline6-dev curl git-core zlib1g zlib1g-dev libssl-dev libyaml-dev libsqlite3-dev sqlite3 libxml2-dev libxslt-dev autoconf libc6-dev libgdbm-dev ncurses-dev automake libtool bison subversion pkg-config libffi-dev
;;
esac
shift
done
}
requirements_apt "$@"
| true
|
7fd80bd9e2820e4268e5f0db77dfabdc56b45262
|
Shell
|
tocamgar/wsl2-bin
|
/v2gif_
|
UTF-8
| 1,534
| 3.21875
| 3
|
[] |
no_license
|
#! /bin/bash
# CONVERT A VIDEO INTO AN ANIMATED GIF (prompts are in Spanish)
echo -n "Introduce el video de entrada: "
read -e -i "/mnt/d/Vídeos/" v_entrada # full path of the input video file
ruta="${v_entrada%/*}/" # directory of the input file (without name/extension)
nom_f="${v_entrada##*/}" # input file name with extension
ext_f=".${nom_f##*.}" # input file extension (NOTE(review): unused below)
nom_f="${nom_f%.*}" # input file name without extension
paleta="${ruta}${nom_f}.png" # 256-color palette file name for color accuracy (NOTE(review): unused — palettegen runs inline in the filter graph)
echo -n "Introduce el número de fotogramas por segundo (10): "
read -e -i "10" fps # frames per second to capture from the video
#fps="0.5" # number of frames captured from the video each second
echo -n "Introduce en que proporción se modifica el retraso entre fotogramas (0.1 multiplica x10 la velocidad): "
read -e -i "0.1" pts # factor applied to frame timestamps (0.1 = 10x speed)
v_salida="${ruta}${nom_f}_v2gif.gif" # output GIF path (input name + _v2gif.gif)
# Generate the GIF: palettegen builds a 256-color palette adapted to the
# video, paletteuse applies it, setpts rescales playback speed.
echo ffmpeg -y -i "${v_entrada}" -filter_complex "[0:v]fps=${fps},split[a][b];[b]palettegen[p];[a][p]paletteuse,setpts=$pts*PTS[v]" -map '[v]' "${v_salida}"
ffmpeg \
-y \
-i "${v_entrada}" \
-filter_complex \
"
[0:v]fps=${fps},
split[a][b];
[b]palettegen[p];
[a][p]paletteuse,
setpts=$pts*PTS[v]
" \
-map '[v]' \
"${v_salida}"
| true
|
a1c82b0186ee1d0d05b6045fc1924d6f19eeb35c
|
Shell
|
bbhunter/dirstalk
|
/functional-tests.sh
|
UTF-8
| 6,162
| 3.703125
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
################################################################################################################
## The purpose of this script is to make sure dirstalk basic functionalities are working as expected
################################################################################################################
###################################
## function to assert that the given string contains the given substring
## example usage: assert_contains "error" "my_special_error: blabla" "an error is expected for XY"
###################################
# Assert that $1 contains the (grep BRE) pattern $2; on failure print the
# message $3 plus a diagnostic and terminate the whole script with status 1.
function assert_contains {
    local haystack=$1
    local needle=$2
    local message=$3

    if echo "$haystack" | grep "$needle" > /dev/null; then
        echo "Assertion passing"
        return
    fi

    echo "ERROR: $message"
    echo "Failed to assert that $haystack contains $needle"
    exit 1
}
###################################
## function to assert that the given string does not contain the given substring
## example usage: assert_contains "error" "my_special_error: blabla" "an error is expected for XY"
###################################
# Assert that $1 does NOT match the extended-regex pattern $2; on failure
# print the message $3 plus a diagnostic and terminate the script with 1.
# Review fix: replaced deprecated `egrep` with `grep -E` (same ERE
# semantics; GNU grep's egrep wrapper emits a deprecation warning).
function assert_not_contains {
    local actual=$1
    local contains=$2
    local msg=$3

    if printf -- '%s' "$actual" | grep -E -q -- "$contains"; then
        echo "ERROR: $msg"
        echo "Failed to assert that $actual does not contain: $contains"
        exit 1;
    fi

    echo "Assertion passing"
}
## Starting test server running on the 8080 port
echo "Starting test server"
./dist/testserver&
# Remember the server PID so the EXIT trap can kill it when the script ends
# (on success or on any assertion failure, which exits 1).
SERVER_PID=$!
sleep 1
echo "Done"
function finish {
echo "Killing test server $SERVER_PID"
kill -9 "$SERVER_PID"
echo "Done"
}
trap finish EXIT
## Tests
# Each invocation below captures a dirstalk command's combined output
# (2>&1) and asserts on its contents; "|| true" keeps intentionally failing
# commands from aborting the script before the assertion runs.
ROOT_RESULT=$(./dist/dirstalk 2>&1);
assert_contains "$ROOT_RESULT" "dirstalk is a tool that attempts" "description is expected"
assert_contains "$ROOT_RESULT" "Usage" "description is expected"
VERSION_RESULT=$(./dist/dirstalk version 2>&1);
assert_contains "$VERSION_RESULT" "Version" "the version is expected to be printed when calling the version command"
assert_contains "$VERSION_RESULT" "Built" "the build time is expected to be printed when calling the version command"
assert_contains "$VERSION_RESULT" "Built" "the build time is expected to be printed when calling the version command"
SCAN_RESULT=$(./dist/dirstalk scan 2>&1 || true);
assert_contains "$SCAN_RESULT" "error" "an error is expected when no argument is passed"
# Scan against the local test server with a small dictionary.
SCAN_RESULT=$(./dist/dirstalk scan -d resources/tests/dictionary.txt http://localhost:8080 2>&1);
assert_contains "$SCAN_RESULT" "/index" "result expected when performing scan"
assert_contains "$SCAN_RESULT" "/index/home" "result expected when performing scan"
assert_contains "$SCAN_RESULT" "3 results found" "a recap was expected when performing a scan"
assert_contains "$SCAN_RESULT" "├── home" "a recap was expected when performing a scan"
assert_contains "$SCAN_RESULT" "└── index" "a recap was expected when performing a scan"
assert_contains "$SCAN_RESULT" " └── home" "a recap was expected when performing a scan"
assert_not_contains "$SCAN_RESULT" "error" "no error is expected for a successful scan"
# Verify every documented flag appears in the scan help text.
SCAN_RESULT=$(./dist/dirstalk scan -h 2>&1);
assert_contains "$SCAN_RESULT" "\-\-dictionary" "dictionary help is expected to be printed"
assert_contains "$SCAN_RESULT" "\-\-cookie" "cookie help is expected to be printed"
assert_contains "$SCAN_RESULT" "\-\-header" "header help is expected to be printed"
assert_contains "$SCAN_RESULT" "\-\-http-cache-requests" "http-cache-requests help is expected to be printed"
assert_contains "$SCAN_RESULT" "\-\-http-methods" "http-methods help is expected to be printed"
assert_contains "$SCAN_RESULT" "\-\-http-statuses-to-ignore" "http-statuses-to-ignore help is expected to be printed"
assert_contains "$SCAN_RESULT" "\-\-http-timeout" "http-timeout help is expected to be printed"
assert_contains "$SCAN_RESULT" "\-\-socks5" "socks5 help is expected to be printed"
assert_contains "$SCAN_RESULT" "\-\-threads" "threads help is expected to be printed"
assert_contains "$SCAN_RESULT" "\-\-user-agent" "user-agent help is expected to be printed"
assert_contains "$SCAN_RESULT" "\-\-scan-depth" "scan-depth help is expected to be printed"
assert_not_contains "$SCAN_RESULT" "error" "no error is expected when priting scan help"
DICTIONARY_GENERATE_RESULT=$(./dist/dirstalk dictionary.generate resources/tests 2>&1);
assert_contains "$DICTIONARY_GENERATE_RESULT" "dictionary.txt" "dictionary generation should contains a file in the folder"
assert_not_contains "$DICTIONARY_GENERATE_RESULT" "error" "no error is expected when generating a dictionary successfully"
RESULT_VIEW_RESULT=$(./dist/dirstalk result.view -r resources/tests/out.txt 2>&1);
assert_contains "$RESULT_VIEW_RESULT" "├── adview" "result output should contain tree output"
assert_contains "$RESULT_VIEW_RESULT" "├── partners" "result output should contain tree output"
assert_contains "$RESULT_VIEW_RESULT" "│   └── terms" "result output should contain tree output"
assert_contains "$RESULT_VIEW_RESULT" "└── s" "result output should contain tree output"
assert_not_contains "$RESULT_VIEW_RESULT" "error" "no error is expected when displaying a result"
RESULT_DIFF_RESULT=$(./dist/dirstalk result.diff -f resources/tests/out.txt -s resources/tests/out2.txt 2>&1);
assert_contains "$RESULT_DIFF_RESULT" "├── adview" "result output should contain diff"
assert_contains "$RESULT_DIFF_RESULT" "├── partners" "result output should contain diff"
assert_contains "$RESULT_DIFF_RESULT" $(echo "│   └── \x1b[31mterms\x1b[0m\x1b[32m123\x1b[0m") "result output should contain diff"
assert_contains "$RESULT_DIFF_RESULT" "└── s" "result output should contain diff"
assert_not_contains "$RESULT_DIFF_RESULT" "error" "no error is expected when displaying a result"
RESULT_DIFF_RESULT=$(./dist/dirstalk result.diff -f resources/tests/out.txt -s resources/tests/out.txt 2>&1 || true);
assert_contains "$RESULT_DIFF_RESULT" "no diffs found"
assert_contains "$RESULT_DIFF_RESULT" "error" "error is expected when content is the same"
| true
|
1c5d8e90b3345f46e7dd07c133c1a3d86e315e23
|
Shell
|
oncoapop/data_reporting
|
/beast_scripts/SuppFig_Gen_Rev3.sh
|
UTF-8
| 2,546
| 3.03125
| 3
|
[] |
no_license
|
#!/bin/sh
# Script to use the latest generated AmpliconManifest files
# at
indir="/share/lustre/projects/breast_xeno_evolution/binomial_validation/amplicon_manifest_files"
# and SuppleFigfile.csv in
wd="/home/dyap/public_html"
# Cross-checks each amplicon position between the AmpliconManifest and the
# sample's SuppleFigFile; matches go to $outfile, mismatches to $failfile.
sample="SA429"
ampsample="SA429-PrimerSet1"
# SA429 and SA501 have 2 sets
# SA429,493,494,495,496,499,500,501,530,531,532,533,534,535,536,542
#
#SA429,SA501,SA542
sufile="/home/dyap/public_html/Tumour_Xenograft_Rev-set2/SA429"
#sufile="/home/dyap/Projects/PrimerDesign/Tumour_Xenograft_Rev-test/primer3"
outfile="/home/dyap/Projects/PrimerDesign/SupplFig/"$sample"_Suppl_Table.csv"
failfile="/home/dyap/Projects/PrimerDesign/SupplFig/"$sample"_Fail_Table.csv"
# Start with fresh output files on every run.
rm -f $outfile
rm -f $failfile
# NOTE(review): $list is built but never used below — confirm intent.
list=`ls $indir | awk -F. '{print $1}' | sed 's/temp//'`
echo $ampsample".AmpliconManifest"
# Position identifiers: second ':'-separated field of each manifest line.
pos=`cat $indir"/"$ampsample".AmpliconManifest" | awk -F: '{print $2}'`
echo "==============================="
echo $pos
echo "==============================="
# Iterate word-by-word over the extracted positions.
for j in $pos
do
echo $ampsample
# Pull the annotation fields for position $j out of the manifest line
# (':' fields, then '_'/tab sub-fields).
inref=`grep $j $indir"/"$ampsample".AmpliconManifest" | awk -F: '{print $3}'| awk -F"_" '{print $2}'`
inst=`grep $j $indir"/"$ampsample".AmpliconManifest" | awk -F: '{print $5}'| awk -F"_" '{print $2}'`
inan=`grep $j $indir"/"$ampsample".AmpliconManifest" | awk -F: '{print $6}'| awk -F"_" '{print $2}'`
ingn=`grep $j $indir"/"$ampsample".AmpliconManifest" | awk -F: '{print $6}'| awk -F"_" '{print $3}' | awk -F" " '{print $1}'`
inchr=`grep $j $indir"/"$ampsample".AmpliconManifest" | awk -F"\t" '{print $2}'`
insta=`grep $j $indir"/"$ampsample".AmpliconManifest" | awk -F"\t" '{print $3}'`
inend=`grep $j $indir"/"$ampsample".AmpliconManifest" | awk -F"\t" '{print $4}'`
# Corresponding primer coordinates from the SuppleFigFile (','-separated).
prchr=`grep $j $sufile/$sample"_SuppleFigFile.csv" | awk -F, '{print $2}'`
prsta=`grep $j $sufile/$sample"_SuppleFigFile.csv" | awk -F, '{print $3}'`
prend=`grep $j $sufile/$sample"_SuppleFigFile.csv" | awk -F, '{print $4}'`
prleft=`grep $j $sufile/$sample"_SuppleFigFile.csv" | awk -F, '{print $5}'`
prright=`grep $j $sufile/$sample"_SuppleFigFile.csv" | awk -F, '{print $6}'`
prlen=`grep $j $sufile/$sample"_SuppleFigFile.csv" | awk -F, '{print $7}'`
echo $inchr " = " $prchr
echo $insta " = " $prsta
echo $inend " = " $prend
# Record a full row when chr/start/end agree, otherwise log the mismatch.
if [[ $inchr == $prchr ]] && [[ $insta == $prsta ]] && [[ $inend == $prend ]];
then
echo $sample","$j","$inref","$inan","$ingn","$insta","$inend","$prleft","$prright","$prlen >> $outfile
else
echo $sample","$j","$inref","$inan","$ingn","$insta","$inend >> $failfile
fi
done
| true
|
13049bcdf847c34730d240ff8b81280d006bebc2
|
Shell
|
pepopowitz/dotfiles
|
/git.zsh
|
UTF-8
| 2,687
| 3.75
| 4
|
[] |
no_license
|
# Git aliases and helpers (zsh). Browser aliases require the GitHub CLI
# (gh); branchy/unstashy additionally require fzf.
# Open main in browser
alias web='gh repo view --web'
# Open current branch in browser
alias webbranch='gh repo view --web --branch $(git symbolic-ref --quiet --short HEAD )'
# Open PR for current branch in browser
alias open-pr='gh pr create --web'
# Browse pulls for current repo
alias pulls='gh pr list --web'
# git aliases
alias gco='git checkout'
alias gpo='git push origin'
# Push and set upstream tracking for the current branch.
alias gpo1='git push --set-upstream origin $(git symbolic-ref --quiet --short HEAD )'
alias gst='git status'
# Stage everything and commit; the '!' variants skip commit hooks.
alias gcam='git add . && git commit -am'
alias gcam!='git add . && git commit --no-verify -am'
alias gkm='git commit -m'
alias gkm!='git commit --no-verify -m'
# Thanks, [Elijah](https://twitter.com/elijahmanor/status/1562077209321512965)!
# Fuzzy-pick a recent branch / stash via fzf.
alias branchy="branches 20 | fzf --header \"Switch to recent branch\" --pointer=\"\" | xargs git switch"
alias unstashy="stashes 100 | fzf --header \"Apply recent stash\" --pointer=\"\" | cut -d: -f1 | xargs git stash apply"
# Create and switch to a new branch.
# $1 - name of the branch to create
function branch() {
  # Quoted so unusual branch names are passed to git intact (SC2086).
  git checkout -b "$1"
}
# Stage everything and stash it, optionally under a named message ($1).
# NOTE(review): "[[ $1 ]] then" without a ';' is zsh-specific syntax — this
# file is sourced by zsh only; it would not parse under bash.
function stash() {
git add .
if [[ $1 ]] then
git stash push -m "$1"
else
git stash push
fi
}
# Apply a stash: by index ($1 numeric), by message substring ($1 text),
# or the most recent stash when called with no argument.
function unstash() {
# Regex used to decide whether $1 is a stash index (all digits).
re='^[0-9]+$'
if [[ $1 ]] then
if [[ $1 =~ $re ]] then
echo "Applying stash@{$1}..."
git stash apply stash@{$1}
else
echo "Applying stash named "$1"..."
# Look up the stash ref whose message matches $1 (first match wins).
git stash apply $(git stash list | grep "$1" | cut -d: -f1)
fi
else
echo "Applying most recent stash..."
git stash apply
fi
}
# Check out the mainline branch and pull it from upstream (for forks,
# also pushing the result to origin) or from origin otherwise.
function sync() {
local mainline=$(main_or_master)
git checkout $mainline
# A fork is detected by the presence of an "upstream" remote.
if [[ `git remote -v | grep upstream` ]]; then
echo "syncing to upstream..."
git pull upstream $mainline
git push origin
else
echo "syncing to origin..."
git pull origin $mainline
fi
}
# Print the repository's mainline branch name: "main" if such a local
# branch exists, otherwise "master".
function main_or_master() {
  local mainline='master'
  if branch_exists main; then
    mainline='main'
  fi
  echo "$mainline"
}
# Print the $1 (default 5) most recently committed-to local branches.
function branches() {
COUNT=${1:-5}
git branch --sort=-committerdate | head -n $COUNT
}
# Print the $1 (default 5) most recent stashes.
function stashes() {
COUNT=${1:-5}
git stash list | head -n $COUNT
}
# Return 0 if the local branch $1 exists, 1 otherwise.
# Review fixes: the --list pattern is now quoted so branch names cannot
# glob/word-split (SC2086), and the declaration is separated from the
# command substitution so git's exit status is not masked by `local`.
function branch_exists() {
  local branch=${1}
  local exists
  exists=$(git branch --list "${branch}")
  if [[ -z ${exists} ]]; then
    return 1
  else
    return 0
  fi
}
# Rebase the current branch on top of an up-to-date mainline.
# Stashes dirty work first, syncs main/master, rebases, then restores
# the stash if one was made.
function rebaseonmain() {
local mainline=$(main_or_master)
# Dirty working tree? (porcelain output is non-empty)
if [[ `git status --porcelain` ]]; then
local needToStashAndUnstash=true
else
local needToStashAndUnstash=false
fi
if [[ "$needToStashAndUnstash" = true ]]
then
stash
fi
echo "syncing $mainline branch to upstream...."
# sync leaves us on $mainline; "checkout -" returns to the prior branch.
sync
git checkout -
echo "rebasing on $mainline...."
git rebase $mainline
if [[ "$needToStashAndUnstash" = true ]]
then
unstash
fi
}
|
9110696585c1fdd5b3d92576af34a704707476ec
|
Shell
|
t-head-aosp/device-generic-goldfish
|
/tools/mk_vbmeta_boot_params.sh
|
UTF-8
| 2,502
| 3.59375
| 4
|
[] |
no_license
|
#!/bin/bash
if [ $# -ne 3 ]; then
echo "Usage: $0 <vbmeta.img> <system.img> <VbmetaBootParams.textproto>"
exit 0
fi
# Example Output from 'avbtool calculate_vbmeta_digest --image $OUT/vbmeta.img':
# 3254db8a232946c712b5c6f8c1a80b31f2a200bab98553d86f5915d06bfd5436
#
# Example Output from 'avbtool info_image --image $OUT/vbmeta.img':
#
# Minimum libavb version: 1.0
# Header Block: 256 bytes
# Authentication Block: 576 bytes
# Auxiliary Block: 1600 bytes
# Algorithm: SHA256_RSA4096
# Rollback Index: 0
# Flags: 0
# Release String: 'avbtool 1.1.0'
# Descriptors:
# ...
#
#
set -e
# Print an error message to stderr and abort the script with status 1.
# Arguments: $* - message text
function die {
  # printf with a quoted "$*" prints the message verbatim; the original's
  # unquoted 'echo $1' word-split and glob-expanded it.
  printf '%s\n' "$*" >&2
  exit 1
}
# Incrementing major version causes emulator binaries that do not support the
# version to ignore this file. This can be useful if there is a change
# not supported by older emulator binaries.
readonly MAJOR_VERSION=2
readonly VBMETAIMG=$1
readonly SYSIMG=$2
readonly TARGET=$3
# Extract the digest. Assign first and mark readonly afterwards: with
# 'readonly X=$(cmd)' a failing avbtool invocation is masked by the
# readonly builtin's zero exit status, defeating 'set -e'.
VBMETA_DIGEST=$(${AVBTOOL:-avbtool} calculate_vbmeta_digest --image "$VBMETAIMG")
readonly VBMETA_DIGEST
INFO_OUTPUT=$(${AVBTOOL:-avbtool} info_image --image "$VBMETAIMG" | grep "^Algorithm:")
readonly INFO_OUTPUT
# Extract the algorithm: second whitespace-separated word of a line like
# "Algorithm:   SHA256_RSA4096". (The original re-grepped INFO_OUTPUT into
# ALG_OUTPUT first; that step was redundant and is dropped.)
ALG_SPLIT=($(echo $INFO_OUTPUT | tr ' ' '\n'))
readonly ALG_SPLIT
readonly ORG_ALGORITHM=${ALG_SPLIT[1]}
if [[ $ORG_ALGORITHM == "SHA256_RSA4096" ]]; then
  VBMETA_HASH_ALG=sha256
else
  die "Don't know anything about $ORG_ALGORITHM"
fi
# Print the byte count from a matching 'avbtool info_image' line.
# Arguments: $1 - image file
#            $2 - grep pattern selecting the line (e.g. "Header Block:")
# Outputs:   the third whitespace-separated word of the match,
#            e.g. "256" for "Header Block: 256 bytes"
function get_bytes {
  # Locals keep these out of the global namespace (the original leaked
  # MY_OUTPUT/MY_SPLIT/MY_BYTES); quoting protects the image path/pattern.
  local matched
  matched=$(${AVBTOOL:-avbtool} info_image --image "$1" | grep -- "$2")
  # Intentional unquoted expansion: split the matched text on whitespace,
  # exactly as the original's tr-into-array idiom did.
  local -a fields=($matched)
  echo "${fields[2]}"
}
HEADER_SIZE=$(get_bytes "$VBMETAIMG" "Header Block:")
AUTHEN_SIZE=$(get_bytes "$VBMETAIMG" "Authentication Block:")
AUX_SIZE=$(get_bytes "$VBMETAIMG" "Auxiliary Block:")
SYSMETA_SIZE=$(get_bytes "$SYSIMG" "VBMeta size:")
# Total vbmeta footprint = vbmeta.img's three blocks plus the vbmeta
# embedded in system.img. Shell arithmetic replaces the external 'expr'.
VBMETA_SIZE=$((HEADER_SIZE + AUTHEN_SIZE + AUX_SIZE + SYSMETA_SIZE))
HEADER_COMMENT="# androidboot.vbmeta.size=$VBMETA_SIZE androidboot.vbmeta.hash_alg=$VBMETA_HASH_ALG androidboot.vbmeta.digest=$VBMETA_DIGEST"
# Write the textproto in one grouped redirection: a human-readable summary
# comment, the format version, then one 'param' entry per
# androidboot.vbmeta.* kernel parameter.
{
  echo "$HEADER_COMMENT"
  echo "major_version: $MAJOR_VERSION"
  #echo "param: \"androidboot.slot_suffix=_a\""
  echo "param: \"androidboot.vbmeta.size=$VBMETA_SIZE\""
  echo "param: \"androidboot.vbmeta.hash_alg=$VBMETA_HASH_ALG\""
  echo "param: \"androidboot.vbmeta.digest=$VBMETA_DIGEST\""
} > "$TARGET"
| true
|
12aba0add4fca87f6369976f6aa1f161165bd3a7
|
Shell
|
broadinstitute/ml4h
|
/scripts/detach_disk.sh
|
UTF-8
| 213
| 3
| 3
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/usr/bin/env bash
# Detach the named persistent disk from every GCE instance in the project.
# Arguments: $1 - disk name (default: data)
DISK=${1:-data}
shift 1
# 'gcloud compute instances list' prints a tabular header row first; skip
# it (NR>1) so we do not try to detach from an instance literally named
# "NAME", which the original's awk '{print $1}' included.
VMS=$(gcloud compute instances list | awk 'NR>1 {print $1}')
ZONE=us-central1-a
# $VMS is intentionally unquoted: one word per instance name.
for VM in $VMS; do
  gcloud compute instances detach-disk "$VM" --zone "$ZONE" --disk="$DISK"
done
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.