blob_id
stringlengths
40
40
language
stringclasses
1 value
repo_name
stringlengths
4
115
path
stringlengths
2
970
src_encoding
stringclasses
28 values
length_bytes
int64
31
5.38M
score
float64
2.52
5.28
int_score
int64
3
5
detected_licenses
listlengths
0
161
license_type
stringclasses
2 values
text
stringlengths
31
5.39M
download_success
bool
1 class
100e204eae96a64601757962e8375d90f40e66a0
Shell
charilaouc/federatorai-operator
/deploy/prepare-private-repository.sh
UTF-8
1,696
3.84375
4
[ "Apache-2.0" ]
permissive
#!/bin/sh
##
## This script pulls Federator.ai container images from quay.io and pushes them into a private repository.
## By using the following example, you can install Federator.ai with private repository.
## # export RELATED_IMAGE_URL_PREFIX="repo.prophetservice.com/federatorai"
## # ./install.sh
##
set -e

##
show_usage()
{
    cat << __EOF__
Usage: $0 [build_name] [private_repository_url]
Example: $0 v4.2.614 repo.prophetstor.com/federatorai
__EOF__
    exit 1
}

##
## Main
##
build_name="$1"
PRIVATE_REPOSITORY_IMAGE_URL_PREFIX="$2"
# Both arguments are mandatory. Two separate tests replace the deprecated,
# ambiguous '-o' operator inside a single [ ... ].
if [ -z "${PRIVATE_REPOSITORY_IMAGE_URL_PREFIX}" ] || [ -z "${build_name}" ]; then
    show_usage
fi

## Global variables
ORIGIN_URL_PREFIX="quay.io/prophetstor"
# Intentionally unquoted below so the list word-splits into one image per iteration.
IMAGE_LIST="alameda-admission-ubi alameda-ai-dispatcher alameda-ai alameda-analyzer-ubi alameda-datahub-ubi alameda-evictioner-ubi alameda-executor-ubi alameda-grafana alameda-influxdb alameda-notifier-ubi alameda-operator-ubi alameda-rabbitmq alameda-recommender-ubi fedemeter-api-ubi fedemeter-influxdb federatorai-agent-app federatorai-agent-gpu federatorai-agent-preloader federatorai-agent-ubi federatorai-dashboard-backend federatorai-dashboard-frontend federatorai-operator-ubi federatorai-rest-ubi"

# Mirror every image: pull from quay.io, re-tag, push to the private registry.
# Image references are now quoted so odd characters in arguments cannot split words.
for image in ${IMAGE_LIST}; do
    echo "Preparing image ${PRIVATE_REPOSITORY_IMAGE_URL_PREFIX}/${image}:${build_name}"
    echo "    from image ${ORIGIN_URL_PREFIX}/${image}:${build_name}"
    docker pull "${ORIGIN_URL_PREFIX}/${image}:${build_name}"
    docker tag "${ORIGIN_URL_PREFIX}/${image}:${build_name}" "${PRIVATE_REPOSITORY_IMAGE_URL_PREFIX}/${image}:${build_name}"
    docker push "${PRIVATE_REPOSITORY_IMAGE_URL_PREFIX}/${image}:${build_name}"
    /bin/echo -e "\n\n"
done
exit 0
true
819fe47b308009b455e553ac60023f4588f5decb
Shell
kodegrinder/dotfiles-2
/power_management/check_battery.sh
UTF-8
841
3.15625
3
[]
no_license
#!/bin/bash
# Battery watchdog (intended for cron): warns via notify-send when the battery
# is low while discharging, and suspends the machine when the session has been
# idle for over a minute.

# Point X tools (notify-send, xprintidle) at the running desktop session.
export DISPLAY=:0
export XAUTHORITY=/home/manuel/.Xauthority
export XDG_RUNTIME_DIR=/run/user/$(id -u)

ACPI=$(acpi)
# Non-empty only while the battery is discharging.
STATUS=$(echo "$ACPI" | grep 'Discharging')
# 4th acpi field stripped to digits, e.g. "42%," -> 42.
BATTERY_LEVEL=$(echo "$ACPI" | awk '{ print $4 }' | sed 's/[^[:digit:]]//g' | bc)
# 1 when the charge is below 8%, else 0.
BATTERY_DANGER=$(echo "$BATTERY_LEVEL" | awk '{print $1 < 8}' | bc)
# 1 when the X session has been idle for more than 60000 ms.
SLEEPTIME=$(xprintidle | xargs printf "%s > 60000\n" | bc)

if [ -n "${STATUS}" ]
then
    if [ "$BATTERY_DANGER" -ne "0" ]
    then
        # NOTE(review): this branch only *announces* hibernation; no
        # hibernate/suspend command is issued here — confirm intent.
        notify-send -u critical -i "/usr/share/icons/Paper/16x16/status/xfce-battery-critical.png" -t 3000 "Hibernando el sistema"
    elif [ "$BATTERY_LEVEL" -le "32" ]
    then
        # Show the remaining-time fields reported by acpi.
        notify-send -u critical -i "/usr/share/icons/Paper/16x16/status/xfce-battery-critical.png" -t 3000 "$(echo "$ACPI" | awk '{ print $5 " " $6 }')"
    elif [ "$SLEEPTIME" -eq "1" ]
    then
        echo "$SLEEPTIME"
        systemctl suspend
    fi
fi
true
3a0f1ddd4eba0405f819d816ee23911e4e64db7b
Shell
pastcompute/freebsd-wifi-build
/build/bin/build_fullroot
UTF-8
1,613
3.78125
4
[]
no_license
#!/bin/sh SCRIPT_NAME="`basename $0`" SCRIPT_DIR="`dirname $0`" CUR_DIR="`pwd`" X_SRCDIR=${CUR_DIR} # XXX TODO for non-root builds # # We need to use the cross-build install program in order to # take advantage of the mtree-aware upgrades that it's recently # received. # # Unfortunately there's no "easy way" to derive that; so we have # to derive it ourselves. INSTALL_PROG="install" # suck in the per-device options CFGNAME=$1 shift . ${SCRIPT_DIR}/../cfg/${CFGNAME} || exit 1 # include the config variable generation code . ${SCRIPT_DIR}/../lib/cfg.sh || exit 1 # calculate basedir # XXX this should be generated in cfg.pm! X_BASEDIR=${SCRIPT_DIR}/../ echo "*** Deleting old file system.." ${INSTALL_PROG} -d ${X_STAGING_FSROOT} chflags -R noschg ${X_STAGING_FSROOT} rm -rf ${X_STAGING_FSROOT} rm -rf ${X_STAGING_TMPDIR} echo "*** Creating new filesystem..." mkdir -p ${X_STAGING_FSROOT} mkdir -p ${X_STAGING_TMPDIR} # ok, let's just populate the whole staging root with the installed root rsync -arH ${X_DESTDIR}/ ${X_STAGING_FSROOT} # .. now, kernel; that's living elsewhere? # now, default /etc/fstab # XXX TODO: label! echo "${X_ROOTFS_DEV} / ufs rw 1 1" > ${X_STAGING_TMPDIR}/fstab install -m 644 ${X_STAGING_TMPDIR}/fstab ${X_STAGING_FSROOT}/etc/ # .. and default to autosize_enable=yes echo "autosize_enable=\"YES\"" > ${X_STAGING_TMPDIR}/rc.conf echo "autosize_rootdev=\"${X_ROOTFS_DEV}\"" >> ${X_STAGING_TMPDIR}/rc.conf install -m 644 ${X_STAGING_TMPDIR}/rc.conf ${X_STAGING_FSROOT}/etc/ install -m 755 ${X_BASEDIR}/files.full/autosize ${X_STAGING_FSROOT}/etc/rc.d/ echo "**** Done."
true
cb658105c84b9541dfd89a845679f67c08a7693f
Shell
marianomms/vm-config
/scripts/setup/install/jet.sh
UTF-8
501
3.765625
4
[]
no_license
#!/bin/bash
set -e

# Download, install and self-update Codeship Jet (macOS/darwin build).
# NOTE: the function is only defined here; presumably a caller sources this
# file and invokes install_jet [version] — confirm against the setup scripts.
function install_jet() {
  echo 'Installing Codeship Jet...'

  # jet version to install (defaults to 2.10.0)
  local version=${1:-'2.10.0'}
  # Archive name computed once so download/extract/cleanup stay in sync.
  local archive="jet-darwin_amd64_${version}.tar.gz"

  wget -q "https://s3.amazonaws.com/codeship-jet-releases/${version}/${archive}"
  sudo tar -xC /usr/local/bin/ -f "${archive}"
  sudo chmod +x /usr/local/bin/jet

  echo ''
  echo 'Jet installed.'
  echo ''

  echo ''
  echo 'Updating jet'
  echo ''
  jet update
  jet version

  # Remove the downloaded tarball.
  rm "${archive}"
}
true
8844ec25fb139b342feac6474b1daa86994de5e6
Shell
hamiecod/dotfiles
/.local/bin/pdf-namer
UTF-8
405
3.421875
3
[]
no_license
#!/bin/sh
# Interactive PDF renamer: opens each PDF in zathura, asks for a new name via
# dmenu, and writes the renamed copy into ./correct/.

# Best-effort tool checks (the script continues either way).
zathura --version || echo "Install zathura"
dmenu -v || echo "Install dmenu"

mkdir ./correct
cp ./*.pdf ./correct/

# Glob directly instead of parsing `ls` — filenames with spaces now work.
for file in *.pdf; do
  zathura "$file"
  # dmenu returns the typed name on stdout.
  newfile=$(echo "" | dmenu -i -p "PDF Name:")
  # Quote both sides: the original unquoted test broke on empty or multi-word
  # input. Typing the sentinel 'not' keeps a "not"-prefixed copy.
  if [ "$newfile" = 'not' ]
  then
    mv "./correct/$file" "./correct/not$newfile.pdf"
  else
    mv "./correct/$file" "./correct/$newfile.pdf"
  fi
done
true
24ff7892c48c829b2e4727e46d58a359ecde063b
Shell
vrtdev/aws-cloudfront-authorizer
/build-nodejs.sh
UTF-8
1,269
4.09375
4
[ "Apache-2.0" ]
permissive
#!/usr/bin/env bash # Usage: ./build.sh <directory-to-package> <ZIP-file-to-create> # # Defaults: # src/ -> build.zip set -o errexit set -o pipefail set -o nounset #set -o xtrace SRC_DIR="${1:-src}"; SRC_DIR="${SRC_DIR%%/}" OUT_FILE="${2:-build.zip}" HASH_FILE="${3:-}" make_absolute() { if [ -z "${1}" ]; then return fi case "${1}" in /*) echo "${1}";; # already absolute path *) echo "${PWD}/${1}";; esac } OUT_FILE="$( make_absolute "${OUT_FILE}" )" HASH_FILE="$( make_absolute "${HASH_FILE}" )" # make sure the file is empty. Zip will *add* if the file exists rm -f "${OUT_FILE}" BUILD_DIR=`mktemp -d 2>/dev/null || mktemp -d -t 'build'` # Linux & BSD-compatible cp -a "${SRC_DIR}/" "${BUILD_DIR}" ( cd "${BUILD_DIR}" rm -rf node_modules || true npm install --production # Optionally clean up caches if [ -n "${HASH_FILE}" ]; then git init -q echo "package.json" > .gitignore # npm stores absolute paths in there git add --all git commit --no-gpg-sign -qm 'whatever' git cat-file -p HEAD | grep '^tree' | awk '{print $2}' > "${HASH_FILE}" echo "Content hash: $(< "${HASH_FILE}" )" rm -rf .git fi zip "${OUT_FILE}" -r . )
true
3527270dc2795c8be294c5510b9e8f0f23ede9f1
Shell
MadlifeZhou/itmo-parallel-computing
/find-size.sh
UTF-8
427
4.15625
4
[]
no_license
#!/usr/bin/env bash
# USAGE: ./find-size.sh -b PATH_TO_BINARY_FILE -s STEP -t TIME
# Grows the problem size by STEP until the binary's reported time (its 3rd
# output field) reaches TIME, then prints the size that crossed the threshold.

while getopts b:t:s: option
do
  case "${option}" in
    b) BINARY=${OPTARG};;
    t) TIME=${OPTARG};;
    s) STEP=${OPTARG};;
  esac
done

COUNT=0
while :
do
  # Arithmetic expansion replaces the external `expr` call.
  COUNT=$(( COUNT + STEP ))
  CURRENT_TIME=$("${BINARY}" "${COUNT}" | awk '{print $3}')
  if [ "${CURRENT_TIME}" -ge "${TIME}" ]
  then
    echo "${COUNT}"
    break
  fi
done
true
3a2d8d2803b84a1435435a6c22d08a952c5039b9
Shell
vipoolmakanji/dotfiles
/src/shell/zsh/functions/custom.zsh
UTF-8
74
2.578125
3
[ "MIT" ]
permissive
# Source every custom zsh function file so its definitions land in this shell.
for custom_fn in $HOME/.zsh/functions/custom/*.zsh; do
  source "$custom_fn"
done
true
45cd77a46b3118a2e0b60af166f50c4bf794c283
Shell
fhofherr/dot-files
/scripts/dotfiles.sh
UTF-8
2,056
3.796875
4
[]
no_license
#!/bin/bash : "${POETRY_URL:=https://install.python-poetry.org}" ASDF="$(command -v asdf 2>/dev/null)" CURL="$(command -v curl 2>/dev/null)" POETRY="$(command -v poetry 2>/dev/null)" PYTHON3="$(command -v python3 2>/dev/null)" set -eou pipefail function install_asdf() { local latest_tag if [[ -n "$ASDF" ]]; then return 0 fi if [[ ! -d "$HOME/.asdf" ]]; then git clone https://github.com/asdf-vm/asdf.git "$HOME/.asdf" fi pushd "$HOME/.asdf" latest_tag="$(git describe --tags --abbrev=0)" git checkout "$latest_tag" popd } function install_poetry() { if [[ -z "$POETRY" ]]; then echo "Installing poetry" if [ -z "$ASDF" ]; then echo "asdf is not installed" exit 1 fi "$ASDF" plugin add poetry || true "$ASDF" install poetry latest "$ASDF" global poetry latest "$ASDF" reshim poetry POETRY="$HOME/.asdf/shims/poetry" fi if [[ "$($POETRY config virtualenvs.in-project)" != "true" ]]; then "$POETRY" config virtualenvs.in-project true fi } function install_raspberrypi_dependencies() { sudo apt install \ autoconf \ automake \ bear \ build-essential \ cargo \ cmake \ curl \ fonts-noto-color-emoji \ g++ \ gettext \ libffi-dev \ libssl-dev \ libtool \ libtool-bin \ ninja-build \ pkg-config \ python3-dev \ universal-ctags \ unzip \ zsh } function install_dependencies() { case $(hostname) in pi400*) install_raspberrypi_dependencies ;; # TODO install dependencies for other hosts here. esac } if [[ -z "$PYTHON3" ]]; then echo "python3 is not installed" exit 1 fi if [[ -z "$POETRY" ]]; then install_dependencies install_asdf install_poetry fi if [[ ! -e "$(dirname "${BASH_SOURCE[0]}")/.venv" ]]; then "$POETRY" install fi "$POETRY" run dotfiles "${@}"
true
61b202f3ce8c360e5537e644db7e43d9b1236d4f
Shell
Appdynamics/php-demo
/startBundy.sh
UTF-8
3,076
2.8125
3
[]
no_license
#!/bin/bash # This is a script to start Bundy on Docker # Set variables CONTR_HOST= CONTR_PORT= VERSION= APP_NAME= COM_TIER_NAME= COM_NODE_NAME= FUL_TIER_NAME= FUL_NODE_NAME= INV_TIER_NAME= INV_NODE_NAME= CTRLR_ACCOUNT= CTRLR_KEY= GLOBAL_ACCOUNT_NAME= EUM_KEY= BEACON_HOST= BEACON_PORT= EVENT_ENDPOINT= # This is used for integrating C++ Demo App (leave blank for defaults) C_EXIT= C_HOST= echo "${CONTR_HOST} is the controller name and ${CONTR_PORT} is the controller port" # Pull images docker pull appdynamics/bundy_base:latest docker pull appdynamics/bundy_db:latest docker pull appdynamics/bundy_mem:latest docker pull appdynamics/bundy_inv:${VERSION} docker pull appdynamics/bundy_ful:${VERSION} docker pull appdynamics/bundy_web:${VERSION} docker pull appdynamics/bundy_load:test # Start containers docker run -d --name bundy_db -p 3306:3306 -p 2222:22 -v /etc/localtime:/etc/localtime:ro appdynamics/bundy_db:latest sleep 10 docker run -d --name bundy_mem -p 11211:11211 -v /etc/localtime:/etc/localtime:ro appdynamics/bundy_mem:latest sleep 10 docker run -d --name bundy_inv -e CONTROLLER=${CONTR_HOST} -e APPD_PORT=${CONTR_PORT} -e APP_NAME=${APP_NAME} -e INV_TIER_NAME=${INV_TIER_NAME} -e INV_NODE_NAME=${INV_NODE_NAME} -e CTRLR_ACCOUNT=${CTRLR_ACCOUNT} -e CTRLR_KEY=${CTRLR_KEY} --link bundy_mem:bundy_mem -v /etc/localtime:/etc/localtime:ro appdynamics/bundy_inv:${VERSION} sleep 10 docker run -d --name bundy_ful -h Fulfillment-Node1 -e CONTROLLER=${CONTR_HOST} -e APPD_PORT=${CONTR_PORT} -e APP_NAME=${APP_NAME} -e FUL_TIER_NAME=${FUL_TIER_NAME} -e FUL_NODE_NAME=${FUL_NODE_NAME} -e CTRLR_ACCOUNT=${CTRLR_ACCOUNT} -e CTRLR_KEY=${CTRLR_KEY} -e GLOBAL_ACCOUNT_NAME=${GLOBAL_ACCOUNT_NAME} -e EVENT_ENDPOINT=${EVENT_ENDPOINT} -e C_EXIT=${C_EXIT} -e C_HOST=${C_HOST} --link bundy_db:bundy_db --link bundy_mem:bundy_mem -v /etc/localtime:/etc/localtime:ro appdynamics/bundy_ful:${VERSION} sleep 10 docker run -d --name bundy_web -h Commerce-Node1 -e CONTROLLER=${CONTR_HOST} -e 
APPD_PORT=${CONTR_PORT} -e APP_NAME=${APP_NAME} -e COM_TIER_NAME=${COM_TIER_NAME} -e COM_NODE_NAME=${COM_NODE_NAME} -e EUM_KEY=${EUM_KEY} -e CTRLR_ACCOUNT=${CTRLR_ACCOUNT} -e CTRLR_KEY=${CTRLR_KEY} -e GLOBAL_ACCOUNT_NAME=${GLOBAL_ACCOUNT_NAME} -e EVENT_ENDPOINT=${EVENT_ENDPOINT} -p 80:80 --link bundy_db:bundy_db --link bundy_mem:bundy_mem --link bundy_ful:bundy_ful --link bundy_inv:bundy_inv -v /etc/localtime:/etc/localtime:ro appdynamics/bundy_web:${VERSION} sleep 30 docker run -d --name bundy_load -e BEACON_HOST=${BEACON_HOST} -e BEACON_PORT=${BEACON_PORT} -e RUM_KEY=${EUM_KEY} --link bundy_web:bundy_web appdynamics/bundy_load:test sleep 10 # Install adrum.js if present if [ -e '/root/adrum.js' ]; then echo "Copying adrum.js to /var/www/html/demoapp/Symfony/web/js/adrum.js in bundy_web container" docker exec -i bundy_web bash -c 'cat > /var/www/html/demoapp/Symfony/web/js/adrum.js' < adrum.js docker exec -i bundy_web bash -c 'rm -rf /var/www/html/demoapp/Symfony/app/cache/prod/*' else echo "No adrum.js present" fi exit 0
true
78e9e8f976c9882fae8d0e85c1294ef8f4f6d252
Shell
MuAlphaOmegaEpsilon/sinecrunch
/benchmark/build.sh
UTF-8
349
3.65625
4
[ "MIT" ]
permissive
#!/bin/sh
set -euf

# Build the benchmark binary inside ./bin, which must have been created by a
# prior configure step.

# Navigate to the benchmark folder (the directory containing this script)
cd "$(dirname "$0")"

### COLORING SCHEME ###
ORANGE=$(tput setaf 3)
NOCOLOR=$(tput sgr0)

if [ -d bin ]; then
    cd bin
    printf "\\n%sBUILDING BINARY%s\\n" "${ORANGE}" "${NOCOLOR}"
    cmake --build . --parallel
else
    printf "\\n%sNo build folder found, aborting%s\\n" "${ORANGE}" "${NOCOLOR}" >&2
    # BUG FIX: the original printed "aborting" but fell through with exit
    # status 0, so callers could not detect the failure.
    exit 1
fi
true
8646f382b8232d16dc44ec991024276be8e7d033
Shell
space-concordia-robotics/robotics-prototype
/robot/rover/power-analysis/initial_scrape.sh
UTF-8
375
3.375
3
[]
no_license
#!/usr/bin/env bash
# Scrape voltage/temperature/current lines out of a PdsNode.py log file.
# Optional first argument overrides the log path.

if [ -z "$1" ]; then
    # default location for PdsNode.py logs
    LOG=~/.ros/log/pds_node.log
else
    LOG=$1
    echo "$1"
fi

# Echo every line that carries one of the measured quantities; a single case
# with glob alternation replaces the original =~ substring chain.
while read p; do
    case "$p" in
        *"voltage="* | *"temps="* | *"currents="*)
            echo "$p"
            ;;
    esac
done <$LOG
true
94a258a8259b08f2b13005292f097afe15f608c3
Shell
zachlebar/debut
/syscheck
UTF-8
697
3.40625
3
[]
no_license
#!/bin/sh
# Debut's System Check
# --------------------
# Looking for required programs like lua, ruby, sass, etc.

# check_prog CMD LABEL PREFIX
#   CMD    - command to look for on PATH
#   LABEL  - upper-case name used in the "NO ... PRESENT" message
#   PREFIX - optional text printed before the `-v` output
# Prints the program's version line when found, or an install hint otherwise.
check_prog() {
    prog_path=$(command -v "$1")
    if [ -n "$prog_path" ]; then
        echo "${3}$("$prog_path" -v)"
    else
        echo "NO $2 PRESENT. Install?"
    fi
}

# Check for Ruby
check_prog ruby "RUBY" ""
# Check for Ruby Gems
check_prog gem "RUBY GEMS" "Ruby Gems Version: "
# Check for Lua
check_prog lua "LUA" ""
# Check for SASS
check_prog sass "SASS" ""
true
a2ea6f642e4957025637bcd868696a1c45a4b19e
Shell
heltondoria/alpine-base-platform
/install
UTF-8
507
2.734375
3
[ "MIT" ]
permissive
#!/bin/bash -el SOURCE_DIR=/var/lib/tsuru source ${SOURCE_DIR}/base/rc/config mkdir -p /home/application adduser -D -s /bin/bash ${USER} chown ${USER}:${USER} /home/application echo "${USER} ALL=(ALL) NOPASSWD:ALL" >> /etc/sudoers wget https://github.com/tsuru/deploy-agent/releases/download/0.2.5/deploy-agent_0.2.5_linux_amd64.tar.gz tar zxvf deploy-agent_0.2.5_linux_amd64.tar.gz mv tsuru_unit_agent /usr/sbin/tsuru_unit_agent rm deploy-agent_0.2.5_linux_amd64.tar.gz mkdir -p ${SOURCE_DIR}/default
true
89d0c3234048383dea36791702addaed390db741
Shell
BenWhitehead/.dotfiles
/fs/home/user/.bash/function/csv.bash
UTF-8
219
3.34375
3
[ "MIT" ]
permissive
#!/bin/bash

# csvToTsv FILE
# Convert comma-separated input to tab-separated output. FPAT keeps quoted
# fields containing commas intact (FPAT requires GNU awk).
function csvToTsv {
  # "$1" quoted so filenames with spaces work.
  awk '{$1=$1}1' FPAT="([^,]+)|(\"[^\"]+\")" OFS='\t' "$1"
}

# readcsv FILE...
# Pretty-print CSV columns in a horizontally scrollable pager.
function readcsv {
  column -t -s ',' "${@}" | less -S -# 10
}

# readtsv FILE...
# Pretty-print TSV columns in a horizontally scrollable pager.
function readtsv {
  column -t -s $'\t' "${@}" | less -S -# 10
}
true
5a7d0f30b2852a7ae0761c3c83390a55a48b6ae7
Shell
lcm-linux/ansible
/roles/raid_config/files/cat.sh
UTF-8
1,823
2.875
3
[]
no_license
#!/bin/bash #查看所有磁盘状态 /opt/MegaRAID/MegaCli/MegaCli64 -PDList -aAll -Nolog | grep -i -E 'state|Slot\ Number' #获取RAID适配器的信息(关键) /opt/MegaRAID/MegaCli/MegaCli64 -AdpGetPciInfo -aAll -Nolog #这条命令主要要找到类似这个Adpater id: 0 #example #Device Number : 0 #这里看到适配器信息, Controller 0:适配器id为0,后面的命令中的 -a0 就是对应于这个参数 # #获取硬盘背板信息(关键) /opt/MegaRAID/MegaCli/MegaCli64 -EncInfo -a0 -Nolog #example # 这条命令主要是要找到类似这个 Enclosure Device ID: 32 ## 即 ## Device ID : 32 #Enclosure Device ID: 32 ## Number of Physical Drives : 14 # 服务器上现有的物理磁盘数 #查看当前各个虚拟磁盘中包含了哪些物理磁盘,数字代表(硬盘id)插槽编号 /opt/MegaRAID/MegaCli/MegaCli64 -LdPdInfo -a0 -Nolog | grep -E "Virtual Drive:|Slot Number:" | xargs | sed -r 's/(Slot Number:)(\s[0-9]+)/\2,/g' | sed 's/(Target Id: .)/Physical Drives ids:/g' | sed 's/Virtual Drive:/\nVirtual Drive:/g' #example #Virtual Drive: 0 Physical Drives ids: 12, 13, #Virtual Drive: 1 Physical Drives ids: 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, #导入foreign磁盘 # #注意: 如果是raid卡导致的线上磁盘意外下线变成了foreign状态,则可以直接倒入。不要强制清除foreign状态,会丢失这个磁盘的所有数据。 # #/opt/MegaRAID/MegaCli/MegaCli64 -CfgForeign -Import -a0 -Nolog #清除Foreign状态 # #注意: 有时候服务器上的磁盘会莫名其妙的变成foreign状态,此时不可以用下面的这个命令,因为这个命令会清理掉磁盘的文件系统和数据,要用import命令进行倒入。 # #/opt/MegaRAID/MegaCli/MegaCli64 -CfgForeign -Clear -a0 -Nolog
true
ff48a9cad8cf136a4708a57c2bec9bc05520e0c4
Shell
illuzian/Quick-References
/quick_reference.zsh
UTF-8
219
2.609375
3
[]
no_license
# -z : Compress archive using gzip program in Linux or Unix # -c : Create archive on Linux # -v : Verbose i.e display progress while creating archive # -f : Archive File name tar -zcvf archive-name.tar.gz directory-name
true
85766667422fa72ea4154ae95f76a0dcdd05cee0
Shell
teogor/fprime
/docs/doxygen/generate_docs.bash
UTF-8
1,648
3.921875
4
[ "Apache-2.0" ]
permissive
#!/bin/bash #### # generate_docs.sh: A crude wrapper for generating fprime documents for API documents. #### SOURCE_DIR=`dirname $BASH_SOURCE` DOXYGEN="doxygen" FPRIME=`cd ${SOURCE_DIR}/../../; pwd` DOXY_OUTPUT="${FPRIME}/docs/UsersGuide/api/c++" CMAKE_OUTPUT="${FPRIME}/docs/UsersGuide/api/cmake" PY_OUTPUT="${FPRIME}/docs/UsersGuide/api/python" FPRIME_UTIL=`which fprime-util` if [[ "${FPRIME_UTIL}" == "" ]] then echo "[ERROR] Cannot run docs gen without sourcing an fprime virtual environment" exit 1 fi # Doxygen generation ( cd "${FPRIME}" if [ -e "${DOXY_OUTPUT}.prev" ] then echo "[ERROR] Backup already exists at ${DOXY_OUTPUT}.prev" exit 1 fi fprime-util build -j32 if (( $? != 0 )) then echo "[ERROR] Failed to build fprime please generate build cache" exit 2 fi mv "${DOXY_OUTPUT}" "${DOXY_OUTPUT}.prev" ${DOXYGEN} "${FPRIME}/docs/doxygen/Doxyfile" ) || exit 1 # CMake ( cd "${FPRIME}" if [ -e "${CMAKE_OUTPUT}.prev" ] then echo "[ERROR] Backup already exists at ${CMAKE_OUTPUT}.prev" exit 1 fi mv "${CMAKE_OUTPUT}" "${CMAKE_OUTPUT}.prev" mkdir -p "${CMAKE_OUTPUT}" "${FPRIME}/cmake/docs/docs.py" "${FPRIME}/cmake/" "${FPRIME}/docs/UsersGuide/api/cmake" ) || exit 1 # Python ( cd "${FPRIME}/Fw/Python/docs" if [ -e "${PY_OUTPUT}.prev" ] then echo "[ERROR] Backup already exists at ${PY_OUTPUT}.prev" exit 1 fi mv "${PY_OUTPUT}" "${PY_OUTPUT}.prev" "${FPRIME}/Fw/Python/docs/gendoc.bash" cd "${FPRIME}/Gds/docs" "${FPRIME}/Gds/docs/gendoc.bash" ) || exit 1
true
f348cc6d5548e46a3c248a793a0307aceb152e73
Shell
jeshan/lambdatv
/multi-tier-architecture-todomvc-part-2/aws/3-check-cloudfront-status.sh
UTF-8
348
2.8125
3
[ "BSD-2-Clause", "BSD-3-Clause" ]
permissive
#!/usr/bin/env bash
# Print the deployment status of the CloudFront distribution serving $DOMAIN.
# Fill in DOMAIN before running (intentionally left blank).
DOMAIN=

# Look up the distribution whose alias list contains $DOMAIN.
# $(...) replaces backticks; the JMESPath query is interpolated as before.
CLOUDFRONT_DISTRIBUTION_ID=$(aws cloudfront list-distributions --query \
    "DistributionList.Items[*] | [?Aliases.Items[?contains(@, '"$DOMAIN"')]] | [0].Id" --output text)

echo "Cloudfront distribution status:"
aws cloudfront get-distribution --id "$CLOUDFRONT_DISTRIBUTION_ID" --query Distribution.Status --output text
true
879f0cd0dafcd3d174e0c41cb5d027ecbd7cffc0
Shell
oskarkleincentre/MachineLearningInAstronomy
/install/install_python.sh
UTF-8
1,669
3.953125
4
[ "MIT" ]
permissive
### This script will install miniconda packages to ${HOME}/astroml_miniconda3 by default or to a user specified
### location specified through ```$0 path/mydir``` , where path must already exist.

## Directory to install miniconda to
miniconda_dir=${HOME}/astroml_miniconda3

if [ $# -gt 1 ]; then
    echo "illegal number of parameters"
    # BUG FIX: previously only warned and carried on with the default dir.
    exit 1
fi
if [ $# -eq 1 ]; then
    # BUG FIX: was `miniconda_dir=$@`, which mis-assigns with multiple words.
    miniconda_dir=$1
    echo "${miniconda_dir}"
    if [ -d "${miniconda_dir}" ]; then
        echo "install directory ${miniconda_dir} already exists, please delete first to clobberr"
        exit
    fi
    dir=$(dirname "${miniconda_dir}")
    if [ ! -d "${dir}" ]; then
        echo "path to miniconda install does not exist"
        exit
    fi
fi
echo " installing python to $miniconda_dir"

## Download miniconda
# miniconda version
MINICONDA_VERSION="latest"
# platform linux/osx
system=$(uname -s)
echo "${system}"
if [ "$system" = "Linux" ]; then
    platform="Linux"
elif [ "$system" = "Darwin" ]; then
    platform="MacOSX"
else
    # BUG FIX: unknown platforms used to fall through and download a bogus
    # URL; also `echo "...\n"` printed a literal backslash-n.
    printf 'Platform unknown\n'
    exit 1
fi
printf 'platform = %s\n' "${platform}"

fname="Miniconda3-${MINICONDA_VERSION}-${platform}-x86_64.sh"
url="https://repo.continuum.io/miniconda/${fname}"
curl -OL "${url}"

## Install miniconda (-b: batch/non-interactive, -p: prefix)
bash "${fname}" -b -p "${miniconda_dir}"
printf "You can clean up by typing \n rm -f %s\n" "$fname"

# Record the PATH setup for later shells, then create the conda environment.
echo "In order to use this python, please set your path to include ${miniconda_dir}/bin, for example in bash:"
echo "export PATH=${miniconda_dir}"'/bin:$PATH'
echo "export PATH=${miniconda_dir}"'/bin:$PATH'>./install/setup.sh
export PATH=${miniconda_dir}/bin:$PATH
conda env create -n astroml --file environment.yml
echo "source activate astroml">>./install/setup.sh
true
7cee174744ebd8884fed2ffce3f9a1279a89508f
Shell
muccg/docker-ant
/build.sh
UTF-8
784
3.6875
4
[]
no_license
#!/bin/sh
#
# Script to build images
#

# break on error
set -e

REPO="muccg"
DATE=$(date +%Y.%m.%d)

# build dirs, top level is the java version, second level the ant version
for javadir in */
do
    javaver=$(basename "${javadir}")
    for antdir in "${javadir}"*/
    do
        antver=$(basename "${antdir}")
        image="${REPO}/ant:${javaver}-${antver}"
        echo "################################################################### ${image}"
        ## warm up cache for CI
        docker pull "${image}" || true
        ## build
        docker build --pull=true -t "${image}-${DATE}" "${antdir}"
        docker build -t "${image}" "${antdir}"
        ## for logging in CI
        docker inspect "${image}-${DATE}"
        # push
        docker push "${image}-${DATE}"
        docker push "${image}"
    done
done
true
539eaaf7e586d4dd36f2bedab0292b6717cb5459
Shell
UniqKey/ocd-slackbot
/bin/oc_wrapper.sh
UTF-8
712
3.3125
3
[ "MIT" ]
permissive
#!/bin/bash
# Wrapper around `oc` that transparently re-authenticates when the previous
# login session has timed out. Requires APP_ROOT, and on re-login
# OPENSHIFT_SERVER / OPENSHIFT_USER / OPENSHIFT_PASSWORD.

# try assuming our previous login hasn't timed out
# ("$@" quoted so arguments containing spaces survive intact)
"$APP_ROOT"/oc "$@" 2>/dev/null

# if it didn't work, assume that our previous login has timed out
if [[ "$?" != "0" ]]; then
    if [ -z "$OPENSHIFT_USER" ]; then
        (>&2 echo "ERROR could not login OPENSHIFT_USER not set")
        exit 1
    fi
    if [ -z "$OPENSHIFT_PASSWORD" ]; then
        # BUG FIX: message previously began with a stray "echo ".
        (>&2 echo "ERROR could not login OPENSHIFT_PASSWORD not set")
        exit 2
    fi

    # do login
    "$APP_ROOT"/oc login "${OPENSHIFT_SERVER}" -u "${OPENSHIFT_USER}" -p "${OPENSHIFT_PASSWORD}" > /dev/null
    if [[ "$?" != "0" ]]; then
        (>&2 echo "ERROR Could not oc login. Exiting")
        exit 1
    fi

    #try again
    "$APP_ROOT"/oc "$@"
fi
true
ba1f4928001bc9d53c97321ee1ae2c64eac8ec97
Shell
IBM-Cloud/terraform-ibmcloud-modules
/modules/ibm_services/shell/is_vpc_routing_table_route/scripts/create.sh
UTF-8
2,833
3.125
3
[ "Apache-2.0" ]
permissive
#!/bin/bash set -ex echo "creating..." IN=$(cat) echo "stdin: ${IN}" echo "vpc rtbl route name: $VPC_RTBL_RT_NAME" echo "vpc id: $VPC_ID" echo "vpc rtbl id: $VPC_RTBL_ID" echo "zone name: $ZONE_NAME" echo "destination: $DESTINATION" echo "next_hop: $NEXT_HOP" echo "action: $ACTION" ibmcloud login -a $IBMCLOUD_API_ENDPOINT -r $IBM_REGION -g $RESOURCE_GROUP ibmcloud is target --gen 2 if [ "$ACTION" = "deliver" ]; then is_vpc_rtbl_rt=$(ibmcloud is vpc-rtbl-rtc $VPC_ID $VPC_RTBL_ID \ --name $VPC_RTBL_RT_NAME \ --zone $ZONE_NAME \ --destination $DESTINATION \ --next-hop $NEXT_HOP \ --action $ACTION \ --output json) else is_vpc_rtbl_rt=$(ibmcloud is vpc-rtbl-rtc $VPC_ID $VPC_RTBL_ID \ --name $VPC_RTBL_RT_NAME \ --zone $ZONE_NAME \ --destination $DESTINATION \ --action $ACTION \ --output json) fi ibmcloud logout echo $is_vpc_rtbl_rt # root@8d3d894bd869:/system-tf-base# ic is vpc-rtbl-rtc --help # NAME: # vpc-routing-table-route-create - Create a VPC route # # USAGE: # ibmcloud is vpc-routing-table-route-create VPC ROUTING_TABLE --zone ZONE_NAME --destination DESTINATION_CIDR --next-hop NEXT_HOP [--action delegate | deliver | drop] [--name NAME] [--output JSON] [-q, --quiet] # VPC: ID of the VPC. # ROUTING_TABLE: ID of the VPC routing table. 
# # EXAMPLE: # ibmcloud is vpc-routing-table-route-create 72b27b5c-f4b0-48bb-b954-5becc7c1dcb3 72b27b5c-f4b0-48bb-b954-5becc7c1d456 --name my-vpc-route --action deliver --zone us-south-1 --destination 10.2.2.0/24 --next-hop 10.0.0.2 --output JSON # ibmcloud is vpc-routing-table-route-create 72b27b5c-f4b0-48bb-b954-5becc7c1dcb3 72b27b5c-f4b0-48bb-b954-5becc7c1d456 --name my-vpc-route --action delegate --zone us-south-1 --destination 10.2.2.0/24 --output JSON # ibmcloud is vpc-routing-table-route-create 72b27b5c-f4b0-48bb-b954-5becc7c1dcb3 72b27b5c-f4b0-48bb-b954-5becc7c1d456 --name my-vpc-route --action drop --zone us-south-1 --destination 10.2.2.0/24 --output JSON # # OPTIONS: # --zone value Name of the zone # --action value The action to perform with a packet matching the route. Enumeration type: delegate, deliver, drop. # --destination value The destination CIDR of the route. At most two routes per zone in a table can have the same destination, and only if both routes have an action of deliver. # --next-hop value If the action is 'deliver', the IP address or VPN connection ID of the next hop to which to route packets # --name value Name of the VPC routing table. # --output value Specify output format, only JSON is supported now. Enumeration type: JSON. # -q, --quiet Suppress verbose output # # root@8d3d894bd869:/system-tf-base#
true
2a2ef3a38982805348a11e8ee004544384253a81
Shell
baokui/text_search
/getData/dataProcess/Feature_Engineering/feature_user/run_main.sh
UTF-8
871
2.828125
3
[]
no_license
# Usage: run_main.sh N_DAYS_AGO
# Computes the date N days ago (YYYYMMDD) and launches run_join.sh for every
# hex shard 0-9, a-f, each logging to ./log/<shard>.log.
nDay=$1
curYear=$(date -d "$nDay day ago" +"%Y")
curMonth=$(date -d "$nDay day ago" +"%m")
curDay=$(date -d "$nDay day ago" +"%d")
Date=$curYear$curMonth$curDay
##########################################################
# One background worker per shard; the original spelled out shards 8-f as
# individual copy-pasted lines — a single loop covers all of them.
for k in 0 1 2 3 4 5 6 7 8 9 a b c d e; do
    nohup sh run_join.sh $k $Date >> ./log/$k.log 2>&1 &
done
# Shard 'f' deliberately runs in the foreground (no trailing '&'), matching
# the original script's behavior — confirm whether that was intended.
k=f
nohup sh run_join.sh $k $Date >> ./log/$k.log 2>&1
true
7947f856b47e60f2199e334c286ff146688355ba
Shell
delkyd/alfheim_linux-PKGBUILDS
/phpsh/PKGBUILD
UTF-8
551
2.65625
3
[]
no_license
# Maintainer: DerekTBrown derek@allderek.com
pkgname=phpsh
pkgver=1.3.20.g8427a3c
pkgrel=1
pkgdesc="PHPSH is a shell version of PHP"
url="http://www.phpsh.org/"
arch=('x86_64' 'i686')
# BUG FIX: the entries were comma-joined ('readline>=6.0','python2>=2.6'),
# which makepkg treats as ONE malformed dependency; bash array elements must
# be whitespace-separated.
depends=('readline>=6.0' 'python2>=2.6')
provides=("$pkgname")
conflicts=("$pkgname")
source=("$pkgname::git+https://github.com/DerekTBrown/phpsh.git")
md5sums=('SKIP')

# Derive the version from the latest git tag (dashes mapped to dots).
pkgver() {
  cd "$srcdir/$pkgname"
  git describe --tags | sed 's|-|.|g'
}

build() {
  cd "$srcdir/$pkgname"
  python2 setup.py build
}

package() {
  cd "$srcdir/$pkgname"
  # NOTE(review): sudo inside package() is unusual — makepkg runs this under
  # fakeroot, and install should normally target "$pkgdir". Kept as-is.
  sudo python2 setup.py install
}
true
c720985156511972827575075d9b1b96dbab0131
Shell
aina91/FivePrime
/PeakCallingPipeline/RunRSEM.sh
UTF-8
809
3.359375
3
[]
no_license
#!/bin/bash

##This command takes in a bam file (with some extra information) to run RSEM on it.
##The result is used to find 'covered' genes (genes with 5' reads on them), which is used in calculating the false positive
##Inputs:
##1) Name of downsampled bam file
##2) Name used for temporary fastq file
##3) Name used for the output from RSEM
##4) The location of the index for RSEM
##
##Assumes have java installed, have the jar file for PICARD on your computer, and have RSEM installed/ on your path
##
##

BAM=$1
FASTQ=$2
RSEM_OUT=$3
RSEM_INDEX=$4

PICARD=PICARD/picard.jar
rsem=rsem-calculate-expression

echo "Convert bam into fastq file!"
# All path expansions quoted so filenames with spaces work.
java -Xmx8g -jar "$PICARD" SamToFastq INPUT="$BAM" FASTQ="$FASTQ"

echo "Run RSEM!"
$rsem --no-bam-output --quiet --bowtie-chunkmbs 512 "$FASTQ" "$RSEM_INDEX" "$RSEM_OUT"
true
7a64fe4da12efa1e36c128387d4365da87339521
Shell
Thewessen/hello-world
/Exercism/bash/acronym/acronym.sh
UTF-8
134
3.234375
3
[ "MIT" ]
permissive
#!/usr/bin/env bash
set -o errexit
set -o nounset

# acronym PHRASE
# Print the upper-cased first letter of each word (no trailing newline).
# Words are separated by whitespace, '-', '_' or '*'.
acronym() {
  local phrase=$1 word first
  # The substitution maps the extra delimiters to spaces; the expansion is
  # deliberately unquoted so word-splitting yields one word per iteration.
  for word in ${phrase//[-_*]/ }; do
    first=${word::1}
    # BUG FIX: the original passed the character AS the printf format string,
    # which misbehaves for '%' or '\' characters; use an explicit '%s'.
    printf '%s' "${first^^}"
  done
}

# Guard so the script no longer aborts under `nounset` when run with no args.
if (( $# )); then
  acronym "$1"
fi
true
10c8fabb480415e8a4d7b9f4f5e9e904bfb97181
Shell
guokeyixiao/toolsbox
/monitorlog.sh
UTF-8
1,007
3.6875
4
[]
no_license
#!/bin/bash # # Log monitoring # #Please in '#' place accordingly while : do today=`date "+%Y%m%d"` #Fill in the log file absolute path log_file=/tmp/test.log #Fill in the script execution record log files out_log=/tmp/${today}.log #Fill in monitoring project name monitor_project=test #Fill in time interval with_time=5m #Fill in the restart script path sh_file=/tmp/restart_test.sh m_line_begin=`wc -l ${log_file} | awk '{print $1}'` sleep ${with_time} m_line_now=`wc -l ${log_file} | awk '{print $1}'` date_now=`date "+%Y%m%d %T"` if [ ${m_line_now} -gt ${m_line_begin} ] then echo "Time: ${date_now} ${monitor_project} Log check, the rolling to normal!" >> ${out_log} elif [ ${m_line_now} -lt ${m_line_begin} ] then echo "Time: ${date_now} ${monitor_project} Log check, the rolling to normal!" >> ${out_log} else echo "Time: ${date_now} ${monitor_project} Log check, not rolling!!! Execute the restart scripts!!!" >> ${out_log} sh ${sh_file} >>${out_log} 2>&1 fi done
true
df14b03b95f27d4eeae15d844356edc5ccbb8320
Shell
solana-labs/solana
/net/remote/remote-node.sh
UTF-8
15,552
3.265625
3
[ "Apache-2.0" ]
permissive
#!/usr/bin/env bash set -e cd "$(dirname "$0")"/../.. set -x deployMethod="$1" nodeType="$2" entrypointIp="$3" numNodes="$4" if [[ -n $5 ]]; then export RUST_LOG="$5" fi skipSetup="$6" failOnValidatorBootupFailure="$7" externalPrimordialAccountsFile="$8" maybeDisableAirdrops="$9" internalNodesStakeLamports="${10}" internalNodesLamports="${11}" nodeIndex="${12}" numBenchTpsClients="${13}" benchTpsExtraArgs="${14}" genesisOptions="${15}" extraNodeArgs="${16}" gpuMode="${17:-auto}" maybeWarpSlot="${18}" maybeFullRpc="${19}" waitForNodeInit="${20}" extraPrimordialStakes="${21:=0}" tmpfsAccounts="${22:false}" disableQuic="${23}" enableUdp="${24}" set +x missing() { echo "Error: $1 not specified" exit 1 } [[ -n $deployMethod ]] || missing deployMethod [[ -n $nodeType ]] || missing nodeType [[ -n $entrypointIp ]] || missing entrypointIp [[ -n $numNodes ]] || missing numNodes [[ -n $skipSetup ]] || missing skipSetup [[ -n $failOnValidatorBootupFailure ]] || missing failOnValidatorBootupFailure airdropsEnabled=true if [[ -n $maybeDisableAirdrops ]]; then airdropsEnabled=false fi cat > deployConfig <<EOF deployMethod="$deployMethod" entrypointIp="$entrypointIp" numNodes="$numNodes" failOnValidatorBootupFailure=$failOnValidatorBootupFailure genesisOptions="$genesisOptions" airdropsEnabled=$airdropsEnabled EOF source net/common.sh source multinode-demo/common.sh loadConfigFile initCompleteFile=init-complete-node.log cat > ~/solana/on-reboot <<EOF #!/usr/bin/env bash cd ~/solana source scripts/oom-score-adj.sh now=\$(date -u +"%Y-%m-%dT%H:%M:%SZ") ln -sfT validator.log.\$now validator.log EOF chmod +x ~/solana/on-reboot GPU_CUDA_OK=false GPU_FAIL_IF_NONE=false case "$gpuMode" in on) # GPU *required*, any vendor GPU_CUDA_OK=true GPU_FAIL_IF_NONE=true ;; off) # CPU-only ;; auto) # Use GPU if installed, any vendor GPU_CUDA_OK=true ;; cuda) # GPU *required*, CUDA-only GPU_CUDA_OK=true GPU_FAIL_IF_NONE=true ;; *) echo "Unexpected gpuMode: \"$gpuMode\"" exit 1 ;; esac case 
$deployMethod in local|tar|skip) PATH="$HOME"/.cargo/bin:"$PATH" export USE_INSTALL=1 ./fetch-perf-libs.sh cat >> ~/solana/on-reboot <<EOF PATH="$HOME"/.cargo/bin:"$PATH" export USE_INSTALL=1 ( sudo SOLANA_METRICS_CONFIG="$SOLANA_METRICS_CONFIG" scripts/oom-monitor.sh ) > oom-monitor.log 2>&1 & echo \$! > oom-monitor.pid scripts/fd-monitor.sh > fd-monitor.log 2>&1 & echo \$! > fd-monitor.pid scripts/net-stats.sh > net-stats.log 2>&1 & echo \$! > net-stats.pid scripts/iftop.sh > iftop.log 2>&1 & echo \$! > iftop.pid scripts/system-stats.sh > system-stats.log 2>&1 & echo \$! > system-stats.pid if ${GPU_CUDA_OK} && [[ -e /dev/nvidia0 ]]; then echo Selecting solana-validator-cuda export SOLANA_CUDA=1 elif ${GPU_FAIL_IF_NONE} ; then echo "Expected GPU, found none!" export SOLANA_GPU_MISSING=1 fi EOF case $nodeType in bootstrap-validator) set -x if [[ $skipSetup != true ]]; then clear_config_dir "$SOLANA_CONFIG_DIR" if [[ -n $internalNodesLamports ]]; then echo "---" >> config/validator-balances.yml fi setupValidatorKeypair() { declare name=$1 if [[ -f net/keypairs/"$name".json ]]; then cp net/keypairs/"$name".json config/"$name".json if [[ "$name" =~ ^validator-identity- ]]; then name="${name//-identity-/-vote-}" cp net/keypairs/"$name".json config/"$name".json name="${name//-vote-/-stake-}" cp net/keypairs/"$name".json config/"$name".json fi else solana-keygen new --no-passphrase -so config/"$name".json if [[ "$name" =~ ^validator-identity- ]]; then name="${name//-identity-/-vote-}" solana-keygen new --no-passphrase -so config/"$name".json name="${name//-vote-/-stake-}" solana-keygen new --no-passphrase -so config/"$name".json fi fi if [[ -n $internalNodesLamports ]]; then declare pubkey pubkey="$(solana-keygen pubkey config/"$name".json)" cat >> config/validator-balances.yml <<EOF $pubkey: balance: $internalNodesLamports owner: 11111111111111111111111111111111 data: executable: false EOF fi } for i in $(seq 1 "$numNodes"); do setupValidatorKeypair 
validator-identity-"$i" done setupValidatorKeypair blockstreamer-identity lamports_per_signature="42" # shellcheck disable=SC2206 # Do not want to quote $genesisOptions genesis_args=($genesisOptions) for i in "${!genesis_args[@]}"; do if [[ "${genesis_args[$i]}" = --target-lamports-per-signature ]]; then lamports_per_signature="${genesis_args[$((i+1))]}" break fi done for i in $(seq 0 $((numBenchTpsClients-1))); do # shellcheck disable=SC2086 # Do not want to quote $benchTpsExtraArgs solana-bench-tps --write-client-keys config/bench-tps"$i".yml \ --target-lamports-per-signature "$lamports_per_signature" $benchTpsExtraArgs # Skip first line, as it contains header tail -n +2 -q config/bench-tps"$i".yml >> config/client-accounts.yml echo "" >> config/client-accounts.yml done if [[ -f $externalPrimordialAccountsFile ]]; then cat "$externalPrimordialAccountsFile" >> config/validator-balances.yml fi if [[ -f config/validator-balances.yml ]]; then genesisOptions+=" --primordial-accounts-file config/validator-balances.yml" fi if [[ -f config/client-accounts.yml ]]; then genesisOptions+=" --primordial-accounts-file config/client-accounts.yml" fi if [[ -n $internalNodesStakeLamports ]]; then args+=(--bootstrap-validator-stake-lamports "$internalNodesStakeLamports") fi if [[ -n $internalNodesLamports ]]; then args+=(--bootstrap-validator-lamports "$internalNodesLamports") fi # shellcheck disable=SC2206 # Do not want to quote $genesisOptions args+=($genesisOptions) if [[ -f net/keypairs/faucet.json ]]; then export FAUCET_KEYPAIR=net/keypairs/faucet.json fi if [[ -f net/keypairs/bootstrap-validator-identity.json ]]; then export BOOTSTRAP_VALIDATOR_IDENTITY_KEYPAIR=net/keypairs/bootstrap-validator-identity.json fi if [[ -f net/keypairs/bootstrap-validator-stake.json ]]; then export BOOTSTRAP_VALIDATOR_STAKE_KEYPAIR=net/keypairs/bootstrap-validator-stake.json fi if [[ -f net/keypairs/bootstrap-validator-vote.json ]]; then export 
BOOTSTRAP_VALIDATOR_VOTE_KEYPAIR=net/keypairs/bootstrap-validator-vote.json fi echo "remote-node.sh: Primordial stakes: $extraPrimordialStakes" if [[ "$extraPrimordialStakes" -gt 0 ]]; then if [[ "$extraPrimordialStakes" -gt "$numNodes" ]]; then echo "warning: extraPrimordialStakes($extraPrimordialStakes) clamped to numNodes($numNodes)" extraPrimordialStakes=$numNodes fi for i in $(seq "$extraPrimordialStakes"); do args+=(--bootstrap-validator "$(solana-keygen pubkey "config/validator-identity-$i.json")" "$(solana-keygen pubkey "config/validator-vote-$i.json")" "$(solana-keygen pubkey "config/validator-stake-$i.json")" ) done fi multinode-demo/setup.sh "${args[@]}" maybeWaitForSupermajority= # shellcheck disable=SC2086 # Do not want to quote $extraNodeArgs set -- $extraNodeArgs while [[ -n $1 ]]; do if [[ $1 = "--wait-for-supermajority" ]]; then maybeWaitForSupermajority=$2 break fi shift done if [[ -z "$maybeWarpSlot" && -n "$maybeWaitForSupermajority" ]]; then maybeWarpSlot="--warp-slot $maybeWaitForSupermajority" fi if [[ -n "$maybeWarpSlot" ]]; then # shellcheck disable=SC2086 # Do not want to quote $maybeWarSlot solana-ledger-tool -l config/bootstrap-validator create-snapshot 0 config/bootstrap-validator $maybeWarpSlot fi solana-ledger-tool -l config/bootstrap-validator shred-version --max-genesis-archive-unpacked-size 1073741824 | tee config/shred-version if [[ -n "$maybeWaitForSupermajority" ]]; then bankHash=$(solana-ledger-tool -l config/bootstrap-validator bank-hash --halt-at-slot 0) extraNodeArgs="$extraNodeArgs --expected-bank-hash $bankHash" echo "$bankHash" > config/bank-hash fi fi args=( --gossip-host "$entrypointIp" --gossip-port 8001 --init-complete-file "$initCompleteFile" ) if [[ "$tmpfsAccounts" = "true" ]]; then args+=(--accounts /mnt/solana-accounts) fi if $maybeFullRpc; then args+=(--enable-rpc-transaction-history) args+=(--enable-extended-tx-metadata-storage) fi if $disableQuic; then args+=(--tpu-disable-quic) fi if $enableUdp; then 
args+=(--tpu-enable-udp) fi if [[ $airdropsEnabled = true ]]; then cat >> ~/solana/on-reboot <<EOF ./multinode-demo/faucet.sh > faucet.log 2>&1 & EOF fi # shellcheck disable=SC2206 # Don't want to double quote $extraNodeArgs args+=($extraNodeArgs) cat >> ~/solana/on-reboot <<EOF nohup ./multinode-demo/bootstrap-validator.sh ${args[@]} > validator.log.\$now 2>&1 & pid=\$! oom_score_adj "\$pid" 1000 disown EOF ~/solana/on-reboot if $waitForNodeInit; then net/remote/remote-node-wait-init.sh 600 fi ;; validator|blockstreamer) if [[ $deployMethod != skip ]]; then net/scripts/rsync-retry.sh -vPrc "$entrypointIp":~/.cargo/bin/ ~/.cargo/bin/ net/scripts/rsync-retry.sh -vPrc "$entrypointIp":~/version.yml ~/version.yml fi if [[ $skipSetup != true ]]; then clear_config_dir "$SOLANA_CONFIG_DIR" if [[ $nodeType = blockstreamer ]]; then net/scripts/rsync-retry.sh -vPrc \ "$entrypointIp":~/solana/config/blockstreamer-identity.json "$SOLANA_CONFIG_DIR"/validator-identity.json else net/scripts/rsync-retry.sh -vPrc \ "$entrypointIp":~/solana/config/validator-identity-"$nodeIndex".json "$SOLANA_CONFIG_DIR"/validator-identity.json net/scripts/rsync-retry.sh -vPrc \ "$entrypointIp":~/solana/config/validator-stake-"$nodeIndex".json "$SOLANA_CONFIG_DIR"/stake-account.json net/scripts/rsync-retry.sh -vPrc \ "$entrypointIp":~/solana/config/validator-vote-"$nodeIndex".json "$SOLANA_CONFIG_DIR"/vote-account.json fi net/scripts/rsync-retry.sh -vPrc \ "$entrypointIp":~/solana/config/shred-version "$SOLANA_CONFIG_DIR"/shred-version net/scripts/rsync-retry.sh -vPrc \ "$entrypointIp":~/solana/config/bank-hash "$SOLANA_CONFIG_DIR"/bank-hash || true net/scripts/rsync-retry.sh -vPrc \ "$entrypointIp":~/solana/config/faucet.json "$SOLANA_CONFIG_DIR"/faucet.json fi args=( --entrypoint "$entrypointIp:8001" --gossip-port 8001 --rpc-port 8899 --expected-shred-version "$(cat "$SOLANA_CONFIG_DIR"/shred-version)" ) if [[ $nodeType = blockstreamer ]]; then args+=( --blockstream /tmp/solana-blockstream.sock 
--no-voting --dev-no-sigverify --enable-rpc-transaction-history ) else if [[ -n $internalNodesLamports ]]; then args+=(--node-lamports "$internalNodesLamports") fi fi if [[ ! -f "$SOLANA_CONFIG_DIR"/validator-identity.json ]]; then solana-keygen new --no-passphrase -so "$SOLANA_CONFIG_DIR"/validator-identity.json fi args+=(--identity "$SOLANA_CONFIG_DIR"/validator-identity.json) if [[ ! -f "$SOLANA_CONFIG_DIR"/vote-account.json ]]; then solana-keygen new --no-passphrase -so "$SOLANA_CONFIG_DIR"/vote-account.json fi args+=(--vote-account "$SOLANA_CONFIG_DIR"/vote-account.json) if [[ $airdropsEnabled != true ]]; then args+=(--no-airdrop) else args+=(--rpc-faucet-address "$entrypointIp:9900") fi if [[ -r "$SOLANA_CONFIG_DIR"/bank-hash ]]; then args+=(--expected-bank-hash "$(cat "$SOLANA_CONFIG_DIR"/bank-hash)") fi set -x # Add the faucet keypair to validators for convenient access from tools # like bench-tps and add to blocktreamers to run a faucet scp "$entrypointIp":~/solana/config/faucet.json "$SOLANA_CONFIG_DIR"/ if [[ $nodeType = blockstreamer ]]; then # Run another faucet with the same keypair on the blockstreamer node. # Typically the blockstreamer node has a static IP/DNS name for hosting # the blockexplorer web app, and is a location that somebody would expect # to be able to airdrop from if [[ $airdropsEnabled = true ]]; then cat >> ~/solana/on-reboot <<EOF multinode-demo/faucet.sh > faucet.log 2>&1 & EOF fi # Grab the TLS cert generated by /certbot-restore.sh if [[ -f /.cert.pem ]]; then sudo install -o $UID -m 400 /.cert.pem /.key.pem . 
ls -l .cert.pem .key.pem fi fi args+=(--init-complete-file "$initCompleteFile") # shellcheck disable=SC2206 # Don't want to double quote $extraNodeArgs args+=($extraNodeArgs) maybeSkipAccountsCreation= if [[ $nodeIndex -le $extraPrimordialStakes ]]; then maybeSkipAccountsCreation="export SKIP_ACCOUNTS_CREATION=1" fi if [[ "$tmpfsAccounts" = "true" ]]; then args+=(--accounts /mnt/solana-accounts) fi if $maybeFullRpc; then args+=(--enable-rpc-transaction-history) args+=(--enable-extended-tx-metadata-storage) fi if $disableQuic; then args+=(--tpu-disable-quic) fi if $enableUdp; then args+=(--tpu-enable-udp) fi cat >> ~/solana/on-reboot <<EOF $maybeSkipAccountsCreation nohup multinode-demo/validator.sh ${args[@]} > validator.log.\$now 2>&1 & pid=\$! oom_score_adj "\$pid" 1000 disown EOF ~/solana/on-reboot if $waitForNodeInit; then net/remote/remote-node-wait-init.sh 600 fi if [[ $skipSetup != true && $nodeType != blockstreamer && -z $maybeSkipAccountsCreation ]]; then # Wait for the validator to catch up to the bootstrap validator before # delegating stake to it solana --url http://"$entrypointIp":8899 catchup config/validator-identity.json args=( --url http://"$entrypointIp":8899 ) if [[ $airdropsEnabled != true ]]; then args+=(--no-airdrop) fi if [[ -f config/validator-identity.json ]]; then args+=(--keypair config/validator-identity.json) fi if [[ ${extraPrimordialStakes} -eq 0 ]]; then echo "0 Primordial stakes, staking with $internalNodesStakeLamports" multinode-demo/delegate-stake.sh --vote-account "$SOLANA_CONFIG_DIR"/vote-account.json \ --stake-account "$SOLANA_CONFIG_DIR"/stake-account.json \ --force \ "${args[@]}" "$internalNodesStakeLamports" else echo "Skipping staking with extra stakes: ${extraPrimordialStakes}" fi fi ;; *) echo "Error: unknown node type: $nodeType" exit 1 ;; esac ;; *) echo "Unknown deployment method: $deployMethod" exit 1 esac
true
3b409bae4365eb981cb5f1be781e11b25577964f
Shell
isi-vista/adam
/adam_preprocessing/color_refine_curriculum.sh
UTF-8
1,368
3.3125
3
[ "MIT" ]
permissive
#!/usr/bin/env bash #SBATCH --job-name=clrRefSgCurriculum #SBATCH --account=adam #SBATCH --partition=adam #SBATCH --time=23:00:00 # Number of hours required per node, max 24 on SAGA #SBATCH --ntasks=1 #SBATCH --cpus-per-task=4 #SBATCH --mem=32g #SBATCH --gpus-per-task=1 #SBATCH --nodes=1 #SBATCH --mail-type=FAIL,END #SBATCH --output=R-%x.%j.out # Need to run on ADAM partition (saga03 or adam-dev) because this uses Matlab. set -u if [[ "$#" -lt 2 ]] || [[ "$1" = "--help" ]] ; then printf '%s\n' "usage: $0 input_curriculum_dir output_curriculum_dir" python color_refine_curriculum.py --help exit 1 else input_curriculum_dir=$1 output_curriculum_dir=$2 shift 2 fi # Because we force there to be only 2 args and then shift by 2, "$@" should expand to nothing. # It's included as future-proofing in case we add/allow more args. # # Note that because CentOS 7 uses an old version of glibc, we have to preload a shim in order to # import the Matlab extension used in the stroke extraction code. This shim provides a definition # of __cxa_thread_atexit_impl which is only defined in glibc 2.18+. Without preloading, the # extension causes a crash due to an undefined symbol error. shim_path=/nas/gaia/adam/matlab/bin/glnxa64/glibc-2.17_shim.so LD_PRELOAD="$shim_path" python color_refine_curriculum.py "$input_curriculum_dir" "$output_curriculum_dir" "$@"
true
1abea3b4bbdf5d747d3ba2c2d3d033f5c7e06241
Shell
BrankoVukmirovic/falco
/userspace/engine/lua/parser-smoke.sh
UTF-8
1,250
3.34375
3
[ "GPL-2.0-only", "AGPL-3.0-only", "LicenseRef-scancode-openssl-exception-agpl3.0monit", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-free-unknown", "GPL-1.0-or-later" ]
permissive
#!/bin/bash function error_exit_good { echo "Error: '$1' did not compiler" 1>&2 exit 1 } function error_exit_bad { echo "Error: incorrect filter '$1' compiler ok" 1>&2 exit 1 } function good { lua5.1 test.lua "$1" 2> /dev/null || error_exit_good "$1" } function bad { lua5.1 test.lua "$1" 2> /dev/null && error_exit_bad "$1" } # Filters good " a" good "a and b" good "#a and b; a and b" good "#a and b; # ; ; a and b" good "(a)" good "(a and b)" good "(a.a exists and b)" good "(a.a exists) and (b)" good "a.a exists and b" good "a.a=1 or b.b=2 and c" good "not (a)" good "not (not (a))" good "not (a.b=1)" good "not (a.a exists)" good "not a" good "a.b = 1 and not a" good "not not a" good "(not not a)" good "not a.b=1" good "not a.a exists" good "notz and a and b" good "a.b = bla" good "a.b = 'bla'" good "a.b = not" good "a.b contains bla" good "a.b icontains 'bla'" good "a.g in (1, 'a', b)" good "a.g in ( 1 ,, , b)" good "evt.dir=> and fd.name=*.log" good "evt.dir=> and fd.name=/var/log/httpd.log" good "a.g in (1, 'a', b.c)" good "a.b = a.a" good "evt.arg[0] contains /bin" bad "evt.arg[a] contains /bin" bad "evt.arg[] contains /bin" bad "a.b = b = 1" bad "(a.b = 1" echo echo "All tests passed." exit 0
true
9b57e8c1b365d78d2e4856decdc51fd43c0bb992
Shell
matt-tyler/osb-demo
/setup.sh
UTF-8
2,128
2.5625
3
[ "Apache-2.0" ]
permissive
#!/bin/zsh # bootstrap cluster gcloud beta container --project "schnauzer-163208" \ clusters create "test-cluster" --zone "australia-southeast1-a" \ --no-enable-basic-auth \ --cluster-version "1.9.7-gke.11" \ --machine-type "n1-standard-2" \ --image-type "COS" \ --disk-type "pd-standard" --disk-size "30" \ --scopes "https://www.googleapis.com/auth/compute","https://www.googleapis.com/auth/devstorage.read_only","https://www.googleapis.com/auth/logging.write","https://www.googleapis.com/auth/monitoring","https://www.googleapis.com/auth/servicecontrol","https://www.googleapis.com/auth/service.management.readonly","https://www.googleapis.com/auth/trace.append" \ --num-nodes "2" \ --no-enable-cloud-logging \ --no-enable-cloud-monitoring \ --enable-ip-alias \ --network "projects/schnauzer-163208/global/networks/default" \ --subnetwork "projects/schnauzer-163208/regions/australia-southeast1/subnetworks/default" \ --default-max-pods-per-node "110" \ --addons HorizontalPodAutoscaling \ --enable-autoupgrade --enable-autorepair # Get credentials for cluster #gcloud auth application-default login gcloud container clusters get-credentials test-cluster # Give gcloud account cluster admin permissions kubectl create clusterrolebinding cluster-admin-binding --clusterrole=cluster-admin --user=$(gcloud config get-value account) sc install # wait for all available READY=0 while [ $READY -ne 4 ] do sleep 5 echo "." READY=$( kubectl get deployment -n service-catalog | awk '{ print $5 }' | grep -c 1 ) done sleep 10 echo "READY\n" sc add-gcp-broker READY=0 while [ $READY -ne 1 ] do sleep 5 printf "." 
READY=$( kubectl get clusterservicebrokers -o 'custom-columns=BROKER:.metadata.name,STATUS:.status.conditions[0].reason' | grep -c FetchedCatalog ) done GCP_PROJECT_ID=$(gcloud config get-value project) GCP_PROJECT_NUMBER=$(gcloud projects describe $GCP_PROJECT_ID --format='value(projectNumber)') gcloud projects add-iam-policy-binding ${GCP_PROJECT_ID} \ --member serviceAccount:${GCP_PROJECT_NUMBER}@cloudservices.gserviceaccount.com \ --role=roles/owner
true
e9926ec962b12f3a1474484187b2a6fb02f0443b
Shell
chutsu/proto
/third_party/scripts/install_imgui.bash
UTF-8
281
3.3125
3
[ "MIT" ]
permissive
#!/bin/bash set -e # halt on first error source "config.bash" REPO_NAME=imgui REPO_URL=https://github.com/ocornut/imgui DOWNLOAD_PATH=${PREFIX}/src # Clone repo mkdir -p $DOWNLOAD_PATH cd $DOWNLOAD_PATH || return if [ ! -d $REPO_NAME ]; then git clone $REPO_URL $REPO_NAME fi
true
ec62429e7555159ae51de700dcccb7444dd00ae9
Shell
AL-YISVN/dotfiles
/scripts/battery_warn
UTF-8
390
3.078125
3
[]
no_license
#!/bin/sh battery_path=$(upower -e | grep BAT) battery_state=$(upower -i $battery_path | grep state | awk '{ print $2 }') battery_percent=$(upower -i $battery_path | grep percentage | awk '{ print $2 }') percent_num=$(echo $battery_percent | grep -oP '[^%]+') if [ $battery_state = "discharging" -a $percent_num -le 10 ]; then notify-send 'low battery:' "$battery_percent" fi exit 0
true
2da3b01118c9d3639554b97b4a5be207d060da2d
Shell
nautilor/dotfiles
/.config/eww/launch_bar.sh
UTF-8
150
3.03125
3
[]
no_license
#!/bin/bash function toggle() { eww $1 bar } WINDOWS=`eww windows | grep bar | grep "*"` [ "$WINDOWS" = "" ] && toggle "open" || toggle "close"
true
af87d46cb53cacb1741070f94c4308569d497e3b
Shell
TimothyJones/github-cognito-openid-wrapper
/scripts/deploy.sh
UTF-8
1,135
3.328125
3
[ "BSD-3-Clause", "JSON" ]
permissive
#!/bin/bash -eu SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")"; pwd)" # Figure out where the script is running . "$SCRIPT_DIR"/lib-robust-bash.sh # load the robust bash library PROJECT_ROOT="$SCRIPT_DIR"/.. # Figure out where the project directory is # Ensure dependencies are present require_binary aws require_binary sam # Ensure configuration is present if [ ! -f "$PROJECT_ROOT/config.sh" ]; then echo "ERROR: config.sh is missing. Copy example-config.sh and modify as appropriate." echo " cp example-config.sh config.sh" exit 1 fi source ./config.sh OUTPUT_TEMPLATE_FILE="$PROJECT_ROOT/serverless-output.yml" aws s3 mb "s3://$BUCKET_NAME" --region "$REGION" || true sam package --template-file template.yml --output-template-file "$OUTPUT_TEMPLATE_FILE" --s3-bucket "$BUCKET_NAME" sam deploy --region "$REGION" --template-file "$OUTPUT_TEMPLATE_FILE" --stack-name "$STACK_NAME" --parameter-overrides GitHubClientIdParameter="$GITHUB_CLIENT_ID" GitHubClientSecretParameter="$GITHUB_CLIENT_SECRET" CognitoRedirectUriParameter="$COGNITO_REDIRECT_URI" StageNameParameter="$STAGE_NAME" --capabilities CAPABILITY_IAM
true
3574fa8acd5483096f435d10a7db3313adb23b6c
Shell
vgrechko/cf-ghc
/bin/compile
UTF-8
328
2.828125
3
[]
no_license
#!/bin/bash echo Compiling... echo folder 1 $1 ls $1 echo folder 2 $2 ls $2 echo Making Cache directory $2 mkdir -p $2 echo Running diagnostics bash $1/diag.sh echo Copying $1/fastmake.exe to $2 cp $1/fastmake.exe $2 echo Chmoding +x $2/fastmake.exe chmod +x $2/fastmake.exe ls $2 echo Running $2/fastmake.exe $2/fastmake.exe
true
65554831b6a992d0ec725e0d53484756cf05dd8f
Shell
saitarn/shell
/part1/ex3_variable_command.sh
UTF-8
231
2.953125
3
[]
no_license
#!/bin/bash MY_SHELL="bash" echo "I like the ${MY_SHELL} shell." echo "I am ${MY_SHELL}ing on my keyboard." SERVER_NAME=$(hostname) echo "Your are running on ${SERVER_NAME}." LIST_DIR=$(ls -la) echo "Your list is \n${LIST_DIR}"
true
3042614877c8996d5e43d49469fbf7b35dcf82d0
Shell
vincseize/HDC
/DOCS-TESTS/HDC_daemonWifi_forChecking.sh
UTF-8
952
3.140625
3
[]
no_license
#!/bin/bash while true do # echo $(date)" Check Wifi" echo "######## if wifi connection and give name ################" iwconfig 2>&1 | grep ESSID #echo "######### all wifi ###########" #iwlist wlan0 scan # all wifi echo "######################" nmcli -f NAME con status # --> NOM : NAME CONNECTION #echo "######## scan wifi name if authorized #############" #iwlist NEUF_D918 scan # verboten sometimes echo "## Details even not connected ####" nmcli con list id "NEUF_D918" | awk '/key-mgmt/ {print $2}' # ---> TYPE WPA:WEP -> todo write at installation : ' Disconnect: nmcli d disconnect iface wlan0 Connect: nmcli d wifi connect <WiFiSSID> password <WiFiPassword> iface wlan0 Just change wlan0, <WiFiSSID>, <WiFiPassword> to reflect your setup. If WiFi info already saved, easier way: Disconnect: nmcli c down id <WiFiConn> Connect: nmcli c up id <WiFiConn> ' sleep 5 done
true
403fd9398a222ece3e9a33eec0e97e7abf2d4f3b
Shell
s0m35h1t/holberton-system_engineering-devops
/0x05-processes_and_signals/101-manage_my_process
UTF-8
1,210
3.96875
4
[]
no_license
#!/usr/bin/env bash # When passing the argument start: # Starts manage_my_process # Creates a file containing its PID in /var/run/my_process.pid # Displays manage_my_process started # When passing the argument stop: # Stops manage_my_process # Deletes the file /var/run/my_process.pid # Displays manage_my_process stopped # When passing the argument restart # Stops manage_my_process # Deletes the file /var/run/my_process.pid # Starts manage_my_process # Creates a file containing its PID in /var/run/my_process.pid # Displays manage_my_process restarted # Displays Usage: manage_my_process {start|stop|restart} if any other argument or no argument is passed if [ "$1" == "start" ] then ./manage_my_process & echo $$ > /var/run/my_process.pid echo "manage_my_process started" elif [ "$1" == "stop" ] then pkill -f manage_my_process rm -f /var/run/my_process.pid echo "manage_my_process stopped" elif [ "$1" == "restart" ] then pkill -f manage_my_process rm -f /var/run/my_process.pid ./manage_my_process & echo $$ > /var/run/my_process.pid echo "manage_my_process restarted" else echo "Usage: manage_my_process {start|stop|restart}" fi
true
b0ebd586319ce37aec2e5821b2a334eb089db861
Shell
unfoldingWord-dev/tools
/obs/dokuwiki/create-obs-zip.sh
UTF-8
791
3.25
3
[ "MIT" ]
permissive
#!/usr/bin/env sh # -*- coding: utf8 -*- # # Copyright (c) 2014 unfoldingWord # http://creativecommons.org/licenses/MIT/ # See LICENSE file for details. # # Contributors: # Jesse Griffin <jesse@distantshores.org> LANG="$1" [ -z "$LANG" ] && echo "Please specify language code." && exit 1 PAGES="/var/www/vhosts/door43.org/httpdocs/data/gitrepo/pages" LANGDIR="$PAGES/$LANG" ZIPNAME="$LANG-obs-`date +%F`" cd $LANGDIR find obs -maxdepth 1 -type f -name '[0-9][0-9].txt' | zip $ZIPNAME -@ zip -r $ZIPNAME obs/front-matter.txt obs/back-matter.txt obs/app_words.txt \ statement-of-faith.txt legal/license.txt translation-guidelines.txt mv $ZIPNAME.zip /tmp/ echo "Zip at: /tmp/$ZIPNAME.zip" # To add the notes afterward, you could run this: # find obs/notes -type f -name '*.txt' | zip -r $ZIPNAME -@
true
abf3849d959c73b1efd42f4760171fc56253f903
Shell
smile-io/phobos
/utils/stop-all.sh
UTF-8
156
3.125
3
[ "Apache-2.0" ]
permissive
#!/bin/bash set -eu UTILS_DIR=$(dirname $0) source ${UTILS_DIR}/env.sh for (( i=${#APPS[@]}-1 ; i>=0 ; i-- )) ; do ${UTILS_DIR}/${APPS[i]}.sh stop done
true
3e318868ac474167c540422c00df022f84104b0e
Shell
codacy-badger/Mengine
/build/xcode_ios_sdl/make_solution_xcode_ios_sdl_release.sh
UTF-8
562
2.625
3
[]
no_license
#! /bin/bash CONFIGURATION=Release mkdir -p ../../solutions/solution_xcode_ios_sdl/$CONFIGURATION pushd ../../solutions/solution_xcode_ios_sdl/$CONFIGURATION /Applications/CMake.app/Contents/bin/cmake -G"Xcode" "$PWD/../../../CMake/Xcode_IOS_SDL" -DCMAKE_BUILD_TYPE:STRING=$CONFIGURATION -DCMAKE_CONFIGURATION_TYPES:STRING=$CONFIGURATION -DMENGINE_LIB_DIR:STRING="build_xcode_ios_sdl/\"$CONFIGURATION\"" -DCMAKE_TOOLCHAIN_FILE="$PWD/../../../dependencies/ios-cmake/ios.toolchain.cmake" -DIOS_PLATFORM=OS64 -DENABLE_BITCODE=0 -DIOS_DEPLOYMENT_TARGET="9.0" popd
true
58a5d8e8e1877bca9149dd73524b5468d0c3f52d
Shell
anujkaliaiitd/pmem-bench
/nvme_perf/latency.sh
UTF-8
1,156
3.109375
3
[]
no_license
perf_exe="/home/akalia/sandbox/spdk/examples/nvme/perf/perf" rm -f tmpout_* rm -rf final_out touch final_out # Last one wins bench=read # Sequential reads bench=randwrite # Random writes bench=write # Sequential writes bench=randread # Random reads echo "size us_avg us_median us_999 us_99" >> final_out for ((size = 512; size <= 65536; size *= 2)); do tmpfile="tmpout_$size" # -q: queue depth # -o: object size to write # -t: time in seconds # -c: core mask (core 24) # -L: generate histogram sudo numactl --cpunodebind=1 --membind=1 $perf_exe \ -q 1 -o $size -w $bench -t 2 -c 0x1000000 -L > $tmpfile us_avg=`cat $tmpfile | grep Total | sed -s 's/ */ /g' | cut -d ' ' -f 5` us_median=`cat $tmpfile | grep "50\.00000" | tr -d ' ' | cut -d ":" -f 2 | sed 's/us//g'` us_99=`cat $tmpfile | grep "99\.00000" | tr -d ' ' | cut -d ":" -f 2 | sed 's/us//g'` us_999=`cat $tmpfile | grep "99\.90000" | tr -d ' ' | cut -d ":" -f 2 | sed 's/us//g'` echo $size $us_avg $us_median $us_999 $us_99 echo $size $us_avg $us_median $us_999 $us_99 >> final_out done cat final_out rm -f tmpout_* rm -rf final_out
true
e7c72da0a2da193eb6f065fbf6a7bab7badc2c4a
Shell
laymonage/dotfiles
/.zshrc
UTF-8
5,175
2.765625
3
[]
no_license
# If you come from bash you might have to change your $PATH. export PATH=$HOME/bin:/usr/local/bin:$PATH # Path to your oh-my-zsh installation. export ZSH="/home/sage/.oh-my-zsh" # Set name of the theme to load --- if set to "random", it will # load a random theme each time oh-my-zsh is loaded, in which case, # to know which specific one was loaded, run: echo $RANDOM_THEME # See https://github.com/robbyrussell/oh-my-zsh/wiki/Themes # ZSH_THEME="robbyrussell" ZSH_THEME="spaceship" # Set list of themes to pick from when loading at random # Setting this variable when ZSH_THEME=random will cause zsh to load # a theme from this variable instead of looking in ~/.oh-my-zsh/themes/ # If set to an empty array, this variable will have no effect. # ZSH_THEME_RANDOM_CANDIDATES=( "robbyrussell" "agnoster" ) # Uncomment the following line to use case-sensitive completion. # CASE_SENSITIVE="true" # Uncomment the following line to use hyphen-insensitive completion. # Case-sensitive completion must be off. _ and - will be interchangeable. # HYPHEN_INSENSITIVE="true" # Uncomment the following line to disable bi-weekly auto-update checks. # DISABLE_AUTO_UPDATE="true" # Uncomment the following line to change how often to auto-update (in days). # export UPDATE_ZSH_DAYS=13 # Uncomment the following line to disable colors in ls. # DISABLE_LS_COLORS="true" # Uncomment the following line to disable auto-setting terminal title. # DISABLE_AUTO_TITLE="true" # Uncomment the following line to enable command auto-correction. # ENABLE_CORRECTION="true" # Uncomment the following line to display red dots whilst waiting for completion. # COMPLETION_WAITING_DOTS="true" # Uncomment the following line if you want to disable marking untracked files # under VCS as dirty. This makes repository status check for large repositories # much, much faster. 
# DISABLE_UNTRACKED_FILES_DIRTY="true" # Uncomment the following line if you want to change the command execution time # stamp shown in the history command output. # You can set one of the optional three formats: # "mm/dd/yyyy"|"dd.mm.yyyy"|"yyyy-mm-dd" # or set a custom format using the strftime function format specifications, # see 'man strftime' for details. # HIST_STAMPS="mm/dd/yyyy" # Would you like to use another custom folder than $ZSH/custom? # ZSH_CUSTOM=/path/to/new-custom-folder # Which plugins would you like to load? # Standard plugins can be found in ~/.oh-my-zsh/plugins/* # Custom plugins may be added to ~/.oh-my-zsh/custom/plugins/ # Example format: plugins=(rails git textmate ruby lighthouse) # Add wisely, as too many plugins slow down shell startup. plugins=(git pip python sudo zsh-autosuggestions) source $ZSH/oh-my-zsh.sh # User configuration # export MANPATH="/usr/local/man:$MANPATH" # You may need to manually set your language environment # export LANG=en_US.UTF-8 # Preferred editor for local and remote sessions # if [[ -n $SSH_CONNECTION ]]; then # export EDITOR='vim' # else # export EDITOR='mvim' # fi # Compilation flags # export ARCHFLAGS="-arch x86_64" # ssh # export SSH_KEY_PATH="~/.ssh/rsa_id" # Set personal aliases, overriding those provided by oh-my-zsh libs, # plugins, and themes. Aliases can be placed here, though oh-my-zsh # users are encouraged to define aliases within the ZSH_CUSTOM folder. # For a full list of active aliases, run `alias`. # # Example aliases # alias zshconfig="mate ~/.zshrc" # alias ohmyzsh="mate ~/.oh-my-zsh" # powerline-daemon -q # . 
/usr/lib/python3.7/site-packages/powerline/bindings/zsh/powerline.zsh # POWERLEVEL9K_LEFT_PROMPT_ELEMENTS=(os_icon root_indicator context dir vcs) # POWERLEVEL9K_RIGHT_PROMPT_ELEMENTS=() SPACESHIP_PROMPT_ADD_NEWLINE=false SPACESHIP_PROMPT_SEPARATE_LINE=true SPACESHIP_CHAR_SYMBOL="❯" SPACESHIP_CHAR_SUFFIX=" " SPACESHIP_TIME_SHOW=true SPACESHIP_TIME_COLOR="#ffaf00" SPACESHIP_DIR_PREFIX="" SPACESHIP_DIR_COLOR="#5fafff" . /usr/share/LS_COLORS/dircolors.sh get_crtime() { for target in "${@}"; do inode=$(stat -c '%i' "${target}") fs=$(df --output=source "${target}" | tail -1) crtime=$(sudo debugfs -R 'stat <'"${inode}"'>' "${fs}" 2>/dev/null | grep -oP 'crtime.*--\s*\K.*') printf "%s\t%s\n" "${target}" "${crtime}" done } zstyle ':completion:*' list-colors "${(@s.:.)LS_COLORS}" #autoload -Uz compinit #compinit ### Added by Zplugin's installer #source '/home/sage/.zplugin/bin/zplugin.zsh' #autoload -Uz _zplugin #(( ${+_comps} )) && _comps[zplugin]=_zplugin ### End of Zplugin's installer chunk # # ~/.bashrc # # If not running interactively, don't do anything # [[ $- != *i* ]] && return # export PATH="$PATH:/home/sage/.pyenv/bin" # eval "$(pyenv init -)" # eval "$(pyenv virtualenv-init -)" export LESSOPEN="| /usr/bin/src-hilite-lesspipe.sh %s" export LESS=' -R ' alias ls='ls --color=auto' alias buka='xdg-open' alias dkbd='lk4b -p unlock -message ""' alias django='python manage.py' alias pvrun='pipenv run' alias pvshell='pipenv shell --fancy' alias codot='code .' alias keep='google-keep-desktop &' # added by travis gem # [ -f /home/sage/.travis/travis.sh ] && source /home/sage/.travis/travis.sh # End of .bashrc
true
0ab9051e0a02beb1539e2461cd0fb4a03cde6b6e
Shell
mer-hybris/droid-hal-configs
/sparse/usr/lib/startup/preinit/get_bootstate
UTF-8
3,355
3.34375
3
[]
no_license
#!/bin/sh # Contact: Pekka Lundstrom <pekka.lundstrom@jollamobile.com> # # Copyright (c) 2014, Jolla Ltd. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of the <organization> nor the # names of its contributors may be used to endorse or promote products # derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
# preinit plugin - get_bootstate # This is part of /sbin/preinit and finds out why Jolla device booted up # It will output "USER", "ACT_DEAD" or "TEST" # If wakeup happened by usb or rtc, then we boot to ACT_DEAD, else USER # jolla.test_mode in cmdline can override usb detection values check_bogus=0 bootreason_str="Normal boot" actdead_bootparam="" if [ -f /var/lib/environment/actdead/bootparam.conf ]; then source /var/lib/environment/actdead/bootparam.conf actdead_bootparam=$ACTDEAD_PARAMETER_STRING fi if grep -q 'jolla.test_mode=USER' /proc/cmdline; then # Device is in QA test mode forcing USER BOOTSTATE="USER" elif grep -q 'jolla.test_mode=ACT_DEAD' /proc/cmdline; then # Device is in QA test mode forcing ACT_DEAD BOOTSTATE="ACT_DEAD" elif grep -q 'androidboot.mode=charger' /proc/cmdline; then BOOTSTATE="ACT_DEAD" bootreason_str="androidboot.mode=charger" check_bogus=1 elif [ ! -z "$actdead_bootparam" ] && grep -q $actdead_bootparam /proc/cmdline; then # Device specific boot parameter BOOTSTATE="ACT_DEAD" bootreason_str=$actdead_bootparam check_bogus=1 else BOOTSTATE="USER" fi # Bootreason can be bogus (pwr down by low voltage peak or battery removal) # We try to detect that and boot to USER in that case # ACTDEAD boot would cause only shutdown if there wasn't real reason if [ "$BOOTSTATE" == "ACT_DEAD" ] && [ $check_bogus -eq 1 ] && [ -f /var/log/systemboot.log ]; then # In uncontrolled shutdown we don't have log entry for shutdown tail -1 /var/log/systemboot.log | grep -q Shutdown if [ $? -ne 0 ]; then BOOTSTATE="USER" echo "x-no-time-stamp Startup: preinit ignored $bootreason_str" >> /var/log/systemboot.log fi fi echo $BOOTSTATE exit 0
true
e4497c6b9af5760f66eb47e33dfa8df501c39b5b
Shell
andriipanchuk/test
/nagiosxi/install-html
UTF-8
4,150
3.25
3
[]
no_license
#!/bin/bash -e
# Nagios XI installer step: copy the new XI html tree into place, restore
# locally-modified config files, and normalize ownership/permissions.
# Expects $proddir, $nagiosuser, $nagiosgroup, $apacheuser, $chownbin and
# $mibsdir to be defined by xi-sys.cfg.
. ./xi-sys.cfg

##########################################
# COPY OVER NEW XI HTML FILES
##########################################

# Save old config.inc file
if [ -f "$proddir/html/config.inc.php" ]; then
    cp -f "$proddir/html/config.inc.php" "$proddir/html/config.inc.saved"
fi

# Save old loginsplash file
if [ -f "$proddir/html/loginsplash.inc.php" ]; then
    cp -f "$proddir/html/loginsplash.inc.php" "$proddir/html/loginsplash.inc.saved"
fi

# Copy over XI files
echo "Copying over new XI directory..."
cp -r ./nagiosxi/basedir/* "$proddir"

# Restore original config.inc file, but save a copy of the new one
if [ -f "$proddir/html/config.inc.saved" ]; then
    cp -f "$proddir/html/config.inc.php" "$proddir/html/config.inc.dist"
    cp -f "$proddir/html/config.inc.saved" "$proddir/html/config.inc.php"
fi

# Restore original loginsplash file, but save a copy of the new one
if [ -f "$proddir/html/loginsplash.inc.saved" ]; then
    cp -f "$proddir/html/loginsplash.inc.php" "$proddir/html/loginsplash.inc.dist"
    cp -f "$proddir/html/loginsplash.inc.saved" "$proddir/html/loginsplash.inc.php"
fi

# Change ownership on directories and files
chown -R "$nagiosuser:$nagiosgroup" "$proddir"
chown "root:$nagiosgroup" "$proddir"
chown "root:$nagiosgroup" $proddir/*
chown "root:$nagiosgroup" "$proddir/scripts/components"

# Permissions for var should be all nagios permissions
chown "$nagiosuser:$nagiosgroup" "$proddir/var"

# Change to correct perms
find "$proddir" -type d -exec /bin/chmod 755 -- {} +
find "$proddir"/var -type d -exec /bin/chmod 775 -- {} +
find "$proddir"/html -type f -exec /bin/chmod o-wx+r -- {} +
find "$proddir"/scripts -type f -exec /bin/chmod o-wx+r -- {} +
find "$proddir"/tools -type f -exec /bin/chmod o-wx+r -- {} +

chown -R "$nagiosuser:$nagiosgroup" "$proddir"/html/includes/components/highcharts/exporting-server
chmod 775 "$proddir"/html/includes/components/highcharts/exporting-server/temp

# Tmp directory has additional perms (setgid so new files inherit the group)
chmod g+s "$proddir/tmp"
chmod -R ug+rwx "$proddir/tmp"

# Fix perms on zip files in tmp directory
# BUGFIX: the glob must stay outside the quotes -- `ls "$proddir/tmp/*.zip"`
# looked for a file literally named "*.zip", so this branch never ran.
if ls "$proddir"/tmp/*.zip 1> /dev/null 2>&1; then
    eval "$chownbin" "$nagiosuser:$nagiosgroup" $proddir/tmp/*.zip
    chmod ug+w "$proddir"/tmp/*.zip
fi

# Set permissions on component etc directory
mkdir -p "$proddir/etc/components/bpi"
eval "$chownbin" -R "$apacheuser:$nagiosgroup" "$proddir/etc/components"
find "$proddir/etc/components/" -type d -exec chmod 6775 {} \;

# Set permissions on component var directory
eval "$chownbin" -R "$apacheuser:$nagiosgroup" "$proddir/var/components"
find "$proddir/var/components/" -type d -exec chmod 6775 {} \;

# Make sure all the sudo scripts are root:nagios
chown "root:$nagiosgroup" "$proddir/scripts/reset_config_perms.sh"
chown "root:$nagiosgroup" "$proddir/scripts/upgrade_to_latest.sh"
chown "root:$nagiosgroup" "$proddir/scripts/change_timezone.sh"
chown "root:$nagiosgroup" "$proddir/scripts/manage_services.sh"
chown "root:$nagiosgroup" "$proddir/scripts/manage_ssl_config.sh"
chown "root:$nagiosgroup" "$proddir/scripts/backup_xi.sh"
chown "root:$nagiosgroup" "$proddir/scripts/repair_databases.sh"

# Make sure all sudo component scripts are root:nagios
chown "root:$nagiosgroup" "$proddir/scripts/components/getprofile.sh"
chown "root:$nagiosgroup" "$proddir/scripts/components/autodiscover_new.php"
chown "root:$nagiosgroup" "$proddir/scripts/migrate/migrate.php"

# Set up script migrate jobs directory
mkdir -p "$proddir/scripts/migrate/jobs"
chown "$nagiosuser:$nagiosgroup" "$proddir/scripts/migrate/jobs"

# Fix perms on PNP graph template permissions
chown "$nagiosuser:$nagiosgroup" /usr/local/nagios/share/pnp/templates
chmod g+ws /usr/local/nagios/share/pnp/templates
# ".group" form: change the group only, leave the owner untouched
chown ".$nagiosgroup" /usr/local/nagios/share/pnp/templates/*.php
chmod g+w /usr/local/nagios/share/pnp/templates/*.php

# Fix perms on SNMP MIBS
chown -R "root:$nagiosgroup" "$mibsdir"
chmod g+w -R "$mibsdir"
chmod g+ws "$mibsdir"

# Update deployment jobs section
mkdir -p "$proddir/html/config/deployment/jobs"
chown "$nagiosuser:$nagiosgroup" "$proddir/html/config/deployment/jobs"
chmod 755 "$proddir/html/config/deployment/jobs"
true
01932beead92599bd13694afeae484445047275d
Shell
delkyd/alfheim_linux-PKGBUILDS
/pciutils-git/PKGBUILD
UTF-8
873
2.71875
3
[]
no_license
# Maintainer: Chocobo1 <chocobo1 AT archlinux DOT net> pkgname=pciutils-git pkgver=3.5.5.r0.gdfd15a8 pkgrel=1 pkgdesc="Programs for inspecting and manipulating configuration of PCI devices" arch=('i686' 'x86_64') url="http://mj.ucw.cz/sw/pciutils/" license=('GPL2') depends=('glibc' 'hwids' 'kmod') makedepends=('git') provides=('pciutils') conflicts=('pciutils') source=("git+https://git.kernel.org/pub/scm/utils/pciutils/pciutils.git") sha256sums=('SKIP') pkgver() { cd "pciutils" git describe --long --tags | sed 's/^v//;s/\([^-]*-g\)/r\1/;s/-/./g' } _OPTIONS='SHARED=yes ZLIB=no' build() { cd "pciutils" make OPT="$CFLAGS" "$_OPTIONS" } package() { cd "pciutils" make DESTDIR="$pkgdir" PREFIX="/usr" SBINDIR="/usr/bin" SHAREDIR="/usr/share/hwdata" "$_OPTIONS" install install-lib # supplied by hwids package rm -rf "$pkgdir/usr/share/hwdata" }
true
365c0809c0cdaf07a80d5c35a62eb08c7f7fdb0a
Shell
hertzbinnon/test_integration
/Platforms/Docker/setup_container.sh
UTF-8
3,023
2.6875
3
[]
no_license
#!/bin/bash # for jenkins plugins install update local dns hosts # Note: 1. jenkins need third package which will be download from ftp server # if local host cant resovle ftp server name , wen can detect mannul echo "203.178.132.80 ftp.tsukuba.wide.ad.jp" >> /etc/hosts jenkins_http_port=18080 docker run --detach -u root --name jenkins --restart always -p $jenkins_http_port:8080 -p 50000:50000 -v /home/docker.volume/jenkins:/var/jenkins_home jenkins gitlab_https_port=10443 gitlab_http_port=10080 gitlab_ssh_port=10022 local_host_name="" docker run --detach \ --hostname gitlab.example.com \ --publish $gitlab_https_port:443 --publish $gitlab_http_port:80 --publish $gitlab_ssh_port:22 \ --name gitlab -u root \ --restart always \ --volume /home/docker.volume/gitlab/config:/etc/gitlab \ --volume /home/docker.volume/gitlab/logs:/var/log/gitlab \ --volume /home/docker.volume/gitlab/data:/var/opt/gitlab \ gitlab/gitlab-ce:latest # Android OS compile docker run \ -i -t -p 20022:22 \ --hostname android.com \ --name android-vlc -u root \ --restart always \ --volume /home/android.tutorials:/opt/android \ --volume /home:/opt/android-vlc \ ubuntu:14.04.android.vlc.stable /bin/bash # ATS compile docker run \ -i -t -p 28080:8080 \ --hostname android.com \ --name trafficserver -u root \ --restart always \ --volume /home/trafficserver:/opt/trafficserver \ docker.io/centos /bin/bash # IMS compile docker run \ -i -t -p 25060:5060 \ --hostname android.com \ --name kamailio -u root \ --restart always \ --volume /home/kamailio:/opt/kamailio \ docker.io/centos /bin/bash docker pull ubuntu:16.04 docker exec -it android-vlc /bin/bash docker inspect container_name | grep Mounts -A 20 # 查看挂载目录 docker rm $(docker ps -a -q) nvidia-docker run -i -t --hostname vrsmsz-nvidia-docker --name vrsmsz-nvidia -u root --restart always --gpus all --volume /:/opt/root 5a214d77f5d7 bash nvidia-docker run -i -t --hostname vrsmsz-nvidia-docker --name vrsmsz-nvidia-18.04 -u root --restart always --gpus 
'all,"capabilities=all"' --volume /:/opt/root 5a214d77f5d7 bash sudo cp -rf /var/lib/docker /home/hertz/sdb1/Docker/docker-data-root sudo vim /lib/systemd/system/docker.service # #ExecStart=/usr/bin/dockerd -H fd:// --containerd=/run/containerd/containerd.sock --data-root=/home/hertz/sdb1/Docker/docker-data-root/ --exec-root=/home/hertz/sdb1/Docker/docker-exec-root # sudo systemctl daemon-reload sudo systemctl restart docker.service sudo usermod -aG sudo <username> sudo cat /etc/group | grep sudo nvidia-docker exec -u root -it vrsmsz-nvidia-18.04 bash # 1.ould not select device driver "" with capabilities: [[all gpu]] sudo docker run --rm --gpus all nvidia/cuda:11.0-base nvidia-smi # 2. nvidia-docker run -i -t --hostname vrsmsz-nvidia-docker --name test -u root --restart always --gpus all --volume /:/opt/root 58da04891421 bash # newgrp docker sudo gpasswd -a $USER docker
true
d6af4c8c86fbc27b2aa3dcde2165d9858ef81d27
Shell
ROOAARR/leechcraft
/tools/scripts/geniconslist.sh
UTF-8
549
2.921875
3
[ "BSL-1.0" ]
permissive
#! /bin/sh grcmd='grep -E "(ActionIcon|\<GetIcon)"' touch tmp grep ActionIcon * | sed 's/.*\".*\".*\"\(.*\)\".*/\1/' > tmp2 grep "GetIcon (\"" * | sed 's/.*GetIcon (\"\(.*\)\".*)/\1/' >> tmp2 echo "List for Core:" >> tmp sort -u tmp2 >> tmp for i in `ls plugins`; do grep ActionIcon plugins/$i/* | sed 's/.*\".*\".*\"\(.*\)\".*/\1/' > tmp2 grep "GetIcon (\"" plugins/$i/* | sed 's/.*GetIcon (\"\(.*\)\".*)/\1/' >> tmp2 # if [ -n `cat $tmp2` ] # then echo "List for $i:" >> tmp sort -u tmp2 >> tmp # fi done cat tmp rm tmp2 rm tmp
true
16559e788b03d62d70425fd164ddd41e04ef87fa
Shell
jasonjanderson/serverconfigs
/debian_install/bash/bashrc
UTF-8
2,080
3.234375
3
[]
no_license
# ~/.bashrc: executed by bash(1) for non-login shells. # see /usr/share/doc/bash/examples/startup-files (in the package bash-doc) # for examples function bashrc_prompt() { local RESET="\[\e[0m\]" local BLACK="\[\e[30;1m\]" local B_BLACK="\[\e[30;1m\]" local RED="\[\e[31;1m\]" local B_RED="\[\e[31;1m\]" local GREEN="\[\e[32;1m\]" local B_GREEN="\[\e[32;1m\]" local YELLOW="\[\e[33;1m\]" local B_YELLOW="\[\e[33;1m\]" local DARKBLUE="\[\e[34;1m\]" local B_DARKBLUE="\[\e[34;1m\]" local PURPLE="\[\e[35;1m\]" local B_PURPLE="\[\e[35;1m\]" local LIGHTBLUE="\[\e[36;1m\]" local B_LIGHTBLUE="\[\e[36;1m\]" local WHITE="\[\e[37;1m\]" local B_WHITE="\[\e[37;1m\]" export PS1="\n$B_LIGHTBLUE-\w- $RESET$B_GREEN[ \d \@ ]$RESET\n$B_LIGHTBLUE\u@\h$RESET$B_YELLOW>$RESET$B_RED\W$RESET|> " } # If not running interactively, don't do anything [ -z "$PS1" ] && return # Populate the PS1 with the format for the prompt. bashrc_prompt ## Bash History ## # The number of commands bash_history should save. export HISTFILESIZE=3000 # Don't print duplicate lines in the history. export HISTCONTROL=ignoredups # Check the window size after each command and, if necessary, # update the values of LINES and COLUMNS. shopt -s checkwinsize # Add user's '~/bin' directory into $PATH (if exists). if [ -d ~/bin ] ; then PATH=~/bin:"${PATH}" fi ## Alias Definitions ## # Define all aliases in ~/.bash_aliases, instead of adding them to .bashrc directly. if [ -f ~/.bash_aliases ]; then . ~/.bash_aliases fi ## Scripts ## # Define custom scripting in ~/.bash_script, instead of adding them to .bashrc directly. if [ -f ~/.bash_script ]; then . ~/.bash_script fi ## MOTD ## # Define a user's custom Message Of The Day. if [ -f ~/.bash_motd ]; then . ~/.bash_motd fi ## Bash Completion ## # Enable programmable completion of common commands. # Does not need to be enabled if already enabled in '/etc/bash.bashrc', '/etc/profile', or '/etc/bash.bashrc'. if [ -f /etc/bash_completion ]; then . /etc/bash_completion fi
true
56d28f9ee8e926b4047f2297882d46b5b2f3dfe1
Shell
cyaguesa/Rho-seq
/HELPER SCRIPTS/rRNA_reads.sh
UTF-8
496
3.3125
3
[]
no_license
#!/bin/bash # get rRNA reads info set -e if [ $# -eq 0 ]; then echo "Usage : ./rRNA_reads.sh bam_file_1 [bam_file_2, ...]" exit 1 else for i in $* do echo ""; echo " bam file : ${i}"; echo "" samtools idxstats $i | cut -f 3 | tail -n +2 | awk '{s+=$1}END{print s}' echo " done. Next!" done echo "all done !" fi #./scripts/rRNA_reads.sh spike-in/*spike_in_reads_sorted.bam_fwd.bam ~/Desktop/these_Carlo/R_terminal/D-seq/fastq2/mapped/*spike_in_reads_sorted.bam.fwd.bam
true
6bda4a3538d2f7f3b55f15feea38aa6db1ba724f
Shell
VariousForks/docker-retroshare
/start_retro_nogui.sh
UTF-8
999
3.34375
3
[]
no_license
#!/bin/bash # Set these values # Generate the password hash with "RetroShare-nogui -G" SSH_PORT=0000 # Do not use the same prot as retroshare! SSH_USERNAME=ServerUser SSH_PASSWDHASH=PasswordHash # Protect this file it will contain your GPG password! GPG_PASS=GpgPass # For instructions see the retroshare wiki # http://retroshare.sourceforge.net/wiki/index.php/Documentation:retroshare-nogui # Automatically start RetroShare-nogui in a screen session screen -dmS RetroScreen screen -S "RetroScreen" -p 0 -X stuff "/usr/bin/expect -c 'spawn RetroShare-nogui -c /home/retrouser/.retroshare -C -X -S $SSH_PORT -L $SSH_USERNAME -P $SSH_PASSWDHASH&; expect \"(Generated by RetroShare)\"; send \"$GPG_PASS\r\"; interact' $(printf \\r)" # If you want to manually start RetroShare-nogui and type your password, comment out the previous line and run this command in the screen terminal manually. # RetroShare-nogui -c /home/retrouser/.retroshare -C -X -S $SSH_PORT -L $SSH_USERNAME -P $SSH_PASSWDHASH&
true
2637f2ebe8504e9103ccdad3665503e45bfd3da8
Shell
SparksNetwork/schemas
/bin/types2schemas.sh
UTF-8
278
3.3125
3
[]
no_license
#!/usr/bin/env bash find types -type f | while read f do N=$(basename $f .ts) if [[ ! -f "./schemas/models/$N.json" ]] then echo "Converting $f" ./type2schema.js $f | jq "." > "schemas/models/$N.json" else echo "Skipping $f" fi done
true
a669f84b1e94b93b05799d539b1dd1da08ca1958
Shell
gus3000/lightningbot-python-template
/run.sh
UTF-8
335
3.390625
3
[ "MIT" ]
permissive
#!/usr/bin/env bash COMMAND="python3 -m mybot" #COMMAND='echo coucou; sleep 2; echo bisous' N=$1 # number of processes if [ "$N" = "" ]; then N=1 fi for i in $(seq ${N}) do eval "$COMMAND" & pids[${i}]=$! # we store the process id done for pid in ${pids[*]}; do wait $pid # we wait for every process to finish done
true
3e8bd21da8eb434ff4b3c28d58ee607cfb8fd8a9
Shell
almumill/online-psl-commands
/run.sh
UTF-8
798
4.03125
4
[]
no_license
#!/usr/bin/env bash # Run all the experiments. PSL_ONLINE_DATASETS='movielens' function main() { trap exit SIGINT # dataset paths to pass to scripts psl_dataset_paths='' for dataset in $PSL_ONLINE_DATASETS; do psl_dataset_paths="${psl_dataset_paths}psl-datasets/${dataset} " done # PSL Experiments # Fetch the data and models if they are not already present and make some # modifications to the run scripts and models. ./scripts/psl_scripts/setup_psl_datasets.sh echo "Running psl performance experiments on datasets: [${PSL_ONLINE_DATASETS}]." pushd . > /dev/null cd "./scripts" || exit # shellcheck disable=SC2086 ./run_online_performance_experiments.sh "psl" ${psl_dataset_paths} popd > /dev/null } main "$@"
true
380739bde61c0c61a462f7b1c20afadab97ef263
Shell
KoapT/my_tf_train
/Image/SampleExample/create_tf_record_auto.sh
UTF-8
769
2.953125
3
[]
no_license
#!/bin/bash # create samples record automatically # user modify======================================================= #TRAIN_RATIO=0.99 #the ratio(e.g. 0.99 is 99%) of training samples occupy all samples, rest is evaluation samples # user modify end=================================================== source sample_cfg.properties CURRENT_DIR=$(pwd) WORK_DIR="${CURRENT_DIR}" TRAIN_DIR="${WORK_DIR}/train_dir" echo "Start to create samples record automatically" echo "Samples to train in all samples occupy: ${TRAIN_RATIO}" echo "------------------------" echo "clear train directory: ${TRAIN_DIR}" rm -rf ${TRAIN_DIR}/* echo " " echo " " python writeFileName_to_setTxt.py --r ${TRAIN_RATIO} echo " " echo " " sleep 1s python create_tf_record.py
true
e59324c5097d932b4d9d73b2f953f1646739180a
Shell
drbartz/puppet
/modules/oculus/files/init_oculus
UTF-8
3,335
3.671875
4
[ "Apache-2.0" ]
permissive
#!/bin/sh
#
# thin-server init file for starting up the oculus webserver daemon
#
# chkconfig: - 20 80
#
# SysV init script managing the Oculus resque workers (started through rake
# under RVM).  Worker PIDs are written comma-separated into $pidfile.

# Source function library.
. /etc/rc.d/init.d/functions

# RVM environment for the ruby that runs the workers
RUBY_VERSION=ruby-1.9.2-p330
PATH=/usr/local/rvm/gems/${RUBY_VERSION}/bin:/usr/local/rvm/gems/${RUBY_VERSION}@global/bin:/usr/local/rvm/rubies/${RUBY_VERSION}/bin:/usr/local/rvm/bin:/usr/bin:/sbin:/bin:/usr/sbin
GEM_HOME=/usr/local/rvm/gems/${RUBY_VERSION}
IRBRC=/usr/local/rvm/rubies/${RUBY_VERSION}/.irbrc
MY_RUBY_HOME=/usr/local/rvm/rubies/${RUBY_VERSION}
GEM_PATH=/usr/local/rvm/gems/${RUBY_VERSION}:/usr/local/rvm/gems/${RUBY_VERSION}@global
export PATH GEM_HOME IRBRC MY_RUBY_HOME GEM_PATH RUBY_VERSION

pidfile="/var/run/oculus/resque.pid"
lockfile="/var/lock/subsys/thin"
logfile="/var/log/oculus/resque_stdout"
exec="/usr/local/rvm/gems/${RUBY_VERSION}/bin/rake"
param=" resque:start_workers"
name="oculus"

# Sleep one second unless the service already reports running (gives the
# workers time to write their pidfile).
wait_start() {
    rh_status_q
    retval=$?
    [ $retval -ne 0 ] && sleep 1
}

# Start the workers and wait up to ~4s for them to come up.
start() {
    [ -x $exec ] || exit 5
    cd /opt/oculus
    echo -n $"Starting $name: "
    daemon "$exec ${param} >> $logfile"
    wait_start;wait_start;wait_start;wait_start
    rh_status_q
    retval=$?
    if [ $retval -eq 0 ]
    then
        touch $lockfile
        success
        echo
    fi
    return $retval
}

# Send one round of SIGTERMs to every recorded pid (no-op when stopped).
try_stop() {
    rh_status_q
    retval=$?
    if [ $retval -ne 1 ]
    then
        # pidfile holds a comma-separated list; turn commas into spaces
        list=`cat $pidfile | sed 's/\,/ /g '`
        # BUGFIX: was "2> /dev/nul", which created a regular file named
        # /dev/nul instead of discarding kill's stderr
        kill $list 2> /dev/null
        sleep 1
    fi
}

stop() {
    echo -n $"Stopping $name: "
    try_stop;try_stop;try_stop;try_stop;try_stop;try_stop;try_stop;try_stop
    rh_status_q
    retval=$?
    if [ $retval -eq 1 ]
    then
        success
        echo
        [ -f $lockfile ] && rm -f $lockfile
        [ -f $pidfile ] && rm -f $pidfile
    fi
    return $retval
}

restart() {
    stop
    start
}

reload() {
    stop
    start
}

# Report status from the pids recorded in $pidfile.
# Returns 0 when all recorded pids count as running, 1 when stopped,
# 4 when only some of them do.
rh_status() {
    #status -p $pidfile `basename $exec`
    #status -p $pidfile
    if [ -f $pidfile ]
    then
        list=`cat $pidfile | sed 's/\,/ /g'`
        total_pid=0
        pid_ok=0
        for p in $list
        do
            total_pid=$(($total_pid + 1))
            # NOTE(review): `grep -v` counts a pid as OK when its cmdline
            # does NOT contain "ruby" -- for rake/resque workers this looks
            # inverted; confirm before changing.
            grep -vq ruby /proc/$p/cmdline 2> /dev/null && pid_ok=$(($pid_ok + 1))
        done
        # check if there is any pid running
        if [ 0 -eq $pid_ok ]
        then
            echo "$name is stopped"
            return 1
        else
            # check if process count is equal to the pid running
            if [ $total_pid -eq $pid_ok ]
            then
                echo "$name is running"
                return 0
            else
                echo "ERR: process running [$pid_ok] less then totall [$total_pid] process"
                return 4
            fi
        fi
    else
        echo "$name is stopped"
        return 1
    fi
}

rh_status_q() {
    rh_status >/dev/null 2>&1
}

case "$1" in
    start)
        rh_status_q && exit 0
        $1
        ;;
    stop)
        rh_status_q
        retval=$?
        [ 1 -eq $retval ] && exit 0
        $1
        ;;
    restart)
        $1
        ;;
    reload)
        $1
        ;;
    status)
        rh_status
        ;;
    condrestart|try-restart)
        rh_status_q || exit 0
        restart
        ;;
    *)
        echo $"Usage: $0 {start|stop|status|restart|condrestart|try-restart}"
        exit 2
esac
exit $?
5894c7b782e9611ac0e605132de4a4e928d96343
Shell
Cloudxtreme/dotfiles-26
/udev/scripts/canoscan_scanscript.sh
UTF-8
787
3.484375
3
[ "Unlicense" ]
permissive
#!/bin/bash scanimagebin="/usr/bin/scanimage" scanner="genesys:libusb:"$(ls -la /dev/canoscan | cut -f 2 -d '>' | cut -f 3-4 -d '/' | sed "s/\//\:/") mkdir -p /tmp/scans if [ ! -e scans ] then ln -s /tmp/scans scans fi while [ 1 ] do num=$(find ./scans/ -size +0k -iname "scan*.png" | wc -l) echo -n "Scanning scan$num.png..." errors=1 while [ $errors -gt 0 ] do $scanimagebin -d $scanner --mode Color --resolution 300 -p > scans/scan$num.png 2>scanlog.txt errors=$(cat scanlog.txt | wc -l) if [ $errors -gt 0 ] then rm scanlog.txt echo -n "." else rm scanlog.txt echo echo "Press return to scan again, Ctrl-C to quit." read fi done done
true
816e81afa57c8a6f0f4e0aef87de2d6e0c0e8f78
Shell
gitGNU/gnu_rtty
/agelog.sh
UTF-8
5,945
3.734375
4
[ "ISC" ]
permissive
#! /bin/sh # Copyright (c) 1996 by Internet Software Consortium. # # Permission to use, copy, modify, and distribute this software for any # purpose with or without fee is hereby granted, provided that the above # copyright notice and this permission notice appear in all copies. # # THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SOFTWARE CONSORTIUM DISCLAIMS # ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES # OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL INTERNET SOFTWARE # CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL # DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR # PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS # ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS # SOFTWARE. # (this is just a local copy to make sure it's available; master source for # this is elsewhere) # # $Id: agelog.sh,v 1.4 2005-04-25 15:47:42 vixie Exp $ # # agelog -- age log files, by Dave Lennert as told to Bob Desinger and # James Brister # # Usage: $0 [-m] [-p Bpid] [-s Bsig] logFile \ # [-p Apid] [-s Asig] howMany [stashDir] # # # The most recent <howMany> logs are kept around by renaming logFile to # logFile.0, after similarly rolling logFile.0 => logFile.1 => logFile.2 => ... # If <stashDir> is named, the old logs will be kept in that directory. If not # given, the old logs are left in the same directory as the original logFile. 
# # Example: # `agelog /usr/adm/sulog 2' will, if run weekly, keep 3 files around: # /usr/adm/sulog containing this week's log info # /usr/adm/sulog.0 containing last week's log info # /usr/adm/sulog.1 containing the week before last's log info # # A typical crontab entry: # # Keep the most recent 2 weeks worth of uucp logs around in # # /tmp/Oldlogs/*, one per day, so that old LOGFILEs will be in # # /tmp/Oldlogs/LOGFILE.{0,1,2,...} # 00 1 * * * /usr/local/agelog /usr/spool/uucp/LOGFILE 14 /tmp/Oldlogs # # # Modification Tue Oct 9 16:48:56 1990 James Brister # # This now accepts some options: # -m if given before the log file name then mv will be used instead # of cp to move the file. # -p pid if given before the log file name then a signal will be # sent to the specified pid before the moves are done. # if given after the log file name then the signal is sent # after the moves. The default signal is HUP # -s sig if given before the log file name then the signal sent # before the move is changed from HUP to sig. If specified # after the log file name then the signal sent after the move # is changed from HUP to sig. # -h just displays the usage and exits. # # examples: # agelog -p 9999 somelog 3 # this will send a HUP signal to pid 9999 then save 3 # versions of the log files (using cp for the final move). # # agelog -m -p 8888 somelog -p 8888 4 # this will send a HUP signal to pid 8888 before saving the # 4 versions of the log files and then after. It will use mv # for the final move (not cp). # # agelog -p 7777 -s ALRM somelog -p 7777 2 # this will send a ALRM signal to pid 7777, then it will save # the log files, then it will send a HUP to pid 7777 # # NOTE: the changing of the BEFORE signal doesn't affect the AFTER # signal. 
Likewise with the pid's # # # set -vx # Initialize: PATH=/usr/ucb:/usr/bin:/bin:/etc:/usr/lib # BSD systems have /usr/ucb export PATH # traps: 0=exit 1=hangup 2=interrupt 3=quit 15=terminate trap 'echo 1>&2 "$0: Ow!"; exit 15' 1 2 3 15 MOVE=cp # default is to COPY log file, not MOVE it. ASIGNAL=HUP # signal to send AFTER the move/copy APID=0 # pid to send signal to AFTER the move/copy BSIGNAL=HUP # signal to send BEFORE the move/copy. BPID=0 # pid to send signal to BEFORE the move/copy USAGE="Usage: `basename $0` [-m] [-p Bpid] [-s Bsig] logFile [-p Apid] [-s Asig] howMany [stashDir]" # # Digest arguments: # # get the BEFORE arguments while [ `expr -- "$1" : '-.*'` -gt 0 ] do case "$1" in -h) echo $USAGE exit 0 ;; -m) MOVE=mv ;; -p) BPID=$2 shift ;; -s) BSIGNAL=$2 shift ;; -*) echo 1>&2 $USAGE exit 2 ;; esac shift done # now get the log file name if [ 0 -eq $# ] then echo $USAGE exit 2 else log="$1" # logFileName shift fi # now get the AFTER arguments while [ `expr -- "$1" : '-.*'` -gt 0 ] do case "$1" in -p) APID=$2 shift ;; -s) ASIGNAL=$2 shift ;; -*) echo 1>&2 $USAGE exit 2 ;; esac shift done # now get the numer of copies to save and the stash directory if [ 0 -eq $# ] then echo 1>&2 $USAGE exit 2 else max="$1" # howMany shift fi if [ 0 -eq $# ] then # no directory to stash them in; use log's directory head=`expr $log : '\(.*/\).*'` # /a/b/x => /a/b/ else # user specified a directory if [ ! -d "$1" ] then echo 1>&2 "$0: $1 is not a directory" exit 2 else head="$1/" fi fi # # Send signal if required BEFORE move # if [ 0 -ne $BPID ] then kill -$BSIGNAL $BPID fi # # Rename log.$max-1 => ... => log.3 => log.2 => log.1 # arch="${head}`basename $log`" # name of archive files, sans {.0, .1, ...} older=`expr $max - 1` # ensure we had a number in $2 if [ $? 
-eq 2 ] then # not a number, or some problem echo 1>&2 "$0: cannot decrement $max" exit 2 fi while [ $older -gt 0 ] do # age the logfiles in the stashdir old=`expr $older - 1` if [ -f $arch.$old ] then mv $arch.$old $arch.$older fi older=`expr $older - 1` done # # Old logfiles are all rolled over; now move the current log to log.0 # # Use cp instead of mv to retain owner & permissions for the original file, # and to avoid prematurely aging any info going into the file right now, # unless the user has given the -m option if [ -f $log ] then # don't create an old log if $2 was 0 test $max -gt 0 && $MOVE $log $arch.0 cp /dev/null $log # clear out log fi # # Now send signals if required # if [ 0 -ne $APID ] then kill -$ASIGNAL $APID fi exit 0
true
a59c9b752e57e165e05aa057529b2403ff57f301
Shell
pisan342/memory-leaks-r-us
/runmemcheck.sh
UTF-8
362
3.234375
3
[]
no_license
#!/bin/bash # remove executable echo "Removing previous executable: rm -f ./a.out" rm -f ./a.out # compile echo "Compiling: clang++ -g -std=c++11 *.cpp" clang++ -g -std=c++11 *.cpp if [ ! -f "a.out" ] then echo "*** Failed to create executable ***" exit fi # executing under valgrind echo "Executing with valgrind: valgrind ./a.out" valgrind ./a.out
true
744c51c2a1d07fcdf3ccc14371128b38ce3d73dc
Shell
brownman/docker-selenium
/novnc/bin/start-novnc.sh
UTF-8
452
2.671875
3
[ "Apache-2.0" ]
permissive
#!/usr/bin/env bash # Exit immediately if a command exits with a non-zero status set -e # Wait for this process dependencies timeout --foreground ${WAIT_TIMEOUT} wait-xvfb.sh timeout --foreground ${WAIT_TIMEOUT} wait-xmanager.sh timeout --foreground ${WAIT_TIMEOUT} wait-vnc.sh # Usage at https://github.com/kanaka/noVNC/blob/master/utils/launch.sh ${NORMAL_USER_HOME}/noVNC/utils/launch.sh \ --listen ${NOVNC_PORT}\ --vnc localhost:${VNC_PORT}
true
63d63554e56c28c19ab289292893e68f1828fa83
Shell
millerh1/conda-testing
/conda-recipe/build.sh
UTF-8
326
2.96875
3
[]
no_license
#!/bin/bash BINARY_HOME=$PREFIX/bin RSEQ_HOME=$PREFIX/share/$PKG_NAME-$PKG_VERSION-$PKG_BUILDNUM # Copy source to the conda environment mkdir -p $RSEQ_HOME cp -R $SRC_DIR/* $RSEQ_HOME/ # Create symbolic links for RSeq launch script mkdir -p $BINARY_HOME chmod +x $RSEQ_HOME/bin/RSeq ln -s $RSEQ_HOME/bin/RSeq $BINARY_HOME/
true
534f8976391e1156bb7d4a2fdf74a2f61cffdab3
Shell
ccoVeille/dotfiles_old
/source/81_git.sh
UTF-8
350
3.171875
3
[ "MIT" ]
permissive
#!/bin/sh if [ ! -x "/usr/bin/git" ] ; then return fi alias g='git' source /usr/share/bash-completion/completions/git complete -o default -o nospace -F _git g _git_mkdir () { __gitcomp_nl "$(__git_refs)" } fgrep NAME ~/.gitconfig && echo "Please fix NAME in ~/.gitconfig" fgrep EMAIL ~/.gitconfig && echo "Please fix EMAIL in ~/.gitconfig"
true
191ab4aec754d2feca8d3a63b49013dbb24f47c4
Shell
zaleilynn/co
/script/start_all.sh
UTF-8
253
2.96875
3
[]
no_license
#!/bin/bash if [ $UID -ne 0 ] then echo "Must be root to run this script." exit -1 fi if [ -z $CELLO_HOME ] then echo "CELLO_HOME is not set." exit -1 fi $CELLO_HOME/bin/scheduler & $CELLO_HOME/bin/collector & $CELLO_HOME/bin/cellet &
true
50044927e8d72ffe68e237f247cbf3fe290753b5
Shell
csjones/swift
/utils/base-deps-docker/build.sh
UTF-8
528
3.640625
4
[ "Apache-2.0" ]
permissive
#!/bin/bash # Builds base deps docker images for various CUDA versions, and uploads them to # the Google Container Registry. set -exuo pipefail do_build() { export S4TF_CUDA_VERSION="$1" export S4TF_CUDNN_VERSION="$2" IMAGE_NAME="gcr.io/swift-tensorflow/base-deps-cuda${S4TF_CUDA_VERSION}-cudnn${S4TF_CUDNN_VERSION}-ubuntu18.04" sudo -E docker build \ -t "$IMAGE_NAME" \ --build-arg S4TF_CUDA_VERSION \ --build-arg S4TF_CUDNN_VERSION \ . docker push "$IMAGE_NAME" } do_build 10.1 7 do_build 10.2 7
true
85f371dd189357030e57d9de2c8e5851272caeaa
Shell
gjskha/bsbs
/bsbs
UTF-8
7,554
4.28125
4
[]
no_license
#!/bin/bash

# bsbs - a bayesian spam filter written in the bash shell.

####################################################################
# first, basic housekeeping stuff

program=$(basename $0);
pidfile=/tmp/$program.lockfile;
corpora=$(pwd)/corpora

# make sure we are not still training when we score: the shell can be slow.
if [ -f $pidfile ]; then
    echo "Error: $program is currently running under process $(< $pidfile ). Exiting."
    exit 1
else
    echo $$ > $pidfile
fi

# cleanup:
# remove the lockfile and scratch files, then exit with the given status.
trap 'cleanup 1' SIGINT SIGTERM
function cleanup() {
    rm $pidfile
    rm -f /tmp/$program.$$*
    exit $1
}

# usage_statement:
# how to use this.
function usage_statement() {
    echo "$program is a bayesian spam filter written in the bash shell."
    echo
    echo "Usage is one of:"
    echo "    $program -h"
    echo "    $program -t"
    echo "    $program -p [spool] [dispensation]"
    echo "    $program [-q] -s [document]"
    echo "Where:"
    echo "    -h prints this message and exits"
    echo "    -p takes a spool of messages to create a specified corpus under $corpora"
    echo "    -t trains $program on $corpora"
    echo "    -s scores the specified document"
    echo "    -q tells $program to be quiet, for use within mutt for example"
    cleanup 0
}

# corpora_exists:
# check that there is usable data.
function corpora_exists() {
    if [ ! -d $corpora ]; then
        echo "Error: corpora directory $corpora doesn't exist or is not accessible: Exiting."
        cleanup 1
    fi
}

####################################################################
# Rest of our subroutines go here

# populate_corpus:
# take a mail spool, divide it into individual messages and add it to the specified corpus
function populate_corpus() {
    spool=$1
    disp=$2
    #echo spool is $spool and dispensation is $disp
    last=$( ls -1 $corpora/$disp/[0-9]* 2>/dev/null | wc -l )
    awk -v "last=$last" -v "corpus=$corpora/$disp" 'BEGIN {RS = "\n\nFrom "} {print "From",$0 > corpus"/"last+NR} \
        END {print "put "NR" messages into "corpus}' $spool
}

# stats_file_rename:
# give us the date of the last scoring
function stats_file_rename {
    label=$1
    grep "@@@@" $corpora/$label/stats.$label | awk '{print $3}'
}

# train_corpus:
# counts the tokens and the number of times it occurs for a given corpus (spam or ham)
function train_corpus() {
    label=$1

    if [ -f $corpora/$label/stats.$label ]; then
        #mv $corpora/$label/stats.$label $corpora/$label/stats.$label.bak
        mv $corpora/$label/stats.$label $corpora/$label/stats.$label.$( stats_file_rename $label )
    fi

    if [ -d $corpora/$label ]; then
        echo "training corpus: $label"
        file_count=0
        for file in $( ls $corpora/$label | grep -v stats ); do
            file_count=$(( $file_count + 1 ))
            # one token per line for counting
            sed 's/\s/\n/g' $corpora/$label/$file >> /tmp/$program.$$.$label
        done
        # header line: "@@@@ <message count> <timestamp>"
        echo "@@@@ $file_count $(date +%y-%m-%d-%H%M%S)" > $corpora/$label/stats.$label
        sort /tmp/$program.$$.$label | uniq -c >> $corpora/$label/stats.$label
    else
        echo "Error: $corpora/$label was not found or is unavailable. Exiting."
        cleanup 1
    fi
}

# add_document_to_corpus:
# takes a message and places it in either spam or ham, named sequentially.
function add_document_to_corpus() {
    document=$1
    corpus=$2
    old_fc=$( ls -1 $corpora/$corpus/[0-9]* 2>/dev/null | wc -l )
    new_fc=$(( $old_fc + 1 ))
    cp $document $corpora/$corpus/$new_fc
    if [[ $? != 0 ]]; then
        echo "Error: could not copy $document to $corpora/$corpus/$new_fc. Exiting."
        cleanup 1
    fi
}

# score_document:
# implements the bayesian formula over the 5 most frequent tokens of the
# document, then files the document as spam (>60% spammy) or ham.
# NOTE(review): tokens are fed to grep as regular expressions unquoted;
# tokens containing regex metacharacters or a leading dash will misbehave.
# Switching to `grep -F -- "$token"` would change matching semantics, so it
# is left as-is pending a decision.
function score_document() {
    docmt=$1

    spam_file_count=$( grep "@@@@" $corpora/spam/stats.spam | awk '{print $2}' )
    ham_file_count=$( grep "@@@@" $corpora/ham/stats.ham | awk '{print $2}' )

    while read line; do
        token=${line##* }
        num_token_spam=$(grep $token $corpora/spam/stats.spam | awk '{print $1}' | head -1 ) # spam vs. spammy
        num_token_ham=$(grep $token $corpora/ham/stats.ham | awk '{print $1}' | head -1 )

        if [[ $num_token_spam == "" ]]; then
            # a very small value, to avoid calculation errors
            bw=$( echo 0.00001 / $spam_file_count | bc -l )
        else
            bw=$( echo $num_token_spam / $spam_file_count | bc -l )
        fi

        if [[ $num_token_ham == "" ]]; then
            # a very small value, to avoid calculation errors
            gw=$( echo 0.00001 / $spam_file_count | bc -l )
        else
            gw=$( echo $num_token_ham / $ham_file_count | bc -l )
        fi

        pw=$( echo scale\=2\; $bw / \( $bw + $gw \) | bc -l )
        all_token_probabilities+=( $pw )
    done < <(sed 's/\s/\n/g' $docmt | sort | uniq -c | sort -rn | head -5) # 5 most interesting tokens only

    # create our equation for computing the overall score
    for ix in ${!all_token_probabilities[*]}; do
        if [[ $ix -gt 0 ]]; then
            # there is a subtle bug here, if you have files in the pwd that are named like .[0-9]*
            numerator=$( echo ${numerator}*${all_token_probabilities[$ix]} );
            denomina1=$( echo ${denomina1}*${all_token_probabilities[$ix]} );
            denomina2=$( echo ${denomina2}*\( 1 - ${all_token_probabilities[$ix]}\) );
        else
            numerator=$( echo ${all_token_probabilities[$ix]} );
            denomina1=$( echo ${all_token_probabilities[$ix]} );
            denomina2=$( echo \(1 - ${all_token_probabilities[$ix]}\) );
        fi
    done

    # get it.
    overall_score=$( echo scale\=2\; $numerator / \( $denomina1 + $denomina2 \) | bc -l )
    echo -n "overall score is $overall_score. ";

    # if it is more than 60% spammy, add it to the spam corpus, otherwise, add to ham.
    if [ $( echo "${overall_score}>.6" | bc ) -eq 1 ]; then
        echo "Spammy";
        add_document_to_corpus $docmt spam
    else
        echo "Hammy";
        add_document_to_corpus $docmt ham
    fi
}

######################################################################
# Get our command line variables and begin the execution of the script

# BUGFIX: `q` was handled in the case statement (and advertised by -h) but
# missing from the getopts optstring, so `bsbs -q -s doc` failed with an
# illegal-option error.  Optstring is now "hqtp:s:".
while getopts hqtp:s: opt; do
    case $opt in
        h) usage_statement ;;
        q) be_quiet=true ;;
        t) topt=1 ;;
        s) sopt=1; docmt2score=$OPTARG ;;
        p) popt=1; spool_info=( $@ ) ;;
        \?) usage_statement ;;
    esac;
done

# one thing at a time
if [[ $topt && $sopt || $topt && $popt || $sopt && $popt ]]; then
    echo "Error: you specified more than one option. Please choose one only. Exiting."
    cleanup 1
fi

# make sure the script has something to do
if [[ -z $topt && -z $sopt && -z $popt ]]; then
    echo "Error: Nothing to do! (try the -h flag). Exiting."
    cleanup 1
fi

# finally, decide what to do based on what options are passed.
if [ $sopt ]; then
    if [ ! -f $docmt2score ]; then
        echo "Error: cannot access the document \"$docmt2score\". Exiting."
        cleanup 1
    else
        score_document $docmt2score
    fi
fi

if [ $topt ]; then
    train_corpus ham
    train_corpus spam
fi

if [ $popt ]; then
    spool=${spool_info[1]}
    dispensation=${spool_info[2]}
    if [ ! -f $spool ]; then
        echo "Error: cannot access mail spool \"$spool\". Exiting."
        cleanup 1
    else
        populate_corpus $spool $dispensation
    fi
fi

cleanup 0
true
d25010ca29f48599c4ff512deb8d9bbf65984e0b
Shell
joejag/ubuntu-1404-vagrant
/bootstrap.sh
UTF-8
819
2.96875
3
[]
no_license
# bootstrap.sh — provision an Ubuntu 14.04 desktop box with base tools,
# Ruby 2.2, Node 4.x and Google Chrome.  Intended as the Vagrant provisioner:
# vagrant init box-cutter/ubuntu1404-desktop; vagrant up --provider virtualbox

# Abort on the first failing command so a broken repo setup is not silently
# followed by installs from stale sources.
set -e

echo "* Install Base tools"
# Refresh package lists before the first install: on a fresh box the cached
# lists may be stale or missing, which would make this install fail.
sudo apt-get update
sudo apt-get install vim git curl -y

echo " * Setup installers for Ruby, Node, Chrome"
# ruby: Brightbox PPA ships ruby2.2 packages for trusty
sudo apt-add-repository ppa:brightbox/ruby-ng -y
# node: NodeSource setup script registers the Node 4.x apt repository
curl -sL https://deb.nodesource.com/setup_4.x | sudo bash -
# chrome: import Google's signing key, then add the stable-channel repo
curl -sL https://dl-ssl.google.com/linux/linux_signing_key.pub | sudo apt-key add -
sudo sh -c 'echo "deb http://dl.google.com/linux/chrome/deb/ stable main" >> /etc/apt/sources.list.d/google.list'

# process these new repos
sudo apt-get update

echo "* Install Ruby 2.2, Node 4.x, Bundler"
sudo apt-get install ruby2.2 ruby2.2-dev zlib1g-dev nodejs -y
sudo gem install bundler

echo "* Install Chrome"
sudo apt-get install google-chrome-stable -y
true
da85f7f15da66f4fae973ed20d122e2c318d3307
Shell
MaGlush/CORS
/addDomain.sh
UTF-8
1,828
3.71875
4
[]
no_license
# addDomain.sh — interactive provisioning of an Apache virtual host.
# Flow: optionally install Apache; create /var/www/<host>/public_html with a
# stub index.html; write /etc/apache2/sites-available/<host>.conf; enable the
# site with a2ensite; prompt the user to add "your_ip <host>" to /etc/hosts
# in nano; finally open http://www.<host> in the default browser (xdg-open).
# NOTE(review): all user-facing prompts below are Russian runtime strings and
# are intentionally left byte-identical.
# NOTE(review): apache is restarted once before a2ensite and once after; the
# first restart looks redundant — confirm before removing.
#!/bin/bash echo echo "*******************************************" echo \* Скрипт по добавлению виртуального хоста \* echo "*******************************************" echo read -n 1 -p "Apache установлен? [y/n]: " AMSURE if [ "$AMSURE" = "n" ] then echo Установка Apache sudo apt-get update sudo apt-get install apache2 fi echo echo Пример: domain.com read -p "Введите host: " HOST echo HOST_NAME: $HOST echo Создание структуры директорий sudo mkdir -p /var/www/$HOST/public_html echo Назначение прав sudo chown -R $USER:$USER /var/www/$HOST/public_html sudo chmod -R 755 /var/www echo Создание index.html echo "<html> <head> <title>Welcome to $HOST!</title> </head> <body> <h1>Success! $HOST virtual host is working!</h1> </body> </html>" > /var/www/$HOST/public_html/index.html echo Создание файла конфигурации хоста echo "<VirtualHost *:80> ServerAdmin admin@$HOST ServerName $HOST ServerAlias www.$HOST DocumentRoot /var/www/$HOST/public_html ErrorLog \${APACHE_LOG_DIR}/error.log CustomLog \${APACHE_LOG_DIR}/access.log combined </VirtualHost>" > $HOST.conf sudo mv $HOST.conf /etc/apache2/sites-available/$HOST.conf echo Включение виртуального хоста sudo service apache2 restart sudo a2ensite $HOST.conf echo Перезагрузка apache sudo systemctl restart apache2 echo Настройка hosts ifconfig echo Добавьте строку your_ip $HOST и сохраните echo read -p "Нажми ENTER, чтобы продолжить" CHECK sudo nano /etc/hosts echo read -p "Нажми ENTER, для проверки" CHECK xdg-open http://www.$HOST
true
c0c19ccc77c75d79b35163b90e5f10e53833c13d
Shell
stjordanis/atmi
/bin/cloc_wrapper.sh
UTF-8
13,569
3.359375
3
[ "MIT" ]
permissive
# cloc_wrapper.sh — wraps cloc.sh and HSAILasm to turn an OpenCL .cl file (or
# a .hsail file, as "step 2" of a manual HSAIL optimization flow) into a BRIG
# module, optionally composing in an HSAIL library (-hsaillib, default
# $ATMI_PATH/bin/builtins-hsail.hsail) and/or running the HSA offline
# finalizer (-hof).  Output is <symbol>.brig or <symbol>.hof next to the
# input, unless -o is given.  See usage() below for the full option list.
# NOTE(review): temp files live in /tmp/atmi_$$ unless -t is given; a
# pre-existing or output-equal temp dir implicitly enables -k (keep).
# NOTE(review): when -hsaillib is active the script disassembles the BRIG,
# injects an ATMI_CONTEXT global plus per-kernel init stanzas via sed line
# offsets, appends the library, and reassembles — fragile against HSAIL
# layout changes; handle with care.
#!/bin/bash #MIT License # #Copyright © 2016 Advanced Micro Devices, Inc. # #Permission is hereby granted, free of charge, to any person obtaining a copy of #this software and associated documentation files (the "Software"), to deal in #the Software #without restriction, including without limitation the rights to use, copy, #modify, merge, publish, distribute, sublicense, and/or sell copies of the #Software, and to permit #persons to whom the Software is furnished to do so, subject to the following #conditions: # #The above copyright notice and this permission notice shall be included in all #copies or substantial portions of the Software. # #THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #FITNESS FOR A PARTICULAR #PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS #BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF #CONTRACT, TORT OR #OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE #OR OTHER DEALINGS IN THE SOFTWARE. # # Written by Ashwin Aji Ashwin.Aji@amd.com # PROGVERSION=0.3.0 function usage(){ /bin/cat 2>&1 <<"EOF" Converts a CL file to the BRIG char array that can be later loaded in HSA modules. 
Usage: cloc_wrapper.sh [ options ] filename.cl Options without values: -version Display version of this tool and exit -v Verbose messages -vv Get additional verbose messages from cloc.sh -n Dryrun, do nothing, show commands that would execute -h Print this help message -k Keep temporary files -hof Use HSA Offline Finalizer to create machine ISA Options with values: -opt <LLVM opt> Default=2, passed to cloc.sh to build HSAIL -t <tempdir> Default=/tmp/atmi_$$, Temp dir for files -s <symbolname> Default=filename -p <path> $ATMI_PATH or <sdir> if ATMI_PATH not set <sdir> is actual directory of cloc_wrapper.sh -cp <path> $HSA_LLVM_PATH or /usr/bin -rp <HSA RT path> Default=$HSA_RUNTIME_PATH or /opt/hsa -o <outfilename> Default=<file>_brig.h or <file>_hof.h -hsaillib <hsail file> Add HSAIL library to kernel code -clopts <compiler opts> Default="-cl-std=CL2.0" Examples: cloc_wrapper.sh my.cl /* create my_brig.h */ cloc_wrapper.sh -hof my.cl /* create my_hof.h (finalized) */ cloc_wrapper.sh -hsaillib mylib.hsail my.cl /* creates my_brig.h, a composition * * of my.cl & mylib.hsail */ cloc_wrapper.sh -hof -hsaillib mylib.hsail my.cl /* creates my_hof.h, a composition * * of my.cl & mylib.hsail (finalized)*/ cloc_wrapper.sh -t /tmp/foo my.cl /* will automatically set -k */ You may set environment variables ATMI_PATH,HSA_LLVM_PATH, or HSA_RUNTIME_PATH, instead of providing options -p, -cp, -rp. Command line options will take precedence over environment variables. Copyright (c) 2015 ADVANCED MICRO DEVICES, INC. EOF exit 1 } DEADRC=12 # Utility Functions function do_err(){ if [ ! $KEEPTDIR ] ; then rm -rf $TMPDIR fi exit $1 } function version(){ echo $PROGVERSION exit 0 } function getdname(){ local __DIRN=`dirname "$1"` if [ "$__DIRN" = "." ] ; then __DIRN=$PWD; else if [ ${__DIRN:0:1} != "/" ] ; then if [ ${__DIRN:0:2} == ".." ] ; then __DIRN=`dirname $PWD`/${__DIRN:3} else if [ ${__DIRN:0:1} = "." 
] ; then __DIRN=$PWD/${__DIRN:2} else __DIRN=$PWD/$__DIRN fi fi fi fi echo $__DIRN } # -------- The main code starts here ----- # Argument processing while [ $# -gt 0 ] ; do case "$1" in -k) KEEPTDIR=true;; --keep) KEEPTDIR=true;; -n) DRYRUN=true;; -hof) HOF=true;; -opt) LLVMOPT=$2; shift ;; -s) SYMBOLNAME=$2; shift ;; -o) OUTFILE=$2; shift ;; -t) TMPDIR=$2; shift ;; -hsaillib) HSAILLIB=$2; shift ;; -clopts) CLOPTS=$2; shift ;; -p) ATMI_PATH=$2; shift ;; -cp) HSA_LLVM_PATH=$2; shift ;; -rp) HSA_RUNTIME_PATH=$2; shift ;; -h) usage ;; -help) usage ;; --help) usage ;; -version) version ;; --version) version ;; -v) VERBOSE=true;; -vv) CLOCVERBOSE=true;; --) shift ; break;; -*) usage ;; *) break;echo $1 ignored; esac shift done # The above while loop is exited when last string with a "-" is processed LASTARG=$1 shift # Allow output specifier after the cl file if [ "$1" == "-o" ]; then OUTFILE=$2; shift ; shift; fi if [ ! -z $1 ]; then echo " " echo "WARNING: This script can only process one .cl file at a time." echo " You can call the script multiple times to get multiple outputs." echo " Argument $LASTARG will be processed. " echo " These args are ignored: $@" echo " " fi sdir=$(getdname $0) [ ! -L "$sdir/cloc_wrapper.sh" ] || sdir=$(getdname `readlink "$sdir/cloc_wrapper.sh"`) ATMI_PATH=${ATMI_PATH:-$sdir/..} HSA_LLVM_PATH=${HSA_LLVM_PATH:-/usr/bin} HSA_HLC_BIN_PATH=/opt/rocm/hlc3.2/bin # Set Default values LLVMOPT=${LLVMOPT:-2} HSA_RUNTIME_PATH=${HSA_RUNTIME_PATH:-/opt/hsa} CMD_BRI=${CMD_BRI:-HSAILasm } RUNDATE=`date` filetype=${LASTARG##*\.} if [ "$filetype" != "cl" ] ; then if [ "$filetype" == "hsail" ] ; then HSAIL_OPT_STEP2=true else echo "ERROR: $0 requires one argument with file type cl or hsail " exit $DEADRC fi fi if [ ! -e "$LASTARG" ] ; then echo "ERROR: The file $LASTARG does not exist." exit $DEADRC fi if [ ! 
-d $HSA_LLVM_PATH ] ; then echo "ERROR: Missing directory $HSA_LLVM_PATH " echo " Set env variable HSA_LLVM_PATH or use -p option" exit $DEADRC fi [ -z $HSAILLIB ] && HSAILLIB=$ATMI_PATH/bin/builtins-hsail.hsail if [ "$HSAILLIB" != "" ] ; then if [ ! -f $HSAILLIB ] ; then echo "ERROR: The HSAIL file $HSAILLIB does not exist." exit $DEADRC fi fi # Parse LASTARG for directory, filename, and symbolname INDIR=$(getdname $LASTARG) CLOPTS=${CLOPTS:-cl-std=CL2.0 -I$INDIR -I$ATMI_PATH/include} CLNAME=${LASTARG##*/} # FNAME has the .cl extension removed, used for symbolname and intermediate filenames FNAME=`echo "$CLNAME" | cut -d'.' -f1` SYMBOLNAME=${SYMBOLNAME:-$FNAME} BRIGOFILE="${SYMBOLNAME}.brig" HOFOFILE="${SYMBOLNAME}.hof" BRIGHFILE="${SYMBOLNAME}_brig.h" HOFHFILE="${SYMBOLNAME}_hof.h" OTHERCLOCFLAGS="-opt $LLVMOPT" if [ -z $OUTFILE ] ; then # Output file not specified so use input directory OUTDIR=$INDIR # Make up the output file name based on last step if [ $HOF ]; then OUTFILE=$HOFOFILE else OUTFILE=$BRIGOFILE fi else # Use the specified OUTFILE. Bad idea for snack OUTDIR=$(getdname $OUTFILE) OUTFILE=${OUTFILE##*/} fi if [ $CLOCVERBOSE ] ; then VERBOSE=true fi if [ $HSAIL_OPT_STEP2 ] ; then [ $VERBOSE ] && echo " " && echo "#WARN: ***** Step 2 of manual HSAIL optimization process detected. ***** " fi TMPDIR=${TMPDIR:-/tmp/atmi_$$} if [ -d $TMPDIR ] ; then KEEPTDIR=true else if [ $DRYRUN ] ; then echo "mkdir -p $TMPDIR" else mkdir -p $TMPDIR fi fi # Be sure not to delete the output directory if [ $TMPDIR == $OUTDIR ] ; then KEEPTDIR=true fi if [ ! -d $TMPDIR ] && [ ! $DRYRUN ] ; then echo "ERROR: Directory $TMPDIR does not exist or could not be created" exit $DEADRC fi if [ ! -e $HSA_HLC_BIN_PATH/$CMD_BRI ] ; then echo "ERROR: Missing HSAILasm in $HSA_LLVM_PATH" echo " Set env variable HSA_LLVM_PATH or use -p option" exit $DEADRC fi if [ ! -d $OUTDIR ] && [ ! 
$DRYRUN ] ; then echo "ERROR: The output directory $OUTDIR does not exist" exit $DEADRC fi FULLOUTFILE=$OUTDIR/$OUTFILE [ $VERBOSE ] && echo "#Info: Version: cloc_wrapper.sh $PROGVERSION" [ $VERBOSE ] && echo "#Info: Input Files: " [ $VERBOSE ] && echo "# File: $INDIR/$CLNAME" [ $VERBOSE ] && echo "#Info: Output Files:" if [ $HOF ] ; then [ $VERBOSE ] && echo "# HOF incl: $FULLOUTFILE" else [ $VERBOSE ] && echo "# BRIG incl: $FULLOUTFILE" fi [ $VERBOSE ] && echo "#Info: Run date: $RUNDATE" [ $VERBOSE ] && echo "#Info: LLVM path: $HSA_LLVM_PATH" [ $MAKEOBJ ] && [ $VERBOSE ] && echo "#Info: Runtime: $HSA_RUNTIME_PATH" [ $MAKEOBJ ] && [ $VERBOSE ] && echo "#Info: ATMI Runtime: $ATMI_PATH" [ $KEEPTDIR ] && [ $VERBOSE ] && echo "#Info: Temp dir: $TMPDIR" rc=0 if [ $HSAIL_OPT_STEP2 ] ; then # This is step 2 of manual HSAIL BRIGDIR=$TMPDIR BRIGNAME=$FNAME.brig [ $VERBOSE ] && echo "#Step: hsail --> brig ..." if [ $DRYRUN ] ; then echo "$HSA_HLC_BIN_PATH/$CMD_BRI -o $BRIGDIR/$BRIGNAME $INDIR/$FNAME.hsail" else echo "$HSA_HLC_BIN_PATH/$CMD_BRI -o $BRIGDIR/$BRIGNAME $INDIR/$FNAME.hsail" $HSA_HLC_BIN_PATH/$CMD_BRI -o $BRIGDIR/$BRIGNAME $INDIR/$FNAME.hsail rc=$? if [ $rc != 0 ] ; then echo "ERROR: The following command failed with return code $rc." echo " $HSA_HLC_BIN_PATH/$CMD_BRI -o $BRIGDIR/$BRIGNAME $INDIR/$FNAME.hsail" do_err $rc fi fi else # Not step 2, do normal steps [ $VERBOSE ] && echo "Step: Copy: $INDIR/$CLNAME --> $TMPDIR/updated.cl" cp $INDIR/$CLNAME $TMPDIR/updated.cl # Call cloc to generate brig if [ $CLOCVERBOSE ] ; then OTHERCLOCFLAGS="$OTHERCLOCFLAGS -v" fi [ $VERBOSE ] && echo "#Step: cloc.sh cl --> brig ..." 
if [ $DRYRUN ] ; then echo "$HSA_LLVM_PATH/cloc.sh -brig -t $TMPDIR -k -clopts ""-I$INDIR -I$ATMI_PATH/include"" $OTHERCLOCFLAGS $TMPDIR/updated.cl" else [ $CLOCVERBOSE ] && echo " " && echo "#------ Start cloc.sh output ------" [ $CLOCVERBOSE ] && echo "$HSA_LLVM_PATH/cloc.sh -brig -t $TMPDIR -k -clopts "-I$INDIR -I$ATMI_PATH/include" $OTHERCLOCFLAGS $TMPDIR/updated.cl" $HSA_LLVM_PATH/cloc.sh -brig -t $TMPDIR -k -clopts "-I$INDIR -I$ATMI_PATH/include" $OTHERCLOCFLAGS $TMPDIR/updated.cl rc=$? [ $CLOCVERBOSE ] && echo "#------ End cloc.sh output ------" && echo " " if [ $rc != 0 ] ; then echo "ERROR: cloc.sh failed with return code $rc. Command was:" echo " $HSA_LLVM_PATH/cloc.sh -brig -t $TMPDIR -k -clopts "-I$INDIR -I$ATMI_PATH/include" $OTHERCLOCFLAGS $TMPDIR/updated.cl" do_err $rc fi fi BRIGDIR=$TMPDIR BRIGNAME=updated.brig fi if [ "$HSAILLIB" != "" ] ; then # disassemble brig $BRIGDIR/$BRIGNAME to composite.hsail [ $VERBOSE ] && echo "#Step: Add HSAIL brig --> hsail+hsaillib --> $BRIGHFILE ..." 
if [ $DRYRUN ] ; then echo $HSA_HLC_BIN_PATH/$CMD_BRI -disassemble -o $TMPDIR/composite.hsail $BRIGDIR/$BRIGNAME else $HSA_HLC_BIN_PATH/$CMD_BRI -disassemble -o $TMPDIR/composite.hsail $BRIGDIR/$BRIGNAME fi # Inject ATMI_CONTEXT sed -i -e "5i\ alloc(agent) global_u64 &ATMI_CONTEXT = 0;\n\ " $TMPDIR/composite.hsail entry_lines=($(grep -n "@__OpenCL_" $TMPDIR/composite.hsail | grep -Eo '^[^:]+')) num_kernels=${#entry_lines[@]} offset=2; for ((i=0; i<${num_kernels}; i++)) do entry_line=$((${entry_lines[$i]} + $offset)) offset=$(($offset + 4)) sed -i -e "${entry_line}i\ //init ATMI_CONTEXT\n\ ld_kernarg_align(8)_width(all)_u64 \$d0, [%__printf_buffer];\n\ st_global_align(8)_u64 \$d0, [&ATMI_CONTEXT];\n\ " $TMPDIR/composite.hsail done # Add $HSAILLIB to file if [ $DRYRUN ] ; then echo cat $HSAILLIB >> $TMPDIR/composite.hsail else cat $HSAILLIB >> $TMPDIR/composite.hsail fi # assemble complete hsail file to brig $BRIGDIR/$BRIGNAME if [ $DRYRUN ] ; then echo $HSA_HLC_BIN_PATH/$CMD_BRI -o $BRIGDIR/$BRIGNAME $TMPDIR/composite.hsail rc=0 else $HSA_HLC_BIN_PATH/$CMD_BRI -o $BRIGDIR/$BRIGNAME $TMPDIR/composite.hsail rc=$? fi if [ $rc != 0 ] ; then echo "ERROR: HSAIL assembly of HSAILLIB failed with return code $rc. Command was:" echo " $HSA_HLC_BIN_PATH/$CMD_BRI -o $BRIGDIR/$BRIGNAME $TMPDIR/composite.hsail" do_err $rc fi fi # HSA Offline Finalizer if [ $HOF ] ; then [ $VERBOSE ] && echo "#Step: offline finalization of brig --> $OUTFILE ..." if [ $DRYRUN ] ; then echo $ATMI_PATH/bin/hof -o $BRIGDIR/$FNAME.o -b $BRIGDIR/$BRIGNAME else echo $ATMI_PATH/bin/hof -o $BRIGDIR/$FNAME.o -b $BRIGDIR/$BRIGNAME $ATMI_PATH/bin/hof -o $BRIGDIR/$FNAME.o -b $BRIGDIR/$BRIGNAME #LD_LIBRARY_PATH=$HSA_RUNTIME_PATH/lib:$LD_LIBRARY_PATH hof -o $BRIGDIR/$FNAME.o -b $BRIGDIR/$BRIGNAME rc=$? if [ $rc != 0 ] ; then echo "ERROR: The hof command failed with return code $rc." exit $rc fi cp $BRIGDIR/$FNAME.o $FULLOUTFILE fi else cp $BRIGDIR/$BRIGNAME $FULLOUTFILE fi # cleanup if [ ! 
$KEEPTDIR ] ; then if [ $DRYRUN ] ; then echo "rm -rf $TMPDIR" else rm -rf $TMPDIR fi fi [ $VERBOSE ] && echo "#Info: Done" exit 0
true
38a4809c51c74bccad451559f9e381ff6bd95b76
Shell
KrasimirZahariev/dotfiles
/.local/bin/show-logs
UTF-8
484
3.234375
3
[]
no_license
#!/bin/dash
# vim:filetype=sh
# show-logs: print journald messages at or above a given priority for the
# current boot and the four boots preceding it, colorized with ccze.
# Priority levels: 0 emerg, 1 alert, 2 crit, 3 err, 4 warning, 5 notice,
#                  6 info, 7 debug.

_hdr="\n------------------------------------- LOG %s -------------------------------------\n"

# Priority comes from $1; default to 3 (err) when absent or empty.
_lvl="${1:-3}"

_boot=-4
while [ "$_boot" -le 0 ]; do
  # shellcheck disable=SC2059
  printf "$_hdr" "$_boot"
  # Drop fields 3 and 4 of each line, sort, and keep only lines that
  # occur exactly once before colorizing.
  journalctl -p "$_lvl" -x -b "$_boot" | cut -d ' ' -f 1-2,5- | sort | uniq -u | ccze -A
  _boot=$((_boot + 1))
done
true
d0024af51f3af2f2d054602b7bd7c11a7d429a17
Shell
lrinQVQ/script
/other/china_only
UTF-8
7,302
3.65625
4
[]
no_license
# china_only — restrict access to a Telegram-proxy port to mainland-China
# source IPs.  Detects the distro (Debian, RHEL/CentOS 6 or 7) to decide
# between firewalld and iptables, downloads the APNIC CN IPv4 delegation
# list, installs per-CIDR accept rules plus a default drop for the chosen
# port, and writes /mnt/china_only — a regeneration script wired into root's
# crontab (@reboot and daily at midnight).  Interactive menu:
# 1 = install, 2 = uninstall, 3 = exit.  Must run as root.
# NOTE(review): prompts/messages below are Chinese runtime strings, left
# byte-identical.
# NOTE(review): the uninstall branch tests "$FIREWALLCMD", which is only set
# during the install branch — on a fresh invocation it is empty, so
# uninstall always takes the iptables path; confirm whether that is intended.
# NOTE(review): "service cron enable" under redhat6 is not a valid service(8)
# action — presumably "chkconfig crond on" was meant; verify.
#!/bin/sh # Get user id export USERID=$(id -u) # Check ROOT Permissions if [ "$USERID" != 0 ]; then echo "请以root用户运行" exit 1 fi # Check linux distribution # Debian base if [ -f /etc/debian_version ]; then export OS=debian export FIREWALL=iptables systemctl enable cron fi # Redhat base if [ -f /etc/redhat-release ]; then REDHAT_VERSION=$(cat /etc/redhat-release | grep -oE "[0-9.]+") case "$REDHAT_VERSION" in 7.0.1406 | 7.1.1503 | 7.2.1511 | 7.3.1611 | 7.4.1708 | 7.5.1804 | 7.6-1810 | 7.7-1908 | 8.0) export OS=redhat7 export FIREWALL=firewall-cmd systemctl enable cron ;; 6.0 | 6.1 | 6.2 | 6.3 | 6.4 | 6.5 | 6.6 | 6.7 | 6.8 | 6.9) export OS=redhat6 export FIREWALL=iptables service cron enable ;; esac fi # Other if [ "$OS" = "" ]; then read -p "请手动输入您的发行版所使用的防火墙,目前仅支持iptables和firewall-cmd(iptables/firewall-cmd): " FIREWALL fi choose() { echo "1.安装" echo "2.卸载" echo "3.退出" read -p "请输入数字: " NUM if [ "$NUM" -ne 1 ] && [ "$NUM" -ne 2 ] && [ "$NUM" -ne 3 ]; then choose fi } choose if [ "$NUM" = 1 ]; then if [ "$FIREWALL" = "firewall-cmd" ]; then # Check firewall-cmd if command -v firewall-cmd >/dev/null 2>&1; then FIREWALL_RUNTIME=$(firewall-cmd --state) if [ "$FIREWALL_RUNTIME" = "running" ]; then export FIREWALLCMD=yes else read -p "Firewall-cmd已被关闭是否启用?(yes/no): " FIREWALLCMD_START fi if [ "$FIREWALLCMD_START" = "yes" ]; then systemctl start firewalld systemctl enable firewalld elif [ "$FIREWALLCMD" != "yes" ]; then read -p "如不开启Firewall-cmd,是否配置人畜无害的iptables?(yes/no): " IPTABLES_INSTALL #read -p "是否配置人畜无害的iptables?(yes/no): " IPTABLES_INSTALL fi else if command -v iptables >/dev/null 2>&1; then export IPTABLES_INSTALL=yes else echo "请安装iptables" fi fi if [ "$OS" = "redhat7" ] && [ "$IPTABLES_INSTALL" = "yes" ]; then yum install iptables-services -y systemctl start iptables systemctl enable iptables export IPTABLES=yes IPTABLES_WHICH=$(which iptables) else export FIREWALLCMD=yes FIREWALLCMD_WHICH=$(which firewall-cmd) #echo "请先在Telegram/Github上联系lrinQVQ" #exit 0 fi elif 
[ "$FIREWALL" = "iptables" ]; then # Check iptables if ! command -v iptables >/dev/null 2>&1; then echo "请安装iptables" exit 1 else export IPTABLES=yes IPTABLES_WHICH=$(which iptables) fi if [ "$OS" = "redhat6" ]; then if [ -f /etc/init.d/iptables ]; then /etc/init.d/iptables start else yum install iptables-services -y /etc/init.d/iptables start fi export IPTABLES=yes IPTABLES_WHICH=$(which iptables) fi fi read -p "Telegram代理使用的端口: " PORT # Download echo "下载中国大陆IP数据" wget -O- 'http://ftp.apnic.net/apnic/stats/apnic/delegated-apnic-latest' | awk -F\| '/CN\|ipv4/ { printf("%s/%d\n", $4, 32-log($5)/log(2)) }' >/tmp/cnip.txt # Check download if [ ! -f /tmp/cnip.txt ]; then echo "下载失败,请检查网络" fi if [ "$FIREWALLCMD" = "yes" ]; then echo "<?xml version=\"1.0\" encoding=\"utf-8\"?>" > /usr/lib/firewalld/services/cnonly.xml echo "<service>" >> /usr/lib/firewalld/services/cnonly.xml echo " <short>Proxy CNONLY</short>" >> /usr/lib/firewalld/services/cnonly.xml echo " <description>Proxy China Only.</description>" >> /usr/lib/firewalld/services/cnonly.xml echo " <port protocol=\"tcp\" port=\"$PORT\"/>" >> /usr/lib/firewalld/services/cnonly.xml echo "</service>" >> /usr/lib/firewalld/services/cnonly.xml $FIREWALLCMD_WHICH --zone=public --add-service=cnonly --permanent for IP in `cat /tmp/cnip.txt`; do echo "$FIREWALLCMD_WHICH --add-rich-rule 'rule family=\"ipv4\" source address=\"$IP\" service name=\"cnonly\" accept'" >> /tmp/firewall-cmd.sh done sh /tmp/firewall-cmd.sh echo "#!""/bin/sh" > /mnt/china_only echo "$FIREWALLCMD_WHICH --reload" >> /mnt/china_only echo "wget -O- 'http://ftp.apnic.net/apnic/stats/apnic/delegated-apnic-latest' | awk -F\| '/CN\|ipv4/ { printf(\"%s/%d\\n\", \$4, 32-log(\$5)/log(2)) }' > /tmp/cnip.txt" >> /mnt/china_only echo "for IP in \`cat /tmp/cnip.txt\`; do" >>/mnt/china_only echo "$FIREWALLCMD_WHICH --add-rich-rule 'rule family=\"ipv4\" source address=\"\$IP\" service name=\"cnonly\" accept' >> /tmp/firewall-cmd.sh" >>/mnt/china_only echo "done" 
>>/mnt/china_only echo "sh /tmp/firewall-cmd.sh" >>/mnt/china_only elif [ "$IPTABLES" = "yes" ]; then # iptables start $IPTABLES_WHICH -N CNONLY $IPTABLES_WHICH -I INPUT -p TCP --dport $PORT -j CNONLY $IPTABLES_WHICH -I INPUT -p UDP --dport $PORT -j CNONLY $IPTABLES_WHICH -I CNONLY -p TCP --dport $PORT -j DROP $IPTABLES_WHICH -I CNONLY -p UDP --dport $PORT -j DROP for IP in $(cat /tmp/cnip.txt); do $IPTABLES_WHICH -I CNONLY -s $IP -p TCP --dport $PORT -j ACCEPT $IPTABLES_WHICH -I CNONLY -s $IP -p UDP --dport $PORT -j ACCEPT done echo "#!""/bin/sh" >/mnt/china_only echo "wget -q -O- 'http://ftp.apnic.net/apnic/stats/apnic/delegated-apnic-latest' | awk -F\| '/CN\|ipv4/ { printf(\"%s/%d\\n\", \$4, 32-log(\$5)/log(2)) }' > /tmp/cnip.txt" >>/mnt/china_only echo "$IPTABLES_WHICH -F CNONLY >/dev/null 2>&1" >>/mnt/china_only echo "$IPTABLES_WHICH -N CNONLY" >>/mnt/china_only echo "$IPTABLES_WHICH -I INPUT -p TCP --dport $PORT -j CNONLY" >>/mnt/china_only echo "$IPTABLES_WHICH -I INPUT -p UDP --dport $PORT -j CNONLY" >>/mnt/china_only echo "$IPTABLES_WHICH -I CNONLY -p TCP --dport $PORT -j DROP" >>/mnt/china_only echo "$IPTABLES_WHICH -I CNONLY -p UDP --dport $PORT -j DROP" >>/mnt/china_only echo "for IP in \`cat /tmp/cnip.txt\`; do" >>/mnt/china_only echo "$IPTABLES_WHICH -I CNONLY -s \$IP -p TCP --dport $PORT -j ACCEPT" >>/mnt/china_only echo "$IPTABLES_WHICH -I CNONLY -s \$IP -p UDP --dport $PORT -j ACCEPT" >>/mnt/china_only echo "done" >>/mnt/china_only fi if [ -f /var/spool/cron/root ]; then sed -i '/\/mnt\/china_only/d' /var/spool/cron/root elif [ -f /var/spool/cron/crontabs/root ]; then sed -i '/\/mnt\/china_only/d' /var/spool/cron/crontabs/root fi if [ -d /var/spool/cron/crontabs ]; then echo "@reboot sh /mnt/china_only >/dev/null 2>&1" >>/var/spool/cron/crontabs/root echo "0 0 * * * sh /mnt/china_only >/dev/null 2>&1" >>/var/spool/cron/crontabs/root elif [ -d /var/spool/cron ]; then echo "@reboot sh /mnt/china_only >/dev/null 2>&1" >>/var/spool/cron/root echo "0 0 
* * * sh /mnt/china_only >/dev/null 2>&1" >>/var/spool/cron/root fi elif [ "$NUM" = 2 ]; then rm -rf /mnt/china_only if [ -f /var/spool/cron/root ]; then sed -i '/\/mnt\/china_only/d' /var/spool/cron/root elif [ -f /var/spool/cron/crontabs/root ]; then sed -i '/\/mnt\/china_only/d' /var/spool/cron/crontabs/root fi if [ "$FIREWALLCMD" = "yes" ]; then firewall-cmd --reload else iptables -F CNONLY fi elif [ "$NUM" = 3 ]; then exit 0 fi
true
884210f47aa42a4c4361ae6436779724a6d9e80f
Shell
cernvm/ci-scripts
/docker/publish_container.sh
UTF-8
1,760
4.21875
4
[]
permissive
# publish_container.sh — import an image tarball and push it to a Docker
# registry.  The registry coordinates are derived purely from the tarball's
# file name: <library>-<name>-<version>-<release>.tar.gz becomes
# <library>/<name>:<version>-<release>, plus a ":latest" alias.  The tarball
# is streamed from $IMAGE_LOCATION via curl | zcat | docker import, tagged,
# pushed, and the local tags are removed by the cleanup trap on any exit.
# NOTE(review): get_docker_tag assumes the file name ends with
# "-<version>-<release>.<ext...>" and get_docker_name takes the second
# dash-separated field — names with extra dashes will mis-parse; confirm the
# naming convention with the producers of these tarballs.
#!/bin/sh # # This script runs build scripts from the ci directory inside a specified docker # container in the ci/docker directory. # set -e SCRIPT_LOCATION=$(cd "$(dirname "$0")"; pwd) . ${SCRIPT_LOCATION}/../jenkins/common.sh if [ $# -lt 2 ]; then echo "Usage: $0 <workspace> <docker image>" echo echo "This script imports and publishes the given image tarball." echo "Image name, tag, and user are derived from the tarball name." echo "E.g., cvmfs-shrinkwrap-2.6.0-1.tar.gz becomes cvmfs/shrinkwrap:2.6.0-1" exit 1 fi WORKSPACE="$1" IMAGE_LOCATION="$2" # check if docker is installed which docker > /dev/null 2>&1 || die "docker is not installed" which git > /dev/null 2>&1 || die "git is not installed" get_docker_library() { local image_name=$1 echo $image_name | cut -d\- -f1 } get_docker_tag() { local image_name=$1 local release=$(echo $image_name | rev | cut -d\- -f1 | rev | cut -d. -f1) local version=$(echo $image_name | rev | cut -d\- -f2 | rev) echo "$version-$release" } get_docker_name() { local image_name=$1 echo $image_name | cut -d\- -f2 } cleanup_images() { docker rmi --force $DOCKER_NAME || true docker rmi --force $DOCKER_NAME_LATEST || true } IMAGE_NAME="$(basename "$IMAGE_LOCATION")" DOCKER_NAME="$(get_docker_library $IMAGE_NAME)/$(get_docker_name $IMAGE_NAME):$(get_docker_tag $IMAGE_NAME)" DOCKER_NAME_LATEST="$(get_docker_library $IMAGE_NAME)/$(get_docker_name $IMAGE_NAME):latest" echo "*** Importing $IMAGE_LOCATION as $DOCKER_NAME" echo trap cleanup_images EXIT HUP INT TERM curl -f --connect-timeout 20 $IMAGE_LOCATION | zcat | docker import - $DOCKER_NAME docker tag $DOCKER_NAME $DOCKER_NAME_LATEST docker images docker push $DOCKER_NAME docker push $DOCKER_NAME_LATEST
true
3d87dea5f3d87079898d667f4eea546109679fd4
Shell
morika-t/capi-ci
/ci/scripts/configure_route53_zone_delegation
UTF-8
1,512
3.8125
4
[ "Apache-2.0" ]
permissive
# configure_route53_zone_delegation — delegate DNS for $ZONE to its own
# Route53 hosted zone from a parent zone ($DELEGATION_ZONE) that may live in
# a different AWS account.  Reads the child zone's delegation-set NS records
# using the ZONE_* credentials, then UPSERTs an NS record for $ZONE (TTL 60)
# into the parent zone using the DELEGATION_* credentials.  Credential
# exports are wrapped in "set +x" so they never appear in the xtrace output.
# Required env: ZONE, DELEGATION_ZONE, ZONE_ACCESS_KEY_ID,
# ZONE_SECRET_ACCESS_KEY, DELEGATION_ACCESS_KEY_ID,
# DELEGATION_SECRET_ACCESS_KEY.
# NOTE(review): runs under "set -xu" but not "set -e" — a failed aws call
# does not abort the script; confirm whether that is intentional.
#!/usr/bin/env bash set -xu function setup_aws_env() { export AWS_ACCESS_KEY_ID=$1 export AWS_SECRET_ACCESS_KEY=$2 } function delegate_zone() { declare zone=$1 delegation_zone_id=$2 nameservers=$3 servers="" json for server in $nameservers; do servers="${servers}{\"Value\":\"${server}\"}," done json=$(cat <<EOF { "Comment": "Update name server records", "Changes": [ { "Action": "UPSERT", "ResourceRecordSet": { "Name":"${zone}", "Type":"NS", "TTL": 60, "ResourceRecords": [ ${servers%,} ] } } ] } EOF) aws route53 change-resource-record-sets \ --hosted-zone-id "${delegation_zone_id}" \ --change-batch "${json}" } function find_name_servers() { declare zone_id=$1 aws route53 get-hosted-zone --id "${zone_id}" | jq -r '.DelegationSet.NameServers[]' } function hosted_zone_id_from_name() { declare zone=$1 aws route53 list-hosted-zones-by-name --dns-name "${zone}" | jq -r '.HostedZones[0].Id' | cut -d'/' -f3 } function main() { set +x setup_aws_env "${ZONE_ACCESS_KEY_ID}" "${ZONE_SECRET_ACCESS_KEY}" set -x zone_id="$(hosted_zone_id_from_name "${ZONE}")" nameservers=$(find_name_servers "${zone_id}") echo $nameservers set +x setup_aws_env "${DELEGATION_ACCESS_KEY_ID}" "${DELEGATION_SECRET_ACCESS_KEY}" set -x delegation_zone_id="$(hosted_zone_id_from_name "${DELEGATION_ZONE}")" delegate_zone "${ZONE}" "${delegation_zone_id}" "${nameservers}" } main
true
2a498fc6ea94031ede3e0513cb16dc9cfa974bea
Shell
santulucky/pi_mwan
/files/usr/sbin/mwan3track
UTF-8
6,888
3.859375
4
[]
no_license
# mwan3track — per-interface connectivity tracker for mwan3 (OpenWrt-style
# multi-WAN).  Invoked as: mwan3track INTERFACE DEVICE STATUS SRC_IP
# TRACK_IP... (refuses to start with fewer than 5 args).  Tuning knobs
# (reliability, count, timeout, intervals, up/down thresholds, size,
# track_method) are read for $INTERFACE from /tmp/mwan3.json via jq, falling
# back to ping when the configured method (ping/arping/httping) or its
# binary is unavailable.  The main loop probes each track IP, maintains a
# hysteresis score in [0, down+up], publishes state files under
# /var/run/mwan3track/<iface>/ (STATUS, TRACK_<ip>, LOST, SCORE, TURN, TIME),
# and fires ifup/ifdown + connected/disconnected hotplug events on
# transitions.  SIGUSR1 records an external ifdown event; SIGTERM exits.
# The process exits 0 once the interface is declared online.
# NOTE(review): httping tracking requires a usable SRC_IP (not 0.0.0.0/::);
# validate_track_method's "$2" is that source IP, not the device name.
#!/bin/sh #. /lib/functions.sh . /lib/mwan3/common.sh LOG="logger -t $(basename "$0")[$$] -p" INTERFACE="" DEVICE="" IFDOWN_EVENT=0 clean_up() { $LOG notice "Stopping mwan3track for interface \"${INTERFACE}\"" exit 0 } if_down() { $LOG info "Detect ifdown event on interface ${INTERFACE} (${DEVICE})" IFDOWN_EVENT=1 } validate_track_method() { case "$1" in ping) which ping 1>/dev/null 2>&1 || { $LOG warn "Missing ping. Please install iputils-ping package or enable ping util and recompile busybox." return 1 } ;; arping) which arping 1>/dev/null 2>&1 || { $LOG warn "Missing arping. Please install iputils-arping package." return 1 } ;; httping) which httping 1>/dev/null 2>&1 || { $LOG warn "Missing httping. Please install httping package." return 1 } [ -n "$2" -a "$2" != "0.0.0.0" -a "$2" != "::" ] || { $LOG warn "Cannot determine source IP for the interface which is required by httping." return 1 } ;; *) $LOG warn "Unsupported tracking method: $track_method" return 2 ;; esac } main() { local reliability count timeout interval failure_interval local recovery_interval down up size local keep_failure_interval local it data item [ -z "$5" ] && echo "Error: should not be started manually" && exit 0 INTERFACE=$1 DEVICE=$2 STATUS=$3 SRC_IP=$4 mkdir -p /var/run/mwan3track/$1 trap clean_up TERM trap if_down USR1 #config_load mwan3 #config_get track_method $1 track_method ping data=`jq -r . 
/tmp/mwan3.json` it=0 while true; do item=`echo $data | jq -r .interface[$it]` [ "$item" = "null" ] && break || it=$((it+1)) [ $1 = `echo $item | jq -r .name` ] && { reliability=`echo $item | jq -r .reliability` [ "$reliability" = "null" ] && reliability=1 count=`echo $item | jq -r .count` [ "$count" = "null" ] && count=1 timeout=`echo $item | jq -r .timeout` [ "$timeout" = "null" ] && timeout=4 interval=`echo $item | jq -r .interval` [ "$interval" = "null" ] && interval=10 down=`echo $item | jq -r .down` [ "$down" = "null" ] && down=5 up=`echo $item | jq -r .up` [ "$up" = "null" ] && up=5 size=`echo $item | jq -r .size` [ "$size" = "null" ] && size=56 failure_interval=`echo $item | jq -r .failure_interval` [ "$failure_interval" = "null" ] && failure_interval=$interval keep_failure_interval=`echo $item | jq -r .keep_failure_interval` [ "$keep_failure_interval" = "null" ] && keep_failure_interval=0 recovery_interval=`echo $item | jq -r .recovery_interval` [ "$recovery_interval" = "null" ] && recovery_interval=$interval track_method=`echo $item | jq -r .track_method` [ "$track_method" = "null" ] && track_method=ping break } done validate_track_method $track_method $SRC_IP || { track_method=ping if validate_track_method $track_method; then $LOG warn "Using ping to track interface $INTERFACE avaliability" else $LOG err "No track method avaliable" exit 1 fi } # config_get reliability $1 reliability 1 # config_get count $1 count 1 # config_get timeout $1 timeout 4 # config_get interval $1 interval 10 # config_get down $1 down 5 # config_get up $1 up 5 # config_get size $1 size 56 # config_get failure_interval $1 failure_interval $interval # config_get_bool keep_failure_interval $1 keep_failure_interval 0 # config_get recovery_interval $1 recovery_interval $interval local score=$(($down+$up)) local track_ips=$(echo $* | cut -d ' ' -f 5-99) local host_up_count=0 local lost=0 local sleep_time=0 local turn=0 if [ "$STATUS" = "unknown" ]; then echo "unknown" > 
/var/run/mwan3track/$1/STATUS score=0 else echo "online" > /var/run/mwan3track/$1/STATUS env -i ACTION="connected" INTERFACE="$1" DEVICE="$2" /sbin/hotplug-call iface fi while true; do sleep_time=$interval for track_ip in $track_ips; do if [ $host_up_count -lt $reliability ]; then case "$track_method" in ping) ping -I $DEVICE -c $count -W $timeout -s $size -q $track_ip &> /dev/null ;; arping) arping -I $DEVICE -c $count -w $timeout -q $track_ip &> /dev/null ;; httping) httping -y $SRC_IP -c $count -t $timeout -q $track_ip &> /dev/null ;; esac if [ $? -eq 0 ]; then let host_up_count++ echo "up" > /var/run/mwan3track/$1/TRACK_${track_ip} if [ $score -le $up ]; then $LOG info "Check ($track_method) success for target \"$track_ip\" on interface $1 ($2)" fi else let lost++ echo "down" > /var/run/mwan3track/$1/TRACK_${track_ip} if [ $score -gt $up ]; then $LOG info "Check ($track_method) failed for target \"$track_ip\" on interface $1 ($2)" fi fi else echo "skipped" > /var/run/mwan3track/$1/TRACK_${track_ip} fi done if [ $host_up_count -lt $reliability ]; then let score-- if [ $score -lt $up ]; then score=0 [ ${keep_failure_interval} -eq 1 ] && { sleep_time=$failure_interval } else sleep_time=$failure_interval fi if [ $score -eq $up ]; then echo "offline" > /var/run/mwan3track/$1/STATUS $LOG notice "Interface $1 ($2) is offline" env -i ACTION=ifdown INTERFACE=$1 DEVICE=$2 /sbin/hotplug-call iface env -i ACTION="disconnected" INTERFACE="$1" DEVICE="$2" /sbin/hotplug-call iface score=0 fi else if [ $score -lt $(($down+$up)) ] && [ $lost -gt 0 ]; then $LOG info "Lost $(($lost*$count)) ping(s) on interface $1 ($2)" fi let score++ lost=0 if [ $score -gt $up ]; then echo "online" > /var/run/mwan3track/$1/STATUS score=$(($down+$up)) elif [ $score -le $up ]; then sleep_time=$recovery_interval fi if [ $score -eq $up ]; then $LOG notice "Interface $1 ($2) is online" echo "online" > /var/run/mwan3track/$1/STATUS env -i ACTION=ifup INTERFACE=$1 DEVICE=$2 /sbin/hotplug-call iface 
exit 0 fi fi let turn++ mkdir -p "/var/run/mwan3track/${1}" echo "${lost}" > /var/run/mwan3track/$1/LOST echo "${score}" > /var/run/mwan3track/$1/SCORE echo "${turn}" > /var/run/mwan3track/$1/TURN echo "$(get_uptime)" > /var/run/mwan3track/$1/TIME host_up_count=0 sleep "${sleep_time}" & wait if [ "${IFDOWN_EVENT}" -eq 1 ]; then score=0 echo "offline" > /var/run/mwan3track/$1/STATUS IFDOWN_EVENT=0 fi done } main "$@"
true
acb90a9cf19914721a8371e2a337d225a3211053
Shell
stroxler/config
/_shtools/basics.sh
UTF-8
1,208
2.875
3
[]
no_license
# set up aliases to edit this file and the local env.sh
alias vle='vim ~/.local/env.sh'
alias vss='vim ~/shared-source.sh'

# git aliases
alias ga='git add'
alias gs='git status -s'
alias gd='git diff'
alias gdc='git diff --cached'
alias gl='git log --oneline --decorate -n 15'
alias gre='git rebase'
alias grom='git rebase origin/master'
alias gco='git commit'
alias gcoa='git commit --amend'
alias gch='git checkout'
alias gcb='git checkout -b'
alias gpu='git push'
alias gbr='git branch'
# (duplicate "alias gre='git rebase'" removed — it was defined twice)
alias gfe='git fetch'
alias hgl='hg log --style compact --limit 10'

# curl with json headers
alias jcurl='curl -H "Content-Type: application/json"'

# ls aliases
alias ll='ls -lhG'
alias ls='ls -G'
alias la='ls -lah'

# cd functions and aliases
# Fuzzy-cd: cd into $1, then into the directory of an fzf-picked file.
# Fixed: the original ran `set -e` here, which would make the *entire*
# interactive shell exit on any subsequent failing command once this ran.
# NOTE(review): this function is shadowed by `alias c='cd'` below in
# interactive shells (aliases win over functions) — confirm which is intended.
function c() {
  local target="$1"
  cd "$target" || return
  cd "$(dirname "$(fzf)")"
}

# cd to ghar/config and .zpresto (my config repos)
alias c='cd'
alias cem='cd ~/.emacs'
alias cgc='cd ~/ghar/config'
alias cb='cd ~/Dropbox'
alias c.='cd ..'
alias c..='cd ../..'
alias c...='cd ../../..'
alias ck='cd /kode'

# tree aliases
alias ptree='tree -I "__pycache__|*.pyc|*.egg-info|target"' # pruned tree

# Fuzzy-cd under /kode: pick any directory with fzf.
ckk () {
  cd /kode || return
  cd "$(find . -type d | fzf)"
}
true
b98cb613c47c9103ae67be3598a18bf2408ee834
Shell
tas-2014-11/debugging
/capsd/20090317/02.diff-new-list-against-old-lists
UTF-8
404
2.796875
3
[]
no_license
#!/bin/bash
# Compare the newest spammer-node list against the two lists captured on
# 2009-03-12, writing the whole report into "<scriptname>.txt".
exec 1> "$0.txt"

newest=01.locate-spamming-nodes-in-node-table.txt

# Extract the node id (2nd column) from the newest list, sorted numerically.
normalize_newest() {
    awk '{print $2}' "$newest" | sort -n
}

normalize_newest > "$0.new.txt"

old1=../20090312/04.diff-old-spammers-and-new-spammers.old.txt
old2=../20090312/04.diff-old-spammers-and-new-spammers.new.txt

# Emit the comparison command lines into the report (run them by hand, or
# pipe the report through sh). Expansions quoted so odd paths stay intact.
echo wc "$old1" "$old2" "$0.new.txt"
echo
echo diff "$old1" "$0.new.txt"
echo
echo diff "$old2" "$0.new.txt"
echo
true
a2cf67568a1d1be700cb83e98588cb31f82b5ce0
Shell
kmalloc/snippet
/tool/diff_elf.sh
UTF-8
960
3.65625
4
[]
no_license
# Extract the object files from the static archives in two build directories,
# disassemble every .o with objdump, and diff old vs new disassembly into
# $out/res.dat.
dir1=${1?"please specify old object directory"}
dir2=${2?"please specify new object directory"}
out=${3?"please specify output directory"}

if [ ! -e "$out" ]; then
    mkdir -p "$out"
fi

# Unpack every archive in the old directory; count the extracted objects
# for the sanity check below.
pushd "$(pwd)"
cd "$dir1"
for f in *.a; do
    ar -x "$f"
done
old_count=$(ls *.o | wc -l)
popd

pushd "$(pwd)"
cd "$dir2"
for f in *.a; do
    ar -x "$f"
done
new_count=$(ls *.o | wc -l)
popd

if [ "$old_count" != "$new_count" ]; then
    echo "object files not the same:$old_count, $new_count"
fi

# Disassemble each object to <name>.old.d / <name>.new.d in $out.
for f in "$dir1"/*.o; do
    n=$(basename "$f")
    n=${n%%.*}
    objdump -d "$f" > "$out/$n.old.d"
done
for f in "$dir2"/*.o; do
    n=$(basename "$f")
    n=${n%%.*}
    objdump -d "$f" > "$out/$n.new.d"
done

cd "$out"
# Fixed: the original parsed `ls -l` and took awk field $8, which is the
# date/time column on most systems (the filename is field 9), so the list
# came out empty or wrong. Derive the base names from the files directly.
list=$(ls *.d | cut -d . -f 1 | sort | uniq)
rm -f ./res.dat
for f in $list; do
    if [ -f "$f.old.d" ] && [ -f "$f.new.d" ]; then
        echo "\n" >> ./res.dat
        diff "$f.old.d" "$f.new.d" >> ./res.dat
    else
        echo "single file $f"
    fi
done
true
d7c1ddb5afc1c7fd02fce3417026c0aeed88de25
Shell
Nagill/m3u8-to-mp4
/install
UTF-8
262
2.84375
3
[ "MIT" ]
permissive
#!/usr/bin/env bash

# Refuse to run unless invoked from the directory containing this script.
if [ ! -f install ]; then
    echo 'install must be run within its container folder' 1>&2
    exit 1
fi

# Build with GOPATH pointed at this folder, restoring the caller's value after.
saved_gopath="$GOPATH"
export GOPATH="$(pwd)"

gofmt -w src
go install m3u8tomp4

export GOPATH="$saved_gopath"
echo 'finished'
true
b071ea56b8538bc196f4a84cf0436b4b00d50f55
Shell
willcro/edu.umn.cs.melt.exts.ableC.concur
/examples/compile.sh
UTF-8
569
3.671875
4
[]
no_license
#!/bin/bash

# Compile an extended C program (.xc) down to C via an extended instance of
# ableC, then use GCC to compile the generated C code to an executable.
#
# Usage: ./compile.sh path/to/program.xc

# Translate the extended-C source to plain C (ableC emits <base>.c in the cwd).
java -jar ../artifact/ableC.jar "$1"

# Derive "<base>.c" from the input path: strip the directory, then the
# extension. (Pure parameter expansion — no external 'cut' needed.)
filename=$1
filename_withoutpath=$(basename "$filename")
basefilename="${filename_withoutpath%.*}"
cfile="${basefilename}.c"

gcc "${cfile}"
true
19fd185539dbbb77569017567a5369b43e31ac2c
Shell
ilabsea/ask
/pre-commit.sh
UTF-8
827
3.421875
3
[]
no_license
#!/bin/sh
# Pre-commit hook: run the Mix, JS, Flow and ESLint suites.
# Fixed: the original always exited 0, so a failing suite never actually
# blocked the commit; we now track failures and exit non-zero.
NC='\033[0m'
GREEN='\033[0;32m'
status=0

echo "----==== Running Mix tests ====----"
MIX_TESTS="$(docker-compose run --rm app mix test)"
if [ $? -eq 0 ]; then echo "${GREEN}OK${NC}"; else echo "${MIX_TESTS}"; status=1; fi

echo "----==== Running JS tests ====----"
JS_TESTS="$(docker-compose run --rm webpack yarn test)"
if [ $? -eq 0 ]; then echo "${GREEN}OK${NC}"; else echo "${JS_TESTS}"; status=1; fi

echo "----==== Running Flow tests ====----"
# Prefer a globally installed flow; fall back to the project-local binary.
# $? after the fi is the exit status of whichever command substitution ran.
if hash flow 2>/dev/null; then
  FLOW_TESTS="$(flow check)"
else
  FLOW_TESTS="$(./node_modules/.bin/flow check)"
fi
if [ $? -eq 0 ]; then echo "${GREEN}OK${NC}"; else echo "${FLOW_TESTS}"; status=1; fi

echo "----==== Running Eslint tests ====----"
ESLINT_TESTS="$(docker-compose run --rm webpack yarn eslint)"
if [ $? -eq 0 ]; then echo "${GREEN}OK${NC}"; else echo "${ESLINT_TESTS}"; status=1; fi

exit $status
true
afa0561992c91d5bd26c69a171381b3fc9200490
Shell
renlord/dotfiles
/HOME-STOW/scripts/.local/bin/doscan
UTF-8
3,901
3.828125
4
[]
no_license
#!/usr/bin/env bash
# Scan documents/photos from a Fujitsu ADF scanner via scanimage.
# "doc" scans produce pnm pages merged into a single PDF (convert + pdftk);
# "photo" scans produce standalone JPEGs.
# NOTE(review): this copy contains several literal `$(unknown)` command
# substitutions (marked below) — apparently redacted/corrupted content that
# will fail at runtime; the original expressions need to be recovered.

usage() {
    cat << EOF
doscan
    -? prints this help
    -n [filename] sets file name to argument (default: SCAN-\$date.pdf)
    -2 duplex, 2-sided scan (default: simplex, 1-sided scan)
    -c toggle colour scanning (default: greyscale)
    -h sets scan height for scan document (default: A4 297mm)
    -r sets scan resolution for scan document (default: 300)
    -t document scan type [supports: doc (MERGE as single PDF)/photo (standalone JPEGs)] (default: doc)
    -D debug, dry run
EOF
    exit 1
}

# NOTE(review): the option string declares 'm' and 'w:' but no case arm
# handles them — confirm whether they were meant to exist.
while getopts "12cmDn:r:t:h:w:" arg; do
    case $arg in
        1)
            mode="simplex"
            ;;
        2)
            mode="duplex"
            ;;
        c)
            color="Color"
            ;;
        t)
            scantype="${OPTARG}"
            case $scantype in
                photo)
                    echo "scanning doctype ${scantype}"
                    ;;
                doc)
                    echo "scanning doctype ${scantype}"
                    ;;
                *)
                    echo "UNSUPPORTED filetype, BAILING"
                    usage
                    ;;
            esac
            ;;
        h)
            height="${OPTARG}"
            ;;
        n)
            # set filename
            filename="${OPTARG}"
            # NOTE(review): `$(unknown)` — redacted; presumably $filename.
            echo "saving file to $(unknown)"
            ;;
        r)
            res=${OPTARG}
            ;;
        D)
            dryrun=1
            ;;
        ? | *)
            # display help
            usage
            exit 0
            ;;
    esac
done

# Fill in defaults for anything the options did not set.
# NOTE(review): default filename uses SCAN_ (underscore) while the help text
# says SCAN- (dash) — confirm which is intended.
date="$(date +%Y%m%d-%H%M%S)"
: "${mode:=simplex}"
: "${filename:=SCAN_$date.pdf}"
: "${color:=Gray}"
: "${height:=297}"
: "${res:=300}"
: "${scantype:=doc}"
: "${dryrun:=0}"

cat << EOF
configuration:
    mode: $mode
    filename: $filename
    color: $color
    height: $height
    resolution: $res
    scantype: $scantype
    dryrun: $dryrun
EOF

# Convert the scanned .pnm pages (10 at a time) to PDFs, then merge all PDFs
# into one file and move it up out of the scratch directory.
merge() {
    COUNT="$(ls ./*.pnm | wc -l)"
    local _i=1
    echo "converting $COUNT pnm files to pdf"
    while [ "$COUNT" -ne 0 ]; do
        _fs=$(ls -v ./*.pnm | head -n 10)
        echo $_fs
        convert -density $res $_fs $_i.pdf
        rm $(ls -v ./*.pnm | head -n 10)
        _i=$((_i+1))
        COUNT="$(ls ./*.pnm | wc -l)"
    done
    echo "merging pdf to single file"
    [ -z "$NAME" ] && NAME="SCAN"
    _scanoutput="$NAME-$DATE-$scantype.pdf"
    pdftk $(ls -v ./*.pdf) output "$_scanoutput"
    # NOTE(review): the `$(unknown)` substitutions below are redacted —
    # presumably the user-supplied $filename rename path. Verify upstream.
    if [ -n "$(unknown)" ]; then
        mv $_scanoutput $(unknown)
        mv $(unknown) ../
    else
        mv "$_scanoutput" ../
    fi
    cd ..
    rm -rf "${SCAN_DIR}"
}

# Run scanimage inside a fresh scratch directory named SCAN-<timestamp>.
scanf() {
    SCAN_DIR="SCAN-$(date +%Y%m%d_%H%M%S)"
    DATE="$(date +%Y%m%d_%H%M%S)"
    mkdir "${SCAN_DIR}"
    cd "${SCAN_DIR}" || exit 1
    echo "Scanning to DIR: $(pwd)"
    # NOTE(review): `exit_cmd` is not defined anywhere in this script.
    [ -z $mode ] && exit_cmd
    [ -z $height ] && height=297
    [ -z $color ] && color="Gray"
    [ -z $res ] && res=300
    [ -z $scantype ] && scantype="doc"
    CMD="scanimage -d fujitsu --page-height $height --mode $color --resolution $res"
    case $scantype in
        doc)
            CMD="$CMD --format pnm --batch='%d.pnm' --ald=yes"
            ;;
        photo)
            # Continue numbering after any existing files in the directory.
            batchstart=$(ls | sort -h | tail -n1 | cut -d\. -f1)
            [ -z $batchstart ] && echo "cant get batchstart" && exit 1
            CMD="$CMD --format jpeg --batch='%d.jpg' --batch-start=$batchstart --ald=yes --swcrop=yes"
            ;;
    esac
    case $mode in
        simplex)
            CMD="$CMD"
            ;;
        duplex)
            CMD="$CMD --source 'ADF Duplex'"
            ;;
    esac
    # Dry run: print the scanimage command instead of executing it.
    [ "$dryrun" -eq 1 ] && echo "DRYRUN CMD: $CMD" && exit 0
    sh -c "$CMD"
    echo "scan completed"
}

scanf
if [ $scantype = "doc" ]; then
    merge
fi
unset scanf
unset merge
true
f8bc950b75b10eeb93c62c86e577d584df94c079
Shell
nicotralab/chen-et-al-sex-determination
/filterbyGATKBP.sh
UTF-8
2,265
3.46875
3
[]
no_license
#!/bin/bash # this assumes you have the following tools on your system # picard # gatk (we used version 3.8.1) # bcftools # vcftools # samtools # CREATE FASTA INDEX printf "*\n*\n*\nNow creating FASTA index with samtools faidx\n" samtools faidx genomeAssembly.fa \ -o genomeAssembly.fa.fai # CREATE FASTA SEQUENCE DICTIONARY FILE printf "*\n*\n*\nNow creating FASTA sequence dictionary using Picard\n" java -Xmx64g \ -jar picard.jar CreateSequenceDictionary \ R=genomeAssembly.fa \ O=genomeAssembly.dict # EXTRACT SNPS and INDELS INTO SEPARATE FILES # printf "*\n*\n*\nNow extracting SNPS\n" java -Xmx64g \ -jar GenomeAnalysisTK.jar \ -T SelectVariants \ -R genomeAssembly.fa \ -V pythonfiltered.vcf \ -selectType SNP \ -o raw_snps.vcf printf "*\n*\n*\nNow extracting indels\n" java -Xmx64g \ -jar GenomeAnalysisTK.jar \ -T SelectVariants \ -R genomeAssembly.fa \ -V pythonfiltered.vcf \ -selectType INDEL \ -o raw_indels.vcf # HARD FILTERING SNPS and INDELS ACCORDING TO GATK BEST PRACTICES # printf "*\n*\n*\nNow HARD filtering SNPs\n" java -Xmx64g \ -jar GenomeAnalysisTK.jar \ -T VariantFiltration \ -R genomeAssembly.fa \ -V raw_snps.vcf \ --filterExpression "QD < 2.0 || FS > 60.0 || MQ < 40.0 || MQRankSum < -12.5 || ReadPosRankSum < -8.0" \ --filterName "GATKBP-snp" \ -o snps.GATKBP-flagged.vcf printf "*\n*\n*\nNow HARD filtering indels\n" java -Xmx64g \ -jar GenomeAnalysisTK.jar \ -T VariantFiltration \ -R genomeAssembly.fa \ -V raw_indels.vcf \ --filterExpression "QD < 2.0 || FS > 200.0 || ReadPosRankSum < -20.0" \ --filterName "GATKBP-indel" \ -o indels.GATKBP-flagged.vcf # EXTRACT VARIANTS THAT PASSED printf "*\n*\n*\nNow extracting variants that passed the filters\n" bcftools view -f "PASS" snps.GATKBP-flagged.vcf > snps.GATKBP-passed.vcf rm snps.GATKBP-flagged.vcf bcftools view -f "PASS" indels.GATKBP-flagged.vcf > indels.GATKBP-passed.vcf rm indels.GATKBP-flagged.vcf # CONCATENATE THE TWO FILES, THEN SORT THEM vcf-concat snps.GATKBP-passed.vcf indels.GATKBP-passed.vcf > 
GATKBP-passed.unsorted.vcf vcf-sort GATKBP-passed.unsorted.vcf > GATKBP-passed.vcf # CLEAN UP rm GATKBP-passed.unsorted.vcf
true
0367386c1abfb231277040717a1fbe11b17c07f2
Shell
openembedded/meta-openembedded
/meta-oe/recipes-extended/zram/zram/zram-swap-deinit
UTF-8
417
3.609375
4
[ "MIT" ]
permissive
#!/bin/sh
set -e

# Deinitialise a zram swap device by writing 1 to its sysfs "reset" attribute.
# Usage: zram-swap-deinit <device>   (e.g. /dev/zram0)
device=$1

if [ "$device" = "" ]; then
    echo "Usage: zram-swap-deinit <device>"
    exit 1
fi

# Quote expansions so unusual device paths cannot word-split or glob.
sysblockdev=/sys/block/$(basename "$device")

if [ ! -d "$sysblockdev" ]; then
    echo "Block device not found in sysfs"
    exit 1
fi

# zramctl -r is not suitable as it also removes the actual device. Recreating
# it is non-trivial, especially if not /dev/zram0 is used...
echo 1 > "${sysblockdev}/reset"
true
dd0d188d69871bcc2bffc8848046d9a017b071ce
Shell
ReeceHughes/dotfiles
/.bash/.bash_aliases
UTF-8
601
2.828125
3
[]
no_license
# Bash Aliases
# Reece Hughes
# File loaded by .bashrc on startup

# ls
alias ll='ls -alF --color=auto'
alias la='ls -lah --color=auto'
alias l='ls -CF --color=auto'
alias cla='clear && ls -lah --color=auto'

# cd
alias ..='cd .. && la'

# misc
alias op='xdg-open'
alias liveLog='less +F -R -S'
alias local-portscan='nmap -F 192.168.1.1/24'
alias grep='grep --color=auto'
alias fgrep='fgrep --color=auto'
alias egrep='egrep --color=auto'

# Programming
alias python='python3'

# Git
alias gits='git status'
alias cgits='clear && git status'
alias gitp='git pull'

# Show a diffstat against the given branch/ref.
# Fixed: quote the argument so refs containing spaces/globs reach git intact.
branchDiff() {
  git diff "$1" --stat
}
true
02b175540c000079a6cbb3404af6836c775639e2
Shell
notalex/dotfiles
/bin/gemopen
UTF-8
613
3.734375
4
[]
no_license
#! /usr/bin/env bash name=$1 library_path=$(gem which $name 2> /dev/null) if [ $? -ne 0 ]; then # Find matching gems and choose the first match. matches=($(gem list --no-versions -l $name)) library_path=$(gem which ${matches[0]}) fi if [ $? == 0 ]; then gem_path=$(expr match $library_path '\(.\+\)/lib/[^\/]\+$') # Ruby libraries will follow a pattern like `...ruby-2.1.5/lib/ruby/2.1.0/yaml.rb` if [ $? -ne 0 ]; then gem_path=$library_path fi vim $gem_path -c ':4' -c ':normal cd' else gem list printf "\033[1;31m" echo "No match library found for '$name'" printf "\033[0m" fi
true
43b8c3fb6f0d65e428e79b3651795c7b8083bd07
Shell
cluis-lopez/CSAP
/GeneraBBDD/create_mysql.sh
UTF-8
2,951
2.828125
3
[]
no_license
# Recreate the CBS demo database on an Azure MySQL instance: drop and
# recreate the schema (suppliers, products, clients, orders), populate it
# via the Genera* Java generators, add indexes, and seed one dummy order.
# NOTE(review): URL/USER/PASSWORD are placeholders — credentials must be
# supplied before running; consider moving them out of the script.
DATABASE=CBS
URL=XXXX.azure.com
USER=ZZZZ@YYYY
PASSWORD=WWWWWW

# Drop (force) and recreate the database itself.
mysqladmin -h $URL --user=$USER --password=$PASSWORD -f drop $DATABASE
mysqladmin -h $URL --user=$USER --password=$PASSWORD create $DATABASE

# Create the various tables (drop children before parents: pedidos holds
# the foreign keys, so it goes first).
mysql -h $URL --user=$USER --password=$PASSWORD -D $DATABASE -e "DROP TABLE pedidos;"
mysql -h $URL --user=$USER --password=$PASSWORD -D $DATABASE -e "DROP TABLE productos;"
mysql -h $URL --user=$USER --password=$PASSWORD -D $DATABASE -e "DROP TABLE proveedores;"
mysql -h $URL --user=$USER --password=$PASSWORD -D $DATABASE -e "DROP TABLE clientes;"

mysql -h $URL --user=$USER --password=$PASSWORD -D $DATABASE -e "CREATE TABLE proveedores (id INT(10) NOT NULL PRIMARY KEY, nombre VARCHAR(50), pais VARCHAR(30), telefono INT(9)) ENGINE=INNODB;"

mysql -h $URL --user=$USER --password=$PASSWORD -D $DATABASE -e"CREATE TABLE productos (id INT(10) NOT NULL PRIMARY KEY, nombre VARCHAR(50), proveedor INT(10) REFERENCES proveedores(id), precio INT(10), descuento INT(5)) ENGINE=INNODB;"

mysql -h $URL --user=$USER --password=$PASSWORD -D $DATABASE -e"CREATE TABLE clientes (id INT(10) NOT NULL PRIMARY KEY, nombre VARCHAR(50), apellido VARCHAR(50), password VARCHAR(50), empresa VARCHAR(50), telefono INT(9), pedidos_pendientes INT(10) ) ENGINE=INNODB;"

mysql -h $URL --user=$USER --password=$PASSWORD -D $DATABASE -e"CREATE TABLE pedidos (order_number INT(10) NOT NULL PRIMARY KEY auto_increment, cliente INT(10), FOREIGN KEY (cliente) REFERENCES clientes(id) ON UPDATE CASCADE ON DELETE RESTRICT, producto INT(10), FOREIGN KEY (producto) REFERENCES productos(id) ON UPDATE CASCADE ON DELETE RESTRICT, proveedor INT(10), FOREIGN KEY (proveedor) REFERENCES proveedores(id) ON UPDATE CASCADE ON DELETE RESTRICT, cantidad INT(3), descuento INT(3), fecha_entrada DATE, fecha_salida DATE) ENGINE=INNODB;"

echo " ... Proveedores"
java GeneraProveedores

# Generate the products table
echo " ... Productos"
java GeneraProductos

# Generate the clients table
# The parameter is the number of clients to generate
echo "Alimentando la tabla de clientes con $1 entradas"
java GeneraClientes $1

# Index creation
echo "Creando indice en clientes(apellido)"
mysql -h $URL --user=$USER --password=$PASSWORD -D $DATABASE -e "CREATE INDEX ind_apellido ON clientes(apellido);"
echo "Creando indice en pedidos(cliente)"
mysql -h $URL --user=$USER --password=$PASSWORD -D $DATABASE -e "CREATE INDEX ind_cliente ON pedidos(cliente);"

# Create one row in the orders table ...
echo "Anadiendo una linea dummy a pedidos ..."
mysql -h $URL --user=$USER --password=$PASSWORD -D $DATABASE -e "INSERT INTO pedidos VALUES (1,1,1,1,1,1,'1999-12-31','2000-1-1');"

# Since we assigned a dummy order to the client with id=1, update the
# corresponding field in the clients table.
mysql -h $URL --user=$USER --password=$PASSWORD -D $DATABASE -e "UPDATE clientes SET pedidos_pendientes=1 WHERE id=1;"
true
3e0dda639e5d7b1275972b365a0408a9ccaa6e02
Shell
funnybrum/site_monitor
/docker/docker_build.sh
UTF-8
342
2.953125
3
[]
no_license
#!/bin/bash
# Build the site_monitor Docker image. Requires config/secrets.yaml to exist
# so it can be baked into the image.
if test -f ../config/secrets.yaml; then
    echo "config/secrets.yaml will be used in the generated Docker image."
else
    echo "config/secrets.yaml is missing. Please, set proper credentials before building the Docker image." >&2
    # Fixed: 'exit -1' is out of the 0-255 range; use a plain failure code.
    exit 1
fi

cd .. || exit 1
# -p: don't fail if the database directory already exists (repeat builds).
mkdir -p database
docker build -f ./docker/Dockerfile . -t site_monitor
cd docker
true
63755f2fe5a3966fb8ed09c11c157fb21bca9208
Shell
TySweeden/spotify-ui
/source/start.sh
UTF-8
258
3.25
3
[]
no_license
# Override the baked-in config.json with the CONFIG_JSON env var, if set.
# CONFIG_PATH may optionally override the output location (defaults to the
# original hard-coded path), which keeps existing callers unchanged.
write_config() {
    local target="${CONFIG_PATH:-/usr/src/app/static/config.json}"
    if [[ -z "${CONFIG_JSON}" ]]; then
        echo "CONFIG_JSON environment variable not set. Not overriding config.json file."
    else
        echo "CONFIG_JSON environment variable set. Overriding config.json file."
        # Fixed: the original unquoted 'echo ${CONFIG_JSON}' word-split and
        # glob-expanded the JSON payload; printf preserves it byte-for-byte.
        printf '%s\n' "${CONFIG_JSON}" > "${target}"
    fi
}
write_config
true
19fdcb2b4f2a5ad91e47d78d9c7d6f169e880197
Shell
milksteak-project/steaks
/packages/lsftdi/info.sh
UTF-8
348
2.96875
3
[]
no_license
#!/usr/bin/env bash

# Print package metadata for lsftdi; installation is detected by the
# presence of its package directory under the user prefix.
pkg_dir="$HOME/usr/etc/packages/lsftdi"

if [ -d "$pkg_dir" ]; then
    echo -e "Package: lsftdi"
else
    echo -e "Package: lsftdi (not installed)"
fi

echo
echo -e "Version: 1.4"
echo -e "Homepage: https://www.intra2net.com/en/developer/libftdi"
echo
echo -e "Dependencies: libusb libftdi"
echo
echo -e "Description: A utility for listing FTDI chips."
true
78e8b286860abd076c007e6a85a82cfd408f9ef6
Shell
jrubix/AssemblyX86-64
/Compute_quotient_remainder/run.sh
UTF-8
529
3.015625
3
[]
no_license
#!/bin/bash
#Author: Justin Drouin
#Assignment 2
#Date: September 8th, 2019
#Program name: Assignment 2

# -f: don't error out when there is nothing to clean (first run)
rm -f *.o
rm -f *.out

echo "Assemble the X86 file compdiv.asm"
nasm -f elf64 compdiv.asm -o compdiv.o

echo "Compile the C file arithmetic2.c"
gcc -c -m64 -std=c11 arithmetic2.c -o arithmetic2.o

echo "Link the 'O' files arithmetic2.o and compdiv.o"
g++ -m64 -fno-pie -no-pie -o assemblycomp.out arithmetic2.o compdiv.o

echo "Run the assembly computation program."
./assemblycomp.out

echo "This Bash script file will now terminate."
true
83bd7c9828473038d07951d622905348bbcfb9c4
Shell
jdhang/dotfiles
/zsh/zshrc
UTF-8
2,218
2.84375
3
[]
no_license
# Path to your oh-my-zsh installation. export ZSH=$HOME/.oh-my-zsh ZSH_THEME="spaceship" # spaceship theme overrides # SPACESHIP_PROMPT_SYMBOL="$" SPACESHIP_CONDA_SHOW=true SPACESHIP_DOCKER_SHOW=false SPACESHIP_NODE_SHOW=true SPACESHIP_TIME_SHOW=true # plugins=(git osx gem node npm ruby bower kubectl pip postgres redis-cli tmux vagrant wd yarn zsh-syntax-highlighting) plugins=(git osx gem node npm ruby bower kubectl pip postgres redis-cli vagrant wd yarn zsh-syntax-highlighting) source $ZSH/oh-my-zsh.sh # user configuration export PATH="/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin" # homebrew export PATH=$PATH:"/opt/homebrew/bin" # postgres # export PATH=$PATH:/Applications/Postgres.app/Contents/Versions/latest/bin # heroku # export PATH="/usr/local/heroku/bin:$PATH" # node # export PATH="$HOME/.node/bin:$PATH" # rbenv # export PATH="$HOME/.rbenv/bin:$PATH" # eval "$(rbenv init - zsh)" # anaconda # export PATH="$HOME/opt/anaconda3/bin:$PATH" # commented out by conda initialize # pyenv # eval "$(pyenv init -)" # python export PATH="$HOME/Library/Python/3.8/bin:$PATH" # docker # eval "$(docker-machine env)" # octave # export PATH="/usr/local/opt/texinfo/bin:$PATH" # go lang # export PATH="/usr/local/go/bin:$PATH" # export PATH="$HOME/go/bin:$PATH" # default editor export EDITOR='nvim' # Sourcing other files source $HOME/.dotfiles/zsh/aliases if [ -t 0 ]; then stty start '' stty stop '' stty -ixon stty ixoff stty ixany fi [ -f ~/.fzf.zsh ] && source ~/.fzf.zsh # nvm export NVM_DIR="$HOME/.nvm" [ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh" # This loads nvm [ -s "$NVM_DIR/bash_completion" ] && \. "$NVM_DIR/bash_completion" # This loads nvm bash_completion # >>> conda initialize >>> # !! Contents within this block are managed by 'conda init' !! __conda_setup="$('/Users/jdhang/opt/miniconda3/bin/conda' 'shell.zsh' 'hook' 2> /dev/null)" if [ $? -eq 0 ]; then eval "$__conda_setup" else if [ -f "/Users/jdhang/opt/miniconda3/etc/profile.d/conda.sh" ]; then . 
"/Users/jdhang/opt/miniconda3/etc/profile.d/conda.sh" else export PATH="/Users/jdhang/opt/miniconda3/bin:$PATH" fi fi unset __conda_setup # <<< conda initialize <<<
true
35abd54ef1844f4117d6573fac8b0aa610cb787c
Shell
Piasy/Xrash
/build_linux.sh
UTF-8
328
2.734375
3
[ "MIT" ]
permissive
#!/bin/bash

# Out-of-source CMake build for Linux: compile libcrash.so, dump its breakpad
# symbols, strip the library, and stage the artifacts. The && chain stops at
# the first failing step.
rm -rf build/Linux
mkdir -p symbols/linux_x64
mkdir -p build/Linux && cd build/Linux \
    && cmake ../.. \
    && make \
    && ../../tools/dump_syms_linux_x64 libcrash.so > \
        ../../symbols/linux_x64/libcrash.so.sym \
    && strip -s libcrash.so \
    && cp libcrash.so ../../libs \
    && cp CrashExample ../../Linux \
    && cd ../../
true
de33ca8e9f4d6e6bc464680501ba67733dee03f5
Shell
anokata/dotfiles
/linux/bin/order-old/mountnewsd
UTF-8
176
2.546875
3
[]
no_license
#!/usr/bin/env bash
# Mount the most recently attached USB device (reported by `newusb`) on
# /mnt/ftp, wait for the user to press Enter, then unmount it.

# Fixed: bail out if newusb fails — the original would otherwise try to
# mount the bare "/dev/" path with an empty device name.
sd=$(newusb) || exit 1
echo "Mount $sd to /mnt/ftp"
sudo mount "/dev/$sd" /mnt/ftp
echo "Press Enter when need to unmound"
read -r
echo "Unmount $sd"
sudo umount "/dev/$sd"
true
b25ebd70d9683c72b03c530f371c58474f24f8d5
Shell
prayjourney/CS-Java-LearnNotes
/编程工具集/Linux/Shell/learnshell.sh
UTF-8
1,908
3.59375
4
[ "BSD-3-Clause" ]
permissive
#!/usr/bin/bash #是注释符号, !/bin/sh要写在第一句,指出哪一个bash运行此程序 echo "Hello World!" echo "learn shell:https://blog.csdn.net/qq769651718/article/category/7467504" #定义常量,x= xxx, x = xxx是错误的 a="hello world!" num=2 echo "a is : $a , num is ${num}nd" #切片 stringass="123gdfsgsd1312313131231" echo ${stringass:1:4} if `ps -ef| grep pickup`; then echo hello; fi # 无限循环 #while : #do # echo "$a" #done echo "hello while loop" let s=124 while [ $s -gt 120 ] do : # set -x是开启调试, 会打印出来信息, set +x是关闭调试 set +x # set +x echo "==========" # 数值运算 s=$(($s - 2)) done #定义常量 i=94 sum=0 while [ $i -le 100 ] do #定义变量 let sum=sum+$i let i+=2 done echo $sum #定义数组 array_name=("value1" "value2" "value3") echo ${array_name[1]} #@和*都是打印参数 echo ${array_name[@]} length=${#array_name[@]} echo ${length} #这样直接定义了三个参数 array_name1=(value1 value2 value31) echo ${array_name1[*]} # if 和[]之间, 条件和[]之间,都要留下空格 # ifelse if [ "$SHELL" = "/bin/bash" ];then echo "your shell is the bash \n" echo "SHELL is :$SHELL" else echo "SHELL is not bash but $SHELL" fi #-p是输出时候的提示信息 read -p "请输入1,2,3三个参数: " var1 var2 var3 echo $var1 $var2 $var3 read -t 5 -p "5s之后就会过期,请快速输入: " varu echo $varu # 文件的判断 #[ -f "somefile" ] : 判断是否是一个文件 #[ -x "/bin/ls" ] : 判断/bin/ls是否存在并有执行权限 #{ -n "$var" } : 判断$var变量是否有值 #[ "$a" = "$b"] : 判断$a和$b是否相等 [ -f "/etc/shadow" ] && echo "this is password computer" if [ -f "/etc/shadow" ];then echo "123123123" else echo "没有" fi # 分片 string="a libaba, hhaha ,fsaf fasf" echo ${string:1:4} # 变量运算 let a=1 echo $a
true
efc0e68a3a0013273aa5a895f4c95ad83085cd7c
Shell
zmousm/git-stdio-push-fetch
/git-pipe-fetch-pack
UTF-8
1,636
3.890625
4
[]
no_license
#!/bin/sh USAGE='[--write-refs file] [git-fetch-pack options]' OPTIONS_SPEC= SUBDIRECTORY_OK=Yes save () { what="$1" shift for i; do if test "$what" = opts && printf %s\\n "$i" | grep -q -- "^--"; then : elif test "$what" = args && printf %s\\n "$i" | grep -qv -- "^--"; then : elif test "$what" = all; then : else continue fi # escape : for socat i=$(printf %s\\n "$i" | sed "s/:/\\\:/g") printf %s\\n "$i" | sed "s/'/'\\\\''/g;1s/^/'/;\$s/\$/' \\\\/" echo " " done unset what i } . "$(git --exec-path)/git-sh-setup" write_refs= i=1 while test ! $i -gt $# ; do eval "ival=\$$i" eval "jval=\$$((i+1))" case "$ival" in --write-refs=*) write_refs=$(printf %s\\n "$ival" | sed 's/--write-refs=//') shift ;; --write-refs) if test -n "$jval" && printf %s\\n "$jval" | grep -qv -- "^-"; then write_refs="$jval" shift 2 else usage fi ;; esac i=$((i+1)) done unset i ival jval opts=$(save opts "$@") args=$(save args "$@") # fetch-pack apparently needs some refs if test -z "$args" && printf %s\\n "$opts" | grep -qv -- "--all"; then eval "set -- $opts --all . $args" else eval "set -- $opts . $args" fi unset opts args socat=$(which socat) if test -z "$socat" || test ! -x "$socat"; then die "This command requires socat(1)" fi gfpcmd="git fetch-pack --upload-pack=\\\"$socat - 5 #\\\" $@" if test -n "$write_refs"; then if touch "$write_refs" && test -w "$write_refs"; then : else die "$write_refs not writeable" fi gfpcmd="$gfpcmd \>$write_refs" fi $socat STDIO SYSTEM:"$gfpcmd",fdin=5
true
736c498b3adc4429234b03b256613cbda1d3d9ed
Shell
hokim/server_config
/ho_config/00_backup/rvct41_001.sh
UTF-8
2,962
2.890625
3
[]
no_license
#!/bin/sh #-------------------------------------------------------------------------------------------------- #-------------------------------------------------------------------------------------------------- if [ "$1" = "work" ] || [ "$1" = "home" ] ; then current_build_place="$1" else # Default build place current_build_place=work fi #-------------------------------------------------------------------------------------------------- if [ "$ARMTOOLS" = "" ] && [ "$current_build_place" = "home" ] ; then HEXAGON_PATH=/pkg/qct/software/HEXAGON_Tools/3.0.10/gnu/bin PYTHON_PATH=/pkg/qct/software/python/2.6.6/bin MAKE_PATH=/pkg/gnu/make/3.81/bin SCONS_PATH=/pkg/qct/software/scons/1.2.0/bin ARM_COMPILER_PATH=/pkg/qct/software/arm/RVDS/rvds41/RVCT/Programs/4.1/713/linux-pentium ARMTOOLCHAIN_PATH=/pkg/qct/software/arm/arm-2011.03/bin export ARMTOOLS=RVCT41 export ARMROOT=/pkg/qct/software/arm/RVDS/rvds41 export ARMLIB=$ARMROOT/RVCT/Data/4.1/713/lib export ARMINCLUDE=$ARMROOT/RVCT/Data/4.1/713/include/unix export ARMINC=$ARMINCLUDE export ARMCONF=$ARMROOT/RVCT/Programs/4.1/713/linux-pentium export ARMDLL=$ARMROOT/RVCT/Programs/4.1/713/linux-pentium export ARMBIN=$ARMROOT/RVCT/Programs/4.1/713/linux-pentium #export PATH=$HEXAGON_PATH:$PYTHON_PATH:$ARM_COMPILER_PATH:$ARMTOOLCHAIN_PATH:$PATH export PATH=$HEXAGON_PATH:$SCONS_PATH:$MAKE_PATH:$PYTHON_PATH:$ARM_COMPILER_PATH:$ARMTOOLCHAIN_PATH:$PATH export ARMHOME=$ARMROOT export HEXAGON_ROOT=/pkg/qct/software/HEXAGON_Tools #export ARMLMD_LICENSE_FILE=27000@10.10.10.82 #export ARMLMD_LICENSE_FILE=27000@ahssa.iptime.org export ARMLMD_LICENSE_FILE=/pkg/qct/software/arm/RVDS/flexlm_41/linux-pentium/license.dat nohup /pkg/qct/software/arm/RVDS/flexlm_41/linux-pentium/lmgrd -c $ARMLMD_LICENSE_FILE -l ~/server.log fi #-------------------------------------------------------------------------------------------------- if [ "$ARMTOOLS" = "" ] && [ "$current_build_place" = "work" ] ; then 
HEXAGON_PATH=~/pkg/qct/software/HEXAGON_Tools/3.0.10/gnu/bin PYTHON_PATH=~/pkg/qct/software/python/2.6.6/bin ARM_COMPILER_PATH=~/pkg/qct/software/arm/RVDS/rvds41/RVCT/Programs/4.1/713/linux-pentium ARMTOOLCHAIN_PATH=~/pkg/qct/software/arm/arm-2011.03/bin export ARMTOOLS=RVCT41 export ARMROOT=~/pkg/qct/software/arm/RVDS/rvds41 export ARMLIB=$ARMROOT/RVCT/Data/4.1/713/lib export ARMINCLUDE=$ARMROOT/RVCT/Data/4.1/713/include/unix export ARMINC=$ARMINCLUDE export ARMCONF=$ARMROOT/RVCT/Programs/4.1/713/linux-pentium export ARMDLL=$ARMROOT/RVCT/Programs/4.1/713/linux-pentium export ARMBIN=$ARMROOT/RVCT/Programs/4.1/713/linux-pentium export PATH=$HEXAGON_PATH:$PYTHON_PATH:$ARM_COMPILER_PATH:$ARMTOOLCHAIN_PATH:$PATH export ARMHOME=$ARMROOT export HEXAGON_ROOT=~/pkg/qct/software/HEXAGON_Tools export ARMLMD_LICENSE_FILE=27000@10.10.10.82 #export ARMLMD_LICENSE_FILE=27000@ahssa.iptime.org fi #--------------------------------------------------------------------------------------------------
true
b0901cd0e66a689e8edf4ea2f5c000d73e67cd54
Shell
petronny/aur3-mirror
/pymg/PKGBUILD
UTF-8
658
2.65625
3
[]
no_license
# Contributor: mathieui <mathieui[at]mathieui.net> pkgname=pymg pkgrel=1 pkgver=0.2 pkgdesc="A simple recursive image viewer" url="http://codingteam.net/project/pymg" arch=('any') license=('WTFPL') depends=('python2' 'pygtk') source=("http://codingteam.net/project/${pkgname}/download/file/${pkgname}-$pkgver.tar.xz") md5sums=('28ef4f6306a68ac0f0ed5c93a24ac0f7') build() { cd $srcdir mkdir -p $pkgdir/usr/share mkdir -p $pkgdir/usr/bin cp -R ${pkgname}-$pkgver $pkgdir/usr/share/$pkgname echo -e '#!/bin/sh'"\npython2 /usr/share/${pkgname}/src/${pkgname}.py \$@" > $pkgdir/usr/bin/${pkgname} chmod +x $pkgdir/usr/bin/${pkgname} }
true
220afe16db15f83f2f674baecd3844814315fd9d
Shell
zstackio/zstack-utility
/kvmagent/kvmagent/plugins/bmv2_gateway_agent/scripts/build_nbd.sh
UTF-8
1,407
3.25
3
[ "Apache-2.0" ]
permissive
#! /bin/bash set -e # check nbd.ko exist # exit if the mod load lsmod | grep nbd && exit 0 # if not load, check the mod exist and try to load it. if [[ `find /lib/modules/$(uname -r) -type f -name 'nbd.ko*' | wc -l ` -gt 0 ]]; then echo 'options nbd max_part=16 nbds_max=64' > /etc/modprobe.d/nbd.conf modprobe nbd && exit 0 # failed to load, remove the mod and build it again # rm -rf `find /usr/modules$(uname -r) -type f -name 'nbd.ko'` # depmod -a fi # build export YUM0=`rpm -q zstack-release |awk -F'-' '{print $3}'` currkernel=/usr/src/kernels/$(uname -r) tempdir=`mktemp -d` kernel=`uname -r | awk -F '.x86_64' '{ print $1}'` yumdownloader --disablerepo=* --enablerepo=zstack-mn --archlist src --destdir=$tempdir kernel-$kernel rpm -ivh -r $tempdir $tempdir/kernel-$kernel* cd $tempdir/root/rpmbuild/SOURCES || cd $tempdir/rpmbuild/SOURCES tar Jxf ./linux-$kernel* && cd ./linux-$kernel make mrproper cp $currkernel/Module.symvers ./ cp /boot/config-$(uname -r) ./.config make oldconfig make prepare make scripts sed -i 's/REQ_TYPE_SPECIAL/REQ_TYPE_DRV_PRIV/' drivers/block/nbd.c make CONFIG_STACK_VALIDATION= CONFIG_BLK_DEV_NBD=m M=drivers/block cp drivers/block/nbd.ko /lib/modules/$(uname -r)/kernel/drivers/block/ depmod -a echo 'options nbd max_part=16 nbds_max=64' > /etc/modprobe.d/nbd.conf modprobe nbd echo 'nbd' > /etc/modules-load.d/nbd.conf rm -rf $tempdir
true
30d404246ce9a2629993a0663bfe2066f584fb8e
Shell
chaotic-aur/pkgbuild-kmix-git
/PKGBUILD
UTF-8
1,255
2.625
3
[]
no_license
# Merged with official ABS kmix PKGBUILD by João, 2021/09/29 (all respective contributors apply herein) # Maintainer: João Figueiredo & chaotic-aur <islandc0der@chaotic.cx> # Contributor: Antonio Rojas pkgname=kmix-git pkgver=21.11.70_r2383.g0ef1805b pkgrel=1 pkgdesc='KDE volume control program' url='https://apps.kde.org/kmix/' arch=($CARCH) license=(GPL LGPL FDL) depends=(knotifications-git kxmlgui-git solid-git kcompletion-git hicolor-icon-theme) makedepends=(git extra-cmake-modules-git kdoctools-git) conflicts=(${pkgname%-git}) provides=(${pkgname%-git}) groups=(kde-applications-git kde-multimedia-git) source=("git+https://github.com/KDE/${pkgname%-git}.git") sha256sums=('SKIP') pkgver() { cd ${pkgname%-git} _major_ver="$(grep -m1 'set *(RELEASE_SERVICE_VERSION_MAJOR' CMakeLists.txt | cut -d '"' -f2)" _minor_ver="$(grep -m1 'set *(RELEASE_SERVICE_VERSION_MINOR' CMakeLists.txt | cut -d '"' -f2)" _micro_ver="$(grep -m1 'set *(RELEASE_SERVICE_VERSION_MICRO' CMakeLists.txt | cut -d '"' -f2)" echo "${_major_ver}.${_minor_ver}.${_micro_ver}_r$(git rev-list --count HEAD).g$(git rev-parse --short HEAD)" } build() { cmake -B build -S ${pkgname%-git} \ -DBUILD_TESTING=OFF cmake --build build } package() { DESTDIR="$pkgdir" cmake --install build }
true