blob_id
stringlengths
40
40
language
stringclasses
1 value
repo_name
stringlengths
4
115
path
stringlengths
2
970
src_encoding
stringclasses
28 values
length_bytes
int64
31
5.38M
score
float64
2.52
5.28
int_score
int64
3
5
detected_licenses
listlengths
0
161
license_type
stringclasses
2 values
text
stringlengths
31
5.39M
download_success
bool
1 class
1b94623e385a8119faeb668e60f3d8d87b287cf7
Shell
JohnOmernik/zeta
/package_manager/dockerimages/build_images.sh
UTF-8
684
3.296875
3
[ "Apache-2.0" ]
permissive
#!/bin/bash CLUSTERNAME=$(ls /mapr) APP="dockerimages" APP_ID="dockerimagebase" . /mapr/${CLUSTERNAME}/mesos/kstore/zeta_inc/zetaincludes/inc_general.sh REG="${ZETA_DOCKER_REG_URL}" cd ${APP_HOME} IMAGES="minopenjdk7 minjdk8 minpython2 rsyncbase minopenjre7 minopenjdk7mvn333 minjdk8mvn333 minnpm minnpmgulppython ubuntu1404 ubuntu1404openjdk8" for IMAGE in $IMAGES; do echo "Running on $IMAGE" cd $IMAGE sudo docker build -t ${REG}/${IMAGE} . sudo docker push ${REG}/${IMAGE} cd .. done echo "" echo "" echo "Docker Base Images were built and pushed to registry at ${REG}" echo "You can always do it again by running ${APP_HOME}/build_images.sh" echo ""
true
4ef0d30778164c17ea1aebfedc40bb99453e84d5
Shell
alasconnect/habitat-plans
/vault/plan.sh
UTF-8
744
2.71875
3
[ "Apache-2.0" ]
permissive
pkg_name=vault pkg_origin=alasconnect pkg_version=0.10.3 pkg_maintainer="AlasConnect LLC <devops@alasconnect.com>" pkg_license=('MPL-2.0') pkg_upstream_url="https://www.vaultproject.io/" pkg_description="A tool for managing secrets. Binary package only." pkg_source="https://releases.hashicorp.com/vault/${pkg_version}/vault_${pkg_version}_linux_amd64.zip" pkg_shasum="ffec1c201f819f47581f54c08653a8d17ec0a6699854ebd7f6625babb9e290ed" pkg_filename="${pkg_name}-${pkg_version}_linux_amd64.zip" pkg_bin_dirs=(bin) pkg_deps=() pkg_build_deps=( core/unzip ) do_unpack() { cd "${HAB_CACHE_SRC_PATH}" || exit unzip ${pkg_filename} -d "${pkg_name}-${pkg_version}" } do_build() { return 0 } do_install() { install -D vault "${pkg_prefix}"/bin/vault }
true
a427073774e653bdf550c88f42caea115563ebc6
Shell
dragontt/proj_db_infer_pipe
/temp/compt_ortho_seq_pid.sh
UTF-8
563
3.125
3
[]
no_license
#! /bin/bash # Compute sequence percent identity of one query gene's sequence to those of all ortholog genes. DIR_IN=$1 DIR_RIDS_LIST=$2 DIR_OUT=$3 for RIDS_FILE in $DIR_RIDS_LIST/* do qsub -P long -l h_vmem=4G /home/mblab/ykang/proj_db_infer_pipe/temp/compt_ortho_seq_pid_function.sh $DIR_IN $RIDS_FILE $DIR_OUT done # function compute_pid() { # while read rid # do # FILE=$1/$rid # FN_OUT=$3/$(basename $FILE) # clustalo --infile $FILE --outfile ${FN_OUT}.out --seqtype dna --distmat-out ${FN_OUT}.pim --full --percent-id --force # done < $2 # }
true
c379c86b3d5198d4b24670287bafefe52501b45c
Shell
steder/words
/etc/words_initd.sh
UTF-8
1,102
3.546875
4
[]
no_license
#!/bin/bash # Start/stop words application # #set -e -u -x NAME=words APP=words RUNDIR=/home/steder/Words TWISTD=/usr/local/bin/twistd PID=/var/run/$NAME.pid LOGFILE=/var/log/$NAME/$NAME.log DESCRIPTION="Twistd Service" TB_UID=`id -u steder` TB_GID=`id -g steder` NEW_RELIC_CONFIG_FILE=/home/steder/Words/etc/newrelic.ini NEWRELIC_ADMIN=`which newrelic-admin` NEWRELIC_SUBCOMMAND="run-program" test -f $TWISTD || exit 0 . /lib/lsb/init-functions case "$1" in start) log_daemon_msg "Starting $DESCRIPTION" "$NAME" start-stop-daemon --start --verbose --chdir $RUNDIR --pidfile $PID --name $NAME --startas $NEWRELIC_ADMIN $NEWRELIC_SUBCOMMAND $TWISTD -- --logfile="$LOGFILE" --rundir="$RUNDIR" --pidfile="$PID" --uid=$TB_UID --gid=$TB_GID $APP log_end_msg $? ;; stop) log_daemon_msg "Stopping $DESCRIPTION" "$NAME" start-stop-daemon --stop --verbose --oknodo --pidfile $PID log_end_msg $? ;; restart) log_daemon_msg "Restarting $DESCRIPTION" "$NAME" $0 stop $0 start log_end_msg $? ;; *) log_action_msg "Usage: /etc/init.d/$NAME {start|stop|restart}" exit 2 ;; esac exit 0
true
d72efbac4bbe3a0125a2ea2cabdc9b1ad710aadf
Shell
milamber86/galera
/keydbhc.sh
UTF-8
327
2.953125
3
[]
no_license
#!/bin/bash host="${1}"; port="${2}"; pass="keydbpass"; clitimeout=5; test="$(echo -e "info replication\nQUIT\n" | timeout -k ${clitimeout} ${clitimeout} keydb-cli -h ${host} -p ${port} -a ${pass} 2>/dev/null | grep ^role | awk -F':' '{print $2}')"; if [[ "$test" =~ "active-replica" ]] then exit 0 else exit 1 fi
true
4fea9da06fefbe1a122ca1a4112fa0777cb14f22
Shell
agyaglikci/helpers
/docker_setup.sh
UTF-8
1,503
3.65625
4
[ "MIT" ]
permissive
#!/bin/bash InstallDirectory=/opt # AnacondaVersion=5.3.1 # uncomment this line if you prefer Anaconda if [ `whoami` != "root" ]; then echo "First run su root. Then run this script." exit fi cd ~ mkdir -p tmp cd tmp echo "Working under: "`pwd` apt-get update echo "Installing essentials" for package in "g++" "byobu" "vim" "ca-certificates" "git" "make" "r-cran-pkgmaker" "locales" "libc6-dev-i386 gcc-multilib g++-multilib"; do echo "" echo ">>> Installing $package" apt-get install $package done # echo "" # echo "Installing Anaconda2 (I prefer this over Python because of large library support. You can also run the following:" # echo "apt-get install python-dev" # wget https://repo.anaconda.com/archive/Anaconda2-${AnacondaVersion}-Linux-x86_64.sh # echo " The next script will run interactively." # echo " Hit Enter to continue." # echo " Type yes to accept the terms and conditions" # echo " Install anaconda under ${InstallDirectory}/anaconda2" # echo " Type no to not to install Microsoft VSCode (it is not needed)." # bash Anaconda2-${AnacondaVersion}-Linux-x86_64.sh echo "Installing gem5 dependencies" for package in "zlib1g-dev" "automake" "scons" "libprotobuf-dev python-protobuf protobuf-compiler libgoogle-perftools-dev" "python-dev"; do echo "" echo ">>> Installing $package" apt-get install $package done echo "Installing some helper packages" for package in "flex" "bison"; do echo "" echo ">>> Installing $package" apt-get install $package done
true
2bbda810b3fbd892e03f2a80b9f26a3d056fd6ec
Shell
nodebotanist/openwrt-tessel
/package/tessel/hotplug/files/ble-boot-start
UTF-8
225
2.734375
3
[ "MIT", "Apache-2.0" ]
permissive
#!/bin/sh /etc/rc.common START=65 STOP=65 boot() { logger -t BLE "Initializing any BLE devices plugged in at boot..." hciconfig hci0 up hciconfig hci1 up logger -t BLE "Done initializing." }
true
61450f4a07972faa92a88727376ec60ed03e26f9
Shell
connorjclark/zquest-data
/parse-quests.sh
UTF-8
173
2.859375
3
[]
no_license
# 1..768 for i in {700..768} do if [ ! -f "quests/$i/data.json" ] then rm -rf output python3 src/main.py quests/"$i"/*.qst mv output/* "quests/$i" fi done
true
80e95eff9d9d198521aa22b884bfa3915447517c
Shell
lxtxl/aws-pipeline-demo
/template/awscli/create_codepipeline_service_role
UTF-8
1,397
3.5625
4
[]
no_license
#!/bin/bash CODEPIPELINE_SERVICE_ROLE_NAME="$1" if [ -z $CODEPIPELINE_SERVICE_ROLE_NAME ]; then CODEPIPELINE_SERVICE_ROLE_NAME='AWSCodePipelineServiceRole' fi CODEPIPELINE_POLICY_NAME='AWSCodePipelineServicePolicy' ######################################### # Create AWS CodePipeline Service Role ######################################### CODEPIPELINE_POLICY_ARN="$(aws iam list-policies --scope Local 2> /dev/null | jq -c --arg policyname $CODEPIPELINE_POLICY_NAME '.Policies[] | select(.PolicyName == $policyname)' | jq -r '.Arn')" if [ -z $CODEPIPELINE_POLICY_ARN ] ; then # create codepipeline service policy CODEPIPELINE_POLICY_ARN="$(aws iam create-policy --policy-name $CODEPIPELINE_POLICY_NAME --path /service-role/ --policy-document file://policies/CodePipelineServicePolicy.json | jq .Policy.Arn | tr -d \")" fi CODEPIPELINE_SERVICE_ROLE="$(aws iam get-role --role-name $CODEPIPELINE_SERVICE_ROLE_NAME 2> /dev/null | jq -r '.Role.RoleName')" if [ -z $CODEPIPELINE_SERVICE_ROLE ] ; then # create codepipeline service role aws iam create-role --role-name $CODEPIPELINE_SERVICE_ROLE_NAME --path /service-role/ --assume-role-policy-document file://policies/TrustPolicyforCodePipeline.json # attach policy to codepipeline role aws iam attach-role-policy --policy-arn $CODEPIPELINE_POLICY_ARN --role-name $CODEPIPELINE_SERVICE_ROLE_NAME sleep 10 fi exit 0
true
869040d8e158c577fc0c2127853d476b7b8c77ba
Shell
marhel/microsvc-demo
/proxy/generate-proxy-config.sh
UTF-8
1,735
3.203125
3
[]
no_license
#!/bin/bash containers=$(docker ps -q --filter "label=com.factor10.splat.proxy") subsystems="" for container in $containers; do config=$(docker inspect $container 2> /dev/null) subsystem=$(echo $config | jq -r '.[0].Config.Labels["com.factor10.splat.proxy"]') subsystems+="${subsystem} " done upstream="" location="" subsystems=$(echo "$subsystems" | sort | uniq) for subsystem in $subsystems; do containers=$(docker ps -q --filter "label=com.factor10.splat.proxy=$subsystem") pool="" for container in $containers; do config=$(docker inspect $container 2> /dev/null) ipaddress=$(echo $config | jq -r .[0].NetworkSettings.IPAddress) port=$(echo $config | jq -r '.[0].Config.Labels["com.factor10.splat.proxy_port"]') containername=$(echo $config | jq -r '.[0].Name') pool+=" server $ipaddress:$port; # container ${containername#/} ($container) " done upstream+="upstream $subsystem.pool { $pool} " location+=" location /$subsystem/ { proxy_pass http://$subsystem.pool/; } " done echo "$upstream# our proxy fronting all subsystems server { gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript; server_name splat.dev; proxy_buffering off; error_log /proc/self/fd/2; access_log /proc/self/fd/1; proxy_set_header Host \$http_host; proxy_set_header X-Real-IP \$remote_addr; proxy_set_header X-Forwarded-For \$proxy_add_x_forwarded_for; proxy_set_header X-Forwarded-Proto \$scheme; # workaround VirtualBox sendfile bug sendfile off; root /www/data; location / { autoindex on; } # HTTP 1.1 support proxy_http_version 1.1; proxy_set_header Connection \"\"; $location}"
true
68c9bd177afecc2f0307174290bbd6215ef527fd
Shell
KituraKommunity/Package-Builder
/linux/install_swift_from_url.sh
UTF-8
1,551
3.328125
3
[ "Apache-2.0" ]
permissive
#!/bin/bash ## # Copyright IBM Corporation 2016,2018 # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ## # This script installs the Swift binaries. The following variable # must be set for this script to work: # SWIFT_SNAPSHOT - version of the Swift binaries to install. # If any commands fail, we want the shell script to exit immediately. set -e # Echo commands before executing them. #set -o verbose echo ">> Running ${BASH_SOURCE[0]}" # Suppress prompts of any kind while executing apt-get export DEBIAN_FRONTEND="noninteractive" sudo -E apt-get -q update sudo -E apt-get -y -q install clang lldb-3.8 libicu-dev libtool libcurl4-openssl-dev libbsd-dev build-essential libssl-dev uuid-dev tzdata libz-dev libblocksruntime-dev echo ">> Installing '${SWIFT_SNAPSHOT}'..." # Install Swift compiler cd $projectFolder wget --progress=dot:giga $SWIFT_SNAPSHOT FILENAME=$(echo $SWIFT_SNAPSHOT | rev | cut -d/ -f1 | rev) tar xzf $FILENAME SWIFT_FOLDER=$(basename -s .tar.gz $FILENAME) export PATH=$projectFolder/$SWIFT_FOLDER/usr/bin:$PATH rm $FILENAME
true
702be864c911c143806bb4943060b5944c8a1896
Shell
qaEngineerpm/WebClient
/tasks/updateTranslations.sh
UTF-8
517
3.359375
3
[ "MIT" ]
permissive
#!/bin/bash set -eo pipefail DEST_FILE="ProtonMail Web Application.pot" TEMPLATE_FILE=template.pot CROWDIN_KEY_API=$(cat env/.env | grep CROWDIN_KEY_API | awk -F '=' '{ print $2 }') if [ -z "$CROWDIN_KEY_API" ]; then echo "You must have env/.env to deploy. Cf: https://github.com/ProtonMail/Angular/wiki/Crowdin" exit 1 fi echo "Uploading $TEMPLATE_FILE file" curl \ -F "files[/$DEST_FILE]=@po/$TEMPLATE_FILE" \ https://api.crowdin.com/api/project/protonmail/update-file?key=$CROWDIN_KEY_API echo "Done!"
true
13b0bd3b0c436fe3f047cc908cc41124175d6711
Shell
MindFaktur/coin-flip-combination
/uc1.sh
UTF-8
160
3.046875
3
[]
no_license
#!/bin/bash echo "This is a coin flip simulator" value=$((RANDOM%2)) if (( $value==1 )) then echo "The winner is heads" else echo "The winner is tails" fi
true
944bf1452724c5d22c976d17da304183c0f3bf84
Shell
Cours-HE-ARC/boutique-service
/build/jelastic-prod-deploy.sh
UTF-8
2,295
3.484375
3
[]
no_license
#!/bin/bash JELASTIC_URL='app.jpc.infomaniak.com' PROJECT_ID='boutique-service-prod' ENV_NAME='maven' NODE_ID='4280' BOUTIQUE_URL='boutique-service-prod.jcloud.ik-server.com' login() { echo "=============================== Login to provider ===============================" echo "$JELASTIC_USER" echo "$JELASTIC_PASSWORD" # récupération de la valeur session dans la réponse SESSION=$(curl -d "login=$JELASTIC_USER&password=$JELASTIC_PASSWORD" -H "Content-Type: application/x-www-form-urlencoded" -X POST "https://$JELASTIC_URL/1.0/users/authentication/rest/signin" | \ sed -E 's/^.*"session":"([^"]+)".*$/\1/') # test de la validite de la session # pas vide, unqiquement caractères alpha et num, taille 36 if [[ -n $SESSION && $SESSION =~ ^[0-9a-z]+$ && ${#SESSION} -eq 36 ]] then echo "Login ok, session:$SESSION" else echo "Failed to login with credentials supplied" exit 0 fi } wait_about_env() { echo "=============================== WAITING ABOUT ENV $ENV_NAME | $(date +%d.%m.%y_%H-%M-%S) ===============================" echo "sleeping 10 second about env is up" # attente de 10s sur le déploiement sleep 10 # log echo "10s sleep end" # tant que les comit id's ne matche pas on check avec 5 secondes d'attente until check_commit_id_coherence -eq 1 ; do sleep 5 echo "check commit id coherence..." 
done } deploy_prod() { echo "=============================== DEPLOY TO TEST $ENV_NAME | $(date +%d.%m.%y_%H-%M-%S) ===============================" # affichage du dernier commit echo "Last commit id: $TRAVIS_COMMIT, message: $TRAVIS_COMMIT_MESSAGE" # log de déploiement echo "Deploy to provider:$JELASTIC_URL, with env:$ENV_NAME, projectId:$PROJECT_ID, nodeId:$NODE_ID" # appel de l'api pour lancer le build et le deploy du projet DEPLOY_RESPONSE=$(curl "https://$JELASTIC_URL/1.0/environment/deployment/rest/builddeployproject?delay=1&envName=$ENV_NAME&session=$SESSION&nodeid=$NODE_ID&projectid=$PROJECT_ID&isSequential=false" | \ jq --raw-output '.result') echo $DEPLOY_RESPONSE if [ $DEPLOY_RESPONSE -eq 0 ] then echo "Deploy command successfully send" else exit 1 fi } login deploy_test wait_about_env
true
06a2e5fa9bffbe08d2f2f8270733a097217c6b83
Shell
kergoth/dotfiles
/bitbake/scripts/wait-for-images
UTF-8
1,533
4.09375
4
[]
no_license
#!/bin/sh set -eu usage() { cat <<END >&2 ${0##*/} [options..] BUILD_DIR IMAGE_TARGET Options: -t FSTYPES Space-separated list of fstypes matching IMAGE_FSTYPES. -h Show usage END exit 2 } get_vars() { ( set +ue if [ -e ./setup-environment ]; then . ./setup-environment elif [ -e ../poky ]; then . ../poky/oe-init-build-env . elif [ -e ../oe-core ]; then . ../oe-core/oe-init-build-env . ../bitbake fi >/dev/null bitbake -e | grep -E '^(DEPLOY_DIR_IMAGE|IMAGE_FSTYPES)=' ) } fstypes="wic wic.gz wic.bz2 wic.bmap sdcard" while getopts t:h opt; do case "$opt" in t) fstypes="$OPTARG" ;; \? | h) usage ;; esac done fstypes_filter="$(echo "$fstypes" | tr ' ' '|')" shift $((OPTIND - 1)) if [ $# -ne 2 ]; then usage fi builddir="$1" shift image="$1" shift # Wait for bitbake to complete lsof +r -f -- "$builddir/bitbake.lock" >/dev/null 2>&1 || : cd "$builddir" eval "$(get_vars)" if [ -z "$DEPLOY_DIR_IMAGE" ]; then echo >&2 "Error determining DEPLOY_DIR_IMAGE from bitbake -e" exit 1 fi find_args="$(echo "$IMAGE_FSTYPES" | tr ' ' '\n' | grep -xE "($fstypes_filter)" | sed -e "s/^/-o -name \"$image*./; s/$/\"/;" | tr '\n' ' ' | sed -e 's/^-o //; s/ *$//')" if [ -z "$find_args" ]; then echo >&2 "Error: no valid image types found" exit 1 fi eval set -- "$find_args" set -x find "$DEPLOY_DIR_IMAGE" -type l -a \( "$@" \) -print
true
04571a5228dcc4d1bbc5b39da878c711293e61b0
Shell
t2sc0m/docker-tinydns
/init.sh
UTF-8
596
3.484375
3
[]
no_license
#!/bin/bash # nonblocking loop for docker stop trap 'exit 0' SIGTERM # compile initial zone file cd /etc/service/tinydns/root make # start daemontools run_svscan() { echo "Starting svscan.." >&2 svscan /etc/service & } # watch & update tinydns data file watch_for_changes() { echo "Watching for tinydns data file changes.." >&2 cd /etc/service/tinydns/root while true; do test data -nt data.cdb && \ ( make && pkill -HUP -u tinydns && \ touch data data.cdb && \ echo "Reloading TinyDNS" >&2 ) || echo -n "." >&2 sleep 5 done } run_svscan watch_for_changes
true
07f1bb9b73de11805f86a2aeed07e45434bbcfd2
Shell
spaezsuarez/Programacion-Bash
/Manejo_Archivos/22_leerArchivos.sh
UTF-8
442
3.671875
4
[ "MIT" ]
permissive
# !/bin/bash # Programa para mostrar la lectura de archivos en bash # Autor: Sergio David Paez Suarez - spaezsuarez@gmail.com echo "Leer archivos directamente" cat $1 echo -e "\n Almacenar los valores en una variable" valorCat=`cat $1` echo "$valorCat" #Se utiliza una variable especial llamada IFS (Internal File Separator) para evitar que los espacios en blanco se corten echo -e "\n Leer el archivo linea por linea" while IFS= read linea do echo "$linea" done < $1
true
4e18f639ac2c689971ea204f54ebc6e208884d35
Shell
HyunJunSik/Linux
/add_file.sh
UTF-8
121
3
3
[]
no_license
#!/bin/sh sum=0 for i in $* do sum=`expr $sum + $i` #sum에 계속 인자 하나씩 더해주기 done echo $sum #출력
true
14a4efb0d21afd498f67f00566c0a87146bdfb38
Shell
seboll13/ML_projects
/project2/data/generate_data.sh
UTF-8
326
2.96875
3
[]
no_license
#!/bin/bash echo 'Data generation started' for i in {1..30} do SEED=$(( $RANDOM % 10)) SHFT=$(( $RANDOM % 100 - 50)) CMPR=$(( $RANDOM % 5 + 1)) echo "Data generation with seed $SEED, shift $SHFT, compression $CMPR" python generate_synthetic_data.py $SEED $SHFT $CMPR done echo 'Data generation terminated'
true
436e57268ee8995572781511bd0ae435d5b69f40
Shell
sls-adeo/Give-IT-ModeOp
/slide.sh
UTF-8
3,256
3.9375
4
[]
no_license
#!/bin/sh # # # slide.sh : Generate a Pandoc file with photos # # # # Echantillon couleur https://htmlcolorcodes.com/fr/ # # 20210913 pandoc use xelatex for generating pdf #---------------- var MDFILE="slide2.md" DESCSLIDE="legende.csv" SLIDESHOW="ModOpGiveIT.html" PDF="ModOpGiveIT.pdf" export LC_ALL=C DATEJ=`date +'%a %d %Y'` #---------------- functions syntaxe() { echo "$0 [mode] : Generate a [mode] document for installation of GiveIT desktop/laptop" echo "[mode] is pdf or html format" exit 1 } header_pdf() { echo "% Installation Guide" > $MDFILE echo "% Team Give IT / $DATEJ" >> $MDFILE echo "% Well, installation will be done in 7mn without a mouse, YES!" >> $MDFILE echo " " >> $MDFILE } body_pdf() { echo "# $TITRE" >> $MDFILE #echo "========" >> $MDFILE echo "![$LEGENDE](./images/$IMAGENAME \"$LEGENDE\"){ width=250px }" >> $MDFILE echo " " >> $MDFILE } header_html() { echo "% Installation Guide" > $MDFILE echo "% Team Give IT / $DATEJ" >> $MDFILE echo "% Well, installation will be done in 7mn without a mouse, YES!" 
>> $MDFILE #echo "" >> $MDFILE echo " " >> $MDFILE } body_html() { echo "# $TITRE {style=\"background: #$COLOR; text-align:center;\"}" >> $MDFILE echo "<!-- ---------------PAGE $NBPAGE------------------------------------- -->" >> $MDFILE echo " " >> $MDFILE echo "<table border=\"0\" cellspacing=\"0\" cellpadding=\"4\" align=\"center\" width=\"90%\">" >> $MDFILE echo " <tr><td align=\"center\"><img src=\"./images/$IMAGENAME\" width=\"75%\" /></td></tr>" >> $MDFILE echo " <tr><td align=\"center\">$LEGENDE</tr>" >> $MDFILE echo "</table>" >> $MDFILE } #-------------- Main if [ $# -ne 1 ]; then syntaxe fi PARAM1=`echo $1 | awk '{print toupper($0)}'` if [ "X$PARAM1" != "XPDF" ] && [ "X$PARAM1" != "XHTML" ]; then echo ; echo "Bad Parameter 1 <$1>" ; echo syntaxe fi #-------------- Treatment if [ "$PARAM1" = "PDF" ]; then header_pdf else header_html fi NBPAGE=0 COLOR="DBCECB" while read line do if [ $NBPAGE -gt 0 ]; then # Skip first line IMAGENAME=`echo $line | cut -f1 -d'|'` TITRE=`echo $line | cut -f2 -d'|'` LEGENDE=`echo $line | cut -f3 -d'|'` COLOR=`printf "%X\n" $((0x${COLOR} + 1))` if [ "$PARAM1" = "PDF" ]; then body_pdf else body_html fi fi NBPAGE=`expr $NBPAGE + 1` done < ${DESCSLIDE} #---------- Produce Pdf/ SlideShow if [ "$PARAM1" = "PDF" ]; then #pandoc -t beamer $MDFILE -V theme:Warsaw -o $PDF pandoc --pdf-engine=xelatex $MDFILE -V theme:Warsaw -o $PDF [ $? -eq 0 ] && echo "Generating PDF <$PDF> OK" else pandoc -t slidy -s $MDFILE -o $SLIDESHOW [ $? -eq 0 ] && echo "Generating SlideShow <$SLIDESHOW> OK" fi
true
0628b58d06834014af96d66971214305e3d9cd12
Shell
moyue/HDedup
/sbin/hadoop-setup-single-node.sh
UTF-8
6,738
3.859375
4
[ "Apache-2.0", "LicenseRef-scancode-unknown-license-reference", "LicenseRef-scancode-unknown" ]
permissive
#!/usr/bin/env bash # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Script for setup HDFS file system for single node deployment bin=`dirname "$0"` bin=`cd "$bin"; pwd` if [ "$HADOOP_HOME" != "" ]; then echo "Warning: \$HADOOP_HOME is deprecated." echo fi . "$bin"/../libexec/hadoop-config.sh usage() { echo " usage: $0 <parameters> Optional parameters: --default Setup system as default -h Display this message " exit 1 } template_generator() { REGEX='(\$\{[a-zA-Z_][a-zA-Z_0-9]*\})' cat $1 | while read line ; do while [[ "$line" =~ $REGEX ]] ; do LHS=${BASH_REMATCH[1]} RHS="$(eval echo "\"$LHS\"")" line=${line//$LHS/$RHS} done echo $line >> $2 done } OPTS=$(getopt \ -n $0 \ -o '' \ -l 'default' \ -- "$@") if [ $? != 0 ] ; then usage fi if [ -e /etc/hadoop/hadoop-env.sh ]; then . /etc/hadoop/hadoop-env.sh fi eval set -- "${OPTS}" while true ; do case "$1" in --default) AUTOMATED=1; shift ;; -h) usage ;; --) shift ; break ;; *) echo "Unknown option: $1" usage exit 1 ;; esac done if [ "${AUTOMATED}" != "1" ]; then echo "Welcome to Hadoop single node setup wizard" echo echo -n "Would you like to use default single node configuration? (y/n) " read SET_CONFIG echo -n "Would you like to format name node? 
(y/n) " read SET_FORMAT echo -n "Would you like to setup default directory structure? (y/n) " read SET_MKDIR echo -n "Would you like to start up Hadoop? (y/n) " read STARTUP echo -n "Would you like to start up Hadoop on reboot? (y/n) " read SET_REBOOT echo echo "Review your choices:" echo echo "Setup single node configuration : ${SET_CONFIG}" echo "Format namenode : ${SET_FORMAT}" echo "Setup default file system structure: ${SET_MKDIR}" echo "Start up Hadoop : ${STARTUP}" echo "Start up Hadoop on reboot : ${SET_REBOOT}" echo echo -n "Proceed with setup? (y/n) " read CONFIRM if [ "${CONFIRM}" != "y" ]; then echo "User aborted setup, exiting..." exit 1 fi else SET_CONFIG="y" SET_FORMAT="y" SET_MKDIR="y" STARTUP="y" SET_REBOOT="y" fi AUTOMATED=${AUTOMATED:-0} SET_CONFIG=${SET_CONFIG:-y} SET_FORMAT=${SET_FORMAT:-n} SET_MKDIR=${SET_MKDIR:-y} STARTUP=${STARTUP:-y} SET_REBOOT=${SET_REBOOT:-y} # Make sure system is not already started /etc/init.d/hadoop-namenode stop 2>/dev/null >/dev/null /etc/init.d/hadoop-datanode stop 2>/dev/null >/dev/null /etc/init.d/hadoop-jobtracker stop 2>/dev/null >/dev/null /etc/init.d/hadoop-tasktracker stop 2>/dev/null >/dev/null if [ "${SET_CONFIG}" == "y" ]; then JAVA_HOME=${JAVA_HOME:-/usr/java/default} HADOOP_NN_HOST=${HADOOP_NN_HOST:-localhost} HADOOP_NN_DIR=${HADOOP_NN_DIR:-/var/lib/hadoop/hdfs/namenode} HADOOP_DN_DIR=${HADOOP_DN_DIR:-/var/lib/hadoop/hdfs/datanode} HADOOP_JT_HOST=${HADOOP_JT_HOST:-localhost} HADOOP_HDFS_DIR=${HADOOP_MAPRED_DIR:-/var/lib/hadoop/hdfs} HADOOP_MAPRED_DIR=${HADOOP_MAPRED_DIR:-/var/lib/hadoop/mapred} HADOOP_PID_DIR=${HADOOP_PID_DIR:-/var/run/hadoop} HADOOP_LOG_DIR="/var/log/hadoop" HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-/etc/hadoop} HADOOP_REPLICATION=${HADOOP_RELICATION:-1} ${HADOOP_PREFIX}/sbin/hadoop-setup-conf.sh --auto \ --hdfs-user=hdfs \ --mapreduce-user=mapred \ --conf-dir=${HADOOP_CONF_DIR} \ --datanode-dir=${HADOOP_DN_DIR} \ --hdfs-dir=${HADOOP_HDFS_DIR} \ --jobtracker-host=${HADOOP_JT_HOST} \ 
--log-dir=${HADOOP_LOG_DIR} \ --pid-dir=${HADOOP_PID_DIR} \ --mapred-dir=${HADOOP_MAPRED_DIR} \ --namenode-dir=${HADOOP_NN_DIR} \ --namenode-host=${HADOOP_NN_HOST} \ --replication=${HADOOP_REPLICATION} fi if [ ! -e ${HADOOP_NN_DIR} ]; then rm -rf ${HADOOP_HDFS_DIR} 2>/dev/null >/dev/null mkdir -p ${HADOOP_HDFS_DIR} chmod 755 ${HADOOP_HDFS_DIR} chown hdfs:hadoop ${HADOOP_HDFS_DIR} /etc/init.d/hadoop-namenode format elif [ "${SET_FORMAT}" == "y" ]; then rm -rf ${HADOOP_HDFS_DIR} 2>/dev/null >/dev/null mkdir -p ${HADOOP_HDFS_DIR} chmod 755 ${HADOOP_HDFS_DIR} chown hdfs:hadoop ${HADOOP_HDFS_DIR} rm -rf ${HADOOP_NN_DIR} /etc/init.d/hadoop-namenode format fi /etc/init.d/hadoop-namenode start /etc/init.d/hadoop-datanode start su -c '${HADOOP_PREFIX}/bin/hadoop --config ${HADOOP_CONF_DIR} dfs -mkdir /user/mapred' hdfs su -c '${HADOOP_PREFIX}/bin/hadoop --config ${HADOOP_CONF_DIR} dfs -chown mapred:mapred /user/mapred' hdfs su -c '${HADOOP_PREFIX}/bin/hadoop --config ${HADOOP_CONF_DIR} dfs -mkdir /tmp' hdfs su -c '${HADOOP_PREFIX}/bin/hadoop --config ${HADOOP_CONF_DIR} dfs -chmod 777 /tmp' hdfs /etc/init.d/hadoop-jobtracker start /etc/init.d/hadoop-tasktracker start if [ "${SET_REBOOT}" == "y" ]; then if [ -e /etc/debian_version ]; then ln -sf ../init.d/hadoop-namenode /etc/rc2.d/S90hadoop-namenode ln -sf ../init.d/hadoop-datanode /etc/rc2.d/S91hadoop-datanode ln -sf ../init.d/hadoop-jobtracker /etc/rc2.d/S92hadoop-jobtracker ln -sf ../init.d/hadoop-tasktracker /etc/rc2.d/S93hadoop-tasktracker ln -sf ../init.d/hadoop-namenode /etc/rc6.d/S10hadoop-namenode ln -sf ../init.d/hadoop-datanode /etc/rc6.d/S11hadoop-datanode ln -sf ../init.d/hadoop-jobtracker /etc/rc6.d/S12hadoop-jobtracker ln -sf ../init.d/hadoop-tasktracker /etc/rc6.d/S13hadoop-tasktracker elif [ -e /etc/redhat-release ]; then /sbin/chkconfig hadoop-namenode --add /sbin/chkconfig hadoop-datanode --add /sbin/chkconfig hadoop-jobtracker --add /sbin/chkconfig hadoop-tasktracker --add /sbin/chkconfig hadoop-namenode 
on /sbin/chkconfig hadoop-datanode on /sbin/chkconfig hadoop-jobtracker on /sbin/chkconfig hadoop-tasktracker on fi fi if [ "${STARTUP}" != "y" ]; then /etc/init.d/hadoop-namenode stop /etc/init.d/hadoop-datanode stop /etc/init.d/hadoop-jobtracker stop /etc/init.d/hadoop-tasktracker stop fi
true
379e86e68203ccf09e44224271ae8894618c77a6
Shell
laysakura/dotfiles
/install-support/installDotfiles.sh
UTF-8
1,466
2.859375
3
[]
no_license
export basedir=$(cd $(dirname ${BASH_SOURCE:-$0}); pwd)/.. # ssh mkdir -p $HOME/.ssh/ cpDeep $basedir/.ssh/config $HOME/.ssh/ cpDeep $basedir/.ssh/id_* $HOME/.ssh/ chmod 600 $HOME/.ssh/* # zsh cpDeep $basedir/.zshrc $HOME/ rm -rf $HOME/.zsh ; mkdir $HOME/.zsh cpDeep $basedir/.zsh/*.zsh $HOME/.zsh/ # emacs gitCopy $basedir $HOME HEAD:emacs # el-getのキャッシュは残しつつ、リポジトリの内容をコピー # tmux cpDeep $basedir/.tmux.conf $HOME/ # mintty cpDeep $basedir/.minttyrc $HOME/ # git cpDeep $basedir/.gitconfig $HOME/ cpDeep $basedir/.gitignore $HOME/ # ghq is_msys && (cmd /c "mklink /j C:\home C:\msys64\home" || :) # MSYS2のghqで /home が \home になって、/c/home を参照しに行ってしまう問題のworkaround mkdir -p $HOME/.ghq # rust cpDeep $basedir/.cargo $HOME/ # scala cpDeep $basedir/.sbt $HOME/ cpDeep $basedir/.sbtrc $HOME/ # python cpDeep $basedir/.pypirc $HOME/ # gdb cpDeep $basedir/.gdbinit $HOME/ # .config cpDeep $basedir/.config $HOME/ # OS X keymap if is_osx; then cp $basedir/MacKeyMap/* "$HOME/Library/Keyboard Layouts/" fi # xkb if is_linux && has_x11 ; then cpDeep $basedir/.xkb $HOME/ cat > $HOME/.config/autostart/xkb.desktop <<EOS [Desktop Entry] Name=xkb Exec=xkbcomp -I$HOME/.xkb $HOME/.xkb/keymap/mykbd $DISPLAY Type=Application X-GNOME-Autostart-enabled=true EOS fi # 雑多なスクリプト mkdir -p $HOME/.local/bin/ cp -p $basedir/myscripts/* $HOME/.local/bin/
true
b130cae8920bad8e371d365ca5ffae43b7702421
Shell
ericabriga/linuxWork
/oddOrEvenSum.sh
UTF-8
216
3.390625
3
[]
no_license
#!/bin/bash echo "enter first number" read value1 echo "enter second number" read value2 value=$(($value1 + $value2)) echo "sum value is $((value))" if [ $((value%2)) -eq 0 ] then echo "even" else echo "odd" fi
true
46b5e393b5de659ae2d9182981ed16456d4998b2
Shell
davidjsanders/studySim
/OLD/oldStudySim/stage1/includes/run_docker_phone_fn.sh
UTF-8
1,229
3.265625
3
[]
no_license
# Helpers that launch the "phone" simulator container for the study.
# Both functions expect the caller to provide:
#   $phonePort, $serverName -- port / host announced to the container
#   check_docker NAME       -- helper that sets $DOCKER_CHECK
#   $1                      -- name prefix; container is "<prefix>phone<port>"
# NOTE(review): the container is started when DOCKER_CHECK equals the
# literal "X" -- presumably check_docker's "not running" sentinel; confirm
# against check_docker's definition.

# Start a phone container (no persistent storage).
function run_docker_phone {
    echo -n "Starting phone (port $phonePort on $serverName): "
    check_docker "$1phone$phonePort" # sets $DOCKER_CHECK
    if [ "X" == "${DOCKER_CHECK}" ]; then
        docker run -p 16379:6379 -p $phonePort:$phonePort \
            --net=isolated_nw \
            --name $1phone$phonePort \
            -e portToUse=$phonePort \
            -e serverName="$serverName" \
            -e TZ=`date +%Z` \
            -d dsanderscan/mscit_$1phone
        sleep 1
    else
        echo "Phone already running."
    fi
}

# Start a phone container with its data directory bind-mounted from the
# host, so state survives container removal.
function run_docker_phone_persist {
    echo "Starting phone (port $phonePort on $serverName)."
    echo -n "Data is being persisted at $(pwd)/Phone/datavolume: "
    check_docker "$1phone$phonePort" # sets $DOCKER_CHECK
    if [ "X" == "${DOCKER_CHECK}" ]; then
        docker run -p 16379:6379 -p $phonePort:$phonePort \
            --net=isolated_nw \
            --name $1phone$phonePort \
            -e portToUse=$phonePort \
            -e serverName="$serverName" \
            -e TZ=`date +%Z` \
            -v $PWD/Phone/datavolume:/Phone/datavolume \
            -d dsanderscan/mscit_$1phone
        sleep 1
    else
        echo "Phone already running."
    fi
}
true
9427371cd3d45b583fd3fe79739247d2f396b24a
Shell
philippgadow/bsm4tops-gnn
/plotting/setup.sh
UTF-8
253
2.890625
3
[]
no_license
#!/bin/bash
# Bootstrap the Python virtualenv on first run; afterwards just activate it.
if [ ! -d "venv" ]; then
    echo "Setting up virtual environment..."
    python3 -m venv venv
    source venv/bin/activate
    pip3 install -r requirements.txt
else
    echo "Activating virtual environment."
    source venv/bin/activate
fi
true
3414e59179a782322ff47f84242473e5b9640de2
Shell
Gorrd/ChainingStrategy
/data.bash
UTF-8
370
3.125
3
[]
no_license
#!/bin/bash
# Run 30 ARGoS simulations of the chaining strategy; after each run,
# append the last line of output.txt (the metrics) to the cumulative
# results file and archive the raw output under results/.
for (( run = 1; run <= 30; run++ )); do
    echo "Execution $run"
    argos3 -c landmarks.argos
    echo "Writing in results file"
    tail -1 output.txt
    tail -1 output.txt >> results/results.txt
    echo "Moving output file"
    mv output.txt "results/output-$run.txt"
done
echo "Finished!"
exit 0
true
3399a80cd32aadada6506af5047b416ef79e3aa4
Shell
schannahalli/yelpdataanalysis
/code/scripts/events/ingest_yelp_business_data.sh
UTF-8
1,469
3.90625
4
[]
no_license
#!/bin/bash
# Copy the daily Yelp business-object dump from the incoming S3 prefix into
# the dated repository prefix, bracketing the work with RUNNING / FAILED /
# SUCCEDED events through the event-driven library.
#
# Usage: ingest_yelp_business_data.sh <base_dir> <input_dir>
set -x

__event_name='ingest_yelp_business_data'
_run_time=`date '+%Y%m%d%H%M%S'`

function usage() {
    echo "Need to pass base dir followed by input dir path"
}

if [ $# != 2 ]; then
    usage
    echo "Passed # of arguments $#"
    echo "Failed to pass right arguments at invocation time - ${_run_time}"
    exit 1
fi

_base_dir=$1
_input_dir=$2

# Provides publish_event <name> <state>.
source $_base_dir/event_driven_library_function.sh

_state='RUNNING'
publish_event ${__event_name} ${_state}
retval=$?
if [ "$retval" -eq "0" ]; then
    echo "Event ${__event_name} registered successfully"
else
    echo "Event ${__event_name} failed to register at ${_run_time}"
    exit 1
fi

# SECURITY FIX: AWS credentials used to be hardcoded here. They must now be
# supplied through the environment (and the previously committed keys
# should be rotated).
: "${AWS_ACCESS_KEY_ID:?AWS_ACCESS_KEY_ID must be set in the environment}"
: "${AWS_SECRET_ACCESS_KEY:?AWS_SECRET_ACCESS_KEY must be set in the environment}"
export AWS_ACCESS_KEY_ID AWS_SECRET_ACCESS_KEY

_date_str=`date '+%Y%m%d'`
_file_in='s3://markev-developer-test/devtest-sharat/incoming/tmp/'
_file_out='s3://markev-developer-test/devtest-sharat/repository/ETL/db/yelp_incoming_files/yelp_business_object/datestr='$_date_str'/'

# BUG FIX: the copy was stored in a single-quoted string and then invoked
# through the wrong variable name (`${command}` instead of `${_command}`),
# so it never actually ran. Execute it directly and capture the status.
aws s3 cp ${_file_in} ${_file_out}
retval=$?
if [ "$retval" -eq "0" ]; then
    echo "Copy from ${_file_in} to ${_file_out} was successfull"
else
    echo "Copy from ${_file_in} to ${_file_out} failed"
    _state=FAILED
    publish_event ${__event_name} ${_state}
    exit 1
fi

# NOTE(review): 'SUCCEDED' spelling kept as-is -- downstream consumers may
# match this literal state value.
_state=SUCCEDED
publish_event ${__event_name} ${_state}
retval=$?  # BUG FIX: previously tested the stale status from the s3 copy
if [ "$retval" -eq "0" ]; then
    echo "Event ${__event_name} registered successfully with state ${_state}"
else
    echo "Event ${__event_name} failed to register at ${_run_time}"
    exit 1
fi
true
799e0de23d3c163b3351f183906347def06a0f14
Shell
aaron-m-edwards/Shokunin-April-Magic-Square
/go.sh
UTF-8
185
3.5
4
[]
no_license
#!/bin/sh
# Run the TRUMP interpreter on magic.ts and reshape its output:
# '|'-separated rows become lines, everything up to the last '-' per line
# is stripped, and the remaining fields are tab-separated.

# POSIX 'command -v' replaces the non-standard 'which'.
path_to_trump=$(command -v TRUMP)
if [ -x "$path_to_trump" ] ; then
  TRUMP magic.ts | tr '\n' '\_' | tr '|' '\n' | sed 's/.*-//' | tr '\_' '\t'
else
  echo "TRUMP not found on the path"
fi
true
bc78487f71337d075455ac0828f282081d8e2fac
Shell
lixiaoqing/hiero-nnjm
/script/compile_lm.sh
UTF-8
343
2.953125
3
[]
no_license
#!/bin/bash
# Compile every KenLM .cc translation unit in the util/ and lm/ trees into
# a sibling .o file, skipping test and main sources (they are built
# separately).
CXX=g++
CXXFLAGS="-std=c++0x -O3 -fopenmp -lz -I. -DKENLM_MAX_ORDER=5"

for src in util/double-conversion/*.cc util/*.cc lm/*.cc; do
    case "$src" in
        *test.cc|*main.cc)
            # unit-test / driver sources: not part of the library objects
            ;;
        *)
            $CXX $CXXFLAGS -c "$src" -o "${src%.cc}.o"
            ;;
    esac
done
true
3e3fff59c1505c2683100a7d0e39cdab48a15fff
Shell
stefan-lindstrom/fang
/lib/world/newzone
UTF-8
1,324
3.09375
3
[]
no_license
#!/bin/sh
# Create the skeleton data files (.zon, .wld, .mob, .obj, .qst, .trg) for a
# new MUD zone in the current directory. Refuses to run if the zone or
# room file already exists under zon/ or wld/.
#
# Usage: newzone <zone-number>

# '[ -z ... ]' instead of the bashism '==' -- this script runs under /bin/sh.
if [ -z "$1" ]; then
  echo You must give a number for the new zone
  exit
fi
if [ -e "zon/$1.zon" ]; then
  echo Zone file $1.zon already exists
  exit
fi
if [ -e "wld/$1.wld" ]; then
  echo Room file $1.wld already exists
  exit
fi

# Zone definition: vnum <n>, room numbers up to <n>99, locked, with a
# single STOP reset command. Here-docs replace the original echo-per-line
# generation; the emitted text is unchanged.
cat > "$1.zon" <<EOF
<?xml version="1.0"?>
<zone vnum="$1" lifespan="30" top="${1}99" mode="2" lock="yes">
 <name>New Zone</name>
 <resetcmds numberof="1">
 <cmd command="STOP"/>
 </resetcmds>
</zone>
EOF

# One placeholder room, vnum <n>00.
cat > "$1.wld" <<EOF
<?xml version="1.0"?>
<rooms>
 <room>
 <roombasic title="An unfinished room" vnum="${1}00" zone="$1">
 <roomdescription>You're in an unfinished room.
 </roomdescription>
 <flags>0</flags>
 <sector>Outside</sector>
 <special>NONE</special>
 </roombasic>
 </room>
</rooms>
EOF

# Empty mobile and object lists.
cat > "$1.mob" <<EOF
<?xml version="1.0"?>
<mobiles>
</mobiles>
EOF

cat > "$1.obj" <<EOF
<?xml version="1.0"?>
<objects>
</objects>
EOF

#echo "\$" > $1.shp
echo "\$" > "$1.qst"
echo "\$" > "$1.trg"
true
8b572d4d8aef352fa1be94ca8241dd7ff05c916c
Shell
martokk/dotfiles
/core/.aliases/.navigation
UTF-8
747
2.71875
3
[]
no_license
#!/bin/bash # ------------------------------------------------------------------------- # CD INTO FAVORITES # ------------------------------------------------------------------------- alias cdi="cd /media/martokk/FILES/__INBOX__" alias cdd="cd /media/martokk/FILES/Dotfiles" alias cddl="cd /media/martokk/FILES/Downloads" alias cdp="cd /media/martokk/FILES/Projects" alias cds="cd /media/martokk/FILES/Servers" # ------------------------------------------------------------------------- # NAVIGATION HELPERS # ------------------------------------------------------------------------- # Easier navigation: .., ..., ...., ....., ~ and - alias ..="cd .." alias ...="cd ../.." alias ....="cd ../../.." alias .....="cd ../../../.." alias -- -="cd -"
true
d989e694a79b483324c83e92344e588fc6aa1c89
Shell
DATx-Protocol/DATx
/lsd/redis_install.sh
UTF-8
975
3.3125
3
[]
no_license
#!/bin/bash redis=redis-3.0.3.tar.gz installdir=/app softdir=/opt/soft if [ ! -d $softdir ]; then mkdir /opt/soft fi echo "install software" sleep 2 cd $softdir wget http://download.redis.io/releases/redis-3.0.3.tar.gz echo "install dependent environment" echo "install redis" sleep 2 tar zxvf $redis && cd `echo $redis | awk -F".tar.gz" '{print $1}'` make && make install mkdir -p /app/redis/bin/ && mkdir /app/redis/etc/ cp redis.conf /app/redis/etc/ cp /usr/local/bin/redis* /app/redis/bin/ ln -s /app/redis/bin/redis* /usr/bin/ sed -i 's/daemonize no/daemonize yes/' /app/redis/etc/redis.conf sed -i 's=pidfile /var/run/redis.pid=pidfile /app/redis/redis.pid=' /app/redis/etc/redis.conf # echo "start redis" # /app/redis/bin/redis-server /app/redis/etc/redis.conf # echo "set startup" # echo "/app/redis/bin/redis-server /app/redis/etc/redis.conf" >> /etc/rc.local echo "redis install success,port:6379,installation directory: /app/redis/"
true
b3b6c71f8fefbf8dd0d67c3840c5d8352c836aca
Shell
li--paul/dockerchallenge-smallest-image
/build.sh
UTF-8
302
2.546875
3
[ "MIT" ]
permissive
#!/bin/bash
# Build the tiny "hello" binary from assembly and package it into the
# smallest possible Docker image. Stops on the first error; traces commands.
set -ex

# assemble, then link with symbols stripped
as -o hello.o hello.S
ld -s -o hello hello.o

# inspect the resulting binary
file hello

# build and inspect the Docker image
docker build -t dieterreuter/hello -f Dockerfile.armhf .
docker image ls dieterreuter/hello:latest
true
e6c7374cbf0943270cb255c3e0a4dc5b6b09f046
Shell
lululau/dot_files
/emacs-config/crafts/start_normal_mitmproxy.sh
UTF-8
1,030
3.234375
3
[]
no_license
#!/bin/bash tmux_session=mitmproxy-normal if tmux has-session -t $tmux_session 2>/dev/null; then tmux attach -t $tmux_session fi dirname=$(cd "$(dirname $0)"; pwd) if [ "$1" = "true" ]; then SERVICE_GUID=`printf "open\nget State:/Network/Global/IPv4\nd.show" | scutil | grep "PrimaryService" | awk '{print $3}'` SERVICE_NAME=`printf "open\nget Setup:/Network/Service/$SERVICE_GUID\nd.show" | scutil | grep "UserDefinedName" | awk -F': ' '{print $2}'` sudo networksetup -setwebproxy "$SERVICE_NAME" 127.0.0.1 8888 sudo networksetup -setsecurewebproxy "$SERVICE_NAME" 127.0.0.1 8888 fi ulimit -n 200000 ulimit -u 2128 tmux new -s $tmux_session -n 'normal mitmproxy' "PAGER='$dirname'/mitmproxy-emacs-viewer.sh mitmproxy --showhost -k --set console_palette_transparent=true --set console_palette=dark -p 8888 --view-filter='~t json'" exit_code=$? if [ "$1" = "true" ]; then sudo networksetup -setwebproxystate "$SERVICE_NAME" off sudo networksetup -setsecurewebproxystate "$SERVICE_NAME" off fi exit $exit_code
true
a429f423325888b0cfb31e488f4a21168549ae54
Shell
OpenModelica/OMLibraries
/addMissingWithin.sh
UTF-8
675
3.921875
4
[]
no_license
#!/bin/sh
# Prepend missing 'within <package>;' clauses to every .mo file below the
# current directory.
#
# You need to be placed in the directory just above the library:
#   $ ls
#   Modelica 1.6
#   $ addMissingWithin.sh
# With --remove-old-within, existing within clauses are stripped first so
# they are all regenerated from the directory layout.

# add_within <path>
#   Derive the package path from <path>'s directory (./A/B/x.mo gives
#   "within A.B;") and prepend it unless the file already has a within
#   clause. For package.mo the final component (the package's own name)
#   is dropped from the clause.
add_within() {
  f=$1
  WITHIN=`dirname "$f" | sed "s,./,within ," | tr / . | sed 's/$/;/'`
  if test "package.mo" = "`basename "$f"`"; then
    WITHIN=`echo $WITHIN | sed 's/[. ][^.]*;/;/'`
  fi
  if grep -q "^within" "$f"; then
    echo "Skipping $f; already has within"
  else
    echo "$WITHIN" | cat - "$f" > tmp.mo
    mv tmp.mo "$f"
    echo "Fixed $f"
  fi
}

if test "$1" = "--remove-old-within"; then
  find . -name "*.mo" -exec sed -i "/^within.*; *$/d" {} ";"
fi

# BUG FIX: 'for f in `find ...`' word-split paths on whitespace, breaking
# on library directories like "Modelica 1.6" (the documented layout).
find . -name "*.mo" | while IFS= read -r f; do
  add_within "$f"
done
true
4523cf4fd2af0648390f8a239f976b7f2c50552d
Shell
kalisio/kano
/.travis.app.sh
UTF-8
1,227
3.265625
3
[ "LicenseRef-scancode-unknown-license-reference", "MIT" ]
permissive
#!/bin/bash
# CI pipeline for the app: provision the environment, build the client and
# its Docker image, then push the image with both the version tag and the
# flavor tag. Relies on helpers (travis_fold, check_code) and variables
# ($APP, $FLAVOR, $TAG, $BUILD_NUMBER, Docker credentials) provided by
# .travis.env.sh / the CI settings.

#
# Provision the required files
#
travis_fold start "provision"
source .travis.env.sh
travis_fold end "provision"

#
# Build the app
#
travis_fold start "build"
yarn build
yarn pwa:build
EXIT_CODE=$?
tail -n 24 build.log
check_code $EXIT_CODE 0 "Building the client"  # FIX: message typo "Builing"

# Log in to docker before building the app because of rate limiting
docker login -u="$DOCKER_USER" -p="$DOCKER_PASSWORD"
check_code $? 0 "Connecting to Docker"

# Create an archive to speed docker build process.
# FIX: the exclude pattern was single-quoted ('$APP/test'), so $APP never
# expanded and the app's test directory was always included.
cd ../..
tar --exclude="$APP/test" -zcf $TRAVIS_BUILD_DIR/kalisio.tgz kalisio

# Build the image
cd $TRAVIS_BUILD_DIR
docker build --build-arg APP=$APP --build-arg FLAVOR=$FLAVOR --build-arg BUILD_NUMBER=$BUILD_NUMBER -f dockerfile -t kalisio/$APP:$TAG .
check_code $? 0 "Building the app docker image"
travis_fold end "build"

#
# Deploy the app
#
travis_fold start "deploy"
# Push the app image to the hub with the version tag
docker push kalisio/$APP:$TAG
check_code $? 0 "Pushing the $APP:$TAG docker image"
# Push the app image to the hub with the flavor tag
docker tag kalisio/$APP:$TAG kalisio/$APP:$FLAVOR
docker push kalisio/$APP:$FLAVOR
check_code $? 0 "Pushing the $APP:$FLAVOR docker image"  # FIX: message previously said $TAG
travis_fold end "deploy"
true
49c8b3e68cf8813fc77ed9a4809a2648349ced23
Shell
four43/dotfiles
/zsh/config/hostenv/seth-mini-box/60-i3-workspaces.zsh
UTF-8
721
3.359375
3
[]
no_license
#!/bin/bash DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" # Sets up our "pinned" workspaces and what monitor they should go to. # I3_WS and I3_WS_MONITORS should be "zipped" together if we need them. DISPLAYS=($(echo $DISPLAY_ORDER)) # Pointer to self, so we can source this later to maintain our nested array data structure export WS_CONFIG_FILE="${DOTFILE_DIR}/zsh/config/hostenv/seth-mini-box/$(basename "$0")" # Array: [key] [name] [output] [command] WS_TERMINAL=("1" "terminal" "${DISPLAYS[1]}" "i3-sensible-terminal -e zsh -c \"$DOTFILE_DIR/bin/tmux-start\"") WS_BROWSER=("2" "browser" "${DISPLAYS[0]}" "google-chrome") export WS_CONFIG=( ${WS_TERMINAL[@]} ${WS_BROWSER[@]} )
true
8633565bb4484a7ca0c664a32c9d8804948fc9b1
Shell
ProMatirx/ClenaUp
/docleanup.sh
UTF-8
3,344
3.796875
4
[]
no_license
#!/bin/sh -x
# Docker housekeeping:
#   1. remove images whose ID column shows <none> (dangling layers)
#   2. remove images created 5+ weeks or months ago
#   3. remove containers in Dead/Exited state (saving their logs first)
# Progress is appended to ./cleanUpLog, container logs to ./containerlogs.

result=`which docker`
if [ -z "$result" ]; then
    echo "Error: DOCKER command seems not to be present in this OS."
    echo "System defaults are missing.Sorry, Quitting from installation"
    echo "Thank You"
    exit 1
else
    DOCKER=$result
fi

result=`which awk`
if [ -z "$result" ]; then
    echo "Error: AWK command seems not to be present in this OS."
    echo "System defaults are missing.Sorry, Quitting from installation"
    echo "Thank You"
    exit 1
else
    AWK=$result
fi

result=`which grep`
if [ -z "$result" ]; then
    echo "Error: grep command seems not to be present in this OS."
    echo "System defaults are missing.Sorry, Quitting from installation"
    echo "Thank You"
    exit 1
else
    GREP=$result
fi

echo -e "\n\n=========== Starting the Docker Clean Up Script ==============\n\n"

echo -e "======= Checking Docker images with imageID as 'None' ========"
noneImages=$($DOCKER images | $GREP -w "<none>" | $AWK '{print $3}')
if [ "${noneImages}" != "" ]; then
    for nImages in ${noneImages}; do
        echo "${nImages}"
        ${DOCKER} rmi -f "${nImages}" >> cleanUpLog
        # BUG FIX: the original tested "$# -eq 0" (the script's argument
        # count) instead of the docker command's exit status.
        if [ $? -eq 0 ]; then
            printf '\n======= Docker image with ImageId: %s Deleted Successfully =======\n' "${nImages}" >> cleanUpLog
        else
            printf '\n======= Error while deleting Docker image with ImageId: %s =======\n' "${nImages}" >> cleanUpLog
        fi
    done
else
    echo -e "\n\n====================== [Image ID with <none>]:No Docker Images to delete============\n\n"
fi

echo -e "======= Proceeding to next step, i.e deletion of old images which are one month old =============="
oldImages=$($DOCKER images | $AWK '{print $3,$4,$5}' | $GREP '[5-9]\{1\} weeks\|months' | $AWK '{print $1}')
if [ "$oldImages" != "" ]; then
    for i in ${oldImages}; do
        # BUG FIX: 'docker image prune --all -f' removes EVERY unused image,
        # not the one matched by the loop; delete the matched image by ID.
        ${DOCKER} rmi -f "${i}" >> cleanUpLog
        if [ $? -eq 0 ]; then
            printf '\n ======= Docker image with ImageId: %s Deleted Successfully =======\n' "${i}" >> cleanUpLog
        else
            printf '\n ======= Error while deleting Docker image with ImageId: %s ======= \n' "${i}" >> cleanUpLog
        fi
    done
else
    echo -e "\n =================== No Docker Images to delete ================== \n"
fi

echo -e "======= Proceeding to next step, i.e deletion of oldContainers which are [Dead|Exited] =============="
oldContainers=$($DOCKER ps -a | $GREP "Dead\|Exited" | $AWK '{print $1}')
if [ "$oldContainers" != "" ]; then
    for oContainers in $oldContainers; do
        echo "${oContainers}"  # BUG FIX: original echoed the undefined $j
        # Preserve the container's logs before removing it.
        $DOCKER logs "${oContainers}" >> containerlogs
        $DOCKER rm "${oContainers}" >> cleanUpLog
        if [ $? -eq 0 ]; then
            printf '\n========[Dead|Exited] Docker container with ContaineriID: %s Deleted Successfully======= \n' "${oContainers}" >> cleanUpLog
        else
            printf '\n=======[Dead|Exited] Error while deleting Docker image with COntainedID: %s=======\n' "${oContainers}" >> cleanUpLog
        fi
    done
else
    echo -e "\n======= There no Docker containers with status as 'Exited' ======\n" >> cleanUpLog
fi
true
44eaeeb64e0855bcf16da0696735fc3543e771ee
Shell
PennBBL/rewardAnalysisReprocScripts
/template/templateConstructRewardTest
UTF-8
21,379
3.796875
4
[]
no_license
#!/usr/bin/env bash ################################################################### # ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ # ################################################################### ################################################################### # This utility script uses ANTs routines to create a sample- # specific template. ################################################################### ################################################################### # Constants ################################################################### # none yet ################################################################### # Default settings ################################################################### run=1,2,3,4 out=$(pwd) name=template ################################################################### ################################################################### # BEGIN GENERAL MODULE HEADER ################################################################### ################################################################### # Read in: # * path to localised design file # * overall context in pipeline # * whether to explicitly trace all commands # Trace status is, by default, set to 0 (no trace) ################################################################### trace=0 while getopts "c:o:r:n:t:" OPTION do case $OPTION in c) cohort=${OPTARG} ;; o) out=${OPTARG} ;; r) run=${OPTARG} ;; n) name=${OPTARG} ;; t) trace=${OPTARG} if [[ ${trace} != "0" ]] && [[ ${trace} != "1" ]] then ${XCPEDIR}/xcpModusage mod exit fi ;; *) echo "Option not recognised: ${OPTARG}" ${XCPEDIR}/xcpModusage mod exit esac done shift $((OPTIND-1)) ################################################################### # Ensure that the compulsory cohort variable has been defined ################################################################### [[ -z ${cohort} ]] && Usage && exit [[ -z $(cat ${cohort}) ]] && Usage && exit cxt=0 
################################################################### # Set trace status, if applicable # If trace is set to 1, then all commands called by the pipeline # will be echoed back in the log file. ################################################################### [[ ${trace} == "1" ]] && set -x ################################################################### # Assemble the list of input images. ################################################################### inputs=$(cat ${cohort}) ################################################################### # Initialise the top-level output directory ################################################################### out=${out}/${name} mkdir -p ${out} echo "Template construction directory:" echo "${out}" cxt=$(expr $cxt + 1) ################################################################### # Initialise the unguided stage of template construction. ################################################################### [[ ${NUMOUT} == 1 ]] && prep=${cxt}_ outdir=${out}/${prep}templateInit/ [[ ! -e ${outdir} ]] && mkdir -p ${outdir} ################################################################### # Define paths to the potential outputs of the unguided template- # building step. 
# # Potential outputs include: # * warpInit: # * affineInit: # * deformedInit: # * inverseWarpInit: # * templateInit: # * templateInitUnpadded: # * templateInitDir: ################################################################### warpInit=${outdir}/warp affineInit=${outdir}/affine repairedInit=${outdir}/repaired inverseWarpInit=${outdir}/inverseWarp logInit=${outdir}/log templateInitDir=${outdir} templateInitUnpadded=${outdir}/template_unpadded.nii.gz templateInit=${outdir}/template0.nii.gz templatePadded=${outdir}/template.nii.gz if [[ -n $(echo $run|grep ${cxt}) ]] then echo ""; echo ""; echo "" echo "###################################################################" echo "# ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ #" echo "# #" echo "# ☭ CONSTRUCTING TEMPLATE: INITIALISATION ☭ #" echo "# #" echo "# ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ #" echo "###################################################################" echo "" echo "Output directory is $outdir" mkdir -p ${warpInit} #AS edit mkdir -p ${affineInit} mkdir -p ${repairedInit} mkdir -p ${inverseWarpInit} mkdir -p ${logInit} ################################################################ # Perform the unguided template-building procedure. ################################################################ $ANTSPATH/antsMultivariateTemplateConstructionReward.sh \ -d 3 \ -m 1x1x0 \ -r 1 \ -c 1 \ -o ${outdir} \ ${inputs} ################################################################ # Reorganise the output of the unguided template-building step. 
################################################################ immv ${outdir}/*template* ${templateInitDir}/ immv ${outdir}/*InverseWarp* ${inverseWarpInit}/ immv ${outdir}/*Warp* ${warpInit}/ immv ${outdir}/*warp* ${warpInit}/ immv ${outdir}/*Repaired* ${repairedInit}/ mv ${outdir}/*Affine* ${affineInit}/ mv ${outdir}/job*.sh ${logInit}/ mv ${outdir}/*log.txt ${logInit}/ ################################################################ # Add padding to the initial template. ################################################################ immv ${templateInit} ${templateInitUnpadded} $ANTSPATH/ImageMath 3 \ ${templatePadded} \ PadImage ${templateInitUnpadded} 5 fi cxt=$(expr $cxt + 1) ################################################################### # Initialise the targeted stage of template construction. ################################################################### [[ ${NUMOUT} == 1 ]] && prep=${cxt}_ outdir=${out}/${prep}templateTarg/ [[ ! -e ${outdir} ]] && mkdir -p ${outdir} ################################################################### # Define paths to the potential outputs of the targeted template- # building step. 
# # Potential outputs include: # * warpTarg: # * affineTarg: # * deformedTarg: # * inverseWarpTarg: # * templateTarg: # * templateTargUnpadded: # * templateTargDir: ################################################################### warpTarg=${outdir}/warp affineTarg=${outdir}/affine deformedTarg=${outdir}/deformed repairedTarg=${outdir}/repaired inverseWarpTarg=${outdir}/inverseWarp logTarg=${outdir}/log templateTargDir=${outdir} templateTarg=${outdir}/template.nii.gz if [[ -n $(echo $run|grep ${cxt}) ]] then echo ""; echo ""; echo "" echo "###################################################################" echo "# ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ #" echo "# #" echo "# ☭ CONSTRUCTING TEMPLATE: TARGETED REGISTRATION ☭ #" echo "# #" echo "# ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ #" echo "###################################################################" echo "" echo "Output directory is $outdir" mkdir -p ${warpTarg} mkdir -p ${affineTarg} mkdir -p ${deformedTarg} mkdir -p ${repairedTarg} mkdir -p ${inverseWarpTarg} mkdir -p ${logTarg} ################################################################ # Perform the targeted template-building procedure. ################################################################ $ANTSPATH/antsMultivariateTemplateConstructionReward.sh \ -d 3 \ -z ${templatePadded} \ -o ${outdir} \ ${inputs} ################################################################ # Reorganise the output of the targeted template-building step. 
################################################################ immv ${outdir}/*template* ${templateTargDir}/ immv ${outdir}/*InverseWarp* ${inverseWarpTarg}/ immv ${outdir}/*Warp* ${warpTarg}/ immv ${outdir}/*warp* ${warpTarg}/ immv ${outdir}/*Repaired* ${repairedTarg}/ immv ${outdir}/*deformed* ${deformedTarg}/ immv ${outdir}/*template0 ${templateTarg} mv ${outdir}/*Affine* ${affineTarg}/ mv ${outdir}/job*.sh ${logTarg}/ mv ${outdir}/*log.txt ${logTarg}/ mv ${templateInitDir}/antsBuildTemplate.* ${logTarg}/ fi cxt=$(expr $cxt + 1) ################################################################### # Initialise the masking stage of template construction. # # This is by far the worst-performing stage, and its output should # always be evaluated qualitatively (and probably edited manually). ################################################################### [[ ${NUMOUT} == 1 ]] && prep=${cxt}_ outdir=${out}/${prep}templateMask/ [[ ! -e ${outdir} ]] && mkdir -p ${outdir} ################################################################### # Define paths to the potential outputs of the masking stage. # # Potential outputs include: ################################################################### templateMask=${outdir}/templateMask.nii.gz templateMaskD=${outdir}/templateMaskMD.nii.gz templateMasked=${outdir}/templateBrain.nii.gz templateMaskedD=${outdir}/templateBrain_dilMasked.nii.gz if [[ -n $(echo $run|grep ${cxt}) ]] then echo ""; echo ""; echo "" echo "###################################################################" echo "# ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ #" echo "# #" echo "# ☭ CONSTRUCTING TEMPLATE: MASKING BRAIN ☭ #" echo "# #" echo "# ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ #" echo "###################################################################" echo "" echo "Output directory is $outdir" ################################################################ # Assemble all of the ground truth images for atlas # construction. 
################################################################ oasis=$(cat $XCPEDIR/thirdparty/oasis30/subjects) unset jlfReg for o in ${oasis} do jlfReg="${jlfReg} -g ${XCPEDIR}/thirdparty/oasis30/Heads/${o}.nii.gz" jlfReg="${jlfReg} -l ${XCPEDIR}/thirdparty/oasis30/Segmentations6Class/${o}_seg.nii.gz" done ################################################################ # Perform the JLF routine to generate an anatomical atlas. ################################################################ mkdir -p ${outdir}/labels ${ANTSPATH}/antsJointLabelFusion.sh \ -d 3 \ -o ${outdir}/labels/ \ -t ${templateTarg} \ -c 1 \ -k 1 \ -q 0 \ -p ${outdir}/jlf_Posteriors%02d.nii.gz \ ${jlfReg} ################################################################ # Use the output of JLF to generate an extraction mask. ################################################################ fslmaths ${outdir}/labels/Labels.nii.gz \ -bin ${templateMask} fslmaths ${templateTarg} \ -mul ${templateMask} \ ${templateMasked} ImageMath 3 ${templateMaskD} \ MD ${templateMask} 1 fslmaths ${templateTarg} \ -mul ${templateMaskD} \ ${templateMaskedD} fi cxt=$(expr $cxt + 1) ################################################################### # Initialise the atlas priors stage of template construction. ################################################################### [[ ${NUMOUT} == 1 ]] && prep=${cxt}_ outdir=${out}/${prep}templatePriors/ [[ ! -e ${outdir} ]] && mkdir -p ${outdir} ################################################################### # Define paths to the potential outputs of the production of # atlas priors. 
# # Potential outputs include: ################################################################### labels=${outdir}/labels/Labels.nii.gz intensity=${outdir}/labels/Intensity.nii.gz posteriors=${outdir}/jlf_Posteriors if [[ -n $(echo $run|grep ${cxt}) ]] then echo ""; echo ""; echo "" echo "###################################################################" echo "# ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ #" echo "# #" echo "# ☭ CONSTRUCTING TEMPLATE: ATLAS PRIORS ☭ #" echo "# #" echo "# ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ #" echo "###################################################################" echo "" ################################################################ # Assemble all of the ground truth images for the second round # of atlas construction. ################################################################ oasis=$(cat $XCPEDIR/thirdparty/oasis30/subjects) unset jlfReg for o in ${oasis} do jlfReg="${jlfReg} -g ${XCPEDIR}/thirdparty/oasis30/Brains/${o}.nii.gz" jlfReg="${jlfReg} -l ${XCPEDIR}/thirdparty/oasis30/Segmentations6Class/${o}_seg.nii.gz" done ################################################################ # Perform the JLF routine on brain-only images to generate an # improved anatomical atlas. ################################################################ mkdir -p ${outdir}/labels ${ANTSPATH}/antsJointLabelFusion.sh \ -d 3 \ -o ${outdir}/labels/ \ -t ${templateMasked} \ -x ${templateMask} \ -c 1 \ -k 1 \ -q 0 \ -p ${outdir}/jlf_Posteriors%02d.nii.gz \ ${jlfReg} fi cxt=$(expr $cxt + 1) ################################################################### # Initialise the third stage of atlas priors generation for # template construction. ################################################################### [[ ${NUMOUT} == 1 ]] && prep=${cxt}_ outdir=${out}/${prep}templatePriorsRenorm/ [[ ! 
-e ${outdir} ]] && mkdir -p ${outdir} ################################################################### # Define paths to the potential outputs of the renormalisation of # atlas priors. # # Potential outputs include: ################################################################### binRoot=${outdir}/binary_ priorRoot=${outdir}/prior priorPrenorm=${outdir}/priorPrenorm kmeansDir=${outdir}/kmeans if [[ -n $(echo $run|grep ${cxt}) ]] then echo ""; echo ""; echo "" echo "###################################################################" echo "# ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ #" echo "# #" echo "# ☭ CONSTRUCTING TEMPLATE: RENORMALISING ATLAS PRIORS ☭ #" echo "# #" echo "# ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ #" echo "###################################################################" echo "" ################################################################ # Prepare non-CSF atlas priors. ################################################################ priorNum=(2 3 4 5 6) for i in "${priorNum[@]}" do ${ANTSPATH}/ThresholdImage 3 ${labels} ${binRoot}${i}.nii.gz ${i} ${i} ${ANTSPATH}/SmoothImage 3 ${binRoot}${i}.nii.gz 1.0 ${priorPrenorm}${i}.nii.gz imcall="${imcall} ${priorPrenorm}${i}.nii.gz" done imcall=$(echo ${imcall}|sed s@' '@','@g) ################################################################ # Prepare CSF atlas prior. ################################################################ mkdir ${kmeansDir} ${ANTSPATH}/Atropos -d 3 \ -a ${templateMasked} \ -i KMeans[3] \ -o [${kmeansDir}/kmeansSeg.nii.gz,${kmeansDir}/kmeansPosterior%02d.nii.gz] \ -v \ -x ${templateMask} csfPath=$(ls -d1 ${kmeansDir}/kmeansPosterior*1.nii.gz) ################################################################ # Renormalise all priors. 
################################################################ echo ${XCPEDIR}/thirdparty/utils/renormalisePriorsPreserveCSF.R \ -m ${templateMask} \ -i ${imcall} \ -c ${csfPath} \ -o ${priorRoot} fi cxt=$(expr $cxt + 1) ################################################################### # Initialise the resampling stage of template construction. ################################################################### [[ ${NUMOUT} == 1 ]] && prep=${cxt}_ outdir=${out}/${prep}templateResample/ [[ ! -e ${outdir} ]] && mkdir -p ${outdir} ################################################################### # Define paths to the potential outputs of the template resample # procedure. # # Potential outputs include: ################################################################### templateResampled=${outdir}/template_brain_2mm.nii.gz priorResampled=${outdir}/priors/ if [[ -n $(echo $run|grep ${cxt}) ]] then echo ""; echo ""; echo "" echo "###################################################################" echo "# ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ #" echo "# #" echo "# ☭ CONSTRUCTING TEMPLATE: RESAMPLING ☭ #" echo "# #" echo "# ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ #" echo "###################################################################" echo "" flirt \ -in ${templateMasked} \ -ref ${templateMasked} \ -applyisoxfm 2 \ -out ${templateResampled} mkdir -p ${priorResampled} for p in $(seq 6) do flirt \ -in $(ls -d1 ${priorRoot}*0${p}.nii.gz) \ -ref $(ls -d1 ${priorRoot}*0${p}.nii.gz) \ -applyisoxfm 2 \ -out ${priorResampled}/prior_2mm_${p}.nii.gz done fi cxt=$(expr $cxt + 1) ################################################################### # Initialise the MNI registration stage of template construction. ################################################################### [[ ${NUMOUT} == 1 ]] && prep=${cxt}_ outdir=${out}/${prep}templateTransforms/ [[ ! 
-e ${outdir} ]] && mkdir -p ${outdir} ################################################################### # Define paths to the potential outputs of the registrations # between the template and MNI space. # # Potential outputs include: ################################################################### mniBrain=${FSLDIR}/data/standard/MNI152_T1_1mm_brain.nii.gz transform=${outdir}/transform xfmScript=${outdir}/antsRegisterMNI.sh xfmLog=${outdir}/antsRegisterMNI_log if [[ -n $(echo $run|grep ${cxt}) ]] then echo ""; echo ""; echo "" echo "###################################################################" echo "# ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ #" echo "# #" echo "# ☭ CONSTRUCTING TEMPLATE: REGISTERING TO MNI ☭ #" echo "# #" echo "# ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ ☭ #" echo "###################################################################" echo "" rm -f ${xfmScript} rm -f ${xfmLog} register="${templateMasked},${mniBrain}" echo "#!/usr/bin/env bash" >> ${xfmScript} echo "antsRegistration \ -d 3 \ -o ${transform} \ -u 1 \ -w [0.01,0.99] \ -r [${register},1] \ -m MI[${register},1,32,Regular,0.25] \ -c [1000x500x250x100,1e-8,10] \ -t Rigid[0.1] \ -f 8x4x2x1 \ -s 3x2x1x0 \ -m MI[${register},1,32,Regular,0.25] \ -c [1000x500x250x100,1e-8,10] \ -t Affine[0.1] \ -f 8x4x2x1 \ -s 3x2x1x0 \ -m CC[${register},1,4] \ -c [100x100x70x20,1e-9,15] \ -t SyN[0.1,3,0] \ -f 6x4x2x1 \ -s 3x2x1x0" >> ${xfmScript} id=$(qsub -V -S /bin/bash -cwd -j y -o ${xfmLog} \ ${xfmScript}) id=$(echo ${id}|cut -d' ' -f3) ${XCPEDIR}/utils/qstatus -e ${id} fi
true
7c5f863528ef07e0f4a20c3925ecafc9ecefd6c1
Shell
youchenlee/bin-local
/battery-check
UTF-8
294
2.96875
3
[]
no_license
#!/bin/bash
# Notify the desktop user when the battery drops to 10% or below.
# Source: http://unix.stackexchange.com/questions/60778/how-can-i-get-an-alert-when-my-battery-is-about-to-die-in-linux-mint

# head -n 1 keeps a single value on multi-battery machines; the original
# unquoted multi-line result broke the numeric test below.
battery_level=$(acpi -b | grep -P -o '[0-9]+(?=%)' | head -n 1)

# Skip silently when acpi reports nothing (no battery / desktop machine);
# the original raised "unary operator expected" in that case.
if [ -n "$battery_level" ] && [ "$battery_level" -le 10 ]
then
    notify-send "Battery low" "Battery level is ${battery_level}%!"
fi
true
d45db2ca37ddcef5bc0b37f4da9b342291a7b915
Shell
valeriangalliat/phpfarm-tools
/install
UTF-8
166
2.90625
3
[]
no_license
#!/bin/sh -e
# Symlink every phpfarm-* helper from ../tools into inst/bin so the tools
# are on the phpfarm bin path. Existing links are overwritten (-f).

cd "$(dirname "$0")/../inst/bin"

# Stream find's output line-by-line instead of word-splitting it in a
# for-loop, so paths containing whitespace are handled correctly.
find "../../tools" -name 'phpfarm-*' | while IFS= read -r file; do
    base=$(basename "$file")
    ln -fsv "$file" "$base"
done
true
2ea87f2d97fcf1305407c5326f1c7c5a2d07ed4d
Shell
franchalboha/csaopt
/csaopt/internal/aws_setup_scripts/aws_worker_setup.sh
UTF-8
1,104
2.640625
3
[ "MIT" ]
permissive
#!/usr/bin/env bash
# Provision an AWS worker host: install Docker CE, nvidia-docker2 and the
# NVIDIA/CUDA drivers, then pull the csaopt worker image.
# This script is meant to run unattended, so every interactive prompt is
# suppressed with -y.

sudo apt-get update

# --- Docker CE ------------------------------------------------------------
sudo apt-get install -y \
    apt-transport-https \
    ca-certificates \
    curl \
    software-properties-common

curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
sudo apt-key fingerprint 0EBFCD88

sudo add-apt-repository \
   "deb [arch=amd64] https://download.docker.com/linux/ubuntu \
   $(lsb_release -cs) \
   stable"

sudo apt update && sudo apt install docker-ce -y
# Allow the current user to run docker without sudo (effective on next login).
sudo usermod -aG docker "$USER"

# --- Nvidia Docker --------------------------------------------------------
curl -s -L https://nvidia.github.io/nvidia-docker/gpgkey | \
  sudo apt-key add -
distribution=$(. /etc/os-release;echo $ID$VERSION_ID)
curl -s -L https://nvidia.github.io/nvidia-docker/$distribution/nvidia-docker.list | \
  sudo tee /etc/apt/sources.list.d/nvidia-docker.list
sudo apt update && sudo apt-get install nvidia-docker2 -y
# Reload the Docker daemon config so it picks up the nvidia runtime.
sudo pkill -SIGHUP dockerd

# --- CUDA -----------------------------------------------------------------
# -y is required on both commands: without it add-apt-repository waits for
# "Press ENTER to continue" and apt install waits for a confirmation,
# hanging an unattended provisioning run.
sudo add-apt-repository -y ppa:graphics-drivers/ppa
sudo apt update && sudo apt install -y nvidia-410 cuda-drivers

# Test NVidia Driver
# docker run --runtime=nvidia --rm nvidia/cuda nvidia-smi

docker pull d53dave/csaopt-worker:0.1.1
true
7439a51f6def39872ed6c33c387dde0537bb89ec
Shell
noud/mouse-bsd
/sbin/newbtconf/newbtconf.sh
UTF-8
1,260
4.09375
4
[]
no_license
#!/bin/sh
#
# Setup a new config directory
#
# Usage: newbtconf <newconfig> [<baseconfig>]
#        newbtconf init
#
# "init" converts a flat /etc into the multi-configuration layout
# (/etc/etc.network plus etc.current / etc.default symlinks); otherwise a
# new /etc/etc.<newconfig> is populated from <baseconfig> (default: the
# currently active configuration).

if [ $# -lt 1 ] ; then
	echo "Usage: $0 <newconfig> [<baseconfig>]"
	echo "Usage: $0 init"
	exit 1;
fi

dir=$1
if [ "$dir" = init ] ; then
	if [ -d /etc/etc.network ] || [ -e /etc/etc/current ] ; then
		echo "Error: multi-configuration already initialized"
		exit 1
	fi
	dir=etc.network
	cd /etc
	mkdir -m 755 "$dir"
	ln -s "$dir" etc.current
	ln -s "$dir" etc.default
	# Move each network-related config file into the new directory and
	# leave a symlink through etc.current in its place.
	for i in fstab rc.conf netstart mrouted.conf ntp.conf resolv.conf \
	    nsswitch.conf rbootd.conf inetd.conf ifconfig.* myname \
	    mygate defaultdomain; do
		if [ -f "$i" ] ; then
			mv "$i" "$dir"
			ln -s "etc.current/$i" .
		fi
	done
	echo "/etc/$dir has now been created and populated."
	exit 0
fi

# Normalise the name to carry an "etc." prefix. (The original expr-based
# test compared the captured suffix against the full name and therefore
# prepended "etc." even when it was already present, turning
# "etc.network" into "etc.etc.network".)
case $dir in
etc.*)	;;
*)	dir=etc.$dir ;;
esac

if [ -e "/etc/$dir" ] ; then
	echo "Error: $dir already exists"
	exit 1;
fi

# Human-readable name without the "etc." prefix.
newname=${dir#etc.}

if [ $# -lt 2 ] ; then
	orig=etc.current
	echo "Using current config as base for $newname"
else
	orig=$2
fi
case $orig in
etc.*)	;;
*)	orig=etc.$orig ;;
esac

if [ ! -d "/etc/$orig" ] ; then
	echo "Original directory /etc/$orig does not exist."
	exit 1;
fi

mkdir -m 755 "/etc/$dir"
cp -p "/etc/$orig"/* "/etc/$dir"

echo "/etc/$dir has now been created and populated."
exit 0
true
bd4d59de84606f6e1a1c938b63edcc06973868f0
Shell
ksheedlo/stek
/electl.bash
UTF-8
1,041
3.25
3
[]
no_license
#!/bin/bash
# Rackspace Monitoring Control API
# Usage: electl [METHOD] [ENDPOINT] [BODY]
#
# This plugin drives the control API for Rackspace Monitoring as defined
# in [Mimic](https://github.com/rackerlabs/mimic). It works when running
# stek against Mimic, but not against real Rackspace or Openstack services.
#
electl() {
  local stekUser authToken serviceBase
  stekUser=$(_stek_user)
  _stek_ensure_valid_catalog "$stekUser"
  authToken=$(_stek_token "$stekUser")
  serviceBase=$(_stek_service "$stekUser" "cloudMonitoringControl")

  # Build the curl invocation once; the request body is appended only when
  # a third argument is supplied. This removes the duplicated curl command
  # of the original, which only differed by the -d option.
  local -a curl_args=(
    -H 'Content-Type: application/json'
    -H 'Accept: application/json'
    -H "User-Agent: $(uname -mnrs); $stekUser; stek 0.1.0"
    -H "X-Auth-Token: $authToken"
    "-X$1" "$serviceBase$2"
  )
  if [[ -n "$3" ]]; then
    curl_args+=(-d "$3")
  fi

  curl "${curl_args[@]}" 2>/dev/null | jq '.'
}
true
212dd96e84d93dbdd27c0607654132a0e5f07ac6
Shell
ctic-sje-ifsc/ansible
/roles/openaudit/files/audit_linux.sh
UTF-8
59,795
3.421875
3
[]
no_license
#!/bin/bash #set -x #Este arquivo eh gerado automaticamente pelo Puppet, nao adianta editar. strComputer="." submit_online="y" create_file="n" url="http://inventario.ifsc.edu.br/open-audit/index.php/system/add_system" org_id="19" debugging=1 help="n" ping_target="y" system_id="" PATH="$PATH:/sbin:/usr/sbin" export PATH ORIGIFS=$IFS IFS=$'\n'; ######################################################## # DEFINE SCRIPT FUNCTIONS # ######################################################## timer () # Returns the elapsed time in seconds. # # usage : # # start=$(timer) # # commands... # total_seconds=$(timer "$start") # { if [ $# -eq 0 ]; then date +%s else local stime=$1 etime=$(date '+%s') if [ -z "$stime" ]; then stime=$etime; fi dt=$((etime - stime)) echo "$dt" fi } lcase () # Returns the lower case version of the argument. # # usage : # # lower_version=$(lcase "$var") # { result=$(echo "$1" | awk '{print tolower($0)}') echo "$result" } ucase () # Returns the upper case version of the argument. # # usage : # # upper_version=$(ucase "$var") # { result=$(echo "$1" | awk '{print toupper($0)}') echo "$result" } pcase () # Returns the propper case version of the argument. # # usage : # # proper_version=$(pcase "$var") # { result=$(lcase "$1" | awk '{ for ( i=1; i <= NF; i++) { sub(".", substr(toupper($i),1,1) , $i) } print }') echo "$result" } trim () # Remove the leading/trailing spaces from the argument. # # usage : # # trimmed_version=$(trim "$var") # { result=$(echo "$1" | sed 's/^ *//g' | sed 's/ *$//g') echo "$result" } escape_xml () # If a special character exists in the string, escape the XML. 
# # usage : # # xml_version=$(escape_xml "$var") # { # escape characters result="$1" if echo "$result" | grep -Eq -e '[&<>"]' -e "'"; then result="<![CDATA[$result]]>" fi # Trim leading/trailing spaces result=$(trim "$result") echo "$result" } # cidr2mask () # { # local i mask="" # local full_octets=$(($1/8)) # local partial_octet=$(($1%8)) # for ((i=0;i<4;i+=1)); do # if [ $i -lt $full_octets ]; then # mask+=255 # elif [ $i -eq $full_octets ]; then # mask+=$((256 - 2**(8-partial_octet))) # else # mask+=0 # fi # test $i -lt 3 && mask+=. # done # echo "$mask" # } cidr2mask () { # Number of args to shift, 255..255, first non-255 byte, zeroes set -- $(( 5 - ($1 / 8) )) 255 255 255 255 $(( (255 << (8 - ($1 % 8))) & 255 )) 0 0 0 [ $1 -gt 1 ] && shift $1 || shift echo ${1-0}.${2-0}.${3-0}.${4-0} } between_output () { # usage : # # test=$(between "command" "delimiter" "match" "variable") # command="$1" delimiter="$2" match="$3" variable="$4" # first get all lines between $start and $end (inclusive) for line in $(eval $command); do if [[ "$line" == *"$delimiter"* ]]; then if [ -n "$resultgroup" ]; then # resultgroup contains data, test it if [[ $(echo -e "$resultgroup" | grep "$match" -c ) -ne 0 ]]; then # our match is contained within the resultgroup result=$(echo -e "$resultgroup" | grep "vendor:") break fi resultgroup="" else # resultgroup doesn't contain data, start anew resultgroup="$line" fi else # not a delimiter, so add to the result group resultgroup=$(echo -e "$resultgroup\n$line") fi done # check a last time as we may not have a final delimiter if [[ $(echo -e "$resultgroup" | grep "$match" -c ) -ne 0 ]]; then # our match is contained within the resultgroup result=$(echo -e "$resultgroup" | grep "vendor:") fi echo "$result" } ######################################################## # PROCESS COMMAND-LINE PARAMETERS # ######################################################## # Below we take any command line arguements # to override the variables above, simply 
include them on the command line like submit_online=n # NOTE - argurments are case sensitive for arg in "$@"; do parameter=$(echo "$arg" | cut -d= -f1) parameter=$(lcase "$parameter") parameter=$(trim "$parameter") parameter_value=$(echo "$arg" | cut -d= -f2) parameter_value=$(trim "$parameter_value") case "$parameter" in "create_file" ) create_file="$parameter_value" ;; "debugging" ) debugging="$parameter_value" ;; "help" ) help="$parameter_value" ;; "--help" ) help="y" ;; "-h" ) help="y" ;; "org_id" ) org_id="$parameter_value" ;; "ping_target" ) ping_target="$parameter_value" ;; "strcomputer" ) strComputer="$parameter_value" ;; "submit_online" ) submit_online="$parameter_value" ;; "system_id" ) system_id="$parameter_value" ;; "url" ) url="$parameter_value" ;; "$parameter_value" ) strComputer="$parameter_value" ;; esac done if [ "$help" = "y" ]; then echo "" echo "-----------------------------" echo "Open-AudIT Linux Audit script" echo "-----------------------------" echo "This script should be run on a Linux based computer using root or sudo access rights." echo "" echo "Prerequisites for this script to function correctly can be tested by running audit_linux.sh check_commands=y." echo "" echo "Valid command line options are below (items containing * are the defaults) and should take the format name=value (eg: debugging=1)." echo "" echo " check_commands" echo " y - Run a test to determine if the required commands to run this script are present on the target system." echo " *n - Do not run the test." echo "" echo " create_file" echo " y - Create an XML file containing the audit result." echo " *n - Do not create an XML result file." echo "" echo " debugging" echo " 0 - No output." echo " 1 - Minimal Output." echo " *2 - Verbose output." echo "" echo " -h or --help or help=y" echo " y - Display this help output." echo " *n - Do not display this output." echo "" echo " org_id" echo " - The org_id (an integer) taken from Open-AudIT. 
If set all devices found will be associated to that Organisation."
	echo ""
	echo "  submit_online"
	echo "     *y - Submit the audit result to the Open-AudIT Server defined by the 'url' variable."
	echo "      n - Do not submit the audit result"
	echo ""
	echo "  url"
	echo "    *http://localhost/open-audit/index.php/discovery/process_subnet - The http url of the Open-AudIT Server used to submit the result to."
	echo ""
	echo ""
	echo "NOTE - The netstat section can take a few minutes to complete."
	echo ""
	echo "The name of the resulting XML file will be in the format HOSTNAME-YYMMDDHHIISS.xml, as in the hostname of the machine the the complete timestamp the audit was started."
	exit
fi

# test pinging the server hosting the URL
# If online submission is requested, bail out early when the target web
# server does not answer three pings (100% packet loss).
if [ "$submit_online" = "y" ]; then
	# extract the bare hostname from the url (strip scheme, path and :port)
	server=$(echo "$url" | cut -d"/" -f3 | cut -d: -f1)
	test=$(ping "$server" -n -c 3 | grep "100% packet loss")
	if [ -n "$test" ]; then
		if [ "$debugging" -gt 0 ]; then
			echo "Server $server is not responding to a ping. Cannot submit audit result. Exiting."
		fi
		exit
	fi
fi

########################################################
# CREATE THE AUDIT FILE                                #
########################################################

start_time=$(timer)

if [ "$debugging" -gt 0 ]; then
	echo "Starting audit - $strComputer"
fi

# Optionally ping the audit target first; "." means the local machine and
# is always considered alive.
pc_alive=0
if [ "$ping_target" = "y" ]; then
	if [ "$strComputer" = "." ]; then
		pc_alive=1
	else
		ping_result=$(ping -c1 "$strComputer" 2>/dev/null | grep "time")
		if [ "$ping_result" != "" ]; then
			pc_alive=1
		fi
	fi
fi

if [ "$debugging" -gt 0 ]; then
	if [ "$ping_target" = "n" ]; then
		echo "Not pinging target, attempting to audit."
	else
		if [ "$pc_alive" = "1" ]; then
			echo "PC $strComputer responding to ping"
		else
			echo "PC $strComputer not responding to ping"
		fi
	fi
fi

# Resolve the local hostname (prefer /etc/hostname, then hostname -s,
# then plain hostname) so we can decide local vs. remote audit below.
local_hostname=""
if [ -f /etc/hostname ]; then
	local_hostname=$(cat /etc/hostname 2>/dev/null)
else
	local_hostname=$(hostname -s 2>/dev/null)
fi
if [ -z "$local_hostname" ]; then
	local_hostname=$(hostname 2>/dev/null)
fi

# An audit is "local" when the target is ".", 127.0.0.1 or (case
# insensitively) this machine's own hostname.
if [ "$strComputer" = "." ] || \
   [ "$strComputer" = "127.0.0.1" ] || \
   [ "$(lcase "$strComputer")" = "$(lcase "$local_hostname")" ]; then
	audit_location="local"
else
	audit_location="remote"
fi

# Set the timestamp
system_timestamp=$(date +'%F %T')

# Get the script name
#sScriptName=$(echo "$0" | rev | cut -d/ -f1 | rev)

# Set the Process ID
# NOTE(review): BASHPID is bash-only — confirm the script is always run
# under bash, not a POSIX /bin/sh.
nPID="$BASHPID"

if [ "$debugging" -gt 0 ]; then
	echo "My PID is : $nPID"
	echo "Audit Start Time : $system_timestamp"
	echo "Audit Location: $audit_location"
	echo "-------------------"
fi

#========================
#  SYSTEM INFO          #
#========================

if [ "$debugging" -gt "0" ]; then
	echo "System Info"
fi

# Set the UUID
# Fallback chain: dmidecode -> lshal (if present) -> sysfs.
system_uuid=""
system_uuid=$(dmidecode -s system-uuid 2>/dev/null)
if [ -z "$system_uuid" ] && [ -n "$(which lshal 2>/dev/null)" ]; then
	system_uuid=$(lshal | grep "system.hardware.uuid" | cut -d\' -f2)
fi
if [ -z "$system_uuid" ]; then
	system_uuid=$(cat /sys/class/dmi/id/product_uuid 2>/dev/null)
fi

# Get the hostname & DNS domain
system_hostname=""
if [ -f /etc/hostname ]; then
	system_hostname=$(cat /etc/hostname 2>/dev/null)
else
	system_hostname=$(hostname -s 2>/dev/null)
fi
if [ -z "$system_hostname" ]; then
	system_hostname=$(hostname 2>/dev/null)
	system_domain=""
else
	system_domain=$(hostname -d 2>/dev/null)
fi

# First IPv4 address of the first interface that is administratively UP.
system_ip_address=$(ip addr | grep 'state UP' -A2 | grep inet | awk '{print $2}' | cut -f1 -d'/' | head -n 1)

# Get System Family (Distro Name) and the OS Name
# Debian and Ubuntu will match on the below
#system_description=""
system_type="computer"
system_os_group="Linux"
system_os_family=$(lsb_release -is 2>/dev/null | tr -d '"')
system_os_name=$(lsb_release -ds 2>/dev/null | tr -d '"')
system_os_version=$(lsb_release -rs 2>/dev/null | tr -d '"')

# Some DD-WRT specials stuff
# DD-WRT has no lsb_release; detect it from the message of the day.
if [ -z "$system_os_family" ] && [ -n "$(cat /etc/motd | grep DD-WRT)" ]; then
	system_os_family="DD-WRT"
	system_os_version=$(cat /etc/motd | grep DD-WRT | cut -dv -f2)
	system_os_version="v$system_os_version"
	system_os_name="DD-WRT $system_os_version"
	#system_ip_address=$(ifconfig | grep UP | )
fi

# Scan the classic /etc/*-release and /etc/*-version files to classify
# Suse / CentOS / RedHat / Fedora systems that lack lsb_release.
for system_release_file in /etc/*[_-]version /etc/*[_-]release; do
	[ -f "$system_release_file" ] || continue;
	[ "$system_release_file" = "/etc/os-release" ] && continue;
	if [ -z "$system_os_name" ]; then
		system_os_name=$(cat "$system_release_file")
	fi

	# Suse Based
	if echo "$system_os_name" | grep -Fqi "Suse" ; then
		if [ -z "$system_os_family" ]; then
			system_os_family="Suse"
		fi
		break;
	fi

	# CentOS based - must come before RedHat based
	if [ "$system_release_file" = "/etc/centos-release" ]; then
		if [ -z "$system_os_family" ]; then
			system_os_family="CentOS";
			# prefer a major.minor version, fall back to the major only
			system_os_version=$(grep -o '[0-9]\.[0-9]' "$system_release_file" 2>/dev/null)
			if [ -z "$system_os_version" ]; then
				system_os_version=$(grep -o '[0-9].' "$system_release_file" 2>/dev/null)
			fi
		fi
		break;
	fi

	# RedHat based
	if [ "$system_release_file" = "/etc/redhat-release" ]; then
		if [[ "$(cat "$system_release_file")" == *"Red Hat"* ]]; then
			system_os_family="RedHat"
		fi
		if [[ "$(cat "$system_release_file")" == *"CentOS"* ]]; then
			system_os_family="CentOS"
		fi
		if [[ "$(cat "$system_release_file")" == *"Fedora"* ]]; then
			system_os_family="Fedora"
		fi
		if [ -z "$system_os_version" ]; then
			system_os_version=$(grep -o '[0-9]\.[0-9]' "$system_release_file" 2>/dev/null)
			if [ -z "$system_os_version" ]; then
				system_os_version=$(grep -o '[0-9].' "$system_release_file" 2>/dev/null)
			fi
		fi
		break;
	fi
done

# Set the icon as the lower case version of the System Family.
system_os_icon=$(lcase $system_os_family)

# Get the System Serial Number
# Fallback chain: dmidecode -> lshal (if present) -> sysfs.
system_serial=""
system_serial=$(dmidecode -s system-serial-number 2>/dev/null)
if [ -z "$system_serial" ]; then
	if [ -n "$(which lshal 2>/dev/null)" ]; then
		system_serial=$(lshal | grep "system.hardware.serial" | cut -d\' -f2)
	fi
fi
if [ -z "$system_serial" ]; then
	system_serial=$(cat /sys/class/dmi/id/product_serial 2>/dev/null)
fi

# Get the System Model
system_model=""
system_model=$(dmidecode -s system-product-name 2>/dev/null)
if [ -z "$system_model" ] && [ -n "$(which lshal 2>/dev/null)" ]; then
	system_model=$(lshal | grep "system.hardware.product" | cut -d\' -f2)
fi
if [ -z "$system_model" ]; then
	system_model=$(cat /sys/devices/virtual/dmi/id/product_name 2>/dev/null)
fi

# Get the System Manufacturer
system_manufacturer=""
system_manufacturer=$(dmidecode -s system-manufacturer 2>/dev/null)
if [ -z "$system_manufacturer" ]; then
	if [ -n "$(which lshal 2>/dev/null)" ]; then
		system_manufacturer=$(lshal | grep "system.hardware.vendor" | cut -d\' -f2)
	fi
fi
if [ -z "$system_manufacturer" ]; then
	system_manufacturer=$(cat /sys/devices/virtual/dmi/id/sys_vendor 2>/dev/null)
fi

# A few specific checks below here
# OpenVZ guest: /proc/user_beancounters exists and PID 1's envID is not 1.
if [ -z "$system_model" ] && [[ -e "/proc/user_beancounters" ]] && [[ "$(cat /proc/1/status 2>/dev/null | grep "^envID:" | cut -d: -f2 | awk '{print $1}')" != "1" ]]; then
	# Test for an OpenVZ guest
	system_model="OpenVZ"
	system_manufacturer="OpenVZ"
fi
if [ -z "$system_model" ] && [ -n "$(which dmidecode 2>/dev/null)" ]; then
	if [[ "$(dmidecode | egrep -i 'manufacturer')" == *"Microsoft"* ]]; then
		# test for a Microsoft virtual machine
		system_model="Virtual Machine"
		system_manufacturer="Microsoft"
	fi
fi

# Get the System Uptime (whole seconds from /proc/uptime)
system_uptime=$(cut -d. -f1 < /proc/uptime)

# Get the System Form factor
# Known hypervisor model strings are reported as "Virtual"; otherwise use
# the DMI chassis type, title-cased via pcase.
system_form_factor=""
if [ "$system_model" = "Bochs" -o "$system_model" = "KVM" -o "$system_model" = "Virtual Machine" -o "$system_model" = "VMware Virtual Platform" -o "$system_model" = "OpenVZ" -o "$system_model" = "VirtualBox" ]; then
	system_form_factor="Virtual"
else
	system_form_factor=$(dmidecode -s chassis-type 2>/dev/null)
	if [ "$system_form_factor" = "<OUT OF SPEC>" ]; then
		system_form_factor="Unknown"
	fi
	system_form_factor=$(pcase $system_form_factor)
fi
if [ -z "$system_form_factor" ]; then
	if [ -n "$(which lshal 2>/dev/null)" ]; then
		system_form_factor=$(lshal | grep "system.chassis.type" | cut -d\' -f2)
	fi
fi

# Get OS bits (64 if uname reports an *_64 machine/platform, else 32)
system_pc_os_bit=$(uname -m | grep 64 | cut -d_ -f2)
if [ -z "$system_pc_os_bit" ]; then
	system_pc_os_bit=$(uname -i | grep 64 | cut -d_ -f2)
fi
if [ -z "$system_pc_os_bit" ]; then
	system_pc_os_bit=32
fi

# Get the System Memory (MemTotal in kB, trimmed of whitespace)
system_pc_memory=$(grep MemTotal /proc/meminfo | cut -d: -f2 | cut -dk -f1)
system_pc_memory=$(trim "$system_pc_memory")

# Get the Number of Physical Processors
#
# Each Physical Processor have one or more Processor Cores.
# Each Processor Core have one or more Threads
# Each thread appears as one active processor to the OS
# EX:
# Two Dual Core Processors with Hyper-Threading enabled will show :
#	system_pc_total_threads=8
#	system_pc_threads_x_processor=4
#	system_pc_cores_x_processor=2
#
# Two Dual Core Processors with Hyper-Threading disabled will show :
#	system_pc_total_threads=4
#	system_pc_threads_x_processor=2
#	system_pc_cores_x_processor=2
#
# One Quad Core Processor with Hyper-Threading disabled will show :
#	system_pc_total_threads=4
#	system_pc_threads_x_processor=4
#	system_pc_cores_x_processor=4
#
# system_pc_physical_processors = system_pc_total_threads / system_pc_threads_x_processor
#
system_pc_total_threads=$(grep -c processor /proc/cpuinfo)
system_pc_cores_x_processor=$(grep cores /proc/cpuinfo | head -n1 | cut -d: -f2)
system_pc_cores_x_processor=$(trim "$system_pc_cores_x_processor")
if [ -z "$system_pc_cores_x_processor" ] && [ -n "$(which lshal 2>/dev/null)" ]; then
	system_pc_cores_x_processor=$(lshal | grep -c "processor.number")
fi
# RedHat 6.5 doesn't work with the above, so....
if [ -z "$system_pc_cores_x_processor" ]; then
	system_pc_cores_x_processor=1
fi
# The number of siblings tell us the number of Threads x Physical Processor
system_pc_threads_x_processor=$(grep siblings /proc/cpuinfo | head -n1 | cut -d: -f2)
system_pc_threads_x_processor=$(trim "$system_pc_threads_x_processor")
if [ -z "$system_pc_threads_x_processor" ]; then
	system_pc_threads_x_processor=1
fi
system_pc_physical_processors=$((system_pc_total_threads / system_pc_threads_x_processor))
if [ "$system_pc_physical_processors" == "0" ]; then
	system_pc_physical_processors="1"
fi

# Guess the OS Installation Date
# There is no way to know for sure the install date. /etc/distro-release should give a clue, but it is not really accurate
# NOTE(review): $system_release_file holds whatever value the detection
# loop above left in it — the mtime of that file is only a rough guess.
#
if [ -n "$(which stat 2>/dev/null)" ]; then
	system_pc_date_os_installation=$(stat "$system_release_file" | grep "^Modify:" | cut -d" " -f2)
else
	system_pc_date_os_installation=""
fi

#'''''''''''''''''''''''''''''''''
#'  Write to the audit file     '
#'''''''''''''''''''''''''''''''''
# Open the result file (HOSTNAME-YYYYmmddHHMMSS.xml) and write the <sys>
# header section; later sections append (>>) to the same file.
xml_file="$system_hostname"-$(date +%Y%m%d%H%M%S).xml
{
echo "form_systemXML=<?xml version=\"1.0\" encoding=\"UTF-8\"?>"
echo "<system>"
echo " <sys>"
echo " <timestamp>$(escape_xml "$system_timestamp")</timestamp>"
echo " <uuid>$(escape_xml "$system_uuid")</uuid>"
echo " <hostname>$(escape_xml "$system_hostname")</hostname>"
echo " <man_ip_address>$(escape_xml "$system_ip_address")</man_ip_address>"
echo " <domain>$(escape_xml "$system_domain")</domain>"
echo " <description></description>"
echo " <type>$(escape_xml "$system_type")</type>"
echo " <os_icon>$(escape_xml "$system_os_icon")</os_icon>"
echo " <os_group>$(escape_xml "$system_os_group")</os_group>"
echo " <os_family>$(escape_xml "$system_os_family")</os_family>"
echo " <os_name>$(escape_xml "$system_os_name")</os_name>"
echo " <os_version>$(escape_xml "$system_os_version")</os_version>"
echo " <serial>$(escape_xml "$system_serial")</serial>"
echo " <model>$(escape_xml "$system_model")</model>"
echo " <manufacturer>$(escape_xml "$system_manufacturer")</manufacturer>"
echo " <uptime>$(escape_xml "$system_uptime")</uptime>"
echo " <form_factor>$(escape_xml "$system_form_factor")</form_factor>"
echo " <pc_os_bit>$(escape_xml "$system_pc_os_bit")</pc_os_bit>"
echo " <pc_memory>$(escape_xml "$system_pc_memory")</pc_memory>"
echo " <pc_num_processor>$(escape_xml "$system_pc_total_threads")</pc_num_processor>"
echo " <pc_date_os_installation>$(escape_xml "$system_pc_date_os_installation")</pc_date_os_installation>"
echo " <man_org_id>$(escape_xml "$org_id")</man_org_id>"
echo " <system_id>$(escape_xml "$system_id")</system_id>"
echo " </sys>"
} > "$xml_file"
##################################
# BIOS SECTION                   #
##################################

if [ "$debugging" -gt "0" ]; then
	echo "BIOS Info"
fi

# Get the BIOS Manufacturer
# Fallback chain: dmidecode -> lshal (two HAL keys) -> sysfs.
bios_manufacturer=""
bios_manufacturer=$(dmidecode -s bios-vendor 2>/dev/null)
if [ -z "$bios_manufacturer" ]; then
	if [ -n "$(which lshal 2>/dev/null)" ]; then
		bios_manufacturer=$(lshal | grep "smbios.bios.vendor" | cut -d\' -f2)
		if [ -z "$bios_manufacturer" ]; then
			bios_manufacturer=$(lshal | grep "system.firmware.vendor" | cut -d\' -f2)
		fi
	fi
	if [ -z "$bios_manufacturer" ]; then
		bios_manufacturer=$(cat /sys/class/dmi/id/bios_vendor 2>/dev/null)
	fi
fi

# Get the BIOS Firmware Revision
bios_firm_rev=""
bios_firm_rev=$(dmidecode 2>/dev/null | grep "Firmware Revision" | cut -d: -f2)
bios_firm_rev=$(trim "$bios_firm_rev")
if [ -z "$bios_firm_rev" ]; then
	if [ -n "$(which lshal 2>/dev/null)" ]; then
		bios_firm_rev=$(lshal | grep "smbios.bios.version" | cut -d\' -f2)
		if [ -z "$bios_firm_rev" ]; then
			bios_firm_rev=$(lshal | grep "system.firmware.version" | cut -d\' -f2)
		fi
		if [ -z "$bios_firm_rev" ]; then
			bios_firm_rev=$(cat /sys/class/dmi/id/bios_version)
		fi
	fi
fi

# Make the BIOS Description using the manufacturer - Firmware Rev
# (first word of the manufacturer only, e.g. "American BIOS - Firmware Rev. 1.2")
if [ -n "$bios_firm_rev" ]; then
	bios_description=$(echo "$bios_manufacturer" | cut -d" " -f1)" BIOS - Firmware Rev. $bios_firm_rev"
else
	bios_description=$(echo "$bios_manufacturer" | cut -d" " -f1)" BIOS"
fi

# Get the BIOS Serial = System Serial
bios_serial="$system_serial"

# Get the SMBIOS Version
bios_smversion=""
bios_smversion=$(dmidecode 2>/dev/null | grep -i SMBIOS | cut -d' ' -f2)
if [ -z "$bios_smversion" ]; then
	if [ -n "$(which lshal 2>/dev/null)" ]; then
		bios_smversion=$(lshal | grep "smbios.bios.version" | cut -d\' -f2)
		if [ -z "$bios_smversion" ]; then
			bios_smversion=$(lshal | grep "system.firmware.version" | cut -d\' -f2)
		fi
	fi
fi

# Get the BIOS Version
# Built from up to three dmidecode fields: version (p1), revision (p2)
# and release date (p3).
bios_version_p1=$(dmidecode -s bios-version 2>/dev/null)
bios_version_p2=$(dmidecode 2>/dev/null | grep "BIOS Revision" | cut -d: -f2)
bios_version_p2=$(trim "$bios_version_p2")
bios_version_p3=$(dmidecode -s bios-release-date 2>/dev/null)
if [ -n "$bios_version_p1" ]; then
	if [ -n "$bios_version_p2" ]; then
		bios_version="V.$bios_version_p1 Rev.$bios_version_p2 - $bios_version_p3"
	else
		bios_version="V.$bios_version_p1 - $bios_version_p3"
	fi
fi
if [ -z "$bios_version" ] && [ -n "$(which lshal 2>/dev/null)" ]; then
	bios_version=$(lshal | grep "smbios.bios.version" | cut -d\' -f2)
	if [ -z "$bios_version" ]; then
		bios_version=$(lshal | grep "system.firmware.version" | cut -d\' -f2)
	fi
fi

#'''''''''''''''''''''''''''''''''
#'  Write to the audit file     '
#'''''''''''''''''''''''''''''''''
{
echo " <bios>"
echo " <bios_description>$(escape_xml "$bios_description")</bios_description>"
echo " <bios_manufacturer>$(escape_xml "$bios_manufacturer")</bios_manufacturer>"
echo " <bios_serial>$(escape_xml "$bios_serial")</bios_serial>"
echo " <bios_smversion>$(escape_xml "$bios_smversion")</bios_smversion>"
echo " <bios_version>$(escape_xml "$bios_version")</bios_version>"
echo " </bios>"
} >> "$xml_file"

##################################
# PROCESSOR SECTION              #
##################################

if [ "$debugging" -gt "0" ]; then
	echo "Processor Info"
fi

# Get processor socket type (DMI "Upgrade" field of the first processor)
processor_socket=$(dmidecode -t processor 2>/dev/null | grep Upgrade | head -n1 | cut -d: -f2 2>/dev/null)

# Get processor description
processor_description=$(grep "model name" /proc/cpuinfo | head -n1 | cut -d: -f2)

# Get processor speed (current MHz of CPU 0, rounded to nearest integer)
processor_speed=$(grep "cpu MHz" /proc/cpuinfo | head -n1 | cut -d: -f2 | awk '{printf("%d\n",$1 + 0.5)}')

# Get processor manufacturer
processor_manufacturer=$(grep vendor_id /proc/cpuinfo | head -n1 | cut -d: -f2)

# Get processor power management support
# Treated as supported when DMI lists a Thermal entry or HAL reports
# processor.can_throttle; normalised to the strings "True"/"False".
processor_power_management_supported=$(dmidecode -t processor 2>/dev/null | grep Thermal 2>/dev/null)
if [ -z "$processor_power_management_supported" ]; then
	if [ -n "$(which lshal 2>/dev/null)" ]; then
		processor_power_management_supported=$(lshal | grep -m 1 "processor.can_throttle" | cut -d= -f2 | cut -d" " -f2)
	fi
fi
if [ -n "$processor_power_management_supported" ]; then
	processor_power_management_supported="True"
else
	processor_power_management_supported="False"
fi

#'''''''''''''''''''''''''''''''''
#'  Write to the audit file     '
#'''''''''''''''''''''''''''''''''
# Derive total core/thread counts from the per-processor figures
# computed in the SYSTEM INFO section.
let total_cores=$system_pc_cores_x_processor*$system_pc_physical_processors
let total_logical_processors=$system_pc_threads_x_processor*$system_pc_physical_processors
{
echo " <processor>"
echo " <processor_count>$(escape_xml "$system_pc_physical_processors")</processor_count>"
echo " <processor_cores>$(escape_xml "$total_cores")</processor_cores>"
echo " <processor_logical>$(escape_xml "$total_logical_processors")</processor_logical>"
echo " <processor_socket>$(escape_xml "$processor_socket")</processor_socket>"
echo " <processor_description>$(escape_xml "$processor_description")</processor_description>"
echo " <processor_speed>$(escape_xml "$processor_speed")</processor_speed>"
echo " <processor_manufacturer>$(escape_xml "$processor_manufacturer")</processor_manufacturer>"
echo " <processor_power_management_supported>$(escape_xml "$processor_power_management_supported")</processor_power_management_supported>"
echo " </processor>"
} >> "$xml_file"
################################## # MEMORY SECTION # ################################## if [ "$debugging" -gt "0" ]; then echo "Memory Info" fi memory_slots="0" memory_slots=$(dmidecode -t 17 2>/dev/null | awk '/DMI type 17/{print $2}' | wc -l) if [ "$memory_slots" != "0" ]; then echo " <memory>">> "$xml_file" for memory_handle in $(dmidecode -t 17 2>/dev/null | awk '/DMI type 17/{print $2}'); do # memory_detail and memory_type are switched here to match the Windows results bank_info=$(dmidecode -t 17 2>/dev/null | sed -n '/^Handle '"$memory_handle"'/,/^$/p') memory_bank=$(echo "$bank_info" | awk '/^[^B]+Locator:/{for (u=2; u<=NF; u++){printf("%s ", $u)}printf("\n")}' | awk '{gsub(" ","");print}') memory_detail=$(echo "$bank_info" | awk '/Type:/{for (u=2; u<=NF; u++){printf("%s ", $u)}printf("\n")}' | awk '{gsub(" ","");print}') if [ "$memory_detail" = "<OUT OF SPEC>" ]; then system_form_factor="Unknown" fi memory_form_factor=$(echo "$bank_info" | awk '/Form Factor/{for (u=3; u<=NF; u++){printf("%s ", $u)}printf("\n")}' | cut -d" " -f1) memory_type=$(echo "$bank_info" | awk '/Type Detail:/{for (u=3; u<=NF; u++){printf("%s ", $u)}printf("\n")}' | cut -d" " -f1) memory_capacity=$(echo "$bank_info" | awk '/Size:/{print $2}' | sed 's/[^0-9]//g') if [ "$(echo "$bank_info" | awk '/Size:/{print $3}')" = "kB" ];then memory_capacity=$((memory_capacity / 1024)) fi memory_speed=$(echo "$bank_info" |\ awk '/Speed:/{for (u=2; u<=NF; u++){printf("%s ", $u)}printf("\n")}' |\ sed 's/[[:space:]]MHz.*//g') memory_tag=$(echo "$bank_info" |\ awk '/Bank L.*:/{for (u=3; u<=NF; u++){printf("%s ", $u)}printf("\n")}') memory_serial=$(echo "$bank_info" |\ awk '/Serial Number:/{for (u=3; u<=NF; u++){printf("%s ", $u)}printf("\n")}' |\ cut -d" " -f1) if [ "$memory_serial" = "Not" ] || [ "$memory_serial" = "Not " ] || [ "$memory_serial" = "Not Specified" ]; then memory_serial="" fi # Ignore empty slots if [ -n "$memory_capacity" ]; then { echo " <slot>" echo " <bank>$(escape_xml 
"$memory_bank")</bank>" echo " <type>$(escape_xml "$memory_type")</type>" echo " <form_factor>$(escape_xml "$memory_form_factor")</form_factor>" echo " <detail>$(escape_xml "$memory_detail")</detail>" echo " <capacity>$(escape_xml "$memory_capacity")</capacity>" echo " <speed>$(escape_xml "$memory_speed")</speed>" echo " <tag>$(escape_xml "$memory_tag")</tag>" echo " <serial>$(escape_xml "$memory_serial")</serial>" echo " </slot>" } >> "$xml_file" fi done echo " </memory>">> "$xml_file" fi ################################## # MOTHERBOARD SECTION # ################################## if [ "$debugging" -gt "0" ]; then echo "Motherboard Info" fi mobo_manufacturer=$(dmidecode -s baseboard-manufacturer 2> /dev/null) mobo_model=$(dmidecode -s baseboard-product-name 2> /dev/null) mobo_version=$(dmidecode -s baseboard-version 2> /dev/null | grep -v Not) mobo_serial=$(dmidecode -s baseboard-serial-number 2> /dev/null) if [ -n "$mobo_version" ]; then # Report both Model and Version mobo_model="$mobo_model - $mobo_version" fi #''''''''''''''''''''''''''''''''' #' Write to the audit file ' #''''''''''''''''''''''''''''''''' if [ -n "$mobo_manufacturer" ] || [ -n "$mobo_model" ]; then { echo " <motherboard>" echo " <manufacturer>$(escape_xml "$mobo_manufacturer")</manufacturer>" echo " <model>$(escape_xml "$mobo_model")</model>" echo " <serial>$(escape_xml "$mobo_serial")</serial>" echo " <processor_slots>$(escape_xml "$system_pc_physical_processors")</processor_slots>" echo " <processor_type>$(escape_xml "$processor_socket")</processor_type>" echo " <memory_slots>$(escape_xml "$memory_slots")</memory_slots>" echo " </motherboard>" } >> "$xml_file" fi ################################## # OPTICAL DRIVES SECTION # ################################## if [ "$debugging" -gt "0" ]; then echo "Optical Drives Info" fi optical_num_devices=$(cdrdao scanbus 2>&1 | grep -c '/dev') if [ "$optical_num_devices" != "0" ]; then # The exact mount point will depend on the cd/dvd volume name. 
Older GNU/Linux distros always mount on /mnt/cdrom # Modern ditros use /media for any removable media (cd/dvd/usb) followed by the volume name if test -d /media; then optical_drive_mount_point="/media" else optical_drive_mount_point="/mnt/cdrom" fi #''''''''''''''''''''''''''''''''' #' Write to the audit file ' #''''''''''''''''''''''''''''''''' echo " <optical_drives>" >> "$xml_file" for optical_device in $(cdrdao scanbus 2>&1 | grep '/dev'); do temp="" temp=$(echo "$optical_device" | cut -d: -f2 | cut -d, -f1) optical_drive_vendor=$(ucase trim "$temp") temp="" temp=$(echo "$optical_device" | cut -d: -f2 | cut -d, -f2) optical_drive_model=$(trim "$temp") temp="" temp=$(echo "$optical_device" | cut -d: -f2 | cut -d, -f3) optical_drive_release=$(trim "$temp") if [ -n "$optical_drive_release" ]; then optical_drive_release="Rel.$optical_drive_release" fi temp="" temp=$(echo "$optical_device" | cut -d: -f1) optical_device_ID=$(trim "$temp") optical_caption="$optical_drive_vendor $optical_drive_model" { echo " <optical_drive>" echo " <optical_drive_caption>$(escape_xml "$optical_caption")</optical_drive_caption>" echo " <optical_drive_model>$(escape_xml "$optical_caption $optical_drive_release")</optical_drive_model>" echo " <optical_drive_device_id>$(escape_xml "$optical_device_ID")</optical_drive_device_id>" echo " <optical_drive_mount_point>$(escape_xml "$optical_drive_mount_point")</optical_drive_mount_point>" echo " </optical_drive>" } >> "$xml_file" done echo " </optical_drives>" >> "$xml_file" fi ################################## # VIDEO CARDS SECTION # ################################## if [ "$debugging" -gt "0" ]; then echo "Video Cards Info" fi video_pci_adapters="" video_pci_adapters=$(lspci 2>/dev/null | grep VGA | cut -d" " -f1) if [ -n "$video_pci_adapters" ]; then echo " <video_cards>" >> "$xml_file" for video_adapter in $video_pci_adapters; do video_device_name=$(lspci -vms "$video_adapter" | grep '^Device' | tail -n1 | cut -d: -f2 | cut -c2-) 
video_revision=$(lspci -vms "$video_adapter" | grep '^Rev' | cut -d: -f2 | cut -c2-) video_description="$video_device_name" # Add the revision if [ -n "$video_revision" ]; then video_description="$video_device_name (Rev: $video_revision)" fi video_manufacturer=$(lspci -vms "$video_adapter" | grep '^Vendor' | cut -d: -f2 | cut -c2-) video_memory=$(lspci -vs "$video_adapter" | grep Memory | tail -n1 | cut -d= -f2 | sed 's/[^0-9]//g') { echo " <video_card>" echo " <video_description>$(escape_xml "$video_description")</video_description>" echo " <video_manufacturer>$(escape_xml "$video_manufacturer")</video_manufacturer>" echo " <video_memory>$(escape_xml "$video_memory")</video_memory>" echo " </video_card>" } >> "$xml_file" done echo " </video_cards>" >> "$xml_file" fi ################################## # SOUND CARDS SECTION # ################################## if [ "$debugging" -gt "0" ]; then echo "Sound Cards Info" fi sound_pci_adapters="" sound_pci_adapters=$(lspci 2>/dev/null | grep -Ei 'audio | multmedia' | cut -d" " -f1) if [ -n "$sound_pci_adapters" ]; then echo " <sound_cards>" >> "$xml_file" for sound_adapter in $sound_pci_adapters; do sound_device_name=$(lspci -vms "$sound_adapter" | grep '^Device' | tail -n1 | cut -d: -f2 | cut -c2-) sound_revision=$(lspci -vms "$sound_adapter" | grep '^Rev' | cut -d: -f2 | cut -c2-) sound_name="$sound_device_name" if [ -n "$sound_revision" ]; then sound_name="$sound_device_name (Rev: $sound_revision)" fi sound_manufacturer=$(lspci -vms "$sound_adapter" | grep '^Vendor' | cut -d: -f2 | cut -c2-) { echo " <sound_card>" echo " <sound_name>$(escape_xml "$sound_name")</sound_name>" echo " <sound_manufacturer>$(escape_xml "$sound_manufacturer")</sound_manufacturer>" echo " <sound_device_id>$(escape_xml "$sound_adapter")</sound_device_id>" echo " </sound_card>" } >> "$xml_file" done echo " </sound_cards>" >> "$xml_file" fi ################################## # SHARES SECTION # ################################## if [ "$debugging" 
-gt "0" ]; then echo "Shares Info" fi echo " <shares>" >> "$xml_file" case $system_os_family in 'Ubuntu' | 'Debian' ) service smbd status 2> /dev/null |\ grep running > /dev/null && sed -e '/^$/d' -e 's/^[ \t]*//' -e '/^[#;]/d' /etc/samba/smb.conf |\ grep -E "^\[|^comment|^path" |\ sed -e '/^\[global\]/d' -e 's/\]$//' -e 's/^comment = //' -e 's/^path = //' |\ awk 'BEGIN { RS = "[" ; FS = "\n" } { print "\t\t<share>"; print "\t\t\t<share_name>",$1,"</share_name>"; print "\t\t\t<share_caption>",$2,"</share_caption>"; print "\t\t\t<share_path>",$3,"</share_path>"; print "\t\t</share>" }' |\ tail -n +6 >>\ "$xml_file" ;; 'CentOS' | 'RedHat' | 'SUSE' | 'Fedora' ) service smb status > /dev/null 2>&1 &&\ sed -e '/^$/d' -e 's/^[ \t]*//' -e '/^[#;]/d' /etc/samba/smb.conf |\ grep -E "^\[|^comment|^path" |\ sed -e '/^\[global\]/d' -e 's/\]$//' -e 's/^comment = //' -e 's/^path = //' |\ awk 'BEGIN { RS = "[" ; FS = "\n" } { print "\t\t<share>"; print "\t\t\t<share_name>",$1,"</share_name>"; print "\t\t\t<share_caption>",$2,"</share_caption>"; print "\t\t\t<share_path>",$3,"</share_path>"; print "\t\t</share>" }' |\ tail -n +6 >>\ "$xml_file" ;; esac echo " </shares>" >> "$xml_file" ################################## # NETWORK CARDS SECTION # ################################## if [ "$debugging" -gt "0" ]; then echo "Network Cards Info" fi net_cards="" temp=$(ls /sys/class/net/) for dir in $temp; do if [ -e "/sys/class/net/$dir/device" ]; then dev="" dev=$(echo "/sys/class/net/$dir" | readlink -f "/sys/class/net/$dir/device" | awk -F/ '{ print $5 }' | awk -F: '{ print $2":"$3 }' | tr -d '[:blank:]' 2>/dev/null) if [ -n "$dev" ]; then if [ -n "$net_cards" ]; then net_cards=$(trim "$net_cards"$'\n'"$dev/$dir") else net_cards=$(trim "$dev/$dir") fi fi fi done dev="" temp="" if [ -n "$net_cards" ]; then # Store the IP Addresses Information in a variable to write it later on the file addr_info="" echo " <network_cards>" >> "$xml_file"; for net_card_connection_id in $net_cards; do 
net_card_id=$(echo "$net_card_connection_id" | cut -d/ -f2) net_card_pci=$(echo "$net_card_connection_id" | cut -d/ -f1) net_card_mac=$(cat /sys/class/net/"$net_card_id"/address) net_index=$(cat /sys/class/net/"$net_card_id"/ifindex) if [ "$net_card_pci" = 'virtual' ]; then net_card_model="Virtual Interface" net_card_manufacturer="Linux" elif [ "$(which lspci 2>/dev/null)" != "" ]; then net_card_model=$(lspci -vms "$net_card_pci" | grep -v "$net_card_pci" | grep ^Device | cut -d: -f2 | cut -c2-) net_card_manufacturer=$(lspci -vms "$net_card_pci" | grep ^Vendor | cut -d: -f2 | cut -c2-) elif [[ "$system_model" == *"VMware"* ]]; then net_card_model="PCI bridge" net_card_manufacturer="VMware" else net_card_model="" net_card_manufacturer="" fi net_card_description="$net_card_model" net_card_speed="" if [ -z "$(echo "$net_card_id" | awk '/^wl/{print $1}')" ]; then if [ -z "$(which ethtool 2>/dev/null)" ]; then # we don't have ethtool installed net_card_type="Ethernet 802.3" else net_card_speed="" net_card_speed=$(ethtool "$net_card_id" 2>/dev/null | grep Speed | cut -d: -f2 | sed 's/[^0-9]//g') net_card_type="Ethernet 802.3" fi else # This is a wireless link if [ -z "$(which iwlist 2>/dev/null)" ]; then net_card_speed=$(iwlist "$net_card_id" bitrate | grep Current | cut -d. 
-f1 | grep -oE '[[:digit:]]*') else net_card_speed="" fi net_card_type="Wireless Ethernet 802.11" fi # if a speed was detected, it needs to be multiplied to show up in the web if [ $net_card_speed ]; then net_card_speed=$((net_card_speed * 1000000)) fi temp=$(cat /sys/class/net/"$net_card_id"/operstate) net_card_status=$(trim "$temp") if [ "$net_card_status" = "up" ]; then net_card_status="Connected" else net_card_status="Disconnected" fi net_card_enabled="False" # Get Info on active IPV4 Addresses for this card for net_card_enabled_ip4_addr in $(ip addr show "$net_card_id" | grep 'inet ' | cut -dt -f2 | cut -db -f1 | cut -c2- | cut -d" " -f1); do net_card_enabled="True" net_card_enabled_ip6_addr="" #echo "NCEIA: $net_card_enabled_ip4_addr" temp=$(echo "$net_card_enabled_ip4_addr" | cut -d/ -f2) net_card_enabled_ip_subnet=$(cidr2mask "$temp") net_card_enabled_ip_version="4" addr_info=$addr_info"\t\t<ip_address>\n" addr_info=$addr_info"\t\t\t<net_mac_address>$(escape_xml "$net_card_mac")</net_mac_address>\n" addr_info=$addr_info"\t\t\t<net_index>$(escape_xml "$net_index")</net_index>\n" temp=$(echo "$net_card_enabled_ip4_addr" | cut -d/ -f1) addr_info=$addr_info"\t\t\t<ip_address_v4>$(escape_xml "$temp")</ip_address_v4>\n" addr_info=$addr_info"\t\t\t<ip_address_v6>$(escape_xml "$net_card_enabled_ip6_addr")</ip_address_v6>\n" addr_info=$addr_info"\t\t\t<ip_subnet>$(escape_xml "$net_card_enabled_ip_subnet")</ip_subnet>\n" addr_info=$addr_info"\t\t\t<ip_address_version>$(escape_xml "$net_card_enabled_ip_version")</ip_address_version>\n" addr_info=$addr_info"\t\t</ip_address>\n" done # Get Info on active IPV6 Addresses for this card for net_card_enabled_ip6_addr in $(ip addr show "$net_card_id" | grep 'inet6' | cut -c11- | cut -ds -f1); do net_card_enabled="True" net_card_enabled_ip4_addr="" net_card_enabled_ip_subnet=$(echo "$net_card_enabled_ip6_addr" | cut -d/ -f2) net_card_enabled_ip_version="6" addr_info=$addr_info"\t\t<ip_address>\n" 
addr_info=$addr_info"\t\t\t<net_mac_address>$(escape_xml "$net_card_mac")</net_mac_address>\n" addr_info=$addr_info"\t\t\t<net_index>$(escape_xml "$net_index")</net_index>\n" addr_info=$addr_info"\t\t\t<ip_address_v4>$(escape_xml "$net_card_enabled_ip4_addr")</ip_address_v4>\n" temp=$(echo "$net_card_enabled_ip6_addr" | cut -d/ -f1) addr_info=$addr_info"\t\t\t<ip_address_v6>$(escape_xml "$temp")</ip_address_v6>\n" addr_info=$addr_info"\t\t\t<ip_subnet>$(escape_xml "$net_card_enabled_ip_subnet")</ip_subnet>\n" addr_info=$addr_info"\t\t\t<ip_address_version>$(escape_xml "$net_card_enabled_ip_version")</ip_address_version>\n" addr_info=$addr_info"\t\t</ip_address>\n" done # Check DHCP lease for this card # Distros store the lease info in different files/locations, I'm getting the file from the running process #net_card_lease_file=$(ps -ef | grep dhclient | grep "$net_card_id" | sed -e 's/^.*-lf//' | cut -d" " -f2) net_card_lease_file="/var/lib/dhcp/dhclient.$net_card_id.leases" if [ ! -e "$net_card_lease_file" ]; then net_card_dhcp_enab="False" net_card_dhcp_server="" net_card_dhcp_lease_expire="" else net_card_dhcp_enab="True" net_card_dhcp_server=$(grep dhcp-server "$net_card_lease_file" | tail -n1 | sed 's/;//' | cut -d" " -f5) net_card_dhcp_lease_expire=$(grep expire "$net_card_lease_file" | tail -n1 | sed 's/;//' | cut -d" " -f5 | sed 's|/|-|g') # To get the Obtained date we need to get lease time first net_card_dhcp_lease_time=$(grep lease-time "$net_card_lease_file" | tail -n1 | sed 's/;//' | cut -d" " -f5) net_card_dhcp_lease_days=$((net_card_dhcp_lease_time / 60 / 60 / 24)) net_card_dhcp_lease_obtained=$(date -d ''"$net_card_dhcp_lease_expire"' -'"$net_card_dhcp_lease_days"' days' +%F) fi # TODO: Domain Registration & WINS Info (Samba) net_card_domain_reg="" net_card_dns_server=$(awk '/^name/{print $2}' /etc/resolv.conf | head -n1) net_card_dns_domain=$(awk '/^domain/{print $2}' /etc/resolv.conf | head -n1) if [ -z "$net_card_dns_domain" ]; then 
net_card_dns_domain=$(awk '/^search/{print $2}' /etc/resolv.conf | head -n1) fi { echo " <network_card>" echo " <net_index>$(escape_xml "$net_index")</net_index>" echo " <net_mac_address>$(escape_xml "$net_card_mac")</net_mac_address>" echo " <net_manufacturer>$(escape_xml "$net_card_manufacturer")</net_manufacturer>" echo " <net_model>$(escape_xml "$net_card_model")</net_model>" echo " <net_description>$(escape_xml "$net_card_description")</net_description>" echo " <net_ip_enabled>$(escape_xml "$net_card_enabled")</net_ip_enabled>" echo " <net_connection_id>$(escape_xml "$net_card_id")</net_connection_id>" echo " <net_connection_status>$(escape_xml "$net_card_status")</net_connection_status>" echo " <net_speed>$(escape_xml "$net_card_speed")</net_speed>" echo " <net_adapter_type>$(escape_xml "$net_card_type")</net_adapter_type>" echo " <net_dhcp_enabled>$(escape_xml "$net_card_dhcp_enab")</net_dhcp_enabled>" echo " <net_dhcp_server>$(escape_xml "$net_card_dhcp_server")</net_dhcp_server>" echo " <net_dhcp_lease_obtained>$(escape_xml "$net_card_dhcp_lease_obtained")</net_dhcp_lease_obtained>" echo " <net_dhcp_lease_expires>$(escape_xml "$net_card_dhcp_lease_expire")</net_dhcp_lease_expires>" echo " <net_dns_host_name>$(escape_xml "$system_hostname")</net_dns_host_name>" echo " <net_dns_domain>$(escape_xml "$net_card_dns_domain")</net_dns_domain>" echo " <net_dns_domain_reg_enabled>$(escape_xml "$net_card_domain_reg")</net_dns_domain_reg_enabled>" echo " <net_dns_server>$(escape_xml "$net_card_dns_server")</net_dns_server>" echo " <net_wins_primary></net_wins_primary>" echo " <net_wins_secondary></net_wins_secondary>" echo " <net_wins_lmhosts_enabled></net_wins_lmhosts_enabled>" echo " </network_card>" } >> "$xml_file" done echo " </network_cards>" >> "$xml_file" fi ################################## # ADDRESSES SECTION # ################################## if [ -n "$addr_info" ]; then { echo " <addresses>" echo -e "$addr_info" echo " </addresses>" } >> "$xml_file" fi 
##################################
# DISK SECTION                   #
##################################

if [ "$debugging" -gt "0" ]; then
	echo "Hard Disk Info"
fi

echo " <hard_disks>" >> "$xml_file"
partition_result=""
# Iterate physical disks: lsblk device names, excluding major numbers
# 11 (sr, optical), 2 (fd, floppy) and 1 (ram).
for disk in $(lsblk -ndo NAME -e 11,2,1 2>/dev/null); do
	hard_drive_caption="/dev/$disk"
	hard_drive_index="$disk"
	hard_drive_interface_type=$(udevadm info -q all -n /dev/"$disk" 2>/dev/null | grep ID_BUS= | cut -d= -f2)
	# udev reports SATA drives with ID_BUS=ata; refine to "sata" when flagged
	test=$(udevadm info -q all -n /dev/"$disk" 2>/dev/null | grep ID_ATA_SATA= | cut -d= -f2)
	if [ "$test" = "1" ]; then
		hard_drive_interface_type="sata"
	fi
	hard_drive_model=$(udevadm info -a -n /dev/"$disk" 2>/dev/null | grep "ATTRS{model}==" | head -n 1 | cut -d\" -f2)
	if [ -z "$hard_drive_model" ]; then
		hard_drive_model=$(lsblk -lbndo MODEL /dev/"$disk")
	fi
	hard_drive_serial=$(udevadm info -q all -n /dev/"$disk" 2>/dev/null | grep ID_SERIAL_SHORT= | cut -d= -f2)
	# bytes -> MB
	hard_drive_size=$(lsblk -lbndo SIZE /dev/"$disk")
	hard_drive_size=$((hard_drive_size /1024 / 1024))
	hard_drive_device_id="/dev/$disk"
	# count child block devices (partitions) of this disk
	hard_drive_partitions=$(lsblk -lno NAME /dev/$disk | grep -v "^$disk\$" -c)
	hard_drive_status=""
	hard_drive_model_family=""
	hard_drive_firmware=$(udevadm info -q all -n /dev/"$disk" 2>/dev/null | grep ID_REVISION= | cut -d= -f2)
	hard_drive_scsi_logical_unit=""
	# Pull the vendor line for this disk out of "lshw -class disk" using
	# the between_output helper (defined earlier in the script).
	mycommand="lshw -class disk 2>/dev/null"
	mydelimiter="*-disk"
	mymatch="logical name: /dev/$disk"
	myvariable="vendor:"
	myresult=$(between_output "$mycommand" "$mydelimiter" "$mymatch" "$myvariable")
	myresult=$(echo "$myresult" | cut -d: -f2)
	hard_drive_manufacturer=$(trim "$myresult")
	if [ -z "$hard_drive_manufacturer" ]; then
		hard_drive_manufacturer=$(udevadm info -q all -n /dev/"$disk" 2>/dev/null | grep ID_VENDOR= | cut -d= -f2)
	fi
	if [ -n "$(which smartctl 2>/dev/null)" ]; then
		# use smart tools as they are installed
		hard_drive_status=$(smartctl -H /dev/"$disk" 2>/dev/null | grep "SMART overall" | cut -d: -f2)
		hard_drive_model_family=$(smartctl -i /dev/"$disk" 2>/dev/null | grep "Model Family" | cut -d: -f2)
	fi
	# some hacks
	if [ -z "$hard_drive_manufacturer" ] && [[ "$hard_drive_model" == *"Crucial"* ]]; then
		hard_drive_manufacturer="Crucial"
	fi
	if [[ "$hard_drive_manufacturer" == *"VMware"* ]]; then
		hard_drive_manufacturer="VMware"
		hard_drive_model_family="VMware"
		hard_drive_model="VMware Virtual Disk"
	fi
	if [[ "$hard_drive_model" == *"VMware"* ]] || [[ "$hard_drive_model" == *"Virtual"* ]]; then
		hard_drive_model="VMware Virtual Disk"
	fi
	{
	echo " <hard_disk>"
	echo " <hard_drive_caption>$(escape_xml "$hard_drive_caption")</hard_drive_caption>"
	echo " <hard_drive_index>$(escape_xml "$hard_drive_index")</hard_drive_index>"
	echo " <hard_drive_interface_type>$(escape_xml "$hard_drive_interface_type")</hard_drive_interface_type>"
	echo " <hard_drive_manufacturer>$(escape_xml "$hard_drive_manufacturer")</hard_drive_manufacturer>"
	echo " <hard_drive_model>$(escape_xml "$hard_drive_model")</hard_drive_model>"
	echo " <hard_drive_serial>$(escape_xml "$hard_drive_serial")</hard_drive_serial>"
	echo " <hard_drive_size>$(escape_xml "$hard_drive_size")</hard_drive_size>"
	echo " <hard_drive_device_id>$(escape_xml "$hard_drive_device_id")</hard_drive_device_id>"
	echo " <hard_drive_partitions>$(escape_xml "$hard_drive_partitions")</hard_drive_partitions>"
	echo " <hard_drive_status>$(escape_xml "$hard_drive_status")</hard_drive_status>"
	echo " <hard_drive_firmware>$(escape_xml "$hard_drive_firmware")</hard_drive_firmware>"
	echo " <hard_drive_model_family>$(escape_xml "$hard_drive_model_family")</hard_drive_model_family>"
	echo " <hard_drive_scsi_logical_unit>$(escape_xml "$hard_drive_scsi_logical_unit")</hard_drive_scsi_logical_unit>"
	echo " </hard_disk>"
	} >> "$xml_file"
	# Per-partition details. Each attribute is read via
	# "lsblk -lno NAME,<COL> /dev/$disk" filtered to this partition's row
	# (per-partition lsblk calls — kept commented above each — did not
	# work on all distros).
	for partition in $(lsblk -lno NAME /dev/$disk 2>/dev/null | grep -v ^$disk\$ ); do
		if [ -n "$partition" ] && [ "$partition" != "$disk" ]; then
			# partition_mount_type=$(lsblk -lndo TYPE /dev/"$partition" 2>/dev/null)
			partition_mount_type=$(lsblk -lno NAME,TYPE /dev/$disk 2>/dev/null | grep "^$partition " | sed -e "s/$partition//g")
			partition_mount_type=$(trim "$partition_mount_type")
			if [ "$partition_mount_type" = "part" ]; then
				partition_mount_type="partition"
			fi
			#partition_mount_point=$(lsblk -lndo MOUNTPOINT /dev/"$partition" 2>/dev/null)
			partition_mount_point=$(lsblk -lno NAME,MOUNTPOINT /dev/$disk 2>/dev/null | grep "^$partition " | sed -e "s/$partition//g")
			partition_mount_point=$(trim "$partition_mount_point")
			#partition_name=$(lsblk -lndo LABEL /dev/"$partition" 2>/dev/null)
			partition_name=$(lsblk -lno NAME,LABEL /dev/$disk 2>/dev/null | grep "^$partition " | sed -e "s/$partition//g")
			partition_name=$(trim "$partition_name")
			#partition_size=$(lsblk -lbndo SIZE /dev/"$partition" 2>/dev/null)
			partition_size=$(lsblk -lbo NAME,SIZE /dev/$disk 2>/dev/null | grep "^$partition" | sed -e "s/$partition//" )
			partition_size=$(trim "$partition_size")
			# bytes -> MB
			partition_size=$((partition_size /1024 / 1024))
			#partition_format=$(lsblk -lndo FSTYPE /dev/"$partition" 2>/dev/null)
			partition_format=$(lsblk -lno NAME,FSTYPE /dev/$disk 2>/dev/null | grep "^$partition " | sed -e "s/$partition//g")
			partition_format=$(trim "$partition_format")
			#partition_caption=$(lsblk -lndo LABEL /dev/"$partition" 2>/dev/null)
			partition_caption=$(lsblk -lno NAME,LABEL /dev/$disk 2>/dev/null | grep "^$partition " | sed -e "s/$partition//g")
			partition_caption=$(trim "$partition_caption")
			partition_device_id="/dev/$partition"
			partition_disk_index="$disk"
			partition_bootable=""
			partition_type="$partition_mount_type"
			partition_quotas_supported=""
			partition_quotas_enabled=""
			#partition_serial=$(lsblk -lndo UUID /dev/"$partition" 2>/dev/null)
			partition_serial=$(lsblk -lno NAME,UUID /dev/$disk 2>/dev/null | grep "^$partition " | sed -e "s/$partition//g")
			partition_serial=$(trim "$partition_serial")
			#partition_free_space=$(df -m /dev/"$partition" 2>/dev/null | grep /dev/"$partition" | awk '{print $4}')
			# free space in MB from df's "total" row for the mount point
			partition_free_space=$(df -m --total "$partition_mount_point" 2>/dev/null | grep ^total | awk '{print $4}')
			if [ -z
"$partition_free_space" ] && [ -n "$partition_serial" ]; then partition_free_space=$(df -m /dev/disk/by-uuid/"$partition_serial" 2>/dev/null | grep "$partition_serial" | awk '{print $4}') fi #partition_used_space=$(df -m /dev/"$partition" 2>/dev/null | grep /dev/"$partition" | awk '{print $3}') partition_used_space=$(df -m --total "$partition_mount_point" 2>/dev/null | grep ^total | awk '{print $3}') if [ -z "$partition_used_space" ] && [ -n "$partition_serial" ]; then partition_used_space=$(df -m /dev/disk/by-uuid/"$partition_serial" 2>/dev/null | grep "$partition_serial" | awk '{print $3}') fi if [ "$partition_format" = "swap" ]; then partition_used_space=$(free -m | grep -i swap | awk '{print $3}') partition_free_space=$(free -m | grep -i swap | awk '{print $4}') fi partition_result=$partition_result" <partition>\n" partition_result=$partition_result" <hard_drive_index>$(escape_xml "$partition_disk_index")</hard_drive_index>\n" partition_result=$partition_result" <partition_mount_type>$(escape_xml "$partition_mount_type")</partition_mount_type>\n" partition_result=$partition_result" <partition_mount_point>$(escape_xml "$partition_mount_point")</partition_mount_point>\n" partition_result=$partition_result" <partition_name>$(escape_xml "$partition_name")</partition_name>\n" partition_result=$partition_result" <partition_size>$(escape_xml "$partition_size")</partition_size>\n" partition_result=$partition_result" <partition_free_space>$(escape_xml "$partition_free_space")</partition_free_space>\n" partition_result=$partition_result" <partition_used_space>$(escape_xml "$partition_used_space")</partition_used_space>\n" partition_result=$partition_result" <partition_format>$(escape_xml "$partition_format")</partition_format>\n" partition_result=$partition_result" <partition_caption>$(escape_xml "$partition_caption")</partition_caption>\n" partition_result=$partition_result" <partition_device_id>$(escape_xml "$partition_device_id")</partition_device_id>\n" 
partition_result=$partition_result" <partition_disk_index>$(escape_xml "$partition_disk_index")</partition_disk_index>\n" partition_result=$partition_result" <partition_bootable></partition_bootable>\n" partition_result=$partition_result" <partition_type>$(escape_xml "$partition_type")</partition_type>\n" partition_result=$partition_result" <partition_quotas_supported></partition_quotas_supported>\n" partition_result=$partition_result" <partition_quotas_enabled></partition_quotas_enabled>\n" partition_result=$partition_result" <partition_serial>$(escape_xml "$partition_serial")</partition_serial>\n" partition_result=$partition_result" </partition>" fi done done echo " </hard_disks>" >> "$xml_file" ################################## # PARTITION SECTION # ################################## if [ -n "$partition_result" ]; then { echo " <partitions>" echo -e "$partition_result" echo " </partitions>" } >> "$xml_file" fi ################################## # LOG SECTION # ################################## if [ "$debugging" -gt "0" ]; then echo "Log Info" fi echo " <logs>" >> "$xml_file" for log in ls /etc/logrotate.d/* ; do if [ -e "$log" ]; then log_file_name=$(grep -m 1 -E "^/" "$log" | sed -e 's/\ {//g') log_max_file_size=$(grep -E '\ size\ ' "$log" | grep -oE '[[:digit:]]*') { echo " <log>" echo " <log_name>$(escape_xml "$log")</log_name>" echo " <log_file_name>$(escape_xml "$log_file_name")</log_file_name>" echo " <log_file_size></log_file_size>" echo " <log_max_file_size>$(escape_xml "$log_max_file_size")</log_max_file_size>" echo " </log>" } >> "$xml_file" fi done echo " </logs>" >> "$xml_file" ################################## # SWAP SECTION # ################################## if [ "$debugging" -gt "0" ]; then echo "Swap Info" fi for swap in $(tail -n +2 /proc/swaps) ; do echo "$swap" | awk ' { print "\t<pagefile>\n\t\t<file_name>"$1"</file_name>\n\t\t<initial_size>"$3"</initial_size>\n\t\t<max_size>"$3"</max_size>\n\t</pagefile>" } ' >> "$xml_file" done 
################################## # USER SECTION # ################################## if [ "$debugging" -gt "0" ]; then echo "User Info" fi echo " <users>" >> "$xml_file" IFS=$(echo -en "\n\b"); #for i in $(cat /etc/passwd) ; do # echo $i | awk -F: ' { print "\t\t<user>\n" "\t\t\t<user_name>"$1"</user_name>\n" "\t\t\t<user_full_name>"$5"</user_full_name>\n" "\t\t\t<user_sid>"$3"</user_sid>\n" "\t\t</user>" } ' >> "$xml_file" grep -v '^ *#' < /etc/passwd | while IFS= read -r line; do echo "$line" | awk -F: ' { print "\t\t<user>\n" "\t\t\t<user_name>"$1"</user_name>\n" "\t\t\t<user_full_name><![CDATA["$5"]]></user_full_name>\n" "\t\t\t<user_sid>"$3"</user_sid>\n" "\t\t</user>" } ' >> "$xml_file" done echo " </users>" >> "$xml_file" ######################################################## # SOFTWARE SECTION # ######################################################## if [ "$debugging" -gt "0" ]; then echo "Software Info" fi echo " <software>" >> "$xml_file" case $system_os_family in 'Ubuntu' | 'Debian' | 'LinuxMint' ) #dpkg-query --show --showformat="\t\t<package>\n\t\t\t<software_name><![CDATA[\${Package}]]></software_name>\n\t\t\t<software_version><![CDATA[\${Version}]]></software_version>\n\t\t\t<software_url><![CDATA[\${Homepage} ]]></software_url>\n\t\t</package>\n" |\ dpkg-query --show --showformat="\t\t<package>\n\t\t\t<software_name><![CDATA[\${Package}]]></software_name>\n\t\t\t<software_version><![CDATA[\${Version}]]></software_version>\n\t\t\t<software_url></software_url>\n\t\t</package>\n" |\ sed -e 's/\&.*</</' >> "$xml_file" #sed -e 's/url><.*><\/software/url><\/software/' >> "$xml_file" ;; 'CentOS' | 'RedHat' | 'SUSE' | 'Fedora' ) rpm -qa 
--queryformat="\t\t<package>\n\t\t\t<software_name><\!\[CDATA\[%{NAME}\]\]></software_name>\n\t\t\t<software_version><\!\[CDATA\[%{VERSION}-%{RELEASE}\]\]></software_version>\n\t\t\t<software_version_orig><\!\[CDATA\[%{VERSION}\]\]></software_version_orig>\n\t\t\t<software_url><\!\[CDATA\[%{URL}\]\]></software_url>\n\t\t</package>\n" |\ sed -e 's/\&.*</</' >> "$xml_file" #sed -e 's/url><.*><\/software/url><\/software/' >> "$xml_file" ;; esac echo " </software>" >> "$xml_file" ######################################################## # SERVICE SECTION # ######################################################## if [ "$debugging" -gt "0" ]; then echo "Service Info" fi echo " <services>" >> "$xml_file" case $system_os_family in 'Ubuntu' | 'Debian' ) if [ -r /etc/inittab ]; then INITDEFAULT=$(awk -F: '/id:/,/:initdefault:/ { print $2 }' /etc/inittab) else INITDEFAULT=$(awk -F= ' /^env\ DEFAULT_RUNLEVEL/ { print $2 } ' /etc/init/rc-sysinit.conf) fi # upstart services for s in $(q 2>/dev/null | awk ' { print $1 } ' | sort | uniq) ; do if [ "$s" = "rc" ]; then service_start_mode="Auto" else service_start_mode="Manual" fi service_name=$(escape_xml "$s") echo -e "\t\t<service>\n\t\t\t<service_name>$service_name</service_name>\n\t\t\t<service_start_mode>$service_start_mode</service_start_mode>\n\t\t</service>" >> "$xml_file" done # SysV init services for service_name in /etc/init.d/* ; do [[ -e $service_name ]] || break if [[ "$service_name" != "README" ]] && [[ "$service_name" != "upstart" ]]; then { echo " <service>" echo " <service_name>$(escape_xml "$service_name")</service_name>" } >> "$xml_file" temp="" if ls /etc/rc"$INITDEFAULT".d/*"$service_name"* &>/dev/null ; then echo " <service_start_mode>Manual</service_start_mode>" >> "$xml_file" else echo " <service_start_mode>Auto</service_start_mode>" >> "$xml_file" fi echo " </service>" >> "$xml_file" fi done ;; 'CentOS' | 'RedHat' | 'SUSE' ) INITDEFAULT=$(awk -F: '/id:/,/:initdefault:/ { print $2 }' /etc/inittab) chkconfig 
--list |\ sed -e '/^$/d' -e '/xinetd based services:/d' |\ awk -v ID="$INITDEFAULT" ' { sub(/:/, "", $1); print "\t\t<service>\n\t\t\t<service_name>"$1"</service_name>"; if ($2 =="on" || $5 ==ID":on") print "\t\t\t<service_start_mode>Auto</service_start_mode>"; else if ($2 =="off" || $5 ==ID":off") print "\t\t\t<service_start_mode>Manual</service_start_mode>"; print "\t\t</service>" } ' >> "$xml_file" ;; esac echo " </services>" >> "$xml_file" ######################################################## # ROUTE SECTION # ######################################################## if [ "$debugging" -gt "0" ]; then echo "Route Info" fi echo " <routes>" >> "$xml_file" if [ -n "$(which route 2>/dev/null)" ]; then for i in $(route -n | tail -n +3) ; do echo "$i" | awk ' { print "\t\t<route>\n\t\t\t<destination>"$1"</destination>\n\t\t\t<mask>"$3"</mask>\n\t\t\t<metric>"$5"</metric>\n\t\t\t<next_hop>"$2"</next_hop>\n\t\t\t<type>"$4"</type>\n\t\t</route>" } ' >> "$xml_file" done fi if [ -n "$(which route 2>/dev/null)" ] && [ -n "$(which ip 2>/dev/null)" ]; then #route_mask=$(cidr2mask `ip r | grep "default via" | cut -d" " -f1 | cut -d"\"" -f2`) route_next_hop=$(ip r | grep "default via" | cut -d" " -f3) route_metric=$(ip r | grep "default via" | cut -d" " -f10) { echo " <route>" echo " <destination>0.0.0.0</destination>" echo " <mask></mask>" echo " <metric>$(escape_xml "$route_metric")</metric>" echo " <next_hop>$(escape_xml "$route_next_hop")</next_hop>" echo " </route>" } >> "$xml_file" fi echo " </routes>" >> "$xml_file" ######################################################## # NETSTAT LISTENING PORTS # ######################################################## if [ "$debugging" -gt "0" ]; then echo "Netstat Info" fi netstatdump=$(netstat -lntup 2>/dev/null | grep -v "(only servers)" | grep -v "Foreign Address") { echo " <netstat>" echo " <![CDATA[$netstatdump]]>" echo " </netstat>" } >> "$xml_file" ######################################################## # CLOSE THE AUDIT 
FILE # ######################################################## echo "</system>" >> "$xml_file" ######################################################## # SUBMIT RESULTS # ######################################################## if [ "$debugging" -gt 0 ]; then elapsed_time=$(timer "$start_time") echo "Audit Generated in '$elapsed_time' seconds." fi if [ "$submit_online" = "y" ]; then sed -i -e 's/+/%2B/g' "$xml_file" sed -i -e 's/"/%22/g' "$xml_file" sed -i -e 's/&/%26/g' "$xml_file" if [ "$debugging" -gt 1 ]; then echo "Submitting results to server" echo "URL: $url" fi wget --delete-after --post-file="$xml_file" "$url" 2>/dev/null fi sed -i -e 's/form_systemXML=//g' "$xml_file" sed -i -e 's/%2B/+/g' "$xml_file" sed -i -e 's/%22/"/g' "$xml_file" sed -i -e 's/%26/&/g' "$xml_file" if [ "$create_file" != "y" ]; then rm -f "$PWD"/"$xml_file" fi if [ "$debugging" -gt 0 ]; then elapsed_time=$(timer "$start_time") echo "Audit Completed in '$elapsed_time' seconds." fi IFS="$ORIGIFS" exit 0
true
7c3cd5f26c0a9937d689ae6e80433daf8862faf2
Shell
z-oz/kali-exercises
/Session_2/netscript
UTF-8
453
3.0625
3
[]
no_license
#!/bin/bash
# Network-tools exercise: show the local interface configuration, then
# ping, resolve (nslookup/whois) and trace two hard-coded target hosts.
# Output goes straight to stdout; the script performs no error handling.
#
# Fix: quote every variable expansion (SC2086); harmless for these
# constant hostnames but correct shell hygiene.

Target_URL="google.com"
Target_URL2="ebay.com"

echo "First, let's get our IP Address..."
ifconfig
echo ""

echo "Starting to Ping..."
ping -c 5 "$Target_URL"
echo ""

echo "Starting to Ping second URL..."
ping -c 5 "$Target_URL2"
echo

echo "Starting nslookup..."
nslookup "$Target_URL"

echo "Looking for google.com on whois..."
whois "$Target_URL"

# Traceroute won't work using NAT
echo "Starting traceroute..."
traceroute "$Target_URL"

# End of Script
true
64aed7aab57281eb241e884d20229030802b33d2
Shell
s5unty/dotfiles
/mutt/mutt-task-it
UTF-8
269
2.515625
3
[]
no_license
#!/bin/sh # 获取这封邮件的 message-id ANNO=$(formail -fXMessage-ID | perl -pe 's/\n//' | perl -pe 's/: </:</') # 获取刚刚由 mutt2task 添加的 task id TASK=$(task ids | cut -d"-" -f2) # 为最新的 task 添加 message-id task ${TASK} annotate ${ANNO}
true
9f4b27ce565ebd5f1844514afb41143e462b8412
Shell
afamilyman/crosvm
/ci/crosvm_test_vm/runtime/exec_file
UTF-8
685
3.796875
4
[ "BSD-3-Clause" ]
permissive
#!/bin/bash
# Copyright 2021 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# Uploads and executes a file in the VM.
#
# Usage: exec_file [--no-sync] path [args...]
#   --no-sync  skip syncing shared objects into the VM first.
#
# Fix: quote $filepath/$filename (and the sibling-script paths derived
# from $0) so paths containing spaces do not break basename/scp.

# Wait for VM to be available
"${0%/*}/exec" exit || exit 1

if [ "$1" = "--no-sync" ]; then
    shift
else
    echo "Syncing shared objects..."
    "${0%/*}/sync_so" || exit 1
fi

filepath=$1
filename=$(basename "$filepath")

echo "Executing $filename ${@:2}"
scp -q "$filepath" "vm:/tmp/$filename"

# Make sure to preserve the exit code of $filename after cleaning up the file.
ssh vm -q -t "cd /tmp && ./$filename ${@:2}"
ret=$?
ssh vm -q -t "rm /tmp/$filename"
exit $ret
true
23407b33597f9010628952bca9d2a066c4ba3269
Shell
001466/My-Blog
/startup.sh
UTF-8
480
3.703125
4
[ "Apache-2.0" ]
permissive
#!/bin/sh
# Start the myblog service unless an instance is already running.

echo "执行启动脚本..."

# Path of the jar to launch (JAR path).
JAR_MAIN=myblog.jar

# PID of a running instance; 0 is the "not running" sentinel.
pid=0

# Look up the PID of a running $JAR_MAIN via jps; sets $pid (0 if absent).
getPid(){
    # BUG FIX: the original used single quotes, assigning the literal
    # string 'jps -l | grep $JAR_MAIN' instead of running the pipeline,
    # so the -n test below was always true.
    javaps=$(jps -l | grep "$JAR_MAIN")
    if [ -n "$javaps" ]; then
        pid=$(jps -l | grep "$JAR_MAIN" | awk '{print $1}')
    else
        pid=0
    fi
}

# Check for an already-started service; launch it if none is running.
startup(){
    getPid
    # BUG FIX: the original tested [ -n "$pid" ], which is also true for
    # the sentinel value "0", so the service was never actually started.
    if [ -n "$pid" ] && [ "$pid" != "0" ]; then
        echo "检查到服务已启动... $pid"
    else
        exec java -server -jar "$JAR_MAIN" &
    fi
}

startup
true
8263f7a499d42afa26017987e4f35f9cfa2560e1
Shell
cht8687/dotfiles
/.bashrc
UTF-8
1,565
2.71875
3
[ "LicenseRef-scancode-warranty-disclaimer", "MIT" ]
permissive
[ -n "$PS1" ] && source ~/.bash_profile; [ -f ~/.fzf.bash ] && source ~/.fzf.bash export FZF_DEFAULT_COMMAND='ag -g ""' # tabtab source for serverless package # uninstall by removing these lines or running `tabtab uninstall serverless` [ -f /home/robert/Documents/work/hireslist/packages/best-places/node_modules/tabtab/.completions/serverless.bash ] && . /home/robert/Documents/work/hireslist/packages/best-places/node_modules/tabtab/.completions/serverless.bash # tabtab source for sls package # uninstall by removing these lines or running `tabtab uninstall sls` [ -f /home/robert/Documents/work/hireslist/packages/best-places/node_modules/tabtab/.completions/sls.bash ] && . /home/robert/Documents/work/hireslist/packages/best-places/node_modules/tabtab/.completions/sls.bash export NVM_DIR="$HOME/.nvm" [ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh" # This loads nvm [ -s "$NVM_DIR/bash_completion" ] && \. "$NVM_DIR/bash_completion" # This loads nvm bash_completion eval $(thefuck --alias) source ~/.bin/tmuxinator.bash source ~/.ghcup/env # automatic nvm use enter_directory() { if [[ $PWD == $PREV_PWD ]]; then return fi PREV_PWD=$PWD [[ -f ".nvmrc" ]] && nvm use } export PROMPT_COMMAND=enter_directory # tabtab source for slss package # uninstall by removing these lines or running `tabtab uninstall slss` [ -f /Users/robert/Documents/cht/javascript/packages/auth-challenge/node_modules/tabtab/.completions/slss.bash ] && . /Users/robert/Documents/cht/javascript/packages/auth-challenge/node_modules/tabtab/.completions/slss.bash
true
61a95f1a8e27c0fe4e95032dfa5baba72ebcbedc
Shell
cristianoliveira/funzzy
/tests/integration/functions.sh
UTF-8
2,643
4
4
[ "MIT" ]
permissive
#!/usr/bin/env bash
# Shared helpers for funzzy integration tests: per-test setup, watcher
# process cleanup, and polling assertions over the watcher's output files.
# Callers are expected to set $TEST_DIR and $FUNZZY_PID before use.
# All assert_* helpers retry up to 5 times (sleeping 5s between attempts)
# before reporting failure via report_failure, which exits the run.

# TEST_REFERENCE=""

# Begin a test case: record its name (for failure reports) and recreate a
# clean workdir.
# Arguments: $1 - test description
function test() {
  # Fix: guard with :? so an unset/empty TEST_DIR aborts instead of
  # running `rm -rf /workdir`.
  rm -rf "${TEST_DIR:?}/workdir"
  TEST_REFERENCE="$1"
  echo "test: $1"
  mkdir "${TEST_DIR}/workdir"
}

# Terminate the funzzy process, escalating SIGINT -> SIGKILL.
function cleanup() {
  echo "kill funzzy $FUNZZY_PID"
  # https://github.com/drbeco/killgracefully
  for SIG in 2 9 ; do
    echo $SIG
    kill -$SIG "$FUNZZY_PID" || break
    sleep 2
  done
}

# Fail the current test unless the two arguments are equal strings.
function assert_equal() {
  if [ "$1" != "$2" ]; then
    echo "ERROR: $1 != $2"
    report_failure
  fi
}

# Print the failing test's name, kill funzzy and exit non-zero.
function report_failure() {
  echo "Failed: $TEST_REFERENCE"
  cleanup
  exit 1
}

# Assert that line $3 of file $1 contains pattern $2.
function assert_file_content_at() {
  local success=0
  for i in {1..5}
  do
    if sed "$3!d" "$1" | grep -q "$2"; then
      success=1
      break
    else
      echo "Expected: $2"
      echo "Content:"
      sed "$3!d" "$1"
      echo "Attempt failed: file $1 does not contain $2 at line $3"
      echo "Attempt $i..."
      sleep 5
    fi
  done

  if [ $success -eq 0 ]; then
    echo "ERROR: file $1 does not contain $2"
    # Fix: the original echoed "file content:" twice here.
    echo "file content:"
    cat "$1"
    report_failure
  fi
}

# Assert that file $1 contains exactly $3 occurrences of pattern $2.
function assert_file_occurrencies() {
  local success=0
  for i in {1..5}
  do
    occurrencies=$(grep -o "$2" "$1" | wc -l)
    echo "occurrencies: $occurrencies"
    if [ "$occurrencies" -eq "$3" ]; then
      success=1
      break
    else
      echo "Attempt failed: file $1 does not contain $2"
      echo "Attempt $i..."
      sleep 5
    fi
  done

  if [ $success -eq 0 ]; then
    echo "ERROR: file $1 does not contain $2"
    echo "final output content:"
    cat "$1"
    report_failure
  fi
}

# Assert that file $1 contains pattern $2.
function assert_file_contains() {
  local success=0
  for i in {1..5}
  do
    if grep -q "$2" "$1"; then
      success=1
      break
    else
      echo "Attempt failed: file $1 does not contain $2"
      echo "Attempt $i..."
      sleep 5
    fi
  done

  if [ $success -eq 0 ]; then
    echo "ERROR: file $1 does not contain $2"
    echo "final output content:"
    cat "$1"
    report_failure
  fi
}

# Assert that file $1 does NOT contain pattern $2.
function assert_file_not_contains() {
  local success=0
  for i in {1..5}
  do
    if grep -q "$2" "$1"; then
      echo "Attempt failed: file $1 does contain $2"
      echo "Attempt $i..."
      sleep 5
    else
      success=1
      break
    fi
  done

  if [ $success -eq 0 ]; then
    # Fix: the original message said "does not contain", which is the
    # opposite of what this assertion checks.
    echo "ERROR: file $1 does contain $2"
    echo "final output content:"
    cat "$1"
    report_failure
  fi
}

# Block until file $1 exists and is non-empty; fail after 5 attempts.
function wait_for_file() {
  local file_exists=0
  for i in {1..5}
  do
    if [ -s "$1" ]
    then
      file_exists=1
      break
    fi
    echo "Waiting for $1..."
    sleep 5
  done

  if [ $file_exists -eq 0 ]; then
    echo "ERROR: file $1 does not exist"
    report_failure
  fi
}
true
8769de7da52cb4692a6be09316e53e46869eb2b4
Shell
poirierlouis/Epitech-Scripts
/my_git
UTF-8
580
3.75
4
[]
no_license
#!/bin/bash
# Commit the whole working tree with the given message, then pull and push.
# Exactly one argument (the commit message) is required.

if [ $# -ne 1 ]; then
  echo -e "\e[0;31mError: you have to provide a message :@\e[0;m"
  exit 1
fi

# Clean build artefacts first so they never get committed.
if [ -f "Makefile" ]; then
  make fclean
fi

git add --all
git commit -m "$1"
if [ $? -ne 0 ]; then
  echo -e "\e[0;33mWarning : try to commit again...\e[0;m"
  git commit -m "$1"
fi

git pull
if [ $? -eq 0 ]; then
  git push
else
  echo -e "\e[0;31mError: pulling doesn't succeed, you may have to fix merging conflicts :s\e[0;m"
  exit 2;
fi

echo -e "\e[0;32mSuccess: pull & push done :)\e[0;m"
exit 0;
true
0be8592f2de812fb6b52c5aff5cd2afdd5f26e5f
Shell
hirmyama/wordpress
/wp2.sh
UTF-8
2,382
2.8125
3
[]
no_license
# Tess2ラボ6(バックアップ、別リージョンでのリカバリ): # # シンガポールでAMIから立ち上げた直後のEC2上のWordPressは下記の状態で稼働している # # ・HTMLはシンガポールのEC2 # ・JSやCSSなどのアセットは東京のELB # ・画像は東京のS3バケットから持ってきている # # したがって、完全にシンガポールリージョンだけで稼働させるにはさらに下記の処理が必要 # # ・RDS内部に記録された「サイトURL」「ホーム」をシンガポールのEC2のURLに更新 # ・シンガポールにS3バケットを作り、東京のS3バケットの中身を全部コピーし、公開状態にする # ・S3プラグインが使用するバケットをシンガポールのバケットに変更 # 東京リージョンのリージョン名、バケット名 source_region=ap-northeast-1 source_bucket=wp-xxxxxxxxxxx # シンガポールリージョンのリージョン名、バケット名 target_region=ap-southeast-1 target_bucket=wp-yyyyyyyyyyy # シンガポールリージョンのEC2インスタンスのURL(「http://ipアドレス」の形式であること!) target_ec2_url=http://18.136.101.106 # シンガポールにバケットを作成 aws s3 mb s3://$target_bucket --region $target_region # 東京のバケットの内容をすべてシンガポールのバケットにコピー aws s3 cp --recursive s3://$source_bucket s3://$target_bucket # バケット内のオブジェクトをすべて公開 aws s3 ls --recursive s3://$target_bucket/ \ | awk '{print $4}' \ | xargs -I{} aws s3api put-object-acl --acl public-read --bucket $target_bucket --key "{}" # DB内のリージョン名を書き換え sudo -u apache /usr/local/bin/wp search-replace \ --path=/var/www/html \ "$source_region" "$target_region" # DB内のバケット名を書き換え sudo -u apache /usr/local/bin/wp search-replace \ --path=/var/www/html \ "$source_bucket" "$target_bucket" # サイトURLを書き換え sudo -u apache /usr/local/bin/wp option set \ --path=/var/www/html \ siteurl $target_ec2_url # ホームURLを書き換え sudo -u apache /usr/local/bin/wp option set \ --path=/var/www/html \ home $target_ec2_url # TODO: プラグインの画面で、アップロード先のS3バケットを # シンガポールのバケットに変更する。
true
2cdd6298fdcc3406184199d57e659db5308fd28c
Shell
pn/dotfiles
/files/zshrc
UTF-8
3,084
2.546875
3
[]
no_license
# Interactive zsh configuration (oh-my-zsh based). Machine-specific
# absolute paths below are intentional and guarded where possible.

# Allow '#' comments on the command line; keep per-session history.
setopt interactivecomments
setopt no_share_history

# configure oh-my-zsh
ZSH=$HOME/.oh-my-zsh
ZSH_THEME="agnoster"
plugins=(
  git
  zsh-kubectl-prompt
  virtualenv
)
source $ZSH/oh-my-zsh.sh

# kube prompt: show the active kubectl context/namespace on the right.
autoload -U colors; colors
source ~/.oh-my-zsh/custom/plugins/zsh-kubectl-prompt/kubectl.zsh
RPROMPT='%{$fg[blue]%}($ZSH_KUBECTL_PROMPT)%{$reset_color%}'

# remove user@host from prompt (overrides the agnoster theme segment)
prompt_context() {}

# macOS-style clipboard helpers backed by xsel.
alias pbcopy='xsel --clipboard --input'
alias pbpaste='xsel --clipboard --output'
# NOTE(review): positional parameters like ${1:-""} are not substituted
# inside aliases at call time in zsh -- TODO confirm this alias behaves
# as intended.
alias gvim='gvim --servername ${VSRV:-GVIM} --remote-tab ${1:-""}'
alias sgrep="grep --binary-files=without-match --exclude-dir='.svn'"
alias sdiff="svn diff --diff-cmd kdiff3"
alias gdiff="git diff --color-words --word-diff-regex=. --no-index"
alias ack=ack-grep
alias vi="vim -u NONE" # do not load vimrc - super fast
which colordiff 2>&1 > /dev/null && alias diff=colordiff

# Global aliases expanding to the newest directory/file (zsh glob qualifiers).
alias -g ND='*(/om[1])' # newest directory
alias -g NF='*(.om[1])' # newest file

# Switch/set kubectl context and namespace (defaults: minikube/default).
kubeuse() { kubectl config use-context ${1:-minikube} --namespace=${2:-default} }
kubesetnm() { kubectl config set-context ${1:-minikube} --namespace=${2:-default} }
alias k=kubectl

export EDITOR=vim
export CSCOPE_LINEFLAG_AFTER_FILE=yes
export ANDROID_SDK=$HOME/Android/Sdk
export ANDROID_NDK=$HOME/Programs/ndk
export PATH=$HOME/bin:$HOME/.local/bin:$PATH:$ANDROID_SDK/platform-tools:$ANDROID_SDK/tools:$HOME/bin/scripts:$HOME/bin/privscripts
export PAGER='less -r'
#export PYTHONPATH=$PYTHONPATH:~/py-lib
export GOPATH=$HOME/gopath
export PATH=$GOPATH:$GOPATH/bin:$PATH

# Extra PATH entries from a local helper script, if present.
if [ -x ~/bin.paths ]; then
  . ~/bin/bin.paths
fi

export PARALLEL_UPDATE=true

# Open a serial console on /dev/ttyUSB0 (fixing permissions if needed).
serial () { test -w /dev/ttyUSB0 || sudo chmod 666 /dev/ttyUSB0; screen /dev/ttyUSB0 115200 }

# editing of commands in $EDITOR ('v' in vi command mode, Ctrl-X e otherwise)
autoload -U edit-command-line
zle -N edit-command-line
bindkey -M vicmd v edit-command-line
autoload edit-command-line
zle -N edit-command-line
bindkey '^Xe' edit-command-line

# vi keybindings on the command line.
bindkey -v

# Solarized-dark ls colors.
eval `dircolors ~/.dircolors/dircolors.ansi-dark`

# solarized colors for grep
unset GREP_COLOR
export GREP_COLORS='01;33'

# fix colors for vim in tmux
if [ -n "$TMUX" ]; then
  export TERM="screen-256color"
else
  export TERM="xterm-256color"
fi

# Ctrl-R history search (the second binding wins: pattern search).
bindkey '^R' history-incremental-search-backward
bindkey '^R' history-incremental-pattern-search-backward

DISABLE_AUTO_TITLE=true

# marks: named directory bookmarks stored as symlinks under $MARKPATH.
export MARKPATH=$HOME/.marks
function jump {
  cd -P "$MARKPATH/$1" 2>/dev/null || echo "No such mark: $1"
}
function mark {
  mkdir -p "$MARKPATH"; ln -s "$(pwd)" "$MARKPATH/$1"
}
function unmark {
  rm -i "$MARKPATH/$1"
}
function marks {
  ls -l "$MARKPATH" | sed 's/  / /g' | cut -d' ' -f9- | sed 's/ -/\t-/g' && echo
}

# Tab completion for jump/unmark from the existing mark names.
function _completemarks {
  reply=($(ls $MARKPATH))
}
compctl -K _completemarks jump
compctl -K _completemarks unmark

test -n "$DISPLAY" && synclient -l >/dev/null 2>&1 && synclient VertEdgeScroll=0 # disable touchpad scroll

# Machine-local overrides.
if [ -f ~/.zshrc_local ]
then
  . ~/.zshrc_local
fi

# find shorthands: by suffix, by prefix, by exact name.
fs() { find . -name \*"$1" }
fp() { find . -name "$1"\* }
fe() { find . -name "$1" }

# kubectl shell completion, when kubectl is installed.
if [ $commands[kubectl] ]; then source <(kubectl completion zsh); fi
true
true
5438935e843d528ad253887c2c07a9d8088423a2
Shell
rumatoest/jdblender
/run.sh
UTF-8
4,691
3.953125
4
[ "MIT" ]
permissive
#!/bin/bash
# This is the only way to correct run JDBLENDER tests
#
# Driver script: compiles (on the first cycle) and runs one or more
# benchmark implementations, optionally repeating cycles, reducing data
# sets, disabling JIT or enabling a JMX profiler port.

# Print the usage banner and option summary.
# NOTE(review): the banner spacing below was reconstructed from a
# whitespace-mangled copy of this file -- verify against upstream.
help() {
    echo "
 __ ___ ___ __ ___ _ _ ___ ___ ___
( )( \( ,)( ) ( _)( \( )( \( _)( ,)
 __)( ) ) )) ,\ )(__ ) _) ) ( ) ) )) _) ) \
(___/ (___/(___/(____)(___)(_)\_)(___/(___)(_)\_)

Usage: $0 [options] implementation/s

Argument:
    implementation/s  Implementation to run or comma separated list (type all to run all)

Options:
    -t number  Times to run each test implementation (by default 1)
    -r number  Reduce tests data sets up to N times if possible
    -d         Disable JIT
    -p port    Enable local profiler connection at specific port
    -j string  Additional JVM arguments
    -f flags   Specific test flags. Example: -F \"h2ds;hibernate=cache,jta;caynne=fast\"
               Flags are just strings delimited by comma.
               There are flag groups, that are delimited by semicolon.
               Flags group related to some implemntation must have prefix like \"mybatis=f1,f2,f3\"
               otherwise such flags will be treated as global.
               There are no global flags yet available.
"
}

# NOTE(review): db_start_command is not defined anywhere in this file;
# start_db looks like unused/dead code -- confirm before relying on it.
start_db() {
    db_start_command &
    PID=$!
}

# Build (only on the first cycle, while INIT=0) and run one
# implementation's jar, writing its CSV report next to the script.
# Arguments: $1 - implementation/module name
run_test() {
    JAR="${APP_DIR}/$1/build/libs/${1}.jar"
    OUT="${APP_DIR}/report-${1}.csv"

    if [ $INIT -eq 0 ]; then
        #Compile project here
        echo "COMPILE $1"
        ./gradlew ":${1}:jar"
    fi

    echo "RUN: $1"
    #echo "java $JVM -jar $JAR $OUT $REDUCE $FLAGS"
    java $JVM -jar $JAR $OUT $REDUCE $FLAGS
}

INIT=0

# Run every implementation in the comma-separated list $1, $CYCLES times.
run_tests() {
    if [ $CYCLES -gt 1 ]; then
        echo "WILL RUN $CYCLES CYCLES"
    fi

    COUNTER=0
    while [ $COUNTER -lt $CYCLES ]; do
        let COUNTER=COUNTER+1
        echo "CYCLE $COUNTER"
        for i in $(echo $1 | sed "s/,/ /g")
        do
            run_test $i || break
        done
        INIT=1
    done
}

cd "$(dirname "$0")"

#INITIALIZE DEFAULTS
HELP=0
CYCLES=1
REDUCE=1
ALL="jdbc,springjdbc,mybatis,jooq,cayenne,eclipse-link,hibernate,springdata"
JVM="-XX:CompileThreshold=10000 "
FLAGS=""
APP_DIR="$(dirname "$0")"

# NOTE(review): -j is documented in help() but not handled in this
# getopts string, so it falls into the *) error arm -- TODO confirm.
while getopts ":dp:t:r:f:h" opt; do
    case $opt in
        h)
            HELP=1
            ;;
        d)
            # Disable JIT and record that choice for the benchmark runtime.
            JVM="${JVM} -Djava.compiler=NONE "
            export JDB_JIT="FALSE"
            ;;
        p)
            # Enable a local (unauthenticated) JMX endpoint for profiling.
            if [ -z "$OPTARG" ]; then
                echo -e "\n ERROR: Invalid -p port value"
                HELP=-1
            else
                export JDB_PROFILE="TRUE"
                JVM="$JVM -Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.port=$OPTARG -Dcom.sun.management.jmxremote.local.only=true -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false "
            fi
            ;;
        t)
            if [ -z "$OPTARG" ]; then
                echo -e "\n ERROR: Invalid -t argument value"
                HELP=-1
            else
                let CYCLES=$(($OPTARG))
            fi
            ;;
        r)
            if [ -z "$OPTARG" ]; then
                echo -e "\n ERROR: Invalid reduce value"
                HELP=-1
            else
                let REDUCE=$(($OPTARG))
            fi
            ;;
        f)
            if [ -z "$OPTARG" ]; then
                echo -e "\n ERROR: Invalid flags"
                HELP=-1
            else
                FLAGS="$OPTARG"
            fi
            ;;
        *)
            echo -e "\n ERROR: Invalid arguments!!!"
            HELP=-1
            ;;
    esac
done
shift $((OPTIND - 1))

if [[ -z "$1" ]]; then
    echo -e "\n ERROR: Implementation argument is not specified!!!"
    HELP=-1
fi

if [[ 0 -ne $HELP ]]; then
    help
    exit 0;
else
    if [ "$1" == "all" ]; then
        run_tests $ALL
    else
        run_tests $1
    fi
fi

echo "ERROR"
exit 1;

## OLD CODE
# NOTE(review): everything below the unconditional `exit 1` above is
# unreachable dead code kept from an earlier version of this script.

usage() {
    echo "Usage: $0 project_name|all cycles_to_run"
    echo " project_name - project to execute or type \"all\" to run on all projects"
    echo " cycles_to_run - number of cycles to run (default 1)"
    echo " test_set_factor - (default 1)"
}

JIT="jitc"
while getopts ":j" option; do
    case $option in
        j) JIT="NONE" ;;
    esac
done
shift $((OPTIND - 1))

if [[ -z "$1" ]]; then
    usage
    exit 0
fi

run_one() {
    CYCLES=$2
    echo "RUN $1 $CYCLES times"
    COUNTER=0
    ARG=${@:3}
    while [ $COUNTER -lt $CYCLES ]; do
        let COUNTER=COUNTER+1
        echo "CYCLE $COUNTER"
        ./gradlew :$1:run -Dargs="$ARG" -Pjit="$JIT" || break
    done
}

run_all() {
    COUNTER=0
    CYCLES=$1
    echo "RUN ALL $CYCLES times"
    ARG=${@:2}
    while [ $COUNTER -lt $CYCLES ]; do
        let COUNTER=COUNTER+1
        echo "CYCLE $COUNTER"
        ./gradlew run -Dargs="$ARG" -Pjit="$JIT" || break
    done
}

CYCLES=1
if [ ! -z "$2" ]; then
    let CYCLES=$(($2))
fi

ARGS=""
if [ ! -z "$3" ]; then
    ARGS="-f $3"
fi

if [ "$1" == "all" ]; then
    run_all $CYCLES $ARGS
else
    run_one $1 $CYCLES $ARGS
fi
ab4e9cb2abc02f99d8f93194c9f36cdeb479f86b
Shell
uzh/gridcertlib
/upload_release_files.sh
UTF-8
3,800
4.1875
4
[ "Apache-2.0" ]
permissive
#! /bin/sh
#
# Upload release artifacts found under target/ to the project's
# GoogleCode download area, deriving labels and a summary from each
# file's name. Requires googlecode_upload.py on PATH.
PROG="$(basename $0)"

# Print the usage/help text.
# NOTE(review): line wrapping below was reconstructed from a
# whitespace-mangled copy of this file -- verify against upstream.
usage () {
  cat <<EOF
Usage: $PROG [options]

Upload files to http://gridcertlib.googlecode.com/files

Files to upload are searched for in the "target/" directory;
any "*.jar"/"*.tar.gz"/"*.zip" file is uploaded.

The release number, file type and contents (binary, javadoc, sources)
are automatically used as labels.

This program assumes that the script "googlecode_upload.py" is
available on your PATH. You can get this script from:
http://support.googlecode.com/svn/trunk/scripts/googlecode_upload.py

Options:
  -h, --help    Print this help text.
  -n, --no-act  Print upload commands instead of executing them.

Any other option given on the command line is passed down
to the "googlecode_upload.py" script.
EOF
}

## defaults

# GoogleCode project name
PROJECT='gridcertlib'

# space-separated list of labels to add to *any* file upload
LABELS="$PROJECT"

# name of the uploader script
googlecode_upload='googlecode_upload.py'

## helper functions

# Append label $1 to the comma-separated $LABELS list.
add_to_labels () {
  if [ -z "$LABELS" ]; then
    LABELS="$1"
  else
    LABELS="$LABELS,$1"
  fi
}

# TRUE if $1 contains $2 as a substring
contains () {
  echo "$1" | fgrep -q -e "$2"
}

# Print "$PROG: ERROR: <args or stdin>" on stderr and exit with code $1.
die () {
  rc="$1"
  shift
  (echo -n "$PROG: ERROR: ";
   if [ $# -gt 0 ]; then echo "$@"; else cat; fi) 1>&2
  exit $rc
}

# TRUE if $1 ends with (literal) string $2
endswith () {
  expr "$1" : ".*$2\$" 1>/dev/null 2>&1
}

# TRUE if command $1 is available (builtin, function or on PATH).
have_command () {
  type "$1" >/dev/null 2>/dev/null
}

# Abort with an error unless command $1 is available.
require_command () {
  if ! have_command "$1"; then
    die 1 "Could not find required command '$1' in system PATH. Aborting."
  fi
}

# TRUE if $1 starts with '/'.
# NOTE(review): appears unused in this script.
is_absolute_path () {
  expr match "$1" '/' >/dev/null 2>/dev/null
}

## sanity check
require_command googlecode_upload.py

## parse command-line
MAYBE=''
args=''
while [ $# -gt 0 ]; do
  case "$1" in
    --help|-h)
      usage
      exit 0
      ;;
    --labels*|-l)
      # did we get '--labels=x,y,z' ?
      labels=$(echo "$1" | cut -d= -f2)
      if [ -z "$labels" ]; then
        # no, it's '--labels x,y,z'
        shift
        labels="$1"
      fi
      add_to_labels "$labels"
      ;;
    --no-act|--dry-run|-n)
      # Prefix every upload command with 'echo' instead of running it.
      MAYBE=echo
      ;;
    --)
      shift
      break
      ;;
    *)
      # Anything else is passed through to googlecode_upload.py.
      args="$args '$1'"
      ;;
  esac
  shift
done

## main
common_labels="$LABELS"
for path in $(ls target/${PROJECT}-* 2>/dev/null); do
  # Reset per-file state; labels start from the common set.
  LABELS="$common_labels"
  summary=''
  filename=$(basename "$path")
  # Extract the dotted release number (e.g. 1.2.3) from the file name.
  release=$(echo $filename | egrep --only-matching '[0-9]+(\.[0-9]+)+')
  add_to_labels $release
  if contains "$filename" '-bin.'; then
    summary="Package combining JAR files of compiled classes, source files and documentation"
  elif contains "$filename" '-sources.' || contains "$filename" '-src.'; then
    summary="Source files of ${PROJECT} ${release}"
  elif contains "$filename" '-javadoc.'; then
    summary="API documentation of ${PROJECT} ${release}"
  fi
  if endswith "$filename" '.asc'; then
    # NOTE(review): `$(unknown)` runs a nonexistent command named
    # "unknown"; this looks corrupted or unfinished -- it was probably
    # meant to interpolate the name of the signed file. TODO confirm.
    summary="PGP/GPG signature for file $(unknown)"
    add_to_labels "pgp,gpg,signature"
  elif endswith "$filename" '.jar'; then
    add_to_labels 'jar'
  elif endswith "$filename" '.tar.gz'; then
    add_to_labels 'tar,gzip'
  elif endswith "$filename" '.tar.bz2'; then
    add_to_labels 'tar.bzip2'
  elif endswith "$filename" '.zip'; then
    add_to_labels 'zip'
  fi
  if [ -z "$summary" ]; then
    echo 1>&2 "Could not deduce a description for '$filename': skipping it."
  else
    # eval is needed so $args (pre-quoted pass-through options) splits
    # correctly; $MAYBE turns this into a dry-run echo when -n is given.
    eval $MAYBE $googlecode_upload "--project='$PROJECT'" "--labels='$LABELS'" "--summary='$summary'" $args "'$path'"
  fi
done
true
670899021932492f6a9fc51dd8aaf62b4ffdaaed
Shell
InstantWebP2P/nodejs-httpp
/appbld
UTF-8
624
3.609375
4
[ "LicenseRef-scancode-unicode", "NTP", "Artistic-2.0", "MIT", "CC0-1.0", "Zlib", "LicenseRef-scancode-public-domain-disclaimer", "BSD-3-Clause", "ISC", "NAIST-2003", "Apache-2.0", "LicenseRef-scancode-public-domain", "BSD-2-Clause", "ICU", "LicenseRef-scancode-unknown-license-reference", ...
permissive
#!/usr/bin/env bash # check docker image for building if [ "$(docker images -q httpp-bld:12.x 2> /dev/null)" == "" ]; then echo "Build Docker image as httpp-bld:12.x" docker image build --compress -t httpp-bld:12.x -f ./Dockerfile.bld . fi # create install dir mkdir -p appins # execute commands if [ $# -ne 0 ]; then echo "$@ ..." docker run -v `pwd`:`pwd` -w `pwd` -i -t \ --rm --network host \ httpp-bld:12.x $@ else echo "Start building ..." docker run -v `pwd`:`pwd` -w `pwd` -i -t \ --rm --network host \ httpp-bld:12.x bash -c "./configure --prefix=./appins/ && make -j 6 && make install" fi
true
54fd62c04d7e5aaab221f9cff96f582c618dbf41
Shell
ElliottSobek/C-Server-Collection
/single-HTTP/getpaths.bash
UTF-8
376
3.703125
4
[ "MIT" ]
permissive
#!/bin/bash getPaths() { if [ "$#" -lt 1 ]; then echo "Usage:" `basename "$0"` "<dir_names>" exit 1 fi local relative_wds=() for arg in "$@"; do if [ -d $arg ]; then local dirlist=`find -O3 lib -type f -name "*.c" -printf "%h\n" | uniq` for dir in $dirlist; do relative_wds+="$dir " done fi done echo $relative_wds return 0 } getPaths "$@"
true
ee7eaa50e335ed1b48d204fe3b233da53eddeb5d
Shell
rookiemonkey/git-cheat
/bin/gbupdate.sh
UTF-8
208
3.015625
3
[]
no_license
#!/bin/bash # FUNCTION: update a branch based on master branch # PARAMS: # - 1st: branch name git checkout $1; git merge origin/master; git push origin $1; echo "UPDATED $1 based on origin/master branch";
true
4d26d29a0542d1616e8c6a195bc9f9758a5ba9d0
Shell
dharmaxbum1/dotfiles
/config/shell/bash_profile
UTF-8
854
3.90625
4
[ "MIT", "LicenseRef-scancode-warranty-disclaimer" ]
permissive
#!/usr/bin/env bash # If not running interactively, don't do anything case $- in *i*) ;; *) return ;; esac [ -z "$PS1" ] && return # set default umask umask 002 # Build PATH and put /usr/local/bin before existing PATH export PATH="/usr/local/bin:$PATH:$HOME/bin" ### SOURCE BASH PLUGINS ### # Locations containing files *.bash to be sourced to your environment bashFileLocations=( "${HOME}/dotfiles/config/bash" "${HOME}/dotfiles-private/config/bash" ) for bashFileLocation in "${bashFileLocations[@]}"; do if [ -d "${bashFileLocation}" ]; then for config_file in ${bashFileLocation}/*.bash; do if [ -f "${config_file}" ]; then source "${config_file}" fi done fi done # Always list directory contents upon 'cd'. # (Somehow this always failed when I put it in a sourced file) cd() { builtin cd "$@" ll }
true
e73b890f82a484f44736254b64ab2bd553e2e1b1
Shell
ushinosuke/Documents
/recipes/recipe1.4_1.sh
UTF-8
498
3.390625
3
[]
no_license
#!/bin/sh check="" while [ -z "$check" ]; do echo "Enter your cat's name" echo " 1. p-chan" echo " 2. ushinosuke" echo " 56. komegoro" echo -n "Input (1,2,56) ? " read check case $check in 1) echo "DEBUNESU!" ;; 2) echo "Hiyokko-taiyo" ;; 3) echo "He's torajiro" ;; *) echo '**** Bad choice !' check="" ;; esac done
true
55a887b897ec96a2a8942fdca7ea8000f2ba4378
Shell
simonswine/docker-mysqldumper
/kube-mysqldumper-cron.sh
UTF-8
1,332
3.453125
3
[]
no_license
#!/bin/bash set -e job_namespace="mysql" job_name="mysqldumper" kubectl replace --force -f - > /dev/null <<EOF apiVersion: batch/v1 kind: Job metadata: name: $job_name namespace: $job_namespace spec: activeDeadlineSeconds: 7200 template: metadata: name: mysql-backup spec: containers: - name: mysql-backup image: simonswine/mysqldumper imagePullPolicy: Always env: - name: MYSQL_HOST value: mysql - name: MYSQL_USER value: root - name: BACKUP_DIR value: /_backup - name: MYSQL_PASSWORD valueFrom: secretKeyRef: name: mysql key: root.password volumeMounts: - mountPath: /_backup name: mysql-backup volumes: - name: mysql-backup <add your backup volume here> restartPolicy: Never EOF # wait for job to succeed tries=0 while true; do succeeded=$(kubectl get job --namespace=${job_namespace} ${job_name} --output=jsonpath={.status.succeeded}) if [[ $succeeded -eq 1 ]]; then break fi if [[ $tries -gt 3600 ]]; then echo "job timed out" kubectl describe job --namespace=${job_namespace} ${job_name} exit 1 fi tries=$((tries + 1)) sleep 1 done exit 0
true
2a3b559cd19fdfa7450f0e419e6d0e77ac00b013
Shell
bvaudour/core
/python2-systemd/PKGBUILD
UTF-8
603
2.578125
3
[]
no_license
pkgname=python2-systemd _pkgname=python-systemd pkgver=233 pkgrel=1 pkgdesc="Python module for native access to the systemd facilities." arch=('x86_64') license=('LGPLv2+') url="https://github.com/systemd/python-systemd" depends=('python2') makedepends=('python2-sphinx' 'python2-lxml' 'systemd') source=("https://github.com/systemd/python-systemd/archive/v${pkgver}.tar.gz") md5sums=('daa3ecd2c78c538cda7e8c9a7c7d8556') build() { cd ${_pkgname}-${pkgver} python2 setup.py build make #make sphinx-html } package() { cd ${_pkgname}-${pkgver} python2 setup.py install --root=${pkgdir}/ }
true
6efddae658214bd6ee58279d236817ca4d2cf2f9
Shell
Lollipop321/weight-distillation
/scripts/get_ende_bleu.sh
UTF-8
971
2.671875
3
[ "BSD-3-Clause" ]
permissive
#!/bin/bash decodes_file=$1 reference_file=$reference sed -i s'/@@ //g' $decodes_file #detokenize the decodes file to format the manner to do tokenize perl $detokenizer -l de < $decodes_file > $decodes_file.dtk #replace unicode perl $replace_unicode_punctuation -l de < $decodes_file.dtk > $decodes_file.dtk.punc #tokenize the decodes file by moses tokenizer.perl perl $tokenizer -l de < $decodes_file.dtk.punc > $decodes_file.dtk.punc.tok #"rich-text format" --> rich ##AT##-##AT## text format. perl -ple 's{(\S)-(\S)}{$1 ##AT##-##AT## $2}g' < $decodes_file.dtk.punc.tok > $decodes_file.dtk.punc.tok.atat #convert all quot '„' to $quot #cmd="python -u $conver_quot --i=$decodes_file.dtk.punc.tok.atat $decodes_file.dtk.punc.tok.atat.quot" #cp $decodes_file.dtk.punc.tok.atat $decodes_file.dtk.punc.tok.atat.quot #sed -i s'/„/\&quot;/g' $decodes_file.dtk.punc.tok.atat.quot #compute the bleu score perl $multi_bleu $reference_file < $decodes_file.dtk.punc.tok.atat
true
be2d3e0cef93488734f336f545b44eda07f7df89
Shell
jeonghanlee/Work
/FPGA/FPS_src/MitigationSrc/FPS_M_src/setSoftwareBuildDate.sh
UTF-8
161
2.953125
3
[]
no_license
#!/bin/sh set -ex SECONDS=`date -u '+%s'` ( echo "// MACHINE GENERATED -- DO NOT EDIT" echo "#define SOFTWARE_BUILD_DATE ${SECONDS}UL" ) >softwareBuildDate.h
true
133a642aa72d6a47da9d742380d368b51d3f3554
Shell
andreashergert1984/docker-solar-sis
/entry.sh
UTF-8
358
2.9375
3
[]
no_license
#!/bin/bash if [ -d /config ]; then echo "/config exists" if [ -d /config/solar-sis ]; then echo "config-dir exists. try to start" cd /config/solar-sis node project.js else echo "config-dir does not exist. copiing template" mkdir /config/solar-sis cp -Rv /root/PIP4084/* /config/solar-sis/ fi else echo "creating /config"; mkdir /config fi
true
ac15b8ef7a94af0799b04e20a65d9622dd732187
Shell
tgupta3/dotfiles
/dotfiles/config/polybar/launch.sh
UTF-8
615
3.234375
3
[]
no_license
#!/usr/bin/env sh # Terminate already running bar instances killall -q polybar # Wait until the processes have been shut down while pgrep -x polybar >/dev/null; do sleep 1; done #Wait until set monitor has finished while pgrep -f setOutput.py > /dev/null; do sleep 1; done #Write battery name to file ls /sys/class/power_supply | grep BAT > /tmp/battery # ls /sys/class/power_supply | grep AC > /tmp/AC if type "xrandr"; then for m in $(xrandr --query | grep "primary" | cut -d" " -f1); do MONITOR=$m polybar bar & done else polybar bar & fi # Launch bar #polybar bar & echo "Bars launched..."
true
51af60076e556236897524a8a6e283f746f38031
Shell
burvilc/kubernetes
/11-e2e-tests_on-controller-sonobuoy
UTF-8
682
2.640625
3
[]
no_license
#!/bin/bash #sudo add-apt-repository -y ppa:longsleep/golang-backports #sudo apt update -y #sudo apt install -y golang-go python3 python python-minimal python-pip gnupg unzip #sudo apt install -y golang-go #export GOPATH="${HOME}/go" #export PATH=$PATH:/usr/local/go/bin:$GOPATH/bin wget https://github.com/vmware-tanzu/sonobuoy/releases/download/v0.18.0/sonobuoy_0.18.0_linux_amd64.tar.gz tar zxvf sonobuoy_0.18.0_linux_amd64.tar.gz EXE=sonobuoy file ${EXE} sudo mv ${EXE} /usr/local/bin #go get -v -u github.com/vmware-tanzu/${EXE} which ${EXE} #sudo find $HOME -name ${EXE} ${EXE} run --mode=certified-conformance --wait RESULTS=$(${EXE} retrieve) ${EXE} e2e $RESULTS
true
4eb594783e8269e56c34872cd659d953e6837e0a
Shell
gj86/zram-config
/tests/test-zram-devices.bash
UTF-8
2,036
3.828125
4
[ "MIT" ]
permissive
#!/usr/bin/env bash BASEDIR="$(cd -P "$(dirname "${BASH_SOURCE[0]}")" && pwd)" check_zram_mounts() { while read -r line; do case "$line" in "#"*) # Skip comment line continue ;; "") # Skip empty line continue ;; *) # shellcheck disable=SC2086 set -- $line ZTYPE="$1" TARGET_DIR="$5" if [[ $ZTYPE == "swap" ]]; then if [[ "$(swapon | awk '/zram/ { print $1 }' | tr -d '0-9')" != "/dev/zram" ]]; then echo "Test failed: swap not on zram." zramctl --output-all return 1 fi elif [[ $ZTYPE == "dir" ]] || [[ $ZTYPE == "log" ]]; then if [[ "$(df "$TARGET_DIR" | awk '/overlay/ { print $1 }' | tr -d '0-9')" != "overlay" ]]; then echo "Test failed: overlay for '$TARGET_DIR' not found." zramctl --output-all return 1 fi fi ;; esac done < "${BASEDIR}/../ztab" } check_zram_removal() { while read -r line; do case "$line" in "#"*) # Skip comment line continue ;; "") # Skip empty line continue ;; *) # shellcheck disable=SC2086 set -- $line ZTYPE="$1" TARGET_DIR="$5" if [[ $ZTYPE == "swap" ]]; then if [[ "$(swapon | awk '/zram/ { print $1 }' | tr -d '0-9')" == "/dev/zram" ]]; then echo "Test failed: swap on zram." zramctl --output-all return 1 fi elif [[ $ZTYPE == "dir" ]] || [[ $ZTYPE == "log" ]]; then if [[ "$(df "$TARGET_DIR" | awk '/overlay/ { print $1 }' | tr -d '0-9')" == "overlay" ]]; then echo "Test failed: overlay for '$TARGET_DIR' found." zramctl --output-all return 1 fi fi ;; esac done < "${BASEDIR}/../ztab" } if [[ $1 == "removal" ]]; then check_zram_removal || exit 1 else check_zram_mounts || exit 1 fi
true
354f510b033cb93f9cc0f867a7073f1b8c0f8dee
Shell
lnls-dig/bpm-cfg
/04-fpga-configure/openocd-program.sh
UTF-8
391
3.140625
3
[]
no_license
#!/usr/bin/env bash # Script for flashing FPGAs # NOTE: By now, only AFCv4 is supported by openocd-prog-flash.sh set -auxo pipefail SCRIPTPATH="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" FPGA_PROGRAMMING_REPOS=${SCRIPTPATH}/../foreign/fpga-programming MCH=$1 BIN=$2 SLOT=$3 cd ${FPGA_PROGRAMMING_REPOS} ./openocd-prog-flash.sh ${BIN} afcv4_sfp xvc ${MCH} $((2540 + ${SLOT}))
true
899fcf01e46d07a18542484b222858f5f807ef4a
Shell
szepeviktor/debian-server-tools
/monitoring/syslog.sh
UTF-8
1,131
3.671875
4
[ "MIT" ]
permissive
#!/bin/sh # # Show colorized syslog without cron and imapd. # # VERSION :0.3.1 # DATE :2016-07-14 # AUTHOR :Viktor Szépe <viktor@szepe.net> # LICENSE :The MIT License (MIT) # URL :https://github.com/szepeviktor/debian-server-tools # BASH-VERSION :4.2+ # DEPENDS :apt-get install ccze # LOCATION :/usr/local/sbin/syslog.sh if [ "$1" = "-f" ]; then FOLLOW="1" else FOLLOW="0" fi LOG_OUTPUT="" if realpath /sbin/init | grep -q "systemd"; then if [ "$FOLLOW" = "1" ]; then LOG_SOURCE="journalctl -n 300 -f" else LOG_SOURCE="journalctl" LOG_OUTPUT="less -r" fi else if [ "$FOLLOW" = "1" ]; then LOG_SOURCE="tail -n 300 -f /var/log/syslog" else LOG_SOURCE="cat /var/log/syslog" LOG_OUTPUT="less -r" fi fi ${LOG_SOURCE} \ | grep -E --line-buffered --invert-match '(imapd|CRON)(\[[0-9]+\])?:' \ | if [ -z "$LOG_OUTPUT" ]; then # ccze (or cat?) holds back some lines with "ccze | cat" ccze --mode ansi --plugin syslog else eval "ccze --mode ansi --plugin syslog | ${LOG_OUTPUT}" fi
true
46eed399ad48b41f2cc147942ee1e480c27ca3c8
Shell
cjhanno/CISC220_3
/luckyNumbers.sh
UTF-8
612
3.21875
3
[]
no_license
#!/bin/bash for (( n=1000; n<=10000; n++)); do length="${#n}" j=0 for ((i=0; i<length; i++)); do k="${n:i:1}" j=$(($j + $k)) if (($1 + 1 == $length)); then if (($j==7)); then echo $n " is a special number" elif (($j < 15)); then break else lenght2="${#j}" x=0 for ((y=0; y<length2; y++)); do z="${j:y:1}" x=${{$x + $z}} if (($y + 1 == $length2)); then if(($x == 7)); then echo $n " is a special number!" fi fi done fi fi done done
true
7b2fab2f54d50aef5a3585c6ba8110e25bb9a67a
Shell
kaminow/ConsDB_analysis
/scripts/make_full_pers_vcfs.sh
UTF-8
639
3.375
3
[]
no_license
#!/bin/bash individ=$1 id_table=$2 vcf_out_dir=$3 awk_script=$4 individ_dir=$(grep -F $individ $id_table | cut -d, -f 4) mkdir -p ${vcf_out_dir}/${individ} for fn in ${individ_dir}/ALL.chr*.vcf.gz; do c=$(grep -oP 'chr.*?(?=\.)' <<< $fn) echo $c fn_out=${vcf_out_dir}/${individ}/${c}.vcf zcat $fn | awk -v samp=${individ} -f $awk_script | \ sed -r 's/^([^#])/chr\1/' > $fn_out & done wait fn_out=${vcf_out_dir}/${individ}/${individ}.vcf cp ${vcf_out_dir}/${individ}/chr1.vcf $fn_out for c in {2..22} X; do grep -v '^#' ${vcf_out_dir}/${individ}/chr${c}.vcf >> $fn_out done rm ${vcf_out_dir}/${individ}/chr*.vcf
true
08233be86ce47494eb699f94bae94309b86ca535
Shell
slushman/slushhost
/initialsetup.sh
UTF-8
2,344
3.921875
4
[ "MIT" ]
permissive
#!/bin/bash scriptloop="y" while [ "$scriptloop" = "y" ]; do echo -e "" echo -e "" echo -e "Server Setup:" echo -e "" echo -e "1 - Add SSH keys" echo -e "2 - Create git Keys" echo -e "3 - Check github connection" echo -e "4 - Build, install, and configure git" echo -e "5 - Clone slushhost github repo" echo -e "" echo -e "q - Exit Installers" echo -e "" echo -e "Please enter NUMBER of choice (example: 3):" read choice case $choice in 1) sudo mkdir -p ~/.ssh echo -e "Paste in the contents of the id_rsa.pub file from your local computer." read -p "Hit enter to paste the SSH key." sudo vi ~/.ssh/authorized_keys ;; 2) sudo chown $USER /home/$USER ~/.ssh ~/.ssh/authorized_keys sudo chmod go-w /home/$USER ~/.ssh ~/.ssh/authorized_keys read -p "Please enter your email address: " email sudo ssh-keygen -f ~/.ssh/id_rsa -t rsa -C "$email" -N '' sudo chown $USER:$USER ~/.ssh/id_rsa sudo chown $USER:$USER ~/.ssh/id_rsa.pub sudo chmod 0700 ~/.ssh/id_rsa sudo chmod 0700 ~/.ssh/id_rsa.pub echo -e "Copy the following SSH key" echo -e "Go to Github, click the Edit Your Profile button, and go to SSH keys" echo -e "Click the Add SSH Key button, paste in the SSH key and give it a name." echo -e "Click the Add Key button." echo -e "When your finished, on server, press escape, then Shift ZZ to save and exit the file." read -p "Hit enter to see the SSH key." 
sudo vi ~/.ssh/id_rsa.pub ;; 3) ssh -T git@github.com ;; 4) sudo yum -y groupinstall "Development Tools" sudo yum -y install zlib-devel perl-ExtUtils-MakeMaker asciidoc xmlto openssl-devel wget -O git.zip https://github.com/git/git/archive/master.zip unzip git.zip cd git-master make configure ./configure --prefix=/usr/local make all doc sudo make install install-doc install-html read -p "Please enter the user name to use for git: " gituser read -p "Please enter the email address to use for git: " gitemail sudo git config --global user.name "$gituser" sudo git config --global user.email "$gitemail" sudo git config --list ;; 5) cd git clone git@github.com:slushman/slushhost.git sudo chmod +x slushhost/installs.sh sudo chmod +x slushhost/managesites.sh sudo chmod +x slushhost/selinux.sh sudo mv -f slushhost/bash_profile.txt ~/.bash_profile ./slushhost/installs.sh scriptloop="n" ;; q) scriptloop="n" ;; *) echo - "Unknown choice! Exiting..." ;; esac done
true
c90d15e1a18bc472a8a66a426627cd1c14a24fba
Shell
roshal/dotfiles
/user/.shell/action.sh
UTF-8
2,724
3
3
[]
no_license
source "${HOME}/.shell/actions.sh" source "${HOME}/.shell/actions/audio.sh" source "${HOME}/.shell/actions/bluetooth.sh" ARGUMENTS=("${@}") function is () { test -n "${STOP}" && exit || test "${1}" = "${ARGUMENTS[0]}" && STOP=TRUE } function ok () { "action--${1-${ARGUMENTS[0]}}" } ### audio is 'audio--notify' && ok is 'audio--volume' && ok audio--get--volume > /-/mako/pulsemixer ### audio profile is 'audio--profile--alsa--analog' && ok is 'audio--profile--alsa--analog--duplex' && ok is 'audio--profile--alsa--hdmi' && ok is 'audio--profile--alsa--hdmi--duplex' && ok is 'audio--profile--alsa--off' && ok is 'audio--profile--bluetooth--headset--a2dp' && ok is 'audio--profile--bluetooth--headset--headset' && ok is 'audio--profile--bluetooth--headset--off' && ok is 'audio--profile--bluetooth--speaker--a2dp' && ok is 'audio--profile--bluetooth--speaker--headset' && ok is 'audio--profile--bluetooth--speaker--off' && ok ### audio source is 'audio--source--decrease' && ok is 'audio--source--increase' && ok is 'audio--source--mute' && ok is 'audio--source--play' && ok is 'audio--source--reset' && ok is 'audio--source--toggle' && ok ### audio volume is 'audio--volume--decrease' && ok && ok audio--notify is 'audio--volume--increase' && ok && ok audio--notify is 'audio--volume--mute' && ok is 'audio--volume--play' && ok is 'audio--volume--reset' && ok && ok audio--notify is 'audio--volume--toggle' && ok ### bluetooth is 'bluetooth--headset--connect' && ok is 'bluetooth--headset--disconnect' && ok is 'bluetooth--speaker--connect' && ok is 'bluetooth--speaker--disconnect' && ok is 'bluetooth--power--on' && ok is 'bluetooth--power--no' && ok ### bluetooth headset is 'bluetooth--profile--headset--disable' && ok is 'bluetooth--profile--headset--headset' && ok is 'bluetooth--profile--headset--speaker' && ok ### bluetooth speaker is 'bluetooth--profile--speaker--disable' && ok is 'bluetooth--profile--speaker--headset' && ok is 'bluetooth--profile--speaker--speaker' && ok ### grim is 
'grim--output-path' && ok is 'grim--output--wl-copy' && ok grim--output | ok wl-copy ### screen capture is 'wf-recorder' && ok ### screen shotting is 'slurp--grim--wl-copy' && ok slurp | ok grim | ok wl-copy is 'slurp--grim-path' && ok slurp | ok is 'slurp--grim-tesseract' && ok slurp | ok is 'slurp--notify' && ok slurp--print | ok xargs--notify ### sway is 'sway--keyboard-layout' && ok is 'sway--node--grim--wl-copy' && ok sway--tree--node | ok grim | ok wl-copy is 'sway--node--grim-path' && ok sway--tree--node | ok grim--path is 'sway--output--carry' && ok is 'sway--output--focus' && ok is 'sway--output--switch' && ok ### tray is 'nm-applet--killall' && ok is 'nm-applet--restart' && ok
true
a5ba9e02a202051c1537898fae8e84df17d25e61
Shell
mattstruble/git-owo
/git-owo-owoer
UTF-8
1,201
3.734375
4
[ "MIT" ]
permissive
#!/bin/bash # Copyright (c) 2018 Matt Struble RANDOM=$$$(date +%s) FACES=("owo" "UwU" ">w<" "^w^") BUG=("fucksy" "fucksy wucksie" "spooky" "spooky wooky") FIXED=("fixy" "fixie wixied") IMPROVED=("streamylined") declare -A LOOKUP LOOKUP["ISSUE"]=$BUG LOOKUP["BUG"]=$BUG LOOKUP["FIXED"]=$FIXED LOOKUP["IMPROVED"]=$IMPROVED usage() { echo "usage: git owo owoer -m <message>" echo } owoText() { local str="" local n=$# local i=1 for word in "$@" do RANDOM=$$$(date +%s) val=$word if [ ${LOOKUP[${val^^}]} ]; then ARR=${LOOKUP[${val^^}]} val=${ARR[$RANDOM % ${#ARR[@]} ]} else val=$(echo $val | \ sed -e 's/\(r\|l\)/w/g' \ -e 's/\(R\|L\)/W/g' \ -e 's/n\([aeiou]\)/ny\1/g' \ -e 's/N\([aeiou]\)/Ny\1/g' \ -e 's/N\([AEIOU]\)/Ny\1/g' \ -e 's/ove/uv/g') fi val=$(echo $val | sed -e "s/\!\+/\ ${FACES[$RANDOM % ${#FACES[@]} ]}/g") if [ $i -lt $n ]; then str="$str$val " else str="$str$val" fi i=$((i+1)) done # "Return" the translation echo "$str" } cmd_owoer() { if [ "$#" -lt 2 ]; then usage; exit 1 fi while getopts ":m:" opt; do case "${opt}" in m) owoText ${OPTARG} ;; *) usage ;; esac done }
true
b3a690dd0e1768cdf6cddfe47581252b53459437
Shell
sbohoslavskyi/rapidoc-docker
/rapidoc
UTF-8
1,106
3.796875
4
[]
no_license
#!/bin/bash function install { echo 'Installing...' rm -rf vendor git clone https://github.com/poadoc/RapiDoc.git vendor cd vendor git checkout fix-advance-search cd .. docker-compose build docker-compose up --detach } function rebuild { echo 'Rebuilding...' docker-compose down -v docker-compose up --detach --build } function start { docker-compose start } function stop { docker-compose stop } function help { echo "Rapidoc installing script." echo echo "Syntax: rapidoc [install|start|stop|version|help]" echo "options:" echo "install Build docker containers." echo "rebuild Rebuild docker containers." echo "start Start docker containers and services." echo "stop Stop docker containers and services." echo "help Print this Help." } case "$1" in install) install ;; rebuild) rebuild ;; start) start ;; stop) stop ;; help) help ;; *) echo 'Unknown command.' ;; esac
true
0b4c66e4526e84e5dbc29efbcbcc6f2bfe162cef
Shell
lbrayner/shell_scripts
/move_windows
UTF-8
1,045
4.09375
4
[]
no_license
#!/usr/bin/env bash # Andrzej Piszczek's script to move windows to another monitor # https://askubuntu.com/a/1086024 print_usage() { echo "$(basename "${0}") [-h] TITLE MONITOR" } while getopts :h opt do case ${opt} in h) print_usage exit ;; \?) echo "Invalid option: -${OPTARG}" 1>&2 print_usage >&2 exit 1 ;; esac done shift $((OPTIND - 1)) if [[ ! ${#} -eq 2 ]] then print_usage >&2 exit 1 fi windows=$(wmctrl -l | grep "${1}" | cut -d" " -f1) if [[ -z "${windows}" ]] then echo "No windows found!" >&2 exit 1 fi monitor=$(xrandr | grep "^${2}" | cut -d"+" -f2) if [[ -z "${monitor}" ]] then echo "No such monitor!" >&2 exit 1 fi for window in ${windows} do wmctrl -ir ${window} -b remove,maximized_vert wmctrl -ir ${window} -b remove,maximized_horz wmctrl -ir ${window} -e 0,${monitor},0,1920,1080 wmctrl -ir ${window} -b add,maximized_vert wmctrl -ir ${window} -b add,maximized_horz done
true
2fd158f0c43dca5fcfbced102e16aa72526a59bf
Shell
jacob1174/ArchOpenBox
/install/C1-install-themes-icons-cursors-conky.sh
UTF-8
14,155
2.75
3
[]
no_license
#!/bin/bash # set -e # ################################################################################################################## # # Written to be used on 64 bits computers # # Author : Jacob Lutz # # Website : jlutz152@gmail.com # ################################################################################################################## # ################################################################################################################## # # # # DO NOT JUST RUN THIS. EXAMINE AND JUDGE. RUN AT YOUR OWN RISK. # # # ################################################################################################################## package="ttf-google-fonts-git" #---------------------------------------------------------------------------------- #checking if application is already installed or else install with aur helpers if pacman -Qi $package &> /dev/null; then echo "################################################################" echo "################## "$package" is already installed" echo "################################################################" else #checking which helper is installed if pacman -Qi yay &> /dev/null; then echo "Installing with yay" yay -S --noconfirm $package fi # Just checking if installation was successful if pacman -Qi $package &> /dev/null; then echo "################################################################" echo "######### "$package" has been installed" echo "################################################################" else echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" echo "!!!!!!!!! "$package" has NOT been installed" echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" 
fi fi package="ttf-font-awesome" #---------------------------------------------------------------------------------- #checking if application is already installed or else install with aur helpers if pacman -Qi $package &> /dev/null; then echo "################################################################" echo "################## "$package" is already installed" echo "################################################################" else #checking which helper is installed if pacman -Qi yay &> /dev/null; then echo "Installing with yay" yay -S --noconfirm $package fi # Just checking if installation was successful if pacman -Qi $package &> /dev/null; then echo "################################################################" echo "######### "$package" has been installed" echo "################################################################" else echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" echo "!!!!!!!!! "$package" has NOT been installed" echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" fi fi package="noto-fonts" #---------------------------------------------------------------------------------- #checking if application is already installed or else install with aur helpers if pacman -Qi $package &> /dev/null; then echo "################################################################" echo "################## "$package" is already installed" echo "################################################################" else #checking which helper is installed if pacman -Qi yay &> /dev/null; then echo "Installing with yay" yay -S --noconfirm $package fi # Just checking if installation was successful if pacman -Qi $package &> /dev/null; then echo "################################################################" echo "######### "$package" has been installed" echo "################################################################" else echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" echo "!!!!!!!!! 
"$package" has NOT been installed" echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" fi fi package="ttf-roboto" #---------------------------------------------------------------------------------- #checking if application is already installed or else install with aur helpers if pacman -Qi $package &> /dev/null; then echo "################################################################" echo "################## "$package" is already installed" echo "################################################################" else #checking which helper is installed if pacman -Qi yay &> /dev/null; then echo "Installing with yay" yay -S --noconfirm $package fi # Just checking if installation was successful if pacman -Qi $package &> /dev/null; then echo "################################################################" echo "######### "$package" has been installed" echo "################################################################" else echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" echo "!!!!!!!!! "$package" has NOT been installed" echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" 
fi fi # package="arc-faenza-icon-theme" # #checking if application is already installed or else install with aur helpers # if pacman -Qi $package &> /dev/null; then # echo "################################################################" # echo "################## "$package" is already installed" # echo "################################################################" # else # #checking which helper is installed # if pacman -Qi yay &> /dev/null; then # echo "Installing with yay" # yay -S --noconfirm $package # fi # # Just checking if installation was successful # if pacman -Qi $package &> /dev/null; then # echo "################################################################" # echo "######### "$package" has been installed" # echo "################################################################" # else # echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" # echo "!!!!!!!!! "$package" has NOT been installed" # echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" # fi # fi package="la-capitaine-icon-theme" #checking if application is already installed or else install with aur helpers if pacman -Qi $package &> /dev/null; then echo "################################################################" echo "################## "$package" is already installed" echo "################################################################" else #checking which helper is installed if pacman -Qi yay &> /dev/null; then echo "Installing with yay" yay -S --noconfirm $package fi # Just checking if installation was successful if pacman -Qi $package &> /dev/null; then echo "################################################################" echo "######### "$package" has been installed" echo "################################################################" else echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" echo "!!!!!!!!! 
"$package" has NOT been installed" echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" fi fi package="papirus-icon-theme-git" #---------------------------------------------------------------------------------- #checking if application is already installed or else install with aur helpers if pacman -Qi $package &> /dev/null; then echo "################################################################" echo "################## "$package" is already installed" echo "################################################################" else #checking which helper is installed if pacman -Qi yay &> /dev/null; then echo "Installing with yay" yay -S --noconfirm $package fi # Just checking if installation was successful if pacman -Qi $package &> /dev/null; then echo "################################################################" echo "######### "$package" has been installed" echo "################################################################" else echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" echo "!!!!!!!!! "$package" has NOT been installed" echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" 
fi fi package="paper-icon-theme-git" #---------------------------------------------------------------------------------- #checking if application is already installed or else install with aur helpers if pacman -Qi $package &> /dev/null; then echo "################################################################" echo "################## "$package" is already installed" echo "################################################################" else #checking which helper is installed if pacman -Qi yay &> /dev/null; then echo "Installing with yay" yay -S --noconfirm $package fi # Just checking if installation was successful if pacman -Qi $package &> /dev/null; then echo "################################################################" echo "######### "$package" has been installed" echo "################################################################" else echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" echo "!!!!!!!!! "$package" has NOT been installed" echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" 
fi fi package="breeze-snow-cursor-theme" #---------------------------------------------------------------------------------- #checking if application is already installed or else install with aur helpers if pacman -Qi $package &> /dev/null; then echo "################################################################" echo "################## "$package" is already installed" echo "################################################################" else #checking which helper is installed if pacman -Qi yay &> /dev/null; then echo "Installing with yay" yay -S --noconfirm $package fi # Just checking if installation was successful if pacman -Qi $package &> /dev/null; then echo "################################################################" echo "######### "$package" has been installed" echo "################################################################" else echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" echo "!!!!!!!!! "$package" has NOT been installed" echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" fi fi # echo"########################################" # echo"######## Conky Themes #########" # echo"########################################" # # if there is already a folder in tmp, delete or else do nothing # [ -d /tmp/aureola ] && rm -rf "/tmp/aureola" || echo "" # # download the github in folder /tmp/aureola # echo "################################################################" # echo "Checking if git is installed" # echo "Install git for an easy installation" # # G I T # # check if git is installed # if which git > /dev/null; then # echo "git was installed. Proceding..." 
# else # echo "################################################################" # echo "installing git for this script to work" # echo "################################################################" # sudo pacman -S git --noconfirm # fi # echo "################################################################" # echo "Downloading the files from github to tmp directory" # rm -rf /tmp/aureola # git clone https://github.com/erikdubois/Aureola /tmp/aureola # # if there is already a folder in tmp, delete or else do nothing # [ -d ~/.aureola ] && rm -rf ~/.aureola # mv -f /tmp/aureola ~/.aureola # rm -rf /tmp/aureola # echo "################################################################" # echo "################### aureola installed ######################" # echo "################################################################" # echo "################################################################" # echo "################### Sardi Icons Installing ################" # echo "################################################################" # # cleaning tmp # [ -d /tmp/sardi ] && rm -rf /tmp/sardi # # if there is no hidden folder then make one # [ -d $HOME"/.icons" ] || mkdir -p $HOME"/.icons" # wget -O /tmp/sardi.tar.gz "https://sourceforge.net/projects/sardi/files/latest/download?source=files" # mkdir /tmp/sardi # tar -zxf /tmp/sardi.tar.gz -C /tmp/sardi # rm /tmp/sardi.tar.gz # cp -rf /tmp/sardi/* ~/.icons/ # # cleaning tmp # [ -d /tmp/sardi ] && rm -rf /tmp/sardi # echo "################################################################" # echo "################### Sardi icons done ######################" # echo "################################################################" echo "################################################################" echo "################### Installing Plank themes ##################" echo "################################################################" # if there is a folder, delete it [ -d /tmp/Plank-Themes ] && rm -rf 
/tmp/Sardi-Extra #download from github git clone https://github.com/erikdubois/Plank-Themes /tmp/Plank-Themes #remove some of the files find /tmp/Plank-Themes -maxdepth 1 -type f -exec rm -rf '{}' \; # if there is no hidden folder then make one [ -d $HOME"/.local/share/plank" ] || mkdir -p $HOME"/.local/share/plank" # if there is no hidden folder then make one [ -d $HOME"/.local/share/plank/themes" ] || mkdir -p $HOME"/.local/share/plank/themes" # copy the files cp -r /tmp/Plank-Themes/* ~/.local/share/plank/themes/ # remove files from tmp rm -rf /tmp/Plank-Themes echo "################################################################" echo "################### plank themes installed ##################" echo "################################################################" echo "################################################################" echo "############# eye candy software installed #################" echo "################################################################"
true
9665ce46067012c2257110696a62962bec344809
Shell
czuger/saga-campaign
/work/server_side/start_db_docker.sh
UTF-8
233
2.953125
3
[ "MIT" ]
permissive
#!/usr/bin/env bash if [[ -z "$1" ]] then echo 'Provide the database password as an argument' exit fi docker run --name postgres10 -e POSTGRES_PASSWORD=$1 -v /tmp/pg_10_socket/:/var/run/postgresql -d postgres:10.12-alpine
true
9a40f0340b48bbce46596572a6cb4f05afecfe1e
Shell
lvlnd/java-overlay
/www-servers/jboss-bin/files/4.0.5/srvdir/bin/jboss-bin-4-profiles-creator.sh
UTF-8
13,741
3.4375
3
[]
no_license
#!/bin/sh #License: GPL2 #author: kiorky kiorky@cryptelium.net PATH="${PATH}:/usr/lib/portage/bin" source /etc/init.d/functions.sh debug="false" JBOSS_VERSION="jboss-bin-4" jboss_path="/opt/${JBOSS_VERSION}" action="help" # defaults srvdir="/srv" default_vhost="localhost" default_path="${srvdir}/${default_vhost}/${JBOSS_VERSION}" default_vhost_path="${srvdir}/${default_vhost}/${JBOSS_VERSION}" default_profile="${default_vhost_path}/gentoo" # initialize stuff profile="${default_profile}" vhost="${default_vhost}" path="${default_path}" vhost_path="${default_vhost_path}" name="${default_profile}" CONFDIR="/etc/${JBOSS_VERSION}" TMPDIR="/var/tmp/${JBOSS_VERSION}" CACHEDIR="/var/cache/${JBOSS_VERSION}" RUNDIR="/var/run/${JBOSS_VERSION}" LOGDIR="/var/log/${JBOSS_VERSION}" forbidden_to_install_in="/ /bin /include /lib /sbin /usr/bin /usr/include /usr/lib /usr/sbin" XARGS="/usr/bin/xargs" # error management # usage: do_error "theerror" ARGS # read the code as it is enought explicit to use # some errors can take arguments !! do_error(){ eerror case $1 in "profile_creation_forbidden") eerror "Please specify another location" eerror " Creating profiles in \"$2\" is forbidden !!!" ;; "profile_file_exists") eerror "Profile is even created ?" 
eerror " File \"$3\" exists in \"$2\" directory" ;; "path_invalid_path") eerror "Invalid path: $HILITE $2" ;; "profile_invalid_subdir") eerror "Invalid profile" eerror " Invalid JBOSS Servers subdir: $HILITE $2" ;; "profile_invalid_full_path") eerror "Invalid profile" eerror " Invalid full_path: $HILITE $2" ;; "argument_invalid_args") eerror " You must specify --KEY=VALUE for your arguments" ;; "profile_invalid_profile") eerror "Profile is invalid" eerror " subdir for this profile is missing: $HILITE $2" ;; "path_no_path_given") eerror "Please specify where you want to install your profile" ;; "argument_no_arg") eerror "Please give Arguments" ;; "action_create_cant_create_dir") eerror "Can't create profile directory" exit -1 ;; "action_help") eerror "Help wanted ?" eerror;usage;exit ;; "profile_file_exists") eerror "Profile exists: $HILITE $2" ;; "delete_no_profile") eerror "Invalid profile to delete: $HILITE $2" ;; "path_invalid_scope") error "--path argument is invalid in this scope: $HILITE $2" ;; "vhost_invalid_vhost") eerror "Please specify a valid vhost" eerror " Vhost given: $2" ;; "path_not_exists") eerror "Please specify a valid final path" eerror " Final profile path doest not exist: $HILITE $2" ;; *) eerror usage exit # not error there !!! 
esac eerror "Please run for help:" eerror " $HILITE$0 help" exit -1 } # print usage usage(){ einfo einfo "$BRACKET Usage: " einfo "$HILITE JBoss profile Manager" einfo einfo einfo "$BRACKET $0: action [ACTION_OPTIONS]" einfo "valid options are:" einfo "$HILITE delete" einfo " * Delete a profile" einfo " * You can get profiles with list" einfo "$HILITE list" einfo " * List actual profiles" einfo "$HILITE create" einfo " * Create a new profile" einfo "$HILITE h" einfo "$HILITE help" einfo " * print this helper" einfo einfo "Valid arguments are:" einfo "$HILITE --profile=serverdir_template" einfo " * the name of the template to use to create the new profile with --create" einfo " * the name of the profile to delete with --delete" einfo " Default is 'gentoo'" einfo "$HILITE --path=/path/to/profile_to_create SCOPE:create" einfo " * don't use the leading / for a subdir of ${INSTALL_DIR}/server" einfo " * indicate the full location to other wanted location" einfo "$HILITE --vhost=VHOST" einfo " * Set the vhost (default is 'localhost')" einfo " * Must exist a valid /srv/VHOST subdir" einfo einfo "$BRACKET TIPS:" einfo " For create and delete, you must give the profile's name" einfo einfo "$BRACKET Examples" einfo " $0 create --profile=gentoo --path=/opt/newprofile" einfo " A new profile will be created in /opt/newprofile using default_vhost/gentoo as a template" einfo " A symlick in /srvdir/defaultvhost/jbossversion/newprofile will be done" einfo " $0 create --profile=gentoo --path=newprofile" einfo " A new profile will be created in default vhost using srvdir/defaultvhost/jbossversion/igentoo as a template" einfo " $0 --delete --profile=gentoo" einfo " the 'gentoo' profile in default vhost will be deleted" einfo } # list actual profiles # $1:vhost # $2:vhost path list_profiles() { vhost=$1 vhost_path=$2 if [[ $debug == "true" ]];then einfo "list_profiles: vhost: $vhost" einfo "list_profiles: vhost_path: $vhost_path" fi einfo "Installed profiles in ${vhost} :" for i in 
$(ls -d ${vhost_path}/* ) ;do if [[ -L "$i" ]];then einfo "$HILITE $(echo $i|sed -re "s:$vhost_path/*::g")" einfo " Server subdir: $i" einfo " Real path: $(ls -dl "$i" | awk -F " " '{print $11 }')" else einfo "$HILITE $i" fi done; } # verify if the vhost direcotry is created # exit and display error on failure # $1: vhost to verify verify_vhost(){ if [[ -d ${srvdir}/$1 ]];then vhost="$1" vhost_path="${srvdir}/$1/${JBOSS_VERSION}" else do_error "vhost_invalid_vhost" $1 fi [[ ${debug} == "true" ]] && einfo "verify_vhost: vhost : $vhost"\ && einfo "verify_vhost: vhost_path: $vhost_path" } # verify if this path (for creation) is valid # set the adequat variables # exit on fails with error display # $1: the path to verify verify_path(){ local value=$1 if [[ ${action} == "create" ]];then local l_name # remove final slash if one value=$(echo ${value}|sed -re "s/(\/*[^\/]+)\/*$/\1/") # is there a profile or a full path if [[ ${value:0:2} == "./" ]];then # if relative getting full value="$(pwd|sed -re "s:(.*)/*$:\1/:")$(echo ${value}|sed -re "s:\./::g")" fi if [[ ${value:0:1} == "/" ]];then is_subdir=0 else # if profile, verify that s the name doesnt contains any other path [[ $(echo ${value}|grep "/" |grep -v grep|wc -l ) -gt 0 ]] \ && do_error "profile_invalid_subdir" ${value} value=${vhost_path}/${value} is_subdir=1 fi for forbidden in ${forbidden_to_install_in};do if [[ $(echo ${value}|sed -re "s:^($forbidden):STOP:") == "STOP" ]];then do_error "profile_creation_forbidden" ${forbidden} fi done # if final directory is even created # we control that we do not overwrite an existing profile if [[ -d ${value} || -L ${value} ]];then for i in conf data lib run tmp deploy;do [[ -e ${value}/$i ]] && do_error "profile_file_exists" "${value}" "$i" done fi #if fullpath, check that the name doesnt exists name="$(echo ${value}|sed -re "s:(.*/)([^/]*)($):\2:")" [[ -e ${default_path}/${name} ]] && do_error "profile_file_exists" ${name} # clean variables # remove final slash if one 
path="${value}" path=$(echo ${path}|sed -re "s/\/*$//") else do_error "path_invalid_scope" ${action} fi if [[ ${debug} == "true" ]];then einfo "verify_path: path: $path" einfo "verify_path: name: $name" [[ ${is_subdir} != "1" ]] && einfo "verify_path: symlick in: ${vhost_path}/${name}" fi } # verfiry a jboss profile # $1 : profile name # exit and print usage if profile is invalid # continue either verify_profile() { local value=$1 if [[ ${value:0:1} == "/" || ${value:0:2} == "./" ]];then #full or relative path is given if [[ -e ${value} ]]; then profile="${value}" else do_error "profile_invalid_full_path" ${value} fi # subdir given elif [[ -e ${vhost_path}/${value} ]];then profile="${vhost_path}/$value" else do_error "profile_invalid_subdir" ${value} fi for i in conf lib deploy;do if [[ ! -e ${profile}/$i ]];then do_error "profile_invalid_profile" $i fi done # clean variables # remove final slash if one profile=$(echo ${profile}|sed -re "s/\/*$//") [[ ${debug} == "true" ]] && einfo "verify_profile: profile: $profile" } # adds ".keep" files so that dirs aren't auto-cleaned keepdir() { mkdir -p "$@" local x if [ "$1" == "-R" ] || [ "$1" == "-r" ]; then shift find "$@" -type d -printf "%p/.keep_www-server_jboss-bin_4\n" |\ tr "\n" "\0" | $XARGS -0 -n100 touch ||\ die "Failed to recursive create .keep files" else for x in "$@"; do touch "${x}/.keep_www-server_jboss-bin_4" ||\ die "Failed to create .keep in ${D}/${x}" done fi } # parse command lines arguments parse_cmdline() { local arg value # parse and validate arguments for param in ${@};do case ${param} in "-v"|"-verbose"|"--v") debug="true" # echo "Setting verbose to true: $debug" ;; *) if [[ $(echo ${param} | sed -re "s/--.*=..*/GOOD/g" ) != "GOOD" ]]; then do_error "argument_invalid_args" fi arg=$(echo ${param} | sed -re "s/(--)(.*)(=.*)/\2/g") value=$(echo ${param} | sed -re "s/(.*=)(.*)/\2/g") case "$arg" in "profile") profile=${value} ;; "path") path=${value} ;; "vhost") vhost=${value} ;; esac ;; esac done } # 
delete a profile # $1: profile name # $2: vhost to use # $3: vhost path delete_profile(){ profile=$1 vhost=$2 vhost_path=$3 # contructing path to delete path="${vhost_path}/${profile}" local l_profile="${vhost}/${profile}" if [[ $debug == "true" ]];then einfo "delete_profile: profile: $profile" einfo "delete_profile: vhost: $vhost" einfo "delete_profile: vhost_path: $vhost_path" einfo "delete_profile: path: $path" einfo "delete_profile: l_profile: $l_profile" fi # if symlick getting real path if [[ -L ${path} ]];then path="$(ls -dl "${path}" | awk -F " " '{print $11 }')" # else nothing elif [[ -d ${path} ]];then echo>>/dev/null # if not a symlick or a direcotry, something weird, we exit ! else do_error "delete_no_profile" $profile fi ewarn "Deleting profile: $HILITE ${profile}" ewarn "In vhost: $HILITE ${vhost}" ewarn "Path: $HILITE ${path}" print_yes_no # delete if symlick [[ -L ${vhost_path}/${name} ]] && echo rm -rf ${default_path}/${name} # delete run files rm -rf ${TMPDIR}/${l_profile}\ ${CACHEDIR}/${l_profile}\ ${RUNDIR}/${l_profile}\ ${LOGDIR}/${l_profile}\ ${CONFDIR}/${l_profile}\ ${path} \ ${CONFDIR}/${l_profile} } # create the profile # $1: vhost to install into # $2: profile # $3: path to install # $4: name of this profile # $5: subdir of jboss if 1 / full path if 0 create_profile(){ vhost=$1;profile=$2;path=$3;name=$4;is_subdir=$5 local l_profile="${vhost}/${name}" # if default arguments are given if [[ ${path} == "${default_path}" ]];then do_error "path_no_path_given" fi ewarn "Creating profile in ${path}" ewarn "Using ${profile} profile" # do base direcotries keepdir ${TMPDIR}/${l_profile}\ ${CACHEDIR}/${l_profile}\ ${RUNDIR}/${l_profile}\ ${LOGDIR}/${l_profile}\ ${CONFDIR}/${l_profile} # create directory mkdir -p ${path} || do_error "action_create_cant_create_dir" # copy profile for i in conf deploy lib;do cp -rf ${profile}/$i ${path}/ done # do runtime files stuff ln -s ${LOGDIR}/${l_profile} ${path}/logs ln -s ${CACHEDIR}/${l_profile} ${path}/data 
ln -s ${TMPDIR}/${l_profile} ${path}/tmp ln -s ${RUNDIR}/${l_profile} ${path}/run # do /etc stuff ln -s ${path}/conf ${CONFDIR}/${l_profile}/conf ln -s ${path}/deploy/jbossweb-tomcat55.sar/server.xml ${CONFDIR}/${l_profile} # if we don't create in jboss directory, link the profile in jboss servers dir [[ is_subdir -eq 0 ]] && ln -s ${path} ${vhost_path}/${name} # fix perms for i in ${TMPDIR}/${l_profile} ${CACHEDIR}/${l_profile} \ ${RUNDIR}/${l_profile} ${LOGDIR}/${l_profile} \ ${CONFDIR}/${l_profile} ${CONFDIR}/${l_profile} \ ${path};do chmod -Rf 755 $i; chown -R jboss:jboss $i; done } # print collected informations # $1: subdir of jboss if 1 / full path if 0 confirm_creation() { ewarn "Jboss profile manager for : $HILITE ${name}" if [[ $1 -eq 0 ]];then WHERE="directory" else WHERE="vhost subdir" fi ewarn "Installing in ${WHERE}:" ewarn " $HILITE${path} " ewarn "Using profile: " ewarn " $HILITE${profile} " } # print a yes_no like form # exit on failure / no # continue if yes print_yes_no(){ local i nb nok="nok"; while [[ nok == "nok" ]];do [[ $nb -gt 12 ]] && eerror "Invalid arguments" && exit -1 [[ $nb -gt 10 ]] && ewarn "Please Enter CTRL-C to exit "\ && ewarn " or \"Y\" to say YES"\ && ewarn " or \"N\" to say NO" ewarn " Is that Correct (Y/N) ???" 
read i; [[ $i == "Y" || $i == "y" ]] && break [[ $i == "N" || $i == "n" ]] && einfo "User wanted interrupt" && exit nb=$((nb+1)) done } main(){ local args="$2 $3 $4 $5 $6" action="$1" # if no args are given if [[ $# -lt 1 ]];then do_error "argument_no_arg" fi case ${action} in create) parse_cmdline ${args} verify_vhost ${vhost} verify_path ${path} verify_profile ${profile} confirm_creation ${is_subdir} print_yes_no create_profile ${vhost} ${profile} ${path} ${name} ${is_subdir} ;; delete) parse_cmdline ${args} verify_vhost ${vhost} delete_profile ${profile} ${vhost} ${vhost_path} ;; list) parse_cmdline ${args} verify_vhost ${vhost} list_profiles ${vhost} ${vhost_path} ;; --help|h|-h|help) do_error "action_help" ;; *) usage ;; esac } main ${@}
true
b87f636dbc3c86306aef1c16224d85eebbd34654
Shell
petronny/aur3-mirror
/lxdm-svn/PKGBUILD
UTF-8
2,813
2.78125
3
[]
no_license
# $Id:$ # Contributor: Balwinder S Dheeman <bdheeman@gmail.com> pkgname=lxdm-svn pkgver=2330 pkgrel=1 pkgdesc='Lightweight X11 Display Manager (part of LXDE)' arch=('i686' 'x86_64') url="http://blog.lxde.org/?p=531" license=('GPL') provides=('lxdm') conflicts=('lxdm') depends=('gtk2' 'xorg-server') makedepends=('autoconf' 'automake' 'intltool' 'gcc' 'make' 'pkgconfig' 'rsync' 'subversion') backup=('etc/lxdm/default.conf') _svntrunk=https://lxde.svn.sourceforge.net/svnroot/lxde/trunk/lxdm _svnmod=lxdm-svn source=('PKGBUILD.local' 'Xsession' 'custom.desktop' 'lxdm.pam' 'lxdm.rc' 'startcustom.sh') for p in *.patch; do source=(${source[@]} ${p##*/}) done # Include local code, huh if [ -x PKGBUILD.local ]; then . ./PKGBUILD.local fi build() { msg2 "Connecting to SVN server..." cd ${srcdir} if [ -d ${_svnmod}/.svn ]; then cd ${_svnmod} && svn up else svn co ${_svntrunk} ${_svnmod} fi msg2 "SVN checkout done or server timeout" msg2 "Preparing builddir..." cd ${srcdir} rm -rf ${_svnmod}-build rsync -av --delete --exclude '.svn*' ${_svnmod}/ ${_svnmod}-build cd ${_svnmod}-build for p in $srcdir/*.patch; do msg2 "Applying patch ${p##*/}" patch -sp1 < ${p} || return $? done msg2 "Starting the ./autogen.sh; make all install..." ./autogen.sh config_file="/etc/lxdm/default.conf" LDFLAGS="-Wl,-z,defs -Wl,-O2 -Wl,--as-needed" \ CFLAGS="-DCONFIG_FILE=\\\"$config_file\\\"" \ ./configure --prefix=/usr --sysconfdir=/etc --libexecdir=/usr/lib/lxdm || return 1 make || return 1 } package() { cd ${_svnmod}-build || return 1 make DESTDIR=${pkgdir} install || return 1 msg2 "Making it nice..." 
sed -e 's|^# arg=.*|arg=/usr/bin/X vt5 -nolisten tcp|; s|\${exec_prefix}|/usr|' \ -i ${pkgdir}/etc/lxdm/lxdm.conf cp ${pkgdir}/etc/lxdm/lxdm.conf ${pkgdir}/etc/lxdm/default.conf mv ${pkgdir}/usr/sbin/lxdm-binary ${pkgdir}/usr/sbin/lxdm install -Dm755 ${srcdir}/Xsession ${pkgdir}/etc/lxdm/Xsession || return 1 install -Dm755 ${srcdir}/custom.desktop ${pkgdir}/usr/share/xsessions/custom.desktop || return 1 install -Dm644 ${srcdir}/lxdm.pam ${pkgdir}/etc/pam.d/lxdm || return 1 install -Dm755 ${srcdir}/lxdm.rc ${pkgdir}/etc/rc.d/lxdm || return 1 install -Dm755 ${srcdir}/startcustom.sh ${pkgdir}/usr/bin/startcustom || return 1 } # vim:set ts=4 sw=4 et: md5sums=('86ba7f04bf9a291dc827738bda7c9b4a' '49785674285bfed32d02516f4f769e48' 'fa80f01323a765f5f667c7ed31aa4af3' '3bb03543d1e05168f394d9e35051237a' '35ebe5ea58406eaa1e6d51579618f551' 'd23ae9e7e2a6948b68007e6c1744fb29' '7efff99b0f95bbcbdba339b14aca3039' 'b5c4e469cab84b236d300d20cfd92608' 'd422789783b1d3bdd09be0d49e101476' '833873b4708994ffc7e0b74ff70e22f2')
true
f29eed0fd43c553cb94efec1ef4a4321b0ca0979
Shell
huangzhen/reflectors
/reflectors-response/bin/gnome
UTF-8
5,735
3.78125
4
[ "Apache-2.0" ]
permissive
#!/usr/bin/env bash # # The gnome command script. # # Environment Variables: # # JAVA_HOME The java implementation to use. Overrides JAVA_HOME. # # GNOME_CLASSPATH Extra Java CLASSPATH entries. # # GNOME_CLASSPATH_PREFIX Extra Java CLASSPATH entries that should be # prefixed to the system classpath. # # GNOME_HEAPSIZE The maximum amount of heap to use. # Default is unset and uses the JVMs default setting # (usually 1/4th of the available memory). # # GNOME_LIBRARY_PATH Gnome additions to JAVA_LIBRARY_PATH for adding # native libraries. # # GNOME_OPTS Extra Java runtime options. # # GNOME_CONF_DIR Alternate conf dir. Default is ${GNOME_HOME}/conf. # # GNOME_ROOT_LOGGER The root appender. Default is INFO,console # bin=$(dirname "$0") bin=$(cd "$bin">/dev/null; pwd) # This will set GNOME_HOME, etc. . "$bin"/gnome-config.sh # if no args specified, show usage if [ $# = 0 ]; then echo "Usage: gnome [<options>] <command> [<args>]" echo "Options:" echo " --config DIR Configuration direction to use. Default: ./conf" echo "" echo "Commands:" echo "Some commands take arguments. Pass no args or -h for usage." echo " sparrow-server Run the sparrow Server" echo " clean Run the Gnome clean up script" echo " classpath Dump gnome CLASSPATH" echo " version Print the version" echo " CLASSNAME Run the class named CLASSNAME" exit 1 fi # get arguments COMMAND=$1 shift JAVA=$JAVA_HOME/bin/java # override default settings for this command, if applicable if [ -f "$GNOME_HOME/conf/gnome-env-$COMMAND.sh" ]; then . 
"$GNOME_HOME/conf/gnome-env-$COMMAND.sh" fi add_size_suffix() { # add an 'm' suffix if the argument is missing one, otherwise use whats there local val="$1" local lastchar=${val: -1} if [[ "mMgG" == *$lastchar* ]]; then echo $val else echo ${val}m fi } if [[ -n "$GNOME_HEAPSIZE" ]]; then JAVA_HEAP_MAX="-Xmx$(add_size_suffix $GNOME_HEAPSIZE)" fi if [[ -n "$GNOME_OFFHEAPSIZE" ]]; then JAVA_OFFHEAP_MAX="-XX:MaxDirectMemorySize=$(add_size_suffix $GNOME_OFFHEAPSIZE)" fi # so that filenames w/ spaces are handled correctly in loops below ORIG_IFS=$IFS IFS= # CLASSPATH initially contains $GNOME_CONF_DIR CLASSPATH="${GNOME_CONF_DIR}" CLASSPATH=${CLASSPATH}:$JAVA_HOME/lib/tools.jar add_to_cp_if_exists() { if [ -d "$@" ]; then CLASSPATH=${CLASSPATH}:"$@" fi } # For releases, add reflectors & webapps to CLASSPATH # Webapps must come first else it messes up Jetty if [ -d "$GNOME_HOME/gnome-webapps" ]; then add_to_cp_if_exists "${GNOME_HOME}" fi #add the reflectors jars for each module for f in $GNOME_HOME/reflectors*.jar; do if [[ $f = *sources.jar ]]; then : # Skip sources.jar elif [ -f $f ]; then CLASSPATH=${CLASSPATH}:$f; fi done # Add libs to CLASSPATH for f in $GNOME_HOME/lib/*.jar; do CLASSPATH=${CLASSPATH}:$f; done # default log directory & file if [ "$GNOME_LOG_DIR" = "" ]; then GNOME_LOG_DIR="$GNOME_HOME/logs" fi if [ "$GNOME_LOGFILE" = "" ]; then GNOME_LOGFILE='sparrow.log' fi function append_path() { if [ -z "$1" ]; then echo $2 else echo $1:$2 fi } JAVA_PLATFORM="" # if GNOME_LIBRARY_PATH is defined lets use it as first or second option if [ "$GNOME_LIBRARY_PATH" != "" ]; then JAVA_LIBRARY_PATH=$(append_path "$JAVA_LIBRARY_PATH" "$GNOME_LIBRARY_PATH") fi # Add user-specified CLASSPATH last if [ "$GNOME_CLASSPATH" != "" ]; then CLASSPATH=${CLASSPATH}:${GNOME_CLASSPATH} fi # Add user-specified CLASSPATH prefix first if [ "$GNOME_CLASSPATH_PREFIX" != "" ]; then CLASSPATH=${GNOME_CLASSPATH_PREFIX}:${CLASSPATH} fi # restore ordinary behaviour unset IFS #Set the right GC 
options based on the what we are running declare -a server_cmds=("sparrow-server") for cmd in ${server_cmds[@]}; do if [[ $cmd == $COMMAND ]]; then server=true break fi done if [[ $server ]]; then GNOME_OPTS="$GNOME_OPTS $SERVER_GC_OPTS" else GNOME_OPTS="$GNOME_OPTS $CLIENT_GC_OPTS" fi # figure out which class to run if [ "$COMMAND" = "sparrow-server" ] ; then CLASS="com.iflytek.sparrow.SparrowSystem" if [ "$1" != "stop" ] ; then GNOME_OPTS="$GNOME_OPTS $GNOME_ADX_SERVER_OPTS" fi elif [ "$COMMAND" = "clean" ] ; then # TODO echo "clean" elif [ "$COMMAND" = "classpath" ] ; then echo $CLASSPATH exit 0 elif [ "$COMMAND" = "version" ] ; then # CLASS='com.iflytek.gnome.util.VersionInfo' echo "version" else CLASS=$COMMAND fi # Have JVM dump heap if we run out of memory. Files will be 'launch directory' # and are named like the following: java_pid21612.hprof. Apparently it doesn't # 'cost' to have this flag enabled. Its a 1.6 flag only. See: # http://blogs.sun.com/alanb/entry/outofmemoryerror_looks_a_bit_better GNOME_OPTS="$GNOME_OPTS -Dgnome.log.dir=$GNOME_LOG_DIR" GNOME_OPTS="$GNOME_OPTS -Dgnome.log.file=$GNOME_LOGFILE" GNOME_OPTS="$GNOME_OPTS -Dgnome.home.dir=$GNOME_HOME" GNOME_OPTS="$GNOME_OPTS -Dgnome.id.str=$GNOME_IDENT_STRING" GNOME_OPTS="$GNOME_OPTS -Dgnome.root.logger=${GNOME_ROOT_LOGGER:-INFO,console}" if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then GNOME_OPTS="$GNOME_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH" export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:$JAVA_LIBRARY_PATH" fi HEAP_SETTINGS="$JAVA_HEAP_MAX $JAVA_OFFHEAP_MAX" # Exec unless GNOME_NOEXEC is set. export CLASSPATH if [ "${GNOME_NOEXEC}" != "" ]; then "$JAVA" -Dproc_$COMMAND -XX:OnOutOfMemoryError="kill -9 %p" $HEAP_SETTINGS $GNOME_OPTS $CLASS "$@" else echo "exe2" exec "$JAVA" -Dproc_$COMMAND -XX:OnOutOfMemoryError="kill -9 %p" $HEAP_SETTINGS $GNOME_OPTS $CLASS "$@" fi
true
03e72507bd900a2751afa151d66255355581b810
Shell
grepp/heroku-buildpack-nodejs
/lib/build-data.sh
UTF-8
407
3.265625
3
[ "MIT" ]
permissive
# variable shared by this whole module BUILD_DATA_FILE="" bd_create() { local cache_dir="$1" BUILD_DATA_FILE="$cache_dir/build-data/node" kv_create $BUILD_DATA_FILE } bd_get() { kv_get $BUILD_DATA_FILE "$1" } bd_set() { kv_set $BUILD_DATA_FILE "$1" "$2" } log_build_data() { # print all values on one line in logfmt format # https://brandur.org/logfmt echo $(kv_list $BUILD_DATA_FILE) }
true
d774fc27f37bbe92796158764e10ba8d6c25bdcc
Shell
genepi-freiburg/ckdgen-metaanalysis
/prepare-ma-input.sh
UTF-8
4,545
3.46875
3
[]
no_license
#!/bin/bash if [[ "$#" -lt "7" ]] || [[ "$#" -gt "9" ]] then echo "Usage: $0 <INPUT_FILE_LIST> <MAF_FILTER> <INFO_FILTER> <MAC_FILTER> <EFF_SAMPLE_SIZE> <BETA_FILTER> <INDEL_REMOVAL> [<OUTDIR> [<SKIPEXIST>]]" exit 3 fi INPUT_FILE_LIST=$1 MAF_FILTER=$2 INFO_FILTER=$3 MAC_FILTER=$4 EFF_SAMPLE_SIZE=$5 BETA_FILTER=$6 INDEL_REMOVAL=$7 OUTDIR=$8 SKIP_EXIST=$9 if [ ! -f "$INPUT_FILE_LIST" ] then echo "Input file list file does not exist: $INPUT_FILE_LIST" exit 3 fi I=0 for FN in `cat $INPUT_FILE_LIST` do if [ ! -f $FN ] then echo "Input file does not exist: $FN" exit 3 fi I=$((I+1)) done if [ "$OUTDIR" == "" ] then OUTDIR="input" fi mkdir -p $OUTDIR echo "Input file list: $INPUT_FILE_LIST (has $I files)" echo "MAF filter: >= $MAF_FILTER" echo "INFO filter: >= $INFO_FILTER" echo "MAC (minor allele count) filter: >= $MAC_FILTER" echo "Effective sample size filter: >= $EFF_SAMPLE_SIZE" echo "BETA filter: > -${BETA_FILTER} && < ${BETA_FILTER}" echo "INDEL removal: $INDEL_REMOVAL" echo "OUTDIR: $OUTDIR" if [ "$INDEL_REMOVAL" != "1" ] && [ "$INDEL_REMOVAL" != "0" ] then echo "INDEL removal must be '0' (off) or '1' (on)." exit 3 fi ERRORS=`awk -v maf=$MAF_FILTER -v info=$INFO_FILTER -v mac=$MAC_FILTER -v eff=$EFF_SAMPLE_SIZE -v beta=$BETA_FILTER \ 'BEGIN { if (maf < 0 || maf > 0.5) { print("MAF filter must be in [0..0.5]!"); } if (info < 0 || info > 1) { print("INFO filter must be in [0..1]!"); } if (mac < 0) { print("MAC filter must be >= 0!"); } if (eff < 0) { print("EFF filter must be >= 0!"); } if (beta < 0) { print("BETA filter must be >= =0!"); } }'` if [ "$ERRORS" != "" ] then echo "$ERRORS" exit 3 else echo "Parameters OK" fi FIND_COL="/shared/cleaning/scripts/find-column-index.pl" for FN in `cat $INPUT_FILE_LIST` do # identify columns SNP_COL=`$FIND_COL MARKER $FN` ## alternative names for SNP col? 
CHR_COL=`$FIND_COL chr $FN` POS_COL=`$FIND_COL position $FN` REF_ALL_COL=`$FIND_COL noncoded_all $FN` CODED_ALL_COL=`$FIND_COL coded_all $FN` AF_COL=`$FIND_COL AF_coded_all $FN` INFO_COL=`$FIND_COL oevar_imp $FN` BETA_COL=`$FIND_COL beta $FN` SE_COL=`$FIND_COL SE $FN` PVAL_COL=`$FIND_COL pval $FN` N_COL=`$FIND_COL n_total $FN` if [ "$SNP_COL" == "-1" ] || [ "$CHR_COL" == "-1" ] || [ "$POS_COL" == "-1" ] || \ [ "$REF_ALL_COL" == "-1" ] || [ "$CODED_ALL_COL" == "-1" ] || [ "$AF_COL" == "-1" ] || \ [ "$INFO_COL" == "-1" ] || [ "$BETA_COL" == "-1" ] || [ "$SE_COL" == "-1" ] || \ [ "$PVAL_COL" == "-1" ] || [ "$N_COL" == "-1" ] then echo "COLUMN NAME PROBLEMS in file: $FN" echo "SNP_COL=$SNP_COL CHR_COL=$CHR_COL POS_COL=$POS_COL REF_ALL_COL=$REF_ALL_COL CODED_ALL_COL=$CODED_ALL_COL" echo "AF_COL=$AF_COL INFO_COL=$INFO_COL BETA_COL=$BETA_COL SE_COL=$SE_COL PVAL_COL=$PVAL_COL N_COL=$N_COL" # exit 3 echo "SKIP FILE" continue fi # filter file OUTFN=`basename $FN` echo "Process $FN ==> $OUTDIR/$OUTFN" if [ -f "$OUTDIR/$OUTFN" ] then echo "Output file exists!" if [ "$SKIP_EXIST" == "1" ] then echo "Skip file!" 
continue fi fi zcat $FN | awk -v mafFilter=$MAF_FILTER -v infoFilter=$INFO_FILTER -v macFilter=$MAC_FILTER -v effFilter=$EFF_SAMPLE_SIZE -v betaFilter=$BETA_FILTER \ -v indelRemove=$INDEL_REMOVAL \ -v snpCol=$SNP_COL -v chrCol=$CHR_COL -v posCol=$POS_COL -v refAllCol=$REF_ALL_COL -v codedAllCol=$CODED_ALL_COL \ -v afCol=$AF_COL -v infoCol=$INFO_COL -v nCol=$N_COL \ -v betaCol=$BETA_COL -v seCol=$SE_COL -v pvalCol=$PVAL_COL \ 'BEGIN { print "MARKER", "chr", "position", "coded_all", "noncoded_all", "AF_coded_all", "MAF", "MAC", "n_total", "n_effective", "oevar_imp", "beta", "SE", "pval"; } { if (FNR > 1) { af = $(afCol+1); n = $(nCol+1); info = $(infoCol+1) + 0; if (af > 0.5) { maf = 1-af; } else { maf = af; } mac = 2 * maf * n; n_eff = n * info; if (n == "NA") { n_eff = 999999; mac = 999999; } beta = $(betaCol+1); marker = $(snpCol+1); if (indelRemove == 1 && marker ~ /_I$/) { next; } se = $(seCol+1); pval = $(pvalCol+1); if (beta > -betaFilter && beta < betaFilter && maf >= mafFilter && info >= infoFilter && mac >= macFilter && n_eff >= effFilter && se >= 0 && pval >= 0 && se != "NA" && se != "Inf" && pval != "Inf" && pval != "NA") { print marker, $(chrCol+1), $(posCol+1), $(codedAllCol+1), $(refAllCol+1), af, maf, mac, n, n_eff, info, beta, $(seCol+1), $(pvalCol+1); } } }' | gzip > $OUTDIR/$OUTFN done
true
e6b4dd3d7a4b6759a505d0175de929cfcc9341aa
Shell
mcharleb/armnn
/scripts/get_compute_library.sh
UTF-8
2,444
3.90625
4
[ "MIT", "LicenseRef-scancode-unknown-license-reference" ]
permissive
#!/bin/bash # # Copyright © 2017 Arm Ltd. All rights reserved. # SPDX-License-Identifier: MIT # CMD=$( basename $0 ) usage() { echo "Usage: $CMD -g <GITHUB_USERNAME>" exit 1 } function AssertZeroExitCode { EXITCODE=$? if [ $EXITCODE -ne 0 ]; then echo "$1" echo "+++ Command exited with code $EXITCODE. Please fix the above errors and re-run" exit 1 fi } # process the options given while getopts "g:h" opt; do case "$opt" in g) GITHUB_USERNAME="$OPTARG";; h|\?) usage;; esac done shift $((OPTIND - 1)) # # This script is designed to be called from anywhere # so it will resolve where to checkout out the clframework # relative to its own location in armnn/scripts # SRC="${BASH_SOURCE[0]}" # resolve $SRC until it is no longer a symlink while [ -h "$SRC" ]; do DIR="$( cd -P "$( dirname "$SRC" )" >/dev/null && pwd )" SRC="$(readlink "$SRC")" # if $SRC was a relative symlink, we need to resolve it # relative to the path where the symlink file originally was [[ $SRC != /* ]] && SRC="$DIR/$SRC" done DIR="$( cd -P "$( dirname "$SRC" )" >/dev/null && pwd )" pushd ${DIR} > /dev/null cd ../.. if [ -z "$USERNAME" ]; then USERNAME=$USER fi if [ -z "$GITHUB_USERNAME" ]; then GITHUB_USERNAME=$USERNAME echo "setting GITHUB_USERNAME: ${GITHUB_USERNAME} use -g command line option to change" fi if [ ! 
-d clframework ]; then echo "+++ Cloning clframework" git clone https://review.mlplatform.org/ml/ComputeLibrary clframework AssertZeroExitCode "Cloning CL Framework failed" fi pushd clframework > /dev/null # Use the latest pinned version of the CL framework # For pinnning to a ref use this: # CLFRAMEWORKREVISION="branches/arm_compute_19_02" # Release 19.02 # git fetch https://review.mlplatform.org/ml/ComputeLibrary $CLFRAMEWORKREVISION && git checkout FETCH_HEAD # For pinning to a revision use this: CLFRAMEWORKREVISION="b4a44ff3aa98d2b51f1621a7525db3f81108a1bd" # COMPMID-1995: Removed layout checks from Reduction ops git fetch https://review.mlplatform.org/ml/ComputeLibrary && git checkout ${CLFRAMEWORKREVISION} AssertZeroExitCode # Set commit hook so we can submit reviews to gerrit (curl -Lo `git rev-parse --git-dir`/hooks/commit-msg https://review.mlplatform.org/tools/hooks/commit-msg; chmod +x `git rev-parse --git-dir`/hooks/commit-msg) AssertZeroExitCode popd > /dev/null # out of clframework popd > /dev/null # back to wherever we were when called exit 0
true
d747bdf0cf9df0ab91ad491821d8a49222675c67
Shell
XQuartz/xorg-server
/.gitlab-ci/cross-prereqs-build.sh
UTF-8
3,731
3.140625
3
[ "SGI-B-2.0", "ISC", "LicenseRef-scancode-mit-old-style", "LicenseRef-scancode-warranty-disclaimer", "LicenseRef-scancode-x11-adobe", "LicenseRef-scancode-xfree86-1.0", "HPND-sell-variant", "ICU", "MIT-open-group", "BSD-3-Clause", "HPND", "X11-distribute-modifications-variant", "MIT", "Lice...
permissive
#!/bin/bash set -e set -o xtrace HOST=$1 # Debian's cross-pkg-config wrappers are broken for MinGW targets, since # dpkg-architecture doesn't know about MinGW target triplets. # https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=930492 cat >/usr/local/bin/${HOST}-pkg-config <<EOF #!/bin/sh PKG_CONFIG_SYSROOT_DIR=/usr/${HOST} PKG_CONFIG_LIBDIR=/usr/${HOST}/lib/pkgconfig:/usr/share/pkgconfig pkg-config \$@ EOF chmod +x /usr/local/bin/${HOST}-pkg-config # when cross-compiling, some autoconf tests cannot be run: # --enable-malloc0returnsnull export xorg_cv_malloc0_returns_null=yes build() { url=$1 commit=$2 config=$3 name=$(basename ${url} .git) if [[ $commit =~ ^[[:xdigit:]]{1,}$ ]] then git clone ${url} ${name} git -C ${name} checkout ${commit} else git clone --depth 1 --branch ${commit:-master} --recurse-submodules -c advice.detachedHead=false ${url} ${name} fi pushd ${name} NOCONFIGURE=1 ./autogen.sh || ./.bootstrap ./configure ${config} --host=${HOST} --prefix= --with-sysroot=/usr/${HOST}/ make -j$(nproc) DESTDIR=/usr/${HOST} make install popd rm -rf ${OLDPWD} } build 'https://gitlab.freedesktop.org/pixman/pixman.git' 'pixman-0.38.4' build 'https://gitlab.freedesktop.org/xorg/lib/pthread-stubs.git' '0.4' # we can't use the xorgproto pkgconfig files from /usr/share/pkgconfig, because # these would add -I/usr/include to CFLAGS, which breaks cross-compilation build 'https://gitlab.freedesktop.org/xorg/proto/xorgproto.git' 'xorgproto-2021.4.99.2' '--datadir=/lib' build 'https://gitlab.freedesktop.org/xorg/lib/libXau.git' 'libXau-1.0.9' build 'https://gitlab.freedesktop.org/xorg/proto/xcbproto.git' 'xcb-proto-1.14.1' build 'https://gitlab.freedesktop.org/xorg/lib/libxcb.git' 'libxcb-1.14' build 'https://gitlab.freedesktop.org/xorg/lib/libxtrans.git' 'xtrans-1.4.0' # the default value of keysymdefdir is taken from the includedir variable for # xproto, which isn't adjusted by pkg-config for the sysroot # Using -fcommon to address build failure when cross-compiling for 
windows. # See discussion at https://gitlab.freedesktop.org/xorg/xserver/-/merge_requests/913 CFLAGS="-fcommon" build 'https://gitlab.freedesktop.org/xorg/lib/libX11.git' 'libX11-1.6.9' "--with-keysymdefdir=/usr/${HOST}/include/X11" build 'https://gitlab.freedesktop.org/xorg/lib/libxkbfile.git' 'libxkbfile-1.1.0' # freetype needs an explicit --build to know it's cross-compiling # disable png as freetype tries to use libpng-config, even when cross-compiling build 'git://git.savannah.gnu.org/freetype/freetype2.git' 'VER-2-10-1' "--build=$(cc -dumpmachine) --with-png=no" build 'https://gitlab.freedesktop.org/xorg//font/util.git' 'font-util-1.3.2' build 'https://gitlab.freedesktop.org/xorg/lib/libfontenc.git' 'libfontenc-1.1.4' build 'https://gitlab.freedesktop.org/xorg/lib/libXfont.git' 'libXfont2-2.0.3' build 'https://gitlab.freedesktop.org/xorg/lib/libXdmcp.git' 'libXdmcp-1.1.3' build 'https://gitlab.freedesktop.org/xorg/lib/libXfixes.git' 'libXfixes-5.0.3' build 'https://gitlab.freedesktop.org/xorg/lib/libxcb-util.git' '0.4.0' build 'https://gitlab.freedesktop.org/xorg/lib/libxcb-image.git' '0.4.0' build 'https://gitlab.freedesktop.org/xorg/lib/libxcb-wm.git' '0.4.1' # workaround xcb_windefs.h leaking all Windows API types into X server build # (some of which clash which types defined by Xmd.h) XXX: This is a bit of a # hack, as it makes this header depend on xorgproto. Maybe an upstreamable # fix would involve a macro defined in the X server (XFree86Server? # XCB_NO_WINAPI?), which makes xcb_windefs.h wrap things like XWinsock.h # does??? sed -i s#winsock2#X11/Xwinsock# /usr/${HOST}/include/xcb/xcb_windefs.h
true
c98a178f1aee7417d179f6915a1c35cbf33fb692
Shell
dvignoles/asrc_geoserver
/backup.sh
UTF-8
275
2.609375
3
[]
no_license
#!/bin/bash # https://docs.docker.com/storage/volumes/#restore-container-from-backup CONTAINER="${1}" TARNAME="${2}" docker run --rm --volumes-from ${CONTAINER} -v /asrc/ecr/danielv/geoserver_volumes/backups:/backup ubuntu tar cvf "/backup/${TARNAME}" /opt/geoserver/data_dir
true
3edcb2ed5e5b3279b349003f77fccba4b8f208d0
Shell
gurjeet/gurjeet.singh.im
/edb_file_list.sh
UTF-8
511
3.40625
3
[]
no_license
#!/bin/bash echo > edb_file_list.html ( for (( i = 0 ; i <= 4000; ++i )); do # echo processing $i curl -I http://sbp.enterprisedb.com/getfile.jsp?fileid=$i > /tmp/edb.getfile if ! grep '404 Not Found' /tmp/edb.getfile; then link=$(grep 'Location: ' /tmp/edb.getfile | cut -d ' ' -f 2-) # Convert download.enterprisedb.com to get.enterprisedb.com link=${link/download/get} echo '<a href="'$link'">'$link'</a><br>' >> edb_file_list.html fi done ) >/dev/null 2>&1
true
4cada16f22c6259e072e28c397c5c2ed904bb605
Shell
inventhouse/BenBin
/cleanall
UTF-8
879
4.15625
4
[ "MIT" ]
permissive
#!/bin/bash # Simple hack to clean all Makefile projects in a directory # Copyright (c) 2018 Benjamin Holt -- MIT License if [ "$1" == "-h" -o "$1" == "--help" ]; then cat <<USAGE usage: cleanall [DIR] Cleans all projects with a Makefile immediately in DIR or the current directory cleanall -h|--help Print this message and exit USAGE exit 0 fi pushd "${1:-.}" >> /dev/null for ProjMake in */Makefile; do Proj="${ProjMake%/Makefile}" if [ "$Proj" == "*" ]; then echo "cleanall: no Makefile projects found" >&2 popd >> /dev/null exit 1 fi echo "-- $Proj --" if grep -E "^clean( ?):" "$ProjMake" >> /dev/null; then pushd "$Proj" >> /dev/null make clean echo popd >> /dev/null else echo "[ No 'clean:' target found ]" echo fi done popd >> /dev/null ###
true
f819e221c36785e15dd1a277265297ab0a9a48e6
Shell
mrquincle/merijn-microapps
/9.pio_own_platform_and_tools/update_repos.sh
UTF-8
1,606
3.84375
4
[]
no_license
#!/bin/bash # Script to create and update repo's for platformio packages/platforms working_dir=$(pwd) can_clean_pio_dirs=false arg1=$1 setup_repo(){ echo ===================================================================== echo - SETTING UP: $2 echo ===================================================================== mkdir ~/.piorepos/$2/ cd ~/.piorepos/$2/ git init git remote add origin $3 git pull origin master } update_repo(){ [ "$arg1" = "-d" ] && clear_pio_dir $2 && return [ "$arg1" = "-n" ] && clear_pio_dir $2 if [ ! -d ~/.piorepos/$2/ ]; then setup_repo $1 $2 $3 fi # Remove all old files except for the .git folder cd ~/.piorepos/$2/ rm -r $(ls ~/.piorepos/$2 -I .git) ## Copy new files to repo and upload them cp -rf $working_dir/$1/$2 ~/.piorepos/ echo ===================================================================== echo - UPDATING: $2 echo ===================================================================== cd ~/.piorepos/$2/ git add -A git commit -m "..." git push --set-upstream origin master } clear_pio_dir(){ echo removing: $1 rm -rf ~/.platformio/packages/$1/ rm -rf ~/.platformio/platforms/"${1:9}" } mkdir -p ~/.piorepos update_repo\ "ble_uploader"\ "tool-cs-ble-uploader"\ "git@gitlab.com:mPlagge/tool-cs-ble-uploader.git" update_repo\ "framework"\ "framework-arduinomicroapps"\ "git@gitlab.com:mPlagge/framework-arduinomicroapps.git" update_repo\ "header_tool"\ "tool-cs-header-maker"\ "git@gitlab.com:mPlagge/tool-cs-header-maker.git" update_repo\ "platform"\ "platform-csmicroapps"\ "git@gitlab.com:mPlagge/platform-csmicroapps.git"
true
b55e3d24ae9bccb8b4356d141f843f63a538a755
Shell
amrragab8080/cryoem-awsbatch
/cryo_wrapper.sh
UTF-8
994
3.375
3
[ "MIT" ]
permissive
#!/bin/bash -xe ################################### env ################################### if [ -x "$(command -v nvidia-smi)" ] ; then nvidia-smi else : fi ################################### echo "DOWNLOADING CRYOEM INPUT FILES..." export SCRATCHDIR=$JOBDIR/$AWS_BATCH_JOB_ID/scratch mkdir -p $SCRATCHDIR aws s3 cp $S3_INPUT $JOBDIR/$AWS_BATCH_JOB_ID tar -xvf $JOBDIR/$AWS_BATCH_JOB_ID/*.tar.gz -C $JOBDIR/$AWS_BATCH_JOB_ID --strip 1 echo "STARTING UP MAIN CRYOEM WORKFLOW..." cd $JOBDIR/$AWS_BATCH_JOB_ID if [[ -z "${AWS_BATCH_JOB_ARRAY_INDEX}" ]]; then : else LINE=$((AWS_BATCH_JOB_ARRAY_INDEX + 1)) CRYO_SYSTEM=$(sed -n ${LINE}p $JOBDIR/$AWS_BATCH_JOB_ID/cryoem.txt) export CRYO_SYSTEM fi $@ --o $SCRATCHDIR echo "JOB FINISHED, COMPRESSING OUTPUT..." tar czvf $JOBDIR/batch_output_$AWS_BATCH_JOB_ID.tar.gz $SCRATCHDIR aws s3 cp $JOBDIR/batch_output_$AWS_BATCH_JOB_ID.tar.gz $S3_OUTPUT echo "CLEANUP..." rm -rf $JOBDIR/$AWS_BATCH_JOB_ID echo "BATCH JOB DONE"
true
3d24f02419abedbe4b5a95fe710214170e25216d
Shell
pawamoy/tetris-ia
/tetris/go_ia
UTF-8
285
3.234375
3
[]
no_license
#!/bin/bash usage() { echo "usage: ${0##*/} tetris | demo LIST [PIECES]" exit 1 } [ -z "$1" ] && usage if [ "$1" = "demo" ]; then [ -z "$2" ] && usage [ ! -f "$2" ] && usage [[ -n "$3" && ! -f "$3" ]] && usage fi sleep 0.5 rmiregistry & java IA $* pkill rmiregistry pkill java
true
2036bdb5a392e86b01e41921824d0fa7236ce224
Shell
jukuan/vagralamp
/vagrant_bootstrap.sh
UTF-8
1,085
2.65625
3
[]
no_license
#!/usr/bin/env bash echo "_ENV_SETUP_" curl https://www.dotdeb.org/dotdeb.gpg | sudo apt-key add - curl -sL https://deb.nodesource.com/setup_8.x | sudo -E bash - #echo 'deb http://packages.dotdeb.org jessie all' >> /etc/apt/sources.list #echo 'deb-src http://packages.dotdeb.org jessie all' >> /etc/apt/sources.list apt-get update && apt-get upgrade -y apt-get install -y build-essential nodejs apt-get install -y curl make vim mc git openssl apache2 mysql-server php7.0 apt-get install -y php7.0-zip php7.0-mbstring php7.0-readline php7.0-cli php7.0-common php7.0-gd php7.0-json php7.0-mcrypt php7.0-mysql php7.0-readline a2enmod rewrite 2> /dev/null npm install gulp -g echo "_VGR_SETUP_" # Install composer cd /tmp php -r "copy('https://getcomposer.org/installer', '/tmp/composer-setup.php');" sudo php /tmp/composer-setup.php --install-dir=/usr/local/bin --filename=composer rm /tmp/composer-setup.php # Cp a2 configuration rm /etc/apache2/sites-available/*.loc.conf 2> /dev/null cp /vagrant/_conf/*.loc.conf /etc/apache2/sites-available/ a2ensite * service apache2 restart
true
6cec85f7a81f557bb41a3451637870db648dadc5
Shell
kellmant/admin-shell
/enterportal.sh
UTF-8
487
2.859375
3
[ "MIT" ]
permissive
#!/bin/bash #set -eo pipefail set -a PATH=~/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin export PATH if [ $# -eq 1 ];then /usr/local/bin/ttyd -r 4 -p 3000 -S -C /efs/ca/local/local.cert.pem -K /efs/ca/local/local.key.pem exec tmux new -A -s $1 "bash -l" || { exec tmux attach -t $1 ; } else /usr/local/bin/ttyd -r 4 -p 3000 -S -C /efs/ca/local/local.cert.pem -K /efs/ca/local/local.key.pem exec tmux new -A -s SecLab "bash -l" || { exec tmux attach -t SecLab ; } fi
true
472c0e75edd8397e3c8cf09d619ea43e3b68f6f0
Shell
jwmasekre/self-site
/publish.sh
UTF-8
361
2.53125
3
[]
no_license
#!/bin/sh #error protection set -e printf "\033[0;32mdeploying changes to master\033[0m\n" git pull git add --all git commit -m "$1" git push printf "\033[0;32mbuilding site\033[0m\n" ./hugo.exe printf "\033[0;32mdeploying site to gh-pages\033[0m\n" cd public git add --all git commit -m "$1" git push printf "\033[0;32mdone\033[0m\n"
true
53b13538a4a1d5cb43cddffa3d3f9859a932e180
Shell
MrBitBucket/rl-ci-tools-mirror
/manylinux/build-wheels
UTF-8
989
3.359375
3
[]
no_license
#!/bin/bash set -ev cd "$(dirname "${BASH_SOURCE[0]}")" UNICODES=${UNICODES:-'16 32'} PYTHONS=${PYTHONS:-'2.7 3.3 3.4 3.5 3.6'} ARCHS=${ARCHS:-'x86_64 i686'} IMAGESRC=${IMAGESRC:-quay.io/pypa} REPO=${REPO:-https://bitbucket.org/rptlab/reportlab} BRANCH=${BRANCH:-default} REQUIREMENT=${REQUIREMENT:-$(basename ${REPO})} DOCKER_SCRIPT=${DOCKER_SCRIPT:-container-build-wheels} DOCKER_PATH=${DOCKER_PATH:-/io/${DOCKER_SCRIPT}} sudo rm -rf wheels wheelsu .pip-cache mkdir wheels wheelsu for arch in ${ARCHS}; do sudo rm -rf wheelhouse wheels_unfixed mkdir wheelhouse wheels_unfixed DOCKER_IMAGE=${IMAGESRC}/manylinux1_${arch} docker pull $DOCKER_IMAGE docker run --rm \ ${DOCKER_ARGS} \ -e PYTHONS="$PYTHONS" \ -e UNICODES="$UNICODES" \ -e REPO="$REPO" \ -e BRANCH="$BRANCH" \ -e REQUIREMENT="$REQUIREMENT" \ -e ARCH="$arch" \ -v $(pwd):/io ${DOCKER_IMAGE} ${DOCKER_PATH} cp wheelhouse/* wheels/ cp wheels_unfixed/* wheelsu/ done sudo rm -rf wheels_unfixed wheelhouse
true
f4b750b7d5f0108a70025142683135c13ce1a5d2
Shell
yan-kuan/sel4-tutorials
/run-sabre.sh
UTF-8
378
2.625
3
[ "BSD-2-Clause" ]
permissive
#! /bin/bash set -e APP=$1 make sabre_${APP}_defconfig make silentoldconfig make if [[ ${APP} =~ .*camkes.* ]]; then qemu-system-arm -M sabrelite -nographic -kernel images/capdl-loader-experimental-image-arm-imx6 else qemu-system-arm -M sabrelite -nographic -machine sabrelite -m size=1024M -serial null -serial mon:stdio -kernel images/${APP}-image-arm-imx6 fi
true
818979f6614ded8e742fdddfc30cc62c45faf1e2
Shell
amamama/dotfiles
/.bash_funcs
UTF-8
1,038
3.34375
3
[]
no_license
# vim: set filetype=sh : rlgs() { export RLWRAP_EDITOR='vim -c "set filetype=ps"' rlwrap -c -q '"' -b "'"'(){}[].,#@;|`"' -m gs "$@" } rlgosh() { export RLWRAP_EDITOR='vim -c "set filetype=scheme"' rlwrap -c -q '"' -b "'"'(){}[].,#@;|`"' -m gosh "$@" } rlguile() { export RLWRAP_EDITOR='vim -c "set filetype=scheme"' rlwrap -c -q '"' -b "'"'(){}[].,#@;|`"' -m guile "$@" } gitclone() { git clone $GITURL/$1/$2.git } tmpvim() { tmpname=$(mktemp --tmpdir=./ --suffix=".${1}" ${1}_XXXXX) vim $tmpname } genmf() { SRC=$1 TARGET=$2 if [ -z ${SRC} ]; then SRC=main.c fi if [ -z ${TARGET} ]; then TARGET=a.out fi echo SRC=${SRC},TARGET=${TARGET} cat << 'EOF' | sed s/main\.c/${SRC}/g | sed s/a\.out/${TARGET}/g > Makefile CC=gcc override CFLAGS:= -std=c11 -O2 -Wall -g $(CFLAGS) override LDFLAGS:= $(LDFLAGS) SRC = main.c TARGET = a.out all: $(TARGET) $(TARGET): $(SRC) $(CC) $(CFLAGS) $(LDFLAGS) $(SRC) -o $(TARGET) run : all ./$(TARGET) EOF } if [ -f ~/.bash_funcs_local ]; then . ~/.bash_funcs_local fi
true
ea3ff903a0fb9309c4be28ebce33bc0142d9ff24
Shell
rsenn/scripts
/sh/colorgcc.sh
UTF-8
761
4.0625
4
[]
no_license
#!/bin/sh MYNAME=`basename "$0" .sh` MYDIR=`dirname "$0"` COMPILER=${MYDIR}/${MYNAME#color*-} COLOR_RED="" COLOR_GREEN="" COLOR_YELLOW="" COLOR_BLUE="" COLOR_MAGENTA="" COLOR_NONE="" colorize() { eval "shift; echo \"\${COLOR_$1}\$*\${COLOR_NONE}\"" } #set -x exec "$COMPILER" "$@" 2>&1 | while read -r MESSAGE; do FILE=${MESSAGE%%:*} LINE=${MESSAGE#$FILE:} LINE=${LINE%%:*} case "$MESSAGE" in *:*:" warning: "*) WARNING=${MESSAGE#*:*": warning: "} echo "`colorize YELLOW $FILE`:`colorize GREEN $LINE`: warning: $WARNING" ;; *:*:" error: "*) ERROR=${MESSAGE#*:*": error: "} echo "`colorize YELLOW $FILE`:`colorize GREEN $LINE`: error: $ERROR" ;; *) echo "$MESSAGE" ;; esac done
true