blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
4ba62b44d9910b1c2966d0a8cd61223cb282b641 | Shell | SutCEGoS/website | /deployscript.sh | UTF-8 | 621 | 3.1875 | 3 | [] | no_license | #!/bin/sh
# Deploy the Django site: install dependencies, collect static files, run
# migrations, then restart uwsgi and reload nginx. A timestamped log file
# is created up front so output can be tee'd there if desired.

# Abort the deploy as soon as any step fails instead of ploughing on.
set -e

log_file_name="deploy_$(date +%Y-%m-%d.%H:%M:%S).log"
log_directory="/home/shora/logs/deploy/"
log_file="$log_directory$log_file_name"
mkdir -p "$log_directory"
touch "$log_file"
# exec &> >(tee -a "$log_file")   # bash-only process substitution; script runs under /bin/sh
echo "Deploy started"
echo "User: $USER"
echo "Activating virtual environment"
. ~/venv/bin/activate
echo "Installing new python dependencies"
pip install -U -r requirements.txt
echo "Collecting static files"
python manage.py collectstatic --noinput -c -l
echo "Migrating database"
python manage.py migrate
echo "Reload uwsgi"
# pkill returns non-zero when no process matches; don't let that abort the deploy.
pkill uwsgi || true
# "nginx reload" is not a valid invocation; the signal flag is required.
sudo nginx -s reload
echo "Deployment finished. Check logs in $log_file"
| true |
8918ea8110ff54c541d694e8b5557f7bf12fa199 | Shell | giladm/stash | /scripts/push.sh | UTF-8 | 1,496 | 3.375 | 3 | [] | no_license | # send directly to APS using token certificate and topic
# to get .pem certificate use convert-cert.sh
# send to prod or sand (i.e: send to production or sandbox server)
# Sends a single alert push directly to APNs over HTTP/2 using a .pem
# certificate, a topic (bundle id) and a device token.
#
server=$1
cert=$2
topic=$3
token=$4

if [ $# -ne 4 ]
then
    echo
    echo "usage $0 prod/sand pem_certificate_file topic token"
    echo
    echo "server[$1] cert[$2] topic[$3] token[$4]"
    echo Example $0 sand "/Users/gilad/Documents/keys/worklight/apns-dev-cert.pem" "gm.worklight.multi" bf8e37ad287de32f3910fafd0dd9f8e69a11827dda430c14d05892d2f337ca0f
    echo or $0 sand gilad-testsp-dev.pem gilad.testsp 1d1e782590e1464a749d7d7bb0d7ed37d4e0135cb309f468ea0c8ca2c06c6dfd
    echo
    # Bad arguments must report failure, not success.
    exit 1
fi

# Post one JSON alert payload ($2) to the APNs host ($1) over HTTP/2.
# The ":" after the cert path supplies an empty certificate password.
send_push()
{
    curl -v \
    -d "$2" \
    -H "apns-topic: $topic" \
    -H "apns-expiration: 1" \
    -H "apns-priority: 10" \
    --http2 \
    --cert "$cert:" \
    "https://$1/3/device/$token"
}

if [ "$server" = "sand" ]
then
    echo "Sending to sandbox"
    sleep 2
    send_push api.development.push.apple.com '{"aps":{"alert":"test apns sandbox from http2"}}'
    exit 0
fi

if [ "$server" = "prod" ]
then
    echo "Sending to $token in Production"
    sleep 2
    # -d '{"aps":{"alert":"curl test3 to apns","sound":"default","badge":3}}' \
    send_push api.push.apple.com '{"aps":{"alert":"test apns from http2"}}'
else
    echo "Sandbox or Production ?"
fi
| true |
c742d2ed583479b6a7c9ddeb4f50fa09f96a94ce | Shell | yevheniir/kubernetes-bgp | /scripts/setup-base.sh | UTF-8 | 597 | 2.796875 | 3 | [] | no_license | #!/bin/bash
echo "[------ Begin install-sh -----]"
# Base kubernetes requirements
# Kernel parameters kubeadm expects: bridged traffic visible to iptables
# and IPv4 forwarding enabled.
echo "Enable Forwarding"
cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
EOF
sysctl --system
# kubelet refuses to start with swap enabled; also comment out swap
# entries in fstab so the change survives a reboot.
echo "Disable swap"
swapoff -a
sed -i.bak '/swap/s/^/#/g' /etc/fstab
# Download the matching version
# NOTE(review): release binary fetched without checksum verification.
curl -L \
https://github.com/stedolan/jq/releases/download/jq-1.5/jq-linux64 \
-o /usr/local/bin/jq
# Make it executable
chmod +x /usr/local/bin/jq
# Verify
cat /proc/swaps
echo "[----- install-bash.sh Complete -----]"
| true |
3a87c49dd5a5dddedfa32c3bc9aa5e2bf986a094 | Shell | ScottMcCammon/dotfiles | /.bashrc | UTF-8 | 1,663 | 3.359375 | 3 | [] | no_license | source /etc/bashrc
source $HOME/.alias
# hostname completion for SSH from known_hosts file
complete -W "$(cat ~/.ssh/known_hosts | cut -f 1 -d ' ' | sed -e 's/,.*//g' | sort | uniq)" ssh
# pushd/popd wrappers that suppress the usual output and show the
# numbered directory stack instead.
function sam_pushd {
    pushd "${@}" >/dev/null;
    dirs -v;
}
function sam_popd {
    popd "${@}" >/dev/null;
    dirs -v;
}
# Current git branch formatted as "(git:branch)" for the prompt;
# silent when not inside a repository.
function parse_git_branch() {
    git branch 2> /dev/null | sed -e '/^[^*]/d' -e 's/* \(.*\)/(git:\1)/'
}
# Colorized, paged wrappers around common diff-producing commands.
function better_git_diff() {
    git diff "${@}" | colordiff.py | less -iFXRS -x4
}
function better_git_show() {
    git show "${@}" | colordiff.py | less -iFXRS -x4
}
function better_ag() {
    /usr/local/bin/ag -H --color "${@}" | less -iFXRS -x4
}
function diffu() {
    diff -u "${@}" | colordiff.py | less -iFXRS -x4
}
# ANSI color escapes used to build the prompt below.
WHITE="\[\033[0;37m\]"
BLACK="\[\033[0;30m\]"
RED="\[\033[0;31m\]"
RED_BOLD="\[\033[1;31m\]"
BLUE="\[\033[0;34m\]"
BLUE_BOLD="\[\033[1;34m\]"
GREEN="\[\033[0;32m\]"
CYAN="\[\033[0;36m\]"
NORM="\[\033[0m\]"
# Prompt: user@host:dir (git:branch)$ ; PROMPT_COMMAND sets the terminal title.
PS1="$GREEN\u$NORM@$RED\h$NORM:$BLUE\W $CYAN\$(parse_git_branch)$NORM\$ "
PROMPT_COMMAND='echo -ne "\033]0;${USER}@${HOSTNAME%%.*}: ${PWD/#$HOME/~}\007"'
#export PIP_REQUIRE_VIRTUALENV=true
#export WORKON_HOME=~/virtualenvs
#source /usr/local/bin/virtualenvwrapper.sh
# Add RVM to PATH for scripting. Make sure this is the last PATH variable change.
export PATH="$PATH:$HOME/.rvm/bin"
# Load RVM into a shell session *as a function*
[[ -s "$HOME/.rvm/scripts/rvm" ]] && source "$HOME/.rvm/scripts/rvm"
export NVM_DIR="$HOME/.nvm"
[ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh" # This loads nvm
[ -s "$NVM_DIR/bash_completion" ] && \. "$NVM_DIR/bash_completion" # This loads nvm bash_completion
| true |
a933dc0e1c2aa04f496465ac3455f22fc9cfb142 | Shell | thomas-vl/airbyte | /airbyte-integrations/connectors/destination-s3/finalize_build.sh | UTF-8 | 217 | 2.75 | 3 | [
"MIT",
"LicenseRef-scancode-free-unknown",
"Elastic-2.0"
] | permissive | #!/bin/bash
set -e

echo "Running destination-s3 docker custom steps..."

# lzop/lzo compression tools are only needed (and installed) on x86_64.
# Use the same "=" operator in both tests; the original mixed "==" and "=".
ARCH=$(uname -m)
if [ "$ARCH" = "x86_64" ] || [ "$ARCH" = "amd64" ]; then
  echo "$ARCH"
  yum install lzop lzo lzo-dev -y
fi
yum clean all
| true |
5b2e43f15c9255fb494198d1415d5fd2d6bc2e16 | Shell | botic/ringodev-docker | /docker/build-images.sh | UTF-8 | 401 | 2.984375 | 3 | [
"Apache-2.0"
] | permissive | # build buildservice
# Build one Docker image from a sibling directory.
#   $1 = image short name (tag becomes ringojs/$1, context is $1/)
#   $2 = exit status to use on failure
build_image()
{
    if docker build -t "ringojs/$1" "$1/"; then
        echo "$1 image SUCCESS"
    else
        echo "$1 image ERROR"
        exit "$2"
    fi
}

# build buildservice, then the package test image
build_image buildservice 1
build_image buildtests 2

echo "Built all images."
exit 0
d47205341e3fb9346635d1bf7c539eff8b2dd420 | Shell | kiteam/kiteam | /src/shell/backup.sh | UTF-8 | 282 | 3.015625 | 3 | [
"MIT"
] | permissive | #!/bin/sh
# Database info
DB_NAME="bhf"
DB_USER="root"
# NOTE(review): plaintext password passed on the command line is visible in
# `ps` output; prefer a ~/.my.cnf credentials file.
DB_PASS="123456"
BIN_DIR="/usr/bin"
BCK_DIR="/var/backup/bhf-api/mysql"
DATE=$(date +%F)
# Only report success when the pipeline succeeds. (This checks gzip's
# status; a mysqldump failure mid-stream may still leave a partial dump.)
if "$BIN_DIR/mysqldump" --opt -u"$DB_USER" -p"$DB_PASS" "$DB_NAME" | gzip >"$BCK_DIR/db_$DATE.gz"
then
    echo "Backup successful -> $BCK_DIR/db_$DATE.gz"
else
    echo "Backup failed" >&2
    exit 1
fi
2363133ff0ac9ea57db94a860539025de9d01527 | Shell | marccampbell/replicated-action | /release/entrypoint.sh | UTF-8 | 328 | 3.28125 | 3 | [] | no_license | #!/bin/bash
# Create a new release from replicated.yaml and promote the Unstable channel to use it.
# Aborts if version tag is empty.

set -e

# Short commit hash identifies the release version.
VERSION="${GITHUB_SHA:0:7}"

# Enforce the documented contract: the original never actually checked this.
if [ -z "$VERSION" ]; then
    echo "GITHUB_SHA is empty; cannot determine release version" >&2
    exit 1
fi

RESULT=$(replicated release create --yaml "$(< "${REPLICATED_YAML:-./replicated.yaml}")" --promote "${REPLICATED_CHANNEL:-Unstable}" --version "${VERSION}")
# Surface the CLI's response in the action log (was captured and discarded).
echo "$RESULT"
e40e44d5b989db5c093e91e22365331779d584ff | Shell | ianlee/standalone-fw | /setupint.sh | UTF-8 | 923 | 3.234375 | 3 | [] | no_license | ##################################################################################
# Basic Linux Firewall - Setting up network.
#
# setupfw.sh
#
# Author: Ian Lee Luke Tao
# Date: February 20, 2014
#
# setting up network on internal machine
#
# Note: disable networking applet for p3p1 and em1 as it will overwrite
# any settings made with this script
#
##################################################################################
EXTERNAL_INTERFACE="em1"
INTERNAL_GATEWAY_BINDING="1"
INTERNAL_INTERFACE="p3p1"
INTERNAL_SUBNET="192.168.10"
INTERNAL_BINDING="2"
DOMAIN="ad.bcit.ca"
DNS_IP1="142.232.191.39"
DNS_IP2="142.232.191.38"
# Take the external NIC down and give the internal NIC a static address
# (192.168.10.2), routing all traffic through the firewall at .1.
ifconfig $EXTERNAL_INTERFACE down
ifconfig $INTERNAL_INTERFACE $INTERNAL_SUBNET.$INTERNAL_BINDING up
route add default gw $INTERNAL_SUBNET.$INTERNAL_GATEWAY_BINDING
# Overwrite resolv.conf with the AD domain and its two DNS servers.
echo -e "domain $DOMAIN\nsearch $DOMAIN\nnameserver $DNS_IP1\nnameserver $DNS_IP2\n" >/etc/resolv.conf
19decbc17bbaa0bd997b0b649087f1479c12f739 | Shell | jdmaloney/zfs_utils | /zfs_default_quota_set.sh | UTF-8 | 1,577 | 3.203125 | 3 | [] | no_license | #!/bin/bash
pushd $(dirname "$0") > /dev/null
source ./zfs_utils.conf
while IFS= read -r line; do
IFS=" " read -r username block_quota obj_quota <<< "${line}"
if [ "${block_quota}" != "${def_ublock_quota}" ] || [ "${obj_quota}" != "${def_uobj_quota}" ]; then
## Quota not set right
if ! printf '%s\0' "${users_exempt[@]}" | grep -Fxqz ${username}; then
## User is not exempt from quota enforcement
/usr/sbin/zfs set userquota@${username}=${def_ublock_quota} ${dataset_path}
/usr/sbin/zfs set userobjquota@${username}=${def_uobj_quota} ${dataset_path}
echo "Set default quotas for user ${username} at $(date +%Y-%m-%d_%H-%M-%S)" >> "${zfs_log}"
fi
fi
done < <(/usr/sbin/zfs userspace -H ${dataset_path} | awk '{print $3" "$5" "$7}')
while IFS= read -r line; do
IFS=" " read -r groupname block_quota obj_quota <<< "${line}"
if [ "${block_quota}" != "${def_gblock_quota}" ] || [ "${obj_quota}" != "${def_gobj_quota}" ]; then
## Quota not set right
if ! printf '%s\0' "${groups_exempt[@]}" | grep -Fxqz ${groupname}; then
## Group is not exempt from quota enforcement
/usr/sbin/zfs set groupquota@${groupname}=${def_gblock_quota} ${dataset_path}
/usr/sbin/zfs set groupobjquota@${groupname}=${def_gobj_quota} ${dataset_path}
echo "Set default quotas for group ${groupname} at $(date +%Y-%m-%d_%H-%M-%S)" >> "${zfs_log}"
fi
fi
done < <(/usr/sbin/zfs groupspace -H ${dataset_path} | awk '{print $3" "$5" "$7}')
| true |
b68f23b22b657e123db42c342b3de396389213ab | Shell | dmoll1974/docker-jmxtrans | /run-jmxtrans.sh | UTF-8 | 2,854 | 2.828125 | 3 | [] | no_license | #!/bin/bash
echo "starting..."
# All settings are overridable via environment variables; the values
# below are the defaults.
LOG_DIR=${LOG_DIR:-"/var/log/jmxtrans"}
LOG_FILE=${LOG_FILE:-"/var/log/jmxtrans/jmxtrans.log"}
SECONDS_BETWEEN_RUNS=${SECONDS_BETWEEN_RUNS:-"10"}
JSON_DIR=${JSON_DIR:-"/var/lib/jmxtrans"}
JAR_FILE=${JAR_FILE:-"/usr/share/jmxtrans/lib/jmxtrans-all.jar"}
JAVAJDK_BIN=${JAVAJDK_BIN:-"/usr/bin/java"}
JAVA_OPTS=${JAVA_OPTS:-"-Djava.awt.headless=true -Djava.net.preferIPv4Stack=true"}
HEAP_SIZE=${HEAP_SIZE:-"512"}
PERM_SIZE=${PERM_SIZE:-"384"}
MAX_PERM_SIZE=${MAX_PERM_SIZE:-"384"}
JMX_PORT=${JMX_PORT:-"2101"}
LOG_LEVEL=${LOG_LEVEL:-"debug"}
CONTINUE_ON_ERROR=${CONTINUE_ON_ERROR:-"false"}
JMXTRANS_OPTS=${JMXTRANS_OPTS:-"-Djmxtrans.log.level=${LOG_LEVEL} -Djmxtrans.log.dir=$LOG_DIR"}
# JBoss remoting client on the classpath; the extra jars below are kept
# commented out (presumably superseded by jboss-client.jar — TODO confirm).
CLASSPATH=/usr/share/jmxtrans/jboss/jboss-client.jar
#/usr/share/jmxtrans/jboss/jboss-as-cli-7.1.1.Final.jar:\
#/usr/share/jmxtrans/jboss/jboss-as-controller-7.1.1.Final.jar:\
#/usr/share/jmxtrans/jboss/jboss-as-controller-client-7.1.1.Final.jar:\
#/usr/share/jmxtrans/jboss/jboss-as-protocol-7.1.1.Final.jar:\
#/usr/share/jmxtrans/jboss/jboss-dmr-1.1.1.Final.jar:\
#/usr/share/jmxtrans/jboss/jboss-logging-3.1.0.GA.jar:\
#/usr/share/jmxtrans/jboss/jboss-marshalling-1.3.11.GA.jar:\
#/usr/share/jmxtrans/jboss/jboss-marshalling-river-1.3.11.GA.jar:\
#/usr/share/jmxtrans/jboss/jboss-remoting-3.2.3.GA.jar:\
#/usr/share/jmxtrans/jboss/jboss-sasl-1.0.0.Final.jar:\
#/usr/share/jmxtrans/jboss/jboss-threads-2.0.0.GA.jar:\
#/usr/share/jmxtrans/jboss/remoting-jmx-1.0.2.Final.jar:\
#/usr/share/jmxtrans/jboss/staxmapper-1.1.0.Final.jar:\
#/usr/share/jmxtrans/jboss/xnio-api-3.0.3.GA.jar:\
#/usr/share/jmxtrans/jboss/xnio-nio-3.0.3.GA.jar
# Expose jmxtrans itself over unauthenticated JMX on JMX_PORT.
MONITOR_OPTS=${MONITOR_OPTS:-"-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.port=${JMX_PORT} -Dcom.sun.management.jmxremote.rmi.port=${JMX_PORT} "}
GC_OPTS=${GC_OPTS:-"-Xms${HEAP_SIZE}m -Xmx${HEAP_SIZE}m -XX:PermSize=${PERM_SIZE}m -XX:MaxPermSize=${MAX_PERM_SIZE}m"}
# Pass extra jars to jmxtrans via -a only when supplied.
if [ "${ADDITIONAL_JARS}" == "" ]; then
	ADDITIONAL_JARS_OPTS=""
else
	ADDITIONAL_JARS_OPTS="-a ${ADDITIONAL_JARS}"
fi
echo "jar file location = ${JAR_FILE}"
if [ ! -f $JAR_FILE ]; then
	echo "ENV SPECIFIED JAR_FILE File not found - $JAR_FILE"
	exit 1
fi
# Either run a single config file (-f FILENAME) or a whole directory (-j).
if [ -z "$FILENAME" ]; then
	EXEC=${EXEC:-"-jar $JAR_FILE -cp $CLASSPATH -e -j $JSON_DIR -s $SECONDS_BETWEEN_RUNS -c $CONTINUE_ON_ERROR $ADDITIONAL_JARS_OPTS"}
else
	EXEC=${EXEC:-"-jar $JAR_FILE -cp $CLASSPATH -e -f $FILENAME -s $SECONDS_BETWEEN_RUNS -c $CONTINUE_ON_ERROR $ADDITIONAL_JARS_OPTS"}
fi
echo $CLASSPATH
echo "exec details : ${EXEC}"
echo "INIT.." >>$LOG_FILE
echo $JAVAJDK_BIN -server $JAVA_OPTS $JMXTRANS_OPTS $GC_OPTS $MONITOR_OPTS $EXEC
# Replace this shell with the JVM so signals reach jmxtrans directly.
exec $JAVAJDK_BIN -server $JAVA_OPTS $JMXTRANS_OPTS $GC_OPTS $MONITOR_OPTS $EXEC >>$LOG_FILE 2>&1
| true |
2d7a7a1c43f6e3eaf6f38ce17fd2f7a82ceedf9a | Shell | zacharypiell/website | /bin/launch | UTF-8 | 310 | 2.890625 | 3 | [] | no_license | #!/bin/bash
#
# Personal Website
#
# Clean, build and start server
#
# Zachary Piell
# Stop on errors, print commands
# See https://vaneyckt.io/posts/safer_bash_scripts_with_set_euxo_pipefail/
set -Eeuo pipefail
set -x
# make sure in right directory
cd ~/Desktop/grid-website
# Serve
# Simple static file server on http://localhost:8000 (blocks until killed).
python3 -m http.server 8000
| true |
4ec01c924866333e7e1265522680311a0d610eb9 | Shell | PetraGuy/CMEECourseWork | /Week1/Code/CountLines.sh | UTF-8 | 425 | 3.5625 | 4 | [] | no_license | #!/bin/bash
#Author pg5117@ic.ac.uk
# Script: CountLines.sh
# Desc: Count and output number of lines in a file input by user
#Arguments: Requires any file to count, enter any file name in Data dir
#October 2017
#
echo 'enter a filename to count the lines of'
echo 'eg, spawannxs.txt exists in Data'
echo 'or any of these '
# Abort if the data directory is missing rather than listing/reading from
# the wrong place.
cd ../Data || exit 1
ls
# -r keeps backslashes in the typed filename literal.
read -r filename
# Quoting protects filenames containing spaces.
NumLines=$(wc -l < "$filename")
echo "The file $filename has $NumLines lines"
| true |
08c0b957697f09f68d6e8808f026eb93072c89ab | Shell | acorg/lasse-capture-20180918-diamond-refseq | /03-diamond/diamond.sh | UTF-8 | 1,645 | 3.671875 | 4 | [] | no_license | #!/bin/bash -e
# Pipeline step: run DIAMOND blastx on the reads that did not map in step
# 02, convert the hits to JSON and bzip2 them. Relies on helper functions
# (logStepStart, checkFastq, ...) and $root/$logDir from ../common.sh, and
# on SP_SIMULATE/SP_SKIP/SP_FORCE environment variables set by the caller.
. ../common.sh

task=$1
log=$logDir/$task.log
fastq=../02-map/$task-unmapped.fastq.gz
out=$task.json.bz2
dbfile=$root/share/ncbi/diamond-dbs/viral-protein-20190729.dmnd

logStepStart $log
logTaskToSlurmOutput $task $log
checkFastq $fastq $log

if [ ! -f $dbfile ]
then
    echo "  DIAMOND database file $dbfile does not exist!" >> $log
    exit 1
fi

function skip()
{
    # Make it look like we ran and produced no output.
    echo "  Creating no-results output file due to skipping." >> $log
    bzip2 < header.json > $out
}

function run_diamond()
{
    echo "  DIAMOND blastx started at $(date)" >> $log

    # Leave two cores free for the downstream conversion/compression.
    diamond blastx \
        --threads $(($(nproc --all) - 2)) \
        --query $fastq \
        --db $dbfile \
        --outfmt 6 qtitle stitle bitscore evalue qframe qseq qstart qend sseq sstart send slen btop |
    convert-diamond-to-json.py | bzip2 > $out

    echo "  DIAMOND blastx stopped at $(date)" >> $log
}

# Decide whether to run, skip, or preserve existing output based on the
# SP_* control variables.
if [ $SP_SIMULATE = "0" ]
then
    echo "  This is not a simulation." >> $log
    if [ $SP_SKIP = "1" ]
    then
        echo "  DIAMOND is being skipped on this run." >> $log
        skip
    elif [ -f $out ]
    then
        if [ $SP_FORCE = "1" ]
        then
            echo "  Pre-existing output file $out exists, but --force was used. Overwriting." >> $log
            run_diamond
        else
            echo "  Will not overwrite pre-existing output file $out. Use --force to make me." >> $log
        fi
    else
        echo "  Pre-existing output file $out does not exist. Mapping." >> $log
        run_diamond
    fi
else
    echo "  This is a simulation." >> $log
fi

logStepStop $log
| true |
de1f4eaca7c1753825da5ad796b7904786d10a84 | Shell | stajichlab/Spizellomyces_DAOM_BR117_genome | /DNA/make_histo.sh | UTF-8 | 786 | 3.15625 | 3 | [] | no_license | #!/usr/bin/bash -l
#SBATCH -a 1 -p short -C xeon -N 1 -n 32 --mem 256gb --out make_histo.%a.log
# SLURM array job: build a 21-mer jellyfish histogram for the Nth paired
# read set (N = array task id), staging decompressed reads in $SCRATCH.
CPU=${SLURM_CPUS_ON_NODE}
if [ -z $CPU ]; then
	CPU=1
fi
N=${SLURM_ARRAY_TASK_ID}
if [ -z $N ]; then
	N=1
fi
module load jellyfish
module load workspace/scratch
# Pick the Nth R1 fastq; derive sample name and the matching R2 file.
for n in $(ls */*_R1_001.fastq.gz | sed -n ${N}p )
do
	name=$(basename $n _L001_R1_001.fastq.gz | perl -p -e 's/_S\d+//')
	r=$(echo -n "$n" | perl -p -e 's/_R1/_R2/')
	echo "$name $n $r"
	# Skip samples whose histogram already exists (makes reruns cheap).
	if [ ! -f $name.histo ]; then
		mkdir -p $SCRATCH/$name
		pigz -dc $n > $SCRATCH/$name/$(basename $n .gz)
		pigz -dc $r > $SCRATCH/$name/$(basename $r .gz)
		jellyfish count -C -m 21 -s 5000000000 -t $CPU $SCRATCH/$name/*.fastq -o $SCRATCH/$name.jf
		jellyfish histo -t $CPU $SCRATCH/$name.jf > $name.histo
	fi
done
| true |
1480010ce4a98e3467881355d0e633a36fe0bf1f | Shell | Oxyd76/dotfiles | /rofi/.config/rofi/scripts/wallpaper | UTF-8 | 2,033 | 3.65625 | 4 | [] | no_license | #!/bin/bash
rofi_command='rofi -theme themes/wallpaper.rasi'

# Variable passed to rofi
options=("random"
"random from latest"
"random from most viewed"
"random from toplist"
"random from favorites"
"random and blurred"
"random from saved"
"keep wallpaper")

chosen="$(printf '%s\n' "${options[@]}" | $rofi_command -p "wallpaper" -dmenu -selected-row 2)"

# Image folder location
img_dir="$HOME/Pictures"

# Wallpapers folder location
wallpapers_dir="$img_dir/wallpapers"

# Wallpaper
wallpaper="$img_dir/wallpaper.jpg"

# Download a random wallpaper with swd, convert it into $wallpaper and
# apply it with hsetroot. Replaces six near-identical case arms.
#   $1 - extra convert options (e.g. "-blur 0x80"), may be empty
#   $2 - optional swd sort order (latest/views/toplist/favorites)
set_random_wallpaper() {
    local img
    if [ -n "$2" ]; then
        img=$(swd -l "$img_dir" -s "$2")
    else
        img=$(swd -l "$img_dir")
    fi
    # shellcheck disable=SC2086 -- $1 is intentionally word-split into options
    convert $1 "$img" "$wallpaper"
    rm "$img"
    hsetroot -fill "$wallpaper" >/dev/null
}

case $chosen in
"random")
    set_random_wallpaper "" ""
    ;;
"random from latest")
    set_random_wallpaper "" latest
    ;;
"random from most viewed")
    set_random_wallpaper "" views
    ;;
"random from toplist")
    set_random_wallpaper "" toplist
    ;;
"random from favorites")
    set_random_wallpaper "" favorites
    ;;
"random and blurred")
    set_random_wallpaper "-blur 0x80" ""
    ;;
"random from saved")
    # Pick one of the previously archived wallpapers at random.
    mapfile -t wallpapers < <(find "$wallpapers_dir" -type f)
    cp -rf "${wallpapers[$(("$RANDOM" % ${#wallpapers[@]}))]}" "$wallpaper"
    hsetroot -fill "$wallpaper" >/dev/null
    ;;
"keep wallpaper")
    # Name of image
    img="wallpaper-$(date +%y%m%d-%H%M%S).jpg"
    # Copy the wallpaper image to archive folder
    [[ ! -d "$wallpapers_dir" ]] && mkdir -p "$wallpapers_dir"
    cp "$wallpaper" "$wallpapers_dir/$img"
    notify-send --hint=string:x-dunst-stack-tag:wallpaper "$img saved"
    ;;
esac
| true |
ce5247e8a817d4d35a05409b85f7285b4cf00a6e | Shell | sipb/minecraft | /config/run-afs.sh | UTF-8 | 1,766 | 3.4375 | 3 | [] | no_license | #!/bin/bash
# Modified from https://github.com/mit-scripts/sql-backup/blob/master/run-afs.sh
# We can't do an AFS backup unless
# a) we're under the lock
# b) we have tokens
#
# unfortunately, we can't guarantee both of these at the same time,
# but we can guarantee a valid token once we get one, using k5start.
# As a result, we're going to use a clever scheme, in which we
# premptively lock, run k5start, and if that succeeds, we can then do
# the backup. If not, we release the lock and try again.
#
# For this to work, we need to run k5start in "kinit daemon" mode,
# which means we need a pid fie.
# NOTE(review): base/max_size below are never used in this script —
# presumably leftovers from the sql-backup original; confirm before removing.
base=/srv/data/mysql/db
# Remember that du reports values in kb
max_size=$((100 * 1024))

kstartpid=$(mktemp /tmp/backup-ng-k5start.XXXXXXXXXX)
kstartret=1
# Retry until a locked k5start attempt succeeds; the subshell exits 1
# (releasing the flock on fd 200) whenever k5start fails.
while [ $kstartret -ne 0 ]; do
    echo "trying";
    (
	flock --exclusive 200
	k5start -f /etc/daemon.keytab -u daemon/froyo-machine.mit.edu -t -K 15 -l6h -b -p "$kstartpid" || exit 1
	# If we get here, we're under both the lock and the k5start
	aklog -force
	aklog -c sipb
	RETENTION='5D'
	ROOT="/home/minecraft/creative"
	BACKUP_ROOT="/afs/sipb/project/minecraft/backups/creative"
	mkdir -p "$BACKUP_ROOT"
	cd "$ROOT"
	for dir in mitworld; do
		echo "Backing up $dir"
		rdiff-backup "$ROOT/$dir" "$BACKUP_ROOT/$dir" || echo "Failed to backup"
		rdiff-backup "$ROOT/$dir" "/mnt/gdrive/" || echo "Failed to gbackup"
		# NOTE(review): if re-enabled, the bare string after || would be
		# executed as a command — wrap it in echo.
		#rdiff-backup --force --remove-older-than "$RETENTION" "$BACKUP_ROOT/$dir" >/dev/null || "Failed to purge old backups"
		echo "Done Backing up $dir"
	done
	# Okay, we're all done. Kill k5start
	kill -TERM $(cat "$kstartpid")
	exit 0
    ) 200> /home/minecraft/.lock/backup-ng.lock
    kstartret=$?
done
rm -f "$kstartpid"
| true |
aae7c158f27b054676821d624495bdd752b7f469 | Shell | fmishin/libsmi | /test/smilint-smiv2.test.in | UTF-8 | 1,306 | 4.15625 | 4 | [
"BSD-3-Clause",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-newlib-historical",
"Beerware"
] | permissive | #!/bin/sh
#
# smilint-FORMAT.test --
#
# smilint test for various input formats.
#
# FORMAT=xxx :
# Check `smilint -l9' output for all TESTMIBS read from dumps/xxx/
# against nominal dumps/smilint-xxx/ output files.
#
# NOTE: this is an autoconf .in template — @DIFF@ is substituted by
# configure; TESTMIBS is expected in the environment.
# Derive the input format from this script's own filename.
FORMAT=`echo $0 | sed -e 's/^.*smilint-\(.*\).test/\1/'`
ACTUALDIR=smilint-${FORMAT}.out
NOMINALDIR=dumps/smilint-${FORMAT}
rm -rf ${ACTUALDIR}
mkdir ${ACTUALDIR}
RC=0
FAILED=""
# Run smilint for each MIB and diff the output against the nominal copy;
# empty diffs are removed, non-empty ones mark the run as failed.
for mib in ${TESTMIBS} ; do
	echo "comparing \`smilint -l9 dumps/${FORMAT}/$mib' output with ${NOMINALDIR}/*."
	cd ${ACTUALDIR}
	../../tools/smilint -c/dev/null -l9 ../dumps/${FORMAT}/$mib > $mib 2>&1
	cd ..
	@DIFF@ ${ACTUALDIR}/$mib ${NOMINALDIR}/$mib > ${ACTUALDIR}/$mib.diff
	if [ ! -s ${ACTUALDIR}/$mib.diff ] ; then
		rm ${ACTUALDIR}/$mib.diff
	else
		FAILED=1
	fi
done
if [ "$FAILED" ] ; then
	echo "*** smilint output differs, see ${ACTUALDIR}/*.diff"
	RC=1
fi
# Regenerate the helper script that promotes current output to nominal.
rm -f sync-dumps
cat > sync-dumps <<EOF
#
# sync-dumps
#
# This script copies test output files to the nominal files in the
# dumps/ directory. It MUST ONLY be executed after carefully ensuring
# that the current output files are valid. Otherwise the sense of
# this test suite would be dubious.
#
CMDPREFIX=echo
if [ "\$1" == "-f" ] ; then CMDPREFIX="" ; fi
EOF
chmod a+x sync-dumps
exit ${RC}
| true |
2053d67f6714d5f1e7c2ed9c9ababa8b9ca59470 | Shell | skmpersonal/docs | /2011_WSTC_WMQ_Security_Lab/Lab Files/01_Introduction/build.ksh | UTF-8 | 1,627 | 2.75 | 3 | [] | no_license | #!/usr/bin/ksh
# Run from the script's own directory regardless of invocation path.
cd "$(cd "$(dirname "$0")"; pwd)"
# Run build scripts from all prior modules
../00_Initialize/build.ksh
# Define the lab's WMQ objects on one queue manager via runmqsc.
#   $1 = local QMgr, $2 = remote QMgr, $3 = local port, $4 = remote port
# Output is captured in a timestamped .out file per QMgr.
function _MQSC {
TimeStamp=$(date "+%y%m%d-%H%M")
(date;echo) >"${1}.$TimeStamp.out"
print Building ${1} QMgr objects | tee -a ${1}.$TimeStamp.out
runmqsc ${1} >>"${1}.$TimeStamp.out" 2>&1 << EOF
* -----------------------------------------------------------
* ${0##*/} - Define ${1} objects
*
* 20100303 T.Rob - New script
* 20110227 T.Rob - Split out init to 00_ module
*
* -----------------------------------------------------------
dis qmgr qmname
* Queue Manager Name ${1}
*Local Queue Defintions for Transmit Queueus
DEFINE QLOCAL ('${2}') +
DESCR('Transmit queue ${2}') +
USAGE(XMITQ) +
TRIGGER +
TRIGTYPE(FIRST) +
INITQ('SYSTEM.CHANNEL.INITQ') +
REPLACE
* Loopback queue
DEFINE QREMOTE(${1}.${2}.LPBK) +
RQMNAME(${2}) +
RNAME(${2}.${1}.LPBK) +
XMITQ(' ') +
REPLACE
* Channel Definitions
DEFINE CHANNEL ('${1}.${2}') CHLTYPE(SDR) +
TRPTYPE(TCP) +
CONNAME('localhost(${4}') +
DESCR('Sender Channel to ${2} ') +
XMITQ('${2}') +
REPLACE
RESET CHANNEL ('${1}.${2}')
DEFINE CHANNEL ('${2}.${1}') CHLTYPE(RCVR) +
DESCR('Reciever channel from ${2} ') +
TRPTYPE(TCP) +
REPLACE
* -----------------------------------------------------------
* E N D O F S C R I P T
* -----------------------------------------------------------
EOF
}
# Run the object definitions in
# both directions so each QMgr can reach the other.
_MQSC VENUS MARS 1414 1415
_MQSC MARS VENUS 1415 1414
dspmq
# End of script
# NOTE(review): exiting 1 reports failure even on success — confirm this
# is intentional before relying on the exit status.
exit 1
| true |
2737088b86c254e27bdbdbf57ffee032a0d0a516 | Shell | johnsonhit/furybsd-ports | /mkport.sh | UTF-8 | 1,570 | 4.15625 | 4 | [
"BSD-3-Clause"
] | permissive | #!/bin/sh
# Helper script which will create the port / distfiles
# from a checked out git repo

# Print an error message and abort.
die()
{
  echo "$1"
  exit 1
}

if [ -z "$1" ] ; then
  echo "Please provide port path"
  echo "For example x11-themes/furybsd-wallpapers"
  exit 1
fi

# Only run as superuser
if [ "$(id -u)" != "0" ]; then
   echo "This script must be run as root" 1>&2
   exit 1
fi

# Deal with input for script below
port=$1
dfile=$(echo "$1" | cut -d'/' -f2-)
topdir=$(echo "$1" | cut -d'/' -f1-)

# Cleanup previous copies of ports overlay
rm -rf "/usr/ports/${port}/" || true
cp -R "${topdir}" "/usr/ports/${topdir}"

# Get the version
if [ -e "version" ] ; then
  verTag=$(cat version)
else
  verTag=$(date '+%Y%m%d%H%M')
fi

# Set the version numbers
sed -i '' "s|%%CHGVERSION%%|${verTag}|g" "/usr/ports/${port}/Makefile"

# Get the GIT tag
if [ "$1" = "sysutils/furybsd-dsbdriverd" ]; then
  echo "skipping git ls for forked repo"
else
  ghtag=$(git ls-remote "https://github.com/furybsd/${dfile}" HEAD | awk '{ print $1}')
  sed -i '' "s|%%GHTAG%%|${ghtag}|g" "/usr/ports/${port}/Makefile"
fi

# All remaining steps run inside the port directory; each aborts on failure
# (replaces the repeated "if [ $? -ne 0 ]" blocks).
cd "/usr/ports/${port}" || die "Failed to cd to /usr/ports/${port}"

# Create the makesums / distinfo file
make makesum || die "Failed makesum"

# Clean ports for plist
make clean || die "Failed make clean"

# Create the package plist
make makeplist > pkg-plist || die "Failed makeplist"
sed 1,/you/d pkg-plist >> pkg-plist.fixed
mv pkg-plist.fixed pkg-plist
make clean || die "Failed make clean"
| true |
f3ae4e3dbd5a36c81ad29e50c3432ad27d7ee70a | Shell | l4rz/idrac-7-8-reverse-engineering | /squashfs-linux/Linux/etc/sysconfig/eth0_speed_setting.sh | UTF-8 | 1,641 | 3.890625 | 4 | [] | no_license | #!/bin/sh
# Poll ifplugstatus on interface $1 (up to 30s) until link is detected;
# exits the whole script with 1 on timeout.
wait_for_iface()
{
	loop=30
	while [ $loop -gt 0 ]
	do
		/sbin/ifplugstatus -q $1
		case $? in
			0)
				echo "success"
			;;
			1)
				echo "error"
			;;
			2)
				echo "link detect"
				break
			;;
			3)
				echo "unplugged"
			;;
		esac
		loop=$(( $loop - 1 ))
		sleep 1
	done
	if [ $loop -le 0 ]; then
		exit 1
	fi
}
# Wait until ethtool reports a known speed for eth0.
# NOTE(review): function name has a typo ("fot") but is used consistently;
# the loop has no sleep, so this busy-spins through its 30 attempts.
wait_fot_ethtool()
{
	loop=30
	while [[ "Unknown!" == "`ethtool eth0 | grep Speed | awk '{printf $2}'`" ]]
	do
		echo "Wait for ethtool to get ready"
		if [ $loop -gt 0 ]; then
			loop=$(( $loop - 1 ))
		else
			exit 1
		fi
	done
}
# Platform type comes back as the exit status of MemAccess.
MemAccess PLATFORM_TYPE > /dev/null
PLATFORM_TYPE=$?
if [[ "$PLATFORM_TYPE" == "1" ]]; then
	echo "For Modular Always set the speed to Manual, 100Mbps, Full duplex"
#	wait_for_iface eth0
	wait_fot_ethtool
	/usr/sbin/ethtool -s eth0 autoneg off speed 100 duplex full
else
	#########################################################
	# Apply Ethernet settings (Dell 12G use ETHTOOL_IFNAME, not IFNAME)
	#########################################################
	# Monolithic platforms: apply settings from the iDRAC network config
	# files when the dedicated NIC mode (NET_MODE=0) is in use.
	if [[ -e /tmp/network_config/iDRACnet.conf ]]; then
		source /tmp/network_config/iDRACnet.conf
		if [[ -e /tmp/network_config/NICSwitch.conf ]]; then
			source /tmp/network_config/NICSwitch.conf
			if [[ "${AUTO}" == "1" ]]; then
				NET_MODE="${AUTO_NET_MODE}"
			fi
		fi
		if [[ "${IFACE_ENABLED}" == "yes" ]] && [[ "${NET_MODE}" == "0" ]]; then
			wait_fot_ethtool
			if [[ "${ENET_AUTONEG_ENABLED}" == "yes" ]]; then
				ethtool -s eth0 autoneg on
			else
				ethtool -s eth0 autoneg off speed ${ENET_SPEED} duplex ${ENET_DUPLEX}
			fi
		fi
	fi
fi
exit 0
| true |
010c52fe917fbb100400212f4fdc5482059d7b36 | Shell | emartinez-usgs/dvlp-dnvr-demo | /router/remove-config.sh | UTF-8 | 611 | 3.734375 | 4 | [] | no_license | #!/bin/bash -e
if [ $# -ne 1 ]; then
  echo "Usage: $0 <APP_NAME>";
  # Portable non-zero status ("exit -1" is non-standard and becomes 255).
  exit 1;
fi

APP_NAME=$1;
SERVICE_NAME='router_nginx';

# Get configuration to be removed
CONFIGS=$(docker config ls \
  --filter name="router-server--${APP_NAME}--" \
  --filter name="router-config--${APP_NAME}--" \
  | grep -v 'NAME' \
  | awk '{print $2}'
);

# Detach configuration from service. Build the flags as real array
# elements; the previous `+="string"` form concatenated onto element 0
# and only worked through accidental unquoted word splitting.
DETACH_CONFIGS=();
for config in ${CONFIGS}; do
  DETACH_CONFIGS+=(--config-rm "${config}");
done

docker service update "${DETACH_CONFIGS[@]}" "${SERVICE_NAME}";

# Remove the configurations from server
# (CONFIGS is intentionally unquoted: it is a whitespace-separated list)
docker config rm ${CONFIGS};

exit 0;
| true |
40463e857f50cb9baa9f276709066e9cafc07631 | Shell | doekman/postgres-with-trello | /tool/loaddoc.sh | UTF-8 | 1,246 | 3.796875 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Load one or more JSON documents into a PostgreSQL table via a companion
# .psql script (same basename as this script). Connection comes from the
# standard PG* environment variables.
# Unofficial Bash Strict Mode
set -euo pipefail
IFS=$'\n\t'
# Idea from: <https://stackoverflow.com/a/48396608/56>
table_schema="trello"
table_name="document"
doc_column="doc"
if (( $# < 1 )); then
	>&2 echo "Usage: $(basename "$0") file_to_load.json [another/file.json...]
Load one or more JSON documents in to the current (env PG*) database. Table info:
 table schema: ${table_schema}
 table name: ${table_name}
 id column: auto generated (int)
 id_doc column: filled by trigger from ${doc_column}->>'id' json value.
 ${doc_column} column: the json-document
"
	exit 1
fi
# Process each file argument in turn; abort on the first unreadable one.
while (( $# >= 1 )); do
	file_to_load="$1"
	if [[ ! -r "$file_to_load" ]]; then
		>&2 echo "Can't read from $file_to_load"
		exit 1
	fi
	# Companion psql script lives next to this one (foo.sh -> foo.psql).
	psql_file="${0/.sh/.psql}"
	# Print a terminal-width separator line before each file.
	s=$( printf "%$(tput cols)s" )
	echo "${s// /-}"
	echo "json file | $file_to_load "
	# Can't use "-c" because that can't handle "psql-specific features"
	# see: <https://www.postgresql.org/docs/current/app-psql.html#APP-PSQL-INTERPOLATION>
	psql --no-psqlrc --quiet --expanded --tuples-only -v "table_schema=$table_schema" -v "table_name=$table_name" -v "doc_column=$doc_column" -v "file_to_load=$file_to_load" -f "$psql_file" | grep -v ^$
	shift
done
| true |
cc3ce97cb4c12c1dc9d1bc989b1d361cf609ef31 | Shell | nicolarossi/BBX | /WebContent/script_carousel.sh | UTF-8 | 460 | 2.734375 | 3 | [] | no_license |
#find images/carousel -type f |sort |while read f ; do
#echo '<img class="mySlides w3-animate-fading img img-rounded" src="'$f'" style="width:100%">'
#done

# Emit Bootstrap carousel markup for slides img/slide_01.png .. img/slide_NN.png.
# Only the first slide carries the "active" class.
#   $1 - number of slides (defaults to 30)
emit_slides() {
local count=${1:-30}
local active="active"
local i IMG
for ((i=1;i<=count;i++)); do
IMG=$(printf "img/slide_%02d.png" "$i")
echo "
<div class=\"item $active\">
<div class='row'>
<p class='text-center'>
<img src=\"${IMG}\" class='img-rounded' alt='Logo' >
</p>
</div>
</div>"
active=""
done
}

emit_slides 30
| true |
7eebedc9584b5e4fb505490ca8cd7215e3e9c794 | Shell | yaohuan23/python-coding | /sam2bw/sam2bed.sh | UTF-8 | 395 | 3.578125 | 4 | [] | no_license | #!/bin/bash
EXPECTED_ARGS=2
if [ $# -ne $EXPECTED_ARGS ];
then
echo "Usage: '$0' genome amFile"
exit
fi
genomeHeader=/sharedata/genome/$1/$1.fa.fai
if [[ $2 =~ "sam" ]]
then
input=${2%.sam}
samtools view -bt $genomeHeader $input.sam | bamToBed -i - | cut -f1-3 | sort -k1,1 -k2,2n > $input.bed
fi
if [[ $2 =~ "bam" ]]
then
input=${2%.bam}
bamToBed -i $input.bam | cut -f1-3 > $input.bed
fi
| true |
f090d8e18297c77b449d24386378a83d76ca3133 | Shell | rowlandr71/lfcs-practice | /essential-commands/redirect-output1/setup.sh | UTF-8 | 632 | 3.125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# LFCS practice installer for the redirect-output task (EC001). Creates the
# exercise files under /opt/EC001, installs the test_printer helper and stages
# the solution scripts. Must run as root (writes to /opt and /usr/local/bin).
# The owning user is taken from the current directory's basename, which is
# assumed to be the student's user name.
USER=$(basename $(pwd))
# set up the task prerequisites
mkdir /opt/EC001/
touch /opt/EC001/stdout.txt
touch /opt/EC001/stderr.txt
echo "This should not be removed" > /opt/EC001/both.txt
chown -R $USER /opt/EC001/
mv print.sh /usr/local/bin/test_printer
chown $USER /usr/local/bin/test_printer
chmod +x /usr/local/bin/test_printer
# set up the solution
mkdir /opt/solutions_EC001/
mv solution/*.txt /opt/solutions_EC001/
mv solution/checker.sh /usr/local/bin/check_solution
chmod +x /usr/local/bin/check_solution
mv solution/show.sh /usr/local/bin/show_solution
chmod +x /usr/local/bin/show_solution
rmdir solution
| true |
be08d0c5b25656b11542cc983d3f9ea759f858e2 | Shell | hokuto-fukunaga/oase | /oase_install_package/install_scripts/bin/oase_db_setup_core.sh | UTF-8 | 8,019 | 3.5 | 4 | [
"BSD-3-Clause",
"LGPL-3.0-only",
"MIT",
"Apache-2.0",
"LicenseRef-scancode-public-domain"
] | permissive | #!/bin/bash
# Copyright 2019 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
############################################################
#
# [Overview]
# Tool that generates init_custom.yaml
#
############################################################
################################################################################
# generic functions(should have no dependencies on global variables)
#######################################
# Abort the script when a previous command failed.
# Arguments:
#   $1 - exit status of the command just run
#   $2 - error message to log before exiting
# Exits:
#   1 when $1 is non-zero (after logging via the sourced `log` helper)
#######################################
check_result() {
    # Quote and default the status: the original unquoted `[ $1 -ne 0 ]`
    # produced a test error (and silently continued) when called without
    # arguments; a missing status is now treated as a failure.
    if [ "${1:-1}" -ne 0 ]; then
        log "ERROR : ${2:-unknown error}."
        exit 1
    fi
}
################################################################################
# configuration functions
################################################################################
# append to init_custom.yaml
#######################################
# Append one web_app.System record (Django YAML fixture) to the file named by
# the global $OASE_INICUSTOM_FILE.
# Arguments:
#   $1 pk   $2 config_name   $3 category   $4 config_id   $5 value
#   $6 last_update_timestamp
# Exits 1 (via the sourced `log` helper) unless exactly 6 arguments are given.
# NOTE: the here-doc body is written verbatim into the fixture, so its YAML
# indentation must not be changed.
#######################################
create_initcustom() {
    if [ $# -ne 6 ]; then
        log "ERROR : missing required positional argument."
        exit 1
    fi
    cat << EOS >> $OASE_INICUSTOM_FILE
- model: web_app.System
  pk: $1
  fields:
    config_name: $2
    category: $3
    config_id: $4
    value: $5
    maintenance_flag: 0
    last_update_timestamp: $6
    last_update_user: システム管理者
EOS
}
################################################################################
log "INFO : Start DB existence check."
################################################################################
# NOTE(review): passing the root password via -p${db_root_password} exposes it
# to `ps`; a defaults file would be safer. Kept as-is.
result=$(echo "show databases" | mysql -u root -p${db_root_password} 2>&1)
check_result $? "$result"
db_exists=$(echo "$result" | grep -E ^${db_name}$ 2>&1)
_db_exists_flag=false
if [ -n "$db_exists" ]; then
    ################################################################################
    log "INFO : ${db_name} exists."
    ################################################################################
    _db_exists_flag=true
else
    ################################################################################
    log "INFO : Start CREATE DATABASE."
    ################################################################################
    result=$(echo "CREATE DATABASE ${db_name} CHARACTER SET utf8;" | mysql -u root -p${db_root_password} 2>&1)
    check_result $? "$result"
    ################################################################################
    log "INFO : CREATE DATABASE is completed."
    ################################################################################
fi
# Create the application DB user if it does not exist yet, then grant it full
# rights on the application database.
result=$(echo "SELECT User FROM mysql.user;" | mysql -u root -p${db_root_password} 2>&1)
check_result $? "$result"
user_exits=$(echo "$result" | grep -E ^${db_username}$ 2>&1)
if [ -z "$user_exits" ]; then
    ################################################################################
    log "INFO : Start CREATE USER."
    ################################################################################
    result=$(echo "CREATE USER '"${db_username}"' IDENTIFIED BY '"${db_password}"';" | mysql -u root -p${db_root_password} 2>&1)
    check_result $? "$result"
    ################################################################################
    log "INFO : CREATE USER is completed."
    ################################################################################
fi
result=$(echo "GRANT ALL ON "${db_name}".* TO '"${db_username}"';" | mysql -u root -p${db_root_password} 2>&1)
check_result $? "$result"
################################################################################
log "INFO : DB existence check is completed."
################################################################################
# If the database already existed we assume fixtures/migrations were done on a
# previous run and stop here.
if ${_db_exists_flag}; then
    ################################################################################
    log "INFO : Skip the following because the DB existed."
    ################################################################################
    exit 0
fi
################################################################################
log "INFO : Start create init_custom.yaml."
################################################################################
# get init_costom.yaml
OASE_FIXTURES_DIR=$(cd $oase_directory/OASE/oase-root/web_app/fixtures/;pwd)
OASE_INICUSTOM_FILE=$OASE_FIXTURES_DIR/init_custom.yaml
# initialize init_custom.yaml
if [ -e $OASE_INICUSTOM_FILE ]; then
    log "INFO : Initialize init_custom.yaml."
    cp /dev/null $OASE_INICUSTOM_FILE
fi
# append to init_custom.yaml
log "INFO : append to init_custom.yaml."
# password encryption
encrypter=$oase_directory/OASE/tool/encrypter.py
date=`date +"%Y-%m-%dT%H:%M:%S"`
create_initcustom 2 "ルールファイル設置ルートパス" "RULE" "RULEFILE_ROOTPATH" ${rulefile_rootpath} $date
create_initcustom 26 "DMリクエスト送信先" "DMSETTINGS" "DM_IPADDRPORT" ${dm_ipaddrport} $date
create_initcustom 27 "DMユーザID" "DMSETTINGS" "DM_USERID" ${rhdm_adminname} $date
encrypted_password=$(python3 $encrypter ${rhdm_password} 2>&1)
check_result $? $encrypted_password
create_initcustom 28 "DMパスワード" "DMSETTINGS" "DM_PASSWD" $encrypted_password $date
create_initcustom 29 "適用君待ち受け情報" "APPLYSETTINGS" "APPLY_IPADDRPORT" ${apply_ipaddrport} $date
create_initcustom 31 "OASEメールSMTP" "OASE_MAIL" "OASE_MAIL_SMTP" ${oasemail_smtp} $date
create_initcustom 32 "Maven repositoryパス" "RULE" "MAVENREP_PATH" ${mavenrep_path} $date
create_initcustom 50 "RabbitMQユーザID" "RABBITMQ" "MQ_USER_ID" ${RabbitMQ_username} $date
encrypted_password=$(python3 $encrypter ${RabbitMQ_password} 2>&1)
check_result $? $encrypted_password
create_initcustom 51 "RabbitMQパスワード" "RABBITMQ" "MQ_PASSWORD" $encrypted_password $date
create_initcustom 52 "RabbitMQIPアドレス" "RABBITMQ" "MQ_IPADDRESS" ${RabbitMQ_ipaddr} $date
create_initcustom 53 "RabbitMQキュー名" "RABBITMQ" "MQ_QUEUE_NAME" ${RabbitMQ_queuename} $date
################################################################################
log "INFO : Create init_custom.yaml is completed."
################################################################################
################################################################################
log "INFO : Start DB migrations."
################################################################################
OASE_WEBAPP_DIR=$(cd $oase_directory/OASE/oase-root/web_app/;pwd)
# if the migrations directory does not exist
if [ ! -e "$OASE_WEBAPP_DIR/migrations" ]; then
    log "INFO : create migrations directory."
    mkdir -p $OASE_WEBAPP_DIR/migrations
fi
OASE_MIGRATIONS_DIR=$(cd $OASE_WEBAPP_DIR/migrations;pwd)
# if the __Init__.py does not exist
if [ ! -e $OASE_MIGRATIONS_DIR/__init__.py ]; then
    log "INFO : create __init__.py."
    touch $OASE_MIGRATIONS_DIR/__init__.py
fi
# Run Django migrations and load the generated fixture from the project root.
cd $(dirname $OASE_WEBAPP_DIR)
migrate_log=$(python manage.py makemigrations web_app 2>&1)
check_result $? "$migrate_log"
log "INFO : $migrate_log"
migrate_log=$(python manage.py migrate 2>&1)
check_result $? "$migrate_log"
log "INFO : $migrate_log."
migrate_log=$(python manage.py loaddata init init_custom 2>&1)
check_result $? "$migrate_log"
log "INFO : $migrate_log."
cd - > /dev/null 2>&1
################################################################################
log "INFO : DB migrations is completed."
################################################################################
| true |
d10fbaa365a558838527997d56ae6673ae341ff4 | Shell | sych74/wordpress-dev | /scripts/changeDomainON.sh | UTF-8 | 291 | 2.875 | 3 | [] | no_license | #!/bin/bash
# Rewrite a WordPress site's domain: replaces the current siteurl with $1
# across all tables using WP-CLI search-replace.
NEW_URL=$1
WP=`which wp`
ROOT_DIR=/var/www/webroot/ROOT
# `wp db query` prints a header row then the value; word-splitting that output
# into an array puts the URL in CURRENT_URL[1] (element 0 is the
# "option_value" column header). NOTE(review): assumes exactly that output
# shape — verify against the installed WP-CLI version.
read -r -a CURRENT_URL <<< $(echo "select option_value from wp_options where option_name='siteurl'" | ${WP} --path=${ROOT_DIR} db query)
${WP} search-replace "${CURRENT_URL[1]}" "${NEW_URL}" --all-tables --path=${ROOT_DIR}
| true |
601caa35636f3a0b0c82b9e773877f67547630fa | Shell | ngkim/CloudWork2013 | /vm-sriov/image/mount-vm-image.sh | UTF-8 | 513 | 3.75 | 4 | [] | no_license | #!/bin/bash
# Mount the first partition of a qcow/raw VM image on /mnt/ubuntu via the NBD
# driver. Requires root (modprobe, qemu-nbd, mount). print_info/print_err come
# from the sourced print.sh helper.
source "$MNG_ROOT/include/print.sh"
MNT_DIR=/mnt/ubuntu
usage() {
echo "* Mount a VM image to $MNT_DIR"
echo " - $0 [IMG-to-mount]"
echo " - ex) $0 cloud.img"
exit -1
}
VM_IMG=$1
if [ -z $VM_IMG ]; then
usage
fi
print_info "VM-IMAGE" "Load nbd module"
modprobe nbd max_part=8
print_info "VM-IMAGE" "Run qemu-nbd..."
# Attach the image to /dev/nbd0; partitions appear as /dev/nbd0pN.
qemu-nbd -c /dev/nbd0 $VM_IMG
mkdir -p $MNT_DIR
print_info "VM-IMAGE" "Mount $MNT_DIR"
mount /dev/nbd0p1 $MNT_DIR
print_err "VM-IMAGE" "You should run 'chroot $MNT_DIR'"
| true |
fe218ac1ca44511a0a20b3d2059c238393474170 | Shell | usamasaqib/pwnkernel | /launch.sh | UTF-8 | 640 | 3.09375 | 3 | [
"BSD-2-Clause"
] | permissive | #!/bin/bash
# Boot the locally built kernel in QEMU with a 9p share to the host.
#   $1 - kernel version (default 5.4, bzImage expected under build/linux-$1/)
#   $2 - optional existing path to share with the guest (default ./shared)
KERNEL_VERSION=$1
BUILD_DIR=$(pwd)/build
HOST_SHARE=$(pwd)/shared
if [[ -e $2 ]]
then
	HOST_SHARE=$2
fi
if [[ -z $KERNEL_VERSION ]]
then
	KERNEL_VERSION=5.4
fi
echo "Use Ctrl+] to send interrupt to QEMU. Sleeping for 3 seconds..."
# Rebind the terminal interrupt key so Ctrl+C reaches the guest.
stty intr ^]
#
# launch
#
# -s opens a gdb stub on :1234; the 9p share is mounted in the guest with
# mount_tag "hostshare".
exec qemu-system-x86_64 \
	-kernel build/linux-$KERNEL_VERSION/arch/x86/boot/bzImage \
	-smp $(nproc) \
	-nographic \
	-monitor none \
	-fsdev local,security_model=passthrough,id=fsdev0,path=$HOST_SHARE \
	-device virtio-9p-pci,id=fs0,fsdev=fsdev0,mount_tag=hostshare \
	-hda qemu-image.img \
	-s \
	-append "console=ttyS0 root=/dev/sda rw single" \
	--enable-kvm
| true |
9f0f7b272d8ad7bcfbed1f589193a75971ae0739 | Shell | npklein/ngs-utils | /makeBedForDiagnostics.sh | UTF-8 | 2,344 | 4.15625 | 4 | [] | no_license | #!/usr/bin/bash
# Abort on any command failure (-e) and on use of unset variables (-u).
set -e
set -u
function showHelp() {
	#
	# Display commandline help on STDOUT, then exit successfully.
	# Fix: the option that names the new BED file is -d (see the
	# getopts "b:d:e:h" loop below); the help text previously and
	# incorrectly documented it as -n.
	#
	cat <<EOH
===============================================================================================================
Script to make Bed files for Diagnostics.
Usage:
	$(basename "$0") OPTIONS
Options:
	-h Show this help.
	-b Name of the BED file
	-d Name of the new BED file
	-e Making BED file for exomekit [default=false]
===============================================================================================================
EOH
	trap - EXIT
	exit 0
}
# Parse options: -b existing BED file, -d name of the new kit, -e exome-kit
# mode, -h help.
while getopts "b:d:e:h" opt;
do
case $opt in h)showHelp;; b)bedfile="${OPTARG}";; d)name="${OPTARG}";; e)exome="${OPTARG}";;
esac
done
if [[ -z "${bedfile:-}" ]]
then
	echo -e '\nERROR: Must specify a BED file!\n'
	showHelp
	exit 1
fi
if [[ -z "${name:-}" ]]
then
	echo -e '\nERROR: Must specify a Name for the new Bed file!\n'
	showHelp
	exit 1
fi
# Any -e value means "exome kit"; absence means a targeted kit.
if [[ -z "${exome:-}" ]]
then
	exome="false"
else
	exome="true"
fi
# Refuse to overwrite an already-deployed kit.
if [ -d "/apps/data/Agilent/${name}" ]
then
	echo "/apps/data/Agilent/${name} already exists"
	exit 1
elif [ -d "/apps/data/UMCG/Diagnostics/${name}" ]
then
	echo "/apps/data/UMCG/Diagnostics/${name} already exists"
	exit 1
fi
thisDir=$(pwd)
umcgDir=/apps/data/UMCG/Diagnostics/
mkdir -p "${name}/human_g1k_v37/"
echo "created ${name}/human_g1k_v37/"
cp "${bedfile}" "${name}"/
echo "copied ${bedfile} ${name}/"
## navigate to folder
cd "${name}"
cp "${bedfile}" "human_g1k_v37/captured.bed"
echo "copied ${bedfile} to human_g1k_v37/captured.bed"
module load ngs-utils
cd human_g1k_v37/
## Run the prepare step
if [[ "${exome}" == 'true' ]]
then
	# NOTE(review): single quotes keep ${name} literal in this message —
	# double quotes were probably intended.
	echo 'Creating bedfiles for a new exomekit ${name}'
	sh ${EBROOTNGSMINUTILS}/prepare_NGS_Bedfiles.sh -n captured
else
	echo "Creating bedfiles for a new kit ${name}"
	sh ${EBROOTNGSMINUTILS}/prepare_NGS_Bedfiles.sh -n captured -c true -d targeted
fi
##
cd "${thisDir}"
echo "copied ${name} to ${umcgDir}"
cp -r "${name}" ${umcgDir}
cd "${umcgDir}/${name}/human_g1k_v37/"
echo "renaming captured into ${name}"
rename "captured" "${name}" "captured."*
#perbase
cd "${umcgDir}/CoveragePerBase/"
mkdir "${name}"
cd "${name}"
ln -sf "../../${name}"/
#pertarget
cd "${umcgDir}/CoveragePerTarget/"
mkdir "${name}"
cd "${name}"
ln -sf "../../${name}"/
echo "FINISHED"
| true |
f04f1c2771f35a414accfcadafefc8c084f80bd0 | Shell | thoran/ymendel-dotfiles | /system/functions.bash | UTF-8 | 540 | 3.40625 | 3 | [] | no_license | grok()
{
grep -ri "$*" . | grep -v '\.svn' | grep -v '\.git'
}
# List the unique file names that contain the given text (case-insensitive),
# delegating the search to grok() and keeping only the file-name column.
files_with()
{
  # Fix: "$*" keeps a multi-word pattern as ONE grep argument (the original
  # unquoted $* split it into several); sort -u replaces sort | uniq.
  grok "$*" | cut -d : -f 1 | sort -u
}
# Print the IPv4 address of a network interface (default: en0).
# NOTE: loading this function shadows the system `ip` utility.
ip()
{
  # Quote the expansion so an oddly named interface argument stays one word.
  ifconfig "${1:-en0}" | awk '$1 == "inet" { print $2 }'
}
# Print the MAC (ether) address of a network interface (default: en0).
mac()
{
  # Quote the expansion so an oddly named interface argument stays one word.
  ifconfig "${1:-en0}" | awk '$1 == "ether" { print $2 }'
}
# Look up a one-line Wikipedia summary for a term via the wp.dg.cx DNS TXT
# service (network access required).
wiki()
{
  # Quote the term so it cannot be glob-expanded by the shell.
  dig +short txt "$1.wp.dg.cx"
}
# Move every file nested below the target directory up into the target itself,
# then delete the now-redundant first-level subdirectories.
#
# Fixes: the second find used BSD-only syntax (`-d -depth 1`), which is an
# error on GNU find; both calls also broke on paths containing spaces.
flatten()
{
  FLATTEN_TARGET=${1:-.}
  # Pull files from any nested level directly into the target directory.
  find "$FLATTEN_TARGET" -mindepth 2 -type f -exec mv -- {} "$FLATTEN_TARGET" \;
  # Remove the (now file-free) first-level subdirectories.
  find "$FLATTEN_TARGET" -mindepth 1 -maxdepth 1 -type d -exec rm -fr -- {} \;
}
# cd into the directory containing the named executable, e.g. `go_to python`.
go_to()
{
  local loc
  # command -v is the portable replacement for `which`; bail out early when
  # the program is not on PATH instead of cd-ing to a bogus directory.
  loc=$(command -v "$1") || { echo "go_to: $1 not found" >&2; return 1; }
  cd "$(dirname "$loc")" || return 1
}
alias gt=go_to
| true |
5a8fba3ca59810a2c8815c9a0de54f53b55356e4 | Shell | bryk/csharp-classdiagram-generator | /src/bash/cscdgenerator | UTF-8 | 885 | 3.609375 | 4 | [] | no_license | #!/bin/bash
# Wrapper for cscdgenerator.py: collects all *.cs files under the source dir
# and feeds them to the class-diagram generator. The three if-blocks below
# match the only accepted invocation shapes, in order (-h | -d -o= -s= | -o= -s=);
# as the help says, parameters must be passed in that order.
DIR="$(cd "$( dirname "${BASH_SOURCE[0]}")" && pwd )"
NAME="cscdgenerator"
HELP="MANUAL\n OPTIONS\n -h help\n -d debug\n -o=outputFilename (required)\n -s=sourceDir (required)\n SAMPLE USAGE\n ./$NAME -h\n ./$NAME -d -o=image -s=ins\n ./$NAME -o=image -s=ins/\n PARAMETERS SHOULD BE PASSED IN ORDER!"
if [[ ("$#" = 1 && $1 =~ ^-h$ ) ]]; then
	echo -e "$HELP"
	exit 1
fi
# Debug mode: -d -o=<file> -s=<dir>
if [[ ("$#" == 3 && $1 =~ ^-d$ && $2 =~ ^-o=.+$ && $3 =~ ^-s=.+$) ]]; then
	F=`echo $2 |cut -d= -f 2`
	W=`echo $3 |cut -d= -f 2`
	find "$W" -name '*.cs' | xargs -d '\n' $DIR/cscdgenerator.py -debug -filename "$F"
	exit 0
fi
# Normal mode: -o=<file> -s=<dir>
if [[ ("$#" == 2 && $1 =~ ^-o=.+$ && $2 =~ ^-s=.+$ ) ]]; then
	F=`echo $1 |cut -d= -f 2`
	W=`echo $2 |cut -d= -f 2`
	find "$W" -name '*.cs' | xargs -d '\n' $DIR/cscdgenerator.py -filename "$F"
	exit 0
fi
echo -e "$HELP"
exit 0
| true |
4c6d09343c81774a3370acdb7c520beaf1ab9d9d | Shell | DamienRobert/dotfiles | /script/firefox/query_bookmarks | UTF-8 | 513 | 3.3125 | 3 | [
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] | permissive | #!/bin/sh
# See also: https://github.com/websafe/ffbx/blob/master/ffbx.sh
# Firefox bookmarks extractor - extract bookmarks from user profiles.
# $@ may name a places.sqlite file; otherwise the most recently modified
# default-profile database is used.
FILE="$@"
[ -z $FILE ] && FILE=$(ls -t $HOME/.mozilla/firefox/*default/places.sqlite | sed q)
# --: sql comment
sqlite3 $FILE "
SELECT DISTINCT
moz_places.title,url FROM moz_bookmarks
INNER JOIN moz_places ON moz_bookmarks.fk=moz_places.id
ORDER BY
visit_count DESC,
last_visit_date DESC;" |
awk -F '|' '{printf "%-'$cols's \x1b[36m%s\x1b[m\n", $1, $2}'
| true |
50165ea52e968da4214f0c8431d5d964a2a2ffdd | Shell | paranoidtruth/SagaCoin_install | /boxy_blocks.sh | UTF-8 | 1,071 | 2.859375 | 3 | [] | no_license | #!/bin/bash
#paranoidtruth
# Bootstrap the BoxyCoin wallet from a downloaded blockchain snapshot:
# stop the daemon, fetch and unzip blockchain.zip, wipe the stale state in
# ~/.boxycoin, install the fresh blocks and restart the daemon.
echo "=================================================================="
echo "PARANOID TRUTH BOXY BLOCKS"
echo "=================================================================="
echo "installing unzip"
cd ~/
echo "STOPPING BOXY"
boxycoin-cli stop
sleep 5
echo "install unzip"
sudo apt-get install unzip -y
echo "pulling blockchain zip file"
wget http://www.boxycoin.org/dl/blockchain.zip
echo "unzipping"
unzip blockchain.zip
rm blockchain.zip
cd ~/.boxycoin
echo "clear out old dir"
# Remove cached chain state so the daemon reindexes from the new blocks.
rm -rf blocks mnpayments.dat mncache.dat fee_estimates.dat .lock backups db.log chainstate database peers.dat debug.log
cd ~/
echo "move blocks into boxycoin"
mv blocks ~/.boxycoin/
echo "RESTART WALLET WITH BLOCKS BOOTSTRAP wait..."
boxycoind -daemon
echo "Loading wallet, be patient, wait 60 seconds ..."
sleep 60
boxycoin-cli getmininginfo
echo "THIS MAY TAKE UP TO 10 MINUTES."
echo "RUN: "
echo "boxycoin-cli getmininginfo"
echo "UNTIL it stops saying loading block index & returns valid data instead"
echo "THEN YOU ARE ALL SET TO CONTINUE"
| true |
d1912932a3ec53d4abf490bdbcbb11e37904b799 | Shell | chunfuchang/settings | /bin/runjhat | UTF-8 | 780 | 3.578125 | 4 | [] | no_license | #!/bin/bash
# runjhat
#
#{{IS_NOTE
# Purpose:
# Running jmap and jhat
# Description:
#
# History:
# Wed May 30 15:27:04 2007, Created by tomyeh
#}}IS_NOTE
#
#Copyright (C) 2007 Potix Corporation. All Rights Reserved.
#
#{{IS_RIGHT
#}}IS_RIGHT
#
if [ "$1" = "-h" ] ; then
	echo Usage:
	echo runjhat [process-name]
	echo
	echo "process-name: The process name (not need to be complete)"
	echo " If omitted, tomcat is assumed"
	exit 1
fi
if [ "$1" = "" ] ; then
	procnm=tomcat
else
	procnm=$1
fi
# NOTE(review): parsing `ps -a` output like this is fragile (field position,
# possible self-match) — `pgrep -f "$procnm"` would be more robust; verify
# before changing, as the dump target depends on it.
pid=$(ps -a | grep $procnm | tr -s ' ' | cut -d ' ' -f5)
if [ "$pid" = "" ] ; then
	echo $procnm not found
	exit 1
fi
echo "Dump process $pid"
mkdir -p /tmp/jmap
cd /tmp/jmap
flnm=heap.bin
if [ -f $flnm ] ; then
	rm -f $flnm
fi
# Binary heap dump, then serve it for browsing with jhat.
jmap -dump:format=b,file=$flnm $pid
jhat $flnm
| true |
9f313f76acb6bf19f29b98724d9c41e94515cc72 | Shell | tfynes-pivotal/pcf-powertools | /PcfStartStop/pcfstartstop3om.sh | UTF-8 | 1,633 | 3.515625 | 4 | [] | no_license | #!/bin/bash
shopt -s expand_aliases
source ~/.profile
# Deletes BOSH vms with ruthless abandon
if [[ ($1 == "shut") || ($1 == "start" ) || ($1 == "shutall") ]]
then
echo "Running PCF $1 Process (warning: this toggles director resurrection off/on!)..."
else
echo "Usage: $0 [shut|start|shutall]"
exit 1
fi
if [ $1 == "shutall" ]; then
deployments=$(bosh deployments --column=Name)
#jobVMs=$(bosh vms --column="VM CID" --json | jq --raw-output .Tables[].Rows[].vm_cid)
for thisDeployment in $deployments; do
jobVMs=$(bosh -d $thisDeployment vms --column="VM CID")
for thisVM in $jobVMs; do
echo "DELETING $thisDeployment : $thisVM"
bosh -n -d $thisDeployment delete-vm $thisVM &
done
done
fi
if [ $1 == "shut" ]; then
jobVMs=$(bosh instances --details| awk -F '|' '{gsub(/ /, "", $0); print $2","$7 }')
deleteVMs
fi
if [ $1 == "start" ]; then
#bosh -n deploy
#bosh vm resurrection on
declare -a boshdeployments=()
deployments=$(bosh deployments --json | jq --raw-output .Tables[].Rows[].name)
for thisDeployment in $deployments; do
if [[ $thisDeployment == "cf-"* ]]; then
bosh -d $thisDeployment manifest > /tmp/$thisDeployment.yml
bosh -d $thisDeployment -n deploy /tmp/$thisDeployment.yml &
fi
done
sleep 10m
for thisDeployment in $deployments; do
bosh -d $thisDeployment manifest > /tmp/$thisDeployment.yml
bosh -d $thisDeployment -n deploy /tmp/$thisDeployment.yml &
done
watch -n 10 'bosh tasks --no-filter'
fi
| true |
28d81ed0e5f4725bc86090f41dc859ccd4b94590 | Shell | Leukas/VUMT | /preprocess_msr.sh | UTF-8 | 188 | 2.546875 | 3 | [] | no_license | file=$1
# Column 2 of the MSR TSV ($file, set above), header row dropped:
# even data rows -> msr_2.txt, odd data rows -> msr_1.txt, then run the
# shared preprocessing pipeline on the first half.
# Fixes: quote "$file" so paths with spaces work, and single-quote the awk
# programs so they are not subject to shell expansion.
cut -f 2 -- "$file" | tail -n +2 | awk 'NR%2==0' > msr_2.txt
cut -f 2 -- "$file" | tail -n +2 | awk 'NR%2==1' > msr_1.txt
./preprocess_file.sh msr_1.txt en
./preprocess_file.sh msr_2.txt en | true |
50423246da05254e21f37b7217a43b1e286bdfb4 | Shell | snh-clj/collab-config | /bin/push-changes | UTF-8 | 304 | 3.140625 | 3 | [] | no_license | #!/bin/bash
# Commit and push every meetup repo under ~/repos/collab. Uses the custom git
# aliases `ci` (commit) and `rbu` (rebase) configured in the author's setup.
CURR_PROJ=$(readlink ~/repos/collab/meetup0/current-collab)
for x in ~/repos/collab/meetup*; do
    echo "Pushing $x"
    # Subshell keeps the cd local; on push failure we still try a pull so the
    # working copy ends up current.
    (cd $x \
        && (
            (git fetch \
                && git ci -a -m "Committing $(basename $x) $CURR_PROJ solutions." \
                && git rbu \
                && git push origin master); \
            git pull ));
done
| true |
eb200ea63bbbbcc12afe821d94487aad5dbceef1 | Shell | KrisSaxton/lfs-build-6.2 | /bzip/bzip2-1.0.3/install/060-fix-files | UTF-8 | 263 | 2.53125 | 3 | [] | no_license | #!/bin/bash
# LFS bzip2 package fixup: point libbz2.so at the versioned library, replace
# the bunzip2/bzcat copies with hard links to bzip2 and drop the docs, logging
# everything that happens to the build log.
source configure
pushd ../pkg/debian
{
ln -sv ../../lib/libbz2.so.1.0 usr/lib/libbz2.so
rm -v usr/bin/{bunzip2,bzcat,bzip2}
ln -v bin/bzip2 bin/bunzip2
ln -v bin/bzip2 bin/bzcat
rm -vrf usr/share/doc
} | tee -a ../../logs/060-fix-files.log
popd
| true |
002837c38ec2b5023aea26377bf144789efc1250 | Shell | jvanzyl/harbor-client | /src/test/kind/02-local-ca.sh | UTF-8 | 583 | 2.890625 | 3 | [] | no_license | #!/usr/bin/env bash
# https://deliciousbrains.com/ssl-certificate-authority-for-local-https-development/
# Create a local development CA (key + self-signed cert, 5 years) and trust it
# system-wide on macOS.
caKey="local.dev-ca.key"
caCertificate="local.dev-ca.crt"
# Generate the CA private key
# NOTE(review): genrsa ignores -passout unless a cipher (e.g. -aes256) is also
# given, so this key is written unencrypted — confirm whether that is intended.
openssl genrsa -out ${caKey} -passout pass:foobar 2048
# Generate the CA certificate
openssl req -x509 -new -nodes -key ${caKey} -sha256 -days 1825 -out ${caCertificate} \
	-passin pass:foobar \
	-subj "/C=CA/ST=LocalDev/L=LocalDev/O=Dev/OU=IT/CN=local.dev"
# Add CA certificate
sudo security add-trusted-cert -d -r trustRoot -k "/Library/Keychains/System.keychain" ${caCertificate}
| true |
a62860414ac29c4dd2d34312a80634253603dbea | Shell | logicalclocks/karamel-chef | /dela/bbc5_hs_ports.sh | UTF-8 | 1,637 | 3.078125 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Generate dela/running/hs_ports.sh, a sourceable file of per-cluster port
# assignments derived from CLUSTER_SUFFIX (set by the sourced hs_env.sh).
# Must be run from the karamel-chef repo root.
set -e
if [ ! -d "dela" ]; then
	echo "Run the script from the karamel-chef dir"
	exit 1
fi
KCHEF_DIR=${PWD}
. ${KCHEF_DIR}/dela/running/hs_env.sh
# Start the generated file fresh (truncate), then append one variable per line.
echo "#!/bin/bash" > ${KCHEF_DIR}/dela/running/hs_ports.sh
PORT=$((21000 + ${CLUSTER_SUFFIX}))
echo "SSH_P=${PORT}" >> ${KCHEF_DIR}/dela/running/hs_ports.sh
PORT=$((22000 + ${CLUSTER_SUFFIX}))
echo "MYSQL_P=${PORT}" >> ${KCHEF_DIR}/dela/running/hs_ports.sh
PORT=$((23000 + ${CLUSTER_SUFFIX}))
echo "KARAMEL_P=${PORT}" >> ${KCHEF_DIR}/dela/running/hs_ports.sh
PORT=8080
echo "WEB_P=${PORT}" >> ${KCHEF_DIR}/dela/running/hs_ports.sh
PORT=$((25000 + ${CLUSTER_SUFFIX}))
echo "DEBUG_P=${PORT}" >> ${KCHEF_DIR}/dela/running/hs_ports.sh
PORT=$((26000 + ${CLUSTER_SUFFIX}))
echo "GFISH_P=${PORT}" >> ${KCHEF_DIR}/dela/running/hs_ports.sh
# Generic PORT1..PORT9 at 27000..35000 plus the cluster suffix.
for i in {1..9}
do
	PORT=$((26000 + ${i} * 1000 + ${CLUSTER_SUFFIX}))
	echo "PORT${i}=${PORT}" >> ${KCHEF_DIR}/dela/running/hs_ports.sh
done
PORT=43001
echo "DELA1_P=${PORT}" >> ${KCHEF_DIR}/dela/running/hs_ports.sh
PORT=43002
echo "DELA2_P=${PORT}" >> ${KCHEF_DIR}/dela/running/hs_ports.sh
PORT=43003
echo "DELA3_P=${PORT}" >> ${KCHEF_DIR}/dela/running/hs_ports.sh
PORT=$((44000 + ${CLUSTER_SUFFIX}))
echo "DELA4_P=${PORT}" >> ${KCHEF_DIR}/dela/running/hs_ports.sh
PORT=$((51000 + ${CLUSTER_SUFFIX}))
echo "HS_GFISH_P=${PORT}" >> ${KCHEF_DIR}/dela/running/hs_ports.sh
PORT=43080
echo "HS_WEB_P=${PORT}" >> ${KCHEF_DIR}/dela/running/hs_ports.sh
# Note: chmod appears twice (here and at the end) and one more port is
# appended after this first chmod; harmless, but worth tidying someday.
chmod +x ${KCHEF_DIR}/dela/running/hs_ports.sh
PORT=$((53000 + ${CLUSTER_SUFFIX}))
echo "HS_GFISH_DEBUG=${PORT}" >> ${KCHEF_DIR}/dela/running/hs_ports.sh
chmod +x ${KCHEF_DIR}/dela/running/hs_ports.sh | true |
da105c02e3f324cd7da8297e75b4cc8048f9faec | Shell | chrispaterson/envconfig | /bin/git-aliases/unwind | UTF-8 | 303 | 3.578125 | 4 | [] | no_license | #!/bin/bash
# git-unwind: reset the current branch back to its merge-base with another
# branch (default: main), keeping all changes in the working tree.
# Usage: git unwind [to-branch]
BRANCH=$(git branch --show-current)
# Fix: `[[ $# > 0 ]]` is a *lexical* string comparison inside [[ ]]; use -gt
# for an arithmetic comparison of the argument count.
TO_BRANCH='main'
[[ $# -gt 0 ]] && TO_BRANCH=$1
# Quote the right-hand side so branch names are compared literally, not as
# glob patterns.
if [[ $TO_BRANCH == "$BRANCH" ]]; then
  echo "Can not unwind from $BRANCH to $TO_BRANCH because they are the same"
  exit 1
fi
COMMON_COMMIT=$(git merge-base "$TO_BRANCH" "$BRANCH") || exit 1
git reset "$COMMON_COMMIT"
| true |
cca01c97f1ef01d74707113e5046f3925736ca38 | Shell | dotCMS/zarchive_qa | /artifacts/aws/jobs/nightlyRun.sh | UTF-8 | 1,333 | 3.234375 | 3 | [] | no_license | #!/bin/bash
# expects DOTCMS_VERSION to be set prior to executing
# Nightly-build trigger: remember the last tested commit, fetch the newest
# build metadata for DOTCMS_VERSION, and only kick off downstream builds
# (saveParams.sh) when the commit number changed.
env
export LASTCOMMIT=$(cat buildinfo.txt | python -c 'import sys, json; print json.load(sys.stdin)["contentlets"][0]["commitNumber"]')
curl http://dotcms.com/api/content/query/+structureName:DotcmsNightlyBuilds%20+conHost:SYSTEM_HOST%20+DotcmsNightlyBuilds.version:${DOTCMS_VERSION}/orderby/moddate%20desc/limit/1 2>/dev/null > buildinfo.txt
export DOTCMS_ZIP_URL=http://dotcms.com$(cat buildinfo.txt | python -c 'import sys, json; print json.load(sys.stdin)["contentlets"][0]["zip"]')
export DOTCMS_TAR_GZ_URL=http://dotcms.com$(cat buildinfo.txt | python -c 'import sys, json; print json.load(sys.stdin)["contentlets"][0]["targz"]')
export DOTCMS_COMMIT=$(cat buildinfo.txt | python -c 'import sys, json; print json.load(sys.stdin)["contentlets"][0]["commitNumber"]')
# only do something if commit is different than the last commit processed
echo "LASTCOMMIT = ${LASTCOMMIT}"
echo "DOTCMS_COMMIT = ${DOTCMS_COMMIT}"
echo "DOTCMS_ZIP_URL = ${DOTCMS_ZIP_URL}"
echo "DOTCMS_TAR_GZ_URL = ${DOTCMS_TAR_GZ_URL}"
if [ "$LASTCOMMIT" != "$DOTCMS_COMMIT" ]; then
	echo 'calling saveParams'
	./saveParams.sh
	echo 'returned from saveParams'
else
	echo 'INFO - Since commit already tested - not triggering builds'
	# Drop any stale params file so downstream jobs are not triggered.
	if [ -f params ];
	then
		rm params
	fi
fi | true |
147bbc44fd57c81827bdccc593606a6de1414265 | Shell | solidcell/dotfiles | /config/pianobar/echo-current-song.sh | UTF-8 | 116 | 2.53125 | 3 | [] | no_license | #!/bin/zsh
# Print the currently playing pianobar song, or an empty line when pianobar
# is not running.
# Fix: the original left the backtick substitution unquoted inside [ -z ],
# so any multi-word `ps` output made the test a syntax error. Quote it (and
# use $() instead of backticks). `grep 'pianobar$'` does not match its own
# ps entry because that line ends with the literal `pianobar$`.
if [ -z "$(ps aux | grep 'pianobar$')" ]; then
  echo ''
else
  cat ~/.config/pianobar/current-song.txt
fi
| true |
85ee922e13e0113f703340fced317e8112d46fea | Shell | hafs-community/HAFS | /jobs/JHAFS_MSG_CHECK | UTF-8 | 1,178 | 3.09375 | 3 | [] | no_license | #!/bin/sh
date
export PS4='+ $SECONDS + '
# Trace every command (-x), fail on unset vars (-u) and on errors (-e); PS4
# prefixes each traced line with elapsed seconds.
set -xue
# NCO-style environment setup: ${var:?} aborts if a required variable is
# missing, ${var:-default} fills in development defaults.
export cycle=${cycle:-t${cyc:?}z}
export hafs_ver=${hafs_ver:?}
export NET=${NET:?}
export RUN=${RUN:?}
export envir=${envir:-test}
export RUN_ENVIR=${RUN_ENVIR:-DEV}
export EMAIL_SDM=${EMAIL_SDM:-NO}
export MAILFROM=${MAILFROM:-"nco.spa@noaa.gov"}
export MAILTO=${MAILTO:-"sdm@noaa.gov,nco.spa@noaa.gov,ncep.sos@noaa.gov"}
export HOMEhafs=${HOMEhafs:-${OPSROOT:?}/hafs.${hafs_ver:?}}
export USHhafs=$HOMEhafs/ush
export EXEChafs=$HOMEhafs/exec
export PARMhafs=$HOMEhafs/parm
export FIXhafs=$HOMEhafs/fix
# Prepend $USHhafs to python package search path
export PYTHONPATH=$USHhafs${PYTHONPATH:+:$PYTHONPATH}
export DATA=${DATA:-${DATAROOT}/${jobid}}
mkdir -p $DATA
cd $DATA
# Initialize PDY
setpdy.sh
# Get PDY
. ./PDY
export COMINnhc=${COMINnhc:-${DCOMROOT}/nhc/atcf/ncep}
export COMINjtwc=${COMINjtwc:-${DCOMROOT}/${PDY}/wtxtbul/storm_data}
export COMINmsg=${COMINmsg:-$(compath.py ${envir}/${NET}/${hafs_ver})/inp${RUN}}
# Execute ex-script
# ${PDY:2:6} passes YYMMDD (PDY is YYYYMMDD).
${HOMEhafs}/scripts/exhafs_msg_check.py "${PDY:2:6}" "$cyc"
status=$?; [[ $status -ne 0 ]] && exit $status
# Cleanup DATA dir
cd ${DATAROOT}
if [ "${KEEPDATA:-YES}" != "YES" ]; then
  rm -rf $DATA
fi
date
| true |
f3568ad86f36c887ca1f19c7256601123c5d8082 | Shell | Ctfbuster/forensic_automation | /setup_sift.sh | UTF-8 | 3,287 | 3.703125 | 4 | [] | no_license | #!/bin/bash
# Launch and provision a SANS SIFT forensic workstation on EC2: start the
# instance, wait for it, install the SSM agent over SSH, then update and
# harden it via Systems Manager. run_ssm_command and the $REGION/$PROFILE/
# $SECURITY_GROUP variables come from the sourced helper files.
source parameters.sh
source functions.sh
# Test for dependency
# https://linux.die.net/man/1/sshpass
if [ $(which sshpass| wc -c) -lt 2 ]; then
	echo "This script requires the sshpass utility"
	exit
fi
# Launch a SIFT Workstation
SIFT_AMI=ami-0b9ef98f6dbcfe23d
SIFT_INSTANCE=$(aws ec2 run-instances --image-id $SIFT_AMI --count 1 \
	--instance-type t2.xlarge --security-groups $SECURITY_GROUP \
	--iam-instance-profile Name=EC2_Responder \
	--query Instances[0].InstanceId --tag-specifications \
	'ResourceType=volume,Tags=[{Key=Name,Value=SIFT},{Key=Ticket,Value=123456}]' \
	'ResourceType=instance,Tags=[{Key=Name,Value=SIFT},{Key=Ticket,Value=123456}]' \
	--output json --region $REGION --profile $PROFILE)
if [ $(echo $SIFT_INSTANCE | wc -c) -lt 5 ]; then echo "Failed to Launch SIFT Instance"; exit; fi
SIFT_INSTANCE=$(sed -e 's/^"//' -e 's/"$//' <<<"$SIFT_INSTANCE") # Remove Quotes
echo "The SIFT Workstation has launched"
echo "*** The SIFT InstanceId is $SIFT_INSTANCE"
# Wait until the SIFT Workstation is Running
echo "Waiting for the SIFT Workstation to enter RUNNING state"
aws ec2 wait instance-running --instance-ids $SIFT_INSTANCE \
	--region $REGION --profile $PROFILE
echo "*** The SIFT Instance is in the RUNNING State"
# Determine the Public IP Address of the SIFT Workstation
SIFT_IP=$(aws ec2 describe-instances --instance-ids $SIFT_INSTANCE --output json \
	--region $REGION --profile $PROFILE --query "Reservations[0].Instances[0].PublicIpAddress")
SIFT_IP=$(sed -e 's/^\"//' -e 's/\"$//' <<<"$SIFT_IP") # Remove Quotes
echo "*** The SIFT Public IP Address is $SIFT_IP"
# Determine the Availability Zone of the SIFT Workstation
AZ=$(aws ec2 describe-instances --instance-ids $SIFT_INSTANCE --output json \
	--region $REGION --profile $PROFILE --query "Reservations[0].Instances[0].Placement.AvailabilityZone")
export AZ=$(sed -e 's/^\"//' -e 's/\"$//' <<<"$AZ") # Remove Quotes
echo "*** The SIFT Workstation is in the $AZ availability zone"
# Install the SSM Agent on the SIFT Workstation
# NOTE(review): "forensics" is the AMI's well-known default password; the
# account is disabled further below, but the credential is still visible in
# process listings while sshpass runs.
echo "Installing the Systems Manager Agent"
while : ; do #Wait for SSH
	sshpass -p "forensics" ssh -o StrictHostKeyChecking=no sansforensics@$SIFT_IP 'mkdir /tmp/ssm; \
	wget https://s3.amazonaws.com/ec2-downloads-windows/SSMAgent/latest/debian_amd64/amazon-ssm-agent.deb; \
	sudo dpkg -i amazon-ssm-agent.deb'
	if [ "$?" = "0" ]; then break ;fi
	sleep 3
	printf "*"
done
echo "*** The SSM Agent has been installed via SSH"
# Update the SIFT Workstation
echo "Updating the SIFT Workstation via Systems Manager"
PARAMETERS='{"commands":["sudo apt -y update && sudo DEBIAN_FRONTEND=noninteractive apt -y upgrade"]}'
COMMENT="Update the SIFT Workstation"
run_ssm_command SIFT wait
# Disable the sansforensics user
echo "Disable the sansforensics user via Systems Manager"
PARAMETERS='{"commands":[
	"usermod -L sansforensics",
	"echo \"The sansforensics user has been disabled\""
	]}'
COMMENT="Disable the sansforensics user"
run_ssm_command SIFT wait
# Install the AWS Command Line Interface
echo "Installing the AWS Command Line Interface"
PARAMETERS='{"commands":["pip install awscli"]}'
COMMENT="Install the AWS Command Line Interface"
run_ssm_command SIFT wait
echo; echo "*** The SIFT Workstation has been updated and is ready for use"
| true |
bbe2938626156d286a311b30044708a7f6b712bc | Shell | nuxeo/integration-scripts | /qa-ovh-maintenance/common/jenkins_workspace_cleanup.sh | UTF-8 | 2,156 | 2.75 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash -xe
#
# (C) Copyright 2018 Nuxeo (http://nuxeo.com/) and others.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Contributors:
# alexis timic
# jcarsique
#
# Cleanup Jenkins hosts: Docker and workspaces
echo "### Prune stopped images"
docker image prune -f > /dev/null
echo "### Prune stopped volumes"
docker volume prune -f > /dev/null
echo "### Delete "exited" containers"
docker ps -a | awk '/Exited.*(hours|days|weeks) ago/ {print $1}' | xargs --no-run-if-empty docker rm -v > /dev/null 2>&1
echo "### Delete T&P jobs older than 3 days"
find /opt/jenkins/workspace/TestAndPush -maxdepth 1 -mindepth 1 -type d -mtime +3 -exec rm -r -- {} +
echo "### Delete Nuxeo server ZIP files and unzipped folders older than 3 days"
# NOTE(review): in find, -a binds tighter than -o, so this reads
# "-name tomcat-zip  OR  (-name cap-zip AND -mtime +3 AND -exec rm)";
# the first -name never reaches the rm. Probably needs \( ... \) grouping.
find /opt/jenkins/workspace*/ -name 'nuxeo-server-tomcat-*.zip' -o -name 'nuxeo-cap-*.zip' -mtime +3 -exec rm -- {} +
find /opt/jenkins/workspace*/ -path '*/target/tomcat' -type d -prune -mtime +3 -exec rm -r -- {} +
echo "### Remove Git repositories parent folders older than 5 days"
find /opt/jenkins/workspace*/ -maxdepth 2 -type d -execdir test -d {}/.git \; -mtime +5 -prune -print -exec rm -r -- {} +
echo "### Remove Git repositories parent folders older than 2 days and bigger than 100M"
find /opt/jenkins/workspace*/ -maxdepth 2 -type d -execdir test -d {}/.git \; -mtime +2 -prune -print |xargs du -sh |sort -hr|grep -P "^(.*G|\d{3}M)\t" |cut -d$'\t' -f 2-|xargs rm -r --
echo "### Remove files that the Workspace Cleanup plugin has no permission to delete (NXBT-2205, JENKINS-24824)"
find /opt/jenkins/workspace*/ -path '*/*ws-cleanup*' ! -perm -u+w -prune -exec chmod u+w {} + -exec rm -r -- {} +
| true |
967430b4b6113c0c922c64bfa22e284b5c03c6ac | Shell | akka/doc.akka.io | /docs/alpakka-kafka/s.sh | UTF-8 | 430 | 2.84375 | 3 | [] | no_license | #!/bin/bash
# Rewrite the <meta name="description"> tag of a generated Alpakka doc page
# ($1) and add a canonical link pointing at the "current" docs URL.
f=$1
# Strip the leading version directory (e.g. "0.x/") from the path ...
TMP1=`echo $f|sed 's/^0\.[^\/]*\///'`
# ... then backslash-escape the remaining slashes so the path can be embedded
# in the sed substitution below (which uses / as its delimiter).
TMP=`echo $TMP1|sed "s=\/=\\\\\\/=g"`
echo $TMP
sed -i -e "s/<meta name=\"description\" content='akka-stream-kafka-docs'\/>/<meta name=\"description\" content=\"Alpakka is a Reactive Enterprise Integration library for Java and Scala, based on Reactive Streams and Akka.\"\/><link rel=\"canonical\" href=\"https:\/\/doc.akka.io\/docs\/alpakka-kafka\/current\/$TMP\"\/>/g" $f
| true |
1dd516695d1d97a02a1abdc9d088672ec080eb25 | Shell | kkomissarchik/polyglot | /hello/kotlin/test.sh | UTF-8 | 273 | 3.46875 | 3 | [
"MIT"
] | permissive | #!/bin/sh
set -e
# Run ./test.sh inside every immediate subdirectory, printing a banner per
# suite; set -e aborts on the first failing suite.
# Fix: the helper was renamed from `test` to run_suite — defining a shell
# function called `test` shadows the POSIX test builtin, which is an accident
# waiting to happen. Arguments are also quoted now.
run_suite() {
  echo
  echo "*************************************"
  echo "Testing $1"
  echo "*************************************"
  echo
  cd "$1"
  ./test.sh
  cd ..
}
for f in *; do
  if [ -d "${f}" ]; then
    run_suite "$f"
  fi
done
| true |
fd991a0e6eca47952968613a1cb13f8dacd4adbc | Shell | sialm/dotfiles | /node/install.sh | UTF-8 | 354 | 2.984375 | 3 | [
"MIT"
] | permissive | #!/bin/bash
echo '  installing node'
# Fix: the original redirected curl's stdout to /dev/null *before* the pipe
# (`curl ... >/dev/null 2>/dev/null | bash`), so bash received an empty
# script and nvm was never installed. Silence curl with -s instead and only
# discard the installer's own output.
curl -s https://raw.githubusercontent.com/creationix/nvm/v0.31.1/install.sh | bash >/dev/null 2>/dev/null
export NVM_DIR="$HOME/.nvm"
if [ -d "$NVM_DIR" ]; then
  [ -s "$NVM_DIR/nvm.sh" ] && . "$NVM_DIR/nvm.sh" # This loads nvm
fi
nvm install node >/dev/null
nvm alias default node >/dev/null
| true |
b19bdaad5687fe57f0b482eb54691608c8a55b0e | Shell | mmaitenat/ideafix-behind | /colon_liver/intersect_data.sh | UTF-8 | 596 | 3.234375 | 3 | [] | no_license | #!/bin/bash
# Change the following variables according to your system
pair_filename="maxwell_FFPE_FF_tumor_pairs.csv"
# FIX: tilde expansion does not happen inside double quotes, so the old
# value "~/data/colon_liver/" was passed to bcftools literally.  Use $HOME.
DIR="$HOME/data/colon_liver/"

# Each CSV line holds an "FFPE,FF" tumor pair.  Intersect their annotated
# VCFs; bcftools isec -w1 writes the records common to both as 0002.vcf,
# which is then renamed to <FFPE>_<FF>_isec.vcf.
while read -r pair; do
  FFPE=$(echo "$pair" | awk -F, '{print $1}')
  FF=$(echo "$pair" | awk -F, '{print $2}')
  echo "FFPE filename is $FFPE and FF filename is $FF"
  bcftools isec -f PASS -p "${DIR}${FFPE}/aligned/" -w1 "${DIR}${FFPE}/aligned/${FFPE}_filtermarks_annotated.vcf.gz" "${DIR}${FF}/aligned/${FF}_filtermarks_annotated.vcf.gz" && mv "${DIR}${FFPE}/aligned/0002.vcf" "${DIR}${FFPE}/aligned/${FFPE}_${FF}_isec.vcf"
done < "${pair_filename}"

exit 0
| true |
83566f11e5fe0f5bc504b7d6a4d7246c63e1fcf5 | Shell | j0hnds/rpi_projects | /chart_temp_humidity.sh | UTF-8 | 295 | 3.046875 | 3 | [] | no_license | #!/bin/bash
# Resolve the directory this script lives in (quoted so a path containing
# spaces does not word-split).
EXE_DIR=$(dirname "$0")

# First we need to dump the readings from the database into a file
# that has line numbers
"${EXE_DIR}/dump_readings_mongo.rb" | nl > /tmp/temp_humidity.dat

# Then we run gnuplot to create the chart
gnuplot "${EXE_DIR}/temp_humidity.gp"
# Done. Just that simple
55ee9d3602381e802249fa0eda4781059f8bc9a5 | Shell | devops-recipes/e2eShippableDemo | /infra/awsProdECS/provProdInfra.sh | UTF-8 | 3,605 | 3.359375 | 3 | [] | no_license | #!/bin/bash -e
# Provision or destroy the production ECS infrastructure with Terraform.
# $1 selects the action: "create" or "destroy".
export ACTION=$1
# Shippable resource names used throughout this job.
export CURR_JOB_CONTEXT="infra/awsProdECS"
export STATE_RES="prod_tf_state"
export RES_CONF="prod_vpc_conf"
export RES_AMI="ami_sec_approved"
export OUT_RES_SET="e2eshipdemo-cluster-ecs-prod"
export RES_REPO="auto_repo"
export RES_AWS_CREDS="aws_creds"
export RES_AWS_PEM="aws_pem"
export TF_STATEFILE="terraform.tfstate"
# get the path where gitRepo code is available
export RES_REPO_STATE=$(ship_resource_get_state $RES_REPO)
export RES_REPO_CONTEXT="$RES_REPO_STATE/$CURR_JOB_CONTEXT"
# Now get AWS keys
export AWS_ACCESS_KEY_ID=$(ship_resource_get_integration $RES_AWS_CREDS aws_access_key_id)
export AWS_SECRET_ACCESS_KEY=$(ship_resource_get_integration $RES_AWS_CREDS aws_secret_access_key)
# Now get all VPC settings
export REGION=$(ship_resource_get_param $RES_CONF REGION)
export PROD_VPC_ID=$(ship_resource_get_param $RES_CONF PROD_VPC_ID)
export PROD_PUBLIC_SN_ID=$(ship_resource_get_param $RES_CONF PROD_PUBLIC_SN_ID)
export PROD_PUBLIC_SG_ID=$(ship_resource_get_param $RES_CONF PROD_PUBLIC_SG_ID)
export AMI_ID=$(ship_resource_get_param $RES_AMI AMI_ID)
# Prepare the Terraform working directory: log the job context, restore the
# saved state file, and write terraform.tfvars from the environment gathered
# above (terraform auto-loads a file with exactly that name).
set_context(){
  pushd $RES_REPO_CONTEXT
  echo "CURR_JOB_CONTEXT=$CURR_JOB_CONTEXT"
  echo "RES_REPO=$RES_REPO"
  echo "RES_AWS_CREDS=$RES_AWS_CREDS"
  echo "RES_AWS_PEM=$RES_AWS_PEM"
  echo "RES_REPO_CONTEXT=$RES_REPO_CONTEXT"
  echo "AWS_ACCESS_KEY_ID=${#AWS_ACCESS_KEY_ID}" #print only length not value
  echo "AWS_SECRET_ACCESS_KEY=${#AWS_SECRET_ACCESS_KEY}" #print only length not value
  # This restores the terraform state file
  ship_resource_copy_file_from_state $STATE_RES $TF_STATEFILE .
  # now setup the variables based on context
  # naming the file terraform.tfvars makes terraform automatically load it
  echo "aws_access_key_id = \"$AWS_ACCESS_KEY_ID\"" > terraform.tfvars
  echo "aws_secret_access_key = \"$AWS_SECRET_ACCESS_KEY\"" >> terraform.tfvars
  echo "region = \"$REGION\"" >> terraform.tfvars
  echo "prod_vpc_id = \"$PROD_VPC_ID\"" >> terraform.tfvars
  echo "prod_public_sn_id = \"$PROD_PUBLIC_SN_ID\"" >> terraform.tfvars
  echo "prod_public_sg_id = \"$PROD_PUBLIC_SG_ID\"" >> terraform.tfvars
  echo "ami_id = \"$AMI_ID\"" >> terraform.tfvars
  popd
}
# Tear down the provisioned infrastructure and record the "Deleted" state
# on the output resource set.
destroy_changes() {
  pushd $RES_REPO_CONTEXT
  echo "---------------- Destroy changes -------------------"
  terraform destroy -force
  ship_resource_post_state $OUT_RES_SET versionName \
    "Version from build $BUILD_NUMBER"
  ship_resource_put_state $OUT_RES_SET PROV_STATE "Deleted"
  popd
}
apply_changes() {
pushd $RES_REPO_CONTEXT
echo "---------------- Planning changes -------------------"
terraform plan
echo "----------------- Apply changes ------------------"
terraform apply
ship_resource_post_state $OUT_RES_SET versionName \
"Version from build $BUILD_NUMBER"
ship_resource_put_state $OUT_RES_SET PROV_STATE "Active"
ship_resource_put_state $OUT_RES_SET REGION $REGION
ship_resource_put_state $OUT_RES_SET PROD_ECS_INS_0_IP \
$(terraform output prod_ecs_ins_0_ip)
ship_resource_put_state $OUT_RES_SET PROD_ECS_INS_1_IP \
$(terraform output prod_ecs_ins_1_ip)
ship_resource_put_state $OUT_RES_SET PROD_ECS_INS_2_IP \
$(terraform output prod_ecs_ins_2_ip)
ship_resource_put_state $OUT_RES_SET PROD_ECS_CLUSTER_ID \
$(terraform output prod_ecs_cluster_id)
popd
}
# Entry point: sanity-check the SSH agent tooling, set up the Terraform
# context, then dispatch on $ACTION ("create" or "destroy").
main() {
  echo "---------------- Testing SSH -------------------"
  eval `ssh-agent -s`
  ps -eaf | grep ssh
  which ssh-agent

  set_context
  # FIX: quote $ACTION -- with no argument the old unquoted test expanded
  # to "[ = create ]", a test syntax error that aborted the script (bash -e).
  if [ "$ACTION" = "create" ]; then
    apply_changes
  elif [ "$ACTION" = "destroy" ]; then
    destroy_changes
  fi
}

main
| true |
3533036d4dac1b703be7158b276058c220eba1c9 | Shell | BenUtzmich/Nautilus-Caja-Scripte | /FEScriptsNautilus/AUDIOtools/07G_anyfile4Dir_PLAYall-mp3sOfDir-in-Gnome-mplayer_ls-grep-sed-makeTmpPlaylistFile.sh | UTF-8 | 3,948 | 4.1875 | 4 | [] | no_license | #!/bin/sh
##
## Nautilus
## SCRIPT: 07G_anyfile4Dir_PLAYall-mp3sOfDir-in-Gnome-mplayer_ls-grep-sed-makeTmpPlaylistFile.sh
##
## PURPOSE: Plays all the mp3 files in the current directory,
## sequentially using 'gnome-mplayer'.
##
## METHOD: Uses the 'ls' and 'grep' commands to make a '.pls'
## playlist file (in /tmp).
##
## Plays the files by passing the playlist file to 'gnome-mplayer'.
##
## The mp3's are played sequentially, from 'first' to 'last',
## according to the 'ls' sorting algorithm.
##
## HOW TO USE: In Nautilus, navigate to a directory of mp3 files,
## right-click on any file in the directory, then
## choose this Nautilus script to run.
##
###########################################################################
## Created: 2011jun13
## Changed: 2011jul07 Changed '-playlist' to '--playlist'.
## Changed: 2012feb28 Changed the scriptname, in the comment above.
## Added a comment on the sort/play order.
## Changed: 2012feb29 Reorged the 'METHOD' comment section above.
## Changed: 2012oct01 Changed script name from '_Gnome-mplayer' to
## '-in-Gnome-mplayer' --- and added
## '_ls-grep-sed-makeTmpPlaylistFile'.
## Changed: 2013apr10 Added check for the player executable.
###########################################################################
## FOR TESTING:   (show statements as they execute)
# set -x
#########################################################
## Check if the player executable exists.
## (zenity pops up a GUI error dialog when it is missing,
##  since this script is launched from Nautilus, not a terminal.)
#########################################################
EXE_FULLNAME="/usr/bin/gnome-mplayer"
if test ! -f "$EXE_FULLNAME"
then
   zenity --info --title "Player NOT FOUND." \
      --no-wrap \
      --text "\
The player executable
   $EXE_FULLNAME
was not found. Exiting."
   exit
fi
#########################################################
## Prepare a 'play list' file to hold the audio filenames.
#########################################################

TEMPFILE="/tmp/${USER}_gnome-mplayer.pls"

rm -f "$TEMPFILE"

#########################################################
## Generate the 'play list' file.
##
## We use 'ls' with 'grep', but we could use a file loop
## to have greater control on filtering the files.
##
## NOTE: mplayer seems to need the string 'file://'
##       prefixed on (fully-qualified) filenames.
##
## FIX: CURDIR used to be commented out, so playlist entries
##      came out as 'file://<basename>' instead of fully
##      qualified URLs.  Define CURDIR, add the missing '/'
##      separator, and drop the unnecessary 'eval'.
#########################################################

CURDIR="$(pwd)"

ls | grep '\.mp3$' | sed "s|^|file://$CURDIR/|" > "$TEMPFILE"

# /usr/bin/gnome-mplayer --loop 0 --playlist $TEMPFILE
# /usr/bin/gnome-mplayer --showplaylist --playlist $TEMPFILE
$EXE_FULLNAME --showplaylist --playlist "$TEMPFILE"
## We could add a zenity prompt to 'shuffle' the files.
# /usr/bin/gnome-mplayer -loop 0 --random -playlist $TEMPFILE
exit
###################################
## This exit is to avoid executing
## the following, alternative code.
## NOTE(review): everything below is intentionally DEAD CODE,
## kept only as a reference implementation.
###################################
#####################################################
## BELOW IS AN ALTERNATE VERSION :
## gnome-mplayer invoked once for each music file.
##
## But it is difficult to break in and cancel
## the loop. Could use a 'kill' command on this
## script or Gnome System Monitor.
#####################################################
FILENAMES=`ls`
###################################
## START THE LOOP on the filenames.
###################################
for FILENAME in $FILENAMES
do
   #################################################
   ## Get and check that file extension is 'mp3'.
   ## THIS ASSUMES one '.' in filename, at the extension.
   #################################################
   FILEEXT=`echo "$FILENAME" | cut -d\. -f2`
   if test "$FILEEXT" != "mp3"
   then
      continue
      # exit
   fi
   #############################
   ## Play the file.
   #############################
   # /usr/bin/gnome-mplayer "$FILENAME"
   $EXE_FULLNAME "$FILENAME"
done
| true |
0e8e9d9dc0ba83deb49a93a7401d47690a7362c4 | Shell | specialworld83/condres-packages | /catalyst-utils/pxp_switch_catalyst | UTF-8 | 5,097 | 3.96875 | 4 | [] | no_license | #!/bin/sh
# Vi0L0: it's my switching script, version 2.1:
# * switching xorg.conf - it will rename xorg.conf into xorg.conf.cat
# (if there's fglrx inside) or xorg.conf.oth (if there's intel or radeon inside).
# And then it will create link to xorg.conf depending on what did you choose
# * running aticonfig --px-Xgpu
# * running switchlibGL
# * adding/removing fglrx into/from /etc/modules-load.d/catalyst.conf
#
# You can do with it whatever you want :P
# NO WARRANTY OF ANY KIND
# Set local language always to C
# Force the C locale so tool output inspected below is stable.
export LC_ALL=C
export LANG=C
if [ "$(whoami)" != "root" ]; then
  echo "Must be root to run this script." 1>&2
  exit 1
fi
# More than one argument is an error; zero arguments falls through to the
# usage message in the dispatch case at the bottom of the script.
if [ $# -ge 2 ]; then
  echo "Usage: $(basename $0) <amd|intel|query>" 1>&2
  echo "Please choose one parameter" 1>&2
  exit 1
fi
# Normalise /etc/X11/xorg.conf into two stable copies:
#   xorg.conf.cat -- the Catalyst (fglrx) configuration
#   xorg.conf.oth -- the open-source (radeon/intel) configuration
# A config mentioning none of those drivers is parked as xorg.conf.wth.
# The switch_to_* functions then just symlink the right copy into place.
function check_xorg_conf() {
  if [ ! -e /etc/X11/xorg.conf.cat ];then
    if [ -e /etc/X11/xorg.conf ] && [[ `cat /etc/X11/xorg.conf | grep -c fglrx` != 0 ]]; then
      mv /etc/X11/xorg.conf /etc/X11/xorg.conf.cat
      echo "/etc/X11/xorg.conf renamed to /etc/X11/xorg.conf.cat"
    fi
  fi
  if [ ! -e /etc/X11/xorg.conf.oth ]; then
    if [ -e /etc/X11/xorg.conf ];then
      if [[ `cat /etc/X11/xorg.conf | grep -c radeon` != 0 ]] || [[ `cat /etc/X11/xorg.conf | grep -c intel` != 0 ]]; then
        mv /etc/X11/xorg.conf /etc/X11/xorg.conf.oth
        echo "/etc/X11/xorg.conf renamed to /etc/X11/xorg.conf.oth (radeon or intel inside)"
      fi
    fi
  fi
  # A config that names no known driver is renamed out of the way.
  if [ -e /etc/X11/xorg.conf ] && [[ `cat /etc/X11/xorg.conf | grep -c fglrx` = 0 ]] && [[ `cat /etc/X11/xorg.conf | grep -c radeon` = 0 ]] && [[ `cat /etc/X11/xorg.conf | grep -c intel` = 0 ]]; then
    mv /etc/X11/xorg.conf /etc/X11/xorg.conf.wth
    echo "/etc/X11/xorg.conf have no fglrx or radeon or intel inside!"
    echo "/etc/X11/xorg.conf renamed to /etc/X11/xorg.conf.wth"
  fi
}
# Switch the system to the Catalyst (fglrx) driver: link the Catalyst
# xorg.conf into place, point the GPU and GL libraries at AMD, and make
# sure the fglrx module is loaded at boot.
function switch_to_amd() {
  #switching xorg.conf
  echo -e '\E[37;44m'"\033[1mSwitching xorg.conf ...\033[0m"
  check_xorg_conf
  if [ -e /etc/X11/xorg.conf.cat ]; then
    ln -snf /etc/X11/xorg.conf.cat /etc/X11/xorg.conf
    echo "/etc/X11/xorg.conf.cat linked to /etc/X11/xorg.conf"
  fi
  #linking!
  echo -e '\E[37;44m'"\033[1mRunning aticonfig --px-dgpu ...\033[0m"
  aticonfig --px-dgpu
  echo -e '\E[37;44m'"\033[1mRunning /usr/lib/fglrx/switchlibGL amd ...\033[0m"
  /usr/lib/fglrx/switchlibGL amd
  echo -e '\E[37;44m'"\033[1mRunning /usr/lib/fglrx/switchlibglx amd ...\033[0m"
  /usr/lib/fglrx/switchlibglx amd
  #checking MODULES for fglrx
  echo -e '\E[37;44m'"\033[1mChecking /etc/modules-load.d/catalyst.conf for fglrx ...\033[0m"
  check1=$(grep fglrx /etc/modules-load.d/catalyst.conf)
  if [ "$check1" != "fglrx" ]; then
    echo fglrx > /etc/modules-load.d/catalyst.conf
    echo "fglrx was added into /etc/modules-load.d/catalyst.conf"
  fi
}
# Switch the system to the integrated Intel GPU (or open-source radeon):
# the Catalyst xorg.conf must still be linked while aticonfig/switchlib*
# run (part 1), then the open-source config is linked or xorg.conf removed
# (part 2), and fglrx is dropped from the boot-time module list.
function switch_to_intel() {
  #switching xorg.conf part 1
  echo -e '\E[37;44m'"\033[1mSwitching xorg.conf (part 1) ...\033[0m"
  check_xorg_conf
  if [ -e /etc/X11/xorg.conf.cat ]; then
    ln -snf /etc/X11/xorg.conf.cat /etc/X11/xorg.conf
    echo "/etc/X11/xorg.conf.cat linked to /etc/X11/xorg.conf"
  fi
  #linking!
  echo -e '\E[37;44m'"\033[1mRunning aticonfig --px-igpu ...\033[0m"
  aticonfig --px-igpu
  echo -e '\E[37;44m'"\033[1mRunning /usr/lib/fglrx/switchlibGL intel ...\033[0m"
  /usr/lib/fglrx/switchlibGL intel
  echo -e '\E[37;44m'"\033[1mRunning /usr/lib/fglrx/switchlibglx intel ...\033[0m"
  /usr/lib/fglrx/switchlibglx intel
  #switching xorg.conf part 2
  echo -e '\E[37;44m'"\033[1mSwitching xorg.conf (part 2) ...\033[0m"
  if [ -e /etc/X11/xorg.conf.oth ]; then
    ln -snf /etc/X11/xorg.conf.oth /etc/X11/xorg.conf
    echo "/etc/X11/xorg.conf.oth linked to /etc/X11/xorg.conf"
  else
    rm /etc/X11/xorg.conf
    echo "/etc/X11/xorg.conf was removed"
  fi
  #checking MODULES for fglrx
  echo -e '\E[37;44m'"\033[1mChecking /etc/modules-load.d/catalyst.conf for fglrx ...\033[0m"
  check=$(grep fglrx /etc/modules-load.d/catalyst.conf)
  if [[ "$check" != "" ]]; then
    sed 's/fglrx//' -i /etc/modules-load.d/catalyst.conf
    echo "fglrx was removed from /etc/modules-load.d/catalyst.conf"
  fi
}
# Report which GL stack is active by inspecting where /usr/lib/libGL.so
# points: the fglrx copy means Catalyst, the mesa copies mean Intel/radeon.
function get_current_driver() {
  LIB_LINK="`readlink /usr/lib/libGL.so 2>/dev/null`"
  case "${LIB_LINK}" in
    "/usr/lib/fglrx/fglrx-libGL.so.1.2"|"fglrx/fglrx-libGL.so.1.2")
      echo "amd (catalyst)"
      ;;
    "/usr/lib/mesa/libGL.so.1"|"/usr/lib/mesa/libGL.so.1.2.0"|"/usr/lib/mesa/libGL.so")
      echo "intel (or oss radeon)"
      ;;
    *)
      echo "unknown"
      ;;
  esac
}
# Dispatch on the requested mode; anything else prints usage and fails.
case "$1" in
  amd)
    switch_to_amd
    echo -e '\E[37;44m'"\033[1mSwitched to Catalyst\033[0m" 1>&2
    ;;
  intel)
    switch_to_intel
    echo -e '\E[37;44m'"\033[1mSwitched to Intel (or Radeon)\033[0m" 1>&2
    ;;
  query)
    get_current_driver
    ;;
  *)
    echo "Usage: $(basename $0) <amd|intel|query>" 1>&2
    exit 1
    ;;
esac
exit 0
| true |
a60bf0eb9e33ce7218742bb7f79943b2f1ac60bd | Shell | BCCVL/bccvldev | /bin/buildout.sh | UTF-8 | 338 | 2.546875 | 3 | [] | no_license | #!/bin/sh
# TONES:
# after changes to buildout rerun docker-compose build bccvl
# run this script once to create source clones in files/src -> TODO: maybe use another folder?
# and generate .egg-info folders
# Load local environment overrides (e.g. C9_PASS) when a .env file exists.
if [ -e './.env' ] ; then
    . './.env'
fi
# ${C9_PASS:-admin}: fall back to "admin" when C9_PASS is unset or empty.
docker-compose run --rm -e "ADMIN_PASS=${C9_PASS:-admin}" bccvl buildout
| true |
313db6c9069569625b7d53dc7ad29bc9d6de4b63 | Shell | scm-manager/svn-server-spec | /issues/422.bats | UTF-8 | 293 | 2.546875 | 3 | [] | no_license | #!/usr/bin/env bats
load ../setup
load ../functions
# https://bitbucket.org/sdorra/scm-manager/issues/422/svnkit-cannot-handle-copy-request
# Regression test: creating a branch via an SVN copy must succeed and the
# branch must appear in the working copy after an update.
# (add_small_files, create_branch, update come from the loaded helpers.)
@test "#422 create branch test-1" {
  cd "${WORKDIR}/trunk"
  add_small_files
  create_branch "test-1"
  update
  [ -d "${WORKDIR}/branches/test-1" ]
}
e684a28a1bf374a1aa9ebb7582475e6379278c62 | Shell | sontekliu/redis-book | /deploy.sh | UTF-8 | 539 | 2.640625 | 3 | [] | no_license | #!/bin/sh
# Build the GitBook and publish the generated site onto the gh-pages branch.
echo "=================start build==================="
gitbook build >> /dev/null
echo "=================clean files==================="
git checkout gh-pages
# Unstage everything and drop untracked files so the branch only carries
# the freshly generated site.
git rm --cached -r . >> /dev/null
git clean -df >> /dev/null
echo "*~" > .gitignore
echo "_book" >> .gitignore
echo "================copy static file and commit ==============="
cp -r _book/* .
# The deploy script itself must not be published with the site.
rm -rf deploy.sh
git add .
git commit -m "publish book"
git push origin gh-pages:gh-pages
git checkout master
echo "=================success!!!====================="
| true |
9be60d9a92f31d3dfbdbefccbd6fd1db41ff2b46 | Shell | ONNC/onnc-umbrella | /docker/in-container/run-nvdla-quick-regression.sh | UTF-8 | 327 | 3.1875 | 3 | [
"BSD-3-Clause"
] | permissive | #!/usr/bin/env bash
set -ex

MODEL_ZOO=/models
LOADABLE_DIR=/onnc/loadables
ONNC=onnc

# check output NVDLA loadable content is consistent:
# for every model with a reference loadable, recompile the ONNX model for
# the NVDLA backend and verify the produced loadable is byte-identical.
# FIX: iterate via a glob instead of parsing unquoted `ls` output, and
# quote all expansions.
for model_path in "$LOADABLE_DIR"/*; do
  model=$(basename "$model_path")
  "$ONNC" -mquadruple nvdla "$MODEL_ZOO/$model/model.onnx"
  diff out.nvdla "$LOADABLE_DIR/$model/$model.nvdla"
done

echo "DONE"
| true |
50a2c48de6d6aa587faf4b2b836a77605dbf68df | Shell | rezajam/Chess-Solitaire | /chess_solitaire_undo/EIFGENs/chess_solitaire_undo/W_code/C19/Makefile.SH | UTF-8 | 2,598 | 2.703125 | 3 | [] | no_license | case $CONFIG in
# Metaconfig-style generated script: when $CONFIG is unset, load the
# platform settings produced by Configure (../config.sh) before the
# heredocs below write out the C19 Makefile.
'')
	if test ! -f ../config.sh; then
		(echo "Can't find ../config.sh."; exit 1)
	fi 2>/dev/null
	. ../config.sh
	;;
esac
# When invoked via a path, cd into the script's own directory first.
case "$O" in
*/*) cd `expr X$0 : 'X\(.*\)/'` ;;
esac
echo "Compiling C code in C19"
$spitshell >Makefile <<!GROK!THIS!
INCLUDE_PATH = -I"\$(ISE_LIBRARY)/library/encoding/implementation/include" -I"\$(ISE_LIBRARY)/library/time/spec/include" -I"\$(ISE_LIBRARY)/library/vision2/spec/include" -I"\$(ISE_LIBRARY)/library/vision2/implementation/gtk/Clib" `$ISE_LIBRARY/library/encoding/implementation/unix/Clib/iconv-config --include_path` `$ISE_LIBRARY/library/vision2/implementation/gtk/Clib/vision2-gtk-config --include_path`
SHELL = /bin/sh
CC = $cc
CPP = $cpp
CFLAGS = $wkoptimize $mtccflags $large -DWORKBENCH -I"$rt_include" -I. \$(INCLUDE_PATH)
CPPFLAGS = $wkoptimize $mtcppflags $large -DWORKBENCH -I"$rt_include" -I. \$(INCLUDE_PATH)
LDFLAGS = $ldflags
CCLDFLAGS = $ccldflags $windows_flags
LDSHAREDFLAGS = $mtldsharedflags
EIFLIB = "$rt_lib/$prefix$mt_prefix$wkeiflib$suffix"
EIFTEMPLATES = $rt_templates
LIBS = $mtlibs
MAKE = $make
AR = $ar
LD = $ld
MKDEP = $mkdep \$(DPFLAGS) --
MV = $mv
CP = $cp
RANLIB = $ranlib
RM = $rm -f
FILE_EXIST = $file_exist
RMDIR = $rmdir
X2C = "$x2c"
SHAREDLINK = $sharedlink
SHAREDLIBS = $sharedlibs
SHARED_SUFFIX = $shared_suffix
COMMAND_MAKEFILE =
START_TEST = $start_test
END_TEST = $end_test
CREATE_TEST = $create_test
SYSTEM_IN_DYNAMIC_LIB = chess_solitaire_undo$shared_suffix
!GROK!THIS!
$spitshell >>Makefile <<'!NO!SUBS!'
.SUFFIXES:.cpp .o
.c.o:
$(CC) $(CFLAGS) -c $<
.cpp.o:
$(CPP) $(CPPFLAGS) -c $<
OBJECTS = big_file_C19_c.o
OLDOBJECTS = ti1568.o ti1568d.o da1566.o da1566d.o et1582.o et1582d.o et1577.o \
et1577d.o ti1567.o ti1567d.o da1571.o da1571d.o da1565.o da1565d.o \
kl1555.o kl1555d.o yy1576.o yy1576d.o et1575.o et1575d.o da1570.o \
da1570d.o da1569.o da1569d.o kl1554.o kl1554d.o yy1573.o yy1573d.o \
et1574.o et1574d.o ev1583.o ev1583d.o kl1556.o kl1556d.o ev1585.o \
ev1585d.o ev1559.o ev1559d.o kl1564.o kl1564d.o kl1562.o kl1562d.o \
kl1563.o kl1563d.o kl1561.o kl1561d.o uc1580.o uc1580d.o ev1584.o \
ev1584d.o ki1578.o ki1578d.o kl1579.o kl1579d.o kl1558.o kl1558d.o \
kl1553.o kl1553d.o kl1557.o kl1557d.o ki1560.o ki1560d.o uc1581.o \
uc1581d.o uc1572.o uc1572d.o
all: Cobj19.o
Cobj19.o: $(OBJECTS) Makefile
$(LD) $(LDFLAGS) -r -o Cobj19.o $(OBJECTS)
$(RM) $(OBJECTS)
$(CREATE_TEST)
clean: local_clean
clobber: local_clobber
local_clean::
$(RM) core finished *.o
local_clobber:: local_clean
$(RM) Makefile
!NO!SUBS!
chmod 644 Makefile
$eunicefix Makefile
| true |
4f1b48c4d5de04efc416056cce8059614724eeb1 | Shell | harmishhk/packer_vms | /scripts/ros.sh | UTF-8 | 2,719 | 3.265625 | 3 | [] | no_license | #!/bin/bash -eux
# Install ROS $ROS_VERSION plus optional SPENCER and Gazebo extras.
# The ROS / SPENCER / GAZEBO env vars act as boolean feature switches.
LOGFILE=/tmp/commands.txt
touch $LOGFILE
# Skip everything unless ROS is set to a truthy value.
if [[ ! "$ROS" =~ ^(true|yes|on|1|TRUE|YES|ON)$ ]]; then
  echo "==> ros installation is disabled" 2>&1 | tee -a $LOGFILE
  exit
fi
echo "==> installing ros $ROS_VERSION" 2>&1 | tee -a $LOGFILE
# setup source-list and keys
sudo sh -c "echo 'deb http://packages.ros.org/ros/ubuntu $(lsb_release -sc) main' > /etc/apt/sources.list.d/ros-latest.list"
sudo apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-key 0xB01FA116
sudo apt-get update --fix-missing
# install ros-indigo-base
sudo apt-get -y install \
  ros-$ROS_VERSION-desktop \
  ros-$ROS_VERSION-perception \
  ros-$ROS_VERSION-navigation \
  ros-$ROS_VERSION-joy \
  ros-$ROS_VERSION-teleop-twist-joy \
  python-catkin-tools

## setup rosdep
sudo rosdep init
rosdep update

# Optional SPENCER people-tracking dependencies.
if [[ ! "$SPENCER" =~ ^(true|yes|on|1|TRUE|YES|ON)$ ]]; then
  echo "==> ros-spencer installation is disabled" 2>&1 | tee -a $LOGFILE
else
  echo "==> installing spencer related packages for ros $ROS_VERSION" 2>&1 | tee -a $LOGFILE
  sudo apt-get -y install \
    libmrpt-dev \
    mrpt-apps \
    freeglut3-dev \
    libsvm-dev \
    libsdl-image1.2-dev \
    libpcap-dev \
    libgsl0-dev \
    ros-$ROS_VERSION-bfl \
    ros-$ROS_VERSION-control-toolbox \
    ros-$ROS_VERSION-driver-base \
    ros-$ROS_VERSION-sound-play \
    ros-$ROS_VERSION-joy \
    ros-$ROS_VERSION-yocs-cmd-vel-mux
fi

# Optional Gazebo simulator plus PR2 simulator build.
if [[ ! "$GAZEBO" =~ ^(true|yes|on|1|TRUE|YES|ON)$ ]]; then
  echo "==> ros-gazebo installation is disabled" 2>&1 | tee -a $LOGFILE
else
  echo "==> installing gazebo and related packages for ros $ROS_VERSION" 2>&1 | tee -a $LOGFILE
  # install dependencies
  sudo apt-get -y install wget
  # setup source-list and keys
  sudo sh -c "echo 'deb http://packages.osrfoundation.org/gazebo/ubuntu-stable $(lsb_release -cs) main' > /etc/apt/sources.list.d/gazebo-stable.list"
  wget http://packages.osrfoundation.org/gazebo.key -O /tmp/gazebo.key
  sudo apt-key add /tmp/gazebo.key
  sudo apt-get update
  # install gazebo
  sudo apt-get -y install \
    ros-$ROS_VERSION-gazebo7-ros-pkgs \
    ros-$ROS_VERSION-gazebo7-ros-control
  # install pr2 simulator dependencies
  sudo apt-get -y install \
    ros-$ROS_VERSION-pr2-controller-manager \
    ros-$ROS_VERSION-pr2-msgs \
    ros-$ROS_VERSION-pr2-dashboard-aggregator \
    ros-$ROS_VERSION-pr2-controllers
  # Build and install the PR2 simulator from source into the ROS prefix.
  export TERM=xterm
  source /opt/ros/$ROS_VERSION/setup.bash
  git clone -b $ROS_VERSION-devel https://github.com/harmishhk/pr2_simulator.git /tmp/pr2_ws/src/pr2_simulator
  catkin config --workspace /tmp/pr2_ws -i /opt/ros/$ROS_VERSION --install
  sudo catkin build --workspace /tmp/pr2_ws
fi
| true |
e4f076fce3038e06d559844930c9ebf84a6f9aae | Shell | noaaport/nbsp | /tclhttpd/src/tclhttpd3.5.1/bin/test/badpost.tcl | UTF-8 | 647 | 2.953125 | 3 | [
"TCL",
"ISC"
] | permissive | #!/bin/sh
# \
exec tclsh "$0"
# The two lines above are the classic sh->tclsh trampoline: /bin/sh runs the
# exec, while tclsh treats it as part of the backslash-continued comment.

package require http 2.0

# Target URL; defaults to a local tclhttpd echo endpoint.
set url [lindex $argv 0]
if {[string length $url] == 0} {
    set url http://localhost:8015/debug/echo
}
# Split the URL into server, port and path (port defaults to 80).
regexp {(http://)?([^:/]+)(:([0-9]+))?(/.*)$} $url x j1 server j2 port url
if {$port == ""} {
    set port 80
}
# Build a >16 KB request body by repeated doubling.
set body "hello\n"
while {[string length $body] < 17000} {
    set body $body$body
}
# Send a POST whose declared Content-length covers the full body, but
# deliver the body in two pieces with a 5-minute pause in between --
# deliberately exercising the server's slow/partial-POST handling.
set f [socket $server $port]
puts $f "POST $url HTTP/1.0"
puts $f "Content-length: [string length $body]"
puts $f ""
flush $f
after 200
puts $f [string range $body 0 100]
flush $f
puts stderr "Pausinng 5 minutes"
after [expr 1000 * 300]
puts $f [string range $body 101 end]
flush $f
5e61711189f33e7aaeea232d687c198fedf4ed24 | Shell | saj1th/bdlabs-ops | /utils/mesos/forecaster.run.sh | UTF-8 | 865 | 2.84375 | 3 | [] | no_license | SPARK_VERSION='spark-1.2.0'
HADOOP_VERSION='hadoop2.4'
SPARK_BINARY="${SPARK_VERSION}-bin-${HADOOP_VERSION}"
SPARK_TAR="${SPARK_BINARY}.tgz"

FORECASTER_VERSION='0.2'
FORECASTER_JAR="forecaster-${FORECASTER_VERSION}.jar"

RUN_DIR='/home/jclouds/forecaster/bin'

# Create the work directory and bail out if we cannot enter it.
# FIX: the old unguarded `cd $RUN_DIR` let every following command run in
# the wrong directory on failure; mkdir -p already handles an existing dir,
# so the separate [ -d ] test was redundant.
mkdir -p "$RUN_DIR"
cd "$RUN_DIR" || exit 1

# Fetch the Spark distribution and the forecaster jar from HDFS.
hdfs dfs -copyToLocal /opt/"$SPARK_TAR"
hdfs dfs -copyToLocal "/projects/forecaster/bin/$FORECASTER_JAR"

tar zxvf "${SPARK_TAR}"
rm -f "${SPARK_TAR}"

# Submit the training job to the Mesos cluster.
"$SPARK_BINARY/bin/spark-submit" \
    --class org.bitbucket.saj1th.forecaster.Forecast \
    --master mesos://x.x.x.x:5050 \
    "$RUN_DIR/$FORECASTER_JAR" \
    --master mesos://x.x.x.x:5050 \
    --sparkexecutor "hdfs://x.x.x.x/opt/$SPARK_TAR" \
    --cassandrahost x.x.x.x \
    --modelspath hdfs://x.x.x.x:8020/projects/forecaster/models \
    hdfs://x.x.x.x:8020/projects/forecaster/data/traindata.csv

# Clean up the work directory when the job is done.
rm -rf "$RUN_DIR"
cb4ba7a387e4a1e5e018db0ce6b62cbd507b41a8 | Shell | nitlang/nitutorial | /extract.sh | UTF-8 | 700 | 3.5 | 4 | [] | no_license |
# For each markdown lesson given on the command line, extract the embedded
# code template into tests/<name>.nit and the expected output into
# tests/<name>.res, then sanity-check the required placeholder markers.
for i in "$@"; do
	b=`basename "$i" .md`
	tmpl="tests/$b.nit"
	res="tests/$b.res"
	# Magic to extract the template:
	# keep the first ~~~nit fenced block; the tac/sed/tac trick trims
	# everything from the closing fence onward (sed keeps lines before the
	# first ~~~ of the reversed stream).
	perl -ne 'print if(/~~~nit/ ... /~~~/);' "$i" | tac | sed -ne '1,/~~~/{/~~~/!p}' | tac > "$tmpl"
	# Magic to extract the result (first plain ~~~ fenced block, same trick)
	perl -ne 'print if(/~~~/ ... /~~~/);' "$i" | tac | sed -ne '1,/~~~/{/~~~/!p}' | tac > "$res"
	# A template must contain either "# CODE HERE" or a matched
	# "# CHANGE BELOW"/"# CHANGE ABOVE" pair; otherwise discard both files.
	if ! grep -q '^\s*# CODE HERE\s*$' "$tmpl"; then
		if ! grep -q '^\s*# CHANGE BELOW\s*$' "$tmpl"; then
			echo "$i: no CODE HERE nor CHANGE BELOW"
			rm "$tmpl" "$res"
			continue
		fi
		if ! grep -q '^\s*# CHANGE ABOVE\s*$' "$tmpl"; then
			echo "$i: no CHANGE ABOVE"
			rm "$tmpl" "$res"
			continue
		fi
	fi
	echo "$i `wc -l "$tmpl"` `wc -l "$res"`"
done
| true |
717ed57860dffc82411f784aa192d84c2ed3d5dd | Shell | shiyun/react-redux | /build.sh | UTF-8 | 481 | 2.578125 | 3 | [] | no_license | #!/bin/sh
# Compose a versioned archive name: fabaoM-<version>-<YYYYMMDDHH>-<build#>.
filename='fabaoM-'
version='1.0.1'
day=`date +%Y%m%d%H`
# Left-pad the Jenkins BUILD_NUMBER with zeros and keep the last 4 digits.
MY_BUILD_NUMBER=`echo 0000$BUILD_NUMBER |sed 's/.*\([0-9]\{4\}\)$/\1/'`
filename=$filename$version'-'$day'-'$MY_BUILD_NUMBER
echo 'file name:'$filename
# Pack the deployable application into build/<name>.tgz.
tar czvf build/$filename.tgz run.sh package.json app.js config.alpha.js serverPrivateKey_Alpha.pem serverPrivateKey_beta.pem serverPrivateKey_release.pem config.beta.js config.release.js adapter/ bin/ node_modules/ public/ routes/ util/ views/
| true |
aafaff92f48b8a78fc0375c6f240d2bb9cda9de6 | Shell | larsbrinkhoff/tots-tools | /scripts/extract.sh | UTF-8 | 273 | 3.09375 | 3 | [] | no_license | switches="$1"
tape="$2"
tid=`basename $2`
rm -rf extract/"$tid"
mkdir extract/"$tid"
if test -L "$tape.tap"; then
tape=`readlink -f "$tape.tap"`.bz2
else
tape="$tape.tap.bz2"
fi
echo TAPE: $tape
bzcat $tape > TMP
cd extract/"$tid"
itstar "$switches" ../../TMP
| true |
c4de7b6d56475381b103cf9685bef99e6c6ee717 | Shell | vinohs/chef-local | /ansible-setup/install-ansible-macOSx.sh | UTF-8 | 830 | 3.84375 | 4 | [] | no_license | #!/usr/bin/env bash
#Please install xcode on your system before running this.
# Bash "strict" mode
set -euo pipefail
IFS=$'\n\t'
# Install the Command Line Tools.
# Temporarily drop -e so a non-zero exit from xcode-select can be captured.
set +e
xcode-select -p
RETVAL=$?
set -e
if [[ "$RETVAL" -ne "0" ]]; then
    echo "Installing XCode Command Line Tools"
    xcode-select --install
    # xcode-select --install is asynchronous (GUI prompt); wait for the user.
    read -p "Continue? [Enter]"
fi
# Install brew
if [[ ! -x "/usr/local/bin/brew" ]]; then
    /usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
fi
# Install Python 3
if [[ ! -d "/usr/local/Cellar/python3" ]]; then
    echo "Installing python 3"
    brew install python3
fi
/usr/local/bin/pip3 install -U pip setuptools wheel
# Install Ansible (via pip so it tracks the brewed Python 3).
if [[ ! -x "/usr/local/bin/ansible" ]]; then
    echo "Installing ansible"
    /usr/local/bin/pip3 install ansible
fi
| true |
15ee7d2c89e1d83320b7ead3f27d1b3b75550103 | Shell | zemiak/quarkus-jvm-s2i | /s2i/bin/assemble | UTF-8 | 515 | 3.453125 | 3 | [
"Unlicense"
] | permissive | #!/bin/bash -e
# If the 'quarkus-s2i' assemble script is executed with the '-h' flag, print the usage.
if [[ "$1" == "-h" ]]; then
	exec /usr/libexec/s2i/usage
fi
# Restore the Maven repository saved by a previous incremental build,
# if the builder injected one under /tmp/artifacts.
if [ "$(ls /tmp/artifacts/ 2>/dev/null)" ]; then
  echo "---> Restoring build artifacts..."
  rm -rf .m2
  mkdir .m2
  mv /tmp/artifacts/* .m2/
else
  echo "---> NO build artifacts..."
fi
echo "---> Installing application source..."
cp -Rf /tmp/src/. ./
echo "---> Building application from source..."
# Surface a Maven failure as the conventional s2i assemble error code 10.
mvn package -DskipTests || exit 10
| true |
2697bfe5b070b382ad663494b96854a32ec684ea | Shell | ekacnet/junkcode | /ramdev-init.sh | UTF-8 | 461 | 3.0625 | 3 | [] | no_license | #!/bin/bash
# Mirror /usr/local/srcmirror into a RAM-backed ext4 filesystem mounted at
# /usr/local/src: create a sparse image on /ram1, attach a loop device,
# format, mount, then copy the tree across under a lock file.
cd /usr/local/srcmirror
mount /ram1 2>/dev/null
# 10000 x 4M blocks = ~39 GB backing file.
dd if=/dev/zero of=/ram1/disk bs=4M count=10000 >/dev/null 2>&1
# losetup -v prints the chosen device path; keep only the "loopN" part.
lo=`losetup -v -f /ram1/disk | sed s@.*/dev/@@`
mkfs.ext4 /dev/$lo >/dev/null 2>&1
mount -o acl /dev/$lo /usr/local/src
# Proceed only when the mount actually took effect.
nb=`mount | grep /usr/local/src |wc -l`
if [ $nb -eq 1 ];then
    # Remove any stale lock in the mirror, then signal an in-progress copy
    # via a lock file on the new filesystem.
    rm lock 2>/dev/null
    touch /usr/local/src/lock
    chmod o-t /usr/local/src
    chmod o-x /usr/local/src
    cp -a . /usr/local/src
    rm /usr/local/src/lock
fi
| true |
5e996cbd09cc7f8bf5d7d5789cd23e9f1da23188 | Shell | CLOVIS-AI/Dotfiles | /scripts/tasker | UTF-8 | 505 | 3.296875 | 3 | [] | no_license | #!/bin/bash
. cl-depend announce "tasker"
# Optional "--dir <path>" prefix: run the task in the given working directory.
if [[ $1 == "--dir" ]]; then
	options+=" --workdir $2 "
	shift
	shift
fi
echo " → Starting '$*' with options '$options'"
# Build the command line that runs inside the new Konsole tab: load the
# shell environment, run the task, and on failure wait for a keypress so
# the tab does not vanish before the error can be read.
command=". ~/config/selector.sh"
command+=" && announce Welcome to Tasker"
command+=" && echo Running: $* && echo"
command+=" && $*"
command+=" && exit"
command+=" || echo -e 'Something went wrong, press enter to close.' && read"
# shellcheck disable=SC2086
konsole $options --new-tab -e "$(command -v bash)" -c "$command" 2>/dev/null &
| true |
770f576cddede71ae3fec7cfefb213082437e53c | Shell | mangOH/GroveIoTCard_Test | /GroveCard.sh | UTF-8 | 8,709 | 3.78125 | 4 | [] | no_license | #!/bin/bash
# Disable colors if stdout is not a tty
if [ -t 1 ]; then
    COLOR_TITLE="\\033[1;94m"
    COLOR_ERROR="\\033[0;31m"
    COLOR_WARN="\\033[0;93m"
    COLOR_PASS="\\033[0;32m"
    COLOR_RESET="\\033[0m"
else
    COLOR_TITLE=""
    COLOR_ERROR=""
    COLOR_WARN=""
    COLOR_PASS=""
    COLOR_RESET=""
fi
# testing variables
# NOTE(review): "p" presumably means "pass" -- confirm against lib/common.sh.
TEST_RESULT="p"
# Configuration loading (defines e.g. $rbTimer used below)
source ./configuration.cfg
# Libraries poll (provides SshToTarget, WaitForDevice, ...)
source ./lib/common.sh
# Interactive bench setup: tell the operator how to wire the Grove IoT card,
# wait for the target to come up, create the scratch folder on it and drive
# all peripherals to a known default state.  Returns 0 on success.
target_setup() {
    prompt_char "
    Connect the following to the Grove IoT card:\n
    A0: light sensor\n
    D2: red led\n
    D3: green led\n
    D4: blue led\n
    D5: white led\n
    UART: fingerprint sensor\n
    I2C: led matrix\n
    \n
    Press ENTER to continue..."
    # WaitForDevice and $rbTimer come from the sourced config/library files.
    WaitForDevice "Up" "$rbTimer"
    # create test folder
    echo -e "${COLOR_TITLE}Creating testing folder${COLOR_RESET}"
    SshToTarget "mkdir -p /tmp/iot_grove_card/apps"
    # default state
    target_default_state
    return 0
}
#=== FUNCTION ==================================================================
#
#          NAME:  prompt_char
#   DESCRIPTION:  Request user to input a character for prompt.  The prompt
#                 message goes to stderr so it never pollutes the captured
#                 return value.
#   PARAMETER 1:  prompt message
#
#       RETURNS:  user inputed value, upper-cased, on stdout
#
#===============================================================================
prompt_char() {
    # FIX: quote the expansions -- the old unquoted forms word-split the
    # prompt (collapsing its formatting) and glob-expanded any '*' in the
    # user's answer; read -r keeps backslashes in the input literal.
    echo -e "$1" >&2
    read -r prompt_input
    echo "$prompt_input" | tr 'a-z' 'A-Z'
}
# Drive every peripheral back to its idle state: all four GPIO LEDs low,
# the LED matrix cleared.  The fingerprint sensor needs no reset.
target_default_state() {
    # GPIO
    SshToTarget "/legato/systems/current/bin/app runProc GroveGPIO --exe=GroveGPIO -- -p 0 output low"
    SshToTarget "/legato/systems/current/bin/app runProc GroveGPIO --exe=GroveGPIO -- -p 1 output low"
    SshToTarget "/legato/systems/current/bin/app runProc GroveGPIO --exe=GroveGPIO -- -p 2 output low"
    SshToTarget "/legato/systems/current/bin/app runProc GroveGPIO --exe=GroveGPIO -- -p 3 output low"
    # Led Matrix: display a blank string to clear it
    SshToTarget "/legato/systems/current/bin/app runProc LedMatrix --exe=LedMatrix -- red string \" \""
    # FingerPrint
    # Do nothing
    return 0
}
#=== FUNCTION ==================================================================
#
#          NAME:  numCompare
#   DESCRIPTION:  Compare two numbers.
#   PARAMETER 1:  number1
#   PARAMETER 2:  number2
#
#       RETURNS:  0  number1 is less than or equal to number2 + 100
#                 1  number1 is greater than number2 + 100
#
#===============================================================================
numCompare() {
    # Delegate the comparison to awk so non-integer readings also work.
    local verdict
    verdict=$(awk -v a="$1" -v b="$2" 'BEGIN { if (a > b + 100) print "1"; else print "0" }')
    return "$verdict"
}
# Poll the light sensor forever and repaint the LED matrix with a "P" whose
# colour reflects the ambient light level (brighter reading -> red ... white).
# Never returns; interrupt the script to stop it.
magic_P() {
    local reading color
    while true
    do
        reading=$(SshToTarget "/legato/systems/current/bin/app runProc lightSensor --exe=lightSensor --")
        if [[ $reading -gt 1700 ]]; then
            color="red"
        elif [[ $reading -gt 1300 ]]; then
            color="green"
        elif [[ $reading -gt 900 ]]; then
            color="blue"
        elif [[ $reading -gt 500 ]]; then
            color="pink"
        else
            color="white"
        fi
        SshToTarget "/legato/systems/current/bin/app runProc LedMatrix --exe=LedMatrix -- $color string P"
    done
}
check_gpio() {
  # Interactively verify one GPIO output pin.
  #   $1 - GroveGPIO pin index passed to the on-target app (0..3)
  #   $2 - connector label shown to the operator (D2..D5)
  # Returns 0 when the operator confirms both the ON and OFF states.
  local pin=$1
  local label=$2
  local resp=""
  SshToTarget "/legato/systems/current/bin/app runProc GroveGPIO --exe=GroveGPIO -- -p $pin output high"
  while [ "$resp" != "Y" ] && [ "$resp" != "N" ]
  do
    resp=$(prompt_char "Do you see LED on IO $label is turned ON? (Y/N)")
  done
  if [ "$resp" = "N" ]
  then
    echo -e "${COLOR_ERROR}Failed to turn on LED on $label. GPIO check is failed.${COLOR_RESET}"
    return 1
  fi
  SshToTarget "/legato/systems/current/bin/app runProc GroveGPIO --exe=GroveGPIO -- -p $pin output low"
  resp=""
  while [ "$resp" != "Y" ] && [ "$resp" != "N" ]
  do
    resp=$(prompt_char "Do you see LED on IO $label is turned OFF? (Y/N)")
  done
  if [ "$resp" = "N" ]
  then
    echo -e "${COLOR_ERROR}Failed to turn off LED on $label. GPIO check is failed.${COLOR_RESET}"
    return 1
  fi
  echo -e "${COLOR_PASS}GPIO $label check is passed.${COLOR_RESET}"
  return 0
}

target_start_test() {
  # Interactive bring-up test of the expansion-board peripherals:
  # GPIO (D2..D5), I2C (LED matrix), UART (fingerprint reader) and
  # ADC (light sensor). Returns non-zero on the first failed check.

  # GPIO connectors D2..D5 map to GroveGPIO pins 0..3; the four checks
  # were previously four copy-pasted blocks, now one shared helper.
  check_gpio 0 "D2" || return 1
  check_gpio 1 "D3" || return 1
  check_gpio 2 "D4" || return 1
  check_gpio 3 "D5" || return 1

  # RGB LED matrix (I2C)
  SshToTarget "/legato/systems/current/bin/app runProc LedMatrix --exe=LedMatrix -- green string P"
  local resp=""
  while [ "$resp" != "Y" ] && [ "$resp" != "N" ]
  do
    resp=$(prompt_char "Do you see leter 'P' displayed on LED matrix? (Y/N)")
  done
  if [ "$resp" = "N" ]
  then
    echo -e "${COLOR_ERROR}Failed to display string on LED matrix. I2C check is failed.${COLOR_RESET}"
    return 1
  fi
  echo -e "${COLOR_PASS}I2C check is passed.${COLOR_RESET}"

  # FingerPrint (UART): a successful scan reports "Image taken".
  prompt_char "Put your finger to FingerPrint sensor then press ENTER to continue..."
  local finger_detect
  finger_detect=$(SshToTarget "/legato/systems/current/bin/app runProc FingerPrint --exe=FingerPrint --")
  echo "$finger_detect"
  # grep (without -q) also echoes the matching line, as before.
  if echo "$finger_detect" | grep "Image taken"
  then
    echo -e "${COLOR_PASS}UART check is passed.${COLOR_RESET}"
  else
    echo -e "${COLOR_ERROR}Failed to take image of your finger. UART check is failed.${COLOR_RESET}"
    return 1
  fi

  # lightSensor (ADC): the covered reading must not exceed the uncovered
  # one by more than numCompare's 100-count tolerance.
  prompt_char "Please cover light sensor then press ENTER to continue..."
  cover_value=$(SshToTarget "/legato/systems/current/bin/app runProc lightSensor --exe=lightSensor --")
  echo "Light Sensor Value: '$cover_value'" >&2
  prompt_char "Please uncover light sensor then press ENTER to continue..."
  uncover_value=$(SshToTarget "/legato/systems/current/bin/app runProc lightSensor --exe=lightSensor --")
  echo "Light Sensor Value: '$uncover_value'" >&2
  if numCompare "$cover_value" "$uncover_value"
  then
    echo "Light sensor value when uncover greater than light sensor value when cover" >&2
    SshToTarget "/legato/systems/current/bin/app runProc LedMatrix --exe=LedMatrix -- green string P"
  else
    echo "Light sensor value when uncover is not greater than light sensor value when cover" >&2
    SshToTarget "/legato/systems/current/bin/app runProc LedMatrix --exe=LedMatrix -- red string F"
    return 1
  fi
  return 0
}
# main
# Run the one-time board setup, then the interactive peripheral checks.
# Either failure marks the overall run as failed ("f"); note that the
# peripheral test still runs even when setup failed.
if ! target_setup
then
  TEST_RESULT="f"
  echo -e "${COLOR_ERROR}Failed to setup target${COLOR_RESET}"
fi
if ! target_start_test
then
  TEST_RESULT="f"
  echo -e "${COLOR_ERROR}Test is failed${COLOR_RESET}"
fi
echo -e "${COLOR_TITLE}Test is finished${COLOR_RESET}"
# EchoPassOrFail prints the final PASS/FAIL banner based on TEST_RESULT.
EchoPassOrFail $TEST_RESULT
| true |
f5b7e2d9c2386ad94ad031640b2d5d926d7df8a9 | Shell | lavabit/robox | /scripts/rhel6/updatedb.sh | UTF-8 | 632 | 3.140625 | 3 | [] | no_license | #!/bin/bash -eux
# Check whether the install media is mounted, and if necessary mount it.
if [ ! -f /media/media.repo ]; then
  # NOTE(review): `error` appears to be a status-checking helper defined
  # by the surrounding provisioning framework — confirm it is in scope.
  mount /dev/cdrom /media; error
fi
# mlocate provides updatedb/locate.
yum --assumeyes install mlocate
# Update the locate database.
# Running it hourly (in addition to daily) keeps the DB fresh; the cron
# script is executed once immediately as well.
cp /etc/cron.daily/mlocate.cron /etc/cron.hourly/mlocate.cron && /etc/cron.daily/mlocate.cron
# A very simple script designed to ensure the locate database gets updated
# automatically when the box is booted and provisioned.
# The cron entry deletes itself after its first run.
printf "@reboot root bash -c '/usr/bin/updatedb ; rm --force /etc/cron.d/updatedb'\n" > /etc/cron.d/updatedb
# Apply the SELinux context expected for files in the cron spool.
chcon "system_u:object_r:system_cron_spool_t:s0" /etc/cron.d/updatedb
| true |
fe6b876edf0a04a38afd7f1c064dac74b0eb0b1c | Shell | spennington45/Shell_scripts | /forkRepo.sh | UTF-8 | 1,046 | 3.8125 | 4 | [] | no_license | #!/bin/sh
# Author Steven Pennington
# v 1.0
# This script is to create a fork of a repo using bitbucket REST api
# NOTE: the for (( ... )) loop and `read -s` below are bash features;
# run this script with bash, not a plain POSIX sh.

# User name of who is signing in and password
echo Username:
read -r username
echo Password:
# -s suppresses terminal echo so the password is not shown on screen.
read -r -s password
echo
# The repo that will be forked
echo Main repository name:
read -r repoName
# Set the name of the fork i.e. module1-capstone-team
echo Fork names not including numbers:
read -r fork
# The number of forks that will be made
echo How many forks will there be?
read -r forkNum
# The workspace id which can be username if this is a personal repo or the UUID of the project such as te-otw-2011
echo Workspace Id
read -r workspaceId

echo "$username is creating $forkNum forks of the $repoName repo with the name of $fork and a number."
# Loop that creates the repos; all expansions are quoted so names with
# unusual characters cannot break the command line.
for ((i = 1; i <= forkNum; i++))
do
    curl -X POST -u "$username:$password" "https://api.bitbucket.org/2.0/repositories/$workspaceId/$repoName/forks" -H 'Content-Type: application/json' -d '{
    "name": "'"$fork$i"'",
    "workspace": {
        "slug": "'"$workspaceId"'"
    }
}'
done
| true |
589bd298ae7d39fbe108a57645389b3ad67405f0 | Shell | domaubert/EMMA | /utils/hop/run_hop.sh | UTF-8 | 656 | 2.59375 | 3 | [] | no_license | #!/bin/bash
#MSUB -r hop              # Nom du job
#MSUB -n 1                # Reservation de 4 processus au total
#MSUB -N 1                # Les processus sont répartis sur 2 noeuds
#MSUB -T 86400            # Limite de temps elapsed du job ici 600s
#MSUB -o stdout_hop       # Sortie standard
#MSUB -e stderr_hop       # Sortie d'erreur
#MSUB -p gen2191          # Allocation
#MSUB -q mono             # sélection de la queue GPU (groupe genci ou challenge uniquement)
# Echo each command as it runs (batch-log friendly).
set -x
# Timestamp used to give each run its own log file name.
export DATE=`date +%F_%Hh%M`
# Run the HOP halo finder on the given particle snapshot; -p 1. sets the
# density/threshold parameter as expected by this hop build.
./hop -in /scratch/cont003/pocvirk/localgroup_64proc/output_00015/part_00015.out -p 1. > run_hop$DATE.log
| true |
aa362094cdf64472f0b64fe548a9b34021a1d702 | Shell | dframework/ddk | /build/lib/make_mk.sh | UTF-8 | 14,549 | 3.59375 | 4 | [] | no_license | #!/bin/sh
ddk_compile_add(){
    # Append one logical line to the generated-script buffer ${tmp_mkbuf}.
    # The "\n" stays a literal two-character sequence here; it is only
    # expanded when the buffer is finally written out with `echo -e`.
    local added_line="$1"
    tmp_mkbuf="${tmp_mkbuf}\n${added_line}\n"
}
ddk_compile_clear_cmd_prefix(){
    # When a per-target shell function is currently open in the output
    # buffer, emit its closing brace and record that none is open.
    [ "${tmp_cmd_prefix}" = "" ] && return 0
    ddk_compile_add "}"
    tmp_cmd_prefix=""
}
ddk_compile_call_func(){
    # Translate one "$( name args )" directive (text in global ${tmp_call})
    # into a line of the generated shell script. The first expr extracts
    # the directive name, the second its argument string.
    tmp_nm=`expr "${tmp_call}" : '\([a-zA-Z0-9_-]\{1,\}\)[[:blank:]]*[[:print:]]*'`
    tmp_va=`expr "${tmp_call}" : '[a-zA-Z0-9_-]\{1,\}[[:blank:]]*\([[:print:]]*\)'`
    if [ "$tmp_nm" = "" ]; then
        ddk_exit 1 "syntax error(100): $tmp_call"
    fi
    case "${tmp_nm}" in
    call)
        # NOTE(review): emits ${tmp_val} (set by the caller's parser), not
        # the locally extracted ${tmp_va} — confirm this is intentional.
        ddk_compile_add "${tmp_val}"
        ;;
    mk)
        ddk_compile_add "call_make_dir \"${tmp_va}\""
        ;;
    mkdir)
        ddk_compile_add "call_make_dir \"${tmp_va}\""
        ;;
    target)
        # Target names may contain '-' which is illegal in shell function
        # names, so dashes become underscores.
        tmp_va2=`echo "${tmp_va}" | sed -e 's/-/_/g'`
        ddk_compile_add "call_target ${tmp_va2}"
        ;;
    install)
        ddk_compile_add "call_install ${tmp_va}"
        ;;
    package-start)
        ddk_compile_add "call_package_start \"${tmp_va}\""
        ;;
    package-end)
        ddk_compile_add "call_package_end \"${tmp_va}\""
        ;;
    package-install)
        ddk_compile_add "call_package_install \"${tmp_va}\""
        ;;
    package)
        ddk_compile_add "call_package ${tmp_va}"
        ;;
    *)
        # Unknown directives are passed through as a plain command.
        ddk_compile_add "${tmp_nm} ${tmp_va}"
        ;;
    esac
}
ddk_compile_mk_include(){
    # Handle an "include $(X)" makefile line. The well-known pseudo
    # targets become bare commands in the generated script; anything else
    # becomes an `include "<text>"` call.
    tmp_nm=`expr "${tmp_val}" : '^[[:blank:]]*\$([[:blank:]]*\([a-zA-Z0-9_]\{1,\}\)[[:blank:]]*[[:print:]]*'`
    case "${tmp_nm}" in
    CLEAR_VARS)
        ddk_compile_add "CLEAR_VARS"
        ;;
    BUILD_STATIC_LIBRARY)
        # Close any open per-target function before emitting the build step.
        ddk_compile_clear_cmd_prefix
        ddk_compile_add "BUILD_STATIC_LIBRARY"
        ;;
    BUILD_SHARED_LIBRARY)
        ddk_compile_clear_cmd_prefix
        ddk_compile_add "BUILD_SHARED_LIBRARY"
        ;;
    BUILD_EXCUTABLE)
        ddk_compile_clear_cmd_prefix
        ddk_compile_add "BUILD_EXCUTABLE"
        ;;
    *)
        ddk_compile_add "include \"${tmp_val}\""
        ;;
    esac
}
ddk_compile_mk_ifdef(){
    # Print "1" when an *exported* environment variable named ${1} exists,
    # otherwise "0". Variable names contain no regex metacharacters, so a
    # plain anchored grep over `env` is sufficient.
    if env | grep -q "^${1}="; then
        echo "1"
    else
        echo "0"
    fi
}
ddk_compile_mk_ifex(){
    # Compile an "ifeq (a,b)"-style makefile conditional into a shell
    # if/elif line. ${1} selects the operator variant; the operands are
    # parsed from "(a,b)" in the global ${tmp_val}.
    tmp_if=`expr "$tmp_val" : '^[[:blank:]]*(\([[:print:]]\{1,\}\))[[:blank:]]*$'`
    tmp_a_if=$(echo "${tmp_if}" | tr "," "\n")
    tmp_no_if=0
    for tmp_o_if in $tmp_a_if
    do
        tmp_no_if=$(($tmp_no_if+1))
        if [ $tmp_no_if -eq 1 ]; then
            tmp_1_if=$tmp_o_if
        elif [ $tmp_no_if -eq 2 ]; then
            tmp_2_if=$tmp_o_if
        else
            ddk_exit 1 "syntax error(7): ${line}"
        fi
    done
    # Exactly two comma-separated operands are required.
    if [ $tmp_no_if -ne 2 ]; then
        ddk_exit 1 "syntax error(8): ${line}"
    fi
    # NOTE(review): in the generated `[ a > b ]` / `[ a < b ]` lines the
    # unquoted > and < are shell redirections, not comparisons — confirm
    # the intended semantics of the gt/lt variants.
    case "${1}" in
    eq)
        ddk_compile_add "if [ ${tmp_1_if} = ${tmp_2_if} ]; then"
        ;;
    eleq)
        ddk_compile_add "elif [ ${tmp_1_if} = ${tmp_2_if} ]; then"
        ;;
    ne)
        ddk_compile_add "if [ ${tmp_1_if} != ${tmp_2_if} ]; then"
        ;;
    elne)
        ddk_compile_add "elif [ ${tmp_1_if} != ${tmp_2_if} ]; then"
        ;;
    gt)
        ddk_compile_add "if [ ${tmp_1_if} > ${tmp_2_if} ]; then"
        ;;
    elgt)
        ddk_compile_add "elif [ ${tmp_1_if} > ${tmp_2_if} ]; then"
        ;;
    lt)
        ddk_compile_add "if [ ${tmp_1_if} < ${tmp_2_if} ]; then"
        ;;
    ellt)
        ddk_compile_add "elif [ ${tmp_1_if} < ${tmp_2_if} ]; then"
        ;;
    esac
}
ddk_compile_mk_nomak(){
    # Compile a makefile line that has no assignment/target operator.
    # ${tmp_cmd} is the first word, ${tmp_val} the rest of the line.
    case "${tmp_cmd}" in
    include)
        ddk_compile_mk_include
        ;;
    ifeq)
        ddk_compile_mk_ifex "eq"
        ;;
    ifne|ifneq)
        ddk_compile_mk_ifex "ne"
        ;;
    ifle|ifgt)
        ddk_compile_mk_ifex "gt"
        ;;
    ifge|iflt)
        ddk_compile_mk_ifex "lt"
        ;;
    elifeq|el-ifeq|else-ifeq|el_ifeq|else_ifeq|eleq)
        ddk_compile_mk_ifex "eleq"
        ;;
    elifne|el-ifne|else-ifne|el_ifne|else_ifne|elne)
        ddk_compile_mk_ifex "elne"
        ;;
    elifle|el-ifle|else-ifle|el_ifle|else_ifle|elle)
        ddk_compile_mk_ifex "elgt"
        ;;
    elifge|el-ifge|else-ifge|el_ifge|else_ifge|elge)
        ddk_compile_mk_ifex "ellt"
        ;;
    ifdef)
        # Generates a runtime ddk_compile_mk_ifdef check ("1" == defined).
        if [ "${tmp_val}" = "" ]; then
            ddk_exit 1 "syntax error(9): ${line}"
        fi
        ddk_compile_add "if [ \"\$(ddk_compile_mk_ifdef \"${tmp_val}\")\" = \"1\" ]; then"
        ;;
    ifndef)
        if [ "${tmp_val}" = "" ]; then
            ddk_exit 1 "syntax error(10): ${line}"
        fi
        ddk_compile_add "if [ \"\$(ddk_compile_mk_ifdef \"${tmp_val}\")\" = \"0\" ]; then"
        ;;
    elifdef|el-ifdef|else-ifdef|el_ifdef|else_ifdef)
        if [ "${tmp_val}" = "" ]; then
            ddk_exit 1 "syntax error(11): ${line}"
        fi
        ddk_compile_add "elif [ \"\$(ddk_compile_mk_ifdef ${tmp_val})\" = \"1\" ]; then"
        ;;
    elifndef|el-ifndef|else-ifndef|el_ifndef|else_ifndef)
        if [ "${tmp_val}" = "" ]; then
            ddk_exit 1 "syntax error(12): ${line}"
        fi
        ddk_compile_add "elif [ \"\$(ddk_compile_mk_ifdef ${tmp_val})\" = \"0\" ]; then"
        ;;
    else)
        ddk_compile_add "else"
        ;;
    endif)
        ddk_compile_add "fi"
        ;;
    echo)
        if [ "${tmp_val}" = "" ]; then
            ddk_compile_add "echo \"\""
        else
            ddk_compile_add "echo ${tmp_val}"
        fi
        ;;
    *)
        #echo "x: $tmp_cmd, $tmp_val"
        # Unknown lines are copied through verbatim.
        # NOTE(review): this reads ${tmp_str}, which may be stale/unset
        # here, and the sed result overwrites tmp_cmd unused — verify.
        tmp_cmd=`echo "${tmp_str}" | sed -e 's/-/_/g'`
        ddk_compile_add "${line}"
        #ddk_exit 1 "syntax error(13): ${line}"
        ;;
    esac
}
ddk_compile_mk_cmd_prefix_add_s(){
    # Append ${tmp_cmd_module} to the space-separated module list
    # ${tmp_cmd_prefix_s}, unless it is already present (the list stays
    # duplicate-free).
    local known
    for known in ${tmp_cmd_prefix_s}
    do
        if [ "${known}" = "${tmp_cmd_module}" ]; then
            return 0
        fi
    done
    tmp_cmd_prefix_s="${tmp_cmd_prefix_s} ${tmp_cmd_module}"
}
ddk_compile_mk_cmd_prefix_add_index(){
    # Record ${tmp_cmd} in the space-separated index of generated target
    # names ${tmp_cmd_prefix_index}, skipping duplicates.
    local known
    for known in ${tmp_cmd_prefix_index}
    do
        if [ "${known}" = "${tmp_cmd}" ]; then
            return 0
        fi
    done
    tmp_cmd_prefix_index="${tmp_cmd_prefix_index} ${tmp_cmd}"
}
ddk_compile_mk_cmd_prefix(){
    # Open a new per-target shell function "<module>_<cmd>(){" in the
    # generated script. Requires LOCAL_MODULE to have been set first.
    if [ "${tmp_cmd_module}" = "" ]; then
        ddk_exit 1 "syntax error(3): ${line}\nsyntax error(3): This syntax between LOCAL_MODULE and BUILD_STATIC_LIBRARY, BUILD_SHARED_LIBRARY, BUILD_EXCUTABLE."
    fi
    tmp_has_prefix=0
    if [ "${tmp_cmd_prefix}" != "" ]; then
        # A function is already open: close it before starting a new one.
        tmp_has_prefix=1
        ddk_compile_clear_cmd_prefix
    fi
    # Dashes are invalid in shell function names.
    tmp_str="${tmp_cmd_module}_${tmp_cmd}"
    tmp_str=`echo "${tmp_str}" | sed -e 's/-/_/g'`
    tmp_cmd_prefix="${tmp_cmd}"
    ddk_compile_mk_cmd_prefix_add_s
    ddk_compile_mk_cmd_prefix_add_index
    # NOTE(review): both branches emit the same line; the if/else looks
    # vestigial — confirm before simplifying.
    if [ $tmp_has_prefix -eq 1 ]; then
        ddk_compile_add "${tmp_str}(){"
    else
        ddk_compile_add "${tmp_str}(){"
    fi
}
ddk_compile_mk_set(){
    # Compile "NAME = value" / "NAME := value" into a shell assignment,
    # mirroring a few special LOCAL_* variables into DDK_SET_* globals.
    if [ "${tmp_val}" = "=" ]; then
        tmp_val=""
    fi
    case "${tmp_cmd}" in
    LOCAL_MODULE)
        tmp_cmd_module="${tmp_val}"
        ;;
    LOCAL_SUBDIRS)
        DDK_SET_SUBDIRS="${tmp_val}"
        ;;
    LOCAL_NO_SUBDIRS)
        DDK_SET_NO_SUBDIRS="${tmp_val}"
        ;;
    LOCAL_NO_VERSION)
        # Presence of LOCAL_NO_VERSION is treated as a boolean flag.
        tmp_val="1"
        ;;
    esac
    # Escape embedded quotes when the value itself will be quoted.
    if [ "${tmp_pfix}" = "\"" ]; then
        tmp_val=`echo "${tmp_val}" | sed -e 's/\"/\\\"/g'`
    fi
    ddk_compile_add "${tmp_cmd}=${tmp_pfix}${tmp_val}${tmp_sfix}"
    case "${tmp_cmd}" in
    LOCAL_MODULE)
        # Derived convenience variable: path of the built module binary.
        ddk_compile_add "LOCAL_MODULE_BIN=${tmp_pfix}\${DDK_ENV_TARGET_BUILD}/${tmp_val}${tmp_sfix}"
        ;;
    esac
}
ddk_compile_mk_plus(){
    # Compile "NAME += value" into NAME="${NAME} value", appending to the
    # DDK_SET_* mirrors for the subdir list variables.
    tmp_pfix="\""
    tmp_sfix="\""
    if [ "${tmp_val}" = "=" ]; then
        tmp_val=""
    fi
    case "${tmp_cmd}" in
    LOCAL_SUBDIRS)
        DDK_SET_SUBDIRS="${DDK_SET_SUBDIRS} ${tmp_val}"
        ;;
    LOCAL_NO_SUBDIRS)
        DDK_SET_NO_SUBDIRS="${DDK_SET_NO_SUBDIRS} ${tmp_val}"
        ;;
    esac
    ddk_compile_add "${tmp_cmd}=${tmp_pfix}\${${tmp_cmd}} ${tmp_val}${tmp_sfix}"
}
ddk_compile_mk_hasmak(){
    # Dispatch a makefile line that contains an operator (':' target,
    # '='/':=' assignment, '+=' append). Also strips optional surrounding
    # double quotes from the value and decides whether the generated
    # assignment needs quoting (value contains spaces).
    tmp_s3=""
    tmp_s1=`expr "$tmp_val" : '^[[:blank:]]*\(\"\)[[:print:]]\{1,\}'`
    if [ "${tmp_s1}" != "" ]; then
        tmp_s2=`expr "$tmp_val" : '[[:print:]]\{1,\}\(\"\)[[:blank:]]*\$'`
        if [ "${tmp_s2}" != "\"" ]; then
            ddk_exit 1 "syntax error(4): ${line}"
        fi
        tmp_s3=`expr "$tmp_val" : '^[[:blank:]]*\"\([[:print:]]\{1,\}\)\"[[:blank:]]*\$'`
    fi
    tmp_pfix=""
    tmp_sfix=""
    if [ "${tmp_s3}" != "" ]; then
        tmp_pfix="\""
        tmp_sfix="\""
        tmp_val="${tmp_s3}"
    else
        tmp_sb=`echo "${tmp_val}" | grep " "`
        if [ "${tmp_sb}" != "" ]; then
            tmp_pfix="\""
            tmp_sfix="\""
        fi
    fi
    tmp_cmd=`echo "${tmp_cmd}" | sed -e 's/-/_/g'`
    case "${tmp_mak}" in
    :)
        ddk_compile_mk_cmd_prefix
        ;;
    =)
        ddk_compile_mk_set
        ;;
    :=)
        ddk_compile_mk_set
        ;;
    +=)
        ddk_compile_mk_plus
        ;;
    *)
        ddk_exit 1 "syntax error(5): ${line} at ${1}:${tmp_no}"
        ;;
    esac
}
ddk_compile_mk(){
    # ${1} : directory
    # ${2} : Dframework.mk or Application.mk
    # Parse the makefile line by line and build the equivalent shell
    # script in the global ${tmp_mkbuf}. Returns 1 when the file is
    # missing, 0 otherwise.
    tmp_mk_fnm="${1}/${2}"
    if test ! -f "${tmp_mk_fnm}"; then
        return 1
    fi
    tmp_mkbuf="#!/bin/sh\n"
    tmp_cmd_prefix_s=""
    tmp_cmd_prefix_index=""
    tmp_no=0
    ddk_compile_clear_cmd_prefix
    while read line
    do
        # Reset the per-line parser state.
        tmp_cmd=""
        tmp_call=""
        tmp_val=""
        tmp_mak=""
        tmp_nm=""
        tmp_no=$(($tmp_no+1))
        if [ "${line}" = "" ]; then
            continue
        fi
        # '#' comment lines are dropped.
        cmt=`expr "$line" : '\(^[[:blank:]]*\#\)'`
        if [ "${cmt}" = "#" ]; then
            continue
        fi
        # '@' lines are copied through verbatim (raw shell escape hatch).
        cmt=`expr "$line" : '\(^[[:blank:]]*\@\)'`
        if [ "${cmt}" = "@" ]; then
            tmp_val=`expr "$line" : '^[[:blank:]]*\@\([[:print:]]*\)'`
            ddk_compile_add "${tmp_val}"
            continue
        fi
        tmp_cmd=`expr "$line" : '\(^[a-zA-Z0-9_-]\{1,\}\)[[:blank:]\:\{1,\}]*'`
        if [ "${tmp_cmd}" = "" ]; then
            # No leading identifier: the only other legal form is a
            # whole-line "$( ... )" directive.
            tmp_call=`expr "$line" : '^[[:blank:]]*\$(\([[:print:]]\{1,\}\))[[:blank:]]*$'`
            if [ "${tmp_call}" != "" ]; then
                ddk_compile_call_func
                continue
            else
                ddk_exit 1 "syntax error(6b): ${line} at ${tmp_mk_fnm}:${tmp_no}"
            fi
        fi
        # Extract the operator (:, =, :=, +=) and the value text.
        tmp_mak=`expr "$line" : '^[a-zA-Z0-9_-]\{1,\}[[:blank:]]*\([\:\+\=]\{1,\}\)'`
        if [ "${tmp_mak}" != "" ]; then
            tmp_val=`expr "$line" : '^[a-zA-Z0-9_-]\{1,\}[[:blank:]]*[\:\+\=]\{1,\}[[:blank:]]*\([[:print:]]\{1,\}\)[[:blank:]]*$'`
        else
            tmp_val=`expr "$line" : '^[a-zA-Z0-9_-]\{1,\}[[:blank:]]\{1,\}\([[:print:]]\{1,\}\)[[:blank:]]*$'`
        fi
        if [ "${tmp_mak}" = "" ]; then
            ddk_compile_mk_nomak
        else
            ddk_compile_mk_hasmak
        fi
    done < "${tmp_mk_fnm}"
    ddk_compile_clear_cmd_prefix
    #################################################################
    # For every recorded command (build, clean, ...) generate an umbrella
    # function that invokes the per-module variant for each module seen.
    if [ "${tmp_cmd_prefix_index}" != "" ]; then
        tmp_cmd_prefix_a_index=$(echo $tmp_cmd_prefix_index | tr " " "\n")
        for tmp_x in $tmp_cmd_prefix_a_index
        do
            if [ "${tmp_x}" != "" ]; then
                ddk_compile_add ""
                ddk_compile_add "${tmp_x}(){"
                if [ "${tmp_cmd_prefix_s}" != "" ]; then
                    tmp_cmd_prefix_a_s=$(echo $tmp_cmd_prefix_s | tr " " "\n")
                    for tmp_y in $tmp_cmd_prefix_a_s
                    do
                        if [ "${tmp_y}" != "" ]; then
                            tmp_str="${tmp_y}_${tmp_x}"
                            tmp_str=`echo "${tmp_str}" | sed -e 's/-/_/g'`
                            ddk_compile_add "    ${tmp_str}"
                            tmp_count=$((tmp_count+1))
                        fi
                    done
                fi
                ddk_compile_add "\n}"
            fi
        done
    fi
    # Expose the list of generated umbrella commands to the loader.
    ddk_compile_add "LOCAL_CMD_PREFIX=\"${tmp_cmd_prefix_index}\""
    #echo $tmp_mkbuf
    return 0
}
ddk_get_app_mk(){
    # Walk upward from directory ${1} towards / looking for an
    # Application.mk. On success returns 0 and leaves the directory that
    # contains it in the global ${tmp_path}; the caller's cwd is restored.
    tmp_find=1
    tmp_path=$1
    tmp_init_path=$1
    tmp_init_pwd=`pwd`
    tmp_app_nm=""
    cd $tmp_path
    ddk_exit $? "error:: cd $tmp_path"
    while [ "$tmp_path" != "/" ];
    do
        tmp_app_nm="${tmp_path}/Application.mk"
        if test -f $tmp_app_nm; then
            tmp_find=0
            break
        fi
        cd ..
        tmp_path=`pwd`
    done
    cd $tmp_init_pwd
    ddk_exit $? "error:: cd $tmp_init_pwd"
    return $tmp_find
}
ddk_app_mk(){
    # Search upward from ${1} for the nearest Application.mk; when one is
    # found (ddk_get_app_mk leaves its directory in ${tmp_path}), compile
    # it to a shell snippet and load it.
    if ddk_get_app_mk "${1}"; then
        ddk_compile_mk "${tmp_path}" "Application.mk"
        ddk_load_mk "${tmp_path}" "Application.mk"
    fi
}
ddk_excute_mk(){
    # Source the compiled snippet ${3} for makefile ${2} in directory ${1}.
    # For a Dframework.mk, first pull in the nearest Application.mk, then
    # run the requested DDK_ENV_CMD if the snippet declared it in
    # LOCAL_CMD_PREFIX.
    if [ "${2}" = "Dframework.mk" ]; then
        ddk_app_mk "${1}"
    fi
    . $3
    if [ "${2}" = "Dframework.mk" ]; then
        if [ "${DDK_ENV_CMD}" != "" ]; then
            tmp_find=0
            for tmp_x in $LOCAL_CMD_PREFIX
            do
                if [ "$tmp_x" = "$DDK_ENV_CMD" ]; then
                    tmp_find=1
                fi
            done
            if [ $tmp_find -eq 1 ]; then
                $DDK_ENV_CMD
            fi
        fi
    fi
}
ddk_load_mk(){
    # ${1} : directory
    # ${2} : Dframework.mk or Application.mk
    # Compile-and-cache loader: the makefile is compiled into
    # ${DDK_ENV_TARGET_WORKING}${1}/${2}.S and recompiled only when the
    # source mtime differs from the cached snippet's mtime.
    tmp_mk_input="${1}/${2}"
    if test ! -f "${tmp_mk_input}"; then
        return 1
    fi
    tmp_mk_output_folder="${DDK_ENV_TARGET_WORKING}${1}"
    tmp_mk_output="${tmp_mk_output_folder}/${2}.S"
    tmp_mk_time_input=$(ddk_call_mtime "$tmp_mk_input")
    tmp_mk_time_output=$(ddk_call_mtime "$tmp_mk_output")
    # ddk_call_mtime reports 1 on failure and 0 for "no such file".
    if [ $tmp_mk_time_input -eq 1 ]; then
        ddk_exit 1 "don't get mtime: $tmp_mk_input"
    fi
    if [ $tmp_mk_time_output -eq 1 ]; then
        ddk_exit 1 "don't get mtime: $tmp_mk_output"
    fi
    if [ $tmp_mk_time_input -eq 0 ]; then
        ddk_exit 1 "ERROR: input file mtime : $tmp_mk_time_input"
    fi
    # Cache hit: timestamps match, reuse the existing snippet.
    if [ $tmp_mk_time_input -ne 0 ]; then
        if [ $tmp_mk_time_input -eq $tmp_mk_time_output ]; then
            ddk_excute_mk "${1}" "${2}" "${tmp_mk_output}"
            return 0
        fi
    fi
    # Stale Dframework.mk: clear previous build artefacts in the working dir.
    if [ "${2}" = "Dframework.mk" ]; then
        if test -d "${tmp_mk_output_folder}"; then
            rm -rf ${tmp_mk_output_folder}/*.Plo
            rm -rf ${tmp_mk_output_folder}/*.o
            rm -rf ${tmp_mk_output_folder}/*.mk.S
            rm -rf ${tmp_mk_output_folder}/*.a
            rm -rf ${tmp_mk_output_folder}/*.so
            rm -rf ${tmp_mk_output_folder}/*.dll
            rm -rf ${tmp_mk_output_folder}/*.exe
        fi
    fi
    if test ! -d "${tmp_mk_output_folder}"; then
        mkdir -p "${tmp_mk_output_folder}"
        if [ $? -ne 0 ]; then
            ddk_exit 1 "error: mkdir -p \"${tmp_mk_output_folder}\""
            exit 1
        fi
    fi
    # Flush the compiled buffer; some platforms need an explicit -e for
    # echo to expand the literal "\n" sequences in ${tmp_mkbuf}.
    case "$DDK_ENV_OSNAME" in
    centos|redhat|windows)
        `echo -e ${tmp_mkbuf} > ${tmp_mk_output}`
        res=$?
        ;;
    *)
        `echo ${tmp_mkbuf} > ${tmp_mk_output}`
        res=$?
        ;;
    esac
    if [ $res -ne 0 ]; then
        ddk_exit 1 "error: write to ${tmp_mk_output}"
    fi
    ddk_excute_mk "${1}" "${2}" "${tmp_mk_output}"
    # Copy the source mtime onto the snippet so the cache check above
    # can compare them for equality next time.
    touch -r ${tmp_mk_input} ${tmp_mk_output}
    if [ $? -ne 0 ]; then
        ddk_exit 1 "touch -r ${tmp_mk_input} ${tmp_mk_output}"
    fi
    return 0
}
| true |
326960f4006195dcacd700a7c81551e32928800e | Shell | pedroamador/docker-ubuntu-cucumber | /entrypoint.sh | UTF-8 | 371 | 2.609375 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Headless mode when DESKTOP is exactly "false"; otherwise hand control
# to supervisord, which manages the full desktop session.
# Quoting "$DESKTOP" fixes the unquoted test, which was a shell error
# whenever DESKTOP was unset/empty (falling through to the else branch),
# and '=' replaces the non-POSIX '=='.
if [ "$DESKTOP" = "false" ]; then
  # Virtual framebuffer on display :7 for the browser under test.
  Xvfb :7 -ac -screen 0 1280x1024x24 2>&1 &
  export DISPLAY=:7
  /usr/sbin/xrdp-sesman --nodaemon &
  /usr/sbin/xrdp -nodaemon &
  java -jar selenium-server-standalone-3.14.0.jar 2>&1 &
  # Give Selenium time to come up before launching the suite.
  sleep 5
  cd /opt/cucumber
  cucumber --format pretty --format html --out report.html
else
  # Run supervisord in the foreground so it stays PID 1 in the container.
  /usr/bin/supervisord -n
fi | true |
a9402fd7a6744e4d88ca506c58b7f7a38b9a3a10 | Shell | TomNussbaumer/static-webserver | /tests/run-tests.sh | UTF-8 | 4,219 | 3.890625 | 4 | [
"MIT"
] | permissive | #!/bin/sh
# script to run tests against static-webserver.sh
#
# The MIT License (MIT)
#
# Copyright (c) 2015 Tom Nussbaumer <thomas.nussbaumer@gmx.net
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
## test for preserving spaces
# VAR1="a b c"; IFS=''; for a in $VAR1; do echo $a; done; unset IFS
# Single per-run log file; remove any log left over from a previous run.
LOGFILE=tests.log
[ -f "$LOGFILE" ] && rm $LOGFILE
#params infomessage
showInfo() {
  # Emit an informational line to stdout and append it to $LOGFILE.
  printf '[INFO] %s\n' "$1" | tee -a "$LOGFILE"
}
#params errormessage
showErrorNoExit() {
  # Emit an error line to stdout and the log without terminating.
  printf '[ERROR] %s\n' "$1" | tee -a "$LOGFILE"
}
#params errormessage exitcode
showError() {
  # Emit an error line and terminate with the supplied exit code.
  #   $1 - message; quoted so multi-word messages survive intact
  #        (previously word-split, so only the first word was logged)
  #   $2 - exit code
  showErrorNoExit "$1"
  exit "$2"
}
# Abort early when the docker CLI is not installed (command -v is the
# portable replacement for `which`).
if ! command -v docker >/dev/null 2>&1; then
  showErrorNoExit "docker is not installed or cannot be found"
  showInfo ""
  showInfo "Please install docker to run the tests."
  showInfo ""
  showInfo "see: https://docs.docker.com/installation/"
  exit 1
fi

# "docker info" exits non-zero when the daemon is unreachable; test the
# exit status directly. (The previous `[ "$(docker info)" -ne 0 ]`
# compared the command's *text output* as an integer and always errored.)
if ! docker info >/dev/null 2>&1; then
  showErrorNoExit "ERROR: docker not working properly"
  showInfo ""
  showInfo "check: sudo service docker status"
  showInfo "start: sudo service docker start"
  showInfo ""
  exit 1
fi
# Absolute path of the repository root (one level above this script).
# NOTE(review): readlink -f is GNU; confirm target platforms (not on
# stock macOS/BSD readlink).
TMPVAR=$(dirname "$(pwd)/$0")
ABSPATH_HOME=$(readlink -f "$TMPVAR/..")
TESTIMAGE=static-webserver-test:latest
TESTCONTAINER=static-webserver-testcontainer
# Build the test image on first use.
if [ "$(docker images -q $TESTIMAGE)" = "" ]; then
  showInfo "$TESTIMAGE not found. building it now ..."
  docker build -t $TESTIMAGE "$ABSPATH_HOME/tests"
  if [ $? -ne 0 ]; then
    showError "build failed. wtf?" 1
  fi
fi
# Remove a stale container left over from a previous (aborted) run so the
# fixed container name can be reused.
CID=$(docker ps -aq --no-trunc --filter "name=$TESTCONTAINER");
if [ "$CID" ]; then
  showInfo "old container found. removing it"
  docker rm -f $CID
fi
TESTSCRIPT=test_variant.sh

# Run the variant test script inside a fresh, auto-removed container.
#   $1 - extra argument forwarded to the script (e.g. "--force=python");
#        deliberately left unquoted so an empty value adds no argument.
testVariant() {
  showInfo "run $TESTSCRIPT in container with param [$1]"
  docker run -ti --rm --name=$TESTCONTAINER \
         -v "$ABSPATH_HOME":/home/tester/import \
         $TESTIMAGE import/tests/$TESTSCRIPT $1
}
# Exercise every supported server implementation; each run's output is
# appended to the shared log.
testVariant "" | tee -a $LOGFILE
testVariant "--force=python" | tee -a $LOGFILE
testVariant "--force=python2" | tee -a $LOGFILE
testVariant "--force=python3" | tee -a $LOGFILE
testVariant "--force=php" | tee -a $LOGFILE

# Run the variant script after first executing ${1} (a command that
# removes interpreters) inside the container, to test fallback behaviour.
testPackagesRemoved() {
  CMDLINE="$1 && import/tests/$TESTSCRIPT"
  # running it with user root so we can anything we want
  showInfo "run $TESTSCRIPT in container with CMD:"
  showInfo "[$CMDLINE]"
  docker run -ti --rm --name=$TESTCONTAINER \
         -v "$ABSPATH_HOME":/home/tester/import \
         -u root \
         $TESTIMAGE bash -c "$CMDLINE"
}

###############################################################################
# NOTE:
#
# there is no way to uninstall python2 from ubuntu (wtf?)
#
# For this reason and for speedup i will just delete the links and executables
# from the container. Since it is nevertheless a throw-away container this
# doesn't matter.
#
# DON'T do this ever on a REAL machine! Use 'apt-get remove' or whatever ...
###############################################################################
testPackagesRemoved 'rm $(which python) $(which python2)' | tee -a $LOGFILE
testPackagesRemoved 'rm $(which python) $(which python2) $(which python3)' | tee -a $LOGFILE
# this last one is EXPECTED TO FAIL
testPackagesRemoved 'rm $(which python) $(which python2) $(which python3) $(which php)' | tee -a $LOGFILE
| true |
c62953bb78e8cc88ecb1ee363e0270a5671fd153 | Shell | bolishettianjali/MiniBootCampAssignment | /Day-6/Functions_Problems/CheckTwoNumPalindrome.sh | UTF-8 | 435 | 3.8125 | 4 | [] | no_license | #! /bin/bash
function check() {
num=$1
sum=0
rev=""
temp=$num
while [ $num -gt 0 ]
do
sum=$(( $num % 10 ))
num=$(( $num / 10 ))
rev=$( echo ${rev}${sum} )
done
if [ $temp -eq $rev ];
then
echo "$temp is palindrome"
else
echo "$temp is NOT palindrome"
fi
}
read -p "enter first number: " num1
read -p "enter second number: " num2
check $num1
check $num2
| true |
f8052379af45b52d08843c7e7bc8ed5755b46a32 | Shell | tevin-tan/shell | /basic_cmd/factorial.sh | UTF-8 | 222 | 3.8125 | 4 | [] | no_license | #!/usr/bin/env bash
# Compute n! where n is the first command-line argument.
if [ -z "$1" ]; then
    echo "Useage: Please enter an argv"
    # exit -1 is not a valid exit status (bash maps it to 255); use 1.
    exit 1
fi

value=1
# $(( )) replaces the deprecated $[ ] arithmetic form; inside an
# arithmetic context variable names need no leading "$".
for (( number = 1; number <= $1; number++ ))
do
    value=$(( value * number ))
done
echo "The factorial of $1 is $value"
| true |
4d2d95b98ff2b0035b4c61c47228548a75b06047 | Shell | reihan35/2018_IRILL_INTERNSHIP | /programs/week_4_temps_d_exec/timesjs.sh | UTF-8 | 2,281 | 3.5625 | 4 | [] | no_license | echo $1
if [ $1=="js_of_ocaml" ]; then
opam switch 4.06.0 > /dev/null
eval `opam config env`
for i in $(seq 1 $2)
do
echo "exécution n° $i finie."
(time -p ocamlfind ocamlc js_affiche.ml -o js_affiche.byte -package js_of_ocaml -package js_of_ocaml-ppx -linkpkg | tail -n 3 | head -n 1) > outfile 2>>time1.txt
js_of_ocaml js_affiche.byte
while IFS='' read -r line || [[ -n "$line" ]]; do
a=($line)
if [ ${a[0]} == "real" ]
then
echo ${a[1]} >> realsm$1.txt
fi
if [ ${a[0]} == "user" ]
then
echo ${a[1]} >> usersm$1.txt
fi
if [ ${a[0]} == "sys" ]
then
echo ${a[1]} >> sysm$1.txt
fi
#echo "Text read from file: $line"
done < "time1.txt"
done
elif [ $1=="bucklescript" ];then
echo "je suis là"
opam switch 4.02.3 > /dev/null
eval `opam config env`
cd /home/fati/UPMC/Stage_été_2018/2018_IRILL_INTERNSHIP/programs/week_4_temps_d_exec/CtoF_Buckle_scripts_alert/src
(time -p npm run build ) > outfile 2>>time2.txt
while IFS='' read -r line || [[ -n "$line" ]]; do
a=($line)
if [ ${a[0]} == "real" ]
then
echo ${a[1]} >> realsm$1.txt
fi
if [ ${a[0]} == "user" ]
then
echo ${a[1]} >> usersm$1.txt
fi
if [ ${a[0]} == "sys" ]
then
echo ${a[1]} >> sysm$1.txt
fi
#echo "Text read from file: $line"
done < "time2.txt"
else
opam switch 3.12.1 > /dev/null
eval `opam config env`
cd /home/fati/UPMC/Stage_été_2018/2018_IRILL_INTEsRNSHIP/lib/obrowser-master/examples/CtoF_Obrowser_alert
(time -p make) > outfile 2>>time3.txt
fi
while IFS='' read -r line || [[ -n "$line" ]]; do
a=($line)
if [ ${a[0]} == "real" ]
then
echo ${a[1]} >> realsm$1.txt
fi
if [ ${a[0]} == "user" ]
then
echo ${a[1]} >> usersm$1.txt
fi
if [ ${a[0]} == "sys" ]
then
echo ${a[1]} >> sysm$1.txt
fi
#echo "Text read from file: $line"
done < "time3.txt"
sed 's/,/./' realsm$1.txt >> real$1.txt
rm -f realsm$1.txt
sed 's/,/./' usersm$1.txt >> user$1.txt
rm -f usersm$1.txt
sed 's/,/./' sysm$1.txt >> sys$1.txt
rm -f sysm$1.txt
echo -e "***************************************"
echo -e "Pour "$1
echo -e "temps moyen (réel) sur $2 exécution : "
./bench real$1.txt
echo -e "\ntemps moyen (utilisateur) sur $2 exécution :"
./bench user$1.txt
echo -e "\ntemps moyen (système) sur $2 exécution :"
./bench sys$1.txt
| true |
ecef793a0ef3b954cae58c351b097c7c0f502d51 | Shell | ShalokShalom/plan.sh | /icedtea-web/plan.sh | UTF-8 | 2,094 | 2.890625 | 3 | [
"Apache-2.0"
] | permissive | pkg_origin=
# Package recipe (pkg_* metadata plus do_build/do_check/do_package hooks,
# evaluated by the surrounding build system — presumably Habitat-style;
# confirm against the framework docs).
pkg_name=icedtea-web
pkg_description="Provides a Free Software web browser plugin running applets written in the Java programming
language and an implementation of Java Web Start, originally based on the NetX project."
pkg_version=1.6.2
pkg_upstream_url="http://icedtea.classpath.org/wiki/IcedTea-Web"
pkg_license=('GPL2')
pkg_deps=('openjdk' 'desktop-file-utils')
pkg_build_deps=('zip' 'npapi-sdk' 'rhino' 'junit' 'cups' 'mercurial' 'firefox')
pkg_source=("hg+http://icedtea.classpath.org/hg/icedtea-web")
#noextract="${pkg_name}-$pkg_version.tar.gz"
pkg_shasum=('SKIP')
# JDK major version and the JVM install prefix everything is keyed on.
_javaver=8
_jvmdir="/usr/lib/jvm/java-${_javaver}-openjdk"
do_build() {
  #cd ${CACHE_PATH}
  #LANG=en_US.UTF-8 bsdtar -x -f ${CACHE_PATH}/${pkg_name}-${pkg_version}.tar.gz
  # Configure against the packaged OpenJDK and build in-tree.
  cd ${CACHE_PATH}/${pkg_name}*
  ./autogen.sh
  ./configure --prefix="${_jvmdir}" \
    --with-jdk-home="${_jvmdir}" \
    --datarootdir=/usr/share \
    --disable-docs
  make
}
do_check() {
  # Test suite disabled (make -k check left commented out).
  cd ${CACHE_PATH}/${pkg_name}*
  #make -k check
}
do_package() {
  cd ${CACHE_PATH}/${pkg_name}*
  # possible make target (see bottom of Makefile.am: install-exec-local install-data-local
  make install-exec-local install-data-local DESTDIR="${pkg_prefix}"
  # Install desktop files.
  install -m755 -d "${pkg_prefix}/usr/share"/{applications,pixmaps}
  install -m644 javaws.png "${pkg_prefix}/usr/share/pixmaps"
  install -m644 {javaws,itweb-settings}.desktop "${pkg_prefix}/usr/share/applications"
  # link binaries into /usr/bin + jre/bin
  install -m755 -d "${pkg_prefix}/usr/bin"
  install -m755 -d "${pkg_prefix}/${_jvmdir}/jre/bin"
  pushd "${pkg_prefix}/${_jvmdir}/bin"
  for file in *; do
    ln -sf "${_jvmdir}/bin/${file}" "${pkg_prefix}/usr/bin"
    ln -sf "${_jvmdir}/bin/${file}" "${pkg_prefix}/${_jvmdir}/jre/bin"
  done
  popd
  # link the mozilla-plugin - test it here http://www.java.com/en/download/help/testvm.xml
  install -m755 -d "${pkg_prefix}/usr/lib/mozilla/plugins/"
  ln -sf "${_jvmdir}/lib/IcedTeaPlugin.so" "${pkg_prefix}/usr/lib/mozilla/plugins/"
}
| true |
c817fad29cb01450c794abe2060946cbb3cb931b | Shell | mtrw/MTRW_eggplant_genome_assembly | /scripts/mapping_stats_ep.zsh | UTF-8 | 930 | 2.703125 | 3 | [] | no_license | #!/bin/zsh
# zsh script: print one tab-separated mapping-statistics row for a sample.
# Default --mapdir / --errdir to the current directory; zparseopts stores
# each option's value in element 2 of its array.
md=( "" "." )
ed=( "" "." )
zparseopts -D -K -prefix:=pf -mapdir:=md -errdir:=ed
base=${pf[2]}
# Header row (zsh echo expands \t by default).
echo -n "#sample\tall_reads\ttrimmed_reads\tdup_rate\tmapped_reads"
echo -n "\tassigned_to_fragment\tpaired_end"
echo "\ttrue_links\tsame_fragment\tbetween_scaffolds\twithin_scaffolds"
echo -n $base"\t"
# Total read pairs, from the cutadapt log.
cat ${ed[2]}/${base}_cutadapt.err | grep -m 1 "^Total read pairs processed:" \
    | awk -F: '{printf $2"\t"}' | tr -d ', '
# Pairs surviving trimming (the "(xx%)" suffix is stripped by cut).
cat ${ed[2]}/${base}_cutadapt.err | grep -m1 'Pairs written (passing filters):' \
    | awk -F: '{print $2}' | tr -d ', ' | cut -d '(' -f 1 | tr '\n' '\t'
# Duplicate rate computed from novosort's proper/improper counts.
grep -m1 Improper ${ed[2]}/${base}_novosort1.err | tr -s ' ' | cut -d ' ' -f 4,5 | tr -d , \
    | awk '{printf $2/($2+$1)"\t"}'
# Remaining columns come straight from the precomputed count files.
awk '{printf $1"\t"}' ${md[2]}/${base}_both_mapped_q10.len
awk '{printf $1"\t"}' ${md[2]}/${base}_reads_to_fragments.bed.len
awk '{printf $1"\t"}' ${md[2]}/${base}_pe_count.txt
cat ${md[2]}/${base}_frag_stat.txt
| true |
75186f9e45e4e10274d4197b4c9b181b33f31a59 | Shell | SamDanielThangarajan/tools | /mactools/launch_agents/scripts/launch_agent.sh | UTF-8 | 4,176 | 3.375 | 3 | [] | no_license | #!/usr/bin/env bash
# tmux status-bar colour fragments, named <background>B_<foreground>F,
# e.g. redB_whiteF = white text on a red background.
plainB_redF="#[fg=colour196,bg=colour238]"
redB_whiteF="#[fg=colour15,bg=colour196]"
blueB_whiteF="#[fg=colour15,bg=colour39]"
plainB_blueF="#[fg=colour39,bg=colour238]"
reset_color="#[fg=colour238,bg=colour238]"
plainB_yelF="#[fg=colour3,bg=colour238]"
yelB_blaF="#[fg=colour0,bg=colour3]"
plainB_greF="#[fg=colour118,bg=colour238]"
greB_blaF="#[fg=colour0,bg=colour118]"
plainB_greenF="#[fg=colour46,bg=colour238]"
greenB_yellowF="#[fg=colour220,bg=colour46]"
# State directories read/written by the agent functions below.
rc="$HOME/.tmuxstatusrc"
push_notification_dir="${HOME}/.pushnotification"
# The tools checkout location must be exported by the caller (launchd plist).
[[ -z ${TOOLS} ]] \
    && >&2 echo "ENV{TOOLS} not defined" \
    && exit 1
# Extract the operation
# The first argument names the agent function to execute.
operation=$1 && shift
config_file=$HOME/.launchagents/$operation
function prunedockercontainers() {
    # Remove all stopped Docker containers; the sleep keeps launchd from
    # treating the job as an instantly-exiting failure.
    /usr/local/bin/docker container prune -f
    sleep 15
}
function prunesetupbackups() {
    # Keep only the newest backup of each setup artefact in $HOME.
    # NOTE(review): parsing `ls` output breaks on names with whitespace,
    # and xargs without -r errors on an empty list — confirm the backup
    # names are always well-formed and present.
    ls -t -1 ${HOME}/setup_tools.sh_* | tail -n +2 | xargs rm
    ls -t -1 ${HOME}/tools_alias_* | tail -n +2 | xargs rm
    ls -t -1 ${HOME}/.gitconfig_* | tail -n +2 | xargs rm
    ls -dt -1 ${HOME}/.vim_* | tail -n +2 | xargs rm -rf
    sleep 15
}
function timesheetslave() {
    # Open the timesheet URL (stored in the rc directory) in a new Safari
    # instance as a reminder.
    open -na safari $(cat ${rc}/timesheet.url)
}
function tmuxunreadmailcount() {
    # Publish the Outlook unread-mail count as a tmux status fragment; an
    # empty file hides the segment when nothing is unread.
    count=$(${TOOLS}/mactools/launch_agents/scripts/outlook.unread-mail-count)
    if [[ $count -eq 0 ]]
    then
        echo "" > $rc/unread
    else
        echo "${plainB_redF}${redB_whiteF} Outlook: $count${reset_color}" > $rc/unread
    fi
    sleep 15
}
# Publish the Gmail INBOX unread-thread count as a tmux status snippet in
# $rc/gmail.unread. Shows "gmail: -" when the API helper fails, nothing
# when there are no unread threads.
function gmailinboxunreadcount() {
  export HTTPS_PROXY=$(cat ${HOME}/.tmuxstatusrc/https_proxy)
  count=$(${TOOLS}/google_scripts/gmail/get_label.py -t ${HOME}/.gmail/.creds/token.json -l INBOX -f threadsUnread 2> /tmp/error.txt)
  # $? here is the exit status of the command substitution (the helper).
  if [[ $? -ne 0 ]]
  then
    echo "${plainB_redF}${redB_whiteF} gmail: -${reset_color}" > $rc/gmail.unread
  else
    if [[ $count -eq 0 ]]
    then
      echo "" > $rc/gmail.unread
    else
      echo "${plainB_redF}${redB_whiteF} gmail: $count${reset_color}" > $rc/gmail.unread
    fi
  fi
  sleep 15
}
# Consume queued notification files from $push_notification_dir (each file
# is "title:description"), display each via the push-notification helper,
# then delete the file.
# Fixes vs. the original: iterate the glob directly instead of parsing
# `ls` output (safe for names with spaces); quote all expansions; invoke
# the helper directly instead of `$( … )`, which also tried to execute the
# helper's stdout as a command.
function pushnotification() {
  local f title desc
  for f in "${push_notification_dir}"/*
  do
    # With no queued files the glob stays literal; skip it.
    [[ -e "$f" ]] || continue
    title=$(awk -F':' '{print $1}' "$f")
    desc=$(awk -F':' '{print $2}' "$f")
    "${TOOLS}/mactools/launch_agents/scripts/push-notification" "${title}" "${desc}"
    rm -f -- "$f"
  done
}
# Poll the Swedish Migration Agency's public case-status page for the
# control number stored in $rc/visastat.inp and write a coloured tmux
# status snippet to $rc/visastat (pending / decided / failed).
function monitorvisastatus() {
  tmp_op=$rc/visastat.tmp
  op=$rc/visastat
  control_no=$(cat $rc/visastat.inp)
  url="https://www.migrationsverket.se/Kontakta-oss/Min-sida-och-Kontrollera-din-ansokan/Kontrollera-din-ansokan-utan-inloggning.html"
  rm -rf ${tmp_op} 2>/dev/null
  # If previous curl failed, don't query again.. Manual fix is needed
  # To prevent getting locked out in proxy.
  if [[ -f $op ]]
  then
    grep -q visa-monitor-failed:FIX_PROXY $op
    [[ $? -eq 0 ]] && sleep 15 && return
  fi
  HTTPS_PROXY=$(cat $rc/https_proxy) /usr/bin/curl -s -d "typenr=2&q=${control_no}" -X POST ${url} -o $tmp_op
  [[ $? -ne 0 ]] \
    && echo "${plainB_redF}${redB_whiteF} visa-monitor-failed:FIX_PROXY ${reset_color}" > $op \
    && sleep 15 && return
  # "I väntan på beslut" (Swedish) = awaiting decision; the active CSS
  # class marks the current stage on the status page.
  grep ">I väntan på beslut<" ${tmp_op} | grep -q 'class="active"'
  [[ $? -eq 0 ]] \
    && echo "${plainB_yelF}${yelB_blaF} visa-pending ${reset_color}" > $op \
    && sleep 15 && return
  # "Beslut fattat" = decision made.
  grep ">Beslut fattat<" ${tmp_op} | grep -q 'class="active"'
  [[ $? -eq 0 ]] \
    && echo "${plainB_greF}${greB_blaF} visa-decided ${reset_color}" > $op \
    && sleep 15 && return
  # Neither stage matched: the page layout changed or the lookup failed.
  echo "${plainB_redF}${redB_whiteF} visa-monitor-failed ${reset_color}" > $op
  sleep 15
}
# Function that lists all the service agents to be launched.
# Entries are <function>:<number>; presumably the number is the poll
# interval in seconds used by whatever scheduler consumes this list —
# TODO confirm against the launch-agent generator.
function list-service-info() {
  cat <<EOS
prunedockercontainers:300
prunesetupbackups:3600
tmuxunreadmailcount:30
gmailinboxunreadcount:60
monitorvisastatus:1800
EOS
}
# Function for all directory-watch services: <function>:<watched directory>.
function list-dir-service-info() {
  cat <<EOS
pushnotification:${push_notification_dir}
EOS
}
# Cron-like timed services; currently empty (timesheetslave example kept
# commented out below as documentation of the field layout).
function list-timedservice-info() {
  #service:minute:hour:day:weekday:Month
  #timesheetslave:21:16:-:5:-
  cat <<EOTS
EOTS
}
# If the operation's config file contains "switch: off", do nothing; sleep
# briefly so a KeepAlive launchd agent does not busy-spin relaunching us.
if grep -sq "switch: off" "$config_file"; then
    sleep 15
    exit 0
fi
# Dispatch: $operation names one of the functions above; forward the
# remaining arguments verbatim ("$@" quoted, unlike the original's bare $@,
# so arguments containing spaces survive).
$operation "$@"
exit 0
| true |
d5fabf8baf0f00d8dda1ba053a0e1076b1a8c48b | Shell | yangyis/queuepusherl | /start_rabbit.sh | UTF-8 | 398 | 2.59375 | 3 | [] | no_license | #!/bin/sh
# Resolve this script's own location so the RabbitMQ checkout can be found
# relative to it (<repo parent>/rabbitmq-server).
# Fixes vs. the original: $(…) instead of backticks, quoted "$0"/paths so
# directories with spaces work, and a checked cd so `make run` cannot fire
# in the wrong directory.
SCRIPT=$(realpath "$0")
SCRIPTPATH=$(dirname "$SCRIPT")
ROOTPATH=$(dirname "$SCRIPTPATH")
RABBITPATH="${ROOTPATH}/rabbitmq-server"
cd "$RABBITPATH" || exit 1
export RABBITMQ_NODENAME="rabbit@localhost"
#export RABBITMQ_NODE_IP_ADDRESS=localhost
#export RABBITMQ_NODE_PORT=5672
export RABBITMQ_LOG_BASE=/tmp
export RABBITMQ_CONFIG_FILE="${SCRIPTPATH}/priv/rabbit"
export RABBITMQ_CONSOLE_LOG=1
# Replace the shell with the broker process.
exec make run
| true |
1e7390124835b60f8af0172bf392e317458ab6fa | Shell | OpenPrunus/mission-improbable | /update.sh | UTF-8 | 3,886 | 3.6875 | 4 | [] | no_license | #!/bin/bash
set -e
# Print invocation help on stdout and terminate with a failure status.
usage() {
    printf '%s\n' "Usage: $0 -c <new_copperhead_factory_dir> -d <device_type> [--no-tor]"
    exit 1
}
# Use GNU getopt to capture arguments as it allows us to have long options
# which the bash builtin getopts doesn't support. We also still support the
# old # positional arguments for now, but don't advertise them in the usage().
TEMP=$(getopt -o 'hc:d:T::' --long 'help,copperhead:,device:,no-tor::' -- "$@")
[ $? -ne 0 ] && usage
eval set -- "$TEMP"; unset TEMP
# Set defaults
NO_TOR=0
# Parse the args
while true; do
  case "$1" in
    '-c'|'--copperhead')
      COPPERHEAD_DIR=$2
      shift 2;
      continue
    ;;
    '-d'|'--device')
      DEVICE=$2
      shift 2
      continue
    ;;
    '-T'|'--no-tor')
      NO_TOR=1
      shift
      continue
    ;;
    '-h'|'--help')
      usage
    ;;
    # NOTE(review): the quotes make this match the literal two-character
    # string "-*" only, not "any option" — effectively dead code, though
    # getopt above already rejects unknown options.
    '-*')
      echo "Unknown option: $1" >&2
      exit 1
    ;;
    '--')
      shift
      break
    ;;
    *)
      POSITIONAL="$POSITIONAL $1"
      shift
      continue
    ;;
  esac
done
set +e
# Backwards compatibility for positional arguments
# NOTE(review): with $1 unset, `[ -n $1 ]` collapses to the one-argument
# test `[ -n ]`, which is TRUE — the empty-assignment is later caught by
# the `usage` guards, but the quoting here is fragile.
[ -z $COPPERHEAD_DIR ] && [ -n $1 ] && COPPERHEAD_DIR=$1 && shift
[ -z $DEVICE ] && [ -n $1 ] && DEVICE=$1 && shift
set -e
export COPPERHEAD_DIR
export DEVICE
export NO_TOR
# Bail out if no Copperhead directory was provided or no device defined
[ -z ${COPPERHEAD_DIR} ] && usage
[ -z ${DEVICE} ] && usage
SUPERBOOT_DIR=$PWD/helper-repos/super-bootimg
SIMG2IMG_DIR=$PWD/helper-repos/android-simg2img
#if [ ! -f "./packages/gapps-delta.tar.xz" ]
#then
#  echo "You have to have a gapps-delta zip from a previous install :("
#  exit 1
#fi
# Device support is defined by the presence of per-device extras.
if [ ! -f "./extras/${DEVICE}/updater-script" ]
then
  echo "./extras/${DEVICE}/updater-script not found. Device unsupported?"
  exit 1
fi
# Unpack the factory image archive once into <copperhead>/images.
cd $COPPERHEAD_DIR
mkdir -p images
cd images
if [ ! -f "boot.img" ]
then
  unzip ../*.zip
fi
cd ../..
# Fetch APKs, inject su, apply the gapps delta and re-sign all images.
./fetch-apks.sh
./install-su.sh $COPPERHEAD_DIR $SUPERBOOT_DIR
./apply-gapps-delta.sh $COPPERHEAD_DIR $SIMG2IMG_DIR
./re-sign.sh $COPPERHEAD_DIR $SIMG2IMG_DIR $SUPERBOOT_DIR
# We need to extract raw system, vendor images
$SIMG2IMG_DIR/simg2img ./images/system-signed.img ./images/system-signed.raw
$SIMG2IMG_DIR/simg2img ./images/vendor-signed.img ./images/vendor-signed.raw
# Assemble the sideloadable OTA zip under ./update.
mkdir -p update
cp ./images/system-signed.raw ./update/
cp ./images/vendor-signed.raw ./update/
cp ./images/boot-signed.img ./update/
cp ./images/recovery-signed.img ./update/
python ./extras/${DEVICE}/convert-factory.py $COPPERHEAD_DIR/radio-*.img $COPPERHEAD_DIR/bootloader-*.img ./update
cd update
mkdir -p META-INF/com/google/android/
mkdir -p META-INF/com/android/
cp ../extras/${DEVICE}/updater-script META-INF/com/google/android/updater-script
cp ../extras/${DEVICE}/update-binary META-INF/com/google/android/
cp ../extras/${DEVICE}/metadata META-INF/com/android
# XXX: bootloader.. not sure how to do that..
zip -r ../${DEVICE}-update.zip .
cd ..
# Sign the OTA zip with the release key generated during install.
java -jar ./extras/blobs/signapk.jar -w ./keys/releasekey.x509.pem ./keys/releasekey.pk8 ${DEVICE}-update.zip ${DEVICE}-update-signed.zip
# Interactive walkthrough: put the device into recovery/sideload mode.
echo
echo "Now please reboot your device into recovery:"
echo "  1. Reboot into Fastboot with Power + Volume Down"
echo "  2. Use Volume Down to select Recovery, and press Power"
echo "  3. Briefly tap Power + Volume-Up to get past the broken android logo."
echo -n "[Hit Enter to continue...]"
read junk
echo "Now select 'Apply Update from ADB' with Volume Down, and press Power."
echo -n "[Hit Enter to continue...]"
read junk
if [ -z "$(adb devices | grep sideload)" ]
then
  echo
  echo "You need to unplug and replug your device after starting sideload.."
  echo -n "[Hit Enter to continue...]"
  read junk
  # A sleep is needed to ensure the device is successfully detected after plugging back in
  sleep 5
fi
adb sideload ${DEVICE}-update-signed.zip
echo
echo "All done! Yay! Select Reboot into System and press power."
| true |
882dec1533ccf7803a6c67c4f02a86fc60eff9a0 | Shell | PurpleBabar/VagrantUtils | /newhost.sh | UTF-8 | 1,950 | 3.75 | 4 | [] | no_license | #!/bin/bash
# Defining variables
# Usage: newhost.sh <project-name>
# Creates an Apache vhost config under vhosts/, registers dev.<name> in
# /etc/hosts, and enables the site inside the Vagrant VM.
# Fixes vs. the original: `read $kind` stored the answer in $REPLY and left
# $kind empty (the Symfony branch could never trigger); user input was used
# as a printf FORMAT string; expansions are now quoted.
host="dev.$1"
host_name="$1"
host_file="vhosts/$1.conf"
conf_file="$1.conf"
# Testing if host exists
if [ -f "$host_file" ];
then
	echo "Host $host_file already exists"
	exit 1
else
	echo "Creating new host : $host_name..."
fi
# Testing if projects directory exists
if [ -d "projects" ];
then
	echo "Projects directory ok..."
else
	mkdir projects
	chmod 777 projects
fi
# Starting creation: map the project hostname onto the VM's fixed IP.
printf "\n192.168.42.42 %s" "$host" >> /etc/hosts
echo "New host added : $host..."
# Creating Host File in vhosts
touch "$host_file"
chmod 777 "$host_file"
printf "# This tells Apache that you will be using name-based vhosts on port 80" >> "$host_file"
printf "\n# Note: Only required when using Apache version 2.2.x or lower" >> "$host_file"
echo "Writing comments..."
echo "Init..."
echo "Creating Virtual Host"
printf "\n<VirtualHost *:80>" >> "$host_file"
echo "Creating Virtual Host..."
# Init ServerAdmin
echo "Please enter ServerAdmin mail:"
read -r server_admin
printf "\n    ServerAdmin %s" "$server_admin" >> "$host_file"
echo "Writing ServerName : $host..."
printf "\n    ServerName %s" "$host" >> "$host_file"
# Init Project Path
project_path="/home/vagrant/share/$host_name"
echo "Writing DocumentRoot : $project_path..."
# Completing Project Path
echo "Kind of project [Symfony, Twiger]:"
# BUG FIX: was `read $kind` — with kind unset that is plain `read`, which
# stores into REPLY and never sets $kind.
read -r kind
if [[ "$kind" = "Symfony" ]];
then
	project_path="$project_path/web"
fi
printf "\n    DocumentRoot %s" "$project_path" >> "$host_file"
printf "\n    <Directory %s>" "$project_path" >> "$host_file"
printf "\n        Options Indexes FollowSymlinks MultiViews" >> "$host_file"
printf "\n        AllowOverride All" >> "$host_file"
printf "\n        Order allow,deny" >> "$host_file"
printf "\n        Allow from all" >> "$host_file"
printf "\n    </Directory>" >> "$host_file"
printf "\n</VirtualHost>" >> "$host_file"
# Update commands to vagrant
sudo -u alexandrelalung vagrant ssh -- -t "cd /etc/apache2/sites-available; sudo a2ensite $conf_file; sudo service apache2 reload"
| true |
80ab7351bcfb72dc2974882bb13b403c42dc2fe3 | Shell | htugraz/abs | /i686/community/python-dogpile.core/PKGBUILD | UTF-8 | 1,416 | 2.59375 | 3 | [] | no_license | # $Id: PKGBUILD 142429 2015-10-01 16:04:57Z fyan $
# Maintainer: Felix Yan <felixonmars@archlinux.org>
# Contributor: Sibren Vasse <arch at sibrenvasse.nl>
# Contributor: Quentin Stievenart <acieroid@awesom.eu>
# Split package: builds both the Python 3 and Python 2 flavours of
# dogpile.core from one source tarball.
pkgbase=python-dogpile.core
pkgname=(python-dogpile.core python2-dogpile.core)
_pypiname=dogpile.core
pkgver=0.4.1
pkgrel=4
pkgdesc="A 'dogpile' lock, typically used as a component of a larger caching solution"
arch=('any')
url="http://pypi.python.org/pypi/dogpile.core"
license=('BSD')
makedepends=('python-setuptools' 'python2-setuptools')
checkdepends=('python-nose' 'python2-nose')
source=("http://pypi.python.org/packages/source/d/$_pypiname/$_pypiname-$pkgver.tar.gz")
md5sums=('01cb19f52bba3e95c9b560f39341f045')
prepare() {
  # Duplicate the source tree so py2 and py3 build independently.
  cp -a "$_pypiname-$pkgver"{,-py2}
}
build() {
  cd $_pypiname-$pkgver
  python setup.py build
  cd ../$_pypiname-$pkgver-py2
  python2 setup.py build
}
check() {
  # Run the upstream test suite under both interpreters.
  cd $_pypiname-$pkgver
  nosetests3
  cd ../$_pypiname-$pkgver-py2
  nosetests2
}
package_python-dogpile.core() {
  depends=('python')
  cd "$srcdir/$_pypiname-$pkgver"
  python setup.py install --root="$pkgdir/" --optimize=1
  install -Dm644 LICENSE "$pkgdir/usr/share/licenses/$pkgname/LICENSE"
}
package_python2-dogpile.core() {
  depends=('python2')
  cd "$srcdir/$_pypiname-$pkgver-py2"
  python2 setup.py install --root="$pkgdir/" --optimize=1
  install -Dm644 LICENSE "$pkgdir/usr/share/licenses/$pkgname/LICENSE"
}
# vim:set ts=2 sw=2 et:
| true |
82211518cbe96d4bba8f819242c446b8dda783ff | Shell | mozilla-vn/mozilla-l10n | /ttk-update | UTF-8 | 144 | 2.703125 | 3 | [] | no_license | #!/bin/bash
# Update translation files against the current templates for all languages,
# or only for the languages given on the command line.
# Fixes vs. the original: quoted "$(dirname "$0")" so a path with spaces
# works, and "$@" instead of $* so language arguments are forwarded intact.
source "$(dirname "$0")/ttk.inc.sh"
stop_if_running
if [ $# -ne 0 ]; then
  langs=$(which_langs "$@")
fi
# $langs is intentionally unquoted: it word-splits into one argument per
# language code (empty selects every language) — matches the helpers
# sourced from ttk.inc.sh.
update_against_templates $langs
| true |
dd0174199ab2b25c4f236d5ba3fca0702b635eb2 | Shell | jpilet/opticode | /generate_script/compress_images.sh | UTF-8 | 303 | 2.75 | 3 | [
"MIT"
] | permissive | #!/bin/bash
set -e
# Absolute path of the repository root (one level above this script).
ROOT=$(cd "$(dirname "$0")/.." ; pwd)
IMG="${ROOT}/img"
# Halve the hi-res sources and recompress at max PNG compression;
# "-type palette" additionally converts code.png to indexed colour.
convert -resize 50% "${IMG}/code_high_res.png" -define "png:compression-level=9" -type palette "${IMG}/code.png"
convert -resize 50% "${IMG}/code_review_high_res.png" -define "png:compression-level=9" "${IMG}/codereview.png"
| true |
92e3fe7712a1556250c268f81f51cdb2e46a76be | Shell | anc15791/P4 | /scripts/count_bytes.sh | UTF-8 | 1,350 | 3.53125 | 4 | [] | no_license | #!/bin/bash
#echo "INFO: This command runs on Firewall. It wont show any output unless firewall is activated and initialised"
#echo ""
counter="-1"
if [ "$1" = "192.168.56.2" ]
then
if [ "$2" = "192.168.60.2" ]
then
case "$3" in
"icmp")
counter="0" ;;
"tcp")
counter="2" ;;
"udp")
counter="4" ;;
esac
else
echo "ERROR: invalid dest"
exit
fi
elif [ "$1" = "192.168.60.2" ]
then
if [ "$2" = "192.168.56.2" ]
then
case "$3" in
"icmp")
counter="1" ;;
"tcp")
counter="3" ;;
"udp")
counter="5" ;;
esac
else
echo "ERROR: invalid dest"
exit
fi
else
echo "ERROR: invalid src"
exit
fi
if [ "$4" = "s1" ]
then
addr="192.168.59.101"
fi
if [ "$4" = "s2" ]
then
addr="192.168.59.102"
fi
if [ "$4" = "s3" ]
then
addr="192.168.59.104"
fi
cmd1='counter_read pkt_src_counter '$counter
cmd2='/home/ubuntu/p4guard/targets/p4guard/sswitch_CLI firewall.json 9090 '$addr
output="$(echo "$cmd1" | $cmd2 2>/dev/null)"
#echo $output | sed -e ''
nob="0"
out=$(echo $output | awk -F'[=,]' '{print $5}')
nob=$(echo $out | awk -F'[)]' '{print $1}')
#echo "Number Of "$3" packets = "$nop
echo $nob
| true |
7a1cf4cd7bc2056b9025cb11b5336aa7564f7b5b | Shell | abrownfi/abrownfi_cli | /mpathcleaner | UTF-8 | 671 | 3.78125 | 4 | [] | no_license | #!/bin/bash
# Convert `multipath -v4 -ll` output to `multipath -ll` output.
# Works for RHEL 5 and RHEL 6 output.
#
# Input file: first CLI argument; otherwise the first sosreport capture
# named "multipath_-v4_-ll" found under the current directory.
file=$1
# If no parameter, look for an existing one. Naiively grab the first
if [[ -z $file ]]
then
    file=`find . -name multipath_-v4_-ll | head -n 1`
fi
# Need to be in a place where there is a multipath_-v4_-ll or specify one
if [[ -z $file ]]
then
    echo "Usage: mpathcleaner <filename>"
    exit 1
fi
# do work
# Keep only the map header lines ("… dm-…"), size lines and the tree lines
# that start with |/` path-group characters; drop blacklist sections.
#grep --color=always -e ") dm-" -e "^\[\?size" -e "^ *[\\|\`]" -e "[a-zA-Z0-9\-]* dm-" "$file" | grep -v -e "blacklist"
grep --color=never -e ") dm-" -e "^\[\?size" -e "^ *[\\|\`]" -e "[a-zA-Z0-9\-]* dm-" "$file" | grep -v -e "blacklist"
| true |
5081594bf32b08184b8fe02c47a572891c9965c8 | Shell | predicateacademy/coding-cameras | /clip/memer | UTF-8 | 293 | 3.140625 | 3 | [] | no_license | #!/bin/bash
# Create a top/bottom-captioned animated GIF ("animation.gif" in the CWD)
# from an input image.
# Usage: memer <image> <top text> <bottom text>
# Fixes vs. the original: iterate the glob directly instead of parsing `ls`
# output, and quote all expansions so captions/paths with spaces work.
FILE="$1"
TOP_TEXT="$2"
BOTTOM_TEXT="$3"
workdir=/tmp/unpack
mkdir -p "$workdir"
# Split the (possibly animated) input into individual coalesced frames.
convert -coalesce "$FILE" "$workdir/out%05d.jpg"
# Caption every frame in place.
for f in "$workdir"/*.jpg
do
  python memegenerator.py "$f" "$TOP_TEXT" "$BOTTOM_TEXT"
done
# Reassemble the frames into a looping GIF (10/100s per frame).
convert -delay 10 -loop 0 "$workdir"/out* animation.gif
rm -rf "$workdir"
| true |
926c511699beaa4544b56e107273bf409ad576c0 | Shell | its-me-mario/arizona-bootstrap | /scripts/serve-review-site.sh | UTF-8 | 839 | 3.296875 | 3 | [
"MIT"
] | permissive | #!/bin/sh
#------------------------------------------------------------------------------
#
# serve-review-site.sh: serve the documentation site locally.
#
# Required environment variables
# - AZ_BOOTSTRAP_DEST_DIR Internal directory used for the build
# - AZ_BOOTSTRAP_SOURCE_DIR Source directory for files and directories
#
# Optional environment variables
# - AZ_SHORT_VERSION Short (generally two-digit) documentation version
# - AZ_SITE_BASE_URL Pefix to add after the host to all URLs served locally
# - AZ_SITE_HOST Name or IP address at which to serve the documentation site
# - AZ_VERSION Full current Arizona Bootstrap version number
#
#------------------------------------------------------------------------------
set -e
# Link the source files/dirs into the build directory (helper presumably on
# PATH inside the build container — confirm).
create-source-links
cd "$AZ_BOOTSTRAP_DEST_DIR"
# Generate the Jekyll config for this version, build the dist assets, then
# serve the documentation site locally.
create-jekyll-config
npm run dist
npm run docs-serve
| true |
782a8784ff988005e04397f759ee30bcac01b2c0 | Shell | loli/nspipeline | /pop_sequencelesionsegmentation.sh | UTF-8 | 1,386 | 3.28125 | 3 | [] | no_license | #!/bin/bash
#####
# APplyies the forests to a (preliminary) segmentation of the brain lesion in sequence space.
#####
## Changelog
# 2014-05-08 Adapted to the new, distributed calculation scheme.
# 2013-04-03 Added a morphological post-processing step (and removed again).
# 2013-03-25 Updated to new, variable version.
# 2013-11-25 Updated to use new script to distinguish between sequence space and std space features
# 2013-11-05 adapted to new brain mask location
# 2013-10-29 created
# include shared information
source $(dirname $0)/include.sh
# main code
log 2 "Applying random decision forests to segment lesion" "[$BASH_SOURCE:$FUNCNAME:$LINENO]"
# For each case: apply the trained forest (<case>.pkl) to the sequence-space
# features inside the brain mask, producing a binary segmentation plus a
# probability map.
for i in "${images[@]}"; do
    mkdircond ${sequencelesionsegmentation}/${i}
    runcond "${scripts}/apply_rdf.py ${sequenceforests}/${i}.pkl ${sequencefeatures}/${i}/ ${sequencebrainmasks}/${i}.nii.gz ${featurecnf} ${sequencelesionsegmentation}/${i}/segmentation.nii.gz ${sequencelesionsegmentation}/${i}/probabilities.nii.gz"
done
log 2 "Morphological post-processing" "[$BASH_SOURCE:$FUNCNAME:$LINENO]"
# Remove connected components smaller than ${minimallesionsize}; run per
# case in parallel via the helpers from include.sh.
function post_processing ()
{
    i=$1
    runcond "${scripts}/remove_small_objects.py ${sequencelesionsegmentation}/${i}/segmentation.nii.gz ${sequencelesionsegmentation}/${i}/segmentation_post.nii.gz ${minimallesionsize}"
}
parallelize post_processing ${threadcount} images[@]
log 2 "Done." "[$BASH_SOURCE:$FUNCNAME:$LINENO]"
| true |
02c5138be4901f8d75bd47a10d9355fb8a7e87b5 | Shell | gpupo/common-schema | /bin/build.sh | UTF-8 | 475 | 3.21875 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
cd "$(dirname "$0")/..";
DESTPATH="${1:-var/src}";
NAMESPACE="${2:-App}";
rm -rf build/* var/src/*;
rsync -aq src/ORM/ build/;
sed bin/sed.txt -e "s/CS_NAMESPACE/$NAMESPACE/g" > var/sed.txt;
build_replace() {
FILE="${1}";
sed -i'' --file=var/sed.txt $FILE;
}
export -f build_replace
find build/ -type f -print0 | xargs -0 -I {} bash -c "build_replace {}"
rsync -av --ignore-existing --exclude-from './bin/update-exclude.txt' build/ ./$DESTPATH/;
| true |
319e092cf6b1e01262711321f8adb15c73fb30c4 | Shell | delkyd/alfheim_linux-PKGBUILDS | /python2-pystache-git/PKGBUILD | UTF-8 | 788 | 2.546875 | 3 | [] | no_license | # Maintainer: Daniel Nagy <danielnagy at gmx de>
# Contributor: Tom Vincent <http://tlvince.com/contact/>
# Contributor: Tyler Harper <tyler@cowboycoding.net>
# VCS package: builds python2-pystache from the upstream git HEAD.
_gitname=pystache
pkgname=python2-pystache-git
pkgver=1057.6a54b9a
pkgrel=2
pkgdesc="The mustache template engine written in python"
arch=(any)
url="https://github.com/defunkt/pystache"
license=('MIT')
depends=('python2')
makedepends=('git')
provides=('python2-pystache')
conflicts=('python2-pystache')
source=( "git+$url" )
md5sums=( 'SKIP' )
# Derive the VCS version as <commit count>.<short hash> of HEAD.
pkgver() {
  cd "$srcdir/$_gitname"
  # Use the tag of the last commit
  printf "%s.%s" "$(git rev-list --count HEAD)" "$(git rev-parse --short HEAD)"
}
build() {
  cd "$srcdir/$_gitname"
  python2 setup.py build
}
package() {
  cd "$srcdir/$_gitname"
  python2 setup.py install --prefix=/usr --root="$pkgdir"
}
| true |
11ddead7be63d4baf733b099da156b6060acbb21 | Shell | enbarberis/sistemi_operativi | /lab11/es4/es4.sh | UTF-8 | 311 | 3.625 | 4 | [] | no_license | #!/bin/bash
# Usage: es4.sh <user> <dir>
# For every regular file under <dir> owned by <user> whose first lines
# include a "***Da modificare" marker, write a copy named <file>_mod with
# all such marker lines removed (the original file is kept).
# Fixes vs. the original: NUL-delimited find loop instead of word-splitting
# $(find …) (safe for paths with spaces), quoted expansions, and direct
# redirection to <file>_mod instead of a shared "tmp" scratch file in the
# current directory.
if [ $# -lt 2 ] ; then
    echo "Parameters error!"
    echo "Usage $0 <user> <dir>"
    exit 1
fi
find "$2" -user "$1" -type f -print0 |
while IFS= read -r -d '' f
do
    if egrep --quiet "^\*\*\*Da modificare" "$f"
    then
        awk '{if ($0 !~ /^\*\*\*Da modificare/) print $0;}' "$f" > "${f}_mod"
        #rm -f "$f"
    fi
done
| true |
71d7267f9d42d89d3980f53a685352c75b500857 | Shell | woggioni/x-toolchain | /packages/sqlite/PKGBUILD | UTF-8 | 1,988 | 2.65625 | 3 | [] | no_license | # $Id$
# Maintainer: Andreas Radke <andyrtr@archlinux.org>
# Contributor: Tom Newsom <Jeepster@gmx.co.uk>
# Cross-compiled sqlite for the ${_target} toolchain (set by the build env).
pkgname="${_target}-sqlite"
_srcver=3360000
_docver=${_srcver}
#_docver=3330000
pkgver=3.36.0
pkgrel=1
pkgdesc="A C library that implements an SQL database engine"
arch=('x86_64')
license=('custom:Public Domain')
url="http://www.sqlite.org/"
depends=("${_target}-gcc" "${_target}-readline")
makedepends=("${_target}-readline" "${_target}-configure")
source=("https://www.sqlite.org/2021/sqlite-autoconf-${_srcver}.tar.gz")
sha256sums=('bd90c3eb96bee996206b83be7065c9ce19aef38c3f4fb53073ada0d0b69bbce3')
options=('!emptydirs' '!makeflags' 'staticlibs' '!buildflags') # json extensions breaks parallel build
prepare() {
    # Regenerate the autotools files and create a per-target build dir.
    pushd "${srcdir}/sqlite-autoconf-${_srcver}"
    autoreconf -vfi
    mkdir -p build-${_target}
    popd
}
build() {
    # Compile-time feature flags baked into the library (metadata, FTS,
    # dbstat, math functions, larger limits, …).
    export CPPFLAGS="$CPPFLAGS -DSQLITE_ENABLE_COLUMN_METADATA=1 \
              -DSQLITE_ENABLE_UNLOCK_NOTIFY \
              -DSQLITE_ENABLE_DBSTAT_VTAB=1 \
              -DSQLITE_ENABLE_FTS3_TOKENIZER=1 \
              -DSQLITE_SECURE_DELETE \
              -DSQLITE_ENABLE_STMTVTAB \
              -DSQLITE_MAX_VARIABLE_NUMBER=250000 \
              -DSQLITE_MAX_EXPR_DEPTH=10000 \
              -DSQLITE_ENABLE_MATH_FUNCTIONS"
    # build sqlite
    pushd "${srcdir}/sqlite-autoconf-${_srcver}/build-${_target}"
    ${_target}-configure \
        --disable-editline \
        --enable-readline \
        --enable-fts3 \
        --enable-fts4 \
        --enable-fts5 \
        --enable-rtree \
        --enable-json1 \
        --enable-fts5 \
        --enable-session \
        .
    make
    popd
}
package() {
    pkgdesc="A C library that implements an SQL database engine"
    provides=("${_target}-sqlite3=$pkgver")
    replaces=("${_target}-sqlite3")
    pushd "${srcdir}/sqlite-autoconf-${_srcver}/build-${_target}"
    make DESTDIR=${pkgdir} install
    popd
}
# Shadow strip(1) with the cross toolchain's strip and export it —
# presumably so makepkg's stripping step handles the target binaries;
# TODO confirm against the toolchain's makepkg configuration.
strip() {
    ${_target}-strip $@
}
export -f strip
| true |
a7c8f7b85495c53e16fd9a55c44932db2db24774 | Shell | eccenca/openlink-virtuoso-7-docker | /assets/virtuoso_helper.sh | UTF-8 | 3,441 | 3.859375 | 4 | [] | no_license |
#!/bin/bash
set -e
# Virtuoso server configuration file inside the mounted data dir ($VIRT_DB).
CONFIG="${VIRT_DB}/virtuoso.ini"
# Import an RDF/Turtle file into the given graph via isql (DB.DBA.TTLP_MT).
# Reads the globals set by the app:importData dispatcher below:
#   oldpassword - current dba password, filename - file to import,
#   graph - target graph IRI.
# BUG FIX: the original tested and interpolated "$(unknown)" — a
# nonexistent command — where the "filename" variable was intended.
importData () {
  if [ -z "${oldpassword}" ]; then
    echo "Please enter the current password (default pw is dba)"
    return 1
  fi
  if [ -z "${filename}" ]; then
    echo "Please specify a filename"
    return 1
  fi
  if [ -z "${graph}" ]; then
    echo "Please specify a graph for the data which should be imported"
    return 1
  fi
  /opt/virtuoso-opensource/bin/isql 1111 dba "$oldpassword" exec="DB.DBA.TTLP_MT (file_to_string_output ('${filename}'), '', '${graph}');"
}
deleteData(){
if [ -z ${oldpassword} ]; then
echo "Please enter the current password (default pw is dba)"
return 1
fi
if [ -z ${graph} ]; then
echo "Please specify a graph which should be deleted"
return 1
fi
/opt/virtuoso-opensource/bin/isql 1111 dba $oldpassword exec="SPARQL DROP SILENT GRAPH <${graph}>;"
}
backupData () {
if [ -z ${oldpassword} ]; then
echo "Please enter the current password (default pw is dba)"
return 1
fi
BACKUPDIR="$VIRT_DB/backup"
mkdir -p $BACKUPDIR
BACKUPDATE=`date +%y%m%d-%H%M`
/opt/virtuoso-opensource/bin/isql 1111 dba $oldpassword <<ScriptDelimit
backup_context_clear();
checkpoint;
backup_online('virt_backup_$BACKUPDATE#',150,0,vector('$BACKUPDIR'));
exit;
ScriptDelimit
}
restoreData () {
if [ -z ${backupprefix} ]; then
echo "Please enter a dump/backup prefix (e.g. virt_backup_yymmdd-hhmm#)"
return 1
fi
BACKUPDIR="$VIRT_DB/backup"
cd $BACKUPDIR
/opt/virtuoso-opensource/bin/virtuoso-t -c $CONFIG +foreground +restore-backup $backupprefix
}
changeAdminPassword () {
if [ -z ${oldPW} ]; then
echo "Please enter the current password (default pw is dba)"
return 1
fi
if [ -z ${newPW} ]; then
echo "Please enter the new password for user dba (virtuoso-admin)"
return 1
fi
/opt/virtuoso-opensource/bin/isql 1111 dba $oldPW exec="set password ${oldPW} ${newPW};"
}
# Print the list of supported app:* commands to stdout.
appHelp () {
  cat <<EOF
Available options:
 app:importData [DBA-PASSWD, FILENAME, GRAPH] - import given FILENAME to requested GRAPH 
 app:deleteData [DBA-PASSWD, GRAPH] - delete requested GRAPH
 app:backupData [DBA-PASSWD] - create a backup with todays timestamp in $VIRT_DB/backup
 app:restoreData [BACKUP-PREFIX] - restore a backup with given backup-prefix (e.g. virt_backup_yymmdd-hhmm#)
 app:changeAdminPassword [OLD-DBA-PASSWD, NEW-DBA-PASSWD] - change the admin password
 app:help - Displays the help
 [command] - Execute the specified linux command eg. bash.
EOF
}
case "$1" in
app:importData)
shift
oldpassword=$1
filename=$2
graph=$3
importData
;;
app:deleteData)
shift
oldpassword=$1
graph=$2
deleteData
;;
app:backupData)
shift
oldpassword=$1
backupData
;;
app:restoreData)
shift
backupprefix=$1
restoreData
;;
app:changeAdminPassword)
shift
oldPW=$1
newPW=$2
changeAdminPassword
;;
app:help)
appHelp
;;
*)
if [ -x $1 ]; then
$1
else
prog=$(which $1)
if [ -n "${prog}" ] ; then
shift 1
$prog $@
else
appHelp
fi
fi
;;
esac
exit 0 | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.