blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
48ae56f829ad86b91abc507120072d39ce67a299
|
Shell
|
williwaas/dotfileselitebook
|
/bardylcustom.sh
|
UTF-8
| 1,691
| 3.703125
| 4
|
[] |
no_license
|
#!/usr/bin/env dash
# vim: set ft=sh:
#
# Dylan's bar script
#
# Created by Dylan Araps.
#
# Depends on: xorg-xrandr, wmctrl, mpc, lemonbar, ip, xdotool
# Bar appearance settings, overridable through the environment.
# NOTE(review): font/icon/height are not referenced anywhere below --
# presumably consumed by the lemonbar invocation this script is piped
# into; confirm before removing.
font="${BAR_FONT:-"-*-lemon-*"}"
icon="${BAR_ICON:-"-*-siji-*"}"
height="${BAR_HEIGHT:-36}"
get_mon_width() {
    # Report the horizontal resolution of the primary monitor.
    # Falls back to 1920 when xrandr is missing or reports nothing.
    if command -v xrandr >/dev/null 2>&1; then
        resolution=$(xrandr --nograb --current |
            awk -F 'primary |x' '/primary/ {print $2}')
    fi
    printf "%s\\n" "${resolution:-1920}"
}
get_workspaces() {
# Create a dynamic workspace switcher.
# One entry per desktop from `wmctrl -d`; desktops whose viewport field
# ($6) is not "0,0" are skipped.  The active desktop (marked "*" in $2)
# is printed plainly; inactive ones ("-") are wrapped in lemonbar %{F..}
# color tags using $color8 (default #F0F0F0).
# NOTE(review): the %{F...} lemonbar tags sit inside awk's printf FORMAT
# string, and $NF / " %{F}" are passed as extra arguments with no matching
# conversion specifiers -- whether the desktop name is actually printed
# depends on the awk implementation's handling of "%{"; verify against
# the running bar before restyling.
workspaces="$(wmctrl -d | awk -v fg="${color8:-#F0F0F0}" \
'$6!="0,0"{next}
$2=="-"{printf " %{F"fg"}" $NF " %{F}"}
$2=="*"{printf " " $NF " "}')"
printf "%s\\n" "$workspaces"
}
get_window() {
    # Print the focused window's title, truncated to 75 characters.
    title=$(xdotool getwindowfocus getwindowname)
    printf "%.75s\\n" "$title"
}
get_date() {
    # Current date and time, e.g. "Mon 01 Jan - 12:00".
    date +"%a %d %b - %H:%M"
}
get_battery() {
    # Battery charge percentage (e.g. "97%") from upower's BAT0 report.
    # A single awk call replaces the redundant grep | awk pipeline.
    upower -i /org/freedesktop/UPower/devices/battery_BAT0 |
        awk '/percentage/ {print $2}'
}
main() {
    # Main script function: loop forever printing one lemonbar-formatted
    # status line per iteration.
    # Info that doesn't need to be grabbed more than once.
    # NOTE(review): width is never used in this script -- presumably meant
    # for the lemonbar geometry flags; confirm before removing.
    width="$(get_mon_width)"

    # Loop and print the info.
    while :; do
        workspaces="$(get_workspaces)"
        window="$(get_window)"
        date="$(get_date)"
        # BUGFIX: the battery reading was stored in a variable named
        # "volume" and printed next to an undefined ${song} (a leftover
        # mpc "now playing" slot), leaving a dangling " | " in the bar.
        # Use a matching variable name and drop the dead ${song} slot.
        battery="$(get_battery)"
        printf "%s%s%s\\n" \
            "%{l} ${workspaces} | ${window}" \
            "%{c}${date}" \
            "%{r}${battery} "
        sleep .1s
    done
}
main "$@"
| true
|
062a2d30d90f09036abf135eeb0bdeba4ca275ac
|
Shell
|
idolivneh-stratoscale/inaugurator
|
/sh/relative_copy_driver.sh
|
UTF-8
| 487
| 3.140625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# Copy a kernel module, its dependency chain, and its firmware blobs into
# the initrd being built.
#   $1             - module name
#   DEST           - (env) target root, consumed by relative_copy_glob.sh
#   KERNEL_UNAME_R - (env) kernel version to resolve modules against
set -e
# Copy the module plus everything it depends on, in modprobe order.
for ko in $(modprobe --show-depends "$1" --set-version="$KERNEL_UNAME_R" | sed 's/insmod //'); do
    sh/relative_copy_glob.sh "$ko"
done
# BUGFIX: ">& /dev/null" is a bash/csh-ism and a syntax error under strict
# POSIX sh (e.g. dash); use the portable redirection form instead.
# NOTE(review): the purpose of this bare probe (the loop below re-runs the
# same command) is unclear -- possibly an availability check under set -e.
modinfo --field firmware "$1" --set-version="$KERNEL_UNAME_R" > /dev/null 2>&1
for firmware in $(modinfo --field firmware "$1" --set-version="$KERNEL_UNAME_R"); do
    sh/relative_copy_glob.sh "/lib/firmware/$firmware"
done
| true
|
30da52c59ce69ed2ef5dbae1db19fd1647c0740c
|
Shell
|
SUSE/sesci
|
/docker/deepsea/build.sh
|
UTF-8
| 1,046
| 3.59375
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash -ex
# Build DeepSea RPMs inside Docker and extract them into ./repo.
#
# Arguments:
#   $1 - target directory holding Dockerfile-base and Dockerfile-rpm (default: $PWD)
#   $2 - directory with a DeepSea checkout (default: "deepsea"; cloned if absent)
DEEPSEADIR=${2:-"deepsea"}
TARGETPATH=$(cd "${1:-$PWD}"; echo "$PWD")
TARGETNAME=${TARGETPATH##*/}
# Docker image names must be lowercase.
TARGETBASE=${TARGETNAME,,}
TARGETBUILD=${TARGETBASE}-rpm
echo TARGET PATH=$TARGETPATH
echo TARGET NAME=${TARGETPATH##*/}
export DHOME=${DHOME:-$(cd "$(dirname "$BASH_SOURCE")"; echo "$PWD")}
echo Docker home: $DHOME
# Clone DeepSea if the checkout directory is missing.
# BUGFIX: clone into ${DEEPSEADIR} (the directory that was just tested
# for), not a hard-coded "deepsea" -- the two diverged when $2 was given.
[[ -d ${DEEPSEADIR} ]] ||
    git clone https://github.com/SUSE/DeepSea.git "${DEEPSEADIR}"
# First FROM image of the base Dockerfile (skipping commented lines).
BASEIMAGE=$(grep FROM "$TARGETPATH/Dockerfile-base" | grep -v '^\s*#' | cut -d ' ' -f 2)
docker pull "$BASEIMAGE"
docker build \
    -t "$TARGETBASE" \
    -f "$TARGETPATH/Dockerfile-base" \
    "$TARGETPATH"
# When building out-of-tree, refresh the DeepSea copy next to the Dockerfiles.
[[ "$PWD" == "$TARGETPATH" ]] || {
    rm -rf "$TARGETPATH/deepsea"
    cp -a "$DEEPSEADIR" "$TARGETPATH/deepsea"
}
docker build \
    --no-cache \
    -t "$TARGETBUILD" \
    -f "$TARGETPATH/Dockerfile-rpm" \
    "$TARGETPATH"
rm -rf ./repo
#docker run -v $(pwd):/mnt $TARGETBUILD:latest sh -c 'mkdir -p /mnt/repo && cp -a rpmbuild/RPMS/* /mnt/repo'
# Extract the built RPMs from a throwaway container into ./repo.
ID=$(docker create "$TARGETBUILD:latest")
docker cp "$ID:/home/jenkins/rpmbuild/RPMS" repo
docker rm "$ID"
find ./repo
| true
|
e4b3db623ce56a5dd6a64aef83828474db85333a
|
Shell
|
darrenclark/my-vim-config
|
/export.sh
|
UTF-8
| 346
| 3.53125
| 4
|
[] |
no_license
|
#!/bin/bash
# Copies all files from 'files' directory back to $HOME.
# Refuses to run if any destination file/folder already exists.

# A non-empty GLOBIGNORE makes globs also match dotfiles (and never match
# "." itself), which matters here since most exported files are dotfiles.
GLOBIGNORE=.

# First pass: verify nothing at the destination would be overwritten.
for f in ./files/*; do
    [[ -e "$f" ]] || continue   # empty 'files' dir leaves the glob literal
    destpath="$HOME/$(basename "$f")"
    if [[ -e "$destpath" ]]; then
        echo "FAILED: $destpath already exists"
        exit 1
    fi
done

# BUGFIX: removed a stray debug `echo "hi"` left in the original.
cp -rv ./files/* ~/
echo "vim configs exported to $HOME"
| true
|
4b1e921db7446096fc6f0f1ab53c15bd040fe111
|
Shell
|
cloudfoundry/bosh-cli
|
/bin/clean
|
UTF-8
| 157
| 2.859375
| 3
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/usr/bin/env bash
set -eu -o pipefail

# Resolve the repository root (parent of this script's directory).
root_dir=$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)

printf '\n Cleaning...\n'

# Remove build artifacts for the module and all of its dependencies.
go clean -r "$root_dir"
| true
|
320d8c56a8bd5146707162882a091ef2d57bc74a
|
Shell
|
wzqcloud/cloud
|
/script/menu.sh
|
UTF-8
| 787
| 3.5625
| 4
|
[] |
no_license
|
#!/bin/bash
# Interactive menu for running admin helper scripts (lamp.sh, socket.sh).
# Directory that holds the helper scripts; created if missing.
path=/server/script
[ ! -d "$path" ] && mkdir -p $path
menu(){
    # Display the menu, read the user's choice, and dispatch to the
    # matching helper script under $path.
    cat <<END
1.[install lamp]
2.[内核优化]
3.[exit]
END
    read -p "please input the num you want: " num
    # Reject anything that is not a plain non-negative number.
    # BUGFIX: the original validated with `expr $num + 1`, which spawns a
    # process, leaves $num unquoted in the later [ -eq ] tests, and
    # conflates "result is 0" with "invalid input".
    [[ "$num" =~ ^[0-9]+$ ]] || {
        echo "the num you input must be {1|2|3}"
        exit 1
    }
    [ $num -eq 1 ] && {
        echo "start install lamp"
        sleep 3
        # BUGFIX: error message said "does not exit".
        [ -x "$path/lamp.sh" ] || {
            echo "$path/lamp.sh does not exist or can not be exec."
            exit 1
        }
        $path/lamp.sh
        exit $?
    }
    [ $num -eq 2 ] && {
        echo "内核优化................................"
        sleep 3
        [ -x "$path/socket.sh" ] || {
            echo "$path/socket.sh does not exist or can not be exec."
            exit 1
        }
        $path/socket.sh
        exit $?
    }
    [ $num -eq 3 ] && {
        echo "goodbye"
        exit 3
    }
    # Numeric but outside the 1-3 range.
    [[ ! "$num" =~ [1-3] ]] && {
        echo "Input error"
    }
}
menu
| true
|
a6e22274396dd45321671c731d970b37f4fb7cfa
|
Shell
|
tsweeney256/file_host
|
/database/install_db.sh
|
UTF-8
| 365
| 3.390625
| 3
|
[] |
no_license
|
#!/bin/bash
# Install the file_host database schema into PostgreSQL.
#
# Arguments (all optional):
#   $1 - database name       (default: file_host)
#   $2 - superuser name      (default: postgres)
#   $3 - port                (default: 5432)
#   $4 - host or socket dir  (default: /var/run/postgresql)
# BUGFIX: the dirname/readlink/$0 chain was unquoted and would break on
# paths containing whitespace.
thisdir="$(dirname "$(readlink -f "$0")")"
dbname="${1:-file_host}"
superuser="${2:-postgres}"
port="${3:-5432}"
host="${4:-/var/run/postgresql}"

# Run one SQL file against the target database.
function run_psql_file {
    psql -U "$superuser" -p "$port" -h "$host" -f "$1" "$dbname"
}

# Presumably order matters: roles first, then tables, then functions.
run_psql_file "${thisdir}/users.sql"
run_psql_file "${thisdir}/tables.sql"
run_psql_file "${thisdir}/functions.sql"
| true
|
d67779a4be7bd9c633b57e16e99f03ee6e9294e4
|
Shell
|
bellyfat/Decentralized-Spotify
|
/audius-tooling/deploy-scripts/change-k8s-service-versions.sh
|
UTF-8
| 2,245
| 4.0625
| 4
|
[
"MIT"
] |
permissive
|
# Bump a service version tag across the k8s helm charts and manifests,
# with interactive pauses for the manual push/merge steps.
# NOTE(review): no shebang, yet [[ ]] and `read -p` below are bashisms;
# the usage string says "sh change-service-version.sh", which would fail
# where /bin/sh is dash.  Consider adding "#!/bin/bash".
set -e
source ../setup-scripts/config/conf.sh
source ./utils.sh
# Inputs
# $1 -- new tag
# $2 -- old tag
# Validations
if [ "$1" == "" ] || [ "$2" == "" ]; then
echo "One or both tagnames are invalid: new -- $1 old -- $2";
echo "Example usage: sh change-service-version.sh 0.3.6 0.3.5";
exit 1;
fi
echo $1 -- new tagname
echo $2 -- old tagname
PROTOCOL_BRANCH=$(git rev-parse --abbrev-ref HEAD)
# NOTE(review): CURRENT_COMMIT is captured but never used below.
CURRENT_COMMIT=$(git rev-parse HEAD)
if [[ "$PROTOCOL_BRANCH" != "master" ]]; then
echo 'Not on master branch! On' $PROTOCOL_BRANCH;
exit 1;
fi
# check if master branch is up to date
if git merge-base --is-ancestor origin/master master; then
echo 'Master branch is up to date!'
else
echo 'Master branch is not up to date!';
exit 1;
fi
# Step 1 - go to k8s repo
go_to_k8s_repo
# Step 2 - replace old version with new version
# meant for OS X, may need to be tweaked to run on Linux
# replace old version with new version in the helm/ folder
# NOTE(review): "$2" is used as an unescaped sed pattern -- the dots in a
# version like 0.3.5 match any character; escape them if false matches occur.
find helm -type f -exec sed -i "" "s/$2/$1/g" {} \;
git add -u helm/charts/
# Step 3 - Commit and wait for push to master
git commit -m "Bump to version $1"
# You need to manually push to github. Script waits for confirmation
read -p "Please push the version bump to Github. Did it get pushed successfully? (y/n)?" choice
case "$choice" in
y|Y|yes ) echo "Moving on to next step";;
n|N|no ) echo "Exiting"; exit 1;;
* ) echo "invalid"; exit 1;;
esac
# Step 4 - template the public k8s manifests
./ops/scripts/template.sh
# Step 5 - go to audius-k8s-manifests and copy the templates over
cd ../audius-k8s-manifests/
git pull origin master
cp -r ../audius-k8s/kube/ ./audius
# Step 6 - create a new branch and commit the changes
git checkout -b dm-v$1
git add -u .
git commit -m "Bump to version $1"
# Step 7 - wait for push to branch and PR merge
# You need to manually push to github and merge the PR. Script waits for confirmation
read -p "Please push the version bump to Github and merge the PR. Did it get pushed AND merged successfully? (y/n)?" choice
case "$choice" in
y|Y|yes ) echo "Moving on to next step";;
n|N|no ) echo "Exiting"; exit 1;;
* ) echo "invalid"; exit 1;;
esac
# Step 8 - update master
git checkout master
git pull origin master
| true
|
71fb638a4189aa6ea2207a707e1417233b025497
|
Shell
|
kontax/snooker-broadcast-tracking
|
/install.sh
|
UTF-8
| 1,371
| 3.71875
| 4
|
[] |
no_license
|
#!/bin/bash
# First stage of the CUDA/driver installation (driver prep + Nouveau
# blacklist); a reboot is required before continuing with the rest.
# Largely taken from steps outlined at:
# https://github.com/BVLC/caffe/wiki/Install-Caffe-on-EC2-from-scratch-(Ubuntu,-CUDA-7,-cuDNN)
echo -e "\nCUDA installation script\n"
# Script must be run as root
if [ "$(id -u)" != "0" ]; then
echo "Please re-run this script as the root user." 1>&2
exit 1
fi
# Ensure the system requirements are met
system=$(lsb_release -i | awk '{print $3}')
version=$(lsb_release -r | awk '{print $2}')
# NOTE(review): "user" is captured but never used in this script.
user=$(who am i | awk '{print $1}')
if [[ $system != 'Ubuntu' || $version != "14.04" ]]; then
echo "This script needs to be run on Ubuntu 14.04" 1>&2
exit 1
fi
echo -e "Installing build-essential if not already done\n"
apt-get update
apt-get -y install build-essential
echo -e "Adding drivers and downloaded repository to repo list\n"
add-apt-repository -y ppa:graphics-drivers/ppa
echo -e "Updating the image for NVIDIA driver compatibility\n"
apt-get -y install linux-image-extra-virtual
# Nouveau (the in-tree driver) must be blacklisted so the proprietary
# NVIDIA driver can bind the GPU after reboot.
echo -e "Blacklisting Nouveau\n"
cat > /etc/modprobe.d/blacklist-nouveau.conf << EOF
blacklist nouveau
blacklist lbm-nouveau
options nouveau modeset=0
alias nouveau off
alias lbm-nouveau off
EOF
echo options nouveau modeset=0 > /etc/modprobe.d/nouveau-kms.conf
update-initramfs -u
echo -e "\nFirst step of installation is complete.\nPlease restart the system before continuing.\n"
| true
|
332a89a27ab24da5dc586e557a8936dedac46b21
|
Shell
|
realeroberto/bashlets-math
|
/lib/poly/pretty
|
UTF-8
| 2,787
| 2.859375
| 3
|
[] |
no_license
|
#!/bin/bash
################################################################################
#
# | | | |
# |---.,---.,---.|---.| ,---.|--- ,---.
# | |,---|`---.| || |---'| `---.
# `---'`---^`---'` '`---'`---'`---'`---'
#
#
# Bashlets -- A modular extensible toolbox for Bash
#
# Copyright (c) 2014-6 Roberto Reale
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
################################################################################
# simple ``pretty-printer'' for polynomials
# Coefficients are passed as arguments, highest degree first; output is
# two lines, with each term's exponent printed on the line above it.
#@public
function bashlets::math::poly::pretty::print()
{
    # BUGFIX: the original used `echo $@ | awk -e '...'`.  "-e" is a GNU
    # awk extension (breaks mawk/BSD awk), and the unquoted $@ was subject
    # to glob expansion.  Join the arguments explicitly and use plain awk.
    printf '%s\n' "$*" | awk '
    #
    # construct a string of n consecutive spaces
    #
    function sp(n, i, str) {
        for (i = 0; i < n; i++) {
            str = str " "
        }
        return str
    }
    #
    # pretty-printer: builds the exponent line (line1) and the term line
    # (line2) in lock-step, padding each to the other as terms are added
    #
    function poly_print(poly, n, line1, line2, i) {
        for (i = 1; i <= n; i++) {
            line2 = line2 sp(length(line1) - length(line2))
            if (i > 1) { line2 = line2 " + " }
            line2 = line2 poly[i]
            if (i < n) { line2 = line2 "x" }
            line1 = line1 sp(length(line2) - length(line1))
            if (i <= n-2) { line1 = line1 n-i }
        }
        return line1 "\n" line2
    }
    {
        split($0, poly)
        split(poly_print(poly, NF), lines, "\n")
        print lines[1] "\n" lines[2]
    }
    '
}
# Local variables:
# mode: shell-script
# sh-basic-offset: 4
# sh-indent-comment: t
# indent-tabs-mode: nil
# End:
# ex: ts=4 sw=4 et filetype=sh
| true
|
f50dc323445546d309587542876104ac935d6a0b
|
Shell
|
dwighteb/ansible-provision
|
/playbook/roles/openvpn/files/configure-pat.sh
|
UTF-8
| 3,129
| 3.578125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Configure the instance to run as a Port Address Translator (PAT) to provide
# Internet connectivity to private instances.
#
set -x
echo "Determining the MAC address on eth0"
# The $? checked below is the assignment's command-substitution status,
# i.e. the exit code of the final format-validating grep.
ETH0_MAC=`/sbin/ifconfig | /bin/grep eth0 | awk '{print tolower($5)}' | grep '^[0-9a-f]\{2\}\(:[0-9a-f]\{2\}\)\{5\}$'`
if [ $? -ne 0 ] ; then
echo "Unable to determine MAC address on eth0" | logger -t "ec2"
exit 1
fi
echo "Found MAC: ${ETH0_MAC} on eth0" | logger -t "ec2"
# EC2 instance-metadata endpoint for this interface's VPC CIDR block.
VPC_CIDR_URI="http://169.254.169.254/latest/meta-data/network/interfaces/macs/${ETH0_MAC}/vpc-ipv4-cidr-block"
echo "Metadata location for vpc ipv4 range: ${VPC_CIDR_URI}" | logger -t "ec2"
VPC_CIDR_RANGE=`curl --retry 3 --retry-delay 0 --silent --fail ${VPC_CIDR_URI}`
if [ $? -ne 0 ] ; then
echo "Unable to retrive VPC CIDR range from meta-data. Using 0.0.0.0/0 instead. PAT may not function correctly" | logger -t "ec2"
VPC_CIDR_RANGE="0.0.0.0/0"
else
echo "Retrived the VPC CIDR range: ${VPC_CIDR_RANGE} from meta-data" |logger -t "ec2"
fi
# Enable IP forwarding, disable ICMP redirects on eth0, and masquerade
# outbound traffic originating from the VPC range.
echo 1 > /proc/sys/net/ipv4/ip_forward && \
echo 0 > /proc/sys/net/ipv4/conf/eth0/send_redirects && \
/sbin/iptables -t nat -A POSTROUTING -o eth0 -s ${VPC_CIDR_RANGE} -j MASQUERADE
if [ $? -ne 0 ] ; then
echo "Configuration of PAT failed" | logger -t "ec2"
# NOTE(review): exits 0 on failure -- possibly deliberate so boot
# continues; verify, since a non-zero status seems more appropriate.
exit 0
fi
# additional rules for dropping unwanted traffic.
# rc.local will be run after fail2ban
# "blackhole" chain: drop traffic from addresses listed in
# /usr/local/etc/blackhole (comment and blank lines ignored).
/sbin/iptables -N blackhole
/sbin/iptables -I INPUT -j blackhole
/sbin/iptables -I FORWARD -j blackhole
/sbin/iptables -I OUTPUT -j blackhole
for ip in $(egrep -v "^#|^$" /usr/local/etc/blackhole)
do
/sbin/iptables -A blackhole -s $ip -j DROP
done
/sbin/iptables -A blackhole -j RETURN
# "logdrop" chain: rate-limited kernel log entry, then drop.
/sbin/iptables -N logdrop
/sbin/iptables -A logdrop -m limit --limit 5/min -j LOG --log-prefix "iptables denied: " --log-level 7
/sbin/iptables -A logdrop -j DROP
# INPUT policy: allow established/loopback/VPC/tunnel traffic, rate-limit
# ICMP, allow SSH (22) and OpenVPN (443 tcp+udp); log-drop everything else.
/sbin/iptables -A INPUT -m conntrack --ctstate ESTABLISHED,RELATED -j ACCEPT
/sbin/iptables -A INPUT -i lo -j ACCEPT
/sbin/iptables -A INPUT ! -i lo -d 127.0.0.0/8 -j logdrop
/sbin/iptables -A INPUT -p tcp ! --syn -m state --state NEW -j logdrop
/sbin/iptables -A INPUT -i eth0 -p icmp -m limit --limit 3/s -j ACCEPT
/sbin/iptables -A INPUT -i eth0 -p icmp -j DROP
/sbin/iptables -A INPUT -i eth0 -s ${VPC_CIDR_RANGE} -j ACCEPT
for interface in $(ifconfig | awk '/^tun/ {print $1}')
do
/sbin/iptables -A INPUT -i ${interface} -j ACCEPT
done
/sbin/iptables -A INPUT -i eth0 -p tcp -m tcp --dport 22 -j ACCEPT
/sbin/iptables -A INPUT -i eth0 -p tcp -m tcp --dport 443 -j ACCEPT
/sbin/iptables -A INPUT -i eth0 -p udp -m udp --dport 443 -j ACCEPT
/sbin/iptables -A INPUT -j logdrop
/sbin/iptables -A OUTPUT -o eth0 -p icmp -m limit --limit 3/s -j ACCEPT
/sbin/iptables -A OUTPUT -o eth0 -p icmp -j DROP
# create masquerade rules for openvpn tunnels that push our server as
# the default gateway
for config in $(egrep -l '^push "redirect-gateway def1 bypass-dhcp"$' \
/etc/openvpn/*.conf)
do
/sbin/iptables -t nat -A POSTROUTING -o eth0 -s \
$(awk '/^server / {print $2"/"$3}' ${config}) -j MASQUERADE
done
echo "Configuration of PAT complete" |logger -t "ec2"
exit 0
| true
|
d349fbe67f38ea8d8e3f542e3e3a55662caa7112
|
Shell
|
jiaming-cs/arfl
|
/experiments/femnist.sh
|
UTF-8
| 1,805
| 2.78125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Run the femnist experiments: each method x five seeds, writing metrics
# under experiments/results/<dataset> and logs under experiments/log/<dataset>.
dataset="femnist"
pc=0.5
ps=1.0
epochs=20
num_rounds=2000
per_round=20
attack_type=3
actors=16
gpus=2
lambda=10000.0
seed=0
setup_clients=1.0
batch_size=64
mkdir -p results/${dataset} log/${dataset}
# Run main.py from <repo>/models (this script lives in <repo>/experiments).
pushd ../
pushd models/ || exit
#method=$1
for method in "arfl" "cfl" "mkrum" "rfa"
do
for seed in 0 1 2 3 4
do
output_name=${dataset}_${method}_${pc}_${ps}_${attack_type}_${lambda}_${seed}
python main.py --dataset ${dataset} --model cnn --setup_clients ${setup_clients} --num_actors ${actors} \
--batch-size ${batch_size} --num_gpus ${gpus} --seed ${seed} --clients-per-round ${per_round} \
--reg_weight ${lambda} --num-epochs ${epochs} --num-rounds ${num_rounds} --method ${method} -pc ${pc} -ps ${ps} \
--attack_type ${attack_type} --metrics-dir ../experiments/results/${dataset} --metrics_name \
${output_name} > ../experiments/log/${dataset}/${output_name}.txt
done
done
# Alternative sweep over attack types, kept for reference.
#for attack_type in 0 1 2
# do
# for pc in 0.3 0.5
# do
# for seed in 0 1 2 3 4
# do
# output_name=${dataset}_$1_${pc}_${ps}_${attack_type}_${lambda}_${seed}
# python main.py --dataset ${dataset} --model cnn --setup_clients ${setup_clients} --num_actors ${actors} \
# --batch-size ${batch_size} --num_gpus ${gpus} --seed ${seed} --clients-per-round ${per_round} \
# --reg_weight ${lambda} --num-epochs ${epochs} --num-rounds ${num_rounds} --method ${method} -pc ${pc} -ps ${ps} \
# --attack_type ${attack_type} --metrics-dir ../experiments/results/${dataset} --metrics_name \
# ${output_name} > ../experiments/log/${dataset}/${output_name}.txt
# done
# done
# done
popd || exit
popd || exit
| true
|
70297b700e1dbca37970e29cd530ff33bdd09398
|
Shell
|
alfpedraza-aws-devops/infrastructure
|
/modules/environment/scripts/master/share-join-data.sh
|
UTF-8
| 4,273
| 3.5625
| 4
|
[] |
no_license
|
# Shared state populated by generate_join_data / create_s3_bucket and
# consumed by the later upload/tag steps.
PRIVATE_MASTER_HOST=""
PRIVATE_TOKEN_VALUE=""
PRIVATE_TOKEN_HASH=""
PRIVATE_REGION_NAME=""
PRIVATE_BUCKET_NAME=""
function generate_join_data() {
    # Compute the values worker nodes need to join the cluster:
    # API endpoint, bootstrap token, and the CA public-key hash.
    local node_ip
    local ca_hash
    node_ip=$(get_node_ip)
    ca_hash=$(openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt | openssl rsa -pubin -outform der 2>/dev/null | openssl dgst -sha256 -hex | sed 's/^.* //')
    PRIVATE_MASTER_HOST=$node_ip:6443
    # Second line of `kubeadm token list` output is the first token entry.
    PRIVATE_TOKEN_VALUE=$(kubeadm token list | awk 'NR == 2 {print $1}')
    PRIVATE_TOKEN_HASH="sha256:$ca_hash"
}
function create_s3_bucket() {
# Create an AWS S3 bucket where the join data will be uploaded.
# The bucket is recreated from scratch if it already exists, locked down
# with a deny-public configuration, restricted by policy to the node and
# Jenkins IAM roles, and given default AES256 server-side encryption.
local ACCOUNT_ID=$(get_account_id)
# NOTE(review): HOST_NAME is captured but never used in this function.
local HOST_NAME=$(get_host_name)
PRIVATE_REGION_NAME=$(get_region_name)
PRIVATE_BUCKET_NAME="$ACCOUNT_ID-$GLOBAL_MASTER_NAME"
# Policy: allow only the node and Jenkins roles to GetObject from the bucket.
local BUCKET_POLICY="{\"Version\": \"2012-10-17\",\"Id\": \"Policy1583629506118\",\"Statement\": [{\"Sid\": \"Stmt1583629432359\",\"Effect\": \"Allow\",\"Principal\": {\"AWS\": [\"arn:aws:iam::$ACCOUNT_ID:role/$GLOBAL_NODE_ROLE_NAME\",\"arn:aws:iam::$ACCOUNT_ID:role/$GLOBAL_JENKINS_ROLE_NAME\"]},\"Action\": [\"s3:GetObject\"],\"Resource\": [\"arn:aws:s3:::$PRIVATE_BUCKET_NAME/*\"]}]}"
# Start clean: delete any pre-existing bucket with this name.
if [[ $(aws s3api list-buckets --query "Buckets[?Name == '$PRIVATE_BUCKET_NAME'].[Name]" --output text) = "$PRIVATE_BUCKET_NAME" ]]; then
aws s3 rb s3://$PRIVATE_BUCKET_NAME --force
fi
aws s3api create-bucket \
--bucket $PRIVATE_BUCKET_NAME \
--region $PRIVATE_REGION_NAME \
--create-bucket-configuration \
LocationConstraint=$PRIVATE_REGION_NAME
# Wait until the bucket is visible before configuring it.
aws s3api wait bucket-exists \
--bucket $PRIVATE_BUCKET_NAME \
--region $PRIVATE_REGION_NAME
aws s3api put-public-access-block \
--bucket $PRIVATE_BUCKET_NAME \
--region $PRIVATE_REGION_NAME \
--public-access-block-configuration \
"BlockPublicAcls=true,IgnorePublicAcls=true,BlockPublicPolicy=true,RestrictPublicBuckets=true"
aws s3api put-bucket-policy \
--bucket $PRIVATE_BUCKET_NAME \
--region $PRIVATE_REGION_NAME \
--policy "$BUCKET_POLICY"
aws s3api put-bucket-encryption \
--bucket $PRIVATE_BUCKET_NAME \
--region $PRIVATE_REGION_NAME \
--server-side-encryption-configuration \
'{"Rules":[{"ApplyServerSideEncryptionByDefault":{"SSEAlgorithm":"AES256"}}]}'
}
function upload_join_data() {
    # Stage the join values and cluster credentials under /tmp/aws-devops,
    # upload them to the private S3 bucket, then remove the staging files.
    echo $PRIVATE_MASTER_HOST > /tmp/aws-devops/masterhost.txt
    echo $PRIVATE_TOKEN_VALUE > /tmp/aws-devops/tokenvalue.txt
    echo $PRIVATE_TOKEN_HASH > /tmp/aws-devops/tokenhash.txt
    # Stage the kubectl admin config and the cluster CA material.
    cp /etc/kubernetes/admin.conf /tmp/aws-devops/admin.conf
    cp /etc/kubernetes/pki/ca.crt /tmp/aws-devops/ca.crt
    cp /etc/kubernetes/pki/ca.key /tmp/aws-devops/ca.key
    local staged
    for staged in admin.conf ca.crt ca.key; do
        chmod a+r /tmp/aws-devops/$staged
    done
    # Upload everything to the private S3 bucket.
    local BUCKET=$PRIVATE_BUCKET_NAME
    local REGION=$PRIVATE_REGION_NAME
    local f
    for f in masterhost.txt tokenvalue.txt tokenhash.txt admin.conf ca.crt ca.key; do
        aws s3 cp /tmp/aws-devops/$f s3://$BUCKET/ --region $REGION
    done
    # Remove the local staging copies.
    for f in masterhost.txt tokenvalue.txt tokenhash.txt admin.conf ca.crt ca.key; do
        rm /tmp/aws-devops/$f
    done
}
function assign_instance_tag() {
    # Tag this EC2 instance with the bucket name so consumers can find it.
    local instance_id
    instance_id=$(get_instance_id)
    aws ec2 create-tags \
        --resources $instance_id \
        --tags Key=BUCKET_NAME,Value=$PRIVATE_BUCKET_NAME
}
function share_join_data() {
# Orchestrate the full flow: generate the join values, create the S3
# bucket, upload the data, then tag the instance with the bucket name.
generate_join_data
create_s3_bucket
upload_join_data
assign_instance_tag
}
| true
|
d95046a9662938375073a6406f12dfcf4a9d6bf0
|
Shell
|
cloudcafetech/kube-katakoda
|
/host-setup.sh
|
UTF-8
| 9,886
| 3.375
| 3
|
[] |
no_license
|
#!/bin/bash
# Kubernetes host setup script for CentOS
# Role ("master" or "node") comes from the first argument.
master=$1
KUBEMASTER=10.128.0.5
MinIO=10.128.0.9
NFSRV=10.128.0.9
NFSMOUNT=/root/nfs/nfsdata
#K8S_VER=1.14.5
# Latest stable Kubernetes version, with the leading "v" stripped.
K8S_VER=$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt | cut -d v -f2)
CRI=docker
velver=v1.4.2
DATE=$(date +"%d%m%y")
# Bootstrap token: date-derived prefix + fixed suffix -- presumably so a
# master and nodes provisioned the same day agree on it; confirm.
TOKEN=$DATE.1a7dd4cc8d1f4cc5
#CRI=crio
# Validate the role argument: must be exactly "master" or "node".
# BUGFIX: the original test was
#   [[ "$master" == "" || "$master" != "master" || "$master" != "node" ]]
# which is always true (any value differs from at least one of the two
# literals), so the script exited unconditionally regardless of input.
if [[ "$master" != "master" && "$master" != "node" ]]; then
    echo "Usage: host-setup.sh <master or node>"
    echo "Example: host-setup.sh master/node"
    exit
fi
#Stopping and disabling firewalld by running the commands on all servers:
systemctl stop firewalld
systemctl disable firewalld
#Disable swap. Kubeadm will check to make sure that swap is disabled when we run it, so lets turn swap off and disable it for future reboots.
swapoff -a
sed -i.bak -r 's/(.+ swap .+)/#\1/' /etc/fstab
#Disable SELinux
setenforce 0
sed -i --follow-symlinks 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/sysconfig/selinux
#Add the kubernetes repository to yum so that we can use our package manager to install the latest version of kubernetes.
cat <<EOF > /etc/yum.repos.d/kubernetes.repo
[kubernetes]
name=Kubernetes
baseurl=https://packages.cloud.google.com/yum/repos/kubernetes-el7-x86_64
enabled=1
gpgcheck=1
repo_gpgcheck=1
gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg
exclude=kube*
EOF
#Install some of the tools (including CRI-O, kubeadm & kubelet) we’ll need on our servers.
yum install -y git curl wget bind-utils jq httpd-tools zip unzip nfs-utils go nmap telnet
if [[ $CRI != "docker" ]]
then
# Setup for CRIO
modprobe overlay
modprobe br_netfilter
cat > /etc/sysctl.d/99-kubernetes-cri.conf <<EOF
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1
net.bridge.bridge-nf-call-ip6tables = 1
EOF
sysctl --system
# Install CRI-O prerequisites & tool
# NOTE(review): this heredoc delimiter is unquoted, so $basearch expands
# at script run time (empty unless set in the environment); yum repo files
# normally want the literal "$basearch" -- confirm the intended behavior.
cat << EOF > /etc/yum.repos.d/crio.repo
[cri-o]
name=CRI-O Packages for CentOS 7 — $basearch
baseurl=http://mirror.centos.org/centos/7/paas/x86_64/openshift-origin311/
enabled=1
gpgcheck=0
EOF
# Install CRI-O
yum -y install cri-o cri-tools
# Modify CRI-O config in cgroup_manager = "systemd" to "cgroupfs"
sed -i 's/cgroup_manager = "systemd"/cgroup_manager = "cgroupfs"/g' /etc/crio/crio.conf
# Modify CRI-O config for disabling selinux
sed -i 's/selinux = true/selinux = false/g' /etc/crio/crio.conf
# upgrade crio version due to POD hostNetwork loopback (127.0.0.1) ip address
yum install -y https://cbs.centos.org/kojifiles/packages/cri-o/1.13.9/1.el7/x86_64/cri-o-1.13.9-1.el7.x86_64.rpm
# To escape error "failed: no ...directory"
mkdir -p /usr/share/containers/oci/hooks.d
# Remove CRI-o default CNI configuration
rm -rf /etc/cni/net.d/*
# Start CRI-O
systemctl start crio
systemctl enable crio
else
# Setup for docker
yum install -y docker
# Modify /etc/sysconfig/docker file as follows.
more /etc/sysconfig/docker | grep OPTIONS
sed -i "s/^OPTIONS=.*/OPTIONS='--selinux-enabled --signature-verification=false'/g" /etc/sysconfig/docker
more /etc/sysconfig/docker | grep OPTIONS
systemctl enable docker
systemctl start docker
cat <<EOF > /etc/sysctl.d/k8s.conf
net.bridge.bridge-nf-call-ip6tables = 1
net.bridge.bridge-nf-call-iptables = 1
EOF
sysctl --system
systemctl restart docker
fi
# Installation with speceifc version
yum install -y kubelet-$K8S_VER kubeadm-$K8S_VER kubectl-$K8S_VER kubernetes-cni-0.6.0 --disableexcludes=kubernetes
# After installing crio and our kubernetes tools, we’ll need to enable the services so that they persist across reboots, and start the services so we can use them right away.
systemctl enable kubelet; systemctl start kubelet
# Setting up Kubernetes Node using Kubeadm
if [[ "$master" == "node" ]]; then
echo ""
echo "Waiting for Master ($KUBEMASTER) API response .."
# NOTE(review): nc's stdout is discarded, so the substitution is always
# empty and never equals "True" -- this loop can never terminate via its
# test.  Likely intended: `until nc -z $KUBEMASTER 6443; do ...`.  Confirm.
while [[ $(nc $KUBEMASTER 6443 &> /dev/null) != "True" ]]; do printf '.'; sleep 2; done
kubeadm join --discovery-token-unsafe-skip-ca-verification --token=$TOKEN $KUBEMASTER:6443
exit
fi
# Setting up Kubernetes Master using Kubeadm
if [[ "$master" == "master" && $CRI != "docker" ]]; then
kubeadm init --pod-network-cidr=10.244.0.0/16 --kubernetes-version $(kubeadm version -o short) --cri-socket "/var/run/crio/crio.sock" --ignore-preflight-errors=all 2>&1 | tee kubeadm-output.txt
else
kubeadm init --token=$TOKEN --pod-network-cidr=10.244.0.0/16 --kubernetes-version $(kubeadm version -o short) --ignore-preflight-errors=all | grep -Ei "kubeadm join|discovery-token-ca-cert-hash" 2>&1 | tee kubeadm-output.txt
fi
# Make kubectl usable for this user and future shells.
sudo cp /etc/kubernetes/admin.conf $HOME/
sudo chown $(id -u):$(id -g) $HOME/admin.conf
export KUBECONFIG=$HOME/admin.conf
echo "export KUBECONFIG=$HOME/admin.conf" >> $HOME/.bash_profile
echo "alias oc=/usr/bin/kubectl" >> /root/.bash_profile
# Install the flannel pod network (pinned commit).
wget https://raw.githubusercontent.com/coreos/flannel/2140ac876ef134e0ed5af15c65e414cf26827915/Documentation/kube-flannel.yml
kubectl create -f kube-flannel.yml
sleep 20
kubectl get nodes
# Make Master scheduble
MASTER=`kubectl get nodes | grep master | awk '{print $1}'`
kubectl taint nodes $MASTER node-role.kubernetes.io/master-
kubectl get nodes -o json | jq .items[].spec.taints
# Install krew
set -x; cd "$(mktemp -d)" &&
curl -fsSLO "https://github.com/kubernetes-sigs/krew/releases/latest/download/krew.{tar.gz,yaml}" &&
tar zxvf krew.tar.gz &&
KREW=./krew-"$(uname | tr '[:upper:]' '[:lower:]')_amd64" &&
"$KREW" install --manifest=krew.yaml --archive=krew.tar.gz &&
"$KREW" update
export PATH="${KREW_ROOT:-$HOME/.krew}/bin:$PATH"
# Install kubectl plugins using krew
kubectl krew install modify-secret
kubectl krew install doctor
kubectl krew install ctx
kubectl krew install ns
echo 'export PATH="${PATH}:${HOME}/.krew/bin"' >> /root/.bash_profile
# Deploying Ingress
wget https://raw.githubusercontent.com/cloudcafetech/kube-katakoda/master/kube-ingress.yaml
# Substitute the actual master node name into the manifest.
sed -i "s/kube-master/$MASTER/g" kube-ingress.yaml
kubectl create ns kube-router
kubectl create -f kube-ingress.yaml
# Deploying dynamic NFS based persistant storage
wget https://raw.githubusercontent.com/cloudcafetech/kube-katakoda/master/nfs-rbac.yaml
wget https://raw.githubusercontent.com/cloudcafetech/kube-katakoda/master/nfs-deployment.yaml
wget https://raw.githubusercontent.com/cloudcafetech/kube-katakoda/master/kubenfs-storage-class.yaml
# Point the deployment at the configured NFS server and export path.
sed -i "s/10.128.0.9/$NFSRV/g" nfs-deployment.yaml
sed -i "s|/root/nfs/kubedata|$NFSMOUNT|g" nfs-deployment.yaml
kubectl create ns kubenfs
kubectl create -f nfs-rbac.yaml -f nfs-deployment.yaml -f kubenfs-storage-class.yaml -n kubenfs
# Mark the kubenfs storage class as the cluster default.
SC=`kubectl get sc | grep kubenfs | awk '{print $1}'`
kubectl patch sc $SC -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
# Setup Velero
wget https://github.com/vmware-tanzu/velero/releases/download/$velver/velero-$velver-linux-amd64.tar.gz
tar -xvzf velero-$velver-linux-amd64.tar.gz
mv -v velero-$velver-linux-amd64/velero /usr/local/bin/velero
echo "alias vel=/usr/local/bin/velero" >> /root/.bash_profile
cd
# NOTE(review): hardcoded MinIO credentials committed to the repo --
# consider sourcing these from the environment or a secret store.
cat <<EOF > credentials-velero
[default]
aws_access_key_id = admin
aws_secret_access_key = bappa2675
EOF
HOST_NAME=$(hostname)
HOST_IP=`ip -o -4 addr list eth0 | awk '{print $4}' | cut -d/ -f1`
# Self-signed certificate for this host (10-year validity).
openssl req -x509 -nodes -days 3650 -newkey rsa:2048 -keyout private.key -out public.crt -subj "/CN=$HOST_IP/O=$HOST_NAME"
velero install \
--provider aws \
--bucket velero-cluster1 \
--plugins velero/velero-plugin-for-aws:v1.1.0 \
--use-restic \
--secret-file ./credentials-velero \
--use-volume-snapshots=true \
--backup-location-config region=minio,s3ForcePathStyle="true",s3Url=http://$MinIO:9000 \
--snapshot-location-config region=minio
# Setup Helm Chart
wget https://raw.githubusercontent.com/cloudcafetech/kube-katakoda/master/setup-helm.sh
chmod +x setup-helm.sh
./setup-helm.sh
# Setup for monitring and logging
#exit
wget https://raw.githubusercontent.com/cloudcafetech/kube-katakoda/master/kubemon.yaml
wget https://raw.githubusercontent.com/cloudcafetech/kube-katakoda/master/kubelog.yaml
wget https://raw.githubusercontent.com/cloudcafetech/kube-katakoda/master/loki.yaml
wget https://raw.githubusercontent.com/cloudcafetech/kube-katakoda/master/loki-ds.json
wget https://raw.githubusercontent.com/cloudcafetech/kube-katakoda/master/pod-monitoring.json
wget https://raw.githubusercontent.com/cloudcafetech/kube-katakoda/master/kube-monitoring-overview.json
wget https://raw.githubusercontent.com/cloudcafetech/kube-katakoda/master/cluster-cost.json
kubectl create ns monitoring
kubectl create -f kubemon.yaml -n monitoring
kubectl create ns logging
kubectl create secret generic loki -n logging --from-file=loki.yaml
kubectl create -f kubelog.yaml -n logging
## Upload Grafana dashboard & loki datasource
echo ""
echo "Waiting for Grafana POD ready to upload dashboard & loki datasource .."
while [[ $(kubectl get pods kubemon-grafana-0 -n monitoring -o 'jsonpath={..status.conditions[?(@.type=="Ready")].status}') != "True" ]]; do printf '.'; sleep 2; done
HIP=`ip -o -4 addr list eth0 | awk '{print $4}' | cut -d/ -f1`
curl -vvv http://admin:admin2675@$HIP:30000/api/dashboards/db -X POST -d @pod-monitoring.json -H 'Content-Type: application/json'
curl -vvv http://admin:admin2675@$HIP:30000/api/dashboards/db -X POST -d @kube-monitoring-overview.json -H 'Content-Type: application/json'
curl -vvv http://admin:admin2675@$HIP:30000/api/dashboards/db -X POST -d @cluster-cost.json -H 'Content-Type: application/json'
curl -vvv http://admin:admin2675@$HIP:30000/api/datasources -X POST -d @loki-ds.json -H 'Content-Type: application/json'
# Setup Demo application
#exit
wget https://raw.githubusercontent.com/cloudcafetech/kube-katakoda/master/mongo-employee.yaml
kubectl create ns demo-mongo
kubectl create -f mongo-employee.yaml -n demo-mongo
| true
|
16e898a10bc7af7cfd7f60efe6f5c653393ca952
|
Shell
|
yurimdh/dotfiles
|
/.macos
|
UTF-8
| 33,170
| 3.171875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# ~/.macos — https://mths.be/macos
# Modified by Yuri
# Run without downloading:
#   curl https://raw.githubusercontent.com/yurimdh/dotfiles/HEAD/.macos | bash
#
# Applies macOS system preferences via `defaults write`, then installs the
# author's toolchain (Homebrew, asdf, cask apps). Most settings only take
# effect after the affected app is restarted (or after a reboot/logout).
# Close any open System Preferences panes, to prevent them from overriding
# settings we’re about to change
osascript -e 'tell application "System Preferences" to quit'
# Ask for the administrator password upfront
sudo -v
# Keep-alive: update existing `sudo` time stamp until `.macos` has finished.
# The backgrounded loop refreshes the sudo timestamp every 60s and exits on
# its own once this script's PID ($$) is gone (kill -0 probes existence), so
# the many `sudo` calls below never re-prompt mid-run.
while true; do sudo -n true; sleep 60; kill -0 "$$" || exit; done 2>/dev/null &
###############################################################################
# Kent's Customizations                                                       #
###############################################################################
echo "Hello $(whoami)! Let's get you set up."
echo "mkdir -p ${HOME}/Code"
mkdir -p "${HOME}/Code"
echo "installing homebrew"
# install homebrew https://brew.sh (official non-interactive installer)
/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
echo "brew installing stuff"
# bat: a cat(1) clone with syntax highlighting and Git integration
# hub: a github-specific version of git
# tree: really handy for listing out directories in text
# blackhole-2ch: virtual audio driver for routing audio between apps
brew install \
  awscli \
  bat \
  blackhole-2ch \
  cmake \
  coreutils \
  fzf \
  git \
  global \
  gnupg \
  hub \
  libpq \
  luarocks \
  openssl \
  openssl@1.1 \
  readline \
  sqlite3 \
  tree \
  xz \
  zlib
# NOTE(review): Homebrew removed per-formula install options years ago, so
# modern brew rejects `--without-node` — confirm this still works as intended.
brew install yarn --without-node
brew install --HEAD neovim
echo "installing asdf"
git clone https://github.com/asdf-vm/asdf.git ~/.asdf
cd ~/.asdf || exit 1
# Pin asdf to its most recent tagged release rather than master HEAD.
git checkout "$(git describe --abbrev=0 --tags)"
# Load the freshly cloned asdf into THIS shell session. Bug fix: the original
# never sourced it, so every `asdf ...` command below failed with
# "command not found" (cloning alone does not put asdf on PATH).
. "$HOME/.asdf/asdf.sh"
echo "installing asdf plugins"
asdf plugin-add nodejs https://github.com/asdf-vm/asdf-nodejs.git
asdf plugin-add ruby https://github.com/asdf-vm/asdf-ruby.git
asdf plugin-add python
asdf plugin-add yarn https://github.com/twuni/asdf-yarn.git
echo "importing NodeJS OpenPGP keys"
# Bug fix: must run AFTER `asdf plugin-add nodejs` — the keyring script lives
# inside the nodejs plugin directory and does not exist until the plugin is added.
bash -c '${ASDF_DATA_DIR:=$HOME/.asdf}/plugins/nodejs/bin/import-previous-release-team-keyring'
echo "installs Node, Ruby, Python and Yarn via asdf"
asdf install
echo "installing apps with brew cask"
brew install --cask \
  1password \
  alfred \
  avibrazil-rdm \
  brave-browser \
  dash \
  discord \
  dropbox \
  firefox \
  google-chrome \
  obs \
  rectangle \
  slack \
  spotify \
  suspicious-package \
  visual-studio-code \
  vlc \
  webpquicklook \
  zoomus
# ^ Bug fix: the original list ended with `zoomus \`; that trailing line
# continuation swallowed the next command (the echo below) as extra arguments
# to `brew install --cask`.
echo "Generating a new SSH key for GitHub"
# Ensure ~/.ssh exists with the permissions sshd expects before writing keys.
mkdir -p ~/.ssh
chmod 700 ~/.ssh
ssh-keygen -t ed25519 -C "yurisantos47@gmail.com" -f ~/.ssh/id_ed25519
eval "$(ssh-agent -s)"
# Write the agent/keychain stanza. Bug fix: bash's `echo` does not expand \n
# without -e, so the original left literal backslash-n sequences in
# ~/.ssh/config; printf emits real newlines. This still overwrites any
# existing config, matching the original `tee` behavior.
printf '%s\n' \
  'Host *' \
  '  AddKeysToAgent yes' \
  '  UseKeychain yes' \
  '  IdentityFile ~/.ssh/id_ed25519' | tee ~/.ssh/config
ssh-add -K ~/.ssh/id_ed25519
echo "run 'pbcopy < ~/.ssh/id_ed25519.pub' and paste that into GitHub"
# get bat and delta all configured
mkdir -p "${HOME}/.config/bat/themes"
ln -s "${HOME}/dotfiles/.config/bat/config" "${HOME}/.config/bat/config"
git clone https://github.com/batpigandme/night-owlish "${HOME}/.config/bat/themes/night-owlish"
bat cache --build
echo "making system modifications:"
# Make Chrome two-finger swipe navigate back and forward
defaults write com.google.Chrome AppleEnableSwipeNavigateWithScrolls -bool TRUE
###############################################################################
# General UI/UX                                                               #
###############################################################################
# Set standby delay to 24 hours (default is 1 hour)
sudo pmset -a standbydelay 86400
# Disable the sound effects on boot
sudo nvram SystemAudioVolume=" "
# Expand save panel by default
defaults write NSGlobalDomain NSNavPanelExpandedStateForSaveMode -bool true
defaults write NSGlobalDomain NSNavPanelExpandedStateForSaveMode2 -bool true
# Expand print panel by default
defaults write NSGlobalDomain PMPrintingExpandedStateForPrint -bool true
defaults write NSGlobalDomain PMPrintingExpandedStateForPrint2 -bool true
# Save to disk (not to iCloud) by default
defaults write NSGlobalDomain NSDocumentSaveNewDocumentsToCloud -bool false
# Automatically quit printer app once the print jobs complete
defaults write com.apple.print.PrintingPrefs "Quit When Finished" -bool true
# Remove duplicates in the “Open With” menu (also see `lscleanup` alias)
/System/Library/Frameworks/CoreServices.framework/Frameworks/LaunchServices.framework/Support/lsregister -kill -r -domain local -domain system -domain user
# Disable the crash reporter
defaults write com.apple.CrashReporter DialogType -string "none"
# Set Help Viewer windows to non-floating mode
defaults write com.apple.helpviewer DevMode -bool true
# Reveal IP address, hostname, OS version, etc. when clicking the clock
# in the login window
sudo defaults write /Library/Preferences/com.apple.loginwindow AdminHostInfo HostName
# Never go into computer sleep mode
sudo systemsetup -setcomputersleep Off > /dev/null
# Disable automatic capitalization as it’s annoying when typing code
defaults write NSGlobalDomain NSAutomaticCapitalizationEnabled -bool false
# Disable smart dashes as they’re annoying when typing code
defaults write NSGlobalDomain NSAutomaticDashSubstitutionEnabled -bool false
# Disable automatic period substitution as it’s annoying when typing code
defaults write NSGlobalDomain NSAutomaticPeriodSubstitutionEnabled -bool false
# Disable smart quotes as they’re annoying when typing code
defaults write NSGlobalDomain NSAutomaticQuoteSubstitutionEnabled -bool false
# Disable auto-correct
defaults write NSGlobalDomain NSAutomaticSpellingCorrectionEnabled -bool false
###############################################################################
# Trackpad, mouse, keyboard, Bluetooth accessories, and input                 #
###############################################################################
# Trackpad: enable tap to click for this user and for the login screen
defaults write com.apple.driver.AppleBluetoothMultitouch.trackpad Clicking -bool true
defaults -currentHost write NSGlobalDomain com.apple.mouse.tapBehavior -int 1
defaults write NSGlobalDomain com.apple.mouse.tapBehavior -int 1
# Trackpad: enable one-finger dragging, with drag lock turned off
defaults write com.apple.AppleMultitouchTrackpad DragLock -int 0
defaults write com.apple.AppleMultitouchTrackpad Dragging -int 1
defaults write com.apple.driver.AppleBluetoothMultitouch.trackpad DragLock -int 0
defaults write com.apple.driver.AppleBluetoothMultitouch.trackpad Dragging -int 1
# Enable full keyboard access for all controls
# (e.g. enable Tab in modal dialogs)
# NOTE(review): the commonly documented value for "all controls" is 3; this
# writes 2 — confirm the intended mode.
defaults write NSGlobalDomain AppleKeyboardUIMode -int 2
# Use scroll gesture with the Ctrl (^) modifier key to zoom
defaults write com.apple.universalaccess closeViewScrollWheelToggle -bool true
defaults write com.apple.universalaccess HIDScrollZoomModifierMask -int 262144
# Disable press-and-hold for keys in favor of key repeat
defaults write NSGlobalDomain ApplePressAndHoldEnabled -bool false
# Set a blazingly fast keyboard repeat rate (values below what the
# System Preferences sliders allow)
defaults write NSGlobalDomain KeyRepeat -int 1
defaults write NSGlobalDomain InitialKeyRepeat -int 10
###############################################################################
# Screen                                                                      #
###############################################################################
# Require password immediately after sleep or screen saver begins
defaults write com.apple.screensaver askForPassword -int 1
defaults write com.apple.screensaver askForPasswordDelay -int 0
# Save screenshots to the desktop
# (takes effect after SystemUIServer restarts, e.g. via `killall SystemUIServer`)
defaults write com.apple.screencapture location -string "${HOME}/Desktop"
# Save screenshots in PNG format (other options: BMP, GIF, JPG, PDF, TIFF)
defaults write com.apple.screencapture type -string "png"
# Enable subpixel font rendering on non-Apple LCDs
# Reference: https://github.com/kevinSuttle/macOS-Defaults/issues/17#issuecomment-266633501
defaults write NSGlobalDomain AppleFontSmoothing -int 1
# Enable HiDPI display modes (requires restart)
sudo defaults write /Library/Preferences/com.apple.windowserver DisplayResolutionEnabled -bool true
###############################################################################
# Finder                                                                      #
###############################################################################
# Finder: allow quitting via ⌘ + Q; doing so will also hide desktop icons
defaults write com.apple.finder QuitMenuItem -bool true
# Set Desktop as the default location for new Finder windows
# For other paths, use `PfLo` and `file:///full/path/here/`
defaults write com.apple.finder NewWindowTarget -string "PfDe"
defaults write com.apple.finder NewWindowTargetPath -string "file://${HOME}/Desktop/"
# Desktop icons: show external drives and removable media; keep internal hard
# drives and mounted servers hidden (note the false values below)
defaults write com.apple.finder ShowExternalHardDrivesOnDesktop -bool true
defaults write com.apple.finder ShowHardDrivesOnDesktop -bool false
defaults write com.apple.finder ShowMountedServersOnDesktop -bool false
defaults write com.apple.finder ShowRemovableMediaOnDesktop -bool true
# Finder: show all filename extensions
defaults write NSGlobalDomain AppleShowAllExtensions -bool true
# Finder: show status bar
defaults write com.apple.finder ShowStatusBar -bool true
# Finder: show path bar
defaults write com.apple.finder ShowPathbar -bool true
# Display full POSIX path as Finder window title
defaults write com.apple.finder _FXShowPosixPathInTitle -bool true
# Keep folders on top when sorting by name
defaults write com.apple.finder _FXSortFoldersFirst -bool true
# When performing a search, search the current folder by default
defaults write com.apple.finder FXDefaultSearchScope -string "SCcf"
# Disable the warning when changing a file extension
defaults write com.apple.finder FXEnableExtensionChangeWarning -bool false
# Enable spring loading for directories
defaults write NSGlobalDomain com.apple.springing.enabled -bool true
# Reduce the spring loading delay for directories
defaults write NSGlobalDomain com.apple.springing.delay -float 0.2
# Avoid creating .DS_Store files on network or USB volumes
defaults write com.apple.desktopservices DSDontWriteNetworkStores -bool true
defaults write com.apple.desktopservices DSDontWriteUSBStores -bool true
# Disable disk image verification
defaults write com.apple.frameworks.diskimages skip-verify -bool true
defaults write com.apple.frameworks.diskimages skip-verify-locked -bool true
defaults write com.apple.frameworks.diskimages skip-verify-remote -bool true
# Automatically open a new Finder window when a volume is mounted
defaults write com.apple.frameworks.diskimages auto-open-ro-root -bool true
defaults write com.apple.frameworks.diskimages auto-open-rw-root -bool true
defaults write com.apple.finder OpenWindowForNewRemovableDisk -bool true
# Enable snap-to-grid for icons on the desktop and in other icon views.
# PlistBuddy edits Finder's plist in place; the three keys cover the desktop,
# new ("FK_Standard") and existing ("Standard") icon-view windows.
/usr/libexec/PlistBuddy -c "Set :DesktopViewSettings:IconViewSettings:arrangeBy grid" ~/Library/Preferences/com.apple.finder.plist
/usr/libexec/PlistBuddy -c "Set :FK_StandardViewSettings:IconViewSettings:arrangeBy grid" ~/Library/Preferences/com.apple.finder.plist
/usr/libexec/PlistBuddy -c "Set :StandardViewSettings:IconViewSettings:arrangeBy grid" ~/Library/Preferences/com.apple.finder.plist
# Increase grid spacing for icons on the desktop and in other icon views
/usr/libexec/PlistBuddy -c "Set :DesktopViewSettings:IconViewSettings:gridSpacing 100" ~/Library/Preferences/com.apple.finder.plist
/usr/libexec/PlistBuddy -c "Set :FK_StandardViewSettings:IconViewSettings:gridSpacing 100" ~/Library/Preferences/com.apple.finder.plist
/usr/libexec/PlistBuddy -c "Set :StandardViewSettings:IconViewSettings:gridSpacing 100" ~/Library/Preferences/com.apple.finder.plist
# Increase the size of icons on the desktop and in other icon views
/usr/libexec/PlistBuddy -c "Set :DesktopViewSettings:IconViewSettings:iconSize 56" ~/Library/Preferences/com.apple.finder.plist
/usr/libexec/PlistBuddy -c "Set :FK_StandardViewSettings:IconViewSettings:iconSize 56" ~/Library/Preferences/com.apple.finder.plist
/usr/libexec/PlistBuddy -c "Set :StandardViewSettings:IconViewSettings:iconSize 56" ~/Library/Preferences/com.apple.finder.plist
# Use column view in all Finder windows by default
# Four-letter codes for the other view modes: `icnv`, `clmv`, `Flwv`, `Nlsv`
defaults write com.apple.finder FXPreferredViewStyle -string "clmv"
# Disable the warning before emptying the Trash
defaults write com.apple.finder WarnOnEmptyTrash -bool false
# Enable AirDrop over Ethernet and on unsupported Macs running Lion
defaults write com.apple.NetworkBrowser BrowseAllInterfaces -bool true
# Show the ~/Library folder
chflags nohidden ~/Library
# Show the /Volumes folder
sudo chflags nohidden /Volumes
# Expand the following File Info panes:
# “General”, “Open with”, and “Sharing & Permissions”
defaults write com.apple.finder FXInfoPanesExpanded -dict \
General -bool true \
OpenWith -bool true \
Privileges -bool true
###############################################################################
# Dock, Dashboard, and hot corners                                            #
###############################################################################
# Enable highlight hover effect for the grid view of a stack (Dock)
defaults write com.apple.dock mouse-over-hilite-stack -bool true
# Set the icon size of Dock items to 16 pixels
defaults write com.apple.dock tilesize -int 16
# Change minimize/maximize window effect
defaults write com.apple.dock mineffect -string "scale"
# Minimize windows into their application’s icon
defaults write com.apple.dock minimize-to-application -bool true
# Enable spring loading for all Dock items
defaults write com.apple.dock enable-spring-load-actions-on-all-items -bool true
# Show indicator lights for open applications in the Dock
defaults write com.apple.dock show-process-indicators -bool true
# Wipe all (default) app icons from the Dock
# This is only really useful when setting up a new Mac, or if you don’t use
# the Dock to launch apps.
defaults write com.apple.dock persistent-apps -array
# Show only open applications in the Dock
defaults write com.apple.dock static-only -bool true
# Don’t animate opening applications from the Dock
defaults write com.apple.dock launchanim -bool false
# Speed up Mission Control animations
defaults write com.apple.dock expose-animation-duration -float 0.1
# Automatically hide and show the Dock
defaults write com.apple.dock autohide -bool true
# Make Dock icons of hidden applications translucent
defaults write com.apple.dock showhidden -bool true
# Disable the Launchpad gesture (pinch with thumb and three fingers)
defaults write com.apple.dock showLaunchpadGestureEnabled -int 0
# Hot corners
# Possible values:
#  0: no-op
#  2: Mission Control
#  3: Show application windows
#  4: Desktop
#  5: Start screen saver
#  6: Disable screen saver
#  7: Dashboard
# 10: Put display to sleep
# 11: Launchpad
# 12: Notification Center
# The wvous-*-modifier value 1048576 is reportedly the ⌘ (Command) modifier
# mask, i.e. each corner only triggers while ⌘ is held — TODO confirm.
# Top left screen corner → Launchpad
defaults write com.apple.dock wvous-tl-corner -int 11
defaults write com.apple.dock wvous-tl-modifier -int 1048576
# Top right screen corner → Notification Center
defaults write com.apple.dock wvous-tr-corner -int 12
defaults write com.apple.dock wvous-tr-modifier -int 1048576
# Bottom left screen corner → Desktop
defaults write com.apple.dock wvous-bl-corner -int 4
defaults write com.apple.dock wvous-bl-modifier -int 1048576
# Bottom right screen corner → Mission Control
defaults write com.apple.dock wvous-br-corner -int 2
defaults write com.apple.dock wvous-br-modifier -int 1048576
###############################################################################
# Safari & WebKit                                                             #
###############################################################################
# NOTE(review): on recent macOS versions Safari's preferences are sandboxed;
# these `defaults write com.apple.Safari` calls may silently no-op unless the
# terminal has Full Disk Access — confirm on the target OS.
# Privacy: don’t send search queries to Apple
defaults write com.apple.Safari UniversalSearchEnabled -bool false
defaults write com.apple.Safari SuppressSearchSuggestions -bool true
# Press Tab to highlight each item on a web page
defaults write com.apple.Safari WebKitTabToLinksPreferenceKey -bool true
defaults write com.apple.Safari com.apple.Safari.ContentPageGroupIdentifier.WebKit2TabsToLinks -bool true
# Show the full URL in the address bar (note: this still hides the scheme)
defaults write com.apple.Safari ShowFullURLInSmartSearchField -bool true
# Set Safari’s home page to `about:blank` for faster loading
defaults write com.apple.Safari HomePage -string "about:blank"
# Prevent Safari from opening ‘safe’ files automatically after downloading
defaults write com.apple.Safari AutoOpenSafeDownloads -bool false
# Show Safari’s bookmarks bar by default
defaults write com.apple.Safari ShowFavoritesBar -bool true
# Show Safari’s sidebar in Top Sites
defaults write com.apple.Safari ShowSidebarInTopSites -bool true
# Disable Safari’s thumbnail cache for History and Top Sites
defaults write com.apple.Safari DebugSnapshotsUpdatePolicy -int 2
# Enable Safari’s debug menu
defaults write com.apple.Safari IncludeInternalDebugMenu -bool true
# Make Safari’s search banners default to Contains instead of Starts With
defaults write com.apple.Safari FindOnPageMatchesWordStartsOnly -bool false
# Remove useless icons from Safari’s bookmarks bar
defaults write com.apple.Safari ProxiesInBookmarksBar "()"
# Enable the Develop menu and the Web Inspector in Safari
defaults write com.apple.Safari IncludeDevelopMenu -bool true
defaults write com.apple.Safari WebKitDeveloperExtrasEnabledPreferenceKey -bool true
defaults write com.apple.Safari com.apple.Safari.ContentPageGroupIdentifier.WebKit2DeveloperExtrasEnabled -bool true
# Add a context menu item for showing the Web Inspector in web views
defaults write NSGlobalDomain WebKitDeveloperExtras -bool true
# Enable continuous spellchecking
defaults write com.apple.Safari WebContinuousSpellCheckingEnabled -bool true
# Disable auto-correct
defaults write com.apple.Safari WebAutomaticSpellingCorrectionEnabled -bool false
# Disable AutoFill
defaults write com.apple.Safari AutoFillFromAddressBook -bool false
defaults write com.apple.Safari AutoFillPasswords -bool false
defaults write com.apple.Safari AutoFillCreditCardData -bool false
defaults write com.apple.Safari AutoFillMiscellaneousForms -bool false
# Warn about fraudulent websites
defaults write com.apple.Safari WarnAboutFraudulentWebsites -bool true
# Enable plug-ins
defaults write com.apple.Safari WebKitPluginsEnabled -bool true
defaults write com.apple.Safari com.apple.Safari.ContentPageGroupIdentifier.WebKit2PluginsEnabled -bool true
# Disable Java
defaults write com.apple.Safari WebKitJavaEnabled -bool false
defaults write com.apple.Safari com.apple.Safari.ContentPageGroupIdentifier.WebKit2JavaEnabled -bool false
# Block pop-up windows
defaults write com.apple.Safari WebKitJavaScriptCanOpenWindowsAutomatically -bool false
defaults write com.apple.Safari com.apple.Safari.ContentPageGroupIdentifier.WebKit2JavaScriptCanOpenWindowsAutomatically -bool false
# Disable auto-playing video (both Safari and Safari Technology Preview)
defaults write com.apple.Safari WebKitMediaPlaybackAllowsInline -bool false
defaults write com.apple.SafariTechnologyPreview WebKitMediaPlaybackAllowsInline -bool false
defaults write com.apple.Safari com.apple.Safari.ContentPageGroupIdentifier.WebKit2AllowsInlineMediaPlayback -bool false
defaults write com.apple.SafariTechnologyPreview com.apple.Safari.ContentPageGroupIdentifier.WebKit2AllowsInlineMediaPlayback -bool false
# Enable “Do Not Track”
defaults write com.apple.Safari SendDoNotTrackHTTPHeader -bool true
# Update extensions automatically
defaults write com.apple.Safari InstallExtensionUpdatesAutomatically -bool true
###############################################################################
# Mail                                                                        #
###############################################################################
# Disable send and reply animations in Mail.app
# defaults write com.apple.mail DisableReplyAnimations -bool true
# defaults write com.apple.mail DisableSendAnimations -bool true
# Copy email addresses as `Foo Bar <foo@example.com>` instead of `foo@example.com` in Mail.app
defaults write com.apple.mail AddressesIncludeNameOnPasteboard -bool true
# Add the keyboard shortcut ⌘ + Enter to send an email in Mail.app
# ("@" is the ⌘ modifier; \U21a9 is ↩, the Return-key glyph)
defaults write com.apple.mail NSUserKeyEquivalents -dict-add "Send" "@\U21a9"
# Display emails in threaded mode, sorted by date (oldest at the top)
defaults write com.apple.mail DraftsViewerAttributes -dict-add "DisplayInThreadedMode" -string "yes"
# defaults write com.apple.mail DraftsViewerAttributes -dict-add "SortedDescending" -string "yes"
# defaults write com.apple.mail DraftsViewerAttributes -dict-add "SortOrder" -string "received-date"
# Disable inline attachments (just show the icons)
# defaults write com.apple.mail DisableInlineAttachmentViewing -bool true
# Disable automatic spell checking
# defaults write com.apple.mail SpellCheckingBehavior -string "NoSpellCheckingEnabled"
###############################################################################
# Spotlight                                                                   #
###############################################################################
# Hide Spotlight tray-icon (and subsequent helper)
# NOTE(review): writing under /System fails on modern macOS with System
# Integrity Protection enabled — confirm this is still possible/wanted.
sudo chmod 600 /System/Library/CoreServices/Search.bundle/Contents/MacOS/Search
# Disable Spotlight indexing for any volume that gets mounted and has not yet
# been indexed before.
# Use `sudo mdutil -i off "/Volumes/foo"` to stop indexing any volume.
sudo defaults write /.Spotlight-V100/VolumeConfiguration Exclusions -array "/Volumes"
# Change indexing order and disable some search results
# Yosemite-specific search results (remove them if you are using macOS 10.9 or older):
#   MENU_DEFINITION
#   MENU_CONVERSION
#   MENU_EXPRESSION
#   MENU_SPOTLIGHT_SUGGESTIONS (send search queries to Apple)
#   MENU_WEBSEARCH             (send search queries to Apple)
#   MENU_OTHER
defaults write com.apple.spotlight orderedItems -array \
'{"enabled" = 1;"name" = "APPLICATIONS";}' \
'{"enabled" = 1;"name" = "SYSTEM_PREFS";}' \
'{"enabled" = 1;"name" = "DIRECTORIES";}' \
'{"enabled" = 1;"name" = "PDF";}' \
'{"enabled" = 1;"name" = "FONTS";}' \
'{"enabled" = 1;"name" = "DOCUMENTS";}' \
'{"enabled" = 0;"name" = "MESSAGES";}' \
'{"enabled" = 0;"name" = "CONTACT";}' \
'{"enabled" = 0;"name" = "EVENT_TODO";}' \
'{"enabled" = 0;"name" = "IMAGES";}' \
'{"enabled" = 0;"name" = "BOOKMARKS";}' \
'{"enabled" = 0;"name" = "MUSIC";}' \
'{"enabled" = 0;"name" = "MOVIES";}' \
'{"enabled" = 0;"name" = "PRESENTATIONS";}' \
'{"enabled" = 0;"name" = "SPREADSHEETS";}' \
'{"enabled" = 0;"name" = "SOURCE";}' \
'{"enabled" = 0;"name" = "MENU_DEFINITION";}' \
'{"enabled" = 0;"name" = "MENU_OTHER";}' \
'{"enabled" = 0;"name" = "MENU_CONVERSION";}' \
'{"enabled" = 0;"name" = "MENU_EXPRESSION";}' \
'{"enabled" = 0;"name" = "MENU_WEBSEARCH";}' \
'{"enabled" = 0;"name" = "MENU_SPOTLIGHT_SUGGESTIONS";}'
# Load new settings before rebuilding the index
killall mds > /dev/null 2>&1
# Make sure indexing is enabled for the main volume
sudo mdutil -i on / > /dev/null
# Rebuild the index from scratch
sudo mdutil -E / > /dev/null
###############################################################################
# Terminal & iTerm 2                                                          #
###############################################################################
# Only use UTF-8 in Terminal.app (4 = NSUTF8StringEncoding)
defaults write com.apple.terminal StringEncodings -array 4
# Use a modified version of the Solarized Dark theme by default in Terminal.app
# (kept disabled — requires the .terminal theme file under $HOME/init)
# osascript <<EOD
# tell application "Terminal"
# 	local allOpenedWindows
# 	local initialOpenedWindows
# 	local windowID
# 	set themeName to "Solarized Dark xterm-256color"
# 	(* Store the IDs of all the open terminal windows. *)
# 	set initialOpenedWindows to id of every window
# 	(* Open the custom theme so that it gets added to the list
# 	   of available terminal themes (note: this will open two
# 	   additional terminal windows). *)
# 	do shell script "open '$HOME/init/" & themeName & ".terminal'"
# 	(* Wait a little bit to ensure that the custom theme is added. *)
# 	delay 1
# 	(* Set the custom theme as the default terminal theme. *)
# 	set default settings to settings set themeName
# 	(* Get the IDs of all the currently opened terminal windows. *)
# 	set allOpenedWindows to id of every window
# 	repeat with windowID in allOpenedWindows
# 		(* Close the additional windows that were opened in order
# 		   to add the custom theme to the list of terminal themes. *)
# 		if initialOpenedWindows does not contain windowID then
# 			close (every window whose id is windowID)
# 		(* Change the theme for the initial opened terminal windows
# 		   to remove the need to close them in order for the custom
# 		   theme to be applied. *)
# 		else
# 			set current settings of tabs of (every window whose id is windowID) to settings set themeName
# 		end if
# 	end repeat
# end tell
# EOD
# Enable “focus follows mouse” for Terminal.app and all X11 apps
# i.e. hover over a window and start typing in it without clicking first
#defaults write com.apple.terminal FocusFollowsMouse -bool true
#defaults write org.x.X11 wm_ffm -bool true
# Enable Secure Keyboard Entry in Terminal.app
# See: https://security.stackexchange.com/a/47786/8918
defaults write com.apple.terminal SecureKeyboardEntry -bool true
# Disable the annoying line marks
defaults write com.apple.Terminal ShowLineMarks -int 0
# Install the Solarized Dark theme for iTerm
# open "${HOME}/init/Solarized Dark.itermcolors"
# Don’t display the annoying prompt when quitting iTerm
# defaults write com.googlecode.iterm2 PromptOnQuit -bool false
###############################################################################
# Time Machine #
###############################################################################
# Prevent Time Machine from prompting to use new hard drives as backup volume
defaults write com.apple.TimeMachine DoNotOfferNewDisksForBackup -bool true
# Disable local Time Machine backups
# hash tmutil &> /dev/null && sudo tmutil disablelocal
###############################################################################
# Activity Monitor #
###############################################################################
# Show the main window when launching Activity Monitor
defaults write com.apple.ActivityMonitor OpenMainWindow -bool true
# Visualize CPU usage in the Activity Monitor Dock icon
# defaults write com.apple.ActivityMonitor IconType -int 5
# Show all processes in Activity Monitor
defaults write com.apple.ActivityMonitor ShowCategory -int 0
# Sort Activity Monitor results by CPU usage
defaults write com.apple.ActivityMonitor SortColumn -string "CPUUsage"
defaults write com.apple.ActivityMonitor SortDirection -int 0
###############################################################################
# Address Book, Dashboard, iCal, TextEdit, and Disk Utility #
###############################################################################
# Enable the debug menu in Address Book
# defaults write com.apple.addressbook ABShowDebugMenu -bool true
# Enable Dashboard dev mode (allows keeping widgets on the desktop)
# defaults write com.apple.dashboard devmode -bool true
# Enable the debug menu in iCal (pre-10.8)
# defaults write com.apple.iCal IncludeDebugMenu -bool true
# Use plain text mode for new TextEdit documents
defaults write com.apple.TextEdit RichText -int 0
# Open and save files as UTF-8 in TextEdit
defaults write com.apple.TextEdit PlainTextEncoding -int 4
defaults write com.apple.TextEdit PlainTextEncodingForWrite -int 4
# Enable the debug menu in Disk Utility
# defaults write com.apple.DiskUtility DUDebugMenuEnabled -bool true
# defaults write com.apple.DiskUtility advanced-image-options -bool true
# Auto-play videos when opened with QuickTime Player
defaults write com.apple.QuickTimePlayerX MGPlayMovieOnOpen -bool true
###############################################################################
# Mac App Store #
###############################################################################
# Enable the WebKit Developer Tools in the Mac App Store
defaults write com.apple.appstore WebKitDeveloperExtras -bool true
# Enable Debug Menu in the Mac App Store
# defaults write com.apple.appstore ShowDebugMenu -bool true
# Enable the automatic update check
defaults write com.apple.SoftwareUpdate AutomaticCheckEnabled -bool true
# Check for software updates daily, not just once per week
defaults write com.apple.SoftwareUpdate ScheduleFrequency -int 1
# Download newly available updates in background
defaults write com.apple.SoftwareUpdate AutomaticDownload -int 1
# Install System data files & security updates
defaults write com.apple.SoftwareUpdate CriticalUpdateInstall -int 1
# Automatically download apps purchased on other Macs
defaults write com.apple.SoftwareUpdate ConfigDataInstall -int 1
# Turn on app auto-update
defaults write com.apple.commerce AutoUpdate -bool true
# Disallow the App Store to reboot machine on macOS updates
defaults write com.apple.commerce AutoUpdateRestartRequired -bool false
###############################################################################
# Photos #
###############################################################################
# Prevent Photos from opening automatically when devices are plugged in
defaults -currentHost write com.apple.ImageCapture disableHotPlug -bool true
###############################################################################
# Messages #
###############################################################################
# Disable automatic emoji substitution (i.e. use plain text smileys)
defaults write com.apple.messageshelper.MessageController SOInputLineSettings -dict-add "automaticEmojiSubstitutionEnablediMessage" -bool false
# Disable smart quotes as it’s annoying for messages that contain code
defaults write com.apple.messageshelper.MessageController SOInputLineSettings -dict-add "automaticQuoteSubstitutionEnabled" -bool false
# Disable continuous spell checking
defaults write com.apple.messageshelper.MessageController SOInputLineSettings -dict-add "continuousSpellCheckingEnabled" -bool false
###############################################################################
# Google Chrome & Google Chrome Canary #
###############################################################################
# Disable the all too sensitive backswipe on trackpads
defaults write com.google.Chrome AppleEnableSwipeNavigateWithScrolls -bool false
defaults write com.google.Chrome.canary AppleEnableSwipeNavigateWithScrolls -bool false
# Disable the all too sensitive backswipe on Magic Mouse
defaults write com.google.Chrome AppleEnableMouseSwipeNavigateWithScrolls -bool false
defaults write com.google.Chrome.canary AppleEnableMouseSwipeNavigateWithScrolls -bool false
# Use the system-native print preview dialog
# defaults write com.google.Chrome DisablePrintPreview -bool true
# defaults write com.google.Chrome.canary DisablePrintPreview -bool true
# Expand the print dialog by default
# defaults write com.google.Chrome PMPrintingExpandedStateForPrint2 -bool true
# defaults write com.google.Chrome.canary PMPrintingExpandedStateForPrint2 -bool true
###############################################################################
# Kill affected applications #
###############################################################################
for app in "Activity Monitor" \
"Address Book" \
"Calendar" \
"cfprefsd" \
"Contacts" \
"Dock" \
"Finder" \
"Mail" \
"Messages" \
"Photos" \
"Safari" \
"SystemUIServer" \
"iCal"; do
killall "${app}" &> /dev/null
done
echo "Done. Note that some of these changes require a logout/restart to take effect."
printf "TODO:\n\
install: \n\
Gifski (App Store) \n\
Todoist (App Store) \n\
Camera Settings (https://prosupport.logi.com/hc/en-us/articles/360039591834-Downloads-BRIO) \n\
Audacity (https://www.audacityteam.org/download/) \n\
\n\
"
| true
|
533db3d308e0a10afa586aa67650173794b14275
|
Shell
|
JeremyGrosser/binscripts
|
/turbo
|
UTF-8
| 242
| 2.796875
| 3
|
[] |
no_license
|
#!/bin/bash
# Toggle Intel P-state turbo boost via sysfs.
# Usage: turbo on|off   (any argument other than "on" — or none — disables turbo)
#
# Fix: the original `[ $1 == "on" ]` emitted "unary operator expected" when the
# script was run without an argument; quoting with a default avoids that.
if [[ "${1:-}" == "on" ]]; then
    # 0 in no_turbo means "turbo allowed"
    sudo /bin/bash -c 'echo 0 >/sys/devices/system/cpu/intel_pstate/no_turbo'
    echo "TURBO ON"
else
    sudo /bin/bash -c 'echo 1 >/sys/devices/system/cpu/intel_pstate/no_turbo'
    echo "TURBO OFF"
fi
| true
|
33e4159c24376e246a2b6ed658b4bce64d2b69bb
|
Shell
|
solocell/gb_code
|
/superenhancer_analysis_02_calculate_sample_signal.sh
|
UTF-8
| 2,845
| 3.828125
| 4
|
[] |
no_license
|
#!/bin/bash
########################################
#
# superenhancer_analysis_02_calculate_sample_signal.sh
#
# Mike Fletcher
# 20201025
#
# (original name: superenhancers_subtype_02_calculate_sample_signal.sh)
#
########################################
#
# WHAT THIS DOES
#
########################################
#
# based on Carl's script define_enhancers.sh
#
# takes the 12.5kbp-stitched 'enhancer union' from each GBM subtype (bed file, output of script 01)
# uses bigWigAverageOverBed to calculate signal (for **all** samples, not just that subtype!) per-sample
#
########################################
# INPUTS
########################################
#
# for each subtype analysed: the stitched H3K27ac peak .bed file from superenhancer_analysis_01_prepare_data.Rscript
#
# for each sample: the H3K27ac signal intensities, in genome-wide .bw/.bigWig format
# available from GEO, see https://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=GSE121719
#
########################################
# OUTPUTS
########################################
#
# per-sample .tab files giving the signal per region
#
########################################
# RESOURCE USAGE
########################################
#
# bigWigAverageOverBed is very lightweight and runs quickly (<1min/sample)
#
########################################
##############################
#
# DEFINITIONS
#
##############################
# histone mark to use
mark="H3K27ac"
# data dir: view-by-pid dir for the ChIPseq data
datadir="/icgc/dkfzlsdf/analysis/hipo/hipo_016/chipseq_newAlignments/wgs_bowtie/"
# output dir: my analysis dir
outdir="/icgc/dkfzlsdf/analysis/hipo/hipo_016/analysis/chipseq_superenhancers/"
##############################
#
# START CODE
#
##############################
# work in my output dir; abort rather than run in the wrong directory
cd "${outdir}" || exit 1
# make new output dir for the per-sample signal files ('-p': no-op if it already exists)
mkdir -p "${outdir}/sample_signal/"
# use a private temp file for the stripped BED instead of the fixed name 'lite'
# in the working directory, and remove it on any exit (the original left it
# behind on failure and could collide with concurrent runs)
tmpbed=$(mktemp) || exit 1
trap 'rm -f "$tmpbed"' EXIT
echo "Scoring with bigwig ..."
for st in MES IDH RTK_I RTK_II; do
    echo "=== ${st} ==="
    # chrom/start/end (cols 1-3) plus peak ID (col 8), minus the header line
    cut -f1-3,8 "${outdir}/${st}_subtype_enhancer_union_12500_stitched.bed" | tail -n +2 > "$tmpbed"
    # score **all** samples (AK* directories) against this subtype's enhancer
    # union; glob instead of the original `ls | grep ^AK` parsing
    for sampledir in "${datadir}"/AK*/; do
        i=$(basename "$sampledir")
        echo "  $i"
        bw="${datadir}/${i}/${mark}/bw/${i}_${mark}_SES_subtract.bw"
        if [ -f "$bw" ]; then
            /ibios/tbi_cluster/11.4/x86_64/bin/bigWigAverageOverBed "$bw" "$tmpbed" "${outdir}/sample_signal/${i}_${mark}_in_${st}_enhancer_union.tab"
        fi
    done
done
| true
|
ddc4fc272a42d560c132a58676841d71b85cdac7
|
Shell
|
warrickball/s4tess
|
/runs/add_white_noise.sh
|
UTF-8
| 269
| 2.875
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# given a folder, create AADG3 input for one run
# Usage: add_white_noise.sh <run-dir>
set -e
# BASENAME=$(echo $1 | sed 's:/:_:g')
BASENAME=$(basename "$1")
# Run the converter from inside the target directory. A subshell restores the
# caller's cwd automatically; the original saved the directory in OLDPWD,
# which is a shell-special variable that `cd` itself overwrites.
(
    cd "$1"
    python3 ../../../scripts/add_white_noise.py "$BASENAME.atl" "$BASENAME.tri" "$BASENAME.asc" "${BASENAME}_WN.asc"
)
| true
|
23ba02cbb863e04b0c40141226a9c3878b17d0d5
|
Shell
|
DukeOfEtiquette/dotfiles
|
/profiles/ts3d/bin/clone_ts3d_docs.sh
|
UTF-8
| 448
| 2.640625
| 3
|
[] |
no_license
|
#!/bin/bash
# Clone all Tech Soft 3D documentation repositories into $TS3D_DOCS.
# Requires: TS3D_DOCS env var, SSH access to bitbucket.org/techsoft3d.
echo -e '!!!\nrun "eval $(ssh-agent) && ssh-add ~/.ssh/id_rsa" to stop constant asking of password\n!!!\n'
# Quote TS3D_DOCS: unquoted it broke on paths with spaces, and an empty value
# silently cloned into the current directory; the `|| exit` guards that too.
mkdir -p "$TS3D_DOCS"
cd "$TS3D_DOCS" || exit 1
echo -e "Cloning at $TS3D_DOCS\n"
# One clone per docs repo (same set as before, deduplicated into a loop).
for repo in com_docs hps_docs 3df_docs hex_docs pub_docs; do
    git clone "git@bitbucket.org:techsoft3d/${repo}.git"
done
echo -e "run 'docs' to go navigate to repos\n"
| true
|
d00b04c2271ed3bf695e21e0c1cd106cf991ba57
|
Shell
|
jsh/git-add-to
|
/t/3.t
|
UTF-8
| 285
| 2.75
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash -eu
# Test: 'git-add-to <repo> init' handles multiple path arguments at once.
# testenv.sh supplies the fixtures — presumably $repo, $clone and $products;
# TODO confirm against testenv.sh.
source testenv.sh
_testname ensure init is handled with multi-args
# ensure the products' timestamps land strictly after "now" for the check below
sleep 1
git-add-to $repo init {a..c}
# verify that they're still there
[ -d $repo ] && [ -d $clone ]
# verify that they're newly created
# (mtime-sorted listing must match atime-sorted listing: the products were
# just written, not merely read)
ls -lt $products | sort | diff - <( ls -ult $products | sort )
| true
|
ccc15b5e18db8c6cd3e7e3e377eaddf4a5c96802
|
Shell
|
cicdenv/cicdenv
|
/terraform/backend/bin/enable-ram-with-organizations.sh
|
UTF-8
| 448
| 2.546875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Enable AWS RAM resource sharing across the whole AWS Organization
# (a one-time, org-level switch) using admin credentials for 'main'.
set -eu -o pipefail
# Resolve this script's directory so relative paths work from any cwd.
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
pushd "$DIR/.." >/dev/null
# Refresh MFA-backed AWS credentials for the 'main' account.
../../bin/cicdctl creds aws-mfa main
# Source config
backend_config="../../terraform/backend-config.tfvars"
# Pull the region out of the shared Terraform backend config via hclq.
region=$(hclq -i "$backend_config" get --raw 'region')
AWS_PROFILE="admin-main"
AWS_OPTS="--profile=${AWS_PROFILE} --region=${region}"
# $AWS_OPTS intentionally unquoted: it must word-split into the two flags.
aws $AWS_OPTS ram enable-sharing-with-aws-organization
popd >/dev/null
| true
|
3ee5270bf1b3eafba6a2c858bbcde481713006bc
|
Shell
|
DnZmfr/realworldapp-app-gke
|
/mongodb-backup/backup.sh
|
UTF-8
| 475
| 3.65625
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Dump the MongoDB database, upload the gzipped archive to Google Cloud
# Storage, then delete the local copy.
# Requires in the environment: MONGODB_URI, GCS_BUCKET.
set -e
# Print a step header in bold (via tput) prefixed with ". ".
bold() {
  echo ". $(tput bold)" "$*" "$(tput sgr0)";
}
CURRENT_DT=$(date +%Y%m%d_%H%M)
BACKUP_FILENAME="mongodb_backup_${CURRENT_DT}.gz"
bold "Back-up mongoDB database to ${BACKUP_FILENAME}"
# Quote all expansions: connection URIs routinely contain '?', '&' and other
# glob/word-split hazards, and bucket paths may too (originals were unquoted).
mongodump --archive="${BACKUP_FILENAME}" --gzip --uri "${MONGODB_URI}"
bold "Copy backup file to Google Cloud Storage..."
gsutil cp "${BACKUP_FILENAME}" "${GCS_BUCKET}"
bold "Delete local backup file..."
rm -f "${BACKUP_FILENAME}"
bold "Done. Backup complete."
| true
|
621d1c7f1b8dd1ffb3660bad61abe1213904198e
|
Shell
|
Jaymon/.bash
|
/bin/search/whered
|
UTF-8
| 524
| 3.53125
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# -*- mode: sh -*-
# vi: set ft=sh :
###############################################################################
#
#? whered <NAME> -> find all directories matching <NAME>
#
###############################################################################
# Show usage (and exit 0) when help is requested or no name was supplied.
if [[ $1 == "--help" || $1 == "-h" || $# -lt 1 ]]; then
    printf 'usage: %s NAME\n' "$(basename "$0")"
    printf 'Find all directories matching <NAME>\n'
    exit 0
fi
# Echo the command being run to stderr, then search the whole filesystem
# for directories whose name matches (case-insensitively).
echo "sudo find / -type d -iname $1" 1>&2
sudo find / -type d -iname "$1"
| true
|
8c32cc86f87300b7360ef3437deb933b98e50385
|
Shell
|
MenkeTechnologies/zpwr
|
/autoload/common/zpwrgh
|
UTF-8
| 211
| 2.578125
| 3
|
[
"MIT"
] |
permissive
|
# -*- mode: sh -*-
# vim: set ft=sh:
# zpwr 'gh' verb: with no arguments, open this zpwr repository on GitHub;
# otherwise hand the arguments to the shared verbs dispatcher.
function zpwrgh(){

    # Quote $1: the original `test -z $1` word-splits under bash and errors
    # on multi-word first arguments; [[ -z "$1" ]] is robust either way.
    if [[ -z "$1" ]]; then
        zpwrOpenmygh "$ZPWR_GITHUB_ACCOUNT/$ZPWR_REPO_NAME"
    else
        source "$ZPWR_VERBS_FILE" "$@"
    fi
}

zpwrgh "$@"
| true
|
9cc4d05b9c52c39f529076a5589559340a55704d
|
Shell
|
gidoBOSSftw5731/ProjectJoker
|
/update.sh
|
UTF-8
| 469
| 3.53125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Update a dynamic-DNS record at joker.com.
# Usage: update.sh -h <hostname> -u <username> -p <password>
hostname=''
username=''
password=''

# Print the USAGE file shipped next to this script.
print_usage() {
    cat USAGE
}

while getopts 'h:u:p:' flag; do
    case "${flag}" in
        h) hostname="${OPTARG}" ;;
        u) username="${OPTARG}" ;;
        p) password="${OPTARG}" ;;
        *) print_usage
           exit 1 ;;
    esac
done

# All three options are mandatory. Quote the variables: the original
# unquoted `[ -z $hostname ]` broke ("binary operator expected") on
# values containing spaces.
if [ -z "$hostname" ] || [ -z "$password" ] || [ -z "$username" ]
then
    print_usage
    exit 1
fi

# NOTE(review): credentials travel in the query string as required by the
# joker.com nic/update API, but the full URL (including the password) can
# end up in local shell history/process listings — confirm acceptable.
curl "https://svc.joker.com/nic/update?username=${username}&password=${password}&hostname=${hostname}"
| true
|
6f1d8ef9f2468bfc3604c6114ee9fc9178db9392
|
Shell
|
BxDorn/bashmap
|
/client_bashmap_tcp.sh
|
UTF-8
| 803
| 3.640625
| 4
|
[] |
no_license
|
#!/bin/bash
# Interactive iperf3 fan-out client: launches N background iperf3 threads,
# each with 100 parallel streams, against consecutive destination ports
# starting at 5201. Pair with the matching server script on the destination.
# Introduction
echo "This is a Single source / destination threading application
with a ratio of 100:1 source/destination ports per thread.
Ensure you have the server running on the destination host"
# Read the destination IP address ('-r' so backslashes are taken literally).
echo "What is the IP of the destination?"
read -r dst
# Number of threads
echo "This script will initiate 100 streams per thread, how many threads
should be initiated?"
read -r sockets
# throughput (iperf3 treats -b 0 as unlimited)
echo "how many bits / sec? (enter [0] for unlimited"
read -r speed
# How long each thread should run
echo "how long would you like to run each thread (in seconds)?"
read -r t_time
dst_port=5201
counter=1
while [ "$counter" -le "$sockets" ]; do
    echo "Building Thread $counter"
    iperf3 -c "$dst" -p "$dst_port" -P 100 -i 0 -t "$t_time" -b "$speed" &
    counter=$((counter + 1))
    dst_port=$((dst_port + 1))
done
# Don't exit (and orphan the background iperf3 jobs) until all threads finish.
wait
| true
|
ff881583402e52e9499804103c26109f020fe31f
|
Shell
|
tikalk/os-images
|
/buildfile.sh
|
UTF-8
| 338
| 2.84375
| 3
|
[] |
no_license
|
#!/bin/bash
# Build the CentOS 6.5 image with Packer, pinning the baked-in Chef and
# Puppet versions and pulling packages from the local mirror.
PACKERCMD=$(command -v packer)
CURRENT_PUPPET_VERSION='3.5.1'
CURRENT_CHEF_VERSION='11.12.0-1'
# Bail out if packer is missing. The original only printed the warning and
# then ran the build anyway, which failed with a confusing "command not
# found" after the message.
if [[ -z ${PACKERCMD} ]]; then
    echo "Please install packer, and re-run $0"
    exit 1
fi
packer build -var 'mirror=http://192.168.5.18' -var "chef_version=${CURRENT_CHEF_VERSION}" -var "puppet_version=${CURRENT_PUPPET_VERSION}" templates/CentOS_6.5.json
| true
|
ee523f687a77a2fdc56d0bd0759a8f729d04880b
|
Shell
|
p-l-/blackarch
|
/packages/veil/PKGBUILD
|
UTF-8
| 1,670
| 3.0625
| 3
|
[] |
no_license
|
# This file is part of BlackArch Linux ( https://www.blackarch.org/ ).
# See COPYING for license details.
# Arch/BlackArch PKGBUILD for the Veil payload-generation framework,
# built from the upstream git HEAD plus the VeilDependencies repo.
pkgname=veil
pkgver=287.d4dd4ff
pkgrel=1
epoch=5
groups=('blackarch' 'blackarch-automation' 'blackarch-exploitation')
pkgdesc='A tool designed to generate metasploit payloads that bypass common anti-virus solutions.'
url='https://github.com/Veil-Framework/Veil'
arch=('x86_64')
license=('GPL2')
depends=('sudo' 'wine' 'wine-mono' 'wine_gecko' 'git' 'python'
'python-pycryptodome' 'python-pefile' 'mingw-w64-binutils'
'mingw-w64-crt' 'mingw-w64-gcc' 'mingw-w64-headers'
'mingw-w64-winpthreads' 'mono' 'mono-tools' 'python-pip' 'wget' 'unzip'
'ruby' 'python-capstone' 'ca-certificates' 'metasploit' 'capstone'
'pyinstaller')
makedepends=('git')
# keep binaries unstripped
options=('!strip')
# git sources: checksums are SKIP since HEAD moves
source=("$pkgname::git+https://github.com/Veil-Framework/Veil.git"
'git+https://github.com/Veil-Framework/VeilDependencies.git')
sha512sums=('SKIP'
'SKIP')
install="$pkgname.install"
# Version derived from git: <commit count>.<short hash>.
pkgver() {
cd $pkgname
echo $(git rev-list --count HEAD).$(git rev-parse --short HEAD)
}
# Drop the pre-fetched VeilDependencies payloads into Veil's config dir.
prepare() {
cd $pkgname
cp -a "$srcdir/VeilDependencies/"* config/.
}
# Install everything under /usr/share/veil plus docs/license, and generate
# a /usr/bin/veil wrapper that runs Veil.py from the install directory.
package() {
cd $pkgname
install -dm 755 "$pkgdir/usr/bin"
install -dm 755 "$pkgdir/usr/share/$pkgname"
install -Dm 644 -t "$pkgdir/usr/share/doc/$pkgname/" README.md CHANGELOG
install -Dm 644 LICENSE "$pkgdir/usr/share/licenses/$pkgname/LICENSE"
rm CHANGELOG LICENSE README.md
cp -a * "$pkgdir/usr/share/$pkgname"
cat > "$pkgdir/usr/bin/$pkgname" << EOF
#!/bin/sh
cd /usr/share/$pkgname/
exec python Veil.py "\$@"
EOF
chmod +x "$pkgdir/usr/bin/$pkgname"
}
| true
|
eaf02cbe3ae75ac6699c2c7325078fb848b3c780
|
Shell
|
lia2790/xbot2_examples
|
/docker/run.sh
|
UTF-8
| 808
| 2.734375
| 3
|
[] |
no_license
|
#!/bin/bash
# Launch the xbot2 examples container with X11/GPU forwarding and the local
# sources, world and config mounted into the container's workspace.
set -e
# get path to script and change working directory
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
cd $DIR/..
# DISPLAY + the X11 socket let GUI apps render on the host's X server;
# /dev/dri gives direct rendering; QT_X11_NO_MITSHM avoids shared-memory
# issues for Qt apps inside containers; port 8080 is published to the host.
docker run -it \
--env="DISPLAY" \
--env="QT_X11_NO_MITSHM=1" \
--device=/dev/dri:/dev/dri \
--volume="/tmp/.X11-unix:/tmp/.X11-unix:rw" \
--name xbot2examples \
--publish 8080:8080 \
-v $(pwd)/src:/home/user/src/xbot2_examples/src \
-v $(pwd)/world:/home/user/src/xbot2_examples/world \
-v $(pwd)/config:/home/user/src/xbot2_examples/config \
-v $(pwd)/CMakeLists.txt:/home/user/src/xbot2_examples/CMakeLists.txt \
arturolaurenzi/xbot2:examples bash
#
#-v $(pwd)/world:/home/user/src/xbot2_examples/world \
#-v $(pwd)/config:/home/user/src/xbot2_examples/config \
#-v $(pwd)/CMakeLists.txt:/home/user/src/xbot2_examples/CMakeLists.txt \
| true
|
73f9f6d88eb9fee329837b9c1cf051dfefc302a8
|
Shell
|
bsc-wdc/compss
|
/compss/programming_model/bindings/python/scripts/security/check_bandit.sh
|
UTF-8
| 274
| 2.78125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Run the Bandit security linter over the PyCOMPSs Python sources.
CURRENT_DIR="$(pwd)"
# Directory containing this script, so it can be invoked from anywhere.
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
cd "${SCRIPT_DIR}/../../src/pycompss/"
##################
## Check Pylint ##
##################
# NOTE(review): the banner above says "Pylint" but this runs bandit.
# Recursive scan, configured from the project's pyproject.toml.
bandit -r -c ../../pyproject.toml .
# Restore the caller's working directory.
cd "${CURRENT_DIR}"
# Exit all ok
# NOTE(review): the unconditional 'exit 0' discards bandit's exit status,
# so CI never fails on findings — confirm this is intentional.
exit 0
| true
|
8edbbf2e2756edb7226001dba3b300fdc1020ed4
|
Shell
|
RobertZenz/Bivalvia
|
/DownloadHelpers/machall.sh
|
UTF-8
| 716
| 3.8125
| 4
|
[] |
no_license
|
#!/usr/bin/env sh
# Downloads all MacHall comics.
# http://www.machall.com
#
# Public Domain or CC0
# Note that the comics this script downloads are under copyright.
#
# Usage: machall.sh
# Just execute it in the directory in which you want the images.
# We can use a fixed set of years as there are no more comics coming, sadly.
years="2000 2001 2002 2003 2004 2005 2006"
archiveUrl=http://www.machall.com/archive.php?year=
imageUrl=http://www.machall.com/comics/
imageUrlEnd=.jpg
for year in $years; do
    # Scrape the year's archive page for YYYY-MM-DD comic dates, squashed to
    # YYYYMMDD ids. URL is quoted: it contains '?', a glob character.
    # (grep -E replaces the deprecated egrep.)
    comics=$(curl --silent "${archiveUrl}${year}" | grep -E -o "[0-9]+-[0-9]+-[0-9]+" | tr -d "-")
    # '-p' so re-running the script doesn't fail on existing year directories
    mkdir -p "$year"
    cd "$year" || exit 1
    for comic in $comics; do
        wget "${imageUrl}${comic}${imageUrlEnd}"
    done
    # back to the parent ('cd -' also prints the path, so silence it)
    cd - >/dev/null || exit 1
done
| true
|
c9bf809a7c0118760798fbdf7da6a3f41c71a744
|
Shell
|
mwall/auditr
|
/api
|
UTF-8
| 153
| 2.84375
| 3
|
[] |
no_license
|
#!/bin/bash
# Run the auditr-api Play console, forwarding all CLI arguments unchanged.
set -e
if [ ! -f auditr-api/play ]; then
    echo "You must run script/bootstrap to setup the environment"
    exit 1
fi
cd auditr-api
# "$@" (quoted) — the original's bare $@ re-split arguments containing spaces.
./play "$@"
| true
|
ddd81a37066a15945a62a9004d22f3b45e189f93
|
Shell
|
Cryolite/intro
|
/template/x86_64-unknown-linux-gnu/asan2line
|
UTF-8
| 1,748
| 3.875
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Run a program under AddressSanitizer and post-process its ASan log,
# rewriting raw "(binary+0xADDR)" stack frames into file:line form via
# addr2line. Exits with the wrapped program's exit status.
if test $# -eq 0; then
echo "$0: missing program name" >&2
echo "Try \`$0 --help' for more information." >&2
exit 1
fi
prog="$1"
shift
# ASan appends ".<pid>" to log_path; we capture the subshell's PID to find it.
asan_output="${TMPDIR:-/tmp}/asan_output"
pid_output=`mktemp ${TMPDIR:-/tmp}/pid_output.XXXXXX` || exit
(echo -n $BASHPID > "$pid_output"
# Redirect stderr to avoid termination message from shell.
ASAN_OPTIONS="log_path=$asan_output" exec 3>&2 2>/dev/null "$prog" ${1+"$@"} 2>&3 3>&-)
exval=$?
pid=`cat "$pid_output"`
rm -f "$pid_output"
asan_output="$asan_output.$pid"
# Only post-process when ASan actually wrote a (non-empty) report.
if test -s "$asan_output"; then
cat "$asan_output" |
(while IFS=$'\n' read line; do
# Frame lines look like: "    #3 0xADDR (/path/to/bin+0xOFF)"
if echo -n "$line" | grep -Eq '^[[:space:]]*#[[:digit:]]+[[:space:]]*0x[[:xdigit:]]+[[:space:]]*\(.+\+0x[[:xdigit:]]+\)$'; then
# keep the original leading whitespace + frame number as a prefix
indent=`echo -n "$line" | grep -Eo '^[[:space:]]*#[[:digit:]]+[[:space:]]*'`
# extract "binary+0xOFF", then split into binary path and offset
tmp=`echo -n "$line" | sed -e 's/^[[:space:]]*#[[:digit:]]*[[:space:]]*0x[[:xdigit:]]*[[:space:]]*(\(.*\))$/\1/'`
exe=`echo -n "$tmp" | sed -e 's/^\(.*\)+0x[[:xdigit:]]*$/\1/'`
addr=`echo -n "$tmp" | sed -e 's/^.*+\(0x[[:xdigit:]]*\)$/\1/'`
# symbolize; strip addr2line's "(discriminator N)" suffix
addr2line_output=`addr2line --exe="$exe" -fCp $addr | sed -e 's/ (discriminator [[:digit:]]\{1,\})$//'`
file=`echo "$addr2line_output" | grep -Eo ' at .+:[[:digit:]]+$' | sed -e 's/ at \(.\{1,\}\):[[:digit:]]\{1,\}$/\1/'`
if test ! -e "$file"; then
addr2line_output="$addr2line_output (file does not exist)"
fi
# a multi-line addr2line answer means the lookup was ambiguous/failed
if test `echo "$addr2line_output" | wc -l` -eq 1; then
echo "$indent$addr2line_output"
else
echo "$indent(function name, file name or line number can not be determined)"
fi
else
# non-frame lines pass through untouched
echo "$line"
fi
done)
fi
rm -f "$asan_output"
exit $exval
| true
|
3474453bd893ffe59d594320dd1c43e9c2dac72a
|
Shell
|
Foxboron/goto
|
/bin/goto
|
UTF-8
| 1,740
| 3.953125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# goto: project-aware directory/file jumper. Must be *sourced* (via the
# install_goto shell hook) so that the `cd` calls below affect the caller's
# shell; running it as a plain script only prints setup instructions.
# Checking if we are sourced
# see: https://stackoverflow.com/questions/2683279/how-to-detect-if-a-script-is-being-sourced
([[ -n $ZSH_EVAL_CONTEXT && $ZSH_EVAL_CONTEXT =~ :file$ ]] ||
[[ -n $BASH_VERSION ]] && (return 0 2>/dev/null)) && sourced=0 || sourced=1
# If goto is not run in sourced mode, it is not setup properly yet
if [[ $sourced -eq 1 ]]; then
echo "Ah Hoy!"
echo
echo "In order to make goto work properly, run this command now:"
echo
echo " install_goto"
echo
exit 1
fi
# GOTOPATH may be set from environment,
# if not default to ~/.goto
if [[ -z "$GOTOPATH" ]]; then
GOTOPATH="${HOME}/.goto"
fi
# active-project names the current project; its JSON file holds the mappings.
PROJECT=$(cat "${GOTOPATH}/active-project")
PROJECTFILE="${GOTOPATH}/projects/${PROJECT}.json"
# Catching deactivated state
if [ -z "$PROJECT" ]; then
echo "Ah hoy!"
echo
echo "Error: Goto has no active project selected currently. "
echo
echo
echo "To activate a project type:"
echo
echo " project <project-name>"
echo
echo
echo "For more help about projects in goto, type:"
echo
echo " project help"
echo
return 1
fi
# Special case 1 (goto cd <magicword>)
if [ "$1" = "cd" ]; then
# hack to cd in this shell to the ouput of goto show <magicword>
path=$(goto show "$2")
cd "$path"
return 0
fi
# Special case 2 (goto <magicword>)
if [ "$#" -eq 1 ]; then
# if run like: goto <magicword>
path=$(goto show "$1")
# if path is folder, cd to folder
if [ -d "$path" ]; then
cd "$path"
return 0
# if path is file, open file
elif [ -f "$path" ]; then
goto open "$1"
return 0
fi
fi
# General case
# Everything else is handled by the Python backend against the project file.
the_real_goto.py "$PROJECTFILE" "$@"
| true
|
916190a68b7ba4910a832954e5a96c896f2eac68
|
Shell
|
halka/deploy-ovpn-for-aws-client-vpn
|
/deploy-ovpn-for-aws-client-vpn.sh
|
UTF-8
| 4,920
| 3.65625
| 4
|
[] |
no_license
|
#!/bin/bash
#######################################
# Deploy a ovpn file for aws client vpn
# Arguments:
# $1: OpenVPN file
# e.g.) file://tmp/aws-vpn.ovpn
# $2: Profile name displayed in AWS VPN Client
# e.g.) aws-vpn
# $3: CvpnEndpointId
# e.g.) cvpn-endpoint-XXXXXXXXXXXXXXXXX
# $4: CvpnEndpointRegion
# e.g.) ap-northeast-1
# $5: CompatibilityVersion
# 1 : Use mutual authentication
# : Use Active Directory authentication
# 2 : Use Federated authentication
# $6: FederatedAuthType
# 0 : Use mutual authentication
# : Use Active Directory authentication
# 1 : Use Federated authentication
# If you do not know the Arguments, please check the following file path.
# {LOGGED_IN_USER}/.config/AWSVPNClient/ConnectionProfiles
#######################################
# TODO(enpipi) : Checking the behavior when using Active Directory authentication. (enhancement #1)
# Script version, printed by the 'version' argument handled below.
VERSION='0.2.0'
# Pin PATH to system locations so behavior is identical under management
# tools (e.g. Jamf) and interactive shells.
export PATH=/usr/bin:/bin:/usr/sbin:/sbin
# Print an informational message to stdout, prefixed with a
# "YYYY-MM-DD HH:MM:SS [INFO]" timestamp header.
print_info_log(){
    printf '%s [INFO] %s\n' "$(date '+%F %T')" "$1"
}
# Print an error message to stdout, prefixed with a
# "YYYY-MM-DD HH:MM:SS [ERROR]" timestamp header.
print_error_log(){
    printf '%s [ERROR] %s\n' "$(date '+%F %T')" "$1"
}
# Check for the existence of aws client vpn
if [[ ! -e "/Applications/AWS VPN Client/AWS VPN Client.app" ]];then
    print_error_log "It seems that the AWS VPN Clinet is not installed. Please install it and try again."
    exit 1
fi
if [[ "${1}" = "/" ]];then
    # Jamf uses sends '/' as the first argument
    print_info_log "Shifting arguments for Jamf."
    shift 3
fi
# Case-insensitive "version" check. The original used "${1:l}", which is zsh
# syntax: bash parses it as a substring expansion (offset 'l' == 0) and never
# lower-cases. macOS ships bash 3.2 (no ${1,,}), hence the portable tr pipe.
if [[ "$(printf '%s' "${1}" | tr '[:upper:]' '[:lower:]')" = "version" ]];then
    echo "${VERSION}"
    exit 0
fi
if [[ ! "${1}" ]];then
    print_error_log "You need to set ovpn file location."
    exit 1
fi
OVPN_FILE_PATH="${1}"
# TODO(enpipi): Check .ovpn file
# print_error_log "File format is not ovpn. You need to set .ovpn file."
if [[ ! "${2}" ]];then
    print_error_log "You need to set aws vpn client profile name."
    exit 1
fi
PRFILE_NAME="${2}"
# TODO(enpipi): Only alphanumeric characters and " , -, _, (,)" can be used for display name.
# print_error_log "Only alphanumeric characters and " , -, _, (,)" can be used for display name."
if [[ ! "${3}" ]];then
    print_error_log "You need to set CvpnEndpointId."
    exit 1
fi
C_VPN_ENDPOINT_ID="${3}"
if [[ ! "${4}" ]];then
    print_error_log "You need to set CvpnEndpointRegion."
    exit 1
fi
C_VPN_ENDPOINT_REGION="${4}"
if [[ ! "${5}" ]];then
    print_error_log "You need to set CompatibilityVersion."
    exit 1
fi
COMATIBILITY_VERSION="${5}"
if [[ ! "${6}" ]];then
    print_error_log "You need to set FederatedAuthType."
    exit 1
fi
FEDERATED_AUTH_TYPE="${6}"
print_info_log "Start aws vpn client profile deplyment..."
# Launch and exit the application to generate the initial config file.
# If you don't do this, the application won't launch properly even if you place the ovpn file in the config.
# TODO: Find a way to get the difference when adding and not launch the application.
open -j -a "/Applications/AWS VPN Client/AWS VPN Client.app"
osascript -e 'quit app "AWS VPN Client.app"'
# Find the loggedInUser
LOGGED_IN_USER=$(stat -f %Su /dev/console)
# Set the file path to the ConnectionProfiles file with the loggedIn user
CONNECTION_PROFILES="/Users/$LOGGED_IN_USER/.config/AWSVPNClient/ConnectionProfiles"
OPEN_VPN_CONFIGS_DIRECTORY="/Users/$LOGGED_IN_USER/.config/AWSVPNClient/OpenVpnConfigs"
# Delete auth-federate in OVPN_FILE_PATH
print_info_log "delete auth-federate in ${OVPN_FILE_PATH}"
# BSD/macOS sed requires an (empty) backup-extension argument after -i;
# the original 'sed -i -e' left a stray "<file>-e" backup file behind.
sed -i '' -e '/auth-federate/d' "${OVPN_FILE_PATH}"
# Copy and rename ovpn file
print_info_log "copy and rename ovpn file from ${OVPN_FILE_PATH} to ${OPEN_VPN_CONFIGS_DIRECTORY}/${PRFILE_NAME}"
cp "${OVPN_FILE_PATH}" "${OPEN_VPN_CONFIGS_DIRECTORY}/${PRFILE_NAME}"
# Get backup of ConnectionProfiles
print_info_log "Get backup of ${CONNECTION_PROFILES}"
CONNECTION_PROFILES_BACKUP="/Users/$LOGGED_IN_USER/.config/AWSVPNClient/_ConnectionProfiles"
cp "$CONNECTION_PROFILES" "$CONNECTION_PROFILES_BACKUP"
# Make the file
# TODO(enpipi): Add the profile if it already exists, or overwrite it if it doesn't.
# We need to realize this TODO with awk and sed.
# This is because we have to assume that the terminal does not have JQ installed on it.
cat <<EOF > "$CONNECTION_PROFILES"
{
    "Version":"1",
    "LastSelectedProfileIndex":0,
    "ConnectionProfiles":[
        {
            "ProfileName":"${PRFILE_NAME}",
            "OvpnConfigFilePath":"/Users/$LOGGED_IN_USER/.config/AWSVPNClient/OpenVpnConfigs/${PRFILE_NAME}",
            "CvpnEndpointId":"${C_VPN_ENDPOINT_ID}",
            "CvpnEndpointRegion":"${C_VPN_ENDPOINT_REGION}",
            "CompatibilityVersion":"${COMATIBILITY_VERSION}",
            "FederatedAuthType":${FEDERATED_AUTH_TYPE}
        }
    ]
}
EOF
print_info_log "End aws vpn client profile deplyment..."
# Fix permissions
chown "$LOGGED_IN_USER" "$CONNECTION_PROFILES"
| true
|
a7ecc3aeb79bca5ddfb69e3a93d6dfc1df1c6ec9
|
Shell
|
mclarsen/build-test
|
/configs/setup-spack.sh
|
UTF-8
| 3,975
| 3.65625
| 4
|
[] |
no_license
|
#!/bin/bash
#Script for copying the recommended configuration for Spack onto your system
#for building Nalu-Wind, be it on any systems listed below
if [ -z "${SPACK_ROOT}" ]; then
echo "SPACK_ROOT must be set first"
exit 1
fi
set -e
OS=$(uname -s)
#Use kind of ridiculous logic to find the machine name
# Detection order: macOS version -> NERSC host -> NREL marker file ->
# DNS domain -> full hostname -> short hostname.
if [ ${OS} == 'Darwin' ]; then
MACHINE=mac
OSX=$(sw_vers -productVersion)
case "${OSX}" in
10.12*)
MACHINE=mac_sierra
;;
10.13*)
MACHINE=mac_high_sierra
;;
esac
elif [ ${OS} == 'Linux' ]; then
case "${NERSC_HOST}" in
cori)
MACHINE=cori
;;
"")
if [ -f "/etc/nrel" ]; then
MACHINE=merlin
else
MYHOSTNAME=$(hostname -d)
case "${MYHOSTNAME}" in
hpc.nrel.gov)
MACHINE=peregrine
;;
mcp.alcf.anl.gov)
MACHINE=mira
;;
fst.alcf.anl.gov)
MACHINE=mira
;;
ices.utexas.edu)
MACHINE=ices
;;
esac
MYHOSTNAME=$(hostname)
case "${MYHOSTNAME}" in
mutrino)
MACHINE=mutrino
;;
esac
MYHOSTNAME=$(hostname -s)
case "${MYHOSTNAME}" in
rhodes)
MACHINE=rhodes
;;
esac
fi
;;
esac
fi
# Copy machine-specific configuration for Spack if we recognize the machine
if [ "${MACHINE}" == 'peregrine' ] || \
[ "${MACHINE}" == 'rhodes' ] || \
[ "${MACHINE}" == 'merlin' ] || \
[ "${MACHINE}" == 'cori' ] || \
[ "${MACHINE}" == 'mira' ] || \
[ "${MACHINE}" == 'mutrino' ] || \
[ "${MACHINE}" == 'ices' ] || \
[ "${MACHINE}" == 'mac' ] || \
[ "${MACHINE}" == 'mac_sierra' ] || \
[ "${MACHINE}" == 'mac_high_sierra' ]; then
printf "Machine is detected as ${MACHINE}.\n"
#All machines do this
# (set -x; ...) prints each copy command as it runs, for transparency.
(set -x; cp machines/${MACHINE}/*.yaml ${SPACK_ROOT}/etc/spack/)
(set -x; cp -R custom-package-files/nalu-wind ${SPACK_ROOT}/var/spack/repos/builtin/packages/)
(set -x; cp -R custom-package-files/catalyst-ioss-adapter ${SPACK_ROOT}/var/spack/repos/builtin/packages/)
(set -x; cp -R custom-package-files/freetype ${SPACK_ROOT}/var/spack/repos/builtin/packages/)
#Extra stuff for peregrine
#if [ ${MACHINE} == 'peregrine' ]; then
#nothing at the moment
#fi
#Extra stuff for rhodes
#if [ ${MACHINE} == 'rhodes' ]; then
#nothing at the moment
#fi
#Extra stuff for merlin
#if [ ${MACHINE} == 'merlin' ]; then
#nothing at the moment
#fi
#Extra stuff for cori
if [ ${MACHINE} == 'cori' ]; then
(set -x; cp -R machines/${MACHINE}/netcdf ${SPACK_ROOT}/var/spack/repos/builtin/packages/)
fi
#Extra stuff for mutrino
if [ ${MACHINE} == 'mutrino' ]; then
(set -x; cp -R machines/${MACHINE}/netcdf ${SPACK_ROOT}/var/spack/repos/builtin/packages/)
fi
#Extra stuff for mira
if [ ${MACHINE} == 'mira' ]; then
(set -x; cp -R machines/${MACHINE}/nalu ${SPACK_ROOT}/var/spack/repos/builtin/packages/)
(set -x; cp -R machines/${MACHINE}/trilinos ${SPACK_ROOT}/var/spack/repos/builtin/packages/)
(set -x; cp -R machines/${MACHINE}/superlu ${SPACK_ROOT}/var/spack/repos/builtin/packages/)
fi
#Extra stuff for ices
#if [ ${MACHINE} == 'ices' ]; then
#nothing at the moment
#fi
#Extra stuff for macs
#if [ ${MACHINE} == 'mac' ] || [ "${MACHINE}" == 'mac_sierra' ] || [ "${MACHINE}" == 'mac_high_sierra' ]; then
#nothing at the moment
#fi
#Use branch instead of tag so spack will checkout a real git repo instead of caching a tar.gz of a branch
# BSD sed (mac) needs the empty '' extension after -i; GNU sed (linux) does not.
if [ ${MACHINE} == 'mac' ] || [ "${MACHINE}" == 'mac_sierra' ] || [ "${MACHINE}" == 'mac_high_sierra' ]; then
(set -x; sed -i "" -e "s/tag=/branch=/g" ${SPACK_ROOT}/var/spack/repos/builtin/packages/trilinos/package.py)
else
(set -x; sed -i "s/tag=/branch=/g" ${SPACK_ROOT}/var/spack/repos/builtin/packages/trilinos/package.py)
fi
else
printf "\nMachine name not found.\n"
fi
| true
|
672b032d93f9ec45bccd45727c91f3f4ec807def
|
Shell
|
jembi/openhim-mediator-mapping
|
/packaging/build-release-zip.sh
|
UTF-8
| 1,122
| 4.21875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Build a deployable zip for a specific tagged release of the
# OpenHIM Mediator Mapping. Works from a throw-away git branch so the
# working tree is left on master afterwards.
set -eu
if (( $# < 1)); then
  echo "OpenHIM Mediator Mapping release build: Builds a specific tagged release ready for deployment";
  echo "Usage: $0 TAG";
  exit 0;
fi
tag=$1;
shift;
# Warn before touching the repository: uncommitted changes are discarded.
echo "NB!"
echo "To create the tagged build, various git interactions need to take place. "
echo "This will create a temporary branch as well as remove any changes you have havent yet committed"
read -p "Do you wish to proceed? [Y/y]" -n 1 -r
echo ""
if [[ $REPLY =~ ^[Yy]$ ]]; then
  # Script lives in packaging/; the repo root is one level up.
  cd ../
  echo "Git: setup branch/tag"
  git checkout -- .
  git checkout master
  git pull origin master
  git fetch --tags
  # Detach onto the tag via a temporary branch named after it.
  git checkout tags/$tag -b "build-release-$tag"
  echo "yarn: clean and build package"
  rm -rf node_modules
  yarn
  # yarn run build
  echo "zip: build release version: $tag"
  # Only ship runtime files (-i include list); -r recurses from the repo root.
  zip \
    -i 'src/*' 'node_modules/*' 'mediatorConfig.json' 'LICENSE' 'package.json' 'yarn.lock' 'README.md' \
    -r packaging/build.openhim-mediator-mapping.$tag.zip .
  echo "Git cleanup"
  git checkout -- .
  git checkout master
  git branch -D "build-release-$tag"
  echo "New OpenHIM Mediator Mapping build zipped";
fi
| true
|
62af5b9839cf44924da7f556c5d9a3f09317c81e
|
Shell
|
hasselyoko/ADS
|
/tools/90_run_tests.sh
|
UTF-8
| 296
| 2.78125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Start a fake ADS server in the background and guarantee it is torn
# down again on any exit path via the EXIT trap.
ncat -l 48898 --keep-open &
ncat_pid=$!
trap "{ kill ${ncat_pid}; exit; }" EXIT
set -e
# Block until the fake server accepts connections, polling once a second.
until ncat --send-only localhost 48898; do
    sleep 1
done
# Run the test binaries against the fake server.
./AdsLibTest.bin
./example/example.bin
| true
|
71c3d083cb8c0f58f0fdfb27cbb36e841d6b5b1b
|
Shell
|
OPwonShinobi/amos-in-wonder-bash
|
/middle.sh
|
UTF-8
| 1,604
| 3.890625
| 4
|
[] |
no_license
|
#!/bin/bash
# Interactive-fiction "middle scene" handler: shows the scene for node $1,
# reads the matching choice file, and exec's into the next scene script.
# only load rest of script if called via another script; user shouldn't run this script directly
# makes sure a path has been entered: a node is a single uppercase letter
if ! [[ $(echo "$1" | awk '/^[[:upper:]]{1}$/') ]]; then
	exit
fi
# save current node letter as something more readable
currentNode=$1
# read & display the current scene text
cat "./txt/${currentNode}"
# empty line after printing body of text
echo
# Scan choice file for possible middle commands and paths
# Won't run unless middle paths present in choice file
# NOTE(review): choice file lines appear to be "<cmd> <node>" pairs; at most
# two "middle" entries are expected (nextNode1/nextNode2) — confirm format.
while read cmd node; do
	# if not defined nextNode1
	if [[ -z ${nextNode1} ]]; then
		nextNode1=${node}
	else
		nextNode2=${node}
	fi
done < <(awk /middle/ ./txt/${currentNode}C )
# Scan choice file for possible end command and ending path
# Won't run unless end paths present in choice file
while read cmd node; do
	# the current process is replaced with ./end.sh
	# There might be a better way to run it in a new non-subshell but this good enough
	exec ./end.sh $currentNode
done < <(awk /end/ ./txt/${currentNode}C)
# Prompt until the player types one of the two open node letters.
while [[ ${userInput} != ${nextNode1} && ${userInput} != ${nextNode2} ]]; do
	# The previous while loop seems to mess with default stream read uses
	# Explicitly read from stdin
	read userInput < /dev/tty
	# all choices coded into scenes are uppercase
	# convert userInput to all uppercase to do case-insensitive check
	userInput=${userInput^^}
	if [[ ${userInput} = ${nextNode1} ]]; then
		exec ./middle.sh ${nextNode1}
	elif [[ ${userInput} = ${nextNode2} ]]; then
		exec ./middle.sh ${nextNode2}
	else
		echo 'Only 2 paths are open before you, choose again.'
	fi
done
| true
|
36142fee419d3a4081763abb495f6c5c78f6d71c
|
Shell
|
kamir/faqsearch
|
/INGEST/FLUME/bin/runFlumeAgent.sh~
|
UTF-8
| 466
| 3.125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
#
# Launch a Flume agent that spools FAQ data into Cloudera-Search.
#
export FLUME_SPOOL_DIR=/flume/faq_files_spooldir
export COLLECTION=faq_collection1
clear
echo "Run the Flume agent now ..."
echo
echo "Collection name: $COLLECTION"
echo "Spool-Directory: $FLUME_SPOOL_DIR"
echo
# NOTE(review): 'read -p' is a bash extension; presumably /bin/sh is bash here.
read -p "Select agent >" a
# The agent config lives in the sibling conf/ directory; the log is written there too.
cd ../conf
flume-ng agent --conf /etc/flume-ng/conf --conf-file faq-flume-v1.conf --name $a -Dflume.root.logger=INFO,console > $a.log
cd ../bin
echo "Done."
| true
|
cfac928c741bb69a0eded18cd187f12354151837
|
Shell
|
unimock/mailmonit-docker
|
/build/bin/mailmonit.sh
|
UTF-8
| 873
| 3.359375
| 3
|
[] |
no_license
|
#!/bin/bash
# Process an incoming monitoring mail on stdin: if the subject carries a
# "cmd=echo" request whose password matches a pending .send record under
# $S_DIR, append the RECEIVE_* data and move the record to .receive.
. /etc/mailmonit.dat
S_DIR=$VAR_DIR/echo
FI=`mktemp`
INC=`mktemp`
_exit()
{
  # Always clean up the temp files, success or not.
  rm $FI $INC
  exit 0
}
# Capture the full mail (headers + body) from stdin.
cat > $FI
# BUGFIX: this line previously used 'sed -1', which is not a valid sed
# invocation; take the first match with 'head -1' as the SUBJECT line does.
SENDER=`egrep "^From: " $FI | head -1 | sed "s,^From: ,,"`
SUBJECT=`egrep "^Subject: " $FI | head -1 | sed "s,^Subject: ,,"`
# First word of the subject is the password; the rest are key=value tokens.
PW=`echo ${SUBJECT} | cut -d" " -f1`
S=${SUBJECT#${PW} }
#
rm -f $INC
# Only build the RECEIVE_* include file when a cmd=echo token is present.
for i in $S ; do
  if [ "$i" = "cmd=echo" ] ; then
    echo "RECEIVE_pw=${PW}" >> $INC
    for j in $S ; do
      echo "RECEIVE_$j" >> $INC
    done
    echo "RECEIVE_rs_receive=`date +%s`" >> $INC
  fi
done
# No cmd=echo token found -> nothing to do.
if [ ! -e $INC ] ; then
  _exit
fi
. $INC
if [ -e ${S_DIR}/${RECEIVE_id}.send ] ; then
  . ${S_DIR}/${RECEIVE_id}.send
  if [ "${SEND_pw}" = "${RECEIVE_pw}" ] ; then
    cat $INC >> ${S_DIR}/${RECEIVE_id}.send
    # NOTE(review): ${R_DIR}/${S_DIR} concatenates two paths from
    # /etc/mailmonit.dat — confirm this layout is intentional.
    mv ${S_DIR}/${RECEIVE_id}.send ${R_DIR}/${S_DIR}/${RECEIVE_id}.receive
  fi
fi
_exit
| true
|
9ae4c96fd61a17e6703b8baee9579fe979dbb94a
|
Shell
|
D0han/mockmaker
|
/tests/test_unknown_command.sh
|
UTF-8
| 125
| 2.515625
| 3
|
[
"Zlib"
] |
permissive
|
#!/bin/bash
# Verify that mockmaker exits with status 2 when handed a command it
# does not recognize.
unknown_cmd="unknown_command"
./mockmaker "${unknown_cmd}" > /dev/null 2>&1
rc=$?
[ "$rc" -eq 2 ] || exit 1
exit 0
| true
|
9b0968695511a80c13c3cdb97f73b628caff45a6
|
Shell
|
RY-2718/frontera_crawler
|
/scripts/speed_per_minute.sh
|
UTF-8
| 259
| 2.53125
| 3
|
[] |
no_license
|
#!/bin/bash
# Extract the first and last "Crawled" entries from the scrapy log and
# hand their timestamps/counts to the python speed calculator.
cd "$(dirname "$0")"
log=../scrapy_log/scrapy.log
first=$(grep Crawled "$log" | head -1 | awk '{print $1 "_" $2 " " $6}')
last=$(grep Crawled "$log" | tail -1 | awk '{print $1 "_" $2 " " $6}')
# $first/$last stay unquoted on purpose: each expands to two arguments
# ("date_time count"), exactly as the original $v $w did.
python ../scripts/py/speed_per_minute.py $first $last
| true
|
89e9e64a68d32f0f27b2b11f185cb306dfda1abb
|
Shell
|
jrodolfo/script-samples
|
/example-02-create-react-redux-projec/create-react-redux-project.sh
|
UTF-8
| 2,071
| 3.96875
| 4
|
[] |
no_license
|
#!/bin/bash
########################################################################################################################
#
# Script Name: create-react-redux-project.sh
#
# Description: create a react/redux project
#
# Assumption: npm is installed
#
# Args: -n [name] (mandatory argument, name of the project, e.g.: react-redux-project)
#
# Author: Rod Oliveira
#
########################################################################################################################
if [ "$1" = "-n" ]
then
  startedAt=$(date +"%Y-%m-%d %T");
  echo "script started at $startedAt";
  # Scaffold the app with create-react-app, then replace its src/ content.
  echo "calling npx create-react-app $2...";
  npx create-react-app $2;
  echo "deleting content of src...";
  cd $2/src || exit;
  rm -rf *;
  # Minimal index.js rendering the App component into #root.
  echo "creating index.js...";
  importReact="import React from 'react';";
  importReactDOM="import ReactDOM from 'react-dom';";
  importApp="import App from './components/App';";
  render="ReactDOM.render(<App />, document.querySelector('#root'));";
  indexContent="$importReact\n$importReactDOM\n$importApp\n$render";
  echo -e $indexContent > index.js;
  echo "creating folder components and the file App.js...";
  mkdir components;
  cd components || exit;
  appDef="const App = () => {return <div>App</div>;}";
  exportApp="export default App;";
  appContent="$importReact\n$appDef\n$exportApp";
  echo -e $appContent > App.js;
  # Inject the semantic-ui stylesheet right after the theme-color meta tag.
  echo "adding semantic-ui to index.html...";
  cd ../../public || exit;
  match="<meta name=\"theme-color\" content=\"#000000\" \/>";
  insert="<link rel=\"stylesheet\" href=\"https:\/\/cdnjs.cloudflare.com\/ajax\/libs\/semantic-ui\/2.4.1\/semantic.min.css\" \/>";
  file=index.html;
  # NOTE(review): 'sed -i' without a suffix is GNU sed; fails on macOS/BSD sed.
  sed -i "s/$match/$match\n    $insert/" $file;
  echo "installing redux and react-redux...";
  cd ..;
  npm install --save redux react-redux;
  # npm start runs the dev server in the foreground; the script blocks here.
  echo "starting the app...";
  npm start;
else
  echo "Please call this script with a name argument, e.g.: ./create-react-redux-project.sh -n react-redux-project";
fi;
| true
|
f59921f2f767038bbe5a09542e086a9ba889c1ec
|
Shell
|
larroy/python_project_structure
|
/devtool
|
UTF-8
| 1,568
| 3.96875
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
set -euo pipefail
install_deps_dev() {
    # Install the package in editable mode together with its test extras.
    # The extras spec is quoted because an unquoted '.[test]' is a glob
    # pattern in bash and would expand if a matching dotfile (.t/.e/.s)
    # ever existed in the working directory.
    pip install -e ".[test]"
}
env_setup() {
    # Placeholder for environment preparation; currently a deliberate no-op
    # so 'all' and CI hooks can call it unconditionally.
    true
}
test_with_coverage() {
    # Run the unit tests under coverage and fail if total coverage < 90%.
    coverage run -m pytest --pspec tests/unit
    coverage report -m --fail-under=90
}
lint() {
    # Run the lint pipeline: pre-commit hooks (when in a git checkout),
    # flake8, then mypy. Any failing stage aborts via the caller's 'set -e'.
    echo ""
    if [ -d .git ]
    then
        echo "1. pre-commit hooks"
        echo "==================="
        pre-commit run -v -a
        echo ""
    else
        echo "1. pre-commit hooks (Skipped)"
        echo "==================="
        echo ""
    fi
    echo "2. Flake8"
    echo "========="
    flake8 . --config=setup.cfg
    echo ""
    echo "3. Mypy"
    echo "======="
    # First invocation auto-installs missing type stubs ('echo y' answers the
    # prompt); on failure, retry without --install-types to get a traceback.
    echo y | mypy --install-types --junit-xml reports/typecheck.xml --html-report reports --config-file setup.cfg src/ || \
        mypy --junit-xml reports/typecheck.xml --html-report reports --config-file setup.cfg --show-traceback src/
    echo ""
    echo "Lint: SUCCESS"
}
all() {
    # Full pipeline: setup, lint, tests, package, docs.
    # NOTE(review): build_package and docs are not defined in this file —
    # confirm they are provided elsewhere (sourced or added later).
    env_setup
    lint
    test_with_coverage
    build_package
    docs
    echo "All build and tests passed. 😊"
}
##############################################################
# MAIN
#
# Run function passed as argument
set +x
if [ $# -gt 0 ]
then
    # cd to the script's own directory so relative paths resolve.
    SCRIPTPATH="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )"
    echo "CD -> $SCRIPTPATH"
    cd $SCRIPTPATH
    echo ==================
    echo $@
    echo ==================
    # Dispatch: the first argument is the function name, the rest its args.
    # NOTE(review): unquoted $@ word-splits intentionally here; arguments
    # containing spaces would be mangled.
    $@
else
    cat<<EOF
**Developer tool**
==================

$0: Execute a function by passing it as an argument to the script:

Possible commands:
==================
EOF
    # List every function defined above as an available command.
    declare -F | cut -d' ' -f3
    echo
fi
| true
|
a0a49bacc017390de4610a9e94a283243f7f2068
|
Shell
|
bermanjacob96/CSCI3753
|
/pa4/looptest.sh
|
UTF-8
| 10,980
| 3.28125
| 3
|
[] |
no_license
|
#!/bin/bash

#File: looptest
#Author: Jacob Berman
#Project: CSCI 3753 Programming Assignment 4
#Create Date: 2016/12/11
#Modify Date: 2016/15/11
#Description:
#	Run 3 iterations of every scheduler/workload/load-level combination
#	(each both with and without randomized work) and append the timing
#	results of every run as a CSV row to data.csv.

HEADER="Schedule, Wall Clock Time (sec), Total User Mode CPU Seconds, Total Kernel Mode CPU Seconds, CPU Percentage, Total Non-Voluntary Context Switches, Total Voluntary Context Switches, # Processes"

# One /usr/bin/time format per scheduling policy so CSV rows are labeled.
TIMEFORMAT1="SCHED_OTHER, %e, %U, %S, %P, %c, %w"
TIMEFORMAT2="SCHED_FIFO, %e, %U, %S, %P, %c, %w"
TIMEFORMAT3="SCHED_RR, %e, %U, %S, %P, %c, %w"

SIMULATIONS_LIGHT="10"
SIMULATIONS_MEDIUM="50"
SIMULATIONS_HIGH="100"

CPU_INTENSIVE="CPU"
IO_INTENSIVE="IO"
MIXED_INTENSIVE="Mixed"

# BUGFIX: this previously read 'RAND= "RAND"' (note the space), which runs a
# command named RAND with an empty RAND in its environment instead of
# assigning the variable.
RAND="RAND"

make clean
make

echo "$HEADER" > data.csv

# run_case FORMAT SCHEDULER SIMULATIONS WORKLOAD [RAND]
#   Run ./test three times under /usr/bin/time with the given arguments,
#   appending one CSV row per run to data.csv, then print a progress line.
# Replaces the 54 hand-unrolled loops of the original; this also fixes a
# copy/paste bug where one RR/Mixed invocation was missing the space in
# '-o data.csv -a'.
case_no=0
run_case() {
	local fmt=$1 sched=$2 sims=$3 workload=$4 rand=${5-}
	local i
	for i in 1 2 3
	do
		# $rand is intentionally unquoted: when empty it contributes no
		# argument, matching the original non-randomized invocations.
		/usr/bin/time -f "$fmt" -o data.csv -a sudo ./test "$sched" "$sims" "$workload" $rand
	done
	case_no=$((case_no + 1))
	echo "$case_no done"
}

schedulers=(SCHED_OTHER SCHED_FIFO SCHED_RR)
formats=("$TIMEFORMAT1" "$TIMEFORMAT2" "$TIMEFORMAT3")
loads=("$SIMULATIONS_LIGHT" "$SIMULATIONS_MEDIUM" "$SIMULATIONS_HIGH")

# Same ordering as the original script: workload outermost, then scheduler,
# then load level, with the randomized variant right after the plain one.
for workload in "$CPU_INTENSIVE" "$IO_INTENSIVE" "$MIXED_INTENSIVE"
do
	echo "Starting ${workload} tests..."
	for s in 0 1 2
	do
		for load in "${loads[@]}"
		do
			run_case "${formats[$s]}" "${schedulers[$s]}" "$load" "$workload"
			run_case "${formats[$s]}" "${schedulers[$s]}" "$load" "$workload" "$RAND"
		done
	done
done

echo "All tests completed..."
| true
|
85843d020e8f8ac376ee2662378c75e4788c4e34
|
Shell
|
norm/media-tools
|
/bin/media-convert-flac-to-wav
|
UTF-8
| 1,877
| 3.71875
| 4
|
[] |
no_license
|
#!/bin/bash
set -e
# Helpers (is_sub, summary_line, help_text, apply_title_case) come from the
# sourced 'media' driver script.
# shellcheck source=/dev/null
source $(which media)
is_sub "$*" false
summary_line "$*" 'convert a FLAC album to WAVs for processing by add-cd'
help_text "$*" <<'EOF'
Usage: media convert-flac-to-wav <dir>
Given a directory containing FLAC audio files, convert them to WAVs and
create the metadata file for the album, ready for use by `add-cd`.
Assumes the files are named "Artist - Album - 01 Title.flac" (this is
directly influenced by Bandcamp's albums delivered in FLAC format).
EOF
directory="$1"
mkdir -p "$directory/wavs"
cd "$directory"
# NOTE(review): IFS is cleared and never restored — presumably to guard the
# later unquoted expansions; confirm this is intentional.
IFS=
declare -a flac_files=( *.flac )
# Artist and album are parsed from the first file's "A - B - NN T" name.
artist=$( echo "${flac_files[0]}" | awk -F' - ' '{ print $1 }' )
album=$( echo "${flac_files[0]}" | awk -F' - ' '{ print $2 }' )
# The sed strips the here-doc's leading indent before writing metadata.conf.
# NOTE(review): "over sleep-ride" in the generated text looks like a typo for
# "override" — confirm before changing the emitted file.
sed -e 's/^ //'> metadata.conf <<EOF
album = $album
artist = $artist
compilation = false
disk = 1/1
genre = unknown
purchaseDate = 1970-01-01T12:00:00Z
year = 1970-01-01T12:00:00Z
# tracks can also over sleep-ride options, such as:
# artist = <track artist>
# genre = <track genre>
# year = <track year>
#
# other options:
# grouping = <grouping>
# comment = [instrumental]
EOF
for file in "${flac_files[@]}"; do
    # Track number = first two chars of the third " - " field; title = rest.
    track=$( echo "$file" | awk -F' - ' '{ print $3 }' | cut -c1-2 )
    title=$( echo "$file" | awk -F' - ' '{ print $3 }' | cut -c4- )
    title=$( apply_title_case "$(basename "$title" .flac)" )
    echo "Converting $file"
    flac \
        --decode "$file" \
        --force \
        --output-name="wavs/$track.wav" \
        >/dev/null 2>&1
    # shellcheck disable=SC2129
    echo '' >> metadata.conf
    echo "[$track]" >> metadata.conf
    echo "title = $title" >> metadata.conf
done
echo ''
echo 'Done. Edit the "metadata.conf" file, and then run:'
echo ''
echo "  media add-cd '${directory}'"
echo ''
| true
|
6ace0edd4c27eb9bfb0c255c84917bd3adcfca7f
|
Shell
|
pbravakos/Miscellaneous
|
/RemoveTmpDir.sh
|
UTF-8
| 8,691
| 4.34375
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
<<EOF
IMPORTANT NOTE:
This script relies on some assumptions which were all true during testing.
1) Each node (on every partition) should have a unique name.
2) Bash version should be 4.4.12.
3) Slurm version should be 16.05.9.
A change to any of the above assumptions could cause the script to collapse.
Also, Backfill was set as the scheduler type during testing.
A change to the scheduler type does not break the code but,
could cause the exclusion of some nodes.
Moreover, Basic was set as the priority type during testing.
Change of the priority type to a more sophisticated one (i.e. multifactor)
could also potentially cause the exclusion of some or all the nodes.
Minor note:
The --immediate sbatch directive was not functioning as expected during testing and decided not to use it.
Had it been used, the script could have been slightly less complex (and milliseconds faster!).
EOF
# Print the usage text; callers redirect it to stdout or stderr as needed.
show_help () {
cat <<END
Submits an sbatch job to each available compute node not currently in use by user $USER, to remove user directories from /tmp.
Moreover, all directories created by user $USER in /tmp of the current node ($HOSTNAME) are also removed.
Usage: bash ${0##*/} [-h] [-s <string>] [-m <integer>] [-t <integer>]
   -h	Display this help and exit
   -s	SLURM script file name. Default is "RmUserTmp.XXXXXXXXX".This file is created in the working directory and deleted automatically.
   -m	Memory limit in MB for each sbatch job submitted to SLURM. Integer values should be between 10-1000. Default is 100.
   -t	Time limit in minutes for each sbatch job submitted to SLURM. Integer values should be between 1-100. Default is 10.
ATTENTION!
It is possible that not all nodes will be accessible, and thus not all user /tmp directories will be removed.
For that reason, it is recommended to run this script frequently at different time periods.
END
}
# Use getopts to create some options that users can set.
# OPTIND Holds the index to the next argument to be processed.
# It is initially set to 1, and needs to be re-set to 1 if we want to parse anything again with getopts.
OPTIND=1
# getopts sets an exit status of FALSE when there's nothing left to parse.
# It parses the positional parameters of the current shell or function by default (which means it parses "$@").
# OPTARG is set to any argument for an option found by getopts.
# The leading ':' in the optstring enables silent error mode, so the ':'
# (missing argument) and '\?' (unknown option) cases below are reachable.
while getopts ':m:t:s:h' opt
do
    case "$opt" in
        m)
            # declare -i coerces the argument to an integer (0 on garbage).
            declare -i JobMem="$OPTARG" 2> /dev/null
            if (( JobMem <= 1000 && JobMem >= 10 )); then
                echo "SLURM Job Memory was set to ${JobMem}MB"
                echo
            else
                echo "Memory (-m) should be given as an integer between 10 and 1000" >&2
                show_help >&2
                exit 1
            fi
            ;;
        t)
            declare -i Time="$OPTARG" 2> /dev/null
            if (( Time <= 100 && Time >= 1 )); then
                echo "SLURM Time Limit was set to ${Time}min"
                echo
            else
                echo "Time limit (-t) should be given as an integer between 1 and 100" >&2
                show_help >&2
                exit 1
            fi
            ;;
        s)
            SlurmScript="$OPTARG"
            echo "SLURM bash script file name was set to be \"${SlurmScript}\""
            echo
            ;;
        h)
            show_help >&2
            exit 0
            ;;
        \?)
            echo "Invalid option: -$OPTARG" >&2
            echo "To check valid options use -h" >&2
            echo >&2
            ;;
        :)
            echo "Option -$OPTARG requires an argument." >&2
            show_help >&2
            exit 1
            ;;
    esac
done
# If getopts exits with a return value greater than zero, OPTIND is set to the index of the first non-option argument.
# Shift command removes all the options that have been parsed by getopts from the parameters list, and so after that point, $1 will refer to the first non-option argument passed to the script.
# In our case we ignore all these arguments.
shift "$(( OPTIND - 1 ))"
# INITIAL PARAMETERS
# SLURM bash script file (user override via -s, else a fresh mktemp name).
SlurmScript="${SlurmScript:-$(mktemp RmUserTmp.XXXXXXXXX)}"
# Parameters for sbatch
Ntasks=1
JobMem="${JobMem:-100}" # memory requirement in MB.
Time="${Time:-10}" # in minutes. Set a limit on the total run time of each submitted job.
JobName="RmUserTmp"

# Check whether SLURM manager is installed on the system.
command -v sinfo &>/dev/null \
    || { echo "SLURM is required to run this script, but is not currently installed. Please ask the administrator for help." >&2; exit 1; }

# Check that we are using Bash version 4 or higher (mapfile is used later).
# BUGFIX: the test was inverted ('< 4' aborted on modern bash and let old
# bash continue); require major version >= 4.
(( BASH_VERSINFO[0] >= 4 )) || { echo "This script can only run by bash 4 or higher." >&2; exit 1; }

# Check for the existence of any file with the same name as the produced bash script. If such a file exists, exit the script.
[[ -f ${SlurmScript} ]] && { echo "${SlurmScript} exists in ${PWD}. Please either rename the existing file or set the -s option to a different file name." >&2; exit 1; }
[[ $(type ${SlurmScript}) ]] 2>/dev/null && { echo "There is a command named ${SlurmScript}. Please set the -s option to a different file name." >&2; exit 1; }

# Remove the produced bash script upon exit or any other interrupt, and
# cancel any jobs already submitted when the user interrupts the run.
cleanup="rm -f $SlurmScript"
trap 'echo; echo Terminating. Please wait; $cleanup; scancel --quiet ${AllJobIDs} 2>/dev/null; exit;' ABRT INT QUIT
trap '$cleanup' EXIT HUP
# Create a new variable with all the nodes which do not have enough available memory.
# (sinfo column math: column4 = total memory, column3 = allocated memory.)
FullMemNode=$(sinfo -O NodeAddr,Partition,AllocMem,Memory | sed 's/ \+/ /g;s/*//g' \
	| awk -v mem=${JobMem} 'NR>1 && ($4-$3)<mem {print "^"$1"$"}')
# Also, create another variable with all the nodes currently in use by the user
# (running "R" or completing "CG" jobs).
UserNode=$(squeue -o "%.u %N %.t" | awk -v user="$USER" '$1==user && $3~"^R$|^CG$" {print "^"$2"$"}')
# Combine the two variables to a new one, containing all the unavailable nodes
# as an anchored, '|'-joined regex alternation.
# The goal is to prevent running a job on any of these nodes.
NewLine=$'\n'
UnavailNode=$(echo "${FullMemNode}${NewLine}${UserNode}" | sort -u \
	| tr "\n" "|" | sed 's/|$//g;s/^|//g')
# Find all the available nodes and export them to an array.
# Each element of the array will have the name of the node and the name of the partition.
# Only nodes in "mix" or "idle" state are considered available.
if [[ -z ${UnavailNode} ]]
then
	mapfile -t AvailNodes < <(sinfo --Node | awk '$4 ~ /mix|idle/ {print $1, $3}' | sed 's/*$//g')
else
	mapfile -t AvailNodes < <(sinfo --Node \
		| awk -v node=${UnavailNode} '$1!~node && $4 ~ /mix|idle/ {print $1, $3}' 2> /dev/null \
		| sed 's/*$//g')
	echo
	echo "User directories in /tmp of ${UnavailNode//|/,} will NOT be deleted." | sed -E 's/\^|\$//g'
	echo
fi
# If there are no available nodes, exit the script
[[ -z ${AvailNodes} ]] \
	&& { echo "There are no available nodes. No files were deleted. Please try again later." >&2; exit 1; }
# Create the SLURM bash input file. The quoted "EOF" keeps $USER literal so
# it expands on the compute node, not here.
cat > "${SlurmScript}" <<"EOF"
#!/bin/bash
#find /tmp -maxdepth 1 -user "$USER"  -exec sh -c "rm -fr {} || exit 1" \;
find /tmp -maxdepth 1 -mmin +1 -user "$USER" -exec rm -fr {} + &
wait
exit 0
EOF
# Remove user's directories in /tmp by running an sbatch job on each available node.
AllJobIDs=""
while read -r node partition
do
	NewJobID=$(sbatch --partition="${partition}" \
			--nodelist="${node}" \
			--ntasks="${Ntasks}" \
			--mem="${JobMem}M" \
			--output=/dev/null \
			--time="${Time}" \
			--job-name="${JobName}" \
			"${SlurmScript}" 2> /dev/null)
	if [[ $? -eq 0 ]]; then
		# Keep only the numeric job id from "Submitted batch job NNN".
		NewJobID=$(echo ${NewJobID} | grep -Eo "[0-9]+$")
		AllJobIDs=${AllJobIDs}${NewJobID}" "
		echo "User directories in /tmp of ${partition} ${node} have been deleted."
	else
		echo "User directories in /tmp of ${partition} ${node} have NOT been deleted. Please try again later."
	fi
	echo
done < <(for i in "${AvailNodes[@]}"; do echo $i; done)

# Remove user /tmp directories in current node.
find /tmp -maxdepth 1 -mmin +15 -user "$USER" -exec rm -fr {} +
echo "User directories in /tmp of current node ${HOSTNAME} have been deleted."
echo
echo "Please wait"
echo
sleep 2

# Find all the jobs submitted by this script which do NOT currently have a runnning status and cancel those jobs.
JobsPattern=$(echo ${AllJobIDs// /|} | sed 's/|$//')
# BUGFIX: '$2=="${USER}"' inside a single-quoted awk program compared the
# user column against the literal string ${USER}, which never matches; pass
# the user name in with -v like the job pattern.
AllJobIDs=$(squeue -o'%.i %.u %.t' | awk -v job="${JobsPattern}" -v user="${USER}" 'BEGIN {ORS=" "} $2==user && $3!="R" && $1~job {print $1}')
scancel --quiet ${AllJobIDs} 2> /dev/null
exit 0
| true
|
a9367ff52294e8e18e8c65a7e3cfadaa2b0b5ade
|
Shell
|
Sonnbc/CS438
|
/mp2/plot-results.sh
|
UTF-8
| 571
| 3.40625
| 3
|
[] |
no_license
|
#!/bin/bash
# Plot the cwnd and trace result files (if present) via plot.sh, using the
# gnuplot terminal given as $1 (defaulting to eps). $2/$3 are passed through.
format=$1
if [ -z "$format" ]; then
	echo "You did not specify format. Assuming eps by default."
	echo "Recommended format is eps, pdf, or png. pdf or png requires cairo library."
	echo "Format can also be anything that gnuplot can work with."
	echo "For example, you can specify dumb, latex, or gif."
	format="eps"
fi
if [ -f cwnd ]; then
	./plot.sh cwnd $format 1 2 "0.2" "cwnd vs Time" "Time (ms)" "cwnd (bytes)" $2 $3
fi
if [ -f trace ]; then
	./plot.sh trace $format 3 2 "0.2" "Sequence Number vs Time" "Time (ms)" "Sequence Number" $2 $3
fi
| true
|
821f26b2bf5b4a736be624e853db8274c9be1a03
|
Shell
|
rbong/vim-flog
|
/t/run.sh
|
UTF-8
| 647
| 3.9375
| 4
|
[] |
no_license
|
#!/bin/bash
# Test harness: run every t_* script (or the single one named in $1) in a
# fresh environment, report pass/fail per test, exit non-zero if any failed.
set -e

TEST_DIR=$(realpath -- "$(dirname -- "$0")")

. "$TEST_DIR/lib_dir.sh"
. "$TEST_DIR/lib_print.sh"
. "$TEST_DIR/lib_vim.sh"

# Setup
cd "$BASE_DIR"
install_vim

# Get args: a single named test, or every t_* file (glob instead of parsing
# 'ls' output, so the list survives unusual file names).
if [ -n "${1:-}" ]; then
  TESTS=("$TEST_DIR/$1")
else
  TESTS=("$TEST_DIR"/t_*)
fi

# Run tests
FAILED_TESTS=0
for TEST in "${TESTS[@]}"; do
  # Reset
  cd "$BASE_DIR"
  remove_tmp_dirs

  # Run the test; disable -e around it so a failure doesn't abort the harness.
  print_title "${TEST}"
  set +e
  "${TEST}"
  RESULT=$?
  set -e

  # Process result
  if [ $RESULT -eq 0 ]; then
    print_success
  else
    print_fail
    # Arithmetic expansion instead of spawning 'expr'.
    FAILED_TESTS=$((FAILED_TESTS + 1))
  fi
done

if [ "$FAILED_TESTS" -gt 0 ]; then
  exit 1
fi
| true
|
ced6172ad7e3df58e043e599c23e795b428156b3
|
Shell
|
GT9305/RetroPie-Setup
|
/scriptmodules/emulators/basilisk.sh
|
UTF-8
| 1,062
| 2.859375
| 3
|
[] |
no_license
|
# RetroPie module for the BasiliskII Macintosh emulator. The helper
# functions (getDepends, gitPullOrClone, mkRomDir, addSystem) are provided
# by the RetroPie-Setup framework that sources this file.
rp_module_id="basilisk"
rp_module_desc="Macintosh emulator"
rp_module_menus="2+"
rp_module_flags="dispmanx"

function depends_basilisk() {
    getDepends libsdl1.2-dev autoconf automake
}

function sources_basilisk() {
    gitPullOrClone "$md_build" git://github.com/cebix/macemu.git
}

function build_basilisk() {
    cd BasiliskII/src/Unix
    # SDL video/audio, no X11/GTK, JIT disabled (not supported on this arch).
    ./autogen.sh --prefix="$md_inst" --enable-sdl-video --enable-sdl-audio --disable-vosf --disable-jit-compiler --without-x --without-mon --without-esd --without-gtk
    make clean
    make
    md_ret_require="$md_build/BasiliskII/src/Unix/BasiliskII"
}

function install_basilisk() {
    cd "BasiliskII/src/Unix"
    make install
}

function configure_basilisk() {
    mkRomDir "macintosh"
    # Start.txt acts as the launchable "rom" entry for the menu.
    touch "$romdir/macintosh/Start.txt"
    mkdir -p "$configdir/macintosh"
    chown $user:$user "$configdir/macintosh"
    addSystem 1 "$md_id" "macintosh" "$md_inst/bin/BasiliskII --rom $romdir/macintosh/mac.rom --disk $romdir/macintosh/disk.img --config $configdir/macintosh/basiliskii.cfg" "Apple Macintosh" ".txt"
}
| true
|
234311c3ce9f75aba75185c5e124252003114926
|
Shell
|
nitinawathare/RenoirExperiment
|
/removeDelay.sh
|
UTF-8
| 176
| 2.671875
| 3
|
[] |
no_license
|
#!/bin/bash
SERVER_LIST=ipList
while read REMOTE_SERVER
do
ssh -n -i quorum2.key ubuntu@$REMOTE_SERVER "nohup sudo tc qdisc del dev eth0 root" &
done < $SERVER_LIST
| true
|
8d66d930dc852e6b52482d94015f73cc99832ba3
|
Shell
|
HD2-goes-future/android_patches
|
/data2ext4_initrd_patcher_init.htcleo.rc/tools/revert_initrd_patch/tools/checksys.sh
|
UTF-8
| 149
| 2.796875
| 3
|
[] |
no_license
|
#!/sbin/sh
find=`grep -i "clk" /proc/cmdline`
if [ -n "$find" ]
then
echo "clk=true" >> /tmp/nfo.prop
else
echo "clk=null" >> /tmp/nfo.prop
fi
| true
|
c25dbe96e82356f8db0fc76385dcb36a24b033e9
|
Shell
|
troydhanson/pmtr
|
/initscripts/sysvinit.template
|
UTF-8
| 590
| 3.84375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
#
# Starts pmtr
#
umask 077
# Source function library.
. /etc/init.d/functions
if [ -d /var/run ]
then
PIDFILE=/var/run/pmtr.pid
else
PIDFILE=/tmp/pmtr.pid
fi
start() {
start-stop-daemon --start --pidfile $PIDFILE --exec __SYSBINDIR__/pmtr -- -p $PIDFILE
}
stop() {
start-stop-daemon --stop --pidfile $PIDFILE
if [ $? -eq 0 ]; then rm -f $PIDFILE; fi
}
restart() {
stop
start
}
case "$1" in
start)
start
;;
stop)
stop
;;
restart|reload)
restart
;;
status)
status pmtr
;;
*)
echo $"Usage: $0 {start|stop|restart|status}"
exit 1
esac
exit $?
| true
|
2bd3b6759d804c0267b02bb06890c12d5d549450
|
Shell
|
RyanTech/yueji
|
/tools/study/linux/script/do-links.sh
|
UTF-8
| 243
| 3.515625
| 4
|
[] |
no_license
|
#!/bin/sh
src=$1
dest=$2
#for file in $src/*; do
for file in `find $src -name *.rpm -a ! -name *.src.rpm -print`; do
base=`basename $file;`
if test ! -f $dest/$base; then
echo "Linking $file";
ln $file $dest
else
echo "EXISTS: $file";
fi
done
| true
|
21fa6dc95e191f4b1c51cd80d239a456e1f06bdd
|
Shell
|
shellib/grab
|
/examples/hello-world.sh
|
UTF-8
| 441
| 3.84375
| 4
|
[] |
no_license
|
#!/bin/bash
# Let's load the a shell library from: github.com/shellib/cli
# That provides the following functions:
#
# - or
# - hasflag
# - readopt
source $(grab github.com/shellib/cli)
help=$(hasflag --help $*)
greeting=$(or $(readopt --greeting $*) "Hello World!")
if [ -n "$help" ]; then
echo "Usage: hello-world.sh --greeting <Your Greeting>"
echo ""
echo "Example:"
echo "hello-world.sh --greeting Aloha"
else
echo $greeting
fi
| true
|
10483dadb94be5fc855e1038b6f22830cac5b646
|
Shell
|
VC4Africa/automation
|
/js-syntax-checker.sh
|
UTF-8
| 2,051
| 3.9375
| 4
|
[] |
no_license
|
#!/bin/sh
# Check js syntax for changed files with latest commits only.
GIT_PREVIOUS_COMMIT=$1
GIT_COMMIT=$2
REPOSITORY_URL=$3
# always good to know where are we and who are we!
echo "Who am I?"
whoami
echo "Where am I?"
pwd
echo "Git username?"
git git config user.name
echo "Repository: $REPOSITORY_URL"
if [ $GIT_PREVIOUS_COMMIT = $GIT_COMMIT ] || [ $GIT_PREVIOUS_COMMIT = "" ]; then
# let's assume going back to 30 commits would be enough for covering even an exceptional huge PR case.
GIT_PREVIOUS_COMMIT=$(git rev-list -30 --skip=29 --max-count=1 HEAD)
fi
echo "Set ownership to deploy"
sudo chown deploy:www-data . -R
# Stripping https://github.com/
REPOSITORY_NAME=${REPOSITORY_URL:19}
echo "Repository name = $REPOSITORY_NAME"
git config --unset-all remote.origin.fetch
echo "Set origin = git@github.com:$REPOSITORY_NAME"
git remote set-url origin git@github.com:${REPOSITORY_NAME}
git config remote.origin.fetch '+refs/heads/*:refs/remotes/origin/*'
echo "track all"
git branch -r | grep -v '\->' | while read remote; do git branch --track "${remote#origin/}" "$remote"; done
echo "Git Fetch --all"
git fetch --all
echo "All changed files"
git diff --diff-filter=ACMR --name-only $GIT_PREVIOUS_COMMIT $GIT_COMMIT
# show different js files only
changedjs=$(git diff --diff-filter=ACMR --name-only $GIT_PREVIOUS_COMMIT $GIT_COMMIT | grep .js$ | grep -v wpml-translation-management/ | grep -v node_modules/ | grep -v vendor/ | grep -v gulpfile.babel.js)
# only run esvalidate where there are changes
if [[ -n $changedjs ]]; then
echo "Checking syntax of modified js files.."
echo "Using eslint -v"
eslint -v || sudo npm install -g eslint
git diff --diff-filter=ACMR --name-only $GIT_PREVIOUS_COMMIT $GIT_COMMIT | grep .js$ | grep -v wpml-translation-management/ | grep -v node_modules/ | grep -v vendor/ | grep -v gulpfile.babel.js | xargs -n1 echo eslint --no-eslintrc --env es6 --parser-options=ecmaVersion:8 | bash
else
echo "No JS modifications found, skipping syntax checks."
fi
echo "no js syntax errors detected" && exit 0
| true
|
226d5124cf471ef2e43d17d8834797878ac8844d
|
Shell
|
mcmhav/s
|
/bin/_csys/_csys_is_package_installed
|
UTF-8
| 699
| 3.5
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
read -ra ORIGINAL_ARGS <<<"$*"
PACKAGE_TO_INSTALL=""
while [ "$1" != "" ]; do
case "$1" in
-*) ;;
*)
if [ -z "$PACKAGE_TO_INSTALL" ]; then
PACKAGE_TO_INSTALL="$1"
fi
;;
esac
shift
done
if
{
[ "$CSYS_SHALLOW_SETUP" = "true" ] &&
! command -v "$PACKAGE_TO_INSTALL" >/dev/null &&
! ls "/usr/local/Cellar/$PACKAGE_TO_INSTALL" 1>/dev/null 2>&1 &&
! ls "/usr/local/Caskroom/$PACKAGE_TO_INSTALL" 1>/dev/null 2>&1 &&
! brew ls --version "${ORIGINAL_ARGS[@]}" >/dev/null
} ||
{
[ "$CSYS_SHALLOW_SETUP" = "false" ] &&
! brew ls --version "${ORIGINAL_ARGS[@]}" >/dev/null
}
then
exit 1
else
exit 0
fi
| true
|
0100660cab7a788eb25225b6edb1b2de5a9d744d
|
Shell
|
xiangmingxu/apple_replant
|
/ITS.sh
|
UTF-8
| 193
| 2.65625
| 3
|
[] |
no_license
|
#!/bin/bash
SCRIPT_DIR=$(readlink -f ${0%/*})
RFILE=$1
DIR=$2
REG1=$3
REG2=$4
FASTA=$5
ID=$6
LOWQUAL=$7
qsub $SCRIPT_DIR/submit_ITS.sh $RFILE $DIR $REG1 $REG2 $FASTA $ID $LOWQUAL $SCRIPT_DIR
| true
|
8346e2aa10af3598b7f226a894d8faab05148d4a
|
Shell
|
wjmn/dsb-bioinformatics-project
|
/src/liftOver_GSE25577_18to38.sh
|
UTF-8
| 439
| 2.78125
| 3
|
[] |
no_license
|
# LIFTOVER FOR hg18 BED FILES -----------------------------------------------------------------------------
cd ../data/temp/GSE25577_concatenated
# PAY ATTENTION TO THE LINE DIRECTLY BELOW: 18 VS 19 VS 38
dir_chain=../../raw/liftOverchains/hg18ToHg38.over.chain
dir_save=../markers_hg38/
for filename in *.bed;
do
liftOver $filename $dir_chain $dir_save${filename/.bed/_liftedto38.bed} $dir_save${filename/.bed/_liftedto38.err}
done
| true
|
85d5a8b0fc8ddcc305e0780722aaad3f6b59e0f3
|
Shell
|
rgodishela/cloud-inquisitor
|
/packer/scripts/install.sh
|
UTF-8
| 5,775
| 3.5625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash -e
validate_environment() {
echo "Validating environment"
if [ -z "${APP_TEMP_BASE}" ]; then echo "Missing APP_TEMP_BASE environment variable" && exit -1; fi
if [ -z "${APP_DEBUG}" ]; then echo "Missing APP_DEBUG environment variable" && exit -1; fi
if [ -z "${APP_PYENV_PATH}" ]; then echo "Missing APP_PYENV_PATH environment variable" && exit -1; fi
if [ -z "${APP_FRONTEND_BASE_PATH}" ]; then echo "Missing APP_FRONTEND_BASE_PATH environment variable" && exit -1; fi
if [ -z "${APP_BACKEND_BASE_PATH}" ]; then echo "Missing APP_BACKEND_BASE_PATH environment variable" && exit -1; fi
if [ -z "${APP_DB_URI}" ]; then echo "Missing APP_DB_URI environment variable" && exit -1; fi
if [ -z "${APP_SSL_ENABLED}" ]; then echo "Missing APP_SSL_ENABLED environment variable" && exit -1; fi
if [ -z "${APP_USE_USER_DATA}" ]; then echo "Missing APP_USE_USER_DATA environment variable" && exit -1; fi
if [ -z "${APP_KMS_ACCOUNT_NAME}" ]; then echo "Missing APP_KMS_ACCOUNT_NAME environment variable" && exit -1; fi
if [ -z "${APP_USER_DATA_URL}" ]; then echo "Missing APP_USER_DATA_URL environment variable" && exit -1; fi
}
create_virtualenv() {
echo "Setting up python virtualenv"
if [ -d "${APP_PYENV_PATH}" ]; then
echo "VirtualEnv folder already exists, skipping"
else
virtualenv -p python3 "${APP_PYENV_PATH}"
fi
}
install_backend() {
python=${APP_PYENV_PATH}/bin/python3
pip=${APP_PYENV_PATH}/bin/pip3
echo "Installing backend"
mkdir -p ${APP_BACKEND_BASE_PATH}
cp -a ${APP_TEMP_BASE}/cinq-backend/* ${APP_BACKEND_BASE_PATH}
# setup.py to prepare for dynamic module reloading.
# TODO: Remove PBR_VERSION once we are building CInq as a package
(
cd ${APP_BACKEND_BASE_PATH}
PBR_VERSION=1.7.0 $python setup.py install
$pip install --upgrade -r ${APP_BACKEND_BASE_PATH}/requirements.txt
# Create log folders for the application and allow the backend user to write to them
mkdir -p logs
chown -R www-data:www-data logs
)
}
install_frontend() {
FRONTEND_TEMP_BASE=${APP_TEMP_BASE}/cinq-frontend
pushd ${FRONTEND_TEMP_BASE}
# Delete node_modules and dist if they exist to prevent
# bad versions being used
if [ -d node_modules ]; then rm -rf node_modules; fi
if [ -d dist ]; then rm -rf dist; fi
echo "Installing NPM modules"
npm i --quiet
echo "Building frontend application"
./node_modules/.bin/gulp build.prod
popd
mkdir -p ${APP_FRONTEND_BASE_PATH}
cp -a ${FRONTEND_TEMP_BASE}/dist/* ${APP_FRONTEND_BASE_PATH}
}
configure_application() {
echo "Configuring backend"
SECRET_KEY=$(openssl rand -hex 32)
sed -e "s|APP_DEBUG|${APP_DEBUG}|" \
-e "s|APP_DB_URI|${APP_DB_URI}|" \
-e "s|APP_SECRET_KEY|${SECRET_KEY}|" \
-e "s|APP_USE_USER_DATA|${APP_USE_USER_DATA}|" \
-e "s|APP_USER_DATA_URL|${APP_USER_DATA_URL}|" \
-e "s|APP_KMS_ACCOUNT_NAME|${APP_KMS_ACCOUNT_NAME}|" \
-e "s|APP_INSTANCE_ROLE_ARN|${APP_INSTANCE_ROLE_ARN}|" \
-e "s|APP_AWS_API_ACCESS_KEY|${APP_AWS_API_ACCESS_KEY}|" \
-e "s|APP_AWS_API_SECRET_KEY|${APP_AWS_API_SECRET_KEY}|" \
files/backend-settings.py > ${APP_BACKEND_BASE_PATH}/settings/production.py
}
install_certs() {
if [ -z "$APP_SSL_CERT_DATA" -o -z "$APP_SSL_KEY_DATA" ]; then
echo "Certificate or key data missing, installing self-signed cert"
generate_self_signed_certs
else
echo "Installing certificates"
CERTDATA=$(echo "$APP_SSL_CERT_DATA" | base64 -d)
KEYDATA=$(echo "$APP_SSL_KEY_DATA" | base64 -d)
echo "$CERTDATA" > $APP_BACKEND_BASE_PATH/settings/ssl/cinq-frontend.crt
echo "$KEYDATA" > $APP_BACKEND_BASE_PATH/settings/ssl/cinq-frontend.key
fi
}
function generate_jwt_key() {
echo "Generating JWT private key"
openssl genrsa -out ${APP_BACKEND_BASE_PATH}/settings/ssl/private.key 2048
}
generate_self_signed_certs() {
CERTINFO="/C=US/ST=CA/O=Your Company/localityName=Your City/commonName=localhost/organizationalUnitName=Operations/emailAddress=someone@example.com"
openssl req -x509 -subj "$CERTINFO" -days 3650 -newkey rsa:2048 -nodes \
-keyout ${APP_BACKEND_BASE_PATH}/settings/ssl/cinq-frontend.key \
-out ${APP_BACKEND_BASE_PATH}/settings/ssl/cinq-frontend.crt
}
configure_nginx() {
if [ "${APP_SSL_ENABLED}" = "True" ]; then
echo "Configuring nginx with ssl"
NGINX_CFG="nginx-ssl.conf"
else
echo "Configuring nginx without ssl"
NGINX_CFG="nginx-nossl.conf"
fi
sed -e "s|APP_FRONTEND_BASE_PATH|${APP_FRONTEND_BASE_PATH}|g" \
-e "s|APP_BACKEND_BASE_PATH|${APP_BACKEND_BASE_PATH}|g" \
files/${NGINX_CFG} > /etc/nginx/sites-available/cinq.conf
rm -f /etc/nginx/sites-enabled/default;
ln -sf /etc/nginx/sites-available/cinq.conf /etc/nginx/sites-enabled/cinq.conf
# redirect output to assign in-function stdout/err to global
service nginx restart 1>&1 2>&2
}
configure_supervisor() {
echo "Configuring supervisor"
sed -e "s|APP_BACKEND_BASE_PATH|${APP_BACKEND_BASE_PATH}|g" \
-e "s|APP_PYENV_PATH|${APP_PYENV_PATH}|g" \
files/supervisor.conf > /etc/supervisor/conf.d/cinq.conf
# If running on a systemd enabled system, ensure the service is enabled and running
if [ ! -z "$(which systemctl)" ]; then
systemctl enable supervisor.service
else
update-rc.d supervisor enable
fi
}
cd ${APP_TEMP_BASE}
validate_environment
create_virtualenv
install_frontend
install_backend
install_certs
generate_jwt_key
configure_application
configure_supervisor
configure_nginx
| true
|
a1959b982437d884b14eee9887d47ebd38b9f045
|
Shell
|
GoreLab/Vitamaize_NAM_GWAS_LabVersion
|
/2_GWAS.and.GWAS.Permutations/Core_Analysis/code/run_iterations.sh
|
UTF-8
| 10,688
| 4.34375
| 4
|
[] |
no_license
|
#!/bin/bash
#===================================
# SOURCE CONFIG FILE
#+++++++++++++++++++++++++++++++++++
#get full (canonical) path to the script location
script_f=$(readlink -enq ${0});
script_d=$(dirname ${script_f});
#source configuration file (required)
#Default: iterations.cfg in same directory as the script
cfg_f="${script_d}/iterations.cfg"
if cfg=$(readlink -enq ${cfg_f})
then
source ${cfg}
else
echo "ERROR: Configuration file ${cfg_f} was not found!" >&2
exit 1
fi
#===================================
# FUNCTION DEFINITIONS
#+++++++++++++++++++++++++++++++++++
#print instructions for script use when called
usage () {
echo "
Script to run several permutations of GWAS analysis using local copy of tassel.
For defaults and other runtime options see configuration file:
${cfg}
REQUIRED PARAMETERS:
-t <name>
Name of trait to be analyzed
-r <filename>
File containing the residuals for this analysis
OPTIONAL PARAMETERS:
-i <directory>
Directory containing the input file for this analysis
Default: ${in_d}
-o <directory>
Outputs will be saved here in a sub-directory based on trait name
Default: ${out_d}
-n <numeric>
Number of itterations to run
Default: ${total_iterations}
-c <numeric>
Total number of chromosomes to process
Default: ${nr_chr}
-s <numeric>
Starting chromosome
Default: ${start_chr}
-m <numeric>
Map resolution to use in centimorgans)
Default: ${res}
-x <directory>
Directory where the tassel libraries and .jar file are located
Default: ${tassel_d}
-p <number>
Number of concurrent tassel runs
Default: ${nr_proc}
-a <number>
Maximum ram (Gb) available to any child process
Default: ${max_ram}
-k use existing output directory and keep existing permutation files
-h Print this help and exit
-V Verbose mode -- output all parameters before running
";
}
#clean up any child processes on exit
function finish {
if pgrep -d'|' -P $$
then
#send SIGTERM to all child processes
echo -e "Cleaning up child processes:\nSending SIGTERM to child processes of $$\n"
pkill -15 -P $$
fi
#if any still exist wait 10 seconds for cleanup
if pgrep -d'|' -P $$
then
sleep 10
fi
#then send SIGKILL
if pgrep -d'|' -P $$
then
echo -e "Child processes still present, sending SIGKILL"
pkill -9 -P $$
fi
}
#set up EXIT trap for cleanup in case of premature exit
trap finish EXIT
#==================================
# PROCESS CONFIG AND OPTIONS
#++++++++++++++++++++++++++++++++++
#print usage if no options were given
if [ -z "$1" ]
then
usage
exit 1
fi
#parse command line options
#NOTE: these will overwrite any options specified in the config file
OPTIND='1'
options=':t:r:i:o:n:c:s:m:x:p:a:e:hVk'
while getopts $options option
do
case $option in
t ) trait=$OPTARG;;
r ) residuals_f=$OPTARG;;
i ) in_d=$OPTARG;;
o ) trait_out_d=$OPTARG;;
n ) total_iterations=$OPTARG;;
c ) nr_chr=$OPTARG;;
s ) start_chr=$OPTARG;;
m ) res=$OPTARG;;
x ) tassel_d=$OPTARG;;
p ) nr_proc=$OPTARG;;
a ) max_ram=$OPTARG;;
e) enterlimit=$OPTARG;;
k ) keep='TRUE';;
h ) usage; exit;;
V ) verbose='TRUE';;
\? ) echo "Unknown option: -$OPTARG" >&2; exit 1;;
: ) echo "Missing option argument for -$OPTARG" >&2; exit 1;;
* ) echo "Unimplemented option: -$OPTARG" >&2; exit 1;;
esac
done
if [ -z "${trait}" ]
then
echo "ERROR: Trait name must be specified with -t" >&2
exit 1
fi
if [ -z "${residuals_f}" ]
then
echo "ERROR: Residuals file must be specified with -r" >&2
exit 1
fi
if ! res_f_path=$(readlink -enq ${residuals_f})
then
echo "ERROR: Cannot find residuals file $residuals_f" >&2
exit 1
else
residuals_f=${res_f_path}
fi
# Set the max memory to use for child processes using soft caps in ulimit
#-------------------------------------------------------------------------
ram_request=$((${max_ram}*1048576)) #amount requested by user
local_max_ram=$[ $(free|awk '/^Mem:/{print $2}') - 512000 ] #amount of physical RAM on the system minus 500MB
# If the RAM requested by user exceeds (physical RAM - 500MB), lower it to that value
# We want to avoid splilling over into swap, as that will slow things down tremendously and
# likely lock up the system for a while.
if [ ${ram_request} -gt ${local_max_ram} ]
then ram_max=${local_max_ram}
else ram_max=${ram_request}
fi
# Check to see if our allocation exceeds the system hard limit on resources
v_ulimit_H=$(ulimit -H -v) #hard limit set by the system
if [ ${v_ulimit_H} != 'unlimited' ] && [ ${ram_max} -gt ${v_ulimit_H} ]
then ram_max=${v_ulimit_H}
fi
ulimit -S -v ${ram_max}
#set file templates for the chosen resolution
map_chr_t=$(echo -n ${map_t} | sed -re "s/\#\#res\#\#/${res}/g")
founders_chr_t=$(echo -n ${founders_t} | sed -re "s/\#\#res\#\#/${res}/g")
rils_chr_t=$(echo -n ${rils_t} | sed -re "s/\#\#res\#\#/${res}/g")
###########################################################
# We're done setting up runtime envirionment and variables
# Next we'll start jobs running tassel
# We assume tassel is not multi-core optimized, so we'll start
# one job for each processor
echo "
Starting permutation runs with the following parameters:
-t ${trait}
-r ${residuals_f}
-i ${in_d}
-o ${trait_out_d}
-n ${total_iterations}
-c ${nr_chr}
-s ${start_chr}
-m ${res}
-x ${tassel_d}
-p ${nr_proc}
-a ${max_ram}
-k ${keep}
-e ${enterlimit}
Other variables:
Java starting heap: ${Xms}g
Java max heap: ${Xmx}g
Tassel java libs: ${tassel_d}/${lib_d}
Tassel jar file: ${tassel_d}/${tassel_jar}
Map resolution: ${res}
File templates (see config file):
Map file template: ${map_t}
Founders file template: ${founders_t}
RILs file template: ${rils_t}
"
#==================================
# MAIN SCRIPT LOGIC
#++++++++++++++++++++++++++++++++++
#verify tassel location and set java libraries
if ! tassel_d_abs=$(readlink -enq ${tassel_d})
then
echo "ERROR: Could not find tassel directory ${tassel_d}" >&2
exit 1
fi
if ! tassel_lib_d=$(readlink -enq "${tassel_d}/${lib_d}")
then
echo "ERROR: Could not find tassel library directory ${tassel_d}/${lib_d}" >&2
exit 1
fi
if ! tassel_exe=$(readlink -enq "${tassel_d}/${tassel_jar}")
then
echo "ERROR: Could not find tassel jar file ${tassel_d}/${tassel_jar}" >&2
exit 1
fi
classpath="${tassel_lib_d}/*:${tassel_exe}"
#If output directory exists, make a new one by appending unix time to trait name
#unless -k (keep files) option was passed
if [ -e ${trait_out_d} ]
then
if [ ! "$keep" = 'TRUE' ]
then
trait_out_d=${trait_out_d}-$(date +%s)
if ! mkdir ${trait_out_d}
then
echo "ERROR: Could not make trait output directory: ${trait_out_d}" >&2
exit 1
fi
fi
else
if ! mkdir ${trait_out_d}
then
echo "ERROR: Could not make trait output directory: ${trait_out_d}" >&2
exit 1
fi
fi
echo "Saving output to ${trait_out_d}"
#Divide iterations among the number of concurent runs
#If division has remainder, the last run assigned will have less than ${step} itterations
iter_step=$(perl -e 'use POSIX; print ceil($ARGV[0]/$ARGV[1]), qq{\n}' ${total_iterations} ${nr_proc})
#Do permutations one chromosome at a time
chr=${start_chr}
end_chr=$[${start_chr} + ${nr_chr} - 1] #1-based chromosome numbering
while [ "${chr}" -le "${end_chr}" ]
do
#Set up chromosome files
map_chr=$(echo -n ${map_chr_t} | sed -re "s/\#\#chr\#\#/${chr}/g")
founders_chr=$(echo -n ${founders_chr_t} | sed -re "s/\#\#chr\#\#/${chr}/g")
rils_chr=$(echo -n ${rils_chr_t} | sed -re "s/\#\#chr\#\#/${chr}/g")
if ! map=$(readlink -enq ${in_d}/${map_chr})
then
echo "ERROR: Could not find map file ${in_d}/${map_chr} for chr ${chr}; Trying next chr." >&2
continue
fi
if ! founders=$(readlink -enq ${in_d}/${founders_chr})
then
echo "ERROR: Could not find founders file ${in_d}/${founders_chr} for chr ${chr}; Trying next chr." >&2
continue
fi
if ! rils=$(readlink -enq ${in_d}/${rils_chr})
then
echo "ERROR: Could not find rils file ${in_d}/${rils_chr} for chr ${chr}; Trying next chr." >&2
continue
fi
iter_start='0'
while [ "${iter_start}" -lt "${total_iterations}" ]
do
step=${iter_step}
iter_end=$[${iter_start} + ${iter_step}]
if [ "${iter_end}" -gt "${total_iterations}" ]
then
iter_end=${total_iterations}
step=$[${iter_end} - ${iter_start}]
fi
#set output model file
if ! model_out_f=$(readlink -fnq "${trait_out_d}/${trait}_model_chr${chr}_iter${iter_start}-${iter_end}.txt")
then
echo "ERROR: could not set model output file ${model_out_f}: cannot resolve path" >&2
exit 1
fi
#set output steps file
if ! steps_out_f=$(readlink -fnq "${trait_out_d}/${trait}_steps_chr${chr}_iter${iter_start}-${iter_end}.txt")
then
echo "ERROR: could not set steps output file ${steps_out_f}: cannot resolve path" >&2
exit 1
fi
#set output log file for each java run
if ! log_out_f=$(readlink -fnq "${trait_out_d}/${trait}_chr${chr}_iter${iter_start}-${iter_end}-log.txt")
then
echo "ERROR: could not set log output file ${log_out_f}: cannot resolve path" >&2
exit 1
fi
#=====================================
#EXECUTION CODE FROM ORIGINAL SCRIPTS
#-------------------------------------
#echo "perl ${indir}/tassel4-updated/run_pipeline.pl -Xms500m -Xmx800m -fork1 -NamGwasPlugin -map ${map} -trait ${residuals} -rils ${rils} -founders ${founders} -model ${outdir}/${trait}/${trait}_model_chr${chr}_iter${iter}.txt -steps ${outdir}/${trait}/${trait}_steps_chr${chr}_iter${iter}.txt -enterlimit $enterlimit -iterations $step -start $iter -fullmodel -endPlugin -runfork1 &" >> $script
#print "java -classpath '$CP' $java_mem_min $java_mem_max net.maizegenetics.pipeline.TasselPipeline @args\n";
#====================================
java -classpath ${classpath} -Xms${Xms}g -Xmx${Xmx}g net.maizegenetics.pipeline.TasselPipeline -fork1 -NamGwasPlugin -map ${map} -trait ${residuals_f} -rils ${rils} -founders ${founders} -model ${model_out_f} -steps ${steps_out_f} -enterlimit $enterlimit -iterations ${step} -start ${iter_start} -fullmodel -endPlugin -runfork1 &>${log_out_f} &
(( iter_start += ${step} ))
(( iter_start++ ))
done
# wait for all spawned jobs to finish before going to the next chromosome
wait
((chr++))
done
#run cleanup after everything is done
trap - EXIT
finish
exit 0
| true
|
7d1853498f0ec084fb6fb4423baf48d7fa14e7aa
|
Shell
|
ohsejoon/new-ara-api
|
/.docker/codebuild.sh
|
UTF-8
| 1,014
| 3.40625
| 3
|
[] |
no_license
|
#!/bin/bash
# CODEBUILD_WEBHOOK_TRIGGER: branch/<branch_name>, tag/<tag_name>, pr/<pr_number>
if [ ! -z $CODEBUILD_WEBHOOK_TRIGGER ]; then
WEBHOOK_TYPE=$(echo $CODEBUILD_WEBHOOK_TRIGGER | cut -d '/' -f1)
NAME=$(echo $CODEBUILD_WEBHOOK_TRIGGER | cut -d '/' -f2-)
if [ $WEBHOOK_TYPE = "branch" ]; then
export PUSH=true
if [ $NAME = "master" ]; then
export DOCKER_TAG=prod
export CACHE_DOCKER_TAG=prod
else
# Docker tag에 /가 들어갈 수 없어서 -로 변경
export DOCKER_TAG=$(echo $NAME | sed -e "s/\//-/g")
export CACHE_DOCKER_TAG=dev
fi
elif [ $WEBHOOK_TYPE = "tag" ]; then
export PUSH=true
export DOCKER_TAG=$NAME
export CACHE_DOCKER_TAG=prod
else # pr
export PUSH=false
export CACHE_DOCKER_TAG=dev
fi
else # 직접 codebuild 실행
export PUSH=false
export CACHE_DOCKER_TAG=dev
fi
echo $WEBHOOK_TYPE $CACHE_DOCKER_TAG $DOCKER_TAG $PUSH
| true
|
461497e74ebfc92bbb2e8328d297c0cf1eaea676
|
Shell
|
Scalingo/pgbouncer-buildpack
|
/support/pgbouncer-build
|
UTF-8
| 617
| 3.328125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
set -e
PGBOUNCER_VERSION=${PGBOUNCER_VERSION-1.7.2}
pgbouncer_tarball_url=https://pgbouncer.github.io/downloads/files/${PGBOUNCER_VERSION}/pgbouncer-${PGBOUNCER_VERSION}.tar.gz
temp_dir=$(mktemp -d /tmp/pgbouncer.XXXXXXXXXX)
cd $temp_dir
echo "Temp dir: $temp_dir"
echo "Downloading $pgbouncer_tarball_url"
curl -L $pgbouncer_tarball_url | tar xzv
(
cd pgbouncer-${PGBOUNCER_VERSION}
git apply /buildpack/support/0001-Disable-SIGTERM.patch
./configure \
--prefix=/tmp/pgbouncer
make -j 2 install
)
tar -zcvf /buildpack/pgbouncer-${PGBOUNCER_VERSION}-heroku.tgz -C /tmp/pgbouncer .
| true
|
79fa4c73d78c4626d1cdd78082d7861bebd2aada
|
Shell
|
KIT-CMS/sm-htt-analysis
|
/gof/plot_gof.sh
|
UTF-8
| 214
| 2.609375
| 3
|
[] |
no_license
|
#!/bin/bash
source utils/setup_cvmfs_sft.sh
source utils/setup_python.sh
ERA=$1
OUTPUTS=$2
VARIABLES=gof/variables.yaml
for CHANNEL in et mt tt em
do
./gof/plot_gof.py $VARIABLES $OUTPUTS $CHANNEL $ERA
done
| true
|
982eeef4903f7a39abf02be576d53c5cd9d2df85
|
Shell
|
eloyvallinaes/protein-foreplay
|
/scripts/MDsim/pro-cleanup.sh
|
UTF-8
| 261
| 2.859375
| 3
|
[] |
no_license
|
#!/bin/bash
path=$( pwd )
e=0
for folder in $( ls -d */ )
do
file=${folder%/}
ful="$folder$file".pdb
rm "$folder"pro.trr
rm "$folder"pro.mdp
rm "$folder"pro.gro
rm "$folder"pro*.cpt
rm "$folder"pro.edr
rm "$folder"#pro*
rm "$folder"pro-run.log
done
| true
|
972d2a7ff49d815385adc05e68e3c12aa8343bf3
|
Shell
|
cdrw-nutyx/NuTyX-Pkgfile
|
/Network/Connman/connman-gtk/Pkgfile
|
UTF-8
| 511
| 2.96875
| 3
|
[] |
no_license
|
#!/bin/bash
# Depends on: glib gtk3 openconnect
description="GTK GUI for ConnMan"
packager="cdrw"
url="https://github.com/jgke/connman-gtk"
name=connman-gtk
version=1.1.1
release=1
source=(${url}/releases/download/v${version}/$name-$version.tar.bz2)
build() {
cd $name-$version
./configure \
--bindir=/usr/bin \
--datarootdir=/usr/share \
--mandir=/usr/share/man \
--with-openconnect=dynamic
make
make DESTDIR=$PKG install
}
| true
|
4bd606c4bd715d8ede9a994afc4876e307043ec2
|
Shell
|
GeoKnow/GeoBenchLab
|
/FacetBench/sql/geotput_sql_postgis.sh
|
UTF-8
| 2,146
| 2.765625
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/bash
# LOD2 Geo Benchmark - compute throughput (multi-stream) scores
#
# Copyright (c) 2012, CWI (www.cwi.nl). Author: Peter Boncz
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# price of the system
PRICE=$1
# perf.out file with milliseconds per query in a stream in position four
(for stream in $2 $3 $4 $5 $6 $7 $8 $9 ${10} ${11} ${12} ${13} ${14} ${15} ${16} ${17}; do ./geopower_sql_postgis.sh $PRICE $stream; done) | awk "{ \
for(i=1;i<=6; i++) { \
k[i] += \$i; \
}\
for(i=8;i<=14; i++) { \
k[i-2] += \$i; \
} \
} END {\
for(i=p=1;i<=6; i++) { \
p*=k[i]; printf k[i] \" \" \
} \
printf \"%s (%s) \", p^0.1666, (p^0.1666)*1000/$PRICE; \
for(i=q=1;i<=6; i++) { \
q*=k[6+i]; printf k[6+i] \" \" \
} \
printf \"%s (%s) %s (%s)\n\", q^0.1666, (q^0.1666)*1000/$PRICE, (p*q)^0.0833, ((p*q)^0.0833)*1000/$PRICE; \
}"
| true
|
7e8b9e54693647aa73596d21f71d247bd02646ed
|
Shell
|
carlosmoran092/generator-sls
|
/generators/app/templates/scripts/vsdebug.sh
|
UTF-8
| 544
| 2.59375
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
path="${1}/.vscode"
if [[ ! -d ${path} ]]; then
mkdir ${path}
fi
cd ${path}
echo "{
\"version\": \"0.2.0\",
\"configurations\": [
{
\"name\": \"Connect to Lambda container\",
\"type\": \"go\",
\"request\": \"launch\",
\"mode\": \"remote\",
\"remotePath\": \"\",
\"port\": 8997,
\"host\": \"127.0.0.1\",
\"program\": \"${workspaceRoot}\",
\"apiVersion\": 1,
\"env\": {},
\"args\": [],
},
]
}" > launch.json
| true
|
0275a08bb7f57dc6084f64541cbd29737a516757
|
Shell
|
drmatthewclark/ReaxysLoader
|
/fullupdate.sh
|
UTF-8
| 548
| 3.46875
| 3
|
[] |
no_license
|
#!/bin/bash
# download latest rmc dataset
loader="ReaxysLoader"
echo "reaxys "
source update.sh
source ./${loader}/credentials.py
update ${download} ${dataset}
if [ "${success}" = "no" ]; then
echo "loading ended"
exit 1
fi
cd ${release}
pwd
eval "$(conda shell.bash hook)"
conda activate standard
time python ../${loader}/load_all.py
../../fix-perms
cd ..
del() {
shift
for d in $*; do
if [ ! "${d}" == "${release}" ]; then
echo 'removing dataset' $d
rm -r "${d}"
fi
done
}
dirs=`ls -dc 2[0-9]*`
del ${dirs}
| true
|
c86de2f8a9f3dfdcdac9be7820cb5e4c809570a8
|
Shell
|
Mic92/xfstests-cntr
|
/tests/xfs/119
|
UTF-8
| 1,102
| 3.140625
| 3
|
[] |
no_license
|
#! /usr/bin/env bash
# SPDX-License-Identifier: GPL-2.0
# Copyright (c) 2000-2005 Silicon Graphics, Inc. All Rights Reserved.
#
# FS QA Test No. 119
#
# Leaking reservation space in the GRH
# Test out pv#942130
# This can hang when things aren't working
#
seq=`basename $0`
seqres=$RESULT_DIR/$seq
echo "QA output created by $seq"
here=`pwd`
tmp=/tmp/$$
status=1 # failure is the default!
trap "_cleanup; exit \$status" 0 1 2 3 15
_cleanup()
{
cd /
rm -f $tmp.*
}
# get standard environment, filters and checks
. ./common/rc
. ./common/filter
# real QA test starts here
# Modify as appropriate.
_supported_fs xfs
_require_scratch
# this may hang
sync
export MKFS_OPTIONS="-l version=2,su=64k"
logblks=$(_scratch_find_xfs_min_logblocks)
export MKFS_OPTIONS="-l version=2,size=${logblks}b,su=64k"
export MOUNT_OPTIONS="-o logbsize=64k"
_scratch_mkfs_xfs >/dev/null
_scratch_mount
max=99
i=0
echo "start freezing and unfreezing"
while [ $i -lt $max ]; do
xfs_freeze -f $SCRATCH_MNT
xfs_freeze -u $SCRATCH_MNT
echo -n .
let i=$i+1
done
echo "done"
# success, all done
status=0
exit
| true
|
fd21e764f989ecb69116ee6b9b6b5d572511d3b2
|
Shell
|
vagetablechicken/rdsn
|
/thirdparty/download-thirdparty.sh
|
UTF-8
| 5,755
| 3.78125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# check_and_download package_name url md5sum extracted_folder_name
# return:
# 0 if download and extract ok
# 1 if already downloaded
# 2 if download or extract fail
function check_and_download()
{
package_name=$1
url=$2
correct_md5sum=$3
extracted_folder_name=$4
if [ -f $package_name ]; then
echo "$package_name has already downloaded, skip it"
if [ -d $extracted_folder_name ]; then
echo "$package_name has been extracted"
return 1
else
echo "extract package $package_name"
extract_package $package_name
local ret_code=$?
if [ $ret_code -ne 0 ]; then
return 2
else
return 0
fi
fi
else
echo "download package $package_name"
curl -L $url > $package_name
local ret_code=$?
if [ $ret_code -ne 0 ]; then
rm -f $1
echo "package $package_name download failed"
return 2
fi
md5=`md5sum $1 | cut -d ' ' -f1`
if [ "$md5"x != "$correct_md5sum"x ]; then
rm -f $1
echo "package $package_name is broken, already deleted"
return 2
fi
extract_package $package_name
local ret_code=$?
if [ $ret_code -ne 0 ]; then
return 2
else
return 0
fi
fi
}
# extract_package package_name
# Unpack a .tar.gz or .zip archive into the current directory.
# return:
#   0 on success (or for unrecognized extensions, matching the original
#     fall-through behavior)
#   2 if extraction failed; the archive is deleted so it can be re-fetched
#
# Fix: the original captured $? AFTER the second `if` statement, whose
# status is 0 whenever its condition is false — so a failed `tar xf` on a
# .tar.gz was silently reported as success. The status is now captured
# immediately after the extraction command.
function extract_package()
{
    local package_name=$1
    local ret_code=0

    case "$package_name" in
        *.tar.gz)
            tar xf "$package_name"
            ret_code=$?
            ;;
        *.zip)
            unzip -oq "$package_name"
            ret_code=$?
            ;;
        *)
            # Unknown extension: left untouched and reported as success,
            # preserving the original behavior.
            ;;
    esac

    if [ $ret_code -ne 0 ]; then
        rm -f "$package_name"
        echo "extract $package_name failed, please delete the incomplete folder"
        return 2
    fi
    return 0
}
# exit_if_fail status
# Abort the whole script when a download/extract step reported a hard
# failure (status 2); any other status — including 1, "already
# downloaded" — is tolerated and the script continues.
function exit_if_fail()
{
    case "$1" in
        2) exit "$1" ;;
    esac
}
# Directory layout, all relative to this script's location:
#   src/    downloaded + unpacked sources
#   build/  out-of-tree build dirs
#   output/ install prefix
TP_DIR=$( cd $( dirname $0 ) && pwd )
TP_SRC=$TP_DIR/src
TP_BUILD=$TP_DIR/build
TP_OUTPUT=$TP_DIR/output

if [ ! -d $TP_SRC ]; then
    mkdir $TP_SRC
fi

if [ ! -d $TP_BUILD ]; then
    mkdir $TP_BUILD
fi

if [ ! -d $TP_OUTPUT ]; then
    mkdir $TP_OUTPUT
fi

cd $TP_SRC

# Each check_and_download call takes: tarball name, URL, expected md5,
# and the directory the archive extracts to. exit_if_fail aborts only on
# status 2 (hard failure); status 1 ("already downloaded") is tolerated.

# concurrent queue
check_and_download "concurrentqueue-v1.0.0-beta.tar.gz"\
    "https://codeload.github.com/cameron314/concurrentqueue/tar.gz/v1.0.0-beta"\
    "761446e2392942aa342f437697ddb72e"\
    "concurrentqueue-1.0.0-beta"
exit_if_fail $?

# googletest
check_and_download "googletest-1.8.0.tar.gz"\
    "https://codeload.github.com/google/googletest/tar.gz/release-1.8.0"\
    "16877098823401d1bf2ed7891d7dce36"\
    "googletest-release-1.8.0"
exit_if_fail $?

# gperftools
check_and_download "gperftools-2.7.tar.gz"\
    "https://github.com/gperftools/gperftools/releases/download/gperftools-2.7/gperftools-2.7.tar.gz"\
    "c6a852a817e9160c79bdb2d3101b4601"\
    "gperftools-2.7"
exit_if_fail $?

#rapidjson
check_and_download "rapidjson-v1.1.0.tar.gz"\
    "https://codeload.github.com/Tencent/rapidjson/tar.gz/v1.1.0"\
    "badd12c511e081fec6c89c43a7027bce"\
    "rapidjson-1.1.0"
exit_if_fail $?

# thrift 0.9.3
# Patched only on a fresh download/extract (status 0); a status of 1
# means the already-extracted tree was patched on a previous run.
check_and_download "thrift-0.9.3.tar.gz"\
    "http://archive.apache.org/dist/thrift/0.9.3/thrift-0.9.3.tar.gz"\
    "88d667a8ae870d5adeca8cb7d6795442"\
    "thrift-0.9.3"
ret_code=$?
if [ $ret_code -eq 2 ]; then
    exit 2
elif [ $ret_code -eq 0 ]; then
    echo "make patch to thrift"
    cd thrift-0.9.3
    patch -p1 < ../../fix_thrift_for_cpp11.patch
    if [ $? != 0 ]; then
        echo "ERROR: patch fix_thrift_for_cpp11.patch for thrift failed"
        exit 2
    fi
    cd ..
fi

# use zookeeper c client
check_and_download "zookeeper-3.4.10.tar.gz"\
    "http://ftp.jaist.ac.jp/pub/apache/zookeeper/zookeeper-3.4.10/zookeeper-3.4.10.tar.gz"\
    "e4cf1b1593ca870bf1c7a75188f09678"\
    "zookeeper-3.4.10"
exit_if_fail $?

# libevent for send http request
check_and_download "libevent-2.1.8.tar.gz"\
    "https://github.com/libevent/libevent/archive/release-2.1.8-stable.tar.gz"\
    "80f8652e4b08d2ec86a5f5eb46b74510"\
    "libevent-release-2.1.8-stable"
exit_if_fail $?

# poco 1.7.8
check_and_download "poco-1.7.8.tar.gz"\
    "https://codeload.github.com/pocoproject/poco/tar.gz/poco-1.7.8-release"\
    "4dbf02e14b9f20940ca0e8c70d8f6036"\
    "poco-poco-1.7.8-release"
exit_if_fail $?

# fds
# Git clone (no tarball published); renamed to the short "fds" dir name
# expected by the build.
if [ ! -d $TP_SRC/fds ]; then
    git clone https://github.com/XiaoMi/galaxy-fds-sdk-cpp.git
    if [ $? != 0 ]; then
        echo "ERROR: download fds wrong"
        exit 2
    fi
    echo "mv galaxy-fds-sdk-cpp fds"
    mv galaxy-fds-sdk-cpp fds
else
    echo "fds has already downloaded, skip it"
fi

# fmtlib
check_and_download "fmt-4.0.0.tar.gz"\
    "https://codeload.github.com/fmtlib/fmt/tar.gz/4.0.0"\
    "c9be9a37bc85493d1116b0af59a25eba"\
    "fmt-4.0.0"
exit_if_fail $?

# gflags
check_and_download "gflags-2.2.1.zip"\
    "https://github.com/gflags/gflags/archive/v2.2.1.zip"\
    "2d988ef0b50939fb50ada965dafce96b"\
    "gflags-2.2.1"
exit_if_fail $?

# s2geometry
# Pinned to a specific upstream commit; patched on fresh extract, same
# pattern as thrift above.
check_and_download "s2geometry-0239455c1e260d6d2c843649385b4fb9f5b28dba.zip"\
    "https://github.com/google/s2geometry/archive/0239455c1e260d6d2c843649385b4fb9f5b28dba.zip"\
    "bfa5f1c08f535a72fb2c92ec16332c64"\
    "s2geometry-0239455c1e260d6d2c843649385b4fb9f5b28dba"
ret_code=$?
if [ $ret_code -eq 2 ]; then
    exit 2
elif [ $ret_code -eq 0 ]; then
    echo "make patch to s2geometry"
    cd s2geometry-0239455c1e260d6d2c843649385b4fb9f5b28dba
    patch -p1 < ../../fix_s2_for_pegasus.patch
    if [ $? != 0 ]; then
        echo "ERROR: patch fix_s2_for_pegasus.patch for s2geometry failed"
        exit 2
    fi
    cd ..
fi

cd $TP_DIR
| true
|
93f2bc970c2cb35b844bdc04061838e597645339
|
Shell
|
kflu/linux-settings-kfl
|
/rr/shell/rsync.sh
|
UTF-8
| 1,328
| 4
| 4
|
[] |
no_license
|
#!/bin/sh
# syncs local directory to remote.

# Help text. The last interpolated line greps this script's own
# ': ${var...}' declarations into the usage output, so the defaults
# shown are always the real ones (self-documenting).
USAGE="$(cat <<EOF
$(basename "$0") -r <remote> -d <sync_dir> [-p <port>] [-l <bw_limit (kBps)>] [-c <rc_file>]
remote: user@host:~user/path
sync_dir: Do NOT add tailing slash '/' (means the content of the dir), or I will remove it.
port: default to 22
bw_limit (kBps): number of kB/s. default to 5MB/s
rc_file: shell script to source for settings
$( <"$0" grep -E '^ *: ' )
EOF
)"

while getopts 'hr:d:p:l:c:' opt; do case "$opt" in
    r) remote="$OPTARG" ;;
    p) port="$OPTARG" ;;
    d) sync_dir="$OPTARG" ;;
    l) bw_limit="$OPTARG" ;;
    c) rc="$OPTARG" ;;
    h|*) echo "$USAGE" >&2; exit 1 ;;
esac done
shift $((OPTIND-1))

# Optional rc file (default: ./.rsync.sh.rc) may preset any variable.
: ${rc:=$(pwd)/.rsync.sh.rc}
if [ -e "$rc" ]; then
    # NOTE(review): '2>&1 echo' redirects stderr to stdout before echo
    # runs; '>&2' (message to stderr) was probably intended — verify.
    2>&1 echo "Sourcing $rc"
    . "$rc"
fi

# Required settings abort with the quoted message if still unset;
# the := forms fill in defaults.
: ${sync_dir:?'directory to backup'}
: ${remote:?'user@host:~user/path'}
: ${port:=22}
: ${bw_limit:=$((5*1024))}

# trailing slash in rsync means 'the content of', so remove it.
sync_dir="${sync_dir%/}"

(
    set -x
    # After a successful sync, move the source under DONE/ and leave a
    # symlink at the old path so local references keep working.
    done_root="$(dirname "$sync_dir")/DONE"
    done_dir="${done_root}/$(basename "$sync_dir")"
    mkdir -p "$done_root"
    rsync -avz --bwlimit="$bw_limit" -e "ssh -p ${port}" --progress "$sync_dir" "$remote" "$@" &&
    mv "$sync_dir" "$done_dir" &&
    ln -sf "$done_dir" "$sync_dir"
)
| true
|
d331b2d9d557e227bd2387d32bc071465498210e
|
Shell
|
petronny/aur3-mirror
|
/kernel26-pf-grsec/PKGBUILD
|
UTF-8
| 16,516
| 2.828125
| 3
|
[] |
no_license
|
# Maintainer: Slane <kozak dot szymon at gmail dot com>
# PKGBUILD assembled from kernel26, kernel26-bfs, kernel26-ck, kernel26-pf
# Credit to respective maintainers and milka_witek
#
# Arch Linux PKGBUILD for a 2.6.37 kernel carrying the pf-kernel and
# grsecurity patchsets. build() is interactive unless _BATCH=y.

_basekernel=2.6.37
_pkgname=kernel26-pf-grsec
_pfrel=pf3
_kernelname=-pf
_pfpatchhome="http://pf.natalenko.name/sources/${_basekernel}/"
_pfpatchname="patch-${_basekernel}-${_pfrel}.bz2"
# fixed compatibility problem between patches (sysctl.c)
_grsecpatchhome="http://slane.no-ip.org/pub/kernel/"
_grsecpatchname="grsecurity-2.2.1-2.6.37-201101272240-pf3.patch"

pkgname=kernel26-pf-grsec
pkgver=${_basekernel}
pkgrel=${_pfrel}
arch=(i686 x86_64)
_pkgdesc="Linux kernel and modules with pf-kernel and grsecurity patchset [-ck patchset (BFS included), TuxOnIce, BFQ]."
pkgdesc=${_pkgdesc}
license=('GPL2')
groups=('base')
url=('http://grsecurity.net')
backup=(etc/mkinitcpio.d/${_pkgname}.preset)
depends=('coreutils' 'module-init-tools' 'linux-firmware' 'mkinitcpio>=0.5.20')
optdepends=('paxctl: Manages various PaX related program header flags for Elf32, Elf64, binaries'
'paxtest: PaX regression test suite'
'gradm: Administrative interface for the grsecurity Role Based Access Control system'
'checksec: Tool designed to test what standard Linux OS and PaX security features are being used.'
'pm-utils: utilities and scripts for suspend and hibernate power management'
'tuxonice-userui: TuxOnIce userspace user interface'
'hibernate-script: set of scripts for managing TuxOnIce, hibernation and suspend to RAM'
'modprobed_db: Keeps track of EVERY kernel module that has ever been probed. Useful for make localmodconfig.')
replaces=('kernel24' 'kernel24-scsi' 'kernel26-scsi'
'alsa-driver' 'ieee80211' 'hostap-driver26'
'pwc' 'nforce' 'ivtv' 'zd1211' 'kvm-modules'
'iwlwifi' 'rt2x00-cvs' 'gspcav1' 'atl2'
'wlan-ng26' 'rt2500')
#conflicts=(kernel26-pf-core2 kernel26-pf-k8 kernel26-pf-psc kernel26-pf-atom kernel26-pf-k7 kernel26-pf-p3 kernel26-pf-pm kernel26-pf-p4)
conflicts=()
provides=(${_pkgname}=${_basekernel}) # for $pkgname-optimized
# below 'provides' is for when you have no other kernel (which is a bad idea anyway)
# provides=(${_pkgname}=${_basekernel} 'kernel26-headers' 'kernel26=$pkgver' 'aufs2')
install='kernel26.install'
source=(ftp://ftp.kernel.org/pub/linux/kernel/v2.6/linux-${_basekernel}.tar.bz2
config config.x86_64 # the main kernel config files
kernel26.preset # standard config files for mkinitcpio ramdisk
logo_linux_clut224.ppm.bz2
logo_linux_mono.pbm.bz2 # the archlinux boot logos
logo_linux_vga16.ppm.bz2
streamline_config.pl
${_pfpatchhome}${_pfpatchname} # the -pf patchset
${_grsecpatchhome}${_grsecpatchname}) # the grsec patchset

build() {
    cd ${srcdir}/linux-${_basekernel}

    # This is for me, to test the PKGBUILD
    # (NOEXTRACT=0 is the normal case: apply logos, patches and configs.)
    if [[ $NOEXTRACT = "0" ]]; then
        # Arch linux logo
        msg "Replacing penguins with arches"
        bzip2 -dk ${startdir}/logo_linux_*.bz2
        mv -f ${startdir}/logo_linux_*.p?m drivers/video/logo/

        # pf-kernel
        msg "Applying pf-kernel patch" && sleep 3
        bzip2 -dc ${srcdir}/${_pfpatchname} | patch -Np1

        # grsecurity
        msg "Applying grsecurity patchset" && sleep 3
        patch -Np1 < ${srcdir}/${_grsecpatchname}

        if [ "$CARCH" = "x86_64" ]; then
            cat ${startdir}/config.x86_64 >| .config
        else
            cat ${startdir}/config >| .config
        fi
        cat ${startdir}/streamline_config.pl >| scripts/kconfig/streamline_config.pl
    fi

    sed -i "s/EXTRAVERSION = -${_pfrel}/EXTRAVERSION = ${_kernelname}/" Makefile

    _arch=$CARCH

    #----------------------------------------
    # Interactive config selection: stock config / running kernel's
    # /proc/config.gz / make localmodconfig equivalent.
    if [[ "$_BATCH" != "y" ]]; then # for batch building
        echo
        echo "======================================================="
        msg "You might be prompted below for some config options"
        echo "======================================================="
        echo
        msg "Hit <ENTER> (or just <N>) to build an all-inclusive kernel like stock -ARCH"
        echo "(URGENT: change ${startdir}/config! [CONFIG_GRKERNSEC_HIGH=y by default])"
        echo
        msg "Hit <Y> to use your running kernel's config"
        echo " (needs IKCONFIG and IKCONFIG_PROC)"
        msg "Hit <L> to run make localmodconfig"
        echo

        read answer
        shopt -s nocasematch

        if [[ "$answer" = "y" ]]; then
            msg "running 'sudo modprobe configs'"
            sudo modprobe configs
            if [[ -s /proc/config.gz ]]; then
                msg "Extracting config from /proc/config.gz..."
                zcat /proc/config.gz >| ./.config
            else
                msg "You kernel was not compiled with IKCONFIG_PROC."
                # Copied from kernel26-ck's PKGBUILD
                msg "Attempting to run /usr/bin/reload_database from modprobe_db..."
                if [ -e /usr/bin/reload_database ]; then
                    /usr/bin/reload_database
                fi
                msg "Running make localmodconfig instead..."
                chmod +x ./scripts/kconfig/streamline_config.pl
                ./scripts/kconfig/streamline_config.pl >| config_strip
                mv config_strip .config
            fi
        elif [[ "$answer" = "l" ]]; then
            # Copied from kernel26-ck's PKGBUILD
            msg "Attempting to run /usr/bin/reload_database from modprobe_db..."
            if [ -e /usr/bin/reload_database ]; then
                /usr/bin/reload_database
            fi
            msg "Running the functional equivalent of make localmodconfig now..."
            chmod +x ./scripts/kconfig/streamline_config.pl
            ./scripts/kconfig/streamline_config.pl >| config_strip
            mv config_strip .config
            msg "An error about ksource in line 118 blah blah is NORMAL as is one about nvidia config too!"
        else
            msg "Using kernel .config (with BFS, BFQ, TuxOnIce and Grsecurity enabled)."
        fi

        # Make some good use of MAKEFLAGS
        MAKEFLAGS=`grep -v '#' /etc/makepkg.conf | grep MAKEFLAGS= | sed s/MAKEFLAGS=// | sed s/\"//g`

        msg "Make prepare"
        make prepare

        # Option for make menuconfig
        echo
        msg "Run make menuconfig before build? (y/N)"
        read answer
        if [[ "$answer" = "y" ]]; then
            make menuconfig
            cp -v .config ${startdir}/config.local
        fi

        # Detect which CPU the .config targets and save a copy of the
        # config under a CPU-specific name for reuse.
        CPU=`egrep "MK8=y|MCORE2=y|MPSC=y|MATOM=y|MPENTIUMII=y|MPENTIUMIII=y|MPENTIUMM=y|MPENTIUM4=y|MK7=y|CONFIG_GENERIC_CPU=y|M686=y" ./.config`
        CPU=`sed -e "s/CONFIG_M\(.*\)=y/\1/" <<<$CPU`
        CPU=`sed -e "s/CONFIG_GENERIC_CPU=y/GENERIC/" <<<$CPU`
        CPU=`sed -e "s/^686$/GENERIC/" <<<$CPU`
        cp -v .config ${startdir}/config.$CPU-$CARCH
    fi # batch check ends here
    #----------------------------------------

    # Strip config of uneeded localversion
    if [ "${_kernelname}" != "" ]; then
        sed -i "s|CONFIG_LOCALVERSION=.*|CONFIG_LOCALVERSION=\"\"|g" ./.config
    fi

    # build!
    msg "Make"
    make bzImage modules
}

package() {
    KARCH=x86
    cd ${srcdir}/linux-${_basekernel}
    _kernver=$(make kernelrelease)

    # work around the AUR parser
    # This allows building cpu-optimized packages with according package names.
    # Useful for repo maintainers.
    CPU=`egrep "MK8=y|MCORE2=y|MPSC=y|MATOM=y|MPENTIUMII=y|MPENTIUMIII=y|MPENTIUMM=y|MPENTIUM4=y|MK7=y|CONFIG_GENERIC_CPU=y|M686=y" ./.config`
    CPU=`sed -e "s/CONFIG_M\(.*\)=y/\1/" <<<$CPU`
    case $CPU in
    CORE2)
        pkgname="${_pkgname}-core2"
        pkgdesc="${_pkgdesc} Intel Core2 optimized."
        ;;
    K8)
        pkgname="${_pkgname}-k8"
        pkgdesc="${_pkgdesc} AMD K8 optimized."
        ;;
    PSC)
        pkgname="${_pkgname}-psc"
        pkgdesc="${_pkgdesc} Intel Pentium4/D/Xeon optimized."
        ;;
    ATOM)
        pkgname="${_pkgname}-atom"
        pkgdesc="${_pkgdesc} Intel Atom optimized."
        ;;
    K7)
        pkgname="${_pkgname}-k7"
        pkgdesc="${_pkgdesc} AMD K7 optimized."
        ;;
    PENTIUMII)
        pkgname="${_pkgname}-p2"
        pkgdesc="${_pkgdesc} Intel Pentium2 optimized."
        ;;
    PENTIUMIII)
        pkgname="${_pkgname}-p3"
        pkgdesc="${_pkgdesc} Intel Pentium3 optimized."
        ;;
    PENTIUMM)
        pkgname="${_pkgname}-pm"
        pkgdesc="${_pkgdesc} Intel Pentium-M optimized."
        ;;
    PENTIUM4)
        pkgname="${_pkgname}-p4"
        pkgdesc="${_pkgdesc} Intel Pentium4 optimized."
        ;;
    default)
        # Note to me: DO NOT EVER REMOVE THIS. It's for the AUR PKGBUILD parser.
        pkgname="${_pkgname}"
        pkgdesc="Linux kernel and modules with pf-kernel and grsecurity patchset [-ck patchset (BFS included), TuxOnIce, BFQ]."
        conflicts=(kernel26-pf-core2 kernel26-pf-k8 kernel26-pf-psc kernel26-pf-atom kernel26-pf-k7 kernel26-pf-p3 kernel26-pf-pm kernel26-pf-p4)
        ;;
    esac
    if [[ "$pkgname" != "$_pkgname" ]]; then
        conflicts=('kernel26-pf')
    fi

    echo
    echo "======================================="
    msg "The package will be named ${pkgname}"
    msg "${pkgdesc}"
    echo "======================================="
    echo

    ### package_kernel26
    # Install modules, kernel image, System.map and the mkinitcpio preset.
    mkdir -p ${pkgdir}/{lib/modules,boot}
    make INSTALL_MOD_PATH=${pkgdir} modules_install
    cp System.map ${pkgdir}/boot/System.map26${_kernelname}-grsec
    cp arch/$KARCH/boot/bzImage ${pkgdir}/boot/vmlinuz26${_kernelname}-grsec

    # add vmlinux
    install -m644 -D vmlinux ${pkgdir}/usr/src/linux-${_kernver}/vmlinux

    # install fallback mkinitcpio.conf file and preset file for kernel
    # make sure ${_pkgname} is used for the mkinitcpio process
    install -m644 -D ${srcdir}/kernel26.preset ${pkgdir}/etc/mkinitcpio.d/${_pkgname}.preset

    # set correct depmod command for install
    #sed \
    # -e "s/KERNEL_NAME=.*/KERNEL_NAME=${_kernelname}/g" \
    # -e "s/KERNEL_VERSION=.*/KERNEL_VERSION=${_kernver}/g" \
    # -i ${startdir}/kernel26.install
    #sed \
    # -e "s|source .*|source /etc/mkinitcpio.d/kernel26${_kernelname}.kver|g" \
    # -e "s|default_image=.*|default_image=\"/boot/${_pkgname}.img\"|g" \
    # -e "s|fallback_image=.*|fallback_image=\"/boot/${_pkgname}-fallback.img\"|g" \
    # -i ${pkgdir}/etc/mkinitcpio.d/${_pkgname}.preset
    echo -e "# DO NOT EDIT THIS FILE\nALL_kver='${_kernver}'" > ${pkgdir}/etc/mkinitcpio.d/${_pkgname}.kver

    ### package_kernel26-headers
    # Ship enough of the source tree for out-of-tree module builds
    # (nvidia, vmware, virtualbox, lirc, dvb, aufs, ...).
    mkdir -p ${pkgdir}/lib/modules/${_kernver}
    cd ${pkgdir}/lib/modules/${_kernver}
    ln -sf ../../../usr/src/linux-${_kernver} build
    cd ${srcdir}/linux-$_basekernel
    install -D -m644 Makefile \
    ${pkgdir}/usr/src/linux-${_kernver}/Makefile
    install -D -m644 kernel/Makefile \
    ${pkgdir}/usr/src/linux-${_kernver}/kernel/Makefile
    install -D -m644 .config \
    ${pkgdir}/usr/src/linux-${_kernver}/.config
    mkdir -p ${pkgdir}/usr/src/linux-${_kernver}/include

    for i in acpi asm-generic config generated linux math-emu media net pcmcia scsi sound trace video xen; do
        cp -a include/$i ${pkgdir}/usr/src/linux-${_kernver}/include/
    done

    # copy arch includes for external modules
    mkdir -p ${pkgdir}/usr/src/linux-${_kernver}/arch/x86
    cp -a arch/x86/include ${pkgdir}/usr/src/linux-${_kernver}/arch/x86/

    # copy files necessary for later builds, like nvidia and vmware
    cp Module.symvers ${pkgdir}/usr/src/linux-${_kernver}
    cp -a scripts ${pkgdir}/usr/src/linux-${_kernver}

    # fix permissions on scripts dir
    chmod og-w -R ${pkgdir}/usr/src/linux-${_kernver}/scripts
    mkdir -p ${pkgdir}/usr/src/linux-${_kernver}/.tmp_versions

    mkdir -p ${pkgdir}/usr/src/linux-${_kernver}/arch/$KARCH/kernel

    cp arch/$KARCH/Makefile ${pkgdir}/usr/src/linux-${_kernver}/arch/$KARCH/
    if [ "$CARCH" = "i686" ]; then
        cp arch/$KARCH/Makefile_32.cpu ${pkgdir}/usr/src/linux-${_kernver}/arch/$KARCH/
    fi
    cp arch/$KARCH/kernel/asm-offsets.s ${pkgdir}/usr/src/linux-${_kernver}/arch/$KARCH/kernel/

    # add headers for lirc package
    mkdir -p ${pkgdir}/usr/src/linux-${_kernver}/drivers/media/video
    cp drivers/media/video/*.h ${pkgdir}/usr/src/linux-${_kernver}/drivers/media/video/
    for i in bt8xx cpia2 cx25840 cx88 em28xx et61x251 pwc saa7134 sn9c102 usbvideo; do
        mkdir -p ${pkgdir}/usr/src/linux-${_kernver}/drivers/media/video/$i
        cp -a drivers/media/video/$i/*.h ${pkgdir}/usr/src/linux-${_kernver}/drivers/media/video/$i
    done

    # add docbook makefile
    install -D -m644 Documentation/DocBook/Makefile \
    ${pkgdir}/usr/src/linux-${_kernver}/Documentation/DocBook/Makefile

    # add dm headers
    mkdir -p ${pkgdir}/usr/src/linux-${_kernver}/drivers/md
    cp drivers/md/*.h ${pkgdir}/usr/src/linux-${_kernver}/drivers/md

    # add inotify.h
    mkdir -p ${pkgdir}/usr/src/linux-${_kernver}/include/linux
    cp include/linux/inotify.h ${pkgdir}/usr/src/linux-${_kernver}/include/linux/

    # add wireless headers
    mkdir -p ${pkgdir}/usr/src/linux-${_kernver}/net/mac80211/
    cp net/mac80211/*.h ${pkgdir}/usr/src/linux-${_kernver}/net/mac80211/

    # add dvb headers for external modules
    # in reference to:
    # http://bugs.archlinux.org/task/9912
    mkdir -p ${pkgdir}/usr/src/linux-${_kernver}/drivers/media/dvb/dvb-core
    cp drivers/media/dvb/dvb-core/*.h ${pkgdir}/usr/src/linux-${_kernver}/drivers/media/dvb/dvb-core/

    # add dvb headers for external modules
    # in reference to:
    # http://bugs.archlinux.org/task/11194
    if [[ -d include/config/dvb ]]; then
        mkdir -p ${pkgdir}/usr/src/linux-${_kernver}/include/config/dvb/
        cp include/config/dvb/*.h ${pkgdir}/usr/src/linux-${_kernver}/include/config/dvb/
    fi

    # add dvb headers for http://mcentral.de/hg/~mrec/em28xx-new
    # in reference to:
    # http://bugs.archlinux.org/task/13146
    mkdir -p ${pkgdir}/usr/src/linux-${_kernver}/drivers/media/dvb/frontends/
    cp drivers/media/dvb/frontends/lgdt330x.h ${pkgdir}/usr/src/linux-${_kernver}/drivers/media/dvb/frontends/
    cp drivers/media/video/msp3400-driver.h ${pkgdir}/usr/src/linux-${_kernver}/drivers/media/dvb/frontends/

    # add dvb headers
    # in reference to:
    # http://bugs.archlinux.org/task/20402
    mkdir -p ${pkgdir}/usr/src/linux-${_kernver}/drivers/media/dvb/dvb-usb
    cp drivers/media/dvb/dvb-usb/*.h ${pkgdir}/usr/src/linux-${_kernver}/drivers/media/dvb/dvb-usb/
    mkdir -p ${pkgdir}/usr/src/linux-${_kernver}/drivers/media/dvb/frontends
    cp drivers/media/dvb/frontends/*.h ${pkgdir}/usr/src/linux-${_kernver}/drivers/media/dvb/frontends/
    mkdir -p ${pkgdir}/usr/src/linux-${_kernver}/drivers/media/common/tuners
    cp drivers/media/common/tuners/*.h ${pkgdir}/usr/src/linux-${_kernver}/drivers/media/common/tuners/

    # add xfs and shmem for aufs building
    mkdir -p ${pkgdir}/usr/src/linux-${_kernver}/fs/xfs
    mkdir -p ${pkgdir}/usr/src/linux-${_kernver}/mm
    cp fs/xfs/xfs_sb.h ${pkgdir}/usr/src/linux-${_kernver}/fs/xfs/xfs_sb.h

    # add headers vor virtualbox
    # in reference to:
    # http://bugs.archlinux.org/task/14568
    cp -a include/drm $pkgdir/usr/src/linux-${_kernver}/include/

    # add headers for broadcom wl
    # in reference to:
    # http://bugs.archlinux.org/task/14568
    cp -a include/trace $pkgdir/usr/src/linux-${_kernver}/include/

    # add headers for crypto modules
    # in reference to:
    # http://bugs.archlinux.org/task/22081
    cp -a include/crypto $pkgdir/usr/src/linux-${_kernver}/include/

    # copy in Kconfig files
    for i in `find . -name "Kconfig*"`; do
        mkdir -p ${pkgdir}/usr/src/linux-${_kernver}/`echo $i | sed 's|/Kconfig.*||'`
        cp $i ${pkgdir}/usr/src/linux-${_kernver}/$i
    done

    chown -R root.root ${pkgdir}/usr/src/linux-${_kernver}
    find ${pkgdir}/usr/src/linux-${_kernver} -type d -exec chmod 755 {} \;

    # remove unneeded architectures
    rm -rf ${pkgdir}/usr/src/linux-${_kernver}/arch/{alpha,arm,arm26,avr32,blackfin,cris,frv,h8300,ia64,m32r,m68k,m68knommu,mips,microblaze,mn10300,parisc,powerpc,ppc,s390,sh,sh64,sparc,sparc64,um,v850,xtensa}

    # make correct build and source links
    rm -f ${pkgdir}/lib/modules/${_kernver}/{source,build}
    cd ${pkgdir}/lib/modules/${_kernver} && \
    (rm -f source build; ln -sf ../../../usr/src/linux-${_kernver} build)

    # remove the firmware
    rm -rf ${pkgdir}/lib/firmware
}

# makepkg -g >>PKGBUILD
sha256sums=('edbf091805414739cf57a3bbfeba9e87f5e74f97e38f04d12060e9e0c71e383a'
'46fea7163eb586b6baae50a226e8c9b7b057b6c097a152d4c7b0ac376234f32f'
'43316c9c3478f802837de0a54161b1bd4059b40d408fb16e0036eb623ef63313'
'7250c85494a256297c015801090f604fcd4b70e422b6df6e3d062ff9936a3d69'
'03ed4eb4a35d42ae6beaaa5e6fdbada4244ed6c343944bba6462defaa6fed0bf'
'51ea665cfec42d9f9c7796af2b060b7edbdeb367e42811f8c02667ad729f6b19'
'9e1e81d80afac6f316e53947e1b081017090081cd30e6c4c473420b77af4b52b'
'47008d49add12a0e952065dbaa61285f654bf3c3b53d1577baef128b7c0c404b'
'362331148ae78a66fcfe8f2cf2b55e4e5b589bbab3024c4c864769b3c816707c'
'a90434e9d096ff172de37a424933a3d65115a3edd90cdfc5c6b178d1bff743a8')
| true
|
92384e2215bf394febd1adad44cb808a79cc79dc
|
Shell
|
syyunn/fec-bulk-loader
|
/expenditures/load.sh
|
UTF-8
| 501
| 3.328125
| 3
|
[] |
no_license
|
#!/bin/bash
set -euo pipefail

# FEC operating-expenditures bulk loader: for each two-digit election
# cycle, fetch the oppexp archive and COPY its contents into Postgres.
CYCLES="${CYCLES:-20 18 16 14 12 10 08 06 04}" # 02 00 98 96 94 92 90

psql -f load.sql

for cycle in $CYCLES; do
    # String comparison, not -eq: bash's arithmetic parsing treats a
    # leading zero as octal, so '[ "08" -eq 02 ]' errors out with
    # "value too great for base" and the skip never worked.
    if [ "$cycle" = "02" ] || [ "$cycle" = "00" ]; then
        continue
    fi
    wget -nv -N "https://www.fec.gov/files/bulk-downloads/20"$cycle"/oppexp"$cycle".zip"
    unzip "oppexp"$cycle".zip" "oppexp.txt"
    psql -c "\COPY fec_bulk_expenditures FROM 'oppexp.txt' WITH CSV DELIMITER '|' QUOTE E'\b' ENCODING 'LATIN1';"
    rm "oppexp.txt"
done
| true
|
51f4189a4014ec66064ae4d12c18936e797fe17d
|
Shell
|
SwiftXcode/swift-xcode
|
/scripts/swift-xcode
|
UTF-8
| 1,251
| 4.21875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#
# Copyright 2017-2018 ZeeZide GmbH. All rights reserved.
#
# swift-xcode -- A collection of SwiftPM extension to deal with building
# Xcode projects.
#
# Enable shell tracing when the caller exports DEBUG=yes.
if [ "$DEBUG" = "yes" ]; then
    set -x
fi

SCRIPT_DIR=$(dirname "$0")        # directory holding the subcommand scripts
SCRIPT_BASENAME=$(basename "$0")  # subcommands are named "<basename>-<sub>"
# Print the one-screen help text for the top-level multiplexer.
usage() {
  printf '%s\n' \
    "usage: swift xcode <subcommand>" \
    "" \
    "Available subcommands are:" \
    " build Build SPM package so that Xcode can consume it." \
    " image Build an image from an SPM Template package." \
    " env Print environment etc." \
    " link-templates Link Xcode templates in Homebrew to Xcode location." \
    "" \
    "Try 'swift xcode <subcommand> help' for details."
}
# Dispatch "swift xcode <sub>" to the sibling script
# "<this-script>-<sub>", falling back to prefix matching so abbreviated
# subcommand names work. The chosen script is *sourced*, not exec'd, so
# it inherits SCRIPT_DIR etc.
main() {
    if [ $# -lt 1 ]; then
        usage
        exit 1
    fi

    # sanity checks
    SUBCOMMAND="$1"; shift
    if test -x "${SCRIPT_DIR}/${SCRIPT_BASENAME}-$SUBCOMMAND"; then
        RUNCMD="${SCRIPT_DIR}/${SCRIPT_BASENAME}-$SUBCOMMAND"
    else
        # Prefix match: first executable whose name starts with the given
        # subcommand. NOTE(review): if the glob matches nothing, 'ls'
        # fails, THECMD is empty, the -x test fails, and we fall through
        # to the error path — which is the intended outcome.
        THECMD="`ls ${SCRIPT_DIR}/${SCRIPT_BASENAME}-${SUBCOMMAND}* | head -n 1`"
        if test -x "${THECMD}"; then
            RUNCMD="${THECMD}"
        else
            echo "Unknown subcommand: '$SUBCOMMAND'"
            echo
            usage
            exit 1
        fi
    fi

    # run command
    . "${RUNCMD}"
}
main "$@"
| true
|
1063dca31f7b683a500e55534ec73cb966b19b3e
|
Shell
|
rtavenar/elects
|
/experiments/mori_runtime_uniform.sh
|
UTF-8
| 729
| 2.5625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Measure ELECTS (DualOutputRNN) training runtime on UniformCrops
# subsets of increasing size; one log file per sample count.
# Fix: the original shebang was the malformed "#!/bin/#!/usr/bin/env bash".

epochs=200
logstore="/data/ec_runtimes_uniform"

for nsamples in 50 75 100 250 500 750 1000 2500 5000; do
    log="$logstore/UniformCrops_${nsamples}.log"
    # Truncate the log, then bracket the training run with timestamps so
    # wall-clock runtime can be computed from the log alone.
    echo samples $nsamples > "$log"
    echo $(date -Iseconds -u) : started optimization with $nsamples examples >> "$log"
    python ../src/train.py -d UniformCrops_$nsamples -m DualOutputRNN --epsilon 10 --test_every_n_epochs $epochs --loss_mode early_reward -x "" -b 1024 --warmup-steps 100 --dropout 0.5 -w 16 -i 1 -a .25 --store $logstore --no-visdom --overwrite --train_on train --test_on eval -r 64 -n 4 -e $epochs -s -1
    echo $(date -Iseconds -u) : ended optimization with $nsamples examples >> "$log"
done
| true
|
27dca5b1c1416572e59bfa04c7b8dacb84b67fa4
|
Shell
|
grounds/grounds-exec-deprecated
|
/hack/test-unit.sh
|
UTF-8
| 312
| 3.5
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Run unit tests (with coverage) for every package under pkg/.
set -e

# The Go toolchain needs GOPATH; fail fast with a pointer to the docs.
if [ ! "$GOPATH" ]; then
    echo >&2 'error: missing GOPATH; please see http://golang.org/doc/code.html#GOPATH'
    exit 1
fi
# Print the first-level package directories under pkg/, collapsed onto
# one space-separated line (the unquoted echo is intentional — it
# reproduces the original's word-splitting behavior).
get_pkg_dirs() {
    local dirs
    dirs=$(find pkg -maxdepth 1 -type d | grep pkg/)
    echo $dirs
}
# For every pkg
# Run gom (gopkg manager) tests per package; set -e above aborts the
# script on the first failing package.
for dir in $(get_pkg_dirs); do
    echo "Testing: $dir"
    gom test -cover "./$dir"
done
| true
|
1035e2de5b8ce9fb40120b481051c8b633db5a9c
|
Shell
|
omakoto/misc
|
/wat
|
UTF-8
| 858
| 3.421875
| 3
|
[] |
no_license
|
#!/bin/bash -e
# wat -- run a command repeatedly under gaze/watch, opening a dedicated
# terminal window when running under X.
set -e

. mutil.sh

# Defaults; all overridable via the option spec below.
geo=200x80
zoom=0.85
title=""
interval=2
command=""
watch=gaze
opt_x=""

# getopt.pl (project helper) emits shell assignments for the variables
# named on the right of each spec line; eval applies them.
eval "$(getopt.pl '
g|geo|geometry=s geo=% # Set window size. e.g. "80x25"
z|zoom=s zoom=% # Set zoom scale. e.g. "0.5"
t|title=s title=% # Set window title.
n|i|interval=i interval=% # Set refresh interval in seconds.
c|command=s command=% # Run with bash -c
w|use-watch watch=watch # Use watch instead of gaze.
x|use-exec opt_x=-x # Use exec() instead of system()
' "$@")"

# Assemble the watcher command line; -c wraps the command in bash -c.
cmd=()
cmd+=($watch "--color" "-n" "$interval" $opt_x)
cmd+=("$@")
if [[ "$command" != "" ]] ; then
    cmd+=(bash -c "$command")
fi
title="${title:-$@}"

unset COLORTERM
# NOTE(review): 'set TERM=vt100' replaces the positional parameters; it
# does not export TERM. 'export TERM=vt100' was probably intended — verify.
set TERM=vt100

# Under X (isx from mutil.sh), open a new terminal; otherwise run here.
if isx ; then
    start-terminal -t "$title" "${cmd[@]}"
else
    ee "${cmd[@]}"
fi
| true
|
13bb3be4ab93b9a24a0f25da4709509db72c6f77
|
Shell
|
jgfaisca/arduino-programming
|
/ArduinoISP_verify_connection.sh
|
UTF-8
| 2,543
| 4.15625
| 4
|
[] |
no_license
|
#!/bin/bash
###############################################################################
#
# Script to verify connection to AVR device
# using Arduino UNO as a programmer (Arduino ISP)
#
# Requires:
# - Arduino UNO
# - AVRDUDE / Arduino IDE
# - GNU Linux operating system
# - Linux STTY
#
# The following variables affect the behavior of this script:
# ARDUINO_IDE_PATH; Arduino IDE path
# AVRDUDE; avrdude binary path
# CONFIG_FILE; avrdude configuration path
# PARTNO; AVR device(MCU)
# PROGRAMMER - AVR programmer
# PORT; Connection Port
#
# 1 - Install Arduino IDE on user home directory
# https://www.arduino.cc/en/main/software
#
# 2 - Use Arduino UNO as an AVR ISP
# https://www.arduino.cc/en/Tutorial/ArduinoISP
#
# 3 - Run script using Arduino UNO port as argument (e.g. /dev/ttyUSB0)
#
#
# Authors:
# Jose G. Faisca <jose.faisca@gmail.com>
#
# Version: 1.0
# Date: 08-2015
#
###############################################################################
# Paths into a user-local Arduino IDE install; the glob tolerates any
# IDE version directory name. avrdude and its config ship with the IDE.
ARDUINO_IDE_PATH="$HOME/Code/arduino-*"
AVRDUDE="${ARDUINO_IDE_PATH}/\
hardware/tools/avr/bin/avrdude"
CONFIG_FILE="${ARDUINO_IDE_PATH}/\
hardware/tools/avr/etc/avrdude.conf"
PARTNO="m328p"          # target MCU (ATmega328P)
PROGRAMMER="stk500v1"   # ArduinoISP speaks the stk500v1 protocol
PORT="$1"
# find usb devices
# Enumerate tty-capable USB serial devices: walk sysfs, ask udevadm for
# each node's name and ID_SERIAL, print matches, and probe each port
# with check_port. Each iteration runs in a subshell so the exported
# udevadm properties do not leak between devices.
function find_usb(){
    for sysdevpath in $(find /sys/bus/usb/devices/usb*/ -name dev); do
        (
            syspath="${sysdevpath%/dev}"
            devname="$(udevadm info -q name -p $syspath)"
            # NOTE(review): 'continue' inside this ( ) subshell has no
            # enclosing loop, so it may not skip the remaining commands
            # as intended — verify.
            [[ "$devname" == "bus/"* ]] && continue
            eval "$(udevadm info -q property --export -p $syspath)"
            [[ -z "$ID_SERIAL" ]] && continue
            device=$(echo "/dev/$devname - $ID_SERIAL" | grep tty)
            if [ -n "$device" ]; then
                echo $device
                port=${device%% *}   # strip " - $ID_SERIAL" suffix
                check_port $port
            fi
        )
    done
}
# check port
function check_port(){
port=$1
CMD0="stty -F ${port} cs8 115200 ignbrk -brkint \
-icrnl imaxbel -opost -onlcr -isig -icanon \
-iexten -echo -echoe -echok -echoctl -echoke \
noflsh -ixon -crtscts"
CMD1="stty -a -F ${port}"
eval $CMD0 >/dev/null 2>&1
eval $CMD1 >/dev/null
if [ $? -eq 0 ]; then
return 0 # true
else
echo "$(tput setaf 1)An error occurred trying to connect ${port} $(tput sgr0)"
return 1 # false
fi
}
# check arguments: exactly one (the port); otherwise print usage plus a
# list of candidate USB serial devices and bail out.
if [[ $# -ne 1 ]] ; then
    SCRIPT_NAME=$(basename "$0")
    echo "Usage: $SCRIPT_NAME <PORT>"
    echo "Available USB devices ..."
    find_usb
    exit 1
else
    if ! check_port $PORT; then
        exit 1
    fi
fi

# try connection
# A verbose avrdude probe at 19200 baud; success proves the ISP wiring
# and target MCU respond.
CMD="${AVRDUDE} \
-p ${PARTNO} \
-C ${CONFIG_FILE} \
-c ${PROGRAMMER} \
-P ${PORT} \
-b 19200 \
-v"
echo $CMD
eval $CMD

exit 0
| true
|
cdf68d903725c7ee8658a930b8d2cad4ee91c428
|
Shell
|
inutano/docker-recipes
|
/galaxy-exom-seq/install_repo_wrapper.sh
|
UTF-8
| 347
| 2.78125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Wrapper: boot Galaxy's backing services, install a Tool Shed
# repository via the Galaxy API, then shut everything back down.
# $1: repository spec passed to install_tool_shed_repositories.py
service postgresql start
service apache2 start
./run.sh --daemon
# Give the Galaxy daemon time to finish starting before using its API.
sleep 60
python ./scripts/api/install_tool_shed_repositories.py --api admin -l http://localhost:8080 --tool-deps --repository-deps $1
exit_code=$?
# Propagate an installation failure immediately (services left running).
if [ $exit_code != 0 ] ; then
    exit $exit_code
fi
./run.sh --stop-daemon
service postgresql stop
service apache2 stop
| true
|
0afe6292423ae4ef33554930eb795b30806098c5
|
Shell
|
JuSeong39/typeO_Script
|
/typeO_Script.sh
|
UHC
| 3,702
| 3.28125
| 3
|
[] |
no_license
|
#!/bin/bash
# typeO gateway setup: builds a Linux bridge (br-AP) joining the Wi-Fi
# AP interface to one end of a veth pair, and an Open vSwitch bridge
# (br-IoT) joining the other veth end plus the wired data-plane ports,
# managed by a remote ONOS controller.
# NOTE(review): the original comments were mojibake (UHC/EUC-KR); they
# have been replaced with English descriptions inferred from the code.

a=`ifconfig -a | grep wlx | awk '{print $1}'`
echo "wireless interface name is : $a"

# Tear down any bridges left over from a previous run.
# Fix: the original tested the literal string "ifconfig | grep br-AP"
# with [ -n ... ] (always true); the $( ) command substitution was
# missing here and in the five tests below.
if [ -n "$(ifconfig | grep br-AP)" ]
then
    ifconfig br-AP down
    brctl delif br-AP veth1 && brctl delif br-AP $a
    brctl delbr br-AP
fi

if [ -n "$(ifconfig | grep br-IoT)" ]
then
    ifconfig br-IoT down
    ovs-vsctl del-port br-OF veth2 && ovs-vsctl del-port br-OF eno7
    ovs-vsctl del-br br-IoT
fi

# Bring up the wired data-plane interfaces if they are not up yet.
if [ -z "$(ifconfig | grep eno4)" ]
then
    ifconfig eno7 up
    ifconfig eno3 up
    ifconfig eno4 up
fi
if [ -z "$(ifconfig | grep eno5)" ]
then
    ifconfig eno5 up
fi
if [ -z "$(ifconfig | grep eno6)" ]
then
    ifconfig eno6 up
fi

# Reinstall the required tooling (hostapd, dnsmasq, bridge-utils,
# Open vSwitch) so the latest packaged versions are in place.
apt-get purge -y hostapd bridge-utils openvswitch-switch
apt-get update && apt-get install -y hostapd dnsmasq bridge-utils openvswitch-switch

# Recreate the veth pair that patches br-AP into br-IoT.
if [ -n "$(ifconfig -a | grep veth1)" ]
then
    ifconfig veth1 down&&ifconfig veth2 down
    ip link delete veth1 type veth
fi
ip link add name veth1 type veth peer name veth2
ifconfig veth1 up && ifconfig veth2 up
echo -e "\nvirtual interfaces setting\n"
ifconfig

# br-IoT: OVS bridge carrying veth2 plus the wired data-plane ports,
# pointed at the remote ONOS controller.
ovs-vsctl add-br br-IoT
ovs-vsctl add-port br-IoT veth2
ovs-vsctl add-port br-IoT eno3
ovs-vsctl add-port br-IoT eno4
ovs-vsctl add-port br-IoT eno5
ovs-vsctl add-port br-IoT eno6
ovs-vsctl set-controller br-IoT tcp:203.237.53.130:6633
echo -e "\novs switch setting\n"
ovs-vsctl show

# br-AP: Linux bridge joining the wireless AP interface and veth1.
brctl addbr br-AP
# 4addr (WDS) mode allows the wireless interface to be bridged.
iw dev $a set 4addr on
brctl addif br-AP veth1 && sudo brctl addif br-AP $a
ifconfig br-AP 192.168.50.80 up
echo -e "\nlinux bridge seting\n"

sysctl -w net.ipv4.ip_forward=1
sleep 6
ifconfig br-AP up
ifconfig br-IoT up
ifconfig | grep br-AP
ifconfig | grep br-IoT

# dnsmasq: DHCP pool served on the wireless interface only, so the
# data-plane network is not disturbed.
echo -e "no-resolv/no-resolv">> /etc/dnsmasq.conf
echo -e "dhcp-range=interface:$a,192.168.50.81,192.168.55.99,12h">> /etc/dnsmasq.conf
echo -e "server=8.8.8.8">> /etc/dnsmasq.conf

# hostapd AP configuration (SSID/passphrase are deployment-specific).
echo -e "interface=$a
#bridge=br-AP
driver=nl80211
ssid=typeO_GIST
hw_mode=g
channel=6
macaddr_acl=0
auth_algs=1
ignore_broadcast_ssid=0
wpa=3
wpa_passphrase=typeo0070
wpa_key_mgmt=WPA-PSK
wpa_pairwise=TKIP
rsn_pairwise=CCMP"> ~/hostapd.conf

# Re-create the bridges and veth pair at boot via rc.local.
echo -e " ifconfig $a up
ip link add name veth1 type veth peer name veth2
ifconfig veth1 up
ifconfig veth2 up
brctl addbr br-AP
ifconfig br-AP 192.168.50.80 netmask 255.255.255.0 up
ifconfig br-IoT netmask 255.255.255.0 up
brctl addif br-AP veth1
iw dev $a set 4addr on
brctl addif br-AP $a
ifconfig eno2 up
ifconfig eno3 up
ifconfig eno4 up
ifconfig eno5 up
ifconfig eno6 up">> /etc/rc.local
| true
|
7e05db7c7acdea16f3294b547a3b89654aad308e
|
Shell
|
dak180/fink-experimental
|
/danielj/make-vcs-tarball.sh
|
UTF-8
| 1,924
| 4.5
| 4
|
[] |
no_license
|
#!/bin/bash -e
# make-vcs-tarball.sh
# Written by Daniel Johnson 15 Feb 2012
# 6 Mar 2012 - Use case instead of if/else. Duh.
# Released to the public domain.
# Print the command synopsis and argument help on stdout, then exit 0.
usage ()
{
	cat <<EOF
Usage:
$(basename "$0") VCS_TYPE URL TARBALL_BASENAME [REVISION]

Create a bzipped tarball of a repository.

 VCS_TYPE is one of git, hg, bzr or svn.
 URL is the remote repository url.
 TARBALL_BASENAME is the tarball filename without .tar.bz2.
 REVISION is an optional revision specification.
 Value depends on the VCS used.
EOF
	exit 0
}
# Positional arguments: VCS type, repository URL, tarball basename, optional revision.
cmd=$1
url=$2
tarball=$3
revision=$4

# Show the help text when invoked with no arguments or with -h first.
# Fix: $cmd is now quoted so the test stays well-formed for odd arguments.
if [ "$#" = 0 ] || [ "$cmd" = "-h" ]; then
	usage
fi

# Abort with a message if any mandatory argument is empty (script runs with -e).
cmd=${cmd:?"Must specify a VCS."}
url=${url:?"Must specify a repo URL."}
tarball=${tarball:?"Must specify a tarball name minus the extension."}

# Remember the invocation directory before we cd around; the tarball lands here.
dir=$(pwd)

# Fix: every $url/$tarball/$revision expansion below is quoted, so names
# containing spaces or glob characters no longer mis-parse.
case $cmd in
git )
	cd /tmp
	echo "Cloning git repo $url."
	git clone --no-checkout "$url" "$tarball"
	cd "$tarball"
	echo "Making tarball at ${dir}/${tarball}.tar.bz2"
	git archive --format=tar --prefix="${tarball}/" "${revision:=HEAD}" | bzip2 >"${dir}/${tarball}.tar.bz2"
	cd ..
	rm -rf "/tmp/${tarball}";;
hg )
	cd /tmp
	echo "Cloning hg repo $url."
	hg clone --noupdate "$url" "$tarball"
	cd "$tarball"
	echo "Making tarball at ${dir}/${tarball}.tar.bz2"
	hg archive --prefix="${tarball}" ${revision:+--rev="$revision"} "${dir}/${tarball}.tar.bz2"
	cd ..
	rm -rf "/tmp/${tarball}";;
bzr )
	# bzr can export a compressed archive directly from the remote URL.
	echo "Making tarball at ${dir}/${tarball}.tar.bz2 from bzr repo $url"
	bzr export "${tarball}.tar.bz2" "$url" --root="${tarball}" ${revision:+--revision="$revision"};;
svn )
	cd /tmp
	echo "Exporting svn repo $url."
	svn export ${revision:+--revision="$revision"} "$url" "$tarball"
	echo "Making tarball at ${dir}/${tarball}.tar.bz2"
	tar -cjf "${dir}/${tarball}.tar.bz2" "$tarball"
	rm -rf "/tmp/${tarball}";;
* )
	echo "VCS '$cmd' not supported."
	echo "Supported VCSes are git, hg, bzr and svn."
	exit 1
esac

# NOTE(review): `md5` is the macOS checksum tool; on Linux this would need
# `md5sum` instead -- left as-is to preserve the script's original target.
md5 "${dir}/${tarball}.tar.bz2"
exit 0
| true
|
fb4d9ef3a9dd41a33fed45c9850d30175b6023a6
|
Shell
|
AppScriptIO/deploymentShellscript
|
/yarn.installation.sh
|
UTF-8
| 347
| 3.25
| 3
|
[] |
no_license
|
#!/bin/bash
# Install Yarn from its Debian repository, or uninstall it when called with
# "uninstall" as the first argument. Must run with privileges to modify apt.
if [ -z "$1" ]; then
	#⭐ Install Yarn:
	curl -sS https://dl.yarnpkg.com/debian/pubkey.gpg | apt-key add -
	# Fix: the original wrote to /etc/apt/.s.list.d/yarn.list, a path apt never
	# reads (almost certainly a mangled "sources.list.d"); corrected here.
	echo "deb https://dl.yarnpkg.com/debian/ stable main" | tee /etc/apt/sources.list.d/yarn.list
	apt-get update && apt-get install yarn
elif [ "$1" = "uninstall" ]; then
	# Fix: $1 quoted (was unquoted, breaking on arguments containing spaces).
	apt-get remove yarn && apt-get purge yarn
fi;
| true
|
40ec1bedc4625f857b285b80a551cbffdc426937
|
Shell
|
leochu2010/CUHK-MscProject
|
/tools/test_cpu.sh
|
UTF-8
| 1,088
| 3.953125
| 4
|
[] |
no_license
|
# Benchmark driver: runs ../cal repeatedly for a range of CPU counts and
# emits MATLAB-style arrays (cpu=[...]; processing_time_ms=[...]) on stdout.
# Args: $1 repetitions, $2..$4 CPU range (from/to/step), $5 input data file.
echo "#test for $1 times"
TEST_TIMES=$1
CPU_FROM=$2
CPU_TO=$3
CPU_STEP=$4
INPUT_FILE=$5
# Prefix for the per-CPU-count scratch files under ./tmp/.
TOKEN="tmp_CPU_"
#create device number matlab array
# NOTE: "\c" with echo -e suppresses the trailing newline (like echo -n).
echo -e "cpu=[\c"
for (( t=$CPU_FROM; t <= $CPU_TO; t+=$CPU_STEP ))
do
echo -e "$t \c"
# Truncate (or create) the scratch file for this CPU count.
> ./tmp/$TOKEN$t
done
echo "];"
#put result into separated tmp files
for i in `seq 1 $TEST_TIMES`
do
#total=0
for (( t=$CPU_FROM; t <= $CPU_TO; t+=$CPU_STEP ))
do
# The second whitespace-separated field of cal's output is the time in ms.
RESULT=$(./../cal -algorithm pvalue -processor cpu -test 1 -file $INPUT_FILE -stdout processing_time)
#echo -e "$(echo $RESULT | cut -d ' ' -f 2) \c"
processing_time=$(echo $RESULT | cut -d ' ' -f 2)
echo $processing_time >> ./tmp/$TOKEN$t
#total=$(($total + $processing_time))
done
done
#calculate avg
# Only purely numeric lines are accumulated; anything else is skipped.
re='^[0-9]+$'
echo -e "processing_time_ms=[\c"
for (( t=$CPU_FROM; t <= $CPU_TO; t+=$CPU_STEP ))
do
total=0
#read file line by line
while read -r line
do
processing_time=$line
if [[ $processing_time =~ $re ]] ; then
total=$(($total + $processing_time))
fi
done < ./tmp/$TOKEN$t
#calculate avg (integer division -- fractions of a ms are truncated)
echo -e "$(($total/$TEST_TIMES)) \c"
done
echo "];"
| true
|
41a01605e2fb2116d5b66f69be709cbf83d8b64f
|
Shell
|
jhadvig/wildfly-8-centos
|
/bin/usage
|
UTF-8
| 433
| 2.671875
| 3
|
[] |
no_license
|
#!/bin/bash
# Print usage notes for this Source To Image builder container image.
cat <<'EOF'
This is a Source To Image (STI) builder image.
To use it, install STI: https://github.com/openshift/docker-source-to-images.
Sample invocation:
sti build git://github.com/bparees/openshift-jee-sample openshift/wildfly-8-centos wildflytest -s https://raw.githubusercontent.com/openshift/wildfly-8-centos/master/.sti/bin
You can then run the resulting image via:
docker run -p :8080 wildflytest

EOF
| true
|
6f35c83d19d02f1eb387f94be1ba5d5f48a7eb58
|
Shell
|
rosstimson/oh-my-zsh
|
/lib/theme-and-appearance.zsh
|
UTF-8
| 1,984
| 2.859375
| 3
|
[
"MIT"
] |
permissive
|
# ls colors
autoload colors; colors;
# LS colors, made with http://geoff.greer.fm/lscolors/
# BSD
export LSCOLORS="exfxcxdxbxegehabagacaf"
# Linux
export LS_COLORS='di=34;40:ln=35;40:so=32;40:pi=33;40:ex=31;40:bd=34;46:cd=34;47:su=0;41:sg=0;46:tw=0;42:ow=0;45:'
# Enable ls colors
if [ "$DISABLE_LS_COLORS" != "true" ]
then
# Find the option for using colors in ls, depending on the version: Linux or BSD
if [[ "$(uname -s)" == "NetBSD" ]]; then
# On NetBSD, test if "gls" (GNU ls) is installed (this one supports colors);
# otherwise, leave ls as is, because NetBSD's ls doesn't support -G
gls --color -d . &>/dev/null 2>&1 && alias ls='gls --color=tty'
else
# GNU ls understands --color; BSD ls falls back to -G.
ls --color -d . &>/dev/null 2>&1 && alias ls='ls --color=tty' || alias ls='ls -G'
fi
fi
#setopt no_beep
# Typing a directory name alone cd's into it; multios duplicates redirections.
setopt auto_cd
setopt multios
setopt cdablevarS
# When running inside GNU screen, expose the window number for prompts.
if [[ x$WINDOW != x ]]
then
SCREEN_NO="%B$WINDOW%b "
else
SCREEN_NO=""
fi
# Apply theming defaults
PS1="%n@%m:%~%# "
# git theming default: Variables for theming the git info prompt
ZSH_THEME_GIT_PROMPT_PREFIX="git:(" # Prefix at the very beginning of the prompt, before the branch name
ZSH_THEME_GIT_PROMPT_SUFFIX=")" # At the very end of the prompt
ZSH_THEME_GIT_PROMPT_DIRTY="*" # Text to display if the branch is dirty
ZSH_THEME_GIT_PROMPT_CLEAN="" # Text to display if the branch is clean
# Setup the prompt with pretty colors
setopt prompt_subst
# Colours
# Prompt-safe ANSI colour escapes, wrapped in %{...%} so zsh does not count
# them toward the prompt width.
export black=$'%{\e[0;30m%}'
export red=$'%{\e[0;31m%}'
export green=$'%{\e[0;32m%}'
export brown=$'%{\e[0;33m%}'
export blue=$'%{\e[0;34m%}'
export purple=$'%{\e[0;35m%}'
export cyan=$'%{\e[0;36m%}'
export light_gray=$'%{\e[0;37m%}'
export dark_gray=$'%{\e[1;30m%}'
export light_red=$'%{\e[1;31m%}'
export light_green=$'%{\e[1;32m%}'
export yellow=$'%{\e[1;33m%}'
export light_blue=$'%{\e[1;34m%}'
export pink=$'%{\e[1;35m%}'
export light_cyan=$'%{\e[1;36m%}'
export white=$'%{\e[1;37m%}'
export reset_color=$'%{\e[0m%}'
| true
|
c24a30d4370489b950fc4b0bf3ae8ee12a3c012d
|
Shell
|
mcchrish/dotfiles
|
/zsh/Library/Preferences/zsh/functions/general.zsh
|
UTF-8
| 438
| 3.671875
| 4
|
[] |
no_license
|
# cd into the current Finder directory (macOS only: shells out to AppleScript;
# errors from osascript are silenced, in which case no cd happens).
function cdf {
local dir
dir=$(osascript -e 'tell application "Finder" to get the POSIX path of (target of front window as alias)' 2> /dev/null) \
&& cd "$dir"
}
# Makes a directory (including missing parents) and changes into it.
# Returns non-zero when no path is given or mkdir/cd fails.
function mkdcd {
	[[ -n "$1" ]] || return 1
	mkdir -p "$1" && builtin cd "$1"
}
# Changes to a directory and lists its contents.
# zsh-only: $argv[-1] is the last argument (the target directory) and
# "${(@)argv[1,-2]}" forwards any preceding arguments as ls options.
function cdls {
builtin cd "$argv[-1]" && ls "${(@)argv[1,-2]}"
}
| true
|
5562cebd9a82236af052faf8d0698ee468b23292
|
Shell
|
korpiq/bash_utils
|
/bin/favro.sh
|
UTF-8
| 1,308
| 3.6875
| 4
|
[] |
no_license
|
#!/bin/bash

# Report a Favro API failure on stderr and signal failure to the caller.
favro_fail () {
	printf 'Favro failed: %s\n' "$*" >&2
	return 1
}
# Fetch the caller's first organisation id. Expects TOKEN="email:api_token".
favro_org_id () {
curl -su "$TOKEN" -X GET "https://favro.com/api/v1/organizations" | jq -r '.entities[0].organizationId'
}
# GET an API path under /api/v1/, scoped to the $ORG organisation header.
favro_get () {
curl -su "$TOKEN" -H "organizationId: $ORG" -X GET "https://favro.com/api/v1/$1"
}
# GET a path and filter the JSON response: favro_query PATH JQ_ARGS...
favro_query () {
URI="$1"
shift
favro_get "$URI" | jq "$@"
}
# Resolve a widget (board) name to its widgetCommonId.
favro_id_of_widget () { # give widget name
favro_query widgets -r '.entities[] | select(.name == "'"$1"'") | .widgetCommonId'
}
# Resolve a column name on a widget to its columnId.
# WIDGET_ID may be pre-set by the caller to skip the extra widget lookup.
# NOTE(review): names are spliced verbatim into the jq program, so a name
# containing a double quote would break the filter.
favro_id_of_widget_column () { # give widget name and column name
WIDGET_ID=${WIDGET_ID:-$(favro_id_of_widget "$1")}
favro_query "columns?widgetCommonId=$WIDGET_ID" -r '.entities[] | select(.name == "'"$2"'") | .columnId'
}
# List cards in a widget column: WIDGET COLUMN EXTRA_QUERY_FILTER JQ_ARGS...
# COLUMN_ID may be pre-set by the caller to skip both lookups.
favro_query_cards_of_widget_column () {
COLUMN_ID=${COLUMN_ID:-$(favro_id_of_widget_column "$1" "$2")}
FILTER="$3"
shift; shift; shift
favro_query "cards?columnId=$COLUMN_ID&$FILTER" "$@"
}
# Entry point: resolve ORG once (exported for nested calls), then dispatch
# "favro_do query ..." to the corresponding favro_* helper.
favro_do () {
export ORG=${ORG:-$(favro_org_id)}
if [ -n "$ORG" ]
then
favro_"$@"
else
favro_fail "Missing favro organizationID (ORG)."
fi
}
# Dispatch directly when this file is executed (rather than sourced) with
# arguments. Fix: `[ ... -a ... ]` is obsolescent and ambiguous per POSIX;
# two chained tests give the same behaviour reliably.
if [ "$BASH_SOURCE" = "$0" ] && [ -n "$*" ]
then
	if [ -n "$TOKEN" ]
	then
		favro_do "$@"
	else
		favro_fail "Missing TOKEN (favro email:token)"
	fi
fi
| true
|
4905083ea934c924129d8c04009b7d06dffb6cc8
|
Shell
|
sepses/file-system-log-service
|
/randomEvents.sh
|
UTF-8
| 6,281
| 3.8125
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Random file-event generator (macOS): picks operations and target directories
# at random and logs each performed action to ./logRandomEvents.txt.
# files: scratch array filled by getFilesOfDirectory; num: last random index.
files=()
num=0
# The pool of operations and of hard-coded target directories / extensions.
arrOps=(create editFile copyFileDiffDir moveFile moveFileToTrash renameFile)
arrTargetPaths=("/Users/Agnes/Desktop/test" "/Users/Agnes/Desktop/test-2" "/Volumes/USB" "/Users/Agnes/Dropbox/test" "/Users/Agnes/Google Drive/test" "/Users/Agnes/Desktop/test")
arrFileExt=(xml docx txt xlsx txt)
echo "see log file 'logRandomEvents.txt' in current directory for performed file operations"
touch logRandomEvents.txt
# Open a random existing file, paste the clipboard into it via cliclick
# keystrokes, save, and quit the editing application.
editFile(){
# get random file to edit
getFilesOfDirectory
# select random file from files selection
calcRandom $((${#files[@]}-1))
fileToEdit="${files[$num]}"
if [ -e "$fileToEdit" ]
then
addLogEntry "edit;""$fileToEdit"";"
open "$fileToEdit"
sleep 10
#send some text to open file and hit cmd+s to save changes and close program again
cliclick w:1000 kd:cmd t:v ku:cmd
cliclick w:500 kd:cmd t:s ku:cmd w:1000 kd:cmd t:q
fi
}
# Copy a random file into a random target directory, but only when the
# destination does not already hold an identical copy.
copyFileDiffDir(){
# get random selection of files to copy
getFilesOfDirectory
# select random file from files selection
calcRandom $((${#files[@]}-1))
fileToCopy="${files[$num]}"
# get random target directory to copy to
calcRandom $((${#arrTargetPaths[@]}-1))
targetDir="${arrTargetPaths[$num]}"
dir=$(dirname "$fileToCopy")
basename=$(basename -- "$fileToCopy")
newDirAndName=$targetDir"/"$basename
if [ -e "$fileToCopy" ]
then
# Briefly open the file (simulating user activity), then close it again.
open "$fileToCopy"
cliclick w:1000 kd:cmd t:q
diff=false
#check if current file and "new file" (to copy to) are identical
cmp --silent "$fileToCopy" "$newDirAndName" || diff=true
if [ "$diff" = true ] ; then
addLogEntry "copy;""$fileToCopy"";""$targetDir"
#cp -fr "$fileToCopy" "$targetDir"
cp "$fileToCopy" "$targetDir"
fi
fi
}
#copyFileSameDir(){
# get random selection of files to copy
# getFilesOfDirectory
# select random file from files selection
# calcRandom $((${#files[@]}-1))
# fileToCopy="${files[$num]}"
# if [ -e "$fileToCopy" ]
# then
# extract dir, filename and extension
# dir=$(dirname "$fileToCopy")
# filename=$(basename -- "$fileToCopy")
# extension="${filename##*.}"
# filename="${filename%.*}"
#create new filename + previous path
# newDirAndFilename=$dir"/"$filename"Copy."$extension
# addLogEntry "copy;""$fileToCopy"";""$newDirAndFilename"
# cp "$fileToCopy" "$newDirAndFilename"
# fi
#}
# Move a random file into a random target directory (skipped when source and
# destination directories are the same).
moveFile(){
# get random selection of files to move
getFilesOfDirectory
# select random file from files selection
calcRandom $((${#files[@]}-1))
fileToMove="${files[$num]}"
if [ -e "$fileToMove" ]
then
# get random target directory to copy to
calcRandom $((${#arrTargetPaths[@]}-1))
targetDir="${arrTargetPaths[$num]}"
originalDir=$(dirname "$fileToMove")
if [ "$originalDir" != "$targetDir" ] ; then
addLogEntry "move;""$fileToMove"";""$targetDir"
mv "$fileToMove" "$targetDir"
fi
fi
}
# Move a random file into the user's Trash folder (hard-coded macOS path).
moveFileToTrash(){
# get random selection of files to move
getFilesOfDirectory
# select random file from files selection
calcRandom $((${#files[@]}-1))
fileToMove="${files[$num]}"
if [ -e "$fileToMove" ]
then
addLogEntry "moveToTrash;""$fileToMove"";/Users/Agnes/.Trash"
mv "$fileToMove" "/Users/Agnes/.Trash/"
fi
}
# Rename a random file in place by appending "X" to its stem ("a.txt" -> "aX.txt").
renameFile(){
# get random selection of file to rename
getFilesOfDirectory
# select random file from files selection
calcRandom $((${#files[@]}-1))
fileToRename="${files[$num]}"
if [ -e "$fileToRename" ]
then
# Split the path into directory, stem and extension.
dir=$(dirname "$fileToRename")
filename=$(basename "$fileToRename")
extension="${filename##*.}"
filename="${filename%.*}"
newName=$dir"/"$filename"X."$extension
addLogEntry "rename;""$fileToRename"";""$newName"
mv "$fileToRename" "$newName"
fi
}
# Create a new file with a random extension in a random target directory.
# xlsx files are created by driving Microsoft Excel through cliclick
# keystrokes; all other types are created with touch and edited via paste.
create(){
# get random target directory to create a file in
calcRandom $((${#arrTargetPaths[@]}-1))
targetDir="${arrTargetPaths[$num]}"
# get random file extension
calcRandom $((${#arrFileExt[@]}-1))
fileExt="${arrFileExt[$num]}"
# Unix timestamp doubles as a unique file name.
timestamp=$(date +%s)
filename=$timestamp".""$fileExt"
filepath="$targetDir""/""$filename"
if [[ "$fileExt" == "xlsx" ]]; then
open -a "Microsoft Excel"
sleep 2
cliclick kd:cmd t:n ku:cmd # create a new workbook
cliclick kd:cmd t:v ku:cmd
sleep 3
cliclick kd:cmd t:s ku:cmd
# Re-read the clock: the save dialog may appear seconds after launch.
timestamp=$(date +%s)
filename=$timestamp
pathname="$targetDir""/"$timestamp
addLogEntry "create;""$pathname"".xlsx;"
cliclick w:500 t:"$pathname"
sleep 3
cliclick kp:enter
sleep 3
cliclick kp:enter
sleep 1
cliclick kd:cmd t:q
else
addLogEntry "create;""$filepath"";"
touch "$filepath"
open "$filepath"
sleep 10
cliclick w:1000 kd:cmd t:v ku:cmd
cliclick kd:cmd t:s ku:cmd w:1000 kd:cmd t:q
fi
}
# returns a random number between 0 and a max (parameter), stored in $num
# NOTE(review): the result is actually 0..max-1 (modulus excludes max itself),
# so callers that pass "length-1" can never select the last array element.
# Kept as-is because the main loop passes the full length and relies on the
# current range.
calcRandom(){
	if [ "$1" == 0 ]; then
		num=0
	else
		num=$(( ( RANDOM % $1 ) + 0 ))
	fi
}

# Picks a random directory from arrTargetPaths and stores its regular files
# in the global array "files"; exits when the directory is empty.
getFilesOfDirectory(){
	# Fix: reset the result array. Without this, entries from previous calls
	# accumulated, so stale (possibly already moved/deleted) paths could be
	# selected by the callers.
	files=()
	calcRandom $((${#arrTargetPaths[@]}-1))
	if [ "$num" == -1 ]; then
		echo no event triggered
		exit 1
	else
		path=${arrTargetPaths[$num]}
		for f in "$path"/*; do
			if [[ -f "$f" ]]; then
				files+=("$f")
			fi
		done
	fi
	if [ ${#files[@]} == 0 ]; then
		echo no files found in dir "${arrTargetPaths[$num]}" - no event triggered
		exit 1
	fi
}
# Append "<event-record>;<timestamp>" to the run log in the current directory.
addLogEntry(){
	printf '%s;%s\n' "$1" "$(date +%Y-%m-%d-%H:%M:%S)" >> logRandomEvents.txt
}
# Main loop: trigger one random operation every 7 seconds until the log
# contains 100 entries (each operation appends exactly one log line).
numlines=$(wc -l logRandomEvents.txt | awk '{ print $1 }')
echo create events until 100 events are logged
while [ $numlines -lt 100 ]
do
# NOTE(review): the full array length is passed here (not length-1), which
# matches calcRandom's 0..max-1 range exactly; $i below is never set.
calcRandom $((${#arrOps[@]}))
echo $i event: ${arrOps[$num]}
${arrOps[$num]}
sleep 7
numlines=$(wc -l logRandomEvents.txt | awk '{ print $1 }')
done
| true
|
b5a51135917e4b0d8af21004dde5dcfdd8087148
|
Shell
|
williamswalsh/BASh-scripting-bible
|
/21_ioWithCase.sh
|
UTF-8
| 303
| 3.78125
| 4
|
[] |
no_license
|
#!/bin/bash
# interesting prompt ability using case.
# Could also use :toupper: command to reduce number of case possibilities.
# Could just take first letter.
# Prompt for a yes/no answer and echo a normalised "Yes"/"No" response.
read -p "Enter y/n:" keyboard
# The case word is not subject to word splitting, so $keyboard is safe
# unquoted here; only these literal spellings are accepted.
case $keyboard in
y|Y|YES|yes)
echo "Yes";;
n|N|NO|no)
echo "No";;
*)
echo "Invalid option";;
esac
| true
|
b16d1e7794cab3bc7c1f285ea151db65d363c00e
|
Shell
|
ioppermann/munin-contrib
|
/plugins/php/php_opcache
|
UTF-8
| 1,557
| 3.484375
| 3
|
[] |
no_license
|
#!/bin/sh
###############################################################################
#
# Munin plugin to monitor Zend OPCache <http://php.net/manual/en/book.opcache.php>
# By Daniel Lo Nigro <http://dan.cx/>
#
# Installation:
# 1. Copy php_opcache.php file onto server and verify you can hit it in a browser
# 2. Add to Munin config:
# [php_opcache]
# env.URL http://example/php_opcache.php
###############################################################################
# Settings required for autoconf
#%# family=auto
#%# capabilities=autoconf suggest
# URL of the helper PHP page that reports the opcache statistics.
URL=${URL:-'http://localhost/php_opcache.php'}
WGET=`which wget`;
WGET_FLAGS="-Yoff"; # refer to wget manual, you may set extra parameters like disable proxy
# Only one aspect is implemented: memory usage.
act=memory
# Munin "autoconf": report whether the plugin is usable.
if [ "$1" = "autoconf" ]; then
[ -z "$URL" ] && echo "no (edit URL config in header file !)" && exit 1
[ -n "$URL" ] && echo "yes" && exit 0
fi
# Munin "suggest": list the graph names this plugin can provide.
if [ "$1" = "suggest" ]; then
echo "memory"
exit 0
fi
# Munin "config": emit the static graph definition.
if [ "$1" = "config" ] && [ "$act" = "memory" ]; then
cat <<'EOM'
graph_title OPCache Memory usage
graph_args -l 0 --base 1024
graph_vlabel Memory usage
graph_category memory
graph_order mem_used mem_free mem_wasted
graph_total Total
mem_free.label Memory Free
mem_free.draw STACK
mem_free.min 0
mem_used.label Memory Used
mem_used.draw AREA
mem_used.min 0
mem_wasted.label Memory Wasted
mem_wasted.draw STACK
mem_wasted.min 0
EOM
exit 0
fi
###############################################################################
# Default action: fetch the current values from the helper page.
[ -x $WGET ] && $WGET -q $WGET_FLAGS "$URL?act=$act" -O - && exit 0
exit 1
| true
|
7081877eab6e1f213f81402bdd8cd39271e0c8b5
|
Shell
|
tagawa0525/dotfiles
|
/zsh/alias.zsh
|
UTF-8
| 2,419
| 3.109375
| 3
|
[] |
no_license
|
#------------------------------------------------------------
# chdir
#------------------------------------------------------------
# zsh hook: runs after every directory change; lists the new directory and
# notifies the cdd plugin (_cdd_chpwd is defined by that plugin).
function chpwd()
{
ls
_cdd_chpwd
}
alias cd..='cd ..'
alias cdp='cd ${HOME}/python/prac'
alias cdr='cd ${HOME}/ruby/prac'
alias cdf='cd ${HOME}/fortran/prac'
alias pd="pushd"
alias po="popd"
#alias gd='dirs -v; echo -n "select number: "; read newdir; cd -"${newdir}"'
# Make the current GNU screen window follow this shell's directory.
alias sd='screen -X chdir ${PWD}'
#------------------------------------------------------------
# back
#------------------------------------------------------------
alias back='cd -'
alias bk='cd -'
alias b='cd -'
#------------------------------------------------------------
# up
#------------------------------------------------------------
# Generate "u"/"uu"/... , "u1".."u10" and "1u".."10u" aliases that each climb
# the corresponding number of parent directories.
cmd=""
dir=""
for num in {1..10} ; do
cmd+="u"
dir+="../"
alias ${cmd}="cd ${dir}"
alias u${num}="cd ${dir}"
alias ${num}u="cd ${dir}"
done
unset cmd dir num
#------------------------------------------------------------
# list
#------------------------------------------------------------
alias sl='ls'
alias la='ls -A'
alias lt='ls -ltr'
alias ll='ls -lGh'
alias dir='ls -lGh'
#------------------------------------------------------------
# vim
#------------------------------------------------------------
alias vd='vimdiff'
alias vv='vi -O'
alias vh='vi -o'
#------------------------------------------------------------
# Grid Engine
#------------------------------------------------------------
alias qs="qstat"
alias qa="qstat -g t"
alias qu="qstat -u ${USER}"
#------------------------------------------------------------
# others
#------------------------------------------------------------
#alias mv='mv -i'
#alias cp='cp -i'
#alias cp='cp -p'
alias grep='egrep'
alias bc='bc -l'
alias h='history'
alias ha='history -E l'
alias j='jobs'
alias cls='clear'
alias rm='rm -i'
#alias rr="rm -rf"
alias ssh='ssh -Y'
#alias screen='screen -xR'
#alias screen='echo -ne "\033]0;[ USEING SCREEN ]@${HOST}\007" ; screen -xR'
#alias cl="make -f ~/Makefile clean"
#------------------------------------------------------------
# sudo
#------------------------------------------------------------
#------------------------------------------------------------
# global
#------------------------------------------------------------
# zsh global aliases: expand anywhere on the command line, not just in
# command position (e.g. "dmesg G usb").
alias -g G='| egrep'
alias -g L='| ${PAGER}'
alias -g V='| vi -R -'
alias -g T='| tail'
alias -g H='| head'
alias -g W='| wc'
| true
|
5e834ab91218d9bf6f90606d1fb62ab673ff5ed7
|
Shell
|
bkj/rec-benchmarks
|
/models/dsstne/run.sh
|
UTF-8
| 1,199
| 2.78125
| 3
|
[] |
no_license
|
#!/bin/bash
# run.sh
# Create the working directories for generated NetCDF data, model checkpoints
# and recommendation output; put the DSSTNE binaries on PATH.
mkdir -p {data,models,results}
export PATH="/home/bjohnson/software/amazon-dsstne/src/amazon/dsstne/bin/:$PATH"
# Convert the raw training data into the NetCDF input/output files DSSTNE
# consumes (run once, before `run`).
function prep_data {
    # Fix: prep_data is documented to run before `run`, but TRAIN_PATH was
    # previously assigned only inside `run`, so it was empty here on a fresh
    # invocation. Default it locally (an externally exported value still wins).
    TRAIN_PATH=${TRAIN_PATH:-'../../data/dsstne-train.txt'}
    generateNetCDF -d gl_input -i "$TRAIN_PATH" -o data/gl_input.nc -f data/features_input -s data/samples_input -c
    generateNetCDF -d gl_output -i "$TRAIN_PATH" -o data/gl_output.nc -f data/features_output -s data/samples_input -c
}
# Train the network, predict top-10 recommendations per user, then print a
# sample and score precision against the held-out test set.
function run {
# Dataset locations and training hyper-parameters.
TRAIN_PATH='../../data/dsstne-train.txt'
TEST_PATH='../../data/dsstne-test.txt'
NUM_EPOCHS=50
BATCH_SIZE=1024
# Drop checkpoints from any previous run so training starts fresh.
rm models/*
train -b $BATCH_SIZE -e $NUM_EPOCHS -n models/network.nc \
-d gl \
-i data/gl_input.nc \
-o data/gl_output.nc \
-c config.json
# -f/-r filter out items already present in the training data.
predict -b 2048 -k 10 -n models/network.nc \
-d gl \
-i data/features_input \
-o data/features_output \
-f $TRAIN_PATH \
-r $TRAIN_PATH \
-s results/recs
head results/recs
python inspect-results.py $TEST_PATH results/recs
}
# prep_data # Need to run this first (generates the NetCDF inputs)
run
# Results
#
# p@01 -> 0.530431
# p@05 -> 0.419587
# p@10 -> 0.352990
#
# cat config.json | jq . | openssl md5
# (stdin)= c7225754458f71886aa0bd111487ff92
|
93335a3005ad2534546a38e910aa3f5795ba22e6
|
Shell
|
Sawndip/LookbackNeuron
|
/examples/install.sh
|
UTF-8
| 439
| 2.703125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Build and install the IzhikTestModule NEST extension module. The module
# sources must live in ./IzhikTestModule next to the current directory.
if [ ! -d ./IzhikTestModule ]; then
    # Fix: the original line was `raise error "..."` -- "raise" is not a shell
    # command, so it only printed "command not found" and the script carried
    # on into a doomed build. Report the problem on stderr and stop.
    echo "Cannot find the Module directory IzhikTestModule in the current directory $(pwd)" >&2
    exit 1
fi

if [ ! -d ./build_IzhikTestModule ]; then
    mkdir ./build_IzhikTestModule
fi

# Configure out-of-source, then build and install the module.
cd ./build_IzhikTestModule || exit 1
cmake -Dwith-nest=/home/arjun/nest-bleeding-python3/bin/nest-config -DCMAKE_CXX_FLAGS='-std=c++11 -Wno-unused-variable -Wno-reorder' ../IzhikTestModule
make -j4
make install
cd ..
| true
|
101fbb3bf8a17574579c6a8fa826e12c181e862f
|
Shell
|
twpDone/sanitizeFileName
|
/sanitize.sh
|
UTF-8
| 4,565
| 3.71875
| 4
|
[] |
no_license
|
# Global option flags, set by the argument loop below:
# DONOTRM - test mode, keep original files; DEBUG - verbose output;
# RECURSE - descend into subdirectories; START - the "start" guard was given.
DONOTRM=0
DEBUG=0
RECURSE=0
START=0
# Print the command synopsis and option help, then exit with status 3.
usage(){
	cat <<EOF
Usage: ${0}[-dtR] start

${0} Rename all badly named files and directories at your current position

Options :
 -d : Debug Mode, more Verbose
 -t : Test Mode, do not remove original files
 -R : Enable Recursion in subdirectories. /!\ Warning this could break things, test before
 -h : Help, Print this message

 start : Start the program, protect you from accidental run of this command


EOF
	exit 3
}
# Parse flags in any order; unknown arguments are silently ignored.
for arg in "$@"
do
case $arg in
"-d")
DEBUG=1
;;
"-t")
DONOTRM=1
;;
"-R")
RECURSE=1
;;
"-h")
usage
;;
"start")
START=1
;;
*)
;;
esac
done
# Safety interlock: refuse to run unless the literal word "start" was given.
if test "$START" -ne 1
then
echo ""
echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
echo ""
echo "You must specify 'start' as an argument of this command"
echo ""
echo "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
echo ""
usage
fi
# message INDENT_LEVEL TEXT
# Print TEXT preceded by INDENT_LEVEL spaces (negative levels clamp to 0).
# Fix: the original echoed $indent$mtext unquoted, so word splitting stripped
# the computed indentation (and pathname expansion could mangle text such as
# "[-]"); printf with quoted arguments preserves both.
message(){
	indent=""
	indentLevel=$1
	mtext=$2
	if test "$indentLevel" -lt 0
	then
		indentLevel=0
	fi
	if test "$indentLevel" -gt 0
	then
		for space in $(seq 1 "$indentLevel")
		do
			indent=$indent" "
		done
	fi
	printf '%s%s\n' "$indent" "$mtext"
}
# Status helpers: one tagged report line on stdout.
fail(){
	message 0 "[-] ""$1"
}
success(){
	message 0 "[+] ""$1"
}
action(){
	message 0 "[*] ""$1"
}
skip(){
	message 0 "[/] ""$1"
}
debug(){
	message 0 "[D] ""$1"
}
recurmessage(){
	message 0 "[R] ""$1"
}
# Rename every badly named entry in the current directory: characters from
# the set  : ? = , <space> |  become "-"/"_" and "%2F" becomes "_". Copies to
# the sanitized name, removes the original (unless DONOTRM), and optionally
# recurses into subdirectories.
# Fix: the original function body was corrupted -- an inner region had been
# pasted twice, leaving two consecutive `else` branches and several `if`
# blocks without a matching `fi`, so the script could not even parse. This is
# the reconstructed, de-duplicated version of the same logic.
process(){
	if test "$DEBUG" -eq 1
	then
		debug "DEBUG : ""$DEBUG"
		debug "DONOTRM / TESTMODE : ""$DONOTRM"
		debug "RECURSE : ""$RECURSE"
	fi
	# NOTE(review): listing via `ls | tr " " "|"` to survive word splitting is
	# fragile (fails on names containing "|" or newlines) but is kept, since
	# the rename table below relies on "|" never appearing in a final name.
	for fic in `ls | tr " " "|"`
	do
		echo ""
		originalFic=`echo $fic | tr "|" " "`
		newname=`echo "$fic" | sed y/":\?=, |"/"--____"/ | sed s/"%2F"/"_"/g`
		if test "$DEBUG" -eq 1
		then
			debug "originalFile : ""$originalFic"
			debug "newname : ""$newname"
		fi
		if test -e "$originalFic"
		then
			echo "Orginal : "$originalFic
			if test "$originalFic" != "$newname"
			then
				action "Trying cp "
				cp -R "$originalFic" "$newname"
				if test -e "$newname"
				then
					echo "cp ""$originalFic""--->""$newname"
					if test "$DONOTRM" -ne 1
					then
						action "rm ""$originalFic"
						rm -Rf "$originalFic"
					else
						skip "$originalFic"" Have not been removed"
					fi
				else
					fail "Copy Failed : ""$originalFic""--->""$newname"
				fi
			else
				success "No renaming is necesary"
			fi
		else
			fail "Fichier introuvable : ""$originalFic"
		fi
		# Optional recursion into the (possibly renamed) directory.
		if test "$RECURSE" -eq 1
		then
			if test -d "$newname"
			then
				recurmessage "cd to [""$newname""] from [""$PWD""]"
				cd "$newname"
				process
				cd ..
				recurmessage "cd back to [""$PWD""] from [""$newname""]"
			fi
		fi
	done
}
process
# Fix: the original final line read "exit 1 167,6 Bot" -- stray editor
# status-line text pasted into the file. bash's exit rejects the extra
# arguments, so the script effectively ended with status 1; keep that status.
exit 1
| true
|
9eeda59fbc48ba27c1778e92e42a15fc423ae583
|
Shell
|
baoson2211/script
|
/Shell/example/example3/example3.sh
|
UTF-8
| 7,317
| 3.6875
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Description of this script
# $1 -- instruction: help or exec
# $2 & $4 -- navigator arg
# $3 -- input file
# $5 -- output directory
#
# Description output file exported by MySQL
# $1 -- activation
# $2 -- username
# $3 -- id_student
# $4 -- grade
# $[5-9] -- subname and mainname
case "$1" in
# HELP
"-h" | "--help")
echo " INSTRUCTION: "
echo " -h --help Show this instruction"
echo " -x --exec Run"
echo " --input-file Input text file"
echo " --output-dir Output directory (option: default as default)"
echo
echo " Example: "
echo " ./example3.sh -x --input-file <Input file path> --output-dir <Output directory path>"
echo " ./example3.sh --exec --input-file <Input file path> --output-dir default"
echo
;;
# EXECUTIVE
# For each data row of the MySQL export ($3), provision a vsftpd user config
# and home directory for every activated, non-Root account.
"-x" | "--exec")
# get the number of lines in the input file and loop over them
numline=$(cat $3 | wc -l)
# catch maximum of id
#max_id=$(tail -n 1 $3 | awk '{print $1}')
# start scanning from the second line (the first line holds column headers)
i=2
while [ $i -le $numline ] ;
do
# activated or not
activated=$(cat $3 | awk -v line=$i 'NR==line' | awk '{print $1}' | sed 's/"//g')
# get the username
username=$(cat $3 | awk -v line=$i 'NR==line' | awk '{print $2}' | sed 's/"//g')
# get the id_student
id_student=$(cat $3 |awk -v line=$i 'NR==line' | awk '{print $3}' | sed 's/"//g')
# get the grade
grade=$(cat $3 | awk -v line=$i 'NR==line' | awk '{print $4}' | sed 's/"//g')
# paraphrase
#
# with awk: an interpreted programming language designed for text processing
# the leftmost longest rule: http://www.boost.org/doc/libs/1_56_0/libs/regex/doc/html/boost_regex/syntax/leftmost_longest_rule.html
# attach a variable: awk -v <name of variable of awk command>=$<another variable in the shell> '{script}'
# awk -v <name of variable of awk command>=<value> '{script}'
# print field contain: awk '{ print $<numerical order of field>}'
# numerical order of field start from 1 to NF
# sub(regexp, replacement [, target]): see http://www.delorie.com/gnu/docs/gawk/gawk_135.html
# gsub(regexp, replacement [, target]): see http://www.delorie.com/gnu/docs/gawk/gawk_135.html
# replace print with printf to printing without newline
# 'NR' as Number of Records Variable
# 'NF' as Number of Fields in a record
#
# with tr: translate or delete characters
# replace SET1 by SET2: tr SET1 SET2
# delete a pattern: tr -d '<pattern>'
# '\r' as Carriage Return
# '\n' as Line Feed
#
# with sed: stream editor for filtering and transforming text
# sed 's/"//g' same as tr -d '"'
# sed ':a;N;$!ba;s/\n//g' same as tr -d '\n'
#
# name dir is followed real name
#folder=$(cat $3 | awk -v line=$i 'NR==line' | awk -v line=$i '{ if ($9!="") print $5"_"$6"_"$7"_"$8"_"$9 ; else if ($8!="") print $5"_"$6"_"$7"_"$8 ; else if ($7!="") print $5"_"$6"_"$7 ; else print $5"_"$6 ; }' | sed 's/"//g')
#folder=$(cat $3 | awk -v line=$i 'NR==line' | awk -v line=$i '{ if ($9!="") print $5$6$7$8$9 ; else if ($8!="") print $5$6$7$8 ; else if ($7!="") print $5$6$7 ; else print $5$6 ; }' | sed 's/"//g')
#folder=$(cat $3 | awk -v line=$i 'NR==line' | awk '{print $5$6$7$8$9 ; }' | sed 's/"//g')
#folder=$(cat $3 | awk -v line=$i 'NR==line' | awk '{ for(field=5;field<=NF;field++) print $field; }' | sed 's/"//g' | sed ':a;N;$!ba;s/\n//g')
folder=$(cat $3 | awk -v line=$i 'NR==line' | awk '{ for(field=5;field<=NF;field++) { printf $field ; } }' | awk '{ gsub("\"",""); print ; }')
#folder=$(cat $3 | awk -v line=$i 'NR==line' | awk '{ for(field=5;field<=NF;field++) { printf $field ; } }' | tr -d '"' | tr -d '\r\n')
realname=$folder
# Root or not
if [ $username != "Root" ];
then
# is activated
if [ $activated == '1' ];
then
# output preset default
if [ $5 == "default" ];
then
# write the following contents to the per-user vsftpd config file
echo "chroot_local_user=YES" > /etc/vsftpd_user_conf/$username
echo "local_root=/home/vsftpd/Root/Archives/K$grade/$folder" >> /etc/vsftpd_user_conf/$username
echo "user_sub_token=$username" >> /etc/vsftpd_user_conf/$username
# make new directory named realname of user with grade (continuous, nospace)
# (create a new directory carrying the member's full name without spaces)
mkdir /home/vsftpd/Root/Archives/K$grade/$realname
# make a symbolic link named realname of user with grade (continuous, nospace) pointed a file named realname of user
# (create a symbolic link named after the member pointing at the username's config file)
ln -s /etc/vsftpd_user_conf/$username /etc/vsftpd_user_conf/$folder
# change owner
chown vsftpd:nogroup -R /home/vsftpd/Root/Archives/*
# change mode bits
chmod 757 -R /home/vsftpd/Root/Archives/*
# make a symbolic link placed in user's directory pointed Book directory of Root
# (create a symbolic link in the member's directory pointing at Root's Book directory)
ln -s /home/vsftpd/Root/Book /home/vsftpd/Root/Archives/K$grade/$realname/Book
else
# write the following contents to the config file under the custom output dir
echo "chroot_local_user=YES" > $5$username
echo "local_root=/home/vsftpd/Root/Archives/K$grade/$folder" >> $5$username
echo "user_sub_token=$username" >> $5$username
# make new directory named realname of user with grade (continuous, nospace)
# (create a new directory carrying the member's full name without spaces)
mkdir $5K$grade/$folder
# make a symbolic link named realname of user with grade (continuous, nospace) pointed a file named realname of user
# (create a symbolic link named after the member pointing at the username's config file)
ln -s $5$username $5$folder
fi
fi
fi
#i=$(( i++ ))
i=`expr $i + 1`
done
echo "write done"
;;
# DELETE (disabled draft: would remove the generated config files)
#"-D" | "--delete")
# check the arguments passed in
#if [ $2 != "--input-file" ];
#then
# echo "error: wrong syntax, please show --help"
#else
# if [ $3 == "" ];
# then
# echo "please fill input file path"
# fi
#fi
#if [ $4 != "--output-dir" ];
#then
# echo "error: wrong syntax, please show --help"
#else
# if [ $5 == "" ];
# then
# echo "please fill output directory path"
# fi
#fi
# get the number of lines in the text file and loop over them
#numline=$(cat $3 | wc -l)
#i=1
#while [ $i -le $numline ] ;
#do
# get the username
#username=$(cat $3/pattern | awk -v line=4 'NR==line' | awk '{print $2}' | sed 's/"//g')
# get the id_student
#id_student=$(cat $3/pattern |awk -v line=4 'NR==line' | awk '{print $4}' | sed 's/"//g')
# delete the files
#rm -f $5/*
# i++
#(( i++ ))
#done
#echo "erase done"
#;;
# The others
*)
echo "error: wrong syntax, please show --help"
;;
esac
| true
|
d893f517b7fbe53aa1ae882c6251c6bf371eedac
|
Shell
|
s243a/TazPup64_BuildKit
|
/tazpup-core-files/puppy/usr/bin/manage-bluetooth
|
UTF-8
| 1,373
| 3.390625
| 3
|
[] |
no_license
|
#!/bin/sh
# Bring the bluetooth daemons up or down to match the rfkill state, and make
# the machine ready to receive files via obexd. A lock file prevents
# overlapping runs. Optional $1 "refresh-tray" restarts the tray applet.
# NOTE(review): the script uses `==` inside [ ] under #!/bin/sh -- this works
# with bash/busybox but not strict POSIX sh; left unchanged.
mode="$1"
if [ -f /tmp/bt-manage-lock ]; then
exit
fi
touch /tmp/bt-manage-lock
# No bluetooth device in `rfkill list` -> tear everything down.
if [ "$(rfkill list | grep "luetooth")" == "" ]; then
echo "BT disabled" >> /tmp/bt-device.log
udevadm trigger --subsystem-match=bluetooth --action=remove
killall bluetoothd
killall bluetooth-applet
killall obexd
killall hidd
killall pand
killall rfcomm
rm -f /tmp/bt-manage-lock
exit
fi
echo "BT enabled" >> /tmp/bt-device.log
# Snapshot which daemons (and an X server) are currently running.
btd=`ps | grep "bluetoothd" | grep -v "grep"`
hid=`ps | grep "hidd" | grep -v "grep"`
obd=`ps | grep "obexd" | grep -v "grep"`
bta=`ps | grep "bluetooth-applet" | grep -v "grep"`
xrun=`ps | grep "X" | grep -v "grep"`
if [ "$xrun" != "" ]; then
export DISPLAY=:0.0
fi
# Start any daemon that is not yet running.
if [ "$btd" == "" ]; then
bluetoothd
sleep 1
fi
if [ "$hid" == "" ]; then
hidd --server
sleep 1
fi
# The tray applet needs X; "refresh-tray" forces a restart of a running one.
if [ "$bta" == "" ]; then
if [ "$xrun" != "" ]; then
bluetooth-applet &
fi
elif [ "$mode" == "refresh-tray" ] && [ "$bta" != "" ]; then
if [ "$xrun" != "" ]; then
killall bluetooth-applet
bluetooth-applet &
fi
fi
sleep 1
# Start obexd with an FTP/OPP share directory for incoming transfers.
if [ "$obd" == "" ]; then
if [ ! -d /root/bluetooth-share ]; then
mkdir /root/bluetooth-share
fi
/usr/libexec/obexd --ftp --opp -a -r /root/bluetooth-share
sleep 1
if [ "$(pidof obexd)" != "" ] && [ "$xrun" != "" ]; then
notify "Ready to receive files if the bluetooth is enabled and discoverable"
fi
fi
rm -f /tmp/bt-manage-lock
| true
|
c1630419d1048f9c9787d6ef037f29c7ed26dd1f
|
Shell
|
smrhp/utilserv
|
/roles/firewall/files/firewall
|
UTF-8
| 908
| 3.578125
| 4
|
[] |
no_license
|
#! /bin/sh
### BEGIN INIT INFO
# Provides:          custom firewall
# Required-Start:    $remote_fs $syslog
# Required-Stop:     $remote_fs $syslog
# Default-Start:     2 3 4 5
# Default-Stop:      0 1 6
# Short-Description: firewall initscript
# Description:       Custom Firewall
### END INIT INFO

# Init script for the custom firewall: delegates rule loading/flushing
# to helper scripts and reports current rules for "status".

FW_SCRIPTS=/usr/local/bin/utilserv/firewall

# Apply the iptables rule set.
start() {
	echo "Application des règles IpTables : "
	"$FW_SCRIPTS/init_iptables.sh"
}

# Flush (clear) the iptables rule set.
stop() {
	echo "Flush des règles IpTables : "
	"$FW_SCRIPTS/flush_iptables.sh"
}

case "$1" in
	start)
		start
		;;
	stop)
		stop
		;;
	restart)
		stop
		start
		;;
	status)
		# Dump the active IPv4 (filter + nat) and IPv6 rules.
		/sbin/iptables -L
		/sbin/iptables -t nat -L
		/sbin/ip6tables -L
		;;
	*)
		echo "Usage: /etc/init.d/firewall {start|stop|restart|status}."
		;;
esac

exit
| true
|
117ac22bfaa9c64e16a132073f92aa5fc8437a10
|
Shell
|
NunationFL/SDIS
|
/proj1/src/scripts/peer.sh
|
UTF-8
| 967
| 3.625
| 4
|
[] |
no_license
|
#! /usr/bin/bash
# Script for running a peer
# To be run in the root of the build tree
# No jar files used
# Assumes that peer.Peer is the main class
# and that it belongs to the peer package
# Modify as appropriate, so that it can be run
# from the root of the compiled tree

# Check number of input arguments (all nine are mandatory).
if (( $# != 9 ))
then
	echo "Usage: $0 <version> <peer_id> <svc_access_point> <mc_addr> <mc_port> <mdb_addr> <mdb_port> <mdr_addr> <mdr_port>"
	exit 1
fi

# Assign input arguments to nicely named variables
ver=$1
id=$2
sap=$3
mc_addr=$4
mc_port=$5
mdb_addr=$6
mdb_port=$7
mdr_addr=$8
mdr_port=$9

# Execute the program.
# All expansions are quoted so arguments survive intact even if they
# contain whitespace or glob characters (SC2086).
java peer.Peer "$ver" "$id" "$sap" "$mc_addr" "$mc_port" "$mdb_addr" "$mdb_port" "$mdr_addr" "$mdr_port"
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.