blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
59d232ff9ab8de515b20e29361b48dc7a6cb889c | Shell | 7kms/km7-kit | /packages/km7/templates/projects/web-backend/deploy/pre-publish.sh | UTF-8 | 659 | 2.96875 | 3 | [] | no_license | #!/usr/bin/env sh
# Package the checked-out workspace into a timestamped tarball and move it
# back into the workspace for the next deploy step.
# Required environment (provided by Jenkins): JOB_NAME, BUILD_TIMESTAMP, WORKSPACE.
projectName="${JOB_NAME}"
dockerImageName="${projectName}-${BUILD_TIMESTAMP}"
echo '*********************************** 准备打包从Github仓库拉取的代码...*********************************************'
echo "**********${WORKSPACE}**********"
pwd
# Quote the exclude patterns so the shell cannot glob-expand `.git/**` or
# `*.tar.gz` against the current directory before tar sees them, and quote
# every path expansion against whitespace.
tar --exclude='.git/**' --exclude='*.tar.gz' -zcf "/tmp/${dockerImageName}.tar.gz" -C "${WORKSPACE}" . \
&& mv "/tmp/${dockerImageName}.tar.gz" "${WORKSPACE}" \
&& echo '*********************************** 打包成功 *********************************************' \
&& echo '************************************** 准备发送至服务器... **********************************************'
| true |
93fee53e4eccfb008e2efe8e752bd528af91f249 | Shell | vsoch/matlab-duke | /hugin/April11/DNS/Genetic/23andme_convert.sh | UTF-8 | 5,365 | 3.9375 | 4 | [] | no_license | #!/bin/bash
# ------23andme_convert----------------------------------------------------
# This script reads in new, raw genetic text data files downloaded from
# from 23andme in the Data/Genetic folder and first converts them all to
# .ped/.map files, and then creates a new master .ped/.map file
# (with all subjects) appended with the date.
#
# ------DNS Workflow and Organization-------------------------------------
#
# 1) Spenser manually downloads, unzips, and renames .txt file from 23andme.com
# 2) puts into Data/Genetic folder
# 3) Individual output goes to Analysis/Genetic/Individual/DNSXXXX.ped/.map
# 4) Group output goes to Analysis/Genetic/Group/YYYY_MM_DD_dns.ped/.map
# 5) Text files used to create group merges go to Analysis/Genetic/Group/Merge_Lists
# -------Instructions for Submission
#
# 1) Add lists of new subjects to variables below under either male or female
# 2) The other variables do not need to be changed!
# 3) Save and submit on command line:
#
# qsub -v EXPERIMENT=DNS.01 23andme_convert.sh
#
# Output goes to $EXPERIMENT/Analysis/Genetic/Logs
#############################################################
# VARIABLES
# Formatting for individual IDs under male and female
# should be ( DNS0001 DNS0002 ) #(see spaces)
maleID=( DNS0060 DNS0113 DNS0242 DNS0259 )
femID=( DNS0260 )
uID=( ) # In the case of unidentified gender
fid=0 # Family ID
pid=0 # Paternal ID
mid=0 # Maternal ID
pheno=0 # Phenotype
#############################################################
# --- BEGIN GLOBAL DIRECTIVE --
#$ -S /bin/sh
#$ -o $HOME/$JOB_NAME.$JOB_ID.out
#$ -e $HOME/$JOB_NAME.$JOB_ID.out
#$ -m ea
# -- END GLOBAL DIRECTIVE --
# -- BEGIN PRE-USER --
#Name of experiment whose data you want to access
EXPERIMENT=${EXPERIMENT:?"Experiment not provided"}
source /etc/biac_sge.sh
EXPERIMENT=`biacmount $EXPERIMENT`
EXPERIMENT=${EXPERIMENT:?"Returned NULL Experiment"}
if [ $EXPERIMENT = "ERROR" ]
then
exit 32
else
#Timestamp
echo "----JOB [$JOB_NAME.$JOB_ID] START [`date`] on HOST [$HOSTNAME]----"
# -- END PRE-USER --
# **********************************************************
# Send notifications to the following address
#$ -M @vsoch
###################################################################
# SCRIPT START
###################################################################
# Add plink to path
export PATH=/usr/local/packages/plink-1.07/:$PATH
# NOTE(review): the three loops below call make_plink, but that function is
# only defined further down this file; bash resolves function names at call
# time, so as written these calls should fail — confirm intended ordering.
# Cycle through the list of males and females
# MALES (plink sex code 1)
for id in ${maleID[@]}; do
sex=1
make_plink
done
# FEMALES (plink sex code 2)
for id in ${femID[@]}; do
sex=2
make_plink
done
# UNIDENTIFIED (plink sex code 0 = unknown)
for id in ${uID[@]}; do
sex=0
make_plink
done
#-------------------------------------------------------------------
# make text file with all individual .ped/.maps and make merged file
#-------------------------------------------------------------------
# Format date variable (e.g. 2011Apr11) used to name today's merge list.
NOW=$(date +"%Y%b%d")
cd $EXPERIMENT/Analysis/Genetic/Individual/
# One "<subject>.ped <subject>.map" pair per line, for plink --merge-list.
for file in *.ped; do
name=${file%\.*}
echo $name.ped $name.map >> $EXPERIMENT/Analysis/Genetic/Merge_Lists/$NOW"_dns.txt"
done
# Create new master .ped/.map file
plink --merge-list $EXPERIMENT/Analysis/Genetic/Merge_Lists/$NOW"_dns.txt" --out $EXPERIMENT/Analysis/Genetic/Group/$NOW"_dns" --recode
#-------------------------------------------------------------------
# make_plink
# gets called or each sex to make the .ped nd .map files
#-------------------------------------------------------------------
function make_plink() {
# Convert one subject's raw 23andme text export into plink .ped/.map files.
# Reads globals: $id (subject ID), $sex (1/2/0), $fid/$pid/$mid/$pheno
# (pedigree fields), $EXPERIMENT (data root). Exits 32 if input is missing.
# NOTE(review): this function is invoked earlier in the file than it is
# defined — verify the script actually runs (bash needs define-before-call).
# Check to make sure input file exists
if [ -f "$EXPERIMENT/Data/Genetic/raw_text/$id.txt" ]; then
# Go to individual output folder
cd $EXPERIMENT/Analysis/Genetic/Individual/
# Skip subjects that were already converted on a previous run.
if [ ! -f "$EXPERIMENT/Analysis/Genetic/Individual/$id.ped" ]; then
echo "Data " $id".txt found. Creating plink files."
# Create .tfam file (one line: FID IID PAT MAT SEX PHENO)
echo "$fid $id $pid $mid $sex $pheno" > $EXPERIMENT/Analysis/Genetic/Individual/$id.tfam
# Make sure we don't have windows characters / carriage returns
dos2unix $EXPERIMENT/Data/Genetic/raw_text/$id.txt
# Read only the data after the comment
sed '/^\#/d' $EXPERIMENT/Data/Genetic/raw_text/$id.txt > $id.nocomment
# Print the tped file; a single-letter genotype column is duplicated so
# every marker ends up with two allele calls.
awk '{ if (length($4)==1) print $2,$1,"0",$3,substr($4,1,1),substr($4,1,1); else
print $2,$1,"0",$3,substr($4,1,1),substr($4,2,1) }' $id.nocomment > $id.tped
# Print the csv file (same fields, comma-separated, for spreadsheet use)
awk '{ if (length($4)==1) print $2,",",$1,",","0",",",$3,",",substr($4,1,1),",",substr($4,1,1)","; else
print $2,",",$1,",","0",",",$3,",",substr($4,1,1),",",substr($4,2,1),"," }' $id.nocomment > $EXPERIMENT/Analysis/Genetic/csv/$id.csv
# Create Individual .ped/.map files
plink --tfile $id --out $id --recode --allele1234 --missing-genotype - --output-missing-genotype 0
else
echo "Data " $id".ped already exists! Skipping subject."
fi
else
echo "Cannot find " $id".txt. Exiting!"
exit 32
fi
}
# -- END USER SCRIPT -- #
# **********************************************************
# -- BEGIN POST-USER --
echo "----JOB [$JOB_NAME.$JOB_ID] STOP [`date`]----"
OUTDIR=${OUTDIR:-$EXPERIMENT/Analysis/Genetic/Logs}
mv $HOME/$JOB_NAME.$JOB_ID.out $OUTDIR/$JOB_NAME.$JOB_ID.out
RETURNCODE=${RETURNCODE:-0}
exit $RETURNCODE
fi
# -- END POST USER--
| true |
2ce0573b7aef676b9cff15b98d8117cc6b6970ca | Shell | jsavatgy/haskell-ohjelmointikieli | /pandoc-script-minimal | UTF-8 | 545 | 2.984375 | 3 | [] | no_license | #!/bin/bash
# Build "$1".pdf from minimal.md with pandoc, embedding TikZ output
# generated by running the Haskell source "$1".hs.
# exit when any command fails
set -e
clear
# date "+%Y-%m-%d" > current.date
# date "+%-d.%-m.%Y" > current.date
# Render the TikZ figure consumed by the document.
runhaskell "$1".hs > latest.tikz
# Run pandoc as the `if` condition: under `set -e` a failing pandoc used to
# abort the script before the old `[ $? -eq 0 ]` check ever ran, so the
# ERROR branch was dead code. The explicit `exit 1` keeps the script's
# exit status non-zero on failure.
if pandoc minimal.md metadata.yaml\
  --template=minimal.sty \
  -f markdown+implicit_figures+link_attributes+latex_macros+tex_math_dollars+yaml_metadata_block+grid_tables \
  -V date="`date "+%-d.%-m.%Y"`" \
  --quiet \
  --pdf-engine=xelatex \
  -o "$1".pdf
then
  echo "PDF valmis."
else
  echo ERROR
  exit 1
fi
# cp "$1".pdf ../figs/
# Just use
# :!chmod +w %
| true |
14c51b8bce636a8ff3ad559d4670c62278fe2395 | Shell | mullovc/config-files | /scripts/warn_low_battery.sh | UTF-8 | 239 | 2.71875 | 3 | [] | no_license | #!/bin/bash
# Send a desktop notification when the battery charge is at or below 25%.
bat_path=/sys/class/power_supply/BAT0
low=25
# Nothing to do on machines without this battery (desktop, other slot).
[ -r "$bat_path/uevent" ] || exit 0
# Read the charge percentage from uevent; grep the file directly instead of
# piping it through cat, and let cut strip the "KEY=" prefix.
capacity_now=$(grep "POWER_SUPPLY_CAPACITY=" "$bat_path/uevent" | cut -d '=' -f 2)
if [ "$capacity_now" -le "$low" ]
then
	notify-send "low battery charge" --icon=battery-caution
fi
| true |
eaca5b19667be5f4722a71e0f81800438cf0c6a2 | Shell | rworkman/slackpkg | /doinst.sh | UTF-8 | 1,063 | 3.546875 | 4 | [] | no_license | config() {
# $1 is a freshly-installed "<name>.new" config file; decide whether it
# becomes the live "<name>" file or is kept for the admin to merge by hand.
NEW="$1"
OLD="$(dirname $NEW)/$(basename $NEW .new)"
# If there's no config file by that name, mv it over:
if [ ! -r $OLD ]; then
mv $NEW $OLD
elif [ "$(cat $OLD | md5sum)" = "$(cat $NEW | md5sum)" ]; then # toss the redundant copy
rm $NEW
fi
# Otherwise, we leave the .new copy for the admin to consider...
}
copy_mirror_file() {
	# Install the mirror list matching this machine's architecture as the
	# pending /etc/slackpkg/mirrors.new; config() then decides its fate.
	ARCH=$(uname -m)
	case $ARCH in
		x86-64|x86_64|X86-64|X86_64) SRCMIRROR=mirrors-x86_64.sample ;;
		s390)                        SRCMIRROR=mirrors-s390.sample ;;
		arm*)                        SRCMIRROR=mirrors-arm.sample ;;
		aarch64)                     SRCMIRROR=mirrors-aarch64.sample ;;
		# i386..i686 and anything unrecognized fall back to the x86 list.
		*)                           SRCMIRROR=mirrors-x86.sample ;;
	esac
	cp usr/doc/slackpkg-@VERSION@/$SRCMIRROR etc/slackpkg/mirrors.new
}
# Stage the arch-appropriate mirror list, then let config() merge each
# shipped ".new" file against any admin-edited copy already on disk.
copy_mirror_file
config etc/slackpkg/mirrors.new
config etc/slackpkg/slackpkg.conf.new
config etc/slackpkg/blacklist.new
# Drop cached package metadata so the next `slackpkg update` rebuilds it.
rm -f var/lib/slackpkg/ChangeLog.txt
rm -f var/lib/slackpkg/pkglist
rm -f var/lib/slackpkg/CHECKSUMS.md5*
| true |
a51154d0fe46328d1b47ea121d14c44a379e0671 | Shell | cupracer/myshell | /scripts/docker-volume-usage.sh | UTF-8 | 2,665 | 4.25 | 4 | [] | no_license | #!/bin/bash
UNUSEDONLY=0
UNUSEDVIDONLY=0
function checkUtils() {
    # Verify required external tools exist; report every missing one before
    # giving up so the user can fix them all in one pass.
    ISERR=0

    # `command -v` is the POSIX way to test availability (the external
    # `which` is non-standard and its exit status is less reliable).
    if ! command -v docker > /dev/null 2>&1; then
        printf "No \"docker\" binary found in path.\n"
        ISERR=1
    fi
    if ! command -v jq > /dev/null 2>&1; then
        printf "No \"jq\" binary found in path.\n"
        ISERR=1
    fi

    if [ $ISERR -ne 0 ]; then
        printf "Exiting.\n"
        exit 1
    fi
}
function usage() {
    # Emit the help text on stdout; spacing mirrors the old printf %-10s
    # column layout exactly.
    cat <<USAGE_EOF
Usage: $0 [ -u | -q | -h ]

Options:

 -u        Print unused volumes only
 -q        Print unused volume ID's only

USAGE_EOF
}
function parseContainers() {
    # Print every container that references volume $1 (inspect name $2), or
    # mark the volume unused; honors the global UNUSEDONLY/UNUSEDVIDONLY flags.
    V=$1
    VID=$2
    declare -a CONTAINERIDS
    declare -a CONTAINERNAMES

    for C in $(docker ps -aq); do
        CRAW=$(docker inspect "${C}")
        # -F: match the volume name as a fixed string, never as a regex;
        # quoting keeps the JSON intact through the pipeline.
        if printf '%s' "${CRAW}" | grep -qF "${VID}"; then
            CNAME=$(printf '%s' "${CRAW}" | jq -r '.[0].Name')
            CONTAINERIDS+=("${C}")
            CONTAINERNAMES+=("${CNAME}")
        fi
    done

    if [ ${#CONTAINERIDS[@]} -gt 0 ]; then
        if [ $UNUSEDONLY -ne 1 ]; then
            for C in "${!CONTAINERIDS[@]}"; do
                printf "%-65s %-13s %s\n" "${V}" "${CONTAINERIDS[$C]}" "${CONTAINERNAMES[$C]}"
            done
        fi
    else
        if [ $UNUSEDVIDONLY -ne 1 ]; then
            printf "%-65s %-13s %s\n" "${V}" "-" "(unused)"
        else
            # Pass the name as an argument, not the format string, so a '%'
            # in a volume name cannot be misinterpreted by printf.
            printf '%s\n' "${V}"
        fi
    fi
}
# --- main ---
checkUtils

# Parse display-mode flags: -u → unused volumes only; -q → bare IDs of
# unused volumes (implies -u); -h → help.
while getopts ":quh" opt; do
  case $opt in
    u)
      UNUSEDONLY=1
      ;;
    q)
      UNUSEDONLY=1
      UNUSEDVIDONLY=1
      ;;
    h)
      usage
      exit 0
      ;;
    \?)
      echo "Invalid option: -$OPTARG" >&2
      usage
      exit 1
      ;;
  esac
done

# Header row is suppressed in quiet (-q) mode so output stays script-friendly.
if [ $UNUSEDVIDONLY -ne 1 ]; then
    printf "%-65s %-13s %s\n" "VOLUME" "CONTAINER ID" "CONTAINER NAME"
fi

# Walk every volume and report its users (or flag it unused).
VOLUMES=$(docker volume ls -q | sort)
for V in ${VOLUMES}; do
    VRAW=$(docker volume inspect ${V})
    VID=$(echo ${VRAW} | jq -r '.[0].Name')
    parseContainers ${V} ${VID}
done
| true |
a6a59c1de0b21f6e5efbec9be8222efedaff0cc0 | Shell | vamega/vector_jsonnet | /scripts/validate-examples.sh | UTF-8 | 916 | 4.0625 | 4 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
# validate-examples.sh
#
# SUMMARY
#
#   Generate vector.toml file from JSONNET example and inject them into
#   a vector container.

set -e

if [ -z "$1" ]; then
    echo "usage: $0 <example_file.toml>"
    exit 1
fi

command -v jsonnet >/dev/null 2>&1 || { echo >&2 "jsonnet is required (https://github.com/google/jsonnet)... Aborting."; exit 1; }
command -v docker >/dev/null 2>&1 || { echo >&2 "docker is required (https://docs.docker.com/get-docker/)... Aborting."; exit 1; }

# Render the jsonnet example into a temp TOML file; remove it on any exit.
VECTOR_TOML=$(mktemp)
trap 'rm -f -- "${VECTOR_TOML}"' EXIT

pushd "$(dirname "$1")" > /dev/null
# { ...; } instead of the old ( ...; exit 1 ): an `exit` inside a subshell
# cannot terminate this script by itself (set -e just happened to rescue
# the previous code).
sed -e 's|^\.json$|.toml|' "$(basename "$1")" | jsonnet -S - > "${VECTOR_TOML}" || { echo "$1 is unvalid... need to fixit"; exit 1; }
popd > /dev/null

# Capture docker's status explicitly: under `set -e` a bare `status=$?`
# after a failing command was dead code — the script had already exited,
# so the "unvalid" message below could never be printed.
status=0
docker run -v "${VECTOR_TOML}":/etc/vector/vector.toml:ro timberio/vector:latest-alpine validate || status=$?
if test $status -eq 0
then echo "$1 is valid"
else echo "$1 is unvalid... need to fixit"
fi
exit $status
1c302efb2bfe759ffbcc9ecd86171b563e57e546 | Shell | sillsdev/fwmeta | /fwmeta/git-start | UTF-8 | 5,252 | 4.125 | 4 | [] | no_license | #!/bin/bash
# Starts a new release|hotfix|support or long-lived feature branch
# Creating short-lived feature branches is delegated to git-start-task script.
# Gitflow allows only one release branch, but we can have multiple support and hotfix
# branches. A release branch is based on develop, a hotfix branch on master or a support branch,
# and a support branch on master or a support branch. A feature branch is based on develop, or
# the current feature branch.
set -e
# Print full usage text to stdout, then exit with the status passed in $1
# (0 for an explicit help request, non-zero for a usage error).
usage()
{
echo "Usage:"
echo
echo "$(basename "$0") <kind> <version> [<base>]"
echo "$(basename "$0") task <taskkind> <name> [<parentbranch>]"
echo "$(basename "$0") feature <name> [<parentbranch>]"
echo "$(basename "$0") support <version> <tag>"
echo
echo "Creates a new long-lived release, hotfix, or support branch and pushes it to the"
echo "server."
echo "The second form creates a short-lived feature branch locally."
echo "The third form creates a long-lived feature branch and pushes it to the server."
echo
echo "<kind>"
echo " One of release or hotfix"
echo
echo "<version>"
echo " Version number of upcoming release. This will be part of the release, hotfix,"
echo " or support branch name."
echo
echo "<base>"
echo " Optional. Commit to use as base for release, hotfix, or support branch instead of"
echo " master or develop."
echo
echo "<taskkind>"
echo " One of release|hotfix|develop. Kind of topic branch. Determines the parent"
echo " branch unless one is explictly specified."
echo
echo "<name>"
echo " Name of the topic branch (without prefix)."
echo
echo "<parentbranch>"
echo " Optional. The name of the parent branch. If not given and <taskkind> is 'release'"
echo " we use the current release branch as base. If <taskkind> is 'hotfix' we fail"
echo " unless the current branch is a hotfix branch. If <taskkind> is 'develop' and the"
echo " current branch has a corresponding remote branch we base on the current branch,"
echo " otherwise we use 'develop' as base."
exit $1
}
. "$(dirname "$0")/defs.sh"
. "$(dirname "$0")/functions.sh"
if [ $# -lt 1 ]; then
echo "fatal: missing parameter"
usage 1
fi
# First argument selects the branch kind; remaining args shift into place.
kind=$1
shift
case "$kind" in
"task")
# Short-lived task branches are handled by a dedicated sibling script.
$(dirname "$0")/git-start-task "$@"
exit 0
;;
"release"|"feature"|"support"|"hotfix")
# Branch-name prefix, e.g. "release/", configurable via gitflow settings.
prefix=$(git config --get gitflow.prefix.$kind || echo $kind/)
;;
"--help"|"-h")
usage 0
;;
*)
echo "fatal: Unknown kind: '$1'."
usage 1
;;
esac
# Unfortunately there's no Gerrit command that we could use to check if we have permission to
# push a branch. Instead we check the group. That's a hack but better than no check. The group
# is only visible to admins and members of the group. And we don't want to do the expensive
# check when running unit tests.
if ! $unittest && ! ssh -p $GERRITPORT $gerritusername@$GERRIT gerrit ls-groups | grep -q "$GERRITGROUP"; then
echo "fatal: No permission to create $kind branch on Gerrit server"
exit 1
fi
# We can have multiple support, hotfix and feature branches, but only one release branch
case "$kind" in
"support"|"feature"|"hotfix")
;;
*)
if git ls-remote $origin | grep -q $prefix; then
echo "fatal: There is already an existing $kind branch ($(git ls-remote $origin | grep $prefix | cut -c53-))."
echo " Finish that one first."
exit 1
fi
;;
esac
# Strip a duplicated prefix from the user-supplied name before re-adding it.
branch=$prefix${1#$prefix}
git fetch $origin
# Base commit: explicit second argument wins; otherwise derive it per kind.
if [ -n "$2" ]; then
baseCommit=$2
else
case "$kind" in
"release")
baseCommit=$origin/$develop
;;
"feature")
currentBranch=$(currentBranch)
if [ "${currentBranch#$prefix}" != "$currentBranch" ]; then
# we're on a feature branch. Use that as parent branch
baseCommit=$currentBranch
else
# base on develop
baseCommit=$origin/$develop
fi
;;
"hotfix")
currentBranch=$(currentBranch)
if [ "${currentBranch#$prefix}" != "$currentBranch" ]; then
# we're on a support branch. Use that as parent branch
baseCommit=$currentBranch
else
# base on master
baseCommit=$origin/$master
fi
;;
"support")
echo "fatal: please specify base commit for support branch."
usage 1
;;
esac
fi
# Create the branch locally, publish it, and register Gerrit-style refspecs
# so future pushes go through code review (refs/for/<branch>).
git checkout -b $branch $baseCommit
git push --set-upstream $origin $branch:$branch
git config --add remote.$origin.fetch "+refs/heads/$branch:refs/remotes/origin/$branch"
git config --add remote.$origin.push "+refs/heads/$branch:refs/for/$branch"
# It would be beneficial to do a git fetch here. However, because the mirroring from gerrit
# to github takes some time this would probably fail, so we don't do it now.
echo
echo "Summary of actions:"
echo "- A new branch '$branch' was created, based on '$baseCommit'"
echo "- Branch '$branch' was pushed to the server"
echo "- You are now on branch '$branch'"
echo
case "$kind" in
"release"|"hotfix"|"support")
echo "Follow-up actions:"
echo "- Bump the version number now! (FieldWorks/Src/MasterVersionInfo.txt)"
echo "- Create new '$kind' branches in other repos (FieldWorks, FwDebian, PatchableInstaller)"
echo "- Start committing bug fixes"
echo "- When done, run:"
;;
"feature")
echo "Now, start working on the long-lived feature branch. When done, use:"
;;
esac
echo
echo " git finish $kind ${branch#$prefix}"
echo
| true |
0b34322b2e92279b1b88a700715124a0b8117f98 | Shell | CZ-NIC/turris-build | /helpers/generate_junit.sh | UTF-8 | 1,322 | 3.828125 | 4 | [] | no_license | #!/bin/sh
# It doesn't make sense to run this script if there is no source data
if [ ! -d $PWD/logs ]; then
echo "Directory $PWD/logs doesn't exist!"
echo "Skipping junit output"
exit 0
fi
# Insert XML header; error/test counts are computed inside the heredoc via
# command substitution when it is expanded.
cat > $PWD/logs/junit.xml << EOF
<?xml version='1.0' encoding='utf-8'?>
<testsuites errors='$(cat $PWD/logs/package/error.txt | wc -l)' tests='$(find $PWD/logs/package/ -name compile.txt | wc -l)'>
EOF
# Going through the package build logs. Each $log looks like
# "package/<name>/compile.txt", so dirname yields "package/<name>".
(cd $PWD/logs; find package/ -name compile.txt; cd ..) | while read log; do
PKG_NAME="$(dirname $log)"
# This branch takes care about packages which haven't been build.
# The sed rewrites "]]>" so the log tail cannot terminate the CDATA section
# early, and strips ESC bytes (terminal color codes) for valid XML.
if grep "ERROR: $PKG_NAME failed" logs/package/error.txt 2> /dev/null; then
cat >> $PWD/logs/junit.xml << EOF
<testsuite errors='1' name='$PKG_NAME' tests='1'>
<testcase name='compile'>
<error message='$PKG_NAME failed to compile' type='error'><![CDATA[
$(tail -n 300 $PWD/logs/$log | sed -e 's|\]\]>|\]\] >|g' -e 's/\x1b//g')
]]></error>
</testcase>
</testsuite>
EOF
# This branch just makes notes about packages which have been built without problems
else
cat >> $PWD/logs/junit.xml << EOF
<testsuite errors='0' failures='0' name='$PKG_NAME' tests='1'>
<testcase name='compile'/>
</testsuite>
EOF
fi
done
# Close the XML file
cat >> $PWD/logs/junit.xml << EOF
</testsuites>
EOF
| true |
7e54efe5434bd19ba2631e44681b5583da0a8e58 | Shell | gsiciliano/terraform-ecs-sample | /templates/cluster-user-data.sh | UTF-8 | 345 | 3.046875 | 3 | [] | no_license | #!/bin/bash
# EC2 user-data: format and mount the secondary NVMe volume (first boot
# only — skip mkfs if a filesystem already exists), then join the ECS
# cluster. ${ecs_cluster} is presumably interpolated by Terraform's template
# engine before the shell ever sees it — confirm against the module.
if [ -e /dev/nvme1n1 ]; then
  if ! file -s /dev/nvme1n1 | grep -q filesystem; then
    mkfs.ext4 /dev/nvme1n1
  fi
  # NOTE(review): <<-EOF strips leading TABS only; the fstab line below
  # appears space-indented, so the written entry may keep its leading
  # whitespace (harmless to mount, but verify intent).
  cat >> /etc/fstab <<-EOF
  /dev/nvme1n1 /data ext4 defaults,noatime,nofail 0 2
EOF
  mkdir /data
  mount /data
fi
echo ECS_CLUSTER=${ecs_cluster} >> /etc/ecs/ecs.config
systemctl try-restart ecs --no-block
57fb7a0ec3e0331d78bec7f5844804b6ce86b54c | Shell | praveensastry/cloudflow | /examples/taxi-ride/test-data/send-data-fares.sh | UTF-8 | 1,368 | 3.03125 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
#
# Copyright (C) 2016-2020 Lightbend Inc. <https://www.lightbend.com>
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Parse -p <port>: the local port of the HTTP ingress endpoint.
while getopts ":p:" opt; do
  case $opt in
    p)
      port="$OPTARG"
      ;;
    \?)
      echo "Invalid option -$OPTARG" >&2
      exit 1
      ;;
    :)
      echo "Option -$OPTARG requires an argument." >&2
      exit 1
      ;;
  esac
done

if [[ -z "$port" ]]; then
  echo "Usage: send-data-fares.sh -p <port>"
  exit 1
fi

# POST each line of the sample file as one JSON record. Read line-by-line
# instead of word-splitting $(cat ...): a record containing whitespace would
# otherwise be broken into several bogus requests. The `|| [[ -n $str ]]`
# clause still processes a final line that lacks a trailing newline.
while IFS= read -r str || [[ -n "$str" ]]
do
  echo "Using $str"
  curl -i -X POST http://localhost:"$port" -H "Content-Type: application/json" --data "$str"
done < nycTaxiFares.json
| true |
7c1e5327ab904eeff1c39d8cdfe22ecdad615815 | Shell | cschroed-usgs/stig | /files/default/check_duplicate_gid.sh | UTF-8 | 756 | 3.953125 | 4 | [
"CC0-1.0"
] | permissive | #!/bin/bash
# Check for Duplicate GIDs
# Although the groupadd program will not let you create a duplicate
# Group ID (GID), it is possible for an administrator to manually
# edit the /etc/group file and change the GID field.
#
# User groups must be assigned unique GIDs for accountability and
# to ensure appropriate access protections.
echo "The Output for the Audit of Control 9.2.16 - Check for Duplicate GIDs is"
# Pipeline feeds "count gid" lines (uniq -c over sorted GID column) into the
# loop; note the loop runs in a subshell, so it cannot set outer variables.
/bin/cat /etc/group | /bin/cut -f3 -d":" | /bin/sort -n | /usr/bin/uniq -c |\
while read x ; do
[ -z "${x}" ] && break
# Re-split the "count gid" pair into $1/$2 via positional parameters.
set - $x
if [ $1 -gt 1 ]; then
# Collect every group name sharing this GID, joined on one line by xargs.
grps=`/bin/gawk -F: '($3 == n) { print $1 }' n=$2 \
/etc/group | xargs`
echo "Duplicate GID ($2): ${grps}"
fi
done
ee930515c39f025963b0039d324e81d4064bab5f | Shell | janejpark/niehs | /final_scripts/191126_merge1.sh | UTF-8 | 587 | 2.78125 | 3 | [] | no_license | #!/bin/bash -l
#SBATCH -D /home/jajpark/niehs/final_scripts/
#SBATCH -o /home/jajpark/niehs/slurm-log/191126-merge1-stout-%j.txt
#SBATCH -e /home/jajpark/niehs/slurm-log/191126-merge1-stderr-%j.txt
#SBATCH -J merge1
#SBATCH -p high
#SBATCH -t 24:00:00
module load bamtools
inbam=~/niehs/Data/dedup
outdir=~/niehs/Data/mergedalignments
cd $inbam
f=$(find $inbam -name "GTC_00_35_1*dedup.bam")
name=$(echo $f | cut -d "/" -f 7 | cut -d "_" -f 1-4)
echo "Processing sample ${name}"
find $inbam -name "*$name*bam" >$name.list
bamtools merge -list $name.list -out $outdir/$name.bam
| true |
5791644d22150651d1f121d7e1cc6442752835e0 | Shell | pdhhiep/Computation_using_Python | /triangle_grid/triangle_grid_test.sh | UTF-8 | 235 | 2.6875 | 3 | [] | no_license | #!/bin/bash
#
# Run the test driver and capture its stdout. Testing the command directly
# fixes the old error path, where a bare `exit` after the echo exited with
# the echo's status (0) and so reported success on failure.
if ! python triangle_grid_test.py > triangle_grid_test_output.txt; then
  echo "Errors running triangle_grid_test.py"
  exit 1
fi
#
# -f: do not fail when the interpreter produced no .pyc files.
rm -f *.pyc
#
echo "Test program output written to triangle_grid_test_output.txt."
| true |
429bf3e5d229e8a0b3626188a78367dad55f34f9 | Shell | voiser/zoe-mother | /zam/postinst | UTF-8 | 236 | 3.546875 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Install the default zoe-mother config on first install; never clobber an
# existing (possibly user-edited) config file.

# Fail fast with a clear message instead of silently operating on "/etc/..."
# when the installer environment forgot to export ZOE_HOME.
: "${ZOE_HOME:?ZOE_HOME must be set}"

F=zoe-mother.conf
ORIG=${ZOE_HOME}/etc/${F}.example
DEST=${ZOE_HOME}/etc/${F}

if [ -f "${DEST}" ]; then
    echo "Config file exists, keeping it..."
else
    echo "Creating default config file"
    cp "$ORIG" "$DEST"
fi
| true |
f4478d2ae4924d391d8262548972e1e013b93fd8 | Shell | yarec/svm | /shell/funcs/scsh.sh | UTF-8 | 900 | 3.3125 | 3 | [] | no_license |
ins_scsh(){
# Clone (once) and build/install scsh from source.
# `git_clone` and `$sudo_str` are presumably provided by the surrounding
# svm framework (helper function / optional "sudo" prefix) — confirm.
SCSH_TMP_DIR=$HOME/.svm/src/scsh
SCSH_SOURCE="https://e.coding.net/ear/scsh.git"
# Only clone when no checkout exists yet; rebuilds reuse the working tree.
if [ ! -d "$SCSH_TMP_DIR/.git" ]; then
git_clone "scsh" $SCSH_TMP_DIR $SCSH_SOURCE
fi
cd "$SCSH_TMP_DIR" && \
git submodule update --init && \
autoreconf && \
./configure && $sudo_str make install
}
#deps git gcc autoconf
ins_s48(){
# Clone (once) the repository carrying the Scheme 48 tarball, unpack it,
# and build/install it — scsh's runtime prerequisite.
S48_TMP_DIR=$HOME/.svm/src/s48
S48_SOURCE="https://e.coding.net/ear/s48.git"
if [ ! -d "$S48_TMP_DIR/.git" ]; then
git_clone "s48" $S48_TMP_DIR $S48_SOURCE
fi
S48_TAR=$S48_TMP_DIR/scheme48-1.9.2.tgz
S48_TAR_DIR=$S48_TMP_DIR/scheme48-1.9.2
cd $S48_TMP_DIR && tar xvf $S48_TAR
cd "$S48_TAR_DIR" && \
./configure && $sudo_str make install
}
check_scsh(){
	# Ensure scsh is available; `has` is the caller-provided
	# command-existence helper.
	has "scsh" && return 0
	# scsh builds on top of Scheme 48, so install that prerequisite first.
	has "scheme48" || ins_s48
	ins_scsh
}
| true |
2e8598e0bc3a84065b0115fba64e5d1641cdf5fa | Shell | nptit/simplecode | /.svn/pristine/2e/2e8598e0bc3a84065b0115fba64e5d1641cdf5fa.svn-base | UTF-8 | 567 | 3.46875 | 3 | [] | no_license | #!/bin/bash
# $IFS treats whitespace separators differently from other (non-whitespace) characters.
output_args_one_per_line()
{
  # Print each positional parameter on its own line, wrapped in brackets,
  # making the effect of $IFS word splitting at the call site visible.
  for param in "$@"; do
    printf '[%s]\n' "$param"
  done
}
echo; echo "IFS=\" \""
echo "-------"
IFS=" "
var=" a b c "
# Whitespace IFS: leading/trailing/repeated separators are collapsed, so no
# empty fields are produced.
output_args_one_per_line $var # output_args_one_per_line `echo " a b c "`
#
# [a]
# [b]
# [c]
echo; echo "IFS=:"
echo "-----"
IFS=:
var=":a::b:c:::" # Same as above, but with ":" substituted for " ".
# Non-whitespace IFS: every adjacent, leading, or trailing ":" delimits a
# field, so empty fields appear in the output.
output_args_one_per_line $var
#
# []
# [a]
# []
# [b]
# [c]
# []
# []
# []
# The same thing also happens with awk's "FS" field separator.
# Thanks, Stephane Chazelas.
echo
exit 0
| true |
cf3cce851c0551bdc2b806e083f887a85809beb9 | Shell | wresch/old_code_for_bed_files | /entropy.sh | UTF-8 | 927 | 2.75 | 3 | [] | no_license | #! /usr/bin/env bash
# Compute fragment-size entropy profiles per chromosome (cached in
# chr<N>.test files), then plot the normalized curves with R/ggplot2.
BINWIDTH=50
MIN=0
MAX=200
CHRLIST=( 1 3 5 7 9 11 15 19 )
for i in ${CHRLIST[@]}
do
# Skip chromosomes already processed on a previous run.
if [ -e "chr${i}.test" ]
then
echo "File chr${i}.test already exists - skipping"
else
$HOME/src/c/bed_tools/frag-size-mm9 chr${i}.bed chr${i} $BINWIDTH $MIN $MAX > "chr${i}.test"
fi
done
# Concatenate all per-chromosome tables for the R session below.
cat chr*.test > all.test
# Quoted heredoc delimiter: the R program is passed through literally, with
# no shell expansion.
R --slave <<'R_INPUT_END'
H <- read.table("all.test", sep="|")
colnames(H) <- c("shift", "H", "Chrom")
# normalize each chromosome to it's max
H$norm <- H$H
chromosomes <- unique(H$Chrom)
for (chrom in chromosomes) {
f <- H$Chrom == chrom
H$norm[f] <- H$norm[f] / max(H$norm[f])
}
minH <- by(H$H, H$Chrom, min)
for (chrom in names(minH)) {
print(paste(chrom, ": ", H$shift[H$H==minH[[chrom]]]))
}
library(ggplot2)
pdf("all.test.pdf")
p <- ggplot(data=H, mapping=aes(x=shift, y=norm, col=Chrom)) +
geom_line(size=1)
print(p)
dev.off()
R_INPUT_END
open "all.test.pdf"
| true |
c54073a873c432d9342309c5faed4fa2d6db8bb6 | Shell | marvinpinto/actions | /scripts/mirror-actions.sh | UTF-8 | 4,209 | 3.828125 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
set -e

# Require exactly one argument: the release tag to mirror.
if [[ $# -ne 1 ]]; then
  echo "This needs a release tag. e.g. $0 v0.0.1"
  exit 1
fi

if [[ -z "$GITHUB_SUPER_TOKEN" ]]; then
  echo "This script needs a GitHub personal access token."
  exit 1
fi

# Target standalone repositories, one per published action.
ACTION_KEYBASE_NOTIFICATIONS_REPO="action-keybase-notifications"
ACTION_AUTOMATIC_RELEASES_REPO="action-automatic-releases"
ACTION_INJECT_SSM_SECRETS_REPO="action-inject-ssm-secrets"

TAG=$1
GITHUB_LOGIN="marvinpinto"
RELEASE_BODY="Details available at [marvinpinto/actions@${TAG}](https://github.com/marvinpinto/actions/releases/tag/${TAG})."

# The rolling "latest" tag is published as a prerelease.
PRERELEASE="false"
if [[ "$TAG" == "latest" ]]; then
  PRERELEASE="true"
fi

# Only the canonical repo may mirror; forks running this workflow no-op.
if [[ "$GITHUB_REPOSITORY" != "marvinpinto/actions" ]]; then
  echo "This mirror script is only meant to be run from marvinpinto/actions, not ${GITHUB_REPOSITORY}. Nothing to do here."
  exit 0
fi
# Commit the staged files in /tmp/$1, (re)point tag $TAG at the result, and
# recreate the corresponding GitHub release. Reads globals: TAG, PRERELEASE,
# RELEASE_BODY, GITHUB_LOGIN, GITHUB_SUPER_TOKEN.
create_tagged_release() {
  REPO=$1
  pushd /tmp/${REPO}/

  # Set the local git identity
  git config user.email "${GITHUB_LOGIN}@users.noreply.github.com"
  git config user.name "$GITHUB_LOGIN"

  # Obtain the release ID for the previous release of $TAG (if present)
  # NOTE: `local var=$(...)` masks curl's exit status; under set -e a failed
  # lookup is silently treated as "no previous release".
  local previous_release_id=$(curl --user ${GITHUB_LOGIN}:${GITHUB_SUPER_TOKEN} --request GET --silent https://api.github.com/repos/${GITHUB_LOGIN}/${REPO}/releases/tags/${TAG} | jq '.id')

  # Delete the previous release (if present)
  if [[ -n "$previous_release_id" ]]; then
    echo "Deleting previous release: ${previous_release_id}"
    curl \
      --user ${GITHUB_LOGIN}:${GITHUB_SUPER_TOKEN} \
      --request DELETE \
      --silent \
      https://api.github.com/repos/${GITHUB_LOGIN}/${REPO}/releases/${previous_release_id}
  fi

  # Delete previous identical tags, if present ("|| true": first publish has
  # no tag to delete and must not abort under set -e)
  git tag -d $TAG || true
  git push origin :$TAG || true

  # Add all the changed files and push the changes upstream ("|| true": a
  # no-op commit when nothing changed is fine)
  git add -f .
  git commit -m "Update release files for tag: ${TAG}" || true
  git push -f origin master:master
  git tag $TAG
  git push origin $TAG

  # Generate a skeleton release on GitHub; the JSON body is fed to curl on
  # stdin via --data @-.
  curl \
    --user ${GITHUB_LOGIN}:${GITHUB_SUPER_TOKEN} \
    --request POST \
    --silent \
    --data @- \
    https://api.github.com/repos/${GITHUB_LOGIN}/${REPO}/releases <<END
  {
    "tag_name": "$TAG",
    "name": "Auto-generated release for tag $TAG",
    "body": "$RELEASE_BODY",
    "draft": false,
    "prerelease": $PRERELEASE
  }
END
  popd
}
# Mirror one package into its standalone action repository.
#   $1   - target repository name (e.g. action-automatic-releases)
#   $2   - package directory name under packages/
#   $3.. - extra directories (beyond dist/) to copy, e.g. "images"
# Replaces three near-identical copy/paste stanzas; same steps, same order.
mirror_package() {
  local repo="$1"
  local pkg="$2"
  shift 2

  rm -rf "/tmp/${repo}"
  git clone "https://marvinpinto:${GITHUB_SUPER_TOKEN}@github.com/marvinpinto/${repo}.git" "/tmp/${repo}"

  local dir
  for dir in dist "$@"; do
    cp -R "packages/${pkg}/${dir}" "/tmp/${repo}/"
  done
  cp "packages/${pkg}/README.md" "/tmp/${repo}/"
  cp "packages/${pkg}/action.yml" "/tmp/${repo}/"
  cp LICENSE "/tmp/${repo}/"

  create_tagged_release "${repo}"
}

# Mirroring Keybase Notifications (the only action shipping an images/ dir)
mirror_package "$ACTION_KEYBASE_NOTIFICATIONS_REPO" keybase-notifications images

# Mirroring Automatic Releases
mirror_package "$ACTION_AUTOMATIC_RELEASES_REPO" automatic-releases

# Mirroring SSM Secrets
mirror_package "$ACTION_INJECT_SSM_SECRETS_REPO" aws-ssm-secrets
| true |
b086196b71a46c5d81a2229ab1e5ea280a735732 | Shell | mouchabeurre/dotfiles | /dotfiles/run_once_before_decrypt-private-key.sh.tmpl | UTF-8 | 257 | 3.15625 | 3 | [] | no_license | #!/usr/bin/env bash
echo "[#] Decrypting key needed:"
KEY="{{ .chezmoi.cacheDir }}/key.txt"
if [[ ! -f "${KEY}" ]]; then
mkdir -p "{{ .chezmoi.cacheDir }}"
age --decrypt --output "${KEY}" "{{ .chezmoi.workingTree }}/key.txt.age"
chmod 600 "${KEY}"
fi
| true |
b712cd4fd1ba093e04c8aa8e8a0b39537e2dffc0 | Shell | vcalderon2009/ECO_Mocks_Catls_CAM | /Synch_files.sh | UTF-8 | 1,304 | 4 | 4 | [
"MIT"
] | permissive | #!/bin/sh
#
# Description: Synchronizes files and figures from Bender
#
# Parameters
# ----------
# file_opt: string
# Options:
# - catalogues
# Defining Directory
DIR_LOCAL="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
SSH_USERNAME="caldervf"
SSH_HOST="zoidberg.phy.vanderbilt.edu"
SSH_PATH="/home/www/groups/data_eco_vc/ECO_CAM/Mock_Catalogues"
# Commands to Send over SSH
CATL_COPY_COMMAND="${SSH_USERNAME}@${SSH_HOST}:${SSH_PATH}"
# option
file_opt=$1
echo "\n==> Option: ${file_opt}"
## Help option
usage="Synch_files.sh [-h] [file_opt] -- Program that synchronizes files between 'local' and Bender
where:
-h show this help text
Options for 'file_opt':
- 'catalogues' Synchronizes catalogues files in 'data' folder"
if [[ ${file_opt} == '-h' ]]; then
echo "==> Usage: $usage\n"
# exit 0
fi
##
## Synchronizing
# Catalogues
if [[ ${file_opt} == 'catalogues' ]]; then
# Removes previous catalogues
echo "ssh "${SSH_USERNAME}@${SSH_HOST}" rm -rf ${SSH_PATH}/*"
ssh "${SSH_USERNAME}@${SSH_HOST}" rm -rf ${SSH_PATH}/*
# Copying over files
echo "==> rsync -chavzP --stats "${DIR_LOCAL}/data/processed/TAR_files/" "${CATL_COPY_COMMAND}"\n"
rsync -chavzP --stats "${DIR_LOCAL}/data/processed/TAR_files/" "${CATL_COPY_COMMAND}"
fi | true |
dfab629af2af1d192ee84bf57a3936d3b3086107 | Shell | s0urc3c0d3/marathon | /gen_hosts.sh | UTF-8 | 577 | 2.59375 | 3 | [] | no_license | #!/bin/bash
for stack in $(curl -s rancher-metadata.rancher.internal/2015-12-19/stacks | awk -F= '{print $2}')
do
for service in $(curl -s rancher-metadata.rancher.internal/2015-12-19/stacks/${stack}/services | awk -F= '{print $2}')
do
for container in $(curl -s rancher-metadata.rancher.internal/2015-12-19/stacks/${stack}/services/${service}/containers | awk -F= '{print $2}')
do
echo $(curl -s rancher-metadata.rancher.internal/2015-12-19/stacks/${stack}/services/${service}/containers/${container}/primary_ip) ${stack}-${container} ${container}
done
done
done
| true |
130222e8dcf3ef8de8edd6c7904768e5644455c9 | Shell | webgears/docker-php | /bin/slack | UTF-8 | 27,732 | 3.09375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# The MIT License (MIT)
#
# Copyright (c) 2018 Rocky Madden (https://rockymadden.com/)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Directory containing this script. bindir and etcdir are identical here;
# bindir appears unused below in this view -- verify before removing.
bindir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
etcdir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# API token lookup order: SLACK_CLI_TOKEN env var, then a .slack file next to
# the script, then ~/.slack, then /etc/.slack. Only the file's first line is read.
if [ -n "${SLACK_CLI_TOKEN}" ]; then
token="${SLACK_CLI_TOKEN}"
elif [ -r "${etcdir}/.slack" ] && [ -f "${etcdir}/.slack" ]; then
token=$(sed -n '1p' < "${etcdir}/.slack")
elif [ -r "${HOME}/.slack" ] && [ -f "${HOME}/.slack" ]; then
token=$(sed -n '1p' < "${HOME}/.slack")
elif [ -r "/etc/.slack" ] && [ -f "/etc/.slack" ]; then
token=$(sed -n '1p' < "/etc/.slack")
fi
# COMMAND PARSING #################################################################################
# First positional argument is the top-level command (e.g. "chat", "file").
cmd="${1}" ; shift
# SUBCOMMAND PARSING ##############################################################################
# For two-word commands the next argument is the subcommand; "${cmd}${1}"
# concatenates to a routing key like "chatsend" or "fileupload".
case "${cmd}${1}" in
chatdelete|chatsend|chatupdate|\
filedelete|fileinfo|filelist|fileupload|\
presenceactive|presenceaway|\
reminderadd|remindercomplete|reminderdelete|reminderinfo|reminderlist| \
snoozeend|snoozeinfo|snoozestart|\
statusclear|statusedit)
sub=${1} ; shift
;;
esac
# PIPE FRIENDLINESS ###############################################################################
# When stdin is a pipe, capture it for commands that accept piped input.
case "${cmd}${sub}" in
chatsend|chatupdate|fileupload) [ -p /dev/stdin ] && stdin=$(cat <&0) ;;
esac
# ARGUMENT AND OPTION PARSING #####################################################################
# Parse remaining options and positional arguments. Long options accept both
# "--opt value" and "--opt=value"; short flags are pattern-matched in order.
# Fixes relative to the previous version:
#   * --author-link/-al stored its value in ${titlelink}; now ${authorlink}
#   * --image=VALUE stripped the wrong prefix (--image-url=), leaving the
#     flag text in the value; --timestamp-from= / --timestamp-to= likewise
#     stripped non-matching prefixes (--timestampfrom= / --timestampto=)
#   * -ai/-al/-at were swallowed by the earlier -a* (actions) pattern, and
#     -fi/-ft by -f* (filter); the more specific flags now match first
as_user=true
while (( "$#" )); do
  case "${1}" in
    --actions=*) actions=${1/--actions=/''} ; shift ;;
    --author-icon=*) authoricon=${1/--author-icon=/''} ; shift ;;
    --author-icon*|-ai*) authoricon=${2} ; shift ; shift ;;
    --author-link=*) authorlink=${1/--author-link=/''} ; shift ;;
    --author-link*|-al*) authorlink=${2} ; shift ; shift ;;
    --author=*) author=${1/--author=/''} ; shift ;;
    --author*|-at*) author=${2} ; shift ; shift ;;
    --actions*|-a*) actions=${2} ; shift ; shift ;;
    --bot|-b) as_user=false ; shift ;;
    --channels=*) channels=${1/--channels=/''} ; shift ;;
    --channels*|-chs*) channels=${2} ; shift ; shift ;;
    --channel=*) channel=${1/--channel=/''} ; shift ;;
    --channel*|-ch*) channel=${2} ; shift ; shift ;;
    --color=*) color=${1/--color=/''} ; shift ;;
    --color*|-cl*) color=${2} ; shift ; shift ;;
    --compact|-cp) compact='-c' ; shift ;;
    --comment=*) comment=${1/--comment=/''} ; shift ;;
    --comment*|-cm*) comment=${2} ; shift ; shift ;;
    --count=*) count=${1/--count=/''} ; shift ;;
    --count|-cn*) count=${2} ; shift ; shift ;;
    --emoji=*) emoji=${1/--emoji=/''} ; shift ;;
    --emoji*|-em*) emoji=${2} ; shift ; shift ;;
    --fields=*) fields=${1/--fields=/''} ; shift ;;
    --fields*|-flds*) fields=${2} ; shift ; shift ;;
    --filename=*) filename=${1/--filename=/''} ; shift ;;
    --filename*|-fln*) filename=${2} ; shift ; shift ;;
    --filetype=*) filetype=${1/--filetype=/''} ; shift ;;
    --filetype*|-flt*) filetype=${2} ; shift ; shift ;;
    --file=*) file=${1/--file=/''} ; shift ;;
    --file*|-fl*) file=${2} ; shift ; shift ;;
    --footer-icon=*) footericon=${1/--footer-icon=/''} ; shift ;;
    --footer-icon*|-fi*) footericon=${2} ; shift ; shift ;;
    --footer=*) footer=${1/--footer=/''} ; shift ;;
    --footer*|-ft*) footer=${2} ; shift ; shift ;;
    --filter=*) filter=${1/--filter=/''} ; shift ;;
    --filter*|-f*) filter=${2} ; shift ; shift ;;
    --image=*) image=${1/--image=/''} ; shift ;;
    --image*|-im*) image=${2} ; shift ; shift ;;
    --monochrome|-m) monochrome='-M' ; shift ;;
    --minutes=*) minutes=${1/--minutes=/''} ; shift ;;
    --minutes*|-mn*) minutes=${2} ; shift ; shift ;;
    --page=*) page=${1/--page=/''} ; shift ;;
    --page|-pg*) page=${2} ; shift ; shift ;;
    --pretext=*) pretext=${1/--pretext=/''} ; shift ;;
    --pretext*|-pt*) pretext=${2} ; shift ; shift ;;
    --reminder=*) reminder=${1/--reminder=/''} ; shift ;;
    --reminder|-rm*) reminder=${2} ; shift ; shift ;;
    --text=*) text=${1/--text=/''} ; shift ;;
    --text*|-tx*) text=${2} ; shift ; shift ;;
    --thumbnail=*) thumbnail=${1/--thumbnail=/''} ; shift ;;
    --thumbnail*|-th*) thumbnail=${2} ; shift ; shift ;;
    --time=*) _time=${1/--time=/''} ; shift ;;
    --time|-tm*) _time=${2} ; shift ; shift ;;
    --timestamp-from=*) timestampfrom=${1/--timestamp-from=/''} ; shift ;;
    --timestamp-from|-tf*) timestampfrom=${2} ; shift ; shift ;;
    --timestamp-to=*) timestampto=${1/--timestamp-to=/''} ; shift ;;
    --timestamp-to|-tt*) timestampto=${2} ; shift ; shift ;;
    --timestamp=*) timestamp=${1/--timestamp=/''} ; shift ;;
    --timestamp*|-ts*) timestamp=${2} ; shift ; shift ;;
    --title-link=*) titlelink=${1/--title-link=/''} ; shift ;;
    --title-link*|-tl*) titlelink=${2} ; shift ; shift ;;
    --title=*) title=${1/--title=/''} ; shift ;;
    --title*|-ti*) title=${2} ; shift ; shift ;;
    --token=*) token=${1/--token=/''} ; shift ;;
    --token|-tk*) token=${2} ; shift ; shift ;;
    --trace|-x) trace='set -x' ; shift ;;
    --types=*) types=${1/--types=/''} ; shift ;;
    --types|-ty*) types=${2} ; shift ; shift ;;
    --user=*) user=${1/--user=/''} ; shift ;;
    --user|-ur*) user=${2} ; shift ; shift ;;
    *)
      # Positional arguments: meaning depends on the command, and on which
      # values were already provided (later positionals fill later slots).
      case "${cmd}${sub}" in
        chatdelete)
          [ -n "${1}" ] && [ -n "${timestamp}" ] && [ -z "${channel}" ] && channel=${1}
          [ -n "${1}" ] && [ -z "${timestamp}" ] && timestamp=${1}
          ;;
        chatsend)
          [ -n "${1}" ] && [ -n "${text}" ] && [ -z "${channel}" ] && channel=${1}
          [ -n "${1}" ] && [ -z "${text}" ] && text=${1}
          ;;
        chatupdate)
          [ -n "${1}" ] && [ -n "${text}" ] && [ -n "${timestamp}" ] && [ -z "${channel}" ] && channel=${1}
          [ -n "${1}" ] && [ -n "${text}" ] && [ -z "${timestamp}" ] && timestamp=${1}
          [ -n "${1}" ] && [ -z "${text}" ] && text=${1}
          ;;
        filedelete|fileinfo)
          [ -n "${1}" ] && [ -z "${file}" ] && file=${1}
          ;;
        fileupload)
          [ -n "${1}" ] && [ -n "${file}" ] && [ -z "${channels}" ] && channels=${1}
          [ -n "${1}" ] && [ -z "${file}" ] && file=${1}
          ;;
        snoozeinfo)
          [ -n "${1}" ] && [ -z "${user}" ] && user=${1}
          ;;
        snoozestart)
          [ -n "${1}" ] && [ -z "${minutes}" ] && minutes=${1}
          ;;
        statusedit)
          [ -n "${1}" ] && [ -n "${text}" ] && [ -z "${emoji}" ] && emoji=${1}
          [ -n "${1}" ] && [ -z "${text}" ] && text=${1}
          ;;
        reminderadd)
          [ -n "${1}" ] && [ -n "${text}" ] && [ -z "${_time}" ] && _time=${1}
          [ -n "${1}" ] && [ -z "${text}" ] && text=${1}
          ;;
        remindercomplete|reminderdelete|reminderinfo)
          [ -n "${1}" ] && [ -z "${reminder}" ] && reminder=${1}
          ;;
      esac
      shift
      ;;
  esac
done
# TRACING #########################################################################################
# Expands to "set -x" when --trace was given, otherwise to nothing.
${trace}
# ARGUMENT AND OPTION PROMPTING ###################################################################
# Interactively ask for any required values that were not supplied via
# arguments or options (read -e enables readline editing).
case "${cmd}${sub}" in
chatdelete)
[ -z "${channel}" ] && read -e -p 'Enter channel (e.g. #general): ' channel
[ -z "${timestamp}" ] && read -e -p 'Enter timestamp (e.g. 1405894322.002768): ' timestamp
;;
chatsend)
# Prompt for the optional attachment fields only when neither channel nor
# text was given on the command line (fully interactive mode).
[ -z "${channel}" ] && [ -z "${text}" ] && prompt='true'
[ -n "${prompt}" ] && [ -z "${actions}" ] && read -e -p 'Enter actions (e.g. {"type": “button", "style": "primary", "text": "my text", "url": "http://example.com"}, ...): ' actions
[ -n "${prompt}" ] && [ -z "${author}" ] && read -e -p 'Enter author name (e.g. slackbot): ' author
[ -n "${prompt}" ] && [ -z "${authoricon}" ] && read -e -p 'Enter author icon (e.g. a URL): ' authoricon
[ -n "${prompt}" ] && [ -z "${authorlink}" ] && read -e -p 'Enter author link (e.g. a URL): ' authorlink
[ -z "${channel}" ] && read -e -p 'Enter channel (e.g. #general): ' channel
[ -n "${prompt}" ] && [ -z "${color}" ] && read -e -p 'Enter color (e.g. good): ' color
[ -n "${prompt}" ] && [ -z "${fields}" ] && read -e -p 'Enter fields (e.g. {"title": "My Field Title", "value": "My field value", "short": true}, ...): ' fields
[ -n "${prompt}" ] && [ -z "${footer}" ] && read -e -p 'Enter footer (e.g. Hello footer!): ' footer
[ -n "${prompt}" ] && [ -z "${footericon}" ] && read -e -p 'Enter footer icon (e.g. a URL): ' footericon
[ -n "${prompt}" ] && [ -z "${image}" ] && read -e -p 'Enter image (e.g. a URL): ' image
[ -n "${prompt}" ] && [ -z "${pretext}" ] && read -e -p 'Enter pretext (e.g. Hello pretext!): ' pretext
[ -z "${text}" ] && read -e -p 'Enter text (e.g. Hello text!): ' text
[ -n "${prompt}" ] && [ -z "${thumbnail}" ] && read -e -p 'Enter thumbnail (e.g. a URL): ' thumbnail
[ -n "${prompt}" ] && [ -z "${_time}" ] && read -e -p 'Enter time (e.g. 123456789): ' _time
[ -n "${prompt}" ] && [ -z "${title}" ] && read -e -p 'Enter title (e.g. Hello title!): ' title
[ -n "${prompt}" ] && [ -z "${titlelink}" ] && read -e -p 'Enter title link (e.g. a URL): ' titlelink
;;
chatupdate)
[ -z "${text}" ] && read -e -p 'Enter text (e.g. Hello World!): ' text
[ -z "${timestamp}" ] && read -e -p 'Enter timestamp (e.g. 1405894322.002768): ' timestamp
[ -z "${channel}" ] && read -e -p 'Enter channel (e.g. #general): ' channel
;;
filedelete)
[ -z "${file}" ] && read -e -p 'Enter file (e.g. F2147483862): ' file
;;
fileinfo)
[ -z "${file}" ] && read -e -p 'Enter file (e.g. F2147483862): ' file
;;
fileupload)
[ -z "${file}" ] && read -e -p 'Enter file (e.g. file.log): ' file
[ -z "${channels}" ] && read -e -p 'Enter channels (e.g. #general,C1234567890): ' channels
;;
init)
# Re-resolve the currently stored token; only prompt when no new token was
# provided or the provided one equals the stored one.
if [ -n "${SLACK_CLI_TOKEN}" ]; then
_token="${SLACK_CLI_TOKEN}"
elif [ -r "${etcdir}/.slack" ] && [ -f "${etcdir}/.slack" ]; then
_token=$(sed -n '1p' < "${etcdir}/.slack")
fi
if [ -z "${token}" ] || [ "${token}" == "${_token}" ]; then
read -e -p 'Enter Slack API token: ' token
fi
;;
reminderadd)
[ -z "${text}" ] && read -e -p 'Enter text (e.g. lunch): ' text
[ -z "${_time}" ] && read -e -p 'Enter time (e.g. 123456789): ' _time
;;
remindercomplete|reminderdelete|reminderinfo)
[ -z "${reminder}" ] && read -e -p 'Enter reminder (e.g. RmCT7QGVBF): ' reminder
;;
snoozestart)
[ -z "${minutes}" ] && read -e -p 'Enter minutes (e.g. 60): ' minutes
;;
statusedit)
[ -z "${text}" ] && read -e -p 'Enter text (e.g. lunch): ' text
[ -z "${emoji}" ] && read -e -p 'Enter emoji (e.g. :hamburger:): ' emoji
;;
esac
# COMMAND UTILITY FUNCTIONS #######################################################################
# Build the Slack "attachments" JSON payload from the global option
# variables. With no extra fields set, the message text is sent as a
# markdown-enabled pretext; otherwise it becomes the attachment text and
# every supplied field is appended in a fixed order. Output bytes must
# match what chat.postMessage / chat.update expect.
function attachify() {
  local extras=''
  [ -n "${author}" ] && extras+=", \"author_name\": \"${author}\""
  [ -n "${authoricon}" ] && extras+=", \"author_icon\": \"${authoricon}\""
  [ -n "${authorlink}" ] && extras+=", \"author_link\": \"${authorlink}\""
  [ -n "${color}" ] && extras+=", \"color\": \"${color}\""
  [ -n "${footer}" ] && extras+=", \"footer\": \"${footer}\""
  [ -n "${footericon}" ] && extras+=", \"footer_icon\": \"${footericon}\""
  [ -n "${image}" ] && extras+=", \"image_url\": \"${image}\""
  [ -n "${pretext}" ] && extras+=", \"pretext\": \"${pretext}\""
  [ -n "${thumbnail}" ] && extras+=", \"thumb_url\": \"${thumbnail}\""
  [ -n "${_time}" ] && extras+=", \"ts\": \"${_time}\""
  [ -n "${title}" ] && extras+=", \"title\": \"${title}\""
  [ -n "${titlelink}" ] && extras+=", \"title_link\": \"${titlelink}\""
  [ -n "${fields}" ] && extras+=", \"fields\": [${fields}]"
  [ -n "${actions}" ] && extras+=", \"actions\": [${actions}]"
  if [ -z "${extras}" ]; then
    printf '%s\n' "[{\"mrkdwn_in\": [\"pretext\"], \"fallback\": \"${text}\", \"pretext\": \"${text}\"}]"
  else
    printf '%s\n' "[{\"mrkdwn_in\": [\"fields\", \"pretext\", \"text\"], \"fallback\": \"${text}\", \"text\": \"${text}\"${extras}}]"
  fi
}
function jqify() {
  # Render a Slack API JSON response through jq, honoring the global
  # --compact/--monochrome/--filter options, and return non-zero when the
  # response carries "ok": false.
  # Fix: the payload was previously expanded unquoted (echo ${1}), which
  # word-split the JSON and glob-expanded any '*' inside message text.
  # ${compact}/${monochrome} stay unquoted on purpose: empty => no flag.
  case "$(printf '%s' "${1}" | jq -r '.ok')" in
    true) printf '%s' "${1}" | jq -r ${compact} ${monochrome} "${filter:=.}" ;;
    *) printf '%s' "${1}" | jq -r ${compact} ${monochrome} "${filter:=.}" ; return 1 ;;
  esac
}
function lchannel() {
# Resolve the global ${channel} to a channel id. "@name" is resolved to the
# user's direct-message (im) channel id via users.list + im.list; any other
# value is passed through unchanged.
case "${channel}" in
@*)
local _user=$(\
curl -s -X POST https://slack.com/api/users.list --data-urlencode "token=${token}" 2>&1 | \
jq -r ".members | map(select(.name == \"${channel/@/}\" or .profile.display_name == \"${channel/@/}\")) | .[0].id")
local _channel=$(\
curl -s -X POST https://slack.com/api/im.list --data-urlencode "token=${token}" 2>&1 | \
jq -r ".ims | map(select(.user == \"${_user}\")) | .[].id")
echo ${_channel}
;;
*) echo ${channel} ;;
esac
}
function luser() {
# Resolve "@name" in the global ${user} to a user id via users.list
# (matching either the account name or the display name); other values
# pass through unchanged.
case "${user}" in
@*)
local _user=$(\
curl -s -X POST https://slack.com/api/users.list --data-urlencode "token=${token}" 2>&1 | \
jq -r ".members | map(select(.name == \"${user/@/}\" or .profile.display_name == \"${user/@/}\")) | .[0].id")
echo ${_user}
;;
*) echo ${user} ;;
esac
}
# COMMAND FUNCTIONS ###############################################################################
function chatdelete() {
# Delete a message (chat.delete), addressed by channel + message timestamp.
local msg=$(\
curl -s -X POST https://slack.com/api/chat.delete \
--data-urlencode "as_user=${as_user}" \
--data-urlencode "channel=$(lchannel)" \
--data-urlencode "ts=${timestamp}" \
--data-urlencode "token=${token}")
jqify "${msg}"
}
function chatsend() {
# Post a message (chat.postMessage). Piped stdin (with no --text) becomes a
# fenced code block; the text itself travels inside the attachments payload
# built by attachify, not as a top-level "text" field.
[ -n "${stdin}" ] && [ -z "${text}" ] && text=\`\`\`${stdin//$'\n'/'\n'}\`\`\`
local msg=$(\
curl -s -X POST https://slack.com/api/chat.postMessage \
--data-urlencode "as_user=${as_user}" \
--data-urlencode "attachments=$(attachify)" \
--data-urlencode "channel=$(lchannel)" \
--data-urlencode "token=${token}")
jqify "${msg}"
}
function chatupdate() {
# Update an existing message (chat.update), addressed by channel + timestamp;
# stdin handling and attachments mirror chatsend.
[ -n "${stdin}" ] && [ -z "${text}" ] && text=\`\`\`${stdin//$'\n'/'\n'}\`\`\`
local msg=$(\
curl -s -X POST https://slack.com/api/chat.update \
--data-urlencode "as_user=${as_user}" \
--data-urlencode "attachments=$(attachify)" \
--data-urlencode "channel=$(lchannel)" \
--data-urlencode "ts=${timestamp}" \
--data-urlencode "token=${token}")
jqify "${msg}"
}
function help() {
  # Print CLI usage. Short-flag spellings below are kept in sync with the
  # option parser (fixes: -fl/-fln/-flt/-flds/-tx/-tm/-rm were missing their
  # dash or used a short form the parser does not accept; compact is -cp).
  local a=(${0//\// })
  local bin=${a[${#a[@]}-1]}
  echo 'Usage:'
  echo "  ${bin} chat delete [<timestamp> [channel]]"
  echo '    [--channel|-ch <channel>] [--compact|-cp] [--filter|-f <filter>] [--monochrome|-m]'
  echo '    [--timestamp|-ts <timestamp>] [--trace|-x]'
  echo
  echo "  ${bin} chat send [<text> [channel]]"
  echo '    [--author|-at <author>] [--author-icon|-ai <author-icon-url>]'
  echo '    [--author-link|-al <author-link>] [--channel|-ch <channel>] [--color|-cl <color>]'
  echo '    [--compact|-cp] [--fields|-flds <fields>] [--filter|-f <filter>] [--footer|-ft <footer>]'
  echo '    [--footer-icon|-fi <footer-icon-url>] [--image|-im <image-url>] [--monochrome|-m]'
  echo '    [--pretext|-pt <pretext>] [--text|-tx <text>] [--thumbnail|-th <thumbnail-url>]'
  echo '    [--time|-tm <time>] [--title|-ti <title>] [--title-link|-tl <title-link>]'
  echo '    [--trace|-x]'
  echo
  echo "  ${bin} chat update [<text> [<timestamp> [channel]]]"
  echo '    [--author|-at <author>] [--author-icon|-ai <author-icon-url>]'
  echo '    [--author-link|-al <author-link>] [--channel|-ch <channel>] [--color|-cl <color>]'
  echo '    [--compact|-cp] [--fields|-flds <fields>] [--filter|-f <filter>] [--footer|-ft <footer>]'
  echo '    [--footer-icon|-fi <footer-icon-url>] [--image|-im <image-url>] [--monochrome|-m]'
  echo '    [--pretext|-pt <pretext>] [--text|-tx <text>] [--thumbnail|-th <thumbnail-url>]'
  echo '    [--time|-tm <time>] [--timestamp|-ts <timestamp>] [--title|-ti <title>]'
  echo '    [--title-link|-tl <title-link>] [--trace|-x]'
  echo
  echo "  ${bin} init"
  echo '    [--compact|-cp] [--filter|-f <filter>] [--monochrome|-m] [--token|-tk <token>]'
  echo '    [--trace|-x]'
  echo
  echo "  ${bin} file delete [file]"
  echo '    [--compact|-cp] [--file|-fl <file>] [--filter|-f <filter>] [--monochrome|-m]'
  echo '    [--trace|-x]'
  echo
  echo "  ${bin} file info [file]"
  echo '    [--count|-cn <count>] [--compact|-cp] [--file|-fl <file>] [--filter|-f <filter>]'
  echo '    [--monochrome|-m] [--page|-pg <page>] [--trace|-x]'
  echo
  echo "  ${bin} file list"
  echo '    [--channel|-ch <channel>] [--count|-cn <count>] [--compact|-cp] [--filter|-f <filter>]'
  echo '    [--monochrome|-m] [--page|-pg <page>] [--timestamp-from|-tf <timetamp>]'
  echo '    [--timestamp-to|-tt <timestamp>] [--trace|-x] [--types|-ty <types>]'
  echo '    [--user|-ur <user>]'
  echo
  echo "  ${bin} file upload [<file> [channels]]"
  echo '    [--channels|-chs <channels>] [--comment|-cm <comment>] [--compact|-cp]'
  echo '    [--file|-fl <file>] [--filename|-fln <filename>] [--filetype|-flt <filetype>]'
  echo '    [--filter|-f <filter>] [--monochrome|-m] [--title|-ti <title>] [--trace|-x]'
  echo
  echo "  ${bin} presence active"
  echo '    [--compact|-cp] [--filter|-f <filter>] [--monochrome|-m] [--trace|-x]'
  echo
  echo "  ${bin} presence away"
  echo '    [--compact|-cp] [--filter|-f <filter>] [--monochrome|-m] [--trace|-x]'
  echo
  echo "  ${bin} reminder add [<text> [time]]"
  echo '    [--compact|-cp] [--filter|-f <filter>] [--monochrome|-m] [--text|-tx <text>]'
  echo '    [--time|-tm <time>] [--trace|-x] [--user|-ur <user>]'
  echo
  echo "  ${bin} reminder complete [reminder]"
  echo '    [--compact|-cp] [--filter|-f <filter>] [--monochrome|-m] [--reminder|-rm <reminder>]'
  echo '    [--trace|-x]'
  echo
  echo "  ${bin} reminder delete [reminder]"
  echo '    [--compact|-cp] [--filter|-f <filter>] [--monochrome|-m] [--reminder|-rm <reminder>]'
  echo '    [--trace|-x]'
  echo
  echo "  ${bin} reminder info [reminder]"
  echo '    [--compact|-cp] [--filter|-f <filter>] [--monochrome|-m] [--reminder|-rm <reminder>]'
  echo '    [--trace|-x]'
  echo
  echo "  ${bin} reminder list"
  echo '    [--compact|-cp] [--filter|-f <filter>] [--monochrome|-m] [--trace|-x]'
  echo
  echo "  ${bin} snooze end"
  echo '    [--compact|-cp] [--filter|-f <filter>] [--monochrome|-m] [--trace|-x]'
  echo
  echo "  ${bin} snooze info [user]"
  echo '    [--compact|-cp] [--filter|-f <filter>] [--monochrome|-m] [--trace|-x]'
  echo '    [--user|-ur <user>]'
  echo
  echo "  ${bin} snooze start [minutes]"
  echo '    [--compact|-cp] [--filter|-f <filter>] [--minutes|-mn <minutes>] [--monochrome|-m]'
  echo '    [--trace|-x]'
  echo
  echo "  ${bin} status clear"
  echo '    [--compact|-cp] [--filter|-f <filter>] [--monochrome|-m] [--trace|-x]'
  echo
  echo "  ${bin} status edit [<text> [<emoji>]]"
  echo '    [--compact|-cp] [--filter|-f <filter>] [--monochrome|-m] [--trace|-x]'
  echo
  echo 'Configuration Commands:'
  echo '  init                 Initialize'
  echo
  echo 'Chat Commands:'
  echo '  chat delete          Delete chat message'
  echo '  chat send            Send chat message'
  echo '  chat update          Update chat message'
  echo
  echo 'File Commands:'
  echo '  file delete          Delete file'
  echo '  file info            Info about file'
  echo '  file list            List files'
  echo '  file upload          Upload file'
  echo
  echo 'Presence Commands:'
  echo '  presence active      Active presence'
  echo '  presence away        Away presence'
  echo
  echo 'Reminder Commands:'
  echo '  reminder add         Add reminder'
  echo '  reminder complete    Complete reminder'
  echo '  reminder delete      Delete reminder'
  echo '  reminder info        Info about reminder'
  echo '  reminder list        List reminders'
  echo
  echo 'Snooze Commands:'
  echo '  snooze end           End snooze'
  echo '  snooze info          Info about snooze'
  echo '  snooze start         Start snooze'
  echo
  echo 'Status Commands:'
  echo '  status clear         Clear status'
  echo '  status edit          Edit status'
  echo
  echo 'More Information:'
  echo '  repo                 https://github.com/rockymadden/slack-cli'
}
function filedelete() {
# Delete a file by id (files.delete).
local msg=$(\
curl -s -X POST https://slack.com/api/files.delete \
--data-urlencode "file=${file}" \
--data-urlencode "token=${token}")
jqify "${msg}"
}
function fileinfo() {
# Fetch file metadata and paged comments (files.info).
# ${var:+ ...} adds a parameter only when the option was supplied.
local msg=$(\
curl -s -X POST https://slack.com/api/files.info \
${count:+ --data-urlencode "count=${count}"} \
--data-urlencode "file=${file}" \
${page:+ --data-urlencode "page=${page}"} \
--data-urlencode "token=${token}")
jqify "${msg}"
}
function filelist() {
# List files (files.list), optionally filtered by channel, user, types and
# a timestamp range; user names are resolved to ids via luser.
local msg=$(\
curl -s -X POST https://slack.com/api/files.list \
${channel:+ --data-urlencode "channel=${channel}"} \
${count:+ --data-urlencode "count=${count}"} \
${page:+ --data-urlencode "page=${page}"} \
${timestampfrom:+ --data-urlencode "ts_from=${timestampfrom}"} \
${timestampto:+ --data-urlencode "ts_to=${timestampto}"} \
${types:+ --data-urlencode "types=${types}"} \
${user:+ --data-urlencode "user=$(luser)"} \
--data-urlencode "token=${token}")
jqify "${msg}"
}
function fileupload() {
  # Upload a file (files.upload). Piped stdin (captured earlier into
  # ${stdin}) is used when no --file was given. If ${file} names a real file
  # it is sent as a multipart upload; otherwise its value is sent as literal
  # content.
  # Fix: the filename fallback previously invoked a non-existent command
  # ("$(unknown)"), so --filename was silently ignored and errors were
  # printed; it now honors the --filename option.
  [ -n "${stdin}" ] && [ -z "${file}" ] && file=${stdin}
  if [ -f "${file}" ]; then
    local _file=${file}
    case "${filename}" in
      '') local _filename=$(basename "${file}") ;;
      *) local _filename=${filename} ;;
    esac
  else
    local _content=${file}
    case "${filename}" in
      '') local _filename='stdin' ;;
      *) local _filename=${filename} ;;
    esac
  fi
  case "${_file}" in
    '')
      # Literal content: plain form-urlencoded POST.
      local msg=$(\
        curl -s -X POST https://slack.com/api/files.upload \
        ${channels:+ --data-urlencode "channels=${channels}"} \
        ${comment:+ --data-urlencode "initial_comment=${comment}"} \
        ${_content:+ --data-urlencode "content=${_content}"} \
        ${_filename:+ --data-urlencode "filename=${_filename}"} \
        ${filetype:+ --data-urlencode "filetype=${filetype}"} \
        ${title:+ --data-urlencode "title=${title}"} \
        --data-urlencode "token=${token}")
      ;;
    *)
      # Real file on disk: multipart/form-data upload.
      local msg=$(\
        curl -s -X POST https://slack.com/api/files.upload \
        ${channels:+ --form-string "channels=${channels}"} \
        ${comment:+ --form "initial_comment=${comment}"} \
        ${_file:+ --form "file=@${_file}"} \
        ${_filename:+ --form "filename=${_filename}"} \
        ${filetype:+ --form "filetype=${filetype}"} \
        ${title:+ --form "title=${title}"} \
        --form "token=${token}")
      ;;
  esac
  jqify "${msg}"
}
function init() {
# Persist the token to the .slack file next to the script and report the
# result as a JSON status object (rendered through jq like API responses).
echo "${token}" > "${etcdir}/.slack"
case "${?}" in
0) echo '{"ok": true}' | jq -r ${compact} ${monochrome} "${filter:=.}" ;;
*)
echo '{"ok": false, "error": "not_writable"}' |
jq -r ${compact} ${monochrome} "${filter:=.}" ; return 1
;;
esac
}
# Setting presence=auto + setActive works more consistently than just setActive.
function presenceactive() {
# Mark the user active: users.setPresence(auto) followed by users.setActive.
# Only the first response is shown; the second is checked silently.
local msg0=$(\
curl -s -X POST https://slack.com/api/users.setPresence \
--data-urlencode "presence=auto" \
--data-urlencode "token=${token}")
local msg1=$(\
curl -s -X POST https://slack.com/api/users.setActive \
--data-urlencode "token=${token}")
jqify "${msg0}" && jqify "${msg1}" >/dev/null 2>&1
}
function presenceaway() {
# Mark the user away via users.setPresence(away).
local msg=$(\
curl -s -X POST https://slack.com/api/users.setPresence \
--data-urlencode "presence=away" \
--data-urlencode "token=${token}")
jqify "${msg}"
}
function reminderadd() {
# Create a reminder (reminders.add); optional --user targets another user
# (name resolved to an id via luser).
local msg=$(\
curl -s -X POST https://slack.com/api/reminders.add \
--data-urlencode "text=${text}" \
--data-urlencode "time=${_time}" \
${user:+ --data-urlencode "user=$(luser)"} \
--data-urlencode "token=${token}")
jqify "${msg}"
}
function remindercomplete() {
# Mark a reminder complete by id (reminders.complete).
local msg=$(\
curl -s -X POST https://slack.com/api/reminders.complete \
--data-urlencode "reminder=${reminder}" \
--data-urlencode "token=${token}")
jqify "${msg}"
}
function reminderdelete() {
# Delete a reminder by id (reminders.delete).
local msg=$(\
curl -s -X POST https://slack.com/api/reminders.delete \
--data-urlencode "reminder=${reminder}" \
--data-urlencode "token=${token}")
jqify "${msg}"
}
function reminderinfo() {
# Show a single reminder by id (reminders.info).
local msg=$(\
curl -s -X POST https://slack.com/api/reminders.info \
--data-urlencode "reminder=${reminder}" \
--data-urlencode "token=${token}")
jqify "${msg}"
}
function reminderlist() {
# List all reminders for the token's user (reminders.list).
local msg=$(\
curl -s -X POST https://slack.com/api/reminders.list \
--data-urlencode "token=${token}")
jqify "${msg}"
}
function snoozeend() {
# End do-not-disturb snooze (dnd.endSnooze).
local msg=$(\
curl -s -X POST https://slack.com/api/dnd.endSnooze \
--data-urlencode "token=${token}")
jqify "${msg}"
}
function snoozeinfo() {
# Show do-not-disturb status (dnd.info), optionally for another user
# (name resolved via luser).
local msg=$(\
curl -s -X POST https://slack.com/api/dnd.info \
${user:+ --data-urlencode "user=$(luser)"} \
--data-urlencode "token=${token}")
jqify "${msg}"
}
function snoozestart() {
# Start do-not-disturb snooze for ${minutes} minutes (dnd.setSnooze).
local msg=$(\
curl -s -X POST https://slack.com/api/dnd.setSnooze \
--data-urlencode "num_minutes=${minutes}" \
--data-urlencode "token=${token}")
jqify "${msg}"
}
function statusclear() {
# Clear the user's status text and emoji (users.profile.set with empties).
local msg=$(\
curl -s -X POST https://slack.com/api/users.profile.set \
--data-urlencode 'profile={"status_text":"", "status_emoji":""}' \
--data-urlencode "token=${token}")
jqify "${msg}"
}
function statusedit() {
# Set the user's status text and emoji (users.profile.set).
local msg=$(\
curl -s -X POST https://slack.com/api/users.profile.set \
--data-urlencode "profile={\"status_text\":\"${text}\", \"status_emoji\":\"${emoji}\"}" \
--data-urlencode "token=${token}")
jqify "${msg}"
}
# Print the CLI version string.
function version() {
  printf '%s\n' '0.18.0'
}
# COMMAND ROUTING #################################################################################
# --help/--version and init are handled directly; every API-backed command
# requires a token and then dispatches to the function named "${cmd}${sub}"
# (e.g. "chat send" -> chatsend). Anything else prints usage and fails.
case "${cmd}${sub}" in
--help|-h) help ; exit 0 ;;
--version|-v) version ; exit 0 ;;
init) init ; exit $? ;;
chatdelete|chatsend|chatupdate|\
filedelete|fileinfo|filelist|fileupload|\
presenceactive|presenceaway|\
reminderadd|remindercomplete|reminderdelete|reminderinfo|reminderlist|\
snoozeend|snoozeinfo|snoozestart|\
statusclear|statusedit)
# Without a token no API call can succeed: report and fail.
if [ -z "${token}" ]; then
echo '{"ok": false, "error": "not_inited"}' |
jq -r ${compact} ${monochrome} "${filter:=.}" ; exit 1
fi
${cmd}${sub} ; exit $?
;;
*) help ; exit 1 ;;
esac
| true |
7ec4df563331551d359ba67642b80d8ac7f08d62 | Shell | nbaksalyar/rust-playground | /compiler/fetch.sh | UTF-8 | 288 | 2.8125 | 3 | [
"Apache-2.0",
"MIT"
] | permissive | #!/bin/bash
# Mirror the builder Docker images locally and retag them without the
# repository prefix. -e: stop on error, -u: unset vars are errors,
# -v: echo lines as read, pipefail: pipelines fail on any stage.
set -euv -o pipefail
repository=shepmaster
for image in rust-stable rust-beta rust-nightly rustfmt clippy miri; do
docker pull "${repository}/${image}"
# The backend expects images without a repository prefix
docker tag "${repository}/${image}" "${image}"
done
| true |
909270f486e0cb23175195c564916065f301f2a7 | Shell | PlatinMarket/docker-images | /baseimage/build/bin/toolbox | UTF-8 | 184 | 3.34375 | 3 | [] | no_license | #!/usr/bin/env bash
# Dispatch a toolbox subcommand: "toolbox <cmd> [args...]" runs
# "$TOOLBOXFOLDER/<cmd>.sh" with the remaining arguments.
# Fixes: the old string surgery (args="${args/$cmd /}") left the command
# name in the argument list when no further args were given (and broke if
# the command name recurred in the args), and every expansion was unquoted,
# so paths/args containing spaces misbehaved.
cmd="$1"
shift
script="$TOOLBOXFOLDER/$cmd.sh"
if [ -e "$script" ]; then
  /bin/bash "$script" "$@"
else
  echo "Command $script not found"
  exit 1
fi
| true |
77ee48e01126df79d4a770c83481ea4f6e4ecde6 | Shell | claytonfraga/covid19 | /gitclean.sh | UTF-8 | 490 | 3.203125 | 3 | [] | no_license | ## This script is used to clean all git commit
# With argument "all": squash the entire history into one commit by creating
# an orphan branch, committing everything, deleting master and renaming the
# orphan branch to master. Destructive -- old history becomes unreachable.
if [[ "$1" = 'all' ]];then
echo "Clean all git commit"
git checkout --orphan latest_branch
git add -A
git commit -am "Delete all previous commit"
git branch -D master
git branch -m master
fi
# Drop the backup refs and reflog that keep rewritten commits alive.
echo "Cleanup refs and logs"
rm -Rf .git/refs/original
rm -Rf .git/logs/
echo "Cleanup unnecessary files"
git gc --aggressive --prune=now
echo "Prune all unreachable objects"
git prune --expire now
#git push -f origin master | true |
be4773965b1188e5eef5cf046f21084b1305baf6 | Shell | 4lee/scripts | /5xx.sh | UTF-8 | 1,051 | 3.484375 | 3 | [] | no_license | #/bin/bash
# Endlessly probe each URL in $domain_file with 21 HEAD requests per round.
# When 18 or more probes in a round match the status pattern, log the event
# and send up to 4 alert mails (via /data/mail.py), sleeping 5 minutes after
# each mail. Rewritten to be syntactically valid: the previous version used
# "=~" inside single brackets, had a stray backtick, a duplicated "else" and
# missing "fi"s, so it could not run at all.
INSTANCEIP=$(curl ifconfig.me) # Store Public IP in a variable
domain_file=/data/ril_domains.txt
mail_count=0
while true
do
  while IFS= read -r URL
  do
    count=0
    status_code_count=0
    while [ "$count" -le 20 ]
    do
      curl_code=$(curl -o /dev/null --silent --head --write-out '%{http_code}\n' "$URL")
      # NOTE(review): this counts responses matching 200, i.e. it alerts
      # when most probes *succeed*; the commented-out pattern above in the
      # original (400|...|505) suggests error codes may have been intended
      # -- confirm the desired condition.
      if [[ "$curl_code" =~ 200 ]]; then
        status_code_count=$((status_code_count + 1))
      fi
      count=$((count + 1))
    done
    if [ "$status_code_count" -ge 18 ]; then
      if [ "$mail_count" -le 3 ]; then
        {
          echo "We are recieving multiple URL faliure for $URL on $INSTANCEIP"
          echo "Current count is $status_code_count"
        } > /data/status_code.log
        python /data/mail.py
        mail_count=$((mail_count + 1))
        sleep 5m
      fi
    else
      echo "No alert triggered" > /data/status_code.log
    fi
  done < "$domain_file"
done
| true |
de5bb68868553c9c0b593c1fbafacda72fa71fd8 | Shell | PhilipTrauner/dotfiles-old | /install.sh | UTF-8 | 7,104 | 3.609375 | 4 | [
"MIT"
] | permissive | #!/bin/bash
set -e
if [[ $EUID -eq 0 ]]; then
echo "This script should not be run as root." 1>&2
exit 1
fi
COLOR='\033[0;33m'
END='\033[0m\n' # No Color
DEVICE_SPECIFIC=false
read -p "If you are on a slow connection executing this script with caffeinate is recommended (caffeinate -isd ./install.sh)"
# Ask for the administrator password upfront.
sudo -v
if system_profiler SPHardwareDataType | grep -q "Mac mini"; then
MAC='mm'
elif system_profiler SPHardwareDataType | grep -q "MacBook Pro"; then
MAC='mb'
else
printf "${COLOR}Unrecognized Mac type${END}"
exit 1
fi
read -r -p "Apply device specific settings? [y/N] " response
if [[ "$response" =~ ^([yY][eE][sS]|[yY])+$ ]]
then
DEVICE_SPECIFIC=true
fi
if [[ "$MAC" = "mb" ]]
then
echo "MacBook detected."
if [ "$DEVICE_SPECIFIC" = true ] ; then
printf "${COLOR}Setting hostname: Aperture${END}"
sudo scutil --set HostName Aperture
sudo scutil --set ComputerName Aperture
sudo scutil --set LocalHostName Aperture
fi
elif [[ "$MAC" = "mm" ]]
then
echo "Mac mini detected."
if [ "$DEVICE_SPECIFIC" = true ] ; then
printf "${COLOR}Setting hostname: Abstergo${END}"
sudo scutil --set HostName Abstergo
sudo scutil --set ComputerName Abstergo
sudo scutil --set LocalHostName Abstergo
printf "${COLOR}Disabling energy saving${END}"
sudo pmset -a displaysleep 0 womp 1 disksleep 1 autorestart 1 powernap 1
fi
fi
# Keep-alive: update existing `sudo` time stamp until the script has finished.
while true; do sudo -n true; sleep 60; kill -0 "$$" || exit; done 2>/dev/null &
printf "${COLOR}Installing command line tools${END}"
xcode-select --install || echo "Xcode command line tools already installed"
read -p "Press Enter when either the command line tools or Xcode are installed"
command -v clang >/dev/null 2>&1 || { echo "Command line tools aren't installed"; exit 1; }
printf "${COLOR}Installing brew${END}"
command -v brew >/dev/null 2>&1 || { ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"; }
printf "${COLOR}Automatically load keys into ssh-agent${END}"
mkdir -p ~/.ssh
cat > ~/.ssh/config <<EOF
Host *
AddKeysToAgent yes
UseKeychain yes
IdentityFile ~/.ssh/id_rsa
EOF
printf "${COLOR}Overriding .zshrc${END}"
# Copy zshrc
cp .zshrc ~/
printf "${COLOR}Installing antigen${END}"
mkdir -p ~/.antigen
curl -L git.io/antigen > ~/.antigen/antigen.zsh
touch ~/.hushlogin
printf "${COLOR}Unhiding ~/Library${END}"
chflags nohidden ~/Library/
printf "${COLOR}Creating Developer folder${END}"
[[ -d ~/Developer ]] || mkdir ~/Developer
printf "${COLOR}Setting defaults${END}"
# Don't write .DS_Store files to network drives and external storage media
defaults write com.apple.desktopservices DSDontWriteNetworkStores -bool true
defaults write com.apple.desktopservices DSDontWriteUSBStores -bool true
defaults write NSGlobalDomain AppleShowAllExtensions -bool true
defaults write NSGlobalDomain NSDocumentSaveNewDocumentsToCloud -bool false
defaults write com.apple.ImageCapture disableHotPlug -bool true
# Expand save panel by default
defaults write NSGlobalDomain NSNavPanelExpandedStateForSaveMode -bool true
defaults write NSGlobalDomain NSNavPanelExpandedStateForSaveMode2 -bool true
# Trackpad: enable tap to click for the login screen
defaults -currentHost write NSGlobalDomain com.apple.mouse.tapBehavior -int 1
# Save screenshots in PNG format
defaults write com.apple.screencapture type -string "png"
# Check for software updates daily, not just once per week
defaults write com.apple.SoftwareUpdate ScheduleFrequency -int 1
defaults write com.apple.menuextra.battery ShowPercent -bool true
# Crash reports as notifications
defaults write com.apple.CrashReporter UseUNC 1
# Disable MissionControl
defaults write com.apple.dashboard mcx-disabled -boolean true
# Use plain text in TextEdit
defaults write com.apple.TextEdit RichText -int 0
# Finder
defaults write com.apple.finder ShowPathbar -bool true
# When performing a search, search the current folder by default
defaults write com.apple.finder FXDefaultSearchScope -string "SCcf"
# Disable the warning when changing a file extension
defaults write com.apple.finder FXEnableExtensionChangeWarning -bool false
# Use list view in all Finder windows by default
defaults write com.apple.finder FXPreferredViewStyle -string "Nlsv"
# Safari
defaults write com.apple.Safari ShowFullURLInSmartSearchField -bool true
# Time Machine
defaults write com.apple.TimeMachine DoNotOfferNewDisksForBackup -bool true
# Dock
defaults write com.apple.dock autohide -bool true
defaults write com.apple.dock tilesize -int 50
# HyperDock
defaults write de.bahoom.HyperDock disclaimer_accepted -int 1
defaults write de.bahoom.HyperDock itunes_preview_ratings -int 0
defaults write de.bahoom.HyperDock move_windows -int 0
defaults write de.bahoom.HyperDock license_accepted -int 1
defaults write de.bahoom.HyperDock keyboard_arrange -int 0
defaults write de.bahoom.HyperDock resize_windows -int 0
defaults write de.bahoom.HyperDock window_snapping_delay_near -float 0.2
defaults write de.bahoom.HyperDock titlebar_scroll_arrange -int 0
# Spotify Notifications
defaults write io.citruspi.Spotify-Notifications iconSelection -int 2
defaults write io.citruspi.Spotify-Notifications playpausenotifs -int 1
# Spotlight
defaults write com.apple.Spotlight showedLearnMore -int 1
# iTerm2
defaults write com.googlecode.iterm2 PrefsCustomFolder -string "$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
defaults write com.googlecode.iterm2 LoadPrefsFromCustomFolder -int 1
printf "${COLOR}Disabling Time Machine${END}"
# Disable Time Machine
sudo tmutil disable
printf "${COLOR}Overriding Spectacle config${END}"
mkdir -p ~/Library/Application\ Support/Spectacle
cp Shortcuts.json ~/Library/Application\ Support/Spectacle
printf "${COLOR}Installing BaseBrewfile${END}"
brew bundle --file=BaseBrewfile || echo "Some packages could not be installed."
# Mac App Store apps
# (not included in Brewfile to allow for non-blocking install)
# Pixelmator, Keynote, Pages, Numbers, Xcode, Microsoft Remote Desktop 10
apps=("407963104" "409183694" "409201541" "409203825" \
"497799835" "1295203466")
printf "${COLOR}Installing specific Brewfile${END}"
if [[ "$MAC" = "mb" ]]
then
brew bundle --file=MacBookBrewfile
# Solitaire
apps+=("515113678")
# Amphetamine
apps+=("937984704")
elif [[ "$MAC" = "mm" ]]
then
brew bundle --file=MacMiniBrewfile
fi
printf "${COLOR}Installing Mac App Store apps in background${END}"
mas install ${apps[*]} > /dev/null 2>&1 &
printf "${COLOR}Change shell to zsh${END}"
sudo python -c 'if not "/usr/local/bin/zsh" in open("/etc/shells").read(): open("/etc/shells", "a").write("/usr/local/bin/zsh\n")'
sudo chsh -s /usr/local/bin/zsh $(whoami)
printf "${COLOR}Installing rustup${END}"
curl https://sh.rustup.rs -sSf > rustup.sh
chmod +x rustup.sh
./rustup.sh -y
rm rustup.sh
printf "${COLOR}Resetting Launchpad${END}"
defaults write com.apple.dock ResetLaunchPad -bool true; killall Dock
# Apply changes with reboot
read -p "Press Enter to restart (Crtl+C to skip)"
sudo reboot
| true |
ae520f9c68ea3e490110332be49108c640612bd3 | Shell | fostane/Trishul | /scripts/openredirect.sh | UTF-8 | 741 | 2.96875 | 3 | [] | no_license | #!/bin/bash
mkdir -p ~/recon/$1/OpenRedirect
results=~/recon/$1/OpenRedirect
echo -e "\n[+] gathering Open Redirect urls for $1...!!"
cat ~/recon/$1/Extraction/GF/$1_redirect.txt | qsreplace FUZZ | tee -a $results/$1_potential_redirects.txt
cat ~/recon/$1/Extraction/$1_param_value.txt | qsreplace FUZZ | tee -a $results/$1_potential_redirects.txt
cat $results/$1_potential_redirects.txt | sort -u | tee -a $results/$1_potential_unique_redirects.txt
echo -e "\n[+] Running OpenRedireX on $1...!!"
python3 ~/tools/OpenRedireX/openredirex.py -l $results/$1_potential_unique_redirects.txt -p ~/tools/OpenRedireX/payloads.txt --keyword FUZZ | tee -a $results/$1_open_redirects.txt
echo -e "\n[+] Completed...!! Results stored in: $results" | true |
8208b7037ee4858c74e1c0dd895179d0e32333eb | Shell | FOSCAR-2020/segmentation | /contest.sh | UTF-8 | 636 | 2.59375 | 3 | [
"MIT"
] | permissive | #!bin/bash
for file1 in ~/yolact/polygon/1/*
do
for file2 in $file1/*
do
a=$file2
b=${a: -1}
c='t'
if [[ "${b}" != "${c}" ]]
then
python3 eval.py --trained_model=weights/yolact_im700_54_800000.pth --score_threshold=0.15 --top_k=15 --images=$a:$file1"/"$b"_result"
fi
done
done
echo " "
for file1 in ~/yolact/polygon/2/*
do
for file2 in $file1/*
do
a=$file2
b=${a: -1}
c='t'
if [[ "${b}" != "${c}" ]]
then
python3 eval.py --trained_model=weights/yolact_im700_54_800000.pth --score_threshold=0.15 --top_k=15 --images=$a:$file1"/"$b"_result"
fi
done
done
| true |
6f938909634ab5291e4ef8b5b4450355de1f19ea | Shell | mchchiang/senescence | /analysis/run/ContactFractionAvg.sh | UTF-8 | 1,385 | 3.3125 | 3 | [] | no_license | #!/bin/bash
ehh_start=$1
ehh_end=$2
ehh_inc=$3
ehl_start=$4
ehl_end=$5
ehl_inc=$6
run_start=$7
run_end=$8
run_inc=$9
in_dir=${10}
out_dir=${11}
# Selection arguments
N=6303
L=35
chr=20
# Average
avg_py="../src/TimeAverage.py"
multi_avg_py="../src/AverageMultiFiles.py"
t_start=150000
t_end=200000
t_inc=1000
out_file="${out_dir}/wall-frac_sene_chr_${chr}_L_${L}.dat"
> $out_file
ehh=$(python -c "print '%.1f' % ($ehh_start)")
ehl=$(python -c "print '%.1f' % ($ehl_start)")
while (( $(bc <<< "$ehh < $ehh_end") ))
do
ehl=$(python -c "print '%.1f' % ($ehl_start)")
while (( $(bc <<< "$ehl < $ehl_end") ))
do
echo "Doing ehh = $ehh ehl = $ehl"
name="sene_chr_${chr}_L_${L}_HH_${ehh}_HL_${ehl}"
for (( run=$run_start; $run<=$run_end; run+=$run_inc ))
do
frac_file="${in_dir}/wall-frac_${name}_run_${run}.dat"
avg_file="${out_dir}/wall-frac_${name}_run_${run}_avg.dat"
python $avg_py 0 1 $t_start $t_end $t_inc $frac_file $avg_file
# data=$(cat $avg_file)
# echo $data >> $out_file
done
multi_avg_file="${out_dir}/wall-frac_${name}_avg.dat"
python $multi_avg_py -1 0 -1 -1 $multi_avg_file "${out_dir}/wall-frac_${name}_run"*_avg.dat
data=$(cat $multi_avg_file)
echo $ehh $ehl $data >> $out_file
ehl=$(python -c "print '%.1f' % ($ehl + $ehl_inc)")
done
echo >> $out_file
ehh=$(python -c "print '%.1f' % ($ehh + $ehh_inc)")
done
| true |
56f3c6e16f684bf120ee174b8c057bc9c120613a | Shell | kirksw/dotfiles | /resources/scripts/.local/bin/scripts/brightness | UTF-8 | 524 | 3 | 3 | [] | no_license | #!/bin/sh
dir_icons="/usr/local/bin/icons"
max_bri=$(cat /sys/class/backlight/*/max_brightness)
cur_bri=$(cat /sys/class/backlight/*/brightness)
# calculating current percentage
cur_per=$((cur_bri * 100 / max_bri))
if [ "$cur_per" -ge 50 ]; then
dunstify -a "Screen" \
"Screen" \
"Brightness: $cur_per%" \
-r 100 \
-i "$dir_icons"/sun.svg
else
dunstify -a "Screen" \
"Screen" \
"Brightness: $cur_per%" \
-r 100 \
-i "$dir_icons"/moon.svg
fi | true |
443682b07b82cce792c33307d76d450b914de2eb | Shell | awalkerca/dotfiles | /profiles/linux_home/install/custom_applications.sh | UTF-8 | 1,227 | 2.859375 | 3 | [] | no_license | #!/bin/bash
## Albert (Alfred clone)
echo 'deb http://download.opensuse.org/repositories/home:/manuelschneid3r/xUbuntu_22.04/ /' | sudo tee /etc/apt/sources.list.d/home:manuelschneid3r.list
curl -fsSL https://download.opensuse.org/repositories/home:manuelschneid3r/xUbuntu_22.04/Release.key | gpg --dearmor | sudo tee /etc/apt/trusted.gpg.d/home_manuelschneid3r.gpg > /dev/null
sudo apt update
apt install albert
# Install 1password
wget https://downloads.1password.com/linux/debian/amd64/stable/1password-latest.deb -O 1password-latest.deb
sudo dpkg -i 1password-latest.deb
rm 1password-latest.deb
# vim-plug
curl -fLo ~/.vim/autoload/plug.vim --create-dirs https://raw.githubusercontent.com/junegunn/vim-plug/master/plug.vim
# Install wine
sudo dpkg --add-architecture i386
sudo mkdir -pm755 /etc/apt/keyrings
sudo wget -O /etc/apt/keyrings/winehq-archive.key https://dl.winehq.org/wine-builds/winehq.key
sudo wget -NP /etc/apt/sources.list.d/ https://dl.winehq.org/wine-builds/ubuntu/dists/jammy/winehq-jammy.sources
sudo apt update
sudo apt install --install-recommends winehq-stable
# dice roll script
git clone git@github.com:shadsbot/roll.git ~/Code
mv ~/Code/roll/roll.py /usr/local/bin/roll
chmod +x /usr/local/bin/roll
| true |
e5cf648d9a69a57ca67435eb9747e765f264f360 | Shell | w-dee/kirikiri3-legacy | /external/wxWidgets/build.sh | UTF-8 | 2,608 | 3.078125 | 3 | [] | no_license | #!/bin/sh -e
prefix=`pwd`
# TODO: GC のスレッドのサポートを wxWidgets でも有効にしなければならない
# (RisseのtRisseThreadはwxThreadをベースにしているため)
# TODO: 各プラットフォームに応じて GC に必要なオプションが変わるので
# 注意。
prefix_zlib=$prefix/../zlib
prefix_libpng=$prefix/../libpng
prefix_libjpeg=$prefix/../libjpeg
prefix_expat=$prefix/../expat
prefix_gc=$prefix/../gc
includes=" \
-I$prefix_zlib/include \
-I$prefix_libpng/include \
-I$prefix_libjpeg/include \
-I$prefix_expat/include \
"
libs=" \
-L$prefix_zlib/lib \
-L$prefix_libpng/lib \
-L$prefix_libjpeg/lib \
-L$prefix_expat/lib \
"
case "`uname -s`" in
MINGW* | CYGWIN* )
;;
Linux*)
# see details at README.linux of Boehm GC document.
includes="$includes \
-I$prefix_gc/include \
-DGC_LINUX_THREADS -D_REENTRANT \
-include gc.h \
"
# Ubuntu Intrepid 対策。これがないとvfwprintfの再定義でエラーになる
includes="$includes -U_FORTIFY_SOURCE"
# ライブラリ
libs="$libs \
-L$prefix_gc/lib -lgc \
"
;;
*)
echo "Your platform is not supported yet at this time."
echo "Please contact W.Dee <dee@kikyou.info>"
exit 1
;;
esac
CFLAGS="$CFLAGS $includes"
export CFLAGS
CXXFLAGS="$CXXFLAGS $includes"
export CXXFLAGS
CPPFLAGS="$CPPFLAGS $includes"
export CPPFLAGS
LDFLAGS="$LDFLAGS $libs"
export LDFLAGS
common_configure_options="--with-opengl --enable-exceptions \
--enable-catch_segvs --enable-mousewheel --enable-unicode \
--enable-intl --enable-mslu \
--disable-compat24 \
--disable-compat26 \
--prefix=$prefix \
--with-zlib=sys \
--with-expat=sys \
--with-libpng=sys \
--with-libjpeg=sys \
--enable-vendor=k3 \
"
build_debug()
{
# build_debug ディレクトリを作成
rm -rf build_debug
mkdir build_debug
cd build_debug
# configure
../configure \
--disable-shared --enable-static \
--enable-debug --enable-debug_gdb --enable-debug_cntxt \
--disable-optimise \
$common_configure_options
make
# mo ファイルだけはなぜか個別に作り直さなければならない
(cd ../locale && make allmo)
make install
cd ..
}
build_release()
{
# build_release ディレクトリを作成
rm -rf build_release
mkdir build_release
cd build_release
# configure
../configure \
$common_configure_options
make
# mo ファイルだけはなぜか個別に作り直さなければならない
(cd ../locale && make allmo)
make install
cd ..
}
# ビルドを行う
cd src_temp
build_debug
build_release
| true |
933aa9570b72d884a2af63aafbc77e5e3ba6cc42 | Shell | chaimleib/dotfiles | /mac/settings/system.sh | UTF-8 | 319 | 2.578125 | 3 | [] | no_license | #!/bin/bash
echo
echo "## System ##"
if false; then
echo Disable the boot chime
sudo nvram SystemAudioVolume=" "
else
echo Re-enable the boot chime
sudo nvram -d SystemAudioVolume
fi
echo Don\'t create .DS_Store on network volumes
defaults write com.apple.desktopservices DSDontWriteNetworkStores -bool true
| true |
50b1a690f10494b838ee77f6421db7aa27cfc2dd | Shell | xenonal/IIBMonitor | /Unix_Scripts/System_setup/OS_Setup.sh | UTF-8 | 3,001 | 3.984375 | 4 | [] | no_license | #!/bin/bash
#-------------------------------------------------------------------------------------------
# Copyright 2017 Tri
#
# All rights reserved.
# Warning: No part of this work covered by copyright may be reproduced,
# distributed or copied in any form or by any means (graphic, electronic
# or mechanical, including photocopying, recording, taping, or information
# retrieval systems) without the written permission of Tri.
# Unauthorised reproduction, distribution or copying of this work may
# result in severe civil and criminal penalties, and will be prosecuted
# to the maximum extent possible under the law.
#
#-------------------------------------------------------------------------------------------
#
# SCRIPT NAME: OS_Setup.sh
# VERSION: V1.00
# July 2017 - James McVicar - Created
#
# Method: manually as required.
# Usage: OS_Setup.sh <command>
#
# Description: Used to set up Kernal settings, directories and install 0S tools
#
#------------------------------------------------------------------------------------------
echo "=================================================================" >> brksetuplog.txt
echo "= " >> brksetuplog.txt
date >> brksetuplog.txt
echo "= " >> brksetuplog.txt
if [ "X$1" != "X" ]
then
GOARG=$1
fi
function func_adddate() {
while IFS= read -r line; do
echo "$(date) $line"
done
}
function func_Option {
case "$1" in
#------------------------------------------------------------------------------------------
# ************************* Setup Unix Environment *******************************
#------------------------------------------------------------------------------------------
'SystemSetup')
echo "`. /opt/setup/kernal/kernal_settings.sh`" # exact path for the script file
echo; echo " ------- Completed System Setup -------"
;;
'CreateDirectories')
echo ; echo " ------- Competed Directory Setup -------"
;;
'SystemUtilsSetup')
echo "`. /opt/setup/packages/packages.sh`" # exact path for the script file
echo ; echo " ------- Competed Directory Setup -------"
;;
'SystemUserSetup')
echo ; echo " ------- Competed System Setup -------"
;;
*)
echo " ---- Incorrect Parameter Specified"; echo
;;
esac
esac
}
#------------------------------------------------------------------------------------------
# ************************* Start of Script Orchestration *******************************
#------------------------------------------------------------------------------------------
if [ "X$GOARG" = "X" ]
then
while :
do
##func_ShowMenu
read option
clear
func_Option $option
done
else
func_Option $GOARG
fi
| true |
39c037d2f3a171a1f40a1590d3fa5d7437fbf5f4 | Shell | ohumbe/ocp-configs-checklists | /setup-repo.sh | UTF-8 | 720 | 3.109375 | 3 | [] | no_license | #!/bin/bash
if [ "$HOSTNAME" = "ocpmgm01.llnl.gov" ]; then
cd /tmp
[ -d ocp-configs-checklists ] && rm -rf ocp-configs-checklists
git clone https://github.com/Prestovich/ocp-configs-checklists.git
cp -rp /tmp/ocp-configs-checklists/rpms /var/www/html
cd /var/www/html/rpms
tar xzf ocp4.6-rhel7-rpms.tar.gz
tar xzf rhel7-ose-4.6.tar.gz
createrepo -v /var/www/html/rpms
fi
if [ "$HOSTNAME" = "ocpwrk01.llnl.gov" ] || [ "$HOSTNAME" = "ocpwrk02.llnl.gov" ]; then
cat > /etc/yum.repos.d/ose.repo <<-EOF
[rhel-7-server-ose-4.6-rpms]
name=Red Hat OpenShift Container Platform 4.6 (RPMs)
baseurl=http://ocpmgm01.llnl.gov:8080/rpms
enabled=1
gpgcheck=0
EOF
yum repolist
yum list cri-tools
fi
| true |
2f95f171e297c46733219da19686168cc5b530ea | Shell | gepard5/linda-comm | /tests/read_concurrent.sh | UTF-8 | 425 | 3.125 | 3 | [] | no_license | #!/bin/bash
echo "Testing concurrent reading"
if [ -e "./text" ]
then
rm ./text
fi
../lindaCommunicator -o "( 1 )"
echo "Setting up readers"
for i in {1..100}
do
../lindaCommunicator -i "( * )" -t 30000 &
done
echo "Filling file"
sleep 5
for i in {1..50}
do
../lindaCommunicator -o "( $i )" -t 30000
done
sleep 5
echo "Filling file second time"
for i in {1..50}
do
../lindaCommunicator -o "( $i )" -t 30000
done
| true |
5a437342c55ae11e89d50bfcc78da4c3bc37ceba | Shell | luke-c-sargent/compose-services | /scripts/fence_setup.sh | UTF-8 | 396 | 2.765625 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# entrypoint script for fence to sync user.yaml before running
sleep 2
until (echo > /dev/tcp/postgres/5432) >/dev/null 2>&1; do
echo "Postgres is unavailable - sleeping"
sleep 2
done
echo "postgres is ready"
update-ca-certificates
fence-create sync --yaml user.yaml --arborist http://arborist-service
rm -f /var/run/apache2/apache2.pid && /usr/sbin/apache2ctl -D FOREGROUND
| true |
a8a34ce6fb0d261e2116f99e0e5da67e5e5b13ba | Shell | deweysasser/docker-xymon | /add-files/usr/lib/xymon/server/bin/dynamics-hosts.sh | UTF-8 | 336 | 3.109375 | 3 | [
"Artistic-2.0"
] | permissive | #!/bin/bash
# Purpose: turn xymon ghost hosts (hosts we don't know about) into displayable records
xymon=/usr/lib/xymon/server/bin/xymon
list=/etc/xymon/ghostlist.cfg
tmp=/tmp/ghostlist.$$
trap 'rm $tmp' 0
cp $list $tmp
(
$xymon localhost ghostlist | awk -F\| '{print $2 " " $1 " # noconn"}'
cat $tmp
) | sort -u > $list
| true |
efcd70f067b3fd5635495e2703ce762c120d0b9f | Shell | acid23m/vds_docker | /app_setup.sh | UTF-8 | 2,358 | 3.5625 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# --------------------------------------------------------------------
# THIS SCRIPT AUTOMATICALLY CONFIGURES DOCKER ORIENTED WEB APPLICATION
# --------------------------------------------------------------------
set -ae
. ./.env
set +a
if [[ $(id -u) != 0 ]]; then
echo "Run script with superuser privileges!"
exit 1
fi
if [[ -z $1 ]] || [[ -z $2 ]] || [[ -z $3 ]] || [[ -z $4 ]]; then
echo "Usage: ./app_setup.sh [GIT ADDRESS] [PROJECT DIR NAME] [SITE_DOMAIN] [SITE_PORT]"
echo "ex.: ./app_setup.sh git@bitbucket.org:acid23m/base-docker-app.git base base-app.com 8000"
exit 2
fi
GIT_ADDRESS=$1
PROJECT_DIR_NAME=$2
SITE_DOMAIN=$3
SITE_PORT=$4
# nginx config
echo -e "\n *** Setup NGINX ***"
echo -e "-------------------------------------------\n"
service nginx stop
if [[ "$VDS_IS_REMOTE" = "y" ]]; then
CERT_PATH="/etc/certs/${SITE_DOMAIN}/cert.crt"
CERT_KEY_PATH="/etc/certs/${SITE_DOMAIN}/cert.key"
mkdir -pv "/etc/certs/${SITE_DOMAIN}"
# get letsencrypt certificate
/root/.acme.sh/acme.sh --issue -d "${SITE_DOMAIN}" --standalone -k 4096 --force
/root/.acme.sh/acme.sh --install-cert -d "${SITE_DOMAIN}" --key-file "${CERT_KEY_PATH}" --fullchain-file "${CERT_PATH}"
else
CERT_PATH="/etc/certs/self-signed/cert.crt"
CERT_KEY_PATH="/etc/certs/self-signed/cert.key"
fi
SITE_NGINX_CONF="${PROJECT_DIR_NAME}_${SITE_PORT}.conf"
sed -e "s|SITE_DOMAIN|${SITE_DOMAIN}|g; s|PORT|${SITE_PORT}|g; s|CERT_PATH|${CERT_PATH}|g; s|CERT_KEY_PATH|${CERT_KEY_PATH}|g" "$PWD/nginx/site.conf" > "/etc/nginx/conf.d/${SITE_NGINX_CONF}"
chmod 644 "/etc/nginx/conf.d/${SITE_NGINX_CONF}"
service nginx start
# get project
echo -e "\n *** Get project ***"
echo -e "-------------------------------------------\n"
git clone "${GIT_ADDRESS}" "/var/www/${PROJECT_DIR_NAME}"
chown -R "${VDS_USER}:www-data" "/var/www/${PROJECT_DIR_NAME}"
# install project
echo -e "\n *** Install project ***"
echo -e "-------------------------------------------\n"
INIT_DIR=$PWD
cd "/var/www/${PROJECT_DIR_NAME}"
cp -av .env.example .env
nano .env
/bin/bash "$PWD/start.sh"
/bin/bash "$PWD/install.sh"
cd ${INIT_DIR}
# result
echo -e "\n *** All Done! ***"
echo "-------------------------------------------"
echo "Visit web site at: https://${SITE_DOMAIN}"
echo "Docker manager: https://${PORTAINER_DOMAIN}"
exit 0
| true |
f2fe2c69bf949b987b70d6e27083d1bc889b4acf | Shell | georgmartius/lpzrobots | /matrixviz/configure | UTF-8 | 179 | 2.765625 | 3 | [] | no_license | #!/bin/sh
SPEC=
if [ `uname -s` = "Darwin" ]; then
SPEC=-spec macx-g++
fi
QMAKE=qmake-qt4
if !(which $QMAKE); then
QMAKE=qmake;
fi
$QMAKE -makefile $SPEC matrixviz.pro
| true |
99e7ff80a59ab36a7d8e5af4a4befeeef1d696ce | Shell | qbaxl/qbaxl.github.io | /.test.bats | UTF-8 | 175 | 2.796875 | 3 | [] | no_license | setup() {
source /var/batslib/batslib.bash
}
add_remote() {
grep "remote.*origin" */.git/config
}
@test "Check for the remote" {
run add_remote
[ "$status" -eq 0 ]
} | true |
e8ca0f8218c60cf3151725a9423653b5b6f9117e | Shell | MomsFriendlyDevCo/Init | /500-objectivefs | UTF-8 | 1,017 | 3.890625 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Install ObjectiveFS, setup a mount point and install into fstab
source common
INIT status "Install ObjectiveFS from Deb"
INIT apt-install-url 'https://objectivefs.com/user/download/axn2xgkwt/objectivefs_6.8_amd64.deb'
INIT status "Setup ObjectiveFS"
sudo mount.objectivefs config
grep -s objectivefs /etc/fstab
if [ "$?" == 0 ]; then
INIT skip "OFS already configured in /etc/fstab"
INIT status "Unmounting OFS in fstab"
sudo umount -a -t objectivefs
else
INIT status "Create OFS filesystem"
read -p "Enter S3 mountpoint to create (only alpha + dashes): " FS
sudo mount.objectivefs create "$FS"
INIT status "Setup OFS-on-boot"
read -p "Re-enter OFS passphase to install OFS into fstab: " PASS
read -p "Enter mount path: " MOUNTPATH
echo "$PASS" | sudo tee /etc/objectivefs.env/OBJECTIVEFS_PASSPHRASE >/dev/null
echo "s3://$FS $MOUNTPATH objectivefs auto,_netdev,mt,noatime,nodiratime 0 0" | sudo tee -a /etc/fstab >/dev/null
fi
INIT status "Mounting OFS"
sudo mount -a -t objectivefs
| true |
ae9a58b5f53e7c34f9bdb0d4cea07067d1c77bd8 | Shell | OULibraries/ansible-role-dspace | /files/ds_reconfigure.sh | UTF-8 | 325 | 2.921875 | 3 | [
"MIT"
] | permissive | ## Configuration
. /opt/oulib/dspace/etc/conf.sh
cd ${DSPACE_SRC}/dspace/target/dspace-installer
if [ -d ${DSPACE_RUN} ];then
$ANT -Doverwrite=true update clean_backups
else
echo "CAN'T RECONFIGURE, no dspace install dir"
fi
OUT=$?
if [ $OUT -eq 0 ];then
echo "ANT Update successful"
else
exit $OUT
fi
| true |
e5ef99acc8923e4028224ff35cdf232b9b5c11ad | Shell | tunzao/assembly-demo | /src/main/bin/run.sh | UTF-8 | 515 | 3.296875 | 3 | [] | no_license | #!/usr/bin/env bash
PRG="$0"
while [ -h "$PRG" ] ; do
ls=`ls -ld "$PRG"`
link=`expr "$ls" : '.*-> \(.*\)$'`
if expr "$link" : '/.*' > /dev/null; then
PRG="$link"
else
PRG=`dirname "$PRG"`/"$link"
fi
done
base_dir=`dirname "$PRG"`/..
base_dir=`cd "$base_dir" && pwd`
bin_dir=${base_dir}/bin
lib_dir=${base_dir}/lib
conf_dir=${base_dir}/conf
CLASS_PATH=${conf_dir}:$(ls ${lib_dir}/*.jar | tr '\n' :)
mainclass="me.tunzao.assembly.demo.Hello"
cd ${base_dir}
java -cp ${CLASS_PATH} ${mainclass}
| true |
ed9234b5667d6d96e70c5c4b1cf6c77be23aa3a1 | Shell | skydrome/random | /shell/aurdiff.sh | UTF-8 | 316 | 3.390625 | 3 | [] | no_license | #!/usr/bin/env bash
# diff an AUR pkgbuild based on your local copy
[[ ! -f PKGBUILD ]] && {
echo "error: No PKGBUILD found in working directory."
exit 1
}
eval $(grep '^pkgname=' PKGBUILD)
colordiff ${@:--Naur} \
<(curl -sk "https://aur.archlinux.org/packages/${pkgname:0:2}/$pkgname/PKGBUILD") PKGBUILD
| true |
952972bae1b697d909ea8ac20ec5836ef9168566 | Shell | kulkultech/ngecilin | /publish.sh | UTF-8 | 1,085 | 3.140625 | 3 | [] | no_license | #!/usr/bin/env bash
function check_var(){
if [ -z "$1" ]; then
echo "$2" is not set
exit 1
fi
}
check_var "$CHROME_CLIENT_ID" CHROME_CLIENT_ID
check_var "$CHROME_CLIENT_SECRET" CHROME_CLIENT_SECRET
check_var "$CHROME_REFRESH_TOKEN" CHROME_REFRESH_TOKEN
check_var "$CHROME_APP_ID" CHROME_APP_ID
echo "===Getting access token"
CHROME_ACCESS_TOKEN=$(curl "https://accounts.google.com/o/oauth2/token" -d "client_id=${CHROME_CLIENT_ID}&client_secret=${CHROME_CLIENT_SECRET}&refresh_token=${CHROME_REFRESH_TOKEN}&grant_type=refresh_token&redirect_uri=urn:ietf:wg:oauth:2.0:oob" | jq -r .access_token)
echo "===Access token granted"
echo "===Uploading artifacts"
curl -H "Authorization: Bearer ${CHROME_ACCESS_TOKEN}" -H "x-goog-api-version: 2" -X PUT -T public.zip -v "https://www.googleapis.com/upload/chromewebstore/v1.1/items/${CHROME_APP_ID}"
echo "===Publishing artifacts"
curl -H "Authorization: Bearer ${CHROME_ACCESS_TOKEN}" -H "x-goog-api-version: 2" -H "Content-Length: 0" -X POST -v "https://www.googleapis.com/chromewebstore/v1.1/items/${CHROME_APP_ID}/publish"
| true |
b4ab216c9759ebd29e82f27f855250b9a7c9801d | Shell | zd200572/bioinformatics-pipeline | /WES_vs_WGS-master/WES_pipeline_analysis.sh | UTF-8 | 4,919 | 3.203125 | 3 | [] | no_license | #!/bin/bash
# Aziz Belkadi July 2014
# Paired ends WES sequencing pipeline
# This script generates a final alignment bam file
# Maximum Exact Matching (MEM) alignment algorithm
# Needs BWA, GATK, Picard Tools in the same repertory (/resources)
# The two fastq files should have the same name with R1.fastq extention for the first end read file (Ex: Ind1_R1.fastq.gz) and R2.fastq for the second end read file (Ex: Ind1_R2.fastq.gz)
# The two fastq files should be in the same repertory (/WES/fastq)
# Take one parameter : Fastq files name without exetention
!/bin/bash
if [ $# -ne 1 ]
then
echo "Usage: <SampleID>"
exit 65
fi
############################
master_time_start=`date +%s`
############################
mkdir -p /WES/bam/$1-b37/logs
mkdir /WES/bam/$1-b37/tmpdir
######################################
############ Declaration #############
######################################
data_dir=/WES/fastq
out_dir=/WES/bam/$1-b37
reference=/resources/human_g1k_v37.fasta
sureselect_intervals=/resources/sureselect/SureSelect-v4plusUTR_baits.interval_list
bwa_dir=/resources/bwa-0.7.8
picard_dir=/resources/apps/picard-tools-1.113
gatk=/resources/apps/gatk/GenomeAnalysisTKLite-3.1-1/GenomeAnalysisTKLite.jar
kg_mills=/resources/Mills_and_1000G_gold_standard.indels.b37.sites.vcf.gz
kg_indels=/resources/1000G_phase1.indels.b37.vcf.gz
dbsnp=/resources/dbnsp/dbSNP135-All.vcf
########################################################
############ Alignment using GATK pipeline #############
########################################################
#BWA ALIGNMENT
$bwa_dir/bwa mem \
-t 8 \
-M \
$reference \
$data_dir/$1*R1*fastq*gz \
$data_dir/$1*R2*fastq*gz \
> $out_dir/$1.aln.sampe.sam
## SORT BAM AND ADD INFO
java -Djava.io.tmpdir=$out_dir/tmpdir \
-Xmx20g \
-jar $picard_dir/AddOrReplaceReadGroups.jar \
I=$out_dir/$1.aln.sampe.sam \
O=$out_dir/$1.aln.sampe.sorted.bam \
SORT_ORDER=coordinate \
CREATE_INDEX=true \
RGID=$1 \
RGLB="pe" \
RGPU="HiSeq-2000" \
RGSM=$1 \
RGCN="Human Genetics of Infectious Disease" \
RGDS=$intervals--GRCh37 \
RGPL=illumina \
VALIDATION_STRINGENCY=SILENT \
&>> $out_dir/logs/$1.log
## CREATE INTERVALS FOR LOCAL REALIGNMENT
java -Djava.io.tmpdir=$out_dir/tmpdir \
-Xmx20g \
-jar $gatk \
-R $reference \
-L $intervals \
--interval_padding 50 \
-T RealignerTargetCreator \
-rf BadCigar \
-known $kg_mills \
-known $kg_indels \
-nt 1 \
-I $out_dir/$1.aln.sampe.sorted.bam \
-o $out_dir/$1.aln.sampe.sorted.bam.forRealigner.intervals \
--allow_potentially_misencoded_quality_scores \
&>> $out_dir/logs/$1.log
## PERFORM LOCAL REALIGNMENT
java -Djava.io.tmpdir=$out_dir/tmpdir \
-Xmx20g \
-jar $gatk \
-R $reference \
-T IndelRealigner \
-rf BadCigar \
-known $kg_mills \
-known $kg_indels \
--consensusDeterminationModel USE_READS \
-compress 0 \
-targetIntervals $out_dir/$1.aln.sampe.sorted.bam.forRealigner.intervals \
-I $out_dir/$1.aln.sampe.sorted.bam \
-o $out_dir/$1.aln.sampe.sorted.realigned.bam \
--allow_potentially_misencoded_quality_scores \
&>> $out_dir/logs/$1.log
## MARK DUPLICATES
java -Djava.io.tmpdir=$out_dir/tmpdir \
-Xmx20g \
-jar $picard_dir/MarkDuplicates.jar \
REMOVE_DUPLICATES=false \
M=$out_dir/metrics/$1.duplicate.metrics \
I=$out_dir/$1.aln.sampe.sorted.realigned.bam \
O=$out_dir/$1.aln.sampe.sorted.realigned.dedup.bam \
VALIDATION_STRINGENCY=SILENT \
CREATE_INDEX=true \
&>> $out_dir/logs/$1.log
## BASE RECALIBRATOR
java -Djava.io.tmpdir=$out_dir/tmpdir \
-Xmx20g \
-jar $gatk \
-L $intervals \
--interval_padding 50 \
-R $reference \
--disable_indel_quals \
-T BaseRecalibrator \
-rf BadCigar \
-knownSites $dbsnp \
-knownSites $kg_mills \
-knownSites $kg_indels \
-I $out_dir/$1.aln.sampe.sorted.realigned.dedup.bam \
-o $out_dir/$1.recal_data.grp \
&>> $out_dir/logs/$1.log
## APPLY RECALIBRATION (PRINT READS)
java -Djava.io.tmpdir=$out_dir/tmpdir \
-Xmx10g \
-jar $gatk \
-R $reference \
-T PrintReads \
-rf BadCigar \
-BQSR $out_dir/$1.recal_data.grp \
-I $out_dir/$1.aln.sampe.sorted.realigned.dedup.bam \
-o $out_dir/$1.aln.sampe.sorted.realigned.dedup.recal.bam \
--allow_potentially_misencoded_quality_scores \
&>> $out_dir/logs/$1.log
#####################################
###### Remove temporary files #######
#####################################
rm $out_dir/$1.recal_data.grp
rm $out_dir/$1.aln.sampe.sorted.realigned.dedup.bai
rm $out_dir/$1.aln.sampe.sorted.realigned.dedup.bam
rm $out_dir/$1.aln.sampe.sorted.realigned.bai
rm $out_dir/$1.aln.sampe.sorted.realigned.bam
rm $out_dir/$1.aln.sampe.sorted.bam.forRealigner.intervals
rm $out_dir/$1.aln.sampe.sorted.bai
rm $out_dir/$1.aln.sampe.sorted.bam
rm $out_dir/$1.aln.sampe.sam
rm -r $out_dir/tmpdir
############################
master_time_end=`date +%s`
(master_time_exec=`expr $(( $master_time_end - $master_time_start ))`; echo "$1 analysis completed in $master_time_exec seconds") >> $out_dir/logs/$1.log
############################
| true |
cc3b51fd196d4e698fbf23ec47c14f72a5af4791 | Shell | pacoCroket/p5js-sketches | /finding-people/autoLoad.sh | UTF-8 | 639 | 2.6875 | 3 | [] | no_license | #!/bin/bash
# Kiosk auto-start for the "finding-people" p5.js sketch (Raspberry Pi):
# update the repo, prep the display, and launch Chromium full-screen.
# fetch changes from repo
# NOTE(review): git pull uses the current working directory — assumes the
# script is started from inside the repo checkout; confirm in the autostart unit.
git pull
# Run this script in display 0 - the monitor
export DISPLAY=:0
# Hide the mouse from the display
unclutter &
# If Chrome crashes (usually due to rebooting), clear the crash flag so we don't have the annoying warning bar
sed -i 's/"exited_cleanly":false/"exited_cleanly":true/' /home/pi/.config/chromium/Default/Preferences
sed -i 's/"exit_type":"Crashed"/"exit_type":"Normal"/' /home/pi/.config/chromium/Default/Preferences
# Run Chromium and open tabs
/usr/bin/chromium-browser --noerrdialogs --kiosk --start-fullscreen https://pacocroket.github.io/p5js-sketches/finding-people/ &
| true |
72eed99ad022575572e4de4819db1c724e2429fc | Shell | supercoeus/KSYLive_Android | /KSYLiveDemo/build_streamer.sh | UTF-8 | 1,847 | 3.5625 | 4 | [] | no_license | #! /bin/sh
# Build the native (NDK) libraries for all three ABIs; with -release, also
# refresh the public KSYStreamer_Android release checkout (demo sources,
# libs, docs) from this repo's build outputs.
set -e
# Parse arguments: -release enables the release-export step, -v <tag>
# makes the release repo check out a new branch named after the version.
while [ $# -gt 0 ]; do
case $1 in
help|--help|-h)
echo "With none params means build ndk"
echo "exp: $0"
echo "Use -release to gen streamer out project"
echo "-v to set the version"
echo "exp: $0 -release -v v4.0.0"
exit 0
;;
release|-release)
RELEASE=1
;;
v|-v)
CUR_BRANCH=$2
shift
;;
esac
shift
done
# Clean + rebuild the JNI libraries for every supported ABI
# (set -e aborts the script on the first ndk-build failure).
cd libksylive-arm64
ndk-build clean
ndk-build
cd ..
cd libksylive-armv7a
ndk-build clean
ndk-build
cd ..
cd libksylive-x86
ndk-build clean
ndk-build
cd ..
echo "ndk build success"
# Without -release we are done after the NDK build.
if [ -z "$RELEASE" ]; then
exit 0
fi
KSYSTREAMER_ANDROID_URL=https://github.com/ksvc/KSYStreamer_Android.git
OUT_DIR=KSYStreamer_Android_Release
RELEASE_LIBS=$OUT_DIR/libs
RELEASE_DOC=$OUT_DIR/docs
DEMO_DIR=$OUT_DIR/demo
echo "====================== check KSYStreamer_Android ========"
# Clone the release repo on first use; otherwise hard-reset it to origin/master.
if [ ! -d "$OUT_DIR" ] ; then
echo "can't find $OUT_DIR, clone again"
git clone ${KSYSTREAMER_ANDROID_URL} $OUT_DIR
fi
cd $OUT_DIR
git fetch origin
git reset --hard
git checkout master
git pull origin master
if [ -n "$CUR_BRANCH" ]; then
git checkout -b $CUR_BRANCH
fi
cd -
echo "====================== copy to ========"
# Sync the demo app manifest, resources and sources into the release checkout.
cp -p ksystreamerdemo/src/main/AndroidManifest.xml $DEMO_DIR/
rm -rf $DEMO_DIR/res
cp -p -r ksystreamerdemo/src/main/res/ $DEMO_DIR/res
rm -rf $DEMO_DIR/src/*
cp -p -r ksystreamerdemo/src/main/java/ $DEMO_DIR/src
rm -rf $RELEASE_LIBS
mkdir -p $RELEASE_LIBS
# Collect the java glue and the per-ABI native libs into the release libs dir.
LIB_DIRS="java arm64 armv7a x86"
for LIB_DIR in $LIB_DIRS
do
cp -p -r libksylive-$LIB_DIR/libs/ $RELEASE_LIBS
done
# The demo project references ../libs via a symlink.
cd $DEMO_DIR
rm -rf libs
ln -s ../libs libs
cd -
rm -rf $RELEASE_DOC
cp -p -r ../prebuilt/docs/streamer/docs/ $RELEASE_DOC
echo "====================== copy done ========"
| true |
3afdab449d48389c719da9e6fbe5c9396a9dcd05 | Shell | feiyunwill/qnode | /bootstrap.sh | UTF-8 | 487 | 3.484375 | 3 | [] | no_license | #!/bin/sh
# Bootstrap: unpack the bundled Lua 5.1.4 source, build it for Linux, and
# install the static library + headers under ./lib/install/lua.
path=$(pwd)
pkg_dir="$path/pkg/"
install_dir="$path/lib/install"
liblua_path="$pkg_dir/lua-5.1.4.tar.gz"
# Bail out early if the bundled tarball is missing.
if [ ! -f "$liblua_path" ]; then
    echo "lua does not exist!"
    exit 1
fi
# FIXED: the unpack/build steps were unguarded — without `set -e`, a failed
# tar/mv/cd previously fell through and ran `make` in the wrong directory.
tar xvf "$liblua_path" -C "$pkg_dir" || exit 1
mv "$pkg_dir/lua-5.1.4" "$pkg_dir/lua" || exit 1
echo "install lua"
cd "$pkg_dir/lua" || exit 1
make linux || exit 1
mkdir -p "$install_dir/lua/lib/"
mkdir -p "$install_dir/lua/include/"
cp "$pkg_dir/lua/src/liblua.a" "$install_dir/lua/lib/" || exit 1
cp "$pkg_dir/lua/src/"*.h "$install_dir/lua/include/" || exit 1
echo "install lua done"
| true |
f5ae158a2d91fb697efccb4ff9fd90251f856001 | Shell | moujahedk/shell | /exerc4 | UTF-8 | 520 | 3.125 | 3 | [] | no_license | #!/bin/bash
# Fruit-basket cost exercise.
COST_PINEAPPLE=50
COST_BANANA=4
COST_WATERMELON=23
COST_BASKET=1
# FIXED: the original wrote "3$COST_WATERMELON" (missing '*'), which is a
# syntax error inside $((...)), and then echoed the undefined variable
# $Total instead of $TOTAL.
TOTAL=$((COST_PINEAPPLE + 2*COST_BANANA + 3*COST_WATERMELON + COST_BASKET))
echo "Total Cost is $TOTAL"
# Code to extract the first name from the data record
# FIXED: the record was assigned as "Datarecord" but read as "DATARECORD"
# (shell variables are case-sensitive), and `expr index "DATARECORD" ','`
# searched the literal string instead of the variable's value.
DATARECORD="last=Clifford,first=Johnny Boy,state=CA"
COMMA1=$(expr index "$DATARECORD" ',')   # 14: position of the first comma
CHOP1FIELD=${DATARECORD:$COMMA1}         # "first=Johnny Boy,state=CA"
COMMA2=$(expr index "$CHOP1FIELD" ',')   # comma ending the first-name field
LENGTH=$(expr $COMMA2 - 6 - 1)           # drop the "first=" prefix and the comma
FIRSTNAME=${CHOP1FIELD:6:$LENGTH}        # Johnny Boy
echo $FIRSTNAME | true |
8f226721e150c60647ccd279b3524df36088b35f | Shell | s-morgan-jeffries/ansible_webapp_manager | /roles/network_analysis/files/bin/pingstats | UTF-8 | 340 | 3.453125 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Print ping packet-loss and average round-trip time for the given ping
# arguments. Output: a header line, then "<loss%>:<avg ms>".
pingstats() {
  # Last two summary lines of ping: "... X% packet loss ..." and
  # "rtt min/avg/max/mdev = a/b/c/d ms".
  stats="$(ping "$@" | tail -n 2)"
  # FIXED: the original pattern '^.+([[:digit:]]+)%' is greedy, so for a
  # multi-digit figure (e.g. "100% packet loss") only the final digit was
  # captured. Anchor the capture on the preceding non-digit instead, and
  # allow a decimal point since some pings report e.g. "0.0% packet loss".
  packet_loss="$(echo "$stats" | head -n 1 | sed -E 's/^.*[^0-9.]([0-9.]+)% packet loss.+$/\1/')"
  # Average RTT is the 2nd '/'-separated field after the '='.
  avg="$(echo "$stats" | tail -n 1 | cut -d '=' -f 2 | cut -d '/' -f 2)"
  echo "% packet loss : average ping time (ms)"
  echo "$packet_loss:$avg"
}
pingstats "$@" | true |
f0c4363c118214191bb2b7ec8c973efcb2727190 | Shell | jchirayath/packer | /Ubuntu/scripts/AdJoin.sh | UTF-8 | 1,226 | 2.609375 | 3 | [] | no_license | # Join the Domain (MANUALLY)
# Join this host to the ATLANTA.TESTING-LABS.NET AD domain and configure
# Kerberos + SSSD for AD logins. The adcli join itself is done manually:
#echo awsConnector123! | adcli join -U awsadconnector@ATLANTA.TESTING-LABS.NET ATLANTA.TESTING-LABS.NET
# Fix the krb5.conf file
# (rewrite the stock EXAMPLE.COM placeholders to the real realm/KDC; the
# broadest substitution runs last so the more specific ones win first)
sed -i 's#EXAMPLE.COM#ATLANTA.TESTING-LABS.NET#g' /etc/krb5.conf
sed -i 's#kerberos.example.com#adminatlad02.atlanta.testing-labs.net#g' /etc/krb5.conf
sed -i 's#example.com#atlanta.testing-labs.net#g' /etc/krb5.conf
# Add sssd to /etc/nsswitch.conf (and enable pam_mkhomedir on first login)
authconfig --enablesssd --enablesssdauth --enablemkhomedir --update
# Create sssd.conf file
# NOTE(review): '>>' appends — re-running this script duplicates the config;
# confirm whether '>' (overwrite) is intended.
cat << EOF >> /etc/sssd/sssd.conf
[sssd]
services = nss, pam, ssh, autofs
config_file_version = 2
domains = atlanta.testing-labs.net
#debug_level = 9
[domain/ATLANTA.TESTING-LABS.NET]
id_provider = ad
#auth_provider = ad
#chpass_provider = ad
#access_provider = ad
ad_server = adminatlad02.atlanta.testing-labs.net
override_homedir = /home/%u
default_shell = /bin/bash
#ad_gpo_access_control = enforcing
#debug_level = 9
[nss]
#debug_level = 9
[pam]
#debug_level = 9
EOF
# Change permissions for sssd.conf file (sssd refuses world-readable config)
chown root:root /etc/sssd/sssd.conf
chmod 600 /etc/sssd/sssd.conf
# Restart sssd daemon
service sssd start
chkconfig sssd on
# Add AD Group to Sudoers File
echo "%AWS\ Admins ALL=(ALL)ALL" >> /etc/sudoers
| true |
d2baf0c525350eeee35caae89b6b0ab5a42760e6 | Shell | gvoskuilen/sst-sqe | /buildsys/deps/bin/sstDep_qsim_0.1.4.sh | UTF-8 | 4,108 | 3.265625 | 3 | [] | no_license | # !/bin/bash
# sstDep_qsim_0.1.4.sh
# Description:
# A bash script containing functions to process SST's Qsim dependency.
PARENT_DIR="$( cd -P "$( dirname "$0" )"/.. && pwd )"
. ${PARENT_DIR}/include/depsDefinitions.sh
# Environment variable unique to Qsim
export SST_BUILD_QSIM=1
# Environment variable uniquely identifying this script
export SST_BUILD_QSIM_0_1_4=1
#===============================================================================
# Qsim
#===============================================================================
#-------------------------------------------------------------------------------
# Function:
# sstDepsStage_qsim
# Purpose:
# Prepare Qsim code for patching.
# Inputs:
# None
# Outputs:
# Pass/fail
# Expected Results
# Staged Qsim code that is ready for patching
# Caveats:
# None
#-------------------------------------------------------------------------------
export SST_DEPS_SRC_STAGED_QSIM=${SST_DEPS_SRC_STAGING}/qsim-0.1.4
# Stage (download if needed, then unpack) the Qsim 0.1.4 source tarball
# into the staging area. Returns wget's exit status on download failure.
sstDepsStage_qsim ()
{
sstDepsAnnounce -h $FUNCNAME -m "Staging Qsim 0.1.4"
# fetch 0.1.4 from Qsim web page
# http://www.cdkersey.com/qsim-web/releases
pushd ${SST_DEPS_SRC_PRISTINE}
if [ -f qsim-0.1.4.tar.bz2 ]
then
# Use local copy
sstDepsAnnounce -h $FUNCNAME -m "INFO: Using local copy of qsim-0.1.4.tar.bz2."
else
# fetch using wget
wget http://www.cdkersey.com/qsim-web/releases/qsim-0.1.4.tar.bz2
retval=$?
if [ $retval -ne 0 ]
then
# bail out on error (restore cwd first so the caller is unaffected)
echo "ERROR: sstDep_qsim_0.1.4.sh: Qsim 0.1.4 wget fetch failure"
sstDepsAnnounce -h $FUNCNAME -m "Is http_proxy configured properly in $HOME/.wgetrc?"
popd
return $retval
fi
fi
# Unpack the (possibly just-downloaded) tarball into the staging directory.
tar xfj qsim-0.1.4.tar.bz2 -C ${SST_DEPS_SRC_STAGING}
popd
}
#-------------------------------------------------------------------------------
# Function:
# sstDepsDeploy_qsim
# Purpose:
# Build and install SST Qsim dependency, after patching.
# Inputs:
# None
# Outputs:
# Pass/fail
# Expected Results
# Deployed Qsim dependency
# Caveats:
# Depends on Boost, and Boost depends on an MPI selection, so both need to
# to be built before successful build of Qsim.
#-------------------------------------------------------------------------------
# Build and install the staged Qsim client (plus its bundled QEMU) into
# ${SST_DEPS_INSTALL_DEPS}. Returns make's exit status on failure.
sstDepsDeploy_qsim ()
{
sstDepsAnnounce -h $FUNCNAME -m "Deploying Qsim 0.1.4"
pushd ${SST_DEPS_SRC_STAGED_QSIM}
# qsim requires QEMU. NOTE: getqemu.sh expects wget to be installed
# and properly configured for any proxies
# UPDATE (2012-08-09): getqemu script has bad URL
# OLD URL: http://download.savannah.gnu.org/releases/qemu/qemu-0.12.3.tar.gz
# NEW URL: http://savannah.spinellicreations.com/qemu/qemu-0.12.3.tar.gz
# Fix getqemu.sh to use updated URL (keep a .bak of the original)
sed -i.bak 's/download.savannah.gnu.org\/releases/savannah.spinellicreations.com/' ./getqemu.sh
./getqemu.sh
pushd qemu-0.12.3
make
popd
# Configure qsim client installation root (read by qsim's Makefile)
export QSIM_PREFIX=${SST_DEPS_INSTALL_DEPS}
# Build and install qsim client
make install
retval=$?
if [ $retval -ne 0 ]
then
# bail out on error (restore cwd first so the caller is unaffected)
echo "ERROR: sstDep_qsim_0.1.4.sh: Qsim make install failure"
popd
return $retval
fi
popd
}
# Installation location of Qsim (installation root)
export SST_DEPS_INSTALL_QSIM=${SST_DEPS_INSTALL_DEPS}
#-------------------------------------------------------------------------------
# Function:
# sstDepsQuery_qsim
# Purpose:
# Query SST Qsim dependency. Export information about this installation.
# Inputs:
# None
# Outputs:
# Pass/fail
# Expected Results
# Exported Qsim dependency information
# Caveats:
# None
#-------------------------------------------------------------------------------
# Emit shell 'export' lines describing this Qsim installation (version and
# install prefix) so a caller can eval/source the output.
sstDepsQuery_qsim ()
{
    printf 'export SST_DEPS_VERSION_QSIM="%s"\n' "0.1.4"
    printf 'export SST_DEPS_INSTALL_QSIM="%s"\n' "${SST_DEPS_INSTALL_QSIM}"
}
| true |
bb1d52f7dad296e6fe1d610d79713a6e3f1e0eea | Shell | kahenteikou/Alter-WSL-FS | /env.sh | UTF-8 | 687 | 2.9375 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Build-environment settings shared by the Alter-WSL rootfs scripts.
ARCH="x86_64"
ROOTFS_VER="2020.06.01"
ROOTFS_FN="archlinux-bootstrap-${ROOTFS_VER}-${ARCH}.tar.gz"
ROOTFS_URL="http://mirrors.kernel.org/archlinux/iso/${ROOTFS_VER}/${ROOTFS_FN}"
OSRELEASE_URL="https://raw.githubusercontent.com/FascodeNet/alterlinux/dev-stable/channels/share/airootfs.any/usr/lib/os-release"
PAC_PKGS="base automake autoconf gcc make pkgconf which m4 groff grep bison findutils less nano sudo vim curl iputils alterlinux-keyring alterlinux-calamares aptpac libpwquality yay-bin fakeroot"
# Pick a downloader command for $DLR. Later checks overwrite earlier ones,
# so the effective preference order is: aria2c > wget > curl.
if type curl >/dev/null 2>&1 ;then
DLR="curl"
fi
if type wget >/dev/null 2>&1;then
DLR="wget"
fi
if type aria2c >/dev/null 2>&1; then
DLR="aria2c -x4"
fi
| true |
764794b342197575c6952f0bf97e8e14e56df236 | Shell | snehilk1312/Basic_Sysadmin_and_Pentesting | /Shell_Programming/24_hereDocument.sh | UTF-8 | 288 | 2.71875 | 3 | [] | no_license | #! /bin/bash
# Demonstration of here-documents: feeding multi-line literal text to a
# command's stdin. The heredoc below has 5 lines, so `wc -l` prints 5.
wc -l << EOF
only checking whether
the program is working
or not.If it's working
it will print 5, as first
output.
EOF
# A here document can also be used to print multiple lines, as follows:
cat << EOF
i am checking whether this
here document works as intended
or not.
EOF
| true |
29965a3a48e4a6c6af77e4dd1e4315a9d6d651e1 | Shell | MoneroOcean/xmrig_setup | /uninstall_moneroocean_miner.sh | UTF-8 | 879 | 3.578125 | 4 | [] | no_license | #!/bin/bash
# Uninstaller for the MoneroOcean miner: removes the systemd unit (when sudo
# is available), the ~/.profile PATH entries, the running process, and the
# $HOME/moneroocean directory.
VERSION=1.0
# printing greetings
echo "MoneroOcean mining uninstall script v$VERSION."
echo "(please report issues to support@moneroocean.stream email with full output of this script with extra \"-x\" \"bash\" option)"
echo
# Refuse to run without a sane HOME — everything below deletes under it.
if [ -z $HOME ]; then
echo "ERROR: Please define HOME environment variable to your home directory"
exit 1
fi
if [ ! -d $HOME ]; then
echo "ERROR: Please make sure HOME directory $HOME exists"
exit 1
fi
echo "[*] Removing moneroocean miner"
# Only touch the systemd unit when passwordless sudo works
# (-n: fail instead of prompting for a password).
if sudo -n true 2>/dev/null; then
sudo systemctl stop moneroocean_miner.service
sudo systemctl disable moneroocean_miner.service
rm -f /etc/systemd/system/moneroocean_miner.service
sudo systemctl daemon-reload
sudo systemctl reset-failed
fi
# Drop the PATH/profile lines the installer added and kill any running miner.
sed -i '/moneroocean/d' $HOME/.profile
killall -9 xmrig
echo "[*] Removing $HOME/moneroocean directory"
rm -rf $HOME/moneroocean
echo "[*] Uninstall complete"
| true |
3c45e58f3aba46e5460290e057e880e961519b5c | Shell | miniscruff/dotfiles | /settings/.local/bin/projects.sh | UTF-8 | 585 | 3.453125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# to use add to i3 bindings - current binding is Mod+q
# can alter project path or init file below
#
# Presents a rofi menu of every <owner>/<repo> directory two levels below
# PROJECT_PATH and opens a terminal in the selected one.
PROJECT_PATH=$HOME/projects
REPOS=()
# FIXED: build the owner/repo list with globs instead of parsing `ls` output
# (word-splitting `ls` breaks on names containing spaces — ShellCheck SC2012).
for OWNER_DIR in "$PROJECT_PATH"/*/; do
    [ -d "$OWNER_DIR" ] || continue
    OWNER=$(basename "$OWNER_DIR")
    for REPO_DIR in "$OWNER_DIR"*/; do
        [ -d "$REPO_DIR" ] || continue
        REPOS+=("$OWNER/$(basename "$REPO_DIR")")
    done
done
# FIXED: join with commas *between* entries; the original appended a trailing
# comma, which made rofi show a spurious empty row at the end of the menu.
ROFI_REPOS=$(IFS=,; printf '%s' "${REPOS[*]}")
SELECT=$(printf '%s' "$ROFI_REPOS" | rofi -dmenu -p "Repo" -sep ",")
# User pressed Escape / selected nothing.
if [ -z "$SELECT" ]; then
    exit 0
fi
echo "$SELECT"
i3 exec "$HOME/.local/bin/alacritty --working-directory $PROJECT_PATH/$SELECT"
| true |
3007ad8cf90b18e0d072b74913457a4ae714ec03 | Shell | Vinny1017/Student-Data---HR-System | /delete_data.sh | UTF-8 | 872 | 3.8125 | 4 | [] | no_license | #!/bin/bash
# Interactive deletion of records from the colon-separated student_data file.
myfile="./student_data"
clear
echo
echo "			Delete menu"
echo "			==================================="
echo
read -p "	Enter pattern to delete: " pattern
# grep the pattern in the file
result=$(grep $pattern $myfile)
# if result (the previous grep) is empty, no records found
if [ -z "$result" ];then
echo "	No records found"
exit
else
# print the results of the grep, ordered into columns on ':'
echo "	$result" | column -t -s":"
echo
# ask the user whether to delete the matching records
read -p "	Want to delete these records? (y)es or (n)o: " choice
case $choice in
# if yes, use sed to remove the lines that start with the pattern
# NOTE(review): the grep above matches the pattern *anywhere* in a line, but
# this sed only deletes lines that *start* with it — some records shown may
# survive the delete. Confirm which behaviour is intended.
["y"]) sed -i "/^$pattern/ d" $myfile
;;
["n"]) exit
;;
*) echo "	Error input"
esac
fi
echo
read -p "	Press ENTER to continue..."
exit
| true |
d3a5b50bca8f615efd55cfdeddb3e71822966f33 | Shell | nevesnunes/env | /tasks/linux-ctf.sh | UTF-8 | 1,402 | 2.96875 | 3 | [
"MIT"
] | permissive | #!/bin/sh
# Provision a Debian-based CTF workstation: install the package sets, tweak
# boot/login settings, and set up tooling (fzf, pwndbg, fonts, tint2, z).
set -eux
# Shared helpers: debloat, sync_debian_packages, sync_python_packages, sync_git.
. ./lib.sh
debloat
sync_debian_packages ./debian-ctf.txt
sync_debian_packages ./debian-essentials.txt
sync_debian_packages ./debian-graphical.txt
sync_python_packages ./python3-site-packages-ctf.txt
sync_git ./git-ctf.txt
sync_git ./git-essentials.txt
sudo locale-gen en_US.UTF-8
sudo update-locale
# Enable the magic SysRq key.
sudo sysctl -w kernel.sysrq=1
# Force GRUB to plain text console output (uncomments the keys if needed).
sudo sed -i '
s/^#\?\(GRUB_TERMINAL\)=.*/\1=console/g;
s/^#\?\(GRUB_GFXMODE\)=.*/\1=text/g
' /etc/default/grub
sudo grub-mkconfig -o /boot/grub/grub.cfg
# Auto-login the current user via lightdm (file is staged in $HOME, then
# moved into the lightdm config dir with sudo).
touch "/home/$USER/50-autologin.conf"
cat > "/home/$USER/50-autologin.conf" << EOF
[SeatDefaults]
autologin-user=$USER
autologin-user-timeout=0
EOF
sudo mv "/home/$USER/50-autologin.conf" /usr/share/lightdm/lightdm.conf.d/.
( cd ~/opt/fzf/ && yes | ./install; )
( cd ~/opt/pwndbg/ && sudo ./setup.sh; )
# Populate z
# (seed ~/.z with "path|rank|timestamp" entries for plain project directories)
find /home/"$USER" -maxdepth 3 -type d | \
grep -E -v '/(\.)|_[a-zA-Z0-9]' | \
grep -E -v '/opt/' | \
sort | uniq | xargs -d'\n' -I{} -n1 -r echo "{}|1|1" \
> /home/"$USER"/.z
mkdir -p ~/.local/share/fonts
( cd ~/.local/share/fonts && wget 'https://github.com/andreberg/Meslo-Font/raw/master/dist/v1.2.1/Meslo%20LG%20DZ%20v1.2.1.zip' && atool -x 'Meslo LG DZ v1.2.1.zip' && rm -f 'Meslo LG DZ v1.2.1.zip' )
# Point the user-level tint2 service at the installed tint2 binary.
sed -i 's%\(^ExecStart\)=.*%\1='$(command -v tint2)'%g' ~/.config/systemd/user/tint2.service
systemctl --user daemon-reload
systemctl --user enable tint2
| true |
ad6e5fc0ea98e6f3f43d445ad22115d24c95408a | Shell | lpimem/hlcsrv | /scripts/stop.sh | UTF-8 | 178 | 3.390625 | 3 | [] | no_license | #! /bin/bash
# Stop the running hlcsrv instance: read its PID from _run.pid next to this
# script, signal it, and remove the PID file. Report when nothing is running.
DIR=$(dirname "$(readlink -f "$0")")
PID_FILE="$DIR/_run.pid"
if [[ ! -f "$PID_FILE" ]]; then
    echo "Not running."
else
    kill "$(cat "$PID_FILE")"
    rm "$PID_FILE"
fi
| true |
0143fdaf7a8b090c645c35d2dce669ac26d90ee0 | Shell | SharathGowdru/Shell-Programming1 | /dictionary/dieroll.sh | UTF-8 | 1,498 | 3.640625 | 4 | [] | no_license | #!/bin/bash
# Roll a fair six-sided die until some face has come up 10 times, then report
# every face's roll count, which face reached 10 first, and which face(s)
# were rolled the fewest times.
#
# FIXED: the original was unrunnable — the case statement had lost its
# "1)".."5)" pattern labels, the sum5 test was missing its closing ']', and
# the first reporting for-loop was missing its 'do'. The original stopped
# when sumN == N*10, which is exactly 10 rolls of face N, so the logic below
# counts rolls directly with one associative array instead of six max_N
# variables and six sumN accumulators.
declare -A roll_count           # face -> number of times that face was rolled
for face in 1 2 3 4 5 6
do
    roll_count[$face]=0
done
max_reached=0                   # first face to be rolled 10 times
while true
do
    roll_die=$((1+RANDOM%6))
    roll_count[$roll_die]=$((roll_count[$roll_die] + 1))
    if [ "${roll_count[$roll_die]}" -eq 10 ]
    then
        max_reached=$roll_die
        break
    fi
done
# Preserve the original's reporting interface: die_roll_track holds the
# per-face counts indexed 1..6.
declare -A die_roll_track
for ((i=1;i<=6;i++))
do
    die_roll_track[$i]=${roll_count[$i]}
done
echo ${die_roll_track[@]}
echo "$max_reached rolled maximum of 10 times"
least_rolled_times=${die_roll_track[1]}
for ((i=2;i<=6;i++)) # To find least rolled number
do
    if [ $least_rolled_times -gt ${die_roll_track[$i]} ]
    then
        least_rolled_times=${die_roll_track[$i]}
    fi
done
for ((i=1;i<=6;i++)) # To find multiple least rolled numbers if any.
do
    if [ $least_rolled_times -eq ${die_roll_track[$i]} ]
    then
        echo "$i rolled minimum of $least_rolled_times times"
    fi
done
| true |
ec25e72406c9dab462d89788ce58fa8c384fa7f8 | Shell | o1-labs/snarky | /scripts/depends.sh | UTF-8 | 2,190 | 4.09375 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Install snarky's build dependencies for the detected OS / Linux distro.
set -e # Exit on error
set -u # Fail if an undefined variable is used
OS_NAME=$(uname)
if [ "$OS_NAME" = 'Linux' ]; then
  # Find the Linux distro
  if [ -n "$(command -v lsb_release)" ]; then
    DISTRO=$(lsb_release --short --id)
  elif [ -f "/etc/os-release" ]; then
    # FIXED: plain `grep NAME` also matched PRETTY_NAME / VERSION_CODENAME
    # lines, producing a multi-line DISTRO that never equalled 'Ubuntu' etc.
    DISTRO=$(grep '^NAME=' /etc/os-release | sed 's/NAME=//g' | tr -d '="')
  elif [ -f "/etc/debian_version" ]; then
    DISTRO="Debian"
  elif [ -f "/etc/redhat-release" ]; then
    DISTRO="Fedora"
  else
    DISTRO="Unknown"
  fi
  if [ "$DISTRO" = 'Ubuntu' ]; then
    # Install common dependencies
    sudo apt-get install build-essential cmake git libgmp3-dev libboost-all-dev libssl-dev
    # Install the right version of libprocps-dev for the release
    RELEASE=$(lsb_release -s --release)
    if [ "$RELEASE" = '18.04' ]; then
      sudo apt-get install libprocps-dev
    elif [ "$RELEASE" = '16.04' ]; then
      sudo apt-get install libprocps4-dev
    elif [ "$RELEASE" = '14.04' ]; then
      sudo apt-get install libprocps3-dev
    else
      # Try all of the different packages for libprocps-dev, in order of package recency
      sudo apt-get install libprocps-dev || sudo apt-get install libprocps4-dev || sudo apt-get install libprocps3-dev
    fi
  elif [ "$DISTRO" = 'Fedora' ]; then
    # Install common dependencies
    sudo yum install gcc-c++ cmake make git gmp-devel procps-ng-devel
  elif [ "$DISTRO" = 'Debian' ]; then
    # Install common dependencies (WARNING: Untested)
    sudo apt-get install build-essential cmake git libgmp3-dev libboost-all-dev libssl-dev libprocps-dev
  else
    echo "Unrecognised Linux distribution: $DISTRO"
  fi
elif [ "$OS_NAME" = 'Darwin' ]; then
  PACKAGES="gpatch opam cmake gmp pkg-config openssl libffi libsodium boost zlib libomp"
  # removing already installed packages from the list
  # NOTE(review): this removes $p as a *substring*; an installed package whose
  # name is contained in another (e.g. "gmp" in a hypothetical "gmp-extra")
  # would mangle the list — confirm acceptable for this package set.
  for p in $(env HOMEBREW_NO_AUTO_UPDATE=1 brew list); do
    PACKAGES=${PACKAGES//$p/}
  done;
  # only run if there's work to do ($PACKAGES contains a non-space character)
  if [[ $PACKAGES = *[![:space:]]* ]]; then
    yes | env HOMEBREW_NO_AUTO_UPDATE=1 brew install $PACKAGES
  else
    echo 'All brew packages have already been installed.'
  fi
else
  # FIXED: this message was single-quoted, so $OS_NAME was printed literally
  # instead of being expanded (compare the distro message above).
  echo "Unrecognised operating system: $OS_NAME"
fi
| true |
2653a86ed018379dcbc772a64a8eaf4544bd23d7 | Shell | gevgev/hh-viewership | /build-ec2.sh | UTF-8 | 771 | 2.9375 | 3 | [] | no_license | #!/bin/sh
# Cross-compile the three hh-viewership Go tools for Linux/EC2 and bundle
# them with the run scripts and MSO list into build-ec2/archive.zip.
# NOTE(review): the shebang is /bin/sh but the [[ ]] tests below are a
# bashism — run with bash (or change the shebang) where sh is dash.
set -x
mkdir build-ec2
cd build-ec2/
echo "Build cdwdatagetter"
GOOS=linux go build -v github.com/gevgev/cdwdatagetter
rc=$?; if [[ $rc != 0 ]]; then
echo "Build failed: cdwdatagetter"
cd ..
exit $rc;
fi
echo "Build aws-s3-uploader"
GOOS=linux go build -v github.com/gevgev/aws-s3-uploader
rc=$?; if [[ $rc != 0 ]]; then
echo "Build failed: aws-s3-uploader"
cd ..
exit $rc;
fi
echo "Build precondition"
GOOS=linux go build -v github.com/gevgev/precondition
rc=$?; if [[ $rc != 0 ]]; then
echo "Build failed: precondition"
cd ..
exit $rc;
fi
# Stage the pipeline scripts and MSO list next to the binaries, then archive.
echo "Copying script and mso list"
cp ../run-ubuntu-raw-data.sh run.sh
cp ../mso-list-full.csv mso-list.csv
cp ../run-pipeline.sh loop.sh
echo "Archiving"
zip archive.zip *
echo 'Success'
cd .. | true |
c1f78f56a9fe22999b8a60a42d4a9169cdd19be4 | Shell | kmcdermo/Timing | /TimingAnalyzer/macros/scripts/skimAndMerge/skimAndMerge_SignalGridPoint.sh | UTF-8 | 519 | 2.8125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Skim-and-merge driver for one signal grid point.
# Args: $1=label  $2=model  $3=gridpoint  $4=tune
# $inbase/$tmpbase/$outbase are provided by common_variables.sh.
source scripts/skimAndMerge/common_variables.sh
## input
label=${1}
model=${2}
gridpoint=${3}
tune=${4}
## other input vars
usePUWeights=1
## global vars
mcbase="${model}"
text="${mcbase}_${gridpoint}"
## directories needed
indir="${inbase}/${mcbase}_${gridpoint}_${tune}/${label}_${mcbase}_${gridpoint}_${tune}"
tmpdir="${tmpbase}/${mcbase}/${gridpoint}"
outdir="${outbase}/MC/${mcbase}/${gridpoint}"
# Hand off to the shared worker: <text> <indir> <tmpdir> <outdir> <usePUWeights>
./scripts/skimAndMerge/processSkimAndMerge.sh ${text} ${indir} ${tmpdir} ${outdir} ${usePUWeights}
| true |
b7a492acb9bb53f56f7f0613296e6d0c8bb551a9 | Shell | vcoatalem/wav_spectrum_visualisation | /create_samples_spectrograms.sh | UTF-8 | 744 | 3.359375 | 3 | [] | no_license | PROGRAM_NAME=$0
# Generate spectrogram PNGs for every .wav sample, split by speaker gender
# (samples/f, samples/m -> spectrograms/f, spectrograms/m).
# NOTE(review): assumes it is run from the repo root and that a Python
# virtualenv lives in ./env — confirm before relocating this script.
test ! -d env && echo "$PROGRAM_NAME: could not find env directory to source into" && exit 2
cd env
source bin/activate
cd ..
SPECTROGRAMS_DIR=spectrograms
SPECTROGRAMS_F_DIR="$SPECTROGRAMS_DIR/f"
SPECTROGRAMS_M_DIR="$SPECTROGRAMS_DIR/m"
SAMPLES_DIR=samples
F_DIR="$SAMPLES_DIR/f"
M_DIR="$SAMPLES_DIR/m"
# Start from a clean output tree.
rm -rf spectrograms
mkdir $SPECTROGRAMS_DIR 2>/dev/null
mkdir $SPECTROGRAMS_F_DIR
mkdir $SPECTROGRAMS_M_DIR
PLOTTING_PROGRAM=plotter/plot_csv.py
# The plotter writes <name>.wav.png into the CWD; move each batch into place.
FILES=$(find $F_DIR -name "*.wav")
for FILE in $FILES; do
python3 $PLOTTING_PROGRAM $FILE
mv *.wav.png $SPECTROGRAMS_F_DIR
done
FILES=$(find $M_DIR -name "*.wav")
for FILE in $FILES; do
python3 $PLOTTING_PROGRAM $FILE
mv *.wav.png $SPECTROGRAMS_M_DIR
done
| true |
0af1d8aa94cab13b03620c2cc57e561b0d3fb54e | Shell | jonchampagne/dusty-9-bot | /1.0/run_docker.sh | UTF-8 | 886 | 3.609375 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Write the bot credentials file, ensure pipenv is available, stop any
# previous instance, then install dependencies and run the bot, tracking its
# PID in ./pid.
mkdir -p config
echo "{\"Production\":\"$API_KEY\"}" > config/bot_credentials.json
#if [[ -v $API_KEY ]]
#then
#	if ! [ $API_KEY == "unset" ]
#	then
#		echo "Using provided API_KEY"
#		echo "{Production:\"$API_KEY\"} > bot_credentials.json"
#	else
#		echo "Error: API_KEY unset. Cannot run."
#		return 1
#	fi
#fi
# Installs or updates dependencies, then starts the program
# Make sure we've got pipenv installed
# FIXED: `[ -x \`which pipenv\` ]` collapses to the one-argument test `[ -x ]`
# (always true) when pipenv is missing, so the check never fired; and
# `return` is an error outside a function or sourced script — use `exit`.
if ! command -v pipenv >/dev/null 2>&1
then
	echo "Pipenv required! Please install pipenv."
	exit 1
fi
# Kill any other instance of this bot
if [ -f pid ]; then
	kill "$(cat pid)"
fi
# Install dependencies
pipenv install
echo
# Run the program in the background and record its PID.
pipenv run python ./bot &
echo $! > pid
wait $!
# Remove the pid file if it's still ours (a newer start may have replaced it)
if [ -f pid ]; then
	if [ "$(cat pid)" == "$!" ]; then
		rm pid
	fi
fi
| true |
1d761b34212f8d66d624b37b9d1970849a1843a7 | Shell | mandelsoft/kubedyndns | /hack/generate-code | UTF-8 | 487 | 2.5625 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
#
# SPDX-FileCopyrightText: 2019 SAP SE or an SAP affiliate company and Gardener contributors
#
# SPDX-License-Identifier: Apache-2.0

# Regenerate the Kubernetes deepcopy/client/informer/lister code.
#   $1 (BASE) - Go package base path for the generated client code
#   $2 (SPEC) - group:version spec passed to the generator
set -x
# Remove stale generator binaries.
# FIXED: guard against an unset GOPATH — the original expanded to
# "rm -f /bin/*-gen" when GOPATH was empty, which could delete files
# under /bin. ${GOPATH:?} aborts with a message instead.
rm -f "${GOPATH:?GOPATH must be set}/bin/"*-gen
PROJECT_ROOT=$(dirname "$0")/..
DIR=$(pwd)
BASE="$1"
SPEC="$2"
bash "${PROJECT_ROOT}"/vendor/k8s.io/code-generator/generate-internal-groups.sh \
  deepcopy,client,informer,lister,defaulter \
  "$BASE"/client \
  "$BASE"/apis \
  "$BASE"/apis \
  "$SPEC" \
  --go-header-file "$(dirname "$0")/LICENSE_BOILERPLATE.txt"
| true |
589e0f4425542570bb29b936ad20d14f08808cdf | Shell | hessamhz/HostingService | /Scripts/Admin/change_username.sh | UTF-8 | 1,190 | 3.203125 | 3 | [] | no_license | #! /bin/bash
# Rename hosting account $2 (old) to $1 (new): move the Unix account/home,
# then rewrite every "(user:value)" bookkeeping file and the vsftpd user list.
new=$1
old=$2
# Look up the user's records: each file holds lines of the form "(user:value)".
domain=$(grep -i "($old:" < /home/user_domain | cut -d ":" -f 2 | cut -d ")" -f 1)
email=$(grep -i "($old:" < /home/user_email | cut -d ":" -f 2 | cut -d ")" -f 1)
volume=$(grep -i "($old:" < /home/user_volume | cut -d ":" -f 2 | cut -d ")" -f 1)
passwd=$(grep -i "($old:" < /home/user_passwd | cut -d ":" -f 2 | cut -d ")" -f 1)
# Rename the Unix account and move its home directory.
usermod -l $new -d /home/$new -m $old
# Replace the old record with a new one in each bookkeeping file
# (filter into ./tmp, write back, append the renamed record).
grep -v "^($old:$domain)$" /home/user_domain > tmp
cat tmp > /home/user_domain
echo "($new:$domain)" >> /home/user_domain
grep -v "^$old$" /etc/vsftpd.userlist > tmp
cat tmp > /etc/vsftpd.userlist
echo "$new" >> /etc/vsftpd.userlist
grep -v "^($old:$email)$" /home/user_email > tmp
cat tmp > /home/user_email
echo "($new:$email)" >> /home/user_email
grep -v "^($old:$passwd)$" /home/user_passwd > tmp
cat tmp > /home/user_passwd
echo "($new:$passwd)" >> /home/user_passwd
grep -v "^($old:$volume)$" /home/user_volume > tmp
cat tmp > /home/user_volume
echo "($new:$volume)" >> /home/user_volume
# Re-own the user's web root and home, then restart the services.
userDir='/var/www/'
rootDir=$userDir$domain
chown $new:users $rootDir
chmod 775 $rootDir
chown $new:users /home/$new
chmod 775 /home/$new
systemctl restart httpd
systemctl restart vsftpd
| true |
73903d858c2f49a484f07e27939cbe1ee0403cef | Shell | bymathias/dotfiles | /bash/paths.bash | UTF-8 | 355 | 2.6875 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Prepend the standard system bin directories, then append each optional
# tool directory (RVM, Snap, dotfiles) only when it exists on this machine.
export PATH="/usr/local/bin:/usr/local/sbin:/usr/bin:$PATH"
for extra_dir in "$HOME/.rvm/bin" "/snap/bin" "$HOME/.dotfiles/bin"; do
    if [[ -d "$extra_dir" ]]; then
        export PATH="$PATH:$extra_dir"
    fi
done
unset extra_dir
| true |
5ba2f500ee24a5b7125c2f55d4e32aef6df4a91c | Shell | nesi/ARCS-systems | /rpms/SOURCES/scripts/gbuild/UpdateCerts.sh | UTF-8 | 1,094 | 3.703125 | 4 | [] | no_license | #!/bin/sh
# UpdateCerts.sh	APAC Client/Gateway Certificate Check/Update
# Graham Jenkins <graham@vpac.org> Feb 2007. Rev: 20070314
#
# Define 'fail' function, set environment, perform usage checks
fail () {
  echo "==> $@"; exit 1
}
. /etc/profile
# ProxyString is deliberately left unquoted below: it expands to two words
# ("-http-proxy <url>") or to nothing when no proxy is configured.
[ -n "$http_proxy" ] && ProxyString="-http-proxy $http_proxy" &&echo "==> Using Proxy: $http_proxy"
[ `id -un` = root ] || fail "You must be 'root' to run this program!"
vdt-version 2>/dev/null | grep Certificates >/dev/null || fail "CA-Certificate package not installed!"
cd $PACMAN_LOCATION && source setup.sh && cd .. || fail "Can't find Pacman!"
#
# Ascertain platform, perform the check/update, then fetch CRLs
grep Zod /etc/redhat-release >/dev/null 2>&1 && Platform=linux-fedora-4 || Platform=linux-rhel-4
echo "==> Performing: Certificate Check/Update"
# FIXED: the variable was misspelled "$ProxyStrin", so the proxy option
# defined above was never actually passed to pacman.
pacman -pretend-platform $Platform $ProxyString -update CA-Certificates
echo "==> Running: /opt/vdt/fetch-crl/share/doc/fetch-crl-2.6.2/fetch-crl.cron"
cd /tmp && nohup /opt/vdt/fetch-crl/share/doc/fetch-crl-2.6.2/fetch-crl.cron >/dev/null &
exit 0
| true |
20dd09b89fc52a1990fab9178873343ef276b7fa | Shell | RafalSladek/install_pool | /install_node.sh | UTF-8 | 190 | 2.75 | 3 | [] | no_license | #!/bin/bash -uex
# Install Node.js v6.9.2 via nvm for the current (non-root) user.
echo 'Install node with nvm'
if [[ $(whoami) == "root" ]]; then
  echo "You ran me as root! Do not run me as root!"
  exit 1
fi
# FIXED: nvm is a shell function sourced into the user's shell, not a binary,
# so `sudo nvm ...` fails with "command not found" — and elevating would
# contradict the root guard above. Run it directly as the current user.
nvm install v6.9.2
node -v
npm -v
| true |
ee4d6b9111530b3cfe4e32410f1ae80f6100ffff | Shell | kachowjs/fix-ubuntu-1404 | /main/app/utc/bin/configure.sh | UTF-8 | 198 | 2.734375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Resolve the directory containing this script (following symlinks via cd -P).
THE_BASE_DIR_PATH=$(cd -P -- "$(dirname -- "$0")" && pwd -P)
# Pull in shared helpers/settings for the fix-ubuntu-1404 scripts.
source $THE_BASE_DIR_PATH/_init.sh
echo 'configure utc start:'
# Tell Ubuntu the hardware clock is local time, not UTC (avoids clock skew
# when dual-booting with Windows).
sudo sed -i 's/UTC=yes/UTC=no/g' /etc/default/rcS
| true |
6a9d47170539c1312c9b934e736ffd902de1daef | Shell | anomen-s/anomen-kernel-config | /savara/customs/usr/local/sbin/pre-hibernate.sh | UTF-8 | 479 | 2.65625 | 3 | [] | no_license | #! /bin/sh
# Pre-hibernate hook: disable spurious wake sources, snapshot the kernel log,
# and free cached memory so the hibernation image fits a small swap.
# disable USB and LAN wakeup (writing a device name to /proc/acpi/wakeup
# toggles that device's wake-enable flag)
echo XHC | sudo tee /proc/acpi/wakeup
echo GLAN | sudo tee /proc/acpi/wakeup
#if ! grep -q /dev/sda3 /proc/swaps ; then  sudo swapon /dev/sda3 ; fi
# Save and clear the kernel ring buffer for post-resume debugging.
dmesg -c > /var/log/dmesg.hib.log
# free caches
# once was needed to avoid bug(?) when hibernating into small swap:
# writing (then deleting) a large file in tmpfs pressures the kernel into
# dropping caches / reclaiming memory before suspend-to-disk.
ZEROFILE=/dev/shm/zerofile.tmp
dd if=/dev/zero of=$ZEROFILE bs=1M count=8190
rm $ZEROFILE
sync
sleep 1
echo $0
date
# hibernate using:
# /usr/sbin/hibernate --force
| true |
e0174022bf912416caa59e9ec1003e6568705754 | Shell | c0sco/centos-cis-benchmark | /test/5.6.sh | UTF-8 | 468 | 2.75 | 3 | [
"MIT"
] | permissive | #!/bin/sh
# ** AUTO GENERATED **
# 5.6 - Ensure access to the su command is restricted (Scored)
# 5.6 "Ensure access to the su command is restricted (Scored)" Yes Server1 Workstation1

# Returns 0 when /etc/pam.d/su contains an effective
# "auth required pam_wheel.so use_uid" line, non-zero otherwise.
execute(){
  # FIXED: the original used \s* between the tokens, which also matched them
  # run together with no separating whitespace (e.g. "authrequired..."); the
  # fields must be separated by at least one whitespace character. The dot in
  # "pam_wheel.so" is now escaped so it only matches a literal dot.
  grep pam_wheel.so /etc/pam.d/su | grep -qE "^auth\s+required\s+pam_wheel\.so\s+use_uid\s*$" || return $?
}
test_serial_number="5.6"
test_name="Ensure access to the su command is restricted (Scored)"
scored="Yes"
server="Server1"
workstation="Workstation1"
important="Yes"
| true |
39ba58c0c193f2f0ceb23fa50e94e80aeb5ee1f7 | Shell | SlaSerX/zsh-autocomplete | /utility/.autocomplete.screen-space | UTF-8 | 270 | 2.546875 | 3 | [
"MIT"
] | permissive | #!/bin/zsh
# Compute how many screen rows lie below the cursor and store the result in
# $_autocomplete__screen_space.
zmodload -F zsh/terminfo b:echoti
.autocomplete.screen-space() {
# Local options only; extendedglob for the negative-index subscript below.
emulate -L zsh -o extendedglob
# Save the cursor (terminfo 'sc'), ask the terminal where the cursor is
# (DSR escape ESC[6n; the terminal answers "ESC[row;colR"), then restore it.
echoti sc
# Split the answer on ESC/'['/';' into $reply (-A), stopping at 'R' (-d R).
print -r $'\e[6n'; IFS=$'\e[;' read -Asd R
echoti rc
# reply[-2] is the row from the DSR answer; rows below = LINES - row.
_autocomplete__screen_space=$(( LINES - reply[-2] ))
}
.autocomplete.screen-space "$@"
| true |
3dd0679f447dbd4fd54f560177429f1aaa07e492 | Shell | h4ck3rm1k3/B-Translator | /install/db.sh | UTF-8 | 855 | 3.703125 | 4 | [] | no_license | #!/bin/bash
### create the database
# Interactively (re)creates the B-Translator MySQL database and grants a
# dedicated user, using defaults read from btranslator-config.sh.
### get the directory of the installation scripts
scripts=$(dirname $0)
### get the DB connection settings
. btranslator-config.sh
### confirm/modify the settings
# Each read offers the configured value as the default (kept when the user
# just presses Enter, via the ${var:-default} expansion).
echo "Give db_name, db_user and db_pass."
read -p "db_name [$db_name]: " dbname
dbname=${dbname:-$db_name}
read -p "db_user [$db_user]: " dbuser
dbuser=${dbuser:-$db_user}
read -p "db_pass [$db_pass]: " dbpass
dbpass=${dbpass:-$db_pass}
### save DB settings to the configuration file
db_name=$dbname
db_user=$dbuser
db_pass=$dbpass
. $scripts/save.sh
### create the database and user
# NOTE(review): the values are interpolated directly into SQL — a name or
# password containing quotes/semicolons breaks (or injects into) the script.
mysql_commands="
DROP DATABASE IF EXISTS $dbname;
CREATE DATABASE $dbname;
GRANT ALL ON $dbname.* TO $dbuser@localhost IDENTIFIED BY '$dbpass';
"
echo "$mysql_commands"
echo "Enter the mysql root password below."
echo "$mysql_commands" | mysql -u root -p
| true |
92471e0cb1500dde1b66783f40450e0caa615646 | Shell | lazrapp/package-webkiosk | /install.sh | UTF-8 | 591 | 3.265625 | 3 | [] | no_license | #!/bin/bash
# Helper: print a visually separated status heading.
print_status() {
    echo
    echo "## $1"
    echo
}

# Abort the script, printing an optional message.
# FIXED: bail used to ignore its argument, so `bail "Please run as root"`
# printed the generic error text; the argument now wins when given.
bail() {
    echo "${1:-Error executing command, exiting}"
    exit 1
}

# Echo a command, then run it (no error handling).
exec_cmd_nobail() {
    echo "+ $1"
    bash -c "$1"
}

# Echo and run a command; abort the whole install if it fails.
exec_cmd() {
    exec_cmd_nobail "$1" || bail
}

# This installer needs root (apt-get, raspi-config, edits under /etc).
# FIXED: [[ $EUID > 0 ]] is a *string* comparison inside [[ ]]; use -gt.
if [[ $EUID -gt 0 ]]
    then bail "Please run as root"
fi

# Install the kiosk packages and configure auto-login for the kiosk user.
setup() {
    exec_cmd "apt-get update -y"
    exec_cmd "apt-get install -y imagemagick lightdm unclutter chromium-browser"
    exec_cmd "raspi-config nonint do_boot_behaviour B4"
    # FIXED: the sed was wrapped in backticks, which executed it immediately
    # (bypassing exec_cmd's logging and error handling) and handed its empty
    # output to exec_cmd. Pass the command as a single-quoted string instead.
    exec_cmd 'sed /etc/lightdm/lightdm.conf -i -e "s/^autologin-user=.*/autologin-user=lazr-pck-webkiosk/"'
}
setup | true |
a66102ea7d8f3515550d84910b796776264aac35 | Shell | ECALELFS/ECALELF | /ZFitter/script/makeTable_2.sh | UTF-8 | 7,446 | 3.53125 | 4 | [] | no_license | #!/bin/bash
# Print the command-line help: mandatory options (regions file and the two
# fit-result directories) and optional ones (run ranges, common cut, cruijff).
usage(){
    echo "`basename $0`"
    echo "   --help"
    echo "   ------- Mandatory"
    echo "   --regionsFile arg"
    echo "   --outDirFitResMC arg"
    echo "   --outDirFitResData arg"
    echo "   ------- Optional"
    echo "   --runRangesFile arg"
    echo "   --commonCut arg"
    echo "   --cruijff"
}
# options may be followed by one colon to indicate they have a required argument
if ! options=$(getopt -u -o hf:s: -l help,regionsFile:,runRangesFile:,outDirFitResMC:,outDirFitResData:,commonCut:,cruijff,selEff: -- "$@")
then
# something went wrong, getopt will put out an error message for us
exit 1
fi
set -- $options
while [ $# -gt 0 ]
do
case $1 in
-h|--help) usage; exit 0;;
--regionsFile) regionsFile=$2; shift;;
--runRangesFile) runRangesFile=$2; shift;;
--commonCut) commonCut=$2; shift;;
--outDirFitResMC) outDirFitResMC=$2; shift;;
--outDirFitResData) outDirFitResData=$2; shift;;
-s|--step) STEP=$2; shift;;
--cruijff) varVar=cruijff;;
--selEff) SELEFF=y;;
(--) shift; break;;
(-*) usage; echo "$0: error - unrecognized option $1" 1>&2; usage >> /dev/stderr; exit 1;;
(*) break;;
esac
shift
done
#------------------------------ check mandatory options
if [ -z "${regionsFile}" ];then
exit 1
fi
if [ ! -r "${regionsFile}" ];then
exit 1
fi
if [ -z "${outDirFitResData}" -o -z "${outDirFitResMC}" ];then
echo "[ERROR] outDirFitResData or outDirFitResMC not defined"
exit 1
fi
if [ ! -d "${outDirFitResData}" -o ! -d "${outDirFitResMC}" ];then
echo "[ERROR] outDirFitResData or outDirFitResMC not found or not directories"
exit 1
fi
TYPE=0
case $TYPE in
0)
# scale
;;
1)
#sigmaCB
;;
2)
#sigmaCB/peakCB
;;
*)
exit 1
;;
esac
# Build the list of fit categories: one per region (non-comment lines of
# regionsFile), optionally crossed with every run range in runRangesFile.
regions=`cat $regionsFile | grep -v '#'`
if [ -n "${runRangesFile}" ];then
    #if [ -s "${runRangesFile}" ];then # -s means "size not zero"
    runRanges=`cat $runRangesFile | grep -v '#' | awk '{print $1}'`
    if [ -n "$JSON" ];then
	./script/selEff.sh --runRangesFile=$runRangesFile --json=$JSON
    fi
    for region in $regions
    do
	for runRange in $runRanges
	do
	    # run ranges are listed as MIN-MAX; category names use MIN_MAX
	    runRange=`echo $runRange | sed 's|-|_|'`
	    category=${region}"-runNumber_"${runRange}"-"$commonCut
	    categories="${categories} $category"
	done
    done
else
    for region in $regions
    do
	category=${region}"-"$commonCut
	categories="${categories} $category"
    done
fi
# Emit the table header (one column per quantity printed below), then one
# LaTeX table row per category, combining the data and MC .tex fit results.
echo "#category & events & DeltaM_data & DeltaM_MC & DeltaP & width_data & width_MC & rescaledWidth_data & rescaledWidth_MC & additionalSmearing & chi2data & chi2mc & events/lumi"
for category in $categories
do
    # Extract the runNumber_MIN_MAX token from the category name, if present.
    case $category in
	*runNumber*)
	    runrange=`echo $category | sed 's|.*runNumber|runNumber|;s|.*runNumber_\([0-9]*\)_\([0-9]*\).*|\1_\2|'`
	    ;;
	*)
	    unset runrange
	    ;;
    esac
    if [ -n "${runrange}" ];then
	runrange="-runNumber_${runrange}"
	runMin=`echo ${runrange} | cut -d '_' -f 2`
	runMax=`echo ${runrange} | cut -d '_' -f 3`
	#for runDependent mc
	# Map the data run range onto the single representative run number used
	# by the run-dependent MC samples; outside all windows use the plain
	# (non run-dependent) MC category.
	if [ "$runMin" -ge "190456" -a "$runMin" -le "196531" -a "$runMax" -le "198115" ];then runrange="-runNumber_194533_194533";
	elif [ "$runMin" -ge "198111" -a "$runMin" -le "203742" -a "$runMax" -le "203852" ];then runrange="-runNumber_200519_200519";
	elif [ "$runMin" -ge "203756" -a "$runMin" -le "208686" ];then runrange="-runNumber_206859_206859";
	else runrange="";
	fi
    fi
    categoryMC=`echo $category | sed "s|-runNumber_[0-9]*_[0-9]*|${runrange}|"`
    fileMC="${outDirFitResMC}/$categoryMC.tex"
    ##echo "###File MC is ${outDirFitResMC}/$categoryMC.tex"
    # Missing MC fit result is fatal; a missing data fit only skips the row.
    if [ ! -r "${fileMC}" ];then
	echo "[ERROR] ${fileMC} not found" >> /dev/stderr
	exit 1
    fi
    fileData="${outDirFitResData}/$category.tex"
    ##echo "###File Data is ${outDirFitResData}/$category.tex"
    if [ ! -r "${fileData}" ];then
	echo "[WARNING] ${fileData} not found: skipping this category" >> /dev/stderr
	echo "%$category & not fitted \\\\"
	continue
    fi
    category=`echo $category | sed "s|-${commonCut}||"`
    # Scalars parsed from the fit-result files ("key = value" lines).
    events="`grep nEvents $fileData | cut -d '=' -f 2 | awk '{printf(\"%.0f\",$1)}'`"
    chi2_Data="`grep chi2 $fileData | cut -d '=' -f 2 | awk '{printf(\"%.2f\",$1)}'`"
    chi2_MC="`grep chi2 $fileMC | cut -d '=' -f 2 | awk '{printf(\"%.2f\",$1)}'`"
    # events/lumi ratio per run range (4th column of runRangesFile is the lumi).
    if [ -n "${runRanges}" ];then
	runRange=`echo $category | sed 's|.*-runNumber_||;s|-.*||;s|_|-|'`
	lumi=`grep $runRange $runRangesFile | awk '{if(NF>3){ print $4} else{ print "0"};}'`
	selEff=`echo $events $lumi | awk '{if($2>0){ printf("%.4f \\\pm 0",$1/$2)} else{ print "--"};}'`
#	selEff=0
#	echo $runRange $runRangesFile
	#echo $events $lumi
    else
	selEff="0"
    fi
    ## DeltaM
    # With --cruijff the peak is reported as M_{Z} and Delta = M - 91.188 is
    # computed here; otherwise the .tex file already contains the Delta row.
    var=Delta
    if [ -n "${varVar}" ];then
	var="M_{Z}"
	M_Data="`grep ${var} $fileData | cut -d '&' -f 2 | sed 's|GeV.*|$|' | sed 's|\\$||g;s|\\\\pm||g;s|&||g'`"
	M_MC="`grep ${var} $fileMC | cut -d '&' -f 2 | sed 's|GeV.*|$|' | sed 's|\\$||g;s|\\\\pm||g;s|&||g'`"
	deltaM_Data=`echo $M_Data | awk '{printf("$ %.6f \\\pm %.6f $", $1-91.188, $2)}'`
	deltaM_MC=`echo $M_MC | awk '{printf("$ %.6f \\\pm %.6f $", $1-91.188, $2)}'`
    else
	deltaM_Data="`grep ${var} $fileData | cut -d '&' -f 2 | sed 's|GeV.*|$|'`"
	deltaM_MC="`grep ${var} $fileMC | cut -d '&' -f 2 | sed 's|GeV.*|$|'`"
    fi
    #old definition
    # DeltaP = (DeltaM_data - DeltaM_MC)/m_Z in percent, errors in quadrature.
    deltaP=`echo "${deltaM_Data} ${deltaM_MC}" | sed 's|\\$||g;s|\\\\pm||g;s|&||g' | awk '{print "$",(\$1-\$3)/91.188*100,"\\\\pm",sqrt($2 * $2 + $4 * $4)/91.188*100,"$"}'`
#    deltaP=`echo "${deltaM_Data} ${deltaM_MC}" | sed 's|\\$||g;s|\\\\pm||g;s|&||g' | awk '{print "$",(\$1-\$3)/\$3*100,"\\\\pm",sqrt($2 * $2 + $4 * $4)/91.188*100,"$"}'`
    # definition linear with injected variable
#    deltaP=`"${deltaM_Data} ${deltaM_MC}" | sed 's|\\$||g;s|\\\\pm||g;s|&||g' | awk '{print "$",(\$1/\$3 -1) *100,"\\\\pm",sqrt($2 * $2 + $4 * $4 * ($1*$1)/($3*$3) )/$3 *100,"$"}'`
    ## Width
    # Cruijff fits have separate left/right sigmas which are averaged here.
    var=sigma
    if [ -n "${varVar}" ];then
	var="sigma_{L}"
	width_Data_L="`grep ${var} $fileData | cut -d '&' -f 2 | sed 's|GeV.*|$|'`"
	width_MC_L="`grep ${var} $fileMC | cut -d '&' -f 2 | sed 's|GeV.*|$|'`"
	var="sigma_{R}"
	width_Data_R="`grep ${var} $fileData | cut -d '&' -f 2 | sed 's|GeV.*|$|'`"
	width_MC_R="`grep ${var} $fileMC | cut -d '&' -f 2 | sed 's|GeV.*|$|'`"
	width_Data="`echo ${width_Data_L} ${width_Data_R} | sed 's|\\$||g;s|\\\pm||g' | awk '{sigma_L=$1; errSigma_L=$2; sigma_R=$3; errSigma_R=$4; printf(\"$%.6f \\\pm %.6f $\", 0.5*(sigma_L+sigma_R), 0.5* sqrt(errSigma_L * errSigma_L + errSigma_R * errSigma_R ))}'`"
	width_MC="`echo ${width_MC_L} ${width_MC_R} | sed 's|\\$||g;s|\\\pm||g' | awk '{sigma_L=$1; errSigma_L=$2; sigma_R=$3; errSigma_R=$4; printf(\"$%.6f \\\pm %.6f $\", 0.5*(sigma_L+sigma_R), 0.5* sqrt(errSigma_L * errSigma_L + errSigma_R * errSigma_R ))}'`"
    else
	width_Data="`grep ${var} $fileData | cut -d '&' -f 2 | sed 's|GeV.*|$|'`"
	width_MC="`grep ${var} $fileMC | cut -d '&' -f 2 | sed 's|GeV.*|$|'`"
    fi
    ## rescaledWidth
    # Delegate the rescaled-width / additional-smearing columns to the awk helper.
    resolutionLine=`echo "$category events $deltaM_Data $deltaM_MC $deltaP $width_Data $width_MC xxx xxx" |sed 's|\\$||g;s|\\\pm||g;s|&||g' | awk -f awk/newResolution.awk`
#    echo $resolutionLine
    line="$category & $events & $deltaM_Data & $deltaM_MC & $deltaP & $width_Data & $width_MC $resolutionLine & \$ $chi2_Data \$ & \$ $chi2_MC \$ & \$ $selEff \$\\\\"
    echo $line
    # mettere la formattazione qui
done
| true |
6f1427521371d7e07d941d4c0479dc867992ba21 | Shell | karlpilkington/qos | /my_bettershaper.txt | UTF-8 | 5,402 | 3.15625 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Positional arguments: ACT is the action (start|stop|status|test), DEV the
# network interface to shape, BW the total outgoing bandwidth in kbit/s.
ACT="$1"; shift
DEV="$1"; shift
BW="$1"; shift

# Bug fix: the usage message omitted the leading action argument even though
# $1 is consumed as ACT above, so "myscript eth0 1000" silently shaped
# nothing. Also replaces the deprecated "[ expr -o expr ]" form with ||.
if [ ! "$ACT" ] || [ ! "$DEV" ] || [ ! "$BW" ] ; then
	echo "usage: $0 <start|stop|status|test> <interface> <kbit/s>"
	exit 1
fi

# Bandwidth fractions feeding the HTB class rate/ceil values below.
BW90=$((9*$BW/10))
BW80=$((8*$BW/10))
BW70=$((7*$BW/10))
BW20=$((2*$BW/10))
BW10=$((1*$BW/10))

# low priority OUTGOING traffic - you can leave this blankbit if you want
# (defaults cover common BitTorrent/Gnutella ports)
NOPRIOHOSTSRC=
NOPRIOHOSTDST=
NOPRIOPORTSRC="6881 6882 6883 6884 6885 6886 6887 6888 6889 6890 6891 6892 6893 4225 4915 6346"
NOPRIOPORTDST="6881 6882 6883 6884 6885 6886 6887 6888 6889 6890 6891 6892 6893 4225 4915 6346"

# high priority OUTGOING traffic (DNS plus game/voice ports)
HIPRIOHOSTSRC=
HIPRIOHOSTDST=66.132.16.130
HIPRIOPORTSRC="53 27960 27970 27990 1109"
HIPRIOPORTDST="53 27960 27970 27990 1109"

#########################################################

# "test": dump the derived configuration without touching tc.
if [ "$ACT" = "test" ] ; then
	echo "DEV ='$DEV'"
	echo "BW ='$BW'"
	echo "BW90='$BW90'"
	echo "BW10='$BW10'"
fi

# "status": show current qdisc/class statistics and quit.
if [ "$ACT" = "status" ]; then
	tc -s qdisc ls dev $DEV
	tc -s class ls dev $DEV
	exit
fi
# clean existing down- and uplinkbit qdiscs, hide errors
tc qdisc del dev $DEV root 2> /dev/null > /dev/null
# "stop" just leaves the interface with the default qdisc restored above.
if [ "$ACT" = "stop" ]
then
	exit
fi
# Set up HTB scheduler
# Unclassified traffic falls into class 1:20 (bulk) via "default 20".
tc qdisc add dev $DEV root handle 1: htb default 20
# High priority traffic gets 80% of the pipeline with a big burst
# Bulkbit traffic gets 10% of the pipeline then can borrow the rest
# low priority traffic gets same but at lower priority and lower burst
tc class add dev $DEV parent 1: classid 1:1 htb rate ${BW}kbit burst 8kbit cburst 8k
tc class add dev $DEV parent 1:1 classid 1:10 htb rate ${BW80}kbit ceil ${BW90}kbit burst 8kbit cburst 8kbit prio 1
tc class add dev $DEV parent 1:1 classid 1:20 htb rate ${BW20}kbit ceil ${BW80}kbit burst 4kbit cburst 1kbit prio 2
tc class add dev $DEV parent 1:20 classid 1:21 htb rate ${BW10}kbit ceil ${BW70}kbit burst 4kbit cburst 1kbit prio 3
tc class add dev $DEV parent 1:20 classid 1:22 htb rate ${BW10}kbit ceil ${BW70}kbit burst 4kbit cburst 1kbit prio 4
tc class add dev $DEV parent 1:20 classid 1:23 htb rate ${BW10}kbit ceil ${BW70}kbit burst 4kbit cburst 1kbit prio 5
tc class add dev $DEV parent 1:20 classid 1:24 htb rate ${BW10}kbit ceil ${BW70}kbit burst 4kbit cburst 1kbit prio 6
tc class add dev $DEV parent 1:1 classid 1:30 htb rate ${BW10}kbit ceil ${BW70}kbit burst 2kbit cburst 1kbit prio 7
# all get Stochastic Fairness:
tc qdisc add dev $DEV parent 1:10 handle 10: sfq perturb 10
tc qdisc add dev $DEV parent 1:20 handle 20: sfq perturb 10
tc qdisc add dev $DEV parent 1:21 handle 21: sfq perturb 10
tc qdisc add dev $DEV parent 1:22 handle 22: sfq perturb 10
tc qdisc add dev $DEV parent 1:23 handle 23: sfq perturb 10
tc qdisc add dev $DEV parent 1:24 handle 24: sfq perturb 10
tc qdisc add dev $DEV parent 1:30 handle 30: sfq perturb 10
# start filters
# Lower "prio" value on a filter means it is consulted earlier.
# TOS Minimum Delay (ssh, NOT scp) in 1:10:
tc filter add dev $DEV parent 1:0 protocol ip prio 10 u32 \
      match ip tos 0x10 0xff  flowid 1:10
# ICMP (ip protocol 1) in the interactive class 1:10 so we
# can do measurements & impress our friends:
tc filter add dev $DEV parent 1:0 protocol ip prio 11 u32 \
        match ip protocol 1 0xff flowid 1:10
# Prioritize small UDP packets under 256 bytes
tc filter add dev $DEV parent 1: protocol ip prio 12 u32 \
   match ip protocol 17 0xff \
   match u8 0x05 0x0f at 0 \
   match u16 0x0000 0xff00 at 2 \
   flowid 1:10
# prioritize small TCP packets (<64 bytes)
# XXX change ffc0 -> ff80 to allow up to 128 byte packets to be high priority
tc filter add dev $DEV parent 1: protocol ip prio 13 u32 \
   match ip protocol 6 0xff \
   match u8 0x05 0x0f at 0 \
   match u16 0x0000 0xff80 at 2 \
   flowid 1:10
# prioritize other stuff too
# Spread traffic over 1:21..1:24 by the low bit of source/destination port.
tc filter add dev $DEV parent 1: protocol ip prio 14 u32 match ip sport 0x0000 0x0001 match ip dport 0x0000 0x0001 flowid 1:21
tc filter add dev $DEV parent 1: protocol ip prio 15 u32 match ip sport 0x0001 0x0001 match ip dport 0x0000 0x0001 flowid 1:22
tc filter add dev $DEV parent 1: protocol ip prio 16 u32 match ip sport 0x0000 0x0001 match ip dport 0x0001 0x0001 flowid 1:23
tc filter add dev $DEV parent 1: protocol ip prio 17 u32 match ip sport 0x0001 0x0001 match ip dport 0x0001 0x0001 flowid 1:24
# Route the configured high-priority ports/hosts into the interactive class.
for a in $HIPRIOPORTDST; do
	tc filter add dev $DEV parent 1: protocol ip prio 18 u32 match ip dport $a 0xffff flowid 1:10
done
for a in $HIPRIOPORTSRC; do
	tc filter add dev $DEV parent 1: protocol ip prio 19 u32 match ip sport $a 0xffff flowid 1:10
done
for a in $HIPRIOHOSTSRC; do
	tc filter add dev $DEV parent 1: protocol ip prio 20 u32 match ip src $a flowid 1:10
done
for a in $HIPRIOHOSTDST; do
	tc filter add dev $DEV parent 1: protocol ip prio 21 u32 match ip dst $a flowid 1:10
done
# some traffic however suffers a worse fate
for a in $NOPRIOPORTDST; do
	tc filter add dev $DEV parent 1: protocol ip prio 22 u32 match ip dport $a 0xffff flowid 1:30
done
for a in $NOPRIOPORTSRC; do
	tc filter add dev $DEV parent 1: protocol ip prio 23 u32 match ip sport $a 0xffff flowid 1:30
done
for a in $NOPRIOHOSTSRC; do
	tc filter add dev $DEV parent 1: protocol ip prio 24 u32 match ip src $a flowid 1:30
done
for a in $NOPRIOHOSTDST; do
	tc filter add dev $DEV parent 1: protocol ip prio 25 u32 match ip dst $a flowid 1:30
done
# rest is 'non-interactive' ie 'bulk' and ends up in 1:20
tc filter add dev $DEV parent 1: protocol ip prio 26 u32 match ip dst 0.0.0.0/0 flowid 1:20
| true |
80410a26c7847fe6811013c7b522b85823e480ae | Shell | damienflament/.dotfiles | /.local/share/zsh/functions/zerror | UTF-8 | 333 | 3.546875 | 4 | [
"Unlicense"
] | permissive | readonly -g EXIT_SUCCESS=0
# Companion to EXIT_SUCCESS declared on the previous line; not used below.
readonly -g EXIT_FAILURE=1

# zerror message
#
# Prints an error message to STDERR.
#
# The caller name and line are prepended.
# Details: ${1:?...} aborts with the given message when no argument is
# supplied; "print -P" enables zsh prompt expansion so %F{red}/%f colour
# the message; caller (autoloaded) yields the "file:line" of the call site.
function zerror
{
    autoload caller
    local -r message="${1:?The message must be specified.}"
    print -P "$(caller):%F{red} $message%f" >&2
}
# This file is a zsh autoload function body: invoke with the function's args.
zerror "$@"
# vim: ft=zsh
| true |
26b46715875831aebdc36726810f7a66e35cffd3 | Shell | Palotie-lab/Finnish-Whole-genomes | /scripts/calculate_EX_WG_SNP_INDEL.sh | UTF-8 | 1,383 | 3.34375 | 3 | [] | no_license | #!/bin/bash
### PROGRAM TO SUBSET .VCF FILES IN SNP/INDELS EXOME/NON EXOMES AND OUTPUT STATISTICS ###
### Argument 1: Input .vcf.gz file
### Argument 2: Output .vcf file subset, it also output two additional files *_gatkreport_* and *_bcfreport_* with stats
### Argument 3: It is "INDEL" if interested in indels or "SNP" if want to extract SNPs
### Argument 4: It is "YES" if want to extract only exomes and "NO" otherwise
# Broad cluster environment setup ("use" comes from the useuse dotkit system).
source /home/unix/aganna/.my.bashrc
source /broad/software/scripts/useuse
use .zlib-1.2.8
#use Bcftools
#use Tabix
inD="$1"
outD="$2"
selectD="$3"
exomeD="$4"
# NOTE(review): $exomeD is unquoted here; with a missing 4th argument the
# test becomes "[ == YES ]" and errors out -- confirm callers always pass it.
if [ $exomeD == "YES" ]
then
# Exome mode: restrict SelectVariants to the Broad exome interval list.
java -Xmx72g -jar /home/unix/aganna/GenomeAnalysisTK.jar \
-R /humgen/gsa-hpprojects/GATK/bundle/current/b37/human_g1k_v37.fasta \
-T SelectVariants \
-V $inD \
-selectType $selectD \
-L /humgen/gsa-hpprojects/GATK/bundle/current/b37/Broad.human.exome.b37.interval_list \
-o $outD
else
if [ $exomeD == "NO" ]
then
# Whole-genome mode: same extraction without the interval restriction.
java -Xmx180g -jar /home/unix/aganna/GenomeAnalysisTK.jar \
-R /humgen/gsa-hpprojects/GATK/bundle/current/b37/human_g1k_v37.fasta \
-T SelectVariants \
-V $inD \
-selectType $selectD \
-o $outD
else
echo "The exome field should contain YES or NO"
fi
fi
# Compress and index the subset VCF.
bgzip $outD
tabix -f $outD.gz | true |
1928308da4685389ae1b2dd4b0f58e7ec40c0d60 | Shell | yarreg/werf | /tests/integration/build/stapel_image/git/stages/base.bats | UTF-8 | 3,407 | 3.515625 | 4 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | load ../../../../../helpers/common
# bats per-test setup: initialize an isolated werf home, then create a
# temporary test directory and cd into it (helpers come from helpers/common).
setup() {
  werf_home_init
  test_dir_create
  test_dir_cd
}
# bats per-test teardown: purge built werf stages, remove the temporary test
# directory, and tear down the isolated werf home.
teardown() {
  test_dir_werf_stages_purge
  test_dir_rm
  werf_home_deinit
}
# Print (not run) a shell pipeline that computes a single md5 checksum over
# all regular files below $1 (default "."), ignoring .git; used both locally
# and inside the built container so the two trees can be compared.
files_checksum_command() {
  echo "find ${1:-.} -xtype f -not -path '**/.git' -not -path '**/.git/*' | xargs md5sum | awk '{ print \$1 }' | sort | md5sum | awk '{ print \$1 }'"
}
# Run the checksum pipeline locally over directory $1 (default ".").
files_checksum() {
  eval "$(files_checksum_command ${1:-.})"
}
# Run the same checksum pipeline inside the image built by werf, over $1
# (default /app). The image name is scraped from "werf run --dry-run" output.
container_files_checksum() {
  image_name=$(werf run -s :local --dry-run | tail -n1 | cut -d' ' -f3)
  docker run --rm $image_name bash -ec "eval $(files_checksum_command ${1:-/app})"
}
# End-to-end check that werf routes git changes to the right stage:
# new config -> gitArchive, small change -> gitLatestPatch, large change
# -> gitCache, "[werf reset]" commit -> gitArchive again. After every build
# the local tree checksum must equal the /app checksum inside the image.
@test "gitArchive, gitCache and gitLatestPatch stages" {
  git init

  # check: file werf.yaml is added on gitArchive stage

  cat << EOF > werf.yaml
project: werf-test-stapel-image-git-stages-base
configVersion: 1
---
image: ~
from: ubuntu
git:
- to: /app
EOF

  git add werf.yaml
  git commit -m "Initial commit"

  run werf build --stages-storage :local
  [ "$status" -eq 0 ]
  [[ "$output" =~ "gitCache: <empty>" ]]
  [[ "$output" =~ "gitLatestPatch: <empty>" ]]
  [[ "$output" =~ "Git files will be actualized on stage gitArchive" ]]
  [[ "$output" =~ "Building stage gitArchive" ]]

  # second build must be fully cached
  run werf build --stages-storage :local
  [ "$status" -eq 0 ]
  [[ ! "$output" =~ "Building stage " ]]

  [[ "$(files_checksum $WERF_TEST_DIR)" == "$(container_files_checksum /app)" ]]

  # check: file test is added on gitLatestPatch stage
  date > test
  git add test
  git commit -m "Add file"

  run werf build --stages-storage :local
  [ "$status" -eq 0 ]
  [[ "$output" =~ "gitCache: <empty>" ]]
  [[ "$output" =~ "Git files will be actualized on stage gitLatestPatch" ]]
  [[ "$output" =~ "Use cache image for stage gitArchive" ]]
  [[ "$output" =~ "Building stage gitLatestPatch" ]]

  run werf build --stages-storage :local
  [ "$status" -eq 0 ]
  [[ ! "$output" =~ "Building stage " ]]

  [[ "$(files_checksum $WERF_TEST_DIR)" == "$(container_files_checksum /app)" ]]

  # check: file large is added on gitCache stage
  # (a ~1MiB change exceeds the patch-size limit, forcing gitCache)
  openssl rand -base64 $((1024*1024)) > large
  git add large
  git commit -m "Add large file"

  run werf build --stages-storage :local
  [ "$status" -eq 0 ]
  [[ "$output" =~ "gitLatestPatch: <empty>" ]]
  [[ "$output" =~ "Git files will be actualized on stage gitCache" ]]
  [[ "$output" =~ "Use cache image for stage gitArchive" ]]
  [[ "$output" =~ "Building stage gitCache" ]]

  run werf build --stages-storage :local
  [ "$status" -eq 0 ]
  [[ ! "$output" =~ "Building stage " ]]

  [[ "$(files_checksum $WERF_TEST_DIR)" == "$(container_files_checksum /app)" ]]

  # check: files are added on gitArchive stage (reset commit [werf reset]|[reset werf])
  git commit --allow-empty -m "[werf reset] Reset git archive"

  run werf build --stages-storage :local
  [ "$status" -eq 0 ]
  [[ "$output" =~ "gitCache: <empty>" ]]
  [[ "$output" =~ "gitLatestPatch: <empty>" ]]
  [[ "$output" =~ "Git files will be actualized on stage gitArchive" ]]
  [[ "$output" =~ "Building stage gitArchive" ]]

  run werf build --stages-storage :local
  [ "$status" -eq 0 ]
  [[ ! "$output" =~ "Building stage " ]]

  [[ "$(files_checksum $WERF_TEST_DIR)" == "$(container_files_checksum /app)" ]]
}
| true |
34ff2f1c1f074e8f42ed060215b0967a8695c96a | Shell | trimbakeshwar/shellScriptingProgram | /set_of_instruction/printDayOfWeek.sh | UTF-8 | 695 | 3.328125 | 3 | [] | no_license | #!/bin/bash -x
# Read a date from the user and print its day of the week (Gauss-style
# day-of-week formula, 0 = Sunday).
#
# Fixes over the original:
#  * "[ $m<1 ]" parsed "<1" as an input redirection, so the range checks
#    never compared anything; replaced with real numeric tests.
#  * the range checks needed "or", not "and" (a value cannot be both
#    below 1 and above the maximum), and the valid ranges are 1-12 / 1-31.
#  * on invalid input the weekday lookup no longer runs against an unset
#    result variable (the original always fell through to the if-chain).
#  * "wensday" typo corrected to "wednesday".

# day_name MONTH DAY YEAR -> print the weekday name for that date.
day_name() {
    local m=$1 d=$2 y=$3
    # Shift Jan/Feb to the end of the previous year, then count leap days.
    local y0=$(( y - (14 - m) / 12 ))
    local x=$(( y0 + y0 / 4 - y0 / 100 + y0 / 400 ))
    local m0=$(( m + 12 * ((14 - m) / 12) - 2 ))
    local dow=$(( (d + x + (31 * m0) / 12) % 7 ))
    local names=(sunday monday tuesday wednesday thursday friday saturday)
    echo "${names[dow]}"
}

read -p "enter month: " m
read -p "enter day: " d
read -p "enter year: " y

if ! [[ "$m" =~ ^[0-9]+$ ]] || (( m < 1 || m > 12 ))
then
    echo "enter month between 1 to 12"
elif ! [[ "$d" =~ ^[0-9]+$ ]] || (( d < 1 || d > 31 ))
then
    echo "enter day between 1 to 31"
elif ! [[ "$y" =~ ^[0-9]+$ ]]
then
    echo "enter a numeric year"
else
    day_name "$m" "$d" "$y"
fi
| true |
222c2933b6c394b83c2eb6f7e250dee3d6410c3c | Shell | avinashdesireddy/interlock-playground | /interlock-proxy-custom/watch.sh | UTF-8 | 786 | 3.5625 | 4 | [] | no_license | #!/bin/sh
# Inotify hook arguments: event code, watched directory, changed file name.
event="$1"
directory="$2"
file="$3"

# Trace every invocation with a timestamp.
echo $(date +%s) - $event - $directory - $file

case "$event" in
  #u|o|x) # Inotify problems - kill nginx and let swarm respawn us
  #  kill -9 1
  #  ;;
  w) # Any other change
    # may not want to have this copy
    # we need to exclude read and access because the copy triggers those
    # however with just W we weren't getting anything
    # an update being written into the volume seems to look
    # like c or w, or possibly dnrc. Our copying the file looks like ra0
    # On a write event: pick up the interlock-generated config, patch every
    # upstream "server" line with fail_timeout/max_fails, then validate and
    # hot-reload nginx.
    echo Updating /etc/nginx/nginx.conf
    cp /out/nginx/nginx.conf /etc/nginx/nginx.conf
    echo "watch.sh: adding fail_timeout and max_fails"
    sed -i -e '/server / s/;/ fail_timeout=0s max_fails=1000;/g' /etc/nginx/nginx.conf
    nginx -t
    nginx -s reload
    ;;
esac
| true |
5f5d12ce447cfdde30025567ddce2e4260a078ed | Shell | sloanahrens/devops-toolkit-test | /bash-scripts/legacy/destroy-legacy-stack.sh | UTF-8 | 1,037 | 3.40625 | 3 | [] | no_license | #!/bin/bash
set -e

# Helper functions (apply_legacy_templates, destroy_ec2_key_pair, ...) and
# the SOURCE_PATH/TF_INFRA_PATH/DESTROY_* variables come from these files.
source ${ROOT_PATH}/bash-scripts/devops-functions.sh
source ${ROOT_PATH}/legacy/legacy_environment.sh

if [ -d "${SOURCE_PATH}" ]; then
    echo "${SOURCE_PATH} found. Deleting deployment..."

    if [ -d "${TF_INFRA_PATH}" ]; then
        echo "${TF_INFRA_PATH} found. Deleting infrastructure..."

        # terraform won't destroy if we have incorrect (obfuscated) parameters
        apply_legacy_templates

        cd ${TF_INFRA_PATH}
        terraform init
        time terraform destroy --auto-approve

        echo "Removing deployment files..."
        cd ..
        rm -rf ${TF_INFRA_PATH}
    else
        echo "Deployment terraform path '${TF_INFRA_PATH}' does not exist, no infrastructure to delete. ;)"
    fi

    # Optional extra teardown steps, gated by environment flags.
    if [ "${DESTROY_KEY_PAIR}" = "true" ]; then
        destroy_ec2_key_pair
    fi

    if [ "${DESTROY_REMOTE_STATE}" = "true" ]; then
        destroy_remote_state_resources
    fi

else
    echo "Deployment terraform path '${TF_INFRA_PATH}' does not exist. So let's not delete it. ;)"
fi | true |
5f6c3cb7802d7bb24addf2556879c2004bbdd542 | Shell | lcj199211/ops | /update/update_svn.sh | UTF-8 | 930 | 3.34375 | 3 | [] | no_license | #!/bin/bash
# rsync working in tandem with an SVN update: push the repo checkout to a
# remote rsync daemon only when "svn up" actually pulled new commits.
chown -R nginx.nginx /data/svn && chmod -R 700 /data/svn
rsync_log=/data/logs/rsync_update_scripts.log
#speed='--bwlimit=5000'
progress='--progress'
delete='--delete'
# NOTE(review): $delete is defined but never used in the rsync invocation
# below -- confirm whether --delete was meant to be passed.
relRsync="rsync -vzrtopg ${speed} ${progress} --password-file=/etc/rsyncd/rsyncd.pass"
# Update the SVN working copy.
line_number=$(/usr/bin/svn up /data/svn/repo | wc -l)
# "svn up" prints more than 2 lines only when files actually changed.
if [ ${line_number} -gt 2 ];then
# write blank separator lines into the log
echo >> ${rsync_log}
echo >> ${rsync_log}
start_ctime=$(date +%s)
echo "$(date '+%F %T %s') ${0} ${@} 开始更新" >> ${rsync_log}
# sdk
${relRsync} --exclude="*.svn" --exclude="*.apk" --exclude="*.log" /data/svn/repo/sdk/ rsync@192.168.0.3::sdk | tee -a ${rsync_log}
end_ctime=$(date +%s)
echo "$(date '+%F %T %s') ${0} ${@} 结束更新 脚本用时:$((${end_ctime}-${start_ctime}))s " >> $rsync_log
else
echo "No file to update ..."
fi
| true |
5cdcc9efa035c3b8c46cc136c91983898f7a8f8d | Shell | utherp/ezfw | /sbin/heartbeat.sh | UTF-8 | 268 | 2.640625 | 3 | [] | no_license | #!/bin/bash
# Heartbeat reporter: scrape this box's eth0 MAC address and report it to the
# central EZFW heartbeat endpoint; response and errors land in the log files.
IFCONFIG=/sbin/ifconfig
# NOTE(review): assumes the MAC is the 5th whitespace-separated field of the
# "eth0" line of ifconfig output (classic net-tools "HWaddr" layout) -- this
# parsing breaks with newer ifconfig output formats; confirm on target distro.
HWMAC=`$IFCONFIG eth0 |grep eth0 | sed 's, *, ,g' | cut -d' ' -f 5`
wget -O /usr/local/ezfw/logs/last_heartbeat.log "http://server.cv-internal.com/ezfw/service/heartbeat.php?mac=$HWMAC" > /usr/local/ezfw/logs/heartbeat.log 2>&1
| true |
3d0a53e5a076a47ee80aee1ead1d834a61edcad7 | Shell | wisnuc/appifi-system | /install-scripts/ubuntu-16-04-3-amd64/preseed-install | UTF-8 | 1,348 | 3.578125 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
set -e

# Write a one-shot systemd unit that runs the wisnuc installer on the tty
# before getty, as part of a preseeded Ubuntu install. The heredoc delimiter
# is quoted ('EOF') so the unit file content is written verbatim.
cat <<'EOF' > /lib/systemd/system/wisnuc-installer.service
[Unit]
Description=Wisnuc Installer
Before=getty@tty1.service
After=multi-user.target

[Service]
Type=oneshot
ExecStartPre=/usr/bin/touch /tmp/debug-wisnuc-installer-pre
ExecStart=/bin/bash /usr/bin/wisnuc-installer
ExecStartPost=/usr/bin/touch /tmp/debug-wisnuc-installer-post
StandardInput=tty
StandardOutput=inherit
StandardError=inherit

[Install]
WantedBy=multi-user.target
EOF

# Write the installer itself: skip if appifi bootstrap markers already exist,
# otherwise fetch and run the install script; on success disable this unit
# and reboot (quoted heredoc again, so $URL etc. expand at run time, not now).
cat <<'EOF' > /usr/bin/wisnuc-installer
#!/bin/bash

URL=https://raw.githubusercontent.com/wisnuc/appifi-system/master/install-scripts/ubuntu-16-04-3-amd64/install-appifi.sh
SHA1=/wisnuc/bootstrap/appifi-bootstrap.js.sha1
UPDATE=/wisnuc/bootstrap/appifi-bootstrap-update.packed.js
LOG=/wisnuc-install.log

if [ -f $SHA1 ] || [ -f $UPDATE ]; then exit 0; fi

curl -s $URL | bash - 2>&1 | tee $LOG
if [ $? -eq 0 ]; then
  systemctl disable wisnuc-installer.service | tee -a $LOG
  systemctl is-enabled wisnuc-installer.service | tee -a $LOG
  read -n 1 -s -t 30 -p "WISNUC system successfully installed. Wait 30 seconds or hit any key to reboot system"
  reboot
else
  read -n 1 -s -t 30 -p "Failed to install WISNUC system, please see $LOG for detail. Wait 30 seconds or hit any key to continue"
fi
EOF

# Make the installer executable and arm the unit for the next boot.
chmod a+x /usr/bin/wisnuc-installer
systemctl enable wisnuc-installer.service
| true |
1e3c108cacb2632973323a27d14cc6decd3d4ff0 | Shell | ecmwf/eccodes | /tests/grib_ecc-1230.sh | UTF-8 | 1,726 | 2.984375 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/sh
# (C) Copyright 2005- ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
#
# In applying this licence, ECMWF does not waive the privileges and immunities granted to it by
# virtue of its status as an intergovernmental organisation nor does it submit to any jurisdiction.
#
# Regression test for ECC-1230: GRIB1 -> GRIB2 conversion of ensemble fields.
# include.ctest.sh provides tools_dir, ECCODES_SAMPLES_PATH and
# grib_check_key_equals.
. ./include.ctest.sh
label="prod_ECC-1230_test"
temp1=temp1.$label
temp2=temp2.$label
sample_grib1=$ECCODES_SAMPLES_PATH/GRIB1.tmpl

# paramId 228 is "Total precipitation" (=tp)
# ------------------------------------------
${tools_dir}/grib_set -s \
    type=pf,localDefinitionNumber=30,stream=enfo,\
perturbationNumber=10,numberOfForecastsInEnsemble=51,indicatorOfParameter=228 \
    $sample_grib1 $temp1
grib_check_key_equals $temp1 "stepTypeForConversion" "accum"
grib_check_key_equals $temp1 "paramId,shortName,productDefinitionTemplateNumber" "228 tp 11"

# Convert
# Accumulated ensemble fields must keep template 11 and the ensemble keys.
${tools_dir}/grib_set -s edition=2 $temp1 $temp2
grib_check_key_equals $temp2 "productDefinitionTemplateNumber" "11"
grib_check_key_equals $temp2 "numberOfForecastsInEnsemble,perturbationNumber" "51 10"

# Now check 2m temperature (not accumulated)
# ------------------------------------------
${tools_dir}/grib_set -s \
    type=pf,localDefinitionNumber=30,stream=enfo,\
perturbationNumber=10,numberOfForecastsInEnsemble=51,shortName=2t \
    $sample_grib1 $temp1
grib_check_key_equals $temp1 "productDefinitionTemplateNumber" "1"

# Convert
# Instantaneous ensemble fields use template 1 after conversion.
${tools_dir}/grib_set -s edition=2 $temp1 $temp2
grib_check_key_equals $temp2 "productDefinitionTemplateNumber" "1"
grib_check_key_equals $temp2 "numberOfForecastsInEnsemble,perturbationNumber" "51 10"

# Clean up
rm -f $temp1 $temp2
| true |
1dd3c67a2d3dee905556e8ba62afde1cd916b8ef | Shell | kuntaldevo/devo-appli | /provision/shell/perf-backup/syncFromHadoop.sh | UTF-8 | 3,714 | 3.4375 | 3 | [] | no_license | #!/bin/bash
# Environment and constants for the HDFS -> local backup sync.
export PATH=/usr/lib64/qt-3.3/bin:/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin:/root/bin:/root/bin:$PATH
# START is the wall-clock start time; elapsed time is reported at the end.
export START=`date +%s`
export ROOTDIR=/usr/local/backup
# Working-file names rotated by rotateOldFiles and rewritten each run.
export FILES='currentBackup removedRowOne sortedBackup currentHadoop sortedHadoop backupDiff HDFSSyncOutput HDFSSyncErrorOutput'
# Disable the GC-overhead abort and cap heap for hadoop CLI calls.
export HADOOP_CLIENT_OPTS="-XX:-UseGCOverheadLimit -Xmx4096m"
readonly PROGNAME=$(basename "$0")
readonly LOCKFILE_DIR=/tmp
readonly LOCK_FD=200
readonly LOG_FILE="/var/log/hdfs-duplicity.log"
# lock PREFIX [FD] — acquire a non-blocking flock on $LOCKFILE_DIR/PREFIX.lock
# using file descriptor FD (default $LOCK_FD). Returns 0 when the lock was
# obtained, 1 when another instance already holds it.
lock() {
    local prefix=$1
    local fd=${2:-$LOCK_FD}
    local lock_file=$LOCKFILE_DIR/$prefix.lock

    # create lock file
    # eval is required because the fd number in "exec N>file" is a variable.
    eval "exec $fd>$lock_file"

    # seize the lock
    # -n: fail immediately instead of blocking if the lock is taken.
    flock -n $fd \
        && return 0 \
        || return 1
}
# crash MESSAGE... — print MESSAGE and abort the script with status 1.
#
# Fix: the message is now joined with "$*" and echoed quoted, so runs of
# whitespace are preserved and glob characters in the message are no longer
# expanded against the current directory (the old unquoted `echo $error_str`
# did both).
crash() {
    local error_str="$*"
    echo "$error_str"
    exit 1
}
# Rotate the working-file set under $ROOTDIR: first delete the previous
# run's *.old copies, then age the current files by renaming them to *.old.
# Reads the globals $FILES (space-separated names) and $ROOTDIR.
rotateOldFiles() {
    local name
    # pass 1: drop stale .old files
    for name in $FILES; do
        rm -f $ROOTDIR/${name}.old
    done
    # pass 2: rename the current files out of the way
    for name in $FILES; do
        mv $ROOTDIR/${name} $ROOTDIR/${name}.old
    done
}
# Build sorted file listings for both sides (paths relative to "prod-e1")
# and diff them: backupDiff ends up holding the paths present locally but no
# longer present in HDFS, which removeOldFiles will delete.
createFileLists() {
    echo "Create list of sorted files already backed up"
    find $ROOTDIR/prod-e1/ |awk -F "prod-e1" '{print $2}' >$ROOTDIR/currentBackup; sed '1d' $ROOTDIR/currentBackup > $ROOTDIR/removedRowOne; cat $ROOTDIR/removedRowOne |sort -r >$ROOTDIR/sortedBackup
    echo "Create list of sorted files currently in hadoop"
    hadoop dfs -ls -R /user/datalibrary/prod-e1/ |awk -F "prod-e1" '{print $2}' >$ROOTDIR/currentHadoop; cat $ROOTDIR/currentHadoop |sort -r >$ROOTDIR/sortedHadoop
    echo "Diff the files currently in the backup with those currently in Hadoop"
    diff $ROOTDIR/sortedHadoop $ROOTDIR/sortedBackup |grep paxata | sed 's/> //' | sed 's/< //' >$ROOTDIR/backupDiff
}
# Delete local backup entries listed in $ROOTDIR/backupDiff (paths that no
# longer exist in HDFS). Directories are removed with rmdir (only if empty),
# regular files with rm -f; anything else is just reported.
# NOTE(review): `for PASSED in \`cat ...\`` word-splits, so paths containing
# spaces would be mishandled -- confirm HDFS paths never contain whitespace.
removeOldFiles() {
    echo "Remove files from current backup that are no longer in hadoop"
    for PASSED in `cat $ROOTDIR/backupDiff`
    do
        if [[ -d $ROOTDIR/prod-e1/$PASSED ]]; then
            echo "$PASSED is a directory"
            rmdir $ROOTDIR/prod-e1/$PASSED
        elif [[ -f $ROOTDIR/prod-e1/$PASSED ]]; then
            echo "$PASSED is a file"
            rm -f $ROOTDIR/prod-e1/$PASSED
        else
            echo "notADirectoryOrFile $PASSED"
        fi
    done
}
# Copy the prod-e1 tree from HDFS to $ROOTDIR, one top-level entry at a time
# (copying the whole tree in one hadoop call OOMed -- see the comment below),
# excluding the library and hive subtrees in the first pass and handling
# prod-e1/paxata/library separately. Output/errors accumulate in the
# HDFSSyncOutput / HDFSSyncErrorOutput working files; timestamps bracket each copy.
syncFromHadoop() {
    echo "Resync data from Hadoop"
    #DLEV - Remove 4-23-19 - You cannot sync ALL of hadoop, it will java lang oom and use all CPU forever
    #date +%s; hadoop dfs -copyToLocal /user/datalibrary/prod-e1 $ROOTDIR/ >$ROOTDIR/HDFSSyncOutput 2>$ROOTDIR/HDFSSyncErrorOutput; date +%s
    for i in `hdfs dfs -ls /user/datalibrary/prod-e1/paxata/ |grep -v 'prod-e1/paxata/library' |grep -v hive |awk -F ' ' '{print $8}' |grep prod-e1`
    do
        date +%s; hadoop dfs -copyToLocal $i $ROOTDIR/prod-e1/paxata/ >>$ROOTDIR/HDFSSyncOutput 2>>$ROOTDIR/HDFSSyncErrorOutput; date +%s
    done
    for i in `hdfs dfs -ls /user/datalibrary/prod-e1/paxata/library |awk -F ' ' '{print $8}' |grep prod-e1`
    do
        date +%s; hadoop dfs -copyToLocal $i $ROOTDIR/prod-e1/paxata/library/ >>$ROOTDIR/HDFSSyncOutput 2>>$ROOTDIR/HDFSSyncErrorOutput; date +%s
    done
    echo "Check that local backup now matches"
    #find $ROOTDIR/prod-e1/ |awk -F "prod-e1" '{print $2}' >$ROOTDIR/currentBackup; sed '1d' $ROOTDIR/currentBackup > $ROOTDIR/removedRowOne; cat $ROOTDIR/removedRowOne |sort -r >$ROOTDIR/sortedBackup
    #diff $ROOTDIR/sortedHadoop $ROOTDIR/sortedBackup
    export FINISH=`date +%s`
    echo "Time taken in seconds is `expr $FINISH - $START`"
}
# Run one backup cycle under an exclusive per-script lock: rotate the working
# files, rebuild the file lists, prune locally-deleted entries, then resync
# from HDFS.
main() {
    lock $PROGNAME \
        || crash "Only one instance of $PROGNAME can run at one time."
    rotateOldFiles
    createFileLists
    removeOldFiles
    syncFromHadoop
#    echo -e "\n\nSyncing to S3 at $(date)\n"
#    date +%s; duply hdfs backup --s3-use-multiprocessing | tee -a ${LOG_FILE}; date +%s
    # Fix: remove the lock file that lock() actually created
    # ($LOCKFILE_DIR/$PROGNAME.lock) instead of the hard-coded
    # /tmp/syncFromHadoop.sh.lock, which diverges as soon as the script is
    # renamed or invoked via a different path.
    rm -f "$LOCKFILE_DIR/$PROGNAME.lock"
}
main
| true |
e6dd4446293434347430aa5481483047e2a61232 | Shell | aavarghese/community-operators | /scripts/ci/run-deployment-tests.sh | UTF-8 | 488 | 3.296875 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Import CI environment variables (IS_TESTABLE, OP_PATH, OP_VER) emitted by
# the operators-env helper.
eval $(scripts/ci/operators-env)

# IS_TESTABLE unset: fall back to change detection via OP_PATH and run the
# operator install when a modified operator was found.
if [ -z "${IS_TESTABLE}" ];
then
  if [ -z "${OP_PATH}" ] ;
  then
      echo "No operator modification detected. Exiting."
      exit 0
  else
      echo "Detected modified Operator in ${OP_PATH}"
      echo "Detected modified Operator version ${OP_VER}"
  fi

  make operator.install OP_PATH="${OP_PATH}" OP_VER="${OP_VER}"
else
  # IS_TESTABLE=0 means the change set needs no deployment test.
  if [[ $IS_TESTABLE -eq 0 ]]; then
    echo "Nothing was changed, not running deployment tests."
    exit 0
  fi
fi
fi | true |
ff77b035e99431d003d70ef4d1e0ebce8baf8f29 | Shell | Altersoundwork/Random-Scripts | /00-soundwork-motd | UTF-8 | 867 | 3.59375 | 4 | [] | no_license | #!/bin/bash
#
# Custom MOTD.
# v0.4 - 31-07-2021
# dev: @Altersoundwork
#
clear

# Print header: distro description from lsb-release, falling back to the
# lsb_release utility, plus kernel/arch from uname.
[ -r /etc/lsb-release ] && . /etc/lsb-release
if [ -z "$DISTRIB_DESCRIPTION" ] && [ -x /usr/bin/lsb_release ]; then
	# Fall back to using the very slow lsb_release utility
	DISTRIB_DESCRIPTION=$(lsb_release -s -d)
fi
printf "Welcome to %s (%s %s %s)\n" "$DISTRIB_DESCRIPTION" "$(uname -o)" "$(uname -r)" "$(uname -m)"

# Print Neofetch output
neofetch

# Print SysInfo only when the 1-minute load average is below 1.0 per core.
cores=$(grep -c ^processor /proc/cpuinfo 2>/dev/null)
# Fix: when /proc/cpuinfo is unreadable, grep -c prints nothing (not "0"),
# which made the numeric test below fail with "unary operator expected".
[ -n "$cores" ] || cores=0
[ "$cores" -eq "0" ] && cores=1
threshold="${cores:-1}.0"
if [ $(echo "`cut -f1 -d ' ' /proc/loadavg` < $threshold" | bc) -eq 1 ]; then
	echo
	echo -n "  System information as of "
	/bin/date
	echo
	/usr/bin/landscape-sysinfo
else
	echo
	echo "  System information disabled due to load higher than $threshold"
fi
| true |
5169f5c91231a875f4f69c1fdddc345379d40cbe | Shell | outstand/docker-parallels-tools | /docker-entrypoint.sh | UTF-8 | 429 | 3.78125 | 4 | [] | no_license | #!/bin/bash
set -e

# Signal handler: stop the Parallels tools daemon (when its init script is
# present) and terminate the container's main process.
stop_tools () {
  if [ -x /etc/init.d/prltoolsd ]; then
    echo "Stopping tools..."
    /etc/init.d/prltoolsd stop
    sleep 1
  fi
  exit
}

# "start": unpack and install the Parallels tools, then idle forever so the
# container stays up; any trapped signal runs stop_tools.
if [ "$1" = 'start' ]; then
  trap stop_tools SIGHUP SIGINT SIGTERM
  echo "Installing parallels tools ..."
  cd /
  tar -Jxf prl-tools-lin.tar.xz
  cd prl-tools-lin
  ./install --install-unattended --verbose
  while true; do sleep 1000; done
else
  # any other argument: replace this shell with the requested command
  exec "$@"
fi
| true |
0342a78ca97d3c54574bc086100fa4f76bfce649 | Shell | jkells29/scripts | /evenlydivisible.sh | UTF-8 | 275 | 3.765625 | 4 | [] | no_license | #!/bin/bash
# List every number from 1 to 200 that is divisible by both 4 and 7.
#
# Improvements: the two `bc` subprocesses per iteration are replaced with
# shell arithmetic (no behavior change for integer inputs), and the
# divisibility predicate is extracted into a helper.

# Succeeds (status 0) when $1 is divisible by both 4 and 7, i.e. by 28.
is_divisible_by_4_and_7() {
    (( $1 % 4 == 0 && $1 % 7 == 0 ))
}

echo "Computing everything that's evenly divisible by 4 and 7"
for num in {1..200}
do
    if is_divisible_by_4_and_7 "$num"
    then
        echo $num " is divisible by 4 and 7"
    fi
done
| true |
6f784d232ba156fbd77619f08a0e183575321147 | Shell | rimaulana/eks-fargate-alb-ingress-sample | /irsa.sh | UTF-8 | 9,270 | 3.625 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Copyright 2019 Rio Maulana
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# source: https://github.com/rimaulana/eks-irsa-helper
# Print usage/help text for this script on stdout; defaults shown here match
# the fallbacks applied after argument parsing.
function print_help {
    cat <<EOF
usage: $0 [ensure|delete] [options]
Associate a service account with an IAM policy documents or IAM managed policy arns

-h,--help print this help
--cluster The name of the EKS cluster. (default: eks-fargate)
--region The EKS cluster region. (default: us-east-2)
--sa-name The name of the service account. (default: alb-ingress-controller)
--namespace The namespace of the service account. (default: kube-system)
--policy-document The name of the policy document file, can be use multiple times, if it is a URL, it will be downloaded first. For local file, use the file path without any file:// prefix
--policy-arn the arn of managed policy, can be use multiple times
EOF
}
# Manual long-option parser: recognized flags set their variables, anything
# else is stashed in POSITIONAL and restored afterwards so $1 becomes the
# operation (ensure|delete).
POSITIONAL=()
POLICY_DOCUMENTS=()
POLICY_ARNS=()

while [[ $# -gt 0 ]]; do
key="$1"

case $key in
    -h|--help)
    print_help
    exit 1
    ;;
    --cluster)
    CLUSTER_NAME="$2"
    shift
    shift
    ;;
    --region)
    REGION=$2
    shift
    shift
    ;;
    --sa-name)
    SERVICE_ACCOUNT_NAME=$2
    shift
    shift
    ;;
    --namespace)
    SERVICE_ACCOUNT_NAMESPACE=$2
    shift
    shift
    ;;
    --policy-document)
    POLICY_DOCUMENTS+=("$2")
    shift
    shift
    ;;
    --policy-arn)
    POLICY_ARNS+=("$2")
    shift
    shift
    ;;
    *) # unknown option
    POSITIONAL+=("$1") # save it in an array for later
    shift # past argument
    ;;
esac
done
set +u
set -- "${POSITIONAL[@]}" # restore positional parameters

# Defaults and derived names; ROLE_NAME encodes sa/cluster/region so the
# same service account in different clusters maps to distinct IAM roles.
OPERATION="$1"
CLUSTER_NAME="${CLUSTER_NAME:-eks-fargate}"
REGION="${REGION:-us-east-2}"
SERVICE_ACCOUNT_NAME="${SERVICE_ACCOUNT_NAME:-alb-ingress-controller}"
SERVICE_ACCOUNT_NAMESPACE="${SERVICE_ACCOUNT_NAMESPACE:-kube-system}"
ROLE_NAME="sa-$SERVICE_ACCOUNT_NAME-in-$CLUSTER_NAME-$REGION"
ACCOUNT_ID=$(aws sts get-caller-identity --query Account --output text)
# log MESSAGE — write MESSAGE to stdout prefixed with a local timestamp.
log(){
    local stamp
    stamp=$(date '+%Y-%m-%d %H:%M:%S %Z')
    printf '[%s] %s\n' "$stamp" "$1"
}
#######################################
# Detach every managed policy and delete every inline policy from
# $ROLE_NAME so the role itself can be deleted afterwards.
# Globals:   ROLE_NAME (read)
# Outputs:   AWS CLI output/errors to stdout/stderr
#######################################
function clean_policies_association() {
  local policy_arn policy_name

  # Detach managed policies. Policy ARNs never contain whitespace, so
  # reading them line-by-line from jq is safe; the original per-row
  # base64 encode/decode round-trip was unnecessary overhead.
  while IFS= read -r policy_arn; do
    [[ -n "$policy_arn" ]] || continue
    aws iam detach-role-policy --role-name "$ROLE_NAME" --policy-arn "$policy_arn"
  done < <(aws iam list-attached-role-policies --role-name "$ROLE_NAME" \
    | jq -r '.AttachedPolicies[].PolicyArn')

  # Delete inline policies attached directly to the role.
  while IFS= read -r policy_name; do
    [[ -n "$policy_name" ]] || continue
    aws iam delete-role-policy --role-name "$ROLE_NAME" --policy-name "$policy_name"
  done < <(aws iam list-role-policies --role-name "$ROLE_NAME" \
    | jq -r '.PolicyNames[]')
}
#######################################
# Create or update the IAM role for IRSA and associate it with the
# Kubernetes service account:
#   1. look up the cluster's OIDC issuer and build a trust policy,
#   2. create the role, or refresh its trust relationship if it exists,
#   3. install the supplied policy documents as fresh inline policies,
#      then remove the previously existing inline policies,
#   4. reconcile attached managed policies against POLICY_ARNS,
#   5. annotate the service account with the resulting role ARN.
# Globals read: CLUSTER_NAME, REGION, ROLE_NAME, ACCOUNT_ID,
#   SERVICE_ACCOUNT_NAME, SERVICE_ACCOUNT_NAMESPACE, POLICY_DOCUMENTS,
#   POLICY_ARNS
# Side effects: writes and removes trust-policy.json (and possibly
#   downloaded-iam-policy.json) in the current working directory.
#######################################
function ensure_irsa() {
log "Populating $CLUSTER_NAME properties using describe cluster API call"
ISSUER_URL=$(aws eks describe-cluster \
--name $CLUSTER_NAME \
--query cluster.identity.oidc.issuer \
--region $REGION \
--output text)
# Strip the "https://" scheme: the OIDC provider ARN and the trust-policy
# condition key both use the bare host/path form of the issuer URL.
ISSUER_HOSTPATH=$(echo $ISSUER_URL | cut -f 3- -d'/')
log "Getting current AWS account ID"
PROVIDER_ARN="arn:aws:iam::$ACCOUNT_ID:oidc-provider/$ISSUER_HOSTPATH"
# Trust policy limiting sts:AssumeRoleWithWebIdentity to tokens issued by
# this cluster's OIDC provider for exactly this namespace/service-account.
# (No comments inside the heredoc: its body is written verbatim to disk.)
cat > trust-policy.json << EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Principal": {
"Federated": "$PROVIDER_ARN"
},
"Action": "sts:AssumeRoleWithWebIdentity",
"Condition": {
"StringEquals": {
"${ISSUER_HOSTPATH}:sub": "system:serviceaccount:$SERVICE_ACCOUNT_NAMESPACE:$SERVICE_ACCOUNT_NAME"
}
}
}
]
}
EOF
log "Creating role"
# Capture stderr too: the error text is inspected below to distinguish
# "role already exists" from a genuine failure.
RESULT=$(aws iam create-role \
--role-name $ROLE_NAME \
--assume-role-policy-document file://trust-policy.json 2>&1)
if [[ $RESULT =~ .*EntityAlreadyExists.* ]]; then
log "Role arn:aws:iam::$ACCOUNT_ID:role/$ROLE_NAME exists"
log "Updating role trust relationships"
aws iam update-assume-role-policy \
--role-name $ROLE_NAME \
--policy-document file://trust-policy.json
log "Trust relationships updated"
elif [[ $RESULT =~ .*error.* ]]; then
# Any error should be printed out and exit the process
log $RESULT
exit 1;
else
log "Role arn:aws:iam::$ACCOUNT_ID:role/$ROLE_NAME created"
fi
log "Adding inline policies to the role"
# Snapshot the role's current inline policies BEFORE adding the new ones,
# so the old set can be removed afterwards without touching the new set.
ROLE_POLICIES=$(aws iam list-role-policies --role-name $ROLE_NAME)
POLICY_NUMBER=1
# Timestamp keeps new inline-policy names distinct from the old ones.
TIMESTAMP=$(date +%s)
for document in "${POLICY_DOCUMENTS[@]}"
do
if [[ "$document" =~ ^(http|https):\/\/ ]]; then
log "Downloading policy document from $document"
curl --silent $document > downloaded-iam-policy.json
DOC_NAME=downloaded-iam-policy.json
else
DOC_NAME=$document
fi
aws iam put-role-policy \
--role-name $ROLE_NAME \
--policy-name "$SERVICE_ACCOUNT_NAME-policy-$TIMESTAMP-$POLICY_NUMBER" \
--policy-document file://$DOC_NAME
POLICY_NUMBER=$((POLICY_NUMBER+1))
if [[ $DOC_NAME = "downloaded-iam-policy.json" ]]; then
rm -f downloaded-iam-policy.json
fi
done
log "Cleaning up old inline policies (if exists)"
for row in $(echo "${ROLE_POLICIES}" | jq -r '.PolicyNames[]'); do
aws iam delete-role-policy --role-name $ROLE_NAME --policy-name $row
done
# Collect the managed policies currently attached to the role; each row is
# base64-encoded so the JSON object survives word-splitting in the loop.
ATTACHED_POLICIES_RAW=$(aws iam list-attached-role-policies --role-name $ROLE_NAME)
ATTACHED_POLICIES=()
for row in $(echo "${ATTACHED_POLICIES_RAW}" | jq -r '.AttachedPolicies[] | @base64'); do
_jq() {
echo ${row} | base64 --decode | jq -r ${1}
}
ATTACHED_POLICIES+=("$(_jq '.PolicyArn')")
done
# Set difference: requested ARNs that are not attached yet.
TO_BE_ADDED=()
for item1 in "${POLICY_ARNS[@]}"; do
for item2 in "${ATTACHED_POLICIES[@]}"; do
[[ $item1 == "$item2" ]] && continue 2
done
# If we reached here, nothing matched.
TO_BE_ADDED+=( "$item1" )
done
log "Adding managed policies to the role"
for arn in "${TO_BE_ADDED[@]}"
do
aws iam attach-role-policy \
--role-name $ROLE_NAME \
--policy-arn $arn
done
# Set difference in the other direction: attached ARNs no longer requested.
TO_BE_REMOVED=()
for item1 in "${ATTACHED_POLICIES[@]}"; do
for item2 in "${POLICY_ARNS[@]}"; do
[[ $item1 == "$item2" ]] && continue 2
done
# If we reached here, nothing matched.
TO_BE_REMOVED+=( "$item1" )
done
log "Cleaning unused managed policies (if exists)"
for arn in "${TO_BE_REMOVED[@]}"
do
aws iam detach-role-policy \
--role-name $ROLE_NAME \
--policy-arn $arn
done
ALB_INGRESS_ROLE_ARN=$(aws iam get-role \
--role-name $ROLE_NAME \
--query Role.Arn --output text)
rm -f trust-policy.json
log "Associating IAM Role with service account $SERVICE_ACCOUNT_NAME in $SERVICE_ACCOUNT_NAMESPACE namespace"
kubectl annotate sa -n $SERVICE_ACCOUNT_NAMESPACE $SERVICE_ACCOUNT_NAME eks.amazonaws.com/role-arn=$ALB_INGRESS_ROLE_ARN --overwrite
echo;
}
#######################################
# Remove the role-arn annotation from the service account and delete the
# IAM role, detaching/deleting its policies first.
# Globals:   SERVICE_ACCOUNT_NAME, SERVICE_ACCOUNT_NAMESPACE, ROLE_NAME,
#            ACCOUNT_ID (all read)
# Returns:   exits 1 on an unexpected AWS error
#######################################
function delete_irsa() {
  log "Removing annotation on service account $SERVICE_ACCOUNT_NAME in namespace $SERVICE_ACCOUNT_NAMESPACE"
  kubectl annotate sa -n "$SERVICE_ACCOUNT_NAMESPACE" "$SERVICE_ACCOUNT_NAME" eks.amazonaws.com/role-arn- --overwrite
  log "Deleting IAM role $ROLE_NAME"
  local result
  # Capture stderr: the error text is inspected to tell "role is already
  # gone" apart from a real failure.
  result=$(aws iam get-role \
    --role-name "$ROLE_NAME" 2>&1)
  if [[ $result =~ .*NoSuchEntity.* ]]; then
    log "Role arn:aws:iam::$ACCOUNT_ID:role/$ROLE_NAME does not exists, nothing to delete"
  elif [[ $result =~ .*error.* ]]; then
    # Quote the message: the original passed it unquoted, and since log()
    # prints only $1, word-splitting truncated the AWS error to its first
    # word. Any error should be printed out and exit the process.
    log "$result"
    exit 1
  else
    log "Detaching and deleting any associated policies"
    clean_policies_association
    aws iam delete-role --role-name "$ROLE_NAME"
    log "Role arn:aws:iam::$ACCOUNT_ID:role/$ROLE_NAME deleted"
  fi
}
# Dispatch on the requested operation; anything other than ensure/delete
# is an error.
if [[ "$OPERATION" == "ensure" ]]; then
  ensure_irsa
elif [[ "$OPERATION" == "delete" ]]; then
  delete_irsa
else
  echo "Operation $OPERATION does not exist, exiting"
  exit 1
fi
e0c9029b3aced38085c6da987d5ee4c28d082a93 | Shell | algae-elbaum/website-sandbox | /server/startup.sh | UTF-8 | 871 | 3.625 | 4 | [] | no_license | #!/usr/bin/env bash
# Run with sudo for port 80
while :
do
case "$1" in
-p)
port="$2"
if ! [[ "$port" =~ ^[0-9]+$ ]]
then
echo "invalid port"
fi
shift 2
;;
-*)
echo "Error: Unknown option: $1" >&2
exit 1
;;
*) # No more options
break
;;
esac
done
# Don't like this.. assumes virtual env exists, is named venv, and is at the
# repo's top level. Should be a better way to get sudo to use the venv...
# I can think about it more later if it ever matters
# Resolve the directory containing this script (following symlinks via -P)
# so the relative venv/manage.py paths work from any working directory.
DIR="$( cd -P "$( dirname "$0" )" && pwd )"
# Launch the Django app under mod_wsgi-express using the venv's python,
# dropping privileges to user/group "pi".
# NOTE(review): the /media alias hard-codes /home/pi/website-sandbox --
# confirm this matches the deployment host.
# Falls back to port 80 when -p was not given (hence the sudo note above).
$DIR/../venv/bin/python $DIR/manage.py runmodwsgi \
--url-alias /media /home/pi/website-sandbox/server/media \
--user pi --group pi \
--reload-on-changes --log-to-terminal --startup-log \
--port ${port-80}
| true |
8118cec6a15b85fc510e0c00e1125e25fe4466b1 | Shell | dipendughosh/programs | /MSc_CompSc/mou/4th sem/vlsi-course/cbl_ncsu_edu benchmarks/CircuitSim90/tanner/tanner/cazmtest/helios/qsp2 | UTF-8 | 340 | 2.671875 | 3 | [
"LicenseRef-scancode-public-domain"
] | permissive | #! /bin/sh
# Run the h9002 simulator on NAME.sp ($1 is the basename without
# extension) and capture its output, but only when the input netlist
# exists and is non-empty.
if [ -s $1.sp ] ; then
h9002 $1.sp > $1.spout
fi
# Reformat the simulator output into a signal table ($1.spsig): print a
# fixed header, then for every 3-column data row in the block starting at
# the " volt" header line and ending at the first blank line, emit the
# voltage and both charge columns in scientific notation.
# NOTE(review): if $1.sp was missing above, this still processes whatever
# $1.spout exists from a previous run (or fails) -- confirm intended.
nawk '
BEGIN {print "*** CAzM simulation ***"
print ""
print "TRANSFER ANALYSIS"
print "Volt Qgs Qgd"}
/^ volt/,/^$/ { if($1!="volt" && $1!="" && NF==3 ) {
printf("%11.5e %11.5e %11.5e\n",$1,$2,$3)
}
}' < $1.spout > $1.spsig
| true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.