blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
adb44d7457ebe71355493e30086e14174765465d | Shell | hce-project/hce-bundle | /src/api/python/manage/dc-daemon_modes.sh | UTF-8 | 5,487 | 3.359375 | 3 | [] | no_license | #!/bin/bash
#Manage the modes of all periodic processing types and classes
#
# Usage:
#   dc-daemon_modes.sh <type>         - print the current mode(s) (GET)
#   dc-daemon_modes.sh <type> <0|1>   - disable/enable the mode(s) (SET)
#
# Output of the admin tool is written to ../log/<script-name>.log.
# FIX: the log file used to be "../log/$0.log"; whenever the script was
# invoked through a directory prefix (e.g. "./manage/dc-daemon_modes.sh" or
# an absolute path) that produced an invalid log path. Use the basename.

ADMIN="../bin/dtm-admin.py"
CONFIG="../ini/dc-admin.ini"
LOG="../log/$(basename "$0").log"

# All BatchTasksManager mode fields driven by the "all"/"crawling" types.
BTM_FIELDS="RegularCrawlingMode,ReturnURLsMode,IncrMode,PurgeMode,AgingMode"

# admin <GET|SET> <fields> <class>
# Invoke the admin tool with the shared config; caller redirects stdout.
admin() {
	"$ADMIN" --config="$CONFIG" --cmd="$1" --fields="$2" --classes="$3"
}

# setvals <comma-separated fields> <value>
# Turn "A,B,C" into "A:v,B:v,C:v" for a SET command.
setvals() {
	printf '%s' "$1" | sed "s/,/:$2,/g; s/\$/:$2/"
}

if [ "$1" = "" ]; then
	echo "Usage: $0 < all | crawling | processing | purging | recall | rcrawling | icrawling | recrawling | aging > [ 0 | 1 ]"
	echo "To get current state of all periodic processes:"
	echo "$0 all"
	echo ""
	echo "To disable all periodic processes:"
	echo "$0 all 0"
	echo ""
	echo "To enable all periodic processes:"
	echo "$0 all 1"
	echo ""
	echo "To disable crawling (all but not processing):"
	echo "$0 crawling 0"
	exit
fi

case "$1" in
all)
	if [ "$2" = "" ]; then
		#Get all modes
		{
			admin GET "$BTM_FIELDS" BatchTasksManager
			echo ""
			admin GET "ProcessingMode" BatchTasksManagerProcess
			echo ""
			admin GET "RecrawlSiteMode" SitesManager
		} > "$LOG"
	else
		#Set all modes
		{
			admin SET "$(setvals "$BTM_FIELDS" "$2")" BatchTasksManager
			echo ""
			admin SET "ProcessingMode:$2" BatchTasksManagerProcess
			echo ""
			admin SET "RecrawlSiteMode:$2" SitesManager
		} > "$LOG"
	fi
	;;
crawling)
	if [ "$2" = "" ]; then
		#Get crawling modes (all BatchTasksManager fields plus site recrawl)
		{
			admin GET "$BTM_FIELDS" BatchTasksManager
			echo ""
			admin GET "RecrawlSiteMode" SitesManager
		} > "$LOG"
	else
		#Set crawling modes
		{
			admin SET "$(setvals "$BTM_FIELDS" "$2")" BatchTasksManager
			echo ""
			admin SET "RecrawlSiteMode:$2" SitesManager
		} > "$LOG"
	fi
	;;
*)
	# Single-field types: map the CLI name to its field and owning class.
	case "$1" in
	processing) field=ProcessingMode; class=BatchTasksManagerProcess ;;
	purging) field=PurgeMode; class=BatchTasksManager ;;
	recall) field=ReturnURLsMode; class=BatchTasksManager ;;
	rcrawling) field=RegularCrawlingMode; class=BatchTasksManager ;;
	icrawling) field=IncrMode; class=BatchTasksManager ;;
	recrawling) field=RecrawlSiteMode; class=SitesManager ;;
	aging) field=AgingMode; class=BatchTasksManager ;;
	*) exit ;; # unknown type: the original silently did nothing
	esac
	if [ "$2" = "" ]; then
		admin GET "$field" "$class" > "$LOG"
	else
		admin SET "$field:$2" "$class" > "$LOG"
	fi
	;;
esac
| true |
d0ed528ee58b1659b97e4bdaf469c9f130a3d165 | Shell | lxsang/antix | /packages/mkwpasupplicant.sh | UTF-8 | 505 | 2.875 | 3 | [
"MIT"
] | permissive | #! /bin/bash
# Build and install wpa_supplicant 2.7 for the antiX target filesystem.
# This package requires libressl and libnl.
set -e
# Pull in the build environment and cross-toolchain settings; these are
# expected to define ANTIX_BASE, ANTIX_PKG_BUILD and ANTIX_ROOT — confirm
# in ../env.sh / ../toolchain.sh.
. ../env.sh
. ../toolchain.sh
dir="wpa_supplicant-2.7"
cd ${ANTIX_BASE}/source
# Download the release tarball only if it is not already cached locally.
if [ ! -f "${dir}.tar.gz" ]; then
# download it
wget https://w1.fi/releases/${dir}.tar.gz
fi
tar xvf ${dir}.tar.gz
cd ${dir}/wpa_supplicant
# Start from the shipped default build configuration.
cp defconfig .config
make -j 8
#make CC=arm-linux-gnueabi-gcc
# Stage the install into the package build dir, then copy the wpa_* tools
# into the target root filesystem's /usr/bin.
make install DESTDIR=${ANTIX_PKG_BUILD}
cp -avrf ${ANTIX_PKG_BUILD}/usr/local/sbin/wpa_* ${ANTIX_ROOT}/usr/bin
cd ${ANTIX_BASE}/source
rm -rf ${dir} | true |
2872ca79029ecea682f564e46d2e772611a6ff6c | Shell | cfoust/cawnfig | /configs/keynav/install_x | UTF-8 | 151 | 2.59375 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Resolve the directory containing this script so it works regardless of
# the caller's working directory.
THIS_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
# ensure_package / ensure_link are presumably provided by the dotfiles
# framework that sources this script — not defined here; confirm before
# running this file standalone.
ensure_package "keynav"
ensure_link "$THIS_DIR/keynavrc" "$HOME/.keynavrc"
| true |
9666213480f5d97596ff88bf22b9b4fe75dba222 | Shell | nicktelford/auriel | /src/cmd-remove.sh | UTF-8 | 357 | 3.234375 | 3 | [] | no_license | #!/usr/bin/env bash
#
# Removes one or more packages
#
# Each argument is a package name; for every one we deinit and remove the
# corresponding git submodule, delete its module metadata, and uninstall it
# via pacman. FIX: the loop used an unquoted $@, which word-split and
# glob-expanded the arguments; "$@" preserves each argument exactly.

check_help "$1" "remove"
require_sudo

for pkg in "$@"
do
  (process "Removing $pkg" && \
  call_git submodule deinit --force "$pkg" && \
  call_git rm "$pkg" && \
  run "rm -rf $AURIEL_REPO_DIR/.git/modules/$pkg" && \
  run "sudo pacman -R $pkg" && \
  ok) || fail
done
| true |
8f2ba38fd997854fc9df499f088968fc637e34ea | Shell | preesm/preesm-apps | /scripts/preesm_system.sh | UTF-8 | 3,988 | 3.96875 | 4 | [] | no_license | #!/bin/bash -u
#apt install git cmake make gcc g++ build-essential libsdl2-* xvfb openjdk-8-jdk maven wget p7zip-full ffmpeg
# Echo the major version number of the available JDK (e.g. "8", "11"),
# or "no_java" when neither `java` on PATH nor $JAVA_HOME/bin/javac exists.
function preesm_jdk_version() {
local result=
local java_cmd
# Prefer `java` from PATH; fall back to $JAVA_HOME/bin/javac.
if [[ -n $(type -p java) ]]
then
java_cmd=java
elif [[ (-n "$JAVA_HOME") && (-x "$JAVA_HOME/bin/javac") ]]
then
java_cmd="$JAVA_HOME/bin/javac"
fi
# Split `-version` output on newlines only (not spaces).
local IFS=$'\n'
# remove \r for Cygwin
local lines=$("$java_cmd" -Xms32M -Xmx32M -version 2>&1 | tr '\r' '\n')
if [[ -z $java_cmd ]]
then
result=no_java
else
# Take the first line containing `version "..."` and extract the quoted
# version string.
for line in $lines; do
if [[ (-z $result) && ($line = *"version \""*) ]]
then
local ver=$(echo $line | sed -e 's/.*version "\(.*\)"\(.*\)/\1/; 1q')
# on macOS, sed doesn't support '?'
# Old scheme "1.X.y" -> major is X; new scheme "X.y.z" -> major is X.
if [[ $ver = "1."* ]]
then
result=$(echo $ver | sed -e 's/1\.\([0-9]*\)\(.*\)/\1/; 1q')
else
result=$(echo $ver | sed -e 's/\([0-9]*\)\(.*\)/\1/; 1q')
fi
fi
done
fi
echo "$result"
}
# Exit with installation instructions when the detected JDK major version
# is below 8.
# NOTE(review): when preesm_jdk_version yields "no_java", `[ no_java -lt 8 ]`
# is an arithmetic-test error that evaluates false, so the missing-Java case
# silently passes this check — probably unintended; confirm.
function preesm_check_java_version {
v="$(preesm_jdk_version)"
if [ $v -lt 8 ]; then
cat << "EOF"
Eclipse requires Java 8 or higher. On debian like systems (as root) :
$ apt-get install openjdk-8-jdk openjdk-8-source
$ JVM=`update-java-alternatives -l | grep 1.8 | cut -d" " -f 1 | head -n 1`
$ update-java-alternatives -s $JVM
On other Linux distributions, Windows and MacOSX systems, please
visit http://www.oracle.com/technetwork/java/javase/downloads/index.html
EOF
exit 1
fi
}
# _preesm_require_cmd <command> <error message>
# Verify <command> is on PATH; otherwise print the message (echo -e, so the
# embedded \n is expanded) and exit the shell with status 1.
_preesm_require_cmd() {
  which "$1" &> /dev/null
  [ $? != 0 ] && echo -e "$2" && exit 1
  return 0
}

# _preesm_require_lib <library name> <error message>
# Verify a shared library is registered with the dynamic linker; otherwise
# print the message and exit the shell with status 1.
_preesm_require_lib() {
  /sbin/ldconfig -p | grep "$1" &> /dev/null
  [ $? != 0 ] && echo -e "$2" && exit 1
  return 0
}

# Check every build/runtime dependency of the Preesm apps; exits on the
# first missing one with an Ubuntu install hint. Refactored from eleven
# copy-pasted which/ldconfig stanzas into the two helpers above; the checks,
# their order, and the exact messages are unchanged.
function preesm_check_system() {
  preesm_check_java_version
  _preesm_require_cmd git "Error: requires git to fetch Preesm and CLI\nOn Ubuntu: sudo apt install git"
  _preesm_require_cmd mvn "Error: requires maven to build Preesm\nOn Ubuntu: sudo apt install maven"
  _preesm_require_cmd Xvfb "Error: requires Xvfb to run graphical apps\nOn Ubuntu: sudo apt install xvfb"
  _preesm_require_cmd cmake "Error: requires CMake to build apps\nOn Ubuntu: sudo apt install cmake"
  _preesm_require_cmd gcc "Error: requires GCC to build apps\nOn Ubuntu: sudo apt install gcc build-essential"
  _preesm_require_cmd g++ "Error: requires G++ to build Spider apps\nOn Ubuntu: sudo apt install g++ build-essential"
  _preesm_require_cmd wget "Error: requires wget to fetch apps data\nOn Ubuntu: sudo apt install wget"
  _preesm_require_cmd 7z "Error: requires p7zip-full to fetch apps data\nOn Ubuntu: sudo apt install p7zip-full"
  _preesm_require_cmd ffmpeg "Error: requires ffmpeg to convert app data\nOn Ubuntu: sudo apt install ffmpeg"
  _preesm_require_lib libSDL2_ttf "Error: requires lib SDL2 ttf as apps dependencies\nOn Ubuntu: sudo apt install libsdl2-*"
  _preesm_require_lib libSDL2_image "Error: requires lib SDL2 image as apps dependencies\nOn Ubuntu: sudo apt install libsdl2-*"
  return 0
}
# Echo the number (99 down to 1) of an X display that has no lock file
# under /tmp (i.e. no /tmp/.X<n>-lock), or nothing if all are taken.
function preesm_find_free_display_number {
  local lock used candidate d busy
  used=""
  # Collect the display number of every existing lock file; a glob with an
  # existence check replaces the find|rev|cut|colrm pipeline.
  for lock in /tmp/.X*-lock; do
    [ -f "$lock" ] || continue
    lock=${lock##*/.X}          # drop the "/tmp/.X" prefix
    used="$used ${lock%-lock}"  # drop the "-lock" suffix
  done
  for candidate in {99..1}; do
    busy=NO
    for d in $used; do
      if [ "$d" == "$candidate" ]; then
        busy=YES
        break
      fi
    done
    if [ "$busy" == "NO" ]; then
      echo $candidate
      return
    fi
  done
}
# Start a background Xvfb server on the first free X display.
# Side effects: exports DISPLAY for subsequent graphical commands and
# PREESM_XVFBPID for later shutdown via preesm_kill_xvfb.
function preesm_start_xvfb() {
PREESM_XDN=$(preesm_find_free_display_number)
export DISPLAY=:${PREESM_XDN}.0
/usr/bin/Xvfb :${PREESM_XDN} -ac -screen 0 1280x1024x16&
export PREESM_XVFBPID=$!
echo " -- Start Xvfb on $PREESM_XDN (Pid = $PREESM_XVFBPID)"
}
# Stop the background Xvfb started by preesm_start_xvfb by sending SIGINT
# to the PID recorded in PREESM_XVFBPID.
function preesm_kill_xvfb() {
  printf ' -- Kill Xvfb (Pid = %s)\n' "$PREESM_XVFBPID"
  kill -2 "$PREESM_XVFBPID"
}
| true |
2976741d9819d5f538f96dc3daed437011cd7fce | Shell | vod-ka/kali_upgrade_script | /daliy_upgrade.sh | UTF-8 | 2,907 | 3.703125 | 4 | [] | no_license | #!/bin/bash
# @Author: Aliao
# @Repository: https://github.com/vod-ka
# @Date: 2021-03-09 16:10:09
# @Last Modified by:   Aliao
# @Last Modified time: 2021-03-09 19:29:21
# Upgrade the Kali system and clean out old packages.
PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:~/bin
export PATH
# Timestamp pieces used to name the per-run log and its year/month folder.
cutimer=$(date +'%Y%m%d%H%M')
cumonth=$(date +%m)
cuyear=$(date +%Y)
udlog="system_update_$cutimer.log"
basedst="$HOME/update_log"
logdst="${basedst}/${cuyear}/${cumonth}"
# Package manager used by Action; temporarily overridden to apt-get where
# aptitude cannot be used (see check_aptitude / common_user).
kcmd="aptitude"
cuid=$(id -u)
#If you're running the script as a normal user, you'll need to assign your Password to the Password variable
# SECURITY NOTE(review): storing the sudo password in plain text here leaks
# it to anyone who can read this file; prefer passwordless sudo rules for
# the specific package-manager commands instead.
Password="hjkl;'" #Your password
# Print the argument in bright blue. printf's %b interprets backslash
# escapes (e.g. "\n") in the argument, matching the original `echo -e`.
Blue(){
  printf '\033[34;01m%b\033[0m\n' "$1"
}
# Print the argument in bright red; same escape handling as Blue.
Red(){
  printf '\033[31;01m%b\033[0m\n' "$1"
}
# Ensure the dated log directory exists and open this run's log file.
# If $logdst already exists, write a "running" banner to $logdst/$udlog
# (truncating it); otherwise create the directory tree and record the
# event in $basedst/error.log.
check_dst(){
if [ -d "$logdst" ]
then
Blue "-------------------------\nThe task of System update is running ...\n$(date "+%F %T")" > "$logdst"/"$udlog"
else
mkdir -p "$basedst"/"$cuyear"/"$cumonth"
Blue "-------------------------\nThe path of log does not exist, creating now...\n$(date "+%F %T")" >> "$basedst"/error.log
fi
}
# Verify network connectivity by pinging two package mirrors once each;
# continues if either responds, otherwise exits the script with status 1.
check_network(){
ping -c 1 mirrors.aliyun.com > /dev/null 2>&1
local a=$?
ping -c 1 mirrors.tuna.tsinghua.edu.cn > /dev/null 2>&1
local b=$?
if [ $a -eq 0 ] || [ $b -eq 0 ]
then
Blue "-------------------------\nNetwork connection is fine, system is updating!\n$(date "+%F %T")"
else
Red "-------------------------\nThe device is offline, please check whether the network connection is \
normal!\nSystem update task failed!\n$(date "+%F %T")"
exit 1
fi
}
# Run a package-manager subcommand non-interactively as root: the cached
# password is piped to `sudo -S` and $kcmd (aptitude or apt-get, possibly
# overridden via dynamic scoping by callers) executes the command with -y.
# FIX: only "$1" was forwarded before, so `Action install aptitude`
# silently dropped the package name; forward all arguments with "$@".
Action (){
echo "$Password" | sudo -S $kcmd "$@" -y
}
# Dispatch to the upgrade routine matching the effective UID captured in
# $cuid: root runs the package managers directly, everyone else goes
# through the sudo wrapper.
check_user(){
  if [ "$cuid" = 0 ]; then
    root_user
  else
    common_user
  fi
}
# Install aptitude when it is not yet available. As root, apt-get is
# invoked directly; otherwise the install goes through Action. Note that
# `local kcmd="apt-get"` relies on bash dynamic scoping: Action sees the
# local value, so apt-get is used while aptitude is still missing.
check_aptitude(){
if ! aptitude -h > /dev/null 2>&1;
then
if [ "$cuid" = 0 ]
then
apt-get install -y aptitude
else
local kcmd="apt-get"
Action install aptitude
fi
fi
}
# Upgrade path for non-root users: every package-manager call goes through
# Action (sudo with the cached password): update, safe-upgrade, clean,
# then autoremove via apt-get.
common_user(){
echo
Red "-------------------------"
echo
Action update
Action safe-upgrade
Blue "-------------------------\nSystem upgrade completed!\n$(date "+%F %T")"
echo
Red "-------------------------"
echo
Action clean
# autoremove is an apt-get subcommand; dynamic scoping makes this local
# value visible inside Action for the next call only.
local kcmd="apt-get"
Action autoremove
Blue "-------------------------\nMission all over\n$(date "+%F %T")"
}
# Upgrade path when already running as root: invoke aptitude/apt directly,
# no sudo or cached password needed. Mirrors common_user step for step.
root_user(){
echo
Red "-------------------------"
echo
aptitude update
aptitude safe-upgrade -y
Blue "-------------------------\nSystem upgrade completed!\n$(date "+%F %T")"
echo
Red "-------------------------"
echo
aptitude clean
apt autoremove -y
Blue "-------------------------\nMission all over\n$(date "+%F %T")"
}
# Top-level flow: verify connectivity, make sure aptitude is installed,
# then run the upgrade routine appropriate for the current user.
Main(){
check_network
check_aptitude
check_user
}
check_dst
Main >> "$logdst"/"$udlog" | true |
83b2f108eebc03d957f7057dfef80a14949ff76a | Shell | StamLab/stampipes | /scripts/bwa/aggregate/basic/reset.bash | UTF-8 | 2,646 | 3 | 3 | [] | no_license | # Requires LIBRARY_NAME, AGGREGATION_ID, HOTSPOT2_DIR and GENOME to be in the environment
# Removes any of the listed files that exist
#
# Resets a bwa aggregation: deletes every per-aggregation output a previous
# run may have produced, removes the hotspot2 peaks directory, and clears
# the aggregation stats in LIMS.
# FIX: file/dir removals and the LIMS call used unquoted expansions; all
# variable expansions are now quoted (and `--` guards option-like names).

echo "RESETTING AGGREGATION ${AGGREGATION_ID} FOR ${LIBRARY_NAME}"

files=( \
"${LIBRARY_NAME}.${GENOME}.sorted.bam" \
"${LIBRARY_NAME}.${GENOME}.sorted.bam.bai" \
"${LIBRARY_NAME}.tagcounts.txt" \
"${LIBRARY_NAME}.${GENOME}.uniques.sorted.bam" \
"${LIBRARY_NAME}.${GENOME}.uniques.sorted.bam.bai" \
"${LIBRARY_NAME}.CollectInsertSizeMetrics.picard" \
"${LIBRARY_NAME}.CollectInsertSizeMetrics.picard.pdf" \
"${LIBRARY_NAME}.CollectInsertSizeMetrics.picard.info" \
"${LIBRARY_NAME}.MarkDuplicates.picard" \
"${LIBRARY_NAME}.75_20.${GENOME}.uniques-density.bed.starch" \
"${LIBRARY_NAME}.75_20.${GENOME}.uniques-density.bed.starch.bgz" \
"${LIBRARY_NAME}.75_20.${GENOME}.uniques-density.bed.starch.bgz.tbi" \
"${LIBRARY_NAME}.75_20.${GENOME}.bw" \
"${LIBRARY_NAME}.75_20.normalized.${GENOME}.bw" \
"${LIBRARY_NAME}.75_20.normalized.${GENOME}.uniques-density.bed.starch" \
"${LIBRARY_NAME}.75_20.normalized.${GENOME}.uniques-density.bed.starch.bgz" \
"${LIBRARY_NAME}.75_20.normalized.${GENOME}.uniques-density.bed.starch.bgz.tbi" \
"${LIBRARY_NAME}.${GENOME}.cuts.sorted.bed.starch" \
"${LIBRARY_NAME}.${GENOME}.cutcounts.sorted.bed.starch" \
"${LIBRARY_NAME}.${GENOME}.cutcounts.sorted.bed.starch.bgz" \
"${LIBRARY_NAME}.${GENOME}.cutcounts.sorted.bed.starch.bgz.tbi" \
"${LIBRARY_NAME}.${GENOME}.fragments.sorted.bed.starch" \
"${LIBRARY_NAME}.${GENOME}.cutcounts.$READ_LENGTH.bw" \
"${LIBRARY_NAME}.${GENOME}.R1.rand.uniques.sorted.spotdups.txt" \
"${LIBRARY_NAME}.${GENOME}.R1.rand.uniques.sorted.spot.info" \
"${LIBRARY_NAME}.${GENOME}.R1.rand.uniques.sorted.spot.out" \
"${LIBRARY_NAME}.${GENOME}.uniques.sorted.hotspot2.info" \
"${LIBRARY_NAME}.adaptercounts.txt" \
"${LIBRARY_NAME}.${GENOME}.uniques.sorted.proxdist.info" \
"${LIBRARY_NAME}.${GENOME}.rand.uniques.sorted.spotdups.txt" \
"${LIBRARY_NAME}.${GENOME}.rand.uniques.sorted.spot.out" \
"${LIBRARY_NAME}.${GENOME}.rand.uniques.sorted.spot.info" \
"${LIBRARY_NAME}.uniques.duphist.txt" \
"${LIBRARY_NAME}.uniques.preseq.txt" \
"${LIBRARY_NAME}.uniques.preseq.targets.txt" \
)

for FILE in "${files[@]}"; do
if [ -e "$FILE" ]; then
echo "Removing $FILE"
rm -- "$FILE"
fi
done

# Only remove the hotspot2 output when it is the expected relative name,
# guarding against deleting an arbitrary path from the environment.
if [[ -d "$HOTSPOT2_DIR" && "$HOTSPOT2_DIR" = "peaks_v2_1_1" ]]; then
rm -r -- "$HOTSPOT2_DIR"
fi

python3 "$STAMPIPES/scripts/lims/upload_data.py" --clear_aggregation_stats --aggregation_id "${AGGREGATION_ID}"
| true |
4d4c46c07efa886a2a41b078c5d0f3ca8feea2ed | Shell | cherepan/CSCDev | /runDisplay.sh | UTF-8 | 660 | 2.90625 | 3 | [] | no_license | plotbase=$1
# run event displays
#
# For every chamber/category pair: stage the corresponding event list,
# create the per-category output directory, and run the CMSSW event
# display into it, logging progress to log.txt.
# FIX: $displaydir was echoed BEFORE it was assigned, printing an empty or
# stale value each iteration; the assignment now precedes the echo, and
# all expansions are quoted.
for chamber in "ME11" "nonME11"
do
  for cat in "seg_uf_0_ru_1" "seg_uf_1_ru_0" "seg_uf_1_ru_2" "seg_uf_2_ru_1" "seg_uf_4_ru_4" "rh_uf_5_ru_6" "rh_uf_6_ru_5"
  do
    cp "/raid/raid8/mhl/CSC_Run2/CMSSW_dev/analysisCode/logs/${cat}_${chamber}.txt" /raid/raid8/mhl/CSC_Run2/CMSSW_dev/CMSSW_9_2_13/src/eventList.txt
    displaydir="${plotbase}eventdisplay/${cat}_${chamber}"
    echo "$displaydir"
    mkdir -p "$displaydir"
    cp /home/mhl/public_html/index.php "$displaydir"
    cmsRun gifDisplay.py plotdir="$displaydir"
    date >> log.txt
    echo "${cat}_${chamber} plots done" >> log.txt
  done
done
date >> log.txt
| true |
822a5d2998926399509a8ba1a65051d45df9b83c | Shell | jlaw9/TRI_Dev | /Variants/start_list_variants.sh | UTF-8 | 4,120 | 3.859375 | 4 | [] | no_license | #! /bin/bash
# GOAL: Run annovar on all of the vcf files in the project, and generate a list of variants
# The "#$" lines below are SGE (qsub) directives: shell, working dir, job
# name, stdout/stderr files, and environment export.
#$ -S /bin/bash
#$ -cwd
#$ -N Get_All_Variants
#$ -o results.txt
#$ -e errors.txt
#$ -V
PROJECT_DIR='/rawdata/project_data/Wales_Archived_data2' # This is the directory where I'm still QC multiple runs
# There shouldn't normally be two different project directories, so I won't make that a parameter
PROJECT_METADATA_CSV='/home/ionadmin/jeff/Wales_data/WalesPlates_5_23_14.csv'
USAGE='USAGE: bash get_list_of_all_variants.sh <path/to/Project_Dir> <path/to/Project_Metadata_CSV> <Amplicon Coverage Cutoff> <Depth Cutoff>'
#for arguments
# All four arguments are required together; otherwise the defaults above
# are abandoned and the script exits with the usage string.
if [ $# -eq 4 ];
then
PROJECT_DIR=$1
PROJECT_METADATA_CSV=$2
AMP_COV_CUTOFF=$3 # The minimum amount of coverage each amplicon needs to have
DEPTH_CUTOFF=$4 # The minimum depth for each base.
else
echo $USAGE
exit
fi
# Now check the files to see if they exist
if [ ! -d $PROJECT_DIR ]
then
echo "$PROJECT_DIR not found"
exit
elif [ ! -r $PROJECT_METADATA_CSV ]
then
echo "$PROJECT_METADATA_CSV not found"
exit
fi
#PROJECT_DATA=$(tail -n +2 ${PROJECT_FILE}) # This didn't work to remove the header line, so looks like I'll just have to write to a file
# Strip the CSV header into a temp file that the while-loop reads from.
tail -n +2 $PROJECT_METADATA_CSV > noheader.csv
PROJECT_FILE_WO_HEADER="noheader.csv"
echo "sample_dir;id;plate;sample;case_ctr;rotID_col;barcode" > successful_samples.txt
# If everything checks out, then read the csv file line by line, and call push_Data.sh
while IFS=',' read id plate sample case_ctr rotID_col barcode status leftovers
do
# Trim leading/trailing spaces from the status column.
status=`echo "$status" | sed "s/^ *//g" | sed "s/ *$//g"`
sample_dir=''
# The rotID column holds one or more run IDs joined by '&'.
IFS="&" read -a rotIDs <<< "${rotID_col}"
# If there is only one run for this sample, then take the data from the normal place.
if [ "${#rotIDs[@]}" == "1" -a "$status" != 'Fail' ]
then
# Normal output will be put into the output csv. Error output will be printed to the screen
if [ "`find ${PROJECT_DIR}/${plate}/${sample} -maxdepth 0 2>/dev/null`" ]
then
sample_dir=`find ${PROJECT_DIR}/${plate}/${sample} -maxdepth 0`
else
echo "ERROR: $plate/$sample not found in $PROJECT_DIR"
echo "ERROR: $plate/$sample not found in $PROJECT_DIR" 1>&2
fi
# If there are multiple runs, then use the data that ozlem already merged and such.
elif [ "${#rotIDs[@]}" -gt "1" -a "$status" != 'Fail' ]
then
if [ "`find ${PROJECT_DIR}/${plate}/${sample}/Merged -maxdepth 0 2>/dev/null`" ]
then
sample_dir=`find ${PROJECT_DIR}/${plate}/${sample}/Merged -maxdepth 0`
else
echo "ERROR: $plate/$sample not found in $PROJECT_DIR"
echo "ERROR: $plate/$sample not found in $PROJECT_DIR" 1>&2
fi
fi
# if the plate/sample was found, then call_filter_and_run_annovar.sh
if [ "$sample_dir" != '' ]
then
# If annovar was already run, then don't run it again.
if [ ! -f "${sample_dir}/Analysis_files/filtered.vcf" ]
then
bash filter_and_run_annovar.sh $sample_dir $AMP_COV_CUTOFF $DEPTH_CUTOFF
error_code=$?
if [ "$error_code" == "0" ]
then
# script was successful. Adding this sample_dir to the list
echo "${plate}_${sample} finished running Annovar without errors"
echo "$sample_dir;$id;$plate;$sample;$case_ctr;$rotID_col;$barcode" >> successful_samples.txt
elif [ "$error_code" == "4" ]
then
# one of the files was not found.
echo "ERROR: ${sample_dir} did not have the necessary files"
echo "ERROR: ${sample_dir} did not have the necessary files" 1>&2
else
# Something went wrong...
echo "ERROR: ${sample_dir} failed. See sample_dir's log.txt file"
echo "ERROR: ${sample_dir} failed. See sample_dir's log.txt file" 1>&2
fi
else
echo "$sample_dir;$id;$plate;$sample;$case_ctr;$rotID_col;$barcode" >> successful_samples.txt
fi
fi
done < ${PROJECT_FILE_WO_HEADER}
echo "Finished generating the annovar data."
echo "Generating the list of all variants"
python2.7 generate_sheets_allvars.py successful_samples.txt -d data_matrix.xls -s varsPerSample.xls -v varInfo.xls
#head -1 all_vars.xls > sorted_all_vars.xls
#tail -n +2 all_vars2.xls | sort -k 1 >>sorted_all_vars.xls
rm $PROJECT_FILE_WO_HEADER
| true |
87ed69edd69b24e70b7ec8888499760ac67d3e23 | Shell | cherishing99/MiNoPy | /docs/install | UTF-8 | 362 | 2.71875 | 3 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | #! /bin/bash
# Build MiNoPy's bundled library, then download and build the snaphu phase
# unwrapper under $MINOPY_HOME.
# NOTE(review): assumes MINOPY_HOME is set and there is no `set -e`, so a
# failed cd falls through to the following commands — confirm intent.
cd $MINOPY_HOME/minopy/lib;
python setup.py
echo 'Installing snaphu...';
cd $MINOPY_HOME;
wget https://web.stanford.edu/group/radar/softwareandlinks/sw/snaphu/snaphu-v2.0.4.tar.gz
tar -xvf snaphu-v2.0.4.tar.gz
mv snaphu-v2.0.4 snaphu;
rm snaphu-v2.0.4.tar.gz;
# Rewrite the Makefile's /usr/local install prefix to $(MINOPY_HOME)/snaphu.
# The single quotes matter: "$(MINOPY_HOME)" is written literally into the
# Makefile and expanded later by make, not by this shell.
sed -i 's/\/usr\/local/$(MINOPY_HOME)\/snaphu/g' snaphu/src/Makefile
cd snaphu/src; make
| true |
0d70a93520515dc8d2b573e039af830494aac1d6 | Shell | nimmis/docker-spigot | /rootfs/usr/local/bin/build_mc_ver | UTF-8 | 239 | 2.65625 | 3 | [] | no_license | #!/bin/bash
# Fetch/build the requested Spigot version and make it the active jar.
#
# $1 - Spigot/Minecraft version string (e.g. "1.12.2")
#
# FIX: $1 and the $SPIGOT_HOME paths were unquoted (word-splitting risk),
# and the plain `rm` would warn when no previous symlink existed; `rm -f`
# keeps the switch idempotent.
/usr/local/bin/get_mc_ver "$1"

# Only switch the active jar if the requested version's jar now exists.
if [ -f "$SPIGOT_HOME/spigot-$1.jar" ]; then
  echo "Setting $1 as current spigot version"
  rm -f "$SPIGOT_HOME/spigot.jar"
  ln -s "$SPIGOT_HOME/spigot-$1.jar" "$SPIGOT_HOME/spigot.jar"
fi
| true |
f2f54391164fa62c66abdc3164d811da48827522 | Shell | HybridiSpeksi/hybridispeksi | /scripts/utils.sh | UTF-8 | 148 | 2.671875 | 3 | [] | no_license |
# Print the abbreviated hash of the current HEAD commit.
getCommitHash() {
  git log -n 1 --pretty=format:'%h'
}

# Docker image coordinates: fixed repository name, tagged with the commit
# hash of the working tree at the time this file is sourced.
readonly DOCKER_IMAGE_NAME="hybridispeksi/web"
readonly DOCKER_IMAGE_TAG=$(getCommitHash)
| true |
1216aa30b4306b136eb715685c9da74ee23ba08d | Shell | ansagay/watchman | /travis/run.sh | UTF-8 | 407 | 3.015625 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/sh
# CI build script: configure, build, run the integration suite, then verify
# that "make install" works into a scratch DESTDIR.
set -x
uname -a
set -e
# Put the checkout dir first on PATH so freshly built binaries are found.
PATH=$PWD:$PATH
./autogen.sh
./configure --with-pcre --with-python --without-ruby $CONFIGARGS
make clean
make
# From here on, failures are handled explicitly so logs can be dumped.
set +e
rm -rf /tmp/watchman*
# Point temp state at /var/tmp; presumably so /tmp/watchman* holds only the
# daemon's own logs/state for the failure dump below — confirm.
TMPDIR=/var/tmp
TMP=/var/tmp
export TMPDIR TMP
if ! make integration ; then
cat /tmp/watchman*
exit 1
fi
# Smoke-test installation into a throwaway DESTDIR and list the result.
INST_TEST=/tmp/install-test
test -d $INST_TEST && rm -rf $INST_TEST
make DESTDIR=$INST_TEST install
find $INST_TEST
exit 0
| true |
e8c009cb73f793011c29802bd15fa7e5159acffa | Shell | mhousworth/cpsc223N-Assignment-1 | /build.sh | UTF-8 | 1,806 | 3.59375 | 4 | [] | no_license | #!/bin/bash
#In the official documentation the line above always has to be the first line of any script file. But, students have
#told me that script files work correctly without that first line.
#Ruler:==1=========2=========3=========4=========5=========6=========7=========8=========9=========0=========1=========2=========3**
#Author: Michael Housworth
#Email: mhousworth@csu.fullerton.edu
#Course: CPSC 223n
#Semester: Fall 2019
#Assignment: 1
#Program name: Flashing Red Stop Light
#Due: 9-16-2019
#This is a bash shell script to be used for compiling, linking, and executing the C sharp files of this assignment.
#Execute this file by navigating the terminal window to the folder where this file resides, and then enter the command: ./build.sh
#System requirements:
# A Linux system with BASH shell (in a terminal window).
# The mono compiler must be installed. If not installed run the command "sudo apt install mono-complete" without quotes.
# The three source files and this script file must be in the same folder.
# This file, build.sh, must have execute permission. Go to the properties window of build.sh and put a check in the
# permission to execute box.
echo First remove old binary files
rm *.dll
rm *.exe
echo View the list of source files
ls -l
echo Compile FlashingLightInterface.cs to create the file: FlashingLightInterface.dll
# -target:library produces a DLL rather than an executable; -r references
# the Windows Forms / Drawing assemblies the interface uses.
mcs -target:library -r:System.Drawing.dll -r:System.Windows.Forms.dll -out:FlashingLightInterface.dll FlashingLightInterface.cs
echo Compile FlashingLightMain.cs and link the two previously created dll files to create an executable file.
mcs -r:System -r:System.Windows.Forms -r:FlashingLightInterface.dll -out:FL.exe FlashingLightMain.cs
echo View the list of files in the current folder
ls -l
echo Run the Assignment 1 program.
./FL.exe
echo The script has terminated.
| true |
235105e6097ba82add208ab80cc8fee3acf3ac80 | Shell | einervonvielen/family-scripts | /fotodiary/diary.sh | UTF-8 | 2,220 | 4.09375 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# apt-get install exiftool
# Build a single-page HTML photo diary (diary.html) from the images in a
# directory, using each image's EXIF metadata (title, description, creation
# date, GPS position) as read by exiftool.
if type exiftool >/dev/null 2>&1 ; then
echo "ok, exiftool is installed"
else
echo "Install apt-get install exiftool"
exit
fi
if [ "$1" == "-h" ]; then
echo "Create a HTML page (diary) from images"
echo "You can use one of the following parameters"
echo " "
echo " path to image directory"
echo " example: diary.sh images/another-subdir/"
echo " "
echo " "
echo " -h ... show this help message"
echo " example: diary.sh -h"
exit
fi
# Default to the current directory; strip any trailing slashes from the
# supplied path so the glob below builds clean relative URLs.
if [ -z "$1" ]; then
imgdir='.'
else
imgdir=$1
imgdir=$(echo $imgdir | sed 's:/*$::')
fi
echo "image directory is: $imgdir"
outfile="diary.html"
echo "" > $outfile
# With nullglob, extensions with no matches expand to nothing instead of
# the literal pattern.
shopt -s nullglob
for file in "$imgdir"/*.{jpg,JPG,jpeg,JPEG,png,PNG,dng,DNG,cr2,CR2} ; do
content+="<p>\n"
echo "# $file"
lines=$(exiftool "$file")
# echo "lines: $lines"
title=""
description=""
creation_date=""
gps_position=""
# Parse exiftool's "Key : Value" lines; keys are whitespace-trimmed via
# xargs and lower-cased (${key,,}) before matching.
while read -r line; do
key="$( cut -d ':' -f 1 <<< "$line")"
key="$(echo "$key" | xargs)"
key="${key,,}"
#echo "key is not lower case: $key"
value="$( cut -d ':' -f 2- <<< "$line")"
#echo "key is:-$key-"
if [ "$key" = "title" ]; then
title="$value"
fi
if [ "$key" = "description" ] || [ "$key" = "image description" ]; then
description="$value"
fi
if [ "$key" = "create date" ] ; then
creation_date="$value"
fi
if [ "$key" = "gps position" ] ; then
gps_position="$value"
fi
done <<< "$lines"
# Emit one <p> block per image containing whichever fields were present.
if [ "$title" != "" ]; then
echo "TITLE:$title"
content+="<div class='title'>$title</div>\n"
fi
content+="<img src='$file'>\n"
if [ "$creation_date" != "" ]; then
echo "CREATION DATE:$creation_date"
content+="<div class='date'>$creation_date</div>\n"
fi
if [ "$gps_position" != "" ]; then
echo "GPS POSITION:$gps_position\n"
content+="<div class='position'>$gps_position</div>\n"
fi
if [ "$description" != "" ]; then
echo "DESCRIPTION:$description"
content+="<div class='description'>$description</div>\n"
fi
content+="</p>\n"
done
# HTML skeleton; the literal word "diary" in the body is substituted below
# with the accumulated content, and echo -e expands the embedded \n's.
read -r -d '' html <<- EOM
<html>
<head>
<meta charset="utf-8"/>
<style>
img {width:100%}
.title {font-size:2.5em}
</style>
</head>
<body>
diary
</body>
</html>
EOM
echo -e "${html/diary/$content}" >> $outfile
| true |
1dc490cbb447441ba531f8ab33d2bc646e297223 | Shell | iCodeIN/llama | /scripts/lib/wait-for-function | UTF-8 | 496 | 3.59375 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Poll until the given AWS Lambda function's last update has completed.
set -eu
# Load llama configuration into this shell; expected to define
# $llama_region (used below) — confirm against `llama config -shell`.
eval "$(llama config -shell)"
func="$1"
echo "Sleeping until function is ready..."
while :; do
status=$(aws --region "$llama_region" --query Configuration.LastUpdateStatus --output text \
        lambda get-function --function-name "$func")
case "$status" in
    Successful)
      # Update finished — done.
      exit 0
      ;;
    None|InProgress)
      # Still updating (or no update recorded yet) — poll again.
      ;;
    *)
      # Anything else (e.g. Failed) is terminal.
      echo "Unexpected status: $status" >&2
      exit 1
esac
sleep 1
done
| true |
5eaeb96f8d0a5faedefb800c57939461552124bb | Shell | gangsteel/simple-db-dist | /config/query_running_scripts/run_query.sh | UTF-8 | 291 | 2.546875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Run the given query on the distributed engine, then on plain SimpleDB
# for comparison, and finally tear down the child node processes.
#
# $1 - the query string. FIX: it is now quoted when forwarded, so a query
#      containing spaces reaches each sub-script as a single argument
#      (previously the unquoted $query was word-split).
query=$1
bash ./config/query_running_scripts/run_single_distributed.sh "$query"
echo 'Wait a few seconds before running on SimpleDb'
sleep 3
bash ./config/query_running_scripts/run_simpledb.sh "$query"
echo 'Killing all child nodes'
bash ./config/killChildNodes.sh
echo 'Done'
| true |
cae6ee585db65c7c234b15c463620ef7e6d92541 | Shell | jjo/bin | /mplayer-radioparadise.sh | UTF-8 | 539 | 3.34375 | 3 | [] | no_license | #!/bin/sh
# Play a Radio Paradise stream.
# Channel/quality options must come before any player pass-through args:
#   -rock|-mellow|-eclectic|-world  select a channel (default: main mix)
#   -64|-128|-320                   AAC bitrate (default: 320)
#   -flac                           lossless stream
stream=aac-
qual=320
while [ $# -gt 0 ]; do
case "$1" in
-rock|-mellow|-eclectic|-world) stream=${1#-}-; shift;;
-64|-128|-320) qual=${1#-}; shift;;
-flac) qual=${1#-}; shift;;
*) break;;
esac
done
# Particular case :/ -> the main FLAC stream's URL has no "aac-" prefix.
[ ${stream}${qual} = "aac-flac" ] && stream=""
set -x
# macOS plays via VLC (ncurses UI); everything else via mplayer.
case $(uname -s) in
Darwin) exec vlc -I ncurses "$@" http://stream.radioparadise.com/${stream}${qual};;
*) exec mplayer -prefer-ipv4 -cache 256 "$@" http://stream.radioparadise.com/${stream}${qual};;
esac
| true |
4f33348b7846292065279690fb6ad06fdbca0292 | Shell | RobertsLab/code | /20-bismark.sh | UTF-8 | 7,232 | 3.921875 | 4 | [] | no_license | #!/bin/bash
## Job Name - can be changed
#SBATCH --job-name=BISMARK
## Allocation Definition - confirm correctness
#SBATCH --account=coenv
#SBATCH --partition=coenv
## Resources
## Nodes (often you will only use 1)
#SBATCH --nodes=1
## Walltime (days-hours:minutes:seconds format)
#SBATCH --time=30-00:00:00
## Memory per node
#SBATCH --mem=120G
## email notification
#SBATCH --mail-type=ALL
#SBATCH --mail-user=$USER@uw.edu
## Specify the working directory for this job
#SBATCH --workdir=
# Exit script if a command fails
set -e
##########################
# This is a script written to assess bisulfite sequencing reads
# using Bismark. The user needs to supply the following:
# 1. A single directory location contaning BSseq reads.
# 2. BSseq reads need to be gzipped FastQ and end with .fq.gz
# 3. A bisulfite-converted genome, produced with Bowtie2.
# 4. Indicate if deduplication should be performed (whole genome or reduced genome sequencing)
#
# Set these values below
### USER NEEDS TO SET VARIABLES FOR THE FOLLOWING:
# Set --workdir= path in SBATCH header above.
#
# Full path to directory with sequencing reads
reads_dir=""
# Full path to bisulftie-converted genome directory
genome_dir=""
# Enter y (for yes) or n (for no) between the quotes.
# Yes - Whole genome bisulfite sequencing, MBD.
# No - Reduced genome bisulfite sequencing (e.g. RRBS)
deduplicate=""
# Run Bismark on desired number of reads/pairs subset
# The default value is 0, which will run Bismark on all reads/pairs
subset="-u 0"
####################################################
# DO NOT EDIT BELOW THIS LINE
####################################################
# Evaluate user-edited variables to make sure they have been filled.
# The expansions are quoted: the original unquoted form collapsed to a
# one-argument `test` when the variable was empty, which only returned
# "true" by accident. Quoting makes the emptiness checks reliable for
# any value (including whitespace).
[ -z "${deduplicate}" ] \
&& { echo "The deduplicate variable is not defined. Please edit the SBATCH script and add y or n to deduplicate variable."; exit 1; }

[ -z "${genome_dir}" ] \
&& { echo "The bisulfite genome directory path has not been set. Please edit the SBATCH script."; exit 1; }

[ -z "${reads_dir}" ] \
&& { echo "The reads directory path has not been set. Please edit the SBATCH script."; exit 1; }
# Directories and programs
wd=$(pwd)
bismark_dir="/gscratch/srlab/programs/Bismark-0.21.0_dev"
bowtie2_dir="/gscratch/srlab/programs/bowtie2-2.3.4.1-linux-x86_64/"
samtools="/gscratch/srlab/programs/samtools-1.9/samtools"
threads="28"
reads_list="input_fastqs.txt"

## Concatenated FastQ Files
R1=""
R2=""

# Initialize arrays
R1_array=()
R2_array=()

# Create list of input FastQ files for easier confirmation.
# NOTE(review): appends to any pre-existing input_fastqs.txt from a previous
# run — verify the working directory is clean before submitting.
for fastq in ${reads_dir}/*.fq.gz
do
  echo ${fastq##*/} >> ${reads_list}
done

# Check for paired-end
# Capture grep output
# >0 means single-end reads
# set +e/set -e prevents error >0 from exiting script
set +e
grep "_R2_" ${reads_list}
paired=$?
set -e

# Confirm even number of FastQ files
num_files=$(wc -l < ${reads_list})
fastq_even_odd=$(echo $(( ${num_files} % 2 )) )

## Save FastQ files to arrays
R1_array=(${reads_dir}/*_R1_*.fq.gz)
## Send comma-delimited list of R1 FastQ to variable
R1=$(echo ${R1_array[@]} | tr " " ",")

# Evaluate if paired-end FastQs
# Run Bismark as paired-end/single-end based on evaluation
if [[ ${paired} -eq 0 ]]; then
  # Evaluate if FastQs have corresponding partner (i.e. R1 and R2 files)
  # Evaluated on even/odd number of files.
  if [[ ${fastq_even_odd} -ne 0 ]]; then
    { echo "Missing at least one FastQ pair from paired-end FastQ set."; \
    echo "Please verify input FastQs all have an R1 and corresponding R2 file.";
    exit 1; \
    }
  fi
  ## Save FastQ files to arrays
  R2_array=(${reads_dir}/*_R2_*.fq.gz)
  ## Send comma-delimited list of R2 FastQ to variable
  R2=$(echo ${R2_array[@]} | tr " " ",")

  # Run bismark using bisulftie-converted genome
  # Generates a set of BAM files as outputs
  # Records stderr to a file for easy viewing of Bismark summary info
  ${bismark_dir}/bismark \
  --path_to_bowtie2 ${bowtie2_dir} \
  --genome ${genome_dir} \
  --samtools_path=${samtools} \
  --non_directional \
  ${subset} \
  -p ${threads} \
  -1 ${R1} \
  -2 ${R2} \
  2> bismark_summary.txt
else
  # Run Bismark single-end
  ${bismark_dir}/bismark \
  --path_to_bowtie2 ${bowtie2_dir} \
  --genome ${genome_dir} \
  --samtools_path=${samtools} \
  --non_directional \
  ${subset} \
  -p ${threads} \
  ${R1} \
  2> bismark_summary.txt
fi
# Determine if deduplication is necessary
# Then, determine if paired-end or single-end
if [ "${deduplicate}" == "y" ]; then
  # Sort Bismark BAM files by read names instead of chromosomes
  # (deduplicate_bismark requires name-sorted input)
  find *.bam \
  | xargs basename -s .bam \
  | xargs -I bam_basename \
  ${samtools} sort \
  --threads ${threads} \
  -n bam_basename.bam \
  -o bam_basename.sorted.bam

  if [ ${paired} -eq 0 ]; then
    # Deduplication (paired-end reads)
    find *sorted.bam \
    | xargs basename -s .bam \
    | xargs -I bam_basename \
    ${bismark_dir}/deduplicate_bismark \
    --paired \
    --samtools_path=${samtools} \
    bam_basename.bam
  else
    # Deduplication (single-end reads)
    find *sorted.bam \
    | xargs basename -s .bam \
    | xargs -I bam_basename \
    ${bismark_dir}/deduplicate_bismark \
    --single \
    --samtools_path=${samtools} \
    bam_basename.bam
  fi

  # Methylation extraction
  # Extracts methylation info from deduplicated BAM files produced by Bismark
  # Options to create a bedgraph file, a cytosine coverage report, counts,
  # remove spaces from names and to use the "scaffolds" setting.
  # BUGFIX: "--gzip" was missing its trailing backslash, which terminated the
  # command early and left "--counts", "--scaffolds", etc. to be executed as
  # standalone (nonexistent) commands — aborting the script under `set -e`.
  ${bismark_dir}/bismark_methylation_extractor \
  --bedGraph \
  --cytosine_report \
  --genome_folder ${genome_dir} \
  --gzip \
  --counts \
  --scaffolds \
  --remove_spaces \
  --multicore ${threads} \
  --buffer_size 75% \
  --samtools_path=${samtools} \
  *deduplicated.bam

  # Sort deduplicated BAM files (coordinate order, for IGV)
  find *deduplicated.bam \
  | xargs basename -s .bam \
  | xargs -I bam_basename \
  ${samtools} sort \
  --threads ${threads} \
  bam_basename.bam \
  -o bam_basename.sorted.bam

  # Index sorted files for IGV
  # The "-@ ${threads}" below specifies number of CPU threads to use.
  find *deduplicated.sorted.bam \
  | xargs -I sorted_bam \
  ${samtools} index \
  -@ ${threads} \
  sorted_bam
else
  # Methylation extraction without deduplication (e.g. RRBS)
  # Extracts methylation info from BAM files produced by Bismark
  # Options to create a bedgraph file, a cytosine coverage report, counts,
  # remove spaces from names and to use the "scaffolds" setting.
  ${bismark_dir}/bismark_methylation_extractor \
  --bedGraph \
  --cytosine_report \
  --genome_folder ${genome_dir} \
  --gzip \
  --counts \
  --scaffolds \
  --remove_spaces \
  --multicore ${threads} \
  --buffer_size 75% \
  --samtools_path=${samtools} \
  *.bam

  # Sort BAM files (coordinate order, for IGV)
  # BUGFIX: the input BAM was missing from this samtools sort invocation, so
  # it would block reading stdin; pass bam_basename.bam explicitly, matching
  # the deduplicated branch above.
  find *.bam \
  | xargs basename -s .bam \
  | xargs -I bam_basename \
  ${samtools} sort \
  --threads ${threads} \
  bam_basename.bam \
  -o bam_basename.sorted.bam

  # Index sorted files for IGV
  # The "-@ ${threads}" below specifies number of CPU threads to use.
  find *sorted.bam \
  | xargs -I sorted_bam \
  ${samtools} index \
  -@ ${threads} \
  sorted_bam
fi
# Bismark processing report
# Generates HTML reports from previously created files
${bismark_dir}/bismark2report

#Bismark summary report
# Generates HTML summary reports from previously created files
${bismark_dir}/bismark2summary
| true |
250814b749ed4aad78d0fc2b2c08af0a9bf7d785 | Shell | coveredinc/davinci | /bin/prme | UTF-8 | 704 | 3.15625 | 3 | [] | no_license | #!/bin/bash
set -euo pipefail

# Open a GitHub compare/PR page for the current branch.
# Usage: prme <org> <title> [body]   (body defaults to the title)
repo="$(basename "$(git rev-parse --show-toplevel)")"

org="${1:?must set org}" ; shift
title="${1:?must set PR title}" ; shift
# Body defaults to the title when omitted. Only shift when an argument is
# actually present: a bare `shift` with no remaining args returns non-zero
# and aborts the whole script under `set -e`.
body="${1:-${title}}"
if [ $# -gt 0 ]; then shift; fi

branch="$(git branch-search)"

# URL-encode the single argument. Uses python3/urllib.parse — the original
# python2 `urllib.quote` call fails on modern systems where `python` is
# Python 3 (or absent).
function _url_encode() {
    python3 -c "import urllib.parse, sys; print(urllib.parse.quote(sys.argv[1]))" "$1"
}

if [[ "${body}" == "@infra" ]]; then
    body='@alexebird'
fi

title="$(_url_encode "${title}")"
body="$(_url_encode "${body}")"

#URI.encode("http://github.com/ConsultingMD/#{repo}/compare/master...#{branch}?title=#{sanitize(title)}&body=#{sanitize(body)}")
url="http://github.com/${org}/${repo}/compare/master...${branch}?title=${title}&body=${body}"
#echo "${url}"
xdg-open "${url}"
| true |
6ccdcf80aab86976d6827dc6d1cace1f2131e0f1 | Shell | r26D/slack-action | /entrypoint.sh | UTF-8 | 1,930 | 3.28125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
##!/bin/sh -l
set -e
# Replace the first occurrence of <key> with <value> on each line of <filename>.
# '/' and '&' in the value are escaped so sed treats them literally.
replace_in_file() {
  local -r key="${1:?key is required}"
  local -r value="${2:?value is required}"
  local -r filename="${3:?filename is required}"
  # Split declaration from assignment so a failing pipeline is not masked
  # by `local`'s own (always zero) exit status; printf '%s' keeps the value
  # intact where the original unquoted `echo $value` word-split and globbed it.
  local safe_value
  safe_value=$(printf '%s' "$value" | sed -e 's/[\/&]/\\&/g')
  sed -i -e "s/$key/$safe_value/" "$filename"
}
# Validate required inputs before doing any work.
if [[ -z "${SLACK_WEBHOOK_URL}" ]]; then
  echo "SLACK_WEBHOOK_URL missing!"
  exit 127
fi
if [[ -z "${INPUT_HEADLINE}" ]]; then
  echo "You must at least set a INPUT_HEADLINE"
  exit 127
fi
if [[ -z "${INPUT_CHANNEL}" ]]; then
  echo "You must at least set a INPUT_CHANNEL"
  exit 127
fi
# Build the Slack payload by successively rewriting the bundled template.
# NOTE(review): INPUT_* values are interpolated directly into the jq filter
# string, so quotes/backslashes in them can break (or inject into) the
# filter — consider jq --arg for untrusted input.
OUTPUT_JSON=$(cat /templates/full.json | jq ".channel=\"${INPUT_CHANNEL}\"")
OUTPUT_JSON=$(echo ${OUTPUT_JSON} | jq ".text=\"${INPUT_HEADLINE}\"")
OUTPUT_JSON=$(echo ${OUTPUT_JSON} | jq ".blocks[0].text.text=\"${INPUT_HEADLINE}\"")
# Optional username override; drop the key entirely when unset.
if [[ -z "${INPUT_USERNAME}" ]]; then
  OUTPUT_JSON=$(echo ${OUTPUT_JSON} | jq "del(.username)")
else
  OUTPUT_JSON=$(echo ${OUTPUT_JSON} | jq ".username=\"${INPUT_USERNAME}\"")
fi
# Optional icon emoji; same drop-or-set pattern.
if [[ -z "${INPUT_ICONEMOJI}" ]]; then
  OUTPUT_JSON=$(echo ${OUTPUT_JSON} | jq "del(.icon_emoji)")
else
  OUTPUT_JSON=$(echo ${OUTPUT_JSON} | jq ".icon_emoji=\"${INPUT_ICONEMOJI}\"")
fi
# Body section: remove the divider+section blocks when no body was given;
# otherwise fill the text and (optionally) the accessory image.
if [[ -z "${INPUT_BODY}" ]]; then
  OUTPUT_JSON=$(echo ${OUTPUT_JSON} | jq "del(.blocks[1,2])")
else
  if [[ -z "${INPUT_IMAGEURL}" ]]; then
    OUTPUT_JSON=$(echo ${OUTPUT_JSON} | jq "del(.blocks[2].accessory)")
    OUTPUT_JSON=$(echo ${OUTPUT_JSON} | jq ".blocks[2].text.text=\"${INPUT_BODY}\"")
  else
    OUTPUT_JSON=$(echo ${OUTPUT_JSON} | jq ".blocks[2].accessory.image_url=\"${INPUT_IMAGEURL}\"")
    OUTPUT_JSON=$(echo ${OUTPUT_JSON} | jq ".blocks[2].text.text=\"${INPUT_BODY}\"")
  fi
fi
# Post the finished payload to the incoming webhook.
#echo $OUTPUT_JSON
#curl -X POST -v -H 'Content-type: application/json' --data @message.json $SLACK_WEBHOOK_URL
curl -X POST -s --data-urlencode "payload=${OUTPUT_JSON}" $SLACK_WEBHOOK_URL
| true |
687d6dd00e853f7afe7a77ce87d6b7a14fe0c0f5 | Shell | FoodConnectsPeople/foodconnectspeople-tool | /data_sources/scripts/files/check-consistency.sh | UTF-8 | 3,125 | 2.890625 | 3 | [] | no_license | #! /bin/sh
export LC_ALL=C
rm -f u-*.csv
tail -n +2 recipe2ingredients.csv | cut -f2 -d"," > u-recipeingredients0.csv
tail -n +2 recipe2ingredients.csv | cut -f6 -d"," >> u-recipeingredients0.csv
cat u-recipeingredients0.csv | sort | uniq > u-recipeingredients.csv
tail -n +2 ingredients.csv | cut -f1 -d"," | sort | uniq > u-ingredients.csv
echo "==== Differences between recipe-ingredients and ingredients: only in ingredients"
diff u-recipeingredients.csv u-ingredients.csv | grep ">"
echo "==== DONE === "
echo " "
echo "==== Differences between recipe-ingredients and ingredients: only in recipe ingredients"
diff u-recipeingredients.csv u-ingredients.csv | grep "<"
echo "==== DONE === "
echo " "
tail -n +2 recipe2ingredients.csv | cut -f2,4,7 -d"," > u-recipeingredientsunits0.csv
grep -v ",," u-recipeingredientsunits0.csv | sort | uniq > u-recipeingredientsunits.csv
tail -n +2 unitconversions.csv | cut -f1,2,6 -d"," > u-unitconversions0.csv
grep -v ",," u-unitconversions0.csv | sort | uniq > u-unitconversions.csv
echo "==== Differences between recipe-ingredients and unit conversions: only in conversions"
diff -b u-recipeingredientsunits.csv u-unitconversions.csv | grep ">"
echo "==== DONE === "
echo " "
echo "==== Differences between recipe-ingredients and unit conversions: only in recipe ingredients"
diff u-recipeingredientsunits.csv u-unitconversions.csv | grep "<"
echo "==== DONE === "
echo " "
tail -n +2 recipe2tools.csv | cut -f2 -d"," | sort | uniq > u-recipetools.csv
tail -n +2 tools.csv | cut -f2 -d"," | sort | uniq > u-tools.csv
echo "==== Differences between recipe-tools and tools: only in tools"
diff -w u-recipetools.csv u-tools.csv | grep ">"
echo "==== DONE === "
echo " "
echo "==== Differences between recipe-tools and tools: only in recipe-tools"
diff -w u-recipetools.csv u-tools.csv | grep "<"
echo "==== DONE === "
echo " "
tail -n +2 recipes.csv | cut -f2 -d"," > terms.csv
tail -n +2 recipes.csv | cut -f3 -d"," >> terms.csv
tail -n +2 recipes.csv | cut -f7 -d"," >> terms.csv
tail -n +2 recipes.csv | cut -f10 -d"," >> terms.csv
tail -n +2 recipes.csv | cut -f11 -d"," >> terms.csv
tail -n +2 recipes.csv | cut -f12 -d"," >> terms.csv
tail -n +2 recipe2ingredients.csv | cut -f2 -d"," >> terms.csv
tail -n +2 recipe2ingredients.csv | cut -f4 -d"," >> terms.csv
tail -n +2 recipe2ingredients.csv | cut -f5 -d"," >> terms.csv
tail -n +2 recipe2ingredients.csv | cut -f6 -d"," >> terms.csv
tail -n +2 events.csv | cut -f2 -d"," >> terms.csv
tail -n +2 events.csv | cut -f6 -d"," >> terms.csv
tail -n +2 recipe2tools.csv | cut -f2 -d"," >> terms.csv
tail -n +2 countries.csv | cut -f2 -d"," >> terms.csv
tail -n +2 categories.csv | cut -f2 -d"," >> terms.csv
cat terms.csv | sed -e 's/^[ \t]*//' | sort | uniq > u-terms.csv
tail -n +2 translations.csv | cut -f1 -d"," | sed -e 's/^[ \t]*//' | sort | uniq > u-translations.csv
echo "==== Differences between terms and translations: only in terms"
diff -w u-terms.csv u-translations.csv | grep "<"
echo "==== DONE === "
echo " "
read -rsp $'Press any key to continue...\n' -n 1 key
rm -f terms.csv
rm -f u-*.csv
| true |
c9e7b80083511c9668dc289b4f199b866b486f9f | Shell | marafa/openstack-utils | /utils/openstack-service | UTF-8 | 3,690 | 3.859375 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/sh
# Copyright (C) 2013, Red Hat, Inc.
# Lars Kellogg-Stedman <lars@redhat.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Print usage and exit when called without an action or with --help
# (--help exits 0; a missing action exits 1).
if [ "$#" -lt 1 ] || [ "$1" = '--help' ]; then
  echo "\
NAME
 openstack-service - control enabled openstack services
SYNOPSIS
 $(basename "$0") <action> [service]...
DESCRIPTION
 <action> can be 'list' or an action supported by the service.
 'list' will list the enabled openstack services.
 Any specified [service]s filter the list of enabled openstack services.
SEE ALSO
 openstack-status(1)" >&2
  [ "$1" = '--help' ] && exit 0 || exit 1
fi

# Detect the init system: set systemctl=1 under systemd, otherwise capture
# the current SysV runlevel for chkconfig below.
systemctl --version >/dev/null 2>&1 && systemctl=1
[ "$systemctl" ] || RUNLEVEL=$(LANG=C who -r | sed 's/.*run-level \([0-9]\).*/\1/')

# This generates a list of all services currently enabled on the host
# (for certain values of "enabled" where systemd is concerned -- currently,
# we check multi-user.target.wants for a list of enabled services).
#
# The systemctl command used here comes mostly from:
# http://fedoraproject.org/wiki/SysVinit_to_Systemd_Cheatsheet
# Print one enabled service name per line, with any ".service" suffix
# stripped. Chooses the listing command based on the init system detected
# above (systemd vs. SysV chkconfig).
enabled_services() {
  if [ "$systemctl" = 1 ]; then
    if systemctl list-unit-files >/dev/null 2>&1; then
      systemctl list-unit-files --type=service --full --no-legend --no-pager |
      awk '$2 == "enabled" {print $1}' |
      sed 's/.service$//'
    else
      # This fallback may list disabled but loaded services
      systemctl list-units --type=service --full --no-pager |
      sed -n 's/\.service.*//p'
    fi
  else
    chkconfig --list | grep "${RUNLEVEL}:on" | awk '{print $1}'
  fi
}
# This filters the results of enabled_services() for those services
# related to openstack, and optionally restricts those to ones
# that start with a specifc prefix (e.g., "cinder" or "glance").
enabled_openstack_services() {
  local svcprefix=$1
  # `egrep` is deprecated (modern GNU grep warns about it); `grep -E` is the
  # standard equivalent. The old `[ ... ] && egrep ... || cat` chain is also
  # rewritten as an explicit if/else: with no prefix the list passes through
  # unchanged, otherwise it is filtered by the prefix.
  enabled_services |
    grep -E '^(openstack|neutron|quantum)' | grep -v "neutron-.*-cleanup" |
    if [ "$svcprefix" ]; then
      grep -E "^(openstack-)?${svcprefix}"
    else
      cat
    fi
}
# This calls enabled_openstack_services once for each value in "$@", or once
# with no prefix if there are no arguments.
generate_service_list() {
  if [ "$*" ]; then
    for svcprefix in "$@"; do
      # Quote the prefix so it reaches the filter as a single argument
      # (the original unquoted expansion was subject to word splitting).
      enabled_openstack_services "$svcprefix"
    done
  else
    enabled_openstack_services
  fi
}
# $action may be empty, "list", or any argument appropriate to the "service"
# command. $action can only be empty if there are no service prefixes
# on the command line.
action="$1"
shift

# Read service names from stdin and apply $action to each, either via
# systemctl (batched) or the SysV `service` command (one at a time).
run_action() {
  # All service names, whitespace-separated; intentionally expanded unquoted
  # below so systemctl/service receive one argument per name.
  SVCs=$(cat)
  if [ "$systemctl" ]; then
    if [ "$action" = "status" ]; then
      # Generate simple status like: service status $SVC
      systemctl show --property=Id,MainPID,ActiveState $SVCs |
      sed '/^$/d' | paste -d' ' - - - | sed \
      's/Id=\(.*\)\.service ActiveState=\(.*\) MainPID=\(.*\)/\1 (pid \3) is \2/'
    else
      systemctl $action $SVCs
    fi
  else
    for SVC in $SVCs; do
      service $SVC $action
    done
  fi
}

# Generate a list of services and either print the list on stdout for "list"
# or use the "service" command to perform the requested action on each of
# the services.
generate_service_list "$@" |
( [ "$action" = "list" ] && cat || run_action )
| true |
9f2513e238c10814639d015bbdf5fd90ba005a8e | Shell | apalpant/formation-devops | /CI-CD/Jenkins/provision_jenkins.sh | UTF-8 | 2,246 | 3.15625 | 3 | [] | no_license | #!/bin/sh
# Install and configure the UFW firewall.
apt install -y ufw
ufw --force enable
# Allow ssh, http(s) and Jenkins' default port.
ufw allow ssh
ufw allow http
ufw allow https
ufw allow 8080

# Prerequisites for Jenkins (GPG for the repo key, a JRE to run it).
apt-get install -y gnupg
apt-get install -y openjdk-11-jre

# Install Jenkins following the official site's instructions.
wget -q -O - https://pkg.jenkins.io/debian-stable/jenkins.io.key | apt-key add -
sh -c 'echo deb https://pkg.jenkins.io/debian-stable binary/ > \
    /etc/apt/sources.list.d/jenkins.list'
# Refresh the package database and install the jenkins package.
apt-get update
apt-get install -y jenkins
service jenkins start

# Create the "userjob" account used by Jenkins jobs.
useradd -m userjob
echo "userjob:userjob" | chpasswd

# Back up sudoers, then let userjob run apt via sudo.
# NOTE(review): appending to /etc/sudoers bypasses visudo's syntax check;
# a drop-in under /etc/sudoers.d would be safer — confirm before changing.
cp /etc/sudoers /etc/sudoers.old
echo "userjob ALL=(ALL) /usr/bin/apt" | tee -a /etc/sudoers

# Print Jenkins' initial admin password for the operator.
echo "initialAdminPassword Jenkins: "
cat /var/lib/jenkins/secrets/initialAdminPassword | xargs echo

# Enable SSH password authentication (PasswordAuthentication no -> yes).
cp /etc/ssh/sshd_config /etc/ssh/sshd_config.bck
sed "s/PasswordAuthentication no/PasswordAuthentication yes/" \
    /etc/ssh/sshd_config.bck > /etc/ssh/sshd_config
systemctl restart sshd

# Tooling: git is needed by jobs. BUGFIX: `-y` added — the original plain
# `apt install git` would block on an interactive prompt during provisioning.
apt install -y git

# Install python3 and make it the default `python`.
apt-get install python3 python3-dev python3-pip -q -y
apt remove -y python
cp /usr/bin/python3 /usr/bin/python

apt install -y openjdk-8-jdk
apt install -y unzip

## Fetch and unpack the requested Gradle version.
VERSION=7.0
wget https://downloads.gradle-dn.com/distributions/gradle-${VERSION}-bin.zip -P /tmp
unzip -d /opt/gradle /tmp/gradle-${VERSION}-bin.zip
# Point the "latest" symlink at this version.
ln -s /opt/gradle/gradle-${VERSION} /opt/gradle/latest

# Add Gradle to the PATH for login shells.
# BUGFIX: the original used double quotes, so ${PATH} was expanded *now*,
# baking the provisioner's PATH into the profile script. Single quotes keep
# a literal $PATH so it is resolved at login time.
touch /etc/profile.d/gradle.sh
echo 'export PATH=/opt/gradle/latest/bin:$PATH' > /etc/profile.d/gradle.sh
chmod +x /etc/profile.d/gradle.sh
# `source` is a bashism; use the POSIX `.` since this script runs under sh.
. /etc/profile.d/gradle.sh
4dc93c8ee1a496d46ecad917e7234891bf4c2e1d | Shell | linyimin0812/my-shell | /查看IP.sh | UTF-8 | 421 | 3.328125 | 3 | [] | no_license | #! /bin/bash
# 查看本地IP, 此方法包含docker对应的IP
ifconfig | grep "inet " | grep -Fv "127.0.0.1" | awk '{print $2}' | sed 's/'addr:'//g'
# 上面的方法如果存在docker时,也会把docker的IP打印出来,使用ip命令获取真实的IP
([[ ! -z $(type -P ip) ]] && (ip route get 1 | awk '$7!="" {print $7}')) || (ifconfig | grep "inet " | grep -Fv "127.0.0.1" | awk '{print $2}' | sed 's/'addr:'//g') | true |
7233bf7db0ea9f67846fab82baf723fcef8e6060 | Shell | lahemi/assigncodement | /Arduino/st7920_datadisplay/mem.sh | UTF-8 | 678 | 3.0625 | 3 | [] | no_license | #!/usr/bin/env bash
# See: man proc
# GPLv2, 2013, Ilkka Jylhä & Lauri Peltomäki
data=$(awk 'NR' /proc/meminfo)
mt=$(echo -n "$data"|awk '/MemTotal/ {print $2}')
mb_f=$(echo -n "$data"|awk '/MemFree/ {print $2}')
mb_b=$(echo -n "$data"|awk '/Buffers/ {print $2}')
mb_c=$(echo -n "$data"|awk '/^Cached/ {print $2}')
#st=$(echo -n "$data"|awk '/SwapTotal/ {print $2}')
#sf=$(echo -n "$data"|awk '/SwapFree/ {print $2}')
let memfree=$mb_f+$mb_b+$mb_c
let inuse=$mt-$memfree
let usep=$inuse/$mt*100
let rmt=$mt/1024
let rmb_f=$mb_f/1024
let rmb_b=$mb_b/1024
let rmb_c=$mb_c/1024
let rmfree=$rmb_f+$rmb_b+$rmb_c
let rinuse=$rmt-$rmfree
let rusep=$rinuse/$rmt*100
printf "MEM: %dM\0" $rinuse
| true |
e2937def0b228365b153df42d05b32aeb651fc81 | Shell | outofcoffee/semver-cli | /release.sh | UTF-8 | 1,228 | 3.53125 | 4 | [] | no_license | #!/usr/bin/env bash
set -e
#set -x
# Release version: the tag pointing exactly at HEAD (git fails otherwise).
CURRENT_VERSION="$( git describe --tags --exact-match )"
GITHUB_USER="outofcoffee"
GITHUB_REPO="semver-cli"
# Create a pre-release on GitHub for ${CURRENT_VERSION}.
# Uses GITHUB_USER / GITHUB_REPO / GITHUB_PASSWORD / CURRENT_VERSION
# from file scope; prints the API response to stdout.
function create_release() {
    CREATE_RELEASE_REQ=$( cat << EOF
{
  "tag_name": "${CURRENT_VERSION}",
  "target_commitish": "master",
  "name": "${CURRENT_VERSION}",
  "body": "Latest edge release",
  "draft": false,
  "prerelease": true
}
EOF
)
    # POST the JSON body to the releases endpoint with basic auth.
    curl -X POST -u "${GITHUB_USER}:${GITHUB_PASSWORD}" \
        https://api.github.com/repos/${GITHUB_USER}/${GITHUB_REPO}/releases -d "${CREATE_RELEASE_REQ}"
}
# Upload the file given as $1 as an asset of the GitHub release tagged
# ${CURRENT_VERSION}. Uses GITHUB_USER / GITHUB_REPO / GITHUB_PASSWORD
# from file scope. Fails (non-zero) if the release cannot be resolved.
function upload_binary() {
    local release_info release_id
    release_info="$( curl -s -u "${GITHUB_USER}:${GITHUB_PASSWORD}" \
        "https://api.github.com/repos/${GITHUB_USER}/${GITHUB_REPO}/releases/tags/${CURRENT_VERSION}" )"
    release_id="$( echo "${release_info}" | jq '.id' )"
    # jq prints "null" when the release lookup failed; bail out instead of
    # POSTing to a bogus URL.
    if [[ -z "${release_id}" || "${release_id}" == "null" ]]; then
        echo "could not resolve release id for ${CURRENT_VERSION}" >&2
        return 1
    fi
    # URL and file arguments quoted so names with special characters survive.
    curl -X POST -u "${GITHUB_USER}:${GITHUB_PASSWORD}" \
        "https://uploads.github.com/repos/${GITHUB_USER}/${GITHUB_REPO}/releases/${release_id}/assets?name=$( basename "$1" )" \
        -H 'Content-Type: application/octet-stream' \
        --data-binary @"$1"
}
echo "Building ${CURRENT_VERSION}"

echo "GitHub password for ${GITHUB_USER}:"
# -r keeps backslashes in the password literal; -s suppresses echo.
read -rs GITHUB_PASSWORD

# Build the binary, create the release, then attach the binary to it.
go build .
create_release
upload_binary semver-cli
| true |
00382f27c608dee16c242e8f6e64cc238c8a69ae | Shell | semoho/Linux-Tutorial | /students-assignments/1/Assignment 06/color.sh | UTF-8 | 185 | 3.34375 | 3 | [] | no_license | #!/bin/bash
# Ask for a colour name and reply with an opinion about it.
echo "Enter color name"
# NOTE(review): consider `read -r`; without it backslashes in the input
# are interpreted as escapes.
read color
case $color in
black | white ) echo "Pure Colors"
;;
"red" ) echo "I like this one"
;;
# Anything else falls through to a generic compliment.
* ) echo "I think $color is also nice"
esac | true |
07eb4f045b0d19dca6341ac026aa7ccc47cc40be | Shell | nxmatic/multipass-vpnkit | /hack/hyperkit.sh | UTF-8 | 1,387 | 3.6875 | 4 | [] | no_license | #!/bin/bash
# Idea taken from https://github.com/AlmirKadric-Published/docker-tuntap-osx/blob/master/sbin/docker.hyperkit.tuntap.sh
set -o nounset
set -o errexit
# VPNKit socket
vpnkitintf=/var/run/vpnkit.socket
# Find index and id of highest network interface argument
ethILast=false
ethIDLast=false
argI=0
while [ $argI -lt $# ]; do
arg=${@:$argI:1}
# Find device arguments
if [ "$arg" == "-s" ]; then
argDeviceI=$(($argI + 1))
argDevice=${@:$argDeviceI:1}
# Check if device argument is a network device
if echo $argDevice | grep -qE "^2:[0-9]+"; then
# Finally check if network interface ID is higher than current highest
# If so update highest
ethID=$(echo $argDevice | sed -E 's/2:([0-9]+),.*/\1/')
if [ $ethIDLast = false ] || [ $ethID -gt $ethIDLast ]; then
ethILast=$argDeviceI
ethIDLast=$ethID
fi
fi
# Skip device argument since we already processed it
argI=$(($argI + 1))
fi
argI=$(($argI + 1))
done
# Make sure we found network interfaces
# If not something went wrong
if [ $ethILast = false ] || [ $ethIDLast = false ]; then
echo "Network interface arguments not found" >&2
exit 1
fi
# Inject additional tap network interface argument after the highest one
ethintf=$(($ethIDLast + 1))
set -- \
"${@:1:$ethILast}" \
"-s" "2:$ethintf,virtio-vpnkit,path=$vpnkitintf" \
"${@:$(($ethILast + 1))}"
exec "$0.original" "$@"
| true |
9ddf89d8e0d9c848d9c7ab20e504cb964a47bdc8 | Shell | antv0/archsetup | /archsetup.sh | UTF-8 | 6,398 | 3.96875 | 4 | [
"MIT"
] | permissive | #!/bin/sh
#mount your drive in /mnt before running this script
# -c as option
# NOTE(review): the script uses bash arrays and [[ ]] — it must run under
# bash even though the shebang says sh.
# --- user configuration -------------------------------------------------
hostname="arch"
country="france" #for reflector
timezone="Europe/Paris"
locale="fr_FR.UTF-8"
lang="fr_FR.UTF8"
keymap="fr-pc"
# URL of a sectioned package list (see pkg() below).
additional_packages="https://raw.githubusercontent.com/antv0/archsetup/master/packages.txt"
use_reflector=false
# Leave empty to be prompted interactively for the root password.
root_password=""
# Parallel arrays: users[i] gets passwords[i] and dotfiles[i].
users=()
passwords=()
dotfiles=("https://github.com/antv0/dotfiles")
grub=true
efi=true
# Required (install device for grub) when efi=false.
mbr=""
message() {
	# Status line with a cyan "[installarch.sh]" tag in front.
	local tag="\033[36m[installarch.sh]\033[0m"
	echo -e "$tag $1"
}
error(){
	# Print the message in red to stderr and abort the whole script.
	# BUGFIX: the original called bare `exit` (status 0), so a fatal error
	# still reported success to anything checking the script's exit code;
	# it also wrote the message to stdout instead of stderr.
	echo -e "\033[31m$1\033[0m" >&2; exit 1
}
pkg() { # print the list of packages in $1 section
	# Fetch the sectioned package list, strip comments and blank lines,
	# then print the entries under the "<$1>:" heading, space-separated.
	# BUGFIX: the sed stage was invoked as `sed '...' $file`, but $file is
	# never defined in this script — it only read the curl output because
	# the empty expansion made sed fall back to stdin. The stray argument
	# is removed so the pipeline's data flow is explicit.
	curl -s -f "$additional_packages" | sed 's/#.*$//g;/^$/d' | awk -v field="$1" 'BEGIN {ok=0;s=""} /.*:$/ {ok = 0} { re="^" field ":$"; if ($0 ~ re) { ok=1 } else { if(ok) { print $0} } }' | tr '\n' ' ' | tr -s ' '
}
# Preflight: need network and a mounted target before touching anything.
ping -c 1 archlinux.org >/dev/null 2>&1 || error "check your internet connexion."
mountpoint -q /mnt || error "Nothing is mounted on /mnt."

#update the mirrors with reflector in installation environment
if [ "$use_reflector" = true ]; then
	message "Installing reflector in installation environment..."
	pacman -Sy --noconfirm --needed reflector >/dev/null 2>&1
	reflector -c $country --score 5 --save /etc/pacman.d/mirrorlist >/dev/null 2>&1
fi

# if [ -z users ]; then
# 	message "No user will be created. Continue ? [y/N]"
# 	read yn
# 	case $yn in
# 		[Yy]* ) break;;
# 		[Nn]*|'' ) exit;;
# 		* ) echo "Please answer.";;
# 	esac
# fi

# check if usernames are valid
for user in "${users[@]}"
do
	echo "$user" | grep "^[a-z_][a-z0-9_-]*$" >/dev/null 2>&1 || error "invalid username : \"$user\""
done

# BIOS installs need a target disk for grub.
[ $efi != true ] && [ -z $mbr ] && error "Specify the install device for grub in 'mbr=\"\"'"

# Base system install and fstab generation.
message "running pacstrap..."
pacstrap /mnt base linux linux-firmware

message "Generating fstab..."
genfstab -U /mnt >> /mnt/etc/fstab
message "Setting up timezone, language, keymap, hostname..."
arch-chroot /mnt ln -sf /usr/share/zoneinfo/$timezone /etc/localtime
arch-chroot /mnt hwclock --systohc >/dev/null 2>&1
# Uncomment the chosen locale, then generate it inside the chroot.
sed -i "s/#$locale/$locale/g" /mnt/etc/locale.gen
arch-chroot /mnt locale-gen >/dev/null 2>&1
echo "LANG=$lang" > /mnt/etc/locale.conf
echo "KEYMAP=$keymap" > /mnt/etc/vconsole.conf

echo $hostname > /mnt/etc/hostname
echo "127.0.0.1	localhost
::1		localhost
127.0.1.1	"$hostname".localdomain	"$hostname > /mnt/etc/hosts

message "running mkinicpio -P..."
arch-chroot /mnt mkinitcpio -P >/dev/null 2>&1

message "Setting root password."
# Interactive prompt when no password was configured; otherwise feed
# the password twice to passwd's stdin.
if [ -z $root_password ]; then arch-chroot /mnt passwd;
else printf "$root_password\n$root_password" | arch-chroot /mnt passwd >/dev/null 2>&1 ; fi
# Drop the plaintext password from the environment as soon as possible.
unset root_password

#update the mirrors with reflector
if [ "$use_reflector" = true ]; then
	message "Updating mirrors with reflector."
	arch-chroot /mnt pacman -Sy --noconfirm --needed reflector >/dev/null 2>&1
	arch-chroot /mnt reflector -c $country --score 5 --save /etc/pacman.d/mirrorlist
fi
# add users
message "Adding users : ${users[@]}"
for user in "${users[@]}"
do
	arch-chroot /mnt useradd -m -g wheel -s /bin/zsh "$user" >/dev/null 2>&1 || error "Error while adding user"
done

# set passwords
# Iterates indices so users[n] is paired with passwords[n]; users with an
# empty password entry are skipped (left locked / to be set manually).
for n in $( eval echo {0..$((${#users[@]}-1))})
do
	[ -z ${passwords[n]} ] || printf "${passwords[n]}\n${passwords[n]}" | arch-chroot /mnt passwd >/dev/null 2>&1 ${users[n]}
done
# Drop the plaintext passwords from the environment.
unset passwords

message "Installing doas, curl, base-devel, git..."
pacstrap /mnt opendoas curl base-devel git
echo "permit nopass :wheel as root" > /mnt/etc/doas.conf
# NOTE(review): edits sudoers without visudo's syntax check — verify.
sed -i 's/^# %wheel ALL=(ALL) NOPASSWD: ALL$/%wheel ALL=(ALL) NOPASSWD: ALL/' /mnt/etc/sudoers

# Make pacman and yay colorful.
sed -i "s/^#Color/Color/" /mnt/etc/pacman.conf
# Use all cores for compilation.
sed -i "s/-j2/-j$(nproc)/;s/^#MAKEFLAGS/MAKEFLAGS/" /mnt/etc/makepkg.conf

# installing packages
# arch repo:
message "Installing aditional packages..."
pacstrap /mnt $(pkg arch)

#aur
# AUR builds run via makepkg as the first configured user (makepkg
# refuses to run as root).
if [ -z $users ]; then
	message "you need at least one user to install anything from aur. Skiping yay and aur packages..."
else
	# Install yay
	message "Installing yay..."
	dir=/home/${users[0]}/archinstall/aur/yay
	arch-chroot /mnt sudo -u "${users[0]}" git clone https://aur.archlinux.org/yay.git $dir >/dev/null 2>&1 || error "Error while downloading yay."
	arch-chroot /mnt sudo -u "${users[0]}" sh -c "cd $dir && makepkg -si --noconfirm" >/dev/null 2>&1 || error "Error while installing yay."

	# aur packages
	arch-chroot /mnt sudo -u "${users[0]}" yay -S --noconfirm $(pkg aur)

	#git
	# Build-and-install each raw git package listed in the "git:" section.
	for name in $(pkg git)
	do
		bn=$(basename "$name" .git)
		dir=/home/${users[0]}/archinstall/git/$bn
		arch-chroot /mnt sudo -u "${users[0]}" git clone "$name" $dir
		arch-chroot /mnt sudo -u "${users[0]}" sh -c "cd $dir && makepkg -si --noconfirm"
	done
	# Clean up the scratch build directory.
	rm -rf /mnt/home/${users[0]}/archinstall
fi
# Install the dotfiles in the user's home directory
# Clone each user's dotfiles repository and copy its contents over the
# user's home directory, then drop repository metadata files.
for (( n = 0; n < ${#users[@]}; n++ ))
do
	# Skip users that have no dotfiles URL configured instead of
	# attempting a git clone with an empty argument.
	[ -n "${dotfiles[n]}" ] || continue
	message "Installing dotfiles..."
	dir="/home/${users[n]}/dotfiles"
	arch-chroot /mnt sudo -u "${users[n]}" git clone --depth 1 "${dotfiles[n]}" "$dir"
	arch-chroot /mnt sudo -u "${users[n]}" cp -rfT "$dir" "/home/${users[n]}"
	arch-chroot /mnt rm -f "/home/${users[n]}/README.md" "/home/${users[n]}/LICENSE"
done
# Enable the network manager
message "Enabling NetworkManager..."
arch-chroot /mnt systemctl enable NetworkManager.service >/dev/null 2>&1
# Enable the auto time synchronisation.
message "Enabling systemd-timesyncd..."
arch-chroot /mnt systemctl enable systemd-timesyncd.service >/dev/null 2>&1
# Most important commands! Get rid of the beep!
message "Get rid of the beep!"
arch-chroot /mnt rmmod pcspkr
echo "blacklist pcspkr" > /mnt/etc/modprobe.d/nobeep.conf
message "Disable mouse acceleration!"
echo 'Section "InputClass"
Identifier "My Mouse"
MatchIsPointer "yes"
Option "AccelerationProfile" "-1"
Option "AccelerationScheme" "none"
Option "AccelSpeed" "-1"
EndSection' > /mnt/usr/share/X11/xorg.conf.d/50-mouse-acceleration.conf
if [ "$grub" = true ]; then
echo "Downloading grub..."
pacman --noconfirm -S grub efibootmgr >/dev/null 2>&1
if [ "$efi" = true ]; then
grub-install
else
grub_install $mbr
fi
grub-mkconfig -o /boot/grub/grub.cfg
fi
message "Installation completed."
# message "Chroot into new system."
# arch-chroot /mnt
| true |
1f748d8a2c33e63da92f3860c7c7351fdae07cc2 | Shell | lambdasawa/dotfiles | /script/install/starship.sh | UTF-8 | 198 | 2.84375 | 3 | [
"MIT"
] | permissive | #!/bin/bash
if ! which starship >/dev/null 2>&1; then
[ "$(uname)" = Darwin ] && brew install starship
[ "$(uname)" = Linux ] && FORCE=1 sh -c "$(curl -fsSL https://starship.rs/install.sh)"
fi
| true |
ed243f26d49574fdf4ad17ad806f3abc88ed7084 | Shell | abra7134/ansible-roles | /backups_rclone/templates/opt/backups_rclone.inc.sh.j2 | UTF-8 | 10,136 | 3.96875 | 4 | [
"MIT"
] | permissive | #!/bin/bash
{{ ansible_managed | comment }}
ELASTICDUMP_OPTIONS=({{ backups_rclone_elasticdump_options }})
MKSQUASHFS_OPTIONS=({{ backups_rclone_mksquashfs_options }})
MC_OPTIONS=({{ backups_rclone_mc_options }})
MONGODUMP_OPTIONS=({{ backups_rclone_mongodump_options }})
MYSQLDUMP_OPTIONS=({{ backups_rclone_mysqldump_options }})
RCLONE_OPTIONS=({{ backups_rclone_rclone_options }})
TAR_OPTIONS=({{ backups_rclone_tar_options }})
set -o pipefail
# Verify that every command named in the arguments is resolvable on PATH;
# print a diagnostic to stderr and abort the whole backup run otherwise.
_check_commands() {
  for required_command in "$@";
  do
    if ! type -P "${required_command}" >/dev/null
    then
      echo "ERROR: The required command '${required_command}' does not exist" >&2
      echo "       Please check your PATH environment variable" >&2
      echo "       And install the required command through your package manager" >&2
      echo >&2
      exit 1
    fi
  done
}
# Run an arbitrary command verbatim.  Quoting "$@" preserves each argument
# as a single word; the old unquoted ${@} re-split arguments on whitespace.
_make_simple_any() {
  "$@"
}
# Print storage usage/quota information for the remote ("rclone about").
_make_simple_about() {
  local backup_rclone_remote="${1}"
  rclone \
    about \
    "${backup_rclone_remote}" \
    "${RCLONE_OPTIONS[@]}"
}
# Remove remote trash / unfinished multipart uploads ("rclone cleanup").
_make_simple_cleanup() {
  local backup_rclone_remote="${1}"
  rclone \
    cleanup \
    "${backup_rclone_remote}" \
    "${RCLONE_OPTIONS[@]}"
}
# Delete remote files older than $2 days.
_make_simple_delete() {
  local backup_rclone_remote="${1}"
  local backup_remote_age_days="${2}"
  rclone \
    delete \
    "${backup_rclone_remote}" \
    "${RCLONE_OPTIONS[@]}" \
    --min-age "${backup_remote_age_days}d"
}
# Delete leftover temporary chunker files from interrupted uploads.
_make_simple_delete_tmp() {
  local backup_rclone_remote="${1}"
  rclone \
    delete \
    "${backup_rclone_remote}" \
    "${RCLONE_OPTIONS[@]}" \
    --include '*.rclone_chunk.*..tmp_*'
}
# Remove now-empty remote directories, keeping the remote root itself.
_make_simple_rmdirs() {
  local backup_rclone_remote="${1}"
  rclone \
    rmdirs \
    "${backup_rclone_remote}" \
    "${RCLONE_OPTIONS[@]}" \
    --leave-root
}
# Upload the current working directory to the remote, skipping files
# matching the exclude pattern $3 and anything older than $2 days.
_make_simple_copy() {
  local backup_rclone_remote="${1}"
  local backup_remote_age_days="${2}"
  local backup_sync_exclude="${3}"
  rclone \
    copy \
    "." \
    "${backup_rclone_remote}" \
    "${RCLONE_OPTIONS[@]}" \
    --exclude "${backup_sync_exclude}" \
    --max-age "${backup_remote_age_days}d"
}
# Run one simple rclone operation with timing and a human-readable
# OK/FAILED summary line.
#   $1 = operation suffix (dispatches to _make_simple_<operation>)
#   $2 = description used in log output
#   remaining arguments are passed to the operation.
make_simple() {
  local operation="${1}"
  local description="${2}"
  shift 2
  echo "--> ${description}:"
  SECONDS=0
  # Call inside 'if !' so a failure is reported instead of aborting the
  # whole script: with 'set -e' active at file scope, a bare call would
  # exit before the FAILED branch could ever run.
  if ! _make_simple_${operation} "${@}"
  then
    echo "  FAILED to ${description,,}, skipping."
  else
    echo "  OK, complete (by $((SECONDS/60)) minutes $((SECONDS%60)) seconds)."
  fi
}
_make_archive() {
local options="${1}"
local archiver="${2}"
shift 2
local paths="${@}"
if [ "${archiver}" == "tar.bz2" ]
then
_check_commands bzip2 tar
backup_filename+=".tar.bz2"
echo "--> Make tar.bz2 archive from paths: ${paths}"
if test -n "${options}"
then
echo "--> with additional options: ${options}"
fi
if ! test -s "${backup_filename}"
then
tar \
"${TAR_OPTIONS[@]}" \
${options} \
--bzip2 \
--create \
--file "${backup_filename}" \
${paths}
if [ ${?} -gt 0 ]
then
echo " FAILED to create tar.bz2 archive, skipping."
return 1
fi
else
echo " EXIST, skipping."
return 1
fi
elif [ "${archiver}" == "sfs.gz" ]
then
_check_commands mksquashfs
backup_filename+=".sfs.gz"
echo "--> Make sfs.gz archive from paths: ${paths}"
if ! test -s "${backup_filename}"
then
mksquashfs \
${paths} \
${backup_filename} \
"${MKSQUASHFS_OPTIONS[@]}" \
-comp gzip
if [ ${?} -gt 0 ]
then
echo " FAILED to create sfs.gz archive, skipping."
return 1
fi
else
echo " EXIST, skipping."
return 1
fi
else
echo " FAILED: The specified archiver type = '${archiver}' is not supported, skipping."
return 1
fi
}
_make_elasticdump() {
local index_name="${1}"
local dump_type="${2}"
backup_filename+="-${index_name}.${dump_type}.json.bz2"
_check_commands elasticdump bzip2
echo "--> Dump Elasticsearch index '${index_name}' (${dump_type}):"
if ! test -s "${backup_filename}"
then
elasticdump \
"${ELASTICDUMP_OPTIONS[@]}" \
--input="http://127.0.0.1:9200/${index_name}" \
--output=$ \
--type="${dump_type}" \
| bzip2 \
--best \
> "${backup_filename}"
if [ ${?} -gt 0 ]
then
echo " FAILED to getting of dump, skipping."
rm \
--force \
"${backup_filename}"
return 1
fi
else
echo " EXIST, skipping."
return 1
fi
}
_make_influxdbackup() {
local host="${1}"
local database_name="${2}"
local archiver="${3}"
local since="${4}"
backup_filename+="-${database_name}"
_check_commands influxd mktemp
echo "--> Dump Influx database '${database_name}':"
if ! test -s "${backup_filename}"
then
TMP_DIR=$(mktemp --directory)
if [ ${?} -gt 0 ]
then
echo " FAILED to create a temporary directory, skipping."
return 1
fi
influxd \
backup \
-host "${host}" \
-database "${database_name}" \
${since:+-since "${since}"} \
"${TMP_DIR}"
if [ ${?} -gt 0 ]
then
echo " FAILED to getting of dump, skipping."
rm \
--force \
--recursive \
"${TMP_DIR}" \
"${backup_filename}"
return 1
fi
_make_archive "" "${archiver}" "${TMP_DIR}"
local make_archive_exitcode="${?}"
rm \
--force \
--recursive \
"${TMP_DIR}"
if [ ${?} -gt 0 ]
then
echo " FAILED to remove a temporary directory, skipping."
return 1
fi
return "${make_archive_exitcode}"
else
echo " EXIST, skipping."
return 1
fi
}
_make_minio() {
local source="${1}"
local bucket="${2}"
local archiver="${3}"
backup_filename+="-${bucket}"
_check_commands mc mktemp
echo "--> Dump Minio bucket '${bucket}':"
if ! test -s "${backup_filename}"
then
TMP_DIR=$(mktemp --directory)
if [ ${?} -gt 0 ]
then
echo " FAILED to create a temporary directory, skipping."
return 1
fi
mc \
cp \
"${MC_OPTIONS[@]}" \
--recursive \
"${source}/${bucket}" \
"${TMP_DIR}"
if [ ${?} -gt 0 ]
then
echo " FAILED to getting of copy, skipping."
rm \
--force \
--recursive \
"${TMP_DIR}" \
"${backup_filename}"
return 1
fi
_make_archive "" "${archiver}" "${TMP_DIR}"
local make_archive_exitcode="${?}"
rm \
--force \
--recursive \
"${TMP_DIR}"
if [ ${?} -gt 0 ]
then
echo " FAILED to remove a temporary directory, skipping."
return 1
fi
return "${make_archive_exitcode}"
else
echo " EXIST, skipping."
return 1
fi
}
_make_mongodump() {
local host="${1}"
local database_name="${2}"
local collection_name="${3}"
local query_filter="${4}"
backup_filename+="-${database_name}-${collection_name:-all}.mongodump.gz"
_check_commands mongodump
echo "--> Dump Mongo database '${database_name}' with '${collection_name:-all}' collection(s):"
if test -n "${query_filter}"
then
echo "--> with query filter: ${query_filter}"
fi
if ! test -s "${backup_filename}"
then
mongodump \
"${MONGODUMP_OPTIONS[@]}" \
--archive="${backup_filename}" \
--db="${database_name}" \
${collection_name:+--collection="${collection_name}"} \
--gzip \
--host="${host}" \
--query="${query_filter}" \
--readPreference="nearest"
if [ ${?} -gt 0 ]
then
echo " FAILED to getting of dump, skipping."
rm \
--force \
"${backup_filename}"
return 1
fi
else
echo " EXIST, skipping."
return 1
fi
}
# Dump one MySQL/MariaDB database to <backup_filename>-<db>.sql.bz2.
# Mutates the caller's $backup_filename (dynamic scoping) by appending the
# database-specific suffix.  The server's global max_statement_time is
# temporarily set to 0 so a long-running dump is not killed, then restored.
# Returns 1 (without deleting an existing file) when the target already
# exists or any step fails.
_make_mysqldump() {
  local database_name="${1}"
  backup_filename+="-${database_name}.sql.bz2"
  local max_statement_time
  _check_commands bzip2 mysql mysqldump sed
  echo "--> Dump MySQL database '${database_name}':"
  if ! test -s "${backup_filename}"
  then
    # Read the current max_statement_time; the sed s///gp deletes the
    # variable name from the matching line, printing only its value.
    max_statement_time=$(
      mysql \
        --batch \
        --execute="show global variables;" \
        --skip-column-names \
      | sed \
        --silent \
        "/max_statement_time/s///gp"
    )
    if [ ${?} -gt 0 ]
    then
      echo "  FAILED to get current values of 'max_statement_time' variable, skipping."
      return 1
    fi
    # Disable the statement timeout for the duration of the dump.
    mysql \
      --execute="set global max_statement_time=0;"
    if [ ${?} -gt 0 ]
    then
      echo "  FAILED to set 'max_statement_time' variable to 0, skipping."
      return 1
    fi
    # 'set -o pipefail' at file scope makes a mysqldump failure visible
    # through this pipeline's exit status.
    mysqldump \
      "${MYSQLDUMP_OPTIONS[@]}" \
      "${database_name}" \
    | bzip2 \
      --best \
      > "${backup_filename}"
    if [ ${?} -gt 0 ]
    then
      echo "  FAILED to getting of dump, skipping."
      rm \
        --force \
        "${backup_filename}"
      # NOTE(review): returning here leaves max_statement_time at 0 on the
      # server — consider restoring it before this early return.
      return 1
    fi
    # Restore the previously captured timeout value.
    mysql \
      --execute="set global max_statement_time=${max_statement_time};"
    if [ ${?} -gt 0 ]
    then
      echo "  FAILED to revert value of 'max_statement_time' variable to ${max_statement_time}, skipping."
      return 1
    fi
  else
    echo "  EXIST, skipping."
    return 1
  fi
}
# Generic driver for one backup operation:
#   make_operation <operation> <operation-specific args...>
# Dispatches to _make_<operation>, which (via bash dynamic scoping)
# appends a type-specific suffix to our local $backup_filename.  On
# success, the resulting file's size is reported and an .md5 checksum
# file is written next to it.
# NOTE(review): $DEFAULT_FILENAMES_PREFIX and $backup_name are expected
# to be defined by the including script — confirm at the call sites.
make_operation() {
  local operation="${1}"
  shift
  local backup_filename="${DEFAULT_FILENAMES_PREFIX}-${backup_name}"
  local backup_filename_size
  # If $backup_filename contains a directory, then create it
  if [ "${backup_filename%/*}" != "${backup_filename}" ]
  then
    mkdir \
      --parents \
      "${backup_filename%/*}"
  fi
  SECONDS=0
  _make_${operation} "${@}"
  # _make_* returns 0 on success, 1 on skip/failure.
  if [ ${?} -lt 1 ]
  then
    backup_filename_size=$(
      stat \
        --printf="%s" \
        "${backup_filename}"
    )
    if [ ${?} -gt 0 ]
    then
      echo "  FAILED to get size of dump, skipping."
      return
    fi
    md5sum \
      "${backup_filename}" \
      > "${backup_filename}.md5"
    if [ ${?} -gt 0 ]
    then
      echo "  FAILED to md5sum calculate, skipping."
      return
    fi
    echo "  OK, complete ($((backup_filename_size/1024)) kbytes by $((SECONDS/60)) minutes $((SECONDS%60)) seconds)."
  fi
}
_check_commands rclone stat md5sum
| true |
de5498b1fa25160094187a3b02e523f04e29ca44 | Shell | crashcoredump/freebsd-desktop | /backupzfs.sh | UTF-8 | 12,349 | 3.46875 | 3 | [
"CC0-1.0"
] | permissive | #!/bin/bash
# https://wiki.freebsd.org/bhyve/UEFI
export MK_PATH="/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/sbin:/usr/local/bin"
export PATH="$MK_PATH"
export MK_SCRIPT="$0"
export MK_OPTS="$@"
# YES to enable debug
export PDEBUG=""
# Upper-case all arguments and print the result on stdout.
# This was originally (mis)named "tolower" even though ${msg^^}
# upper-cases, and that definition was immediately shadowed by the real
# tolower() below; renaming it to toupper() both matches its behavior
# and makes it reachable again.
toupper(){
    local msg="$@"
    echo "${msg^^}"
}
# Lower-case all arguments and print the result on stdout.
tolower(){
    local text="$*"
    echo "${text,,}"
}
# Print the unique items of the whitespace-separated argument list,
# one per line, in sorted order.
item_uniq(){
    local all="$@"
    local res
    # Split declaration and assignment so the helper's exit status is not
    # masked; $(...) replaces the legacy backtick form.
    res="$(doitem_uniq $all)"
    echo "$res"
}
# Helper for item_uniq: emit each argument word on its own line, then
# sort and de-duplicate the stream.  $all is expanded unquoted on
# purpose so the shell word-splits it into individual items.
doitem_uniq(){
    local all="$@"
    local aaa=''
    for aaa in $all
    do
        echo "$aaa"
    done | sort | uniq
}
# Print " - <message>" to stderr, or a bare " -" when no message is given.
necho(){
    local text="$*"
    if [ -n "$text" ]
    then
        echo " - $text" >&2
    else
        echo " -" >&2
    fi
}
# Timestamped logger: print " - <date> <message>" to stderr, or a bare
# " -" when no message is given.
pecho(){
    local msg="$@"
    if [ -z "$msg" ]
    then
        1>&2 echo " -"
    else
        1>&2 echo " - $(date) $msg"
    fi
}
# Severity-tagged wrappers around pecho.
iecho(){
    pecho "INFO: $@"
}
wecho(){
    pecho "WARNING: $@"
}
eecho(){
    pecho "ERROR: $@"
}
# Debug logging.  The file header documents "YES to enable debug", but
# the old test only honored PDEBUG=1 — accept both spellings.
decho(){
    case "$PDEBUG" in
        1|YES) pecho "DEBUG: $@" ;;
    esac
}
# Error / plain log helpers that prefix the *calling* function's name
# (FUNCNAME[1] is the caller of this helper).
efecho(){
    local msg="$@"
    local fn=${FUNCNAME[1]}
    eecho "$fn: $msg"
}
pfecho(){
    local msg="$@"
    local fn=${FUNCNAME[1]}
    pecho "$fn: $msg"
}
# Generate a MAC-address-like string with the fixed locally-administered
# prefix 02-60-2F followed by three bytes rendered as "-XX" hex pairs.
# With no arguments the three suffix bytes are read from /dev/random;
# otherwise they are the first three characters of the md5 digest text
# of the arguments, giving a stable MAC per input string.
# NOTE(review): "md5" is the BSD/macOS command name (GNU systems ship
# "md5sum") — confirm the target platform before reusing this elsewhere.
genmac(){
    local msg="$@"
    if [ -z "$msg" ]
    then
        echo -n 02-60-2F; dd bs=1 count=3 if=/dev/random 2>/dev/null |hexdump -v -e '/1 "-%02X"'
    else
        echo -n 02-60-2F; echo "$msg" | md5 | dd bs=1 count=3 2>/dev/null |hexdump -v -e '/1 "-%02X"'
    fi
}
# Collapse every run of consecutive '/' characters in a path to a single
# '/', e.g. "a//b///c" -> "a/b/c".  Empty input prints nothing.
pathprune(){
    local path="$1"
    test -z "$path" && return 0
    # Repeatedly squeeze "//" until the string stops changing; pure
    # parameter expansion avoids spawning sed once per loop iteration.
    while [ "$path" != "${path//\/\//\/}" ]
    do
        path="${path//\/\//\/}"
    done
    echo "$path"
}
# Run a command on the SOURCE side: ${SRCSSH} is either empty (local) or
# an "ssh <host> " prefix, so the unquoted expansion deliberately
# word-splits into the remote-command invocation.
sc(){
    local msg="$@"
    ${SRCSSH}${msg}
    return $?
}
# Same as sc() but for the DESTINATION side (${DSTSSH} prefix).
dc(){
    local msg="$@"
    ${DSTSSH}${msg}
    return $?
}
# zfs wrapper on the source side: "ss list ..." == "sc zfs list ...".
ss(){
    sc zfs $@
    return $?
}
# zfs wrapper on the destination side.
ds(){
    dc zfs $@
    return $?
}
# NOTE(review): exact duplicate of the ss() definition above; bash keeps
# this later definition.  Consider deleting one of the two.
ss(){
    sc zfs $@
    return $?
}
# Print command-line usage to stderr (via pecho) and exit non-zero.
# NOTE(review): the synopsis omits the -T/-v/-dup/-D/-C switches that the
# argument parser below accepts — consider documenting them here.
usage(){
    pecho "$MK_SCRIPT [-x] [-t tag] <[host@]src dataset> <[host@]dst dataset>"
    exit 1
}
# 30 days
export EXPIRETS=`expr 30 \* 24 \* 60 \* 60`
export xtrace=""
export CLEAN=""
export debug=""
export snapshot="0"
export progress="0"
export nodedup="0"
export utag=""
export SRCHOST=""
export SRCDS=""
export SRCTR=""
export SRCSSH=""
export DSTHOST=""
export DSTDS=""
export DSTTR=""
export DSTSSH=""
needval=''
for aaa in $@
do
if [ "$needval" = "-t" ]
then
echo "$aaa" | grep -q '^-' && eecho "invalid switch: $aaa" && usage
utag="$aaa"
needval=''
continue
fi
if [ "$aaa" = "-t" ]
then
needval="$aaa"
continue
fi
if [ "$aaa" = "-T" ]
then
snapshot="1"
continue
fi
if [ "$aaa" = "-v" ]
then
progress="1"
continue
fi
if [ "$aaa" = "-dup" ]
then
nodedup="1"
continue
fi
if [ "$aaa" = "-x" ]
then
set -x
xtrace="-x"
continue
fi
if [ "$aaa" = "-D" ]
then
debug="YES"
continue
fi
if [ "$aaa" = "-C" ]
then
CLEAN="YES"
continue
fi
echo "$aaa" | grep -q '^-' && eecho "unknow switch: $aaa" && continue
echo "$aaa" | grep -q '@'
if [ $? -eq 0 ]
then
onehost=`echo $aaa|awk -F'@' '{print $1}'`
oneds=`echo $aaa|awk -F'@' '{print $2}'`
onetr="@"
onessh="ssh $onehost "
test -z "$onehost" -o -z "$oneds" && eecho "invalid dataset parameter: $aaa" && usage
else
onehost=""
oneds="$aaa"
onetr=""
onessh=""
fi
if [ -z "$SRCDS" ]
then
SRCDS=$oneds
SRCHOST=$onehost
SRCTR=$onetr
SRCSSH=$onessh
continue
fi
if [ -z "$DSTDS" ]
then
DSTDS=$oneds
DSTHOST=$onehost
DSTTR=$onetr
DSTSSH=$onessh
continue
fi
eecho "Too many parameters: $aaa"
done
if [ -z "$SRCDS" -o -z "$DSTDS" ]
then
eecho "too few parameters: $@"
usage
fi
if [ `id -u` -ne 0 ]
then
pecho ""
pecho "sudo ..."
pecho ""
sudo $MK_SCRIPT $MK_OPTS
exit $?
fi
export MK_TAG
if [ -z "$MK_TAG" ]
then
MK_TAG=`date +%Y%m%d%H%M%S`
test -n "$utag" && MK_TAG="${MK_TAG}-$utag"
fi
SRCDS=`pathprune $SRCDS`
DSTDS=`pathprune $DSTDS`
SRCINFO=${SRCHOST}${SRCTR}${SRCDS}
DSTINFO=${DSTHOST}${DSTTR}${DSTDS}
pecho "zfs sync tag $MK_TAG from $SRCINFO => $DSTINFO ..."
pecho "source information ..."
srcimp=0
SRCPOOL=`echo $SRCDS|awk -F'/' '{print $1}'`
ss list $SRCPOOL >/dev/null 2>&1
if [ $? -ne 0 ]
then
pecho "src pool $SRCPOOL no exist, try to import ..."
sc zpool export -f $SRCPOOL >/dev/null 2>&1
sc zpool import -N -f $SRCPOOL || exit 1
srcimp=1
fi
ss list $DSTPOOL >/dev/null 2>&1
if [ $? -ne 0 ]
then
pecho "import $SRCPOOL failed."
exit 1
else
pecho "src pool $SRCPOOL ready."
fi
ss list $SRCDS >/dev/null 2>&1
if [ $? -ne 0 ]
then
eecho "$SRCDS no exist"
exit 1
fi
# Destroy snapshots of dataset $2 (recursively) whose creation timestamp
# is older than $EXPIRETS seconds.  $1 selects the side to operate on:
# the destination-side zfs wrapper (ds) for "dest"/"remote", the
# source-side wrapper (ss) otherwise.
cleanshapshot(){
    local site="$1"
    local dstds="$2"
    local scmd=ss
    # Callers invoke this with "dest" for the destination pool, but the
    # old test only matched "remote", so destination cleanups wrongly ran
    # through the source-side wrapper.  Accept both spellings.
    case "$site" in
        remote|dest)
            scmd=ds
            ;;
    esac
    # Cut-off timestamp formatted like the zfs "creation" column output
    # this script expects (YYYYmmddHHMMSS).
    # NOTE(review): "date -j -f" is BSD-style date — confirm the target
    # platform (GNU date uses "date -d @<epoch>").
    local limits=`date +%s`
    let limits=${limits}-${EXPIRETS}
    limits=`date -j -f %s ${limits} +%Y%m%d%H%M%S`
    pecho "cleaning snapshots old then ${EXPIRETS}($limits) for $site $dstds ..."
    # Probe one line of output to verify the creation column is numeric.
    $scmd list -H -o name,creation -s creation $dstds | head -n 1 > /tmp/zfs.${site}.${MK_TAG}.clean.list || exit 1
    local oname=""
    local ots=""
    local sts=""
    read oname ots < /tmp/zfs.${site}.${MK_TAG}.clean.list
    let sts=${ots:0:14}+1-1 2>/dev/null
    if [ $? -ne 0 ]
    then
        pecho "invalid source zfs list output for creation time(need patched zfs): $ots"
        exit 1
    fi
    # Destroy every snapshot whose creation timestamp precedes the cut-off.
    $scmd list -t snapshot -H -o name,creation -s creation -r $dstds > /tmp/zfs.${site}.${MK_TAG}.clean.list || exit 1
    while read oname ots;
    do
        sts=${ots:0:14}
        if [ $sts -ge $limits ]
        then
            continue
        fi
        pecho "clean $sts - $oname"
        $scmd destroy -f $oname || exit 1
    done < /tmp/zfs.${site}.${MK_TAG}.clean.list
    pecho "clean $site $dstds done."
    return 0
}
if [ "$CLEAN" = "YES" ]
then
cleanshapshot source $SRCDS || exit 1
fi
if [ "$snapshot" = "1" ]
then
ss list -t snapshot ${SRCDS}@${MK_TAG} >/dev/null 2>&1
if [ $? -ne 0 ]
then
ss snapshot -r ${SRCDS}@${MK_TAG} || exit 1
pecho "src snapshot ${SRCDS}@${MK_TAG} created."
else
pecho "src snapshot ${SRCDS}@${MK_TAG} already existed."
fi
fi
cat /dev/null > /tmp/zfs.src.${MK_TAG}.sn.list || exit 1
for oneds in `ss list -r -H -o name $SRCDS`
do
ss list -t snapshot -H -o name -S creation -r $oneds | uniq >> /tmp/zfs.src.${MK_TAG}.sn.list || exit 1
done
srcdslist=''
# NOTE: does not sync base SRCDS
dslen=${#SRCDS}
parents=""
prebase=''
for onesn in `cat /tmp/zfs.src.${MK_TAG}.sn.list`
do
test -n "$prebase" && parents="$parents $prebase" && prebase=""
suffix=${onesn:${dslen}};
dsname="`echo $suffix | awk -F'@' '{print $1}'`"
dsmark="`echo $dsname | tr '/' '_'`"
echo "$suffix" >> /tmp/zfs.src.${MK_TAG}.suffix-$dsmark-ds.list
srcdslist="$srcdslist $dsname"
prebase=`dirname $onesn`
test "$prebase" = "." && prebase=''
done
srcdslist=`item_uniq $srcdslist`
parents=`item_uniq $parents`
# test -n "$parents" && pecho "parents: $parents"
pecho "dest information ..."
dstimp=0
DSTPOOL=`echo $DSTDS|awk -F'/' '{print $1}'`
ds list $DSTPOOL >/dev/null 2>&1
if [ $? -ne 0 ]
then
pecho "pool $DSTPOOL no exist, try to import ..."
dc zpool export -f $DSTPOOL >/dev/null 2>&1
dc zpool import -N -f $DSTPOOL || exit 1
dstimp=1
fi
ds list $DSTPOOL >/dev/null 2>&1
if [ $? -ne 0 ]
then
pecho "import $DSTPOOL failed."
exit 1
else
pecho "dst $DSTPOOL ready."
fi
# Ensure dataset $1 exists on the destination side, creating each missing
# ancestor in turn (a hand-rolled equivalent of "zfs create -p" built
# from the '/'-separated components).  Exits the whole script when a
# create does not take effect.
dstcreate(){
    local dstds="$1"
    local dsname=""
    # Fast path: nothing to do when the full dataset already exists.
    ds list $dstds >/dev/null 2>&1
    if [ $? -eq 0 ]
    then
        return 0
    fi
    local item=''
    # Walk pool/child/grandchild/... creating every missing level.
    for item in `echo $dstds | tr '/' ' '`
    do
        if [ -z "$dsname" ]
        then
            dsname="$item"
        else
            dsname="$dsname/$item"
        fi
        ds list $dsname >/dev/null 2>&1
        if [ $? -eq 0 ]
        then
            continue
        else
            pecho "$dsname no exist, create it ..."
            ds create $dsname
        fi
        # Re-check: abort the run when the create did not take effect.
        ds list $dsname >/dev/null 2>&1
        if [ $? -ne 0 ]
        then
            eecho "create $dsname failed"
            exit 1
        else
            pecho "$dsname created."
        fi
    done
}
dstcreate $DSTDS
if [ "$CLEAN" = "YES" ]
then
cleanshapshot dest $DSTDS || exit 1
fi
cat /dev/null > /tmp/zfs.dst.${MK_TAG}.sn.list || exit 1
for oneds in `ds list -r -H -o name $DSTDS`
do
ds list -t snapshot -H -o name -S creation -r $oneds | uniq >> /tmp/zfs.dst.${MK_TAG}.sn.list || exit 1
done
dslen=${#DSTDS}
for onesn in `cat /tmp/zfs.dst.${MK_TAG}.sn.list`
do
suffix=${onesn:${dslen}};
dsname="`echo $suffix | awk -F'@' '{print $1}'`"
dsmark="`echo $dsname | tr '/' '_'`"
echo "$suffix" >> /tmp/zfs.dst.${MK_TAG}.suffix-$dsmark-ds.list
done
pecho "matching and sync ..."
dup="-D "
if [ $nodedup -eq 1 ]
then
dup=""
fi
prg=""
if [ $progress -eq 1 ]
then
prg="-v "
fi
srcdslist="# $srcdslist"
#pecho "dataset: $srcdslist"
for dsname in $srcdslist
do
dsmark="`echo $dsname | tr '/' '_'`"
test "$dsname" = "#" && dsmark="" && dsname=""
matchfx=""
firstfx=""
srcds=${SRCDS}${dsname}
nosend=0
for onep in $parents
do
if [ "$srcds" = "$onep" ]
then
wecho ""
wecho "parent dataset skipped: $srcds"
wecho ""
nosend=1
break
fi
done
test "$nosend" -ne 0 && continue
pecho "sync $srcds ..."
touch "/tmp/zfs.dst.${MK_TAG}.suffix-$dsmark-ds.list" || exit 1
touch "/tmp/zfs.src.${MK_TAG}.suffix-$dsmark-ds.list" || exit 1
while read srcfx
do
test -z "$firstfx" && firstfx=$srcfx && test "$debug" = "YES" && pecho "first snapshot: $firstfx"
while read dstfx
do
if [ "$srcfx" = "$dstfx" ]
then
matchfx=$dstfx
# break, find the newest match
test "$debug" = "YES" && pecho "MATCH, last matchfx: $matchfx"
break
else
test "$debug" = "YES" && pecho "mismatch, src $srcfx, dst $dstfx, last matchfx: $matchfx"
fi
done < /tmp/zfs.dst.${MK_TAG}.suffix-$dsmark-ds.list
#
# break, find the newest match
test -n "$matchfx" && break
done < /tmp/zfs.src.${MK_TAG}.suffix-$dsmark-ds.list
dstcreate ${DSTDS}$dsname
if [ -z "$firstfx" ]
then
pecho "src snapshot not exist, creating ..."
firstfx="${dsname}@${MK_TAG}"
matchfx=""
srcsnapshot="${SRCDS}${firstfx}"
ss list -t snapshot ${srcsnapshot} >/dev/null 2>&1
if [ $? -ne 0 ]
then
ss snapshot ${srcsnapshot} || exit 1
ss list -t snapshot ${srcsnapshot} >/dev/null 2>&1
if [ $? -ne 0 ]
then
pecho "src snapshot ${srcsnapshot} failed."
exit 1
else
pecho "src snapshot ${srcsnapshot} created."
fi
else
pecho "src snapshot exist ? should not happen"
exit 1
fi
fi
if [ -z "$matchfx" ]
then
ds list ${DSTDS}$dsname >/dev/null 2>&1
if [ $? -eq 0 ]
then
pecho "destroy dest befor sync: ${DSTDS}$dsname ..."
ds destroy -rf ${DSTDS}$dsname || exit 1
fi
wecho ""
wecho "snapshots out of sync, full sync ${SRCDS}${firstfx} to ${DSTDS}$dsname in $DSTINFO ..."
wecho ""
ss send $dup$prg ${SRCDS}${firstfx} | ds recv -F ${DSTDS}$dsname
if [ $? -ne 0 ]
then
eecho "full sync ${SRCDS}${firstfx} to ${DSTDS}$dsname in $DSTINFO failed."
exit 1
fi
else
if [ "$firstfx" = "$matchfx" ]
then
pecho "destination $DSTINFO ${DSTDS}$dsname already synced with source: $SRCINFO ${SRCDS}${firstfx}"
pecho ""
continue
fi
pecho "sync $SRCINFO($matchfx - $firstfx) to $DSTINFO ..."
ss send $dup$prg -I $SRCDS${matchfx} $SRCDS${firstfx} | ds recv -F ${DSTDS}$dsname
if [ $? -ne 0 ]
then
eecho "sync $SRCINFO($matchfx - $firstfx) to $DSTINFO failed."
pecho "re-try full sync ..."
ds list ${DSTDS}$dsname >/dev/null 2>&1
if [ $? -eq 0 ]
then
pecho "destroy snapshot in dest ${DSTDS}$dsname ..."
ds destroy -rf ${DSTDS}$dsname || exit 1
fi
ss send $dup$prg ${SRCDS}${firstfx} | ds recv -F ${DSTDS}$dsname
if [ $? -ne 0 ]
then
eecho "re-try full sync ${SRCDS}${firstfx} to ${DSTDS}$dsname in $DSTINFO failed."
exit 1
fi
pecho "full sync(re-try) done."
fi
fi
done
if [ $srcimp -ne 0 ]
then
if [ -n "$ZBAK_DELAY_EXPORT_FILE" ]
then
echo "${SRCHOST}${SRCTR}$SRCPOOL" >> $ZBAK_DELAY_EXPORT_FILE
else
sc zpool export -f $SRCPOOL >/dev/null 2>&1
if [ $? -ne 0 ]
then
pecho "src pool $SRCPOOL export failed."
else
pecho "src pool $SRCPOOL exported."
fi
fi
fi
if [ $dstimp -ne 0 ]
then
if [ -n "$ZBAK_DELAY_EXPORT_FILE" ]
then
echo "${DSTHOST}${DSTTR}$DSTPOOL" >> $ZBAK_DELAY_EXPORT_FILE
else
dc zpool export -f $DSTPOOL >/dev/null 2>&1
if [ $? -ne 0 ]
then
pecho "dest pool $DSTPOOL export failed."
else
pecho "dest pool $DSTPOOL exported."
fi
fi
fi
pecho ""
pecho "all done."
pecho ""
exit 0
#
| true |
b4876461a4bd6b147a169b588abb2398dcaeca1a | Shell | pvkc/Online-Grocery-Store-SQLite | /OnlineGroceryStore/stop_gunicorn.sh | UTF-8 | 88 | 2.65625 | 3 | [] | no_license | #!/bin/bash
# Stop the gunicorn process recorded in the PID file, then remove the
# stale PID file.  Guard on the file's existence first so the script does
# not error out when gunicorn is not running.
PID=/tmp/gunicorn.pid
if [ -f "$PID" ]
then
	kill "$(cat "$PID")"
	rm -f "$PID"
fi
| true |
d0868cefae9c0b53c79276a71c2ef577b14b7578 | Shell | iynaix/dotfiles-old | /bin/bin/git-reword.sh | UTF-8 | 760 | 3.828125 | 4 | [] | no_license | #! /bin/bash
# git-reword <SHA>: reword the commit message of an arbitrary ancestor
# commit.  Stashes local changes, amends the message on a temporary
# "savepoint" branch rooted at <SHA>, rebases the original branch on top
# of the reworded commit, then cleans up and restores the stash.
if [ -z "$1" ];
then
    echo "No SHA provided. Usage: \"git reword <SHA>\"";
    exit 1;
fi;

# Rewording HEAD itself is just "git commit --amend".
if [ "$(git rev-parse "$1")" = "$(git rev-parse HEAD)" ];
then
    echo "$1 is the current commit on this branch. Use \"git commit --amend\" to reword the current commit.";
    exit 1;
fi;

# Exit status: 0 = ancestor, 1 = not an ancestor, other = error.
git merge-base --is-ancestor "$1" HEAD;
ANCESTRY=$?;

if [ "$ANCESTRY" -eq 1 ];
then
    echo "SHA is not an ancestor of HEAD.";
    exit 1;
elif [ "$ANCESTRY" -eq 0 ];
then
    git stash;
    START=$(git rev-parse --abbrev-ref HEAD);
    git branch savepoint;
    git reset --hard "$1";
    git commit --amend;
    git rebase -p --onto "$START" "$1" savepoint;
    git checkout "$START";
    git merge savepoint;
    git branch -d savepoint;
    git stash pop;
else
    # merge-base failed outright (e.g. an unresolvable revision).
    exit 2;
fi
| true |
cd50f2c60178a469fc521e75d2a6524cfc43adf2 | Shell | hanslovsky/gnuplot | /janelia/thickness/plot_animation.sh | UTF-8 | 2,882 | 3 | 3 | [] | no_license | #!/bin/bash
MIN="${MIN:-0}"
MAX="${MAX:-1000}"
STEP="${STEP:-1}"
DIGITS="${#MAX}"
N_ZERO_PADDING="${N_ZERO_PADDING:-$DIGITS}"
N_WHITESPACE_PADDING="${N_WHITESPACE_PADDING:-$DIGITS}"
SOURCE_DIR="${SOURCE_DIR:-fit_coordinates}"
TARGET_DIR="${TARGET_DIR:-plot_animation}"
SOURCE_FORMAT="$SOURCE_DIR/fitCoordinates_%d.csv"
SEPARATOR="${SEPARATOR:-,}"
TITLE_FORMAT="Iteration: %$N_WHITESPACE_PADDING""d"
SIZE_X="${SIZE_X:-480}"
SIZE_Y="${SIZE_Y:-$SIZE_X}"
TERM_TYPE="${TERM_TYPE:-pngcairo}"
TARGET_DIR="$TARGET_DIR""_$TERM_TYPE""_$SIZE_X""x$SIZE_Y"
if [[ "$TERM_TYPE" = "pngcairo" ]]; then
SUFFIX="png"
elif [[ "$TERM_TYPE" = "pdf" ]]; then
SUFFIX="pdf"
else
SUFFIX="unknown"
fi
TARGET_FORMAT="$TARGET_DIR/%0$N_ZERO_PADDING""d.$SUFFIX"
mkdir -p "$TARGET_DIR"
# can this help with mixed italic/normal text?
# http://stackoverflow.com/questions/17809917/gnuplot-text-color-palette
for index in $(seq $MIN $STEP $MAX); do
SOURCE_FILE=$(printf "$SOURCE_FORMAT" "$index" )
TARGET_FILE=$(printf "$TARGET_FORMAT" "$index" )
PLOT_TITLE="$(printf "$TITLE_FORMAT" $index)"
echo "reset;
# set key top left;
unset key
set datafile separator '$SEPARATOR';
# coordinate system
set style line 11 lc rgb '#101010' lt 1
set border 3 back linestyle 11;
set xtics axis;
set ytics axis;
set tics scale 0.5;
# arrows
# set arrow from graph 1,0 to graph 1.05,0 size screen 0.015,20 filled linestyle 11;
# set arrow from graph 0,1 to graph 0,1.05 size screen 0.015,20 filled linestyle 11;
#grid
set style line 12 lc rgb'#808080' lt 0 lw 1;
set grid front ls 12;
# set term wxt
# set term pngcairo size 640,480 enhanced font 'monofur,12';
set term $TERM_TYPE size $SIZE_X,$SIZE_Y enhanced font 'URW Palladio L';
STRDIST = 0.025
CORRECTED_LENGTH=0.165
set title '$PLOT_TITLE';
set output '$TARGET_FILE';
set xlabel 'z-index';
set ylabel 'corrected z-index';
# set label 1 'z' at screen 0.5,0.5 font 'URW Palladio L-Italic';
# set label 2 '-' at screen 0.5+1*STRDIST,0.5;
# set label 3 'index' at screen 0.5+2*STRDIST,0.5;
# set label 4 'corrected' at screen 0.3,0.5 rotate by 90;
# set label 5 'z' at screen 0.3,0.5+CORRECTED_LENGTH rotate by 90 font 'URW Palladio L-Italic';
# set label 6 '-' at screen 0.3,0.5+CORRECTED_LENGTH+1*STRDIST rotate by 90;
# set label 7 'index' at screen 0.3,0.5+CORRECTED_LENGTH+2*STRDIST rotate by 90;
load '/groups/saalfeld/home/hanslovskyp/git/gnuplot/colors/sequential/hhmi.plt'
fraction(x) = ( x - $MIN ) * 1.0 / ($MAX - $MIN)
set xrange [0:]
set yrange [0:]
unset colorbox
# set object 1 rectangle from 750,750 to 950,950 back fs solid 0.15 noborder fc palette frac fraction($index)
plot '$SOURCE_FILE' using 1:2 title 'coordinate mapping' pointtype 7 pointsize 1.0 linecolor palette frac fraction($index) , \
'$SOURCE_FILE' using 1:1 with lines title 'one-to-one mapping' linetype 0 linecolor rgb 'black';
" | gnuplot
done
| true |
10a5c7c670fb340985c6c995d79a6e9a5af2c0a6 | Shell | ansonken/jd-1 | /docker/default_task.sh | UTF-8 | 10,953 | 3.59375 | 4 | [
"MIT"
] | permissive | #!/bin/sh
set -e
# The Python 3 environment is initialized here at container start, rather
# than baked into the image, to keep the image small — users who do not
# need bot interaction can avoid pulling a much larger image.
# Initializing here also means that when the bot gains new features,
# simply restarting the container completes the update.
# Install the Python 3 toolchain (via Alpine's apk) plus the jd_bot
# Python dependencies, then install the bot package itself.
function initPythonEnv() {
    echo "开始安装运行jd_bot需要的python环境及依赖..."
    apk add --update python3-dev py3-pip py3-cryptography py3-numpy py-pillow
    echo "开始安装jd_bot依赖..."
    # For local testing the bot sources lived under /jd_docker:
    #cd /jd_docker/docker/bot
    # Merged layout: the bot ships inside the scripts checkout.
    cd /scripts/docker/bot
    pip3 install --upgrade pip
    pip3 install -r requirements.txt
    python3 setup.py install
}
#启动tg bot交互前置条件成立,开始安装配置环境
if [ "$1" == "True" ]; then
initPythonEnv
if [ -z "$DISABLE_SPNODE" ]; then
echo "增加命令组合spnode ,使用该命令spnode jd_xxxx.js 执行js脚本会读取cookies.conf里面的jd cokie账号来执行脚本"
(
cat <<EOF
#!/bin/sh
set -e
first=\$1
cmd=\$*
echo \${cmd/\$1/}
if [ \$1 == "conc" ]; then
for job in \$(cat \$COOKIES_LIST | grep -v "#" | paste -s -d ' '); do
{ export JD_COOKIE=\$job && node \${cmd/\$1/}
}&
done
elif [ -n "\$(echo \$first | sed -n "/^[0-9]\+\$/p")" ]; then
echo "\$(echo \$first | sed -n "/^[0-9]\+\$/p")"
{ export JD_COOKIE=\$(sed -n "\${first}p" \$COOKIES_LIST) && node \${cmd/\$1/}
}&
elif [ -n "\$(cat \$COOKIES_LIST | grep "pt_pin=\$first")" ];then
echo "\$(cat \$COOKIES_LIST | grep "pt_pin=\$first")"
{ export JD_COOKIE=\$(cat \$COOKIES_LIST | grep "pt_pin=\$first") && node \${cmd/\$1/}
}&
else
{ export JD_COOKIE=\$(cat \$COOKIES_LIST | grep -v "#" | paste -s -d '&') && node \$*
}&
fi
EOF
) >/usr/local/bin/spnode
chmod +x /usr/local/bin/spnode
fi
echo "spnode需要使用的到,cookie写入文件,该文件同时也为jd_bot扫码获自动取cookies服务"
if [ -z "$JD_COOKIE" ]; then
if [ ! -f "$COOKIES_LIST" ]; then
echo "" >"$COOKIES_LIST"
echo "未配置JD_COOKIE环境变量,$COOKIES_LIST文件已生成,请将cookies写入$COOKIES_LIST文件,格式每个Cookie一行"
fi
else
if [ -f "$COOKIES_LIST" ]; then
echo "cookies.conf文件已经存在跳过,如果需要更新cookie请修改$COOKIES_LIST文件内容"
else
echo "环境变量 cookies写入$COOKIES_LIST文件,如果需要更新cookie请修改cookies.conf文件内容"
echo $JD_COOKIE | sed "s/[ &]/\\n/g" | sed "/^$/d" >$COOKIES_LIST
fi
fi
CODE_GEN_CONF=/scripts/logs/code_gen_conf.list
echo "生成互助消息需要使用的到的 logs/code_gen_conf.list 文件,后续需要自己根据说明维护更新删除..."
if [ ! -f "$CODE_GEN_CONF" ]; then
(
cat <<EOF
#格式为
#互助类型-机器人ID-提交代码(根据bot作者配置得来)-活动脚本日志文件名-活动代码(根据bot作者配置得来)-查找互助码需要用到的定位字符串
#长期活动示例
#long-@TuringLabbot-jd_sgmh.log-sgmh-暂无
#临时活动示例
#temp-@TuringLabbot-jd_sgmh.log-sgmh-暂无
#每天变化活动示例
#daily-@TuringLabbot-jd_818.log-818-暂无
#种豆得豆
long-@TuringLabbot-/submit_activity_codes-jd_plantBean.log-bean-种豆得豆好友互助码】
#京东农场
long-@TuringLabbot-/submit_activity_codes-jd_fruit.log-farm-东东农场好友互助码】
#京东萌宠
long-@TuringLabbot-/submit_activity_codes-jd_pet.log-pet-东东萌宠好友互助码】
#东东工厂
long-@TuringLabbot-/submit_activity_codes-jd_jdfactory.log-ddfactory-东东工厂好友互助码】
#京喜工厂
long-@TuringLabbot-/submit_activity_codes-jd_dreamFactory.log-jxfactory-京喜工厂好友互助码】
#临时活动
temp-@TuringLabbot-/submit_activity_codes-jd_sgmh.log-sgmh-您的好友助力码为:
#临时活动
temp-@TuringLabbot-/submit_activity_codes-jd_cfd.log-jxcfd-主】你的互助码:
temp-@TuringLabbot-/submit_activity_codes-jd_global.log-jdglobal-好友助力码为
#分红狗活动
long-@LvanLamCommitCodeBot-/jdcrazyjoy-jd_crazy_joy.log-@N-crazyJoy任务好友互助码】
#签到领现金
long-@LvanLamCommitCodeBot-/jdcash-jd_cash.log-@N-您的助力码为
#京东赚赚
long-@LvanLamCommitCodeBot-/jdzz-jd_jdzz.log-@N-京东赚赚好友互助码】
EOF
) >$CODE_GEN_CONF
else
echo "logs/code_gen_conf.list 文件已经存在跳过初始化操作"
fi
echo "容器jd_bot交互所需环境已配置安装已完成..."
curl -sX POST "https://api.telegram.org/bot$TG_BOT_TOKEN/sendMessage" -d "chat_id=$TG_USER_ID&text=恭喜🎉你获得feature容器jd_bot交互所需环境已配置安装已完成,并启用。请发送 /help 查看使用帮助。如需禁用请在docker-compose.yml配置 DISABLE_BOT_COMMAND=True" >>/dev/null
fi
#echo "暂停更新配置,不要尝试删掉这个文件,你的容器可能会起不来"
#echo '' >/scripts/logs/pull.lock
echo "定义定时任务合并处理用到的文件路径..."
defaultListFile="/scripts/docker/$DEFAULT_LIST_FILE"
echo "默认文件定时任务文件路径为 ${defaultListFile}"
mergedListFile="/scripts/docker/merged_list_file.sh"
echo "合并后定时任务文件路径为 ${mergedListFile}"
echo "第1步将默认定时任务列表添加到并后定时任务文件..."
cat $defaultListFile >$mergedListFile
echo "第2步判断是否存在自定义任务任务列表并追加..."
if [ $CUSTOM_LIST_FILE ]; then
echo "您配置了自定义任务文件:$CUSTOM_LIST_FILE,自定义任务类型为:$CUSTOM_LIST_MERGE_TYPE..."
# 无论远程还是本地挂载, 均复制到 $customListFile
customListFile="/scripts/docker/custom_list_file.sh"
echo "自定义定时任务文件临时工作路径为 ${customListFile}"
if expr "$CUSTOM_LIST_FILE" : 'http.*' &>/dev/null; then
echo "自定义任务文件为远程脚本,开始下载自定义远程任务。"
wget -O $customListFile $CUSTOM_LIST_FILE
echo "下载完成..."
elif [ -f /scripts/docker/$CUSTOM_LIST_FILE ]; then
echo "自定义任务文件为本地挂载。"
cp /scripts/docker/$CUSTOM_LIST_FILE $customListFile
fi
if [ -f "$customListFile" ]; then
if [ $CUSTOM_LIST_MERGE_TYPE == "append" ]; then
echo "合并默认定时任务文件:$DEFAULT_LIST_FILE 和 自定义定时任务文件:$CUSTOM_LIST_FILE"
echo -e "" >>$mergedListFile
cat $customListFile >>$mergedListFile
elif [ $CUSTOM_LIST_MERGE_TYPE == "overwrite" ]; then
echo "配置了自定义任务文件:$CUSTOM_LIST_FILE,自定义任务类型为:$CUSTOM_LIST_MERGE_TYPE..."
cat $customListFile >$mergedListFile
else
echo "配置配置了错误的自定义定时任务类型:$CUSTOM_LIST_MERGE_TYPE,自定义任务类型为只能为append或者overwrite..."
fi
else
echo "配置的自定义任务文件:$CUSTOM_LIST_FILE未找到,使用默认配置$DEFAULT_LIST_FILE..."
fi
else
echo "当前只使用了默认定时任务文件 $DEFAULT_LIST_FILE ..."
fi
# Step 3: optionally prefix each node task with a random sleep so jobs do
# not all fire at the same second (the sed excludes time-critical scripts).
echo "第3步判断是否配置了随机延迟参数..."
if [ $RANDOM_DELAY_MAX ]; then
  if [ $RANDOM_DELAY_MAX -ge 1 ]; then
    echo "已设置随机延迟为 $RANDOM_DELAY_MAX , 设置延迟任务中..."
    sed -i "/\(jd_bean_sign.js\|jd_blueCoin.js\|jd_joy_reward.js\|jd_joy_steal.js\|jd_joy_feedPets.js\|jd_car_exchange.js\)/!s/node/sleep \$((RANDOM % \$RANDOM_DELAY_MAX)); node/g" $mergedListFile
  fi
else
  echo "未配置随机延迟对应的环境变量,故不设置延迟任务..."
fi
# Step 4: run an optional user-supplied shell script; remote http(s) URLs
# are downloaded first, otherwise a docker-mounted file path is executed.
echo "第4步判断是否配置自定义shell执行脚本..."
if [ 0"$CUSTOM_SHELL_FILE" = "0" ]; then
  echo "未配置自定shell脚本文件,跳过执行。"
else
  if expr "$CUSTOM_SHELL_FILE" : 'http.*' &>/dev/null; then
    echo "自定义shell脚本为远程脚本,开始下载自定义远程脚本。"
    wget -O /scripts/docker/shell_script_mod.sh $CUSTOM_SHELL_FILE
    echo "下载完成,开始执行..."
    echo "#远程自定义shell脚本追加定时任务" >>$mergedListFile
    sh -x /scripts/docker/shell_script_mod.sh
    echo "自定义远程shell脚本下载并执行结束。"
  else
    if [ ! -f $CUSTOM_SHELL_FILE ]; then
      echo "自定义shell脚本为docker挂载脚本文件,但是指定挂载文件不存在,跳过执行。"
    else
      echo "docker挂载的自定shell脚本,开始执行..."
      echo "#docker挂载自定义shell脚本追加定时任务" >>$mergedListFile
      sh -x $CUSTOM_SHELL_FILE
      echo "docker挂载的自定shell脚本,执行结束。"
    fi
  fi
fi
# Step 5: delete crontab entries for scripts the user opted out of
# (DO_NOT_RUN_SCRIPTS is an '&'-separated list of name fragments).
echo "第5步删除不运行的脚本任务..."
if [ $DO_NOT_RUN_SCRIPTS ]; then
  echo "您配置了不运行的脚本:$DO_NOT_RUN_SCRIPTS"
  arr=${DO_NOT_RUN_SCRIPTS//&/ }
  for item in $arr; do
    sed -ie '/'"${item}"'/d' ${mergedListFile}
  done
fi
# Step 6: schedule the next run of docker_entrypoint.sh at a random time.
echo "第6步设定下次运行docker_entrypoint.sh时间..."
echo "删除原有docker_entrypoint.sh任务"
sed -ie '/'docker_entrypoint.sh'/d' ${mergedListFile}
# Before 12:00 generate a cron time after 12:00; after 12:00 generate one
# before 12:00 the next day — so the code only self-updates twice a day.
if [ $(date +%-H) -lt 12 ]; then
  random_h=$(($RANDOM % 12 + 12))
else
  random_h=$(($RANDOM % 12))
fi
random_m=$(($RANDOM % 60))
echo "设定 docker_entrypoint.sh cron为:"
echo -e "\n# 必须要的默认定时任务请勿删除" >>$mergedListFile
echo -e "${random_m} ${random_h} * * * docker_entrypoint.sh >> /scripts/logs/default_task.log 2>&1" | tee -a $mergedListFile
# Step 7: optionally export "help" codes before every node task.
echo "第7步 自动助力"
if [ -n "$ENABLE_AUTO_HELP" ]; then
  # Testing the variable directly would make sh error when it is unset,
  # hence this outer -n guard.
  if [ "$ENABLE_AUTO_HELP" = "true" ]; then
    echo "开启自动助力"
    # Before any script runs, export the help codes first.
    sed -i 's/node/ . \/scripts\/docker\/auto_help.sh export > \/scripts\/logs\/auto_help_export.log \&\& node /g' ${mergedListFile}
  else
    echo "未开启自动助力"
  fi
fi
# Step 8: pipe task output through ts to timestamp log lines (skip entries
# that already mention ts).
echo "第8步增加 |ts 任务日志输出时间戳..."
sed -i "/\( ts\| |ts\|| ts\)/!s/>>/\|ts >>/g" $mergedListFile
echo "第9步执行proc_file.sh脚本任务..."
sh /scripts/docker/proc_file.sh
# Step 10: install the merged list as the live crontab, swapping node for
# spnode when the jd_bot binary exists and spnode is not disabled.
echo "第10步加载最新的定时任务文件..."
if [[ -f /usr/bin/jd_bot && -z "$DISABLE_SPNODE" ]]; then
  echo "bot交互与spnode 前置条件成立,替换任务列表的node指令为spnode"
  sed -i "s/ node / spnode /g" $mergedListFile
  # Example of running each cookie's scripts in parallel with "conc"; with
  # many cookies this may exhaust memory, so it is off by default — if
  # needed, implement it in a custom shell script.
  #sed -i "/\(jd_xtg.js\|jd_car_exchange.js\)/s/spnode/spnode conc/g" $mergedListFile
fi
crontab $mergedListFile
echo "第11步将仓库的docker_entrypoint.sh脚本更新至系统/usr/local/bin/docker_entrypoint.sh内..."
cat /scripts/docker/docker_entrypoint.sh >/usr/local/bin/docker_entrypoint.sh
# Finally notify the user via the node helper.
echo "发送通知"
export NOTIFY_CONTENT=""
cd /scripts/docker
node notify_docker_user.js
| true |
eff36c210b80a21aa97171511e2a262dee9babf4 | Shell | sebhoerl/lead-lyon | /parcels/run_all.sh | UTF-8 | 1,714 | 3.140625 | 3 | [] | no_license | ## Check for input files
## Fail fast: an unchecked failure of any stage below (osmosis, jupyter,
## maven, java) should abort the pipeline rather than feed broken
## intermediate files into the next stage.
set -e

## Input files required by the pipeline (newline separated; paths must not
## contain whitespace since the list is word-split below).
INPUT_FILES="
input/activities.csv
input/homes.gpkg
input/persons.csv
input/confluence_areas.gpkg
input/rhone-alpes-latest.osm.pbf
input/lyon_network.xml.gz
"

for f in $INPUT_FILES; do
    if [ ! -f "$f" ]; then
        echo "Input file missing: $f"
        exit 1
    fi
done

## Check osmosis is available. 'command -v' is the POSIX replacement for
## testing the output of 'which', which is non-standard and can print
## noise or nothing depending on the platform.
if ! command -v osmosis > /dev/null 2>&1; then
    echo "Osmosis is not executable"
    exit 1
fi

## Check Maven
if ! command -v mvn > /dev/null 2>&1; then
    echo "Maven does not exist (mvn is not executable)"
    exit 1
fi

## Initialize output directory
mkdir -p output

## Generate poly file from GPKG for cutting the OSM data
jupyter nbconvert "Make Polygon.ipynb" --execute --output-dir output

## Extract OSM data for the area
osmosis --read-pbf input/rhone-alpes-latest.osm.pbf --tf accept-ways highway=* --bounding-polygon file=output/confluence_areas.poly --used-node --write-pbf output/confluence.osm.pbf

## Generate parcels based on synthetic travel demand and ADM survey
jupyter nbconvert "Generate Parcels.ipynb" --execute --ExecutePreprocessor.timeout=-1 --output-dir output

## Generate the VRP problem to solve
jupyter nbconvert "Generate VRP.ipynb" --execute --ExecutePreprocessor.timeout=-1 --output-dir output

## Build the Java part
sh -c "cd lead-java && mvn clean package"

## Solve the VRP problem using JSprit
java -cp lead-java/target/lead-0.0.1-SNAPSHOT.jar lead.RunOptimization \
    output/vrp_deliveries.csv output/vrp_distances.csv 6068627214 output/vrp_solution.csv

## Simulate vehicles in MATSim to create visualisations
java -cp lead-java/target/lead-0.0.1-SNAPSHOT.jar lead.RunMovements \
    input/lyon_network.xml.gz output/vrp_nodes.csv output/vrp_solution.csv output/vrp_movements
| true |
5dc8f58e702eff7146e1249c5dd2280dabe61450 | Shell | sifive/RiscvSpecFormal | /simpleTests.sh | UTF-8 | 1,160 | 3.34375 | 3 | [
"Apache-2.0"
] | permissive | #! /usr/bin/env bash
#
# Example Usage: ./simpleTests.sh '/nettmp/netapp1a/vmurali/riscv-tests/isa/' 'ui' 'add'
# Runs one riscv-tests ELF ("$3") in both p and v variants for RV32 and
# RV64, capturing each run's output under simpleTestsResult/.
source common.sh
path=$1
[[ $path == '' ]] && error "Error: PATH argument is missing."
pre=$2
[[ $pre == '' ]] && error "Error: PRE argument is missing (example 'ui')."
test=$3
[[ $test == '' ]] && error "Error: TEST argument is missing (example 'add')."
rm -rf simpleTestsResult
mkdir -p simpleTestsResult
# Run a single test ELF and redirect all of its output to a result file.
#   $1 - xlen (32 or 64), $2 - variant ('p' or 'v'), $3 - simulator flag
function runTest {
  local xlen=$1
  local type=$2
  local sim=$3
  local fileName="rv$xlen$pre-$type-$test"
  local resPath="simpleTestsResult/$fileName$sim.out"
  echo "run Test xlen=$xlen type=$type sim=$sim fileName=$fileName resPath=$resPath"
  rm -f $resPath
  ./runElf.sh --xlen $xlen $sim --path $path/$fileName &> $resPath
}
# Run the 'p' and 'v' variants of the test in parallel for one xlen.
#   $1 - xlen, $2 - simulator flag
function runTests {
  local xlen=$1
  local sim=$2
  echo "run Tests xlen=$xlen sim=$sim"
  runTest $xlen 'p' $sim & runTest $xlen 'v' $sim
}
# Generate the Coq simulator then run 32- and 64-bit suites concurrently;
# the commented continuation would also exercise the Verilog simulator.
./doGenerate.sh --coq-sim --parallel &&(runTests 64 --coq-sim & runTests 32 --coq-sim) #&& ./doGenerate.sh --xlen 64 --parallel && ./doGenerate.sh --xlen 32 --parallel &&(runTests 64 --verilog-sim & runTests 32 --verilog-sim)
cat simpleTestsResult/*
| true |
62a947c92a94de073d1c436eb86145a0d255e913 | Shell | lapesd/PSkel-MPPA-Async | /tests/mppaPlotExercise/getSpeedup.sh | UTF-8 | 364 | 3.203125 | 3 | [
"BSD-3-Clause"
] | permissive | #!/bin/bash
# Compute per-run speedups relative to the first (baseline) timing entry.
#
# Arguments:
#   $1 - data directory containing tmpTime<APPNAME>.txt
#   $2 - application name embedded in the timing file names
DATADIR=$1
APPNAME=$2
# Baseline is the first timing value in the file.
CLUSTERBASE=$(head -n 1 "$DATADIR/tmpTime${APPNAME}.txt")
# Truncate the output file. The original used a bare "&> file", which bash
# accepts but is clearer written as the no-op ":" with a redirection.
: > "$DATADIR/tmpTimeSpeedup${APPNAME}.txt"
while read -r p; do
  # Skip the baseline entry itself; for every other timing, emit
  # speedup = baseline / value (bc keeps 4 decimal places).
  if (( $(bc <<< "$CLUSTERBASE != $p") )); then
    speedup=$(echo "scale=4;$CLUSTERBASE/$p" | bc)
    echo " $speedup" >> "$DATADIR/tmpTimeSpeedup${APPNAME}.txt"
  fi
done < "$DATADIR/tmpTime${APPNAME}.txt"
ad356e79065c61a5b1d605d1ea7ce9c56369947e | Shell | umn-enhs/linux-image-scanning-automation | /image_deskew.sh | UTF-8 | 1,293 | 3.796875 | 4 | [] | no_license | #!/bin/bash
# Deskew a scanned TIFF in place.
#
# Usage: image_deskew.sh <scan.tif>
#
# Every processed file's skew is appended to <scan_dir>/deskew/log.txt so
# a scan is only deskewed once. All expansions are quoted so file names
# with spaces work (the original left several unquoted).
scan="$1"
sub_folder=deskew

# Temporary working file; tiff2rgba wants a .tif suffix, so rename the
# path mktemp reserved (small race window, kept from the original design).
temptif=$(mktemp /tmp/temp-XXXXXXXX)
rm -f "$temptif"
temptif="${temptif}.tif"

if [ -e "${scan}" ]; then
	# Resolve the scan's directory to an absolute path.
	scan_folder=$(dirname "${scan}")
	pushd "${scan_folder}" > /dev/null
	scan_folder=$(pwd)
	popd > /dev/null

	if [ ! -d "${scan_folder}/${sub_folder}" ]; then
		mkdir "${scan_folder}/${sub_folder}"
	fi
	skew_log="${scan_folder}/${sub_folder}/log.txt"
	scanfile=$(basename "${scan}")

	# Only de-skew files once: the log lists every processed file name.
	deskewed=0
	if [ -f "${skew_log}" ]; then
		grep -q "${scanfile}" "${skew_log}" && deskewed=1
	fi
	if [ "${deskewed}" == "0" ]; then
		# Measure the skew, record it, then negate it so the rotation
		# cancels the measured tilt.
		skew=$(nice tiff_findskew "${scan}")
		echo "${scanfile} ${skew}" >> "${skew_log}"
		skew=$(echo "$skew * -1" | bc)
		# Use BC to decide whether skew is outside +/- 0.25 degrees;
		# smaller tilts are not worth the quality loss of rotating.
		offenough=$(echo "$skew > 0.25 || $skew < -0.25" | bc)
		if [ "$offenough" == "1" ] ; then
			echo "Deskewing $scan by $skew degrees."
			nice tiff2rgba "${scan}" "${temptif}"
			nice mogrify -adaptive-blur 3x3 -rotate "${skew}" -type Bilevel "${temptif}"
			mv "${temptif}" "${scan}"
		fi
	fi
else
	echo "file $scan not found."
fi
11c439956310c61f9722c08f11f695e3ed6013aa | Shell | AlphaBoom9/AlphaBoom | /contrib/initscripts/bsd/novacoin | UTF-8 | 1,839 | 3.859375 | 4 | [
"MIT"
] | permissive | #!/bin/sh
# FreeBSD rc.d service script for the AlphaBoom daemon.
# PROVIDE: AlphaBoom
# REQUIRE: LOGIN
# KEYWORD: shutdown
#
# Add the following lines to /etc/rc.conf.local or /etc/rc.conf
# to enable this service:
#
# AlphaBoom_enable (bool): Set to NO by default. Set this variable to YES if you want to enable AlphaBoom service.
# AlphaBoom_config (path): Set to /usr/local/etc/AlphaBoom.conf by default.
# AlphaBoom_user:  The user account AlphaBoom daemon runs as. It uses 'root' user by default.
# AlphaBoom_group: The group account AlphaBoom daemon runs as. It uses 'wheel' group by default.
# AlphaBoom_datadir (str): Default to "/var/db/AlphaBoom". Base data directory.
. /etc/rc.subr
name=AlphaBoom
rcvar=AlphaBoom_enable
# Defaults, overridable from rc.conf (":" keeps existing values).
: ${AlphaBoom_enable:=NO}
: ${AlphaBoom_config=/usr/local/etc/AlphaBoom.conf}
: ${AlphaBoom_datadir=/var/db/AlphaBoom}
: ${AlphaBoom_user="root"}
: ${AlphaBoom_group="wheel"}
required_files=${AlphaBoom_config}
command=/usr/local/bin/AlphaBoomd
AlphaBoom_chdir=${AlphaBoom_datadir}
pidfile="${AlphaBoom_datadir}/AlphaBoomd.pid"
stop_cmd=AlphaBoom_stop
command_args="-conf=${AlphaBoom_config} -datadir=${AlphaBoom_datadir} -daemon -pid=${pidfile}"
start_precmd="${name}_prestart"
# Create the data directory and hand it to the service user/group.
AlphaBoom_create_datadir()
{
	echo "Creating data directory"
	eval mkdir -p ${AlphaBoom_datadir}
	[ $? -eq 0 ] && chown -R ${AlphaBoom_user}:${AlphaBoom_group} ${AlphaBoom_datadir}
}
# start_precmd hook: ensure the data directory exists before launch.
AlphaBoom_prestart()
{
	if [ ! -d "${AlphaBoom_datadir}/." ]; then
		AlphaBoom_create_datadir || return 1
	fi
}
# Abort unless a live pid can be read from the pidfile.
AlphaBoom_requirepidfile()
{
	if [ ! "0`check_pidfile ${pidfile} ${command}`" -gt 1 ]; then
		echo "${name} not running? (check $pidfile)."
		exit 1
	fi
}
# Graceful shutdown via the daemon's own "stop" command, then wait for
# the process to exit.
AlphaBoom_stop()
{
	AlphaBoom_requirepidfile
	echo "Stopping ${name}."
	eval ${command} -conf=${AlphaBoom_config} -datadir=${AlphaBoom_datadir} stop
	wait_for_pids ${rc_pid}
}
load_rc_config $name
run_rc_command "$1"
| true |
8cc7294caba7c862549a808c54928a40d9f3f5c3 | Shell | emcorrales/bash101 | /section2/exer6.bash | UTF-8 | 550 | 4.46875 | 4 | [] | no_license | #!/bin/bash
# Exercise 6:
#
# Write a shell script that prompts the user for a name of a file or
# directory and reports if it is a regular file, a directory, or other
# type of file. Also perform an ls command against the file or directory
# with the long listing option.
#
# -r stops read from interpreting backslashes; all expansions are quoted
# so paths containing spaces work.
read -r -p "Please enter the path of a file or directory: " FILE
if [ ! -e "$FILE" ]; then
	echo "File $FILE does not exist!"
else
	if [ -f "$FILE" ]; then
		TYPE="regular"
	elif [ -d "$FILE" ]; then
		TYPE="directory"
	else
		# Anything else (device node, socket, FIFO, ...) — the original
		# left TYPE unset here and printed "is a  file."
		TYPE="special"
	fi
	echo "$FILE is a $TYPE file."
	ls -l "$FILE"
fi
| true |
60104f9acd5274e2a77c8ea79750227042482720 | Shell | beevelop/TSHITS | /meta/checks.sh | UTF-8 | 1,311 | 3.984375 | 4 | [
"Apache-2.0"
] | permissive | check_udp() {
    # $1 - host, $2 - port.
    # Probe a UDP port with netcat's zero-I/O mode. UDP "reachable" only
    # means no ICMP port-unreachable came back, so this is best-effort.
    HOST=$1
    PORT=$2
    nc -uzv $HOST $PORT
    if [[ $? == 0 ]]; then
        echo "✅ [UDP-] ${HOST}:${PORT} is reachable"
        return 0
    else
        echo "📛 [UDP-] ${HOST}:${PORT} is not reachable"
        return 1
    fi
}
check_tcp() {
    # Probe a TCP port using netcat's zero-I/O mode.
    #
    # $1 - host, $2 - port.
    # Prints a status line and returns 0 if reachable, 1 otherwise.
    # Uses locals and quoted expansions (the original leaked the HOST/PORT
    # globals and expanded them unquoted).
    local host=$1
    local port=$2
    if nc -zv "$host" "$port"; then
        echo "✅ [TCP-] ${host}:${port} is reachable"
        return 0
    else
        echo "📛 [TCP-] ${host}:${port} is not reachable"
        return 1
    fi
}
check_file() {
    # Report whether a path exists (any file type).
    #
    # $1 - path to test.
    # Prints a status line and returns 0 if the path exists, 1 otherwise.
    # The expansion is quoted so paths with spaces or globs work.
    local file=$1
    if [[ -e "$file" ]]; then
        echo "✅ [FILE] ${file} does exist"
        return 0
    else
        echo "📛 [FILE] ${file} does not exist"
        return 1
    fi
}
check_traefik() {
    # Ask the local Traefik proxy (127.0.0.1) for virtual host $1 via the
    # Host header and compare the first response line against the expected
    # status line $2 (e.g. "HTTP/1.1 200 OK").
    HOST=$1
    EXPECTED=$2
    REQ=`curl --header "Host: $HOST" -ksSI 127.0.0.1 | head -n1 | tr -d $'\r'` # strip \r (^M) so the string comparison is not polluted
    if [[ "$REQ" == "$EXPECTED" ]]; then
        echo "✅ [TRFK] Got ${REQ} (${HOST})"
        return 0
    else
        echo "📛 [TRFK] Got ${REQ} (expected ${EXPECTED}) (${HOST})"
        return 1
    fi
}
check_curl() {
    # Fetch the headers for URL $1 (3 second timeout, TLS errors ignored)
    # and compare the first response line against the expected value $2.
    URL=$1
    EXPECTED=$2
    REQ=`curl -m 3 -ksSI $1 | head -n1 | tr -d $'\r'` # \r is ^M and pollutes the string value
    if [[ "$REQ" == "$EXPECTED" ]]; then
        echo "✅ [CURL] Got ${REQ} (${URL})"
        return 0
    else
        echo "📛 [CURL] Got ${REQ} (expected ${EXPECTED}) (${URL})"
        return 1
    fi
}
| true |
9f47077f0e54d7de23c020b3349c947e47298bee | Shell | twistedmove/jsalt2019-diadet | /egs/jsalt2019-diadet/v2/local/make_chime5_spkdiar_jsalt19_enhanced_dev_eval.sh | UTF-8 | 1,804 | 3.390625 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Copyright
#            2019   Johns Hopkins University (Author: Jesus Villalba)
# Apache 2.0.
#
# Make lists for JSALT19 worshop speaker diarization
# for CHiMe5 dataset
#
# Usage: $0 <wav-path> <list-path> <output_path>
# Builds Kaldi-style dev and eval data dirs for each test microphone.
set -e
if [ $# != 3 ]; then
  echo "$0 <wav-path> <list-path> <output_path>"
  exit 1
fi
# Microphone identifiers; only the two test mics are actually iterated.
train_mics="PXX U01 U02 U03 U04 U05 U06"
test_mics="U01 U06"
wav_path=$1
list_path=$2
output_path=$3
data_name=jsalt19_spkdiar_chime5_enhanced
for mic in $test_mics
do
    # Make dev data
    echo "making $data_name dev mic-$mic"
    python local/make_jsalt19_spkdiar.py \
	   --list-path $list_path/dev \
	   --wav-path $wav_path/CHiME5/dev/SE_1000h_model_m3_s3 \
	   --output-path $output_path \
	   --data-name $data_name \
	   --partition dev \
	   --rttm-suffix ${mic}_dev \
	   --mic $mic
    # Copy the un-scored-regions file for the diarization scorer.
    cp $list_path/train/all.$mic.uem $output_path/${data_name}_dev_$mic/diarization.uem
    #make spk2utt so kaldi don't complain
    utils/utt2spk_to_spk2utt.pl $output_path/${data_name}_dev_$mic/utt2spk \
	> $output_path/${data_name}_dev_$mic/spk2utt
    utils/fix_data_dir.sh $output_path/${data_name}_dev_$mic
done
for mic in $test_mics
do
    # Make eval data
    echo "making $data_name eval mic-$mic"
    python local/make_jsalt19_spkdiar.py \
	   --list-path $list_path/eval \
	   --wav-path $wav_path/CHiME5/test/SE_1000h_model_m3_s3 \
	   --output-path $output_path \
	   --data-name $data_name \
	   --partition eval \
	   --rttm-suffix ${mic}_test \
	   --mic $mic
    cp $list_path/eval/all.$mic.uem $output_path/${data_name}_eval_$mic/diarization.uem
    #make spk2utt so kaldi don't complain
    utils/utt2spk_to_spk2utt.pl $output_path/${data_name}_eval_$mic/utt2spk \
	> $output_path/${data_name}_eval_$mic/spk2utt
    utils/fix_data_dir.sh $output_path/${data_name}_eval_$mic
done
| true |
d3f856bd7b0cc94a29def8cca29c0ba1951f24e6 | Shell | metrilyx/metrilyx-dataserver | /etc/init.d/metrilyx-dataserver | UTF-8 | 2,302 | 4.125 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/sh
#
# description: Metrilyx Data Server. This provides data via websockets.
#
# Pull in Red Hat init helpers and optional service overrides, if present.
[ -f "/etc/rc.d/init.d/functions" ] && . /etc/rc.d/init.d/functions
[ -f "/etc/sysconfig/metrilyx" ] && . /etc/sysconfig/metrilyx
# pgrep -f matches against the full command line, which lets the helpers
# below distinguish per-port dataserver instances.
PGREP="/usr/bin/pgrep -f"
NAME="metrilyx-dataserver"
METRILYX_HOME="/opt/metrilyx"
BIN=${METRILYX_HOME}/bin/${NAME}.py
LOGDIR="$METRILYX_HOME/logs"
LOGLEVEL="WARNING"
#
# Number of servers to spawn. Increase based on cpu's
SERVER_COUNT=1
#
# Used if running websocket from behind a proxy
#
#EXT_PORT_OPT="-e 80"
EXT_PORT_OPT=""
DEFAULT_OPTS="-l${LOGLEVEL} --log-dir ${LOGDIR} --hostname $(hostname) ${EXT_PORT_OPT}"
RETVAL=0
#
# Detect number of cpu's/cores on the system and leave one core free:
# sets SERVER_COUNT to (cores - 1) when more than one core is present.
#
set_server_count() {
    # NOTE(fix): the original used "|| {" — so the count only ran when
    # /proc/cpuinfo was MISSING and then tried to read it anyway — and
    # "svrcnt = $(...)" with spaces, which is not a valid assignment.
    if [ -f /proc/cpuinfo ]; then
        svrcnt=$(grep -c processor /proc/cpuinfo)
        if [ "$svrcnt" -gt 1 ]; then
            SERVER_COUNT=$((svrcnt - 1))
        fi
    fi
}
# Ensure the log directory exists. mkdir -p is a no-op when it already
# does, so the explicit -d test is unnecessary; the expansion is quoted
# (the original left $LOGDIR unquoted).
check_logdir() {
    mkdir -p "$LOGDIR"
}
#
# Starts a single dataserver on port $1, unless an instance with the exact
# same command line is already running. Sets RETVAL to 2 when the process
# is not alive 2 seconds after launch, and to 1 when already running.
#
start_dataserver() {
    PORT=$1
    OPTS="${DEFAULT_OPTS} --port ${PORT}";
    # Full-command-line pgrep: matches only the instance for this port.
    PIDS=`${PGREP} "${BIN} ${OPTS}" | xargs`;
    if [ "$PIDS" = "" ]; then
        echo -n "${NAME} starting (port ${PORT})... "
        METRILYX_HOME=${METRILYX_HOME} PYTHONPATH=${PYTHONPATH} ${BIN} ${OPTS} &
        # Give the daemon a moment to come up, then re-check it survived.
        sleep 2;
        PIDS=`${PGREP} "${BIN} ${OPTS}" | xargs`;
        if [ "$PIDS" = "" ]; then
            echo "[failed]";
            RETVAL=2;
        else
            echo "[ok]";
        fi
    else
        echo -n "${NAME} already running... ${PIDS}";
        RETVAL=1
    fi
}
#
# Start $SERVER_COUNT instances of the dataserver on consecutive ports
# beginning at 9001; aborts on the first instance that fails to start.
#
start() {
    check_logdir;
    set_server_count;
    for c in `seq ${SERVER_COUNT}`; do
        PORT=`expr 9000 \+ $c`
        start_dataserver $PORT;
        # start_dataserver reports failure via the RETVAL global.
        if [ "$RETVAL" -ne 0 ]; then
            echo "Failed to start server on port: $PORT"
            exit $RETVAL;
        fi
    done
}
# Report whether any dataserver process matching $BIN is alive, listing
# the pids when it is.
status() {
    PIDS=$(${PGREP} "${BIN}" | xargs)
    if [ -n "$PIDS" ]; then
        echo "${NAME} running... ${PIDS}"
    else
        echo "${NAME} not running"
    fi
}
# Stop all running dataserver instances: TERM each pid (newest first via
# sort -r), wait 2 seconds, then report [ok]/[failed] depending on
# whether any matching process is still alive.
stop() {
    echo -n "${NAME} stopping...";
    PIDS=`${PGREP} "${BIN}" | sort -r`;
    for pid in $PIDS; do
        kill $pid;
    done
    sleep 2;
    ( ${PGREP} "${BIN}" && echo "[failed]" ) || echo "[ok]";
}
# Dispatch on the init action; restart is stop + 3 s pause + start via
# re-invocation of this script.
case "$1" in
    start)
        start;
        ;;
    stop)
        stop;
        ;;
    status)
        status;
        ;;
    restart)
        $0 stop
        sleep 3
        $0 start
        ;;
    *)
        echo "$0 [start|stop|restart|status]"
        exit 1;
esac
# RETVAL was set by the handler functions above (0 on success).
exit $RETVAL
| true |
a577cecf4a548991d1b38098fea9534f8cf1e270 | Shell | pkhuong/app_jetex | /server/s/build | UTF-8 | 3,939 | 3.359375 | 3 | [
"BSD-2-Clause"
] | permissive | #!/bin/bash
set -e
# Build configuration: output shared object, source trees, vendored code
# (currently none) and libraries to link against.
OUT="libjetex_server.so";
SRC="src shared utility";
VENDOR="";
LIBS="-lpthread -lm -ldl";
# Toolchain: honour CC/CCACHE/NCPU from the environment, falling back to
# cc, an autodetected ccache, and the number of /proc/cpuinfo processors.
CC="${CC:-cc}";
CCACHE="${CCACHE:-$(if which ccache > /dev/null 2> /dev/null; then echo ccache; fi)}";
NCPU="${NCPU:-$(grep -c -E '^processor\s+:' /proc/cpuinfo)}";
# Build an alternation regex ("sym1|sym2|...") from the first column of
# the SYMBOLS file; used below to whitelist expected exports.
SYMBOLS=$(awk '{ if (NR == 1) printf("%s", $1); else printf("|%s", $1)}' < SYMBOLS);
DEFAULT_CFLAGS="-std=gnu99 -O2 -D_GNU_SOURCE -fPIC -ggdb3 -gdwarf-4";
DEFAULT_CFLAGS+=" -fno-omit-frame-pointer -fno-common -fvisibility=hidden";
DEFAULT_CFLAGS+=" -fno-strict-aliasing -fwrapv -fexceptions -fstack-protector-all";
DEFAULT_CFLAGS+=" -msse4.2 -msse4.1 -mpopcnt -maes -mpclmul -mrdrnd -march=core2 -mtune=native";
# Non-release builds get the strict warning set (compiler-specific extras
# are appended for gcc or clang respectively).
if [ -z "$RELEASE" ];
then
    # base warnings.
    CHECK_CFLAGS="-Werror -W -Wall -Wextra -Wuninitialized -Wformat=2 -Wundef";
    # bad prototypes are never acceptable.
    CHECK_CFLAGS+=" -Wstrict-prototypes -Wold-style-definition -Wmissing-prototypes -Wmissing-declarations";
    # local "style" errors: unused variables/params, declarations in the middle of blocks,
    # variables that lexically shadow another, using sizeof(void), string constants as
    # non-const arrays.
    CHECK_CFLAGS+=" -Wunused -Wdeclaration-after-statement -Wshadow -Wpointer-arith -Wwrite-strings";
    # switch safety.
    CHECK_CFLAGS+=" -Wswitch-enum -Wswitch-default";
    # object/frame size limits
    CHECK_CFLAGS+=" -Wlarger-than=$((2 ** 24)) -Wframe-larger-than=30000";
    # cast errors: unsafe *increase* in alignment, lossy conversion.
    CHECK_CFLAGS+=" -Wcast-align -Wconversion";
    # potential traps: hidden padding, old-style varargs, VLA.
    CHECK_CFLAGS+=" -Wpadded -Wvarargs -Wvla";
    if [ "x$LOGBRAID_CHECK" = "xgcc" ] || $($CC -v 2>&1 | grep -q 'gcc.* version');
    then
        # might as well annotate with what we know.
        CHECK_CFLAGS+=" -Wsuggest-attribute=pure -Wsuggest-attribute=const";
        CHECK_CFLAGS+=" -Wsuggest-attribute=noreturn -Wsuggest-attribute=format"
        # misc bad ideas.
        CHECK_CFLAGS+=" -Wtrampolines -Wjump-misses-init -Wnormalized=nfkc";
        # let's try to avoid getting burned by '&' VS '&&'.
        CHECK_CFLAGS+=" -Wlogical-op";
    elif [ "x$LOGBRAID_CHECK" = "xclang" ] || $($CC -v 2>&1 | grep -q 'clang.* version');
    then
        CHECK_CFLAGS=" -Wformat-pedantic";
    fi
fi
CFLAGS="${CFLAGS:-$DEFAULT_CFLAGS}";
if [ -z "$DISABLE_CCACHE" ] && [ ! -z "$CCACHE" ];
then
    echo "Enabling ccache ($CCACHE); define DISABLE_CCACHE to override";
else
    echo "Disabling ccache. Consider undefining DISABLE_CCACHE and installing ccache.";
    CCACHE="";
fi
# Recreate build/object from scratch, mirroring the source tree layout.
echo "Cleaning build/object";
mkdir -p build
rm -r build
mkdir -p build/object
echo "Creating directory structure for build/object";
find -L $SRC $VENDOR -type d -exec mkdir -p build/object/{} \;;
# Vendored sources are compiled with -O3/strict-aliasing and WITHOUT the
# strict warning set (we do not own that code).
if [ ! -z "$VENDOR" ];
then
    echo "Building vendored dependencies in build/object";
    time find -L $VENDOR -type f -name '*\.c' -print0 | \
        sed -e 's/\.c\x00/\x00/g' | \
        xargs -0 -n 1 -P $NCPU sh -c "echo \"\$0.c\"; $CCACHE $CC $CFLAGS -O3 -fstrict-aliasing $EXTRA_CFLAGS -c \"\$0.c\" -o \"build/object/\$0.o\" || exit 255";
fi
# Compile our own sources in parallel (NUL-delimited so odd file names
# survive; the sed strips the .c suffix so $0 is the stem).
echo "Building in build/object";
time find -L $SRC -type f -name '*\.c' -print0 | \
    sed -e 's/\.c\x00/\x00/g' | \
    xargs -0 -n 1 -P $NCPU sh -c "echo \"\$0.c\"; $CCACHE $CC $CHECK_CFLAGS $CFLAGS $EXTRA_CFLAGS -isystem vendor/ -Iinclude/ -I. -c \"\$0.c\" -o \"build/object/\$0.o\" || exit 255";
# Collect every object file into one backslash-escaped command line.
BUILT=$(find build/object/ -type f -iname '*\.o' -print0 | sed -e 's/\s/\\\0/g' -e 's/\x00/ /g');
COMMAND="$CC $CFLAGS $EXTRA_CFLAGS $LDFLAGS $EXTRA_LDFLAGS $BUILT $LIBS -shared -o output/$OUT";
echo -n "Linking output/$OUT: $COMMAND";
time (sh -c "$COMMAND" || exit $?);
echo "Done building output/$OUT";
# Warn (in red) about any exported symbol not whitelisted in SYMBOLS;
# [A-TV-Z] skips 'U' (undefined) entries in nm output.
EXPORTS=$((nm output/$OUT | grep ' [A-TV-Z] ' | egrep -v "^\s*[0-9a-f]+ [A-Z] ($SYMBOLS)\s*$") || true);
if [ ! -z "$EXPORTS" ];
then
    echo;
    echo -e "\e[1;31mUnexpected exports:\e[0m";
    echo "$EXPORTS";
    echo;
fi
da6ae73afc86ed1ab97511042417cbf3cbd6444e | Shell | jaylyerly/Python-aux | /build_libpng.sh | UTF-8 | 3,410 | 2.96875 | 3 | [
"BSD-3-Clause"
] | permissive | #!/bin/bash
# Build libpng 1.6.37 for iOS device (arm64) and simulator (x86_64) and
# package each dylib as a .framework; also stages the lib + headers where
# the sibling libheif build expects them.
# Required with Xcode 12 beta:
export M4=$(xcrun -f m4)
OSX_SDKROOT=$(xcrun --sdk macosx --show-sdk-path)
IOS_SDKROOT=$(xcrun --sdk iphoneos --show-sdk-path)
SIM_SDKROOT=$(xcrun --sdk iphonesimulator --show-sdk-path)
# Fetch and unpack the upstream source tarball.
curl -OL https://download.sourceforge.net/libpng/libpng-1.6.37.tar.gz
tar xzf libpng-1.6.37.tar.gz
rm libpng-1.6.37.tar.gz
# NOTE(review): the "\=" looks accidental — export still treats the
# argument as an assignment, but a plain "=" was presumably intended.
export DYLD_ROOT_PATH\=$(xcrun --sdk iphonesimulator --show-sdk-path)
# libpng — device (arm64) pass, cross-compiled with embedded bitcode.
pushd libpng-1.6.37
make distclean
./configure CC=clang CXX=clang++ \
	CFLAGS="-arch arm64 -miphoneos-version-min=11.0 -isysroot ${IOS_SDKROOT} -fembed-bitcode" \
	CPPFLAGS="-arch arm64 -miphoneos-version-min=11.0 -isysroot ${IOS_SDKROOT} -fembed-bitcode" \
	CXXFLAGS="-arch arm64 -miphoneos-version-min=11.0 -isysroot ${IOS_SDKROOT} -fembed-bitcode" \
	--build=x86_64-apple-darwin --host=armv8-apple-darwin cross_compiling=yes
make -j4 --quiet
# We're going to need them (for the libheif build):
mkdir -p ../libheif/lib_iphoneos
mkdir -p ../libheif/include_iphoneos
cp .libs/libpng16.16.dylib ../libheif/lib_iphoneos/libpng.dylib
cp png.h pnglibconf.h pngconf.h ../libheif/include_iphoneos/
# Library is now in: .libs/libpng16.16.dylib. Create framework:
popd
binary=libpng
FRAMEWORK_DIR=build/Release-iphoneos/$binary.framework
rm -rf ${FRAMEWORK_DIR}
mkdir -p ${FRAMEWORK_DIR}
mkdir -p ${FRAMEWORK_DIR}/Headers
cp libpng-1.6.37/png.h ${FRAMEWORK_DIR}/Headers
cp libpng-1.6.37/pnglibconf.h ${FRAMEWORK_DIR}/Headers
cp libpng-1.6.37/pngconf.h ${FRAMEWORK_DIR}/Headers
cp libpng-1.6.37/.libs/libpng16.16.dylib ${FRAMEWORK_DIR}/$binary
# Fill in the framework's Info.plist and fix the dylib install name so it
# loads via @rpath.
cp basic_Info.plist ${FRAMEWORK_DIR}/Info.plist
plutil -replace CFBundleExecutable -string $binary ${FRAMEWORK_DIR}/Info.plist
plutil -replace CFBundleName -string $binary ${FRAMEWORK_DIR}/Info.plist
plutil -replace CFBundleIdentifier -string Nicolas-Holzschuch.$binary ${FRAMEWORK_DIR}/Info.plist
install_name_tool -id @rpath/$binary.framework/$binary ${FRAMEWORK_DIR}/$binary
# Simulator (x86_64) pass — same steps against the simulator SDK.
pushd libpng-1.6.37
make distclean
./configure CC=clang CXX=clang++ \
	CFLAGS="-arch x86_64 -mios-simulator-version-min=11.0 -isysroot ${SIM_SDKROOT} -fembed-bitcode" \
	CPPFLAGS="-arch x86_64 -mios-simulator-version-min=11.0 -isysroot ${SIM_SDKROOT} -fembed-bitcode" \
	CXXFLAGS="-arch x86_64 -mios-simulator-version-min=11.0 -isysroot ${SIM_SDKROOT} -fembed-bitcode" \
	--build=x86_64-apple-darwin --host=x86_64-apple-darwin cross_compiling=yes
make -j4 --quiet
# We're going to need them:
mkdir -p ../libheif/lib_iphonesimulator
mkdir -p ../libheif/include_iphonesimulator
cp .libs/libpng16.16.dylib ../libheif/lib_iphonesimulator/libpng.dylib
cp png.h pnglibconf.h pngconf.h ../libheif/include_iphonesimulator/
popd
FRAMEWORK_DIR=build/Release-iphonesimulator/$binary.framework
rm -rf ${FRAMEWORK_DIR}
mkdir -p ${FRAMEWORK_DIR}
mkdir -p ${FRAMEWORK_DIR}/Headers
cp libpng-1.6.37/png.h ${FRAMEWORK_DIR}/Headers
cp libpng-1.6.37/pnglibconf.h ${FRAMEWORK_DIR}/Headers
cp libpng-1.6.37/pngconf.h ${FRAMEWORK_DIR}/Headers
cp libpng-1.6.37/.libs/libpng16.16.dylib ${FRAMEWORK_DIR}/$binary
cp basic_Info_Simulator.plist ${FRAMEWORK_DIR}/Info.plist
plutil -replace CFBundleExecutable -string $binary ${FRAMEWORK_DIR}/Info.plist
plutil -replace CFBundleName -string $binary ${FRAMEWORK_DIR}/Info.plist
plutil -replace CFBundleIdentifier -string Nicolas-Holzschuch.$binary ${FRAMEWORK_DIR}/Info.plist
install_name_tool -id @rpath/$binary.framework/$binary ${FRAMEWORK_DIR}/$binary
| true |
6a26ed8ba184b05049d2fc6d93c1d92434461ffb | Shell | THAO1999/Linux | /ÔN----/Linux_2018_2019/Midterm/bai2.sh | UTF-8 | 269 | 3.4375 | 3 | [] | no_license | #!/bin/bash
# Return 0 when $1 is a prime number, 1 otherwise.
# (The original returned from the very first loop iteration — both the
# "then" and "else" branches returned — so it only ever tested
# divisibility by 2 and mis-classified 9, 15, 21, ...)
nt() {
	a=$1
	# 0 and 1 are not prime.
	if [ "$a" -lt 2 ]; then
		return 1
	fi
	b=$((a / 2))
	# Trial division up to a/2; any divisor means composite.
	for ((i = 2; i <= b; i++)); do
		if [ $((a % i)) -eq 0 ]; then
			return 1
		fi
	done
	return 0
}
# Print every argument that nt() accepts (exit status 0), one per line.
check() {
	for num in $*
	do
		if nt $num
		then
			echo "$num"
		fi
	done
}
# Entry point: announce the result then print the command-line arguments
# that pass nt()'s trial-division test.
echo "Cac so nt la: "
check $@
| true |
0439a311011c266ba9c960de0ea8b219d311fb0b | Shell | colis-anr/colis-language | /tests/test_e.sh | UTF-8 | 176 | 3.125 | 3 | [] | no_license |
# Exercise the two equivalent spellings of the existence test: the `test`
# builtin and its bracket alias `[`. Output strings are unchanged.
if [ -e '/bin' ]; then
    echo '/bin exists'
else
    echo '/bin does not exist'
fi
if test -e '/home'; then
    echo '/home exists'
else
    echo '/home does not exist'
fi
| true |
0b67bd0cfea84a78186de3e1e5741596b1b8e96e | Shell | alexjh/buildpacks-ci | /tasks/create-bosh-release/run.sh | UTF-8 | 907 | 3.5625 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash -l
# Create a final BOSH release: upload the blob, commit the blob index,
# cut the release at the given version and commit the release artifacts.
set -o errexit
set -o nounset
set -o pipefail
set -x
version=$(cat version/number)
pushd "$RELEASE_DIR"
# Write S3 credentials for the blobstore only when a secret was supplied.
if [ -n "${SECRET_ACCESS_KEY:+1}" ]; then
  echo "creating private.yml..."
  cat > config/private.yml <<EOF
---
blobstore:
  s3:
    access_key_id: $ACCESS_KEY_ID
    secret_access_key: $SECRET_ACCESS_KEY
EOF
fi
rm -f config/blobs.yml
# we actually want globbing here, so:
# shellcheck disable=SC2086
bosh -n add blob ../$BLOB_GLOB "$BLOB_NAME"
bosh -n upload blobs
git add config/blobs.yml
git commit -m "Updating blobs for $RELEASE_NAME $version"
bosh -n create release --final --version "$version" --name "$RELEASE_NAME" --force
# Record the generated release manifests and final-build indexes.
git add releases/**/*-"$version".yml releases/**/index.yml
git add .final_builds/**/index.yml .final_builds/**/**/index.yml
git commit -m "Final release for $BLOB_NAME at $version"
popd
# Publish the release tree to the output resource.
rsync -a release/ release-artifacts
b9e9f3e0a4769cf0f6d397c7e3d5352b60e24a34 | Shell | tezizzm/dotnet-pipelines | /concourse-tasks/generate-version/task.sh | UTF-8 | 1,006 | 3.03125 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Concourse task wrapper: initialise the shared task environment, run the
# generate-version step, then publish the new version via the keyval store.
set -o errexit
set -o errtrace
ROOT_FOLDER="$( pwd )"
# Resource directory names as declared in the pipeline definition.
PIPELINE_RESOURCE=dotnet-pipelines
TASK_SCRIPTS_RESOURCE="${PIPELINE_RESOURCE}/scripts"
CONCOURSE_TASKS_RESOURCE="${PIPELINE_RESOURCE}/concourse-tasks"
SRC_VERSION_RESOURCE=src-version
KEYVAL_RESOURCE=keyval
KEYVALOUTPUT_RESOURCE=keyvalout
#######################################
# Initialize Task
#######################################
source "${ROOT_FOLDER}/${CONCOURSE_TASKS_RESOURCE}/functions/init-task.sh"
#######################################
# Run Task
#######################################
export VERSION_ROOT="${ROOT_FOLDER}/${SRC_VERSION_RESOURCE}"
# run.sh sets NEW_VERSION_NUMBER, consumed below.
source "${ROOT_FOLDER}/${TASK_SCRIPTS_RESOURCE}/tasks/generate-version/run.sh"
#add the new version number to keyval store
export PASSED_PIPELINE_VERSION="${NEW_VERSION_NUMBER}"
#######################################
# Finalize task
#######################################
source "${ROOT_FOLDER}/${CONCOURSE_TASKS_RESOURCE}/functions/finish-task.sh"
e9ed3cdc55129a217305dc50acc37c93f1cb6eb4 | Shell | siollb/e-comBox_scriptsMacOS | /install_docker_docker-compose.sh | UTF-8 | 1,243 | 3.390625 | 3 | [] | no_license | #!/bin/sh
# This script automates the installation of Docker and Docker-Compose
# (macOS, via Homebrew's Docker cask).
# Colours (ANSI escapes used by the echo -e calls below)
COLTITRE="\033[1;35m"   # Pink
COLPARTIE="\033[1;34m"  # Blue
COLTXT="\033[0;37m"     # Grey
COLCHOIX="\033[1;33m"   # Yellow
COLDEFAUT="\033[0;33m"  # Brownish yellow
COLSAISIE="\033[1;32m"  # Green
COLCMD="\033[1;37m"     # White
COLERREUR="\033[1;31m"  # Red
COLINFO="\033[0;36m"    # Cyan
clear
echo -e "$COLTITRE"
echo "************************************************************"
echo "* INSTALLATION DE DOCKER ET DOCKER-COMPOSE *"
echo "************************************************************"
# Docker installation
# Uses the official script provided by Docker
# https://github.com/docker/docker-install for Docker
echo -e ""
echo -e "$COLPARTIE"
echo -e "Installation de Docker et Docker-Compose"
echo -e ""
echo -e "$COLCMD"
brew cask install docker
sleep 2
# Launch the Docker Desktop application so the daemon starts
open --background -a Docker
# Wait for the docker VM to boot
sleep 50
echo -e ""
echo -e "$COLINFO"
echo -e "Docker et Docker-Compose sont installés"
echo -e "Le script va maintenant procéder à l'installation de e-comBox"
echo -e ""
echo -e "$COLCMD"
db75e735776a9677ff94f96a1807d7da80928aa2 | Shell | hato-lab/kidney-endotoxin-sepsis-timeline-featureplot | /prepare.sh | UTF-8 | 387 | 2.8125 | 3 | [
"BSD-3-Clause"
] | permissive | #!/usr/bin/env bash
set -euo pipefail

# This script downloads the necessary R data object file from GEO.

# Download and gunzip the R data object from GEO. "wget -O -" streams the
# archive to stdout (the portable spelling of the original
# "-O /dev/stdout"); with pipefail a failed download aborts the script
# instead of silently leaving a truncated .rds behind.
mkdir -p data
wget -O - ftp://ftp.ncbi.nlm.nih.gov/geo/series/GSE151nnn/GSE151658/suppl/GSE151658%5Fintegrated%2E0h%2E1h%5F4h%5F16h%5F27h%5F36h%5F48h%2Erds%2Egz | gunzip -c > data/integrated.0h.1h_4h_16h_27h_36h_48h.rds
| true |
2fa23bf46abed90c0a259cda2a4dc02020af524c | Shell | jonathanwillett/dotfiles | /helpers/gpg-agent.sh | UTF-8 | 371 | 3.46875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Only attempt to setup the gpg-agent if we're not on an SSH connection
if [ -z "${SSH_CONNECTION}" ]; then
  # Set GPG TTY so pinentry can find the terminal.
  GPG_TTY=$(tty)
  export GPG_TTY
  # Start the gpg-agent if not already running, silencing both streams.
  # NOTE(fix): the original wrote "3>&1", which redirects the unused fd 3
  # and leaves stderr noisy — "2>&1" was clearly intended.
  gpg-connect-agent /bye >/dev/null 2>&1
  # Set SSH to use gpg-agent
  unset SSH_AGENT_PID
  SSH_AUTH_SOCK="/run/user/$(id -u)/gnupg/S.gpg-agent.ssh"
  export SSH_AUTH_SOCK
fi
| true |
de1216d05316e94b8eee998c5c586ba61f098fe7 | Shell | Bubblemelon/scripts | /wpa-connect.sh | UTF-8 | 293 | 3.046875 | 3 | [] | no_license | #!/bin/bash
# Tiny usage summary for the two supported modes.
echo "wpa -- cancel systemd stuff"
echo "revert -- have the system as was before"
# NOTE(fix): the original case statement was not valid shell — it was
# missing the "in" keyword, the closing quote on "revert", and the ";;"
# terminators — so the script could not run at all.
case "$1" in
	"wpa")
		# Free the interfaces for wpa_supplicant by stopping the
		# system network managers.
		sudo systemctl stop NetworkManager
		sudo systemctl stop systemd-networkd
		;;
	"revert")
		# Restore the previous setup ("system-networkd" in the
		# original is not a real unit name).
		sudo systemctl start NetworkManager
		sudo systemctl enable systemd-networkd
		;;
esac
exit
| true |
c5cea41de7ebec5e8b1522fde7f83cf57ee290fa | Shell | colorfulberry/notes | /script/backupSystem.sh | UTF-8 | 168 | 2.8125 | 3 | [] | no_license | #!/bin/bash
# Archive key system directories into /backup/backup.tar.gz.
# $backdir is a whitespace-separated list, so it is deliberately left
# unquoted where it must word-split.
backdir="/etc /home /root /var/spool/mail"
basedir=/backup
# NOTE(fix): the original wrote '[! -d "$basedir"]' (missing spaces, so
# the test never parsed) and then mkdir'ed $backdir instead of $basedir.
[ -d "$basedir" ] || mkdir -p "$basedir"
backfile=$basedir/backup.tar.gz
tar -zcvf "$backfile" $backdir
| true |
f6c9843b1c2b6776edf5b0abb42292c3dd5119a3 | Shell | ppx17/advent-of-code | /2018/rust/rustc | UTF-8 | 383 | 2.578125 | 3 | [] | no_license | #!/bin/bash
# Compile the given .rs file with rustc inside the official Rust image,
# mounting this directory plus the sibling input/ and expected/ folders
# under /opt/advent so the program can read its puzzle data.
docker run --rm \
    --volume=$(realpath .):/opt/advent/rust \
    --volume=$(realpath ../input):/opt/advent/input \
    --volume=$(realpath ../expected):/opt/advent/expected \
    -w /opt/advent/rust \
    -ti \
    rust:latest \
    rustc "$@"

# When compilation succeeded, run the freshly built binary (rustc names
# it after the source file minus .rs) and print a separating blank line.
# NOTE(review): 'basename "$@" .rs' only behaves with a single file
# argument — confirm callers never pass more than one source file.
if [ "$?" == "0" ]; then
    ./$(basename "$@" .rs)
    echo ""
fi
ae8b21ecb8fa36f2071a6565df7a9022fc77d541 | Shell | SpenceKonde/ReleaseScripts | /build_megaTinyCore.sh | UTF-8 | 5,592 | 3.65625 | 4 | [] | no_license | #!/bin/bash
##########################################################
##                                                      ##
## Shell script for generating a boards manager release ##
## Created by MCUdude                                   ##
## Requires wget, jq and a bash environment             ##
##                                                      ##
##########################################################
# Change these to match your repo
AUTHOR=SpenceKonde # Github username
REPOSITORY=megaTinyCore # Github repo name
# Get the download URL for the latest release from Github
DOWNLOAD_URL=$(curl -s https://api.github.com/repos/$AUTHOR/$REPOSITORY/releases/latest | grep "tarball_url" | awk -F\" '{print $4}')
# Download file
wget --no-verbose $DOWNLOAD_URL
# Get filename (8th path segment of the tarball URL is the tag name)
DOWNLOADED_FILE=$(echo $DOWNLOAD_URL | awk -F/ '{print $8}')
# Add .tar.bz2 extension to downloaded file
mv $DOWNLOADED_FILE ${DOWNLOADED_FILE}.tar.bz2
# Extract downloaded file and place it in a folder (the #"v"} part removes the v in the version number if it is present)
printf "\nExtracting folder ${DOWNLOADED_FILE}.tar.bz2 to $REPOSITORY-${DOWNLOADED_FILE#"v"}\n"
mkdir -p "$REPOSITORY-${DOWNLOADED_FILE#"v"}" && tar -xzf ${DOWNLOADED_FILE}.tar.bz2 -C "$REPOSITORY-${DOWNLOADED_FILE#"v"}" --strip-components=1
printf "Done!\n"
# Move files out of the megaavr folder
mv $REPOSITORY-${DOWNLOADED_FILE#"v"}/megaavr/* $REPOSITORY-${DOWNLOADED_FILE#"v"}
# Delete the extras folder
rm -rf $REPOSITORY-${DOWNLOADED_FILE#"v"}/extras
# Delete downloaded file and empty megaavr folder
rm -rf ${DOWNLOADED_FILE}.tar.bz2 $REPOSITORY-${DOWNLOADED_FILE#"v"}/megaavr
# Comment out the github/manual installation's tools.pymcuprog.cmd...
sed -i 's/^tools.pymcuprog.cmd/#tools.pymcuprog.cmd/' $REPOSITORY-${DOWNLOADED_FILE#"v"}/platform.txt
sed -i 's/^tools.serialupdi.cmd/#tools.serialupdi.cmd/' $REPOSITORY-${DOWNLOADED_FILE#"v"}/platform.txt
# ...and un-comment the board-manager-only lines marked with #REMOVE#.
sed -i 's/^#REMOVE#//' $REPOSITORY-${DOWNLOADED_FILE#"v"}/platform.txt
cp $REPOSITORY-${DOWNLOADED_FILE#"v"}/platform.txt platform.extract
# Compress folder to tar.bz2
printf "\nCompressing folder $REPOSITORY-${DOWNLOADED_FILE#"v"} to $REPOSITORY-${DOWNLOADED_FILE#"v"}.tar.bz2\n"
tar -cjSf $REPOSITORY-${DOWNLOADED_FILE#"v"}.tar.bz2 $REPOSITORY-${DOWNLOADED_FILE#"v"}
printf "Done!\n"
# Get file size on bytes
FILE_SIZE=$(wc -c "$REPOSITORY-${DOWNLOADED_FILE#"v"}.tar.bz2" | awk '{print $1}')
# Get SHA256 hash
SHA256="SHA-256:$(shasum -a 256 "$REPOSITORY-${DOWNLOADED_FILE#"v"}.tar.bz2" | awk '{print $1}')"
# Create Github download URL
URL="https://${AUTHOR}.github.io/${REPOSITORY}/$REPOSITORY-${DOWNLOADED_FILE#"v"}.tar.bz2"
# Work on a copy of the board index; the jq step below writes the result
# back over the original file.
cp "package_drazzy.com_index.json" "package_drazzy.com_index.json.tmp"
# Add new boards release entry: append a platform object (metadata,
# download URL, checksum and tool dependencies) to the repository's
# platforms array in the board manager index JSON.
jq -r \
--arg repository $REPOSITORY  \
--arg version ${DOWNLOADED_FILE#"v"} \
--arg url $URL \
--arg checksum $SHA256  \
--arg file_size $FILE_SIZE  \
--arg file_name $REPOSITORY-${DOWNLOADED_FILE#"v"}.tar.bz2 \
'(.packages[] | select(.name==$repository)).platforms[(.packages[] | select(.name==$repository)).platforms | length] |= . +
{
  "name": $repository,
  "architecture": "megaavr",
  "version": $version,
  "category": "Contributed",
  "url": $url,
  "archiveFileName": $file_name,
  "checksum": $checksum,
  "size": $file_size,
  "boards": [
    {
      "name": "Full Arduino support for the tinyAVR 0-series, 1-series, and the new 2-series!<br/> 24-pin parts: ATtiny3227/3217/1627/1617/1607/827/817/807/427<br/> 20-pin parts: ATtiny3226/3216/1626/1616/1606/826/816/806/426/416/406<br/> 14-pin parts: ATtiny3224/1624/1614/1604/824/814/804/424/414/404/214/204<br/> 8-pin parts: ATtiny412/402/212/202<br/> Microchip Boards: Curiosity Nano 3217/1627/1607 and Xplained Pro (3217/817), Mini (817) Nano (416). Direct USB uploads may not work on linux, but you can export hex and <br/> upload through the mass storage projection."
    },
    {
      "name": "2.6.5 corrects a number of small, mostly minor issues. Reorders the list of chips now that all sizes of all families are shipping, adds new bootloader entry conditions and a menu to tweak optimization settings, but nothing particularly big."
    },
    {
      "name": "Supported UPDI programmers: SerialUPDI (serial adapter w/diode or resistor), jtag2updi, nEDBG, mEDBG, EDBG, SNAP, Atmel-ICE and PICkit4 - or use one of those to load<br/> the Optiboot serial bootloader (included) for serial programming. Which programing method makes more sense depends on your application and requirements. <br/><br/> The full documentation is not included with board manager installations (it is hard to find and the images bloat the download); we recommend viewing it through github at the link above<br/> or if it must be read withouht an internet connection by downaloding the manual installation package"
    }
  ],
  "toolsDependencies": [
    {
      "packager": "DxCore",
      "name": "avr-gcc",
      "version": "7.3.0-atmel3.6.1-azduino6"
    },
    {
      "packager": "DxCore",
      "name": "avrdude",
      "version": "6.3.0-arduino17or18"
    },
    {
      "packager": "arduino",
      "name": "arduinoOTA",
      "version": "1.3.0"
    },
    {
      "packager": "megaTinyCore",
      "version": "3.7.2-post1",
      "name": "python3"
    }
  ]
}' "package_drazzy.com_index.json.tmp" > "package_drazzy.com_index.json"
# Remove files that's no longer needed (extracted tree + temp index).
rm -rf "$REPOSITORY-${DOWNLOADED_FILE#"v"}" "package_drazzy.com_index.json.tmp"
| true |
6c99fc61aa5ddef15d3041cfdbb6ef62ca8e047d | Shell | termux/termux-packages | /packages/pcaudiolib/build.sh | UTF-8 | 967 | 3.140625 | 3 | [
"Apache-2.0"
] | permissive | TERMUX_PKG_HOMEPAGE=https://github.com/espeak-ng/pcaudiolib
TERMUX_PKG_DESCRIPTION="Portable C Audio Library"
TERMUX_PKG_LICENSE="GPL-3.0"
TERMUX_PKG_MAINTAINER="@termux"
TERMUX_PKG_VERSION=1.2
TERMUX_PKG_REVISION=2
TERMUX_PKG_SRCURL=https://github.com/espeak-ng/pcaudiolib/archive/$TERMUX_PKG_VERSION.tar.gz
TERMUX_PKG_SHA256=44b9d509b9eac40a0c61585f756d76a7b555f732e8b8ae4a501c8819c59c6619
TERMUX_PKG_AUTO_UPDATE=true
TERMUX_PKG_DEPENDS="pulseaudio"
TERMUX_PKG_BUILD_IN_SRC=true

# Guard against upstream bumping the library SOVERSION silently: derive
# CURRENT - AGE from Makefile.am's libtool version fields and fail the
# build when it no longer matches the hardcoded value.
termux_step_post_get_source() {
	# Do not forget to bump revision of reverse dependencies and rebuild them
	# after SOVERSION is changed.
	local _SOVERSION=0

	local a
	for a in CURRENT AGE; do
		# Declares _LT_CURRENT / _LT_AGE dynamically from the value of $a.
		local _LT_${a}=$(sed -En 's/^'"${a}"'=([0-9]+).*/\1/p' \
			Makefile.am)
	done
	local v=$(( _LT_CURRENT - _LT_AGE ))
	# Fail both when CURRENT could not be parsed and when the derived
	# SOVERSION differs from the expected one.
	if [ ! "${_LT_CURRENT}" ] || [ "${v}" != "${_SOVERSION}" ]; then
		termux_error_exit "SOVERSION guard check failed."
	fi
}

# Regenerate the autotools build system before configure runs.
termux_step_pre_configure() {
	./autogen.sh
}
| true |
8f300391ab1884b4c8561c43bbb3ec23727306dd | Shell | Hamdih4/scripts | /common/print.sh | UTF-8 | 547 | 3.640625 | 4 | [] | no_license | #!bin/bash
###############
# Print Error #
###############
function print_error {
msg=$1
tput setaf 1;
echo ${msg}
tput sgr0;
}
#####################
# Print Fatal Error #
#####################
function print_fatal_error {
msg=$1
print_error "$msg"
exit 1;
}
##############
# Print Text #
##############
function print {
msg=$1
tput setaf 3;
echo ${msg}
tput sgr0;
}
#####################
# Print Header Text #
#####################
function print_header {
msg=$1
tput setaf 2;
echo ${msg}
tput sgr0;
}
| true |
5497d058abbfce1752bf83c2dd87b12a504dca53 | Shell | KiLJ4EdeN/Cpp_Tutorial | /src/ComputerVision/OpenCV/install_opencv.sh | UTF-8 | 1,607 | 2.90625 | 3 | [
"MIT"
] | permissive | # install dependencies first
# Install the OpenCV build dependencies (Fedora/CentOS, via dnf).
sudo dnf install epel-release git gcc gcc-c++ cmake3 qt5-qtbase-devel \
python3 python3-devel python3-pip cmake python3-devel python3-numpy \
gtk2-devel libpng-devel jasper-devel openexr-devel libwebp-devel \
libjpeg-turbo-devel libtiff-devel tbb-devel libv4l-devel \
eigen3-devel freeglut-devel mesa-libGL mesa-libGL-devel \
boost boost-thread boost-devel gstreamer1-plugins-base libcanberra-gtk2
# create a temp dir
mkdir -p ~/opencv_build && cd ~/opencv_build
# clone opencv repos
git clone https://github.com/opencv/opencv.git
git clone https://github.com/opencv/opencv_contrib.git
# build the library
cd ~/opencv_build/opencv && mkdir build && cd build
cmake3 -D CMAKE_BUILD_TYPE=RELEASE \
-D CMAKE_INSTALL_PREFIX=/usr/local \
-D INSTALL_C_EXAMPLES=ON \
-D INSTALL_PYTHON_EXAMPLES=ON \
-D OPENCV_GENERATE_PKGCONFIG=ON \
-D OPENCV_EXTRA_MODULES_PATH=~/opencv_build/opencv_contrib/modules \
-D BUILD_EXAMPLES=ON ..
# use cores based on the "nproc" command.
make -j8
sudo make install
# create a symbolic link to the library for pkg-config
# (fixed: "export lib_dir = lib64" was a shell error -- no spaces are allowed
# around '=' -- and "{lib_dir}" below was missing the '$' needed for expansion)
export lib_dir=lib64
sudo ln -s /usr/local/${lib_dir}/pkgconfig/opencv4.pc /usr/share/pkgconfig/
# find where the file is if faced an error.
# whereis pkgconfig
# sudo ln -s /usr/local/lib/pkgconfig/opencv4.pc /usr/share/pkgconfig/
# sudo ldconfig
# add shared libraries
export LD_LIBRARY_PATH=~/opencv_build/opencv/build/lib/:$LD_LIBRARY_PATH
# also to find packages use: pkg-config --list-all | grep packagename
# Example compile command for your own code (replace somecode.cpp):
g++ somecode.cpp -o output `pkg-config --cflags --libs opencv4`
| true |
49e70e41d9f4ee7729760fc42290d14befffc114 | Shell | kinghows/dbdb.io | /restartdb.sh | UTF-8 | 903 | 3.359375 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
# Recreate the dbdb_io database from scratch: drop/create the MySQL schema,
# regenerate Django migrations, reload initial data and fixtures, and finally
# create a superuser. Expects to be run from the repository root.
if [[ $# -ne 1 ]]; then
printf '\nusage: restartdb.sh <user>\n'
printf '\nuser: username for mysql\n'
exit
fi
printf '\nDropping database..\n'
# -p makes mysqladmin prompt interactively for the password.
mysqladmin drop -u $1 -p dbdb_io
printf '\nCreating database..\n'
mysqladmin create -u $1 -p dbdb_io
printf '\nDeleting migrations..\n'
cd systems/migrations
printf '\nDeleting files:\n'
# List, then delete, every migration file except __init__.py.
ls | grep -v '__init__'
ls | grep -v '__init__' | xargs rm
cd ../..
printf '\nCreating migrations..\n'
python manage.py makemigrations
python manage.py migrate
printf '\nCreating the initial data from the current_data directory...\n'
cd current_data
python ./parse_system_data.py
printf '\nFinished creating intial data! Please check current_data directory for error output. \n'
cd ..
printf '\nLoading fixtures...\n'
python manage.py loaddata systems/fixtures/*
printf '\nCreating the super user\n'
python manage.py createsuperuser
| true |
57371381ef9b82437940bbcd384b6b7ca5e5fc4a | Shell | barionleg/rut_rac_ker-download-scripts | /rutracker-catalog-magnet.sh | UTF-8 | 3,677 | 3.859375 | 4 | [] | no_license | #!/usr/bin/env bash
export LC_ALL=C
# rutracker-catalog-magnet
# Create list with magnet url's from custom category ID
# Usage : sh rutracker-catalog-magnet.sh <ID_CATEGORY>
#
# Copyright (c) 2018 Denis Guriyanov <denisguriyanov@gmail.com>
# Variables
################################################################################
TR_URL='https://rutracker.org/forum'
TR_CATEGORY="$1"
DIR_DWN="$HOME/Downloads/Torrents"
DIR_TMP='/tmp/rds'
DIR_TMP_CAT="$DIR_TMP/category_$TR_CATEGORY"
SC_UA='Mozilla/5.0 (Macintosh; Intel Mac OS X 10.11; rv:44.0) Gecko/20100101 Firefox/44.0'
# BEGIN
################################################################################
if [ -z $TR_CATEGORY ]; then
echo 'Please, enter category ID.'
echo 'Example: rutracker-catalog-magnet.sh <ID_CATEGORY>'
exit
fi
echo "Let's Go!\n"
# Check and create directories
################################################################################
if [ ! -d $DIR_TMP ]; then
mkdir "$DIR_TMP"
fi
if [ ! -d $DIR_TMP_CAT ]; then
mkdir -p "$DIR_TMP_CAT"
else
# remove old files
rm -rf "$DIR_TMP_CAT"/*
fi
if [ ! -d $DIR_DWN ]; then
mkdir "$DIR_DWN"
fi
# Total pages
################################################################################
echo 'Get total pages in category...'
category_page=$(curl "$TR_URL/viewforum.php?f=$TR_CATEGORY&start=0" \
-A "$SC_UA" \
--show-error \
-L \
-s
)
# find latest pager link
# <a class="pg" href="viewforum.php?f=###&start=###">###</a>
total_pages=$(echo $category_page \
| sed -En 's/.*<a class=\"pg\" href=\".*\">([0-9]*)<\/a> .*/\1/p' \
| head -1
)
echo "...complete!\n"
sleep 1
# Category Page
################################################################################
echo 'Download category pages...'
for page in $(seq 1 $total_pages); do
page_link=$((page * 50 - 50)) # 50 items per page, ex. 0..50..100
category_pages=$(curl "$TR_URL/viewforum.php?f=$TR_CATEGORY&start=$page_link" \
-A "$SC_UA" \
--show-error \
-L \
-s
)
echo "$category_pages" > "$DIR_TMP_CAT/category_page_$page.html"
printf "\rProgress : %d of $total_pages" $page
done
echo "\n...complete!\n"
sleep 1
# Torrent ID
################################################################################
echo "Get torrent IDs..."
id_list="$DIR_TMP_CAT/ids_list.txt"
touch "$id_list"
for page in $(seq 1 $total_pages); do
category_page="$DIR_TMP_CAT/category_page_$page.html"
# find torrent topic link
# <a id="tt-###" href="viewtopic.php?t=###">
ids=$(cat $category_page \
| sed -En 's/.*<a.*href=\"viewtopic\.php\?t=([0-9]*)\".*>.*/\1/p'
)
echo "$ids" >> "$id_list"
done
echo "...complete!\n"
sleep 1
# Magnet URL
################################################################################
echo 'Get magnet URLs...'
total_ids=$(cat $id_list \
| wc -l \
| sed 's/ //g'
)
i=1
magnet_list="$DIR_DWN/$TR_CATEGORY.txt"
# Start from an empty output file.
# (fixed: the original tested the undefined variable $magnet_link here --
# "[ -f ]" with an empty expansion is always true -- so the exists-check
# never worked as written; test the actual $magnet_list file instead)
if [ -f "$magnet_list" ]; then
rm -f "$magnet_list"
else
touch "$magnet_list"
fi
for id in $(cat $id_list); do
torrent_page=$(curl "$TR_URL/viewtopic.php?t=$id" \
-A "$SC_UA" \
--show-error \
-L \
-s
)
# find magnet link on page
# <a href="magnet:###">
magnet_link=$(echo $torrent_page \
| sed -En 's/.*<a.*href=\"(magnet:[^"]*)\".*>.*/\1/p'
)
if [ $magnet_link ]; then
echo "$magnet_link" >> "$magnet_list"
fi
printf "\rProgress : %d of $total_ids" $i
i=$((i+1))
done
echo "\n...complete!\n"
# FINISH
################################################################################
total_links=$(cat $magnet_list \
| wc -l \
| sed 's/ //g'
)
echo "Total URLs : $total_links\n"
echo 'Enjoy...'
exit
| true |
2bbbe2745bd187de8c01dadf31ec113151806f12 | Shell | CirculoSiete/adoptopenjdk-wait-for-it | /build.sh | UTF-8 | 227 | 2.578125 | 3 | [] | no_license | #!/usr/bin/env bash
# Read the image version from image.properties (value after '=' on the
# "version" line), then build, release, and push the Docker image.
# The '&&' links make the chain stop at the first failing step.
# NOTE(review): "git release" is not a built-in git subcommand -- presumably a
# git alias or a helper such as git-extras provides it; verify it is installed.
VER=$(grep "version" image.properties|cut -d'=' -f2) && \
docker build -t circulo7/adoptopenjdk-w4i:$VER . && \
git release $VER && \
docker push circulo7/adoptopenjdk-w4i:$VER && \
echo "done"
| true |
9728f79865d8f02c4fcbb2cf4884c64ffe98f43f | Shell | hathitrust/ingest_helpers | /ingest_fixers/update_checksum.sh | UTF-8 | 255 | 2.75 | 3 | [] | no_license | VOLUMES=$@
# For each volume directory: drop the stale meta.yml line from checksum.md5,
# then append a freshly computed md5 for meta.yml.
# NOTE(review): $VOLUMES is expanded unquoted to word-split the argument list,
# so volume paths containing whitespace are not supported.
for id in $VOLUMES; do grep -v meta.yml $id/checksum.md5 > $id/checksum.new; mv $id/checksum.new $id/checksum.md5; md5sum $id/meta.yml >> $id/checksum.md5; done
# Repackage each volume's meta.yml + updated checksum.md5 into <volume>.zip.
for id in $VOLUMES; do echo $id; zip $id.zip $id/meta.yml $id/checksum.md5; done
| true |
42c0fd4ce0d421eb518908c390d2324b0bfbaf57 | Shell | yrachid/roman-numerals | /scripts/src/main/bash/run.sh | UTF-8 | 1,040 | 3.703125 | 4 | [] | no_license | #!/bin/bash -eu
# Resolve paths relative to this script so it can be invoked from anywhere.
readonly SOURCE_LOCATION="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
readonly RESOURCES_LOCATION="${SOURCE_LOCATION}/../resources"

# Positional arguments: execution mode, build context / jar path, input set.
readonly EXECUTION_MODE=${1}
readonly CONTEXT_PATH=${2}
readonly INPUT_OPTION=${3}

# Pre-canned conversion inputs shipped in the resources directory.
readonly ARABIC_TO_ROMAN_INPUT_OPTION=$(cat "${RESOURCES_LOCATION}/inputs/arabic")
readonly ROMAN_TO_ARABIC_INPUT_OPTION=$(cat "${RESOURCES_LOCATION}/inputs/roman")
readonly MISC_INPUT_OPTION=$(cat "${RESOURCES_LOCATION}/inputs/misc")

# Select the list of values to convert.
case $INPUT_OPTION in
  arabic-to-roman)
    TEST_VALUES=$ARABIC_TO_ROMAN_INPUT_OPTION
    ;;
  roman-to-arabic)
    TEST_VALUES=$ROMAN_TO_ARABIC_INPUT_OPTION
    ;;
  misc)
    TEST_VALUES=$MISC_INPUT_OPTION
    ;;
  *)
    echo "Select a valid input: arabic-to-roman, roman-to-arabic, misc"
    # Abort here: under the script's "set -u" a later ${TEST_VALUES}
    # expansion would otherwise die with an unhelpful "unbound variable".
    exit 1
    ;;
esac

# Run the converter, either inside Docker or directly with java.
# TEST_VALUES is intentionally unquoted: it holds a whitespace-separated
# list that must be passed as individual arguments.
case $EXECUTION_MODE in
  docker)
    docker build -t numeral-converter "${CONTEXT_PATH}"
    # fixed: the original ran image "number-converter", which does not match
    # the tag built above ("numeral-converter"), so the run step always failed
    docker run numeral-converter "app.jar" ${TEST_VALUES}
    ;;
  bash)
    java -jar "${CONTEXT_PATH}" ${TEST_VALUES}
    ;;
  *)
    echo "Select a valid execution mode: docker, bash"
    exit 1
    ;;
esac
| true |
eeb30532cec6ce670318afbc704abda0cbf02107 | Shell | sarahpeiffer/Docker-Provider | /sarah-test-deployment/ServiceGroupRoot/Scripts/pushAgentToAcr.sh | UTF-8 | 1,964 | 3.671875 | 4 | [
"MIT"
] | permissive | #!/bin/bash
set -e
# Note - This script used in the pipeline as inline script
# These are plain pipeline variable which can be modified anyone in the team
# AGENT_RELEASE=ciprod
# AGENT_IMAGE_TAG_SUFFIX=08202021
#Name of the ACR for ciprod & cidev images
ACR_NAME=containerinsightsprod
# Fully-qualified image references for the Linux and Windows agent images.
# NOTE(review): neither variable is referenced again in this script; the push
# at the bottom uses $AGENT_IMAGE_FULL_PATH, which is never assigned here.
# Presumably it was meant to be one of these two (or is injected by the
# pipeline environment) -- confirm before relying on this script.
LINUX_AGENT_IMAGE_FULL_PATH=${ACR_NAME}/public/azuremonitor/containerinsights/${AGENT_RELEASE}:${AGENT_RELEASE}${AGENT_IMAGE_TAG_SUFFIX}
WINDOWS_AGENT_IMAGE_FULL_PATH=${ACR_NAME}/public/azuremonitor/containerinsights/${AGENT_RELEASE}:win-${AGENT_RELEASE}${AGENT_IMAGE_TAG_SUFFIX}
# Validate the required pipeline variables before doing any work.
if [ -z ${AGENT_IMAGE_TAR_FILE_NAME+x} ]; then
echo "AGENT_IMAGE_TAR_FILE_NAME is unset, unable to continue"
exit 1;
fi
if [ -z $AGENT_IMAGE_TAG_SUFFIX ]; then
echo "-e error value of AGENT_IMAGE_TAG_SUFFIX variable shouldnt be empty"
exit 1
fi
# The suffix encodes a date (MMDDYYYY) and must be exactly 8 characters.
if [ ${#AGENT_IMAGE_TAG_SUFFIX} -ne 8 ]; then
echo "-e error length of AGENT_IMAGE_TAG_SUFFIX should be 8. Make sure it is in MMDDYYYY format"
exit 1
fi
if [ -z $AGENT_RELEASE ]; then
echo "-e error AGENT_RELEASE shouldnt be empty"
exit 1
fi
#Install crane
echo "Installing crane"
wget -O crane.tar.gz https://github.com/google/go-containerregistry/releases/download/v0.4.0/go-containerregistry_Linux_x86_64.tar.gz
tar xzvf crane.tar.gz
echo "Installed crane"
#echo "Login cli using managed identity"
az login --identity
echo "Getting acr credentials"
# 'az acr login -t' returns JSON with an access token and the registry login
# server; crane authenticates with the ACR's well-known null-GUID username.
TOKEN_QUERY_RES=$(az acr login -n "$ACR_NAME" -t)
TOKEN=$(echo "$TOKEN_QUERY_RES" | jq -r '.accessToken')
DESTINATION_ACR=$(echo "$TOKEN_QUERY_RES" | jq -r '.loginServer')
./crane auth login "$DESTINATION_ACR" -u "00000000-0000-0000-0000-000000000000" -p "$TOKEN"
# Decompress the agent image tarball if it arrived gzipped or zipped.
if [[ "$AGENT_IMAGE_TAR_FILE_NAME" == *"tar.gz"* ]]; then
gunzip $AGENT_IMAGE_TAR_FILE_NAME
fi
if [[ "$AGENT_IMAGE_TAR_FILE_NAME" == *"tar.zip"* ]]; then
unzip $AGENT_IMAGE_TAR_FILE_NAME
fi
# NOTE(review): $TARBALL_IMAGE_FILE is also never defined in this script, so
# this log line prints an empty file name.
echo "Pushing file $TARBALL_IMAGE_FILE to $AGENT_IMAGE_FULL_PATH"
./crane push *.tar "$AGENT_IMAGE_FULL_PATH" | true |
4c4b0d03fa29023a9b8e246be3a6c3280f87efff | Shell | cfengine/masterfiles | /templates/federated_reporting/transport.sh | UTF-8 | 3,837 | 4.1875 | 4 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/bin/bash
#
# Transport dump files from the feeder hubs to the superhub.
#
# Can be run as:
# transport.sh
# On a feeder hub, pushes dump files to the superhub.
# transport.sh push
# The same as with no arguments.
# transport.sh pull FEEDER_HUB [FEEDER_HUB2...FEEDER_HUBn]
# On the superhub, pull dumps from the given feeder hubs (in parallel).
#
set -e
# make sure a failure in any part of a pipe sequence is a failure
set -o pipefail
source "$(dirname "$0")/config.sh"
source "$(dirname "$0")/log.sh"
source "$(dirname "$0")/parallel.sh"
# check that we have all the variables we need
true "${CFE_FR_TRANSPORT_DIR?undefined}"
true "${CFE_FR_SUPERHUB_DROP_DIR?undefined}"
true "${CFE_FR_TRANSPORTER?undefined}"
true "${CFE_FR_TRANSPORTER_ARGS?undefined}"
true "${CFE_FR_COMPRESSOR_EXT?undefined}"
true "${CFE_FR_SUPERHUB_LOGIN?undefined}"
if ! type "$CFE_FR_TRANSPORTER" >/dev/null; then
log "Transporter $CFE_FR_TRANSPORTER not available!"
exit 1
fi
# Runs on a feeder hub: hand every compressed dump in the transport directory
# to $CFE_FR_TRANSPORTER for delivery into the superhub's drop directory.
# Globals (read): CFE_FR_TRANSPORT_DIR, CFE_FR_COMPRESSOR_EXT,
#   CFE_FR_TRANSPORTER, CFE_FR_TRANSPORTER_ARGS, CFE_FR_SUPERHUB_LOGIN,
#   CFE_FR_SUPERHUB_DROP_DIR
# Returns: 0 when every file transferred, 1 when any transfer failed.
function push() {
  # Runs on the feeder hub, pushes dumps to the superhub.
  dump_files="$(ls -1 "$CFE_FR_TRANSPORT_DIR/"*".sql.$CFE_FR_COMPRESSOR_EXT" 2>/dev/null)" ||
    {
      log "No files to transport."
      exit 0
    }
  log "Transporting files: $dump_files"
  some_failed=0
  # Relies on word-splitting of $dump_files; dump names are generated without
  # whitespace, so this is safe here -- TODO confirm that invariant holds.
  for dump_file in $dump_files; do
    failed=0
    # Rename before sending -- presumably so the file cannot be picked up
    # again (e.g. by a concurrent run) while the transfer is in flight.
    mv "$dump_file" "$dump_file.transporting"
    "$CFE_FR_TRANSPORTER" "$CFE_FR_TRANSPORTER_ARGS" "$dump_file.transporting" "$CFE_FR_SUPERHUB_LOGIN:$CFE_FR_SUPERHUB_DROP_DIR/$(basename "$dump_file")" ||
      failed=1
    # The local work file is removed whether or not the transfer succeeded.
    rm -f "$dump_file.transporting"
    if [ "$failed" != 0 ]; then
      log "Transporting file $dump_file to $CFE_FR_SUPERHUB_LOGIN:$CFE_FR_SUPERHUB_DROP_DIR failed!"
      some_failed=1
    fi
  done
  if [ "$some_failed" != "0" ]; then
    log "Transporting files: FAILED"
    return 1
  else
    log "Transporting files: DONE"
    return 0
  fi
}
function pull() {
# $@ -- feeder hubs to pull the dumps from
feeder_lines="$(printf "%s\n" "$@")"
log "Pulling dumps from: $feeder_lines"
chmod u+x "$(dirname "$0")/pull_dumps_from.sh"
# create and work inside a process specific sub-directory for WIP
mkdir "$CFE_FR_SUPERHUB_DROP_DIR/$$"
# Determine the absolute path of the pull_dumps_from.sh script. If this was
# run with absolute path, use the absolute path, otherwise use the relative
# part as the base path.
if [ "${0:0:1}" = "/" ]; then
pull_dumps_path="$(dirname "$0")/pull_dumps_from.sh"
else
pull_dumps_path="$PWD/$(dirname "$0")/pull_dumps_from.sh"
fi
pushd "$CFE_FR_SUPERHUB_DROP_DIR/$$" >/dev/null
failed=0
echo "$feeder_lines" | run_in_parallel "$pull_dumps_path" - || failed=1
if [ "$failed" != "0" ]; then
log "Pulling dumps: FAILED"
for feeder in "$@"; do
if [ -f "$feeder.failed" ]; then
log "Failed to pull dumps from: $feeder"
rm -f "$feeder.failed"
fi
done
else
log "Pulling dumps: DONE"
fi
for feeder in "$@"; do
if ! ls "$feeder/"*".sql.$CFE_FR_COMPRESSOR_EXT" >/dev/null 2>/dev/null; then
log "No dump files from $feeder"
continue
fi
mv "$feeder/"*".sql.$CFE_FR_COMPRESSOR_EXT" "$CFE_FR_SUPERHUB_DROP_DIR/"
# the $feeder directory is not supposed to contain anything else
rmdir "$feeder" || log "Failed to remove directory after $feeder"
done
popd >/dev/null
rm -rf "$CFE_FR_SUPERHUB_DROP_DIR/$$"
return $failed
}
if [ $# = 0 ]; then
push
elif [ $# = 1 ]; then
if [ "$1" = "push" ]; then
push
else
if [ "$1" = "pull" ]; then
log "No feeder hubs given to pull from"
else
log "Invalid command given to $0: $1"
fi
exit 1
fi
else
# more than one argument given
if [ "$1" = "pull" ]; then
shift
pull "$@"
else
log "Invalid command given to $0: $1"
exit 1
fi
fi
| true |
00a1393c64195e1fd72dbbb3208abff4519cc70e | Shell | JT5D/AppleTree | /Applescripts/ocefpaf-shell/toolsui-java.sh | UTF-8 | 443 | 2.625 | 3 | [] | no_license | #!/bin/bash
# toolsui-java
#
# purpose:
# author: Filipe P. A. Fernandes
# e-mail: ocefpaf@gmail.com
# web: http://ocefpaf.tiddlyspot.com/
# date: 02-Jan-10
# modified: 02-Jan-10
#
# obs:
#
# check dependencies: abort with a message if any required command is missing
commds=( java )
for commd in "${commds[@]}"; do
  # 'command -v' is the POSIX-specified availability test (more portable than
  # 'type -p'); the quoted expansions keep unusual names intact
  if ! command -v "${commd}" > /dev/null 2>&1; then
    echo "${commd} -- NOT INSTALLED !"
    exit 1
  fi
done
# Launch the ToolsUI jar detached from the terminal with a 512 MB max heap.
java -Xmx512m -jar /home/${USER}/svn-tools/mymatlab/toolsUI-4.1.jar > /dev/null 2>&1 &
| true |
21ae6cf1046e8ecf4d29365458ce4a562515d9bc | Shell | sthaid/proj_reversi | /android-project/.archive/do_create_ic_launcher | UTF-8 | 669 | 2.8125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# set directory variables
TOP=`pwd`
SDL_DIR=$TOP/SDL2-2.0.12
BUILD=$SDL_DIR/build/org.sthaid.reversi
# create the launcher icon
echo "Creating launcher icons"
set -x
cd $BUILD/app/src/main/res
pwd
gcc -Wall `sdl2-config --cflags` -lSDL2 -lSDL2_ttf -lpng -o create_ic_launcher \
../../../../../../../setup_files/create_ic_launcher.c
./create_ic_launcher mipmap-mdpi/ic_launcher.png 48
./create_ic_launcher mipmap-hdpi/ic_launcher.png 72
./create_ic_launcher mipmap-xhdpi/ic_launcher.png 96
./create_ic_launcher mipmap-xxhdpi/ic_launcher.png 144
./create_ic_launcher mipmap-xxxhdpi/ic_launcher.png 192
rm create_ic_launcher
echo
| true |
d6556abd74ebd3983678ab47b815a8d75fa0e194 | Shell | arduino-libraries/azure-iot-sdks | /build/linux_node.sh | UTF-8 | 902 | 3.484375 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE file in the project root for full license information.
build_root=$(cd "$(dirname "$0")/.." && pwd)
cd $build_root/node
# Set up links in the npm cache to ensure we're exercising all the code in
# the repo, rather than downloading released versions of our packages from
# npm.
build/dev-setup.sh
[ $? -eq 0 ] || exit $?
# Lint all JavaScript code and run unit + integration tests
build/build.sh --min --integration-tests
[ $? -eq 0 ] || exit $?
# The 'npm link' commands in this script create symlinks to tracked repo
# files from ignored locations (under ./node_modules). This means a call to
# 'git clean -xdf' will delete tracked files from the repo's working
# directory. To avoid any complications, we'll unlink everything before
# exiting.
build/dev-teardown.sh
[ $? -eq 0 ] || exit $?
| true |
8e60f41429fbf0da2515acc260f813bab0831fa5 | Shell | ddkclaudio/qb_install | /xercesc.sh | UTF-8 | 529 | 2.9375 | 3 | [] | no_license | # INSTALACAO DE DEPENDENCIAS
# Install the toolchain and library build dependencies.
sudo apt-get install build-essential cmake wget make tar
sudo apt-get install build-essential g++ python-dev autotools-dev libicu-dev build-essential libbz2-dev libxml2 git
# Fetch the source: download and unpack Xerces-C 3.1.4, renaming the tree to
# ./xerces-c. (Note: the working directory is literally named "SOURCE_FOLDER".)
SOURCE_FOLDER="SOURCE_FOLDER"
mkdir -p $SOURCE_FOLDER && cd $SOURCE_FOLDER
wget https://archive.apache.org/dist/xerces/c/3/sources/xerces-c-3.1.4.tar.gz
tar xf xerces-c-3.1.4.tar.gz
mv xerces-c-3.1.4 xerces-c
rm xerces-c-3.1.4.tar.gz
# Compile and install (standard autotools flow).
cd xerces-c
./configure
make
sudo make install
| true |
9efbc87e5d94f35c2be0e5bc86f6b882cc43b81b | Shell | mare-imbrium/film-wiki | /wiki/src/extract_directors_links.sh | UTF-8 | 1,429 | 3.921875 | 4 | [] | no_license | #!/usr/bin/env bash
#===============================================================================
#
# FILE: extract_directors_links.sh
#
# USAGE: ./extract_directors_links.sh
#
# DESCRIPTION: from a film file, extract the directors section and get the links.
# the cssselector is different for each file so i cannot rely on that.
#
# This works on the unmodified files. Now /wiki/ has been removed.
#
# OPTIONS: ---
# REQUIREMENTS: ---
# BUGS: ---
# NOTES: ---
# AUTHOR: YOUR NAME (),
# ORGANIZATION:
# CREATED: 12/31/2015 11:34
# REVISION: 2015-12-31 19:30
#===============================================================================
# Require exactly one argument: the wiki HTML file to scan.
if [ $# -eq 0 ]; then
echo "pass file name"
exit 1
fi
if [[ ! -f "$1" ]]; then
echo "File: $1 not found"
exit 1
fi
file=$1
# we have not changed the links .... darn that means we have lost the link, need to redo
# Grab the "Films directed by ..." section, falling back to "The Films of ...".
# (fixed: "$file" is now quoted in both sed calls, so file names containing
# whitespace no longer break the command)
text=$( sed -n '/<div style="font-size:110%;">Films directed by <a href="\/wiki\//,/>v</p' "$file")
if [[ -z "$text" ]]; then
text=$( sed -n '/<div style="font-size:110%;">The Films of <a href="\/wiki\//,/>v</p' "$file")
fi
if [[ -z "$text" ]]; then
echo "not found any links for $file"
echo "this program works on unmodified files. Once links are converted /wiki/ is removed."
exit 1
fi
# Emit only the /wiki/... link targets, excluding namespaced pages (with ':').
echo "$text" | grep href | grep -o '/wiki/[^"]*' | grep -v ':'
| true |
beace49a68f9c66ba77656192421b4bcfa4c15df | Shell | roidayan/ovs-tests | /test-tc-vxlan-meter.sh | UTF-8 | 5,621 | 2.921875 | 3 | [] | no_license | #!/bin/bash
#
# Test act_police action.
# Bug SW #2707092, metering doesn't work before version xx.31.0354 xx.32.0114
my_dir="$(dirname "$0")"
. $my_dir/common.sh
min_nic_cx6
require_module act_police
require_remote_server
IP=1.1.1.7
REMOTE=1.1.1.8
LOCAL_TUN=7.7.7.7
REMOTE_IP=7.7.7.8
VXLAN_ID=42
DSTPORT=4789
VXLAN_MAC=24:25:d0:e2:00:00
RATE=200
BURST=65536
TMPFILE=/tmp/meter.log
# Remote-side teardown: flush the test IPs from the remote NIC and delete the
# remote vxlan device (errors from an already-missing device are discarded).
function cleanup_remote() {
    on_remote "ip a flush dev $REMOTE_NIC
               ip l del dev vxlan1 &>/dev/null"
}

# Local teardown: remove the test namespace, tc rules, the local vxlan device
# and the tunnel IP from the NIC, then clean up the remote side as well.
function cleanup() {
    ip netns del ns0 2> /dev/null
    reset_tc $REP
    ip link del dev vxlan1 2> /dev/null
    ifconfig $NIC 0
    cleanup_remote
}
trap cleanup EXIT
function config_vxlan() {
echo "config vxlan dev"
ip link add vxlan1 type vxlan dstport $DSTPORT external
ip link set vxlan1 up
ifconfig $NIC $LOCAL_TUN/24 up
}
function config_remote() {
on_remote "ip link del vxlan1 &>/dev/null
ip link add vxlan1 type vxlan id $VXLAN_ID dev $REMOTE_NIC dstport $DSTPORT
ip a flush dev $REMOTE_NIC
ip a add $REMOTE_IP/24 dev $REMOTE_NIC
ip a add $REMOTE/24 dev vxlan1
ip l set dev vxlan1 up
ip l set vxlan1 address $VXLAN_MAC
ip l set dev $REMOTE_NIC up"
}
function add_arp_rules() {
echo "add arp rules"
tc_filter add dev $REP protocol arp parent ffff: prio 1 flower skip_hw \
src_mac $LOCAL_MAC \
action tunnel_key set \
src_ip $LOCAL_TUN \
dst_ip $REMOTE_IP \
dst_port $DSTPORT \
id $VXLAN_ID \
action mirred egress redirect dev vxlan1
tc_filter add dev vxlan1 protocol arp parent ffff: prio 1 flower skip_hw \
src_mac $VXLAN_MAC \
enc_src_ip $REMOTE_IP \
enc_dst_ip $LOCAL_TUN \
enc_dst_port $DSTPORT \
enc_key_id $VXLAN_ID \
action tunnel_key unset pipe \
action mirred egress redirect dev $REP
}
# Send a single ICMP echo to $REMOTE from inside the ns0 namespace; on
# failure the framework error state is set via err() (no status is returned).
function ping_remote() {
    # icmp
    ip netns exec ns0 ping -q -c 1 -w 1 $REMOTE
    if [ $? -ne 0 ]; then
        err "ping failed"
        return
    fi
}
function run() {
add_arp_rules
echo "add vxlan police rules"
tc_filter add dev $REP protocol ip parent ffff: prio 2 flower \
src_mac $LOCAL_MAC \
dst_mac $VXLAN_MAC \
action tunnel_key set \
src_ip $LOCAL_TUN \
dst_ip $REMOTE_IP \
dst_port $DSTPORT \
id $VXLAN_ID \
action mirred egress redirect dev vxlan1
tc_filter add dev vxlan1 protocol ip parent ffff: prio 3 flower \
src_mac $VXLAN_MAC \
dst_mac $LOCAL_MAC \
enc_src_ip $REMOTE_IP \
enc_dst_ip $LOCAL_TUN \
enc_dst_port $DSTPORT \
enc_key_id $VXLAN_ID \
action tunnel_key unset \
action police rate ${RATE}mbit burst $BURST conform-exceed drop/pipe \
action mirred egress redirect dev $REP
fail_if_err
ping_remote
t=10
# traffic
ip netns exec ns0 timeout -k 1 $((t+4)) iperf3 -s -J > $TMPFILE &
pid2=$!
sleep 2
on_remote timeout -k 1 $t iperf3 -c $IP -t $t -u -l 1400 -b 2G -P2 &
pid1=$!
verify
}
function verify() {
# verify client pid
sleep 2
kill -0 $pid1 &>/dev/null
if [ $? -ne 0 ]; then
err "iperf3 failed"
return
fi
timeout $((t-2)) tcpdump -qnnei $REP -c 10 'udp' &
local tpid=$!
sleep $t
verify_no_traffic $tpid
killall -9 iperf3 &>/dev/null
echo "wait for bgs"
wait
verify_iperf3_bw $TMPFILE $RATE
}
function run2() {
add_arp_rules
echo "add vxlan police rules"
tc_filter add dev $REP protocol ip parent ffff: prio 2 flower \
src_mac $LOCAL_MAC \
dst_mac $VXLAN_MAC \
action police rate ${RATE}mbit burst $BURST conform-exceed drop/pipe \
action tunnel_key set \
src_ip $LOCAL_TUN \
dst_ip $REMOTE_IP \
dst_port $DSTPORT \
id $VXLAN_ID \
action mirred egress redirect dev vxlan1
tc_filter add dev vxlan1 protocol ip parent ffff: prio 3 flower \
src_mac $VXLAN_MAC \
dst_mac $LOCAL_MAC \
enc_src_ip $REMOTE_IP \
enc_dst_ip $LOCAL_TUN \
enc_dst_port $DSTPORT \
enc_key_id $VXLAN_ID \
action tunnel_key unset \
action mirred egress redirect dev $REP
fail_if_err
ping_remote
t=10
# traffic
on_remote timeout -k 1 $((t+5)) iperf3 -s -J > $TMPFILE &
pid2=$!
sleep 2
ip netns exec ns0 timeout -k 1 $((t+2)) iperf3 -u -c $REMOTE -t $t -l 1400 -b 2G -P2 &
pid1=$!
verify
}
config_sriov
enable_switchdev
require_interfaces REP
bind_vfs
LOCAL_MAC=$(cat /sys/class/net/$VF/address)
config_vxlan
config_vf ns0 $VF $REP $IP
config_remote
title "limit the speed on vxlan"
reset_tc $REP vxlan1
run
title "limit the speed on rep"
reset_tc $REP vxlan1
run2
cleanup
test_done
| true |
72f3233b7095486653f9aa9c142abd07a8371c8c | Shell | lilin5819/mintenv | /install.sh | UTF-8 | 7,604 | 3.03125 | 3 | [] | no_license | #!/bin/bash
# Author:lilin
# email:1657301947@qq.com
# github:https://github.com/lilin5819/mintenv
function configMirrors()
{
sudo cp -f etc/apt/sources.list.d/official-package-repositories.list /etc/apt/sources.list.d
sudo dpkg --add-architecture i386
echo "update mirrors,waiting for a while......"
sudo apt update
sudo apt dist-upgrade
}
# Install everyday tooling (editors, VCS clients, file-transfer/serial tools),
# deploy the prepared samba/tftp configuration, and enable the services.
function installBasicTools()
{
    echo "install basic tools"
    sudo apt install vim openssh-server openssh-client git git-svn git-remote-hg subversion mercurial hgsvn \
    hgsubversion mercurial-git mercurial-server curl wget samba nfs-kernel-server xinetd tftp-hpa \
    tftpd-hpa lrzsz minicom manpages-zh firefox-locale-zh-hans expect axel dos2unix ack-grep -y
    sudo apt install silversearcher-ag -y
    git clone --depth 1 https://github.com/junegunn/fzf.git ~/.fzf
    ~/.fzf/install
    # Deploy the configuration files tracked in this repository.
    sudo cp -f etc/samba/smb.conf /etc/samba/smb.conf
    sudo cp -f etc/default/tftpd-hpa /etc/default/tftpd-hpa
    sudo cp -f etc/xinetd.d/tftp /etc/xinetd.d/tftp
    # Create world-writable share/tftp roots matching the configs above.
    sudo mkdir /opt/{smb,tftp} && sudo chmod -R 777 /opt/{smb,tftp}
    # fixed: "enbale" -> "enable" (the typo made systemctl reject the verb,
    # so the services were never enabled at boot)
    sudo systemctl enable smbd xinetd tftpd-hpa
    sudo systemctl restart smbd xinetd tftpd-hpa
}
function installOpengrok()
{
URL_OPENGROK=https://github.com/oracle/opengrok/releases/download/1.1-rc18/opengrok-1.1-rc18.tar.gz
PKG_OPENGROK=$(basename ${URL_OPENGROK})
sudo apt install opendjk8-jdk -y
sudo apt remove sudo apt remove --purge openjdk-9-jre-headless openjdk-9-jdk-headless -y
sudo wget https://github.com/oracle/opengrok/releases/download/1.1-rc18/opengrok-1.1-rc18.tar.gz
sudo tar xf ${PKG_OPENGROK} -C /opt/
sudo echo -e "PATH=\$PATH:/opt/$PKG_OPENGROK" >> ~/.$(basename ${SHELL})rc
sudo OPENGROK_TOMCAT_BASE=/var/lib/tomcat8 /opt/${PKG_OPENGROK}/bin/OpenGrok deploy
}
function installDevTools()
{
echo "install dev libs and tools"
sudo apt install ia32-libs automake autoconf llvm cmake libncurses5-dev gcc-multilib g++-multilib g++-5-multilib gcc-5-doc libstdc++6-5-dbg \
kernel-package linux-source libstdc++-5-doc binutils build-essential cpio u-boot-tools kernel-common openjdk-9-jdk wireshark nmap libpcap-dev -y
sudo apt install ctags cscope
}
function installVPN()
{
echo "install VPN"
sudo apt install udhcpd pppoe pptpd xl2tpd -y
wget https://raw.github.com/philpl/setup-simple-ipsec-l2tp-vpn/master/setup.sh && sudo sh setup.sh
sudo cp -rf etc/{ppp,pptpd.conf,xl2tpd,udhcpd.conf} /etc/
sudo cp -rf etc/default/udhcpd /etc/default/
}
function createUser()
{
echo "create new User....................."
user=$1
if [[ $user = $USER || -z $user ]]; then
echo "Stop Create User!"
return 1
fi
[ -z $user ] && echo "Usage: $0 user <username> # create a user named <username>" && exit 1
sudo useradd -s /bin/bash -G adm,cdrom,sudo,dip,plugdev,lpadmin,vboxsf,sambashare $user
[ -z $? ] && echo "create user failed!" && exit 1
echo "Please input you new user passwd:"
sudo passwd $user
sudo cp -rf $HOME /home/$user
sudo chown -R $user:$user /home/$user
echo "please input you samba passwd:"
sudo smbpasswd -a $user
sudo systemctl restart smbd
eval "sudo sed -i 's/${USER}/${user}/g' /etc/hostname"
eval "sudo sed -i 's/${USER}/${user}/g' /etc/hosts"
eval "sudo sed -i 's/${USER}/${user}/g' `ag -l lilin`"
echo ""
echo "Success ! You has add a new user: $user!"
echo "Now ,Your hostname is $user-VB,please reboot to refresh hostname!"
echo "Then you can login and del the old user with typing this:"
echo ""
echo "su - $user"
echo "cd ~/mintenv && bash install.sh deluser $USER"
echo ""
echo "Rebooting now ............."
sleep 4
sudo reboot now
}
function delUser() {
user=$1
if [[ $user = $USER ]]; then
echo "You can't del youself!"
echo "Please retry after login another account!"
return 1
else
sudo userdel $user
sudo rm -rf /home/$user
fi
}
function installVim8() {
echo "Installing vim8.0 from source for better features....."
sudo apt remove vim vim-runtime vim-common -y
sudo apt-get install libncurses5-dev libgnome2-dev libgnomeui-dev \
libgtk2.0-dev libatk1.0-dev libbonoboui2-dev \
libcairo2-dev libx11-dev libxpm-dev libxt-dev python-dev \
python3-dev ruby-dev lua5.1 lua5.1-dev libperl-dev git checkinstall -y
if [[ -d './vim' ]]; then
echo "You have vim src"
cd vim
sudo make uninstall
make clean
make distclean
git reset --hard
git pull
else
git clone https://github.com/vim/vim.git
cd vim
fi
python2_config_dir=`sudo find /usr/lib -type d -a -regex ".*python2.7/config.*"`
python3_config_dir=`sudo find /usr/lib -type d -a -regex ".*python3.5/config.*"`
./configure --with-features=huge \
--enable-multibyte \
--enable-rubyinterp=yes \
--enable-pythoninterp=yes \
--with-python-config-dir=$python2_config_dir \
--enable-python3interp=yes \
--with-python3-config-dir=$python3_config_dir \
--enable-perlinterp=yes \
--enable-luainterp=yes \
--enable-gui=gtk2 --enable-cscope --prefix=/usr
make VIMRUNTIMEDIR=/usr/share/vim/vim80
[ -n $? ] && sudo make install || echo "compile vim8 failed!"
cd ..
echo "end install vim8"
}
function installSpacevim() {
sh -c "$(curl -fsSL https://raw.githubusercontent.com/liuchengxu/space-vim/master/install.sh)"
rm ~/.spacevim && ln -s ~/mintenv/spacevim ~/.spacevim
}
function installOhmyzsh() {
sudo apt install zsh
sh -c "$(curl -fsSL https://raw.github.com/robbyrussell/oh-my-zsh/master/tools/install.sh)"
}
function installAcEnv() {
sudo apt install gcc-arm-linux-gnueabi gcc-mips-linux-gnu -y
[ -d ~/bin ] || mkdir -p ~/bin
sudo ln -sf /usr/bin/arch /bin/arch
sudo ln -sf /usr/bin/arm-linux-gnueabi-strip ~/bin/strip
echo "please select NO !!!!!!!!!"
sleep 3
sudo dpkg-reconfigure dash
}
function main()
{
case $1 in
env)
installBasicTools
installDevTools
installVPN
installAcEnv
;;
vpn)
installVPN
;;
vim8)
installVim8
installSpacevim
;;
adduser)
createUser $2
;;
deluser)
delUser $2
;;
zsh)
installOhmyzsh
;;
acenv)
installAcEnv
;;
all)
configMirrors
installBasicTools
installDevTools
installVPN
#installVim8
installSpacevim
installOhmyzsh
installAcEnv
createUser $2
;;
*)
echo "Usage: $0 env # install develop envs"
echo " $0 vpn # install udhcp pppoe l2tp"
echo " $0 vim8 # install vim8 with python、python3、ruby、perl、lua support,and other features."
echo " $0 adduser <username> # create a user named <username>"
echo " $0 deluser <username> # del a user named <username>"
echo " $0 all <username> # install with all features and create a user named <username>"
echo "-------- powered by lilin ----------"
echo "fork me in github: https://github.com/lilin5819/mintenv ----------"
;;
esac
}
main $*
| true |
cfa8f198d01009c67adb490291130b82d40e2051 | Shell | Azure-Samples/azure-intelligent-edge-patterns | /Research/kubeflow-on-azure-stack-lab/04-KFServing/tensorflow_runme.sh | UTF-8 | 868 | 2.71875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
kubectl create -f tensorflow_flowers.yaml -n kfserving-test
echo give the inferenceservice time to create the pods...
sleep 5
export INGRESS_HOST=$(kubectl -n istio-system get service istio-ingressgateway -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
export INGRESS_PORT=$(kubectl -n istio-system get service istio-ingressgateway -o jsonpath='{.spec.ports[?(@.name=="http2")].port}')
export MODEL_NAME=flowers-sample
export INPUT_PATH=@./tensorflow_input.json
export SERVICE_HOSTNAME=$(kubectl get inferenceservice ${MODEL_NAME} -n kfserving-test -o jsonpath='{.status.url}' | cut -d "/" -f 3)
echo curl -v -H "Host: ${SERVICE_HOSTNAME}" http://${INGRESS_HOST}:${INGRESS_PORT}/v1/models/$MODEL_NAME:predict -d $INPUT_PATH
curl -v -H "Host: ${SERVICE_HOSTNAME}" http://${INGRESS_HOST}:${INGRESS_PORT}/v1/models/$MODEL_NAME:predict -d $INPUT_PATH
| true |
acd88374a8c102005ac8a624854b5f222b493892 | Shell | elftech/vmware-code | /shell/trap_ignore.sh | UTF-8 | 459 | 3.46875 | 3 | [] | no_license | #!/bin/bash
trap "" 1 2 3 15
LOOP=0
# Signal handler installed later via `trap "my_exit" 1 2 3 15`: reports which
# loop iteration the signal (HUP/INT/QUIT/TERM) arrived on, then exits.
my_exit()
{
echo "Received interrupt on count $LOOP"
echo "Now exiting..."
exit 1
}
LOOP=0
while :
do
LOOP=`expr $LOOP + 1`
echo "critical processing..$LOOP..you cannot interrupt me"
sleep 1
if [ "$LOOP" -eq 6 ];then
break
fi
done
LOOP=0
trap "my_exit" 1 2 3 15
while :
do
LOOP=`expr $LOOP + 1`
echo "critical processing..$LOOP..you cannot interrupt me"
sleep 1
if [ "$LOOP" -eq 6 ];then
break
fi
done
| true |
ae24adf6643b39cb18a4491be10194c451252ccf | Shell | binhonglee/RandomScripts | /cleanOldNodeVersions.sh | UTF-8 | 526 | 3.671875 | 4 | [] | no_license | #!/bin/bash
# Uninstall every Node version that `nvm ls` lists before the active one.
# nvm marks the currently selected version with "->"; everything printed
# above that marker is treated as old and removed.
export NVM_DIR="$HOME/.nvm"
# nvm is a shell function, so it has to be sourced, not executed.
[ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh"
input="tempNodeVer.txt"
# GNU sed is required for -r; on macOS it is installed as gsed.
if [ "$(uname)" == "Darwin" ]; then
    command="gsed"
else
    command="sed"
fi
# Strip ANSI escape sequences from nvm's colored listing.
nvm ls | "$command" -r "s/\x1B\[[0-9;]*[JKmsu]//g" > "$input"
end="no"
while read -r line && [ "$end" = "no" ]; do
    toUninstall=$line
    # Bug fix: the expansion must be quoted, otherwise an empty line makes
    # the test collapse to `[ == "->" ]` and aborts with a syntax error.
    if [ "${toUninstall:0:2}" == "->" ]; then
        echo "Ending..."
        end="yes"
    else
        echo "Uninstalling $toUninstall"
        # Intentionally unquoted: the line may carry extra tokens that nvm
        # should see as separate arguments.
        nvm uninstall $toUninstall
    fi
done < "$input"
nvm use node
rm -- "$input"
| true |
9e18c093f7c0e15c0e781037637e54e0359de285 | Shell | kdave/xfstests | /tests/generic/338 | UTF-8 | 1,341 | 3.125 | 3 | [] | no_license | #! /bin/bash
# SPDX-License-Identifier: GPL-2.0
# Copyright (c) 2016 Red Hat Inc., All Rights Reserved.
#
# FS QA Test 338
#
# Test I/O on dm error device.
#
# Motivated by an ext4 bug that crashes kernel on error path when trying to
# update atime.
#
. ./common/preamble
_begin_fstest auto quick rw eio
# Override the default cleanup function.
_cleanup()
{
	cd /
	# remove the temp files created by this test run
	rm -f $tmp.*
	# tear down the dm-error mapping set up via common/dmerror
	_dmerror_cleanup
}
# Import common functions.
. ./common/filter
. ./common/dmerror
# real QA test starts here
_supported_fs generic
_require_scratch_nocheck # fs went down with a dirty log, don't check it
_require_dm_target error
# If SCRATCH_DEV is not a valid block device, FSTYP cannot be mkfs'ed either
_require_block_device $SCRATCH_DEV
echo "Silence is golden"
_scratch_mkfs >>$seqres.full 2>&1
_dmerror_init
# Use strictatime mount option here to force atime updates, which could help
# trigger the NULL pointer dereference on ext4 more easily
_dmerror_mount "-o strictatime"
# switch the dm table to the error target so subsequent device I/O fails
_dmerror_load_error_table
# flush dmerror block device buffers and drop all caches, force reading from
# error device
blockdev --flushbufs $DMERROR_DEV
echo 3 > /proc/sys/vm/drop_caches
# do some test I/O
ls -l $SCRATCH_MNT >>$seqres.full 2>&1
$XFS_IO_PROG -fc "pwrite 0 1M" $SCRATCH_MNT/testfile >>$seqres.full 2>&1
# no panic no hang, success, all done
status=0
exit
| true |
bb6bb9d0ef94c2794840ab1ec76a6e3cce514471 | Shell | rolfposchmann/linux-theme | /install_theme.sh | UTF-8 | 625 | 2.671875 | 3 | [] | no_license | #!/bin/bash
# Set up the desktop look (Arc GTK theme + Numix Circle icons) and conky.
#chmod u+x install_theme.sh
mkdir -p ~/bin
# NOTE(review): $PATH and $HOME expand *now*, so the already-expanded string
# is written into ~/.profile — presumably intended, but confirm.
echo "export PATH=$PATH:$HOME/bin" >> ~/.profile
source ~/.profile
# install the Arc GTK theme
sudo apt update
sudo apt install arc-theme
# add the Numix Circle icon theme
sudo add-apt-repository ppa:numix/ppa
sudo apt update
sudo apt install numix-icon-theme-circle
# manual step in the appearance settings: Themes -> Add: "Adapter"
# install conky (system monitor) and its helper tools
sudo apt install conky-all
sudo apt install conky curl lm-sensors hddtemp
mkdir -p ~/Scripts
mkdir -p ~/.conky
cp conkyrc1 ~/.conky/.conkyrc1
cp conkyrc2 ~/.conky/.conkyrc2
cp conkystart.sh ~/Scripts/conkystart.sh
chmod a+x ~/Scripts/conkystart.sh
| true |
30ed51f867c3ff40cf480c005d923f7661d29466 | Shell | morristech/dotfiles-37 | /.zshrc | UTF-8 | 3,930 | 2.859375 | 3 | [] | no_license | ##
# cloudhead - .zshrc
#
xset r rate 180 40 # Sane repeat rate
xset -b # No bell
xset -dpms # Keep screen on at all times
xset s off #
xset m 7/5 0 # Pointer settings
setxkbmap us -variant altgr-intl
# `ls` colors
if [ -f ~/.dircolors ]; then
eval $(dircolors -b ~/.dircolors)
fi
#
# Includes
#
autoload colors && colors
autoload -U compinit && compinit
autoload -U complist
# Fzy integration
if command -v fzy >/dev/null 2>&1 && test -f ~/.fzy.zsh; then
source ~/.fzy.zsh
fi
# K8 integration. We lazy load because it's slow otherwise.
# Lazy-load kubectl's zsh completion: generating it is slow, so defer it
# until kubectl is first used.
function kubectl() {
  # __start_kubectl only exists once the completion script has been sourced.
  if ! type __start_kubectl >/dev/null 2>&1; then
    # Bug fix: call the real binary here — a bare `kubectl` resolves to this
    # wrapper again and recurses before the completion is ever loaded.
    source <(command kubectl completion zsh)
  fi
  command kubectl "$@"
}
# Fzy history search doesn't sort things in a useful way, so we use zsh for now.
bindkey '^R' history-incremental-search-backward
#
# ls
#
LS_IGNORE="Dropbox" # ~/Dropbox is symlinked.
alias ls="/bin/ls -I $LS_IGNORE"
alias l="/bin/ls -lFGhL --color=auto -I $LS_IGNORE"
alias ll='/bin/ls -lFAGh --color=auto'
#
# Aliases
#
alias g='git'
alias n='sudo netctl'
alias mv='/bin/mv -i'
alias ..='cd ..'
alias img='sxiv -a'
alias df='df -h'
alias sys='systemctl'
alias x='startx'
alias web='chromium &'
alias e=$EDITOR
alias pdf='mupdf'
alias webserver='python2 -m SimpleHTTPServer'
alias pacman='sudo pacman --color=auto'
alias netctl='sudo netctl'
alias vim=nvim
alias clip='xclip -sel clip'
#
# History
#
HISTFILE=~/.zsh_history
HISTSIZE=65536
SAVEHIST=65536
REPORTTIME=10
# Treat the '!' character specially during expansion.
setopt BANG_HIST
# Write the history file in the ":start:elapsed;command" format.
setopt EXTENDED_HISTORY
# Write to the history file immediately, not when the shell exits.
setopt INC_APPEND_HISTORY
# Expire duplicate entries first when trimming history.
setopt HIST_EXPIRE_DUPS_FIRST
# Don't record an entry that was just recorded again.
setopt HIST_IGNORE_DUPS
# Delete old recorded entry if new entry is a duplicate.
setopt HIST_IGNORE_ALL_DUPS
# Do not display a line previously found.
setopt HIST_FIND_NO_DUPS
# Don't write duplicate entries in the history file.
setopt HIST_SAVE_NO_DUPS
# Remove superfluous blanks before recording entry.
setopt HIST_REDUCE_BLANKS
autoload up-line-or-beginning-search
autoload down-line-or-beginning-search
zle -N up-line-or-beginning-search
zle -N down-line-or-beginning-search
bindkey "\e[A" up-line-or-beginning-search
bindkey "\e[B" down-line-or-beginning-search
#
# Options
#
setopt NO_BG_NICE
setopt NO_HUP
setopt NO_LIST_BEEP
setopt LOCAL_OPTIONS
setopt LOCAL_TRAPS
setopt EXTENDED_HISTORY
setopt PROMPT_SUBST
setopt CORRECT
setopt COMPLETE_IN_WORD
setopt NO_IGNORE_EOF
setopt AUTO_CD
setopt INTERACTIVECOMMENTS
#
# Set prompt
#
# zsh precmd hook: rebuild both prompts before every prompt display.
# Left prompt is "; " (rendered red after a failing command); the right
# prompt shows user@host for SSH sessions and the last exit status after
# a failure, wrapped in red with a leading '#'.
precmd() {
  local status_code=$?
  local remote=""

  # Only show user@host when this is a remote (SSH) session.
  if [ -n "$SSH_CLIENT" ] || [ -n "$SSH_TTY" ]; then
    remote=" $(whoami)@$(hostname)"
  fi

  if [ "$status_code" -eq 0 ]; then
    PROMPT='; '
    RPROMPT="$remote"
  else
    PROMPT="%{$fg[red]%}; %{$reset_color%}"
    RPROMPT=" $status_code"
  fi

  # Anything shown on the right gets the red '#'-prefixed decoration.
  if [ "$RPROMPT" != "" ]; then
    RPROMPT="%{$fg[red]%}#$RPROMPT%{$reset_color%}"
  fi
}
#
# Vi-mode
#
bindkey -v
export KEYTIMEOUT=1
#
# zle hooks: make the terminal cursor shape track the current vi keymap
# (steady block in command mode, steady underline in insert mode).
# The escape sequences are the ones st understands — see ~/.st/config.h.
zle-keymap-select zle-line-init() {
  case $KEYMAP in
    vicmd) print -n -- "\e[2 q" ;;       # command mode: block cursor
    viins|main) print -n -- "\e[4 q" ;;  # insert mode: underline cursor
  esac
  zle reset-prompt
  zle -R
}

zle-line-finish() {
  # Restore the block cursor before the accepted command runs.
  print -n -- "\e[2 q"
}
zle -N zle-line-init
zle -N zle-line-finish
zle -N zle-keymap-select
# col N — print the N-th whitespace-separated column of stdin.
col() {
  local field=$1
  awk "{ print \$${field} }"
}
#
# Switch to `fg` process with Ctrl-Z
#
# Bound to Ctrl-Z: resume the most recent background job as if "fg" had
# been typed, stashing any half-typed command line so it reappears after.
fg-command() {
  [[ $#BUFFER -eq 0 ]] || zle push-input
  BUFFER="fg"
  zle accept-line
}
zle -N fg-command
bindkey '^Z' fg-command
# Zsh syntax highlighting
# source /usr/share/zsh/plugins/zsh-syntax-highlighting/zsh-syntax-highlighting.zsh
| true |
cf4ede432597fda6580a873551ae5e080448b40d | Shell | csg001/clang-stm32 | /cmake.sh | UTF-8 | 1,105 | 2.921875 | 3 | [] | no_license | #! /bin/bash
# Wipe and recreate the out-of-tree build directory.
function clean() {
    local build_dir="build"
    rm -rf "$build_dir"
    mkdir "$build_dir"
}
# Configure a debug build with the clang ARM bare-metal toolchain, then build.
# NOTE(review): always returns 1, and _make does its own `cd build` after we
# are already inside build/ — confirm before relying on the exit status.
function cmake_all_debug() {
    cd build
    time cmake -GNinja -DCMAKE_BUILD_TYPE=debug -D CMAKE_TOOLCHAIN_FILE=cmake/toolchain-arm-none-eabi.cmake -D COMPILER="clang" ..
    _make
    return 1
}
# Same as cmake_all_debug, but configures a release build.
function cmake_all_release() {
    cd build
    time cmake -GNinja -DCMAKE_BUILD_TYPE=release -D CMAKE_TOOLCHAIN_FILE=cmake/toolchain-arm-none-eabi.cmake -D COMPILER="clang" ..
    _make
    return 1
}
# Run (and time) the Ninja build inside build/.
function _make() {
    cd build
    time ninja
    return 1
}
# Flash the built hex image to the target board via OpenOCD.
function flash() {
    openocd.exe -f openocd.cfg -c "program build/arm_minisys.hex verify reset exit"
    return 1
}
# Start the OpenOCD debug server (configuration taken from ./openocd.cfg).
function start_debugersever() {
    openocd.exe
}
# Treat every CLI argument as the name of one of the functions above.
for arg in "$@"; do
    $arg
done
| true |
fddfbed474b2cbf28b4c2b9dfd4d6844fef4ac01 | Shell | xxhank/Automator | /run-click.sh | UTF-8 | 1,818 | 3.390625 | 3 | [] | no_license | #!/usr/bin/env sh
# cliclick game-automation for "Heroes".  An optional first argument selects
# a cliclick mode and is forwarded to every invocation as "-m <mode>".
mode=$1
if [[ ! -z $mode ]]; then
    mode=" -m $mode"
fi
# Bring the game window to the foreground ("进入程序" = "enter the program").
# NOTE(review): $mode was rewritten to " -m <mode>" above, so this comparison
# can never equal the bare word "test" — confirm whether test mode is meant
# to skip the activation step.
#./cliclick $mode c:1419,584 # w:1000 c:345,862.5
if [[ $mode != "test" ]]; then
    echo "进入程序"
    osascript ./bringAppToFront.scpt || exit 1
    sleep 1
fi
# random_value MIN MAX — print a uniform random integer in [MIN, MAX).
# (min/max stay global, as in the original interface.)
function random_value(){
    min=$1
    max=$2
    # Pure shell arithmetic; no need to fork `expr` for the subtraction.
    echo $(( RANDOM % (max - min) + min ))
}
# Emit a cliclick relative-move token "rc:+X,+Y " with random offsets in
# [-50, 50).  Sets the globals x and y as a side effect.
function random_move(){
    x=$(random_value -50 50)
    y=$(random_value -50 50)
    printf '%s\n' "rc:+$x,+$y "
}
# Emit a cliclick click token "c:+X,+Y " with random offsets in [-50, 50),
# used to confirm a skill cast.  Sets the globals x and y as a side effect.
function confirm_skill(){
    x=$(random_value -50 50)
    y=$(random_value -50 50)
    printf '%s\n' "c:+$x,+$y "
}
# Name of the application the clicks are sent to (read by check_and_run).
App="Heroes"
# check_and_run ARGS... — forward ARGS to cliclick, but only while $App is
# still the frontmost application; otherwise exit so stray clicks never
# land in another window.
function check_and_run(){
    top=`osascript ./check_terminal_is_front.scpt $App`
    echo "$App is front :" $top
    if [[ $top != "false" ]]; then
        ./cliclick $@
    else
        exit 0
    fi
}
# Main automation loop: start a training match, wander around, cast the four
# skills (Q/W/E/R), leave the match, open the talents panel, then repeat
# after a random pause.  (Comments translated from the original Chinese.)
while [[ true ]]; do
    check_and_run $mode c:120,34.5    # "Game" menu
    sleep 0.5
    check_and_run $mode c:131,79.5    # "Training mode"
    sleep 0.5
    check_and_run $mode c:696,854     # "Ready"
    # wait out the loading / cut-scene
    sleep 1
    # simulate some movement
    check_and_run $mode -w 400 $(random_move) $(random_move) $(random_move)
    sleep 1
    #./cliclick $mode -w 200 c:497,870
    # cast skill 1 (Q), confirmed with a small random click
    check_and_run $mode -w 200 kp:q $(confirm_skill) rc:+1,+1
    sleep 1
    # cast skill 2 (W)
    check_and_run $mode -w 200 kp:w $(confirm_skill) rc:+1,+1
    sleep 1
    # cast skill 3 (E)
    check_and_run $mode -w 200 kp:e $(confirm_skill) rc:+1,+1
    sleep 1
    # cast skill 4 (R)
    check_and_run $mode -w 200 kp:r $(confirm_skill) rc:+1,+1
    sleep 1
    # "Exit" button
    check_and_run $mode -w 200 c:100,852
    sleep 1
    # open the talents panel
    check_and_run $mode -w 200 c:100,620 kd:ctrl ku:ctrl
    sleep $(random_value 2 15)
done
# Get a random number: echo $(( RANDOM % (10000 ) + 5000 ))
89ff4f897e13d7f7b4d1d1cb6c0a0cc434e9ea0b | Shell | ChefIronBelly/BSD-Configs | /open/home/.bin/ufetch | UTF-8 | 1,380 | 3.484375 | 3 | [] | no_license | #!/bin/sh
#
# ufetch-openbsd - tiny system info for openbsd
#
## INFO
# user is already defined
# Gather the pieces of system information shown in the banner below.
HOST="$(hostname)"
OS="$(uname -sr)"
KERNEL="$(uname -v)"
# First comma-field of uptime(1), without the leading "... up " and spaces.
UPTIME="$(uptime | awk -F, '{sub(".*up ",x,$1);print $1}' | sed -e 's/^[ \t]*//')"
# Number of installed packages (OpenBSD pkg_info).
PACKAGES="$(pkg_info -A | wc -l | sed -e 's/^[ \t]*//')"
# shell is already defined
# Fall back to the last command in ~/.xinitrc to guess the window manager.
if [ -z "$WM" ]; then
	WM=$(tail -n 1 "$HOME/.xinitrc" | cut -d ' ' -f 2)
fi
## DEFINE COLORS
rc=$(tput sgr0) # reset
c0=$(tput setaf 0 0 0) # black
c1=$(tput setaf 1 0 0) # red
c2=$(tput setaf 2 0 0) # green
c3=$(tput setaf 3 0 0) # yellow
c4=$(tput setaf 4 0 0) # blue
c5=$(tput setaf 5 0 0) # magenta
c6=$(tput setaf 6 0 0) # cyan
c7=$(tput setaf 7 0 0) # white
bc=$(tput bold) # bold
# you can change these
lc=${rc}${bc}${c4} # labels
nc=${rc}${bc}${c4} # user and hostname
ic=${rc} # info
fc=${rc}${c4} # first color
sc=${rc}${c7} # second color
tc=${rc}${bc}${c4} # third color
## OUTPUT
cat <<EOF
${fc} _____ ${nc}${USER}${ic}@${nc}${HOST}${rc}
${fc} \- -/ ${lc}OS: ${ic}${OS}${rc}
${fc} \_/ \ ${lc}KERNEL: ${ic}${KERNEL}${rc}
${fc} | ${sc}O O${fc} | ${lc}UPTIME: ${ic}${UPTIME}${rc}
${fc} |_ ${tc}< ${fc}) ${tc}3 ${fc}) ${lc}PACKAGES: ${ic}${PACKAGES}${rc}
${fc} / \ / ${lc}SHELL: ${ic}${SHELL}${rc}
${fc} /-_____-\ ${lc}WM: ${ic}${WM}${rc}
EOF
| true |
2a3e814fc0670e94a2cfc972b6490b7769b6bc3d | Shell | adryd325/dotfiles | /legacy/personal/_install/30-pnpm.sh | UTF-8 | 307 | 2.984375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# shellcheck source=../../../constants.sh
# Bail out politely when the environment is not prepared, then pull in the
# shared helpers from the dotfiles repo.
[[ -z "${AR_DIR}" ]] && echo "Please set AR_DIR in your environment" && exit 0; source "${AR_DIR}"/constants.sh
# Detect the OS — presumably sets AR_OS; provided by constants.sh (confirm).
ar_os
AR_MODULE="pnpm"
# pnpm is only installed on macOS hosts in this setup.
if [[ "${AR_OS}" = "darwin_macos" ]]; then
  log info "Installing pnpm"
  npm install -g pnpm --silent
fi
8d91dc7ddd63a30df821e51d4822da1e870fa8d3 | Shell | rskumar/sourcegraph | /dev/circle-release-sourcemaps.sh | UTF-8 | 518 | 2.875 | 3 | [] | no_license | #!/bin/bash
set -ex
VERSION=$1
# release sourcemaps for Splunk JavaScript logging
# Collect all JS modules and asset maps into one staging directory.
rm -rf sourcemaps && mkdir -p sourcemaps/assets
cp -R app/node_modules/* sourcemaps/
cp -R app/web_modules/* sourcemaps/
cp -R app/assets/*.map sourcemaps/assets/
tar -czf "sourcemaps-$VERSION.tar.gz" -C sourcemaps .
# Authenticate to GCP with the base64-encoded service account from the env
# and upload the tarball.
# NOTE(review): the decoded key file is left on disk afterwards — consider
# deleting it once gcloud has been activated.
echo $GCLOUD_SERVICE_ACCOUNT | base64 --decode > gcloud-service-account.json
gcloud auth activate-service-account --key-file gcloud-service-account.json
gsutil cp "sourcemaps-$VERSION.tar.gz" gs://splunk-sourcemaps
| true |
61159c56042fa69ae95e466e936ca412e14c104a | Shell | rndsolutions/hawkcd | /Server/hawkcd.sh | UTF-8 | 3,697 | 3.546875 | 4 | [
"Apache-2.0"
] | permissive | # Copyright (C) 2016 R&D Solutions Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/bin/bash
# <replace> is substituted with the release version by the packaging step;
# the server jar is then expected to exist as ./<version>.jar.
version=<replace>
# ANSI color helpers for status messages.
RED='\033[0;31m'
GREEN='\033[0;32m'
RESET="$(tput sgr0)"
# Bring the stack up: run the prerequisite checks and, only when they all
# pass (check returns 0), launch Redis followed by the application server.
function start {
	check
	is_prereq_met=$?
	if [[ "$is_prereq_met" -eq 0 ]]; then
		start_db
		start_server
	fi
}
# Stop the application server and Redis.  pgrep lists the matching PIDs;
# `kill $(tail)` hands them to kill by reading them back from the pipe.
function stop {
	local java_pattern="java -jar $version.jar"
	if [[ -z $(pgrep -x -f "$java_pattern") ]]; then
		echo "hawkcd is not running"
	else
		pgrep -x -f "$java_pattern" | kill $(tail)
		echo "Application server stopped"
	fi
	if [[ -z $(pgrep redis-server) ]]; then
		echo "redis is not running"
	else
		pgrep redis-server | kill $(tail)
	fi
}
# Verify the prerequisites: a usable JVM and free default ports for Redis
# and the application server.  Returns 0 when everything is in place.
function check {
	check_java
	is_java_available=$?
	check_open_port $db_port "redis"
	is_redis_port_open=$?
	check_open_port $server_port "hawkcd"
	is_hawkcd_default_port_open=$?
	# Bug fix: the java-check result was computed but never consulted, so
	# the stack would start even without a suitable JVM installed.
	if [[ $is_java_available -eq 1 ]] || [[ $is_redis_port_open -eq 1 ]] || [[ $is_hawkcd_default_port_open -eq 1 ]]; then
		return 1
	else
		return 0
	fi
}
# Locate a JVM and require at least Java 1.8.
# Sets globals: _java (command to run), JAVA_EXISTS, JAVA_VERSION.
# Returns non-zero when no suitable JVM can be found.
function check_java {
	# NB: `type -p java` also prints the resolved path on success.
	if type -p java; then
		_java=java
		JAVA_EXISTS=true;
	elif [[ -n "$JAVA_HOME" ]] && [[ -x "$JAVA_HOME/bin/java" ]]; then
		echo -e "${GREEN}Found java executable in JAVA_HOME ${RESET}"
		_java="$JAVA_HOME/bin/java"
		JAVA_EXISTS=true;
	else
		echo -e "${RED} java installation was not found ${RESET}"
		return 1;
	fi
	# Extract the quoted version string from `java -version` (stderr).
	if [[ "$_java" ]]; then
		JAVA_VERSION=$("$_java" -version 2>&1 | awk -F '"' '/version/ {print $2}')
	fi
	if [ -n "$JAVA_EXISTS" ]; then
		echo -e "${GREEN}found java executable in PATH $(type -p java) ${RESET}"
	else
		echo "Java not found"
		return 1;
	fi
	# NOTE(review): this is a *lexicographic* comparison — "1.10" would sort
	# before "1.8".  Fine for 1.8/9/11-style strings, but not general.
	if [[ "$JAVA_VERSION" > '1.8' ]]; then
		echo -e "${GREEN}Java version $JAVA_VERSION is found ${RESET}"
	else
		echo -e "${RED} java 1.8 or above is required ${RESET}"
		return 1;
	fi
}
# check_open_port PORT LABEL — report whether localhost:PORT is free.
# Returns 0 (and prints "free") when nothing listens there, 1 when in use.
function check_open_port {
	_port="$1"
	server="$2"
	# nc -z just probes the port without sending any data.
	nc -z localhost $_port
	_is_port_in_use=$?
	if [ "$_is_port_in_use" -ne "0" ]; then
		echo -e "${GREEN}Default $server port $_port is free${RESET}"
		return 0;
	else
		echo -e "${RED}Default $server port $_port is in use${RESET}"
		return 1;
	fi
}
# Launch the HawkCD application server jar in the background.
function start_server {
	java -jar $version.jar &
}
# Launch the bundled Redis server in the background (path relative to CWD).
function start_db {
	data/Linux/redis/redis-server &
}
# Print the supported sub-commands.
function list {
	cat <<'USAGE'
---Available commands---
start - starts database and application server
stop - stops database and application server
check - checks if the enviroment has all required software
--------End-------------
USAGE
}
#read input args
# Bug fix: the third argument used to be stored in a typo variable `tm3`,
# so a server port passed as $3 was silently ignored.
tmp1=$1 ;tmp2=$2; tmp3=$3;
#set default values
func=${tmp1:-start}
# Bug fix: ${tmp2:='6379' } embedded a trailing space into the value; use
# plain default expansion for both ports.
db_port=${tmp2:-6379}
server_port=${tmp3:-8080}
case "$func" in
	"start")
		# Refuse a second start: look for a live "java -jar <version>.jar".
		is_running=$(pgrep -x -f "java -jar $version.jar")
		echo $is_running
		if [[ -z $is_running ]]; then
			"start"
		else
			echo -e "${RED}The server is already running ${RESET}"
			"list"
		fi
	;;
	"stop")
		"stop"
	;;
	"check")
		"check"
	;;
	*)
		"list"
	;;
esac
| true |
757d103d67f575de4b91621138fe3eecf1a57ee4 | Shell | felipebelucena/eventex | /scripts/setup-env.sh | UTF-8 | 656 | 2.71875 | 3 | [] | no_license | #!/bin/bash
# install pyenv
curl -L https://raw.githubusercontent.com/yyuu/pyenv-installer/master/bin/pyenv-installer | bash
# Make pyenv available in future login shells...
echo >> ~/.bashrc
echo 'export PATH="/home/vagrant/.pyenv/bin:$PATH"' >> ~/.bashrc
echo 'eval "$(pyenv init -)"' >> ~/.bashrc
echo 'eval "$(pyenv virtualenv-init -)"' >> ~/.bashrc
# ...and in the current provisioning shell.
export PATH="/home/vagrant/.pyenv/bin:$PATH"
eval "$(pyenv init -)"
eval "$(pyenv virtualenv-init -)"
# install python 3.5.0
pyenv install -s 3.5.0
pyenv rehash
pyenv global 3.5.0
echo `python --version`
# config django env
cd /vagrant
python -m venv .wttd
source .wttd/bin/activate
pip install -r requirements-dev.txt
cp contrib/env-sample .env
| true |
7bac904ed57667297731606171964272a4955bd3 | Shell | Diaradougou/kbash | /setup/component/commands/.scope.sh | UTF-8 | 198 | 2.875 | 3 | [] | no_license | #!/bin/bash
# Not-used
# Print the placeholder usage text for this scope.
ENTRYPOINT_scope_usage() {
    printf '%s\n' "<<DESCRIBE ANY OPTIONS FOR THIS SCOPE>>"
}
# Print the help banner followed by a blank line (the original built the
# same output via printf over a command-substituted here-doc).
ENTRYPOINT_scope_help() {
    printf '%s\n\n' "ENTRYPOINT COMPONENT_NAME commands"
}
| true |
ebc808f7d0f4b866a6ec20c6f7b0699dde1ef488 | Shell | rbogusze/oracleinfrastructure | /scripto/oracle/setup/oracle_setup.wrap | UTF-8 | 80,196 | 3.71875 | 4 | [] | no_license | #!/bin/bash
#$Id: oracle_setup.wrap,v 1.1 2012-05-07 13:48:38 remik Exp $
#
# This is a script to run blocks of task (not related to each other) that are often performed on
# database hosts. These are usually simple things that I am tired to do again and again.
#
# This script will perform checks and action with accordance to PGF best practices to setup database environment.
# It should be possible to run this script multiple times with no negative effect.
# If some changes are already there the script should notice that and do no action. Do no harm is the motto.
#
# In particular:
# - as fallback stores the current .profile and crontab contents in CVS
# - modify .profile in accordance to desired contents, see HalInfrastructureProfile
# - create .forward file if not already there with dba@notes.pgf.com.pl alias in it
# Load useful functions
# Source the shared helper library (expected to provide the msg*/check_*/
# run_command*/f_execute_sql helpers used below); nothing in this script
# works without it, so bail out early when it is missing.
if [ ! -f $HOME/scripto/bash/bash_library.sh ]; then
  echo "[error] $HOME/scripto/bash/bash_library.sh not found. Exiting. "
  exit 1
else
  . $HOME/scripto/bash/bash_library.sh
fi
# Remember where the script was started from.
SCRIPTO_DIR=`pwd`
# Configure a daily cron job that gzips old archived redo logs and removes
# expired ones.  Interactive: asks the operator to confirm the block and to
# choose whether gzipping makes sense on this host.
# $1 = INFO -> only print what the block would do, then return.
b_crontab_scripts_to_rm_gzip_archivelogs()
{
  B_PAR=$1 # optional mode flag for this block

  # Info section: describe the block; stop here when only info was requested.
  msgb "${FUNCNAME[0]} Beginning."
  msgb "I will create an entry in crontab to gzip and delete archivelogs "
  if [ "$B_PAR" = "INFO" ]; then
    return 0
  fi

  # Ask the operator whether to really run this block.
  f_confirm_block_run
  if [ "${CONFIRM_BLOCK_STATUS}" -eq 0 ]; then
    SQLPLUS=$ORACLE_HOME/bin/sqlplus
    check_file $SQLPLUS
    check_parameter ${ORACLE_SID}

    # Ask the instance for its LOCATION= archive destinations.
    msgi "Get from database local location of directories where archivelogs are stored"
    f_execute_sql "select VALUE from v\$parameter where NAME like 'log_archive_dest%' and lower(value) like 'location%';"
    NR_OF_LOCATIONS=`cat $F_EXECUTE_SQL | grep -v '^ *$' | wc -l | tr -d '[:blank:]'`
    F_TMP_OUTPUT=$D_TMP/sql_output.tmp_clean_${USERNAME}_${ORACLE_SID}
    cat $F_EXECUTE_SQL | grep -v '^ *$' > $F_TMP_OUTPUT

    # Parse up to two LOCATION=... values (two reads per loop iteration).
    while read LINE
    do
      echo $LINE
      LOCATION1=`echo $LINE | sed -e s/\"//g | $GREP -o "[lL][oO][cC][aA][tT][iI][oO][nN]=.*" | awk '{ print $1 }' | sed s/,// | sed -e s/^location=// | sed -e s/^LOCATION=//`
      read LINE
      echo $LINE
      LOCATION2=`echo $LINE | sed -e s/\"//g | $GREP -o "[lL][oO][cC][aA][tT][iI][oO][nN]=.*" | awk '{ print $1 }' | sed s/,// | sed -e s/^location=// | sed -e s/^LOCATION=//`
    done < $F_TMP_OUTPUT

    if [ "$LOCATION1" = "no rows selected" ]; then
      msge "No arch location found. Exiting"
      exit 0
    fi

    # With a single destination, reuse it for both script arguments.
    case $NR_OF_LOCATIONS in
      "2")
        msgi "2 locations found"
        ;;
      "1")
        msgi "1 locations found"
        LOCATION2=$LOCATION1
        ;;
      *)
        echo "Not supported number of locations. Exiting."
        exit 1
        ;;
    esac
    echo LOCATION1: $LOCATION1
    echo LOCATION2: $LOCATION2

    # Refresh the helper scripts from CVS.
    msga "Updating arch scripts"
    check_parameter ${HOME}
    run_command_e "cd $HOME/scripto/oracle/arch_gz_rm"
    cvs update

    # Keep a pre-change copy of the crontab under version control.
    msga "Storing current crontab in CVS"
    msg "[b_store_in_cvs] Storing the current crontab in CVS"
    check_parameter ${USERNAME}
    check_parameter ${HOSTNAME}
    check_directory "$HOME/scripto/crontabs"
    cd $HOME/scripto/crontabs
    crontab -l > crontab_${USERNAME}_${HOSTNAME}
    cvs add crontab_${USERNAME}_${HOSTNAME} > /dev/null 2>&1
    cvs commit -m "[b_store_in_cvs] Commit before changes to crontab" crontab_${USERNAME}_${HOSTNAME} > /dev/null 2>&1

    # On VxFS with many storage checkpoints, gzipping only adds to the
    # occupied space, so let the operator decide.
    msgi "Decide if gziping of archive logs is required"
    msgi "On vxfs filesystems where we store many storage checkpoints it makes no sense"
    msgi " to gzip the archivelogs, as it only adds to the occupied space"
    EXIT_WHILE=''
    while [ ! "$EXIT_WHILE" ]
    do
      msgw "Decide about gziping."
      read -p "[wait] Do you want to enable archive logs gziping? (yes/no)" V_ANSWER
      if [ "$V_ANSWER" = "yes" ]; then
        GZIPPING_MAKES_SENSE=GZIP
        EXIT_WHILE=1
      fi
      if [ "$V_ANSWER" = "no" ]; then
        GZIPPING_MAKES_SENSE=NOGZIP
        EXIT_WHILE=1
      fi
    done

    # Randomize the start time (14:00-19:59) so hosts do not all fire at once.
    msga "Adding arch script to crontab"
    RND_MM=`random_int "0" "60"`
    RND_HH=`random_int "14" "19"`
    run_command "cd $HOME/scripto/crontabs"
    msgi "I am about to add the following line to crontab_${USERNAME}_${HOSTNAME}"
    echo "$RND_MM $RND_HH * * * (. \$HOME/.profile; \$HOME/scripto/oracle/arch_gz_rm/arch_gz_rm.sh $LOCATION1 $LOCATION2 0 7 $GZIPPING_MAKES_SENSE)"
    msgw "Does it looks resonable?"
    ACTION_FINISHED=""
    while [ ! "$ACTION_FINISHED" = "yes" ]
    do
      read -p "[wait] Does it looks resonable?. (yes/any)" ACTION_FINISHED
    done
    echo "$RND_MM $RND_HH * * * (. \$HOME/.profile; \$HOME/scripto/oracle/arch_gz_rm/arch_gz_rm.sh $LOCATION1 $LOCATION2 0 7 $GZIPPING_MAKES_SENSE)" >> crontab_${USERNAME}_${HOSTNAME}
    run_command "cvs commit -m 'Gzip and delete archive, and oracle logs' crontab_${USERNAME}_${HOSTNAME}"
    run_command "crontab crontab_${USERNAME}_${HOSTNAME}"
    run_command "crontab -l"
  fi #CONFIRM_BLOCK_STATUS
  msgb "${FUNCNAME[0]} Finished."
} #b_crontab_scripts_to_rm_gzip_archivelogs
# Add tempfiles to every TEMPORARY tablespace that has none registered in
# DBA_TEMP_FILES — a state that is common after restoring from a hot copy.
# The files go into the directory of datafile #1 (the SYSTEM tablespace).
# $1 = INFO -> only describe the block, then return.
b_create_temp_datafiles_for_temp_tbs_if_dont_exist()
{
  B_PAR=$1 # optional mode flag for this block

  msgb "${FUNCNAME[0]} Beginning."
  msgb "Create temporary datafiles for temporary tablespaces if they don't exists "
  msgb "such action is often needed after hot copy and I got bored doing that by hand."
  if [ "$B_PAR" = "INFO" ]; then
    return 0
  fi

  SQLPLUS=$ORACLE_HOME/bin/sqlplus
  check_file $SQLPLUS

  # Kept for reference: generator that would DROP the existing tempfiles.
  # $SQLPLUS "/ as sysdba" <<EOF
  #set pagesize 400
  #set heading off
  #set verify off
  #set feedback on
  #set echo off
  #spool tmp.sql
  #select 'alter database tempfile '||name||' drop including datafiles;' from v$tempfile;
  #spool off
  #rem @tmp.sql
  #EOF

  # Put new tempfiles where SYSTEM lives: take datafile #1's full path and
  # strip the filename component to obtain the directory.
  msgi "Get location where temp files will be created (I take simplistic approach - put it where SYSTEM tbs is)"
  f_execute_sql "select name from v\$datafile where FILE#='1';"
  TMP_VAR=`echo $V_EXECUTE_SQL | awk -F"/" '{print $NF}'`
  V_TEMP_DIR=`echo $V_EXECUTE_SQL | sed -e s/$TMP_VAR//`
  check_directory $V_TEMP_DIR

  msgi "Create temp files for temp tablespaces that have no datafiles yet"
  msgi "The tbs is assumed not to have datafiles if tbs name is not present in DBA_TEMP_FILES"
  # Generate one ALTER TABLESPACE ... ADD TEMPFILE statement per affected
  # tablespace into tmp.sql, then execute tmp.sql in the same session.
  $SQLPLUS -s "/ as sysdba" <<EOF
set pagesize 400
set linesize 400
set heading off
set verify off
set feedback off
spool tmp.sql
select 'alter tablespace '||TABLESPACE_NAME||' add tempfile ''$V_TEMP_DIR'||TABLESPACE_NAME||'_auto_tmp1.dbf'' size 150M autoextend on next 100M maxsize 30000M;' from dba_tablespaces where CONTENTS='TEMPORARY' and TABLESPACE_NAME not in (select TABLESPACE_NAME from DBA_TEMP_FILES);
spool off
@tmp.sql
EOF
  run_command "cat tmp.sql"
  msgb "${FUNCNAME[0]} Finished."
} #b_create_temp_datafiles_for_temp_tbs_if_dont_exist
# Support functions
#
# Function that will remove the line with a string from .profile
# profile_remove LINE — delete LINE (matched as an entire line) from
# $ORACLE_HOME/.profile.  A working copy of .profile is kept under CVS in
# ~/scripto/oracle/logs and committed after the change.
# Honours the global B_PAR: when it is CHECK, only report, do not modify.
profile_remove()
{
  F_STORED_PROFILE=profile_${USERNAME}_${HOSTNAME}_${ORACLE_SID}
  check_directory "$HOME/scripto/oracle/logs"
  cd $HOME/scripto/oracle/logs
  cp $ORACLE_HOME/.profile $F_STORED_PROFILE
  check_file "$HOME/scripto/oracle/logs/$F_STORED_PROFILE"
  # Anchored grep: the argument must match a whole line.
  TMP_CHK=`cat $F_STORED_PROFILE | grep "^$1$"`
  if [ ! -z "$TMP_CHK" ]; then
    msge "[profile_remove] EXISTS [$1], consider removing it."
    # In CHECK mode stop before doing anything permanent.
    if [ "$B_PAR" = "CHECK" ]; then
      return 0
    fi
    msga "[profile_remove] EXISTS [$1], removing"
    run_command_e "cat $ORACLE_HOME/.profile | grep -v \"^$1$\" > $F_STORED_PROFILE"
    cvs commit -m "removed $1" $F_STORED_PROFILE
    run_command_e "cp $F_STORED_PROFILE $ORACLE_HOME/.profile"
  fi
} # profile_remove
# Function that will add a line to .profile
# profile_add LINE — append LINE to the profile when it is not already
# present as an entire line.  Mirrors profile_remove; the working copy is
# versioned in CVS before being copied back.
# Honours the global B_PAR: when it is CHECK, only report, do not modify.
profile_add()
{
  F_STORED_PROFILE=profile_${USERNAME}_${HOSTNAME}_${ORACLE_SID}
  check_directory "$HOME/scripto/oracle/logs"
  cd $HOME/scripto/oracle/logs
  cp $ORACLE_HOME/.profile $F_STORED_PROFILE
  check_file "$F_STORED_PROFILE"
  TMP_CHK=`cat $F_STORED_PROFILE | grep "^$1$"`
  if [ -z "$TMP_CHK" ]; then
    msge "[profile_add] [$1] does not exists. Consider adding it."
    # In CHECK mode stop before doing anything permanent.
    if [ "$B_PAR" = "CHECK" ]; then
      return 0
    fi
    msga "[profile_add] ADDING it, [$1] does not exists."
    run_command_e "echo \"$1\" >> $F_STORED_PROFILE"
    cvs commit -m "added $1" $F_STORED_PROFILE
    # NOTE(review): the updated file is copied back to $HOME/.profile here,
    # while profile_remove copies to $ORACLE_HOME/.profile — confirm the two
    # paths are meant to refer to the same account.
    run_command_e "cp $F_STORED_PROFILE $HOME/.profile"
  fi
} # profile_add
# Functions that are blocks of work
#
# Because I rely on mails forwarded from hosts to dba alias it is important to know that the mail is working fine
# Checking if the host has a queue of unsent mail
# If this command hangs add the host with domain name to /etc/hosts
# Actions: check
#
# Verify that outgoing mail works on this host: warn when the relay cannot
# be pinged, and stop the whole script when mailq is unusable or mail is
# queued — all alerting relies on mail reaching the dba alias.
b_check_mail_queue()
{
  msgb "${FUNCNAME[0]} Beginning."
  MAILQ=/usr/bin/mailq
  check_file "$MAILQ"

  # Relay reachability is only warned about, not fatal.
  $PING -c 2 notes.pgf.com.pl > /dev/null
  if [ $? -ne 0 ]; then
    msge "[b_check_mail_queue] Can not ping to notes.pgf.com.pl"
  fi

  TMP_CHK=`$MAILQ 2>&1 | grep "No authorization to run mailq"`
  msgd "TMP_CHK: $TMP_CHK"
  if [ "${TMP_CHK}" ]; then
    msge "[b_check_mail_queue] No authorization to run mailq. Exiting. (Fix it with OS admin)"
    exit 0
  fi

  # The mailq output format differs between Solaris sendmail and Linux.
  case `uname -p` in
    "sparc")
      msgd "I am running on Solaris SPARC now"
      TMP_CHK=`$MAILQ | grep "Total requests" | awk '{ print $3 }'`
      msgd "TMP_CHK: $TMP_CHK"
      if [ "${TMP_CHK}" -ne "0" ]; then
        msge "[b_check_mail_queue] Host has queue of unsent mail. Exiting. (Fix it with OS admin)"
        exit 0
      else
        msgi "[b_check_mail_queue] No waiting mails. OK."
      fi
      ;;
    "i686"|"x86_64")
      msgd "I am running on Linux now"
      TMP_CHK=`$MAILQ | grep "Mail queue is empty" | wc -l`
      msgd "TMP_CHK: $TMP_CHK"
      if [ "${TMP_CHK}" -ne "1" ]; then
        msge "[b_check_mail_queue] Host has queue of unsent mail. Exiting. (Fix it with OS admin)"
        exit 0
      else
        msgi "[b_check_mail_queue] No waiting mails. OK."
      fi
      ;;
    *)
      msge "Unknown OS!!! Exiting."
      exit 1
      ;;
  esac
  msgb "${FUNCNAME[0]} Finished."
} # b_check_mail_queue
# Take the file provided as $1,
# copy it to the directory (already under CVS control) provided as $2,
# and store it in CVS under the filename provided as $3
# f_standalone_file_to_cvs SRC DIR NAME — copy file SRC into the
# CVS-managed directory DIR under the name NAME, show the diff when it
# changed, and commit with the changed lines embedded in the message.
f_standalone_file_to_cvs()
{
  msgi "[f_standalone_file_to_cvs] Storing the: $1 in CVS"
  msgi "[f_standalone_file_to_cvs] In dir: $2 as filename: $3"
  check_file "$1"
  check_directory "$2"
  check_parameter "$3"
  cd $2
  cp $1 $3
  # `cvs add` fails harmlessly when the file is already tracked.
  cvs add $3 > /dev/null 2>&1
  # Keep only the changed lines (">"/"<") of the diff.
  V_CVSDIFF=`cvs diff $3 | $GREP -e "^>" -e "^<"`
  if [ `echo $V_CVSDIFF | grep -v '^ *$' | wc -l ` -gt 0 ]; then
    cvs diff $3
  fi
  cvs commit -m "[f_standalone_file_to_cvs] $V_CVSDIFF" $3 > /dev/null 2>&1
} #f_standalone_file_to_cvs
# Store interesting files in CVS
# Snapshot the interesting configuration of this instance in CVS:
# .profile, crontab, a cleaned pfile, the opatch inventory and the
# listener/sqlnet/tnsnames network files.
b_store_in_cvs()
{
  msgb "${FUNCNAME[0]} Beginning."

  # Without the CVS server nothing below can work.
  $PING -c 1 cvs.pgf.com.pl > /dev/null
  if [ $? -ne 0 ]; then
    msge "Host cvs.pgf.com.pl not found. Exiting."
    exit 0
  else
    msgi "Host cvs.pgf.com.pl found. OK"
  fi

  # ORACLE_HOME with the slashes squeezed out makes a usable filename suffix.
  V_UNIQ_ORA_HOME=`echo $ORACLE_HOME | tr -d '/'`
  f_standalone_file_to_cvs $ORACLE_HOME/.profile $HOME/scripto/oracle/logs profile_${USERNAME}_${HOSTNAME}_${V_UNIQ_ORA_HOME}
  crontab -l > $D_TMP/crontab_${USERNAME}_${HOSTNAME}
  f_standalone_file_to_cvs $D_TMP/crontab_${USERNAME}_${HOSTNAME} $HOME/scripto/crontabs crontab_${USERNAME}_${HOSTNAME}

  # Dump the spfile to a pfile, dropping the derived "__..." parameters
  # that change between restarts and would spam the CVS history.
  f_execute_sql "create pfile from spfile;"
  cat $ORACLE_HOME/dbs/init${ORACLE_SID}.ora | grep -v "__" > $D_TMP/init${ORACLE_SID}.ora.tmp
  f_standalone_file_to_cvs $D_TMP/init${ORACLE_SID}.ora.tmp $HOME/scripto/oracle/logs init${ORACLE_SID}.ora_${USERNAME}_${HOSTNAME}

  # Patch inventory, without the log-file lines that differ on every run.
  msgd "Checking opatch PATH"
  TMP_PATH=`which opatch`
  msgd "TMP_PATH: $TMP_PATH"
  check_file $TMP_PATH
  opatch lsinventory 2>&1 | grep -v 'Log file location' | grep -v 'Creating log file' | grep -v 'Lsinventory Output file location' > $D_TMP/lsinventory_${ORACLE_SID}_${HOSTNAME}
  f_standalone_file_to_cvs $D_TMP/lsinventory_${ORACLE_SID}_${HOSTNAME} $HOME/scripto/oracle/logs lsinventory_${ORACLE_SID}_${HOSTNAME}

  if [ -f "$ORACLE_HOME/network/admin/listener.ora" ]; then
    f_standalone_file_to_cvs $ORACLE_HOME/network/admin/listener.ora $HOME/scripto/oracle/logs listener.ora_${ORACLE_SID}_${HOSTNAME}
  fi
  if [ -f "$ORACLE_HOME/network/admin/sqlnet.ora" ]; then
    f_standalone_file_to_cvs $ORACLE_HOME/network/admin/sqlnet.ora $HOME/scripto/oracle/logs sqlnet.ora_${ORACLE_SID}_${HOSTNAME}
  fi

  # Strip "$Header:"-style lines so CVS keyword expansion causes no noise.
  msgd "Preparing tnsnames.ora not to include the Header keyword interpreted by CVS"
  cat $ORACLE_HOME/network/admin/tnsnames.ora | grep -v "^# .Header:" > $D_TMP/tnsnames.ora
  f_standalone_file_to_cvs $D_TMP/tnsnames.ora $HOME/scripto/oracle/logs tnsnames.ora_${ORACLE_SID}_${HOSTNAME}

  msgb "${FUNCNAME[0]} Finished."
} # b_store_in_cvs
# Make desired changes to .profile
# Bring .profile in line with the team conventions: drop obsolete lines and
# make sure SQLPATH and NLS_LANG are set.  The heavy lifting (CVS backup,
# CHECK-mode handling via the global B_PAR) is done by profile_remove/
# profile_add.
b_change_profile()
{
  msgb "${FUNCNAME[0]} Beginning."
  B_PAR=$1 # optional mode flag, consumed by profile_remove/profile_add

  msgi "[b_change_profile] Adjusting .profile if neccessary"

  # Lines that must not be present any more.
  profile_remove "stty erase"
  profile_remove "stty erase ^?"
  profile_remove "set -o vi"
  profile_remove 'export SQLPATH=\$HOME/scripto/oracle/sqlbin'
  profile_remove 'export NLS_LANG=AMERICAN_AMERICA.EE8ISO8859P2'

  # Lines that must be present (variable set first, then exported).
  profile_add 'SQLPATH=\$HOME/scripto/oracle/sqlbin'
  profile_add 'export SQLPATH'
  profile_add 'NLS_LANG=AMERICAN_AMERICA.EE8ISO8859P2'
  profile_add 'export NLS_LANG'

  msgb "${FUNCNAME[0]} Finished."
} # b_change_profile
# Configure the host to forward user mails to alias
# create .forward file if not already there with dba@notes.pgf.com.pl alias in it
# Make sure $HOME/.forward exists and forwards mail to the dba alias.
# $1 = CHECK -> only report, do not create/overwrite the file.
b_forward_mail()
{
  msgb "${FUNCNAME[0]} Beginning."
  B_PAR=$1 # Check if block was run with parameter

  # Bug fix: match the alias as a fixed string over the whole line
  # (grep -F -x) — the old regex let the dots match any character — and
  # silence cat's error when the file does not exist yet.
  TMP_CHK=`cat $HOME/.forward 2>/dev/null | grep -F -x "dba@notes.pgf.com.pl"`
  if [ ! "$TMP_CHK" = "dba@notes.pgf.com.pl" ]; then
    msge "[b_forward_mail] File $HOME/.forward not found or different than expected."
    # In CHECK mode stop before doing anything permanent.
    if [ "$B_PAR" = "CHECK" ]; then
      return 0
    fi
    msga "[b_forward_mail] CREATING file. "
    run_command "echo dba@notes.pgf.com.pl > $HOME/.forward"
  else
    msgi "[b_forward_mail] Contents as expected, doing nothing."
  fi
  msgb "${FUNCNAME[0]} Finished."
} # b_forward_mail
#
# End of block functions
# There are often questions: Check if everything is OK. This block checks the basic things that should exist.
# - oratab, oraInst.loc
# - /etc/init.d/oracle, link from rc3.d
# Dispatch the OS-level health checks to the platform-specific routine,
# based on the processor type reported by uname -p.
b_basic_os_healthcheck()
{
  case `uname -p` in
    "sparc")
      msgi "I am running on Solaris SPARC now"
      b_basic_os_healthcheck_sparc
      ;;
    "i686"|"x86_64")
      msgi "I am running on Linux now"
      msgi "WIP"
      ;;
    *)
      msge "Unknown OS!!! Exiting."
      exit 1
      ;;
  esac
} #b_basic_os_healthcheck
b_basic_os_healthcheck_sparc()
{
# OS-level health check for Oracle hosts running Solaris SPARC:
# /etc/hosts format, oratab, oraInst.loc, DB autostart file/links and,
# on Solaris 10 zones, the zone uptime (cold-backup indicator).
msgb "${FUNCNAME[0]} Beginning."
# function called from bash_library
f_check_expected_format_for_etc_hosts
msgi "Checking if oratab exists"
check_file "/var/opt/oracle/oratab"
if [ -f /var/opt/oracle/oratab ]; then
# Strip comment lines, blank lines and the '*' placeholder entry.
ORATAB_CONTENTS=`cat /var/opt/oracle/oratab | grep -v '^#' | grep -v '^ *$' | grep -v '^*'`
if [ -z "$ORATAB_CONTENTS" ]; then
msge "[b_basic_os_healthcheck_sparc] oratab does not contain a meaningful entry"
else
msgi "oratab found, contains a meaningful entry"
msgi "$ORATAB_CONTENTS"
fi
else
msge "[b_basic_os_healthcheck_sparc] oratab NOT found."
fi
msgi "Checking if oraInst.loc exists"
check_file "/var/opt/oracle/oraInst.loc"
if [ -f /var/opt/oracle/oraInst.loc ]; then
ORATAB_CONTENTS=`cat /var/opt/oracle/oraInst.loc | grep -v '^#' | grep -v '^ *$' | grep -v '^*'`
if [ -z "$ORATAB_CONTENTS" ]; then
msge "[b_basic_os_healthcheck_sparc] oraInst.loc does not contain a meaningful entry"
else
msgi "oraInst.loc found, contains a meaningful entry"
msgi "$ORATAB_CONTENTS"
fi
else
msge "[b_basic_os_healthcheck_sparc] oraInst.loc NOT found."
fi
# Check /etc/init.d/oracle, link from rc3.d
if [ ! -f "/etc/init.d/oracle" ]; then
msge "[b_basic_os_healthcheck_sparc] /etc/init.d/oracle does not exists, there can be problem with autostart of oracle db."
else
msgi "/etc/init.d/oracle exists. OK"
f_check_owner root /etc/init.d/oracle
# sometimes has -rwxr-xr-x, skipping test f_check_permission -rwxr--r-- /etc/init.d/oracle
fi
if [ ! -L "/etc/rc3.d/S99oracle" ]; then
msge "[b_basic_os_healthcheck_sparc] Link /etc/rc3.d/S99oracle does not exists, there can be problem with autostart of oracle db."
else
msgi "Link /etc/rc3.d/S99oracle exists. OK"
fi
#RB, 2011.04.18 I skip those checks, Przemek is working on forced shutdown procedure and needs the K links
#msgi "Checking how many links point to /etc/init.d/oracle"
#V_INODE=`ls -i /etc/init.d/oracle | awk '{ print $1 }'`
#V_LINKS_TO_ORACLE=`find /etc/rc* -follow -inum $V_INODE -print 2>/dev/null`
#V_NR_OF_LINKS_TO_ORACLE=`find /etc/rc* -follow -inum $V_INODE -print 2>/dev/null | wc -l | awk '{ print $1 }'`
#msgi "Nr of links: $V_NR_OF_LINKS_TO_ORACLE"
#msgi "Checking the Solaris version, to determine how many links should point to /etc/init.d/oracle"
V_SOLARIS=`uname -a | awk '{ print $3 }'`
case $V_SOLARIS in
"5.10")
#if [ "${V_NR_OF_LINKS_TO_ORACLE}" -ne 1 ]; then
# msge "I am on the Solaris Zone, there should be no /etc/rc0.d/ link to shutdown the database. The database is shutdown from master zone by running /etc/init.d/oracle stop."
# msge "Then number of links pointing to /etc/init.d/oracle different than 1. Something is wrong"
# msge "$V_LINKS_TO_ORACLE"
#else
# msgi "OK, one link found"
# msgi "$V_LINKS_TO_ORACLE"
#fi
msgi "I check how many days was the zone up. If it is up more than 1 day that means that the zone was not shutdown to backup - meaning there was no backup."
msgi "This is true if we use cold backup."
msgi "If this zone can and shoul be up for more than X days place such an info by:"
msgi "echo X_nr_of_days > /var/tmp/DBA_zone_accepted_uptime.txt"
V_UPTIME_MORE_THAN_1_DAY=`uptime | grep day | wc -l`
if [ "${V_UPTIME_MORE_THAN_1_DAY}" -eq 1 ]; then
msgi "Zone is up more than 24h, checking how much"
if [ -f /var/tmp/DBA_zone_accepted_uptime.txt ]; then
msgi "File with accepted days of uptime found. Comparing"
V_UPTIME_ACCEPTED_DAY=`cat /var/tmp/DBA_zone_accepted_uptime.txt`
V_UPTIME_ACTUAL_DAY=`uptime | grep day | awk '{ print $3 }'`
if [ "${V_UPTIME_ACTUAL_DAY}" -gt "${V_UPTIME_ACCEPTED_DAY}" ]; then
msge "Zone is up ${V_UPTIME_ACTUAL_DAY} days which is more than accepted ${V_UPTIME_ACCEPTED_DAY} days."
msge "If this zone can and should be up for more than ${V_UPTIME_ACTUAL_DAY} days place such an info by:"
msge "echo ${V_UPTIME_ACTUAL_DAY} > /var/tmp/DBA_zone_accepted_uptime.txt"
else
msgi "OK, zone is up ${V_UPTIME_ACTUAL_DAY} which is less than accepted ${V_UPTIME_ACCEPTED_DAY}."
fi
else
msge "Zone is up `uptime | grep day | awk '{ print $3 }'` days which is more than accepted 1 day. Check if backup was performed."
msge "Or set accepted nr of uptime days by eg: echo `uptime | grep day | awk '{ print $3 }'` > /var/tmp/DBA_zone_accepted_uptime.txt"
fi
else
msgi "OK, zone is up less than 24h"
fi
;;
"5.9")
msgi "I am NOT on the Solaris Zone, there should be a link in /etc/rc0.d/K01oracle to shutdown the database."
# BUGFIX: the link-count computation above was commented out in 2011, which
# left V_NR_OF_LINKS_TO_ORACLE unset and made the numeric test below fail
# with "integer expression expected". Recompute it here, where it is
# actually needed (Solaris 9 hosts only).
V_INODE=`ls -i /etc/init.d/oracle | awk '{ print $1 }'`
V_LINKS_TO_ORACLE=`find /etc/rc* -follow -inum $V_INODE -print 2>/dev/null`
V_NR_OF_LINKS_TO_ORACLE=`echo "$V_LINKS_TO_ORACLE" | grep -v '^ *$' | wc -l | awk '{ print $1 }'`
if [ "${V_NR_OF_LINKS_TO_ORACLE}" -ne 2 ]; then
msge "The number of links pointing to /etc/init.d/oracle different than 2. Something is wrong"
msgi "$V_LINKS_TO_ORACLE"
else
msgi "OK, two links found"
msgi "$V_LINKS_TO_ORACLE"
fi
;;
*)
echo "Unknown Solaris version!!! Exiting."
exit 1
;;
esac
msgb "${FUNCNAME[0]} Finished."
} #b_basic_os_healthcheck_sparc
# This function is used in block b_basic_db_healthcheck
# It takes a template file $1 and overrides in it all occurrences of the parameter lines found in $2
# $1 - main template $2 - exceptions file
f_override_template_with_exception()
{
# Override template parameters with exception entries.
# Parameter lines present in the exceptions file are removed from the
# template and the whole exceptions file is appended instead, so the
# exception values win.
# $1 - main template file (lines: action:parameter:value:comment)
# $2 - exceptions file (same format)
check_file "$1"
check_file "$2"
# Parameters in exceptions override those in template.
# Build a grep pattern file of ":parameter:" keys found in the exceptions file.
awk -F":" '{ print ":"$2":" }' "$2" > "$D_TMP/oracle_setup.tmp.template"
# Drop the overridden parameter lines from the template ...
$GREP -v -i -f "$D_TMP/oracle_setup.tmp.template" "$1" > "$D_TMP/oracle_setup_template.txt_new"
# ... keep a backup of the original, install the stripped copy ...
cp "$1" "$D_TMP/oracle_setup_template.txt_old"
cp "$D_TMP/oracle_setup_template.txt_new" "$1"
# ... and append the exception entries.
cat "$2" >> "$1"
} #f_override_template_with_exception
# Check if owner provided as $1 is an owner of the file provided as $2
f_check_owner()
{
# Verify that a file is owned by the expected user; report via msgi/msge.
# $1 - expected owner user name
# $2 - file to check
V_EXPECTED_OWNER=$1
V_FILENAME=$2
check_parameter $V_EXPECTED_OWNER
check_file $V_FILENAME
msgd "Provided V_EXPECTED_OWNER: $V_EXPECTED_OWNER"
msgd "Provided V_FILENAME: $V_FILENAME"
# Column 3 of `ls -l` is the owning user name.
V_ACTUAL_OWNER=`ls -l "$V_FILENAME" | awk '{ print $3 }'`
# BUGFIX: quote both sides of the comparison - an empty value previously
# produced a "[: unary operator expected" error instead of a clean mismatch.
if [ "${V_ACTUAL_OWNER}" = "${V_EXPECTED_OWNER}" ]; then
msgi "Expecting owner: $V_EXPECTED_OWNER on file $V_FILENAME . Found ${V_ACTUAL_OWNER} OK"
else
msge "Expecting owner: $V_EXPECTED_OWNER on file $V_FILENAME . Found ${V_ACTUAL_OWNER} NOT OK."
fi
} # f_check_owner
# Check if permission provided as $1 is an actual permission of the file provided as $2
f_check_permission()
{
# Verify that a file carries the expected permission string (as printed by
# `ls -l`, e.g. -rw-r--r--); report via msgi/msge.
# $1 - expected permission string
# $2 - file to check (missing file is reported, not fatal)
V_EXPECTED_PERMISSION=$1
V_FILENAME=$2
check_parameter $V_EXPECTED_PERMISSION
if [ -f "${V_FILENAME}" ]; then
msgd "Provided V_EXPECTED_PERMISSION: $V_EXPECTED_PERMISSION"
msgd "Provided V_FILENAME: $V_FILENAME"
# Column 1 of `ls -l` is the mode string. NOTE(review): on systems with
# ACLs/extended attributes it may carry a trailing '+' or '.', which will
# be reported as a mismatch - confirm whether that is acceptable.
V_ACTUAL_PERMISSION=`ls -l "$V_FILENAME" | awk '{ print $1 }'`
# BUGFIX: quote both sides of the comparison - an empty value previously
# broke the test instead of producing a clean mismatch report.
if [ "${V_ACTUAL_PERMISSION}" = "${V_EXPECTED_PERMISSION}" ]; then
msgi "Expecting permission: $V_EXPECTED_PERMISSION on file $V_FILENAME . Found ${V_ACTUAL_PERMISSION} OK"
else
msge "Expecting permission: $V_EXPECTED_PERMISSION on file $V_FILENAME . Found ${V_ACTUAL_PERMISSION} NOT OK."
fi
else
msge "File $V_FILENAME not found. Can not check permission."
fi # -f "${V_FILENAME}"
} #f_check_permission
b_check_if_init_params_agree_with_template()
{
# Compare the running database's init parameters against a version-specific
# template (optionally amended by standby, T2-hardware and per-SID exception
# files) and produce three report files in $D_TMP:
#   oracle_infra_OK.txt     - parameters that comply with the template
#   oracle_infra_ERROR.txt  - parameters that violate it
#   oracle_infra_CHANGE.txt - "alter system" statements that would fix them
# Template line format: action:parameter:value:comment
# Relies on globals set elsewhere: ORACLE_HOME, ORACLE_SID, V_RDBMS_VERSION,
# D_TMP, SCRIPTO_DIR, GREP.
msgb "${FUNCNAME[0]} Beginning."
# Transform init to all lower case to ease the checking
# (also strips the "SID." prefix and Oracle's auto-tuned "__" parameters).
cat $ORACLE_HOME/dbs/init${ORACLE_SID}.ora | tr '[A-Z]' '[a-z]' | sed 's;^[^.]*\.;;' | grep -v "__db_cache_size" | grep -v "__java_pool_size" | grep -v "__large_pool_size" | grep -v "__shared_pool_size" | grep -v "__streams_pool_size" | grep -v "__oracle_base" | grep -v "__pga_aggregate_target" | grep -v "__sga_target" | grep -v "__shared_io_pool_size" > $D_TMP/oracle_infra_init${ORACLE_SID}.ora
# Pick the template set matching the RDBMS major version.
case $V_RDBMS_VERSION in
"9.2")
TEMPLATE_FILE="$HOME/scripto/oracle/create_db_scripts/oracle_setup/template_HAL_9i.txt"
TEMPLATE_STB="$HOME/scripto/oracle/create_db_scripts/oracle_setup/exe_standby_9i.txt"
TEMPLATE_T2="$HOME/scripto/oracle/create_db_scripts/oracle_setup/exe_T2_9i.txt"
;;
"10.2")
TEMPLATE_FILE="$HOME/scripto/oracle/create_db_scripts/oracle_setup/template_HAL_10g.txt"
TEMPLATE_STB="$HOME/scripto/oracle/create_db_scripts/oracle_setup/exe_standby_10g.txt"
TEMPLATE_T2="$HOME/scripto/oracle/create_db_scripts/oracle_setup/exe_T2_10g.txt"
;;
"11.1"|"11.2")
TEMPLATE_FILE="$HOME/scripto/oracle/create_db_scripts/oracle_setup/template_HAL_11g.txt"
TEMPLATE_STB="$HOME/scripto/oracle/create_db_scripts/oracle_setup/exe_standby_11g.txt"
TEMPLATE_T2="$HOME/scripto/oracle/create_db_scripts/oracle_setup/exe_T2_11g.txt"
;;
*)
msge "Unknown rdbms version: $V_RDBMS_VERSION. Exiting"
;;
esac
check_file $TEMPLATE_FILE
check_file $TEMPLATE_STB
check_file $TEMPLATE_T2
msgi "Various special configurations, eg standby need some extra parameters"
msgi " I consider that as an exception and modify the original template accordingly"
msgi " Determining if this database needs an exception list"
# Work on a private copy so the master template is never modified.
cp $TEMPLATE_FILE $D_TMP/oracle_setup_template.txt
TEMPLATE_FILE=$D_TMP/oracle_setup_template.txt
msgi "Checking if this database has a standby. I check for parameter: standby_file_management."
TMP_CHK=`cat $D_TMP/oracle_infra_init${ORACLE_SID}.ora | grep -v '^ *$' | grep -i standby_file_management`
if [ `echo $TMP_CHK | grep -v '^ *$' | wc -l` -gt 0 ]; then
msga "This is a standby configuration. Adding additional rules."
msgi "$TMP_CHK"
f_override_template_with_exception $TEMPLATE_FILE $TEMPLATE_STB
else
msgi "This in NOT a standby configuration. No extra rules added."
fi
msgi "Checking if the machine if a T2 processors family"
# NOTE(review): the match is on "T5" in `uname -i` output while messages
# say "T2" - presumably T2-family (T5xxx) hardware; confirm.
MACHINE_ARCH=`uname -i`
msgi "Current architecture: $MACHINE_ARCH"
if [ `echo $MACHINE_ARCH | grep "T5"` ]; then
msga "I am on the T2 architecture. Adding additional rules."
f_override_template_with_exception $TEMPLATE_FILE $TEMPLATE_T2
else
msgi "I am NOT on T2 architecture. No extra rules added."
fi
check_directory $SCRIPTO_DIR
cd $SCRIPTO_DIR
msgi "Checking if this DB: $ORACLE_SID has its own init exceptions."
if [ -f exe_${ORACLE_SID}.txt ]; then
msga "Found DB specific exceptions. Adding additional rules."
f_override_template_with_exception $TEMPLATE_FILE ${SCRIPTO_DIR}/exe_${ORACLE_SID}.txt
else
msgi "No DB specific exceptions found."
fi
msgb "Analysing init file."
# Preparing environment for analysis
check_file $TEMPLATE_FILE
check_file $D_TMP/oracle_infra_init${ORACLE_SID}.ora
# Removing temporary files
rm -f $D_TMP/oracle_infra_OK.txt
rm -f $D_TMP/oracle_infra_ERROR.txt
rm -f $D_TMP/oracle_infra_CHANGE.txt
# Checking if all the parameters that should have value are set
# To do that I scan the template in search for check_if_* parameters and make sure that they are set in init
# I do not check their values, but only the existence
while read TEMPLATE_LINE
do
#echo -n "."
msgri "."
TEMPLATE_ACTION=`echo $TEMPLATE_LINE | awk -F":" '{ print $1 }'`
TEMPLATE_PAR=`echo $TEMPLATE_LINE | awk -F":" '{ print $2 }'`
TEMPLATE_VALUE=`echo $TEMPLATE_LINE | awk -F":" '{ print $3 }'`
#echo $TEMPLATE_LINE
#echo $TEMPLATE_ACTION
if [ `echo $TEMPLATE_ACTION | grep check_if_ | wc -l` -gt 0 ]; then
if [ `cat $D_TMP/oracle_infra_init${ORACLE_SID}.ora | grep "^${TEMPLATE_PAR}=" | wc -l` -lt 1 ]; then
echo "parameter should be set: $TEMPLATE_PAR" >> $D_TMP/oracle_infra_ERROR.txt
# I make the $TEMPLATE_VALUE uppercase to be consistent with how Oracle shows them
# during show parameter
TEMPLATE_VALUE=`echo $TEMPLATE_VALUE | tr '[a-z]' '[A-Z]'`
echo "alter system set $TEMPLATE_PAR=$TEMPLATE_VALUE scope=spfile sid='*';" >> $D_TMP/oracle_infra_CHANGE.txt
fi
fi
done < $TEMPLATE_FILE
# Loop through the init file and analyse the contents
# (second pass: every parameter actually set in init is validated against
# the template action for it - ignore / equal / less / more / set / not-set).
while read INIT_LINE
do
#echo -n "."
msgri "."
# Get init parameter from $INIT_LINE
INIT_PAR=`echo $INIT_LINE | awk -F"=" '{ print $1 }'`
INIT_VALUE=`echo $INIT_LINE | awk -F"=" '{ print $2 }'`
#echo $INIT_PAR; echo $INIT_VALUE
# Search the template for instructions
# Make sure there is 1 or 0 lines with instructions
TEMPLATE_CHECK=`cat $TEMPLATE_FILE | grep ":$INIT_PAR:" | wc -l`
if [ "$TEMPLATE_CHECK" -gt 1 ]; then
msge "There are two instructions or more in template regarding the same init parameter."
msge "It should not happen. Exiting."
cat $TEMPLATE_FILE | grep ":$INIT_PAR:"
exit 1
fi
TEMPLATE_LINE=`cat $TEMPLATE_FILE | grep ":$INIT_PAR:"`
TEMPLATE_ACTION=`echo $TEMPLATE_LINE | awk -F":" '{ print $1 }'`
TEMPLATE_PAR=`echo $TEMPLATE_LINE | awk -F":" '{ print $2 }'`
TEMPLATE_VALUE=`echo $TEMPLATE_LINE | awk -F":" '{ print $3 }'`
TEMPLATE_COMMENT=`echo $TEMPLATE_LINE | awk -F":" '{ print $4 }'`
#echo $TEMPLATE_LINE; echo $TEMPLATE_ACTION; echo $TEMPLATE_PAR; echo $TEMPLATE_VALUE; echo $TEMPLATE_COMMENT
case $TEMPLATE_ACTION in
"ignore")
#echo "OK. Ignoring parameter $INIT_PAR"
echo "ignoring: $INIT_LINE" >> $D_TMP/oracle_infra_OK.txt
;;
"check_if_equal")
if [ ! "$INIT_VALUE" = "$TEMPLATE_VALUE" ]; then
echo "value not equal: $INIT_LINE, should be: $TEMPLATE_VALUE" >> $D_TMP/oracle_infra_ERROR.txt
echo "alter system set $INIT_PAR=$TEMPLATE_VALUE scope=spfile sid='*';" >> $D_TMP/oracle_infra_CHANGE.txt
else
echo "value equal: $INIT_LINE" >> $D_TMP/oracle_infra_OK.txt
fi
;;
"check_if_less")
if [ "$INIT_VALUE" -gt "$TEMPLATE_VALUE" ]; then
echo "value too large: $INIT_LINE, should be: $TEMPLATE_VALUE" >> $D_TMP/oracle_infra_ERROR.txt
echo "alter system set $INIT_PAR=$TEMPLATE_VALUE scope=spfile sid='*';" >> $D_TMP/oracle_infra_CHANGE.txt
else
echo "value correct: $INIT_LINE" >> $D_TMP/oracle_infra_OK.txt
fi
;;
"check_if_more")
if [ "$INIT_VALUE" -lt "$TEMPLATE_VALUE" ]; then
echo "value too small: $INIT_LINE, should be: $TEMPLATE_VALUE" >> $D_TMP/oracle_infra_ERROR.txt
echo "alter system set $INIT_PAR=$TEMPLATE_VALUE scope=spfile sid='*';" >> $D_TMP/oracle_infra_CHANGE.txt
else
echo "value correct: $INIT_LINE" >> $D_TMP/oracle_infra_OK.txt
fi
;;
"check_if_set")
echo "value set: $INIT_LINE" >> $D_TMP/oracle_infra_OK.txt
;;
"do_not_set")
echo "parameter should not be set: $INIT_LINE" >> $D_TMP/oracle_infra_ERROR.txt
echo "alter system reset $INIT_PAR scope=spfile sid='*';" >> $D_TMP/oracle_infra_CHANGE.txt
;;
*)
echo "Unknown parameter for template: $INIT_PAR"
exit 0
;;
esac
done < $D_TMP/oracle_infra_init${ORACLE_SID}.ora
#echo ""
# Report phase: print violations and suggested fixes, if any were collected.
if [ -f $D_TMP/oracle_infra_ERROR.txt ]; then
msgi "You are on $USERNAME at `uname -n`"
msge "Parameters with wrong values or that should not be set for DB: $ORACLE_SID"
cat $D_TMP/oracle_infra_ERROR.txt | sort
fi
if [ -f $D_TMP/oracle_infra_CHANGE.txt ]; then
msgi "To change the configuration according to template you can issue:"
# for hidden parameters include them into "" to work
while read LINE
do
if [ `echo "$LINE" | awk '{ print $4 }' | grep '^_'` ]; then
echo "$LINE" | awk '{ print $1 " " $2 " " $3 " \"" $4 "\" " $5 " " $6 }'
else
echo $LINE
fi
done < $D_TMP/oracle_infra_CHANGE.txt
#cat $D_TMP/oracle_infra_CHANGE.txt | sort
fi
msgi "Done"
msgb "${FUNCNAME[0]} Finished."
} #b_check_if_init_params_agree_with_template
#Master block for DB health check, divided into architecture-specific, db-version-specific and common blocks
b_basic_db_healthcheck()
{
# Master DB health-check dispatcher. Runs the common checks first (which set
# the globals V_RDBMS_VERSION and V_DATABASE_ROLE used below), then the
# OS-architecture-, DB-version- and DB-role-specific blocks.
msgb "${FUNCNAME[0]} Beginning."
msgi "[b_basic_db_healthcheck] common blocks"
b_basic_db_healthcheck_common
b_check_if_init_params_agree_with_template
msgi "[b_basic_db_healthcheck] OS architecture dependend blocks"
case `uname -p` in
"sparc")
msgi "I am running on Solaris SPARC now, block: b_basic_db_healthcheck_os_sparc"
b_basic_db_healthcheck_os_sparc
;;
"i686"|"x86_64")
msgi "I am running on Linux i686 now, block: b_basic_db_healthcheck_os_linux"
b_basic_db_healthcheck_os_linux
;;
*)
msge "Unknown OS!!! Exiting."
exit 1
;;
esac
msgi "[b_basic_db_healthcheck] DB version dependend blocks"
# Note, for version dependent AND role dependent blocks see below
case $V_RDBMS_VERSION in
"9.2")
msgi "[b_basic_db_healthcheck] Version $V_RDBMS_VERSION"
msgi "[b_basic_db_healthcheck] NOT implemented"
;;
"10.2")
msgi "[b_basic_db_healthcheck] Version $V_RDBMS_VERSION"
msgi "[b_basic_db_healthcheck] implemented"
b_basic_db_healthcheck_db_10
;;
"11.2")
msgi "[b_basic_db_healthcheck] Version $V_RDBMS_VERSION"
msgi "[b_basic_db_healthcheck] NOT implemented"
;;
*)
msge "Unknown rdbms version: $V_RDBMS_VERSION. Exiting"
;;
esac
msgi "[b_basic_db_healthcheck] DB role (primary/standby) dependend blocks"
case ${V_DATABASE_ROLE} in
"PRIMARY")
msgi "Primary database"
b_basic_db_healthcheck_db_primary
b_check_if_recovery_was_taking_place_during_startup
msgi "Primary database dependend on the db version"
# Nested dispatch: checks that apply only to a PRIMARY of a given version.
case $V_RDBMS_VERSION in
"10.2")
b_basic_db_healthcheck_db_primary_10
;;
"11.2")
msgi "Version $V_RDBMS_VERSION"
msgi "Not implemented"
;;
*)
msgi "Not implemented in this rdbms version: $V_RDBMS_VERSION."
;;
esac
;;
"PHYSICAL STANDBY")
msgi "Standby database"
b_basic_db_healthcheck_db_standby
;;
*)
msge "Unknown DB role (primary/standby): ${V_DATABASE_ROLE}. Exiting"
;;
esac
msgb "${FUNCNAME[0]} Finished."
} #b_basic_db_healthcheck
b_basic_db_healthcheck_os_linux()
{
# Placeholder for Linux-specific DB health checks; nothing implemented yet.
msgb "${FUNCNAME[0]} Beginning."
msgi "Nothing here yet."
msgb "${FUNCNAME[0]} Finished."
} #b_basic_db_healthcheck_os_linux
b_basic_db_healthcheck_os_sparc()
{
# SPARC-specific DB checks. On T2-family hardware, 9i databases need
# cpu_count and _kghdsidx_count set in the init file (per internal wiki
# OracleKnowledge#Konfiguracja_baz_9i_na_serwerach).
msgb "${FUNCNAME[0]} Beginning."
msgi "Checking if the machine if a T2 processors family"
# NOTE(review): the match is on "T5" in `uname -i` output while messages
# say "T2" - presumably T2-family (T5xxx) hardware; confirm.
MACHINE_ARCH=`uname -i`
msgi "Current architecture: $MACHINE_ARCH"
if [ `echo $MACHINE_ARCH | grep "T5"` ]; then
msgi "I am on the T2 architecture, DB version: $V_RDBMS_VERSION"
msgi " special configuration may be needed depending on the DB version"
case $V_RDBMS_VERSION in
"9.2")
msgi "Checks according to OracleKnowledge#Konfiguracja_baz_9i_na_serwerach"
msgi "1. Parameter cpu_count should be set and to a value lower/equal to 8"
# Only presence is verified here, not the actual value.
TMP_CHK=`cat $ORACLE_HOME/dbs/init${ORACLE_SID}.ora | grep cpu_count`
if [ `echo $TMP_CHK | grep -v '^ *$' | wc -l` -ge 1 ]; then
msgi "OK, parameter set: $TMP_CHK"
else
msge "Parameter cpu_count not set on T2 machine."
fi
TMP_CHK=`cat $ORACLE_HOME/dbs/init${ORACLE_SID}.ora | grep _kghdsidx_count`
if [ `echo $TMP_CHK | grep -v '^ *$' | wc -l` -ge 1 ]; then
msgi "OK, parameter set: $TMP_CHK"
else
msge "Parameter _kghdsidx_count not set on T2 machine."
fi
# experiencing one case with that error, ignoring for now
#TMP_CHK=`cat $ORACLE_HOME/dbs/init${ORACLE_SID}.ora | grep "timed_statistics=false"`
#echo $TMP_CHK
#if [ `echo $TMP_CHK | grep -v '^ *$' | wc -l` -ge 1 ]; then
# msgi "OK, parameter set: $TMP_CHK"
#else
# msge "Parameter timed_statistics=false not set on T2 machine."
#fi
;;
*)
msgi "No special actions needed. OK."
;;
esac
else
msgi "I am NOT on T2 architecture, no special checks done"
fi
msgb "${FUNCNAME[0]} Finished."
} #b_basic_db_healthcheck_os_sparc
b_basic_db_healthcheck_db_10()
{
# 10g-specific DB checks: ownership/permission of EM agent binaries and
# oradism, leftover 9.2 binaries and install media after upgrade, the 10g
# EM agent process, and - when a standby exists - Data Guard broker log
# permissions, oralms monitoring and the DGMGRL listener service.
msgb "${FUNCNAME[0]} Beginning."
# well, I am not sure any more, it is free and defcon looks still useful
# msgi "Checking if statspack snap is created from crontab. It should not be."
# V_TMP=`crontab -l | grep -v "^#" | grep "execute statspack.snap" | wc -l`
# if [ "$V_TMP" -gt 0 ]; then
# msge "There are still statspack snapschots made from crontab. Use AWR reports instead"
# fi
msgi "Checking if certain oracle binaries have proper permissions"
f_check_owner root $ORACLE_HOME/bin/nmo
#on 10.2.0.5 they changed permission, I skip checking # f_check_permission -rwsr-s--- $ORACLE_HOME/bin/nmo
f_check_owner root $ORACLE_HOME/bin/nmb
#on 10.2.0.5 they changed permission, I skip checking # f_check_permission -rwsr-s--- $ORACLE_HOME/bin/nmb
f_check_owner root $ORACLE_HOME/bin/oradism
f_check_permission -r-sr-s--- $ORACLE_HOME/bin/oradism
msgi "Checking if there are still old 9.2 binaries present"
if [ -d $ORACLE_HOME/../9.2.0 ]; then
msge "There are still 9.2 binaries present in 10.2 database. Consider running 03_post_upgrade.sh"
fi
if [ -d /$ORACLE_SID ]; then
msgi "Checking if there are still installation binaries present under /SID (default location)"
V_TMP=`ls /$ORACLE_SID | $GREP -e _bin_agent10205 -e _patch10204 -e _10g_bin -e _10g_com -e _patches`
if [ `echo $V_TMP | grep -v '^ *$' | wc -l` -gt 0 ]; then
msge "Looks like those binaries or upgrade logs are still present: $V_TMP . Consider deleting the installation binaries."
fi
fi
msgi "Checking if 10g agent is running"
TMP_CHK=`ps -ef | grep -v grep | grep emagent`
if [ `echo $TMP_CHK | grep -v '^ *$' | wc -l` -lt 1 ]; then
msge "Looks like the 10g agent is not running, check and fix it"
else
msgi "Looks like the 10g agent is running. OK"
msgi "$TMP_CHK"
fi # ps -ef
msgi "Checking if this database has a standby. I check for parameter: standby_file_management."
TMP_CHK=`cat $ORACLE_HOME/dbs/init${ORACLE_SID}.ora | grep -v '^ *$' | grep -i standby_file_management`
if [ `echo $TMP_CHK | grep -v '^ *$' | wc -l` -gt 0 ]; then
msgi "This is a standby configuration. Actions related to standby configuration go here."
msgi "Check permission of drc${ORACLE_SID}.log"
# Data Guard broker log lives in background_dump_dest.
f_execute_sql "select VALUE from v\$parameter where NAME='background_dump_dest';"
V_DG_LOG=$V_EXECUTE_SQL
V_DG_LOG="${V_EXECUTE_SQL}/drc${ORACLE_SID}.log"
msgd "V_DG_LOG: $V_DG_LOG"
check_file $V_DG_LOG
f_check_permission -rw-r--r-- $V_DG_LOG
msgi "Checking if Data Guard logs look like beeing monitored by oralms"
# oralms monitoring shows up as a `tail` process on the DG log.
TMP_CHK=`ps -ef | grep -v grep | grep tail | grep drc${ORACLE_SID}.log`
if [ `echo $TMP_CHK | grep -v '^ *$' | wc -l` -lt 1 ]; then
msge "Looks like the DG alert log is not monitored by oralms, check and fix it"
else
msgi "Looks like the DG alert log is monitored by oralms. OK"
msgi "$TMP_CHK"
fi # ps -ef
msgi "Checking if listener has service name for DG Broker. See StandbySetupOracle10 for explanation"
TMP_CHK=`$LSNRCTL status | grep ${V_DB_UNIQUE_NAME}_DGMGRL.${V_DB_DOMAIN}`
if [ `echo $TMP_CHK | grep -v '^ *$' | wc -l` -ne 1 ]; then
msge "Looks like the listener does not have the service name for DG Broker. Check and fix it"
else
msgi "Looks like the listener does have the service name for DG Broker. OK"
msgi "$TMP_CHK"
fi # ps -ef
fi # Actions if DB is DB with standby
msgb "${FUNCNAME[0]} Finished."
} #b_basic_db_healthcheck_db_10
b_basic_db_healthcheck_db_primary_10()
{
# 10g primary-only check: the automatic statistics gathering job should run
# with ESTIMATE_PERCENT=100 (site standard).
msgb "${FUNCNAME[0]} Beginning."
msgi "Checking if statistics estimate is 100"
f_execute_sql "select dbms_stats.get_param('ESTIMATE_PERCENT') from dual;"
V_TMP=$V_EXECUTE_SQL
msgd "V_TMP: $V_TMP"
# Map the known non-numeric answers to 0 so the numeric test below works.
if [ "${V_TMP}" = "NULL" ]; then
V_TMP=0
elif [ "${V_TMP}" = "DBMS_STATS.AUTO_SAMPLE_SIZE" ]; then
V_TMP=0
fi
# ROBUSTNESS: any other non-numeric output (e.g. an ORA- error text from
# sqlplus) previously broke the integer comparison with
# "[: integer expression expected". Treat it as 0 (= not compliant).
case "${V_TMP}" in
''|*[!0-9]*) V_TMP=0 ;;
esac
if [ "${V_TMP}" -ne 100 ]; then
msge "The estimation for auto gathering statistics is not 100 on DB: $ORACLE_SID. Not a PGF standard."
msge "Change it by running: >exec DBMS_STATS.SET_PARAM('ESTIMATE_PERCENT','100');"
fi
msgb "${FUNCNAME[0]} Finished."
}
b_check_if_recovery_was_taking_place_during_startup()
{
# Detect whether the last database startup performed crash recovery, which
# would indicate the DB was NOT shut down cleanly for cold backup.
# Method: within the last 15000 alert-log lines, find the relative line
# numbers of the last "CLOSE NORMAL", "DATABASE OPEN" and "crash recovery"
# markers and verify they occur in the healthy order: recover < close < open.
# Requires V_ALERT_LOG (set in b_basic_db_healthcheck_common); PRIMARY only.
msgb "${FUNCNAME[0]} Beginning."
msgi "Checking if last startup was done withou recovery, which would indicate clean shutdown"
msgi "This check is introduced to spot DB that does not shutdown properly for cold backup"
msgi "This checking makes sense only for PRIMARY database, not for standby"
msgd "Determining line with last Completed: ALTER DATABASE CLOSE NORMAL"
msgd "V_ALERT_LOG: $V_ALERT_LOG"
# grep -n numbers lines within the tail window; tail -1 keeps the last hit.
V_ALERT_DB_CLOSE=`tail -15000 $V_ALERT_LOG | $GREP -B 1 -n -i "Completed: ALTER DATABASE CLOSE NORMAL" | tail -1 | awk -F":" '{ print $1 }'`
if [ -z "$V_ALERT_DB_CLOSE" ]; then
msgd "Strange, alert log does not contain any info about database beeing closed. Ever."
msgd "Can happen when alert log is rotated. Setting V_ALERT_DB_CLOSE to 1"
V_ALERT_DB_CLOSE=1
fi
msgd "V_ALERT_DB_CLOSE: $V_ALERT_DB_CLOSE"
msgd "Determining line with last Completed: ALTER DATABASE OPEN"
V_ALERT_DB_OPEN=`tail -15000 $V_ALERT_LOG | $GREP -B 1 -n -i "Completed: ALTER DATABASE OPEN" | tail -1 | awk -F":" '{ print $1 }'`
if [ -z "$V_ALERT_DB_OPEN" ]; then
msgd "Strange, alert log does not contain any info about database startup. Ever."
msgd "Should not happen. Setting V_ALERT_DB_RECOVER to 0"
V_ALERT_DB_OPEN=0
fi
msgd "V_ALERT_DB_OPEN: $V_ALERT_DB_OPEN"
msgd "Determining line with last Beginning crash recovery of"
V_ALERT_DB_RECOVER=`tail -15000 $V_ALERT_LOG | $GREP -B 1 -n -i "Beginning crash recovery of" | tail -1 | awk -F":" '{ print $1 }'`
if [ -z "$V_ALERT_DB_RECOVER" ]; then
msgd "Strange, alert log does not contain any info about database doing crash recovery. Ever."
msgd "Can happen when alert log is rotated. Setting V_ALERT_DB_RECOVER to 0"
V_ALERT_DB_RECOVER=0
fi
msgd "V_ALERT_DB_RECOVER: $V_ALERT_DB_RECOVER"
msgd "Determining whether the order is normal or indicates recovery during startup, which should not happen"
msgd "if ( ${V_ALERT_DB_RECOVER} < ${V_ALERT_DB_CLOSE} ) and ( ${V_ALERT_DB_CLOSE} < ${V_ALERT_DB_OPEN} )"
msgd "if ( V_ALERT_DB_RECOVER < V_ALERT_DB_CLOSE ) and ( V_ALERT_DB_CLOSE < V_ALERT_DB_OPEN )"
if [ "${V_ALERT_DB_RECOVER}" -lt "${V_ALERT_DB_CLOSE}" ] && [ "${V_ALERT_DB_CLOSE}" -lt "${V_ALERT_DB_OPEN}" ]; then
msgd "The order seems fine: recover(past, not related), close, open"
else
msge "Database $ORACLE_SID has performed automatic instance recovery before last open. This should not happen and indicates, that DB was not shutdown cleanly to cold backup. Check the alert log."
fi
msgb "${FUNCNAME[0]} Finished."
}
b_basic_db_healthcheck_db_primary()
{
# Primary-role DB checks: supplemental logging, deferror backlog size,
# unusable indexes/partitions, leftover SET_TRACE logon trigger, and a
# precautionary "backup controlfile to trace".
msgb "${FUNCNAME[0]} Beginning."
msgi "Checking if supplemental logging is enabled."
f_execute_sql "SELECT SUPPLEMENTAL_LOG_DATA_MIN FROM V\$DATABASE;"
msgd "V_EXECUTE_SQL: $V_EXECUTE_SQL"
if [ ! "${V_EXECUTE_SQL}" = "YES" ]; then
msge "Supplemental logging should be turned on. DB: $ORACLE_SID"
msge "See HalInfrastructureSupplementalLogging"
fi
# BUGFIX: assign the threshold BEFORE the message that prints it; it used
# to be assigned one line later, so the message showed an empty value.
V_DEFERROR_TRESHOLD=2000
msgi "Checking if number of rows in deferror table is greater than: $V_DEFERROR_TRESHOLD"
f_execute_sql "select count(*) from sys.deferror;"
if [ "$V_EXECUTE_SQL" -gt "$V_DEFERROR_TRESHOLD" ]; then
msge "Number if rows in deferror table: $V_EXECUTE_SQL larger than $V_DEFERROR_TRESHOLD"
msge "It has been agreed that errors older than 30 days can be safely deleted."
msge "There should be an automatic procedure implemented to delete the old deferror entries."
msge "See https://twiki.pgf.com.pl/cgi-bin/twiki/view/Main/InstallingOracle10gOnSolaris10#Standard_post_install_actions_ch"
msge "If You wand to manually delete the errors:"
msge "See https://twiki.pgf.com.pl/cgi-bin/twiki/view/Main/HalReplikacja#Delete_transactions_from_DEFERRO for solution"
fi
msgi "Number of rows in deferror table: $V_EXECUTE_SQL"
msgi "Checking if there are unusable indexes"
f_execute_sql_no_rows_expected "select owner,index_name from dba_indexes where status='UNUSABLE' order by owner;"
f_execute_sql_no_rows_expected "select index_owner,index_name,partition_name from dba_ind_partitions where status='UNUSABLE';"
msgi "I sometimes setup an logon trigger to trace certain user actions. This is to check if I forgot to delete it."
f_execute_sql_no_rows_expected "select TRIGGER_NAME from dba_triggers where TRIGGER_NAME='SET_TRACE';"
msgi "Just in case. Controlfile to trace"
f_execute_sql "alter database backup controlfile to trace;"
msgi "$V_EXECUTE_SQL"
msgb "${FUNCNAME[0]} Finished."
} #b_basic_db_healthcheck_db_primary
b_basic_db_healthcheck_db_standby()
{
# Standby-role DB check: Flashback Database must be enabled on the standby
# (so it can be reinstated after a failover).
msgb "${FUNCNAME[0]} Beginning."
msgi "Checking if flashback db is turned on on standby database"
f_execute_sql "select FLASHBACK_ON from v\$database;"
msgd "V_EXECUTE_SQL: $V_EXECUTE_SQL"
if [ ! "${V_EXECUTE_SQL}" = "YES" ]; then
msge "Flashback db should be turned on on standby database"
msge "Run ./oracle_setup.sh b_implement_flashback_database"
fi
msgb "${FUNCNAME[0]} Finished."
} #b_basic_db_healthcheck_db_standby
b_basic_db_healthcheck_common()
{
# Common DB checks, run first. Sets the globals used by the other
# healthcheck blocks: V_RDBMS_VERSION, V_DB_NAME, V_DB_UNIQUE_NAME,
# V_DB_DOMAIN, V_DATABASE_ROLE, V_ALERT_LOG, SQLPLUS, LSNRCTL.
# Also verifies alert-log permissions, spfile usage (and dumps a pfile for
# later analysis), listener service registration and alert-log monitoring.
msgb "${FUNCNAME[0]} Beginning."
# Set usefull variables
f_execute_sql "select VERSION from v\$instance;"
V_RDBMS_VERSION=`echo $V_EXECUTE_SQL | awk -F"." '{ print $1 "." $2 }'`
f_execute_sql "select upper(VALUE) from v\$parameter where NAME='db_name';"
V_DB_NAME=$V_EXECUTE_SQL
f_execute_sql "select upper(VALUE) from v\$parameter where NAME='db_unique_name';"
V_DB_UNIQUE_NAME=$V_EXECUTE_SQL
if [ "$V_DB_UNIQUE_NAME" = "no rows selected" ]; then
msgd "db_unique_name parameter not sed (probably pre 10g db. Taking db_name instead."
V_DB_UNIQUE_NAME=$V_DB_NAME
fi
msgd "V_DB_UNIQUE_NAME: $V_DB_UNIQUE_NAME"
f_execute_sql "select upper(VALUE) from v\$parameter where NAME='db_domain';"
V_DB_DOMAIN=$V_EXECUTE_SQL
f_execute_sql "select DATABASE_ROLE from v\$database;"
V_DATABASE_ROLE=$V_EXECUTE_SQL
msgd "V_DATABASE_ROLE: $V_DATABASE_ROLE"
msgd "Checking permission of alert log"
# Alert log lives in background_dump_dest.
f_execute_sql "select VALUE from v\$parameter where NAME='background_dump_dest';"
V_ALERT_LOG=$V_EXECUTE_SQL
V_ALERT_LOG="${V_EXECUTE_SQL}/alert_${ORACLE_SID}.log"
msgd "V_ALERT_LOG: $V_ALERT_LOG"
check_file $V_ALERT_LOG
f_check_permission -rw-r--r-- $V_ALERT_LOG
msgi "Check if spfile is used. It should."
SQLPLUS=$ORACLE_HOME/bin/sqlplus
check_file $SQLPLUS
f_execute_sql "select VALUE from v\$parameter where NAME='spfile';"
if [ ! `echo $V_EXECUTE_SQL | grep spfile` ]; then
echo $V_EXECUTE_SQL
msge "Spfile is NOT used. Can not continue."
exit 1
fi
msgi "Create pfile from spfile for the purpose of analysis"
# b_check_if_init_params_agree_with_template parses this pfile later.
f_execute_sql "create pfile from spfile;"
check_file "$ORACLE_HOME/dbs/init${ORACLE_SID}.ora"
LSNRCTL=$ORACLE_HOME/bin/lsnrctl
check_file $LSNRCTL
msgi "Checking listener registered databases only if listener.ora found"
if [ -f "$ORACLE_HOME/network/admin/listener.ora" ]; then
msgi "Listener.ora found. Checking if listener has service name for ${V_DB_UNIQUE_NAME}.${V_DB_DOMAIN}"
TMP_CHK=`$LSNRCTL status | grep ${V_DB_UNIQUE_NAME}.${V_DB_DOMAIN}`
if [ `echo $TMP_CHK | grep -v '^ *$' | wc -l` -ne 1 ]; then
msge "Looks like the listener does not have the service name for DB_UNIQUE_NAME.DB_DOMAIN: ${V_DB_UNIQUE_NAME}.${V_DB_DOMAIN}. Check and fix it"
else
msgi "Looks like the listener does have the service name for DB_UNIQUE_NAME.DB_DOMAIN: ${V_DB_UNIQUE_NAME}.${V_DB_DOMAIN}. OK"
msgi "$TMP_CHK"
fi # ps -ef
msgi "Checking if listener has service name for ${V_DB_NAME}.${V_DB_DOMAIN}"
TMP_CHK=`$LSNRCTL status | grep ${V_DB_NAME}.${V_DB_DOMAIN}`
if [ `echo $TMP_CHK | grep -v '^ *$' | wc -l` -ne 1 ]; then
msge "Looks like the listener does not have the service name for DB_NAME.DB_DOMAIN: ${V_DB_NAME}.${V_DB_DOMAIN}. Check and fix it"
else
msgi "Looks like the listener does have the service name for DB_NAME.DB_DOMAIN: ${V_DB_NAME}.${V_DB_DOMAIN}. OK"
msgi "$TMP_CHK"
fi # ps -ef
else
msgi "Listener.ora NOT found."
fi #$ORACLE_HOME/network/admin/listener.ora
msgi "Checking if alert log look like beeing monitored"
# Monitoring shows up as a `tail` process on the alert log.
TMP_CHK=`ps -ef | grep -v grep | grep tail | grep alert_${ORACLE_SID}.log`
if [ `echo $TMP_CHK | grep -v '^ *$' | wc -l` -lt 1 ]; then
msge "Looks like the DB alert log: alert_${ORACLE_SID}.log is not monitored, check and fix it"
else
msgi "Looks like the DB alert log: alert_${ORACLE_SID}.log is monitored. OK"
msgi "$TMP_CHK"
fi # ps -ef
msgb "${FUNCNAME[0]} Finished."
} #b_basic_db_healthcheck_common
###########################
b_implement_flashback_database()
{
# Interactive block: enable Flashback Database on the current instance.
# Prompts the operator, optionally configures a Flash Recovery Area sized
# for ~3 days of redo, then restarts the DB to mount and turns flashback on.
# WARNING: restarts the database - run only in a maintenance window.
B_PAR=$1 # Check if block was run with parameter
# Info section
msgb "Implement Flashback Database functionality"
# If block was run with INFO parameter I exit now
if [ "$B_PAR" = "INFO" ]; then
return 0
fi
msgi "Check if flashback database is enabled"
f_execute_sql "select FLASHBACK_ON from v\$database;"
echo $V_EXECUTE_SQL
if [ ! "$V_EXECUTE_SQL" = "NO" ]; then
msgi "Flashback database already enabled. Exiting."
exit 0
else
msgi "Flashback database not enabled. Continuing."
fi
msgw "The following actions will make the currently running database UNAVAILABLE to end users."
# Insist on a literal "yes" before doing anything disruptive.
ACTION_FINISHED=""
while [ ! "$ACTION_FINISHED" = "yes" ]
do
read -p "Are you sure, that you are doing that in scheduled maintenance? (yes/any)" ACTION_FINISHED
done
msgi "Check if spfile is used"
f_check_if_spfile_is_used
msgi "Check if database is in archivelog mode"
f_execute_sql "select log_mode from v\$database;"
echo $V_EXECUTE_SQL
if [ ! "$V_EXECUTE_SQL" = "ARCHIVELOG" ]; then
msge "Database NOT in archivelog mode. Exiting."
# BUGFIX: the message promised to exit but the code carried on and would
# have tried to enable flashback on a NOARCHIVELOG database. Abort here.
exit 1
fi
msgi "Check if flash recovery area is already set"
f_execute_sql "select value from v\$parameter where name = 'db_recovery_file_dest';"
echo $V_EXECUTE_SQL
if [ ! -z "$V_EXECUTE_SQL" ]; then
msgi "Flash recovery area already configured. Skipping this part."
f_execute_sql "select name, value from v\$parameter where name like 'db_recovery_file_dest%';"
# NOTE(review): $F_EXECUTE_SQL looks like the spool file written by
# f_execute_sql - confirm it is set by that helper.
cat $F_EXECUTE_SQL
else
msga "Configuring flash recovery area (FRA)"
msgi "Estimate the recommended size of FRA for 3 days of flashback database capability (minimum * 2)"
# The following estimation could also be used, but that requires the flashback database to be active for some time
# SELECT ESTIMATED_FLASHBACK_SIZE FROM V$FLASHBACK_DATABASE_LOG;
f_execute_sql "select round((redo_size.bytes * nr_switched.count * 2)/1024/1024/1024) GB from ( select distinct BYTES from sys.v\$log ) redo_size, ( select count(*) count from gv\$loghist where FIRST_TIME > sysdate -3 ) nr_switched;"
echo $V_EXECUTE_SQL
V_FRA_SIZE=$V_EXECUTE_SQL
msga "The suggested size of FRA in GB is: $V_FRA_SIZE"
msgw "Do you want to change it? (y/any)"
read -p "" V_TMP_CHANGE_VALUE
if [ "$V_TMP_CHANGE_VALUE" = "y" ]; then
read -p "Provide new size in GB: " V_FRA_SIZE
fi
msga "Size of FRA will be set in GB to: $V_FRA_SIZE"
msgi "Configuring the location of FRA"
V_FRA_LOCATION=/${ORACLE_SID}/flash_recovery_area
msga "The suggested location of FRA is: $V_FRA_LOCATION"
msgw "Do you want to change it? (y/any)"
read -p "" V_TMP_CHANGE_VALUE
if [ "$V_TMP_CHANGE_VALUE" = "y" ]; then
read -p "Provide new location: " V_FRA_LOCATION
fi
msga "Location of FRA will be set to: $V_FRA_LOCATION"
run_command_e "mkdir -p $V_FRA_LOCATION"
f_execute_sql "alter system set DB_RECOVERY_FILE_DEST_SIZE=${V_FRA_SIZE}G scope=spfile;"
cat $F_EXECUTE_SQL
f_execute_sql "alter system set DB_RECOVERY_FILE_DEST='$V_FRA_LOCATION' scope=spfile;"
cat $F_EXECUTE_SQL
msgi "Restart database"
# Restart is required for the spfile-scoped FRA parameters to take effect.
f_execute_sql "shutdown immediate"
cat $F_EXECUTE_SQL
f_execute_sql "startup"
cat $F_EXECUTE_SQL
fi #flash recovery area is already set
msgi "Enabling flashback database."
f_execute_sql "select FLASHBACK_ON from v\$database;"
echo $V_EXECUTE_SQL
if [ ! "$V_EXECUTE_SQL" = "NO" ]; then
msgi "Flashback database already enabled. Skipping this part."
else
msga "Enabling flashback database."
# Default retention: 4320 minutes = 3 days.
V_FDB_RETENTION=4320
msga "The suggested retention time (in min) is $V_FDB_RETENTION (`expr $V_FDB_RETENTION / 24 / 60` days)"
msgw "Do you want to change it? (y/any)"
read -p "" V_TMP_CHANGE_VALUE
if [ "$V_TMP_CHANGE_VALUE" = "y" ]; then
read -p "Provide new retention time in min: " V_FDB_RETENTION
fi
msga "Retention time in min set to: $V_FDB_RETENTION"
msgi "Mount database to enable flashback db"
# FLASHBACK ON must be issued in MOUNT state on 10g.
f_execute_sql "shutdown immediate"
cat $F_EXECUTE_SQL
f_execute_sql "startup mount"
cat $F_EXECUTE_SQL
f_execute_sql "ALTER SYSTEM SET DB_FLASHBACK_RETENTION_TARGET=${V_FDB_RETENTION};"
cat $F_EXECUTE_SQL
f_execute_sql "ALTER DATABASE FLASHBACK ON;"
cat $F_EXECUTE_SQL
f_execute_sql "alter database open;"
cat $F_EXECUTE_SQL
fi #Check if flashback database is enabled
msgb "${FUNCNAME[0]} Finished."
} #b_implement_flashback_database
b_check_and_fix_orphan_links_resulting_from_rsync_copy()
{
# Find symlinks under $ORACLE_HOME whose target is missing (rsync strips
# the leading '/' of absolute link targets when copying between hosts) and
# interactively offer to recreate each one with the slash restored.
# $1 == "CHECK" -> only detect and report, never modify anything.
B_PAR=$1 # Check if block was run with parameter
# Info section
msgb "${FUNCNAME[0]} Beginning."
msgb "For security reasons rsync does not properly handles absolut links when copying between hosts."
msgb "He eats the leading slash. This block checks for links that are broken under specified directory,"
msgb "and suggests a change that will fix this problem."
msgi "I will check all the links under current ORACLE_HOME: $ORACLE_HOME"
check_parameter $ORACLE_HOME
check_directory $ORACLE_HOME
F_LINKS_LIST=$D_TMP/links_list.txt
find "$ORACLE_HOME" -type l > "$F_LINKS_LIST"
F_INVALID_LINKS_LIST=$D_TMP/invalid_links_list.txt
rm -f "$F_INVALID_LINKS_LIST"
msgi "Looping through the list and checking if links are valid"
while IFS= read -r LINE
do
msgd "-- Checking: $LINE"
# 'ls -lL' dereferences the link; a non-zero status means the target is gone
if ! ls -lL -- "$LINE" > /dev/null; then
msgd "-- link not valid"
printf '%s\n' "$LINE" >> "$F_INVALID_LINKS_LIST"
fi
done < "$F_LINKS_LIST"
if [ -f "$F_INVALID_LINKS_LIST" ]; then
msge "Invalid links found that probably result from rsync copy."
msge "This can be fixed by running ./oracle_setup.sh b_check_and_fix_orphan_links_resulting_from_rsync_copy"
else
msgi "No invalid links found. OK."
return 0
fi
# If block was run with CHECK parameter I exit now, before any permanent actions are done
if [ "$B_PAR" = "CHECK" ]; then
return 0
fi
check_file "$F_INVALID_LINKS_LIST"
msgi "Analysing each failed link and preparing corrective actions"
# fd 6 keeps the list readable while the inner 'read -p' prompts use stdin
exec 6<"$F_INVALID_LINKS_LIST"
while read -u 6 -r LINE
do
msgi ""
msgi "Invalid link: $LINE"
# readlink returns the stored target verbatim; parsing 'ls -l' output with
# awk/tr (as before) corrupted targets containing spaces
V_INVALID_LINK=$(readlink -- "$LINE")
V_PROBABLY_FIXED_LINK="/${V_INVALID_LINK}"
msgi "Pointing to: $V_INVALID_LINK"
msgi "Probably should point to: $V_PROBABLY_FIXED_LINK"
msgi "Checking if fixed link would be valid"
if ! ls -l -- "$V_PROBABLY_FIXED_LINK"; then
msge "Fixed link not valid, I did not expected that. Exiting, fix it manually."
exit 1
else
msgi "Fixed link would work OK"
fi
read -p "[wait] Do you want to delete the old link and create the new one? (y/any)" V_ANSWER
if [ "$V_ANSWER" = "y" ]; then
msga "Deleting old link"
run_command_e "rm $LINE"
msga "Creating new link"
run_command_e "ln -s $V_PROBABLY_FIXED_LINK $LINE"
fi #$V_ANSWER" = "y"
done #while read $F_INVALID_LINKS_LIST
msgb "${FUNCNAME[0]} Finished."
} #b_check_and_fix_orphan_links_resulting_from_rsync_copy
# Function used in b_PGF_db_files_layout to suggest db files location changes
f_change_location_db_file()
{
# Append the shell 'mv' and SQL 'alter database rename file' commands needed
# to move datafile $1 into directory $2 to $F_OUTPUT_SH_COMMANDS /
# $F_OUTPUT_SQL_COMMANDS. Refuses to cross filesystems (mv must be a cheap
# same-fs rename); does nothing when source dir == destination dir.
msgd "${FUNCNAME[0]} Enter."
V_SOURCE_PATH=$1
V_DEST_DIR=$2
check_file $V_SOURCE_PATH
check_directory $V_DEST_DIR
V_SOURCE_FILENAME=$(basename -- "$V_SOURCE_PATH")
V_SOURCE_DIR=$(dirname -- "$V_SOURCE_PATH")
msgd "V_SOURCE_PATH $V_SOURCE_PATH"
msgd "V_SOURCE_FILENAME $V_SOURCE_FILENAME"
msgd "V_SOURCE_DIR $V_SOURCE_DIR"
msgd "V_DEST_DIR $V_DEST_DIR"
msgd "Make sure that both V_SOURCE_DIR and V_DEST_DIR are on the same filesystem"
# 'df -P dir' gives one POSIX-format line per filesystem (field 6 = mount
# point); the previous 'cd dir; df -h .' approach also left the caller's
# working directory changed as a side effect.
V_SOURCE_DIR_FS=$(df -P -- "$V_SOURCE_DIR" | awk 'NR==2 { print $6 }')
V_DEST_DIR_FS=$(df -P -- "$V_DEST_DIR" | awk 'NR==2 { print $6 }')
if [ "${V_SOURCE_DIR_FS}" == "${V_DEST_DIR_FS}" ]; then
msgd "Filesystem are the same. OK"
else
msge "Source filesystem: ${V_SOURCE_DIR_FS} is different from destination ${V_DEST_DIR_FS} . Exiting"
exit 1
fi
if [ "${V_SOURCE_DIR}" == "${V_DEST_DIR}" ]; then
msgd "Source and Destination path is the same. Doing nothing"
else
msgd "Preparing commands for path change"
echo "mv -i $V_SOURCE_PATH ${V_DEST_DIR}/${V_SOURCE_FILENAME}" >> $F_OUTPUT_SH_COMMANDS
echo "alter database rename file '$V_SOURCE_PATH' to '${V_DEST_DIR}/${V_SOURCE_FILENAME}';" >> $F_OUTPUT_SQL_COMMANDS
fi
msgd "${FUNCNAME[0]} Finished."
} #f_change_location_db_file
b_change_db_files_to_PGF_layout()
{
# Prepare a migration of all database files (controlfiles, redo members,
# temp files, data files) into the "PGF standard" /SID/u0N/SID layout.
# The block only GENERATES command files under $D_TMP (shell 'mv' commands
# and SQL 'rename file' commands) plus printed instructions; the operator
# reviews and runs them manually. Nothing is moved by this block itself.
B_PAR=$1 # Check if block was run with parameter
# Info section
msgb "${FUNCNAME[0]} Beginning."
msgb "This block checks if database files have standard layout"
# If block was run with CHECK parameter I exit now, before any permanent actions are done
if [ "$B_PAR" = "CHECK" ]; then
return 0
fi
msgi "This procedure can be used on RDBMS 10g or later. Checking if this is true."
f_execute_sql "select regexp_substr(VALUE, '[[:alnum:]]*') from v\$parameter where NAME='compatible';"
# NOTE(review): '-le 10' accepts versions <= 10; for a "10g or later"
# requirement '-ge 10' looks intended - confirm before relying on this gate.
if [ "$V_EXECUTE_SQL" -le 10 ]; then
echo $V_EXECUTE_SQL
msgi "OK. 10g or above used."
else
echo $V_EXECUTE_SQL
msge "Can not continue. Not sufficient DB version."
exit 1
fi
msga "Creating directories for new layout"
run_command_e "mkdir -p /${ORACLE_SID}/u02/${ORACLE_SID}"
run_command_e "mkdir -p /${ORACLE_SID}/u03/${ORACLE_SID}"
run_command_e "mkdir -p /${ORACLE_SID}/u04/${ORACLE_SID}"
run_command_e "mkdir -p /${ORACLE_SID}/u05/${ORACLE_SID}"
run_command_e "mkdir -p /${ORACLE_SID}/${ORACLE_SID}_arch_1"
run_command_e "mkdir -p /${ORACLE_SID}/${ORACLE_SID}_arch_2"
# Cleaning output files
F_OUTPUT_SH_COMMANDS=$D_TMP/output.sh
F_OUTPUT_SQL_COMMANDS=$D_TMP/output.sql
rm -f $F_OUTPUT_SH_COMMANDS $F_OUTPUT_SQL_COMMANDS
touch $F_OUTPUT_SH_COMMANDS $F_OUTPUT_SQL_COMMANDS
check_file $F_OUTPUT_SH_COMMANDS
check_file $F_OUTPUT_SQL_COMMANDS
echo "#!/bin/bash" >> $F_OUTPUT_SH_COMMANDS
echo "set -x" >> $F_OUTPUT_SH_COMMANDS
chmod 750 $F_OUTPUT_SH_COMMANDS
echo "set echo on" >> $F_OUTPUT_SQL_COMMANDS
echo "set termout on" >> $F_OUTPUT_SQL_COMMANDS
echo "set feedback on" >> $F_OUTPUT_SQL_COMMANDS
msgi "Analysing controlfiles"
f_execute_sql "select NAME from v\$controlfile order by NAME;"
F_CTRLFILE=$D_TMP/b_PGF_db_files_layout.ctrlfile
run_command_e "cat $F_EXECUTE_SQL | grep -v '^ *\$' | grep -v 'rows selected' > $F_CTRLFILE"
V_CTRLFILE_NR=`cat $F_CTRLFILE | wc -l`
if [ "${V_CTRLFILE_NR}" -ge 5 ]; then
msge "This script is able to handle max 4 control files. Provided ${V_CTRLFILE_NR} Exiting."
exit 0
else
msgd "OK, ${V_CTRLFILE_NR} controlfiles found"
fi
# Each controlfile copy goes to its own /SID/u0N/SID directory starting at
# u02; the matching 'alter system set control_files' value is accumulated
# in V_ALTER_CTRL and printed in the final instructions.
V_ALTER_CTRL="alter system set control_files="
V_NR=2
while read V_CUR_CTRL_LOC
do
msgd "V_CUR_CTRL_LOC: $V_CUR_CTRL_LOC"
V_CUR_CTRL_LOC_BASENAME=`basename $V_CUR_CTRL_LOC`
msgd "V_CUR_CTRL_LOC_BASENAME: $V_CUR_CTRL_LOC_BASENAME"
V_NEW_CTRL_LOC=/${ORACLE_SID}/u0${V_NR}/${ORACLE_SID}/$V_CUR_CTRL_LOC_BASENAME
msgd "V_NEW_CTRL_LOC: $V_NEW_CTRL_LOC"
msgd "Preparing commands to change the ctrl file location"
echo "mv -i $V_CUR_CTRL_LOC $V_NEW_CTRL_LOC" >> $F_OUTPUT_SH_COMMANDS
msgd "Building alter system command"
V_ALTER_CTRL="${V_ALTER_CTRL}'${V_NEW_CTRL_LOC}',"
V_NR=`expr ${V_NR} + 1`
done < $F_CTRLFILE
msgd "Chop the last , and add ;"
V_ALTER_CTRL=`echo ${V_ALTER_CTRL} | sed s/.$//`
V_ALTER_CTRL="${V_ALTER_CTRL} scope=spfile;"
msgi "V_ALTER_CTRL: ${V_ALTER_CTRL}"
msgi "Analysing logfile"
f_execute_sql "select member from v\$logfile;"
F_LOGFILE=$D_TMP/b_PGF_db_files_layout.logfile
run_command_e "cat $F_EXECUTE_SQL | grep -v '^ *\$' | grep -v 'rows selected' > $F_LOGFILE"
# Redo members are alternated between u02 and u03: two lines are consumed
# per iteration (the second one via the inner 'read LINE').
while read LINE
do
msgd "LINE: $LINE"
if [ ! -z "$LINE" ]; then
f_change_location_db_file $LINE "/${ORACLE_SID}/u02/${ORACLE_SID}"
fi
read LINE
msgd "LINE: $LINE"
if [ ! -z "$LINE" ]; then
f_change_location_db_file $LINE "/${ORACLE_SID}/u03/${ORACLE_SID}"
fi
done < $F_LOGFILE
msgi "Analysing dba_temp_files"
f_execute_sql "select file_name from dba_temp_files order by file_name;"
F_DBA_TEMP_FILES=$D_TMP/b_PGF_db_files_layout.dba_temp_files
run_command_e "cat $F_EXECUTE_SQL | grep -v '^ *\$' | grep -v 'rows selected' > $F_DBA_TEMP_FILES"
# All temp files go to u05
while read LINE
do
msgd $LINE
f_change_location_db_file $LINE "/${ORACLE_SID}/u05/${ORACLE_SID}"
done < $F_DBA_TEMP_FILES
msgi "Analysing dba_data_files"
f_execute_sql "select file_name from dba_data_files order by file_name;"
F_DBA_DATA_FILES=$D_TMP/b_PGF_db_files_layout.dba_data_files
run_command_e "cat $F_EXECUTE_SQL | grep -v '^ *\$' | grep -v 'rows selected' > $F_DBA_DATA_FILES"
# Data files are routed by tablespace: index tablespaces -> u04, user data
# tablespaces -> u03, Oracle-internal tablespaces -> u02. Unknown names
# abort so the operator adds a rule instead of files being mis-placed.
while read LINE
do
msgd $LINE
V_FILENAME=`basename $LINE`
V_FILEPATH=`dirname $LINE`
msgd "V_FILENAME: $V_FILENAME"
msgd "V_FILEPATH: $V_FILEPATH"
# I stripe filename from number and extention to be able to decide what to do with it
V_FILENAME_CORE=`echo $V_FILENAME | awk -F"." '{ print $1 }' | sed s/[0-9]//g | tr -d "_"`
case $V_FILENAME_CORE in
"HALINDEKSY"|"HALINDEKSYST"|"TCINDX"|"tcindx")
msgd "pliki danych - uzytkownik - indeksy -> /SID/u04/SID"
f_change_location_db_file $LINE "/${ORACLE_SID}/u04/${ORACLE_SID}"
;;
"HALDANE"|"HALJN"|"HALUZYTK"|"HALREJ"|"HALDANEST"|"HALAR"|"MJREJESTRATOR"|"TELEPOCZTA"|"USERS"|"XADANE"|"HALDEVEL"|"xadane"|"halindeksyst"|"haluzytk"|"haldane"|"haljn"|"halindeksy"|"haldevel"|"tcdane"|"tcdevel"|"TCDANE"|"TCDEVEL")
msgd "pliki danych - uzytkownik - tabele -> /SID/u03/SID"
f_change_location_db_file $LINE "/${ORACLE_SID}/u03/${ORACLE_SID}"
;;
"undotbs"|"xdb"|"users"|"tools"|"indx"|"drsys"|"system"|"perfstat"|"sysaux"|"PERFSTAT")
msgd "pliki danych - oracle - wszystko ( tabele indeksy, default temp ) -> /SID/u02/SID"
f_change_location_db_file $LINE "/${ORACLE_SID}/u02/${ORACLE_SID}"
;;
*)
msge "Not known tablespace name, please setup a rule"
msgi "$V_FILENAME_CORE $V_FILENAME"
msge "exiting"
exit 1
;;
esac
done < $F_DBA_DATA_FILES
msgi "Resulting shell commands in file: $F_OUTPUT_SH_COMMANDS"
#cat $F_OUTPUT_SH_COMMANDS
msgi "Resulting SQL commands in file: $F_OUTPUT_SQL_COMMANDS"
#cat $F_OUTPUT_SQL_COMMANDS
msga "To make the changes:"
msga "!! This procedure should not be used when Data Guard is configured. !!"
msga ""
msga "1. Issue the controlfile rename command"
msga " > ${V_ALTER_CTRL}"
msga "2. Change the location of archivelogs"
msga " > alter system set log_archive_dest_1='location=/${ORACLE_SID}/${ORACLE_SID}_arch_1' scope=spfile;"
msga " > alter system set log_archive_dest_2='location=/${ORACLE_SID}/${ORACLE_SID}_arch_2' scope=spfile;"
msga "3. Shutdown cleanly database"
msga " > shutdown immediate"
msga "4. Check the files content before running them"
msga " less $F_OUTPUT_SH_COMMANDS"
msga " less $F_OUTPUT_SQL_COMMANDS"
msga "5. Run the physical move commands"
msga " $ cd `dirname $F_OUTPUT_SH_COMMANDS`"
msga " $ ./`basename $F_OUTPUT_SH_COMMANDS`"
msga "6. Startup mount, run the SQL portion to update controlfile"
msga " > startup mount"
msga " > @`basename $F_OUTPUT_SQL_COMMANDS`"
msga "7. Open database"
msga " > alter database open;"
msga "8. Investigate the old directories whether they can be deleted"
# Block actions start here
msgb "${FUNCNAME[0]} Finished."
} #b_change_db_files_to_PGF_layout
b_set_audit_file_dest()
{
# Ensure audit_file_dest equals "<dirname of background_dump_dest>/adump",
# the expected 10.2 layout. CHECK mode only reports; otherwise the adump
# directory is created and the (static) spfile parameter is set, taking
# effect on the next restart. Runs only when V_RDBMS_VERSION is 10.2.
B_PAR=$1 # Check if block was run with parameter
# Info section
msgb "${FUNCNAME[0]} Beginning."
msgd "Running this block only on 10.2 database"
if [ ! "${V_RDBMS_VERSION}" = "10.2" ]; then
msgi "Performing this check only on 10.2 database. This is ${V_RDBMS_VERSION}. Skipping."
return 0
fi
msgi "Checking if audit_file_dest location has expected value"
f_execute_sql "select VALUE from v\$parameter where NAME='background_dump_dest';"
V_BASE_DIR=$(dirname "$V_EXECUTE_SQL")
msgd "V_BASE_DIR: $V_BASE_DIR"
f_execute_sql "select VALUE from v\$parameter where NAME='audit_file_dest';"
V_AUDIT_DIR=$V_EXECUTE_SQL
msgd "V_AUDIT_DIR: $V_AUDIT_DIR"
if [ "${V_AUDIT_DIR}" = "${V_BASE_DIR}/adump" ]; then
msgi "audit_file_dest has expected value: ${V_AUDIT_DIR}"
# 'return' instead of 'exit' so this block can run inside a composed set
# of blocks; the old 'exit 0' aborted the whole script on the OK path.
return 0
else
msge "audit_file_dest has value: ${V_AUDIT_DIR} different that expected ${V_BASE_DIR}/adump"
msge "Run cd ~/scripto/oracle/create_db_scripts/oracle_setup; ./oracle_setup.sh b_set_audit_file_dest"
msge "To set proper audit_file_dest."
fi
# If block was run with CHECK parameter I exit now, before any permanent actions are done
if [ "$B_PAR" = "CHECK" ]; then
return 0
fi
check_directory "$V_BASE_DIR"
run_command_e "mkdir -p $V_BASE_DIR/adump"
msgi "Set the value of audit_file_dest to $V_BASE_DIR/adump"
f_execute_sql "alter system set audit_file_dest='$V_BASE_DIR/adump' scope=spfile;"
cat $F_EXECUTE_SQL
msgi "Please note, the changes will be visible after the db restarts as this is static parameter"
msgb "${FUNCNAME[0]} Finished."
} #b_set_audit_file_dest
# Check if provided variable $1 is an integer and within range >= $2 and <= $3
f_check_integer()
{
# Exits the script (status 1) when $1 is not a non-negative integer or lies
# outside the inclusive range [$2, $3]; otherwise returns normally.
msgd "${FUNCNAME[0]} Beginning."
V_INTEGER=$1
V_LOW=$2
V_HIGH=$3
check_parameter $V_INTEGER
check_parameter $V_LOW
check_parameter $V_HIGH
msgd "V_INTEGER: $V_INTEGER"
# Pure-bash integer test; the previous 'echo | $GREP -E' pipeline depended
# on an external $GREP variable and used an unquoted '[ ! $(...) ]'.
if ! [[ "$V_INTEGER" =~ ^[0-9]+$ ]]
then
msge "$V_INTEGER is not a valid integer, exiting"
exit 1
fi
if [ "${V_INTEGER}" -lt "${V_LOW}" ]; then
msge "Provided integer: ${V_INTEGER} is lower than provided minimum: ${V_LOW} . Exiting."
exit 1
fi
if [ "${V_INTEGER}" -gt "${V_HIGH}" ]; then
msge "Provided integer: ${V_INTEGER} is greater than provided maximum: ${V_HIGH} . Exiting."
exit 1
fi
msgd "${FUNCNAME[0]} Finished."
} #f_check_integer
b_add_new_redo()
{
# Interactively add new redo log groups (and, on Data Guard configurations,
# standby redo groups first) of an operator-chosen size. SQL is generated
# into $D_TMP and only executed after an explicit yes/no confirmation.
B_PAR=$1 # Check if block was run with parameter
msgb "${FUNCNAME[0]} Beginning."
msgb "WARNING: This procedure is for testing purposes only"
msgb "I will add new redo to database"
msgb "- if standby redo exists I will add it too"
# If block was run with CHECK parameter I exit now, before any permanent actions are done
if [ "$B_PAR" = "CHECK" ]; then
return 0
fi
read -p "Provide size of redo in MB: " V_REDO_SIZE
msgd "V_REDO_SIZE: $V_REDO_SIZE"
f_check_integer $V_REDO_SIZE 50 5000
msgd "Trying to suggest reasonable location for redo logs"
msgd "Based on current redo location"
# Take the directories of the first two existing redo members as defaults
f_execute_sql "select member from v\$logfile where rownum < 3;"
F_LOGFILE=$D_TMP/b_add_new_redo.logfile
run_command_e "cat $F_EXECUTE_SQL | grep -v '^ *\$' | grep -v 'rows selected' > $F_LOGFILE"
while read LINE
do
V_REDO_LOC1=`dirname $LINE`
msgd "V_REDO_LOC1: $V_REDO_LOC1"
read LINE
V_REDO_LOC2=`dirname $LINE`
msgd "V_REDO_LOC2: $V_REDO_LOC2"
done < $F_LOGFILE
msgi "Based on current redo location "
msgi "I suggest the first redo location as : $V_REDO_LOC1"
msgi "I suggest the second redo location as: $V_REDO_LOC2"
read -p "Do you want to change the locatios of redo? (y/any)" V_ANSWER
if [ "$V_ANSWER" = "y" ]; then
read -p "Provide new first location of redo : " V_REDO_LOC1
read -p "Provide new second location of redo: " V_REDO_LOC2
fi
msgi "First location of redo : $V_REDO_LOC1"
# NOTE(review): the line below prints $V_REDO_LOC1 again; $V_REDO_LOC2 was
# presumably intended (display-only issue).
msgi "Second location of redo: $V_REDO_LOC1"
check_directory $V_REDO_LOC1
check_directory $V_REDO_LOC2
read -p "Provide number of redo log groups: " V_REDO_NR
f_check_integer $V_REDO_NR 3 50
msgd "V_REDO_NR: $V_REDO_NR"
msgi "Number of redo groups to be created: ${V_REDO_NR}"
msgd "Determining if standby redo should be first created"
msgi "Create pfile from spfile for the purpose of analysis"
f_execute_sql "create pfile from spfile;"
# Standby presence is inferred from standby_file_management appearing in
# the freshly generated pfile.
msgd "Checking if this database has a standby. I check for parameter: standby_file_management."
TMP_CHK=`cat $ORACLE_HOME/dbs/init${ORACLE_SID}.ora | grep -v '^ *$' | grep -i standby_file_management`
if [ `echo $TMP_CHK | grep -v '^ *$' | wc -l` -gt 0 ]; then
msgi "This is a standby configuration."
msgi "Determining starting group number"
f_execute_sql "select max(GROUP#) from v\$logfile;"
V_GROUP_START=$V_EXECUTE_SQL
msgd "V_GROUP_START: $V_GROUP_START"
msgi "I will start creating redo group from number $V_GROUP_START + 1"
F_GEN_STB_LOGFILE=$D_TMP/generate_standby_redo.sql
rm -f $F_GEN_STB_LOGFILE
# On a physical standby, managed recovery must be cancelled before and
# restarted after adding the standby redo groups.
msgi "If this is standby database I have to cancel recovery first"
f_execute_sql "select DATABASE_ROLE from v\$database;"
msgd "V_EXECUTE_SQL: $V_EXECUTE_SQL"
if [ "${V_EXECUTE_SQL}" = "PHYSICAL STANDBY" ]; then
msgd "Adding cancel recovery to script"
echo "ALTER DATABASE RECOVER MANAGED STANDBY DATABASE CANCEL;" >> $F_GEN_STB_LOGFILE
fi
msgd "Generating SQLs to create new standby redo"
V_ITER=0
# Standby redo convention: one more standby group than online redo groups
msgd "Increasing redo number to have + 1 of standby redo logs"
V_REDO_STB_NR=`expr ${V_REDO_NR} + 1 `
msgd "V_REDO_NR: $V_REDO_NR"
while [ ! ${V_ITER} -eq ${V_REDO_STB_NR} ]
do
V_ITER=`expr ${V_ITER} + 1 `
msgd "V_ITER: $V_ITER"
V_GROUP_NR=`expr ${V_GROUP_START} + ${V_ITER}`
msgd "V_GROUP_NR: $V_GROUP_NR"
echo "alter database add standby logfile group $V_GROUP_NR ('${V_REDO_LOC1}/stb_redo${V_REDO_SIZE}M${V_GROUP_NR}a.rdo','${V_REDO_LOC2}/stb_redo${V_REDO_SIZE}M${V_GROUP_NR}b.rdo') size ${V_REDO_SIZE}M;" >> $F_GEN_STB_LOGFILE
done
msgi "If this is standby database I have to start recovery after adding standby redo"
f_execute_sql "select DATABASE_ROLE from v\$database;"
msgd "V_EXECUTE_SQL: $V_EXECUTE_SQL"
if [ "${V_EXECUTE_SQL}" = "PHYSICAL STANDBY" ]; then
msgd "Adding recovery to script"
echo "ALTER DATABASE RECOVER MANAGED STANDBY DATABASE USING CURRENT LOGFILE DISCONNECT;" >> $F_GEN_STB_LOGFILE
fi
cat $F_GEN_STB_LOGFILE
# Show the generated SQL and run it only after explicit confirmation
V_ANSWER=''
EXIT_WHILE=''
while [ ! "$EXIT_WHILE" ]
do
read -p "[wait] Do you want to run the SQL commands? (yes/no)" V_ANSWER
if [ "$V_ANSWER" = "yes" ]; then
f_execute_sql "@${F_GEN_STB_LOGFILE}"
cat $F_EXECUTE_SQL
EXIT_WHILE=1
fi
if [ "$V_ANSWER" = "no" ]; then
msgi "Doing nothing."
EXIT_WHILE=1
fi
done
fi # Actions if DB is DB with standby
# Now the regular online redo groups (re-reading max(GROUP#) so the new
# standby groups, if any, are taken into account).
msgi "Determining starting group number"
f_execute_sql "select max(GROUP#) from v\$logfile;"
V_GROUP_START=$V_EXECUTE_SQL
msgd "V_GROUP_START: $V_GROUP_START"
msgi "I will start creating redo group from number $V_GROUP_START + 1"
F_GEN_LOGFILE=$D_TMP/generate_redo.sql
rm -f $F_GEN_LOGFILE
msgi "If this is standby database I have to cancel recovery first"
f_execute_sql "select DATABASE_ROLE from v\$database;"
msgd "V_EXECUTE_SQL: $V_EXECUTE_SQL"
if [ "${V_EXECUTE_SQL}" = "PHYSICAL STANDBY" ]; then
msgd "Adding cancel recovery to script"
echo "ALTER DATABASE RECOVER MANAGED STANDBY DATABASE CANCEL;" >> $F_GEN_LOGFILE
echo "alter system set STANDBY_FILE_MANAGEMENT=MANUAL scope=memory;" >> $F_GEN_LOGFILE
fi
msgd "Generating SQLs to create new redo"
V_ITER=0
while [ ! ${V_ITER} -eq ${V_REDO_NR} ]
do
V_ITER=`expr ${V_ITER} + 1 `
msgd "V_ITER: $V_ITER"
V_GROUP_NR=`expr ${V_GROUP_START} + ${V_ITER}`
msgd "V_GROUP_NR: $V_GROUP_NR"
echo "alter database add logfile group $V_GROUP_NR ('${V_REDO_LOC1}/redo${V_REDO_SIZE}M${V_GROUP_NR}a.rdo','${V_REDO_LOC2}/redo${V_REDO_SIZE}M${V_GROUP_NR}b.rdo') size ${V_REDO_SIZE}M;" >> $F_GEN_LOGFILE
done
msgi "If this is standby database I have to start recovery after adding standby redo"
f_execute_sql "select DATABASE_ROLE from v\$database;"
msgd "V_EXECUTE_SQL: $V_EXECUTE_SQL"
if [ "${V_EXECUTE_SQL}" = "PHYSICAL STANDBY" ]; then
msgd "Adding recovery to script"
echo "alter system set STANDBY_FILE_MANAGEMENT=AUTO scope=memory;" >> $F_GEN_LOGFILE
echo "ALTER DATABASE RECOVER MANAGED STANDBY DATABASE USING CURRENT LOGFILE DISCONNECT;" >> $F_GEN_LOGFILE
fi
cat $F_GEN_LOGFILE
V_ANSWER=''
EXIT_WHILE=''
while [ ! "$EXIT_WHILE" ]
do
read -p "[wait] Do you want to run the SQL commands? (yes/no)" V_ANSWER
if [ "$V_ANSWER" = "yes" ]; then
f_execute_sql "@${F_GEN_LOGFILE}"
cat $F_EXECUTE_SQL
EXIT_WHILE=1
fi
if [ "$V_ANSWER" = "no" ]; then
msgi "Doing nothing."
EXIT_WHILE=1
fi
done
# Old groups whose size differs from the new one are drop candidates;
# only the drop statements are printed, never executed.
msgi "Listing potential groups to be droped"
f_execute_sql "select 'alter database drop logfile group '||GROUP#||';' from v\$log where BYTES > ${V_REDO_SIZE}*1024*1024 or BYTES < ${V_REDO_SIZE}*1024*1024;"
cat $F_EXECUTE_SQL
f_execute_sql "select 'alter database drop logfile group '||GROUP#||';' from v\$standby_log where BYTES > ${V_REDO_SIZE}*1024*1024 or BYTES < ${V_REDO_SIZE}*1024*1024;"
cat $F_EXECUTE_SQL
# Block actions start here
msgb "${FUNCNAME[0]} Finished."
} #b_add_new_redo
# Try to tnsping to all the databases found in tnsnames
b_check_reachability_of_DBs_from_tnsnames()
{
B_PAR=$1 # Check if block was run with parameter
# Info section
msgb "${FUNCNAME[0]} Beginning."
msgb "Try to tnsping to all the databases found in tnsnames"
# If block was run with CHECK parameter I exit now, before any permanent actions are done
if [ "$B_PAR" = "CHECK" ]; then
return 0
fi
F_TNSNAMES=$ORACLE_HOME/network/admin/tnsnames.ora
check_file $F_TNSNAMES
msgd "Prepare the list of tnsnames aliases to check"
cat $F_TNSNAMES | grep -v '(' | grep -v ')' | grep -v "^$" | grep -v "^#" | sed -e 's/=//' | awk -F"," '{ print $1 }' > $D_TMP/tns_alias.txt
check_file $D_TMP/tns_alias.txt
msgd "Loop through the list and try to tnsping each service"
while read LINE
do
msga "Checking $LINE"
tnsping $LINE > /dev/null
if [ $? -ne 0 ]; then
msge " >>> Failed to reach >>> $LINE"
else
msgi "Reached $LINE. OK."
fi
done < $D_TMP/tns_alias.txt
msgb "${FUNCNAME[0]} Finished."
} #b_check_reachability_of_DBs_from_tnsnames
b_switch_to_manual_from_ASMM()
{
# Generate (but never execute) the 'alter system' commands needed to move
# from Automatic Shared Memory Management (sga_target) to manual memory
# management. Each pool gets the last size ASMM chose, clamped to a
# minimum (db_cache 1024M, shared_pool 512M, java_pool 256M, large_pool
# 64M); finally sga_target is reset. Output goes to $D_TMP/no_more_asmm.txt.
B_PAR=$1 # Check if block was run with parameter
# Info section
msgb "${FUNCNAME[0]} Beginning."
msgb "This block helps in switch from ASMM (sga_target) to manual memory management"
msgb " it takes the values of parameters currently set on database and produces"
msgb " a set of alter system commands to complete the task."
msgb "Based on https://twiki.pgf.com.pl/cgi-bin/twiki/view/Main/HalInfrastructureNoAsmm"
# If block was run with CHECK parameter I exit now, before any permanent actions are done
if [ "$B_PAR" = "CHECK" ]; then
return 0
fi
msgd "Check whether this DB is using ASMM at all"
msgi "Create pfile from spfile for the purpose of analysis"
f_execute_sql "create pfile from spfile;"
check_file "$ORACLE_HOME/dbs/init${ORACLE_SID}.ora"
V_TMP=`cat $ORACLE_HOME/dbs/init${ORACLE_SID}.ora | grep -i sga_target | wc -l`
if [ "$V_TMP" -gt 0 ]; then
msgi "System has sga_target set to `cat $ORACLE_HOME/dbs/init${ORACLE_SID}.ora | grep -i sga_target`"
else
msge "System does not have a sga_target set, thus is not using ASMM. Exiting."
exit 0
fi
rm -f $D_TMP/no_more_asmm.txt
# The sub-select picks the most recent resize operation per pool
# (order by end_time desc, rownum=1).
# NOTE(review): the CASE has no branch for FINAL_SIZE exactly equal to the
# clamp value (e.g. exactly 1024M) - the expression then yields NULL;
# verify the generated line before running it.
f_execute_sql "select * from (select 'alter system set '||parameter||'='||case when FINAL_SIZE/1024/1024 < 1024 then 1024 when FINAL_SIZE/1024/1024 > 1024 then FINAL_SIZE/1024/1024 end ||'M scope=spfile;' from v\$sga_resize_ops where parameter='db_cache_size' order by end_time desc) where rownum=1;"
msgd "V_EXECUTE_SQL: $V_EXECUTE_SQL"
echo $V_EXECUTE_SQL >> $D_TMP/no_more_asmm.txt
f_execute_sql "select * from (select 'alter system set '||parameter||'='||case when FINAL_SIZE/1024/1024 < 512 then 512 when FINAL_SIZE/1024/1024 > 512 then FINAL_SIZE/1024/1024 end ||'M scope=spfile;' from v\$sga_resize_ops where parameter='shared_pool_size' order by end_time desc) where rownum=1;"
msgd "V_EXECUTE_SQL: $V_EXECUTE_SQL"
echo $V_EXECUTE_SQL >> $D_TMP/no_more_asmm.txt
f_execute_sql "select * from (select 'alter system set '||parameter||'='||case when FINAL_SIZE/1024/1024 < 256 then 256 when FINAL_SIZE/1024/1024 > 256 then FINAL_SIZE/1024/1024 end ||'M scope=spfile;' from v\$sga_resize_ops where parameter='java_pool_size' order by end_time desc) where rownum=1;"
msgd "V_EXECUTE_SQL: $V_EXECUTE_SQL"
echo $V_EXECUTE_SQL >> $D_TMP/no_more_asmm.txt
f_execute_sql "select * from (select 'alter system set '||parameter||'='||case when FINAL_SIZE/1024/1024 < 64 then 64 when FINAL_SIZE/1024/1024 > 64 then FINAL_SIZE/1024/1024 end ||'M scope=spfile;' from v\$sga_resize_ops where parameter='large_pool_size' order by end_time desc) where rownum=1;"
msgd "V_EXECUTE_SQL: $V_EXECUTE_SQL"
echo $V_EXECUTE_SQL >> $D_TMP/no_more_asmm.txt
echo "alter system reset sga_target scope=spfile sid='*';" >> $D_TMP/no_more_asmm.txt
msgi "MANUALLY check and issue the following commands:"
cat $D_TMP/no_more_asmm.txt
msgc "Have You issued the commands MANUALLY?"
msgb "${FUNCNAME[0]} Finished."
} #b_switch_to_manual_from_ASMM
# 16 lines to yank
b_template()
{
# Skeleton block kept as a copy/paste template for new blocks.
# $1 == "CHECK" selects report-only mode: bail out before any action.
B_PAR=$1
# Info section
msgb "${FUNCNAME[0]} Beginning."
msgb "I will do that "
msgb "and that "
# In CHECK mode stop before any permanent actions are done
[ "$B_PAR" = "CHECK" ] && return 0
# Block actions start here
msgi "ala ma kota"
msgb "${FUNCNAME[0]} Finished."
} #b_template
# Execution of blocks
#INFO_MODE=DEBUG
#INFO_MODE=ERROR
# If parameter 'block_name' was provided I execute only specified block
SELECT_BLOCK=$1
# Temporary directory temporary files, has to be unique
D_TMP=/tmp/tmp_${USERNAME}_${ORACLE_SID}
mkdir -p "$D_TMP"
# Dispatch: $1 selects the block; $2 (usually "CHECK") is forwarded to every
# block that reads B_PAR=$1, so report-only mode works uniformly.
case $SELECT_BLOCK in
"b_crontab_scripts_to_rm_gzip_archivelogs")
b_crontab_scripts_to_rm_gzip_archivelogs $2
;;
"b_create_temp_datafiles_for_temp_tbs_if_dont_exist")
b_create_temp_datafiles_for_temp_tbs_if_dont_exist $2
;;
"b_basic_db_healthcheck")
b_basic_db_healthcheck $2
;;
"b_implement_flashback_database")
b_implement_flashback_database $2
;;
"b_check_mail_queue")
b_check_mail_queue $2
;;
"b_forward_mail")
b_forward_mail $2
;;
"b_change_profile")
b_change_profile $2
;;
"b_store_in_cvs")
b_store_in_cvs
;;
"b_basic_os_healthcheck")
b_basic_os_healthcheck
;;
"b_change_db_files_to_PGF_layout")
b_change_db_files_to_PGF_layout $2
;;
"s_standard_infrastructure_actions")
b_check_mail_queue
b_store_in_cvs
b_change_profile
b_forward_mail
b_basic_os_healthcheck
b_basic_db_healthcheck
;;
"b_check_and_fix_orphan_links_resulting_from_rsync_copy")
b_check_and_fix_orphan_links_resulting_from_rsync_copy $2
;;
"b_set_audit_file_dest")
b_set_audit_file_dest $2
;;
"b_add_new_redo")
b_add_new_redo $2
;;
"b_check_reachability_of_DBs_from_tnsnames")
b_check_reachability_of_DBs_from_tnsnames $2
;;
"b_switch_to_manual_from_ASMM")
b_switch_to_manual_from_ASMM $2
;;
*)
echo "Please provide the block that You want to run."
echo "Available blocks: "
echo "b_crontab_scripts_to_rm_gzip_archivelogs"
echo "b_create_temp_datafiles_for_temp_tbs_if_dont_exist"
echo "b_basic_db_healthcheck"
echo "b_implement_flashback_database"
echo "b_check_and_fix_orphan_links_resulting_from_rsync_copy"
echo "b_change_db_files_to_PGF_layout"
echo "b_set_audit_file_dest"
echo "b_add_new_redo"
echo "b_check_reachability_of_DBs_from_tnsnames"
echo "b_switch_to_manual_from_ASMM"
echo "b_check_mail_queue"
echo "b_forward_mail"
echo "b_change_profile"
echo "b_store_in_cvs"
echo "b_basic_os_healthcheck"
echo ""
echo "Available sets of blocks:"
echo "s_standard_infrastructure_actions"
echo "- b_check_mail_queue"
echo "- b_store_in_cvs"
echo "- b_change_profile"
echo "- b_forward_mail"
echo "- b_basic_os_healthcheck"
echo "- b_basic_db_healthcheck"
exit 1
;;
esac
| true |
81f4f1c675869530dc31e3121269b3e652b249f2 | Shell | AkshithGit/awsCLI | /kinesisStream_randomPuts.sh | UTF-8 | 1,706 | 4.1875 | 4 | [] | no_license | #!/bin/bash
## This script will create random data into a Kinesis stream
## Still To do:
#1 Fix Word3 to have a bigger data entry
#2 Have the entries written in a format such as CSV or JSON
#3 Better understand the partition key and how to use it correctly
# Function for the randomWord
randomWord(){
shuf -n1 /usr/share/dict/words
}
# Get stream name and save as variable. In this test, there is only one stream. If there are more, then you will have add a loop to specify which one you want to have used.
STREAM=`aws kinesis list-streams | grep ' "'|tr -d ' "'`
# How many puts?
echo ""
echo "$(tput setaf 2)Yo! How many Put-Records would you like to do per partiton-key?$(tput sgr0)"
read COUNT
echo "$(tput setaf 2)Okay. Doing $COUNT Put-Records into Stream $STREAM.$(tput sgr0)"
echo ""
# loop to put a record loop 10 times and use a random word
N=1
while [ $N -le $COUNT ]
do
WORD1=$(randomWord)","
WORD2=$(randomWord)"," # Using two word so that you can play with different partition keys
## Word3 feature coming soon.
#WORD3="$(randomWord) $(randomWord) $(randomWord) $(randomWord) $(randomWord) $(randomWord)"
echo $N key1 word: $WORD1
aws kinesis put-record --stream-name $STREAM --partition-key 1 --data $WORD1
echo $N key2 word: $WORD2
aws kinesis put-record --stream-name $STREAM --partition-key 2 --data $WORD2
## Word3 feature coming soon.
#echo $N key2 word: $WORD3
#aws kinesis put-record --stream-name $STREAM --partition-key 3 --data $WORD3
echo "$(tput setaf 2)$N Done$(tput sgr0)"
echo "-----"
((N++))
done
echo "All done :)"
echo "-----"
| true |
4ad2a366979864bf5d7bf31bd6705ca77f85a9ee | Shell | Igorjan94/CF | /scripts/cloneRep.sh | UTF-8 | 175 | 3.140625 | 3 | [] | no_license | #!/usr/bin/env bash
if [ -z "$1" ]; then
echo "Rep isn't set";
exit 1;
fi
surname=$(findSurname.py "$1")
git clone "$1" "$surname" && cd "$surname" && buildHere.sh
| true |
3c58f5fdb22cb46362135c53470a946199dd5f6b | Shell | jxsr713/coding | /tools/ddt_clr_tmp_config_files.sh | UTF-8 | 818 | 2.5625 | 3 | [] | no_license | #########################################################################
# File Name: .sh
# Author: jxsr713
# mail: jxsr713@163.com
# sync km iommu code: saf driver and cli tool
# Created Time: 2021年06月09日 星期三 15时10分45秒
#########################################################################
#!/bin/bash
#++++++++++++++++++++++++++++++++++++++++++++++++++++#
#general source code can be used all shell script
#++++++++++++++++++++++++++++++++++++++++++++++++++++#
#get current path
CUR_DIR=`pwd`
echo "current path:$CUR_DIR"
#get current file location
TOOL_DIR="$( cd "$( dirname "$0" )" && pwd )"
echo "Tool path: $TOOL_DIR $(dirname $0) $0"
TIMESTAMP=$(date +%Y%m%d-%H%M%S)
CFG_DIR="./bk-config-${TIMESTAMP}"
mkdir -p ${CFG_DIR}
mv config* ${CFG_DIR}
rm conf* -rf
cp ${CFG_DIR}/* ./ -rf
exit
| true |
8287a5d12f12e147c3ba9f9393905105d9eeaa27 | Shell | donaldguy/2017-dotfiles | /zshrc.d/functions/osx.zsh | UTF-8 | 261 | 2.953125 | 3 | [] | no_license | function toggle_desktop() {
shown=$(defaults read com.apple.finder CreateDesktop)
if [ $shown = "true" ]; then
defaults write com.apple.finder CreateDesktop 'false'
else
defaults write com.apple.finder CreateDesktop 'true'
fi
killall Finder
}
| true |
e52cf60d7e9fbd814a3430d8c92480aa0df40757 | Shell | marquisXuan/configuration | /linux/install/centos-7/docker/docker-spring-boot.sh | UTF-8 | 1,262 | 3.6875 | 4 | [] | no_license | #!/bin/bash
_path=$1
_container_name=$2
_env=$3
DOCKER_CONTAINER_NAME=spring-boot-images
DOCKER_IMAGES=$(docker images | grep spring-boot-images | awk '{print $1}')
if [ -z "$DOCKER_IMAGES" ]; then
__path=/srv/docker/java/base
mkdir -p $__path && cd $__path
echo "RlJPTSBvcGVuamRrOjgKRU5WIEpBVkFfT1BUSU9OUz0iIgpFTlRSWVBPSU5UIFsgInNoIiwgIi1jIiwgImphdmEgJEpBVkFfT1BUUyAtRGphdmEuc2VjdXJpdHkuZWdkPWZpbGU6L2Rldi8uL3VyYW5kb20gLWphciAvb3B0L2FwcC5qYXIiIF0K" | base64 -d >$__path/Dockerfile
INNER_DOCKER_IMAGES=$(docker images | grep $DOCKER_CONTAINER_NAME | awk '{print $1}')
while [ "$INNER_DOCKER_IMAGES" != "$DOCKER_CONTAINER_NAME" ]; do
docker pull openjdk:8
INNER_DOCKER_IMAGES=$(docker images | grep $DOCKER_CONTAINER_NAME | awk '{print $1}')
done
docker build -t $DOCKER_CONTAINER_NAME .
fi
# 基于 docker 的 spring-boot 容器
DOCKER_NETWORK_BRIDGE_NAME=nginx-network
if [ -n "$_path" ]; then
echo -e "\033[36m 创建容器 \033[0m"
docker rm -f $_container_name
docker run --network $DOCKER_NETWORK_BRIDGE_NAME -v /etc/localtime:/etc/localtime:ro \
-v $_path:/opt/app.jar \
-v /var/logs/java:/var/logs/java \
--name $_container_name \
-e JAVA_OPTS=-Dspring.profiles.active=$_env \
--restart=always \
-d $DOCKER_IMAGES
else
echo -e "\033[36m 无参时,只创建 SpringBoot 镜像 \033[0m"
fi
docker logs -f $_container_name | true |
ed645dfdf2e3c0b91a824ea9bd7f11f5f22cab90 | Shell | ef37/operations-puppet | /modules/role/files/toollabs/fetch-worker.bash | UTF-8 | 500 | 3.296875 | 3 | [] | no_license | #!/bin/bash
# Simple script that only downloads kubelet/kube-proxy
# Useful for first run scenarios
set -o errexit
set -o nounset
# TODO: Add error checking (baaaaassshhhhh)
URL_PREFIX="${1:?URL_PREFIX is required}"
VERSION="${2:?VERSION is required}"
# Download the new things!
wget -O /usr/local/bin/kubelet $URL_PREFIX/$VERSION/kubelet
wget -O /usr/local/bin/kube-proxy $URL_PREFIX/$VERSION/kube-proxy
# Make them executable!
chmod u+x /usr/local/bin/kubelet
chmod u+x /usr/local/bin/kube-proxy | true |
f126ea91ed15f026041803c2d9d5f43856fdf430 | Shell | jwiegley/scripts | /b2-restic | UTF-8 | 6,070 | 3.59375 | 4 | [] | no_license | #!/bin/bash
if [[ "$1" == "--passwords" ]]; then
source $2
shift 2
fi
POOL="$1"
shift 1
export PATH=$HOME/.nix-profile/bin:$PATH
if [[ -z "$RESTIC_PASSWORD" ]]; then
export RESTIC_PASSWORD_COMMAND="pass show Passwords/restic"
else
unset RESTIC_PASSWORD_COMMAND
fi
if [[ -z "$AWS_ACCESS_KEY_ID" ]]; then
export AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID:-$(pass show Passwords/b2-access-key-id | head -1)}
fi
if [[ -z "$AWS_SECRET_ACCESS_KEY" ]]; then
export AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY:-$(pass show Passwords/b2-access-key | head -1)}
fi
# export B2_ACCOUNT_ID=${B2_ACCOUNT_ID:-$(pass show Passwords/backblaze-api-key)}
# export B2_ACCOUNT_KEY=${B2_ACCOUNT_KEY:-$(pass show Passwords/backblaze-secret-key)}
#######################################
# Back up one ZFS dataset to a Backblaze B2 bucket via restic.
# Globals:   POOL (read) - ZFS pool containing the dataset
#            RESTIC_PASSWORD / AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY
#            (re-exported when set so the restic child process inherits them)
# Options:   --bucket NAME  use bucket jwiegley-NAME instead of deriving the
#            name from the dataset path ('/' mapped to '-')
# Arguments: all but the last are passed through to restic (extra --exclude
#            flags etc.); the LAST argument is the dataset name under $POOL
# Returns:   restic's exit status
#######################################
function restic_it() {
    echo "========================================================================"
    echo "restic backup ${@:$#}"
    if [[ -n "$RESTIC_PASSWORD" ]]; then export RESTIC_PASSWORD; fi
    if [[ -n "$AWS_ACCESS_KEY_ID" ]]; then export AWS_ACCESS_KEY_ID; fi
    if [[ -n "$AWS_SECRET_ACCESS_KEY" ]]; then export AWS_SECRET_ACCESS_KEY; fi
    if [[ "$1" == "--bucket" ]]; then
        bucket="$2"
        shift 2
    else
        bucket=$(echo "${@:$#}" | sed 's%/%-%g')
    fi
    # Build the exclude file in an unpredictable temp path (the old code
    # wrote a predictable /tmp/ignore.$$ and never deleted it): strip
    # leading "- " markers and turn trailing-slash directory patterns into
    # recursive "dir/**" globs.
    local ignores
    ignores=$(mktemp) || return
    sed -e 's/^- //' -e 's%/$%/**%' ~/.config/ignore.lst > "$ignores"
    fs="${@:$#}"
    DIR=$(zfs get -H -o value mountpoint $POOL/$fs)
    # --repo b2:jwiegley-${bucket} backup
    restic \
        --repo s3:s3.us-west-001.backblazeb2.com/jwiegley-${bucket} backup \
        -o b2.connections=10 \
        --quiet \
        --ignore-inode \
        --one-file-system \
        --exclude-if-present '.b2-exclude' \
        --exclude-file "$ignores" \
        "${@:1:$#-1}" \
        "$DIR"
    local rv=$?
    # Remove the temp exclude file and propagate restic's status.
    rm -f -- "$ignores"
    return $rv
}
#######################################
# Dispatch a restic backup for one named dataset, or for every dataset.
# Args: $1 - dataset name; empty/--all (the default via ${1:---all})
#        backs up the whole list by recursing into this function.
# Each named arm adds --exclude patterns suited to that dataset's contents.
#######################################
function backup() {
case ${1:---all} in
--all)
for i in \
Audio \
Backups/Misc \
Databases \
Desktop \
Documents \
Home \
Library \
Media \
Messages \
Photos \
Video \
doc \
src
do
backup $i
done
;;
# Source trees and the home dir: skip compiler/build artifacts and caches
# (Agda, Coq, Haskell, OCaml, Rust, Nix results, Vagrant state, ...).
Home | kadena | src)
restic_it \
--exclude '*.agdai' \
--exclude '*.glob' \
--exclude '*.hi' \
--exclude '*.o' \
--exclude '*.aux' \
--exclude '*.vo' \
--exclude '*.vok' \
--exclude '*.vos' \
--exclude '*.elc' \
--exclude '*.eln' \
--exclude '*.cma' \
--exclude '*.cmi' \
--exclude '*.cmo' \
--exclude '*.cmx' \
--exclude '*.cmxa' \
--exclude '*.cmxs' \
--exclude '*.lia-cache' \
--exclude '*.lra-cache' \
--exclude '*.nia-cache' \
--exclude '*.nra-cache' \
--exclude '.lia.cache' \
--exclude '.lra.cache' \
--exclude '.nia.cache' \
--exclude '.nra.cache' \
--exclude '.vagrant' \
--exclude '.ghc.*' \
--exclude '.cabal' \
--exclude '.cargo' \
--exclude '.coq-native' \
--exclude 'result' \
--exclude 'result-*' \
--exclude '.ghc' \
--exclude '.MAlonzo' \
--exclude '.dist' \
--exclude '.dist-newstyle' \
--exclude '.slocdata' \
--exclude '.local/share/vagrant' \
$1 ;;
# macOS Library: skip app caches, container/sandbox data and sync mirrors.
Library)
restic_it \
--exclude 'Application Support/Bookmap/Cache' \
--exclude 'Application Support/FileProvider' \
--exclude 'Application Support/MobileSync' \
--exclude 'CloudStorage/GoogleDrive-copper2gold1@gmail.com' \
--exclude 'Containers' \
--exclude 'Caches/GeoServices' \
$1 ;;
# Mail store: skip rebuildable search and dovecot index files.
Messages)
restic_it \
--exclude 'lucene-indexes' \
--exclude 'dovecot.index.*' \
$1 ;;
Video)
# jww (2023-01-13): Due to historical accident, the bucket is called
# jwiegley-Movies, and not jwiegley-Video; but since it's very large and I
# cannot rename buckets, it's not worth re-uploading just yet. The usual
# ~/Movies folder isn't something I backup to B2.
restic_it --bucket Movies $1 ;;
# Everything else: plain backup with no extra excludes.
*) restic_it $1 ;;
esac
}
# Entry point: back up the dataset named on the command line (defaults to
# --all inside backup()). Quoted so a name containing spaces survives
# word-splitting; ${1:-} avoids an unbound-variable error when no arg given.
backup "${1:-}"
| true |
d4199c15411e092e1e262a6c393ef36b0e660811 | Shell | indaco/predix-ec-configurator-vagrant | /scripts/provision.sh | UTF-8 | 4,308 | 3.28125 | 3 | [] | no_license | #!/bin/bash
# Abort the provisioner on the first failing command.
set -o errexit

echo '>>> Setting up ECDemo Vagrant Box'

# Assign permissions to "vagrant" user
sudo chown -R vagrant /usr/local

# Updating the system
sudo apt-get -y update

### Installing Git
echo '>>> Installing Git'
sudo apt-get install -y git
git config --global user.name "Admin"
git config --global user.email "admin@example.com"

### Installing CF CLI
echo ">>> Installing CF CLI"
wget -q -O - https://packages.cloudfoundry.org/debian/cli.cloudfoundry.org.key | sudo apt-key add -
echo "deb http://packages.cloudfoundry.org/debian stable main" | sudo tee /etc/apt/sources.list.d/cloudfoundry-cli.list
sudo apt-get update
# -y keeps apt non-interactive; without it the confirmation prompt would
# stall this unattended provisioning run (every other install here uses -y).
sudo apt-get install -y cf-cli
### Installing Go 1.9.2
echo '>>> Installing Go 1.9.2'
# Fetch the toolchain tarball, unpack under /usr/local/go, drop the archive.
wget -q -O /home/vagrant/go1.9.2.linux-amd64.tar.gz https://redirector.gvt1.com/edgedl/go/go1.9.2.linux-amd64.tar.gz
sudo tar -C /usr/local -xzf /home/vagrant/go1.9.2.linux-amd64.tar.gz
rm /home/vagrant/go1.9.2.linux-amd64.tar.gz
VAGRANT_HOME=/home/vagrant
GOPATH=$VAGRANT_HOME/gowork
mkdir $GOPATH
# Setting Go env variables
# The single quotes are deliberate: the literal `$PATH`/`$GOROOT` text is
# appended to the profile files and only expanded by future login shells.
echo '>>> Setting Go env variables'
echo 'export GOROOT=/usr/local/go' >> $VAGRANT_HOME/.profile
echo 'export PATH=$PATH:$GOROOT/bin' >> $VAGRANT_HOME/.profile
source $VAGRANT_HOME/.profile
echo 'export VAGRANT_HOME=/home/vagrant' >> $VAGRANT_HOME/.bash_profile
echo 'export GOPATH=$VAGRANT_HOME/gowork' >> $VAGRANT_HOME/.bash_profile
echo 'export GOBIN=$GOPATH/bin' >> $VAGRANT_HOME/.bash_profile
echo 'export PATH=$GOBIN:$PATH' >> $VAGRANT_HOME/.bash_profile
source $VAGRANT_HOME/.bash_profile
echo '>>> Checking Go installation (You should see go version number below this)'
go version
# Assign permissions to "vagrant" user on GOPATH
sudo chown vagrant:vagrant -R $GOPATH
# Installing GoVendor
echo '>>> Installing GoVendor'
go get -u github.com/kardianos/govendor
# Getting predix-ec-configurator
echo '>>> Getting predix-ec-configurator'
go get -u github.com/indaco/predix-ec-configurator
cd $GOPATH/src/github.com/indaco/predix-ec-configurator
echo '>>> Downloading dependencies'
# govendor sync restores the versions pinned in vendor/vendor.json;
# the plain `go get` afterwards fetches any remaining unpinned deps.
$GOPATH/bin/govendor sync
go get
echo '>>> Building predix-ec-configurator'
go build
# Create a service
# Install the SysV init wrapper shipped with the Vagrant scripts.
sudo cp /vagrant/scripts/ec-configurator-service.sh /etc/init.d/ec-configurator-service
sudo chmod 775 /etc/init.d/ec-configurator-service
### Installing Nginx
echo '>>> Installing Nginx'
sudo apt-get install -y nginx
# set up nginx server
# Keep a backup of the stock config, then run the workers as "vagrant"
# (replaces the default www-data user in nginx.conf).
sudo cp /etc/nginx/nginx.conf /etc/nginx/nginx.bk.conf
sudo sed -i 's/www-data/vagrant/' /etc/nginx/nginx.conf
# Swap the default vhost for the project's "mysite" server block.
sudo rm /etc/nginx/sites-enabled/default
sudo cp /vagrant/scripts/mysite /etc/nginx/sites-available/mysite
sudo chmod 644 /etc/nginx/sites-available/mysite
sudo ln -s /etc/nginx/sites-available/mysite /etc/nginx/sites-enabled/mysite
# clean /var/www
sudo rm -Rf /var/www
# symlink /var/www => /vagrant
# Serve the shared Vagrant folder directly as the web root.
ln -s /vagrant /var/www
### Installing PostgreSQL
echo '>>> Installing PostgreSQL'
# Single source of truth for the Postgres major version used below.
POSTGRESQL_VERSION=9.6
sudo apt-get update
sudo apt-get install -y software-properties-common python-software-properties
sudo add-apt-repository "deb http://apt.postgresql.org/pub/repos/apt/ trusty-pgdg main"
wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add -
sudo apt-get update
sudo apt-get install -y postgresql-$POSTGRESQL_VERSION

# Allowing external connections
sudo sed -i 's/#listen_addresses/listen_addresses/' /etc/postgresql/$POSTGRESQL_VERSION/main/postgresql.conf
# Use $POSTGRESQL_VERSION here too (this path was hard-coded to 9.6, which
# would silently edit the wrong file after a version bump above).
sudo sed -i 's/localhost/*/' /etc/postgresql/$POSTGRESQL_VERSION/main/postgresql.conf

# Configuring Postgres: trust all local connections, require md5 passwords
# from the 10.0.0.0/16 private network (the Vagrant host-only range).
sudo bash -c "cat > /etc/postgresql/${POSTGRESQL_VERSION}/main/pg_hba.conf" << EOL
local all all trust
host all all 127.0.0.1/32 trust
host all all ::1/128 trust
host all all 10.0.0.0/16 md5
EOL

# Restarting PostgreSQL
sudo /etc/init.d/postgresql restart

# Creating a new ecdemodb database
# Setting password for ecdemouser
psql -U postgres <<EOF
\x
CREATE USER ecdemouser;
ALTER USER ecdemouser PASSWORD 'ecdemo';
CREATE DATABASE ecdemodb OWNER ecdemouser;
EOF

# Storing sample data on PostgreSQL
psql -U ecdemouser ecdemodb < /vagrant/scripts/dbexport.pgsql

### Finished! ###
echo '>>> predix-ec-configurator Vagrant box is ready!'
| true |
45af512aae510d78e75abae83de3b356cc4acacd | Shell | joebass85/awesome.config | /.bashrc | UTF-8 | 2,720 | 2.703125 | 3 | [] | no_license | #
# ~/.bashrc
# If not running interactively, don't do anything
[[ $- != *i* ]] && return
# History: ignore duplicates and space-prefixed commands; erase older dupes.
export HISTCONTROL=ignoreboth:erasedups
# Prompt: [user@host cwd]$
PS1='[\u@\h \W]\$ '
# Prepend the user's private bin directory to PATH when it exists.
# Fix: the guard tests for $HOME/bin, but the old code prepended
# $HOME/bin/sh — a subdirectory the condition never verifies. Prepend the
# directory that is actually checked.
if [ -d "$HOME/bin" ] ;
then PATH="$HOME/bin:$PATH"
fi
#list
alias ls='ls --color=auto'
alias la='ls -a'
alias ll='ls -AlF'
alias l='ls'
alias l.="ls -A | egrep '^\.'"
alias lll='ls -AlF | less'
# interpreter / systemctl / editor shortcuts
alias p='python2'
alias p3='python3'
alias sysr='sudo systemctl restart'
alias syss='sudo systemctl start'
alias syse='sudo systemctl enable'
alias sysd='sudo systemctl disable'
alias v='vim'
alias sv='sudo vim'
# network helpers
alias arp='sudo arp-scan -l'
alias netst='sudo netstat -tulpn'
alias speed='speedtest-cli --bytes --share --single'
alias w='sudo wifi-menu'
#fix obvious typo's
alias cd..='cd ..'
alias pdw="pwd"
## Colorize the grep command output for ease of use (good for log files)##
alias grep='grep --color=auto'
alias egrep='egrep --color=auto'
alias fgrep='fgrep --color=auto'
#pacman unlock
alias unlock="sudo rm /var/lib/pacman/db.lck"
#merge new settings
alias merge="xrdb -merge ~/.Xresources"
# yay as aur helper - updates everything
alias pksyua="yay -Syu --noconfirm"
#ps
# NOTE(review): this shadows plain `ps`; use \ps or command ps for stock output
alias ps="ps auxf"
alias psgrep="ps aux | grep -v grep | grep -i -e VSZ -e"
#grub update
alias update-grub="sudo grub-mkconfig -o /boot/grub/grub.cfg"
#skip integrity check
alias yayskip='yay -S --mflags --skipinteg'
alias trizenskip='trizen -S --skipinteg'
#check vulnerabilities microcode
alias microcode='grep . /sys/devices/system/cpu/vulnerabilities/*'
#get fastest mirrors in your neighborhood
alias mirror="sudo reflector -f 30 -l 30 --number 10 --verbose --save /etc/pacman.d/mirrorlist"
alias mirrord="sudo reflector --latest 50 --number 20 --sort delay --save /etc/pacman.d/mirrorlist"
alias mirrors="sudo reflector --latest 50 --number 20 --sort score --save /etc/pacman.d/mirrorlist"
alias mirrora="sudo reflector --latest 50 --number 20 --sort age --save /etc/pacman.d/mirrorlist"
#shopt
shopt -s autocd # change to named directory
shopt -s cdspell # autocorrects cd misspellings
shopt -s cmdhist # save multi-line commands in history as single line
shopt -s dotglob # include dotfiles in pathname expansion
shopt -s histappend # do not overwrite history
shopt -s expand_aliases # expand aliases
#Recent Installed Packages
alias rip="expac --timefmt='%Y-%m-%d %T' '%l\t%n %v' | sort | tail -100"
#Cleanup orphaned packages
alias cleanup='sudo pacman -Rns $(pacman -Qtdq)'
#create a file called .bashrc-personal and put all your personal aliases
#in there. They will not be overwritten by skel.
[[ -f ~/.bashrc-personal ]] && . ~/.bashrc-personal
# Default applications for programs that honor these variables.
export VISUAL=vim
export PATH=$PATH
export BROWSER=firefox
# NOTE(review): personal repo URL; not referenced elsewhere in this file.
repo="https://github.com/joebass85"
# vi-style command-line editing
set -o vi
export TERMINAL=st
export EDITOR=vim
# Show system info banner on each interactive shell.
neofetch
| true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.