blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
45b84b239fd45253b747de37205c5ac872cf49c5 | Shell | 5470x3/atango | /setup/mac_provision.sh | UTF-8 | 1,172 | 2.9375 | 3 | [
"WTFPL"
] | permissive | #!/bin/bash
# Provision a macOS host for the atango project: create /work, install the
# toolchain (Xcode CLT, Homebrew, Python 3, MeCab, Java 7, Elasticsearch,
# Redis, nginx) and prepare a virtualenv with the project requirements.
sudo mkdir /work
# NOTE(review): 777 makes /work world-writable — presumably fine for a
# single-user dev box; confirm before using on a shared machine.
sudo chmod 777 /work
# Install Xcode Command Line Tools
xcode-select --install
# Install Homebrew (legacy ruby-based installer)
ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
# Install Python 3
brew install python3
# Install virtualenv
sudo pip3 install virtualenv
virtualenv -p python3 /work/venv/atango
# Activate the virtualenv for the remainder of this script
. /work/venv/atango/bin/activate
# Install MeCab via the project's helper script (downloaded, run, removed)
curl -k -O https://raw.githubusercontent.com/kuhaku/atango/master/setup/mecab_install.sh
bash mecab_install.sh
rm mecab_install.sh
# Install Java7
brew tap phinze/homebrew-cask
brew install brew-cask
brew tap caskroom/versions
brew cask install java7
# Install Elasticsearch
brew install elasticsearch
# Install Redis
brew install redis
# Install Python libs from the repository's requirements.txt
curl -k -O https://raw.githubusercontent.com/kuhaku/atango/master/requirements.txt
pip install --upgrade --timeout 30 -r requirements.txt
rm requirements.txt
# Clone Atango repository
git clone https://github.com/kuhaku/atango.git /work/atango
# Install nginx and apply the project's configuration
brew install nginx
cp /work/atango/settings/nginx.conf /usr/local/etc/nginx/nginx.conf
# Install iStats for CPU temperature monitor
gem install iStats
| true |
6e9244a96a696a32ceace59a44a21de2e92d666c | Shell | aleitamar/verigreen | /utils/service-script/verigreen | UTF-8 | 3,943 | 3.796875 | 4 | [
"Apache-2.0"
] | permissive | #! /bin/bash
#*******************************************************************************
# Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
#*******************************************************************************
# Basic support for IRIX style chkconfig or sysv-rc-conf (for Ubuntu)
#*******************************************************************************
# Set the script to load at start-up (run as a daemon) via:
# Redhat/Centos: chkconfig 235 98 55
# Ubuntu: sysv-rc-conf -level 325 98 55
# Description: Manages a service controlled by the chkconfig or sysv-rc-conf command.
# Default location is the server's /etc/init.d/ folder, grant required permissions.
#*******************************************************************************
SERVICE_NAME="verigreen" # the service name (case sensitive, of course!). Should be the same as this script name
# All parameters below are for the docker HOST machine, not the container!
VG_NAME="<name>_vg" # container name and hostname; replace <name> per deployment
REPO_NAME="<repo_name>"
REPO_DIR="/DATA/gitrepo/$REPO_NAME" # may need to manually clone to this location
SSH_DIR="/root/.ssh" # SSH keys to authenticate git operations within the repository
CONFIG_DIR="/DATA/VG" # config.properties and run.yml files are placed here
DOCKER_IMAGE="verigreen/vg-collector"
PORT=8085 # host port published to the container's 8080
# Limit container logs (via json-file log file) to 10k. Adjust as needed
LIMIT_DOCKER_LOG="--log-driver json-file --log-opt max-size=10k --log-opt max-file=10"
RESTART_OPT="--restart=always"
#*******************************************************************************
# start, stop, status functions
# Start the Verigreen collector container detached, then report its status.
# $RESTART_OPT and $LIMIT_DOCKER_LOG are intentionally left unquoted so each
# option word-splits into separate docker arguments.
function SERVICE_START() {
/usr/bin/docker run -d \
$RESTART_OPT \
$LIMIT_DOCKER_LOG \
--hostname="$VG_NAME" \
--name="$VG_NAME" \
-v $SSH_DIR:/ssh \
-v $CONFIG_DIR:/vg \
-v $REPO_DIR:/repo \
-p $PORT:8080 \
$DOCKER_IMAGE
# Give the container a moment to come up before showing its status.
sleep 3
SERVICE_STATUS
}
# Force-stop the container, then remove it so the name can be reused by the
# next SERVICE_START. Quoted expansions guard against word-splitting.
function SERVICE_STOP() {
  docker kill "$VG_NAME"
  docker rm "$VG_NAME"
}
# Print the container's row from `docker ps` (-a includes stopped containers).
# `--` stops grep option parsing in case the name ever starts with a dash.
function SERVICE_STATUS() {
  docker ps -a | grep -- "$VG_NAME"
}
#*******************************************************************************
# authorized user verification, currtly, script is only supporting root!
USERNAME="root" # rename if clashes with system defined param
USAGE="Usage: /sbin/service $SERVICE_NAME {start|stop|status|restart}"
# set the USERNAME variable if missing from env
# NOTE(review): USERNAME is hard-coded to "root" just above, so this
# fallback branch can never fire as written.
if [ "x$USERNAME" = "x" ]; then
USERNAME=$(whoami)
fi
#*******************************************************************************
# This part need not be changed, usually.
# Dispatch on the requested service action.
case "$1" in
  start)
    # Fixed: the original referenced undefined $VGNAME; the variable is VG_NAME.
    echo -n "Starting $SERVICE_NAME $VG_NAME"
    #To run it as root:
    SERVICE_START
    #Or to run it as some other user:
    #/bin/su - $USERNAME -c $SERVICE_START
    echo "."
    ;;
  stop)
    echo -n "Stopping $SERVICE_NAME"
    #To run it as root:
    echo "Stopping verigreen container"
    SERVICE_STOP
    #Or to run it as some other user:
    #/bin/su - $USERNAME -c SERVICE_STOP
    echo "."
    ;;
  restart|reload|condrestart)
    # service must be in path.
    # usually under /sbin/service or /usr/sbin/service
    service $SERVICE_NAME stop
    service $SERVICE_NAME start
    ;;
  status)
    SERVICE_STATUS
    ;;
  *)
    # Quoted so the usage text keeps its original spacing.
    echo "$USAGE"
    exit 1
    ;;
esac
exit 0 | true |
970e6262d35075e45e0a4986da0ed4948440fd02 | Shell | Arun-George-Zachariah/pharml | /pharML-Bind/run_ensemble_zinc.sh | UTF-8 | 2,231 | 2.75 | 3 | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | #!/bin/bash
#Add the PDB IDs which you have generated results for here
#pdbid_list="6m3m 6w02 6y2e 6y84"
#pdbid_list="6vsb 6vyb 6lzg"
pdbid_list="6lzg 6m3m 6vsb 6vyb 6w02 6w4b 6y2e 6y84"
# For each PDB structure: collect the five per-model prediction maps,
# ensemble-rank them, and leave the ranked compound lists in a per-PDB
# output directory.
for PDB_ID in $pdbid_list;
do
#INFERENCE_RESULTS="../../results-covid19/ncnpr-all/pharml-bind-covid-${PDB_ID}-np-32-lr0.000000001-bs8-ncnpr-all/results/covid19/inference"
#INFERENCE_RESULTS="../../results-covid19/ncnpr-all/pharml-bind-covid-${PDB_ID}-np-32-lr0.000000001-bs8-ncnpr-all/results/covid19/inference"
INFERENCE_RESULTS="../../results-covid19/zinc-in-man/pharml-bind-covid-${PDB_ID}-np-64-lr0.000000001-bs8-zinc-inman-float/results/covid19/inference"
OUTPUT_DIR=ensemble5x_covid19_results_${PDB_ID}_zinc_inman
# NOTE(review): mkdir without -p fails noisily on reruns (dir already
# exists); the script continues regardless because there is no `set -e`.
mkdir $OUTPUT_DIR
cd $OUTPUT_DIR
# The ranking scripts are copied in so they run with cwd-relative paths.
cp ../ensemble-rank.py .
cp ../chemio.py .
# Colon-separated list of the 5 model prediction maps for this PDB ID.
MAPS="${INFERENCE_RESULTS}/model_0/combined_predictions_${PDB_ID}_inference_model0.map:${INFERENCE_RESULTS}/model_1/combined_predictions_${PDB_ID}_inference_model1.map:${INFERENCE_RESULTS}/model_2/combined_predictions_${PDB_ID}_inference_model2.map:${INFERENCE_RESULTS}/model_3/combined_predictions_${PDB_ID}_inference_model3.map:${INFERENCE_RESULTS}/model_4/combined_predictions_${PDB_ID}_inference_model4.map"
python ensemble-rank.py --out top-ranked-fp-${PDB_ID}.txt --maps ${MAPS}
cat ${INFERENCE_RESULTS}/model_*/combined_predictions_${PDB_ID}_inference_model*.map > ./ensemble5x_all.out
# Keep only the first column (compound ID) of the ranked list.
cat top-ranked-fp-${PDB_ID}.txt | awk '{ printf "%1s\n", $1 }' > top-compounds-${PDB_ID}-ID-only-ranked-fp.out
#cat ./ensemble5x_all.out | sort | uniq --all-repeated --unique | uniq -c | sort > ensemble5x_final_ranked_${PDB_ID}.out
#cat top-preds.txt | awk '{ printf "%10s\n", $1 }' | tr -d '[:blank:]' > ensemble5x_final_top_${PDB_ID}.out
#cat ./ensemble5x_final_ranked_${PDB_ID}.out | grep "4 .*.pred: 1" | sort > ensemble5x_final_top_${PDB_ID}.out
#cat ensemble5x_final_top_${PDB_ID}.out | sed 's/.*.lig\/lig//' | sed 's/.lig.*.//' > ensemble5x_top_${PDB_ID}.txt
cd ../
done
#Format needs to look like this in final HTML file
#<object data="https://www.ebi.ac.uk/chembl/embed/#compound_report_card/CHEMBL603/name_and_classification" width="100%" height="100%"></object>
| true |
29447bacd3c5c9a55268379ae74bbcdfd31a0cf5 | Shell | eupedrosa/dotfiles | /private_dot_local/bin/executable_tmuxctl | UTF-8 | 3,773 | 3.84375 | 4 | [] | no_license | #!/bin/bash
set -e
# Abort early when tmux is not installed: with an empty command substitution
# [[ -x "" ]] is false, so control falls into the error branch.
[[ -x $(command -v tmux) ]] || {
echo "tmux not installed ..."
exit 1
}
# Print usage for all subcommands to stderr, then exit (with the status of
# the preceding echo, normally 0).
help() {
>&2 echo 'usage: tmuxctl <command> [OPTIONS]
These are the available commands:
jump smart jump to a directory followed by a session [create and] switch
switch select a sessiont to switch to
split smart split window, if current pane is an ssh session or a shell
running in distrobox it will use them as start command
splitdb split window with a distrobox shell
'
exit
}
# No arguments at all: show usage and exit.
[[ $# -eq 0 ]] && help
# Pick a directory from zoxide's history via fzf, then create (if needed) a
# tmux session named after the directory's basename and switch/attach to it.
jump() {
# Current session name; empty when not inside tmux.
sname=$(tmux display -p -F '#{session_name}' || echo "")
# The transient "popup" session must not be jumped away from.
[[ "$sname" == "popup" ]] && tmux display 'jump disabled for this session' && exit 0
directory=$(\
zoxide query -l |\
fzf-tmux -p --reverse --prompt "jump> " --info hidden --scheme=history \
--border-label="Zoxide List" --border-label-pos=3 \
--color=bg+:#363a4f,bg:#24273a,spinner:#f4dbd6,hl:#ed8796 \
--color=fg:#cad3f5,header:#ed8796,info:#c6a0f6,pointer:#f4dbd6 \
--color=marker:#f4dbd6,fg+:#cad3f5,prompt:#c6a0f6,hl+:#ed8796 \
|| true
)
# Session name is the last path component of the chosen directory.
session_name="${directory##*/}"
# empty target is valid, just do nothing
[[ -z "$session_name" ]] && exit 0
# Create the session if it does not exist
tmux new -d -A -s "$session_name" -c "$directory"
# Outside tmux we must attach; inside we switch the current client.
if [[ -z $TMUX ]]; then
exec tmux attach -t "$session_name"
else
tmux switch-client -t "$session_name"
fi
}
# Interactively choose an existing tmux session (excluding "popup") via fzf
# and switch the current client to it.
switch() {
sname=$(tmux display -p -F '#{session_name}' || echo "")
[[ "$sname" == "popup" ]] && tmux display 'switch disabled for this session' && exit 0
target=$(\
tmux ls -F "#{session_name}" |\
grep -v ^popup$ |\
fzf-tmux -p --reverse --prompt "session> " --info hidden \
--border-label="Tmux Session List" --border-label-pos=3 \
--color=bg+:#363a4f,bg:#24273a,spinner:#f4dbd6,hl:#ed8796 \
--color=fg:#cad3f5,header:#ed8796,info:#c6a0f6,pointer:#f4dbd6 \
--color=marker:#f4dbd6,fg+:#cad3f5,prompt:#c6a0f6,hl+:#ed8796 \
|| true
)
# empty target is valid, just do nothing
[[ -z "$target" ]] && exit 0
tmux switch-client -t "$target"
}
# Split the current window horizontally. If the foreground process on the
# current pane's tty is an ssh session or a distrobox shell, reuse its
# command line so the new pane starts in the same remote/container context.
split() {
local ptty cmd run_cmd=""
ptty=$(tmux display -p "#{pane_tty}")
# One line per process: command name followed by its full argv.
cmd=$(ps -o comm=,args= -t "$ptty")
[[ ! -z $(echo "$cmd" | awk '$1 == "ssh"') ]] && {
# Drop the leading comm field, keeping the ssh invocation with its args.
run_cmd=$(echo "$cmd" | awk '$1 == "ssh"' | awk '{$1=""; print $0}')
}
[[ ! -z $(echo "$cmd" | awk '$1 == "distrobox-enter"') ]] && {
# Drop comm plus the program name, keeping distrobox-enter's arguments.
run_cmd=$(echo "$cmd" | awk '$1 == "distrobox-enter"' | awk '{$1=$2=""; print $0}')
}
# $run_cmd is intentionally unquoted so it word-splits into command + args
# (an empty run_cmd yields a plain shell split).
tmux split-window -h -c "#{pane_current_path}" $run_cmd
}
# Choose a distrobox container via fzf and open it in a new horizontal pane.
splitdb() {
# Second pipe-separated column of `distrobox list` = container names;
# tail skips the header, awk trims surrounding whitespace.
local list=$(distrobox list --no-color | cut -d'|' -f2 | tail -n +2 | awk '{$1=$1;print}')
target=$(\
echo "$list" |\
fzf-tmux -p --reverse --prompt "container> " --info hidden \
--border-label="Distrobox List" --border-label-pos=3 \
--color=bg+:#363a4f,bg:#24273a,spinner:#f4dbd6,hl:#ed8796 \
--color=fg:#cad3f5,header:#ed8796,info:#c6a0f6,pointer:#f4dbd6 \
--color=marker:#f4dbd6,fg+:#cad3f5,prompt:#c6a0f6,hl+:#ed8796 \
|| true
)
# empty target is valid, just do nothing
[[ -z "$target" ]] && exit 0
tmux split-window -h -c "#{pane_current_path}" "distrobox-enter $target"
}
# Dispatch the first CLI argument to the matching subcommand function.
command="$1"
shift
case "$command" in
jump)
jump
;;
switch)
switch
;;
split)
split
;;
splitdb)
splitdb
;;
*)
echo "Unknown command: $command"
help
;;
esac
| true |
7004b5b3a85ebd6ff175948b135265053bcf41f1 | Shell | sellmerfud/awakening | /scripts/release.sh | UTF-8 | 8,375 | 4.125 | 4 | [
"MIT"
] | permissive | #! /usr/bin/env bash
# This script will set the product version by modifying the appropriate source files
# Then use sbt to build and stage the files for the new version
# And finally zip up the results and copy the zip file to Dropbox
#
# extglob enables the +([0-9]) patterns used to validate the version argument.
shopt -s extglob
# Print usage to stderr and terminate the script with status 1.
usage() {
  cat >&2 <<'EOF'
usage: package.sh [--commit|--no_commit|-n] [version]
 --commit - Commit changes and push them to Github (Default)
 --no-commit - Do not commit changes
 -n - Do not commit changes (same as --no-commit)

 version - Can be one of:
 next_minor: Bump the minor version number (Default)
 next_major: Bump the major version number and set minor to zero
 <major>.<minor>: where: major and minor are integers
EOF
  exit 1
}
# Succeed (exit status 0) when the git working tree has uncommitted changes;
# `git status --porcelain` prints nothing for a clean tree.
repo_dirty() {
  local changes
  changes=$(git status --porcelain)
  [[ -n "$changes" ]]
}
# Ask a yes/no question and read the answer from stdin, reprompting on
# anything else.
# Arguments: $1 - question text (printed verbatim)
# Returns:   0 for an answer starting with y/Y,
#            1 for n/N or end-of-input on stdin
getYorN() {
  local prompt="$1"
  local response
  while true; do
    # %s keeps prompts containing '%' or backslashes literal; the original
    # interpolated $prompt directly into the printf format string.
    printf '\n%s (y/n) ' "$prompt"
    # The original looped forever on EOF (read fails, response stays empty);
    # treat end-of-input as "no". -r preserves backslashes in the answer.
    read -r response || return 1
    case "$response" in
      y*|Y*) return 0 ;;
      n*|N*) return 1 ;;
      *) printf 'Invalid response\n' ;;
    esac
  done
}
# Update the version in build.sbt
# Note we cannot update the README.md file until we have uploaded the
# zip file to Dropbox so that we can get its download URL. (see the update_readme() function)
# Rewrite the `version := "X.Y"` setting in build.sbt in place.
# Arguments: $1 - the new version string (major.minor)
set_version() {
local version=$1
# ruby -p -i: in-place stream edit; the gsub keeps the `version := ` prefix
# (capture group 1) and substitutes the quoted version number.
ruby -p -i -e 'gsub(/(version\s*:=\s*)("\d+\.\d+")/, "\\1\"'$version'\"")' build.sbt
printf "Version set to $version\n"
}
# Add the files that we have modified to the git index,
# commit the release, and push it to Github
# Stage all modified tracked files, commit, tag vX.Y, and push master + tags.
# Arguments: $1 - version string
commit_release() {
local version=$1
git add --update .
# NOTE(review): `git ci` relies on a user-configured alias for `commit`;
# confirm the alias exists on the release machine.
git ci -m"Update version number to $version"
git tag -m"Release v$version" v$version
git push --tags origin master
}
# Get a short term access token for the dropbox api using our refresh token.
# We must do this because the access tokens are shot term and will expire
# in about 4 hours.
# Exchange the long-lived Dropbox refresh token for a short-lived access
# token (Dropbox access tokens expire after ~4 hours).
# Outputs: the access token on stdout
# Returns: 0 on success, 1 if the API response contains an error
get_access_token() {
local refresh_token="$(head -n1 ~/.dropbox/game_bots_refresh_token)"
local client_id="$(head -n1 ~/.dropbox/game_bots_client_id)"
# Temp file for the JSON response; $$ keeps concurrent runs separate.
local response=/tmp/access_token_response.$$
local result=1
curl -s https://api.dropbox.com/oauth2/token \
-d grant_type=refresh_token \
-d refresh_token="$refresh_token" \
-d client_id="$client_id" > $response
if fgrep --quiet '"error":' $response; then
printf "Error getting access token\n" >&2
jq . $response >&2
else
jq --raw-output .access_token $response
result=0
fi
rm -f $response
return $result
}
# Get the sharable url for the zip file and echo it to stdout
get_zipfile_url() {
local version="$1"
local local_zip_file_path="target/$program_name-${version}.zip"
local dropbox_zip_file_path="/$dropbox_folder/$program_name-${version}.zip"
local access_token=""
local response=/tmp/get_zipfile_url_response.$$
local result=1
[[ -f $local_zip_file_path ]] || {
printf "zip file does not exist: $local_zip_file_path\n"
return 1
}
# NOTE: We cannot assign this in the local variable declaration
# because we would lose the returned error code and would
# get the success error code from the 'local' function.
access_token="$(get_access_token)"
# If the url already exists then an error object is returned with the url buried
# several layers down. Otherwise it is in the field .url at top level.
curl -s -X POST https://api.dropboxapi.com/2/sharing/create_shared_link_with_settings \
--header "Authorization: Bearer $access_token" \
--header "Content-Type: application/json" \
--data "{\"path\":\"${dropbox_zip_file_path}\"}" > $response
if fgrep --quiet '"shared_link_already_exists":' $response; then
jq --raw-output '.error.shared_link_already_exists.metadata.url' $response
result=0
elif fgrep --quiet '"error":' $response; then
printf "Error getting zipfile url\n" >&2
jq . $response >&2
else
jq --raw-output '.url' $response
result=0
fi
rm -f $response
return $result
}
# Upload the staged zip file to Dropbox, overwriting any existing copy.
# Arguments: $1 - version string (used to derive the zip file names)
# Returns:   0 on success, 1 if the API response contains an error
upload_zipfile() {
local version="$1"
local local_zip_file_path="target/$program_name-${version}.zip"
local dropbox_zip_file_path="/$dropbox_folder/$program_name-${version}.zip"
local access_token=""
local response=/tmp/upload_response.$$
local result=1
# NOTE: We cannot assign this in the local variable declaration
# because we would lose the returned error code and would
# get the success error code from the 'local' function.
access_token="$(get_access_token)"
# mode=overwrite replaces an existing file of the same name so the shared
# link (if any) keeps pointing at the latest build.
curl -s -X POST https://content.dropboxapi.com/2/files/upload \
--header "Authorization: Bearer $access_token" \
--header "Dropbox-API-Arg: {\"autorename\":false,\"mode\":\"overwrite\",\"mute\":false,\"path\":\"${dropbox_zip_file_path}\",\"strict_conflict\":false}" \
--header "Content-Type: application/octet-stream" \
--data-binary @"$local_zip_file_path" >$response
if fgrep --quiet '"error":' $response; then
printf "Error uploading zip file\n" >&2
jq . $response >&2
else
printf "$local_zip_file_path copied to Dropbox\n"
result=0
fi
rm -f $response
return $result
}
# Update the README.md file with the new
# version number and dropbox url
# Update the README.md file with the new
# version number and dropbox url
# Arguments: $1 - version string
update_readme() {
local version="$1"
local zip_file_url=""
zip_file_url="$(get_zipfile_url $version)"
# First gsub bumps the displayed "[Version X.Y]"; second rewrites the [1]:
# link-reference line to the freshly obtained Dropbox url.
ruby -p -i -e 'gsub(/\[Version\s*\d+\.\d+\]/, "[Version '$version']")' \
-e 'gsub(/^\[1\]:.*$/, "[1]: '"$zip_file_url"'")' README.md
}
# Start of main script
# Commit changes by default
DO_COMMIT=yes
# First positional may be a commit-mode flag; it is shifted away so the
# version argument (if any) ends up in $1.
case "$1" in
--commit)
shift
;;
-n|--no-commit)
DO_COMMIT=no
shift
;;
-*)
# NOTE(review): the usage text advertises "--no_commit" (underscore), but
# only "--no-commit"/"-n" are matched above, so "--no_commit" lands here.
usage
;;
esac
# The deafault action if no paramter is given is to update the minor version number
case "$1" in
"")
NEW_VERSION=next_minor
;;
+([0-9]).+([0-9]))
NEW_VERSION="$1"
;;
next_minor)
NEW_VERSION=next_minor
;;
next_major)
NEW_VERSION=next_major
;;
*)
usage
;;
esac
## Set the current working directory to the parent directory of this script.
## (The top level working directory of the git repository)
## This is important because sbt' must be run from the top level directory
cd $(dirname $0)/..
# Program name and dropbox folder are used to
# upload the zip file to dropbox
program_name=awakening
dropbox_folder=awakening
# Make sure we are on the master branch
branch=$(git branch --show-current 2>/dev/null)
if [[ $? -ne 0 ]]; then
# NOTE(review): "\C" is a stray backslash in this printf format string.
printf "\Cannot determine the current branch!\n"
exit 1
elif [[ $branch != "master" ]]; then
printf "Must be on 'master' branch to create the release.\n"
printf "Current branch is '$branch'"
exit 1
fi
# A dirty tree is allowed only after explicit confirmation.
if repo_dirty; then
printf "Working directory is not clean.\n"
git status --short
getYorN "Do you wish to continue anyway?" || exit 0
fi
# Extract the current X.Y version from build.sbt (text between the quotes).
CURRENT_VERSION=$(grep '^\s*version' build.sbt | tr '"' , | cut -d, -f2)
printf "\nCurrent version is $CURRENT_VERSION\n"
if [[ $CURRENT_VERSION =~ ^([[:digit:]]+)\.([[:digit:]]+)$ ]]; then
MAJOR=${BASH_REMATCH[1]}
MINOR=${BASH_REMATCH[2]}
# Resolve the symbolic version names into a concrete X.Y value.
case $NEW_VERSION in
current ) NEW_VERSION=$CURRENT_VERSION ;;
next_major) NEW_VERSION=$(($MAJOR + 1)).0 ;;
next_minor) NEW_VERSION=$MAJOR.$(($MINOR + 1)) ;;
* ) ;; # NEW_VERSION was explicitly given as the argument
esac
else
printf "The current version does not have the correct format of <major.minor>\n"
exit 1
fi
# Confirm with the user before writing anything.
if [[ $CURRENT_VERSION != $NEW_VERSION ]]; then
if getYorN "Set version to $NEW_VERSION and create a release?"; then
set_version $NEW_VERSION
else
exit 0
fi
else
getYorN "Create a release for version $NEW_VERSION?" || exit 0
fi
# From here on, any failure aborts the release.
set -euo pipefail
current_command=$BASH_COMMAND
# keep track of the last executed command
trap 'last_command=$current_command; current_command=$BASH_COMMAND' DEBUG
# echo an error message before exiting
trap 'printf "\"${last_command}\" command failed with exit code $?.\n"' EXIT
# Build and stage the release artifacts (produces the zip under target/).
sbt stage
upload_zipfile $NEW_VERSION
update_readme $NEW_VERSION
if [[ $DO_COMMIT == yes ]]; then
commit_release $NEW_VERSION
printf "Version $NEW_VERSION successfully created and pushed to Github!"
else
printf "Version $NEW_VERSION successfully created!"
fi
# Clear the traps so the EXIT handler does not print a spurious failure.
trap - DEBUG EXIT
| true |
7ab6bdc53ed4f3723d71566c5b11f134706780d5 | Shell | kartikeya-b/jenkins-docker | /files/init.sh | UTF-8 | 2,045 | 2.828125 | 3 | [] | no_license | #! /bin/bash
# Create the jenkins user/group and copy the seed configuration into place.
# NOTE(review): assumes ${gid}, ${uid} and ${JENKINS_HOME} are provided by
# the environment (e.g. the Dockerfile) — confirm. The addgroup/adduser
# flags (-D, -h, -G) are the BusyBox/Alpine variants.
addgroup -g ${gid} jenkins && adduser -h "${JENKINS_HOME}" -u ${uid} -G jenkins -s /bin/bash -D jenkins
cd ${JENKINS_HOME}
cp -r /files/plugins ${JENKINS_HOME}
cp /files/scriptApproval.xml .
cp /files/hudson.plugins.emailext.ExtendedEmailPublisher.xml .
cp /files/jenkins.model.JenkinsLocationConfiguration.xml .
# Create a Jenkins job from the config template, pointed at a repository.
# Arguments: $1 - job name, $2 - git repository URL
createJob() {
  local name="$1"
  local repo="$2"
  local jobDir
  # Assigned separately from the declaration so the helper's exit status
  # is not masked by `local`.
  jobDir=$(jobDir "${name}")
  # Create the job directory if missing; `|| mkdir` (instead of the original
  # `[ ! -e ] && mkdir`) leaves a zero status when the directory exists.
  [ -e "${jobDir}" ] || mkdir -p "${jobDir}"
  cp /files/template-config.xml "${jobDir}/config.xml"
  # Point the templated job at the requested repository.
  sed -i "s|REPO|${repo}|g" "${jobDir}/config.xml"
}
# Create a job like createJob, then disable its PeriodicFolderTrigger by
# commenting the trigger section out of the generated config.xml.
# Arguments: $1 - job name, $2 - git repository URL
createJobNotTriggered() {
local name=$1
local repo=$2
createJob ${name} ${repo}
# Comment out configuration of PeriodicFolderTrigger
local jobDir=$(jobDir ${name})
# The template carries START/END marker comments; rewriting them to "<!--"
# and "-->" wraps the whole trigger block in an XML comment.
sed -i "s|<!-- PeriodicFolderTrigger START -->|<!--|g" ${jobDir}/config.xml
sed -i "s|<!-- PeriodicFolderTrigger END -->|-->|g" ${jobDir}/config.xml
}
# Print (without a trailing newline) the job directory path for a job name.
# Arguments: $1 - job name
jobDir() {
  # `local` fixes the original's leak of `name` into the global scope, and
  # printf '%s' avoids echo's word-splitting of the unquoted expansion.
  local name="$1"
  local dir="jobs/${name}"
  printf '%s' "${dir}"
}
# Register every job: public GitHub projects plus internal GitLab repos.
# The puppet jobs are created without the periodic trigger.
createJob eid git@git.difi.local:eid
createJob jenkins-docker https://github.com/difi/jenkins-docker
createJob kontaktregister-statistikk https://github.com/difi/kontaktregister-statistikk
createJob poc-statistics https://github.com/difi/poc-statistics
createJob eid-oidc-provider git@git.difi.local:eid-oidc-provider
createJob minid-on-the-fly git@git.difi.local:minid-on-the-fly
createJob eid-resilience git@git.difi.local:eid-common-resilience.git
createJob eid-common git@git.difi.local:eid-common.git
createJob krr git@git.difi.local:krr.git
createJob idporten git@git.difi.local:idporten.git
createJob idporten-admin git@git.difi.local:idporten-admin.git
createJob idporten-authlevel git@git.difi.local:idporten-authlevel.git
createJob idporten-app-dpi-reklame git@git.difi.local:idporten-app-dpi-reklame.git
createJob idporten-minid-updater git@git.difi.local:idporten-minid-updater.git
createJob dsf-gateway git@git.difi.local:dsf-gateway.git
createJob eid-level1-poc git@git.difi.local:eid-level1-poc.git
createJobNotTriggered puppet-hiera git@eid-gitlab.dmz.local:puppet/puppet_hiera.git
createJobNotTriggered puppet-control git@eid-gitlab.dmz.local:puppet/puppet_control.git
# Hand ownership of everything under the Jenkins home to the jenkins user.
chown -R jenkins:jenkins ${JENKINS_HOME}
| true |
cfa57dca117016b89d1fb5d5c9105eb877e74dbb | Shell | mingrener/dealii-translator | /translator_tools/translator_to.sh | UTF-8 | 2,216 | 2.984375 | 3 | [] | no_license | #!/bin/bash
# coding=UTF-8
# Rebuild the translation workspace: reset include/deal.II-translator from
# the pristine include/deal.II-origin tree, split each header into
# translatable .txt fragments, and concatenate them per directory into
# translator_file/ for one-shot translation.
mkdir translator_file
rm -rf translator_file/*
path=include/deal.II-translator
rm -rf include/deal.II-translator
cp -rf include/deal.II-origin include/deal.II-translator
files=$(ls $path)
for filename in $files
do
echo $path/$filename
# step2-main code
hfile=$(ls $path/$filename)
for hfilename in $hfile
do
# Generate the "_0.txt" decomposition file
./contrib/translator/to.py $path/$filename/$hfilename
# Skip translation and merge the documents back together
txtfile_0=`echo $hfilename | sed 's/.h$/\_0.txt/'`
hfile0=`echo $hfilename | sed 's/.h$/\_0.h/'`
hfile1=`echo $hfilename | sed 's/.h$/.origin/'`
hfile2=`echo $hfilename | sed 's/.h$/.temp/'`
./contrib/translator/from.py $path/$filename/$txtfile_0 # outputs hfile0 (not yet repaired)
mv $path/$filename/$hfilename $path/$filename/$hfile1 # bak the origin file
./contrib/translator/wrap.py $path/$filename/$hfile0 >> $path/$filename/$hfilename
mv $path/$filename/$hfile0 $path/$filename/$hfile2 # bak the file
done
# step3 - concatenate the .txt fragments so they can be translated in one pass
txtfile=$(ls $path/$filename/*.txt)
for txtfilename in $txtfile
do
echo $txtfilename >> translator_file/$filename
cat $txtfilename >> translator_file/$filename
echo -e "\n" >> translator_file/$filename
done
done
# for filename in $files
# do
# # # # step1-初始化clear
# # rm -rf $path/$filename/*_postcommands
# # rm -rf $path/$filename/*_commands
# # rm -rf $path/$filename/*_latex
# # rm -rf $path/$filename/*_comment
# # rm -rf $path/$filename/*.txt
# # rm -rf $path/$filename/*.temp
# # rm -rf $path/$filename/*.origin
# done
# # step3-将翻译合并成一个文件
# result_path=translator_file
# translate_files=$(ls $result_path)
# for translate_filename in $translate_files
# do
# cat $result_path/$translate_filename >> $result_path/all.txt
# echo $result_path/$translate_filename
# done
| true |
71ea905b7cfa763c2d2021cfc4c688c4908fcfa8 | Shell | zyxrhythm/zyx-tools | /nix/fprm.sh | UTF-8 | 2,168 | 3.875 | 4 | [] | no_license | #!/bin/bash
#Fix permissions of files and folders
#This script is based on this blog: https://odd.blog/2013/11/05/fix-file-644-directory-775-permissions-linux-easily/
#$1 = directory permission (if blank, default value: 755, use 'x' to prevent the script from changing directory permissions)
#$2 = file permission (if blank, default value: 644, use x to to prevent the script from changing file permissions)
#$3 = recursive switch ( if 'r' is added as 3rd parameter the script will do a recursive file perm fix on the current dir, but if a valid is used as 3rd parameter, the script will update permissions on that directory, if left blank the script will do a non-recursive file and dir perm fix on the current dir)
if [[ $1 = 'help' ]]; then
  clear
  echo "
Fix permissions of files and folders
This script is based on the following blog:
https://odd.blog/2013/11/05/fix-file-644-directory-775-permissions-linux-easily/
\$1 = directory permission
if blank, default value: 755
use 'x' to prevent the script from modifying directory permissions
\$2 = file permission
if blank, default value: 644,
use x to to prevent the script from modifying file permissions.
\$3 = recursive switch
if 'r' is added as 3rd parameter the script will do a recursive file and dir perm fix on the current dir,
if a valid directtory is used as 3rd parameter, the script will update permissions on that directory.
if left blank the script will do a non-recursive file and dir perm fix on the current dir
"
else
  # Three-octal-digit permission, e.g. 755 or 644.
  permregex="^[0-7]{3}$"
  # 1st argument: directory permission (default 755; 'x' skips directories).
  # Invalid values now abort instead of leaving $dirperm unset (which made
  # the chmod below run with a missing mode argument).
  if [ -z "$1" ]; then
    dirperm='755'
  elif [[ $1 =~ $permregex ]]; then
    dirperm="$1"
  elif [ "$1" = 'x' ]; then
    dskip='y'
  else
    echo "invalid directory permission: $1" >&2
    exit 1
  fi
  # 2nd argument: file permission (default 644; 'x' skips files).
  if [ -z "$2" ]; then
    fileperm='644'
  elif [[ $2 =~ $permregex ]]; then
    fileperm="$2"
  elif [ "$2" = 'x' ]; then
    fskip='y'
  else
    echo "invalid file permission: $2" >&2
    exit 1
  fi
  # 3rd argument: target ('r' or blank -> current directory, else a directory).
  # The original tested `[ -d $3 ]` first, which is TRUE for an empty $3
  # (a one-argument test checks string non-emptiness of "-d"), so a blank
  # argument selected an empty target instead of ".".
  if [ "$3" = 'r' ]; then
    tardir="./"
  elif [ -n "$3" ] && [ -d "$3" ]; then
    tardir="$3"
  elif [ -z "$3" ]; then
    tardir="."
  else
    echo "not a directory: $3" >&2
    exit 1
  fi
  if [[ $dskip != 'y' ]]; then
    find "$tardir" -type d -exec chmod "$dirperm" {} \;
  fi
  if [[ $fskip != 'y' ]]; then
    find "$tardir" -type f -exec chmod "$fileperm" {} \;
  fi
  exit 0
fi
| true |
36106a475aa921c60814c0a6c52070d0df14ae04 | Shell | lukaszachy/tmt | /tests/prepare/require/test.sh | UTF-8 | 1,161 | 3.1875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# BeakerLib test: verify tmt's prepare/require step for available, missing,
# and mixed package sets on both fedora and centos:7 container images.
. /usr/share/beakerlib/beakerlib.sh || exit 1
rlJournalStart
rlPhaseStartSetup
rlRun "pushd data"
rlRun "set -o pipefail"
rlPhaseEnd
for image in fedora centos:7 ; do
# Prepare the tmt command and expected error message
# (dnf on fedora and yum on centos:7 report a missing package differently).
tmt="tmt run -avr provision -h container -i $image"
if [[ $image == fedora ]]; then
error='Unable to find a match: forest'
else
error='No package forest available'
fi
rlPhaseStartTest "Require an available package ($image)"
rlRun -s "$tmt plan --name available"
rlAssertGrep '1 preparation applied' $rlRun_LOG
rlPhaseEnd
# The trailing `2` tells rlRun the command is expected to exit with 2.
rlPhaseStartTest "Require a missing package ($image)"
rlRun -s "$tmt plan --name missing" 2
rlAssertGrep "$error" $rlRun_LOG
rlPhaseEnd
rlPhaseStartTest "Require both available and missing ($image)"
rlRun -s "$tmt plan --name mixed" 2
rlAssertGrep "$error" $rlRun_LOG
rlPhaseEnd
done
rlPhaseStartCleanup
rlRun "rm -f output"
rlRun "popd"
rlPhaseEnd
rlJournalEnd
| true |
3564eb1cdafc5f15b3d84a934ba9df4ce83360a3 | Shell | rootless-containers/usernetes | /boot/docker-2ndboot.sh | UTF-8 | 144 | 2.671875 | 3 | [
"Apache-2.0",
"GPL-2.0-only",
"BSD-2-Clause"
] | permissive | #!/bin/bash
# Fail fast and trace; set -e now also catches a failing cd (the original
# ran cd before enabling -e, silently continuing from the wrong directory).
set -eux
# Enter the repository root (the parent of this script's directory); all
# substitutions are quoted so paths with spaces survive.
cd "$(realpath "$(dirname "$0")/..")"
# Run the installer; on failure dump recent journal entries and exit non-zero.
if ! ./install.sh "$@"; then
  journalctl -xe --no-pager
  exit 1
fi
# Replace this shell with a journal follower so logs keep streaming.
exec journalctl -f -n 100
| true |
8f5f0c3ce134914333dcfe134218ff1c088f2a83 | Shell | alachaum/cube-centos | /rootfs/nx/entrypoint-setup | UTF-8 | 137 | 3.015625 | 3 | [] | no_license | #!/bin/bash
# Run all setup scripts
if [ -d "/nx/setup-scripts" ]; then
  # The glob must be UNQUOTED so it expands to the actual script paths; the
  # original quoted "/nx/setup-scripts/*", which passed the literal pattern
  # string to bash and never ran any script.
  for f in /nx/setup-scripts/*; do
    /bin/bash "$f"
  done
fi | true |
d90966b2395f610cbe231ac274cd3238a6a605c4 | Shell | Jit-INP/MinCaml-Compiler | /scripts/mincaml-test-parser.sh | UTF-8 | 648 | 3.1875 | 3 | [] | no_license | #! /bin/sh
# Run the MinCaml parser (-p) over every valid and invalid syntax sample and
# report OK/KO per case: valid inputs must parse (exit 0), invalid must not.
# NOTE(review): the "\e[33m..." colour escapes rely on /bin/sh's echo
# interpreting backslash escapes (dash does; bash's builtin echo does not).
cd "$(dirname "$0")"/.. || exit 1
MINCAMLC=java/mincamlc
echo "\e[33mStart syntax test\e[0m\n"
for test_case in tests/syntax/valid/*.ml
do
echo "testing parser on: $test_case"
$MINCAMLC -p "$test_case" 2> /dev/null 1> /dev/null
if [ $? -eq 0 ]
then
echo "\e[32mOK\e[0m"
else
echo "\e[31mKO\e[0m"
fi
done
# Invalid samples: the parser is expected to FAIL (non-zero exit).
for test_case in tests/syntax/invalid/*.ml
do
echo "testing parser on: $test_case"
$MINCAMLC -p "$test_case" 2> /dev/null 1> /dev/null
if [ $? -ne 0 ]
then
echo "\e[32mOK\e[0m"
else
echo "\e[31mKO\e[0m"
fi
done
echo "\e[33mEnd syntax test\e[0m\n" | true |
9b56c99948cf80525aa07db5089944a3476f55cb | Shell | dmccuk/vagrant-puppet-master | /bootstrap-master.sh | UTF-8 | 692 | 3.171875 | 3 | [] | no_license | #!/bin/sh
# Run on VM to bootstrap Puppet Master server
# Idempotence guard: skip installation when a "puppet master" process is
# already running (`grep -v grep` drops the grep process itself from ps).
if ps aux | grep "puppet master" | grep -v grep 2> /dev/null
then
echo "Puppet Master is already installed. Exiting..."
exit 0
else
# Install Puppet Master (puppetlabs apt repo for Ubuntu trusty)
wget https://apt.puppetlabs.com/puppetlabs-release-trusty.deb
sudo dpkg -i puppetlabs-release-trusty.deb
sudo apt-get update -yq
sudo apt-get upgrade -yq
sudo apt-get install -yq puppetmaster
# Add optional alternate DNS names to /etc/puppet/puppet.conf
# (sed appends a new line right after the matching section header)
sudo sed -i 's/.*\[main\].*/&\ndns_alt_names = puppet,puppet.example.com/' /etc/puppet/puppet.conf
sudo sed -i 's/.*\[master\].*/&\nautosign = true/' /etc/puppet/puppet.conf
fi
| true |
46cf037de2d0ee4616047d42eec2f7207170c417 | Shell | denyHell/seq2seqOT | /scripts/train.sh | UTF-8 | 1,531 | 2.578125 | 3 | [] | no_license | #!/bin/sh
# Train the seq2seq NMT model for a "target-source" language pair given as
# the first CLI argument (e.g. "en-de" -> source=de, target=en).
# NOTE(review): ${@:1} and the parameter expansions below are bashisms even
# though the shebang is #!/bin/sh — confirm this is always run under bash.
lang_pair=${@:1}
source=${lang_pair##*-}
target=${lang_pair%-*}
echo source $source
echo target $target
vocab="../data/vocab_$lang_pair.bin"
train_src="../data/train.$lang_pair.$source.txt"
train_tgt="../data/train.$lang_pair.$target.txt"
dev_src="../data/dev.$lang_pair.$source.txt"
dev_tgt="../data/dev.$lang_pair.$target.txt"
test_src="../data/test.$lang_pair.$source.txt"
# NOTE(review): test_tgt is assigned twice — the second assignment (English
# references) wins; the first line is effectively dead.
test_tgt="../data/test.$lang_pair.$target.txt"
test_tgt="../data/test.$lang_pair.en.txt"
embed_file="../wiki.$source.vec"
work_dir="results/$lang_pair"
mkdir -p ${work_dir}
echo save re
# comment below to test the decoder
python ../models/nmt.py \
train \
--cuda \
--vocab ${vocab} \
--train-src ${train_src} \
--train-tgt ${train_tgt} \
--dev-src ${dev_src} \
--dev-tgt ${dev_tgt} \
--save-to ${work_dir}/model.bin \
--valid-niter 800 \
--batch-size 16 \
--hidden-size 512 \
--embed-size 512 \
--uniform-init 0.04 \
--dropout 0.2 \
--clip-grad 5.0 \
--lr-decay 0.5 \
--num-layers 1 \
--attention-type 'general' \
--bidirectional \
--tau 0.01 \
--gamma1 0.1 \
--gamma2 0.1 \
--cost-fcn 'cosine' \
--max-epoch 100
# --embedding_file ${embed_file}
# FOR BIDIRECTIONAL add the flag --bidirectional
#python nmt.py \
# decode \
# --cuda \
# --beam-size 5 \
# --max-decoding-time-step 100 \
# ${work_dir}/model.bin \
# ${test_src} \
# ${work_dir}/decode.txt
#perl multi-bleu.perl ${test_tgt} < ${work_dir}/decode.txt
| true |
fa80015114be02725b842eadc8642f1f532bb5ea | Shell | daxm/docker-guacamole-server | /install_docker.sh | UTF-8 | 1,159 | 3.34375 | 3 | [] | no_license | #!/usr/bin/env bash
echo "### Remove any previously installed Docker packages ###"
sudo apt remove docker*
echo "### Add APT Cert and Key ###"
sudo apt install apt-transport-https ca-certificates curl software-properties-common -y
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
sudo apt-key fingerprint 0EBFCD88
echo "### Add Docker Repository ###"
sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
echo "### Refresh APT Cache ###"
sudo apt update
echo "### Install Docker CE ###"
sudo apt install docker-ce -y
echo "### Install Python3 and associated packages and modules for Docker Compose. ###"
sudo apt install python3 python3-pip python3-setuptools -y
sudo -H pip3 install docker-compose
echo "### Add current user to the docker group. (Log out and back in to take effect.) ###"
sudo usermod -aG docker $(whoami)
echo "########### NEXT STEPS ############"
echo "### 1. Edit the env_file and modify it to your environment."
echo "### 2. Run the runme.sh file to build, deploy, and start the Docker containers."
echo "###################################"
| true |
107bff69aa3d27154f990879d1bcb3d520106b86 | Shell | obsidiansystems/provenance | /scripts/generate-toc.sh | UTF-8 | 3,325 | 4.3125 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# This file can either be executed or sourced.
# If sourced, the generate_toc function will be added to the environment.
# If executed, it will be run using the provided parameters (provide --help for usage).
# Determine if this script was invoked by being executed or sourced.
( [[ -n "$ZSH_EVAL_CONTEXT" && "$ZSH_EVAL_CONTEXT" =~ :file$ ]] \
|| [[ -n "$KSH_VERSION" && $(cd "$(dirname -- "$0")" && printf '%s' "${PWD%/}/")$(basename -- "$0") != "${.sh.file}" ]] \
|| [[ -n "$BASH_VERSION" ]] && (return 0 2>/dev/null) \
) && generate_toc_sourced='YES' || generate_toc_sourced='NO'
__generate_toc_usage () {
cat << EOF
generate-toc: Creates the table of contents for a markdown file.
Usage: ./update-toc.sh "<filename>" [<max level>|<min level> <max level>]
The filename is required.
The min level and max level are optional.
If two are provided, the smallest is the min and the largest is the max.
If exactly one is provided, it is treated as the max level.
The min level is then the smaller of that max level and 2.
I.e. Providing just "1" results in both a min and max level of 1.
If no levels are provided, the min is 2 and max is 3.
Both have a minimum value of 1 and a maximum value of 5 and are adjusted if needed.
A "level" is referring to the number of pound signs in the heading markdown.
EOF
}
# Creates a table of contents as markdown lines from a provided markdown file.
# Usage: generate_toc "<filename>" [<max level>|<min level> <max level>]
# See __generate_toc_usage for details.
generate_toc () {
local filename level
filename="$1"
shift
if [[ ! -f "$filename" ]]; then
printf 'File not found: %s\n' "$filename" >&2
return 1
fi
if [[ "$#" -ge '2' ]]; then
level1=$1
level2=$2
shift
shift
elif [[ "$#" -ge '1' ]]; then
level2=$1
shift
if [[ "$level2" -lt '2' ]]; then
level1=$level2
else
level1=2
fi
else
level1=2
level2=3
fi
if [[ "$level1" -lt '1' ]]; then
level1=1
elif [[ "$level1" -gt '5' ]]; then
level1=5
fi
if [[ "$level2" -lt '1' ]]; then
level2=1
elif [[ "$level2" -gt '5' ]]; then
level2=5
fi
if [[ "$level1" -gt "$level2" ]]; then
local temp
temp=$level1
level1=$level2
level2=$temp
fi
# Use Grep to get all heading lines with the desired levels.
# Use sed to remove any pound below the min level.
# Use sed to delimit the markdown from the heading, and a duplicate of the heading
# Use awk to do some conversion on each piece and output each final entry line.
grep -E "^#{${level1},${level2}} " "$filename" \
| sed "s/^#\{${level1}\}/#/" \
| sed -E 's/(#+) (.+)/\1~\2~\2/g' \
| awk -F "~" \
'{
gsub(/#/," ",$1);
gsub(/[^[:alnum:]_]+/,"-",$3);
print $1 "- [" $2 "](#" tolower($3) ")";
}'
}
# If not sourced, do the stuff!
if [[ "$generate_toc_sourced" != 'YES' ]]; then
if [[ "$#" -eq 0 ]]; then
__generate_toc_usage
exit 0
fi
for a in "$@"; do
if [[ "$a" == '-h' || "$a" == '--help' || "$a" == "help" ]]; then
__generate_toc_usage
exit 0
fi
done
generate_toc "$@"
exit $?
fi
# It was sourced, clean up some environment stuff.
unset generate_toc_sourced
unset -f __generate_toc_usage
return 0 | true |
489481e9817ec385aab4de963f6687380f1a02bb | Shell | gsstudios/initramfs3 | /res/crontab/cron-scripts/clear-file-cache.sh | UTF-8 | 629 | 3.5 | 4 | [] | no_license | #!/sbin/busybox sh
# Clear Cache script
(
PROFILE=`cat /data/.siyah/.active.profile`;
. /data/.siyah/${PROFILE}.profile;
if [ "$cron_clear_app_cache" == "on" ]; then
while [ ! `cat /proc/loadavg | cut -c1-4` \< "3.50" ]; do
echo "Waiting For CPU to cool down";
sleep 30;
done;
CACHE_JUNK=`ls -d /data/data/*/cache`
for i in $CACHE_JUNK; do
rm -rf $i/*
done;
# Old logs
rm -f /data/tombstones/*;
rm -f /data/anr/*;
rm -f /data/system/dropbox/*;
date +%H:%M-%D-%Z > /data/crontab/cron-clear-file-cache;
echo "Done! Cleaned Apps Cache" >> /data/crontab/cron-clear-file-cache;
sync;
fi;
)&
| true |
453f566664ca240372cd161bec63d0a90e3ca29b | Shell | ShalokShalom/plan.sh | /mpv/plan.sh | UTF-8 | 1,467 | 2.515625 | 3 | [
"Apache-2.0"
] | permissive | pkg_origin=cosmos
pkg_name=mpv
pkg_version=0.25.0
pkg_description='Video player based on MPlayer/mplayer2'
pkg_license=('GPL')
pkg_upstream_url='http://mpv.io'
pkg_deps=('ffmpeg' 'lcms2' 'libdvdread' 'libcdio-paranoia' 'libgl' 'libxinerama'
'libxv' 'libxkbcommon' 'libva' 'wayland' 'desktop-file-utils' 'hicolor-icon-theme'
'xdg-utils' 'libdvdnav' 'youtube-dl' 'libass' 'libbluray' 'lua' 'libxrandr' 'jack'
'rubberband' 'samba' 'libxss' 'libcaca')
pkg_build_deps=('mesa' 'python2-docutils' 'ladspa' 'perl')
pkg_source=("https://github.com/mpv-player/mpv/archive/v${pkg_version}.tar.gz"
"https://github.com/mpv-player/mpv/commit/5053f4cc3f48538c3d9a3ba13dc98442f3302052.patch")
#pkg_source=("https://github.com/mpv-player/mpv/archive/${_commit}.zip")
pkg_shasum=('73b3d233c3b4428d0cfd5491c5cb6c41'
'0e0c367e20d4c821db4456fb01f5bc71')
prepare() {
# https://github.com/mpv-player/mpv/issues/2729
#patch -p1 -i ${CACHE_PATH}/5053f4cc3f48538c3d9a3ba13dc98442f3302052.patch
./bootstrap.py
}
do_build() {
./waf configure --prefix=/usr \
--confdir=/etc/mpv \
--enable-libmpv-shared \
--enable-cdda \
--enable-sdl2 \
--enable-zsh-comp \
--enable-libarchive \
--enable-dvdnav \
--enable-dvdread \
--enable-libbluray \
--enable-cdda
./waf build
}
do_package() {
./waf install --destdir=${pkg_prefix}
install -m644 DOCS/{encoding.rst,tech-overview.txt} ${pkg_prefix}/usr/share/doc/mpv
}
| true |
d3c37a5b640df016eca086502b0f80496d5c72ce | Shell | tayfunbkts/assignments | /assignment/casestudy.sh | UTF-8 | 325 | 2.765625 | 3 | [] | no_license | #!/bin/bash
user=$(whoami)
sudo echo "welcome $user"
time_today=$(date)
echo $user $time_today >> AccessLog.txt
FILE=/usr/bin/python3
if test -f "$FILE"; then
echo "$FILE exists."
else
sudo yum install python3 -y
fi
sudo wget https://raw.githubusercontent.com/MarkMaddison/assignments/master/assignment/phonebook.py
| true |
b1c4b368ed87b0c73964f0f666b5e46bac148a94 | Shell | MhmtErsy/Check-Server-Shell-Script | /Server Control/pingstatus.sh | UTF-8 | 333 | 2.875 | 3 | [] | no_license | #!/bin/bash
ping -c3 -i2 $1
if [ $? -eq 0 ]
then
echo "============SERVER IS ALIVE [$1]============"
(echo "[$1] Adlı Server erişilebilir durumdadır. ") >>rapor.txt
sh pingtime.sh $1
exit 0
else
echo "============SERVER IS DEAD [$1]============"
(echo "[$1] Adlı Server erişilemez durumdadır. ") >>rapor.txt
sh main.sh
fi
| true |
5018e88d13f2b2a7f0b86bce0e3739d47e8664e4 | Shell | oldlaptop/uqm-crossbuilder | /build_uqm.sh | UTF-8 | 8,044 | 3.765625 | 4 | [
"BSD-2-Clause"
] | permissive | #!/bin/bash
#
# This script builds UQM in a Virtual Machine for Windows
#
# Parameters:
#
# vanilla:
# Builds Ur-Quan Masters vanilla code
#
# balance:
# Builds Shiver's Balance Mod code
#
#
#
# The script first creates a VirtualMachine and sets its properties
# then starts that machine and executes a build script inside it
# The produced executables and build logs are then found in the shared folder
#
SHARED_DIR="uqm-crossbuilder-share"
IMAGE_NAME="uqm-crossbuilder.iso"
CURRENT_DIR=`pwd`
LIVE_USER="fwiffo"
LIVE_PASSWORD="live"
VANILLA_EXE="uqm.exe"
BALANCE_EXE="uqm-improved-netmelee.exe"
EFFECTS_PACK="improved-netmelee-effects.zip"
BUILD_VANILLA="false"
BUILD_BMOD="false"
BUILD_SCRIPT_PATH="/usr/bin/auto-uqm-build-vbox.sh"
USEOLDSYNTAX=false # Old VirtualBox installations use --wait-for stdout instead of wait-stdout, along with other differences
WAITSTDOUT="--wait-stdout"
WAITSTDERR="--wait-stderr"
NET_IFACE_TYPE="nat"
HOST_IFACE="eth0"
cleanup ()
{
echo "## Shutting down the VM ##"
vboxmanage controlvm "UQM_crossbuilder_001" poweroff
sleep 5
echo "== Deleting the VM"
vboxmanage unregistervm "UQM_crossbuilder_001" -delete
echo "== Deleting the temporary hard drive"
}
if [ `echo $@ | grep "clean"` ]; then
echo "Cleaning"
cleanup
exit 1
fi
if [ `echo $@ | grep "vanilla"` ]; then
export BUILD_VANILLA="true"
elif [ `echo $@ | grep "balance"` ]; then
export BUILD_BMOD="true"
else
echo "!! You must specify build target: either vanilla or balance"
exit 1
fi
if [ `echo $@ | grep "old_virtualbox"` ]; then
$USEOLDSYNTAX = true
$WAITSTDOUT = "--wait-for stdout"
$WAITSTDERR = "--wait-for stderr"
fi
if [ ! -f ./$IMAGE_NAME ]; then
echo "!! You need to have the live CD image $IMAGE_NAME in the current directory"
exit 2
fi
echo "## Found live CD image, all good ##"
if [ ! -d $SHARED_DIR ]; then
echo "== Shared dir does not exist. Creating it at $CURRENT_DIR/$SHARED_DIR"
mkdir $SHARED_DIR
if [ $? -ne 0 ]; then
echo "!! Problem creating shared directory. Do you have permissions for this folder?"
exit 3
else
echo "== Shared directory created"
fi
else
echo "== Shared dir already exists, verifying permissions"
touch $SHARED_DIR/testfile
if [ $? -ne 0 ]; then
echo "!! Problem testing shared directory. Are the permissions in order?"
exit 3
else
echo "All good"
fi
fi
echo "## Creating the Virtual Machine ##"
vboxmanage createvm --name "UQM_crossbuilder_001" --register
if [ $? -ne 0 ]; then
echo "!! vboxmanage returned an error, canceling"
vboxmanage unregistervm "UQM_crossbuilder_001" -delete
exit 5
fi
# Give it a CD rom drive
vboxmanage storagectl "UQM_crossbuilder_001" --name "cdrom_drive" --add ide --controller PIIX4
if [ $? -ne 0 ]; then
echo "!! vboxmanage returned an error while creating a cd rom drive, canceling"
vboxmanage unregistervm "UQM_crossbuilder_001" -delete
exit 5
fi
# Set up a virtual NIC
vboxmanage modifyvm "UQM_crossbuilder_001" --nic1 $NET_IFACE_TYPE
if [ $NET_IFACE_TYPE = "bridged" ]; then
vboxmanage modifyvm "UQM_crossbuilder_001" --bridgeadapter1 $HOST_IFACE
fi
echo "== Giving the live image to the Virtual Machine: $IMAGE_NAME"
vboxmanage storageattach "UQM_crossbuilder_001" --storagectl "cdrom_drive" --port 0 --device 0 --type dvddrive --medium $IMAGE_NAME
if [ $? -ne 0 ]; then
echo "!! vboxmanage returned an error while inputting the live image, exiting"
vboxmanage unregistervm "UQM_crossbuilder_001" -delete
rm ./testhd*
fi
echo "== Creating a shared folder"
vboxmanage sharedfolder add "UQM_crossbuilder_001" --name "uqm-crossbuilder-share" --hostpath $CURRENT_DIR/$SHARED_DIR
if [ $? -ne 0 ]; then
echo "!! vboxmanage returned an error while creating the shared folder at $CURRENT_DIR/$SHARED_DIR"
vboxmanage unregistervm "UQM_crossbuilder_001" -delete
rm ./testhd*
fi
echo "## Starting the Virtual Machine ##"
vboxmanage startvm "UQM_crossbuilder_001" --type headless
if [ $? -ne 0 ]; then
echo "!! vboxmanage returned an error while starting the VM, canceling"
cleanup
exit 5
fi
echo "== Waiting 10 seconds to make sure the boot menu has time to load"
sleep 10
# Send keyboard key "enter" to pass the boot menu
echo "== Sending enter to boot menu"
vboxmanage controlvm "UQM_crossbuilder_001" keyboardputscancode 1c
if [ $? -ne 0 ]; then
echo "!! vboxmanage returned an error while sending enter to the VM, canceling"
cleanup
exit 5
fi
echo "## Waiting 30 seconds for the system to load ##"
echo " "
echo "##################################################"
echo "After this the build will begin "
echo "The build probably takes a while, so wait warmly "
echo "##################################################"
sleep 30
echo "## Mounting shared folder ##"
vboxmanage guestcontrol "UQM_crossbuilder_001" execute "/usr/bin/sudo" --username $LIVE_USER --password $LIVE_PASSWORD --verbose $WAITSTDOUT $WAITSTDERR "/bin/mkdir" "/vbox_share"
if [ $? -ne 0 ]; then
echo "!! Error at creating shared folder on guest"
cleanup
exit 6
fi
echo "== Folder created on guest"
vboxmanage guestcontrol "UQM_crossbuilder_001" execute "/usr/bin/sudo" --username $LIVE_USER --password $LIVE_PASSWORD --verbose $WAITSTDOUT $WAITSTDERR -- "/bin/mount" "-tvboxsf" $SHARED_DIR "/vbox_share"
if [ $? -ne 0 ]; then
echo "!! Error at mounting shared folder on guest"
cleanup
exit 6
fi
echo "== Shared folder mounted successfully"
echo "== Testing shared folder"
vboxmanage guestcontrol "UQM_crossbuilder_001" execute "/usr/bin/sudo" --username $LIVE_USER --password $LIVE_PASSWORD --verbose $WAITSTDOUT $WAITSTDERR -- "/bin/touch" "/vbox_share/touched_file"
if [ $? -ne 0 ]; then
echo "!! Error at testing shared directory. This is probably an error with your VirtualBox version"
cleanup
exit 6
fi
if [ ! -f $SHARED_DIR/touched_file ]; then
echo "!! The Virtual Machine was able to create a file in the shared folder, but it wasn't received in the master OS"
echo "!! This probably means that the guest OS didn't properly mount the shared directory"
cleanup
exit 7
fi
echo "== Shared folder works"
echo "## Sending the build command to the Virtual Machine ##"
if [ $BUILD_VANILLA = "true" ]; then
vboxmanage guestcontrol "UQM_crossbuilder_001" execute "/usr/bin/sudo" --username $LIVE_USER --password $LIVE_PASSWORD --verbose $WAITSTDOUT $WAITSTDERR -- "/bin/sh" "$BUILD_SCRIPT_PATH" "vanilla"
if [ $? -ne 0 ]; then
echo "!! VirtualBox returned an error while sending the build command, canceling"
cleanup
exit 6
else
echo "== All good"
cleanup
fi
fi
if [ $BUILD_BMOD = "true" ]; then
vboxmanage guestcontrol "UQM_crossbuilder_001" execute "/usr/bin/sudo" --username $LIVE_USER --password $LIVE_PASSWORD --verbose $WAITSTDOUT $WAITSTDERR -- "/bin/sh" "$BUILD_SCRIPT_PATH" "balance"
if [ $? -ne 0 ]; then
echo "!! VirtualBox returned an error while sending the build command, canceling"
cleanup
exit 6
else
echo "== All good"
cleanup
fi
fi
if [ $BUILD_VANILLA = "true" ]; then
if [ -f $SHARED_DIR/$VANILLA_EXE ]; then
echo "== Copying the binaries to the current directory"
cp $SHARED_DIR/$VANILLA_EXE $CURRENT_DIR
else
echo "!! Binary not found. Something went wrong :("
fi
fi
if [ $BUILD_BMOD = "true" ]; then
if [ -f $SHARED_DIR/$BALANCE_EXE ]; then
echo "== Copying the Balance Mod exe to current dir: $BALANCE_EXE"
cp $SHARED_DIR/$BALANCE_EXE $CURRENT_DIR
else
echo "!! Balance Mod exe not found. Something went wrong :("
fi
if [ -f $SHARED_DIR/$EFFECTS_PACK ]; then
echo "== Copying the effects pack to current dir: $EFFECTS_PACK"
cp $SHARED_DIR/$EFFECTS_PACK $CURRENT_DIR
else
echo "!! Effecting pack not found. Something went wrong :("
fi
fi
echo "-- All done! =)"
| true |
aabedd6886d91ffad6dcedf56c6679e0c083b15e | Shell | michael-stevenson/dotfiles | /.local/etc/config/commands/taskwarrior | UTF-8 | 333 | 3.046875 | 3 | [] | no_license | #!/usr/bin/env bash
command_help "Install taskwarrior" $@
command_require curl tar cmake make
set -e
d=$(mktemp -d)
cd $d
curl -O https://taskwarrior.org/download/task-2.5.1.tar.gz
tar xvzf task-2.5.1.tar.gz
cd task-2.5.1
cmake -DENABLE_SYNC=OFF -DCMAKE_BUILD_TYPE=release -DCMAKE_INSTALL_PREFIX=$HOME/.local .
make
make install
| true |
7591026e672c339dd4ccb6ca883f9c67052db11e | Shell | kdave/xfstests | /tests/overlay/047 | UTF-8 | 1,482 | 3.359375 | 3 | [] | no_license | #! /bin/bash
# SPDX-License-Identifier: GPL-2.0
# Copyright (C) 2016-2017 CTERA Networks. All Rights Reserved.
#
# FSQA Test No. 047
#
# Test hardlink breakage after unlink and mount cycle
#
# - file A and B are hardlinked in lower
# - modify A to trigger copy up and index lower
# - unlink A and mount cycle
# - check that B still contains the modified data
#
. ./common/preamble
_begin_fstest auto quick copyup hardlink
# Import common functions.
. ./common/filter
# real QA test starts here
_supported_fs overlay
_require_scratch
_require_scratch_feature index
_scratch_mkfs >>$seqres.full 2>&1
# Create 2 hardlinked files in lower
lowerdir=$OVL_BASE_SCRATCH_MNT/$OVL_LOWER
mkdir -p $lowerdir
echo "zero" >> $lowerdir/foo
ln $lowerdir/foo $lowerdir/bar
# Enable overlay index feature to prevent breaking hardlinks on copy up
_scratch_mount -o index=on
rm -f $tmp.*
foo=$SCRATCH_MNT/foo
bar=$SCRATCH_MNT/bar
FILES="$foo $bar"
echo "== Before copy up =="
cat $foo
# Modify content of one of the hardlinks
echo "one" >> $bar
echo "== After write one =="
cat $foo
# Unlink the copied up hardlink
rm $bar
echo "== After unlink one =="
cat $foo
# Verify that the hardlinks survive a mount cycle
_scratch_cycle_mount index=on
echo "== After mount cycle =="
cat $foo
# Drop caches to get the copied up hardlink out of cache
echo 3 > /proc/sys/vm/drop_caches
# Modify content of the other hardlink
echo "two" >> $foo
echo "== After write two =="
cat $foo
status=0
exit
| true |
8bed9af25c43aa47b2b2048e753c545778d7d45b | Shell | ivanamark/gatsby-portfolio | /loops/continous.sh | UTF-8 | 180 | 3.515625 | 4 | [] | no_license | #!/bin/sh
NUMS="1 2 3 4 5 6 7"
set -x
for NUM in $NUMS
do
if [ $(expr $NUM % 2) -eq 0 ]
then
echo "it is even"
continue
fi
echo "found odd"
done | true |
8e3d18c62fd6718e4ba023f78974559b43bfcf37 | Shell | ggerganov/whisper.cpp | /extra/bench-wts.sh | UTF-8 | 2,113 | 3.671875 | 4 | [
"MIT"
] | permissive | # Benchmark word-level timestamps for different models
#
# This script takes two arguments
# - an audio file
# - [optional] path to a font file
# I'm using "/usr/share/fonts/truetype/freefont/FreeMono.ttf" on Ubuntu
if [ -z "$1" ]; then
echo "Usage: $0 <audio file> [font file]"
exit 1
fi
#TODO: Make this a command line parameter
#models="base small large"
#models="tiny.en tiny base.en base small.en small medium.en medium large-v1 large"
models="tiny.en base.en small.en medium.en large"
DURATION=$(ffprobe -i $1 -show_entries format=duration -v quiet -of csv="p=0")
DURATION=$(printf "%.2f" $DURATION)
echo "Input file duration: ${DURATION}s"
for model in $models; do
echo "Running $model"
COMMAND="./main -m models/ggml-$model.bin -owts -f $1 -of $1.$model"
if [ ! -z "$2" ]; then
COMMAND="$COMMAND -fp $2"
fi
#TODO: Surface errors better
# TIMEFMT is for zsh, TIMEFORMAT is for bash
EXECTIME=$({ TIMEFMT="%E";TIMEFORMAT=%E; time $COMMAND >/dev/null 2>&1; } 2>&1)
# Slightly different formats between zsh and bash
if [ "${EXECTIME: -1}" == "s" ]; then
EXECTIME=${EXECTIME::-1}
fi
RATIO=$(echo "$DURATION / $EXECTIME" | bc -l)
RATIO=$(printf "%.2f" $RATIO)
echo "Execution time: ${EXECTIME}s (${RATIO}x realtime)"
# If the file already exists, delete it
if [ -f $1.mp4 ]; then
rm $1.mp4
fi
bash $1.$model.wts >/dev/null 2>&1
mv $1.mp4 $1.$model.mp4
ffmpeg -y -f lavfi -i color=c=black:s=1200x50:d=$DURATION -vf "drawtext=fontfile=$2:fontsize=36:x=10:y=(h-text_h)/2:text='ggml-$model - ${EXECTIME}s (${RATIO}x realtime)':fontcolor=lightgrey" $1.$model.info.mp4 >/dev/null 2>&1
done
COMMAND="ffmpeg -y"
for model in $models; do
COMMAND="$COMMAND -i $1.$model.info.mp4 -i $1.$model.mp4"
done
COMMAND="$COMMAND -filter_complex \""
COUNT=0
for model in $models; do
COMMAND="$COMMAND[${COUNT}:v][$(($COUNT+1)):v]"
COUNT=$((COUNT+2))
done
COMMAND="$COMMAND vstack=inputs=${COUNT}[v]\" -map \"[v]\" -map 1:a $1.all.mp4 >/dev/null 2>&1"
echo $COMMAND
# Run the command
eval $COMMAND
| true |
7a18228792ecf344a0be79506234a994353a1e2a | Shell | jason-gao/shell | /setmirror.sh | UTF-8 | 9,086 | 4.03125 | 4 | [] | no_license | #!/usr/bin/env bash
#
# Tested OS Distros:
#
# - Ubuntu 14.04/16.04
# - CentOS 6/7
#
set -e
function usage() {
cat <<EOF
Usage: $(basename $0) <mirror_url>
Examples:
$(basename $0) https://registry-mirror.qiniu.com
EOF
}
function exit_on_unsupported_os() {
echo "error: unsupporeted os, can't configure docker --registry-mirror automatically, please refer https://docs.docker.com/registry/recipes/mirror/ docs to configure it manually"
exit 1
}
function exit_on_success() {
echo "Success. $1"
exit 0
}
if [ -z "$1" ]; then
echo "error: please specify mirror url"
usage
exit 1
fi
if [[ ! "$1" =~ (https?)://[-A-Za-z0-9\+_.]*[-A-Za-z0-9\+_] ]]; then
echo "error: '$1' is not a valid url"
usage
exit 1
fi
MIRROR_URL=$1
# Get linux distribution and release version.
LINUX_DIST=
LINUX_DIST_VERSION=
if which lsb_release &>/dev/null; then
LINUX_DIST="$(lsb_release -si)"
LINUX_DIST_VERSION="$(lsb_release -rs)"
fi
if [ -z "$LINUX_DIST" ] && [ -r /etc/lsb-release ]; then
LINUX_DIST="$(. /etc/lsb-release && echo "$DISTRIB_ID")"
LINUX_DIST_VERSION="$(. /etc/lsb-release && echo "$DISTRIB_RELEASE")"
fi
if [ -z "$LINUX_DIST" ] && [ -r /etc/debian_version ]; then
LINUX_DIST='Debian'
fi
if [ -z "$LINUX_DIST" ] && [ -r /etc/fedora-release ]; then
LINUX_DIST='Fedora'
fi
if [ -z "$LINUX_DIST" ] && [ -r /etc/centos-release ]; then
LINUX_DIST='CentOS'
LINUX_DIST_VERSION=$(cat /etc/centos-release | grep -o -P '\d+\.\d+\.\d+')
fi
if [ -z "$LINUX_DIST" ] && [ -r /etc/redhat-release ]; then
LINUX_DIST="$(cat /etc/redhat-release | head -n1 | cut -d " " -f1)"
fi
if [ -z "$LINUX_DIST" ] && [ -r /etc/os-release ]; then
LINUX_DIST="$(. /etc/os-release && echo "$ID")"
LINUX_DIST_VERSION="$(. /etc/os-release && echo "$VERSION_ID")"
fi
if [ -z "$LINUX_DIST" ]; then
exit_on_unsupported_os
fi
# Get docker version.
DOCKER_VERSION=
DOCKER_VERSION_MAJOR=
DOCKER_VERSION_MINOR=
if which docker &>/dev/null; then
DOCKER_VERSION=$(docker -v | grep -o -P '\d+\.\d+\.\d+')
DOCKER_VERSION_MAJOR=$(echo $DOCKER_VERSION | cut -d "." -f 1)
DOCKER_VERSION_MINOR=$(echo $DOCKER_VERSION | cut -d "." -f 2)
else
echo "error: docker is not installed, please install it first"
exit 1
fi
if [ "$DOCKER_VERSION_MAJOR" -eq 1 ] && [ "$DOCKER_VERSION_MINOR" -lt 9 ]; then
echo "error: please upgrade your docker to v1.9 or later"
exit 1
fi
echo "Linux distribution: $LINUX_DIST $LINUX_DIST_VERSION"
echo "Docker version: $DOCKER_VERSION"
echo "Configuring --registry-mirror=$MIRROR_URL for your docker install."
function is_daemon_json_supported() {
if [ "$DOCKER_VERSION_MAJOR" -eq 1 ] && [ "$DOCKER_VERSION_MINOR" -lt 12 ]; then
return 1
else
return 0
fi
}
function update_daemon_json_file() {
DOCKER_DAEMON_JSON_FILE="/etc/docker/daemon.json"
if sudo test -f ${DOCKER_DAEMON_JSON_FILE}; then
sudo cp ${DOCKER_DAEMON_JSON_FILE} "${DOCKER_DAEMON_JSON_FILE}.bak"
if sudo grep -q registry-mirrors "${DOCKER_DAEMON_JSON_FILE}.bak";then
sudo cat "${DOCKER_DAEMON_JSON_FILE}.bak" | sed -n "1h;1"'!'"H;\${g;s|\"registry-mirrors\":\s*\[[^]]*\]|\"registry-mirrors\": [\"${MIRROR_URL}\"]|g;p;}" | sudo tee ${DOCKER_DAEMON_JSON_FILE}
else
sudo cat "${DOCKER_DAEMON_JSON_FILE}.bak" | sed -n "s|{|{\"registry-mirrors\": [\"${MIRROR_URL}\"],|g;p;" | sudo tee ${DOCKER_DAEMON_JSON_FILE}
fi
else
sudo mkdir -p "/etc/docker"
sudo echo "{\"registry-mirrors\": [\"${MIRROR_URL}\"]}" | sudo tee ${DOCKER_DAEMON_JSON_FILE}
fi
}
# Configure.
case "$LINUX_DIST" in
CentOS)
MAJOR=$(echo ${LINUX_DIST_VERSION} | cut -d "." -f1)
if [ "$MAJOR" -eq 6 ]; then
DOCKER_SERVICE_FILE="/etc/sysconfig/docker"
sudo cp ${DOCKER_SERVICE_FILE} "${DOCKER_SERVICE_FILE}.bak"
sudo sed -i "s|other_args=\"|other_args=\"--registry-mirror='${MIRROR_URL}'|g" ${DOCKER_SERVICE_FILE}
sudo sed -i "s|OPTIONS='|OPTIONS='--registry-mirror='${MIRROR_URL}'|g" ${DOCKER_SERVICE_FILE}
exit_on_success "You need to restart docker to take effect: sudo service docker restart"
elif [ $MAJOR -ge 7 ]; then
if is_daemon_json_supported; then
update_daemon_json_file
else
DOCKER_SERVICE_FILE="/lib/systemd/system/docker.service"
sudo cp ${DOCKER_SERVICE_FILE} "${DOCKER_SERVICE_FILE}.bak"
sudo sed -i "s|\(ExecStart=/usr/bin/docker[^ ]* daemon\)|\1 --registry-mirror="${MIRROR_URL}"|g" ${DOCKER_SERVICE_FILE}
sudo systemctl daemon-reload
fi
exit_on_success "You need to restart docker to take effect: sudo systemctl restart docker "
else
exit_on_unsupported_os
fi
break
;;
Fedora)
if is_daemon_json_supported; then
update_daemon_json_file
else
DOCKER_SERVICE_FILE="/lib/systemd/system/docker.service"
sudo cp ${DOCKER_SERVICE_FILE} "${DOCKER_SERVICE_FILE}.bak"
sudo sed -i "s|\(ExecStart=/usr/bin/docker[^ ]* daemon\)|\1 --registry-mirror="${MIRROR_URL}"|g" ${DOCKER_SERVICE_FILE}
sudo systemctl daemon-reload
fi
exit_on_success "You need to restart docker to take effect: sudo systemctl restart docker"
;;
Ubuntu)
MAJOR=$(echo ${LINUX_DIST_VERSION} | cut -d "." -f1)
if [ "$MAJOR" -ge 16 ]; then
if is_daemon_json_supported; then
update_daemon_json_file
else
DOCKER_SERVICE_FILE="/lib/systemd/system/docker.service"
sudo cp ${DOCKER_SERVICE_FILE} "${DOCKER_SERVICE_FILE}.bak"
sudo sed -i "s|\(ExecStart=/usr/bin/docker[^ ]* daemon -H fd://$\)|\1 --registry-mirror="${MIRROR_URL}"|g" ${DOCKER_SERVICE_FILE}
sudo systemctl daemon-reload
fi
exit_on_success "You need to restart docker to take effect: sudo systemctl restart docker.service"
else
if is_daemon_json_supported; then
update_daemon_json_file
else
DOCKER_SERVICE_FILE="/etc/default/docker"
sudo cp ${DOCKER_SERVICE_FILE} "${DOCKER_SERVICE_FILE}.bak"
if grep "registry-mirror" ${DOCKER_SERVICE_FILE} > /dev/null; then
sudo sed -i -u -E "s#--registry-mirror='?((http|https)://)?[a-zA-Z0-9.]+'?#--registry-mirror='${MIRROR_URL}'#g" ${DOCKER_SERVICE_FILE}
else
echo 'DOCKER_OPTS="$DOCKER_OPTS --registry-mirror='${MIRROR_URL}'"' >> ${DOCKER_SERVICE_FILE}
fi
fi
fi
exit_on_success "You need to restart docker to take effect: sudo service docker restart"
;;
Debian)
if is_daemon_json_supported; then
update_daemon_json_file
else
DOCKER_SERVICE_FILE="/etc/default/docker"
sudo cp ${DOCKER_SERVICE_FILE} "${DOCKER_SERVICE_FILE}.bak"
if grep "registry-mirror" ${DOCKER_SERVICE_FILE} > /dev/null; then
sudo sed -i -u -E "s#--registry-mirror='?((http|https)://)?[a-zA-Z0-9.]+'?#--registry-mirror='${MIRROR_URL}'#g" ${DOCKER_SERVICE_FILE}
else
echo 'DOCKER_OPTS="$DOCKER_OPTS --registry-mirror='${MIRROR_URL}'"' >> ${DOCKER_SERVICE_FILE}
echo ${MIRROR_URL}
fi
fi
exit_on_success "You need to restart docker to take effect: sudo service docker restart"
;;
Arch)
if grep "Arch Linux" /etc/os-release > /dev/null; then
if is_daemon_json_supported; then
update_daemon_json_file
else
DOCKER_SERVICE_FILE="/lib/systemd/system/docker.service"
sudo cp ${DOCKER_SERVICE_FILE} "${DOCKER_SERVICE_FILE}.bak"
sudo sed -i "s|\(ExecStart=/usr/bin/docker[^ ]* daemon -H fd://\)|\1 --registry-mirror="${MIRROR_URL}"|g" ${DOCKER_SERVICE_FILE}
sudo systemctl daemon-reload
fi
exit_on_success "You need to restart docker to take effect: sudo systemctl restart docker"
else
exit_on_unsupported_os
fi
;;
Suse)
if grep "openSUSE Leap" /etc/os-release > /dev/null; then
if is_daemon_json_supported; then
update_daemon_json_file
else
DOCKER_SERVICE_FILE="/usr/lib/systemd/system/docker.service"
sudo cp ${DOCKER_SERVICE_FILE} "${DOCKER_SERVICE_FILE}.bak"
sudo sed -i "s|\(^ExecStart=/usr/bin/docker daemon -H fd://\)|\1 --registry-mirror="${MIRROR_URL}"|g" ${DOCKER_SERVICE_FILE}
sudo systemctl daemon-reload
fi
exit_on_success "You need to restart docker to take effect: sudo systemctl restart docker"
else
exit_on_unsupported_os
fi
;;
*)
exit_on_unsupported_os
;;
esac
| true |
f915338323c00d42e3878943d46c0aff59bcfac2 | Shell | todokku/heroku-buildpack-node-modules-cleanup | /bin/compile | UTF-8 | 207 | 2.8125 | 3 | [] | no_license | #!/usr/bin/env bash
# bin/compile BUILD_DIR CACHE_DIR ENV_DIR
BUILD_DIR=$1
echo "-----> Deleting the node_modules directory"
find $BUILD_DIR -name 'node_modules' -type d -prune -print -exec rm -rf '{}' \;
| true |
15104fa99680a93847c97bf734b7a606d2bb6b39 | Shell | guilhermemaas/zabbix | /zabbix_3_camadas/passos_inst.sh | UTF-8 | 3,730 | 3.15625 | 3 | [
"MIT"
] | permissive | #MariaDB Server: 192.168.0.118
#Zabbix Server: 192.168.0.117
#Zabbix Web: 192.168.0.119
#Verificar o timezone atual configurado no servidor:
timedatectl status
#Configurar o timezone para Sao Paulo:
timedatectl set-timezone Amercia/Sao_Paulo
#Listar todos os timezones disponives:
timedatectl list-timezones
#Verificar hora atual:
date
#Configurar o chrony(Client NTP) para corrigir data e hora:
dnf -y install chrony
#Habilitar o Chrony
systemctl enable --now chronyd
#Validar os servers NTP disponiveis:
chronyc sources
#Sincronizar data e hora:
service chronyd restart
#Validar horario:
date
#O Chrony vai atualizar o horario a cada 64 segundos.
#Adicionar regra no firewall para o NTP:
firewall-cmd --permanent --add-service=ntp
firewall-cmd --reload
#Instalar algumas ferramentas basicas:
dnf install -y net-tools vim nano epel-release wget curl tcpdump
#Desativar o selinux:
#Verificar status:
getenforce
#Com reboot:
vim /etc/selinux/config
SELINUX=disabled
#Sem reboot:
setenforce 0
getenforce
#===MARIADB
#Checar versao disponivel do MariaDB Server:
dnf info mariadb-server
#Instalar o MariaDB:
dnf -y install mariadb-server
#Habilitar o servico do MariaDB:
systemctl enable --now mariadb
systemctl status mariadb
#Definir senha do usuario root do MySQL:
mysql_secure_installation
#Conectar ao banco de dados e criar o usuario e banco de dados zabbix:
mysql -u root -p
create database zabbix character set utf8 collate utf8_bin;
create user 'zabbix'@'localhost' identified by 'xpto';
grant all privileges on zabbix.* to 'zabbix'@'localhost';
flush privileges;
#Criar acesso a partir do Zabbix Server:
create user 'zabbix_server'@'192.168.0.117' identified by 'xpto';
grant all privileges on zabbix.* to 'zabbix_server'@'192.168.0.117';
flush privileges;
#Criar acesso a partir do Zabbix Front End:
create user 'zabbix_web'@'192.168.0.119' identified by 'xpto';
grant all privileges on zabbix.* to 'zabbix_web'@'192.168.0.119';
flush privileges;
#Criar regra no firewall pra liberar a porta 3306:
firewall-cmd --permanent --add-port=3306/tcp
firewall-cmd --reload
#Liberar MariaDB para aceitar conexoes remotas:
vim /etc/my.cnf.d/mariadb-server.cnf
bind-address = SERVERIP
#Reiniciar servico:
systemctl restart mariadb
#===ZABBIX-SERVER:
#Instalar o repositorio oficial:
rpm -ivh http://repo.zabbix.com/zabbix/5.2/rhel/8/x86_64/zabbix-release-5.2-1.el8.noarch.rpm
#Limpar o cache e remover repositorios antigos:
dnf clean all
#Instalar o Zabbix Server
dnf -y install zabbix-server
#Instalar o client do MariaDB:
dnf -y install mariadb
#Carregar a estrutura/esquema incial do banco de dados:
zcat /usr/share/doc/zabbix-server-mysql/create.sql.gz | mysql -h 192.168.0.118 -u zabbix_server -p zabbix
#Verificar se as tabelas foram criadas:
mysql -u root -p
use zabbix;
show tables;
#Editar o password no arquivo de configuracao do Zabbix Server:
vim /etczabbix/zabbix_server.conf
DBHost=192.168.0.118
DBUser=zabbix_server
DBPassword=xpto
#Verificar se nao esta logando erros:
tail -f -n 20 /var/log/zabbix/zabbix_server.log
#Criar regra no Firewall:
firewall-cmd --permanent --add-port=10051/tcp
firewall-cmd --reload
#===ZABBIX FRONT END
#Instalar o repositorio oficial:
rpm -ivh http://repo.zabbix.com/zabbix/5.2/rhel/8/x86_64/zabbix-release-5.2-1.el8.noarch.rpm
#Limpar cache e remover repositorios antigos:
dnf clean all
#Instalar os pacotes:
dnf -y install zabbix-web-mysql zabbix-nginx-conf
#Configurando o PHP:
vim /etc/php-fpm.d/zabbix.conf
php_value[date.timezone] = America/Sao_Paulo
#Habilitar a inicializacao do servico:
systemctl enable --now httpd php-fpm
systemctl status httpd php-fpm
#Acessar a interface Web:
http://IP/zabbix
User: Admin
Pass: zabbix | true |
217ba806862ec173ced8baca516b8e73b717dace | Shell | jerabekjak/smoderp2d-optim-sens | /tools/make_runs.sh | UTF-8 | 578 | 3.3125 | 3 | [] | no_license | #!/bin/bash
# DESCRIPTION
# list optim outputs which are in dir bibly_fit/
# basen that the script make runs in directory runs/
# where single runs are stored in files named
# after the name of the simulation
# eg bibly_fit/out-trebsin_ii_2008-9 > cat runs/trebsin_ii_2008-9 >> run
# in runs/trebsin_ii_2008-9 should be the executing command e.g.:
# ./optim.py -o out-trebsin_ii_2008-9 -m model/trebsin_ii_2008-9.ini -O cfgs/trebsin_ii_2008-9.cfg > logs/out-trebsin_ii_2008-9.log
worse=`ls blby_fit`
for i in $worse;
do
echo $i
cat runs/"${i:4}" >> runs/runs;
done
| true |
b0cdcd27186881b4394b8d1d49948d8c81075223 | Shell | HumboldtWirelessLab/click-brn-scripts | /500-experiments/016-topology/025-boxes/evaluation/eval.sh | UTF-8 | 2,212 | 3.5 | 4 | [] | no_license | #!/bin/bash
dir=$(dirname "$0")
pwd=$(pwd)
SIGN=`echo $dir | cut -b 1`
case "$SIGN" in
"/")
DIR=$dir
;;
".")
DIR=$pwd/$dir
;;
*)
echo "Error while getting directory"
exit -1
;;
esac
PRE_LINKS_PATH=${RESULTDIR}/links_filtered_orig.csv
LINKS_PATH=${RESULTDIR}/links_filtered.csv
ETXLIMIT=100
cd ${RESULTDIR}
../../common_evaluation/extract_nodes.py -p ${RESULTDIR}
LINKS_EXTRACED_PATH=${RESULTDIR}/links_extract.xml
../../common_evaluation/extract_dibadawn_links.py -f ${RESULTDIR}/measurement.xml > ${LINKS_EXTRACED_PATH}
LINKS_RAW_PATH=${RESULTDIR}/links_raw.csv
xsltproc -o ${LINKS_RAW_PATH} ../../common_evaluation/dibadawn_links_to_csv.xslt ${LINKS_EXTRACED_PATH}
../../common_evaluation/filter_links.py -f ${RESULTDIR}/links_raw.csv -e ${ETXLIMIT} > ${PRE_LINKS_PATH}
grep -e "00-01.*00-02" -e "00-02.*00-01" -e "node_a" ${PRE_LINKS_PATH} > ${LINKS_PATH}
SEARCHES=${RESULTDIR}/searches.xml
TOPO_PATH=${RESULTDIR}/topo_info.xml
../../common_evaluation/extract_topology_info.py -f ${RESULTDIR}/measurement.xml > ${TOPO_PATH}
xsltproc -o ${SEARCHES} ../../common_evaluation/group_searches.xslt ${TOPO_PATH}
xsltproc -o ${RESULTDIR}/articulation_points.csv ../../common_evaluation/dibadawn_articulation_points.xslt ${SEARCHES}
xsltproc -o ${RESULTDIR}/bridges_all.csv ../../common_evaluation/dibadawn_all_bridges.xslt ${SEARCHES}
xsltproc -o ${RESULTDIR}/bridges_unique_per_search.csv ../../common_evaluation/dibadawn_unique_bridges_per_search.xslt ${SEARCHES}
# Count runs
COUNT_OF_RUNS=$(grep -e "^<DibadawnStartSearch" ${RESULTDIR}/measurement.xml | wc -l)
echo -e "\"num_of_runs\"\n${COUNT_OF_RUNS}" > ${RESULTDIR}/runs.csv
# Count measure
COUNT_OF_MEASURES=$(grep -e "topology_info.*extra_data=" ${RESULTDIR}/measurement.xml | awk -F "'" 'BEGIN{max=0}{if($6 > max){max = $6}}END{print max}')
echo -e "\"num_of_measure\"\n${COUNT_OF_MEASURES}" > ${RESULTDIR}/runs_measure.csv
../../common_evaluation/calc_articulation_points.R ${LINKS_PATH} > ${RESULTDIR}/theoretical_articulation_points.csv
../../common_evaluation/calc_bridges.R ${LINKS_PATH} > ${RESULTDIR}/theoretical_bridges.csv
../../common_evaluation/calc_f1_measure.R
echo "Done"
exit 2 | true |
667345d3ecf85bd161979b76dfb156cf8868fe74 | Shell | nevdullcode/vagrant-jupyterhub | /jupyterhub/etc/init.d/jupyterhub | UTF-8 | 1,042 | 3.859375 | 4 | [
"MIT"
] | permissive | #!/bin/sh
# chkconfig: 35 90 10
# JupyterHub is a multi-user server that launches Jupyter notebook servers.
#
# Copyright (c) 2015 Jeff Denton
# The LICENSE file is located in the top level directory.
#
workdir=/root
if [[ $EUID -ne 0 ]]; then
echo "Must be run as root." 1>&2
exit 1
fi
# Source function library.
. /etc/init.d/functions
# Source custom Python version
. /etc/profile.d/python3.sh
start() {
cd $workdir && \
daemon \
--pidfile /var/run/jupyterhub.pid \
"nohup jupyterhub --pid-file=/var/run/jupyterhub.pid \
-f /etc/jupyterhub/jupyterhub_config.py \
>> /var/log/jupyterhub.log 2>&1 &"
echo "starting jupyterhub"
return 0
}
stop() {
killproc jupyterhub
echo "stopping jupyterhub"
return 0
}
case "$1" in
start)
start
;;
stop)
stop
;;
status)
status jupyterhub
;;
restart)
stop
start
;;
*)
echo "Usage: /etc/init.d/jupyterhub {start|stop|status|restart}"
exit 1
esac
exit 0
| true |
fca1ad0c6ff3cdb59604634ebb76d4adf80b0c1a | Shell | goanpeca/maxiconda-bot | /scripts/solve_for_linux.sh | UTF-8 | 733 | 3.203125 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Copyright (c) Semi-ATE
# Distributed under the terms of the MIT License
set -ex
# Check parameters
ARCH=${ARCH:-aarch64}
DOCKER_ARCH=${DOCKER_ARCH:-arm64v8}
DOCKERIMAGE=${DOCKERIMAGE:-condaforge/linux-anvil-aarch64}
#export CONSTRUCT_ROOT=/construct
echo "============= Create build directory ============="
mkdir -p build/
chmod 777 build/
echo "============= Enable QEMU in persistent mode ============="
docker run --rm --privileged multiarch/qemu-user-static --reset --credential yes --persistent yes
echo "============= Solve for Linux/${ARCH} ============="
docker run --rm -v "$(pwd):/construct" -e CONSTRUCT_ROOT -e MAXICONDA_VERSION -e MAXICONDA_NAME ${DOCKERIMAGE} /construct/scripts/build.sh
| true |
161c883475400e22505b63f2b94d47632f2d87de | Shell | paulhowardarm/parsec | /test/cross-compile.sh | UTF-8 | 1,680 | 2.640625 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
# Copyright 2021 Contributors to the Parsec project.
# SPDX-License-Identifier: Apache-2.0
set -xeuf -o pipefail
# The "jwt-svid-authenticator" feature is not included yet because of a cross compilation
# problem of BoringSSL. See https://github.com/tikv/grpc-rs/issues/536. Once resolved,
# "all-authenticators" will be used again.
# Allow the `pkg-config` crate to cross-compile
export PKG_CONFIG_ALLOW_CROSS=1
# Make the `pkg-config` crate use our wrapper
export PKG_CONFIG=$(pwd)/test/pkg-config
# Set the SYSROOT used by pkg-config
export SYSROOT=/tmp/arm-linux-gnueabihf
# Add the correct libcrypto to the linking process
export RUSTFLAGS="-lcrypto -L/tmp/arm-linux-gnueabihf/lib"
cargo build --features "pkcs11-provider, mbed-crypto-provider, tpm-provider, unix-peer-credentials-authenticator, direct-authenticator" --target armv7-unknown-linux-gnueabihf
export SYSROOT=/tmp/aarch64-linux-gnu
export RUSTFLAGS="-lcrypto -L/tmp/aarch64-linux-gnu/lib"
# Pull in the TS code
git submodule update --init
cargo build --features "pkcs11-provider, mbed-crypto-provider, tpm-provider, trusted-service-provider, unix-peer-credentials-authenticator, direct-authenticator" --target aarch64-unknown-linux-gnu
# This is needed because for some reason the i686/i386 libs aren't picked up if we don't toss them around just before...
apt install -y libc6-dev-i386-amd64-cross
export SYSROOT=/tmp/i686-linux-gnu
export RUSTFLAGS="-lcrypto -L/tmp/i686-linux-gnu/lib"
cargo build --features "pkcs11-provider, mbed-crypto-provider, tpm-provider, unix-peer-credentials-authenticator, direct-authenticator, tss-esapi/generate-bindings" --target i686-unknown-linux-gnu
| true |
46aa39c7e23f9fea1b7539b7dd7222473c694bfa | Shell | ofbodur/Kimberlite-magmatism-fed-by-upwelling-above-mobile-basal-mantle-structures | /Create-Map.sh | UTF-8 | 7,898 | 2.9375 | 3 | [] | no_license | #!/bin/bash
# Ömer F. Bodur
# Last Update: 24 May 2022
# University of Wollongong
region=g
proj_map=N0/12
CaseNumber=1 # Select the case number
if [ $CaseNumber -eq 1 ]
then
CaseDir="$PWD"/Case1
CaseOutputDir="$PWD"/Output-Case1
AgeGridDir="$PWD"/Agegrid-M21-NNR
CratonicShapes="$PWD"/Cratonic-Shapes-M21-NNR
root_for_Kimb_Data="$PWD"/Kimberlites/M21-NNR
maskFileDir="$PWD"/Masks
elif [ $CaseNumber -eq 2 ]
then
CaseDir="$PWD"/Case2
CaseOutputDir="$PWD"/Output-Case2
AgeGridDir="$PWD"/Agegrid-M21-NNR
CratonicShapes="$PWD"/Cratonic-Shapes-M21-NNR
root_for_Kimb_Data="$PWD"/Kimberlites/M21-NNR
maskFileDir="$PWD"/Masks
elif [ $CaseNumber -eq 3 ]
then
CaseDir="$PWD"/Case3
CaseOutputDir="$PWD"/Output-Case3
AgeGridDir="$PWD"/Agegrid-M21-NNR
CratonicShapes="$PWD"/Cratonic-Shapes-M21-NNR
root_for_Kimb_Data="$PWD"/Kimberlites/M21-NNR
maskFileDir="$PWD"/Masks
elif [ $CaseNumber -eq 4 ]
then
CaseDir="$PWD"/Case4
echo $CaseDir
CaseOutputDir="$PWD"/Output-Case4
AgeGridDir="$PWD"/Agegrid-M21
CratonicShapes="$PWD"/Cratonic-Shapes-M21
root_for_Kimb_Data="$PWD"/Kimberlites/M21
maskFileDir="$PWD"/Masks
fi
for Age in `seq 0 20 210`; do
echo $Age
# For whole mantle
filexyz=${CaseDir}/Case${CaseNumber}-Radial-Heat-Advection-Between-322km-and-CMB-Averaged-${Age}-Ma.xyz
#For Lower mantle only
# filexyz=${CaseDir}/LowerMantle/Case${CaseNumber}-Radial-Heat-Advection-Lower-Mantle-Only-Averaged-${Age}-Ma.xyz
#For Upper mantle only
#filexyz=${CaseDir}/UpperMantle/Case${CaseNumber}-Radial-Heat-Advection-Upper-Mantle-Only-Averaged-${Age}-Ma.xyz
echo $filexyz
#For Whole Mantle Only
# Introduce names for grid files and output .ps file to be converted to PDF and/or PNG/JPG
psfile=${CaseOutputDir}/Case${CaseNumber}-Radial-Heat-Advection-Between-322km-and-CMB-Averaged-${Age}-Ma-Regular.ps
medianfile=${CaseOutputDir}/Case${CaseNumber}_Radial-Heat-Advection_between_322km_and_CMB_averaged_${Age}-Ma.median
gridFile=${CaseOutputDir}/Case${CaseNumber}_Radial-Heat-Advection_between_322km_and_CMB_averaged_${Age}-Ma.nc
gridFile_1_0=${CaseOutputDir}/Case${CaseNumber}_Radial-Heat-Advection_between_322km_and_CMB_averaged_1_and_0_${Age}-Ma.nc
gridFileMasked=${CaseOutputDir}/Case${CaseNumber}_Radial-Heat-Advection_between_322km_and_CMB_averaged_${Age}masked-Ma.nc
#For Lower Mantle Only
# Introduce names for grid files and output .ps file to be converted to PDF and/or PNG/JPG
# psfile=${CaseOutputDir}/Case${CaseNumber}-Radial-Heat-Advection-Lower-Mantle-Only-Averaged-${Age}-Ma-ReviewAug.ps
# medianfile=${CaseOutputDir}/Case${CaseNumber}_Radial-Heat-Advection-Lower-Mantle-Only-Averaged_${Age}-Ma.median
# gridFile=${CaseOutputDir}/Case${CaseNumber}_Radial-Heat-Advection-Lower-Mantle-Only-Averaged_${Age}-Ma.nc
# gridFile_1_0=${CaseOutputDir}/Case${CaseNumber}_Radial-Heat-Advection-Lower-Mantle-Only-Averaged_1_and_0_${Age}-Ma.nc
# gridFileMasked=${CaseOutputDir}/Case${CaseNumber}_Radial-Heat-Advection-Lower-Mantle-Only-Averaged_${Age}masked-Ma.nc
#
#For Upper Mantle Only
#Introduce names for grid files and output .ps file to be converted to PDF and/or PNG/JPG
#psfile=${CaseOutputDir}/Case${CaseNumber}-Radial-Heat-Advection-Upper-Mantle-Only-Averaged-${Age}-Ma-ReviewAug.ps
#medianfile=${CaseOutputDir}/Case${CaseNumber}_Radial-Heat-Advection-Upper-Mantle-Only-Averaged_${Age}-Ma.median
#gridFile=${CaseOutputDir}/Case${CaseNumber}_Radial-Heat-Advection-Upper-Mantle-Only-Averaged_${Age}-Ma.nc
#gridFile_1_0=${CaseOutputDir}/Case${CaseNumber}_Radial-Heat-Advection-Upper-Mantle-Only-Averaged_1_and_0_${Age}-Ma.nc
#gridFileMasked=${CaseOutputDir}/Case${CaseNumber}_Radial-Heat-Advection-Upper-Mantle-Only-Averaged_${Age}masked-Ma.nc
if [ $CaseNumber -eq 4 ]
then
maskFileRd2=${maskFileDir}/mask_Rd_M21_${Age}.nc
CratonicShapesPlot=${CratonicShapes}/Cratonic-Shapes-M21-API5-reconstructed_${Age}.00Ma.xy
gmt grdmask -Rd -I0.1 $CratonicShapesPlot -N0/1/1 -G$maskFileRd2
agegrid=${AgeGridDir}/agegrid_final_with_continents_${Age}.grd
gmt blockmedian $filexyz -Rd -I0.1 -V > $medianfile
gmt surface $medianfile -I0.1 -R${region} -V -G$gridFile # Resolution is 0.1 deg.
gmt grdclip $gridFile -Rd -G$gridFile_1_0 -Sa0.99/1 -Sb0.99/0 -V # Set values equal and above 1 as 1, others as 0.
gmt grdmath $maskFileRd2 $gridFile_1_0 MUL = $gridFileMasked
gmt grdmath $agegrid -0.1 GT = Ocean.grd
KimberlitesXY=${root_for_Kimb_Data}/${Age}_Ma_kimberlite_locations-M21-API5.xy
else
maskFileRd2=${maskFileDir}/mask_M21_NNR_${Age}.nc
CratonicShapesPlot=${CratonicShapes}/reconstructed_MerdithCratons_${Age}.00Ma.xy
gmt grdmask -Rd -fc -I0.1 $CratonicShapesPlot -N0/1/1 -G$maskFileRd2 # added -fg
agegrid=${AgeGridDir}/agegridsampled_${Age}-Ma.grd
gmt blockmedian $filexyz -Rg -I0.1 -V > $medianfile
gmt surface $medianfile -I0.1 -R${region} -V -G$gridFile # Resolution is 0.1 deg.
gmt grdclip $gridFile -Rg -G$gridFile_1_0 -Sa0.99/1 -Sb0.99/0 -V # Set values equal and above 1 as 1, others as 0.
gmt grdedit $gridFile_1_0 -Rd -S # change grid format
gmt grdmath $maskFileRd2 $gridFile_1_0 MUL = $gridFileMasked # Make sure they have the same format
gmt grdmath $agegrid -0.1 GT = Ocean.grd
KimberlitesXY=${root_for_Kimb_Data}/${Age}Ma_kimberlite_locations-M21-NNR.xy
fi
# Radial Heat Advection Imaging
gmt psxy $CratonicShapesPlot -R${region} -J${proj_map} -Gblack -t30 -V -K > $psfile
gmt grdimage -R${region} -J${proj_map} $gridFile -CColourmap-RHA.cpt -t30 -O -V -K >> $psfile
# gmt grdimage -R${region} -J${proj_map} $gridFile -CColourmap-RHA.cpt -t70 -O -V -K >> $psfile
# gmt psxy $CratonicShapesPlot -R${region} -J${proj_map} -W0.2p,black -Gwhite -O -V -K >> $psfile
# gmt grdimage -R${region} -J${proj_map} $gridFile -CColourmap-RHA.cpt -t40 -O -V -K >> $psfile
# gmt grdimage -R${region} -J${proj_map} Ocean.grd -COcean.cpt -t65 -O -V -K >> $psfile
# gmt grdimage -R${region} -J${proj_map} $gridFileMasked -CColourmap-RHA_Masked.cpt -t50 -O -V -K >> $psfile
# Kimberlites colored by Age
root_for_Kimb_Data="$PWD"/Kimberlites/M21-NNR
KimberlitesXY=${root_for_Kimb_Data}/${Age}_Ma_kimberlite_locations-M21-API5-NNR-New.xy
# USE BELOW LINE FOR AGE CODED COLORS
# gmt psxy $KimberlitesXY -R${region} -J${proj_map} -Sc0.2 -W1.0p,black -CcolorsKimb.cpt -O -V -K >> $psfile #1)
gmt psxy $KimberlitesXY -R${region} -J${proj_map} -Sc0.3 -W1.0p,black -CcolorsKimb_OMER.cpt -O -V -K >> $psfile #1)
# USE BELOW FOR CONSTANT COLOR (EXTENDED DATA FIG)
# gmt psxy $KimberlitesXY -R${region} -J${proj_map} -Sc0.2 -W1.0p,black -CcolorsKimbConstant.cpt -O -V -K >> $psfile #1)
# gmt mapproject $KBSlice -R${region} -E -I > KBSlice.xy
# USE BELOW LINE FOR AGE CODED COLORS
# gmt psxy $KimberlitesXY -R${region} -J${proj_map} -Sc0.2 -W1.0p,black -CcolorsKimb.cpt -O -V -K >> $psfile #1)
gmt psxy $KimberlitesXY -R${region} -J${proj_map} -Sc0.3 -W0.7p,black -CcolorsKimb_OMER.cpt -O -V -K >> $psfile #1)
# USE BELOW FOR CONSTANT COLOR (EXTENDED DATA FIG)
# gmt psxy $KimberlitesXY -R${region} -J${proj_map} -Sc0.2 -W1.0p,black -CcolorsKimbConstant.cpt -O -V -K >> $psfile #1)
KBSlice="$PWD"/Cross-Section-Coords/KB-Cross-Sections.xy
AFSlice="$PWD"/Cross-Section-Coords/AF-Cross-Sections.xy
#
#if [ $Age -eq 180 ]
#then
# gmt psxy $KBSlice -Rd -J${proj_map} -W0.7p,black -O -V -K >> $psfile #1)
#gmt psxy $AFSlice -Rd -J${proj_map} -W1.7p,black -O -V -K >> $psfile #1)
#fi
#continue
# Add reconstructed kimberlites - OLD
# gmt psxy $KimberlitesXY -R${region} -J${proj_map} -Sc0.17 -W0.3p,black -Gwhite -t5 -O -V -K >> $psfile #1)
# gmt psxy $KimberlitesXY -R${region} -J${proj_map} -Sc0.08 -W0.06p,black -Gmagenta -t3 -O -V -K >> $psfile #2)
# gmt psxy $KimberlitesXY -R${region} -J${proj_map} -Sc0.03 -W0.06p,black -Gblack -t3 -O -V -K >> $psfile #3)
gmt psbasemap -R${region} -J${proj_map} -Ba90f9/a60f11 -O -V >> $psfile
gmt psconvert $psfile -A -Tf
gmt psconvert $psfile -A -Tg
rm $medianfile
# rm $gridFileMasked
rm $psfile
done
| true |
c7849ce2d5c4741321cf9b6efe2b936da09a6039 | Shell | lisuke/repo | /archlinuxcn/glmark2-git/PKGBUILD | UTF-8 | 1,027 | 2.625 | 3 | [] | no_license | # Maintainer: Ryo Munakata <afpacket@gmail.com>
pkgname=glmark2-git
pkgver=r978.9057c05
pkgrel=1
pkgdesc="OpenGL (ES) 2.0 benchmark (X11, Wayland, DRM)"
arch=('i686' 'x86_64')
url="https://launchpad.net/glmark2"
license=('GPL3')
depends=('libjpeg-turbo' 'libpng' 'libx11' 'libxcb' 'wayland' 'libgl' 'libgles' 'systemd-libs' 'wayland-protocols')
makedepends=('git' 'python2' 'udev')
conflicts=('glmark2')
provides=('glmark2')
source=(
"$pkgname"::'git+https://github.com/glmark2/glmark2.git'
)
md5sums=(
'SKIP'
)
# GLMARK2 features
GM2_FLAVORS="x11-gl,x11-glesv2,wayland-gl,wayland-glesv2,drm-gl,drm-glesv2"
pkgver() {
cd "${srcdir}/${pkgname}"
printf "r%s.%s" "$(git rev-list --count HEAD)" "$(git rev-parse --short HEAD)"
}
prepare() {
cd "${srcdir}/${pkgname}"
}
build() {
cd "${srcdir}/${pkgname}"
python2 ./waf configure \
--prefix=/usr \
--with-flavors=${GM2_FLAVORS}
python2 ./waf -j4
}
package() {
cd "${srcdir}/${pkgname}"
DESTDIR="${pkgdir}" python2 ./waf install
}
| true |
0009bedb6c859caea1dbbfd303008f733cfbc881 | Shell | znarf/DockSTARTer | /.scripts/pm_dnf_upgrade.sh | UTF-8 | 430 | 3.234375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
set -euo pipefail
IFS=$'\n\t'
pm_dnf_upgrade() {
if [[ ${CI:-} != true ]] && [[ ${TRAVIS:-} != true ]]; then
info "Upgrading packages. Please be patient, this can take a while."
dnf -y upgrade --refresh > /dev/null 2>&1 || fatal "Failed to upgrade packages from dnf."
fi
}
test_pm_dnf_upgrade() {
# run_script 'pm_dnf_upgrade'
warning "Travis does not test pm_dnf_upgrade."
}
| true |
cbd2f858cbffaa5ef3787ad50c0293da3474ce5f | Shell | dwmkerr/dotfiles | /shell.d/openshift.sh | UTF-8 | 494 | 3.125 | 3 | [] | no_license | #!/usr/bin/env bash
# OpenShift config.
# Log into to the locally configured OpenShift instance.
function oslogin() {
oc login $OPENSHIFT_URL -u $OPENSHIFT_USER -p $OPENSHIFT_PASS --insecure-skip-tls-verify
oc project $1
}
function podlogs() {
echo "Getting logs for $1 for the last $2 duration"
oc logs -f --since=$2 `oc get pods | grep $1 | grep 'Running' | grep -Ev 'deploy' | awk '{print $1}'`
}
# I have to do this way too often
alias killpod="oc delete pod --grace-period=0 "
| true |
c42f89b6c5765f042561147c47d63efa2739d724 | Shell | Matias-Bernal/dose2014 | /implementations/group5/deploy/dev-deploy.sh | UTF-8 | 320 | 2.640625 | 3 | [] | no_license | #!/bin/bash
ip=54.187.136.201
if [[ $1 == "-f" ]]; then
ssh dose@$ip /bin/bash < dev-frontend-deploy.sh
elif [[ $1 == "-b" ]]; then
ssh dose@$ip /bin/bash < dev-backend-deploy.sh
else
cat dev-backend-deploy.sh dev-frontend-deploy.sh >> both-deploy.sh
ssh dose@$ip /bin/bash < both-deploy.sh
rm both-deploy.sh
fi | true |
6cf146eee329fcd681895580b23a761c137ef470 | Shell | uk-gov-mirror/hmrc.interest-restriction-return-frontend | /migrations/applied_migrations/ReportingCompanyCTUTR.sh | UTF-8 | 4,243 | 2.59375 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
echo ""
echo "Applying migration ReportingCompanyCTUTR"
echo "Adding routes to conf/app.routes"
echo "" >> ../conf/app.routes
echo "GET /reportingCompanyCTUTR controllers.aboutReturn.ReportingCompanyCTUTRController.onPageLoad(mode: Mode = NormalMode)" >> ../conf/app.routes
echo "POST /reportingCompanyCTUTR controllers.aboutReturn.ReportingCompanyCTUTRController.onSubmit(mode: Mode = NormalMode)" >> ../conf/app.routes
echo "GET /changeReportingCompanyCTUTR controllers.aboutReturn.ReportingCompanyCTUTRController.onPageLoad(mode: Mode = CheckMode)" >> ../conf/app.routes
echo "POST /changeReportingCompanyCTUTR controllers.aboutReturn.ReportingCompanyCTUTRController.onSubmit(mode: Mode = CheckMode)" >> ../conf/app.routes
echo "Adding messages to English conf.messages"
echo "" >> ../conf/messages.en
echo "" >> ../conf/messages.en
echo "# ReportingCompanyCTUTRPage Messages" >> ../conf/messages.en
echo "# ----------------------------------------------------------" >> ../conf/messages.en
echo "reportingCompanyCTUTR.title = reportingCompanyCTUTR" >> ../conf/messages.en
echo "reportingCompanyCTUTR.heading = reportingCompanyCTUTR" >> ../conf/messages.en
echo "reportingCompanyCTUTR.checkYourAnswersLabel = reportingCompanyCTUTR" >> ../conf/messages.en
echo "reportingCompanyCTUTR.label = ReportingCompanyCTUTR" >> ../conf/messages.en
echo "reportingCompanyCTUTR.error.required = Enter reportingCompanyCTUTR" >> ../conf/messages.en
echo "reportingCompanyCTUTR.error.length = ReportingCompanyCTUTR must be 10 characters or less" >> ../conf/messages.en
echo "Adding messages to Welsh conf.messages"
echo "" >> ../conf/messages.cy
echo "" >> ../conf/messages.cy
echo "# ReportingCompanyCTUTRPage Messages" >> ../conf/messages.cy
echo "# ----------------------------------------------------------" >> ../conf/messages.cy
echo "reportingCompanyCTUTR.title = reportingCompanyCTUTR" >> ../conf/messages.cy
echo "reportingCompanyCTUTR.heading = reportingCompanyCTUTR" >> ../conf/messages.cy
echo "reportingCompanyCTUTR.checkYourAnswersLabel = reportingCompanyCTUTR" >> ../conf/messages.cy
echo "reportingCompanyCTUTR.label = ReportingCompanyCTUTR" >> ../conf/messages.cy
echo "reportingCompanyCTUTR.error.required = Enter reportingCompanyCTUTR" >> ../conf/messages.cy
echo "reportingCompanyCTUTR.error.length = ReportingCompanyCTUTR must be 10 characters or less" >> ../conf/messages.cy
echo "Adding to UserAnswersEntryGenerators"
awk '/trait UserAnswersEntryGenerators/ {\
print;\
print "";\
print " implicit lazy val arbitraryReportingCompanyCTUTRUserAnswersEntry: Arbitrary[(ReportingCompanyCTUTRPage.type, JsValue)] =";\
print " Arbitrary {";\
print " for {";\
print " page <- arbitrary[ReportingCompanyCTUTRPage.type]";\
print " value <- arbitrary[String].suchThat(_.nonEmpty).map(Json.toJson(_))";\
print " } yield (page, value)";\
print " }";\
next }1' ../test/generators/UserAnswersEntryGenerators.scala > tmp && mv tmp ../test/generators/UserAnswersEntryGenerators.scala
echo "Adding to PageGenerators"
awk '/trait PageGenerators/ {\
print;\
print "";\
print " implicit lazy val arbitraryReportingCompanyCTUTRPage: Arbitrary[ReportingCompanyCTUTRPage.type] =";\
print " Arbitrary(ReportingCompanyCTUTRPage)";\
next }1' ../test/generators/PageGenerators.scala > tmp && mv tmp ../test/generators/PageGenerators.scala
echo "Adding to UserAnswersGenerator"
awk '/val generators/ {\
print;\
print " arbitrary[(ReportingCompanyCTUTRPage.type, JsValue)] ::";\
next }1' ../test/generators/UserAnswersGenerator.scala > tmp && mv tmp ../test/generators/UserAnswersGenerator.scala
echo "Adding helper method to CheckYourAnswersHelper"
awk '/class/ {\
print;\
print "";\
print " def reportingCompanyCTUTR: Option[SummaryListRow] = answer(ReportingCompanyCTUTRPage, routes.ReportingCompanyCTUTRController.onPageLoad(CheckMode))";\
next }1' ../app/utils/CheckYourAnswersHelper.scala > tmp && mv tmp ../app/utils/CheckYourAnswersHelper.scala
echo "Migration ReportingCompanyCTUTR completed"
| true |
abd777f445365b9728b8d07b38c3aee99591de73 | Shell | Devki1/Fellowship | /BasicProgram/Arithmatic.sh | UTF-8 | 298 | 3.140625 | 3 | [] | no_license | read -p "enter a number a: " a
read -p "enter a number b: " b
read -p "enter a number c: " c
num1=$(( $a + (( $b * $c )) ))
num2=$(( $c + (( $a / $b )) ))
num3=`echo $a % $b + $c | bc -l`
num4=$(( (( $a * $b )) + $c ))
echo "a+b*c=$num1"
echo "c+a/b=$num2"
echo "a%b+c=$num3"
echo "a*b+c=$num4"
| true |
cef2b821bd934412d3f183cc725f6e3dbcfefc99 | Shell | cms-sw/cmssw | /PerfTools/JeProf/test/jeprof_warn_t.sh | UTF-8 | 198 | 3.09375 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Pass in name and status
function die { echo $1: status $2 ; exit $2; }
if !(jeprof_warn_t 2>&1 | tr '\n' ' ' | grep JeProfModule);then
die "jeprof_warn_t | grep MALLOC_CONF" $?
fi
| true |
ab0f4ea0bdc3cc43a35190b6d81a22d2e9c01e29 | Shell | sudheermuthyala/testing_file_directory- | /case_sample.sh | UTF-8 | 310 | 3.171875 | 3 | [] | no_license | #!/bin/bash
read -p "Enter A Number : " num
a=1
b=2
c=3
d=4
case $num in
$a )
echo "you entry is $a" ;;
$b )
echo "you entry is $b" ;;
$c )
echo "you entry is $c" ;;
$d)
echo "you entry is $d" ;;
*)
echo "you Have to Default"
esac | true |
f657c60f0beb2ed79a799fd0499d598ba73ca0bb | Shell | yehuofirst/Python365 | /push.sh | UTF-8 | 115 | 2.71875 | 3 | [] | no_license | #!/bin/bash
echo start
black .
if [ $1 ]
then
git add .
fi
git commit -m $(ls days/ | sort -Vr | head -1)
git push | true |
6592ecdff858381940d02c93f14505fe54b6aa7c | Shell | delkyd/alfheim_linux-PKGBUILDS | /x86_64-apple-darwin-sdk/PKGBUILD | UTF-8 | 345 | 2.71875 | 3 | [] | no_license | pkgname=x86_64-apple-darwin-sdk
pkgdesc="Mac OS X/Darwin SDK (requires prepackaged SDK with osxcross)"
pkgver=10.12
pkgrel=1
arch=(any)
url="https://github.com/tpoechtrager/osxcross"
license=("APSL")
options=(!strip staticlibs)
package() {
cd MacOSX$pkgver.sdk
find . -type f -exec install -Dm644 {} "$pkgdir/usr/x86_64-apple-darwin/"{} \;
}
| true |
42c145c1ef19b0512adb86521252690b761cde31 | Shell | zhengwanbo/dbtools | /diagnostics/getconfigs.sh | UTF-8 | 1,916 | 3.75 | 4 | [] | no_license | #!/bin/bash
CurDir=$(cd "$(dirname $0)"; pwd)
CONFIGPATH=$CurDir/_config
getconfigfile ()
{
filename=$1
lpath=$CONFIGPATH/$(cd "$(dirname $filename)"; pwd)
mkdir -p $lpath
cp $filename $lpath
}
echo config files beginning...
if [ -d $CONFIGPATH ] ; then
rm -rf $CONFIGPATH
fi
#########################
# Host
#########################
echo get host information ...
getconfigfile /etc/sysctl.conf
getconfigfile /etc/security/limits.conf
if [ -e /etc/modprobe.conf ]; then
getconfigfile /etc/modprobe.conf
fi
getconfigfile /etc/rc.local
getconfigfile /etc/profile
if [ -f /etc/grub.conf ]; then
getconfigfile /etc/grub.conf
fi
if [ -f /etc/sysconfig/clock ]; then
getconfigfile /etc/sysconfig/clock
fi
if [ -f /root/.profile ]; then
getconfigfile /root/.profile
fi
if [ -f /root/.bash_profile ]; then
getconfigfile /root/.bash_profile
fi
if [ -f /home/dbadmin/.profile ]; then
getconfigfile /home/dbadmin/.profile
fi
if [ -f /home/dbadmin/.bash_profile ]; then
getconfigfile /home/dbadmin/.bash_profile
fi
#########################
# Disk
#########################
getconfigfile /etc/fstab
#########################
# Network
#########################
echo get network information ...
getconfigfile /etc/hosts
getconfigfile /etc/sysconfig/network
getconfigfile /etc/resolv.conf
getconfigfile /sbin/ifconfig
if [ -d /etc/sysconfig/network-scripts ]; then
for nic in /etc/sysconfig/network-scripts/ifcfg* ; do
getconfigfile $nic
done
fi
#########################
# ODBC
#########################
echo get odbc information ...
if [ -e /etc/odbc.ini ]; then
getconfigfile /etc/odbc.ini
fi
if [ -e /etc/odbcinst.ini ]; then
getconfigfile /etc/odbcinst.ini
fi
if [ -d $CONFIGPATH ] ; then
cd $CONFIGPATH
tar czvf $CurDir/configs-`date +%Y%m%d%H%M%S`.tgz * 2>&1 > /dev/null
cd $CurDir
rm -rf $CONFIGPATH
fi
echo end config files
| true |
7208e41932d91e5c91381c601c1769141c601674 | Shell | priyansh19/Automation-using-Shell-Scripts | /Extra Scripts/DevOps-Sem3- Lab Scripts/2.2 Automation Scripts/2.2.1 Automation Scripts that save Time and Effort/labA3.sh | UTF-8 | 1,333 | 3.953125 | 4 | [] | no_license | #!/bin/bash
if [ -z "$1" ]; then
echo "ERROR: Credentials file not specified" >&2; exit 1;
elif [ -z "$2" ]; then
echo "ERROR: Backup directory not specified" >&2; exit 1;
fi
credentials_file=$(realpath $1)
backup_directory=$(realpath $2)
if [ ! -f "$credentials_file" ]; then
echo "ERROR: Credentials file does not exist" >&2; exit 1;
elif [ ! -d "$backup_directory" ]; then
echo "ERROR: Backup directory does not exist" >&2; exit 1;
fi
source $credentials_file
if [ -z ${hostname:+word} ]; then
echo "ERROR: hostname is not set" >&2; exit 1;
elif [ -z ${username:+word} ]; then
echo "ERROR: username is not set" >&2; exit 1;
elif [ -z ${password:+word} ]; then
echo "ERROR: password is not set" >&2; exit 1;
fi
mysqldump -h$hostname -u$username -p$password --all-databases > backup.sql
if [[ $? != 0 ]]; then
echo "ERROR: Error in taking mysql backup" >&2; exit 1;
fi
mv backup.sql $backup_directory/$(date +%F_%R).sql
path_to_script=$(realpath "$0")
if ! (crontab -l | grep -Fxq "0 */12 * * * $path_to_script $credentials_file $backup_directory"); then
crontab -l | { cat; echo "0 */12 * * * $path_to_script $credentials_file $backup_directory"; } | crontab -
echo "Script added to Cron"
fi
exit 0
#
# Credentials File
#
# hostname=localhost
# username=root
# password=password
# | true |
647e985104e607b1a78a7221c7466462da7c42dc | Shell | anupam-git/fingerboard | /build-scripts/build-appimage.sh | UTF-8 | 693 | 3.296875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
set -ex
SCRIPTPATH="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )"
(
mkdir -p $SCRIPTPATH/build-appimage
pushd $SCRIPTPATH/build-appimage
cmake $SCRIPTPATH/.. -DCMAKE_INSTALL_PREFIX=/usr -DCMAKE_BUILD_TYPE=RelWithDebInfo
make -j$(nproc)
DESTDIR=$SCRIPTPATH/AppDir make install
mkdir -p $SCRIPTPATH/AppDir/usr/include/x86_64-linux-gnu/
cp -r /usr/include/x86_64-linux-gnu/qt5/ $SCRIPTPATH/AppDir/usr/include/x86_64-linux-gnu/
popd
export APP_VERSION=$(cat $SCRIPTPATH/../CMakeLists.txt | grep -Po '(?<=FINGERBOARD_VERSION )\d*.\d*.\d*')
appimage-builder --skip-tests
rm -rf $SCRIPTPATH/build-appimage
rm -rf $SCRIPTPATH/AppDir
)
| true |
0e3bcf54961c41ab532c60ea52984276c875857b | Shell | petrikoz/wormhole | /roles/common/files/update-motd.d/20-sysinfo | UTF-8 | 1,142 | 3.859375 | 4 | [] | no_license | #!/bin/bash
# get load averages
IFS=" " read LOAD1 LOAD5 LOAD15 <<<$(cat /proc/loadavg | awk '{ print $1,$2,$3 }')
# get free memory
IFS=" " read USED AVAIL TOTAL <<<$(free -htm | grep "Mem" | awk {'print $3,$7,$2'})
# get processes
PROCESS=`ps -eo user=|sort|uniq -c | awk '{ print $2 " " $1 }'`
PROCESS_ALL=`echo "$PROCESS"| awk {'print $2'} | awk '{ SUM += $1} END { print SUM }'`
PROCESS_ROOT=`echo "$PROCESS"| grep root | awk {'print $2'}`
PROCESS_USER=`echo "$PROCESS"| grep -v root | awk {'print $2'} | awk '{ SUM += $1} END { print SUM }'`
# colors
blue="\e[1;34m"
green="\e[1;32m"
white="\e[0;39m"
echo -e "
${blue}SYSTEM INFO:
$white Distro......: $white`cat /etc/*release | grep "PRETTY_NAME" | cut -d "=" -f 2- | sed 's/"//g'`
$white Kernel......: $white`uname -sr`
$white Uptime......: $white`uptime -p`
$white Load........: $green$LOAD1$white (1m), $green$LOAD5$white (5m), $green$LOAD15$white (15m)
$white Processes...: $green$PROCESS_ROOT$white (root), $green$PROCESS_USER$white (user), $green$PROCESS_ALL$white (total)
$white Memory......: $green$USED$white used, $green$AVAIL$white avail, $green$TOTAL$white total"
| true |
d4e4174587542c741694565689a053299758ca63 | Shell | APShirley/sprout-relint | /relint/templates/default/bash_it/custom/recreate_bosh_lite.bash | UTF-8 | 4,520 | 3.40625 | 3 | [
"Unlicense"
] | permissive | function recreate_bosh_lite() {
(
cache_directory="$HOME/Downloads"
set -e
sudo true
update=false
if [[ $1 == "-u" ]]; then
echo -e '\nWill Update cf-release and diego-release'
update=true
fi
cd ~/workspace/bosh-lite
echo -e "\nDestroying current Bosh-Lite"
vagrant destroy --force
#pull in changes for bosh-lite
git pull
#update vagrant box
vagrant box update
echo "Starting up Bosh-Lite"
vagrant up
echo "Adding route"
sudo -S bin/add-route
cd ~/workspace/cf-release
bosh target lite
stemcell_data=`curl --retry 5 -s -L https://bosh.io/api/v1/stemcells/bosh-warden-boshlite-ubuntu-trusty-go_agent`
stemcell_version=`jq '.[0].version' --raw-output <<< $stemcell_data`
stemcell_url=`jq '.[0].regular.url' --raw-output <<< $stemcell_data`
stemcell_filename="bosh-stemcell-$stemcell_version-warden-boshlite-ubuntu-trusty-go_agent.tgz"
if [ ! -f "$cache_directory/$stemcell_filename" ]; then
echo "Downloading stemcell version $stemcell_version"
aria2c -x 16 -s 16 -d $cache_directory -o $stemcell_filename $stemcell_url
else
echo "Stemcell version $stemcell_version already exists"
fi
echo "Uploading Stemcell"
bosh -t lite upload stemcell "$cache_directory/$stemcell_filename"
if $update; then
echo "Updating Diego-Release"
~/workspace/diego-release/scripts/update
fi
cd ~/workspace/cf-release
echo "Deploying CF release"
if $update; then
./scripts/update
fi
cd ~/workspace/cf-release
~/workspace/cf-release/scripts/generate-bosh-lite-dev-manifest
bosh --parallel 10 sync blobs
bosh create release --name cf --force
bosh -t lite upload release
bosh -t lite -n deploy
garden_release_data=`curl --retry 5 -s -L https://bosh.io/api/v1/releases/github.com/cloudfoundry-incubator/garden-linux-release`
garden_release_version=`jq '.[0].version' --raw-output <<< $garden_release_data`
garden_release_url=`jq '.[0].url' --raw-output <<< $garden_release_data`
garden_release_filename="garden-linux-release-$garden_release_version.tgz"
if [ ! -f "$cache_directory/$garden_release_filename" ]; then
echo "Downloading garden release version $garden_release_version"
aria2c -x 16 -s 16 -d $cache_directory -o $garden_release_filename $garden_release_url
else
echo "Garden release version $garden_release_version already exists"
fi
echo "Uploading Garden Linux"
bosh -t lite upload release "$cache_directory/$garden_release_filename"
etcd_release_data=`curl --retry 5 -s -L https://bosh.io/api/v1/releases/github.com/cloudfoundry-incubator/etcd-release`
etcd_release_version=`jq '.[0].version' --raw-output <<< $etcd_release_data`
etcd_release_url=`jq '.[0].url' --raw-output <<< $etcd_release_data`
etcd_release_filename="etcd-release-$etcd_release_version.tgz"
if [ ! -f "$cache_directory/$etcd_release_filename" ]; then
echo "Downloading etcd release version $etcd_release_version"
aria2c -x 16 -s 16 -d $cache_directory -o $etcd_release_filename $etcd_release_url
else
echo "Etcd release version $etcd_release_version already exists"
fi
echo "Uploading etcd-release"
bosh -t lite upload release "$cache_directory/$etcd_release_filename"
cd ~/workspace/diego-release
./scripts/generate-bosh-lite-manifests
echo "Deploying Diego"
bosh -t lite deployment bosh-lite/deployments/diego.yml
bosh --parallel 10 sync blobs
bosh create release --force
bosh -t lite -n upload release
bosh -t lite -n deploy
cd ~/workspace/cf-release
rm -f $stemcell
bosh -t lite deployment bosh-lite/deployments/cf.yml
virtualbox_version=$(brew cask list --versions | grep virtualbox | awk '{print $2}')
reboot_time=$(last -1 reboot | awk '{print $3, $4, $5, $6}')
bosh_lite_box=$(vagrant box list | grep bosh-lite | awk '{print $3}' | sed 's/)//g' | sort -r | head -1)
vagrant_version=$(vagrant --version | awk '{print $2}')
# Append a timestamped message to the recreate log.
# Quoting the expansion preserves the caller's exact spacing and prevents
# accidental glob expansion of the message (the unquoted original
# word-split `$*` and collapsed runs of whitespace).
function log() {
  echo "$(date "+%Y-%m-%d %H:%M:%S %z"): $*" >> ~/Library/Logs/recreate_bosh_lite.log
}
# Append a blank separator line to the recreate log.
function log_empty() {
  printf '\n' >> ~/Library/Logs/recreate_bosh_lite.log
}
log ======= NEW BOSH LITE
log Virtualbox $virtualbox_version
log Last Reboot $reboot_time
log Bosh Lite $bosh_lite_box
log Vagrant $vagrant_version
log_empty
)
}
export -f recreate_bosh_lite
| true |
c599827c18f07381c0c182b74c83e86a0a8465ef | Shell | MoaadSensei98/react-ecommerce | /move/deploy.sh | UTF-8 | 440 | 3.046875 | 3 | [] | no_license | #!/bin/bash
# Release tags to build and publish, in order.
tags=(
    "init"
    "header"
    "product-list"
    "gridview"
    "shopping-cart"
    "forms"
    "fake-http"
    "http"
)
# Iterate the array directly instead of the fragile C-style index loop
# over a manually computed length.
# 'git co' — presumably a checkout alias; confirm in the repo's git config.
for tag in "${tags[@]}"; do
    echo "Deploying: ${tag}"
    git co "${tag}"
    npm run build
    firebase hosting:channel:deploy "${tag}" --expires 30d
done
# Finally publish the main branch as the live site.
git co main
npm run build
firebase deploy
| true |
85d5e8ba56bebdc19c84fd557c055a178d410696 | Shell | pablo-pg/FIS-2021-P6-DesarrolloAgil | /code/tools/install_hooks.sh | UTF-8 | 415 | 3.5 | 4 | [] | no_license | #!/bin/bash
# Installs and activates the Git Hooks service locally.
# BUG FIX: resolved the leftover merge-conflict markers
# (<<<<<<< / ======= / >>>>>>>) that made this script unparseable;
# both sides computed the same value, the quoted form is kept.
GIT_DIR="$(git rev-parse --git-dir)"
CUR_DIR="$(pwd)"
echo "Instalando Git Hooks..."
# Create a symbolic link in the local hooks folder (normally .git/hooks).
ln -sf "${CUR_DIR}/hooks/pre-commit" "$GIT_DIR/hooks/pre-commit"
echo "Listo!!!"
| true |
a239fc475ad7b3a989075fd94aa1731d7b92530a | Shell | RealArtemiy/blacklist-check-unix-linux-utility | /bl.sh | UTF-8 | 378 | 3.375 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Where the blacklist checker and its data files live.
path=/root/bin/bl
# For each IP in ips.txt, record the names of blacklists that flag it.
ips=$(cat "$path/ips.txt")
for ip in $ips
do
    /usr/bin/bl "$ip" | grep blacklisted | awk '{print $2}' >> "$path/blacklisted.txt"
done
# Mail the findings (if any) and archive the report with today's date.
if [ -s "$path/blacklisted.txt" ] && [ -f "$path/blacklisted.txt" ];
then
    # BUG FIX: the original wrapped 'mail' in backticks, which tried to
    # *execute* mail's output as a command.
    mail -r from@example.com -s "Blacklist check" to@example.com < "$path/blacklisted.txt"
    mv "$path/blacklisted.txt" "$path/blacklisted-$(date +"%m-%d-%y").txt"
fi
e0e5fc807d83c589a56647150bc7cc3901848654 | Shell | shannah/cn1-teavm-builds | /upload-files.sh | UTF-8 | 1,125 | 3.15625 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Recreate the SFTP private key from the base64-encoded SSH_KEY secret.
# NOTE(review): /tmp/sftp_rsa is a predictable path and the key exists
# briefly before chmod runs — consider mktemp + umask. 0700 is also an
# unusual mode for a key file (0600 is conventional); confirm intent.
echo "${SSH_KEY}" | base64 --decode >/tmp/sftp_rsa
chmod 0700 /tmp/sftp_rsa
# Remote target; assumes FTP_USER/FTP_HOST/FTP_PATH/FTP_PORT and
# NEW_VERSION are exported by the CI environment — confirm in CI config.
SCP_TARGET="$FTP_USER@$FTP_HOST:~/$FTP_PATH"
#
# Upload CLI
#
scp -o StrictHostKeyChecking=no -i /tmp/sftp_rsa -B -r -P $FTP_PORT tools/cli/target/teavm-cli-$NEW_VERSION.jar $SCP_TARGET/cli/dev/
#
# Update IDEA repository descriptor
#
# The here-doc is intentionally unquoted so $NEW_VERSION expands inside
# the generated XML.
cat <<EOF >.idea-repository.xml
<?xml version="1.0" encoding="UTF-8"?>
<plugins>
  <plugin id="org.teavm.idea" url="https://dl.bintray.com/konsoletyper/teavm/org/teavm/teavm-idea/$NEW_VERSION/teavm-idea-$NEW_VERSION.zip" version="$NEW_VERSION">
    <idea-version since-build="173.*" until-build="193.*" />
    <description>TeaVM support</description>
  </plugin>
</plugins>
EOF
scp -o StrictHostKeyChecking=no -i /tmp/sftp_rsa -B -r -P $FTP_PORT .idea-repository.xml $SCP_TARGET/idea/dev/teavmRepository.xml
#
# Upload Eclipse plugin
#
#cd tools/eclipse/updatesite/target/repository
# find . -type f -exec curl \
#    --ftp-create-dirs \
#    -u $TEAVM_FTP_LOGIN:$TEAVM_FTP_PASSWORD \
#    -T {} \
#    ftp://$TEAVM_FTP_HOST/httpdocs/eclipse/update-site/$BASE_VERSION-dev/{} \;
#cd ../../../../.. | true |
27632562433fb87f31efaa7d38e372e53cabd00d | Shell | subdriven/randomscripts | /linux_template.sh | UTF-8 | 1,030 | 2.703125 | 3 | [] | no_license | #!/bin/bash
# Script to "sysprep" a CentOS-based VM to be converted to a template.
# Shamelessly taken from: https://lonesysadmin.net/2013/03/26/preparing-linux-template-vms/
# Install the following
# yum install -y vim open-vm-tools yum-utils
# yum update -y && reboot
# Run the scanner_add.sh script to add the scanning user.
# Stop logging services so their files can be truncated safely.
service rsyslog stop
service auditd stop
# Keep only the current kernel and drop cached package data.
package-cleanup --oldkernels --count=1 -y
yum clean all
# Rotate, then delete old/compressed logs and truncate the live ones.
logrotate -f /etc/logrotate.conf
rm -f /var/log/*-???????? /var/log/*.gz
rm -f /var/log/dmesg.old
rm -rf /var/log/anaconda
cat /dev/null > /var/log/audit/audit.log
cat /dev/null > /var/log/wtmp
cat /dev/null > /var/log/lastlog
cat /dev/null > /var/log/grubby
# Drop persistent udev NIC rules and hardware-specific identifiers so
# every clone generates its own.
rm -f /etc/udev/rules.d/70*
sed -i '/^UUID\|HWADDR\=/d' /etc/sysconfig/network-scripts/ifcfg-e*
# Clear temp space, host SSH keys, and root's personal files/history.
rm -rf /tmp/*
rm -rf /var/tmp/*
rm -f /etc/ssh/*key*
rm -rf ~root/.ssh/
rm -f ~root/anaconda-ks.cfg
rm -f ~root/.bash_history
# Force a root password change on first login of a clone.
chage -d 0 root
# Self-delete, then remind the operator of the remaining manual step.
rm -f /root/linux_template.sh
echo "Run the following manually:"
echo ""
echo "unset HISTFILE"
echo ""
| true |
70859577ed25ec8ae6e2030c63add8ffd5a98620 | Shell | kunsang-philips/DetektDemo | /config/detekt/pre-push.sh | UTF-8 | 595 | 3.515625 | 4 | [] | no_license | #!/usr/bin/env bash
echo "Deteking Issues in commits, please wait..."
OUTPUT="/tmp/detekt-$(date +%s)"
# BUG FIX: remove the report file on every exit path. Previously the
# success branch leaked it — the final 'rm' sat after 'exit' and was
# unreachable.
trap 'rm -f "$OUTPUT"' EXIT
cd Code || exit
./gradlew detekt -PskipShared > "$OUTPUT"
EXIT_CODE=$?
if [ $EXIT_CODE -ne 0 ]; then
    # Show the detekt report so the developer sees what failed.
    cat "$OUTPUT"
    echo -e "\n"
    echo "***********************************************"
    echo "          Detekt Quality-Gate Failed           "
    echo " Please fix the above issues before committing "
    echo "***********************************************"
    echo -e "\n"
    exit $EXIT_CODE
else
    echo "Detekt Quality-Gate Passed, good to push"
    exit 0
fi
rm "$OUTPUT" | true |
2dbb05fbba61a902b1bc3358591f1c9ac04d15da | Shell | 0x539-Maky/Hardening | /HardeningDebian.sh | UTF-8 | 8,416 | 3.078125 | 3 | [
"Apache-2.0"
] | permissive | ##############################
# Author : Samy BELARBI #
# Creation Date: 04 Jul 2017 #
##############################
####################################################
# 3 level of Hardening : normal, high, advance
####################################################
### TODO
### UPCOMING FEATURES ###
#ENABLE WARNING BANNER
#Place the system directories to their own partitions: /tmp, /var, /var/log, /var/log/audit, /home
#Add nodev, nosuid, and noexec options to temporary storage partitions
##Add nodev, nosuid, and noexec options to /tmp
##Add nodev, nosuid, and noexec options to /dev/shm
#Disable the automounter
#Disable GNOME automounting
#Disable mounting of uncommon filesystem types
#Verify permissions on passwd, shadow, group and gshadow files
#Verify that all world-writable directories have sticky bits set
#Find unauthorized world-writable files
#Find unauthorized SUID/SGID system executables
#Find and repair unowned files
#Verify that all world-writable directories have proper ownership
#Disable core dumps or at least apply restrictive permissions on the files
#Configure sudo to improve auditing of root access
#Verify that no non-root accounts have UID 0
#Set password expiration parameters
#Remove legacy ’+’ entries from password files
#Create and maintain a group containing all human users
#Set lockouts for failed password attempts
#Use pam deny.so to quickly deny access to a service
#Upgrade password hashing algorithm to SHA-512
#Enable automatic security updates unattended-upgrades
#CONFIGURE SECURE PACKAGE REPOSITORY
#CONFIGURE SECURE DNS SERVER
#CONFIGURE SECURE NTP SERVER
#CONFIGURE NETWORK
#CONFIGURE PRINTER if needed or delete (cups..)
#DELETE ALL GUI PACKAGE (X...), disable at boot and siable startx
#remote AVAHI
#DISABLE ROOT SSH LOGIN
#Disable host-based authentication
#Disable .rhosts files
#Set idle timeout interval for user logins
#Limit users’ SSH access
#SSH Ensure only protocol 2 connections allowed
#Restrict at and cron to authorized users
#Restrict permissions on files used by cron
# Remove the anacron subsystem
# If not used, disable the Raw Devices Daemon
#Disable the irda service & Remove the irda-utils package
#Disable the Advanced Power Management Subsystem (apmd) if power management is not necessary
#Disable the Bluetooth input devices (hidd) & Disable Bluetooth Kernel Modules
#Disabler the Bluetooth host controller interface daemon (bluetooth)
#Disable the HAL Daemon (haldaemon)
#Disable the D-Bus IPC Service (messagebus)
#Disable the boot caching
#Disable Smart Cards service if Smart Cards are not in use on the system
#Disable zeroconf networking
#Disable the IA32 microcode utility (microcode ctl) if the system is not running an Intel IA32 processor
#Disable Kudzu hardware probing utility (kudzu)
#Disable the ISDN support (isdn) service
#Disable the Kdump kernel crash analyzer (kdump) service
#Disable the installation helper service (firstboot)
#Remove the talk software
#Remove the TFTP server
#Remove the NIS service
#Remove the rlogin, rsh, and rcp services, telnet
#If possible, remove the Inetd and Xinetd software packages
#Remove the pam ccreds Package if Possible"
############################################################################################
#Disclaimer
# Printed without a trailing newline (echo -n); the next output continues
# on the same line.
echo -n "I do not claim any responsibility for your use of this script."
#Check Debian Version
OS=$(lsb_release -si)
# NOTE(review): ARCH and VER are computed but not referenced in the
# visible part of this script — confirm before removing.
ARCH=$(uname -m | sed 's/x86_//;s/i[3-6]86/32/')
VER=$(lsb_release -sr)
# Refuse to run on anything other than Debian.
if [ "$OS" != "Debian" ]; then
    echo "Your operating system is not supported" 1>&2
    exit 1
fi
#check user executing the script
# Root is required because the functions below modify system files.
if [ "$(id -u)" != "0" ]; then
    echo "This script must be run as root" 1>&2
    exit 1
fi
############################################################################################
sys_upgrades() {
    # Refresh the package index, apply upgrades, then clean out unused
    # packages and the local cache — all non-interactively, in order.
    local action
    for action in update upgrade autoremove autoclean; do
        apt-get --yes --force-yes "$action"
    done
}
account_check() {
    # Accounts with an empty password field in /etc/shadow.
    # (lock such an account with: passwd -l accountName)
    local empty_pw
    empty_pw=$(awk -F: '($2 == "") {print}' /etc/shadow | wc -c)
    if [ "$empty_pw" != 0 ]; then
        echo "Mot de passe vide pour un ou plusieurs compte"
    fi

    # Verify that all account password hashes are shadowed (the second
    # field of /etc/passwd should always be the 'x' placeholder).
    local unshadowed
    unshadowed=$(awk -F: '($2 != "x") {print}' /etc/passwd | wc -c)
    if [ "$unshadowed" != 0 ]; then
        echo "Hash de mot de passe présent dans /etc/passwd pour les comptes suivants :"
        awk -F: '($2 != "x") {print}' /etc/passwd
    fi

    # Verify that root is the only UID-0 account.
    local uid0_count
    uid0_count=$(awk -F: '($3 == "0") {print}' /etc/passwd | wc -l)
    if [ "$uid0_count" != 1 ]; then
        echo "Plusieurs comptes ont un UID=0"
    fi

    # Legacy '+' (NIS inclusion) entries in the password files.
    local nis_entries
    nis_entries=$(grep "^+:" /etc/passwd /etc/shadow /etc/group | wc -c)
    if [ "$nis_entries" != 0 ]; then
        echo "Présence du caractère + dans /etc/shadow ou /etc/group - NIS inclusion"
    fi
}
permissions_check() {
    # Verify/enforce owner and modes on passwd, shadow, group and gshadow.
    # BUG FIX: the original function was missing its closing brace, which
    # nested every following function definition inside this body and left
    # an unclosed brace at end of file, breaking the whole script.
    cd /etc
    chown root:root passwd shadow group gshadow
    chmod 644 passwd group
    chmod 400 shadow gshadow
}
purge_nfs() {
    # This the standard network file sharing for Unix/Linux/BSD
    # style operating systems.
    # Unless you require to share data in this manner,
    # less layers = more sec
    # Removes the NFS server/client stack plus the RPC and automount
    # helpers in one purge.
    apt-get --yes purge nfs-kernel-server nfs-common portmap rpcbind autofs
}
disable_compilers() {
    # Strip all permissions from the system compilers; run 'chmod 755'
    # on them to bring them back when a build is actually needed.
    # Restricting them raises the bar for on-host exploit compilation.
    local compiler
    for compiler in /usr/bin/cc /usr/bin/gcc; do
        chmod 000 "$compiler"
    done
}
#firewall() {}
harden_ssh() {
    # Many attackers will try to use your SSH server to brute-force passwords.
    # This will only allow 6 connections every 30 seconds from the same IP address.
    ufw limit OpenSSH
    #disable ssh root login before disable it create a standard user or you lost connection on your server !
    # NOTE(review): the commented command below appends to ssh_config (the
    # *client* config); PermitRootLogin belongs in sshd_config — confirm
    # before ever enabling it.
    #sudo sh -c 'echo "PermitRootLogin no" >> /etc/ssh/ssh_config'
}
disable_avahi() {
    # Remove the avahi daemon from every runlevel; fewer exposed services
    # means a smaller attack surface.
    update-rc.d -f avahi-daemon disable
    # The Avahi daemon provides mDNS/DNS-SD discovery support
    # (Bonjour/Zeroconf) allowing applications to discover services on the network.
}
process_accounting() {
    # Linux process accounting keeps track of all sorts of details about which commands have been run on the server, who ran them, when, etc.
    apt-get --yes --force-yes install acct
    # Presumably ensures the login-record file exists for 'ac' — confirm.
    touch /var/log/wtmp
    # To show users' connect times, run ac. To show information about commands previously run by users, run sa. To see the last commands run, run lastcomm.
    #Documentation acct : https://www.tecmint.com/how-to-monitor-user-activity-with-psacct-or-acct-tools/
}
kernel_tuning() {
    # Append all hardening settings to /etc/sysctl.conf in one pass:
    # ASLR, IP-spoofing protection (rp_filter), source routing off,
    # broadcast-ping ignore, martian-packet logging, ICMP redirects off,
    # magic-sysrq off, TCP timestamps off, SYN-flood protection and
    # bogus-ICMP-error protection.
    sudo sh -c 'cat >> /etc/sysctl.conf' <<'SYSCTL'
kernel.randomize_va_space=1
net.ipv4.conf.all.rp_filter=1
net.ipv4.conf.all.accept_source_route = 0
net.ipv6.conf.all.accept_source_route = 0 
net.ipv4.conf.default.accept_source_route = 0
net.ipv6.conf.default.accept_source_route = 0
net.ipv4.icmp_echo_ignore_broadcasts=1
net.ipv4.conf.all.log_martians=1
net.ipv4.conf.default.log_martians=1
net.ipv4.conf.all.accept_redirects=0
net.ipv6.conf.all.accept_redirects=0
net.ipv4.conf.all.send_redirects=0
kernel.sysrq=0
net.ipv4.tcp_timestamps=0
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_max_syn_backlog = 2048
net.ipv4.tcp_synack_retries = 2
net.ipv4.tcp_syn_retries = 5
net.ipv4.icmp_ignore_bogus_error_responses=1
SYSCTL
    # RELOAD WITH NEW SETTINGS
    /sbin/sysctl -p
}
main() {
    # Run the hardening steps in order. NOTE(review): permissions_check
    # is defined above but never invoked here — confirm whether that is
    # intentional.
    sys_upgrades
    account_check
    purge_nfs
    disable_compilers
    harden_ssh
    disable_avahi
    process_accounting
    kernel_tuning
}
main "$@"
| true |
f08ccf0c63fc091f4779540af8982c25dfe2861f | Shell | netqyq/shell-examples | /code/CH09/indirect.sh | UTF-8 | 177 | 2.609375 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Demonstrates two ways to dereference a variable *indirectly*:
# via eval (old, portable style) and via ${!name} (bash-native).
variable1=variable2
variable2=Hadoop
# Fixed typo in the printed label ("varialbe1" -> "variable1").
echo "variable1=$variable1"
eval tempvar=\$$variable1
echo "tempvar=$tempvar"
echo "Indirect ref variable1 is :${!variable1}"
| true |
d51f0c18305a1ca5cf9ad8e99409ed9278bba295 | Shell | pmckim1/Capstone | /Info_from_AWS_cluster_algo/Capstone/setup.sh | UTF-8 | 888 | 2.84375 | 3 | [] | no_license | mkdir -p ArticleTexts
# Working directories for the pipeline (ArticleTexts is created just
# above this block).
mkdir -p WhooshIndex
mkdir -p Cache
mkdir -p Output
# System viz tools
sudo yum install -y tmux git htop
# Some python modules requires gcc and g++
sudo yum install -y python3 python3-devel
sudo yum group install -y "Development Tools"
# python-igraph requires cmake3.16 or highter.
# Get the binary distribution.
# NOTE(review): the CMake version (3.20.0) is pinned in four places
# below — keep them in sync if it is ever bumped.
wget -nc -P ~/ https://github.com/Kitware/CMake/releases/download/v3.20.0/cmake-3.20.0-linux-x86_64.tar.gz
# UNpack it.
tar -zxvf ~/cmake-3.20.0-linux-x86_64.tar.gz -C ~/
# Move the cmake tool into place
sudo cp ~/cmake-3.20.0-linux-x86_64/bin/cmake /usr/bin/cmake-3.20
# Symlink the cmake executable to use it.
sudo ln -s /usr/bin/cmake-3.20 /usr/bin/cmake
# python-igraph needs the usr share data.
sudo cp -r ~/cmake-3.20.0-linux-x86_64/share/cmake-3.20 /usr/share/
# Now install the real igraph.
sudo python3 -m pip install -r requirements.txt
| true |
a811dd4b56e1c14a06bb9db3eb6e86a3fbc96d9a | Shell | intel-analytics/BigDL | /scala/serving/scripts/cluster-serving-init | UTF-8 | 644 | 3.453125 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# The pip install step places cluster-serving-py-setup.py on the PATH;
# when pip was never run the command simply will not be found, and the
# config check below is skipped entirely.
if cluster-serving-py-setup.py; then
    if [ -f config.yaml ]; then
        echo "Cluster Serving config file prepared."
    else
        echo "Failed to find config file. Initialization failed."
    fi
fi
# Look for an already-downloaded serving jar next to this script; fetch
# it only when absent.
BIGDL_SERVING_JAR_PATH=$(find . -maxdepth 1 -name 'bigdl-serving-*.jar')
if [ -n "${BIGDL_SERVING_JAR_PATH}" ]; then
    echo "BigDL Cluster Serving Jar found at "$BIGDL_SERVING_JAR_PATH", environment already set up. Initialization success."
else
    echo "Failed to find bigdl-serving jar in current directory, will download it... "
    download-serving-jar.sh
fi
| true |
00db4ef169c48c42b673f92c21b886ae5e486170 | Shell | ymxl85/MRs-based-test-suite-for-APR | /original/TS-mf/sp-sh2/MR4.sh | UTF-8 | 511 | 2.546875 | 3 | [] | no_license | #for i in {2..3..1}
#do
# cd mutants/MR4
# /bin/sh mkfolder.sh $i
# cd -
# t1=$(($(date +%s%N)/1000000))
# /bin/sh SEMR4.sh MR4 v$i
# /bin/sh SPMR1.sh MR4 v$i
# t2=$(($(date +%s%N)/1000000))
# t=$(($t2-$t1))
# echo $t > mutants/MR4/v$i/time.log
#done
# Run the metamorphic-relation scripts for mutant v5 only, recording the
# wall-clock run time in milliseconds (nanosecond epoch / 10^6).
for i in {5..5..1}
do
	cd mutants/MR4
	/bin/sh mkfolder.sh $i
	# NOTE(review): 'cd -' echoes the directory it returns to; redirect
	# to /dev/null if that output is unwanted.
	cd -
	t1=$(($(date +%s%N)/1000000))
	/bin/sh SEMR3.sh MR4 v$i
	/bin/sh SPMR1.sh MR4 v$i
	t2=$(($(date +%s%N)/1000000))
	t=$(($t2-$t1))
	echo $t > mutants/MR4/v$i/time.log
done
| true |
09db75635f965728ce1ad35ff15515017d2ef609 | Shell | rhertzog/lcs | /lcs-security/sbin/lcs-stop-secourssh | UTF-8 | 173 | 2.640625 | 3 | [] | no_license | #!/bin/bash
#
# Stop the emergency ("secours") SSH service
#
# Kill every running rssh instance. pgrep matches on the process name,
# avoiding the classic 'ps | grep' pitfall of also matching the grep
# process (or any unrelated command line containing "rssh").
for RSSHPID in $(pgrep rssh)
do
kill -9 "$RSSHPID"
done
| true |
085c61291b3a8991e8e946cc7e9ec7a05fd1b1c3 | Shell | dbestevez/dotfiles | /src/config/bspwm/environments/auto-detect.sh | UTF-8 | 472 | 3.328125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Configure monitors
# Assumes $PRIMARY and $SECONDARY hold xrandr output names, set by the
# calling environment — TODO confirm against the bspwm config.
if [[ "$PRIMARY" != "LVDS-1" ]] && [[ "$PRIMARY" != "eDP1" ]]; then
    # Turn off built-in display
    xrandr --output eDP1 --off
    # Look up the mons(1) indices of both outputs, then place the
    # secondary to the right of the primary.
    p=$(mons | grep " $PRIMARY " | cut -d':' -f1)
    s=$(mons | grep " $SECONDARY " | cut -d':' -f1)
    mons -S $p,$s:R
else
    mons -e left
fi
# Spread the six bspwm desktops over one or two monitors.
if [[ "$SECONDARY" == "" ]]; then
    bspc monitor $PRIMARY -d 1 2 3 4 5 6
else
    bspc monitor $PRIMARY -d 1 2 3
    bspc monitor $SECONDARY -d 4 5 6
fi
| true |
b219c50e172bb9bed5e242eca77c1fe6fe3fa641 | Shell | EricWittmann/apiman-cartridge | /update.sh | UTF-8 | 981 | 3.53125 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash -e
# Import a Keycloak distribution (directory or zip) into the cartridge.
SRC=$1
# Scratch area for exploded zip distributions.
rm -rf /tmp/keycloak-update
if [ -d "$SRC" ]; then
    if [ -d "$SRC/keycloak" ]; then
        SRC="$SRC/keycloak"
    fi
elif [ -f "$SRC" ]; then
    mkdir /tmp/keycloak-update
    unzip "$SRC" -d /tmp/keycloak-update
    SRC=$(ls -d /tmp/keycloak-update/keycloak-appliance-dist-all-*/keycloak)
else
    echo "usage: update.sh <src dir|zip>"
    # BUG FIX: bail out immediately; previously execution fell through to
    # the "invalid dir" check below after printing usage.
    exit 1
fi
# Sanity-check that SRC really is a Keycloak distribution root.
if [ ! -f "$SRC/standalone/configuration/keycloak-server.json" ]; then
    echo "invalid dir"
    exit 1
fi
# Derive the Keycloak version from the core jar file name.
VERSION=$(ls "$SRC"/modules/system/layers/base/org/keycloak/keycloak-core/main/keycloak-core-*.jar | head -n 1 | sed 's/^.*keycloak-core-//' | sed 's/.jar$//')
# Replace the bundled modules and themes with the new distribution's.
rm -rf versions/8/modules/system/layers/base
rm -rf versions/8/standalone/configuration/themes
cp -r "$SRC/modules/system/layers/base" versions/8/modules/system/layers/
cp -r "$SRC/standalone/configuration/themes" versions/8/standalone/configuration/
sed -i "s/Display-Name: Keycloak .*/Display-Name: Keycloak $VERSION/" metadata/manifest.yml
| true |
1153e95dbeadd6d85bb595835570deb6f73ba006 | Shell | eadains09/my_scripts | /upload_grades.sh | UTF-8 | 750 | 3.703125 | 4 | [] | no_license | # upload_grades.sh <homework #>
# Iterates through the list of students' usernames and accesses the folder where
# their grades are stored, copies the grade to a backup file, and adds and commits
# to their svn account.
# $1- 1st command line argument, the homework or project number being downloaded
###############################################################
#! /bin/bash
# Append diagnostics and per-student output to persistent log files.
exec 2>> upload-errors.txt
exec >> upload-comments.txt
# Collect every student's graded file here; -p tolerates reruns.
mkdir -p "${1}-1-graded"
while IFS='' read -r username || [[ -n "$username" ]]
do
	echo "$username"
	# BUG FIX: skip a student whose directory is missing instead of
	# running the svn commands in whatever directory we happen to be in.
	cd "../$username/$1" || continue
	cp "${username}_${1}.txt" "../../../${1}-1-graded"
	svn add "${username}_${1}.txt"
	svn commit -m "uploading grade"
	cd ../../1_scripts
done < usernames.txt
tar czvf ${1}-1-graded.tgz ${1}-1-graded | true |
a6abaf126157acdcd04a304e5643dba00a817cd0 | Shell | fzinfz/scripts | /lib/nw_ovs.sh | UTF-8 | 595 | 3.296875 | 3 | [] | no_license |
ovs_add_br(){
    # Create one OVS bridge per argument, set it to standalone fail-mode
    # and enable spanning tree on it.
    # Usage hint is printed when called with no arguments.
    [ -z "$1" ] && echo_tip ovs_add_br BR_NAME,..
    # Quote "$@"/"$br" so bridge names are never word-split or globbed.
    for br in "$@"; do
        run ovs-vsctl add-br "$br"
        run ovs-vsctl set-fail-mode "$br" standalone
        ovs-vsctl set bridge "$br" stp_enable=true
    done
}
ovs_interfaces(){
    # For every ethernet interface attached to ovs-system, show the
    # matching line and hint when the link has no carrier.
    for i in $(nw_ls_interfaces | grep ^e); do
        # grep deliberately prints the match so the interface is shown;
        # test the pipeline directly instead of checking $? afterwards.
        if ip addr show dev "$i" 2>/dev/null | grep ovs-system; then
            if ! ip addr show dev "$i" 2>/dev/null | grep LOWER_UP &>/dev/null; then
                echo_tip "plug in cable & run: ip link set dev $i up"
            fi
            echo
        fi
    done
}
| true |
5e0031555086de18f7a6f9265b0369faf667c1b6 | Shell | djdagovs/ubuntu | /redis/redis-redhat | UTF-8 | 1,071 | 4 | 4 | [] | no_license | #!/bin/sh
### BEGIN INIT INFO
# Provides:          redis-server
### END INIT INFO

# SysV init wrapper around the locally-built Redis binaries.
NAME=redis-server
DESC=redis-server
REDIS_CONFIG=/etc/redis/redis.conf
REDIS_SERVER_BIN=/usr/local/bin/redis-server
REDIS_CLI_BIN=/usr/local/bin/redis-cli

set -e

# Nothing to do when the server binary is absent or REDIS_CONFIG unset.
[ -x "$REDIS_SERVER_BIN" ] || exit 0
[ -n "$REDIS_CONFIG" ] || exit 0

# . /lib/lsb/init-functions

RETVAL=0
case "$1" in
  start)
    echo -n "Starting $DESC: "
    $REDIS_SERVER_BIN $REDIS_CONFIG
    RETVAL=$?
    echo "$NAME."
    ;;
  stop)
    echo -n "Stopping $DESC: "
    $REDIS_CLI_BIN shutdown
    RETVAL=$?
    echo "$NAME."
    ;;
  restart)
    # A restart is simply a shutdown followed by a fresh start.
    echo -n "Restarting $DESC: "
    $REDIS_CLI_BIN shutdown
    $REDIS_SERVER_BIN $REDIS_CONFIG
    RETVAL=$?
    echo "$NAME."
    ;;
  status)
    $REDIS_CLI_BIN info
    RETVAL=$?
    ;;
  *)
    echo "Usage: redis-server {start|stop|restart|status}"
    exit 1
    ;;
esac
exit $RETVAL | true |
8ef50a0afbfa3f0f7116530df49d2ea1a15e9a8d | Shell | bodii/test-code | /shell/test7/test02.sh | UTF-8 | 132 | 2.921875 | 3 | [] | no_license | #!/bin/bash
# NOTE(review): one_file and two_file are never used below — confirm
# before removing them.
one_file='01.txt'
two_file='02.txt'
# Print 1 when $1 names a regular file with non-empty content, else 2.
# The deprecated, ambiguous '-a' operator is split into two tests and
# $1 is quoted so paths with spaces work.
if [ -f "$1" ] && [ -n "$(cat "$1")" ]
then
    echo 1
else
    echo 2
fi
exit 0
| true |
42f3c090ef79a71040896faf682b030538a22c2c | Shell | xavicarrillo/nagios-plugins | /OSPF_ChangesDetector2.sh | UTF-8 | 3,525 | 3.703125 | 4 | [] | no_license | #!/bin/bash
#
# This alarm will detect the Open Shortest Path First (OSPF) neighbour failures, aka dead peers
#
# NO "Dead timer expired" -> OK
# "Dead timer expired" + 'LOADING to FULL, Loading Done' -> OK
# "Dead timer expired" without 'LOADING to FULL, Loading Done' -> CRITICAL
#
# xcarrillo@domain.com
#
# Standard Nagios plugin helpers and the aggregated syslog file.
PLUGINSDIR="/usr/lib/nagios/plugins"
LOGFILE="/var/log/all-logs.log"
# Provides the STATE_OK / STATE_WARNING / STATE_CRITICAL exit codes.
. $PLUGINSDIR/utils.sh

# Most recent "Dead timer expired" OSPF adjacency-change line, e.g.:
# ... %OSPF-5-ADJCHG: Process 64529, Nbr 93.174.168.4 ... from 2WAY to
# DOWN, Neighbor Down: Dead timer expired
IsNeighborDown=$(grep -E '%OSPF-5-ADJCHG.*Dead timer expired' "$LOGFILE" | tail -1)

if [ -z "$IsNeighborDown" ]
then
	# No dead-peer message at all: everything is fine.
	exitstatus=${STATE_OK}
	statusmessage="OK: no OSPF neighbour failures"
else
	# A peer died; check whether the same neighbour later reloaded
	# ("LOADING to FULL, Loading Done").
	NeighborDownIP=$(echo "$IsNeighborDown" | awk '{print $14}')
	IsLoaded=$(grep -E 'LOADING to FULL, Loading Done' "$LOGFILE" | grep "$NeighborDownIP" | tail -1)
	if [ -z "$IsLoaded" ]
	then
		# Dead peer without a subsequent reload: critical.
		exitstatus=${STATE_CRITICAL}
		statusmessage="CRITICAL: there is an OSPF neighbour failure for the IP $NeighborDownIP"
	else
		# The reload must have happened *after* the failure, i.e. the
		# (down - loaded) timestamp difference must be negative.
		IsLoadedDate=$(echo "$IsLoaded" | awk '{print $1" "$2" "$3}')
		IsLoadedTimeStamp=$(date --utc --date "$IsLoadedDate" +%s)
		NeighborDownDate=$(echo "$IsNeighborDown" | awk '{print $1" "$2" "$3}')
		NeighborDownTimeStamp=$(date --utc --date "$NeighborDownDate" +%s)
		TimeDifference=$((NeighborDownTimeStamp - IsLoadedTimeStamp))
		echo "$TimeDifference"
		if [ "$TimeDifference" -lt 0 ]
		then
			# Fixed incidents older than 10 minutes are reported OK so the
			# alarm stops repeating until the logs rotate; fresher ones
			# still yield a WARNING so the event gets investigated.
			CurrentTime=$(date --utc --date="now" +%s)
			TimeDifference=$((CurrentTime - IsLoadedTimeStamp))
			if [ "$TimeDifference" -gt 600 ]
			then
				exitstatus=${STATE_OK}
				statusmessage="OK: No dead peers found"
			else
				exitstatus=${STATE_WARNING}
				statusmessage="WARNING: There was an OSPF neighbour failure, although it was fixed in $TimeDifference seconds"
				# NOTE(review): the next line overwrites the warning above;
				# kept as-is to preserve the original behaviour.
				statusmessage="NeighborDownDate is $NeighborDownDate and IsLoadedDate=$IsLoadedDate and time is $TimeDifference"
			fi
		fi # A positive difference means 'Loading Done' predates the failure.
	fi
fi

echo "$statusmessage"
echo "IsNeighborDown is $IsNeighborDown"
echo "$IsLoaded is $IsLoaded"
exit $exitstatus
| true |
9400b73aa8e9a1f63eb12ce6e4c5e31222fcb32c | Shell | aselvan/scripts | /macos/jamf.sh | UTF-8 | 3,528 | 3.734375 | 4 | [
"MIT"
] | permissive | #/bin/sh
#
# jamf.sh --- enable/disable jamf agent and daemons on demand.
#
# Author: Arul Selvan
# Version: Dec 21, 2018
#
# works with user login or elevated
# The console user, even when this script itself is run elevated.
user=`who -m | awk '{print $1;}'`
# list of jamf plists
# System-wide launch daemons installed by jamf/Samanage.
jamf_daemons_plist="\
/Library/LaunchDaemons/com.jamf.management.daemon.plist \
/Library/LaunchDaemons/com.jamfsoftware.task.1.plist \
/Library/LaunchDaemons/com.jamfsoftware.jamf.daemon.plist \
/Library/LaunchDaemons/com.jamfsoftware.startupItem.plist \
/Library/LaunchDaemons/com.samanage.SamanageAgent.plist"
# Per-user launch agents.
jamf_agent_plist="\
/Library/LaunchAgents/com.jamf.management.agent.plist \
/Library/LaunchAgents/com.jamfsoftware.jamf.agent.plist"
# Where jamf keeps its management scripts; the *_links functions below
# swap these for no-op placeholders and back.
script_path="/Library/Application Support/JAMF/ManagementFrameworkScripts"
backup_suffix="backup"
skip_script="skip.sh"
disable_links() {
  cur_dir=`pwd`
  # NOTE(review): cur_dir is saved but never restored — the function
  # leaves the shell in $script_path.
  cd "$script_path" || exit
  # create the placeholder no-op script and links
  cat <<EOF > $skip_script
#!/bin/sh
echo "[\`date\`] \$0 starting skip..." >/tmp/$skip_script.log
exit 0
EOF
  chmod +x $skip_script
  # Replace every management script with a symlink to the no-op script,
  # keeping the original under a ".backup" name.
  scripts=`ls -1 *.sh`
  for script in $scripts ; do
    if [ "$script" = "$skip_script" ] ; then
      continue
    fi
    test -h "$script"
    if [ $? -eq 0 ] ; then
      echo "[ERROR] "$script" is a already a symbolic link, skiping"
      continue
    fi
    mv "$script" "$script".$backup_suffix
    ln -s $skip_script "$script"
  done
}
enable_links() {
  cur_dir=`pwd`
  # NOTE(review): cur_dir is saved but never restored here either.
  cd "$script_path" || exit
  # Undo disable_links: drop each symlink and restore the .backup copy.
  scripts=`ls -1 *.sh`
  for script in $scripts ; do
    if [ "$script" = "$skip_script" ] ; then
      continue
    fi
    if [ -f "$script".$backup_suffix ]; then
      rm "$script"
      mv "$script".$backup_suffix "$script"
    else
      echo "[ERROR] missing file: "$script".$backup_suffix, skipping ..."
    fi
  done
}
#
# remove crap that were messed up, specifically preferences that
# were overriden which breaks crond (i.e. sleep time which breaks crond)
#
disable_misl() {
  # Remove jamf-managed preference overrides for the user (per the file
  # header, these can break local settings such as sleep time / crond).
  echo "[INFO] Cleanup the jamf crap for user $user"
  dscl . -mcxdelete /Users/$user
  echo "[INFO] remove /Library/Managed Preferences ..."
  cd /Library/Managed\ Preferences/ || exit 1
  rm -rf *.plist
  rm -rf $user
  #zap the login hook (read it first to see if there are things we need there)
  defaults delete com.apple.loginwindow LoginHook
  defaults delete com.apple.loginwindow LogoutHook
}
check_root() {
  # Abort unless running as root. BUG FIX: exit non-zero so callers can
  # detect the failure — the original bare 'exit' reported success.
  if [ "$(id -u)" -ne 0 ] ; then
    echo "[ERROR] you must be 'root' to run this script... exiting."
    exit 1
  fi
}
enable() {
  # Reload every jamf launch daemon (system-wide) and launch agent
  # (per-user), then restore the original management scripts.
  echo "[INFO] enabling daemons ..."
  for p in $jamf_daemons_plist ; do
    if [ -f $p ] ; then
      echo "[INFO] Enabling: $p"
      launchctl load $p
    fi
  done
  echo "[INFO] enabling launch agents ..."
  for a in $jamf_agent_plist ; do
    if [ -f $a ] ; then
      echo "[INFO] Enabling: $a"
      # Agents are loaded as the console user, not as root.
      sudo -u $user launchctl load $a
    fi
  done
  echo "[INFO] Enable jamf scripts for user $user"
  enable_links
}
disable() {
  # Unload all jamf daemons/agents (-w per launchctl makes the unload
  # persistent), clean up managed preferences and stub out the scripts.
  echo "[INFO] disabling launch daemons ..."
  for p in $jamf_daemons_plist ; do
    if [ -f $p ] ; then
      # NOTE(review): '[INFO}' typo in the runtime message below.
      echo "[INFO} Disabling: $p"
      launchctl unload -w $p
    fi
  done
  echo "[INFO] disabling launch agents for user $user ..."
  for a in $jamf_agent_plist ; do
    if [ -f $a ] ; then
      echo "[INFO] Disabling: $a"
      sudo -u $user launchctl unload -w $a
    fi
  done
  # disable the misl crap
  disable_misl
  # reset links
  echo "[INFO] disabling scripts..."
  disable_links
}
# Entry point: require root, then dispatch. "$@" invokes the function
# named by $1 (enable or disable) with any remaining arguments.
check_root
case $1 in
  enable|disable) "$@"
  ;;
  *) echo "Usage: $0 <enable|disable>"
  ;;
esac
# NOTE(review): exits 0 even when only the usage message was shown.
exit 0
| true |
f513b851bbff3329cb2485adebbbe30daccb3d17 | Shell | ajlsms/bigdata | /kk-flume/src/main/resources/bin/appMonAgentService.sh | UTF-8 | 528 | 3.375 | 3 | [] | no_license | #!/usr/bin/env bash
linux_service(){
# Start/stop the AppMon agent as the 'appmon' user; stdout is captured
# in the agent log file.
case "$1" in
start)
su - appmon -c "sh /home/ap/appmon/bin/startAppMonAgent.sh" > /home/ap/appmon/logs/appmon.log
;;
stop)
# NOTE(review): '>' truncates the log on every invocation — use '>>'
# if history should be kept.
su - appmon -c "sh /home/ap/appmon/bin/stopAppmonAgent.sh" > /home/ap/appmon/logs/appmon.log
;;
esac
}
# Dispatch on the host OS and forward the service arguments.
# BUG FIX: 'linux_service() $*' was a (malformed) function *definition*,
# not a call, and 'uname' prints "Linux" with a capital L, so the branch
# could never execute.
os=$(uname)
case "$os" in
AIX)
# aix_service "$@"
;;
Linux|linux)
linux_service "$@"
;;
HP-UX)
# hpux_service "$@"
;;
esac
e0b9b89f52cf6a0669a68e93e17fe7b1d95a1185 | Shell | pchengi/esgf-group-migrator | /restore.sh | UTF-8 | 498 | 3.03125 | 3 | [] | no_license | #!/bin/bash
# Require PGPATH to be exported. Testing the variable directly replaces
# the old 'env | grep PGPATH' check, which also matched variables merely
# *containing* the string PGPATH.
if [ -z "${PGPATH}" ]; then
echo "Please export the Postgres home with PGPATH, ex: export PGPATH=/usr/local/pgsql";
# 'exit -1' is not a portable status; use a conventional non-zero code.
exit 1;
fi
PSQL="$PGPATH/bin/psql"
if [ $# -lt 3 ]; then
echo "Please provide target group id, role id for 'user' role and input filename";
exit 1;
fi
# $1: target group id, $2: 'user' role id, $3: input filename.
python prepingestion.py "$1" "$2" "$3"
"$PSQL" -d esgcet -U dbsuper -f restoreusers.sql >/dev/null
"$PSQL" -d esgcet -U dbsuper -f restoreperms.sql >/dev/null
| true |
9ed8b2e5314428d06dffbc916b6ce1f09b54ba84 | Shell | jmaroeder/oh-my-zsh-custom | /lazy-nvm.zsh | UTF-8 | 252 | 2.640625 | 3 | [] | no_license | #!/usr/bin/env zsh
# Pull in the generic lazy-loading helper from the oh-my-zsh custom dir.
source "$ZSH_CUSTOM/lib/lazy_loader.sh"
# Real nvm initialisation, deferred until one of the shimmed commands
# below is first invoked.
nvm_init(){
  export NVM_DIR="$HOME/.nvm"
  [ -s "$NVM_DIR/nvm.sh" ] && source "$NVM_DIR/nvm.sh"
}
# create hooks for commands shimmed by nvm
lazy_load nvm_init nvm node eslint flow npm
| true |
70e49f86de263b8ea830fa1677c21b774336ef38 | Shell | ua9/dev-env | /sh/.bash_profile | UTF-8 | 701 | 3.203125 | 3 | [
"MIT"
] | permissive | alias dev='eval "$(curl -sSL http://bit.ly/dev-env | sh)"'
# Forward a container port to this host via an ssh tunnel into a running
# anovmari/dev-env container.
# Usage: forward_docker_port <container_port> <local_port>
# NOTE(review): assumes exactly one sshd-bearing dev-env container is up and
# that its 22/tcp port is published -- confirm before relying on this.
function forward_docker_port(){
# Pick the first matching container that runs sshd and publishes 22/tcp.
running_id=$(docker ps --filter="ancestor=anovmari/dev-env" --format="{{.ID}} {{.Command}} {{.Ports}}" | grep sshd | grep 22\/tcp | head -n 1 | awk '{print $1}')
# Host-side address/port that Docker mapped onto the container's sshd.
host_ip=$(docker inspect --format='{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostIp}}' $running_id)
host_port=$(docker inspect --format='{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}' $running_id)
docker_ip=$(docker inspect --format='{{.NetworkSettings.IPAddress}}' $running_id)
# Background (-f) tunnel, no remote command (-N): local $2 -> container $1.
ssh -o "StrictHostKeyChecking=no" -o "UserKnownHostsFile /dev/null" -l ubuntu $host_ip -p $host_port -N -f -L $2:$docker_ip:$1
}
| true |
478dd0f20f53b5ecb98a6862dbd5cbb422df6505 | Shell | DustinMorado/subTOM | /src/scripts/bin/subtom_wmd.sh | UTF-8 | 52,088 | 3.21875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
################################################################################
# This is a run script for the standard AV3 subtomogram classification scripts.
# The MATLAB executables for this script were compiled in MATLAB-8.5. The other
# difference is that these scripts have been edited to make sure that the
# output files from each step are readable.
#
# This script is meant to run on the scratch it copies the reference and final
# allmotl file to a local folder after each iteration.
#
# Also, the run script and all the launch scripts are written in bash. This is
# mainly because the behaviour of bash is a bit more predictable.
#
# If the number of alignment jobs is greater than 1000, this script
# automatically splits the job into multiple arrays and launches them. It will
# not run if you have more than 4000 alignment jobs, as this is the current
# maximum per user.
#
# This subtomogram averaging script uses nine MATLAB compiled scripts below:
# - subtom_cluster
# - subtom_eigenvolumes_wmd
# - subtom_join_coeffs
# - subtom_join_dmatrix
# - subtom_parallel_coeffs
# - subtom_parallel_prealign
# - subtom_parallel_sums_cls
# - subtom_parallel_dmatrix
# - subtom_weighted_average_cls
# DRM 05-2019
################################################################################
set -e # Crash on error
set -o nounset # Crash on unset variables
set +o noclobber # Turn off preventing BASH overwriting files
# Drop any environment-module shell functions inherited from the caller so
# they cannot shadow commands used below.
unset ml
unset module
# The single argument is the parameter file defining every *_fn_prefix,
# *_batch count, executable path and filter option referenced in this script.
source "${1}"
# Every batch count must fit inside the scheduler's per-user job limit
# (max_jobs, from the sourced parameter file); abort early with a
# section-specific message otherwise.
#
# Arguments: $1 - number of batch jobs requested for the section
#            $2 - section name used in the error message
# Globals:   max_jobs (read)
# Exits 1 when the request exceeds the limit.
check_max_jobs() {
    local num_batch="${1}"
    local job_desc="${2}"

    if [[ "${num_batch}" -gt "${max_jobs}" ]]
    then
        echo " TOO MANY ${job_desc} JOBS!!!!! I QUIT!!!"
        exit 1
    fi
}

check_max_jobs "${num_dmatrix_prealign_batch}" "CC-MATRIX PREALIGNMENT"
check_max_jobs "${num_dmatrix_batch}" "D-MATRIX"
check_max_jobs "${num_coeff_prealign_batch}" "EIGENCOEFFICIENT PREALIGNMENT"
check_max_jobs "${num_coeff_batch}" "EIGENCOEFFICIENT"
check_max_jobs "${num_avg_batch}" "AVERAGING"
# Check that the appropriate directories exist
# (a local mirror directory is only needed when results are copied off the
# scratch disk, i.e. when skip_local_copy is not 1).
if [[ "${skip_local_copy}" -ne 1 ]]
then
    if [[ ! -d "${local_dir}" ]]
    then
        mkdir -p "${local_dir}"
    fi
fi
# Per-process MATLAB Compiler Runtime caches live under this directory.
if [[ ! -d "${mcr_cache_dir}" ]]
then
    mkdir -p "${mcr_cache_dir}"
fi
# Derive <name>_dir / <name>_base from each configured <name>_fn_prefix and
# make sure the output directory exists on the scratch disk.  The generated
# variable names (dmatrix_dir, dmatrix_base, eig_val_dir, ... weight_sum_base)
# are exactly the ones the rest of this script expects, so this loop is a
# drop-in replacement for the eight hand-written stanzas it deduplicates.
for output_name in dmatrix eig_val eig_vol variance coeff cluster_all_motl \
    ref weight_sum
do
    prefix_var="${output_name}_fn_prefix"
    # ${!prefix_var} is bash indirect expansion: the value of the variable
    # whose name is stored in prefix_var.
    printf -v "${output_name}_dir" '%s' \
        "${scratch_dir}/$(dirname "${!prefix_var}")"
    printf -v "${output_name}_base" '%s' "$(basename "${!prefix_var}")"
    dir_var="${output_name}_dir"
    if [[ ! -d "${!dir_var}" ]]
    then
        mkdir -p "${!dir_var}"
    fi
done
# Request dedicated scheduler slots in proportion to the requested RAM:
# >= 48G -> 24 slots, >= 24G -> 12 slots, anything smaller -> no request.
# The string is appended verbatim to the qsub resource list below.
dedmem=''
if [[ ${mem_free%G} -ge 24 ]]
then
    dedmem=',dedicated=12'
fi
if [[ ${mem_free%G} -ge 48 ]]
then
    dedmem=',dedicated=24'
fi
# An explicit "none" disables masking; any other value is treated as a path
# relative to the scratch directory.
mask_fn_="${scratch_dir}/${mask_fn}"
if [[ "${mask_fn}" == "none" ]]
then
    mask_fn_="none"
fi
################################################################################
# #
# D-MATRIX #
# #
################################################################################
# PREALIGNMENT (OPTIONAL) #
################################################################################
# D-Matrix prealignment (optional): write each particle out pre-rotated and
# pre-shifted by its current motive-list alignment so the D-Matrix batches
# below only interpolate each particle once.
if [[ "${dmatrix_prealign}" -eq "1" ]]
then
    # Calculate number of job scripts needed
    num_jobs=$(((num_dmatrix_prealign_batch + array_max - 1) / array_max))
    job_name_="${job_name}_dmatrix_parallel_prealign"
    # Generate one SGE array-job script per chunk of at most array_max tasks.
    for ((job_idx = 1, array_start = 1; \
        job_idx <= num_jobs; \
        job_idx++, array_start += array_max))
    do
        array_end=$((array_start + array_max - 1))
        if [[ ${array_end} -gt ${num_dmatrix_prealign_batch} ]]
        then
            array_end=${num_dmatrix_prealign_batch}
        fi
        script_fn="${job_name_}_${job_idx}"
        if [[ -f "${script_fn}" ]]
        then
            rm -f "${script_fn}"
        fi
        error_fn="error_${script_fn}"
        if [[ -f "${error_fn}" ]]
        then
            rm -f "${error_fn}"
        fi
        log_fn="log_${script_fn}"
        if [[ -f "${log_fn}" ]]
        then
            rm -f "${log_fn}"
        fi
        # The '###'-prefixed lines stay commented for qsub runs; sed strips
        # the prefix later when run_local is enabled so the task range runs
        # as a plain foreground loop instead of an SGE array job.
        cat>"${script_fn}"<<-PDPREALIJOB
	#!/bin/bash
	#$ -N "${script_fn}"
	#$ -S /bin/bash
	#$ -V
	#$ -cwd
	#$ -l mem_free=${mem_free},h_vmem=${mem_max}${dedmem}
	#$ -o "${log_fn}"
	#$ -e "${error_fn}"
	#$ -t ${array_start}-${array_end}
	set +o noclobber
	set -e
	echo \${HOSTNAME}
	ldpath="XXXMCR_DIRXXX/runtime/glnxa64"
	ldpath="\${ldpath}:XXXMCR_DIRXXX/bin/glnxa64"
	ldpath="\${ldpath}:XXXMCR_DIRXXX/sys/os/glnxa64"
	ldpath="\${ldpath}:XXXMCR_DIRXXX/sys/opengl/lib/glnxa64"
	export LD_LIBRARY_PATH="\${ldpath}"
	###for SGE_TASK_ID in {${array_start}..${array_end}}; do
	mcr_cache_dir="${mcr_cache_dir}/${job_name_}_\${SGE_TASK_ID}"
	if [[ -d "\${mcr_cache_dir}" ]]
	then
	    rm -rf "\${mcr_cache_dir}"
	fi
	export MCR_CACHE_ROOT="\${mcr_cache_dir}"
	"${preali_exec}" \\
	    all_motl_fn_prefix \\
	    "${scratch_dir}/${dmatrix_all_motl_fn_prefix}" \\
	    ptcl_fn_prefix \\
	    "${scratch_dir}/${ptcl_fn_prefix}" \\
	    prealign_fn_prefix \\
	    "${scratch_dir}/${ptcl_fn_prefix}_ali" \\
	    iteration \\
	    "${iteration}" \\
	    num_prealign_batch \\
	    "${num_dmatrix_prealign_batch}" \\
	    process_idx \\
	    "\${SGE_TASK_ID}"
	###done 2>"${error_fn}" >"${log_fn}"
	PDPREALIJOB
    done
    # One prealigned file is expected per motive-list row; only (re)launch
    # when some of them are still missing on the scratch disk.
    all_motl_fn="${scratch_dir}/${dmatrix_all_motl_fn_prefix}_${iteration}.em"
    num_ptcls=$("${motl_dump_exec}" --size "${all_motl_fn}")
    ptcl_dir="${scratch_dir}/$(dirname "${ptcl_fn_prefix}")"
    ptcl_base="$(basename "${ptcl_fn_prefix}_ali")"
    num_complete=$(find "${ptcl_dir}" -regex \
        ".*/${ptcl_base}_${iteration}_[0-9]+.em" | wc -l)
    if [[ ${num_complete} -lt ${num_ptcls} ]]
    then
        echo -e "\nSTARTING D-Matrix Prealignment - Iteration: ${iteration}\n"
        for job_idx in $(seq 1 ${num_jobs})
        do
            script_fn="${job_name_}_${job_idx}"
            chmod u+x "${script_fn}"
            if [[ "${run_local}" -eq 1 ]]
            then
                sed -i 's/\#\#\#//' "${script_fn}"
                "./${script_fn}" &
            else
                qsub "${script_fn}"
            fi
        done
    else
        echo -e "\nSKIPPING D-Matrix Prealignment - Iteration: ${iteration}\n"
    fi
################################################################################
#                       PREALIGNMENT (OPTIONAL) PROGRESS                       #
################################################################################
    # Poll the filesystem once a minute; abort when the completed count
    # stops changing for over two hours (120 polls) while work remains.
    num_complete_prev=0
    unchanged_count=0
    while [[ ${num_complete} -lt ${num_ptcls} ]]
    do
        num_complete=$(find "${ptcl_dir}" -regex \
            ".*/${ptcl_base}_${iteration}_[0-9]+.em" | wc -l)
        if [[ ${num_complete} -eq ${num_complete_prev} ]]
        then
            unchanged_count=$((unchanged_count + 1))
        else
            unchanged_count=0
        fi
        num_complete_prev=${num_complete}
        if [[ ${num_complete} -gt 0 && ${unchanged_count} -gt 120 ]]
        then
            echo "Parallel D-Matrix prealignment has seemed to stall"
            echo "Please check error logs and resubmit the job if neeeded."
            exit 1
        fi
        if [[ -f "error_${job_name_}_1" ]]
        then
            echo -e "\nERROR Update: Prealignment - Iteration: ${iteration}\n"
            tail "error_${job_name_}"_*
        fi
        if [[ -f "log_${job_name_}_1" ]]
        then
            echo -e "\nLOG Update: Prealignment - Iteration: ${iteration}\n"
            tail "log_${job_name_}"_*
        fi
        echo -e "\nSTATUS Update: Prealignment - Iteration: ${iteration}\n"
        echo -e "\t${num_complete} particles out of ${num_ptcls}\n"
        sleep 60s
    done
################################################################################
#                       PREALIGNMENT (OPTIONAL) CLEAN UP                       #
################################################################################
    # Stash the generated scripts and their logs under wmd_<iteration> and
    # remove the per-task MCR caches.
    if [[ ! -d wmd_${iteration} ]]
    then
        mkdir wmd_${iteration}
    fi
    if [[ -e "${job_name_}_1" ]]
    then
        mv -f "${job_name_}"_* wmd_${iteration}/.
    fi
    if [[ -e "log_${job_name_}_1" ]]
    then
        mv -f "log_${job_name_}"_* wmd_${iteration}/.
    fi
    if [[ -e "error_${job_name_}_1" ]]
    then
        mv -f "error_${job_name_}"_* wmd_${iteration}/.
    fi
    find "${mcr_cache_dir}" -regex ".*/${job_name_}_[0-9]+" -print0 |\
        xargs -0 -I {} rm -rf -- {}
    echo -e "\nFINISHED D-Matrix Prealignment - Iteration: ${iteration}\n"
fi
################################################################################
# PARALLEL D-MATRIX CALCULATION #
################################################################################
# Point the particle prefix at the pre-rotated copies when prealignment was
# enabled for the D-Matrix, and at the raw particles otherwise.
ptcl_fn_prefix_="${ptcl_fn_prefix}"
if [[ ${dmatrix_prealign} -eq 1 ]]
then
    ptcl_fn_prefix_="${ptcl_fn_prefix}_ali"
fi
# Compute the D-Matrix in parallel batches -- each SGE array task writes one
# partial matrix file -- then join the batches into a single D-Matrix below.
# Calculate number of job scripts needed
num_jobs=$(((num_dmatrix_batch + array_max - 1) / array_max))
job_name_="${job_name}_parallel_dmatrix"
# Loop to generate parallel alignment scripts
for ((job_idx = 1, array_start = 1; \
    job_idx <= num_jobs; \
    job_idx++, array_start += array_max))
do
    array_end=$((array_start + array_max - 1))
    if [[ ${array_end} -gt ${num_dmatrix_batch} ]]
    then
        array_end=${num_dmatrix_batch}
    fi
    script_fn="${job_name_}_${job_idx}"
    if [[ -f "${script_fn}" ]]
    then
        rm -f "${script_fn}"
    fi
    error_fn="error_${script_fn}"
    if [[ -f "${error_fn}" ]]
    then
        rm -f "${error_fn}"
    fi
    log_fn="log_${script_fn}"
    if [[ -f "${log_fn}" ]]
    then
        rm -f "${log_fn}"
    fi
    # '###'-prefixed lines are uncommented by sed when run_local is enabled.
    cat>"${script_fn}"<<-PDJOB
	#!/bin/bash
	#$ -N "${script_fn}"
	#$ -S /bin/bash
	#$ -V
	#$ -cwd
	#$ -l mem_free=${mem_free},h_vmem=${mem_max}${dedmem}
	#$ -o "${log_fn}"
	#$ -e "${error_fn}"
	#$ -t ${array_start}-${array_end}
	set +o noclobber
	set -e
	echo \${HOSTNAME}
	ldpath="XXXMCR_DIRXXX/runtime/glnxa64"
	ldpath="\${ldpath}:XXXMCR_DIRXXX/bin/glnxa64"
	ldpath="\${ldpath}:XXXMCR_DIRXXX/sys/os/glnxa64"
	ldpath="\${ldpath}:XXXMCR_DIRXXX/sys/opengl/lib/glnxa64"
	export LD_LIBRARY_PATH="\${ldpath}"
	###for SGE_TASK_ID in {${array_start}..${array_end}}; do
	mcr_cache_dir="${mcr_cache_dir}/${job_name_}_\${SGE_TASK_ID}"
	if [[ -d "\${mcr_cache_dir}" ]]
	then
	    rm -rf "\${mcr_cache_dir}"
	fi
	export MCR_CACHE_ROOT="\${mcr_cache_dir}"
	"${par_dmatrix_exec}" \\
	    all_motl_fn_prefix \\
	    "${scratch_dir}/${dmatrix_all_motl_fn_prefix}" \\
	    dmatrix_fn_prefix \\
	    "${scratch_dir}/${dmatrix_fn_prefix}" \\
	    ptcl_fn_prefix \\
	    "${scratch_dir}/${ptcl_fn_prefix_}" \\
	    ref_fn_prefix \\
	    "${scratch_dir}/${dmatrix_ref_fn_prefix}" \\
	    weight_fn_prefix \\
	    "${scratch_dir}/${weight_fn_prefix}" \\
	    mask_fn \\
	    "${mask_fn_}" \\
	    high_pass_fp \\
	    "${high_pass_fp}" \\
	    high_pass_sigma \\
	    "${high_pass_sigma}" \\
	    low_pass_fp \\
	    "${low_pass_fp}" \\
	    low_pass_sigma \\
	    "${low_pass_sigma}" \\
	    nfold \\
	    "${nfold}" \\
	    iteration \\
	    "${iteration}" \\
	    tomo_row \\
	    "${tomo_row}" \\
	    prealigned \\
	    "${dmatrix_prealign}" \\
	    num_dmatrix_batch \\
	    "${num_dmatrix_batch}" \\
	    process_idx \\
	    "\${SGE_TASK_ID}"
	###done 2>"${error_fn}" >"${log_fn}"
	PDJOB
done
num_complete=$(find "${dmatrix_dir}" -regex \
    ".*/${dmatrix_base}_${iteration}_[0-9]+.em" | wc -l)
dmatrix_fn="${scratch_dir}/${dmatrix_fn_prefix}_${iteration}.em"
# Skip the parallel step when the joined D-Matrix already exists or every
# batch file is already on disk.
if [[ -f "${dmatrix_fn}" ]]
then
    do_run=0
    num_complete=${num_dmatrix_batch}
elif [[ ${num_complete} -eq ${num_dmatrix_batch} ]]
then
    do_run=0
else
    do_run=1
fi
if [[ ${do_run} -eq 1 ]]
then
    echo -e "\nSTARTING D-Matrix Calculation - Iteration: ${iteration}\n"
    for job_idx in $(seq 1 ${num_jobs})
    do
        script_fn="${job_name_}_${job_idx}"
        chmod u+x "${script_fn}"
        if [[ "${run_local}" -eq "1" ]]
        then
            sed -i 's/\#\#\#//' "${script_fn}"
            "./${script_fn}" &
        else
            qsub "${script_fn}"
        fi
    done
else
    echo -e "\nSKIPPING D-Matrix Calculation - Iteration: ${iteration}\n"
fi
################################################################################
#                           PARALLEL D-MATRIX PROGRESS                         #
################################################################################
# Poll once a minute; abort if the batch count stalls for over two hours.
num_complete_prev=0
unchanged_count=0
while [[ ${num_complete} -lt ${num_dmatrix_batch} ]]
do
    num_complete=$(find "${dmatrix_dir}" -regex \
        ".*/${dmatrix_base}_${iteration}_[0-9]+.em" | wc -l)
    if [[ ${num_complete} -eq ${num_complete_prev} ]]
    then
        unchanged_count=$((unchanged_count + 1))
    else
        unchanged_count=0
    fi
    num_complete_prev=${num_complete}
    if [[ ${num_complete} -gt 0 && ${unchanged_count} -gt 120 ]]
    then
        echo "Parallel D-Matrix has seemed to stall"
        echo "Please check error logs and resubmit the job if neeeded."
        exit 1
    fi
    if [[ -f "error_${job_name_}_1" ]]
    then
        echo -e "\nERROR Update: D-Matrix - Iteration: ${iteration}\n"
        tail "error_${job_name_}"_*
    fi
    if [[ -f "log_${job_name_}_1" ]]
    then
        echo -e "\nLOG Update: D-Matrix - Iteration: ${iteration}\n"
        tail "log_${job_name_}"_*
    fi
    echo -e "\nSTATUS Update: D-Matrix - Iteration: ${iteration}\n"
    echo -e "\t${num_complete} batches out of ${num_dmatrix_batch}\n"
    sleep 60s
done
################################################################################
#                           PARALLEL D-MATRIX CLEAN UP                         #
################################################################################
# Archive scripts and logs under wmd_<iteration>; drop per-task MCR caches.
if [[ ! -d wmd_${iteration} ]]
then
    mkdir wmd_${iteration}
fi
if [[ -e "${job_name_}_1" ]]
then
    mv -f "${job_name_}"_* wmd_${iteration}/.
fi
if [[ -e "log_${job_name_}_1" ]]
then
    mv -f "log_${job_name_}"_* wmd_${iteration}/.
fi
if [[ -e "error_${job_name_}_1" ]]
then
    mv -f "error_${job_name_}"_* wmd_${iteration}/.
fi
find "${mcr_cache_dir}" -regex ".*/${job_name_}_[0-9]+" -print0 |\
    xargs -0 -I {} rm -rf -- {}
################################################################################
#                                FINAL D-MATRIX                                #
################################################################################
# Join the batch files into the final D-Matrix (and its mean) with the
# compiled MATLAB joiner; this part runs on the submission host itself.
if [[ ! -f "${dmatrix_fn}" ]]
then
    ldpath="XXXMCR_DIRXXX/runtime/glnxa64"
    ldpath="${ldpath}:XXXMCR_DIRXXX/bin/glnxa64"
    ldpath="${ldpath}:XXXMCR_DIRXXX/sys/os/glnxa64"
    ldpath="${ldpath}:XXXMCR_DIRXXX/sys/opengl/lib/glnxa64"
    export LD_LIBRARY_PATH="${ldpath}"
    job_name_="${job_name}_join_dmatrix"
    mcr_cache_dir_="${mcr_cache_dir}/${job_name_}"
    if [[ -d "${mcr_cache_dir_}" ]]
    then
        rm -rf "${mcr_cache_dir_}"
    fi
    export MCR_CACHE_ROOT="${mcr_cache_dir_}"
    "${dmatrix_exec}" \
        all_motl_fn_prefix \
        "${scratch_dir}/${dmatrix_all_motl_fn_prefix}" \
        dmatrix_fn_prefix \
        "${scratch_dir}/${dmatrix_fn_prefix}" \
        ptcl_fn_prefix \
        "${scratch_dir}/${ptcl_fn_prefix}" \
        mask_fn \
        "${mask_fn_}" \
        iteration \
        "${iteration}" \
        num_dmatrix_batch \
        "${num_dmatrix_batch}"
    rm -rf "${mcr_cache_dir_}"
fi
################################################################################
#                            FINAL D-MATRIX CLEAN UP                           #
################################################################################
# Mirror the joined D-Matrix (and its mean) to the local directory, then
# delete the now-redundant batch files from scratch.
if [[ ${skip_local_copy} -ne 1 ]]
then
    local_dmatrix_dir="$(dirname "${local_dir}/${dmatrix_fn_prefix}")"
    if [[ ! -d "${local_dmatrix_dir}" ]]
    then
        mkdir -p "${local_dmatrix_dir}"
    fi
    find "${dmatrix_dir}" -regex \
        ".*/${dmatrix_base}_${iteration}.em" -print0 |\
        xargs -0 -I {} cp -- {} "${local_dmatrix_dir}/."
    find "${dmatrix_dir}" -regex \
        ".*/${dmatrix_base}_mean_${iteration}.em" -print0 |\
        xargs -0 -I {} cp -- {} "${local_dmatrix_dir}/."
fi
find "${dmatrix_dir}" -regex \
    ".*/${dmatrix_base}_${iteration}_[0-9]+.em" -delete
echo -e "\nFINISHED D-Matrix Calculation - Iteration: ${iteration}\n"
################################################################################
# #
# EIGENVOLUMES #
# #
################################################################################
# Decompose the D-Matrix into num_svs singular vectors, producing the
# eigenvalues, eigenvolumes and a variance map; rerun only when any of the
# expected outputs is missing.
eig_val_fn="${scratch_dir}/${eig_val_fn_prefix}_${iteration}.em"
variance_fn="${scratch_dir}/${variance_fn_prefix}_${iteration}.em"
all_done=$(find "${eig_vol_dir}" -regex \
    ".*/${eig_vol_base}_${iteration}_[0-9]+.em" | wc -l)
if [[ ! -f "${eig_val_fn}" ]]
then
    do_run=1
elif [[ ! -f "${variance_fn}" ]]
then
    do_run=1
elif [[ "${all_done}" -ne "${num_svs}" ]]
then
    do_run=1
else
    do_run=0
fi
if [[ "${do_run}" -eq 1 ]]
then
    ldpath="XXXMCR_DIRXXX/runtime/glnxa64"
    ldpath="${ldpath}:XXXMCR_DIRXXX/bin/glnxa64"
    ldpath="${ldpath}:XXXMCR_DIRXXX/sys/os/glnxa64"
    ldpath="${ldpath}:XXXMCR_DIRXXX/sys/opengl/lib/glnxa64"
    export LD_LIBRARY_PATH="${ldpath}"
    job_name_="${job_name}_eigenvolumes_wmd"
    mcr_cache_dir_="${mcr_cache_dir}/${job_name_}"
    if [[ -d "${mcr_cache_dir_}" ]]
    then
        rm -rf "${mcr_cache_dir_}"
    fi
    export MCR_CACHE_ROOT="${mcr_cache_dir_}"
    "${eigvol_exec}" \
        all_motl_fn_prefix \
        "${scratch_dir}/${dmatrix_all_motl_fn_prefix}" \
        ptcl_fn_prefix \
        "${scratch_dir}/${ptcl_fn_prefix}" \
        dmatrix_fn_prefix \
        "${scratch_dir}/${dmatrix_fn_prefix}" \
        eig_val_fn_prefix \
        "${scratch_dir}/${eig_val_fn_prefix}" \
        eig_vol_fn_prefix \
        "${scratch_dir}/${eig_vol_fn_prefix}" \
        variance_fn_prefix \
        "${scratch_dir}/${variance_fn_prefix}" \
        mask_fn \
        "${mask_fn_}" \
        iteration \
        "${iteration}" \
        num_svs \
        "${num_svs}" \
        svds_iterations \
        "${svds_iterations}" \
        svds_tolerance \
        "${svds_tolerance}"
    rm -rf "${mcr_cache_dir_}"
fi
################################################################################
#                             EIGENVOLUME CLEAN UP                             #
################################################################################
# Mirror eigenvalues, eigenvolumes (including the X/Y/Z montages) and the
# variance map to the local directory.
if [[ ${skip_local_copy} -ne 1 ]]
then
    local_eig_val_dir="$(dirname "${local_dir}/${eig_val_fn_prefix}")"
    if [[ ! -d "${local_eig_val_dir}" ]]
    then
        mkdir -p "${local_eig_val_dir}"
    fi
    find "${eig_val_dir}" -regex \
        ".*/${eig_val_base}_${iteration}.em" -print0 |\
        xargs -0 -I {} cp -- {} "${local_eig_val_dir}/."
    local_eig_vol_dir="$(dirname "${local_dir}/${eig_vol_fn_prefix}")"
    if [[ ! -d "${local_eig_vol_dir}" ]]
    then
        mkdir -p "${local_eig_vol_dir}"
    fi
    find "${eig_vol_dir}" -regex \
        ".*/${eig_vol_base}_${iteration}_[0-9]+.em" -print0 |\
        xargs -0 -I {} cp -- {} "${local_eig_vol_dir}/."
    find "${eig_vol_dir}" -regex \
        ".*/${eig_vol_base}_[XYZ]_${iteration}.em" -print0 |\
        xargs -0 -I {} cp -- {} "${local_eig_vol_dir}/."
    local_variance_dir="$(dirname "${local_dir}/${variance_fn_prefix}")"
    if [[ ! -d "${local_variance_dir}" ]]
    then
        mkdir -p "${local_variance_dir}"
    fi
    find "${variance_dir}" -regex \
        ".*/${variance_base}_${iteration}.em" -print0 |\
        xargs -0 -I {} cp -- {} "${local_variance_dir}/."
fi
################################################################################
# #
# EIGENCOEFFICIENTS #
# #
################################################################################
# PREALIGNMENT (OPTIONAL) #
################################################################################
# Eigencoefficient prealignment (optional): mirrors the D-Matrix
# prealignment above but works off the coefficient motive list.
if [[ "${coeff_prealign}" -eq "1" ]]
then
    # Calculate number of job scripts needed
    num_jobs=$(((num_coeff_prealign_batch + array_max - 1) / array_max))
    job_name_="${job_name}_coeff_parallel_prealign"
    # Generate one SGE array-job script per chunk of at most array_max tasks.
    for ((job_idx = 1, array_start = 1; \
        job_idx <= num_jobs; \
        job_idx++, array_start += array_max))
    do
        array_end=$((array_start + array_max - 1))
        if [[ ${array_end} -gt ${num_coeff_prealign_batch} ]]
        then
            array_end=${num_coeff_prealign_batch}
        fi
        script_fn="${job_name_}_${job_idx}"
        if [[ -f "${script_fn}" ]]
        then
            rm -f "${script_fn}"
        fi
        error_fn="error_${script_fn}"
        if [[ -f "${error_fn}" ]]
        then
            rm -f "${error_fn}"
        fi
        log_fn="log_${script_fn}"
        if [[ -f "${log_fn}" ]]
        then
            rm -f "${log_fn}"
        fi
        # '###' lines are uncommented by sed when run_local is enabled.
        cat>"${script_fn}"<<-PECPREALIJOB
	#!/bin/bash
	#$ -N "${script_fn}"
	#$ -S /bin/bash
	#$ -V
	#$ -cwd
	#$ -l mem_free=${mem_free},h_vmem=${mem_max}${dedmem}
	#$ -o "${log_fn}"
	#$ -e "${error_fn}"
	#$ -t ${array_start}-${array_end}
	set +o noclobber
	set -e
	echo \${HOSTNAME}
	ldpath="XXXMCR_DIRXXX/runtime/glnxa64"
	ldpath="\${ldpath}:XXXMCR_DIRXXX/bin/glnxa64"
	ldpath="\${ldpath}:XXXMCR_DIRXXX/sys/os/glnxa64"
	ldpath="\${ldpath}:XXXMCR_DIRXXX/sys/opengl/lib/glnxa64"
	export LD_LIBRARY_PATH="\${ldpath}"
	###for SGE_TASK_ID in {${array_start}..${array_end}}; do
	mcr_cache_dir="${mcr_cache_dir}/${job_name_}_\${SGE_TASK_ID}"
	if [[ -d "\${mcr_cache_dir}" ]]
	then
	    rm -rf "\${mcr_cache_dir}"
	fi
	export MCR_CACHE_ROOT="\${mcr_cache_dir}"
	"${preali_exec}" \\
	    all_motl_fn_prefix \\
	    "${scratch_dir}/${coeff_all_motl_fn_prefix}" \\
	    ptcl_fn_prefix \\
	    "${scratch_dir}/${ptcl_fn_prefix}" \\
	    prealign_fn_prefix \\
	    "${scratch_dir}/${ptcl_fn_prefix}_ali" \\
	    iteration \\
	    "${iteration}" \\
	    num_prealign_batch \\
	    "${num_coeff_prealign_batch}" \\
	    process_idx \\
	    "\${SGE_TASK_ID}"
	###done 2>"${error_fn}" >"${log_fn}"
	PECPREALIJOB
    done
    # Launch only when some prealigned particle files are still missing.
    all_motl_fn="${scratch_dir}/${coeff_all_motl_fn_prefix}_${iteration}.em"
    num_ptcls=$("${motl_dump_exec}" --size "${all_motl_fn}")
    ptcl_dir="${scratch_dir}/$(dirname "${ptcl_fn_prefix}")"
    ptcl_base="$(basename "${ptcl_fn_prefix}_ali")"
    num_complete=$(find "${ptcl_dir}" -regex \
        ".*/${ptcl_base}_${iteration}_[0-9]+.em" | wc -l)
    if [[ "${num_complete}" -lt "${num_ptcls}" ]]
    then
        echo -e "STARTING Eig. Coeff. Prealignment - Iteration: ${iteration}\n"
        for job_idx in $(seq 1 ${num_jobs})
        do
            script_fn="${job_name_}_${job_idx}"
            chmod u+x "${script_fn}"
            if [[ "${run_local}" -eq 1 ]]
            then
                sed -i 's/\#\#\#//' "${script_fn}"
                "./${script_fn}" &
            else
                qsub "${script_fn}"
            fi
        done
    else
        echo -e "SKIPPING Eig. Coeff. Prealignment - Iteration: ${iteration}\n"
    fi
################################################################################
#                       PREALIGNMENT (OPTIONAL) PROGRESS                       #
################################################################################
    # Poll once a minute; abort after ~2h (120 polls) without progress.
    num_complete_prev=0
    unchanged_count=0
    while [[ ${num_complete} -lt ${num_ptcls} ]]
    do
        num_complete=$(find "${ptcl_dir}" -regex \
            ".*/${ptcl_base}_${iteration}_[0-9]+.em" | wc -l)
        if [[ ${num_complete} -eq ${num_complete_prev} ]]
        then
            unchanged_count=$((unchanged_count + 1))
        else
            unchanged_count=0
        fi
        num_complete_prev=${num_complete}
        if [[ ${num_complete} -gt 0 && ${unchanged_count} -gt 120 ]]
        then
            echo "Parallel prealignment has seemed to stall"
            echo "Please check error logs and resubmit the job if neeeded."
            exit 1
        fi
        if [[ -f "error_${job_name_}_1" ]]
        then
            echo -e "\nERROR Update: Prealignment - Iteration: ${iteration}\n"
            tail "error_${job_name_}"_*
        fi
        if [[ -f "log_${job_name_}_1" ]]
        then
            echo -e "\nLOG Update: Prealignment - Iteration: ${iteration}\n"
            tail "log_${job_name_}"_*
        fi
        echo -e "\nSTATUS Update: Prealignment - Iteration: ${iteration}\n"
        echo -e "\t${num_complete} particles out of ${num_ptcls}\n"
        sleep 60s
    done
################################################################################
#                       PREALIGNMENT (OPTIONAL) CLEAN UP                       #
################################################################################
    # Archive scripts and logs under wmd_<iteration>; drop MCR caches.
    if [[ ! -d wmd_${iteration} ]]
    then
        mkdir wmd_${iteration}
    fi
    if [[ -e "${job_name_}_1" ]]
    then
        mv -f "${job_name_}"_* wmd_${iteration}/.
    fi
    if [[ -e "log_${job_name_}_1" ]]
    then
        mv -f "log_${job_name_}"_* wmd_${iteration}/.
    fi
    if [[ -e "error_${job_name_}_1" ]]
    then
        mv -f "error_${job_name_}"_* wmd_${iteration}/.
    fi
    find "${mcr_cache_dir}" -regex ".*/${job_name_}_[0-9]+" -print0 |\
        xargs -0 -I {} rm -rf -- {}
    echo -e "FINISHED Eig. Coeff. Prealignment - Iteration: ${iteration}\n"
fi
################################################################################
# PARALLEL EIGENCOEFFICIENT CALCULATION #
################################################################################
# Use the pre-rotated particle copies when coefficient prealignment was
# enabled, otherwise fall back to the raw particle prefix.
ptcl_fn_prefix_="${ptcl_fn_prefix}"
if [[ ${coeff_prealign} -eq 1 ]]
then
    ptcl_fn_prefix_="${ptcl_fn_prefix}_ali"
fi
# Project every particle onto the eigenvolumes in parallel batches, then
# join the per-batch coefficient files into one matrix below.
# Calculate number of job scripts needed
num_jobs=$(((num_coeff_batch + array_max - 1) / array_max))
job_name_="${job_name}_parallel_coeffs"
# Loop to generate parallel alignment scripts
for ((job_idx = 1, array_start = 1; \
    job_idx <= num_jobs; \
    job_idx++, array_start += array_max))
do
    array_end=$((array_start + array_max - 1))
    if [[ ${array_end} -gt ${num_coeff_batch} ]]
    then
        array_end=${num_coeff_batch}
    fi
    script_fn="${job_name_}_${job_idx}"
    if [[ -f "${script_fn}" ]]
    then
        rm -f "${script_fn}"
    fi
    error_fn="error_${script_fn}"
    if [[ -f "${error_fn}" ]]
    then
        rm -f "${error_fn}"
    fi
    log_fn="log_${script_fn}"
    if [[ -f "${log_fn}" ]]
    then
        rm -f "${log_fn}"
    fi
    # '###' lines are uncommented by sed when run_local is enabled.
    cat>"${script_fn}"<<-PCJOB
	#!/bin/bash
	#$ -N "${script_fn}"
	#$ -S /bin/bash
	#$ -V
	#$ -cwd
	#$ -l mem_free=${mem_free},h_vmem=${mem_max}${dedmem}
	#$ -o "${log_fn}"
	#$ -e "${error_fn}"
	#$ -t ${array_start}-${array_end}
	set +o noclobber
	set -e
	echo \${HOSTNAME}
	ldpath="XXXMCR_DIRXXX/runtime/glnxa64"
	ldpath="\${ldpath}:XXXMCR_DIRXXX/bin/glnxa64"
	ldpath="\${ldpath}:XXXMCR_DIRXXX/sys/os/glnxa64"
	ldpath="\${ldpath}:XXXMCR_DIRXXX/sys/opengl/lib/glnxa64"
	export LD_LIBRARY_PATH="\${ldpath}"
	###for SGE_TASK_ID in {${array_start}..${array_end}}; do
	mcr_cache_dir="${mcr_cache_dir}/${job_name_}_\${SGE_TASK_ID}"
	if [[ -d "\${mcr_cache_dir}" ]]
	then
	    rm -rf "\${mcr_cache_dir}"
	fi
	export MCR_CACHE_ROOT="\${mcr_cache_dir}"
	"${par_coeff_exec}" \\
	    all_motl_fn_prefix \\
	    "${scratch_dir}/${coeff_all_motl_fn_prefix}" \\
	    dmatrix_fn_prefix \\
	    "${scratch_dir}/${dmatrix_fn_prefix}" \\
	    ptcl_fn_prefix \\
	    "${scratch_dir}/${ptcl_fn_prefix_}" \\
	    ref_fn_prefix \\
	    "${scratch_dir}/${dmatrix_ref_fn_prefix}" \\
	    coeff_fn_prefix \\
	    "${scratch_dir}/${coeff_fn_prefix}" \\
	    eig_val_fn_prefix \\
	    "${scratch_dir}/${eig_val_fn_prefix}" \\
	    eig_vol_fn_prefix \\
	    "${scratch_dir}/${eig_vol_fn_prefix}" \\
	    weight_fn_prefix \\
	    "${scratch_dir}/${weight_fn_prefix}" \\
	    mask_fn \\
	    "${mask_fn_}" \\
	    high_pass_fp \\
	    "${high_pass_fp}" \\
	    high_pass_sigma \\
	    "${high_pass_sigma}" \\
	    low_pass_fp \\
	    "${low_pass_fp}" \\
	    low_pass_sigma \\
	    "${low_pass_sigma}" \\
	    nfold \\
	    "${nfold}" \\
	    tomo_row \\
	    "${tomo_row}" \\
	    iteration \\
	    "${iteration}" \\
	    prealigned \\
	    "${coeff_prealign}" \\
	    num_coeff_batch \\
	    "${num_coeff_batch}" \\
	    process_idx \\
	    "\${SGE_TASK_ID}"
	###done 2>"${error_fn}" >"${log_fn}"
	PCJOB
done
num_complete=$(find "${coeff_dir}" -regex \
    ".*/${coeff_base}_${iteration}_[0-9]+.em" | wc -l)
coeff_fn="${scratch_dir}/${coeff_fn_prefix}_${iteration}.em"
# Skip the parallel step when the joined coefficient matrix or every batch
# file already exists.
if [[ -f "${coeff_fn}" ]]
then
    do_run=0
    num_complete=${num_coeff_batch}
elif [[ ${num_complete} -eq ${num_coeff_batch} ]]
then
    do_run=0
else
    do_run=1
fi
if [[ "${do_run}" -eq "1" ]]
then
    echo -e "\nSTARTING Coefficient Calculation - Iteration: ${iteration}\n"
    for job_idx in $(seq 1 ${num_jobs})
    do
        script_fn="${job_name_}_${job_idx}"
        chmod u+x "${script_fn}"
        if [[ "${run_local}" -eq 1 ]]
        then
            sed -i 's/\#\#\#//' "${script_fn}"
            "./${script_fn}" &
        else
            qsub "${script_fn}"
        fi
    done
else
    echo -e "\nSKIPPING Coefficient Calculation - Iteration: ${iteration}\n"
fi
################################################################################
#                       PARALLEL EIGENCOEFFICIENT PROGRESS                     #
################################################################################
# Poll once a minute; abort if the batch count stalls for over two hours.
num_complete_prev=0
unchanged_count=0
while [[ ${num_complete} -lt ${num_coeff_batch} ]]
do
    num_complete=$(find "${coeff_dir}" -regex \
        ".*/${coeff_base}_${iteration}_[0-9]+.em" | wc -l)
    if [[ ${num_complete} -eq ${num_complete_prev} ]]
    then
        unchanged_count=$((unchanged_count + 1))
    else
        unchanged_count=0
    fi
    num_complete_prev=${num_complete}
    if [[ ${num_complete} -gt 0 && ${unchanged_count} -gt 120 ]]
    then
        echo "Parallel coefficients has seemed to stall"
        echo "Please check error logs and resubmit the job if neeeded."
        exit 1
    fi
    if [[ -f "error_${job_name_}_1" ]]
    then
        echo -e "\nERROR Update: Coefficients - Iteration: ${iteration}\n"
        tail "error_${job_name_}"_*
    fi
    if [[ -f "log_${job_name_}_1" ]]
    then
        echo -e "\nLOG Update: Coefficients - Iteration: ${iteration}\n"
        tail "log_${job_name_}"_*
    fi
    echo -e "\nSTATUS Update: Coefficients - Iteration: ${iteration}\n"
    echo -e "\t${num_complete} batches out of ${num_coeff_batch}\n"
    sleep 60s
done
################################################################################
#                       PARALLEL EIGENCOEFFICIENT CLEAN UP                     #
################################################################################
# Archive scripts and logs under wmd_<iteration>; drop per-task MCR caches.
if [[ ! -d wmd_${iteration} ]]
then
    mkdir wmd_${iteration}
fi
if [[ -e "${job_name_}_1" ]]
then
    mv -f "${job_name_}"_* wmd_${iteration}/.
fi
if [[ -e "log_${job_name_}_1" ]]
then
    mv -f "log_${job_name_}"_* wmd_${iteration}/.
fi
if [[ -e "error_${job_name_}_1" ]]
then
    mv -f "error_${job_name_}"_* wmd_${iteration}/.
fi
find "${mcr_cache_dir}" -regex ".*/${job_name_}_[0-9]+" -print0 |\
    xargs -0 -I {} rm -rf -- {}
################################################################################
#                             FINAL EIGENCOEFFICIENT                           #
################################################################################
# Join the per-batch coefficient files on the submission host.
if [[ ! -f "${coeff_fn}" ]]
then
    ldpath="XXXMCR_DIRXXX/runtime/glnxa64"
    ldpath="${ldpath}:XXXMCR_DIRXXX/bin/glnxa64"
    ldpath="${ldpath}:XXXMCR_DIRXXX/sys/os/glnxa64"
    ldpath="${ldpath}:XXXMCR_DIRXXX/sys/opengl/lib/glnxa64"
    export LD_LIBRARY_PATH="${ldpath}"
    job_name_="${job_name}_join_coeffs"
    mcr_cache_dir_="${mcr_cache_dir}/${job_name_}"
    if [[ -d "${mcr_cache_dir_}" ]]
    then
        rm -rf "${mcr_cache_dir_}"
    fi
    export MCR_CACHE_ROOT="${mcr_cache_dir_}"
    "${coeff_exec}" \
        coeff_fn_prefix \
        "${scratch_dir}/${coeff_fn_prefix}" \
        iteration \
        "${iteration}" \
        num_coeff_batch \
        "${num_coeff_batch}"
    rm -rf "${mcr_cache_dir_}"
fi
################################################################################
#                         FINAL EIGENCOEFFICIENT CLEAN UP                      #
################################################################################
# Mirror motive list and joined coefficients locally, then delete the
# now-redundant batch files from scratch.
if [[ ${skip_local_copy} -ne 1 ]]
then
    local_coeff_all_motl_dir="$(dirname \
        "${local_dir}/${coeff_all_motl_fn_prefix}")"
    if [[ ! -d "${local_coeff_all_motl_dir}" ]]
    then
        mkdir -p "${local_coeff_all_motl_dir}"
    fi
    coeff_all_motl_dir="${scratch_dir}/$(dirname \
        "${coeff_all_motl_fn_prefix}")"
    coeff_all_motl_base="$(basename "${coeff_all_motl_fn_prefix}")"
    find "${coeff_all_motl_dir}" -regex \
        ".*/${coeff_all_motl_base}_${iteration}.em" -print0 |\
        xargs -0 -I {} cp -- {} "${local_coeff_all_motl_dir}/."
    local_coeff_dir="$(dirname "${local_dir}/${coeff_fn_prefix}")"
    if [[ ! -d "${local_coeff_dir}" ]]
    then
        mkdir -p "${local_coeff_dir}"
    fi
    find "${coeff_dir}" -regex \
        ".*/${coeff_base}_${iteration}.em" -print0 |\
        xargs -0 -I {} cp -- {} "${local_coeff_dir}/."
fi
find "${coeff_dir}" -regex \
    ".*/${coeff_base}_${iteration}_[0-9]+.em" -delete
echo -e "\nFINISHED Coefficient Calculation - Iteration: ${iteration}\n"
################################################################################
# #
# CLASS AVERAGING #
# #
################################################################################
# CLUSTERING #
################################################################################
# Cluster the particles on the selected eigencoefficients (coeff_idxs) into
# num_classes classes, writing a class-labelled motive list; skipped when
# that motive list already exists.
cluster_fn="${scratch_dir}/${cluster_all_motl_fn_prefix}_${iteration}.em"
if [[ ! -f "${cluster_fn}" ]]
then
    ldpath="XXXMCR_DIRXXX/runtime/glnxa64"
    ldpath="${ldpath}:XXXMCR_DIRXXX/bin/glnxa64"
    ldpath="${ldpath}:XXXMCR_DIRXXX/sys/os/glnxa64"
    ldpath="${ldpath}:XXXMCR_DIRXXX/sys/opengl/lib/glnxa64"
    export LD_LIBRARY_PATH="${ldpath}"
    job_name_="${job_name}_cluster"
    mcr_cache_dir_="${mcr_cache_dir}/${job_name_}"
    if [[ -d "${mcr_cache_dir_}" ]]
    then
        rm -rf "${mcr_cache_dir_}"
    fi
    export MCR_CACHE_ROOT="${mcr_cache_dir_}"
    "${cluster_exec}" \
        all_motl_fn_prefix \
        "${scratch_dir}/${coeff_all_motl_fn_prefix}" \
        coeff_fn_prefix \
        "${scratch_dir}/${coeff_fn_prefix}" \
        output_motl_fn_prefix \
        "${scratch_dir}/${cluster_all_motl_fn_prefix}" \
        iteration \
        "${iteration}" \
        cluster_type \
        "${cluster_type}" \
        coeff_idxs \
        "${coeff_idxs}" \
        num_classes \
        "${num_classes}"
    rm -rf "${mcr_cache_dir_}"
fi
################################################################################
#                              CLUSTERING CLEAN UP                             #
################################################################################
# Mirror the class-labelled motive list to the local directory.
if [[ ${skip_local_copy} -ne 1 ]]
then
    local_cluster_all_motl_dir="$(dirname \
        "${local_dir}/${cluster_all_motl_fn_prefix}")"
    if [[ ! -d "${local_cluster_all_motl_dir}" ]]
    then
        mkdir -p "${local_cluster_all_motl_dir}"
    fi
    find "${cluster_all_motl_dir}" -regex \
        ".*/${cluster_all_motl_base}_${iteration}.em" -print0 |\
        xargs -0 -I {} cp -- {} "${local_cluster_all_motl_dir}/."
fi
################################################################################
# PARALLEL AVERAGING #
################################################################################
# Calculate number of job scripts needed
num_jobs=$(((num_avg_batch + array_max - 1) / array_max))
job_name_="${job_name}_parallel_sums_cls"
# Loop to generate parallel alignment scripts
for ((job_idx = 1, array_start = 1; \
job_idx <= num_jobs; \
job_idx++, array_start += array_max))
do
array_end=$((array_start + array_max - 1))
if [[ ${array_end} -gt ${num_avg_batch} ]]
then
array_end=${num_avg_batch}
fi
script_fn="${job_name_}_${job_idx}"
if [[ -f "${script_fn}" ]]
then
rm -f "${script_fn}"
fi
error_fn="error_${script_fn}"
if [[ -f "${error_fn}" ]]
then
rm -f "${error_fn}"
fi
log_fn="log_${script_fn}"
if [[ -f "${log_fn}" ]]
then
rm -f "${log_fn}"
fi
cat>"${script_fn}"<<-PSUMJOB
#!/bin/bash
#$ -N "${script_fn}"
#$ -S /bin/bash
#$ -V
#$ -cwd
#$ -l mem_free=${mem_free},h_vmem=${mem_max}${dedmem}
#$ -o "${log_fn}"
#$ -e "${error_fn}"
#$ -t ${array_start}-${array_end}
set +o noclobber
set -e
echo \${HOSTNAME}
ldpath="XXXMCR_DIRXXX/runtime/glnxa64"
ldpath="\${ldpath}:XXXMCR_DIRXXX/bin/glnxa64"
ldpath="\${ldpath}:XXXMCR_DIRXXX/sys/os/glnxa64"
ldpath="\${ldpath}:XXXMCR_DIRXXX/sys/opengl/lib/glnxa64"
export LD_LIBRARY_PATH="\${ldpath}"
###for SGE_TASK_ID in {${array_start}..${array_end}}; do
mcr_cache_dir="${mcr_cache_dir}/${job_name_}_\${SGE_TASK_ID}"
if [[ -d "\${mcr_cache_dir}" ]]
then
rm -rf "\${mcr_cache_dir}"
fi
export MCR_CACHE_ROOT="\${mcr_cache_dir}"
"${sum_exec}" \\
all_motl_fn_prefix \\
"${scratch_dir}/${cluster_all_motl_fn_prefix}" \\
ref_fn_prefix \\
"${scratch_dir}/${ref_fn_prefix}" \\
ptcl_fn_prefix \\
"${scratch_dir}/${ptcl_fn_prefix}" \\
weight_fn_prefix \\
"${scratch_dir}/${weight_fn_prefix}" \\
weight_sum_fn_prefix \\
"${scratch_dir}/${weight_sum_fn_prefix}" \\
iteration \\
"${iteration}" \\
tomo_row \\
"${tomo_row}" \\
num_avg_batch \\
"${num_avg_batch}" \\
process_idx \\
"\${SGE_TASK_ID}"
###done 2>"${error_fn}" >"${log_fn}"
PSUMJOB
done
num_total=$((num_avg_batch * num_classes))
num_complete=$(find "${ref_dir}" -regex \
".*/${ref_base}_class_[0-9]+_${iteration}_[0-9]+.em" | wc -l)
all_done=$(find "${ref_dir}" -regex \
".*/${ref_base}_class_[0-9]+_${iteration}.em" | wc -l)
if [[ "${all_done}" -eq "${num_classes}" ]]
then
do_run=0
num_complete="${num_total}"
elif [[ "${num_complete}" -eq "${num_total}" ]]
then
do_run=0
else
do_run=1
fi
if [[ "${do_run}" -eq "1" ]]
then
echo -e "\nSTARTING Parallel Average - Iteration: ${iteration}\n"
for job_idx in $(seq 1 ${num_jobs})
do
script_fn="${job_name_}_${job_idx}"
chmod u+x "${script_fn}"
if [[ "${run_local}" -eq 1 ]]
then
sed -i 's/\#\#\#//' "${script_fn}"
"./${script_fn}" &
else
qsub "${script_fn}"
fi
done
else
echo -e "\nSKIPPING Parallel Average - Iteration: ${iteration}\n"
fi
################################################################################
# PARALLEL AVERAGING PROGRESS #
################################################################################
num_complete_prev=0
unchanged_count=0
while [[ ${num_complete} -lt ${num_total} ]]
do
num_complete=$(find "${ref_dir}" -regex \
".*/${ref_base}_class_[0-9]+_${iteration}_[0-9]+.em" | wc -l)
if [[ ${num_complete} -eq ${num_complete_prev} ]]
then
unchanged_count=$((unchanged_count + 1))
else
unchanged_count=0
fi
num_complete_prev=${num_complete}
if [[ ${num_complete} -gt 0 && ${unchanged_count} -gt 120 ]]
then
echo "Parallel averaging has seemed to stall"
echo "Please check error logs and resubmit the job if neeeded."
exit 1
fi
if [[ -f "error_${job_name_}_1" ]]
then
echo -e "\nERROR Update: Averaging - Iteration: ${iteration}\n"
tail "error_${job_name_}"_*
fi
if [[ -f "log_${job_name_}_1" ]]
then
echo -e "\nLOG Update: Averaging - Iteration: ${iteration}\n"
tail "log_${job_name_}"_*
fi
echo -e "\nSTATUS Update: Averaging - Iteration: ${iteration}\n"
echo -e "\t${num_complete} parallel sums out of ${num_total}\n"
sleep 60s
done
################################################################################
# PARALLEL AVERAGING CLEAN UP #
################################################################################
if [[ ! -d wmd_${iteration} ]]
then
mkdir wmd_${iteration}
fi
if [[ -e "${job_name_}_1" ]]
then
mv -f "${job_name_}"_* wmd_${iteration}/.
fi
if [[ -e "log_${job_name_}_1" ]]
then
mv -f "log_${job_name_}"_* wmd_${iteration}/.
fi
if [[ -e "error_${job_name_}_1" ]]
then
mv -f "error_${job_name_}"_* wmd_${iteration}/.
fi
find "${mcr_cache_dir}" -regex ".*/${job_name_}_[0-9]+" -print0 |\
xargs -0 -I {} rm -rf -- {}
################################################################################
# FINAL AVERAGE #
################################################################################
if [[ "${all_done}" -ne "${num_classes}" ]]
then
ldpath="XXXMCR_DIRXXX/runtime/glnxa64"
ldpath="${ldpath}:XXXMCR_DIRXXX/bin/glnxa64"
ldpath="${ldpath}:XXXMCR_DIRXXX/sys/os/glnxa64"
ldpath="${ldpath}:XXXMCR_DIRXXX/sys/opengl/lib/glnxa64"
export LD_LIBRARY_PATH="${ldpath}"
job_name_="${job_name}_weighted_average_cls"
mcr_cache_dir_="${mcr_cache_dir}/${job_name_}"
if [[ -d "${mcr_cache_dir_}" ]]
then
rm -rf "${mcr_cache_dir_}"
fi
export MCR_CACHE_ROOT="${mcr_cache_dir_}"
"${avg_exec}" \
all_motl_fn_prefix \
"${scratch_dir}/${cluster_all_motl_fn_prefix}" \
ref_fn_prefix \
"${scratch_dir}/${ref_fn_prefix}" \
weight_sum_fn_prefix \
"${scratch_dir}/${weight_sum_fn_prefix}" \
iteration \
"${iteration}" \
num_avg_batch \
"${num_avg_batch}"
rm -rf "${mcr_cache_dir_}"
fi
################################################################################
# FINAL AVERAGE CLEAN UP #
################################################################################
if [[ ${skip_local_copy} -ne 1 ]]
then
local_ref_dir="$(dirname "${local_dir}/${ref_fn_prefix}")"
if [[ ! -d "${local_ref_dir}" ]]
then
mkdir -p "${local_ref_dir}"
fi
find "${ref_dir}" -regex \
".*/${ref_base}_class_[0-9]+_${iteration}.em" -print0 |\
xargs -0 -I {} cp -- {} "${local_ref_dir}/."
find "${ref_dir}" -regex \
".*/${ref_base}_[XYZ]_${iteration}.em" -print0 |\
xargs -0 -I {} cp -- {} "${local_ref_dir}/."
find "${ref_dir}" -regex \
".*/${ref_base}_class_[0-9]+_debug_raw_${iteration}.em" -print0 |\
xargs -0 -I {} cp -- {} "${local_ref_dir}/."
local_weight_sum_dir="$(dirname "${local_dir}/${weight_sum_fn_prefix}")"
if [[ ! -d "${local_weight_sum_dir}" ]]
then
mkdir -p "${local_weight_sum_dir}"
fi
find "${weight_sum_dir}" -regex \
".*/${weight_sum_base}_class_[0-9]+_debug_${iteration}.em" -print0 |\
xargs -0 -I {} cp -- {} "${local_weight_sum_dir}/."
find "${weight_sum_dir}" -regex \
".*/${weight_sum_base}_class_[0-9]+_debug_inv_${iteration}.em" \
-print0 | xargs -0 -I {} cp -- {} "${local_weight_sum_dir}/."
fi
find "${ref_dir}" -regex \
".*/${ref_base}_class_[0-9]+_${iteration}_[0-9]+.em" -delete
find "${weight_sum_dir}" -regex \
".*/${weight_sum_base}_class_[0-9]+_${iteration}_[0-9]+.em" -delete
echo -e "\nFINISHED Parallel Average - Iteration: ${iteration}\n"
if [[ ! -f subTOM_protocol.md ]]
then
touch subTOM_protocol.md
fi
printf "# WMD Classification Iteration %d\n" "${iteration}" >>\
subTOM_protocol.md
printf -- "---------------------------------\n" >> subTOM_protocol.md
printf "| %-25s | %25s |\n" "OPTION" "VALUE" >> subTOM_protocol.md
printf "|:--------------------------" >> subTOM_protocol.md
printf "|:--------------------------|\n" >> subTOM_protocol.md
printf "| %-25s | %25s |\n" "scratch_dir" "${scratch_dir}" >> subTOM_protocol.md
printf "| %-25s | %25s |\n" "local_dir" "${local_dir}" >> subTOM_protocol.md
printf "| %-25s | %25s |\n" "mcr_cache_dir" "${mcr_cache_dir}" >>\
subTOM_protocol.md
printf "| %-25s | %25s |\n" "exec_dir" "${exec_dir}" >> subTOM_protocol.md
printf "| %-25s | %25s |\n" "cluster_exec" "${cluster_exec}" >>\
subTOM_protocol.md
printf "| %-25s | %25s |\n" "par_coeff_exec" "${par_coeff_exec}" >>\
subTOM_protocol.md
printf "| %-25s | %25s |\n" "coeff_exec" "${coeff_exec}" >>\
subTOM_protocol.md
printf "| %-25s | %25s |\n" "eigvol_exec" "${eigvol_exec}" >> subTOM_protocol.md
printf "| %-25s | %25s |\n" "preali_exec" "${preali_exec}" >> subTOM_protocol.md
printf "| %-25s | %25s |\n" "par_dmatrix_exec" "${par_dmatrix_exec}" >>\
subTOM_protocol.md
printf "| %-25s | %25s |\n" "dmatrix_exec" "${dmatrix_exec}" >>\
subTOM_protocol.md
printf "| %-25s | %25s |\n" "sum_exec" "${sum_exec}" >> subTOM_protocol.md
printf "| %-25s | %25s |\n" "avg_exec" "${avg_exec}" >> subTOM_protocol.md
printf "| %-25s | %25s |\n" "motl_dump_exec" "${motl_dump_exec}" >>\
subTOM_protocol.md
printf "| %-25s | %25s |\n" "mem_free" "${mem_free}" >> subTOM_protocol.md
printf "| %-25s | %25s |\n" "mem_max" "${mem_max}" >> subTOM_protocol.md
printf "| %-25s | %25s |\n" "job_name" "${job_name}" >> subTOM_protocol.md
printf "| %-25s | %25s |\n" "array_max" "${array_max}" >> subTOM_protocol.md
printf "| %-25s | %25s |\n" "max_jobs" "${max_jobs}" >> subTOM_protocol.md
printf "| %-25s | %25s |\n" "run_local" "${run_local}" >> subTOM_protocol.md
printf "| %-25s | %25s |\n" "skip_local_copy" "${skip_local_copy}" >>\
subTOM_protocol.md
printf "| %-25s | %25s |\n" "iteration" "${iteration}" >> subTOM_protocol.md
printf "| %-25s | %25s |\n" "num_dmatrix_prealign_batch" \
"${num_dmatrix_prealign_batch}" >> subTOM_protocol.md
printf "| %-25s | %25s |\n" "num_dmatrix_batch" "${num_dmatrix_batch}" >>\
subTOM_protocol.md
printf "| %-25s | %25s |\n" "num_coeff_prealign_batch" \
"${num_coeff_prealign_batch}" >> subTOM_protocol.md
printf "| %-25s | %25s |\n" "num_coeff_batch" "${num_coeff_batch}" >>\
subTOM_protocol.md
printf "| %-25s | %25s |\n" "num_avg_batch" "${num_avg_batch}" >>\
subTOM_protocol.md
printf "| %-25s | %25s |\n" "high_pass_fp" "${high_pass_fp}" >>\
subTOM_protocol.md
printf "| %-25s | %25s |\n" "high_pass_sigma" "${high_pass_sigma}" >>\
subTOM_protocol.md
printf "| %-25s | %25s |\n" "low_pass_fp" "${low_pass_fp}" >>\
subTOM_protocol.md
printf "| %-25s | %25s |\n" "low_pass_sigma" "${low_pass_sigma}" >>\
subTOM_protocol.md
printf "| %-25s | %25s |\n" "nfold" "${nfold}" >> subTOM_protocol.md
printf "| %-25s | %25s |\n" "tomo_row" "${tomo_row}" >> subTOM_protocol.md
printf "| %-25s | %25s |\n" "dmatrix_prealign" "${dmatrix_prealign}" >>\
subTOM_protocol.md
printf "| %-25s | %25s |\n" "dmatrix_all_motl_fn_prefix" \
"${dmatrix_all_motl_fn_prefix}" >> subTOM_protocol.md
printf "| %-25s | %25s |\n" "dmatrix_fn_prefix" "${dmatrix_fn_prefix}" >>\
subTOM_protocol.md
printf "| %-25s | %25s |\n" "ptcl_fn_prefix" "${ptcl_fn_prefix}" >>\
subTOM_protocol.md
printf "| %-25s | %25s |\n" "dmatrix_ref_fn_prefix" \
"${dmatrix_ref_fn_prefix}" >> subTOM_protocol.md
printf "| %-25s | %25s |\n" "weight_fn_prefix" "${weight_fn_prefix}" >>\
subTOM_protocol.md
printf "| %-25s | %25s |\n" "mask_fn" "${mask_fn}" >> subTOM_protocol.md
printf "| %-25s | %25s |\n" "num_svs" "${num_svs}" >> subTOM_protocol.md
printf "| %-25s | %25s |\n" "svds_iterations" "${svds_iterations}" >>\
subTOM_protocol.md
printf "| %-25s | %25s |\n" "svds_tolerance" "${svds_tolerance}" >>\
subTOM_protocol.md
printf "| %-25s | %25s |\n" "eig_val_fn_prefix" "${eig_val_fn_prefix}" >>\
subTOM_protocol.md
printf "| %-25s | %25s |\n" "eig_vol_fn_prefix" "${eig_vol_fn_prefix}" >>\
subTOM_protocol.md
printf "| %-25s | %25s |\n" "variance_fn_prefix" "${variance_fn_prefix}" >>\
subTOM_protocol.md
printf "| %-25s | %25s |\n" "coeff_prealign" "${coeff_prealign}" >>\
subTOM_protocol.md
printf "| %-25s | %25s |\n" "coeff_all_motl_fn_prefix" \
"${coeff_all_motl_fn_prefix}" >> subTOM_protocol.md
printf "| %-25s | %25s |\n" "coeff_fn_prefix" "${coeff_fn_prefix}" >>\
subTOM_protocol.md
printf "| %-25s | %25s |\n" "cluster_type" "${cluster_type}" >>\
subTOM_protocol.md
printf "| %-25s | %25s |\n" "coeff_idxs" "${coeff_idxs}" >> subTOM_protocol.md
printf "| %-25s | %25s |\n" "num_classes" "${num_classes}" >> subTOM_protocol.md
printf "| %-25s | %25s |\n" "cluster_all_motl_fn_prefix" \
"${cluster_all_motl_fn_prefix}" >> subTOM_protocol.md
printf "| %-25s | %25s |\n" "ref_fn_prefix" "${ref_fn_prefix}" >>\
subTOM_protocol.md
printf "| %-25s | %25s |\n\n" "weight_sum_fn_prefix" \
"${weight_sum_fn_prefix}" >> subTOM_protocol.md
| true |
2b455841a48ef862d604adb7f1f75112965830af | Shell | nimahejazi/isvim.bash | /isvim.sh | UTF-8 | 132 | 2.78125 | 3 | [] | no_license | #!/bin/bash
# Report whether this shell session was started from inside vim: vim exports
# VIMRUNTIME to its child processes, so its presence in the environment is
# the tell-tale.
# Test the pipeline's exit status directly instead of the old two-step
# 'command; [[ $? == 0 ]]' pattern; grep -q is silent without needing a
# redirect to /dev/null.
if env | grep -q VIMRUNTIME; then
    echo 'vim is running'
else
    echo 'vim is not running'
fi
| true |
02b0d4186efdd9e240c0358ed3ffcbcd41c7877a | Shell | go-dima/linux-config | /deploy-config.sh | UTF-8 | 1,510 | 4.125 | 4 | [] | no_license | #!/bin/bash -e
# Bootstrap a Debian/Ubuntu machine with this repository's shell, git and vim
# configuration.  Pass -local (or -l) to deploy from the current checkout
# instead of cloning from GitHub.
SKIP_CLONE=false
if [[ -n "${1:-}" ]]; then
    if [[ "$1" == "-local" ]] || [[ "$1" == "-l" ]]; then
        echo "Running from local folder"
        SKIP_CLONE=true
    else
        echo "Invalid option: $1"
        exit 1
    fi
fi

echo '########## Update & Install Git ##########'
sudo apt-get update
sudo apt-get -y install git

# String comparison here: the original used '-eq', which evaluates both sides
# arithmetically ("false" -> 0, "true" -> 0) and therefore cloned
# unconditionally, even in -local mode.
if [[ "${SKIP_CLONE}" == "false" ]]; then
    # Clone repo
    git clone https://github.com/go-dima/linux-config.git ~/.linux-config
    cd ~/.linux-config
fi

echo '########## Install Tools ##########'
# One package name per line in ./packages, joined into a single word list.
packages_to_install=$(awk '{printf("%s ",$0)}' packages)
# Intentionally unquoted: apt-get needs each package as a separate argument.
# (The original expanded ${PACKAGES_TO_INSTALL}, a never-set variable, so no
# packages were ever installed.)
sudo apt-get -y install ${packages_to_install}

echo '########## Configure bash-git-prompt ##########'
BASH_GIT_PROMPT_URL=https://github.com/magicmonty/bash-git-prompt.git
BASH_GIT_PROMPT_FOLDER=~/.bash-git-prompt
if [[ ! -d "$BASH_GIT_PROMPT_FOLDER" ]] ; then
    git clone $BASH_GIT_PROMPT_URL $BASH_GIT_PROMPT_FOLDER --depth=1
else
    cd "$BASH_GIT_PROMPT_FOLDER"
    git pull $BASH_GIT_PROMPT_URL
    cd -
fi

echo '########## Configure bashrc ##########'
# Append the loader line only once, no matter how often this script runs.
PATTERN='source ~/.linux-config/profile/my_bashrc'
BASHRC_FILE=~/.bashrc
grep -qxF -- "$PATTERN" "$BASHRC_FILE" || echo "$PATTERN" >> "$BASHRC_FILE"

echo '########## Configure git ##########'
cat profile/my_gitconfig > ~/.gitconfig
mkdir -p ~/bin
cp git-commands/* ~/bin/

echo '########## Configure vim ##########'
cat profile/my_vimrc > ~/.vimrc

echo '########## Apply Changes ##########'
# NOTE(review): sourcing ~/.bashrc here only affects this script's own shell,
# not the invoking terminal -- kept for parity with the original behaviour.
source ~/.bashrc

echo '########## Configuration Complete ##########'
| true |
4d3ec429bfe2c429dd843bb119312b51fd60862d | Shell | nawresboubakri/Shell-Project-Nawres-Zied- | /nawres/integration/menu.sh | UTF-8 | 648 | 2.546875 | 3 | [] | no_license | #!/bin/bash
# Render the main task-selection menu with dialog(1).  The selected tag is
# written by dialog to stderr, which is captured into the file named by
# ${INPUT}, then read back into $menuitem.
# NOTE(review): ${INPUT} is not defined in this file -- presumably set by the
# script that sources this one; confirm before reuse.
menu_nawres()
{
# The French labels below are runtime strings handed to dialog and are left
# untranslated on purpose.
dialog --clear --help-button --backtitle "Linux Shell Script" \
--title "[ BIENVENUE DANS MENU - NAWRES -- ZIED ]" \
--menu "MERCI DE CHOISIR UNE TACHE" 15 50 4 \
Search "pour rechercher les fichers exe d'un certain user " \
Remove " Chercher touts les fichiers executables dont l'utilisateur mis en argument en supprimant l'exécutable via l'option search-rm " \
Detail "aficher en détail les fichers exe d'un certain user" \
Pourcentage "calculer le nombres de fichiers/fichiers exe et le pourcentage" \
Save "Sauvegarder le pourcentage " \
Quitter "Exit to the shell" 2>"${INPUT}"
# Read the user's selection back from the capture file.
menuitem=$(<"${INPUT}")
}
| true |
94f46cb9ca35f9229a05776c4651287db4e1a52f | Shell | co2-git/lib | /ini/parse.sh | UTF-8 | 1,248 | 3.734375 | 4 | [] | no_license | lib.ini.parse ()
{
# Parse an INI file into the current shell.  Each [section] of the file is
# rewritten into a shell function named cfg.section.<name>; calling that
# function assigns the section's key=value pairs as shell (array) variables.
# Arguments: $1 - path to the .ini file
# Returns:   0 on success, $E_PARSING_FAILED if the generated code fails
#
# The transformation below is a pipeline of parameter expansions over an
# array of lines; the steps are order-dependent, so do not reorder them.
# This is a fork from: http://ajdiaz.wordpress.com/2008/02/09/bash-ini-parser/
local E_PARSING_FAILED=1;
ini="$(<$1)" # read the file
ini="${ini//[/\[}" # escape [
ini="${ini//]/\]}" # escape ]
OLDIFS="$IFS";
IFS=$'\n' && ini=( ${ini} ) # convert to line-array (split on newlines only)
ini=( ${ini[*]//;*/} ) # remove comments with ;
ini=( ${ini[*]/\ =/=} ) # remove blanks before =
ini=( ${ini[*]/=\ /=} ) # remove blanks after =
ini=( ${ini[*]/\ =\ /=} ) # remove anything with a space around =
ini=( ${ini[*]/#\\[/\}$'\n'cfg.section.} ) # set section prefix
ini=( ${ini[*]/%\\]/ \(} ) # convert text2function (1)
ini=( ${ini[*]/=/=\( } ) # convert item to array
ini=( ${ini[*]/%/ \)} ) # close array parenthesis
ini=( ${ini[*]/%\\ \)/ \\} ) # the multiline trick
ini=( ${ini[*]/%\( \)/\(\) \{} ) # convert text2function (2)
ini=( ${ini[*]/%\} \)/\}} ) # remove extra parenthesis
ini[0]="" # remove first element
ini[${#ini[*]} + 1]='}' # add the last brace
# Eval the result
# NOTE(review): the transformed file content is eval'd verbatim -- only use
# this on trusted ini files; any shell code smuggled into the file would run.
eval 2>/dev/null "$(echo "${ini[*]}")" || {
echo "Could not parse ini file $1";
return $E_PARSING_FAILED;
}
IFS="$OLDIFS";
} | true |
de252928dd3e2fc0705a39da2fc951e1885ecad4 | Shell | Vash2593/maca | /tests/simple.test | UTF-8 | 1,135 | 2.828125 | 3 | [] | no_license | #! /bin/sh
# Test driver: must be run from the build directory, where 'defs' (generated
# by the build system) defines $top_builddir used below.
test -f defs || { echo "defs not found" && exit 1; }
# Abort on the first unguarded failing command.
set -e
. ./defs
# run EXPECTED_STATUS CMD [ARGS...]
# Execute CMD and abort the whole test script (exit 1) unless its exit
# status equals EXPECTED_STATUS.  The '|| status=$?' capture keeps a failing
# command from tripping the script's 'set -e'.
run ()
{
  expected=$1
  shift
  status=0
  "$@" || status=$?
  [ "$status" -eq "$expected" ] || exit 1
}
# First model: 3 states.  For each state, one line gives its number and the
# truth values of the atomic propositions x and w, and the following line
# lists its successor states.
# NOTE(review): input format inferred from the data -- confirm against the
# gyver input documentation.
cat >input <<EOF
3
0 x !w
1
1 x !w
2 0
2 !x w
1
EOF
# Check CTL formulas against the model; exit code 0 means the formula holds
# in the initial state, 1 means it is refuted.
run 0 $top_builddir/gyver input "x"
run 0 $top_builddir/gyver input "EU(x, w)"
run 1 $top_builddir/gyver input "AU(x, w)"
run 0 $top_builddir/gyver input "EX(EX(w))"
run 0 $top_builddir/gyver input "EX(EX(NOT(w)))"
run 0 $top_builddir/gyver input "AND(x, NOT(w))"
run 0 $top_builddir/gyver input "OR(x, w)"
run 0 $top_builddir/gyver input "EG(x)"
run 1 $top_builddir/gyver input "AG(x)"
run 1 $top_builddir/gyver input "EX(AX(x))"
run 0 $top_builddir/gyver input "EX(NOT(NOT(x)))"
run 0 $top_builddir/gyver input "AND(NOT(w), NOT(AX(w)))"
# Second model: same state set but different transitions, exercising the
# universal operators on a branching structure.
cat >input <<EOF
3
0 x !w
1 0
1 !x !w
2
2 !x w
1
EOF
run 1 $top_builddir/gyver input "AX(x)"
run 0 $top_builddir/gyver input "EX(AX(NOT(w)))"
run 1 $top_builddir/gyver input "AX(EX(NOT(w)))"
run 0 $top_builddir/gyver input "AND(EX(x), AF(NOT(w)))"
run 1 $top_builddir/gyver input "NOT(NOT(NOT(NOT(NOT(AND(EX(x), AF(NOT(w))))))))"
| true |
c9d8f73e66e733d9b016655f4970a24fdc875872 | Shell | dragonjia/bash-tools | /DevOpsUpgrade/samples/menu.sh | UTF-8 | 683 | 3.390625 | 3 | [] | no_license | #!/usr/bin/env bash
echo
PS3='Enter catalog number: ' # prompt string used by the 'select' builtin
echo
# Offer the catalog numbers; the 'break' at the bottom means exactly one
# selection is processed per run.
select catalog_number in "B1723" "B1724" "B1725"
do
# Build the names of the per-catalog variables (e.g. B1723_inventory) for
# indirect lookup below.
# NOTE(review): those variables are not defined in this file -- presumably
# they are set by a sourcing script; unset names expand to empty strings.
Inv=${catalog_number}_inventory
Val=${catalog_number}_value
Pdissip=${catalog_number}_powerdissip
Loc=${catalog_number}_loc
Ccode=${catalog_number}_colorcode
echo
echo "Catalog number $catalog_number:"
# Now, retrieve value, using indirect referencing.
echo "There are ${!Inv} of [${!Val} ohm / ${!Pdissip} watt]\
resistors in stock." # ^ ^
# As of Bash 4.2, you can replace "ohm" with \u2126 (using echo -e).
echo "These are located in bin # ${!Loc}."
echo "Their color code is \"${!Ccode}\"."
break
done
echo; echo | true |
c5d90387a6452b0ce5fc43c6e4b66dc0a1f0eeca | Shell | jasonbono/CLAS | /trunk/diff-clas | UTF-8 | 4,737 | 3.515625 | 4 | [] | no_license | #!/bin/bash
# Require the three positional arguments (svn tree, cvs tree, section name);
# print usage examples and abort otherwise.  The original printed usage but
# then fell through and kept running with empty paths.
if [[ $# -lt 3 ]]; then
echo "usage:"
echo "  for i in include detector io reconstruction analysis; do $0 svndir cvsdir \$i --brief; done"
echo ""
echo "i=io; `basename $0` `pwd` /home/goetz/clas-cvs \$i --exclude='evnt_2_bos.F' --exclude='make_vert.c' --exclude='evout.[ch]' --exclude='scatbos.c' --exclude='analysis.c' --exclude='user_control.inc' --exclude='ic_book.F' --exclude='*_qpad.c' --exclude='user_init.F' --brief"
exit 1
fi

svn=$1
cvs=$2
dir=$3
shift 3
# NOTE(review): flattening "$@" into one string drops any quoting in the
# extra diff options; kept as-is because diff-this expands $opts unquoted.
opts="$@"
# Print a single banner line naming the SVN and CVS paths about to be
# compared.  Relies on the colour variables red/orange/green/cl supplied by
# the caller's environment; unset variables simply expand to nothing.
function print-header() {
    local lhs=$1
    local rhs=$2
    printf '%s\n' "${red} >>> ${cl}${orange}${lhs}${cl} ${green}${rhs}${cl}${red} <<< ${cl}"
}
# Recursively diff two directory trees while ignoring whitespace, build
# artefacts and VCS metadata, then pretty-print the result.
# Arguments: $1 - first tree, $2 - second tree
# Globals:   $opts - extra diff options from the command line (expanded
#            unquoted on purpose so each word becomes its own option)
# Output pipeline: strip "Common subdirectories" noise, remove the tree
# prefixes from paths, colourise with highlight(1), drop blank lines.
# NOTE(review): requires the external 'highlight' tool to be installed.
function diff-this() {
a=$1
b=$2
diff $opts \
-bB --ignore-all-space --ignore-blank-lines --ignore-space-change \
--exclude=sconstruct --exclude=[Mm]akefile --exclude=Makefile_alt* \
--exclude=.svn --exclude=CVS --exclude=.cvsignore \
--exclude='Linux*' \
$a $b \
| grep -v 'Common subdirectories: ' \
| sed "s#$a/##g" \
| sed "s#$b/##g" \
| highlight --syntax='diff' --ansi \
| sed '/^$/d'
}
# Dispatch on the requested section name ($dir).  Each section maps a set of
# SVN subdirectories onto their CVS counterparts and diffs them pairwise.
# 'include': top-level headers, plus headers that were relocated out of the
# CVS include directory into other packages.
if [[ $dir == "include" ]]; then
c="packages"
for i in include; do
print-header SVN/$i CVS/$c/$i
diff-this $svn/$i $cvs/$c/$i
#MOVED_FILES=`diff-this $svn/$i $cvs/$c/$i \
#| grep "Only in $cvs/$c/$i" \
#| awk '{print $NF}'`
done
# moved files from include dir in CVS
a=io/caldb/C/calib_envir.h
b=packages/include/calib_envir.h
print-header $a $b
diff-this $svn/$a $cvs/$b
a=pcor/g10pcor/g10pcor.h
b=packages/include/g10pcor.h
print-header $a $b
diff-this $svn/$a $cvs/$b
a=io/c_bos_io/g3D.h
b=packages/include/g3D.h
print-header $a $b
diff-this $svn/$a $cvs/$b
a=io/Map/map_manager.h
b=packages/include/map_manager.h
print-header $a $b
diff-this $svn/$a $cvs/$b
#for i in $MOVED_FILES; do
#print-header "SVN($i)" "CVS($i)"
#svnfile=`find $svn -name "$i"`
#cvsfile=`find $cvs -name "$i"`
#print-header "SVN($svnfile)" "CVS($cvsfile)"
#diff-this $svnfile $cvsfile
#done
fi
# 'detector': one-to-one subdirectory comparison.
if [[ $dir == "detector" ]]; then
s="detector"
c="packages"
for i in cc dc ec icf lac sc st tag; do
print-header SVN/$s/$i CVS/$c/$i
diff-this $svn/$s/$i $cvs/$c/$i
done
fi
# 'io': bulk comparison, then a per-file pass for Map (whose files are found
# by name on both sides, in both directions, so renames are caught).
if [[ $dir == "io" ]]; then
s="io"
c="packages"
for i in bankdefs bosio caldb c_bos_io clasutil c_sql itape recutl; do
print-header SVN/$s/$i CVS/$c/$i
diff-this $svn/$s/$i $cvs/$c/$i
done
# Map needs special treatment
s="io"
c="packages"
for i in Map; do
print-header SVN/$s/$i CVS/$c/$i
svnfiles=`find $svn/$s/$i -name "*.*" | grep -v '.svn'`
cvsfiles=`find $cvs/$c/$i -name "*.*" | grep -v 'CVS'`
for sf in $svnfiles; do
cf=$(find $cvs/$c/$i -name $(basename $sf))
diff-this $sf $cf
done
for cf in $cvsfiles; do
sf=$(find $svn/$s/$i -name $(basename $cf))
diff-this $sf $cf
done
done
s="io"
c="packages/utilities"
for i in bosdump countbos; do
print-header SVN/$s/$i CVS/$c/$i
diff-this $svn/$s/$i $cvs/$c/$i
done
fi
# 'reconstruction': main packages plus the a1 utility.
if [[ $dir == "reconstruction" ]]; then
s="reconstruction"
c="packages"
for i in ana c_cern eloss epics gem online_dummy pid recsis scaler scat seb tagM trk user user_ana vertex; do
print-header SVN/$s/$i CVS/$c/$i
diff-this $svn/$s/$i $cvs/$c/$i
done
s="reconstruction"
c="packages/utilities"
for i in a1; do
print-header SVN/$s/$i CVS/$c/$i
diff-this $svn/$s/$i $cvs/$c/$i
done
fi
if [[ $dir == "pcor" ]]; then
s="pcor"
c="packages/utilities"
for i in g10pcor Pcor; do
print-header SVN/$s/$i CVS/$c/$i
diff-this $svn/$s/$i $cvs/$c/$i
done
fi
# 'calibration': the three tools live under different CVS parents, so they
# are compared explicitly rather than in a loop.
if [[ $dir == "calibration" ]]; then
s="calibration"
c="packages"
print-header SVN/$s/dc3 CVS/$c/reccal/dc3
diff-this $svn/$s/dc3 $cvs/$c/reccal/dc3
print-header SVN/$s/stn_calib CVS/$c/utilities/stn_calib
diff-this $svn/$s/stn_calib $cvs/$c/utilities/stn_calib
print-header SVN/$s/tag_calib CVS/$c/utilities/tag_calib
diff-this $svn/$s/tag_calib $cvs/$c/utilities/tag_calib
fi
if [[ $dir == "analysis" ]]; then
s="analysis"
c="packages/utilities"
for i in clasEvent clasEvent/vkTAGR; do
print-header SVN/$s/$i CVS/$c/$i
diff-this $svn/$s/$i $cvs/$c/$i
done
fi
if [[ $dir == "physmath" ]]; then
s="physmath"
c="packages/utilities"
for i in libpp plib pwaUtil; do
print-header SVN/$s/$i CVS/$c/$i
diff-this $svn/$s/$i $cvs/$c/$i
done
fi
| true |
3866a53a2d0bb748e9a53541f4b81ef1290a02c1 | Shell | gjvanoldenborgh/climexp_data | /NCEPData/update_cmorph_iri.sh | UTF-8 | 2,426 | 3.5 | 4 | [] | no_license | #!/bin/bash
export PATH=$HOME/climexp/bin:/usr/local/free/bin:$PATH

# Incremental updater for the daily CMORPH precipitation archive: fetches any
# missing months from the IRI data library, converts them to netCDF, and
# rebuilds the concatenated daily and monthly files.
#
# $cdo is expanded unquoted on purpose: it carries the cdo options
# (-r -f nc4 -z zip) as separate words.
cdo="cdo -r -f nc4 -z zip"
mkdir -p CMORPH

# The archive starts in January 2002; "now" is yesterday because the feed
# lags real time.
yr=2002
mo=1
mm=$(printf %02i $mo)
yrnow=$(date -d yesterday "+%Y")
mmnow=$(date -d yesterday "+%m")
monow=${mmnow#0}      # strip the leading zero so $monow is safe in arithmetic
dynow=$(date -d yesterday "+%d")

# skip over all existing files
while [ -s CMORPH/cmorph_${yr}${mm}.nc ]; do
    mo=$((mo+1))
    if [ $mo -gt 12 ]; then
        mo=$((mo-12))
        yr=$((yr+1))
    fi
    mm=$(printf %02i $mo)
done

# ... except the last one, which may be partial and is re-downloaded.
if [ $yr -gt 2002 ] || [ $mo -gt 1 ]; then
    mo=$((mo-1))
    if [ $mo -lt 1 ]; then
        mo=$((mo+12))
        yr=$((yr-1))
    fi
    mm=$(printf %02i $mo)
fi

set -x

# and re-obtain the data for the last existing file and all missing ones
while [ $yr -lt $yrnow ] || { [ $yr -eq $yrnow ] && [ $mo -le $monow ]; }; do
    if [ $yr -eq $yrnow ] && [ $mo -eq $monow ]; then
        dd=$dynow
    else
        case $mo in
        1|3|5|7|8|10|12) dpm=31;;
        4|6|9|11) dpm=30;;
        # Full Gregorian leap-year rule; the original yr%4 test would have
        # (wrongly) treated 2100 as a leap year.
        2) if [ $((yr % 4)) -eq 0 ] && { [ $((yr % 100)) -ne 0 ] || [ $((yr % 400)) -eq 0 ]; }; then
            dpm=29
        else
            dpm=28
        fi;;
        esac
        dd=$dpm
    fi
    # The IRI T axis counts days relative to 2005-02-23; convert the first
    # and last day of the month to that index.  The extra 12 h cushions the
    # division against DST/rounding effects.
    s0=$(date -d 2005-02-23 "+%s")
    s1=$(date -d ${yr}-${mm}-01 "+%s")
    s2=$(date -d ${yr}-${mm}-${dd} "+%s")
    d1=$(((s1-s0+12*60*60)/(24*60*60)))
    d2=$(((s2-s0+12*60*60)/(24*60*60)))
    if [ $dd = $dynow ]; then
        # IRI runs a few days behind, so leave the upper bound open for the
        # current month.
        # NOTE(review): this also triggers for a past month whose length
        # happens to equal today's day-of-month -- confirm intent.
        d2=""
    fi
    ###echo "d1,d2=$d1,$d2"
    ncks -O -d T,$d1,$d2 http://iridl.ldeo.columbia.edu/SOURCES/.NOAA/.NCEP/.CPC/.CMORPH/.daily/.mean/.morphed/.cmorph/dods aap.nc
    ncrename -O -v cmorph,prcp -d T,time -d X,lon -d Y,lat -v T,time -v X,lon -v Y,lat aap.nc noot.nc
    ncatted -a units,prcp,m,c,"mm/dy" noot.nc
    $cdo invertlat noot.nc aap.nc
    # NOTE(review): settaxis receives the unpadded month ($mo, not $mm);
    # presumably cdo accepts single-digit months -- confirm before changing.
    $cdo settaxis,${yr}-${mo}-01,0:00,1day aap.nc noot.nc
    # Scale by 24 to match the mm/dy unit set above (source presumably in
    # mm/hr -- confirm against the dataset description).
    $cdo mulc,24 noot.nc CMORPH/cmorph_${yr}${mm}.nc
    $HOME/climexp/bin/averagefieldspace CMORPH/cmorph_${yr}${mm}.nc 2 2 aap.nc
    $cdo copy aap.nc CMORPH/cmorph_${yr}${mm}_05.nc
    rm aap.nc noot.nc
    mo=$((mo+1))
    if [ $mo -gt 12 ]; then
        mo=$((mo-12))
        yr=$((yr+1))
    fi
    mm=$(printf %02i $mo)
done

# Rebuild the concatenated daily files and the monthly means, then publish.
$cdo copy CMORPH/cmorph_??????_05.nc cmorph_daily_05.nc
$cdo copy CMORPH/cmorph_??????.nc cmorph_daily.nc
$cdo monmean cmorph_daily.nc cmorph_monthly.nc
$HOME/NINO/copyfiles.sh cmorph_monthly.nc cmorph_daily_05.nc cmorph_daily.nc
| true |
30adec5bd174d53affd8970521a44ed17bcd937a | Shell | thibaut-d/docker-django-nginx-uwsgi | /db/db-start.sh | UTF-8 | 470 | 3.0625 | 3 | [
"MIT"
] | permissive | #!/bin/bash
set -e

#####
# Launch Cron for Postgres backups
#####

# Grant execution rights
chmod +x ./pg_backup_rotated.sh

# Install the backup cron job.  Per-user crontabs (crontab(5)) have NO user
# field: the original entry's "root" column would have been executed as the
# command.  Filter out any previous copy first so repeated container starts
# do not accumulate duplicate lines; '|| true' keeps 'set -e' happy when no
# crontab exists yet or the filter matches nothing.
( crontab -l 2>/dev/null | grep -vF "pg_backup_rotated.sh" || true ; \
  echo "30 3 * * * /srv/db/pg_backup_rotated.sh" ) | crontab -

# Echo the crontab content
echo "Checking crontab content..."
crontab -l

# Create the log file to be able to run tail
touch /var/log/cron.log

# Run the command on container startup: start cron and keep the container's
# foreground process attached to the log so the container stays alive.
cron && tail -f /var/log/cron.log
| true |
d9aae8fcc11478fbe3ffefdc34c3cccae7773ff6 | Shell | PnX-SI/Ressources-techniques | /GeoNature/PNX/scripts/get_remote_config.sh | UTF-8 | 1,006 | 2.625 | 3 | [] | no_license | parc=$1
# Abort early when the park code is missing; everything below keys off $parc.
if [ -z "${parc}" ]; then
    echo "usage: $0 <park_code>" >&2
    exit 1
fi

# settings.ini is expected to define one ftp_<park> variable per park holding
# the lftp connection string.
. settings.ini

set -x

# Indirect lookup: resolve ftp_<park> into the actual connection string and
# refuse to continue when no such entry exists.
ftp_parc=ftp_${parc}
ftp_access=${!ftp_parc}
if [ -z "${ftp_access}" ]; then
    echo "no ftp_${parc} entry found in settings.ini" >&2
    exit 1
fi

# Start from a clean per-park download directory (quoted so an unusual park
# code cannot split into multiple words).
rm -rf "remote_config/${parc}"
mkdir -p "remote_config/${parc}"
lftp "${ftp_access}" -e "
get geonature/config/settings.ini -o remote_config/${parc}/settings.ini;
get geonature/config/geonature_config.toml -o remote_config/${parc}/geonature_config.toml;
get atlas/atlas/configuration/settings.ini -o remote_config/${parc}/settings_atlas.ini;
get atlas/atlas/configuration/config.py -o remote_config/${parc}/config_atlas.py;
get usershub/config/settings.ini -o remote_config/${parc}/settings_usershub.ini;
get usershub/config/config.py -o remote_config/${parc}/config_usershub.py;
get taxhub/settings.ini -o remote_config/${parc}/settings_taxhub.ini;
get taxhub/config.py -o remote_config/${parc}/config_taxhub.py;
get /geonature/backend/static/mobile/occtax/settings.json -o remote_config/${parc}/settings_occtax.json;
get /geonature/backend/static/mobile/sync/settings.json -o remote_config/${parc}/settings_sync.json;
bye
" | true |
8c9aff066425e8205f8a283a0fdb0057f1f14699 | Shell | cms-ts/VggAnalysis | /VggTools/scripts/make_data_json.sh | UTF-8 | 872 | 2.703125 | 3 | [] | no_license | #!/bin/sh
cd $HOME/work/cms/CMSSW_10_2_22/
eval `scramv1 runtime -sh`
cd $OLDPWD
DATADIR=/eos/infnts/cms/store/user/dellaric/data
WORKDIR=$HOME/work/cms/VggAnalysis/VggTools/scripts
cd $WORKDIR
for F in `ls lists/Run2016*.list | grep 02Apr2020`; do
echo "checking $F - "`cat $F | wc -l`" files"
python nano_report.py -o json/`basename $F .list`.json \
`cat $F | sed -e 's;root://eosinfnts.ts.infn.it/;;'`
done
for F in `ls lists/Run2017*.list | grep 02Apr2020`; do
echo "checking $F - "`cat $F | wc -l`" files"
python nano_report.py -o json/`basename $F .list`.json \
`cat $F | sed -e 's;root://eosinfnts.ts.infn.it/;;'`
done
for F in `ls lists/Run2018*.list | grep 02Apr2020`; do
echo "checking $F - "`cat $F | wc -l`" files"
python nano_report.py -o json/`basename $F .list`.json \
`cat $F | sed -e 's;root://eosinfnts.ts.infn.it/;;'`
done
exit
| true |
6a4b575b671ba0ea00a283d7acab9ad5335ac1b3 | Shell | ShaliniSP/AutoLearner-Web-Tool | /eclara/TestSuite1.0/TestCases/create.sh | UTF-8 | 126 | 2.9375 | 3 | [] | no_license | a=28
while [ "$a" -lt 44 ] # this is loop1: clone template p20_t.txt into p<a>_t.txt
do
 echo "$a"
 # Copy the template test file into the numbered slot; quoting keeps the
 # target name intact even if $a ever contained unexpected characters.
 cp p20_t.txt "p${a}_t.txt"
 a=$((a + 1)) # POSIX arithmetic; avoids forking expr on every iteration
done | true |
07164ccc136684f8e5e7ac004365013759e2560f | Shell | simomarsili/ndd | /utils/numpy_fortran_compiler.bash | UTF-8 | 323 | 2.96875 | 3 | [
"BSD-3-Clause"
] | permissive | #!/bin/bash
# get numpy lapack lib
# Locate the shared object behind numpy's lapack_lite module: the Fortran
# runtime it links against tells us which compiler built it, so the matching
# Fortran compiler can be chosen for building extensions.
lapack_lib=$(python -c "import os; import numpy; print(os.path.abspath(numpy.linalg.lapack_lite.__file__))")
# $(...) plus quoting survives paths with spaces, unlike the old unquoted
# backtick form.
libs=$(ldd "$lapack_lib")
if [[ $libs == *"libgfortran"* ]]; then
    echo "gfortran"
elif [[ $libs == *"libg2c"* ]]; then
    echo "g77"
else
    echo "not in (gfortran, g77)"
fi
| true |
ac68497d75e084253fa016e6e674eceda7f16685 | Shell | albush/the-chopper | /chopper.sh | UTF-8 | 3,543 | 4.3125 | 4 | [] | no_license | #!/bin/bash
# a little utility to cut an mp4 into smaller clips based on timestamps.
# Print the Chopper help text to stdout and terminate the script.
usage ()
{
  printf '%s\n' \
    "The Chopper is a script to cut a single video file into shorter video files, " \
    "based on a list of cut points. " \
    "Complete documentation is available at https://github.com/albush/the-chopper" \
    " " \
    "Usage: " \
    "  chopper [flags]" \
    " " \
    "Required Flags:" \
    "  -f file path. The path to your original video file." \
    "  -c cut list. A comma separated list of points to cut." \
    "  -o output path. The destination for finished clips." \
    " " \
    "Optional Flags:" \
    "  -A Audio. Additionally export an mp3 version of the original video." \
    "  -E Export to Cloud Files. Provide a Rackspace Cloud Files container. Requires the installation of turbolift." \
    "  -h help. You're reading it now."
  exit
}
# ----- Begin getopts -----
# Parse the command-line flags.  The leading ':' in the optstring switches
# getopts to silent error mode so the \? and : cases below handle errors.
# Note: 'audio' is only assigned when -A is supplied; it stays unset
# otherwise.
while getopts :f:c:o:AE:h FLAG; do
  case $FLAG in
    A)  #set option "a"
      audio=1
      # debug echo of the flag value -- presumably a leftover; confirm
      echo $audio
      ;;
    f)  #set option "f"
      # Source video; must exist.  $fbname is the basename without .mp4 and
      # is reused later for the mp3 and markdown file names.
      filename=$OPTARG
      if [[ ! -f "$filename" ]]
        then
        echo "That video doesn't exist. Try again"
        exit
        else echo "Video......... Check!"
        fbname=$(basename "$filename" .mp4)
      fi
      ;;
    c)  #set option "c"
      # CSV of cut points: start,end,title per line; must exist.
      cut_list=$OPTARG
      if [[ ! -f "$cut_list" ]]
        then
        echo "That cut list doesn't exist. Try again"
        exit
        else
        echo "Cut List........ Check!"
      fi
      ;;
    E)  #set option "E"
      container=$OPTARG
      ;;
    o)  #set option "o"
      out_path=$OPTARG
      # does the output directory exist?
      # Interactively offer to create it; loops until a yes/no answer.
      # NOTE(review): $out_path is unquoted in the mkdir below, so paths
      # with spaces would word-split -- confirm before relying on it.
      if [ ! -d "$out_path" ]; then
        while true; do
          read -p "Oh noes, that directory isn't there. should I create $out_path for you?" yn
          case $yn in
            [Yy]* ) mkdir $out_path; break;;
            [Nn]* ) echo "Ok. I'll be here if you want to try again."; exit;;
            * ) echo "Please answer yes or no.";;
          esac
        done
      fi
      ;;
    h)  #show help
      usage
      ;;
    \?) #unrecognized option - show help
      echo -e \\n"Option -${BOLD}$OPTARG${NORM} not allowed."; usage >&2; exit
      ;;
  esac
done

# Bail out when the script was invoked with no flags at all.
if [ $OPTIND -eq 1 ]; then echo "No options were passed"; usage; exit; fi

shift $((OPTIND-1))  #This tells getopts to move on to the next argument.
# ----- End getopts -----
# ----- Extract Audio -----
# Optionally export the source video's audio track as an MP3.
# ${audio:-0} defaults the flag when -A was not given: getopts only sets
# 'audio' on -A, and the original unguarded '[ $audio == 1 ]' became a test
# syntax error ("unary operator expected") with the variable unset.
if [ "${audio:-0}" -eq 1 ]
then
    echo "Making MP3 from $filename"
    # Quoted paths so filenames with spaces survive; spaces in the basename
    # are replaced by underscores for the output name.
    ffmpeg -i "$filename" -strict -2 "${out_path%/}/${fbname// /_}.mp3"
    # echo "Making OGG from $filename"
    # ffmpeg -i "$filename" -strict -2 "${out_path%/}/${fbname// /_}.ogg"
    echo "$filename Audio Extraction Finished"
fi
# ----- End Audio Extraction -----
# read in a csv, use those values to create individual files via ffmpeg.
# Each line of the cut list is "start,end,title".  read -r keeps backslashes
# in titles literal, and the '|| [ -n ... ]' clause still processes a final
# line that lacks a trailing newline.  The </dev/null on ffmpeg stops it
# from swallowing the loop's stdin.
while IFS=, read -r start_time end_time clip_title || [ -n "$clip_title" ]
do
    echo "Chopping $clip_title"
    ffmpeg -i "$filename" -ss "$start_time" -to "$end_time" -strict -2 "${out_path%/}/${clip_title// /_}.mp4" < /dev/null
done < "$cut_list"
# Optional Export
# When -E supplied a container name, upload the output directory to
# Rackspace Cloud Files via turbolift (credentials come from the
# OS_USERNAME/OS_API_KEY/OS_RAX_AUTH environment variables).
# NOTE(review): $clip_title here still holds the LAST value from the cut
# loop above, so only one markdown link is written -- looks like this was
# meant to run per clip; confirm.  The CDN base URL is hard-coded.
if [ ! -z "$container" ]
then
  echo "Let's upload these files. Shipping them off to your container named $container."
  echo " - [${clip_title}](http://fc3007acf428a103a8a4-83f594235d1123a827fa878c9f3b655b.r71.cf1.rackcdn.com/${clip_title// /_}.mp4)" >> ${out_path}/${fbname}.md
  turbolift -e internalURL -u $OS_USERNAME -a $OS_API_KEY --os-rax-auth $OS_RAX_AUTH upload -s $out_path -c $container
fi
| true |
3ebabffc9f1416832ef3dc2276c79058b41486e1 | Shell | kingkongmok/perl | /ntp_update.sh | UTF-8 | 775 | 3.171875 | 3 | [] | no_license | #!/bin/bash -
#===============================================================================
#
#          FILE: ntp_update.sh
#
#         USAGE: ./ntp_update.sh
#
#   DESCRIPTION: shutdown the ntpd && ntpdate && start ntpd
#
#       OPTIONS: ---
#  REQUIREMENTS: ---
#          BUGS: ---
#         NOTES: ---
#        AUTHOR: kk (Kingkong Mok), kingkongmok AT gmail DOT com
#  ORGANIZATION:
#       CREATED: 09/02/2014 06:02:06 PM CST
#      REVISION: ---
#===============================================================================
set -o nounset                              # Treat unset variables as an error
# Pick up the system default locale if present.  Use ${LANG:-} because
# nounset is active and /etc/default/locale may not define LANG at all;
# the bare "$LANG" test aborted the script in that case.
[ -r /etc/default/locale ] && . /etc/default/locale
[ -n "${LANG:-}" ] && export LANG
# Stop ntpd so ntpdate can bind the NTP port, step the clock once from the
# public pool, then bring the daemon back up.  The && chaining means a
# failed stop skips the clock step and restart.
sudo /etc/init.d/ntpd stop && \
sudo ntpdate pool.ntp.org && \
sudo /etc/init.d/ntpd start
| true |
fc06d5a07754d82146a16bc11e97ea5d6e917e85 | Shell | feynmanliang/DNN-HMM-HTK-helpers | /single-hidden-layer/print-results.zsh | UTF-8 | 1,705 | 2.578125 | 3 | [] | no_license | #!/usr/bin/zsh
# Experiment axes: acoustic feature sets, context windows (frame offsets),
# and word-insertion penalties swept during decoding.
features=(
"MFC_E_Z"
"MFC_E_D_Z"
"MFC_E_D_A_Z"
"FBK_Z"
"FBK_D_Z"
"FBK_D_A_Z"
)
contexts=(
"0"
"-1,0,1"
"-2,-1,0,1,2"
"-3,-2,-1,0,1,2,3"
"-4,-3,-2,-1,0,1,2,3,4"
"-5,-4,-3,-2,-1,0,1,2,3,4,5"
"-6,-5,-4,-3,-2,-1,0,1,2,3,4,5,6"
"-7,-6,-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7"
"-8,-7,-6,-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7,8"
"-9,-8,-7,-6,-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7,8,9"
"-10,-9,-8,-7,-6,-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7,8,9,10"
)
inswords=(-32.0 -16.0 -8.0 -4.0 -2.0 0.0 2.0 4.0)
# final epoch train/cv accuracy
# Scrape the last epoch's "Accuracy = X" lines out of each finetune log
# (tail/head offsets locate the train vs cross-validation sections) and
# emit semicolon-separated rows: feature;context;trainAcc;cvAcc.
for feature in $features; do
for context in $contexts; do
trainAcc=$(cat \
MH0/dnn3.finetune-features=${feature}-context=${context} \
| tail -n17 \
| head -n5 \
| grep -P ".*Accuracy.*" \
| awk -F '[ =]' '{print $5}')
cvAcc=$(cat \
MH0/dnn3.finetune-features=${feature}-context=${context} \
| tail -n11 \
| grep -P ".*Accuracy.*" \
| awk -F '[ =]' '{print $5}')
print $feature\;$context\;$trainAcc\;$cvAcc
done
done
# decode accuracies (includes $inswords)
# Same idea for the HVite decode LOGs, once for the training subset and
# once for the test set, tagged accordingly in the output row.
for feature in $features; do
for context in $contexts; do
for insword in $inswords; do
decodeAcc=$(cat \
MH0/decode-dnn3.finetune-trainSub-features=${feature}-context=${context}-insword=${insword}/test/LOG \
| tail -n5 \
| head -n1 \
| awk -F '[ =]' '{print $5}')
print $feature\;$context\;$insword\;Train.Subset\;$decodeAcc
decodeAcc=$(cat \
MH0/decode-dnn3.finetune-features=${feature}-context=${context}-insword=${insword}/test/LOG \
| tail -n5 \
| head -n1 \
| awk -F '[ =]' '{print $5}')
print $feature\;$context\;$insword\;Test.Set\;$decodeAcc
done
done
done
| true |
e96885b3acd8ecaae4dff4d1f41af3b84b43fc6e | Shell | delkyd/alfheim_linux-PKGBUILDS | /basex/PKGBUILD | UTF-8 | 1,764 | 2.71875 | 3 | [] | no_license | # Maintainer: Daan van Rossum <d.r.vanrossum_at gmx.de>
# Contributor: Christopher Heien <chris.h.heien@gmail.com>
# Contributor: Lazaros Koromilas <koromilaz@gmail.com>

# PKGBUILD metadata for the BaseX XML database / XQuery processor.
pkgname=basex
pkgver=8.6.6
pkgrel=1
pkgdesc="Light-weight, high-performance XML database system and XPath/XQuery processor."
arch=('i686' 'x86_64')
url="http://basex.org/"
license=('BSD')
makedepends=('java-environment')
depends=('java-runtime' 'bash')
# Upstream release zip (version with dots stripped, e.g. 866), plus the
# local profile.d script and desktop entry shipped with this package.
source=("http://files.basex.org/releases/${pkgver}/BaseX${pkgver//./}.zip"
        "basex.sh"
        "BaseX.desktop")
# Checksums must match the three source entries above, in order.
sha1sums=('8b1f50dab764a3d1d9e7aff8895270c88aba20e0'
          'b82c43d2f247d65b93b2f073543b8ceee038bfd1'
          '1f2e10e989258cc41e8d516efa80801038142358')
# Install BaseX into $pkgdir.  All path expansions are quoted so the build
# does not break when $srcdir/$pkgdir contain spaces (the original left
# them unquoted).  The `|| return 1` after each step aborts packaging on
# failure, matching the original behaviour.
package() {
  # install profile.d script (defines BASEX_HOME for login shells)
  install -dm755 "${pkgdir}/etc/profile.d" || return 1
  install -m755 "${srcdir}/${pkgname}.sh" "${pkgdir}/etc/profile.d" || return 1

  # Get the BASEX_HOME env var by sourcing the profile.d script we ship.
  source "${srcdir}/${pkgname}.sh" || return 1

  cd "${srcdir}"
  install -dm755 "${pkgdir}/${BASEX_HOME}"/{bin,lib,img} || return 1

  # install gui shortcuts
  install -dm755 "${pkgdir}/usr/share/applications" || return 1
  install -m755 BaseX.desktop "${pkgdir}/usr/share/applications" || return 1

  # install licence
  install -D -m644 basex/LICENSE "${pkgdir}/usr/share/licenses/${pkgname}/LICENSE"

  # extract the application logo straight out of the jar and install it
  jar -xf basex/BaseX.jar img/logo_256.png
  install -m644 img/logo_256.png "${pkgdir}/${BASEX_HOME}/img" || return 1

  install -m644 basex/*.jar "${pkgdir}/${BASEX_HOME}" || return 1
  install -m644 basex/lib/*.jar "${pkgdir}/${BASEX_HOME}/lib" || return 1
  cp -Rp basex/etc "${pkgdir}/${BASEX_HOME}" || return 1

  # Do not copy Windows .bat/.cmd files
  find basex/bin -type f -a ! -name \*.bat -a ! -name \*.cmd \
    -exec install -m755 {} "${pkgdir}/${BASEX_HOME}/bin" \; || return 1
}
| true |
bdd1f19db75617ffc5f6fe344ba192e2e125962c | Shell | ngc1535git/UKIRT_Scripts | /firststepsidereal.sh | UTF-8 | 1,569 | 3.171875 | 3 | [] | no_license | #!/bin/bash
shopt -s extglob

# Print a message framed by '+' rules, exactly matching the banner layout
# the script originally emitted inline (rule, blank, message, blank, rule).
banner() {
	echo "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++"
	echo
	echo "$1"
	echo
	echo "+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++"
}

# Walk every *Sidereal directory and run the photometry-pipeline steps
# (pp_prepare/pp_photometry/pp_calibrate/pp_distill) in each filter
# subdirectory.  Directory names are quoted and each cd is guarded with
# "|| continue" -- the original unquoted 'cd $DIR' broke on names with
# spaces and, on failure, silently ran the pipeline (and the trailing
# 'cd ..') in the wrong directory.
for SIDEREALFOLDER in *Sidereal; do
	if [ -d "$SIDEREALFOLDER" ]; then
		cd "$SIDEREALFOLDER" || continue
		echo "--->---> Processing" "$SIDEREALFOLDER"
		banner "This is the first/only step analysis of Sidereal data."
		for FILTERFOLDER in *; do
			if [ -d "$FILTERFOLDER" ]; then
				cd "$FILTERFOLDER" || continue
				echo "--->--->---> Processing" "$SIDEREALFOLDER" "$FILTERFOLDER"
				banner "PP Prepare."
				pp_prepare -keep_wcs *a.fits
				banner "PP Photometry."
				pp_photometry *a.fits
				banner "PP Calibrate."
				pp_calibrate *a.fits
				banner "PP Distill."
				pp_distill *a.fits
				cd ..
			fi
		done
		cd ..
	fi
done
| true |
1d9d6facad4fcfaa76fcde0e309ff861cc4c104a | Shell | Bilalh/Gen | /etc/unused_code/scripts/misc/convert_from_old_date.sh | UTF-8 | 231 | 3.453125 | 3 | [] | no_license | #!/bin/bash
# Rename the directory/file given as $1, whose name is an old-style
# "YYYY-MM-DD HH:MM +ZZZZ" timestamp, to the "%F_%s" form
# (ISO date + Unix epoch).  Aborts on any error or missing argument.
set -o nounset                              # Treat unset variables as an error
set -o errexit

old_name="$1"

# BSD date (macOS, detected via sw_vers succeeding) parses with -jf;
# GNU date parses with --date.  Both produce the same "%F_%s" output.
if sw_vers &>/dev/null; then
	new_name="$(date -jf '%Y-%m-%e %H:%M %z' "${old_name}" '+%F_%s')"
else
	new_name="$(date --date="${old_name}" '+%F_%s')"
fi

mv "$old_name" "$new_name"
| true |
eff6156686b6602bb8030884a3be4069aeedfcdd | Shell | wosigh/services_gstservice | /control/prerm | UTF-8 | 518 | 2.640625 | 3 | [
"MIT"
] | permissive | #!/bin/sh
# webOS ipkg pre-removal hook for org.webosinternals.gstservice: stop the
# running service, delete its installed artifacts, then bounce the Java
# service bootloader so it forgets the removed service.

# Stop the service
/sbin/stop org.webosinternals.gstservice

# Remove the java dbus service
rm -f /usr/lib/luna/java/org.webosinternals.gstservice.jar
rm -f /usr/share/dbus-1/system-services/org.webosinternals.gstservice.service

# Remove the upstart script
rm -f /etc/event.d/org.webosinternals.gstservice

# Restart the service handler
# Only restart when installing on the live device (IPKG_OFFLINE_ROOT is
# set when building/installing into an offline image, where upstart
# cannot be poked).
if [ -z "$IPKG_OFFLINE_ROOT" ]; then # Defined by recent installers that also support flags.
/sbin/stop java-serviceboot
/sbin/start java-serviceboot
fi

exit 0
| true |
e94cf7035e0137610aa729a0690c63360bc7f08f | Shell | werminghoff/BazelVSBuckSample | /scripts/generate_xcode_project_tulsi.sh | UTF-8 | 2,144 | 3.390625 | 3 | [] | no_license | set -eou pipefail
##############################################################
# Constants
##############################################################
tulsi="/Applications/Tulsi.app/Contents/MacOS/Tulsi"
output_path="config/bazel_config"
tulsiproj_path="$output_path/BazelVSBuckSample.tulsiproj"
tulsiconf_path="$tulsiproj_path/project.tulsiconf"
tulsigen_path="$tulsiproj_path/Configs/BazelVSBuckSample.tulsigen"

##############################################################
# Get all targets in the repository that need to be added to the Xcode project
##############################################################
# Get targets list + add target flags
# Each Bazel label from `make list` is prefixed with "--target " so the
# whole string expands into repeated CLI flags below.
targets_list=$(make list BUILDTOOL=bazel | sed 's/^/--target /;')

##############################################################
# Generate tulsiproj
# See: https://github.com/bazelbuild/tulsi/issues/104
##############################################################
# $targets_list is deliberately left unquoted so it word-splits into the
# individual "--target <label>" flag pairs.
"$tulsi" -- \
--create-tulsiproj "BazelVSBuckSample" \
--bazel "/usr/local/bin/bazel" \
--outputfolder "config/bazel_config" $targets_list

##############################################################
# Patch generated tulsiproj
##############################################################
# Remove the 'additionalFilePaths'
# perl -0 slurps the whole file so the non-greedy multi-line match
# (.*? with /s) can delete the entire JSON array in one pass.
regex_to_replace='\ "additionalFilePaths\".*?\],\n'
perl -0pi -e "s/$regex_to_replace//gs" "$tulsigen_path"

# Fix the 'sourceFilters': using './...' does not include the 'Sources' directories
# in the Xcode project ( no idea why). Using '//Libraries/...' fixes the problem
# \Q quotes the metacharacters in './...' so it matches literally.
perl -pi -e 's|\Q./...|//Libraries/...|g' "$tulsigen_path"

# Remove the 'packages' from the tulsiconf file because every time they are
# generated the sorting is different and messes up with version control.
# Seems like removing them does not have any negative effect
regex_to_replace='\ "packages\".*?\],\n'
perl -0pi -e "s/$regex_to_replace//gs" "$tulsiconf_path"

##############################################################
# Generate Xcode project using the newly generated tulsiproj
##############################################################
"$tulsi" -- \
--genconfig "$tulsiproj_path"
| true |
297b22e9baf14897de42d96649972493ea3d0117 | Shell | jonghunDB/SciDB19.3 | /tests/harness/testcases/t/daily/stable/by_op/z_abort/abort_store_2.sh | UTF-8 | 2,862 | 3.421875 | 3 | [] | no_license | #!/bin/bash
#
# BEGIN_COPYRIGHT
#
# Copyright (C) 2008-2019 SciDB, Inc.
# All Rights Reserved.
#
# SciDB is free software: you can redistribute it and/or modify
# it under the terms of the AFFERO GNU General Public License as published by
# the Free Software Foundation.
#
# SciDB is distributed "AS-IS" AND WITHOUT ANY WARRANTY OF ANY KIND,
# INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,
# NON-INFRINGEMENT, OR FITNESS FOR A PARTICULAR PURPOSE. See
# the AFFERO GNU General Public License for the complete license terms.
#
# You should have received a copy of the AFFERO GNU General Public License
# along with SciDB. If not, see <http://www.gnu.org/licenses/agpl-3.0.html>
#
# END_COPYRIGHT
#
# Print an error message, prefixed with this script's name, and abort the
# whole script with exit status 1.
# Arguments: $* - message text
# Output:    diagnostic on stderr
die ()
{
    local SCRIPT
    SCRIPT=$(basename "$0")
    # The original wrote 'echo 2>&1', which redirects stderr onto stdout
    # and therefore printed the diagnostic on *stdout*; 1>&2 sends it to
    # stderr as intended for an error helper.
    echo 1>&2 "${SCRIPT}: $@"
    exit 1
}
# Abort early unless the test harness exported the SciDB coordinator
# host/port; die() prints a diagnostic and exits non-zero.
[ "$IQUERY_HOST" != "" ] || die "IQUERY_HOST not defined"
[ "$IQUERY_PORT" != "" ] || die "IQUERY_PORT not defined"

# Every query below goes through this single iquery command string.
IQUERY="iquery -c $IQUERY_HOST -p $IQUERY_PORT"
# Drop the test array and terminate the whole script.
#   $1 - exit status to finish with (0 = success)
#   $2 - line number where the failure was detected (diagnostic only)
# Prints "Success" for status 0, otherwise an error line with the status
# and location, then exits with that status.
cleanup()
{
    local rc=$1
    local where=$2
    $IQUERY -naq "remove(fooas2)"
    if [ "$rc" = 0 ]; then
        echo "Success"
    else
        echo "Error occured: " $rc "at line: " $where
    fi
    exit $rc
}
# The killquery.sh store() queries use the undocumented _fetch:1
# option to keep the query timing the same as before the SDB-6178 fix.

# create the test array
$IQUERY -naq "create array fooas2 <v:int64> [I=0:2000,100,0]"
if [[ $? != 0 ]] ; then cleanup 1 $LINENO; fi

# Remember the unversioned array id so we can look up its datastores.
uaid=$($IQUERY -otsv -aq "project(filter(list('arrays'),name='fooas2'),uaid)")

# case 1 --- abort the store of the first version of an array.
# Verify that no datastore is created.
${TEST_UTILS_DIR}/killquery.sh -afl 2 2 'store (build (fooas2, I), fooas2, _fetch:1)'
if [[ $? != 0 ]]; then cleanup 1 $LINENO; fi
# The rename round-trip forces catalog/metadata refresh after the abort.
$IQUERY -aq "rename(fooas2, fooas2a)"
$IQUERY -aq "rename(fooas2a, fooas2)"
# Expect only the header line from list('datastores'), i.e. no datastore.
lines=$($IQUERY -aq "filter(list('datastores'), uaid=$uaid)" | wc -l)
if [[ $lines != 1 ]]; then echo lines = $lines; cleanup 1 $LINENO; fi

# case 2 --- abort the store of the second version of an array.
# Verify that the contents did not change and that the used size of the
# array did not increase
$IQUERY -naq "store (build (fooas2, I), fooas2)"
if [[ $? != 0 ]] ; then cleanup 1 $LINENO; fi
size=$($IQUERY -ocsv -aq "project(summarize(fooas2), bytes)")
${TEST_UTILS_DIR}/killquery.sh -afl 2 2 'store (build (fooas2, I+1), fooas2, _fetch:1)'
if [[ $? != 0 ]]; then cleanup 1 $LINENO; fi
# 2001 cells where v = I, plus one header line of iquery output.
lines=$($IQUERY -aq "filter (fooas2, v = I)" | wc -l)
if [[ $lines != 2002 ]]; then echo lines = $lines; cleanup 1 $LINENO; fi
$IQUERY -aq "rename(fooas2, fooas2a)"
$IQUERY -aq "rename(fooas2a, fooas2)"
size1=$($IQUERY -ocsv -aq "project(summarize(fooas2), bytes)")
if [ $size != $size1 ]; then echo "Uh oh, $size != $size1"; cleanup 1 $LINENO; fi

# success
cleanup 0 $LINENO
| true |
813ea423b2bf8639edbe9c5015d8e7ab114f3b6b | Shell | LinuxOnCloud/Docker_Sample | /VM_Scripts/agent-redis-vmware.sh | UTF-8 | 11,656 | 3.578125 | 4 | [] | no_license | #!/bin/bash
# Globals used throughout the dispatcher below:
#   host      - this machine's hostname (embedded in backup file names)
#   dt        - today's date, e.g. 01Jan2024 (embedded in backup file names)
#   redispass - current requirepass value read from the redis config
host=`hostname`
dt=`date +%d%b%Y`
redispass=`grep 'requirepass' /etc/redis/redis.conf|tail -1|awk '{print $2}'`
# start redis
# Thin wrapper over the SysV init script; requires passwordless sudo.
redisstart() {
sudo /etc/init.d/redis-server start
}

# stop redis
redisstop() {
sudo /etc/init.d/redis-server stop
}

# restart redis
# NOTE(review): defined but the arms below stop+start manually instead --
# confirm whether this helper is still needed.
redisrestart() {
sudo /etc/init.d/redis-server restart
}
# Dispatch on the first CLI argument.  Every arm prints a one-line JSON
# status object for the orchestrator to parse: code 5000 on success,
# 55xx codes on the various failures.
case $1 in
cmd)
# Run an arbitrary command supplied as $2..$51 (hard upper bound of 50
# argument words; NOTE(review): shifting and using "$@" would lift the
# limit -- confirm callers before changing).
$2 $3 $4 $5 $6 $7 $8 $9 ${10} ${11} ${12} ${13} ${14} ${15} ${16} ${17} ${18} ${19} ${20} ${21} ${22} ${23} ${24} ${25} ${26} ${27} ${28} ${29} ${30} ${31} ${32} ${33} ${34} ${35} ${36} ${37} ${38} ${39} ${40} ${41} ${42} ${43} ${44} ${45} ${46} ${47} ${48} ${49} ${50} ${51}
if [ $? -eq 0 ]; then
echo '{"code":5000,"success":"true","message":"Command Successfully Executed"}'
else
echo '{"success":"false","code":5501,"message":"Command Execution Failed"}'
fi;;
start)
# A non-empty ps match for a running /usr/...redis-server process means
# the daemon is already up; otherwise start it and re-check via exit code.
state=`ps aux |grep redis-server|grep usr|awk '{print $2}'`
if [ -n "$state" ]; then
echo '{"code":5000,"success":"true","message":"Redis Already Started"}'
else
redisstart
if [ $? -eq 0 ]; then
echo '{"code":5000,"success":"true","message":"Redis Started Successfully"}'
else
echo '{"success":"false","code":5502,"message":"Redis Could Not Be Started"}'
fi
fi;;
stop)
state=`ps aux |grep redis-server|grep usr|awk '{print $2}'`
if [ -z "$state" ]; then
echo '{"code":5000,"success":"true","message":"Redis Already Stopped"}'
else
# Persist the dataset (bgsave) before shutting the server down, then
# confirm the process is really gone by re-running the ps check.
redis-cli -a $redispass bgsave
redisstop
state1=`ps aux |grep redis-server|grep usr|awk '{print $2}'`
if [ -z "$state1" ]; then
echo '{"code":5000,"success":"true","message":"Redis Stopped Successfully"}'
else
echo '{"success":"false","code":5503,"message":"Redis Could Not Be Stopped"}'
fi
fi;;
createdb)
# Replace the factory placeholder password in redis.conf with $3, bounce
# the server, and verify the file now contains the requested password.
if [ ! -z "$3" ]; then
redisstop
sudo sed -i -e 's/requirepass app42_redis_26_password/requirepass '$3'/' /etc/redis/redis.conf
# redisstop
sleep 2
redisstart
check=`grep requirepass /etc/redis/redis.conf|tail -1|awk '{print $2}'`
if [ $check = $3 ]; then
echo '{"code":5000,"success":"true","message":"Redis Database Password Has Been Set Successfully"}'
else
echo '{"success":"false","code":5504,"message":"Redis Database Password Could Not Be Set"}'
fi
else
echo '{"success":"false","code":5505,"message":"Password Could Not Be Set Because Your Password Value Is Blank"}'
fi;;
resetpassword)
# Swap old password $3 for new password $4 in the config, persist the
# dataset with the old credentials, restart, then verify the new value.
sudo sed -i -e 's/requirepass '$3'/requirepass '$4'/' /etc/redis/redis.conf
redis-cli -a $3 bgsave
redisstop
sleep 2
redisstart
check=`grep requirepass /etc/redis/redis.conf|tail -1|awk '{print $2}'`
if [ $check = $4 ]; then
echo '{"code":5000,"success":"true","message":"Redis Database Password Reset Successfull"}'
else
echo '{"success":"false","code":5506,"message":"Redis Database Password Could Not Be Reset"}'
fi;;
backup)
# Snapshot the live dump.rdb into $HOME/dump/dump-<host>-<date>.rdb.
# $2 is the database password; a NOAUTH line in the bgsave output means
# it was wrong.
if [ -f /var/lib/redis/dump.rdb ]; then
redis-cli -a $2 bgsave
pwdcheck=`redis-cli -a $2 bgsave|grep 'NOAUTH'|wc -l`
if [ $pwdcheck = 0 ]; then
redisstop
sleep 2
sudo cp /var/lib/redis/dump.rdb $HOME/dump/dump-$host-$dt.rdb
sudo chown -R 1001.1001 $HOME/dump
redisstart
# redis-check-dump ends its report with "... OK" on a valid dump file;
# rev|awk|rev extracts that last word.
bkpcheck=`sudo redis-check-dump $HOME/dump/dump-$host-$dt.rdb |tail -1|rev|awk '{print $1}'|rev`
if [ $bkpcheck = OK ]; then
echo '{"code":5000,"success":"true","message":"Redis Database Backup Created","path":"'$HOME/dump/dump-$host-$dt.rdb'"}'
else
echo '{"success":"false","code":5507,"message":"Redis Database Backup Could Not Be Created"}'
fi
else
echo '{"success":"false","code":5508,"message":"Redis Database Password Incorrect"}'
fi
else
echo '{"success":"false","code":5509,"message":"Redis Database Dump is Not Available"}'
fi;;
restore)
# Restore a dump downloaded from URL $4 ($2 = db password).  Flow:
#   1. download the archive into $HOME/download
#   2. safety-copy the current dump.rdb to $HOME/backup
#   3. unpack the download (tar.gz/gz/zip/plain .rdb)
#   4. validate it with redis-check-dump; on success install it as
#      /var/lib/redis/dump.rdb, on failure roll the old dump back
# download source/binary
wget --no-check-certificate --directory-prefix=$HOME/download $4
if [ $? -eq 0 ]; then
rm -rf $HOME/.aws/config $HOME/url
#echo '{"code":5000,"success":"true","message":"File Download successfully"}'
#mv $HOME/backup/$4.sql $HOME/backup/$4.sql-`date +%d%b%Y`
# Persist current data, stop the server, and keep a rollback copy.
redis-cli -a $2 bgsave
redisstop
sleep 2
sudo cp /var/lib/redis/dump.rdb $HOME/backup/dump.rdb
sudo chown -R 1001.1001 $HOME/backup
bkpcheck=`redis-check-dump $HOME/backup/dump.rdb|tail -1|rev|awk '{print $1}'|rev`
if [ $bkpcheck = OK ]; then
#echo '{"code":5000,"success":"true","message":"Create current db backup"}'
# extract file extenstion and file name from URL
redisstart
fileWithExt=${4##*/}
echo "file=$fileWithExt"
FileExt=${fileWithExt#*.}
echo "FileExt=$FileExt"
d=`echo "$fileWithExt" | cut -d'.' -f1`
echo "d=$d"
# extract source
# Each unpack arm leaves the extracted file's name in $out, parsed from
# the extractor's own output listing.
case $FileExt in
tar.gz)
tar xvzf $HOME/download/$fileWithExt -C $HOME/download/ > $HOME/result
d=`head -1 $HOME/result`
out=$d
echo out=$out;;
rdb.tar.gz)
tar xvzf $HOME/download/$fileWithExt -C $HOME/download/ > $HOME/result
d=`head -1 $HOME/result`
out=$d
echo out=$out;;
gz)
gunzip -v $HOME/download/$fileWithExt 2> $HOME/result
d=`cat $HOME/result |rev|cut -d'/' -f1|rev`
out=$d
echo out=$out;;
rdb.gz)
gunzip -v $HOME/download/$fileWithExt 2> $HOME/result
d=`cat $HOME/result |rev|cut -d'/' -f1|rev`
out=$d
echo out=$out;;
zip)
unzip $HOME/download/$fileWithExt -d $HOME/download/ >$HOME/result
d=`tail -1 $HOME/result | rev|cut -d'/' -f1|rev`
d=`echo $d | tr -d ' '`
out=$d
echo out=$out;;
rdb.zip)
unzip $HOME/download/$fileWithExt -d $HOME/download/ >$HOME/result
d=`tail -1 $HOME/result | rev|cut -d'/' -f1|rev`
d=`echo $d | tr -d ' '`
out=$d
echo out=$out;;
rdb)
out=$fileWithExt
echo out=$out;;
esac
# move source to redis folder
dwnfcheck=`redis-check-dump $HOME/download/$out|tail -1|rev|awk '{print $1}'|rev`
if [ $dwnfcheck = OK ]; then
redisstop
sudo rm /var/lib/redis/dump.rdb
cp $HOME/download/$out /var/lib/redis/dump.rdb
# 103.1001 = redis user / service group ownership on this image.
sudo chown -R 103.1001 /var/lib/redis/dump.rdb
echo "permission done"
redisstart
rm -rf $HOME/download
rm -rf $HOME/backup/dump.rdb
echo '{"code":5000,"success":"true","message":"Redis Database Restored Successfully"}'
#############################################
######### ------------######################
else
# Downloaded dump failed validation: roll the saved dump back in place.
redisstop
sudo rm /var/lib/redis/dump.rdb
cp $HOME/backup/dump.rdb /var/lib/redis/dump.rdb
sudo chown -R 103.1001 /var/lib/redis/dump.rdb
redisstart
rm -rf $HOME/download
rm -rf $HOME/backup/dump.rdb
echo '{"success":"false","code":5510,"message":"Redis Database Restore Failed"}'
exit 1
fi
else
# remove contents of www folder and downloaded file
rm -rf $HOME/download
rm -rf $HOME/backup/dump.rdb
redisstart
echo '{"success":"false","code":5511,"message":"Redis Current Database Backup Failed"}'
exit 1
fi
else
rm -rf $HOME/download
rm -rf $HOME/.aws/config $HOME/url
echo '{"success":"false","code":5512,"message":"Redis Error In Downloading Backup File"}'
exit 1
fi
;;
restart)
# Persist the dataset, then stop + start (rather than the init script's
# own restart) with a pause to let the port be released.
redis-cli -a $redispass bgsave
redisstop
sleep 2
redisstart
if [ $? -eq 0 ]; then
echo '{"code":5000,"success":"true","message":"Redis Restarted Successfully"}'
else
echo '{"success":"false","code":5513,"message":"Redis Could Not Be Restarted"}'
fi;;
delete_backup)
# $2 is the absolute path of the backup file to remove.
if [ -f $2 ]; then
rm -rf $2
echo '{"code":5000,"success":"true","message":"Redis Backup Deleted Successfully"}'
else
echo '{"success":"false","code":5514,"message":"Redis Backup Deletion Failed"}'
fi;;
*)
# Unknown subcommand: print usage plus a machine-readable error object.
echo 'Usage: {cmd|start|stop|createdb|resetpassword|backup|restore|restart|delete_backup}'
echo '{"success":"false", "code":5515,"message":"Invalid Command"}'
;;
esac
| true |
492e723f6300db78408838098aa414bc7cdf4c1a | Shell | mrakitin/skif-bin | /qcd | UTF-8 | 423 | 3 | 3 | [] | no_license | #!/bin/bash
# Author : Maxim Rakitin rms85@physics.susu.ac.ru
# Date   : 2009-10-13
# Version: 1.0
# Purpose: To navigate to calc directory directly by using PBS information
# Usage  : source qcd 12345
#
# NOTE: this file must be *sourced* (". qcd <jobid>") so the final 'cd'
# changes the caller's working directory.
# NOTE(review): the header says PBS, but 'scontrol' is a Slurm command --
# confirm which scheduler is actually meant.

if [ -n "${1:-}" ]; then
    # Ask the scheduler for the job's working directory and strip a
    # trailing '/job' path component.  "$1" and "$calc_dir" are quoted so
    # paths containing spaces survive (the original left them unquoted);
    # ${1:-} also keeps this safe when sourced with no arguments.
    calc_dir=$(scontrol show job "$1" | grep "WorkDir=" | cut -d= -f2 | sed 's/\/job$//g')
    echo "--- Calc dir = $calc_dir"
    cd "$calc_dir"
else
    # No job id given: stay in the current directory (no-op cd).
    cd .
fi

#exec sh -c 'cd $(echo ~/bin ); exec bash -l' --- from srg
| true |
d2dc943752f3303e60a4edcefc6d9ca3cec40b1f | Shell | maxsullivan/scripts | /dspace/Batch_Transform_Mets_production.sh | UTF-8 | 3,826 | 3.96875 | 4 | [] | no_license | #!/bin/bash
# For Dspace METS files apply xsl transformation to transform to Rosetta METS. XSL, Dspace files and this script need to be in the same directory.
# This takes two parameters:
# t = the path where the source lives
# o = the path where the rosetta deposits should be created
# The script assumes that the source deposit dirs will be named research-xxx
#
# Usage:
# ./Batch_Tranform_Mets.sh -t /path/to/dspace/exports -o /path/to/where/files/should/be/created
#
# Authors: Max Sullivan, Brendan Farrell, Stuart Yeates

#SIP_DIR="./research*"
# Get user input
# NOTE(review): the usage text above documents -t for the source path, but
# the optstring accepts -s (and "user" adds u/e/r as flags) -- confirm
# which flag callers actually pass.
while getopts s:o:user option
do
case "${option}"
in
s) TARGETPATH=${OPTARG};;
o) OUTPUTPATH=${OPTARG};;
esac
done

#check that the output folder exists, create it if it doesnt
if [ ! -d $OUTPUTPATH ]; then
mkdir $OUTPUTPATH
fi

#set the SIP DIR i.e. all dirs prefixed with research in the TARGETPATH
# The '*' stays unexpanded inside the string on purpose: the unquoted
# $SIP_DIR in the for-loop below is what performs the glob.
SIP_DIR=$TARGETPATH"/research*"

# Loop through each directory
for IN_DIR in $SIP_DIR
do
#Count the number of mets.xml files to make sure there is only 1
xmlcount=$(find "$IN_DIR" -name mets.xml | wc -l)
echo "total number of xml files: $xmlcount"
#if count is one convert mets to rosetta mets
if [[ "$xmlcount" = 1 ]]; then
#check directory exists
SIP_DIR_NAME=$(basename $IN_DIR)
echo "Processing" $SIP_DIR_NAME
if [ ! -d $SIP_DIR_NAME ]; then
mkdir $SIP_DIR_NAME
fi
#check that the deposit folder exists, create it if it doesnt
# Build the Rosetta deposit layout: <name>/content/streams with the
# DSpace bitstreams copied in.
if [ ! -d $OUTPUTPATH/$SIP_DIR_NAME ]; then
mkdir $OUTPUTPATH/$SIP_DIR_NAME
mkdir $OUTPUTPATH/$SIP_DIR_NAME/content
mkdir $OUTPUTPATH/$SIP_DIR_NAME/content/streams
#cp $xml_file/mets.xml $OUTPUTPATH/$SIP_DIR_NAME/content/mets.xml
cp $IN_DIR/bitstream* $OUTPUTPATH/$SIP_DIR_NAME/content/streams/
fi
# Convert the mets.xml in the OUTPUTPATH to rosetta METS
xsltproc --novalid ./dspace_mods_to_dc_mets_production.xsl $IN_DIR/mets.xml | xsltproc --novalid ./dspace_post_process.xsl - | xmllint --format - > /tmp/tmp-METS.xml
# Pull the handle.net identifier out of the source METS; tr splits the
# XML on angle brackets/quotes so grep can isolate the handle URL.
ID=`grep hdl.handle.net $IN_DIR/mets.xml | tr '<>"' '\012' | grep hdl.handle.net | uniq | sed 's^http://hdl.handle.net^^'`
echo ID=$ID
echo ID='$ID'
URL=http://researcharchive.vuw.ac.nz/handle$ID
xsltproc --stringparam 'url' $URL MARC2SingleRecord.xsl BIBLIOGRAPHIC_3805366240002386_1.xml | xmllint --format - > /tmp/tmp-MARC.xml
# If the generated MARC has real content, splice it into the METS at the
# line matching MARC21 (head takes everything before it, tail everything
# after, with the MARC body dropped in between).
grep marc:controlfield /tmp/tmp-MARC.xml > /dev/null
if [ $? = 0 ]; then
echo found MARC!
line=`sed -n '/MARC21/=' /tmp/tmp-METS.xml`
echo $line
head --lines $(($line - 1 )) < /tmp/tmp-METS.xml > /tmp/tmp-METS2.xml
cat /tmp/tmp-MARC.xml | tail --lines +2 >> /tmp/tmp-METS2.xml
tail --lines +$(($line +1 )) < /tmp/tmp-METS.xml >> /tmp/tmp-METS2.xml
cp /tmp/tmp-METS2.xml $OUTPUTPATH/$SIP_DIR_NAME/content/mets.xml
else
cp /tmp/tmp-METS.xml $OUTPUTPATH/$SIP_DIR_NAME/content/mets.xml
fi
#make dc title file
if [ -f $OUTPUTPATH/$SIP_DIR_NAME/content/mets.xml ]; then
#echo if mets file exists
thistime=`date`
echo mets file exists $OUTPUTPATH/$SIP_DIR_NAME/content/mets.xml
#Find the dc title
# -oPm1: first occurrence only, lookbehind grabs the text after 'title>'.
dcidentifier=`grep -oPm1 "(?<=title>)[^<]+" $OUTPUTPATH/$SIP_DIR_NAME/content/mets.xml`
#create the dc Title file
echo "<?xml version=\"1.0\" encoding=\"UTF-8\"?><record xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xmlns:dcterms=\"http://purl.org/dc/terms/\" xmlns:dc=\"http://purl.org/dc/elements/1.1/\"><dc:title>$dcidentifier $thistime</dc:title></record>" | xmllint --format - > $OUTPUTPATH/$SIP_DIR_NAME/dc.xml
xmllint --noout $OUTPUTPATH/$SIP_DIR_NAME/dc.xml
#end of dc title file section
else
echo too many xml files $xmlcount
fi
fi
done
| true |
1c175cfedb86e23cb495db92eaf176672b81f5bf | Shell | aarontp/l2tdevtools | /utils/submit.sh | UTF-8 | 4,601 | 3.875 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Script that submits a code for code review.
#
# Flow: validate the working copy (lint, tests, sync with origin), mark
# the Rietveld change list as submitted, commit & push, then close the
# review.  Requires utils/common.sh helpers and an existing CL number
# (given on the command line or read from .review/<branch>).

EXIT_FAILURE=1;
EXIT_MISSING_ARGS=2;
EXIT_SUCCESS=0;

SCRIPTNAME=`basename $0`;

BROWSER_PARAM="";
CL_NUMBER="";
USE_CL_FILE=0;
CL_FILENAME="";

if ! test -f "utils/common.sh";
then
  echo "Unable to find common scripts (utils/common.sh).";
  echo "This script can only be run from the root of the source directory.";

  exit ${EXIT_FAILURE};
fi

# Provides have_curl, have_remote_origin, get_current_branch,
# have_master_branch, linting_is_correct_remote_origin, tests_pass, etc.
. utils/common.sh

if ! have_curl;
then
  echo "You'll need to install curl for this script to continue.";

  exit ${EXIT_FAILURE};
fi

# Determine if we have the master repo as origin.
# Stored as the function name and invoked later via ${HAVE_REMOTE_ORIGIN},
# which defers the check until after argument parsing.
HAVE_REMOTE_ORIGIN=have_remote_origin;

while test $# -gt 0;
do
  case $1 in
  --nobrowser | --no-browser | --no_browser )
    BROWSER_PARAM="--no_oauth2_webbrowser";
    shift;
    ;;

  *)
    CL_NUMBER=$1;
    shift
    ;;
  esac
done

# No CL number on the command line: fall back to .review/<current branch>.
if test -z "${CL_NUMBER}";
then
  BRANCH="";
  get_current_branch "BRANCH";

  CL_FILENAME=".review/${BRANCH}";

  if test -f ${CL_FILENAME};
  then
    CL_NUMBER=`cat ${CL_FILENAME}`
    RESULT=`echo ${CL_NUMBER} | sed -e 's/[0-9]//g'`;

    if ! test -z "${RESULT}";
    then
      echo "${CL_FILENAME} contains an invalid CL number.";

      exit ${EXIT_FAILURE};
    fi
    USE_CL_FILE=1;
  fi
fi

if test -z "${CL_NUMBER}";
then
  echo "Usage: ./${SCRIPTNAME} [--nobrowser] CL_NUMBER";
  echo "";
  echo "  CL_NUMBER: optional change list (CL) number that is to be submitted.";
  echo "             If no CL number is provided the value is read from the";
  echo "             corresponding file in: .review/";
  echo "";
  echo "  --nobrowser: forces upload.py not to open a separate browser";
  echo "               process to obtain OAuth2 credentials for Rietveld";
  echo "               (https://codereview.appspot.com).";
  echo "";

  exit ${EXIT_MISSING_ARGS};
fi

if ! ${HAVE_REMOTE_ORIGIN};
then
  echo "Submit aborted - no need to use the submit script. Instead use";
  echo "the update script to update the code review or the close script";
  echo "to close the code review after it has been submitted by one of";
  echo "the git repository maintainers.";

  exit ${EXIT_FAILURE};
fi

if ! have_master_branch;
then
  echo "Submit aborted - current branch is not master.";

  exit ${EXIT_FAILURE};
fi

if have_double_git_status_codes;
then
  echo "Submit aborted - detected double git status codes."
  echo "Run: 'git stash && git stash pop'.";

  exit ${EXIT_FAILURE};
fi

if ! local_repo_in_sync_with_origin;
then
  echo "Submit aborted - local repo out of sync with origin."
  echo "Run: 'git stash && git pull && git stash pop'.";

  exit ${EXIT_FAILURE};
fi

if ! linting_is_correct_remote_origin;
then
  echo "Submit aborted - fix the issues reported by the linter.";

  exit ${EXIT_FAILURE};
fi

if ! tests_pass;
then
  echo "Submit aborted - fix the issues reported by the failing test.";

  exit ${EXIT_FAILURE};
fi

URL_CODEREVIEW="https://codereview.appspot.com";

# Get the description of the change list.
# This will convert newlines into spaces.
CODEREVIEW=`curl -s ${URL_CODEREVIEW}/api/${CL_NUMBER}`;
DESCRIPTION=`echo ${CODEREVIEW} | sed 's/^.*"subject":"\(.*\)","created.*$/\1/'`;

if test -z "${DESCRIPTION}" || test "${DESCRIPTION}" = "${CODEREVIEW}";
then
  echo "Submit aborted - unable to find change list with number: ${CL_NUMBER}.";

  exit ${EXIT_FAILURE};
fi

COMMIT_DESCRIPTION="Code review: ${CL_NUMBER}: ${DESCRIPTION}";
echo "Submitting ${COMMIT_DESCRIPTION}";

# Check if we need to set --cache for upload.py: required when the change
# contains newly added ("A") files.  BUG FIX: this used to be computed
# *after* the upload.py invocation below, so CACHE_PARAM was always empty
# at the point of use; it must be determined before the upload.
STATUS_CODES=`git status -s | cut -b1,2 | sed 's/\s//g' | sort | uniq`;

CACHE_PARAM="";
for STATUS_CODE in ${STATUS_CODES};
do
  if test "${STATUS_CODE}" = "A";
  then
    CACHE_PARAM="--cache";
  fi
done

# Need to change the status on codereview before commit.
python utils/upload.py \
    --oauth2 ${BROWSER_PARAM} ${CACHE_PARAM} --send_mail -i ${CL_NUMBER} \
    -m "Code Submitted." -t "Submitted." -y;

# Optionally bump the project version before committing.
if test -f "utils/update_version.sh";
then
  . utils/update_version.sh
fi

git commit -a -m "${COMMIT_DESCRIPTION}";
git push

if test $? -ne 0;
then
  echo "Submit aborted - unable to run: 'git push'.";

  exit ${EXIT_FAILURE};
fi

# Close the review using the stored Rietveld session, if one exists.
CODEREVIEW_COOKIES="${HOME}/.codereview_upload_cookies";
if test -f "${CODEREVIEW_COOKIES}";
then
  curl -b "${CODEREVIEW_COOKIES}" ${URL_CODEREVIEW}/${CL_NUMBER}/close -d '';
else
  echo "Could not find an authenticated session to codereview.";
  echo "You need to manually close the ticket on the code review site:";
  echo "${URL_CODEREVIEW}/${CL_NUMBER}";
fi

# Drop the per-branch CL bookkeeping file now that the CL is submitted.
if ! test -z "${USE_CL_FILE}" && test -f "${CL_FILENAME}";
then
  rm -f ${CL_FILENAME};
fi

exit ${EXIT_SUCCESS};
| true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.