blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
5d3b5c27e839726cfbf01b2154400d8dff49edfb | Shell | maithanhduyan/ant | /build.sh | UTF-8 | 1,930 | 3.484375 | 3 | [
"LicenseRef-scancode-unknown",
"LicenseRef-scancode-unknown-license-reference",
"W3C",
"GPL-1.0-or-later",
"SAX-PD",
"Apache-2.0"
] | permissive | #!/bin/sh
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Bootstrap Ant if necessary, then run the build with the bootstrapped Ant,
# forwarding any arguments given to this script.

# OS specific support. $var _must_ be set to either true or false.
cygwin=false
darwin=false
case "$(uname)" in
CYGWIN*)
    cygwin=true
    ;;
Darwin*)
    darwin=true
    # On macOS, locate a JDK when JAVA_HOME is not already set.
    if [ -z "$JAVA_HOME" ]; then
        if [ -x /usr/libexec/java_home ]; then
            JAVA_HOME=$(/usr/libexec/java_home)
        elif [ -d /System/Library/Frameworks/JavaVM.framework/Versions/CurrentJDK/Home ]; then
            JAVA_HOME=/System/Library/Frameworks/JavaVM.framework/Versions/CurrentJDK/Home
        fi
    fi
    ;;
esac

# Remember the user's ANT_HOME (install target below), then point ANT_HOME
# at the bootstrap copy for the duration of the build.
REALANTHOME=$ANT_HOME
if [ -z "$PWD" ]; then
    ANT_HOME=./bootstrap
else
    ANT_HOME="$PWD"/bootstrap
fi
export ANT_HOME

# Bootstrap when any of the bootstrap artifacts are missing.
# (Separate tests instead of the obsolescent `test ... -o ...` form.)
if [ ! -f bootstrap/lib/ant.jar ] || [ ! -x bootstrap/bin/ant ] || [ ! -x bootstrap/bin/antRun ]; then
    /bin/sh ./bootstrap.sh
fi

# Verify the bootstrap actually produced a usable Ant.
if [ ! -f bootstrap/lib/ant.jar ] || [ ! -x bootstrap/bin/ant ] || [ ! -x bootstrap/bin/antRun ]; then
    echo "Bootstrap FAILED" >&2
    exit 1
fi

# When the caller had an ANT_HOME, pass it as the install location
# (translated to a Windows path under Cygwin); otherwise ask for
# emacs-style output.
if [ "$REALANTHOME" != "" ]; then
    if $cygwin; then
        REALANTHOME=$(cygpath --windows "$REALANTHOME")
    fi
    ANT_INSTALL="-Dant.install=$REALANTHOME"
else
    ANT_INSTALL="-emacs"
fi

# "$@" (not $*) so arguments containing spaces reach ant intact.
bootstrap/bin/ant -nouserlib -lib lib/optional "$ANT_INSTALL" "$@"
| true |
2911515aa0f30fdb648b63bf74078fa071f7ff07 | Shell | starsep/bsk | /zad3/server.sh | UTF-8 | 1,958 | 3.4375 | 3 | [] | no_license | #!/bin/bash
# Filip Czaplicki
#
# Provisioning script for the "server" machine (S) in the BSK lab exercise:
# installs SSH tooling, exchanges keys between S, the client machine K
# (user U) and the students host, then sets up sshfs/rsync access.
# Runs apt/cp into /etc, so it must run with root privileges.

# Print a bold blue progress message.
function blue_echo {
echo -e "\e[1;34m$1\e[0m"
}

# Change localhost to the address of the K machine (the client).
K=localhost
U=fcbsk
STUDENTS=fc359081@students.mimuw.edu.pl

# Install prerequisites and prepare guest's ~/.ssh directory.
apt update
apt install openssh-server rsync sshfs -y
su guest -c "mkdir -p /home/guest/.ssh"

# deploy client.sh
blue_echo "Copying client.sh to $K"
scp client.sh guest@$K:~/client.sh
blue_echo "Executing client.sh @ $K as root!"
ssh -t guest@$K "sudo /home/guest/client.sh"

# Create an SSH key pair for guest; the private key is passphrase-protected.
su guest -c "ssh-keygen -N tajnehaslo -f /home/guest/.ssh/id_rsa"

# Install the public key on the students machine.
blue_echo "Copying public key to $STUDENTS"
scp /home/guest/.ssh/id_rsa.pub $STUDENTS:~/server.pub
blue_echo "Adding public key to authorized_keys @ $STUDENTS"
ssh $STUDENTS "cat ~/server.pub >> ~/.ssh/authorized_keys && rm server.pub"

# Perform the steps that allow user U to log in from K to S with K's key.
scp $U@$K:/home/$U/.ssh/id_rsa.pub /home/guest/.ssh/client.pub
su guest -c "cat /home/guest/.ssh/client.pub >> /home/guest/.ssh/authorized_keys"
rm /home/guest/.ssh/client.pub

# Configure the ssh client so the students login is used as the default
# username whenever guest connects to students.
# (sshconfig is expected in the current working directory.)
su guest -c "cat sshconfig >> /home/guest/.ssh/config"

# Configure the SSH server:
# (sshd_config is expected in the current working directory.)
cp sshd_config /etc/ssh/sshd_config
service ssh restart

# Set up the mount so that a browser running on a BSK lab computer can view
# any file from /home/students/inf/PUBLIC/BSK/Obrazy on the students machine
# (only files guest has permission to read, of course).
su guest -c "mkdir -p /home/guest/Obrazy && sshfs students:/home/students/inf/PUBLIC/BSK/Obrazy /home/guest/Obrazy"

# Using rsync, copy (as guest) the /home/students/inf/PUBLIC/BSK
# directory from the students machine.
# z = compress file data
su guest -c "rsync -avzhe ssh students:/home/students/inf/PUBLIC/BSK ."
| true |
ec70b6b44e656167fdfd99f9bf2e5144dcf22934 | Shell | robertluwang/dckvm | /kvm_install.sh | UTF-8 | 989 | 3.015625 | 3 | [
"MIT"
] | permissive | # kvm-install.sh
# handy script to install kvm packages on ubuntu
# By Robert Wang @github.com/robertluwang
# Dec 10, 2022

# update package index
sudo apt update -y

# remove snap on ubuntu 22.04 , can skip this section for previous ubuntu
sudo systemctl disable snapd.service
sudo systemctl disable snapd.socket
sudo systemctl disable snapd.seeded.service
sudo apt remove snapd
sudo apt autoremove --purge snapd
sudo rm -rf /var/cache/snapd/
rm -rf ~/snap

# upgrade
sudo apt upgrade -y

# Check that hardware virtualization (VT-x/AMD-V) is exposed to this host.
# The original `if $VMX = 0` tried to execute the count as a command, so the
# check never fired, and the script did not exit despite the message.
VMX=$(grep -Ec '(vmx|svm)' /proc/cpuinfo)
if [ "$VMX" -eq 0 ]; then
    echo "Please enable nested VT on host, exit!" >&2
    exit 1
fi

# kvm install
sudo apt install -y qemu-kvm libvirt-daemon-system virtinst libvirt-clients bridge-utils
sudo systemctl enable libvirtd
sudo systemctl start libvirtd
sudo systemctl status libvirtd

# add login user to group of kvm, libvirt
sudo usermod -aG kvm "$USER"
sudo usermod -aG libvirt "$USER"

# kvm tool
sudo apt install -y libguestfs-tools virt-top
| true |
bd037ec31bd8a53b3ecb5bae90713706de6b3358 | Shell | cfillion/dotfiles | /zsh/.zlogin | UTF-8 | 216 | 2.65625 | 3 | [] | no_license | #!/bin/sh
# Start (or attach to) a keychain-managed ssh-agent when an RSA key exists,
# then source the per-host agent environment files into this login shell.
if [ -r ~/.ssh/id_rsa ]; then
	keychain ~/.ssh/id_rsa 2>/dev/null
	for agent_env in ~/.keychain/"$HOSTNAME"-sh ~/.keychain/"$HOSTNAME"-sh-gpg; do
		[ -r "$agent_env" ] && . "$agent_env"
	done
fi
| true |
ba9fc8a5d9ca8a635db0402fef9360f73ddb319c | Shell | KaOSx/hardware-detection | /hooks/hwdetect_4_power | UTF-8 | 1,124 | 3.484375 | 3 | [] | no_license | #!/bin/bash
# Load all available cpufreq driver/governor modules and switch every CPU
# to the "ondemand" frequency governor.  The original duplicated this logic
# verbatim for the desktop and laptop platform cases; both platforms are
# handled by a single branch here.  Expects the platform detection step to
# have touched /tmp/platform-desktop or /tmp/platform-laptop beforehand.
# Relies on printhl (defined elsewhere in the hwdetect hook environment).
hwdetect_power()
{
	local KERNEL i m
	KERNEL=$(uname -r)   # running kernel release (was: field 3 of /proc/version)

	if [ -e "/tmp/platform-desktop" ] || [ -e "/tmp/platform-laptop" ] ; then
		printhl "Enabling powersave functions"

		# Load every cpufreq module shipped for this kernel, skipping the
		# *-lib helper modules that are not meant to be loaded directly.
		for i in /lib/modules/"$KERNEL"/kernel/drivers/cpufreq/*.ko*; do
			if [ -r "$i" ]; then
				case "$i" in *-lib.*) continue ;; esac
				m="${i##*/}" ; m="${m%%.*}"
				modprobe "${m}" >/dev/null 2>&1
			fi
		done

		# Switch each writable per-CPU governor to "ondemand".
		for i in /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor; do
			if [ -w "$i" ]; then
				echo "ondemand" > "${i}"
			fi
		done
	fi
}
| true |
07e5e3d009fabeff9e693d662a27b289a7a651a7 | Shell | Harrison-S1/bashscripts | /csvtoJson.sh | UTF-8 | 886 | 3.625 | 4 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | #!/bin/bash
# Define var date (used to build a unique output filename).
date=$(date +"%m_%d_%Y_%H:%M")

# Input from user on what file they want to use.
# The prompt is quoted: the original unquoted `Work with?` contained a glob
# pattern that the shell could expand against files in the current directory.
echo "What text file do you want to Work with?"
# Read txt file name (-r so backslashes in the name are kept literal).
read -r filename

# Convert the whitespace-separated text to CSV: read the file directly
# (no useless cat) and quote expansions so paths with spaces work.
tr " " "," < "$filename" > "file_$date.csv"

# Use jq to split csv into json format and output to .json.
# The split starts from row 0 and uses "," to define the split, making each
# "," separation its own field.  This makes it easier to drill down into the
# data and change what you want from it, or harder if you need more generic data.
jq --slurp --raw-input \
'split("\n") | .[0:] | map(split(",")) |
map({
"Log entry": [.[0], .[3], .[4], .[5], .[6], .[7], .[8], .[9], .[10], .[11], .[14], .[15], .[16], .[17], .[18]],
}
)' \
"file_$date.csv" > output.json
| true |
831b35e4bef3e8dda602f2a9c3eb230316d68b15 | Shell | chenchuk77/coupon-maven | /setup.sh | UTF-8 | 408 | 2.796875 | 3 | [] | no_license | #! /bin/bash
# Build/run notes for this project (jdk 1.8 + maven3):
#
#   1. clone from github
#   2. cd into the root folder (where the pom.xml lives)
#   3. build with maven (mvn compile)
#   4. add the root folder to the classpath
#   5. run
#
# Cloning and running this setup.sh performs steps 4-5.
printf '%s\n' "adding this folder to classpath"
export CLASSPATH="${CLASSPATH}:${PWD}:${PWD}/target/classes"
printf '%s\n' "starting jvm ..."
java com.jbp.main.Test
| true |
b372836daa9a1cc580400c92937bbcef9fb5466b | Shell | tomhoover/bin | /archive/updateHostsFile.sh | UTF-8 | 521 | 2.9375 | 3 | [] | no_license | #!/usr/bin/env sh
# requires https://github.com/StevenBlack/hosts
#
# Regenerates the unified hosts file from a local StevenBlack/hosts checkout:
# update the checkout, overlay local config, build, then discard the changes
# so the next pull starts clean.
cd ~/src/github.com/stevenblack/hosts/ || exit
git pull --rebase --autostash
# Overlay local whitelist/blacklist configuration from ~/.config/hosts.
cp ~/.config/hosts/* .
# Build with the default extensions (-a auto-accepts prompts), writing the
# result to alternates/tch and excluding the porn extension list.
python3 updateHostsFile.py -a -o alternates/tch -e porn
#if [ ! "$1" = "runfromcron" ] ; then
# scp ~/src/github.com/stevenblack/hosts/alternates/tch/hosts manuel:/tmp
# echo "enter sudo password for manuel"
# ssh -t manuel 'sudo mv /tmp/hosts /etc && sudo chown root /etc/hosts'
# ssh manuel 'ls -l /etc/hosts'
#fi
# Drop the copied config and any generated files from the working tree.
git reset --hard HEAD
| true |
aabb00075e67272f91aa9cf0cc81fc585ddf6370 | Shell | chaitanya811/elan_test | /motd/facts.d/environment.sh | UTF-8 | 207 | 3.140625 | 3 | [] | no_license | #!/bin/bash
# Facter-style external fact: prints environment=<env> based on the host's
# short name.  Naming convention: edal???p* = production, edal???t* = test,
# edal???d* = development; any other name prints nothing.
fqdn=$(hostname -f)
# Strip the domain (everything from the first dot); the original wrapped
# this in a useless $(echo ...).
short=${fqdn%%.*}
case $short in
    edal???p* ) echo "environment=production";;
    edal???t* ) echo "environment=test";;
    edal???d* ) echo "environment=development";;
esac
| true |
f423b436332629ac72a145109e49ad87ad241d5a | Shell | cultureamp/danger-systems-buildkite-plugin | /bin/docker_js_run | UTF-8 | 710 | 3.21875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
set -euo pipefail

# Default to empty so `set -u` does not abort when no token is provided.
GITHUB_REGISTRY_TOKEN=${GITHUB_REGISTRY_TOKEN:-}

# Install dependencies.  When a GitHub Packages token is available, setting
# the authToken, installing, and removing the authToken in a single step
# prevents the token from being persisted in docker image layers.
# (The original used two separate ifs with negated conditions; this is the
# equivalent if/else.)
if [[ -n "${GITHUB_REGISTRY_TOKEN}" ]]; then
    npm config set "//npm.pkg.github.com/:_authToken" "${GITHUB_REGISTRY_TOKEN}" \
        && yarn --no-progress --frozen-lockfile \
        && npm config delete "//npm.pkg.github.com/:_authToken"
else
    yarn --no-progress --frozen-lockfile
fi

# Install danger if it is not already in the project deps.
yarn run danger -V &>/dev/null || yarn add --dev danger

# Run danger against the CI environment.
yarn run danger ci
| true |
1e5b9daa52f7a053b8d93e56555e350ed2406da0 | Shell | mischief/9problems | /sys/lib/lp/process/generic | UTF-8 | 5,010 | 2.921875 | 3 | [] | no_license | #!/bin/rc
# Tries to determine what type of file you are printing and do the correct
# thing with it.
# It currently knows about images, troff intermediate, HTML and ascii files.
#
# Plan 9 rc script used by the lp print spooler.  $LPCLASS and $LPDEST come
# from the printer configuration; $LPLIB points at the lp support tree whose
# process/ directory holds the per-format conversion filters.
rfork e
# Spool standard input to a temp file so file(1) can classify it; the
# sigexit handler removes the spool file on any exit path.
temp=/tmp/lp$pid
fn sigexit { rm -f $temp }
proc=$LPLIB/process
# Enable command tracing when DEBUG is set.
if (! ~ $DEBUG '')
flag x +
cat >$temp
# $type(2) is file(1)'s classification word; later words refine it
# (e.g. the -T target for troff output).
type=`{file $temp}
# Dispatch on file type, then on the printer class, to pick the filter
# pipeline that converts the spooled data for $LPDEST.
switch ($type(2)) {
case troff
switch ($LPCLASS) {
case *Latin1* *post* *opost*
switch ($type(5)) {
# Latin1 is for compatibility with old research UNIX systems,
# doesn't work on Plan 9
case Latin1 post
tcs -s -f utf -t latin1 <$temp | $proc/dpost
case UTF
$proc/tr2post <$temp
}
case *gs!* *gsijs!*
switch ($type(5)) {
# Latin1 is for compatibility with old research UNIX systems,
# doesn't work on Plan 9
case Latin1 post
tcs -s -f utf -t latin1 <$temp | $proc/dpost |
$proc/gspipe
case UTF
$proc/tr2post <$temp | $proc/gspipe
}
case *
echo $type(2) -T$type(5) output is improper for $LPDEST >[1=2]
}
case special
switch ($type(4)) {
case '#b'
switch ($LPCLASS) {
case *post*
$proc/p9bitpost <$temp
case *gs!*
$proc/p9bitpost <$temp | $proc/gspipe
case *gsijs!*
$proc/p9bitpost <$temp | $proc/gspipeijs
}
case *
echo $type file is improper for $LPDEST >[1=2]
}
case Compressed plan old subfont
# type is really 'Compressed image' or 'plan 9 image' or
# 'old plan 9 image'
# 'subfont' is to cope with a bug in png (writing wrong length),
# 6 may 2008
switch ($LPCLASS) {
case *post*
$proc/p9bitpost <$temp
case *gs!*
$proc/p9bitpost <$temp | $proc/gspipe
case *gsijs!*
$proc/p9bitpost <$temp | $proc/gspipeijs
}
case jpeg
switch ($LPCLASS) {
case *post*
$proc/jpgpost <$temp
case *gs!*
$proc/jpgpost <$temp | $proc/gspipe
case *gsijs!*
$proc/jpgpost <$temp | $proc/gspipeijs
}
case GIF
switch ($LPCLASS) {
case *post*
$proc/gifpost <$temp
case *gs!*
$proc/gifpost <$temp | $proc/gspipe
case *gsijs!*
$proc/gifpost <$temp | $proc/gspipeijs
}
case PNG
switch ($LPCLASS) {
case *post*
$proc/pngpost <$temp
case *gs!*
$proc/pngpost <$temp | $proc/gspipe
case *gsijs!*
$proc/pngpost <$temp | $proc/gspipeijs
}
case ccitt-g31
switch ($LPCLASS) {
case *post*
$proc/g3post <$temp
case *gs!*
$proc/g3post <$temp | $proc/gspipe
case *gsijs!*
$proc/g3post <$temp | $proc/gspipeijs
}
case bitmap
# bitmap for research UNIX compatibility, does not work on Plan 9.
switch ($LPCLASS) {
case *post*
$proc/bpost <$temp
case *mhcc*
$proc/bpost <$temp | $proc/mhcc
case *
echo $type(2) file is improper for $LPDEST >[1=2]
}
case tex
# dvipost wants a filename ending in .dvi, so rename the spool file.
mv $temp $temp.dvi
temp=$temp.dvi
switch ($LPCLASS) {
case *post*
$proc/dvipost $temp
case *gs!*
$proc/dvipost $temp | $proc/gspipe
case *gsijs!*
$proc/dvipost $temp | $proc/gspipeijs
case *
echo $type(2) file is improper for $LPDEST >[1=2]
}
case postscript
switch ($LPCLASS) {
case *post*
$proc/post <$temp
case *gs!*
$proc/post <$temp | $proc/gspipe
case *gsijs!*
$proc/post <$temp | $proc/gspipeijs
case *
echo $type(2) file is improper for $LPDEST >[1=2]
}
case HPJCL HP
switch ($LPCLASS) {
case *HPJCL*
$proc/noproc <$temp
case *
echo $type(2) file is improper for $LPDEST >[1=2]
}
case daisy
switch ($LPDEST) {
case *
echo $type(2) file is improper for $LPDEST >[1=2]
}
case tiff
switch ($LPCLASS) {
case *post*
$proc/tiffpost $temp
case *gs!*
$proc/tiffpost $temp | $proc/gspipe
case *gsijs!*
$proc/tiffpost $temp | $proc/gspipeijs
case *
echo Unrecognized class of line printer for $LPDEST >[1=2]
}
case PDF
switch ($LPCLASS) {
case *pdf*
# if (~ $LPCLASS *duplex* && ~ $LPCLASS *HPJCL*)
# echo -n '&l1S' # HP JCL: duplex on
if (~ $LPCLASS *duplex* && ~ $LPCLASS *HPJCL*) {
echo '%-12345X@PJL DEFAULT DUPLEX=ON' # HP PJL
echo '%-12345X'
echo '%-12345X@PJL DEFAULT PS:MBT=ON' # `memory boost'
echo '%-12345X'
}
cat $temp # pass pdf unaltered to pdf printer
case *post*
$proc/pdfpost $temp
case *gs!*
$proc/pdfgs $temp
case *gsijs!*
$proc/pdfgsijs $temp
case *
echo Unrecognized class of line printer for $LPDEST >[1=2]
}
case microsoft # office document
switch ($LPCLASS) {
case *post*
doc2ps $temp | $proc/post
case *gs!*
doc2ps $temp | $proc/post | $proc/gspipe
case *gsijs!*
doc2ps $temp | $proc/post | $proc/gspipeijs
case *
echo $type(2) file is improper for $LPDEST >[1=2]
}
case empty
echo file is empty >[1=2]
case cannot
echo cannot open file >[1=2]
case English short extended alef limbo [Aa]scii assembler c latin rc sh \
as mail email message/rfc822 manual
switch ($LPCLASS) {
case *post*
$proc/ppost <$temp
case *gs!*
$proc/ppost <$temp | $proc/gspipe
case *gsijs!*
$proc/ppost <$temp | $proc/gspipeijs
case *canon*
$proc/can $* <$temp
case *
echo Unrecognized class of line printer for $LPDEST >[1=2]
}
case HTML
uhtml <$temp | html2ms | tbl | troff -ms | $proc/generic
case *
echo $type(2) file is improper for $LPDEST >[1=2]
}
# Collect pipeline status, remove the spool file, and exit.
wait
rv=$status
rm -f $temp
# exit $rv
exit
| true |
21f80278d1f209c5c794757c714c21a40bca78e8 | Shell | FrauBSD/viotop | /.git-hooks/commit-msg | UTF-8 | 1,380 | 3.421875 | 3 | [
"BSD-2-Clause"
] | permissive | #!/bin/sh
############################################################ IDENT(1)
#
# $Title: Hook to run after EDITOR exits, following `git commit' $
#
############################################################ INFORMATION
#
# System requirements: awk(1) git(1) xargs(1)
# See also: .git-filters/keywords
#
############################################################ MAIN

# Print diagnostics only when $DEBUG is set in the environment.
DEBUG(){ [ ! "$DEBUG" ] || echo "DEBUG: $*"; }
DEBUG "$0 $*"

# $1 is the path to the commit message file supplied by git.
# Test the commit message file for content (indicating commit not aborted)
# The awk program exits 0 iff at least one non-blank, non-comment line exists.
DEBUG "Checking commit message..."
awk '!/^[[:space:]]*(#|$)/{exit found++}END{exit !found}' "$1" &&
{ DEBUG "Committing!"; exit 0; }
DEBUG "Commit aborted!"

# Commit aborted: Unset keywords in modified text-files
# (staged Added/Copied/Modified files, NUL-delimited for safety).
DEBUG "Keyword modifications..."
git diff --cached --name-only -z --diff-filter=ACM |
xargs -0 .git-filters/keywords -d --
DEBUG "End List"

# Update the staging files
git diff --cached --name-only -z --diff-filter=ACM |
xargs -0 git add -u -v --

################################################################################
# END
################################################################################
#
# $Copyright: 2015-2017 The FrauBSD Project. All rights reserved. $
# $FrauBSD: viotop/.git-hooks/commit-msg 2020-04-17 15:26:14 -0700 freebsdfrau $
#
################################################################################
| true |
206e8f076787c77417b0903c13b2d6d52462c3f0 | Shell | rajecloud/DevOps | /Shell_Scripts/sv-sftp-backup.sh | UTF-8 | 3,233 | 3.859375 | 4 | [] | no_license | #! /bin/bash
# Moves yesterday's Pulse SFTP drops into the SV processing directory,
# e-mails the ASCII files, and archives ASCII/.SENT files under /tmp.
# NOTE(review): several expansions (e.g. $pulseroot/$pulsedir, D$date*) are
# unquoted; safe only while these fixed paths contain no spaces or globs.
script_name=${0##*/}
script_name=${script_name%.sh}
mount="/cardgen"
pulseroot="/cardgen/sftp"
pulsedir="pulse-sftp"
svroot="/cardgen/nas"
svdir="pulse_to_bo"
# Yesterday's date in the two formats used below (file prefix and display).
date="$(date -d "1 day ago" '+%y%m%d')"
# Count of files in the Pulse drop directory whose names contain that date.
file=$(ls $pulseroot/$pulsedir/ | grep -c "$date")
# Recipient list; empty here — presumably filled in per deployment. Verify.
email=""
output="$pulseroot/$pulsedir/output.txt"
format="ASCII"
pulsesent="/tmp/sentfiles"
pulseascii="/tmp/asciifiles"
pulsedata="/tmp/datafiles"
newdate="$(date +%F)_files"
olddate="$(date -d "1 day ago" '+%F')"
#################################################################################################
# Sanity checks: mount point, sftp subtree and drop directory must exist,
# and at least one file for yesterday's date must have been delivered.
# NOTE(review): ${error_code} is referenced in the logger lines but never
# set anywhere in this script — confirm whether it comes from the caller.
if [ ! -e ${mount} ] ; then
echo "EFS Mount Point $mount not mounted yet on this node"
exit 0
else
x=$(ls $mount | grep sftp)
if [[ $x != "sftp" ]] ; then
echo "$pulseroot directory is not available"
logger "${script_name}: Error ${error_code} \"${pulseroot}\" is not available on \"${mount}\" directory, Manual intervention required"
exit 0
else
y=$(ls $pulseroot | grep $pulsedir)
if [[ $y != "$pulsedir" ]] ; then
echo "$pulsedir under $pulseroot is not available to transfer the files"
logger "${script_name}: Error ${error_code} \"${pulsedir}\" is not available on \"${mount}\" directory, Manual intervention required"
exit 0
else
echo "Checking whether any files loaded by Pulse into this $pulseroot/$pulsedir directory"
if [ $file -eq 0 ] ; then
echo "No files loaded yet for the date $date on pulse directory, hence sending the email"
echo "Cannot find any files wrt date $date on $pulseroot/$pulsedir." | mail -s "Pulse File Status - $olddate" $email
logger "${script_name}: Error ${error_code} No files has been loaded on yesterday's date on \"${pulseroot}\" directory, Manual intervention required"
else
sudo cp -R $pulseroot/$pulsedir/D$date* $svroot/$svdir/
echo "All the files wrt $olddate has been copied over to $svroot/$svdir from $pulseroot/pulsedir for sv process"
fi
fi
fi
fi
#########################################################################################
# Stage directories, then classify each delivered file with file(1):
# lines mentioning $format (ASCII) identify text files to archive and mail.
sudo mkdir -p $pulseascii/$newdate $pulsesent/$newdate $pulsedata
sudo touch $output
sudo chmod 666 $output
# Already-transferred files are flagged with a .SENT suffix.
sudo mv $pulseroot/$pulsedir/D$date*.SENT $pulsesent/$newdate
cd $pulseroot/$pulsedir/
file D$date* > $output
while read line;
do
z=$(echo $line)
if [[ $z == *"$format"* ]] ; then
# file(1) output is "name: type"; keep the name before the colon.
a=$(echo $z | awk '{print $1}')
b=$(echo $a | cut -f1 -d":")
sudo mv $pulseroot/$pulsedir/$b $pulseascii/$newdate
fi
done < $output
############################################################################################
# Move the remaining (binary) files aside, mail the ASCII files, tar both
# archives, then remove the staging directories and the classifier output.
sudo mv $pulseroot/$pulsedir/D$date* $pulsedata/
cd $pulseascii/$newdate/
sudo echo "Pulse text files has been attached for your reference" | mail -s "Pulse loaded text files - $olddate" -A D$date* $email
cd $pulseascii/$newdate
sudo tar czf $pulseascii/${olddate}-ascii-files.tar.gz *
cd $pulsesent/$newdate
sudo tar czf $pulsesent/${olddate}-sent-files.tar.gz *
sudo rm -rf $pulsesent/$newdate $pulseascii/$newdate $output
echo "File Processed between Pulse to SV has been completed and Backup of files has been taken and mail send with attachments..!"
###############################################################################################
| true |
485c49ea0ff40e0475ab53ccb6615dfea60949b0 | Shell | blaedj/dotfiles | /bin/show-source | UTF-8 | 160 | 2.671875 | 3 | [
"MIT"
] | permissive | #!/bin/sh
#
# Passes the given file ($1) through pygmentize (http://pygments.org/) for
# syntax-highlighting, then dumps to stdout.
#
# Feed the file on stdin — same behavior as the original `cat $1 | ...`
# (including pygmentize's stdin lexer guessing) minus the useless cat,
# with "$1" quoted so filenames containing spaces work.
pygmentize -f terminal < "$1"
| true |
662146d094b62c187ca31393b6e2d4f4f5f6101d | Shell | casual-simulation/aux-cli | /lib/auxcli/commands/helper/bt-serial-scan | UTF-8 | 1,234 | 3.578125 | 4 | [] | no_license | #!/bin/bash
# Scans for Bluetooth devices with hcitool, records each discovered device
# in /etc/auxcli/devices.json via the helper "write" script, and marks
# every known device active/inactive based on the latest scan.
config_devices="/etc/auxcli/devices.json"
path_to_helper="/lib/auxcli/commands/helper"
# MAC addresses already present in the devices config.
readarray -t current_devices < <(jq -r '.[].mac' $config_devices)

# Run one hcitool scan and record every discovered device, then refresh
# current_devices from the config file.
scan_devices(){
# First line of `hcitool scan` is a header, hence the loop starts at 1.
readarray -t discovered_devices < <(hcitool scan)
for ((i = 1; i < ${#discovered_devices[@]}; i++)); do
# Each line is "<MAC> <name>".
device_mac=$(echo ${discovered_devices[i]} | awk '{print $1}')
device_name=$(echo ${discovered_devices[i]} | awk '{print $2}')
# NOTE(review): $device_desc is never assigned in this script, so the
# description argument is always empty — confirm whether the caller is
# expected to export it.
. $path_to_helper/write "devices" $device_mac $device_name "" $device_desc
done
readarray -t current_devices < <(jq -r '.[].mac' $config_devices)
}

# Keep scanning until at least one device is known.
first_scan(){
while [ ${#current_devices[@]} -eq 0 ]; do
scan_devices
done
}

# Mark each known device active/inactive depending on whether it appeared
# in the most recent scan.
active_check(){
# For each MAC in current_devices
for ((i = 0; i < ${#current_devices[@]}; i++)); do
# If the MAC wasn't discovered on the last scan
if [[ ! ${discovered_devices[*]} =~ ${current_devices[i]} ]]; then
# Set active false
. $path_to_helper/write "devices" "${current_devices[i]}" "" false ""
else
# Set active true
. $path_to_helper/write "devices" "${current_devices[i]}" "" true ""
fi
done
}

first_scan
scan_devices
active_check
| true |
95f0737b7cbc9d0ea08d808d569c8edb000b1c1b | Shell | igorng/alien4cloud-cloudify3-provider | /src/test/resources/blueprints/nodecellar/mongo-scripts/install-mongo.sh | UTF-8 | 1,000 | 4.15625 | 4 | [] | no_license | #!/bin/bash
# Cloudify lifecycle script: download and unpack MongoDB 2.4.9 into a
# per-execution temp directory.  `ctx` is the Cloudify context CLI.
TEMP_DIR="/tmp"
MONGO_ROOT=${TEMP_DIR}/$(ctx execution-id)/mongodb
MONGO_TARBALL=mongodb-linux-x86_64-2.4.9.tgz

# Create the working directory if missing.  The original tested `! -f`
# (regular file), which is always true for a directory; test -d instead.
if [ ! -d "${MONGO_ROOT}" ]; then
    mkdir -p "${MONGO_ROOT}" || exit $?
fi

ctx logger info "Changing directory to ${MONGO_ROOT}"
cd "${MONGO_ROOT}" || exit $?

ctx logger info "Downloading mongodb to ${MONGO_ROOT}"
if [ -f "${MONGO_TARBALL}" ]; then
    ctx logger info "Mongo tarball already exists, skipping"
else
    curl -O http://downloads.mongodb.org/linux/${MONGO_TARBALL}
fi

# Unpack once; later runs reuse the existing mongodb/ directory.
if [ ! -d mongodb ]; then
    ctx logger info "Untaring mongodb"
    tar -zxvf "${MONGO_TARBALL}" || exit $?
    ctx logger info "Moving mongo distro to ${MONGO_ROOT}/mongodb"
    mv mongodb-linux-x86_64-2.4.9 mongodb || exit $?
fi

ctx logger info "Creating mongodb data dir at ${MONGO_ROOT}/data"
if [ -d data ]; then
    ctx logger info "Mongodb data dir already exists, skipping"
else
    mkdir -p data || exit $?
fi

ctx logger info "Finished installing mongodb"
| true |
afbedb67fd2474be7a75592dc0257a78a0c308d1 | Shell | voc/schedule | /deploy.sh | UTF-8 | 545 | 3.140625 | 3 | [] | no_license | #!/bin/sh
echo ""
# Guard: warn when the current branch has commits not yet pushed to origin,
# because only what origin holds will be deployed.
DEPLOY_BRANCH=`git rev-parse --abbrev-ref HEAD`
if [ `git rev-parse --verify origin/$DEPLOY_BRANCH` != `git rev-parse --verify $DEPLOY_BRANCH` ]; then
echo "You have commits on the $DEPLOY_BRANCH branch not pushed to origin yet. They would not be deployed."
echo "do you still which to deploy what's already in the repo? then type yes"
read -p "" input
if [ "x$input" != "xyes" ]; then
exit 2
fi
echo ""
fi
host=data.c3voc.de
echo "deploying to $host
"
# Remote deploy: pull the repo on the host and run the test suite there.
# The here-document is terminated by the EOT line that follows this block.
ssh voc@$host sh << EOT
cd schedule
git pull
./test.sh
EOT | true |
78e23d9dd0a7b2e58dcd1b83f5fdff3e4f76ba46 | Shell | zchee/zsh-default-completions | /src/Base/Utility/_combination | UTF-8 | 2,514 | 3.453125 | 3 | [] | no_license | #autoload
# Usage:
#  _combination [-s S] TAG STYLE \
#     Ki1[:Ni1]=Pi1 Ki2[:Ni2]=Pi2 ... Kim[:Nim]=Pim Kj[:Nj] EXPL...
#
# STYLE should be of the form K1-K2-...-Kn.
#
# Example: telnet
#
# Assume a user sets the style `users-hosts-ports' as for the my-accounts
# tag:
#
#   zstyle ':completion:*:*:telnet:*:my-accounts' users-hosts-ports \
#       @host0: user1@host1: user2@host2:
#       @mail-server:{smtp,pop3}
#       @news-server:nntp
#       @proxy-server:8000
#
#
# `_telnet' completes hosts as:
#
#   _combination my-accounts users-hosts-ports \
#     ${opt_args[-l]:+users=${opt_args[-l]:q}} \
#     hosts "$expl[@]"
#
# This completes `host1', `host2', `mail-server', `news-server' and
# `proxy-server' according to the user given with `-l' if it is exists.
# And if it is failed, `_hosts' is called.
#
# `_telnet' completes ports as:
#
#   _combination my-accounts users-hosts-ports \
#     ${opt_args[-l]:+users=${opt_args[-l]:q}} \
#     hosts="${line[2]:q}" \
#     ports "$expl[@]"
#
# This completes `smtp', `pop3', `nntp' and `8000' according to the
# host argument --- $line[2] and the user option argument if it is
# exists. And if it is failed, `_ports' is called.
#
# `_telnet' completes users for an argument of option `-l' as:
#
#   _combination my-accounts users-hosts-ports \
#     ${line[2]:+hosts="${line[2]:q}"} \
#     ${line[3]:+ports="${line[3]:q}"} \
#     users "$expl[@]"
#
# This completes `user1' and `user2' according to the host argument and
# the port argument if they are exist. And if it is failed, `_users' is
# called.

local sep tag style keys pats key num tmp

# Optional -s SEP: field separator inside style values (default ':').
# Accept both "-s S" and the attached form "-sS".
if [[ "$1" = -s ]]; then
  sep="$2"
  shift 2
elif [[ "$1" = -s* ]]; then
  sep="${1[3,-1]}"
  shift
else
  sep=:
fi

tag="$1"
style="$2"
shift 2

# Split STYLE K1-K2-...-Kn into its keys; pats starts as all-'*' patterns,
# one per key, and gets narrowed by the K=P arguments below.
keys=( ${(s/-/)style} )
pats=( "${(@)keys/*/*}" )

# Consume the Ki[:Ni]=Pi constraint arguments, filling the pattern slot
# for the Ni-th occurrence of key Ki.
while [[ "$1" = *=* ]]; do
  tmp="${1%%\=*}"
  key="${tmp%:*}"
  if [[ $1 = *:* ]]; then
    num=${tmp##*:}
  else
    num=1
  fi
  pats[$keys[(in:num:)$key]]="${1#*\=}"
  shift
done

# The next argument is the key (with optional occurrence number) whose
# values are to be completed; anything remaining is EXPL for compadd.
key="${1%:*}"
if [[ $1 = *:* ]]; then
  num=${1##*:}
else
  num=1
fi
shift

# Look up the style; keep only entries matching the joined patterns, strip
# the fields before the requested key, then cut everything after it.
# If nothing matches (or the style is unset), fall back to the _<key> helper.
if zstyle -a ":completion:${curcontext}:$tag" "$style" tmp; then
  eval "tmp=( \"\${(@M)tmp:#\${(j($sep))~pats}}\" )"
  if (( keys[(in:num:)$key] != 1 )); then
    eval "tmp=( \${tmp#\${(j(${sep}))~\${(@)\${(@)keys[2,(rn:num:)\$key]}/*/*}}${~sep}} )"
  fi
  tmp=( ${tmp%%${~sep}*} )
  compadd "$@" -a tmp || { (( $+functions[_$key] )) && "_$key" "$@" }
else
  (( $+functions[_$key] )) && "_$key" "$@"
fi

# vim:ft=zsh
| true |
08cbb786f7ac8232d574b1a34c6aa41ae58b5e6f | Shell | wcmckee/pyunderdog | /underdog | UTF-8 | 1,278 | 3.1875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Per-user screen-time enforcer, intended to run once a minute from cron.
# Tracks minutes-used per logged-in user in /home/meta/<user>.ttd, warns
# 5 minutes before the limit, and restarts gdm3 when the limit is reached.
# NOTE(review): uses the deprecated $[ ... ] arithmetic and `[ ... -a ... ]`
# forms; kept as-is since this is a doc-only pass.
PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
host=$HOSTNAME
now=$(date +%D-%H:%M)
# users=$(ps h -C x-session-manager -o user:16; ps h -C gnome-session -o user:16 | sort -u | grep -vE "root|Debian-gdm"); users=$(echo $users)
# Currently logged-in users (minus root); re-echo collapses whitespace.
users=$(w -h | cut -f1 -d\ | grep -vE "root"); users=$(echo $users)
# Jitter start by up to 10s so concurrent cron runs don't collide.
sleep $[ $RANDOM % 10 ]
# Enforcement is active only while the after-school marker file exists.
[ -f /home/meta/after-school ] || exit 0
for user in $users; do
# The shared "public" login is tracked per machine.
[ $user == "public" ] && user="public-${host}"
ttd=0 ;
# Per-user limit from /home/meta/time-limit ("user minutes" lines);
# the file's last line is the default limit.
if grep "^$user " /home/meta/time-limit; then
limit=$(grep "^$user " /home/meta/time-limit | cut -f2 -d" ")
else
limit=$(tail -1 /home/meta/time-limit)
fi
# Resume today's tally (and any per-file limit override) if present.
[ -f /home/meta/${user}.ttd ] && read ttd limit < /home/meta/${user}.ttd
warn=$[ $limit - 5 ]
ttd=$[ $ttd + 1 ]
echo "user $user"
echo $ttd $limit > /home/meta/${user}.ttd
echo "ttd $ttd limit $limit warn $warn"
# Warn exactly once, 5 minutes before the limit (unless /home/meta/nokick).
if [ $ttd -eq $warn -a ! -f /home/meta/nokick ]; then
zenity --error --title="notification" --text "$user has had $ttd minutes today, limit is $limit" --display :0 &
fi
# At/over the limit: notify, then restart the display manager and kill
# the user's processes after a 30-second grace period.
if [ $ttd -ge $limit -a ! -f /home/meta/nokick ]; then
zenity --error --title="Bye" --text "$user has had $ttd minutes today. Let someone else have a turn" --display :0 &
sleep 30; /etc/init.d/gdm3 restart ; killall -9 -u $user
fi
done
| true |
fb63c13bba312dee9ba76f641d225d485f0524fb | Shell | dystopian-daydream/pyqt5-hn-reader | /bin/utilities.sh | UTF-8 | 306 | 3.1875 | 3 | [] | no_license | #!/bin/bash
# Terminal-color helpers used by the surrounding scripts.

# Bold red line.  (The original emitted green, SGR 32, despite the name.)
redecho() { echo -e "\e[0;31m$1\e[0m" ; }
# Bright-green line.
grnecho() { echo -e "\e[92m$1\e[0m" ; }
# Green text, no trailing newline.  The message is passed as data, not as
# the printf format, so '%' or backslashes in it are printed literally.
grnprnt() { printf '\e[32m%s\e[0m' "$1" ; }
# Green check mark (U+2714) followed by the message on its own line.
success() { printf '\e[32m\xE2\x9C\x94 \e[0m' && echo "$1" ; }
# 80-column rule of '=' characters, no trailing newline.
divider() { printf '=%.0s' {1..80} ; }
section() { echo && divider && echo && redecho "$1" && divider && echo ; } | true |
c9e7eb93ad4ea392144130c3582d820ad710e3a7 | Shell | everycity/ec-userland | /components/lighttpd/files/lighttpd-startup.sh | UTF-8 | 1,832 | 3.625 | 4 | [
"BSD-3-Clause"
] | permissive | #!/usr/xpg4/bin/sh
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL)". You may
# only use this file in accordance with the terms of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright 2011 EveryCity Ltd. All rights reserved.
#

# SMF (Solaris/illumos Service Management Facility) start method for
# lighttpd 1.4.  Defaults below can be overridden per-service via svccfg.
. /lib/svc/share/smf_include.sh

# Do not change these defaults below, they will be overwritten
# during the next package upgrade. Instead, set these properties
# via SMF using "svccfg -s lighttpd1.4:default" and setprop
config_file="/ec/etc/lighttpd/1.4/lighttpd.conf"
lighttpd_32_binary="/ec/lib/lighttpd/1.4/bin/lighttpd"
lighttpd_64_binary="/ec/lib/lighttpd/1.4/bin/amd64/lighttpd"
additional_startup_options=""
enable_64bit="false"

# getprop NAME: read SMF property NAME of this service into $PROPVAL.
# Empty-string properties (listed as "") are normalised to an empty value.
getprop() {
PROPVAL=""
svcprop -q -p $1 ${SMF_FMRI}
if [ $? -eq 0 ] ; then
PROPVAL=`svccfg -s ${SMF_FMRI} listprop $1 | \
/usr/bin/nawk '{ for (i = 3; i <= NF; i++) printf $i" " }' | \
/usr/bin/nawk '{ sub(/^\"/,""); sub(/\"[ \t]*$/,""); print }' | \
/usr/bin/sed 's/[ ]*$//g'`
if [ "${PROPVAL}" = "\"\"" ] ; then
PROPVAL=""
fi
return
fi
return
}

# varprop NAME: export shell variable NAME from SMF property lighttpd/NAME,
# keeping the hard-coded default when the property is unset/empty.
varprop() {
getprop lighttpd/$1
if [ "${PROPVAL}" != "" ] ; then
export $1="${PROPVAL}"
fi
}

varprop config_file
varprop additional_startup_options
varprop enable_64bit

# Select the 32- or 64-bit binary according to the enable_64bit property.
if [ "x$enable_64bit" = "xtrue" ] ; then
lighttpd_binary=$lighttpd_64_binary
else
lighttpd_binary=$lighttpd_32_binary
fi

case "$1" in
start)
echo "Starting lighttpd 1.4: \c"
$lighttpd_binary $additional_startup_options -f $config_file
echo "lighttpd."
;;
*)
echo "Usage: $0 {start}"
exit 1
;;
esac

exit 0
| true |
b3c0f5c447d3e46f64796e4dfc0222a4e9b8d214 | Shell | cruwe/pkgsrcbldr4dckr | /bulkcompile.sh | UTF-8 | 1,891 | 3.046875 | 3 | [
"BSD-2-Clause"
] | permissive | #!/usr/bin/env bash
# Configures and launches a pkgsrc pbulk bulk build for the current
# OS/version, using a previously generated pbulk bootstrap tarball.
# RELEASE must name the pkgsrc branch/release being built.
if [ -z ${RELEASE+x} ]; then
echo "RELEASE is unset, cannnot continue without."
exit 1
fi

GENDISTLOC=/pub/NetBSD/packages/distfiles/
MNTPNT=${MNTPNT:-/data}
# OS id/version taken from /etc/os-release (e.g. debian / "11").
OS=$(cat /etc/os-release | grep -Ei ^id | awk -F= '{print $2}')
VERSION=$(cat /etc/os-release | grep -Ei ^version_id | awk -F= '{print $2}')

export PREFIX=${PREFIX:-/usr/pkg}
# PREFIX with the leading '/' dropped and remaining '/' turned into '-',
# e.g. /usr/pkg -> usr-pkg; used in the bootstrap tarball name.
export PREFIX_s=$(echo $PREFIX | cut -c 2- | sed 's/\//-/g')
export ALLOW_VULNERABLE_PACKAGES=${ALLOW_VULNERABLE_PACKAGES:-yes}
export FAILOVER_FETCH=${FAILOVER_FETCH:-yes}
export FETCH_USING=${FETCH_SUING:-curl}
export MAKE_JOBS=${MAKE_JOBS:-4}
export MASTER_SITE_OVERRIDE=${MASTER_SITE_OVERRIDE:-ftp://ftp2.fr.NetBSD.org/$GENDISTLOC}
export SKIP_LICENSE_CHECK=${SKIP_LICENSE_CHECK:-yes}
export DISTDIR=${MNTPNT}/distfiles
export PACKAGES=${MNTPNT}/packages/${RELEASE}/${OS}/${VERSION}/pbulk
export WRKOBJDIR=${MNTPNT}/wrk/${RELEASE}/${OS}/${VERSION}
export PKGSRCDIR=${MNTPNT}/pkgsrc

# NOTE(review): "boostrap" (sic) — this spelling must match the directory
# created by the bootstrap job; confirm before correcting it.
BOOTSTRAPTGZDIR=${PACKAGES}/boostrap

# Unpack the pbulk framework bootstrap into / (provides /usr/pbulk).
tar -xzf \
${BOOTSTRAPTGZDIR}/pbulkbootstrap-${RELEASE}-${OS}-${VERSION}.tgz

# Append our site-specific overrides to the stock pbulk configuration.
cat >> /usr/pbulk/etc/pbulk.conf << EOF
#
# ----------------------------------------------------------------------------
# ------------------------- Inserting overrides ------------------------------
# ----------------------------------------------------------------------------
#
master_mode=no
#
prefix=${PREFIX}
bulklog=${MNTPNT}/bulklog/${RELEASE}/${OS}/${VERSION}
packages=${MNTPNT}/packages/${RELEASE}/${OS}/${VERSION}
pkgdb=\${prefix}/var/db/pkgdb
pkgsrc=${MNTPNT}/pkgsrc
varbase=\${prefix}/var
#
loc=\${bulklog}/meta
#
bootstrapkit=${BOOTSTRAPTGZDIR}/${PREFIX_s}bootstrap-${RELEASE}-${OS}-${VERSION}.tgz
#
mail=:
rsync=:
reuse_scan_results=no
limited_list=/pkglist
make=\${prefix}/bin/bmake
EOF

# Ensure the package destination exists and the build user owns the tree.
mkdir -p ${MNTPNT}/packages/${RELEASE}/${OS}/${VERSION}/All
chown -R pbulk:pbulk ${MNTPNT}
/usr/pbulk/bin/bulkbuild | true |
1b736ea9f7d8087e68a40e0beeec6c409a6f5b32 | Shell | lautarobarba/scripts | /make-pdf.sh | UTF-8 | 564 | 3 | 3 | [] | no_license | #!/bin/bash
#
################################
# _ #
# ___| |_ _ __ __ _ ______ _ #
# / __| __| '__/ _` |_ / _` | #
# \__ \ |_| | | (_| |/ / (_| | #
# |___/\__|_| \__,_/___\__,_| #
# #
#------------------------------#
# pdf-maker #
################################

# Rename every file in the current directory to a zero-padded imgNNN.jpg
# sequence, halve each image, then bundle them into archivo.pdf.
# Globs are used instead of parsing `ls` so filenames containing spaces or
# glob characters are handled correctly.
contador=1
for f in *
do
	[ -e "$f" ] || continue          # no files at all: glob stays literal
	mv -- "$f" "img$(printf "%03d" "$contador").jpg"
	contador=$((contador+1))
done

# Shrink each renamed image in place to 50% of its original dimensions.
for f in img*.jpg
do
	[ -e "$f" ] || continue
	convert -resize 50% "$f" "$f"
done

# Concatenate all JPEGs (sorted by the zero-padded names) into one PDF.
convert *.jpg archivo.pdf
| true |
fb46eaa23d16c70e85b125a8839fae761eaa06cc | Shell | snowflakedb/libsnowflakeclient | /deps/util-linux-2.39.0/tests/ts/lsfd/mkfds-unix-in-netns | UTF-8 | 2,899 | 3.484375 | 3 | [
"BSD-4-Clause-UC",
"GPL-2.0-only",
"LicenseRef-scancode-public-domain",
"GPL-3.0-or-later",
"BSD-2-Clause",
"GPL-2.0-or-later",
"LGPL-2.1-or-later",
"BSD-3-Clause",
"Apache-2.0"
] | permissive | #!/bin/bash
#
# Copyright (C) 2022 Masatake YAMATO <yamato@redhat.com>
#
# This file is part of util-linux.
#
# This file is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This file is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# util-linux test harness boilerplate: locate the test suite root and load
# the shared helper libraries.
TS_TOPDIR="${0%/*}/../.."
TS_DESC="UNIX sockets made in a differenct net namespace"

. "$TS_TOPDIR"/functions.sh
ts_init "$*"

ts_skip_nonroot          # unshare(2)/netns manipulation requires root

. "$TS_SELF/lsfd-functions.bash"
ts_check_test_command "$TS_CMD_LSFD"
ts_check_test_command "$TS_HELPER_MKFDS"

ts_cd "$TS_OUTDIR"

PID=
# File descriptor numbers the mkfds helper is asked to use:
FDSELFNS=3               # this process's own net namespace
FDALTNS=4                # the alternate (new) net namespace
FDSOCK=5                 # the UNIX socket created inside the alternate ns
# lsfd filter selecting only the socket on FD 5.
EXPR='((TYPE == "UNIX") or (TYPE == "UNIX-STREAM")) and (FD == 5)'
# Verify that lsfd reports the socket's net namespace (SOCK.NETNS of FD 5)
# as the inode of the alternate namespace held on FD 4.
#   $1 - socket type label used in the messages
#   $2 - pid of the mkfds helper process
compare_net_namespaces()
{
	local kind=$1
	local target=$2
	local ns_of_fd4
	local ns_of_sock

	ns_of_fd4=$(${TS_CMD_LSFD} -n -o INODE -p "${target}" -Q '(FD == 4)')
	ns_of_sock=$(${TS_CMD_LSFD} -n -o SOCK.NETNS -p "${target}" -Q '(FD == 5)')

	if [[ "${ns_of_fd4}" != "${ns_of_sock}" ]]; then
		echo "the netns for the $kind socket is not extracted well"
		echo "altns_inode=${ns_of_fd4}"
		echo "sock_netns=${ns_of_sock}"
	else
		echo "the netns for the $kind socket is extracted as expectedly"
	fi
}
# Filter stdin: for "stream" sockets normalise the type column via
# lsfd_strip_type_stream (from lsfd-functions.bash); otherwise pass through.
strip_type_stream()
{
	case "$1" in
	stream)
		lsfd_strip_type_stream
		;;
	*)
		cat
		;;
	esac
}
# Run one subtest per socket type, each exercising both a filesystem-path
# socket and an abstract-namespace socket created inside a fresh netns.
for t in stream dgram seqpacket; do
    ts_init_subtest "$t"
    {
	# mkfds blocks after setup; it resumes (and exits) on SIGCONT below.
	coproc MKFDS { "$TS_HELPER_MKFDS" unix-in-netns $FDSELFNS $FDALTNS $FDSOCK \
		       path=test_mkfds-unix-$t-ns \
		       type=$t ; }
	if read -r -u "${MKFDS[0]}" PID; then
	    ${TS_CMD_LSFD} -n \
			   -o ASSOC,STTYPE,NAME,SOCK.STATE,SOCK.TYPE,SOCK.LISTENING,UNIX.PATH \
			   -p "${PID}" -Q "${EXPR}" | strip_type_stream $t
	    # PIPESTATUS[0] records lsfd's own exit status (not the filter's).
	    echo 'ASSOC,STTYPE,NAME,SOCK.STATE,SOCK.TYPE,SOCK.LISTENING,UNIX.PATH': ${PIPESTATUS[0]}
	    compare_net_namespaces "$t" "${PID}"
	    kill -CONT "${PID}"
	fi
	wait "${MKFDS_PID}"
	# Same again, but with an abstract-namespace socket address.
	coproc MKFDS { "$TS_HELPER_MKFDS" unix-in-netns $FDSELFNS $FDALTNS $FDSOCK \
		       path=test_mkfds-unix-$t-ns \
		       abstract=true \
		       type=$t ; }
	if read -r -u "${MKFDS[0]}" PID; then
	    ${TS_CMD_LSFD} -n \
			   -o ASSOC,STTYPE,NAME,SOCK.STATE,SOCK.TYPE,SOCK.LISTENING,UNIX.PATH \
			   -p "${PID}" -Q "${EXPR}" | strip_type_stream $t
	    echo 'ASSOC,STTYPE,NAME,SOCK.STATE,SOCK.TYPE,SOCK.LISTENING,UNIX.PATH': ${PIPESTATUS[0]}
	    compare_net_namespaces "abstract $t" "${PID}"
	    kill -CONT "${PID}"
	fi
	wait "${MKFDS_PID}"
    } > "$TS_OUTPUT" 2>&1
    # mkfds exits with EPERM when unshare(2) is not allowed (e.g. containers).
    if [ "$?" == "$EPERM" ]; then
	ts_skip_subtest "unshare(2) is not permitted on this platform"
	continue
    fi
    ts_finalize_subtest
done

ts_finalize
| true |
ae1f9a2519424e6c7024427599c6fe4b05e49674 | Shell | ddrown/stm32-input-capture-f031 | /pi-graph/plot | UTF-8 | 5,203 | 2.65625 | 3 | [] | no_license | #!/bin/sh
# Input column layout of ../typescript samples:
# time rtc lse/2 lse/14 ch1 ch1# ch2 ch2# ch3 ch3# ch4 ch4# int-temp vref vbat
# 1493526556 3.473 2922328755 12441 0 0 2922042558 206 2190027411 154 1997130139 2 92.3259 3.28702 3.09282

# Keep only complete numeric records newer than the cutoff timestamp.
grep ^[0-9] ../typescript | awk '(length($15) && $1 > 1493614490) { print }' >samples

# Produce 30-sample averaged series for each measured quantity.
../avg --column=15 --combine=30 --id=1 --format=%.6f <samples >vbat.log
../avg --column=14 --combine=30 --id=1 --format=%.6f <samples >vref.log
../avg --every=1 --column=13 --combine=30 --id=1 --format=%.6f <samples >temp.log
../avg --column=2 --combine=30 --id=1 --format=%.3f <samples >rtc.log
# HSE (12MHz TCXO) offset in ppm, derived from the 48MHz counter on channel 7.
../counter-to-ppm --id=1 --column=7 --frequency=48000000 --number=8 --limit=10 <samples | awk '(length($4)) { print }' | ../avg --every=1 --column=4 --combine=30 --id=1 --format=%.3f >hse.log
# LSE (32kHz RTC) offset in ppm; outliers filtered before averaging.
../counter-to-ppm-adjust16 --column=3 --adjust16=4 --id=1 --frequency=48000000 <samples | awk '($5 < 3 && $5 > -3 && $6 <100 && $6 >60) { print }' | ../avg --every=1 --column=6 --combine=30 --id=1 --format=%.3f >lse.log

# Join series on the timestamp column for the temperature-correlation plots.
join -j1 temp.log hse.log >temp-hse.log
join -j1 temp-hse.log lse.log >temp-lse.log
join -j1 temp.log vbat.log >temp-vbat.log

# board1 - 1750 [1742 1314] 1527 [1520] 1918 3358
# board2 - 1754 [1754 1324] 1525 [1521] 1895 3273

# --- coin cell voltage over time ---
gnuplot <<EOF
set terminal png size 900,600
set xdata time
set timefmt "%s"
set output "vbat.png"
set grid
set xlabel "DD-HH:MM"
set format x "%d-%H:%M"
set xtic rotate by -45 scale 0
set ylabel ""
set ytics format "%1.4f V" nomirror
set title "coin cell battery"
set key bottom left box
plot \
'vbat.log' using 1:3 title 'vbat' with lines
EOF

# --- voltage reference over time ---
gnuplot <<EOF
set terminal png size 900,600
set xdata time
set timefmt "%s"
set output "vref.png"
set grid
set xlabel "DD-HH:MM"
set format x "%d-%H:%M"
set xtic rotate by -45 scale 0
set ylabel ""
set ytics format "%1.4f V" nomirror
set title "voltage reference"
set key bottom left box
plot \
'vref.log' using 1:3 title 'vref' with lines
EOF

# --- RTC offset (converted to milliseconds) over time ---
gnuplot <<EOF
set terminal png size 900,600
set xdata time
set timefmt "%s"
set output "rtc.png"
set grid
set xlabel "DD-HH:MM"
set format x "%d-%H:%M"
set xtic rotate by -45 scale 0
set ylabel ""
set ytics format "%1.0f ms" nomirror
set title "rtc offset"
set key bottom left box
plot \
'rtc.log' using 1:(\$3*1000) title 'rtc' with lines
EOF

# --- 32kHz LSE offset, raw and relative to the HSE, over time ---
gnuplot <<EOF
set terminal png size 900,600
set xdata time
set timefmt "%s"
set output "lse.png"
set grid
set xlabel "DD-HH:MM"
set format x "%d-%H:%M"
set xtic rotate by -45 scale 0
set ylabel "offset"
set ytics format "%1.3f ppm" nomirror
set title "32khz RTC"
set key bottom left box
plot \
'lse.log' using 1:3 title 'lse' with lines, \
'temp-lse.log' using 1:(\$7-\$5) title 'lse-hse' with lines
EOF

# --- 12MHz TCXO offset over time ---
gnuplot <<EOF
set terminal png size 900,600
set xdata time
set timefmt "%s"
set output "hse.png"
set grid
set xlabel "DD-HH:MM"
set format x "%d-%H:%M"
set xtic rotate by -45 scale 0
set ylabel "offset"
set ytics format "%1.3f ppm" nomirror
set title "12MHz TCXO"
set key bottom left box
plot \
'hse.log' using 1:3 title 'hse' with lines
EOF

# --- microcontroller internal temperature over time ---
gnuplot <<EOF
set terminal png size 900,600
set xdata time
set timefmt "%s"
set output "temp.png"
set grid
set xlabel "DD-HH:MM"
set format x "%d-%H:%M"
set xtic rotate by -45 scale 0
set ylabel "Temperature"
set ytics format "%1.1f F" nomirror
set title "Microcontroller temp"
set key bottom left box
plot \
'temp.log' using 1:3 title 'temp' with lines
EOF

# --- HSE offset vs temperature, with quadratic fit and RMSE bands ---
gnuplot <<EOF
a=-1.46416
b=-0.00641566
c=0.00023466
d=1.85336
f(x) = a+b*(x-d)+c*(x-d)**2
fit f(x) "temp-hse.log" using 3:5 via a,b,c,d
fit_stddev = sqrt(FIT_WSSR / (FIT_NDF + 1 ))
set label 1 gprintf("fit RMSE = %1.3f ppm", fit_stddev) at graph 0.9,0.9 right front
set terminal png size 900,600
set output "temp-hse.png"
set grid
set xlabel "Temperature"
set format x "%1.1f F"
set xtic rotate by -45 scale 0
set ylabel "frequency"
set ytics format "%1.3f ppm" nomirror
set title "12MHz TCXO"
set key bottom right box
plot \
'temp-hse.log' using 3:5 title 'hse' with points, \
f(x) title "temp poly fit" with line, \
f(x)+fit_stddev title "poly fit + RMSE" with line, \
f(x)-fit_stddev title "poly fit - RMSE" with line
EOF

# --- LSE-minus-HSE offset vs temperature, with parabolic fit ---
gnuplot <<EOF
a=78.8162
c=-0.0114065
d=87.1253
f(x) = a+c*(x-d)**2
fit f(x) "temp-lse.log" using 3:(\$7-\$5) via a,c,d
fit_stddev = sqrt(FIT_WSSR / (FIT_NDF + 1 ))
set label 1 gprintf("fit RMSE = %1.3f ppm", fit_stddev) at graph 0.9,0.9 right front
set terminal png size 900,600
set output "temp-lse.png"
set grid
set xlabel "Temperature"
set format x "%1.1f F"
set xtic rotate by -45 scale 0
set ylabel "frequency"
set ytics format "%1.3f ppm" nomirror
set title "32khz RTC"
set key bottom left box
plot \
'temp-lse.log' using 3:(\$7-\$5) title 'lse' with points, \
f(x) title "temp poly fit" with line, \
f(x)+fit_stddev title "poly fit + RMSE" with line, \
f(x)-fit_stddev title "poly fit - RMSE" with line
EOF

# --- coin cell voltage vs temperature scatter ---
gnuplot <<EOF
set terminal png size 900,600
set output "temp-vbat.png"
set grid
set xlabel "Temperature"
set format x "%1.1f F"
set xtic rotate by -45 scale 0
set ylabel "Voltage"
set ytics format "%1.4f V" nomirror
set title "Coin Cell"
set key bottom left box
plot \
'temp-vbat.log' using 3:5 title 'vbat' with points
EOF

# Publish the rendered graphs.
rsync -avP *.png index.html vps3:dan.drown.org/html/stm32/hat2/
| true |
605f702d86c1762fdca5f91eb3b7da1dd980d44f | Shell | alexdavid/dotfiles | /boar/macrotests/test_simple_concurrency.sh | UTF-8 | 1,244 | 3.890625 | 4 | [] | no_license | # Test that several processes can work with the repository concurrently.
# Abort on any unchecked failure; create the shared repository under test.
set -e
$BOAR mkrepo TESTREPO || exit 1
# One worker: create a session, make 10 commits of 100 files each, then
# replay every revision and verify the expected marker file contents.
#   $1 - session name (also used as the checkout directory)
dotest()
{
    SESSION=$1
    $BOAR mksession --repo=TESTREPO $SESSION || exit 1
    $BOAR co --repo=TESTREPO $SESSION || exit 1
    for i in {1..10}; do
	echo "$SESSION PID=$$ commit ${i} here" >$SESSION/commit.txt || exit 1
	for j in {1..100}; do
	    date >$SESSION/"$i-$j.txt"
	done
	(cd $SESSION && $BOAR ci -q) || exit 1
    done
    # Now verify that all data is present
    REVS=$($BOAR list --repo=TESTREPO $SESSION|grep Revision|cut -d ' ' -f 3)
    index=0
    for rev in $REVS; do
	if [ $index -ne 0 ]; then
	    # The first revision is session creation and will not contain the file
	    (cd $SESSION && $BOAR update -q -r $rev) || exit 1
	    grep "commit $index here" $SESSION/commit.txt || { echo "Revision $rev did not contain expected data"; exit 1; }
	fi
	# NOTE(review): $[...] is deprecated arithmetic syntax; kept as-is.
	index=$[$index + 1]
    done
    # Expect session creation + 10 commits = 11 revisions.
    if [ $index -ne 11 ]; then
	echo "Wrong number of revisions for $SESSION"
	exit 1
    fi
}
# Launch six workers concurrently against the same repository, then wait
# for each and fail if any worker failed.
PIDS=""
for i in {0..5}; do
    dotest Session$i >Session$i.txt 2>&1 &
    PIDS="$PIDS $!"
done

for PID in $PIDS; do
    wait $PID || { echo "Process $PID failed"; exit 1; }
done

# Final integrity check over everything the workers wrote.
$BOAR verify --repo=TESTREPO || exit 1
true
| true |
08a95bab2e315b86594fdb11cd592e22514f4dcf | Shell | Azure/azure-quickstart-templates | /application-workloads/ibm-cloud-pak/ibm-cloud-pak-for-data/scripts/openshiftCloudPakConfig.sh | UTF-8 | 5,167 | 2.734375 | 3 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-public-domain"
] | permissive | #!/bin/sh
# Positional arguments from the ARM template deployment.
export ARTIFACTSLOCATION=${1::-1}         # strip trailing char (closing quote/slash)
export ARTIFACTSTOKEN=\"$2\"
export SUDOUSER=$3
export WORKERNODECOUNT=$4
export NAMESPACE=$5
export APIKEY=$6
export INSTALLERHOME=/home/$SUDOUSER/.ibm
export OCPTEMPLATES=/home/$SUDOUSER/.openshift/templates
runuser -l $SUDOUSER -c "mkdir -p $INSTALLERHOME"
runuser -l $SUDOUSER -c "mkdir -p $OCPTEMPLATES"

# Root kube config
mkdir -p /root/.kube
cp /home/$SUDOUSER/.kube/config /root/.kube/config

# Create Registry Route and scale the internal registry to one replica per worker.
runuser -l $SUDOUSER -c "oc patch configs.imageregistry.operator.openshift.io/cluster --type merge -p '{\"spec\":{\"defaultRoute\":true, \"replicas\":$WORKERNODECOUNT}}'"
runuser -l $SUDOUSER -c "sleep 20"
runuser -l $SUDOUSER -c "oc project kube-system"
registryRoute=$(oc get route default-route -n openshift-image-registry --template='{{ .spec.host }}' --kubeconfig /home/$SUDOUSER/.kube/config)

# Allow pulls from the exposed registry route without TLS verification.
runuser -l $SUDOUSER -c "cat > $OCPTEMPLATES/registries.conf <<EOF
unqualified-search-registries = ['registry.access.redhat.com', 'docker.io']
[[registry]]
prefix = \"${registryRoute}\"
insecure = true
blocked = false
location = \"${registryRoute}\"
EOF"

# Machine Configs
# Each MachineConfig below embeds its target file as base64 in the ignition
# "source" data URL; the content is generated/opaque and must not be edited.
insecureRegistryData=$(cat $OCPTEMPLATES/registries.conf | base64 -w 0)
# Worker MC: push registries.conf to every worker node.
runuser -l $SUDOUSER -c "cat > $OCPTEMPLATES/insecure-registry-mc.yaml <<EOF
apiVersion: machineconfiguration.openshift.io/v1
kind: MachineConfig
metadata:
  labels:
    machineconfiguration.openshift.io/role: worker
  name: 90-worker-container-runtime
spec:
  config:
    ignition:
      version: 2.2.0
    storage:
      files:
      - contents:
          source: data:text/plain;charset=utf-8;base64,${insecureRegistryData}
        filesystem: root
        mode: 0644
        path: /etc/containers/registries.conf
EOF"

# Worker MC: kernel tunables (vm.max_map_count, SysV IPC limits) required by
# Cloud Pak for Data workloads.
runuser -l $SUDOUSER -c "cat > $OCPTEMPLATES/sysctl-mc.yaml <<EOF
apiVersion: machineconfiguration.openshift.io/v1
kind: MachineConfig
metadata:
  labels:
    machineconfiguration.openshift.io/role: worker
  name: 98-master-worker-sysctl
spec:
  config:
    ignition:
      version: 2.2.0
    storage:
      files:
      - contents:
          source: data:text/plain;charset=utf-8;base64,dm0ubWF4X21hcF9jb3VudCA9IDI2MjE0NAprZXJuZWwuc2VtID0gMjUwIDEwMjQwMDAgMTAwIDQwOTYKa2VybmVsLm1zZ21heCA9IDY1NTM2Cmtlcm5lbC5tc2dtbmIgPSA2NTUzNg==
        filesystem: root
        mode: 0644
        path: /etc/sysctl.conf
EOF"

# Worker MC: raise per-process open-file limits.
runuser -l $SUDOUSER -c "cat > $OCPTEMPLATES/limits-mc.yaml <<EOF
apiVersion: machineconfiguration.openshift.io/v1
kind: MachineConfig
metadata:
  labels:
    machineconfiguration.openshift.io/role: worker
  name: 15-security-limits
spec:
  config:
    ignition:
      version: 2.2.0
    storage:
      files:
      - contents:
          source: data:text/plain;charset=utf-8;base64,KiAgICAgICAgICAgICAgIGhhcmQgICAgbm9maWxlICAgICAgICAgNjY1NjAKKiAgICAgICAgICAgICAgIHNvZnQgICAgbm9maWxlICAgICAgICAgNjY1NjA=
        filesystem: root
        mode: 0644
        path: /etc/security/limits.conf
EOF"

# Worker MC: CRI-O runtime configuration (ulimits, pids_limit, etc.).
runuser -l $SUDOUSER -c "cat > $OCPTEMPLATES/crio-mc.yaml <<EOF
apiVersion: machineconfiguration.openshift.io/v1
kind: MachineConfig
metadata:
  labels:
    machineconfiguration.openshift.io/role: worker
  name: 90-worker-crio
spec:
  config:
    ignition:
      version: 2.2.0
    storage:
      files:
      - contents:
          source: data:text/plain;charset=utf-8;base64,W2NyaW9dCltjcmlvLmFwaV0Kc3RyZWFtX2FkZHJlc3MgPSAiIgpzdHJlYW1fcG9ydCA9ICIxMDAxMCIKW2NyaW8ucnVudGltZV0KZGVmYXVsdF91bGltaXRzID0gWwogICAgIm5vZmlsZT02NTUzNjo2NTUzNiIKXQpjb25tb24gPSAiL3Vzci9saWJleGVjL2NyaW8vY29ubW9uIgpjb25tb25fY2dyb3VwID0gInBvZCIKYXBwYXJtb3JfcHJvZmlsZSA9ICJjcmlvLWRlZmF1bHQiCmNncm91cF9tYW5hZ2VyID0gInN5c3RlbWQiCmhvb2tzX2RpciA9IFsKICAgICIvZXRjL2NvbnRhaW5lcnMvb2NpL2hvb2tzLmQiLApdCnBpZHNfbGltaXQgPSAxMjAwMApbY3Jpby5pbWFnZV0KZ2xvYmFsX2F1dGhfZmlsZSA9ICIvdmFyL2xpYi9rdWJlbGV0L2NvbmZpZy5qc29uIgpwYXVzZV9pbWFnZSA9ICJxdWF5LmlvL29wZW5zaGlmdC1yZWxlYXNlLWRldi9vY3AtdjQuMC1hcnQtZGV2QHNoYTI1NjoyZGMzYmRjYjJiMGJmMWQ2YzZhZTc0OWJlMDE2M2U2ZDdjYTgxM2VjZmJhNWU1ZjVkODg5NzBjNzNhOWQxMmE5IgpwYXVzZV9pbWFnZV9hdXRoX2ZpbGUgPSAiL3Zhci9saWIva3ViZWxldC9jb25maWcuanNvbiIKcGF1c2VfY29tbWFuZCA9ICIvdXNyL2Jpbi9wb2QiCltjcmlvLm5ldHdvcmtdCm5ldHdvcmtfZGlyID0gIi9ldGMva3ViZXJuZXRlcy9jbmkvbmV0LmQvIgpwbHVnaW5fZGlycyA9IFsKICAgICIvdmFyL2xpYi9jbmkvYmluIiwKXQpbY3Jpby5tZXRyaWNzXQplbmFibGVfbWV0cmljcyA9IHRydWUKbWV0cmljc19wb3J0ID0gOTUzNw==
        filesystem: root
        mode: 0644
        path: /etc/crio/crio.conf
EOF"

# Worker + master MCs: chrony NTP configuration.
runuser -l $SUDOUSER -c "cat > $OCPTEMPLATES/chrony-mc.yaml <<EOF
---
apiVersion: machineconfiguration.openshift.io/v1
kind: MachineConfig
metadata:
  labels:
    machineconfiguration.openshift.io/role: worker
  name: 90-worker-chrony
spec:
  config:
    ignition:
      version: 2.2.0
    storage:
      files:
      - contents:
          source: data:text/plain;charset=utf-8;base64,cG9vbCAyLnJoZWwucG9vbC5udHAub3JnIGlidXJzdApkcmlmdGZpbGUgL3Zhci9saWIvY2hyb255L2RyaWZ0Cm1ha2VzdGVwIDEuMCAzCnJ0Y3N5bmMKbG9nZGlyIC92YXIvbG9nL2Nocm9ueQpyZWZjbG9jayBQSEMgL2Rldi9wdHAwIHBvbGwgMyBkcG9sbCAtMiBvZmZzZXQgMAo=
        filesystem: root
        mode: 0644
        path: /etc/chrony.conf
---
apiVersion: machineconfiguration.openshift.io/v1
kind: MachineConfig
metadata:
  labels:
    machineconfiguration.openshift.io/role: master
  name: 90-master-chrony
spec:
  config:
    ignition:
      version: 2.2.0
    storage:
      files:
      - contents:
          source: data:text/plain;charset=utf-8;base64,cG9vbCAyLnJoZWwucG9vbC5udHAub3JnIGlidXJzdApkcmlmdGZpbGUgL3Zhci9saWIvY2hyb255L2RyaWZ0Cm1ha2VzdGVwIDEuMCAzCnJ0Y3N5bmMKbG9nZGlyIC92YXIvbG9nL2Nocm9ueQpyZWZjbG9jayBQSEMgL2Rldi9wdHAwIHBvbGwgMyBkcG9sbCAtMiBvZmZzZXQgMAo=
        filesystem: root
        mode: 0644
        path: /etc/chrony.conf
EOF"

# Install the host-level registries.conf, then apply all MachineConfigs.
# Applying MCs triggers a rolling reboot of the cluster nodes.
runuser -l $SUDOUSER -c "sudo mv $OCPTEMPLATES/registries.conf /etc/containers/registries.conf"
runuser -l $SUDOUSER -c "oc create -f $OCPTEMPLATES/insecure-registry-mc.yaml"
runuser -l $SUDOUSER -c "oc create -f $OCPTEMPLATES/sysctl-mc.yaml"
runuser -l $SUDOUSER -c "oc create -f $OCPTEMPLATES/limits-mc.yaml"
runuser -l $SUDOUSER -c "oc create -f $OCPTEMPLATES/crio-mc.yaml"
runuser -l $SUDOUSER -c "oc create -f $OCPTEMPLATES/chrony-mc.yaml"
runuser -l $SUDOUSER -c "echo 'Sleeping for 12mins while MCs apply and the cluster restarts' "
runuser -l $SUDOUSER -c "sleep 12m"
echo "$(date) - ############### Script Complete #############" | true |
aae2153ea5a02e6d31b999e25ea7574a7c627a70 | Shell | ititandev/magnum | /magnum/drivers/common/templates/kubernetes/fragments/install-helm-modules.sh | UTF-8 | 859 | 3.859375 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
step="install-helm-modules.sh"
printf "Starting to run ${step}\n"

# Heat-provided parameters (e.g. TILLER_ENABLED) for this Magnum cluster.
. /etc/sysconfig/heat-params

set -ex

# Block until the local kube-apiserver answers its health check.
echo "Waiting for Kubernetes API..."
until  [ "ok" = "$(curl --silent http://127.0.0.1:8080/healthz)" ]
do
    sleep 5
done

if [ "$(echo ${TILLER_ENABLED} | tr '[:upper:]' '[:lower:]')" != "true" ]; then
    echo "Use --labels tiller_enabled=True to allow for tiller dependent resources to be installed"
else
    HELM_MODULES_PATH="/srv/magnum/kubernetes/helm"
    mkdir -p ${HELM_MODULES_PATH}
    helm_modules=(${HELM_MODULES_PATH}/*)

    # Only run kubectl if we have modules to install
    # (with no matches the glob stays literal, so the first array element
    # equals the unexpanded pattern string and the comparison fails)
    if [ "${helm_modules}" != "${HELM_MODULES_PATH}/*" ]; then
        for module in "${helm_modules[@]}"; do
            echo "Applying ${module}."
            kubectl apply -f ${module}
        done
    fi
fi

printf "Finished running ${step}\n"
| true |
2da82d332fad64db72150f1b31edbb0b05ef0c98 | Shell | ccoors/game_of_life | /tools/build_windows_static_x86.sh | UTF-8 | 214 | 2.515625 | 3 | [
"MIT"
] | permissive | #!/bin/sh
set -e
# Resolve the directory containing this script and build relative to it,
# so the script works from any working directory. Paths are quoted to
# survive spaces in the checkout location.
SCRIPT=$(readlink -f "$0")
SCRIPTPATH=$(dirname "$SCRIPT")
cd "$SCRIPTPATH"
# Fully static 32-bit Windows cross build of all sources.
i686-w64-mingw32-g++ -static -static-libgcc -static-libstdc++ -lpthread -Wall -O3 -std=c++14 -o main_x86.exe ../src/*.cpp
| true |
01148bc3f746413c1bf06c93892a2d034ef3233f | Shell | LiveLink-JoshOne/init_ll | /install.sh | UTF-8 | 4,699 | 3.53125 | 4 | [] | no_license | #!/bin/bash
## Install required packages after installing Ubuntu 18.04

APP_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
echo "[INIT_LL][INFO] App cloned to ${APP_DIR}"
echo ''

echo '[INIT_LL][INPUT] Please run as root / sudo - CAUTION!'
echo 'This will install numerous packages and ruby2.3 (and set it as your primary ruby version)'
echo 'Please press Ctrl+C to cancel or press ENTER to continue'
read

echo '[INIT_LL][INFO] Updating sources file - backup in /root/sources.list.orig'
cp /etc/apt/sources.list /root/sources.list.orig
cat <<EOM > /etc/apt/sources.list
deb http://gb.archive.ubuntu.com/ubuntu/ bionic main restricted universe multiverse
# deb-src http://gb.archive.ubuntu.com/ubuntu/ bionic main restricted universe multiverse
deb http://gb.archive.ubuntu.com/ubuntu/ bionic-updates main restricted universe multiverse
# deb-src http://gb.archive.ubuntu.com/ubuntu/ bionic-updates main restricted universe multiverse
deb http://gb.archive.ubuntu.com/ubuntu/ bionic-backports main restricted universe multiverse
# deb-src http://gb.archive.ubuntu.com/ubuntu/ bionic-backports main restricted universe multiverse
deb http://archive.canonical.com/ubuntu bionic partner
# deb-src http://archive.canonical.com/ubuntu bionic partner
deb http://security.ubuntu.com/ubuntu bionic-security main restricted universe multiverse
# deb-src http://security.ubuntu.com/ubuntu bionic-security main restricted universe multiverse
EOM

echo ''
echo '[INIT_LL][INFO] Updating system'
apt update
apt upgrade

echo ''
echo '[INIT_LL][INFO] Installing essential packages'
# Fixed: the Ubuntu package is "build-essential" (the old "build-essentials"
# spelling made this whole apt line fail).
apt install -yf ntfs-3g ntfs-3g-dev make gcc build-essential xdotool xclip ubuntu-restricted-* linux-firmware libimage-exiftool-perl libgmp-dev
apt install -yf libmysqlclient-dev mysql-client mysql-utilities openssh-server sshpass
apt install -yf vim ncdu htop nbtscan curl tmux screen gparted imagemagick git terminator cowsay xcowsay gimp pina kazam vlc guake gnome-tweak-tool chrome-gnome-shell simplescreenrecorder
# Fixed: "apt-install" is not a command; this line previously failed silently.
apt install -yf ruby ruby-dev ruby-bundler

echo ''
echo '[INIT_LL][INFO] Installing secondary package managers'
apt install -yf snap flatpak
apt install -yf gnome-software-plugin-flatpak
flatpak remote-add --if-not-exists flathub https://flathub.org/repo/flathub.flatpakrepo

echo ''
echo '[INIT_LL][INFO] Installing more essential packages'
snap install slack --classic
snap install atom --classic

echo ''
echo '[INIT_LL][INFO] Installing up-to-date Ruby'
apt-add-repository ppa:brightbox/ruby-ng
apt update
apt install -yf ruby2.3 ruby2.3-dev ruby-bundler
gem install bundler

echo ''
echo '[INIT_LL][INFO] Syncing filesystem'
sync
updatedb
sync

echo ''
echo '-------------------------------------------------------------------------------------'
echo '[INIT_LL][INPUT] Would you like to install less essential packages?'
select pkg_yn in 'Yes' 'No'; do
	case $pkg_yn in
		Yes ) apt install -yf telegram-desktop ; snap install android-studio --classic ; break ;;
		No ) break ;;
	esac
	echo ''
done
echo '[INIT_LL][INPUT] Would you like to install Gyazo?'
select gyazo_yn in 'Yes' 'No'; do
	case $gyazo_yn in
		# NOTE(review): piping a remote script straight into bash is a
		# security risk - kept because it is the vendor-documented install.
		Yes ) curl -s https://packagecloud.io/install/repositories/gyazo/gyazo-for-linux/script.deb.sh | bash ; apt install gyazo ; break ;;
		No ) break ;;
	esac
	echo ''
done
echo '[INIT_LL][INPUT] Would you like to install Virtualisation?'
select virt_yn in 'Yes' 'No'; do
	case $virt_yn in
		Yes ) apt install -yf virtualbox virtualbox-ext-pack virtualbox-guest-additions-iso virtualbox-guest-utils virtualbox-guest-x11 ; break ;;
		No ) break ;;
	esac
	echo ''
done
echo '-------------------------------------------------------------------------------------'

echo '[INIT_LL][INFO] Symlinking init executables to /usr/local/bin'
ln -s "${APP_DIR}/init_llts" /usr/local/bin/init_llts
ln -s "${APP_DIR}/init_llice" /usr/local/bin/init_llice

echo ''
echo '[INIT_LL][INFO] Cleaning up'
apt remove --purge ubuntu-web-launchers -yf
apt autoremove
apt clean
updatedb
sync

echo ''
echo '[INIT_LL][INFO] FINISHED!!!'
echo ''
echo '[INIT_LL][INFO] Post-Install Checklist:'
echo '1) Enable proprietary drivers in Additional Drivers tab of Software & Updates'
echo '2) Set up keyboard shortcuts (Terminator, Gyazo)'
echo '3) Make sure to set up required partitions in /etc/fstab'
echo '4) Install Google Chrome via https://www.google.co.uk/chrome'
echo '5) Gnome tweaks & extensions @ https://extensions.gnome.org/'
echo '6) Install XLL from git repo: https://github.com/EmpyDoodle/xll_support.git'
echo ''
echo '[INIT_LL][INFO] To run your workstation profile, simply run $ init_llts or $ init_llice'
| true |
8bf7b40557b7ab706204f8a02174ee20a7e89596 | Shell | b2blosangeles/easydocker | /cron.sh | UTF-8 | 1,142 | 3.65625 | 4 | [] | no_license | #!/bin/bash
# Poll a queue directory (_cron) next to this script and run each queued
# shell file once; mark.data records the script currently running so that
# stuck jobs can be reaped.
SCR_DIR=$(cd `dirname $0` && pwd)
SCRIPTFN=$(basename -- $SCR_DIR)
DATA_DIR="$(dirname "$SCR_DIR")/_"$SCRIPTFN"_DATA"
CRON_PATH=$DATA_DIR/_cron
TMP_PATH=$DATA_DIR/_tmp
mkdir -p $CRON_PATH
mkdir -p $TMP_PATH

markfile=$DATA_DIR/mark.data

# --- clean longer time task -----
# If the mark file is older than 120 seconds, assume the recorded job hung:
# remove its script, kill any matching process, and clear the mark.
# NOTE(review): the loop variable is unused; find is only used here as an
# age test on the mark file - confirm before refactoring.
for file in $(find $markfile -not -newermt '-120 seconds' 2>&1) ;do
	if [ -f "$markfile" ]; then
		vfn=$(<$markfile)
		# NOTE(review): $vfn is interpolated into an eval'd command line -
		# unsafe if the mark file ever contains shell metacharacters.
		cmda="rm -fr $vfn && pkill -f $vfn > /dev/null && rm -fr $markfile >/dev/null 2>&1"
		eval "$cmda"
	fi
done

for f in "$CRON_PATH"/*; do
	# A mark file means another job is still running - do nothing this pass.
	if [ -f "$markfile" ]; then
		break;
	fi
	if [ -f "$f" ]; then
		execfn=$TMP_PATH/SH_$(basename $f)
		echo $execfn > $markfile
		# cmdd="cp $f /Users/johnxu/_tmp && mv $f $execfn && sh $execfn $DOCKERCMD && rm -fr $execfn && rm -fr $markfile"
		echo "-- Ran $f -- at $(date +"%m/%d/%Y %H:%M:%S")"
		# NOTE(review): hardcoded per-user archive path - not portable.
		mkdir -p /Users/johnxu/_tmp
		cp $f /Users/johnxu/_tmp/ || true
		mv -f $f $execfn || true
		sh $execfn $DOCKERCMD || true
		rm -fr $execfn || true
		rm -fr $markfile || true
		echo "-- done $f -- at $(date +"%m/%d/%Y %H:%M:%S")"
	else
		# Glob did not match: the queue directory is empty.
		exit 1
	fi
done
| true |
c3309de19dfda1ecc0b5ec7b9a43c66f0640bf4d | Shell | kidanger/inverse_compositional | /DemoExtras/ica_script2.sh | UTF-8 | 1,648 | 3.640625 | 4 | [
"BSD-2-Clause",
"BSD-3-Clause"
] | permissive | #!/bin/bash
# Require all 12 positional parameters.
if [ "$#" -lt "12" ]; then
	# printf is used so the \n and \t escapes actually render (bash echo
	# printed them literally in the old version).
	printf 'usage:\n\t%s nscales zoom eps transform robust lambda dbp edgepadding color gradient first_scale std\n' "$0"
	exit 1
fi

nscales=$1
zoom=$2
eps=$3
transform=$4
robust=$5
lambda=$6
dbp=$7
edgepadding=$8
color=$9
gradient=${10}
first_scale=${11}
std=${12}

# Map the boolean-ish CLI flags onto the environment switches the binaries read.
if [ "$color" = "True" ]; then
	GRAYMETHOD=1
else
	GRAYMETHOD=0
fi

if [ "$dbp" = "True" ]; then
	NANIFOUTSIDE=1
else
	NANIFOUTSIDE=0
	edgepadding=0    # padding is meaningless without boundary-pixel discard
fi

ref=input_0.png
ref_noisy=input_noisy_0.png
warped=input_1.png
warped_noisy=input_noisy_1.png
file=transformation.txt
filewithout=transformation_without.txt
# Optional ground-truth transform for error-map generation.
if [ -f input_2.txt ]; then
	file2=input_2.txt
else
	file2=""
fi

echo "Standard deviation of the noise added: $std"
add_noise "$ref" "$ref_noisy" "$std"
add_noise "$warped" "$warped_noisy" "$std"

echo ""
# Baseline run with all modifications disabled, for comparison.
GRAYMETHOD=0 EDGEPADDING=0 NANIFOUTSIDE=0 ROBUST_GRADIENT=0 inverse_compositional_algorithm $ref_noisy $warped_noisy -f $filewithout -z $zoom -n $nscales -r $robust -e $eps -t $transform -s 0 > /dev/null
# Run with the requested modifications enabled.
GRAYMETHOD=$GRAYMETHOD EDGEPADDING=$edgepadding NANIFOUTSIDE=$NANIFOUTSIDE ROBUST_GRADIENT=$gradient inverse_compositional_algorithm $ref_noisy $warped_noisy -f $file -z $zoom -n $nscales -r $robust -e $eps -t $transform -s $first_scale -v

echo ""
echo "Without modification:"
NANIFOUTSIDE=1 EDGEPADDING=0 generate_output $ref_noisy $warped_noisy $filewithout $file2
# Keep the baseline outputs under "...2" names so the second run cannot
# overwrite them.
mv output_estimated.png output_estimated2.png
if [ -f epe.png ]; then
	mv epe.png epe2.png
fi
mv diff_image.png diff_image2.png

echo ""
echo "With modifications:"
NANIFOUTSIDE=1 EDGEPADDING=0 generate_output $ref_noisy $warped_noisy $file $file2
| true |
40db3645e79d50a662838569168b6c6652deb366 | Shell | cliquesads/adexchange | /setup-redis.sh | UTF-8 | 886 | 3.734375 | 4 | [] | no_license | #!/bin/bash
# usage text visible when --help flag passed in
usage="$(basename "$0") -- Sets up proper version of Redis for this environment and starts redis-server
where:
    --help show this help text"

# Quoted tests so a multi-word first argument cannot break the check.
if [ -n "$1" ]; then
	if [ "$1" = '--help' ]; then
		echo "$usage"
		exit 0
	fi
fi

# Now get proper environment variables for global package versions, etc.
source ./config/environments/adexchange_environment.cfg

#install redis from source to ensure latest version
REDISPATH="$HOME/redis-$REDIS_VERSION"
if [ ! -d "$REDISPATH" ]; then
	# Each cd is checked so make/rm can never run in the wrong directory.
	cd "$HOME" || exit 1
	wget "http://download.redis.io/releases/redis-$REDIS_VERSION.tar.gz"
	tar xzf "redis-$REDIS_VERSION.tar.gz"
	cd "redis-$REDIS_VERSION" || exit 1
	make
	rm "$HOME/redis-$REDIS_VERSION.tar.gz"
fi

cd "$REDISPATH/src" || exit 1
# Now start redis-server
./redis-server "$HOME/repositories/cliques-config/redis/redis.conf"
exit 0
exit 0 | true |
9c22a0194040f2c8d70e299e6355a7bad58f7077 | Shell | scudiero/tools | /src/mergeRoles.sh | UTF-8 | 27,261 | 3.5 | 4 | [] | no_license | #!/bin/bash
#==================================================================================================
# Script version stamp (maintained by the framework's edit tooling).
version=2.3.0 # -- dscudiero -- Tue 05/22/2018 @ 14:06:07.01
#==================================================================================================
# Framework bootstrap: install signal traps and pull in the shared helper
# functions this script uses, then record the raw argument string.
TrapSigs 'on'
myIncludes="GetOutputFile BackupCourseleafFile ProtectedCall GetExcel SetFileExpansion"
Import "$standardInteractiveIncludes $myIncludes"
originalArgStr="$*"
scriptDescription="Merge role data"
#==================================================================================================
# Merge role data
#==================================================================================================
#==================================================================================================
## Copyright ©2014 David Scudiero -- all rights reserved.
## 06-17-15 -- dgs - Initial coding
#==================================================================================================
#==================================================================================================
# local functions
#==================================================================================================
#==============================================================================================
# parse script specific arguments
#==============================================================================================
# Register this script's command-line options with the framework parser.
# NOTE(review): each entry appears to be a pipe-delimited descriptor of the
# form shortFlag|longFlag|kind|variable||scope|helpText - confirm against
# the framework's ParseArgs implementation.
function mergeRoles-ParseArgsStd {
	myArgs+=("w|workbookFile|option|workbookFile||script|The fully qualified workbook file name")
	myArgs+=("m|merge|switch|merge||script|Merge role data when duplicate role names are found")
	myArgs+=("o|overlay|switch|overlay||script|Overlay role data when duplicate role names are found")
	return 0
}
#==============================================================================================
# Goodbye call back
#==============================================================================================
# Exit callback: remove the scratch files created during the run and
# restore the backed-up step file. $1 is the script's exit code (unused).
function mergeRoles-Goodbye {
	trap 'SignalHandler ERR ${LINENO} $? ${BASH_SOURCE[0]}' ERR
	local rc="$1"
	if [[ -f $tmpWorkbookFile ]]; then rm $tmpWorkbookFile; fi
	if [[ -f $stepFile ]]; then rm -f $stepFile; fi
	if [[ -f $backupStepFile ]]; then mv -f $backupStepFile $stepFile; fi
	if [[ -f "$tmpDataFile" ]]; then rm -f "$tmpDataFile"; fi
	return 0
}
#==============================================================================================
# TestMode overrides
#==============================================================================================
# Test-mode overrides: point the source/target environments at canned
# data directories under the user's home instead of real site paths.
function mergeRoles-testMode {
	trap 'SignalHandler ERR ${LINENO} $? ${BASH_SOURCE[0]}' ERR
	srcEnv='dev';  srcDir=~/testData/dev
	tgtEnv='test'; tgtDir=~/testData/next
	return 0
}
#==================================================================================================
# Cleanup funtion, strip leadning blanks, tabs and commas
#==================================================================================================
# Normalize a comma-delimited member list:
#   - remove any whitespace on either side of each comma
#   - strip leading and trailing blanks, tabs and commas
# Echoes the cleaned string on stdout.
function CleanMembers {
	trap 'SignalHandler ERR ${LINENO} $? ${BASH_SOURCE[0]}' ERR
	local string="$1"
	## Collapse whitespace around commas. The old expressions only removed a
	## single space per side, leaving residue on double-spaced input.
	string=$(sed 's/[ \t]*,[ \t]*/,/g' <<< "$string" )
	## Strip leading blanks, tabs, commas
	string=$(sed 's/^[ ,\t]*//g' <<< "$string" )
	## Strip trailing blanks, tabs, commas
	string=$(sed 's/[ ,\t]*$//g' <<< "$string" )
	echo "$string"
	return 0
}
#==================================================================================================
# Merge two comma delimited strings, sort and remove duplicates
#==================================================================================================
# Union of two comma-delimited lists: sorted, de-duplicated, comma-joined.
#   $1 - first list, $2 - second list; result echoed on stdout.
function MergeRoleData {
	trap 'SignalHandler ERR ${LINENO} $? ${BASH_SOURCE[0]}' ERR
	local rows
	rows=$(tr ',' '\n' <<< "$1,$2" | sort -u)
	local -a union=($rows)      # word-splitting drops empty entries
	local IFS=','               # join array elements with commas
	echo "${union[*]}"
	return 0
}
#==================================================================================================
# Get the roles data from the file
#==================================================================================================
# Load role definitions from a roles.tcf file into the (caller-declared)
# associative array rolesFromFile, keyed by role name.
#   $1 - path to the roles.tcf file
# Lines look like "role:<name>|<data...>"; everything after the first pipe
# is stored as the role's data.
function GetRolesDataFromFile {
	trap 'SignalHandler ERR ${LINENO} $? ${BASH_SOURCE[0]}' ERR
	local rolesFile="$1"
	#PushSettings; set +e; shift; PopSettings
	# UIN mapping requires the userid sheet to have been processed first.
	[[ $useUINs == true && $processedUserData != true ]] && Msg $T "$FUNCNAME: Requesting UIN mapping but no userid sheet was provided"

	Msg "\nParsing the roles.tcf file ($rolesFile) ..."
	## Get the roles data from the roles.tcf file
	[[ ! -r $rolesFile ]] && Terminate "Could not read the .../courseleaf/roles.tcf file"
	while read line; do
		if [[ ${line:0:5} == 'role:' ]]; then
			# key = role name between "role:" and the first "|".
			key=$(echo $line | cut -d '|' -f 1 | cut -d ':' -f 2)
			data=$(Trim $(echo $line | cut -d '|' -f 2-))
			dump 2 -n line key data
			rolesFromFile["$key"]="$data"
		fi
	done < $rolesFile
	Msg "^Retrieved ${#rolesFromFile[@]} records"
	# At verbose level 1+ dump the whole table for debugging.
	if [[ $verboseLevel -ge 1 ]]; then Msg "\n^rolesFromFile: $roleFile"; for i in "${!rolesFromFile[@]}"; do printf "\t\t[$i] = >${rolesFromFile[$i]}<\n"; done; fi
	return 0
} #GetRolesDataFromFile
#==================================================================================================
# Read / Parse roles data from spreadsheet
#==================================================================================================
# Read the 'roles' sheet of $workbookFile (via the project GetExcel helper,
# which fills the global 'resultSet' array with '|'-delimited rows) and load
# it into the associative array 'rolesFromSpreadsheet'
# (key = role name, value = "members|email").
# Terminates on duplicate role rows with differing data.
function GetRolesDataFromSpreadsheet {
trap 'SignalHandler ERR ${LINENO} $? ${BASH_SOURCE[0]}' ERR
Msg "\nParsing the roles data from the workbook file ($workbookFile)..."
## Parse the role data from the spreadsheet
workbookSheet='roles'
GetExcel -wb "$workbookFile" -ws "$workbookSheet"
## Parse the output array
foundHeader=false; foundData=false
for ((ii=0; ii<${#resultSet[@]}; ii++)); do
result="${resultSet[$ii]}"
dump 2 -n result
# Skip blank rows ('|||' is an all-empty 4-column row).
[[ $result == '' || $result = '|||' ]] && continue
SetFileExpansion 'off'
# A '*start*' or '***' marker row precedes the header record.
if [[ $(Lower ${result:0:7}) == '*start*' || $(Lower ${result:0:3}) == '***' ]] && [[ $foundHeader == false ]]; then
(( ii++ ))
result="${resultSet[$ii]}"
SetFileExpansion
Msg $V1 'Parsing header record'
# Split the header row on '|' to locate the needed columns.
IFSave=$IFS; IFS=\|; sheetCols=($result); IFS=$IFSave;
findFields="role members email"
# For each wanted field, find its 1-based column number and store it
# in <field>Col (e.g. roleCol) via eval.
for field in $findFields; do
dump 2 -n field
unset fieldCntr
for sheetCol in "${sheetCols[@]}"; do
(( fieldCntr += 1 ))
dump 2 -t sheetCol fieldCntr
[[ $(Contains "$(Lower $sheetCol)" "$field") == true ]] && eval "${field}Col=$fieldCntr" && break
done
done
dump 2 roleCol membersCol emailCol
# NOTE(review): $userCol is never assigned in this function; this looks
# like a fallback to a 'user' column -- verify against the workbook layout.
membersCol=$membersCol$userCol
[[ $roleCol == '' ]] && Terminate "Could not find a 'RoleName' column in the 'RolesData' sheet"
[[ $membersCol == '' ]] && Terminate "Could not find a 'MemberList or UserList' column in the 'RolesData' sheet"
[[ $emailCol == '' ]] && Terminate "Could not find a 'Email or UserList' column in the 'RolesData' sheet"
foundHeader=true
elif [[ $foundHeader == true ]]; then
# Data row: key is the role name, value is "members|email".
key=$(echo $result | cut -d '|' -f $roleCol)
[[ $key == '' ]] && continue
value=$(echo $result | cut -d '|' -f $membersCol)'|'$(echo $result | cut -d '|' -f $emailCol)
dump 2 -n result -t key value
# Duplicate role rows are fatal unless their data is identical.
if [[ ${rolesFromSpreadsheet["$key"]+abc} ]]; then
if [[ ${rolesFromSpreadsheet["$key"]} != $value ]]; then
Terminate "The '$workbookSheet' tab in the workbook contains duplicate role records \
\n\tRole '$key' with value '$value' is duplicate\n\tprevious value = '${rolesFromSpreadsheet["$key"]}'"
fi
else
rolesFromSpreadsheet["$key"]="$value"
fi
fi
done
[[ ${#rolesFromSpreadsheet[@]} -eq 0 ]] && Terminate "Did not retrieve any records from the spreadsheet, \
\n^most likely it is missing the 'start' directive ('*start*' or '***') in column 'A' just above the header record."
Msg "^Retrieved ${#rolesFromSpreadsheet[@]} records"
if [[ $verboseLevel -ge 1 ]]; then Msg "\n^rolesFromSpreadsheet: $roleFile"; for i in "${!rolesFromSpreadsheet[@]}"; do printf "\t\t[$i] = >${rolesFromSpreadsheet[$i]}<\n"; done; fi
return 0
} #GetRolesDataFromSpreadsheet
#==================================================================================================
# Map UIDs to UINs in role members
#==================================================================================================
function EditRoleMembers { # Map member UIDs to UINs where a mapping exists
trap 'SignalHandler ERR ${LINENO} $? ${BASH_SOURCE[0]}' ERR
local memberData="$*"
# Nothing to translate unless this client uses UINs.
[[ $useUINs != true ]] && echo "$memberData" && return
local uid
local uidList
local mapped=()
# Split the incoming comma-delimited list into an array.
IFSsave=$IFS; IFS=',' read -a uidList <<< "$memberData"; IFS=$IFSsave
for uid in "${uidList[@]}"; do
if [[ ${usersFromDb["$uid"]+abc} ]]; then
# Already a known database user; keep as-is.
mapped+=("$uid")
elif [[ ${uidUinHash["$uid"]+abc} ]]; then
# Replace the UID with its mapped UIN.
mapped+=("${uidUinHash["$uid"]}")
else
# No mapping known; pass the member through unchanged.
mapped+=("$uid")
fi
done
# Re-join the members on commas.
local joined
joined=$(IFS=','; echo "${mapped[*]}")
echo "$joined"
return 0
} #EditRoleMembers
#==================================================================================================
# Declare script-level variables and constants
#==================================================================================================
# Boolean option flags, all initialized to false.
falseVars='noUinMap useUINs'
for var in $falseVars; do
	eval "$var=false"
done
unset workbookFile
# Associative arrays that carry role/user data between the parse and merge phases.
declare -A rolesFromSrcFile rolesFromTgtFile rolesOut
declare -A rolesFromSpreadsheet uidUinHash membersErrors
#==================================================================================================
# Standard arg parsing and initialization
#==================================================================================================
helpSet='script,client,env'
helpNotes+=("The output is written to the $HOME/clientData/<client> directory,\n\t if the directory does exist one will be created.")
Hello
GetDefaultsData -f $myName
ParseArgsStd $originalArgStr
dump -1 client env envs srcEnv tgtEnv -q
displayGoodbyeSummaryMessages=true
# Project Init helper resolves client, source/target envs and directories.
Init 'getClient getSrcEnv getTgtEnv getDirs checkEnvs addPvt'
srcEnv="$(TitleCase "$srcEnv")"
tgtEnv="$(TitleCase "$tgtEnv")"
## Find out if we should merge or overlay role data.
## $overlay / $merge may have been set by ParseArgsStd command-line flags.
unset mergeMode
[[ $overlay == true ]] && mergeMode='overlay'
[[ $merge == true ]] && mergeMode='merge'
# If neither flag was given (and we are interactive), ask the user.
if [[ $mergeMode == '' && $informationOnlyMode != true ]]; then
unset ans
Msg "\n Do you wish to merge data when roles are found in both source and target, or do you want to overlay the target data from the source"
Prompt ans "'Yes' = 'merge', 'No' = 'overlay'" 'Yes No' 'Yes'; ans=$(Lower ${ans:0:1})
[[ $ans == 'y' ]] && mergeMode='merge' || mergeMode='overlay'
else
[[ -n $mergeMode ]] && Note "Using specified value of '$mergeMode' for 'mergeMode'"
fi
## Find out if this client uses UINs (looked up in the client info table).
if [[ $noUinMap == false ]]; then
sqlStmt="select usesUINs from $clientInfoTable where name=\"$client\""
RunSql $sqlStmt
if [[ ${#resultSet[@]} -ne 0 ]]; then
result="${resultSet[0]}"
[[ $result == 'Y' ]] && useUINs=true && Msg
fi
fi
## Get workbook file: optionally let the user pick an Excel workbook whose
## roles sheet is merged in on top of the roles.tcf data.
# Bug fix: the original line unset 'workflowSearchDir' (a typo); the variable
# actually tested below is 'workbookSearchDir'.
unset workbookSearchDir
if [[ $workbookFile == '' && $verify != false ]]; then
if [[ $verify != false ]]; then
unset ans
Msg
Prompt ans "Do you wish to merge in spreadsheet data" 'Yes No' 'No'; ans=$(Lower ${ans:0:1})
if [[ $ans == 'y' ]]; then
## Search for XLSx files in clientData and implementation folders
if [[ -d "$localClientWorkFolder/$client" ]]; then
workbookSearchDir="$localClientWorkFolder/$client"
else
## Find out if user wants to load cat data or cim data
Prompt product 'Are you merging CAT or CIM data' 'cat cim' 'cat';
product=$(Upper $product)
implimentationRoot="/steamboat/leepfrog/docs/Clients/$client/Implementation"
# Probe the standard implementation folder layouts, most specific first.
if [[ -d "$implimentationRoot/Attachments/$product/Workflow" ]]; then workbookSearchDir="$implimentationRoot/Attachments/$product/Workflow"
elif [[ -d "$implimentationRoot/Attachments/$product" ]]; then workbookSearchDir="$implimentationRoot/Attachments/$product"
elif [[ -d "$implimentationRoot/$product/Workflow" ]]; then workbookSearchDir="$implimentationRoot/$product/Workflow"
elif [[ -d "$implimentationRoot/$product" ]]; then workbookSearchDir="$implimentationRoot/$product"
fi
fi
# Let the user pick one of the *.xls* files found in the search dir.
if [[ $workbookSearchDir != '' ]]; then
SelectFile $workbookSearchDir 'workbookFile' '*.xls*' "\nPlease specify the $(ColorK '(ordinal)') number of the Excel workbook you wish to load data from:\
\n(*/.xls* files found in '$(ColorK $workbookSearchDir)')\n(sorted ordered newest to oldest)\n\n"
workbookFile="$workbookSearchDir/$workbookFile"
fi
fi
fi
fi
[[ $workbookFile != '' && ! -f $workbookFile ]] && Terminate "Could not locate the workbookFile:\n\t$workbookFile"
workbookFileStr='N/A'
if [[ $workbookFile != '' ]]; then
## If the workbook file name contains blanks then copy to temp and use that one.
realWorkbookFile="$workbookFile"
unset tmpWorkbookFile
if [[ $(Contains "$workbookFile" ' ') == true ]]; then
cp -fp "$workbookFile" /tmp/$userName.$myName.workbookFile
tmpWorkbookFile="/tmp/$userName.$myName.workbookFile"
workbookFileStr="$workbookFile as $tmpWorkbookFile"
workbookFile="$tmpWorkbookFile"
else
workbookFileStr="$workbookFile"
fi
[[ ! -r $workbookFile ]] && Terminate "Could not locate file: '$workbookFile'"
fi
## set default values
# NOTE(review): useUINs is initialized to 'false' above, so this '' check
# looks unreachable -- confirm whether an earlier code path can blank it.
[[ $useUINs == '' ]] && useUINs='N/A'
## output file
unset outFile
## Set outfile -- look for std locations; prefer the shared client work
## folder, fall back to the user's home directory.
outFile="/home/$userName/$client-$srcEnv-$tgtEnv-CIM_Roles.txt"
if [[ -d $localClientWorkFolder ]]; then
if [[ ! -d $localClientWorkFolder/$client ]]; then mkdir -p "$localClientWorkFolder/$client"; fi
outFile="$localClientWorkFolder/$client/$client-$srcEnv-$tgtEnv-CIM_Roles.txt"
fi
# Write the 5-column, tab-separated comparison-report header.
echo -e "Role name\t'$srcEnv' site member list\t'$srcEnv' site email\t'$tgtEnv' site member list\t'$tgtEnv' site email" > "$outFile"
## Verify processing: build the confirmation summary shown to the user.
verifyArgs+=("Client:$client")
verifyArgs+=("Source Env:$(TitleCase $srcEnv) ($srcDir)")
verifyArgs+=("Target Env:$(TitleCase $tgtEnv) ($tgtDir)")
[[ $workbookFile != '' ]] && verifyArgs+=("Input file:$workbookFileStr")
verifyArgs+=("Map UIDs to UINs:$useUINs")
verifyArgs+=("Role combination rule:$mergeMode")
[[ -n $outFile ]] && verifyArgs+=("Output File:$outFile")
VerifyContinue "You are asking to update CourseLeaf data"
dump -1 client srcEnv srcDir tgtEnv tgtDir useUINs outFile
# Record the run parameters in the process log table, if logging is enabled.
myData="Client: '$client', SrcEnv: '$srcEnv', TgtEnv: '$tgtEnv', File: '$workbookFile' "
[[ $logInDb != false && $myLogRecordIdx != "" ]] && dbLog 'data' $myLogRecordIdx "$myData"
#==================================================================================================
# Main
#==================================================================================================
## Process spreadsheet: load the workbook's roles sheet, if one was chosen.
if [[ $workbookFile != '' ]]; then
## Get the list of sheets in the workbook
GetExcel -wb "$workbookFile" -ws 'GetSheets'
sheets="${resultSet[0]}"
dump -1 sheets
## Make sure we have a 'role' sheet.
## Bug fix: 'Terminate' was fused to its message (Terminate"Could ...), so
## bash tried to run a nonexistent command and the guard never terminated.
[[ $(Contains "$(Lower $sheets)" 'role') != true ]] && Terminate "Could not locate a sheet with 'role' in its name in workbook:\n^$workbookFile"
GetRolesDataFromSpreadsheet
if [[ $verboseLevel -ge 1 ]]; then Msg "\n^rolesfromSpreadsheet:"; for i in "${!rolesFromSpreadsheet[@]}"; do printf "\t\t[$i] = >${rolesFromSpreadsheet[$i]}<\n"; done; fi
fi
## Process role.tcf files: parse the source and target roles.tcf files into
## rolesFromSrcFile / rolesFromTgtFile (GetRolesDataFromFile fills the
## work array 'rolesFromFile', which is copied and reset between calls).
declare -A rolesFromFile
rolesFile=$srcDir/web/courseleaf/roles.tcf
GetRolesDataFromFile $rolesFile
for key in "${!rolesFromFile[@]}"; do rolesFromSrcFile["$key"]="${rolesFromFile["$key"]}"; done
unset rolesFromFile
declare -A rolesFromFile
rolesFile=$tgtDir/web/courseleaf/roles.tcf
GetRolesDataFromFile $rolesFile
for key in "${!rolesFromFile[@]}"; do rolesFromTgtFile["$key"]="${rolesFromFile["$key"]}"; done
unset rolesFromFile
## Merge role file data
Msg "\nMerging the '$srcEnv' roles into the '$tgtEnv' roles ..."
unset numDifferentFromSrc addedFromSrc
## Prime roles out hash with the tgt file data.
for key in "${!rolesFromTgtFile[@]}"; do rolesOut["$key"]="${rolesFromTgtFile["$key"]}"; done
## Loop through target hash and report on any roles not found in the source.
for key in "${!rolesFromTgtFile[@]}"; do
# Bug fix: '[[ !${...+abc} ]]' glued '!' onto the expansion, producing a
# non-empty string ('!abc' or '!') that tested true for EVERY role, so all
# target roles were reported as missing from the source.  '!' must be a
# separate word to negate the test.
if [[ ! ${rolesFromSrcFile["$key"]+abc} ]]; then
Info 0 1 "Role '$key' found in $tgtEnv but not in $srcEnv"
echo -e "${key}\t\t\t$(echo ${rolesFromTgtFile["$key"]} | cut -d'|' -f1)\t$(echo ${rolesFromTgtFile["$key"]} | cut -d'|' -f2)" >> "$outFile"
fi
done
## Loop through source hash and see if it is in the target hash, if yes then compare.
for key in "${!rolesFromSrcFile[@]}"; do
if [[ ${rolesOut["$key"]+abc} ]]; then
# Role exists in both environments -- compare the data.
if [[ ${rolesFromSrcFile["$key"]} != ${rolesOut["$key"]} ]]; then
Warning 0 1 "Role '$key' data in '$srcEnv' differs from '$tgtEnv'"
Msg "^^$srcEnv data: ${rolesFromSrcFile["$key"]}"
Msg "^^$tgtEnv data: ${rolesOut["$key"]}"
if [[ $mergeMode == 'merge' ]]; then
unset members1 members2 email1 email2 mergedMembers mergedEmail fromEnv
# Keep commented-out copies of both pre-merge values in the output file.
rolesOut["//$key"]="${rolesFromSrcFile["$key"]} <-- Pre-merge $srcEnv value"
rolesOut["//$key"]="${rolesOut["$key"]} <-- Pre-merge $tgtEnv value"
members1=$(echo ${rolesFromSrcFile["$key"]} | cut -d'|' -f1)
email1=$(echo ${rolesFromSrcFile["$key"]} | cut -d'|' -f2)
members2=$(echo ${rolesOut["$key"]} | cut -d'|' -f1)
email2=$(echo ${rolesOut["$key"]} | cut -s -d'|' -f2)
mergedMembers=$(MergeRoleData "$members1" "$members2")
# Emails cannot be merged; prefer the target's value when both are set.
if [[ $email1 != $email2 ]]; then
[[ $email1 != '' ]] && mergedEmail="$email1" && fromEnv="$srcEnv"
[[ $email2 != '' ]] && mergedEmail="$email2" && fromEnv="$tgtEnv"
Msg "^^Email data on the roles do not match, using: '$mergedEmail' from '$fromEnv'"
#warningMsgs+=("\tEmail data on the roles do not match, using: '$mergedEmail' from '$fromEnv'")
else
mergedEmail="$email2"
fi
dump -1 -t -t members1 members2 mergedMembers email1 email2 mergedEmail
rolesOut["$key"]="$mergedMembers|$mergedEmail"
Msg "^^New $tgtEnv (merged) data: ${rolesOut["$key"]}"
#warningMsgs+=("\tNew $tgtEnv (merged) data: ${rolesOut["$key"]}")
else
rolesOut["//$key"]="${rolesFromSrcFile["$key"]} <-- Pre-merge $srcEnv value"
Msg "^^Keeping existing ($tgtEnv) data."
fi
# Bug fix: a '\t' was missing between the source email and the target
# member list, collapsing the 5-column report row to 4 columns (compare
# the header written above and the rows written for one-sided roles).
echo -e "${key}\t$(echo ${rolesFromSrcFile["$key"]} | cut -d'|' -f1)\t$(echo ${rolesFromSrcFile["$key"]} | cut -d'|' -f2)\t$(echo ${rolesFromTgtFile["$key"]} | cut -d'|' -f1)\t$(echo ${rolesFromTgtFile["$key"]} | cut -d'|' -f2)" >> "$outFile"
(( numDifferentFromSrc += 1 ))
fi
else
# Role only exists in the source -- add it to the merged output.
Note 0 1 "Role '$key' found in $srcEnv but not in $tgtEnv, adding to $tgtEnv"
rolesOut["$key"]="${rolesFromSrcFile["$key"]}"
echo -e "${key}\t$(echo ${rolesFromSrcFile["$key"]} | cut -d'|' -f1)\t$(echo ${rolesFromSrcFile["$key"]} | cut -d'|' -f2)\t\t" >> "$outFile"
(( addedFromSrc += 1 ))
fi
done;
numRolesOut=${#rolesOut[@]}
Msg "^Merged roles out record count: ${numRolesOut}"
[[ $addedFromSrc -gt 0 ]] && Msg "^$addedFromSrc records added from '$srcEnv'"
[[ $numDifferentFromSrc -gt 0 ]] && Msg "^$numDifferentFromSrc records differed between '$srcEnv' and '$tgtEnv'"
if [[ $verboseLevel -ge 1 ]]; then Msg "\n^rolesOut: "; for i in "${!rolesOut[@]}"; do printf "\t\t[$i] = >${rolesOut[$i]}<\n"; done; fi
## Merge role spreadsheet data into the merged roles, mirroring the file merge above.
if [[ $workbookFile != '' ]]; then
unset numDifferentFromSpreadsheet addedFromSpreadsheet
Msg "\nMerging the spreadsheet role data into the '$tgtEnv' roles ..."
for key in "${!rolesFromSpreadsheet[@]}"; do
if [[ ${rolesOut["$key"]+abc} ]]; then
if [[ ${rolesFromSpreadsheet["$key"]} != ${rolesOut["$key"]} ]]; then
Warning 0 1 "Role '$key' data in spreadsheet differs from '$tgtEnv'"
Msg "^^Spreadsheet data: ${rolesFromSpreadsheet["$key"]}"
Msg "^^$tgtEnv data: ${rolesOut["$key"]}"
if [[ $mergeMode == 'merge' ]]; then
# Also clear fromEnv, for parity with the file-merge loop above.
unset members1 members2 email1 email2 mergedMembers mergedEmail fromEnv
rolesOut["//$key"]="${rolesFromSpreadsheet["$key"]} <-- Pre-merge spreadsheet value"
rolesOut["//$key"]="${rolesOut["$key"]} <-- Pre-merge $tgtEnv value"
members1=$(echo ${rolesFromSpreadsheet["$key"]} | cut -d'|' -f1)
email1=$(echo ${rolesFromSpreadsheet["$key"]} | cut -d'|' -f2)
members2=$(echo ${rolesOut["$key"]} | cut -d'|' -f1)
email2=$(echo ${rolesOut["$key"]} | cut -s -d'|' -f2)
mergedMembers=$(MergeRoleData "$members1" "$members2")
# Emails cannot be merged; prefer the spreadsheet's value when both are set.
if [[ $email1 != $email2 ]]; then
[[ $email2 != '' ]] && mergedEmail="$email2" && fromEnv="$tgtEnv"
[[ $email1 != '' ]] && mergedEmail="$email1" && fromEnv="spreadsheet"
Msg "^^Email data on the roles do not match, using: '$mergedEmail' from '$fromEnv'"
#warningMsgs+=("\tEmail data on the roles do not match, using: '$mergedEmail' from '$fromEnv'")
else
mergedEmail="$email2"
fi
dump -1 -t -t members1 members2 mergedMembers email1 email2 mergedEmail
rolesOut["$key"]="$mergedMembers|$mergedEmail"
Msg "^^New $tgtEnv (merged) data: ${rolesOut["$key"]}"
#warningMsgs+=("^New $tgtEnv (merged) data: ${rolesOut["$key"]}")
else
rolesOut["//$key"]="${rolesFromSpreadsheet["$key"]} <-- Pre-merge spreadsheet value"
Msg "^^Keeping existing ($tgtEnv) data."
fi
# Bug fix: this loop incremented numDifferentFromSrc / addedFromSrc, so the
# spreadsheet counters (unset above and reported below and in the run
# summary) were never incremented, and the source counters double-counted.
(( numDifferentFromSpreadsheet += 1 ))
fi
else
rolesOut["$key"]="${rolesFromSpreadsheet["$key"]}"
(( addedFromSpreadsheet += 1 ))
fi
done
Msg "^Merged roles out record count: ${#rolesOut[@]}"
[[ $addedFromSpreadsheet -gt 0 ]] && Msg "^$addedFromSpreadsheet records added from the Spreadsheet"
[[ $numDifferentFromSpreadsheet -gt 0 ]] && Msg "^$numDifferentFromSpreadsheet records differed between Spreadsheet and '$tgtEnv'"
fi
## Write out file: rebuild the target roles.tcf with the merged role records
## spliced into the position of the original 'role:' block.
##TODO: Replace writing of the roles data with a courseleaf step somehow
if [[ $informationOnlyMode != true ]]; then
writeFile=true
# If any warnings were issued during the merge, ask before writing.
if [[ ${#warningMsgs[@]} -gt 0 ]]; then
Msg; unset ans
Prompt ans "Warning messages were issued, do you wish to write the role data out to '$tgtEnv'" "Yes No"; ans=$(Lower ${ans:0:1})
[[ $ans != 'y' ]] && writeFile=false
fi
if [[ $writeFile == true ]]; then
rolesFile=$tgtDir/web/courseleaf/roles.tcf
Msg "\nWriting out new roles.tcf file to '$tgtEnv' ($rolesFile)..."
editFile=$rolesFile
BackupCourseleafFile $editFile
# Parse the target file to put source data in the correct location in target file.
# Everything before the first 'role:' line goes to topPart, everything after
# the role block goes to bottomPart.
topPart=/tmp/$userName.$myName.topPart; [[ -f $topPart ]] && rm -f $topPart
bottomPart=/tmp/$userName.$myName.bottomPart; [[ -f $bottomPart ]] && rm -f $bottomPart
found=false
while read -r line; do
#echo "line='${line}'" >> $stdout
[[ ${line:0:5} == 'role:' ]] && found=true
[[ ${line:0:5} != 'role:' ]] && [[ $found == false ]] && echo "${line}" >> $topPart
[[ ${line:0:5} != 'role:' ]] && [[ $found == true ]] && echo "${line}" >> $bottomPart
done < $editFile
## Paste the target file together: topPart + merged roles + bottomPart.
## Keys beginning with '//' are the commented-out pre-merge values.
[[ -f $topPart ]] && $DOIT cp -f $topPart $editFile.new
echo >> $editFile.new
for key in "${!rolesOut[@]}"; do
[[ ${key:0:2} == '//' ]] && echo "//role:${key:2}|${rolesOut["$key"]}" >> $editFile.new || echo "role:${key}|${rolesOut["$key"]}" >> $editFile.new
done
echo >> $editFile.new
[[ -f $bottomPart ]] && $DOIT cat $bottomPart >> $editFile.new
## Swap the files
mv $editFile.new $editFile
[[ -f $topPart ]] && rm -f $topPart; [[ -f $bottomPart ]] && rm -f $bottomPart
Msg "^$editFile written to disk"
summaryMsgs+=("$editFile written to disk")
## Write out change log entries
# NOTE(review): '$DOIT Msg ... >> changelog.txt' relies on the project Msg
# helper writing to stdout; $DOIT is presumably empty or 'echo' in dry-run.
$DOIT Msg "\n$userName\t$(date) via '$myName' version: $version" >> $tgtDir/changelog.txt
$DOIT Msg "^Merged data from '$srcEnv'" >> $tgtDir/changelog.txt
[[ $workbookFile != '' ]] && $DOIT Msg "^Merged data from $realWorkbookFile" >> $tgtDir/changelog.txt
fi # writeFile
fi #not informationOnlyMode
## Processing summary: collect run statistics for the Goodbye banner.
summaryMsgs+=("Retrieved ${#rolesFromSrcFile[@]} records from the '$srcEnv' roles.tcf file")
summaryMsgs+=("Retrieved ${#rolesFromTgtFile[@]} records from the '$tgtEnv' roles.tcf file")
[[ $addedFromSrc -gt 0 ]] && summaryMsgs+=("\t$addedFromSrc records added from '$srcEnv'")
[[ $numDifferentFromSrc -gt 0 ]] && summaryMsgs+=("\t$numDifferentFromSrc records differed between '$srcEnv' and '$tgtEnv'")
if [[ $workbookFile != '' ]]; then
summaryMsgs+=("Retrieved ${#rolesFromSpreadsheet[@]} records from '$workbookFile'")
[[ $addedFromSpreadsheet -gt 0 ]] && summaryMsgs+=("\t$addedFromSpreadsheet records added from Spreadsheet")
[[ $numDifferentFromSpreadsheet -gt 0 ]] && summaryMsgs+=("\t$numDifferentFromSpreadsheet records differed between Spreadsheet and '$tgtEnv'")
fi
summaryMsgs+=("")
summaryMsgs+=("${#rolesOut[@]} Merged Records")
summaryMsgs+=("")
summaryMsgs+=("Data comparison workbook file: $outFile")
# NOTE(review): this tests '== false' while the rest of the script tests
# '!= true' for informationOnlyMode -- confirm both spellings are equivalent here.
[[ $informationOnlyMode == false ]] && { summaryMsgs+=(""); summaryMsgs+=("$tgtEnv roles.tcf file written ($tgtDir/web/courseleaf/roles.tcf)"); }
#==================================================================================================
## Done
#==================================================================================================
## Exit nicely
Goodbye 0 #'alert'
#==================================================================================================
## Check-in log
#==================================================================================================
# 11-24-2015 -- dscudiero -- Merge CourseLeaf roles (1.1)
# 12-30-2015 -- dscudiero -- refactor workbook file selection (2.2.1)
## Fri Apr 1 13:30:46 CDT 2016 - dscudiero - Swithch --useLocal to $useLocal
## Wed Apr 6 16:09:35 CDT 2016 - dscudiero - switch for
## Wed Apr 27 16:05:00 CDT 2016 - dscudiero - Switch to use RunSql
## Thu Aug 4 11:02:15 CDT 2016 - dscudiero - Added displayGoodbyeSummaryMessages=true
## Tue Sep 20 12:34:42 CDT 2016 - dscudiero - Switched to use Msg2
## 05-26-2017 @ 12.49.39 - (2.2.21) - dscudiero - Found an instance of Msg vs Msg2
## 06-07-2017 @ 07.44.14 - (2.2.22) - dscudiero - Added BackupCourseleafFIle to the import list
## 09-25-2017 @ 12.26.53 - (2.2.24) - dscudiero - Switch to use Msg
## 10-16-2017 @ 09.06.27 - (2.2.45) - dscudiero - Updated to use GetExcel
## 11-02-2017 @ 06.58.56 - (2.2.47) - dscudiero - Switch to ParseArgsStd
## 11-02-2017 @ 11.02.12 - (2.2.48) - dscudiero - Add addPvt to the init call
## 03-22-2018 @ 14:06:58 - 2.2.49 - dscudiero - Updated for Msg3/Msg, RunSql2/RunSql, ParseArgStd/ParseArgStd2
## 03-23-2018 @ 11:56:24 - 2.2.50 - dscudiero - Updated for GetExcel2/GetExcel
## 03-23-2018 @ 15:35:13 - 2.2.51 - dscudiero - D
## 03-23-2018 @ 16:52:33 - 2.2.52 - dscudiero - Msg3 -> Msg
## 04-11-2018 @ 14:57:01 - 2.2.53 - dscudiero - Added wfHello, tweaked wfDebug & wfDump
## 05-22-2018 @ 14:08:14 - 2.3.0 - dscudiero - Create a comparison file as outout
| true |
f6e3f7c24014e13375b6e3e54bf24338693ca792 | Shell | jphppd/homeskel | /.local/bin/tab2space | UTF-8 | 2,014 | 3.984375 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# SPDX-License-Identifier: MIT
#
# Copyright (c) 2013-2018 Nicolas Iooss
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# Convert tabs into 4-spaces in specified files (or not if -t is given),
# strip spaces at the end of lines and replace nbsp with spaces.
TAB=$'\t'        # literal tab character
NBSP=$'\xc2\xa0' # UTF-8 non-breakable space
REPLACE_TABS=true
# With -t, keep tabs and only fix trailing spaces and non-breakable spaces.
case "${1-}" in
  -t)
    REPLACE_TABS=false
    shift
    ;;
esac
# Fix each named file in place (sed -i).  Directories are skipped with a
# warning; files are only rewritten when the offending pattern is present.
for file in "$@"; do
    if [ -d "$file" ]; then
        echo >&2 "Ignoring directory $file"
        continue
    fi
    # Replace tab by 4 spaces (skipped when -t was given)
    if "$REPLACE_TABS" && grep -q -F "$TAB" <"$file"; then
        echo "[tab] $file"
        sed -i --follow-symlinks "s/$TAB/    /g" "$file"
    fi
    # End of line spaces: strip trailing blanks
    if grep -q -E " +\$" <"$file"; then
        echo "[eol] $file"
        sed -i --follow-symlinks "s/ *\$//" "$file"
    fi
    # Remove non-breakable spaces, replacing each with a regular space
    if grep -q -F "$NBSP" <"$file"; then
        echo "[nbsp] $file"
        sed -i --follow-symlinks "s/$NBSP/ /g" "$file"
    fi
done
| true |
ab6071e4374ac2ede06a6cd6ef1446ac46cf0465 | Shell | efweber999/ACCUM2_BENCHMARKING | /start_accumulo | UTF-8 | 6,162 | 3.71875 | 4 | [] | no_license | #!/bin/bash
# June 2021 - Gene Weber
# This script launches Accumulo after running the installation script, and start_hadoop script.
#
# COMMAND LINE OPTION: If this script is launched with the command line option "split"
# it will use the pre-split properties file, else it will use the no-split file.
# Save command line option
COMMAND_OPTION=$1
# Location of existing directory on shared file system of installed software.
INSTALL_DIR=$(cat inst_dir)
# Execute the configuration file to initialize all needed variables.
source acc_test.conf
# Get all needed environmental variables.
source $INSTALL_DIR/acc_env_vars
# Set bashrc flag
touch script_active
# Based on command line option use no-split or pre-split properties file.
if [ "$COMMAND_OPTION" == "split" ]; then
echo ""
echo "Setting the properties file to pre-split."
echo ""
cp $ACCUMULO_HOME/conf/accumulo.properties.pre-split $ACCUMULO_HOME/conf/accumulo.properties
else
echo ""
echo "Setting the properties file to no-split."
echo ""
cp $ACCUMULO_HOME/conf/accumulo.properties.no-split $ACCUMULO_HOME/conf/accumulo.properties
fi
# Get Name of first Accumulo Master Node
set -- $ACC_MASTER_NODES
export FIRST_MASTER=$1
# Initialize a new accumulo structure on the first master node.
echo ""
echo "Initializing a new Accumulo database"
echo ""
# Check if the Accumulo database already exists in Zookeeper.
if $(accumulo org.apache.accumulo.server.util.ListInstances 2>/dev/null | grep -q $ACC_DBASE_NAME); then
# Delete existing entry from Zookeper and re-initialize.  The piped echo
# feeds 'accumulo init' its interactive answers: instance name, 'y' to
# overwrite, then the password twice.
pdsh -w $FIRST_MASTER "echo -e '$ACC_DBASE_NAME\ny\n$ACC_PASSWD\n$ACC_PASSWD\n' | accumulo init"
else
# Initialize a new accumulo database (instance name + password twice).
pdsh -w $FIRST_MASTER "echo -e '$ACC_DBASE_NAME\n$ACC_PASSWD\n$ACC_PASSWD\n' | accumulo init"
fi
echo ""
echo "Starting the Accumulo Monitor"
echo ""
# Start the accumulo monitor on every node listed in accmonitornodes.
pdsh -w ^$INSTALL_DIR/accmonitornodes 'accumulo-service monitor start'
echo ""
echo "Starting the Accumulo Tablet Servers"
echo ""
# Create tservers launch file: one 'nohup accumulo tserver' line per tablet
# server instance, each with its own numbered log/err files.
rm $INSTALL_DIR/lnch_tsrvrs 2>/dev/null
touch $INSTALL_DIR/lnch_tsrvrs
chmod 755 $INSTALL_DIR/lnch_tsrvrs
for (( i=1; i<=$TABS_PER_NODE; i++ ))
do
LOGFILE="/$LOCAL_NVME1/$ACC_LOG_DIR/tserver_${i}.log"
ERRFILE="/$LOCAL_NVME1/$ACC_LOG_DIR/tserver_${i}.err"
echo "nohup accumulo tserver >$LOGFILE 2>$ERRFILE < /dev/null &" >> $INSTALL_DIR/lnch_tsrvrs
done
# Copy the launch file to local drive. This is more for AWS where nfs is slow!
# NOTE(review): single quotes defer $INSTALL_DIR/$LOCAL_NVME1 expansion to the
# remote shell -- assumes the remote environment defines them (via bashrc).
pdsh -w ^$INSTALL_DIR/acctservernodes 'cp $INSTALL_DIR/lnch_tsrvrs /$LOCAL_NVME1'
# Start the accumulo tablet servers.
echo "Launching $TABS_PER_NODE Tablet Servers on each of these nodes:"
echo $ACC_TSERVER_NODES
pdsh -w ^$INSTALL_DIR/acctservernodes 'nohup /$LOCAL_NVME1/lnch_tsrvrs &' &
# Capture start time (epoch seconds), used below to measure startup latency.
TBLTS_START=$(date +%s)
# Allow some time for all tserver processes to start.
echo "Waiting $TSS_WAIT_TIME seconds for all Tablet Servers to start."
sleep ${TSS_WAIT_TIME}s
# Collect all tserver PIDs on each node into a local pid file.
echo "Collecting all Tablet Server PIDs."
PIDFILE="/$LOCAL_NVME1/$A_PID_DIR/tservers.pid"
pdsh -w ^$INSTALL_DIR/acctservernodes "ps -fu $LOGNAME | grep '.* tserver$' | sed 's/^$LOGNAME *//' | sed 's/\ .*$//' >$PIDFILE"
# Verify the Tablet Servers: generate a per-node script that counts the
# running tservers and reports the slowest start time relative to
# $TBLTS_START.  Unescaped $vars expand now; \$ escapes expand on the node.
cat <<EOT > $INSTALL_DIR/tsrv_verify
LONGEST=0
NUM_PIDS=0
for pid in \$(cat $PIDFILE)
do
((NUM_PIDS++))
TEMP1=\$(export TZ=UTC0 LC_ALL=C; date -d "\$(ps -o lstart= \$pid)" +%s)
TEMP=\$((\$TEMP1 - $TBLTS_START))
if (( \$TEMP > \$LONGEST )); then
LONGEST=\$TEMP
fi
done
if (($TABS_PER_NODE > \$NUM_PIDS)); then
echo "ERROR! -> Only \$NUM_PIDS of $TABS_PER_NODE Tablet Servers are running!"
else
echo "All Tablet Servers started within \$LONGEST seconds."
fi
EOT
# Copy file to local drives and launch them.
chmod 755 $INSTALL_DIR/tsrv_verify
pdsh -w ^$INSTALL_DIR/acctservernodes 'cp $INSTALL_DIR/tsrv_verify /$LOCAL_NVME1'
pdsh -w ^$INSTALL_DIR/acctservernodes '/$LOCAL_NVME1/tsrv_verify'
# Clean up the generated helper scripts, locally and on every tserver node.
rm $INSTALL_DIR/lnch_tsrvrs 2>/dev/null
pdsh -w ^$INSTALL_DIR/acctservernodes 'rm /$LOCAL_NVME1/lnch_tsrvrs 2>/dev/null' 2>/dev/null
rm $INSTALL_DIR/tsrv_verify 2>/dev/null
pdsh -w ^$INSTALL_DIR/acctservernodes 'rm /$LOCAL_NVME1/tsrv_verify 2>/dev/null' 2>/dev/null
echo ""
echo "Starting the Accumulo Masters"
echo ""
# Start the accumulo master on every node listed in accmasternodes.
pdsh -w ^$INSTALL_DIR/accmasternodes 'accumulo-service master start'
echo ""
echo "Starting the Accumulo Garbage Collector"
echo ""
# Start the accumulo garbage collector
pdsh -w ^$INSTALL_DIR/accgcnodes 'accumulo-service gc start'
echo ""
echo "Starting the Accumulo Tracer"
echo ""
# Start the accumulo tracer
pdsh -w ^$INSTALL_DIR/acctracernodes 'accumulo-service tracer start'
# If Accumulo Proxy is installed, start it and record its PID; the
# acc_proxy_running marker tells stop_accumulo there is a proxy to stop.
if [ $INSTALL_PROXY = true ]; then
echo ""
echo "Starting the Accumulo Proxy"
echo ""
LOGFILE="/$LOCAL_NVME1/$ACC_LOG_DIR/acc_proxy.log"
ERRFILE="/$LOCAL_NVME1/$ACC_LOG_DIR/acc_proxy.err"
PIDFILE="/$LOCAL_NVME1/$A_PID_DIR/acc_proxy.pid"
pdsh -w ^$INSTALL_DIR/acc_proxy_node "nohup accumulo-proxy -p $ACC_PROXY_HOME/conf/proxy.properties \
>$LOGFILE 2>$ERRFILE < /dev/null &"
pdsh -w ^$INSTALL_DIR/acc_proxy_node "ps -fu $LOGNAME | grep '.*bin\/accumulo-proxy ' | sed 's/^$LOGNAME *//' |\
sed 's/\ .*$//' >$PIDFILE"
touch $INSTALL_DIR/acc_proxy_running
fi
# Run a command in the accumulo shell as a simple test (lists tables, exits).
cat <<EOT | accumulo shell -u root
tables
bye
EOT
# Build the monitor hostname.  On AWS, translate the node's IP (a.b.c.d)
# into the internal hostname form ip-a-b-c-d.evoforge.org.
if [ "$AWS" = true ]; then
PROXY=$(cat $INSTALL_DIR/accmonitornodes | sed 's/^/ip-/' | sed 's/\./-/g' | sed 's/$/\.evoforge\.org/')
else
PROXY=$(cat $INSTALL_DIR/accmonitornodes)
fi
echo ""
echo "You can view Accumulo monitor page at:"
echo "http://$PROXY:9995/monitor"
echo ""
echo ""
echo "If this looked successful you can now run accumulo tests."
echo "The stop_accumulo script will terminate accumulo processes and clean up."
echo ""
echo "NOTE: TO RUN ACCUMULO COMMANDS FROM THE COMMAND LINE YOU MUST FIRST:"
echo "source acc_test.conf"
echo "source \$INSTALL_DIR/acc_env_vars"
echo ""
rm acc_stopped_flag 2>/dev/null # Clear flag that accumulo has been stopped
# Clear the bashrc flag set at the top of this script.
rm script_active
bce3a1aa90a4e3a28fd9fa66ddaec56b8f2cd4fc | Shell | cadia-lvl/kaldi | /egs/althingi/s5/local/nnet3/run_ivector_common.sh | UTF-8 | 7,224 | 3.734375 | 4 | [
"Apache-2.0",
"LicenseRef-scancode-public-domain"
] | permissive | #!/bin/bash
set -e
# This script is called from local/chain/run_tdnn_lstm.sh. It contains the common feature
# preparation and iVector-related parts of the script. See those scripts for examples of usage.
stage=1
generate_alignments=false # Depends on whether we are doing speech perturbations
speed_perturb=false
# Defined in conf/path.conf, default to /mnt/scratch/inga/{exp,data,mfcc}
exp=
data=
mfcc=
. ./cmd.sh
. ./path.sh # runs conf/path.conf
. ./utils/parse_options.sh
# Require exactly four positional arguments; otherwise print usage and exit.
if [ ! $# = 4 ]; then
echo "This script creates high-resolution MFCC features for the training data,"
echo "which is either speed perturbed or not. If we speed perturb, then new alignments"
echo "are also obtained. An ivector extractor is also trained and ivectors extracted"
echo "for both training and test sets."
echo ""
echo "Usage: $0 [options] <input-training-data-dir> <dir-with-test-sets> <lang-dir> <gmm-name>"
echo " e.g.: $0 data/train data data/lang tri5"
echo ""
echo "Options:"
echo " --speed-perturb # apply speed perturbations, default: true"
echo " --generate-alignments # obtain the alignments of the perturbed data"
exit 1;
fi
# Positional arguments (see usage above).
inputdata=$1
testdatadir=$2
langdir=$3
gmm=$4
# "_sp" suffix marks speed-perturbed variants of the data/alignment dirs.
suffix=
$speed_perturb && suffix=_sp
# perturbed data preparation
train_set=$(basename $inputdata)
#train_set=train_okt2017_fourth
gmm_dir=$exp/${gmm}
ali_dir=$exp/${gmm}_ali_${train_set}$suffix
if [ "$speed_perturb" == "true" ]; then
if [ $stage -le 1 ]; then
#Although the nnet will be trained by high resolution data, we still have to perturbe the normal data to get the alignment
# _sp stands for speed-perturbed
echo "$0: preparing directory for low-resolution speed-perturbed data (for alignment)"
utils/data/perturb_data_dir_speed_3way.sh $inputdata $data/${train_set}${suffix}
echo "$0: making MFCC features for low-resolution speed-perturbed data"
steps/make_mfcc.sh --nj 100 --cmd "$train_cmd --time 2-00" \
$data/${train_set}${suffix} || exit 1
steps/compute_cmvn_stats.sh $data/${train_set}${suffix} || exit 1
utils/fix_data_dir.sh $data/${train_set}${suffix} || exit 1
fi
if [ $stage -le 2 ] && [ "$generate_alignments" == "true" ]; then
#obtain the alignment of the perturbed data with the fMLLR GMM system
steps/align_fmllr.sh --nj 100 --cmd "$decode_cmd --time 3-00" \
$data/${train_set}${suffix} $langdir $gmm_dir $ali_dir || exit 1
fi
# From here on, train_set names the speed-perturbed directory.
train_set=${train_set}${suffix}
fi
# Stage 3: high-resolution MFCCs (train + dev/eval) and the 35k/100k subsets.
if [ $stage -le 3 ]; then
# Create high-resolution MFCC features (with 40 cepstra instead of 13).
# this shows how you can split across multiple file-systems. we'll split the
# MFCC dir across multiple locations. You might want to be careful here, if you
# have multiple copies of Kaldi checked out and run the same recipe, not to let
# them overwrite each other.
echo "$0: creating high-resolution MFCC features"
# the 100k_nodup directory is copied separately, as
# we want to use exp/tri1b_ali_100k_nodup for ivector extractor training
# the main train directory might be speed_perturbed
if [ "$speed_perturb" == "true" ]; then
utils/copy_data_dir.sh $data/$train_set $data/${train_set}_hires
else
utils/copy_data_dir.sh $inputdata $data/${train_set}_hires
fi
# do volume-perturbation on the training data prior to extracting hires
# features; this helps make trained nnets more invariant to test data volume.
utils/data/perturb_data_dir_volume.sh $data/${train_set}_hires
steps/make_mfcc.sh \
--nj 100 --mfcc-config conf/mfcc_hires.conf \
--cmd "$decode_cmd --time 3-00" \
$data/${train_set}_hires \
$exp/make_hires/$train_set $mfcc;
steps/compute_cmvn_stats.sh $data/${train_set}_hires $exp/make_hires/${train_set} $mfcc;
# Remove the small number of utterances that couldn't be extracted for some
# reason (e.g. too short; no such file).
utils/fix_data_dir.sh $data/${train_set}_hires;
for dataset in dev eval; do
# Create MFCCs for the dev/eval sets
utils/copy_data_dir.sh $testdatadir/$dataset $data/${dataset}_hires
steps/make_mfcc.sh --cmd "$train_cmd" --nj 30 --mfcc-config conf/mfcc_hires.conf \
$data/${dataset}_hires $exp/make_hires/$dataset $mfcc;
steps/compute_cmvn_stats.sh $data/${dataset}_hires $exp/make_hires/$dataset $mfcc;
utils/fix_data_dir.sh $data/${dataset}_hires # remove segments with problems
done
# Take 35k utterances (about 1/20th of the data (if using train_okt_fourth)) this will be used
# for the diagubm training
# The 100k subset will be used for ivector extractor training
utils/subset_data_dir.sh $data/${train_set}_hires 35000 $data/${train_set}_35k_hires
utils/subset_data_dir.sh $data/${train_set}_hires 100000 $data/${train_set}_100k_hires
fi
# Stage 5: PCA transform used as the splicing/decorrelating front-end for the
# diagonal UBM and iVector extractor.
if [ $stage -le 5 ]; then
echo "$0: computing a PCA transform from the hires data."
steps/online/nnet2/get_pca_transform.sh --cmd "$train_cmd --time 2-00" \
--splice-opts "--left-context=3 --right-context=3" \
--max-utts 10000 --subsample 2 \
$data/${train_set}_35k_hires $exp/nnet3/pca
fi
# Stage 6: diagonal UBM on the 35k subset.
if [ $stage -le 6 ]; then
# To train a diagonal UBM we don't need very much data, so use the smallest subset.
echo "$0: training the diagonal UBM."
steps/online/nnet2/train_diag_ubm.sh --cmd "$train_cmd" --nj 30 --num-frames 200000 \
$data/${train_set}_35k_hires 512 $exp/nnet3/pca $exp/nnet3/diag_ubm
fi
# Stage 7: iVector extractor on the 100k subset.
if [ $stage -le 7 ]; then
# iVector extractors can be sensitive to the amount of data, but this one has a
# fairly small dim (defaults to 100) so we don't use all of it, we use just the
# 100k subset (~15% of the data).
echo "$0: training the iVector extractor"
steps/online/nnet2/train_ivector_extractor.sh --cmd "$train_cmd --time 1-12" --nj 10 \
$data/${train_set}_100k_hires $exp/nnet3/diag_ubm $exp/nnet3/extractor || exit 1;
fi
# Stage 8: online iVector extraction for training and test data.
if [ $stage -le 8 ]; then
# We extract iVectors on the speed-perturbed training data after combining
# short segments, which will be what we train the system on. With
# --utts-per-spk-max 2, the script pairs the utterances into twos, and treats
# each of these pairs as one speaker; this gives more diversity in iVectors..
# Note that these are extracted 'online'.
# note, we don't encode the 'max2' in the name of the ivectordir even though
# that's the data we extract the ivectors from, as it's still going to be
# valid for the non-'max2' data, the utterance list is the same.
ivectordir=$exp/nnet3/ivectors_${train_set}
# having a larger number of speakers is helpful for generalization, and to
# handle per-utterance decoding well (iVector starts at zero).
temp_data_root=${ivectordir}
utils/data/modify_speaker_info.sh --utts-per-spk-max 2 \
$data/${train_set}_hires ${temp_data_root}/${train_set}_hires_max2
steps/online/nnet2/extract_ivectors_online.sh --cmd "$train_cmd --time 2-12" --nj 100 \
${temp_data_root}/${train_set}_hires_max2 \
$exp/nnet3/extractor $ivectordir
# Also extract iVectors for the test data
for data_set in dev eval; do
steps/online/nnet2/extract_ivectors_online.sh --cmd "$train_cmd" --nj 30 \
$data/${data_set}_hires $exp/nnet3/extractor $exp/nnet3/ivectors_${data_set} || exit 1;
done
fi
exit 0;
| true |
a66aa85e856e31d118790e8367dfdefed4f4cf1c | Shell | whisperity/dotfiles | /sbin/reboot-windows | UTF-8 | 819 | 3.875 | 4 | [] | no_license | #!/bin/bash
# grub-reboot and reading grub.cfg require root privileges.
if [[ ${EUID} -ne 0 ]]
then
echo "Must be run as root." >&2
exit 1
fi
# GRUB_DEFAULT must be 'saved' for grub-reboot's one-shot entry to take
# effect on the next boot. (UUoC removed: grep reads the file directly.)
DEFAULT_LINE=$(grep "GRUB_DEFAULT" /etc/default/grub | grep -v "^#")
if [[ ! "${DEFAULT_LINE}" =~ "saved" ]]
then
echo "Grub default configuration is not set to saved value." >&2
# Typo fix: "chage" -> "change".
echo "Please change GRUB_DEFAULT to 'saved' in /etc/default/grub" >&2
echo "then execute \`update-grub2\`." >&2
exit 1
fi
# Extract the human-readable title of the Windows menuentry from grub.cfg.
WINDOWS_ENTRY=$(grep "menuentry" /boot/grub/grub.cfg | grep "Windows")
WINDOWS_ENTRY_NAME=$(echo ${WINDOWS_ENTRY} | \
sed "s/^menuentry '//" | \
sed "s/'.*$//")
# Arm a one-shot boot of the Windows entry; test the command directly
# instead of inspecting $? afterwards.
if ! grub-reboot "${WINDOWS_ENTRY_NAME}"
then
echo "Error: calling grub-reboot failed." >&2
exit 1
fi
echo "Grub has been set to boot \"${WINDOWS_ENTRY_NAME}\""
echo "Please \`reboot\` now..."
| true |
49f0b11047dd7096eb850ffc800841a790ec4d8e | Shell | fagan2888/packet-parse | /scripts/least.sh | UTF-8 | 178 | 2.84375 | 3 | [] | no_license | #!/bin/bash
# Page through the numbered capture triples (meta/initiator/responder)
# under ./output until the next .meta file is missing.
c=1
while [ -f "output/$c.meta" ]; do
less "output/$c.meta"
less "output/$c.initiator"
less "output/$c.responder"
c=$((c + 1))
done
| true |
ebef6b396e5a026b1597e1fd0c20eecdda65ad18 | Shell | robertatakenaka/Bibliometria | /www/scibiblio/bases/estat/tab30/gt30/tc30issn.sh | ISO-8859-1 | 14,202 | 2.84375 | 3 | [] | no_license | # -------------------------------------------------------------------------- #
# gencorrtc30 - Builds the correction table for cited journal titles (Tit Citado)
# -------------------------------------------------------------------------- #
# input       :
# output      :
# working dir : ???/www/scibiblio/bases/estat/tab30/gt30
# notes       :
# -------------------------------------------------------------------------- #
# DATE      AUTHOR            Comments
# 20040616  Rogerio / Chico   Original edition
#
# -------------------------------------------------------------------------- #
echo "*********************************************************************************************"
echo "****** SHELL PARA GERAO DA BASE DE CORREO DE TTULOS DE REVISTAS: ******"
echo "****** campo v30 da base Scielo Cuba - artigo ******"
echo "*********************************************************************************************"
echo "******* Cipar, bases e gerao de arquivos invertidos das bases a serem utilizadas *******"
echo "*********************************************************************************************"
# Configuration file for the CISIS/ISIS tools, exported for all invocations.
CIPAR=../../cipar.cip; export CIPAR
# Paths to the different CISIS builds (standard, 1660, lind, FFI variants).
CISIS="/utl/cisis"
CISIS1660="/utl/cisis1660"
LIND="/utl/lind"
FFI="/utl/cisis1660ffi"
LINDFFI="/utl/lindffi"
# Count already-tabulated citations (field v30) that reference journal
# articles [ p(v30) and a(v18) ] into the inverted base "contav30".
echo "* Conta citaes j tabuladas [ contav30 ]a artigos de revista [ p(v30) and a(v18) ] *"
$LIND/mxtbansi artigoc create=contav30 bool=$ uctab=ansi "150:if p(v30) and a(v18) then mpu,v30 fi" btell=0 class=90000
$LIND/msrt contav30 10 v998
#mx contav30 "fst=1 0 ref(['tab30issn']l(['tab30issn']v1),v222^a)" fullinv/ansi=contav30
echo "*********************************************************************************************"
echo "******** Parte 1 - Gera base TC30SCIELO, utilizando os dados da TITLE SciELO *******"
# Part 1: build TC30SCIELO from the SciELO TITLE base (title variants + ISSN).
$LIND/mx ../../jcr/title uctab=ansi create=tc30scielo -all now "proc='d*a100~',mpu,v100,'~',if p(v110) then 'a110~'v100' - 'v110'~a111~'v100'-'v110'~' fi,'a150~',mpu,v150,mpl'~',if p(v151) then 'a151~',mpu,v151,mpl'~' fi,'a222~^a',mpl,v100,'^bSCIELO~a400~'v400'~'"
$LIND/mx tc30scielo "fst=@tc30scielo.fst" fullinv/ansi=tc30scielo
echo "*********************************************************************************************"
echo "***** Parte 2 - Gera base de correo de ttulos TAB30ISSN_CORRECT *****"
# Part 2, phase 1: titles that match the ISSN base exactly (Match=1).
echo "*** Gera FASE 1 da base de correo de ttulos, com ttulos escritos como nna base ISSN ***"
echo "** Seleciona citaes j tabuladas, com Match=1 na base do ISSN [ fastissn ] **"
$LIND/mx null count=0 create=fase1
$LIND/mx contav30 create=pre_fase1 "proc=if f(npost(['fstissn']replace(replace(replace(s(mhu,v1,mpl),' ',' '),'.',' '),' ',' ')),1,0)='1' then 'd1d998d999a1~'ref(['fstissn']l(['fstissn']replace(replace(replace(s(mhu,v1,mpl),' ',' '),'.',' '),' ',' ')),mfn)'~a222~'ref(['fstissn']l(['fstissn']replace(replace(replace(s(mhu,v1,mpl),' ',' '),'.',' '),' ',' ')),v222)'~a22~'ref(['fstissn']l(['fstissn']replace(replace(replace(s(mhu,v1,mpl),' ',' '),'.',' '),' ',' ')),v22^a)'~a29~'mfn'~a30~'v1'~a999~'v999'~' else 'd*' fi" -all now
$LIND/mx pre_fase1 append=fase1 -all now
rm pre_fase1.*
# Part 2, phase 2: word-by-word inverted search for titles not matched above.
echo "*** Gera FASE 2 da base de correo de ttulos, utilizando inverso palavra por palavra ***"
echo "** Gerao de expresses de busca (palavra por palavra) para recuperao na base **"
$LIND/mx null count=0 >nullstw.tab
$LIND/mx null count=0 create=fase2_tabout2
echo "** Monta Shell c/ expresses de busca palavra-a-palavra **"
# Generate a shell script of word-by-word search expressions, then run it.
$LIND/mx contav30 uctab=ansi "jchk=null+nullstw.tab=mpu,v1" "pft='$LIND/mx fstissn2 btell=0 \"bool='(if p(v32001) then v32001^k'$',if iocc<nocc(v32001) then ' * ' fi fi),'\" +hits \"proc=if val(v1002)>1 then #%#d*#%# else #%#d*a1~#%#mfn#%#~a222~#%#v222#%#~a29~^c'mfn'~#%# fi\" append=fase2_tabout2 -all now'/" lw=999 -all now>savehits2.txt
echo "!ID 000000">giz2.id
echo "!v001!035037035">>giz2.id
echo "!v002!039">>giz2.id
echo "!v011!asc">>giz2.id
echo "!v021!asc">>giz2.id
$LIND/id2i giz2.id create=giz2
$LIND/mx seq=savehits2.txt create=work gizmo=giz2,1 -all now
$LIND/mx work "pft=v1/" lw=999 -all now>savehits2.txt
rm work.* giz2.*
\. savehits2.txt
rm savehits2.txt
echo "** Formata a base fase2_tabout2 **"
$LIND/mx fase2_tabout2 create=fase2_join1issnWW -all now "proc='d29a22~'ref(['fstissn']val(v1),v22^a)'~a29~'v29^c'~a30~'ref(['contav30']val(v29^c),v1)'~a999~'ref(['contav30']val(v29^c),v999)'~'"
rm fase2_tabout2.*
# Keep only phase-2 records not already resolved in phase 1.
echo "** Seleciona registros excludentes entre as bases das FASES 1 e 2 **"
$LIND/mx fase1 "fst=1 0 v29" fullinv=fase1 tell=5000
$LIND/mx fase2_join1issnWW "join=fase1=v29" create=fase2_fstissn_tabout2 -all now
rm fase2_join1issnWW.*
echo "** Seleciona (da base_FASE 2) citaes tabuladas excludentes das FASE 1 **"
$LIND/mx null create=fase2_complementar count=0
$LIND/mx fase2_fstissn_tabout2 append=fase2_complementar -all now "proc=if p(v32001^m) then 'd*' else 'd32001' fi"
rm fase2_fstissn_tabout2.*
# Merge phase 1 and the complementary phase-2 records into TAB30ISSN_CORRECT.
echo "** Appenda as bases da FASES 1 e 2 **"
$LIND/mx null create=tab30issn_correct count=0
$LIND/mx fase1 append=tab30issn_correct -all now
$LIND/mx fase2_complementar append=tab30issn_correct -all now
rm fase2_complementar.* fase1.*
echo "*********************************************************************************************"
echo "***** Parte 3 - Corrige ttulos menos citados por mais citados similares - TRIGRAMA *****"
echo "***** e aplica base de correo TAB30ISSN_CORRECT *****"
# Part 3: trigram similarity (wtrig1/wtrig2) maps less-cited titles onto
# similar, more-cited ones, then applies the TAB30ISSN_CORRECT table.
$LIND/mx tab30issn_correct iso=tab30issn_correct.iso -all now
$CISIS1660/mx iso=tab30issn_correct.iso create=tab30issn_correct_1660 -all now
$CISIS1660/mx tab30issn_correct_1660 "fst=1 0 v30" fullinv=tab30issn_correct_1660
rm tab30issn_correct.iso
echo "** Comea a gerar base WTRIG_TITLES **"
$LIND/mx contav30 iso=tc30.iso -all now
$LINDFFI/mx iso=tc30.iso create=tc30 -all now
rm tc30.iso
$LINDFFI/msrt tc30 10 v998
$LIND/wtrig1 documents=tc30 extract=v1 tell=5000
$LIND/wtrig2 documents=tc30 collection=tc30 maxrel=10 minsim=0.80 loadindex loadvectors tell=1000
# Dump title pairs whose similarity exceeds 0.90 into tc30giz_nos.txt.
$LINDFFI/mx tc30.y "proc='d6',(if v6>'' then '<6 0>'f(val(v6^*),6,0)'^0',v6'</6>'fi)" "proc='s6'" "pft=if size(ref->tc30(mfn,v1)) > 3 then (if val(v6^0)<=mfn and val(v6^s)>0.90 then ref->tc30(mfn,v1),'|',ref->tc30(val(v6^0),v1)'|'mfn(1)'|'v6^0'|'v6^s/,break else ref->tc30(mfn,v1)/,break fi) fi" lw=9999 now tell=10000 >tc30giz_nos.txt
$CISIS1660/mx seq=tc30giz_nos.txt -all now create=tc30giz_nos "proc='d1d2d3d4d5a30~'v1'~a2~'v2'~a3~'v3'~a4~'v4'~a5~'v5'~'"
rm tc30giz_nos.txt tc30.*
# Chain of joins: try the correction table first, then the ISSN base, with
# progressively normalized (whitespace/punctuation-stripped) title keys.
$CISIS1660/mx tc30giz_nos -all now create=wtrig_titles jmax=1 "join=tab30issn_correct_1660,22,222=v30" "join=issn,22,222=if v32001^m='' then v30 fi" "join=issn,22,222=if v32001^m='' and v32002^m='' then replace(replace(replace(v30,' ',' '),'.',' '),' ',' ') fi" "join=tab30issn_correct_1660,22,222=if v32001^m='' and v32002^m='' and v32003^m='' then v2 fi" "join=issn,22,222=if v32001^m='' and v32002^m='' and v32003^m='' and v32004^m='' then v2 fi" "join=issn,22,222=if v32001^m='' and v32002^m='' and v32003^m='' and v32004^m='' and v32005^m='' then replace(replace(replace(v2,' ',' '),'.',' '),' ',' ') fi"
rm tc30giz_nos.* tab30issn_correct*
$LIND/mx wtrig_titles "fst=;1 0 v30;2 0 v22;3 0 v222^a" fullinv/ansi=wtrig_titles tell=5000
echo "** Cria base TC30ISSN com TC30SCIELO e WTRIG_TITLES **"
$LIND/mx null create=tc30issn count=0
$LIND/mx tc30scielo append=tc30issn -all now
$LIND/mx seq=tc30add.seq append=tc30issn "proc='d*a30~'v1'~a222~'v2'~a22~'v3'~'" -all now
$LIND/mx wtrig_titles append=tc30issn -all now
$LIND/mx tc30issn "fst=@../tc30issn.fst" fullinv/ansi=tc30issn tell=5000
echo "*********************************************************************************************"
echo "***** Parte 4 - Une as bases geradas nas partes 1 e 3 e aplica TRIGRAMA entre *****"
echo "***** ttulos no convertidos e convertidos pela base WTRIG_TITLES, *****"
echo "***** possibilitando assim a correo de mais alguns ttulos *****"
# Part 4: run trigram matching between unconverted and converted titles,
# recovering additional corrections into WTRIG_TITLES2.
echo "** Gera a base WTRIG_TITLES2 **"
$LIND/mx null count=0 create=delaa
$LIND/mx contav30 uctab=ansi append=delaa -all now "proc='a2~'v1'~'" "proc='Gsplit=2=words'" "proc=if v2='' then 'd1a1~XX~' else 'd2' fi" "proc=if size(v1)<5 then 'd*a2~'v1'~' else 'd1a1~'v1'~a2~'ref(['tc30issn']l(['tc30issn']mpu,v1,mpl),mpu,v222^a,mpl)'~' fi" "proc=if v2='' then 'd*a1~'v1'~' else 'd*' fi"
$LIND/mx null count=0 create=delbb
$LIND/mx tc30issn -all now append=delbb "proc=if p(v400) and p(v222) then 'd*a100~'v100'~a150~'v150'~a151~'v151'~' else, if a(v400) and p(v222) then 'd*a150~'v30'~a151~'v2'~' else if a(v222) then 'd*' fi, fi, fi" "proc=if v150=v151 or v151='' or size(v151)<5 then 'd151' fi,if size(v150)<5 then 'd150' fi,if size(v100)<5 then 'd100' fi"
$LIND/mx delbb -all now iso=delbb.iso
$LIND/mx delaa -all now iso=delaa.iso
$LINDFFI/mx iso=delaa.iso -all now create=delaaffi
$LINDFFI/mx iso=delbb.iso -all now create=delbbffi
rm del??.iso
echo "** Aplica trigramas entre ttulos no convertidos e convertidos pela base WTRIG_TITLES **"
$LIND/wtrig1 documents=delbbffi case=ansi "extract=v100/v150/v151/" dmfn bin tell=10000
$LIND/wtrig1 documents=delaaffi case=ansi "extract=if p(v1) then v1/ else 'falta' fi" dmfn bin tell=10000
$LIND/wtrig2 documents=delaaffi collection=delbbffi loadvectors loadindex cmfn=1 minsim=0.8 tell=15000
rm del??ffi.mst del??ffi.xrf
$LINDFFI/mx delaaffi.y "proc='a333~'mfn'~'" -all now iso=aabbffi.iso
$LIND/mx null count=0 create=aabb
$LIND/mx iso=aabbffi.iso "proc='='v333" -all now append=aabb
rm aabbffi.iso
$LIND/mx null count=0 create=wtrig_titles2
$LIND/mx aabb -all now append=wtrig_titles2 uctab=ansi "proc='d*a30~'ref(['delaa']val(v333),v1)'~a2~'ref(['delbb']val(v6[1]^*),v150)'~a5~'v6[1]^s'~'" "proc='a222~^a'ref(['tc30issn']l(['tc30issn']v2),v222^a)'~'" "proc=e1:=l(['/bases/estat/jcr/title']v222^a), if e1>0 then 'a22~'ref(['/bases/estat/jcr/title']l(['/bases/estat/jcr/title']mpu,v222^a,mpl),v400)'~' else 'a22~'ref(['tc30issn']l(['tc30issn']v2),if v22^a>'' then v22^a else v22 fi)'~' fi"
rm del* aabb*
echo "*********************************************************************************************"
echo "******* Parte 5 - Gera base TC30ISSN, utilizando as bases geradas nas partes 1, 3 e 4 ******"
# Part 5: assemble TC30ISSN from the SciELO title base, the manual additions
# (tc30add.seq) and the two trigram-derived bases; normalize ISSNs via the
# TC30SCIELO lookups.
echo "** Gera incio da base TAB30ISSN, utilizando os dados da TITLE SciELO **"
$LIND/mx null create=tc30issn2 count=0
$LIND/mx tc30scielo append=tc30issn2 -all now
$LIND/mx seq=tc30add.seq append=tc30issn2 "proc='d*a30~'v1'~a222~'v2'~a22~'v3'~'" -all now
echo "** Apenda base WTRIG_TITLES2, gerada acima **"
$LIND/mx wtrig_titles2 append=tc30issn2 -all now
echo "** Apenda a base WTRIG_TITLES **"
$LIND/mx wtrig_titles append=tc30issn2 -all now
$LIND/mx tc30issn2 create=tc30issn1 -all now "proc=if v222='' then e1:=l(['tc30scielo']v2),if e1>0 then 'd222d22d400a222~'ref(['tc30scielo']e1,v222)'~a22~'ref(['tc30scielo']e1,v400)'~' fi, fi" "proc=if v222>'' then e1:=l(['tc30scielo']v22), e2:=l(['tc30scielo']v22^a), if e1>0 and v222^a<>ref(['tc30scielo']e1,v222^a) then 'd222a222~'ref(['tc30scielo']e1,v222)'~' else if e2>0 and v222^a<>ref(['tc30scielo']e2,v222^a) then 'd222a222~'ref(['tc30scielo']e2,v222)'~' fi, fi, fi"
$LIND/mx null create=tc30issn count=0
$LIND/mx tc30issn1 "proc=if v222^a='' then 'd*' fi" append=tc30issn -all now
$LIND/mx tc30issn1 "proc=if v222^a<>'' then 'd*' fi" append=tc30issn -all now
$LIND/mx tc30issn "fst=@../tc30issn.fst" fullinv/ansi=tc30issn tell=5000
echo "*********************************************************************************************"
echo "*********** Parte 6 - Acrescenta dados provenientes da base TC30ISSN do SciELO BR **********"
echo "********** Gera base TC30ISSN, utilizando as bases geradas nas partes 1, 3, 4 e 6 ***********"
# Part 6: enrich with ISSN/abbreviation data from the Brazilian SciELO
# TC30ISSN base (tc30issn_BR), then rebuild and invert the final base.
$LIND/mx null create=tc30issn11 count=0
$LIND/mx tc30issn1 "proc=if v222^a<>'' then 'd*' fi" append=tc30issn11 -all now
$LIND/mx null create=tc30issn3 count=0
$LIND/mx tc30issn11 "proc=e1:=l(['tc30issn_BR']v30),if e1>0 then 'd32001d32002d32003d32004d32005d32006a222~'ref(['tc30issn_BR']e1,v222,if p(v22) then '~a22~'v22'~' else '~' fi) fi" "proc=if v222^a='' then 'd*' fi" append=tc30issn3 -all now
$LIND/mx tc30issn11 "proc=e1:=l(['tc30issn_BR']v30),if e1>0 then 'd32001d32002d32003d32004d32005d32006a222~'ref(['tc30issn_BR']e1,v222,if p(v22) then '~a22~'v22'~' else '~' fi) fi" "proc=if v222^a<>'' then 'd*' fi" append=tc30issn3 -all now
$LIND/mx null create=tc30issn count=0
$LIND/mx tc30issn1 "proc=if v222^a='' then 'd*' fi" append=tc30issn -all now
$LIND/mx tc30issn3 append=tc30issn -all now
$LIND/mx tc30issn "fst=@../tc30issn.fst" fullinv/ansi=tc30issn tell=5000
rm tc30issn11.*
rm tc30issn3.*
rm tc30issn2.*
# Publish the finished master/index files one directory up.
cp tc30issn.mst ..
cp tc30issn.xrf ..
cp tc30issn.cnt ..
cp tc30issn.iyp ..
cp tc30issn.ly1 ..
cp tc30issn.ly2 ..
cp tc30issn.n01 ..
cp tc30issn.n02 ..
unset CIPAR
echo "*********************************************************************************************"
echo "*********************************************************************************************"
echo "***************************** !!!!! FIM !!!!! *******************************"
echo "*********************************************************************************************"
echo "*********************************************************************************************"
| true |
8a55350ed09fe5b3254fae4e9b0e5993f7696184 | Shell | Suharsh329/dotfiles | /.bashrc | UTF-8 | 903 | 2.640625 | 3 | [] | no_license | #
# ~/.bashrc
#
# If not running interactively, don't do anything
[[ $- != *i* ]] && return
alias ls='ls --color=auto'
# Prompt: [user@host cwd]$
PS1='[\u@\h \W]\$ '
# Editing/reloading this file.
alias bsh="nvim ~/.bashrc"
alias srcb="source ~/.bashrc"
# Interpreter/tool shorthands.
alias py=python3
alias python=python3
alias c=tcc
alias cc=tcc
alias n=nnn
alias v=nvim
alias ll="ls -al"
alias i3="nvim ~/.config/i3/config"
# Git shorthands.
alias gs="git status"
alias gp="git push"
alias gpl="git pull"
alias gc="git clone"
alias gch="git checkout"
alias ga="git add"
alias gad="git add ."
alias gcm="git commit -m"
alias zat=zathura
alias zatf="zathura --mode=fullscreen"
# Autocd: typing a directory name cd's into it.
shopt -s autocd
# Up/Down search history for entries starting with the typed prefix.
# (The interactive guard is redundant given the early return above, but
# it is kept so these binds stay safe if that guard is ever removed.)
if [[ $- == *i* ]]
then
bind '"\e[A": history-search-backward'
bind '"\e[B": history-search-forward'
fi
# Greeting banner on each new shell.
fortune -os | cowsay | lolcat
PATH=$PATH:~/scripts
PATH=$PATH:~/projects/hourglass
# Auto-start X when logging in on the first virtual console.
if [[ -z $DISPLAY ]] && [[ $(tty) = /dev/tty1 ]]; then exec startx; fi
| true |
1d9cb58c5676b1f71f4201dffbd7b844ab503ac2 | Shell | arkirchner/tools_config | /bashrc | UTF-8 | 551 | 2.6875 | 3 | [] | no_license | # .bashrc
# Source global definitions
if [ -f /etc/bashrc ]; then
. /etc/bashrc
fi
# Uncomment the following line if you don't like systemctl's auto-paging feature:
# export SYSTEMD_PAGER=
# User specific aliases and functions
export NVM_DIR="/home/akirchner/.nvm"
[ -s "$NVM_DIR/nvm.sh" ] && . "$NVM_DIR/nvm.sh" # This loads nvm
# Git shorthands. The trailing space in several of these makes bash also
# expand an alias in the word that follows (e.g. "gs" after "go").
alias gs='git status '
alias ga='git add '
alias gb='git branch '
alias gc='git commit'
alias gd='git diff'
alias go='git checkout '
alias gk='gitk --all&'
alias gx='gitx --all'
# Common typos of "git".
alias got='git '
alias get='git '
| true |
8b65e3d9af94f01860f3ef7a2eb5c64ff760f86b | Shell | justinnaldzin/strimzi | /.travis/setup-kubernetes.sh | UTF-8 | 2,121 | 2.84375 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Download the latest stable kubectl and install it into /usr/bin.
# The stable version is resolved in a separate step so a failed metadata
# fetch aborts instead of silently producing a malformed download URL
# (the original inlined the sub-fetch inside the URL). -f makes curl
# fail on HTTP errors rather than saving an error page as the binary.
function install_kubectl {
    local stable
    stable=$(curl -fsSL https://storage.googleapis.com/kubernetes-release/release/stable.txt) || return 1
    curl -fLo kubectl "https://storage.googleapis.com/kubernetes-release/release/${stable}/bin/linux/amd64/kubectl" && chmod +x kubectl
    sudo cp kubectl /usr/bin
}
# Provision the Kubernetes flavour selected by $TEST_CLUSTER:
#   minikube  - kubectl + minikube (vm-driver=none) + local docker registry
#   minishift - minishift 1.13.1 + local docker registry
#   oc        - OpenShift origin 3.7.0 client tools only
if [ "$TEST_CLUSTER" = "minikube" ]; then
install_kubectl
curl -Lo minikube https://storage.googleapis.com/minikube/releases/latest/minikube-linux-amd64 && chmod +x minikube
sudo cp minikube /usr/bin
# Silence minikube's interactive prompts; "none" driver runs as root on CI.
export MINIKUBE_WANTUPDATENOTIFICATION=false
export MINIKUBE_WANTREPORTERRORPROMPT=false
export MINIKUBE_HOME=$HOME
export CHANGE_MINIKUBE_NONE_USER=true
mkdir $HOME/.kube || true
touch $HOME/.kube/config
# Local insecure registry used by the test images.
docker run -d -p 5000:5000 registry
export KUBECONFIG=$HOME/.kube/config
sudo -E minikube start --vm-driver=none --insecure-registry localhost:5000
sudo -E minikube addons enable default-storageclass
elif [ "$TEST_CLUSTER" = "minishift" ]; then
#install_kubectl
MS_VERSION=1.13.1
curl -Lo minishift.tgz https://github.com/minishift/minishift/releases/download/v$MS_VERSION/minishift-$MS_VERSION-linux-amd64.tgz && tar -xvf minishift.tgz --strip-components=1 minishift-$MS_VERSION-linux-amd64/minishift && rm minishift.tgz && chmod +x minishift
sudo cp minishift /usr/bin
#export MINIKUBE_WANTUPDATENOTIFICATION=false
#export MINIKUBE_WANTREPORTERRORPROMPT=false
export MINISHIFT_HOME=$HOME
#export CHANGE_MINIKUBE_NONE_USER=true
mkdir $HOME/.kube || true
touch $HOME/.kube/config
docker run -d -p 5000:5000 registry
export KUBECONFIG=$HOME/.kube/config
sudo -E minishift start
sudo -E minishift addons enable default-storageclass
elif [ "$TEST_CLUSTER" = "oc" ]; then
mkdir -p /tmp/openshift
wget https://github.com/openshift/origin/releases/download/v3.7.0/openshift-origin-client-tools-v3.7.0-7ed6862-linux-64bit.tar.gz -O openshift.tar.gz
tar xzf openshift.tar.gz -C /tmp/openshift --strip-components 1
sudo cp /tmp/openshift/oc /usr/bin
else
echo "Unsupported TEST_CLUSTER '$TEST_CLUSTER'"
exit 1
fi | true
08f7f5109c1315d08e9a560d74ea095872a42427 | Shell | ZealousMacwan/shell-scripts-learning | /command-return-code.sh | UTF-8 | 135 | 3.09375 | 3 | [
"MIT"
] | permissive | #!/bin/sh
# Demonstrates checking a command's exit status.
# Bug fix: the original ran `echo $?` and then tested `$?` again, so the
# test saw echo's (always zero) status instead of whoami's. The status is
# now captured into rc immediately after the command runs.
# Also: `==` inside [ ] is a bashism; under #!/bin/sh use -eq.
whoami
rc=$?
echo "$rc"
if [ "$rc" -eq 0 ]
then
echo "Command Executed Successfully"
else
echo "Command Return Code is not 0"
fi | true
638980f16c6cf33c6621b53a15f32d4e09b060e1 | Shell | yorokobi/.dotfiles | /screen/.screen-bin/nb-users | UTF-8 | 183 | 3.125 | 3 | [
"LicenseRef-scancode-public-domain"
] | permissive | #!/bin/sh
#uptime | sed -e "s/.*, *\(.* users\), .*/\1/"
# Print the number of logged-in users with singular/plural wording and no
# trailing newline (intended for a GNU screen status line).
# printf replaces `echo -n`, which is not portable under #!/bin/sh; the
# command substitution uses $( ) instead of backticks, and the variable is
# quoted in the test.
NUM_USERS=$(who | wc -l)
if [ "$NUM_USERS" -gt 1 ] ; then
printf '%s users' "$NUM_USERS"
else
printf '%s user' "$NUM_USERS"
fi
| true |
6f66ed93bbde16c6578ee71f013d1b2a5ba9e7c3 | Shell | chrismarget/raspbian-appliance | /build.d/08_packages.sh | UTF-8 | 1,719 | 4.0625 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Download, verify and stage the packages listed in $PROJECT_DIR/packages.txt.
# Each list line is "<hex digest> <download URL>"; files are cached in
# CACHE_DIR, checksum-verified with shasum, and copied onto partition 3.
. $(dirname $0)/functions
[ -n "$PROJECT_DIR" ] || error "PROJECT_DIR unset"
[ -n "$P3_MNT" ] || error "P3_MNT unset"
[ -d "$P3_MNT" ] || error "partition 3 should be mounted, but isn't"
[ -n "$P3_LABEL" ] || error "P3_LABEL unset"
[ -n "$BOOT_MNT" ] || error "BOOT_MNT unset"
[ -d "$BOOT_MNT" ] || error "/boot partition should be mounted, but isn't"
pkg_list="${PROJECT_DIR}/packages.txt"
# we store the downloaded packages in CACHE_DIR:
if [ -z "$CACHE_DIR" ]
then
CACHE_DIR="${PROJECT_DIR}/.build.cache"
fi
mkdir -p "$CACHE_DIR"
# we put the downloaded packages on the SD card here:
pkg_dir="pkgs"
build_pkg_dir="${P3_MNT}/$pkg_dir"
mkdir -p "$build_pkg_dir"
# when the pi is running, it needs to know both the mount point
# and the directory where the packages will be found:
pi_pkg_mp="/opt/${P3_LABEL}"
pi_pkg_dir="${pi_pkg_mp}/$pkg_dir"
# loop line-by-line over $pkg_list
if [ -f "$pkg_list" ]
then
while read line
do
# 'set --' guards against list lines that start with a dash.
set -- $line
# ignore lines containing more than two "words"
if [ $# -ne 2 ]
then
continue
fi
hash=$1
link=$2
# ensure sum portion contains only valid characters
if [[ ! $hash =~ ^[0-9a-fA-F]*$ ]]
then
continue
fi
# sha1/sha2 produce sums of specific lengths. ignore others.
# NOTE(review): 129 looks like an off-by-one for SHA-512's 128 hex
# chars; kept verbatim -- confirm against the real packages.txt.
case ${#hash} in
40) ;;
64) ;;
96) ;;
129) ;;
*) continue ;;
esac
# Fetch the file if we don't already have it.  (Typo fix: "Feching".)
file="${CACHE_DIR}/$(basename $link)"
[ -f "$file" ] || (echo -e "\nFetching $link..."; curl -o "$file" "$link")
echo -n "Checking $file... "
shasum -c - <<< "$hash $file" && cp "$file" "$build_pkg_dir" && packages_exist="y"
done <<< "$(cat "$pkg_list")"
fi
| true |
3523a02082ab703bd651d8d0fa1c73e46ec37a64 | Shell | joaompinto/ansible-playbook-kvm-infra | /utils/deploy-ssh-config.sh | UTF-8 | 400 | 3.328125 | 3 | [] | no_license | #!/bin/sh
# Add (or refresh) an ssh_config stanza for a newly provisioned VM.
# Usage: deploy-ssh-config.sh <hostname> <ip>
hostname=$1
ip=$2
# Make sure ~/.ssh and the config file exist before editing them.
[ ! -d ~/.ssh ] && mkdir -m700 ~/.ssh
[ ! -f ~/.ssh/config ] && echo >> ~/.ssh/config
# Prefix every "Host" line with a blank line so each stanza becomes a
# /^Host .../,/^$/ range; then delete any existing stanza for $hostname
# (and the helper blank lines).
sed -i 's/^Host/\n&/' ~/.ssh/config
sed -i '/^Host '"$hostname"'$/,/^$/d;/^$/d' ~/.ssh/config
# Append the fresh stanza for this host.
cat << _EOF_ >> ~/.ssh/config
Host ${hostname}
Hostname $ip
User ubuntu
_EOF_
# Drop any stale known_hosts entry for the (possibly reused) IP.
ssh-keygen -f ~/.ssh/known_hosts -R "$ip" > /dev/null 2>&1 || true
chmod 700 ~/.ssh/config
| true |
6a668c56e36dcbf5ae14b0c3f5c1151c3369a144 | Shell | redle/slackware_custom | /scripts/create_installer | ISO-8859-1 | 6,116 | 3.53125 | 4 | [] | no_license | #!/bin/sh
#source /usr/bin/functions
# Target devices: internal disk being installed to, and the USB stick
# carrying the Slackware ISO.
DRIVE_HD=/dev/sda
DRIVE_USB=/dev/sdb
# Kernel module locations for this (pinned) kernel build.
MODULE_PATH=/lib/modules/3.5.0-rc7/kernel
MODULE_IDE=$MODULE_PATH/drivers/ata/
# NOTE(review): this repeats "kernel" and points at ata/, not a usb/
# directory -- looks wrong for a USB module path; confirm before use.
MODULE_USB=$MODULE_PATH/kernel/drivers/ata/
# Partition sizes in GiB: system, database and swap.
PARTITION_SYSTEM=50
PARTITION_DB=50
PARTITION_SWAP=4
ARCH=64
# File used as a crude control channel for the dialog-based menu flow.
PIPE_CONTROL=/tmp/setup_control
# Print an informational message tagged with a bold "[INSTALL]" prefix.
INFO() {
local msg="$1"
echo -e "\033[1m[INSTALL] \033[0m ${msg}"
}
# Print an error message with the same bold "[INSTALL]" tag.
# NOTE(review): identical to INFO and writes to stdout; presumably it was
# meant to differ (e.g. stderr or an ERROR tag) -- confirm before changing.
ERROR() {
local msg="$1"
echo -e "\033[1m[INSTALL] \033[0m ${msg}"
}
# Wipe the MBR of $DRIVE_HD and create four partitions via sfdisk:
#   1: system (bootable, $PARTITION_SYSTEM GiB)   2: swap ($PARTITION_SWAP GiB)
#   3: DB ($PARTITION_DB GiB)                     4: remainder of the disk
# Exits the whole script if sfdisk fails or the device nodes do not appear.
CREATE_PARTITION() {
INFO "CRIANDO PARTICOES: "
# Bug fix: the original referenced the undefined variable $DRIVE here and
# in the fdisk call below; the target disk is $DRIVE_HD.
dd if=/dev/zero of=$DRIVE_HD bs=512 count=1 2> /dev/null > /dev/null
SIZE=$(fdisk -l $DRIVE_HD 2> /dev/null | grep Disk | grep -v doesn | awk '{print $5}')
# Derive cylinder geometry (255 heads, 63 sectors, 512-byte sectors) and
# convert each partition's GiB size to a cylinder count via bc.
CYLINDER=$(echo $SIZE/255/63/512 | bc)
CYLINDER_BYTE=$(echo $SIZE/$CYLINDER | bc)
CYLINDER_SYSTEM=$(echo "($PARTITION_SYSTEM*2^30)/$CYLINDER_BYTE" | bc)
CYLINDER_DB=$(echo "($PARTITION_DB*2^30)/$CYLINDER_BYTE" | bc)
CYLINDER_SWAP=$(echo "($PARTITION_SWAP*2^30)/$CYLINDER_BYTE" | bc)
{
echo ,$CYLINDER_SYSTEM,,*
echo ,$CYLINDER_SWAP,82,
echo ,$CYLINDER_DB,,
echo ,,,-
} | sfdisk -H 255 -S 63 -C $CYLINDER $DRIVE_HD #2> /dev/null > /dev/null
if [ $? -ne 0 ] || [ ! -e ${DRIVE_HD}1 ] || [ ! -e ${DRIVE_HD}2 ] || [ ! -e ${DRIVE_HD}3 ]; then
ERROR "FAIL CREATE PARTITION"
# Bug fix: report failure with a non-zero status (was 'exit 0').
exit 1
fi
}
# Format the partitions created above: 1/3/4 as ext4, 2 as swap.
MAKE_FILESYSTEM() {
mkfs.ext4 ${DRIVE_HD}1
# Bug fix: DRIVE_HD already contains "/dev/sda", so the original
# "mkswap /dev/${DRIVE_HD}2" expanded to the bogus path /dev//dev/sda2.
mkswap ${DRIVE_HD}2
mkfs.ext4 ${DRIVE_HD}3
mkfs.ext4 ${DRIVE_HD}4
}
# Load every module in the ATA driver directory plus sd_mod so the internal
# disk becomes visible. Called as "MODPROBE_DISK MENU" it only shows a
# dialog progress box (for the CONTROL_MENU flow) and returns.
MODPROBE_DISK() {
if [ "$1" == "MENU" ]; then
dialog \
--title ' AGUARDE ' \
--infobox '\nCarregando mdulos do kernel de disco...' \
0 0
return
fi
# (Dead "echo teste; exit 0" that followed the return was removed.)
# Guard the cd: without it a failure would modprobe everything in $PWD.
cd $MODULE_IDE/ || return 1
# Probe each module file; failures are expected for absent hardware,
# hence the discarded stderr. A glob replaces the `ls` subshell.
for MODULE_NAME in *; do
modprobe $MODULE_NAME 2> /dev/null
done
modprobe sd_mod
}
# Mount the USB stick and loop-mount the Slackware ISO found on it.
MOUNT_ISO() {
mount /dev/sdb1 /mnt/pendrive
# Quote the -name pattern so the shell cannot glob-expand it locally
# before find sees it (the original left it unquoted).
ISO=$(find /mnt/pendrive/ -name "*${ARCH}*.iso" -print)
if [ "$ISO" == "" ]; then
INFO "NAO ENCONTRADO O ISO DO SLACKWARE"
# Nothing to loop-mount; bail out instead of calling mount with an
# empty argument as the original did.
return 1
fi
mount -o loop $ISO /mnt/iso/
}
# Load the USB host-controller, VFAT and storage modules so the install
# media becomes visible. "MODPROBE_USB MENU" first shows a dialog progress
# box (its text is reused verbatim from the disk variant).
MODPROBE_USB() {
if [ "$1" == "MENU" ]; then
dialog \
--title ' AGUARDE ' \
--infobox '\nCarregando mdulos do kernel de disco...' \
0 0
fi
# Same modules, same order as before -- just driven by one loop.
local mod
for mod in ehci_hcd uhci_hcd ohci_hcd vfat usb_storage sd_mod; do
modprobe "$mod"
done
}
# Mount the freshly formatted system partition at /mnt/system so the
# installer can populate it.
PREPARE_DISK() {
modprobe ext4
INFO "PREPARANDO SISTEMA"
mount ${DRIVE_HD}1 /mnt/system
}
# Install the Slackware package series a, l, n and d from the mounted ISO
# into /mnt/system, driven by per-series tagfiles under /tagfile/.
# installpkg errors are appended to /tmp/error.
INSTALL_SYSTEM() {
PATH_INSTALL=/mnt/iso/slackware${ARCH}/
INFO "INICIANDO INSTALACAO"
# -p: /tmp usually exists already; the plain mkdir just printed an error.
mkdir -p /tmp
# One loop replaces four copy-pasted cd+installpkg pairs.
# NOTE(review): the single-dash "-root"/"-tagfile" spellings are kept
# verbatim from the original; Slackware's installpkg documents the
# double-dash forms -- confirm which one this environment expects.
local serie
for serie in a l n d; do
cd $PATH_INSTALL/$serie/
installpkg -root /mnt/system -tagfile /tagfile/${serie}_pkg *.t?z 2>> /tmp/error
done
}
# Write the target system's /etc/fstab (the quoted block below goes into
# the file verbatim, leading/trailing blank lines included) and create the
# extra mount points referenced by it.
CREATE_FSTAB() {
echo -e "
/dev/sda2 swap swap defaults 0 0
/dev/sda1 / ext4 defaults 1 1
/dev/sda3 /files_db ext4 defaults 1 2
/dev/sda4 /files ext4 defaults 1 2
#/dev/sda2 swap swap defaults 0 0
" > /mnt/system/etc/fstab
mkdir /mnt/system/files
mkdir -p /mnt/system/files/vm
mkdir /mnt/system/files_db
}
# Write the target's /etc/lilo.conf (quoted block is written verbatim) and
# install the LILO boot loader onto the disk's MBR, chrooted via -r.
CREATE_LILO() {
echo -e "
timeout = 1200
lba32
compact
delay = 60
change-rules
reset
install = menu
image = /boot/vmlinuz
root = /dev/sda1
label = lnx
read-only
" > /mnt/system/etc/lilo.conf
lilo -b /dev/sda -r /mnt/system
}
# Leave the target tree and unmount the installed system.
FINISH_INSTALL() {
cd /
umount /mnt/system/
}
# Show the initial "this will erase the whole disk" yes/no dialog. If the
# user answers "no" (dialog exits 1), write step 255 (abort) into the
# control pipe that CONTROL_MENU polls.
MENU_INIT() {
dialog --title ' AVISO - NCS GNU\LINUX ' --yesno '\nO procedimento de instalao automtica ir apagar todos os dados do hard disk, antes de prosseguir verifique se os backup necessrios foram feitos.\n\n Voc deseja continuar?\n\n' 0 0
if [ $? -eq 1 ]; then
echo 255 > $PIPE_CONTROL
fi
}
# Poll $PIPE_CONTROL once per second and run each installer step the first
# time its number appears there: 1 = initial warning dialog, 2 = disk-module
# progress box, 255 = quit. Steps never repeat because STEP_CURRENT only
# advances.
CONTROL_MENU() {
STEP_CURRENT=0
touch $PIPE_CONTROL
echo 0 > $PIPE_CONTROL
# 'while true' replaces the odd 'while [ 1 -eq 1 ]'.
while true; do
STEP_IN=$(cat $PIPE_CONTROL)
if [ "$STEP_IN" -gt "$STEP_CURRENT" ]; then
STEP_CURRENT=$STEP_IN
case $STEP_CURRENT in
1)
MENU_INIT
;;
2)
MODPROBE_DISK MENU
;;
255)
exit 0
;;
esac
fi
sleep 1
done
}
# Print the command summary. The original help text had stray '[' garbles
# and listed only five of the dispatcher's commands; it now covers all of
# them.
USAGE() {
echo -e "\033[1mUSE:\033[0m installer [OPCAO]
\033[1m[OPCAO]\033[0m
cp - CREATE_PARTITION
mu - MODPROBE_USB
md - MODPROBE_DISK
mi - MOUNT_ISO
is - INSTALL_SYSTEM
mf - MAKE_FILESYSTEM
pd - PREPARE_DISK
dv - partition, format and mount the install media
fstab - CREATE_FSTAB
lilo - CREATE_LILO
install - full unattended installation
test_menu - exercise the dialog menu flow
control_menu - CONTROL_MENU
init - MENU_INIT"
}
# Command dispatcher: map the first argument onto the functions above.
case $1 in
cp)
CREATE_PARTITION
;;
mu)
MODPROBE_USB
;;
md)
MODPROBE_DISK
;;
mi)
MOUNT_ISO
;;
is)
INSTALL_SYSTEM
;;
mf)
MAKE_FILESYSTEM
;;
pd)
PREPARE_DISK
;;
# dv: prepare everything needed before an install (partition, format,
# load USB modules, mount the ISO).
dv)
MODPROBE_DISK
CREATE_PARTITION
MAKE_FILESYSTEM
MODPROBE_USB
MOUNT_ISO
;;
fstab)
CREATE_FSTAB
;;
lilo)
CREATE_LILO
;;
# install: full unattended installation, end to end. The sleep gives the
# USB stack time to settle before the ISO is searched for.
install)
MODPROBE_DISK
CREATE_PARTITION
MAKE_FILESYSTEM
MODPROBE_USB
sleep 10
MOUNT_ISO
PREPARE_DISK
INSTALL_SYSTEM
CREATE_FSTAB
CREATE_LILO
FINISH_INSTALL
;;
# test_menu: drive the dialog flow only; the destructive steps stay
# commented out.
test_menu)
MENU_INIT
echo 2 > $PIPE_CONTROL
sleep 10
#echo 2 > $PIPE_CONTROL
#CREATE_PARTITION
#MAKE_FILESYSTEM
#MODPROBE_USB
#sleep 10
#MOUNT_ISO
#PREPARE_DISK
#INSTALL_SYSTEM
#CREATE_FSTAB
#CREATE_LILO
#FINISH_INSTALL
;;
control_menu)
CONTROL_MENU
;;
init)
MENU_INIT
;;
*)
USAGE
;;
esac
| true |
954be8d2f978c9df3122a04c40bfce6e7875146a | Shell | cash2one/lycy_autoUpdate | /shell/国外/foreign_backend_rsync.sh | UTF-8 | 643 | 3.015625 | 3 | [] | no_license | #!/bin/bash
#path:/home/op/sh/foreign_backend_rsync.sh
#if use rsync to upload,you should set ff=unix
#2016.3.9
server_list=(78.46.112.249)
# Push the contents of the local upload directory to every backend
# server listed in $server_list: first create a timestamped directory
# remotely (sudo over ssh), then copy the files into it with rsync.
#
# NOTE: this function shadows the rsync(1) binary, so the transfer below
# must use `command rsync`; a bare `rsync` here would call this function
# recursively forever (the bug in the original).
# SECURITY NOTE: the sudo password is hard-coded and echoed over the ssh
# session; prefer passwordless sudo or a restricted ssh key.
rsync(){
local update_time local_path remote_path remote_user remote_pass i
update_time=$(date +%y%m%d%H%M%S)
local_path=/opt/fps/pack_tools/backend_pack/upload
remote_path=/data/backup-update
remote_user=op
remote_pass=ychd@0613
for i in "${server_list[@]}"; do
# The heredoc is expanded locally, so the remote shell receives
# literal paths and the already-substituted password.
ssh -T "$i" <<EOF
[ ! -d $remote_path/$update_time ] && echo $remote_pass|sudo -S mkdir -p $remote_path/$update_time
sudo chown -R $remote_user.$remote_user $remote_path
EOF
# 'command' bypasses this function and runs the real rsync binary.
command rsync -avzp "$local_path"/* "$i:$remote_path/$update_time"
done
}
rsync | true |
a6933f2a52bd42451ce82283d8261180efa29ff9 | Shell | Alpine-DAV/ascent | /src/examples/paraview-vis/tests/build_and_run_sim_with_docker.sh | UTF-8 | 1,146 | 3.3125 | 3 | [
"BSD-3-Clause",
"Zlib"
] | permissive | #!/bin/bash
###############################################################################
# Copyright (c) Lawrence Livermore National Security, LLC and other Ascent
# Project developers. See top-level LICENSE AND COPYRIGHT files for dates and
# other details. No copyright assignment is required to contribute to Ascent.
###############################################################################
# Start the ubuntu-paraview-ascent docker and run
# build_and_run_sim_inside_docker.sh
# ascent dir inside docker. See README-docker.md for the command that builds the
# container
date
ascentDir=/root/projects/ascent
# keep_going: optional count that says how many time we keep going when we should stop
keep_going=$1
if [[ -z $keep_going ]]; then
keep_going=0
fi
build_option=$2
if [[ -z $build_option ]]; then
build_option="-j40"
fi
build_dependency=$3
if [[ -z $build_dependency ]]; then
build_dependency=""
fi
docker container start ubuntu-paraview-ascent
docker exec ubuntu-paraview-ascent ${ascentDir}/src/examples/paraview-vis/tests/build_and_run_sim_inside_docker.sh $keep_going $build_option $build_dependency
date
| true |
22f1057cb39942e9765210c896ec2de088680ad4 | Shell | gitbarnabedikartola/calamares-settings-biglinux | /calamares-settings-biglinux/usr/sbin/calamares-oem-uid | UTF-8 | 617 | 2.921875 | 3 | [] | no_license | #!/bin/sh
# SPDX-FileCopyrightText: 2018 Harald Sitter <sitter@kde.org>
# SPDX-License-Identifier: GPL-3.0-only OR LicenseRef-KDE-Accepted-GPL
# Hack:
# Calamares doesn't allow forcing a uid, so we implicitly shift the
# auto-allocation ranges down so the oem user is created at uid 900.
# This hack is undone in a later module once the user was created,
# preserving the expectation that the default/first real user is 1000.
# $1 - root of the target installation.
set -ex
ROOT=$1
# Keep a pristine copy so the later module can restore the original.
cp "$ROOT/etc/login.defs" "$ROOT/etc/login.defs.oem"
sed -i 's%UID_MIN.*1000%UID_MIN 900%g' "$ROOT/etc/login.defs"
sed -i 's%GID_MIN.*1000%GID_MIN 900%g' "$ROOT/etc/login.defs"
| true |
ffe8cd582161dbfea44cefa28bb0ece8256bfa8e | Shell | JulienBalestra/enjoliver | /matchbox/assets/coreos/alter-distribution.sh | UTF-8 | 6,026 | 3.09375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
set -ex
test $(id -u -r) -eq 0
test ${VERSION}
test ${COMMIT_ID}
cd $(dirname $0)
COREOS_DIRECTORY=$(pwd -P)
ASSETS_DIRECTORY=$(dirname ${COREOS_DIRECTORY})
export VERSION_DIR=${COREOS_DIRECTORY}/${VERSION}
cd ${VERSION_DIR}
export USR_A=${VERSION_DIR}/usr-a
export ROOTFS=${VERSION_DIR}/rootfs
export BOOT=${VERSION_DIR}/boot
export VERSION
mkdir -pv {squashfs,initrd} ${USR_A} ${BOOT} ${ROOTFS}
bzip2 -fdk coreos_production_image.bin.bz2
${COREOS_DIRECTORY}/disk.py rw
LOOP=$(losetup --find --show coreos_production_image.bin)
partprobe ${LOOP}
set +e
umount ${LOOP}p9 ${ROOTFS}
umount ${LOOP}p3 ${USR_A}
umount ${LOOP}p1 ${BOOT}
set -e
mount ${LOOP}p9 ${ROOTFS}
mount ${LOOP}p3 ${USR_A}
mount ${LOOP}p1 ${BOOT}
gunzip -c --force coreos_production_pxe_image.cpio.gz > coreos_production_pxe_image.cpio
cd initrd
cpio -id < ../coreos_production_pxe_image.cpio
cd ../squashfs
unsquashfs -no-progress ../initrd/usr.squashfs
# Delete the path given as $1 (relative to the fs root, e.g. /bin/docker)
# from both the unpacked squashfs tree and the mounted USR-A partition.
# Globals: USR_A - mount point of the USR-A partition (set by the caller).
_remove_in_fs(){
local fs
for fs in squashfs-root/ "${USR_A}"
do
# -f ignores missing files; -- guards against option-like names;
# quoting prevents word splitting if the paths contain spaces.
rm -fv -- "${fs}/${1}"
done
}
# Compress the binary given as $1 (path relative to the fs root) with
# upx in both the unpacked squashfs tree and the mounted USR-A partition.
# Globals: USR_A - mount point of the USR-A partition (set by the caller).
_upx_in_fs() {
for fs in squashfs-root/ ${USR_A}
do
# -q compresses quietly; -t afterwards verifies the packed binary.
upx -q ${fs}/${1}
upx -t ${fs}/${1}
done
}
# CWD == ~/matchbox/assets/coreos/${VERSION}/squashfs
EXCLUDES="--exclude rootfs/dgr --exclude rootfs/etc --exclude rootfs/tmp --exclude rootfs/run --exclude rootfs/sys"
for useless in /bin/docker /bin/containerd /bin/containerd-shim /bin/dockerd /bin/runc \
/bin/docker-containerd-shim /bin/docker-containerd /bin/docker-runc /bin/ctr /bin/docker-proxy /bin/mayday \
/bin/actool /bin/tpmd
do
_remove_in_fs ${useless}
done
HAPROXY_ACI=$(ls ${ACI_PATH}/haproxy/haproxy-*-linux-amd64.aci | head -n 1)
tar -C squashfs-root/ -xvf ${HAPROXY_ACI} rootfs/usr/sbin --strip 2 ${EXCLUDES}
tar -C ${USR_A}/ -xvf ${HAPROXY_ACI} rootfs/usr/sbin --strip 2 ${EXCLUDES}
_remove_in_fs /bin/etcd2
_remove_in_fs /bin/etcdctl
ETCD_ACI=$(ls ${ACI_PATH}/etcd/etcd-*-linux-amd64.aci | head -n 1)
tar -C squashfs-root/ -xvf ${ETCD_ACI} rootfs/usr/bin --strip 2 ${EXCLUDES}
tar -C ${USR_A}/ -xvf ${ETCD_ACI} rootfs/usr/bin --strip 2 ${EXCLUDES}
VAULT_ACI=$(ls ${ACI_PATH}/vault/vault-*-linux-amd64.aci | head -n 1)
tar -C squashfs-root/ -xvf ${VAULT_ACI} rootfs/usr/ --strip 2 ${EXCLUDES}
tar -C ${USR_A}/ -xvf ${VAULT_ACI} rootfs/usr/ --strip 2 ${EXCLUDES}
_remove_in_fs /bin/ip
IPROUTE2_ACI=$(ls ${ACI_PATH}/iproute2/iproute2-*-linux-amd64.aci | head -n 1)
tar -C squashfs-root/ -xvf ${IPROUTE2_ACI} rootfs/usr/bin --strip 2 ${EXCLUDES}
tar -C ${USR_A}/ -xvf ${IPROUTE2_ACI} rootfs/usr/bin --strip 2 ${EXCLUDES}
_remove_in_fs /bin/fleetd
_remove_in_fs /bin/fleetctl
FLEET_ACI=$(ls ${ACI_PATH}/fleet/fleet-*-linux-amd64.aci | head -n 1)
tar -C squashfs-root/ -xvf ${FLEET_ACI} rootfs/usr/bin --strip 2 ${EXCLUDES}
tar -C ${USR_A}/ -xvf ${FLEET_ACI} rootfs/usr/bin --strip 2 ${EXCLUDES}
_remove_in_fs /bin/rkt /lib64/rkt/stage1-images/stage1-fly.aci /lib64/rkt/stage1-images/stage1-coreos.aci
RKT_ACI=$(ls ${ACI_PATH}/rkt/rkt-*-linux-amd64.aci | head -n 1)
tar -C squashfs-root/ -xvf ${RKT_ACI} rootfs/usr --keep-directory-symlink --strip 2 ${EXCLUDES}
tar -C ${USR_A}/ -xvf ${RKT_ACI} rootfs/usr --keep-directory-symlink --strip 2 ${EXCLUDES}
mkdir -pv squashfs-root/local/cni
mkdir -pv ${USR_A}/local/cni
CNI_ACI=$(ls ${ACI_PATH}/cni/cni-*-linux-amd64.aci | head -n 1)
tar -C squashfs-root/local/cni -xvf ${CNI_ACI} rootfs/usr --strip 2 ${EXCLUDES}
tar -C ${USR_A}/local/cni -xvf ${CNI_ACI} rootfs/usr --strip 2 ${EXCLUDES}
for p in squashfs-root/bin ${USR_A}/bin
do
cd ${p}
ln -svf ../local/cni/bin/cnitool
cd -
done
HYPERKUBE_ACI=$(ls ${ACI_PATH}/hyperkube/hyperkube-*-linux-amd64.aci | head -n 1)
tar -C squashfs-root/bin -xvf ${HYPERKUBE_ACI} rootfs/ --strip 1 ${EXCLUDES}
tar -C ${USR_A}/bin -xvf ${HYPERKUBE_ACI} rootfs/ --strip 1 ${EXCLUDES}
cp -v ${ASSETS_DIRECTORY}/enjoliver-agent/serve/enjoliver-agent squashfs-root/bin/
cp -v ${ASSETS_DIRECTORY}/enjoliver-agent/serve/enjoliver-agent ${USR_A}/bin
_upx_in_fs /bin/enjoliver-agent
cp -v ${ASSETS_DIRECTORY}/discoveryC/serve/discoveryC squashfs-root/bin
cp -v ${ASSETS_DIRECTORY}/discoveryC/serve/discoveryC ${USR_A}/bin
_upx_in_fs /bin/discoveryC
for b in /bin/locksmithctl /bin/coreos-cloudinit
do
_upx_in_fs ${b}
done
mkdir -pv ${USR_A}/local/etc/ squashfs-root/local/etc/
echo -n "{\"release\": \"${VERSION}\", \"alter_timestamp\": \"$(date +%s)\", \"commit\": \"${COMMIT_ID}\"}" | \
tee ${USR_A}/local/etc/alter-version squashfs-root/local/etc/alter-version ${VERSION_DIR}/alter-version
# Cloud requirements
mkdir -pv ${ROOTFS}/etc/systemd/system/multi-user.target.wants ${ROOTFS}/etc/systemd/system/multi-user.target.requires
cp -v ${COREOS_DIRECTORY}/oem-cloudinit.service ${ROOTFS}/etc/systemd/system/oem-cloudinit.service
cd ${ROOTFS}/etc/systemd/system/multi-user.target.wants
ln -svf /etc/systemd/system/oem-cloudinit.service oem-cloudinit.service
cd -
cp -v ${COREOS_DIRECTORY}/coreos-metadata-sshkeys@.service ${ROOTFS}/etc/systemd/system/coreos-metadata-sshkeys@.service
cd ${ROOTFS}/etc/systemd/system/multi-user.target.requires
ln -svf /etc/systemd/system/coreos-metadata-sshkeys@.service coreos-metadata-sshkeys@core.service
cd -
sync
umount ${ROOTFS}
umount ${USR_A}
${COREOS_DIRECTORY}/disk_util --disk_layout=base verity --root_hash=${VERSION_DIR}/coreos_production_image_verity.txt ${VERSION_DIR}/coreos_production_image.bin
printf %s "$(cat ${VERSION_DIR}/coreos_production_image_verity.txt)" | \
dd of=${BOOT}/coreos/vmlinuz-a conv=notrunc seek=64 count=64 bs=1 status=none
sync
umount ${BOOT}
losetup -d ${LOOP}
${COREOS_DIRECTORY}/disk.py ro
bzip2 -fzk ${VERSION_DIR}/coreos_production_image.bin -9
cp -v ${COREOS_DIRECTORY}/coreos-install squashfs-root/bin/coreos-install
mksquashfs squashfs-root/ ../initrd/usr.squashfs -noappend -always-use-fragments
cd ../initrd
find . | cpio -o -H newc | gzip -9 > ../coreos_production_pxe_image.cpio.gz
cd ../
rm -rf squashfs initrd coreos_production_pxe_image.cpio ${USR_A}
| true |
5cc6d3474a021da4cf6caa15682859512f8624aa | Shell | ltw/dotfiles | /shells/common/prompt.sh | UTF-8 | 290 | 3.5 | 4 | [
"MIT"
] | permissive | function user_at_host() {
local str
if [[ "$USER" != "lucas" ]]; then
str="$USER"
if [[ "$USER" == "root" ]]; then
str="$pr_red$str$pr_reset"
fi
str="${str}@"
fi
if [[ -n "$SSH_TTY" ]]; then
str="$str$pr_blue`hostname -s`$pr_reset"
fi
echo $str
}
| true |
cf7daaa91e472bd5251dcbc502e90888de8ae919 | Shell | ashiqopu/VNL | /scripts/addtopo.sh | UTF-8 | 1,219 | 3.3125 | 3 | [] | no_license | #!/bin/bash
R="$( cd "$( dirname "${BASH_SOURCE[0]}" )/.." && pwd )"
tt=$1
topoid=$2
cd deployments/$tt || exit 1
# make service script
php5 $R/scripts/gencmd.php $R/topo/$tt.xml $topoid > $topoid.sh
chmod +x $topoid.sh
# make key pair
if [ ! -f $topoid.pvtkey -o ! -f $topoid.pubkey ]
then
if [ -f $topoid.pvtkey ]; then rm $topoid.pvtkey; fi
if [ -f $topoid.pubkey ]; then rm $topoid.pubkey; fi
ssh-keygen -f $topoid.pvtkey -N '' -C topo$topoid
mv $topoid.pvtkey.pub $topoid.pubkey
fi
# make topoimage
php5 $R/scripts/topoimage.php $R/topo/$tt $topoid > $topoid.png
# make user package
if [ -d vnltopo$topoid ]; then rm -rf vnltopo$topoid; fi
mkdir vnltopo$topoid
cd vnltopo$topoid
php5 $R/scripts/ttlist.php sshconfig $R/topo/$tt.xml $topoid >vnltopo$topoid.sshconfig
cp ../$topoid.pvtkey ./vnltopo$topoid.pvtkey
php5 $R/scripts/ttlist.php connscript $R/topo/$tt.xml $topoid >vnltopo$topoid.sh
php5 $R/scripts/ttlist.php ip $R/topo/$tt.xml $topoid >vnltopo$topoid.iplist
for rtablefilename in `php5 $R/scripts/ttlist.php rtablelist $R/topo/$tt.xml`
do
php5 $R/scripts/ttlist.php rtable $R/topo/$tt.xml $topoid $rtablefilename >$rtablefilename
done
tar cf ../$topoid.tar *
cd ..
rm -rf vnltopo$topoid
| true |
9e54654348be9ce4feedaee735c4fb728b34e18a | Shell | groundnuty/lw-daap | /deploy/barehost/lwdaap.sh | UTF-8 | 5,023 | 2.96875 | 3 | [] | no_license | #!/usr/bin/env bash
#
# Version 0.1b
# Next TODO
# -Config SSL Apache
# -Remove HARDCODE celeryd config args
# -Set custom conf args for redis-server
#
# Print an error message to stderr and abort the installer with status 1.
# printf is used instead of echo so messages beginning with '-' or
# containing backslashes are printed literally.
die () {
printf '%s\n' "$*" >&2
exit 1
}
[ "$#" -eq 1 ] || die "Usage: $(basename $0) http://<site url>:<site port>"
#
# INSTALL PREREQUISITES
#
sudo apt-get update
sudo DEBIAN_FRONTEND=noninteractive apt-get -y upgrade
sudo apt-get -y install build-essential git redis-server \
libmysqlclient-dev libxml2-dev libxslt-dev \
libjpeg-dev libfreetype6-dev libtiff-dev \
libffi-dev libssl-dev \
software-properties-common python-dev \
python-pip apache2 libapache2-mod-wsgi libapache2-mod-xsendfile \
libapache2-mod-shib2 \
supervisor
sudo DEBIAN_FRONTEND=noninteractive apt-get -y install mysql-server
curl -sL https://deb.nodesource.com/setup | sudo bash -
sudo apt-get install -y nodejs
sudo su -c "npm install -g bower"
sudo npm install -g less@1.7.5 clean-css requirejs uglify-js bower
sudo pip install -U virtualenvwrapper pip
#
# PRECONFIGURE
#
CFG_LWDAAP_VIRTUALENV=lwdaap
CFG_LWDAAP_REPOSITORY=https://github.com/aeonium/lw-daap.git
CFG_LWDAAP_WORKDIR=$HOME/lwdaap
CFG_LWDAAP_DATABASE_NAME=lwdaap
CFG_LWDAAP_DATABASE_USER=lwdaap
CFG_LWDAAP_DATABASE_HOST=localhost
CFG_LWDAAP_DATABASE_PORT=3306
CFG_LWDAAP_SITE_URL=$1
CFG_LWDAAP_SITE_SECURE_URL=$(echo $1 | sed 's/http/https/')
CFG_LWDAAP_USER=${USER:=$(whoami)}
#
# INSTALL
#
source $(which virtualenvwrapper.sh)
mkvirtualenv $CFG_LWDAAP_VIRTUALENV
git clone $CFG_LWDAAP_REPOSITORY $CFG_LWDAAP_WORKDIR
pushd $CFG_LWDAAP_WORKDIR
pip install -r requirements.txt --exists-action i
pip install -e .
#
# INVENIO CONFIG
# Invenio configuration values:
# http://invenio-software.org/code-browser/invenio.base.config-module.html
#
inveniomanage config create secret-key
CFG_FILE=$(inveniomanage config locate)
cat << EOF >> $CFG_FILE
CFG_EMAIL_BACKEND = u'flask.ext.email.backends.console.Mail'
COLLECT_STORAGE = u'invenio.ext.collect.storage.link'
CFG_BIBSCHED_PROCESS_USER = u'$CFG_LWDAAP_USER'
CFG_DATABASE_HOST = u'$CFG_LWDAAP_DATABASE_HOST'
CFG_DATABASE_PORT = u'$CFG_LWDAAP_DATABASE_PORT'
CFG_DATABASE_NAME = u'$CFG_LWDAAP_DATABASE_NAME'
CFG_DATABASE_USER = u'$CFG_LWDAAP_DATABASE_USER'
CFG_SITE_URL = u'$CFG_LWDAAP_SITE_URL'
CFG_SITE_SECURE_URL = u'$CFG_LWDAAP_SITE_SECURE_URL'
DEBUG = True
DEBUG_TB_ENABLED = False
ASSETS_DEBUG = True
ASSETS_AUTO_BUILD = True
EOF
#
# INSTALL AND COLLECT ASSETS
#
inveniomanage bower -i bower-base.json > bower.json
echo '{"directory": "lw_daap/base/static/vendors"}' > .bowerrc
CI=true bower install
inveniomanage collect
#
# CREATE DATABASE
#
inveniomanage database init --user=root --yes-i-know
inveniomanage database create
#
# CONFIGURE APACHE
#
inveniomanage apache create-config > /dev/null
sudo a2enmod rewrite
sudo a2enmod xsendfile
sudo a2enmod ssl
sudo a2enmod shib2
sudo cp ${VIRTUAL_ENV}/var/invenio.base-instance/apache/invenio-apache-vhost.conf /etc/apache2/sites-available/lwdaap.conf
sudo sed -i '/Listen 80/s/^#//g' /etc/apache2/sites-available/lwdaap.conf
sudo a2dissite *default*
sudo a2ensite lwdaap
sudo cp ${VIRTUAL_ENV}/var/invenio.base-instance/apache/invenio-apache-vhost-ssl.conf /etc/apache2/sites-available/lwdaap-ssl.conf
sudo sed -i '/Listen 443/s/^#//g' /etc/apache2/sites-available/lwdaap-ssl.conf
sudo sed -i 's#^SSLCertificateFile.*#SSLCertificateFile /etc/ssl/certs/ssl-cert-snakeoil.pem#' /etc/apache2/sites-available/lwdaap-ssl.conf
sudo sed -i 's#^\#SSLCertificateKeyFile.*#SSLCertificateKeyFile /etc/ssl/private/ssl-cert-snakeoil.key#' /etc/apache2/sites-available/lwdaap-ssl.conf
# ShibRequireAll not supported on Apache 2.4+: This command is subsumed by Apache 2.4's own support for controlling authorization rule composition.
sudo sed -i 's/^\(.*ShibRequireAll.*\)/#\1/' /etc/apache2/sites-available/lwdaap-ssl.conf
sudo a2ensite lwdaap-ssl
sudo truncate -s 0 /etc/apache2/ports.conf
#
# CELERY
#
cat $CFG_LWDAAP_WORKDIR/deploy/barehost/supervisor/celerybeat.conf \
| sed "s#%VIRTUAL_ENV%#$VIRTUAL_ENV#g" \
| sed "s#%CFG_LWDAAP_USER%#$CFG_LWDAAP_USER#g" \
| sudo tee /etc/supervisor/conf.d/celerybeat.conf
cat $CFG_LWDAAP_WORKDIR/deploy/barehost/supervisor/celeryd.conf \
| sed "s#%VIRTUAL_ENV%#$VIRTUAL_ENV#g" \
| sed "s#%CFG_LWDAAP_USER%#$CFG_LWDAAP_USER#g" \
| sudo tee /etc/supervisor/conf.d/celeryd.conf
sudo service supervisor restart
sudo mkdir -p /var/log/celery
sudo mkdir -p /var/run/celery
sudo chown $USER /var/log/celery
sudo chown $USER /var/run/celery
#
# REDIS
#
sudo service redis-server restart
#
# BIBSCHED
#
cat << EOF > bibsched-run.sh
#!/usr/bin/env bash
bibsched purge
bibsched start
bibindex -s5m -u admin
bibrank -s5m -u admin
bibreformat -s5m -o HB,HD -u admin
bibsort -s5m -u admin
webcoll -s5m -u admin
EOF
chmod +x bibsched-run.sh
./bibsched-run.sh
#
# SOME SOFTLINKS
#
ln -s ${VIRTUAL_ENV}/var/log ./log
ln -s $(inveniomanage config locate 2> /dev/null) ./invenio.cfg
popd
sudo service apache2 restart
echo "INSTALL DONE"
| true |
f5bd9a194fa696ef063d9c19a4897ee2d462578b | Shell | ADVANTECH-Corp/RISC_tools_scripts | /imx8/cp_uboot.sh | UTF-8 | 716 | 2.921875 | 3 | [] | no_license | #!/bin/sh
# Copy freshly built U-Boot artifacts into the per-SoC staging directory.
# Usage: cp_uboot.sh <uboot-build-dir> <soc>
#   <soc> is one of: imx8mm imx8m imx8qxp imx8qm
UBOOT_PATH=$1
SOC=$2
case "$SOC" in
"imx8mm")
echo "Copy files to [iMX8MM]"
# i.MX8M Mini stages SPL + bare u-boot + the board devicetree.
cp "$UBOOT_PATH/u-boot-nodtb.bin" iMX8M/u-boot-nodtb.bin
cp "$UBOOT_PATH/u-boot.dtb" iMX8M/fsl-imx8mm-evk.dtb
cp "$UBOOT_PATH/spl/u-boot-spl.bin" iMX8M/u-boot-spl.bin
;;
"imx8m")
echo "Copy files to [iMX8M]"
cp "$UBOOT_PATH/u-boot-nodtb.bin" iMX8M/u-boot-nodtb.bin
cp "$UBOOT_PATH/u-boot.dtb" iMX8M/fsl-imx8m-evk.dtb
cp "$UBOOT_PATH/spl/u-boot-spl.bin" iMX8M/u-boot-spl.bin
;;
"imx8qxp")
echo "Copy files to [iMX8QX]"
# QXP/QM ship a single combined u-boot.bin.
cp "$UBOOT_PATH/u-boot.bin" iMX8QX/
;;
"imx8qm")
echo "Copy files to [iMX8QM]"
cp "$UBOOT_PATH/u-boot.bin" iMX8QM/
;;
*)
echo "parameter2 must be imx8mm, imx8m, imx8qxp, imx8qm"
;;
esac
# Flush the copied images to disk before the medium is removed.
sync
| true |
4f35d959fd5c5600491b3ba92b7be1e4761d02cf | Shell | imjacobbh/adminSis | /funcionesavanzadas/grupo.sh | UTF-8 | 1,721 | 3.421875 | 3 | [] | no_license | #!/bin/bash
# Interactive group-administration menu built on dialog(1):
#   1) create a new group
#   2) add an existing user to an existing group
#   3) (or ESC / Cancel) leave the loop
opc=0
while [ "$opc" -ne "3" ]; do
clear
opc=$(dialog --title "ALTA DE GRUPO" --menu --stdout "QUE QUIERE HACER?" 0 0 0 \
1 "CREAR GRUPO"\
2 "AÑADIR USUARIO A GRUPO" \
3 "Regresar")
# dialog exits non-zero on ESC/Cancel; treat that as "quit" (option 3).
if [ $? != 0 ]; then
opc=3
fi
case "$opc" in
1)
grupo=$(dialog --stdout --title "GRUPO" --inputbox "nombre del grupo que quieres crear " 0 0)
if [ ${#grupo} = 0 ]; then # validation: the group name must not be empty
dialog --title "Información" --msgbox "No se realizó la operacion con exito, grupo vacio" 0 0
else
groupadd $grupo # groupadd creates the new group
dialog --title "Información" --msgbox "Se realizó la operacion con exito " 0 0
fi
;;
2)
grupo=$(dialog --stdout --title "GRUPO" --inputbox "nombre del grupo al que vas a añadir al usuario " 0 0)
login=$(dialog --stdout --title "LOGIN" --inputbox "Dame el nombre de usuario " 0 0)
if [ ${#login} = 0 ] || [ ${#grupo} = 0 ] # validation: both fields are required
then
dialog --title "Información" --msgbox "No se realizó la operacion con exito, uno o más campos se dejaron vacios" 0 0
else
egrep "^$login" /etc/passwd >/dev/null # egrep checks the login exists in /etc/passwd
if [ $? -eq 0 ]
then
egrep "^$grupo" /etc/group >/dev/null # egrep checks the group exists in /etc/group
if [ $? -eq 0 ]
then
usermod -a -G $grupo $login # usermod -a -G appends the user to the supplementary group
dialog --title "Información" --msgbox " Se realizó la operacion con exito" 0 0
else
dialog --title "Información" --msgbox "No se realizó la operacion con exito" 0 0
fi
fi
fi
;;
3)
;;
esac
done
#groupadd $grupo
#usermod -a -G $grupo $login
| true |
469e6c917762ce64dff15de04e25e90ed7ec08bd | Shell | RoryIAngus/CIC-Visualisation | /2_Environment/startEnv-Copy_from_root.sh | UTF-8 | 1,926 | 3.15625 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
#created by Rory Angus - 12Nov18
# Jupyter Notebook commands
# create a new screen to hold the commands
screen -d -m -S Jupyter
# execute the commands in the "" Add a new line character to sumilate pressing enter
# this creates a notebook that can run Python at the address http://127.0.0.1:8888/tree
screen -S Jupyter -p 0 -X stuff "jupyter notebook\n"
# MongoDB Screen commands
# create a new screen to hold the commands
screen -d -m -S MongoDB
# execute the commands in the "" Add a new line character to sumilate pressing enter
# This creates a mongoDB that can bee accessed at 127.0.0.1:27017
screen -S MongoDB -p 0 -X stuff "conda activate mongodb\n
mongod\n"
# screen manual https://www.gnu.org/software/screen/manual/screen.html
# use this website to change the screenrs file so that it is not resized when the window is connected to https://superuser.com/questions/374752/reattaching-screen-having-irssi-running-forces-window-resize
# this is the command to edit the file with write permissions
# sudo vim /etc/screenrc
# This lists the screens that have been created. As this is run mannualy it lets the user see that it was successful
screen -ls
## the following is a few instrcutions to get you started working with the screens ##
# (note: remove the '' from the commands.
#
# help is found by 'ctrl+a' '?'
# typing 'screen -ls' porduces an output like this:
# There are screens on:
# 11010.MSREnv (Detached)
# 21782.MongoDB (Detached)
# 21775.Jupyter (Detached)
# 3 Sockets in /var/run/screen/S-ec2-user.
#
# To attached to a screen use the following command and use the correct environment variable. For e.g.
# screen -dR 11010.MSREnv
# screen -dR 21782.MongoDB
# screen -dR 21775.Jupyter
# Once on the screen to return back you need to hit 'ctrl+a' followed by typing 'd'
# 'ctrl+a' 'd'
#
# To close the screen and terminate it. Type at the prompt
# 'exit'
| true |
71023b897a59d720e3432e2a8257036b33d14d19 | Shell | spencedalley/dotfiles-1 | /tmux/segments/arc_changes_planned.sh | UTF-8 | 228 | 3.28125 | 3 | [] | no_license | # Displays the number of maniphest diffs that have planned changes
run_segment() {
cd ~/git/hackerone
tasks=`arc list | grep 'Changes Planned' | wc -l | tr -d ' '`
if [[ $tasks > 0 ]]; then
echo " ${tasks}"
fi
}
| true |
a48fe74bf47d536651a982491b52c16a0f11f9aa | Shell | dgabrielson/bash-functions | /bash_functions.d/idrac_tunnel.sh | UTF-8 | 1,697 | 3.5 | 4 | [] | no_license |
# Open an ssh tunnel to a Dell iDRAC through a gateway host and launch
# the iDRAC web UI in the local browser.
# $1 - ssh-able gateway host to tunnel through
# $2 - hostname of the iDRAC behind the gateway
# Forwards local 443 (web UI) plus 5900/5901 (virtual console/VNC).
function ssh-idrac-tunnel()
{
local gateway=$1
local idrac=$2
if [[ -z ${gateway} ]]; then
echo "You must supply the ssh-able host to tunnel through."
return 1
fi
if [[ -z ${idrac} ]]; then
echo "You must supply the hostname of the iDRAC you want to setup a tunnel for."
return 1
fi
# sudo is needed because forwarding local port 443 (<1024) is privileged.
echo "Local machine password to forward privaledged ports:"
sudo true
echo "https://localhost/login.html"
# Open the browser after a short delay so the tunnel is up first.
bash -c '( sleep 2s && open https://localhost/login.html )' &
# The ssh below blocks (-N: no remote command) until interrupted.
echo "Use CTRL+C when done"
sudo ssh -N -L 443:${idrac}:443 -L 5900:${idrac}:5900 -L 5901:${idrac}:5901 ${gateway}
}
function gauss-idrac-tunnel()
{
ssh-idrac-tunnel mathadmin@gauss.math.umanitoba.ca "$@"
}
function math-idrac-tunnel()
{
ssh-idrac-tunnel gabriels@kvm0.math.umanitoba.ca "$@"
}
function idrac-brunswick()
{
math-idrac-tunnel brunswick-rac
}
function idrac-havana()
{
gauss-idrac-tunnel havana-rac
}
function idrac-panama()
{
gauss-idrac-tunnel panama-rac
}
function idrac-gauss()
{
local n=$1
if [[ -z $n ]]; then
echo "You must specify which node, e.g.:"
echo "idrac-gauss node101"
return 1
fi
gauss-idrac-tunnel ${n}-rac
}
function idrac-stats-kvm1()
{
ssh-idrac-tunnel statsadmin@10.45.3.152 kvm1-rac
}
function idrac-math-kvm0()
{
open https://kvm0.math.umanitoba.ca/login.html
}
function idrac-sec()
{
open https://sec-rac.stats.umanitoba.ca/login.html
}
function idrac-caan()
{
open https://caan-rac.stats.umanitoba.ca/login.html
}
function idrac-thay()
{
open https://thay-rac.stats.umanitoba.ca/login.html
}
function idrac-jast()
{
open https://jast-rac.stats.umanitoba.ca/login.html
}
| true |
c2cffc557a632fe062d4862fc71cd7240f5fd50e | Shell | eban/jarp | /files/bareruby.sh | UTF-8 | 4,559 | 3.453125 | 3 | [] | no_license | #!/bin/sh
trap 'rm -f btest.[cho] btest btest.exe' 0 1 2 15
srcdir=`expr $0 : '\(.*\)/.*' '|' .`
# check option
for option
do
case "$option" in
--*=*)
eval "`echo \"$option\" | sed 's/--\(.*\)=\(.*\)/\1=\"\2\"/'`";;
--*)
eval "`expr \"$option\" : '--\(.*\)'`=yes";;
esac
done
# check true srcdir
if test -f ${srcdir}/inits.c; then
:
else
echo "run `basename $0` with --srcdir"
exit 1
fi
# test only
case "$test" in
on|yes|1)
sed "s/miniruby/bareruby/;s:Dir\[\":Dir[\"${srcdir}/:" ${srcdir}/sample/test.rb >test.rb
./bareruby test.rb >test.log
exit $?;;
esac
# execute yacc
if test -n "${yacc}"; then
${yacc} ${srcdir}/parse.y
mv y.tab.c parse.c
fi
# check gcc
if test -z "${CC}"; then
echo 'main(){}' >btest.c
if gcc -c btest.c -o btest.o; then
CC=gcc
else
CC=cc
fi
fi
: ${CPP="${CC} -E"}
: ${LIBS=-lm}
cat >btest.h <<'+'
#define RUBY_PLATFORM "unknown-unknown"
#define DLEXT ".so"
#define GETGROUPS_T int
#define TOKEN_PASTE(x,y) x##y
#define RETSIGTYPE void
#define HAVE_GETCWD 1
#define HAVE_MKDIR 1
#define RUBY_SITE_LIB2 ""
#define RUBY_SITE_ARCHLIB ""
#define RUBY_SITE_LIB ""
#define RUBY_LIB ""
#define RUBY_ARCHLIB ""
#define __CHECKER__ 1
+
cat >btest.c <<'+'
int
main()
{
if (-1==(-1>>1))
return 0;
return 1;
}
+
${CC} ${CFLAGS} -o btest btest.c
if test x"$RSHIFT_SIGN" = xyes || ./btest; then
echo '#define RSHIFT(x,y) ((x)>>(int)y)' >>btest.h
else
echo '#define RSHIFT(x,y) (((x)<0) ? ~((~(x))>>y) : (x)>>y)' >>btest.h
fi
cat >btest.c <<'+'
#ifndef __MINGW32__
#error not support MinGW
#endif
+
if ${CPP} ${CPPFLAGS} btest.c >/dev/null 2>&1; then
mingw=1
cat >>btest.h <<'+'
#define NT 1
#define HAVE_WAITPID 1
#define HAVE_STDARG_PROTOTYPES
+
fi
# Probe which system headers exist by preprocessing a one-line #include
# for each candidate; every hit is recorded as a HAVE_*_H define in the
# generated config header (slashes become underscores, e.g. SYS_DIR_H).
headers='
direct
dirent
fcntl
ndir
stdlib
string
unistd
sys/dir
sys/ndir
sys/param
sys/time
'
for i in ${headers}
do
echo "#include <${i}.h>" >btest.c
if ${CPP} ${CPPFLAGS} btest.c >/dev/null 2>&1; then
echo Found "<${i}.h>"
echo "#define HAVE_`echo ${i} |tr 'a-z/' 'A-Z_'`_H 1" >>btest.h
fi
done
types='
int
short
long/long
long
__int64
void*
float
double
'
for i in ${types}
do
i=`echo ${i} | tr / ' '`
name=SIZEOF_`echo "${i}" |tr 'a-z* ' 'A-ZP_'`
eval "size=`echo '$''{'${name}-unset'}'`"
if test ${size} != unset; then
echo "#define ${name} ${size}" >>btest.h
echo "sizeof(${i}): (cached) ${size}"
else
echo "main(){printf(\"%d\\n\", sizeof(${i}));}" >btest.c
size=0
if ${CC} ${CFLAGS} -o btest btest.c >/dev/null 2>&1;then
size=`./btest 2>/dev/null`
fi
echo "#define ${name} ${size}" >>btest.h
echo "sizeof(${i}): ${size}"
fi
done
echo '#define HAVE_STDARG_PROTOTYPES 1' >>btest.h
echo '#define WNOHANG 1' >>btest.h
echo '#ifdef __OpenBSD__' >>btest.h
echo '#define HAVE_OFF_T 1' >>btest.h
echo '#endif' >>btest.h
if cmp config.h btest.h >/dev/null 2>&1; then
:
else
echo Replaced config.h
mv btest.h config.h
fi
cp ${srcdir}/inits.c btest.c
cat >>btest.c <<'+'
int ReadDataPending(){ return 0; }
int sigmask(){ return 0; }
int sigsetmask(){ return 0; }
int sigblock(){ return 0; }
/*int strftime(){ return 0; }*/
+
if cmp bareinits.c btest.c >/dev/null 2>&1; then
:
else
echo Replaced bareinits.c
mv btest.c bareinits.c
fi
if test -f bgc.c; then
:
else
sed 's/^# define STACK_LEVEL_MAX 655300/# define STACK_LEVEL_MAX 0x10000/' ${srcdir}/gc.c >bgc.c
fi
sources='
array
bignum
class
compar
dir
dln
dmyext
enum
error
eval
file
bgc
hash
bareinits
io
main
marshal
math
numeric
object
pack
parse
prec
process
random
range
re
regex
ruby
signal
sprintf
st
string
struct
time
util
variable
version
'
missing=`cd ${srcdir}/missing; ls *.c |egrep -v 'mkdir|os2|strftime|x68.c' |sed 's/\.c$//'`
if test x${mingw} = x1; then
sources="${sources} win32"
CPPFLAGS="${CPPFLAGS} -I${srcdir}/win32"
LIBS=-lwsock32
fi
objects=`echo ${sources} ${missing} |sed 's/\([^ ][^ ]*\)/_\1.o/g'`
for i in ${sources} ${missing}
do
if test -f ${i}.c; then
src=${i}
elif test -f ${srcdir}/${i}.c; then
src=${srcdir}/${i}
elif test -f ${srcdir}/missing/${i}.c; then
src=${srcdir}/missing/${i}
elif test -f ${srcdir}/win32/${i}.c; then
src=${srcdir}/win32/${i}
else
echo "${i}.c not found"
exit 1
fi
if ls -t ${src}.c _${i}.o config.h 2>&1 |head -1 |grep -qv '\.o$'; then
cmd="${CC} -I. -I${srcdir} ${CPPFLAGS} ${CFLAGS} -c -o _${i}.o ${src}.c"
echo ${cmd}
${cmd} || exit $?
fi
src=''
done
if test $? = 0; then
cmd="${CC} ${CFLAGS} -o bareruby ${objects} ${LIBS}"
echo ${cmd}
$cmd
fi
| true |
1fa47a80f0cfaabcc7b19449b3d7ea52930ace77 | Shell | enm1986/Shellscripts | /Lista/DAW_lista_Ex14.sh | UTF-8 | 810 | 3.453125 | 3 | [] | no_license | #!/bin/bash
# Exercise 14
# Exactly one parameter (a directory path) must be supplied; the script
# enters the directory, creating it first if necessary.
# The original used `if \`test ...\``, which only works through bash's
# obscure "empty command takes the status of the last command
# substitution" rule; replaced with the standard [ ] test.
if [ $# -ne 1 ]
then
echo "Nombre de parametres incorrecte";
echo "Ex. ús: $0 <ruta directori>";
exit 1;
fi
# First check whether directory $1 already exists.
if [ -d "${1}" ]
then
cd "${1}"; # It exists, so enter it
else
mkdir "${1}" 2> /dev/null; # It does not exist: create it, discarding error output
if [ -d "${1}" ] # Check whether the directory was actually created
then
cd "${1}"; # Created successfully, enter it
else
echo "No s'ha pogut crear el directori"; # Otherwise report the failure
fi
fi
# Finally show where the script ended up.
echo -n "Ens trobem a: "
pwd;
# Even though we cd inside the directory during the script,
# the caller's terminal keeps its own working directory.
exit 0;
| true |
00baa15a1f840431f10449d1e72caff3c5e773eb | Shell | torenj/Logger | /ExternalFiles/postProcessing.sh | UTF-8 | 1,474 | 3.734375 | 4 | [] | no_license | #!/usr/bin/env bash
# Print all arguments, space-separated, prefixed with a
# "[YYYY-mm-dd HH:MM:SS]" timestamp. Literal '%' characters in the
# message are doubled so date(1) does not treat them as format
# specifiers.
echo_time() {
  local msg
  msg=$(printf '%s ' "$@")
  date +"[%Y-%m-%d %H:%M:%S] ${msg//%/%%}"
}
# Post-process files captured by the ContextLogger app: move them out of
# the app-support folder, run the Python pipeline on them, then archive
# the inputs and publish the generated images.
# $1 - working "Documents" directory holding the scripts and log files.
DOCUMENTS=${1}
APP_SUPPORT_FOLDER=~/Library/Application\ Support/dk.logging.ContextLogger/
cd ${DOCUMENTS}
# Idempotent workspace setup; "already exists" errors are discarded.
mkdir input &> /dev/null
mkdir input_old &> /dev/null
mkdir output &> /dev/null
# Only proceed when the app actually captured something.
if [ "$(ls -A "$APP_SUPPORT_FOLDER")" ]; then
for file in "$APP_SUPPORT_FOLDER"*; do mv -- "$file" ./input ; done
echo_time "Moved captured files to input folder" >> "${DOCUMENTS}/sequencelog.txt"
# Load the user's shell setup to find conda, then activate the
# processing environment. NOTE(review): sourcing ~/.zshrc from a bash
# script assumes it is bash-compatible — confirm.
source ~/.zshrc >> "${DOCUMENTS}/debuglog.txt"
eval "$(conda shell.zsh hook)" >> "${DOCUMENTS}/debuglog.txt"
conda activate logger >> "${DOCUMENTS}/debuglog.txt"
python "${DOCUMENTS}/dot.py" >> "${DOCUMENTS}/sequencelog.txt"
python "${DOCUMENTS}/generateImageDiffs.py" >> "${DOCUMENTS}/sequencelog.txt"
# Archive the processed inputs so the next run starts clean.
if [ "$(ls -A ./input)" ]; then
for file in ./input/*; do mv -- "$file" ./input_old ; done
echo_time "Moved processed input files out of the way" >> "${DOCUMENTS}/sequencelog.txt"
else
echo_time "Failed to move files from input out of the way (nothing to move? Was the environment setup?)" >> "${DOCUMENTS}/sequencelog.txt"
fi
# Publish generated images to the user-visible output folder.
mkdir "${DOCUMENTS}/OutputImageFolder" &> /dev/null
for file in ./output/*; do mv -- "$file" "${DOCUMENTS}/OutputImageFolder" ; done
else
echo_time "Failed to move captured files to input (nothing to move? Was something captured?)" >> "${DOCUMENTS}/sequencelog.txt"
fi
| true |
93b08f1c068948628c47d080bfb8c80f2bfa236c | Shell | lheckemann/namic-sandbox | /VervetAtlas/Scripts/N3Runner.sh | UTF-8 | 1,036 | 3.359375 | 3 | [] | no_license | #!/bin/bash
#if [ $# -ne 1 ]
#then
# echo "Usage: `basename $0` directory_with_cases"
# exit 1
#fi
#CASES=$1
SLICER=/workspace/fedorov/Slicer/Release/Slicer3-build/Slicer3
N3=/projects/birn/fedorov/Slicer3fedorov/10579/N3MRILightCLI/N3MRILightCLI
MOVING_IMAGE=./VPA-10-1.1/Vervet_T1_Template_WholeHead.nii
MOVING_IMAGE_ICC=./VPA-10-1.1/Vervet_T1_Template_ICC-mask.nrrd
for CASE in Calvin Gucci Hugo Ralph Valentino Marc Issac Louis Oscar Tommy
do
echo "Performing N3 correction for $CASE"
INPUT_IMAGE=${CASE}/${CASE}-GAD.nrrd
# INPUT_MASK=${CASE}/${CASE}-ICC.nrrd
INPUT_MASK=${CASE}/${CASE}-ICC.nrrd
OUTPUT_IMAGE1=${CASE}/${CASE}-N3.nrrd
OUTPUT_IMAGE2=${CASE}/${CASE}-BiasField.nrrd
CMD="${SLICER} --launch $N3 --shrinkfactor 4 --nfittinglevels 4 --niterations 100 --nhistogrambins 200 --weinerfilternoise 0.1 --biasfieldfullwidthathalfmaximum 0.15 --convergencethreshold 0.0001 --splineorder 3 --ncontrolpoints 4,4,4 ${INPUT_IMAGE} ${INPUT_MASK} ${OUTPUT_IMAGE1} ${OUTPUT_IMAGE2}"
$CMD
echo "N3 is complete for case ${CASE}"
done
| true |
8622754e9d34739308014a9216954776bb76eb81 | Shell | leobeosab/dots | /utils/clone_vim_setup.sh | UTF-8 | 223 | 2.796875 | 3 | [] | no_license | #! /bin/bash
# Record the git origin URL of every installed Vim bundle so the plugin
# set can be reproduced elsewhere. Each URL is echoed and appended to
# the "vimplugins" file next to this script.
SCRIPT=$(readlink -f "$0")
SCRIPTPATH=$(dirname "$SCRIPT")
touch "$SCRIPTPATH/vimplugins"
for d in "$HOME"/.vim/bundle/*/; do
# The subshell keeps the cd local; && ensures git only runs inside the
# bundle (the original's `cd $d; git ...` queried the wrong repository
# when the cd failed). tee both prints and appends the URL.
(cd "$d" && git config --get remote.origin.url) | tee -a "$SCRIPTPATH/vimplugins"
done
| true |
567115149ed731d04564d841ed68900af8ea5232 | Shell | SamuelSwartzberg/paper | /init.sh | UTF-8 | 3,183 | 3.828125 | 4 | [] | no_license | #!/usr/bin/env bash
POSTS=(posts/*.md)
echo "<!-- This is the generated index.html. Any edits here will be overwritten! -->" > index.html #add a warning to the generated index.html
cat templates/_index.html >> index.html # add the content of the master index.html file to the generated one
echo "" > sitemap.txt
for (( i = 0; i < ${#POSTS[@]}; i++ )); do
HEADER=$(head -n 6 ${POSTS[i]}) # Get the first three lines, which are header lines
IFS=$'\n' # Make the field separator a newline
read -rd '' -a headerLineArray <<<"$HEADER" # Split them into an array
postTemplate=$(cat templates/post-item-template.html) # get the html post template
cat templates/markdown-template.html > "${POSTS[i]%.md}temp.html" # Add the content of the template for the markdown post to a temp file , which will now be filled
noPreview=false # By default, items have previews (the snippets in index.html)
for (( j = 0; j < ${#headerLineArray[@]}; j++ )); do # go through all the header lines
key=$(echo ${headerLineArray[j]} | cut -d "|" -f1) # The part in the header before | is the key
value=$(echo ${headerLineArray[j]} | cut -d "|" -f2) # The part in the header after | is the value
postTemplate=${postTemplate/"{$key}"/"$value"} # Replace the {key} instances in postTemplate by their values, filling the template
if [[ "$key" == "arguments" && "$value" == *nopreview* ]]; then
noPreview=true
fi
sed -i'.original' -e "s|{$key}|${value}|g" "${POSTS[i]%.md}temp.html" # In the temp post item we created above, replace all instances of key with value, filling the template with the headers
if [[ "$key" == "arguments" && "$value" == *norobots* ]]; then
sed -i'.original2' -e "s|<!--norobotshead-->|<meta name='robots' content='noindex'>|" "${POSTS[i]%.md}temp.html"
fi
done
postTemplate=${postTemplate/"{url}"/"${POSTS[i]%.md}.html"} # Replace the {key} instances in postTemplate by their values, filling the template
tail -n +7 "${POSTS[i]}" > tempMDpost.md # add everything except the six header lines to a temporary markdown post
markdown tempMDpost.md --template "${POSTS[i]%.md}temp.html" > "${POSTS[i]%.md}.html" #create the article html file from the temporary markdown post
perl -pi -e 's| <([^&]*?)> |<\1>|g' "${POSTS[i]%.md}.html" # Since the markdown shell command has a bug where it does not parse but instead escapes html tags (in violation of the markdown spec), we need to do it manually. In our case, we only parse out tags that have a space on both sides, to prevent accidental parsing
echo $postTemplate > tempPostItem.html # create a temporary post item
echo "https://samswartzberg.com/paper/${POSTS[i]%.md}.html" >> sitemap.txt # Add the post url to the sitemap, so google can index it easily
if [ "$noPreview" = true ] ; then
continue
fi
sed -i'.original' '/INSERT HERE-->/r tempPostItem.html' index.html # add the contents of the post item to the generated index.html
done
# Do some cleanup
rm *.original
rm posts/*.original
rm temp* # will not remove templates/, since it is a directory (which is good, since that's what we want, but it will display an alert in the console)
rm posts/*temp.*
| true |
6c3f86c7ec7e84f1f5706f4ceccbb963ccd44d72 | Shell | alexcu/dotfiles | /bitly/install.command | UTF-8 | 267 | 2.671875 | 3 | [] | no_license | #!/usr/bin/env bash
# Install the bitly helper by symlinking it into /usr/local/bin.
echo
echo "!!! Place a generic access token in ~/.bitlytoken !!!"
echo "!!! See: https://bitly.is/2Kpcq4e !!!"
echo
echo "Linking ./bitly -> /usr/local/bin/bitly"
# greadlink is GNU readlink (coreutils) on macOS; -f resolves $0 to an
# absolute path even when invoked through a symlink.
BASEDIR=$(greadlink -f $(dirname $0))
# -n: do not follow an existing symlink target, -s: symbolic, -f: overwrite.
ln -nsf "$BASEDIR/bitly" /usr/local/bin/bitly
| true |
8bf1cfe47c31091c49fd668e55019f4fc4b4ef94 | Shell | tuxlover/bashtools | /Scripts/devel/clearsys.sh | UTF-8 | 3,395 | 3.53125 | 4 | [] | no_license | #!/bin/bash
#clearsys
##functions begin here
# Select the directories to scan and store them in the global arrays
# SEARCH_PATH / SEARCH_PATH2: only $HOME for ordinary users, a fixed set
# of system directories when running as root. Both branches now assign
# real arrays, so "${SEARCH_PATH[@]}" behaves consistently everywhere
# (the original non-root branch assigned a plain scalar).
root_check()
{
if [ "$UID" -ne 0 ]
then
	SEARCH_PATH=( "$HOME" )
	SEARCH_PATH2=( "$HOME" )
else
	SEARCH_PATH=( /usr /home /etc /opt /var /root /mnt /media )
	SEARCH_PATH2=( /bin /etc /home /lib /media /mnt /opt /root /sbin /srv /usr /var )
fi
}
#search functions begin here
# Report (without deleting) temporary files, rpmsave leftovers and broken
# symlinks in every search path. Used by -a and as the default action.
all_actions() #proceed all actions -a same as no option
{
root_check
echo -e '\E[32mThese temporary files have been found:'; tput sgr0
for i in "${SEARCH_PATH[@]}"
do
	# Patterns are quoted so the shell cannot expand them against the
	# current directory, and grouped with \( \) so -type f applies to
	# every alternative (find's -o binds looser than the implicit -a).
	find "$i" -type f \( -name '*.tmp' -o -name '*.temp' -o -name '*~' \)
done
echo -e '\E[32mThese old rpmsave files have been found'; tput sgr0
for i in "${SEARCH_PATH[@]}"
do
	find "$i" -type f -name '*.rpmsave'
done
echo -e '\E[32mThese are probertly broken symbolic links but should be inspeted afterwards'; tput sgr0
for j in "${SEARCH_PATH2[@]}"
do
	find "$j" -type l -print0| xargs -r0 file| grep "broken symbolic"| sed -e 's/^\|: *broken symbolic.*$/"/g'
done
exit 0
}
# Interactively delete temporary and rpmsave files; find's -ok prompts the
# user before each removal. Triggered by -i.
interactive_actions() #finds and deletes temporary files -i
{
root_check
echo -e '\E[33mDo you want to delete finayly'; tput sgr0
for i in "${SEARCH_PATH[@]}"
do
	# Quoted patterns reach find literally instead of being glob-expanded
	# by the shell against the current directory.
	find "$i" -type f -name '*.tmp' -ok rm {} \;
	find "$i" -type f -name '*.temp' -ok rm {} \;
	find "$i" -type f -name '*~' -ok rm {} \;
	find "$i" -type f -name '*rpmsave' -ok rm {} \;
done
exit 0
}
# List empty directories under every search path. Triggered by -e.
find_empty_dir() #finds empty dir -e
{
root_check
echo -e '\E[33m These are empty directories that may not be usefull any longer'; tput sgr0
sleep 2
for i in "${SEARCH_PATH[@]}"
do
	# "$i" is quoted so paths containing spaces (e.g. some $HOME values)
	# are passed to find as a single argument.
	find "$i" -type d -empty
done
exit 0
}
# List *.tmp and *.temp files under every search path. Triggered by -t.
find_temp() #finds tmp, and temp files -t
{
root_check
echo -e '\E[32mThese temporary files have been found:'; tput sgr0
for i in "${SEARCH_PATH[@]}"
do
	# Quoted patterns avoid shell glob expansion; \( \) makes -type f
	# apply to both alternatives of the -o expression.
	find "$i" -type f \( -name '*.tmp' -o -name '*.temp' \)
done
exit 0
}
# List *.rpmsave files under every search path. Triggered by -r.
find_rpmsave() #finds only rpmsave data -r
{
root_check
echo -e '\E[32mThese .rpmsave files have been found:'; tput sgr0
for i in "${SEARCH_PATH[@]}"
do
	# Quoting keeps the pattern from being glob-expanded by the shell.
	find "$i" -type f -name '*.rpmsave'
done
exit 0
}
# List editor backup files ending in '~' under every search path.
# Triggered by -o.
find_oldsaves() #finds files usaly created by editors -o
{
root_check
echo -e '\E[32mThese oldsave ~ files have been found:'; tput sgr0
for i in "${SEARCH_PATH[@]}"
do
	# '*~' is quoted so existing backup files in the current directory
	# cannot expand the pattern before find sees it.
	find "$i" -type f -name '*~'
done
exit 0
}
find_broken_links() #this function finds broken-links -l
{
root_check
echo -e '\E[32mThese are probertly broken symbolic links but should be inspeted afterwards'; tput sgr0
# For every symlink, file(1) classifies the target; only "broken symbolic"
# entries are kept, and sed strips file's description so just the path
# remains. NOTE(review): the sed replacement substitutes a '"' for both
# the (empty) line start and the trailing description — looks intended to
# quote the path for copy/paste, but verify the resulting output format.
for j in ${SEARCH_PATH2[@]}
do
find $j -type l -print0| xargs -r0 file| grep "broken symbolic"| sed -e 's/^\|: *broken symbolic.*$/"/g'
done
exit 0
}
#search functions end here
# Print usage information for this script, then terminate successfully.
# Triggered by -h.
get_help() #displys help for this script -h
{
# A single here-document replaces the original chain of echo calls; the
# unquoted delimiter keeps $0 expanding exactly as before.
cat <<EOF
$0 finds temporary files and cleans them from the system
-a: displays all .rpmsave, .tmp, .temp, oldsave ~ files and broken-links
-e: displays all empty directories
-h: print this help
-i: finds and deletes .rpmsave, tmp, temp and oldsaves interacitvely
-l: displays all broken links
-o: displays oldsaves only
-r: displays .rpmsave only
-t: displays .tmp and .temp only
EOF
exit 0
}
# Option dispatch: each handler function calls `exit` itself, so control
# only reaches the line after the loop when no recognised option was given,
# in which case the default action (-a equivalent) runs.
while getopts aehilort opt
do
case "$opt" in
a) all_actions
;;
e) find_empty_dir
;;
h) get_help
;;
i) interactive_actions
;;
l) find_broken_links
;;
o) find_oldsaves
;;
r)find_rpmsave
;;
t)find_temp
;;
esac
done
# Default when invoked without options: show everything (same as -a).
root_check; all_actions
##functions end here
exit 0
| true |
e9ffa2cf7019dcdc0cb07d956de37ce06657028b | Shell | squaresurf/dotfiles | /bin/rubyspec | UTF-8 | 204 | 2.984375 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# ANSI colour codes used to announce which rspec runner is invoked.
BLUE='\e[0;34m'
RESET='\e[0;0m'
# Prefer the project's binstub when it exists, otherwise fall back to
# `bundle exec`. "$@" (quoted) forwards every CLI argument untouched; the
# original unquoted $@ word-split arguments containing spaces.
if [[ -f bin/rspec ]]; then
	printf "${BLUE}bin/rspec${RESET}\n"
	bin/rspec "$@"
else
	printf "${BLUE}bundle exec rspec${RESET}\n"
	bundle exec rspec "$@"
fi
| true |
9d83e222a70e43ee8d387423343fd6dffc7dfc01 | Shell | anryko/aws-ansible | /utilities/ssh-port-proxy | UTF-8 | 1,343 | 3.734375 | 4 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
set -e

# Print usage and abort; the single positional argument is an ssh -L spec.
usage() {
	echo "Usage: $0 8080:1.2.3.4:80"
	exit 1
}

[[ $# -eq 0 || "$1" == "-h" ]] && usage

args=( "$@" )

# Extract optional market=... / env=... words from the arguments and drop
# them from the array that is later handed to ssh.
# NOTE: the original multi-line command substitutions put `|` at the start
# of a continuation line, which is a bash syntax error; the pipelines are
# joined onto single lines here.
for i in "${!args[@]}"; do
	if [[ ${args[i]} = *'market='* ]]; then
		market=$(echo "${args[i]}" | fmt -1 | grep 'market=' | tail -1 | cut -d= -f2)
		unset "args[$i]"
	fi
	if [[ ${args[i]} = *'env='* ]]; then
		env=$(echo "${args[i]}" | fmt -1 | grep 'env=' | tail -1 | cut -d= -f2)
		unset "args[$i]"
	fi
done

if [[ -n $market ]] && [[ -e .env.yml ]]; then
	# Strip ini section headers, comments and blank lines, then eval the
	# remaining key=value assignments into this shell.
	ENV_INI_VARS=$(./utilities/infra-vars get-env "$market" "$env" | sed -e '/\[/d;s/#.*$//;/^$/d')
	eval "$ENV_INI_VARS"
elif [[ $# -eq 1 ]] && [[ -e .env.yml ]] && [[ $1 =~ ^[0-9]+:(.+):[0-9]+ ]]; then
	# Look up which market/environment owns the target host's IP.
	read market env <<<$(./utilities/infra-vars find-ip "${BASH_REMATCH[1]}" --inline)
	if ! [[ $market =~ [a-z]+'='[a-z]+ ]] || ! [[ $env =~ [a-z]+'='[a-z]+ ]]; then
		echo "Provided Host dosn't belong to any ansible namanged market environment."
		exit 1
	fi
	ENV_INI_VARS=$(./utilities/infra-vars get-env "${market#*=}" "${env#*=}" | sed -e '/\[/d;s/#.*$//;/^$/d')
	eval "$ENV_INI_VARS"
elif [[ -e .env ]]; then
	source .env
fi

# Forward the requested port through the bastion host.
ssh -i "$BASTION_SSH_KEY" -o StrictHostKeyChecking=no "ubuntu@$BASTION_HOST" -L "${args[@]}"
| true |
914d31c90db1e57f6db3f13d098bfdc7df26b6ee | Shell | shenh10/wifi_location | /scripts/capture/exp5/stop_all.sh | UTF-8 | 470 | 2.609375 | 3 | [] | no_license | workers='workerfile'
# NOTE(review): '~' does not expand inside single quotes, so these two
# paths keep a literal tilde; they are unused in this snippet, but verify
# before relying on them (e.g. inside stop_snippet.sh).
log_file='~/projects/linux-80211n-csitool-supplementary/netlink/log_to_file'
datapath='~/projects/wifi_location/scripts/capture/exp5/data/'
timestamp=$(date +"%T")
echo "Stop capture at $timestamp"
# Read the workers file on fd 10 (so ssh below can still use stdin for the
# snippet); the `|| [[ -n "$fd" ]]` also processes a final unterminated line.
while IFS='' read -r -u10 fd || [[ -n "$fd" ]]; do
echo $fd
# Each line is "<username> <address>"; the unquoted echo collapses runs of
# whitespace before cut splits on single spaces.
username=$(echo ${fd} | cut -d' ' -f1)
addr=$(echo ${fd} | cut -d' ' -f2)
echo "${username} at ${addr}"
# Run the local stop snippet remotely by feeding it to bash over stdin.
ssh ${username}@${addr} "bash " < stop_snippet.sh
done 10< "$workers"
| true |
adf234a5c1d0941d9b87399cc547195b2839e187 | Shell | sshyran/skia-buildbot | /skolo/build_release_backup | UTF-8 | 719 | 3.40625 | 3 | [
"BSD-3-Clause"
] | permissive | #!/bin/bash
# Builds and uploads a debian package for rpi-backup.
APPNAME=rpi-backup
SYSTEMD=${APPNAME}.service
SYSTEMD_TIMER=${APPNAME}.timer
DESCRIPTION="backup the rpi image to GS"
# Copy files into the right locations in ${ROOT}.
# NOTE(review): ${ROOT} and ${GOPATH} are expected to be provided by the
# sourced release.sh / the environment — confirm before running standalone.
copy_release_files()
{
INSTALL="sudo install -D --verbose --backup=none --group=root --owner=root"
INSTALL_DIR="sudo install -d --verbose --backup=none --group=root --owner=root"
${INSTALL} --mode=755 -T ${GOPATH}/bin/${APPNAME} ${ROOT}/usr/local/bin/${APPNAME}
${INSTALL} --mode=644 -T ./sys/${APPNAME}.service ${ROOT}/etc/systemd/system/${APPNAME}.service
${INSTALL} --mode=644 -T ./sys/${APPNAME}.timer ${ROOT}/etc/systemd/system/${APPNAME}.timer
}
# release.sh drives the actual packaging and calls copy_release_files.
source ../bash/release.sh
| true |
e9151a8864b31b7a53500fdf058eabc138b59964 | Shell | jamaljsr/polar | /docker/eclair/docker-entrypoint.sh | UTF-8 | 1,065 | 3.921875 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
set -e

# give bitcoind a second to bootup
# NOTE(review): comment inherited from a bitcoind image; here the wait is
# presumably for eclair's backend — confirm.
sleep 1

# containers on linux share file permissions with hosts.
# assigning the same uid/gid from the host user
# ensures that the files can be read/write from both sides
if ! id eclair > /dev/null 2>&1; then
  USERID=${USERID:-1000}
  GROUPID=${GROUPID:-1000}

  echo "adding user eclair ($USERID:$GROUPID)"
  groupadd -f -g $GROUPID eclair
  useradd -r -u $USERID -g $GROUPID eclair

  # ensure correct ownership of user home dir
  mkdir -p /home/eclair
  chown -R $USERID:$GROUPID /home/eclair
fi

if [ "$1" = "polar-eclair" ]; then
  # convert command line args to JAVA_OPTS
  # (every --key=value becomes -Declair.key=value for eclair-node.sh)
  JAVA_OPTS=""
  for arg in "$@"
  do
    if [ "${arg:0:2}" = "--" ]; then
      JAVA_OPTS="$JAVA_OPTS -Declair.${arg:2}"
    fi
  done
  # trim leading/trailing whitespace
  JAVA_OPTS="$(sed -e 's/[[:space:]]*$//' <<<${JAVA_OPTS})"

  echo "Running as eclair user:"
  echo "bash eclair-node/bin/eclair-node.sh $JAVA_OPTS"
  # gosu drops root so eclair runs as the unprivileged user, as PID 1.
  exec gosu eclair bash eclair-node/bin/eclair-node.sh $JAVA_OPTS
fi

echo "Running: $@"
exec "$@"
| true |
bf3cf8959addfe335d0c6a4dc49319f739888f10 | Shell | hergerr/scripting-sandbox | /lab1/ex11.sh | UTF-8 | 115 | 3.328125 | 3 | [] | no_license | #!/bin/bash
# Update the modification time of every writable file directly inside the
# directory given as the first argument.
directory_name=$1

# Quoting "$directory_name" and "$file" keeps paths with spaces intact;
# `--` guards against file names that begin with a dash.
for file in "$directory_name"/*
do
	if [ -w "$file" ]
	then
		touch -m -- "$file"
	fi
done
| true |
5d12562a30282942a51e2b6c816b7a717426be12 | Shell | pvelissariou1/ADC-WW3-NWM-SCHISM-NEMS | /WW3/regtests/bin/matrix_ncep | UTF-8 | 6,943 | 3.265625 | 3 | [
"CC0-1.0"
] | permissive | #!/bin/bash
# --------------------------------------------------------------------------- #
# matrix.go: Run matrix of regression tests on target machine. #
# #
# Remarks: #
# - This version is set up for automatic w3_setenv script and for the #
# NOAA RDHPC 'zeus' system. When using this for your own setup and #
# computer, please copy rather than modify. #
# #
# Hendrik L. Tolman #
# August 2013 #
# December 2013 #
# April 2018 #
# #
# Copyright 2013 National Weather Service (NWS), #
# National Oceanic and Atmospheric Administration. All rights #
# reserved. WAVEWATCH III is a trademark of the NWS. #
# No unauthorized use without permission. #
# #
# --------------------------------------------------------------------------- #
# 0. Environment file
# w3_setenv exports the WWATCH3_* variables consumed below.
source $(dirname $0)/../../model/bin/w3_setenv
main_dir=$WWATCH3_DIR
temp_dir=$WWATCH3_TMP
source=$WWATCH3_SOURCE
list=$WWATCH3_LIST
echo "Main directory : $main_dir"
echo "Scratch directory : $temp_dir"
echo "Save source codes : $source"
echo "Save listings : $list"
# Set batchq queue to define headers etc (default to original version if empty)
batchq="slurm"
# 1. Set up
# 1.a Computer/ user dependent set up
# Build the batch-job header file (matrix.head) for either SLURM or PBS.
echo '#!/bin/sh --login' > matrix.head
echo ' ' >> matrix.head
if [ $batchq = "slurm" ]
then
  echo '#SBATCH -n 24' >> matrix.head
  echo '#SBATCH -q batch' >> matrix.head
  echo '#SBATCH -t 08:00:00' >> matrix.head
  echo '#SBATCH -A marine-cpu' >> matrix.head
  echo '#SBATCH -J ww3_regtest' >> matrix.head
  echo '#SBATCH -o matrix.out' >> matrix.head
else
  echo '#PBS -l procs=24' >> matrix.head
  echo '#PBS -q batch' >> matrix.head
  echo '#PBS -l walltime=08:00:00' >> matrix.head
  echo '#PBS -A marine-cpu' >> matrix.head
  echo '#PBS -N ww3_regtest' >> matrix.head
  echo '#PBS -j oe' >> matrix.head
  echo '#PBS -o matrix.out' >> matrix.head
  echo ' ' >> matrix.head
fi

echo "  cd $(dirname $main_dir)/regtests" >> matrix.head
echo ' ' >> matrix.head

# Netcdf and Parmetis modules & variables
# Module versions are only defined when running on Theia login nodes (tfe*).
istheia=`hostname | grep tfe`
if [ $istheia ]
then
  modcomp='intel/14.0.2'
  modmpi='impi/5.1.2.150'
  modnetcdf='netcdf/4.3.0'
fi
echo "  module load $modcomp $modmpi $modnetcdf" >> matrix.head
echo "  export WWATCH3_NETCDF=NC4" >> matrix.head
echo "  export NETCDF_CONFIG=`which nc-config`" >> matrix.head
echo "  export METIS_PATH=/scratch3/NCEPDEV/stmp2/Jessica.Meixner/parmetis-4.0.3" >> matrix.head
echo "  export WW3_PARCOMPN=4" >> matrix.head
# NOTE(review): this echo writes a blank line to stdout, not matrix.head —
# presumably `>> matrix.head` was intended; confirm.
echo ' '

# Compiler option. Choose appropriate compiler and set cmplOption to
# y if using for the first time or using a different compiler
cmplr=Intel
export cmplOption='y'

if [ "$batchq" = 'slurm' ]
then
  export mpi='srun'
else
  export mpi='mpirun'
fi
export np='24'
export nr='4'
export nth='6'

# Compile option
if [ "$cmplOption" = 'y' ]
then
  opt="-c $cmplr -S -T"
else
  opt="-S"
fi
# Batch queue option
if [ "$batchq" = 'slurm' ]
then
  opt="-b $batchq $opt"
fi

# Base run_test command line
export rtst="./bin/run_test $opt"

export ww3='../model'

# 1.b Flags to do course selection - - - - - - - - - - - - - - - - - - - - - -
#     Addition selection by commenting out lines as below

export shrd='y' # Do shared architecture tests
export dist='y' # Do distributed architecture (MPI) tests
export omp='y' # Threaded (OpenMP) tests
export hybd='n' # Hybrid options

export prop1D='y' # 1-D propagation tests (ww3_tp1.X)
export prop2D='y' # 2-D propagation tests (ww3_tp2.X)
export time='y' # time linmited growth
export fetch='y' # fetch linmited growth
export hur1mg='y' # Hurricane with one moving grid
export shwtr='y' # shallow water tests
export unstr='y' # unstructured grid tests
export pdlib='y' # unstr with pdlib for domain decomposition and implicit solver
export smcgr='y' # SMC/Rotated grid test
export mudice='y' # Mud/Ice and wave interaction tests
export infgrv='y' # Second harmonic generation tests
export uost='y' # ww3_ts4 Unresolved Obstacles Source Term (UOST)
export assim='y' # Restart spectra update

export multi01='y' # mww3_test_01 (wetting and drying)
export multi02='y' # mww3_test_02 (basic two-way nesting test))
export multi03='y' # mww3_test_03 (three high and three low res grids).
export multi04='y' # mww3_test_04 (swell on sea mount and/or current)
export multi05='y' # mww3_test_05 (three-grid moving hurricane)
export multi06='y' # mww3_test_06 (curvilinear grid tests)
export multi07='y' # mww3_test_07 (unstructured grid tests)
export multi08='y' # mww3_test_08 (wind and ice tests)

#  export filter='PR3 ST2 UQ'
#                       The filter does a set of consecutinve greps on the
#                       command lines generated by filter.base with the above
#                       selected options.

# --------------------------------------------------------------------------- #
# 2.  Execute matrix.base ...                                                 #
# --------------------------------------------------------------------------- #

$main_dir/../regtests/bin/matrix.base

# --------------------------------------------------------------------------- #
# End to the matrix                                                           #
# --------------------------------------------------------------------------- #
| true |
09a55b3cc4cfccb43e47ba39061a0f13380c012c | Shell | cbritezm/cbritezm | /stackpack/snapgen.sh | UTF-8 | 2,237 | 3.21875 | 3 | [] | no_license | #!/bin/ksh
# Statspack credentials; default to perfstat/perfstat when not supplied.
PERFSTAT=${1:-perfstat}
PERFPASS=${2:-perfstat}
SWAP_FILE=./list_level.lst
SPREPORT=$ORACLE_HOME/rdbms/admin/spreport.sql
# Interactively collect the snapshot selection criteria.
echo "Enter dbid:"
read dbid
echo "Enter instance number:"
read instnum
echo "Enter snap level:"
read snap_level
echo "Enter the first snap id:"
read first
echo "Enter the last snap id:"
read last
export PERFSTAT PERFPASS dbid instnum instnam snap_level \
first last
# List all completed snapshots matching the selection into $SWAP_FILE.
# The backslash-escaped \$ keeps stats$... table names literal in the
# here-document while $dbid etc. still expand.
sqlplus -s ${PERFSTAT}/${PERFPASS} <<EOF
set pages 0
set termout on ;
set head off
set feed off
column instart_fmt noprint;
column versn noprint heading 'Release' new_value versn;
column host_name noprint heading 'Host' new_value host_name;
column para noprint heading 'OPS' new_value para;
column level format 99 heading 'Snap|Level';
column snap_id heading 'Snap|Id' format 999990;
column snapdat heading 'Snap Started' just c format a17;
column comment heading 'Comment' format a22;
break on inst_name on db_name on instart_fmt;
ttitle lef 'Completed Snapshots' skip 2;
spool ${SWAP_FILE}
select di.version versn
, di.parallel para
, to_char(s.startup_time,' dd Mon "at" HH24:mi:ss') instart_fmt
, s.snap_id
, to_char(s.snap_time,'dd Mon YYYY HH24:mi') snapdat
, s.snap_level "level"
, substr(s.ucomment, 1,60) "comment"
from stats\$snapshot s
, stats\$database_instance di
where s.dbid = $dbid
and di.dbid = $dbid
and s.instance_number = $instnum
and di.instance_number = $instnum
and di.startup_time = s.startup_time
and s.snap_level = $snap_level
order by db_name, instance_name, snap_id;
exit;
EOF
echo "Gen Arrary"
# Pair each snapshot id with its successor (within [first,last]) and run a
# Statspack report for every consecutive pair.
cat ${SWAP_FILE} | awk ' BEGIN { D1=0 }
{
if(D1!=0)
if(D1 >= '$first' && $1 <= '$last' )
printf("%s %s\n",D1,$1);
D1=$1;
}'| while read Record
do
# set -A is ksh array assignment (matches the #!/bin/ksh shebang).
set -A Element $(echo $Record)
echo Working on [${Element[0]},${Element[1]}]
sqlplus ${PERFSTAT}/${PERFPASS} @${SPREPORT} <<EOF 2>/dev/null 1>&2
${Element[0]}
${Element[1]}
exit
EOF
done
rm ${SWAP_FILE}
| true |
98d290fceb5cfbfcca3defe7e056acb920485870 | Shell | hugoeanogueira/dotfiles | /_scripts/install_java.sh | UTF-8 | 236 | 2.703125 | 3 | [] | no_license | #!/usr/bin/env bash
##
# 2.8) JEnv
# https://github.com/gcuisinier/jenv
#
# NOTE(review): the header references JEnv, but the body installs the
# Temurin 19 JDK via Homebrew — presumably the step was repurposed; confirm.
#
# find folder
DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd );
# utils.sh is expected to define the `success` helper used below.
. "$DIR/utils.sh";

echo "--- Java ---";

brew install --cask temurin19;

success "Done!";
| true |
82d5fb0bb60b1b68b937f964bd92e44986a57a1e | Shell | MariIGM/Thalassisiraceae-transcriptomes-project-ThTSP | /.github/workflows/2-Assembly.sh | UTF-8 | 1,149 | 3.03125 | 3 | [] | no_license |
# Assembly of Thalassiosirales transcriptomes using the Trinity and rnaSPAdes
# Requirements
# Khmer V2.1.1, Trinity V2.12.0,rnaSPAdes V3.13.0
# Files with extension pe.qc.keep.abundfilt.fq.gz generated in the previous step must be in working directory
#
# first we need to split pe.qc.keep.abundfilt.fq.gz files
for file in *.pe.qc.keep.abundfilt.fq.gz
do
split-paired-reads.py ${file}
done
for file in *.1
do
file2=${file/.1/.2}
fileout=${file/.pe.qc.keep.abundfilt.fq.gz.1/}
cat ${file} > ${fileout}.left.fq
cat ${file2} > ${fileout}.right.fq
done
for file in *.se.qc.keep.abundfilt.fq.gz
do
fileout=${file/.se.qc.keep.abundfilt.fq.gz/.left.fq}
gunzip -c ${file} >> ${fileout}
done
# Assembly with Trinity
for fileleft in *.left.fq
do
fileright=${fileleft/.left.fq/.right.fq}
out=${fileleft/.left.fq/}
Trinity --left ${fileleft} \
--right ${fileright} --seqType fq --max_memory 12G \
--CPU 10 --no_bowtie --output ${out%%}-Trinity
done
# Assembly with rnaSPAdes
for fileleft in *.left.fq
do
fileright=${fileleft/.left.fq/.right.fq}
out=${fileleft/.left.fq/}
rnaspades.py -1 ${fileleft} -2 ${fileright} -o ${out%%}-SPADES
done
| true |
44daebf54f162dc1d1e22357120c182eb2565b71 | Shell | telegraph/migration-scripts | /a2a3-migration/archive/clone-scripts-online-dating-09-03-2022/test-cleanup-dir.sh | UTF-8 | 551 | 3.203125 | 3 | [] | no_license | #!/bin/bash
# Testing number of files in a directory
# Directories produced by the A2->A3 migration runs that must be emptied
# before a fresh test run.
OUTPUT_A2_BACKUP="./output/a2-backup"
OUTPUT_A2_BACKUP_P1="./output/a2-backup-p1"
OUTPUT_A3_MIGRATED="./output/a3-migrated"
OUTPUT_A3_MIGRATED_P1="./output/a3-migrated-p1"
# Directory holding the input URL lists recreated below.
INPUT="input"
# Delete every dotted file (matching *.*) directly inside the directory
# given as $1. The quoted expansion keeps paths containing spaces intact,
# `--` guards against names beginning with a dash, and `local` keeps the
# helper variable out of the global namespace.
clearDir() {
    local directory="$1"
    rm -- "$directory"/*.*
}
# Empty every migration output directory.
clearDir "${OUTPUT_A2_BACKUP}"
clearDir "${OUTPUT_A2_BACKUP_P1}"
clearDir "${OUTPUT_A3_MIGRATED}"
clearDir "${OUTPUT_A3_MIGRATED_P1}"

# Recreate the input URL lists as empty files.
rm ${INPUT}/input-urls.txt; touch ${INPUT}/input-urls.txt
rm ${INPUT}/input-urls-p1.txt; touch ${INPUT}/input-urls-p1.txt
| true |
024d7fd3240fbacbf2f11b6d458b1a7d186ee4ee | Shell | Meh-Dotfiles/vim | /install.sh | UTF-8 | 206 | 2.5625 | 3 | [] | no_license | #!/bin/bash
VIM=~/.dotfiles/vim

# Clone the vim config; report failure on stderr (the original wrote to
# stdout) and abort so the configure step never runs against a missing or
# half-cloned directory. "$VIM" is quoted for robustness.
env git clone --depth=1 https://github.com/Meh-Dotfiles/vim.git "$VIM" || {
  printf "Error: git clone of vim repo failed\n" >&2
  exit 1
}

bash "$VIM/configure-vim.sh"
| true |
c35a3472f24b36c238257c743c74da2e563d0962 | Shell | Andkeil/OneClickDeploy | /scripts/build.sh | UTF-8 | 1,335 | 2.734375 | 3 | [] | no_license | #!/bin/bash
sudo apt-get update
#install kubectl
sudo apt-get update && sudo apt-get install -y apt-transport-https
sudo curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
sudo touch /etc/apt/sources.list.d/kubernetes.list
# NOTE(review): `sudo echo | sudo tee` — the sudo on echo is redundant;
# tee is what needs the privilege to append to the sources list.
sudo echo "deb http://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee -a /etc/apt/sources.list.d/kubernetes.list
sudo apt-get update
sudo apt-get install -y kubectl
#install kops
sudo wget https://github.com/kubernetes/kops/releases/download/1.10.0/kops-linux-amd64
sudo chmod +x kops-linux-amd64
sudo mv kops-linux-amd64 /usr/local/bin/kops
#awscli
sudo apt-get install -y awscli
#docker
sudo apt-get install docker.io
#set s3 bucket
# kops stores cluster state in this S3 bucket.
aws s3 mb s3://cluster1.cloudhippo.io
export KOPS_STATE_STORE=s3://cluster1.cloudhippo.io
#create cluster
kops create cluster --cloud=aws --zones=us-west-1b --dns-zone=cloudhippo.io --name=cluster1.cloudhippo.io --yes
# NOTE(review): busy-wait with no sleep — this loop re-runs `kops validate`
# as fast as possible; consider adding a sleep between iterations.
until kops validate cluster | tail -1 | grep ready
do
	echo "Waiting for cluster to be ready..."
done
# run deploy
echo "Deploying..."
kubectl create -f deploy.yml
# exposing app
echo "Exposing app..."
kubectl expose deploy hello-deploy --name=hello-svc --target-port=8080 --type=NodePort
SECURITY="$(aws ec2 describe-security-groups --filters Name=group-name,Values=nodes.cluster1.cloudhippo.io --query 'Se
| true |
dd8374dfd5a8bac1d9d9c19174bd8ab6236ecae2 | Shell | s-u/uuid | /src/update-inst.sh | UTF-8 | 594 | 3.9375 | 4 | [
"BSD-3-Clause"
] | permissive | #!/bin/bash
# Resolve the directory this script lives in; fall back to the current
# directory when dirname yields an empty string. Expansions are quoted and
# backticks replaced with $(...) so paths with spaces work.
a=$(dirname "$0")
if [ -z "$a" ]; then a=.; fi
src=$(cd "$a" && pwd)
if [ -z "$src" ]; then src=$(pwd); fi

# Sanity check: we must be looking at the package's src/ directory.
if [ ! -e "$src/uuid.h" ]; then
    echo "ERROR: cannot determine src directory. If in doubt, run from it." >&2
    exit 1
fi

cd "$src/.."
echo "Processing uuid package in $(pwd)"

# The package root must contain a DESCRIPTION file.
if [ ! -e DESCRIPTION ]; then
    echo "ERROR: invalid package structure, cannot find $src/../DESCRIPTION" >&2
    exit 1
fi

if [ ! -e inst/include ]; then
    echo Creating inst/include
    mkdir -p inst/include
fi

# Publish the LinkingTo headers so dependent packages can include them.
echo Copying LinkingTo header files
cp -p src/*uuid.h inst/include/
echo Done
| true |
d19a2f06f8ad82de29d64f3ce052163ccff9d180 | Shell | MASS-VETS/MASS | /EnvRuns/Bash/Docker/cmd/DockerRmvCAmq | UTF-8 | 193 | 2.984375 | 3 | [] | no_license | #!/bin/bash
#--- DOCKER CONTAINER: ActiveMQ
#--- Remove the existing ActiveMQ container, if any is present.
container=$(docker ps -q -a -f name=amq_)
if [ -n "$container" ] ; then
	# Intentionally unquoted: if several IDs matched, each becomes its own
	# argument so they are all removed in one call.
	docker rm -f $container
fi
| true |
865eb1c4c7c56aa1e80fd7f79ac1bbcceae95c16 | Shell | Mic92/xfstests-cntr | /tests/btrfs/220 | UTF-8 | 10,754 | 3.5625 | 4 | [] | no_license | #! /usr/bin/env bash
# SPDX-License-Identifier: GPL-2.0
# Copyright (C) 2020 SUSE Linux Products GmbH. All Rights Reserved.
#
# FS QA Test 220
#
# Test all existent mount options of btrfs
# * device= argument is already being test by btrfs/125
# * space cache test already covered by test btrfs/131
# Per-test identifiers and result paths used by the xfstests harness.
seq=`basename $0`
seqres=$RESULT_DIR/$seq
echo "QA output created by $seq"

here=`pwd`
tmp=/tmp/$$
status=1	# failure is the default!
# Run cleanup (defined below) on exit and on common fatal signals.
trap "cleanup; exit \$status" 0 1 2 3 15

# get standard environment, filters and checks
. ./common/rc
. ./common/filter

# remove previous $seqres.full before test
rm -f $seqres.full

# This test only applies to btrfs and needs a scratch device.
_supported_fs btrfs
_require_scratch

# Remove this test's temp files; invoked via the trap above.
cleanup()
{
	cd /
	rm -f $tmp.*
}
# Compare the mount options currently active for $SCRATCH_MNT (4th field of
# its /proc/self/mounts entry) against $opt_check. On mismatch, echo a
# diagnostic that names $opt so the offending mount option can be tracked.
test_mount_flags()
{
	local opt
	local opt_check
	local active_opt
	opt="$1"
	opt_check="$2"

	# grep reads the file directly (no `cat |`); quoting $SCRATCH_MNT
	# keeps paths containing spaces as a single pattern argument.
	# active_opt is local now — nothing outside this function read it.
	active_opt=$(grep "$SCRATCH_MNT" /proc/self/mounts | \
		$AWK_PROG '{ print $4 }')

	if [[ "$active_opt" != *$opt_check* ]]; then
		echo "Could not find '$opt_check' in '$active_opt', using '$opt'"
	fi
}
# Mounts using opt ($1), remounts using remount_opt ($2), and remounts again
# using opt again (1), checking if the mount opts are being enabled/disabled by
# using _check arguments ($3 and $4)
test_enable_disable_mount_opt()
{
	local opt
	local opt_check
	local remount_opt
	local remount_opt_check
	opt="$1"
	opt_check="$2"
	remount_opt="$3"
	remount_opt_check="$4"

	# mount -> remount -> remount back, verifying the flags after each step
	_scratch_mount "-o $opt"
	test_mount_flags $opt $opt_check
	_scratch_remount $remount_opt
	test_mount_flags $remount_opt $remount_opt_check
	_scratch_remount $opt
	test_mount_flags $opt $opt_check
	_scratch_unmount
}
# Checks if mount options are applied and reverted correctly.
# By using options to mount ($1) and remount ($2), this function will mount,
# remount, and the mount with the original args, checking if the mount options
# match the _check args ($3 and $4).
# Later, opt and remount_opt are swapped, testing the counterpart option if used
# to first mount the fs.
test_roundtrip_mount()
{
	local opt
	local opt_check
	local remount_opt
	local remount_opt_check
	opt="$1"
	opt_check="$2"
	remount_opt="$3"
	remount_opt_check="$4"

	# invert the args to make sure that both options work at mount and
	# remount time
	test_enable_disable_mount_opt $opt $opt_check $remount_opt $remount_opt_check
	test_enable_disable_mount_opt $remount_opt $remount_opt_check $opt $opt_check
}
# Just mount and check if the options were mounted correctly by comparing the
# results with $opt_check
test_mount_opt()
{
	local opt
	local opt_check
	local active_opt
	opt="$1"
	opt_check="$2"

	# single mount/verify/unmount cycle — no remount step
	_scratch_mount "-o $opt"
	test_mount_flags $opt $opt_check
	_scratch_unmount
}
# Test mount options that should fail, usually by wrong arguments to options
test_should_fail()
{
	local opt
	opt="$1"

	# wrong $opt on purpose, should fail
	_try_scratch_mount "-o $opt" >/dev/null 2>&1
	if [ $? -ne 0 ]; then
		return
	fi

	# mount unexpectedly succeeded: report it and undo the mount
	echo "Option $opt should fail to mount"
	_scratch_unmount
}
# Try to mount using $opt, and bail our if the mount fails without errors. If
# the mount succeeds, then compare the mount options with $opt_check
test_optional_mount_opts()
{
	local opt
	local opt_check
	opt="$1"
	opt_check="$2"

	# $opt not enabled, return without running any tests
	_try_scratch_mount "-o $opt" >/dev/null 2>&1 || return
	_scratch_unmount

	# option enabled, run the test
	test_mount_opt $opt $opt_check
}
# Testes related to subvolumes, from subvol and subvolid options.
# Relies on the vol1 subvolume (id 256) containing file.txt, created by the
# main test sequence before this function runs.
test_subvol()
{
	test_should_fail "subvol=vol2"

	_scratch_mount "-o subvol=vol1"
	if [ ! -f "$SCRATCH_MNT/file.txt" ]; then
		echo "file.txt not found inside vol1 using subvol=vol1 mount option"
	fi
	_scratch_unmount

	test_should_fail "subvolid=222"

	_scratch_mount "-o subvolid=256"
	if [ ! -f "$SCRATCH_MNT/file.txt" ]; then
		echo "file.txt not found inside vol1 using subvolid=256 mount option"
	fi
	_scratch_unmount

	# subvol and subvolid should point to the same subvolume
	test_should_fail "-o subvol=vol1,subvolid=1234132"

	test_mount_opt "subvol=vol1,subvolid=256" "space_cache,subvolid=256,subvol=/vol1"
	test_roundtrip_mount "subvol=vol1" "space_cache,subvolid=256,subvol=/vol1" "subvolid=256" "space_cache,subvolid=256,subvol=/vol1"
}
# These options are enable at kernel compile time, so no bother if they fail
test_optional_kernel_features()
{
	# Test options that are enabled by kernel config, and so can fail safely
	test_optional_mount_opts "check_int" "space_cache,check_int,subvolid"
	test_optional_mount_opts "check_int_data" "space_cache,check_int_data,subvolid"
	test_optional_mount_opts "check_int_print_mask=123" "space_cache,check_int_print_mask=123,subvolid"

	# fragment= only exists on CONFIG_BTRFS_DEBUG kernels
	test_should_fail "fragment=invalid"
	test_optional_mount_opts "fragment=all" "space_cache,fragment=data,fragment=metadata,subvolid"
	test_optional_mount_opts "fragment=data" "space_cache,fragment=data,subvolid"
	test_optional_mount_opts "fragment=metadata" "space_cache,fragment=metadata,subvolid"
}
# Options without a usable "no" counterpart: each one is verified with a
# single mount/check/unmount cycle via test_mount_opt.
test_non_revertible_options()
{
	test_mount_opt "clear_cache" "relatime,space_cache,clear_cache,subvolid"
	test_mount_opt "degraded" "relatime,degraded,space_cache,subvolid"
	test_mount_opt "inode_cache" "space_cache,inode_cache,subvolid"
	# nologreplay should be used only with
	test_should_fail "nologreplay"
	test_mount_opt "nologreplay,ro" "ro,relatime,rescue=nologreplay,space_cache"
	# norecovery should be used only with. This options is an alias to nologreplay
	test_should_fail "norecovery"
	test_mount_opt "norecovery,ro" "ro,relatime,rescue=nologreplay,space_cache"
	test_mount_opt "rescan_uuid_tree" "relatime,space_cache,rescan_uuid_tree,subvolid"
	test_mount_opt "skip_balance" "relatime,space_cache,skip_balance,subvolid"
	test_mount_opt "user_subvol_rm_allowed" "space_cache,user_subvol_rm_allowed,subvolid"

	test_should_fail "rescue=invalid"
	# nologreplay requires readonly
	test_should_fail "rescue=nologreplay"
	test_mount_opt "rescue=nologreplay,ro" "relatime,rescue=nologreplay,space_cache"
	test_mount_opt "rescue=usebackuproot,ro" "relatime,space_cache,subvolid"
}
# All these options can be reverted (with their "no" counterpart), or can have
# their values set to default on remount
test_revertible_options()
{
	test_roundtrip_mount "acl" "relatime,space_cache,subvolid" "noacl" "relatime,noacl,space_cache,subvolid"
	test_roundtrip_mount "autodefrag" "relatime,space_cache,autodefrag" "noautodefrag" "relatime,space_cache,subvolid"
	test_roundtrip_mount "barrier" "relatime,space_cache,subvolid" "nobarrier" "relatime,nobarrier,space_cache,subvolid"

	test_should_fail "commit=-10"
	# commit=0 sets the default, so btrfs hides this mount opt
	test_roundtrip_mount "commit=35" "relatime,space_cache,commit=35,subvolid" "commit=0" "relatime,space_cache,subvolid"

	test_should_fail "compress=invalid"
	test_should_fail "compress-force=invalid"
	test_roundtrip_mount "compress" "relatime,compress=zlib:3,space_cache,subvolid" "compress=lzo" "relatime,compress=lzo,space_cache,subvolid"
	test_roundtrip_mount "compress=zstd" "relatime,compress=zstd:3,space_cache,subvolid" "compress=no" "relatime,space_cache,subvolid"
	test_roundtrip_mount "compress-force=no" "relatime,space_cache,subvolid" "compress-force=zstd" "relatime,compress-force=zstd:3,space_cache,subvolid"
	# zlib's max level is 9 and zstd's max level is 15
	test_roundtrip_mount "compress=zlib:20" "relatime,compress=zlib:9,space_cache,subvolid" "compress=zstd:16" "relatime,compress=zstd:15,space_cache,subvolid"
	test_roundtrip_mount "compress-force=lzo" "relatime,compress-force=lzo,space_cache,subvolid" "compress-force=zlib:4" "relatime,compress-force=zlib:4,space_cache,subvolid"

	# on remount, if we only pass datacow after nodatacow was used it will remain with nodatasum
	test_roundtrip_mount "nodatacow" "relatime,nodatasum,nodatacow,space_cache,subvolid" "datacow,datasum" "relatime,space_cache,subvolid"
	# nodatacow disabled compression
	test_roundtrip_mount "compress-force" "relatime,compress-force=zlib:3,space_cache,subvolid" "nodatacow" "relatime,nodatasum,nodatacow,space_cache,subvolid"
	# nodatacow disabled both datacow and datasum, and datasum enabled datacow and datasum
	test_roundtrip_mount "nodatacow" "relatime,nodatasum,nodatacow,space_cache,subvolid" "datasum" "relatime,space_cache,subvolid"
	test_roundtrip_mount "nodatasum" "relatime,nodatasum,space_cache,subvolid" "datasum" "relatime,space_cache,subvolid"

	test_should_fail "discard=invalid"
	test_roundtrip_mount "discard" "relatime,discard,space_cache,subvolid" "discard=sync" "relatime,discard,space_cache,subvolid"
	test_roundtrip_mount "discard=async" "relatime,discard=async,space_cache,subvolid" "discard=sync" "relatime,discard,space_cache,subvolid"
	test_roundtrip_mount "discard=sync" "relatime,discard,space_cache,subvolid" "nodiscard" "relatime,space_cache,subvolid"

	test_roundtrip_mount "enospc_debug" "relatime,space_cache,enospc_debug,subvolid" "noenospc_debug" "relatime,space_cache,subvolid"

	test_should_fail "fatal_errors=pani"
	# fatal_errors=bug is the default
	test_roundtrip_mount "fatal_errors=panic" "relatime,space_cache,fatal_errors=panic,subvolid" "fatal_errors=bug" "relatime,space_cache,subvolid"

	test_roundtrip_mount "flushoncommit" "relatime,flushoncommit,space_cache,subvolid" "noflushoncommit" "relatime,space_cache,subvolid"

	# 2048 is the max_inline default value
	test_roundtrip_mount "max_inline=1024" "relatime,max_inline=1024,space_cache" "max_inline=2048" "relatime,space_cache,subvolid"

	test_roundtrip_mount "metadata_ratio=0" "relatime,space_cache,subvolid" "metadata_ratio=10" "space_cache,metadata_ratio=10,subvolid"

	# ssd_spread implies ssd, while nossd_spread only disables ssd_spread
	test_roundtrip_mount "ssd_spread" "relatime,ssd_spread,space_cache" "nossd" "relatime,nossd,space_cache,subvolid"
	test_roundtrip_mount "ssd" "relatime,ssd,space_cache" "nossd" "relatime,nossd,space_cache,subvolid"
	test_mount_opt "ssd" "relatime,ssd,space_cache"

	test_should_fail "thread_pool=-10"
	test_should_fail "thread_pool=0"
	test_roundtrip_mount "thread_pool=10" "relatime,thread_pool=10,space_cache" "thread_pool=50" "relatime,thread_pool=50,space_cache"

	test_roundtrip_mount "notreelog" "relatime,notreelog,space_cache" "treelog" "relatime,space_cache,subvolid"
}
# real QA test starts here
_scratch_mkfs >/dev/null

# create a subvolume that will be used later
# (vol1 gets subvolume id 256; test_subvol checks for this file.txt)
_scratch_mount
$BTRFS_UTIL_PROG subvolume create "$SCRATCH_MNT/vol1" > /dev/null
touch "$SCRATCH_MNT/vol1/file.txt"
_scratch_unmount

test_optional_kernel_features
test_non_revertible_options
test_revertible_options
test_subvol

# No diagnostic output above means every option behaved as expected.
echo "Silence is golden"
status=0
exit
| true |
f90032d97b8853f49cf6bc7473cfa0cefc10d7a6 | Shell | EthanChen911238/Grammar-Correction | /grammar.sh | UTF-8 | 1,099 | 3.40625 | 3 | [] | no_license | #!/bin/bash
# Usage:
#   bash grammar.sh --encoder models/with_error_tag.encoder \
#       --decoder models/with_error_tag.decoder \
#       --sentences CoNLL_data/train.txt --emb CoNLL_data/train_small.elmo
#
# Parses the options into ENCODER, DECODER, SENTENCES and EMB, then
# generates ELMo embeddings and trains the model, each inside its own
# virtualenv.

POSITIONAL=()
while [[ $# -gt 0 ]]
do
key="$1"

case "$key" in
    -e|--encoder)
    ENCODER="$2"
    shift # past argument
    shift # past value
    ;;
    -d|--decoder)
    DECODER="$2"
    shift # past argument
    shift # past value
    ;;
    -s|--sentences)
    SENTENCES="$2"
    shift # past argument
    shift # past value
    ;;
    -m|--emb)
    EMB="$2"
    shift # past argument
    shift # past value
    ;;
    --default)
    DEFAULT=YES
    shift # past argument
    ;;
    *)    # unknown option
    POSITIONAL+=("$1") # save it in an array for later
    shift # past argument
    ;;
esac
done
# `set --` (not the legacy `set -`) restores the positional parameters and
# correctly clears them when POSITIONAL is empty.
set -- "${POSITIONAL[@]}" # restore positional parameters

# train: build the ELMo embeddings in the allennlp env, then train the
# model in the torch env. Arguments are quoted so paths with spaces work.
source ../allennlp/bin/activate
python elmo.py "$SENTENCES" "$EMB"
deactivate

source ../torch/bin/activate
python train.py "$ENCODER" "$DECODER" "$SENTENCES" "$EMB"
deactivate

#echo $ENCODER
#echo $DECODER
#echo $SENTENCES
#echo $EMB

# test
| true |
c126a348ab7acf5f0d7479271faf962313b066fd | Shell | yaochaoutokyo/bitcoinsv-docker-image | /entrypoint.sh | UTF-8 | 1,112 | 3.296875 | 3 | [] | no_license | #!/bin/bash
set -e
# First-run setup is only done when the container command is one of the
# bitcoin binaries; any other command falls through to the exec below.
if [[ "$1" == "bitcoin-cli" || "$1" == "bitcoin-tx" || "$1" == "bitcoind" || "$1" == "test_bitcoin" ]]; then
	mkdir -p "$BITCOIN_DATA"
	# Seed a default config only when none exists (or it is empty);
	# RPC credentials can be injected via environment variables.
	if [[ ! -s "$BITCOIN_DATA/bitcoin.conf" ]]; then
		cat <<-EOF > "$BITCOIN_DATA/bitcoin.conf"
		printtoconsole=1
		rpcallowip=::/0
		rpcpassword=${BITCOIN_RPC_PASSWORD:-password}
		rpcuser=${BITCOIN_RPC_USER:-bitcoin}
		EOF
		chown bitcoin:bitcoin "$BITCOIN_DATA/bitcoin.conf"
	fi
	# ensure correct ownership and linking of data directory
	# we do not update group ownership here, in case users want to mount
	# a host directory and still retain access to it
	chown -R bitcoin "$BITCOIN_DATA"
	ln -sfn "$BITCOIN_DATA" /home/bitcoin/.bitcoin
	chown -h bitcoin:bitcoin /home/bitcoin/.bitcoin
	# use exec to run command as PID 1, `docker stop` only send signal to PID 1 process, you need signal propagation to pass signal to child process
	# refer to https://blog.csdn.net/boling_cavalry/article/details/93380447 , use gosu to run command with user bitcoin. (sudo will frok child process)
	exec gosu bitcoin "$@"
fi
# default command
exec "$@"
84e0d5dda4be68b1b8b9f3b08fc6ac778421bc6a | Shell | flyzjhz/DWR-956_decrypted_fw | /decrypted_and_extracted/DWR956_v1.0.0.11_r03_NB_P02003/root/usr/local/bin/update_SN.sh | UTF-8 | 667 | 3 | 3 | [] | no_license | #!/bin/sh
#sed -i 's/ugly/beautiful/g' /home/bruno/old-friends/sue.txt
# Update the serial number ("SN=<value>" line in the "mac" file) stored in
# the factory-data MTD partition.  Usage: update_SN.sh <new-serial>
if [ -n "$1" ] ; then
	MAC_MTD=/dev/mtd3
	mkdir -p /tmp/wlan_cal
	cd /tmp/wlan_cal || exit 1
	# Pull the calibration archive off the partition and unpack it.
	nanddump -f datab.gz "$MAC_MTD"
	tar xf datab.gz
	# Current serial value, if any (empty when the SN= tag has no value).
	SN=$(grep "SN=" mac | cut -d= -f2)
	if grep -q "SN=" mac; then
		# BUG FIX: the old code substituted the bare value ($SN)
		# anywhere in the file, which could corrupt other fields
		# containing the same digits.  Anchoring on "SN=" replaces
		# only the serial field and also covers an empty old value.
		sed -i 's/'"SN=$SN"'/'"SN=$1"'/g' mac
	else
		echo "SN=$1" >> mac
	fi
	# Repack the archive and flash it back to the partition.
	tar cf datab.gz cal_wlan0.bin mac
	grep "SN=" mac
	nandwrite -e -p "$MAC_MTD" datab.gz
	cd /tmp
	rm -rf /tmp/wlan_cal
else
	echo "Usage: update_SN.sh 00000001"
fi
| true |
90407ebcc5f3f86d3bc0ff2e4276d67433923a0d | Shell | vanta/vanta.github.io | /site.sh | UTF-8 | 767 | 3.265625 | 3 | [] | no_license | echo "<html><head><style>
table, th, td {
border: 1px solid black;
border-collapse: collapse;
}
th, td {
padding: 3px;
}
</style></head><body>"
# Emit the results table.  Each stdin line has the shape
# "<votes> | ... | <name>"; votes are the leading digits, the name is the
# last pipe-separated field (xargs trims surrounding whitespace).
echo "<h1>Wyniki głosowania ($(date)):</h1>"
echo "<table border=1>"
echo "<tr><th>Miejsce</th><th>Głosów</th><th>Nazwa</th></tr>"
counter=1
while read -r data; do
    votes=$(echo "$data" | grep -Eo "^[0-9]+")
    name=$(echo "$data" | grep -Eo "[^|]+$" | xargs)
    color='FFFFFF'
    # Highlight the favoured entry; the top 10 rows get green.
    if [[ $name =~ "Nasza Dobra Szkoła" ]] ; then
        color='cc3366'
    elif [ "$counter" -lt 11 ] ; then
        color='66cc66'
    fi
    # BUG FIX: the original nested double quotes inside the double-quoted
    # string, so the style attribute was emitted without quotes; single
    # quotes keep the generated HTML well-formed.
    echo "<tr style='background-color:#$color'><td style='text-align:right'>$counter</td><td style='text-align:right'>$votes</td><td>$name</td></tr>"
    ((counter+=1))
done
echo "</table>"
echo "</body></html>"
| true |
35a2e6d520497788cd99023d25f2c71cf1175785 | Shell | validator/validator | /build/build.sh | UTF-8 | 139 | 2.875 | 3 | [
"MIT"
] | permissive | #!/bin/sh
# Forward all arguments to checker.py; with no arguments the default
# action "run" is used.
if [ "$#" -eq 0 ]; then
    set -- run
fi
# The interpreter can be overridden via $PYTHON (default: python).
if [ -z "${PYTHON:-}" ]; then
    PYTHON=python
fi
# BUG FIX: pass "$@" instead of an unquoted $args string so arguments
# containing whitespace are preserved as single words.
"$PYTHON" ./checker.py "$@"
| true |
16b2e2bcbfe2f0f9c5e1feca8704dfa3d48c619a | Shell | 5l1v3r1/shellnotes | /util/noteinfo.sh | UTF-8 | 423 | 3.5625 | 4 | [] | no_license | :<<'info'
shellnotes - noteinfo.sh
(C) Dimitris Marakomihelakis
Released under the "All rights reserved" category. See the RIGHTS.txt file
in /docs/github/ for its full text.
info
# Print wc statistics (lines/words/chars) for a note stored in the notes
# directory.  Prompts for the note name when none is given as $1.
# Globals: DEFAULT_PATH (notes directory), DIR (directory to return to).
function noteinfo() {
	if [ $# -eq 0 ]; then
		echo -n "Enter note name: " && read -r notename
	else
		notename=$1
	fi
	# BUG FIX: quote all expansions so names/paths with spaces work.
	cd "$DEFAULT_PATH" || return 1
	if [ -e "$notename" ]; then
		wc "$notename"
		echo "(lines/words/chars/name)"
	else
		echo "That note doesn't exist."
	fi
	cd "$DIR" || return 1
}
0f4d892cc885b1985654743804ea291f53d32008 | Shell | TheInventorMan/ConnectNow | /lib/dpkg/info/procps.postinst | UTF-8 | 3,645 | 3.59375 | 4 | [
"MIT"
] | permissive | #!/bin/sh
# postinst script for procps
#
# see: dh_installdeb(1)
set -e
# summary of how this script can be called:
# * <postinst> `configure' <most-recently-configured-version>
# * <old-postinst> `abort-upgrade' <new version>
# * <conflictor's-postinst> `abort-remove' `in-favour' <package>
# <new-version>
# * <deconfigured's-postinst> `abort-deconfigure' `in-favour'
# <failed-install-package> <version> `removing'
# <conflicting-package> <version>
# for details, see http://www.debian.org/doc/debian-policy/ or
# the debian-policy package
#
# quoting from the policy:
# Any necessary prompting should almost always be confined to the
# post-installation script, and should be protected with a conditional
# so that unnecessary prompting doesn't happen if a package's
# installation fails and the `postinst' is called with `abort-upgrade',
# `abort-remove' or `abort-deconfigure'.
# Move a conffile without triggering a dpkg question
# Move a conffile without triggering a dpkg question: keep the user's old
# file as the live conffile and park the packaged one as .dpkg-new.
#   $1 - old conffile path   $2 - new conffile path
mv_conffile() {
    OLDCONFFILE="$1"
    NEWCONFFILE="$2"
    if [ -e "$OLDCONFFILE" ]; then
        echo "Preserving user changes to $NEWCONFFILE ..."
        # BUG FIX: only back the new conffile up when it exists; the
        # unconditional mv failed under `set -e` when it was absent.
        if [ -e "$NEWCONFFILE" ]; then
            mv -f "$NEWCONFFILE" "$NEWCONFFILE".dpkg-new
        fi
        mv -f "$OLDCONFFILE" "$NEWCONFFILE"
    fi
}
# Register an update-alternatives entry (binary plus its man page) for a
# tool, but only when the .procps variant is actually installed.
#   $1 - tool name   $2 - binary directory   $3 - man section
check_alternatives() {
    BINNAME="$1"
    BINPATH="$2"
    MANSEC="$3"
    # Guard clause: nothing to register without the .procps binary.
    [ -e "$BINPATH"/"$BINNAME".procps ] || return 0
    update-alternatives --install "$BINPATH"/"$BINNAME" "$BINNAME" \
        "$BINPATH"/"$BINNAME".procps 50 \
        --slave /usr/share/man/man"$MANSEC"/"$BINNAME"."$MANSEC".gz "$BINNAME"."$MANSEC".gz \
        /usr/share/man/man"$MANSEC"/"$BINNAME".procps."$MANSEC".gz
}
# $1 is the dpkg maintainer-script action; $2 is the previously configured
# version (for upgrades).
case "$1" in
    configure|abort-remove|abort-deconfigure)
	# Drop stale ps caches left behind by very old procps versions.
	if [ -e /etc/psdevtab ] ; then
        rm -f /etc/psdevtab
	fi
	if [ -e /etc/psdatabase ]
	then
		rm -f /etc/psdatabase
	fi
	# Remove old procps init.d script, if it exists Closes: #55137
	if [ -L /etc/rcS.d/S30procps.sh ]
	then
		update-rc.d -f procps.sh remove >/dev/null
	fi
	# and if that didn't work Closes: #92184 (#234306 with -L )
	if [ -L /etc/rcS.d/S30procps.sh ]
	then
		rm -f /etc/rcS.d/S30procps.sh
	fi
	# Remove moved procps.sh file, if it is there
	if dpkg --compare-versions "$2" le "1:3.2.7-3"; then
		mv_conffile "/etc/init.d/procps.sh" "/etc/init.d/procps"
	fi
	#
	# Now to do the alternatives for w
	update-alternatives --install /usr/bin/w w /usr/bin/w.procps 50 \
	--slave /usr/share/man/man1/w.1.gz w.1.gz /usr/share/man/man1/w.procps.1.gz
	# Do alternatives for uptime kill vmstat and ps, if required
	check_alternatives "uptime" "/usr/bin" "1"
	check_alternatives "kill" "/usr/bin" "1"
	check_alternatives "vmstat" "/usr/bin" "8"
	check_alternatives "ps" "/bin" "1"
    ;;
    abort-upgrade)
	# Nothing to do
    ;;
    *)
        echo "postinst called with unknown argument \`$1'" >&2
        exit 1
    ;;
esac
# dh_installdeb will replace this with shell code automatically
# generated by other debhelper scripts.
# Automatically added by dh_installinit
if [ -e "/etc/init/procps.conf" ]; then
	invoke-rc.d procps start || exit $?
fi
# End automatically added section
# Automatically added by dh_installinit
update-rc.d -f procps remove >/dev/null || exit $?
# End automatically added section
# Automatically added by dh_installmenu
if [ "$1" = "configure" ] && [ -x "`which update-menus 2>/dev/null`" ]; then
	update-menus
fi
# End automatically added section
# Automatically added by dh_makeshlibs
if [ "$1" = "configure" ]; then
	ldconfig
fi
# End automatically added section
exit 0
| true |
350d9af1eeb5f8aa213459a561de64566060935a | Shell | c-mertes/docker_drop | /entry_point.sh | UTF-8 | 514 | 3.90625 | 4 | [] | no_license | #!/usr/bin/env bash
set -Eeo pipefail
# set home folder and source the drop's bashrc
export HOME=/drop
. /drop/.bashrc
# -u is enabled only after sourcing the bashrc, which may reference
# variables that are legitimately unset.
set -u
# some bash args or none at all (default)
# (|| short-circuits, so "${1#-}" is never evaluated when $# is 0 and
# set -u cannot trip on the unset $1)
if [ "$#" -eq 0 ] || [ "${1#-}" != "$1" ] ;
then
    echo -e "Welcome to the drop command line!\n"
    exec bash "$@"
elif [ "${1#./}" == "bash" ]; then
    echo -e "Welcome to the drop command line!\n"
    exec "$@"
# any other command should be treated as command to be
# run within the conda environment
else
    conda run -n drop "$@"
fi
| true |
f247d8f795f4cdcbea53818c2935a5e651c8f46a | Shell | syspimp/rhel-edge-application-collection | /build-scripts/configure-system.sh | UTF-8 | 1,150 | 2.765625 | 3 | [] | no_license | #!/bin/bash
set -xe
# Refuse to run directly as root: the rootless podman setup below
# (`podman network create`, user namespaces) must be done by an
# unprivileged sudo-capable user.
CHECKLOGGINGUSER=$(whoami)
# BUG FIX: quote the expansion and use the portable `=` comparison
# instead of the unquoted, nonstandard `==` inside `[`.
if [ "${CHECKLOGGINGUSER}" = "root" ];
then
  echo "login as sudo user to run script."
  echo "You are currently logged in as root"
  exit 1
fi
# Register the system and enable the RHEL 8 repositories.
sudo subscription-manager register
sudo subscription-manager refresh
sudo subscription-manager attach --auto
sudo subscription-manager repos --enable=rhel-8-for-x86_64-appstream-rpms --enable=rhel-8-for-x86_64-baseos-rpms
# Container tooling and general utilities.
sudo dnf module install -y container-tools
sudo yum install git vim curl wget pcp pcp-zeroconf -y
sudo pip3 install podman-compose
sudo yum install slirp4netns podman -y
# Allow enough user namespaces for rootless containers.
sudo tee -a /etc/sysctl.d/userns.conf > /dev/null <<EOT
user.max_user_namespaces=28633
EOT
sudo sysctl -p /etc/sysctl.d/userns.conf
# Rootless bridge network for the edge containers (run as invoking user).
podman network create --driver bridge rhel-edge --subnet 192.168.33.0/24
sudo yum install redis -y
sudo systemctl start redis
sudo systemctl enable redis
# Performance Co-Pilot metrics logging/reporting services.
sudo systemctl enable pmlogger_daily_report.timer pmlogger_daily_report-poll.timer --now
sudo systemctl restart pmcd pmlogger
sudo systemctl enable pmproxy
sudo systemctl start pmproxy
sudo firewall-cmd --add-service=pmproxy --permanent
sudo firewall-cmd --reload
| true |
ccd7c3f34c1745d2f8401dff61579e45100c9b20 | Shell | achmadnabawi/sparkdata | /wrt/data_base_process/sh/shopitem_c.sh | UTF-8 | 1,440 | 2.609375 | 3 | [] | no_license | #!/usr/bin/env bash
# Load the user environment; the `hfs` command below is presumably an
# HDFS wrapper defined there — TODO confirm.
source ~/.bashrc
pre_path='/home/wrt/sparkdata'
#zuotian=$(date -d '1 days ago' +%Y%m%d)
#qiantian=$(date -d '2 days ago' +%Y%m%d)
# $1: current partition day, $2: previous partition day (both YYYYMMDD).
now_day=$1
last_day=$2
# Remove the previous temp output before the Spark job re-creates it.
hfs -rmr /user/wrt/shopitem_c_tmp
spark-submit --executor-memory 6G --driver-memory 5G --total-executor-cores 80 \
$pre_path/wrt/data_base_process/t_base_shopitem_c.py $now_day >> \
$pre_path/wrt/data_base_process/sh/log_shopitem/log_c_$now_day 2>&1
# Load the temp output into the '0temp' partition, then full-outer-join it
# with yesterday's partition into today's partition (rows present on
# either side are kept; fresh values win where both exist).
hive<<EOF
use wl_base;
LOAD DATA INPATH '/user/wrt/shopitem_c_tmp' OVERWRITE INTO TABLE t_base_ec_shopitem_c PARTITION (ds='0temp');
insert OVERWRITE table t_base_ec_shopitem_c PARTITION(ds = $now_day)
select
case when t1.item_id is null then t2.shop_id else t1.shop_id end,
case when t1.item_id is null then t2.item_id else t1.item_id end,
case when t1.item_id is null then t2.sold else t1.sold end,
case when t1.item_id is null then t2.saleprice else t1.saleprice end,
case when t2.item_id is null then t1.up_day else t2.up_day end,
case when t1.item_id is null then t2.update_day else t1.update_day end,
case when t1.item_id is null then t2.ts else t1.ts end
from
(select * from t_base_ec_shopitem_c where ds = '0temp')t1
full outer join
(select * from t_base_ec_shopitem_c where ds = $last_day)t2
on
t1.item_id = t2.item_id;
EOF
#hfs -mkdir /commit/shopitem_c/archive/$now_day'_arc'
#hfs -mv /commit/shopitem_c/20*/* /commit/shopitem_c/archive/$now_day'_arc'/
# Daily updates no longer need the archive step above.
| true |
416dbb10250ff1b1b6df9528c6b47a5746239bf7 | Shell | calculatelinux/calculate | /profiles/templates/2_ac_install_merge/sys-apps/calculate-utils/profile.bashrc.d/30-update-cache | UTF-8 | 951 | 2.828125 | 3 | [] | no_license | # Calculate comment=# protected
# vim: set syntax=sh
#
# Copyright 2015 Calculate Ltd. http://www.calculate-linux.org
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
update_cache() {
	# Run the Calculate package-cache helper when it is installed and
	# executable; the exit status mirrors the original `&&` chain
	# (non-zero when the helper is missing).
	local helper=/usr/libexec/calculate/cl-pkg-cache
	[[ -x $helper ]] && "$helper"
}
# Returns 0 when the current ${CATEGORY}/${PN} package should be
# configured: either the merge-setup list does not exist (assume yes) or
# it explicitly contains this package atom.
calculate_configurable_package() {
	local list=/var/lib/calculate/calculate-core/cache/merge-setup.list
	[[ -f $list ]] || return 0
	# BUG FIX: quote the pattern and file so the grep cannot be broken
	# by word splitting or globbing of the expansion.
	grep -- "${CATEGORY}/${PN}" "$list" &>/dev/null
}
# In the ebuild "setup" phase, refresh the cache only for the first
# package of an emerge batch (a "(1 of N)" entry for this package near
# the end of emerge.log).
if [[ ${EBUILD_PHASE} == "setup" ]]
then
	if tail -5 /var/log/emerge.log | grep -P "\(1 of.*${CATEGORY}/${P}" &>/dev/null
	then
		update_cache
	fi
fi
# After an unmerge ("postrm"), refresh the cache once emerge has logged
# completion of the whole operation.
if [[ ${EBUILD_PHASE} == "postrm" ]]
then
	if tail -2 /var/log/emerge.log | grep "*** emerge " &>/dev/null
	then
		update_cache
	fi
fi
| true |
a54d86f81358dc86182bbb390f6fa4c128dd2310 | Shell | LuisVCSilva/thredded_create_app | /.travis.setup.sh | UTF-8 | 236 | 2.765625 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
set -eux
# $DB selects the database adapter under test (set by the CI matrix);
# with -u unset DB aborts the script immediately.
if [ "$DB" = 'postgresql' ]; then
  sudo service postgresql start
  # citext is needed for case-insensitive text columns.
  psql -d template1 -c 'CREATE EXTENSION citext;' -U postgres
fi
if [ "$DB" = 'mysql2' ]; then
  sudo bash .travis.setup.mysql_v5_7.sh
fi
| true |
c1dfdb6e8ab398e4f8f7a8a608a752dbe4f06563 | Shell | hfuerst/Reelvdr-vdr2.2-Ubuntu16.04 | /packages/reelvdr-tools/tmp/usr/sbin/screenshot.sh | UTF-8 | 1,365 | 3.734375 | 4 | [] | no_license | #!/bin/sh
#
# Creates a OSD screenshot using osddump / svdrp grab
#
# 2012-06-12: adjust for ReelBox ICE series (RC)
# 2007-02-07 by RollerCoaster
# include language
# Source the system configuration, falling back to packaged defaults.
if [ -f /etc/sysconfig ] ; then
    . /etc/sysconfig
else
    . /etc/default/sysconfig
fi
# Shared helpers (RbMini/RbLite/RbIce, Log, smesgtl, ...) come from here.
if [ -f /etc/reel/globals ] ; then
    . /etc/reel/globals
else
    . /etc/default/globals
fi
echo running $0
# Screenshots are not supported on the NetClient (RbMini) hardware.
if RbMini ; then
    Syslog "sorry, screenshots disabled on Reel NetClient."
    exit 0
fi
loglevel=3
# Print the usage text.  $MyName is expected to hold the script name —
# presumably set by the sourced globals; TODO confirm.
syntax () {
cat << EOF
Usage: $MyName
Description:
    Creates a OSD screenshot using osddump
    Screenshots will be saved in /tmp
Options:
    no options
EOF
}
# Take the screenshot.  $1 selects the mode: "" grabs a shot using the
# method appropriate for the hardware variant, -h prints usage, anything
# else is rejected.
start() {
    # check directory
    test -d /media/reel/pictures/screenshot || mkdir /media/reel/pictures/screenshot
    case $1 in
	"")
	    nowhr=`date +%Y-%m-%d_%H.%M.%S`
	    # Pick the grab method per hardware generation.
	    if RbLite ; then
		fname="/tmp/${nowhr}.jpg"
		osddump | pnmtojpeg > $fname
	    elif RbIce ; then
		fname="/media/reel/pictures/screenshot/${nowhr}.jpg"
		svdrpsend.sh GRAB $fname
	    else
		fname="/media/reel/pictures/screenshot/${nowhr}.png"
		hdfbshot -d $FBDEV -s0 $fname
	    fi
	    Log 1 "saved screenshot as $fname"
	    smesgtl "Info: Screenshot saved in My Images / screenshot"
	    ;;
	-h)
	    syntax
	    exit 0
	    ;;
	*)
	    echo "Unknown option $SOPTION."
	    syntax
	    exit 2
	    ;;
    esac
}
#
# Main
#
# Resolve the framebuffer device for the HD framebuffer from /proc/fb.
MY_FB=$(grep hde_fb /proc/fb | awk '{print $1}')
FBDEV="/dev/fb$MY_FB"
# Take the screenshot in the background so the caller is not blocked.
start &
| true |
f66059607af22bd39bc4aa0a50dbc4778fc5335d | Shell | jsoref/js-test | /handle-comments.sh | UTF-8 | 2,312 | 3.25 | 3 | [] | no_license | #!/bin/bash
set -e
set -x
# Root of the spell-checker installation (defaults to the image path).
export spellchecker=${spellchecker:-/app}
. "$spellchecker/common.sh"
echo debug:
cat "$GITHUB_EVENT_PATH"
# Only react to freshly created comments.
action=$(jq -r .action < "$GITHUB_EVENT_PATH")
if [ "$action" != "created" ]; then
  exit 0
fi
# Extract the issue and comment objects from the event payload into
# temp files, then pull out the fields used below.
issue=$(mktemp)
jq -r .issue < "$GITHUB_EVENT_PATH" > $issue
number=$(jq -r .number < $issue)
comment=$(mktemp)
jq -r .comment < "$GITHUB_EVENT_PATH" > $comment
username=$(jq -r .user.login < $comment)
body=$(jq -r .body < $comment)
# Wrap the text file $1 as a GitHub JSON payload: {"body": <contents>}.
# Prints the payload path on stdout (the caller command-substitutes it);
# the payload is also echoed to stderr so it shows up in the trace log.
# Deletes the input file.
body_to_payload() {
	local body_file="$1"
	local payload
	payload=$(mktemp)
	echo '{}' | jq --rawfile body "$body_file" '.body = $body' > "$payload"
	# BUG FIX: quote the temp paths so unusual TMPDIRs cannot split.
	rm -f -- "$body_file"
	cat "$payload" >&2
	echo "$payload"
}
# Call the GitHub comments API.
#   $1 - API URL
#   $2 - optional JSON payload file to send
#   $3 - optional HTTP method override (e.g. PATCH); only used with $2
comment() {
	local comments_url="$1"
	local payload_file="${2:-}"
	local method="${3:-}"
	# Build the optional arguments in an array instead of relying on
	# unquoted word splitting of a flat string (which broke on paths
	# containing whitespace).
	local -a extra=()
	if [ -n "$payload_file" ]; then
		extra+=(--data "@$payload_file")
		if [ -n "$method" ]; then
			extra+=(-X "$method")
		fi
	fi
	curl -vvv -L -s -S \
		"${extra[@]}" \
		-H "Authorization: token $GITHUB_TOKEN" \
		--header "Content-Type: application/json" \
		-H 'Accept: application/vnd.github.comfort-fade-preview+json' \
		"$comments_url"
}
# "setup" comments: post a seed data comment on the issue, then PATCH it
# so its body contains a link back to its own API URL plus the
# @check-spelling-bot trigger line.
if echo "$body" | grep -q "setup"; then
  COMMENTS_URL=$(jq -r .comments_url < $issue)
  BODY=$(mktemp)
  base="<details><summary>This is data...</summary>
1
2
3
</details>"
  echo "$base" > $BODY
  PAYLOAD=$(body_to_payload $BODY)
  response=$(mktemp)
  # Create the comment; the response JSON carries its API URL.
  comment "$COMMENTS_URL" "$PAYLOAD" > $response
  cat $response
  COMMENTS_URL=$(jq -r .url < $response)
  # Rewrite the comment with a self-referencing trigger line.
  echo "$base
[Quote this line to tell]($COMMENTS_URL) @check-spelling-bot update whitelist" > $BODY
  PAYLOAD=$(body_to_payload $BODY)
  comment "$COMMENTS_URL" "$PAYLOAD" "PATCH" > $response
  cat $response
  exit 0
fi
# Did the comment quote the magic "@check-spelling-bot update whitelist"
# trigger line?
trigger=$(echo "$body" | perl -ne 'print if /\@check-spelling-bot(?:\s+|:\s*)update whitelist/')
if [ -n "$trigger" ]; then
  comments_url=$(jq -r .repository.issue_comment_url < "$GITHUB_EVENT_PATH")
  # Turn the URL template (".../comments{/number}") into a regex that
  # matches concrete comment URLs (".../comments/\d+").
  export comments_url=$(echo "$comments_url" | perl -pne 's{\{.*\}}{/\\d+}')
  # Extract the quoted comment's API URL from the trigger line.
  comment_url=$(echo "$trigger" | perl -ne 'next unless m{($ENV{comments_url})}; print "$1\n";')
  comment=$(comment "$comment_url")
  # BUG FIX: the original ran "$comment" (the JSON text itself) as a
  # command; feed it to jq instead to extract the comment body.
  comment=$(printf '%s\n' "$comment" | jq -r .body)
  script=$(mktemp)
  # Keep only the fenced code block from the comment body.
  echo "$comment" | perl -e '$/=undef; $_=<>; s/.*\`\`\`(.*)\`\`\`.*/$1/s;print' >> "$script"
  echo "# The following code (or equivalent) needs to be run:"
  cat "$script"
  echo "# end"
  exit 0
fi
| true |
17ce9fcf7c351b36e737eba83988c0182721efad | Shell | joshgav/azure-scripts | /acs/mesos/run-mesos.sh | UTF-8 | 634 | 2.59375 | 3 | [
"MIT"
] | permissive | SCRIPTS_DIR=`dirname $0`
MESOS_MASTER=~/mesos/build/bin/mesos-master.sh
MESOS_SLAVE=~/mesos/build/bin/mesos-slave.sh
MESOS_LOGDIR=/var/log/mesos
# Local IP used for both the master and slave binds.
LOCAL_IPADDR=`$SCRIPTS_DIR/find-ip-address.sh`
# Start a single-node Mesos master in the background; console output is
# captured both via --external_log_file and the shell redirection.
nohup $MESOS_MASTER --cluster=joshgav-mesos --external_log_file=$MESOS_LOGDIR/master_console.log \
    --ip=$LOCAL_IPADDR --log_dir=$MESOS_LOGDIR --work_dir=$MESOS_LOGDIR/work \
    > $MESOS_LOGDIR/master_console.log 2>&1 &
# Start a slave pointed at the local master (default master port 5050).
nohup $MESOS_SLAVE --master=${LOCAL_IPADDR}:5050 --external_log_file=$MESOS_LOGDIR/slave_console.log \
    --ip=$LOCAL_IPADDR --log_dir=$MESOS_LOGDIR --work_dir=$MESOS_LOGDIR/work \
    > $MESOS_LOGDIR/slave_console.log 2>&1 &
| true |
d3662b486982597a94fa4c2ed19bbd250b2dfaf8 | Shell | PaddlePaddle/PaddleTest | /models/PaddleSlim/CE/linux/scripts/slim_dy_ce_tests_ptq/train.sh | UTF-8 | 1,867 | 3.390625 | 3 | [] | no_license | #!/usr/bin/env bash
# External arguments:
# $1: 'single' single-GPU training; 'multi' multi-GPU training; 'recv' resume training
# $2: $XPU = gpu or cpu
# Current working directory / model name derived from the directory name.
cur_path=`pwd`
model_name=${PWD##*/}
echo "$model_name 模型train阶段"
# Path configuration.
root_path=$cur_path/../../
code_path=$cur_path/../../PaddleSlim/ce_tests/dygraph/quant
log_path=$root_path/log/$model_name/
mkdir -p $log_path
# Temporary environment change: install PaddleSlim from source.
cd $root_path/PaddleSlim
#pip uninstall paddleslim
python -m pip install -r requirements.txt
python setup.py install
# Helper used below to record each step's exit code for the CE collector.
# Append a CE exit-code marker to the log for step $2 based on status $1.
# On failure the log is echoed and copied to FAIL_$2.log so the CI
# collector can pick it up.
print_info(){
    # BUG FIX: quote $1 in the numeric test and quote the log paths so
    # an empty status or a log_path with spaces cannot break the test.
    if [ "$1" -ne 0 ];then
        echo "exit_code: 1.0" >> "${log_path}/$2.log"
        echo -e "\033[31m FAIL_$2 \033[0m"
        echo "$2 fail log as follows"
        cat "${log_path}/$2.log"
        cp "${log_path}/$2.log" "${log_path}/FAIL_$2.log"
    else
        echo "exit_code: 0.0" >> "${log_path}/$2.log"
    fi
}
cd $code_path
# PTQ (post-training quantization) settings shared by all branches.
data_path='./ILSVRC2012/'
output_dir="./output_ptq"
quant_batch_num=10
quant_batch_size=10
model=mobilenet_v1
if [ "$1" = "linux_dy_gpu1" ];then # single GPU
    # save ptq quant model
    python ./src/ptq.py \
        --data=${data_path} \
        --arch=${model} \
        --quant_batch_num=${quant_batch_num} \
        --quant_batch_size=${quant_batch_size} \
        --output_dir=${output_dir} > ${log_path}/ptq_${model} 2>&1
    print_info $? $2
elif [ "$1" = "linux_dy_gpu2" ];then # multi GPU
    python ./src/ptq.py \
        --data=${data_path} \
        --arch=${model} \
        --quant_batch_num=${quant_batch_num} \
        --quant_batch_size=${quant_batch_size} \
        --output_dir=${output_dir} > ${log_path}/ptq_${model} 2>&1
    print_info $? $2
elif [ "$1" = "linux_dy_cpu" ];then # cpu
    python ./src/ptq.py \
        --data=${data_path} \
        --arch=${model} \
        --quant_batch_num=${quant_batch_num} \
        --quant_batch_size=${quant_batch_size} \
        --output_dir=${output_dir} > ${log_path}/ptq_${model} 2>&1
    print_info $? $2
fi
| true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.