blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
1b1a375613b2de23df1a0ea079003f61ceef7d31
|
Shell
|
Jack-0/dotfiles
|
/scripts/code_sesh.sh
|
UTF-8
| 344
| 3.71875
| 4
|
[] |
no_license
|
#!/bin/bash
# Open a work project in VS Code, choosing the project directory with fzf.
#
# Requirements
#
# THIS SCRIPT ASSUMES YOU STORE ALL YOUR WORK CODE IN ONE DIR
#
# 1. brew install fzf
# 2. change ~/dev to where ever you store your git repos

# Use the directory passed on the command line, otherwise pick interactively.
if [[ $# -eq 1 ]]; then
    selected=$1
else
    selected=$(find ~/dev -mindepth 1 -maxdepth 1 -type d | fzf)
fi

# Nothing chosen (fzf aborted with Esc) -> exit quietly.
if [[ -z $selected ]]; then
    exit 0
fi

# Quote the path so directories containing spaces still open correctly.
code "$selected"
| true
|
752d8f294e7cbcc9fda8d005ffaf9bc283c25c9b
|
Shell
|
amedvedova/NWPvis
|
/runme.sh
|
UTF-8
| 634
| 3.25
| 3
|
[] |
no_license
|
#!/bin/bash
# This script runs the "main_bash.py" python script, which produces the vertical cross-section figures.
# The python code takes 4 command line arguments: 3 for input data, 1 as path to output figures.

# Path to data and figures:
PATH_DATA='/home/alve/Desktop/NWPvis/data'
PATH_FIGS='/home/alve/Desktop/NWPvis/figures/'
# Model level data
ML_DATA="${PATH_DATA}/ML.nc"
# Logarithm of surface pressure
SFC_LNSP="${PATH_DATA}/SFC_LNSP.nc"
# Surface geopotential data
Z_DATA="${PATH_DATA}/TOPO.nc"
# Call the python script: save figs in the figures folder.
# Arguments are quoted so paths containing spaces survive word splitting.
python main_bash.py "${ML_DATA}" "${SFC_LNSP}" "${Z_DATA}" "${PATH_FIGS}"
| true
|
0beb1af7a9ba9697df173924fa330e70b12cc794
|
Shell
|
cs558i/bootscripts-standard
|
/bootscripts/cblfs/init.d/gdm
|
UTF-8
| 740
| 3.1875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Begin $rc_base/init.d/gdm
# Based on sysklogd script from LFS-3.1 and earlier.
# Rewritten by Gerard Beekmans - gerard@linuxfromscratch.org
#$LastChangedBy: randy $
#$Date: 2007-07-26 10:55:51 -0500 (Thu, 26 Jul 2007) $
# SysV-style init script for the GNOME Display Manager.
# boot_mesg and loadproc come from $rc_functions (LFS bootscript helpers) —
# TODO confirm their exact semantics against the installed rc functions.
. /etc/sysconfig/rc
. $rc_functions
# PID file written by gdm; "stop" only acts when this file exists.
pidfile=/run/gdm.pid
# GDM binaries may live under the GNOME prefix rather than /usr.
export PATH=$PATH:/opt/gnome/bin:/opt/gnome/sbin
case "$1" in
start)
boot_mesg "Starting GDM..."
loadproc -p $pidfile gdm
;;
stop)
boot_mesg "Stopping GDM..."
# Only attempt a stop when the PID file indicates gdm was started.
if [ -f $pidfile ]; then
loadproc gdm-stop
fi
;;
reload)
boot_mesg "Reloading GDM..."
loadproc gdm-safe-restart
;;
restart)
boot_mesg "Restarting GDM..."
loadproc gdm-restart
;;
*)
echo "Usage: $0 {start|stop|reload|restart}"
exit 1
;;
esac
# End $rc_base/init.d/gdm
| true
|
2831b666d646e60546ae2ff191b43e7d295f0d7f
|
Shell
|
DuckSoft/AUR
|
/ventoy-bin/PKGBUILD
|
UTF-8
| 2,534
| 2.953125
| 3
|
[] |
no_license
|
# Maintainer: DuckSoft <realducksoft at gmail dot com>
# Co-Maintainer: Mark Wagie <mark dot wagie at tutanota dot com>
# Contributor: KokaKiwi <kokakiwi+aur@kokakiwi.net>
# Arch Linux PKGBUILD packaging the prebuilt Ventoy release tarball.
pkgname=ventoy-bin
pkgver=1.0.43
pkgrel=1
pkgdesc='A new multiboot USB solution (Binary)'
url='http://www.ventoy.net/'
arch=('i686' 'x86_64')
license=('GPL3')
depends=('bash' 'util-linux' 'xz' 'dosfstools' 'lib32-glibc')
# ${pkgname%-bin} strips the "-bin" suffix, yielding "ventoy".
provides=("${pkgname%-bin}")
conflicts=("${pkgname%-bin}")
install="${pkgname%-bin}.install"
source=("https://github.com/ventoy/Ventoy/releases/download/v${pkgver}/${pkgname%-bin}-${pkgver}-linux.tar.gz"
"${pkgname%-bin}" "${pkgname%-bin}web" "${pkgname%-bin}-persistent" 'sanitize.patch')
sha256sums=('12085654919b708a4fd9393572fb4dd10732481f88becb334cce20d5954305a4'
'1ad5d314e02b84127a5a59f3871eb1d28617218cad07cde3eeddcac391473000'
'c3d4463a878a89d96e5f0bc4e1a43e48f27af5965bd4c977567695d7cf91fe5f'
'51029745da197dded6e007aee3f30f7ea1aa6e898172a6ea176cc2f3a842d0ff'
'd250816de0903a5fc5364ee25914a06b1b1595bcfc72dac84ad016e1d30727c4')
# prepare: decompress bundled .xz tools, apply the sanitize patch, and drop
# binaries that package() later replaces with symlinks to system ones.
prepare() {
msg2 "Decompress tools..."
cd "$srcdir/${pkgname%-bin}-${pkgver}/tool/$CARCH"
for file in *.xz; do
xzcat $file > ${file%.xz}
chmod +x ${file%.xz}
done
msg2 "Cleaning up .xz crap..."
rm -fv ./*.xz
msg2 "Applying sanitize patch..."
cd ../..
patch --verbose -p0 < "$srcdir/sanitize.patch"
# Point the WebUI log path at /var/log instead of a relative log.txt.
sed -i 's|log\.txt|/var/log/ventoy.log|g' WebUI/static/js/languages.js
msg2 "Cleaning up unused binaries..."
# Preserving mkexfatfs and mount.exfat-fuse because exfatprogs is incompatible
for binary in xzcat hexdump; do
rm -fv tool/$CARCH/$binary
done
}
# package: install everything under /opt/ventoy and expose launcher scripts
# in /usr/bin; removed binaries become symlinks to their system equivalents.
package() {
cd "$srcdir/${pkgname%-bin}-${pkgver}"
msg2 "Copying package files..."
install -Dm644 -vt "$pkgdir/opt/${pkgname%-bin}/boot/" boot/*
install -Dm644 -vt "$pkgdir/opt/${pkgname%-bin}/${pkgname%-bin}/" "${pkgname%-bin}"/*
install -Dm755 -vt "$pkgdir/opt/${pkgname%-bin}/tool/" tool/*.{cer,sh}
install -Dm755 -vt "$pkgdir/opt/${pkgname%-bin}/tool/$CARCH/" tool/$CARCH/*
install -Dm755 -vt "$pkgdir/opt/${pkgname%-bin}/" *.sh
cp --no-preserve=o -avt "$pkgdir/opt/${pkgname%-bin}/" plugin WebUI
msg2 "Linking system binaries..."
for binary in xzcat hexdump; do
ln -svf /usr/bin/$binary "$pkgdir/opt/${pkgname%-bin}/tool/$CARCH/"
done
msg2 "Creating /usr/bin entries..."
install -Dm755 "$srcdir/${pkgname%-bin}"{,web,-persistent} -vt "$pkgdir"/usr/bin/
}
| true
|
0a6ff79896eb21355e0813fbea0c87976f692ced
|
Shell
|
joejin00/utilis
|
/dom0_mem_precheck
|
UTF-8
| 4,470
| 3.453125
| 3
|
[] |
no_license
|
#!/bin/bash
# Pre-check that a Xen host has enough memory for dom0 plus all guests.
XM=/usr/sbin/xm
# Bail out early when the xm toolstack is not installed ...
if ! [ -x $XM ]; then
echo "Can not found xm command"
exit 1
fi
# ... or when xend is not reachable.
if ! $XM list &>/dev/null; then
echo "Unable to connect to xend!"
exit 1
fi
# is_digit VALUE
# Returns 0 when VALUE is empty/missing or consists solely of decimal
# digits, 1 otherwise.  (An empty value intentionally counts as valid.)
is_digit()
{
    if [ -z "${1:-}" ]; then
        return 0
    fi
    re='^[0-9]+$'
    [[ $1 =~ $re ]]
}
# Total host memory in MB, parsed from `xm info`.
total=$($XM info | grep total | awk -F: '{print $2}'|sed 's/ //g')
if ! is_digit $total; then
echo "Invalid total memory(\"$total\")!"
exit 1
fi
if [ $total -le 0 ]; then
echo "Unable to get total memory!"
exit 1
fi
# Recommended dom0 memory: 768 MB base plus ~2% of total, truncated to int.
dom0_mem=$(echo "768+$total*0.0205"|bc|sed 's/\..*//g')
dom0_cur=$(free -m | head -2|tail -1|awk '{print $2}')
if ! is_digit $dom0_mem; then
echo "Unable to calculate dom0_mem"
exit 1
fi
echo '+------------------------------------|--------------------|--------------------+'
printf "|%-36s|%20s|%20s|\n" "Name" "Current" "Required"
echo '+------------------------------------|--------------------|--------------------+'
printf "|%-36s|%20s|%20s|\n" "Domain-0" $dom0_cur $dom0_mem
req=0
# Sum the memory requirement of every guest domain (skip header and dom0).
for domid in $($XM list | egrep -v -w "Name|Domain-0" | awk '{print $2}'); do
domname=$($XM list $domid | tail -1 | awk '{print $1}')
max=$($XM list --long $domid | egrep -w "maxmem" | awk '{print $2}' | sed 's/)//g')
mem=$($XM list --long $domid | egrep -w "memory" | awk '{print $2}' | sed 's/)//g')
if [ "x$mem" = "x" ] || ! is_digit $mem ; then
# Fixed: message referenced undefined $i; use the loop variable $domid.
echo "Unable to get domain $domid usage!, please check it manually"
exit 1
fi
if [ "x$max" = "x" ] || ! is_digit $max ; then
max=$mem
fi
printf "|%-36s|%20s|%20s|\n" $domname $((mem+10)) $((max+10))
# Add extra 10M which used by guests
req=$((req+mem+10))
done
req=$((req+dom0_mem))
echo '+------------------------------------|--------------------|--------------------+'
printf "|%-36s|%20s|%20s|\n" "Total" $total $req
echo '+------------------------------------|--------------------|--------------------+'
if [ $req -gt $total ]; then
echo "Server $(hostname) does not match minimum memory requirement!"
exit 1
fi
echo "Server $(hostname) matches memory requirement, READY to go!!"
exit 0
# NOTE: terminal-transcript artifact ("vi dom0_mem_prechk" / "cat dom0_mem_prechk");
# the script below is a duplicated paste of the script above.
#!/bin/bash
# Pre-check that a Xen host has enough memory for dom0 plus all guests.
XM=/usr/sbin/xm
if ! [ -x $XM ]; then
echo "Can not found xm command"
exit 1
fi
if ! $XM list &>/dev/null; then
echo "Unable to connect to xend!"
exit 1
fi
# is_digit VALUE - return 0 for empty or all-digit values, 1 otherwise.
function is_digit()
{
if [ "x$1" = "x" ]; then
return 0
fi
re='^[0-9]+$'
if ! [[ $1 =~ $re ]] ; then
return 1
fi
return 0
}
# Total host memory in MB, parsed from `xm info`.
total=$($XM info | grep total | awk -F: '{print $2}'|sed 's/ //g')
if ! is_digit $total; then
echo "Invalid total memory(\"$total\")!"
exit 1
fi
if [ $total -le 0 ]; then
echo "Unable to get total memory!"
exit 1
fi
# Recommended dom0 memory: 768 MB base plus ~2% of total, truncated to int.
dom0_mem=$(echo "768+$total*0.0205"|bc|sed 's/\..*//g')
dom0_cur=$(free -m | head -2|tail -1|awk '{print $2}')
if ! is_digit $dom0_mem; then
echo "Unable to calculate dom0_mem"
exit 1
fi
echo '+------------------------------------|--------------------|--------------------+'
printf "|%-36s|%20s|%20s|\n" "Name" "Current" "Required"
echo '+------------------------------------|--------------------|--------------------+'
printf "|%-36s|%20s|%20s|\n" "Domain-0" $dom0_cur $dom0_mem
req=0
for domid in $($XM list | egrep -v -w "Name|Domain-0" | awk '{print $2}'); do
domname=$($XM list $domid | tail -1 | awk '{print $1}')
max=$($XM list --long $domid | egrep -w "maxmem" | awk '{print $2}' | sed 's/)//g')
mem=$($XM list --long $domid | egrep -w "memory" | awk '{print $2}' | sed 's/)//g')
if [ "x$mem" = "x" ] || ! is_digit $mem ; then
# Fixed: message referenced undefined $i; use the loop variable $domid.
echo "Unable to get domain $domid usage!, please check it manually"
exit 1
fi
if [ "x$max" = "x" ] || ! is_digit $max ; then
max=$mem
fi
printf "|%-36s|%20s|%20s|\n" $domname $((mem+10)) $((max+10))
# Add extra 10M which used by guests
req=$((req+mem+10))
done
req=$((req+dom0_mem))
echo '+------------------------------------|--------------------|--------------------+'
printf "|%-36s|%20s|%20s|\n" "Total" $total $req
echo '+------------------------------------|--------------------|--------------------+'
if [ $req -gt $total ]; then
echo "Server $(hostname) does not match minimum memory requirement!"
exit 1
fi
echo "Server $(hostname) matches memory requirement, READY to go!!"
exit 0
| true
|
1bf51493448f348e8592d1330874f4a7bc52d380
|
Shell
|
Mic92/xfstests-cntr
|
/tests/xfs/150
|
UTF-8
| 1,051
| 3.328125
| 3
|
[] |
no_license
|
#! /usr/bin/env bash
# SPDX-License-Identifier: GPL-2.0
# Copyright (c) 2006 Silicon Graphics, Inc. All Rights Reserved.
#
# FSQA Test No. 150
#
# Set DM_EVENT_RENAME event on a DMAPI filesystem and verify it is set.
#
seq=`basename $0`
seqres=$RESULT_DIR/$seq
echo "QA output created by $seq"
here=`pwd`
tmp=/tmp/$$
status=1 # failure is the default!
trap "exit \$status" 0 1 2 3 15
# Mask the volatile filesystem handle so output compares cleanly against
# the golden .out file.
_filter_fs_handle()
{
$SED_PROG -e "s/$DMAPI_FS_HANDLE/DMAPI_FS_HANDLE/g"
}
# get standard environment, filters and checks
. ./common/rc
. ./common/filter
. ./common/dmapi
# real QA test starts here
_supported_fs xfs
_require_scratch
_scratch_mkfs_xfs >/dev/null 2>&1
_dmapi_scratch_mount
# Handle of the freshly mounted scratch filesystem.
DMAPI_FS_HANDLE="`${DMAPI_QASUITE1_DIR}cmd/path_to_fshandle $SCRATCH_MNT`"
# Dump the event list before, set DM_EVENT_RENAME, then dump it again.
${DMAPI_QASUITE1_DIR}cmd/get_eventlist $DMAPI_FS_HANDLE \
| _filter_fs_handle
${DMAPI_QASUITE1_DIR}cmd/set_eventlist $DMAPI_FS_HANDLE DM_EVENT_RENAME
echo "set_eventlist Returned: $?"
${DMAPI_QASUITE1_DIR}cmd/get_eventlist $DMAPI_FS_HANDLE \
| _filter_fs_handle
status=0
exit
| true
|
c746c8293702b2200de4c3e207348ce615b7561f
|
Shell
|
waveshare/pi-display
|
/install.sh
|
UTF-8
| 640
| 2.8125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Install pidisplay: apt dependencies, the python package, and the
# picard_display systemd service.
# $1 - the user's sudo password (piped to `sudo -S` on stdin).
set -e

password=$1

# NOTE(review): taking the password as a CLI argument leaks it via `ps`;
# consider running the whole script under sudo instead.
# printf is used instead of `echo $password` so passwords containing
# backslashes, globs or leading dashes are passed through unmangled.

# install pip and some apt dependencies
printf '%s\n' "$password" | sudo -S apt-get update
printf '%s\n' "$password" | sudo -S apt install -y python3-pil python3-smbus

# install pidisplay
printf '%s\n' "$password" | sudo -S python3 setup.py install

# install picard display service
# Enable the I2C bus in the boot config (best-effort: `|| true` keeps the
# install going on hosts without /boot/config.txt).
printf '%s\n' "$password" | sudo -S sed -i -e 's:#dtparam=i2c_arm=on:dtparam=i2c_arm=on:g' /boot/config.txt || true
python3 -m pidisplay.create_display_service
printf '%s\n' "$password" | sudo -S mv picard_display.service /etc/systemd/system/picard_display.service
printf '%s\n' "$password" | sudo -S systemctl enable picard_display
printf '%s\n' "$password" | sudo -S systemctl start picard_display
| true
|
f1a99389a10dd374bf3044679c772f86fb3a768e
|
Shell
|
portalssh/autologin
|
/wms/config/wifi_id
|
UTF-8
| 822
| 3
| 3
|
[] |
no_license
|
#!/bin/sh
# Re-authenticate against the wifi.id captive portal when the wlan1
# connectivity probe fails.
#username wifi.id
user="username_wifi.id"
#password wifi.id
pass="password_wifi.id"

# We only need ping's exit status, not its output (the old code captured
# stdout into a variable and immediately overwrote it with $?).
ping -c 2 -W 4 -I wlan1 8.8.8.8 > /dev/null 2>&1
track2=$?
# -ne 0 instead of -eq 1: ping returns 2 for errors such as a missing
# interface, which previously fell through to the "connected" branch.
if [ "$track2" -ne 0 ]
then
logger "wifi.id disconnected...trying to reconnect..."
# NOTE(review): the probe uses wlan1 but the bounce targets wwan1 —
# confirm the interface names are intentional.
ifdown wwan1 && sleep 5 && ifup wwan1 && sleep 20
# Current wlan1 IPv4 address and MAC, both required by the login endpoint.
ipwan2=$(ifconfig | grep -A 2 'wlan1' | awk '/inet addr/{print substr($2,6)}')
wlan_mac2=$(ifconfig wlan1 | grep -o -E '([[:xdigit:]]{1,2}:){5}[[:xdigit:]]{1,2}')
# POST the credentials to the captive-portal login check.
wget --user-agent="Mozilla/5.0" --post-data="username=$user@spin2&password=$pass" "https://welcome2.wifi.id/authnew/login/check_login.php?ipc=$ipwan2&gw_id=WAG-D2-CVG&client_mac=$wlan_mac2&wlan=SGSEG00211-N/TLK-CI-84587:@wifi.id" -O /dev/null --no-check-certificate
else
logger "wifi.id connected..." && exit 0
fi
| true
|
6c4de534f4ce2543fa94f8fcca8112930a57e738
|
Shell
|
xstefank/bug-clerk
|
/src/main/bash/load-bugs.sh
|
UTF-8
| 1,609
| 3.703125
| 4
|
[] |
no_license
|
#!/bin/bash
# Download a Bugzilla atom feed of candidate bugs, extract the bug IDs and
# hand them to the BugClerk jar for analysis.
readonly BZ_URL=${BZ_URL:-'https://bugzilla.redhat.com/buglist.cgi'}
readonly CLASSIFICATION=${CLASSIFICATION:-'JBoss'}
readonly URL="${BZ_URL}?${CLASSIFICATION}&columnlist=product%2Ccomponent%2Cassigned_to%2Cbug_status%2Cpriority%2Cbug_severity%2Cresolution%2Cshort_desc%2Cchangeddate%2Cflagtypes.name&priority=urgent&priority=high&priority=medium&priority=low&product=JBoss%20Enterprise%20Application%20Platform%206&query_format=advanced&title=Bug%20List%3A%20Potential%20JBoss%20SET%20EAP%206%20issues&ctype=atom&list_id=3176545"
readonly FEED_FILE=$(mktemp)
readonly BUG_CLERK_HOME=${BUG_CLERK_HOME:-'.'}
readonly JAR_NAME=${JAR_NAME:-'bugclerk'}
readonly VERSION=${VERSION:-'0.1'}
readonly BZ_SERVER_URL=${BZ_SERVER_URL:-'https://bugzilla.redhat.com/show_bug.cgi?id='}

# checkScriptDependency CMD - abort when CMD is not available on PATH.
checkScriptDependency() {
  local cmd=${1}
  # `command -v` is the portable, builtin replacement for `which`.
  if ! command -v "${cmd}" > /dev/null; then
    echo "This scripts requires command '${cmd}' which is missing from PATH."
    exit 1
  fi
}

checkScriptDependency wget
checkScriptDependency java

if [ ! -e "${BUG_CLERK_HOME}" ]; then
  echo "The BUG_CLERK_HOME '${BUG_CLERK_HOME}' provided does not exist."
  exit 2
fi

echo "Downloading and storing feed in file ${FEED_FILE}."
wget -q --output-document="${FEED_FILE}" "${URL}"
echo -n "Extracting Bug IDs... "
# grep the file directly (no useless cat); keep only <id> entries pointing at
# show_bug and strip the surrounding XML so just the numeric IDs remain.
readonly BUGS_ID=$(grep -e '<id>' "${FEED_FILE}" | grep -e 'show_bug' | sed -e 's;^.*<id>.*id=;;' -e 's;</id>.*$;;' -e 's/\n/ /g')
echo 'Done.'
echo "Running ${JAR_NAME}-${VERSION} from ${BUG_CLERK_HOME}:"
# ${BUGS_ID} is intentionally unquoted: each ID must be a separate argument.
java -jar "${BUG_CLERK_HOME}/${JAR_NAME}-${VERSION}.jar" -u "${BZ_SERVER_URL}" ${BUGS_ID}
| true
|
a3cb2a035deefb451050541bcb45af842f607fbb
|
Shell
|
graphingwiki/collabbackend
|
/scripts/collab-check
|
UTF-8
| 1,005
| 3.6875
| 4
|
[] |
no_license
|
#!/bin/sh
# -*- coding: utf-8 -*-
## @copyright: 2015 by Marko Laakso
## @license: MIT <http://www.opensource.org/licenses/mit-license.php>
#
# Sanity-check collab wiki instances under ${wikidir}: verify the required
# pages exist and report each collab's last event/edit timestamps.

# Print an error to stderr and abort.
# Fixed: the original used `2>&1`, which sent the message to stdout;
# `1>&2` matches the stderr reporting used elsewhere in this script.
myerr() {
    echo "$@" 1>&2
    exit 1
}

CFG=/etc/local/collab/collab.ini

# Source the config (provides ${wikidir}); fail when it is missing.
if [ -f ${CFG} ]; then
    . ${CFG} 2> /dev/null
else
    myerr "No ${CFG}"
fi

# An optional single argument selects one collab; the default "*" is left
# unquoted below on purpose so it globs against the wiki directory.
if [ $# -eq 1 ]; then
    tocheck=$1
    shift
else
    tocheck="*"
fi

if [ $# -gt 0 ]; then
    myerr "Usage: $0 [<collab>]"
fi

cd ${wikidir} || myerr "Failed to cd to wikidir: ${wikidir}"

for collab in ${tocheck}; do
    for page in AccessGroup FrontPage CollabFacts; do
        if [ ! -d ${collab}/data/pages/${page} ]; then
            echo "${collab}: has no ${page}" 1>&2
        fi
    done
    # First 10 characters of the newest non-AccessGroup log line = epoch secs.
    eventstamp=`grep -v "AccessGroup" ${collab}/data/event-log | \
        tail -1 | cut -c1-10`
    editstamp=`grep -v "AccessGroup" ${collab}/data/edit-log | \
        tail -1 | cut -c1-10`
    echo "${collab}: last event (except AccessGroup)" `date -d @${eventstamp}`
    echo "${collab}: last edit (except AccessGroup) " `date -d @${editstamp}`
done
| true
|
41af605af45ac3f80f3c8c4850330cb4195f7a0d
|
Shell
|
zealic/zealic-learn
|
/BuildAutomation/ci-build.sh
|
UTF-8
| 86
| 2.640625
| 3
|
[] |
no_license
|
#!/bin/bash
# Run every Travis CI helper script, from the directory containing this script.
# Fail fast if we cannot resolve/enter the script's own directory.
cd "$(cd "$(dirname "$0")" && pwd)" || exit 1
for name in Travis-CI/*.sh
do
    # Quoted so script names containing spaces still run.
    ./"$name"
done
| true
|
6fb031ded428c89d68fda9f93c111c56fea4bdf4
|
Shell
|
AlexDuperre/Autonomous-Drone-Navigation
|
/tensorflow/util/clear_empty_trajectories.sh
|
UTF-8
| 156
| 3.203125
| 3
|
[] |
no_license
|
#!/bin/bash
# Remove a trajectory directory whose underscore-separated name carries the
# same value in its second- and third-to-last fields (an "empty" trajectory,
# e.g. ".../run_3_3_x").
# $1 - path to the trajectory directory.
# echo 'file_path = ' $1

# Split the whole path on underscores (negative subscripts need bash 4.3+).
IFS='_' read -r -a array <<< "$1"

# Guard against paths with fewer than 3 fields, which would otherwise abort
# with a "bad array subscript" error.
if [ "${#array[@]}" -ge 3 ] && [ "${array[-2]}" == "${array[-3]}" ]; then
    # Quoted and `--`-guarded so odd paths cannot be misinterpreted by rm.
    rm -r -- "$1"
    echo "Cleared " "$1"
fi
| true
|
b3dafbc5410c5015bc8676ae6f2d7e46943993de
|
Shell
|
adeelmalik78/dxapikit
|
/API/flows.sh
|
UTF-8
| 5,493
| 3.578125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright (c) 2017 by Delphix. All rights reserved.
#
# Program Name : flows.sh
# Description : Delphix API timeflows examples
# Author : Alan Bitterman
# Created : 2017-08-09
# Version : v1.1.0
#
# Requirements :
# 1.) curl and jq command line libraries
# 2.) Include ./jqJSON_subroutines.sh
# 3.) Populate Delphix Engine Connection Information . ./delphix_engine.conf
#
# Interactive Usage: ./flows.sh
#
#########################################################
## Parameter Initialization ...
# delphix_engine.conf is expected to define DMUSER, DMPASS, BaseURL, COOKIE
# and CONTENT_TYPE, all of which are used later in this script.
. ./delphix_engine.conf
#########################################################
# NO CHANGES REQUIRED BELOW THIS POINT #
#########################################################
#########################################################
## Subroutines ...
# Provides the RestSession and jqParse helpers used below.
source ./jqJSON_subroutines.sh
# human_print: read "<bytes> [junk]" lines from stdin and print the first
# line's value scaled to a readable unit (Bytes/KB/MB/GB/TB), rounding to
# the nearest unit at each step.  Printing a sub-TB value ends the loop;
# a TB-sized value lets the loop continue to the next input line (as before).
human_print(){
while read B dummy; do
    if [ "$B" -lt 1024 ]; then echo "$B Bytes"; break; fi
    KB=$(( (B + 512) / 1024 ))
    if [ "$KB" -lt 1024 ]; then echo "$KB KB"; break; fi
    MB=$(( (KB + 512) / 1024 ))
    if [ "$MB" -lt 1024 ]; then echo "$MB MB"; break; fi
    GB=$(( (MB + 512) / 1024 ))
    if [ "$GB" -lt 1024 ]; then echo "$GB GB"; break; fi
    echo "$(( (GB + 512) / 1024 )) TB"
done
}
#########################################################
## Session and Login ...
echo "Authenticating on ${BaseURL}"
RESULTS=$( RestSession "${DMUSER}" "${DMPASS}" "${BaseURL}" "${COOKIE}" "${CONTENT_TYPE}" )
#echo "Results: ${RESULTS}"
if [ "${RESULTS}" != "OK" ]
then
echo "Error: Exiting ..."
exit 1;
fi
echo "Session and Login Successful ..."
#########################################################
## Get database container
STATUS=`curl -s -X GET -k ${BaseURL}/database -b "${COOKIE}" -H "${CONTENT_TYPE}"`
#echo "database: ${STATUS}"
RESULTS=$( jqParse "${STATUS}" "status" )
#
# Command Line Arguments ...
#
# $1 may name the dSource/VDB; otherwise the user is prompted from a list.
SOURCE_SID=$1
if [[ "${SOURCE_SID}" == "" ]]
then
VDB_NAMES=`echo "${STATUS}" | jq --raw-output '.result[] | .name '`
echo "VDB Names:"
echo "${VDB_NAMES}"
echo " "
echo "Please Enter dSource or VDB Name: "
read SOURCE_SID
if [ "${SOURCE_SID}" == "" ]
then
echo "No dSource of VDB Name Provided, Exiting ..."
exit 1;
fi
fi;
export SOURCE_SID
echo "Source: ${SOURCE_SID}"
# Resolve the API reference of the container whose name was chosen.
CONTAINER_REFERENCE=`echo ${STATUS} | jq --raw-output '.result[] | select(.name=="'"${SOURCE_SID}"'") | .reference '`
echo "container reference: ${CONTAINER_REFERENCE}"
#########################################################
## List timeflows for the container reference
echo " "
echo "Timeflows API "
STATUS=`curl -s -X GET -k ${BaseURL}/timeflow -b "${COOKIE}" -H "${CONTENT_TYPE}"`
#########################################################
## Select the timeflow
FLOW_NAMES=`echo "${STATUS}" | jq --raw-output '.result[] | select(.container=="'"${CONTAINER_REFERENCE}"'") | .name '`
echo "timeflow names:"
echo "${FLOW_NAMES}"
echo " "
echo "Select timeflow Name (copy-n-paste from above list): "
read FLOW_NAME
if [ "${FLOW_NAME}" == "" ]
then
echo "No Flow Name provided, exiting ... ${FLOW_NAME} "
exit 1;
fi
# Get timeflow reference ...
FLOW_REF=`echo "${STATUS}" | jq --raw-output '.result[] | select(.name=="'"${FLOW_NAME}"'") | .reference '`
echo "timeflow reference: ${FLOW_REF}"
# timeflowRanges for this timeflow ...
echo " "
echo "TimeflowRanges for this timeflow ... "
STATUS=`curl -s -X POST -k --data @- ${BaseURL}/timeflow/${FLOW_REF}/timeflowRanges -b "${COOKIE}" -H "${CONTENT_TYPE}" <<-EOF
{
"type": "TimeflowRangeParameters"
}
EOF
`
echo ${STATUS} | jq "."
#########################################################
## Get snapshot for this timeflow ...
echo " "
echo "Snapshot per Timeflow ... "
STATUS=`curl -s -X GET -k ${BaseURL}/snapshot -b "${COOKIE}" -H "${CONTENT_TYPE}"`
# Keep only snapshots belonging to both the chosen container and timeflow.
SYNC_NAMES=`echo "${STATUS}" | jq --raw-output '.result[] | select(.container=="'"${CONTAINER_REFERENCE}"'" and .timeflow=="'"${FLOW_REF}"'") | .name '`
echo "snapshots:"
echo "${SYNC_NAMES}"
echo " "
echo "Select Snapshot Name (copy-n-paste from above list): "
read SYNC_NAME
if [ "${SYNC_NAME}" == "" ]
then
echo "No Snapshot Name provided, exiting ... ${SYNC_NAME} "
exit 1;
fi
SYNC_REF=`echo "${STATUS}" | jq --raw-output '.result[] | select(.name=="'"${SYNC_NAME}"'") | .reference '`
echo "snapshot reference: ${SYNC_REF}"
echo "${STATUS}" | jq --raw-output '.result[] | select(.name=="'"${SYNC_NAME}"'") '
#########################################################
## Get snapshot space ...
echo "-----------------------------"
echo "-- Snapshot Space JSON ... "
json="{
\"type\": \"SnapshotSpaceParameters\",
\"objectReferences\": [
\"${SYNC_REF}\"
]
}"
echo "JSON> $json"
echo "Snapshot Space Results ..."
SPACE=`curl -s -X POST -k --data @- $BaseURL/snapshot/space -b "${COOKIE}" -H "${CONTENT_TYPE}" <<EOF
${json}
EOF
`
echo "$SPACE" | jq '.'
# human_print (defined above) converts the byte count into a readable unit.
SIZE=`echo "${SPACE}" | jq '.result.totalSize' | human_print`
echo "Snapshot Total Size: ${SIZE}"
echo " "
echo "Done "
exit 0;
| true
|
75384a4f692edf710490dd96b6e929b6a15d8cf5
|
Shell
|
jamierocks/MCStats
|
/deploy.sh
|
UTF-8
| 677
| 3.078125
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/bash
# Build MCStats with Maven and deploy the artifacts to backend1.mcstats.org.

# get the start unix timestamp
START=$(date +%s)

echo -e " [\e[1;33m++\e[00m] Compiling"
mvn --quiet clean package
# NOTE(review): 777 is very permissive — confirm the deploy really needs it.
chmod -R 777 .

echo -e " [\e[1;33m++\e[00m] Uploading build"
rsync -av --progress --exclude=*.tar.gz --exclude=archive-tmp --exclude=classes --exclude=maven-archiver --exclude=surefire target/ root@backend1.mcstats.org:/home/mcstats/
rsync -av --progress server-definitions.txt root@backend1.mcstats.org:/home/mcstats/

echo -e " [\e[1;33m++\e[00m] Fixing permissions"
ssh root@backend1.mcstats.org "chown -R mcstats:mcstats /home/mcstats/"

# finish timestamp, calculate runtime (shell arithmetic replaces `bc`,
# removing an external dependency)
FINISH=$(date +%s)
RUNTIME=$((FINISH - START))
echo -e " [\e[0;32m!!\e[00m] Finished deploy ($RUNTIME seconds)"
| true
|
4080a7fa1cec12b922ba0265b544882438ce8b9c
|
Shell
|
BinaryTreeCode/adJ
|
/pruebas/compila-libc.sh
|
UTF-8
| 261
| 2.609375
| 3
|
[
"ISC"
] |
permissive
|
#!/bin/sh
# Builds and installs the system headers and libc from /usr/src.
# Public Domain. 2013. vtamara@pasosdeJesus.org
SRC=/usr/src

cd "$SRC/include" || exit 1
make
# Abort if building/installing the headers failed (the old
# `if (test "$?" != "0") ...` subshell idiom checked the same thing).
make includes || exit 1
make install

cd ../lib/libc || exit 1
#make clean
make depend
# Abort if the libc build failed before attempting to install it.
make || exit 1
make install
| true
|
a04658207c0f5f3d4c24fbc8169dbe6f9f20d04c
|
Shell
|
aikuma0130/prezto
|
/runcoms/zprofile
|
UTF-8
| 3,218
| 3.0625
| 3
|
[
"MIT"
] |
permissive
|
#
# Executes commands at login pre-zshrc.
#
# Authors:
# Sorin Ionescu <sorin.ionescu@gmail.com>
#
#
# Browser
#
if [[ -z "$BROWSER" && "$OSTYPE" == darwin* ]]; then
export BROWSER='open'
fi
#
# Editors
#
if [[ -z "$EDITOR" ]]; then
export EDITOR='nano'
fi
if [[ -z "$VISUAL" ]]; then
export VISUAL='nano'
fi
if [[ -z "$PAGER" ]]; then
export PAGER='less'
fi
#
# Language
#
if [[ -z "$LANG" ]]; then
export LANG='en_US.UTF-8'
fi
#
# Paths
#
# Ensure path arrays do not contain duplicates.
typeset -gU cdpath fpath mailpath path
# Set the list of directories that cd searches.
# cdpath=(
# $cdpath
# )
# Set the list of directories that Zsh searches for programs.
# (N) is a zsh glob qualifier: non-existent directories expand to nothing.
path=(
$HOME/{,s}bin(N)
/opt/{homebrew,local}/{,s}bin(N)
/usr/local/{,s}bin(N)
$path
)
#
# Less
#
# Set the default Less options.
# Mouse-wheel scrolling has been disabled by -X (disable screen clearing).
# Remove -X to enable it.
if [[ -z "$LESS" ]]; then
export LESS='-g -i -M -R -S -w -X -z-4'
fi
# Set the Less input preprocessor.
# Try both `lesspipe` and `lesspipe.sh` as either might exist on a system.
if [[ -z "$LESSOPEN" ]] && (( $#commands[(i)lesspipe(|.sh)] )); then
export LESSOPEN="| /usr/bin/env $commands[(i)lesspipe(|.sh)] %s 2>&-"
fi
export PATH="/usr/local/opt/ruby/bin:/usr/local/lib/ruby/gems/2.6.0/bin:$PATH"
#source <(kubectl completion bash)
export GOPATH=$HOME/go
export PATH=$PATH:$GOPATH/bin
#export PATH=$HOME/.anyenv/bin:$PATH
#eval "$(anyenv init -)"
# for work
if [ -f ${HOME}/.zprofile_private ]; then
. ${HOME}/.zprofile_private
fi
# Set width
if [ -n "${TERM}" ];
then SCREEN_WIDTH=$(tput cols);
else SCREEN_WIDTH=20;
fi
# Draw HR function
# printf -v fills _hr with SCREEN_WIDTH spaces; the ${_hr// /...} expansion
# replaces each space with the first argument (default "-") to draw a rule.
echo_hr(){
printf -v _hr "%*s" ${SCREEN_WIDTH} && echo ${_hr// /${1--}}
}
# for rails
alias be='bundle exec'
# for git/github alias
alias gbrowse='hub browse $(ghq list | peco | cut -d "/" -f 2,3 )'
alias g='hub'
alias gch='hub checkout $(hub branch | peco |sed s/\*//g)'
alias gb='hub branch'
alias ga='hub add'
alias gd='hub diff'
alias gco='hub commit'
alias gs='hub status'
alias look='cd $(ghq root)/$(ghq list |peco)'
# for jira alias
alias j='jira'
alias jsprint='echo "$(echo "タスク一覧" ; echo_hr ; jira sprint ; echo out-of-planning ; echo_hr ; jira out-of-planning ${JIRA_SPRINT})"'
alias jbrowse='jira browse $(echo "$(jira sprint; jira out-of-planning ${JIRA_SPRINT})" |grep -v '\''^$'\'' |peco |awk '\''{print $3}'\'')'
alias jstart='jira start $(echo "$(jira sprint; jira out-of-planning ${JIRA_SPRINT})" |grep -v '\''^$'\'' |peco |awk '\''{print $3}'\'')'
alias jclose='jira trans "課題の解決" $(echo "$(jira sprint; jira out-of-planning ${JIRA_SPRINT})" |grep -v '\''^$'\'' |peco |awk '\''{print $3}'\'')'
# z keys instead of fg command
# Ctrl-Z on an empty prompt foregrounds the last job; otherwise it stashes
# the current input and clears the screen (zle widget below).
fancy-ctrl-z () {
if [[ $#BUFFER -eq 0 ]]; then
BUFFER="fg"
zle accept-line
else
zle push-input
zle clear-screen
fi
}
zle -N fancy-ctrl-z
bindkey '^Z' fancy-ctrl-z
# https://qiita.com/lichtshinoza/items/ed03f42614ee5605974d
export OBJC_DISABLE_INITIALIZE_FORK_SAFETY=YES
# rbenv
export PATH=$HOME/.rbenv/bin:$PATH
eval "$(rbenv init -)"
# nodenv
export PATH="$HOME/.nodenv/bin:$PATH"
eval "$(nodenv init -)"
# aws-cli
export AWS_DEFAULT_PROFILE=playground
| true
|
3fd5ac208df19a2a497e45189bdd76a5d2cd708e
|
Shell
|
yarikoptic/dh-annex
|
/tools/generate_sample_repo
|
UTF-8
| 2,222
| 3.59375
| 4
|
[] |
no_license
|
#!/bin/bash
set -eu
# just a sample script to generate a test/trial repository
# which I could work with quickly and which would be git-annex repo
odir=datatune-orig
tdir=datatune
rhost=washoe
rpath=~/www/tmp/datatune.git
# In-repo sed: rewrite every occurrence of $1 into $2 across tracked files.
git-sedi() {
git grep -l $1 | xargs sed -i -e "s/$1/$2/g"
}
# Start from a fresh copy of the svgtune sources.
if [ -e $odir ]; then
chmod +w -R $odir
rm -rf $odir
fi
rsync -a -L ~/proj/svgtune/ $odir
cd $odir
git checkout master
git-sedi svgtune datatune # so we have new name all around
git mv svgtune datatune
git commit -m 'renamed all shebang to datatune' -a
echo "123" > 123.dat
mkdir -p data/deep
echo "deep" > data/deep/deep-data.dat
git annex init
git annex add 123.dat data/deep/deep-data.dat
# create some archives
for a in arch1 arch2; do
mkdir $a
for f in a b; do
echo "data $a for $f" > $a/$f;
done
tar -czf $a.tgz $a
rm -rf $a
git annex add $a.tgz
done
git commit -m 'added some files to annex' -a
git tag -a -m "bogus upstream tag" upstream/999.0.0
git checkout debian
git merge --no-edit master
git-sedi svgtune datatune
git-sedi PREFIX DESTDIR
cat >| debian/rules <<EOF
#!/usr/bin/make -f
%:
dh \$@
override_dh_auto_install:
dh_auto_install
install -m 0644 -D 123.dat debian/datatune/usr/lib/datatune/data/123.dat
install -m 0644 -D data/deep/deep-data.dat debian/datatune/usr/lib/datatune/data/deep/deep-data.dat
cd debian/datatune/usr/lib/datatune/data; tar -xzvf \$(CURDIR)/arch1.tgz
cd debian/datatune/usr/lib/datatune/data; tar -xzvf \$(CURDIR)/arch2.tgz
EOF
git commit -m "renamed all to datatune in debian" -a
dch --noconf -v '1.0.0-1' "Initiated a new matching debian changelog"
git commit -m "added changelog entry for debian" -a
# now publish it
ssh $rhost "rm -rf $rpath; mkdir $rpath; cd $rpath; git init --bare --shared; mv hooks/post-update.sample hooks/post-update"
git remote add publish $rhost:$rpath
git fetch publish
git push publish master git-annex debian
git push --tags publish
git annex copy --to=publish .
cd ..
# Re-clone from the published remote and verify a binary package builds.
if [ -e $tdir ]; then
chmod a+w -R $tdir; rm -rf $tdir
fi
git clone $rhost:$rpath $tdir
cd $tdir
git annex get .
git checkout debian
fakeroot debian/rules binary
# TODO: mkdir -p debian/source
# TODO: echo '3.0 (git)' >| debian/options/format
| true
|
1188cf832146394f2be95aae3b73913840a3a7b3
|
Shell
|
YasirChoudhary/bash_scripts
|
/06_if_statement_1.sh
|
UTF-8
| 80
| 2.734375
| 3
|
[] |
no_license
|
#! /bin/bash
# Demo of a numeric if-statement: the message prints only when count <= 9,
# so with count=13 nothing is printed.
count=13
if [ "$count" -le 9 ]; then
    echo "Condition is true"
fi
| true
|
b25672ebec21b8b1bf3f999ecf00aab693d8b7ba
|
Shell
|
nickhould/.dotfiles
|
/.bash_profile
|
UTF-8
| 2,163
| 3.265625
| 3
|
[] |
no_license
|
# My custom speed dial
lckr=~/Dropbox/PasswordBox/lckr/app/lckr
projects=~/Sites/rails_projects/
# Password Box - start server
alias startlckr='rvmsudo MYSQL_SOCKET=/tmp/mysql.sock rails s -p 80'
export MYSQL_SOCKET='/tmp/mysql.sock'
# Git Tab Completion
source ~/git-completion.bash
source ~/hub.bash_completion.sh
#Show branch in status line
# Prints " → <branch>" for the current git branch; silent outside a repo.
parse_git_branch() {
git branch --no-color 2> /dev/null | sed -e '/^[^*]/d' -e 's/* \(.*\)/\ →\ \1/'
}
export PS1='\[\e[1;31m\]\W\[\e[1;33m\]$(parse_git_branch)\[\e[0;39m\]> '
# Keep the terminal window title in sync with the current directory.
export PROMPT_COMMAND='echo -ne "\033]0;${PWD}\007"'
export EDITOR="subl"
# This is a good place to source rvm v v v
[[ -s "/Users/jean-nicholashould/.rvm/scripts/rvm" ]] && source "/Users/jean-nicholashould/.rvm/scripts/rvm"
# This loads RVM into a shell session.
# Custom Shell Colours
alias ls='ls -GFh'
#Homebrew correction for OSX Lion
export PATH=/usr/local/bin:$PATH
# Correction for sublime
export PATH="~/bin:/usr/local/bin:/usr/local/sbin:$PATH"
# Give it a # and a dir, it will cd to that dir, then `cd ..` however many times you've indicated with the number
# The number defaults to 1, the dir, if not provided, defaults to the output of the previous command
# This lets you find the dir on one line, then run the command on the next
# NOTE(review): the default for $2 RE-EXECUTES the previous history entry to
# capture its output, which also re-runs its side effects — use with care.
2dir() {
last_command="$(history | tail -2 | head -1 | sed 's/^ *[0-9]* *//')"
count="${1-1}"
name="${2:-$($last_command)}"
while [[ $count > 0 ]]
do
name="$(dirname "$name")"
((count--))
done
echo "$name"
cd "$name"
}
# Hub
alias git=hub
eval "$(hub alias -s)"
# MySQL
#Set the MySQL Home environment variable to point to the root directory of the MySQL installation.
export set MYSQL_HOME=/usr/local/mysql-5.5.16-osx10.6-x86_64
# Add the /bin directory from the MYSQL_HOME location into your $PATH environment variable.
export set PATH=$PATH:$MYSQL_HOME/bin
# Create aliases that make it easier for you to manually start and stop the MySQL Daemon.
alias mysqlstart="sudo /Library/StartupItems/MySQLCOM/MySQLCOM start"
alias mysqlstop="sudo /Library/StartupItems/MySQLCOM/MySQLCOM stop"
alias mysqlstatus="ps aux | grep mysql | grep -v grep"
| true
|
fb1147991b55962c1cad4eef5cf73bfa3cb4ebc8
|
Shell
|
bossjones/scripts-1
|
/check-log-speed.sh
|
UTF-8
| 13,462
| 3.671875
| 4
|
[] |
no_license
|
#!/bin/bash
set -euo pipefail
# get_running_pod COMPONENT
# Print the name of the first pod labeled component=COMPONENT that is in
# the Running state (empty output when none is running).
function get_running_pod() {
# $1 is component for selector
oc get pods -l component=$1 | awk -v sel=$1 '$1 ~ sel && $3 == "Running" {print $1}' | head -1
}
# query_es_from_es POD INDEX-PREFIX ENDPOINT QUERYSTRING
# curl the Elasticsearch REST API from inside POD using the admin client
# cert mounted in the pod; URL is https://localhost:9200/<prefix>*/<endpoint>?<qs>.
function query_es_from_es() {
oc exec $1 -- curl --connect-timeout 1 -s -k \
--cert /etc/elasticsearch/secret/admin-cert --key /etc/elasticsearch/secret/admin-key \
https://localhost:9200/${2}*/${3}\?"$4"
}
# read JSON query arguments from stdin
# query_es_from_es_json POD INDEX-PREFIX ENDPOINT
# Same as query_es_from_es, but POSTs the JSON body read from stdin (-d@-).
function query_es_from_es_json() {
oc exec -i $1 -- curl --connect-timeout 1 -s -k \
--cert /etc/elasticsearch/secret/admin-cert --key /etc/elasticsearch/secret/admin-key \
https://localhost:9200/${2}*/${3} -d@-
}
# Read an ES search response on stdin and print "<index>/<type>/<id>" for
# the first hit.
# NOTE(review): uses the python 2 `print` statement — confirm python2 exists
# on the hosts where this script runs.
function get_url_path_of_record() {
# returns $index/$type/$_id
python -c "import json, sys; hsh = json.loads(sys.stdin.read())['hits']['hits'][0]; print '%(_index)s/%(_type)s/%(_id)s'.encode('utf-8') % hsh"
}
# get_last_field_from_es POD INDEX-PREFIX FIELD
# Fetch the newest record (sorted by $timefield descending) from index
# <prefix>* and print FIELD's value.
# Fixed: "@{timefield}" was sent literally (not a shell expansion) and "\&"
# embedded a stray backslash in the query string; use ${timefield} and a
# plain "&", matching get_last_url_from_es below.
function get_last_field_from_es() {
    query_es_from_es $1 $2 _search "size=1&sort=${timefield}:desc" | \
        get_field_value_from_json $3
}
# get_last_url_from_es POD INDEX-PREFIX [EXTRA-QS]
# Print "<index>/<type>/<id>" of the newest record (sorted by $timefield
# descending); EXTRA-QS, when given, is appended to the query string.
function get_last_url_from_es() {
query_es_from_es $1 $2 _search "size=1&sort=${timefield}:desc${3:+&$3}" | \
get_url_path_of_record
}
# get_field_value_from_record FIELD
# Read one ES document (with _source) on stdin and print FIELD's value.
# NOTE(review): python 2 `print` statement; FIELD is spliced into the python
# source, so it must not contain quote characters.
function get_field_value_from_record() {
python -c 'import json, sys; print json.loads(sys.stdin.read())["_source"]["'"$1"'"].encode("utf-8")'
}
function get_field_value_from_es_url() {
oc exec -i $1 -- curl --connect-timeout 1 -s -k \
--cert /etc/elasticsearch/secret/admin-cert --key /etc/elasticsearch/secret/admin-key \
https://localhost:9200/$2 | get_field_value_from_record "$3"
}
# wait_until_cmd_or_err CHECK_CMD ERR_CMD TIMEOUT [INTERVAL]
# $1 - shell command or function to call to test if wait is over -
#      should return true once the condition has been met
# $2 - shell command or function to call for error handling on timeout
# $3 - timeout in seconds - should be a multiple of $4 (interval)
# $4 - loop interval in seconds (default 1)
# Returns 0 when the condition was met, 1 on timeout (after running $2).
function wait_until_cmd_or_err() {
    local remaining=$3
    local interval=${4:-1}
    while [ "$remaining" -gt 0 ] ; do
        # $1 is deliberately unquoted so multi-word commands word-split.
        $1 && break
        sleep "$interval"
        # Fixed: 'let ii=0' returns status 1, which aborted the whole script
        # under 'set -e' exactly when the countdown reached zero — before the
        # error handler could run. $(( )) assignment always returns 0.
        remaining=$(( remaining - interval ))
    done
    if [ "$remaining" -le 0 ] ; then
        $2
        return 1
    fi
    return 0
}
# Print $@ only when DEBUG is set in the environment.
debug() {
    if [ -n "${DEBUG:-}" ] ; then
        echo "$@"
    fi
}
# Print $@ only when VERBOSE is set in the environment.
verbose() {
    if [ -n "${VERBOSE:-}" ] ; then
        echo "$@"
    fi
}
# Error reporting. Fixed: diagnostics now go to stderr instead of stdout.
err() {
    echo ERROR: "$@" >&2
}
# Unconditional informational output.
info() {
    echo "$@"
}
# "Up-to-date" results are only interesting in verbose mode.
info_same() {
    verbose "$@"
}
# info_diff NAME AMOUNT UNITS REFERENCE
# Report "NAME is AMOUNT UNITS behind REFERENCE", or an up-to-date line
# (verbose only) when the measured difference is zero.
info_diff() {
    # [[ ... || ... ]] replaces the deprecated, ambiguous '[ ... -o ... ]'
    # and quotes $2 so an empty amount cannot break the test.
    if [[ "$2" = "0" || "$2" = "0.0" ]] ; then
        info_same $1 is up-to-date with $4 in $3
    else
        info $1 is $2 $3 behind $4
    fi
}
es_pod=`get_running_pod es`
es_ops_pod=`get_running_pod es-ops 2> /dev/null`
es_ops_pod=${es_ops_pod:-$es_pod}
if grep -q -- '--log-driver=journald' /etc/sysconfig/docker ; then
USE_JOURNAL=1
fi
# get es version - 2.x and later use @timestamp, earlier use time
esver=`oc exec $es_pod -- curl --connect-timeout 1 -s -k --cert /etc/elasticsearch/secret/admin-cert --key /etc/elasticsearch/secret/admin-key https://localhost:9200/_cat/nodes?h=version`
case $esver in
1.*) timefield=time; prj_prefix= ;;
*) timefield=@timestamp; prj_prefix="project." ;;
esac
if [ -n "${USE_JOURNAL:-}" ] ; then
journal_pos_err() {
err timed out waiting for /var/log/journal.pos - check Fluentd pod log
exit 1
}
wait_until_cmd_or_err "test -f /var/log/journal.pos" journal_pos_err 300
# get cursor position
cursor="`cat /var/log/journal.pos`"
last_cursor="`journalctl -r -n 1 -o export|awk '/^__CURSOR=/ {print substr($0, 10)}'`"
if [ "$cursor" = "$last_cursor" ] ; then
:
else
nrecs=`journalctl -c "$cursor" | wc -l`
nrecs=`expr $nrecs - 1` || : # -1 for header
fi
verbose last record read by Fluentd: `journalctl -c "$cursor" -n 1|tail -1`
verbose last record in the journal: `journalctl -c "$last_cursor" -n 1|tail -1`
last_srts=`journalctl -n 1 -o export -c "$last_cursor"|awk -F= '/^_SOURCE_REALTIME_TIMESTAMP/ {print $2}'`
last_rts=`journalctl -n 1 -o export -c "$last_cursor"|awk -F= '/^__REALTIME_TIMESTAMP/ {print $2}'`
last_ts=${last_srts:-$last_rts}
srts=`journalctl -n 1 -o export -c "$cursor"|awk -F= '/^_SOURCE_REALTIME_TIMESTAMP/ {print $2}'`
rts=`journalctl -n 1 -o export -c "$cursor"|awk -F= '/^__REALTIME_TIMESTAMP/ {print $2}'`
ts=${srts:-$rts}
if [ "$cursor" != "$last_cursor" ] ; then
diff=`expr $last_ts - $ts` || :
secdiff=`expr $diff / 1000000` || :
usecdiff=`expr $diff % 1000000` || :
info_diff Fluentd $secdiff.$usecdiff seconds "the journal"
info_diff Fluentd $nrecs records "the journal"
fi
# find the last project record
last_prj=`journalctl -o export -u docker | grep '^CONTAINER_NAME=k8s_'| grep -v '^CONTAINER_NAME=k8s_[^\.]\+\.[^_]\+_[^_]\+_\(default\|openshift-infra\|openshift\)_[^_]\+_[a-f0-9]\{8\}$'|tail -1`
last_prj_cursor="`journalctl -n 1 -o export "$last_prj"|awk '/^__CURSOR=/ {print substr($0, 10)}'`"
verbose last record from a project container: `journalctl -u docker -n 1 "$last_prj"|tail -1`
prj_srts=`journalctl -n 1 -o export -c "$last_prj_cursor"|awk -F= '/^_SOURCE_REALTIME_TIMESTAMP/ {print $2}'`
prj_rts=`journalctl -n 1 -o export -c "$last_prj_cursor"|awk -F= '/^__REALTIME_TIMESTAMP/ {print $2}'`
prj_ts=${prj_srts:-$prj_rts}
prj=`echo "$last_prj" | \
sed -n '/^CONTAINER_NAME=/ {s/^CONTAINER_NAME=k8s_\([^\.]\+\)\.[^_]\+_\([^_]\+\)_\([^_]\+\)_[^_]\+_[a-f0-9]\{8\}$/\3/; p; q}'`
# see if the fluentd record is for ops or projects
namespace=`journalctl -n 1 -o export -c "$cursor" | \
sed -n '/^CONTAINER_NAME=/ {s/^CONTAINER_NAME=k8s_\([^\.]\+\)\.[^_]\+_\([^_]\+\)_\([^_]\+\)_[^_]\+_[a-f0-9]\{8\}$/\3/; p; q}'`
fluentd_rec_is_project=
case "$namespace" in
"") : ;; # ops
"default"|"openshift-infra"|"openshift") : ;; # ops
*) fluentd_rec_is_project=1 ;;
esac
# find the latest .operations.* and project.* records in Elasticsearch
# compare them to the records with ${timefield} $ts and $prj_ts
# find out how far behind ES is in both number of records and time
# get url of last .operations record in ES
# empty means no such index in ES yet
es_ops_url=`get_last_url_from_es $es_ops_pod .operations.` || :
# get timestamp of last .operations record in ES
if [ -z "${es_ops_url:-}" ] ; then
info Elasticsearch has no index or data for operations
else
es_ops_ts_str=`get_field_value_from_es_url $es_ops_pod $es_ops_url "${timefield}"`
es_ops_ts=`date +%s%6N --date="$es_ops_ts_str"`
# get message of last .operations record in ES
es_ops_msg=`get_field_value_from_es_url $es_ops_pod $es_ops_url "message"`
# find out how far behind journal es is for ops logs
es_j_ops_diff=`expr $last_ts - $es_ops_ts` || :
es_j_ops_diff_secs=`expr $es_j_ops_diff / 1000000` || :
es_j_ops_diff_usecs=`expr $es_j_ops_diff % 1000000` || :
info_diff "Elasticsearch operations index" $es_j_ops_diff_secs.$es_j_ops_diff_usecs seconds "the journal"
fi
# get url of last project. record in ES
es_prj_url=`get_last_url_from_es $es_pod ${prj_prefix}$prj.` || :
if [ -z "${es_prj_url:-}" ] ; then
info Elasticsearch has no index or data for projects
else
# get timestamp of last project record in ES
es_prj_ts_str=`get_field_value_from_es_url $es_pod $es_prj_url "${timefield}"`
es_prj_ts=`date +%s%6N --date="$es_prj_ts_str"`
# get message of last .operations record in ES
es_prj_msg=`get_field_value_from_es_url $es_pod $es_prj_url "message"`
# find out how far behind journal es is for project logs
es_j_prj_diff=`expr $prj_ts - $es_prj_ts` || :
es_j_prj_diff_secs=`expr $es_j_prj_diff / 1000000` || :
es_j_prj_diff_usecs=`expr $es_j_prj_diff % 1000000` || :
info_diff "Elasticsearch project index" $es_j_prj_diff_secs.$es_j_prj_diff_usecs seconds "the journal"
fi
# find out how far behind fluentd es is
if [ -z "$fluentd_rec_is_project" ] ; then
if [ -n "${es_ops_url:-}" ] ; then
es_ops_diff=`expr $ts - $es_ops_ts` || :
es_ops_diff_secs=`expr $es_ops_diff / 1000000` || :
es_ops_diff_usecs=`expr $es_ops_diff % 1000000` || :
info_diff "Elasticsearch operations index" $es_ops_diff_secs.$es_ops_diff_usecs seconds Fluentd
fi
else
if [ -n "${es_ops_url:-}" ] ; then
es_prj_diff=`expr $ts - $es_prj_ts` || :
es_prj_diff_secs=`expr $es_prj_diff / 1000000` || :
es_prj_diff_usecs=`expr $es_prj_diff % 1000000` || :
info_diff "Elasticsearch project index" $es_prj_diff_secs.$es_prj_diff_usecs seconds Fluentd
fi
fi
else # use /var/log/messages and json-file docker logs
node_pos_err() {
err timed out waiting for /var/log/node.log.pos - check Fluentd pod log
exit 1
}
wait_until_cmd_or_err "test -f /var/log/node.log.pos" node_pos_err 300
cont_pos_err() {
err timed out waiting for /var/log/es-containers.log.pos - check Fluentd pod log
exit 1
}
wait_until_cmd_or_err "test -f /var/log/es-containers.log.pos" cont_pos_err 300
totalfiles=0
skippedfiles=0
for file in /var/log/messages /var/log/containers/*.log ; do
totalfiles=`expr $totalfiles + 1`
src_offset=`ls -lL $file|awk '{print $5}'`
if [ $src_offset = 0 ] ; then
# file is empty - no records in es or fluentd either
skippedfiles=`expr $skippedfiles + 1`
verbose Skipping empty file $file
continue
fi
if [ $file = /var/log/messages ] ; then
posfile=/var/log/node.log.pos
index=.operations.
q=
es_pod_to_use=$es_ops_pod
else
posfile=/var/log/es-containers.log.pos
# get project from filename
prj=`echo "$file" | sed 's,^/var/log/containers/[^_]\+_\([^_]\+\)_.*\.log$,\1,'`
cont_id=`echo "$file" | sed 's,^/var/log/containers/.*-\([^\.]\+\).*\.log$,\1,'`
case "$prj" in
"") index=.operations.; es_pod_to_use=$es_ops_pod ;; # ops
"default"|"openshift-infra"|"openshift") index=.operations.; es_pod_to_use=$es_ops_pod ;; # ops
*) index="${prj_prefix}$prj."; es_pod_to_use=$es_pod ;;
esac
if [ -n "$cont_id" ] ; then
q="q=docker.container_id:$cont_id"
else
q=
fi
fi
# map file to ES index
# get url of last matching document in ES
es_url=`get_last_url_from_es $es_pod_to_use $index $q`
if [ -z "$es_url" ] ; then
es_ts=null
else
# get timestamp of ES document
es_ts_str=`get_field_value_from_es_url $es_pod_to_use $es_url "${timefield}"`
es_ts=`date +%s --date="$es_ts_str"`
# get message of ES document
es_msg=`get_field_value_from_es_url $es_pod_to_use $es_url "message"`
fi
# get the offset from the pos file - convert hex to decimal
f_offset=`awk -v file=$file '$1 == file {ii=sprintf("0x%s", $2); print strtonum(ii)}' $posfile`
if [ -z "$f_offset" -o "$f_offset" = 0 ] ; then
# fluentd hasn't seen this file yet, or
# no record read yet - assume diff from beginning of file
f_offset=0
fi
f_rec=`head -c $f_offset $file | tail -1`
src_offset=`ls -lL $file|awk '{print $5}'`
src_rec=`tail -1 $file`
if [ $file = /var/log/messages ] ; then
f_date=`echo "$f_rec"| awk '{print $1, $2, $3}'`
f_ts=`date --date="$f_date" +%s`
src_date=`echo "$src_rec"| awk '{print $1, $2, $3}'`
src_ts=`date --date="$src_date" +%s`
else
f_date=`echo "$f_rec" | python -c 'import sys,json; print json.loads(sys.stdin.read())["time"].encode("utf-8")'`
f_ts=`date --date="$f_date" +%s`
src_date=`echo "$src_rec" | python -c 'import sys,json; print json.loads(sys.stdin.read())["time"].encode("utf-8")'`
src_ts=`date --date="$src_date" +%s`
fi
if [ $es_ts = null ] ; then
info Elasticsearch $index index has no records for $file
else
info_diff "Elasticsearch $index index" `expr $src_ts - $es_ts` seconds $file
info_diff "Elasticsearch $index index" `expr $f_ts - $es_ts` seconds "Fluentd for $file"
fi
info_diff Fluentd `expr $src_ts - $f_ts` seconds $file
info_diff Fluentd `expr $src_offset - $f_offset` "bytes of offset" $file
diff_recs=`tail -c +$f_offset $file | wc -l`
diff_recs=`expr $diff_recs - 1` || : # for trailing nl
info_diff Fluentd $diff_recs records $file
done
info Skipped $skippedfiles empty files of total $totalfiles
fi
# echo '{
# "size": 1,
# "sort": [{"${timefield}":"desc"}],
# "query": {
# "constant_score": {
# "filter": {
# "term": {"${timefield}":'"$ts_str"'}
# }
# }
# }
# }' | query_es_from_es_json $espod .operations. _search
| true
|
32cccae888e882ed6c20b24d4d4f88b2ea5d942e
|
Shell
|
robin22d/node_docker
|
/Terraform/Jenkins.sh
|
UTF-8
| 2,070
| 2.96875
| 3
|
[] |
no_license
|
#!/bin/bash
# updating machine
echo "updating machine..."
yum update -y
yum upgrade -y
echo "installing unzip and wget"
# isntalling wget
yum install wget -y
# isntalling unzip
yum install unzip -y
echo "installing git..."
# isntalling get
yum install git -y
echo "Installing java package"
# installing Java
yum install java-1.8.0-openjdk-devel -y
echo "checking the version on java"
#checking java has installed
java -version
javac -version
# Setting environment variables
echo "updating environment variables"
cp /etc/profile /etc/profile_backup
echo 'export JAVA_HOME=/usr/lib/jvm/jre-1.8.0-openjdk' | sudo tee -a /etc/profile
echo 'export JRE_HOME=/usr/lib/jvm/jre' | sudo tee -a /etc/profile
echo 'export PATH=$PATH:$GRADLE_HOME/bin' | sudo tee -a /etc/profile
source /etc/profile
echo "installing docker..."
#installing docker
yum install -y yum-utils device-mapper-persistent-data lvm2
yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
yum install docker-ce -y
systemctl enable docker.service
systemctl start docker.service
systemctl status docker.service
usermod -aG docker centos
#https://www.cyberciti.biz/faq/install-use-setup-docker-on-rhel7-centos7-linux/
echo "checking docker is working"
docker run hello-world
echo "installing gitlab server key"
# installing gitlab server key
touch /root/.ssh/known_hosts
cat << 'EOF' >> /root/.ssh/known_hosts
gitlab.cs.cf.ac.uk,131.251.168.40 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBDQcOCOPLUQCRGrioWbPcxxCsqGOIj2wbP9MiE14Oc7KeLYbRwBtlHImq4k8f0tgI3qejeSnXl2y3jbFAmnttXY=
EOF
chmod 644 /root/.ssh/known_hosts
#changing to root
cd ~
#getting jenkins and installing
wget -O /etc/yum.repos.d/jenkins.repo http://pkg.jenkins-ci.org/redhat-stable/jenkins.repo
rpm --import http://pkg.jenkins-ci.org/redhat-stable/jenkins-ci.org.key
yum install jenkins -y
# stating and enabling jenkins
systemctl start jenkins.service
systemctl enable jenkins.service
# waiting for file to be created
sleep 70
# Printing jenkins password
echo 'jenkins password:'
cat /var/lib/jenkins/secrets/initialAdminPassword
| true
|
d043cb7fc6ef145e2ffc2110cb00bb09db846e70
|
Shell
|
dsmastrodomenico/ShellAlgorithmPractice
|
/2_variables.sh
|
UTF-8
| 341
| 2.765625
| 3
|
[] |
no_license
|
#!/bin/bash
# Demonstrates variable declaration and exporting variables to child processes.
# Fixed: the shebang was written "# !/bin/bash" (a plain comment, not a valid
# interpreter line), so the script ran under whatever shell invoked it.
# Author: Darwin Mastrodomenico

opcion=0
nombre="Darwin Mastrodomenico"

echo ""
echo "opción: $opcion"
echo "nombre: $nombre"
echo ""

# Export nombre so it is visible to child processes.
export nombre

# Run the follow-up script, which reads the exported variable.
./2_variables_2.sh
| true
|
0a0471180079cef96fe35d9bbf364c9b75cc7ac0
|
Shell
|
Just-Angelo/Example
|
/shrug-log.sh
|
UTF-8
| 556
| 3.796875
| 4
|
[] |
no_license
|
#!/bin/bash
# shrug-log: print the commit log of the shrug repository in the current
# directory.

# The repository must exist before its log can be read.
# Fixed: the message named "shrug-add" (copy/paste) and every error path
# exited with status 0; errors now go to stderr with a non-zero status.
if [ ! -d ".shrug" ]
then
    echo "shrug-log: error: no .shrug directory containing shrug repository exists" 1>&2
    exit 1
fi

# shrug-log takes no arguments.
if [ "$#" -ne 0 ]
then
    echo "usage: shrug-log" 1>&2
    exit 1
fi

# Count commit snapshots (.shrug/repos*); start at -1 because the initial
# repos directory exists before any commit is made. When the glob matches
# nothing it stays literal and iterates once, leaving the count at 0.
repos_num=-1
for snapshot in .shrug/repos*
do
    repos_num=$((repos_num + 1))
done

if [ "$repos_num" -eq 0 ]
then
    echo "shrug-log: error: your repository does not have any commits yet" 1>&2
    exit 1
fi

cat .shrug/log
| true
|
59291609a8555d04e9fc024d8bcbb65ecf6a3e49
|
Shell
|
vtjnash/julia-buildbot
|
/commands/launchpad.sh
|
UTF-8
| 2,780
| 3.578125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
set -e
# This script functions best when the following are installed:
# wget, git, bzr, dch, make, python (with the "requests" module installed, for Travis-ci API)
# Note that in order to push to lp:~staticfloat/julianightlies/trunk, you need my GPG private key
TEAM=~staticfloat
PROJECT=julianightlies
JULIA_GIT_URL="https://github.com/JuliaLang/julia.git"
DEBIAN_GIT_URL="https://github.com/staticfloat/julia-debian.git"
JULIA_GIT_BRANCH=master
DEBIAN_GIT_BRANCH=master
BUILD_DIR=/tmp/julia-packaging/ubuntu
cd $(dirname $0)
ORIG_DIR=$(pwd)
if [[ -z "$1" ]]; then
echo "Usage: $0 <commit sha>"
exit 1
fi
COMMIT="$1"
# Check if we've been downloaded as a git directory. If so, update ourselves!
if [[ -d .git ]]; then
git pull
fi
# Store everything in a temp dir
mkdir -p $BUILD_DIR
cd $BUILD_DIR
# Get the git branch
if test ! -d julia-${JULIA_GIT_BRANCH}; then
git clone ${JULIA_GIT_URL} julia-${JULIA_GIT_BRANCH}
# Setup remote for launchpad
git remote add launchpad git+ssh://staticfloat@git.launchpad.net/~staticfloat/julianightlies
fi
# Get the debian directory
if test ! -d debian-${DEBIAN_GIT_BRANCH}; then
git clone ${DEBIAN_GIT_URL} debian-${DEBIAN_GIT_BRANCH}
else
cd debian-${DEBIAN_GIT_BRANCH}
git pull
cd ..
fi
# Go into our checkout of JULIA_GIT_URL
cd julia-${JULIA_GIT_BRANCH}
git checkout ${JULIA_GIT_BRANCH}
git fetch
git reset --hard origin/${JULIA_GIT_BRANCH}
make cleanall
# Checkout the commit we've been given
git checkout -B ${JULIA_GIT_BRANCH} ${COMMIT}
git submodule init
git submodule update
# Get dependencies
make NO_GIT=1 -C deps getall
# Let's build the documentation, so that we don't have to do so on the debian servers
make -C doc html
make -C doc latex
# We're going to compile LLVM on our own. :(
make -C deps get-llvm
# Force downloading of LLVM 3.6.1 as well, so that ARM builds get happier
make LLVM_VER=3.6.1 -C deps get-llvm
make -C deps get-Rmath-julia
# Work around our lack of git on buildd servers
make -C base version_git.jl.phony
# Run this again in an attempt to get the timestamps correct
make doc/_build/html
# Make it blaringly obvious to everyone that this is a git build when they start up Julia-
JULIA_VERSION=$(cat ./VERSION)
DATECOMMIT=$(git log --pretty=format:'%cd' --date=short -n 1 | tr -d '-')
echo "Syncing commit ${JULIA_VERSION}+$DATECOMMIT."
# Throw the debian directory into here
rm -rf debian
cp -r ../debian-${DEBIAN_GIT_BRANCH}/debian .
# Also, increment the current debian changelog, so we get git version tagged binaries
dch -v "${JULIA_VERSION}+$DATECOMMIT" "nightly git build"
# Force-push this up to launchpad
git add -f *
git commit -m "Manual import commit ${DATECOMMIT} from ${JULIA_GIT_URL}"
git push -f launchpad
exit 0
| true
|
6ebab5958e66f09c1976cf5798c9e728cc80ac19
|
Shell
|
v0rts/spaceship-prompt
|
/sections/perl.zsh
|
UTF-8
| 1,628
| 3.4375
| 3
|
[
"MIT"
] |
permissive
|
#
# Perl
#
# Perl is a general purpose programming language, originally developed for
# text manipulation.
# Perl refers to Perl 5.x, where Perl 6+ officially changed the name to Raku.
#
# Link: https://www.perl.org/
# ------------------------------------------------------------------------------
# Configuration
# ------------------------------------------------------------------------------
SPACESHIP_PERL_SHOW="${SPACESHIP_PERL_SHOW=true}"
# Fixed copy/paste bug: this previously defaulted from SPACESHIP_PHP_ASYNC,
# so a user's SPACESHIP_PERL_ASYNC setting was silently ignored.
SPACESHIP_PERL_ASYNC="${SPACESHIP_PERL_ASYNC=true}"
SPACESHIP_PERL_PREFIX="${SPACESHIP_PERL_PREFIX="$SPACESHIP_PROMPT_DEFAULT_PREFIX"}"
SPACESHIP_PERL_SUFFIX="${SPACESHIP_PERL_SUFFIX="$SPACESHIP_PROMPT_DEFAULT_SUFFIX"}"
SPACESHIP_PERL_SYMBOL="${SPACESHIP_PERL_SYMBOL="🐪 "}"
SPACESHIP_PERL_COLOR="${SPACESHIP_PERL_COLOR="blue"}"
# ------------------------------------------------------------------------------
# Section
# ------------------------------------------------------------------------------
# Show current version of Perl in the prompt.
# Renders nothing unless Perl is installed and the cwd looks like a Perl
# project. The (#qN^/) suffixes are zsh glob qualifiers: N = nullglob,
# ^/ = exclude directories — so "*.pl(#qN^/)" means "any non-directory
# *.pl file, or nothing".
spaceship_perl() {
  [[ $SPACESHIP_PERL_SHOW == false ]] && return
  spaceship::exists perl || return
  # Show only if Perl project metadata (META.*, .perl-version, cpanfile —
  # searched upward) or *.pl / *.pm files exist in the current directory.
  local is_perl_project="$(spaceship::upsearch META.{json,yml,yaml} .perl-version cpanfile)"
  [[ -n "$is_perl_project" || -n *.pl(#qN^/) || -n *.pm(#qN^/) ]] || return
  # Parse "This is perl 5, version ..." output; strip the "(v...)" wrapper.
  local perl_version=$(perl -v 2>&1 | awk '/This/ {print $9}' | sed -r 's/[(v]+//g;s/[)]//g')
  spaceship::section \
    --color "$SPACESHIP_PERL_COLOR" \
    --prefix "$SPACESHIP_PERL_PREFIX" \
    --suffix "$SPACESHIP_PERL_SUFFIX" \
    --symbol "$SPACESHIP_PERL_SYMBOL" \
    "v$perl_version"
}
| true
|
42d710a356b3a15948a6cb7149e27e320efb133c
|
Shell
|
oRiamn/x200t-xinput
|
/commands/toggle.sh
|
UTF-8
| 373
| 3.71875
| 4
|
[] |
no_license
|
#!/bin/bash

# toggle DEVICE_ID DEVICE_NAME
# Flip an xinput device between enabled and disabled, then report the new
# state on stdout (the device name is rendered in bold via ANSI escapes).
function toggle {
    local id=$1
    local name=$2
    local enabled
    enabled=$(xinput list-props "$id" | grep "Device Enabled" | grep -o "[01]$")
    if [ "$enabled" = '1' ]; then
        xinput --disable "$id"
        echo -e "\e[1m$name\e[21m is now disable"
    else
        xinput --enable "$id"
        echo -e "\e[1m$name\e[21m is now enable"
    fi
}
| true
|
ab32da8171ce03f61295cfc2f9ccaad9b35f41e5
|
Shell
|
ethragur/dotfiles
|
/scripts/backlight.sh
|
UTF-8
| 201
| 2.828125
| 3
|
[] |
no_license
|
#!/usr/bin/bash
# Adjust the AMD GPU backlight by one twelfth of max brightness.
# Usage: backlight.sh [-i]   # -i = increase, anything else = decrease

# Abort instead of reading/writing files in the wrong cwd when the sysfs
# node is absent (previously an unguarded cd).
cd /sys/class/backlight/amdgpu_bl0/ || exit 1

m=$(cat max_brightness)
c=$(cat brightness)
step=$(( m / 12 ))

if [ "${1:-}" == "-i" ]; then
    n=$(( c + step ))
else
    n=$(( c - step ))
fi

# Clamp into [0, max]; the kernel rejects negative or out-of-range values,
# which the original could produce near the ends of the scale.
if [ "$n" -lt 0 ]; then n=0; fi
if [ "$n" -gt "$m" ]; then n=$m; fi

echo "$n" > brightness
| true
|
8b21ae9f5a01c25ddec05cb853be2bf87ff4753d
|
Shell
|
EssTravelNet/eclipse.platform.releng.eclipsebuilder
|
/scripts/sdk/promotion/wgetSDKPromoteScripts.sh
|
UTF-8
| 2,010
| 3.25
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Sample utility to wget files related to promotion.
# We say "branch or tag", but for a tag the wget has to use tag= instead of h=.
branchOrTag=master

# wget puts most of its output on standard error (anything diagnostic goes to
# stderr, only real results to stdout), which cron jobs report as "an error" —
# hence the 2>&1 on every fetch.
wget --no-verbose -O syncDropLocation.sh http://git.eclipse.org/c/platform/eclipse.platform.releng.eclipsebuilder.git/plain/scripts/sdk/promotion/syncDropLocation.sh?h=$branchOrTag 2>&1
wget --no-verbose -O sdkPromotionCronJob.sh http://git.eclipse.org/c/platform/eclipse.platform.releng.eclipsebuilder.git/plain/scripts/sdk/promotion/sdkPromotionCronJob.sh?h=$branchOrTag 2>&1
wget --no-verbose -O updateDropLocation.sh http://git.eclipse.org/c/platform/eclipse.platform.releng.eclipsebuilder.git/plain/scripts/sdk/promotion/updateDropLocation.sh?h=$branchOrTag 2>&1
wget --no-verbose -O getBaseBuilder.xml http://git.eclipse.org/c/platform/eclipse.platform.releng.eclipsebuilder.git/plain/scripts/sdk/promotion/getBaseBuilder.xml?h=$branchOrTag 2>&1
wget --no-verbose -O getEBuilder.sh http://git.eclipse.org/c/platform/eclipse.platform.releng.eclipsebuilder.git/plain/scripts/sdk/promotion/getEBuilder.sh?h=$branchOrTag 2>&1
wget --no-verbose -O wgetSDKPromoteScripts.NEW.sh http://git.eclipse.org/c/platform/eclipse.platform.releng.eclipsebuilder.git/plain/scripts/sdk/promotion/wgetSDKPromoteScripts.sh?h=$branchOrTag 2>&1

# Compare the freshly fetched copy of this script with the one on disk
# ($( ) replaces the deprecated backtick form).
differs=$(diff wgetSDKPromoteScripts.NEW.sh wgetSDKPromoteScripts.sh)
if [ -z "${differs}" ]
then
    # 'new' is not different from existing, so remove the 'new' one.
    rm wgetSDKPromoteScripts.NEW.sh
else
    echo " "
    echo " wgetSDKPromoteScripts.sh has changed. Compare with and consider replacing with wgetSDKPromoteScripts.NEW.sh"
    echo "differs: ${differs}"
    echo " "
fi

chmod +x *.sh
| true
|
2855ffec946131f979221e19686770ef2e634495
|
Shell
|
tox2ik/go-smux
|
/testdata/test.luks.sh
|
UTF-8
| 620
| 2.75
| 3
|
[] |
no_license
|
#!/bin/bash
# Exercise smux against a LUKS container: (re)create a 20 MiB loop-backed
# LUKS volume keyed with "jwf", then open it through smux.

# Tear down any previous run: close the mapping and detach stale loop devices.
sudo cryptsetup close test-smux
# NOTE(review): this losetup -d runs without sudo while the close above needs
# sudo — confirm the invoking user is allowed to detach loop devices.
losetup | awk '/crypto-fs.block/ { print $1}' | xargs --no-run-if-empty -n1 losetup -d

# Create and format the backing file unless it is already a LUKS container.
if ! cryptsetup isLuks crypto-fs.block; then
  rm -fv crypto-fs.block
  # 20 MiB sparse backing file; key material is the literal string "jwf".
  fallocate crypto-fs.block -l $((1024**2 *20))
  echo -n jwf > crypto-fs.key
  cryptsetup luksFormat -q crypto-fs.block crypto-fs.key
fi

# Attach the backing file to the next free loop device and open it via smux.
loop=$(losetup crypto-fs.block -f --show)
echo "Type jwf;
(aka cryptsetup open $loop test-smux  --key-file=crypto-fs.key)"
sudo ../smux "cryptsetup open $loop test-smux" --key-file=crypto-fs.key ' (jwf) '
lsblk /dev/mapper/test-smux -o type,size,path
| true
|
dc403a8015296a8e346a61bbb95eb9e3eb5630c0
|
Shell
|
luiseduardohdbackup/mtk
|
/cyclone.sh
|
UTF-8
| 1,498
| 4.125
| 4
|
[
"WTFPL"
] |
permissive
|
#!/usr/bin/env bash
# clone all partitions of a drive and try not to waste space
# @author Filipp Lepalaan <filipp@mcare.fi>
# @package mtk

if [[ $USER != "root" ]]; then
    # Fixed: error messages used "2>&1" (stderr->stdout) where ">&2" was meant.
    echo "Insufficient privileges!" >&2
    exit 1
fi

if [[ $# -lt 2 ]]; then
    echo "Usage: $(basename $0) source destination" >&2
    exit 1
fi

SOURCE=$1
TARGET=$2

# Make sure we're not operating on the boot drive.
# Fixed: the pipeline was wrapped in [[ ... ]], which evaluates a string
# expression and never runs the commands; run the pipeline as the condition.
if mount | head -n 1 | egrep -q "${SOURCE}|${TARGET}"; then
    echo "Error: cannot operate on the boot drive" >&2
    exit 1
fi

TMPFILE="/tmp/$(uuidgen)"
trap "killall dd; rm -f ${TMPFILE}.plist; echo 'Cleaning up...'; exit 255" SIGINT SIGTERM

# Get size of source. "defaults read X" reads X.plist, so write there.
/usr/sbin/diskutil info -plist "$SOURCE" > "${TMPFILE}.plist"
SOURCE_SIZE=$(defaults read "$TMPFILE" TotalSize)

# Get size of destination.
# Fixed: the original wrote the target's plist to $TMPFILE (no suffix) while
# "defaults read" re-read the stale ${TMPFILE}.plist — so TARGET_SIZE always
# equaled SOURCE_SIZE and the size checks below were meaningless.
/usr/sbin/diskutil info -plist "$TARGET" > "${TMPFILE}.plist"
TARGET_SIZE=$(defaults read "$TMPFILE" TotalSize)
rm -f "${TMPFILE}.plist"

# Fixed: numeric comparison — [[ a < b ]] compares strings lexicographically.
if (( TARGET_SIZE == SOURCE_SIZE )); then
    echo "Sizes are identical, cloning with dd..."
    /usr/sbin/diskutil quiet unmountDisk $SOURCE
    /usr/sbin/diskutil quiet unmountDisk $TARGET
    /bin/dd bs=16m if="/dev/r${SOURCE}" of="/dev/r${TARGET}" conv=noerror,sync &
    DD_PID=$!
    # While dd is running, poke it with SIGINFO so it reports progress.
    # Fixed: "[[ ps -ax | ... ]]" never executed; test the PID directly.
    while kill -0 "$DD_PID" 2>/dev/null; do
        sleep 1
        /bin/kill -SIGINFO "$DD_PID"
    done
    /usr/sbin/diskutil quiet mountDisk $SOURCE
    /usr/sbin/diskutil quiet mountDisk $TARGET
    exit 0
fi

if (( TARGET_SIZE < SOURCE_SIZE )); then
    echo "Warning: target drive is smaller than source!" >&2
fi
| true
|
b5eedda531506f104874afef5b8931f2457c2521
|
Shell
|
rebelplutonium/secret-editor
|
/extension/init.user.sh
|
UTF-8
| 1,441
| 2.9375
| 3
|
[] |
no_license
|
#!/bin/sh
# Bootstrap a pass(1) password store inside the container: import GPG keys
# and owner-trust from the environment, initialise the store, wire it to a
# git remote over SSH, and install the appropriate git hook.
TEMP=$(mktemp -d) &&
# Import secret keys and owner-trust for both gpg and gpg2 from env vars.
echo "${GPG_SECRET_KEY}" > ${TEMP}/gpg-secret-key &&
gpg --batch --import ${TEMP}/gpg-secret-key &&
echo "${GPG2_SECRET_KEY}" > ${TEMP}/gpg2-secret-key &&
gpg2 --batch --import ${TEMP}/gpg2-secret-key &&
echo "${GPG_OWNER_TRUST}" > ${TEMP}/gpg-owner-trust &&
gpg --batch --import-ownertrust ${TEMP}/gpg-owner-trust &&
echo "${GPG2_OWNER_TRUST}" > ${TEMP}/gpg2-owner-trust &&
gpg2 --batch --import-ownertrust ${TEMP}/gpg2-owner-trust &&
rm -rf ${TEMP} &&
# Initialise pass with the id of the first imported public key.
pass init $(gpg --list-keys | grep "^pub" | sed -e "s#^.*/##" -e "s# .*\$##") &&
pass git init &&
pass git config user.name "${COMMITTER_NAME}" &&
pass git config user.email "${COMMITTER_EMAIL}" &&
# "origin" before the colon is the SSH host alias written to ~/.ssh/config below.
pass git remote add origin origin:${ORIGIN_ORGANIZATION}/${ORIGIN_REPOSITORY}.git &&
echo "${ORIGIN_ID_RSA}" > /home/user/.ssh/origin_id_rsa &&
ssh-keyscan -p ${ORIGIN_PORT} "${ORIGIN_HOST}" > /home/user/.ssh/known_hosts &&
(cat > /home/user/.ssh/config <<EOF
Host origin
HostName ${ORIGIN_HOST}
Port ${ORIGIN_PORT}
User git
IdentityFile ~/.ssh/origin_id_rsa
EOF
) &&
pass git fetch origin master &&
pass git checkout master &&
# Read-write stores push after each commit via a post-commit hook.
if [ ! -z "${READ_WRITE}" ]
then
  ln -sf /usr/local/bin/post-commit ${HOME}/.password-store/.git/hooks
fi &&
# NOTE(review): "pre-comit" is misspelled — git only runs a hook named
# "pre-commit", so this symlink will never fire. Confirm the binary's real
# name in the image and fix both sides if needed.
if [ ! -z "${READ_ONLY}" ]
then
  ln -sf /usr/local/bin/pre-comit ${HOME}/.password-store/.git/hooks
fi
| true
|
a4e38aa669b2207afa4addb8c33e048790290c4b
|
Shell
|
dinjonya/blog
|
/Project.sh
|
UTF-8
| 691
| 2.625
| 3
|
[
"MIT"
] |
permissive
|
# Build, publish and zip the three projects (apiblog, Authen, uiblog)
# into ./Publish. Must be run from the repository root.

if [ ! -d "Publish" ]; then
    echo 'Publish not exists,please look your current path'
    # Fixed: exit non-zero so callers can detect the failure (was exit 0).
    exit 1
fi

# Empty the publish folder without changing the working directory
# (previously cd Publish; rm -rf *; cd .. — dangerous if the cd failed).
rm -rf Publish/*

projects="apiblog Authen uiblog"

# Build and publish each project; the subshell keeps the cwd stable even
# when a step fails, and the triplicated sequence collapses into one loop.
for proj in $projects; do
    echo -e "\033[47;35m $proj \033[0m"
    (cd "$proj" && dotnet build && dotnet publish -o "../Publish/$proj")
done

echo -e "\033[47;35m Publish \033[0m"

# Zip each published project next to its folder (Publish/<name>.zip).
for proj in $projects; do
    (cd "Publish/$proj" && zip -r "./../$proj.zip" ./*)
done
| true
|
c8542819f69a238cd55690707138c51c6ccc57fb
|
Shell
|
JJungJieun/test_jieun
|
/99for.sh
|
UTF-8
| 141
| 2.6875
| 3
|
[] |
no_license
|
#!/bin/bash
# Print the 9x9 multiplication table: "j* i = j*i" for i, j in 1..9.
for i in {1..9}
do
    for j in {1..9}
    do
        # $(( )) replaces the external 'expr' call (one fork per product).
        mul=$(( i * j ))
        echo $j'*' $i '=' $mul
    done
done
exit 0
| true
|
dc71ee115d5dceb2c6b2850f43d779d402af993a
|
Shell
|
ndhoule/dotfiles
|
/packages/bin/bin/dot
|
UTF-8
| 2,556
| 4.15625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
#
# Refresh dotfiles from repository and update dependencies.
#
set -o errexit
set -o nounset
set -o pipefail
CURRENT_SCRIPT_PATH="$(
cd "$(dirname "$0")"
pwd -P
)"
OSTYPE=${OSTYPE:-}
# Emit a tagged informational message on stdout.
function log_info() {
  printf '[INFO]: %s\n' "$*"
}

# Emit a tagged warning on stdout.
function log_warn() {
  printf '[WARN]: %s\n' "$*"
}

# Emit a tagged error on stderr.
function log_error() {
  printf '[ERROR]: %s\n' "$*" >&2
}
# TODO(ndhoule): This is duplicated from the root install script, extract this into a reusable chunk
# or script
function link_dotfile_packages() {
if ! command -v stow > /dev/null 2>&1; then
log_warn "stow not installed, skipping dotfile linking..."
return
fi
packages=(
"ag"
"alacritty"
"asdf"
"bin"
"clojure"
"emacs"
"git"
"gpg"
"haskell"
"javascript"
"ruby"
"ssh"
"taskwarrior"
"tmux"
"nvim"
"yubikey"
"zsh"
)
case "${OSTYPE}" in
linux*)
:
packages+=(
"gammastep"
"linux"
"sway"
"zeal"
)
;;
esac
log_info "Symlinking dotfiles into ${HOME}..."
for pkg in "${packages[@]}"; do
pushd "${CURRENT_SCRIPT_PATH}/../../../packages" > /dev/null
stow --target="${HOME}" --verbose=1 "${pkg}"
popd > /dev/null
done
log_info "Symlinked dotfiles into ${HOME}."
}
# Update system packages (via the yay AUR helper) and Flatpaks on Arch Linux.
# Each step is skipped with a warning when its tool is not installed.
function dot_linux_arch() {
  if ! command -v yay > /dev/null 2>&1; then
    log_warn "yay not installed, skipping system package updates..."
  else
    log_info "Updating system packages..."
    # Full repo+AUR sync/upgrade, then drop build-time deps and clean the
    # package cache non-interactively.
    yay --sync --refresh --sysupgrade --nodiffmenu --norebuild --removemake --noanswerclean
    yay --sync --clean --noconfirm --quiet
    log_info "Updated system packages."
  fi
  if ! command -v flatpak > /dev/null 2>&1; then
    log_warn "flatpak not installed, skipping flatpak package updates..."
  else
    log_info "Updating Flatpaks..."
    flatpak update --noninteractive
    log_info "Updated Flatpaks."
  fi
}
# Dispatch to the distribution-specific updater based on hostnamectl's
# "Operating System" field.
function dot_linux() {
  local distro
  distro=$(hostnamectl | grep "Operating System" | cut -d ':' -f 2 | xargs)
  if [[ "${distro}" == "Arch Linux" ]]; then
    dot_linux_arch
  else
    echo "Linux distribution '${distro}' not supported."
  fi
}
# macOS support has not been implemented yet; report and bail out.
function dot_darwin() {
  log_error "macOS not yet supported."
}
# Entry point: run the OS-specific update step, then re-link dotfile
# packages. Unsupported platforms abort before any linking happens.
function main() {
  case "${OSTYPE}" in
    darwin*)
      dot_darwin
      ;;
    linux*)
      dot_linux
      ;;
    *)
      log_error "Unsupported OS: '${OSTYPE}'"
      exit 1
      ;;
  esac
  link_dotfile_packages
}

main "$@"
| true
|
7bbb7d86ac3cc1cbe582324b61aca7dac0762099
|
Shell
|
abhinav-webonise/linux_assignment
|
/ass7.sh
|
UTF-8
| 592
| 3.140625
| 3
|
[] |
no_license
|
#!/bin/bash
# Linux assignment exercise: create folders, move the first four files
# (alphabetically) of the current directory into a public_html folder, and
# shuffle hard-coded assignment folders around.
mkdir public_html test_folder
# NOTE(review): public_html/public_html/linux_assignment does not exist at
# this point (only ./public_html was created), so this touch fails — was a
# mkdir -p or a shorter path intended?
touch public_html/public_html/linux_assignment/README.md
chmod 777 public_html
# First four regular files, alphabetically; "cut --complement -c1,2" strips
# the leading "./" from find's output.
# NOTE(review): word-splits on whitespace in names, and moves into
# ../public_html although the folder was created in the current directory —
# confirm the intended layout.
for filename in `find . -maxdepth 1 -type f | cut --complement -c1,2 | sort | head -4`
do
mv "$filename" "../public_html/$filename"
echo "Moved $filename to ../public_html/$filename"
done
cd ..
# Copy, then move, the assignment folders into the nested public_html and
# finally rename it (absolute paths are hard-coded to this user's home).
cp -R /home/webonise/abhinav/assign /home/webonise/abhinav/public_html/public_html
mv /home/webonise/abhinav/t/assign /home/webonise/abhinav/public_html/public_html
mv /home/webonise/abhinav/public_html/public_html /home/webonise/abhinav/public_html/renamed_folder
| true
|
e7b95a9f88e67245c6da1554b075af49b1ef1e87
|
Shell
|
anasthesia/soft_LLP
|
/MG5_aMC_v2_6_4/Singlet-triplet_MG_run_files/0_my_proc_generation_combined.sh
|
UTF-8
| 398
| 2.765625
| 3
|
[] |
no_license
|
#!/bin/bash
# The first script to run: creates the MadGraph process folders for the
# scalar and pseudoscalar processes. No need to run afterwards.

# set the paths
MGdir=~/mywork/soft/MG5_aMC_v2_6_5/
CARDdir=$MGdir/Singlet-triplet_MG_run_files

# set proc card files
PROC_CARD_Sc="proc_card_scalar.dat"
PROC_CARD_Ps="proc_card_pseudo.dat"

# Go to MG and generate the processes. Fixed: abort if the install dir is
# missing instead of silently running ./bin/mg5 from the wrong directory.
cd "$MGdir" || exit 1
./bin/mg5 "$CARDdir/$PROC_CARD_Sc"
./bin/mg5 "$CARDdir/$PROC_CARD_Ps"
| true
|
f0ffe57e25068fba1b0b8c014f93b2414e0c1dab
|
Shell
|
bajajamit01/docker-hello-world-spring-boot
|
/bin/spring-demo-api-deployment.sh
|
UTF-8
| 3,198
| 3.59375
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash

# Exit script if any command returns non-zero
set -e

# Fixed: the script requires 6 arguments, but the old error text claimed
# "8 required" and the usage line listed placeholders (IMAGE_NAME,
# DOCKER_REPO) that are no longer consumed below.
if [ "$#" -ne 6 ]; then
    echo "ERROR: Incorrect number of arguments, 6 required"
    echo "Usage:"
    echo "$0 <pullSecret> <ENVIRONMENT> <NAMESPACE> <IMAGE_VERSION> <ACR> <REPLICAS>"
    exit 1
fi

PULL_SECRET=$1
ENVIRONMENT=$2
NAMESPACE=$3
IMAGE_VERSION=$4
ACR=$5
REPLICAS=$6

# Derived resource names shared by the service/deployment/ingress manifests.
DEPLOYMENT_NAME="spring-demo-api-${ENVIRONMENT}-deployment"
DEPLOYMENT_POD="spring-demo-api-${ENVIRONMENT}-pod"
DEPLOYMENT_SERVICE="spring-demo-api-${ENVIRONMENT}-service"
HTTPS_CONTAINER_PORT=8443
HTTP_CONTAINER_PORT=8080
INGRESS_NAME="spring-demo-${ENVIRONMENT}-ingress"

# Prints all executed commands to terminal
set -x
echo "apiVersion: v1
kind: Service
metadata:
name: ${DEPLOYMENT_SERVICE}
namespace: ${NAMESPACE}
spec:
type: ClusterIP
selector:
app: spring-demo-api-${ENVIRONMENT}
ports:
- protocol: TCP
port: 8443
targetPort: ${HTTPS_CONTAINER_PORT}
name: https
- protocol: TCP
port: 8080
targetPort: ${HTTP_CONTAINER_PORT}
name: http
" > service.yaml
# Create a service to attach to the deployment
kubectl apply -f service.yaml --wait
echo "apiVersion: apps/v1
kind: Deployment
metadata:
name: ${DEPLOYMENT_NAME}
namespace: ${NAMESPACE}
labels:
app: spring-demo-api-${ENVIRONMENT}
spec:
replicas: ${REPLICAS}
selector:
matchLabels:
app: spring-demo-api-${ENVIRONMENT}
strategy:
rollingUpdate:
maxSurge: 25%
maxUnavailable: 25%
type: RollingUpdate
template:
metadata:
labels:
app: spring-demo-api-${ENVIRONMENT}
spec:
containers:
- name: spring-demo-api-${ENVIRONMENT}
image: ${ACR}.azurecr.io/spring-demo-api:${IMAGE_VERSION}
imagePullPolicy: Always
resources:
requests:
memory: '200Mi'
cpu: '100m'
limits:
memory: '200Mi'
cpu: '300m'
livenessProbe:
httpGet:
port: ${HTTP_CONTAINER_PORT}
httpHeaders:
- name: Custom-Header
value: "Hello World 2021"
initialDelaySeconds: 15
periodSeconds: 10
timeoutSeconds: 30
successThreshold: 1
failureThreshold: 5
readinessProbe:
httpGet:
port: ${HTTP_CONTAINER_PORT}
httpHeaders:
- name: Custom-Header
value: "Hello World 2021"
initialDelaySeconds: 20
periodSeconds: 3
imagePullSecrets:
- name: ${PULL_SECRET}
" > deployment.yaml
# Deploy the application containers to the cluster with kubernetes
kubectl apply -f deployment.yaml -o json --wait --timeout 90s
echo "apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
annotations:
kubernetes.io/ingress.class: nginx
name: ${INGRESS_NAME}
namespace: jenkins
spec:
rules:
- http:
paths:
- backend:
serviceName: ${DEPLOYMENT_SERVICE}
servicePort: ${HTTP_CONTAINER_PORT}
path: /
pathType: Prefix
" > ingress.yaml
#Deploy Ingress
kubectl apply -f ingress.yaml --wait
| true
|
0a08d9eda42b8e3c0da1db929310fa8343e89f64
|
Shell
|
ardin/ardin-tools
|
/src/etc/profile.d/ardin-tools.sh
|
UTF-8
| 4,346
| 3.546875
| 4
|
[] |
no_license
|
##
## Aliases
##
alias s="ssh"
alias h="host"
alias pwgen-8="pwgen -s -1 8"
alias pwgen-12="pwgen -s -1 12"
alias pwgen-16="pwgen -s -1 16"
##
## Functions
##
function ssl-san()
{
[[ -z $1 ]] && echo " * Syntax: $FUNCNAME site:port" && return 1
printf "\n" | openssl s_client -servername $(echo $1 | cut -f 1 -d ':') -connect $1 2>&1 | openssl x509 -text -noout | grep DNS | sed -e 's/^[ \t]*//'
}
function ssl-dates()
{
[[ -z $1 ]] && echo " * Syntax: $FUNCNAME site:port" && return 1
sn=$(echo $1 | cut -f 1 -d ':')
printf "\n" | openssl s_client -servername ${sn} -connect $1 2>&1 | openssl x509 -text -noout | grep DNS | sed -e 's/^[ \t]*//'
printf "\n" | openssl s_client -servername ${sn} -connect $1 2>&1 | openssl x509 -dates -noout
}
function ssl-check-v3()
{
[[ -z $1 ]] && echo " * Syntax: $FUNCNAME site:port" && return 1
printf "HEAD / HTTP/1.0" | openssl s_client -connect $1 -ssl3 >/dev/null 2>&1
[[ $? -eq 0 ]] && echo "enabled" || echo "disabled"
}
function grep-ip ()
{
if [ -z $1 ]; then
echo " * Syntax: $FUNCNAME filename"
else
perl -lne 'print $1 if /(\d+\.\d+\.\d+\.\d+)/' $1
fi
}
function server-info()
{
if [ -z $1 ] ; then
echo " * Syntax: $FUNCNAME servername"
else
ssh $1 'hostname; echo -n "- HDD: "; df -hl / | grep -v Filesystem ; echo -n "- CPUs: "; cat /proc/cpuinfo | grep ^processor | wc -l ; echo -n "- "; grep MemTotal /proc/meminfo ; echo ; '
fi
}
function docker-enter()
{
CONTAINERS=`sudo docker ps -a | grep Up | awk '{ print $1" "$NF }' | xargs`;
echo $CONTAINERS;
ID=`dialog --title " VM Configuration " --stdout --menu "Select container" 22 70 16 $CONTAINERS`;
sudo docker exec -t -i $ID /bin/bash
}
function docker-clean()
{
docker rm -v `docker ps -a -q -f status=exited` 2>/dev/null
docker rmi `docker images -f "dangling=true" -q` 2>/dev/null
}
function http-keepalive-test()
{
echo "Checking: ${1}";
time printf "GET / HTTP/1.1\r\nHost: ${1}\r\nConnection: Keep-alive\r\n\r\n" | nc ${1} 80;
}
function change-root-password()
{
if [ -z $1 ] ; then
echo "Syntax: $FUNCNAME servername";
else
PASS=`head -c 500 /dev/urandom | tr -dc a-z0-9 | head -c 15`;
echo "Setting password for $1"
ssh -t ${1} "echo -e \"${PASS}\n${PASS}\" | passwd root" && echo "server: root@${1} , new password: ${PASS}" || echo "server: root@${1}, FAIL !";
fi
}
function shadow-pass()
{
openssl passwd -1 -salt xyz ${1}
}
function ardin-tools()
{
echo "ardin-tools: installed";
}
function server-deinstallation()
{
[[ -z $1 ]] && echo " * Syntax: $FUNCNAME server" && return 1
echo "Creating directory $1"
mkdir $1 && cd $1 || exit 2
echo "Copying /etc .."
ssh $1 'cd /; tar czf - etc ' | cat - > etc.tgz
echo "Copying /home .."
ssh $1 'cd /; tar czf - home ' | cat - > home.tgz
echo "Copying /usr/local .."
ssh $1 'cd /usr; tar czf - local ' | cat - > usr-local.tgz
echo "Taking command views (crontab, ps, ip, netstat, df) .."
ssh $1 'crontab -l' > crontab-l
ssh $1 'ps auxf' > ps-auxf
ssh $1 'ip a s' > ip-as
ssh $1 'netstat -pln' > netstat-plnt
ssh $1 'df -h' > df-h
echo "done"
cd -
}
function decrypt-p12()
{
[[ -z $1 ]] && echo " * Syntax: $FUNCNAME filename.p12" && return 1
[[ ! -f $1 ]] && echo "Error: no such file or directory" && return 1
echo -n "Enter password: "
read pass
# create private
openssl pkcs12 -passin "pass:${pass}" -in "${1}" -clcerts -nocerts -nodes | sed -ne '/-BEGIN PRIVATE KEY-/,/-END PRIVATE KEY-/p' > "${1}.key"
# certificate
openssl pkcs12 -passin "pass:${pass}" -in "${1}" -clcerts -nokeys | sed -ne '/-BEGIN CERTIFICATE-/,/-END CERTIFICATE-/p' > "${1}.crt"
# chain
openssl pkcs12 -passin "pass:${pass}" -in "${1}" -clcerts -nokeys -chain | sed -ne '/-BEGIN CERTIFICATE-/,/-END CERTIFICATE-/p' > "${1}.chain"
# pem
cat "${1}.key" > "${1}.pem"
cat "${1}.crt" >> "${1}.pem"
cat "${1}.chain" >> "${1}.pem"
}
| true
|
032a66de07feff79ca9d9b5b84f4eb9e78d98ba6
|
Shell
|
bonohub13/free_cache
|
/free_cache.bash
|
UTF-8
| 366
| 3.1875
| 3
|
[
"LicenseRef-scancode-other-permissive"
] |
permissive
|
#!/bin/bash
# -*- coding:utf-8 -*-
current_cache=$(free | grep Mem | sed -r -e "s/[^ ]+ +[^ ]+ +([^ ]+)+ +[^ ]+ +[^ ]+ +[^ ]+ +[^ ]+/\1/")
mem_limit=6000000 # default to 6GB
if [ $# -gt 0 ];then
if [ -n $1 -a $1 -gt 1000000 ];then # clears
mem_limit=$1
fi
fi
if [ $current_cache -gt $mem_limit ];then
sync; echo 1 > /proc/sys/vm/drop_caches
fi
| true
|
3129439f089ec4f91b27e248b86941d0f2c5cba3
|
Shell
|
rahulyhg/saas-modal
|
/script/pulsar-saas-ssl.sh
|
UTF-8
| 5,993
| 2.921875
| 3
|
[] |
no_license
|
#!/bin/bash
#Program: pulsar-ssl.sh
#Script to Configure NginX SSL Server for Pulsar(https) <cavirin-sudhirk>
#instance_ip=$1
#Nginx variables
upstream='$upstream'
host='$host'
remote_addr='$remote_addr'
proxy_add_x_forwarded_for='$proxy_add_x_forwarded_for'
request_uri='$request_uri'
scheme='$scheme'
#Creating the ssl directory for storing key and certificates
sudo rm -rf /etc/nginx/ssl
mkdir -p /etc/nginx/ssl
#Create directory for storing ssl Configuration Snippet
sudo rm -rf /etc/nginx/snippets
mkdir -p /etc/nginx/snippets
sudo chmod -R 662 /etc/nginx
#Creating ssl certificate/etc/ssl/private
sudo openssl req -x509 -nodes -days 365 -newkey rsa:2048 \
-subj "/C=US/ST=California/L=Santa Clara/O=Cavirin/OU=www/CN=Cavirin Inc." \
-keyout /etc/nginx/ssl/nginx-selfsigned.key -out /etc/nginx/ssl/nginx-selfsigned.crt
#sudo openssl req -new -newkey rsa:2048 -days 365 -nodes -x509 \
# -subj "/C=US/ST=California/L=Santa Clara/O=Cavirin/OU=www/CN=Cavirin Inc." \
# -keyout /etc/nginx/ssl/nginx.key -out /etc/nginx/ssl/nginx.crt
#Configuration Snippet Pointing to the SSL Key and Certificate:
#cat << SNIPPET_CONF | sudo tee /etc/nginx/snippets/self-signed.conf >& /dev/null
#ssl_certificate /etc/nginx/ssl/nginx-selfsigned.crt;
#ssl_certificate_key /etc/nginx/ssl/nginx-selfsigned.key;
#SNIPPET_CONF
#Snippet with Strong Encryption Settings
cat << PARAM_CONF | sudo tee /etc/nginx/snippets/ssl-params.conf >& /dev/null
# from https://cipherli.st/
# and https://raymii.org/s/tutorials/Strong_SSL_Security_On_nginx.html
ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
ssl_prefer_server_ciphers on;
ssl_ciphers "EECDH+AESGCM:EDH+AESGCM:AES256+EECDH:AES256+EDH";
ssl_ecdh_curve secp384r1;
ssl_session_cache shared:SSL:10m;
#ssl_session_tickets off;
ssl_stapling on;
ssl_stapling_verify on;
resolver 8.8.8.8 8.8.4.4 valid=300s;
resolver_timeout 5s;
# Disable preloading HSTS for now. You can use the commented out header line that includes
# the "preload" directive if you understand the implications.
#add_header Strict-Transport-Security "max-age=63072000; includeSubdomains; preload";
add_header Strict-Transport-Security "max-age=63072000; includeSubdomains";
add_header X-Frame-Options DENY;
add_header X-Content-Type-Options nosniff;
client_max_body_size 300M;
#ssl_dhparam /etc/ssl/certs/dhparam.pem;
PARAM_CONF
#REMOVING THE EXISTING default from /etc/nginx/sites-enable
if [ -f /etc/nginx/sites-enabled/pulsar_saas_ssl ]; then
echo "Removing /etc/nginx/sites-enabled/pulsar_saas_ssl"
sudo rm /etc/nginx/sites-enabled/pulsar_saas_ssl
fi
#REMOVING THE EXISTING default from /etc/nginx/sites-available
if [ -f /etc/nginx/sites-available/pulsar_saas_ssl ]; then
echo "Removing /etc/nginx/sites-available/pulsar_saas_ssl"
sudo rm -f /etc/nginx/sites-available/pulsar_saas_ssl
fi
#The main conf file for sites-available:
cat << NGINX_CONF | sudo tee /etc/nginx/sites-available/pulsar_saas_ssl >& /dev/null
server {
listen 80;
return 301 https://$host$request_uri;
}
server {
# SSL configuration
server_name SaaS.cavirin.com;
listen 443 ssl default_server;
listen [::]:443 ssl default_server;
#include snippets/self-signed.conf;
include snippets/ssl-params.conf;
location / {
try_files $uri $uri/ =404;
proxy_pass http://localhost:3000;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
#proxy_set_header X-Client-Verify SUCCESS;
#proxy_set_header X-Client-DN $ssl_client_s_dn;
#proxy_set_header X-SSL-Subject $ssl_client_s_dn;
#proxy_set_header X-SSL-Issuer $ssl_client_i_dn;
proxy_read_timeout 1800;
proxy_connect_timeout 1800;
access_log /var/log/nginx/home_ac.log;
error_log /var/log/nginx/home_er.log;
}
location /app/ {
# First attempt to serve request as file, then
# as directory, then fall back to displaying a 404.
#try_files / =404;
proxy_pass http://127.0.0.1:3000/app/;
access_log /var/log/nginx/app_ac.log;
error_log /var/log/nginx/app_er.log;
# Uncomment to enable naxsi on this location
# include /etc/nginx/naxsi.rules
}
#location /customer/ #{
# proxy_pass https://$instance_ip/pulsar/;
# access_log /var/log/nginx/customer_ac.log;
# error_log /var/log/nginx/customerer.log;
#}
location /css/ {
proxy_pass http://127.0.0.1:3000/css/;
access_log /var/log/nginx/css_ac.log;
error_log /var/log/nginx/css_er.log;
}
location /js/ {
proxy_pass http://127.0.0.1:3000/js/;
access_log /var/log/nginx/js_ac.log;
error_log /var/log/nginx/js_er.log;
}
ssl_certificate /etc/nginx/ssl/nginx-selfsigned.crt;
ssl_certificate_key /etc/nginx/ssl/nginx-selfsigned.key;
}
NGINX_CONF
sudo ln -s /etc/nginx/sites-available/pulsar_saas_ssl /etc/nginx/sites-enabled/pulsar_saas_ssl
sudo chmod -R 511 /etc/nginx
#Test Ngix New Config file syntax:
sudo nginx -t
#Restart nginx
sudo service nginx restart
echo "End of SSL Configuration"
| true
|
1041eeb3fffc3ef1c17a7e62b7fe86921f7cbc1d
|
Shell
|
phantoscope/xbaydns
|
/xbaydns/tools/master/Log2IPlist.sh
|
UTF-8
| 331
| 2.9375
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/sh
export PATH=$PATH:/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin
PPATH=`dirname $0`
if [ -f "$PPATH/../master.conf" ]; then
. $PPATH/../master.conf
fi
cd $PPATH
if cat ../slave/named/log/*.log > ../slave/named/log/dummy ; then
python logtolist.py ../slave/named/log/dummy ../iplist
rm -f ../slave/named/log/dummy
fi
| true
|
b986109977311c240545740904919b7615228f40
|
Shell
|
isabella232/freedom-sdk-utils
|
/scripts/create-project
|
UTF-8
| 5,788
| 3.765625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
set -e
set -o pipefail
help() {
cat >&2 <<EOF
$0: Create a software project direcrory for an overlay SiFive Freedom SDK
--help Prints this help text.
--project-path Specify project path for an overlay.
--project-name Specify project name for an overlay.
--overlay-name Specify overlay name installed.
--overlay-dts=*.dts The absolute path to the overlay device tree file added.
EOF
}
# Command-line argument handling
unset DTSFILE
unset OVERLAYNAME
unset PROJECTPATH
unset PROJECTNAME
while [[ "$1" != "" ]]
do
case "$1" in
--help) help "$0"; exit 0;;
--project-path=*) PROJECTPATH="$(echo "$1" | cut -d= -f2-)"; shift 1;;
--project-path) PROJECTPATH="$2"; shift 2;;
--project-name=*) PROJECTNAME="$(echo "$1" | cut -d= -f2-)"; shift 1;;
--project-name) PROJECTNAME="$2"; shift 2;;
--overlay-name=*) OVERLAYNAME="$(echo "$1" | cut -d= -f2-)"; shift 1;;
--overlay-name) OVERLAYNAME="$2"; shift 2;;
--overlay-dts=*) DTSFILE="$(echo "$1" | cut -d= -f2-)"; shift 1;;
--overlay-dts) DTSFILE="$2"; shift 2;;
*) echo "$0: Unknown argument $1" >&2; exit 1;;
esac
done
if [[ "$PROJECTNAME" == "" ]]
then
echo "[ERROR] $0: Must specify --project-name" >&2
help "$0"
exit 1
fi
if [[ "$DTSFILE" == "" && "$OVERLAYNAME" == "" ]]
then
echo "[ERROR] $0: Must specify either --overlay-name, or --overlay-dts" >&2
help "$0"
exit 1
fi
if [[ "$DTSFILE" != "" ]]
then
if [[ ! -f "$DTSFILE" && "$DTSFILE" != "*.dts" ]]
then
echo "[ERROR] $0: $DTSFILE must be a dts file" >&2
help "$0"
exit 1
fi
if [[ "$OVERLAYNAME" == "" ]]
then
dtsfile=$(basename $DTSFILE)
OVERLAYNAME="${dtsfile%.dts}"
echo "[INFO] $0: --overlay-name not provided, default to use $OVERLAYNAME" >&2
fi
fi
# Set up user project base directory
PROJECTDIR="$PROJECTPATH"
if [[ "$PROJECTDIR" == "" ]]
then
PROJECTDIR="EPREFIX/project"
fi
NAME="$PROJECTNAME"
# add a new project workspace if one not already exist
if test -d "${PROJECTDIR}/${NAME}"
then
echo "[ERROR] $0: Project ${PROJECTDIR}/${NAME} already exist. Delete or Provide a new project name" >&2
exit 1
fi
mkdir -p "${PROJECTDIR}/${NAME}"
# Create a sample Makefile for the project
cat > "${PROJECTDIR}/${NAME}/Makefile" << EOF
# Rename "hello" for your own application
PROGRAM ?= hello
MACHINE ?= $OVERLAYNAME
MEE ?= mee
# Redefine the bit-with per your design.info file
BITWIDTH ?= 32
# Add additional C or S files as needed
C_SRCS += src/main.c
ASM_SRCS+=
LINKER_SCRIPT = \
-Tenv/\$(MACHINE).lds
# Add additional header files in include dir as needed
INCLUDES = \
-Iinclude \
DEFINES = \
# CFLAGS is preset by Freedom SDK for mee environment. It is best to appends additional flags
CFLAGS = \
\$(INCLUDES) \$(DEFINES) -menv=\$(MEE) -mmachine=\$(MACHINE)
# LDFLAGS is preset by Freedom SDK for mee environment. It is best to appends additional flags
LDFLAGS = \
-Wl,-Map,\$(PROGRAM).map
GCC := riscv64-sifive-elf-gcc
GXX := riscv64-sifive-elf-g++
OBJDUMP := riscv64-sifive-elf-objdump
OBJCPY := riscv64-sifive-elf-objcopy
GDB := riscv64-sifive-elf-gdb
AR := riscv64-sifive-elf-ar
ELF2HEX := riscv64-sifive-elf-elf2hex
SIZE := riscv64-sifive-elf-size
DTC := dtc
LDSGEN := freedom-ldscript-generator
ASM_OBJS := \$(ASM_SRCS:.S=.o)
C_OBJS := \$(C_SRCS:.c=.o)
LINK_OBJS := \$(ASM_OBJS) \$(C_OBJS)
all: \$(PROGRAM).hex
env/\$(MACHINE).dtb: env/\$(MACHINE).dts
@echo " Compiling $<"
@\$(DTC) env/\$(MACHINE).dts -o \$@ -O dtb -I dts
env/\$(MACHINE).lds: env/\$(MACHINE).dtb
@echo " Generating $<"
@\$(LDSGEN) --dtb env/\$(MACHINE).dtb --linker \$@
\$(C_OBJS): %.o: %.c
@echo " CC $<"
@\$(GCC) \$(CFLAGS) -c \$(INCLUDES) -o \$@ $<
\$(ASM_OBJS): %.o: %.S
@echo " CC $<"
@\$(GCC) \$(CFLAGS) -c \$(INCLUDES) -o \$@ $<
\$(PROGRAM).elf : \$(ASM_OBJS) \$(C_OBJS) env/\$(MACHINE).lds
@echo Linking....
@\$(GCC) \$(CFLAGS) \$(LINKER_SCRIPT) \$(INCLUDES) \$(LINK_OBJS) -o \$@ \$(LDFLAGS)
@\$(OBJDUMP) -S \$(PROGRAM).elf > \$(PROGRAM).asm
@\$(SIZE) --format=berkeley \$(PROGRAM).elf
\$(PROGRAM).hex : \$(PROGRAM).elf
@echo Bitstreaming....
@\$(ELF2HEX) --input \$(PROGRAM).elf --bit-width \${BITWIDTH} --output \$(PROGRAM).hex
@rm -fr src/obj
@mkdir src/obj
@mv \$(PROGRAM).* src/obj/
@cp src/obj/\$(PROGRAM).hex src/obj/program.hex
@echo Completed \$@
clean :
@rm -f \$(C_OBJS) \$(ASM_OBJS) *.map *.elf *.asm
@rm -fr src/obj
#-------------------------------------------------------------
EOF
# Create a env dir for depositing dts and sample project linker files
mkdir -p "${PROJECTDIR}/${NAME}/env"
if [[ "$DTSFILE" != "" ]]
then
cp "$DTSFILE" "${PROJECTDIR}/${NAME}/env/${OVERLAYNAME}.dts"
else
cp "EPREFIX/usr/share/device-trees/${OVERLAYNAME}.dts" "${PROJECTDIR}/${NAME}/env/"
fi
# Create a simple sourceme to get into the project workspace
mkdir -p "${PROJECTDIR}/${NAME}/scripts"
cat > "${PROJECTDIR}/${NAME}/scripts/sourceme" << EOF
source EPREFIX/enter.bash
cd "${PROJECTDIR}/${NAME}"
EOF
# Create include dir for project
mkdir -p "${PROJECTDIR}/${NAME}/include"
cp EPREFIX/examples/hello/sifive-hifive1/src/config.h "${PROJECTDIR}/${NAME}/include/"
# Create a src dir for the project, a sample main.c from example/hello
mkdir -p "${PROJECTDIR}/${NAME}/src"
cp EPREFIX/examples/hello/sifive-hifive1/src/src/main.c "${PROJECTDIR}/${NAME}/src/"
#sed -i 's/#include <config.h>//' "${PROJECTDIR}/${NAME}/src/main.c"
| true
|
d53640ef5c2addf11b853749a4ed8396d3b8ebd3
|
Shell
|
aurex-linux/aurex-corporative-client
|
/install.mount/usr/sbin/aurex-cc-mount-settings
|
UTF-8
| 2,456
| 3.515625
| 4
|
[] |
no_license
|
#!/bin/sh
# -------
# File: aurex-cc-mount-settings
# Description: Generates pam_mount.conf.xml and aurex-umount configuration
# Author: Luis Antonio Garcia Gisbert <luisgg@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin St, Fifth Floor, Boston MA 02110-1301 USA
# --------
case "$1" in
remove|disable)
rm -f /var/lib/aurex-cc-mount-settings/pam_mount.conf.xml /etc/aurex-fs-utils/umount.d/aurex-cc-mount-settings.conf
aurex-transmute --module=cc-mount untransmute || true
;;
update|enable)
MOUNT_CFG_DIR="/etc/aurex-cc/mounts.d/"
TOP_FILE=/usr/share/aurex-cc-mount/pam_mount.conf.xml.top
BOTTOM_FILE=/usr/share/aurex-cc-mount/pam_mount.conf.xml.bottom
TMP_FILE1="$(tempfile)"
TMP_FILE2="$(tempfile)"
[ ! -r "$TOP_FILE" ] || cat "$TOP_FILE" >> "$TMP_FILE1"
cat <<EOF >> "$TMP_FILE1"
<!--
============= START MOUNTPOINTS AUTOGENERATED SECTION =============
-->
EOF
ls -1 "$MOUNT_CFG_DIR" |while read l; do
cat "$MOUNT_CFG_DIR/$l" >> "$TMP_FILE1"
sed -ne '/^<volume mountpoint=/{s%^<volume mountpoint=%%;s%[[:blank:]]\+.*$%%;s%"%%g;s%^%/%;p}' "$MOUNT_CFG_DIR/$l" >> "$TMP_FILE2"
done
cat <<EOF >> "$TMP_FILE1"
<!--
============= END MOUNTPOINTS AUTOGENERATED SECTION =============
-->
EOF
[ ! -r "$BOTTOM_FILE" ] || cat "$BOTTOM_FILE" >> "$TMP_FILE1"
cat "$TMP_FILE1" > /var/lib/aurex-cc-mount-settings/pam_mount.conf.xml
sort -u "$TMP_FILE2" > /etc/aurex-fs-utils/umount.d/aurex-cc-mount-settings.conf
rm -f "$TMP_FILE1" "$TMP_FILE2"
MOUNTP_CFG_DIR="/etc/aurex-cc/mountpoints.d/"
ls -1 "$MOUNTP_CFG_DIR" |while read l; do
sed -ne "\%^/%{s%[[:blank:]]*$%%;p}" "/etc/aurex-cc/mountpoints.d/$l" |while read L; do
mkdir -p "$L"
done
done
aurex-transmute --module=cc-mount transmute || true
;;
*)
echo "Usage: $(basename "$0") {update|remove}" >&2
exit 1
;;
esac
exit 0
| true
|
2719e8b9e35214119a41e58bf735d582243cb18a
|
Shell
|
ducduyn31/HasherSv
|
/start.sh
|
UTF-8
| 180
| 2.96875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
input=$1
while IFS= read -r line; do
for N in {1..9}; do
python runner.py -i "$line" -n "$N"
done
python runner.py -i "$line" -n 10
done <"$input"
| true
|
b59a6d3d22ad284155158a0b6c007b6040f1ef02
|
Shell
|
iridium-browser/iridium-browser
|
/native_client/tests/spec2k/run_wasm.sh
|
UTF-8
| 3,579
| 3.671875
| 4
|
[
"BSD-3-Clause",
"Zlib",
"Classpath-exception-2.0",
"BSD-Source-Code",
"LZMA-exception",
"LicenseRef-scancode-unicode",
"LGPL-3.0-only",
"LGPL-2.0-or-later",
"LicenseRef-scancode-philippe-de-muyter",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-intel-osl-1993",
"HPND-sell-variant",
"ICU",
"LicenseRef-scancode-protobuf",
"bzip2-1.0.6",
"Spencer-94",
"NCSA",
"LicenseRef-scancode-nilsson-historical",
"CC0-1.0",
"LicenseRef-scancode-proprietary-license",
"LGPL-2.1-only",
"LicenseRef-scancode-other-copyleft",
"GPL-2.0-or-later",
"NTP",
"GPL-2.0-only",
"LicenseRef-scancode-other-permissive",
"GPL-3.0-only",
"GFDL-1.1-only",
"W3C",
"LicenseRef-scancode-python-cwi",
"GCC-exception-3.1",
"BSL-1.0",
"Python-2.0",
"GPL-1.0-or-later",
"LGPL-2.1-or-later",
"LicenseRef-scancode-unknown-license-reference",
"CPL-1.0",
"GFDL-1.1-or-later",
"W3C-19980720",
"LGPL-2.0-only",
"LicenseRef-scancode-amd-historical",
"LicenseRef-scancode-ietf",
"SAX-PD",
"LicenseRef-scancode-x11-hanson",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"dtoa",
"MIT",
"LicenseRef-scancode-public-domain-disclaimer",
"PSF-2.0",
"LicenseRef-scancode-newlib-historical",
"LicenseRef-scancode-generic-exception",
"SMLNJ",
"HP-1986",
"LicenseRef-scancode-free-unknown",
"SunPro",
"MPL-1.1"
] |
permissive
|
#!/bin/bash
# Copyright 2016 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This script assumes the waterfall framework
# (https://github.com/WebAssembly/waterfall) has been downloaded and run, thus
# it is required that all tools necessary for wasm be already installed in the
# system and their relative paths to the waterfall directory follow the
# directory structure of the waterfall framework. Or you can download a prebuilt
# framework for Linux directly from the buildbot at https://wasm-stat.us/.
#
# Environment variable $WASM_INSTALL_DIR must point to the wasm install
# directory in the waterfall, which is WATERFALL_DIR/src/work/wasm-install where
# WATERFALL_DIR is the top waterfall directory.
# Some limitations of this framework:
#
# Spec2k uses a lot of file I/O (stdin/stdout, reading files, writing files)
# which doesn't really work in JavaScript. Emscripten deals with this by
# providing the --embed-file and --preload-file options to package input files
# into a virtual in-memory file system. --embed-file allows input files, stdin,
# and stdout to work in the standalone js shell, except that initialization of
# the file system is extremely slow and ruins any attempt to get reasonable
# timings. --preload-file works reasonably fast, but it only works when running
# in a browser and does not work with d8 shell. d8 shell supports read()
# function which can read a file, so this might be able to be fixed for d8 shell
# if Emscripten hooks up appropriate d8 functions. But it is not supported at
# the time of this writing and --embed-file is the only option.
#
# Currently Wasm does not build and run correctly for many of the SPEC
# benchmarks, which needs further investigation. In addition, 254.gap and
# 176.gcc do not (yet) work because Emscripten does not support setjmp. Also,
# currently wasm build fails if you override some of libc functions, because
# Emscripten does not use archives and has all its libc functions precompiled in
# ~/.emscripten_cache/wasm/libc.bc. 197.parser fails due to this reason
# (strncasecmp is duplicated).
#
# The other file I/O related problem is that it is not possible for the harness
# to inspect output files (which are only represented in-memory), so validation
# is only possible by looking at stdout. To create persistent data out of a
# session, emscripten provides two file system: NODEFS and IDBFS. However,
# NODEFS is only for use when running inside node.js, and IDBFS works only when
# running inside a browser.
#
# For the "train" versus "ref" runs, the run scripts generally copy or symlink
# files from the appropriate input directory into the current directory before
# running the binary. This approach doesn't work for Emscripten, where the files
# need to be prepackaged at build time. The prepackaging is done by
# concatenating all the files into one big file, and embedding offsets and
# lengths into the JavaScript code. This means we have to build separate
# versions for "train" and "ref". To avoid duplication, the input file
# preparation is refactored into a new prepare_input.sh script in each Spec2k
# component directory.
if [ -z $WASM_INSTALL_DIR ]; then
echo 'error: WASM_INSTALL_DIR is not set'
exit 1
fi
if [ -z $SCRIPTNAME ]; then
echo 'error: SCRIPTNAME is not set. Try running using run_all.sh.'
exit 1
fi
REFTRAIN=`echo $SCRIPTNAME | sed -e 's/.*run\.//' -e 's/\.sh$//'`
$WASM_INSTALL_DIR/bin/d8 --expose-wasm $(basename $1 .js).$REFTRAIN.js -- ${@:2}
| true
|
fa9c0a5b4197c9e1a25e6c951cc03f6b7e6bc1b7
|
Shell
|
mmellott/w-ilab-scripts
|
/functions
|
UTF-8
| 1,916
| 3.046875
| 3
|
[] |
no_license
|
#!/bin/bash
function for_each_node {
. /users/mmellott/w-ilab-scripts/nodes
for i in "${nodes[@]}"; do
echo "$i"
ssh "$i" "$@"
done
}
function node_map {
. /users/mmellott/w-ilab-scripts/nodes
for i in "${nodes[@]}"; do
echo "$i"
ssh "$i" "hostname && ifconfig | grep mp0 && sudo iw dev mp0 mpath dump"
echo
done
}
function get_ip_addr {
local num="$(hostname | cut -d'.' -f1 | sed -e 's|[^0-9]*0*||')"
echo "10.0.0.$num"
}
# wiphy <iface> <ip_addr>
function wiphy {
local iface="$1"
local ip_addr="$2"
ifconfig "$iface" "$ip_addr"
ifconfig "$iface" netmask 255.255.255.0
iwconfig "$iface" txpower 1
iwconfig "$iface" rate 6M fixed
}
# ibss <ip_addr>
function ibss {
iwconfig wlan0 mode ad-hoc
wiphy wlan0 "$1"
iw dev wlan0 ibss join mellott 5180 fixed-freq aa:bb:cc:dd:ee:ff
}
# mesh <ip_addr>
function mesh {
iw phy phy0 interface add mp0 type mp mesh_id mellott
wiphy mp0 "$1"
}
function copy_modules {
home=/users/fabrizio
#netdir="net-origin"
netdir="net-atlas"
ath_dir="ath"
driverdir="drivers/net/wireless/${ath_dir}/ath9k/"
driverdir2="drivers/net/wireless/${ath_dir}/"
#netdir="net-test"
cp $home/atlas/kernel-3.8.0-atlas/${netdir}/wireless/cfg80211.ko /lib/modules/3.8.0-37-generic/kernel/net/wireless/
cp $home/atlas/kernel-3.8.0-atlas/${netdir}/mac80211/mac80211.ko /lib/modules/3.8.0-37-generic/kernel/net/mac80211/
cp $home/atlas/kernel-3.8.0-atlas/${driverdir}/*.ko /lib/modules/3.8.0-37-generic/kernel/drivers/net/wireless/ath/ath9k/
cp $home/atlas/kernel-3.8.0-atlas/${driverdir2}/ath.ko /lib/modules/3.8.0-37-generic/kernel/drivers/net/wireless/ath/
rmmod ath9k ath9k_common ath9k_hw ath mac80211 cfg80211
modprobe ath9k
}
# run "iperf -D -s" on the other node
function myperf {
iperf -c "$1" -t 200 -i 2 -yC
}
| true
|
a4e901c02c10c9ba82ecb8eff45f40554b2ff0ab
|
Shell
|
whigg/SWARP-routines
|
/forecast_scripts/utility_scripts/check_ECMWF.sh
|
UTF-8
| 1,147
| 3.515625
| 4
|
[] |
no_license
|
Edir=/work/shared/nersc/ECMWFR_T799/
source $SWARP_ROUTINES/source_files/hex_vars.src
email=$(cat $FCemail)
hr=`date +%H`
if [ $hr -eq 23 ] || [ "$hr" == "00" ]
then
# rsync time
exit
fi
if [ $# -eq 0 ]
then
alert=0
else
alert=$1
fi
yr=`date +%Y`
correct=0
echo " " > txt
for vbl in D2M MSL T2M TCC U10M V10M
do
# file to check
ncfil=$Edir/ec_atmo_geo_la_${vbl}_$yr.nc
echo Checking $ncfil ... >> txt
# get number of recs
ss=`$ncdump -h $ncfil |grep UNLIMITED `
ss=($ss)
nrec=${ss[5]}
nrec=${nrec#(}
echo "Number of records : $nrec" >> txt
# desired number
FCdays=8
Nfc=$((FCdays*4+3))
jday=10#`date +%j`
nrec0=$(($Nfc+4*$jday))
echo "Expected number of records : $nrec0" >> txt
if [ $nrec -ge $nrec0 ]
then
correct=$((correct+1))
fi
done
# EMAIL
if [ $alert -eq 1 ]
then
if [ $correct -lt 6 ]
then
mail -s "Missing records in ECMWF forcing" $email < txt
fi
elif [ $alert -eq 2 ]
then
if [ $correct -eq 6 ]
then
mail -s "ECMWF forcing OK now" $email < txt
fi
else
cat txt
echo " "
fi
rm txt
| true
|
5a122d6ee1a51cb88c8b69d42d6346fcd89c60d4
|
Shell
|
haroldangenent/box
|
/provision/vhosts/hosts-clean.sh
|
UTF-8
| 280
| 3.109375
| 3
|
[
"MIT"
] |
permissive
|
. $(dirname $0)/hosts-get.sh
echo 'Cleaning up hostnames from /etc/hosts...'
hosts=$(getHosts)
ip=$(getIp)
for host in $hosts
do
host=$(echo "$host"|tr -d '[:space:]')
sudo sed -i '' "/$ip $host \# haroldangenent\/box/d" /etc/hosts
done
echo 'Finished cleaning /etc/hosts.'
| true
|
80b522fc7d65a1861e6a60d2bd4818c31b7336fd
|
Shell
|
appuio/charts
|
/appuio/mariadb-galera/hack/failover-testing/client.sh
|
UTF-8
| 467
| 2.59375
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
set -eu
kubectl run mariadb-client \
--rm --tty -i --restart='Never' \
--namespace default \
--labels="role=client" \
--image docker.io/bitnami/mariadb-galera:10.5.12-debian-10-r1 \
--command -- \
mysql -h ${1:-mariadb} -P 3306 -uroot \
-p$(kubectl get secret --namespace default mariadb -o jsonpath="{.data.mariadb-root-password}" | base64 --decode) \
my_database \
||:
kubectl delete pod/mariadb-client ||:
| true
|
25dce99f97aaa38573876139f2c18cc671fb3f92
|
Shell
|
amberlauer/automate_azure2
|
/automate_azure_calc/automate_azure_calc.sh2
|
UTF-8
| 1,493
| 2.734375
| 3
|
[] |
no_license
|
#!/bin/bash
#g++ -o automate_azure.o automate_azure.cpp
cwd=$(pwd)
#cd /home/amber/executables/root_6.16.00/install
cd /home/amber/executables/root-6.12.06/install
source ./bin/thisroot.sh
cd $cwd
#rm automate_azure_calc.o
g++ -o automate_azure_calc.o2 automate_azure_calc.cpp
echo "what to name the output file?"
read output_name
output_root=/home/amber/Documents/Tech/azure/
touch ${output_name}_calc_outputs.txt
res=0
echo "jpi1 jp2 jpi3 jpi4 res order" >> ${output_name}_calc_outputs.txt
for((a=0; a<=4; a++))
do
for((b=0; b<=4; b++))
do
for((c=0;c<=4; c++))
do
for((d=0; d<=4; d++))
do
rm current_work.azr
./automate_azure_calc.o2 $d $c $b $a $res
mv work_temp.azr current_work.azr
echo "$d $c $b $a $res " >> ${output_name}_calc_outputs.txt
./automate_azure_responses_calc.exp | grep -E 'Segment #1|WARNING' >> ${output_name}_calc_outputs.txt
done
done
done
#done
done
mv ${output_name}_calc_outputs.txt ${output_root}/outputs/
#WARNING: Denominator less than zero in E=6.95 MeV resonance transformation. Tranformation may not have been successful.
#cat $MESA_BASE/inlist_cluster >./inlist_cluster
#jpi=${i}p good_sets.txt)
# ./automate_azure.o $jpi 0e
| true
|
8601bfa12e4083f7e9099077515ae211f9e2c1c6
|
Shell
|
amarr/config
|
/.bash_profile
|
UTF-8
| 671
| 2.953125
| 3
|
[] |
no_license
|
export CLICOLOR=1
export LSCOLORS=ExFxCxDxBxegedabagacad
# Add git completion
if [ -e "/usr/local/git/contrib/completion/git-completion.bash" ] ; then
source "/usr/local/git/contrib/completion/git-completion.bash"
fi
# Add git branch to prompt
if [ -e "/usr/local/git/contrib/completion/git-prompt.sh" ] ; then
source "/usr/local/git/contrib/completion/git-prompt.sh"
PS1='\n\[\e[0;32m\]\u@\h \[\e[0;33m\]\w$(__git_ps1 " (%s)")\n\[\e[0m\]\$ '
else
PS1='\n\[\e[0;32m\]\u@\h \[\e[0;33m\]\w\n\[\e[0m\]\$ '
fi
# Support git autocomplete
if [ -f $(brew --prefix)/etc/bash_completion ]; then
. $(brew --prefix)/etc/bash_completion
fi
| true
|
c77e75ecaa99dbb5716952c977ce57557b8da76f
|
Shell
|
flavian-ndunda/Pluscoin
|
/qa/pull-tester/build-tests.sh.in.sh
|
UTF-8
| 1,287
| 3.546875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# CI build driver. NOTE: the @PACKAGE@ / @VERSION@ / @abs_top_srcdir@
# tokens are autoconf placeholders substituted when this .in file is
# processed; the script is not runnable before substitution.
# Param1: The prefix to mingw staging
# Param2: Path to java comparison tool
# Param3: Number of make jobs. Defaults to 1.
set -e
set -o xtrace
MINGWPREFIX=$1
JAVA_COMPARISON_TOOL=$2
JOBS=${3-1}
if [ $# -lt 2 ]; then
  echo "Usage: $0 [mingw-prefix] [java-comparison-tool] <make jobs>"
  exit 1
fi
DISTDIR=@PACKAGE@-@VERSION@
# Build + run the test suite from a pristine dist tree (catches files
# missing from the distribution tarball). set -e aborts on any failure.
cd @abs_top_srcdir@
make distdir
mv $DISTDIR linux-build
cd linux-build
./configure --with-comparison-tool="$JAVA_COMPARISON_TOOL"
make -j$JOBS
make check
#Test code coverage
cd @abs_top_srcdir@
make distdir
mv $DISTDIR linux-coverage-build
cd linux-coverage-build
./configure --enable-lcov --with-comparison-tool="$JAVA_COMPARISON_TOOL"
make -j$JOBS
make cov
# win32 build disabled until pull-tester has updated dependencies
##Test win32 build
#cd @abs_top_srcdir@
#make distdir
#mv $DISTDIR win32-build
#cd win32-build
#./configure --prefix=$MINGWPREFIX --host=i586-mingw32msvc --with-qt-bindir=$MINGWPREFIX/host/bin --with-qt-plugindir=$MINGWPREFIX/plugins --with-qt-incdir=$MINGWPREFIX/include --with-boost=$MINGWPREFIX --with-protoc-bindir=$MINGWPREFIX/host/bin --with-comparison-tool="$JAVA_COMPARISON_TOOL" CPPFLAGS=-I$MINGWPREFIX/include LDFLAGS=-L$MINGWPREFIX/lib
#make -j$JOBS
#make check
| true
|
19a60616477cfec155071a3dded725a77dfa8e76
|
Shell
|
clarkngo/bash-scripts
|
/0_bash_exercises/10_PrintFibonacciNumbers1
|
UTF-8
| 520
| 4.0625
| 4
|
[] |
no_license
|
#! /bin/bash
#: Title: printFibonacciNumbers - Print out Fibonacci numbers
#: Synopsis: printFibonacciNumber TOTAL_NUMBER
#: Date: 2019-07-21
#: Version: 1.0
#: Author: Clark Ngo
#: Options: null
printFibonacciNumbers() { #@ DESCRIPTION: print the first TOTAL_NUMBER Fibonacci numbers
                          #@ USAGE: printFibonacciNumbers TOTAL_NUMBER
    # Fix: all state is local so repeated calls cannot leak into or be
    # corrupted by global first/second/fn/i (the original clobbered globals).
    local first=0 second=1 fn i
    local n=${1:-0}   # default missing argument to 0 (no-op) instead of an arithmetic error
    echo 'The Fibonacci numbers: '
    for (( i=0; i<n; i++ ))
    do
        echo -n "$first "
        fn=$((first + second))
        first=$second
        second=$fn
    done
    echo ''
}
| true
|
5b19e6ce35b096d9a38f7fb85b35a957fc6425ca
|
Shell
|
lsteck/ibm-garage-iteration-zero
|
/install/install-tiles.sh
|
UTF-8
| 6,521
| 3.640625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
###################################################################################
# Register a collection of IBM Cloud Private Catalog Offerings within an Existing
# Catalog
#
# Author : Matthew Perrins, Sean Sundberg
# email : mjperrin@us.ibm.com, seansund@us.ibm.com
#
###################################################################################
echo "IBM Cloud Private Catalog Offering Creation!"
echo ""
echo "This will create or update a set of offering tiles in an existing catalog."
echo " CATALOG_NAME, API_KEY, GIT_REPO, VERSION, and OFFERINGS can all be provided via environment variables"
echo ""
# CATALOG_NAME input: taken from the environment when set, otherwise prompted for.
if [[ -z "${CATALOG_NAME}" ]]; then
  read -rp "Please provide the CATALOG_NAME: " CATALOG_NAME
  if [[ -n "${CATALOG_NAME}" ]]; then
    echo ""
  fi
else
  echo "Using CATALOG_NAME: ${CATALOG_NAME}"
fi
while [[ -z "${CATALOG_NAME}" ]]; do
  read -rp "  The CATALOG_NAME cannot be empty. Try again: " CATALOG_NAME
  if [[ -n "${CATALOG_NAME}" ]]; then
    echo ""
  fi
done
# input validation (-s: do not echo the API key to the terminal)
if [[ -z "${API_KEY}" ]]; then
  read -rsp "Please provide your API_KEY: " API_KEY
  echo ""
  if [[ -n "${API_KEY}" ]]; then
    echo ""
  fi
fi
while [[ -z "${API_KEY}" ]]; do
  read -rsp "  The API_KEY cannot be empty. Try again: " API_KEY
  echo ""
  if [[ -n "${API_KEY}" ]]; then
    echo ""
  fi
done
if [[ -z "${GIT_REPO}" ]]; then
  GIT_REPO="cloud-native-toolkit/ibm-garage-iteration-zero"
fi
echo "Using GIT_REPO: ${GIT_REPO}"
# input validation, Version is provided when the packaged release of this repository is created
if [[ -z "${VERSION}" ]]; then
  VERSION="latest"
fi
echo "Using VERSION: ${VERSION}"
# Resolve "latest" to a concrete tag, and derive the OFFERINGS list from the
# release's offering-*.json assets when not supplied explicitly.
if [[ "${VERSION}" == "latest" ]] || [[ -z "${OFFERINGS}" ]]; then
  echo "Retrieving version and offerings"
  if [[ "${VERSION}" == "latest" ]]; then
    RELEASE_URL="https://api.github.com/repos/${GIT_REPO}/releases/latest"
  else
    RELEASE_URL="https://api.github.com/repos/${GIT_REPO}/releases/tags/${VERSION}"
  fi
  RELEASE_JSON=$(curl -sL "${RELEASE_URL}" | jq -c '{tag_name, assets}')
  if [[ "${VERSION}" == "latest" ]]; then
    VERSION=$(echo "${RELEASE_JSON}" | jq -r '.tag_name')
    echo "  The latest version is $VERSION"
  fi
  if [[ -z "${OFFERINGS}" ]]; then
    OFFERINGS=$(echo "$RELEASE_JSON" | jq -r '.assets | .[] | .name' | grep -E "offering-.*json" | sed -E "s/.json$//g" | paste -sd "," -)
    echo "  Found offerings: ${OFFERINGS}"
  fi
fi
# Get a Bearer Token from IBM Cloud IAM
IAM_AUTH=$(curl -s -k -X POST \
  --header "Content-Type: application/x-www-form-urlencoded" \
  --header "Accept: application/json" \
  --data-urlencode "grant_type=urn:ibm:params:oauth:grant-type:apikey" \
  --data-urlencode "apikey=${API_KEY}" \
  "https://iam.cloud.ibm.com/identity/token")
# Extract the Bearer Token from IAM response
TOKEN=$(echo "${IAM_AUTH}" | jq '.access_token' | tr -d '"')
BEARER_TOKEN="Bearer ${TOKEN}"
# credentials to post data to cloudant for bulk document upload
# NOTE(review): ACURL is a command template expanded later via `eval` so the
# embedded single-quoted headers survive word-splitting — be careful editing it.
ACURL="curl -s -g -H 'Authorization: ${BEARER_TOKEN}' -H 'Content-Type: application/json'"
HOST="https://cm.globalcatalog.cloud.ibm.com/api/v1-beta"
TMP_DIR="./offerings-tmp"
if ! mkdir -p "${TMP_DIR}"; then
  echo "Error creating tmp dir: ${TMP_DIR}"
  exit 1
fi
# Get List of Catalogs and match to Catalog name
# If the catalog does not exist create it and use that GUID for the Offering Registration
echo "Retrieving Catalogs"
CATALOGS=$(eval "${ACURL}" -X GET "${HOST}/catalogs")
if [ -z "${CATALOGS}" ]; then
  echo "  Unable to retrieve catalogs. Check your API_KEY."
  exit 1
fi
# Lets find the Catalog Label and match it to the one we have passed in
# (each resource row is base64-encoded so it survives word-splitting in the for loop)
for row in $(echo "${CATALOGS}" | jq -r '.resources[] | @base64'); do
  _jq() {
    echo "${row}" | base64 --decode | jq -r "${1}"
  }
  # echo $(_jq '.label')
  if [[ "$(_jq '.label')" == "${CATALOG_NAME}" ]]; then
    CATALOG_ID=$(_jq '.id')
    echo "  Found catalog: ${CATALOG_NAME}"
  fi
done
# Lets check if we have a Catalog
if [[ -z "${CATALOG_ID}" ]]; then
  echo "Catalog does not exist, please create one with the IBM Console->Manage->Catalogs view "
  exit 1
fi
eval "${ACURL}" -X GET "${HOST}/catalogs/${CATALOG_ID}/offerings" | jq -r '.resources' > "${TMP_DIR}/existing-offerings.json"
# Define the Offering and relationship to the Catalog.
# For each offering: download its JSON, stamp in the catalog id, then either
# skip (version already present), append the new version (offering exists),
# or create the offering from scratch.
IFS=','; for OFFERING in ${OFFERINGS}; do
  echo "Retrieving offering: https://github.com/${GIT_REPO}/releases/download/${VERSION}/${OFFERING}.json"
  curl -sL "https://github.com/${GIT_REPO}/releases/download/${VERSION}/${OFFERING}.json" | \
    jq --arg CATALOG_ID "${CATALOG_ID}" '.catalog_id = $CATALOG_ID | .kinds[0].versions[0].catalog_id = $CATALOG_ID' \
    > "${TMP_DIR}/${OFFERING}.json"
  OFFERING_NAME=$(cat "${TMP_DIR}/${OFFERING}.json" | jq -r '.name')
  OFFERING_VERSION=$(cat "${TMP_DIR}/${OFFERING}.json" | jq -r '.kinds | .[] | .versions | .[] | .version' | head -1)
  echo "  Processing offering: ${OFFERING_NAME}"
  EXISTING_OFFERING=$(cat "${TMP_DIR}/existing-offerings.json" | jq -r --arg NAME "${OFFERING_NAME}" '.[] | select(.name == $NAME)')
  MATCHING_VERSION=$(echo "${EXISTING_OFFERING}" | jq -r --arg VERSION "${OFFERING_VERSION}" '.kinds | .[] | .versions | .[] | select(.version == $VERSION) | .version')
  if [[ -n "${MATCHING_VERSION}" ]]; then
    echo "  Nothing to do. Offering version already registered: ${OFFERING_VERSION}"
  elif [[ -n "${EXISTING_OFFERING}" ]]; then
    OFFERING_ID=$(echo "${EXISTING_OFFERING}" | jq -r '.id')
    NEW_VERSION=$(cat "${TMP_DIR}/${OFFERING}.json" | jq -r '.kinds | .[] | .versions')
    echo "${EXISTING_OFFERING}" | jq --argjson NEW_VERSION "${NEW_VERSION}" '.kinds[0].versions += $NEW_VERSION' > "${TMP_DIR}/${OFFERING}-update.json"
    echo "  Updating existing offering ${OFFERING_ID} in catalog ${CATALOG_ID} with new version: ${OFFERING_VERSION}"
    if eval ${ACURL} -L -X PUT "${HOST}/catalogs/${CATALOG_ID}/offerings/${OFFERING_ID}" --data "@${TMP_DIR}/${OFFERING}-update.json" 1> /dev/null 2> /dev/null; then
      echo "  Offering updated successfully"
    else
      echo "  Error updating offering: ${OFFERING_NAME} ${OFFERING_ID}"
    fi
  else
    echo "  Creating new ${OFFERING} offering in catalog ${CATALOG_ID}"
    if eval ${ACURL} -L -X POST "${HOST}/catalogs/${CATALOG_ID}/offerings" --data "@${TMP_DIR}/${OFFERING}.json" 1> /dev/null 2> /dev/null; then
      echo "  Offering created successfully"
    else
      echo "  Error creating offering: ${OFFERING_NAME}"
    fi
  fi
done
echo "Offering Registration Complete ...!"
rm -rf "${TMP_DIR}"
| true
|
7c9cdf1773b56fbbcbe432aa6a5188f6d0f4d00b
|
Shell
|
Gerling/habib
|
/Fredrik/Bash_script-master/Labb3/simplemath.sh
|
UTF-8
| 362
| 3.578125
| 4
|
[] |
no_license
|
#!/bin/bash
# Read two integers and report whether the first is >= or <= the second.
# (When they are equal the first branch wins, so ">= " is reported.)
clear
# Fix: the original used plain `echo "Simple math\n----"`, which prints the
# backslash-n literally (echo does not interpret escapes without -e).
printf 'Simple math\n------------------------------------------\n'
read -p "Type two numbers, separated with space: " number1 number2
# Default empty input to 0 so the quoted numeric tests below cannot fail
# with "unary operator expected" (the original's unquoted test errored out).
number1=${number1:-0}
number2=${number2:-0}
if [ "$number1" -ge "$number2" ]; then
    echo "Number one greater than or equal to Number two!"
elif [ "$number1" -le "$number2" ]; then
    echo "Number one is less than or equal to Number two!"
fi
sleep 2
clear
exit 0
| true
|
d975b0b8514699a0f8a1eaac889e4ef6ae49e670
|
Shell
|
SrinivasuluCharupally/expert_programming
|
/module-c-cpp-concepts-1/compare.sh
|
UTF-8
| 699
| 2.875
| 3
|
[] |
no_license
|
# Diff every run directory under backup_output_files_run2 against the
# reference frame from run "10"; all diff output is collected in compared.log
# (an empty log means every run matched the reference).
cd backup_output_files_run2 || exit 1
echo "$PWD"
# -f: do not error when the log from a previous run is absent
# (the original's bare `rm compared.log` printed an error on first use).
rm -f compared.log
for i in ./*
do
    echo "$i"
    # Compare the reference frame against every output variant of this run.
    # The original built each diff command in a string ($cmd) and relied on
    # word-splitting to run it; invoke diff directly instead.
    for out in Output_1024x768_f32.y Output1_1024x768_f32.y Output2_1024x768_f32.y Output3_1024x768_f32.y Output4_1024x768_f32.y
    do
        diff 10/Output_1024x768_f32.y "$i/$out" >> compared.log 2>&1
    done
done
| true
|
5c71b755d610cade6ad6a128c5f5c75ca399197c
|
Shell
|
david-barbion/system
|
/bash-wait/wait1.sh
|
UTF-8
| 884
| 3.921875
| 4
|
[] |
no_license
|
#!/bin/bash
# Demo: wait for background children by polling with `kill -0`.
# Trade-offs of this technique:
# + no external file
# + easy to implement
# - no return code after child execution
# - the infinite loop consumes too much CPU
# Start a few child processes
pids=""
for t in 3 5 4; do
sleep "$t" &
pids="$pids $!"
done
# Here, pids is a string containing all child pids
# Start an infinite loop, checking for exited children
while true; do
# if pids string still contains pids, check them
if [ -n "$pids" ] ; then
# check each pid one by one
for pid in $pids; do
echo "Checking the $pid"
# try to send a signal to the child (kill -0 sends no signal, it only
# checks existence); if the child has exited, remove its pid from pids string
kill -0 "$pid" 2>/dev/null || pids=$(echo $pids | sed "s/\b$pid\s*//")
done
else
# here all processes have exited
echo "All your process completed"
break
fi
done
# you go here once all processes have exited
|
4d95358c8630704a7457ad6f2d2dda3bda3797f7
|
Shell
|
kiannikzad/CS111-1
|
/Project4/B/smoketest.sh
|
UTF-8
| 404
| 3.53125
| 4
|
[] |
no_license
|
#!/bin/bash
# Smoke test for lab4b: drive it through a scripted START/STOP/OFF session,
# check it exits 0, and verify every command (plus SHUTDOWN) reached the log.
# Test the exit status directly in the `if` instead of the `cmd; [ $? -ne 0 ]`
# anti-pattern used by the original.
if { echo "START"; sleep 2; echo "STOP"; sleep 2; echo "OFF"; } | ./lab4b --log=log.txt
then
	echo "good return value!"
else
	echo "Error: program should have exited with 0"
fi
for c in START STOP OFF SHUTDOWN
do
	# -q: status-only grep; -- guards against patterns starting with '-'
	if grep -q -- "$c" log.txt
	then
		echo "$c was logged successfully!"
	else
		echo "failed to log $c command"
	fi
done
rm -f log.txt
|
e141284d24bee5c0110632b51c51711c7f6eac1b
|
Shell
|
awslabs/libfabric-ci-scripts
|
/nccl/common/nccl-common.sh
|
UTF-8
| 26,597
| 3.5
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Shared helpers for the NCCL/EFA CI: AMI baking, instance lifecycle and
# NCCL test-script generation. Most functions communicate through globals
# (SGId, subnet_ids, INSTANCE_IDS, ...) and require the AWS CLI plus
# WORKSPACE/BUILD_NUMBER/label set by the Jenkins job.
set -e
# Generate universally unique identifier
get_uniq_num() {
echo $(uuidgen)
}
# AMIs dict: region -> custom AMI id
declare -A AMIS
# Placement groups dict: subnet id -> placement group name
declare -A PGS
# List of aws regions where tests can be executed
aws_regions=('us-west-2' 'us-east-1')
# Number of GPUs on each test node
TEST_NODE_GPUS=8
NVIDIA_DRIVER_VERSION=450.80.02
NVIDIA_BASE_URL='https://us.download.nvidia.com/tesla'
NVIDIA_DRIVER_PATH="$NVIDIA_BASE_URL/$NVIDIA_DRIVER_VERSION/NVIDIA-Linux-x86_64-$NVIDIA_DRIVER_VERSION.run"
# Components installation prefixes.
# NOTE: single-quoted on purpose — the literal $HOME must survive until the
# string is expanded on the remote test node, not on the Jenkins host.
LIBFABRIC_INSTALL_PREFIX='$HOME/libfabric/install'
AWS_OFI_NCCL_INSTALL_PREFIX='$HOME/aws-ofi-nccl/install'
NCCL_INSTALL_PREFIX='$HOME/nccl'
# LD_LIBRARY_PATH for nccl tests
# TODO: Find alternative way for LD_LIBRARY_PATH construction
# custom_ld_library_path should be updated in case
# of any changes in components installation prefixes
custom_ld_library_path="$AWS_OFI_NCCL_INSTALL_PREFIX/lib/:`
`$NCCL_INSTALL_PREFIX/build/lib:`
`$LIBFABRIC_INSTALL_PREFIX/lib/:`
`/opt/amazon/openmpi/lib64:`
`/opt/amazon/openmpi/lib:\$LD_LIBRARY_PATH"
# Create (once) the temp files used to stage generated remote scripts/output.
set_jenkins_variables() {
tmp_script=${tmp_script:-$(mktemp -p $WORKSPACE)}
tmp_out=${tmp_out:-$(mktemp -p $WORKSPACE)}
}
# find_latest_ami NAME_PATTERN — print the id of the newest available
# Amazon-owned x86_64 AMI whose name matches *NAME_PATTERN*.
find_latest_ami() {
ami=$(aws ec2 describe-images --owners amazon --filters \
"Name=name,Values=*$1*" \
"Name=state,Values=available" "Name=architecture,Values="x86_64"" \
--query 'reverse(sort_by(Images, &CreationDate)[].ImageId)' \
--output text | awk '{print $1;}')
echo ${ami}
}
# set_aws_defaults REGION — point the AWS CLI at REGION and resolve the
# region's default VPC and the latest Deep Learning AMIs (globals:
# vpc_id_reg, ami_amzn, ami_ubuntu_16_04, ami_ubuntu_18_04).
set_aws_defaults() {
echo "==> Establishing default parameters for region: $1"
export AWS_DEFAULT_REGION=$1
#Use default vpc_id for each region
export vpc_id_reg=$(aws ec2 describe-vpcs --query "Vpcs[*].VpcId" --filters Name=isDefault,Values=true --output=text)
# The latest Deep Learning AMI (Amazon Linux 2) Image
ami_amzn=$(find_latest_ami "Deep Learning Base AMI (Amazon Linux 2)")
echo "==> Latest Deep Learning Base AMI (Amazon Linux): ${ami_amzn}"
# The latest Deep Learning AMI Ubuntu 16.04 Image
ami_ubuntu_16_04=$(find_latest_ami "Deep Learning AMI (Ubuntu 16.04)")
echo "==> Latest Deep Learning AMI (Ubuntu 16.04): ${ami_ubuntu_16_04}"
# The latest Deep Learning AMI Ubuntu 18.04 Image
ami_ubuntu_18_04=$(find_latest_ami "Deep Learning Base AMI (Ubuntu 18.04)")
echo "==> Latest Deep Learning Base AMI (Ubuntu 18.04): ${ami_ubuntu_18_04}"
}
# Pick instance types, retry budgets and the base AMI/ssh user for the
# distribution selected by the job's ${label}. Exits on an unknown label.
define_parameters() {
# Instance type for AMI preparation
instance_ami_type='c5n.18xlarge'
# Instance type for running NCCL tests
# p3dn.24xlarge instance type was previously used
# Changed to p4d.24xlarge due to capacity issue
instance_test_type='p4d.24xlarge'
create_instance_retries=10
instance_check_retries=10
ami_check_retries=20
ssh_check_retries=40
#Size in (B) used to filter busbw test result
test_b_size='1073741824'
if [[ "${label}" == 'alinux' ]]; then
ssh_user='ec2-user'
prep_ami=${ami_amzn}
elif [[ "${label}" == 'ubuntu_16.04' ]]; then
ssh_user='ubuntu'
prep_ami=${ami_ubuntu_16_04}
elif [[ "${label}" == 'ubuntu_18.04' ]]; then
ssh_user='ubuntu'
prep_ami=${ami_ubuntu_18_04}
else
echo "Unknown label"
exit 1
fi
}
# Create security group for NCCL testing (global: SGId). Allows all
# traffic within the group (required by EFA) plus inbound ssh.
create_efa_sg() {
SGId=$(aws ec2 create-security-group --group-name "EFA-enabled-sg-$(get_uniq_num)" \
--tag-specification "ResourceType=security-group,Tags=[{Key=Workspace,Value="${WORKSPACE}"},{Key=Build_Number,Value="${BUILD_NUMBER}"}]" \
--description "EFA-enabled security group" --vpc-id ${vpc_id_reg} --query "GroupId" --output=text)
echo "==> Setting rules for efa sg ${SGId}"
aws ec2 authorize-security-group-egress --group-id ${SGId} --protocol all --source-group ${SGId}
aws ec2 authorize-security-group-ingress --group-id ${SGId} --protocol all --source-group ${SGId}
aws ec2 authorize-security-group-ingress --port 22 --cidr 0.0.0.0/0 --protocol tcp --group-id ${SGId}
}
# Resolve the candidate subnets (global: subnet_ids) for the SG's VPC,
# restricted to AZs known to carry the needed capacity in us-west-2/us-east-1.
define_subnets() {
# Get a list of subnets within the VPC relevant to the SG
vpc_id=$(aws ec2 describe-security-groups \
--group-ids ${SGId} \
--query SecurityGroups[0].VpcId --output=text)
if [[ "${AWS_DEFAULT_REGION}" == 'us-west-2' ]]; then
subnet_ids=$(aws ec2 describe-subnets \
--filters "Name=availability-zone,Values=[us-west-2a,us-west-2b,us-west-2c]" \
"Name=vpc-id,Values=$vpc_id" \
--query "Subnets[*].SubnetId" --output=text)
elif [[ "${AWS_DEFAULT_REGION}" == 'us-east-1' ]]; then
subnet_ids=$(aws ec2 describe-subnets \
--filters "Name=availability-zone,Values=[us-east-1a,us-east-1b]" \
"Name=vpc-id,Values=$vpc_id" \
--query "Subnets[*].SubnetId" --output=text)
else
subnet_ids=$(aws ec2 describe-subnets \
--filters "Name=vpc-id,Values=$vpc_id" \
--query "Subnets[*].SubnetId" --output=text)
fi
}
# Convenience wrapper: parameters + security group + subnets for this region.
custom_instance_preparation() {
define_parameters
create_efa_sg
define_subnets
}
# delete_sg SG_ID — delete the given security group.
# No-ops (status 0) when the id is empty so cleanup paths stay safe.
delete_sg() {
echo "==> Deleting $1"
# Fix: quote "$1" — the original's unquoted `[ -z $1 ]` collapsed to the
# single-argument test `[ -z ]` (always true) when $1 was empty, and the
# unquoted aws argument would word-split on unusual input.
if [ -z "$1" ]; then
echo "SG $1 does not exist"
return 0
fi
aws ec2 delete-security-group --group-id "$1"
}
# create_instance SG_ID COUNT AMI_ID INSTANCE_TYPE — launch COUNT EFA-enabled
# instances, retrying across every candidate subnet and (on capacity-style
# errors) sleeping and starting over, up to create_instance_retries attempts.
# Outputs via globals: INSTANCE_IDS, create_instance_exit_code.
create_instance() {
INSTANCE_IDS=''
# AWS error codes that are worth retrying (capacity/throttling); any other
# failure aborts the run immediately.
SERVER_ERROR=(InsufficientInstanceCapacity RequestLimitExceeded ServiceUnavailable Unavailable Unsupported)
creation_attempts_count=0
error=1
# First half of the --network-interface JSON; the subnet is appended per
# iteration inside the loop.
network_interface="[{\"DeviceIndex\":0,\"DeleteOnTermination\":true,\"InterfaceType\":\"efa\",\"Groups\":[\"$1\"]"
addl_args=""
echo "==> Creating instances"
while [ ${error} -ne 0 ] && [ ${creation_attempts_count} -lt ${create_instance_retries} ]; do
for subnet in ${subnet_ids[@]}; do
if [ ${ENABLE_PLACEMENT_GROUP} -eq 1 ]; then
addl_args+=" --placement GroupName="${PGS["${subnet}"]}
fi
if [[ -n ${USER_DATA_FILE} && -f ${USER_DATA_FILE} ]]; then
addl_args+=" --user-data file://${USER_DATA_FILE}"
fi
error=1
# set +e: the file runs under errexit, but we need to inspect the
# run-instances exit code (2>&1 folds the CLI error text into INSTANCE_IDS
# so it can be matched against SERVER_ERROR below).
set +e
INSTANCE_IDS=$(aws ec2 run-instances \
--tag-specification "ResourceType=instance,Tags=[{Key=Workspace,Value="${WORKSPACE}"},{Key=Name,Value=Slave},{Key=Build_Number,Value="${BUILD_NUMBER}"}]" \
--image-id $3 \
--instance-type $4 \
--enable-api-termination \
--key-name ${slave_keypair} \
--network-interface ${network_interface}",\"SubnetId\":\"${subnet}\"}]" \
--count $2 \
--query "Instances[*].InstanceId" \
--output=text ${addl_args} 2>&1)
create_instance_exit_code=$?
echo "${INSTANCE_IDS}"
set -e
# If run-instances is successful break from both the loops, else
# find out whether the error was due to SERVER_ERROR or some other error
if [ $create_instance_exit_code -ne 0 ]; then
# If the error was due to SERVER_ERROR, set error=1 else for
# some other error set error=0
for code in ${SERVER_ERROR[@]}; do
if [[ "${INSTANCE_IDS}" == *${code}* ]]; then
error=1
break
else
error=0
fi
done
else
echo "==> Instances created: ${INSTANCE_IDS}"
break 2
fi
# If run-instances wasn't successful, and it was due to some other
# error, exit and fail the test.
if [ ${error} -eq 0 ]; then
exit ${create_instance_exit_code}
fi
done
sleep 2m
creation_attempts_count=$((creation_attempts_count+1))
done
}
# prepare_instance MODE NUM — end-to-end instance bring-up, iterating over
# aws_regions until one succeeds. MODE 'ami_instance' launches a single AMI
# build host; any other MODE launches NUM test nodes from the region's custom
# AMI and additionally runs ssh/GPU/dmesg health checks, recycling nodes that
# report fatal CUDA XID errors (codes 48/74).
prepare_instance() {
for region in ${aws_regions[@]}; do
# Set the default region
set_aws_defaults ${region}
custom_instance_preparation
echo "==> Launching instance in region ${AWS_DEFAULT_REGION}"
num_instances=$2
# HW CUDA errors: https://docs.nvidia.com/deploy/xid-errors/index.html
CUDA_HW_ERROR_CODES=(48 74)
INSTANCES=()
create_pg
create_instance_attempts=0
INSTANCE_STATE="unavailable"
while [ ${INSTANCE_STATE} != 'running' ] && [ ${create_instance_attempts} -lt ${create_instance_retries} ] ; do
if [ $1 == 'ami_instance' ] ; then
create_instance ${SGId} 1 ${prep_ami} ${instance_ami_type}
else
create_instance ${SGId} ${num_instances} ${AMIS["${AWS_DEFAULT_REGION}"]} ${instance_test_type}
fi
if [ ${create_instance_exit_code} -ne 0 ]; then
echo "==> Changing the region"
delete_pg
# Start over with new region (continue 3 jumps out to the region loop)
continue 3
else
INSTANCES=(${INSTANCE_IDS})
for INSTANCE_ID in ${INSTANCES[@]};do
test_instance_status $INSTANCE_ID
if [ ${INSTANCE_STATE} != "running" ]; then
terminate_instances
break
fi
done
if [ $1 != 'ami_instance' ] ; then
for INSTANCE_ID in ${INSTANCES[@]};do
test_ssh $INSTANCE_ID
run_nvidia_checks $INSTANCE_ID
sleep 1m
test_dmesg_errors $INSTANCE_ID
if [[ ! -z ${ERRORS} ]]; then
echo "XID errors: ${ERRORS}"
fi
for code in ${CUDA_HW_ERROR_CODES[@]}; do
if [[ "${ERRORS}" == *${code},* ]]; then
echo "!!!Node $INSTANCE_ID reports CUDA XID ${code} errors terminating the instances!!!"
terminate_instances
INSTANCE_STATE='terminated'
# Wait before creating new instance to avoid the same pool
sleep 2m
break 2
fi
done
done
fi
fi
create_instance_attempts=$((create_instance_attempts+1))
done
if [ ${INSTANCE_STATE} != 'running' ] ; then
echo "All attempts to create instance failed."
exit 1
fi
break
done
}
# Full AMI-bake pipeline: launch a build host, install the stack on it,
# snapshot it into a custom AMI and replicate that AMI to the other region.
ami_instance_preparation() {
prepare_instance 'ami_instance' 1
test_ssh ${INSTANCE_IDS}
# Install software and prepare custom AMI
prepare_ami "${PULL_REQUEST_REF}" "${PULL_REQUEST_ID}" "${TARGET_BRANCH}" "${TARGET_REPO}" "${PROVIDER}" "${LIBFABRIC_INSTALL_PREFIX}" "${AWS_OFI_NCCL_INSTALL_PREFIX}" "${NCCL_INSTALL_PREFIX}"
# Upload AMI to marketplace
create_ami ${INSTANCE_IDS}
# Copy ami to different region, required for region switch
copy_ami ${CUSTOM_AMI} ${AWS_DEFAULT_REGION}
}
# get_instance_ip INSTANCE_ID — print the instance's private IPv4 address.
get_instance_ip() {
instance_ip=$(aws ec2 describe-instances --instance-ids $1 \
--query "Reservations[*].Instances[*].PrivateIpAddress" \
--output=text)
echo ${instance_ip}
}
# get_public_dns INSTANCE_ID — print the instance's public DNS name.
get_public_dns() {
public_dns=$(aws ec2 describe-instances --instance-ids $1 \
--query 'Reservations[0].Instances[0].PublicDnsName' --output text)
echo ${public_dns}
}
# test_ssh INSTANCE_ID — poll (up to ssh_check_retries times, 5s apart) until
# the node accepts an ssh connection. Sets global PublicDNS as a side effect.
test_ssh() {
PublicDNS=$(get_public_dns $1)
host_ready=1
host_poll_count=0
set +e
while [ $host_ready -ne 0 ] && [ $host_poll_count -lt ${ssh_check_retries} ] ; do
echo "Waiting for host instance to become ready"
sleep 5
ssh -T -o ConnectTimeout=30 -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o BatchMode=yes \
-i "~/${slave_keypair}" ${ssh_user}@${PublicDNS} hostname
if [ $? -eq 0 ]; then
host_ready=0
fi
host_poll_count=$((host_poll_count+1))
done
echo "Host instance ssh exited with status ${host_ready}"
set -e
}
# test_dmesg_errors INSTANCE_ID — collect NVIDIA "Xid" lines from the node's
# dmesg into the global ERRORS (empty when none; `|| true` keeps errexit calm).
test_dmesg_errors() {
ERRORS=''
PublicDNS=$(get_public_dns $1)
ERRORS=$(ssh -T -o ConnectTimeout=30 -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o BatchMode=yes \
-i "~/${slave_keypair}" ${ssh_user}@${PublicDNS} dmesg | grep -e "Xid" || true)
echo "ERRORS: ${ERRORS}"
}
# Terminate everything listed in INSTANCE_IDS and wait for full termination.
terminate_instances() {
echo "==> Terminating instances ${INSTANCE_IDS[@]}"
if [[ ! -z ${INSTANCE_IDS[@]} ]]; then
aws ec2 terminate-instances --instance-ids ${INSTANCE_IDS[@]}
aws ec2 wait instance-terminated --instance-ids ${INSTANCE_IDS[@]}
fi
}
# Custom AMI preparation.
# prepare_ami REF PR_ID BRANCH REPO PROVIDER LIBFABRIC_PREFIX OFI_PREFIX NCCL_PREFIX
# Stages an env-var preamble plus prep_ami.sh into tmp_script and streams it
# to the build host over ssh ("bash -s" executes it from stdin).
prepare_ami() {
echo "==> Starting AMI preparation..."
cat <<-EOF > ${tmp_script}
export PULL_REQUEST_REF="$1"
export PULL_REQUEST_ID="$2"
export TARGET_BRANCH="$3"
export TARGET_REPO="$4"
export PROVIDER="$5"
export LIBFABRIC_INSTALL_PREFIX="$6"
export AWS_OFI_NCCL_INSTALL_PREFIX="$7"
export NCCL_INSTALL_PREFIX="$8"
EOF
cat $WORKSPACE/libfabric-ci-scripts/nccl/common/prep_ami.sh >> ${tmp_script}
ssh -T -o ConnectTimeout=30 -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o BatchMode=yes \
-i "~/${slave_keypair}" ${ssh_user}@${PublicDNS} "bash -s" < ${tmp_script}
}
# test_ami_status AMI_ID REGION — poll once a minute (up to ami_check_retries)
# until the AMI is "available"; fail the job otherwise.
test_ami_status() {
ami_status="unavailable"
check_attempts=0
while [ ${ami_status} != "available" ] && [ ${check_attempts} -lt ${ami_check_retries} ] ; do
sleep 1m
ami_status=$(aws ec2 describe-images --image-ids $1 --region $2 \
--query "Images[*].State" --output text)
check_attempts=$((check_attempts+1))
echo "$1 status: ${ami_status}"
echo "AMI status check attempts: ${check_attempts}"
done
if [ ${ami_status} != "available" ]; then
echo "There is a problem with ami $1 it still has ${ami_status} status after ${ami_check_retries} minutes"
exit 1
fi
}
# copy_ami AMI_ID SOURCE_REGION — copy the custom AMI to the other supported
# region (us-east-1 <-> us-west-2) and record it in the AMIS dict.
copy_ami() {
if [ $2 == 'us-east-1' ]; then
destination_region='us-west-2'
else
destination_region='us-east-1'
fi
COPIED_AMI=$(aws ec2 copy-image --source-image-id $1 --source-region $2 \
--region ${destination_region} --name "nccl-enabled-ami-$(get_uniq_num)" \
--output=text --query 'ImageId')
echo "==> Wait for image ${COPIED_AMI} to become available"
test_ami_status ${COPIED_AMI} ${destination_region}
AMIS["${destination_region}"]=${COPIED_AMI}
}
# create_ami INSTANCE_ID — snapshot the build host into a custom AMI, wait
# for it to become available, and record it in the AMIS dict (global CUSTOM_AMI).
create_ami() {
echo "==> Create custom AMI"
CUSTOM_AMI=$(aws ec2 create-image --instance-id $1 --name "nccl-enabled-ami-$(get_uniq_num)" \
--description "${WORKSPACE}_${BUILD_NUMBER}" --output=text --query 'ImageId')
echo "==> Wait for image ${CUSTOM_AMI} to become available"
test_ami_status ${CUSTOM_AMI} ${AWS_DEFAULT_REGION}
AMIS["${AWS_DEFAULT_REGION}"]=${CUSTOM_AMI}
}
# Deregister custom AMIs recorded in AMIS and delete their backing snapshots.
deregister_ami() {
if [[ -z ${AMIS[@]} ]]; then
return 0
fi
echo "==> Deregistering AMIs"
for region in ${!AMIS[@]}; do
snapshot=$(aws ec2 describe-images --image-ids ${AMIS[${region}]} --region ${region} --query "Images[*].BlockDeviceMappings[*].Ebs.SnapshotId" --output text)
aws ec2 deregister-image --image-id ${AMIS[${region}]} --region ${region}
echo "==> Deleting snapshot"
aws ec2 delete-snapshot --snapshot-id ${snapshot} --region ${region}
done
}
# test_instance_status INSTANCE_ID — poll once a minute until the instance is
# running (or terminally gone); terminates the fleet when it never settles.
# Result is published in the global INSTANCE_STATE.
test_instance_status() {
echo "==> Waiting for instance $1 to become available"
instance_status="unavailable"
check_attempts=0
while [[ ${instance_status} != "running" && ${instance_status} != "terminated" && ${instance_status} != "shutting-down" && ${check_attempts} -lt ${instance_check_retries} ]]; do
sleep 1m
instance_status=$(aws ec2 describe-instances --instance-ids $1 --query "Reservations[*].Instances[*].State.Name" --output text)
check_attempts=$((check_attempts+1))
echo "$1 status: ${instance_status}"
done
if [ ${instance_status} != "running" ] && [ ${instance_status} != "terminated" ] && [ ${instance_status} != "shutting-down" ]; then
echo "There is a problem with instance $1 it still has ${instance_status} status after ${check_attempts} minutes, terminating"
terminate_instances
instance_status='terminated'
fi
INSTANCE_STATE=${instance_status}
}
# Create placement groups for cluster to run NCCL test (skipped when
# ENABLE_PLACEMENT_GROUP is 0). Results go in the PGS dict keyed by subnet.
create_pg() {
if [ ${ENABLE_PLACEMENT_GROUP} -eq 0 ]; then
return 0
fi
echo "==> Creating placement group"
# We should have placement group for each subnet
# Once we try to create instance in particular subnet/AZ
# PG is tied to it and cannot be used in different AZs
for subnet in ${subnet_ids[@]}; do
PLACEMENT_GROUP="placement-group-$(get_uniq_num)"
placement_group_id=$(aws ec2 create-placement-group \
--group-name ${PLACEMENT_GROUP} \
--strategy cluster \
--tag-specification "ResourceType=placement-group,Tags=[{Key=Workspace,Value="${WORKSPACE}"},{Key=Build_Number,Value="${BUILD_NUMBER}"}]" \
--output=text --query 'PlacementGroup.GroupId')
if [ $? -eq 0 ]; then
echo "Placement group: ${PLACEMENT_GROUP} created."
fi
PGS["${subnet}"]=${PLACEMENT_GROUP}
done
}
# Delete every placement group recorded in PGS and empty the dict.
delete_pg() {
echo "==> Removing placement groups"
for placement_group in ${PGS[@]}; do
if [ -z ${placement_group} ]; then
echo "Placement group: ${placement_group} does not exist."
return 0
fi
echo "==> Removing placement group: ${placement_group}"
aws ec2 delete-placement-group --group-name ${placement_group}
done
# clearing the PGs dict
for key in ${!PGS[@]}; do
unset PGS["${key}"]
done
}
# Stage a remote script into tmp_script that generates a passphrase-less ssh
# keypair on the node (quoted "EOF": nothing is expanded locally).
generate_key() {
cat <<-"EOF" > ${tmp_script}
#!/bin/bash
echo "==> Generating key"
ssh-keygen -f ~/.ssh/id_rsa -N "" > /dev/null 2>&1
chmod 600 ~/.ssh/id_rsa
EOF
}
# install_nvidia_driver PUBLIC_DNS — build a remote script (unquoted heredoc
# first to bake in NVIDIA_DRIVER_PATH, quoted heredoc for the literal body)
# that installs the NVIDIA driver on the node if the module is not loaded,
# then stream it to the node over ssh.
install_nvidia_driver() {
# Install nvidia driver if it is missing
cat <<-EOF > ${tmp_script}
#!/bin/bash
NVIDIA_DRIVER_PATH="${NVIDIA_DRIVER_PATH}"
EOF
cat <<-"EOF" >> ${tmp_script}
echo "==> Checking if nvidia module is loaded"
/sbin/lsmod | grep nvidia > /dev/null 2>&1
if [ $? -eq 0 ]; then
echo "==> nvidia module is loaded"
exit 0
fi
echo "==> nvidia module is missing, installing..."
cd $HOME
curl -L -o ./nvidia_driver.run "${NVIDIA_DRIVER_PATH}"
sudo sh ./nvidia_driver.run --no-drm --disable-nouveau --dkms --silent --no-cc-version-check --install-libglvnd
echo "==> Verify that nvidia driver is functional after installation"
set -e
nvidia-smi -q | head
echo "==> Check nvidia driver version after installation"
cat /proc/driver/nvidia/version
EOF
ssh -T -o ConnectTimeout=30 -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o BatchMode=yes \
-i "~/${slave_keypair}" ${ssh_user}@$1 "bash -s" < ${tmp_script}
}
# run_nvidia_checks INSTANCE_ID — generate and remotely execute GPU health
# checks: expected GPU count, nvidia-smi, and the CUDA bandwidthTest sample
# against every NVIDIA PCI device.
run_nvidia_checks() {
cat <<-EOF > ${tmp_script}
#!/bin/bash
TEST_NODE_GPUS="${TEST_NODE_GPUS}"
EOF
cat <<-"EOF" >> ${tmp_script}
#!/bin/bash
echo "==> Running nvidia GPUs count check"
gpus_count=$(sudo lspci | grep "3D controller: NVIDIA Corporation Device" | wc -l)
if [[ "${gpus_count}" != "${TEST_NODE_GPUS}" ]]; then
echo "==> Nvidia GPUs is missing, on board: ${gpus_count}, should be ${TEST_NODE_GPUS}"
exit 1
fi
echo "==> Running basic nvidia devices check"
nvidia-smi
echo "==> Running bandwidthTest for each NVIDIA device"
sudo lspci | grep NVIDIA | cut -d" " -f 1 > devices.txt
readarray devices_arr < devices.txt
cd /usr/local/cuda/samples/1_Utilities/bandwidthTest
sudo make
for device in "${devices_arr[@]}"; do
./bandwidthTest --device ${device}
done
EOF
PDNS=$(get_public_dns $1)
ssh -T -o ConnectTimeout=30 -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o BatchMode=yes \
-i "~/${slave_keypair}" ${ssh_user}@${PDNS} "bash -s" < ${tmp_script}
}
# Stage the single-node aws-ofi-nccl unit-test driver into tmp_script
# (nccl_connection, ring, nccl_message_transfer via mpirun on one host).
generate_unit_tests_script_single_node() {
cat <<-EOF > ${tmp_script}
#!/bin/bash
PROVIDER="${PROVIDER}"
custom_ld_library_path="${custom_ld_library_path}"
EOF
cat <<-"EOF" >> ${tmp_script}
while true; do
echo "==> Executing Unit Tests for provider: "$PROVIDER""
echo "==> Running nccl_connection unit test"
set -xe
timeout 5m /opt/amazon/openmpi/bin/mpirun -n 2 \
-x FI_PROVIDER="$PROVIDER" -x FI_EFA_ENABLE_SHM_TRANSFER=0 \
-x LD_LIBRARY_PATH="${custom_ld_library_path}" \
-x RDMAV_FORK_SAFE=1 --mca pml ^cm \
--mca btl tcp,self --mca btl_tcp_if_exclude lo,docker0 \
--bind-to none ~/aws-ofi-nccl/install/bin/nccl_connection
echo "==> Running ring unit test"
timeout 5m /opt/amazon/openmpi/bin/mpirun -n 3 \
-x FI_PROVIDER="$PROVIDER" -x FI_EFA_ENABLE_SHM_TRANSFER=0 \
-x LD_LIBRARY_PATH="${custom_ld_library_path}" \
-x RDMAV_FORK_SAFE=1 --mca pml ^cm \
--mca btl tcp,self --mca btl_tcp_if_exclude lo,docker0 \
--bind-to none ~/aws-ofi-nccl/install/bin/ring
echo "==> Running nccl_message_transfer unit test"
timeout 5m /opt/amazon/openmpi/bin/mpirun -n 2 \
-x FI_PROVIDER="$PROVIDER" -x FI_EFA_ENABLE_SHM_TRANSFER=0 \
-x LD_LIBRARY_PATH="${custom_ld_library_path}" \
-x RDMAV_FORK_SAFE=1 --mca pml ^cm \
--mca btl tcp,self --mca btl_tcp_if_exclude lo,docker0 \
--bind-to none ~/aws-ofi-nccl/install/bin/nccl_message_transfer
set +x
break
done
EOF
}
# Multi-node variant of the unit-test driver: one rank per host (-N 1),
# hosts taken from the generated "hosts" file.
generate_unit_tests_script_multi_node() {
cat <<-EOF > ${tmp_script}
#!/bin/bash
PROVIDER="${PROVIDER}"
custom_ld_library_path="${custom_ld_library_path}"
EOF
cat <<-"EOF" >> ${tmp_script}
while true; do
echo "==> Executing Unit Tests for provider: "$PROVIDER""
echo "==> Running nccl_connection unit test"
set -xe
timeout 5m /opt/amazon/openmpi/bin/mpirun -n 2 -N 1 \
-x FI_PROVIDER="$PROVIDER" -x FI_EFA_ENABLE_SHM_TRANSFER=0 \
-x LD_LIBRARY_PATH="${custom_ld_library_path}" \
-x RDMAV_FORK_SAFE=1 --mca pml ^cm \
--mca btl tcp,self --mca btl_tcp_if_exclude lo,docker0 \
--bind-to none --tag-output --hostfile hosts ~/aws-ofi-nccl/install/bin/nccl_connection
echo "==> Running ring unit test"
timeout 5m /opt/amazon/openmpi/bin/mpirun -n 3 -N 1 \
-x FI_PROVIDER="$PROVIDER" -x FI_EFA_ENABLE_SHM_TRANSFER=0 \
-x LD_LIBRARY_PATH="${custom_ld_library_path}" \
-x RDMAV_FORK_SAFE=1 --mca pml ^cm \
--mca btl tcp,self --mca btl_tcp_if_exclude lo,docker0 \
--bind-to none --tag-output --hostfile hosts ~/aws-ofi-nccl/install/bin/ring
echo "==> Running nccl_message_transfer unit test"
timeout 5m /opt/amazon/openmpi/bin/mpirun -n 2 -N 1 \
-x FI_PROVIDER="$PROVIDER" -x FI_EFA_ENABLE_SHM_TRANSFER=0 \
-x LD_LIBRARY_PATH="${custom_ld_library_path}" \
-x RDMAV_FORK_SAFE=1 --mca pml ^cm \
--mca btl tcp,self --mca btl_tcp_if_exclude lo,docker0 \
--bind-to none --tag-output --hostfile hosts ~/aws-ofi-nccl/install/bin/nccl_message_transfer
set +x
break
done
EOF
}
# generate_nccl_test_script NUM_GPUS — stage the all_reduce_perf benchmark
# driver (8 ranks per node, ring algorithm, 8B..1GB sweep) into tmp_script.
generate_nccl_test_script() {
cat <<-EOF > ${tmp_script}
#!/bin/bash
PROVIDER="${PROVIDER}"
NUM_GPUS=$1
custom_ld_library_path="${custom_ld_library_path}"
EOF
cat <<-"EOF" >> ${tmp_script}
echo "Executing NCCL test.."
echo "==>The provider for test is: "$PROVIDER""
echo "==>The number of GPUs is: $NUM_GPUS"
set -xe
timeout 30m /opt/amazon/openmpi/bin/mpirun \
-x FI_PROVIDER="$PROVIDER" \
-x NCCL_ALGO=ring --hostfile $HOME/hosts \
-x FI_EFA_ENABLE_SHM_TRANSFER=0 \
-x LD_LIBRARY_PATH="${custom_ld_library_path}" \
-x FI_EFA_TX_MIN_CREDITS=64 \
-x RDMAV_FORK_SAFE=1 \
-x NCCL_DEBUG=INFO \
-n $NUM_GPUS -N 8 \
--mca btl tcp,self --mca btl_tcp_if_exclude lo,docker0 --mca pml ^cm \
--bind-to none $HOME/nccl-tests/build/all_reduce_perf -b 8 -e 1G -f 2 -g 1 -c 1 -n 100
set +x
EOF
}
# Check if EFA provider has been used during test execution ($1 = log file).
# Fix: this file runs under `set -e`, so the original pattern
#   grep ... > /dev/null; if [ $? -ne 0 ]
# aborted the script at the grep itself whenever the pattern was absent,
# skipping the diagnostic message. Testing the grep directly in the `if`
# is exempt from errexit, so the message and explicit exit 1 now run.
check_allperf_efa_usage() {
if ! grep -q "Selected Provider is efa" "$1"; then
echo "EFA PROVIDER has not been used during the test"
exit 1
fi
}
# Exit-time cleanup: in every supported region, terminate any instances and
# delete any security groups tagged with this job's Workspace/Build_Number,
# then deregister the custom AMIs and remove placement groups.
on_exit() {
# Cleanup instances, SGs, PGs after test
for reg in ${aws_regions[@]}; do
INSTANCE_IDS=($(aws --region ${reg} ec2 describe-instances --filters "[{\"Name\":\"instance-state-name\",\"Values\":[\"pending\",\"running\",\"stopped\"]},{\"Name\":\"tag:Workspace\",\"Values\":[\"${WORKSPACE}\"]},{\"Name\":\"tag:Build_Number\",\"Values\":[\"${BUILD_NUMBER}\"]}]" --query "Reservations[*].Instances[*].InstanceId" --output text))
INSTANCE_IDS_SIZE=${#INSTANCE_IDS[@]}
SG_IDS=($(aws --region ${reg} ec2 describe-security-groups --filters "[{\"Name\":\"tag:Workspace\",\"Values\":[\"${WORKSPACE}\"]},{\"Name\":\"tag:Build_Number\",\"Values\":[\"${BUILD_NUMBER}\"]}]" --query "SecurityGroups[*].{Name:GroupId}" --output text))
SG_IDS_SIZE=${#SG_IDS[@]}
if [ ${INSTANCE_IDS_SIZE} -ne 0 ]; then
aws --region ${reg} ec2 terminate-instances --instance-ids ${INSTANCE_IDS[@]}
aws --region ${reg} ec2 wait instance-terminated --instance-ids ${INSTANCE_IDS[@]}
fi
if [ ${SG_IDS_SIZE} -ne 0 ]; then
for sg in ${SG_IDS[@]}; do
aws --region ${reg} ec2 delete-security-group --group-id ${sg}
done
fi
done
deregister_ami
delete_pg
}
| true
|
d1c9cf89efdad7a752fa73d6764b1c66c8264f09
|
Shell
|
korosuke613/dotfiles
|
/mac/zsh/.zshrc.auto_renice_fast
|
UTF-8
| 671
| 3.421875
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env zsh
# Keep the macOS Japanese IME helper processes at a high scheduling
# priority by renicing them to NICE_VALUE whenever they drift.
NICE_VALUE="-15"
APPS=(
"JapaneseIM-RomajiTyping.app" # macOS built-in Japanese input method (IME)
"TextInputMenuAgent.app" # macOS built-in Japanese input method (IME)
"TextInputSwitcher.app" # macOS built-in Japanese input method (IME)
"EmojiFunctionRowIM.app" # macOS built-in Japanese input method (IME)
)
for app in "${APPS[@]}"
do
pids=($(pgrep -f "${app}"))
# NOTE(review): $? here is intended to reflect pgrep's exit status via the
# command substitution (1 = no match); pgrep can also exit >1 on error,
# which this test treats as "process exists" — confirm that's intended.
if [ $? -ne 1 ]; then # check whether any matching process exists
now_nice=$(ps -p "${pids[*]}" -o nice | tail -1)
if [ "${now_nice}" -ne $NICE_VALUE ]; then
# renice only if the app's nice value has not been updated yet
echo -e "\e[33mRun renice ${app} \e[m"
sudo renice $NICE_VALUE "${pids[@]}"
fi
fi
done
| true
|
6293ec7981873ed73e22e45ddfc2bea6386d63f3
|
Shell
|
cristian0497/holberton-system_engineering-devops
|
/0x04-loops_conditions_and_parsing/5-4_bad_luck_8_is_your_chance
|
UTF-8
| 292
| 3.515625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Print "Holberton School" ten times, except iteration 4 ("bad luck")
# and iteration 8 ("good luck").
for i in {1..10}
do
    case "$i" in
        4) echo "bad luck" ;;
        8) echo "good luck" ;;
        *) echo "Holberton School" ;;
    esac
done
| true
|
1d30119f1c734ee2da010c720dc9f06a240593bc
|
Shell
|
jackdoe/zr-public
|
/rfc/rfc.sh
|
UTF-8
| 999
| 3.484375
| 3
|
[] |
no_license
|
# Build a searchable RFC index: download the full RFC archive, derive a
# citation graph (RFC x mentions RFC y), rank it with `pagerank`, then feed
# each RFC plus its popularity score into the zr indexer.
root=$(dirname $0)
mkdir -p $root/data
# Download the archive only once; subsequent runs reuse the cached tarball.
if [ ! -f $root/data/RFC.tar.gz ]; then
curl -sL https://www.rfc-editor.org/in-notes/tar/RFC-all.tar.gz > $root/data/RFC.tar.gz
fi
pushd $root/data
tar --wildcards '*.txt' -xzvf RFC.tar.gz
popd
echo > data/pagerank.in
# Emit one "source target" edge per cross-reference. "RFC 1234" mentions are
# rewritten to the corresponding filename via a _ZR_ marker.
# NOTE(review): `tr "_ZR_" "\n"` translates *characters* (_, Z, R), not the
# 4-char string — it appears to work because rg re-filters the fragments, but
# confirm this is the intended mechanism.
for rfc in $(ls -1 $root/data/ | grep txt); do
echo $rfc
for row in $(cat $root/data/$rfc | sed -r 's/RFC ([0-9]+)/_ZR_rfc\1.txt_ZR_/g' | tr "_ZR_" "\n" | rg -w 'rfc\d+.txt' | grep -v $rfc); do
echo $rfc $row >> $root/data/pagerank.in
done
done
cat data/pagerank.in | pagerank -int > data/pagerank
# Index every RFC, using its pagerank score (default 0) as the popularity.
for rfc in $(ls -1 $root/data/ | grep txt); do
score=$(cat $root/data/pagerank | grep -w $rfc | cut -f 1 -d ' ' | head -1)
popularity=${score:-0}
echo $rfc score: $popularity
zr-stdin -k rfc -root $root/../public/ -title $rfc -id $rfc -popularity $popularity -file $root/data/$rfc
done
zr-reindex -k rfc -root $root/../public
tar -czvf $root/../dist/rfc.tar.gz $root/../public/rfc
| true
|
573111b457e07b38620e8e158a4d7d8c3e068be2
|
Shell
|
JessieAMorris/wd
|
/wait
|
UTF-8
| 155
| 3.046875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#! /bin/bash
# Keep drawing a random number in [0,100) and sleep 60s between draws
# until it falls below 20.
# Fix: use the standard $(( ... )) arithmetic expansion instead of the
# deprecated $[ ... ] form, and quote the test operand.
r=$(( RANDOM % 100 ))
echo got $r
while [ "$r" -ge 20 ]
do
	echo 'waiting 60 sec'
	sleep 60s
	r=$(( RANDOM % 100 ))
	echo got $r
done
| true
|
d0b422dd4c6145585d8cb5e0efcb5716daf88820
|
Shell
|
swirepe/personalscripts
|
/sagify
|
UTF-8
| 1,569
| 3.96875
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Install apt packages with apt-fast and record the machine's resulting
# package selection in the personal "machines" git repository.
# sudo apt-get update ; sudo apt-fast install -y
# if apt-fast isn't installed, install it (bootstraps via the `sagi` helper)
if [[ -z "$(which apt-fast)" ]]
then
echo "[sagify] apt-fast not installed: installing."
sudo add-apt-repository ppa:apt-fast/stable
sagi apt-fast
fi
CURRDIR=$(pwd)
# Sync (clone if needed) the machines repo and ensure this host's apt record
# directory exists. Sets SAGIRECORD_DIR for the caller.
function update_machines {
if [[ -z "$PERS_DIR" ]]
then
PERS_DIR="$HOME/pers"
fi
if [[ -d $PERS_DIR/machines ]]
then
echo "Updating machines repository."
cd $PERS_DIR/machines
git pull --no-edit origin master
else
echo "No machines repository found. Cloning."
git clone git@bitbucket.org:swirepe/machines.git $PERS_DIR/machines
fi
SAGIRECORD_DIR="$PERS_DIR/machines/apt/$(hostname)"
if [ ! -d $SAGIRECORD_DIR ]
then
echo "[sagi] $SAGIRECORD_DIR does not exist: creating."
mkdir -p $SAGIRECORD_DIR
fi
}
# Keep flags (-*) and not-yet-installed packages; warn about the rest.
for package in $@
do
if [[ "$package" == "-"* ]] || ! dpkg -s $package &> /dev/null
then
packages+=($package)
else
echo "Warning: Package already installed: $package" >/dev/stderr
fi
done
if [ ${#packages[@]} = 0 ]
then
echo "Nothing to install. Exiting."
exit 0
else
echo "Installing packages: ${#packages[@]}"
fi
sudo apt-fast update
# Only on a successful install: refresh the repo and commit the new
# package-selection snapshot (subshell keeps the cd from leaking).
sudo apt-fast install -y ${packages[@]} && (
update_machines &&
cd "$SAGIRECORD_DIR" &&
dpkg --get-selections > aptlist.txt &&
git add aptlist.txt &&
git commit -m "sagify $(hostname) $*" &&
git push origin master
)
cd "$CURRDIR"
| true
|
a5ad6ff0b55f593f5da7b04d19dd6fe176016272
|
Shell
|
barryk/arch-osx
|
/libxml2/PKGBUILD
|
UTF-8
| 995
| 2.71875
| 3
|
[] |
no_license
|
# PKGBUILD autocreated by ABStoOSX 0.1
# ArchLinux Maintainer: Jan de Groot <jgc@archlinux.org>
# Contributor: John Proctor <jproctor@prium.net>
pkgname=libxml2
pkgver=2.7.2
pkgrel=1
pkgdesc="XML parsing library, version 2"
arch=('macx86')
license=('custom')
depends=('zlib>=1.2.3.3' 'libiconv')
optdepends=('readline>=5.2-8: xmlinit xmlcatalog'
'ncurses>=5.6-7'
'gettext'
'python: Many tools')
makedepends=('python' 'gettext' 'ncurses' 'readline')
options=('!libtool')
url="http://www.xmlsoft.org/"
source=(ftp://ftp.xmlsoft.org/${pkgname}/${pkgname}-${pkgver}.tar.gz)
build() {
cd "${srcdir}/${pkgname}-${pkgver}"
# No large64 patch needed. Mac OS X standard functions (fopen etc) are 64bit
./configure --prefix=/opt/arch --with-threads --with-history || return 1
make || return 1
make DESTDIR="${pkgdir}" install || return 1
install -m755 -d "${pkgdir}/opt/arch/share/licenses/${pkgname}"
install -m644 COPYING "${pkgdir}/opt/arch/share/licenses/${pkgname}/" || return 1
}
| true
|
03878097f7fc9cdbcbfda5de4ac8a661e9bb112b
|
Shell
|
thaidn/tink
|
/examples/cc/digital_signatures/digital_signatures_cli_test.sh
|
UTF-8
| 4,226
| 3.765625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#############################################################################
#### Tests for digital_signatures_cli binary.
SIGNATURE_CLI="$1"
PRIVATE_KEYSET_FILE="$TEST_TMPDIR/private_keyset.bin"
PUBLIC_KEYSET_FILE="$TEST_TMPDIR/public_keyset.bin"
MESSAGE_FILE="$TEST_TMPDIR/message.txt"
SIGNATURE_FILE="$TEST_TMPDIR/signature.bin"
RESULT_FILE="$TEST_TMPDIR/result.txt"
OTHER_PRIVATE_KEYSET_FILE="$TEST_TMPDIR/other_private_keyset.bin"
OTHER_PUBLIC_KEYSET_FILE="$TEST_TMPDIR/other_public_keyset.bin"
OTHER_MESSAGE_FILE="$TEST_TMPDIR/other_message.txt"
echo "This is a message." > $MESSAGE_FILE
echo "This is a different message." > $OTHER_MESSAGE_FILE
#############################################################################
#### Helper function that checks if values are equal.
assert_equal() {
if [ "$1" == "$2" ]; then
echo "+++ Success: values are equal."
else
echo "--- Failure: values are different. Expected: [$1], actual: [$2]."
exit 1
fi
}
#############################################################################
#### All good, everything should work.
test_name="all_good"
echo "+++ Starting test $test_name..."
#### Generate a private key and get a public key.
$SIGNATURE_CLI gen-private-key $PRIVATE_KEYSET_FILE || exit 1
$SIGNATURE_CLI get-public-key $PRIVATE_KEYSET_FILE $PUBLIC_KEYSET_FILE || exit 1
#### Sign the message.
$SIGNATURE_CLI sign $PRIVATE_KEYSET_FILE $MESSAGE_FILE $SIGNATURE_FILE || exit 1
#### Verify the signature.
$SIGNATURE_CLI verify $PUBLIC_KEYSET_FILE $MESSAGE_FILE $SIGNATURE_FILE $RESULT_FILE || exit 1
#### Check that the signature is valid.
RESULT=$(<$RESULT_FILE)
assert_equal "valid" "$RESULT"
#############################################################################
#### Bad private key when getting the public key.
test_name="get_public_key_with_bad_private_key"
echo "+++ Starting test $test_name..."
echo "abcd" >> $PRIVATE_KEYSET_FILE
$SIGNATURE_CLI get-public-key $PRIVATE_KEYSET_FILE $PUBLIC_KEYSET_FILE
EXIT_VALUE="$?"
assert_equal 1 "$EXIT_VALUE"
#############################################################################
#### Different public key when verifying a signature.
test_name="verify_with_different_public_key"
echo "+++ Starting test $test_name..."
$SIGNATURE_CLI gen-private-key $PRIVATE_KEYSET_FILE || exit 1
$SIGNATURE_CLI gen-private-key $OTHER_PRIVATE_KEYSET_FILE || exit 1
$SIGNATURE_CLI get-public-key $OTHER_PRIVATE_KEYSET_FILE $OTHER_PUBLIC_KEYSET_FILE || exit 1
$SIGNATURE_CLI sign $PRIVATE_KEYSET_FILE $MESSAGE_FILE $SIGNATURE_FILE || exit 1
$SIGNATURE_CLI verify $OTHER_PUBLIC_KEYSET_FILE $MESSAGE_FILE $SIGNATURE_FILE $RESULT_FILE || exit 1
RESULT=$(<$RESULT_FILE)
assert_equal "invalid" "$RESULT"
#############################################################################
#### Different message when verifying a signature.
test_name="verify_with_different_message"
echo "+++ Starting test $test_name..."
$SIGNATURE_CLI gen-private-key $PRIVATE_KEYSET_FILE || exit 1
$SIGNATURE_CLI get-public-key $PRIVATE_KEYSET_FILE $PUBLIC_KEYSET_FILE || exit 1
$SIGNATURE_CLI sign $PRIVATE_KEYSET_FILE $MESSAGE_FILE $SIGNATURE_FILE || exit 1
$SIGNATURE_CLI verify $PUBLIC_KEYSET_FILE $OTHER_MESSAGE_FILE $SIGNATURE_FILE $RESULT_FILE || exit 1
RESULT=$(<$RESULT_FILE)
assert_equal "invalid" "$RESULT"
#############################################################################
#### Sign with wrong key.
test_name="sign_with_wrong_key"
echo "+++ Starting test $test_name..."
$SIGNATURE_CLI gen-private-key $PRIVATE_KEYSET_FILE || exit 1
$SIGNATURE_CLI get-public-key $PRIVATE_KEYSET_FILE $PUBLIC_KEYSET_FILE || exit 1
$SIGNATURE_CLI sign $PUBLIC_KEYSET_FILE $MESSAGE_FILE $SIGNATURE_FILE
EXIT_VALUE="$?"
assert_equal 1 "$EXIT_VALUE"
#############################################################################
#### Verify with wrong key.
test_name="verify_with_wrong_key"
echo "+++ Starting test $test_name..."
$SIGNATURE_CLI gen-private-key $PRIVATE_KEYSET_FILE || exit 1
$SIGNATURE_CLI sign $PRIVATE_KEYSET_FILE $MESSAGE_FILE $SIGNATURE_FILE || exit 1
$SIGNATURE_CLI verify $PRIVATE_KEYSET_FILE $MESSAGE_FILE $SIGNATURE_FILE $RESULT_FILE
EXIT_VALUE="$?"
assert_equal 1 "$EXIT_VALUE"
| true
|
17609b3ecf4deca40b8e5e7c60493847f996bf76
|
Shell
|
slackpanos/SlackOnly-SlackBuilds
|
/games/xarchon/xarchon.SlackBuild
|
UTF-8
| 4,522
| 3.375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Slackware build script for xarchon
# Written by B. Watson (yalhcru@gmail.com)
# Licensed under the WTFPL. See http://www.wtfpl.net/txt/copying/ for details.
# note: xarchon plays Archon and Archon II: Adept. I can't tell if the
# Adept game is complete or correct, because I never could understand
# how to play it, even as a video-game-obsessed kid in the 1980s.
# further note, written later: no, Adept is nowhere near complete or
# playable. So I'm patching the game to remove the menu option for it.
PRGNAM=xarchon
VERSION=${VERSION:-0.60}
BUILD=${BUILD:-2}
TAG=${TAG:-_SBo}
if [ -z "$ARCH" ]; then
case "$( uname -m )" in
i?86) ARCH=i586 ;;
arm*) ARCH=arm ;;
*) ARCH=$( uname -m ) ;;
esac
fi
CWD=$(pwd)
TMP=${TMP:-/tmp/SBo}
PKG=$TMP/package-$PRGNAM
OUTPUT=${OUTPUT:-/tmp}
if [ "$ARCH" = "i586" ]; then
SLKCFLAGS="-O2 -march=i586 -mtune=i686"
LIBDIRSUFFIX=""
elif [ "$ARCH" = "i686" ]; then
SLKCFLAGS="-O2 -march=i686 -mtune=i686"
LIBDIRSUFFIX=""
elif [ "$ARCH" = "x86_64" ]; then
SLKCFLAGS="-O2 -fPIC"
LIBDIRSUFFIX="64"
else
SLKCFLAGS="-O2"
LIBDIRSUFFIX=""
fi
set -e
rm -rf $PKG
mkdir -p $TMP $PKG $OUTPUT
cd $TMP
rm -rf $PRGNAM-$VERSION
tar xvf $CWD/$PRGNAM-$VERSION.tar.gz
cd $PRGNAM-$VERSION
chown -R root:root .
find -L . \
\( -perm 777 -o -perm 775 -o -perm 750 -o -perm 711 -o -perm 555 \
-o -perm 511 \) -exec chmod 755 {} \; -o \
\( -perm 666 -o -perm 664 -o -perm 640 -o -perm 600 -o -perm 444 \
-o -perm 440 -o -perm 400 \) -exec chmod 644 {} \;
# disable Adept menu option, since it's non-playable.
patch -p1 < $CWD/patches/no_adept.diff
# hammer old C++ code into building on gcc-5.3, plus autoconf fixes
# to get qt3 detected & linked properly.
patch -p1 < $CWD/patches/compilefix.diff
# theoretically xarchon can be built with gtk+-1.x, but it requires an
# ancient gtk1-compatible version of glade... plus the gtk1 stuff won't
# compile under gcc-5.3. went through the trouble of patching the qt UI,
# so we don't bother with gtk1.
patch -p1 < $CWD/patches/remove_gtk.diff
# stop the GUI from getting stuck in Configure Players, when no joysticks
# are plugged in. Without the patch, it continually complains "Joystick
# not available" and won't allow saving the settings, even if joystick
# control isn't selected. With the patch, it still complains, but only
# once (per human player), then it saves the settings and exits Configure
# Players correctly.
patch -p1 < $CWD/patches/joystick.diff
# remove "Select Theme" from the menu. it's unimplemented, plus there
# aren't any themes to choose (just the default one).
patch -p1 < $CWD/patches/remove_theme_option.diff
# add warning to the man page about the GUI section being outdated,
# remove mention of selecting themes.
patch -p1 < $CWD/patches/manpage.diff
# configure script is from 2002, no thank you.
rm -f configure
autoreconf -if
# don't rely on /etc/profile.d/qt.sh being executable.
export QTDIR=/opt/kde3/lib${LIBDIRSUFFIX}/qt3
export PATH=$QTDIR/bin:$PATH
CFLAGS="$SLKCFLAGS" \
CXXFLAGS="$SLKCFLAGS -fpermissive" \
./configure \
--with-default-qt \
--prefix=/usr \
--bindir=/usr/games \
--libdir=/usr/lib${LIBDIRSUFFIX} \
--datadir=/usr/share/games/ \
--sysconfdir=/etc \
--localstatedir=/var \
--mandir=/usr/man \
--docdir=/usr/doc/$PRGNAM-$VERSION \
--build=$ARCH-slackware-linux
make
make install-strip DESTDIR=$PKG
gzip -9 $PKG/usr/man/man6/$PRGNAM.6
# default to mouse control, not joystick. note that keyboard control is
# always active, regardless of this setting. also by default player 2
# is the AI. this allows people to quickly fire up a single-player game
# (the most common case) without going through the config menu. this
# just changes the default; users can change it as desired.
cat $CWD/$PRGNAM.default > $PKG/usr/share/games/$PRGNAM/$PRGNAM.default
# .desktop written for this build.
mkdir -p $PKG/usr/share/pixmaps $PKG/usr/share/applications
ln -s ../games/$PRGNAM/icon.xpm $PKG/usr/share/pixmaps/$PRGNAM.xpm
cat $CWD/$PRGNAM.desktop > $PKG/usr/share/applications/$PRGNAM.desktop
mkdir -p $PKG/usr/doc/$PRGNAM-$VERSION
cp -a AUTHORS COPYING ChangeLog NEWS README $PKG/usr/doc/$PRGNAM-$VERSION
cat $CWD/$PRGNAM.SlackBuild > $PKG/usr/doc/$PRGNAM-$VERSION/$PRGNAM.SlackBuild
mkdir -p $PKG/install
cat $CWD/slack-desc > $PKG/install/slack-desc
cat $CWD/slack-required > $PKG/install/slack-required
cat $CWD/doinst.sh > $PKG/install/doinst.sh
cd $PKG
/sbin/makepkg -l y -c n $OUTPUT/$PRGNAM-$VERSION-$ARCH-$BUILD$TAG.${PKGTYPE:-tgz}
| true
|
0fb29ff5d482af2a7505706fc8270cdd9be9dff3
|
Shell
|
ianisl/dotfiles
|
/.xinitrc
|
UTF-8
| 1,662
| 3.46875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Since we are using a custom xinitrc, we need to read Xresources
[[ -f ~/.Xresources ]] && xrdb -merge -I$HOME ~/.Xresources
# Set locale before launching URxvt so we don't have encoding problems
export LANG=fr_FR.UTF-8
# According to https://wiki.archlinux.org/index.php/Xinit:
# "At the very least, ensure that the last if block in /etc/X11/xinit/xinitrc is present in your .xinitrc file to ensure that the scripts in /etc/X11/xinit/xinitrc.d are sourced."
# That means we should source the corresponding folder for our XQuartz install, ie /opt/X11/lib/X11/xinit/xinitrc.d
# This folder contains 3 scripts: 10-fontdir.sh 98-user.sh 99-quartz-wm.sh
# Since we don't need 99-quartz-wm.sh, and 98-user.sh sources ~/.xinitrc.d, we can just source .xinitrc.d and add our own fontdir script there
if [ -d "${HOME}/.xinitrc.d" ] ; then
for f in "${HOME}"/.xinitrc.d/*.sh ; do
[ -x "$f" ] && . "$f"
done
unset f
fi
# Set path with macOS path_helper (needed for Homebrew to work)
eval $(/usr/libexec/path_helper -s)
# Launch programs
# Important: don't forget the trailing &s or the .xinitrc script won't proceed to loading the WM! In this case, the first program would open *outside* any WM.
/usr/local/bin/urxvt -cd "${HOME}" &
# Start the window manager
# Why exec? According to https://wiki.archlinux.org/index.php/Xinit: "prepending exec will replace the script process with the window manager process, so that X does not exit even if this process forks to the background". In practice it seems we need to add this to the last instruction of .xinitrc
exec $HOME/src/objective-c/projets/quartz-vm_ianisl/src/quartz-wm
| true
|
7f7dff3a74cedbba6bcfcdbdfa7efd66595f5354
|
Shell
|
crawlingcity/macsetup
|
/bash_aliases
|
UTF-8
| 1,740
| 3.046875
| 3
|
[] |
no_license
|
# lists contents of current directory with file permisions
alias ll='ls -l -sort'
# list all directories in current directories
alias ldir='ls -l | grep ^d'
# self explanatory
alias ..='cd ..'
alias ...='cd ../../'
# show aliases
alias a='echo "------------Your aliases------------";alias'
# Apply changes to aliases
alias sa='source ~/.bash_aliases;echo "Bash aliases sourced."'
# Edit Aliases
alias via='sudo nano ~/.bash_aliases'
# Web-apps Alias
alias wba='cd /mnt/c/web-apps/'
# Clear Alias
alias cls='clear'
# Updates repo cache and installs all kinds of updates
alias update='sudo apt-get update && sudo apt-get upgrade && sudo apt-get dist-upgrade'
# Git Alias
alias git='echo "web#2k17" | sudo -S git'
# Server alias
#alias server='echo "web#2k15" | sudo -S server'
# Gitk Alias
alias gitk='dopen gitk'
# Composer Alias
#alias composer='echo "web#2k17" | sudo -S composer'
# Sarah Assistent
alias sarah="python ~/sarah.py"
# Clean distribution
alias cleanup="dpkg --list | grep linux-image | awk '{ print $2 }' | sort -V | sed -n '/'`uname -r`'/q;p' | xargs sudo ap
t-get -y purge"
# Remote Desktop
alias remote-desktop='echo "web#2k17" | sudo -S dopen remmina'
# Switch PHPFarm
alias switch-phpfarm='/opt/phpfarm/inst/bin/switch-phpfarm'
# Composer with differnet versions.
compo() {
local param="$1"
shift 1
/usr/bin/php$param /usr/local/bin/composer $@
}
# Magento Piece of shit..
magento() {
/usr/bin/php7.0 bin/magento $@
}
# Update Server
alias update-server='echo "web#2k17" | sudo -S cp /mnt/c/web-apps/scripts/server /usr/sbin/ && sudo -S chmod +x /usr/sbin/server'
# PHP Override (2abe59)
php(){/usr/bin/php7.1 $@}
| true
|
a2714628987c4fa65e130f7a212af1e1bd2a9cda
|
Shell
|
Qrust/SaveVig
|
/run.sh
|
UTF-8
| 1,415
| 3.5625
| 4
|
[] |
no_license
|
#!/bin/sh
clear && clear
FILE="Strong_vigenere_cipher-bin"
LEN=$1
TESTS=$2
METHOD=$3
BASEDIR="$PWD"
make -C "$BASEDIR" rebuild || { echo Failed to compile ; exit; }
cd ..
test -d "build" || mkdir "build"
cd "build"
mv ../src/$FILE ./
cp ../src/text.txt ./
test -d "py_tests" || mkdir "py_tests"
cd "py_tests"
mkdir "${TESTS}_${LEN}"
cd ..
echo "Начинаем генерацию тестов, при генерации нельзя запускать второй"
for ticks in `seq 1 $TESTS`;do
#PASSWORD=`cat /dev/urandom | tr -dc "a-zA-Z0-9@#*=[]" | dd bs=$LEN count=1 2>/dev/null`
PASSWORD=`cat /dev/urandom | tr -dc "a-z" | dd bs=$LEN count=1 2>/dev/null`
echo "Количество тестов $TESTS. Длина пароля $LEN. Тест№ $ticks"
./$FILE -p $PASSWORD -m $METHOD
mv "plotdata_${PASSWORD}.py" "./py_tests/${TESTS}_${LEN}/"
done
cd "./py_tests/${TESTS}_${LEN}"
echo "Строим графики, можно запускать еще тесты"
FILES="*.py"
for files in ${FILES};do
echo "Строим график $files"
python3 $files
done
echo "Подготавливаем файлы для видео"
mkdir tmp
x=1; for i in *png; do counter=$(printf %03d $x); ln "$i" ./tmp/img"$counter".png; x=$(($x+1)); done
cd ./tmp
echo "Делаем видео"
ffmpeg -f image2 -i img%03d.png ${TESTS}_${LEN}.mpg
mv ${TESTS}_${LEN}.mpg ../
echo "Готово"
| true
|
1f948234870c4156548747d9890e0b76ce8562b4
|
Shell
|
whmzsu/kubernetes-handbook
|
/tools/wordcount/wordcount.sh
|
UTF-8
| 254
| 3.59375
| 4
|
[
"CC-BY-NC-SA-4.0",
"CC-BY-4.0"
] |
permissive
|
#!/bin/bash
# Count Chinese characters
total=0
basedir="../../"
for x in `cd $basedir;fd -e md`
do
i=`cnwordcount -f $basedir/$x|cut -d " " -f2`
total=$(($total+$i))
if [ $i -ne 0 ]; then
echo "$x $i"
fi
done
echo "Total $total"
| true
|
fa650653d532b688caffb0daf7f4dcf1b567be60
|
Shell
|
weaming/daily-script-collections
|
/zsh/custom/7.fn.handy.sh
|
UTF-8
| 1,899
| 3.328125
| 3
|
[] |
no_license
|
# Go
alias cdwm='cd ~/go/src/github.com/weaming'
alias musl-go-build='docker run --rm -v "$PWD":"/go${$(pwd)#$GOPATH}" -w "/go${$(pwd)#$GOPATH}" blang/golang-alpine go build -v'
# conda
workon() {
source activate $1
export VIRTUAL_ENV="$(dirname $(dirname $(which python)))"
which python
function workoff() {
source deactivate
unset -f workoff
unset VIRTUAL_ENV
}
}
py-echo-import() {
grep -hE '^(from .+ )?import' **/*.py | sort -r | uniq -c | sort -r | less
}
mk-python-module() {
mkdir -p $1
touch $1/__init__.py
}
alias pipf='python -m pip freeze'
alias pipfg='python -m pip freeze | grep'
alias pipi='pip install'
alias pipir='pip install -r requirements.txt'
pip-install-force() {
cmd=${2:-pip}
$cmd install -U --no-cache-dir -i https://pypi.org/simple $1
}
alias py='python'
rm-extension() {
find . -name "*.$1" | xargs rm
}
alias rm-pyc='rm-extension pyc'
# Go
generate-and-view-gograph() {
set -o pipefail
out=/tmp/godepgraph.png
godepgraph $1 | dot -Tpng -o $out && open $out
}
# show command status
cmd() {
if which "$1" > /dev/null && [ -e "$(which $1)" ]; then
echo -e $(OK "ls -l \$(which $1):")
ls -l `which $1`
else
echo -e $(OK 'which:')
which $1
fi
echo
echo -e $(OK 'where:')
where $1
echo
echo -e $(OK 'type:')
type $1
}
# Flutter
alias flutter_get_gitignore='wget https://raw.githubusercontent.com/flutter/flutter/master/.gitignore -O .gitignore'
# Pypi
alias get-example-setup.py='wget https://raw.githubusercontent.com/pypa/sampleproject/master/setup.py -O setup.py'
# photo
# smms-upload in script/python, paster_to_tmp_png in script/shell
alias upload-image-in-clipboard-to-smms="clipboard-to-tmp-png | smms-upload -"
# linux
alias tv='tmux attach'
alias dpkg-reconfigure-timezone='sudo dpkg-reconfigure tzdata'
| true
|
08c9398d6716b3b2efd0bfee53722b051620c967
|
Shell
|
b06902106/OSproj1
|
/demo.sh
|
UTF-8
| 293
| 2.578125
| 3
|
[] |
no_license
|
#!/bin/bash
tasks="TIME_MEASUREMENT FIFO_1 PSJF_2 RR_3 SJF_4"
for task in TIME_MEASUREMENT FIFO_1 PSJF_2 RR_3 SJF_4; do
echo "------------------------------------------------"
echo "${task}"
sudo dmesg -C
sudo ./scheduler < test/${task}.txt
sudo dmesg | grep Project1
done
| true
|
d9bfaa889c6b06d42af904f48498f1f7b804e71b
|
Shell
|
andypost/drupal.ru
|
/scripts/before.sh
|
UTF-8
| 292
| 2.546875
| 3
|
[] |
no_license
|
#!/bin/sh
SITEPATH="$HOME/domains/$SETTINGS_DOMAIN"
cd $SITEPATH
#Quote module replacement. Issue #51
if [ -d "$SITEPATH/sites/all/modules/contrib/quote" ]; then
drush -y dis quote
rm -rf $SITEPATH/sites/all/modules/contrib/quote
touch /tmp/quote.remove
fi
drush cache-clear drush
| true
|
6dc6c71ea77f0da04767d7a516cdc8cbd6a0a485
|
Shell
|
gavinchou/vim_config
|
/install.sh
|
UTF-8
| 525
| 3.203125
| 3
|
[] |
no_license
|
#!/bin/bash
## @brief install vim config
## @author Gavin Chou
## @email gavineaglechou@gmail.com
## @date 2016-05-12-Thu
function digitaldatetime() {
echo `date +"%Y%m%d%H%M%S"`
}
vim_config=`pwd -P`
cd $HOME
echo "mv .vimrc .vimrc.`digitaldatetime`"
mv .vimrc .vimrc.`digitaldatetime`2>/dev/null
echo "mv .vim .vim.`digitaldatetime`"
mv .vim .vim.`digitaldatetime`2>/dev/null
echo "ln -s ${vim_config}/_vimrc .vimrc"
ln -s ${vim_config}/_vimrc .vimrc
echo "ln -s ${vim_config}/.vim .vim"
ln -s ${vim_config}/.vim .vim
echo "enjoy!"
| true
|
8b7a8524f4f47276b8fd8cb369eb86ec5e4e4327
|
Shell
|
aixj1984/go-jira-api
|
/run.sh
|
UTF-8
| 107
| 2.546875
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
source .env
if [[ "$1" == "" ]]; then
go run main.go
else
go run examples/"$1"
fi
| true
|
c9feb53760ea6849a4111bb7fab654794702ac5d
|
Shell
|
frenzieddoll/dotfiles
|
/.emacs.d/script/mpv-rifle.sh
|
UTF-8
| 1,263
| 3.984375
| 4
|
[] |
no_license
|
#!/bin/bash
if [ $# -eq 0 ]; then
echo "Usage: ${0##*/} PICTURES"
exit
fi
[ "$1" = '--' ] && shift
tmpfile=$(mktemp)
# 生成した一時ファイルを削除する
function rm_tmpfile {
[[ -f "$tmpfile" ]] && rm -f "$tmpfile"
}
# 正常終了したとき
trap rm_tmpfile EXIT
# 異常終了したとき
trap 'trap - EXIT; rm_tmpfile; exit -1' INT PIPE TERM
abspath () {
case "$1" in
/*) printf "%s\n" "$1";;
./*) printf "%s\n" "$PWD/$(basename "$1")";;
*) printf "%s\n" "$PWD/$1";;
esac
}
targetAbsPath="$(abspath "$1")"
targetFileName=$(echo $(basename "$targetAbsPath") | sed -e "s/\[/\\\[/g" | sed -e "s/\]/\\\]/g")
find "$(dirname "$targetAbsPath")" -maxdepth 1 -type f -iregex '.*\(mp4\|mkv\|avi\|wmv\|webm\|mpg\|flv\|m4v\|rm\|rmvb\|mpeg\|asf\|mp3\)$' | sort > $tmpfile
# playlist=$(find "$(dirname "$target")" -maxdepth 1 -type f -iregex '.*\(mp4\|mkv\|avi\|wmv\|webm\|mpg\|flv\|m4v\|rm\|rmvb\|mpeg\|asf\)$' | sort)
count="$(grep -n "$targetFileName" $tmpfile | cut -d ":" -f 1)"
# mpv --playlist=$tmpfile --playlist-start=$(($count - 1))
if [ "$(wc -l $tmpfile)" == 1 ]; then
mpv $target
elif [ -n "$count" ]; then
mpv --playlist=$tmpfile --playlist-start=$(($count - 1))
else
echo "error"
fi
| true
|
626e5fb675cef650b237c75f99fd317583ebf924
|
Shell
|
R-omk/webhook-receiver
|
/trigg.sh
|
UTF-8
| 1,216
| 4
| 4
|
[] |
no_license
|
#!/bin/bash
set -e
ARGS="${@}"
check_args.sh ${ARGS[@]}
status="$?"
if [[ "$status" != "0" ]]; then
exit "$status"
fi
declare -A variables=()
trigger_url=""
trigger_token=""
trigger_ref="master"
if [[ "$#" == "0" ]]; then
echo "!!! No options provided. Exiting..."
exit 1
fi
echo "start with args:"
echo "${@}"
while true; do
case "$1" in
"--trigger-url" )
shift
trigger_url="$1"
shift
;;
"--trigger-token" )
shift
trigger_token="$1"
shift
;;
"--trigger-ref" )
shift
trigger_ref="$1"
shift
;;
"--var" )
shift
key="$1"; shift
value="$1"; shift
variables+=(["$key"]="$value")
;;
* ) shift ;;
esac
if [[ "$#" == "0" ]]; then
break
fi
done
if [[ "$trigger_url" == "" ]]; then
echo "!!! The trigger url is no setted. Exiting..."
exit 1
fi
command_str="curl -X POST"
if [[ "$trigger_token" != "" ]]; then
command_str="$command_str -F token=$trigger_token"
fi
if [[ "$trigger_ref" != "" ]]; then
command_str="$command_str -F ref=$trigger_ref"
fi
for key in ${!variables[@]}
do
value="${variables[$key]}"
command_str="$command_str -F variables[\"$key\"]=\"$value\""
done
command_str="$command_str $trigger_url"
eval "$command_str"
| true
|
8bbe5cead0bc65ff29cc5ffdd8e3d80371d4897c
|
Shell
|
Paulmicha/common-web-tools
|
/cwt/extensions/apache/instance/init.hook.sh
|
UTF-8
| 721
| 3.171875
| 3
|
[
"Apache-2.0",
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/usr/bin/env bash
##
# Implements hook -a 'init' -v 'PROVISION_USING HOST_TYPE INSTANCE_TYPE'.
#
# @see u_instance_init() in cwt/instance/instance.inc.sh
#
case "$CWT_APACHE_INIT_VHOST" in true)
if i_am_su; then
u_apache_write_vhost_conf
else
# TODO [evol] non-interactive shell environments need sudoers config in
# place, as password prompts won't work.
# See https://askubuntu.com/a/192062
sudo u_apache_write_vhost_conf
fi
if [[ $? -ne 0 ]]; then
echo >&2
echo "Error in $BASH_SOURCE line $LINENO: unable to write Apache VHost for '$INSTANCE_DOMAIN'." >&2
echo "Is $USER properly configured for sudo ?" >&2
echo "-> Aborting (1)." >&2
echo >&2
exit 1
fi
esac
| true
|
cac2ca667a09333cc7a4ed544bd9dfd0b6456651
|
Shell
|
superice119/mypackages
|
/others/topologyd/files/etc/init.d/topologyd
|
UTF-8
| 470
| 2.8125
| 3
|
[] |
no_license
|
#!/bin/sh /etc/rc.common
START=14
STOP=96
USE_PROCD=1
start_service() {
procd_open_instance
config_load owsd
config_get enable ubusproxy enable
procd_set_param command "/sbin/topologyd"
if [ "$enable" == "1" ]
then
procd_append_param command --root
fi
procd_set_param respawn
# procd_set_param stdout 1
procd_set_param stderr 1
procd_close_instance
}
service_triggers()
{
procd_add_reload_trigger topologyd
}
reload_service() {
stop
start
}
| true
|
2b1ae9d4bc1fe97d1bd0735e7504e7612a3e2369
|
Shell
|
zelhar/my-dotfiles-git
|
/usr_home/bin/surf_tabs.sh
|
UTF-8
| 579
| 2.796875
| 3
|
[] |
no_license
|
#!/bin/bash
#tabbed -cd zathura -e
#$(tabbed -d >/tmp/tabbed.xid); urxvt -embed $(</tmp/tabbed.xid);
#$(tabbed -d >/tmp/tabbed.xid); zathura -embed $(</tmp/tabbed.xid);
file_path="/tmp/tabbed.xid"
if ! pgrep -x "tabbed" > /dev/null #if tabbed is not running, delete tabbed.xid
then
# $(tabbed -d >/tmp/tabbed.xid);
rm $file_path
fi
if [[ -e $file_path ]] ; then
nohup surf -e $(< $file_path) "$@" &!
else
tabbed -d >/tmp/tabbed.xid nohup surf -e $(< $file_path) "$@" &!
fi
#ext pdf, has zathura, X, flag f = zathura -- "$@"
#nohup surf_tabs.sh "www.yahoo.de" &!
| true
|
9b6d18a17fe0a799ada544f08abffd189f5b98e7
|
Shell
|
michaelsalisbury/builder
|
/udev/archive/vbox_launcher.sh.v31
|
UTF-8
| 32,782
| 3.453125
| 3
|
[] |
no_license
|
#!/bin/bash
function main(){
case "${1}" in
a*|A*) shift; ACTION_ADD "$@";;
r*|R*) shift; ACTION_REMOVE "$@";;
s*|S*) ACTION_SELECT;;
t*|T*) shift; ACTION_TRIGGER "$@";;
sd*) ACTION_ADD "$@";;
*) ACTION_SELECT;;
#*) UMOUNT=false
# STARTVM=false
# ACTION_ADD;;
esac
}
function HELP(){
cat <<-HELP
Usage...
a [dev] run device add
r [dev] run device remove
t [dev] trigger udev device add action
LOG :: ${LOG}
HELP
EXIT 1
}
function ACTION_SELECT(){
# select a disk manually
zenity_choose_disk
# verify user selected disk otherwise exit
if (( ${#DEVICE[*]} )) && IS_DEVICE_REAL ${DEVICE[0]}; then
ACTION_ADD ${DEVICE[0]}
else
EXIT 1
fi
}
function ACTION_ADD(){
local DISPLAY_0_USER=$(GET_DISPLAY_0_USER)
local DISPLAY_0_HOME=$(GET_DISPLAY_0_HOME)
DEVICE=$(GET_DEVICE_LIST | head -1)
DEVICE=${1:-${DEVICE}}
# filter out root; system drive
IS_DEVICE_ROOT # function exits if positive
# popup log
OPEN_POPUP_LOG
# prompt user to choose tool or disregard
IFS=: SELECTION=( $(zenity_choose_tool) )
# exit if user chose cancel or closed dialog
case "${SELECTION[0]}" in
0) echo dblCLICK :: Task selection \#${SELECTION[1]}.;;
1) echo CANCELED :: Task selection dialog for disk \"${DEVICE}\". Exiting\!
EXIT 1;;
5) (( ${#SELECTION[*]} > 1 )) \
&& echo clk_NEXT :: Task selection \#${SELECTION[1]}.\
|| echo TIME_OUT :: Task selection dialog timed out\! SELECTION=${SELECTION[*]}.;;
*) echo NEWSTATE :: Task selection dialog returned ${SELECTION[*]}. Exiting\!
EXIT 1;;
esac
# set default if next was selected without chooseing a tool
(( ${#SELECTION[*]} > 1 )) && GET_SELECTION_DETAILS || SET_DEFAULT_SELECTION
# prompt user to add seconadary device
if grep -qi "^\(diag\|PXE\|DISK\)" <(echo ${SELECTION[2]}); then
zenity_choose_disk
fi
#while echo ${SELECTION[2]} | grep -qi "^\(diag\|PXE\|DISK\)"; do
while false; do
IFS=: SELECTION2=( $(zenity_choose_second) )
case "${SELECTION2[0]}" in
0) echo dblCLICK :: Disk selection \#${SELECTION2[1]}.;;
1) echo CANCELED :: Disk selection dialog for disk \"${DEVICE}\". Skipping\!
break;;
5) if (( ${#SELECTION2[*]} > 1 )); then
echo clk_NEXT :: Disk selection \#${SELECTION2[1]}.
else
echo TIME_OUT :: Or no selection\! SELECTION=${SELECTION[*]}. Skipping
break
fi;;
*) echo NEWSTATE :: Disk selection dialog returned ${SELECTION[*]}. Exiting\!
EXIT 1;;
esac
# if the user made a selection but the device is inuse, WARN
if (( ${#SELECTION2[*]} > 1 )) && [[ "${SELECTION2[4]}" =~ ^(local|VBOX)$ ]]; then
#if (( $(zenity_warning_inuse "${SELECTION2[*]}") == 5 )); then
if (( $(zenity_warning_inuse "${SELECTION2[1]}") == 5 )); then
DEVICE[${#DEVICE[*]}]=${SELECTION2[1]}
break
fi
# if the user made a selection and the device is free, MOVE ON
elif (( ${#SELECTION2[*]} > 1 )); then
DEVICE[${#DEVICE[*]}]=${SELECTION2[1]}
break
# this logic brack should never occcur because the previos case block break null selections
else
unset DEVICE[1]
break
fi
done
# prompt user to label task/vm
IFS=: NAME=( $(zenity_name_task) )
# exit if user chose cancel or closed dialog
case "${NAME[0]}" in
1) echo CANCELED :: Task naming dialog for disk \"${DEVICE}\". Exiting\!
EXIT 1;;
5) (( ${#NAME[*]} > 1 )) \
&& echo LAUNCHED :: Name - ${NAME[*]} \
|| echo TIME_OUT :: Task naming dialog timed out\! SELECTION=${SELECTION[*]}.;;
*) echo NEWSTATE :: Task naming dialog returned ${NAME[*]}. Exiting\!
EXIT 1;;
esac
# set default name if start vm was selected without entering a name
(( ${#NAME[*]} > 1 )) || SET_DEFAULT_NAME
######### if selection is PXE then verify bridged ethernet default is OK
# amend VRDEPORT and DEVICE to NAME[1]
NAME[1]="${NAME[1]}-$(GET_VRDEPORT).${DEVICE}"
echo NAME :: ${NAME[*]}
# set path for vmdk files
declare -A VMDK
for DEV in ${DEVICE[*]}; do
VMDK[${DEV}]="${DISPLAY_0_HOME}/.VirtualBox/udev.${DEV}.vmdk"
done
# cleanup old primary vmdk files
for DEV in ${DEVICE[*]}; do
DETACH_VMDK_HDD ${DEV}
done
# cleanup conflicting stale virtual machines
for DEV in ${DEVICE[*]}; do
DELETE_VM ${DEV}
done
# unmount devices
local TRIES=""
local PART=""
for DEV in ${DEVICE[*]}; do
for PART in `ls /dev/${DEV}[0-9]`; do
for TRIES in {1..2}; do
umount ${PART} &>/dev/null
done
umount -v ${PART}
done
done
# create virtualbox raw vmdk files; user must be part of "disk" group
for DEV in ${DEVICE[*]}; do
cat <<-SU | su - ${DISPLAY_0_USER} -s /bin/bash
# Only run command if VBoxManage exists and is in PATH
which VBoxManage &>/dev/null &&\
VBoxManage internalcommands createrawvmdk \
-filename "${VMDK[${DEV}]}" \
-rawdisk /dev/${DEV}
#VBoxManage list hdds
SU
done
# create virtual machine
SET_VM
# start virtual machine
START_VM
# dump var status to LOG
echo
echo " name :: "${NAME[*]}
echo " name err :: "${NAME[0]}
echo " selection :: "${SELECTION[*]}
echo "select err :: "${SELECTION[0]}
echo " path :: "$(GET_SELECTION_PATH)
echo " mem :: "$(GET_SELECTION_MEM)
echo " name :: "${NAME[1]}
echo ' $@ :: '$@
echo
}
function ACTION_REMOVE(){
unset POPUP_LOG
local DISPLAY_0_USER=$(GET_DISPLAY_0_USER)
local DISPLAY_0_HOME=$(GET_DISPLAY_0_HOME)
DEVICE=$(GET_DEVICE_LIST | head -1)
DEVICE=${1:-${DEVICE}}
# filter out root; system drive
IS_DEVICE_ROOT # function exits if positive
# LOG
echo REMOVING :: ${DEVICE}
# detach and delete vmdk
DETACH_VMDK_HDD ${DEVICE}
# delete vm
DELETE_VM ${DEVICE}
}
function SETUP_CONFIG_UDEV_RULE(){
# Dependant on GLOBAL var; UDEV_RULE_CONFIG_FILE_NAME_DEFAULT
local PROG_NAME=$(basename "${BASH_SRCNAME}" .sh)
local RULE_FILE="/etc/udev/rules.d/${UDEV_RULE_CONFIG_FILE_NAME_DEFAULT}"
# LOG Info
LOG INFO :: ${FUNCNAME} :: UDEV_RULE_CONFIG_FILE_NAME_DEFAULT[${UDEV_RULE_CONFIG_FILE_NAME_DEFAULT}]
# Find the most relavant udev rule config file template
if [ -f "${BASH_SRCDIR}/${UDEV_RULE_CONFIG_FILE_NAME_DEFAULT}" ]; then
local SOURCE_CONFIG="${BASH_SRCDIR}/${UDEV_RULE_CONFIG_FILE_NAME_DEFAULT}"
elif [ -f "${BASH_SRCDIR}/../etc/${PROG_NAME}/${UDEV_RULE_CONFIG_FILE_NAME_DEFAULT}" ]; then
local SOURCE_CONFIG="${BASH_SRCDIR}/../etc/${PROG_NAME}/${UDEV_RULE_CONFIG_FILE_NAME_DEFAULT}"
else
LOG ERROR :: ${FUNCNAME} :: Source udev rule template not found. UDEV_RULE_CONFIG_FILE_NAME_DEFAULT[${UDEV_RULE_CONFIG_FILE_NAME_DEFAULT}]
EXIT 1
fi
# fix template file; ensure rules execute this script
cat <<-SED | sed -i -f <(cat) "${SOURCE_CONFIG}"
s|\(.*[[:space:]]RUN+="\)\([^[:space:]]\+\)\(.*\)|\1${BASH_SRCFQFN}\3|
SED
# test if template differs from install udev rule
diff --suppress-common-lines "${SOURCE_CONFIG}" "${RULE_FILE}" | LOG INFO :: ${FUNCNAME} ::
#
(( ${PIPESTATUS[0]} )) && cat "${SOURCE_CONFIG}" > "${RULE_FILE}"
}
# Simulate a udev add/remove event for a device (for testing).
# Arguments: $1 - action, matched loosely: a*/A* => add, r*/R* => remove
#                 (anything else recurses with "add")
#            $2 - optional device name; defaults to first GET_DEVICE_LIST entry
# Refuses to act on the root/system drive and on non-existent devices.
function ACTION_TRIGGER(){
	#udevadm trigger --action=add --sysname-match="sdb"
	local DEV=$(GET_DEVICE_LIST | head -1)
	local DEV=${2:-${DEV}}
	unset POPUP_LOG
	IS_DEVICE_ROOT ${DEV}
	IS_DEVICE_REAL ${DEV}
	# make sure the udev rule that re-enters this script is installed
	SETUP_CONFIG_IF_EMPTY "${UDEV_RULE_CONFIG_FILE_NAME_DEFAULT}" "/etc/udev/rules.d/${UDEV_RULE_CONFIG_FILE_NAME_DEFAULT}"
	SETUP_CONFIG_UDEV_RULE
	case "$1" in
		a*|A*)	udevadm trigger --action=add --sysname-match="${DEV}";;
		r*|R*)	udevadm trigger --action=remove --sysname-match="${DEV}";;
		*)	ACTION_TRIGGER add "$@";;
	esac
}
# Launch the freshly-created VM in the display-0 user's GUI session.
# Globals: NAME[1] (VM name), TARGET_DISPLAY (via DISPLAY=:0 below), LOG.
# Silently skips (with a log entry) when the VirtualBox binary is absent.
function START_VM(){
	local DISPLAY_0_USER=$(GET_DISPLAY_0_USER)
	local VB=$(which VirtualBox 2>/dev/null)
	if [ -z "${VB}" ]; then
		echo ERROR :: ${FUNCNAME} :: VirtualBox not found. Skipping\! >> "${LOG}"
		return
	fi
	# start vm
	# NOTE: the heredoc is unquoted, so ${VB} and ${NAME[1]} expand HERE
	# (in this root process) before the commands run under su.
	cat <<-SU | su - ${DISPLAY_0_USER} -s /bin/bash
		export DISPLAY=:0
		${VB} --startvm ${NAME[1]} &
	SU
}
# Detach and destroy the raw-disk vmdk bound to a device, powering off and
# unregistering any VM still using it, then delete the vmdk file itself.
# Arguments: $1 - optional device name; defaults to ${DEVICE}.
# NOTE: inside the heredoc, \$ / \" escapes defer expansion to the su'd
# shell, while unescaped ${...} expands here (e.g. ${TARGET_VM_MASK}, ${SCTL}).
function DETACH_VMDK_HDD(){
	local DISPLAY_0_USER=$(GET_DISPLAY_0_USER)
	local DEV=${1:-${DEVICE}}
	local TARGET_VM_MASK="$(GET_VRDEPORT ${DEV}).${DEV}.vmdk"
	cat <<-SU | su - ${DISPLAY_0_USER} -s /bin/bash
		# Only run command if VBoxManage exists and is in PATH
		which VBoxManage &>/dev/null &&\
		VBoxManage list hdds |\
		while read VAR DATA; do
			(( \${#VAR} )) || continue
			# create variables
			eval \${VAR%:}=\"\${DATA}\"
			# If the following are true...
			#	Usage is the current VAR (also the last hdd detail)
			#	Format equals VMDK
			#	The Location name matches my mask
			if   [ "\${VAR%:}" == "Usage" ] \
			  && [ "\${Format}" == "VMDK" ] \
			  && [[ "\${Location}" =~ ${TARGET_VM_MASK}\$ ]]; then
				echo DEL_DISK :: UUID = \${UUID}
				echo DEL_DISK :: PATH = \${Location}
				# power off the owning VM, detach both hdd ports, close the
				# medium, then unregister the VM (with and without --delete
				# as a belt-and-braces fallback); each step sleeps only when
				# the previous command succeeded
				VBoxManage controlvm "\${Usage% (*}" poweroff
				(( \$? )) || sleep 3
				VBoxManage storageattach "\${Usage% (*}"\
					--storagectl ${SCTL} \
					--type hdd --port 0 --device 0 \
					--medium none
				(( \$? )) || sleep 2
				VBoxManage storageattach "\${Usage% (*}"\
					--storagectl ${SCTL} \
					--type hdd --port 1 --device 0 \
					--medium none
				(( \$? )) || sleep 2
				VBoxManage closemedium disk "\${UUID}"
				(( \$? )) || sleep 2
				VBoxManage unregistervm "\${Usage% (*}" --delete
				(( \$? )) || sleep 2
				VBoxManage unregistervm "\${Usage% (*}"
				(( \$? )) || sleep 2
			fi
		done
	SU
	# now remove the actual vmdk file
	rm -f "${DISPLAY_0_HOME}/.VirtualBox/udev.${DEV}."*.vmdk
}
# Power off, unregister and delete any VM whose name matches the
# "<vrdeport>.<device>" mask for the given device, then remove its VM folder.
# Arguments: $1 - optional device name; defaults to ${DEVICE}.
function DELETE_VM(){
	local DISPLAY_0_USER=$(GET_DISPLAY_0_USER)
	local DEV=${1:-${DEVICE}}
	local TARGET_VM_MASK="$(GET_VRDEPORT ${DEV}).${DEV}"
	cat <<-SU | su - ${DISPLAY_0_USER} -s /bin/bash
		# Only run command if VBoxManage exists and is in PATH
		which VBoxManage &>/dev/null &&\
		VBoxManage list vms |\
		while read line; do
			# each line looks like:  "vm name" {uuid}
			eval words=( "\${line}" )
			vm_uuid=\${words[1]}
			# echo MACHINE :: \${words[*]}
			if [[ "\${words[0]}" =~ ${TARGET_VM_MASK}\$ ]]; then
				echo REMOVED :: \${words[*]}
				VBoxManage controlvm \${vm_uuid} poweroff
				(( \$? )) || sleep 3
				VBoxManage unregistervm \${vm_uuid} --delete
				(( \$? )) || sleep 3
				VBoxManage unregistervm \${vm_uuid}
				(( \$? )) || sleep 3
			fi
		done
	SU
	# remove vm folder
	rm -rf "${DISPLAY_0_HOME}/VirtualBox VMs"/*${TARGET_VM_MASK}
}
# Create and configure the VirtualBox VM for the current selection:
# registers the VM, sets memory, boot order (net/disk/dvd based on the
# user's selection), bridged NIC with a device-derived MAC, VRDE on a
# device-derived port, an IDE controller with optional boot DVD, and
# finally attaches one vmdk per selected device.
# Globals read: SELECTION, NAME, DEVICE, VMDK, BRIDGED_ETH, SCTL, LOG.
function SET_VM(){
	local DISPLAY_0_USER=$(GET_DISPLAY_0_USER)
	local DISPLAY_0_HOME=$(GET_DISPLAY_0_HOME)
	local DISPLAY_0_TOOL_DIR="${DISPLAY_0_HOME}/${TOOL_DIR}"
	local VBM=$(which VBoxManage 2>/dev/null)
	if [ -z "${VBM}" ]; then
		echo ERROR :: ${FUNCNAME} :: VBoxManage not found. Skipping\! >> "${LOG}"
		return
	fi
	# prep
	# SELECTION[1] encodes the boot source: "-" => PXE, "0" => local disk,
	# anything else => an ISO whose path GET_SELECTION_PATH resolves
	case ${SELECTION[1]} in
		-)	local boot1='net';;
		0)	local boot1='disk';;
		*)	local boot1='dvd'
			local DVD=$(GET_SELECTION_PATH);;
	esac
	# create vm
	cat <<-SU | su - ${DISPLAY_0_USER} -s /bin/bash
		${VBM} createvm	--name ${NAME[1]} \
				--ostype Other \
				--register
	SU
	# memory
	cat <<-SU | su - ${DISPLAY_0_USER} -s /bin/bash
		${VBM} modifyvm ${NAME[1]} --memory $(GET_SELECTION_MEM)
	SU
	# set boot device
	cat <<-SU | su - ${DISPLAY_0_USER} -s /bin/bash
		${VBM} modifyvm ${NAME[1]}	--boot1 ${boot1} \
						--boot2 none \
						--boot3 none \
						--boot4 none
	SU
	# set nic
	cat <<-SU | su - ${DISPLAY_0_USER} -s /bin/bash
		${VBM} modifyvm ${NAME[1]}	--nic1 bridged \
						--cableconnected1 on \
						--bridgeadapter1 ${BRIDGED_ETH} \
						--nictype1 82540EM \
						--macaddress1 $(GET_MAC)
	SU
	# set VRDE
	cat <<-SU | su - ${DISPLAY_0_USER} -s /bin/bash
		${VBM} modifyvm ${NAME[1]}	--vrde on \
						--vrdeport $(GET_VRDEPORT) \
						--vrdeauthtype null \
						--vrdemulticon on
	SU
	# set ide controler and dvd
	cat <<-SU | su - ${DISPLAY_0_USER} -s /bin/bash
		${VBM} storagectl ${NAME[1]}	--name ${SCTL} \
						--add ${SCTL} \
						--bootable on
		${VBM} storageattach ${NAME[1]}	--storagectl ${SCTL} \
						--port 0 \
						--device 1 \
						--type dvddrive \
						--medium "${DVD:-emptydrive}"
	SU
	# set disks
	# one vmdk per selected device, on consecutive IDE ports starting at 0
	local DEV="" PORT_NUM=0
	for DEV in ${DEVICE[*]}; do
		echo DEV :: ${DEV} :: ${PORT_NUM} :: ${VMDK[${DEV}]} >> "${LOG}"
		cat <<-SU | su - ${DISPLAY_0_USER} -s /bin/bash
			${VBM} storageattach ${NAME[1]}	--storagectl ${SCTL} \
							--port ${PORT_NUM}\
							--device 0 \
							--type hdd \
							--medium "${VMDK[${DEV}]}"
		SU
		let PORT_NUM++
	done
}
###########################################################################################
###########################################################################################
# Map a disk device name to its dedicated VRDE port.
# The third character of the name (the "b" in "sdb") is converted to its
# ASCII code and offset against "b" (98), so consecutive disks get
# consecutive ports: sdb -> VRDEPORT, sdc -> VRDEPORT+1, sdd -> VRDEPORT+2 ...
# Globals: VRDEPORT (port base), DEVICE (fallback when $1 is omitted).
function GET_VRDEPORT(){
	local disk=${1:-${DEVICE}}
	local letter_code
	letter_code=$(printf '%d' "'${disk:2}")
	echo $(( VRDEPORT + letter_code - 98 ))
}
# Build a per-device MAC address from the MAC base.
# The third character of the device name is converted to its ASCII code and
# subtracted from 197, so the suffix counts DOWN per disk:
# sdb -> ${MAC}99, sdc -> ${MAC}98, sdd -> ${MAC}97 ...
# Globals: MAC (10-hex-digit base), DEVICE (fallback when $1 is omitted).
function GET_MAC(){
	local disk=${1:-${DEVICE}}
	local letter_code
	letter_code=$(printf '%d' "'${disk:2}")
	echo "${MAC}$(( 197 - letter_code ))"
}
# Return the VM memory size (MB) from the current selection, defaulting
# to 128 when the selection carries no memory column.
# Globals: SELECTION (index 5 holds the memory field).
# IMPROVEMENT: use a parameter-expansion default instead of the original
# `echo | sed 's/^$/128/'` round-trip, which forked two processes to
# implement a plain default value.
function GET_SELECTION_MEM(){
	local mem=${SELECTION[5]}
	echo "${mem:-128}"
}
###########################################################################################
########################################################################### SELECTION TASKS
# Emit the numbered list of task selections: the two built-in defaults
# followed by the user's "[ Task List Selections ]" config section, with
# `cat -n` prefixing each line with its selection number.
function GET_SELECTIONS(){
	# dependant on global variables; CONFIG_TOOL_SELECTIONS
	GET_DEFAULT_SELECTION
	cat -n <(GET_CONFIG_SECTION "$(GET_DISPLAY_0_CONFIG_FQFN)" ${UCST_TOOL_SELECTIONS})
}
# Expand the global SELECTION array in place: keep its dialog exit status
# (element 0) and append the full selection row looked up by number
# (SELECTION[1]) from GET_SELECTIONS.
function GET_SELECTION_DETAILS(){
	# dependant on global variables; SELECTION
	local LINE=""
	# eval + IFS=${DIFS} re-splits the grep'd row into array words
	eval IFS=${DIFS} SELECTION=(
		${SELECTION[0]}
		$(grep "^[[:space:]]*${SELECTION[1]}[[:space:]]" <(GET_SELECTIONS))
	)
}
# Emit the two built-in task selections (selection number, short name,
# description, ISO path, memory MB). The "EXPERIMENTAl" spelling is
# preserved exactly as the rest of the tooling matches on it.
function GET_DEFAULT_SELECTION(){
	printf '%s\n' \
		'- PXE DEFAULT none 512' \
		'0 DISK EXPERIMENTAl none 512'
}
# Reset the global SELECTION array to "<exit status> + first default row":
# keeps element 0, splices in the default selections, then truncates to the
# first 6 fields (i.e. only the first default row survives).
function SET_DEFAULT_SELECTION(){
	# dependant on global variables; SELECTION
	IFS=$DIFS SELECTION=( ${SELECTION[0]} $(GET_DEFAULT_SELECTION) )
	IFS=$DIFS SELECTION=( ${SELECTION[*]:0:6} )
}
# Default the VM name to the device size in GB (whitespace stripped),
# keeping NAME[0] (the dialog exit status) intact.
# Globals: NAME (rewritten), DEVICE (read).
function SET_DEFAULT_NAME(){
	# dependant on global variables; NAME
	local DEVICE_SIZE=$(FORMAT_TO_GB $(GET_DEVICE_HWI_SIZE ${DEVICE}) | tr -d ' ')
	IFS=$DIFS NAME=( ${NAME[0]} ${DEVICE_SIZE} )
	#IFS=$DIFS NAME=( ${NAME[0]} $(GET_DEVICE_DETAIL) )
	#IFS=$DIFS NAME=( ${NAME[0]} "${NAME[3]}${NAME[4]}" )
}
###########################################################################################
#################################################################################### zenity
# Render one device's details as Pango markup for a zenity dialog:
# right-aligned field names from the config's column-header section, paired
# with the cached values from GET_DEVICE_INFO_ARRAY.
# Arguments: $1 - optional device path/name; defaults to ${DEVICE}.
function zenity_device_info(){
	local IFS=${DIFS}
	local DEV=$(basename "${1:-${DEVICE}}")
	# retrieve into an array the field names returned from the Disk Selection Dialog
	eval local field=( $(GET_CONFIG_SECTION \
				"$(GET_DISPLAY_0_CONFIG_FQFN)" \
				${UCST_DISK_COLUMN_HEADERS}) )
	# retieve into an array the values returned from the Disk Selection Dialog
	eval local value=( $(GET_DEVICE_INFO_ARRAY ${DEV}) )
	# prep vars
	local index=0
	local space=10
	local spaces=$(seq ${space} | sed 's/.*/ /g' | tr -d '\n')
	# write device info
	# (pad each field name to 10 chars by prefixing spaces then keeping the
	#  last 10 characters)
	echo '<span color=\"blue\" face=\"courier new\">'
	while (( index < ${#field[*]} )); do
		field[${index}]="${spaces}${field[${index}]}"
		field[${index}]=${field[${index}]: -${space}}
		echo "${field[${index}]} <big>::</big> ${value[$(( index++ ))]}"
	done
	echo '</span>'
}
# Concatenate a numbered "-VM DISK n-" info panel (Pango markup) for every
# selected device in the global DEVICE array.
function zenity_device_info_list(){
	local DEV=""
	local DISK_NUM=""
	for DEV in ${DEVICE[*]}; do
		# DISK_NUM starts empty; arithmetic post-increment yields 0 first
		echo "<b><big><span color='purple'>-VM DISK $(( DISK_NUM++ ))-</span></big></b>\n"
		echo "$(zenity_device_info ${DEV})\n"
	done
}
# Pop a confirmation dialog when the chosen device is already in use
# (mounted locally or attached to a VBox VM). Prints the zenity exit code
# on stdout (5 == timeout/"Confirm" combination checked by the caller).
# Arguments: $1 - optional device path/name; defaults to ${DEVICE}.
function zenity_warning_inuse(){
	local IFS=${DIFS}
	local DEV=$(basename "${1:-${DEVICE}}")
	# https://developer.gnome.org/pygtk/stable/pango-markup-language.html
	# dependant on global variables; DEVICE
	local DISPLAY_0_USER=$(GET_DISPLAY_0_USER)
	# get drive size
	local zenityTitle=$(zenity_default_title)
	# warning instructions
	local zenityText=$(GET_CONFIG_SECTION "$(GET_DISPLAY_0_CONFIG_FQFN)" ${UCST_WARNING_INSTRUCTIONS})
	# device info to assist confirmation
	zenityText+="$(zenity_device_info ${DEV})"
	# Set zenity window position
	${ENABLE_WMCTRL} && MOV_WINDOW "${zenityTitle}" 100 50 &
	# launch zenity dialog
	#(
	# title spaces become non-breaking spaces (${zenityTitle// / })
	cat <<-ZENITY | su - ${DISPLAY_0_USER} -s /bin/bash 2>/dev/null
		DISPLAY=:${TARGET_DISPLAY} zenity \
			--timeout=${DIALOG_TIMEOUT:-25} \
			--title="${zenityTitle// / }" \
			--text="${zenityText}" \
			--ok-label="Confirm" \
			--cancel-label="GO BACK" \
			--question
	ZENITY
	echo $?
	#) | tac | tr -d '\n'
}
# Ask the user to name the VM via a zenity --forms dialog. The subshell
# output is reversed with `tac` and joined, so the caller receives
# "<exit code>:<entered name>" on a single line.
function zenity_name_task(){
	local IFS=${DIFS}
	# dependant on global variables; DEVICE
	local DISPLAY_0_USER=$(GET_DISPLAY_0_USER)
	# set window title
	local zenityTitle=$(zenity_default_title)
	# inform user which disks have been choosen
	local zenityText="$(zenity_device_info_list)"
	# naming instructions
	zenityText+=$(GET_CONFIG_SECTION "$(GET_DISPLAY_0_CONFIG_FQFN)" ${UCST_NAMING_INSTRUCTIONS})
	# Set zenity window position
	${ENABLE_WMCTRL} && MOV_WINDOW "${zenityTitle}" 100 50 &
	# launch zenity dialog
	(
	cat <<-ZENITY | su - ${DISPLAY_0_USER} -s /bin/bash 2>/dev/null
		DISPLAY=:${TARGET_DISPLAY} zenity \
			--timeout=${DIALOG_TIMEOUT:-25} \
			--ok-label="START VM" \
			--cancel-label="CANCEL" \
			--title="${zenityTitle// / }" \
			--text="${zenityText}" \
			--add-entry="Name" \
			--forms
	ZENITY
	echo $?:
	) | tac | tr -d '\n'
}
# Title used by every zenity dialog in this script.
# NOTE(review): everything after `return 0` is unreachable — it looks like
# an older, device-specific title kept around deliberately; confirm before
# removing.
function zenity_default_title(){
	echo VBOX Auto Launcher
	return 0
	echo	$(GET_DEVICE_INTERFACE ${DEVICE} | tr a-z A-Z) :: \
		${DEVICE} :: \
		$(FORMAT_TO_GB $(GET_DEVICE_HWI_SIZE ${DEVICE}))
}
# Show the task/tool selection list dialog. Returns a single line of the
# form "<exit code>:<selection #>:<name>:<description>" (output reversed
# by `tac` and newline-stripped).
function zenity_choose_tool(){
	# dependant on global variables; DEVICE
	local DISPLAY_0_USER=$(GET_DISPLAY_0_USER)
	# set window title
	local zenityTitle=$(zenity_default_title)
	# inform user which disks have been choosen
	local zenityText="$(zenity_device_info_list)"
	# get selection instructions
	zenityText+=$(GET_CONFIG_SECTION \
			"$(GET_DISPLAY_0_CONFIG_FQFN)" \
			${UCST_TOOL_INSTRUCTIONS})
	# get column headers
	eval local column=( $(GET_CONFIG_SECTION \
				"$(GET_DISPLAY_0_CONFIG_FQFN)" \
				${UCST_TOOL_COLUMN_HEADERS}) )
	# Set zenity window position
	${ENABLE_WMCTRL} && MOV_WINDOW "${zenityTitle}" 100 50 &
	# launch zenity dialog
	(
	cat <<-ZENITY | su - ${DISPLAY_0_USER} -s /bin/bash 2>/dev/null
		DISPLAY=:${TARGET_DISPLAY} zenity \
			--width=300 \
			--height=700 \
			--timeout=${DIALOG_TIMEOUT:-25} \
			--ok-label="NEXT" \
			--cancel-label="CANCEL" \
			--title="${zenityTitle// / }" \
			--text="${zenityText}" \
			--list \
			--print-column=ALL \
			--separator=: \
			--column "#" \
			--column "${column[0]}" \
			--column "${column[1]}" \
			$(zenity_selection_list)
	ZENITY
	echo $?:
	) | tac | tr -d '\n'
}
# Drive the disk-selection dialog in a retry loop (max 20 attempts).
# Interprets zenity exit codes: 0 = double-click, 1 = cancel, 5 = timeout
# or OK. On a valid pick, devices marked in-use ("local"/"VBOX") require a
# confirmation dialog before being appended to the global DEVICE array.
function zenity_choose_disk(){
	local MAX_TRIES=20
	while (( MAX_TRIES-- )); do
		local IFS=:
		local SELECTION=( $(zenity_choose_second) )
		case "${SELECTION[0]}" in
			0)	echo dblCLICK :: Disk selection \#${SELECTION[1]}.;;
			1)	echo CANCELED :: Disk selection dialog for disk \"${DEVICE}\". Skipping\!
				break;;
			5)	if (( ${#SELECTION[*]} > 1 )); then
					echo clk_NEXT :: Disk selection \#${SELECTION[1]}.
				else
					echo TIME_OUT :: Or no selection\! SELECTION=${SELECTION[*]}. Skipping
					break
				fi;;
			*)	echo NEWSTATE :: Disk selection dialog returned ${SELECTION[*]}. Exiting\!
				EXIT 1;;
		esac
		# if the user made a selection but the device is inuse, WARN
		# (SELECTION[4] is the "mount kind" column: local / VBOX / ...)
		if (( ${#SELECTION[*]} > 1 )) && [[ "${SELECTION[4]}" =~ ^(local|VBOX)$ ]]; then
			#if (( $(zenity_warning_inuse "${SELECTION2[*]}") == 5 )); then
			if (( $(zenity_warning_inuse "${SELECTION[1]}") == 5 )); then
				DEVICE[${#DEVICE[*]}]=${SELECTION[1]}
				break
			fi
		# if the user made a selection and the device is free, MOVE ON
		elif (( ${#SELECTION[*]} > 1 )); then
			DEVICE[${#DEVICE[*]}]=${SELECTION[1]}
			break
		fi
	done
}
# Render the 8-column disk picker dialog and print
# "<exit code>:<dev>:<iface>:<size>:<mount>:<status>:<where>:<model>:<serial>"
# as one line (output reversed with `tac` and newline-stripped).
function zenity_choose_second(){
	local IFS=${DIFS}
	# dependant on global variables; DEVICE
	local DISPLAY_0_USER=$(GET_DISPLAY_0_USER)
	# set window title
	local zenityTitle=$(zenity_default_title)
	# inform user which disks have been choosen
	local zenityText="$(zenity_device_info_list)"
	# get selection instructions
	zenityText+=$(GET_CONFIG_SECTION \
			"$(GET_DISPLAY_0_CONFIG_FQFN)" \
			${UCST_DISK_INSTRUCTIONS})
	# get column headers
	eval local column=( $(GET_CONFIG_SECTION \
				"$(GET_DISPLAY_0_CONFIG_FQFN)" \
				${UCST_DISK_COLUMN_HEADERS}) )
	# Set zenity window position
	${ENABLE_WMCTRL} && MOV_WINDOW "${zenityTitle}" 12 50 &
	# launch zenity dialog
	(
	cat <<-ZENITY | su - ${DISPLAY_0_USER} -s /bin/bash 2>/dev/null
		DISPLAY=:${TARGET_DISPLAY} zenity \
			--width=1000 \
			--height=700 \
			--timeout=${DIALOG_TIMEOUT:-25} \
			--ok-label="NEXT" \
			--cancel-label="CANCEL" \
			--title="${zenityTitle// / }" \
			--text="${zenityText}" \
			--list \
			--print-column=ALL \
			--separator=: \
			--column "${column[0]}" \
			--column "${column[1]}" \
			--column "${column[2]}" \
			--column "${column[3]}" \
			--column "${column[4]}" \
			--column "${column[5]}" \
			--column "${column[6]}" \
			--column "${column[7]}" \
			$(zenity_disk_list)
	ZENITY
	echo $?:
	) | tac | tr -d '\n'
}
# Emit the zenity row data (quoted cells) for every device reported by
# GET_DEVICE_LIST.
function zenity_disk_list(){
	local DEV=""
	while read DEV; do
		GET_DEVICE_INFO_ARRAY ${DEV}
	done < <(GET_DEVICE_LIST)
}
# Emit the zenity row data for the task list: for each selection row,
# print its number plus the first two columns, re-quoted.
function zenity_selection_list(){
	local IFS=${DIFS}
	GET_SELECTIONS |\
	while read LINE; do
		eval ARGS=( ${LINE} )
		echo -n ${ARGS[0]} \"${ARGS[1]}\" \"${ARGS[2]}\" ""
	done
}
###########################################################################################
###########################################################################################
# Emit one quoted, space-separated record describing a device:
# name, interface, size, mount kind, status, mount location, model, serial.
# Results are cached per-PID under /dev/shm so repeated dialogs don't
# re-probe the hardware.
# Arguments: $1 - optional device path/name; defaults to ${DEVICE}.
# BUGFIX: the final echo ended with a stray "\" line continuation, which
# swallowed the closing "}" as an argument to echo — leaving this function
# (and every definition after it) unterminated at parse time.
function GET_DEVICE_INFO_ARRAY(){
	local DEV=$(basename "${1:-${DEVICE}}")
	if [ ! -f "/dev/shm/$$${FUNCNAME}_${DEV}" ]; then
		cat <<-SHMF > "/dev/shm/$$${FUNCNAME}_${DEV}"
			"${DEV}"
			"$(GET_DEVICE_INTERFACE ${DEV} | tr a-z A-Z)"
			"$(FORMAT_TO_GB $(GET_DEVICE_HWI_SIZE ${DEV}))"
			"$(GET_DEVICE_MOUNT ${DEV})"
			"$(GET_DEVICE_STATUS ${DEV})"
			"$(GET_DEVICE_MOUNT_LOCATION ${DEV})"
			"$(GET_DEVICE_HWI_MODEL ${DEV})"
			"$(GET_DEVICE_HWI_SERIAL ${DEV})"
		SHMF
	fi
	# join the cached lines into a single space-separated line, no newline
	echo -n $(cat "/dev/shm/$$${FUNCNAME}_${DEV}" | tr '\n' ' ')
}
# Classify how a device is in use. Prints one of:
#   "local" (a partition appears in /etc/mtab),
#   "VBOX"  (a vmdk matching this device is registered with VirtualBox),
#   "..."   (free; also returns 1 in that case).
# Arguments: $1 - optional device path/name; defaults to ${DEVICE}.
function GET_DEVICE_MOUNT(){
	local DISPLAY_0_USER=$(GET_DISPLAY_0_USER)
	local DEV=$(basename "${1:-${DEVICE}}")
	local TARGET_VM_MASK="$(GET_VRDEPORT ${DEV}).${DEV}.vmdk"
	# test mtab
	if grep -q "^/dev/${DEV}[0-9]*[[:space:]]" /etc/mtab; then
		echo local
		return 0
	fi
	# test vboxmanager list hdds
	cat <<-SU | su - ${DISPLAY_0_USER} -s /bin/bash 2>/dev/null
		which VBoxManage &>/dev/null &&\
		VBoxManage list hdds |\
		grep -q "^Location:[[:space:]].*${TARGET_VM_MASK}\$"
	SU
	# $? here is the su'd pipeline's status: 0 when the mask matched
	if (( $? )); then
		echo ...
		return 1
	else
		echo VBOX
		return 0
	fi
}
# Cache `vboxmanage list hdds` into a shm file, one attribute line per
# output line, re-keyed by the sed program as "<uuid><TAB><attribute>" so
# GET_VBOX_HDD_ATTRIBUTE can grep it.
# Arguments: $1 - path of the cache file to (over)write.
# BUGFIX: the su target was hard-coded to "localcosadmin" even though
# DISPLAY_0_USER was computed (and unused); every other VBoxManage call in
# this script runs as the display-0 user.
function SET_VBOX_HDDS_LIST_SHMF(){
	local DISPLAY_0_USER=$(GET_DISPLAY_0_USER)
	local SHMF=$1
	cat <<-SED | sed -n -f <(cat) \
		<(su - ${DISPLAY_0_USER} -c "which vboxmanage && vboxmanage list hdds") > "${SHMF}"
		/^UUID:/,/^$/{
			/^UUID:/{h;d}
			/^$/d
			G
			s/\(.*\)\nUUID:[[:space:]]\+\(.*\)/\2\t\1/p
		}
	SED
}
# Grep the cached hdd list for lines carrying a given attribute name that
# also match a regex (e.g. find the "Location:" row ending in a vmdk mask).
# Arguments: $1 - regex applied to the whole line; $2.. - attribute label.
# Builds the per-PID cache on first use via SET_VBOX_HDDS_LIST_SHMF.
function GET_VBOX_HDD_ATTRIBUTE(){
	local rEGEX=$1
	shift
	local ATTRIB=$*
	[ -f "/dev/shm/$$${FUNCNAME}" ] || SET_VBOX_HDDS_LIST_SHMF "/dev/shm/$$${FUNCNAME}"
	cat <<-SED | sed -n -f <(cat) "/dev/shm/$$${FUNCNAME}"
		/[[:space:]]${ATTRIB}[[:space:]]/{
			/${rEGEX}/p
		}
	SED
}
# Print the VirtualBox medium UUID of the raw-disk vmdk bound to a device
# (first field of the matching "Location:" cache row).
function GET_DEVICE_VBOX_UUID(){
	local DEV=$(basename "${1:-${DEVICE}}")
	local TARGET_VM_MASK="$(GET_VRDEPORT ${DEV}).${DEV}.vmdk"
	#local TARGET_VM_MASK="disk1.vdi"
	local IFS=${DIFS}
	local DATA=( $(GET_VBOX_HDD_ATTRIBUTE "${TARGET_VM_MASK}\$" Location:) )
	echo ${DATA[0]}
}
# Print the filesystem path of the raw-disk vmdk bound to a device
# (everything after "<uuid> Location:" in the matching cache row).
function GET_DEVICE_VBOX_LOCATION(){
	local DEV=$(basename "${1:-${DEVICE}}")
	local TARGET_VM_MASK="$(GET_VRDEPORT ${DEV}).${DEV}.vmdk"
	local IFS=${DIFS}
	local DATA=( $(GET_VBOX_HDD_ATTRIBUTE "${TARGET_VM_MASK}\$" Location:) )
	echo ${DATA[*]:2}
}
# Print the UUID of the VM that uses a device's vmdk: the last word of the
# "Usage:" cache row, with its trailing ")" stripped.
function GET_DEVICE_VBOX_MACHINE_UUID(){
	local DEV=$(basename "${1:-${DEVICE}}")
	local VBOX_UUID=$(GET_DEVICE_VBOX_UUID ${DEV})
	local IFS=${DIFS}
	local DATA=( $(GET_VBOX_HDD_ATTRIBUTE "^${VBOX_UUID}" Usage:) )
	echo ${DATA[*]: -1} | sed 's/)$//'
}
# Print the name of the VM that uses a device's vmdk: the middle words of
# the "Usage:" cache row (dropping the uuid/label prefix and the trailing
# "(UUID: ...)" suffix).
function GET_DEVICE_VBOX_MACHINE(){
	local DEV=$(basename "${1:-${DEVICE}}")
	local VBOX_UUID=$(GET_DEVICE_VBOX_UUID ${DEV})
	local IFS=${DIFS}
	local DATA=( $(GET_VBOX_HDD_ATTRIBUTE "^${VBOX_UUID}" Usage:) )
	echo ${DATA[*]:2:${#DATA[*]}-4}
}
# Exit status 0 when the VM with the given UUID appears in
# `VBoxManage list runningvms` (run as the display-0 user).
# Arguments: $1 - VM UUID (without braces).
function IS_VBOX_MACHINE_RUNNING(){
	local DISPLAY_0_USER=$(GET_DISPLAY_0_USER)
	local DEVICE_VBOX_MACHINE_UUID=$1
	su - ${DISPLAY_0_USER} -s /bin/bash \
		-c "which VBoxManage && VBoxManage list runningvms" |\
	grep -q "[[:space:]]{${DEVICE_VBOX_MACHINE_UUID}}\$"
}
# Print a human-readable status for a device, derived from its mount kind:
# "browsable" (mounted locally), "RUNNING"/"off-line" (attached to a VBox
# VM, depending on whether that VM is live), or "..." (free).
function GET_DEVICE_STATUS(){
	local DEV=$(basename "${1:-${DEVICE}}")
	local DEVICE_MOUNT=$(GET_DEVICE_MOUNT ${DEV})
	case "${DEVICE_MOUNT}" in
		local)	echo browsable
			;;
		VBOX)	local DEVICE_VBOX_MACHINE_UUID=$(GET_DEVICE_VBOX_MACHINE_UUID ${DEV})
			IS_VBOX_MACHINE_RUNNING ${DEVICE_VBOX_MACHINE_UUID} \
				&& echo RUNNING \
				|| echo off-line
			;;
		*)	echo ...
			;;
	esac
}
# Describe WHERE a device is in use: a numbered "[n] partition..mountpoint"
# list from /etc/mtab when mounted locally, the owning VM name when
# attached to VirtualBox, or "..." (with status 1) when free.
function GET_DEVICE_MOUNT_LOCATION(){
	local DEV=$(basename "${1:-${DEVICE}}")
	local DEVICE_MOUNT=$(GET_DEVICE_MOUNT ${DEV})
	local TARGET_VM_MASK="$(GET_VRDEPORT ${DEV}).${DEV}.vmdk"
	case "${DEVICE_MOUNT}" in
		local)	local _DEV="" MOUNT="" x="" CNT=1 IFS=${DIFS}
			while read _DEV MOUNT x; do
				echo [$(( CNT++ ))] ${_DEV##*/}..${MOUNT}
			done < <(grep "^/dev/${DEV}[0-9]*[[:space:]]" /etc/mtab)
			;;
		VBOX)	echo $(GET_DEVICE_VBOX_MACHINE ${DEV})
			;;
		*)	echo ...
			return 1
			;;
	esac
}
###########################################################################################
###########################################################################################
# Extract the body of a "[ Section Name ]" block from the user's tool-list
# config file (stripping the bracket lines, blanks, comments and one
# leading tab per line).
# Arguments: $1 - unused here beyond positional shift (callers pass the
#                 config path first); remaining words - the section title.
# NOTE(review): $1 is captured into SECTION but callers pass the config
# FILE as $1 and the title afterwards — the sed range matches on "[ ... ]"
# with the FILE PATH as the section name; verify against the callers.
function GET_CONFIG_SECTION(){
	# dependant on global variables; USER_TOOL_LIST_PATH
	local DISPLAY_0_HOME=$(GET_DISPLAY_0_HOME)
	local SECTION=$1
	#echo ${DISPLAY_0_HOME} ${SECTION} | LOG
	cat <<-SED | sed -n -f <(cat) "${DISPLAY_0_HOME}/${USER_TOOL_LIST_PATH}"
		/[[:space:]]*\[ ${SECTION} \]/,/[[:space:]]*\[/{
			/[[:space:]]*\[/d	# delete first and last line
			/^$/d			# delete empty lines
			/^[[:space:]]*#/d	# delete comment lines
			s/^\t//			# remove single leading tab char
			p			# print
		}
	SED
}
# Resolve the ISO path from the current selection (SELECTION[4]).
# Absolute paths are tried as-is, then re-rooted under the user's config
# dir and home; relative paths are tried under the config dir and home.
# Prints the resolved path, or nothing when no candidate exists.
function GET_SELECTION_PATH(){
	# dependant on global variables; SELECTION
	local DISPLAY_0_HOME=$(GET_DISPLAY_0_HOME)
	local DISPLAY_0_CONFIG_DIR=$(GET_DISPLAY_0_CONFIG_DIR)
	local ISO=${SELECTION[4]}
	if [[ "${ISO}" =~ ^\/ ]]; then
		if [ ! -f "${ISO}" ]; then
			if   [ -f "${DISPLAY_0_CONFIG_DIR}${ISO}" ]; then
				local ISO="${DISPLAY_0_CONFIG_DIR}${ISO}"
			elif [ -f "${DISPLAY_0_HOME}${ISO}" ]; then
				local ISO="${DISPLAY_0_HOME}${ISO}"
			else
				unset ISO
			fi
		fi
	else
		if   [ -f "${DISPLAY_0_CONFIG_DIR}/${ISO}" ]; then
			local ISO="${DISPLAY_0_CONFIG_DIR}/${ISO}"
		elif [ -f "${DISPLAY_0_HOME}/${ISO}" ]; then
			local ISO="${DISPLAY_0_HOME}/${ISO}"
		else
			unset ISO
		fi
	fi
	echo ${ISO}
}
# Resolve the display-0 user's configuration directory
# (~<user>/${USER_CONFIG_DIR_RELATIVE}), falling back to /dev/zero when it
# does not exist so downstream path tests simply fail.
# Depends on GLOBAL vars: TARGET_DISPLAY, USER_CONFIG_DIR_RELATIVE.
# Depends on functions: GET_DISPLAY_USER, GET_USER_HOME_DIR.
# BUGFIX: the fallback was misspelled "/dev/zreo"; the sibling
# GET_DISPLAY_0_CONFIG_FQFN correctly falls back to /dev/zero.
function GET_DISPLAY_0_CONFIG_DIR(){
	local DISPLAY_0_USER=$(GET_DISPLAY_USER ${TARGET_DISPLAY:-0})
	local DISPLAY_0_HOME=$(GET_USER_HOME_DIR ${DISPLAY_0_USER})
	local DISPLAY_0_CONFIG_DIR="${DISPLAY_0_HOME}/${USER_CONFIG_DIR_RELATIVE}"
	[ -d "${DISPLAY_0_CONFIG_DIR}" ] \
		&& echo "${DISPLAY_0_CONFIG_DIR}" \
		|| echo "/dev/zero"
}
# Resolve the display-0 user's config FILE
# (~<user>/${USER_CONFIG_DIR_RELATIVE}/${USER_CONFIG_FILE_NAME}),
# falling back to /dev/zero when missing so reads see empty content.
function GET_DISPLAY_0_CONFIG_FQFN(){
	# Dependant on GLOBAL vars; TARGET_DISPLAY
	# Deplendant on functions; GET_DISPLAY_USER, GET_USER_HOME_DIR
	local DISPLAY_0_USER=$(GET_DISPLAY_USER ${TARGET_DISPLAY:-0})
	local DISPLAY_0_HOME=$(GET_USER_HOME_DIR ${DISPLAY_0_USER})
	local DISPLAY_0_CONFIG="${DISPLAY_0_HOME}/${USER_CONFIG_DIR_RELATIVE}/${USER_CONFIG_FILE_NAME}"
	[ -f "${DISPLAY_0_CONFIG}" ] \
		&& echo "${DISPLAY_0_CONFIG}" \
		|| echo "/dev/zero"
}
# Shorthand: the username owning X display ${TARGET_DISPLAY} (default :0).
function GET_DISPLAY_0_USER(){
	GET_DISPLAY_USER ${TARGET_DISPLAY:-0}
}
# Shorthand: the home directory of the display-0 user.
function GET_DISPLAY_0_HOME(){
	GET_USER_HOME_DIR $(GET_DISPLAY_USER ${TARGET_DISPLAY:-0})
}
# When POPUP_LOG is enabled, open a terminator window on the target display
# tailing this script's log file, and record the tail PID in POPUP_LOG_PID
# so EXIT can close the window later.
# BUGFIX: the tail command referenced the undefined variable ${LOG_}; the
# log file path is held in ${LOG} everywhere else in this script.
function OPEN_POPUP_LOG(){
	${POPUP_LOG:-false} || return
	local DISPLAY_0_USER=$(GET_DISPLAY_USER ${TARGET_DISPLAY:-0})
	cat <<-SU | su - ${DISPLAY_0_USER} -s /bin/bash &
		DISPLAY=:${TARGET_DISPLAY} terminator \
			-m -b \
			-T "$(zenity_default_title)" \
			-e "tail -f \"${LOG}\""
	SU
	POPUP_LOG_PID=$!
	sleep 2
	# the background PID is su's; chase it down to the tail process
	POPUP_LOG_PID=$(FIND_PID ${POPUP_LOG_PID} tail)
	LOG INFO :: ${FUNCNAME} :: PID = ${POPUP_LOG_PID}
}
# Common exit path: remove all per-PID temp/shm artefacts, schedule the
# popup-log window to close, then exit with $1 (default 0).
function EXIT(){
	# cleanup
	rm -rf "${TMP}"
	rm -rf "${SHM}"
	rm -rf "/dev/shm/$$"*
	if ${POPUP_LOG:-false}; then
		echo INFO :: ${FUNCNAME} :: POPUP_LOG_PID = ${POPUP_LOG_PID} >> "${LOG}"
		# give the user time to read the log before killing the tail window
		eval "sleep ${POPUP_LOG_AUTOCLOSE_DELAY}; kill ${POPUP_LOG_PID};"
	fi
	exit ${1:- 0}
}
# SOURCE Dependant Functions
source "$(dirname "${BASH_SOURCE}")/../functions/functions.general.sh"
source "$(dirname "${BASH_SOURCE}")/../functions/functions.test.sh"
# GLOBAL vars; fully qualified script paths and names
BASH_SRCFQFN=$(canonicalpath "${BASH_SOURCE}")
BASH_SRCNAME=$(basename "${BASH_SRCFQFN}")
BASH_SRCDIR=$(dirname "${BASH_SRCFQFN}")
#
#LOG="/var/log/udev.vbox_launcher.log"
#DEBUG=true
# GLOBAL vars; source config file
SOURCE_CONFIG_GLOBAL_VARS "config"
# GLOBAL vars; LOG
# BUGFIX: derive the default log name only when LOG is still unset/empty.
# The original tested (( ${#LOG} > 0 )), which clobbered a LOG value coming
# from the config file and, when LOG was unset, left it empty so the
# following touch/chmod operated on "".
if (( ${#LOG} == 0 )); then
	LOG=$(basename "${BASH_SRCNAME}" .sh)
	LOG="/var/log/${LOG}.log"
fi
touch "${LOG}"
chmod 777 "${LOG}"
# Target DISPLAY to popup dialogs and create VM's
TARGET_DISPLAY=${TARGET_DISPLAY:-0}
TARGET_DISPLAY=${TARGET_DISPLAY//[^0-9.]/}
# udev rule file name
UDEV_RULE_CONFIG_FILE_NAME_DEFAULT=${UDEV_RULE_CONFIG_FILE_NAME_DEFAULT:-99-customUDEV.rules}
# GLOBAL vars; User Config Section Titles (UCST_)
# (section headings looked up in the user's tool-list config file)
UCST_TOOL_SELECTIONS=${UCST_TOOL_SELECTIONS:-Task List Selections}
UCST_TOOL_INSTRUCTIONS=${UCST_TOOL_INSTRUCTIONS:-Task Selection Instructions}
UCST_TOOL_COLUMN_HEADERS=${UCST_TOOL_COLUMN_HEADERS:-Task List Column Headers}
UCST_DISK_INSTRUCTIONS=${UCST_DISK_INSTRUCTIONS:-Disk Selection Instructions}
UCST_DISK_COLUMN_HEADERS=${UCST_DISK_COLUMN_HEADERS:-Disk List Column Headers}
UCST_NAMING_INSTRUCTIONS=${UCST_NAMING_INSTRUCTIONS:-Naming Instructions}
UCST_WARNING_INSTRUCTIONS=${UCST_WARNING_INSTRUCTIONS:-Warning Instructions}
UCST_GLOBAL_VAR_DEFAULTS=${UCST_GLOBAL_VAR_DEFAULTS:-Global Defaults}
# User Task Managment Folder
USER_CONFIG_DIR_RELATIVE=${USER_CONFIG_DIR_RELATIVE:-ISO}
USER_CONFIG_FILE_NAME=${USER_CONFIG_FILE_NAME:-${USER_CONFIG_FILE_NAME_DEFAULT:-tool.list.txt}}
#USER_TOOL_DIR=${USER_TOOL_DIR:-ISO}				# OLD, REMOVE ASAP
#TOOl_LIST_FILE_NAME=${TOOl_LIST_FILE_NAME:-tool.list.txt}	# OLD, REMOVE ASAP
#USER_TOOL_LIST_PATH=${USER_TOOL_DIR}/${TOOl_LIST_FILE_NAME}	# OLD, REMOVE ASAP
# Setup User Task Managment Folder
# (ensure the display user's config dir and tool list file exist)
cat <<-SU | su - $(GET_DISPLAY_0_USER) -s /bin/bash
	mkdir -p ~/"${USER_CONFIG_DIR_RELATIVE}"
	# rm -f ~/"${USER_CONFIG_DIR_RELATIVE}/${USER_CONFIG_FILE_NAME}"
	touch ~/"${USER_CONFIG_DIR_RELATIVE}/${USER_CONFIG_FILE_NAME}"
SU
SETUP_CONFIG_IF_EMPTY "${USER_CONFIG_FILE_NAME_DEFAULT}" "$(GET_DISPLAY_0_CONFIG_FQFN)"
# GLOBAL vars; source user config file
SOURCE_CONFIG_GLOBAL_VARS <(GET_CONFIG_SECTION "$(GET_DISPLAY_0_CONFIG_FQFN)" ${UCST_GLOBAL_VAR_DEFAULTS})
# GLOBAL vars; mac address base, vrdeport base, DIALOG_TIMEOUT
# (MAC is normalised: colons removed, truncated to 10 hex digits so
#  GET_MAC can append a 2-digit per-device suffix)
MAC=${MAC:-080027ABCD}
MAC=${MAC//:/}
MAC=${MAC:0:10}
VRDEPORT=${VRDEPORT:-33890}
DIALOG_TIMEOUT=${DIALOG_TIMEOUT:-25}
BRIDGED_ETH=${BRIDGED_ETH:-eth0}
# GLOBAL vars; VirtualBox
SCTL='ide'
# GLOBAL vars; TMP
TMP="/tmp/$$_${BASH_SRCNAME}_$$"
mkdir "${TMP}"
SHM="/dev/shm/$$_${BASH_SRCNAME}_$$"
mkdir "${SHM}"
chmod +t "${SHM}"
# GLOBAL vars; DIFS = default array delimiter
DIFS=${IFS}
# GLOBAL vars; DEBUG
DEBUG=${DEBUG:-false}
# Disable wmctrl; this controls cosistent window placement, doesn't work through X11 or VNC
ENABLE_WMCTRL=${ENABLE_WMCTRL:-true}
# Source git repo sudirectory
#http='https://raw.github.com/michaelsalisbury/builder/master/udev'
# Project NAME
#NAME='x11vnc'
# Get details of the latest version
#latest=`wget -O - -o /dev/null "${http}/LATEST.TXT"`
# GLOGAL vars; POPUP_LOG, POPUP_LOG_AUTOCLOSE_DELAY
POPUP_LOG=${POPUP_LOG:-false}
POPUP_LOG_AUTOCLOSE_DELAY=${POPUP_LOG_AUTOCLOSE_DELAY:-60}
# Display Help
[[ "$1" =~ ^(-h|--help)$ ]] && HELP
#for DEV in `GET_DEVICE_LIST`; do
#	echo $DEV	:: `GET_DEVICE_HWI_MODEL ${DEV}`\
#			:: `GET_DEVICE_HWI_SERIAL ${DEV}`\
#			:: `FORMAT_TO_GB $(GET_DEVICE_HWI_SIZE ${DEV})`
#done
# main
# (all of main's stdout/stderr is appended to the log file; EXIT performs
#  the temp-file cleanup and sets the final exit status)
main "$@" >> "${LOG}" 2>&1
EXIT
# simulation control
#
# trigger add and remove
# udevadm trigger --action=remove --sysname-match="sdb"
# udevadm trigger --action=add --sysname-match="sdb"
#
# force re-read of linked udev rules
# stop udev; sleep 1; start udev
| true
|
228ad1bdb1b0f959c463d48d00218ec35d5fdb9b
|
Shell
|
Grompokstar/saveApiData
|
/bin/full_deploy.sh
|
UTF-8
| 636
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/bash
# Build the project, zip it, ship it to the remote host over ssh/scp and
# unpack it into the release directory.
# Required environment: DEPLOY_PATH, DEPLOY_ADDRESS, DEPLOY_PORT.
# IMPROVEMENT: all expansions are now quoted so timestamps/paths containing
# unexpected characters cannot word-split or glob.
# Deploy revision using in webpack.prod.js
export DEPLOY_REVISION=$(date +"%s")

# vars
ARCHIVE_NAME="$DEPLOY_REVISION.zip"
REMOTE_RELEASES_PATH="$DEPLOY_PATH"

set -o errexit # Exit on error

# Build project
yarn build

# Zip project
zip "$ARCHIVE_NAME" -r *

# Remote commands
# Make dir
ssh "$DEPLOY_ADDRESS" -p "$DEPLOY_PORT" "mkdir -p $REMOTE_RELEASES_PATH"

# Copy zip
scp -P "$DEPLOY_PORT" "$ARCHIVE_NAME" "$DEPLOY_ADDRESS:$REMOTE_RELEASES_PATH"

# Unzip and remove archive
ssh "$DEPLOY_ADDRESS" -p "$DEPLOY_PORT" "
  cd $REMOTE_RELEASES_PATH &&
  unzip $ARCHIVE_NAME &&
  rm $ARCHIVE_NAME
"

# Remove local archive
rm "$ARCHIVE_NAME"
| true
|
7316b1161daddd9e03530ea46c05b0622b6d39a0
|
Shell
|
gauglertodd/docker_scripts
|
/build_coin.sh
|
UTF-8
| 701
| 2.734375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Build COIN-OR Cbc ${VERSION} from the stable SVN branch and package it
# as a .deb with checkinstall, then move the package into ~/py2deb/.
VERSION="2.9"
RELEASE="1"
sudo apt-get update
sudo apt-get install -y subversion g++ make zlib1g-dev pkg-config \
	checkinstall
# --trust-server-cert-failures=unknown-ca: projects.coin-or.org serves a
# cert from an unknown CA; accepted non-interactively here
svn co --non-interactive --trust-server-cert-failures=unknown-ca https://projects.coin-or.org/svn/Cbc/stable/${VERSION} coin_Cbc
cd coin_Cbc
./configure --enable-cbc-parallel=yes --prefix=/usr
make
# printf "\ninstall:\n\tmv fit2tcx /usr/bin/\n" >> Makefile
# description-pak supplies the .deb package description for checkinstall
printf "COIN-OR Linear Program Solver\n" > description-pak
### this part is sadly interactive
sudo checkinstall --pkgversion ${VERSION} --pkgrelease ${RELEASE} -y
sudo chown ${USER}:${USER} coin-cbc_${VERSION}-${RELEASE}*.deb
mv coin-cbc_${VERSION}-${RELEASE}*.deb ~/py2deb/
| true
|
901d24ddf86d62bc0dc93dbd0be02f79af02e760
|
Shell
|
rcopstein/Share
|
/createManyFiles.sh
|
UTF-8
| 139
| 2.765625
| 3
|
[] |
no_license
|
#!/bin/bash
# Create directories 0001..0125, each populated with files 1.txt, 4.txt,
# 7.txt ... 148.txt; the inner counter is echoed as it goes.
for (( n = 1; n <= 125; n++ ))
do
	dir=$(printf '%04d' "$n")
	mkdir "$dir"
	for (( k = 1; k <= 150; k += 3 ))
	do
		echo "$k"
		touch "$dir/$k.txt"
	done
done
| true
|
3045ea768eeb770e8cdc6aea162adf8bdc93b0f0
|
Shell
|
demorym/COSMO_extended_sandbox
|
/2_lm_c/set
|
UTF-8
| 1,193
| 3.015625
| 3
|
[] |
no_license
|
#!/bin/bash
# Configure a COSMO run for an NX*NY compute grid with NIO I/O processes:
# patches INPUT_ORG and the job script in place via sed.
# Arguments: $1=NX $2=NY $3=NIO $4=compact_io ("yes" packs all IO ranks
# onto one node; anything else gives each IO rank its own node).
NX=$1
NY=$2
NIO=$3
compact_io=$4
echo "grid $NX*$NY with $NIO IO processes"
sed -i "s|nprocx=[0-9]*,|nprocx=${NX},|" INPUT_ORG
sed -i "s|nprocy=[0-9]*,|nprocy=${NY},|" INPUT_ORG
sed -i "s|nprocio=[0-9]*,|nprocio=${NIO},|" INPUT_ORG
if [ "$compact_io" == "yes" ]; then
  NODES_IO=1
else
  NODES_IO=$[ $NIO ]
fi
NODES_COMP=$[ $NX*$NY ]
NODES=$[ $NODES_IO + $NODES_COMP ]
CORES=$[ 8*$NODES ]
#sed -i "s|mppwidth=[0-9]*|mppwidth=$CORES|" job
sed -i "s|mppwidth=[0-9]*|mppwidth=$NODES|" job
sed -i "s|SBATCH --nodes.*|SBATCH --nodes=$NODES|" job
sed -i "s|COSMO_NPROC_NODEVICE.*|COSMO_NPROC_NODEVICE $NIO|" job
if [ "$compact_io" == "yes" ]; then
  # compact: compute ranks one-per-node, all IO ranks share one aprun group;
  # CUDA proxy (MPS) must be enabled so ranks can share the GPU
  sed -i "s|aprun.*|aprun -N 1 -n $NODES_COMP ./lm_f90 : -N $NIO -n $NIO ./lm_f90|" job
  grep "setenv CRAY_CUDA_PROXY" job &> /dev/null
  if [ "$?" != "0" ]; then
    echo "error: add the following to your job script:"
    echo "setenv CRAY_CUDA_PROXY 1"
    exit;
  fi
  sed -i "s|CRAY_CUDA_PROXY.*|CRAY_CUDA_PROXY 1|" job
else
  sed -i "s|aprun.*|aprun -N 1 -n $NODES ./lm_f90|" job
  sed -i "s|CRAY_CUDA_PROXY.*|CRAY_CUDA_PROXY 0|" job
fi
echo "using $NODES nodes with $CORES cores : $NX*$NY+$NIO"
| true
|
e192b9a185ca0f845e3943b65b4ed3889cdc2ad8
|
Shell
|
craja26/PostgreSQL
|
/pgbackrest.sh
|
UTF-8
| 1,799
| 3.1875
| 3
|
[] |
no_license
|
## Installation:
## Runbook for installing/configuring pgBackRest for a PostgreSQL 11
## cluster (stanza "pg0app") plus example backup/restore commands.
## NOTE: this is a command log, not an unattended script — the `su -`
## lines open interactive shells.
# connect as a root
yum install pgbackrest --nogpgcheck
mkdir -p /var/log/pgbackrest
chown postgres:postgres /var/log/pgbackrest
mkdir /etc/pgbackrest
bash -c 'cat << EOF > /etc/pgbackrest/pgbackrest.conf
[global]
repo1-path=/var/lib/pgbackrest
repo1-retention-full=2
[pg0app]
pg1-path=/var/lib/pgsql/11/data
pg1-port=5432
EOF'
chown postgres:postgres /etc/pgbackrest/pgbackrest.conf
# create backup directory and grant permissions for backup directory
mkdir /var/lib/pgbackrest
chmod 750 /var/lib/pgbackrest
chown -R postgres:postgres /var/lib/pgbackrest/
# Update postgres conf file for archiving.
archive_mode = on
archive_command = 'pgbackrest --stanza=pg0app archive-push %p'
# Restart postgres
systemctl restart postgresql-11
# create the Stanza
su - postgres
pgbackrest stanza-create --stanza=pg0app --log-level-console=info
# check the configurations
su - postgres
pgbackrest --stanza=pg0app --log-level-console=info check
# Perform a Backup
# By default pgBackRest will attemp to perform an incremental backup. However, an incremental backup must be based on a full backup and
# since no full backup existed pgBackRest ran a full backup instead.
pgbackrest backup --type=full --stanza=pg0app --log-level-console=info
# information about the backup
pgbackrest --stanza=pg0app --set=20210801-033002F_20210801-123001I info
# restore database required data folder to be empty.
# here is the command to restore database without delete files in data folder
pgbackrest --stanza=pg0app --delta --log-level-console=detail restore --type=immediate --target-action=promote --set=20210801-033002F_20210801-123001I
# restore command.
pgbackrest --stanza=pg0app --delta --log-level-console=detail restore --type=immediate --target-action=promote --set=20210801-033002F
| true
|
d744ffcb1d1a878b39f9585c5861be7939b70abd
|
Shell
|
NormanDunbar/C68Port
|
/Tests/01_LocalInteger/buildAll.sh
|
UTF-8
| 189
| 3.15625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#
# A script to compile everything named *.c in the current directory.
#
# IMPROVEMENT: iterate with a glob instead of parsing `ls` output
# (robust against unusual file names, no extra fork).
# BUGFIX: $f already carries the .c suffix, so the original message printed
# "file.c.c" and "file.c.exe" — report the real source and target names.
for f in *.c
do
	echo Compiling "${f}" to "${f%.c}".exe ...
	./buildTests.sh "${f%.c}"
done
| true
|
461bfe0bb10dee48366d0c79a75b7fc69624ed84
|
Shell
|
flavio-fernandes/odl-openstack-ci-1
|
/odl-openstack-dg.sh
|
UTF-8
| 942
| 3.234375
| 3
|
[] |
no_license
|
#!/bin/bash
# NOTE: This file takes two jobs from the OpenStack infra and
# puts them here. See here:
#
# https://github.com/openstack-infra/project-config/blob/master/jenkins/jobs/networking-odl.yaml
# Prepares a Fedora or Ubuntu host (creates a jenkins user/group, installs
# lsb_release on Fedora) then runs the real job script as the jenkins user.

# Check for Fedora vs. Ubuntu
if [ -f "/etc/debian_version" ]; then
    IS_UBUNTU=1
else
    IS_FEDORA=1
fi

if [ "$IS_FEDORA" == "1" ]; then
    # *SIGH*. This is required to get lsb_release
    sudo yum -y install redhat-lsb-core indent
fi

# Add the Jenkins user
JENKGRP=$(sudo grep jenkins /etc/group)
JENKUSR=$(sudo grep jenkins /etc/passwd)
if [ "$JENKGRP" == "" ]; then
    sudo groupadd jenkins
fi
if [ "$JENKUSR" == "" ]; then
    if [ "$IS_FEDORA" == "1" ]; then
        sudo adduser -g jenkins jenkins
    else
        # Debian adduser has no -g; reuse the numeric gid created above
        JGID=$(cat /etc/group|grep jenkins| cut -d ":" -f 3)
        sudo adduser --quiet --gid $JGID jenkins
    fi
fi

# Run the script as the jenkins user
sudo -u jenkins -H sh -c "./odl-openstack-dg-run.sh"
| true
|
59eb9d886e855b8649716e957069d8ff2ada984c
|
Shell
|
EricArray/colaboreitor
|
/bash/get_cp.sh
|
UTF-8
| 351
| 3.078125
| 3
|
[] |
no_license
|
# Fetch "cp" records from the local API: with two args (user pass) list all,
# with three (user pass cp) fetch a single record by id.
if [ $# -ne 2 ] && [ $# -ne 3 ]; then
    echo "usage: user pass [cp]"
    exit 1
fi

user="$1"
pass="$2"
cp="$3"

server="http://localhost:8080"
# credentials travel in a custom header, not HTTP basic auth
auth="X-Authorization: $user:$pass"
content_type="Content-Type: application/json"

if [ -z "$cp" ]
then
    curl -X GET -H "$auth" "$server/cp"
else
    curl -X GET -H "$auth" "$server/cp/$cp"
fi
echo
| true
|
6592930e6fbcd260c5ae04f6e6cdf4957aa8c068
|
Shell
|
BSShubham97/programs_btcmp
|
/ProgramArray/similarnumber.sh
|
UTF-8
| 396
| 3.578125
| 4
|
[] |
no_license
|
#! /bin/bash -x
# Find the numbers that appear in BOTH lists (0..100 vs 11 22 33 ...).
declare -a numbers1
declare -a numbers2
declare -a diff
# BUGFIX: the original did `numbers1="$i"`, which overwrote element 0 on
# every iteration instead of building up the 0..100 array.
for((i=0;i<=100;i++))
do
	numbers1[i]="$i"
	echo "${numbers1[*]}"
done
numbers1=(${numbers1[*]})
numbers2=( 11 22 33 44 55 66 77 88 99 )
echo ${numbers2[*]}
# BUGFIX: `uniq -u` keeps the values present in only ONE list (the
# NON-similar ones); `uniq -d` keeps the duplicated values, i.e. those
# present in both arrays (each array holds distinct values).
diff=(`echo ${numbers1[*]} ${numbers2[*]} | tr ' ' '\n' | sort | uniq -d `)
echo "Similar :${diff[*]}"
| true
|
bdcf4f20b1c575dad5442218e5eef6b7ef52de4e
|
Shell
|
Vulturem/introduction_to_linux_gb
|
/lesson4/task2/ascript.sh
|
UTF-8
| 130
| 2.921875
| 3
|
[] |
no_license
|
#!/bin/bash
# Create a random number (1..10) of directories 001, 002, ... each holding a
# file 00N.txt whose content is "file00N".
# (the original shebang was "#bin/bash", which is not a valid shebang line)

# Pick the directory count ONCE. The original evaluated
# $((1 + RANDOM % 10)) inside the for-loop condition, drawing a fresh random
# limit on every comparison instead of using one fixed random count.
count=$((1 + RANDOM % 10))

for ((x = 1; x <= count; x++)); do
    name="00$x"
    mkdir -p "$name"
    # abort rather than create files in the wrong place if cd fails
    cd "$name" || exit 1
    touch "$name.txt"
    echo "file$name" >> "$name.txt"
    cd .. || exit 1
done
| true
|
8c71b4745193e7151d4b0611d98b33381ea63e69
|
Shell
|
seanwooj/dotfiles
|
/makesymlinks.sh
|
UTF-8
| 1,029
| 4.0625
| 4
|
[] |
no_license
|
#!/bin/bash
############################
# .make.sh
# This script creates symlinks from the home directory to any desired dotfile in ~/dotfiles
# Inspired by: http://blog.smalleycreative.com/tutorials/using-git-and-github-to-manage-your-dotfiles/
############################

###### Variables

dir=~/dotfiles                 # dotfiles directory
olddir=~/dotfiles_old          # old dotfiles directory
files="bashrc vim vimrc irbrc" # list of files and folders to symlink into the home directory

############################

# Create dotfiles_old in homedir
echo "Creating $olddir for backup of existing dotfiles in ~"
mkdir -p "$olddir"
echo ".....done"

# Change to dotfiles directory; abort rather than operate on the wrong tree
# (the original continued even when the cd failed)
echo "Changing to the $dir directory"
cd "$dir" || exit 1
echo "......done"

# Move existing dotfiles to olddir and create symlinks
for file in $files; do
    echo "Moving existing dotfiles from ~ to $olddir"
    # only move a dotfile that actually exists (or is a dangling symlink),
    # so mv does not spam errors on a fresh machine
    if [ -e ~/."$file" ] || [ -L ~/."$file" ]; then
        mv ~/."$file" "$olddir"
    fi
    echo "Creating symlink to $file in home directory."
    ln -s "$dir/$file" ~/."$file"
done

# Pointing crontab to this crontab
crontab crontab
| true
|
2e710069ddef237ccc34420f55ec916181d5f20e
|
Shell
|
chrisnoto/KB
|
/ansible/ansible-zabbix/roles/base_inst_zbclient_linux/files/pageinout.sh
|
UTF-8
| 1,203
| 3.046875
| 3
|
[] |
no_license
|
#!/bin/bash
# Zabbix helper: collects memory paging counters, active/inactive memory and
# per-state TCP connection counts from this host and pushes them to the
# Zabbix server as trapper items via zabbix_sender.
SENDER='/usr/bin/zabbix_sender'
# Zabbix server address the values are sent to (-z below)
HOST="10.67.37.192"
# this host's identity as known to Zabbix: Hostname= from the agent config
IP=`grep -i '^Hostname=' /etc/zabbix/zabbix_agentd.conf |cut -d'=' -f2`
#active/inactive memory
# first data row of vmstat -a (row 3); fields 6 and 5 -- assumes the
# standard "... free inact active ..." column layout, TODO confirm per distro
res=`vmstat -a|awk 'NR==3{print $6,$5}'`
act=`echo $res|awk '{print $1}'`
inact=`echo $res|awk '{print $2}'`
#page in/out
# cumulative pages paged in/out since boot, from vmstat -s
pi=`vmstat -s|grep "paged in"|awk '{print $1}'`
po=`vmstat -s|grep "paged out"|awk '{print $1}'`
#tcp connection
# defaults so states with no current connections still report 0
LISTEN=0
CLOSE_WAIT=0
TIME_WAIT=0
ESTABLISHED=0
FIN_WAIT1=0
FIN_WAIT2=0
# build "STATE=count" assignments from the last netstat column per tcp line
CONN=`netstat -antl | awk '/^tcp/ {++state[$NF]} END {for(key in state) print key"="state[key]}'`
# NOTE(review): eval of command output -- tolerable only because netstat
# state names are fixed identifiers; do not reuse this pattern on input that
# could contain arbitrary text
eval $CONN
$SENDER -s "$IP" -z "$HOST" -k "memory.pi" -o "$pi"
$SENDER -s "$IP" -z "$HOST" -k "memory.po" -o "$po"
$SENDER -s "$IP" -z "$HOST" -k "memory.active" -o "$act"
$SENDER -s "$IP" -z "$HOST" -k "memory.inactive" -o "$inact"
$SENDER -s "$IP" -z "$HOST" -k "tcpconn.listen" -o "$LISTEN"
$SENDER -s "$IP" -z "$HOST" -k "tcpconn.closewait" -o "$CLOSE_WAIT"
$SENDER -s "$IP" -z "$HOST" -k "tcpconn.timewait" -o "$TIME_WAIT"
$SENDER -s "$IP" -z "$HOST" -k "tcpconn.finwait1" -o "$FIN_WAIT1"
$SENDER -s "$IP" -z "$HOST" -k "tcpconn.finwait2" -o "$FIN_WAIT2"
$SENDER -s "$IP" -z "$HOST" -k "tcpconn.established" -o "$ESTABLISHED"
| true
|
47023ca124a2c7be81bb6a59622d03a7d6ef6183
|
Shell
|
LeavaTail/dotfiles
|
/common/scripts/checkfile.sh
|
UTF-8
| 566
| 3.953125
| 4
|
[] |
permissive
|
#!/bin/bash -eu
#
# usage:
#   checkfile dotfile
# ex.:
#   checkfile /home/user/.vimrc
#
# description:
#   check dotfile is correct deploy.
#   prints "[*] path" when the path is a symlink into this dotfiles repo,
#   "[?] path" when it is a symlink pointing elsewhere, and
#   "[ ] path" when it is not a symlink at all.
#
# return:
#   0: succeed
#   1: command wrong usage

# shebang options are ignored when invoked as "bash checkfile.sh",
# so request strict mode explicitly as well
set -eu

# Invalid usage
if [ "$#" != 1 ]; then
    echo "ERROR: command usage was wrong." 1>&2
    exit 1
fi

CHAR=' '
# symlink check
if [[ -L $1 ]]; then
    # absolute directory containing this script; ${BASH_SOURCE[0]} is now
    # quoted -- the original's unquoted expansion broke on paths with spaces
    CURRENT=$(realpath "$(dirname "${BASH_SOURCE[0]}")")
    # dotfiles repository root: two levels above this script
    DOTDIR=$(dirname "$(dirname "${CURRENT}")")
    SOURCE=$(readlink "$1")
    case ${SOURCE} in
        ${DOTDIR}*)
            # link resolves into the repo: correctly deployed
            CHAR='*'
            ;;
        *)
            # link points somewhere outside the repo
            CHAR='?'
            ;;
    esac
fi
echo "[${CHAR}] $1"
| true
|
3165586cb6dddfdef276dcf844b94d1c0569e008
|
Shell
|
Thibault2ss/heroku-doppler-buildpack
|
/bin/compile
|
UTF-8
| 2,738
| 3.84375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# bin/compile <build-dir> <cache-dir> <env-dir>
# set -o errexit # always exit on error
# set -o pipefail # don't ignore exit codes when piping output
# set -o nounset # fail on unset variables
# Heroku buildpack "compile" step: detect the build dyno's OS/architecture,
# download the matching Doppler CLI into <build-dir>/doppler, and write a
# .profile.d bootstrap that loads Doppler secrets into the app environment
# at dyno boot.
# Parse args
# (CACHE_DIR and ENV_DIR are captured per the buildpack API but not used
#  below -- presumably reserved; confirm before removing)
BUILD_DIR=$1
CACHE_DIR=$2
ENV_DIR=$3
# identify OS
# map `uname -s` to the os slug expected by Doppler's download endpoint
os="unknown"
uname_os=$(uname -s)
if [ "$uname_os" = "Darwin" ]; then
os="macos"
elif [ "$uname_os" = "Linux" ]; then
os="linux"
elif [ "$uname_os" = "FreeBSD" ]; then
os="freebsd"
elif [ "$uname_os" = "OpenBSD" ]; then
os="openbsd"
elif [ "$uname_os" = "NetBSD" ]; then
os="netbsd"
else
echo "ERROR: Unsupported OS '$uname_os'"
echo ""
echo "Please report this issue:"
echo "https://github.com/DopplerHQ/cli/issues/new?template=bug_report.md&title=[BUG]%20Unsupported%20OS"
exit 1
fi
echo "Detected OS '$os'"
# identify arch
# map `uname -m` to Doppler's arch slug
arch="unknown"
uname_machine=$(uname -m)
if [ "$uname_machine" = "i386" ] || [ "$uname_machine" = "i686" ]; then
arch="i386"
elif [ "$uname_machine" = "amd64" ] || [ "$uname_machine" = "x86_64" ]; then
arch="amd64"
elif [ "$uname_machine" = "armv6" ] || [ "$uname_machine" = "armv6l" ]; then
arch="armv6"
elif [ "$uname_machine" = "armv7" ] || [ "$uname_machine" = "armv7l" ]; then
arch="armv7"
# armv8?
elif [ "$uname_machine" = "arm64" ]; then
arch="arm64"
else
echo "ERROR: Unsupported architecture '$uname_machine'"
echo ""
echo "Please report this issue:"
echo "https://github.com/DopplerHQ/cli/issues/new?template=bug_report.md&title=[BUG]%20Unsupported%20architecture"
exit 1
fi
echo "Detected architecture '$arch'"
# download the CLI tarball and unpack it into the slug at <build-dir>/doppler
DOWNLOAD_URL="https://cli.doppler.com/download?os=$os&arch=$arch&format=tar"
filename="doppler-download.tar.gz"
curl --silent --retry 3 -o "$filename" -LN -D - "$DOWNLOAD_URL"
mkdir -p "$BUILD_DIR"/doppler
tar -zxf $filename -C $BUILD_DIR/doppler
chmod +x $BUILD_DIR/doppler/doppler
mkdir -p "$BUILD_DIR"/.profile.d
# Generate the boot-time bootstrap script. The heredoc delimiter is
# UNquoted, so $BUILD_DIR / $HOME expand NOW (build time) while each
# backslash-escaped \$ survives into the generated file and expands at dyno
# boot. The heredoc body below is the literal content of
# load-doppler-env.sh and must stay exactly as written.
cat <<EOF >"$BUILD_DIR"/.profile.d/load-doppler-env.sh
# #!/bin/sh
PATH=$HOME/doppler:\$PATH
ENV_FILE=$HOME/.env
if [ -z \${DOPPLER_TOKEN+x} ]; then
echo "ERROR: DOPPLER_TOKEN must be set"
exit 1
elif [ -z \${DOPPLER_PROJECT+x} ] || [ -z \${DOPPLER_CONFIG+x} ]; then
doppler secrets download --token \$DOPPLER_TOKEN --no-file --format env > \$ENV_FILE
else
doppler secrets download --token \$DOPPLER_TOKEN -p \$DOPPLER_PROJECT -c \$DOPPLER_CONFIG --no-file --format env > \$ENV_FILE
fi;
# replace \$ with \\\$ (except for \$PORT)
(sed 's/\\\$/\\\\$/g' \$ENV_FILE | sed 's/\\\\\\\$PORT/\\\$PORT/g') > \${ENV_FILE}_formatted
rm \$ENV_FILE
mv \${ENV_FILE}_formatted \$ENV_FILE
# Override doppler variables with variables already set in heroku env
env | sed 's/=\(.*\)/="\1"/' >> \$ENV_FILE
set -a
source \$ENV_FILE
set +a
EOF
| true
|
12257f32858fc5f1b9fbc736bc875b973195e34a
|
Shell
|
Johnnyww/Awesome-TTRSS
|
/entrypoint.sh
|
UTF-8
| 571
| 3.140625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Container entrypoint: expose /var/www under the URL root path derived from
# SELF_URL_PATH, initialise the database, then hand PID 1 to s6.
set -e
# remove trailing / if any.
# (${var/%pat/} deletes a single match of 'pat' anchored at the END)
SELF_URL_PATH=${SELF_URL_PATH/%\//}
# extract the root path from SELF_URL_PATH (i.e http://domain.tld/<root_path>).
# (${var/#pat/} deletes a match anchored at the START; with the greedy '*'
#  this strips everything through the last '/', so a single-level root path
#  is assumed -- TODO confirm for nested paths like /a/b)
ROOT_PATH=${SELF_URL_PATH/#http*\:\/\/*\//}
if [ "${ROOT_PATH}" == "${SELF_URL_PATH}" ]; then
# no root path in SELF_URL_PATH.
# (the substitution changed nothing, so the URL has no path component;
#  serve the app directly at /var/tmp/www)
mkdir -p /var/tmp
ln -sf "/var/www" "/var/tmp/www"
else
# serve the app under /var/tmp/www/<root_path>
mkdir -p /var/tmp/www
ln -sf "/var/www" "/var/tmp/www/${ROOT_PATH}"
fi
# one-time database setup, then replace this shell with the s6 supervisor
php /configure-db.php
exec s6-svscan /etc/s6/
#php /configure-db.php
#exec supervisord -c /etc/supervisor/conf.d/supervisord.conf
| true
|
6d7482c7a8645eb23e451ef4a5f8e3ce8575397a
|
Shell
|
de-jcup/sechub
|
/sechub-server/src/main/resources/run.sh
|
UTF-8
| 948
| 3.234375
| 3
|
[
"MIT",
"ANTLR-PD",
"LicenseRef-scancode-generic-exception",
"BSD-3-Clause",
"LGPL-2.0-or-later",
"LGPL-2.1-only",
"GPL-1.0-or-later",
"LicenseRef-scancode-oracle-openjdk-exception-2.0",
"MPL-1.1",
"MPL-2.0",
"CC-PDDC",
"LicenseRef-scancode-warranty-disclaimer",
"EPL-2.0",
"GPL-2.0-only",
"EPL-1.0",
"CC0-1.0",
"Classpath-exception-2.0",
"Apache-2.0",
"LGPL-2.1-or-later",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-public-domain",
"GPL-2.0-or-later",
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause",
"Apache-1.1",
"MPL-1.0",
"CDDL-1.1",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
#!/bin/bash
# SPDX-License-Identifier: MIT
#
# Launcher for the SecHub server jar.
#
# Usage: run.sh [profile|byenv] [jar-location]
#   run.sh demomode -> starts demomode variant (for special java options set please change the JAVA_OPT env)
#   run.sh          -> starts kubernetes setup
echo "Starting run script:run.sh $1 $2"

# Set debug options if required: any non-empty JAVA_ENABLE_DEBUG other than
# "false" attaches a JDWP agent on JAVA_DEBUG_PORT (default 5005).
# (replaces the archaic '[ x"$VAR" != x ]' non-empty test with -n)
if [ -n "${JAVA_ENABLE_DEBUG}" ] && [ "${JAVA_ENABLE_DEBUG}" != "false" ]; then
    JAVA_DBG_OPTS="-agentlib:jdwp=transport=dt_socket,server=y,suspend=n,address=${JAVA_DEBUG_PORT:-5005}"
fi

# Spring profile selection: first argument, falling back to SPRING_PROFILE
# when absent or "byenv". ("$1" is now quoted; the original's unquoted test
# broke on values containing whitespace.)
if [ -z "$1" ] || [ "$1" = "byenv" ]; then
    PROFILE_TO_USE=$SPRING_PROFILE
else
    PROFILE_TO_USE=$1
fi
# NOTE(review): PROFILE_TO_USE is never referenced below -- presumably it is
# meant to reach the JVM (e.g. via SECHUB_OPTS); confirm upstream usage.

# Jar location: second argument, default /home/javarun/app.jar
if [ -z "$2" ] ; then
    JAR_LOCATION="/home/javarun/app.jar"
else
    JAR_LOCATION="$2"
fi

# java.security.egd necessary for optimized random space -> otherwise start
# is slow because of entropy scanning etc.
# file encoding per default UTF-8
java $JAVA_DBG_OPTS -Dfile.encoding=UTF-8 -Djava.security.egd=file:/dev/./urandom $SECHUB_OPTS -jar $JAR_LOCATION
| true
|
af6862653f1f93c609bdb65e61c9608ed0790dcb
|
Shell
|
anteskoric/BSPA1
|
/try-host.sh
|
UTF-8
| 1,146
| 4.125
| 4
|
[] |
no_license
|
#!/bin/bash
# <Pings given host machine or ipaddress>
# <Ante Skoric and Timo Quednau>
# <06.10.2019>
# ------------------------------------------------------------

# This function shows the help text for this bash script
usage(){
echo "
$0 [-h|-s sec] hostname|ip-address
Pings the given hostname or ip-address.
Use -h for help, -s is the first parameter it represents the seconds the hostname|ip-address will
be pinged every -s seconds.
You can use hostname or ip-address to ping the server.
"
}

# Ping a host once every N seconds forever, printing "<host> OK" or
# "<host> FAILED" after each probe.
#   $1 - hostname or ip-address
#   $2 - interval in seconds
ping_forever() {
    local host=$1
    local interval=$2
    while true; do
        if ping -c 1 "$host" > /dev/null; then
            echo "$host OK"
        else
            echo "$host FAILED"
        fi
        sleep "$interval"
    done
}

# ---------------------- main --------------------------------
# check parameters. "$1" is quoted and the zero-argument case is handled
# first: the original ran '[ $1 = "-h" ]', which emitted a test error when
# the script was started without any arguments.
if [ $# -lt 1 ] || [ $# -gt 3 ] || [ "$1" = "-h" ]; then
    usage
    exit 1
fi

if [ $# -eq 1 ]; then
    # default interval: 10 seconds
    ping_forever "$1" 10
elif [ $# -eq 3 ] && [ "$1" = "-s" ]; then
    ping_forever "$3" "$2"
else
    usage
fi
# ---------------------- end ---------------------------------
exit 0
| true
|
77683a3106f17485cedce2b5b0a52051a0320622
|
Shell
|
mastafunk/DX2-CWMR
|
/assets/hijack.killboot
|
UTF-8
| 1,718
| 3.328125
| 3
|
[] |
no_license
|
#!/system/bin/sh
#
# Last-resort teardown: stop every Android init service, lazily unmount /tmp
# plus all yaffs2/ext3 partitions, kill adbd and remove the init sockets.
# Progress is appended to $logfile.

logfile=/data/rec_log.txt
# BUG FIX: the original line was 'echo >> " Inside kill all script." >> $logfile',
# which treated the message as a redirection target (creating a stray file of
# that name) and logged only an empty line.
echo " Inside kill all script." >> $logfile

export PATH="/sbin:${PATH}"

sleep 5s

# kill all services registered with init (properties named init.svc.<name>)
for i in $(getprop | grep init.svc | sed -r 's/^\[init\.svc\.(.+)\]:.*$/\1/'); do
	# stop this service (or try to anyway)
	echo " Attempting to stop service:" >> $logfile
	echo ${i} >> $logfile
	stop ${i}
	sleep 1
done

# unmount /tmp (lazy unmount, retried up to 10 times)
echo " Attempting to unmount /tmp" >> $logfile
for i in $(seq 1 10); do
	TMP=$(mount | grep /tmp)
	if [ -z "$TMP" ] ; then
		break
	fi
	umount -l /tmp
	sleep 1
done

# unmount all yaffs2 partitions (retried until none remain, max 10 rounds;
# the original also logged the empty $TMP before break, writing blank lines)
echo " Attempting to unmount any yaffs2 partitions" >> $logfile
for i in $(seq 1 10); do
	TMP=$(mount | grep yaffs2 | awk '{print $3}')
	if [ -z "$TMP" ] ; then
		break
	fi
	for j in $(mount | grep yaffs2 | awk '{print $3}'); do
		echo " Attempting to unmount:" >> $logfile
		echo $j >> $logfile
		umount -l "$j"
	done
	sleep 1
done

# unmount all ext3 partitions (same retry scheme as above)
echo " Attempting to unmount any ext3 partitions" >> $logfile
for i in $(seq 1 10); do
	TMP=$(mount | grep ext3 | awk '{print $3}')
	if [ -z "$TMP" ] ; then
		break
	fi
	for j in $(mount | grep ext3 | awk '{print $3}'); do
		echo " Attempting to unmount:" >> $logfile
		echo $j >> $logfile
		umount -l "$j"
	done
	sleep 1
done

# kill any existing adbd processes
echo " Attempting to kill any adbd running" >> $logfile
# NOTE(review): 'ps | grep adbd' emits whole ps lines, not PIDs, so this kill
# most likely never worked as intended; extracting the PID column (usually
# field 2 on Android toolbox ps) would be the fix -- TODO confirm on device.
kill $(ps | grep adbd)
echo "msc_adb" > /dev/usb_device_mode

# try to remove our sockets!
echo " Attempting to remove all sockets" >> $logfile
rm -f /dev/socket/*
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.