blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
d6c68f9bb2448b7068d59be14df96c0d14c8c140
|
Shell
|
dmariogatto/muget
|
/src/MuGet.Forms.iOS/appcenter-pre-build.sh
|
UTF-8
| 1,558
| 3.828125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# App Center pre-build hook for the MuGet iOS app: injects the App Center
# secrets into App.xaml.cs and stamps CFBundleVersion /
# CFBundleShortVersionString into Info.plist.
plistPath="${BUILD_REPOSITORY_LOCALPATH}/src/MuGet.Forms.iOS/Info.plist"
appCsPath="${BUILD_REPOSITORY_LOCALPATH}/src/MuGet.Forms.UI/App.xaml.cs"
buildNumber=$APPCENTER_BUILD_ID
# Default to 0 so the arithmetic below never sees an empty operand.
buildOffset=${BUILD_ID_OFFSET:-0}
buildShortVersion="$VERSION_NAME"
# exit if a command fails
set -e
# Quote the paths: with the unquoted original, an empty/unset
# BUILD_REPOSITORY_LOCALPATH made "[ ! -f ]" evaluate to false and the
# missing-file check was silently skipped.
if [ ! -f "$plistPath" ] ; then
echo " [!] File doesn't exist at specified Info.plist path: ${plistPath}"
exit 1
fi
if [ ! -f "$appCsPath" ] ; then
echo " [!] File doesn't exist at specified App.xaml.cs path: ${appCsPath}"
exit 1
fi
# Replace the APPCENTER_* placeholder tokens with the real secrets.
# (sed -i "" is the macOS/BSD in-place form; App Center iOS builders are macOS.)
sed -i "" "s/APPCENTER_ANDROID/${APPCENTER_ANDROID}/g" "${appCsPath}"
sed -i "" "s/APPCENTER_IOS/${APPCENTER_IOS}/g" "${appCsPath}"
buildVersion=$((buildNumber + buildOffset))
# Fall back to a date-based marketing version when VERSION_NAME is not set.
if [ -z "$buildShortVersion" ] ; then
buildShortVersion="$(date -u +'%Y.%-m.%-d')"
fi
echo " (i) Provided Info.plist path: ${plistPath}"
bundleVerCmd="/usr/libexec/PlistBuddy -c \"Print CFBundleVersion\" \"${plistPath}\""
bundleShortVerCmd="/usr/libexec/PlistBuddy -c \"Print CFBundleShortVersionString\" \"${plistPath}\""
# verbose / debug print commands
set -v
# ---- Current Bundle Version:
eval "$bundleVerCmd"
# ---- Set Bundle Version:
/usr/libexec/PlistBuddy -c "Set :CFBundleVersion ${buildVersion}" "${plistPath}"
# ---- New Bundle Version:
eval "$bundleVerCmd"
# ---- Current Bundle Short Version String:
eval "$bundleShortVerCmd"
# ---- Set Bundle Short Version String:
/usr/libexec/PlistBuddy -c "Set :CFBundleShortVersionString ${buildShortVersion}" "${plistPath}"
# ---- New Bundle Short Version String:
eval "$bundleShortVerCmd"
set +v
| true
|
9749d96655a976af99e2aa916af8b2b0311f6760
|
Shell
|
hardboydu/PuTTY
|
/wcwidth-update/mk-putty-wcwidth
|
UTF-8
| 12,735
| 3.25
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh -
# Entry point: emit a generated wcwidth implementation for the requested
# Unicode version into wcwidth-<version>.c in the current directory.
# VER and DIR are intentionally global -- the add_* helpers read them.
main() {
	VER=${1:-10.0.0}
	DIR=${2:-$(pwd)}
	add_text > "wcwidth-${VER}.c"
}
# add_combi: print the body of the C "combining[]" interval table.
# Reads $DIR/UNIDATA-$VER/extracted/DerivedGeneralCategory.txt and emits one
# "{0xAAAA, 0xBBBB},\t/* name [to name] */" entry per run of consecutive
# codepoints whose general category is Me, Mn or Cf.
add_combi() {
# Stage 1: strip '#' comments and trailing blanks from the data file.
sed -e 's/#.*//' -e 's/[\t ]*$//' \
< $DIR/UNIDATA-$VER/extracted/DerivedGeneralCategory.txt |
# Stage 2: expand each "XXXX[..YYYY];Category" record into one
# "decimal-codepoint;Category" line per codepoint. Hex is converted via a
# byte-pair lookup table built in BEGIN (portable: no strtonum dependency);
# 4- and 5-digit codepoints are zero-padded to 6 digits first.
awk -F'[\t ]*;[\t ]*' '
BEGIN{
for(i=0; i<256; i++) {
hexconv[sprintf("%x",i)] = i;
hexconv[sprintf("%02x",i)] = i;
hexconv[sprintf("%02X",i)] = i;
}
}
$1 != "" && ($2 == "Me" || $2 == "Mn" || $2 == "Cf") {
st=$1;
sub("\\.\\..*","",st);
en=$1;
sub(".*\\.\\.","",en);
if (length(st) == 4 ) st = "0" st;
if (length(st) == 5 ) st = "0" st;
if (length(en) == 4 ) en = "0" en;
if (length(en) == 5 ) en = "0" en;
st = hexconv[substr(st,1,2)]*65536+ hexconv[substr(st,3,2)]*256+ hexconv[substr(st,5,2)];
en = hexconv[substr(en,1,2)]*65536+ hexconv[substr(en,3,2)]*256+ hexconv[substr(en,5,2)];
for(i=st; i<=en; i++)
print i ";" $2
}
' | sort -n |
# Stage 3: two-input awk. With F=1 it loads UnicodeData.txt to map hex
# codepoints to character names; with F=2 (stdin, the sorted list above) it
# coalesces consecutive codepoints into intervals and prints each via
# outline() when a gap is found (and once more at END).
awk -F\; '
F==1{t[toupper($1)]=$2; firstn = -1; lastn = -2; }
F==2 {
if ($1 == lastn || $1 == lastn+1) {
lastn = $1; next;
} else {
if (firstn > 0) outline();
firstn = lastn = $1;
}
}
END{
if (firstn > 0) outline();
}
function outline() {
st=sprintf("%04x", firstn);
en=sprintf("%04x", lastn);
if (st != en )
print "  {0x"st ", 0x"en"},\t/*",t[toupper(st)],"to",t[toupper(en)],"*/";
else
print "  {0x"st ", 0x"en"},\t/*",t[toupper(st)],"*/";
firstn = lastn = -1;
}' F=1 $DIR/UNIDATA-$VER/UnicodeData.txt F=2 -
}
# add_wide: print the body of the C "wide[]" interval table.
# Reads $DIR/UNIDATA-$VER/EastAsianWidth.txt and emits interval entries for
# every codepoint of East Asian width W or F, plus hardwired CJK ranges and
# the whole of planes 2 and 3 (pre-seeded in BEGIN below).
add_wide() {
# Stage 1: strip '#' comments and trailing blanks.
sed -e 's/#.*//' -e 's/[\t ]*$//' \
< $DIR/UNIDATA-$VER/EastAsianWidth.txt |
# Stage 2: mark codepoints as wide in tbl[], then dump "codepoint;W" lines
# in END. Note the data records may *clear* a pre-seeded entry (tbl[i] = v
# with v==0), so explicit data always wins over the BEGIN defaults.
awk -F'[\t ]*;[\t ]*' '
BEGIN{
for(i=0; i<256; i++) {
hexconv[sprintf("%x",i)] = i;
hexconv[sprintf("%02x",i)] = i;
hexconv[sprintf("%02X",i)] = i;
}
for(i=0; i<65533; i++) {
tbl[i+2*65536] = 1;
tbl[i+3*65536] = 1;
}
# CJK Unified Ideographs Extension A: U+3400..U+4DBF
# CJK Unified Ideographs: U+4E00..U+9FFF
# CJK Compatibility Ideographs: U+F900..U+FAFF
for(i=13312; i<19904; i++) tbl[i] = 1;
for(i=19968; i<40960; i++) tbl[i] = 1;
for(i=63744; i<64256; i++) tbl[i] = 1;
}
$1 != "" {
st=$1;
sub("\\.\\..*","",st);
en=$1;
sub(".*\\.\\.","",en);
v=($2 == "W" || $2 == "F");
if (length(st) == 4 ) st = "0" st;
if (length(st) == 5 ) st = "0" st;
if (length(en) == 4 ) en = "0" en;
if (length(en) == 5 ) en = "0" en;
st = hexconv[substr(st,1,2)]*65536+ hexconv[substr(st,3,2)]*256+ hexconv[substr(st,5,2)];
en = hexconv[substr(en,1,2)]*65536+ hexconv[substr(en,3,2)]*256+ hexconv[substr(en,5,2)];
for(i=st; i<=en; i++)
tbl[i] = v;
}
END{
for(i in tbl)
if(tbl[i]) print i ";W"
}
' | sort -n |
# Stage 3: same interval-coalescing two-pass awk as in add_combi --
# F=1 loads names from UnicodeData.txt, F=2 merges the sorted stdin list.
awk -F\; '
F==1{t[toupper($1)]=$2; firstn = -1; lastn = -2; }
F==2 {
if ($1 == lastn || $1 == lastn+1) {
lastn = $1; next;
} else {
if (firstn > 0) outline();
firstn = lastn = $1;
}
}
END{
if (firstn > 0) outline();
}
function outline() {
st=sprintf("%04x", firstn);
en=sprintf("%04x", lastn);
if (st != en )
print "  {0x"st ", 0x"en"},\t/*",t[toupper(st)],"to",t[toupper(en)],"*/";
else
print "  {0x"st ", 0x"en"},\t/*",t[toupper(st)],"*/";
firstn = lastn = -1;
}' F=1 $DIR/UNIDATA-$VER/UnicodeData.txt F=2 -
}
# add_ambig: print the body of the C "ambiguous[]" interval table.
# Reads $DIR/UNIDATA-$VER/EastAsianWidth.txt and emits interval entries for
# every codepoint whose East Asian width class is A (Ambiguous).
add_ambig() {
# Stage 1: strip '#' comments and trailing blanks.
sed -e 's/#.*//' -e 's/[\t ]*$//' \
< $DIR/UNIDATA-$VER/EastAsianWidth.txt |
# Stage 2: expand "XXXX[..YYYY];A" records into one "codepoint;A" line per
# codepoint (hex converted via the byte-pair lookup table from BEGIN).
awk -F'[\t ]*;[\t ]*' '
BEGIN{
for(i=0; i<256; i++) {
hexconv[sprintf("%x",i)] = i;
hexconv[sprintf("%02x",i)] = i;
hexconv[sprintf("%02X",i)] = i;
}
}
$1 != "" && $2 == "A" {
st=$1;
sub("\\.\\..*","",st);
en=$1;
sub(".*\\.\\.","",en);
if (length(st) == 4 ) st = "0" st;
if (length(st) == 5 ) st = "0" st;
if (length(en) == 4 ) en = "0" en;
if (length(en) == 5 ) en = "0" en;
st = hexconv[substr(st,1,2)]*65536+ hexconv[substr(st,3,2)]*256+ hexconv[substr(st,5,2)];
en = hexconv[substr(en,1,2)]*65536+ hexconv[substr(en,3,2)]*256+ hexconv[substr(en,5,2)];
for(i=st; i<=en; i++)
print i ";A"
}
' | sort -n |
# Stage 3: coalesce the sorted codepoints into printed intervals, with
# names looked up from UnicodeData.txt (F=1 pass), as in add_combi.
awk -F\; '
F==1{t[toupper($1)]=$2; firstn = -1; lastn = -2; }
F==2 {
if ($1 == lastn || $1 == lastn+1) {
lastn = $1; next;
} else {
if (firstn > 0) outline();
firstn = lastn = $1;
}
}
END{
if (firstn > 0) outline();
}
function outline() {
st=sprintf("%04x", firstn);
en=sprintf("%04x", lastn);
if (st != en )
print "  {0x"st ", 0x"en"},\t/*",t[toupper(st)],"to",t[toupper(en)],"*/";
else
print "  {0x"st ", 0x"en"},\t/*",t[toupper(st)],"*/";
firstn = lastn = -1;
}' F=1 $DIR/UNIDATA-$VER/UnicodeData.txt F=2 -
}
# add_text: write the complete generated wcwidth.c to stdout. Fixed C code
# comes from here-documents; the three data tables are filled in by calling
# add_combi / add_wide / add_ambig between the fragments. Unquoted '<<!'
# heredocs expand $VER into the generated comments; '<<\!' heredocs are
# emitted verbatim.
add_text() {
cat <<!
/*
* This is an implementation of wcwidth() and wcswidth() (defined in
* IEEE Std 1002.1-2001) for Unicode.
*
* http://www.opengroup.org/onlinepubs/007904975/functions/wcwidth.html
* http://www.opengroup.org/onlinepubs/007904975/functions/wcswidth.html
*
* In fixed-width output devices, Latin characters all occupy a single
* "cell" position of equal width, whereas ideographic CJK characters
* occupy two such cells. Interoperability between terminal-line
* applications and (teletype-style) character terminals using the
* UTF-8 encoding requires agreement on which character should advance
* the cursor by how many cell positions. No established formal
* standards exist at present on which Unicode character shall occupy
* how many cell positions on character terminals. These routines are
* a first attempt of defining such behavior based on simple rules
* applied to data provided by the Unicode Consortium.
*
* For some graphical characters, the Unicode standard explicitly
* defines a character-cell width via the definition of the East Asian
* FullWidth (F), Wide (W), Half-width (H), and Narrow (Na) classes.
* In all these cases, there is no ambiguity about which width a
* terminal shall use. For characters in the East Asian Ambiguous (A)
* class, the width choice depends purely on a preference of backward
* compatibility with either historic CJK or Western practice.
* Choosing single-width for these characters is easy to justify as
* the appropriate long-term solution, as the CJK practice of
* displaying these characters as double-width comes from historic
* implementation simplicity (8-bit encoded characters were displayed
* single-width and 16-bit ones double-width, even for Greek,
* Cyrillic, etc.) and not any typographic considerations.
*
* Much less clear is the choice of width for the Not East Asian
* (Neutral) class. Existing practice does not dictate a width for any
* of these characters. It would nevertheless make sense
* typographically to allocate two character cells to characters such
* as for instance EM SPACE or VOLUME INTEGRAL, which cannot be
* represented adequately with a single-width glyph. The following
* routines at present merely assign a single-cell width to all
* neutral characters, in the interest of simplicity. This is not
* entirely satisfactory and should be reconsidered before
* establishing a formal standard in this area. At the moment, the
* decision which Not East Asian (Neutral) characters should be
* represented by double-width glyphs cannot yet be answered by
* applying a simple rule from the Unicode database content. Setting
* up a proper standard for the behavior of UTF-8 character terminals
* will require a careful analysis not only of each Unicode character,
* but also of each presentation form, something the author of these
* routines has avoided to do so far.
*
* http://www.unicode.org/unicode/reports/tr11/
*
* Markus Kuhn -- 2007-05-26 (Unicode 5.0)
*
* Permission to use, copy, modify, and distribute this software
* for any purpose and without fee is hereby granted. The author
* disclaims all warranties with regard to this software.
*
* Latest version: http://www.cl.cam.ac.uk/~mgk25/ucs/wcwidth.c
*/
#include <wchar.h>
#include "putty.h" /* for prototypes */
struct interval {
unsigned int first;
unsigned int last;
};
/* auxiliary function for binary search in interval table */
static int bisearch(unsigned int ucs, const struct interval *table, int max) {
int min = 0;
int mid;
if (ucs < table[0].first || ucs > table[max].last)
return 0;
while (max >= min) {
mid = (min + max) / 2;
if (ucs > table[mid].last)
min = mid + 1;
else if (ucs < table[mid].first)
max = mid - 1;
else
return 1;
}
return 0;
}
/* The following two functions define the column width of an ISO 10646
* character as follows:
*
* - The null character (U+0000) has a column width of 0.
*
* - Other C0/C1 control characters and DEL will lead to a return
* value of -1.
*
* - Non-spacing and enclosing combining characters (general
* category code Mn or Me in the Unicode database) have a
* column width of 0.
*
* - SOFT HYPHEN (U+00AD) has a column width of 1.
*
* - Other format characters (general category code Cf in the Unicode
* database) and ZERO WIDTH SPACE (U+200B) have a column width of 0.
*
* - Hangul Jamo medial vowels and final consonants (U+1160-U+11FF)
* have a column width of 0.
*
* - Spacing characters in the East Asian Wide (W) or East Asian
* Full-width (F) category as defined in Unicode Technical
* Report #11 have a column width of 2.
*
* - All remaining characters (including all printable
* ISO 8859-1 and WGL4 characters, Unicode control characters,
* etc.) have a column width of 1.
*
* This implementation assumes that wchar_t characters are encoded
* in ISO 10646.
*/
int mk_wcwidth(unsigned int ucs)
{
/* sorted list of non-overlapping intervals of non-spacing characters */
!
# $VER expands into the generated table comment below.
cat <<!
/* All Mn, Me and Cf characters from version $VER of
http://www.unicode.org/Public/UNIDATA/extracted/DerivedGeneralCategory.txt
*/
static const struct interval combining[] = {
!
# Fill in the combining[] table body.
add_combi
cat <<!
};
/* sorted list of non-overlapping intervals of wide characters */
/* All 'W' and 'F' characters from version $VER of
http://www.unicode.org/Public/UNIDATA/EastAsianWidth.txt
*/
static const struct interval wide[] = {
!
# Fill in the wide[] table body.
add_wide
# '<<\!' quotes the delimiter: this C code is emitted with no expansion.
cat <<\!
};
/* Fast test for 8-bit control characters and many ISO8859 characters. */
/* NOTE: this overrides the 'Cf' definition of the U+00AD character */
if (ucs < 0x0300) {
if (ucs == 0)
return 0;
if (ucs < 32 || (ucs >= 0x7f && ucs < 0xa0))
return -1;
return 1;
}
/* binary search in table of non-spacing characters */
if (bisearch(ucs, combining,
sizeof(combining) / sizeof(struct interval) - 1))
return 0;
/* The first wide character is U+1100, everything below it is 'normal'. */
if (ucs < 0x1100)
return 1;
/* Hangul Jamo medial vowels and final consonants (U+1160-U+11FF)
* are zero length despite not being Mn, Me or Cf */
if (ucs >= 0x1160 && ucs <= 0x11FF)
return 0;
/* if we arrive here, ucs is not a combining or C0/C1 control character */
return 1 + (bisearch(ucs, wide, sizeof(wide) / sizeof(struct interval) - 1));
}
int mk_wcswidth(const unsigned int *pwcs, size_t n)
{
int w, width = 0;
for (;*pwcs && n-- > 0; pwcs++)
if ((w = mk_wcwidth(*pwcs)) < 0)
return -1;
else
width += w;
return width;
}
/*
* The following functions are the same as mk_wcwidth() and
* mk_wcswidth(), except that spacing characters in the East Asian
* Ambiguous (A) category as defined in Unicode Technical Report #11
* have a column width of 2. This variant might be useful for users of
* CJK legacy encodings who want to migrate to UCS without changing
* the traditional terminal character-width behaviour. It is not
* otherwise recommended for general use.
*/
int mk_wcwidth_cjk(unsigned int ucs)
{
/* sorted list of non-overlapping intervals of East Asian Ambiguous
* characters. */
!
cat <<!
/* All 'A' characters from version $VER of
http://www.unicode.org/Public/UNIDATA/EastAsianWidth.txt
*/
static const struct interval ambiguous[] = {
!
# Fill in the ambiguous[] table body.
add_ambig
cat <<\!
};
int w = mk_wcwidth(ucs);
if (w != 1 || ucs < 128) return w;
/* binary search in table of ambiguous characters */
if (bisearch(ucs, ambiguous,
sizeof(ambiguous) / sizeof(struct interval) - 1))
return 2;
return 1;
}
int mk_wcswidth_cjk(const unsigned int *pwcs, size_t n)
{
int w, width = 0;
for (;*pwcs && n-- > 0; pwcs++)
if ((w = mk_wcwidth_cjk(*pwcs)) < 0)
return -1;
else
width += w;
return width;
}
!
}
# Entry point: forward all command-line arguments to main.
main "$@"
| true
|
b6783b58a7d6a0972e71962114eca1c0027935ed
|
Shell
|
mwvaughn/bwa-0.59
|
/lonestar/wrapper.sh
|
UTF-8
| 4,639
| 2.890625
| 3
|
[] |
no_license
|
# Distributed BWA alignment wrapper (TACC parametric launcher).
# Fetches a reference genome and FASTQ reads (via iget_cached), splits the
# reads into chunks, aligns each chunk in parallel with "bwa aln" (+
# sampe/samse), then merges the per-chunk SAM output into $OUTPUT_SAM.
tar -xzf bin.tgz
export PATH=$HOME/bin:$PATH:./bin
#query1=/vaughn/sample.fq
#databaseFasta=/shared/iplantcollaborative/genomeservices/builds/0.1/Arabidopsis_thaliana/Col-0_thale_cress/10/de_support/genome.fas
# SPLIT_COUNT / 4 = number of records per BWA job
SPLIT_COUNT=16000000
OUTPUT_SAM=bwa_output.sam
# Vars passed in from outside
REFERENCE=${databaseFasta}
QUERY1=${query1}
QUERY2=${query2}
CPUS=${IPLANT_CORES_REQUESTED}
# NOTE(review): the thread count is hardcoded to 12 even though CPUS
# captures IPLANT_CORES_REQUESTED above and the numThreads option below is
# commented out -- presumably deliberate for this queue; confirm.
ARGS="-t 12"
# Append each bwa-aln option only when the corresponding variable was provided.
if [ -n "${mismatchTolerance}" ]; then ARGS="${ARGS} -n ${mismatchTolerance}"; fi
if [ -n "${maxGapOpens}" ]; then ARGS="${ARGS} -o ${maxGapOpens}"; fi
if [ -n "${maxGapExtensions}" ]; then ARGS="${ARGS} -e ${maxGapExtensions}"; fi
if [ -n "${noEndIndel}" ]; then ARGS="${ARGS} -i ${noEndIndel}"; fi
if [ -n "${maxOccLongDeletion}" ]; then ARGS="${ARGS} -d ${maxOccLongDeletion}"; fi
if [ -n "${seedLength}" ]; then ARGS="${ARGS} -l ${seedLength}"; fi
if [ -n "${maxDifferenceSeed}" ]; then ARGS="${ARGS} -k ${maxDifferenceSeed}"; fi
if [ -n "${maxEntriesQueue}" ]; then ARGS="${ARGS} -m ${maxEntriesQueue}"; fi
#if [ -n "${numThreads}" ]; then ARGS="${ARGS} -t ${numThreads}"; fi
if [ -n "${mismatchPenalty}" ]; then ARGS="${ARGS} -M ${mismatchPenalty}"; fi
if [ -n "${gapOpenPenalty}" ]; then ARGS="${ARGS} -O ${gapOpenPenalty}"; fi
if [ -n "${gapExtensionPenalty}" ]; then ARGS="${ARGS} -E ${gapExtensionPenalty}"; fi
if [ -n "${stopSearching}" ]; then ARGS="${ARGS} -R ${stopSearching}"; fi
if [ -n "${qualityForTrimming}" ]; then ARGS="${ARGS} -q ${qualityForTrimming}"; fi
if [ -n "${barCodeLength}" ]; then ARGS="${ARGS} -B ${barCodeLength}"; fi
# NOTE(review): unlike the -n tests above, these two use -eq on a possibly
# unset value, which makes "[" report an error (and skip the flag) when the
# variable is not supplied -- confirm that is acceptable.
if [ "${logScaleGapPenalty}" -eq "1" ]; then ARGS="${ARGS} -L"; fi
if [ "${nonIterativeMode}" -eq "1" ]; then ARGS="${ARGS} -N"; fi
# Determine pair-end or not
IS_PAIRED=0
if [[ -n "$QUERY1" && -n "$QUERY2" ]]; then let IS_PAIRED=1; echo "Paired-end"; fi
# Assume script is already running in a scratch directory
# Create subdirectories for BWA workflow
for I in input1 input2 temp
do
echo "Creating $I"
mkdir -p $I
done
# Copy reference sequence...
REFERENCE_F=$(basename ${REFERENCE})
echo "Copying $REFERENCE_F"
# iget_cached: presumably an iRODS fetch helper shipped in bin.tgz -- TODO confirm.
iget_cached -frPVT ${REFERENCE} .
# Quick sanity check before committing to do anything compute intensive
if ! [[ -e $REFERENCE_F ]]; then echo "Error: Genome sequence not found."; exit 1; fi
# Copy sequences
QUERY1_F=$(basename ${QUERY1})
echo "Copying $QUERY1_F"
iget_cached -frPVT ${QUERY1} .
# Chunk the FASTQ into SPLIT_COUNT-line pieces (4 lines per FASTQ record).
split -l $SPLIT_COUNT --numeric-suffixes $QUERY1_F input1/query.
if [[ "$IS_PAIRED" -eq 1 ]];
then
QUERY2_F=$(basename ${QUERY2})
echo "Copying $QUERY2_F"
iget_cached -frPVT ${QUERY2} .
split -l $SPLIT_COUNT --numeric-suffixes $QUERY2_F input2/query.
fi
# Copying the indexes in addition to the FASTA file
echo "Copying $REFERENCE_F index"
# CHECKSUM counts how many of the 9 expected BWA index files were fetched.
CHECKSUM=0
for J in amb ann bwt fai pac rbwt rpac rsa sa
do
echo "Copying ${REFERENCE}.${J}"
iget_cached -frPVT "${REFERENCE}.${J}" . && let "CHECKSUM += 1" || { echo "${REFERENCE}.${J} was not fetched"; }
done
# If counter < 9, this means one of the index files was not transferred.
# Solution: Re-index the genome sequence
if (( $CHECKSUM < 9 )); then
echo "Indexing $REFERENCE_F"
bin/bwa index -a bwtsw $REFERENCE_F
fi
# Align using the parametric launcher
# Create paramlist for initial alignment + SAI->SAM conversion
# Emit one cli if single-end, another if pair end
rm -rf paramlist.aln
for C in input1/*
do
ROOT=$(basename $C);
if [ "$IS_PAIRED" -eq 1 ]; then
echo "bin/bwa aln ${ARGS} $REFERENCE_F input1/$ROOT > temp/$ROOT.1.sai ; bin/bwa aln ${ARGS} $REFERENCE_F input2/$ROOT > temp/$ROOT.2.sai ; bin/bwa sampe $REFERENCE_F temp/$ROOT.1.sai temp/$ROOT.2.sai input1/$ROOT input2/$ROOT > temp/${ROOT}.sam" >> paramlist.aln
else
echo "bin/bwa aln ${ARGS} $REFERENCE_F input1/$ROOT > temp/$ROOT.1.sai ; bin/bwa samse $REFERENCE_F temp/$ROOT.1.sai input1/$ROOT > temp/${ROOT}.sam" >> paramlist.aln
fi
done
echo "Launcher...."
date
# Run every line of paramlist.aln as a parallel job via the TACC launcher.
EXECUTABLE=$TACC_LAUNCHER_DIR/init_launcher
$TACC_LAUNCHER_DIR/paramrun $EXECUTABLE paramlist.aln
date
echo "..Done"
echo "Post-processing...."
# Extract header from one SAM file
# NOTE(review): assumes the complete SAM header sits within the first
# 120000 lines of the first chunk's output -- confirm for very fragmented
# references.
head -n 120000 temp/query.01.sam | egrep "^@" > $OUTPUT_SAM
# Extract non-header files from all SAM files
for D in temp/*sam
do
egrep -v -h "^@" $D >> $OUTPUT_SAM
done
# Clean up temp data. If not, service will copy it all back
for M in $QUERY1_F $QUERY2_F $REFERENCE_F $REFERENCE_F.* input1 input2 temp .launcher
do
echo "Cleaning $M"
rm -rf $M
done
# Remove bin directory
rm -rf bin
# This is needed for large return files until Rion is able to update the service to add -T automatically
shopt -s expand_aliases
# NOTE(review): this alias is defined after every command has already run,
# so it has no effect inside this script -- presumably intended for a
# context that sources this file; confirm.
alias iput='iput -T'
| true
|
1f4d766c0ad9818c5cf8ea1ad3753501cee06100
|
Shell
|
jamesrafe/soal-shift-sisop-modul-1-C11-2021
|
/soal1/soal1.sh
|
UTF-8
| 1,865
| 3.875
| 4
|
[] |
no_license
|
#!/bin/bash
# Parse syslog.log and produce two CSV reports:
#   error_message.csv  -- each distinct ERROR message and how often it occurs
#   user_statistic.csv -- per-user counts of INFO and ERROR log lines
# File management
input="syslog.log"
error_output="error_message.csv"
user_output="user_statistic.csv"
# 1(a)
# PCRE lookbehind patterns: capture the text that follows "ERROR "/"INFO ".
error_regex="(?<=ERROR )(.*)"
info_regex="(?<=INFO )(.*)"
# 1(b)
# All ERROR message texts, one per line (trailing-space lookahead trims them).
error_list=$(grep -oP "${error_regex}(?=\ )" "$input")
# echo "$error_list"
# echo -n "Jumlah ERROR: "
# echo "$error_list" | wc -l
# 1(c)
# Parallel arrays: username[i] pairs with error_count[i] / info_count[i].
username=()
error_count=()
info_count=()
# NOTE(review): "read p" without -r mangles backslashes, and the unquoted
# arr=($p) word-splits and glob-expands -- confirm the log content is safe.
while read p; do
arr=($p)
name=${arr[-1]}
# Strip the first char and last two chars of the line's final field --
# presumably unwrapping something like "(user)." into "user"; TODO confirm
# against the actual syslog.log format.
name=${name:1:-2}
index=0
# echo -n "$name "
# Register the user on first sight, with zeroed counters.
if [[ ! " ${username[*]} " =~ " $name " ]]
then
username+=("$name")
error_count+=(0)
info_count+=(0)
fi
if [[ $p = *ERROR* ]]
then
# Linear scan for this user's index in the parallel arrays.
for temp in "${username[@]}"
do
if [[ "$temp" == "$name" ]]
then
# echo $temp
# echo $index
break
fi
let index+=1
done
let error_count[index]+=1
# echo -n "${username[$index]}"
# echo "${error_count[$index]}"
else
# Same scan as above, but bump the INFO counter instead.
for temp in "${username[@]}"
do
if [[ "$temp" == "$name" ]]
then
# echo $temp
# echo $index
break
fi
let index+=1
done
let info_count[index]+=1
fi
done < $input
# 1(d)
echo "Error,Count" > $error_output
# Distinct error messages by descending frequency; \K keeps only the text
# after the leading count, then each message is re-counted and emitted as
# a "message,count" CSV row.
grep -oP "${error_regex}(?=\ )" "$input" | sort | uniq -c | sort -nr | grep -oP "^ *[0-9]+ \K.*" | while read -r error_log
do
count=$(grep "$error_log" <<< "$error_list" | wc -l)
echo -n "${error_log}," >> $error_output
echo "$count" >> $error_output
done
# 1(e)
echo -n "" > $user_output
let len=${#username[*]}
for ((it=0; it<$len; it+=1))
do
# echo "$it"
echo -n "${username[$it]}," >> $user_output
echo -n "${info_count[$it]}," >> $user_output
echo "${error_count[$it]}" >> $user_output
done
# Sort/deduplicate the rows, then rewrite the file with a CSV header first.
user_output_sorted=$(cat $user_output | sort | uniq )
echo "Username,INFO,ERROR" > $user_output
echo "$user_output_sorted" >> $user_output
# echo "${username[*]}"
# echo "${error_count[*]}"
# echo "${info_count[*]}"
# cat $error_output
# cat $user_output
| true
|
14ef68d84249adc3c588e0ee8e3485d1c773cdb0
|
Shell
|
mpbagot/SteamVR-OpenHMD
|
/docker.sh
|
UTF-8
| 710
| 2.984375
| 3
|
[
"BSL-1.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash
# Build the SteamVR-OpenHMD docker image, run the build inside it, register
# the resulting driver with SteamVR, then launch SteamVR itself.
# Resolve the directory this script lives in. All expansions are quoted so
# paths containing spaces survive, and each cd is checked so the docker /
# registration steps never run against the wrong directory.
CD=$(dirname "${BASH_SOURCE[0]}")
cd "$CD" || exit 1
CD=$(pwd)
cd "$CD/docker" || exit 1
#docker-compose up --build
docker build ./ -t steamvr_openhmd/build
docker run -ti --rm \
	-e USER="$USER" \
	-v "$HOME:/home/$USER" \
	-v "$CD:/tmp/dev/" \
	-v /etc/passwd:/etc/passwd \
	--name build \
	steamvr_openhmd/build:latest "$@"
# register this build with steamVR automatically
~/.local/share/Steam/steamapps/common/SteamVR/bin/linux64/vrpathreg adddriver "$CD/build/"
# run steamVR, giving priority to Steam runtime libraries instead of system libraries.
export STEAM_RUNTIME_PREFER_HOST_LIBRARIES=0
~/.local/share/Steam/ubuntu12_32/steam-runtime/run.sh ~/.local/share/Steam/steamapps/common/SteamVR/bin/vrstartup.sh
| true
|
715955f4777e9f31387e929113be9f8028ae5744
|
Shell
|
CarlosRayon/linux
|
/bash/Strings/test.sh
|
UTF-8
| 171
| 2.8125
| 3
|
[] |
no_license
|
#!/bin/bash
# Small string-handling demo: concatenation, += appending, and substring
# expansion ${var:offset:length}.
var1="Hello"
var2="World"
result="$var1 $var2"   # "Hello World"
var3="Bob"
var4="Hussain"
var3+=$var4            # += appends: "BobHussain"
#echo $result
#echo $var3
str="Hello World"
sub=${str:6:5}         # 5 characters starting at offset 6 -> "World"
# Quote the expansion so the value is printed verbatim (no word splitting
# or accidental glob expansion).
echo "$sub"
| true
|
066ab3e34d60f7cb5b233b24ca356c0e2f602b30
|
Shell
|
ctssample/demo_repo
|
/src/hdfs/rio/transformations/rio_transaction_structure_coordinator/rio_transaction_structure_workflow/rio_determine_dest_dir.sh
|
UTF-8
| 2,077
| 3.953125
| 4
|
[] |
no_license
|
#!/bin/bash
########################################################################################################################
### Script : rio_determine_dest_dir.sh
### Description : This script determines the destination dir for rio based on specified raw dir path and nominal time of coordinator
###
### Run Command(s) : rio_determine_dest_dir.sh <Nominal Time> <Raw Dir>
###
### By: Priyank Gupta
### Email : Priyank.Gupta@cognizant.com
###
### Modification History:
### Rev# Date Developer Description
### ---- ---------- ---------------- ---------------------------------------------------------
### 1.0 2015-01-07 Priyank Gupta First Release
##########################################################################################################################
#Declarations
PROG_NAME=`basename $0`
ARG_CNT=$#
NOMINAL_TIME=$1 # <Nominal Time>
RAW_DIR=$2 # <Raw Dir>
# Deliberately unquoted assignment: the value is used as an extended regex
# with =~ below.
TS_PATTERN=[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}Z #Eg - 2015-12-08T05:00Z
STREAM_GPRS=rio_prag_incoming_gprs
STREAM_GSM=rio_prag_incoming_gsm
#Sanity Checks
# NOTE(review): failures exit 0 and signal the error via "status=ERROR" on
# stdout -- presumably the calling workflow engine captures these key=value
# pairs; confirm before changing to a non-zero exit code.
if [[ ( ${ARG_CNT} -ne 2 ) ]]; then
echo "status=ERROR"
echo "msg=Incorrect Usage. Usage: ${PROG_NAME} <Nominal Time> <Raw Dir>"
exit 0
fi
if [[ ! $NOMINAL_TIME =~ $TS_PATTERN ]]; then
echo "status=ERROR"
echo "msg=Incorrect Timestamp Format. Expected Format::YYYY-MM-DDTHH:MMZ";
exit 0;
fi
# Slice YYYY-MM-DDTHH:MMZ into its components by fixed character offsets.
nominalYear=${NOMINAL_TIME:0:4}
nominalMonth=${NOMINAL_TIME:5:2}
nominalDay=${NOMINAL_TIME:8:2}
nominalHour=${NOMINAL_TIME:11:2}
nominalMinute=${NOMINAL_TIME:14:2}
# Destination layout: <raw dir>/<stream>/<YYYY>/<MM>/<DD>/<HH>/<mm>
destDirGPRS=${RAW_DIR}/${STREAM_GPRS}/${nominalYear}/${nominalMonth}/${nominalDay}/${nominalHour}/${nominalMinute}
destDirGSM=${RAW_DIR}/${STREAM_GSM}/${nominalYear}/${nominalMonth}/${nominalDay}/${nominalHour}/${nominalMinute}
# Emit the results as key=value pairs for the caller to capture.
echo "dest_dir_gprs=${destDirGPRS}"
echo "dest_dir_gsm=${destDirGSM}"
| true
|
5496f84954412e55e6afa9465cca7ead80c0c875
|
Shell
|
liwen666/ETL
|
/dw-etl-onebank/bin/deploy.sh
|
UTF-8
| 7,075
| 3.265625
| 3
|
[] |
no_license
|
#!/bin/bash
##############################################
# Usage: deployment script -- sh deploy.sh [sit|uat|prd]
# Regenerates config.cfg for the chosen environment, generates the
# etl_daily_snap_job.sh and decrypt.sh helper scripts, then initialises the
# DDL and ETL objects in the database.
##############################################
timestamp=`date "+%Y%m%d%H%M%S"`
echo ========================================init config.cfg date:${timestamp}====================================
# Truncate config.cfg, then append one key=value line per setting for the
# environment selected by $1 (sit / uat / prd; anything else = default).
echo ''> config.cfg
if [ "$1" == sit ]; then
echo -----------------------------------------deploy sit environment------------------------------------------
echo host_name=10.11.16.29 >> config.cfg
echo user_name=gpadmin >> config.cfg
echo user_pass=U2FsdGVkX1/HbxIvsSIU6VrbfqbOqr7x14ogARel2+w= >> config.cfg
echo db_port=5432 >> config.cfg
echo db_name=test_zip >> config.cfg
echo log_path=/data/app/app/log >> config.cfg
echo sleep_interval=3 >> config.cfg
echo job_delay_time=60 >> config.cfg
echo environment=sit >> config.cfg
elif [ "$1" == uat ]; then
echo ----------------------------------------deploy uat environment-------------------------------------------
echo host_name=10.11.16.29 >> config.cfg
echo user_name=gpadmin >> config.cfg
echo user_pass=U2FsdGVkX1/HbxIvsSIU6VrbfqbOqr7x14ogARel2+w= >> config.cfg
echo db_port=5432 >> config.cfg
echo db_name=test_zip >> config.cfg
echo log_path=/data/app/app/log >> config.cfg
echo sleep_interval=3 >> config.cfg
echo job_delay_time=60 >> config.cfg
echo environment=uat >> config.cfg
elif [ "$1" == prd ]; then
echo ----------------------------------------deploy prd environment------------------------------------------
echo host_name=10.11.16.29 >> config.cfg
echo user_name=gpadmin >> config.cfg
echo user_pass=U2FsdGVkX1/HbxIvsSIU6VrbfqbOqr7x14ogARel2+w= >> config.cfg
echo db_port=5432 >> config.cfg
echo db_name=test_zip >> config.cfg
echo log_path=/data/app/app/log >> config.cfg
echo sleep_interval=3 >> config.cfg
echo job_delay_time=60 >> config.cfg
echo environment=prd >> config.cfg
else
echo --------------------------------------deploy default environment----------------------------------------
echo host_name=10.11.16.29 >> config.cfg
echo user_name=gpadmin >> config.cfg
echo user_pass=U2FsdGVkX1/HbxIvsSIU6VrbfqbOqr7x14ogARel2+w= >> config.cfg
echo db_port=5432 >> config.cfg
echo db_name=test_zip >> config.cfg
echo log_path=/data/app/app/log >> config.cfg
echo sleep_interval=3 >> config.cfg
echo job_delay_time=60 >> config.cfg
echo environment=default >> config.cfg
fi
echo ========================================init etl_daily_snap_job.sh 脚本=====================================
# The job script below is emitted as one (mostly) single-quoted literal so
# its $variables reach the generated file unexpanded. Do not edit the
# literal's contents here -- it is the runtime body of etl_daily_snap_job.sh.
echo '#!/bin/bash
############################################################################
# 拉起实时数据仓库日终快照任务
#
# 传入参数:etl_date
#
# 描述:
# 1. 检查ods_anytxn_boa_exec_sign表中当日txn日终跑批是否完成,没有完成则轮循5分钟检查一次
# 2. 启动日终快照存储过程
#
# 调用方法:sh etl_daily_snap_job.sh 20190529
############################################################################
#引入配置文件
cur_dir=$(cd "$(dirname "$0")";pwd)
. ${cur_dir}/config.cfg
log_path=${log_path}/
#业务日期
etl_date=$1
sleep_interval=${sleep_interval}
job_delay_time=${job_delay_time}
#数据库连接信息
host_name=${host_name}
user_name=${user_name}
user_pass=`${cur_dir}/decrypt.sh ${user_pass}`
db_name=${db_name}
db_port=${db_port}
#创建日志目录
if [ ! -d ${log_path} ]; then
mkdir -p ${log_path}
fi
#日志函数
shlog() {
local line_no msg
line_no=$1
msg=$2
echo "[etl_daily_snap_job.sh][$line_no]["`date "+%Y%m%d %H:%M:%S"`"] $msg " >> ${log_path}etl_daily_snap_job.log
echo "[etl_daily_snap_job.sh][$line_no]["`date "+%Y%m%d %H:%M:%S"`"] $msg "
}
#检查日志跑批是否完成,未完成则每5分钟轮询一次
txn_batch_status=`psql -d ${db_name} -h ${host_name} -p ${db_port} -U ${user_name} -q -c "SELECT check_txn_batch_status(${etl_date});" | sed -n '3p'`
while [ ${txn_batch_status} = 0 ]
do
shlog $LINENO "TXN日终跑批还未完成"
sleep ${sleep_interval}
txn_batch_status=`psql -d ${db_name} -h ${host_name} -p ${db_port} -U ${user_name} -q -c "SELECT check_txn_batch_status(${etl_date});" | sed -n '3p'`
done
#检查或者主批完成时间, 等待60分钟后启动任务
dw_job_can_start=`psql -d ${db_name} -h ${host_name} -p ${db_port} -U ${user_name} -q -c "SELECT check_dw_job_can_start(${etl_date}, ${job_delay_time});" | sed -n '3p'`
while [ ${dw_job_can_start} = 0 ]
do
shlog $LINENO "等待时间未到"
echo ${dw_job_can_start}
sleep ${sleep_interval}
dw_job_can_start=`psql -d ${db_name} -h ${host_name} -p ${db_port} -U ${user_name} -q -c "SELECT check_dw_job_can_start(${etl_date}, ${job_delay_time});" | sed -n '3p'`
done
#运行日终快照加工任务
res_etl_ods_snap=`psql -d ${db_name} -h ${host_name} -p ${db_port} -U ${user_name} -q -c "SET client_min_messages=WARNING;SELECT etl_ods_create_daily_snap_table(${etl_date});" | sed -n '3p'`
if [[ $res_etl_ods_snap == *ERROR* ]]; then
shlog $LINENO "ODS日终快照加工失败:${res_etl_ods_snap}"
exit -1
fi
#运行日终拉链表加工任务
res_etl_ods_zip=`psql -d ${db_name} -h ${host_name} -p ${db_port} -U ${user_name} -q -c "SET client_min_messages=WARNING;SELECT etl_ods_create_zip_table(${etl_date});" | sed -n '3p'`
if [[ $res_etl_ods_zip == *ERROR* ]]; then
shlog $LINENO "ODS日终拉链表加工失败:${res_etl_ods_zip}"
exit -1
fi
exit 0
' > etl_daily_snap_job.sh
chmod 755 etl_daily_snap_job.sh
echo =======================================初始化解密脚本 decrypt.sh===========================================
# Generate the small openssl-based password decryption helper.
echo '#!/bin/bash
##############################################
#解密脚本
#调用方法:sh decrypt.sh str
##############################################
dec_str=$1
echo ${dec_str} | openssl aes-128-cbc -d -k 666 -base64' > decrypt.sh
chmod 755 decrypt.sh
echo ========================================开始初始化ddl与etl脚本=============================================
# Source the config.cfg generated above to pick up the DB connection values.
cur_dir=$(cd "$(dirname "$0")";pwd)
. ${cur_dir}/config.cfg
echo --------------------------------------------初始化ddl表结构-----------------------------------------------
end=`psql -d ${db_name} -h ${host_name} -p ${db_port} -U ${user_name} -q -f /data/app/app/etl/deploy/deploy_dll_all.sql | sed -n '3p'`
echo ----------------初始化etl脚本---------------
end=`psql -d ${db_name} -h ${host_name} -p ${db_port} -U ${user_name} -q -f /data/app/app/etl/deploy/deploy_etl_all.sql | sed -n '3p'`
echo -----------------------------------------------部署结束--------------------------------------------------
exit 0
| true
|
338590e15889cf3ba46c70f4f3c246bd4f8ae5ac
|
Shell
|
andy-654321/database_asg4
|
/setup.sh
|
UTF-8
| 364
| 2.96875
| 3
|
[] |
no_license
|
#!/bin/bash
# Set up the coursework web/database environment: back up the stock
# /var/www/html, load the MySQL schema, and install the site files.
# Root is required because we rewrite /var/www/html.
# Quoted command substitution (the unquoted original would mis-parse if
# whoami produced nothing), and exit 1 so callers can detect the failure
# (the bare "exit" returned 0).
if [ "$(whoami)" != "root" ]
then
	echo "I must be run as root!"
	exit 1
fi
echo "Success! Setting up environment now"
#backup initial html folder
[ -d html ] || cp -r /var/www/html .
rm /var/www/html/*
mysql -u root -pcoursework < database.txt
cp index.html /var/www/html
cp test.php /var/www/html
cp style.css /var/www/html
cp main.js /var/www/html
| true
|
60003679e087ce13b1ce487cb453dd644b2bfecf
|
Shell
|
wplib/composer-docker
|
/files/usr/local/bin/composer
|
UTF-8
| 852
| 3.671875
| 4
|
[] |
no_license
|
#!/bin/bash
# Wrapper that runs composer inside a wplib/composer docker container; the
# image tag is derived from the name this script is invoked as (see below).

# Ensure the wplib/composer:$REQVERS image exists locally, pulling it from
# Docker Hub when the tag exists remotely; exits the script when the tag is
# unknown. Side effect: sets the global $VERSION (full image:tag) consumed
# by the final "docker run" at the bottom of the file.
check_image()
{
VERSION="wplib/composer:$REQVERS"
# Empty output from "docker images -q" means the image is not present locally.
EXISTS="`docker images -q $VERSION 2>/dev/null`"
if [ "$EXISTS" == "" ]
then
# Probe the registry for the tag; wget's exit status tells us whether it exists.
wget -q -O /dev/null https://index.docker.io/v1/repositories/wplib/composer/tags/$REQVERS
if [ "$?" == "0" ]
then
echo "WPLib: Need to retrieve $VERSION from repository. Please wait..."
/usr/bin/docker pull $VERSION
else
echo "WPLib: ERROR: Version $VERSION doesn't exist in repository."
exit
fi
fi
}
# Derive the requested composer version from the name this wrapper was
# invoked as (e.g. "composer-1.8" -> "1.8"). When the name carries no
# version suffix, sed leaves it unchanged and we fall through to "latest".
# $0/$SCRIPT/$PWD are quoted so paths with spaces do not word-split
# (the unquoted --volume $PWD:/app was broken for such directories).
SCRIPT=$(basename "$0")
REQVERS=$(echo "$SCRIPT" | sed -r 's/composer-([0-9]*\.[0-9]*)/\1/')
EXEC="/usr/local/bin/composer-$REQVERS"
if [ -x "$EXEC" ]
then
	check_image
elif [ "$SCRIPT" == "$REQVERS" ]
then
	REQVERS="latest"
	check_image
fi
# $VERSION is set by check_image; left unquoted on purpose so that when
# neither branch above ran it disappears instead of becoming an empty arg.
exec /usr/bin/docker run --rm -it --name "composer-$REQVERS" --user vagrant:vagrant --volume "$PWD":/app $VERSION "$@"
# --network wplibbox
|
0d580aa2b70b1530bdf8edf17b726cf5f3c55234
|
Shell
|
ant-Korn/linux-configs
|
/scripts/i3layout.sh
|
UTF-8
| 398
| 3.15625
| 3
|
[] |
no_license
|
#!/bin/bash
#Shell scipt to prepend i3status with more stuff
# For every status line i3status emits, query xkb-switch for the active
# keyboard layout and splice a colored "LANG: xx" cell into the front of
# the JSON status array.
#
# Fixes over the original:
#  - loop on "read" itself: the old "while :; do read line" spun forever
#    once i3status exited (read kept failing but ':' never stopped the loop)
#  - IFS= read -r preserves the JSON line exactly (no trimming/backslash mangling)
#  - "$LG" is quoted: an empty xkb-switch result made "[ $LG == "us" ]" a
#    test syntax error instead of taking the else branch
i3status --config /etc/i3status.conf | while IFS= read -r line
do
	LG=$(xkb-switch | cut -c -2)
	if [ "$LG" == "us" ]
	then
		dat="[{ \"full_text\": \"LANG: $LG\", \"color\":\"#00FF00\" },"
	else
		dat="[{ \"full_text\": \"LANG: $LG\", \"color\":\"#FF0000\" },"
	fi
	# Replace the first '[' (start of the status array) with our cell.
	echo "${line/[/$dat}" || exit 1
done
| true
|
267621704b71bd4eae048837efe52fee0a71f216
|
Shell
|
bitpixels/android_secure_shell
|
/secure_root/rootpwd.sh
|
UTF-8
| 1,009
| 3.59375
| 4
|
[] |
no_license
|
#!/system/bin/sh
# md5-based password store for a "secure shell" wrapper on Android.
# Usage:
#   rootpwd.sh          -> print the stored password hash
#   rootpwd.sh -change  -> interactively change the stored password
# Fixes over the original: 'if[...]' missing spaces, single-quoted comparisons
# that compared literal strings (so they never matched), unbalanced quotes in
# the prompts, a stray closing brace, and the 'styy echo' typo.
dir="" #define directory
sec="secure" #secure folder
pwd="passwd" #password secure filename
pwdstore=$dir/$sec/$pwd #full secure path
# Print the stored hash, or run the change dialog when called with -change.
function passwdexec {
if [ "$1" == "-change" ];
then
secpwd
else
cat $pwdstore
fi
}
# Verify the current password; on success prompt for a new one, otherwise
# ask again.  (stty -echo hides the typed password.)
function secpwd {
echo -e "Enter current password for $USER"
stty -echo
read curpwd
stty echo
curpwd=$(echo $curpwd | md5sum | awk '{print $1}')
hashpwd=$(passwdexec)
echo hash $hashpwd checkhash: $curpwd
if [ "$curpwd" == "$hashpwd" ];
then
setpwd
else
secpwd
fi
}
# Prompt twice for the new password and persist its md5 hash to $pwdstore,
# remounting /system read-write just long enough to write the file.
# NOTE(review): echoing the plain password/hash back to the terminal leaks
# secrets — kept for behavioural parity, but worth removing.
function setpwd {
echo -e "Enter new password for $USER"
stty -echo
read newpasswd
stty echo
echo -e "Confirm new password for $USER"
stty -echo
read confpasswd
stty echo
echo password $newpasswd
echo confirm password $confpasswd
if [ "$newpasswd" == "$confpasswd" ];
then
mount -o remount,rw /system
storepwd=$(echo $confpasswd | md5sum | awk '{print $1}')
echo $storepwd > $pwdstore
mount -o remount,ro /system
echo password changed for $USER
echo password hash: $storepwd
fi
}
passwdexec $1
| true
|
045f42bb56943785e7af8d0f2164c5601118d2c0
|
Shell
|
rogafe/autoInstall_pc
|
/install.sh
|
UTF-8
| 2,129
| 4.3125
| 4
|
[] |
no_license
|
#!/bin/bash
# Post-install bootstrap: detect the distribution, install base packages and
# pull in the author's dotfiles.
# Check the distribution (lsb_release if available, else the os-release files).
distro=""
if command -v lsb_release &> /dev/null; then
distro=$(lsb_release -si)
elif [ -e /etc/os-release ]; then
# NOTE(review): ID= values may be quoted in os-release on some distros;
# the quotes would survive into $distro here — confirm against targets.
distro=$(awk -F= '/^ID=/{print $2}' /etc/os-release)
elif [ -e /etc/lsb-release ]; then
distro=$(awk -F= '/^DISTRIB_ID=/{print $2}' /etc/lsb-release)
fi
install_packages() {
local distro="$1"
case "$distro" in
"Arch Linux" | "ManjaroLinux" | "GarudaLinux" )
sudo pacman -Sy --noconfirm "$@"
;;
"Alpine" )
sudo apk update
sudo apk add --no-cache "$@"
;;
* )
echo "Unsupported distribution: $distro"
exit 1
;;
esac
}
# Announce the start of the post-install and wait 2 seconds.
echo "[...] Commencement de la post installation du PC"
sleep 2
# Remind the user that Arch Linux or Alpine Linux is required, wait 2 seconds.
echo "[...] Utilisation sur Arch Linux ou Alpine Linux obligatoire"
sleep 2
# Install yay (AUR package helper) only when the distribution is Arch Linux.
if [ "$distro" == "Arch Linux" ]; then
echo "[...] Installation de yay (gestionnaire de paquets AUR)"
git clone https://aur.archlinux.org/yay.git /tmp/yay
cd /tmp/yay
makepkg -si --noconfirm
cd -
rm -rf /tmp/yay
fi
# Install Zsh, Git, Curl, Vim and cowsay via install_packages().
install_packages "$distro" zsh git curl vim cowsay
# Clone the repository holding the config files and install them.
echo "[...] Téléchargement et installation des dotfiles"
cd /tmp
git clone https://github.com/rogafe/dotfile_pc.git
cd dotfile_pc
mv zshrc_save ~/.zshrc
# Install Antigen, a plugin manager for Zsh.
echo "[...] Installation d'Antigen"
mkdir -p ~/.antigen
cd ~/.antigen
curl -L git.io/antigen > antigen.zsh
# Clean up temporary files.
cd ~
rm -rf /tmp/dotfile_pc
# All done.
echo "[...] Tout est terminé, merci !"
| true
|
691b3dc60ec63f85930cb4ec98d42d3a6abc6478
|
Shell
|
tonyswoo/naturalbugpatch
|
/generate_csv.sh
|
UTF-8
| 2,605
| 3.34375
| 3
|
[] |
no_license
|
# Collect GenProg repair results scattered under $POOL into a single CSV:
# project, bug number, treatment (Pure/Entropy), seed, patch id, generation
# time, entropy, and EvoSuite test counts.  Requires defects4j on PATH and
# Java 7; paths below are site-specific.
export JAVA_HOME=/usr/lib/jvm/java-7-openjdk-amd64
export JRE_HOME=/usr/lib/jvm/java-7-openjdk-amd64/jre
POOL=/data/ktony11/
ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"
ORIGDIR=$pwd
RESULT=result.csv
EVOSUITE=$ROOT/test_suite
LOGFILE="csv_gen.log"
# Start from a clean result/log (ignore "no such file" on first run).
rm $RESULT $LOGFILE 2> /dev/null
export PATH=$PATH:$ROOT/defects4j/framework/bin
echo "Project, BugNumber, Treatment, Seed, Patch ID, Time to Generate, Entropy, Num Tests Generated, Num Tests Failed" >> $RESULT
for data in $POOL/GenProg*/; do
for bug in $data/*/; do
# Only bug dirs that contain (non-tarred) variants directories.
if [ $(ls $bug | grep variants | grep -v tar | wc -l) -gt 0 ]; then
numRepairs=$(ls $bug/variants* | grep repair | grep -v repair.sanity | wc -l)
if [ $numRepairs -gt 0 ]; then
# Bug dir names look like "<project><bugnum>Buggy": split into the
# capitalised project name and the numeric bug id.
PROJECT=$(echo $(basename "$bug") | sed 's/Buggy$//' | sed 's/.*/\u&/' | sed 's/[0-9]//g')
BUGNUM=$(echo $(basename "$bug") | sed 's/[a-zA-Z]//g')
# "Vanilla" pools are the Pure treatment; everything else is Entropy.
if [[ $(basename "$data") == *"Vanilla"* ]]; then
TYPE=Pure
else
TYPE=Entropy
fi
TESTS=$(ls $EVOSUITE/$PROJECT/evosuite-branch/1/ | grep $PROJECT"-"$BUGNUM | grep -v '.bak')
NUMEVOTESTS=$(grep Generated $ROOT/test_suite/logs/$PROJECT.$BUGNUM"f.branch.1.log" | awk '{print $3}')
# Subtract tests that EvoSuite itself flagged as broken on the fixed version.
if [ $(grep $PROJECT"-"$BUGNUM"f" $ROOT/test_suite/$PROJECT/evosuite-branch/1/fix_test_suite.run.log | wc -l) -gt 0 ]; then
NUMBROKENTESTS=$(grep $PROJECT"-"$BUGNUM"f" $ROOT/test_suite/$PROJECT/evosuite-branch/1/fix_test_suite.run.log | awk '{print $1}')
NUMEVOTESTS=$(($NUMEVOTESTS - $NUMBROKENTESTS))
fi
if [ -z "$NUMEVOTESTS" ]; then
NUMEVOTESTS=0
fi
for seed in $bug/variants*/; do
TESTLOGFILE=$seed/"test.log"
SEEDNUM=$(echo $(basename "$seed") | sed 's/.*Seed//')
for repair in $seed/repair*/; do
if [[ $(basename "$repair") == 'repair.sanity' || ! -d $repair ]]; then
continue
fi
echo "$repair"
PATCHNUM=$(echo $(basename "$repair") | sed 's/repair//')
ENTROPY=$(grep "variant$PATCHNUM " $TESTLOGFILE | awk '{print $3}')
TIMETOGENERATE=$(grep "variant$PATCHNUM" $bug/log*$SEEDNUM.txt | head -n1 | awk '{print $1}')
# Field 7 of the matching evosuite log line is the failing-test count;
# default to 0 when the line or field is missing.
FAILING=$(grep "$PROJECT $BUGNUM SEED$SEEDNUM $(basename "$repair") " $ROOT/evosuite_$(basename "$data").log)
if [[ -z "$FAILING" || -z "$(echo $FAILING | awk '{print $7}')" ]]; then
FAILING=0
else
FAILING=$(echo $FAILING | awk '{print $7}')
fi
echo "$PROJECT, $BUGNUM, $TYPE, $SEEDNUM, $PATCHNUM, $TIMETOGENERATE, $ENTROPY, $NUMEVOTESTS, $FAILING" >> $RESULT
done
done
fi
fi
done
done
| true
|
d73469bcdc20721de8da9f922181fbfa70e145a1
|
Shell
|
FahsinaFaisal/University
|
/mult.sh
|
UTF-8
| 145
| 3.1875
| 3
|
[] |
no_license
|
# Print the multiplication table (1..10) for a number read from stdin.
echo "Enter a Number"
read -r n
echo "Multiplication table of $n :"
for((i=1;i<=10;i++))
do
# Shell arithmetic instead of the external 'expr': no fork per iteration
# and no need to escape '*'.  (The per-loop 'mult=0' reset was a dead store.)
mult=$(( i * n ))
echo "$i * $n = $mult"
done
| true
|
3f1165080a2b059f0ae7e5083dee4bf67e01a630
|
Shell
|
t-mario-y/dotfiles
|
/scripts/install_initial_chromeos.sh
|
UTF-8
| 1,848
| 2.6875
| 3
|
[] |
no_license
|
#!/bin/bash
# ChromeOS (Crostini) initial setup: shell, VSCode, fonts, Japanese input.
set -eu
sudo apt-get update
# Was plain 'apt-get upgrade -y' (no sudo) — fails for non-root under set -e.
sudo apt-get upgrade -y
# zsh                      - preferred shell
# gnome-keyring, seahorse  - for VSCode settings sync
# NB: a comment placed inside the backslash-continued list terminated the
# original command, so gnome-keyring/seahorse were *executed* instead of
# installed.  Keep comments outside the continuation.
sudo apt-get install -y --no-install-recommends \
    zsh \
    gnome-keyring \
    seahorse
# VSCode (Microsoft apt repository)
sudo apt-get install -y wget gpg
wget -qO- https://packages.microsoft.com/keys/microsoft.asc | gpg --dearmor > packages.microsoft.gpg
sudo install -D -o root -g root -m 644 packages.microsoft.gpg /etc/apt/keyrings/packages.microsoft.gpg
sudo sh -c 'echo "deb [arch=amd64,arm64,armhf signed-by=/etc/apt/keyrings/packages.microsoft.gpg] https://packages.microsoft.com/repos/code stable main" > /etc/apt/sources.list.d/vscode.list'
rm -f packages.microsoft.gpg
sudo apt-get install apt-transport-https
sudo apt-get update
sudo apt-get install -y code
# JetBrains Mono font
/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/JetBrains/JetBrainsMono/master/install_manual.sh)"
sudo chsh -s /usr/bin/zsh "$(whoami)"
# TODO: alt + up/down is jacked to pageup/pagedown
# TODO: alt + shift + I is jacked to "send feedback to google"
sudo apt-get install dconf-tools
gsettings set org.gnome.desktop.wm.keybindings switch-to-workspace-up "['disabled']"
gsettings set org.gnome.desktop.wm.keybindings switch-to-workspace-down "['disabled']"
gsettings set org.gnome.desktop.wm.keybindings move-to-workspace-up "['disabled']"
gsettings set org.gnome.desktop.wm.keybindings move-to-workspace-down "['disabled']"
# TODO: Japanese input (日本語入力) not working
sudo apt-get install -y --install-recommends \
    fcitx \
    fcitx-mozc \
    fonts-ipafont \
    fonts-ipaexfont
sudo apt install task-japanese locales-all fonts-ipafont -y
sudo localectl set-locale LANG=ja_JP.UTF-8 LANGUAGE="ja_JP:ja"
source /etc/default/locale
# NOTE(review): 'touch /etc/' touches the directory itself — almost certainly
# a truncated path in the original; intent unknown, left as-is.
sudo touch /etc/
# code /etc/environment.d/fcitx.conf
# GTK_IM_MODULE=fcitx
# QT_IM_MODULE=fcitx
# XMODIFIERS=@im=fcitx
# GDK_BACKEND=x11
# launch fcitx by "fcitx-configtool"
| true
|
de1684fd6eb23a9d63417b61392de2adaa264886
|
Shell
|
multatronic/akurra
|
/scripts/install_dependencies
|
UTF-8
| 395
| 3.140625
| 3
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
#!/bin/bash
# Build and install pygame (for Python 3) from the upstream Mercurial source.
DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
# Install dependencies
sudo apt-get build-dep pygame
sudo apt-get install -y mercurial libavformat-dev libswscale-dev
# Work inside a tmp directory next to this script.
mkdir -p $DIR/../tmp && cd $DIR/../tmp
# Grab source
hg clone --cwd $DIR/../tmp https://bitbucket.org/pygame/pygame
# Finally build and install
cd pygame
python3 setup.py build
sudo python3 setup.py install
| true
|
46f4bd94ee95dbe8751b3e6da9bcc7e1593fcbf8
|
Shell
|
pragmadox/DFA
|
/myfirst
|
UTF-8
| 143
| 2.546875
| 3
|
[] |
no_license
|
#!/bin/sh
# My First Program
# Jajuan Burton 31 MAY 2017
#
# This Program prints hello world to standard output
# Restrict PATH to /bin.  (The original set PATH=/bin/echo — a file, not a
# directory — which left external commands unresolvable.)
PATH=/bin
echo "Hello World"
| true
|
02419c343539ebb889bd828fc34fe1d36079be06
|
Shell
|
Maethorin/copa_do_mundo
|
/gunicorn.sh
|
UTF-8
| 1,124
| 3.203125
| 3
|
[] |
no_license
|
#!/bin/bash
# Launch the copa_do_mundo Django app under Gunicorn, in the foreground so a
# process supervisor can manage it.
NAME="copa_do_mundo" # Name of the application
DJANGODIR=/webapps/copa_do_mundo/app/ # Django project directory
USER=copa # the user to run as
GROUP=webapps # the group to run as
NUM_WORKERS=3 # how many worker processes should Gunicorn spawn
DJANGO_SETTINGS_MODULE=copa_do_mundo.settings # which settings file should Django use
DJANGO_WSGI_MODULE=copa_do_mundo.wsgi # WSGI module name
echo "Starting $NAME as `whoami`"
# Activate the virtual environment
cd $DJANGODIR
source /webapps/copa_do_mundo/.virtualenvs/copa/bin/activate
export DJANGO_SETTINGS_MODULE=$DJANGO_SETTINGS_MODULE
export PYTHONPATH=$DJANGODIR:$PYTHONPATH
# Start your Django Unicorn
# Programs meant to be run under supervisor should not daemonize themselves (do not use --daemon)
exec gunicorn ${DJANGO_WSGI_MODULE}:application \
--name $NAME \
--workers $NUM_WORKERS \
--user=$USER --group=$GROUP \
--log-level=debug \
--bind=0.0.0.0:8082
| true
|
edca4209e582424e9de2b8a7ae4a51e4466d692c
|
Shell
|
oondeo/oo-wordpress
|
/migrate-database.sh
|
UTF-8
| 570
| 2.71875
| 3
|
[] |
no_license
|
#!/bin/bash
# wp core install --path=www --url=${APPLICATION_DOMAIN} --title=${APPLICATION_DOMAIN} --admin_user=${WORDPRESS_ADMIN_USER} --admin_password=${WORDPRESS_ADMIN_PASSWORD} --admin_email=${WORDPRESS_ADMIN_EMAIL} --skip-email
# first='0'
# for i in $(echo $WORDPRESS_THEMES | tr "," "\n")
# do
# if [ $first == '0' ]; then
# wp theme install --path=www $i --activate
# first='1'
# else
# wp theme install --path=www $i
# fi
# done
# for i in $(echo $WORDPRESS_PLUGINS | tr "," "\n")
# do
# wp plugin install --path=www $i
# done
| true
|
f43668c9a6976031ba90f56aa7ef0e7967eb8867
|
Shell
|
saketj/cs838-p2
|
/grader/PartBQuestion1.sh
|
UTF-8
| 581
| 2.65625
| 3
|
[] |
no_license
|
#!/bin/bash
# Submit the Part-B Question-1 Spark streaming job against the cluster.
# Assumes position of streaming directory
# Rest can be customised by changing variables below
source /home/ubuntu/run.sh
echo "This application expects apache spark, hdfs and hive metastore to be running on the cluster"
sleep 1
# HDFS directory the job streams from, Spark master, and the pre-built jar.
datasetStreamDirectory=/user/ubuntu/cs-838/part-b/workload/dataset-stream/
sparkMasterURL=spark://10.254.0.147:7077
jarFile=/home/ubuntu/cs838-p2/Part-B/proj-question-1/target/scala-2.10/part-b-question-1_2.10-1.0.jar
echo Running Spark Code
spark-submit --class "PartBQuestion1" --master $sparkMasterURL $jarFile $datasetStreamDirectory
| true
|
62c2459d2382a0c437df33454d0b0892a849c2ba
|
Shell
|
bschwedler/dotfiles
|
/.bash_prompt
|
UTF-8
| 158
| 2.5625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Interactive shells only: load liquidprompt for the prompt.
if [ -n "${PS1}" ]; then
  # shellcheck disable=SC1090
  . ~/local/etc/liquidprompt
fi
# Append each command to the history file as soon as it runs.
export PROMPT_COMMAND="history -a;$PROMPT_COMMAND"
| true
|
1b2ec8654c465e3d3e5b54b4bf65024a6b46cf21
|
Shell
|
boy1583/graph-databases-testsuite
|
/vldb19.sh
|
UTF-8
| 3,423
| 2.859375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# VLDB'19 graph-database benchmark driver: runs test.py against each system
# image with per-system JVM tuning, controlled by SETTINGS/REPETITIONS/EXTRA.
# Comments this line out if deps have been installed globally.
# shellcheck disable=SC1091
[[ -f ".venv/bin/activate" ]] && source .venv/bin/activate
# Bash strict mode
# redsymbol.net/articles/unofficial-bash-strict-mode
set -ueo pipefail
# Some tests are possible/make sense only for index capable systems.
# We, thus, have two different invocation methods.
test_all() {
index_rdy
no_index
}
# Benchmark every index-capable system.  NB: $EXTRA is intentionally unquoted
# so it word-splits into separate test.py arguments.
index_rdy() {
# Samples MUST be generated by neo4j !!!
python test.py \
-e JAVA_OPTIONS='-Xms1G -Xmn128M -Xmx120G -XX:+UseG1GC' \
-r "$REPETITIONS" \
-s "$SETTINGS" \
$EXTRA \
-i dbtrento/gremlin-neo4j \
-i dbtrento/gremlin-neo4j-tp3
python test.py \
-e JAVA_OPTIONS='-Xms1G -Xmn128M -Xmx120G -XX:+UseG1GC' \
-r "$REPETITIONS" \
-s "$SETTINGS" \
$EXTRA \
-i dbtrento/gremlin-arangodb
python test.py \
-e JAVA_OPTIONS='-Xms1G -Xmn128M -Xmx120G -XX:+UseG1GC' \
-r "$REPETITIONS" \
-s "$SETTINGS" \
$EXTRA \
-i dbtrento/gremlin-pg
# This system requires a license in runtime/confs/sparksee.cfg
python test.py \
-e JAVA_OPTIONS='-Xms1G -Xmn128M -Xmx120G -XX:+UseG1GC' \
-r "$REPETITIONS" \
-s "$SETTINGS" \
$EXTRA \
-i dbtrento/gremlin-sparksee
# From the "Performance-Tuning" documentation page:
# https://orientdb.com/docs/2.1.x/Performance-Tuning.html
# "... it's usually better assigning small heap and large disk cache buffer"
python test.py \
-e JAVA_OPTIONS='-Xms4g -Xmx20g -XX:+UseG1GC -Dstorage.diskCache.bufferSize=102400' \
-r "$REPETITIONS" \
$EXTRA \
-s "$SETTINGS" \
-i dbtrento/gremlin-orientdb
# Titans require additional options for cassandra and friends.
python test.py \
-e TITAN_JAVA_OPTS='-Xms4G -Xmx120G -XX:+UseG1GC -Dcassandra.jmx.local.port=9999 -Dcom.sun.management.jmxremote.port=9999 -Dcom.sun.management.jmxremote.authenticate=false' \
-r "$REPETITIONS" \
$EXTRA \
-s "$SETTINGS" \
-i dbtrento/gremlin-titan \
-i dbtrento/gremlin-titan-tp3
}
# Benchmark the systems that do not support indexes.
no_index() {
python test.py \
-e JAVA_OPTIONS='-Xms1G -Xmn128M -Xmx120G -XX:MaxDirectMemorySize=60000m -XX:+UseG1GC' \
-r "$REPETITIONS" \
$EXTRA \
-s "$SETTINGS" \
-i dbtrento/gremlin-blazegraph
}
# ------------------------------------------------------------------------------
# Default variables
SETTINGS=settings/full.json
REPETITIONS=2
EXTRA=''
# ------------------------------------------------------------------------------
# --- Micro-benchmark queries
echo "*** Micro-benchmark queries ***"
test_all
make collect
# ------------------------------------------------------------------------------
# --- Macro-benchmark - LDBC (requires special index)
# NOTE(review): 'EXTRA=... REPETITIONS=1 index_rdy' sets the variables only for
# that one function call in bash (they revert afterwards) — confirm that is the
# intended scoping.
echo "*** Macro-benchmark - LDBC ***"
SETTINGS=settings/ldbc_cindex.json
EXTRA='-c _ldbc' REPETITIONS=1 index_rdy
SETTINGS=settings/ldbc.json
EXTRA='-x _ldbc' index_rdy
make collect
# ------------------------------------------------------------------------------
# --- Indexed micro-benchmark queries
echo "*** Indexed micro-benchmark queries ***"
SETTINGS=settings/cindex.json
EXTRA='-c _indexed' REPETITIONS=1 index_rdy
SETTINGS=settings/indexed.json
EXTRA='-x _indexed' index_rdy
make collect
# Record the id (numeric prefix) of the newest collected result file.
exp_id=$(find collected/RESULTS/ -type f |\
awk -F'_' '{print $1}' |\
sort -n -r |\
head -n1 |\
xargs basename)
echo "$exp_id" >> notebooks/indexed.csv
| true
|
34b5eca0e245c2fb68dc8a8e77af418c8518b9df
|
Shell
|
adesso-mobile/ams-android-gradle-docker
|
/install.sh
|
UTF-8
| 1,473
| 3.21875
| 3
|
[] |
no_license
|
#!/bin/bash
# Provision an Android/Gradle CI image: base packages, Gradle, the Android
# command-line tools, and the SDK components named by the *_VERSION/*_SDK
# environment variables (expected to be set by the Dockerfile).
export ANDROID_SDK_ROOT=/android-sdk-linux
set -e
set -x
apt-get --quiet update --yes
apt-get --quiet install --yes wget tar unzip libx11-6 libx11-dev lib32stdc++6 lib32z1 git --no-install-recommends
apt-get --quiet install --yes libxcursor1 libasound2 libxcomposite1 libnss3 libgl1 libpulse0 libpulse-dev --no-install-recommends
rm -rf /var/lib/apt/lists/*
# Gradle distribution.
curl -sSL "${GRADLE_SDK_URL}" -o gradle-${GRADLE_VERSION}-bin.zip
unzip gradle-${GRADLE_VERSION}-bin.zip -d ${SDK_HOME}
rm -rf gradle-${GRADLE_VERSION}-bin.zip
# Android command-line tools; the zip's inner "cmdline-tools" dir must be
# renamed "latest" for sdkmanager's expected layout.
curl -sSL "${ANDROID_SDK_URL}" -o android-sdk-linux.zip
mkdir -p "$ANDROID_SDK_ROOT/cmdline-tools"
unzip android-sdk-linux.zip -d "$ANDROID_SDK_ROOT"/cmdline-tools
mv "$ANDROID_SDK_ROOT"/cmdline-tools/cmdline-tools "$ANDROID_SDK_ROOT"/cmdline-tools/latest
rm -rf android-sdk-linux.zip
find "$ANDROID_HOME" -maxdepth 2
mkdir $ANDROID_HOME/licenses
# sdkmanager exits non-zero in places we tolerate; drop -e for this section.
set +e
yes | $ANDROID_HOME/cmdline-tools/latest/bin/sdkmanager "tools" "platform-tools"
yes | $ANDROID_HOME/cmdline-tools/latest/bin/sdkmanager "emulator"
yes | $ANDROID_HOME/cmdline-tools/latest/bin/sdkmanager "build-tools;${ANDROID_BUILD_TOOLS}"
yes | $ANDROID_HOME/cmdline-tools/latest/bin/sdkmanager "platforms;${ANDROID_TARGET_SDK}"
yes | $ANDROID_HOME/cmdline-tools/latest/bin/sdkmanager --licenses
# Expose api-versions.xml where lint expects it.
mkdir -p "$ANDROID_HOME"/platform-tools/api/
cp "$ANDROID_HOME"/platforms/android-*/data/api-versions.xml "$ANDROID_HOME"/platform-tools/api/
echo "Installed SDK"
| true
|
40bc94a9ca250ec455a2b7973d5a29798d851508
|
Shell
|
EasyX-Community/energi-scripts
|
/bin/energi3-status
|
UTF-8
| 652
| 3.5625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Prompt (if necessary) for an Energi wallet address, then query the node's
# staking status via 'energi3 attach'.
# Loop until ENERGI_WALLET_ADDR is non-empty: prompt on each pass, break once set.
while true
do
echo ""
if [[ -z "${ENERGI_WALLET_ADDR}" ]] ; then
echo "No wallet address is set! You may edit this script or enter it below."
read -p "Enter wallet address: " ENERGI_WALLET_ADDR
if [ -n "$ENERGI_WALLET_ADDR" ]; then
# NOTE(review): exports ENERGI_WALLET_ADDRESS while the loop tests
# ENERGI_WALLET_ADDR — confirm which name the energi3 tooling reads.
export ENERGI_WALLET_ADDRESS=${ENERGI_WALLET_ADDR}
fi
else
break
fi
done
echo ""
echo "--------------- ⏲ Obtaining Status ---------------"
echo ""
# Define unlock command
UNLOCK_CMD="miner.stakingStatus()"
# Unlock the account (asks user for password)
energi3 attach --exec "${UNLOCK_CMD}"
echo ""
echo "--------------- 💯 DONE ---------------"
echo ""
exit 0
| true
|
52ab57f5372323e4d0ae652a9db919ba3e6acc2c
|
Shell
|
kazi-nutanix/karbon-toolkit
|
/Pro SDK/testSshConnectivity.sh
|
UTF-8
| 686
| 3.703125
| 4
|
[] |
no_license
|
#!/bin/bash
# Check SSH reachability of every worker node of a Karbon cluster.
#   $1 - cluster name (also names the kubectl context and the ssh key file)
set -e
source $SERVER_CONFIG/data.cfg
if [ ! -z "$1" ]; then
echo "Connecting $1 cluster"
else
echo "A cluster name is needed"
exit 1
fi
#-------------------------------------------------------------------------------#
# Iterate over each node and test the SSH connection #
#-------------------------------------------------------------------------------#
# Node list (wide output: column 6 is the node's IP) cached in ./nodes.
kubectl get nodes -o wide --context=$1-context | grep node > nodes
echo "Checking ssh connection for nodes"
while read line
do
words=( $line )
IP=$(echo ${words[5]})
# '-n' so ssh does not consume the while-loop's stdin; 'echo 2>&1' is a
# no-op remote command used purely as a connectivity probe.
ssh -n -i ~/.ssh/$1 -o "StrictHostKeyChecking no" "$WN_USER@$IP" "echo 2>&1" && echo "Can connect to $IP" || echo "Can't connect to $IP"
done < nodes
| true
|
d1e7b1db03ab9513b1fede0f0fa2d71b4a521650
|
Shell
|
noirgif/CORDS
|
/systems/eth/pre-merge/reset_nodes.sh
|
UTF-8
| 774
| 3.328125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Reset the private Ethereum test network: stop all geth nodes, re-init each
# miner's chain from genesis.json, regenerate the static-nodes peer lists,
# and restart the network.
./stop.sh
# Wait for every geth process to exit before touching the data dirs.
while pgrep geth > /dev/null ; do
sleep 1
done
rm -f nodes
for i in {1..3} ; do
geth --datadir blkchain-${i} init genesis.json
# create for mining, also print the address for future connection
geth --datadir blkchain-${i} --nodiscover --networkid 1234 --port $((30302+i)) js printenode.js >> nodes
cp keys/miner-${i} blkchain-${i}/keystore
done
# put the enode addresses of other nodes into the config file
for i in {1..3} ; do
./parse_nodes.py < nodes > blkchain-${i}/geth/static-nodes.json
done
# connect the user with one of the nodes
./parse_nodes.py < nodes > blkchain-user/geth/static-nodes.json && \
rm nodes
# restart user node
# NOTE(review): this geth invocation runs in the foreground, so ./start.sh
# only executes after it exits — confirm whether '&' was intended here.
geth --datadir blkchain-user --nodiscover --networkid 1234 --port 30302
./start.sh
| true
|
38bedef5b47f6420d557b45e9c6680c79db767ed
|
Shell
|
jmjjg/game-puzzle
|
/docker/usr/bin/docker-entrypoint.sh
|
UTF-8
| 216
| 2.625
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env sh
# Docker entrypoint: run a grunt task using grunt-cli from a shared
# node_modules volume.  Both knobs are overridable via the environment.
set -o errexit
set -o nounset
# NOTE(review): 'pipefail' is not available in every /bin/sh (e.g. older
# dash) — confirm the image's shell supports it.
set -o pipefail
NODE_MODULES_DIR="${NODE_MODULES_DIR:-/opt/node_modules}"
GRUNT_TASK="${GRUNT_TASK:-default}"
${NODE_MODULES_DIR}/grunt-cli/bin/grunt "${GRUNT_TASK}"
| true
|
b4c1587d5fc8aa892df988cbd5179bf3ddf5d149
|
Shell
|
mimikun/dotfiles
|
/private_dot_local/bin/executable_git-browse
|
UTF-8
| 253
| 2.6875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Print the URL of the first git remote, rewritten from its ssh form
# (ssh://git@host:2200/...) to a browsable http form (http://host:8080/...).
url=$(
  git remote -v \
    | head -n 1 \
    | cut -d ' ' -f 1 \
    | sed -e 's/origin//g' \
          -e 's/\t//g' \
          -e 's/ssh:\/\/git@/http:\/\//g' \
          -e 's/\:2200/\:8080/g'
)
echo "$url"
| true
|
6a5a5d42c6ed91465d4898d6195db3f40d830519
|
Shell
|
brandonwkerns/lpt-python-public
|
/realtime/download_cfs_rt.MASTER.sh
|
UTF-8
| 1,037
| 3.703125
| 4
|
[] |
no_license
|
#!/bin/bash
####### Download real time version of CMORPH.
# Fetch the last 7 days of CFS daily precipitation-rate GRIB2 files from the
# NOMADS server.  Optional $1 = reference date (YYYYMMDD), default today (UTC).
ftpdir=https://nomads.ncep.noaa.gov/pub/data/nccf/com/cfs/prod/cfs
## parent directory directory where data will be downloaded.
download_parent_dir=/path/to/keep/your/data
ENS=01 #Ensemble number. 01 is control. 02, 03, and 04 are perturbed.
#######################################################################
## Give input as YYYYMMDD, or it will get today's date using the Linux date command.
if [ -z $1 ]
then
today=`/bin/date -u +%Y%m%d`
else
today=$1
fi
# Walk backwards day by day (today .. today-6) and fetch each day's file.
for days_back in {0..6}
do
ymd=`date --date=${today}-${days_back}day +%Y%m%d`
yyyy=`date --date=${today}-${days_back}day +%Y`
mm=`date --date=${today}-${days_back}day +%m`
HHinit=00
filewanted=$ftpdir/cfs.$ymd/$HHinit/time_grib_$ENS/prate.$ENS.$ymd$HHinit.daily.grb2
echo $filewanted
# -nc: skip files already present; -x/--cut-dirs: keep a shallow local tree.
/usr/bin/wget -q -nc -x -nH --cut-dirs=7 -P $download_parent_dir $filewanted
if [ $? -eq 0 ]
then
echo Success!
else
echo Failed! File may not be on the server yet.
fi
done
exit 0
| true
|
5013bc1d27f2ff400712f698b2710922cb805660
|
Shell
|
svandiek/summa-farsi
|
/path.sh
|
UTF-8
| 1,472
| 2.71875
| 3
|
[] |
no_license
|
# This contains the locations of the tools and data required for running
# the GlobalPhone experiments.  Sourced (not executed) by the recipe scripts.
export LC_ALL=C # For expected sorting and joining behaviour
KALDI_ROOT=/disk/scratch1/svandiek/kaldi
[ -f $KALDI_ROOT/tools/env.sh ] && . $KALDI_ROOT/tools/env.sh
KALDISRC=$KALDI_ROOT/src
# Accumulate every Kaldi binary directory into KALDIBIN.
KALDIBIN=$KALDISRC/bin:$KALDISRC/featbin:$KALDISRC/fgmmbin:$KALDISRC/fstbin
KALDIBIN=$KALDIBIN:$KALDISRC/gmmbin:$KALDISRC/latbin:$KALDISRC/nnetbin:$KALDISRC/chainbin:$KALDISRC/ivectorbin
KALDIBIN=$KALDIBIN:$KALDISRC/sgmm2bin:$KALDISRC/lmbin:$KALDISRC/nnet2bin:$KALDISRC/nnet3bin:$KALDISRC/online2bin
FSTBIN=$KALDI_ROOT/tools/openfst/bin
LMBIN=$KALDI_ROOT/tools/irstlm/bin
# Sanity-check the standard recipe sub-directories (warn only, do not exit).
[ -d $PWD/local ] || { echo "Error: 'local' subdirectory not found."; }
[ -d $PWD/utils ] || { echo "Error: 'utils' subdirectory not found."; }
[ -d $PWD/steps ] || { echo "Error: 'steps' subdirectory not found."; }
export kaldi_local=$PWD/local
export kaldi_utils=$PWD/utils
export kaldi_steps=$PWD/steps
SCRIPTS=$kaldi_local:$kaldi_utils:$kaldi_steps
export PATH=$PATH:$KALDIBIN:$FSTBIN:$LMBIN:$SCRIPTS
# If the correct version of shorten and sox are not on the path,
# the following will be set by local/gp_check_tools.sh
SHORTEN_BIN=/disk/scratch1/svandiek/kaldi/egs/gp/s5/tools/shorten-3.6.1/bin
# e.g. $PWD/tools/shorten-3.6.1/bin
SOX_BIN=/disk/scratch1/svandiek/kaldi/egs/gp/s5/tools/sox-14.3.2/bin
# e.g. $PWD/tools/sox-14.3.2/bin
export PATH=$PATH:$SHORTEN_BIN
export PATH=$PATH:$SOX_BIN
| true
|
d262d124e2c895c8175b00032b8ae9342e4d9919
|
Shell
|
impactaky/dotfiles
|
/ubuntu20.04.sh
|
UTF-8
| 1,030
| 2.84375
| 3
|
[] |
no_license
|
#!/bin/bash
# Ubuntu 20.04 workstation bootstrap: neovim PPA, LLVM/clang toolchain,
# common dev tools, Python helpers and deno.  IS_SERVER=yes adds the
# server-only hardware libraries at the end.
set -eux
CLANG_VERSION=15
IS_SERVER=${IS_SERVER:-no}
sudo add-apt-repository ppa:neovim-ppa/unstable
# LLVM apt repository (key + source list for the pinned toolchain version).
# NOTE(review): 'apt-key add' is deprecated on newer Ubuntu — verify on targets.
sudo sh -c "wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | apt-key add -"
sudo sh -c 'echo "deb http://apt.llvm.org/$(lsb_release -sc)/ llvm-toolchain-$(lsb_release -sc)-15 main" >> /etc/apt/sources.list'
sudo apt update
sudo apt install -y \
git neovim python3 python3-pip \
tmux porg golang zsh gawk curl trash-cli automake ninja-build ccache \
clang-"${CLANG_VERSION}" \
clang-format-"${CLANG_VERSION}" \
clang-tidy-"${CLANG_VERSION}" \
clang-tools-"${CLANG_VERSION}" \
clangd-"${CLANG_VERSION}" \
libclang-"${CLANG_VERSION}"-dev \
lld-"${CLANG_VERSION}"
sudo pip3 install -U pip
sudo pip3 install cmake
sudo pip3 install pynvim
sudo pip3 install pexpect python-language-server
curl -fsSL https://deno.land/x/install/install.sh | sh
# Server-only: hardware access libraries.
if [[ "${IS_SERVER}" != "no" ]]; then
sudo apt install -y \
libudev-dev libusb-1.0-0-dev libboost-dev libboost-regex-dev
fi
| true
|
d3c181f0899c161832398817a36c76986eaabd8a
|
Shell
|
yalzhang/kiss-vm-ns
|
/kiss-vm
|
UTF-8
| 79,303
| 3.125
| 3
|
[] |
no_license
|
#!/bin/bash
# author: yin-jianhong@163.com
# test pass on RHEL-7/CentOS-7, RHEL-8/CentOS-8 and Fedora-29/30/31
# kiss-vm: libvirt VM management helper.  This section sets the script-wide
# defaults; individual sub-commands (functions below) consume them.
LANG=C
PATH=~/bin:$PATH
P=$0
[[ $0 = /* ]] && P=${0##*/}
Distro=
Location=
Imageurl=
VM_OS_VARIANT=
OVERWRITE=no
KSPath=
ksauto=
MacvtapMode=bridge
VMName=
InstallType=import
ImagePath=~/myimages
DownloadImagePath=$ImagePath/download
VMSHOME=~/VMs
vmprefix=$(whoami)
# When invoked via sudo, keep images/VMs under the *invoking* user's home and
# prefix VM names with that user's name.
[[ -n "$SUDO_USER" ]] && {
eval ImagePath=~$SUDO_USER/myimages
eval VMSHOME=~$SUDO_USER/VMs
vmprefix=${SUDO_USER}
}
RuntimeTmp=/tmp/vm-$$
INTERACT=yes
Intranet=yes
VIRT_READY=unkown
_MSIZE=1536
DSIZE=27
baudrate=115200
QEMU_OPTS=--qemu-commandline=
QEMU_ENV=()
baseUrl=http://download.devel.redhat.com/qa/rhts/lookaside/kiss-vm-ns
bkrClientImprovedUrl=http://download.devel.redhat.com/qa/rhts/lookaside/bkr-client-improved
VCPUS=4
# Remove the per-run tmp dir (and an empty VM dir, if one was created) on exit.
Cleanup() {
cd ~
#echo "{DEBUG} Removing $RuntimeTmp"
rm -rf $RuntimeTmp
[[ -n "$VMpath" ]] && { rmdir "$VMpath" && rmdir "${VMpath%/*}"; } 2>/dev/null
exit
}
trap Cleanup EXIT #SIGINT SIGQUIT SIGTERM
#-------------------------------------------------------------------------------
# Generate a VM name from a distro string and an optional custom name.
#   $1 - distro string (dots removed, result lowercased)
#   $2 - optional explicit name; if given it wins unchanged
# Without a custom name the derived name is prefixed with $vmprefix.
# Fix: derive from the $1 parameter instead of the global $Distro — the
# original declared 'local distro=$1' but never used it (callers are expected
# to pass the distro as $1).
vmname_gen() {
local distro=$1
local cname=$2
local name=${distro//./}
name=${name,,}
if [[ -n "$cname" ]]; then
name=${cname}
else
[[ -n "$vmprefix" ]] && name=${vmprefix}-${name}
fi
echo -n "$name"
}
# Destroy and undefine one VM (with its storage), refusing VMs that do not
# belong to the current user's $VMSHOME tree.  Returns 1 on refusal/missing VM.
_vmdelete() {
local _vmname=$1
[[ -z "$_vmname" ]] && {
return 1
}
egrep -q "^$_vmname$" <(virsh list --name --all) || {
echo -e "{VM:WARN} VM '$_vmname' does not exist"
return 1
}
homedir=$(getent passwd "$vmprefix" | cut -d: -f6)
# Ownership check: the VM's disk (or its PXE dir) must live under the
# invoking user's home; otherwise print the manual commands and bail.
if [[ -n "${vmprefix}" ]]; then
blk=$(virsh domblklist "$_vmname" | sed 1,2d)
if test -n "$blk"; then
! grep -q "$homedir/VM[sS]/" <<<"$blk" && {
echo -e "{VM:WARN} VM '$_vmname' was not created by current user; try:"
cat <<-EOF
virsh destroy $_vmname
virsh undefine $_vmname --remove-all-storage
EOF
return 1
}
#pxe vms
elif [[ ! -d $homedir/VMs/PXE/$_vmname ]]; then
echo -e "{VM:WARN} VM '$_vmname' was not created by current user; try:"
cat <<-EOF
virsh destroy $_vmname
virsh undefine $_vmname --remove-all-storage
EOF
return 1
fi
else
:
fi
# Locate the VM directory from its first qcow disk; PXE VMs have no disk.
local _vmdir=
local _image=$(virsh dumpxml --domain $_vmname | sed -n "/source file=.*qcow/{s|^.*='||; s|'/>$||; p; q}")
if test -n "$_image"; then
_vmdir=$(dirname $_image)
else
_vmdir=$homedir/VMs/PXE/$_vmname
fi
echo -e "\n{VM:INFO} => dist removing VM $_vmname .."
virsh destroy $_vmname 2>/dev/null
sleep 1
virsh undefine --remove-all-storage $_vmname
# Only clean up the folder when it matches the kiss-vm layout
# ($VMSHOME/<something>/<vmname>) to avoid deleting foreign paths.
if [[ "$_vmdir" = $VMSHOME/?*/$_vmname ]]; then
echo -e "- {VM:INFO} removing VM folder $_vmdir ..."
rm -f $_vmdir/{url,nohup.log,ext4.qcow2,xfs.qcow2,vm.xml,qemu.argv,.kiss-vm}
rm -f $_vmdir/*.qcow2.xz $_vmdir/*.img $_vmdir/*.image
rm -f $_vmdir/nvdimm-*.dev
rmdir $_vmdir 2>/dev/null
rmdir ${_vmdir%/*} 2>/dev/null
fi
return 0
}
# Show a dialog(1) checklist of VMs; selected names are written to $2.
#   $1 - title, $2 - result file, $3 - non-empty to include stopped VMs
# VMs owned by the current user (name prefix $vmprefix) are listed first.
vmdialogchecklist() {
local cmdinfo=$1
local resfile=$2
local all=$3
local vmlist=$(virsh list --name ${all:+--all})
[[ -n "$vmprefix" ]] && vmlist=$(grep "^$vmprefix" <<<"$vmlist"; grep -v "^$vmprefix" <<<"$vmlist";)
# Build dialog item triples: "name" "" 1 (tag, blank description, on).
local vmList=$(echo "$vmlist" | sed -e /^$/d -e 's/.*/"&" "" 1/')
[[ -z "${vmList}" ]] && {
echo -e "{VM:WARN} there is not any VM in your host .." >&2
return 1;
}
which dialog &>/dev/null || yum install -y dialog &>/dev/null
dialog --backtitle "$cmdinfo" --separate-output --checklist "${cmdinfo}: please select vms you want " 30 120 28 $vmList 2>$resfile; rc=$?
# Clear the screen after dialog returns.
printf '\33[H\33[2J'
return $rc
}
# Same as vmdialogchecklist but a radio list: exactly one VM may be chosen.
vmdialogradiolist() {
local cmdinfo=$1
local resfile=$2
local all=$3
local vmlist=$(virsh list --name ${all:+--all})
[[ -n "$vmprefix" ]] && vmlist=$(grep "^$vmprefix" <<<"$vmlist"; grep -v "^$vmprefix" <<<"$vmlist";)
local vmList=$(echo "$vmlist" | sed -e /^$/d -e 's/.*/"&" "" 1/')
[[ -z "${vmList}" ]] && {
echo -e "{VM:WARN} there is not any VM in your host .." >&2
return 1;
}
which dialog &>/dev/null || yum install -y dialog &>/dev/null
dialog --backtitle "$cmdinfo" --radiolist "${cmdinfo}: please select vm you want " 30 60 28 $vmList 2>$resfile; rc=$?
printf '\33[H\33[2J'
return $rc
}
# Delete one or more VMs.  With no arguments a dialog checklist lets the
# user pick which VMs to remove; the actual work is done by _vmdelete().
vmdelete() {
if [[ $# = 0 ]]; then
resf=$RuntimeTmp/vmlist
vmdialogchecklist vm-delete $resf all && rmList=$(< $resf)
[[ -z "$rmList" ]] && { return; }
# The selection file holds quoted names; re-split them into "$@".
eval set $rmList
fi
for vm in "$@"; do
_vmdelete $vm
done
}
# Print the IPv4 address(es) of a VM.
#   /x  - resolve via getent hosts instead of 'virsh domifaddr'
#   $1  - VM name; prompts with a radio list when omitted
vmifaddr() {
[[ "$1" = /x ]] && {
shift
local GETENT=yes
}
local _vmname=$1
[[ -z "$_vmname" ]] && {
resf=$RuntimeTmp/vmlist
vmdialogradiolist vm-ifaddr $resf && _vmname=$(sed 's/"//g' $resf)
[[ -z "$_vmname" ]] && { return; }
}
local addrs=
if [[ "$GETENT" = yes ]]; then
addrs=$(getent hosts "$_vmname"|awk '{print $1}'|tac)
else
# Keep ipv4 rows only; strip the /prefix from the address column.
addrs=$(virsh domifaddr "$_vmname" | awk '$3=="ipv4" {print gensub("/.*","",1,$4)}')
fi
[[ -n "$addrs" ]] && echo "$addrs"
}
# Capture a VM's VNC screen ($1 = host::port) and OCR it to text on stdout.
# Needs vncdo, gm convert (or ImageMagick convert), and gocr.
vncget() {
local vncport=$1
local ConvertCmd="gm convert"
! which vncdo &>/dev/null && {
echo "{VM:WARN} command vncdo is needed by 'vncget' function!" >&2
return 1
}
# Prefer GraphicsMagick; fall back to ImageMagick's convert.
! which gm &>/dev/null && {
if ! which convert &>/dev/null; then
echo "{VM:WARN} command gm or convert are needed by 'vncget' function!" >&2
return 1
else
ConvertCmd=convert
fi
}
! which gocr &>/dev/null && {
echo "{VM:WARN} command gocr is needed by 'vncget' function!" >&2
return 1
}
# Screenshot -> thresholded b/w image (better OCR) -> gocr -> highlight text.
vncdo -s $vncport capture $RuntimeTmp/_screen.png
$ConvertCmd $RuntimeTmp/_screen.png -threshold 30% $RuntimeTmp/_screen2.png
gocr -i $RuntimeTmp/_screen2.png 2>/dev/null | GREP_COLORS='ms=01;30;47' grep --color .
}
# Send keystrokes to a VM's VNC console ($1 = host::port; remaining args are
# messages).  A message is either "key:<keysym>" (sent as a key press) or
# literal text (optionally prefixed "type:"); an empty message means sleep 1s.
vncput() {
local vncport=$1
shift
which vncdo >/dev/null || {
echo "{VM:WARN} vncdo is needed by 'vncput' function!" >&2
return 1
}
# First pass: expand each message into a list of type:/key: operations.
local msgArray=()
for msg; do
if [[ -n "$msg" ]]; then
if [[ "$msg" = key:* ]]; then
msgArray+=("$msg")
else
# Text containing shifted punctuation must be split so those
# characters can be sent as explicit shift-key events.
regex='[~@#$%^&*()_+|}{":?><!]'
_msg="${msg#type:}"
if [[ "$_msg" =~ $regex ]]; then
while IFS= read -r line; do
if [[ "$line" = key:shift-? ]]; then
: #line=key:shift-$(tr ')~!@#$%^&*(+}{|:><?"' '0`123456789=][\\;.,/'"'" <<<"${line: -1}")
else
line="type:$line"
fi
msgArray+=("$line")
done < <(sed -r -e 's;[~!@#$%^&*()_+|}{":?><]+;&\n;g' -e 's;[~!@#$%^&*()_+|}{":?><];\nkey:shift-&;g' <<<"$_msg")
else
msgArray+=("$msg")
fi
fi
# Empty entry after every message => 1s pause between messages.
msgArray+=("")
else
msgArray+=("$msg")
fi
done
# Second pass: replay the expanded operations through vncdo.
for msg in "${msgArray[@]}"; do
if [[ -n "$msg" ]]; then
if [[ "$msg" = key:* ]]; then
vncdo --force-caps -s $vncport key "${msg#key:}"
else
vncdo --force-caps -s $vncport type "${msg#type:}"
fi
else
sleep 1
fi
done
}
# Report a VM's VNC endpoint, or drive it: with VNCPUTS set, send those
# messages and echo the resulting screen; with VNCGET set, just OCR the
# screen; otherwise print reachable host:port pairs.
vmvncport() {
local _vmname=$1
[[ -z "$_vmname" ]] && {
resf=$RuntimeTmp/vmlist
vmdialogradiolist vm-vncport $resf all && _vmname=$(sed 's/"//g' $resf)
[[ -z "$_vmname" ]] && { return; }
}
# Pull the VNC port out of the domain XML.
local port=$(virsh dumpxml "$_vmname" | sed -rn "/.* type=.vnc. port=.([0-9]+).*/{s//\1/;p}")
[[ -n "${port}" ]] && {
if [[ -n "${VNCPUTS}" ]]; then
vncput localhost::$port "${VNCPUTS[@]}"
echo "[vncput@$_vmname]> ${VNCPUTS[*]}"
vncget localhost::$port
elif [[ -n "${VNCGET}" ]]; then
echo "[vncget@$_vmname]:"
vncget localhost::$port
else
# List the port against every pingable hostname of this host.
for _host in $(hostname -A|xargs -n 1|sort -u); do
ping -c 2 $_host &>/dev/null || continue
echo $_host:$port
done
fi
}
}
# Dump the libvirt domain XML of a VM; with no argument, let the user pick
# one from a radio list first.
vmxml() {
local _vmname=$1
if [[ -z "$_vmname" ]]; then
resf=$RuntimeTmp/vmlist
vmdialogradiolist vm-dumpxml $resf all && _vmname=$(sed 's/"//g' $resf)
if [[ -z "$_vmname" ]]; then
return
fi
fi
virsh dumpxml "$_vmname"
}
# Open a VM's libvirt definition in $EDITOR via 'virsh edit'; with no
# argument, let the user pick one from a radio list first.
vmedit() {
local _vmname=$1
if [[ -z "$_vmname" ]]; then
resf=$RuntimeTmp/vmlist
vmdialogradiolist vm-edit $resf all && _vmname=$(sed 's/"//g' $resf)
if [[ -z "$_vmname" ]]; then
return
fi
fi
virsh edit "$_vmname"
}
# Return success when host $1 accepts TCP connections on port $2.
# Arguments are now quoted — the original unquoted expansions broke on
# empty values or values containing glob/whitespace characters.
port_available() {
nc "$1" "$2" </dev/null &>/dev/null
}
# Hard-reboot a VM (destroy + start).  '/w' as the first arg waits until the
# guest's SSH port answers again; prompts with a radio list when no name given.
vmreboot() {
[[ "$1" = /w ]] && {
shift
local WAIT=yes
}
local _vmname=$1
[[ -z "$_vmname" ]] && {
resf=$RuntimeTmp/vmlist
vmdialogradiolist vm-reboot $resf all && _vmname=$(sed 's/"//g' $resf)
[[ -z "$_vmname" ]] && { return; }
}
virsh destroy "$_vmname" 2>/dev/null
virsh start "$_vmname"
[[ "$WAIT" = yes ]] && {
echo -e "{VM:INFO} waiting restart finish ..."
until port_available ${_vmname} 22; do sleep 1; done
}
}
# Force-stop VMs ('virsh destroy'); checklist dialog when no names are given.
vmstop() {
local _vmname=$1
[[ -z "$_vmname" ]] && {
resf=$RuntimeTmp/vmlist
vmdialogchecklist vm-shutdown $resf all && vmList=$(< $resf)
[[ -z "$vmList" ]] && { return; }
eval set $vmList
}
for vm; do virsh destroy "$vm"; done
}
# Start VMs; checklist dialog when no names are given.
vmstart() {
local _vmname=$1
[[ -z "$_vmname" ]] && {
resf=$RuntimeTmp/vmlist
vmdialogchecklist vm-start $resf all && vmList=$(< $resf)
[[ -z "$vmList" ]] && { return; }
eval set $vmList
}
for vm; do virsh start "$vm"; done
}
# Clone a kiss-vm-created VM with virt-clone + virt-sysprep.
#   /a  - append mode: new name becomes "<src>-<dst>"
#   $1  - destination name (default "<src>-clone-$$"), $2 - source VM
# Refuses sources that were not created by kiss-vm (no .kiss-vm marker).
vmclone() {
[[ "$1" = /a ]] && {
shift
local APPEND=yes
}
# Reject names containing shell-special / libvirt-unfriendly characters
# (also echoes the offending chars, highlighted).
is_invalid_vmname() {
local nvmname=$1
egrep --color=always "[][~\!@#$^&()=,\":;{}|<>'\` ]" <<<"$nvmname"
}
local dstname=$1
local srcname=$2
[[ -z "$srcname" ]] && {
resf=$RuntimeTmp/vmlist
vmdialogradiolist vm-clone $resf all && srcname=$(sed 's/"//g' $resf)
[[ -z "$srcname" ]] && {
return;
}
}
if [[ -z "$dstname" ]]; then
dstname=${srcname}-clone-$$
else
#verify invalid charactors
[[ "$APPEND" = yes ]] && dstname=${srcname}-${dstname}
fi
is_invalid_vmname "$dstname" && return 1
#get src vm path
read image < <(virsh dumpxml --domain ${srcname} | sed -n "/source file=/{s|^.*='||; s|'/>$||; p}")
local srcpath=${image%/*}
# Source must follow the kiss-vm layout: dir named after the VM and holding
# the .kiss-vm marker file.
[[ ! -f ${srcpath}/.kiss-vm || ${srcpath##*/} != ${srcname} ]] && {
echo -e "{VM:WARN} seems $srcname was not created by kiss-vm, can not use vm-clone; please use virt-clone instead ..."
return 1
}
# Pause the source while its disk is being copied.
virsh suspend "$srcname"
# do clone
local dstpath=${srcpath%/*}/$dstname
mkdir -p $dstpath
virt-clone --original ${srcname} --name ${dstname} --file $dstpath/vm.qcow2
#fix me: Permission denied, why new image owner is root?
LIBGUESTFS_BACKEND=direct virt-sysprep -d ${dstname} --hostname $dstname --enable user-account,ssh-hostkeys,net-hostname,net-hwaddr,machine-id --remove-user-accounts bar --run 'ls'
virsh resume "$srcname"
}
vmcopyto() {
	# Copy local files/dirs into a VM via scp.
	# Usage: vm cpto <vmname> <src files/dirs ...> <dst dir in vm>
	# Tries pubkey (batch-mode) ssh first; falls back to an
	# expect-driven scp that answers the password prompt with "redhat".
	[[ "$#" -lt 3 ]] && {
		echo "Usage: vm cpto vmname <src files/dirs ...> <dst dir in vm>"
		return 1
	}
	local _vmname=$1
	shift 1
	local dstdir=${@: -1}	# last argument is the destination dir
	local srcarray=("${@:1: $#-1}")	# everything before it is a source
	# bail out early if the guest's sshd port is unreachable
	nc $_vmname 22 </dev/null &>/dev/null || {
		echo -e "{VM:WARN} port $_vmname:22 is not available"
		return 1
	}
	local sshexec=no
	local sshOpts="-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no"
	# probe whether passwordless (batch-mode) ssh works
	ssh -n -o Batchmode=yes -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@$_vmname &>$RuntimeTmp/sshtest
	[[ $? = 0 ]] && sshexec=yes
	if [[ $sshexec = yes ]]; then
		scp $sshOpts -r "${srcarray[@]}" root@${_vmname}:$dstdir/.
		rc=$?
	else
		# batch ssh failed: if a password prompt was seen, drive scp via expect
		if grep -q -w password $RuntimeTmp/sshtest; then
			expect <(cat <<-EOF
			set timeout 120
			spawn scp $sshOpts -r "${srcarray[@]}" root@${_vmname}:$dstdir/.
			expect {
				"password:" { send "redhat\r"; exp_continue }
				"Password for" { send "redhat\r"; exp_continue }
				eof
			}
			foreach {pid spawnid os_error_flag value} [wait] break
			exit \$value
			EOF
			)
			rc=$?
		else
			echo -e "{VM:WARN} ssh $_vmname is not available ..."
			rc=1
		fi
	fi
	return $rc
}
vmcopyfrom() {
	# Copy a file/dir tree from a VM back to the local host via scp.
	# Usage: vm cpfrom <vmname> <files/dirs in vm> <dst dir>
	# Tries pubkey (batch-mode) ssh first; falls back to an
	# expect-driven scp that answers the password prompt with "redhat".
	[[ "$#" -lt 3 ]] && {
		echo "Usage: vm cpfrom vmname <files/dirs> <dst dir>"
		return 1
	}
	local _vmname=$1
	shift 1
	local src=$1
	local dstdir=$2
	# bail out early if the guest's sshd port is unreachable
	nc $_vmname 22 </dev/null &>/dev/null || {
		echo -e "{VM:WARN} port $_vmname:22 is not available"
		return 1
	}
	local sshexec=no
	local sshOpts="-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no"
	# probe whether passwordless (batch-mode) ssh works
	ssh -n -o Batchmode=yes -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@$_vmname &>$RuntimeTmp/sshtest
	[[ $? = 0 ]] && sshexec=yes
	if [[ $sshexec = yes ]]; then
		scp $sshOpts -r root@${_vmname}:$src $dstdir/.
		rc=$?
	else
		if grep -q -w password $RuntimeTmp/sshtest; then
			# fix: pass $sshOpts to the spawned scp (vmcopyto already does);
			# without it the expect script can stall on the host-key yes/no prompt
			expect <(cat <<-EOF
			set timeout 120
			spawn scp $sshOpts -r root@${_vmname}:$src $dstdir/.
			expect {
				"password:" { send "redhat\r"; exp_continue }
				"Password for" { send "redhat\r"; exp_continue }
				eof
			}
			foreach {pid spawnid os_error_flag value} [wait] break
			exit \$value
			EOF
			)
			rc=$?
		else
			echo -e "{VM:WARN} ssh $_vmname is not available ..."
			rc=1
		fi
	fi
	return $rc
}
vmlogin() {
	# Log in to a VM, or run a command in it when EXEC=yes (see vmexec).
	# Usage: vmlogin [/c] [vmname] [cmd...]
	#   /c - use virsh console instead of ssh (ignored in EXEC mode)
	# Tries each guest address in turn: batch ssh first, then
	# expect+password ("redhat"), finally falls back to virsh console.
	local console=
	[[ "$1" = /c ]] && {
		console=yes
		shift
	}
	local _vmname=$1
	shift
	[[ -z "$_vmname" ]] && {
		resf=$RuntimeTmp/vmlist
		vmdialogradiolist vm-login $resf && _vmname=$(sed 's/"//g' $resf)
		[[ -z "$_vmname" ]] && { return; }
	}
	[[ "${EXEC}" = yes ]] && {
		# EXEC mode with no command: run a no-op so ssh returns promptly
		[[ $# = 0 ]] && set "exit 0";
		[[ "$VERBOSE" = yes ]] &&
			echo "[root@$_vmname]> $*" | GREP_COLORS='ms=01;36' grep --color . >&2
	}
	[[ "$console" = yes ]] && {
		if [[ "$EXEC" = yes ]]; then
			:	# /c makes no sense for one-shot command execution
		else
			virsh console "$_vmname"
			return $?
		fi
	}
	#sometimes there's bug in libnss. get more than one addr but some one
	#of them does not exist. use nc detect and ignore it/them
	addrs=$(vmifaddr "$_vmname")
	[[ -z "$addrs" ]] && {
		echo -e "{VM:WARN} can not get ip info of $_vmname, try login with console ..."
		virsh console "$_vmname"
		return $?
	}
	echo "$*" >$RuntimeTmp/sshcmd	# command handed to the expect script below
	for addr in $addrs; do
		nc $addr 22 </dev/null &>/dev/null || {
			echo -e "{VM:WARN} port $addr:22 is not available"
			continue
		}
		sshexec=no
		ssh -n -o Batchmode=yes -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@$addr &>$RuntimeTmp/sshtest
		[[ $? = 0 ]] && sshexec=yes
		if [[ $sshexec = yes ]]; then
			ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@$addr "$*"
			rc=$?
		else
			if grep -q -w password $RuntimeTmp/sshtest; then
				# interactive login ({$argc == 0}) keeps the session via
				# 'interact'; command mode just waits for eof
				expect <(cat <<-EOF
				set timeout 120
				set sshcmd [exec cat $RuntimeTmp/sshcmd]
				log_user 0
				spawn ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@$addr \$sshcmd
				log_user 1
				if {\$argc == 0} {
					expect {
						"password:" { send "redhat\r"; exp_continue }
						"Password for" { send "redhat\r"; exp_continue }
						"*# " { send "\r"; interact }
					}
				} else {
					expect {
						"password:" { send "redhat\r"; exp_continue }
						"Password for" { send "redhat\r"; exp_continue }
						eof
					}
				}
				foreach {pid spawnid os_error_flag value} [wait] break
				exit \$value
				EOF
				) "$@"
				rc=$?
			else
				echo -e "{VM:WARN} ssh root@$addr is not available, try login with console ..."
				virsh console "$_vmname"
				rc=$?
			fi
		fi
		faillog() { echo -e "\033[41m{TEST:FAIL} $*\033[0m"; }
		# honor the -x option: compare actual rc against the expected list
		[[ -n "$expectedrc" ]] && {
			[[ " ${expectedrc[@]} " != *" $rc "* ]] && {
				faillog "return code: expect $expectedRC, but got $rc"
			}
		}
		return $rc	# NOTE: returns after the first reachable address
	done
}
# Run a command in a VM non-interactively (vmlogin in EXEC mode).
vmexec() {
	EXEC=yes vmlogin "$@"
}
# List all defined VMs, both running and shut off.
vmlist() {
	virsh list --all
}
netcreate() {
	# Create (define + start + autostart) a libvirt virtual network.
	# key=value args: netname= brname= [subnet=N] [forward=nat|'']
	#                 [tftproot=path] [bootpfile=file]
	# With no args at all: just list the existing networks.
	local netname=
	local brname=
	local subnet=
	local forward=nat
	[[ $# = 0 ]] && { virsh net-list; return; }
	Usage() {
		cat <<-U
		Example:
		vm net netname=net10 brname=virbr10 subnet=10 #subnet range: [1-121,123-254]
		U
	}
	# accept only whitelisted key=value pairs, then eval them into vars
	for opt; do [[ "$opt" =~ ^(netname|brname|subnet|forward|tftproot|bootpfile)=.* ]] || continue; eval "$opt"; done
	[[ -z "$netname" || -z "$brname" ]] && { Usage; return 1; }
	#https://libvirt.org/formatnetwork.html
	[[ -n "$forward" ]] && {
		forwardNode="<forward mode=\"$forward\"/>"
		[[ "$forward" = nat ]] && forwardNode="<forward mode=\"$forward\" >
		<nat>
			<port start='1024' end='65535'/>
		</nat>
	</forward>"
	}
	[[ -n "$tftproot" ]] && tftpNode="<tftp root='$tftproot'/>"
	[[ -n "$bootpfile" ]] && bootpNode="<bootp file='$bootpfile'/>"
	# subnet=N maps to 192.168.N.0/24 with a dhcp range in .128-.254
	[[ -n "$subnet" ]] && ipNode="<ip address=\"192.168.${subnet}.1\" netmask=\"255.255.255.0\" >
		$tftpNode
		<dhcp>
			<range start=\"192.168.${subnet}.128\" end=\"192.168.${subnet}.254\"/>
			$bootpNode
		</dhcp>
	</ip>"
	# refuse to clobber an existing network of the same name
	virsh net-info -- "$netname" &>/dev/null && {
		echo "{VM:WARN} Virt network $netname has been there."
		return
	}
	virsh net-define --file <(
		cat <<-NET
		<network>
			<name>$netname</name>
			<bridge name="$brname" />
			$forwardNode
			$ipNode
		</network>
		NET
	)
	virsh net-start $netname
	virsh net-autostart $netname
}
netinfo() {
	# Show info plus the XML definition for each named virtual network.
	local _net
	for _net in "$@"; do
		virsh net-info -- $_net || continue
		virsh net-dumpxml -- $_net
	done
}
netstart() {
	# Start each named (defined but inactive) virtual network.
	local _net
	for _net in "$@"; do virsh net-start -- $_net; done
}
netdelete() {
	# Tear down and undefine the given virtual networks.
	echo -e "\n{VM:INFO} => delete virtual net: $@ .."
	local _net
	for _net in "$@"; do
		virsh net-destroy -- $_net
		virsh net-undefine -- $_net
	done
}
vercmp() {
	# Compare two version strings using sort -V ordering.
	# Usage: vercmp <ver1> <op> <ver2>
	#   op: =|eq  >|gt  <|lt  >=|ge  <=|le  match (egrep pattern)
	# Returns 0 when the comparison holds, 1 otherwise.
	usage() {
		echo "usage: vercmp <ver1> < = | '>' | '<' | '>=' | '<=' > <ver2>" >&2
		echo "       vercmp <ver1> < eq | gt | lt | ge | le > <ver2>" >&2
		echo "       vercmp <ver1> < match > <'pattern'>" >&2
	}
	[ $# != 3 ] && {
		usage
		return 1
	}
	# fix: these were globals; in particular 'eq' was never reset between
	# calls, so one equal comparison corrupted every later =/ge/le result
	local vl=$1
	local cmpType=$2
	local vr=$3
	local res=1
	local eq=
	local vmax=
	[ "$vl" = "$vr" ] && eq=1
	vmax=$(echo -e "$vl\n$vr" | sort -V | tail -n 1)	# version-wise maximum
	case "$cmpType" in
	=|eq)   [ "$eq" = 1 ] && res=0;;
	\>|gt)  [ "$eq" != 1 -a "$vl" = "$vmax" ] && res=0;;
	\<|lt)  [ "$eq" != 1 -a "$vr" = "$vmax" ] && res=0;;
	\>=|ge) [ "$vl" = "$vmax" ] && res=0;;
	\<=|le) [ "$vr" = "$vmax" ] && res=0;;
	*)      echo "$vl" | egrep -q "$vr"; res=$?;;
	esac
	return $res
}
create_vdisk() {
	# Create a partitioned disk-image file and mkfs its first partition.
	# $1=path  $2=size (number+unit suffix, e.g. 2G)  $3=fstype (e.g. xfs)
	# Honors $MKFS_OPT. losetup needs root — TODO confirm callers run privileged.
	local path=$1
	local size=$2
	local fstype=$3
	# sparse file: bs = the unit letter(s) of $size, seek = its numeric part
	dd if=/dev/null of=$path bs=1${size//[0-9]/} seek=${size//[^0-9]/}
	local dev=$(losetup --partscan --show --find $path)
	# fdisk script: new dos label, one primary partition spanning the disk
	printf "o\nn\np\n1\n\n\nw\n" | fdisk "$dev"
	mkfs.$fstype $MKFS_OPT "${dev}p1"
	losetup -d $dev
}
mount_vdisk() {
	# Loop-mount partition N (default 1) of disk image $1 at dir $2.
	# Honors $MNT_OPT for extra mount options.
	local path=$1
	local mp=$2
	local partN=${3:-1}
	# byte offset of the Nth partition = its start sector * sector size;
	# 'Units:' line supplies the sector size, 'Start' header precedes the rows
	local offset=$(fdisk -l -o Start "$path" |
		awk -v N=$partN '
			/^Units:/ { unit=$(NF-1); offset=0; }
			/^Start/ {
				for(i=0;i<N;i++)
					if(getline == 0) { $0=""; break; }
				offset=$1*unit;
			}
			END { print offset; }'
	)
	echo "offset: $offset"
	[[ -d "$mp" ]] || {
		echo "{warn} mount_vdisk: dir '$mp' not exist"
		return 1
	}
	# offset 0 means partition N was not found; partition 1 may legally
	# start at offset 0, so it is exempt from that check
	if [[ "$offset" -ne 0 || "$partN" -eq 1 ]]; then
		mount $MNT_OPT -oloop,offset=$offset $path $mp
	else
		echo "{warn} mount_vdisk: there's not part($partN) on disk $path"
		return 1
	fi
}
curl_download() {
	# Download <url> to <filename> with resume support.
	# Extra args are passed through to curl; $curlOpt (global) is honored.
	# If curl fails but the local file is already at least as large as the
	# remote Content-Length, treat the download as complete (rc 0).
	local filename=$1
	local url=$2
	shift 2;
	local curlopts="-f -L"
	local header=
	local fsizer=1
	local fsizel=0
	local rc=
	[[ -z "$filename" || -z "$url" ]] && {
		echo "Usage: curl_download <filename> <url> [curl options]" >&2
		return 1
	}
	# probe the server: remote size and whether byte ranges are supported
	header=$(curl -L -I -s $url|sed 's/\r//')
	fsizer=$(echo "$header"|awk '/Content-Length:/ {print $2; exit}')
	# fix: keep the 1-byte floor when Content-Length is absent, otherwise an
	# empty fsizer compares as 0 and any partial file looks "complete"
	fsizer=${fsizer:-1}
	if echo "$header"|grep -q 'Accept-Ranges: bytes'; then
		curlopts+=' --continue-at -'
	fi
	# fix: the log line expanded undefined $curl instead of $url
	echo "{VM:INFO} run: curl -o $filename $url $curlopts $curlOpt $@"
	curl -o $filename $url $curlopts $curlOpt "$@"
	rc=$?
	if [[ $rc != 0 && -s $filename ]]; then
		fsizel=$(stat --printf %s $filename)
		if [[ $fsizer -le $fsizel ]]; then
			echo "{VM:INFO} *** '$filename' already exist $fsizel/$fsizer"
			rc=0
		fi
	fi
	return $rc
}
#-------------------------------------------------------------------------------
enable_libvirt() {
	# One-time host setup: install libvirt + helper packages, configure
	# the libvirtd socket perms, libvirt-nss, libvirt group membership and
	# ACLs so a non-root user can manage qemu:///system guests.
	# $1 (force): redo setup even if the ~/.config/kiss-vm stamp dir exists.
	local force=$1
	local pkglist="libvirt libvirt-client virt-install virt-viewer qemu-kvm expect nmap-ncat tmux libguestfs-tools-c libvirt-nss dialog qemu-img iptables-nft"
	local sudouser=${SUDO_USER:-$(whoami)}
	eval sudouserhome=~$sudouser
	# the stamp dir marks a finished setup
	[[ -z "$force" && -d $sudouserhome/.config/kiss-vm ]] && return 0
	echo -e "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ enable libvirt start ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
	echo -e "{VM:INFO} checking libvirtd service and related packages ..."
	# ensure EPEL is enabled on RHEL-like hosts (rpm -E %rhel expands to the
	# major version, or the literal "%rhel" on non-RHEL systems)
	if ! egrep -q '^!?epel' < <(yum repolist 2>/dev/null); then
		OSV=$(rpm -E %rhel)
		if [[ "$OSV" != "%rhel" ]]; then
			yum $yumOpt install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-${OSV}.noarch.rpm 2>/dev/null
		fi
	fi
	rpm -q $pkglist || {
		sudo yum $yumOpt --setopt=strict=0 install -y $pkglist
	}
	#install packages(gm,gocr,vncdo) required by -vncget -vncput options
	echo -e "{VM:INFO} install gm gocr and vncdo ..."
	_file=ggv-install.sh
	if ! which $_file &>/dev/null; then
		_url=$baseUrl/utils/$_file
		mkdir -p ~/bin && curl_download ~/bin/$_file $_url && chmod +x ~/bin/$_file
		sudo cp ~/bin/$_file /usr/bin/.
	fi
	sudo ggv-install.sh
	echo -e "{VM:INFO} configure libvirt-nss ..."
	# let glibc resolve guest names through libvirt's nss plugins
	grep -q '^hosts:.*libvirt libvirt_guest' /etc/nsswitch.conf || {
		echo -e "{*INFO*} you have not configure /etc/nsswitch.conf, trying sudo sed ..."
		sudo sed -ri '/^hosts:/s/files /&libvirt libvirt_guest /' /etc/nsswitch.conf
	}
	echo -e "{VM:INFO} checking if ${sudouser} has joined group libvirt ..."
	[[ $(id -u) != 0 ]] && {
		if ! id -Gn | egrep -q -w libvirt; then
			if getent group libvirt|egrep -q -w $sudouser; then
				: #do nothing
			else
				echo -e "{*INFO*} run: sudo usermod -a -G libvirt $sudouser ..."
				sudo usermod -a -G libvirt $sudouser #or sudo gpasswd -a $sudouser libvirt
			fi
		fi
	}
	virtdconf=/etc/libvirt/libvirtd.conf
	echo -e "{VM:INFO} checking value of 'unix_sock_group' and 'unix_sock_rw_perms' in $virtdconf ..."
	ls $virtdconf &>/dev/null || sudo chmod +x /etc/libvirt
	# both settings must be uncommented for group rw access to the socket
	awk '/^unix_sock_group = "libvirt"/{c++} /^unix_sock_rw_perms = "0770"/{c++} END {rc=1; if(c==2) rc=0; exit rc}' $virtdconf || {
		echo -e "{*INFO*} confiure $virtdconf ..."
		sudo -- sh -c "
			sed -ri -e '/#unix_sock_group = \"libvirt\"/s/^#//' -e '/#unix_sock_rw_perms = \"0770\"/s/^#//' $virtdconf
			egrep -e ^unix_sock_group -e ^unix_sock_rw_perms $virtdconf
			systemctl restart libvirtd && systemctl restart virtlogd
		"
	}
	pvirtconf=$sudouserhome/.config/libvirt/libvirt.conf
	# default to the system connection when the 'default' net is missing
	virsh net-info default &>/dev/null && grep -q -w default <(virsh net-list --name) || {
		#export LIBVIRT_DEFAULT_URI=qemu:///system
		echo 'uri_default = "qemu:///system"' >>$pvirtconf
	}
	: <<'COMM'
qemuconf=/etc/libvirt/qemu.conf
eval echo -e "{VM:INFO} checking if qemu can read image in ~$sudouser ..."
sudo egrep -q '^#(user|group) =' "$qemuconf" && {
	sudo sed -i '/^#(user|group) =/s/^#//' "$qemuconf"
}
COMM
	echo -e "{VM:INFO} setfacl -mu:qemu:rx $sudouserhome ..."
	mkdir -p $VMSHOME
	# the qemu user must be able to traverse into the image directories
	setfacl -mu:qemu:rx $sudouserhome
	setfacl -mu:qemu:rx -R $VMSHOME
	setfacl -mu:qemu:rx -R $sudouserhome/.cache/virt-manager 2>/dev/null
	getfacl $sudouserhome
	getfacl $VMSHOME
	#first time
	[[ $(id -u) != 0 ]] && {
		if ! id -Gn | egrep -q -w libvirt; then
			if getent group libvirt|egrep -q -w $sudouser; then
				echo -e "{VM:WARN} you just joined group libvirt, but still need re-login to enable the change set ..."
				#echo -e "{VM:WARN} or run: newgrp libvirt #without re-login"
				grep -q "newgrp libvirt" $sudouserhome/.bashrc ||
					echo 'if ! id -Gn|grep -qw libvirt && groups $USER|grep -qw libvirt; then newgrp libvirt; exit; fi #kiss-vm' >> $sudouserhome/.bashrc
			else
				echo -e "{VM:WARN} you have not joined group libvirt. please check the warning msg above ^^^"
			fi
			exit 1
		else
			sed -i "/newgrp libvirt; exit; *#kiss-vm$/d" $sudouserhome/.bashrc
		fi
	}
	mkdir -p $sudouserhome/.config/kiss-vm
	echo -e "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ enable libvirt done! ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"
	[[ -z "$force" ]] && printf '\33[H\33[2J'	# clear screen after first-time setup
}
is_available_url() {
	# HEAD-request the url quietly; success means it is reachable
	# within the connect (8s) and total (16s) timeouts.
	local _u=$1
	curl --head --fail --silent --output /dev/null --connect-timeout 8 -m 16 $_u &>/dev/null
}
is_intranet() {
	# Reachability of the internal download server decides intranet mode.
	is_available_url http://download.devel.redhat.com
}
# functions that work in Intranet
if [[ $Intranet = yes ]]; then
install_distro_compose() {
	# Ensure the distro-compose helper is in PATH; download it
	# into ~/bin from $bkrClientImprovedUrl when absent.
	local _file=distro-compose
	if ! which $_file &>/dev/null; then
		_url=$bkrClientImprovedUrl/utils/$_file
		mkdir -p ~/bin && curl_download ~/bin/$_file $_url && chmod +x ~/bin/$_file
	fi
}
# Print the list of known distros via the distro-compose helper.
fetch_distro_list() {
	install_distro_compose; distro-compose --distrolist
}
fetch_distro_trees() {
	# Print distro-tree entries for distro $1 / arch $2, preferring
	# released/compose trees and falling back to Everything trees.
	local _d=$1 _arch=$2
	local _trees
	install_distro_compose
	_trees=$(distro-compose -d "$_d" --distrotrees|egrep "(released|compose).*${_arch}")
	if [[ -z "$_trees" ]]; then
		_trees=$(distro-compose -d "$_d" --distrotrees|egrep "(Everything).*${_arch}")
	fi
	echo "$_trees"
}
distro2location() {
	# Resolve a distro name (+optional variant, default Server) to the
	# fastest matching install-tree URL for the current architecture.
	local distro=$1
	local variant=${2:-Server}
	local arch=$(arch)
	distrotrees=$(fetch_distro_trees $distro $arch)
	urls=$(echo "$distrotrees" | awk '$3 ~ /https?:.*'"(${variant}|BaseOS|Everything)"'/{print $3}' | sort -u)
	# make sure the fastesturl.sh helper is available, then let it choose
	local _file=fastesturl.sh
	if ! which $_file &>/dev/null; then
		_url=$baseUrl/utils/$_file
		mkdir -p ~/bin && curl_download ~/bin/$_file $_url && chmod +x ~/bin/$_file
	fi
	$_file $urls
}
getimageurls() {
	# Scrape hrefs matching suffix regex $2 from index page $1 and print
	# absolute image URLs; rc=0 only when at least one image was emitted.
	local parenturl=$1
	local suffix_pattern=$2
	local rc=1
	local imagename
	for imagename in $(curl -L -s ${parenturl} | sed -nr '/.*"([^"]+'"${suffix_pattern}"')".*/{s//\1/;p}'); do
		echo -e "{VM:INFO} imagename: $imagename" >&2
		[[ "${imagename}" = *Atomic* ]] && continue	# skip Atomic images
		[[ "${imagename}" = CentOS*-ec2* ]] && continue	# skip EC2 images
		echo ${parenturl%/}/${imagename}
		rc=0
	done
	return $rc
}
distro2repos() {
	# Print "name:url" repo pairs for distro $1, derived from its base
	# tree url $2; only repos whose url answers a HEAD request are kept.
	local distro=$1
	local url=$2
	local Repos=()
	shopt -s nocasematch
	case $distro in
	RHEL-5*|RHEL5*)
		# last url components (reversed via tac): .../<verxosv>/<verytag>/<arch>/os
		{ read; read os arch verytag verxosv _; } < <(tac -s ' ' <<<"${url//\// }")
		debug_url=${url/\/os/\/debug}
		osv=${verxosv#RHEL-5-}
		Repos+=(
			Server:${url}/Server
			Cluster:${url}/Cluster
			ClusterStorage:${url}/ClusterStorage
			Client:${url}/Client
			Workstation:${url}/Workstation
			${osv}-debuginfo:${debug_url}
		)
		;;
	#RHEL-6-EXTRAS
	#CDN
	#https://cdn.redhat.com/content/dist/rhel/server/6/6Server/x86_64/extras/
	#Internal
	#http://pulp.dist.prod.ext.phx2.redhat.com/content/dist/rhel/server/6/6Server/x86_64/extras/
	#RHEL-7-EXTRAS
	#CDN
	#https://cdn.redhat.com/content/dist/rhel/server/7/7Server/x86_64/extras/
	#Internal
	#http://pulp.dist.prod.ext.phx2.redhat.com/content/dist/rhel/server/7/7Server/x86_64/extras/
	#RHEL-6-EXTRAS
	#CDN
	#http://cdn.stage.redhat.com/content/dist/rhel/server/6/6Server/x86_64/extras/
	#Internal
	#http://pulp.dist.stage.ext.phx2.redhat.com/content/dist/rhel/server/6/6Server/x86_64/extras/
	#RHEL-7-EXTRAS
	#CDN
	#http://cdn.stage.redhat.com/content/dist/rhel/server/7/7Server/x86_64/extras/
	#Internal
	#http://pulp.dist.stage.ext.phx2.redhat.com/content/dist/rhel/server/7/7Server/x86_64/extras/
	RHEL-6*|RHEL6*|centos6*|centos-6*)
		{ read; read os arch osv ver _; } < <(tac -s ' ' <<<"${url//\// }")
		debug_url=${url/\/os/\/debug}
		Repos+=(
			${osv}:${url}
			${osv}-SAP:${url/$osv/${osv}-SAP}
			${osv}-SAPHAHA:${url/$osv/${osv}-SAPHAHA}
			${osv}-debuginfo:${debug_url}
			${osv}-SAP-debuginfo:${debug_url/$osv/${osv}-SAP}
			${osv}-SAPHAHA-debuginfo:${debug_url/$osv/${osv}-SAPHAHA}
			extras:http://cdn.stage.redhat.com/content/dist/rhel/server/6/6Server/$(arch)/extras/os
			cdn_os:http://cdn.stage.redhat.com/content/dist/rhel/server/6/6Server/$(arch)/os
		)
		;;
	RHEL-7*|RHEL7*|centos7*|centos-7*)
		{ read; read os arch osv ver _; } < <(tac -s ' ' <<<"${url//\// }")
		debug_url=${url/\/os/\/debug\/tree}
		Repos+=(
			${osv}:${url}
			${osv}-optional:${url/$osv/${osv}-optional}
			${osv}-NFV:${url/$osv/${osv}-NFV}
			${osv}-RT:${url/$osv/${osv}-RT}
			${osv}-SAP:${url/$osv/${osv}-SAP}
			${osv}-SAPHAHA:${url/$osv/${osv}-SAPHAHA}
			${osv}-debuginfo:${debug_url}
			${osv}-optional-debuginfo:${debug_url/$osv/${osv}-optional}
			${osv}-NFV-debuginfo:${debug_url/$osv/${osv}-NFV}
			${osv}-RT-debuginfo:${debug_url/$osv/${osv}-RT}
			${osv}-SAP-debuginfo:${debug_url/$osv/${osv}-SAP}
			${osv}-SAPHAHA-debuginfo:${debug_url/$osv/${osv}-SAPHAHA}
			extras:http://cdn.stage.redhat.com/content/dist/rhel/server/7/7Server/$(arch)/extras/os
			cdn_os:http://cdn.stage.redhat.com/content/dist/rhel/server/7/7Server/$(arch)/os
			beaker-harness:https://download.devel.redhat.com/beakerrepos/harness/RedHatEnterpriseLinux7
		)
		;;
	RHEL-8*|RHEL8*)
		{ read; read os arch osv ver _; } < <(tac -s ' ' <<<"${url//\// }")
		debug_url=${url/\/os/\/debug\/tree}
		# extract compose type (rel-eng/nightly) and distro id from the url path
		read dtype distro <<< $(awk -F/+ '{
			for (i=3;i<NF;i++) { if ($(i+1) ~ /RHEL-/) {
				d=$(i+1)
				if (d ~ /RHEL-[0-9]$/) d=$(i+2)
				print($i, d); break }
			}
		}' <<<"$url")
		read prefix ver time <<< ${distro//-/ }
		[[ "$dtype" =~ rel-eng|nightly ]] || dtype=nightly
		Repos+=(
			BaseOS:${url}
			AppStream:${url/BaseOS/AppStream}
			CRB:${url/BaseOS/CRB}
			HighAvailability:${url/BaseOS/HighAvailability}
			NFV:${url/BaseOS/NFV}
			ResilientStorage:${url/BaseOS/ResilientStorage}
			RT:${url/BaseOS/RT}
			SAP:${url/BaseOS/SAP}
			SAPHANA:${url/BaseOS/SAPHANA}
			BaseOS-debuginfo:${debug_url}
			AppStream-debuginfo:${debug_url/BaseOS/AppStream}
			CRB-debuginfo:${debug_url/BaseOS/CRB}
			HighAvailability-debuginfo:${debug_url/BaseOS/HighAvailability}
			NFV-debuginfo:${debug_url/BaseOS/NFV}
			ResilientStorage-debuginfo:${debug_url/BaseOS/ResilientStorage}
			RT-debuginfo:${debug_url/BaseOS/RT}
			SAP-debuginfo:${debug_url/BaseOS/SAP}
			SAPHANA-debuginfo:${debug_url/BaseOS/SAPHANA}
			Buildroot:http://download.devel.redhat.com/rhel-8/$dtype/BUILDROOT-8/latest-BUILDROOT-$ver-RHEL-8/compose/Buildroot/$arch/os
			beaker-harness:https://download.devel.redhat.com/beakerrepos/harness/RedHatEnterpriseLinux8
		)
		;;
	RHEL-9*|RHEL9*)
		{ read; read os arch osv ver _; } < <(tac -s ' ' <<<"${url//\// }")
		debug_url=${url/\/os/\/debug\/tree}
		read dtype distro <<< $(awk -F/+ '{
			for (i=3;i<NF;i++) { if ($(i+1) ~ /RHEL-/) {
				d=$(i+1)
				if (d ~ /RHEL-[0-9]$/) d=$(i+2)
				print($i, d); break }
			}
		}' <<<"$url")
		read prefix ver time <<< ${distro//-/ }
		dtype=nightly
		Repos+=(
			BaseOS:${url}
			AppStream:${url/BaseOS/AppStream}
			CRB:${url/BaseOS/CRB}
			HighAvailability:${url/BaseOS/HighAvailability}
			NFV:${url/BaseOS/NFV}
			ResilientStorage:${url/BaseOS/ResilientStorage}
			RT:${url/BaseOS/RT}
			SAP:${url/BaseOS/SAP}
			SAPHANA:${url/BaseOS/SAPHANA}
			BaseOS-debuginfo:${debug_url}
			AppStream-debuginfo:${debug_url/BaseOS/AppStream}
			CRB-debuginfo:${debug_url/BaseOS/CRB}
			HighAvailability-debuginfo:${debug_url/BaseOS/HighAvailability}
			NFV-debuginfo:${debug_url/BaseOS/NFV}
			ResilientStorage-debuginfo:${debug_url/BaseOS/ResilientStorage}
			RT-debuginfo:${debug_url/BaseOS/RT}
			SAP-debuginfo:${debug_url/BaseOS/SAP}
			SAPHANA-debuginfo:${debug_url/BaseOS/SAPHANA}
			Buildroot:http://download.devel.redhat.com/rhel-9/$dtype/BUILDROOT-9/latest-BUILDROOT-$ver/compose/Buildroot/$arch/os
			beaker-harness:https://download.devel.redhat.com/beakerrepos/harness/RedHatEnterpriseLinux8
		)
		;;
	esac
	shopt -u nocasematch
	# emit only the repos whose url actually answers
	for repo in "${Repos[@]}"; do
		read _name _url <<<"${repo/:/ }"
		is_available_url $_url &&
			echo "$repo"
	done
}
fi
Usage() {
	# Print the full help text: options, examples (intranet examples
	# only when $Intranet=yes) and the sub-command reference.
	cat <<-'EOF'
	Usage:
	  vm [subcmd] <-d distroname> [OPTIONs] ...
	Options:
	  -h, --help                  #Display this help.
	  --prepare                   #check/install/configure libvirt and other dependent packages
	  -I                          #create VM by import existing disk image, auto search url according distro name
	  -i <url/path>               #create VM by import existing disk image, value can be url or local path
	  -L                          #create VM by using location, auto search url according distro name
	  -l <url>                    #create VM by using location
	  -C <iso path>               #create VM by using ISO image
	  --ks <file>                 #kickstart file, will auto generate according distro name if omitting
	  -n|--vmname <name>
	                              #VM name suffix, will auto generate according distro name if omitting
	  --getvmname                 #get *final* vmname. e.g:
	                                vm -r --getvmname centos-8 -n nfsserv
	                                vmname=$(vm -r --getvmname centos-8 -n nfsserv)
	  -f|--force                  #over write existing VM with same name
	  -p|-pkginstall <pkgs>
	                              #pkgs in default system repo, install by yum or apt-get
	  -b|-brewinstall <args>
	                              #pkgs in brew system or specified by url, install by internal brewinstall.sh
	                               `-> just could be used in Intranet
	  -g|-genimage                #generate VM image, after install shutdown VM and generate new qcow2.xz file
	  --rm                        #like --rm option of docker/podman, remove VM after quit from console
	  --nocloud|--nocloud-init
	                              #don't create cloud-init iso for the image that is not cloud image
	  --osv <variant>
	                              #OS_VARIANT, optional. virt-install will attempt to auto detect this value
	                               # you can get [-osv variant] info by using:
	                                 $ osinfo-query os  #RHEL-7 and later
	                                 $ virt-install --os-variant list  #RHEL-6
	  --nointeract                #exit from virsh console after install finish
	  --noauto                    #enter virsh console after installing start
	  --saveimage [path]
	                              #save image in path if install with import mode
	  --downloadonly              #download image only if there is qcow* image
	  --cpus <N>                  #number of virtual cpus, default 4
	  --msize <size>              #memory size, default 2048
	  --dsize <size>              #disk size, default 16
	  --net <$name[,$model]>
	                              #attach tun dev(vnetN) and connect to net $name, optional $model: virtio,e1000,...
	  --net-br <$brname[,$model]>
	                              #attach tun dev(vnetN) and connect to bridge $brname, optional $model: virtio,e1000,...
	  --net-macvtap, --netmacvtap [$sourceNIC[,$model]]
	                              #attach macvtap interface over $sourceNIC, optional $model: virtio,e1000,...
	  --macvtapmode <vepa|bridge>
	                              #macvtap mode
	  -r|--ready                  #virt config is ready, don't have to run enable_libvirt function
	  --xdisk <size[,fstype]>
	                              #add an extra disk, could be specified multi-times. size unit is G
	                               #e.g: --xdisk 10 --xdisk 20,xfs
	  --disk <img[,bus=]>
	                              #add exist disk file, could be specified multi-times.
	  --bus <$start_disk_bus>
	  --sharedir <shpath[:target]>
	                              #share path between host and guest
	  --nvdimm <nvdimm list | no>
	                              #one or more nvdimm specification, format: 511+1 (targetSize+labelSize)
	                               #e.g: --nvdimm="511+1 1023+1"  -> two nvdimm device
	                               #e.g: --nvdimm="511 1023"      -> two nvdimm device
	                               #       ^^^^^^^^ default labelSize is 1, if omitting
	                               #note: nvdimm function need qemu >= v2.6.0(RHEL/CentOS 8.0 or later)
	  --kdump                     #enable kdump
	  --fips                      #enable fips
	  --nosshkey                  #don't inject sshkey
	  -v|--verbose                #verbose mode
	  --debug                     #debug mode
	  --vncget                    #get vnc screen and convert to text by gocr
	  --vncput <msg>              #send string or key event to vnc server, could be specified multi-times
	                               #e.g: --vncput root --vncput key:enter --vncput password --vncput key:enter
	  --vncputln <msg>
	                              #alias of: --vncput msg --vncput key:enter
	  --vncput-after-install <msg>
	                              #send string or key event ASAP after virt-intall
	  --xml                       #just generate xml
	  --machine <machine type>
	                              #specify machine type  #get supported type by: qemu-kvm -machine help
	  --qemu-opts                 #Pass-through qemu options
	  --qemu-env                  #Pass-through qemu env[s]
	  --enable-nested-vm          #enable nested on host
	  --enable-guest-hypv         #enable guest hypervisor, same as --qemu-opts="-cpu host,+vmx" or --qemu-opts="-cpu host,+svm"
	                               #ref: https://www.linux-kvm.org/page/Nested_Guests
	  --disable-guest-hypv        #disable guest hypervisor
	  -x[arg]                     #expected return code of sub-command exec, if doesn't match output test fail msg
	                               # e.g: -x  or -x0  or -x1,2,3  or -x1,10,100-200
	  --pxe                       #PXE install
	                               # e.g: vm fedora-32 -n f32 -net-macvtap -pxe --noauto -f
	  --diskless                  #diskless install
	                               # e.g: vm fedora-32 -n f32-diskless --net pxenet --pxe --diskless -f
	  -q                          #quiet mode, intend suppress the outputs of command yum, curl
	EOF
	[[ "$Intranet" = yes ]] && cat <<-EOF
	Example Intranet:
	  $P  # will enter a TUI show you all available distros that could auto generate source url
	  $P RHEL-7.7  # install RHEL-7.7 from cloud-image(by default)
	  $P RHEL-6.10 -L  # install RHEL-6.10 from Location(by -L option)
	  $P RHEL-8.1.0 -f -p "vim wget git"  # -f force install VM and ship pkgs: vim wget git
	  $P RHEL-8.1.0 -brewinstall 23822847  # ship brew scratch build pkg (by task id)
	  $P RHEL-8.1.0 -brewinstall kernel-4.18.0-147.8.el8  # ship brew build pkg (by build name)
	  $P RHEL-8.1.0 -brewinstall "lstk -debug"  # ship latest brew build release debug kernel
	  $P RHEL-8.1.0 -brewinstall "upk -debug"  # ship latest brew build upstream debug kernel
	  $P RHEL-8.1.0 --nvdimm "511 1022+2"  # add two nvdimm device
	  $P rhel-8.2.0%  # nightly 8.2      # fuzzy search distro: ignore-case
	  $P rhel-8.2*-????????.?  # rtt 8.2 # - and only support glob * ? syntax, and SQL %(same as *)
	  $P --enable-nested-vm  #enable nested on host, need sudo
	  $P rhel-8.2% -enable-guest-hypv -msize=\$((8*1024)) -dsize=120  # enable hyper-v on guest
	EOF
	cat <<-EOF
	Example Internet:
	  $P  # will enter a TUI show you all available distros that could auto generate source url
	  $P CentOS-8-stream -b ftp://url/path/x.rpm
	  $P CentOS-8 -p "jimtcl vim git make gcc"
	  $P CentOS-7 -p "vim git wget make gcc"
	  $P CentOS-6
	  $P fedora-32
	  $P centos-5 -l http://vault.centos.org/5.11/os/x86_64/
	  $P debian-10 -i https://cdimage.debian.org/cdimage/openstack/current-10/debian-10-openstack-amd64.qcow2
	  $P openSUSE-leap-15.2
	  $P --enable-nested-vm  #enable nested on host, need sudo
	  $P CentOS-7 -enable-guest-hypv -msize=\$((8*1024)) -dsize=120  # enable hyper-v on guest
	Example from local image:
	  $P rhel-8-up -i ~/myimages/RHEL-8.1.0-20191015.0/rhel-8-upstream.qcow2.xz --nocloud-init
	  $P debian-10 -i /mnt/vm-images/debian-10-openstack-amd64.qcow2
	  $P openSUSE-leap-15.2 -i ~/myimages/openSUSE-Leap-15.2-OpenStack.x86_64.qcow2
	Example [subcmd]:
	  vm list  #list all VMs          //you can use ls,li,lis* instead list
	  vm login [/c] [VM]  #login VM   //you can use l,lo,log* instead login
	  vm delete [VM list]  #delete VMs  //you can use d,de,del*,r,rm instead delete
	  vm ifaddr [VM]  #show ip address  //you can use i,if,if* instead ifaddr
	  vm vncport [VM]  #show vnc host:port  //you can use v,vnc instead vncport
	  vm xml [VM]  #dump vm xml file  //you can use x,xm instead xml
	  vm edit [VM]  #edit vm xml file  //you can use ed,ed* instead edit
	  vm exec [-v] [-x] "\$VM" -- "cmd"  #login VM and exec cmd  //you can use e,ex,ex* instead exec
	  vm reboot [/w] [VM]  #reboot VM  //option /w indicate wait until reboot complete(port 22 is available)
	  vm stop [VM]  #stop/shutdonw VM  //nil
	  vm start [VM]  #start VM         //nil
	  vm net  #list all virtual network
	  vm net netname=nat-net brname=virbrM subnet=10 forward=nat  #create virtual network 'nat-net', default forward is 'nat'
	  vm net netname=isolated-net brname=virbrN subnet=20 forward=  #create virtual network 'isolated-net'
	  vm net netname=pxe brname=virpxebrN subnet=200 tftproot=/var/lib/tftpboot bootpfile=pxelinux/pxelinux.0
	  vm netinfo netname  #show detail info of virtual network 'netname'
	  vm netstart netname  #start virtual network 'netname'
	  vm netdel netname  #delete virtual network 'netname'
	EOF
}
run() {
	# Execute "$@" verbatim, echoing the command first when DEBUG=yes.
	# A call without arguments is a no-op (rc 0).
	(( $# )) || return 0
	[[ "$DEBUG" = yes ]] && echo "[sys]" "$@"
	"$@"
}
expandrc() {
	# Expand a return-code spec like "1,2,100-102" into the
	# space-separated list "1 2 100 101 102" (no trailing newline).
	# Tokens that are neither a number nor a N-M range are dropped.
	local _spec=$1 _tok
	local _list=()
	for _tok in ${_spec//,/ }; do
		case "$_tok" in
		*-*) [[ "$_tok" =~ ^[0-9]+-[0-9]+$ ]] && eval _list+=({${_tok/-/..}});;
		*)   [[ "$_tok" =~ ^[0-9]+$ ]] && _list+=($_tok);;
		esac
	done
	echo -n ${_list[@]}
}
is_bridge() {
	# True when $1 names an existing linux bridge device.
	local _dev=$1
	[[ -n "$_dev" ]] || return 1
	ip -d a s $_dev | grep -qw bridge
}
get_default_if() {
	# Print the interface carrying the default route. When $1 is non-empty
	# and that interface is a bridge, print one of its UP slave NICs
	# instead (i.e. a real NIC rather than the bridge).
	local notbr=$1 #indicate get real NIC not bridge
	local _iface= iface=
	local type=
	ifaces=$(ip route | awk '/^default/{print $5}')
	for _iface in $ifaces; do
		# 3rd line of `ip -d link` carries the link kind (empty for plain
		# NICs); accept plain NICs, altname-only entries and bridges
		type=$(ip -d link show dev $_iface|sed -n '3{s/^\s*//; p}')
		[[ -z "$type" || "$type" = altname* || "$type" = bridge* ]] && {
			iface=$_iface
			break
		}
	done
	if [[ -n "$notbr" ]] && is_bridge $iface; then
		# ls /sys/class/net/$iface/brif
		if command -v brctl >/dev/null; then
			brctl show $iface | awk 'NR==2 {print $4}'
		else
			# no brctl: find the first UP slave whose master is $iface
			ip link show type bridge_slave | awk -F'[ :]+' '/master '$iface' state UP/{print $2}' | head -n1
		fi
		return 0
	fi
	echo $iface
}
# command line parse
# Parse the CLI with getopt(1) (-a allows single-dash long options); each
# option is normalized into a global consumed by the main logic below.
_at=`getopt -o hd:Ll:C:fn:gb:p:Ii:rvdx::Pq \
	--long help \
	--long debug \
	--long prepare \
	--long ks: \
	--long rm \
	--long osv: \
	--long os-variant: \
	--long force \
	--long vmname: \
	--long genimage \
	--long xzopt: \
	--long brewinstall: \
	--long pkginstall: \
	--long geturl \
	--long getvmname \
	--long nocloud-init --long nocloud \
	--long cpus: \
	--long msize: \
	--long dsize: \
	--long net: \
	--long net-br: \
	--long net-macvtap:: --long netmacvtap:: \
	--long macvtapmode: \
	--long nointeract \
	--long noauto \
	--long saveimage:: --long sa:: \
	--long downloadonly \
	--long ready \
	--long bus: \
	--long xdisk: \
	--long disk: \
	--long sharedir: \
	--long nvdimm: \
	--long kdump \
	--long fips \
	--long nosshkey \
	--long verbose \
	--long vncget \
	--long vncput: \
	--long vncputln: \
	--long vncput-after-install: \
	--long xml \
	--long machine: \
	--long qemu-opts: \
	--long qemu-env: \
	--long enable-nested-vm \
	--long enable-guest-hypv \
	--long disable-guest-hypv \
	--long pxe \
	--long diskless \
	-a -n "$0" -- "$@"`
[[ $? != 0 ]] && { exit 1; }
eval set -- "$_at"
# consume the normalized options; getopt guarantees a terminating --
while true; do
	case "$1" in
	-h|--help) Usage; shift 1; exit 0;;
	--prepare) ONLY_PREPARE=yes; shift 1;;
	-d) Distro=$2; shift 2;;
	-C) InstallType=cdrom; Isourl=$2; shift 2;;
	-l) InstallType=location; Location=$2; shift 2;;
	-L) InstallType=location; shift 1;;
	-i) InstallType=import; Imageurl=${2}; shift 2;;
	-I) InstallType=import; shift 1;;
	-P|--pxe) InstallType=pxe; shift 1;;
	--diskless) DISKLESS=yes; shift 1;;
	--ks) KSPath=$2; shift 2;;
	--rm) RM=yes; shift 1;;
	--xzopt) XZ="$2"; shift 2;;
	-f|--force) OVERWRITE="yes"; shift 1;;
	-n|--vmname) VMName="$2"; shift 2;;
	-g|--genimage) GenerateImage=yes; shift 1;;
	--geturl) OnlyGetUrl=yes; shift 1;;
	--getvmname) OnlyGetVMName=yes; shift 1;;
	-b|--brewinstall) BPKGS="$2"; shift 2;;
	-p|--pkginstall) PKGS="$2"; shift 2;;
	--osv|--os-variant) VM_OS_VARIANT="$2"; shift 2;;
	--nocloud*) NO_CLOUD_INIT="yes"; shift 1;;
	--dsize) dsizeflag=1; DSIZE="$2"; shift 2;;
	--msize) MSIZE="$2"; shift 2;;
	--cpus) VCPUS="$2"; shift 2;;
	--nointeract) INTERACT="no"; shift 1;;
	--noauto) NOAUTO="yes"; shift 1;;
	# network options accumulate virt-install --network arguments
	--net)
		read netname model <<<"${2/,/ }"
		NETWORK_OPTS+=" --network=network=$netname,model=${model:-virtio}"; shift 2;;
	--net-br)
		read brname model <<<"${2/,/ }"
		NETWORK_OPTS+=" --network=bridge=$brname,model=${model:-virtio}"; shift 2;;
	--net-macvtap|--netmacvtap)
		read srcif model <<<"${2/,/ }"
		srcif=${srcif:-$(get_default_if)}
		[[ "$srcif" = - ]] && srcif=$(get_default_if)
		NETWORK_OPTS+=" --network=type=direct,source=$srcif,source_mode=$MacvtapMode,model=${model:-virtio}"; shift 2;;
	--macvtapmode) MacvtapMode="$2"; shift 2;;
	--saveimage|--sa)
		SAVE_IMAGE="yes"
		DownloadImagePath=${2:-$ImagePath/download}
		shift 2
		;;
	--downloadonly) SAVE_IMAGE="yes"; DOWNLOAD_ONLY="yes"; InstallType=import; shift 1;;
	-r|--ready) VIRT_READY=yes; shift 1;;
	--bus) START_DISK_BUS="$2"; shift 2;;
	--xdisk) EXTRA_DISKS+=("$2"); shift 2;;
	--disk) DISKS+=("$2"); shift 2;;
	--sharedir) SHARE_DIRS+=("$2"); shift 2;;
	--nvdimm) NVDIMM_LIST=$2; shift 2;;
	--kdump) kdump=yes; shift 1;;
	--fips) fips=yes; shift 1;;
	--nosshkey) NO_SSHKEY=yes; shift 1;;
	-v|--verbose) VERBOSE=yes; shift 1;;
	--debug) DEBUG=yes; shift 1;;
	--vncget) VNCGET=yes; shift 1;;
	--vncput) VNCPUTS+=("$2"); shift 2;;
	--vncputln) [[ -n "$2" ]] && VNCPUTS+=("$2" "key:enter") || VNCPUTS+=("key:enter"); shift 2;;
	--vncput-after-install) VNCPUT_AFTER_INSTALL="$2"; shift 2;;
	--xml) XML=--print-xml; shift 1;;
	--machine) MACHINE_OPT="--machine=$2"; shift 2;;
	--qemu-opts) QEMU_OPTS="--qemu-commandline=$2"; shift 2;;
	--qemu-env) QEMU_ENV+=("--qemu-commandline=env=$2"); shift 2;;
	--enable-nested-vm) ENABLE_NESTED=yes; shift 1;;
	--enable-guest-hy*) ENABLE_L2VM=yes; shift 1;;
	--disable-guest-hy*) ENABLE_L2VM=no; shift 1;;
	# -x accepts an optional list/range of expected return codes
	-x) expectedRC=${2:-0}; expectedrc=$(expandrc ${expectedRC#=}); shift 2;;
	-q) QUIET=yes; shift 1;;
	--) shift; break;;
	esac
done
# quiet mode: pass -q/-s down to yum and curl invocations
[[ "$QUIET" = yes ]] && {
	yumOpt=-q
	curlOpt=-s
}
# __main__
# Require hardware virtualization: vmx/svm flags on x86; the 'CPU 0000%@'
# match presumably covers another arch's /proc/cpuinfo format — TODO confirm
if egrep -q -wo '(vmx|svm)' /proc/cpuinfo || egrep -q 'CPU 0000%@' /proc/cpuinfo; then
	[[ "$VIRT_READY" != yes ]] && {
		# keep stdout clean: send setup chatter to the tty, or stderr
		# when stdout is not a terminal (e.g. command substitution)
		if [[ -t 1 ]]; then
			enable_libvirt $ONLY_PREPARE &>/dev/tty
		else
			#echo -e "{VM:WARN} there's not /dev/tty file redirect enable_libvirt msg to stderr" >&2
			enable_libvirt $ONLY_PREPARE >&2
		fi
		[[ "$ONLY_PREPARE" = yes ]] && exit
	}
else
	echo -e "{VM:WARN} current machine doesn't support Virtualization, show help info with\n $PROG -h"
	exit 1
fi
# Persistently enable nested KVM for the loaded vendor module (kvm_intel or
# kvm_amd): write a modprobe options file, then reload the module if nesting
# is not already active so the new parameter takes effect.
# Outputs: the module's current 'nested' parameter value on stdout.
# Returns: 1 when the module cannot be unloaded (still in use by running VMs).
enable_nested_kvm() {
	local kmodule=$(lsmod|awk '$1 == "kvm" {print $NF}')
	local vendor=${kmodule#kvm_}
	{
	echo "options kvm-$vendor nested=1"
	# the extra shadow-vmcs/apicv/ept knobs only exist on Intel
	[[ "$vendor" = intel ]] && cat <<-EOF
	options kvm-$vendor enable_shadow_vmcs=1
	options kvm-$vendor enable_apicv=1
	options kvm-$vendor ept=1
	EOF
	} | sudo tee /etc/modprobe.d/kvm-nested.conf >/dev/null
	# 'nested' reads Y/y/1 when already enabled; only then skip the reload
	if [[ $(< /sys/module/$kmodule/parameters/nested) != [Yy1] ]]; then
		modprobe -r $kmodule || {
			echo -e "{VM:WARN} stop tasks are using module $kmodule, and try again"
			return 1
		}
		modprobe $kmodule
	fi
	cat /sys/module/$kmodule/parameters/nested
}
# --enable-nested-vm: only flip nested KVM on the host, then exit
[[ "$ENABLE_NESTED" = yes ]] && {
	enable_nested_kvm
	exit $?
}
# Report whether nested KVM is already switched on for the loaded kvm
# vendor module.
# Returns: 0 when /sys/module/<kvm_vendor>/parameters/nested exists and
# reads Y/y/1, non-zero otherwise (including when no kvm module is loaded).
support_nested_kvm() {
	local mod nestedf
	mod=$(lsmod|awk '$1 == "kvm" {print $NF}')
	nestedf=/sys/module/$mod/parameters/nested
	[[ -f $nestedf && $(< $nestedf) == [Yy1] ]]
}
# default ENABLE_L2VM to yes when the host already has nested kvm enabled
support_nested_kvm && ENABLE_L2VM=${ENABLE_L2VM:-yes}
# expose the vendor's virtualization extension to the guest CPU so the
# guest itself can run VMs (L2)
[[ "$ENABLE_L2VM" = yes ]] && {
	kmodule=$(lsmod|awk '$1 == "kvm" {print $NF}')
	vendor=${kmodule#kvm_}
	case $vendor in
	intel) QEMU_OPTS+=" -cpu host,+vmx";;
	amd) QEMU_OPTS+=" -cpu host,+svm";;
	esac
}
mkdir -p $RuntimeTmp
mkdir -p ${DownloadImagePath}
# sub-command
# NOTE(review): '[[ ... ]] || true &&' always runs this block regardless of $P;
# the $P test is effectively dead -- confirm whether that is intended.
[[ $P = vmc || $P = vm ]] || true && {
	subcmd=$1
	# every sub-command strips an optional '-d' flag, delegates to its vm*
	# helper and exits; 'create' just falls through to the install flow below
	case "$subcmd" in
	create|creat) shift;;
	r|rm|d|de|del*) shift; [[ "$1" = -d ]] && shift; vmdelete "$@"; exit $?;;
	l|lo|log*) shift; [[ "$1" = -d ]] && shift; vmlogin "$@"; exit $?;;
	cpfrom|cpf*) shift; [[ "$1" = -d ]] && shift; vmcopyfrom "$@"; exit $?;;
	cpto|cpt*) shift; [[ "$1" = -d ]] && shift; vmcopyto "$@"; exit $?;;
	e|ex|ex*) shift; [[ "$1" = -d ]] && shift; vmexec "$@"; exit $?;;
	ls|li|lis|list) shift; [[ "$1" = -d ]] && shift; vmlist "$@"; exit $?;;
	i|if|if*) shift; [[ "$1" = -d ]] && shift; vmifaddr "$@"; exit $?;;
	v|v*) shift; [[ "$1" = -d ]] && shift; vmvncport "$@"; exit $?;;
	x|xm*) shift; [[ "$1" = -d ]] && shift; vmxml "$@"; exit $?;;
	ed|ed*) shift; [[ "$1" = -d ]] && shift; vmedit "$@"; exit $?;;
	reboot|reb|re*) shift; [[ "$1" = -d ]] && shift; vmreboot "$@"; exit $?;;
	stop|sto*) shift; [[ "$1" = -d ]] && shift; vmstop "$@"; exit $?;;
	start|sta*) shift; [[ "$1" = -d ]] && shift; vmstart "$@"; exit $?;;
	clone) shift; [[ "$1" = -d ]] && shift; vmclone "$@"; exit $?;;
	net|netcreate) shift; [[ "$1" = -d ]] && shift; netcreate "$@"; exit $?;;
	netinfo) shift; [[ "$1" = -d ]] && shift; netinfo "$@"; exit $?;;
	netstart) shift; [[ "$1" = -d ]] && shift; netstart "$@"; exit $?;;
	netdel|netdelete) shift; [[ "$1" = -d ]] && shift; netdelete "$@"; exit $?;;
	esac
}
[[ -n "$VNCPUTS" || -n "$VNCGET" ]] && { vmvncport "$@"; exit $?; }
declare -A distroInfo
distroInfo[CentOS-8-stream]="https://cloud.centos.org/centos/8-stream/x86_64/images/ http://mirror.centos.org/centos/8-stream/BaseOS/x86_64/os/"
distroInfo[CentOS-8]="https://cloud.centos.org/centos/8/x86_64/images/ http://mirror.centos.org/centos/8/BaseOS/x86_64/os/"
distroInfo[CentOS-7]="https://cloud.centos.org/centos/7/images/%%GenericCloud-.{4}.qcow2c http://mirror.centos.org/centos/7/os/x86_64/"
distroInfo[CentOS-6]="https://cloud.centos.org/centos/6/images/%%GenericCloud.qcow2c http://mirror.centos.org/centos/6/os/x86_64/"
# https://ord.mirror.rackspace.com/fedora/releases/$version/Cloud/
distroInfo[fedora-rawhide]="https://ord.mirror.rackspace.com/fedora/development/rawhide/Cloud/x86_64/images/"
distroInfo[fedora-32]="https://ord.mirror.rackspace.com/fedora/releases/32/Cloud/x86_64/images/"
distroInfo[fedora-31]="https://ord.mirror.rackspace.com/fedora/releases/31/Cloud/x86_64/images/"
distroInfo[fedora-30]="https://ord.mirror.rackspace.com/fedora/releases/30/Cloud/x86_64/images/"
distroInfo[fedora-29]="https://ord.mirror.rackspace.com/fedora/releases/29/Cloud/x86_64/images/"
# https://cdimage.debian.org/cdimage/openstack/testing/
# https://cdimage.debian.org/cdimage/openstack/$latestVersion/
# https://cdimage.debian.org/cdimage/openstack/archive/$olderVersion/
distroInfo[debian-testing]="https://cdimage.debian.org/cdimage/openstack/testing/debian-testing-openstack-amd64.qcow2"
distroInfo[debian-10]="https://cdimage.debian.org/cdimage/openstack/current-10/debian-10-openstack-amd64.qcow2"
distroInfo[debian-9]="https://cdimage.debian.org/cdimage/openstack/current-9/debian-9-openstack-amd64.qcow2"
distroInfo[openSUSE-leap-15.2]="https://download.opensuse.org/repositories/Cloud:/Images:/Leap_15.2/images/openSUSE-Leap-15.2-OpenStack.x86_64.qcow2"
distroInfo[FreeBSD-12.2]="https://download.freebsd.org/ftp/releases/VM-IMAGES/12.2-RELEASE/amd64/Latest/FreeBSD-12.2-RELEASE-amd64.qcow2.xz"
distroInfo[FreeBSD-13.0]="https://download.freebsd.org/ftp/snapshots/VM-IMAGES/13.0-CURRENT/amd64/Latest/FreeBSD-13.0-CURRENT-amd64.qcow2.xz"
if is_intranet; then
distroInfo[FreeBSD-12.2]="http://download.devel.redhat.com/qa/rhts/lookaside/vm-images/FreeBSD-12.2/FreeBSD-12.2-RELEASE-amd64.qcow2.xz"
distroInfo[FreeBSD-13.0]="http://download.devel.redhat.com/qa/rhts/lookaside/vm-images/FreeBSD-13.0/FreeBSD-13.0-CURRENT-amd64.qcow2.xz"
else
Intranet=no
baseUrl=https://raw.githubusercontent.com/tcler/kiss-vm-ns/master
#bkrClientImprovedUrl=https://raw.githubusercontent.com/tcler/bkr-client-improved/master
fi
# Creating a pre-formatted extra disk ("size,fstype") needs root (the raw
# image is mkfs'ed via sudo later); fail fast with a hint before any work.
for disk in "${EXTRA_DISKS[@]}"; do
	read size fstype _ <<<"${disk//,/ }"
	[[ -n "$fstype" && $(id -u) -ne 0 ]] && {
		echo "{WARN} creating extra disk($disk) need super user permission, try:"
		echo " sudo $0 ${_at[@]} $@"
		exit 1
	}
done
# Phase-0 get distro name
[[ -z "$Distro" ]] && Distro=$1
[[ -z "$Distro" ]] && {
if [[ -z "$Location" && -z "$Imageurl" && -z "$Isourl" ]]; then
which dialog &>/dev/null || yum install -y dialog &>/dev/null
distropatternOut=$RuntimeTmp/distroPatternOut
distropatternIn=$RuntimeTmp/distroPatternIn
distrolist=$RuntimeTmp/distroList
dialogres=$RuntimeTmp/dialogRes
touch $distropatternOut $distropatternIn
echo -e "CentOS\nfedora\ndebian\nopenSUSE\nFreeBSD" >>$distropatternOut
for dname in "${!distroInfo[@]}"; do echo "$dname"; done >>$distrolist
[[ "$Intranet" = yes ]] && {
echo -e "Fedora\nRHEL-8\nRHEL-7\nRHEL-6\nRHEL5" >>$distropatternIn
fetch_distro_list >>$distrolist
}
familys=$(sed -e 's/.*/"&" "" 1/' $distropatternIn $distropatternOut)
dialog --backtitle "$0" --radiolist "please selet distro family/pattern:" 16 40 12 $familys 2>$dialogres || { Usage; exit 0; }
pattern=$(head -n1 $dialogres|sed 's/"//g')
dList=$(sed -e '/ /d' -e 's/.*/"&" "" 1/' $distrolist|egrep "$pattern")
[[ "$Intranet" = yes ]] && grep -F "$pattern" $distropatternIn && {
dialog --title "If include nightly build" \
--backtitle "Do you want nightly build distros?" \
--yesno "Do you want nightly build distros?" 7 60
[[ $? = 1 ]] && dList=$(echo "$dList"|grep -v '\.n\.[0-9]"')
}
dialog --backtitle "$0" --radiolist "please select distro:" 30 60 28 $dList 2>$dialogres || { Usage; exit 0; }
Distro=$(head -n1 $dialogres|sed 's/"//g')
printf '\33[H\33[2J'
else
Usage
echo -e "\n{VM:WARN} ^^^ a distro name is necessary!\n"
exit 1
fi
}
[[ -z "$Distro" ]] && {
echo -e "{VM:WARN} you have to select a distro name or specified it by adding command line parameter:\n"
Usage
exit 1
}
if egrep --color=always "[][~\!@#$^&()=,\":;{}|<>'\` ]" <<<"$Distro"; then
echo -e "{VM:WARN} ^^^ invalid character[s] in distro name: '$Distro'"
exit 1
fi
[[ "$Distro" = *[%*?]* ]] && {
dpattern=${Distro//./\\.}
dpattern=${dpattern//\?/.}
dpattern=${dpattern//[%*]/.*}
_distro=$(fetch_distro_list | grep -i "^${dpattern}$" | head -1)
[[ -n "$_distro" ]] && Distro="$_distro"
}
[[ -n "$VMName" ]] && vmprefix=
[[ "$OnlyGetVMName" = yes ]] && {
vmname_gen $Distro $VMName
exit $?
}
[[ -z "$OnlyGetUrl" ]] && {
# check if VM exist
vmname=$(vmname_gen $Distro $VMName)
virsh desc $vmname &>/dev/null && {
if [[ "${OVERWRITE}" = "yes" ]]; then
echo "{VM:INFO} VM $vmname has been there, remove it ..."
_vmdelete $vmname
else
echo "{VM:INFO} VM $vmname has been there, if you want overwrite please use --force option"
exit
fi
}
vmhostname=$vmname
[[ "$GenerateImage" = yes ]] && vmhostname=${vmname#${vmprefix}-}
VMpath=$VMSHOME/$Distro/$vmname
[[ "$InstallType" = pxe ]] && VMpath=$VMSHOME/PXE/$vmname
mkdir -p $VMpath
}
# Phase-1: get distro's Location or Image url
_Imagepattern='(qcow2c|qcow2|qcow2.xz)'
[[ -n "${distroInfo[$Distro]}" ]] && {
echo -e "{VM:INFO} ${distroInfo[$Distro]}"
read _Imageurl _Location <<<"${distroInfo[$Distro]}"
[[ "$_Imageurl" = *%%* ]] && {
read _Imageurl _Imagepattern <<<"${_Imageurl/\%\%/ }"
}
echo -e "{VM:INFO} $_Imageurl($_Imagepattern) $_Location"
[[ -z "$Imageurl" ]] && Imageurl=${_Imageurl}
[[ -z "$Location" ]] && Location=${_Location}
}
if [[ "$InstallType" = import ]]; then
if [[ -n "$Imageurl" ]]; then
if [[ ! -f $Imageurl ]] && ! is_available_url $Imageurl; then
echo "{VM:WARN} image url $Imageurl is not available, switching to Location mode" >&2
InstallType=location
Imageurl=
fi
elif [[ $Intranet = yes ]]; then
echo "{VM:INFO} searching private image url of $Distro ..." >&2
baseurl=http://download.eng.bos.redhat.com/qa/rhts/lookaside/vm-images
baseurl=http://download.devel.redhat.com/qa/rhts/lookaside/vm-images
imageLocation=${baseurl}/$Distro
read Imageurl _ < <(getimageurls ${imageLocation} "${_Imagepattern}"|sort -Vr)
if [[ -z "$Imageurl" ]]; then
echo "{VM:INFO} getting fastest location of $Distro ..." >&2
Location=$(distro2location $Distro)
[[ -z "$Location" ]] && {
echo "{VM:WARN} can not find location info of '$Distro'" >&2
exit 1
}
echo -e " -> $Location"
echo "{VM:INFO} getting image url according location url ^^^ ..." >&2
imageLocation=${Location/\/os\//\/images\/}
[[ $Distro = Fedora-* ]] &&
imageLocation=$(echo "$imageLocation" | sed -r 's;/[Ss]erver|Everything/;/Cloud/;')
is_available_url $imageLocation ||
imageLocation=${imageLocation/Cloud/CloudImages}
read Imageurl _ < <(getimageurls $imageLocation "${_Imagepattern}"|sort -Vr)
if [[ $? != 0 ]]; then
echo "{VM:INFO} can not find imageUrl of '$Distro' in database, switching to Location mode" >&2
InstallType=location
Imageurl=
fi
fi
else
echo "{VM:WARN} can not find imageUrl of '$Distro' in database." >&2
exit 1
fi
egrep -q '(qcow2|qcow2.xz)' <<<"$Imageurl" || {
read Imageurl _ < <(getimageurls ${Imageurl} "${_Imagepattern}"|sort -Vr)
}
[[ -n "$Imageurl" ]] &&
echo -e "image url: $Imageurl"
fi
if [[ "$InstallType" = location ]]; then
if [[ $Intranet = yes && -z "$Location" ]]; then
echo "{VM:INFO} getting fastest location of $Distro ..." >&2
Location=$(distro2location $Distro)
fi
if [[ -z "$Location" ]]; then
echo "{VM:WARN} can not find distro location. please check if '$Distro' is valid distro" >&2
exit 1
fi
_MSIZE=2048
[[ -n "$Location" ]] &&
echo -e "location url: $Location"
fi
if [[ "$InstallType" = pxe ]]; then
_MSIZE=2048
fi
MSIZE=${MSIZE:-$_MSIZE}
[[ -n "$OnlyGetUrl" ]] && { exit; }
echo "{VM:INFO} guess/verify os-variant ..."
[[ -z "$VM_OS_VARIANT" ]] && {
VM_OS_VARIANT=${Distro/-/}
VM_OS_VARIANT=${VM_OS_VARIANT%%-*}
VM_OS_VARIANT=${VM_OS_VARIANT,,}
}
osvariants=$(virt-install --os-variant list 2>/dev/null) ||
osvariants=$(osinfo-query os 2>/dev/null)
[[ -n "$osvariants" ]] && {
grep -q "^ $VM_OS_VARIANT " <<<"$osvariants" || VM_OS_VARIANT=${VM_OS_VARIANT/.*/-unknown}
grep -q "^ $VM_OS_VARIANT " <<<"$osvariants" || VM_OS_VARIANT=${VM_OS_VARIANT/[0-9]*/-unknown}
if grep -q "^ $VM_OS_VARIANT " <<<"$osvariants"; then
OS_VARIANT_OPT=--os-variant=$VM_OS_VARIANT
fi
}
# Phase-2: start create VM
# prepare ssh pub key
# (generated keys are later injected into the guest for passwordless login)
[[ -f ~/.ssh/id_rsa && -f ~/.ssh/id_rsa.pub ]] || {
	echo -e 'y\n' | ssh-keygen -q -t rsa -f ~/.ssh/id_rsa -N ''
}
[[ -f ~/.ssh/id_ecdsa && -f ~/.ssh/id_ecdsa.pub ]] || {
	echo -e 'y\n' | ssh-keygen -q -t ecdsa -f ~/.ssh/id_ecdsa -N ''
}
# prepare network option
# default: libvirt NAT network plus a macvtap NIC on the host's default
# interface; PXE installs use macvtap only
[[ -z "$NETWORK_OPTS" ]] && {
	srcif=$(get_default_if)
	if [[ "$InstallType" = pxe ]]; then
		NETWORK_OPTS="--network=type=direct,source=$srcif,source_mode=$MacvtapMode,model=virtio"
	else
		NETWORK_OPTS="--network=network=default,model=virtio --network=type=direct,source=$srcif,source_mode=$MacvtapMode,model=virtio"
	fi
}
# Create the extra data disks requested via --xdisk ("size[,fstype]"):
# plain size -> sparse qcow2; with fstype -> raw image pre-formatted via
# create_vdisk (re-exported into a root shell, hence declare -f).
k=0
for disk in "${EXTRA_DISKS[@]}"; do
	read size fstype _ <<<"${disk//,/ }"
	ximage=xdisk$((k++)).qcow2
	format=qcow2
	if [[ -z "$fstype" ]]; then
		qemu-img create -f qcow2 $VMpath/$ximage ${size}G
	else
		ximage=${ximage/qcow2/img}
		format=raw
		sudo bash -c "$(declare -f create_vdisk); create_vdisk $VMpath/$ximage ${size}G $fstype"
	fi
	DISK_OPTS+=" --disk path=$VMpath/$ximage,format=$format,bus=virtio" #bus=scsi
done
# Attach user-provided existing disk images (--disk <image>[,<virt-install args>]).
# Only the FIRST comma separates image from args, so args may themselves
# contain commas and are passed through to virt-install verbatim.
for disk in "${DISKS[@]}"; do
	read img arg <<<"${disk/,/ }"
	cp -f $img $VMpath/.
	# fix: the image was just copied into $VMpath, so reference it by basename;
	# the previous 'path=$VMpath/$img' broke when $img carried a directory part
	DISK_OPTS+=" --disk path=$VMpath/${img##*/},${arg:-bus=ide}"
done
[[ ${#SHARE_DIRS[@]} -gt 0 ]] && {
libvirt_nvr=$(rpm -q libvirt)
if vercmp "$libvirt_nvr" ge libvirt-6.2; then
INTERACT=no
MEM_OPTS+=" --memorybacking=access.mode=shared"
hpmemMax=$((1024*512)) #units Mi
memSlots=4
MEM_OPTS+=" --memory=${MSIZE},hotplugmemorymax=$hpmemMax,hotplugmemoryslots=$memSlots --cpu cell0.cpus=0-$((VCPUS-1)),cell0.memory=$((MSIZE*1024))"
#ref: https://libvirt.org/kbase/virtiofs.html#id3
#- The host-side virtiofsd daemon, like other vhost-user backed devices,
#- requires shared memory between the host and the guest.
virsh allocpages 2M $((MSIZE/2))
#or add memory_backing_dir = "/dev/shm/" in /etc/libvirt/qemu.conf
else
echo -e "{VM:WARN} $libvirt_nvr don't support virtiofs driver"
SHARE_DIRS=()
exit 1
fi
}
NVDIMM=no
CONTROLLER_OPTS=
qemukvm=$(rpm -q qemu-kvm)
[[ -n "$NVDIMM_LIST" ]] && vercmp $qemukvm ge qemu-kvm-2.6 && {
NVDIMM=yes
CONTROLLER_OPTS="--controller=type=pci,index=9,model=pcie-root-port --controller=type=pci,index=10,model=pcie-root-port"
}
if [[ "$NVDIMM" = yes ]]; then
hpmemMax=$((1024*512)) #units Mi
memSlots=4
MEM_OPTS="--memory=${MSIZE},hotplugmemorymax=$hpmemMax,hotplugmemoryslots=$memSlots --cpu cell0.cpus=0-$((VCPUS-1)),cell0.memory=$((MSIZE*1024))"
nvdimmdevs=($NVDIMM_LIST)
for ((i=0; i<${#nvdimmdevs[@]}; i++)); do
[[ $i -ge $memSlots ]] && break
nvdimmfile=$VMpath/nvdimm-$i.dev
read targetSize labelSize _ <<<"${nvdimmdevs[$i]//+/ }"
targetSize=${targetSize:-511}
labelSize=${labelSize:-1}
totalSize=$((targetSize+labelSize))
truncate -s ${totalSize}M $nvdimmfile
MEM_OPTS+=" --memdev nvdimm,source_path=$nvdimmfile,target_size=${targetSize},target_node=0,target_label_size=${labelSize}"
done
fi
MEM_OPTS=${MEM_OPTS:---memory=${MSIZE}}
case $Distro in
RHEL-5*|RHEL5*|centos5*|centos-5*)
EPEL=http://archive.fedoraproject.org/pub/archive/epel/epel-release-latest-5.noarch.rpm;;
RHEL-6*|RHEL6*|centos6*|centos-6*)
EPEL=http://dl.fedoraproject.org/pub/epel/epel-release-latest-6.noarch.rpm;;
RHEL-7*|RHEL7*|centos7*|centos-7*)
EPEL=http://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm;;
RHEL-8*|RHEL8*|centos8*|centos-8*)
EPEL=http://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm;;
esac
[[ -n "$EPEL" ]] && PKGS="$EPEL $PKGS"
if [[ "$InstallType" = location ]]; then
# Generate a kickstart (unless the user supplied one) and prepare the
# location-based install: a %post payload configures hostname, packages and
# optional fips/kdump/cloud-init bits inside the guest.
[[ -z "$KSPath" ]] && {
	echo "{VM:INFO} generating kickstart file for $Distro ..."
	ksauto=$RuntimeTmp/ks-$VM_OS_VARIANT-$$.cfg
	postscript=$RuntimeTmp/postscript.ks
	KSPath=$ksauto
	REPO_OPTS=$(distro2repos $Distro $Location | sed 's/^/--repo /')
	_file=ks-generator.sh
	which $_file &>/dev/null || {
		_url=$baseUrl/utils/$_file
		mkdir -p ~/bin && curl_download ~/bin/$_file $_url && chmod +x ~/bin/$_file
	}
	# fix: yum repo files live in /etc/yum.repos.d (was /etc/yum.repo.d, a path
	# that never exists, so the cdn_os repo toggles below were silently skipped)
	cat <<-END >>$postscript
	test -f /etc/yum.repos.d/cdn_os.repo && sed -i 's/enabled = 1/enabled = 0/' /etc/yum.repos.d/cdn_os.repo
	echo "[\$USER@\${HOSTNAME} \${HOME} \$(pwd)] configure hostname ..."
	test -f /etc/hostname && echo ${vmhostname} >/etc/hostname || echo HOSTNAME=${vmhostname} >>/etc/sysconfig/network
	echo "[\$USER@\${HOSTNAME} \${HOME} \$(pwd)] yum install pkgs ${PKGS} ..."
	yum install -y wget ${PKGS} 2>/dev/null
	END
	[[ $Intranet = yes ]] && cat <<-END >>$postscript
	echo "[\$USER@\${HOSTNAME} \${HOME} \$(pwd)] brew install $BPKGS ..."
	wget -O /usr/bin/brewinstall.sh -N -q $bkrClientImprovedUrl/utils/brewinstall.sh --no-check-certificate
	chmod +x /usr/bin/brewinstall.sh
	brewinstall.sh $BPKGS -noreboot
	END
	[[ "$fips" = yes ]] && cat <<-END >>$postscript
	echo "[\$USER@\${HOSTNAME} \${HOME} \$(pwd)] enable fips ..."
	wget -O /usr/bin/enable-fips.sh -N -q $baseUrl/utils/enable-fips.sh --no-check-certificate
	chmod +x /usr/bin/enable-fips.sh
	enable-fips.sh
	END
	[[ "$kdump" = yes ]] && cat <<-END >>$postscript
	echo "[\$USER@\${HOSTNAME} \${HOME} \$(pwd)] kdump-setup ..."
	wget -O /usr/bin/kdump-setup.sh -N -q $baseUrl/utils/kdump-setup.sh --no-check-certificate
	chmod +x /usr/bin/kdump-setup.sh
	kdump-setup.sh
	END
	# image-baking needs cloud-init preinstalled in the guest
	[[ "$GenerateImage" = yes ]] && {
		cat <<-END >>$postscript
		test -f /etc/yum.repos.d/cdn_os.repo && sed -i 's/enabled = 0/enabled = 1/' /etc/yum.repos.d/cdn_os.repo
		echo "[\$USER@\${HOSTNAME} \${HOME} \$(pwd)] install cloud-init ..."
		yum install -y cloud-init
		test -f /etc/yum.repos.d/cdn_os.repo && sed -i 's/enabled = 1/enabled = 0/' /etc/yum.repos.d/cdn_os.repo
		END
	}
	if [[ "$NO_SSHKEY" = yes ]]; then
		ks-generator.sh -d $Distro -url $Location $REPO_OPTS --post $postscript >$KSPath
	else
		ks-generator.sh -d $Distro -url $Location $REPO_OPTS --post $postscript --sshkeyf ~/.ssh/id_rsa.pub --sshkeyf ~/.ssh/id_ecdsa.pub >$KSPath
	fi
}
echo -e "{VM:INFO} creating VM by using location:\n -> $Location"
touch $VMpath/.kiss-vm
echo $Location >$VMpath/url
# when baking an image, the guest must power off (not reboot) after install
[[ "$GenerateImage" = yes ]] && {
	sed -i '/^reboot$/s//poweroff/' ${KSPath}
	NOREBOOT=--noreboot
}
ksfile=${KSPath##*/}
qemu-img create -f qcow2 $VMpath/${vmname}.qcow2 ${DSIZE}G
[[ "$DEBUG" = yes ]] && {
	echo "#[debug] kickstart:"
	cat ${KSPath}
}
#workaround: see https://bugzilla.redhat.com/show_bug.cgi?id=1707389#c15
workaroundExtraArgs="rd.driver.pre=loop"
run nohup virt-install --connect=qemu:///system --hvm --accelerate $XML "$QEMU_OPTS" "${QEMU_ENV[@]}" \
--name $vmname \
--location $Location \
$OS_VARIANT_OPT \
$MACHINE_OPT \
--vcpus ${VCPUS} \
$MEM_OPTS \
--disk path=$VMpath/${vmname}.qcow2,bus=${START_DISK_BUS:-virtio} \
$DISK_OPTS \
$NETWORK_OPTS $CONTROLLER_OPTS \
--initrd-inject $KSPath \
--extra-args="ks=file:/$ksfile console=tty0 console=ttyS0,${baudrate}n8 $workaroundExtraArgs" \
--noautoconsole \
--vnc --vnclisten 0.0.0.0 $NOREBOOT &>$VMpath/nohup.log &
while true; do test -s $VMpath/nohup.log && break; done
expect -c "spawn tail -f $VMpath/nohup.log
expect {
{* to complete the installation process.} { exit }
{*\r} { exp_continue }
{</domain>} { exit }
}
"
[[ -n "$XML" ]] && {
cp $VMpath/nohup.log $VMpath/vm.xml
virsh domxml-to-native qemu-argv $VMpath/vm.xml | tee $VMpath/qemu-argv
rm -f $VMpath/*
exit
}
if [[ "$NOAUTO" = yes ]]; then
expect -c '
set timeout 10
spawn virsh console '"$vmname"'
expect {
timeout {
send_user "\n\n{VM:INFO} exiting from console...\n"
exit 0
}
exp_continue
}
'
else
trap - SIGINT
for ((i=0; i<31; i++)); do
#clear -x
printf '\33[H\33[2J'
INTERACT=$INTERACT LANG=C expect -c '
set intc 0
set timeout -1
spawn virsh console '"$vmname"'
trap {
send_user "You pressed Ctrl+C [incr intc]/8\n"
if {$intc >= 8} {
interact
}
} SIGINT
expect {
"error: Disconnected from qemu:///system due to end of file*" {
send "\r"
puts $expect_out(buffer)
exit 5
}
"error: failed to get domain" {
send "\r"
puts $expect_out(buffer)
exit 6
}
"error: internal error: character device console0 is not using a PTY" {
send "\r"
puts $expect_out(buffer)
exit 1
}
"Unsupported Hardware Detected" {
send "\r"
exp_continue
}
"Which would you like to install through" {
# see: [RHEL 6.1] Anaconda requires user interaction in case of kickstart network activation failing
send "\r"
interact
}
"reboot: Power down" { exit 0 }
"Power down" { exit 0 }
"reboot: Restarting system" { send "\r"; exit 1 }
"Restarting system" { send "\r"; exit 1 }
"error: The domain is not running" { send "\r"; exit 127 }
"reboot: System halted" { send_user "\r\rsomething is wrong! cancel installation ..\r\r"; exit 255 }
"System halted" { send_user "\r\rsomething is wrong! cancel installation ..\r\r"; exit 255 }
"An unknown error has occurred" { exit 255 }
"error: Domain not found:" { exit 255 }
"* login:" { send "root\r" }
}
expect "Password:" {
send "redhat\r"
send "\r\r\r\r\r\r"
send "# your are in console, Ctr + ] to exit \r"
send "\r\r\r\r\r\r"
}
if {$env(INTERACT) == "no"} { exit 0 }
interact
'
ReturnCode=$?
[[ $ReturnCode = 0 || $ReturnCode = 255 ]] && break
[[ $ReturnCode = 127 ]] && { virsh start $vmname 2>&1 | sed 's/error: //'; continue; }
sleep 2
done
echo -e "\n{VM:INFO} Quit from expect -c 'spawn virsh console $vmname'"
[[ $ReturnCode = 255 ]] && {
echo -e "\n{VM:INFO} something is wrong(please check screen log), will clean all tmp files ..."
RM=yes
GenerateImage=
}
fi
elif [[ "$InstallType" = import ]]; then
[[ -f $Imageurl ]] && Imageurl=file://$(readlink -f ${Imageurl})
imagefilename=${Imageurl##*/}
imagefile=$VMpath/$imagefilename
echo "{VM:INFO} downloading cloud image file of $Distro to $imagefile ..."
if [[ $Imageurl != file:///* ]]; then
if [[ -n "$SAVE_IMAGE" ]]; then
until curl_download ${DownloadImagePath}/${imagefilename} $Imageurl; do sleep 1; done
[[ "$DOWNLOAD_ONLY" = yes ]] && {
ls -l ${DownloadImagePath}/${imagefilename}
exit 0
}
cp ${DownloadImagePath}/${imagefilename} $imagefile
else
until curl_download $imagefile $Imageurl; do sleep 1; done
fi
else
cp -f ${Imageurl#file://} $imagefile
fi
[[ -f ${imagefile} ]] || exit 1
[[ $imagefile = *.xz ]] && {
echo "{VM:INFO} decompress $imagefile ..."
xz -d $imagefile
rm -f $imagefile
imagefile=${imagefile%.xz}
[[ -f ${imagefile} ]] || exit 1
}
[[ "$NO_CLOUD_INIT" != yes ]] && {
echo -e "{VM:INFO} creating cloud-init iso"
_file=cloud-init-iso-gen.sh
which $_file &>/dev/null || {
_url=$baseUrl/utils/$_file
mkdir -p ~/bin && curl_download ~/bin/$_file $_url && chmod +x ~/bin/$_file
}
cloudinitiso=$VMpath/$vmname-cloud-init.iso
[[ "$fips" = yes ]] && FIPS_OPT=--fips
[[ "$kdump" = yes ]] && KDUMP_OPT=--kdump
[[ $Intranet = yes && -n "$Location" ]] && {
REPO_OPTS=$(distro2repos $Distro $Location | sed -e '/cdn_os:/d' -e 's/^/--repo /')
}
[[ "$DEBUG" = yes ]] && DEBUG_OPT=--debug
if [[ "$NO_SSHKEY" = yes ]]; then
cloud-init-iso-gen.sh $cloudinitiso -hostname ${vmhostname} -b "$BPKGS" -p "$PKGS" $DEBUG_OPT $REPO_OPTS \
$FIPS_OPT $KDUMP_OPT
else
cloud-init-iso-gen.sh $cloudinitiso -hostname ${vmhostname} -b "$BPKGS" -p "$PKGS" $DEBUG_OPT $REPO_OPTS \
$FIPS_OPT $KDUMP_OPT --sshkeyf ~/.ssh/id_rsa.pub --sshkeyf ~/.ssh/id_ecdsa.pub
fi
CLOUD_INIT_OPT="--disk $cloudinitiso,device=cdrom"
}
echo -e "{VM:INFO} creating VM by import $imagefile"
touch $VMpath/.kiss-vm
echo $Imageurl >$VMpath/url
[[ "$dsizeflag" = 1 || "$NOAUTO" != yes ]] && {
_size=$(qemu-img info ${imagefile}|sed -rn '/^virtual size:.*\(([0-9]+).*$/{s//\1/;p}')
[[ "$_size" -lt $((DSIZE*1024*1024*1024)) ]] && {
echo -e "{VM:INFO} resize $imagefile to ${DSIZE}G"
qemu-img resize ${imagefile} ${DSIZE}G
}
}
run nohup virt-install --connect=qemu:///system --hvm --accelerate $XML "$QEMU_OPTS" "${QEMU_ENV[@]}" \
--name $vmname \
$OS_VARIANT_OPT \
$MACHINE_OPT \
--vcpus ${VCPUS} \
$MEM_OPTS \
--disk path=${imagefile},bus=${START_DISK_BUS:-virtio} \
$DISK_OPTS \
$CLOUD_INIT_OPT \
$NETWORK_OPTS $CONTROLLER_OPTS \
--import \
--noautoconsole \
--vnc --vnclisten 0.0.0.0 &>$VMpath/nohup.log &
while true; do test -f $VMpath/nohup.log && break; done
expect -c "spawn tail -f $VMpath/nohup.log
expect {
{*Domain creation completed.} { exit }
{*\r} { exp_continue }
{</domain>} { exit }
}
"
[[ -n "$XML" ]] && {
cp $VMpath/nohup.log $VMpath/vm.xml
virsh domxml-to-native qemu-argv $VMpath/vm.xml | tee $VMpath/qemu-argv
exit
}
if [[ "$NOAUTO" = yes ]]; then
expect -c '
set timeout 10
spawn virsh console '"$vmname"'
expect {
timeout {
send_user "\n\n{VM:INFO} exiting from console...\n"
exit 0
}
exp_continue
}
'
if [[ -n "$VNCPUT_AFTER_INSTALL" ]]; then
vm "$vmname" -vncput "$VNCPUT_AFTER_INSTALL"
else
vm "$vmname" -vncget
fi
echo -e "{VM:INFO} vnc info:"
vm vncport $vmname
else
if grep -i -q freebsd <<<"$Distro"; then
echo -e "{VM:INFO} waiting login: prompt ..."
while ! grep "^login:" < <(vm "$vmname" -vncget); do sleep 10; done
vm "$vmname" -vncputln root
vm "$vmname" -vncputln "hostname ${vmname}" \
-vncputln "echo 'hostname=\"${vmname}\"' >>/etc/rc.conf" \
-vncputln "service syslogd reload" \
-vncputln "echo 'sshd_enable=\"YES\"' >>/etc/rc.conf" \
-vncputln "printf 'PermitRootLogin yes\\nPasswordAuthentication yes\\n' >>/etc/ssh/sshd_config" \
-vncputln "grep '^[^#]' /etc/rc.conf" \
-vncputln "grep '^[^#]' /etc/ssh/sshd_config"
vm "$vmname" -vncputln "pw useradd foo -G wheel" \
-vncputln "passwd foo" -vncput "" -vncputln "redhat" -vncput "" -vncputln "redhat" \
-vncputln "passwd root" -vncput "" -vncputln "redhat" -vncput "" -vncputln "redhat" \
-vncputln "/etc/rc.d/sshd start"
expect -c "
spawn ssh-copy-id -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@$vmname
expect -re .*: {send \"redhat\\r\"}
expect eof
"
else
trap - SIGINT
for ((i=0; i<31; i++)); do
INTERACT=$INTERACT SHUTDOWN=$GenerateImage LANG=C expect -c '
set intc 0
set timeout -1
spawn virsh console '"$vmname"'
trap {
send_user "You pressed Ctrl+C [incr intc]/8\n"
if {$intc >= 8} {
interact
}
} SIGINT
expect {
"error: failed to get domain" {
send "\r"
puts $expect_out(buffer)
exit 6
}
"error: internal error: character device console0 is not using a PTY" {
send "\r"
puts $expect_out(buffer)
exit 1
}
"* login:" { send "root\r" }
}
expect "Password:" {
send "redhat\r"
send "\r\r\r\r\r\r"
if {$env(SHUTDOWN) == "yes"} {
send {while ps axf|grep -A1 "/var/lib/cloud/instance/scripts/runcm[d]"; do echo "{VM:INFO}: cloud-init scirpt is still running .."; sleep 10; done; poweroff}
send "\r\n"
expect "Restarting system" { exit 0 }
}
send {while ps axf|grep -A1 "/var/lib/cloud/instance/scripts/runcm[d]"; do echo "{VM:INFO}: cloud-init scirpt is still running .."; sleep 10; done; echo "~~~~~~~~ no cloud-init or cloud-init done ~~~~~~~~"\d}
send "\r\n"
expect {
"or cloud-init done ~~~~~~~~d" {send "\r\r# Now you can take over the keyboard\r\r"}
"* login:" {
send "root\r"
expect "Password:" {
send "redhat\r"
send "\r\r# Now you can take over the keyboard\r\r"
}
}
}
send "# and your are in console, Ctr + ] to exit \r"
send "\r\r"
}
if {$env(INTERACT) == "no"} { exit 0 }
interact
'
ReturnCode=$?
[[ $ReturnCode = 0 || $ReturnCode = 255 ]] && break
[[ $ReturnCode = 127 ]] && { virsh start $vmname 2>&1 | sed 's/error: //'; continue; }
sleep 2
done
echo -e "\n{VM:INFO} Quit from expect -c 'spawn virsh console $vmname'"
[[ -f "$cloudinitiso" ]] && {
echo -e "\n{VM:INFO} eject iso ${cloudinitiso##*/} from $vmname"
virsh change-media $vmname $cloudinitiso --eject
}
fi
fi
elif [[ "$InstallType" = pxe ]]; then
imagefile=$VMpath/${vmname}.qcow2
diskOpt=path=${imagefile},bus=virtio,size=${DSIZE}
[[ "$DISKLESS" = yes ]] && diskOpt=none
run virt-install --connect=qemu:///system --hvm --accelerate $XML "$QEMU_OPTS" "${QEMU_ENV[@]}" \
--name $vmname \
$OS_VARIANT_OPT \
$MACHINE_OPT \
--vcpus ${VCPUS} \
$MEM_OPTS \
--pxe \
--disk $diskOpt \
--noautoconsole \
${NETWORK_OPTS%,model=virtio} \
--vnc --vnclisten 0.0.0.0
sleep 1
vm $vmname -vncget
if [[ "$NOAUTO" = yes ]]; then
echo -e "{VM:INFO} waiting boot: prompt ..."
while ! grep "^boot:" < <(vm "$vmname" -vncget); do sleep 1; done
vm $vmname -vncget
vm $vmname -vncputln "menu"
vm $vmname -vncput "key:down"
else
sleep 10
vm $vmname -vncget
fi
echo -e "\n{VM:INFO} please connect vnc to continue install:"
for vncaddr in $(vmvncport $vmname); do
echo -e " $ vncviewer $vncaddr #from remote"
done
exit 0
elif [[ "$InstallType" = cdrom ]]; then
[[ -f $Isourl ]] && Isourl=file://$(readlink -f ${Isourl})
isofilename=${Isourl##*/}
isofile=$VMpath/$isofilename
echo "{VM:INFO} downloading iso file of $Distro to $isofile ..."
if [[ $Isourl != file:///* ]]; then
if [[ -n "$SAVE_IMAGE" ]]; then
until curl_download ${DownloadImagePath}/${isofilename} $Isourl; do sleep 1; done
[[ "$DOWNLOAD_ONLY" = yes ]] && {
ls -l ${DownloadImagePath}/${isofilename}
exit 0
}
cp ${DownloadImagePath}/${isofilename} $isofile
else
until curl_download $isofile $Isourl; do sleep 1; done
fi
else
cp -f ${Isourl#file://} $isofile
fi
[[ -f ${isofile} ]] || exit 1
run virt-install --connect=qemu:///system --hvm --accelerate $XML "$QEMU_OPTS" "${QEMU_ENV[@]}" \
--name $vmname \
$OS_VARIANT_OPT \
$MACHINE_OPT \
--vcpus ${VCPUS} \
$MEM_OPTS \
--cdrom $isofile \
$DISK_OPTS \
$NETWORK_OPTS $CONTROLLER_OPTS \
--noautoconsole \
--vnc --vnclisten 0.0.0.0
sleep 2
vm $vmname -vncget
vm $vmname -vncput "key:down" -vncput "key:up"
echo -e "\n{VM:INFO} please connect vnc to continue install:"
for vncaddr in $(vmvncport $vmname); do
echo -e " $ vncviewer $vncaddr #from remote"
done
exit 0
fi
# If the domain doesn't exist at this point, virt-install failed:
# remove the VM directory (and its per-distro parent if now empty) and bail.
if ! virsh desc $vmname &>/dev/null; then
	echo -e "\n{VM:ERR} virt-install fail"
	# fix: '${VMpath%/*}' was mistyped as '{$VMpath%/*}', so rmdir targeted a
	# literal '{...}' path and the empty parent directory was never removed
	rm -rf $VMpath && rmdir ${VMpath%/*} 2>/dev/null
	exit 1
fi
if [[ "$GenerateImage" = yes ]]; then
mkdir -p $ImagePath/$Distro
read image < <(virsh dumpxml --domain $vmname | sed -n "/source file=/{s|^.*='||; s|'/>$||; p}")
imgfilename=${image##*/}
imgfilename=${imgfilename#${vmprefix}-}
newimage=$ImagePath/$Distro/${imgfilename}
echo -e "\n{VM:INFO} force shutdown $vmname ..."
virsh destroy $vmname 2>/dev/null
ls -lh ${image}
if ! true; then
echo -e "\n{VM:INFO} copy image $image to ${newimage} ..."
cp ${image} ${newimage}
else
echo -e "\n{VM:INFO} virt-sparsify image $image to ${newimage} ..."
LIBGUESTFS_BACKEND=direct virt-sparsify --check-tmpdir fail ${image} ${newimage} || {
ls -lh ${image}
cp ${image} ${newimage}
LIBGUESTFS_BACKEND=direct virt-sparsify --in-place ${newimage}
}
ls -lh ${image}
ls -lh ${newimage}
fi
echo -e "\n{VM:INFO} virt-sysprep ..."
ls -lh ${newimage}
LIBGUESTFS_BACKEND=direct virt-sysprep -a ${newimage}
ls -lh ${newimage}
echo -e "\n{VM:INFO} xz compress image ..."
time xz -z -f -T 0 ${XZ:--9} ${newimage}
ls -lh ${newimage}.xz
echo -e "\n{VM:INFO} remove temporary VM $vmname ..."
_vmdelete $vmname
exit 0
fi
if [[ "$RM" = yes ]]; then
_vmdelete $vmname
exit 0
fi
[[ ${#SHARE_DIRS[@]} -gt 0 ]] && {
vm stop $vmname
for idx in "${!SHARE_DIRS[@]}"; do
read srcdir rtarget _ <<< "${SHARE_DIRS[$idx]//:/ }"
rtarget=${rtarget:-tag$idx}
target=virtiofs-tag-$rtarget
mkdir -p $srcdir
cat >virtiofs.xml <<-EOF
<filesystem type='mount' accessmode='passthrough'>
<binary path='/usr/libexec/virtiofsd' xattr='on'/>
<driver type='virtiofs'/>
<source dir='$srcdir'/>
<target dir='$target'/>
</filesystem>
EOF
virsh attach-device $vmname virtiofs.xml --persistent
done
vm restart /w $vmname
for idx in "${!SHARE_DIRS[@]}"; do
read _ rtarget _ <<< "${SHARE_DIRS[$idx]//:/ }"
rtarget=${rtarget:-tag$idx}
target=virtiofs-tag-$rtarget
mp=/virtiofs/$rtarget
vm exec -v $vmname -- mkdir -p $mp
vm exec -v $vmname -- mount -t virtiofs $target $mp
vm exec -v $vmname -- mount -t virtiofs
done
}
if [[ ! -f ~/.ssh/config ]]; then
cat <<-EOF > ~/.ssh/config
Host 192.168.*.*
StrictHostKeyChecking no
UserKnownHostsFile=/dev/null
LogLevel ERROR
EOF
chmod 600 ~/.ssh/config
fi
: <<\COMM
echo -e "\n{VM:INFO} attach test disk ext4.qcow2 and xfs.qcow2:"
qemu-img create -f qcow2 $VMpath/ext4.qcow2 10G
qemu-img create -f qcow2 $VMpath/xfs.qcow2 10G
virsh attach-disk $vmname --subdriver qcow2 --persistent $VMpath/ext4.qcow2 vdb #--current --targetbus usb
virsh attach-disk $vmname --subdriver qcow2 --persistent $VMpath/xfs.qcow2 vdc #--current --targetbus usb
#virsh detach-disk $vmname --persistent vdb
#virsh detach-disk $vmname --persistent vdc
echo -e " $ virsh console $vmname"
echo -e " $ ssh foo@$vmname #password: redhat"
read addr < <(vmifaddr $vmname)
[[ -n "$addr" ]] && {
echo -e " $ ssh foo@$addr #password: redhat"
}
COMM
# Final hints: tell the user how to reach the freshly created VM.
echo -e "\n{VM:INFO} you can try login $vmname again by using:"
echo -e " $ vm login $vmname #from host"
# vmvncport is expected to print one or more VNC addresses for the VM —
# presumably "host:display" pairs; verify against its definition. TODO confirm
for vncaddr in $(vmvncport $vmname); do
if [[ "$vncaddr" = localhost* ]]; then
echo -e " $ vncviewer $vncaddr #from localhost"
else
echo -e " $ vncviewer $vncaddr #from remote"
fi
done
exit 0
| true
|
0d5585d4f0ff934c4050605462caac6ed1ed509f
|
Shell
|
CiscoSystems/os-sqe
|
/tools/disk-image-builder/elements/manage-cloud-init/post-install.d/manage-cloud-init
|
UTF-8
| 447
| 2.984375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# For every ethN interface, start the cloud-init services unless the MAC
# address' second octet is 00 or 10 (those interfaces are excluded).
for int in $(ip -o link | grep -P -o '\d+: eth\d+' | awk '{print $2}')
do
    mac=$(ip address show "$int" | grep -P -o 'link/ether .*? brd' | awk '{print $2}')
    # Split the MAC on ':' into an array.  Using `read` keeps the IFS
    # override local to this one command; the original `IFS=':' octets=($mac)`
    # permanently set IFS=':' in the shell, corrupting word splitting for
    # every later iteration.
    IFS=':' read -r -a octets <<< "$mac"
    if [ "${octets[1]}" != "00" ] && [ "${octets[1]}" != "10" ]
    then
        sudo systemctl start cloud-config
        sudo systemctl start cloud-final
        sudo systemctl start cloud-init-local
        sudo systemctl start cloud-init
    fi
done
| true
|
78388e70f3c5e9d666de24e100497c93839cabef
|
Shell
|
ing-systems/crcxx
|
/generate_catalog.sh
|
UTF-8
| 1,597
| 2.984375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# Extracted from https://github.com/akhilles/crc-catalog/blob/master/generate_catalog.sh
#
# Regenerate the Rust CRC parameter catalogs (src/crc{8,16,32,64,128}/catalog.rs)
# from the reveng online CRC catalogue.
echo "use super::Params;" >> src/crc8/catalog.rs
echo "use super::Params;" >> src/crc16/catalog.rs
echo "use super::Params;" >> src/crc32/catalog.rs
echo "use super::Params;" >> src/crc64/catalog.rs
echo "use super::Params;" >> src/crc128/catalog.rs
# Each catalogue entry is a line shaped like:
#   width=8 poly=0x07 ... name="CRC-8/FOO"
curl -s https://reveng.sourceforge.io/crc-catalogue/all.htm | grep -o 'width.*name.*"' | while read -r line; do
	# printf '%s\n' "$line" replaces the original unquoted `echo $line`:
	# it neither word-splits the entry nor interprets backslash escapes.
	width=$(printf '%s\n' "$line" | sed 's/width=\([0-9]*\) \(.*\) name="\(.*\)"/\1/')
	params=$(printf '%s\n' "$line" | sed 's/width=\([0-9]*\) \(.*\) name="\(.*\)"/\2/' | sed 's/ /, /g' | sed 's/=/: /g')
	name=$(printf '%s\n' "$line" | sed 's/width=\([0-9]*\) \(.*\) name="\(.*\)"/\3/' | sed 's/[-\/]/_/g')
	# Route the generated constant to the narrowest unsigned type that fits.
	if [ "$width" -le 8 ]; then
		echo "pub const $name: Params<u8> = Params { width: $width, $params };" >> src/crc8/catalog.rs
	elif [ "$width" -le 16 ]; then
		echo "pub const $name: Params<u16> = Params { width: $width, $params };" >> src/crc16/catalog.rs
	elif [ "$width" -le 32 ]; then
		echo "pub const $name: Params<u32> = Params { width: $width, $params };" >> src/crc32/catalog.rs
	elif [ "$width" -le 64 ]; then
		echo "pub const $name: Params<u64> = Params { width: $width, $params };" >> src/crc64/catalog.rs
	elif [ "$width" -le 128 ]; then
		echo "pub const $name: Params<u128> = Params { width: $width, $params };" >> src/crc128/catalog.rs
	fi
done
| true
|
1e89dd8df86965fd313e226d07a04a50e9c7a4df
|
Shell
|
fengjixuchui123/SourecCoding
|
/maintenanceToolKit/target/output/bin/backup.sh
|
UTF-8
| 280
| 2.671875
| 3
|
[] |
no_license
|
#!/bin/bash
# Dump the xs_mng PostgreSQL database (as user xsadmin) and archive the
# dump under $HPBA_HOME/pgsql/backup/backup.tar.gz.
backupFolder="$HPBA_HOME/pgsql/backup"
WORKPATH="$HPBA_HOME/pgsql/bin"
export WORKPATH
# -p: create parents as needed and do not fail if the directory exists
# (replaces the racy [ ! -d ] && mkdir pattern).
mkdir -p "$backupFolder"
"$WORKPATH/pg_dump" -d xs_mng -U xsadmin > backup.txt
# --remove-files deletes backup.txt once it is inside the archive.
tar -czf "$backupFolder/backup.tar.gz" backup.txt --remove-files
| true
|
2bcaa31380b4ea830844b267a9bde1b44f0c5c98
|
Shell
|
UCBerkeleyAPIs/docker-jboss-fuse-6.3
|
/jboss-fuse-6.3-fabric/fabric.sh
|
UTF-8
| 1,779
| 4.15625
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Starts a fabric with the given environment variables
#
# Default the credentials.  Every expansion inside [ ] is quoted so an
# unset or whitespace-containing value cannot make the test misbehave.
#
if [ -z "$FABRIC_USER" ]; then
    export FABRIC_USER=admin
fi
if [ -z "$FABRIC_PASSWD" ]; then
    export FABRIC_PASSWD=admin
fi
if [ -z "$SSH_PASSWD" ]; then
    export SSH_PASSWD=admin
fi
if [ -z "$ZOOKEEPER_PASSWD" ]; then
    export ZOOKEEPER_PASSWD=${FABRIC_PASSWD}
fi
#
# Run standalone version of fuse (in the background; we wait on it at the end)
#
echo "Starting JBoss Fuse"
/opt/jboss/jboss-fuse/bin/fuse server & FUSE_SERVER=$!
#
# Wait until the container is available to run client commands:
# poll every 5s, give up after 60 failed attempts (~5 minutes).
#
count=0
while :
do
    echo "Wait for container"
    /opt/jboss/jboss-fuse/bin/client "version"; return=$?
    if [ "$return" -eq 0 ]; then
        sleep 15
        break
    else
        sleep 5
        (( count++ ))
        echo "Failed to get client session " $count " times."
        if [ "$count" -eq 60 ]; then
            echo "Failed to get a client session after 5 minutes, fabric create failed"
            exit 1
        fi
    fi
done
# Create the fabric
/opt/jboss/jboss-fuse/bin/client "fabric:create --wait-for-provisioning --verbose --clean --bootstrap-timeout 60000 --new-user ${FABRIC_USER} --new-user-password ${FABRIC_PASSWD} --zookeeper-password ${ZOOKEEPER_PASSWD} --resolver localip"
# Add managed servers using ssh commands
echo "Managed servers to create" ${MANAGED_HOSTS}
for host in ${MANAGED_HOSTS//,/ }
do
    # ${host^^} upper-cases the host name to form e.g. FOO_SERVICE_HOST;
    # ${!servicehost} then reads that variable (indirect expansion).
    hostname=${host^^}
    servicehost=${hostname}_SERVICE_HOST
    echo "Create managed server" $host with service hostname ${!servicehost}
    /opt/jboss/jboss-fuse/bin/client "container-create-ssh --host ${!servicehost} --user user --password ${SSH_PASSWD} ${host}"
done
if [ -z "$MANAGED_HOSTS" ]; then
    # Typo fix: "startet" -> "started"
    echo "Admin server is not started with managed hosts"
fi
# Wait for fuse to end
echo "Fuse Fabric Server is ready for requests"
wait $FUSE_SERVER
| true
|
1768e7e24fc9eabf150f7ffcadb7834aa385e90f
|
Shell
|
Ferada/cl-nailgun
|
/src/script.sh
|
UTF-8
| 874
| 3.390625
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/sh
# Start a cl-nailgun server on $PORT if one is not already running
# (tracked via $PIDFILE), then forward this invocation to it through ng.
PORT=2323
PIDFILE=/home/ferada/.cl-nailgun-script.pid
CL=/home/ferada/src/opt/bin/sbcl
NG=/home/ferada/src/nailgun/ng
PID=
if [ -f "$PIDFILE" ]
then
    PID=$(cat "$PIDFILE")
fi
# TODO: file lock so only one server is started
SPAWN=
[ -z "$PID" ] && SPAWN=YES
# `kill -s 0` only probes whether the process exists; any failure
# (including an empty $PID) means we must spawn a fresh server.
/bin/kill -s 0 "$PID" || SPAWN=yes
if [ -n "$SPAWN" ]
then
    nohup "$CL" \
        --eval "(asdf:load-system '#:cffi)" \
        --eval "(push \"/home/ferada/src/opt/lib/\" cffi:*foreign-library-directories*)" \
        --eval "(asdf:load-system '#:iolib/grovel)" \
        --eval "(push \"-I/home/ferada/src/opt/include/\" iolib-grovel::*cc-flags*)" \
        --eval "(asdf:load-system '#:cl-nailgun-script)" \
        --eval "(cl-nailgun-script:start $PORT)" > ~/.cl-nailgun-script.log 2>&1 &
    echo $! > "$PIDFILE"
    # Busy-wait until the server accepts connections.
    while ! nc -q 0 localhost "$PORT" < /dev/null > /dev/null 2>&1; do sleep 1; done
fi
# "$@" forwards every original argument verbatim; the original $* lost
# quoting and re-split arguments containing whitespace.
"$NG" --nailgun-port "$PORT" "$0" "$@"
| true
|
b6f0c48d3b40dd9e4554d83fe2ba5a0f03373012
|
Shell
|
Duncaen/OpenDoas
|
/configure
|
UTF-8
| 10,281
| 3.890625
| 4
|
[
"ISC"
] |
permissive
|
#!/bin/sh
# Print an error message to stderr and abort the configure run.
# $1 - message text.  Passed as a printf *argument*, never as the format
#     string: the original `printf "$1\n"` mangled any message containing
#     '%' or backslash sequences.
die() {
	printf '%s\n' "$1" >&2
	exit 1
}
# Print the configure help text to stdout and exit successfully.
# The heredoc body is emitted verbatim.
usage() {
cat <<EOF
usage: configure [options]
  --prefix=PREFIX        installation prefix [/usr]
  --exec-prefix=EPREFIX  installation prefix for executable files [PREFIX]
  --bindir=DIR           user executables [PREFIX/bin]
  --datadir=DIR          architecture-independent data files [PREFIX/share]
  --mandir=DIR           manual pages [DATADIR/man]
  --sysconfdir=DIR       directory for configuration files [/etc]
  --build=build-alias    a cpu-vendor-opsys for the system where the application will be built
  --host=host-alias      a cpu-vendor-opsys for the system where the application will run
  --target=target-alias  the machine that CC will produce code for
  --enable-debug         enable debugging
  --enable-static        prepare for static build
  --without-pam          disable pam support
  --without-shadow       disable shadow support
  --with-timestamp       enable timestamp support
  --uid-max=NUM          set UID_MAX (default 65535)
  --gid-max=NUM          set GID_MAX (default 65535)
  --help, -h             display this help and exit
EOF
exit 0
}
# defaults
WITHOUT_TIMESTAMP=yes
UID_MAX=65535
GID_MAX=65535
# Parse command-line options.  Each argument is either --opt or
# --opt=value; $opt is the part before '=', $var the part after.
for x; do
	opt=${x%%=*}
	var=${x#*=}
	case "$opt" in
	--prefix) PREFIX=$var ;;
	--exec-prefix) EPREFIX=$var ;;
	--bindir) BINDIR=$var ;;
	--datadir) SHAREDIR=$var ;;
	--mandir) MANDIR=$var ;;
	--sysconfdir) SYSCONFDIR=$var ;;
	--build) BUILD=$var ;;
	--host) HOST=$var ;;
	--target) TARGET=$var ;;
	--enable-debug) DEBUG=yes ;;
	--enable-static) BUILD_STATIC=yes ;;
	--with-pam) WITHOUT_PAM=; WITHOUT_SHADOW=yes ;;
	--with-shadow) WITHOUT_SHADOW=; WITHOUT_PAM=yes ;;
	--without-pam) WITHOUT_PAM=yes ;;
	--without-shadow) WITHOUT_SHADOW=yes ;;
	--with-timestamp) WITHOUT_TIMESTAMP= ;;
	--without-timestamp) WITHOUT_TIMESTAMP=yes ;;
	--uid-max) UID_MAX=$var ;;
	--gid-max) GID_MAX=$var ;;  # bug fix: previously assigned UID_MAX, so --gid-max silently clobbered the UID limit
	--help|-h) usage ;;
	*) die "Error: unknown option $opt" ;;
	esac
done
CONFIG_MK=config.mk
CONFIG_H=config.h
rm -f "$CONFIG_MK" "$CONFIG_H"
cat <<! >$CONFIG_H
#ifndef CONFIG_H
#define CONFIG_H
!
if [ -z "$BUILD" ]; then
BUILD="$(uname -m)-unknown-$(uname -s | tr '[:upper:]' '[:lower:]')"
fi
if [ -z "$HOST" ]; then
[ -z "$TARGET" ] && TARGET=$BUILD
HOST=$TARGET
fi
if [ -z "$TARGET" ]; then
[ -z "$HOST" ] && HOST=$BUILD
TARGET=$HOST
fi
if [ -z "$OS" ]; then
# Derive OS from cpu-manufacturer-os-kernel
CPU=${TARGET%%-*}
REST=${TARGET#*-}
MANU=${REST%%-*}
REST=${REST#*-}
OS=${REST%%-*}
REST=${REST#*-}
KERNEL=${REST%%-*}
fi
OS_CFLAGS="-D__${OS}__"
case "$OS" in
linux)
printf 'Setting UID_MAX\t\t\t\t%d.\n' "$UID_MAX" >&2
printf '#define UID_MAX %s\n' "$UID_MAX" >>$CONFIG_H
printf 'Setting GID_MAX\t\t\t\t%d.\n' "$GID_MAX" >&2
printf '#define GID_MAX %s\n' "$GID_MAX" >>$CONFIG_H
OS_CFLAGS="$OS_CFLAGS -D_DEFAULT_SOURCE -D_GNU_SOURCE"
;;
netbsd)
OS_CFLAGS="$OS_CFLAGS -D_OPENBSD_SOURCE"
printf 'LDLIBS += -lutil\n' >>$CONFIG_MK
: ${BINGRP:=wheel}
;;
freebsd)
printf 'LDLIBS += -lutil\n' >>$CONFIG_MK
: ${BINGRP:=wheel}
;;
darwin)
: ${BINGRP:=wheel}
;;
esac
: ${PREFIX:=/usr/local}
: ${EPREFIX:=${PREFIX}}
: ${BINDIR:=${PREFIX}/bin}
: ${SHAREDIR:=${PREFIX}/share}
: ${MANDIR:=${SHAREDIR}/man}
: ${SYSCONFDIR:=/etc}
: ${BINMODE:=4755}
: ${BINOWN:=root}
: ${BINGRP:=root}
cat <<EOF >>$CONFIG_MK
PREFIX ?= ${PREFIX}
EPREFIX ?= ${EPREFIX}
BINDIR ?= ${BINDIR}
SHAREDIR ?= ${SHAREDIR}
MANDIR ?= ${MANDIR}
SYSCONFDIR?= ${SYSCONFDIR}
BINMODE ?= ${BINMODE}
BINOWN ?= ${BINOWN}
BINGRP ?= ${BINGRP}
EOF
[ -n "$OS_CFLAGS" ] && \
printf 'OS_CFLAGS += %s\n' "$OS_CFLAGS" >>$CONFIG_MK
[ -n "$DEBUG" ] && \
printf 'CFLAGS += -O0 -g\n' >>$CONFIG_MK
[ -n "$BUILD_STATIC" ] && \
printf 'CFLAGS += -static\n' >>$CONFIG_MK
# Add CPPFLAGS/CFLAGS/LDFLAGS/LDLIBS to CC for testing features
XCC="${CC:=cc} $CFLAGS $OS_CFLAGS $CPPFLAGS $LDFLAGS $LDLIBS"
# Make sure to disable --as-needed for CC tests.
case "$OS" in
darwin) ;;
*) XCC="$XCC -Wl,--no-as-needed" ;;
esac
# Compile-probe helper: write $2 (a C snippet) to a scratch file, try to
# build it with $XCC, and record the verdict in $CONFIG_H as either
#   #define HAVE_<NAME>          (probe compiled)
#   /* #define HAVE_<NAME> */    (probe failed)
# Progress is printed on stderr.  Returns 0 on success, 1 on failure.
check_func() {
	fname="$1"
	snippet="$2"
	shift 2
	printf 'Checking for %-14s\t\t' "$fname ..." >&2
	printf '%s\n' "$snippet" >"_$fname.c"
	if $XCC "_$fname.c" -o "_$fname" 2>/dev/null; then
		compiled=0
	else
		compiled=1
	fi
	rm -f "_$fname.c" "_$fname"
	macro="$(printf '%s\n' "$fname" | tr '[[:lower:]]' '[[:upper:]]')"
	if [ "$compiled" -ne 0 ]; then
		printf '/* #define HAVE_%s */\n' "$macro" >>$CONFIG_H
		printf 'no.\n' >&2
		return 1
	fi
	printf 'yes.\n' >&2
	printf '#define HAVE_%s\n' "$macro" >>$CONFIG_H
	return 0
}
# Pick a password-verification backend.  Tries PAM first (unless
# --without-pam), then shadow (unless --without-shadow).  On success,
# appends the backend's sources/libs to $CONFIG_MK, defines USE_* in
# $CONFIG_H, prints the method name ("pam" or "shadow") on stdout and
# returns 0.  Returns 1 if neither backend is usable.
authmethod() {
#
# Check for pam_appl.h.
#
src='
#include <security/pam_appl.h>
int main(void) {
return 0;
}'
[ -z "$WITHOUT_PAM" ] && check_func "pam_appl_h" "$src" && {
printf 'SRCS += pam.c\n' >>$CONFIG_MK
printf 'LDLIBS += -lpam\n' >>$CONFIG_MK
printf '#define USE_PAM\n' >>$CONFIG_H
printf 'pam\n'
return 0
}
#
# Check for shadow.h.
#
src='
#include <shadow.h>
int main(void) {
return 0;
}'
[ -z "$WITHOUT_SHADOW" ] && check_func "shadow_h" "$src" && {
printf 'SRCS += shadow.c\n' >>$CONFIG_MK
printf 'LDLIBS += -lcrypt\n' >>$CONFIG_MK
printf '#define USE_SHADOW\n' >>$CONFIG_H
printf 'shadow\n'
return 0
}
return 1
}
# Select the persist ("don't ask again") mechanism.  When timestamp
# support is enabled, records it in $CONFIG_H/$CONFIG_MK, prints
# "timestamp" on stdout and returns 0; otherwise returns 1.
persistmethod() {
	if [ -n "$WITHOUT_TIMESTAMP" ]; then
		return 1
	fi
	printf '#define USE_TIMESTAMP\n' >>$CONFIG_H
	printf 'SRCS += timestamp.c\n' >>$CONFIG_MK
	printf 'timestamp\n'
	return 0
}
#
# Check for explicit_bzero().
#
src='
#include <string.h>
int main(void) {
explicit_bzero(NULL, 0);
return 0;
}'
check_func "explicit_bzero" "$src" || {
printf 'SRCS += libopenbsd/explicit_bzero.c\n' >>$CONFIG_MK
}
#
# Check for strlcat().
#
src='
#include <string.h>
int main(void) {
const char s1[] = "foo";
char s2[10];
strlcat(s2, s1, sizeof(s2));
return 0;
}'
check_func "strlcat" "$src" || {
printf 'SRCS += libopenbsd/strlcat.c\n' >>$CONFIG_MK
}
#
# Check for strlcpy().
#
src='
#include <string.h>
int main(void) {
const char s1[] = "foo";
char s2[10];
strlcpy(s2, s1, sizeof(s2));
return 0;
}'
check_func "strlcpy" "$src" || {
printf 'SRCS += libopenbsd/strlcpy.c\n' >>$CONFIG_MK
}
#
# Check for errc().
#
src='
#include <err.h>
int main(void) {
errc(0, 0, "");
return 0;
}'
check_func "errc" "$src" || {
printf 'SRCS += libopenbsd/errc.c\n' >>$CONFIG_MK
}
#
# Check for verrc().
#
src='
#include <stddef.h>
#include <err.h>
int main(void) {
verrc(0, 0, "x", NULL);
return 0;
}'
check_func "verrc" "$src" || {
printf 'SRCS += libopenbsd/verrc.c\n' >>$CONFIG_MK
}
#
# Check for setprogname().
#
src='
#include <stdlib.h>
int main(void) {
setprogname("");
return 0;
}'
check_func "setprogname" "$src" || {
printf 'SRCS += libopenbsd/progname.c\n' >>$CONFIG_MK
}
#
# Check for readpassphrase().
#
src='
#include <readpassphrase.h>
int main(void) {
char buf[12];
readpassphrase("", buf, sizeof(buf), 0);
return 0;
}'
check_func "readpassphrase" "$src" || {
printf 'SRCS += libopenbsd/readpassphrase.c\n' >>$CONFIG_MK
}
#
# Check for strtonum().
#
src='
#include <stdlib.h>
int main(void) {
const char *errstr;
strtonum("", 1, 64, &errstr);
return 0;
}'
check_func "strtonum" "$src" || {
printf 'SRCS += libopenbsd/strtonum.c\n' >>$CONFIG_MK
}
#
# Check for reallocarray().
#
src='
#include <stdlib.h>
int main(void) {
reallocarray(NULL, 0, 0);
return 0;
}'
check_func "reallocarray" "$src" || {
printf 'SRCS += libopenbsd/reallocarray.c\n' >>$CONFIG_MK
}
#
# Check for execvpe().
#
src='
#include <unistd.h>
int main(void) {
const char *p = { "", NULL };
execvpe("", p, p);
return 0;
}'
check_func "execvpe" "$src" || {
printf 'SRCS += libopenbsd/execvpe.c\n' >>$CONFIG_MK
}
#
# Check for setresuid().
#
src='
#include <unistd.h>
int main(void) {
setresuid(0, 0, 0);
return 0;
}'
check_func "setresuid" "$src"
have_setresuid=$?
#
# Check for setresgid().
#
src='
#include <unistd.h>
int main(void) {
setresgid(0, 0, 0);
return 0;
}'
check_func "setresgid" "$src"
have_setresgid=$?
if [ $have_setresuid -eq 1 -o $have_setresgid -eq 1 ]; then
printf 'SRCS += libopenbsd/bsd-setres_id.c\n' >>$CONFIG_MK
fi
#
# Check for setreuid().
#
src='
#include <unistd.h>
int main(void) {
setreuid(0, 0);
return 0;
}'
check_func "setreuid" "$src"
#
# Check for setregid().
#
src='
#include <unistd.h>
int main(void) {
setregid(0, 0);
return 0;
}'
check_func "setregid" "$src"
#
# Check for closefrom().
#
src='
#include <unistd.h>
int main(void) {
closefrom(0);
return 0;
}'
check_func "closefrom" "$src" || {
printf 'SRCS += libopenbsd/closefrom.c\n' >>$CONFIG_MK
}
#
# Check for sysconf().
#
src='
#include <unistd.h>
int main(void) {
(void)sysconf(0);
return 0;
}'
check_func "sysconf" "$src"
#
# Check for dirfd().
#
src='
#include <dirent.h>
int main(void) {
(void)dirfd(0);
return 0;
}'
check_func "dirfd" "$src"
#
# Check for fcntl.h.
#
src='
#include <fcntl.h>
int main(void) {
return 0;
}'
check_func "fcntl_h" "$src"
#
# Check for F_CLOSEM.
#
src='
#include <fcntl.h>
#ifndef F_CLOSEM
#error no F_CLOSEM
#endif
int main(void) {
return 0;
}'
check_func "F_CLOSEM" "$src"
#
# Check for dirent.h.
#
src='
#include <dirent.h>
int main(void) {
return 0;
}'
check_func "dirent_h" "$src"
#
# Check for sys/ndir.h.
#
src='
#include <sys/ndir.h>
int main(void) {
return 0;
}'
check_func "sys_ndir_h" "$src"
#
# Check for sys/dir.h.
#
src='
#include <sys/dir.h>
int main(void) {
return 0;
}'
check_func "sys_dir_h" "$src"
#
# Check for ndir.h.
#
src='
#include <ndir.h>
int main(void) {
return 0;
}'
check_func "ndir_h" "$src"
#
# Check for login_cap.h.
#
src='
#include <sys/types.h>
#include <login_cap.h>
int main(void) {
return 0;
}'
check_func "login_cap_h" "$src"
#
#
#
src='
#include <stdlib.h>
int main(void){return 0;}
__attribute__((__unused__)) static void foo(void){return;}
'
check_func "__attribute__" "$src" || {
printf 'OS_CFLAGS += -DNO_ATTRIBUTE_ON_RETURN_TYPE=1\n' >>$CONFIG_MK
}
# Choose the auth backend; failing to find one is fatal.
auth=$(authmethod)
if [ $? -eq 0 ]; then
printf 'Using auth method\t\t\t%s.\n' "$auth" >&2
else
printf 'Error auth method\t\t\n' >&2
exit 1
fi
# The persist mechanism is optional: "none" is an acceptable outcome.
persist=$(persistmethod)
if [ $? -eq 0 ]; then
printf 'Using persist method\t\t\t%s.\n' "$persist" >&2
else
printf 'Using persist method\t\t\tnone.\n' >&2
fi
# Finish config.h: embed the doas.conf location and close the include guard.
printf '#define DOAS_CONF "%s/doas.conf"\n' "${SYSCONFDIR}" >>$CONFIG_H
printf '\n#endif /* CONFIG_H */\n' >>$CONFIG_H
| true
|
98587e756a64c8c5fa9f5a25c4229dfbc507a4a7
|
Shell
|
delkyd/alfheim_linux-PKGBUILDS
|
/sift/PKGBUILD
|
UTF-8
| 938
| 2.546875
| 3
|
[] |
no_license
|
# Maintainer: Javier Tiá <javier dot tia at gmail dot com>
pkgname=sift
pkgver=0.9.0
pkgrel=1
pkgdesc="A fast and powerful open source alternative to grep"
arch=('i686' 'x86_64')
url="http://sift-tool.org/"
license=('GPL3')
makedepends=('go' 'git')
options=('!strip' '!emptydirs')
conflicts=('sift-bin')
replaces=('sift-bin')
provides=("sift=${pkgver}")
source=("https://github.com/svent/${pkgname}/archive/v${pkgver}.tar.gz")
sha256sums=('bbbd5c472c36b78896cd7ae673749d3943621a6d5523d47973ed2fc6800ae4c8')
_gourl="github.com/svent/${pkgname}"
# PKGBUILD build(): fetch and compile sift with `go get`, using $srcdir
# as the GOPATH so binaries land in $srcdir/bin.
build() {
cd "${pkgname}-${pkgver}"
export GOPATH="${srcdir}"
go get -v ${_gourl}
}
# check() {
# export GOPATH="${srcdir}"
# go test -v -x github.com/svent/sift
# }
# PKGBUILD package(): install the built binary and the license file into
# the package staging root ($pkgdir).
package() {
install -Dm 775 "${srcdir}/bin/${pkgname}" \
"${pkgdir}/usr/bin/${pkgname}"
install -Dm 644 "${srcdir}/${pkgname}-${pkgver}/LICENSE" \
"${pkgdir}/usr/share/licenses/${pkgname}/LICENSE"
}
# vim:set ft=sh ts=2 sw=2 et:
| true
|
73916ec45004e4d511591d5f7eda04786dba9a30
|
Shell
|
ildarf/scripts
|
/tide
|
UTF-8
| 438
| 2.609375
| 3
|
[] |
no_license
|
#!/bin/bash
# tide: carve the current tmux window into an IDE-like layout and seed
# each pane — a bottom pane running `runwatch $1`, a side pane running
# `makewatch $1`, and the main pane opening $1 in the editor's tab.
# if [ $# -eq 0 -o "$1" = "-h" ]
# then
# echo 'Usage: testide'
# exit 0
# fi
tmux resize-pane -D 5
tmux split-window -c '#{pane_current_path}' -p 25
tmux split-window -h -c '#{pane_current_path}' -p 34
tmux send-keys "clear" Enter
# NOTE(review): the runwatch/makewatch commands are typed without a final
# Enter — presumably so the user can review/confirm them; verify intent.
tmux send-keys "runwatch $1"
tmux select-pane -t :.-
tmux send-keys "clear" Enter
tmux send-keys "makewatch $1"
tmux select-pane -t :.-
tmux send-keys Enter
tmux send-keys ":tabe $1" Enter
|
7699f488a923846e218f6087b607b57de4a59c9a
|
Shell
|
sjorge/dotFiles
|
/homedir/.zshrc.d/envvars/os:solaris:MANPATH
|
UTF-8
| 546
| 2.921875
| 3
|
[] |
no_license
|
#!/bin/sh
# Build and print a MANPATH for Solaris-family systems, adding the
# standard man directory plus pkgsrc (SmartOS) and OmniTI (OmniOS)
# locations when present.
# add /usr/share/man
# Bug fix: the original appended to ${MANPAGE} (an undefined variable —
# a typo for MANPATH), which discarded any inherited MANPATH and left a
# spurious leading ':'.  ${MANPATH:+...} only emits the separator when
# MANPATH is non-empty.
[ -d "/usr/share/man" ] && \
  MANPATH="${MANPATH:+${MANPATH}:}/usr/share/man"
# check for pkgsrc (smartos): remove any stale entry, then prepend
MANPATH="$(echo "${MANPATH}" | /bin/sed -r 's#/opt/local/man:##g')"
if [ -d "/opt/local" ]; then
  MANPATH="/opt/local/man:${MANPATH}"
fi
# check for omniti repository (omnios): remove any stale entry, then append
MANPATH="$(echo "${MANPATH}" | /bin/sed -r 's#/opt/omni/share/man:##g')"
if [ -d "/opt/omni" ]; then
  MANPATH="${MANPATH}:/opt/omni/share/man"
fi
# return new MANPATH
echo "${MANPATH}"
# vim: tabstop=2 expandtab shiftwidth=2 softtabstop=2
| true
|
2eb5bce68e7f81fbe47dd691fc0942773cd3869c
|
Shell
|
textarcana/bats-example-project
|
/test/basic.bats
|
UTF-8
| 119
| 2.640625
| 3
|
[
"Apache-2.0"
] |
permissive
|
# FLUNK="oh my yes"
# Sanity checks for the bats test harness itself.
@test "truth should be true" {
true
}
# Passes only while FLUNK is unset/empty (uncomment the line above this
# file's tests to force a failure).
@test "string should be empty" {
[ -z "$FLUNK" ]
}
| true
|
55565ccb8635f030f931a5e8f6fa82df8e6faafe
|
Shell
|
lancelet/dotfiles
|
/macinstall.sh
|
UTF-8
| 1,939
| 3.515625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
#
# Install on a new mac.
# "Safe" bash
set -euf -o pipefail
# ANSI codes
readonly GREEN='\033[0;32m'
readonly NC='\033[0m' # no color
# Print a log message, in green.
# Print a log message, in green.
# $1 - message text.  The message is passed as a printf *argument*; the
#      original interpolated it into the format string, so any '%' in
#      the message corrupted the output.
log () {
  declare -r message="$1"
  printf "${GREEN}MACINSTALL: %s${NC}\n" "$message"
}
log 'dotfiles installation commencing...'
read -s -p "Password: " PASSWORD
declare -r PASSWORD_QUOTED=$(printf '%q' "$PASSWORD")
declare -r PASSWORD_QUOTED_TWICE=$(printf '%q' "$PASSWORD_QUOTED")
log 'received password'
log 'installing iTerm2'
mkdir -p ~/Applications
pushd ~/Applications
curl -L https://iterm2.com/downloads/stable/iTerm2-3_4_4.zip -o iTerm2.zip
unzip iTerm2.zip
rm iTerm2.zip
xattr -dr com.apple.quarantine iTerm.app
popd
log 'installing Nix'
sh <(curl -L https://nixos.org/nix/install) \
--darwin-use-unencrypted-nix-store-volume
# shellcheck disable=SC1090
source "$HOME/.nix-profile/etc/profile.d/nix.sh"
log 'installing nix-darwin (using minimal darwin-configuration.nix)'
mkdir -p "$HOME/.nixpkgs"
cat <<EOF >"$HOME/.nixpkgs/darwin-configuration.nix"
{ config, pkgs, lib, ... }:
{
imports = [ <home-manager/nix-darwin> ];
system.stateVersion = 4;
home-manager.users.jsm = { pkgs, ... }: {
home.packages =
with pkgs;
[
git
];
}
EOF
nix-channel --add \
https://github.com/nix-community/home-manager/archive/master.tar.gz \
home-manager
nix-channel --update
nix-build https://github.com/LnL7/nix-darwin/archive/master.tar.gz -A installer
cat "$HOME/workspace/dotfiles/nix-darwin-installer.exp" |
sed "s/PASSWORD/${PASSWORD_QUOTED_TWICE}/g" |
expect
rm result
log 'checking out repo'
rm -rf "$HOME/.nixpkgs"
mkdir -p ~/workspace
pushd ~/workspace
nix-shell \
--pure \
--packages cacert git \
--run 'git clone https://github.com/lancelet/dotfiles.git'
popd
log 'linking ~/.nixpkgs -> ~/workspace/nixpkgs'
ln -s "$HOME/workspace/dotfiles/nixpkgs" "$HOME/.nixpkgs"
| true
|
819eb01665c2fd3f10037b3e05f66088cc6bb85e
|
Shell
|
rogetsun/stzb
|
/docker/build.sh
|
UTF-8
| 426
| 3.21875
| 3
|
[] |
no_license
|
#!/bin/bash
# Build the stzb image with the tag given in $1, retag it as :latest,
# and push both tags to the registry.
baseimg=registry.cn-beijing.aliyuncs.com/songyw/stzb
image=${baseimg}:${1}
echo "image is ${image}"
# Refuse to build without a usable tag ("-z" replaces the == "" test;
# "0" is also rejected, matching the original behaviour).
if [[ -z "${1}" || "${1}" == "0" ]]; then
    echo "please input tag; ${image} ?"
else
    docker build -t "${image}" .
    docker images | grep "${baseimg}"
    # Drop the old :latest alias before re-pointing it at the new build.
    docker rmi "${baseimg}:latest"
    docker tag "${image}" "${baseimg}:latest"
    docker push "${image}"
    docker push "${baseimg}:latest"
    docker images | grep "${baseimg}"
fi
|
58a5f39d53245c49118ab8bcdcb828656fa306a8
|
Shell
|
sw897/CentOSGeoWebEnv
|
/soft/python.sh
|
UTF-8
| 1,073
| 3.0625
| 3
|
[] |
no_license
|
#!/bin/sh
#
# ******************************************************************************
# Software: CentOSGeoWebEnv
# Author: mapdb
# Website: www.mapdb.cn
# Email: mapdb2014@gmail.com
# ------------------------------------++++
# Thank you for choosing CentOSGeoWebEnv!
# ******************************************************************************
#
# Source the shared helper from either the parent or current directory.
if [ -e "../inc.sh" ]; then
. "../inc.sh"
elif [ -e "./inc.sh" ]; then
. "./inc.sh"
fi
# Python release to build from source; python_version is the X.Y prefix
# used by the altinstall'd tool names (python2.7, easy_install-2.7, ...).
soft_version="2.7.9"
python_version="2.7"
# Clean any previous extraction, fetch the tarball once, build, and
# `make altinstall` so the system python is left untouched.
rm -rf Python-${soft_version}
if [ ! -f Python-${soft_version}.tar.xz ]; then
wget https://www.python.org/ftp/python/${soft_version}/Python-${soft_version}.tar.xz
fi
tar -xf Python-${soft_version}.tar.xz
cd Python-${soft_version}
./configure
make
make altinstall
cd ..
rm -rf Python-${soft_version}
# rm -f python-${soft_version}.tar.gz
# Bootstrap setuptools, then pip, then virtualenv for the new interpreter.
if [ ! -f ez_setup.py ]; then
wget https://bitbucket.org/pypa/setuptools/raw/bootstrap/ez_setup.py
fi
python${python_version} ez_setup.py
easy_install-${python_version} pip
pip${python_version} install virtualenv
| true
|
900ad9abc788c077a8d1dec9663a3a7cbec6d2cf
|
Shell
|
ludmilaklenova/Tests-Plateforme
|
/pytests_platform/JavaRoot/integration/scripts/drjava_all_tests.sh
|
UTF-8
| 1,030
| 3.34375
| 3
|
[] |
no_license
|
#!/bin/sh
##+=============================================================================
##
## file :
##
## description : Ce script met à jour l'infrastructure et lance tous les tests pour le package Device Root Java
##
##
##-=============================================================================
#
# Shared environment (colors, paths) for the test-platform scripts.
. $PYTESTS_PLATFORM_ROOT/scripts/common_testing_setenv.sh
# NOTE(review): `echo -e` under #!/bin/sh is a bashism — on dash it
# prints the literal "-e"; presumably /bin/sh is bash here. TODO confirm.
echo -e "${BLUE}${BOLD}==> Préparation de l'infrastructure ... ${NORMAL}"
# Step 1: prepare the test infrastructure; abort on failure.
$PYTESTS_PLATFORM_ROOT/scripts/drjava_prepare_infra.sh
ret=$?
if [ $ret -ne 0 ]; then
echo -e "${RED}======== Fatal: Preparation de l'infrastructure Failed. ========${NORMAL}"
exit 1
fi
echo -e "${BLUE}${BOLD}==> Lancement des tests non fonctionnels, fonctionnel, de fiabilité et de régression ... ${NORMAL}"
# Step 2: run the remaining test suites; abort on failure.
$PYTESTS_PLATFORM_ROOT/scripts/drjava_other_tests.sh
ret=$?
if [ $ret -ne 0 ]; then
echo -e "${RED}======== Fatal: Lancement des tests Failed. ========${NORMAL}"
exit 1
fi
echo
echo -e "${GREEN}${BOLD}Execution : " $(basename $0) " done${NORMAL}"
exit 0
| true
|
1d80fc111343ba3e24bb29749d0d7416b1020eab
|
Shell
|
chrismaes87/make-docker-base-image
|
/make-docker-base-image.sh
|
UTF-8
| 6,375
| 4.375
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
#
# Create a base Docker image.
#
# inspired from https://github.com/moby/moby/blob/master/contrib/mkimage-yum.sh
set -e
me=$(basename $0)
usage() {
cat <<EOOPTS
$me [OPTIONS] <name[:version]>
SYNOPSIS
Create an image from scratch with certain packages installed.
The resulting image will be created with tag 'name:version' .
If version is not specified, we will try to fetch it from /etc/{redhat,system}-release in the image.
OPTIONS:
-c | --config-file <config_file> The path to the pkg-manager config file.
Default:
- if <pkg-manager> = yum : /etc/yum.conf
- if <pkg-manager> = zypper : /etc/zypp/zypp.conf
- if <pkg-manager> = dnf : /etc/dnf/dnf.conf
--filesystem-only Construct only the filesystem in <target-dir>.
Don't create the docker image. Don't remove <target-dir> at the end.
-g | --group <group> Package group to install in the container.
(example: Core for centos)
Can be specified multiple times.
-p | --package <package> Package to install in the container.
Can be specified multiple times.
-t | --target-dir <target-dir> Where to construct the image filesystem before importing
Default: temporary directory in /tmp
EOOPTS
exit 1
}
REARRANGED_OPTIONS=$(getopt -o c:g:hp:t: --long config-file:,filesystem-only,group:,help,package:,target-dir: -- "$@")
eval set -- "$REARRANGED_OPTIONS"
install_groups=()
install_packages=()
version=
while true
do
case "$1" in
-c | --config-file ) config_file=$2; shift 2;;
--filesystem-only ) FILESYSTEM_ONLY=1; shift;;
-g | --group ) install_groups+=("$2"); shift 2;;
-h | --help ) usage ;;
-p | --package ) install_packages+=("$2"); shift 2;;
-t | --target-dir ) target=$2; shift 2;;
-- )
shift; # skip --
if [ -n "$1" ]
then
docker_tag=$1
shift # skip $1
if [ -n "$1" ]
then
echo "$me : Unexpected arguments: \"$@\" . exiting."; exit 1 ;
fi
elif [[ -z $FILESYSTEM_ONLY ]]
then
# user needs to specify the name except when FILESYSTEM_ONLY is specified.
usage
fi
break;;
* ) echo "$me : Unexpected options: \"$@\" . exiting."; exit 1 ;;
esac
done
for pkgm in zypper yum dnf
do
if command -v $pkgm >/dev/null
then
pkg_manager=$pkgm
break
fi
done
if [[ -z $pkg_manager ]]
then
echo "no valid package manager found. Exiting"
exit 1
fi
if [[ -z $config_file ]]
then
case $pkg_manager in
yum)
config_file=/etc/yum.conf ;;
zypper)
config_file=/etc/zypp/zypp.conf ;;
dnf)
config_file=/etc/dnf/dnf.conf ;;
esac
echo "auto-selected config_file: $config_file"
fi
if [[ ! -e $config_file ]]
then
echo "$config_file does not exist"
exit 1
fi
if [[ $pkg_manager != zypper ]]
then
# default to Core group if not specified otherwise
if [ ${#install_groups[@]} -eq 0 ]; then
install_groups=('Core')
fi
fi
if [[ -n $target ]]
then
if [ ! -d $target ]
then
mkdir $target
fi
else
target=$(mktemp -d --tmpdir $(basename $0).XXXXXX)
fi
set -x
mkdir -m 755 "$target"/dev
mknod -m 600 "$target"/dev/console c 5 1
mknod -m 600 "$target"/dev/initctl p
mknod -m 666 "$target"/dev/full c 1 7
mknod -m 666 "$target"/dev/null c 1 3
mknod -m 666 "$target"/dev/ptmx c 5 2
mknod -m 666 "$target"/dev/random c 1 8
mknod -m 666 "$target"/dev/tty c 5 0
mknod -m 666 "$target"/dev/tty0 c 4 0
mknod -m 666 "$target"/dev/urandom c 1 9
mknod -m 666 "$target"/dev/zero c 1 5
# amazon linux yum will fail without vars set
if [ -d /etc/yum/vars ]; then
mkdir -p -m 755 "$target"/etc/yum
cp -a /etc/yum/vars "$target"/etc/yum/
fi
if [[ $pkg_manager = zypper ]]
then
ZYPP_CONF="$config_file" $pkg_manager --root="$target" --gpg-auto-import-keys refresh
if [[ -n "$install_groups" ]]
then
ZYPP_CONF="$config_file" $pkg_manager --root="$target" install -y -t pattern "${install_groups[@]}"
fi
if [[ -n "$install_packages" ]]
then
ZYPP_CONF="$config_file" $pkg_manager --root="$target" install -y "${install_packages[@]}"
fi
ZYPP_CONF="$config_file" $pkg_manager --root="$target" clean -a
else
if [[ -n "$install_groups" ]]
then
$pkg_manager -c "$config_file" --installroot="$target" --releasever=/ --setopt=tsflags=nodocs \
--setopt=group_package_types=mandatory -y groupinstall "${install_groups[@]}"
fi
if [[ -n "$install_packages" ]]
then
$pkg_manager -c "$config_file" --installroot="$target" --releasever=/ --setopt=tsflags=nodocs \
--setopt=group_package_types=mandatory -y install "${install_packages[@]}"
fi
$pkg_manager -c "$config_file" --installroot="$target" -y clean all
cat > "$target"/etc/sysconfig/network <<EOF
NETWORKING=yes
HOSTNAME=localhost.localdomain
EOF
fi
# effectively: febootstrap-minimize --keep-zoneinfo --keep-rpmdb --keep-services "$target".
# locales
rm -rf "$target"/usr/{{lib,share}/locale,{lib,lib64}/gconv,bin/localedef,sbin/build-locale-archive}
# docs and man pages
rm -rf "$target"/usr/share/{man,doc,info,gnome/help}
# cracklib
rm -rf "$target"/usr/share/cracklib
# i18n
rm -rf "$target"/usr/share/i18n
# yum cache
rm -rf "$target"/var/cache/yum/*
# sln
rm -rf "$target"/sbin/sln
# ldconfig
rm -rf "$target"/etc/ld.so.cache "$target"/var/cache/ldconfig/*
if [[ $FILESYSTEM_ONLY ]]
then
echo "filesystem constructed in directory $target . "
exit 0
fi
if ! [[ $docker_tag =~ : ]]
then
# docker_tag does not contain a version number, search it in the image.
for file in "$target"/etc/{redhat,system}-release
do
if [ -r "$file" ]; then
version="$(sed 's/^[^0-9\]*\([0-9.]\+\).*$/\1/' "$file")"
break
fi
done
docker_tag="$docker_tag:$version"
fi
tar --numeric-owner -c -C "$target" . | docker import - $docker_tag
rm -rf "$target"
| true
|
98e37c00e852d552b7565872c23bca7babf76c2b
|
Shell
|
cflury/sentiment-workshop
|
/infra/start_jupyter.sh
|
UTF-8
| 336
| 2.625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Start Jupyter Notebook inside the pytorch_p36 conda environment.
export PATH=~/anaconda3/bin:$PATH
source activate pytorch_p36
echo "PATH: $PATH"
# $(...) replaces the legacy backticks; the grep pattern matches the '*'
# marker conda prints next to the active environment.
JUP_ENV=$(conda env list | grep "*" | awk '{print $1}')
echo "starting jupyter in conda env $JUP_ENV"
# Fail fast rather than starting the notebook server in the wrong directory.
cd /home/ubuntu/dev || exit 1
/home/ubuntu/anaconda3/envs/pytorch_p36/bin/jupyter-notebook --config=/home/ubuntu/.jupyter/jupyter_notebook_config.py
| true
|
e0641d4106ded69110638b6ce78af4c80197857c
|
Shell
|
StevenACoffman/go-clever
|
/convert.sh
|
UTF-8
| 488
| 2.71875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
set -x
npm install
# Iterate the Swagger 2.0 specs via the shell glob instead of parsing
# `ls` output, which word-splits and breaks on unusual filenames.
for swagger in ./oas2/*.yml; do
	# With no matches the literal pattern remains; skip it.
	[ -e "$swagger" ] || continue
	echo "${swagger}"
	# Output name: the basename up to the first '.' (e.g. foo.yml -> foo).
	oapi_output="$(basename "${swagger}" | cut -d'.' -f1)"
	echo "${oapi_output}"
	# npx api-spec-converter --from=swagger_2 --to=openapi_3 --syntax=yaml "${swagger}" > "./oas3/${oapi_output}.oas3.yaml"
	npx swagger2openapi --patch -y "${swagger}" -o "./oas3/${oapi_output}.oas3.yaml"
	# NOTE: above, consider swagger2openapi options --resolveInternal --resolve
done
| true
|
9604cedbe0f28c9575606379f553fdff1eb34980
|
Shell
|
haianos/script_ubx
|
/install
|
UTF-8
| 4,030
| 3.734375
| 4
|
[] |
no_license
|
#!/bin/bash
####
## Start Install Script
################################################################
##
# Functions
###
# Define the ANSI color variables used by the installer output.
# NOTE(review): these literals look like they should begin with an ESC
# byte (\033) before '[' — the escape character appears to have been
# lost somewhere; verify against the original file. TODO confirm
set_colors()
{
red='[0;31m'; lred='[1;31m'
green='[0;32m'; lgreen='[1;32m'
yellow='[0;33m'; lyellow='[1;33m'
blue='[0;34m'; lblue='[1;34m'
purple='[0;35m'; lpurple='[1;35m'
cyan='[0;36m'; lcyan='[1;36m'
grey='[0;37m'; lgrey='[1;37m'
white='[0;38m'; lwhite='[1;38m'
std='[m'
}
# Print manual installation instructions for Microblx and exit the
# script successfully (the user is expected to rerun after setup).
instructions()
{
echo "*** Instructions to Install Microblx ***"
echo "- Solve the following system dependencies: ${cyan}clang, gcc, libluajit-5.1-dev luajit${std}"
echo "- Download the code: ${cyan}git clone https://github.com/UbxTeam/microblx${std}"
echo "- Setup ${cyan}UBX_ROOT${std} Environmental variable"
echo "- Further informations on ${green}http://ubxteam.github.io/quickstart/${std}"
exit 0
}
# Interactively resolve UBX_ROOT when the environment variable is unset:
# either ask the user for the existing source path (and export it), or
# print the setup instructions and exit via instructions().
set_ubx_root()
{
echo "${red}UBX_ROOT Environment Variable not found${std}"
# Loop until a valid yes/no answer is given.
while true; do
read -p "ubx-core not found. Do you get the sources already?${std} [Y/N]?" answer
case $answer in
[Yy]* ) echo "Insert path to microblx source code ${yellow}(set UBX_ROOT)${std}";
read ubxpath;
export UBX_ROOT=$ubxpath;
break;;
[Nn]* ) echo "Please, read the follow instructions and execute this script again";
echo "-------------------------------------------------------------------";
instructions
break;;
* ) echo "${yellow}$answer is not valid. Please answer yes (y) or no.${std}";;
esac
done
}
# Prompt for the Ubx installation prefix and export it as UBX_INSTALL.
# Fixes two defects in the original: the typed path landed in 'answer'
# and a second (blocking) `read` then overwrote it with the next line,
# and a stray `break` appeared outside of any loop.
set_ubx_install()
{
echo "${red}UBX_INSTALL Environment Variable not found${std}"
read -p "Where do you want to install Ubx software?${std} (path)?" ubxpath
export UBX_INSTALL=$ubxpath
}
################################################################
### Program starts here
# Main flow: resolve UBX_ROOT/UBX_INSTALL interactively if needed, build
# microblx in-tree, copy headers/lua tooling into the install prefix, and
# write an env.sh the user must source afterwards.
set_colors
echo "${cyan}*** Welcome to Microblx framework! ***${std}"
echo "*** Installation Script ***"
if [ -z "$UBX_ROOT" ]; then
set_ubx_root
fi
echo "${yellow}UBX_ROOT${std}: $UBX_ROOT"
if [ -z "$UBX_INSTALL" ]; then
set_ubx_install
fi
# NOTE(review): these exports derive from $UBX_ROOT, but the generated
# env.sh below records them relative to $UBX_INSTALL — confirm which
# location is intended.
export UBX_MODULES=$UBX_ROOT/lib/ubx
export UBX_MODELS=$UBX_ROOT/share/ubx/models
export UBX_METAMODELS=$UBX_ROOT/share/ubx/metamodels
echo ".... Compiling Ubx..."
CURR=`pwd`
cd $UBX_ROOT
source env.sh
make install
echo "${cyan}... Installing Ubx Core Library${std}"
echo "${cyan}... - lib"
if [ ! -d "$UBX_INSTALL/include/ubx/core" ]; then
mkdir -p $UBX_INSTALL/include/ubx/core
fi
cp $UBX_ROOT/src/uthash_ffi.h $UBX_INSTALL/include/ubx/core
cp $UBX_ROOT/src/ubx_types.h $UBX_INSTALL/include/ubx/core
cp $UBX_ROOT/src/ubx_proto.h $UBX_INSTALL/include/ubx/core
echo "${cyan}... - luajit layer${std}"
if [ ! -d "$UBX_INSTALL/share/ubx/scripts" ]; then
mkdir -p $UBX_INSTALL/share/ubx/scripts
fi
cp $UBX_ROOT/lua/*.lua $UBX_INSTALL/share/ubx/scripts
if [ ! -d "$UBX_INSTALL/bin" ]; then
mkdir -p $UBX_INSTALL/bin
fi
echo "${cyan}... - common blocks and types${std}"
# webif
cp $UBX_ROOT/std_blocks/webif/webif.lua $UBX_INSTALL/share/ubx/scripts
echo "${cyan}... Installing core-tools${std}"
cp $UBX_ROOT/tools/file2carr.lua $UBX_INSTALL/share/ubx/scripts
cp $UBX_ROOT/tools/luaffi_load_file.lua $UBX_INSTALL/share/ubx/scripts
cp $UBX_ROOT/tools/ubx_launch.lua $UBX_INSTALL/share/ubx/scripts
cp $UBX_ROOT/tools/ubx_launch $UBX_INSTALL/share/ubx/scripts
ln -s $UBX_INSTALL/share/ubx/scripts/ubx_launch $UBX_INSTALL/bin/ubx_launch
echo "${cyan}... Generate Environment Setup${std}"
cd $CURR
# Single-quoted lines below are written literally so env.sh expands
# $UBX_INSTALL at source time.
echo "export UBX_ROOT=$UBX_ROOT" > env.sh
echo "export UBX_INSTALL=$UBX_INSTALL" >> env.sh
echo 'export UBX_MODULES=$UBX_INSTALL/lib/ubx' >> env.sh
echo 'export UBX_MODELS=$UBX_INSTALL/share/ubx/models' >> env.sh
echo 'export UBX_METAMODELS=$UBX_INSTALL/share/ubx/metamodels' >> env.sh
echo 'export LUA_PATH=";;;$LUA_PATH;$UBX_INSTALL/share/ubx/scripts/?.lua"' >> env.sh
echo 'export PATH=$PATH:$UBX_INSTALL/bin' >> env.sh
echo 'Source file "env.sh" '
| true
|
27c3e1d0bb9898355d9357eec740950c20ef0b34
|
Shell
|
xmudrii/dotfiles
|
/zsh/.zshrc
|
UTF-8
| 2,325
| 3.03125
| 3
|
[] |
no_license
|
# Path to your oh-my-zsh installation.
export ZSH="/home/marko/.oh-my-zsh"
# Enable Powerlevel10k instant prompt. Should stay close to the top of ~/.zshrc.
# Initialization code that may require console input (password prompts, [y/n]
# confirmations, etc.) must go above this block; everything else may go below.
if [[ -r "${XDG_CACHE_HOME:-$HOME/.cache}/p10k-instant-prompt-${(%):-%n}.zsh" ]]; then
source "${XDG_CACHE_HOME:-$HOME/.cache}/p10k-instant-prompt-${(%):-%n}.zsh"
fi
# Set name of the theme to load. Optionally, if you set this to "random"
# it'll load a random theme each time that oh-my-zsh is loaded.
# See https://github.com/robbyrussell/oh-my-zsh/wiki/Themes
ZSH_THEME="powerlevel10k/powerlevel10k"
# Which plugins would you like to load? (plugins can be found in ~/.oh-my-zsh/plugins/*)
# Custom plugins may be added to ~/.oh-my-zsh/custom/plugins/
# Example format: plugins=(rails git textmate ruby lighthouse)
# Add wisely, as too many plugins slow down shell startup.
plugins=(
git
docker
httpie
)
source $ZSH/oh-my-zsh.sh
# User configuration
# Split-out config files, each sourced only if present.
[ -f ~/.zsh_env ] && source ~/.zsh_env
[ -f ~/.zsh_func ] && source ~/.zsh_func
[ -f ~/.zsh_alias ] && source ~/.zsh_alias
[ -f ~/.zsh_docker ] && source ~/.zsh_docker
[ -f ~/.zsh_private ] && source ~/.zsh_private
## Auto completions
# Each tool's completion is generated only when the binary is runnable.
# NOTE(review): running `tool --help` on every shell start adds startup
# latency; `command -v tool` would be cheaper — confirm before changing.
# kubectl
if kubectl --help >/dev/null 2>&1; then
source <(kubectl completion zsh)
complete -o default -F __start_kubectl k
fi
# kubeone
if kubeone --help >/dev/null 2>&1; then
source <(kubeone completion zsh)
fi
# doctl
if doctl --help >/dev/null 2>&1; then
source <(doctl completion zsh)
fi
# git-completion: https://raw.githubusercontent.com/git/git/master/contrib/completion/git-completion.bash
[ -f ~/.git-completion.bash ] && source ~/.git-completion.bash
# fubectl: https://github.com/kubermatic/fubectl
[ -f ~/.fubectl.source ] && source ~/.fubectl.source
# fzf
[ -f ~/.fzf.zsh ] && source ~/.fzf.zsh
# nvm
# NOTE(review): nvm is initialised twice here (init-nvm.sh and the
# $NVM_DIR lines) — likely only one path applies per machine; confirm.
[ -f /usr/share/nvm/init-nvm.sh ] && source /usr/share/nvm/init-nvm.sh
[ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh" # This loads nvm
[ -s "$NVM_DIR/bash_completion" ] && \. "$NVM_DIR/bash_completion" # This loads nvm bash_completion
# direnv
eval "$(direnv hook zsh)"
# To customize prompt, run `p10k configure` or edit ~/.p10k.zsh.
[[ ! -f ~/.p10k.zsh ]] || source ~/.p10k.zsh
| true
|
9d00696e37628277791594a462b17bb2f7541223
|
Shell
|
rofrol/git-helpers
|
/git-create
|
UTF-8
| 646
| 2.9375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Create a GitHub repository via the API, add it as 'origin', and push
# the current master branch.
# Usage: git-create <user> <repo>
#http://stackoverflow.com/a/10325316/588759
#http://stackoverflow.com/a/13509762/588759
#https://gist.github.com/robwierzbowski/5430952
# Strip spaces from both arguments; quote every expansion so values can
# never be word-split or glob-expanded.
user=$(echo "${1}" | tr -d ' ')
repo=$(echo "${2}" | tr -d ' ')
echo "${user}"
echo "${repo}"
[ -z "$user" ] && echo "user empty!" && exit 1
[ -z "$repo" ] && echo "repo empty!" && exit 1
# curl -u prompts for the password; the repo name goes in the JSON body.
curl -u "${user}" https://api.github.com/user/repos -d "{\"name\":\"${repo}\"}"
git remote add origin "https://github.com/${user}/${repo}.git"
echo "if origin exist"
echo "git remote set-url origin https://github.com/${user}/${repo}.git"
echo "Maybe you have to wait a little before push works."
git push -u origin master
| true
|
c7edf8e9ca7e49bc4a3d45e56490a98f30414bbe
|
Shell
|
dbat/shell-scripts
|
/timer.sh
|
UTF-8
| 1,338
| 3.921875
| 4
|
[] |
no_license
|
#!/bin/sh
# timer.sh -- run the given command, printing banners and the elapsed
# time, and append everything to a per-script log file in /tmp.
test "$1" || { echo nothing happen; exit 1; }
bar="============================"
bar2="----------------------------"
unset err; log="/tmp/zz-${0##*/}.log"
# BSD date(1): -j means "don't set the clock"; ticks prints epoch seconds.
# NOTE(review): aliases do not normally expand in non-interactive bash
# without `shopt -s expand_aliases` — confirm the target shell honours them.
alias today='date -j "+%Y.%m.%d %H:%M:%S"'
alias ticks='date -j +%s'
# clock START END
# Print a human-readable description of END-START (both epoch seconds).
# Returns 1 when either argument is missing, 2 for a negative interval,
# 0 otherwise.
clock () {
	# Bug fix: the original tested "$1" twice, so a missing second
	# argument slipped through and produced a bogus negative interval.
	test -z "$1" -o -z "$2" && \
		{ echo "missing arguments"; return 1; }
	local ts="$(($2-$1))"
	test "$ts" -lt 0 && \
		{ echo limitless; return 2; }
	case "$ts" in
	0) echo unchanged;;
	1) echo a second\!;;
	60) echo a minute;;
	3600) echo an hour;;
	86400) echo a day;;
	*) local tics="$ts seconds"
		if test "$ts" -le 60; then echo "$tics"
		# BSD date: -r TS formats the given epoch value as HH:MM:SS
		else echo "`date -j -r "$ts" "+%T"` ($tics)"
		fi;;
	esac
	return 0
}
# makebar CHAR STRING
# Print CHAR once per character of STRING (no trailing newline), so the
# caller can build a banner bar matching the string's width.
# Returns 1 when STRING is missing or empty.
makebar () {
	test "$2" || return 1
	local b="$1"; shift
	# Bug fix: the original used ${#@} (the number of arguments, always 1
	# here) instead of ${#1}, the length of the string itself.
	local n="${#1}"
	while test "$n" -gt 0; do
		n="$(($n-1))"; echo -n "$b"
	done
}
TIMER="START: `today`"
PROCS="PROCESS: \"$*\""
#echo "len PROCS=${#PROCS}, len bar=${#bar}"
# Widen the banner bars when the PROCESS line is longer than the default.
test "${#PROCS}" -gt "${#bar}" && {
bar=`makebar "=" "$PROCS"`
bar2=`makebar "-" "$PROCS"`
}
echo -e "\n$bar\n$PROCS\n$TIMER\n$bar2" | tee -ai $log
# Run the command between two tick readings; remember any failure in 'err'.
tic="`ticks`"; $* || err=1; tac="`ticks`"
test "$err" && echo "*** ERROR ***" | tee -ai $log
echo; echo "RUNNING TIME: `clock "$tic" "$tac"`" | tee -ai $log
TIMER="FINISH: `today`"
echo -e "\n$bar2\n$TIMER\n$bar" | tee -ai $log
echo ""| tee -ai $log
| true
|
9aeb6b758bf7938117fb9113f81e96611db565f2
|
Shell
|
lucafavatella/provisioning-android
|
/bin/apksignercerts
|
UTF-8
| 1,141
| 3.53125
| 4
|
[
"ISC"
] |
permissive
|
#!/bin/sh
# Print the signing certificates of the APK given as $1 by building a
# minimal Debian image with apksigner and running it network-isolated,
# streaming the APK in over stdin.
T="docker-apksignercerts"
F="${1:?}"
# Build the throwaway image from the inline Dockerfile (quiet; the image
# id is discarded).
{ docker build \
-q \
-t "${T:?}" \
- \
>/dev/null <<"EOF"
FROM debian:bullseye
## Refs:
## - [apt-get best practices](https://docs.docker.com/develop/develop-images/dockerfile_best-practices/#apt-get).
RUN apt-get update && apt-get install -y --no-install-recommends \
apksigner \
&& rm -rf /var/lib/apt/lists/*
WORKDIR /workdir
## Refs:
## - https://android.stackexchange.com/questions/9312/how-can-i-verify-the-authenticity-of-an-apk-file-i-downloaded
## It also has alternative solution using `keytool -printcert`
## but it is tricky because certificate path is not always at `META-INF/CERT.RSA`.
## - https://developer.android.com/studio/command-line/apksigner
ENTRYPOINT ["sh", "-c", "cat - > \"${1:?}\" && apksigner verify --print-certs -v \"${1:?}\"", "apksigner"]
EOF
} && {
# shellcheck disable=SC2094
# Run with no network and remove the container afterwards; the APK is
# piped in on stdin and re-materialised inside the container.
docker run \
-i \
-a stdin \
-a stdout \
-a stderr \
--network none \
--rm \
"${T:?}" \
"$(basename "${F:?}")" \
< "${F:?}"; }
| true
|
a5f3bac768b30caedbbe6cb1e05dea9d179e0b9f
|
Shell
|
niuzb/quake-android
|
/pela/quake2android/change_package_name.sh
|
UTF-8
| 2,383
| 3.078125
| 3
|
[] |
no_license
|
# Rebrand the Android project: rewrite the Java package, manifest
# version/package, layout namespace, build paths and app name, then
# force a rebuild of the affected native objects.
AppFullName=com.xianle.doomtnt
AppVersionCode=10
AppVersionName="1.0 releae TNT of Doom"
AppName="TNT Doom"
echo Patching java file
for F in project/src/*.java; do
echo Patching $F
cat $F | \
sed "s/package .*;/package com.xianle.doomtnt;/" > \
$F.1
mv -f $F.1 $F
done
echo Patching project/AndroidManifest.xml
cat project/AndroidManifest.xml | \
sed "s/package=.*/package=\"$AppFullName\"/" | \
sed "s^android:versionCode=.*^android:versionCode=\"$AppVersionCode\"^" | \
sed "s^android:versionName=.*^android:versionName=\"$AppVersionName\"^" > \
project/AndroidManifest.xml.1
mv -f project/AndroidManifest.xml.1 project/AndroidManifest.xml
# NOTE(review): "Pathching" below looks like a typo for "Patching", and
# the replacement writes res/\"$AppFullName\"\" with stray escaped quotes
# — probably intended to be .../apk/res/$AppFullName — confirm intent.
echo Pathching project/res/layout/cover.xml
cat project/res/layout/cover.xml | \
sed "s^xmlns:myapp=.*^xmlns:myapp=\"http://schemas.android.com/apk/res/\"$AppFullName\"\"^">\
project/res/layout/cover.xml.1
mv -f project/res/layout/cover.xml.1 project/res/layout/cover.xml
AppShortName=`echo $AppName | sed 's/ //g'`
DataPath="$AppFullName"
# Java package path with dots turned into underscores (for SDL glue).
AppFullNameUnderscored=`echo $AppFullName | sed 's/[.]/_/g'`
AppSharedLibrariesPath=/data/data/$AppFullName/lib
echo Patching project/jni/Android.mk
cat project/jni/Android.mk | \
sed "s/SDL_JAVA_PACKAGE_PATH := .*/SDL_JAVA_PACKAGE_PATH := $AppFullNameUnderscored/" | \
sed "s^SDL_CURDIR_PATH := .*^SDL_CURDIR_PATH := $DataPath^" > \
project/jni/Android.mk.1
# Only replace Android.mk when something actually changed, to avoid
# needless rebuilds triggered by a newer mtime.
if [ -n "`diff -w project/jni/Android.mk.1 project/jni/Android.mk`" ] ; then
mv -f project/jni/Android.mk.1 project/jni/Android.mk
else
rm -rf project/jni/Android.mk.1
fi
echo Patching project/res/values/strings.xml
cat project/res/values/strings.xml | \
sed "s^[<]string name=\"app_name\"[>].*^<string name=\"app_name\">$AppName</string>^" > \
project/res/values/strings.xml.1
mv -f project/res/values/strings.xml.1 project/res/values/strings.xml
echo Forcing rebuild of specific files
rm -rf project/libs/*
for OUT in obj; do
rm -rf project/$OUT/local/*/objs/sdl_main/* project/$OUT/local/*/libsdl_main.so
rm -rf project/$OUT/local/*/libsdl.so
rm -rf project/$OUT/local/*/libstlport.a # Should be re-linked if you're changing toolchain
rm -rf project/$OUT/local/*/objs/sdl/src/*/android
rm -rf project/$OUT/local/*/objs/sdl/src/video/SDL_video.o
rm -rf project/$OUT/local/*/objs/sdl/SDL_renderer_gles.o
# Do not rebuild libraries that do not need that
find project/$OUT/local -name "*.[oa]" -exec touch '{}' \;
done
| true
|
3f89d565633a79205769192697badd14c9b74b1e
|
Shell
|
saptakds/Day-7-Arrays
|
/script1.sh
|
UTF-8
| 505
| 3.578125
| 4
|
[] |
no_license
|
#!/bin/bash -x
# Fill an array with 10 random numbers, then report the second-smallest
# and second-largest *distinct* values.
#
# Bug fix: the original initialised minTwo/maxTwo to the first element,
# so whenever the first element was itself the minimum (or maximum) the
# "second" value was never updated (e.g. 1 5 3 reported 1 as the second
# minimum).  The helpers below track the runner-up explicitly.
# second_min N1 N2 ... -> prints the second-smallest distinct value
# (falls back to the minimum itself when all values are equal).
second_min() {
	local j min=$1 second=""
	for j in "$@"; do
		if [ "$j" -lt "$min" ]; then
			second=$min
			min=$j
		elif [ "$j" -ne "$min" ] && { [ -z "$second" ] || [ "$j" -lt "$second" ]; }; then
			second=$j
		fi
	done
	echo "${second:-$min}"
}
# second_max N1 N2 ... -> prints the second-largest distinct value
# (falls back to the maximum itself when all values are equal).
second_max() {
	local j max=$1 second=""
	for j in "$@"; do
		if [ "$j" -gt "$max" ]; then
			second=$max
			max=$j
		elif [ "$j" -ne "$max" ] && { [ -z "$second" ] || [ "$j" -gt "$second" ]; }; then
			second=$j
		fi
	done
	echo "${second:-$max}"
}
arr=()
for i in `seq 0 9`
do
	# values in [100, 1098]
	randomNo=$((100+$RANDOM%999))
	arr[i]=$randomNo
done
echo "Second minimum: $(second_min "${arr[@]}")"
echo "Second maximum: $(second_max "${arr[@]}")"
| true
|
55350943d83253ad44bccc612fcceffe64acf8e5
|
Shell
|
yasam/tools
|
/eclipse/eprojects_kernel.sh
|
UTF-8
| 684
| 3.515625
| 4
|
[] |
no_license
|
#!/bin/sh
# Create Eclipse project files for a kernel tree by copying templates and
# substituting the project name (the directory basename).
# Optional $1: directory to operate in.
# POSIX fix: `[ $# == 1 ]` is a bashism under /bin/sh; use -eq, and quote
# the cd argument so paths with spaces work.
if [ "$#" -eq 1 ]; then
	echo "Changing directory : $1"
	cd "$1"
fi
PROJECT_DIR=`pwd`
TEMPLATE_DIR=/home/yasam/projects/tools/eclipse
NAME=`basename "$PROJECT_DIR"`
echo "Creating project on $PROJECT_DIR"
echo "Project name : $NAME"
echo "Coping project files"
cp "$TEMPLATE_DIR/.project" ./
cp "$TEMPLATE_DIR/.cproject" ./
echo "Replacing project name with : $NAME"
sed 's/@@__NAME__@@/'"$NAME"'/g' .project > .tmpproject
mv .tmpproject .project
# NOTE(review): the kernel template below overwrites the .project just
# patched above (making the first substitution redundant), and
# .cproject_kernel is copied to './.kernel' — possibly meant to be
# './.cproject'.  Left unchanged pending confirmation.
cp "$TEMPLATE_DIR/.project_kernel" ./.project
cp "$TEMPLATE_DIR/.cproject_kernel" ./.kernel
echo "Replacing project name with : $NAME"
sed 's/@@__NAME__@@/'"$NAME"'/g' .project > .tmpproject
mv .tmpproject .project
| true
|
ce0791a09f9f4294cdaf197a20620f73900b223e
|
Shell
|
joepetrowski/substraTEE
|
/scriptsM4/start_client.sh
|
UTF-8
| 718
| 2.828125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Demo driver for the substraTEE M4 client: waits for the workers to come
# up, sends two transactions, then queries the counter, logging each step.
clear
# wait until the worker 1 is ready
sleep 30s
# start the client and send first transaction
cd /substraTEE/substraTEE-worker-M4/bin
./substratee_client -p 9977 -s 192.168.10.10 2>&1 | tee /substraTEE/output/client_first.log
# wait until worker 2 registered
sleep 30s
# start the client and send second transaction
cd /substraTEE/substraTEE-worker-M4/bin
./substratee_client -p 9977 -s 192.168.10.10 2>&1 | tee /substraTEE/output/client_second.log
# wait until transaction is processed
sleep 30s
# query the counter
cd /substraTEE/substraTEE-worker-M4/bin
./substratee_client -p 9977 -s 192.168.10.10 getcounter 2>&1 | tee /substraTEE/output/client_counter.log
# keep the terminal open so the operator can inspect the logs
read -p "Press enter to continue"
| true
|
4a136713a7ab05ebc086651a828fa2da2b37b832
|
Shell
|
cashgithubs/sda
|
/spider/bin/slave-control.sh
|
UTF-8
| 1,994
| 4.15625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Resolve the directory containing this script and the spider root
# (one level up), independent of the caller's working directory.
bin=`cd "$( dirname "$0" )"; pwd`
spider_dir=`cd ${bin}/..;pwd`
# Print the command-line help text to stdout.
usage() {
cat << EOF
usage: $0 options
NAME
slave-control.sh - script to control the slaves remotely.
SYNOPSIS
$0 -i <ip> -p <password> -c <content> [start|status]
OPTIONS:
-h Show this message
-i IP , slave's ip
-p Password, default all the slaves have the same password
-c Content, what should be updated - question_detail/question/answer
EOF
}
# Parse the command-line flags into the global variables used below:
#   -p -> password   -i -> ip   -c -> content
# -h prints the usage text and exits; an unknown flag aborts the script.
parseArgs() {
    while getopts "c:p:i:h" opt; do
        case "${opt}" in
            c) content="$OPTARG" ;;
            i) ip="$OPTARG" ;;
            p) password="$OPTARG" ;;
            h)
                usage
                exit 1
                ;;
            ?)
                echo "Unknown argument"
                exit 1
                ;;
        esac
    done
}
# Kick off the remote spider: hard-reset and update the git checkout on
# the slave, then launch the update script for the selected content type.
# Uses the globals ip, password and content set by parseArgs.
start() {
    ${bin}/remote-execute.sh ${ip} ${password} \
    "cd /root/work/sda/spider/; git reset --hard;\
     git pull; sleep 3; bin/update-zhihu.sh -c ${content}"
    sleep 3
}
# Remote paths on the slave machines.
REMOTE_SPIDER_DATA_DIR="/root/work/sda/data/zhihu"
REMOTE_SPIDER_LOG_DIR="/root/work/sda/spider/log"
# Show the matching spider processes and the ten most recent data files
# on the slave for the selected content type.
# NOTE(review): the inner double quotes around the dashes terminate the
# remote command string early (the echo argument ends up unquoted) —
# works by accident; confirm before relying on it.
status() {
    echo "########################Slave Status######################"
    ${bin}/remote-execute.sh ${ip} ${password} \
    "ps -ef | grep zhihu | grep ${content};sleep 3; echo "------------------------------------";\
     ls -lrt ${REMOTE_SPIDER_DATA_DIR}/${content}/ | tail -10"
    sleep 3
}
# --- main ---------------------------------------------------------------
# Parse the flags, validate that a content type was given, then dispatch
# on the remaining word (start|status).
# Bug fix: `parseArgs $*` re-split arguments, breaking any value that
# contains spaces (e.g. a password); "$@" preserves them.
parseArgs "$@"
shift $((OPTIND-1))
WHAT=$1
echo "...What:${WHAT}"
echo "...content:${content}"
if [ "$content" == "" ]; then
    echo "...content should not be empty."
    exit
fi
if [ "$WHAT" == "status" ]; then
    status
elif [ "$WHAT" == "start" ]; then
    echo "...start the slaves..."
    start
else
    echo "start or status should be appended, please see the usage of command"
    usage
fi
| true
|
6470f33d96b7c9528d411d6145e6f59fc822d43f
|
Shell
|
cat769/termux-project
|
/pythontools.sh
|
UTF-8
| 1,210
| 3.796875
| 4
|
[] |
no_license
|
# Password-gated Termux helper menu: install a python variant, ping a
# host, or clear the screen.  Requires the 'toilet' banner tool.
clear
password="im learning"
read -p "enter the code here:" pass
# Quote both sides so an empty or multi-word answer cannot break the test.
if [ "$pass" != "$password" ]
then
exit
fi
clear
echo
toilet -f big -F gay "welcome to kevin's tools!"
echo "here are the options:"
echo "1.install python"
echo "2.install python2"
echo "3.install python3"
echo "4.ping to wifi"
echo "5.exit..."
echo "6.clear your previous and current screen"
read -p "make your choice here... :" choice;
if [ "$choice" = "1" ]
then
clear
toilet -f big -F gay "kevin tools"
echo "python installing..."
apt update && apt upgrade
apt install python
echo "python installed!"
fi
if [ "$choice" = "2" ]
then
clear
toilet -f big -F gay "kevin tools"
echo "python2 installing..."
apt update && apt upgrade
apt install python2
echo "python2 installed"
fi
if [ "$choice" = "3" ]
then
clear
toilet -f big -F gay "kevin tools"
echo "python3 installing..."
apt update && apt upgrade
apt install python3
echo "python3 installed"
fi
if [ "$choice" = "4" ]
then
clear
read -p "choose the ip you want to ping:" ping
toilet -f big -F gay "kevin special ping to ip wifi"
ping -s1000 $ping
fi
if [ "$choice" = "5" ]
then
# Bug fix: 'break' is only valid inside a loop and this script has none;
# the intent is to leave the script.
exit 0
fi
if [ "$choice" = "6" ]
then
clear
fi
| true
|
8935ec1e64180e13c669ec1b64126a6829301b5d
|
Shell
|
lalmeras/clickable_bootstrap
|
/dev-setup.sh
|
UTF-8
| 1,870
| 3.875
| 4
|
[] |
no_license
|
#! /bin/bash
# vim: tabstop=4 shiftwidth=4 softtabstop=4 expandtab ai
# Bootstrap a development environment: install Miniconda if conda is not
# available, create a main tools environment, then one runtime
# environment per supported Python version.
set -e
conda_prefix=.conda
# default values for MINICONDA_LOCATION and CONDA_ENV
: ${MINICONDA_LOCATION:=~/.miniconda}
: ${MINICONDA_URL:=https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh}
: ${CONDA_ENV:=main}
echo "Detect conda"
# load conda activation script if not loaded
# Bug fix: the original `! [ type conda &>/dev/null || which conda ... ]`
# wrapped commands inside `[ ... ]`, which is invalid test syntax.
# `command -v` is the portable way to check for an available command.
if ! command -v conda > /dev/null 2>&1; then
    if ! [ -f "${MINICONDA_LOCATION}/bin/activate" ]; then
        echo "Conda not detected, installing in ${MINICONDA_LOCATION}"
        curl -L -O "${MINICONDA_URL}"
        bash "$( basename "${MINICONDA_URL}" )" -u -b -p "${MINICONDA_LOCATION}"
        rm "$( basename "${MINICONDA_URL}" )"
    fi
    echo "Load conda from ${MINICONDA_LOCATION}"
    source "${MINICONDA_LOCATION}/bin/activate"
fi
# create an environment with generic tools (tox, virtualenv, ...)
if ! [ -d "${conda_prefix}/${CONDA_ENV}" ]; then
    echo "Create main conda environment in ${conda_prefix}/${CONDA_ENV}"
    conda env create -f environment.yml -p "${conda_prefix}/${CONDA_ENV}"
fi
echo "Load main conda environment"
conda activate "${conda_prefix}/${CONDA_ENV}"
# create runtime environments, one per python version
envs=( py27 py34 py35 py36 py37 py38 )
declare -A versions
versions[py27]=2.7
versions[py34]=3.4
versions[py35]=3.5
versions[py36]=3.6
versions[py37]=3.7
versions[py38]=3.8
for env_name in "${envs[@]}"; do
    echo "Create and install ${env_name}"
    version="${versions[${env_name}]}"
    # `false` after the message makes set -e abort the script.
    conda create -q -y -p "${conda_prefix}/${env_name}" > /dev/null || \
        { echo "${env_name} creation failed; abort"; false; }
    # anaconda repo is needed for python 2.6, 3.4
    conda install -q -y -c conda-forge -p "${conda_prefix}/${env_name}" python="${version}" > /dev/null || \
        { echo "${env_name} installation failed; abort"; false; }
done
| true
|
851421c72094023d8a6e4618a88e3e960a8bfb3c
|
Shell
|
opendevstack/ods-core
|
/infrastructure-setup/conf/wildfly-init-redhat.sh
|
UTF-8
| 6,238
| 3.875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
set -e
#
# WildFly control script
#
# chkconfig: 2345 80 20
# description: WildFly startup/shutdown script
#
### BEGIN INIT INFO
# Provides: wildfly
# Required-Start: $remote_fs $network
# Required-Stop: $remote_fs $network
# Should-Start: $named
# Should-Stop: $named
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: WildFly Application Server
# Description: WildFly startup/shutdown script
### END INIT INFO
# Source function library.
# Provides the RedHat init helpers: status, daemon, success, failure, warning.
. /etc/init.d/functions
#NAME=$(readlink -f ${0} | xargs basename)
NAME=wildfly
echo $NAME
# Check privileges
if [ `id -u` -ne 0 ]; then
echo "You need root privileges to run this script"
exit 1
fi
# Load wildfly init.d configuration.
if [ -z "$JBOSS_CONF" ]; then
JBOSS_CONF="/etc/default/${NAME}.conf"
fi
echo $JBOSS_CONF
# Set defaults.
if [ -f "$JBOSS_CONF" ]; then
. "$JBOSS_CONF"
fi
# Location of JDK
if [ -n "$JAVA_HOME" ]; then
export JAVA_HOME
fi
# Setup the JVM
if [ -z "$JAVA" ]; then
if [ -n "$JAVA_HOME" ]; then
JAVA="$JAVA_HOME/bin/java"
else
JAVA="java"
fi
fi
# Location of wildfly
if [ -z "$JBOSS_HOME" ]; then
JBOSS_HOME="/opt/${NAME}"
fi
export JBOSS_HOME
# Check if wildfly is installed
if [ ! -f "$JBOSS_HOME/jboss-modules.jar" ]; then
echo "$NAME is not installed in \"$JBOSS_HOME\""
exit 1
fi
# Run as wildfly user
if [ -z "$JBOSS_USER" ]; then
JBOSS_USER=wildfly
fi
# Check wildfly user
id $JBOSS_USER > /dev/null 2>&1
if [ $? -ne 0 -o -z "$JBOSS_USER" ]; then
echo "User \"$JBOSS_USER\" does not exist..."
exit 1
fi
# Check owner of JBOSS_HOME
if [ ! $(stat -L -c "%U" "$JBOSS_HOME") = $JBOSS_USER ]; then
echo "The user \"$JBOSS_USER\" is not owner of \"$(readlink -f $JBOSS_HOME)\""
echo "Try: chown -R $JBOSS_USER:$JBOSS_USER \"$(readlink -f $JBOSS_HOME)\""
exit 1
fi
# Location to set the pid file
if [ -z "$JBOSS_PIDFILE" ]; then
JBOSS_PIDFILE=/var/run/wildfly/${NAME}.pid
fi
export JBOSS_PIDFILE
# Location to set the lock file
if [ -z "$JBOSS_LOCKFILE" ]; then
JBOSS_LOCKFILE=/var/lock/subsys/${NAME}
fi
# Location to keep the console log
if [ -z "$JBOSS_CONSOLE_LOG" ]; then
JBOSS_CONSOLE_LOG=/var/log/${NAME}/console.log
fi
# The amount of time to wait for startup
if [ -z "$STARTUP_WAIT" ]; then
STARTUP_WAIT=30
fi
# The amount of time to wait for shutdown
if [ -z "$SHUTDOWN_WAIT" ]; then
SHUTDOWN_WAIT=30
fi
# Startup mode of wildfly (standalone or domain)
if [ -z "$JBOSS_MODE" ]; then
JBOSS_MODE=standalone
fi
if [ -z "$JBOSS_BASE_DIR" ]; then
JBOSS_BASE_DIR="$JBOSS_HOME/$JBOSS_MODE"
else
JBOSS_OPTS="$JBOSS_OPTS -Djboss.server.base.dir=$JBOSS_BASE_DIR"
fi
# Marker file written by WildFly during boot; start() polls it to decide
# whether startup succeeded.
JBOSS_MARKERFILE=$JBOSS_BASE_DIR/tmp/startup-marker
# Startup mode script
if [ "$JBOSS_MODE" = "standalone" ]; then
JBOSS_SCRIPT=$JBOSS_HOME/bin/standalone.sh
if [ -z "$JBOSS_CONFIG" ]; then
JBOSS_CONFIG=standalone.xml
fi
else
JBOSS_SCRIPT=$JBOSS_HOME/bin/domain.sh
if [ -z "$JBOSS_DOMAIN_CONFIG" ]; then
JBOSS_DOMAIN_CONFIG=domain.xml
fi
if [ -z "$JBOSS_HOST_CONFIG" ]; then
JBOSS_HOST_CONFIG=host.xml
fi
fi
# Helper function to check status of wildfly service
# Helper function to check status of wildfly service
# Wraps the RedHat 'status' helper (from /etc/init.d/functions); by its
# use in stop(): 0=running, 1=dead but pidfile exists, 2=dead but subsys
# locked, 3=stopped.
check_status() {
status -p "$JBOSS_PIDFILE" -l $(basename "$JBOSS_LOCKFILE") "$NAME" >/dev/null 2>&1
}
# Start WildFly as $JBOSS_USER in the background and poll the startup
# marker file until success/error is reported or STARTUP_WAIT expires.
start() {
echo -n $"Starting $NAME: "
check_status
status_start=$?
# status 3 == currently stopped; anything else falls through to `$0 status`.
if [ $status_start -eq 3 ]; then
mkdir -p $(dirname "$JBOSS_PIDFILE")
mkdir -p $(dirname "$JBOSS_CONSOLE_LOG")
chown $JBOSS_USER $(dirname "$JBOSS_PIDFILE") || true
cat /dev/null > "$JBOSS_CONSOLE_LOG"
# Millisecond timestamp used to ignore stale marker files from earlier boots.
currenttime=$(date +%s%N | cut -b1-13)
if [ "$JBOSS_MODE" = "standalone" ]; then
cd $JBOSS_HOME >/dev/null 2>&1
daemon --user=$JBOSS_USER --pidfile=$JBOSS_PIDFILE LAUNCH_JBOSS_IN_BACKGROUND=1 JBOSS_PIDFILE=$JBOSS_PIDFILE "$JBOSS_SCRIPT -c $JBOSS_CONFIG $JBOSS_OPTS &" >> $JBOSS_CONSOLE_LOG 2>&1
cd - >/dev/null 2>&1
else
cd $JBOSS_HOME >/dev/null 2>&1
daemon --user=$JBOSS_USER --pidfile=$JBOSS_PIDFILE LAUNCH_JBOSS_IN_BACKGROUND=1 JBOSS_PIDFILE=$JBOSS_PIDFILE "$JBOSS_SCRIPT --domain-config=$JBOSS_DOMAIN_CONFIG --host-config=$JBOSS_HOST_CONFIG $JBOSS_OPTS &" >> $JBOSS_CONSOLE_LOG 2>&1
cd - >/dev/null 2>&1
fi
count=0
# Poll once per second for up to STARTUP_WAIT seconds.
until [ $count -gt $STARTUP_WAIT ]
do
sleep 1
let count=$count+1;
if [ -f $JBOSS_MARKERFILE ]; then
markerfiletimestamp=$(grep -o '[0-9]\+' $JBOSS_MARKERFILE) > /dev/null
# Only trust marker files written after this start attempt began.
if [ "$markerfiletimestamp" -gt "$currenttime" ] ; then
grep -i 'success:' $JBOSS_MARKERFILE > /dev/null
if [ $? -eq 0 ]; then
success
echo
touch $JBOSS_LOCKFILE
exit 0
fi
grep -i 'error:' $JBOSS_MARKERFILE > /dev/null
if [ $? -eq 0 ]; then
warning
echo
echo "$NAME started with errors, please see server log for details."
touch $JBOSS_LOCKFILE
exit 0
fi
fi
fi
done
# Timed out: the process may still be running without a marker verdict.
if check_status; then
warning
echo
echo "$NAME hasn't started within the timeout allowed."
touch $JBOSS_LOCKFILE
exit 0
else
failure
echo
echo "$NAME failed to start within the timeout allowed."
exit 1
fi
else
echo
$0 status
fi
}
# Stop WildFly: send SIGTERM to the recorded pid, wait up to
# SHUTDOWN_WAIT seconds, escalate to SIGKILL, then clean up the pid and
# lock files regardless of the prior state.
stop() {
echo -n $"Shutting down $NAME: "
check_status
status_stop=$?
if [ $status_stop -eq 0 ]; then
count=0;
if [ -f $JBOSS_PIDFILE ]; then
read kpid < $JBOSS_PIDFILE
let kwait=$SHUTDOWN_WAIT
# Try issuing SIGTERM
kill -15 $kpid
until [ `ps --pid $kpid 2> /dev/null | grep -c $kpid 2> /dev/null` -eq '0' ] || [ $count -gt $kwait ]
do
sleep 1
let count=$count+1;
done
# Still alive after the grace period: force-kill.
if [ $count -gt $kwait ]; then
kill -9 $kpid
fi
fi
success
elif [ $status_stop -eq 1 ]; then
echo
echo -n "$NAME dead but pid file exists, cleaning up"
elif [ $status_stop -eq 2 ]; then
echo
echo -n "$NAME dead but subsys locked, cleaning up"
elif [ $status_stop -eq 3 ]; then
echo
echo -n $"$NAME is already stopped"
fi
rm -f $JBOSS_PIDFILE
rm -f $JBOSS_LOCKFILE
echo
}
# Standard init-script dispatcher on the first argument.
case "$1" in
start)
start
;;
stop)
stop
;;
restart)
# Re-invoke this script so each phase runs with a fresh environment.
$0 stop
$0 start
;;
status)
status -p "$JBOSS_PIDFILE" -l $(basename "$JBOSS_LOCKFILE") "$NAME"
;;
*)
## If no parameters are given, print which are avaiable.
echo "Usage: $0 {start|stop|restart|status}"
exit 1
;;
esac
| true
|
19743a855807c94ae423658f2e2dc8a0a9beb2b7
|
Shell
|
curquiza/LibftASM
|
/tests/cat_tests.sh
|
UTF-8
| 599
| 3.734375
| 4
|
[] |
no_license
|
#!/bin/bash
# Compare the output of the assembly cat re-implementation (c_cat_tests)
# against the system cat for every file in the input folder, then
# exercise the error paths (negative fd, reading from stdin).
# Bug fix: the shebang was '#/bin/bash' (missing '!'), so the kernel
# never selected bash as the interpreter.  File variables are now quoted
# so names with spaces work.
GREEN="\033[1;32m"
RED="\033[1;31m"
YELLOW="\033[1;33m"
DEF="\033[0m"
ft_cat="./c_cat_tests"
input_folder="tests/input_files"
for file in "$input_folder"/*; do
	ft_ret="$($ft_cat "$file")"
	ret="$(cat "$file")"
	if [[ "$ft_ret" == "$ret" ]]; then
		printf "%-50s$GREEN%s$DEF\n" "$file" "OK"
	else
		printf "%-50s$RED%s$DEF\n" "$file" "KO"
	fi
done
printf "\nNEG_FD:\n"
# A negative fd must produce no output.
ft_ret="$($ft_cat "NEG_FD")"
if [[ "$ft_ret" == "" ]]; then
	printf "$GREEN%s$DEF\n" "PASSED"
else
	printf "$RED%s$DEF\n" "FAILED"
fi
printf "\nReading on STDIN:\n"
$ft_cat "STDIN"
| true
|
7c9fa756144ae28ddc778e63000aae9fc5deab65
|
Shell
|
ashrafsharif/gh-ost-trigger
|
/gh-ost-trigger.sh
|
UTF-8
| 6,034
| 3.765625
| 4
|
[] |
no_license
|
#!/bin/bash
# This script will run gh-ost against a table with or without trigger.
# If a trigger exist for the table, it will make a backup of it and restore
# it again after gh-ost finishes.
# Supported options: run, test
# Example:
# ./gh-ost-trigger.sh test
# ./gh-ost-trigger.sh run
#### Update this ###
# This MySQL host
HOST='192.168.0.82'
# Master MySQL server (to restore trigger)
MHOST='192.168.0.81'
# Database
DB='mydatabase'
# Table name
TABLE='mytable'
# Alter statement
ALTER_STMT="MODIFY COLUMN depositor_account varchar(30) CHARACTER SET utf8 COLLATE utf8_general_ci DEFAULT '' NOT NULL COMMENT 'Depositor bank account no', ADD COLUMN fdo_locked SMALLINT(1) NOT NULL DEFAULT 0 AFTER fdo_changestatus"
###
### Setting ###
# gh-ost postpone failover. Set to one will use a postpone flag file
POSTPONE_CUTOVER=0
# remove trigger if exists (will be restored back once gh-ost finishes)
REMOVE_TRIGGER=1
# gh-ost's max_load=Threads_connected value
MAX_THREADS=40
# gh-ost's chunk size
CHUNK_SIZE=5000
# gh-ost's login dir, e.g, /root/.gh-ost.cnf
GH_OST_LOGIN=/root/.gh-ost.cnf
###
# First argument selects the mode (run or test).
if [ -z "$1" ]; then
echo 'Option to specify: run, test'
exit 0
else
OPTION=$1
fi
# Working files: postpone flag, trigger backup location, temp list of
# trigger names.
PFILE=/tmp/ghost.postpone.flag
TRIGGERDIR=/root/gh-ost/${DB}
TRIGGERFILE=${TRIGGERDIR}/${DB}_${TABLE}_triggers.sql
TMPFILE=/tmp/st
GOT_TRIGGER=0
# MySQL credentials are pulled out of the gh-ost login file.
GH_USER=$(cat ${GH_OST_LOGIN} | grep user | sed 's/^user=//g')
GH_PASS=$(cat ${GH_OST_LOGIN} | grep password | sed 's/^password=//g')
# If ${DB}.${TABLE} has triggers: dump them to ${TRIGGERFILE}, then drop
# each one on the master (${MHOST}) so gh-ost can run.  Sets GOT_TRIGGER=1
# when triggers were found; aborts the script if the backup fails.
backup_remove_triggers() {
echo "[Script] Checking if ${DB}.${TABLE} has triggers..."
check_trigger=$(mysql -u${GH_USER} -p${GH_PASS} -A -Bse "select trigger_name from information_schema.triggers where trigger_schema = '${DB}' and event_object_table = '${TABLE}'")
if [ ! -z "$check_trigger" ]; then
GOT_TRIGGER=1
# Re-run the query to capture one trigger name per line in TMPFILE.
mysql -u${GH_USER} -p${GH_PASS} -A -Bse "select trigger_name from information_schema.triggers where trigger_schema = '${DB}' and event_object_table = '${TABLE}'" > $TMPFILE
no_of_triggers=$(cat $TMPFILE | wc -l)
echo "[Script] Found $no_of_triggers trigger(s).."
[ -d $TRIGGERDIR ] || mkdir -p $TRIGGERDIR
echo "[Script] Backing up triggers for table ${DB}.${TABLE}"
mysqldump --triggers --no-data --no-create-info ${DB} ${TABLE} > ${TRIGGERFILE}
if [ $? -eq 0 ]; then
echo "[Script] Triggers backed up at ${TRIGGERFILE}"
echo "[Script] Removing triggers for ${DB}.${TABLE}"
if [ -e ${TRIGGERFILE} ]; then
# Only drop the triggers once the backup file exists on disk.
for i in $(cat $TMPFILE); do
echo "[Script] Deleting $i on database ${DB} and table ${TABLE} on ${MHOST}"
mysql -u${GH_USER} -p${GH_PASS} -h${MHOST} -P3306 -e "DROP TRIGGER ${DB}.${i}"
[ $? -eq 0 ] && echo '.........OK' || exit 1
done
fi
echo "[Script] We can now safe to perform schema change operation.."
else
echo "[Script] Failed to backup triggers. Nothing is changed. Aborting.."
exit 1
fi
else
echo "[Script] Found no trigger.. We can proceed to schema change operation.."
fi
}
# Re-apply the previously backed-up triggers on the master; on failure,
# print the manual restore command and abort.
restore_add_triggers () {
echo "[Script] Restoring triggers on master: ${MHOST}.."
#mysql -u ${DB} < ${TRIGGERFILE}
mysql -u${GH_USER} -p${GH_PASS} -h${MHOST} -P3306 ${DB} < ${TRIGGERFILE}
if [ $? -eq 0 ]; then
echo "[Script] Triggers restored."
else
echo "[Script] Triggers restoration failed. Try to do manually on master:"
echo "mysql -u${GH_USER} -p${GH_PASS} -h${MHOST} -P3306 ${DB} < ${TRIGGERFILE}"
exit 1
fi
}
# --- main ---------------------------------------------------------------
# Shared gh-ost arguments; deduplicated from the three near-identical
# invocations in the original (run/immediate, run/postponed, test).
GHOST_ARGS=(
  --host=${HOST}
  --conf=${GH_OST_LOGIN}
  --database=${DB}
  --table=${TABLE}
  --alter="${ALTER_STMT}"
  --chunk-size=${CHUNK_SIZE}
  --max-load=Threads_connected=${MAX_THREADS}
  --exact-rowcount
  --concurrent-rowcount
  --verbose
)
if [ "$OPTION" == "run" ]; then
  if [ $POSTPONE_CUTOVER -eq 1 ]; then
    [ -e $PFILE ] || touch $PFILE
    echo "[Script] Cutover is postponed until you remove $PFILE"
    # Pass the postpone flag file through to gh-ost below.
    GHOST_ARGS+=( --postpone-cut-over-flag-file=${PFILE} )
  else
    [ -e $PFILE ] && rm -f $PFILE
    echo '[Script] Cutover will be immediate!!'
  fi
  if [ $REMOVE_TRIGGER -eq 1 ]; then
    backup_remove_triggers
  fi
  echo "DB : ${DB}"
  echo "TABLE : ${TABLE}"
  echo "ALTER STATEMENT: ${ALTER_STMT}"
  echo
  read -p "Confirm to start the exercise? [Y for yes, others for no]: " -n 1 -r
  echo
  # Abort on anything but y/Y; remind the operator to restore triggers.
  if [[ ! $REPLY =~ ^[Yy]$ ]]
  then
    [ $GOT_TRIGGER -eq 1 ] && echo "[Script] You have to restore the triggers manually" && echo "mysql ${DB} < ${TRIGGERFILE}"
    exit 1
  fi
  echo
  echo "################# Over to gh-ost #################"
  gh-ost "${GHOST_ARGS[@]}" --execute
  # Exit status of gh-ost (currently informational only).
  RESULT=$?
  if [ $REMOVE_TRIGGER -eq 1 ]; then
    if [ $GOT_TRIGGER -eq 1 ]; then
      restore_add_triggers
    fi
  fi
  [ -e $TMPFILE ] && rm -f $TMPFILE
  echo
  echo "[Script] Process completes"
elif [ "$OPTION" == "test" ]; then
  # Dry run: identical arguments but without --execute.
  gh-ost "${GHOST_ARGS[@]}"
else
  echo '[Script] Unknown option'
  exit 1
fi
| true
|
4e8a45042bd303061c0b0e17b2c1c6947d1facb9
|
Shell
|
tnotstar/tnotbox
|
/Utils/Fossil/Learn/QuickStart/working/tnothome-fossil/etc/bash/profile
|
UTF-8
| 194
| 2.828125
| 3
|
[] |
no_license
|
#
# ~/Local/etc/bash/profile -> ~/.bash_profile
#
# Pull in the interactive shell configuration, but only when actually
# running under bash and only if the rc file exists.
if [ -n "$BASH_VERSION" ] && [ -f "$HOME/.bashrc" ]; then
    source "$HOME/.bashrc"
fi
# EOF
| true
|
10fa965da6554c3cc1b07fba2bb8d7bef73341f3
|
Shell
|
TheEnbyperor/usable-git
|
/d
|
UTF-8
| 485
| 2.8125
| 3
|
[] |
no_license
|
#!/bin/bash
# Show a rich three-part diff (stat, whitespace check, detailed) of the
# working tree against the main remote branch: prefers upstream/master
# when an 'upstream' remote exists, otherwise origin/master.
# Extra arguments are forwarded to every git diff invocation.
set -e
# Pick the comparison base once instead of duplicating the three diff
# commands per remote; quote "$@" so pathspecs with spaces survive.
if git remote | grep --fixed-strings --line-regexp --quiet 'upstream'; then
    base=upstream/master
else
    base=origin/master
fi
git diff "$base" --stat "$@"
git diff --check "$base" "$@"
git diff --find-copies-harder --ignore-space-change --inter-hunk-context=10 --color --unified=10 "$base" "$@"
| true
|
e19d20769caf8f093d4a0c81f67926822bd4ff36
|
Shell
|
qcontom/Hacking-Protocol
|
/HackingProtocol Pieces/HackingProtocol Pieces/lvl 3
|
UTF-8
| 4,594
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/bash
# Level 3 of the "Hacking Protocol" text game: the player explores a
# fake shell, reads files, and must raise the clearance level to 3 via
# the hidden employeeClearance tool.
#
# Fix: the original source did not parse — several case arms were
# missing their closing 'fi', 'let $clearancelvl=2' expanded to the
# invalid 'let 2=2', the 'jjanitor' sub-case lacked a ';;' before its
# default arm, and a stray 'read response' trailed the file. The
# prompts and messages are unchanged.

let y=1              # main loop guard
let z=1              # clearance sub-shell loop guard
let clearancelvl=2   # player's current clearance level
let used=0           # 1 once 'clearancelvl @a + 1' has been used
let fileuse=0        # 1 once favor.fyle has been read

echo "------------------------------------"
echo "CONGRADULATIONS. WELCOME TO LEVEL 3 "
echo "------------------------------------"
echo " "

while [ $y -eq 1 ]
do
    echo "Complex: "
    read maininput
    case $maininput in
        "help")
            echo "help: displays this help "
            echo "ls: lists files "
            echo "cat: use with conjunction with a filename to display the contents of a file "
            echo "motd: displays main objective "
            echo "sudo: run a command as an administrator "
            ;;
        "motd")
            echo "(OBJECTIVE: get the right level of clearance to proceed) "
            ;;
        "cat")
            echo "use with conjunction with a filename to display the contents of a file "
            echo "USAGE: cat <filename> "
            ;;
        "ls")
            # The visible file listing depends on the clearance level.
            if [ "$clearancelvl" -eq 1 ]
            then
                echo "files: "
                echo "favor.fyle"
            elif [ "$clearancelvl" -eq 2 ]
            then
                echo "files: "
                echo "HIDDEN.fyle"
                echo "employees.data"
            elif [ "$clearancelvl" -eq 3 ]
            then
                echo " "
            fi
            ;;
        "sudo")
            echo "sudo: run a command as an administrator "
            echo "USAGE: sudo <command> "
            echo "EXAMPLE: sudo cat <filename> "
            ;;
        "cat favor.fyle")
            echo "This fyle can only be opened by and administrator! "
            ;;
        "sudo cat favor.fyle")
            # The favor file may only be read once per game.
            if [ "$fileuse" -eq 0 ]
            then
                echo "-------------------------------------------------------"
                echo "John, i need a favor."
                echo " I need you to up my clearance level for today only, "
                echo " so I can acsess some of the files I have stored on "
                echo " the cloud. Just type in 'clearancelvl @a + 1' ."
                echo " Thanks! "
                echo "-------------------------------------------------------"
                let fileuse=1
            else
                echo "You can only open this file once! "
            fi
            ;;
        "lvl")
            echo "current level: $clearancelvl "
            ;;
        "clearancelvl @a + 1")
            # One-shot command hinted at by favor.fyle.
            if [ "$used" -eq 0 ]
            then
                let clearancelvl=2
                let used=1
                echo " Clearance level upped to level 2 "
            else
                echo "You cannot use that command twice! "
            fi
            ;;
        "cat HIDDEN.fyle")
            echo "This fyle can only be opened by and administrator! "
            ;;
        "sudo cat HIDDEN.fyle")
            if [ "$clearancelvl" -eq 2 ]
            then
                echo "-------------------------------------------------------"
                echo "HIDDEN: Upping the clearance level for a employee: "
                echo "Type in 'employeeClearance', then their name and id number, "
                echo "and add in the level of clearance they need. Thats it! "
                echo "-------------------------------------------------------"
            else
                echo "Your clearance level isnt high enough to view this file! "
            fi
            ;;
        "cat employees.data")
            if [ "$clearancelvl" -eq 2 ]
            then
                echo "-------------------------------------------------------"
                echo "John Brown: user: jbrown id: 255 "
                echo "Mr. Legend: user: ****** id: *******"
                echo "Joe Janitor: user: jjanitor id: 279"
            else
                echo "Your clearance level isnt high enough to view this file! "
            fi
            ;;
        "employeeClearance")
            if [ "$clearancelvl" -ge 2 ]
            then
                # Nested clearance-management shell.
                while [ $z -eq 1 ]
                do
                    echo "Clearance: "
                    read clearanceinput
                    case $clearanceinput in
                        "help")
                            echo "help: displays this help "
                            echo "setlevel: set clearance level of an employee "
                            echo "exit: return to main "
                            ;;
                        "exit")
                            echo " exiting... "
                            break
                            ;;
                        "setlevel")
                            echo "user"
                            read clearanceuser
                            case $clearanceuser in
                                "jbrown")
                                    echo "id: "
                                    read clearanceid
                                    case $clearanceid in
                                        "255")
                                            echo "level: "
                                            read clearancelevel
                                            case $clearancelevel in
                                                "3")
                                                    echo "Clearance Level of jbrown set to 3! "
                                                    let clearancelvl=3
                                                    # leave the inner clearance loop
                                                    break
                                                    ;;
                                                *)
                                                    echo "invalid level! "
                                                    ;;
                                            esac
                                            ;;
                                        *)
                                            echo "invalid id! "
                                            ;;
                                    esac
                                    ;;
                                "jjanitor")
                                    echo "id: "
                                    read clearanceid
                                    case $clearanceid in
                                        "279")
                                            echo "level: "
                                            read clearancelevel
                                            case $clearancelevel in
                                                "2")
                                                    echo "Clearance level of jjanitor set to 2! "
                                                    ;;
                                                "3")
                                                    echo "Clearance level of jjanitor set to 3! "
                                                    ;;
                                                *)
                                                    echo "invalid level! "
                                                    ;;
                                            esac
                                            ;;
                                        *)
                                            echo "invalid id! "
                                            ;;
                                    esac
                                    ;;
                                *)
                                    echo "invalid user! "
                                    ;;
                            esac
                            ;;
                    esac
                done
            else
                echo "Clearance level not high enough! "
            fi
            ;;
        *)
            echo "Unknown command. Type help for a list of commands "
            ;;
    esac
done
| true
|
d3d462c0f91f63086aa5125ff5f64a5edc5cfaa7
|
Shell
|
javieitez/scripts
|
/check_url_grepdate.sh
|
UTF-8
| 1,611
| 4.1875
| 4
|
[] |
no_license
|
#!/bin/bash
# Author: JA Vieitez (github.com/javieitez)
# Quick and dirty script to find a Date stamp in a URL. This is useful for checking logs to be up to date, or to check a proper refreshing of a given webservice.
# Check the date command man page for date options
#
# Fix: the timestamp was built with `date '$3'` — single quotes kept $3
# from expanding, so date always received the literal string '$3',
# failed, and the script could never succeed. It now uses date "$3".

# help requested or invalid parameters
#############################################################
printhelp () {
    echo "find the current date and time in a web page"
    echo ""
    echo "Usage: check_url_grepdate hostname path date_options [--http]"
    echo ""
    echo "HTTPS is enabled by default, use -HTTP to disable it"
    echo "Example: check_url_grepdate www.example.com /log.html '+%Y%m%d\ %H:%M'"
    exit 3
}
if [ "$1" == '-h' ] || [ "$1" == '--help' ] || [ -z "$1" ] || [ -z "$2" ]
then
    printhelp
fi
#############################################################
# Set to http if specified, default to https
PROTOCOL="https"
if [ "$4" == "--http" ]
then
    PROTOCOL="http"
fi
#############################################################
# Generate the date timestamp from the caller-supplied format ($3).
MYTIMESTAMP=$(date "$3")
if [ "$3" == "" ] || [ "$MYTIMESTAMP" == "" ]
then
    printf "\nNo valid operand supplied. Please provide a valid date one.\n\n\n\n"
    printhelp
fi
# Run the command: fetch the page and look for the timestamp.
curl --silent "$PROTOCOL://$1$2" | grep "$MYTIMESTAMP"
# capture the exit code (non-zero when grep found no match)
LASTEXITCODE=$?
# return output based on the exit code (Nagios-style OK/CRITICAL)
if [ "$LASTEXITCODE" == 0 ]
then
    # No alarms? Ok, everything is right.
    printf "\nOK - $MYTIMESTAMP found in $PROTOCOL://$1$2\n"
    exit 0
else
    printf "\nCRITICAL - $MYTIMESTAMP NOT FOUND IN $PROTOCOL://$1$2\n"
    exit 2
fi
| true
|
22f63d597e54995a5d7c1a0941964e8f8e7b0740
|
Shell
|
lenik/uni
|
/java/coolmaven/set-layout.in
|
UTF-8
| 1,752
| 3.8125
| 4
|
[] |
no_license
|
#!/bin/bash
# Move module directories into the layer directories named in a layout
# file (default: LAYERS), then fix up the Maven poms.
: ${RCSID:=$Id: - @VERSION@ @DATE@ @TIME@ - $}
: ${PROGRAM_TITLE:="Adjust the dir layout as the specified"}
: ${PROGRAM_SYNTAX:="[OPTIONS] [--] ..."}

# cliboot supplies option/help/boot/quit/die/_log* — TODO confirm; not
# visible from this file.
. shlib-import cliboot

option --dry-run "Only show what to do"
option -q --quiet
option -v --verbose
option -h --help
option --version

dry_run=

# Option callback invoked by the cliboot framework for each parsed flag.
function setopt() {
    case "$1" in
        --dry-run)
            dry_run=1;;
        -h|--help)
            help $1; exit;;
        -q|--quiet)
            LOGLEVEL=$((LOGLEVEL - 1));;
        -v|--verbose)
            LOGLEVEL=$((LOGLEVEL + 1));;
        --version)
            show_version; exit;;
        *)
            quit "invalid option: $1";;
    esac
}

function main() {
    # With no argument, read the layout from a file named LAYERS.
    if [ $# = 0 ]; then set LAYERS; fi

    layer=
    # Layout file format: blank lines and '#' comments are skipped,
    # 'layer-*' lines open a new layer section, any other word names a
    # module directory to move into the current layer.
    while read name comments; do
        [ -z "$name" ] && continue
        [ "${name:0:1}" = '#' ] && continue
        if [ "${name:0:6}" = 'layer-' ]; then
            layer="$name"
            continue
        fi

        # Locate the module's current parent directory by globbing.
        # NOTE(review): without nullglob an unmatched glob survives as
        # one literal element, so the "Not existed" branch may be
        # unreachable — TODO confirm shell options set by cliboot.
        from=(*/$name)
        case "${#from[@]}" in
            0)
                die "Not existed: $name";;
            1)
                from="${from%/*}"
                _log1 "Move $name from $from to $layer"
                ;;
            *)
                # More than one parent directory contains the module.
                _log1 "$name is ambiguous: "
                for f in "${from[@]}"; do
                    _log2 "    $f"
                done
                exit 1
                ;;
        esac

        # Already in the right layer, or dry-run: nothing to do.
        [ "$from" = "$layer" ] && continue
        [ "$dry_run" = 1 ] && continue

        if [ ! -d "$layer" ]; then
            _log1 "Create directory $layer/"
            mkdir "$layer" || die "Failed to create"
        fi
        # Move under version control so history is preserved.
        vcs mv $from/$name $layer/$name
    done <"$1"

    _log1 "Fix the poms."
    ./fix-layer-poms
}

boot "$@"
| true
|
a65efc3eb826cd3636ca25e90e493eee1de05882
|
Shell
|
Haufe-Lexware/wicked.haufe.io
|
/src/test/portal-auth/test.sh
|
UTF-8
| 796
| 2.734375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Integration-test driver for portal-auth: wait until the dependent
# services answer /ping, run the mocha suite, then ask the services to
# shut themselves down so the compose environment can exit.

echo Running as `whoami`

echo Waiting for portal-api...
node node_modules/portal-env/await.js http://portal-api:3001/ping
echo Ping returned 200 for portal-api
echo Waiting for portal-kong-adapter...
node node_modules/portal-env/await.js http://portal-kong-adapter:3002/ping
echo Ping returned 200 for portal-kong-adapter
echo Waiting for portal-auth...
node node_modules/portal-env/await.js http://portal-auth:3010/ping
echo Ping returned 200 for portal-auth

mkdir test_results
# A failing suite must not abort this script; a marker file records the
# failure for the CI job to pick up instead.
mocha > test_results/auth-test.log || echo Integration tests failed. See log. > test_results/KONG_FAILED

echo Trying to kill portal-kong-adapter...
curl -X POST http://portal-kong-adapter:3002/kill
echo Trying to kill portal-api...
curl -X POST http://portal-api:3001/kill
sleep 2
echo Exiting.
| true
|
2b92f5f26222b10f5d4646f207897290dee3b2e8
|
Shell
|
shavchen/env_install_scripts
|
/jdk_install.sh
|
UTF-8
| 1,457
| 3.953125
| 4
|
[] |
no_license
|
#!/bin/bash
# Install a JDK from a local jdk-<version>-*.tar.gz into
# /opt/java/jdk_<version> and register JAVA_* variables in /etc/profile.
# Usage: ./jdk_install.sh jdk-8u201-linux-x64.tar.gz
#
# Fix: the original first line was '#/bin/bash' (missing '!'), so the
# script was interpreted by whatever shell invoked it rather than bash.

# Derive the version and the install directory name from the tarball name.
ver="jdk_"`echo $1 | awk -F "-" '{print $2}'`
version=`echo $1 | awk -F "-" '{print $2}'`   # kept for compatibility (unused)

# Create /opt/java and unpack the tarball into /opt/java/$ver.
if [ ! -d "/opt/java" ]; then
    echo -e "\e[1;32m 已创建 /opt/java 目录 \e[0m"
    mkdir /opt/java
fi
if [ ! -f $1 ]; then
    echo -e "\e[1;31m 不存在 $1 , 请上传tar.gz文件到当前目录\e[0m"
    exit 1
else
    cp $1 /opt/java/$1
    echo -e "\e[1;32m 已移动 $1 到 /opt/java/$1\e[0m"
    echo -e "\e[1;32m 正在解压\e[0m"
    if [ ! -d "/opt/java/$ver" ]; then
        mkdir "/opt/java/$ver"
    fi
    # --strip-components 1 drops the tarball's top-level directory.
    tar zxvf /opt/java/$1 -C /opt/java/$ver --strip-components 1 > /dev/null 2>&1
    rm /opt/java/$1
    echo -e "\e[1;32m 已删除 /opt/java/$1"
    echo -e "\e[1;32m 解压完成\e[0m"
fi

# Back up /etc/profile; on repeated runs restore from the backup first
# so the JAVA_* block is not appended twice.
if [ -f "/etc/profile.bak" ]; then
    rm /etc/profile
    cp /etc/profile.bak /etc/profile
    echo -e "\e[0;32m 已备份 /etc/profile \e[0m"
else
    cp /etc/profile /etc/profile.bak
fi

# Append the environment variables for the freshly installed JDK.
if [ -d "/opt/java/$ver" ]; then
    echo -e "\e[1;32m 正在设置环境变量\e[0m"
    echo "# $ver" >> /etc/profile
    echo "JAVA_HOME=/opt/java/$ver" >> /etc/profile
    echo "JAVA_BIN=/opt/java/$ver/bin" >> /etc/profile
    echo 'PATH=$PATH:${JAVA_BIN}' >> /etc/profile
    echo 'CLASSPATH=$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar' >> /etc/profile
    echo "export JAVA_HOME JAVA_BIN PATH CLASSPATH" >> /etc/profile
    source /etc/profile
fi

# Smoke test: show the installed java version.
echo -e "\e[0;33m 执行 java -version \e[0m"
java -version
| true
|
775632d15439e56addd32a71b774a1cc890fb55c
|
Shell
|
vkvish19/confluent-schema-registry-and-rest-proxy
|
/kafka-avro-java-course/avro-tools.sh
|
UTF-8
| 705
| 2.53125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Download avro-tools and use it to inspect the course's example .avro
# files (dump payloads as JSON, print the embedded schema).
# NOTE(review): paths are hard-coded to one developer's machine.

cd /home/vishu/Softwares/Apache
#put this in any directory you like
wget http://central.maven.org/maven2/org/apache/avro/avro-tools/1.9.1/avro-tools-1.9.1.jar
AVRO_TOOL_JAR=/home/vishu/Softwares/Apache/avro-tools-1.9.1.jar
AVRO_PROJECT_FOLDER=/home/vishu/Learnings/Udemy/ApacheKafkaSeries/ConfluentSchemaRegistryAndRESTProxy/kafka-avro-java-course

#run this from your project folder.
cd $AVRO_PROJECT_FOLDER
# Pretty-print the Avro payloads as JSON.
java -jar $AVRO_TOOL_JAR tojson --pretty "$AVRO_PROJECT_FOLDER/customer-generic.avro"
java -jar $AVRO_TOOL_JAR tojson --pretty "$AVRO_PROJECT_FOLDER/customer-specific.avro"
#getting the schema
java -jar $AVRO_TOOL_JAR getschema "$AVRO_PROJECT_FOLDER/customer-specific.avro"
| true
|
dc4a59edc83d69e94fda4835a515ac3a2fadc270
|
Shell
|
jaytaylor/circus
|
/fswatch.sh
|
UTF-8
| 510
| 3.1875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Watch the quickstart/themes tree and regenerate the site on change,
# rate-limited to at most one rebuild every 2 seconds via a timestamp
# stored in /tmp/last.

set -o errexit
set -o pipefail
set -o nounset

cd "$(dirname "$0")"
cd 'quickstart'

# Epoch seconds of the last successful rebuild.
echo 0 > /tmp/last

# fswatch/xargs may exit nonzero; keep the outer loop alive regardless.
set +o errexit

while true ; do
    # For each change event, rebuild only if the last run was >2s ago.
    # shellcheck disable=SC2016
    fswatch -o -r -m poll_monitor themes \
        | xargs -n1 -IX /bin/bash -c 'now=$(date +%s) ; last="$(cat /tmp/last)" ; secs="$((now-last))" ; echo "INFO: last run was ${secs}s ago" 1>&2 ; if [ ${secs} -gt 2 ] ; then ../generate-site.sh && echo "${now}" > /tmp/last ; else echo "WARN: too soon" 1>&2 ; fi ;'
done
| true
|
2ec7855f91b1a744e120d92af2fa5f8bd1aa89b5
|
Shell
|
bradyhouse/house
|
/fiddles/bash/fiddle-0056-InstallLiveServer/test.sh
|
UTF-8
| 332
| 3.125
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
source script.sh

# Map the exit status of the "try" block to a human-readable message
# and exit with that same status.
catch() {
    case "$1" in
        0) echo "all tests succeeded"
           ;;
        1) echo "installLiveServer() failed";
           ;;
        *) echo "fubar! Something went wrong."
           ;;
    esac
    exit "$1"
}

# try: run the installer in a subshell so a failure maps to status 1.
(
    installLiveServer || exit 1
)
catch $?;
| true
|
02699438574843ea0a5679df1fb4dca376aff870
|
Shell
|
abcdabcd987/cse505
|
/homework/hw4/configure
|
UTF-8
| 507
| 3.3125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash

# Fail with a message if a required executable is missing from PATH.
ckcmd() {
    command -v "$1" > /dev/null 2>&1 && return
    echo "Error: could not find $1"
    exit 1
}

# Verify the build dependencies: coqc (must be 8.8.x) and ocamlbuild.
ckdeps() {
    ckcmd coqc
    ckcmd ocamlbuild
    coqc --version | grep -q 'version 8.8' && return
    echo "Error: bad coqc version, need Coq 8.8"
    exit 1
}

# Emit the contents of the _CoqProject file on stdout: one -Q mapping
# per directory under coq/, the Frap mapping, then all .v files.
coqproj() {
    local dir
    for dir in $(find coq -type d); do
        echo "-Q $dir Imp"
    done
    echo "-R ./frap Frap"
    echo
    find coq -iname '*.v'
}

# Entry point: check dependencies, then (re)generate _CoqProject.
main() {
    ckdeps
    coqproj > _CoqProject
}

main
| true
|
91d9552e9d4a870f448841d8fdaa6b947d5f2925
|
Shell
|
VanDerLars/MySQL-Upload-multiple-Dumps
|
/MySQL_upload_all.sh
|
UTF-8
| 1,714
| 3.6875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Upload every gzipped MySQL dump found in PATH_TO_BACKUP_FILES to the
# configured server, stripping CREATE ... DEFINER clauses first, then
# move each processed dump into a 'processed/' subdirectory.
# NOTE(review): 'echo "\n..."' prints a literal backslash-n under bash
# echo — presumably printf or echo -e was intended; TODO confirm.
# NOTE(review): "sed -i '' -E" is the BSD/macOS form; GNU sed would
# need plain '-i'.

# -------------------------------
# ----------- CONFIG ------------
# Set Database Information here
DB_USER="root"
DB_PSWD="PASSWORD_VERY_SECRET"
DB_HOST="127.0.0.1"
PATH_TO_BACKUP_FILES="mysql_backups/path/to/dumps"
# -------------------------------
# -------------------------------

# -------------------------------
# ------------ CODE ------------
echo "\nIMPORT START"
mkdir $PATH_TO_BACKUP_FILES"/processed"

# Process one dump: unzip, neutralise DEFINER clauses, re-zip, upload
# via MySQL_upload_single.sh, then archive the file under processed/.
# Arguments: $1 - path to a .sql.gz dump
function process() {
    filename="${1##*/}"
    echo "├ 🧨 Processing $1 ($filename)"
    echo "│" # ----
    echo "│┬ 📋 unzip file"
    gunzip $1
    unzipped=${1%.gz}
    echo "│├ 📎 new file: $unzipped"
    echo "│└ ✅"
    echo "│" # ----
    echo "│┬ 📋 replace definer in $unzipped"
    # $1 = echo LC_CTYPE=C && LANG=C && sed -E 's/CREATE(.*)FUNCTION/CREATE FUNCTION/g' $1;
    # $1 = echo LC_CTYPE=C && LANG=C && sed -E 's/CREATE(.*)PROCEDURE/CREATE PROCEDURE/g' $1;
    sed -i '' -E 's/CREATE(.*)FUNCTION/CREATE FUNCTION/g' $unzipped
    sed -i '' -E 's/CREATE(.*)PROCEDURE/CREATE PROCEDURE/g' $unzipped
    echo "│└ ✅"
    echo "│" # ----
    echo "│┬ 📋 re-zip file"
    gzip $unzipped
    echo "│└ ✅"
    echo "│" # ----
    echo "│┬ 📋 upload to MySQL Server"
    sh MySQL_upload_single.sh -u $DB_USER -p$DB_PSWD -h $DB_HOST $1
    echo "│└ ✅"
    echo "│" # ----
    echo "│┬ 📋 move zip file to processed directory"
    mv $1 $PATH_TO_BACKUP_FILES"/processed/$filename"
    echo "│└ ✅"
    echo "└ ✅"
    echo "─ 🔄 next"
}

# Only .sql / .sql.gz files are processed; everything else is skipped.
for i in $PATH_TO_BACKUP_FILES/*
do
    if [[ $i == *.sql ]] || [[ $i == *.sql.gz ]] ;
    then
        echo "\n┬ 🎁 PROCESS: $i"
        process $i
    else
        echo "\n─ 🔄 SKIPPED: $i"
    fi
done
echo "\n✅ IMPORT DONE"
| true
|
cfed106e29e7e277e2bca3491ee585fee45a02eb
|
Shell
|
obuk/use-groff
|
/script/generate-font.sh
|
UTF-8
| 1,041
| 3.8125
| 4
|
[] |
no_license
|
#!/bin/sh
# Derive a styled (Bold/Italic/BoldItalic) variant of a font by feeding
# a generated script to FontForge.
# Usage: generate-font.sh [-s size] [-t .ext] [-n subname] style fontfile
set -eu
PATH=/usr/local/bin:$PATH

usage="usage: $(basename $0) [-av] [-s size] [-t .ext] [-n subname] style fontfile"

subname=""
while getopts n:s:t: OPT
do
    case $OPT in
        s) size=$OPTARG ;;       # em size passed to ScaleToEm
        t) type=$OPTARG ;;       # output extension, e.g. .ttf
        n) subname=$OPTARG ;;    # explicit subfamily name override
        *) echo $usage >&2; exit 2 ;;
    esac
done
size=${size-1024}
type=${type-.ttf}
shift $((OPTIND - 1))

style=${1:?"$usage"}
font=${2:?"$usage"}

# Map the style code to a subfamily name plus a FontForge transform
# (faux-bold via ExpandStroke, faux-italic via a 13-degree skew).
transform=""
case "$style" in
    R) ;;
    B) subname="Bold"
       transform="ExpandStroke(50, 0, 1, 0, 1)" ;;
    I) subname="Italic"
       transform="Skew(13)" ;;
    BI) subname="BoldItalic"
        transform="ExpandStroke(50, 0, 1, 0, 1); Skew(13)" ;;
    *) echo $usage >&2; exit 2 ;;
esac
[ -n "$subname" ] && subname="-$subname"

# Write a throwaway FontForge script; \$1 and \$fontname are escaped so
# they are expanded by FontForge, not by this shell.
temp=$(mktemp)
cat > $temp <<EOF
Open(\$1);
SelectAll();
ClearInstrs();
$transform;
Simplify();
CorrectDirection();
ScaleToEm($size);
RoundToInt();
SetFontNames(\$fontname + "$subname");
RenameGlyphs("Adobe Glyph List");
Generate(\$fontname + "$type");
EOF
fontforge -script $temp "$font"
rm -f $temp
| true
|
e9afb773c5313dd0ff40dda015deaaa4fa5ef97d
|
Shell
|
leejefon/cluster-setup-kube
|
/create-certs.sh
|
UTF-8
| 5,063
| 2.5625
| 3
|
[] |
no_license
|
source ./env.sh

mkdir -p certs
cd certs

#######################################
# Write a cfssl CSR json. All certificates in this cluster share the
# same C/L/ST and key parameters; only CN, O and OU vary.
# Arguments: $1 output file, $2 CN, $3 O (organization), $4 OU
#######################################
write_csr() {
  local out="$1" cn="$2" org="$3" ou="$4"
  cat > "$out" << EOF
{
  "CN": "$cn",
  "key": {
    "algo": "rsa",
    "size": 2048
  },
  "names": [
    {
      "C": "US",
      "L": "Santa Clara",
      "O": "$org",
      "OU": "$ou",
      "ST": "California"
    }
  ]
}
EOF
}

#######################################
# Generate a certificate signed by the local CA.
# Arguments: $1 basename for the output files, $2 CN, $3 O, $4 OU,
#            $5 optional comma-separated hostname/SAN list
# Outputs:   $1.pem / $1-key.pem / $1.csr in the current directory
#######################################
gen_cert() {
  local name="$1" cn="$2" org="$3" ou="$4" hosts="${5-}"
  write_csr "${name}-csr.json" "$cn" "$org" "$ou"
  local opts=(-ca=ca.pem -ca-key=ca-key.pem -config=ca-config.json -profile=kubernetes)
  if [ -n "$hosts" ]; then
    opts+=(-hostname="$hosts")
  fi
  cfssl gencert "${opts[@]}" "${name}-csr.json" | cfssljson -bare "$name"
}

# CA: signing policy plus a self-signed root certificate.
cat > ca-config.json << EOF
{
  "signing": {
    "default": {
      "expiry": "8760h"
    },
    "profiles": {
      "kubernetes": {
        "usages": ["signing", "key encipherment", "server auth", "client auth"],
        "expiry": "8760h"
      }
    }
  }
}
EOF
write_csr ca-csr.json "Kubernetes" "Kubernetes" "CA"
cfssl gencert -initca ca-csr.json | cfssljson -bare ca

# Admin Client Certificate
gen_cert admin "admin" "system:masters" "Silver Peak"

# Kubelet Client Certificates (one per worker, bound to its IP)
gen_cert "${WORKER1_NAME}" "system:node:${WORKER1_NAME}" "system:nodes" "Silver Peak" "${WORKER1_IP}"
gen_cert "${WORKER2_NAME}" "system:node:${WORKER2_NAME}" "system:nodes" "Silver Peak" "${WORKER2_IP}"

# Controller Manager Client Certificate
gen_cert kube-controller-manager "system:kube-controller-manager" "system:kube-controller-manager" "Silver Peak"

# Kube Proxy Client Certificate
gen_cert kube-proxy "system:kube-proxy" "system:node-proxier" "Silver Peak"

# Kube Scheduler Client Certificate
gen_cert kube-scheduler "system:kube-scheduler" "system:kube-scheduler" "Silver Peak"

# API Server Certificate (SANs come from CERT_HOSTNAME in env.sh)
gen_cert kubernetes "kubernetes" "Kubernetes" "Silver Peak" "${CERT_HOSTNAME}"

# Service Account Certificate
gen_cert service-account "service-accounts" "Kubernetes" "Silver Peak"

# Move certs to workers
scp ca.pem ${WORKER1_NAME}-key.pem ${WORKER1_NAME}.pem ${WORKER1_USER}@${WORKER1_IP}:~
scp ca.pem ${WORKER2_NAME}-key.pem ${WORKER2_NAME}.pem ${WORKER2_USER}@${WORKER2_IP}:~

# Move certs to controllers
scp ca.pem ca-key.pem kubernetes-key.pem kubernetes.pem \
    service-account-key.pem service-account.pem ${CONTROLLER0_USER}@${CONTROLLER0_IP}:~
scp ca.pem ca-key.pem kubernetes-key.pem kubernetes.pem \
    service-account-key.pem service-account.pem ${CONTROLLER1_USER}@${CONTROLLER1_IP}:~
| true
|
12aeec748101e4c4d1be27e5bc2c42d1587d3a06
|
Shell
|
nikita-duseja/directed_studies_2020
|
/run_process.sh
|
UTF-8
| 605
| 3.09375
| 3
|
[] |
no_license
|
## upload file to google storage, then run transcription and speaker
## diarization in parallel; when both marker files exist, merge results.
## Arguments: $1 - path to audio file, $2 - number of speakers
file_path=$1
num_speakers=$2

# Remove stale marker files from a previous run.
rm -f speaker_windows
rm -f word-time-offsets

gsutil cp "$file_path" gs://tamu-nduseja-ds/
file_name=$(basename "$file_path")
gs_storage_path="gs://tamu-nduseja-ds/$file_name"

# Both jobs run in the background and each writes its marker file
# (word-time-offsets / speaker_windows) when done.
python transcription/transcribe_word_time_offsets.py "$gs_storage_path" &
python3 diarization/pyAudioAnalysis/audioAnalysis.py speakerDiarization -i "$file_path" --num "$num_speakers" &

echo "Waiting for processes(transcription, diarization) to complete"
while :
do
    if [ -f speaker_windows ] && [ -f word-time-offsets ]
    then
        break
    fi
    # Fix: the original loop spun without sleeping, pegging a CPU core
    # for the whole transcription run.
    sleep 1
done

python consolidate_results.py
| true
|
bc6a381a94050869b69770a3c5c3486d4aabe51d
|
Shell
|
jorgeavilacardenosa/dotfiles
|
/terminal/_aliases/utils.sh
|
UTF-8
| 1,489
| 2.5625
| 3
|
[] |
no_license
|
# Enable aliases to be sudo’ed
alias sudo='sudo '

# Others
alias aux='ps uax'
alias brwe='brew'
alias edithosts='sudo vim /etc/hosts'
alias c='pbcopy'
alias copy='pbcopy'
alias copy_ssh_key='xclip -sel clip < ~/.ssh/id_rsa.pub'
alias count_files_recursive='find . -type f -print | wc -l'
alias count_files_recursive_per_directory='ls -d */ | xargs -I _ sh -c "find \"_\" -type f | wc -l | xargs echo _"'
alias emptytrash='sudo empty_trash'
alias find_broken_symlinks='find -L . -type l'
# NOTE(review): $history[1] is zsh syntax — works in zsh only.
alias fuck!='sudo $history[1]'
alias flat_this_dir="sudo find . -mindepth 2 -type f -exec mv -i '{}' . ';'"
alias k='kill -9'
alias map="xargs -n1"
alias r='realpath'
alias reveal='open .'
alias size_of_the_current_directory='du -ch | grep total'
alias size_of_directories='ls | xargs -I _ du -sch "_" | grep -v total | sort -h'
alias stt='subl .'
alias watch_number_of_files='watch -n1 "find . -type f -print | wc -l"'
alias t='time'
alias pubkey='cat ~/.ssh/id_rsa.pub | pbcopy'
# Fix: the awk program was inside double quotes, so $2 expanded to an
# empty string when the alias was DEFINED and awk printed whole lines;
# \$2 defers expansion so awk actually receives '{print $2}'.
alias fuck_sbt="ps aux | grep sbt | awk '{print \$2}' | xargs kill -9"
alias catimg='imgcat'
alias editdotfiles='subl ~/.dotfiles'
alias optimize_zsh='source ${ZDOTDIR:-${HOME}}/.zlogin'

# Utils for presentations. Hide/show all desktop icons.
alias hidedesktop='defaults write com.apple.finder CreateDesktop -bool false; killall Finder'
alias showdesktop='defaults write com.apple.finder CreateDesktop -bool true; killall Finder'

# Mac
alias wall='change_wallpaper'
alias out='outdated_apps'
alias up='update_apps'
| true
|
4e427017f02c663df4c5cbd79524de4e00c6b716
|
Shell
|
ernestoriv7/APU-performance-evaluation
|
/scenes/filerandom.sh
|
UTF-8
| 290
| 2.90625
| 3
|
[] |
no_license
|
# Prefix every file in cpu/, gpu/ and apu/ with a random number
# ("<dir>/<RANDOM>_<name>") to shuffle their lexical order.
# Fix: the original repeated the same loop three times, once per
# directory; a single parameterized loop does the same work.
for dir in cpu gpu apu
do
    for f in "$dir"/*
    do
        # $RANDOM is a bash builtin yielding an integer in 0..32767.
        mv "$f" "$dir/${RANDOM}_${f#*/}"
    done
done
| true
|
7f90b41470669570b93ba9c6a25410b5f567d97b
|
Shell
|
ProjectStudyMaterials/tslog
|
/scripts/preCommit/protectDevAndMainBranches.sh
|
UTF-8
| 310
| 3.4375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash

# Protect Dev and Main Branches
# Pre-commit hook: refuse commits made directly on main or dev.
branch="$(git rev-parse --abbrev-ref HEAD)"

case "$branch" in
  main|dev)
    echo "You can't commit directly to the $branch branch! Please open a PR on a separate feature branch and get it approved before merging into $branch."
    exit 1
    ;;
esac
| true
|
049c1bf2a11c78029ca85bcb00dc7ef0f66e1d9f
|
Shell
|
bigfix/boxes
|
/bigfix/common/util.source.sh
|
UTF-8
| 874
| 3.671875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash

#######################################
# Poll a URL until it returns HTTP 200, retrying up to 3 times with a
# 15-second pause between attempts.
# Arguments: $1 url, $2 optional 'true' to send bigfix:bigfix basic auth
# Returns:   0 when the URL eventually answered 200, 1 otherwise
#######################################
function is_ok {
  local url="$1"
  local auth=${2:-false}

  local auth_opt=""
  if $auth; then
    auth_opt="--user bigfix:bigfix"
  fi

  # -w "%{http_code}" makes curl print only the status code.
  local status=`curl --insecure $auth_opt $url -s -o /dev/null -w "%{http_code}"`
  local retry=0
  while [[ "$status" -ne 200 && "$retry" -lt 3 ]]; do
    sleep 15
    ((retry++))
    status=`curl --insecure $auth_opt $url -s -o /dev/null -w "%{http_code}"`
  done

  if [[ "$status" -ne 200 ]]; then
    return 1
  else
    return 0
  fi
}

#######################################
# Download a build package from the internal build server.
# Arguments: $1 version ("a.b.c.d"), $2 package file name
# Outputs:   echoes the derived major version ("a.b", or "WebConsole"
#            for 42.1 builds) to stdout; saves the package to the cwd
# NOTE: exits the whole script (not just the function) when the build
#       directory is unreachable.
#######################################
function download {
  local version="$1"
  local package="$2"

  # Reduce a.b.c.d to a.b; 42.1 builds live under "WebConsole".
  local major_version=`echo "$version" | sed -r -n 's/([0-9]+)\.([0-9]+)\.([0-9]+)\.([0-9]+)/\1\.\2/p'`
  [[ $major_version == "42.1" ]] && major_version="WebConsole"

  is_ok "http://builds.sfolab.ibm.com/$major_version/$version/" || exit 1
  curl -sO "http://builds.sfolab.ibm.com/$major_version/$version/Unix/$package"
  echo $major_version
}
| true
|
e275fa296cff7e68f260e6534f579491f9cf8a91
|
Shell
|
theritesite/wp-ci-test
|
/bin/install-wp.sh
|
UTF-8
| 4,722
| 4.0625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# see https://github.com/wp-cli/wp-cli/blob/master/templates/install-wp-tests.sh
# Set up WordPress core, the WP PHPUnit test suite, PHP_CodeSniffer with
# the WordPress coding standards, and a test database for CI runs.
# Usage: install-wp.sh <db-name> <db-user> <db-pass> [db-host] [wp-version]

if [ $# -lt 3 ]; then
  echo "usage: $0 <db-name> <db-user> <db-pass> [db-host] [wp-version]"
  exit 1
fi

DB_NAME=$1
DB_USER=$2
DB_PASS=$3
DB_HOST=${4-localhost}
WP_VERSION=${5-latest}

# TODO: allow environment vars for WP_TESTS_DIR & WP_CORE_DIR
WP_TESTS_DIR="${PWD}/tmp/wordpress-tests-lib"
WP_CORE_DIR="${PWD}/tmp/wordpress/"

# Resolve the SVN tag of the test suite matching the requested version.
if [[ $WP_VERSION =~ [0-9]+\.[0-9]+(\.[0-9]+)? ]]; then
  WP_TESTS_TAG="tags/$WP_VERSION"
else
  # http serves a single offer, whereas https serves multiple. we only want one
  curl http://api.wordpress.org/core/version-check/1.7/ --output /tmp/wp-latest.json
  # NOTE(review): ERE syntax without -E, so this grep likely never
  # matches; its output is unused anyway — TODO confirm intent.
  grep '[0-9]+\.[0-9]+(\.[0-9]+)?' /tmp/wp-latest.json
  LATEST_VERSION=$(grep -o '"version":"[^"]*' /tmp/wp-latest.json | sed 's/"version":"//')
  if [[ -z "$LATEST_VERSION" ]]; then
    echo "Latest WordPress version could not be found"
    exit 1
  fi
  WP_TESTS_TAG="tags/$LATEST_VERSION"
fi

set -e

# Print a message to stdout.
say() {
  echo -e "$1"
}

# Download and unpack WordPress core into WP_CORE_DIR.
install_wp() {
  # make wordpress directory
  mkdir -p "$WP_CORE_DIR"

  # correct WP archive to grab
  if [ $WP_VERSION == 'latest' ]; then
    local ARCHIVE_NAME='latest'
  else
    local ARCHIVE_NAME="wordpress-$WP_VERSION"
  fi

  # grab the archive
  curl https://wordpress.org/${ARCHIVE_NAME}.tar.gz --output /tmp/wordpress.tar.gz --silent
  # uncompress it
  tar --strip-components=1 -zxmf /tmp/wordpress.tar.gz -C "$WP_CORE_DIR"

  # get a test db config
  curl https://raw.github.com/markoheijnen/wp-mysqli/master/db.php?access_token=$GITHUB_TOKEN --output "$WP_CORE_DIR/wp-content/db.php" --silent

  say "WordPress Installed"
}

# Check out the WP PHPUnit test suite and write its config file.
install_test_suite() {
  # portable in-place argument for both GNU sed and Mac OSX sed
  if [[ $(uname -s) == 'Darwin' ]]; then
    local ioption='-i .bak'
  else
    local ioption='-i'
  fi

  # set up testing suite in wordpress test library directory
  mkdir -p "$WP_TESTS_DIR"
  cd "$WP_TESTS_DIR"
  svn co --quiet https://develop.svn.wordpress.org/${WP_TESTS_TAG}/tests/phpunit/includes/
  curl http://develop.svn.wordpress.org/${WP_TESTS_TAG}/wp-tests-config-sample.php --output wp-tests-config.php --silent

  # test configuration: point the sample config at our install and db
  sed $ioption "s:dirname( __FILE__ ) . '/src/':'$WP_CORE_DIR':" wp-tests-config.php
  sed $ioption "s/youremptytestdbnamehere/$DB_NAME/" wp-tests-config.php
  sed $ioption "s/yourusernamehere/$DB_USER/" wp-tests-config.php
  sed $ioption "s/yourpasswordhere/$DB_PASS/" wp-tests-config.php
  sed $ioption "s|localhost|${DB_HOST}|" wp-tests-config.php
  sed $ioption "s/wptests_/wctests_/" wp-tests-config.php
  sed $ioption "s/example.org/woocommerce.com/" wp-tests-config.php
  sed $ioption "s/admin@example.org/tests@woocommerce.com/" wp-tests-config.php
  sed $ioption "s/Test Blog/WooCommerce Unit Tests/" wp-tests-config.php

  say "Test Suite Installed"
}

# Install PHP_CodeSniffer plus the WordPress coding-standards rulesets.
install_cs() {
  # ensure we are in tmp directory instead of the wordpress test directory
  cd ../

  # make a directory for codesniffer
  mkdir -p "php-codesniffer"
  # uncompress codesniffer into the directory we created
  curl -L https://api.github.com/repos/squizlabs/PHP_CodeSniffer/tarball/2.3.3?access_token=$GITHUB_TOKEN --silent | tar --strip-components=1 -zx -C "php-codesniffer"
  say "PHP_CodeSniffer Installed"

  # make a directory for the WP coding standard rules
  mkdir -p "wordpress-coding-standards"
  # uncompress the coding standards into the directory we created
  curl -L https://api.github.com/repos/WordPress-Coding-Standards/WordPress-Coding-Standards/tarball/0.6.0?access_token=$GITHUB_TOKEN --silent | tar --strip-components=1 -zx -C "wordpress-coding-standards"

  # move in the codesniffer directory
  cd php-codesniffer
  # install the WP coding standard rules
  # NOTE(review): ../prospress-coding-standards is referenced but never
  # downloaded by this script — verify it exists in the CI image.
  scripts/phpcs --config-set installed_paths ../wordpress-coding-standards,../prospress-coding-standards
  say "Coding Standards Installed"

  # for consistency move back into the tmp directory
  cd ../
}

# Create the test database, honouring host:port / host:socket forms.
install_db() {
  # parse DB_HOST for port or socket references
  local PARTS=(${DB_HOST//\:/ })
  local DB_HOSTNAME=${PARTS[0]};
  local DB_SOCK_OR_PORT=${PARTS[1]};
  local EXTRA=""

  if ! [ -z $DB_HOSTNAME ] ; then
    if [[ "$DB_SOCK_OR_PORT" =~ ^[0-9]+$ ]] ; then
      EXTRA=" --host=$DB_HOSTNAME --port=$DB_SOCK_OR_PORT --protocol=tcp"
    elif ! [ -z $DB_SOCK_OR_PORT ] ; then
      EXTRA=" --socket=$DB_SOCK_OR_PORT"
    elif ! [ -z $DB_HOSTNAME ] ; then
      EXTRA=" --host=$DB_HOSTNAME --protocol=tcp"
    fi
  fi

  # create database - more generic than MAMP for use on multiple systems
  #/Applications/MAMP/Library/bin/mysqladmin create $DB_NAME --user="$DB_USER" --password="$DB_PASS"$EXTRA
  mysqladmin create $DB_NAME --user="$DB_USER" --password="$DB_PASS"$EXTRA
  say "Database Created"
}

install_wp
install_test_suite
install_cs
install_db
| true
|
ef5c93ec339686e7f56d7d7c491d7851aee45f6b
|
Shell
|
ankan1811/Crio-Winter-of-Doing-Part-2
|
/install.sh
|
UTF-8
| 1,277
| 3.25
| 3
|
[] |
no_license
|
# Provision a dev box with MongoDB 4.2 (via apt) and Node.js (via nvm).
# NOTE(review): the apt source line targets Ubuntu 18.04 (bionic);
# verify against the target distro.

#INSTALL MONGODB
# Import the public key used by the package management system
wget -qO - https://www.mongodb.org/static/pgp/server-4.2.asc | sudo apt-key add -
# Add sources
echo "deb [ arch=amd64,arm64 ] https://repo.mongodb.org/apt/ubuntu bionic/mongodb-org/4.2 multiverse" | sudo tee /etc/apt/sources.list.d/mongodb-org-4.2.list
#Reloading local package database
sudo apt update
#Install the MongoDB packages
sudo apt install -y mongodb-org
#Starting and verifying the service
sudo systemctl start mongod
sudo systemctl status mongod
#Enable the service start on every reboot
sudo systemctl enable mongod

###################################################################################################################################
#INSTALLING NODEJS
#Install nvm
curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.34.0/install.sh | bash
#Load nvm without restarting terminal
export NVM_DIR="$HOME/.nvm"
[ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh"  # This loads nvm
[ -s "$NVM_DIR/bash_completion" ] && \. "$NVM_DIR/bash_completion"  # This loads nvm bash_completion
#Install nodejs
nvm install node
#Testing node and nvm are installed and running
node -v
nvm -v
#Check if git is installed
git --version
| true
|
4bd3b05799c7fbb5b0969d8ebc1dd4903e7f9334
|
Shell
|
rubiin/dotfiles
|
/dot_bin/git-extras/executable_git-loglive
|
UTF-8
| 378
| 3.25
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Originally stolen from: https://git.io/vi07z
#
# Live-updating git log view: re-renders 'git status' and 'git log'
# every 2 seconds. Extra arguments are forwarded to 'git log'.
# NOTE(review): '--pretty=live' relies on a 'pretty.live' format defined
# in the user's git config — TODO confirm.

while :
do
    # Outputting like that reduces blinking
    status_output=$(git -c "color.status=always" status --short --branch)
    log_output=$(git --no-pager log --color=always --abbrev-commit --date=relative --pretty=live $@)
    clear
    echo "$status_output"
    echo ""
    echo "$log_output"
    sleep 2
done
| true
|
0727737a654db2c9f1cd71d1f7f2c165ac15a6fe
|
Shell
|
apjanke/brew-el_cap-work
|
/bin/user-functions.zsh
|
UTF-8
| 198
| 2.75
| 3
|
[
"MIT"
] |
permissive
|
# Functions for interactive use
#
#
# Display the last brew run log (from this project's build scripts)
# for a given formula
# Usage: last-log <formula>
function last-log() {
  # NOTE(review): parses 'ls' output — breaks if any log path contains
  # whitespace; fine for this project's generated paths.
  ls log/runs/*/*/$1 | tail -1 | xargs cat
}
| true
|
35486aed266710fabdf4f64b03ebdd98fdb3b5a0
|
Shell
|
iibot-irc/core
|
/bin/ircwatch
|
UTF-8
| 1,016
| 3.234375
| 3
|
[
"Unlicense"
] |
permissive
|
#!/bin/bash
# ii-based IRC watcher: blocks until the channel's 'out' file grows,
# answers ping/echo commands addressed to the bot, then re-spawns
# itself in the background to wait for the next line.
# Arguments: $1 - channel name (directory under ~/irc/<network>/)

NETWORK=`$HOME/config.py network`
BOTNAME=`$HOME/config.py nick`
CHAN=$1
CHAN_II=$HOME/irc/$NETWORK/$CHAN

# Block until the channel log is appended to.
inotifywait -e modify $CHAN_II/out > /dev/null 2>&1
STUFF=`tail -n 1 $CHAN_II/out`
# Extract the nick from the "<nick> message" portion of the log line.
USER=`echo "$STUFF" | sed -e 's/^[^<]*<//' -e 's/>.*$//'`

# Ignore the bot's own messages so it never replies to itself.
if [ "$USER" != $BOTNAME ]; then
  MSG=`echo "$STUFF" | sed -e 's/^[^>]*>\ //'`
  if [[ "$MSG" == "$BOTNAME: ping" ]]; then
    echo "$USER: pong" > $CHAN_II/in
  # ${MSG:12} strips the "<botname>: echo " prefix — assumes a
  # fixed-length bot name; TODO confirm.
  elif [ `expr match "$MSG" "$BOTNAME: echo"` -ne 0 ]; then
    echo "$USER:${MSG:12}" > $CHAN_II/in
  ### This goes with github.com/iibot/jerkcity
  # elif [ `expr match "$MSG" "$BOTNAME: jc"` -ne 0 ]; then
  #   $HOME/bin/play_jc "${MSG:11}" > $CHAN_II/in
  ### These goes with github.com/iibot/twitter
  # elif [ `expr match "$MSG" "$BOTNAME: tweet "` -ne 0 ]; then
  #   $HOME/bin/tweet.py tweet "${MSG:14}" "$USER" > $CHAN_II/in
  # elif [ `expr match "$MSG" "$BOTNAME: delete_tweet '` -ne 0 ]; then
  #   echo `$HOME/bin/tweet.py delete_tweet "${MSG:21}" "$USER"` > $CHAN_II/in
  fi
fi

# Tail-recurse: spawn a fresh watcher for the next message.
$HOME/bin/ircwatch $1 &
| true
|
68a4e8c3562c4930ed6d337fe3d33c107bd9e0c9
|
Shell
|
dsyer/kpack
|
/hack/update-codegen.sh
|
UTF-8
| 875
| 3.03125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Run the k8s code-generator against this repo's API types, using a
# throwaway GOPATH so generated files land back in the repo checkout.

set -o errexit
set -o nounset
set -o pipefail

SCRIPT_ROOT=$(realpath $(dirname ${BASH_SOURCE})/..)

# Vendor dependencies so the code-generator scripts become available.
pushd $SCRIPT_ROOT
go mod vendor
popd
trap 'rm -rf $SCRIPT_ROOT/vendor' EXIT

CODEGEN_PKG=${CODEGEN_PKG:-$(cd "${SCRIPT_ROOT}"; ls -d -1 ./vendor/k8s.io/code-generator 2>/dev/null || echo ../code-generator)}

TMP_DIR="$(mktemp -d)"
# This trap REPLACES the one above; it also removes the temp dir.
# NOTE(review): because of '&&', vendor/ is kept if removing TMP_DIR
# fails — TODO confirm that is acceptable.
trap 'rm -rf ${TMP_DIR} && rm -rf $SCRIPT_ROOT/vendor' EXIT

export GOPATH=${GOPATH:-${TMP_DIR}}
# Symlink the repo into the GOPATH layout the generator expects.
TMP_REPO_PATH="${TMP_DIR}/src/github.com/pivotal/kpack"
mkdir -p "$(dirname "${TMP_REPO_PATH}")" && ln -s "${SCRIPT_ROOT}" "${TMP_REPO_PATH}"

bash "${CODEGEN_PKG}"/generate-groups.sh "deepcopy,client,informer,lister" \
  github.com/pivotal/kpack/pkg/client github.com/pivotal/kpack/pkg/apis \
  "build:v1alpha1" \
  --output-base "${TMP_DIR}/src" \
  --go-header-file "${SCRIPT_ROOT}"/hack/boilerplate/boilerplate.go.txt
| true
|
9868f043c8cfb36ca27c19f25bfe2326d9f701d7
|
Shell
|
lttr/mapreduce-wikipedia-visitors
|
/scripts/mapreduce-wikipedia-visitors-all-files-at-once.sh
|
UTF-8
| 506
| 3.140625
| 3
|
[] |
no_license
|
# Takes input files (Wikipedia pagecounts) as arguments and processes them.
# Usage: <script> pagecounts-file.gz [more .gz files...]
# Prints the 20 most-visited "cs" project pages, URL-decoded.
# ungzip all input files to stdout
# "$@" (not $*) keeps each filename intact even if it contains spaces
gunzip -c "$@" |
# sum hits ($3) per page title ($2) for lines of the "cs" project,
# skipping pathologically long titles; dump the totals at the end
awk '
$1=="cs" && length($2)<100 {a[$2]+=$3}
END { for (i in a) print i,a[i] }
' |
# sort lines by second column (numeric hit count) descendingly
sort -r -n -k 2 |
# top 20 is enough
head -20 |
# decode url encoding
# NOTE(review): this is Python 2 syntax (print statement, urllib.unquote_plus);
# under Python 3 it would be urllib.parse.unquote_plus -- confirm the runtime
python -c "import sys, urllib as ul; print ul.unquote_plus(sys.stdin.read())"
| true
|
824ee98ed61514d0b1bbe394e2912ed21b1dea30
|
Shell
|
jakob1379/.dotfiles
|
/i3/.i3/Scripts/toggle-microphone
|
UTF-8
| 284
| 2.9375
| 3
|
[] |
no_license
|
#! /bin/sh
# Toggle the ALSA "Capture" (microphone) channel and show a desktop
# notification with the resulting state.
amixer set Capture toggle
# Read the state back; -P Perl regex, -o print only the match, -m1 stop
# after the first "[on]"/"[off]" marker.
# NOTE(review): the trailing "toggle" argument to `sget` looks spurious
# (sget only queries) -- verify it is harmless on the target amixer.
ans=$(amixer sget Capture toggle | grep -Pom1 '(?<=\[)(on|off)(?=\])')
echo "Registered state after change: $ans"
if [ "$ans" = "on" ]; then
notify-send " Microphone On"
elif [ "$ans" = "off" ]; then
notify-send " Microphone Off"
fi
| true
|
c73a6314752e8a4de4c4fe5297e90fbac741454c
|
Shell
|
alexvasseur/mylio
|
/deletePmov.sh
|
UTF-8
| 310
| 3.28125
| 3
|
[] |
no_license
|
#!/bin/bash
# Dry-run cleanup: for every P*.mov file under $D/$DD, report the
# sibling .jpeg that would be removed.  The `rm` is only echoed, so
# nothing is actually deleted.
D='/Volumes/TBSSD/TMP'
DD='' #2011-2020/2016
find "$D/$DD" -type f | grep -E "/P.*\.mov" | while read movPath
do
echo "$movPath"
stem=$(basename "$movPath" .mov)
parent=$(dirname "$movPath")
sidecar="$parent/$stem.jpeg"
if [ -f "$sidecar" ]
then
echo rm "$sidecar"
echo "  deleted"
fi
done
exit 0
| true
|
1316e7398cc8696d18ebb35de8e3ae3134860a95
|
Shell
|
davewhiiite/stakeware
|
/withdraw.sh
|
UTF-8
| 1,235
| 3.703125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Withdraws ALL funds from every derived (seed) stake account belonging
# to a base stake keypair, sending them to a recipient address.
# example script invocation:
# $ ./withdraw.sh usb://ledger?key=0 stake-account.json 2ETrrFcKsKpnE1RCLpxDfdwDA1fBVRKZmWQmEBi4sBqv
funding_keypair=$1 # ex: usb://ledger?key=0
stake_keypair_filename=$2 # stake-account.json
recipient_address=$3 # <receiving solana address>, ex: 2ETrrFcKsKpnE1RCLpxDfdwDA1fBVRKZmWQmEBi4sBqv
# Base pubkey from which the seed accounts were derived
stake_pubkey=`solana-keygen pubkey $stake_keypair_filename`
num_seed_accounts=`solana-stake-accounts count $stake_pubkey`
# NOTE(review): this temp file is created in the CWD and is not removed
# on early failure; there is also no `set -e`, so a failed command above
# leaves empty variables feeding the loop below -- consider hardening.
solana-stake-accounts addresses $stake_pubkey --num-accounts $num_seed_accounts > seed_account_list_tmp.txt
echo ""
echo "keypair for funding, fee-payer, stake and withdraw authority is: $funding_keypair"
echo ""
echo "the pubkey for $stake_keypair_filename is: $stake_pubkey"
echo ""
echo "the number of seed accounts belonging to $stake_pubkey is: $num_seed_accounts"
echo ""
# One withdrawal per seed account; hardware wallets require a physical
# confirmation for each signature.
while IFS= read -r line
do
echo "seed account $line will have ALL funds withdrawn to: $recipient_address."
if [[ "$funding_keypair" == *"usb"* ]]; then
echo "using hardware wallet as funding keypair: please sign on device now."
fi
solana withdraw-stake --withdraw-authority $funding_keypair $line $recipient_address ALL --fee-payer $funding_keypair
done < seed_account_list_tmp.txt
rm seed_account_list_tmp.txt
| true
|
658b6ad909d053bc5cd8370cbfffa5b4f1e99abd
|
Shell
|
hallzy/pickapart-database
|
/populate-database-stmhall.sh
|
UTF-8
| 7,324
| 4.09375
| 4
|
[] |
no_license
|
#!/bin/bash
# Scrapes parts.pickapart.ca and (re)populates a MySQL database with
# every vehicle currently on the lot, plus its photo URLs.
# MySQL credentials are read from $ARGS_FILE in the current directory.
# Unset globbing
set -f
# Now readarray delimits with newlines
IFS='
'
# No case sensitivity for string matching
shopt -s nocasematch
#Name of the file that contains the necessary information.
ARGS_FILE=arguments_for_script
# If the arguments file exists then read the file and get the arguments, and
# insert them into the ncftpput command
if [ -e "$ARGS_FILE" ]; then
# This is a file with three arguments that will be filled into the script
# below.
readarray -t ARGUMENTS < $ARGS_FILE
#Username
USER_ARG=${ARGUMENTS[0]}
#server password
PASS_ARG=${ARGUMENTS[1]}
fi
# NOTE(review): -p$PASS_ARG on the command line is visible to other
# local users via `ps`; a MySQL option file would be safer.
mysql -u ${USER_ARG} -p${PASS_ARG} << EOF
source ./public_html/create-table.sql
EOF
echo "Create Table Done"
# Get master list of car models#{{{
model_list="cars-to-find"
flag=0
wget -O $model_list "http://parts.pickapart.ca/" && flag=1
if (( flag == 1 )); then
echo "Page Download Success"
else
echo "Page Download Failed"
exit 1
fi
# Remove all lines that do not contain " - "
# The lines that do have this are the lines with the models of the cars we want.
sed -i '/ - /!d' $model_list
# Delete the beginning of the line that doesn't matter
sed -i 's/^.* - //' $model_list
# Delete the end of the line that doesn't Matter
sed -i "s@</option>@@g" $model_list
#}}}
# readarray without -t keeps the trailing newline on each entry; it is
# stripped per-car below.
readarray CARS < ./cars-to-find
# NOTE(review): new_car_total_count is initialised but never updated or
# read anywhere in this script -- possibly dead code.
let new_car_total_count=0
for CAR in "${CARS[@]}"; do
echo "======================================================================="
CAR=${CAR//$'\n'/}
rm -rf "car_details"
# Find cars on the lot#{{{
echo "Downloading car list from webpage: http://parts.pickapart.ca/index.php"
echo "Car = ${CAR}"
# This submits a form on pickapart.ca to get a list of ${CAR}s in the lot.
flag=0
curl --form-string 'md=submit' --form-string "model=${CAR}" 'http://parts.pickapart.ca/index.php' > "car_details" && flag=1
# cp car_details ${CAR}_details
if (( flag == 1 )); then
echo "2nd Page Download Success"
else
echo "Page Download Failed"
exit
fi
dos2unix car_details
# A newline followed by a closing td tag is to be appended to the previous line.
# In order to solve some issues where this part is on a different line for some
# vehicles
sed -i '$!N;s/\n\s*<\/td>/<\/td>/;P;D' car_details
# Remove all lines from HTML that are not necessary at all (before the parts we don't need, and after)#{{{
# Open ${CAR}_list_html
declare -a ARRAY
exec 10<&0
fileName="car_details"
exec < "$fileName"
let count=0
# Each line gets stored in an array.
while read LINE; do
ARRAY[$count]=$LINE
((count++))
done
exec 0<&10 10<&-
# Used to find the lines we need.
regex="<tr [[:print:]]*photo-group[[:print:]]*</tr>"
#}}}
# make tdtags file that contains only the useful information.#{{{
ELEMENTS=${#ARRAY[@]}
firstLine=0
for((i=0;i<ELEMENTS;i++)); do
if [[ ${ARRAY[${i}]} =~ $regex ]] ; then
if (( firstLine < 1 )); then
echo "${BASH_REMATCH[0]}" > car_details
let firstLine=$firstLine+1
else
echo "${BASH_REMATCH[0]}" >> car_details
fi
fi
done
# At the end of all td tags start a new line.
sed -i "s@</td>@</td>\n@g" car_details
# Put URLs on there on lines
# NOTE(review): `sed -i` edits in place and prints nothing, so piping it
# into the second sed is a no-op -- verify whether the second expression
# was ever meant to run.
sed -i "s@http@\nhttp@g" car_details | sed -in "s/\(^http[s]*:[a-Z0-9/.=?_-]*\)\(.*\)/\1/p"
# Delete all lines containing <tr bgcolor=
sed -i '/<tr bgcolor=/d' car_details
# Delete everything after the URL on the line
sed -i 's/JPG.*/JPG/' car_details
# remove "<td>" from each line
sed -i "s@<td>@@g" car_details
# remove "</td>" from each line
sed -i "s@</td>@@g" car_details
# remove "</tr>" from each line
sed -i "s@</tr>@@g" car_details
#}}}
# Populate Arrays for the data for the new cars#{{{
# Open "${CAR}_tdtags_latest_cars"
declare -a PIC_URLS
declare -a COUNT_OF_CAR_FOR_PIC
declare -a DATE_ADDED
declare -a CAR_MAKE
declare -a CAR_MODEL
declare -a CAR_YEAR
declare -a CAR_BODY_STYLE
declare -a CAR_ENGINE
declare -a CAR_TRANSMISSION
declare -a CAR_DESCRIPTION
declare -a CAR_ROW
declare -a CAR_STOCK_NUMBERS
let pic_urls_count=0
let date_added_count=0
let car_make_count=0
let car_model_count=0
let car_year_count=0
let car_body_style_count=0
let car_engine_count=0
let car_transmission_count=0
let car_description_count=0
let car_row_count=0
let car_stock_array_count=0
# CAR_ARRAY now contains all the car information for ${CAR}s.
# Note that bash does not have 2D arrays, so it is stored in a 1D array.
# First pass: collect photo URLs, remembering which car (by ordinal)
# each photo belongs to; `skip` counts past the 10 data fields per car.
exec 10<&0
fileName="car_details"
exec < "$fileName"
let count=0
let current_car=0
let skip=10
while read LINE; do
if (( skip < 10 )); then
((skip++))
# Get date added
elif [[ $LINE =~ "http" ]] ; then
basename=$(basename "$LINE")
PIC_URLS[$pic_urls_count]="<a href='$LINE'>$basename</a>"
COUNT_OF_CAR_FOR_PIC[$pic_urls_count]=$current_car
((pic_urls_count++))
# skip
else
skip=1
((current_car++))
fi
((count++))
done
exec 0<&10 10<&-
# Delete all lines containing "http"
sed -i '/http/d' car_details
# Second pass: the remaining lines repeat in groups of 10 fields per car
# (see index map below); fan each field out into its own array.
exec 10<&0
fileName="car_details"
exec < "$fileName"
# index 0 = Date added
# index 1 = Make
# index 2 = Model
# index 3 = Year
# index 4 = Body Style (ex. 4DSDN, 2DCPE etc)
# index 5 = Engine
# index 6 = Transmission
# index 7 = Description
# index 8 = Row # (The row at the lot that the car is in)
# index 9 = Stock #
# index 10 = Date added for the next car
# etc
# Each line gets stored in an array.
let count=0
while read LINE; do
# Get date added
if (( count % 10 == 0 )); then
DATE_ADDED[$date_added_count]=$LINE
((date_added_count++))
# Get car make
elif (( count % 10 == 1 )); then
CAR_MAKE[$car_make_count]=$LINE
((car_make_count++))
# Get car models
elif (( count % 10 == 2 )); then
CAR_MODEL[$car_model_count]=$LINE
((car_model_count++))
# Get car year
elif (( count % 10 == 3 )); then
CAR_YEAR[$car_year_count]=$LINE
((car_year_count++))
# Get car body styles
elif (( count % 10 == 4 )); then
CAR_BODY_STYLE[$car_body_style_count]=$LINE
((car_body_style_count++))
# Get car engine type
elif (( count % 10 == 5 )); then
CAR_ENGINE[$car_engine_count]=$LINE
((car_engine_count++))
# Get car transmission type
elif (( count % 10 == 6 )); then
CAR_TRANSMISSION[$car_transmission_count]=$LINE
((car_transmission_count++))
# Get car description
elif (( count % 10 == 7 )); then
CAR_DESCRIPTION[$car_description_count]=$LINE
((car_description_count++))
# Get car row
elif (( count % 10 == 8 )); then
CAR_ROW[$car_row_count]=$LINE
((car_row_count++))
# Get stock numbers
elif (( count % 10 == 9 )); then
CAR_STOCK_NUMBERS[$car_stock_array_count]=$LINE
((car_stock_array_count++))
fi
((count++))
done
exec 0<&10 10<&-
# number of cars = size of array / 10
num_of_cars_current=$car_stock_array_count
# NOTE(review): values are interpolated directly into SQL; a description
# containing a quote would break the statement (SQL injection risk).
for((i=0;i<num_of_cars_current;i++)); do
mysql -u ${USER_ARG} -p${PASS_ARG} << EOF
use stmhallc_cars;
insert into cars values (
'${DATE_ADDED[$i]}',
'${CAR_MAKE[$i]}',
'${CAR_MODEL[$i]}',
${CAR_YEAR[$i]},
'${CAR_BODY_STYLE[$i]}',
'${CAR_ENGINE[$i]}',
'${CAR_TRANSMISSION[$i]}',
'${CAR_DESCRIPTION[$i]}',
'${CAR_ROW[$i]}',
'${CAR_STOCK_NUMBERS[$i]}'
);
EOF
done
# Link each photo to its car's stock number.
for((i=0;i<pic_urls_count;i++)); do
mysql -u ${USER_ARG} -p${PASS_ARG} << EOF
use stmhallc_cars;
insert into pics values (
'${PIC_URLS[$i]}',
'${CAR_STOCK_NUMBERS[${COUNT_OF_CAR_FOR_PIC[$i]}]}'
);
EOF
done
#}}}
#}}}
done
# Restore shell defaults changed at the top.
unset IFS
set +f
| true
|
42c4c8846e6904d8da3531555f42c6c44cd579c3
|
Shell
|
rdfostrich/cobra-bear-results
|
/polish-raw-offset-ostrich.sh
|
UTF-8
| 1,046
| 3.5
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Post-processes raw OSTRICH benchmark output into per-offset CSV files,
# one CSV per query policy (version-materialization, delta-materialization,
# version query).
# Arguments: $1 - results directory containing a _raw/ subdirectory
dir=$1
policies="versionmat deltamat version"
for policy in ${policies[@]}; do
i=0
queries=$(ls -d $dir/_raw/*.txt)
for query in ${queries[@]}; do
# strip a trailing slash, if present
query=$(echo $query | sed 's/\/$//')
# NOTE(review): $query ends in .txt yet is used as a directory prefix
# here ($query/$policy-*) -- confirm the expected _raw layout.
file=$query/$policy-
# short policy code used in the output file name (vm / dm / vq)
pol=$(echo $policy | sed "s/versionmat/vm/" | sed "s/deltamat/dm/" | sed "s/version/vq/")
# the query name ends in _<offset>.txt; extract <offset> for the out dir
offsetdir=$(echo $query | sed 's/^.*_\([0-9]*\)\.txt$/\1/')
mkdir -p $dir/$offsetdir
out=$dir/$offsetdir/$pol.csv
# For each policy: drop the raw header (tail -n +2), pick the relevant
# columns, and convert the microsecond column to milliseconds (/1000).
if [ "$policy" = "versionmat" ]; then
echo "version,ms" > $out
tail -n +2 $file* | gawk -F ',' '{print $1","$5/1000}' >> $out
fi
if [ "$policy" = "deltamat" ]; then
echo "versionstart,versionend,ms" > $out
# keep only rows whose start version is 0
tail -n +2 $file* | sed -n '/^0/p ' | gawk -F ',' '{print $1","$2","$6/1000}' >> $out
fi
if [ "$policy" = "version" ]; then
echo "ms" > $out
tail -n +2 $file* | gawk -F ',' '{print $4/1000}' >> $out
fi
let i++
done
done
| true
|
5cd899dd53e977476f3a22889af86449daf2d447
|
Shell
|
xinyuegtxy/pre_data
|
/.bashrc
|
UTF-8
| 6,112
| 3.546875
| 4
|
[] |
no_license
|
# ~/.bashrc: interactive bash configuration (prompt, history, aliases,
# CUDA/conda environment).  Sourced for interactive shells only.
# If not running interactively, don't do anything
case $- in
*i*) ;;
*) return;;
esac
# don't put duplicate lines or lines starting with space in the history.
# See bash(1) for more options
HISTCONTROL=ignoreboth
# append to the history file, don't overwrite it
shopt -s histappend
# for setting history length see HISTSIZE and HISTFILESIZE in bash(1)
HISTSIZE=1000
HISTFILESIZE=2000
# check the window size after each command and, if necessary,
# update the values of LINES and COLUMNS.
shopt -s checkwinsize
# If set, the pattern "**" used in a pathname expansion context will
# match all files and zero or more directories and subdirectories.
#shopt -s globstar
# make less more friendly for non-text input files, see lesspipe(1)
[ -x /usr/bin/lesspipe ] && eval "$(SHELL=/bin/sh lesspipe)"
# set variable identifying the chroot you work in (used in the prompt below)
if [ -z "${debian_chroot:-}" ] && [ -r /etc/debian_chroot ]; then
debian_chroot=$(cat /etc/debian_chroot)
fi
# set a fancy prompt (non-color, unless we know we "want" color)
case "$TERM" in
xterm-color|*-256color) color_prompt=yes;;
esac
# uncomment for a colored prompt, if the terminal has the capability; turned
# off by default to not distract the user: the focus in a terminal window
# should be on the output of commands, not on the prompt
#force_color_prompt=yes
if [ -n "$force_color_prompt" ]; then
if [ -x /usr/bin/tput ] && tput setaf 1 >&/dev/null; then
# We have color support; assume it's compliant with Ecma-48
# (ISO/IEC-6429). (Lack of such support is extremely rare, and such
# a case would tend to support setf rather than setaf.)
color_prompt=yes
else
color_prompt=
fi
fi
if [ "$color_prompt" = yes ]; then
PS1='${debian_chroot:+($debian_chroot)}\[\033[01;32m\]\u@\h\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]\$ '
else
PS1='${debian_chroot:+($debian_chroot)}\u@\h:\w\$ '
fi
unset color_prompt force_color_prompt
# If this is an xterm set the title to user@host:dir
case "$TERM" in
xterm*|rxvt*)
PS1="\[\e]0;${debian_chroot:+($debian_chroot)}\u@\h: \w\a\]$PS1"
;;
*)
;;
esac
# NOTE(review): the two PS1 assignments below each override everything
# set above -- only the last (exported) prompt actually takes effect.
PS1='`a=$?;if [ $a -ne 0 ]; then a=" "$a; echo -ne "\[\e[1A\e[$((COLUMNS-2))G\e[31m\e[1;41m${a:(-3)}\]\[\e[0m\e[7m\e[2m\r\n\]";fi`${debian_chroot:+($debian_chroot)}\[\e[1;33m\]\u\[\e[1;31m\]@\[\e[1;35m\]\h\[\e[1;32m\][\t]\[\e[1;31m\]:\[\e[1;36m\]\w\[\e[1;34m\]\$\[\e[0;39m\]'
export PS1="\[\033[38;5;87m\]\u\[$(tput bold)\]\[$(tput sgr0)\]\[\033[38;5;15m\]@\[$(tput sgr0)\]\[$(tput sgr0)\]\[\033[38;5;119m\]\h\[$(tput sgr0)\]\[\033[38;5;15m\] [\[$(tput sgr0)\]\[\033[38;5;198m\]\t\[$(tput sgr0)\]\[\033[38;5;15m\]] {\[$(tput sgr0)\]\[\033[38;5;81m\]\w\[$(tput sgr0)\]\[\033[38;5;15m\]}\n\[$(tput sgr0)\]\[\033[38;5;2m\]--\[$(tput sgr0)\]\[\033[38;5;118m\]>\[$(tput sgr0)\]\[\033[38;5;15m\]\\$ \[$(tput sgr0)\]"
#PS1="\[\e[36;1m\] \W \[\e[31;1m\]> \[$(tput sgr0)\]\[\e[0m\]"
# enable color support of ls and also add handy aliases
if [ -x /usr/bin/dircolors ]; then
test -r ~/.dircolors && eval "$(dircolors -b ~/.dircolors)" || eval "$(dircolors -b)"
alias ls='ls --color=auto'
#alias dir='dir --color=auto'
#alias vdir='vdir --color=auto'
alias grep='grep --color=auto'
alias fgrep='fgrep --color=auto'
alias egrep='egrep --color=auto'
fi
# colored GCC warnings and errors
#export GCC_COLORS='error=01;31:warning=01;35:note=01;36:caret=01;32:locus=01:quote=01'
# some more ls aliases
alias ll='ls -alh'
alias la='ls -A'
alias l='ls -CF'
# Add an "alert" alias for long running commands. Use like so:
# sleep 10; alert
alias alert='notify-send --urgency=low -i "$([ $? = 0 ] && echo terminal || echo error)" "$(history|tail -n1|sed -e '\''s/^\s*[0-9]\+\s*//;s/[;&|]\s*alert$//'\'')"'
# Alias definitions.
# You may want to put all your additions into a separate file like
# ~/.bash_aliases, instead of adding them here directly.
# See /usr/share/doc/bash-doc/examples in the bash-doc package.
if [ -f ~/.bash_aliases ]; then
. ~/.bash_aliases
fi
# enable programmable completion features (you don't need to enable
# this, if it's already enabled in /etc/bash.bashrc and /etc/profile
# sources /etc/bash.bashrc).
if ! shopt -oq posix; then
if [ -f /usr/share/bash-completion/bash_completion ]; then
. /usr/share/bash-completion/bash_completion
elif [ -f /etc/bash_completion ]; then
. /etc/bash_completion
fi
fi
# conda shell integration (enables `conda activate`)
. /opt/conda/etc/profile.d/conda.sh
# Short aliases for common commands.
alias pi='pip install'
alias cd.='cd ..'
alias v='vim'
alias w='wc -l'
alias d='du -sh'
alias c='cat'
alias le='less'
alias h='head'
alias hn='head -n'
alias t='tail'
alias vb='vim ~/.bashrc'
alias sb='source ~/.bashrc'
#alias pytorch='conda activate pytorch'
alias nv='nvidia-smi'
alias jp='nohup jupyter notebook > nohup.jp 2>&1 &'
#alias psp='ps -ef | grep python | cut -c 9-15| xargs kill -s 9'
alias pg='ps -ef | grep'
# NOTE(review): 'ka' starts with a pipe and cannot run as a standalone
# command; it looks intended to be pasted after `pg <name>` -- verify.
alias ka=' | cut -c 9-15| xargs kill -s 9'
# CUDA toolchain paths
export CUDA_HOME=/usr/local/cuda
export PATH=$PATH:$CUDA_HOME/bin
export LD_LIBRARY_PATH=/usr/local/cuda/lib64${LD_LIBRARY_PATH:+:${LD_LIBRARY_PATH}}
#export PATH=/usr/local/cuda-8.0/bin${PATH:+:${PATH}}
#export LD_LIBRARY_PATH=/usr/local/cuda8.0/lib64${LD_LIBRARY_PATH:+:${LD_LIBRARY_PATH}}
# NOTE(review): kk is an exported *variable*, not an alias; usable as
# "$kk <pid>" -- an alias was probably intended.
export kk='kill -9'
export PATH="/opt/conda/bin:$PATH"
export DISPLAY="localhost:10.0"
alias gc='git clone'
alias gr='git clone --recursive'
alias gsi='git submodule init'
alias gsp='git submodule update'
alias mk='make -j 32'
alias mc='make clean'
alias mi='make install'
alias pa='ps aux'
alias ds='du -sh ./*'
alias py3.6='conda activate py3.6'
alias da='conda deactivate'
alias wk='cd /workspace'
alias ta='tmux attach -t'
alias tn='tmux new -s'
alias cls6='cd /workspace/cls6/'
alias tmp='cd /workspace/tmp/'
alias data='cd /ssd/cls_lung'
alias tl='tmux ls'
alias tk='tmux kill-session -t' # session_name
# ctrl + b, d: detach from the session temporarily
# ctrl + b, ctrl + z: suspend the current session
alias v='vim'
alias pt='python train_with_dataloader.py'
alias pr='python resnext3d.py'
alias cls='cd /workspace/lung_cls5'
alias p3='python resnext3d.py'
alias wn='watch -n 0.1 -d nvidia-smi'
alias p='python'
| true
|
4db0c253a9f337bb3a6712904bc541d8bb9658ef
|
Shell
|
jan-kaspar/analysis_d0_totem
|
/run_multiple
|
UTF-8
| 2,233
| 3.609375
| 4
|
[] |
no_license
|
#!/bin/bash
# Runs the fit + s-extrapolation chain for multiple model / t-range
# combinations.  Combinations come from -g <group> presets and/or
# positional "t_range/model" arguments; results land under fits/.
#----------------------------------------------------------------------------------------------------
# defaults
models_ranges=()
make_plot_links="n"
uncs=(
#"st"
#"st+sy"
"st+sy+no"
)
#----------------------------------------------------------------------------------------------------
# parse command line
while [ -n "$1" ]
do
case "$1" in
"-g")
shift
case "$1" in
"std")
#models_ranges+=("lt-17,ht/e012+e023:f=0.2")
models_ranges+=("lts-100,ht/e012+e023:f=0.4")
;;
"bootstrap")
models_ranges+=("bootstrap/bootstrap")
;;
"correlation")
# sweep the correlation parameter f from 0.0 to 1.0
base="lts-100,ht/e012+e023"
models_ranges+=("$base:f=0.0")
models_ranges+=("$base:f=0.1")
models_ranges+=("$base:f=0.2")
models_ranges+=("$base:f=0.3")
models_ranges+=("$base:f=0.4")
models_ranges+=("$base:f=0.5")
models_ranges+=("$base:f=0.6")
models_ranges+=("$base:f=0.7")
models_ranges+=("$base:f=0.8")
models_ranges+=("$base:f=0.9")
models_ranges+=("$base:f=1.0")
;;
*)
echo "ERROR: unknown group $1"
exit 1
;;
esac
;;
"-plots")
make_plot_links="y"
;;
-*)
echo "ERROR: unknown option $1"
exit 2
;;
*)
models_ranges+=("$1")
;;
esac
shift
done
#----------------------------------------------------------------------------------------------------
# Run one fit+extrapolation in directory $1; reads $model, $t_range and
# $unc from the caller's scope.
function RunOne()
{
local dir="$1"
echo "* $dir"
mkdir -p "$dir"
# make plot links
if [ "$make_plot_links" == "y" ]
then
rm -rf "$dir/plots"
mkdir -p "$dir/plots"
for f in plots/templates/*.asy
do
ln -s "../../../../../$f" "$dir/plots/"
done
fi
# run fits
cd "$dir"
../../../../do_fits -model "$model" -range "$t_range" -unc "$unc" &> "do_fits.log"
../../../../s_extrapolation -model "$model" -range "$t_range" &> "s_extrapolation.log"
cd - &> /dev/null
}
#----------------------------------------------------------------------------------------------------
make -j2 || exit 1
# Each entry is "<t_range>/<model>"; split on the first '/'.
# NOTE(review): RunOne jobs are backgrounded but never wait-ed on, so
# the script can exit before the fits finish -- confirm this is intended.
for model_range in ${models_ranges[*]}
do
t_range=${model_range%/*}
model=${model_range#*/}
for unc in ${uncs[*]}
do
dir="fits/$t_range/$model/$unc"
RunOne "$dir" &
done
done
| true
|
fd5e585bf4e6bb98a2a25ec218a4015f4b8c3567
|
Shell
|
HenriqueMorato/cc_dotfiles
|
/ubuntu.sh
|
UTF-8
| 2,037
| 2.65625
| 3
|
[] |
no_license
|
#!/bin/sh
# Ubuntu workstation bootstrap: adds third-party apt repositories, then
# installs shell tooling, Chrome, Spotify, Rambox and terminal colors.
# Run from the dotfiles checkout; requires sudo.
sudo apt-get update
sudo apt-get install -y python-software-properties software-properties-common
sudo add-apt-repository -y ppa:pi-rho/dev
# Google Chrome repository + signing key
# NOTE(review): apt-key is deprecated on modern Ubuntu; consider
# signed-by keyrings when updating this script.
wget -q -O - https://dl-ssl.google.com/linux/linux_signing_key.pub | sudo apt-key add -
echo 'deb [arch=amd64] http://dl.google.com/linux/chrome/deb/ stable main' | sudo tee /etc/apt/sources.list.d/google-chrome.list
# Spotify repository + signing key
sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 931FF8E79F0876134EDDBDCCA87FF9DF48BF1C90
echo deb http://repository.spotify.com stable non-free | sudo tee /etc/apt/sources.list.d/spotify.list
sudo apt-get update
sudo apt-get install -y silversearcher-ag \
zsh \
tmux \
dconf-cli \
vim-gnome
# NOTE(review): the clone lands in the current directory, but the
# installer is invoked from ~/.cc_dotfiles -- verify the expected CWD.
git clone https://github.com/Anthony25/gnome-terminal-colors-solarized.git
~/.cc_dotfiles/gnome-terminal-colors-solarized/install.sh
sudo apt-get purge ruby
echo "======================================================"
echo "Installing Google Chrome."
echo "======================================================"
echo
sudo apt install google-chrome-stable
echo "======================================================"
echo "Installing Spotify"
echo "======================================================"
echo
sudo apt install spotify-client
echo "======================================================"
echo "Installing Rambox"
echo "======================================================"
echo
# Fetch the newest 64-bit .deb release URL from the GitHub API
wget $(curl -s https://api.github.com/repos/ramboxapp/community-edition/releases | grep browser_download_url | grep '64[.]deb' | head -n 1 | cut -d '"' -f 4) -O rambox.deb
sudo dpkg -i rambox.deb
sudo apt-get install -f
sudo rm -f rambox.deb
echo "======================================================"
echo "Changing terminal colors"
echo "======================================================"
echo
bash ./ubuntu/gnome-terminal-profile import ./ubuntu/profile_colors
echo 'Change your terminal window to Run command as login shell and restart'
echo 'You can find more information about this on' \
'https://github.com/rvm/ubuntu_rvm'
| true
|
fd8b3cad7c6725f76ee1044e64be16b5b3962709
|
Shell
|
TerranceN/asdf
|
/asdf
|
UTF-8
| 2,612
| 4.09375
| 4
|
[] |
no_license
|
#!/bin/bash
# Interactive history picker: incrementally filters shell history and
# lets the user run, prefill, or delete the highlighted entry.
# Relies on two external helpers not defined here:
#   history_fast_search - prints history entries matching the query
#   prefill             - types TYPE_CMD_TYPE into the shell prompt
# NOTE(review): both helpers' exact contracts are assumed -- verify.

# Restore the terminal cursor (it is hidden while the picker runs).
unhide_cursor() {
printf '\e[?25h'
}
trap unhide_cursor EXIT

# Read one keypress into the variable named by $1 (default: _KEY).
# An ESC introducer pulls in the rest of the escape sequence (with a
# 1s timeout) so arrow keys arrive as a single value like $'\e[A'.
_key()
{
local kp
ESC=$'\e'
_KEY=
IFS= read -d '' -r -sn1 _KEY
case $_KEY in
"$ESC")
while read -d '' -sn1 -t1 kp
do
_KEY=$_KEY$kp
case $kp in
[a-zA-NP-Z~]) break;;
esac
done
;;
esac
printf -v "${1:-_KEY}" "%s" "$_KEY"
}

# Main event loop: redraw query + results, then dispatch on one key.
function _asdf() {
local text=""
local cursorPos=1
# hide the cursor for the duration of the picker
printf '\e[?25l'
while true; do
clear
echo "${text}"
# horizontal separator spanning the terminal width
eval printf %.1s '-{1..'"${COLUMNS:-$(tput cols)}"\}; echo
local lineNum=1
# list matches, truncated to the terminal width, highlighting the
# row at $cursorPos in red
eval "history_fast_search ${text} | cut -c -$(expr ${COLUMNS:-$(tput cols)} - 2)" | while IFS= read -r line ; do
if [ $lineNum -eq $cursorPos ]; then
printf "\e[1;31m> %s\e[0;255m\n" "${line}"
else
printf "  %s\n" "${line}"
fi
lineNum=`expr $lineNum + 1`
done
_key char
# the currently highlighted history entry
local cmd="$(history_fast_search $text | head -n $cursorPos | tail -n 1)"
if [ "$char" == $'\n' ]; then
# Enter: type the command plus a newline (i.e. run it)
clear
unhide_cursor
local enter_esc=$'\x0a'
TYPE_CMD_ENABLED=yes TYPE_CMD_TYPE="${cmd}${enter_esc}" prefill
break
elif [ "$char" == " " ]; then
text="${text} "
elif [ "$char" == $'\177' ]; then
# Backspace: drop last query char, reset selection
text="${text%?}"
cursorPos=1
elif [ "$char" == $'\e[D' ]; then
# Left arrow: delete the selected entry from $HISTFILE in place,
# but only accept the edit if perl succeeded and the new file is
# plausibly large (sanity guard against truncating history).
local file=`mktemp -t asdf`
perl -0pe "s/#[0-9]*\n$(printf '%q' "$(history_fast_search $text | head -n $cursorPos | tail -n 1)" | sed 's/\//\\\//g' | sed 's/@/\\@/g')\n//g" $HISTFILE > $file
local exitStatus=$?
if [ $exitStatus -eq 0 ] && [ `wc -c < "$file"` -gt 1000 ]; then
cp $HISTFILE "${HISTFILE}.backup"
cp $file $HISTFILE
cat $file
rm $file
else
clear
echo "ERROR editing history file!!!"
echo "Exit status: $exitStatus"
echo "New file size (must be > 1000 to accept): $(wc -c < "$file" | xargs)"
break
fi
elif [ "$char" == $'\e[C' ]; then
# Right arrow: prefill the command without running it
clear
TYPE_CMD_ENABLED=yes TYPE_CMD_TYPE="${cmd}" prefill
break
elif [ "$char" == $'\e[A' ]; then
cursorPos=`expr $cursorPos - 1`
elif [ "$char" == $'\e[B' ]; then
cursorPos=`expr $cursorPos + 1`
else
# any other printable key extends the query
text="${text}${char}"
cursorPos=1
fi
done
}
_asdf $@
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.