blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
444fb9688588eb2c17e3ec2d2ef3f41cd616d5d2 | Shell | rerun-modules/waitfor | /lib/functions.sh | UTF-8 | 1,079 | 4 | 4 | [] | no_license | # Shell functions for the waitfor module.
#/ usage: source RERUN_MODULE_DIR/lib/functions.sh command
#
# Read rerun's public functions.
# $RERUN must name rerun's function library; `return` (not `exit`) is used
# because this file is meant to be sourced, never executed directly.
. $RERUN || {
    echo >&2 "ERROR: Failed sourcing rerun function library: \"$RERUN\""
    return 1
}
# Check usage. Argument should be command name.
[[ $# = 1 ]] || rerun_option_usage
# Source the option parser script.
# The parser lives at commands/<command>/options.sh; it is optional, so a
# missing or unreadable file is skipped silently.
if [[ -r $RERUN_MODULE_DIR/commands/$1/options.sh ]]
then
    . $RERUN_MODULE_DIR/commands/$1/options.sh || {
        rerun_die "Failed loading options parser."
    }
fi
# - - -
# Your functions declared here.
# - - -
progress_tic() {
    # Emit a single progress marker.  When stdout is a terminal the marker
    # is printed without a newline so ticks accumulate on one line;
    # otherwise (logs, pipes) each tick gets its own line.
    local marker=$1
    if [[ -t 1 ]]; then
        printf -- '%s' "$marker"
    else
        printf -- '%s\n' "$marker"
    fi
}
function show_progress {
    # Render a textual progress bar: $1 = completed units, $2 = total units.
    # Prints e.g. "50%[###...###.....]" after clearing the screen.
    #
    # Bug fix: the original computed the percentage with
    #   bc 'scale=2; $1/$2*100' and then stripped a literal ".00" suffix,
    # so any non-whole percentage left a value like "33.33" in barLen and
    # broke both for-loops.  Plain integer arithmetic avoids that and drops
    # the external bc dependency.  The historical extra "#" between the bar
    # and the filler dots is preserved.
    local completed=$1 total=$2
    local barLen=$(( completed * 100 / total ))
    local bar='' fills=''
    local b f
    for (( b = 0; b < barLen; b++ )); do
        bar="${bar}#"
    done
    for (( f = 0; f < 100 - barLen; f++ )); do
        fills="${fills}."
    done
    # Best-effort clear: ignore failures (e.g. TERM unset in non-interactive use).
    clear 2>/dev/null || true
    echo "${barLen}%[${bar}#${fills}]"
}
| true |
a39ba08a3b629a28ef6d5c4299023cbe5d240f28 | Shell | uriel1998/ppl_virdirsyncer_addysearch | /vcardreader.sh | UTF-8 | 5,437 | 3.703125 | 4 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/bin/bash
##############################################################################
# vcardreader, by Steven Saus 24 July 2020
# steven@stevesaus.com
# Licenced under the MIT License
##############################################################################
# because this is a bash function, it's using the input variable $SelectedVcard
# and the arrays as the returned
# variable. So there's no real "return" other than setting that var.
# This only outputs results to STDOUT if run as a standalone program.
function read_vcard {
    # Parse the vCard file named by the global $SelectedVcard and, when the
    # END:VCARD line is reached, pretty-print the contact (name, org,
    # phones, addresses, e-mails) to stdout.
    #
    # NOTE(review): `cat | while read` runs the loop body in a subshell, so
    # the counters/arrays built here are NOT visible to the caller after the
    # function returns — only the text printed inside the loop escapes.
    cat "$SelectedVcard" | while read line ; do
    # EMAIL;TYPE=...:user@host  ->  email_type[n] / email[n]
    if [[ $line = EMAIL* ]]; then
        #starts it at one!
        (( ++num_emails ))
        # removing the non-standardized "PREF" string
        temp=$(echo "$line" | awk -F = '{ print $2 }' | awk -F : '{print $1}' | awk '{print tolower($0)}' | sed 's/pref//' | sed 's/,//' )
        if [ -z "$temp" ];then
            email_type[$num_emails]="main"
        else
            email_type[$num_emails]=$(echo "$temp")
        fi
        temp=""
        temp=$(echo "$line" | awk -F ':' '{print $2}')
        # Strip stray tab/CR/LF characters (vCards are typically CRLF-encoded).
        email[$num_emails]=${temp//[$'\t\r\n']}
        line=""
    fi
    # ORG:Company  ->  org
    if [[ $line = ORG:* ]]; then
        org=${line#*:}
    fi
    # ADR;TYPE=...:;;street;city;...  ->  adr_type[n] / address[n]
    if [[ "$line" =~ "ADR;" ]]; then
        (( ++num_adr ))
        # removing the non-standardized "PREF" string
        temp=$(echo "$line" | awk -F = '{ print $2 }' | awk -F : '{print $1}' | awk '{print tolower($0)}' | sed 's/pref//' | sed 's/;label//'| sed 's/,//' )
        if [ -z "$temp" ];then
            adr_type[$num_adr]="none"
        else
            adr_type[$num_adr]=$(echo "$temp")
        fi
        adr_type[$num_adr]=${temp//[$'\t\r\n']}
        temp=""
        # testing to see if the address continues, using grep, of all things.
        # (A continuation line in a vCard starts with whitespace.)
        testcount=$(grep ADR --after-context=1 "${SelectedVcard}" | tail -1 | grep -c -e "^\ ")
        if [ $testcount -gt 0 ];then
            line=$(grep ADR --after-context=1 "${SelectedVcard}" | sed 's/^[[:space:]]*//')
            line=${line//[$'\t\r\n']}
        fi
        # Turn the semicolon-separated ADR fields into a comma-separated string,
        # collapsing empty leading/duplicate separators.
        temp=$(echo "$line" | awk -F ':' '{print $2}' | sed 's/;/,/g' | sed 's/^,,//' | sed 's/,,/,/g' )
        temp=${temp//[$'\t\r\n']}
        address[$num_adr]=$(echo "$temp" | sed 's/,$//')
        line=""
    fi
    # TEL;TYPE=...:number  ->  tel_type[n] / tel_num[n]
    if [[ "$line" =~ "TEL;" ]]; then
        (( ++num_tels ))
        # removing the non-standardized "PREF" string
        temp=$(echo "$line" | awk -F = '{ print $2 }' | awk -F : '{print $1}' | awk '{print tolower($0)}' | sed 's/pref//' | sed 's/,//' )
        if [ -z "$temp" ];then
            tel_type[$num_tels]="none"
        else
            tel_type[$num_tels]=$(echo "$temp")
        fi
        tel_type[$num_tels]=${temp//[$'\t\r\n']}
        temp=""
        temp=$(echo "$line" | awk -F ':' '{print $2}')
        tel_num[$num_tels]=${temp//[$'\t\r\n']}
        line=""
    fi
    #TODO catch if not FN, put together
    if [[ $line = FN:* ]]; then
        full_name=${line#*:}
    fi
    # End of a card: print everything collected so far.
    # NOTE(review): END="${num_tels[@]}" etc. use array expansion on what is
    # a plain scalar counter — it works (yields the counter's value) but was
    # presumably meant to be "$num_tels"; verify before changing.
    if [[ "$line" =~ "END:VCARD" ]]; then
        echo " ✢ $full_name"
        if [ ! -z "$org" ];then
            echo " ☖ $org"
        fi
        START=1
        END="${num_tels[@]}"
        if [[ $END -gt 0 ]];then
            for (( c=$START; c<=$END; c++ ));do
                printf " ☎ %s: %s \n" "${tel_type[c]}" "${tel_num[c]}"
            done
        else
            printf " ☎ No Phone number \n"
            #printf "%s: %s \n" "${tel_type[0]}" "${tel_num[0]}"
        fi
        START=1
        END="${num_adr[@]}"
        if [[ $END -gt 1 ]];then
            for (( c=$START; c<=$END; c++ ));do
                printf " 🏚 %s: %s\n" "${adr_type[c]}" "${address[c]}"
            done
        else
            printf " 🏚 %s: %s\n" "${adr_type[1]}" "${address[1]}"
        fi
        START=1
        END="${num_emails[@]}"
        if [[ $END -gt 1 ]];then
            for (( c=$START; c<=$END; c++ ));do
                printf " ✉ %s: %s\n" "${email_type[c]}" "${email[c]}"
            done
        else
            printf " ✉ %s: %s\n" "${email_type[1]}" "${email[1]}"
        fi
    fi
    done
}
##############################################################################
# Are we sourced?
# From http://stackoverflow.com/questions/2683279/ddg#34642589
##############################################################################
# Try to execute a `return` statement,
# but do it in a sub-shell and catch the results.
# If this script isn't sourced, that will raise an error.
$(return >/dev/null 2>&1)
# What exit code did that give?
# 0 means `return` was legal, i.e. the file is being sourced.
if [ "$?" -eq "0" ];then
    #echo "[info] Function read_vcard ready to go."
    OUTPUT=0
else
    # Standalone invocation: resolve $1 into $SelectedVcard and print the card.
    OUTPUT=1
    if [ "$#" = 0 ];then
        echo "Please call this as a function or with the filename as the first argument."
    else
        if [ -f "$1" ];then
            SelectedVcard="$1"
        else
            #if it's coming from pplsearch for preview
            # (argument looks like "label:path"; take the path part)
            SelectedVcard=$(echo "$1" | awk -F ':' '{print $2}' | xargs -I {} realpath {} )
        fi
        if [ ! -f "$SelectedVcard" ];then
            echo "File not found..."
            exit 1
        fi
        # NOTE(review): SUCCESS is set once and never modified, so the
        # `exit 99` branch below is unreachable — presumably read_vcard's
        # exit status was meant to be captured here; confirm before fixing.
        SUCCESS=0
        output=$(read_vcard)
        if [ $SUCCESS -eq 0 ];then
            # If it gets here, it has to be standalone
            echo "$output"
        else
            exit 99
        fi
    fi
fi
| true |
8ddcf71c383cc89c876a3de1bf0d6b15f19acd0f | Shell | izzunnaqi/proofreader-id | /src/extract_xml.sh | UTF-8 | 144 | 2.921875 | 3 | [] | no_license | #!/bin/bash
for f in ../resource/xml/first10/*.xml;
do
b=$(basename $f)
echo "Parsing $b..."
python parser.py $f -o 'doc/first10/'${b}
done
| true |
4a9e4b5b7100043b4a95304685dcc334b1d958c0 | Shell | runngezhang/sword_offer | /leetcode/python/ListNode.py | UTF-8 | 3,025 | 2.9375 | 3 | [] | no_license | #!/bin/bash python3
"""
定义单链表
这里Node的数据结构和函数封装到了一起,可以尝试分开:
class Node():
def __init__(self, x):
# 初始化
self.val = x
self.next = None
class ListNode():
# some function
pass
"""
class ListNode():
    """Singly linked list node with convenience helpers.

    The helpers assume the list is anchored by a fixed dummy head node
    (created as e.g. ``ListNode(0)``): ``saveToList`` and the default
    ``listPrint`` skip that head, and ``insertHead`` inserts right after it.
    """

    def __init__(self, x):
        # Node payload and pointer to the next node (None terminates the list).
        self.val = x
        self.next = None

    def insertTail(self, x):
        """Append a new node holding x at the end of the list."""
        if self is None:
            return
        while self.next is not None:
            self = self.next
        self.next = ListNode(x)

    def insertHead(self, x):
        """Insert a new node holding x right after the (dummy) head node."""
        if self is None:
            return
        newNode = ListNode(x)
        newNode.next = self.next
        self.next = newNode

    def deleteTail(self):
        """Remove the last node; no-op on an empty list (head only)."""
        if self is None or self.next is None:
            return
        while self.next.next is not None:
            self = self.next
        del self.next
        self.next = None

    def creatFromList(self, lst):
        """Rebuild the list after the (dummy) head from a Python list.

        Bug fix: the original did ``del self.next`` and returned early for
        an empty ``lst``, leaving the head without a ``next`` attribute and
        making every later operation raise AttributeError; the pointer is
        now always reset to None first.
        """
        self.next = None
        for i in lst:
            self.next = ListNode(i)
            self = self.next

    def getLastNode(self):
        """Return the last node of the list (the head itself if empty)."""
        if self is None:
            return None
        while self.next is not None:
            self = self.next
        return self

    def getMiddleNode(self):
        """Return the middle node using fast/slow pointers.

        slow advances once per two fast steps, both starting at self, so
        for an even node count the second of the two middle nodes is
        returned.
        """
        if self is None:
            return None
        fast = self
        slow = self
        while fast is not None and fast.next is not None:
            fast = fast.next.next
            slow = slow.next
        return slow

    def getIndexNode(self, k):
        """Return the k-th node, counting self as node 1; None if k <= 0.

        If the list is shorter than k, whatever node (possibly None) was
        reached when the walk ran out is returned, matching the original
        early-break behaviour.
        """
        if self is None or k <= 0:
            return None
        for i in range(1, k):
            if self is not None:
                self = self.next
            else:
                break
        return self

    def saveToList(self):
        """Return the node values in order, excluding the dummy head."""
        if self is None:
            return []
        ret = []
        while self is not None:
            ret.append(self.val)
            self = self.next
        return ret[1:]

    def listPrint(self, ifJumpHead=True, endFormat=" "):
        """Print node values in order.

        ifJumpHead: skip the dummy head node when True.
        endFormat: text printed after each value (separator).
        """
        if ifJumpHead:
            tmp = self.next
        else:
            tmp = self
        while tmp is not None:
            print(tmp.val, end=endFormat)
            tmp = tmp.next
        print()
981adee6946fd3fe655fa7dcfeda1a4025910b83 | Shell | KirinDave/yaws | /scripts/Install | UTF-8 | 3,081 | 3.40625 | 3 | [
"BSD-3-Clause"
] | permissive | #!/bin/sh
set -x
# Positional arguments (supplied by the build system):
#   $1 prefix  $2 destdir  $3 erl  $4 werl  $5 etcdir  $6 vardir  $7 bindir
prefix=$1
destdir=$2
erl=$3
werl=$4
etcdir=$5
vardir=$6
bindir=$7
#bindir=`dirname $erl` ; bindir=`dirname $bindir`/lib/erlang/bin
# vsn.mk defines YAWS_VSN, used for the versioned package name below.
. ../vsn.mk
y=yaws-${YAWS_VSN}
# Short aliases used in the sed substitutions further down.
p=${prefix}
e=${etcdir}
v=${vardir}
install -d ${destdir}/${prefix}/bin
install -d ${destdir}/${prefix}/etc
# Generate the yaws launcher by substituting placeholders in yaws.template.
cat yaws.template | \
	./Subst %yawsdir% ${prefix}/lib/yaws | \
	./Subst %vardir% ${vardir} | \
	./Subst %erl% "${erl}" | \
	./Subst %run_erl% "${bindir}/run_erl" | \
	./Subst %to_erl% "${bindir}/to_erl" | \
	./Subst %werl% "${werl}" > ${destdir}/${prefix}/bin/yaws
chmod +x ${destdir}/${prefix}/bin/yaws
install -d ${destdir}/${prefix}/lib/yaws/examples/ebin
install -d ${destdir}/${prefix}/lib/yaws/examples/include
# Platform detection: install the matching init/startup script.  Each branch
# substitutes %prefix% (and sometimes %etcdir%) into a per-OS template.
if [ -f /etc/gentoo-release ]; then
    install -d ${destdir}/${etcdir}/init.d/
    install -d ${destdir}/${etcdir}/conf.d/
    install -d ${destdir}/${vardir}/run/yaws
    chmod a+rwx ${destdir}/${vardir}/run/yaws
    cp gentoo/init.d.yaws ${destdir}/${etcdir}/init.d/yaws
    chmod +x ${destdir}/${etcdir}/init.d/yaws
    sed -e "s;%prefix%;$p;g" gentoo/conf.d.yaws > ${destdir}/${etcdir}/conf.d/yaws
elif [ -f /etc/redhat-release ]; then
    install -d ${destdir}/${etcdir}/init.d
    sed -e "s;%prefix%;$p;g" redhat/yaws.init.d > ${destdir}/${etcdir}/init.d/yaws
    chmod +x ${destdir}/${etcdir}/init.d/yaws
    install -d ${destdir}/${vardir}/run/yaws
    chmod a+rwx ${destdir}/${vardir}/run/yaws
elif [ -f /etc/suseservers ]; then
    install -d ${destdir}/${etcdir}/init.d
    sed -e "s;%prefix%;$p;g" suse/yaws.init.d > ${destdir}/${etcdir}/init.d/yaws
    chmod +x ${destdir}/${etcdir}/init.d/yaws
    install -d ${destdir}/${vardir}/run/yaws
    chmod a+rwx ${destdir}/${vardir}/run/yaws
elif [ -f /etc/debian_version ]; then
    install -d ${destdir}/${etcdir}/init.d
    sed -e "s;%prefix%;$p;g" debian/yaws.init.d > ${destdir}/${etcdir}/init.d/yaws
    chmod +x ${destdir}/${etcdir}/init.d/yaws
    install -d ${destdir}/${vardir}/run/yaws
    chmod a+rwx ${destdir}/${vardir}/run/yaws
elif [ "`uname -s`" = "Darwin" -a `id -u` = 0 ]; then
    # macOS: only as root, since StartupItems must be root:wheel owned.
    startupdir="/Library/StartupItems/Yaws"
    if [ ! -e ${startupdir} ]; then
	mkdir ${startupdir};
    elif [ ! -d ${startupdir} ]; then
	echo "${startupdir} exists but is not a directory, bailing out ..."
	exit 1
    fi
    sed -e "s;%prefix%;$p;g" darwin/Yaws.StartupItem > ${startupdir}/Yaws
    chmod +x ${startupdir}/Yaws
    cp darwin/Yaws.plist ${startupdir}/StartupParameters.plist
    # MacOS is particular about the ownership of startup items.
    chown -R root:wheel ${startupdir}
elif [ "`uname -s`" = "FreeBSD" ]; then
    sed -e "s;%prefix%;$p;g" -e "s;%etcdir%;$e;g" freebsd/yaws.sh > ${destdir}/${etcdir}/rc.d/yaws.sh
elif [ "`uname -s`" = "NetBSD" ]; then
    sed -e "s;%prefix%;$p;g" -e "s;%etcdir%;$e;g" netbsd/yaws.sh > /etc/rc.d/yaws
else
    install -d ${destdir}/${etcdir}
    echo "Don't know how to make /etc/init scrips for this system"
    echo "possibly add ${prefix}/bin/yaws --daemon --heart to your /etc/rc.local manually"
fi
| true |
58801521cc31dbab96a27d5d79b234cc8b0ed05d | Shell | cdadia/dotfiles | /bin/jq-repl | UTF-8 | 450 | 2.921875 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Interactive jq REPL: type a jq filter into fzf's query line and see it
# applied live to the JSON from $1 (stdin when no file or "-" is given).
# Extra jq flags may be supplied via $JQ_REPL_ARGS.
#
# Fix: $input is now quoted in the trap, the redirect and the fzf preview
# command so temp paths containing spaces (e.g. unusual TMPDIR) work.
if [[ -z $1 ]] || [[ $1 == "-" ]]; then
	# Buffer stdin into a temp file so jq can re-read it on every keystroke.
	input=$(mktemp)
	# Single quotes defer expansion until trap time and keep the path quoted.
	trap 'rm -f "$input"' EXIT
	cat /dev/stdin > "$input"
else
	input=$1
fi

# </dev/null keeps fzf's own input list empty; --phony stops the query from
# filtering that (empty) list, so it is only forwarded to the jq preview.
</dev/null fzf --phony \
	--print-query \
	--preview "jq --color-output $JQ_REPL_ARGS {q} \"$input\"" \
	--preview-window="down:99%" \
	--height="99%" \
	--query="." \
	--bind "alt-up:preview-page-up,alt-down:preview-page-down"
| true |
1adba18100bf69d3289f1b02050bae69cc016c19 | Shell | dwakel/third-party-auth | /AccountTracker/scripts/create.sh | UTF-8 | 412 | 2.78125 | 3 | [] | no_license |
echo ":: starting containers"
docker-compose up --build -d
echo ":: waiting for containers to start..."
secs=10
while [ $secs -gt 0 ]; do
echo -ne "$secs\033[0K\r"
sleep 1
: $((secs--))
done
echo ":: applying migrations"
export CONNSTR="Host=localhost;Username=tracker;Port=5401;Password=1;Database=tracker"
./scripts/migrator.sh apply
echo ":: seeding data"
./scripts/runsql.sh "./sql/seed.sql"
| true |
a4f75fd88b6718731a79181101af2cec174a1cdb | Shell | RyanLongVA/brutesubs | /scripts/startWebAppTesting.sh | UTF-8 | 485 | 3.90625 | 4 | [
"MIT"
] | permissive | #!/bin/bash
#Basic input check
# Bug fix: bash's builtin echo does not interpret "\n" (or "\e") without -e,
# so the original printed literal backslash sequences; printf renders the
# intended newlines.
if [ $# -eq 0 ]; then
	printf '\nNo argument was provided.\n \nPlease specify list (urls) to push into the urlsToChrome.py\n'
	exit 1
fi
FILE=$1
# Quote the filename so paths containing spaces work.
if [ ! -f "$FILE" ]; then
	echo "The file you provided, apparently, does not exist: $FILE"
	exit 1
fi
echo "Input file atleast exists... starting chrome"
# Launch Chromium through the local intercepting proxy in a new terminal,
# then push every URL from the list into the running browser.
gnome-terminal -x sh -c 'chromium-browser --proxy-server="127.0.0.1:8080" --profile-directory="Profile 2"'
python urlsToChrome.py -u "$FILE"
| true |
b996efe6231845250df1db3b5e44d1babb95feb0 | Shell | h121h/automation-scripts | /Bruteforcer-ec2.sh | UTF-8 | 2,544 | 3.3125 | 3 | [] | no_license | #This tools is used to automate the gobuster bruteforcing process.
#LOCATION: home directory of Amazon ec2
if [ -z "$1" ]
then
echo "\e[1;31m[!] Please Enter the Target Domain:\e[0m";
echo "\e[1;32m[!] Example:\e[0m" "\e[1;32msh bruteforcer.sh https://www.target.com \e[0m";
exit
fi
echo "1-content_discovery_all.txt -[373535]"
echo "2-Top10000-RobotsDisallowed.txt-[10000]"
echo "3-Big.txt-[20k]"
echo "4-Parameters.txt -[2.5K] "
echo "6-cgis.txt [1k]"
echo "7-quickhits.txt -[2.5K] "
echo "8-combined.txt [9k]"
echo "9-raft-large-directories.txt [62K] "
echo "10-Common_PHP_Filenames.txt [5k]"
echo "11-raft-large-files.txt [37K]"
echo "12-Apache.txt [9k]"
echo "\e[1;32m-----------------------------\e[0m";
echo "\e[1;32m|- Enter the List Number - |\e[0m";
echo "\e[1;32m-----------------------------\e[0m";
read choice
echo "\e[1;32m----------------------------------\e[0m";
echo "\e[1;32m|- Enter the No of Threads - |\e[0m";
echo "\e[1;32m----------------------------------\e[0m";
read threads
case $choice in
1)
./go/bin/gobuster -u $1 -w tools/web-discovery/content_discovery_all.txt -t $threads
;;
2)
./go/bin/gobuster -u $1 -w tools/web-discovery/Top10000-RobotsDisallowed.txt -t $threads
;;
3)
./go/bin/gobuster -u $1 -w tools/web-discovery/big.txt -t $threads
;;
4)
./go/bin/gobuster -u $1 -w tools/web-discovery/Parameters.txt -t $threads
;;
5)
./go/bin/gobuster -u $1 -w tools/web-discovery/cgis.txt -t $threads
;;
6)
./go/bin/gobuster -u $1 -w tools/web-discovery/quickhits.txt -t $threads
;;
7)
./go/bin/gobuster -u $1 -w tools/web-discovery/combined.txt -t $threads
;;
8)
./go/bin/gobuster -u $1 -w tools/web-discovery/raft-large-directories.txt -t $threads
;;
9)
./go/bin/gobuster -u $1 -w tools/web-discovery/Common_PHP_Filenames.txt -t $threads
;;
10)
./go/bin/gobuster -u $1 -w tools/web-discovery/raft-large-files.txt -t $threads
;;
11)
./go/bin/gobuster -u $1 -w tools/web-discovery/Apache-Fuzz.txt -t $threads
;;
*)
echo "File Not Found Enter the file Path:"
read path
./go/bin/gobuster -u $1 -w $path -t $threads
;;
esac
| true |
0e34df435a9b6f1b2223a638badbe68b1c7660bd | Shell | MarcKe/archi-ramfs | /init.sh | UTF-8 | 3,829 | 3.375 | 3 | [] | no_license | #!/sbin/sh
# _ _ _ _ __ _
# / \ _ __ ___| |__ (_) |/ /___ _ __ _ __ ___| |
# / _ \ | '__/ __| '_ \| | ' // _ \ '__| '_ \ / _ \ |
# / ___ \| | | (__| | | | | . \ __/ | | | | | __/ |
# /_/ \_\_| \___|_| |_|_|_|\_\___|_| |_| |_|\___|_|
#
# Copyright 2014 Łukasz "JustArchi" Domeradzki
# Contact: JustArchi@JustArchi.net
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Abort on the first failing command — flashing must never continue after
# a partial failure (see the "Safety check" banner below).
set -e
# Device-specific
KERNEL="/dev/block/mmcblk0p5" # THIS IS FOR GALAXY S3 ONLY
PARSERAMDISK=1 # If we don't need to worry about compressed ramdisk (i.e. putting modules inside), we can skip it
# Global
AK="/tmp/archikernel"
AKDROP="$AK/drop"
# From here on, everything written to stdout/stderr lands in the log file.
exec 1>"$AK/ArchiKernel.log"
exec 2>&1
date
echo "INFO: ArchiKernel flasher ready!"
echo "INFO: Safety check: ON, flasher will immediately terminate in case of ANY error"
# Both bootimg helpers must ship inside the flashable zip.
if [ ! -f "$AK/mkbootimg-static" -o ! -f "$AK/unpackbootimg-static" ]; then
	echo "FATAL ERROR: No bootimg tools?!"
	exit 1
else
	chmod 755 "$AK/mkbootimg-static" "$AK/unpackbootimg-static"
fi
# Read the currently-flashed boot image; prefer the recovery's dump_image
# helper and fall back to a raw dd of the partition.
echo "INFO: Pulling boot.img from $KERNEL"
if [ ! -z "$(which dump_image)" ]; then
	dump_image "$KERNEL" "$AK/boot.img"
else
	dd if="$KERNEL" of="$AK/boot.img"
fi
mkdir -p "$AKDROP/ramdisk"
echo "INFO: Unpacking pulled boot.img"
"$AK/unpackbootimg-static" -i "$AK/boot.img" -o "$AKDROP"
if [ -f "$AKDROP/boot.img-ramdisk.gz" ]; then
	if [ "$PARSERAMDISK" -eq 1 ]; then
		echo "INFO: Ramdisk in gzip format found, extracting..."
		cd "$AKDROP/ramdisk"
		gunzip -c ../boot.img-ramdisk.gz | cpio -i
		# Overlay the kernel's own res/ and sbin/ payload onto the ramdisk.
		cp -r "$AK/res" "$AKDROP/ramdisk"
		cp -r "$AK/sbin" "$AKDROP/ramdisk"
		# Samsung stock ramdisks carry kernel modules in /lib/modules;
		# AOSP-based ones keep them in /system/lib/modules instead.
		if [ -d "$AKDROP/ramdisk/lib/modules" ]; then
			echo "INFO: Detected Samsung variant"
			# Remove all current modules
			find "$AKDROP/ramdisk/lib/modules" -type f -iname "*.ko" | while read line; do
				rm -f "$line"
			done
			# Copy all new ArchiKernel modules
			find "/system/lib/modules" -type f -iname "*.ko" | while read line; do
				cp "$line" "$AKDROP/ramdisk/lib/modules"
			done
			rm -rf "/system/lib/modules" # No need to confusion on Sammy base
		else
			echo "INFO: Detected AOSP variant"
		fi
		# add service for kernel initiation to init.rc
		# (idempotent: only appended when post-init.sh is not referenced yet)
		if ! grep -q post-init.sh "$AKDROP/ramdisk/init.rc"; then
			cat <<EOF >> "$AKDROP/ramdisk/init.rc"
service kernelinit /sbin/post-init.sh
    class core
    user root
    oneshot
EOF
		fi
		# Repack the modified ramdisk into a fresh gzip'd cpio archive.
		rm -f "$AKDROP/boot.img-ramdisk.gz"
		find . | cpio -o -H newc | gzip > "$AKDROP/boot.img-ramdisk.gz"
	fi
else
	echo "FATAL ERROR: No ramdisk?!"
	exit 2
fi
# Rebuild the boot image from the new zImage plus the (possibly repacked)
# ramdisk, reusing every parameter unpackbootimg extracted from the device.
echo "INFO: Combining ArchiKernel zImage and current kernel ramdisk"
"$AK/mkbootimg-static" \
	--kernel "$AK/zImage" \
	--ramdisk "$AKDROP/boot.img-ramdisk.gz" \
	--cmdline "$(cat $AKDROP/boot.img-cmdline)" \
	--board "$(cat $AKDROP/boot.img-board)" \
	--base "$(cat $AKDROP/boot.img-base)" \
	--pagesize "$(cat $AKDROP/boot.img-pagesize)" \
	--kernel_offset "$(cat $AKDROP/boot.img-kerneloff)" \
	--ramdisk_offset "$(cat $AKDROP/boot.img-ramdiskoff)" \
	--tags_offset "$(cat $AKDROP/boot.img-tagsoff)" \
	--output "$AK/newboot.img"
echo "INFO: newboot.img ready!"
echo "INFO: Flashing newboot.img on $KERNEL"
if [ ! -z "$(which flash_image)" ]; then
	flash_image "$KERNEL" "$AK/newboot.img"
else
	dd if="$AK/newboot.img" of="$KERNEL"
fi
echo "SUCCESS: Everything finished successfully!"
# _OK is the marker the surrounding updater script checks for success.
touch "$AK/_OK"
date
sync
exit 0
| true |
77f62a08a5cd93bc124f002875f6d226d0f3bf23 | Shell | mssalvatore/ubuntu-cve-tracker-mirror | /scripts/git-hooks/pre-commit-wrapper | UTF-8 | 329 | 3.484375 | 3 | [] | no_license | #!/bin/bash
# Pre-commit hook wrapper: run the syntax check first and, only when it
# succeeds, the pyflakes3 check.  Exits with the first failing hook's status.
#
# Fix: the hook invocations are now quoted so a hooks directory whose path
# contains spaces still works; stray "\" line continuations removed.
CURRENT_DIR=$(dirname "$0")

"${CURRENT_DIR}/pre-commit-syntax-check" "$@"
CHECK_SYNTAX_STATUS=$?
if [[ $CHECK_SYNTAX_STATUS != 0 ]]; then
    exit $CHECK_SYNTAX_STATUS
fi

"${CURRENT_DIR}/pre-commit-pyflakes3" "$@"
exit $?
| true |
859f2df92c3e2ed15abde81fb9b0e041a64da3c9 | Shell | serginator/mac-initializer | /dotfiles/osx | UTF-8 | 7,851 | 3.09375 | 3 | [] | no_license | #!/usr/bin/env bash
# Apply opinionated macOS defaults; on non-Darwin systems everything is
# skipped.  Many settings only take effect after logout/restart.
OSX=$(test "$(uname)" == "Darwin" && echo "x")

# Bug fix: the original guard was `if [[ OSX ]]`, which tests the literal
# string "OSX" and is therefore always true — the skip branch never ran on
# Linux.  Test the variable's value instead.
if [[ -n "$OSX" ]]; then
  # Ask for the administrator password upfront
  sudo -v
  # Keep-alive: update existing `sudo` time stamp until `.osx` has finished
  while true; do sudo -n true; sleep 60; kill -0 "$$" || exit; done 2>/dev/null &
  ############################################################################
  # General UI/UX                                                            #
  ############################################################################
  # Disable the sound effects on boot
  sudo nvram SystemAudioVolume=" "
  # disable window animations
  sudo defaults write NSGlobalDomain NSAutomaticWindowAnimationsEnabled -bool NO
  # font smoothing (1-3)
  defaults -currentHost write -globalDomain AppleFontSmoothing -int 1
  # disable the “Are you sure you want to open this application?” dialog
  defaults write com.apple.LaunchServices LSQuarantine -bool false
  # Set the icon size of Dock items to 48 pixels
  defaults write com.apple.dock tilesize -int 48
  # Finder: allow quitting via ⌘ + Q; doing so will also hide desktop icons
  defaults write com.apple.finder QuitMenuItem -bool true
  # disable opening and closing window animations
  sudo defaults write NSGlobalDomain NSAutomaticWindowAnimationsEnabled -bool false
  # disable Resume system-wide
  sudo defaults write NSGlobalDomain NSQuitAlwaysKeepsWindows -bool false
  # disable auto-correct
  sudo defaults write NSGlobalDomain NSAutomaticSpellingCorrectionEnabled -bool false
  # disable Finder animations
  defaults write com.apple.finder DisableAllAnimations -bool true
  # disable disk image verification
  defaults write com.apple.frameworks.diskimages skip-verify -bool true
  defaults write com.apple.frameworks.diskimages skip-verify-locked -bool true
  defaults write com.apple.frameworks.diskimages skip-verify-remote -bool true
  # disable the warning when changing a file extension
  defaults write com.apple.finder FXEnableExtensionChangeWarning -bool false
  # remove the animation when hiding/showing the Dock (actually, make it fast. If you want to remove, use 0)
  defaults write com.apple.dock autohide-time-modifier -float 0
  # prevent Time Machine from prompting to use new hard drives as backup volume
  # defaults write com.apple.TimeMachine DoNotOfferNewDisksForBackup -bool true
  # Restart automatically if the computer freezes
  systemsetup -setrestartfreeze on
  # Increase sound quality for Bluetooth headphones/headsets
  defaults write com.apple.BluetoothAudioAgent "Apple Bitpool Min (editable)" -int 40
  # Enable full keyboard access for all controls
  # (e.g. enable Tab in modal dialogs)
  defaults write NSGlobalDomain AppleKeyboardUIMode -int 3
  # General: automatically illuminate built-in MacBook keyboard in low light
  defaults write com.apple.BezelServices kDim -bool true
  # Turn off keyboard illumination when computer is not used for 5 minutes
  defaults write com.apple.BezelServices kDimTime -int 300
  ############################################################################
  # Screen                                                                   #
  ############################################################################
  # Require password immediately after sleep or screen saver begins
  defaults write com.apple.screensaver askForPassword -int 1
  defaults write com.apple.screensaver askForPasswordDelay -int 0
  # Save screenshots to ~/Screenshots
  mkdir -p "${HOME}/Screenshots"
  defaults write com.apple.screencapture location -string "${HOME}/Screenshots"
  # Save screenshots in PNG format (other options: BMP, GIF, JPG, PDF, TIFF)
  defaults write com.apple.screencapture type -string "png"
  ############################################################################
  # Finder                                                                   #
  ############################################################################
  # Finder: disable window and Get Info animations
  defaults write com.apple.finder DisableAllAnimations -bool true
  # Finder: show hidden files
  defaults write com.apple.finder AppleShowAllFiles YES
  # Finder: show all filename extensions
  defaults write NSGlobalDomain AppleShowAllExtensions -bool true
  # Finder: allow text selection in Quick Look
  defaults write com.apple.finder QLEnableTextSelection -bool true
  # Finder: display full path as Finder window title
  defaults write com.apple.finder _FXShowPosixPathInTitle -bool true
  # When performing a search, search the current folder by default
  defaults write com.apple.finder FXDefaultSearchScope -string "SCcf"
  # Finder: automatically open a new window when a volume is mounted
  defaults write com.apple.frameworks.diskimages auto-open-ro-root -bool true
  defaults write com.apple.frameworks.diskimages auto-open-rw-root -bool true
  defaults write com.apple.finder OpenWindowForNewRemovableDisk -bool true
  # Enable snap-to-grid for icons on the desktop and in other icon views
  /usr/libexec/PlistBuddy -c "Set :DesktopViewSettings:IconViewSettings:arrangeBy grid" ~/Library/Preferences/com.apple.finder.plist
  /usr/libexec/PlistBuddy -c "Set :FK_StandardViewSettings:IconViewSettings:arrangeBy grid" ~/Library/Preferences/com.apple.finder.plist
  /usr/libexec/PlistBuddy -c "Set :StandardViewSettings:IconViewSettings:arrangeBy grid" ~/Library/Preferences/com.apple.finder.plist
  # Increase grid spacing for icons on the desktop and in other icon views
  /usr/libexec/PlistBuddy -c "Set :DesktopViewSettings:IconViewSettings:gridSpacing 100" ~/Library/Preferences/com.apple.finder.plist
  /usr/libexec/PlistBuddy -c "Set :FK_StandardViewSettings:IconViewSettings:gridSpacing 100" ~/Library/Preferences/com.apple.finder.plist
  /usr/libexec/PlistBuddy -c "Set :StandardViewSettings:IconViewSettings:gridSpacing 100" ~/Library/Preferences/com.apple.finder.plist
  # Increase the size of icons on the desktop and in other icon views
  /usr/libexec/PlistBuddy -c "Set :DesktopViewSettings:IconViewSettings:iconSize 64" ~/Library/Preferences/com.apple.finder.plist
  /usr/libexec/PlistBuddy -c "Set :FK_StandardViewSettings:IconViewSettings:iconSize 64" ~/Library/Preferences/com.apple.finder.plist
  /usr/libexec/PlistBuddy -c "Set :StandardViewSettings:IconViewSettings:iconSize 64" ~/Library/Preferences/com.apple.finder.plist
  # Use columns view in all Finder windows by default
  # Four-letter codes for the other view modes: `icnv`, `Nlsv`, `Flwv`
  defaults write com.apple.finder FXPreferredViewStyle -string "clmv"
  # General: enable the warning before emptying the Trash
  defaults write com.apple.finder WarnOnEmptyTrash -bool true
  # Show the ~/Library folder
  chflags nohidden ~/Library
  ############################################################################
  # Dock, Dashboard, and hot corners                                         #
  ############################################################################
  # Dock: minimize windows into their application's icon
  defaults write com.apple.dock minimize-to-application -bool true
  # Speed up Mission Control animations
  defaults write com.apple.dock expose-animation-duration -float 0.1
  # Dock: make icons of hidden applications translucent
  defaults write com.apple.dock showhidden -bool true
  ############################################################################
  # Kill affected applications                                               #
  ############################################################################
  for app in "Dock" "Finder" "SystemUIServer"; do
    killall "${app}" > /dev/null 2>&1
  done
  echo "Done. Note that some of these changes require a logout/restart to take effect."
else
  echo "Skipping ~/.osx evaluation..."
fi
| true |
e4ba4d9de79c6684bf6490a8e24c8b966decdcad | Shell | ulikoehler/Programmierpraktikum | /Solution3/Assignment3/YeastChr1/gnuplot~ | UTF-8 | 498 | 2.515625 | 3 | [] | no_license | #!/bin/bash
# Plot a histogram of open-reading-frame lengths with gnuplot.
#   $1 = maximum length (x-axis upper bound)
#   $2 = histogram bin width
#   $3 = suffix of the lengthDistribution_* data file
#
# Bug fix: the original attempted `data = "..."` (invalid shell — `data` is
# executed as a command) and then `gnuplot < data`, which redirected from a
# file literally named "data"; the script could never run.  A here-document
# now feeds the plot commands to gnuplot directly.
# NOTE(review): the original interpolated ${0} (the script's own path) into
# the data/output file names and repeated ${1}; that almost certainly meant
# positional arguments, so ${2}/${3} are used here — TODO confirm against
# the callers.
gnuplot <<EOF
reset
n=10
max=${1}
min=1
width=${2}
set boxwidth width*0.9
set term png
set output "histogram_${1}_${2}_${3}.png"
set title "length of open reading frames"
set xlabel "length"
set ylabel "count"
set xrange [min:max]
set yrange [0:]
set style fill solid 0.5
set offset graph 0.05,0.05,0.05,0.0
hist(x,width)=width*floor(x/width)+width/2.0
plot "lengthDistribution_${3}" u (hist(\$1,width)):(\$2) smooth freq w boxes lc rgb"green" notitle
quit
EOF
| true |
06f63ae982a101c7ce36821567358a44fa1099ea | Shell | zzmjohn/SLAE | /exam3/access-scasd/make.sh | UTF-8 | 3,703 | 4.1875 | 4 | [] | no_license | #!/usr/bin/env sh
#
# USAGE
# ./make.sh [Egg-Mark]
#
# NOTE
# Egg-Mark must be a plaintext with 8 bytes in length
# If Egg-Mark was not specified, the default one will be used.
#
# To specify a custom payload, simply modify the code of payload.nasm file.
# Alternativly, you can modify PAYLOADCODE= variable down below the code.
#
# NOTE(review): despite the `sh` shebang this script uses bash-only
# features ([[ ]], arrays, pattern matching) — run it with bash.
ARG1=$1
# Accept the egg mark only when its length is exactly 8 (>= 8 and <= 8).
if [ -z "$ARG1" ]; then
	echo " [I] Argument not specified. Using default EGG mark."
	ARG1="Egg-Mark";
elif ! [[ `expr length $ARG1` -ge 8 && `expr length $ARG1` -le 8 ]]; then
	echo " [E] Custom EGG mark must be 8 bytes in length! Exiting."
	exit 1;
else
	echo " [I] Using custom EGG mark: "$ARG1
fi
# Split the 8-byte marks into two 4-byte halves (arrays with 2 elements);
# the hunter stub stores the egg as two 32-bit immediates.
DEFAULTEGG=($(echo -n "Egg-Mark" | sed -e 's/\(....\)/\1\n/g')) # set in hunter.nasm
EGGMARK=$ARG1
NEWEGG=($(echo -n $EGGMARK | sed -e 's/\(....\)/\1\n/g'))
# Uncomment to save EGGMARK in HEX
EGGMARK=$(echo -n $ARG1 | od -A n -t x1 |sed 's/ /\\x/g')
# Cleanup
rm -f shellcode payload.o payload hunter.o hunter
echo " [+] Compiling payload.nasm ..."
nasm -f elf32 -o payload.o payload.nasm
ld -m elf_i386 -o payload payload.o
echo " [+] Compiling hunter.nasm ..."
nasm -f elf32 -o hunter.o hunter.nasm
ld -m elf_i386 -o hunter hunter.o
# objdump's disassembly is reduced to the raw opcode bytes and re-emitted
# as a single "\xNN\xNN..." C string.
echo " [+] Extracting PAYLOAD code from payload ..."
PAYLOADCODE=$(objdump -d ./payload |grep '[0-9a-f]:'|grep -v 'file'|cut -f2 -d:|cut -f1-7 -d' '|tr -s ' '|tr '\t' ' '|sed 's/ $//g'|sed 's/ /\\x/g'|paste -d '' -s)
echo " [+] Adding EGG mark to PAYLOAD ..."
FULL_PAYLOADCODE=$(echo -n ${EGGMARK}${PAYLOADCODE}|sed 's/^/"/' |sed 's/$/"/g')
# Shellcode must be NULL-free or string copies will truncate it.
echo " [+] Checking PAYLOAD code for NULLs ..."
if [[ $FULL_PAYLOADCODE == *00* ]]; then
	echo " [E] Your PAYLOAD code contains 00 (NULL) ! Exiting."
	exit 1
fi
echo " [+] Extracting HUNTER code from hunter ..."
HUNTERCODE=$(objdump -d ./hunter |grep '[0-9a-f]:'|grep -v 'file'|cut -f2 -d:|cut -f1-7 -d' '|tr -s ' '|tr '\t' ' '|sed 's/ $//g'|sed 's/ /\\x/g'|paste -d '' -s|sed 's/^/"/' |sed 's/$/"/g')
# For debugging only
#echo ${DEFAULTEGG[0]}
#echo ${DEFAULTEGG[1]}
#echo ${NEWEGG[0]}
#echo ${NEWEGG[1]}
# Preparing Default egg to HEX form in order to replace it with a New egg
DEFEGG1=$(echo -n ${DEFAULTEGG[0]} | od -A n -t x1 |sed 's/ /\\x/g'|sed 's/\\/\\\\/g')
DEFEGG2=$(echo -n ${DEFAULTEGG[1]} | od -A n -t x1 |sed 's/ /\\x/g'|sed 's/\\/\\\\/g')
# Uncomment to save new EGGMARK in HEX format
NEWEGG1=$(echo -n ${NEWEGG[0]} | od -A n -t x1 |sed 's/ /\\x/g'|sed 's/\\/\\\\/g')
NEWEGG2=$(echo -n ${NEWEGG[1]} | od -A n -t x1 |sed 's/ /\\x/g'|sed 's/\\/\\\\/g')
# Uncomment to save new EGGMARK in Plaintext format
#NEWEGG1=$(echo -n ${NEWEGG[0]})
#NEWEGG2=$(echo -n ${NEWEGG[1]})
# Patch the default egg bytes inside the hunter opcode string with the new egg.
FULL_HUNTERCODE=$(echo -n $HUNTERCODE |sed 's/'$DEFEGG1'/'$NEWEGG1'/g'| sed 's/'$DEFEGG2'/'$NEWEGG2'/g')
echo " [+] Checking HUNTER code for NULLs ..."
if [[ $FULL_HUNTERCODE == *00* ]]; then
	echo " [E] Your HUNTER code contains 00 (NULL) ! Exiting."
	exit 1
fi
# Uncomment to see what will is replaced (default egg with a new one)
#echo $DEFEGG1
#echo $DEFEGG2
#echo $NEWEGG1
#echo $NEWEGG2
#echo $HUNTERCODE
#echo $FULL_HUNTERCODE
# Generate the test harness: garbage arrays surround the payload so the
# hunter genuinely has to search memory for the egg before jumping to it.
cat > shellcode.c << EOF
#include <stdio.h>
#include <string.h>
unsigned char hunter[] = \
$FULL_HUNTERCODE;
unsigned char garbage1[] = \
"Just some garbage here...";
unsigned char payload[] = \
$FULL_PAYLOADCODE;
unsigned char garbage2[] = \
"And some garbage there...";
main()
{
	printf("Hunter Length: %d\n", strlen(hunter));
	printf("Payload Length: %d\n", strlen(payload));
	int (*ret)() = (int(*)())hunter;
	ret();
}
EOF
echo " [+] Compiling shellcode.c ..."
gcc -m32 -fno-stack-protector -z execstack shellcode.c -o shellcode
# Cleanup
rm -f payload.o payload hunter.o hunter
ls -la ./shellcode
echo " [+] All done!"
| true |
b4b48d83ddb99823f21976076a07ee636189a0a3 | Shell | itmpsk/docker-cronicle | /entrypoint.sh | UTF-8 | 1,035 | 3.328125 | 3 | [
"MIT"
] | permissive | #!/bin/sh
# Docker entrypoint for Cronicle: one-time setup on first run, optional
# config import from the data volume, then exec the CMD as PID 1.
ROOT_DIR=/opt/cronicle
CONF_DIR=$ROOT_DIR/conf
BIN_DIR=$ROOT_DIR/bin
LIB_DIR=$ROOT_DIR/lib
# DATA_DIR needs to be the same as the exposed Docker volume in Dockerfile
DATA_DIR=$ROOT_DIR/data
# PLUGINS_DIR needs to be the same as the exposed Docker volume in Dockerfile
PLUGINS_DIR=$ROOT_DIR/plugins
# The env variables below are needed for Docker and cannot be overwritten
export CRONICLE_Storage__Filesystem__base_dir=${DATA_DIR}
export NODE_EXTRA_CA_CERTS=/etc/ssl/certs/ca-certificates.crt
export CRONICLE_echo=1
export CRONICLE_foreground=1
# Only run setup when setup needs to be done
# (.setup_done is a marker file persisted on the data volume)
if [ ! -f $DATA_DIR/.setup_done ]
then
  chown -R cronicle:cronicle /opt/cronicle/
  $BIN_DIR/control.sh setup
  # Keep a pristine copy of the generated config for reference.
  cp $CONF_DIR/config.json $CONF_DIR/config.json.origin
  mkdir -p $PLUGINS_DIR
  touch $DATA_DIR/.setup_done
fi
if [ -f $DATA_DIR/config.json.import ]
then
  chown -R cronicle:cronicle $CONF_DIR
  # Move in custom configuration
  cp $DATA_DIR/config.json.import $CONF_DIR/config.json
fi
# Run cronicle
exec "$@" | true |
4365fde2feb50e84ff51f3935ebc89fc05d4e68c | Shell | ajainvivek/s3-kms | /localstack/scripts/compose.sh | UTF-8 | 453 | 3.34375 | 3 | [] | no_license | #!/bin/bash
# Fail fast and trace commands for easier debugging.
set -ex

# Derive a docker-compose project name from the repository root; an exported
# COMPOSE_PROJECT_NAME wins over the derived default.
PROJECT_ROOT=$(git rev-parse --show-toplevel)
PROJECT_NAME=$(basename "$PROJECT_ROOT")
: "${COMPOSE_PROJECT_NAME:=$PROJECT_NAME}"
COMPOSE_PROJECT_NAME="${COMPOSE_PROJECT_NAME}_playground"
COMPOSE_FILE="$PROJECT_ROOT/localstack/docker-compose.yaml"

# Nothing to do when the compose file is absent; succeed on purpose so
# callers treat a missing playground as a no-op.
if [ ! -f "$COMPOSE_FILE" ]; then
    echo "Warning: $COMPOSE_FILE is not a file"
    exit 0
fi
PROJECT_ROOT=${PROJECT_ROOT} docker-compose -p $COMPOSE_PROJECT_NAME -f $COMPOSE_FILE "$@" | true |
f160c03f67107a3a03f88210a7e3cabc053bf0b5 | Shell | renovate-bot/sample-tester | /examples/convention-tag/test-convention-tag | UTF-8 | 1,478 | 3.609375 | 4 | [
"Apache-2.0"
] | permissive | #! /bin/bash
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Run the convention-tag example through sample-tester, then verify that a
# fixed set of phrases appears in its captured output.  The script may be
# sourced or executed, so pick `return` vs `exit` accordingly.
[[ "${BASH_SOURCE[0]}" != "${0}" ]] && EXIT=return || EXIT=exit

output_file="$(mktemp /tmp/sample-tester.XXXXXXXXX )"

# Work from the repository root (two levels above this script).
pushd "$(dirname ${BASH_SOURCE})/../.." >& /dev/null
sample-tester -v detailed ${FLAGS:---xunit=/tmp/xunit.tag.xml} examples/convention-tag/language.test.yaml examples/convention-tag/language.manifest.yaml >& $output_file
code=$?

# Only inspect the output when the run itself succeeded.
if [[ $code -eq 0 ]] ; then
  for expected_text in \
      'In setup' \
      'In teardown' \
      'Language samples test' \
      'A test defined via yaml directives' \
      "A test defined via 'code'" \
      ;
  do
    if ! grep --silent "$expected_text" "$output_file"; then
      echo "Assertion failed"
      echo "Expected \`$expected_text\` in $output_file"
      code=1
      break
    fi
  done
fi

popd >& /dev/null

${EXIT} $code
| true |
7de50cf98b6c6f17f73551b029631d1bca2fd830 | Shell | shwetap29/Arrayinshellscript | /ArrayPractice/withoutsortarray.sh | UTF-8 | 866 | 3.390625 | 3 | [] | no_license | #!/bin/bash -x
echo "Generating ten random values"
# Fill a 10-element array with random integers in [100, 999].
idx=0
while (( idx < 10 )); do
    array[idx]=$(( RANDOM % 900 + 100 ))
    idx=$(( idx + 1 ))
done
echo "${array[@]}"
# Second-largest value: pass 1 tracks the maximum (max1) and the value it
# displaced (max2); pass 2 promotes the largest element strictly below max1.
max1=0
max2=0
for v in "${array[@]}"; do
    if (( v > max1 )); then
        max2=$max1
        max1=$v
    fi
done
for v in "${array[@]}"; do
    if (( v > max2 && v < max1 )); then
        max2=$v
    fi
done
echo "Second maximum value is : $max2"
# Second-smallest value: mirror image of the above; the sentinels sit just
# above the 100-999 range so any generated value beats them.
min1=1001
min2=1002
for v in "${array[@]}"; do
    if (( v < min1 )); then
        min2=$min1
        min1=$v
    fi
done
for v in "${array[@]}"; do
    if (( v < min2 && v > min1 )); then
        min2=$v
    fi
done
echo "Second minimum value is : $min2"
| true |
d7da83302e4ef7722eaa42cb626af06828a2e57f | Shell | LeszekSwirski/dotfiles | /makeLinks | UTF-8 | 829 | 4.3125 | 4 | [] | no_license | #!/bin/bash
# Create a symlink LINK -> TARGET, idempotently and conservatively:
#   * LINK already points at TARGET         -> silently do nothing
#   * LINK is a symlink elsewhere (even dangling) -> warn, leave it alone
#   * LINK is an existing file/directory    -> warn, leave it alone
#   * LINK does not exist                   -> create the link
# Arguments: $1 - link path to create, $2 - target the link should point at.
dolink() {
  local LINK=$1
  local TARGET=$2
  # BUGFIX: test -h before -e.  A dangling symlink fails -e, so the original
  # fell through to `ln -s`, which then failed with "File exists".  All
  # expansions are quoted so paths containing spaces work.
  if [ -h "${LINK}" ] || [ -e "${LINK}" ] ; then
    if [ -h "${LINK}" ] ; then
      if [ "$(readlink -e "${LINK}")" != "$(readlink -e "${TARGET}")" ] ; then
        echo "Warning: The link ${LINK} already exists, but links to $(readlink -e "${LINK}") -- remove the link and run this script again"
      fi
    else
      echo "Warning: ${LINK} is an existing file -- remove it and run this script again"
    fi
  else
    echo "Linking ${LINK} -> ${TARGET}"
    ln -s "${TARGET}" "${LINK}"
  fi
}
# Resolve the dotfiles repository location from this script's own path.
DOTFILES=$(dirname "$0" | xargs readlink -e)

# Link into $HOME by default, or into the directory given as $1.
if [ -n "$1" ] ; then
    LINKDIR=$(readlink -e "$1")
else
    LINKDIR=${HOME}
fi

# One symlink per entry shipped under files/ (ls -A keeps dotfiles, drops . and ..).
for i in $(ls -A "$DOTFILES/files") ; do
    dolink "${LINKDIR}/$i" "${DOTFILES}/files/$i"
done
| true |
31948a39d83c924b33fad291e6d69be710867be7 | Shell | papamoose/pxe | /bin/create-tftp-server | UTF-8 | 6,590 | 3.6875 | 4 | [] | no_license | #!/bin/bash
# --- Global configuration for the TFTP/PXE tree build ----------------------
# NOTE(review): $1 is captured here but `distro` is immediately shadowed by
# the loops in main(); this assignment appears vestigial -- confirm.
distro=$1
# Host/domain the netboot web vhost answers on (embedded into PXE entries).
servername="netboot"
serverdomain="local"
# TFTP root served by tftpd-hpa; everything below is generated beneath it.
tftpdir="/srv/tftp"
# Debian/Ubuntu package mirror used for kernels and preseed files.
mirror="mirror.anl.gov"
# generate md5 password with salt
# printf "lamepassword" | mkpasswd -S `pwgen -1` -s -m md5
# used for root password in all preseed files
md5pass="\$1\$ahwobeeh\$wDnkfRQ5COUC5BjnOekoQ0"
# Partitioning recipe names; one preseed + PXE menu entry per recipe.
partitions="100pct-lvm-atomic-sda 100pct-raid-lvm-atomic-sda+sdb 50pct-lvm-multi-sda 80pct-lvm-advanced-partitions-sda 90pct-lvm-multi-sda"
# Releases to build, keyed by distro name via the associative array below.
declare -a debian=(unstable stable squeeze wheezy)
declare -a ubuntu=(hardy lucid natty oneiric precise quantal raring)
declare -A distros=( ["ubuntu"]="${ubuntu[@]}" ["debian"]="${debian[@]}" )
architectures="i386 amd64"
setupbase(){
mkdir -p $tftpdir/bin
mkdir -p $tftpdir/kernel
mkdir -p $tftpdir/pxelinux.cfg
mkdir -p $tftpdir/www
apt-get install -y syslinux tftpd-hpa apache2
syslinuxdir="/usr/lib/syslinux"
ln -s $syslinuxdir/menu.c32 $tftpdir/menu.c32
ln -s $syslinuxdir/vesamenu.c32 $tftpdir/vesamenu.c32
ln -s $syslinuxdir/pxelinux.0 $tftpdir/pxelinux.0
ln -s $syslinuxdir/memdisk $tftpdir/memdisk
cat > $tftpdir/www/robots.txt <<EOF
User-agent: *
Disallow: /
EOF
cat > /etc/default/tftpd-hpa <<EOF
# /etc/default/tftpd-hpa
TFTP_USERNAME="tftp"
TFTP_DIRECTORY="/srv/tftp"
TFTP_ADDRESS="0.0.0.0:69"
TFTP_OPTIONS="--secure"
EOF
cat > /etc/apache/sites-enabled/10-tftp <<EOF
<VirtualHost *:80>
ServerAdmin webmaster@localhost
ServerName "$servername.$serverdomain"
ServerAlias $servername
DocumentRoot /srv/tftp/www
<Directory /srv/tftp/www/>
Options Indexes FollowSymLinks MultiViews
AllowOverride None
Order allow,deny
allow from all
</Directory>
ErrorLog ${APACHE_LOG_DIR}/error.log
LogLevel warn
CustomLog ${APACHE_LOG_DIR}/access.log combined
</VirtualHost>
EOF
/etc/init.d/tftpd-hpa restart
/etc/init.d/apache2 restart
}
# Fetch the official netboot kernel/initrd pair for one distro/release/arch
# and additionally build a "non-free" initrd with Debian's non-free firmware
# blobs injected into lib/firmware.
# Arguments: $1 distro, $2 release, $3 arch.  Uses globals: tftpdir, mirror.
downloadkernel(){
 distro=$1
 release=$2
 arch=$3
 wd="/tmp/initrd"
 mkdir -p "$wd"
 mkdir -p "$tftpdir/kernel/$distro/$release/$arch/official"
 mkdir -p "$tftpdir/kernel/$distro/$release/$arch/non-free"
 # Scrape the Debian non-free firmware archive for the newest tarball name;
 # Debian's pool carries the most recent non-free drivers.
 firmwareurl="http://ftp.debian.org/debian/pool/non-free/f/firmware-nonfree"
 firmwarename=$(curl -s $firmwareurl/ | sed -e :a -e 's/<[^>]*>//g;/</N;//ba' | grep ".tar.gz" | sed 's/gz/gz /' | awk '{print $2 " " $3 " " $1}' | tail -n1 | awk '{print $3}')
 echo "Downloading $firmwarename..."
 wget -c -q -O "$wd/$firmwarename" "$firmwareurl/$firmwarename"
 # Unpack the firmware so it can be overlaid into the initrd below.
 tar -xzf "$wd/$firmwarename" -C "$wd"
 TAR_DIR="firmware-nonfree/linux-nonfree"
 for file in linux initrd.gz
 do
  echo "Downloading $distro $release $arch $file..."
  wget -q -c -P "$tftpdir/kernel/$distro/$release/$arch/official/" "http://$mirror/$distro/dists/$release/main/installer-$arch/current/images/netboot/$distro-installer/$arch/$file"
  echo "Copying kernel to non-free directory..."
  cp "$tftpdir/kernel/$distro/$release/$arch/official/linux" "$tftpdir/kernel/$distro/$release/$arch/non-free/linux"
  if [[ "$file" == "initrd.gz" ]]; then
   # Unpack the stock ramdisk, overlay the non-free firmware, repack it,
   # and install the result next to the official one.
   cp "$tftpdir/kernel/$distro/$release/$arch/official/initrd.gz" "$wd/initrd.gz"
   extracteddir=$wd/extracted
   mkdir -p "$extracteddir"
   cd "$extracteddir"
   echo "Extracting the ramdisk (initrd.gz)"
   gunzip --quiet <"$wd/initrd.gz" | cpio --extract --preserve --quiet
   echo "Copying non-free firmware to lib/firmware in initrd"
   rsync -avq "$wd/$TAR_DIR/" "$extracteddir/lib/firmware/"
   echo -n "Creating new initrd.gz..."
   cd "$extracteddir"
   find . | cpio --create --format='newc' | gzip > "$wd/initrd_non-free.gz"
   echo "Copying non-free ramdisk to non-free directory..."
   cp "$wd/initrd_non-free.gz" "$tftpdir/kernel/$distro/$release/$arch/non-free/initrd.gz"
   rm -r "$extracteddir"
   rm "$wd/initrd.gz"
   # NOTE(review): CURRENT_DIR is never set anywhere in this script, so this
   # expands to a bare `cd` (i.e. $HOME); kept unquoted and as-is on purpose
   # to preserve the original behaviour.
   cd $CURRENT_DIR
  fi
 done
}
# Generate one debian-installer preseed file per partitioning recipe for the
# given distro/release/arch under the web root, splicing the matching recipe
# (fetched from the papamoose/pxe repository) into the middle of each file.
# Arguments: $1 distro, $2 release, $3 arch.
# Uses globals: tftpdir, partitions, mirror, md5pass.
createpreseed(){
 distro=$1
 release=$2
 arch=$3
 wd="$tftpdir/www/$distro/$release/$arch"
 mkdir -p "$wd"
 for part in $partitions
 do
  cat > "$wd/$part" <<EOF
d-i debian-installer/locale string en_US
d-i console-keymaps-at/keymap select us
d-i keyboard-configuration/xkb-keymap select us
d-i netcfg/choose_interface select auto
d-i netcfg/get_hostname string unassigned-hostname
d-i netcfg/get_domain string unassigned-domain
d-i hw-detect/load_firmware boolean true
d-i mirror/country string manual
d-i mirror/http/hostname string $mirror
d-i mirror/http/directory string /$distro
#d-i mirror/http/proxy string
d-i mirror/suite string $release
d-i passwd/root-password-crypted password $md5pass
d-i passwd/make-user boolean false
d-i clock-setup/utc boolean true
d-i time/zone string US/Central
d-i clock-setup/ntp boolean true
EOF
  # BUGFIX: the loop variable is $part; the original referenced the undefined
  # $partition here, so every recipe request silently fetched a bogus URL and
  # the partitioning section never made it into the preseed.
  wget -q -c --no-check-certificate -O - https://raw.github.com/papamoose/pxe/master/www/d-i/partition-recipies/$part | cat - >> "$wd/$part"
  cat >> "$wd/$part" <<EOF
d-i apt-setup/non-free boolean true
d-i apt-setup/contrib boolean true
tasksel tasksel/first multiselect standard
d-i pkgsel/include string openssh-server build-essential
d-i pkgsel/upgrade select full-upgrade
popularity-contest popularity-contest/participate boolean false
d-i finish-install/reboot_in_progress note
d-i cdrom-detect/eject boolean false
EOF
 done
}
# Append a pxelinux menu entry (LABEL stanza) for every partition recipe and
# firmware flavour (non-free / official) of the given distro/release/arch.
# Arguments: $1 distro, $2 release, $3 arch.
# Uses globals: tftpdir, partitions, servername, serverdomain.
createpxeentry(){
 distro=$1
 release=$2
 arch=$3
 for part in $partitions
 do
  for status in non-free official
  do
   wd="$tftpdir/pxelinux.cfg/$distro/$release/$arch/$status"
   mkdir -p "$wd"
   # BUGFIX: the kernel parameter is netcfg/dhcp_timeout; the original wrote
   # the misspelled "netdfg", which the installer would silently ignore.
   cat >> "$wd/default" <<EOF
LABEL "$distro-$release-$arch-$status-$part"
 MENU LABEL "$distro $release $status $part"
 LINUX "kernel/$distro/$release/$arch/$status/linux"
 INITRD "kernel/$distro/$release/$arch/$status/initrd.gz"
 APPEND url="http://$servername.$serverdomain/$distro/$release/$arch/$status/$part" interface=auto netcfg/dhcp_timeout=60 priority=critical auto=true
EOF
  done
 done
}
# Drive the whole build: for every configured distro, release and
# architecture, fetch the kernels, write the pxelinux menu entries and
# generate the preseed files.  setupbase stays commented out so the one-time
# host configuration is only run deliberately.
main(){
 # setupbase
 for distro in "${!distros[@]}"; do
  for release in ${distros["$distro"]}; do
   for arch in $architectures; do
    downloadkernel "$distro" "$release" "$arch"
    createpxeentry "$distro" "$release" "$arch"
    createpreseed "$distro" "$release" "$arch"
   done
  done
 done
}
main
| true |
5a689ce28bf3b1f01e42d73e8f0a44c3046fd907 | Shell | oneto1/Cb-bois-backup | /firmware-util.sh | UTF-8 | 537 | 2.703125 | 3 | [] | no_license | #!/bin/bash
#
# This script offers provides the ability to update the
# Legacy Boot payload, set boot options, and install
# a custom coreboot firmware for supported
# ChromeOS devices
#
# Created by Mr.Chromebox <mrchromebox@gmail.com>
#
# May be freely distributed and modified as needed,
# as long as proper attribution is given.
#
#ensure output of system tools in en-us for parsing
export LC_ALL=C
#get support scripts
# The sourced files are expected to live next to this script and provide
# prelim_setup, menu_fwupdate and the firmware helper functions used below.
source ./sources.sh
source ./firmware.sh
source ./functions.sh
#do setup stuff
prelim_setup
#show menu
menu_fwupdate
| true |
b556d41a817b122b3c8b9c0dbfc911044dcbe304 | Shell | gravitational/stolon-app | /resources/install.sh | UTF-8 | 586 | 2.75 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/sh
# Install the stolon Helm chart and wait for the post-install hardening job
# to appear and complete.
set -o errexit
set -o xtrace

# Forward the optional custom-build overrides file to helm when present.
export EXTRA_PARAMS=""
if [ -f /var/lib/gravity/resources/custom-build.yaml ]; then
    export EXTRA_PARAMS="--values /var/lib/gravity/resources/custom-build.yaml"
fi

# errexit is deliberately suspended around the install step (the original
# tolerates a non-zero helm exit here), then restored.
set +e
/usr/local/bin/helm install /var/lib/gravity/resources/charts/stolon \
    --values /var/lib/gravity/resources/custom-values.yaml $EXTRA_PARAMS \
    --name stolon
set -e

# Poll (up to 5 minutes) until the hardening job object exists, then block
# until it reports completion.
timeout 5m sh -c "while ! /usr/local/bin/kubectl get job stolon-postgres-hardening; do sleep 10; done"
/usr/local/bin/kubectl wait --for=condition=complete --timeout=5m job/stolon-postgres-hardening
| true |
9b9d9447fe2e03af66cea45752ce88202b9a3190 | Shell | dikers/ocr-crnn-chinese | /crnn_ctc/shell/generation_en_tfrecord.sh | UTF-8 | 2,877 | 3.046875 | 3 | [] | no_license | #!/bin/bash
#!/usr/bin/env bash
# Build English (mjsynth) train/valid lists and TFRecords.
#   $1 - number of annotation lines to sample, $2 - validation split ratio.
if [ $# -ne 2 ]; then
    echo "Usage: $0 sample_count(1000) 'val_rate(0.2)' "
    echo "Usage: $0 10000 0.2' "
    exit
fi

startTime=$(date +%Y%m%d-%H:%M)
startTime_s=$(date +%s)

export PYTHONPATH=../../

# Adjust to your own path; dataset download:
# http://www.robots.ox.ac.uk/~vgg/data/text/mjsynth.tar.gz
DATA_SRC_DIR='../../output/mjsynth_data/mnt/ramdisk/max/90kDICT32px/'
DATA_TARGET_DIR='./data_en/'

# A missing target tree means the dataset was never downloaded: tell the user.
if [ ! -d ${DATA_TARGET_DIR} ]; then
    echo '请先下载 mjsynth 训练数据集 地址如下: '
    echo 'http://www.robots.ox.ac.uk/~vgg/data/text/mjsynth.tar.gz'
    echo '保存路径: output/mjsynth_data/mnt/ramdisk/max/90kDICT32px/ '
fi

# Create a fresh output tree, or drop the stale tfrecords when it exists.
if [ ! -d ${DATA_TARGET_DIR} ]; then
    mkdir ${DATA_TARGET_DIR}
    mkdir ${DATA_TARGET_DIR}'tfrecords'
else
    echo ${DATA_TARGET_DIR} "文件夹已经存在"
    rm -fr ${DATA_TARGET_DIR}'tfrecords'
fi

# Sample the first $1 annotation lines as the working list.
head -n $1 ${DATA_SRC_DIR}'annotation_train.txt' > ${DATA_TARGET_DIR}'image_list.txt'
echo "start --------- generate image --"

# Split the list into train/valid counts according to the $2 ratio.
count=$(wc -l ${DATA_TARGET_DIR}'image_list.txt' | awk '{print $1}')
echo 'val_rate: ' + $2
val_count=$(echo "scale=0; $count * $2" | bc)
val_count=$(echo $val_count | awk -F. '{print $1}')
train_count=$((count - val_count))
test_count=20
echo 'total count: ' + $count
echo 'val count: ' $val_count
echo 'train count: ' $train_count
echo 'test count: ' ${test_count}

head ${DATA_TARGET_DIR}'image_list.txt' -n $train_count > ${DATA_TARGET_DIR}'image_list_train.txt'
tail ${DATA_TARGET_DIR}'image_list.txt' -n $val_count > ${DATA_TARGET_DIR}'image_list_valid.txt'

# Normalise the label files for both splits.
python3 ../../utils/change_label_text.py \
    --input_file ${DATA_TARGET_DIR}'image_list_train.txt' \
    --output_dir ${DATA_TARGET_DIR}'label'
python3 ../../utils/change_label_text.py \
    --input_file ${DATA_TARGET_DIR}'image_list_valid.txt' \
    --output_dir ${DATA_TARGET_DIR}'label'

# A small fixed-size test list taken from the head of the validation split.
head ${DATA_TARGET_DIR}'label/image_list_valid.txt' -n 20 > ${DATA_TARGET_DIR}'label/image_list_test.txt'

# Ship the English character maps alongside the generated lists.
cp '../../sample_data/char_map_en.json' ${DATA_TARGET_DIR}'char_map.json'
cp '../../sample_data/char_dict_en.json' ${DATA_TARGET_DIR}'char_dict.json'
cp '../../sample_data/ord_map_en.json' ${DATA_TARGET_DIR}'ord_map.json'

echo "start --------- generate tfrecord "
python ../data_provider/write_tfrecord.py \
    --dataset_dir=${DATA_SRC_DIR} \
    --char_dict_path=${DATA_TARGET_DIR}'char_map.json' \
    --anno_file_path=${DATA_TARGET_DIR}'label/image_list_train.txt' \
    --dataset_flag='train' \
    --save_dir=${DATA_TARGET_DIR}'tfrecords'
python ../data_provider/write_tfrecord.py \
    --dataset_dir=${DATA_SRC_DIR} \
    --char_dict_path=${DATA_TARGET_DIR}'char_map.json' \
    --anno_file_path=${DATA_TARGET_DIR}'label/image_list_valid.txt' \
    --dataset_flag='valid' \
    --save_dir=${DATA_TARGET_DIR}'tfrecords'

endTime=$(date +%Y%m%d-%H:%M)
endTime_s=$(date +%s)
echo "$startTime ---> $endTime" | true |
3555d8b82c888411a1cb934f7c30f1adce7491db | Shell | Kuniwak/jenkins-plugin-fixator | /bin/internal/install-plugin | UTF-8 | 487 | 3.390625 | 3 | [
"MIT"
] | permissive | #!/bin/bash -eu
set -o pipefail
# NOTE: This command do NOT resolve dependencies.
# So, you should install all depended plugins.
# Install a single Jenkins plugin at an exact version through the Jenkins CLI.
# NOTE: this command does NOT resolve dependencies; install all depended-upon
# plugins first.
# Arguments: $1 - plugin short name, $2 - plugin version.
function install-plugin() {
  local base_dir plugin hpi_url
  base_dir="$(cd "$(dirname "$0")"; pwd)"
  plugin="$1"
  # Resolve the .hpi download URL for this name/version pair.
  hpi_url="$("$base_dir/jenkins-plugin-hpi-url" "$plugin" "$2")"
  "$base_dir/exec-jenkins-cli" install-plugin "$hpi_url"
}
install-plugin "$@"
| true |
56f4eb1575f97d24f246467734b8d0bddc51d6db | Shell | muyukha/terraform-packet-kubernetes | /userdata/files/symlink-persistent-dirs.sh | UTF-8 | 645 | 3.84375 | 4 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env sh
# Relocate a whitelist of directories into MOVE_DIR and leave a symlink into
# DATA_DIR in their place, so container state survives restarts.
# PERSIST_DIRS: whitespace-separated list of absolute directory paths (required).
set -xe

MOVE_DIR="/moved"
DATA_DIR="/DATA"

# BUGFIX: the original used a subshell `( ...; exit 1 )`, so the exit only
# left the subshell and the script kept going only because set -e happened to
# catch the compound's status.  A brace group aborts reliably, and the
# diagnostic now goes to stderr.
[ -n "${PERSIST_DIRS}" ] || {
  echo "Error: No PERSIST_DIRS provided" >&2
  exit 1
}

mkdir -p "${MOVE_DIR}"

for dir in ${PERSIST_DIRS}; do
  DIR_NAME="$(basename "${dir}")"
  DEST_DIR="${MOVE_DIR}/${DIR_NAME}"
  if [ -d "${dir}" ] && [ ! -L "${dir}" ]
  then
    if [ -d "${DEST_DIR}" ]
    then
      # Destination already populated: merge without clobbering (-n).
      cp -an "${dir}"/* "${DEST_DIR}"
    else
      mv "${dir}" "${DEST_DIR}"
    fi
  else
    if [ -L "${dir}" ]
    then
      # Already converted on a previous run; nothing to do.
      continue
    else
      # Path absent entirely: just point it into the data volume.
      ln -s "${DATA_DIR}/${DIR_NAME}" "${dir}"
    fi
  fi
done
| true |
9c490332ae8b9cac6e0318a108d99880526c530a | Shell | LabNeuroCogDevel/ALFF_MEG_MR | /preproc_pet.bash | UTF-8 | 1,075 | 3.03125 | 3 | [] | no_license | #!/usr/bin/env bash
# For one subject/date, run preprocessFunctional over every session directory
# (1_*/2_*) that has not completed yet, using the bet/warp structural inputs
# recorded in each session's structuralFiles.txt.
set -e
# Report any non-zero exit on the way out.
trap 'e=$?; [ $e -ne 0 ] && echo "$0 exited with error $e" >&2' EXIT
sd=$1
[ -z "$sd" ] && echo "USAGE: $0 subj_date" >&2 && exit 1
sd=$(basename $sd)
for dir in /Volumes/Zeus/ALFF/full/subjs_test_retest/$sd/[12]_*; do
 cd $dir
 # Session already preprocessed: skip it.
 [ -r .preprocessFunctional_complete ] && continue
 # BUGFIX: the diagnostics below referenced the never-set $sid; use $sd so
 # the messages actually identify the subject.
 [ ! -r structuralFiles.txt ] && echo "$sd: no $(pwd)/structuralFiles.txt file!" >&2 && continue
 read bet warp < structuralFiles.txt
 [ -z "$bet" -o -z "$warp" ] && echo "$sd: no $(pwd)/structuralFiles.txt does not have bet and warp!" >&2 && continue
 [ ! -r "$bet" -o ! -r "$warp" ] && echo "$sd: missing bet/warp $bet/$warp" >&2 && continue
 #[ -r .preproc_cmd ] && rm .preproc_cmd
 yes | preprocessFunctional \
  -tr 1.5 \
  -4d rest[12].nii.gz \
  -mprage_bet $bet \
  -warpcoef $warp \
  -4d_slice_motion \
  -custom_slice_times /Volumes/Phillips/mMR_PETDA/scripts/sliceTimings.1D \
  -template_brain MNI_2.3mm \
  -no_hp \
  -nuisance_compute 6motion,rx,ry,rz,tx,ty,tz,wm,csf,gs,d6motion,drx,dry,drz,dtx,dty,dtz,dwm,dcsf,dgs
done
| true |
8ea1f8ed40af6e052674aba273a3c995d938bd2b | Shell | freebsd/freebsd-ports | /net/nncp/files/nncp-caller.in | UTF-8 | 620 | 3.296875 | 3 | [
"BSD-2-Clause"
] | permissive | #!/bin/sh
# PROVIDE: nncp-caller
# REQUIRE: DAEMON NETWORKING FILESYSTEMS
# KEYWORD: shutdown
#
# Add these lines to /etc/rc.conf.local or /etc/rc.conf
# to enable this service:
#
# nncp_caller_enable (bool): Set to NO by default.
# Set it to YES to enable nncp-caller.
# nncp_caller_config (path): Set to %%PREFIX%%/etc/nncp.hjson by default.
# Standard FreeBSD rc.d boilerplate: rc.subr supplies load_rc_config and
# run_rc_command.  The PROVIDE/REQUIRE/KEYWORD header above is parsed by
# rcorder and must not be edited casually.
. /etc/rc.subr
name=nncp_caller
rcvar=nncp_caller_enable
load_rc_config $name
# Defaults, overridable from rc.conf.
: ${nncp_caller_enable:="NO"}
: ${nncp_caller_config="%%PREFIX%%/etc/nncp.hjson"}
command=%%PREFIX%%/bin/nncp-caller
# Trailing '&' backgrounds the process, since nncp-caller stays in the
# foreground by itself.
command_args="-quiet -cfg $nncp_caller_config &"
run_rc_command "$1"
| true |
3357a7eb582c762f6cafbf6a22ae1babb19c7833 | Shell | mstksg/log.sh | /log.sh | UTF-8 | 3,539 | 4.3125 | 4 | [] | no_license | #!/bin/bash
# Base name of the log file searched for (as <name>.log) up the directory tree.
DEFAULTLOGNAME=".log"
VERSION="1.0.0"
usage() {
cat <<-ENDOFUSAGE
$(basename $0) [-cCetsh] [-f filename] [-d directory] [message]
Appends a line to a log file with a timestamp. Basically, searches up the
directory tree until the first valid log file is found.
Message can also be piped in from standard input.
Remember to escape message for special characters like !, ", ', and *
-f [filename]
Filename (without extension) of the log file.
Default: ".log"
-l
Displays the log file, and doesn't write any messages.
-e
Opens log file for editing using the \$EDITOR environment variable.
-c
Creates a log file in the current directory if not already there.
-d [directory]
Overrides default searching mechanism and uses the log from the
specified directory.
Default: Automatic
-C
Clears contents of found logfile. Asks for confirmation.
-t
Inserts a "[ ]" check box before the log line, indicating a "todo".
-s
silent (does not display log file name and path)
-h
Displays this message
Written by Justin Le (justin@jle.im) 2013
Version $VERSION
ENDOFUSAGE
exit 0
}
# Runtime state; the flags below are toggled by the getopts loop that follows.
CALLINGDIR="$(pwd)"
LOGNAME="$DEFAULTLOGNAME"
CREATE=""
SHOW=""
EDIT=""
FOUND_LOG=""
CLEAR=""
SILENT=""
TODO=""
# Parse command-line flags (see usage above).
# BUGFIX: 'd' now carries a ':' in the optstring.  The -d flag is documented
# as "-d [directory]" and its handler reads $OPTARG, but the original
# optstring declared 'd' as argument-less, so the directory was never
# consumed and FOUND_LOG was left empty.
while getopts ":f:d:lecCsth" Option
do
  case $Option in
    f)  # log file base name (without the .log extension)
      LOGNAME="$OPTARG";;
    d)  # explicit directory: bypass the upward search
      FOUND_LOG="$OPTARG";;
    l)  # display the log instead of writing to it
      SHOW=1;;
    e)  # open the log in $EDITOR
      EDIT=1;;
    c)  # create a log file here if absent
      CREATE=1;;
    C)  # clear the found log (with confirmation)
      CLEAR=1;;
    s)  # suppress the "Logged in ..." banner
      SILENT=1;;
    t)  # prefix the entry with a "[ ]" todo checkbox
      TODO="[ ] ";;
    h)
      usage
      exit 1;;
  esac
done
shift $(($OPTIND - 1))
RAWMSG="$*"
if [[ -z "$SHOW$CREATE$CLEAR$EDIT$RAWMSG" ]]; then
stdin="$(ls -l /proc/self/fd/0)"
stdin="${stdin/*-> /}"
if [[ "$stdin" =~ ^/dev/pts/[0-9] ]]; then
echo "Enter message here: (CTRL+D/EOF to end)"
fi
RAWMSG="$(cat /dev/stdin)"
fi
if [[ -z "$SHOW$CREATE$CLEAR$EDIT$RAWMSG" ]]; then
echo "Cannot log blank message."
echo " $(basename $0) -h for help"
exit 1
fi
if [[ -n "$CREATE" ]]; then
LOGPATH="$LOGNAME.log"
if [[ -n "$FOUND_LOG" ]]; then
LOGPATH="$FOUND_LOG/$LOGPATH"
fi
touch "$LOGPATH"
chmod 600 "$LOGPATH"
fi
while [[ -z "$FOUND_LOG" ]]; do
if [[ -e "$LOGNAME.log" ]]; then
FOUND_LOG="$(pwd)/$LOGNAME.log"
elif [[ "$(pwd)" == "/" ]]; then
echo "No writable log file $LOGNAME.log found in any parent directory."
echo "Run $(basename $0) -c to create a log file in this directory,"
echo " or $(basename $0) -cd [dir] to specify directory of log file."
exit 1
else
cd ../
fi
done
if [[ -n "$CLEAR" ]]; then
read -p "Clear contents of $FOUND_LOG? (y/N) " -n 1 -r
echo ""
if [[ $REPLY =~ ^[Yy]$ ]]; then
cat /dev/null > "$FOUND_LOG"
echo "$FOUND_LOG cleared."
else
echo "Clearing aborted."
fi
exit 0
fi
CURRDIR=$(pwd)
RELPATH=${CALLINGDIR#$CURRDIR}
PATHSTR=""
if [[ -n "$RELPATH" ]]; then
PATHSTR="(./${RELPATH:1}) "
fi
if [[ -z "$SHOW$EDIT" && -n "$RAWMSG" ]]; then
echo -e "$RAWMSG" | while read line; do
MESSAGE="[$( date )]\t$TODO$PATHSTR$line"
echo -e "$MESSAGE" >> "$FOUND_LOG"
echo -e "$MESSAGE"
done
if [[ -z "$SILENT" ]]; then
echo "Logged in $FOUND_LOG"
fi
exit 0
fi
if [[ -n "$EDIT" ]]; then
if [[ -z "$SILENT" ]]; then
echo "Log file: $FOUND_LOG"
fi
$EDITOR "$FOUND_LOG"
fi
if [[ -n "$SHOW" ]]; then
if [[ -z "$SILENT" ]]; then
echo "Log file: $FOUND_LOG"
fi
cat "$FOUND_LOG"
fi
| true |
14ef5af9c217170710de612adb55108e97ee6405 | Shell | rkoteck/ansible_int_training | /samples/edit | UTF-8 | 100 | 2.703125 | 3 | [] | no_license | #!/bin/bash
# Tiny wrapper: open the given file in gedit, discarding its console noise.
if [[ -z "$1" ]]; then
    echo "Usage: edit file-to-edit"
    exit
fi
gedit "$1" 2>/dev/null
| true |
29b85a4dc0484b80a4e5b8e5be55f8f108785585 | Shell | uklinux/picam | /create_video_daily.sh | UTF-8 | 322 | 2.984375 | 3 | [] | no_license | #!/bin/bash
# Stitch all of today's per-chunk AVI clips for this host into one daily
# video with mencoder, then delete the merged source chunks.
cd /var/tmp/front/vid
DAY=`date +%a_%d_%b_%y`
# NOTE(review): DAY1 is computed but never used below -- confirm before removal.
DAY1=`date +%Y%m%d`
DAY2=`date +%a_%d_%b_%y`
# Wait for the last capture chunk of the day to finish writing.
sleep 1500
HOST=`hostname`
DIR=/var/tmp/${HOST}
# Chunk files are named <day>_<time>_<host>.avi; oldest first for encoding.
FILES=`ls -tr ${DAY}_*_${HOST}.avi|tr '\n' ' '`
VID=${DAY}_${HOST}.avi
sleep 30
mencoder -ovc copy -forceidx ${FILES} -o ${VID}
# Remove the four-character-timestamped chunks that were just merged.
rm -f ${DIR}/vid/${DAY2}_????_${HOST}.avi
| true |
81d1893c22b6e0f09e35f7c15bccde756c08ee32 | Shell | linuxfhy/auto_test | /semiauto.v0.2/tests/demo/tc_demo_pos002.sh | UTF-8 | 786 | 3.234375 | 3 | [] | no_license | #!/usr/bin/bash
#
# Copyright (c) 2017, Inspur. All rights reserved.
#
################################################################################
#
# __stc_assertion_start
#
# ID: demo/tc_demo_pos002
#
# DESCRIPTION:
# Demo test case pos002
#
# STRATEGY:
# 1. issue "uname -a", expect to pass
# 2. issue "ls /tmp/OopsXXX", expect to fail
#
# __stc_assertion_end
#
################################################################################
# Resolve test identity and suite root relative to this script's location.
NAME=$(basename $0)
CDIR=$(dirname $0)
TMPDIR=${TMPDIR:-"/tmp"}
TSROOT=$CDIR/../../
# Suite libraries provide tc_start/tc_xres, the RUN_* wrappers and the
# STF_* result codes used below.
source $TSROOT/lib/libstr.sh
source $TSROOT/lib/libcommon.sh
source $TSROOT/config.vars
tc_start $0
# Report the final status through the framework on every exit path.
trap "tc_xres \$?" EXIT
# Step 1: a command expected to succeed.
RUN_POS uname -a || exit $STF_FAIL
# Ensure the fixture file is absent (result intentionally ignored).
RUN_NEU rm -f /tmp/OopsXXX
# Step 2: a command expected to fail.
RUN_NEG ls /tmp/OopsXXX || exit $STF_FAIL
exit $STF_PASS
| true |
acec59a7d10289a5db55b82a2677c7ecd874a0c3 | Shell | infogrind/checkurl | /test.sh | UTF-8 | 663 | 3.59375 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Smoke-test checkurl.sh: URLs in LEGIT are assumed permanently reachable and
# must succeed; URLs in BOGUS must fail.
LEGIT="https://www.google.com https://www.facebook.com \
https://www.twitter.com"
BOGUS="http://www.asiojrandomblabla.com http://eweas.sdfs.net \
http://www33.bumbalabalabumm.org"

ERROR=0

# Reachable URLs: checkurl.sh must report success.
for url in $LEGIT; do
  echo -n "Testing $url..."
  if ./checkurl.sh "$url"; then
    echo OK
  else
    echo FAIL
    ERROR=1
  fi
done

# Bogus URLs: checkurl.sh must report failure.
for url in $BOGUS; do
  echo -n "Testing $url..."
  if ./checkurl.sh "$url"; then
    echo FAIL
    ERROR=1
  else
    echo OK
  fi
done

echo ""
if [ $ERROR -eq 1 ]; then
  echo "Some tests failed!"
else
  echo "All tests passed!"
fi
| true |
8c4fb9f8e59f87f7e4bdf51bc37fd8435907b0b4 | Shell | petronny/aur3-mirror | /python2-scruffy-git/PKGBUILD | UTF-8 | 1,066 | 2.96875 | 3 | [] | no_license | # Maintainer: kpj <kpjkpjkpjkpjkpjkpj@com.googlemail>
__pkgname="scruffy"
pkgname="python2-${__pkgname}-git"
pkgver=20120827
pkgrel=1
pkgdesc="Creates UML diagrams using yUML-like (http://yuml.me) syntax"
arch=('any')
license=('MIT')
provides=(${__pkgname})
conflicts=('${__pkgname}')
url="https://github.com/aivarsk/scruffy/"
depends=('graphviz' 'librsvg' 'plotutils' 'python2' 'python-imaging')
makedepends=('git')
_gitroot="git://github.com/aivarsk/${__pkgname}.git"
_gitname="${__pkgname}"
build() {
cd "$srcdir"
msg "Connecting to GIT server...."
if [[ -d "$_gitname" ]]; then
cd "$_gitname" && git pull origin
msg "The local files are updated."
else
git clone "$_gitroot" "$_gitname"
fi
msg "GIT checkout done or server timeout"
msg "Starting build..."
rm -rf "$srcdir/$_gitname-build"
git clone "$srcdir/$_gitname" "$srcdir/$_gitname-build"
cd "$srcdir/$_gitname-build"
# Hinting everything at python2
sed -i 's?^#!/usr/bin/env python$?\02?' bin/* suml/*
}
package() {
cd "$srcdir/$_gitname-build"
python2 setup.py install --root="$pkgdir/" --optimize=1
}
| true |
850dd01a60128891693d115fc92d4b4e8f241471 | Shell | mmelqonyan/Stepanavan | /Aren Grigoryan/04_03/script3.sh | UTF-8 | 163 | 3.59375 | 4 | [] | no_license | #!/bin/bash
# Prompt for a file name on stdin and report whether that file exists inside
# the directory given as $1.
echo "Enter name of your file"
read -r name
# BUGFIX: quote the directory (paths with spaces) and abort when it cannot be
# entered, instead of silently testing in the wrong directory.
cd "$1" || { echo "Cannot change to directory: $1" >&2; exit 1; }
if [ ! -f "$name" ]; then
    echo "Your file $name not exist"
else echo "Your file $name exist"
fi
| true |
2b1396cfac8c1f2648cdaeb0cad62dc4f46adc72 | Shell | Nanoseb/refresco-tools | /report2metadata | UTF-8 | 666 | 3.078125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Extract report metadata (title/author/subject/keywords/date/ID) from the
# PDF given as $1 by parsing the HTML meta block emitted by pdftotext.
file="$1"
output=$(pdftotext -htmlmeta "$1" -)
# <title> element -> text between the 2nd and 3rd angle brackets.
title=$(grep -m 1 '<title>' <<<"$output" | awk -F'<|>' '{print $3}')
# <meta name="..." content="..."> entries: take the quoted content value.
author=$(grep -m 1 'name="Author"' <<<"$output" | sed 's/^.*content=//' | awk -F'"' '{print $2}')
subject=$(grep -m 1 'name="Subject"' <<<"$output" | sed 's/^.*content=//' | awk -F'"' '{print $2}')
keywords=$(grep -m 1 'name="Keywords"' <<<"$output" | sed 's/^.*content=//' | awk -F'"' '{print $2}')
# Lines beginning with ": " carry the ID (first) and date (second) in the
# extracted text; strip the leading two characters.
# NOTE(review): assumes this exact report layout -- confirm against samples.
date=$(grep -m 2 '^: ' <<<"$output" | tail -n 1 | sed 's/..//')
ID=$(grep -m 1 '^: ' <<<"$output" | sed 's/..//')
echo Title: $title
echo Subject: $subject
echo Author: $author
echo Keywords: $keywords
echo Date: $date
echo ID: $ID
| true |
b97e1dd2e35f789a669ac0984242be4a07a5b7ff | Shell | nitinkk13/shell-script | /shell-script/case-cmdargs.sh | UTF-8 | 264 | 3.671875 | 4 | [] | no_license | #!/bin/bash
# Demo of option dispatch with case: -f <file> or -d <directory>.
option="$1"
case "$option" in
  -f)
    FILE="$2"
    echo "File name is $FILE"
    ;;
  -d)
    DIR="$2"
    echo "Directory name is $DIR"
    ;;
  *)
    # Unknown/missing flag: print usage (the "directroy" typo is kept
    # verbatim to preserve output) and fail.
    echo "$(basename "${0}"): [-f file] | [-d directroy]"
    exit 1
    ;;
esac
| true |
5a24d31196ea1ec36696b0f1b41efcb4e1a71473 | Shell | CmsHI/CVS_SavedFMa | /bin/newGetFiles.sh | UTF-8 | 280 | 3.6875 | 4 | [] | no_license | #!/bin/bash -
# Generate a ./getfiles.sh helper that copies the given source file.
#   $1 - source file (required)
#   $2 - destination (optional, defaults to ".")
if [ $# -eq 0 ]; then
  # BUGFIX: the original printed usage but fell through and emitted a broken
  # "cp -v  ." helper; abort instead, with usage on stderr.
  echo "Usage:" >&2
  echo " $0 <source_file> [destination_file]" >&2
  exit 1
fi

# Default the destination when only the source was supplied.
# (The original also computed an unused fileName here; removed.)
if [ $# -eq 1 ]; then
  destination="."
else
  destination=$2
fi

echo "cp -v $1 $destination" > getfiles.sh
chmod +x ./getfiles.sh
| true |
197e4df7d8e1b3545015987e950acfc11eaab2b5 | Shell | devdavid16/Mybashscripts | /forfor.sh | UTF-8 | 75 | 2.546875 | 3 | [] | no_license | #!/bin/bash
# Print each colour in the fixed list on its own line.
for COLOR in red green blue; do
    printf 'COLOR: %s\n' "$COLOR"
done
| true |
549f2cc9a0c9f9ac11204d8564968627e42e07a4 | Shell | min969/Termux-Kali-1 | /installkali.sh | UTF-8 | 1,792 | 2.671875 | 3 | [] | no_license | printf " \e[32;1m[+] Installation Initialized ... \e[0m\n"
printf "\e[0m\n"
printf " \e[32;1m[+] Checking Internet Connection\e[0m\n"
printf "\e[0m\n"
if ping -q -c 1 -W 1 8.8.8.8 >/dev/null; then
printf "\e[32;1m[+] Internet is working [+]\e[0m\n"
else
printf "\e[31;1mInternet is not working\e[0m\n"
printf "\e[31;1mPlease connect to Internet\e[0m\n"
printf "\e[31;1mExit ...\e[0m\n"
exit
fi
printf "\e[0m\n"
printf " \e[32;1m[+] Updating packages ... \e[0m\n"
apt update -y && apt upgrade -y
printf " \e[32;1m[+] Installing kali... \e[0m\n"
pkg install wget openssl-tool proot -y && hash -r && wget https://raw.githubusercontent.com/EXALAB/AnLinux-Resources/master/Scripts/Installer/Kali/kali.sh && bash kali.sh
printf " \e[32;1m[+] Settings up kali... \e[0m\n"
mv kali-fs start-kali.sh kali-binds kali.sh ../ -f
figlet FINISH | lolcat
printf "\e[31;1m[+] First time use : \e[0m\n"
printf "\e[33;1m [+] Run '\e[32;1mcd ..\e[33;1m' command \e[0m\n"
printf "\e[33;1m [+] Run '\e[32;1m./start-kali.sh\e[33;1m' command to start Kali \e[0m\n"
printf "\e[0m\n"
printf "\e[31;1m[+] How to use : \e[0m\n"
printf "\e[33;1m [+] Open Termux Application \e[0m\n"
printf "\e[33;1m [+] Run '\e[32;1m./start-kali.sh\e[33;1m' command to start Kali \e[0m\n"
printf "\e[0m\n"
printf "\e[32;1m[+] Follow me on [+] \e[0m\n"
printf "\e[32;1m [+] Youtube : youtube.com/techpanther [+] \e[0m\n"
printf "\e[32;1m [+] Instagram: instagram.com/techpanther [+] \e[0m\n"
printf "\e[32;1m [+] Facebook : facebook.com/techpanther22 [+] \e[0m\n"
printf "\e[32;1m [+] Blog : techpanther.in [+] \e[0m\n"
printf "\e[32;1m [+] github : github.com/techpanther22 [+] \e[0m\n"
figlet Techpanther | lolcat
exit
| true |
962679d708ff3a4af4864416e83f3b87fd238401 | Shell | LiTianjue/DLP | /bin/key_match.sh | UTF-8 | 192 | 2.78125 | 3 | [] | no_license | #!/bin/bash
# Report whether the file named by $1 contains the SSLVPN marker, echoing any
# matching lines first.
key="SSLVPN"
if [ $# -lt 1 ]; then
  # BUGFIX: with no argument the unquoted grep used to fall back to reading
  # stdin and hang; fail fast with a usage message instead.
  echo "usage: $0 <file>" >&2
  exit 2
fi
# Quoted expansions so paths with spaces work; -eq replaces the non-portable
# '==' bashism inside [ ].
grep -- "$key" "$1"
if [ $? -eq 0 ];
then
    echo "Match+++++++++++++++++++++++++++++++++++++++++++++++"
else
    echo "NO Match--------------------------------------------"
fi
| true |
7d755ea9b3f11762ddaf54fd26afcf871de2cca4 | Shell | lsqshr/Neuveal | /dataprep/singlegt.pdb | UTF-8 | 906 | 2.609375 | 3 | [
"BSD-3-Clause"
] | permissive | #!/bin/bash
# pbs launching script example for NAMD job
# PLEASE DO NOT CHANGE THIS FILE. TO SUBMIT QUEUE, PLS MAKE A COPY OF THIS FILE AND MAKE THE ACCORDING CHANGES
# The "#PBS" lines below are directives parsed by qsub, not plain comments.
# job name:
#PBS -N Neuveal
#PBS -P RDS-FEI-NRMMCI-RW
#PBS -q compute
# how many cpus?
#PBS -l ncpus=2
#PBS -l pmem=4000mb
# How long to run the job? (hours:minutes:seconds)
#PBS -l walltime=3:0:0
# Name of output file:
#PBS -o trainlog.txt
# Environmental varibles to make it work:
module load matlab;
cd $PBS_O_WORKDIR;
# Launching the job!
# NOTE(review): JOBNAME is set but not referenced below; PROJECTID1 is
# presumably exported by the cluster environment -- confirm.
JOBNAME='singlegt';
DATA="/project/$PROJECTID1/SQ-Workspace/RivuletJournalData/OP/OP1/op1.v3draw";
SWC="/project/$PROJECTID1/SQ-Workspace/RivuletJournalData/OP/OP1/OP_1.v3draw";
TODIR="/project/$PROJECTID1/SQ-Workspace/RivuletJournalData/OP/OP1/OP1Feat/";
# Run Script
matlab -nodesktop -nosplash -r "[~, ~, data] = singlegt($DATA, $SWC, 10, 7, 13, $TODIR);" | true |
bda297abe4c1f367506da7677419b5ba27764df5 | Shell | changlabtw/GODoc | /code/scripts/shell/thesis/cafa2-swiss/pca_tfpssm_exp_mfo.sh | UTF-8 | 4,920 | 3.265625 | 3 | [] | no_license | #!/bin/bash
# PCA experiment
# Driver for a 5-fold cross-validation sweep over PCA explained-variance
# ratios (whitened/non-whitened x redundant/non-redundant variants) for the
# 'mfo' ontology. Expects three positional arguments (checked below).
if [ "$#" -ne 3 ]; then
echo "Illegal number of parameters, must eqal 3."
echo "Usage: ./pca_tfpssm_exp.sh [nfold folder root] [tfpssm folder] [data folder root]"
exit
fi
echo nfold folder root: ${1}
echo tfpssm folder: ${2}
echo data folder root: ${3}
nfold_root=${1}
tfpssm_folder=${2}
data_root=${3}
# Explained-variance ratios to sweep over (shared by all folds/variants).
ratio_array='0.9 0.905 0.91 0.915 0.92 0.925 0.93 0.935 0.94 0.945 0.95 0.955 0.96 0.965 0.97 0.975 0.98 0.985'
for t in mfo
do
for fold in 0 1 2 3 4
do
# Create the output directory tree: one subtree per ratio, each split into
# whitened (w) / non-whitened (nw) and redundant (r) / non-redundant (nr).
mkdir ${nfold_root}/$t/fold$fold/${data_root}
for explain in ${ratio_array}
do
mkdir ${nfold_root}/$t/fold$fold/${data_root}/${explain}
mkdir ${nfold_root}/$t/fold$fold/${data_root}/${explain}/w
mkdir ${nfold_root}/$t/fold$fold/${data_root}/${explain}/nw
mkdir ${nfold_root}/$t/fold$fold/${data_root}/${explain}/w/nr
mkdir ${nfold_root}/$t/fold$fold/${data_root}/${explain}/w/r
mkdir ${nfold_root}/$t/fold$fold/${data_root}/${explain}/nw/nr
mkdir ${nfold_root}/$t/fold$fold/${data_root}/${explain}/nw/r
done
# '-w' selects the whitened PCA variant; a single space selects non-whitened.
for whiten in -w " "
do
if [ "$whiten" == "-w" ]; then
cur_path=${nfold_root}/$t/fold$fold/${data_root}/[n_c]/w
model_path=${nfold_root}/$t/fold$fold/${data_root}/1/w
else
cur_path=${nfold_root}/$t/fold$fold/${data_root}/[n_c]/nw
model_path=${nfold_root}/$t/fold$fold/${data_root}/1/nw
fi
# The PCA transform steps below are disabled; only 1NN voting runs.
# Non-redundant
# python python/pca/pca_exp_iterate.py -i ${tfpssm_folder} -n ${ratio_array} \
# -l ${nfold_root}/$t/fold$fold/train_ID.txt -list \
# -m ${model_path}/nr/nr_pca_model.pkl \
# -o ${cur_path}/nr/train_tfpssm_pca.csv
# python python/pca/pca_exp_iterate.py -i ${tfpssm_folder} -n ${ratio_array} \
# -l ${nfold_root}/$t/fold$fold/test_ID.txt -list \
# -m ${model_path}/nr/nr_pca_model.pkl \
# -o ${cur_path}/nr/test_tfpssm_pca.csv
# Redundant
# python python/pca/pca_exp_iterate.py -i ${tfpssm_folder} -n ${ratio_array} \
# -l ${nfold_root}/$t/fold$fold/train_ID.txt -list \
# -m ${model_path}/r/pca_model.pkl \
# -o ${cur_path}/r/train_tfpssm_pca.csv
# python python/pca/pca_exp_iterate.py -i ${tfpssm_folder} -n ${ratio_array} \
# -l ${nfold_root}/$t/fold$fold/test_ID.txt -list \
# -m ${model_path}/r/pca_model.pkl \
# -o ${cur_path}/r/test_tfpssm_pca.csv
for explain in ${ratio_array}
do
if [ "$whiten" == "-w" ]; then
output_path=${nfold_root}/$t/fold$fold/${data_root}/${explain}/w
else
output_path=${nfold_root}/$t/fold$fold/${data_root}/${explain}/nw
fi
# Turn precomputed 1NN results into vote scores for each variant; the
# kNN computation and CAFA2 matlab evaluation steps are commented out.
# Non-redundant
# python python/knn/knn.py -k 1\
# -train ${output_path}/nr/train_tfpssm_pca.csv \
# -test ${output_path}/nr/test_tfpssm_pca.csv \
# -o ${output_path}/nr/1nn_res.tsv
python python/1NN/1NN.py -i ${output_path}/nr/1nn_res.tsv \
-l ${nfold_root}/$t/fold$fold/train_leaf.txt \
-o ${output_path}/nr/vote_score.tsv
# seq_eval(matlab_path, cat, ont_db_path, oa_file, pred_file, benchmark_file, output_folder)
# matlab -nodisplay -r "addpath('./cafa2_eval/myscript');\
# seq_eval('./cafa2_eval/matlab','$t','./data/go_20130615-termdb.obo',\
# '${nfold_root}/$t/fold$fold/test_pro.txt',\
# '${output_path}/nr/vote_score.tsv',\
# '${nfold_root}/$t/fold$fold/test_ID.txt',\
# '${output_path}/nr/');quit;"
# Redundant
# python python/knn/knn.py -k 1\
# -train ${output_path}/r/train_tfpssm_pca.csv \
# -test ${output_path}/r/test_tfpssm_pca.csv \
# -o ${output_path}/r/1nn_res.tsv
python python/1NN/1NN.py -i ${output_path}/r/1nn_res.tsv \
-l ${nfold_root}/$t/fold$fold/train_leaf.txt \
-o ${output_path}/r/vote_score.tsv
# seq_eval(matlab_path, cat, ont_db_path, oa_file, pred_file, benchmark_file, output_folder)
# matlab -nodisplay -r "addpath('./cafa2_eval/myscript');\
# seq_eval('./cafa2_eval/matlab','$t','./data/go_20130615-termdb.obo',\
# '${nfold_root}/$t/fold$fold/test_pro.txt',\
# '${output_path}/r/vote_score.tsv',\
# '${nfold_root}/$t/fold$fold/test_ID.txt',\
# '${output_path}/r/');quit;"
done
done
done
done
| true |
a43a3b50d25cb84fd156fcbc3573dd9f518cb1fe | Shell | gasgit/Bash | /search_grep.sh | UTF-8 | 519 | 2.890625 | 3 | [] | no_license | #!/bin/bash
# Personal grep cheat-sheet: notes on common flags, with two live searches of
# the user's home folders at the bottom (paths are hard-coded).
# recursive -r
# line number -n
# whole word -w
# lower case L -l
#grep -rnw '/path/to/dir/' -e "pattern_to_search"
# case sensitive - fast
#grep -rnw /home/glen/Music/ -e "Hardwired"
# case insensitive - slower
grep -insr "Hardwired" /home/glen/Music/
# include file types
#grep --include=\*.{file_type,file_type} -rnw '/path/to/dir/' -e "pattern_to_search"
# include file types and print to file
grep --include=\*.{html,js} -rnw /home/glen/Documents/ -e "index" > /home/glen/Documents/grep.txt
| true |
5f109224942edbe08a4c4a4a6c9a7a4aa042a3df | Shell | pangine/disasm-eval-sources | /src/wine_scripts/gtcl.sh | UTF-8 | 386 | 3.328125 | 3 | [] | no_license | #!/bin/bash
# This script is a msvc cl wrapper that uses git to record every file generated
# by a compiler using git.
# $1 is a job tag used for the /tmp marker files; remaining args (from $3 on)
# are forwarded to cl.exe via the wine wrapper.
. ${GT_ORIGIN_MSVC_WRAPPERS}/msvcenv.sh
${GT_ORIGIN_MSVC_WRAPPERS}/wine-msvc.sh $BINDIR/cl.exe "${@:3}"
# Persist the compiler's exit status for the caller polling on /tmp.
echo $? > /tmp/${1}-ret
# Snapshot everything the compiler produced: the porcelain status (list of
# new/changed files) becomes the commit message.
MESSAGE=$(git status --porcelain --untracked-files=all)
git add -A >/dev/null
git commit -m "$MESSAGE" >/dev/null
# Signal completion to whoever is waiting on this job tag.
touch /tmp/${1}-done
| true |
79c987c8015ba1bbb395efebda6ec058c897e1e9 | Shell | dianedelallee/myConfig | /setup_computer.sh | UTF-8 | 2,865 | 3.125 | 3 | [] | no_license | #!/usr/bin/env bash
#
# Bootstrap script for setting up a new OSX machine
#
# This should be idempotent so it can be run multiple times.
#
# Some apps don't have a cask and so still need to be installed by hand. These
# include:
#
# - Bear app
# - filezilla
# - Noizio
# - pcloud
# - simplemind
# - speedtest
# - stts
#
# Reference:
# - https://gist.github.com/MatthewMueller/e22d9840f9ea2fee4716
# - https://news.ycombinator.com/item?id=8402079
# -
echo "Starting setup of the computer"
# Check for Homebrew, install if we don't have it
if test ! $(which brew); then
echo "Installing homebrew..."
ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
fi
# Update homebrew recipes
brew update
# Install Bash 4
brew install bash
# CLI tools installed via plain brew formulae.
PACKAGES=(
ack
ansiweather
bash-completion
battery
cowsay
curl
emojify
fortune
git
grep
htop
lolcat
pipenv
python3
terminal-notifier
tree
youtube-dl
zsh
zsh-completion
)
echo "Installing packages..."
brew install ${PACKAGES[@]}
echo "Cleaning up..."
brew cleanup
echo "Installing cask..."
brew install caskroom/cask/brew-cask
# GUI applications installed via Homebrew Cask.
CASKS=(
alfred
brackets
cheatsheet
dashlane
firefox
flux
franz
gimp
grammarly
google-chrome
iterm2
libreoffice
nuclino
postman
pritunl
pycharm-ce
spark
ticktick
)
echo "Installing cask apps..."
brew cask install ${CASKS[@]}
echo "Installing fonts..."
brew tap caskroom/fonts
FONTS=(
font-inconsolidata
font-roboto
font-clear-sans
)
brew cask install ${FONTS[@]}
echo 'installing oh-my-zsh and dependencies'
sh -c "$(curl -fsSL https://raw.githubusercontent.com/ohmyzsh/ohmyzsh/master/tools/install.sh)"
git clone https://github.com/bhilburn/powerlevel9k.git ~/.oh-my-zsh/custom/themes/powerlevel9k
echo "Installing Python packages..."
PYTHON_PACKAGES=(
ipython
pipenv
)
sudo pip install ${PYTHON_PACKAGES[@]}
echo "Installing Ruby gems"
RUBY_GEMS=(
bundler
)
sudo gem install ${RUBY_GEMS[@]}
echo "Installing global npm packages..."
npm install marked -g
# macOS preference tweaks via 'defaults write' (take effect on next login or
# after restarting the affected services).
echo "Configuring OSX..."
# Set fast key repeat rate
defaults write NSGlobalDomain KeyRepeat -int 0
# Require password as soon as screensaver or sleep mode starts
defaults write com.apple.screensaver askForPassword -int 1
defaults write com.apple.screensaver askForPasswordDelay -int 0
# Show filename extensions by default
defaults write NSGlobalDomain AppleShowAllExtensions -bool true
# Enable tap-to-click
defaults write com.apple.driver.AppleBluetoothMultitouch.trackpad Clicking -bool true
defaults -currentHost write NSGlobalDomain com.apple.mouse.tapBehavior -int 1
# Disable "natural" scroll
defaults write NSGlobalDomain com.apple.swipescrolldirection -bool false
echo "Setup of the computer complete" | true
b992dc6174fe1c21a9a53c524e8c253bc99776c0 | Shell | lborguetti/goss-cis-benchmark | /scripts/test_section_06_level1/6-1-14.sh | UTF-8 | 1,345 | 3.78125 | 4 | [
"MIT"
] | permissive | #!/bin/bash
#
# 6.1.14 Audit SGID executables (Manual)
#
# Description:
# The owner of a file can set the file's permissions to run with the owner's or
# group's permissions, even if the user running the program is not the owner or
# a member of the group. The most common reason for a SGID program is to enable
# users to perform functions (such as changing their password) that require root
# privileges.
#
# Rationale:
# There are valid reasons for SGID programs, but it is important to identify and
# review such programs to ensure they are legitimate. Review the files returned
# by the action in the audit section and check to see if system binaries have a
# different md5 checksum than what from the package. This is an indication that
# the binary may have been replaced.
set -o errexit
set -o nounset

# Baseline SGID-binary counts expected on a clean image per cloud platform.
declare gcp_binaries="14"
declare azure_binaries="8"
# GCE metadata endpoint; only reachable when running on Google Cloud.
declare url_google="http://metadata/computeMetadata/v1/instance/hostname"

status=0
# Count SGID (setgid, mode bit 2000) regular files across all local mounts.
# BUGFIX: the original awk program was written as "{'if (NR!=1) print $6'}" --
# the double quotes made the shell expand $6 (empty) and left an invalid awk
# program, so no mount points were ever passed to find and the count was wrong.
# The awk program must be single-quoted so $6 reaches awk intact.
count_SGID=$(df --local -P | awk '{if (NR!=1) print $6}' | xargs -I '{}' find '{}' -xdev -type f -perm -2000 | wc -l)
# Probe the GCP metadata service to detect the platform (status stays 0 on GCP).
curl -v -H Metadata-Flavor:Google "$url_google" -f > /dev/null 2>&1 || status=1

# On GCP: exit 1 when the count equals the GCP baseline, else exit 0.
# NOTE(review): this looks inverted relative to the goss-style "fail on
# deviation" intent, but the exit semantics are preserved exactly as written.
if [[ "${status}x" == "0x" ]]; then
if [[ $count_SGID == "$gcp_binaries" ]]; then
exit 1
else
exit 0
fi
fi

# Otherwise assume Azure and compare against the Azure baseline.
if [[ $count_SGID == "$azure_binaries" ]]; then
exit 1
else
exit 0
fi
| true |
767db879772ffdf6891d339abe578a382e76a660 | Shell | Garbulix/disk-space-filler | /big_files.sh | UTF-8 | 2,411 | 4.34375 | 4 | [
"MIT"
] | permissive | #! /bin/bash
function generate_random {
    # Fill the global $random_string with $1 random decimal digits, drawn by
    # filtering the kernel entropy stream down to the characters 0-9.
    local wanted_length=$1
    random_string=$(tr -dc '0-9' < /dev/urandom | fold -w "$wanted_length" | head -n 1)
}
function check_if_error_occured {
# create a confirmation that some kind of error occured
# Inspects $? of the previous command, so this must be invoked as the very
# first statement after the command being checked; on failure it latches the
# global flag $abort_pending_operation (never resets it back to 0).
if [ $? -ne 0 ]
then
abort_pending_operation=1
fi
}
### remember to give path!
path="/PATH/TO/PARTITION_OR_DIRECTORY"
###
### variables that shouldn't be changed later
line_length=500
max_lines=100
filename_length=16
max_chars_in_file=$(( $line_length*$max_lines ))
### variables that will change later
current_file=""
random_string=0
random_filename=""
# "booleans":
does_file_exist=1
abort_pending_operation=0
# vars for summary:
no_of_files=0
approx_number_of_chars=0
no_of_full_files=0
# performing a loop until error occured
# NOTE: '[ 0 ]' tests a non-empty string, so this loop is intentionally
# infinite; it only ends via the 'break's set when a write/touch fails
# (typically because the target disk is full).
while [ 0 ]
do
### CREATING A FILE
# Draw a random numeric filename and retry if it already exists.
generate_random $filename_length # generating name for new file
random_filename=$random_string
current_file=${path}${random_filename}
does_file_exist=$(ls $path | grep -x "$random_filename" | wc -l)
if [ $does_file_exist -ne 0 ]
then
continue # continue to next iteration to generate new name, because that is used
fi
touch $current_file
check_if_error_occured # check error when creating file
if [ $abort_pending_operation -ne 0 ]
then
break
fi
no_of_files=$(( $no_of_files+1 )) # there was no errors, so we can count the file
### FILLING UP THE FILE
# Append up to max_lines random lines; stop early on the first write error.
line_count=0
while [ $line_count -le $max_lines ]
do
generate_random $line_length
random_line=$random_string
echo $random_line >> $current_file
check_if_error_occured # if there is no space left for another line
if [ $abort_pending_operation -ne 0 ]
then
break
fi
line_count=$(( $line_count+1 )) # count line, because there was no errors
done
if [ $abort_pending_operation -ne 0 ]; then break; fi
done
### SUMMARY
# line_count still holds the partial-file line count from the last iteration.
no_of_full_files=$(( $no_of_files-1 )) # last file may not be full
approx_no_of_chars=$(( $line_count*$line_length + $no_of_full_files*$max_chars_in_file ))
echo
echo "There are $no_of_files new files with at least $approx_no_of_chars of characters in them in the directory."
echo
| true |
739d82c92f98df4ed19bb69d5186675fecae832d | Shell | CohenBerkeleyLab/AutoWRFChem-Base | /RUNUTILS/lastrst | UTF-8 | 1,104 | 4 | 4 | [
"Unlicense"
] | permissive | #!/bin/bash
#
# Finds the last restart file in ../../WRFV3/run and echoes it
# Therefore we can't echo anything else from this to stdout, it must
# go to stderr if an error message is required.
cd `dirname $0`
myname=`basename $0`
mydir=`pwd -P`
# Helper programs shipped with the repo: the namelist reader and a date
# comparison utility.
pyprog="$mydir/../CONFIG/autowrf_namelist_main.py"
pydc="$mydir/../Tools/datecompare.py"
# Get the start and end dates from the WRF namelist
wrf_start=`python $pyprog get-wrf-opt --start-date`
wrf_end=`python $pyprog get-wrf-opt --end-date`
rstfiles="../../WRFV3/run/wrfrst*"
# Matches the YYYY-MM-DD_HH:MM:SS timestamp embedded in wrfrst filenames.
regex="[0-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9]_[0-9][0-9]:[0-9][0-9]:[0-9][0-9]"
for f in $rstfiles; do
if [[ $f =~ $regex ]]; then
rdate="${BASH_REMATCH[0]}"
# Keep the file if its timestamp lies in [wrf_start, wrf_end); because the
# glob expands in lexical (= chronological) order, the last match wins.
python $pydc --datefmt '%Y-%m-%d_%H:%M:%S' "$rdate" ge "$wrf_start"
start_chk=$?
python $pydc --datefmt '%Y-%m-%d_%H:%M:%S' "$rdate" lt "$wrf_end"
end_chk=$?
if [[ $start_chk == 0 ]] && [[ $end_chk == 0 ]]; then
last_rst_file="$f"
fi
else
echo "Could not determine date of restart file ($f)" >&2
fi
done
# Sole stdout output: the winning restart file path (empty if none matched).
echo "$last_rst_file"
| true |
dd67894bed3c4d0de8cc2959906714a6af546273 | Shell | dlaststark/machine-learning-projects | /Programming Language Detection/Experiment-2/Dataset/Train/UNIX-Shell/greatest-element-of-a-list-2.sh | UTF-8 | 148 | 3.453125 | 3 | [] | no_license | max() {
max() {
  # Print the largest of the integer arguments on stdout.
  # Plain Bourne shell: no 'local', so the scratch variables are global.
  largest=$1
  shift
  for candidate in "$@"
  do
    [ "$candidate" -gt "$largest" ] && largest=$candidate
  done
  echo "$largest"
}
| true |
195c2cc91e0a1c51c9731d104a9a477f2fec4c7d | Shell | drngsl/spring-cloud | /config-server/docker/build.sh | UTF-8 | 290 | 2.828125 | 3 | [] | no_license | #!/bin/bash
# Build the config-server jar with Maven, bake it into a Docker image, then
# remove the local jar copy. Runs relative to this script's own directory.
cur=$(cd $(dirname $0);pwd)
pkg_name=config-server
mvn package -f $cur/../pom.xml
cp $cur/../target/config-server*.jar $cur/
# Extract the version component from the jar filename (config-server-<ver>.jar).
pkg_ver=`ls config-server* | sed "s/config-server-\(.*\).jar/\1/g"`
docker build -t dengshaolin/spring-config $cur
# Clean up the staged jar once the image is built.
rm -rf $cur/config-server*.jar
| true |
814a8df9ef60a475c6f2e9647ec7c34b96c5966a | Shell | ghomsy/makani | /lib/scripts/style/pack2_lint.sh | UTF-8 | 1,274 | 3.375 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Style-checks the pack2 files passed as arguments by diffing each file
# against the output of the bazel-built p2fmt formatter; exits with the
# number of files that had style errors.
source /opt/shflags-1.0.3/src/shflags
DEFINE_boolean 'verbose' true 'Print message for each file.'
FLAGS "$@" || exit $?
readonly P2FMT="${MAKANI_HOME}/bazel-out/k8-py2-fastbuild/bin/lib/python/pack2/tools/p2fmt"
if [[ ! -x "${P2FMT}" ]]; then
echo "${P2FMT} is not built."
exit 255
fi
num_errors=0
for f in "$@"; do
if [[ -e "${f}" ]]; then
if [[ "${FLAGS_verbose}" -eq "${FLAGS_TRUE}" ]]; then
echo "Checking style in ${f}"
fi
# Non-empty diff output means the file differs from its formatted form.
OUTPUT="$(${P2FMT} < ${f} | diff ${f} - | colordiff)"
if [[ ! -z "${OUTPUT}" ]]; then
let num_errors++
echo "Style error in file ${f}"
echo "${OUTPUT%x}"
fi
fi
done
exit "${num_errors}"
| true |
d11207e0837af7982a38772574e7cd30c7e1c734 | Shell | snowytoxa/tools | /automation/sn00p/source/src/report/txt.sh | UTF-8 | 4,080 | 3.90625 | 4 | [] | no_license | #!/bin/sh
################################################################################
# ____ _ __ #
# ___ __ __/ / /__ ___ ______ ______(_) /___ __ #
# / _ \/ // / / (_-</ -_) __/ // / __/ / __/ // / #
# /_//_/\_,_/_/_/___/\__/\__/\_,_/_/ /_/\__/\_, / #
# /___/ nullsecurity team #
# #
# sn00p - automates your toolchain for security tests #
# #
# FILE #
# report_txt.sh #
# #
# DATE #
# 09/02/2012 #
# #
# DESCRIPTION #
# sn00p is a modular tool written in bourne shell and designed to chain and #
# automate security tools and tests. It parses target definitions from the #
# command line and runs corresponding modules afterwards. sn00p can also parse #
# a given nmap logfile for open tcp and udp ports. All results will be logged #
# in specified directories and a report can subsequently be generated. #
# #
# AUTHOR #
# noptrix - http://www.nullsecurity.net/ #
# #
################################################################################
# generate results
make_txt_results()
{
# Append per-host scan results to report/<host>.txt by walking the
# <host>/<mode>/<tool>/<logfile> directory tree produced by earlier runs.
# Relies on the externals: $hosts (global target list) and msg() -- presumably
# a printf-style logger sourced elsewhere in sn00p; confirm against the caller.
modes="host tcp udp web lan wlan"
for i in ${hosts}
do
msg "[ RESULTS ]\n" >> "report/${i}.txt"
msg "-----------------------------------------------------------------"\
"---------------\n" >> "report/${i}.txt"
for mode in ${modes}
do
for j in `ls "${i}/${mode}" 2> /dev/null`
do
for k in `ls "${i}/${mode}/${j}/" 2> /dev/null`
do
msg "` msg ${j} \(${mode}\) - ${k} | sed 's/\.log//'`\n" \
>> "report/${i}.txt"
msg "`cat ${i}/${mode}/${j}/${k}`\n" >> "report/${i}.txt"
msg "-----------------------------------------------------"\
"---------------------------\n" >> "report/${i}.txt"
done
done
done
done
return ${SUCCESS}
}
# generate target list
make_txt_target_list()
{
# Append the "[ TARGET LIST ]" section (one line per host in the global
# $hosts) to the report file given as $1; output goes through msg().
tfile="${1}"
msg "[ TARGET LIST ]\n" >> ${tfile}
for i in ${hosts}
do
msg "${i}" >> ${tfile}
done
msg "\n------------------------------------------------------------------"\
"--------------\n" >> ${tfile}
return ${SUCCESS}
}
# generate text header
make_txt_header()
{
# Start (truncate) the report file given as $1 with a dated title banner.
# Note the '>' on the first msg: this is the only function that overwrites.
tfile="${1}"
msg "sn00p report (`date +%F`)" > ${tfile}
msg "------------------------------------------------------------------"\
"--------------\n" >> ${tfile}
return ${SUCCESS}
}
# generate text summay
make_txt_summary()
{
# Append the "[ SUMMARY ]" section to the report file given as $1 using the
# per-category counters (num_hosts, num_tcp, ...) set globally by the caller.
tfile="${1}"
msg "[ SUMMARY ]\n" >> ${tfile}
msg "HOST\t\t${num_hosts}" >> ${tfile}
msg "TCP\t\t${num_tcp}" >> ${tfile}
msg "UDP\t\t${num_udp}" >> ${tfile}
msg "WWW\t\t${num_www}" >> ${tfile}
msg "LAN\t\t${num_lan}" >> ${tfile}
msg "WLAN\t\t${num_wlan}" >> ${tfile}
msg "" >> ${tfile}
msg "------------------------------------------------------------------"\
"--------------\n" >> ${tfile}
return ${SUCCESS}
}
# EOF
| true |
fbb54522ef0291f0297b8cf88d6ad4f181e61eef | Shell | CU-CommunityApps/cu-aws-cloudformation | /template-template/deploy.sh | UTF-8 | 1,747 | 3.96875 | 4 | [] | no_license | #!/bin/bash
#
# Script to create/update CloudFormation stack from a template.
#
# Original Source: https://github.com/CU-CommunityApps/cu-aws-cloudformation/template-template/deploys.sh
#
# Prerequisites:
# - AWS CLI
# - cfn-flip: https://github.com/awslabs/aws-cfn-template-flip
# - jq: https://stedolan.github.io/jq/
# - cfn-lint: https://github.com/aws-cloudformation/cfn-python-lint
TARGET_TEMPLATE=template.yaml
# The ENV environment variable selects the deployment environment and is
# also passed into the stack as EnvironmentParam.
if [[ -z "$ENV" ]]; then
echo 'ENV environment variable is not set. Please set to "dev" or "test" or "prod".'
exit 1
fi
# Grab the version from the template metadata
TEMPLATE_VERSION=$(cfn-flip $TARGET_TEMPLATE | jq -r ".Metadata.Version")
STACK_NAME=$(cfn-flip $TARGET_TEMPLATE | jq -r ".Metadata.RecommendedStackName")
# The recommended stack name may embed shell expansions (e.g. $ENV); eval
# expands them. NOTE(review): eval executes template-supplied text -- only
# safe when the template is trusted.
STACK_NAME=$(eval "echo $STACK_NAME")
echo "########## PROPERTIES ##########"
echo "Template: $TARGET_TEMPLATE"
echo "Depoying/Updating stack: $STACK_NAME"
echo "Deploying template version: $TEMPLATE_VERSION"
echo "########## LINT ##########"
cfn-lint $TARGET_TEMPLATE
echo "########## VALDIATE ##########"
set -e # Stop the script if it doesn't validate.
aws cloudformation validate-template --template-body file://$TARGET_TEMPLATE
# Uncomment other CAPABILITY values as needed
CAPABILITIES=""
# CAPABILITIES="--capabilities CAPABILITY_IAM"
# CAPABILITIES="--capabilities CAPABILITY_NAMED_IAM"
echo "########## DEPLOY ##########"
aws cloudformation deploy \
--template-file $TARGET_TEMPLATE \
--stack-name $STACK_NAME \
--no-fail-on-empty-changeset \
$CAPABILITIES \
--parameter-overrides \
VersionParam="$TEMPLATE_VERSION" \
EnvironmentParam="$ENV"
# Protect the deployed stack against accidental deletion.
aws cloudformation update-termination-protection \
--enable-termination-protection \
--stack-name $STACK_NAME
| true |
68a602024c7a83c8ece15c9d011be8c6ade3a23a | Shell | Anusree-S/SDLab-Record | /Cycle 1/script4 | UTF-8 | 128 | 3.34375 | 3 | [] | no_license | #!/bin/bash
# Read a whitespace-separated list of numbers from the user and print their sum.

# Sum the integer arguments with builtin shell arithmetic instead of the
# original per-element `expr` subprocess (same results, no forks).
sum_numbers() {
    local total=0 n
    for n in "$@"
    do
        total=$(( total + n ))
    done
    echo "$total"
}

# -a splits the response into an array; -r keeps backslashes literal.
read -r -p "Enter 20 numbers " -a numbers
sum=$(sum_numbers "${numbers[@]}")
echo "The sum is :" $sum
| true |
3f7181896866d7b35cce7febfb294c18ed0a0e26 | Shell | MichaelHu/myscripts | /svn/hooks/fedocs/bin/build-all-force.sh | UTF-8 | 720 | 3.28125 | 3 | [] | no_license | #!/bin/bash
# SVN post-hook helper: update the docs working copy, stage a clean copy in
# __tmp (without .svn metadata), then force-rebuild every .md and .slides
# document regardless of what changed ($1, the changed-file list, is unused
# by the active code below).
ROOT=/home/work/hudamin/fedocs
DOCDIR=$ROOT/docs
SVNCMD=/usr/bin/svn
CHANGEDFILE=$1
cd $ROOT
pushd $DOCDIR
$SVNCMD up
popd
# Stage a fresh copy of the docs tree and strip SVN bookkeeping directories.
rm -rf __tmp/*
cp -r $DOCDIR/* __tmp
find __tmp -type d -name ".svn" -exec rm -rf {} \;
pushd __tmp
# Incremental rebuild of only the changed files (disabled in favour of the
# unconditional rebuild below).
# for i in `cat $CHANGEDFILE | grep -P '\.md$' | awk '{printf "%s\n",$2}'`; do
# /bin/bash $ROOT/bin/build-markdown.sh $i
# done
#
# for i in `cat $CHANGEDFILE | grep -P '\.slides$' | awk '{printf "%s\n",$2}'`; do
# /bin/bash $ROOT/bin/build-slides.sh $i
# done
find . -type f \
-regex "\..*\.md" \
-exec /bin/bash $ROOT/bin/build-markdown-node.sh {} \;
find . -type f \
-regex "\..*\.slides" \
-exec /bin/bash $ROOT/bin/build-slides.sh {} \;
popd
| true |
4ef24426f0536fab0946c88dd9c3917557432076 | Shell | StenSipma/dotfiles | /bin/volume | UTF-8 | 2,659 | 4.0625 | 4 | [] | no_license | #!/bin/bash
# Script to alter the volume of a sink
# Controls a PulseAudio sink via pactl, remembers the selected sink number in
# $CONF_DIR/selected_sink, refreshes an i3blocks widget and shows dunst
# notifications. $1 selects the action (see the usage text at the bottom).
CONF_DIR="$HOME/.config/volume"
MSG_NOTIFY_ID=90043
# Sends a signal to the status bar
function send-signal {
pkill -RTMIN+2 i3blocks
}
# Sends a notification to the notification system
# ($MSG_NOTIFY_ID makes dunst replace the previous volume popup in place.)
function notify {
dunstify -r "$MSG_NOTIFY_ID" -i audio-volume-medium "Volume $1"
}
# Print the 'pactl list sinks' stanza for sink number $1 (from the header
# line matching "#<n>" up to the next blank line).
function select-sink {
pactl list sinks | awk -v sink="#$1" '$2~sink, /^$/ {print}'
}
# Print the human-readable Description field of sink $1.
function select-sink-description {
select-sink $1 | awk '/Description:/{for(i=2;i<=NF;i++){printf "%s ", $i}; printf "\n"} '
}
function sink-exists {
# If $sink is in the list of sinks, keep it
# Else select the first sink
sinks=$(pactl list sinks short | awk '{print $1}')
for sink in $sinks; do
[[ $sink -eq $1 ]] && echo $sink && return
done
echo $(pactl list sinks short | awk '{print $1}' | sed '1p; d')
}
# Print the number of the sink currently in state RUNNING (if any).
function select-running-sink {
pactl list sinks short | awk '$7=="RUNNING"{print $1}'
}
# Load the persisted sink choice and fall back to an existing sink if it has
# disappeared (e.g. a Bluetooth device went away), persisting the new choice.
sink=$(cat $CONF_DIR/selected_sink)
newsink=$(sink-exists $sink)
if [[ ! $newsink -eq $sink ]]; then
sink=$newsink
printf "%d" "$sink" > $CONF_DIR/selected_sink
send-signal
fi
# Dispatch on the requested action.
case $1 in
show)
level=$(pactl list sinks | select-sink $sink | awk '/Volume: front-left:/{print $5};' | sed 's/%//')
printf "%d" "$level"
;;
show-muted)
output=$(pactl list sinks | select-sink $sink | awk '/Mute:/ {print $2}' )
printf "%s" "$output"
;;
show-sink)
printf "%d" "$sink"
;;
show-inputs)
select-sink-description $sink
;;
set-sink)
sinknumber="$(pactl list sinks short | awk '{print $2; print $1}' | sed 's/bluez_sink.*$/Bluetooth/; s/alsa_output.pci-*$/Laptop/;' | ~/scripts/wrappers/dmenu_wrapper.py)"
#sinknumbers=$(pactl list sinks short | awk '{print $1}')
#dmen_input=${sinknumbers//*([:digit:])/}
printf "%d" "$sinknumber" > $CONF_DIR/selected_sink
send-signal
;;
auto-select)
sinknumber=$(select-running-sink)
printf "%d" "$sinknumber" > $CONF_DIR/selected_sink
send-signal
;;
up)
pactl set-sink-volume $sink +5%
send-signal
notify "Up"
;;
down)
pactl set-sink-volume $sink -5%
send-signal
notify "Down"
;;
mute)
pactl set-sink-mute $sink toggle
send-signal
notify "Mute Toggle"
;;
*)
printf "\n'%s' is not a valid action\n\n" "$1"
printf "Usage: volume [ACTION]\n"
printf " Actions:\n"
printf " show - Displays current sink & volume level\n"
printf " up - Increases volume of selected sink by 5 percent\n"
printf " down - Decreases volume of selected sink by 5 percent\n"
printf " mute - Toggles mute on the selected sink\n"
printf " set-sink - Select the sink to be controlled, using Rofi\n"
printf "\n"
esac
| true |
121eb91381af9e5166ba4188348db60964e481b1 | Shell | k-uryu/vagrant_docker | /.lib/php/.bin/build_workspace.sh | UTF-8 | 918 | 2.78125 | 3 | [] | no_license | #!/bin/bash
# Clone a pinned Laradock release into ./workspace, seed it with the local
# helper scripts and a default .env, then print the manual edits the user
# still has to make before starting the containers.
ROOT_DIR=`dirname $0`
git clone -b v6.0.1 https://github.com/Laradock/laradock.git workspace
cp -r ${ROOT_DIR}/.bin workspace/.bin
cd workspace
cp env-example .env
# Everything below is guidance output only (no further side effects).
echo 'cd workspace'
echo ' ↓ '
echo 'vim .env'
echo '-----------------------------------------------------------------'
echo '! #APPLICATION=../'
echo '+ APPLICATION=../src'
echo ''
echo '! #PHP_VERSION=72'
echo '+ #PHP_VERSION=56'
echo ''
echo '! #DATA_SAVE_PATH=~/.laradock/data'
echo '+ DATA_SAVE_PATH=~/.laradock/data/php'
echo '-----------------------------------------------------------------'
echo ' ↓ '
echo 'vim docker-compose.yml'
echo '-----------------------------------------------------------------'
echo ' applications:'
echo ' image: tianon/true'
echo '+ volumes:'
echo '+ - ${APPLICATION}:/var/www/'
echo '-----------------------------------------------------------------'
echo ' ↓ '
echo './.bin/docker_up.sh'
| true |
05046a2b5335a68c9ffb8e5958a6d3ebf2da73ee | Shell | qixin5/debloating_study | /expt/script/rdsfuzz_reliability_test_utilprog/check_rslt.sh | UTF-8 | 207 | 2.5625 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Thin forwarding wrapper: delegates result checking to the sirprog variant
# of the reliability-test checker, passing all five arguments through.
BENCH=$1
PROGNAME=$2
progpath=$3
inputset=$4
forrazor=$5
#Using *_sirprog* is NOT a mistake
rdsfuzz_reliability_test_sirprog/check_rslt.sh $BENCH $PROGNAME ${progpath} ${inputset} ${forrazor}
| true |
e454ff66b9a559c974be3099abd171749889bd3a | Shell | dlledos/qnap | /bin/start-jdownloader.sh | UTF-8 | 457 | 3.359375 | 3 | [] | no_license | #!/bin/sh
# Populate the globals CMD (matching ps line), RUNNING (match count) and PID
# (third field of the ps line) for any java process running JDownloader2.
# NOTE: 'function' is a bashism despite the #!/bin/sh shebang (kept as-is,
# matching the original).
function isJD2running(){
CMD=$(ps l -C java | grep JDownloader2)
# BUGFIX: the original counted lines with `echo -n $CMD | wc -l`, which is
# always 0 for single-line output because -n suppresses the trailing newline
# wc counts -- so the "already running" branch could never trigger. Count the
# matching lines with grep -c instead (0 when CMD is empty).
RUNNING=$(printf '%s' "$CMD" | grep -c JDownloader2)
PID=$(echo $CMD | tr -s " " " " | cut -d " " -f3)
}
# Start JDownloader2 on the QNAP NAS unless exactly one instance is already
# running (RUNNING/PID/CMD are set by isJD2running above).
isJD2running
if [ "$RUNNING" -ne 1 ]
then
echo "Starting Jdownloader2 ..."
cd /tmp
/share/CACHEDEV1_DATA/.qpkg/JDownloader2/JDownloader2.sh start
#/opt/bin/nohup /share/CACHEDEV1_DATA/.qpkg/jd2/opt/JDownloader2 &
else
echo "Jdownloader2 already strated with PID = $PID ..."
echo $CMD
fi
| true |
9f09ac7348dafadd9fc92343044315ecc1dc2464 | Shell | PrakhyatKhati/ethercattle-deployment | /scripting/default-server-config.sh | UTF-8 | 3,563 | 3.546875 | 4 | [] | no_license | #!/bin/bash -xe
# EC2 user-data for an Ether Cattle geth node. This is a CloudFormation
# Fn::Sub template: ${S3GethBucketName}, ${ECGethVersion},
# ${MetricsConfigParameter}, ${DiskSize} and ${AWS::Region} are substituted
# by CloudFormation before the script runs -- they are NOT shell variables.
yum install -y aws-cfn-bootstrap
# Map the machine architecture to the binary-name suffix used in S3.
if [ "$(arch)" == "x86_64" ]
then
ARCH="amd64"
elif [ "$(arch)" == "aarch64" ]
then
ARCH="arm64"
fi
sysctl -p || true
GETH_BIN="geth-linux-$ARCH"
LOGS_BIN="journald-cloudwatch-logs-$ARCH"
# Fetch the geth binary, the journald log shipper and the peer manager.
aws s3 cp s3://${S3GethBucketName}/${ECGethVersion}/$GETH_BIN /usr/bin/geth
aws s3 cp s3://${S3GethBucketName}/$LOGS_BIN /usr/local/bin/journald-cloudwatch-logs
aws s3 cp s3://${S3GethBucketName}/peerManagerAuth.py /usr/local/bin/peerManager.py
chmod +x /usr/bin/geth
chmod +x /usr/local/bin/journald-cloudwatch-logs
chmod +x /usr/local/bin/peerManager.py
mkdir -p /var/lib/journald-cloudwatch-logs/
# Mount the chain-data EBS volume (/dev/sdf) with relaxed ext4 options and
# make the mount persistent.
mkdir -p /var/lib/ethereum
mount -o barrier=0,data=writeback,noatime /dev/sdf /var/lib/ethereum
mkdir -p /var/lib/ethereum/overlay
resize2fs /dev/sdf
useradd -r geth
echo "/dev/sdf /var/lib/ethereum ext4 barrier=0,data=writeback,noatime 1 1" >> /etc/fstab
# Find an instance-store NVMe device that is NOT one of the EBS attachments
# (EBS devices resolve via /dev/sd* or /dev/xvd* symlinks); compare device
# names truncated to 12 chars so nvme namespaces/partitions collapse together.
ignore="$(readlink -f /dev/sd*) $(readlink -f /dev/xvd*)"
cutignore="$(for x in $ignore ; do echo $x | cut -c -12; done | uniq)"
devices="$(ls /dev/nvme* | grep -E 'n1$')" || devices=""
cutdevices="$(for x in $devices ; do echo $x | cut -c -12; done | uniq)"
localnvme=$(for d in $cutdevices; do if ! $(echo "$cutignore"| grep -q $d) ; then echo $d; fi ; done)
# Prefer local NVMe for the overlay scratch space, fall back to /dev/sdg.
if [ ! -z "$localnvme" ]
then
mkfs.ext4 $localnvme
mount -o barrier=0,data=writeback $localnvme /var/lib/ethereum/overlay
echo "$localnvme /var/lib/ethereum/overlay ext4 barrier=0,data=writeback,noatime 1 1" >> /etc/fstab
elif [ -e /dev/sdg ]
then
mkfs.ext4 /dev/sdg
mount -o barrier=0,data=writeback /dev/sdg /var/lib/ethereum/overlay
echo "/dev/sdg /var/lib/ethereum/overlay ext4 barrier=0,data=writeback,noatime 1 1" >> /etc/fstab
fi
chown -R geth /var/lib/ethereum
# Monitoring agent + tooling; config is pulled from SSM Parameter Store.
yum install -y https://s3.amazonaws.com/amazoncloudwatch-agent/amazon_linux/$ARCH/latest/amazon-cloudwatch-agent.rpm nmap-ncat jq python-pip jq fio || true
pip install kafka-python
/opt/aws/amazon-cloudwatch-agent/bin/amazon-cloudwatch-agent-ctl -a fetch-config -m ec2 -c ssm:${MetricsConfigParameter} -s
# Cron entry that periodically grows filesystems after EBS volume resizes.
crontab -l > newcrontab || true
echo "5,20,35,50 * * * * /usr/bin/sh -c 'for x in \$(ls /dev/sd*) ; do echo resizing \$(readlink -f \$x) if needed; /usr/sbin/resize2fs \$(readlink -f \$x) ; done'" >> newcrontab
crontab newcrontab
printf "[Unit]
Description=journald-cloudwatch-logs
Wants=basic.target
After=basic.target network.target
[Service]
ExecStart=/usr/local/bin/journald-cloudwatch-logs /usr/local/etc/journald-cloudwatch-logs.conf
KillMode=process
Restart=on-failure
RestartSec=42s" > /etc/systemd/system/journald-cloudwatch-logs.service
echo "geth hard nofile 500000" >> /etc/security/limits.conf
echo "geth soft nofile 500000" >> /etc/security/limits.conf
systemctl daemon-reload
sleep 5 #TODO- workaround for a deadlock on topic creation
systemctl enable amazon-cloudwatch-agent.service
systemctl start amazon-cloudwatch-agent.service
systemctl enable journald-cloudwatch-logs
systemctl start journald-cloudwatch-logs
# Pre-warm the chain-data volume with a rate-limited sequential read while
# the instance finishes bootstrapping (backgrounded; reaped by 'wait' below).
fio --filename=/dev/sdf --rw=read --bs=128k --iodepth=32 --ioengine=libaio --prio=7 --prioclass=3 --thinktime=2 --rate_iops=$((${DiskSize} * 3 - 100 )) --direct=1 --name=volume-initialize &
export AWS_DEFAULT_REGION=${AWS::Region}
# Look up the EBS volume attached as /dev/sdf on this instance.
VOLUME_ID=$(aws ec2 describe-volumes --filters Name=attachment.instance-id,Values="$(curl http://169.254.169.254/latest/meta-data/instance-id)" | jq '.Volumes[] | select(. | .Attachments[0].Device == "/dev/sdf") | .VolumeId' -cr)
wait
# Convert the warmed volume to gp2 (backgrounded; fire-and-forget).
aws ec2 modify-volume --volume-id $VOLUME_ID --volume-type gp2 &
| true |
3c413e47e71ab2b43681ae25d2acda2cd6da9fb8 | Shell | unb-libraries/CargoDock | /container/drupal/buildDrupalTree.sh | UTF-8 | 1,172 | 3.53125 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env sh
set -e
# Build a Drupal tree in DRUPAL_BUILD_TMPROOT from the checkout in
# TMP_DRUPAL_BUILD_DIR, then graft the site profile and settings in.
# Dev/NoDev: $1 selects the composer mode switch (default "no-dev").
DRUPAL_COMPOSER_DEV="${1:-no-dev}"
# Fail fast with a clear message when the required environment is missing.
: "${DRUPAL_BUILD_TMPROOT:?DRUPAL_BUILD_TMPROOT must be set}"
: "${TMP_DRUPAL_BUILD_DIR:?TMP_DRUPAL_BUILD_DIR must be set}"
: "${DRUPAL_SITE_ID:?DRUPAL_SITE_ID must be set}"
# Copy build files into a temporary build location.
mkdir "${DRUPAL_BUILD_TMPROOT}"
cp "${TMP_DRUPAL_BUILD_DIR}/composer.json" "${DRUPAL_BUILD_TMPROOT}"
# Change to the build directory
cd "${DRUPAL_BUILD_TMPROOT}"
# Get latest composer/ScriptHandler.php. -f makes curl (and, via set -e,
# the whole build) fail on HTTP errors instead of saving an error page.
mkdir -p scripts/composer
curl -fO https://raw.githubusercontent.com/drupal-composer/drupal-project/8.x/scripts/composer/ScriptHandler.php
mv ScriptHandler.php scripts/composer/
# Build instance. BUILD_COMMAND is expanded unquoted on purpose so the
# string is word-split into composer's argv.
BUILD_COMMAND="composer install --no-suggest --prefer-dist --no-interaction --${DRUPAL_COMPOSER_DEV}"
echo "Building Drupal [${BUILD_COMMAND}]"
${BUILD_COMMAND}
# Move profile from repo to build root.
cd "${DRUPAL_BUILD_TMPROOT}"
mv "${TMP_DRUPAL_BUILD_DIR}/${DRUPAL_SITE_ID}" "${DRUPAL_BUILD_TMPROOT}/profiles/"
# Copy config from standard install profile for current version of Drupal.
cp -r "${DRUPAL_BUILD_TMPROOT}/core/profiles/minimal/config" "${DRUPAL_BUILD_TMPROOT}/profiles/${DRUPAL_SITE_ID}/"
# Move settings files into build location.
mkdir -p "${DRUPAL_BUILD_TMPROOT}/sites/all"
mv "${TMP_DRUPAL_BUILD_DIR}/settings" "${DRUPAL_BUILD_TMPROOT}/sites/all/"
| true |
235a5d36944dc5e3f3629f2f13ff07f82848dbeb | Shell | artm/ExplodedViews | /Scripts/zipper/makefakeload.sh | UTF-8 | 167 | 2.65625 | 3 | [] | no_license | #!/bin/sh
# Create a handful of test directories, each holding a ~40 MB file of
# random bytes (10000 blocks x 4096 bytes) named load.fake.
for d in foo bar baz _-37.817713_144.966580_1 ; do
	echo "$d"
	mkdir -p "$d"
	rm -f "$d/load.fake"
	# NOTE(review): /dev/random can block on older kernels when the
	# entropy pool is low; /dev/urandom is the usual choice for bulk
	# test data — confirm before changing the source.
	dd if=/dev/random of="$d/load.fake" count=10000 bs=4096
done
| true |
366ecd36691e233ff27b4c7bd420db6ddc85da73 | Shell | petronny/aur3-mirror | /openarch_conky/conkyrun | UTF-8 | 1,550 | 3.28125 | 3 | [] | no_license | #!/bin/bash
#########################################
## Control script for conkyclockwidget ##
## by jpope ##
## v 1.0.2010.10.19 ##
## Modified my OvsInc ##
#########################################
# Paths: widget conky config, its Python button helper, the per-user
# state directory/switch file, and the main conky config.
cnfg1="/usr/share/conky/clockwidget/conkyclockwdgtrc"
cnfg1btn="/usr/share/conky/clockwidget/btnclockwdgt.py"
confdir="${HOME}/.conky/clockwidget"
conf="${confdir}/clockswitch"
main="/usr/share/conky/conky_main/conkyrc_main"
# Start the main conky window and the clock widget (plus its Python2
# button helper), skipping anything a pgrep match shows as running.
conky-start()
{
if [[ $(pgrep -f "${main}") ]]; then
echo "Conky main window is already running. Use $0 restart"
else
conky -q -c "${main}"
fi
if [[ $(pgrep -f "${cnfg1}") ]]; then
echo "Conky clock widget is already running. Use $0 restart"
else
# Widget start is delayed in a background subshell so the main window
# comes up first; the clockswitch state file is reset to "0".
[[ -r ${cnfg1} ]] && \
(sleep 1s
mkdir -p "${confdir}" 2>/dev/null
echo "0" > "${conf}"
conky -q -c "${cnfg1}") &
[[ -r ${cnfg1btn} ]] && /usr/bin/python2 "${cnfg1btn}" &
fi
}
# Terminate the clock widget and the main conky window, reporting for
# each whether a matching process was actually signalled.
conky-stop()
{
if pkill -f clockwdgt; then
echo "Clockwidget stopped"
else
echo "Clockwidget is not running"
fi
if pkill -f conky_main; then
echo "Conkymain stopped"
else
echo "Conkymain is not running"
fi
}
# Dispatch on the first CLI argument: start | stop | restart.
case $1 in
stop )
conky-stop;;
start )
conky-start;;
restart )
conky-stop
# brief pause so the old processes are gone before restarting
sleep .5
conky-start
;;
* )
echo 'usage ( start | stop | restart )'
;;
esac
exit
| true |
9530f5b23a65bd2e7d9da6b08df3ecd5ca4e48ef | Shell | hakoseven/packages-community | /kdeplasma-applets-homerun/PKGBUILD | UTF-8 | 895 | 2.59375 | 3 | [] | no_license | # Maintainer: philm <philm@manjaro.org>
# Package metadata consumed by makepkg; _pkgname is the upstream tarball
# name, while pkgname carries the kdeplasma-applets- prefix.
pkgname=kdeplasma-applets-homerun
_pkgname=homerun
pkgver=1.2.5
pkgrel=1
pkgdesc="Fullscreen application launcher for KDE Plasma desktop"
arch=('i686' 'x86_64')
url="http://userbase.kde.org/Homerun"
license=('GPL' 'LGPL' 'BSD')
depends=('kdebase-workspace' 'kdebase-lib')
makedepends=('cmake' 'automoc4' 'git')
source=("http://download.kde.org/stable/$_pkgname/src/$_pkgname-$pkgver.tar.xz")
install=$pkgname.install
sha256sums=('23f56511b3d64ed3ce979bd6a233aa3d864e9babd333524451d119ec6026c79e')
# Configure and compile homerun in an out-of-tree build directory with
# the Qt4/KDE4 toolchain; cmake flags are unchanged, merely reordered.
build() {
  mkdir "$srcdir/build"
  cd "$srcdir/build"
  cmake "../$_pkgname-$pkgver" \
    -DCMAKE_INSTALL_PREFIX=/usr \
    -DCMAKE_BUILD_TYPE=Release \
    -DQT_QMAKE_EXECUTABLE=qmake-qt4
  make
}
# Install the compiled tree into $pkgdir and ship the BSD license file
# (note: installed under the historical misspelling "LICENCE").
package() {
cd "$srcdir"/build
make DESTDIR="$pkgdir" install
install -Dm644 "$srcdir/$_pkgname-$pkgver/LICENSE.BSD" "$pkgdir/usr/share/licenses/$pkgname/LICENCE"
}
| true |
428b31e2629218cc181ce33c7253a86c5306abce | Shell | roverway/myscripts | /brightness_control.sh | UTF-8 | 1,331 | 3.515625 | 4 | [] | no_license | #/!bin/bash
# Step the ACPI backlight up or down one level (valid range 1..7) and
# show the accepted level as a transient desktop notification.
# Usage: $0 +   (brighter)    $0 -   (dimmer)
# NOTE(review): the file's shebang line (outside this span) reads
# "#/!bin/bash" upstream, which is malformed — worth fixing there too.
backlight=/sys/class/backlight/acpi_video0/brightness
usage="sh shell_name.sh +/-"
msg="valid brightness in this is between 1...7"
brightness=$(cat "$backlight")
if [ "$1" = "+" ]; then
    brightness_cur=$((brightness + 1))
elif [ "$1" = "-" ]; then
    brightness_cur=$((brightness - 1))
else
    echo "$usage"
    echo "$msg"
    exit 1
fi
# Only write the new value back when it stays inside the panel's range.
if [ "$brightness_cur" -ge 1 ] && [ "$brightness_cur" -le 7 ]; then
    echo "$brightness_cur" > "$backlight"
fi
# Re-read the level the kernel actually accepted and convert to percent.
brightness_cur=$(cat "$backlight")
brightness_per=$((brightness_cur * 100 / 7))
# NOTE(review): with integer math the percentage can never equal 1, so
# this "off" branch is unreachable; it probably meant to compare
# brightness_cur — confirm intent before changing behavior.
if [ "$brightness_per" = "1" ]; then
    icon_name="notification-display-brightness-off"
elif [ "$brightness_per" -lt "33" ]; then
    icon_name="notification-display-brightness-low"
elif [ "$brightness_per" -lt "67" ]; then
    icon_name="notification-display-brightness-medium"
elif [ "$brightness_per" -lt "99" ]; then
    icon_name="notification-display-brightness-high"
else
    icon_name="notification-display-brightness-full"
fi
notify-send " " -i "$icon_name" -h int:value:"$brightness_per" -h string:x-canonical-private-synchronous:brightness
| true |
d15392025684fb7702dfc9f2b80074307bf64ae5 | Shell | qgis/QGIS-Server-CertifSuite | /docker/3.28/build.sh | UTF-8 | 456 | 2.515625 | 3 | [] | no_license | #! /bin/sh
# Build the QGIS Server 3.28 certification image in stages: bake a
# "prepare" image, run the QGIS build inside a privileged container,
# commit the result as the final image, then drop the prepare image.
BASEDIR=$(dirname "$0")
docker build ${BASEDIR} -t qgisserver-certifsuite/3.28-prepare
docker run --name certifsuite-3.28-build --rm --privileged -d -it qgisserver-certifsuite/3.28-prepare /bin/bash
docker exec certifsuite-3.28-build sh /root/qgis.sh
docker commit --change='CMD ["sh", "/root/cmd.sh"]' certifsuite-3.28-build qgisserver-certifsuite/3.28
docker stop certifsuite-3.28-build
# give --rm time to remove the stopped container before deleting its image
sleep 5
docker rmi qgisserver-certifsuite/3.28-prepare
| true |
ffd04a9951a3ccfe8a412c223fe0a29ea9e04d9c | Shell | beaglest/tf_aws | /bootstrap.sh | UTF-8 | 1,022 | 2.9375 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Amazon Linux web-tier bootstrap. Rendered by Terraform templatefile:
# the rds_uname / rds_pwd / rds_address / rds_dbname placeholders are
# substituted before the script runs.
sudo yum update -y
sudo yum install -y httpd24 php70 php70-mysqlnd
sudo service httpd start
sudo chkconfig httpd on
# Let ec2-user manage the web root alongside Apache (setgid web root).
sudo usermod -a -G apache ec2-user
sudo chown -R ec2-user:apache /var/www
sudo chmod 2775 /var/www
echo "<html><h1>Hello</h1></html>" > /var/www/html/index.html
# Emit a PHP connectivity probe line by line; \$ keeps the dollar signs
# literal for PHP while the template placeholders are filled in.
echo "<?php" >> /var/www/html/index.php
echo "\$username = \"${rds_uname}\";" >> /var/www/html/index.php
echo "\$password = \"${rds_pwd}\";" >> /var/www/html/index.php
echo "\$hostname = \"${rds_address}\";" >> /var/www/html/index.php
echo "\$dbname = \"${rds_dbname}\";" >> /var/www/html/index.php
echo "\$dbhandle = mysqli_connect(\$hostname, \$username, \$password) or die(\"Unable to connect to MySQL\");" >> /var/www/html/index.php
echo "echo \"Connected to MySQL using username - \$username, host - \$hostname<br>\";" >> /var/www/html/index.php
# Fixed: this line originally had unescaped inner quotes and was missing
# the redirect, so the select-db check never made it into index.php.
echo "\$selected = mysqli_select_db(\$dbhandle, \$dbname) or die(\"Unable to connect to MySQL DB - check the database name and try again.\");" >> /var/www/html/index.php
echo "?>" >> /var/www/html/index.php
| true |
33ba2ed35f37007f02894dcddc3f665e127ffc1c | Shell | fekepp/dlubm-scal-eval | /experiments.saved/eval_1x_m3.large-manager_1x_m3.large-worker_50x_m3.large/scripts/experiment-adaptation-departments | UTF-8 | 5,177 | 2.875 | 3 | [] | no_license | #!/bin/bash
# Deploy an evaluation workload to every department endpoint of every
# university: create LDP containers, upload programs and SPARQL queries,
# then create and POST (start) a run resource for queries 01..14.
# Include configuration
source configuration
# Departments per university; index = university number. UNIVERSITY_AMOUNT
# is expected to come from the sourced `configuration` file.
department_amounts=(15 19 16 21 22)
university_index_first=0
university_index_last=$(expr $UNIVERSITY_AMOUNT - 1)
university_index_padding=${#university_index_last}
for university_index in $(seq ${university_index_first} ${university_index_last})
do
department_index_first=0
department_index_last=$(expr ${department_amounts[${university_index}]} - 1)
department_index_padding=${#department_index_last}
for department_index in $(seq ${department_index_first} ${department_index_last})
do
echo "__________________________________________________"
echo "__________________________________________________"
echo "University ${university_index}"
echo "Department ${department_index}"
# Containers
echo "__________________________________________________"
echo "PUT > Content-Type: text/turtle > http://d${department_index}.u${university_index}.dlubm.ddns.me/evaluation"
curl -v -sS -X "PUT" -H "Content-Type: text/turtle" http://d${department_index}.u${university_index}.dlubm.ddns.me/evaluation --data-binary @resources/ldp-container.ttl
echo "__________________________________________________"
echo "PUT > Content-Type: text/turtle > http://d${department_index}.u${university_index}.dlubm.ddns.me/evaluation/programs"
curl -v -sS -X "PUT" -H "Content-Type: text/turtle" http://d${department_index}.u${university_index}.dlubm.ddns.me/evaluation/programs --data-binary @resources/ldp-container.ttl
echo "__________________________________________________"
echo "PUT > Content-Type: text/turtle > http://d${department_index}.u${university_index}.dlubm.ddns.me/evaluation/queries"
curl -v -sS -X "PUT" -H "Content-Type: text/turtle" http://d${department_index}.u${university_index}.dlubm.ddns.me/evaluation/queries --data-binary @resources/ldp-container.ttl
echo "__________________________________________________"
echo "PUT > Content-Type: text/turtle > http://d${department_index}.u${university_index}.dlubm.ddns.me/evaluation/resources"
curl -v -sS -X "PUT" -H "Content-Type: text/turtle" http://d${department_index}.u${university_index}.dlubm.ddns.me/evaluation/resources --data-binary @resources/ldp-container.ttl
# Programs
echo "__________________________________________________"
echo "PUT > Content-Type: text/n3 > http://d${department_index}.u${university_index}.dlubm.ddns.me/evaluation/programs/this"
curl -v -sS -X "PUT" -H "Content-Type: text/n3" http://d${department_index}.u${university_index}.dlubm.ddns.me/evaluation/programs/this --data "<http://this#dlubm> <http://this#uri> <http://d${department_index}.u${university_index}.dlubm.ddns.me/d> ."
echo "__________________________________________________"
echo "PUT > Content-Type: text/n3 > http://d${department_index}.u${university_index}.dlubm.ddns.me/evaluation/programs/dlubm-getters"
curl -v -sS -X "PUT" -H "Content-Type: text/n3" http://d${department_index}.u${university_index}.dlubm.ddns.me/evaluation/programs/dlubm-getters --data-binary @programs/dlubm-get-this.n3
echo "__________________________________________________"
echo "PUT > Content-Type: text/n3 > http://d${department_index}.u${university_index}.dlubm.ddns.me/evaluation/programs/rdfs"
curl -v -sS -X "PUT" -H "Content-Type: text/n3" http://d${department_index}.u${university_index}.dlubm.ddns.me/evaluation/programs/rdfs --data-binary @rulesets/rdfs.n3
echo "__________________________________________________"
echo "PUT > Content-Type: text/n3 > http://d${department_index}.u${university_index}.dlubm.ddns.me/evaluation/programs/owl-imports"
curl -v -sS -X "PUT" -H "Content-Type: text/n3" http://d${department_index}.u${university_index}.dlubm.ddns.me/evaluation/programs/owl-imports --data-binary @rulesets/owl-imports.n3
# Per-query upload + run creation + start (queries are zero-padded 01..14).
for query_index in {01..14}
do
# Queries
echo "__________________________________________________"
echo "PUT > Content-Type: application/sparql-query > http://d${department_index}.u${university_index}.dlubm.ddns.me/evaluation/queries/dlubm-${query_index}"
curl -v -sS -X "PUT" -H "Content-Type: application/sparql-query" http://d${department_index}.u${university_index}.dlubm.ddns.me/evaluation/queries/dlubm-${query_index} --data-binary @queries/dlubm-${query_index}.rq
# Runs
echo "__________________________________________________"
echo "PUT > Content-Type: text/turtle > http://d${department_index}.u${university_index}.dlubm.ddns.me/evaluation/run-dlubm-${query_index}"
curl -v -sS -X "PUT" -H "Content-Type: text/turtle" http://d${department_index}.u${university_index}.dlubm.ddns.me/evaluation/run-dlubm-${query_index} --data-binary @resources/run-dlubm-${query_index}.ttl
# Start
echo "__________________________________________________"
echo "POST > Content-Type: text/turtle > http://d${department_index}.u${university_index}.dlubm.ddns.me/evaluation/run-dlubm-${query_index}"
curl -v -sS -X "POST" -H "Content-Type: text/turtle" http://d${department_index}.u${university_index}.dlubm.ddns.me/evaluation/run-dlubm-${query_index}
done
# brief pause between departments
sleep 2
done
done
| true |
0145742943663b8e6356d51b38099a9ff034b73d | Shell | mattj-io/hugo-config | /start.sh | UTF-8 | 404 | 2.8125 | 3 | [] | no_license | #!/bin/bash
# Launch one hugo dev server per "*.blog" directory under HUGO_HOME,
# sourcing each blog's settings (THEME, PORT, BASEURL, BUILDDRAFTS) from
# its server.conf. Servers are left running in the background.
HUGO_BINARY=/usr/bin/hugo
HUGO_HOME=/home/hugo
# Iterate with a glob instead of parsing `ls` (fragile with unusual
# names; the old regex '.blog$' also matched any character before
# "blog", e.g. "xblog").
for BLOG_DIR in "$HUGO_HOME"/*.blog; do
    [ -d "$BLOG_DIR" ] || continue
    BLOG=${BLOG_DIR##*/}
    source "$HUGO_HOME/$BLOG/server.conf"
    "$HUGO_BINARY" server \
    --theme="$THEME" \
    --port="$PORT" \
    --baseUrl="$BASEURL" \
    --bind="0.0.0.0" \
    --buildDrafts="$BUILDDRAFTS" \
    --watch=true --appendPort=false --source="$HUGO_HOME/$BLOG" &
done
| true |
6a98f9e03e05990a18a34e932e84af63c777665c | Shell | helmo/drupal-permissions | /drupal-permissions.sh | UTF-8 | 3,414 | 4.5625 | 5 | [
"MIT"
] | permissive | #!/bin/bash
##
# Based from script found at: https://drupal.org/node/244924
#
# See README or code below for usage
##
# Refuse to run as non-root: the chown/chmod calls below require it.
# Is this really necessary?
if [ $(id -u) != 0 ]; then
printf "This script must be run as root.\n"
exit 1
fi
# Script arguments
# Positional fallbacks; the --flag=value options parsed below override
# these. A trailing slash on the path is stripped.
drupal_path=${1%/}
drupal_user=${2}
httpd_group="${3:-www-data}"
# Help menu
# Print usage (heredoc; <<- allows tab-indented delimiters) and exit 0.
print_help() {
cat <<-HELP
This script is used to fix permissions of a Drupal installation
you need to provide the following arguments:
1) Path to your Drupal installation.
2) Username of the user that you want to give files/directories ownership.
3) HTTPD group name (defaults to www-data for Apache).
Usage: (sudo) bash ${0##*/} --drupal_path=PATH --drupal_user=USER --httpd_group=GROUP
Example: (sudo) bash ${0##*/} --drupal_path=/usr/local/apache2/htdocs --drupal_user=john --httpd_group=www-data
HELP
exit 0
}
# Parse Command Line Arguments
# Each recognized --name=value flag overwrites the corresponding variable
# (${1#*=} strips everything up to and including the first "=");
# anything unrecognized aborts with a hint.
while [ $# -gt 0 ]; do
case "$1" in
--drupal_path=*)
drupal_path="${1#*=}"
;;
--drupal_user=*)
drupal_user="${1#*=}"
;;
--httpd_group=*)
httpd_group="${1#*=}"
;;
--help) print_help;;
*)
printf "Invalid argument, run --help for valid arguments.\n";
exit 1
esac
shift
done
# Basic check to see if this is a valid Drupal install
if [ -z "${drupal_path}" ] || [ ! -d "${drupal_path}/sites" ] || [ ! -f "${drupal_path}/modules/system/system.module" ]; then
  printf "Please provide a valid Drupal path.\n"
  print_help
  exit 1
fi
# Basic check to see if valid user.
# The id(1) substitution is now quoted: when the user does not exist the
# old unquoted form expanded to zero words, making `[` fail with a
# syntax error, so an invalid user slipped past this guard.
if [ -z "${drupal_user}" ] || [ "$(id -un "${drupal_user}" 2> /dev/null)" != "${drupal_user}" ]; then
  printf "Please provide a valid user.\n"
  print_help
  exit 1
fi
# Start changing permissions
# Code dirs: owner rwx / group rx; code files: owner rw / group r;
# upload ("files") trees additionally get group write; git metadata and
# the stock Drupal text files end up owner-only.
cd $drupal_path
printf "Changing ownership of all contents of \"${drupal_path}\":\n user => \"${drupal_user}\" \t group => \"${httpd_group}\"\n"
chown -R ${drupal_user}:${httpd_group} .
printf "Changing permissions of all directories inside \"${drupal_path}\" to \"rwxr-x---\"...\n"
find . -type d -exec chmod u=rwx,g=rx,o= '{}' \;
printf "Changing permissions of all files inside \"${drupal_path}\" to \"rw-r-----\"...\n"
find . -type f -exec chmod u=rw,g=r,o= '{}' \;
printf "Changing permissions of \"files\" directories in \"${drupal_path}/sites\" to \"rwxrwx---\"...\n"
cd ${drupal_path}/sites
find . -type d -name files -exec chmod ug=rwx,o= '{}' \;
printf "Changing permissions of all files inside all \"files\" directories in \"${drupal_path}/sites\" to \"rw-rw----\"...\n"
printf "Changing permissions of all directories inside all \"files\" directories in \"${drupal_path}/sites\" to \"rwxrwx---\"...\n"
for x in ./*/files; do
printf "Changing permissions ${x} ...\n"
find ${x} -type d -exec chmod ug=rwx,o= '{}' \;
find ${x} -type f -exec chmod ug=rw,o= '{}' \;
done
printf "Changing permissions of \".git\" directories and files in \"${drupal_path}\" to \"rwx------\"...\n"
cd ${drupal_path}
chmod -R u=rwx,go= .git
chmod u=rwx,go= .gitignore
printf "Changing permissions of various Drupal text files in \"${drupal_path}\" to \"rwx------\"...\n"
cd ${drupal_path}
chmod u=rwx,go= CHANGELOG.txt
chmod u=rwx,go= COPYRIGHT.txt
chmod u=rwx,go= INSTALL.mysql.txt
chmod u=rwx,go= INSTALL.pgsql.txt
chmod u=rwx,go= INSTALL.txt
chmod u=rwx,go= LICENSE.txt
chmod u=rwx,go= MAINTAINERS.txt
chmod u=rwx,go= UPGRADE.txt
echo "Done setting proper permissions on files and directories"
| true |
7071f46c36c31ffcc718b053b613c2944dbe418d | Shell | RagedUnicorn/docker-nexus | /docker-entrypoint.sh | UTF-8 | 936 | 3.921875 | 4 | [
"MIT"
] | permissive | #!/bin/sh
# @author Michael Wiesendanger <michael.wiesendanger@gmail.com>
# @description launch script for nexus
set -euo pipefail
# Create the Nexus data layout and hand ownership to the service user.
# The mkdir arguments are now quoted for consistency with the chown line
# below, so the paths survive a NEXUS_DATA_DIR containing spaces.
create_data_dir() {
  echo "$(date) [INFO]: Creating data directory ${NEXUS_DATA_DIR} and setting permissions"
  mkdir -p "${NEXUS_DATA_DIR}/etc" "${NEXUS_DATA_DIR}/log" "${NEXUS_DATA_DIR}/tmp"
  chown -R "${NEXUS_USER}":"${NEXUS_GROUP}" "${NEXUS_DATA_DIR}"
}
# Run one-time initialisation (tracked by the .init marker file) when
# needed, then start Nexus in the foreground, replacing this shell via
# exec so signals reach the JVM directly.
init() {
  if [ -f "${NEXUS_DATA_DIR}/.init" ]; then
    echo "$(date) [INFO]: Init script already run - starting Nexus"
    # check if run directory exists
    create_data_dir
  else
    echo "$(date) [INFO]: First time setup - running init script"
    create_data_dir
    touch "${NEXUS_DATA_DIR}/.init"
    echo "$(date) [INFO]: Init script done"
  fi
  # Common tail of both branches (previously duplicated verbatim).
  echo "$(date) [INFO]: Starting nexus ..."
  exec su-exec "${NEXUS_USER}" "${NEXUS_HOME}/bin/nexus" run
}
init
| true |
9cd42c39453d4f85979a87fdc013e885ea8bfc18 | Shell | boomini/Shell-Script-Study | /zip/ifelse3.sh | UTF-8 | 124 | 2.609375 | 3 | [] | no_license | #!/bin/bash
# Report whether ~/code exists and is a directory.
if [[ -d ~/code ]]; then
    echo "~/code는 디렉터리다 "
else
    echo "~/code는 디렉터리가 아니다 "
fi
| true |
bfb8e8b13e28e37ff098618f0b2bd1258091f71f | Shell | geniorgeous/ShellUtils | /pdfsplit.sh | UTF-8 | 601 | 3.828125 | 4 | [] | no_license | #!/bin/bash
# Extract a page range from a PDF with Ghostscript.
# Usage: pdfsplit.sh <filename.pdf> <first_page> <last_page>
# Output: <filename.pdf>.from.<first>.to.<last>.pdf next to the input.
# INIT: arguments number checking
if [ $# != 3 ]
then
echo "USAGE : $(basename "$0") <filename.pdf> <first_page_to_get> <last_page_to_get>
DESCRIPTION: for the pdf $1, generates a pdf starting at $2 and ending at $3 page
DEPENDENCIES: gs (ghostscript)
EXAMPLE: $(basename "$0") Prospecting-Objection-Handling.pdf 1 3
"
# exit non-zero so callers can detect misuse (the original exited 0)
exit 1
fi
# INIT: check that the input file exists
if [ ! -e "$1" ]
then
echo "file \"$1\" does not exist"
exit 1
fi
# All user-supplied values are quoted so filenames with spaces work.
gs -sDEVICE=pdfwrite -dNOPAUSE -dQUIET -dBATCH -dFirstPage="$2" -dLastPage="$3" -sOutputFile="$1.from.$2.to.$3.pdf" "$1"
| true |
eec658d692f189a0f60b36a223053b9661e77a1b | Shell | zhourao/spark-mlib-learning | /sbin/spark-submit.sh | UTF-8 | 1,642 | 2.546875 | 3 | [] | no_license | #!/bin/bash
# Submit a Spark job to YARN in cluster mode as the hdfs user.
# $1: fully-qualified main class; $2: Spring profile to activate.
set -x -e
classname=$1
profiles=$2
# The application/jar suffix is the last dot-separated component of the
# class name (e.g. com.foo.BarJob -> BarJob).
appName=`echo ${classname} | awk -F "." '{print $NF}'`
base_path=$(cd `dirname $0`; pwd)
HADOOP_USER_NAME=hdfs \
spark-submit \
--conf "spark.master=yarn" \
--conf "spark.submit.deployMode=cluster" \
--conf "spark.app.name=${appName}" \
--conf "spark.driver.cores=2" \
--conf "spark.driver.memory=512M" \
--conf "spark.driver.memoryOverhead=1G" \
--conf "spark.executor.memory=512M" \
--conf "spark.executor.memoryOverhead=1G" \
--conf "spark.executor.cores=2" \
--conf "spark.default.parallelism=100" \
--conf "spark.sql.shuffle.partitions=60" \
--conf "spark.dynamicAllocation.enabled=true" \
--conf "spark.dynamicAllocation.maxExecutors=5" \
--conf "spark.dynamicAllocation.minExecutors=2" \
--conf "spark.dynamicAllocation.executorIdleTimeout=60" \
--conf "spark.dynamicAllocation.cachedExecutorIdleTimeout=60" \
--conf "spark.driver.extraJavaOptions=-Dlog4j.configuration=log4j.properties" \
--files "$base_path/log4j.properties" \
--jars "/opt/cloudera/parcels/CDH/lib/kafka/libs/*.jar" \
--driver-java-options " -Duser.timezone=Asia/Shanghai -Dclient.encoding.override=UTF-8 -Dfile.encoding=UTF-8 -Duser.region=CN -Djava.net.preferIPv4Stack=true"\
--conf spark.driver.extraJavaOptions=" -Dspring.profiles.active=${profiles} -Dfile.encoding=utf-8 " \
--conf spark.executor.extraJavaOptions=" -Dspring.profiles.active=${profiles} -Dfile.encoding=utf-8 " \
--class ${classname} \
${base_path}/aitm-sparkoffline-job_${appName}.jar | true |
171fb63abfd3726994554fa7cd1a33d9078fa7fa | Shell | 2LeoCode/Piscine_42 | /c11/ex05/srcs/test.sh | UTF-8 | 1,341 | 3.5 | 4 | [] | no_license | #!/bin/bash
# Shorthand for the binary under test. The original used
# `alias do-op=./do-op.exe`, but aliases are not expanded in
# non-interactive bash, so every `do-op` call below failed to resolve;
# a shell function provides the same shorthand reliably.
do-op() { ./do-op.exe "$@"; }
# Compare actual ($1) against expected ($2); on mismatch, report and
# flag the whole run as failed via the global `diff` variable. Both
# operands are now quoted so empty values no longer break `[`.
function check_dif ()
{
	if [ "$1" != "$2" ]
	then
		echo "Expected $2, got $1"
		diff=1
	fi
}
# Expect do-op to print exactly "Stop : division by zero" for the given
# operands; otherwise show a reproduction command, the actual output,
# and mark the run as failed via the global `diff` flag.
function check_div_zero ()
{
	cmd="$(do-op "$1" "$2" "$3")"
	if [ "$cmd" != "Stop : division by zero" ]
	then
		echo "./do-op '$1' '$2' '$3'"
		echo $cmd
		echo "Expected 'Stop : division by zero' , got '$cmd'"
		diff=1
	fi
}
# Same as check_div_zero but expects the modulo-by-zero diagnostic.
function check_mod_zero ()
{
	cmd="$(do-op "$1" "$2" "$3")"
	if [ "$cmd" != "Stop : modulo by zero" ]
	then
		echo "./do-op '$1' '$2' '$3'"
		echo $cmd
		echo "Expected 'Stop : modulo by zero' , got '$cmd'"
		diff=1
	fi
}
# Expect no output from do-op: any argument means something was printed,
# which is a failure. "${1-}" keeps the test valid when no argument is
# passed (the old unquoted `[ ! -z $1 ]` broke on multi-word output).
function check_nul ()
{
	if [ -n "${1-}" ]
	then
		echo "Expected nothing, got something"
		diff=1
	fi
}
# Run the suite: exercise do-op with edge-case operands (garbage tokens,
# signed prefixes, overflow-sized products, div/mod by zero) and print
# OK when no checker set diff=1, KO otherwise.
diff=0
check_nul $(do-op)
check_dif $(do-op 5 + -b5) 5
check_nul $(do-op 54 +)
check_div_zero " ---+-++-56 8b4587hfg " / "hey bb"
check_dif $(do-op 2147483647 "*" 10000) 21474836470000
check_dif $(do-op " ---+-+-+33654" / " ---++85") 395
check_dif $(do-op " ----8488" % ++++++58) 20
check_dif $(do-op + 8 6) 0
check_dif $(do-op 0 "*" 0) 0
check_dif $(do-op tartiflette + 5) 5
check_mod_zero 5 % tartiflette
if [ $diff != 1 ]
then
echo -e "\nOK"
else
echo -e "\ndiff --- KO"
fi
| true |
96ab7bce3e5a631779651b20f860cdfc15e8b658 | Shell | Barbait/GOLDEN-ADM-MANAGER | /request/squid.sh | UTF-8 | 5,202 | 3.390625 | 3 | [] | no_license | #!/bin/bash
# Translate a Portuguese phrase ($2) to language code $1 via the `trans`
# CLI, caching results in /etc/texto-adm (shell-assignment lines that
# are sourced back into the associative array `texto`).
fun_trans () {
local texto
local retorno
declare -A texto
[[ ! -e /etc/texto-adm ]] && touch /etc/texto-adm
source /etc/texto-adm
if [[ -z $(echo ${texto[$2]}) ]]; then
# Cache miss: try randomly chosen engines until one returns output,
# then strip everything but letters, digits, spaces and dashes.
ENGINES=(aspell google deepl bing spell hunspell apertium yandex)
while [[ -z $retorno ]]; do
NUM="$(($RANDOM%${#ENGINES[@]}))"
retorno="$(source trans -e ${ENGINES[$NUM]} -b pt:$1 "$2"|sed -e 's/[^a-z0-9 -]//ig' 2>/dev/null)"
done
echo "texto[$2]='$retorno'" >> /etc/texto-adm
echo "$retorno"
else
echo "${texto[$2]}"
fi
}
# ANSI color table and separator bar used by the prompts below.
declare -A cor=( [0]="\033[33m" [1]="\033[33m" [2]="\033[33m" [3]="\033[33m" [4]="\033[33m" )
barra="\e[33m======================================================\033[1;37m"
# $1 is the target language code passed through to fun_trans.
[[ -z $1 ]] && exit || id=$1
# Bail out unless the manager's install directories exist.
SCPfrm="/etc/ger-frm" && [[ ! -d ${SCPfrm} ]] && exit
SCPinst="/etc/ger-inst" && [[ ! -d ${SCPinst} ]] && exit
# SECURITY NOTE(review): this fetches a script from a base64-encoded
# Dropbox URL straight into /usr/bin/trans with no integrity check.
API_TRANS="aHR0cHM6Ly93d3cuZHJvcGJveC5jb20vcy9sNmlxZjV4anRqbXBkeDUvdHJhbnM/ZGw9MA=="
SUB_DOM='base64 -d'
wget -O /usr/bin/trans $(echo $API_TRANS|$SUB_DOM) &> /dev/null
# Print the locally LISTENing TCP ports as de-duplicated
# "<process> <port>" lines; used below to validate port choices.
mportas () {
unset portas
portas_var=$(lsof -V -i tcp -P -n | grep -v "ESTABLISHED" |grep -v "COMMAND" | grep "LISTEN")
while read port; do
var1=$(echo $port | awk '{print $1}') && var2=$(echo $port | awk '{print $9}' | awk -F ":" '{print $2}')
[[ "$(echo -e $portas|grep "$var1 $var2")" ]] || portas+="$var1 $var2\n"
done <<< "$portas_var"
i=1
echo -e "$portas"
}
# Resolve the server's IP: compare the first non-loopback interface
# address with the externally observed one and prefer the external
# address when they differ (i.e. behind NAT).
fun_ip () {
MEU_IP=$(ip addr | grep 'inet' | grep -v inet6 | grep -vE '127\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}' | grep -o -E '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}' | head -1)
MEU_IP2=$(wget -qO- ipv4.icanhazip.com)
[[ "$MEU_IP" != "$MEU_IP2" ]] && IP="$MEU_IP2" || IP="$MEU_IP"
}
# On the first detected ethernet interface, interactively offer an
# "improve SSH packets" tweak: ask for RX/TX values (default 999999999)
# and apply them with `ethtool -G` (NIC ring parameters — advanced use).
fun_eth () {
eth=$(ifconfig | grep -v inet6 | grep -v lo | grep -v 127.0.0.1 | grep "encap:Ethernet" | awk '{print $1}')
[[ $eth != "" ]] && {
echo -e "$barra"
echo -e "${cor[3]} $(fun_trans ${id} "Aplicar Sistema Para Melhorar Pacotes Ssh?")"
echo -e "${cor[3]} $(fun_trans ${id} "Opcao Para Usuarios Avancados")"
echo -e "$barra"
read -p " [S/N]: " -e -i n sshsn
[[ "$sshsn" = @(s|S|y|Y) ]] && {
echo -e "${cor[1]} $(fun_trans ${id} "Correcao de problemas de pacotes no SSH...")"
echo -e " $(fun_trans ${id} "Qual A Taxa RX")"
echo -ne "[ 1 - 999999999 ]: "; read rx
[[ "$rx" = "" ]] && rx="999999999"
echo -e " $(fun_trans ${id} "Qual A Taxa TX")"
echo -ne "[ 1 - 999999999 ]: "; read tx
[[ "$tx" = "" ]] && tx="999999999"
apt-get install ethtool -y > /dev/null 2>&1
ethtool -G $eth rx $rx tx $tx > /dev/null 2>&1
}
echo -e "$barra"
}
}
# Run "$1" in a background subshell while animating a progress bar on
# the terminal until that process exits. The bar is time-based (it just
# loops), not a measure of real progress; tput cuu1/dl1 rewinds the line.
fun_bar () {
comando="$1"
_=$(
$comando > /dev/null 2>&1
) & > /dev/null
pid=$!
while [[ -d /proc/$pid ]]; do
echo -ne " \033[33m["
for((i=0; i<10; i++)); do
echo -ne "\033[33m##"
sleep 0.2
done
echo -ne "\033[33m]"
sleep 1s
echo
tput cuu1
tput dl1
done
echo -e " \033[1;33m[\033[33m####################\033[1;33m] - \033[1;32m100%\033[0m"
sleep 1s
}
# Toggle an stunnel4 SSL wrapper. If stunnel4 is already listening,
# purge it ("stop" semantics) and return. Otherwise interactively ask
# for an existing local port to forward to and a free port for SSL to
# listen on, install stunnel4, generate a self-signed certificate and
# write /etc/stunnel/stunnel.conf, then restart the service.
ssl_stunel () {
[[ $(mportas|grep stunnel4|head -1) ]] && {
echo -e "\033[1;33m $(fun_trans ${id} "Parando Stunnel")"
echo -e "$barra"
fun_bar "apt-get purge stunnel4 -y"
echo -e "$barra"
echo -e "\033[1;33m $(fun_trans ${id} "Parado Com Sucesso!")"
echo -e "$barra"
return 0
}
echo -e "\033[33m $(fun_trans ${id} "SSL GOLDEN ADM PRO")"
echo -e "$barra"
echo -e "\033[1;33m $(fun_trans ${id} "Selecione Uma Porta De Redirecionamento Interna")"
echo -e "\033[1;33m $(fun_trans ${id} "Ou seja, uma Porta no Seu Servidor Para o SSL")"
echo -e "$barra"
# Loop until the user names a numeric port that is actually in use
# (something must be listening there to forward to).
while true; do
echo -ne "\033[1;37m"
read -p " Local-Port: " portx
if [[ ! -z $portx ]]; then
if [[ $(echo $portx|grep [0-9]) ]]; then
[[ $(mportas|grep $portx|head -1) ]] && break || echo -e "\033[1;31m $(fun_trans ${id} "Porta Invalida")"
fi
fi
done
echo -e "$barra"
DPORT="$(mportas|grep $portx|awk '{print $2}'|head -1)"
echo -e "\033[1;33m $(fun_trans ${id} "Agora Presizamos Saber Qual Porta o SSL, Vai Escutar")"
echo -e "$barra"
# The SSL listen port, by contrast, must be free.
while true; do
read -p " Listen-SSL: " SSLPORT
[[ $(mportas|grep -w "$SSLPORT") ]] || break
echo -e "\033[1;33m $(fun_trans ${id} "esta Porta Ja esta em Uso")"
unset SSLPORT
done
echo -e "$barra"
echo -e "\033[33m $(fun_trans ${id} "Instalando SSL")"
echo -e "$barra"
fun_bar "apt-get install stunnel4 -y"
echo -e "cert = /etc/stunnel/stunnel.pem\nclient = no\nsocket = a:SO_REUSEADDR=1\nsocket = l:TCP_NODELAY=1\nsocket = r:TCP_NODELAY=1\n\n[stunnel]\nconnect = 127.0.0.1:${DPORT}\naccept = ${SSLPORT}" > /etc/stunnel/stunnel.conf
# Self-signed cert: the echo pipeline feeds openssl's interactive
# subject prompts; key+cert are concatenated into stunnel.pem.
openssl genrsa -out key.pem 2048 > /dev/null 2>&1
(echo DS; echo @DEADSSHOT593; echo VENTA DE VPS DE PAGA; echo WHATSAPP +593996534252; echo TELEGRAM @DEADSHOT593; echo @DEADSHOT593; echo @aDEADSHOT593)|openssl req -new -x509 -key key.pem -out cert.pem -days 1095 > /dev/null 2>&1
cat key.pem cert.pem >> /etc/stunnel/stunnel.pem
sed -i 's/ENABLED=0/ENABLED=1/g' /etc/default/stunnel4
service stunnel4 restart > /dev/null 2>&1
echo -e "$barra"
echo -e "\033[1;33m $(fun_trans ${id} "INSTALADO COM SUCESSO")"
echo -e "$barra"
return 0
}
ssl_stunel
55eada788a226fa7ac2719fc79db040b46966275 | Shell | liamgreenlee/dotfiles | /bspwm/dual_monitor.sh | UTF-8 | 337 | 2.578125 | 3 | [] | no_license | #!/bin/bash
# set up the two monitors for bspwm
# NOTE This is a simplistic approach because I already know the settings I
# want to apply.
# xrandr status lines read "HDMI2 connected ..." or "HDMI2 disconnected".
my_laptop_external_monitor=$(xrandr --query | grep 'HDMI2')
# Match " connected" with a leading space: the old pattern *connected*
# also matched "disconnected", so the dual-monitor layout was applied
# even when no external monitor was attached.
if [[ $my_laptop_external_monitor = *" connected"* ]]; then
    xrandr --output eDP1 --primary --auto --output HDMI2 --auto && ./.fehbg
fi
| true |
0a567d0185254c4eac263d6a564194bd10deba4b | Shell | scriptlib/sh | /LIBS/AppEnviron.sh | UTF-8 | 316 | 2.53125 | 3 | [] | no_license | #!/bin/bash
#File : /share/shellscript/LIBS/AppEnviron.sh
#Description: APPLIB for AppEnviron
#Author : xiaoranzzz
#Date : Mon Aug 13 16:14:50 CST 2007
# Include guard: when this library is sourced more than once, the marker
# variable keeps the path constants from being redefined.
if [ -z "$APPLIB_SOURCE_AppEnviron" ] ; then
APPLIB_SOURCE_AppEnviron=1
APP_SOURCE="/share/shellscript"
APP_DATA="/share/appdata"
fi
| true |
deada5e01a344480027f56c5bf5e6a2086c14fbd | Shell | yindejiang/RPPPS | /generate_list_high.bash | UTF-8 | 5,616 | 3.625 | 4 | [] | no_license | #!/bin/bash
# Beam-forming parameter presets: exactly one band block should be
# active (uncommented) at a time; low and middle bands are kept here
# commented out for reference.
#give the beam forming parameters, for low frequency, centerting at 315 MHz with 50 MHz bandwidth
#channel_start=1160
#channel_end=1359
#beam_size_subint=64
#band_selection="low"
#raw_file_string="0-1G"
#beam_offset=32
#give the beam forming parameters, for middle frequency, centerting at 650 MHz with 150 MHz bandwidth
#channel_start=2350
#channel_end=2849
#beam_size_subint=32
#band_selection="middle"
#raw_file_string="0-1G"
#beam_offset=16
#give the beam forming parameters, for high frequency (L-band), centerting at 1250 MHz with 400 MHz bandwidth
channel_start=104
channel_end=1703
beam_size_subint=32
band_selection="high"
raw_file_string="1-2G"
beam_offset=16
#other constants
# Subints per raw file, output folder and the derived file names of the
# list files and the generated cutting script.
file_subints=64
beam_folder="beams_"$band_selection
filelist="list_"$band_selection".txt"
filelist_subint="list_"$band_selection"_subints.txt"
cutting_script="run_file_process_"$band_selection".bash"
raw_data_path=`pwd`
raw_data_path=$raw_data_path"/raw"
#generate the file list
ls $raw_data_path"/"*$raw_file_string*.fits > $filelist
#see how many files do we have
lines=`cat ./$filelist | wc -l`
#get the detailed filelist
# NOTE(review): `-d` tests for a directory, but filelist_subint is a
# regular file — the net effect is "always remove any stale list";
# `-e`/`-f` would express the intent directly. Confirm before changing.
if [ ! -d "./$filelist_subint" ];then
rm -rdf "./$filelist_subint"
fi
echo "Checking FAST psrfits in this folder."
subints_total=0
# Count subints per file with readfile, accumulating the grand total and
# writing "<path> <subints>" lines for the main loop below.
for((i=1;i<=lines;i++))
do
filename=`cat $filelist | sed -n "$i p"`
subints=`readfile $filename | grep "Subints per file" | awk '{print $5}'`
((subints_total=subints_total+subints))
#echo "File $filename has $subints subints."
echo "$filename $subints" >> $filelist_subint
done
echo "$lines files found."
echo "Total subints we got are $subints_total."
#get the filename string and then for generate filenames
#filename_string=${filename#*/}
#filename_noext=${filename_string%.*}
#filename_string=${filename_string%_*}"_0"
#echo $filename_string
subints_start=0
if [ ! -d "./$cutting_script" ];then
rm -rdf ./$cutting_script
fi
if [ ! -d "./$beam_folder/" ];then
mkdir ./$beam_folder
fi
echo "Creating SHELL script."
# Walk the total subint range in steps of beam_offset; each window of
# beam_size_subint subints becomes one command appended to the cutting
# script: a plain cut when the window fits inside one file, or a
# combine when it straddles a file boundary.
while [ $subints_start -lt $subints_total ]
do
flag_combine=0
flag_cut=0
#seeking the file
index=$((subints_start/file_subints+1))
#deciding cut or combine
subints_left=$[ subints_start % file_subints ]
if((subints_left+beam_size_subint > file_subints))
then
flag_combine=1
else
flag_cut=1
fi
#echo $subints_left, $beam_size_subint, $file_subints, $flag_combine, $flag_cut, $subints_start
#generate the command
file1=`cat $filelist_subint | sed -n "$index p" | awk '{print $1}'`
#file1=${file1#*/}
file1=${file1#*raw/}
#file1_noext=${file1#*/}
file1_noext=${file1%.*}
((subints_file1_start=subints_left))
#echo "echo Processing $file1, start subints $subints_file1_start, channel start $channel_start, channel end $channel_end." >> $cutting_script
if((flag_combine==1))
then
# Window straddles two consecutive files: combine file1's tail with
# file2's head; subints_file2_end is relative to file2.
((index++))
file2=`cat $filelist_subint | sed -n "$index p" | awk '{print $1}'`
#file2=${file2#*/}
file2=${file2#*raw/}
#filename_string=${file1#*/}
file2_noext=${file2%.*}
((subints_file2_end=subints_file1_start+beam_size_subint-$file_subints-1))
#seems with python input error
#echo "python $RPPPS_DIR/combine_cut_FASTpsrfits_freq_time_splitpol.py $channel_start $channel_end $subints_file1_start $subints_file2_end $raw_data_path"/"$file1 $raw_data_path"/"$file2 >> /dev/null" >> $cutting_script
echo "echo Combining data sections, starts from sunint $subints_file1_start of $file1, ends at subint $subints_file2_end of $file2" >> $cutting_script
echo "cd ./raw/" >> $cutting_script
echo "python -W ignore $RPPPS_DIR/combine_cut_FASTpsrfits_freq_time_splitpol.py $channel_start $channel_end $subints_file1_start $subints_file2_end $file1 $file2 >> /dev/null" >> $cutting_script
echo "mv -f ./"$file1_noext"_"$file2_noext"_tot_"$channel_start"_"$channel_end"_"$subints_file1_start"_"$subints_file2_end".fits ../"$beam_folder"/">> $cutting_script
echo "cd .." >> $cutting_script
#echo "rm -rdf $file1_noext"_"$file2_noext"_pol1_"$channel_start"_"$channel_end"_"$subints_file1_start"_"$subints_file2_end".fits"" >> $cutting_script
#echo "rm -rdf $file1_noext"_"$file2_noext"_pol2_"$channel_start"_"$channel_end"_"$subints_file1_start"_"$subints_file2_end".fits"" >> $cutting_script
#echo "mv -f "$raw_data_path"/"$file1_noext"_"$file2_noext"_tot_"$channel_start"_"$channel_end"_"$subints_file1_start"_"$subints_file2_end".fits ./"$beam_folder"/" >> $cutting_script
fi
if((flag_cut==1))
then
# Window fits inside file1: a straightforward cut.
((subints_file1_end=subints_file1_start+beam_size_subint-1))
echo "echo Cutting data section in $file1, for subint $subints_file1_start to $subints_file1_end. " >> $cutting_script
echo "python -W ignore $RPPPS_DIR/cut_FASTpsrfits_freq_time_splitpol.py $channel_start $channel_end $subints_file1_start $subints_file1_end $raw_data_path"/"$file1 >> /dev/null" >> $cutting_script
#echo "rm -rdf $file1_noext"_pol1_"$channel_start"_"$channel_end"_"$subints_file1_start"_"$subints_file1_end".fits"" >> $cutting_script
#echo "rm -rdf $file1_noext"_pol2_"$channel_start"_"$channel_end"_"$subints_file1_start"_"$subints_file1_end".fits"" >> $cutting_script
echo "mv -f "$raw_data_path"/"$file1_noext"_tot_"$channel_start"_"$channel_end"_"$subints_file1_start"_"$subints_file1_end".fits ./"$beam_folder"/" >> $cutting_script
fi
((subints_start=subints_start+beam_offset))
done
echo "Cutting files."
bash ./$cutting_script
#clean up
#rm -rdf $filelist_subint
#rm -rdf $filelist
#rm -rdf $cutting_script
echo "All Finished."
| true |
6cfcfcf6828ba56705e1a801a3b19ffd0272cd40 | Shell | AMacedoP/hostapd | /utilities/compile.sh | UTF-8 | 1,246 | 3.109375 | 3 | [
"BSD-3-Clause"
] | permissive | #!/bin/sh
remote() {
tar zcf hostapdtesis.tar.gz -C "/mnt/d/Users/dti/Desktop/JJ/hostapd_JJ" ./../
scp hostapdtesis.tar.gz labtel@192.168.35.183:~/ina/sdk/dl/
ssh -t labtel@192.168.35.183 "cd ~/ina/sdk && make cleanhost && make -j1 V=sc"
rm -f /home/zorr/packages/*
scp labtel@192.168.35.183:~/ina/sdk/bin/ar71xx/packages/base/hostapd* /home/zorr/packages
scp /home/zorr/packages/* root@192.168.1.2:~/ipk
ssh -t root@192.168.1.2 "opkg remove hostapd && opkg install ~/ipk/*"
}
estamaquina() {
tar zcf hostapdtesis.tar.gz -C "/mnt/d/Users/dti/Desktop/JJ/hostapd_JJ" ./../
cp hostapdtesis.tar.gz /home/zorr/sdk/dl/
cd /home/zorr/sdk && make cleanhost && make -j1 V=sc
if [ $? -ne 0 ]; then
echo "La compilación falló"
exit 1
fi
rm -f /home/zorr/packages/*
cp /home/zorr/sdk/bin/ar71xx/packages/base/hostapd* /home/zorr/packages
scp /home/zorr/packages/* root@192.168.1.2:~/ipk
ssh -t root@192.168.1.2 "opkg remove hostapd && opkg install ~/ipk/*"
}
if [ $# -eq 0 ]; then
echo "Uso ./compile [local/remoto]"
exit 1
fi
HOST=$1
if [ $HOST = "local" ]; then
echo "Corriendo host local"
estamaquina
else
echo "Corriendo host remoto"
remote
fi
| true |
e48fa74b5db67b0f88c884eaf823d9e917b1c02b | Shell | delkyd/alfheim_linux-PKGBUILDS | /touchpad-indicator-bzr/PKGBUILD | UTF-8 | 1,256 | 2.921875 | 3 | [] | no_license | # Maintainer: twa022 <twa022 at gmail dot com>
_pkgname=touchpad-indicator
pkgname=${_pkgname}-bzr
pkgver=1.0.4.r92
pkgrel=1
pkgdesc='A simple indicator for controlling a synaptics touchpad'
arch=('i686' 'x86_64')
url='https://launchpad.net/touchpad-indicator'
license=('GPL3')
depends=('libappindicator-gtk3' 'python-requests' 'libnotify' 'xf86-input-synaptics')
makedepends=('bzr' 'python2-distutils-extra' 'python2-polib')
provides=("${_pkgname}")
conflicts=("${_pkgname}")
source=("${_pkgname}::bzr+https://code.launchpad.net/~lorenzo-carbonell/touchpad-indicator/0.9")
sha256sums=('SKIP')
pkgver() {
cd "${_pkgname}"
echo "$( head -1 debian/changelog | grep -E -o '([0-9]+)+(\.[0-9]+)*' | head -1 ).r$( bzr revno )"
}
prepare() {
cd "${_pkgname}"
# Don't install to /opt and install locales to correct directory
find . -type f -exec \
sed -i -e 's:/opt/extras.ubuntu.com/touchpad-indicator:/usr:g' \
-e 's:locale-langpack:locale:g' '{}' \;
}
build() {
cd "${_pkgname}"
python2 setup.py build
}
package() {
cd "${_pkgname}"
python2 setup.py install --root="${pkgdir}" --optimize=1
mv "${pkgdir}"/usr/share/applications/extras-touchpad-indicator.desktop \
"${pkgdir}"/usr/share/applications/touchpad-indicator.desktop
}
| true |
3b3534472b1eebc043d1c905b007fac3f7ee4b91 | Shell | IHTSDO/concrete-values-rf2-conversion | /daily-conversion-build/go.sh | UTF-8 | 8,697 | 3.296875 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
set -e; # Stop on error
#set -x;
#Parameters expected to be made available from Jenkins
envPrefix="dev-"
#username=
#password=
#branchPath=MAIN
#loadExternalRefsetData=true
#previousPackage=prod_main_2021-01-31_20201124120000.zip
if [ -z "${branchPath}" ]; then
echo "Environmental variable 'branchPath' has not been specified. Unable to continue"
exit -1
fi
if [ -z "${branchPath}" ]; then
echo "Environmental variable 'loadExternalRefsetData' has not been specified. Unable to continue"
exit -1
fi
if [ -z "${previousPackage}" ]; then
echo "Environmental variable 'previousPackage' has not been specified. Unable to continue"
exit -1
fi
effectiveDate=20210131
productKey=concrete_domains_daily_build
export_category="UNPUBLISHED"
loadTermServerData=false
converted_file_location=output
releaseCenter=international
source=terminology-server
s3BucketLocation="snomed-international/authoring/versioned-content/"
deltaArchiveFile="delta_archive.zip"
classifiedArchiveFile="classified_archive.zip"
curlFlags="isS"
commonParams="--cookie-jar cookies.txt --cookie cookies.txt -${curlFlags} --retry 0"
ims_url=https://${envPrefix}ims.ihtsdotools.org
tsUrl=https://${envPrefix}snowstorm.ihtsdotools.org
release_url=https:///${envPrefix}release.ihtsdotools.org
classifyUrl=https:///${envPrefix}classification.ihtsdotools.org
loginToIMS() {
echo "Logging in as $username to $ims_url"
curl --cookie-jar cookies.txt -H 'Accept: application/json' -H "Content-Type: application/json" ${ims_url}/api/authenticate --data '{"login":"'${username}'","password":"'${password}'","rememberMe":"false"}'
echo "Cookies saved"
}
downloadDelta() {
echo "Initiating Delta export against ${branchPath}"
curl -sSi ${tsUrl}/snowstorm/snomed-ct/exports \
-H 'Connection: keep-alive' \
-H 'Accept: application/json' \
-H 'Content-Type: application/json' \
--cookie cookies.txt \
--data-binary $'{ "branchPath": "'$branchPath'", "type": "DELTA"}' | grep -oP 'Location: \K.*' > location.txt || echo "Failed to obtain delta file"
deltaLocation=`head -1 location.txt | tr -d '\r'`
if [ -z "${deltaLocation}" ]; then
exit -1
fi
#Temp workaround for INFRA-1489
deltaLocation="${deltaLocation//http:\//https:\/\/}"
echo "Recovering delta from $deltaLocation"
wget -q --load-cookies cookies.txt ${deltaLocation}/archive -O ${deltaArchiveFile}
}
classify() {
echo "Zipping up the converted files"
convertedArchive="convertedArchive.zip"
rm ${convertedArchive} || true
zip -r ${convertedArchive} ${converted_file_location}
echo "Zipping up the converted files needed for classification"
convertedClassificationArchive="convertedClassificationArchive.zip"
rm ${convertedClassificationArchive} || true
shopt -s globstar
zip ${convertedClassificationArchive} ${converted_file_location}/**/*Concept* ${converted_file_location}/**/*_Relationship* ${converted_file_location}/**/*OWL* ${converted_file_location}/**/*_cissccRefset_MRCMAttributeDomain*
echo "Calling classification"
curl -sSi ${classifyUrl}/classification-service/classifications \
--cookie cookies.txt \
-H 'Connection: keep-alive' \
-F "previousPackage=${previousPackage}" \
-F "rf2Delta=@${convertedClassificationArchive}" | ( grep -oP 'Location: \K.*' || true ) > classification.txt
classificationLocation=`head -1 classification.txt | tr -d '\r'`
if [ -z "${classificationLocation}" ]; then
echo "Failed to recover classification identifier"
exit -2
fi
echo "Classification location: $classificationLocation"
output=
count=0
until [[ $output =~ COMPLETED ]]; do
output=$(checkClassificationStatus 2>&1)
echo "checked received: $output"
((++count))
echo "Checking response"
if [[ $output =~ FAILED ]]; then
echo "Classification reported failure"
exit -1
elif (( count > 20 )); then
echo "Classification took more than 20 minutes - giving up"
exit -1
elif [[ $output =~ RUNNING ]]; then
sleep 60
fi
done
echo "Classification successful. Recovering results from $classificationLocation"
wget -q --load-cookies cookies.txt ${classificationLocation}/results/rf2 -O ${classifiedArchiveFile}
}
checkClassificationStatus() {
curl -sS ${classificationLocation} \
--cookie cookies.txt
}
applyClassificationChanges() {
classificationOutputDir="classification_output"
mkdir ${classificationOutputDir} || true
rm ${classificationOutputDir}/*.txt || true
unzip -j -o ${classifiedArchiveFile} -d ${classificationOutputDir}
#We know the names of the files to append first the relationship delta
sourceFile=$(find ${classificationOutputDir}/*Relationship_Delta*)
targetFile=$(find ${converted_file_location} -name *_Relationship_Delta*)
javaCmd="java -cp target/CdConversion.jar org.snomed.otf.cd.ApplyClassificationDelta ${sourceFile} ${targetFile}"
echo "Appending ${sourceFile} to ${targetFile} using ${javaCmd}"
${javaCmd}
#Now are we also appending the concrete values file, or does it not exist yet?
sourceFile=$(find ${classificationOutputDir}/*RelationshipConcreteValues_Delta*)
sourceFileRenamed=$(basename $sourceFile)
sourceFileRenamed=${sourceFileRenamed//Classification/INT}
targetFile=$(find ${converted_file_location} -name *RelationshipConcreteValues_Delta*)
if [ -z "${targetFile}" ]; then
newLocation="${converted_file_location}/SnomedCT_Export/RF2Release/Terminology/${sourceFileRenamed}"
echo "Copying ${sourceFile} to ${newLocation}"
cp ${sourceFile} ${newLocation}
else
echo "Appending ${sourceFile} to ${targetFile}"
tail -n +2 ${sourceFile} >> ${targetFile}
fi
}
downloadpreviousPackage() {
if [ -f "${previousPackage}" ]; then
echo "Previous published release ${previousPackage} already present"
else
echo "Downloading previous published release: ${previousPackage} from S3 ${s3BucketLocation}"
aws s3 cp --no-progress s3://${s3BucketLocation}${previousPackage} ./
fi
}
uploadSourceFiles() {
today=`date +'%Y%m%d'`
echo "Renaming rf2 files to target effective date: $effectiveDate"
for file in `find . -type f -path "./${converted_file_location}/*" -name '*.txt'`;
do
mv -- "$file" "${file//${today}/${effectiveDate}}" || true
done
filesUploaded=0
uploadUrl="${release_url}/api/v1/centers/${releaseCenter}/products/${productKey}/sourcefiles/${source}"
echo "Uploading input files from ${converted_file_location} to ${uploadUrl}"
for file in `find . -type f -path "./${converted_file_location}/*" -name '*.txt'`;
do
echo "Upload Source File ${file}"
curl ${commonParams} -F "file=@${file}" ${uploadUrl} | grep HTTP | ensureCorrectResponse
filesUploaded=$((filesUploaded+1))
done
if [ ${filesUploaded} -lt 1 ]
then
echo -e "Failed to find files to upload.\nScript halted."
exit -1
fi
}
callSrs() {
echo "Deleting previous delta source files from: $source "
curl ${commonParams} -X DELETE ${release_url}/api/v1/centers/${releaseCenter}/products/${productKey}/sourcefiles/${source} | grep HTTP | ensureCorrectResponse
echo "Deleting previous delta Input Files "
curl ${commonParams} -X DELETE ${release_url}/api/v1/centers/${releaseCenter}/products/${productKey}/inputfiles/*.txt | grep HTTP | ensureCorrectResponse
uploadSourceFiles
echo "Preparing configuration for product: $product_key"
configJson="{\"effectiveDate\":\"${effectiveDate}\", \"exportCategory\":\"$export_category\", \"branchPath\":\"$branchPath\", \"termServerUrl\":\"$tsUrl\",\"loadTermServerData\":$loadTermServerData,\"loadExternalRefsetData\":$loadExternalRefsetData}"
echo "JSON to post: $configJson"
url="$release_url/api/v1/centers/${releaseCenter}/products/${productKey}/release"
echo "URL to post: $url"
curl ${commonParams} -X POST $url -H "Content-Type: application/json" -d "$configJson" | grep HTTP | ensureCorrectResponse
echo ""
echo "Release build for product $product_key is started."
echo "Please find the latest build result using the link below:"
echo "$release_url/api/v1/centers/${releaseCenter}/products/${productKey}/builds/"
}
ensureCorrectResponse() {
while read response
do
httpResponseCode=`echo $response | grep "HTTP" | awk '{print $2}'`
echo " Response received: $response "
if [ "${httpResponseCode:0:1}" != "2" ] && [ "${httpResponseCode:0:1}" != "1" ]
then
echo -e "Failure detected with non-2xx HTTP response code received [${httpResponseCode}].\nExecution terminated."
exit -1
fi
done
echo
}
loginToIMS
downloadDelta
downloadpreviousPackage
mkdir -p ${converted_file_location}
rm -r ./${converted_file_location}/* || true
echo "Performing Concrete Domain Conversion..."
java -jar target/CdConversion.jar -s ${previousPackage} -d ${deltaArchiveFile}
classify
applyClassificationChanges
callSrs
| true |
822fc9b5bf071afa9f2289220795cfa62f90f559 | Shell | augustVino/convertImg | /bin/convert.sh | UTF-8 | 2,975 | 3.984375 | 4 | [] | no_license | # 此处是因为shell脚本文件中不能使用系统中设置的alias
# 需要引入系统设置,然后才能使用
source ~/.bash_profile
#读取用户输入的输入路径(要转换的图片文件夹或图片文件)
read -p "Please input file or directory(请输入要转换的文件或文件夹路径) :" inputPath
#读取用户输入的输出路径(转换成功后的图片文件夹或图片文件)
read -p "Please input output directory(请输入转换后储存的路径) :" outputPath
#读取用户输入的转换质量
read -p "Please input Conversion quality(请输入转换质量) :" qualityNum
function convertFile(){
# 获取参数:输入路径、输出路径、转换质量
inDirectory=$1;
outDirectory=$2;
quality=$3;
fileIncludeSuffix=${inDirectory##*/}
#删除变量 b 右侧第一个出现.的所有字符,并复制给 c
fileName=${fileIncludeSuffix%.*}
if [[ ! "$inDirectory" ]]; then
return 1;
fi
output="$outDirectory""/""$fileName"".webp";
echo "output:"$output;
if [[ -f "$output" ]]; then
return 2;
fi
if [[ $quality ]]; then
if [[ $fileIncludeSuffix == *\.gif ]]; then
gif2webp "$inDirectory" -o "$output" -q "$quality" -lossy -m 6;
else
# -quiet,don't print anything
# -progress,report encoding progress
# -lossless,有的JPG反而更大!
# safename="$(echo $input | sed 's# #\\ #g')"
# 作为参数的时候,带上双引号,这样可以传递有空格的参数!
cwebp -q "$quality" -quiet -metadata "all" -mt "$inDirectory" -o "$output";
fi
else
cwebp -quiet -metadata "all" -mt "$inDirectory" -o "$output";
fi
}
# 指定路径的图片转化为webp格式,使用前判断是否是文件!
function convertDirectory(){
# 获取参数:输入路径、输出路径、转换质量
inDirectory=$1;
outDirectory=$2;
quality=$3;
# 如果是单个文件的话,执行convertFile
if [[ -f "$inDirectory" ]]; then
convertFile "$inDirectory" "$outDirectory" "$quality";
elif [[ -d "$inDirectory" ]]; then
echo "convert directory:""$inDirectory""."
fileList="$inDirectory/*";
for file in $fileList
do
if [[ ${file##*.} -ne "webp" ]]; then
continue;
fi
convertDirectory "$file" $outDirectory $quality;
done
else
echo "$inDirectory"" not exist..."
fi
}
# 如果没有输入文件夹路径,则自动设置为当前目录下边的img文件夹
if [[ ! "$inputPath" ]]; then
inputPath="$PWD/img";
fi
if [[ ! "$outputPath" ]]; then
outputPath="$PWD/img";
fi
if [[ ! "$qualityNum" ]]; then
qualityNum=100;
fi
# echo $inputPath
# echo $outputPath
# echo $qualityNum
# 调用convertDirectory并传参
convertDirectory "$inputPath" "$outputPath" "$qualityNum"; | true |
854d976b209de988bf808838f0a1f45b7c8bbd95 | Shell | dywisor/things-beyond-things | /scripts_install.sh | UTF-8 | 907 | 3.59375 | 4 | [] | no_license | #!/bin/sh
# Usage: scripts_install.sh [-p|--pretend] <srcroot> <dstroot> [<force_owner> [<root>]]
#
set -u
exec 0</dev/null
umask 0022
FAKE_MODE=n
case "${1-}" in
'-p'|'--pretend') FAKE_MODE=y; shift || exit ;;
esac
case "${1-}" in
'-h'|'--help')
printf '%s\n' \
"Usage: scripts_install [-p|--pretend] <srcroot> <dstroot> [<force_owner> [<root>]]"
exit 0
;;
esac
MODE=scripts
: ${DEREF_UNKNOWN_AS_ROOT:=y}
SRCROOT="${1:?missing <srcroot> arg.}"
DSTROOT="${2:?missing <dstroot> arg.}"
target_owner="${3:--}"
[ "${target_owner}" != "-" ] || target_owner="0:0"
ROOT="${4-${ROOT-}}"
set --
. "${TBT_PRJROOT:-${SRCROOT}}/functions.sh" || exit 8
[ -d "${DSTROOT}" ] || target_dodir "${DSTROOT}"
## copy scripts to $DSTROOT
default_file_install scripts "${DSTROOT}"
## apply permissions read from permtab
apply_permtab "${DSTROOT}" "${DSTROOT}" "${SRCROOT}/permtab.scripts"
| true |
144a59a4a28f574e79c4dd35111b9611fcb188fb | Shell | lemonbar/rocketmq-client-cpp | /rpm/build.sh | UTF-8 | 775 | 2.90625 | 3 | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | BASEDIR=$(dirname "$0")
if [[ ! -d ${BASEDIR}/rocketmq_x64/CENTOS/ ]]; then
echo "Can not find SPEC FILE"
exit 1
fi
if [[ ! -d /root/rpmbuild/SOURCES/rocketmq/include ]]; then
mkdir -p /root/rpmbuild/SOURCES/rocketmq
mkdir -p /root/rpmbuild/SOURCES/rocketmq/include
mkdir -p /root/rpmbuild/SOURCES/rocketmq/bin
fi
cp -R ${BASEDIR}/../include/* /root/rpmbuild/SOURCES/rocketmq/include
cp ${BASEDIR}/../bin/librocketmq.so /root/rpmbuild/SOURCES/rocketmq/bin
cp ${BASEDIR}/../bin/librocketmq.a /root/rpmbuild/SOURCES/rocketmq/bin
cp ${BASEDIR}/rocketmq_x64/CENTOS/rocketmq-client-cpp.spec /root/rpmbuild/SPECS
rpmbuild -bb /root/rpmbuild/SPECS/rocketmq-client-cpp.spec
cp /root/rpmbuild/RPMS/x86_64/*.rpm ${BASEDIR}/rocketmq_x64 | true |
5159d5984f8b00b71cc50f2852af52b6e07316f8 | Shell | wd5gnr/bashrc | /bashrc | UTF-8 | 1,946 | 3.03125 | 3 | [] | no_license | # This needs to get into your .bashrc somewhow
# for example: . .bash.d/bashrc
# Or just symlink this file as .bashrc or .bash_profile
# if you make sure this runs from
# .bash_profile instead of .bashrc or remove the
# interactive check from .bashrc then you can have the profile version
# of scripts run for all shells, not just interactive ones
if [ -f $HOME/.bash.d/local/profile/$(hostname)_.sh ]
then
source $HOME/.bash.d/local/profile/$(hostname)_.sh
fi
if [ -f $HOME/.bash.d/local/profile/${USER}@$(hostname)_.sh ]
then
source $HOME/.bash.d/local/profile/${USER}@$(hostname)_.sh
fi
if [ -f $HOME/.bash.d/os/profile/${OSTYPE}_.sh ]
then
source $HOME/.bash.d/os/profile/${OSTYPE}_.sh
fi
for i in $HOME/.bash.d/profile/*.sh; do source $i; done
unset i
if [ -f $HOME/.bash.d/local/profile/$(hostname).sh ]
then
source $HOME/.bash.d/local/profile/$(hostname).sh
fi
if [ -f $HOME/.bash.d/local/profile/${USER}@$(hostname).sh ]
then
source $HOME/.bash.d/local/profile/${USER}@$(hostname).sh
fi
if [ -f $HOME/.bash.d/os/profile/${OSTYPE}.sh ]
then
source $HOME/.bash.d/os/profile/${OSTYPE}.sh
fi
# If not running interactively, don't do anything
[[ "$-" != *i* ]] && return
if [ -f $HOME/.bash.d/local/$(hostname)_.sh ]
then
source $HOME/.bash.d/local/$(hostname)_.sh
fi
if [ -f $HOME/.bash.d/local/${USER}@$(hostname)_.sh ]
then
source $HOME/.bash.d/local/${USER}@$(hostname)_.sh
fi
if [ -f $HOME/.bash.d/os/${OSTYPE}_.sh ]
then
source $HOME/.bash.d/os/${OSTYPE}_.sh
fi
for i in $HOME/.bash.d/*.sh; do source $i; done
unset i
if [ -f $HOME/.bash.d/local/$(hostname).sh ]
then
source $HOME/.bash.d/local/$(hostname).sh
fi
if [ -f $HOME/.bash.d/local/${USER}@$(hostname).sh ]
then
source $HOME/.bash.d/local/${USER}@$(hostname).sh
fi
if [ -f $HOME/.bash.d/os/${OSTYPE}.sh ]
then
source $HOME/.bash.d/os/${OSTYPE}.sh
fi
| true |
76a0f1a40b5b48e2df8cdc9b705873a3a18e7aef | Shell | arjunan4/ashcloud | /scripts/create_git_tag.sh | UTF-8 | 2,947 | 4.0625 | 4 | [] | no_license | #!/bin/bash
function success {
echo -e "\033[1;32m$1\033[m" >&2
}
function info {
echo -e "\033[1;36m$1\033[m" >&2
}
function error {
echo -e "\033[1;31m$1\033[m" >&2
}
# get latest tag
git fetch -q --all --tags
tag=$(git describe --tags `git rev-list --tags --max-count=1`)
if [ -n "$tag" ]; then
info "Git Tag exists for this repository ==> $tag"
else
info "No Git Tag found for this repository"
tag="0.0.0"
fi
#set the IFS value
OIFS=$IFS
IFS='.'
read -ra ADDR <<< "$tag"
info "Existing Tag version details => $tag"
info "Git Tag is splitted by . and array length is ==> ${#ADDR[@]}"
if [ ${#ADDR[@]} = 3 ]; then
current_major_ver=${ADDR[0]}
current_minor_ver=${ADDR[1]}
current_patch_ver=${ADDR[2]}
if [[ ${ADDR[0]} == *"v"* ]]; then
current_major_ver=${ADDR[0]#"v"}
fi
info "Major => $current_major_ver ; Minor => $current_minor_ver ; Patch => $current_patch_ver"
if [ $current_minor_ver = 9 ]; then
new_minor_ver="0"
new_patch_ver="0"
let "new_major_ver=$current_major_ver+1"
else
let "new_minor_ver=$current_minor_ver + 1"
new_patch_ver="0"
new_major_ver="$current_major_ver"
fi
new_tag_version="$new_major_ver.$new_minor_ver.$new_patch_ver"
info "New Tag version details => $new_tag_version"
info "Major => $new_major_ver ; Minor => $new_minor_ver ; Patch => $new_patch_ver"
else
info "Git tag format is NOT as expcted,hence exiting version script"
exit 1
fi
#unset the IFS value
IFS=$OIFS
new_tag_version="$new_major_ver.$new_minor_ver.$new_patch_ver"
info "bumping Git Tag to => ${new_tag_version} new version"
#get commit SHA for tagging
commit=$(git rev-parse HEAD)
info "commit message SHA => $commit"
# get repo name from git
remote=$(git config --get remote.origin.url)
repo=$(basename $remote .git)
#forming github repo URL
github_repo_url="https://api.github.com/repos/$REPO_OWNER/$repo/git/refs"
info "Github Repo URL => $github_repo_url"
#function which returns Data parameters
generate_post_data()
{
cat <<EOF
{
"ref": "refs/tags/$new_tag_version",
"sha": "$commit"
}
EOF
}
info "Data Parameters => $(generate_post_data)"
#using CURL post the below request to add git tag to repository
curl_response=$(curl -s -H "Authorization: token $GITHUB_TOKEN" -d "$(generate_post_data)" $github_repo_url)
info "Curl response => $curl_response"
if [ $? -eq 0 ]; then
info "Curl request is success"
else
info "Curl request Failed"
exit 1
fi
ref=$(echo "$curl_response" | jq -r '.ref')
info "Ref -> $ref"
#set the IFS value
OIFS=$IFS
IFS='/'
read -ra ref_split <<< "$ref"
#unset the IFS value
IFS=$OIFS
info "Array length ==> ${#ref_split[@]}"
for i in "${ref_split[@]}"
do
if [ $i = $new_tag_version ]; then
info "New Tag Version Committed successfully in remote"
break
fi
done
sha=$(echo "$curl_response" | jq -r '.object.sha')
info "sha -> $sha"
if [ $sha = $commit ]; then
info "Commit SHA found successfully"
fi | true |
6ac0400ca8ffb78ef0ed45009402c64548302cc5 | Shell | TakezoCan/robingreig-sandbox | /allFix.sh | UTF-8 | 4,088 | 3.1875 | 3 | [] | no_license | #!/bin/bash
# Connect to Ethernet before running so that it can access updates
# Add SAIT IP addresses for NTP servers
echo ''
bash -c "echo '************************* STARTING update of timesyncd file'"
sudo cp -f /etc/systemd/timesyncd.conf /etc/systemd/timesyncd.conf.bak
sudo bash -c "echo 'NTP= 10.197.2.9 10.197.3.9 0.ca.pool.ntp.org 1.ca.pool.ntp.org' >> /etc/systemd/timesyncd.conf"
bash -c "echo '************************* Update of timesyncd file COMPLETED\n'"
#sudo timedatectl set-ntp true
sleep 2
# Set current time
echo ''
bash -c "echo '************************* UPDATING date & time for update & upgrade'"
bash -c "echo '\nSetting current date & time for update & upgrade'"
read -p 'Current Date YYYY/MM/DD: ' datevar
sudo date -s $datevar
read -p 'Current Time HH:MM: ' timevar
sudo date -s $timevar
# Update && Upgrade
echo ''
bash -c "echo '************************* STARTING update & upgrade (this will take a few minutes)'"
sudo apt update && sudo apt upgrade -y
echo ''
bash -c "echo 'Update & Upgrade COMPLETED'"
# Temporary fix for WPA Enterprise on Raspbian Buster
echo ''
bash -c "echo '************************* STARTING update of wpa_supplicant file'"
sudo apt remove wpasupplicant -y
sudo mv -f /etc/apt/sources.list /etc/apt/sources.list.bak
sudo bash -c "echo 'deb http://raspbian.raspberrypi.org/raspbian/ stretch main contrib non-free rpi' > /etc/apt/sources.list"
sudo apt update
sudo apt install wpasupplicant -y
sudo apt-mark hold wpasupplicant
sudo cp -f /etc/apt/sources.list.bak /etc/apt/sources.list
sudo apt update
bash -c "echo '************************* wpa_supplicant downgraded to v2.4'"
# Update wpa_supplicant file
echo ''
bash -c "echo '************************* STARTING update of wpa_supplicant file'"
sudo cp -f /etc/wpa_supplicant/wpa_supplicant.conf /etc/wpa_supplicant/wpa_supplicant.conf.bak
sudo bash -c "echo 'country=CA' > /etc/wpa_supplicant/wpa_supplicant.conf"
sudo bash -c "echo 'ctrl_interface=DIR=/var/run/wpa_supplicant GROUP=netdev' >> /etc/wpa_supplicant/wpa_supplicant.conf"
sudo bash -c "echo 'update_config=1' >> /etc/wpa_supplicant/wpa_supplicant.conf"
sudo bash -c "echo '' >> /etc/wpa_supplicant/wpa_supplicant.conf"
sudo bash -c "echo 'network={' >> /etc/wpa_supplicant/wpa_supplicant.conf"
sudo bash -c "echo ' ssid=\"sait-secure\"' >> /etc/wpa_supplicant/wpa_supplicant.conf"
sudo bash -c "echo ' priority=1' >> /etc/wpa_supplicant/wpa_supplicant.conf"
sudo bash -c "echo ' proto=RSN' >> /etc/wpa_supplicant/wpa_supplicant.conf"
sudo bash -c "echo ' key_mgmt=WPA-EAP' >> /etc/wpa_supplicant/wpa_supplicant.conf"
sudo bash -c "echo ' pairwise=CCMP' >> /etc/wpa_supplicant/wpa_supplicant.conf"
sudo bash -c "echo ' auth_alg=OPEN' >> /etc/wpa_supplicant/wpa_supplicant.conf"
sudo bash -c "echo ' eap=PEAP' >> /etc/wpa_supplicant/wpa_supplicant.conf"
sudo bash -c "echo '# identity=\"serv16makerspace01\"' >> /etc/wpa_supplicant/wpa_supplicant.conf"
sudo bash -c "echo '# password=\"MkSP@1601\"' >> /etc/wpa_supplicant/wpa_supplicant.conf"
read -p 'SAIT Username: ' uservar
sudo bash -c "echo ' identity=\"$uservar\"' >> /etc/wpa_supplicant/wpa_supplicant.conf"
read -p 'SAIT Password: ' passvar
#bash -c "echo 'passvar= '$passvar"
encryptpass=`echo -n $passvar | iconv -t utf16le | openssl md4`
#bash -c "echo 'encryptpass= '$encryptpass"
IFS=' '
read -ra hash <<< "$encryptpass"
#bash -c "echo 'hash= '$hash"
hashonly=${hash[1]}
#bash -c "echo 'hashonly= '$hashonly"
sudo bash -c "echo ' password=hash:$hashonly' >> /etc/wpa_supplicant/wpa_supplicant.conf"
sudo bash -c "echo ' phase1=\"peaplabel=0\"' >> /etc/wpa_supplicant/wpa_supplicant.conf"
sudo bash -c "echo ' phase2=\"auth=MSCHAPV2\"' >> /etc/wpa_supplicant/wpa_supplicant.conf"
sudo bash -c "echo '}' >> /etc/wpa_supplicant/wpa_supplicant.conf"
bash -c "echo '************************* Updating wpa_supplicant file DONE'"
# Reboot system for changes to take place
bash -c echo ''
bash -c "echo '************************* REBOOTING in 6 seconds to initialize all changes'"
sleep 6
sudo reboot
| true |
aff324e5a854809924a2372d3db54048333ff159 | Shell | ulyantsev/EFSM-tools | /evaluation/Unbeast-0.6b/run-experiments | UTF-8 | 598 | 3.40625 | 3 | [] | no_license | #!/bin/bash
rootdir=$(pwd)
function collect_sc {
start=$1
end=$2
rm -rf tmp
for ((n=$start; n<=$end; n+=1)) ; do
cat $rootdir/scenarios/$n
done
}
for ((nltl=1; nltl<=13; nltl+=1)) ; do
workdir=$rootdir/runs/$nltl
echo "running unbeast for $nltl formulas..."
start_unbeast=$(date +%s)
echo $(collect_sc 1 9 | ./scenarios-to-unbeast-simulator) | ./unbeast $workdir/spec.xml --synBDD --runSimulator > $workdir/result 2>$workdir/errlog
end_unbeast=$(date +%s)
echo $(echo "scale=4; $end_unbeast - $start_unbeast" | bc) > $workdir/t
done
| true |
4659fa7424a0227f2fc73a2816a1e82091151930 | Shell | sboosali/cards | /watch-backend.sh | UTF-8 | 1,285 | 3.453125 | 3 | [] | no_license | #!/bin/bash
set -e
########################################
GHCID_FILE=./cards-backend//ghcid.txt
echo '...' > "$GHCID_FILE"
emacsclient "$GHCID_FILE" &
########################################
nix-shell -A shells.ghc --run 'ghcid --directory="./cards-backend/" --reload="./sources/" --restart="./cards-backend.cabal" --project="cards-backend" --command "cabal new-repl cards-backend" --outputfile=./ghcid.txt'
########################################
# -c --command=COMMAND Command to run (defaults to ghci or cabal repl)
# -T --test=EXPR Command to run after successful loading
# --reload=PATH Reload when the given file or directory contents
# change (defaults to none)
# --restart=PATH Restart the command when the given file or directory
# contents change (defaults to .ghci and any .cabal file)
# -C --directory=DIR Set the current directory
# -o --outputfile=FILE File to write the full output to
# --ignore-loaded Keep going if no files are loaded. Requires --reload
# TARGET="cards-backend"
# CABAL_COMMAND="cabal new-repl $TARGET"
# GHCID_COMMAND="ghcid --command $CABAL_COMMAND"
# NIX_COMMAND="nix-shell -A shells.ghc $GHCID_COMMAND"
#
# echo ${NIX_COMMAND}
# eval ${NIX_COMMAND}
| true |
e3132910dde30045ef4d908a7d536029c9f0b9b9 | Shell | fengbingchun/Linux_Code_Test | /Samples_Shell/string_usage.sh | UTF-8 | 948 | 4.0625 | 4 | [] | no_license | #! /bin/bash
# 字符串的使用
# 字符串可以用单引号,也可以用双引号,也可以不用引号
# 单引号:
str='this is a string'; echo "${str}"
# 单引号字符串的限制:
# 单引号里的任何字符都会原样输出,单引号字符串中的变量是无效的
# 单引号字串中不能出现单引号(对单引号使用转义符后也不行)
echo '${str}' # print: ${str}
# 双引号: 双引号里可以有变量;双引号里可以出现转义字符
# 拼接字符串
var1="hello"; var2="beijing"
var3="hi, ${var1}, ${var2}!"; echo "${var3}"
# 获取字符串长度
echo "var3 length: ${#var3}"
# 抓取子字符串
# 从var3字符串第2个字符开始截取4个字符
echo "${var3}"; echo "${var3:1:4}"
# 查找子字符串: 注意:找出字符串中字符第一次出现的位置,若找不到则expr index返回0. 注意它匹配的是字符而非字符串
echo "${var3}"; echo `expr index "${var3}" i`
| true |
8249f623c9feee178b5e5ddefae51162abef1a70 | Shell | mkozakov/cidb | /jetty-resources/src/main/resources/stop.sh | UTF-8 | 1,220 | 4.0625 | 4 | [] | no_license | #!/bin/sh
# ---------------------------------------------------------------------------
# Optional ENV vars
# -----------------
# START_OPTS - parameters passed to the Java VM when running Jetty
# e.g. to increase the memory allocated to the JVM to 1GB, use
# set START_OPTS=-Xmx1024m
# ---------------------------------------------------------------------------
# Ensure that the commands below are always started in the directory where this script is
# located. To do this we compute the location of the current script.
PRG="$0"
while [ -h "$PRG" ]; do
ls=`ls -ld "$PRG"`
link=`expr "$ls" : '.*-> \(.*\)$'`
if expr "$link" : '/.*' > /dev/null; then
PRG="$link"
else
PRG=`dirname "$PRG"`/"$link"
fi
done
PRGDIR=`dirname "$PRG"`
cd "$PRGDIR"
JETTY_HOME=jetty
# The port on which to stop Jetty can be passed to this script as the first argument
if [ -n "$1" ]; then
JETTY_STOPPORT=$1
else
JETTY_STOPPORT=8079
fi
# Specify port and key to stop a running Jetty instance
START_OPTS="$START_OPTS -DSTOP.KEY=solrjettystop -DSTOP.PORT=$JETTY_STOPPORT"
# Specify Jetty's home directory
START_OPTS="$START_OPTS -Djetty.home=$JETTY_HOME"
java $START_OPTS -jar $JETTY_HOME/start.jar --stop
| true |
749a732e186fceffeb04f88e09b938dcd2c781a6 | Shell | nagamoo/my_config | /im_linux/create_rc.sh | UTF-8 | 3,276 | 2.984375 | 3 | [] | no_license | #!/bin/bash
##################################################
##
## 初期設定用
##
##
## curl -ocreate_rc.sh https://raw.githubusercontent.com/nagamoo/my_config/master/im_linux/create_rc.sh
## chmod 755 create_rc.sh
## ./create_rc.sh
useradd nagamoto
yum install -y vim wget
yum update -y
# root bash
cat << EOS > /root/.bashrc
# Source global definitions
if [ -f /etc/bashrc ]; then
. /etc/bashrc
fi
# User specific aliases and functions
PS1='\[\e[1;31m\]\u@\h\[\e[0m\] \[\e[37m\][\t]\[\e[0m\] \[\e[36m\][\w]\[\e[0m\] \n \[\e[31m\]\$\[\e[0m\]'
# User specific aliases and functions
alias pwd='pwd -P'
alias cd='cd -P'
alias ll='ls -la'
alias hh='history'
alias ps='ps awuxf'
alias rm='rm -i'
alias cp='cp -i'
alias mv='mv -i'
# 重複履歴を無視
export HISTCONTROL=ignoredups
export HISTSIZE=10000
# historyに時刻
HISTTIMEFORMAT='%Y%m%d %T ';
export HISTTIMEFORMAT
EOS
# root vim
cat << EOS > /root/.vimrc
syntax on
set encoding=utf-8
set tabstop=4
set softtabstop=4
set shiftwidth=4
set expandtab
set smarttab
set ic
set nu
set noautoindent
set incsearch
set hlsearch
set paste
colorscheme desert
autocmd BufWritePre * :%s/\s\+$//ge
autocmd FileType * let &l:comments=join(filter(split(&l:comments, ','), 'v:val =~ "^[sme]"'), ',')
EOS
# 俺アカウント用
cat << EOS > /home/nagamoto/.bashrc
#source global definitions
if [ -f /etc/bashrc ]; then
. /etc/bashrc
fi
# User specific aliases and functions
PS1='\[\e[1;33m\]\u@\h\[\e[0m\] \[\e[37m\][\t]\[\e[0m\] \[\e[36m\][\w]\[\e[0m\] \n \[\e[33m\]\$\[\e[0m\]'
# User specific aliases and functions
alias pwd='pwd -P'
alias cd='cd -P'
alias ll='ls -la'
alias hh='history'
alias ps='ps awuxf'
alias rm='rm -i'
alias cp='cp -i'
alias mv='mv -i'
# 重複履歴を無視
export HISTCONTROL=ignoredups
export HISTSIZE=10000
# historyに時刻
HISTTIMEFORMAT='%Y%m%d %T ';
export HISTTIMEFORMAT
EOS
cat << EOS > /home/nagamoto/.vimrc
syntax on
set encoding=utf-8
set tabstop=4
set softtabstop=4
set shiftwidth=4
set expandtab
set smarttab
set ic
set nu
set noautoindent
set incsearch
set hlsearch
set paste
colorscheme desert
autocmd BufWritePre * :%s/\s\+$//ge
autocmd FileType * let &l:comments=join(filter(split(&l:comments, ','), 'v:val =~ "^[sme]"'), ',')
EOS
cat << EOS > /etc/motd
_ _ _ _
(_) _ __ ___ ___ | |__ (_) | ___
| |_____| '_ \` _ \ / _ \| '_ \| | |/ _ \\
| |_____| | | | | | (_) | |_) | | | __/
|_| |_| |_| |_|\___/|_.__/|_|_|\___|
EOS
#DIR COLOR 書換
sed -i -e "s/DIR 01;34/DIR 01;35/" /etc/DIR_COLORS
sed -i -e "s/STICKY_OTHER_WRITABLE 30;42/STICKY_OTHER_WRITABLE 31;42/" /etc/DIR_COLORS
sed -i -e "s/OTHER_WRITABLE 34;42/OTHER_WRITABLE 33;42/" /etc/DIR_COLORS
eval `dircolors /etc/DIR_COLORS -b`
yum -y install ntp
cp /etc/ntp.conf{,.org}
sed -i -e "s/server 0.centos.pool.ntp.org iburst/server -4 ntp.nict.jp/" /etc/ntp.conf
sed -i -e "s/server 1.centos.pool.ntp.org iburst/server -4 ntp1.jst.mfeed.ad.jp/" /etc/ntp.conf
sed -i -e "s/server 2.centos.pool.ntp.org iburst/server -4 ntp2.jst.mfeed.ad.jp/" /etc/ntp.conf
sed -i -e "s/server 3.centos.pool.ntp.org iburst/server -4 ntp3.jst.mfeed.ad.jp/" /etc/ntp.conf
ntpdate ntp.nict.jp
service ntpd start
chkconfig ntpd on
cd
source /root/.bashrc
| true |
3f1f88d0a3ebe87308ac4454254d2f0f6feee97d | Shell | sajadtorkamani/dotfiles | /aliases/docker.zsh | UTF-8 | 440 | 2.921875 | 3 | [] | no_license | alias d="docker"
# Shorthand alias for docker-compose.
alias dc="docker-compose"

# Stop every running container.
# Fix: the unguarded `docker stop $(docker ps -q)` form invokes
# `docker stop` with no arguments (a usage error) when nothing is
# running; collect the ids first and no-op on an empty list.
function docker-stop-all() {
  local -a ids
  ids=($(docker ps -q))
  if (( ${#ids[@]} > 0 )); then
    docker stop "${ids[@]}"
  fi
}

# Open an interactive bash shell inside a container.
# $1 - container name or id
function ssh-docker() {
  docker container exec -it "$1" bash
}

# Quit the Docker Desktop application (macOS only -- uses AppleScript).
function stop-docker() {
  osascript -e 'quit app "Docker"'
}

# SSH onto Docker VM
# https://stackoverflow.com/questions/38532483/where-is-var-lib-docker-on-mac-os-x
function docker-vm() {
  docker run -it --privileged --pid=host debian nsenter -t 1 -m -u -n -i sh
}
| true |
10067a7873e1f781a80620bcc11040f948a468aa | Shell | OracleSiebel/ConfiguringSiebel | /Containerization/Docker/oraclelinux/classic-mde/build/installerServer/launchInstallerServer | UTF-8 | 1,406 | 3.359375 | 3 | [] | no_license | #!/bin/bash
# LICENSE UPL 1.0
#
# Copyright (c) 2019 Oracle and/or its affiliates. All rights reserved.
#
# Since: Dec, 2019
# Author: duncan.ford@oracle.com
# Description: Launches nginx to serve installation media for Docker build process
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER.
#

installerLocation=/media/installers

# uncomment in order to create the expected directory structure
# mkdir -p ${installerLocation}/oracle/client/additional
# mkdir -p ${installerLocation}/siebel

# Write a minimal nginx.conf (directory listing enabled) on first run only.
if [ ! -f "${installerLocation}/nginx.conf" ]; then
cat <<EOF >> "${installerLocation}/nginx.conf"
worker_processes 1;
error_log nginx_error.log;
events {
    worker_connections 1024;
}
http {
    server {
        listen 80;
        server_name local.com;
        root /usr/share/nginx/html;
        autoindex on;
        location / {
            autoindex on;
        }
    }
}
EOF
fi

set -x

# stop any existing installer container - important to pick up any change to mount point contents
# Fix: use an anchored --filter instead of `docker ps -a | grep installers`,
# which also matched any container whose name merely *contains* "installers".
if [ -n "$(docker ps -aq --filter name='^installers$')" ]; then
  docker stop installers && docker rm installers
fi

# ensure docker network 'install' exists (anchored filter, not substring grep)
if [ -z "$(docker network ls -q --filter name='^install$')" ]; then
  docker network create install
fi

# launch nginx to serve installer content for build
docker run --name installers -v "${installerLocation}:/usr/share/nginx/html:ro" -v "${installerLocation}/nginx.conf:/etc/nginx/nginx.conf:ro" -d -p 80:80 --network install nginx
eb8a52c5a34eefcb67d625925a23cde47c95d442 | Shell | dkhaldi/PIPS_TaskParallelization | /validation/.svn/pristine/eb/eb8a52c5a34eefcb67d625925a23cde47c95d442.svn-base | UTF-8 | 448 | 2.609375 | 3 | [] | no_license | #!/bin/sh
# PIPS validation driver: feed the tpips script below to tpips and emit
# the prettyprinted code for module ENTRY20 and entry INCREMENT.
# The tpips binary can be overridden via $tpips_version; defaults to the
# one on $PATH.
tpips=${tpips_version:-tpips}
# Merge stderr into stdout, then delete any line mentioning an absolute
# file path ("file .../...") so the recorded output is machine-independent.
# NOTE(review): the heredoc contains two `quit` commands; everything after
# the first appears unreachable -- confirm tpips semantics before editing.
($tpips 2>&1 | sed '/file *\//d') <<EOF
delete entry20
setproperty CHECK_FORTRAN_SYNTAX_BEFORE_RUNNING_PIPS FALSE
create entry20 entry20.f
setproperty PARSER_DUMP_SYMBOL_TABLE TRUE
echo
echo Module ENTRY20
echo
# display PARSED_PRINTED_FILE[ENTRY20]
display PRINTED_FILE[ENTRY20]
close
delete entry20
quit
echo
echo Entry INCREMENT
echo
display PRINTED_FILE[INCREMENT]
close
delete entry20
quit
EOF
| true |
a1e6a95f38e015d267766de8b6bcc08b3f6164a5 | Shell | sahilsehwag/dotfiles | /packages/tmux/scripts/files.sh | UTF-8 | 1,019 | 3 | 3 | [] | no_license | #!/usr/bin/env bash
# Fuzzy-pick a regular file under $HOME (fd + fzf-tmux 60%-wide popup)
# and print the chosen path on stdout.
#
# Picker key bindings (each aborts the picker after running):
#   alt-s/e/f/g  - detached tmux *session* in the file's directory running
#                  zsh / nvim on the file / vifm / lazygit, then switch to it
#   ctrl-s/e/f/g - same tools, but in a new *window* of the current session
#   ctrl-o       - open the file with the OS handler
select_file() {
	fd -tf . "$HOME" | fzf-tmux -p -w60% \
		--bind='alt-s:execute(tmux new-session -dPc $(dirname {}) zsh | xargs tmux switch-client -t)+abort' \
		--bind='alt-e:execute(tmux new-session -dPc $(dirname {}) "nvim {}" | xargs tmux switch-client -t)+abort' \
		--bind='alt-f:execute(tmux new-session -dPc $(dirname {}) vifm $(dirname {}) | xargs tmux switch-client -t)+abort' \
		--bind='alt-g:execute(tmux new-session -dPc $(dirname {}) lazygit | xargs tmux switch-client -t)+abort' \
		--bind='ctrl-s:execute(tmux new-window -c $(dirname {}) zsh)+abort' \
		--bind='ctrl-e:execute(tmux new-window -c $(dirname {}) "nvim {}")+abort' \
		--bind='ctrl-f:execute(tmux new-window -c $(dirname {}) vifm $(dirname {}))+abort' \
		--bind='ctrl-g:execute(tmux new-window -c $(dirname {}) lazygit)+abort' \
		--bind='ctrl-o:execute(open {})+abort'
}

# Pick a file and open it with nvim in a new tmux window.
# Fix: the original passed the *file* path to `tmux new-window -c`, which
# expects a start *directory*, and opened `nvim .` instead of the file.
# Start in the file's directory and hand the file to nvim, mirroring the
# ctrl-e binding above. printf %q shell-quotes paths containing spaces.
open_file() {
	local file
	file="$(select_file)"
	if [[ -n $file ]]; then
		tmux new-window -c "$(dirname "$file")" "nvim $(printf '%q' "$file")"
	fi
}

open_file
| true |
1391318940a41d56c0684c461aa4bdc8ac3d52a5 | Shell | rxk41/Shell-Scripting-Projects | /myrm.sh | UTF-8 | 516 | 3.734375 | 4 | [] | no_license | #!/bin/bash
# myrm - "safe rm": move targets into a recycle bin under /tmp instead of
# deleting them permanently.
# Usage: ./myrm <files>

# Single source of truth for the recycle-bin location.
recycle_bin="/tmp/garhajas/eecs2031m/a1/recycle-bin"

# If the path does not already exist, then make it (mkdir -p is a no-op
# when it does).
if [ ! -d "$recycle_bin" ]
then
	mkdir -p "$recycle_bin"
fi

if [ $# -gt 0 ] #if there are more than 0 files to delete
then
	# Quoted "$@" keeps arguments containing spaces intact; the
	# original unquoted $@ word-split such names.
	for i in "$@"
	do
		if [ -e "$i" ]
		then
			# `--` stops option parsing so names starting with '-' work.
			mv -- "$i" "$recycle_bin"
			echo "deleting $i"
		else
			# -e is false for broken symlinks, but mv can still move
			# them; for genuinely missing paths mv prints its own error.
			mv -- "$i" "$recycle_bin"
		fi
	done
else
	# Usage error: report on stderr. Deliberately no `exit` here, to
	# preserve the original script's control flow and exit status.
	echo "Error: no target specified" >&2
	echo "usage:./myrm <files>" >&2
fi
8c8e958540274bb850cea94fd44641957b7f1cd4 | Shell | fei98653201/koolproxyR-1 | /koolproxyR/scripts/KoolProxyR_status.sh | UTF-8 | 745 | 3.15625 | 3 | [] | no_license | #!/bin/sh
# Status probe for the KoolProxyR plugin on Koolshare router firmware:
# reports run state and versions back to the web UI via http_response.
#
# NOTE(review): alias expansion in non-interactive scripts is shell-
# dependent; this relies on the router's busybox ash behaviour -- confirm
# before running under plain bash, where the alias would not expand.
alias echo_date1='echo $(date +%Y年%m月%d日\ %X)'
export KSROOT=/koolshare
# Pull in Koolshare helper functions (dbus, http_response, ...).
source $KSROOT/scripts/base.sh
# Import every dbus key with the koolproxyR_ prefix as shell variables.
eval `dbus export koolproxyR_`
date=`echo_date1`
version=`dbus get koolproxyR_version`
# Count running koolproxy processes; `grep -v grep` excludes the grep of
# this pipeline itself from the count.
status=`ps | grep koolproxy | grep -v grep | wc -l`
koolproxyR_installing_version=`dbus get koolproxyR_new_install_version`
if [[ "$status" -ge "1" ]]; then
	# Running: report local version, latest available version, rule info.
	# NOTE(review): $rules_date_local / $rules_nu_local are never set in
	# this file -- presumably exported by base.sh or dbus; verify.
	http_response " 【$date】 正在运行KoolProxyR <font color='#CDCD00'> $version <font color='#1bbf35'> / 线上最新版本为: <font color='#FF0000'> $koolproxyR_installing_version @@绿坝规则:$rules_date_local / $rules_nu_local条"
else
	# Not running: emit the warning fragment shown by the web UI.
	http_response "<font color='#FF0000'>【警告】:KoolProxyR未运行!</font> @@<font color='#FF0000'>未加载!</font>"
fi
| true |
95e74988627b0c69b0b210cc46c689ee5be2d987 | Shell | openstack/heat-templates | /hot/software-config/example-templates/wordpress/wp_install.sh | UTF-8 | 543 | 2.5625 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash -v
# Install Apache + WordPress and point WordPress at the remote database.
# Expects db_name, db_user, db_password and db_ipaddr in the environment
# (injected by the Heat software config).
yum -y install httpd wordpress
# Let Apache serve WordPress to all clients, not just localhost.
sed -i "/Deny from All/d" /etc/httpd/conf.d/wordpress.conf
sed -i "s/Require local/Require all granted/" /etc/httpd/conf.d/wordpress.conf
# Fill in the DB settings. Fix: quote the sed expression and use '|' as
# the delimiter so credentials containing '/' or whitespace no longer
# break the command (the original unquoted s/.../$db_password/ did).
sed -i "s|database_name_here|${db_name}|" /etc/wordpress/wp-config.php
sed -i "s|username_here|${db_user}|" /etc/wordpress/wp-config.php
sed -i "s|password_here|${db_password}|" /etc/wordpress/wp-config.php
sed -i "s|localhost|${db_ipaddr}|" /etc/wordpress/wp-config.php
setenforce 0 # Otherwise net traffic with DB is disabled
systemctl start httpd.service
| true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.