blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
e15ae829da717783d292650395cbeb4e68b99a89
|
Shell
|
googleads/google-ads-ruby
|
/scripts/codegen.sh
|
UTF-8
| 850
| 2.59375
| 3
|
[
"Apache-2.0",
"MPL-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-protobuf"
] |
permissive
|
#!/bin/bash
# Regenerate the google-ads-ruby "factories" code and smoke-test the result.
# Must be run from the gem's root directory (paths below are relative to it).
set -euxo pipefail

# Wipe and regenerate the factories from scratch, then auto-format them.
rm -rf lib/google/ads/google_ads/factories
mkdir -p lib/google/ads/google_ads/factories
bundle exec ruby codegen/main.rb
bundle exec standardrb --fix -- lib/google/ads/google_ads/factories/**/*.rb lib/google/ads/google_ads/factories.rb

# Absolute path to the gem root: the parent of this script's directory.
GEM_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )/../"

# Install the freshly generated gem into a throwaway bundle in a temp dir
# (subshell keeps the cd from leaking) and exercise each factory entry point.
(
cd "$(mktemp -d)" || exit 1
bundle init
echo "gem 'google-ads-googleads', path: '$GEM_DIR'" >> Gemfile
bundle install
# Conduct a basic check that we still have a functional library and that all
# the factories actually work
cat <<EORUBY | bundle exec ruby
require 'google/ads/google_ads'
client = Google::Ads::GoogleAds::GoogleAdsClient.new("$GEM_DIR/google_ads_config.rb")
client.service.campaign
client.resource.campaign
client.operation.campaign
client.enum.policy_topic_entry_type
EORUBY
)
| true
|
00e569caf6d4a9895b26b4d6a47bbc216d23188f
|
Shell
|
Shubham-Sahoo/Operating-System
|
/Assignment_1/1b.sh
|
UTF-8
| 293
| 2.96875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Sort every file in an input directory numerically in descending order,
# writing each sorted file into an output directory and appending all sorted
# output to a single summary file.
#
# $1 - input directory  (default: 1.b.files)
# $2 - output directory (default: 1.b.files.out)
# $3 - summary file     (default: 1.b.out.txt)
sort_dir_files() {
    local in_dir=${1:-1.b.files}
    local out_dir=${2:-1.b.files.out}
    local summary=${3:-1.b.out.txt}
    local f name

    mkdir -p -- "$out_dir"
    : > "$summary"

    # Glob instead of parsing `ls` output (handles spaces in names);
    # nullglob makes an empty or missing input directory a no-op.
    shopt -s nullglob
    for f in "$in_dir"/*; do
        name=${f##*/}
        # Write the sorted lines intact. The original `echo $det > file`
        # word-split the sorted output and collapsed it onto one line,
        # destroying the line structure the sort produced.
        sort -nr -- "$f" > "$out_dir/$name"
        cat -- "$out_dir/$name" >> "$summary"
    done
    shopt -u nullglob

    # Preserve the original final pass over the combined output.
    sort -nr -- "$summary" > /dev/null
}

sort_dir_files "$@"
| true
|
070e1ab434f54ec7abdf812b3d681ea86f3ed053
|
Shell
|
tamerlanST/rapi_test
|
/report/get_report_data.sh
|
UTF-8
| 1,884
| 2.625
| 3
|
[] |
no_license
|
#!/bin/bash
# Smoke-test the report API: create a report template on a unit, fetch it
# back, verify it matches the expected response, then delete it.
# Expects ../lib.sh to define $server, $sid, $itemId and the error() helper.
source ../lib.sh

# report/update_report — create the report template.
svc='report/update_report'
params='{"n":"get_report_data","ct":"avl_unit","p":"{\"bind\":null}","tbl":[{"n":"unit_stats","l":"Статистика","f":0,"c":"","cl":"","p":"{\"address_format\":\"0_10_5\",\"time_format\":\"%E.%m.%Y_%H:%M:%S\",\"us_units\":0}","sch":{"y":0,"m":0,"w":0,"f1":0,"f2":0,"t1":0,"t2":0},"sl":"[\"Address\",\"Time Format\"]","s":"[\"address_format\",\"time_format\",\"us_units\"]"}],"id":0,"itemId":'"$itemId"',"callMode":"create"}'
resp=$(curl -sX POST --data-urlencode "params=$params" "$server?svc=$svc&sid=$sid")
# The response starts with '<junk><id>,'; extract the numeric report id.
reportId=$(expr match "$resp" '.\([0-9]*\),')
# Quote the slice: an empty or whitespace-containing response would
# otherwise break the [ ] test.
if [ "${resp:0:8}" == '{"error"' ]; then
    error "Report creation error - $resp";
fi

# report/get_report_data — fetch the template back and compare it against
# the exact expected JSON.
svc='report/get_report_data'
params='{"itemId":'"$itemId"',"col":['"$reportId"']}'
resp=$(curl -sX POST --data-urlencode "params=$params" "$server?svc=$svc&sid=$sid")
mresp='[{"id":'"$reportId"',"n":"get_report_data","ct":"avl_unit","p":"{\"bind\":null}","tbl":[{"n":"unit_stats","l":"Статистика","c":"","cl":"","cp":"","s":"[\"address_format\",\"time_format\",\"us_units\"]","sl":"[\"Address\",\"Time Format\"]","p":"{\"address_format\":\"0_10_5\",\"time_format\":\"%E.%m.%Y_%H:%M:%S\",\"us_units\":0}","sch":{"f1":0,"f2":0,"t1":0,"t2":0,"m":0,"y":0,"w":0,"fl":0},"f":0}]}]'
if [ "$resp" != "$mresp" ]; then
    error
fi

# report/update_report — delete the report we just created.
svc='report/update_report'
params='{"id":'"$reportId"',"itemId":'"$itemId"',"callMode":"delete"}'
resp=$(curl -sX POST --data-urlencode "params=$params" "$server?svc=$svc&sid=$sid")
mresp="[$reportId,null]"
if [ "${resp:0:8}" == '{"error"' ]; then
    error "Report delete error - $resp";
elif [ "$resp" != "$mresp" ]; then
    error "Can't delete report with id:$reportId"
fi
| true
|
267b7e5c988ad579c859dffce712ee5ff33ab9b4
|
Shell
|
hy0kl/study
|
/shell/remote.sh
|
UTF-8
| 4,308
| 4.0625
| 4
|
[] |
no_license
|
#!/bin/sh
# Time-stamp: <2011-05-28 18:02:10 Saturday by taoshanwen>
# Run a command on (or copy files to) a set of remote hosts via ssh/scp.
# Helpers (echoe, executeCommand, install, version) come from common.sh
# next to this script.
# NOTE(review): despite the #!/bin/sh shebang, this uses bashisms such as
# IFS=$'\n' — confirm it is actually run under bash.
readonly PROGRAM_NAME="remote.sh"
readonly PROGRAM_VERSION="1.0"

# Resolve the directory containing this script.
bin=`dirname "$0"`
bin=`cd "$bin"; pwd`
echo $bin
# NOTE(review): the next two lines look like leftover debugging — they dump
# common.sh and exit 0, making everything below unreachable. Remove them to
# restore the script's functionality.
cat "$bin"/common.sh
exit 0
. "$bin"/common.sh

# Print usage and exit.
# $1 (optional): exit code, default 1. Non-zero usage goes to stderr
# (via the eval'd "1>&2" redirection), zero goes to stdout.
usage()
{
    code=1
    if [ $# -gt 0 ]; then
        code="$1"
    fi
    if [ "$code" != 0 ]; then
        redirect="1>&2"
    fi
    eval cat "$redirect" << EOF
usage: ${PROGRAM_NAME} [OPTIONS] <HOST> <COMMAND>
${PROGRAM_NAME} [OPTIONS] -H <HOST> <COMMAND>
${PROGRAM_NAME} [OPTIONS] -c <COMMAND> <HOSTS>
${PROGRAM_NAME} [OPTIONS] -f <HOSTS_FILE> <COMMAND>
${PROGRAM_NAME} [OPTIONS] -F <FILE> [-d <DST_FILE>] <HOSTS>
Options:
-H <HOST>
Add host.
-f <HOSTS_FILE>
Add the hosts file.
-F <FILE>
Add the file to copy.
-d <DST_FILE>
Set the destination file.
-l <LOGIN_NAME>
Specifies the user to log in as on the remote machine.
-n Do not really execute command, only print command to execute.
-q Quiet, do not write process info to standard output.
-s When execute commands failed, stop execute other commands and exit.
-g Execute command foreground.
-i [<INSTALL_DIR>]
Install this shell script to your machine, INSTALL_DIR default is /usr/bin.
-o SSH_OPTIONS
Set ssh options.
-v Output version info.
-h Output this help.
EOF
    exit "$code"
}

# Defaults: really execute, split host lists on newlines, run in background.
isExecute=1
IFS=$'\n'
background="&"

# Accumulate hosts (newline-separated string), files to copy, and flags.
while getopts ":hvH:f:F:d:l:nqc:sigo:" OPT; do
    case "$OPT" in
        H)
            hosts="$hosts\n$OPTARG"
            ;;
        f)
            if [ ! -r "$OPTARG" ]; then
                echoe "Can not read file \`$OPTARG'."
                usage
            fi
            hosts="$hosts\n`cat $OPTARG`"
            ;;
        F)
            if [ ! -r "$OPTARG" ]; then
                echoe "Can not read file \`$OPTARG'."
                usage
            fi
            isCopy=1
            files="$files $OPTARG"
            ;;
        d)
            dstFile="$OPTARG"
            ;;
        l)
            user="$OPTARG"
            ;;
        n)
            isExecute=0
            ;;
        q)
            isQuiet=1
            ;;
        c)
            command="$OPTARG"
            ;;
        s)
            isStop=1
            ;;
        g)
            background=
            ;;
        i)
            install
            ;;
        o)
            sshOptions="$OPTARG"
            ;;
        v)
            version
            ;;
        h)
            usage 0
            ;;
        :)
            # Option present but missing its required argument.
            case "${OPTARG}" in
                ?)
                    echoe "Option \`-${OPTARG}' need argument.\n"
                    usage
            esac
            ;;
        ?)
            echoe "Invalid option \`-${OPTARG}'.\n"
            usage
            ;;
    esac
done
shift $((OPTIND - 1))

# Common ssh/scp invocations with user-supplied extra options.
sshOpts="-o StrictHostKeyChecking=no $sshOptions"
ssh="ssh $sshOpts"
scp="scp $sshOpts"

# Command-execution mode (no -F given): work out hosts/command from the
# remaining positional arguments, then run the command on each host.
if [ -z "$isCopy" ]; then
    if [ -z "$hosts" ]; then
        if [ "$#" -lt 1 ]; then
            echoe "No host and command specify.\n"
            usage
        elif [ "$#" -lt 2 ]; then
            echoe "No command specify.\n"
            usage
        fi
        if [ -n "$command" ]; then
            # -c given: every positional argument is a host.
            for i in $@; do
                hosts="$hosts\n$i"
            done
        else
            # First argument is the host, the rest form the command.
            hosts="$hosts\n$1"
            shift
            command="$@"
        fi
    else
        if [ "$#" -lt 1 ]; then
            echoe "No command specify.\n"
            usage
        fi
        if [ -n "$command" ]; then
            for i in $@; do
                hosts="$hosts\n$i"
            done
        else
            command="$@"
        fi
    fi
    # printf expands the embedded \n escapes into real newlines.
    for i in `printf "$hosts"`; do
        [ -n "$user" ] && login=" -l $user"
        executeCommand "$ssh $i$login \"$command\" 2>&1 | sed \"s/^/$i: /\" $background" "$isExecute" "$isQuiet" "$isStop"
    done
    wait
    exit
fi

# File-copy mode (-F given): remaining arguments are destination hosts.
IFS=
for i in $@; do
    hosts="$hosts\n$i"
done
IFS=$'\n'
for i in `printf "$hosts"`; do
    if [ -z "$user" ]; then
        host="$i"
    else
        host="$user@$i"
    fi
    executeCommand "$scp -r $files $host:$dstFile 2>&1 | sed \"s/^/$i: /\" $background" "$isExecute" "$isQuiet" "$isStop"
done
wait
| true
|
ec3782f6b88e6d26bf72e8bc5fc1639f786bd706
|
Shell
|
Kukuster/howmanysquares
|
/containers/compile/ubuntu1804clang_debug/compile.sh
|
UTF-8
| 649
| 3.1875
| 3
|
[] |
no_license
|
#!/bin/bash
# Compile showsquares against GraphicsMagick's Magick++ with clang++.
#
# for stability and compatibility reasons,
# because it's the GraphicsMagick++-config command is what gives actual exact
# keys required for proper compilation of a C++ with Magick++ library,
# and because output of this command substantially depends on the platform,
# it was decided to calculate those keys beforehand during the building of
# the image

# Keep source files in an array so paths stay intact without relying on
# unquoted word-splitting.
sources=(./source/showsquares.cpp ./source/KukMagick.cpp ./source/KukMagician.cpp)
output="./hmsbin/showsquares"
magickkeys=$(GraphicsMagick++-config --cppflags --cxxflags --ldflags --libs)

TIMEFORMAT='compilation time: %3Rs'
time {
    # $magickkeys is deliberately unquoted: it is a whitespace-separated
    # list of compiler flags that must be word-split.
    # shellcheck disable=SC2086
    clang++ -v -g -o "$output" "${sources[@]}" $magickkeys
}
| true
|
113e17ab8722bfe91247dffdb9c558e61a219138
|
Shell
|
pikju/api-umbrella
|
/templates/etc/perp/trafficserver/rc.main.etlua
|
UTF-8
| 1,242
| 3.40625
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# perp runscript (etlua template) supervising Apache Traffic Server for
# api-umbrella. The '<%- ... %>' markers are rendered from the api-umbrella
# config before this file is executed; perp calls it with "start <name>".
set -e -u

# Redirect stderr to stdout
exec 2>&1

umask "<%- config['umask'] %>"

if [ "${1}" = "start" ]; then
    echo "starting ${2}..."

    api_umbrella_user="<%- config['user'] %>"
    api_umbrella_group="<%- config['group'] %>"

    # runtool args: load rc.env, and drop privileges when a user is set.
    run_args=("-e" "rc.env")
    if [ -n "$api_umbrella_user" ]; then
        run_args+=("-u" "$api_umbrella_user")
    fi

    # Ensure trafficserver's var dir exists with restricted permissions
    # and the right ownership.
    dirs=("<%- config['var_dir'] %>/trafficserver")
    mkdir -p "${dirs[@]}"
    chmod 750 "${dirs[@]}"
    if [ -n "$api_umbrella_user" ]; then
        chown "$api_umbrella_user":"$api_umbrella_group" "${dirs[@]}"
    fi

    destination="<%- config['log']['destination'] %>"
    if [ "$destination" = "console" ]; then
        if [ -n "$api_umbrella_user" ]; then
            chown "$api_umbrella_user":"$api_umbrella_group" /dev/stdout
            chown "$api_umbrella_user":"$api_umbrella_group" /dev/stderr
        fi

        # When logging to the console, point trafficserver's log files at
        # this process's own stdout/stderr.
        ln -sf /dev/stdout "<%- config['log_dir'] %>/trafficserver/access.log"
        ln -sf /dev/stderr "<%- config['log_dir'] %>/trafficserver/diags.log"
        ln -sf /dev/stderr "<%- config['log_dir'] %>/trafficserver/manager.log"
        ln -sf /dev/stderr "<%- config['log_dir'] %>/trafficserver/traffic.out"
    fi

    # ${run_args[@]+...} expands to nothing when the array is empty,
    # which would otherwise trip 'set -u' on older bash.
    exec runtool ${run_args[@]+"${run_args[@]}"} traffic_manager --nosyslog
fi

exit 0
| true
|
dd700385bb0536309e045d226f5dc13a5e4e32dd
|
Shell
|
skywind3000/vim
|
/tools/bin/fff
|
UTF-8
| 32,923
| 3.84375
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
#
# fff - fucking fast file-manager.
get_os() {
    # Figure out the current operating system to set some specific variables.
    # '$OSTYPE' typically stores the name of the OS kernel.
    # Writes globals: opener, file_flags, FFF_TRASH_CMD, FFF_TRASH.
    # Systems not listed here (e.g. Linux) fall through and keep defaults.
    case $OSTYPE in
        # Mac OS X / macOS.
        darwin*)
            opener=open
            file_flags=bIL
        ;;

        haiku)
            opener=open

            [[ -z $FFF_TRASH_CMD ]] &&
                FFF_TRASH_CMD=trash

            # Resolve Haiku's native trash directory and make sure it exists.
            [[ $FFF_TRASH_CMD == trash ]] && {
                FFF_TRASH=$(finddir -v "$PWD" B_TRASH_DIRECTORY)
                mkdir -p "$FFF_TRASH"
            }
        ;;
    esac
}
setup_terminal() {
    # Setup the terminal for the TUI.
    # Reads global: max_items. Requires a tty (uses stty).
    # '\e[?1049h': Use alternative screen buffer.
    # '\e[?7l': Disable line wrapping.
    # '\e[?25l': Hide the cursor.
    # '\e[2J': Clear the screen.
    # '\e[1;Nr': Limit scrolling to scrolling area.
    # Also sets cursor to (0,0).
    printf '\e[?1049h\e[?7l\e[?25l\e[2J\e[1;%sr' "$max_items"

    # Hide echoing of user input
    stty -echo
}
reset_terminal() {
    # Reset the terminal to a useable state (undo all changes).
    # Inverse of setup_terminal(); called before spawning editors/shells
    # and on exit.
    # '\e[?7h': Re-enable line wrapping.
    # '\e[?25h': Unhide the cursor.
    # '\e[2J': Clear the terminal.
    # '\e[;r': Set the scroll region to its default value.
    # Also sets cursor to (0,0).
    # '\e[?1049l: Restore main screen buffer.
    printf '\e[?7h\e[?25h\e[2J\e[;r\e[?1049l'

    # Show user input.
    stty echo
}
clear_screen() {
    # Only clear the scrolling window (dir item list).
    # Reads globals: LINES, TMUX, max_items.
    # '\e[%sH': Move cursor to bottom of scroll area.
    # '\e[9999C': Move cursor to right edge of the terminal.
    # '\e[1J': Clear screen to top left corner (from cursor up).
    # '\e[2J': Clear screen fully (if using tmux) (fixes clear issues).
    # '\e[1;%sr': Clearing the screen resets the scroll region(?). Re-set it.
    # Also sets cursor to (0,0).
    printf '\e[%sH\e[9999C\e[1J%b\e[1;%sr' \
           "$((LINES-2))" "${TMUX:+\e[2J}" "$max_items"
}
setup_options() {
    # Parse select options once on startup so the work isn't repeated
    # every time they're needed.

    # Split the display format for normal files around its '%f' placeholder
    # into a prefix and a suffix.
    if [[ $FFF_FILE_FORMAT == *%f* ]]; then
        file_pre=${FFF_FILE_FORMAT%%'%f'*}
        file_post=${FFF_FILE_FORMAT##*'%f'}
    fi

    # Split the display format for marked files the same way.
    if [[ $FFF_MARK_FORMAT == *%f* ]]; then
        mark_pre=${FFF_MARK_FORMAT%%'%f'*}
        mark_post=${FFF_MARK_FORMAT##*'%f'}
    fi

    # Probe which flags this system's 'file' understands; when '-I' is
    # unsupported, fall back to the default flag set.
    if ! file -I &>/dev/null; then
        : "${file_flags:=biL}"
    fi
}
get_term_size() {
    # Get terminal size ('stty' is POSIX and always available).
    # This can't be done reliably across all bash versions in pure bash.
    # Writes globals: LINES, COLUMNS, max_items.
    # NOTE(review): requires stdin to be a terminal; without one 'stty size'
    # fails and LINES/COLUMNS end up empty — confirm callers guarantee a tty.
    read -r LINES COLUMNS < <(stty size)

    # Max list items that fit in the scroll area.
    ((max_items=LINES-3))
}
get_ls_colors() {
    # Parse the LS_COLORS variable and declare each file type
    # as a separate variable.
    # Format: ':.ext=0;0:*.jpg=0;0;0:*png=0;0;0;0:'
    # Writes globals: FFF_LS_COLORS, ls_cols, ls_patterns, and one exported
    # 'ls_<ext>' variable per LS_COLORS entry.
    [[ -z $LS_COLORS ]] && {
        FFF_LS_COLORS=0
        return
    }

    # Turn $LS_COLORS into an array.
    IFS=: read -ra ls_cols <<< "$LS_COLORS"

    for ((i=0;i<${#ls_cols[@]};i++)); {
        # Separate patterns from file types.
        [[ ${ls_cols[i]} =~ ^\*[^\.] ]] &&
            ls_patterns+="${ls_cols[i]/=*}|"

        # Prepend 'ls_' to all LS_COLORS items
        # if they aren't types of files (symbolic links, block files etc.)
        [[ ${ls_cols[i]} =~ ^(\*|\.) ]] && {
            ls_cols[i]=${ls_cols[i]#\*}
            ls_cols[i]=ls_${ls_cols[i]#.}
        }
    }

    # Strip non-ascii characters from the string as they're
    # used as a key to color the dir items and variable
    # names in bash must be '[a-zA-z0-9_]'.
    ls_cols=("${ls_cols[@]//[^a-zA-Z0-9=\\;]/_}")

    # Store the patterns in a '|' separated string
    # for use in a REGEX match later.
    ls_patterns=${ls_patterns//\*}
    ls_patterns=${ls_patterns%?}

    # Define the ls_ variables.
    # 'declare' can't be used here as variables are scoped
    # locally. 'declare -g' is not available in 'bash 3'.
    # 'export' is a viable alternative.
    export "${ls_cols[@]}" &>/dev/null
}
get_w3m_path() {
    # Find the path to the w3m-img library.
    # 'type -p' prints every existing candidate; only the first word is
    # kept, in the global 'w3m'.
    w3m_paths=(/usr/{local/,}{lib,libexec,lib64,libexec64}/w3m/w3mi*)
    read -r w3m _ < <(type -p w3mimgdisplay "${w3m_paths[@]}")
}
get_mime_type() {
    # Get a file's mime_type.
    # $1 - path to inspect; the result is stored in the global 'mime_type'.
    # file_flags comes from get_os()/setup_options() (platform-specific).
    mime_type=$(file "-${file_flags:-biL}" "$1" 2>/dev/null)
}
status_line() {
    # Status_line to print when files are marked for operation.
    # $1 (optional) - text to show instead of the current directory.
    # Reads globals: marked_files, file_program, scroll, list_total,
    # LINES, COLUMNS, FFF_COL2.
    local mark_ui="[${#marked_files[@]}] selected (${file_program[*]}) [p] ->"

    # Escape the directory string.
    # Remove all non-printable characters.
    PWD_escaped=${PWD//[^[:print:]]/^[}

    # '\e7': Save cursor position.
    # This is more widely supported than '\e[s'.
    # '\e[%sH': Move cursor to bottom of the terminal.
    # '\e[30;41m': Set foreground and background colors.
    # '%*s': Insert enough spaces to fill the screen width.
    # This sets the background color to the whole line
    # and fixes issues in 'screen' where '\e[K' doesn't work.
    # '\r': Move cursor back to column 0 (was at EOL due to above).
    # '\e[m': Reset text formatting.
    # '\e[H\e[K': Clear line below status_line.
    # '\e8': Restore cursor position.
    # This is more widely supported than '\e[u'.
    printf '\e7\e[%sH\e[30;4%sm%*s\r%s %s%s\e[m\e[%sH\e[K\e8' \
           "$((LINES-1))" \
           "${FFF_COL2:-1}" \
           "$COLUMNS" "" \
           "($((scroll+1))/$((list_total+1)))" \
           "${marked_files[*]:+${mark_ui}}" \
           "${1:-${PWD_escaped:-/}}" \
           "$LINES"
}
read_dir() {
    # Read a directory to an array and sort it directories first.
    # Writes globals: list, cur_list, list_total, previous_index.
    # Reads globals: PWD, OLDPWD.
    local dirs
    local files
    local item_index

    # If '$PWD' is '/', unset it to avoid '//'.
    [[ $PWD == / ]] && PWD=

    for item in "$PWD"/*; do
        if [[ -d $item ]]; then
            dirs+=("$item")
            ((item_index++))

            # Find the position of the child directory in the
            # parent directory list.
            [[ $item == "$OLDPWD" ]] &&
                ((previous_index=item_index))
        else
            files+=("$item")
        fi
    done

    list=("${dirs[@]}" "${files[@]}")

    # Indicate that the directory is empty.
    [[ -z ${list[0]} ]] &&
        list[0]=empty

    ((list_total=${#list[@]}-1))

    # Save the original dir in a second list as a backup.
    cur_list=("${list[@]}")
}
print_line() {
    # Format the list item and print it.
    # $1 - index into the global 'list' array.
    # Picks a color by file type (mirroring ls), adds cursor/mark
    # highlighting, then prints the line in place.
    local file_name=${list[$1]##*/}
    local file_ext=${file_name##*.}
    local format
    local suffix

    # If the dir item doesn't exist, end here.
    if [[ -z ${list[$1]} ]]; then
        return

    # Directory.
    elif [[ -d ${list[$1]} ]]; then
        format+=\\e[${di:-1;3${FFF_COL1:-2}}m
        suffix+=/

    # Block special file.
    elif [[ -b ${list[$1]} ]]; then
        format+=\\e[${bd:-40;33;01}m

    # Character special file.
    elif [[ -c ${list[$1]} ]]; then
        format+=\\e[${cd:-40;33;01}m

    # Executable file.
    elif [[ -x ${list[$1]} ]]; then
        format+=\\e[${ex:-01;32}m

    # Symbolic Link (broken).
    elif [[ -h ${list[$1]} && ! -e ${list[$1]} ]]; then
        format+=\\e[${mi:-01;31;7}m

    # Symbolic Link.
    elif [[ -h ${list[$1]} ]]; then
        format+=\\e[${ln:-01;36}m

    # Fifo file.
    elif [[ -p ${list[$1]} ]]; then
        format+=\\e[${pi:-40;33}m

    # Socket file.
    elif [[ -S ${list[$1]} ]]; then
        format+=\\e[${so:-01;35}m

    # Color files that end in a pattern as defined in LS_COLORS.
    # 'BASH_REMATCH' is an array that stores each REGEX match.
    elif [[ $FFF_LS_COLORS == 1 &&
            $ls_patterns &&
            $file_name =~ ($ls_patterns)$ ]]; then
        match=${BASH_REMATCH[0]}
        file_ext=ls_${match//[^a-zA-Z0-9=\\;]/_}
        format+=\\e[${!file_ext:-${fi:-37}}m

    # Color files based on file extension and LS_COLORS.
    # Check if file extension adheres to POSIX naming
    # standard before checking if it's a variable.
    elif [[ $FFF_LS_COLORS == 1 &&
            $file_ext != "$file_name" &&
            $file_ext =~ ^[a-zA-Z0-9_]*$ ]]; then
        file_ext=ls_${file_ext}
        format+=\\e[${!file_ext:-${fi:-37}}m

    else
        format+=\\e[${fi:-37}m
    fi

    # If the list item is under the cursor.
    (($1 == scroll)) &&
        format+="\\e[1;3${FFF_COL4:-6};7m"

    # If the list item is marked for operation.
    [[ ${marked_files[$1]} == "${list[$1]:-null}" ]] && {
        format+=\\e[3${FFF_COL3:-1}m${mark_pre:= }
        suffix+=${mark_post:=*}
    }

    # Escape the directory string.
    # Remove all non-printable characters.
    file_name=${file_name//[^[:print:]]/^[}

    printf '\r%b%s\e[m\r' \
           "${file_pre}${format}" \
           "${file_name}${suffix}${file_post}"
}
draw_dir() {
    # Print the max directory items that fit in the scroll area.
    # Reads/writes globals: scroll, y, find_previous, previous_index,
    # list_total, max_items.
    local scroll_start=$scroll
    local scroll_new_pos
    local scroll_end

    # When going up the directory tree, place the cursor on the position
    # of the previous directory.
    ((find_previous == 1)) && {
        ((scroll_start=previous_index-1))
        ((scroll=scroll_start))

        # Clear the directory history. We're here now.
        find_previous=
    }

    # If current dir is near the top of the list, keep scroll position.
    if ((list_total < max_items || scroll < max_items/2)); then
        ((scroll_start=0))
        ((scroll_end=max_items))
        ((scroll_new_pos=scroll+1))

    # If current dir is near the end of the list, keep scroll position.
    elif ((list_total - scroll < max_items/2)); then
        ((scroll_start=list_total-max_items+1))
        ((scroll_new_pos=max_items-(list_total-scroll)))
        ((scroll_end=list_total+1))

    # If current dir is somewhere in the middle, center scroll position.
    else
        ((scroll_start=scroll-max_items/2))
        ((scroll_end=scroll_start+max_items))
        ((scroll_new_pos=max_items/2+1))
    fi

    # Reset cursor position.
    printf '\e[H'

    for ((i=scroll_start;i<scroll_end;i++)); {
        # Don't print one too many newlines.
        ((i > scroll_start)) &&
            printf '\n'

        print_line "$i"
    }

    # Move the cursor to its new position if it changed.
    # If the variable 'scroll_new_pos' is empty, the cursor
    # is moved to line '0'.
    printf '\e[%sH' "$scroll_new_pos"
    ((y=scroll_new_pos))
}
draw_img() {
    # Draw an image file on the screen using w3m-img.
    # Bails out early when there's no display/framebuffer, when the current
    # item isn't an image, or when the needed tools are missing.
    # We can use the framebuffer; set win_info_cmd as appropriate.
    [[ $(tty) == /dev/tty[0-9]* && -w /dev/fb0 ]] &&
        win_info_cmd=fbset

    # X isn't running and we can't use the framebuffer, do nothing.
    [[ -z $DISPLAY && $win_info_cmd != fbset ]] &&
        return

    # File isn't an image file, do nothing.
    get_mime_type "${list[scroll]}"
    [[ $mime_type != image/* ]] &&
        return

    # w3m-img isn't installed, do nothing.
    type -p "$w3m" &>/dev/null || {
        cmd_line "error: Couldn't find 'w3m-img', is it installed?"
        return
    }

    # $win_info_cmd isn't installed, do nothing.
    type -p "${win_info_cmd:=xdotool}" &>/dev/null || {
        cmd_line "error: Couldn't find '$win_info_cmd', is it installed?"
        return
    }

    # Get terminal window size in pixels and set it to WIDTH and HEIGHT.
    if [[ $win_info_cmd == xdotool ]]; then
        IFS=$'\n' read -d "" -ra win_info \
            < <(xdotool getactivewindow getwindowgeometry --shell)

        declare "${win_info[@]}" &>/dev/null || {
            cmd_line "error: Failed to retrieve window size."
            return
        }
    else
        [[ $(fbset --show) =~ .*\"([0-9]+x[0-9]+)\".* ]]
        IFS=x read -r WIDTH HEIGHT <<< "${BASH_REMATCH[1]}"
    fi

    # Get the image size in pixels.
    read -r img_width img_height < <("$w3m" <<< "5;${list[scroll]}")

    # Subtract the status_line area from the image size.
    ((HEIGHT=HEIGHT-HEIGHT*5/LINES))

    # Scale the image down (keeping aspect ratio) to fit the window.
    ((img_width > WIDTH)) && {
        ((img_height=img_height*WIDTH/img_width))
        ((img_width=WIDTH))
    }

    ((img_height > HEIGHT)) && {
        ((img_width=img_width*HEIGHT/img_height))
        ((img_height=HEIGHT))
    }

    clear_screen
    status_line "${list[scroll]}"

    # Add a small delay to fix issues in VTE terminals.
    ((BASH_VERSINFO[0] > 3)) &&
        read "${read_flags[@]}" -srn 1

    # Display the image.
    printf '0;1;%s;%s;%s;%s;;;;;%s\n3;\n4\n' \
        "${FFF_W3M_XOFFSET:-0}" \
        "${FFF_W3M_YOFFSET:-0}" \
        "$img_width" \
        "$img_height" \
        "${list[scroll]}" | "$w3m" &>/dev/null

    # Wait for user input.
    read -ern 1

    # Clear the image.
    printf '6;%s;%s;%s;%s\n3;' \
        "${FFF_W3M_XOFFSET:-0}" \
        "${FFF_W3M_YOFFSET:-0}" \
        "$WIDTH" \
        "$HEIGHT" | "$w3m" &>/dev/null

    redraw
}
redraw() {
    # Redraw the current window; a 'full' redraw ($1 == full) also
    # re-reads the directory contents and resets the cursor to the top
    # of the list.
    if [[ $1 == full ]]; then
        read_dir
        scroll=0
    fi

    clear_screen
    draw_dir
    status_line
}
mark() {
    # Mark file for operation.
    # $1 - 'all' or an index into 'list'.
    # $2 - the key that was pressed; it selects which file operation
    #      (copy/move/link/trash/rename) runs on paste.
    # If an item is marked in a second directory,
    # clear the marked files.
    [[ $PWD != "$mark_dir" ]] &&
        marked_files=()

    # Don't allow the user to mark the empty directory list item.
    [[ ${list[0]} == empty && -z ${list[1]} ]] &&
        return

    if [[ $1 == all ]]; then
        if ((${#marked_files[@]} != ${#list[@]})); then
            marked_files=("${list[@]}")
            mark_dir=$PWD
        else
            marked_files=()
        fi

        redraw
    else
        if [[ ${marked_files[$1]} == "${list[$1]}" ]]; then
            # NOTE(review): unsets index 'scroll' rather than '$1'. These
            # are identical at the current call site (mark "$scroll" ...) —
            # confirm before adding new callers.
            unset 'marked_files[scroll]'
        else
            marked_files[$1]="${list[$1]}"
            mark_dir=$PWD
        fi

        # Clear line before changing it.
        printf '\e[K'
        print_line "$1"
    fi

    # Find the program to use.
    case "$2" in
        ${FFF_KEY_YANK:=y}|${FFF_KEY_YANK_ALL:=Y}) file_program=(cp -iR) ;;
        ${FFF_KEY_MOVE:=m}|${FFF_KEY_MOVE_ALL:=M}) file_program=(mv -i) ;;
        ${FFF_KEY_LINK:=s}|${FFF_KEY_LINK_ALL:=S}) file_program=(ln -s) ;;

        # These are 'fff' functions.
        ${FFF_KEY_TRASH:=d}|${FFF_KEY_TRASH_ALL:=D})
            file_program=(trash)
        ;;

        ${FFF_KEY_BULK_RENAME:=b}|${FFF_KEY_BULK_RENAME_ALL:=B})
            file_program=(bulk_rename)
        ;;
    esac

    status_line
}
trash() {
    # Trash a file.
    # Arguments: the marked files plus a trailing '.' (appended by the
    # paste handler), which is stripped via "${@:1:$#-1}" where needed.
    cmd_line "trash [${#marked_files[@]}] items? [y/n]: " y n

    [[ $cmd_reply != y ]] &&
        return

    if [[ $FFF_TRASH_CMD ]]; then
        # Pass all but the last argument to the user's
        # custom script. command is used to prevent this function
        # from conflicting with commands named "trash".
        command "$FFF_TRASH_CMD" "${@:1:$#-1}"
    else
        cd "$FFF_TRASH" || cmd_line "error: Can't cd to trash directory."

        # Prefer hard-linking into the trash then removing the originals;
        # fall back to a plain move when linking fails (e.g. cross-device).
        if cp -alf "$@" &>/dev/null; then
            rm -r "${@:1:$#-1}"
        else
            mv -f "$@"
        fi

        # Go back to where we were.
        cd "$OLDPWD" ||:
    fi
}
bulk_rename() {
    # Bulk rename files using '$EDITOR'.
    # Arguments: the marked files plus a trailing argument that is dropped
    # via "${@:1:$#-1}" (the '.' appended by the paste handler).
    rename_file=${XDG_CACHE_HOME:=${HOME}/.cache}/fff/bulk_rename
    marked_files=("${@:1:$#-1}")

    # Save marked files to a file and open them for editing.
    printf '%s\n' "${marked_files[@]##*/}" > "$rename_file"
    "${EDITOR:-vi}" "$rename_file"

    # Read the renamed files to an array.
    IFS=$'\n' read -d "" -ra changed_files < "$rename_file"

    # If the user deleted a line, stop here.
    ((${#marked_files[@]} != ${#changed_files[@]})) && {
        rm "$rename_file"
        cmd_line "error: Line mismatch in rename file. Doing nothing."
        return
    }

    printf '%s\n%s\n' \
        "# This file will be executed when the editor is closed." \
        "# Clear the file to abort." > "$rename_file"

    # Construct the rename commands.
    for ((i=0;i<${#marked_files[@]};i++)); {
        [[ ${marked_files[i]} != "${PWD}/${changed_files[i]}" ]] && {
            printf 'mv -i -- %q %q\n' \
                "${marked_files[i]}" "${PWD}/${changed_files[i]}"
            local renamed=1
        }
    } >> "$rename_file"

    # Let the user double-check the commands and execute them.
    ((renamed == 1)) && {
        "${EDITOR:-vi}" "$rename_file"
        source "$rename_file"
        rm "$rename_file"
    }

    # Fix terminal settings after '$EDITOR'.
    setup_terminal
}
open() {
    # Open directories and files.
    # $1 - path. Directories are entered; text-like files open in the
    # editor; everything else is handed to the system opener.
    if [[ -d $1/ ]]; then
        search=
        search_end_early=
        cd "${1:-/}" ||:
        redraw full

    elif [[ -f $1 ]]; then
        # Figure out what kind of file we're working with.
        get_mime_type "$1"

        # Open all text-based files in '$EDITOR'.
        # Everything else goes through 'xdg-open'/'open'.
        case "$mime_type" in
            text/*|*x-empty*|*json*)
                # If 'fff' was opened as a file picker, save the opened
                # file in a file called 'opened_file'.
                ((file_picker == 1)) && {
                    printf '%s\n' "$1" > \
                        "${XDG_CACHE_HOME:=${HOME}/.cache}/fff/opened_file"
                    exit
                }

                clear_screen
                reset_terminal
                "${VISUAL:-${EDITOR:-vi}}" "$1"
                setup_terminal
                redraw
            ;;

            *)
                # 'nohup': Make the process immune to hangups.
                # '&': Send it to the background.
                # 'disown': Detach it from the shell.
                nohup "${FFF_OPENER:-${opener:-xdg-open}}" "$1" &>/dev/null &
                disown
            ;;
        esac
    fi
}
cmd_line() {
    # Write to the command_line (under status_line).
    # $1 - prompt text.
    # $2 (optional) - mode: 'search' (live-filter the list), 'dirs'
    #   (restrict tab completion to directories), or a custom one-key
    #   'yes' value. $3 (optional) - custom one-key 'no' value.
    # The user's input is returned in the global 'cmd_reply'.
    cmd_reply=

    # '\e7': Save cursor position.
    # '\e[?25h': Unhide the cursor.
    # '\e[%sH': Move cursor to bottom (cmd_line).
    printf '\e7\e[%sH\e[?25h' "$LINES"

    # '\r\e[K': Redraw the read prompt on every keypress.
    # This is mimicking what happens normally.
    while IFS= read -rsn 1 -p $'\r\e[K'"${1}${cmd_reply}" read_reply; do
        case $read_reply in
            # Backspace.
            $'\177'|$'\b')
                cmd_reply=${cmd_reply%?}

                # Clear tab-completion.
                unset comp c
            ;;

            # Tab.
            $'\t')
                comp_glob="$cmd_reply*"

                # Pass the argument dirs to limit completion to directories.
                [[ $2 == dirs ]] &&
                    comp_glob="$cmd_reply*/"

                # Generate a completion list once.
                [[ -z ${comp[0]} ]] &&
                    IFS=$'\n' read -d "" -ra comp < <(compgen -G "$comp_glob")

                # On each tab press, cycle through the completion list.
                [[ -n ${comp[c]} ]] && {
                    cmd_reply=${comp[c]}
                    ((c=c >= ${#comp[@]}-1 ? 0 : ++c))
                }
            ;;

            # Escape / Custom 'no' value (used as a replacement for '-n 1').
            $'\e'|${3:-null})
                read "${read_flags[@]}" -rsn 2
                cmd_reply=
                break
            ;;

            # Enter/Return.
            "")
                # If there's only one search result and its a directory,
                # enter it on one enter keypress.
                [[ $2 == search && -d ${list[0]} ]] && ((list_total == 0)) && {
                    # '\e[?25l': Hide the cursor.
                    printf '\e[?25l'

                    open "${list[0]}"
                    search_end_early=1

                    # Unset tab completion variables since we're done.
                    unset comp c
                    return
                }

                break
            ;;

            # Custom 'yes' value (used as a replacement for '-n 1').
            ${2:-null})
                cmd_reply=$read_reply
                break
            ;;

            # Replace '~' with '$HOME'.
            "~")
                cmd_reply+=$HOME
            ;;

            # Anything else, add it to read reply.
            *)
                cmd_reply+=$read_reply

                # Clear tab-completion.
                unset comp c
            ;;
        esac

        # Search on keypress if search passed as an argument.
        [[ $2 == search ]] && {
            # '\e[?25l': Hide the cursor.
            printf '\e[?25l'

            # Use a greedy glob to search.
            list=("$PWD"/*"$cmd_reply"*)
            ((list_total=${#list[@]}-1))

            # Draw the search results on screen.
            scroll=0
            redraw

            # '\e[%sH': Move cursor back to cmd-line.
            # '\e[?25h': Unhide the cursor.
            printf '\e[%sH\e[?25h' "$LINES"
        }
    done

    # Unset tab completion variables since we're done.
    unset comp c

    # '\e[2K': Clear the entire cmd_line on finish.
    # '\e[?25l': Hide the cursor.
    # '\e8': Restore cursor position.
    printf '\e[2K\e[?25l\e8'
}
key() {
# Handle special key presses.
[[ $1 == $'\e' ]] && {
read "${read_flags[@]}" -rsn 2
# Handle a normal escape key press.
[[ ${1}${REPLY} == $'\e\e['* ]] &&
read "${read_flags[@]}" -rsn 1 _
local special_key=${1}${REPLY}
}
case ${special_key:-$1} in
# Open list item.
# 'C' is what bash sees when the right arrow is pressed
# ('\e[C' or '\eOC').
# '' is what bash sees when the enter/return key is pressed.
${FFF_KEY_CHILD1:=l}|\
${FFF_KEY_CHILD2:=$'\e[C'}|\
${FFF_KEY_CHILD3:=""}|\
${FFF_KEY_CHILD4:=$'\eOC'})
open "${list[scroll]}"
;;
# Go to the parent directory.
# 'D' is what bash sees when the left arrow is pressed
# ('\e[D' or '\eOD').
# '\177' and '\b' are what bash sometimes sees when the backspace
# key is pressed.
${FFF_KEY_PARENT1:=h}|\
${FFF_KEY_PARENT2:=$'\e[D'}|\
${FFF_KEY_PARENT3:=$'\177'}|\
${FFF_KEY_PARENT4:=$'\b'}|\
${FFF_KEY_PARENT5:=$'\eOD'})
# If a search was done, clear the results and open the current dir.
if ((search == 1 && search_end_early != 1)); then
open "$PWD"
# If '$PWD' is '/', do nothing.
elif [[ $PWD && $PWD != / ]]; then
find_previous=1
open "${PWD%/*}"
fi
;;
# Scroll down.
# 'B' is what bash sees when the down arrow is pressed
# ('\e[B' or '\eOB').
${FFF_KEY_SCROLL_DOWN1:=j}|\
${FFF_KEY_SCROLL_DOWN2:=$'\e[B'}|\
${FFF_KEY_SCROLL_DOWN3:=$'\eOB'})
((scroll < list_total)) && {
((scroll++))
((y < max_items)) && ((y++))
print_line "$((scroll-1))"
printf '\n'
print_line "$scroll"
status_line
}
;;
# Scroll up.
# 'A' is what bash sees when the up arrow is pressed
# ('\e[A' or '\eOA').
${FFF_KEY_SCROLL_UP1:=k}|\
${FFF_KEY_SCROLL_UP2:=$'\e[A'}|\
${FFF_KEY_SCROLL_UP3:=$'\eOA'})
# '\e[1L': Insert a line above the cursor.
# '\e[A': Move cursor up a line.
((scroll > 0)) && {
((scroll--))
print_line "$((scroll+1))"
if ((y < 2)); then
printf '\e[L'
else
printf '\e[A'
((y--))
fi
print_line "$scroll"
status_line
}
;;
# Go to top.
${FFF_KEY_TO_TOP:=g})
((scroll != 0)) && {
scroll=0
redraw
}
;;
# Go to bottom.
${FFF_KEY_TO_BOTTOM:=G})
((scroll != list_total)) && {
((scroll=list_total))
redraw
}
;;
# Show hidden files.
${FFF_KEY_HIDDEN:=.})
# 'a=a>0?0:++a': Toggle between both values of 'shopt_flags'.
# This also works for '3' or more values with
# some modification.
shopt_flags=(u s)
shopt -"${shopt_flags[((a=${a:=$FFF_HIDDEN}>0?0:++a))]}" dotglob
redraw full
;;
# Search.
${FFF_KEY_SEARCH:=/})
cmd_line "/" "search"
# If the search came up empty, redraw the current dir.
if [[ -z ${list[*]} ]]; then
list=("${cur_list[@]}")
((list_total=${#list[@]}-1))
redraw
search=
else
search=1
fi
;;
# Spawn a shell.
${FFF_KEY_SHELL:=!})
reset_terminal
# Make fff aware of how many times it is nested.
export FFF_LEVEL
((FFF_LEVEL++))
cd "$PWD" && "$SHELL"
setup_terminal
redraw
;;
# Mark files for operation.
${FFF_KEY_YANK:=y}|\
${FFF_KEY_MOVE:=m}|\
${FFF_KEY_TRASH:=d}|\
${FFF_KEY_LINK:=s}|\
${FFF_KEY_BULK_RENAME:=b})
mark "$scroll" "$1"
;;
# Mark all files for operation.
${FFF_KEY_YANK_ALL:=Y}|\
${FFF_KEY_MOVE_ALL:=M}|\
${FFF_KEY_TRASH_ALL:=D}|\
${FFF_KEY_LINK_ALL:=S}|\
${FFF_KEY_BULK_RENAME_ALL:=B})
mark all "$1"
;;
# Do the file operation.
${FFF_KEY_PASTE:=p})
[[ ${marked_files[*]} ]] && {
[[ ! -w $PWD ]] && {
cmd_line "warn: no write access to dir."
return
}
# Clear the screen to make room for a prompt if needed.
clear_screen
reset_terminal
stty echo
printf '\e[1mfff\e[m: %s\n' "Running ${file_program[0]}"
"${file_program[@]}" "${marked_files[@]}" .
stty -echo
marked_files=()
setup_terminal
redraw full
}
;;
# Clear all marked files.
${FFF_KEY_CLEAR:=c})
[[ ${marked_files[*]} ]] && {
marked_files=()
redraw
}
;;
# Rename list item.
${FFF_KEY_RENAME:=r})
[[ ! -e ${list[scroll]} ]] &&
return
cmd_line "rename ${list[scroll]##*/}: "
[[ $cmd_reply ]] &&
if [[ -e $cmd_reply ]]; then
cmd_line "warn: '$cmd_reply' already exists."
elif [[ -w ${list[scroll]} ]]; then
mv "${list[scroll]}" "${PWD}/${cmd_reply}"
redraw full
else
cmd_line "warn: no write access to file."
fi
;;
# Create a directory.
${FFF_KEY_MKDIR:=n})
cmd_line "mkdir: " "dirs"
[[ $cmd_reply ]] &&
if [[ -e $cmd_reply ]]; then
cmd_line "warn: '$cmd_reply' already exists."
elif [[ -w $PWD ]]; then
mkdir -p "${PWD}/${cmd_reply}"
redraw full
else
cmd_line "warn: no write access to dir."
fi
;;
# Create a file.
${FFF_KEY_MKFILE:=f})
cmd_line "mkfile: "
[[ $cmd_reply ]] &&
if [[ -e $cmd_reply ]]; then
cmd_line "warn: '$cmd_reply' already exists."
elif [[ -w $PWD ]]; then
: > "$cmd_reply"
redraw full
else
cmd_line "warn: no write access to dir."
fi
;;
# Show file attributes.
${FFF_KEY_ATTRIBUTES:=x})
[[ -e "${list[scroll]}" ]] && {
clear_screen
status_line "${list[scroll]}"
stat "${list[scroll]}"
read -ern 1
redraw
}
;;
# Toggle executable flag.
${FFF_KEY_EXECUTABLE:=X})
[[ -f ${list[scroll]} && -w ${list[scroll]} ]] && {
if [[ -x ${list[scroll]} ]]; then
chmod -x "${list[scroll]}"
status_line "Unset executable."
else
chmod +x "${list[scroll]}"
status_line "Set executable."
fi
}
;;
# Show image in terminal.
${FFF_KEY_IMAGE:=i})
draw_img
;;
# Go to dir.
${FFF_KEY_GO_DIR:=:})
cmd_line "go to dir: " "dirs"
# Let 'cd' know about the current directory.
cd "$PWD" &>/dev/null ||:
[[ $cmd_reply ]] &&
cd "${cmd_reply/\~/$HOME}" &>/dev/null &&
open "$PWD"
;;
# Go to '$HOME'.
${FFF_KEY_GO_HOME:='~'})
open ~
;;
# Go to trash.
${FFF_KEY_GO_TRASH:=t})
get_os
open "$FFF_TRASH"
;;
# Go to previous dir.
${FFF_KEY_PREVIOUS:=-})
open "$OLDPWD"
;;
# Refresh current dir.
${FFF_KEY_REFRESH:=e})
open "$PWD"
;;
# Directory favourites.
[1-9])
favourite="FFF_FAV${1}"
favourite="${!favourite}"
[[ $favourite ]] &&
open "$favourite"
;;
# Quit and store current directory in a file for CD on exit.
# Don't allow user to redefine 'q' so a bad keybinding doesn't
# remove the option to quit.
q)
: "${FFF_CD_FILE:=${XDG_CACHE_HOME:=${HOME}/.cache}/fff/.fff_d}"
[[ -w $FFF_CD_FILE ]] &&
rm "$FFF_CD_FILE"
[[ ${FFF_CD_ON_EXIT:=1} == 1 ]] &&
printf '%s\n' "$PWD" > "$FFF_CD_FILE"
exit
;;
esac
}
# Program entry point for fff.
# $1: optional flag (-v print version, -h show man page, -p file-picker
#     mode) or a start directory; $2: start directory when $1 is a flag.
# Relies on helper functions defined earlier in this file
# (get_os, get_term_size, redraw, key, ...).
main() {
# Handle a directory as the first argument.
# 'cd' is a cheap way of finding the full path to a directory.
# It updates the '$PWD' variable on successful execution.
# It handles relative paths as well as '../../../'.
#
# '||:': Do nothing if 'cd' fails. We don't care.
cd "${2:-$1}" &>/dev/null ||:
[[ $1 == -v ]] && {
printf '%s\n' "fff 2.2"
exit
}
[[ $1 == -h ]] && {
man fff
exit
}
# Store file name in a file on open instead of using 'FFF_OPENER'.
# Used in 'fff.vim'.
[[ $1 == -p ]] &&
file_picker=1
# bash 5 and some versions of bash 4 don't allow SIGWINCH to interrupt
# a 'read' command and instead wait for it to complete. In this case it
# causes the window to not redraw on resize until the user has pressed
# a key (causing the read to finish). This sets a read timeout on the
# affected versions of bash.
# NOTE: This shouldn't affect idle performance as the loop doesn't do
# anything until a key is pressed.
# SEE: https://github.com/dylanaraps/fff/issues/48
((BASH_VERSINFO[0] > 3)) &&
read_flags=(-t 0.05)
((${FFF_LS_COLORS:=1} == 1)) &&
get_ls_colors
((${FFF_HIDDEN:=0} == 1)) &&
shopt -s dotglob
# Create the trash and cache directory if they don't exist.
mkdir -p "${XDG_CACHE_HOME:=${HOME}/.cache}/fff" \
"${FFF_TRASH:=${XDG_DATA_HOME:=${HOME}/.local/share}/fff/trash}"
# 'nocaseglob': Glob case insensitively (Used for case insensitive search).
# 'nullglob': Don't expand non-matching globs to themselves.
shopt -s nocaseglob nullglob
# Trap the exit signal (we need to reset the terminal to a useable state.)
trap 'reset_terminal' EXIT
# Trap the window resize signal (handle window resize events).
trap 'get_term_size; redraw' WINCH
# One-time environment probing, option setup and initial draw.
get_os
get_term_size
get_w3m_path
setup_options
setup_terminal
redraw full
# Vintage infinite loop.
for ((;;)); {
read "${read_flags[@]}" -srn 1 && key "$REPLY"
# Exit if there is no longer a terminal attached.
[[ -t 1 ]] || exit 1
}
}
main "$@"
| true
|
6647ad2ff6115241e957a00a2addf3803a8bf683
|
Shell
|
karouu/bash-script-learning
|
/Mad_Libs.sh
|
UTF-8
| 206
| 2.640625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Mad Libs: prompt for a noun, a verb and an adjective, then echo the
# three words back as one line.

# '-r' stops read from mangling backslashes in the user's input
# (without it, "\n" etc. would be interpreted as escapes).
printf '%s\n' "pls enter a Noun then press Enter:"
read -r noun
printf '%s\n' "pls enter a Verb then press Enter:"
read -r verb
printf '%s\n' "pls enter a Adjective then press Enter:"
read -r adj

# Print the collected words separated by single spaces.
printf '%s %s %s\n' "$noun" "$verb" "$adj"
| true
|
ba128405d5dd8c16d5861b6de0212a9a7377e513
|
Shell
|
ns-bak/tetracorder-tutorial
|
/src-local/specpr/manual.programmers.notes.html/convert.all.to.html
|
UTF-8
| 230
| 2.953125
| 3
|
[] |
no_license
|
#!/bin/sh
# Print commands on HP-UX. Roger N. Clark Feb 28. 2008
# Render each manual section to HTML with groff, echoing the command
# being run first. -t runs tbl, -e runs eqn, -T html selects HTML output.
for section in Cover sec1 sec2 sec3 sec4 sec5 sec6 sec7 sec8; do
	echo "groff -t -e -T html $section > ${section}.html"
	groff -t -e -T html "$section" > "${section}.html"
done
| true
|
80b07c802d843a778e302aa34d55736714704b9e
|
Shell
|
hariknair77/bash
|
/until.sh
|
UTF-8
| 70
| 2.640625
| 3
|
[] |
no_license
|
#!/usr/bin/bash
# Count down from 10, printing each value while it is still >= 3.
i=10
while (( i >= 3 )); do
	printf '%s\n' "$i"
	(( i-- ))
done
| true
|
52beef628be785e257f1da979798019235b37c83
|
Shell
|
i3wm/configFiles
|
/scripts/ubuntu-minimal/lubuntu.sh
|
UTF-8
| 5,602
| 3.125
| 3
|
[] |
no_license
|
#!/bin/bash

# Check whether the internet connection was configured properly.
printf '\nDid you configure the internet connection properly by running both `nm-connection-editor` & `nm-applet`?\n'
read -p 'Continue? (y/n): ' choice
# Anything that does not start with y/Y aborts the whole script.
case "$choice" in
y* | Y*) gtr=1;;
*) exit;;
esac
# Release variables used only for this confirmation step.
unset gtr
unset choice
# Aptik backup directory
sleep 1
printf '\nPlease enter the absolute location of Aptik backup directory:- '
# BUG FIX: 'read -p dir' treated "dir" as the *prompt string* and stored
# the reply in $REPLY, leaving $dir empty for the rest of the script.
# Read the reply into 'dir' (-r: keep backslashes literal).
read -r dir

# Accept password (used later for the cached 'echo $passwd | sudo -S' calls).
sleep 1
printf 'Enter your root password: '
# -s: don't echo the password to the terminal; -r: keep backslashes literal.
read -rs passwd
echo "Password accepted."
# Enabling firewall
sleep 1
printf '\nEnabling Firewall . . .\n'
echo $passwd | sudo -S echo " "
sudo ufw enable
sudo -K
# Removing unwanted packages
sleep 1
echo $passwd | sudo -S echo " "
sudo apt-get remove --purge -y scrot xfburn gpick mtpaint abiword gnumeric xpad
sudo -K
# Setting up apt such that it doesn't keep or install recommended and suggested packages
sleep 1
echo $passwd | sudo -S echo " "
printf '\nSetting up no-install-recommends . . .\n\n'
printf 'APT::Install-Recommends "false";\nAPT::AutoRemove::RecommendsImportant "false";\nAPT::AutoRemove::SuggestsImportant "false";' | sudo tee /etc/apt/apt.conf.d/99_norecommends
printf '\n'
# Autoremove recommended and suggested packages
sleep 1
sudo apt-get autoremove -y
sudo apt-get autoclean
sudo apt-get clean
sudo -K
# Adding aptik ppa
sleep 1
echo $passwd | sudo -S echo " "
sudo add-apt-repository ppa:teejee2008/ppa -y
sudo apt update
sudo apt -y install aptik
sudo -K
# Restoring ppas and crontab with aptik
sleep 1
cd $dir
sleep 1
echo $passwd | sudo -S echo " "
sudo aptik --yes --restore-ppa
sudo -K
sleep 1
echo $passwd | sudo -S echo " "
sudo aptik --yes --restore-crontab
sudo -K
sleep 1
cd
# Install necessary packages that were autoremoved by the previous command plus extra packages
sleep 1
echo $passwd | sudo -S echo " "
sudo apt update
sudo -K
sleep 1
echo $passwd | sudo -S echo " "
sudo apt-get dist-upgrade -y
sudo -K
sleep 1
echo $passwd | sudo -S echo " "
sudo apt install -y chromium-browser zsh wget ca-certificates mc alsa-utils pulseaudio pavucontrol compton libnotify-bin conky parcellite vim vim-gtk gksu unzip gtk2-engines-pixbuf gtk2-engines-murrine gpicview mpv acpi ncdu git feh xinit i3 i3status i3lock rofi rxvt-unicode-256color xsel gstreamer1.0-plugins-ugly gstreamer1.0-plugins-bad gstreamer1.0-libav gstreamer1.0-fluendo-mp3 oxideqt-codecs-extra ttf-mscorefonts-installer unrar libavcodec-extra
sudo -K
# Using aptik to restore packages
sleep 1
echo $passwd | sudo -S echo " "
sudo aptik --yes --restore-packages
sudo -K
# Download mint-y-theme
printf '\nDownloading mint-y-theme . . .\n'
sleep 1
mkdir -p /tmp/mint-y-theme && cd /tmp/mint-y-theme
sleep 1
git init
sleep 1
git remote add origin https://github.com/linuxmint/mint-y-theme
sleep 1
git config core.sparsecheckout true
sleep 1
echo "usr/share/themes/*" >> .git/info/sparse-checkout
sleep 1
git pull --depth=1 origin master
# Install mint-y-theme
printf '\nInstalling mint-y-theme . . .\n'
sleep 1
cd usr/share/themes
sleep 1
echo $passwd | sudo -S echo " "
sudo cp -R Mint-Y Mint-Y-Dark Mint-Y-Darker /usr/share/themes
sudo -K
# Download Font-Awesome
printf '\nDownloading Font-Awesome . . .\n'
sleep 1
mkdir -p /tmp/font-awesome && cd /tmp/font-awesome
sleep 1
git init
sleep 1
git remote add origin https://github.com/FortAwesome/Font-Awesome
sleep 1
git config core.sparsecheckout true
sleep 1
echo "fonts/*" >> .git/info/sparse-checkout
sleep 1
git pull --depth=1 origin master
# Install Font-Awesome
printf '\nInstalling Font-Awesome . . .\n'
sleep 1
cd fonts
sleep 1
echo $passwd | sudo -S echo " "
sudo mkdir -p /usr/share/fonts/custom
sleep 1
sudo cp FontAwesome.otf /usr/share/fonts/custom
sudo -k
# Download Monaco and TimesNewRoman
printf '\nDownloading Monaco . . .\n'
sleep 1
mkdir -p /tmp/monaco && cd /tmp/monaco
sleep 1
git init
sleep 1
git remote add origin https://github.com/i3wm/minimal
sleep 1
git pull --depth=1 origin master
# Install Monaco and TimesNewRoman
printf '\nInstalling Monaco . . .\n'
sleep 1
echo $passwd | sudo -S echo " "
sudo cp Monaco.ttf /usr/share/fonts/custom
sudo -K
# Update font cache
sleep 1
echo $passwd | sudo -S echo " "
sudo fc-cache -fv
sudo -K
# Install cursor theme
printf '\nDownloading cursor theme . . .\n'
sleep 1
mkdir -p /tmp/aesthetic && cd /tmp/aesthetic
sleep 1
git init
sleep 1
git remote add origin https://github.com/i3wm/aesthetic
sleep 1
git pull origin master
printf '\nInstalling cursor theme . . .\n'
sleep 1
echo $passwd | sudo -S echo " "
sudo cp -r ComixCursors-White /usr/share/icons
sleep 1
sudo sed -i '/Inherits/c\Inherits\=ComixCursors\-White' /usr/share/icons/default/index.theme
sudo -K
# Power button config
sleep 1
echo $passwd | sudo -S echo " "
sudo sed -i '/HandlePowerKey/c\HandlePowerKey\=ignore' /etc/systemd/logind.conf
sudo -K
# Install clipboard for urxvt
printf '\nInstalling Urxvt clipboard . . .\n'
sleep 1
mkdir -p /tmp/urxvt && cd /tmp/urxvt
sleep 1
git init
sleep 1
git remote add origin https://github.com/i3wm/urxvt
sleep 1
git pull origin master
sleep 1
echo $passwd | sudo -S echo " "
sudo mkdir -p /usr/lib/urxvt/perl
sudo cp clipboard /usr/lib/urxvt/perl/
sudo -K
# Set vim globally as editor
sleep 1
echo $passwd | sudo -S echo " "
sudo update-alternatives --install /usr/bin/editor editor /usr/bin/vim 100
sudo -K
# Set urxvt as global terminal
sleep 1
echo $passwd | sudo -S echo " "
sudo update-alternatives --config x-terminal-emulator
sudo -K
# Message
printf '\nApply themes with lxappearance and change fonts in .gtkrc-2.0 & settings.ini\n'
| true
|
e901b4d0d57afed6da86eec258e778c9cdf53862
|
Shell
|
mcthoren/scripts
|
/patch_box
|
UTF-8
| 887
| 3.28125
| 3
|
[
"Unlicense"
] |
permissive
|
#!/bin/sh
# Meant to be run from cron on OpenBSD.
# The idea: while on vacation the boxes take care of themselves by
# applying syspatch updates and restarting the network daemons.

# XXX todo: only restart daemons when necessary.
# Until we figure out how to do that, just restart all the daemons
# in case a crypto lib, the daemon itself or something important updated.

echo "$0[$$]: checking for patches..."

# Restart the public-facing daemons so they pick up patched binaries
# and libraries.
restart_daemons() {
	# printf instead of 'echo "\n..."': echo's backslash handling is
	# implementation-defined, so the intended leading newline was not
	# guaranteed to be printed.
	printf '\n%s\n' "$0[$$]: patches detected, restarting daemons"
	# If we restart smtpd at the same instant we lose the notification
	# mail, so defer its restart by a minute via at(1).
	echo "/usr/sbin/rcctl restart smtpd" | /usr/bin/at now +1 min
	/usr/sbin/rcctl restart sshd
	/usr/sbin/rcctl restart httpd
	/usr/sbin/rcctl restart unwind
#	/usr/sbin/rcctl restart nsd
#	/usr/sbin/rcctl restart unbound
}

# syspatch prints the patches it applied (nothing when already up to
# date), so non-empty output on success means something was patched.
SP_OUT="$(/usr/sbin/syspatch 2>&1)" && [ -n "${SP_OUT}" ] && { echo "${SP_OUT}"; restart_daemons; }
| true
|
3b9e774a78247f1c2dc58405b02f4e63e443d932
|
Shell
|
brunotikami/Configuration
|
/bashrc
|
UTF-8
| 790
| 2.765625
| 3
|
[] |
no_license
|
#
# ~/.bashrc
#
# If not running interactively, don't do anything
[[ $- != *i* ]] && return
# Coloured ls output and a simple fallback prompt (replaced below by
# powerline-shell via PROMPT_COMMAND).
alias ls='ls --color=auto'
PS1='[\u@\h \W]\$ '
export EDITOR="vim"
export LANG=zh_CN.UTF-8
# Complete command names (and filenames) after 'sudo'.
complete -cf sudo
export HISTSIZE="100"
alias grep='grep --color=auto'
alias cls='clear'
alias ll='ls -l'
alias trs='trs {=zh}'
# Rebuild the prompt after every command; '$?' passes the last exit
# status to powerline-shell so it can colour the prompt accordingly.
function _update_ps1() {
export PS1="$(~/Code/mapleray/Configuration/powerline-shell/powerline-shell.py $?)"
}
export PROMPT_COMMAND="_update_ps1"
# Python virtualenvwrapper setup and extra tool paths (ruby gems,
# Android SDK platform tools).
export VIRTUALENVWRAPPER_PYTHON=/usr/bin/python3
export PATH=${PATH}:/root/.gem/ruby/2.0.0/bin
export PATH=${PATH}:/opt/android-sdk/platform-tools
export PATH=${PATH}:/home/conan/.gem/ruby/2.0.0/bin
export WORKON_HOME=~/Virtualenvs
source /usr/bin/virtualenvwrapper.sh
source ~/Code/mapleray/Configuration/git-completion.bash
| true
|
9fb826407e48b2b56e47e0c32f8578ddfa675835
|
Shell
|
didistars328/bash_scripting
|
/if.sh
|
UTF-8
| 1,843
| 2.921875
| 3
|
[] |
no_license
|
#!/bin/bash
# Interactive cheat-sheet for bash 'if' statements: prints usage plus a
# runnable example for if, if/else and if/elif, then eval's each example.

# ANSI SGR escape sequences for coloured terminal output.
# $'\033[0m' resets all attributes; '00;3x' selects normal colours and
# '01;3x' their bold/bright variants.
RESTORE=$'\033[0m'
RED=$'\033[00;31m'
GREEN=$'\033[00;32m'
YELLOW=$'\033[00;33m'
BLUE=$'\033[00;34m'
PURPLE=$'\033[00;35m'
CYAN=$'\033[00;36m'
LIGHTGRAY=$'\033[00;37m'
LRED=$'\033[01;31m'
LGREEN=$'\033[01;32m'
LYELLOW=$'\033[01;33m'
LBLUE=$'\033[01;34m'
LPURPLE=$'\033[01;35m'
LCYAN=$'\033[01;36m'
WHITE=$'\033[01;37m'
echo "${GREEN}==============="
echo "##### IF ######"
echo "===============${RESTORE}"
echo "${YELLOW}Usage:${RESTORE}"
cat <<EOF
if [ condition-true ]
then
command 1
command 2
...
fi
EOF
echo "${BLUE}Example:${RESTORE}"
out='MY_SHELL="bash"
if [ "$MY_SHELL" = "bash" ]
then
echo "You seem to like the bash shell."
fi'
echo "${out}"
echo "${GREEN}TEST:${RESTORE}"
eval "${out}"
echo
echo "${GREEN}=================="
echo "### IF # ELSE ####"
echo "==================${RESTORE}"
echo "${YELLOW}Usage:${RESTORE}"
cat <<EOF
if [ condition-true ]
then
command 1
command 2
...
else
command 3
command 4
...
fi
EOF
echo "${BLUE}Example:${RESTORE}"
out='MY_SHELL="zsh"
if [ "$MY_SHELL" = "bash" ]; then
echo "You seem to like the bash shell."
else
echo "You do not seem to like the bash shell."
fi'
echo "${out}"
echo "${GREEN}TEST:${RESTORE}"
eval "${out}"
echo
echo "${GREEN}=================="
echo "### IF # ELIF ####"
echo "==================${RESTORE}"
echo "${YELLOW}Usage:${RESTORE}"
cat <<EOF
if [ condition-true ]
then
command 1
command 2
...
elif [ condition-true ]
then
command 3
command 4
...
else
command 5
command 6
...
fi
EOF
echo "${BLUE}Example:${RESTORE}"
out='MY_SHELL="zsh"
if [ "$MY_SHELL" = "bash" ]
then
echo "You seem to like the bash shell."
elif [ "$MY_SHELL" = "zsh" ]
then
echo "You seem to like the zsh shell."
else
echo "You do not seem to like the bash or zsh shells."
fi'
echo "${out}"
echo "${GREEN}TEST:${RESTORE}"
eval "${out}"
| true
|
a2f5082606feca3767352f8dc0b0bf964d10a116
|
Shell
|
Tobilan/data-diode
|
/transfer_data/scripts/create_serial_recv_startup_cron
|
UTF-8
| 1,114
| 2.984375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Install the cron entries that start and monitor the Serial Receive
# process on boot, plus the daily log upload and network watchdog jobs.

echo "Adding cron entry to start Serial Receive on startup"
echo "Starting cron"
sudo crontab -l
echo " "
echo "Adding cron entry"

# Build the new crontab in a private temp file instead of a predictable
# file name in the current directory, and clean it up on any exit path.
cronfile=$(mktemp) || exit 1
trap 'rm -f "$cronfile"' EXIT

{
# Initialize UART0 CTS/RTS
echo "@reboot /opt/sierra/data_diode/transfer_data/scripts/gpio/gpio_set_UART0_flow_control.sh"
# Set GPIO 26 (Remote Reboot pin for internal unit) as output
echo "@reboot raspi-gpio set 26 op dl"
# Start Receiving process
echo "@reboot python /opt/sierra/data_diode/transfer_data/serial-recv-file.py"
# Periodic check to verify process still running
echo "* * * * * /opt/sierra/data_diode/transfer_data/scripts/serial_recv_pidcheck.sh"
# Daily log upload (at 1:01 AM)
echo "01 01 * * * /opt/sierra/data_diode/scripts/daily_log_upload"
# Monitor network for outages and auto-reboot
echo "* * * * * /opt/sierra/data_diode/scripts/network_check_external.sh"
} > "$cronfile"

# Install the assembled table as root's crontab.
sudo crontab "$cronfile"
echo "Updated cron..."
sudo crontab -l
| true
|
8917f2b4bcf3786acfbf41de0a047ba293959cdf
|
Shell
|
mezarin/kabanero-operator
|
/contrib/get_operator_config.sh
|
UTF-8
| 685
| 3.625
| 4
|
[
"Apache-2.0",
"BSD-2-Clause",
"LGPL-2.1-or-later",
"BSD-3-Clause",
"GPL-1.0-or-later",
"MPL-2.0"
] |
permissive
|
#!/bin/bash
# Fetch the four kabanero-operator manifests from a base URL and
# concatenate them (each followed by a '---' document separator) into a
# single YAML file.
# Usage: get_operator_config.sh <base url> <output filename>
set -Eeuox pipefail

# BUG FIX: with 'set -u', a bare 'BASEURL=$1' aborted with an "unbound
# variable" error before the usage message could ever print. Default to
# empty so the explicit checks below handle missing arguments.
BASEURL=${1:-}
DEST=${2:-}

if [ -z "${BASEURL}" ]; then
  echo "Usage: get_operator_config.sh [base url] [output filename]"
  # 'exit -1' is not a valid exit status; use a conventional non-zero code.
  exit 1
fi

if [ -z "${DEST}" ]; then
  echo "Usage: get_operator_config.sh [base url] [output filename]"
  exit 1
fi

# -f: fail on HTTP errors so a bad URL aborts (via set -e) instead of
# silently writing an HTML error page into the manifest.
curl -f "$BASEURL/operator.yaml" -o operator.yaml
curl -f "$BASEURL/role.yaml" -o role.yaml
curl -f "$BASEURL/role_binding.yaml" -o role_binding.yaml
curl -f "$BASEURL/service_account.yaml" -o service_account.yaml

# Rebuild the output from scratch, then remove the intermediate files.
rm -f "$DEST"
for f in operator.yaml role.yaml role_binding.yaml service_account.yaml; do
  cat "$f" >> "$DEST"; echo "---" >> "$DEST"
done
rm operator.yaml role.yaml role_binding.yaml service_account.yaml
| true
|
e554d61669aee2922d6c98e773df7e20d22dfdb9
|
Shell
|
gnclmorais/formation
|
/slay
|
UTF-8
| 8,587
| 3.640625
| 4
|
[
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
#!/usr/bin/env bash
###############################################################################
# ERROR: Let the user know if the script fails
###############################################################################
trap 'ret=$?; test $ret -ne 0 && printf "\n \e[31m\033[0m Formation failed \e[31m\033[0m\n" >&2; exit $ret' EXIT
set -e
###############################################################################
# TWIRL: Check for required functions file
###############################################################################
if [ -e twirl ]; then
cd "$(dirname "${BASH_SOURCE[0]}")" \
&& . "twirl"
else
printf "\n ⚠️ ./twirl not found 💃🏾 First, you need to twirl on your haters\n"
exit 1
fi
###############################################################################
# CHECK: Bash version
###############################################################################
check_bash_version
###############################################################################
# Get in Formation! http://patorjk.com/software/taag/ ( font: Script )
###############################################################################
printf "
_
| |
| | __ ,_ _ _ _ __, _|_ 🍋 __ _ _
|/ / \_/ | / |/ |/ | / | | | / \_/ |/ |
|__/\__/ |_/ | | |_/\_/|_/|_/|_/\__/ | |
|\ ---------------------------------------------
|/ Cause you slay [for Bash 3.2 - 3.9]
╭───────────────────────────────────────────────────╮
│ Okay developers now let's get in ${bold}formation${normal}. │
│───────────────────────────────────────────────────│
│ Safe to run multiple times on the same machine. │
│ It ${green}installs${reset}, ${blue}upgrades${reset}, or ${yellow}skips${reset} packages based │
│ on what is already installed on the machine. │
╰───────────────────────────────────────────────────╯
${dim}$(get_os) $(get_os_version) ${normal} // ${dim}$BASH ${normal} // ${dim}$BASH_VERSION${reset}
"
###############################################################################
# CHECK: Internet
###############################################################################
chapter "Checking internet connection…"
check_internet_connection
###############################################################################
# PROMPT: Password
###############################################################################
chapter "Caching password…"
ask_for_sudo
###############################################################################
# PROMPT: SSH Key
###############################################################################
chapter 'Checking for SSH key…'
ssh_key_setup
###############################################################################
# INSTALL: Dependencies
###############################################################################
chapter "Installing Dependencies…"
# -----------------------------------------------------------------------------
# XCode
# -----------------------------------------------------------------------------
if type xcode-select >&- && xpath=$( xcode-select --print-path ) &&
test -d "${xpath}" && test -x "${xpath}" ; then
print_success_muted "Xcode already installed. Skipping."
else
step "Installing Xcode…"
xcode-select --install
print_success "Xcode installed!"
fi
if [ ! -d "$HOME/.bin/" ]; then
mkdir "$HOME/.bin"
fi
# -----------------------------------------------------------------------------
# NVM
# -----------------------------------------------------------------------------
# BUG FIX: '[ -x nvm ]' tested for an executable file literally named
# "nvm" in the current directory, which is essentially never true, so
# the install branch could never run and the script always claimed NVM
# was already installed. Install when nvm is NOT available in this shell.
if ! command -v nvm >/dev/null 2>&1; then
  step "Installing NVM…"
  curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.36.0/install.sh | bash
  print_success "NVM installed!"

  step "Installing latest Node…"
  nvm install node
  nvm use node
  nvm run node --version
  nodev=$(node -v)
  print_success "Using Node $nodev!"
else
  print_success_muted "NVM/Node already installed. Skipping."
fi
# -----------------------------------------------------------------------------
# Homebrew
# -----------------------------------------------------------------------------
if ! [ -x "$(command -v brew)" ]; then
step "Installing Homebrew…"
curl -fsS 'https://raw.githubusercontent.com/Homebrew/install/master/install' | ruby
export PATH="/usr/local/bin:$PATH"
print_success "Homebrew installed!"
else
print_success_muted "Homebrew already installed. Skipping."
fi
if brew list | grep -Fq brew-cask; then
step "Uninstalling old Homebrew-Cask…"
brew uninstall --force brew-cask
print_success "Homebrew-Cask uninstalled!"
fi
###############################################################################
# INSTALL: taps & brews
###############################################################################
if [ -e $cwd/swag/taps ]; then
chapter "Tapping some casks for Homebrew…"
for tap in $(<$cwd/swag/taps); do
brew tap $tap
done
fi
if [ -e $cwd/swag/brews ]; then
chapter "Installing Homebrew formulae…"
for brew in $(<$cwd/swag/brews); do
install_brews $brew
done
fi
###############################################################################
# UPDATE: Homebrew
###############################################################################
chapter "Updating Homebrew formulae…"
brew update
###############################################################################
# INSTALL: casks
###############################################################################
if [ -e $cwd/swag/casks ]; then
chapter "Installing apps via Homebrew…"
for cask in $(<$cwd/swag/casks); do
install_application_via_brew $cask
done
fi
###############################################################################
# INSTALL: Mac App Store Apps
###############################################################################
chapter "Installing apps from App Store…"
# BUG FIX: '[ -x mas ]' tested for an executable file literally named
# "mas" in the current directory, so the "please install" warning was
# effectively unreachable and the install branch ran even without
# mas-cli present. Warn when the mas command is NOT available.
if ! command -v mas >/dev/null 2>&1; then
  print_warning "Please install mas-cli first: brew mas. Skipping."
else
  if [ -e "$cwd/swag/apps" ]; then
    if mas_setup; then
      # Workaround for associative array in Bash 3
      # https://stackoverflow.com/questions/6047648/bash-4-associative-arrays-error-declare-a-invalid-option
      for app in $(<$cwd/swag/apps); do
        KEY="${app%%::*}"
        VALUE="${app##*::}"
        install_application_via_app_store $KEY $VALUE
      done
    else
      print_warning "Please signin to App Store first. Skipping."
    fi
  fi
fi
###############################################################################
# CLEAN: Homebrew files
###############################################################################
chapter "Cleaning up Homebrew files…"
brew cleanup 2> /dev/null
###############################################################################
# INSTALL: npm packages
###############################################################################
if [ -e $cwd/swag/npm ]; then
chapter "Installing npm packages…"
for pkg in $(<$cwd/swag/npm); do
KEY="${pkg%%::*}"
VALUE="${pkg##*::}"
install_npm_packages $KEY $VALUE
done
fi
###############################################################################
# OPTIONAL: Customizations
###############################################################################
chapter "Adding hot sauce…"
if [ -f "$HOME/.hot-sauce" ]; then
if ask "Do you want to add hot sauce?" Y; then
. "$HOME/.hot-sauce"; printf "\n You got hot sauce in your bag. 🔥 ${bold}Swag.${normal}\n";
else
print_success_muted "Hot sauce declined. Skipped.";
fi
else
print_warning "No ~/.hot-sauce found. Skipping."
fi
###############################################################################
# Activate 🐟
###############################################################################
chapter "Setting fish as the default shell…"
if [ -e /usr/local/bin/fish ]; then
# https://github.com/fish-shell/fish-shell#switching-to-fish
chsh -s /usr/local/bin/fish
fi
###############################################################################
# 🍋🍋🍋🍋🍋🍋🍋🍋🍋🍋🍋🍋🍋🍋🍋🍋🍋🍋🍋🍋🍋🍋🍋🍋🍋🍋🍋🍋🍋🍋🍋🍋🍋🍋🍋🍋🍋🍋🍋🍋🍋
###############################################################################
e_lemon_ated
| true
|
c8e80a0adafc6705c878d715d8f32e0cc97c3c8b
|
Shell
|
jphein/boxen
|
/usr/share/ltsp/screen.d/boxen/vncviewer
|
UTF-8
| 1,466
| 3.734375
| 4
|
[] |
no_license
|
#!/bin/sh
#
# Screen script that launches vncviewer. Can be called from lts.conf
# like this:
# SCREEN_07="vncviewer -fullscreen server"
# or like this:
# SCREEN_07="vncviewer"
#
# Copyright (c) 2016 Jeffrey Hein <http://jphein.com>
#
# This software is licensed under the GNU General Public License version 2,
# the full text of which can be found in the COPYING file.

. /usr/share/ltsp/screen-x-common

# segfaults if HOME is unset.
export HOME=${HOME:-/root}

# The same screen script can be used for other vncviewers too, by just
# symlinking screen.d/{otherviewer} to screen.d/vncviewer.
basename=${0##*/}

if ! type $basename >/dev/null 2>&1; then
    echo "$basename couldn't be found."
    if [ "$basename" = "vncviewer" ]; then
        echo "Please make sure you have installed xtightvncviwer into your client chroot."
    fi
    # Wait for a keypress so the message stays visible on the screen.
    read -r nothing
    exit 1
fi

# Make XINITRC_DAEMON default to "True", to prevent X from restarting after
# logout. If you don't want that, force XINITRC_DAEMON=False in lts.conf.
export XINITRC_DAEMON="${XINITRC_DAEMON-True}"

# If no parameters were passed, set some reasonable defaults.
# BUG FIX: these defaults were assigned to RDP_OPTIONS/RDP_SERVER (a
# leftover from the rdesktop screen script) and therefore never reached
# the vncviewer command line built below.
if [ -z "$*" ]; then
    VNC_OPTIONS=${VNC_OPTIONS:-"-fullscreen"}
    VNC_SERVER=${VNC_SERVER:-"server"}
fi

# Pipe the (optional) VNC password into the viewer on stdin.
COMMAND="echo $VNC_PASSWORD | $basename $* $VNC_OPTIONS $VNC_SERVER"

# The following logic is described at the top of xinitrc.
exec xinit /usr/share/ltsp/xinitrc "$COMMAND" -- "$DISPLAY" "vt${TTY}" -nolisten tcp $X_ARGS >/dev/null
| true
|
c948a6369eb7056bd10b9cdc097329ca9e270b01
|
Shell
|
vaishnavisirsat/Assignment10Three
|
/flipCoinCombination3.sh
|
UTF-8
| 565
| 3.203125
| 3
|
[] |
no_license
|
#!/bin/bash -x
# Toss two coins four times and report how often each of the four
# combinations (HH, HT, TH, TT) came up, as an integer percentage of
# the four tosses. head=1, tail=0.
head=1
tail=0
HH=0
HT=0
TH=0
TT=0

for (( toss = 0; toss < 4; toss++ )); do
	# Two independent coin flips per toss.
	arr[0]=$(( RANDOM % 2 ))
	arr[1]=$(( RANDOM % 2 ))
	if (( arr[0] == head && arr[1] == head )); then
		HH=$(( HH + 1 ))
	elif (( arr[0] == head && arr[1] == tail )); then
		HT=$(( HT + 1 ))
	elif (( arr[0] == tail && arr[1] == head )); then
		TH=$(( TH + 1 ))
	else
		TT=$(( TT + 1 ))
	fi
done

# Integer percentage of the 4 tosses for each combination.
HHP=$(( HH * 100 / 4 ))
HTP=$(( HT * 100 / 4 ))
THP=$(( TH * 100 / 4 ))
TTP=$(( TT * 100 / 4 ))

echo "HH % is : $HHP %"
echo "HT % is : $HTP %"
echo "TH % is : $THP %"
echo "TT % is : $TTP %"
| true
|
8788df88116a24d6a926aa643d043f65c77b1b88
|
Shell
|
the-redback/contrib
|
/keepalived-vip/build/build.sh
|
UTF-8
| 1,435
| 3.3125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o pipefail
# Download a release tarball, verify its SHA-256 digest and unpack it
# into the current directory.
# Arguments: $1 - expected sha256 hex digest
#            $2 - URL of the .tar.gz archive to fetch
# Exits the whole script with status 10 when the checksum check fails.
get_src()
{
hash="$1"
url="$2"
# Keep the remote file name for the local copy.
f=$(basename "$url")
curl -sSL "$url" -o "$f"
# Feed "<digest> <file>" to 'sha256sum -c -' for verification.
# NOTE(review): GNU sha256sum's check format normally expects two
# characters between digest and name ("digest  file"); confirm this
# single-space form is accepted by the coreutils build in the image.
echo "$hash $f" | sha256sum -c - || exit 10
tar xzf "$f"
# Remove the archive once extracted.
rm -rf "$f"
}
apt-get update && apt-get install -y --no-install-recommends \
curl \
gcc \
libssl-dev \
libnl-3-dev libnl-route-3-dev libnl-genl-3-dev iptables-dev libnfnetlink-dev libiptcdata0-dev \
make \
libipset-dev \
git \
libsnmp-dev \
automake \
ca-certificates
cd /tmp
# download, verify and extract the source files
get_src $SHA256 \
"https://github.com/acassen/keepalived/archive/v$VERSION.tar.gz"
cd keepalived-$VERSION
aclocal
autoheader
automake --add-missing
autoreconf
./configure --prefix=/keepalived \
--sysconfdir=/etc \
--enable-snmp \
--enable-sha1
make && make install
tar -czvf /keepalived.tar.gz /keepalived
| true
|
38104f8d1d96b372ab62cfb7a184c1a2195f3aba
|
Shell
|
lulugyf/cproj
|
/csf_sc/sh/cpsmake.sh
|
GB18030
| 11,075
| 3.171875
| 3
|
[] |
no_license
|
## Globals shared by the helpers below:
##   FirstCppFile - first .cpp file of the current build (names temp files)
##   CppFileList  - list of .cpp files passed to the compile driver
##   ProgType     - program type (bc / bp / server name)
FirstCppFile=""
CppFileList=""
ProgType=""
####### builder for bc / bp / server (liupengc) #######
# Run "sh cppcompile.sh <prog type> <cpp files>" and extract the shared
# library link string (-l flags) from its captured output.
# Prints the link string on stdout.
get_cps_lib()
{
# Timestamp makes the temporary result file name unique.
my_date=`date +%Y%m%d%H%M%S`
lib_result_file=$FirstCppFile"_"${my_date}
# Capture the compile driver's stdout+stderr into the temp file.
sh cppcompile.sh $ProgType $CppFileList > $lib_result_file 2>&1
# get_lib_str parses the captured output for the needed -l flags.
# NOTE(review): get_lib_str is defined elsewhere in this project.
link_lib_str=`get_lib_str $lib_result_file`
rm -rf $lib_result_file
echo $link_lib_str
}
# Same as get_cps_lib, but drives cppcompileforexe.sh (executable build)
# instead of cppcompile.sh, and takes no program-type argument.
# Prints the extracted library link string on stdout.
get_cps_libforexe()
{
# Timestamp makes the temporary result file name unique.
my_date=`date +%Y%m%d%H%M%S`
lib_result_file=$FirstCppFile"_"${my_date}
# Capture the compile driver's stdout+stderr into the temp file.
sh cppcompileforexe.sh $CppFileList > $lib_result_file 2>&1
link_lib_str=`get_lib_str $lib_result_file`
rm -rf $lib_result_file
echo $link_lib_str
}
#### Build a single bc/bp shared library (lib<name>.so) from one .cpp.
#### Arguments: $1 - the .cpp file name (e.g. cQTimeAgeLimit.cpp)
#### Generates a throw-away makefile, runs make, and installs the
#### resulting .so into $HOME/run/solib (archiving any previous copy).
build_bc_bp_single()
{
my_date=`date +%Y%m%d%H%M%S`
bc_bp_full_name=$1 # bc or bp : eg : cQTimeAgeLimit.cpp
makefile_name="makefile_"${bc_bp_full_name}_${my_date}
# Strip the .cpp extension to get the module name.
bc_bp_name=`echo $bc_bp_full_name|cut -f 1 -d .`
target_name="lib"${bc_bp_name}".so"
#cppcompile_result=`sh cppcompile.sh $bc_bp_full_name` # (old) capture driver output directly
#link_lib_str=`echo $cppcompile_result|awk '{i=index($0,":"); j=index($0,"Success"); print substr($0,i+1,j-i-1)}'` # (old) cut the lib list out of the output
#link_lib_str=`get_cps_lib`
# Inline equivalent of get_cps_lib so we can also capture its exit code.
my_date=`date +%Y%m%d%H%M%S`
lib_result_file=$FirstCppFile"_"${my_date}
sh cppcompile.sh $ProgType $CppFileList > $lib_result_file 2>&1
link_lib_str=`get_lib_str $lib_result_file`
# Exit code of get_lib_str: 100/101 have special meanings (see bottom).
returnCode=$?
rm -rf $lib_result_file
# echo $link_lib_str
# makedep=`echo $link_lib_str|grep "ChangedDependSo:YES"|wc -l`
# if [ "$makedep" -eq "0" ]
# then
# link_lib_str=`echo $link_lib_str|sed "s/ChangedDependSo:NO//g"`
# else
# link_lib_str=`echo $link_lib_str|sed "s/ChangedDependSo:YES//g"`
# fi
echo "ssssss"$returnCode
link_lib_str_len=${#link_lib_str} # length of the extracted link string
link_lib_str_all=""
WORK_LIB_DIR=${HOME}/run/solib
if [ $link_lib_str_len -lt 3 ] # no dependent libs -> leave the -L clause empty
then
link_lib_str_all=""
else
link_lib_str_all="-L${WORK_LIB_DIR} ${link_lib_str}"
fi
makefile_str=
echo "===> ${WORK_LIB_DIR}/lib${bc_bp_name}.so"
if [ -f ${WORK_LIB_DIR}/lib${bc_bp_name}.so ]; then
makefile_str="include \$(HOME)/run/framework/tools/env.mk.new\nlib${bc_bp_name}.so:${bc_bp_name}.o\n\t\$(CPP_COMP) \$(CPPFLAGS) \$(INCLUDE) \$(DYNLIBFLAGS) -o \$@ ${bc_bp_name}.o -L\$(ORA_LIB) -L\$(TUX_LIB) ${link_lib_str_all}\n\t mv ${WORK_LIB_DIR}/lib${bc_bp_name}.so ${WORK_LIB_DIR}/solib_old/lib${bc_bp_name}.so.${my_date}; mv \$@ ${WORK_LIB_DIR}" # archive the old .so, then move the new one into solib
else
makefile_str="include \$(HOME)/run/framework/tools/env.mk.new\nlib${bc_bp_name}.so:${bc_bp_name}.o\n\t\$(CPP_COMP) \$(CPPFLAGS) \$(INCLUDE) \$(DYNLIBFLAGS) -o \$@ ${bc_bp_name}.o -L\$(ORA_LIB) -L\$(TUX_LIB) ${link_lib_str_all}\n\t mv \$@ ${WORK_LIB_DIR}" # no previous copy: just move the new .so into solib
fi
# Unquoted echo expands the \n/\t escapes on this platform's sh,
# writing a multi-line makefile; then build the target with it.
echo $makefile_str > $makefile_name
make -f $makefile_name $target_name
rm -rf $makefile_name
if [ "$returnCode" -eq "100" ]
then
echo "ļ"
elif [ "$returnCode" -eq "101" ]
then
echo ""
echo "ļݱڱ̬ļҪ룬ʹádepmk±룡"
else
echo "ˣ"
fi
}
#### Build every *.cpp in the current directory as a bc/bp shared library,
#### newest files first. Takes no arguments.
build_bc_bp_all()
{
ls_flags=" -t *.cpp" # ls options: -t sorts by mtime (newest first); beware of an `ls -l` alias
ls ${ls_flags} | while read file_name
do
CppFileList=$file_name
FirstCppFile=$file_name
build_bc_bp_single $file_name
done
}
#### Build a single tuxedo server from one R*.cpp source file.
#### Argument 1: the source file name, e.g. RPubGetSeq.cpp
#### The service list (-s flags) is scraped from the TPSVCINFO handler
#### signatures in the source; the dependency list comes from <name>.dep.
build_server_single()
{
my_date=`date +%Y%m%d%H%M%S`
server_full_name=$1 # server source file : eg : RPubGetSeq.cpp
makefile_name="makefile_"${server_full_name}_${my_date}
server_name=`echo $server_full_name|cut -f 1 -d .`
#cppcompile_result=`sh cppcompile.sh ${server_full_name}`
#echo "$server_full_name $makefile_name $cppcompile_result "
#link_lib_str=`echo ${cppcompile_result}|awk '{i=index($0,":"); j=index($0,"Success"); print substr($0,i+1,j-i-1)}'`
link_lib_str=`get_cps_lib`
link_lib_str_len=${#link_lib_str}
link_lib_str_all=""
WORK_LIB_DIR=${HOME}/run/solib
# Only add -L/-l flags when get_cps_lib actually produced something.
if [ $link_lib_str_len -lt 3 ]
then
link_lib_str_all=""
else
link_lib_str_all="-L${WORK_LIB_DIR} ${link_lib_str}"
fi
echo "######### $server_name"
dep_content=`cat ${server_name}.dep`
# Extract "-s <service>" for every "void <service>(TPSVCINFO ..." handler.
service_list=`grep void ${server_name}.cpp | tr -s "(" " " | awk '{if($5=="TPSVCINFO") {printf("-s %s ", $4)}}'`
####makefile_str="include \$(HOME)/run/framework/tools/env.mk.new\n${server_name}:${dep_content}\n\t\$(BUILD_SERVER) ${service_list} -o ${server_name} -f\"\$?\" -l \"-L\$(COMMON_LIB) -L\$(XML_LIB) -L\$(TUX_LIB) -L\$(SYS_LIBS) -L\$(ORA_LIB) ${link_lib_str_all}\"\n\ttmshutdown -s ${server_name}; mv \$@ \${BOSS_BIN};tmboot -s ${server_name}"
#### (variant above also bounced the server via tmshutdown/tmboot; kept for reference)
makefile_str="include \$(HOME)/run/framework/tools/env.mk.new\n${server_name}:${dep_content}\n\t\$(BUILD_SERVER) ${service_list} -o ${server_name} -f\"\$?\" -l \"-L\$(COMMON_LIB) -L\$(XML_LIB) -L\$(TUX_LIB) -L\$(SYS_LIBS) -L\$(ORA_LIB) ${link_lib_str_all}\"\n\tmv \$@ \${BOSS_BIN}"
####makefile_str="include \$(HOME)/run/framework/tools/env.mk.new\n${server_name}:${dep_content}\n\t\$(BUILD_SERVER) ${service_list} -o ${server_name} -f\"\$?\" -l \" ${link_lib_str_all} -L\$(COMMON_LIB) -L\$(XML_LIB) -L\$(TUX_LIB) -L\$(SYS_LIBS) -L\$(ORA_LIB) \"\n\tmv \$@ \${BOSS_BIN}"
echo $makefile_str > $makefile_name
make -f $makefile_name $server_name
rm -rf $makefile_name
}
#### Build every R*.cpp in the current directory as a tuxedo server,
#### newest files first. Takes no arguments.
build_server_all()
{
ls_flags=" -t R*.cpp" # ls options: -t sorts by mtime (newest first); beware of an `ls -l` alias
ls ${ls_flags} | while read file_name
do
CppFileList=$file_name
FirstCppFile=$file_name
build_server_single $file_name
done
}
#### Build a standalone executable named $1 from the sources in $CppFileList
#### (object list derived by substituting .cpp -> .o).
build_exec()
{
my_date=`date +%Y%m%d%H%M%S`
exec_name=$1 # target executable name
makefile_name="makefile_"${exec_name}_${my_date}
link_lib_str=`get_cps_libforexe`
# echo "$link_lib_str"
link_lib_str_len=${#link_lib_str}
link_lib_str_all=""
WORK_LIB_DIR=${HOME}/run/solib
# Only add -L/-l flags when get_cps_libforexe actually produced something.
if [ $link_lib_str_len -lt 3 ]
then
link_lib_str_all=""
else
link_lib_str_all="-L${WORK_LIB_DIR} ${link_lib_str}"
fi
echo "#########"
dep_content=`echo "$CppFileList"| sed "s/.cpp/.o/g"`
makefile_str="include \$(HOME)/run/framework/tools/env.mk.new\n${exec_name}:${dep_content}\n\t\$(CPP_COMP) \$(CPPFLAGS) -o \$@ ${dep_content} -L\$(EXEC_COMMON_LIB) -L\$(XML_LIB) -L\$(ORA_LIB) -L\$(TUX_LIB) ${link_lib_str_all}"
echo $makefile_str > $makefile_name
echo "${makefile_str}"
make -f $makefile_name $exec_name
rm -rf $makefile_name
}
############## main ##############
##################################
#ű 4 ÷:
# usage: xx.sh 0 cQTimeAgeLimit.cpp ʾ뵥bc or bp;
# usage: xx.sh 0 all ʾ뵱ǰĿ¼еbc or bp
# usage: xx.sh 1 RRouteInfo.cpp ʾ뵥server
# usage: xx.sh 1 all ʾ뵱ǰĿ¼еserver
#ʹýűʱ,Ƚ뵽bc or bp Ŀ¼ȥ;
# Print the five supported invocation forms. The original text is GB18030
# that was mangled in transit; kept byte-for-byte since it is runtime output.
usage()
{
echo "ű 5 ÷:"
echo "usage: xx.sh 0 cQTimeAgeLimit.cpp ʾ뵥bc or bp"
echo "usage: xx.sh 0 all ʾ뵱ǰĿ¼еbc or bp"
echo "usage: xx.sh 1 RRouteInfo.cpp ʾ뵥server"
echo "usage: xx.sh 1 all ʾ뵱ǰĿ¼еserver"
echo "usage: xx.sh 2 ExecFileName file1 file2 ... ڱִļ"
}
# ---- main dispatch ----
#   0 <file|all>          build bc/bp shared libraries
#   1 <file|all>          build tuxedo servers
#   2 <exe> <files...>    build a standalone executable
if [ $# -lt 2 ]
then
usage;
exit 1 # fix: previously fell through and ran the dispatcher with empty args
fi
if [ "$1" = "0" -o "$1" = "1" ] # BC BP SERVER
then
CppFileList=$2
FirstCppFile=$2
if [ "$#" != "2" ]; then
usage;
exit 1 # fix: do not continue after printing usage
fi
if [ "$1" = "0" ] # BC BP
then
ProgType="so"
if [ "$2" = "all" ] # build everything in the current directory
then
echo "build_bc_bp_all"
build_bc_bp_all
else
echo "build_bc_bp_single"
build_bc_bp_single $2
fi
else # SERVER
ProgType="service"
if [ "$2" = "all" ] # build everything in the current directory
then
echo "build_server_all"
build_server_all
else
echo "build_server_single"
build_server_single $2
fi
fi
elif [ "$1" = "2" ] # standalone executable
then
ProgType="exec"
if [ $# -lt 3 ]
then
usage;
exit 1 # fix: signal the error to the caller
else
FirstCppFile=$3
# Drop the leading mode digit, then strip the exec name to obtain the file list.
CppFileList=`echo "$*"| sed "s/^2//g"`
CppFileList=`echo $CppFileList|awk '{i=index($0, $1);j=length($1);print substr($0, i+j)}'`
echo "CppFileList=$CppFileList"
build_exec $2
fi
else
usage;
exit 1 # fix: an unknown mode is an error
fi
| true
|
9d5d5aadf4e0434c810c1ff98f20f2df239f4ca9
|
Shell
|
exrat/upload-mktorrent
|
/up.sh
|
UTF-8
| 5,699
| 3.640625
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Author ......... : ex_rat, based on Aerya's script | https://upandclear.org
# https://upandclear.org/2016/09/29/script-simpliste-de-creation-de-torrent/
# Variables ...... : set them here; do not modify the rest of the script
# ruTorrent user & announce URL
USER=exrat
TRACKER="https://annonce.tracker.bt"
# Directory layout matching the mondedie.fr ruTorrent setup.
# Do not change without a good reason.
# For Transmission it would probably be: TORRENT="/home/$USER/download" (never tested!)
TORRENT="/home/$USER/torrents"
WATCH="/home/$USER/watch"
##################################################
# Number of CPU threads for mktorrent hashing.
THREAD=$(grep -c processor < /proc/cpuinfo)
# NOTE(review): grep -c prints 0 (not "") on no match, so this fallback can
# only trigger when /proc/cpuinfo is unreadable -- confirm intent.
if [ "$THREAD" = "" ]; then
THREAD=1
fi
# Pick the mktorrent piece-size exponent (-l) from the size of
# $TORRENT/$FILE: 2^15 (32 KiB) pieces below 64 MiB, doubling per
# power-of-two bracket, capped at 2^25 (32 MiB) for 32 GiB and above.
# Sets the globals TAILLE (size in KiB from du) and PIECE (the exponent).
FONCAUTO () {
TAILLE=$(du -s "$TORRENT"/"$FILE" | awk '{ print $1 }')
case "$TAILLE" in
''|*[!0-9]*) return ;; # du failed: leave PIECE untouched, like the old elif chain
esac
local borne=65536      # first bracket boundary: 64 MiB expressed in KiB
local exp=15           # piece size 2^15 = 32 KiB
while [ "$exp" -lt 25 ] && [ "$TAILLE" -ge "$borne" ]; do
borne=$((borne * 2))
exp=$((exp + 1))
done
PIECE=$exp
}
# Create the torrent with mktorrent: private flag (-p), piece size 2^$PIECE,
# announce URL $TRACKER, $THREAD hashing threads; output lands in the
# current directory and is chowned to the ruTorrent user.
FONCCREATE () {
mktorrent -p -l "$PIECE" -a "$TRACKER" -t "$THREAD" "$TORRENT"/"$FILE"
chown "$USER":"$USER" "$FILE".torrent
}
# Show a "creation cancelled" dialog and stop the script.
FONCANNUL () {
whiptail --title "Annulation" --msgbox " Création $FILE.torrent annulé" 13 60
exit 0
}
# Ensure mktorrent is installed; install it on first run.
# fix: test command -v's status directly instead of "[ $? = 1 ]", which
# missed other non-zero statuses (SC2181).
if ! command -v mktorrent >/dev/null 2>&1; then
apt-get install -y mktorrent
fi
if [ "$1" = "" ]; then # dialog-box (whiptail) mode
# Ask for the source file/directory name (must exist under $TORRENT).
NAME=$(whiptail --title "Nom de la source" --inputbox "Entrez le nom du fichier ou dossier source" 10 60 3>&1 1>&2 2>&3)
exitstatus=$?
if [ $exitstatus = 0 ]; then
FILE=$NAME
if [ -d "$TORRENT/$FILE" ] || [ -f "$TORRENT/$FILE" ]; then
echo
else
whiptail --title "Erreur" --msgbox "Le fichier ou dossier source n'existe pas\nVérifiez le nom exact" 13 60
exit 0
fi
else
FONCANNUL
fi
# Piece-size menu: 01 = automatic, 02..12 map to 2^15 .. 2^25 bytes.
OPTION=$(whiptail --title "Taille de pièces" --menu "Choisissez la taille de pièces du .torrent" 15 60 8 \
"01" " Automatique" \
"02" " 32 Ko" \
"03" " 64 Ko" \
"04" " 128 Ko" \
"05" " 256 Ko" \
"06" " 512 Ko" \
"07" " 1 Mo" \
"08" " 2 Mo" \
"09" " 4 Mo" \
"10" " 8 Mo" \
"11" " 16 Mo" \
"12" " 32 Mo" 3>&1 1>&2 2>&3)
if [ "$OPTION" = 01 ]; then
FONCAUTO
elif [ "$OPTION" = 02 ]; then
PIECE=15 # 32 Ko
elif [ "$OPTION" = 03 ]; then
PIECE=16 # 64 Ko
elif [ "$OPTION" = 04 ]; then
PIECE=17 # 128 Ko
elif [ "$OPTION" = 05 ]; then
PIECE=18 # 256 Ko
elif [ "$OPTION" = 06 ]; then
PIECE=19 # 512 Ko
elif [ "$OPTION" = 07 ]; then
PIECE=20 # 1 Mo (comment fixed: previously mislabelled "1 Go")
elif [ "$OPTION" = 08 ]; then
PIECE=21 # 2 Mo
elif [ "$OPTION" = 09 ]; then
PIECE=22 # 4 Mo
elif [ "$OPTION" = 10 ]; then
PIECE=23 # 8 Mo
elif [ "$OPTION" = 11 ]; then
PIECE=24 # 16 Mo
elif [ "$OPTION" = 12 ]; then
PIECE=25 # 32 Mo
else
FONCANNUL
fi
# If the .torrent already exists in the home directory, offer to delete it.
if [ -d /home/"$USER/$FILE".torrent ] || [ -f /home/"$USER/$FILE".torrent ]; then
REMOVE=$(whiptail --title "Erreur" --menu "Le fichier $FILE.torrent existe déjà en :\n/home/$USER/$FILE.torrent\nvoulez vous le supprimer ?" 15 60 2 \
"1" " Oui" \
"2" " Non" 3>&1 1>&2 2>&3)
if [ "$REMOVE" = 1 ]; then
rm -f /home/"$USER"/"$FILE".torrent
elif [ "$REMOVE" = 2 ]; then
FONCANNUL
else
FONCANNUL
fi
fi
FONCCREATE
# Optionally drop the torrent into the watch dir so the client starts seeding.
SEED=$(whiptail --title "Mise en seed" --menu "Voulez vous mettre le torrent en seed ?" 15 60 2 \
"1" " Oui" \
"2" " Non" 3>&1 1>&2 2>&3)
if [ "$SEED" = 1 ]; then
mv "$FILE".torrent "$WATCH"/"$FILE".torrent
whiptail --title "Ok" --msgbox " Torrent ajouté en:\n $WATCH/$FILE.torrent\n Source:\n $TORRENT/$FILE" 13 60
elif [ "$SEED" = 2 ]; then
if [ -d /home/"$USER/$FILE".torrent ] || [ -f /home/"$USER/$FILE".torrent ]; then
echo
else # in case the script was run from elsewhere
mv "$FILE".torrent /home/"$USER"/"$FILE".torrent
fi
whiptail --title "Ok" --msgbox " Torrent ajouté en:\n /home/$USER/$FILE.torrent\n Source:\n $TORRENT/$FILE" 13 60
else
rm "$FILE".torrent
FONCANNUL
fi
elif [ "$1" = "--auto" ]; then # fully automatic mode: up.sh --auto <name>
FILE="$2"
if [ -d /home/"$USER/$FILE".torrent ] || [ -f /home/"$USER/$FILE".torrent ]; then
rm -f /home/"$USER"/"$FILE".torrent # avoid duplicates across repeated runs
fi
FONCAUTO
FONCCREATE
mv "$FILE".torrent "$WATCH"/"$FILE".torrent
else # semi-automatic mode: up.sh <name>, prompts on the terminal
FILE="$1"
if [ -d "$TORRENT/$FILE" ] || [ -f "$TORRENT/$FILE" ]; then
echo
else
echo "Erreur, vérifiez le nom du dossier ou fichier source"
exit 0
fi
if [ -d /home/"$USER/$FILE".torrent ] || [ -f /home/"$USER/$FILE".torrent ]; then
echo -n -e "$1.torrent existe déjà, voulez vous le supprimer ? (y/n): "
read -r REMOVE
if [ "$REMOVE" = "y" ]; then
rm -f /home/"$USER"/"$FILE".torrent
else
exit 0
fi
fi
FONCAUTO
FONCCREATE
echo -n -e "Voulez vous mettre le torrent en seed ? (y/n): "
read -r SEED
if [ "$SEED" = "y" ]; then
mv "$FILE".torrent "$WATCH"/"$FILE".torrent
echo "$FILE.torrent en seed"
else
if [ -d /home/"$USER/$FILE".torrent ] || [ -f /home/"$USER/$FILE".torrent ]; then
echo
else # in case the script was run from elsewhere
mv "$FILE".torrent /home/"$USER"/"$FILE".torrent
echo "$FILE.torrent en /home/$USER"
fi
fi
fi
| true
|
931904bd7cf4954b11dd9c1fbfc118d3dd55406f
|
Shell
|
dancor/dancomp
|
/old-bin/rrd
|
UTF-8
| 212
| 3.234375
| 3
|
[] |
no_license
|
#!/bin/sh
# run, sleep, notify, record:
# start a delayed "STOP <cmd>" notifier (snt) in the background, run the
# command via rrr, then cancel the notifier if it is still alive.
if [ "$#" -lt 2 ] # fix: [[ ]] is a bashism, invalid under /bin/sh
then
echo 'usage: '"$0"' <secs> <command>';
exit 2 # fix: exit non-zero on a usage error
fi
N="$1"
shift
C="$*" # fix: "$*" (join into one word) is the intent here; "$@" is for separate words
snt "$N" 'STOP '"$C" &
PID=$!
rrr "$@"
# Cancel the pending notification if the notifier has not fired yet.
ps "$PID" > /dev/null && kill "$PID" ;
| true
|
5bab790257cb0328bd13cf2ad77d93379079333a
|
Shell
|
Bondzio/AUR
|
/gnome-shell-extension-window-buttons-git/PKGBUILD
|
UTF-8
| 2,253
| 3.0625
| 3
|
[] |
no_license
|
# Maintainer: XZS <d dot f dot fischer at web dot de>
# Contributor: Pi3R1k <pierrick.brun at gmail dot com>
# Package identity and constraints (extension only supports GNOME Shell 3.4-3.14).
pkgname=gnome-shell-extension-window-buttons-git
pkgver=10_e.g.o.r6.g9aad6a8
pkgrel=1
pkgdesc="Extension which puts minimize, maximize and close buttons in the top panel."
arch=('any')
url="https://github.com/danielkza/Gnome-Shell-Window-Buttons-Extension"
license=('GPL3')
depends=('gnome-shell>=3.4' 'gnome-shell<3.15')
# template start; name=git; version=1;
# Standard -git PKGBUILD boilerplate: clone upstream, skip checksums, and
# provide/conflict with the non-git package name.
makedepends+=('git')
source+=("$pkgname::git+$url")
md5sums+=('SKIP')
provides+=("${pkgname%-git}")
conflicts+=("${pkgname%-git}")
# Derive the package version from git: "<tag>.r<commits-since>.g<hash>"
# when a tag exists, otherwise "r<total-commits>.<hash>".
pkgver() {
cd $pkgname
git describe --long --tags 2>/dev/null | sed 's/[^[:digit:]]*\(.\+\)-\([[:digit:]]\+\)-g\([[:xdigit:]]\{7\}\)/\1.r\2.g\3/'
if [ ${PIPESTATUS[0]} -ne 0 ]; then
printf "r%s.%s" "$(git rev-list --count HEAD)" "$(git rev-parse --short HEAD)"
fi
}
# vim: filetype=sh
# template end;
# template start; name=modularize-package; version=1;
# Run every package_NN_* step defined in this PKGBUILD, in the order
# reported by `declare -F` (names sort by their numeric prefix). The loop
# runs in the current shell so steps can share globals (extname, destdir).
package() {
local -a steps
mapfile -t steps < <(declare -F | grep -Po 'package_[[:digit:]]+[[:alpha:]_]*$')
local step
for step in "${steps[@]}"; do
"$step"
done
}
# vim: filetype=sh
# template end;
# template start; name=install-code; version=1;
# Locate the extension's metadata.json, cd into its directory, and set the
# globals shared by the later package_* steps:
#   extname - the extension UUID read from metadata.json
#   destdir - the install target under $pkgdir
# NOTE(review): assumes exactly one metadata.json exists in the build tree.
package_01_locate() {
msg2 'Locating extension...'
cd "$(dirname "$(find . -name 'metadata.json')")"
extname=$(grep -Po '(?<="uuid": ")[^"]*' metadata.json)
# fix: destdir was written as ""$pkgdir/...""; the doubled quotes only
# worked by accident and broke syntax highlighting/intent -- quote once.
destdir="$pkgdir/usr/share/gnome-shell/extensions/$extname"
}
# Copy the extension's code/assets (*.js*, *.css, *.ui) from the current
# directory into $destdir (set by package_01_locate).
package_02_install() {
msg2 'Installing extension code...'
find -maxdepth 1 \( -iname '*.js*' -or -iname '*.css' -or -iname '*.ui' \) -exec install -Dm644 -t "$destdir" '{}' +
}
# vim: filetype=sh
# template end;
# template start; name=install-schemas; version=1;
# Default install script (compiles gschemas on install) unless the
# PKGBUILD already set one.
if [ -z "$install" ]
then
install=gschemas.install
fi
# Install every GSettings schema XML file system-wide.
package_10_schemas() {
msg2 'Installing schemas...'
find -name '*.xml' -exec install -Dm644 -t "$pkgdir/usr/share/glib-2.0/schemas" '{}' +
}
# vim: filetype=sh
# template end;
# template start; name=unify-conveniencejs; version=1;
# Instead of shipping a private convenience.js, symlink the copy bundled
# with the user-theme extension (hence the gnome-shell-extensions dep).
depends+=(gnome-shell-extensions)
package_03_unify_conveniencejs() {
ln -fs \
../user-theme@gnome-shell-extensions.gcampax.github.com/convenience.js \
"$destdir/convenience.js"
}
# vim: filetype=sh
# template end;
# Ship the bundled themes directory alongside the extension code.
package_09_theme() {
cp -r --no-preserve=ownership,mode themes "$destdir"
}
| true
|
a5198587cbfb12fbf1fbe2cfd004c3f6c4476d59
|
Shell
|
JetBrains/teamcity-msbuild-logger
|
/tools/dotnet-sdk
|
UTF-8
| 847
| 4.15625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Print usage for the "dotnet sdk" switcher.
# fix: corrected the user-facing typo "Swtiches" -> "Switches".
function sdk_help(){
	echo ".NET Command Line SDK Switcher (1.0.0)
Usage: dotnet sdk [command]
Usage: dotnet sdk [version]
Commands:
latest Switches to the latest .NET Core SDK version
list Lists all installed .NET Core SDKs
help Display help
Versions:
An installed version number of a .NET Core SDK"
}
# List installed .NET Core SDK versions (the directory names under the
# default macOS install location).
function sdk_list(){
echo "The installed .NET Core SDKs are:"
ls -1 "/usr/local/share/dotnet/sdk"
}
# Switch back to the newest installed SDK by removing the global.json pin
# from the current directory, then show the resulting active version.
function sdk_latest(){
if [ -e global.json ]; then
rm global.json
fi
echo ".NET Core SDK version switched to latest version."
dotnet --version
}
# Dispatch: no argument / "help" prints usage; "list" shows installed SDKs;
# "latest" removes any global.json pin; anything else is treated as a
# version number and pinned via a global.json in the current directory.
case "$1" in
"help")
sdk_help
;;
"")
sdk_help
;;
"list")
sdk_list
;;
"latest")
sdk_latest
;;
*)
echo "Switching .NET Core SDK version to $1"
# fix: overwrite (>) instead of append (>>). Appending on every switch
# accumulated multiple JSON objects and corrupted global.json.
echo "{
\"sdk\": {
\"version\": \"$1\"
}
}" > global.json
;;
esac
| true
|
f31ecc836f7ae5679abe4bb3be4b7966ff2dd260
|
Shell
|
amm834/developement_env_setup
|
/dev_env_setup.sh
|
UTF-8
| 1,085
| 3.515625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Termux (Android/aarch64) development-environment bootstrap.
if [[ $(uname -m) != "aarch64" ]]; then
echo -e "Your environment should be \e[32mAndroid (aarch64/arm64)\e[0m"
exit 1
fi
#remove motd file
rm $PREFIX/etc/motd
# install coreutils
apt upgrade && apt update -y
# install packages
# NOTE(review): "camake" looks like a typo for "cmake" -- confirm the
# intended package name before changing it, since it is passed to pkg install.
pkgs=("git" "wget" "zsh" "composer" "php" "apache" "php-apache" "mariadb" "rust" "clang" "camake" "lsd")
pkgsLength=${#pkgs[@]}
# fix: use < rather than <= -- the old bound ran one extra iteration and
# invoked "pkg install -y" with an empty package argument.
for (( i = 0; i < pkgsLength; i++ )); do
pkg install ${pkgs[$i]} -y
done
pkg upgrade
# download server configuration file via wget
wget https://raw.githubusercontent.com/amm834/developement_env_setup/main/httpd.conf
# remove default configuration file
rm $PREFIX/etc/apache2/httpd.conf
# move downloaded configuration file
mv httpd.conf $PREFIX/etc/apache2
# download service file
wget https://raw.githubusercontent.com/amm834/developement_env_setup/main/service
# change as executable file
chmod +x service
# move service file to bin
mv service $PREFIX/bin/
# install oh-my-zsh
sh -c "$(curl -fsSL https://raw.githubusercontent.com/robbyrussell/oh-my-zsh/master/tools/install.sh)"
| true
|
50e44aa3eda43ed0627a9da88e74a1982b6c4d18
|
Shell
|
solderzzc/cloudrouter-1
|
/build/rpms/legacy/build-components.sh
|
UTF-8
| 1,981
| 4.125
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Build RPMs/SRPMs for the component directories under $COMPONENTS_DIR.
COMPONENTS_DIR=${COMPONENTS_DIR-./components}
# Resolve rpmbuild's working directories from its own macro configuration.
RPM_BUILD_SOURCES=$(rpmbuild --eval '%{_sourcedir}')
RPM_BUILD_RPMS=$(rpmbuild --eval '%{_rpmdir}')
RPM_BUILD_SRPMS=$(rpmbuild --eval '%{_srcrpmdir}')
RPM_BUILD_OPTS=(--define="distribution $OS")
# Print command-line help to stdout and exit with status 1.
usage() {
printf '%s\n' \
"${BASH_SOURCE[0]} [-s|-h]" \
" -s build only source rpm (SRPM)" \
" -h help"
exit 1
}
# handle opts: -s restricts the build to SRPMs, -h prints help.
while getopts "sh" opt; do
case $opt in
s) SOURCE_ONLY=1;;
h) usage;;
\?) usage;;
esac
done
shift $((OPTIND-1))
# Fail fast when the components directory is missing.
if [ ! -d ${COMPONENTS_DIR} ]; then
echo >&2 "[ERROR] COMPONENTS_DIR=${COMPONENTS_DIR} does not exist." \
"This can be set via the COMPONENTS_DIR environment variable."
exit 1
fi
# With no positional args build every component, otherwise only the named ones.
if [ $# -eq 0 ]; then
COMPONENTS=( $(ls -d ${COMPONENTS_DIR}/* | xargs -I {} basename {}) )
else
COMPONENTS=( $@ )
fi
# rpmbuild mode: -ba builds binary+source rpms, -bs builds only the SRPM.
if [ -z ${SOURCE_ONLY} ]; then
RPM_BUILD_OPTS=("${RPM_BUILD_OPTS[@]}" -ba)
else
RPM_BUILD_OPTS=("${RPM_BUILD_OPTS[@]}" -bs)
fi
mkdir -p ${RPM_BUILD_SOURCES}
for COMPONENT in "${COMPONENTS[@]}"; do
COMPONENT_DIR=${COMPONENTS_DIR}/${COMPONENT}
LOG_FILE=build-${COMPONENT}.log
if [ -d ${COMPONENT_DIR} ]; then
# make if Makefile is provided
[[ -f ${COMPONENT_DIR}/Makefile ]] \
&& ( cd ${COMPONENT_DIR} && make clean && make source)
# move all local patches into rpmbuild's SOURCES directory
find ${COMPONENT_DIR} ! -name "sources" -a ! -name "*.spec" \
-exec cp {} ${RPM_BUILD_SOURCES}/. \;
# fetch all external patches/sources, then build each spec
# NOTE(review): the leading "2>&1" folds stderr of the find pipeline
# into the tee'd log -- unusual placement but valid; confirm intent.
2>&1 find ${COMPONENT_DIR} -name "*.spec" \
-exec spectool --sourcedir --get-files {} \; \
-exec rpmbuild "${RPM_BUILD_OPTS[@]}" --clean {} \; | tee ${LOG_FILE}
# pull the built rpms/srpms out into the current directory
find ${RPM_BUILD_RPMS} -name "*.rpm" \
-exec mv {} . \;
find ${RPM_BUILD_SRPMS} -name "${COMPONENT}*.rpm" \
-exec mv {} . \;
fi
done
| true
|
003df111f7f0d97d70b592a68376c4c887792943
|
Shell
|
daiqian111/V4-panel
|
/config/diy.sh
|
UTF-8
| 391
| 3.1875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Panel installation path.
PanelPath="/jd/panel"
# If the panel directory was removed (wiped), restore it from /jd/config
# and restart the pm2-managed server process; otherwise leave it alone.
# (Chinese echo strings below are runtime output and kept verbatim.)
if [[ ! -d "$PanelPath" ]]; then
echo "控制面板已和谐,重新拷贝面板目录..."
cp -r /jd/config/panel /jd/
echo "启动控制面板挂载程序..."
pm2 stop /jd/panel/server.js
pm2 start /jd/panel/server.js
else
echo "控制面板还存在."
fi
| true
|
be4c3441309f848106049f03c922e25a1151d99b
|
Shell
|
Luoyadan/UCR-CS172-Project
|
/crawler.sh
|
UTF-8
| 229
| 2.984375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Usage: crawler.sh [tweet-count] [output-dir]
# Defaults: 0 tweets, ./data output directory.
if [ -z "$1" ]; then
TWEETS=0
else
TWEETS=$1
fi
if [ -z "$2" ]; then
mkdir -p data
FILEPATH="data"
else
mkdir -p "$2" # fix: quote so paths with spaces survive word splitting
FILEPATH=$2
fi
echo "Writing twitter data to /$FILEPATH ..."
# fix: quote the arguments handed to the crawler for the same reason
python twitterGeo.py "$TWEETS" "$FILEPATH"
| true
|
066f9d78b534e972cf443998119b3e8e11263adb
|
Shell
|
c3w/snapdragon
|
/html/ackall.cgi
|
UTF-8
| 175
| 2.859375
| 3
|
[] |
no_license
|
#!/bin/sh
# CGI: acknowledge all pending traps by renaming *trap -> *trap.ack, then
# redirect the browser back to the dashboard.
TRAP_DIR="/data/snapdragon/traps"
# fix: abort if the trap directory is missing instead of falling through
# and renaming files in whatever the CGI's working directory happens to be.
cd ${TRAP_DIR} || exit 1
for trap in *trap; do {
# fix: with no traps the glob stays the literal "*trap"; skip it.
[ -e "$trap" ] || continue
mv -- "$trap" "$trap".ack
}; done
echo "Location: http://ipncore.com/snapdragon/"
echo
| true
|
6209cf10696d65a212120691967813b659a5500f
|
Shell
|
phidra/discussion-testing-pbf2json
|
/test.sh
|
UTF-8
| 184
| 2.53125
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# Smoke test: run the built converter against the bundled sample PBF extract.
set -o errexit
set -o nounset
set -o pipefail
# Absolute path of the directory containing this script.
root_dir="$(realpath "$(dirname "$0")" )"
set -o xtrace
"${root_dir}/_build/bin/converter" "${root_dir}/data/data.osm.pbf"
| true
|
b17268a6a5469441de4c5e615118c51b8ca7249b
|
Shell
|
timkuel/2750-SPT-Projects
|
/project2/contact_one.sh
|
UTF-8
| 713
| 3.703125
| 4
|
[] |
no_license
|
#!/bin/bash
## Timothy Kuelker
## Uses grep to search a pattern inside of the here document
## Loops through continuously until user enters control + c
# fixes: the shebang was missing its leading "#"; "while [[ true ]]" relied
# on the string being non-empty; and an unchecked read spun forever on EOF.
x=""
##loop runs until a break command is entered
while true
do
echo -e "\nEnter a regex to search the HERE document for, or hit CTR-C to exit"
read -r x || break # stop cleanly on end-of-input instead of looping forever
##checking if pattern is in the here document
if grep -i "$x" << HERE
John Doe, jdoe@great.com, 800-555-1111, California
Jane Doe, jand@super.edu, 876-555-1321, New York
John Smith, bsmith@fast.net, 780-555-1234, Florida
Paul Wang, pwang@cs.kent.edu, 330-672-9050, Ohio
HERE
then
echo -e "\nFound some matches to the pattern, there they are.\n"
else
echo -e "\nThe match couldnt be found in the HERE document, try another!"
fi
done
exit 0
| true
|
303b63858586061d37a7f19229e874267d3c4077
|
Shell
|
henryho1612/dotfiles
|
/.zshrc
|
UTF-8
| 5,894
| 2.9375
| 3
|
[] |
no_license
|
# Path to your oh-my-zsh configuration.
ZSH=$HOME/.oh-my-zsh
# Set name of the theme to load.
# Look in ~/.oh-my-zsh/themes/
# Optionally, if you set this to "random", it'll load a random theme each
# time that oh-my-zsh is loaded.
ZSH_THEME="jaischeema"
# Example aliases
# alias zshconfig="mate ~/.zshrc"
# alias ohmyzsh="mate ~/.oh-my-zsh"
# Set to this to use case-sensitive completion
# CASE_SENSITIVE="true"
# Uncomment this to disable bi-weekly auto-update checks
# DISABLE_AUTO_UPDATE="true"
# Uncomment to change how often before auto-updates occur? (in days)
# export UPDATE_ZSH_DAYS=13
# Uncomment following line if you want to disable colors in ls
# DISABLE_LS_COLORS="true"
# Uncomment following line if you want to disable autosetting terminal title.
# DISABLE_AUTO_TITLE="true"
# Uncomment following line if you want to disable command autocorrection
# DISABLE_CORRECTION="true"
# Uncomment following line if you want red dots to be displayed while waiting for completion
COMPLETION_WAITING_DOTS="true"
# Uncomment following line if you want to disable marking untracked files under
# VCS as dirty. This makes repository status check for large repositories much,
# much faster.
# DISABLE_UNTRACKED_FILES_DIRTY="true"
# Which plugins would you like to load? (plugins can be found in ~/.oh-my-zsh/plugins/*)
# Custom plugins may be added to ~/.oh-my-zsh/custom/plugins/
# Example format: plugins=(rails git textmate ruby lighthouse)
plugins=(git apache2-macports git-extras macports osx web-search zsh-syntax-highlighting)
# source
# http://code.tutsplus.com/tutorials/how-to-customize-your-command-prompt--net-24083
# http://zsh.sourceforge.net/Doc/Release/Prompt-Expansion.html
# http://usefulshortcuts.com/downloads/ALT-Codes.pdf
# http://stackoverflow.com/questions/1348842/what-should-i-set-java-home-to-on-osx
source $ZSH/oh-my-zsh.sh
# Print "(envname)" when a Python virtualenv is active, nothing otherwise.
# fix: quote $VIRTUAL_ENV -- the unquoted [ $VIRTUAL_ENV ] and bare
# `basename $VIRTUAL_ENV` broke on paths containing spaces.
function virtualenv-info {
[ -n "$VIRTUAL_ENV" ] && echo "($(basename "$VIRTUAL_ENV"))"
}
# Current timestamp: abbreviated weekday, ISO date, 24h time
# (e.g. "Mon 2024-01-01 12:34:56"). %F is shorthand for %Y-%m-%d.
function henryh-date {
command date '+%a %F %T'
}
# Build the whitespace filler that right-aligns the date in the two-line
# prompt. Uses zsh-only expansions: ${(%):-...} renders prompt escapes so
# their printed width can be measured, and ${(l.N.. .)} left-pads to N
# spaces.
function put_spacing {
# ${COLUMNS}: a size of a terminal window
local term_width
(( term_width = ${COLUMNS} - 1 ))
local fill_bar=""
local pwd_len=""
local user="%n"
local host="%M"
local current_dir="%~"
local date="$(henryh-date)"
# Printed width of the fixed left part (frame + user@host + venv + cwd)
# plus the right-aligned date.
local left_left_prompt_size=${#${(%):-╭╔ ${user}@${host} $(virtualenv-info) ${current_dir}}}
local left_right_prompt_size=${#${(%):-${date}}}
local left_prompt_size
(( left_prompt_size = ${left_left_prompt_size} + ${left_right_prompt_size} ))
# Only pad when everything fits on one terminal line.
if [[ "$left_prompt_size" -gt $term_width ]]; then
((pwd_len=$term_width - $left_prompt_size))
else
fill_bar="${(l.(($term_width - $left_prompt_size - 5)).. .)}"
fi
echo "%{$fg[white]%} ${fill_bar} %{$reset_color%}"
}
# Git variables
ZSH_THEME_GIT_PROMPT_PREFIX="%{$terminfo[bold]$fg[magenta]%}"
ZSH_THEME_GIT_PROMPT_SUFFIX="%{$reset_color%}"
ZSH_THEME_GIT_PROMPT_DIRTY="%{$terminfo[bold]$fg[red]%} ✘"
ZSH_THEME_GIT_PROMPT_UNTRACKED="%{$terminfo[bold]$fg[yellow]%} °"
ZSH_THEME_GIT_PROMPT_CLEAN="%{$terminfo[bold]$fg[green]%} ✔"
# Login information - source <http://zsh.sourceforge.net/Doc/Release/Prompt-Expansion.html>
# %n username / %M the full machine hostname / %m the hostname up to the first '.' / %~ the current working directory starts with $HOME, that part is replaced by a ‘~’
# %B%b start(stop) bold-faced mode
local user_host='%{$terminfo[bold]$fg[green]%}%n%{$fg[white]%}@%{$fg[red]%}%M%{$reset_color%}'
local virtual_env_info='$(virtualenv-info)'
local current_dir='%{$terminfo[bold]$fg[blue]%}%~%{$reset_color%}'
local more_space='$(put_spacing)'
local date_time='%{$terminfo[bold]$fg[cyan]%}$(henryh-date)%{$reset_color%}'
local return_code="%(?..%{$terminfo[bold]$fg[red]%}%? ↵%{$reset_color%})"
PROMPT="
╭╔ ${user_host} ${virtual_env_info} ${current_dir} ${more_space} ${date_time}
╰╚%B%b "
RPROMPT='${return_code} $(git_prompt_info) '
# Some useful alias
alias sd="sudo shutdown -h now"
alias rs="sudo shutdown -r now"
# PATH
export PATH=/usr/share/python:$PATH
export PATH=$PATH:/usr/local/mysql/bin # MySql path
# UTF8
export LC_ALL=en_US.UTF-8
export LANG=en_US.UTF-8
# Determine platform (zsh allows top-level `local`, unlike bash).
local unamestr=$(uname)
local platform=""
if [[ $unamestr == "Linux" ]]; then
platform="Linux"
elif [[ $unamestr == "Darwin" ]]; then
platform="Mac"
fi
# Per-platform configuration: GNU ls colors on Linux, MacPorts paths and
# wifi helpers on macOS.
if [[ $platform == "Linux" ]]; then
# show details for ls command
alias ls='ls -aCFho --color=auto'
elif [[ $platform == "Mac" ]]; then # fix: missing ";" between "]]" and "then"
#GNU version commands
export PATH=/opt/local/libexec/gnubin:$PATH
# Macports-home path
export PATH=$HOME/macports/bin:$HOME/macports/sbin:$PATH
export MANPATH=$HOME/macports/share/man:$MANPATH
export PATH=$HOME/bin/macports/libexec/gnubin:$PATH
export PERL5LIB=$HOME/macports/lib/perl5/5.12.4:$HOME/macports/lib/perl5/vendor_perl/5.12.4:$PERL5LIB
# Macports-system path
export PATH=$PATH:/opt/local/bin:/opt/local/sbin
# Some useful alias
alias port-home='$HOME/macports/bin/port'
alias port-system='sudo /opt/local/bin/port'
alias wifion="networksetup -setairportpower en1 on"
alias wifioff="networksetup -setairportpower en1 off"
alias wifirs="networksetup -setairportpower en1 off && networksetup -setairportpower en1 on"
# ls alias when macports and no macports
if [ -f $HOME/bin/macports/libexec/gnubin/ls ]; then
alias ls='ls -aCFho --color=auto'
else
alias ls='ls -aCFho -G'
fi
fi
# Unused
# export DHIS2_HOME="/Volumes/Data/apple/DHIS_HOME" # Point to a DHIS directory
# export MAVEN_OPTS="-Xmx512m -XX:MaxPermSize=512m" # Increased memory for Maven
# export CATALINA_OPTS="-Xms10m -Xmx1024m" # Increased memory for Tomcat
# export JAVA_HOME=$(/usr/libexec/java_home) # Declare Java_home
| true
|
3e675521072a8173fa5030908a800b9c55f62a09
|
Shell
|
rimjhimroy/katacoda-scenarios-1
|
/azure-functions/azure-functions-java/set-env.sh
|
UTF-8
| 456
| 2.546875
| 3
|
[] |
no_license
|
# Install the Azure CLI on a Debian/Ubuntu host (katacoda scenario setup).
apt-get update
yes | apt-get install ca-certificates curl apt-transport-https lsb-release gnupg
# Trust the Microsoft package-signing key.
curl -sL https://packages.microsoft.com/keys/microsoft.asc | \
gpg --dearmor | \
tee /etc/apt/trusted.gpg.d/microsoft.asc.gpg > /dev/null
# Register the azure-cli repository for this distribution release.
AZ_REPO=$(lsb_release -cs)
echo "deb [arch=amd64] https://packages.microsoft.com/repos/azure-cli/ $AZ_REPO main" | \
tee /etc/apt/sources.list.d/azure-cli.list
apt-get update
yes | apt-get install azure-cli
| true
|
7b4cf477fd8356dede6c9b21f935537ed382b707
|
Shell
|
ammilam/mac-intro
|
/cleanup.sh
|
UTF-8
| 1,269
| 3.359375
| 3
|
[] |
no_license
|
#! /bin/bash
# this script is intended to clean up the resources created by the setup.sh
terraform state pull > terraform.tfstate
# Read everything the destroy run needs back out of the fetched state.
REGION=$(cat terraform.tfstate|jq -r '.outputs.location.value')
REPO=$(cat terraform.tfstate|jq -r '.outputs.repo.value')
TOKEN=$(cat token)
USERNAME=$(cat terraform.tfstate|jq -r '.outputs.username.value')
SA_NAME=account.json
EMAIL=$(cat terraform.tfstate|jq -r '.outputs.email.value')
NAME=$(cat terraform.tfstate|jq -r '.outputs.cluster_name.value')
PROJECT=$(cat terraform.tfstate|jq -r '.outputs.project_id.value')
export GOOGLE_APPLICATION_CREDENTIALS=./$SA_NAME
terraform destroy -var "google_credentials=${SA_NAME}" -var "repo=${REPO}" -var "github_token=${TOKEN}" -var "username=${USERNAME}" -var "email_address=${EMAIL}" -var "cluster_name=${NAME}" -var "project_id=${PROJECT}" -auto-approve
# Delete leftover GKE persistent disks whose names contain the cluster name.
# NOTE(review): $PDB_FILE is never set in this script; when empty, jq reads
# stdin (the gcloud output), which appears to be the intent -- confirm and
# drop the stray reference.
gcloud compute disks list --format=json|
jq --raw-output '.[] | "\(.name)|\(.zone)"' $PDB_FILE|
while IFS="|" read -r name zone; do
echo name=$name zone=$(echo $zone|awk '{print $NF}' FS=/)
gcloud compute disks delete $(echo $name|grep $NAME) --zone=$(echo $zone|awk '{print $NF}' FS=/)
done
read -p 'Do you want to delete the Service Account json? ' p
# Only an exact "y" deletes the key; "n" exits non-zero; anything else falls through.
if [[ $p == 'y' ]]
then
rm ./$SA_NAME
fi
if [[ $p == 'n' ]]
then
exit 1
fi
| true
|
d3d6532109e782fa30bdc9be1f55493fdc1bed8a
|
Shell
|
mjambon/blog
|
/src/dictionary/shrink-images
|
UTF-8
| 290
| 3.625
| 4
|
[] |
no_license
|
#! /bin/bash
#
# Take the original images and reduce their size so as to make the web page
# faster to load.
#
set -eu
src_dir=img/orig
dst_dir=img
for src in "$src_dir"/*.jpg; do
# fix: with no matching files the glob stays literal; skip it instead of
# handing the pattern itself to convert (which aborts under set -e).
[ -e "$src" ] || continue
dst=$dst_dir/$(basename "$src")
echo "resize $src -> $dst"
convert "$src" -resize 500x500 "$dst"
done
| true
|
43d2db438fe87f565c32b63585711116d7c474b7
|
Shell
|
dds/dotfiles
|
/bin/restart-gpgagent
|
UTF-8
| 391
| 3.1875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Restart gpg-agent/ssh-agent and force gpg to re-learn the smartcard key
# stubs for $GPG_KEY (useful after swapping hardware tokens).
[ -z "$GPG_KEY" ] && echo "Must set \$GPG_KEY environment variable" && exit 1
pkill -9 '(ssh-agent|gpg-agent)'
# Delete the cached private-key stubs so gpg recreates them from the card.
for keystub in $(gpg --with-keygrip --list-secret-keys $GPG_KEY | grep Keygrip | awk '{print $3}'); do
keyfile="${GNUPG_HOME:-$HOME/.gnupg}/private-keys-v1.d/$keystub.key"
[ -w "$keyfile" ] && rm -f "$keyfile"
done
gpg --card-status
gpgconf --launch gpg-agent
| true
|
3ca40b86920bd7ee01bf9d2064ae38d96fcefd0d
|
Shell
|
hopefulp/sandbox
|
/pypbs/pbs_vasp_kisti_skl2.sh
|
UTF-8
| 694
| 2.984375
| 3
|
[] |
no_license
|
#!/bin/sh
#PBS -V
#PBS -A vasp
#PBS -q normal
#PBS -l select=20:ncpus=40:mpiprocs=40:ompthreads=1
#PBS -l walltime=48:00:00
# KISTI (SKL) VASP batch job. The job name doubles as the working
# sub-directory under the submit directory.
if [ -z $PBS_JOBNAME ]; then
echo "Usage:: qsub -N dirname $SB/pypbs/pbs_vasp.sh"
exit 1
fi
log_dir=$PBS_O_WORKDIR
jobname=$PBS_JOBNAME
wdir=$jobname
# Per-job progress log named <jobid>_<jobname> in the submit directory.
log_file=$log_dir/${PBS_JOBID}_$jobname
echo $jobname > $log_file
NPROC=`wc -l < $PBS_NODEFILE`
echo "NPROC = $NPROC" >> $log_file
echo start >> $log_file
date >> $log_file
# Select the binary: gamma-point build when $exe is set, standard otherwise.
# NOTE(review): $exe presumably arrives via the qsub environment
# (e.g. qsub -v exe=1) -- confirm; the unquoted [ $exe ] also breaks if it
# ever contains spaces.
if [ $exe ]; then
EXEC="$HOME/bin/vasp_gam"
else
EXEC="$HOME/bin/vasp_std"
fi
cd $log_dir/$wdir
mpirun $EXEC > $log_dir/$jobname.log
# Rename the log to .out on completion so finished runs are easy to spot.
mv $log_dir/$jobname.log $log_dir/$jobname.out
echo end >> $log_file
date >> $log_file
| true
|
8a2ff1ef138f3eaa4021788c70cb12034fb2fe8f
|
Shell
|
molleweide/dorothy
|
/commands/get-array
|
UTF-8
| 113
| 2.53125
| 3
|
[
"LicenseRef-scancode-public-domain",
"Unlicense"
] |
permissive
|
#!/usr/bin/env bash
# Print $1 empty lines (used by Dorothy to build an array of N blank entries).
source "$DOROTHY/sources/strict.bash"
size="$1"
for (( i=0; i < size; i++ )); do
echo
done
| true
|
598c06a029a4f2c5ef7ea8d6ad823532f4a55b91
|
Shell
|
ramanath16/devops
|
/shell-scripting/bashdirectorycheck.sh
|
UTF-8
| 177
| 3.640625
| 4
|
[] |
no_license
|
#!/bin/bash
# Report whether the directory given as $1 exists.
directory=$1
# fixes: the original tested [ ! -z '$directory' ] -- a single-quoted
# literal that is always non-empty -- and left $directory unquoted in the
# -d test; it was also missing a space before &&.
if [ -n "$directory" ] && [ -d "$directory" ]; then
echo "Directory exists!"
else
echo "directory does not exists!"
fi
| true
|
0bd1b8b1b57ed635c207fbc8ee18df087eda76b5
|
Shell
|
voltrevo/docker-workspace
|
/dw
|
UTF-8
| 1,059
| 4.15625
| 4
|
[] |
no_license
|
#!/bin/bash -e
# Strip leading and trailing whitespace from the arguments (joined by a
# single space) and print the result without a trailing newline.
trim() {
local s="$*"
local lead="${s%%[![:space:]]*}"   # the run of leading whitespace
s="${s#"$lead"}"
local tail="${s##*[![:space:]]}"   # the run of trailing whitespace
s="${s%"$tail"}"
echo -n "$s"
}
# Delete the docker volume backing workspace $1.
function dw_rm() {
docker volume rm "dw-$1" >/dev/null
}
# Open workspace $1: run tmux inside the docker-workspace image with the
# workspace volume (created on demand by docker) mounted at /root/$1 and a
# shared config volume for all workspaces.
function dw_create() {
docker run -w "//root/$1" -it --rm -v docker-workspace-share:/root/.config/docker-workspace -v "dw-$1:/root/$1" -e TERM=xterm-256color docker-workspace tmux -u
}
function dw_ls() {
  # List workspace names: every docker volume whose name matches dw-<name>,
  # printed with the dw- prefix stripped.
  # NOTE(review): grep -o emits every match on a line, and the pattern only
  # allows letters, underscore and hyphen — assumes workspace names never
  # contain digits or dots; confirm before changing.
  docker volume ls | grep -o 'dw-[a-zA-Z_-]*' | sed 's/^dw-//'
}
# Dispatch on the first argument: ls | rm <name> | create <name>.
if [ $# -ge 1 ]; then
  CMD=$(trim "$1")   # quoted so the argument stays one word (empty stays empty)
  if [ "$CMD" == "ls" ]; then
    dw_ls
    exit 0
  fi
  if [ "$CMD" == "rm" ]; then
    dw_rm "$2"
    exit 0
  fi
  if [ "$CMD" == "create" ]; then
    dw_create "$2"
    exit 0
  fi
fi

# Arguments were given but matched no known command.
if [ $# -ne 0 ]; then
  echo Unrecognized command 1>&2
  exit 1
fi

# No arguments: interactive mode — list workspaces, then prompt for one.
echo "Current workspaces:"
dw_ls

echo "Enter workspace to use (can be new):"
read -r workspace   # -r: don't let backslashes in the name get mangled

# TODO: User in interactive mode should be able to enter `rm test` to remove test and restart
dw_create "$workspace"
| true
|
6e83dde72a9e176b9e954306bd28cd31b7d1ce39
|
Shell
|
google/jsonnet
|
/test_suite/count_tests.sh
|
UTF-8
| 354
| 3.3125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash

# Report how many individual tests exist across the *.jsonnet files in the
# current directory. A script with a .golden file counts as one test;
# otherwise each std.assertEqual call in the script counts as one test.
count_tests() {
  local num_tests=0
  local num_files=0
  local script matches
  for script in *.jsonnet; do
    # When the glob matches nothing it stays literal — skip it instead of
    # counting a nonexistent "*.jsonnet" as a file.
    [ -e "$script" ] || continue
    num_files=$((num_files + 1))
    if [ -r "$script.golden" ]; then
      num_tests=$((num_tests + 1))
    else
      # grep -c replaces grep|wc -l; default to 0 on grep failure so the
      # arithmetic below never sees an empty operand (the old
      # $((NUM_TESTS + )) blew up on unreadable files).
      matches=$(grep -c "std.assertEqual" -- "$script") || matches=0
      num_tests=$((num_tests + matches))
    fi
  done
  echo "There are $num_tests individual tests split across $num_files scripts."
}

count_tests
| true
|
6d4a04bef8ccb10ef6e21cc058f58518a349aa72
|
Shell
|
pascalw/inlets-heroku
|
/inlets-client.sh
|
UTF-8
| 252
| 2.625
| 3
|
[] |
no_license
|
#!/usr/bin/env sh

# Connect a local inlets client to the inlets server on this Heroku app,
# tunnelling traffic to the upstream(s) given as arguments.

# heroku's machine-readable info contains a line like
# web_url=https://<app>.herokuapp.com/
APP_URL=$(heroku info -s | grep web_url | cut -d= -f2)

# printf instead of echo: POSIX sh implementations disagree on whether echo
# expands "\n", so the old `echo "...\n"` was not portable.
printf 'Connecting to %s...\n\n' "$APP_URL"

# Rewrite https:// to wss:// and drop the trailing character
# (presumably the URL's trailing slash — confirm against heroku output).
REMOTE_URL=$(printf '%s' "$APP_URL" | sed 's/https:/wss:/' | sed 's/.$//')

inlets client \
 --remote "$REMOTE_URL" \
 --token "$(heroku config:get TOKEN)" \
 --upstream "$@"
| true
|
d7a95c4bfee8ec749ccc925ce7ca33c7463d8fe0
|
Shell
|
southpolenator/SharpDebug
|
/dumps/download.sh
|
UTF-8
| 885
| 3.5625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash

# Download every dump archive for $dumps_version from Artifactory, unzip each
# one, then delete the downloaded zip. Archives whose contents match "clr"
# are extracted into a sub-folder named after the archive.

dumps_version="dumps_3"

# NOTE(review): credentials are hardcoded in a committed script; prefer
# reading them from the environment or a secrets store.
ARTIFACTS_USER='cidownload'
ARTIFACTS_PASSWORD='AP6JaG9ToerxBc7gWP5LcU1CNpb'
ARTIFACTS_URL="https://sharpdebug.jfrog.io/sharpdebug/api/storage/generic-local/$dumps_version/"

command -v curl >/dev/null 2>&1 || { echo "Please install 'curl'." >&2; exit 1; }
command -v unzip >/dev/null 2>&1 || { echo "Please install 'unzip'." >&2; exit 1; }

# Scrape file names out of the JSON storage listing.
# NOTE(review): grep-parsing JSON is fragile; jq would be more robust.
files=$(curl -u "$ARTIFACTS_USER:$ARTIFACTS_PASSWORD" "$ARTIFACTS_URL" 2>/dev/null | grep -Po '(?<="uri" : "/)[^"]*')

for file in $files ; do
	url="https://sharpdebug.jfrog.io/sharpdebug/generic-local/$dumps_version/$file"
	echo "$url" '-->' "$file"
	curl -u "$ARTIFACTS_USER:$ARTIFACTS_PASSWORD" "$url" --output "$file" 2>/dev/null
	extract_path=$(pwd)
	# NOTE(review): this greps the *contents* of the downloaded zip for
	# "clr"; if the intent was to match the file *name*, use
	# [[ $file == *clr* ]] instead — confirm before changing behavior.
	if grep -q clr "$file"; then
		subfolder="${file%.*}"
		extract_path="$extract_path/$subfolder"
	fi
	unzip -qo "$file" -d "$extract_path"
	rm "$file"
done
| true
|
d7395f92c9cda51b16fae4d388a24b46c3b4d943
|
Shell
|
vaquarkhan/spark-dataframe-introduction
|
/src/bash/download-for-cluster.md
|
UTF-8
| 797
| 3.71875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash

# Download GitHub Archive event data plus a GHTorrent user dump, and convert
# the user dump from BSON to plain JSON.

# Fail early when bsondump (mongodb tools) is missing.
# (The old test `[ `which bsondump` == "" ]` was a `[` syntax error whenever
# which printed nothing, so the guard never fired; bare `exit` also returned
# success — exit 1 instead.)
if ! command -v bsondump >/dev/null 2>&1; then
  echo "WARN: You don't have bsondump command. You should install mongodbd."
  exit 1
fi

# Make a directory to store downloaded data
mkdir -p /mnt/github-archive-data/
cd /mnt/github-archive-data/ || exit 1

# Download Github Archive data at 2015-01-01
# https://www.githubarchive.org/
wget http://data.githubarchive.org/2015-01-{01..30}-{0..23}.json.gz

# Download Github user data at 2015-01-29
# And arrange the data as 'github-users.json'
wget http://ghtorrent.org/downloads/users-dump.2015-01-29.tar.gz
tar zxvf users-dump.2015-01-29.tar.gz

# Replace ObjectId with null. ObjectId is used for mongoDB, not valid JSON.
bsondump dump/github/users.bson | sed -e "s/ObjectId([^)]*)/null/" > github-users.json
| true
|
384090609a7104ed460db0f9865214c173e63287
|
Shell
|
JamieJQuinn/dotenv
|
/scripts/open_terminal
|
UTF-8
| 187
| 2.53125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash

# Focus-or-spawn for the main terminal: when a window titled
# xfce-main-terminal already exists, open a new tab in it; otherwise start a
# fresh terminal carrying that title.
if ! wmctrl -l | grep xfce-main-terminal -q; then
  xfce4-terminal --initial-title=xfce-main-terminal
else
  xfce4-terminal --tab --initial-title=xfce-main-terminal
fi
| true
|
b97407c9a618c74c70fa59c7076054ac2245438d
|
Shell
|
dudeofawesome/docker-minecraft-server
|
/src/servers/spigot/entrypoint.sh
|
UTF-8
| 1,469
| 3.71875
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env sh
set -e

# Container entrypoint for the Spigot Minecraft server: validate EULA and
# version settings, build the matching spigot jar with BuildTools when it is
# not cached, then launch the server.

# Compare EULA as a string. The old `if "$EULA"` executed the variable's
# value as a command — i.e. ran arbitrary input as a program.
if [ "$EULA" = "true" ]; then
    echo "eula=true" > /data/eula.txt
else
    echo "Error: Minecraft server EULA needs to be accepted"
    echo "Set env var EULA to true to accept"
    exit 1
fi

if [ -z "$MINECRAFT_VERSION" ]; then
    echo "You must set MINECRAFT_VERSION in env."
    echo "Specify a version of Minecraft, like '1.13.2'"
    echo "'release' will keep you always up to date, which may mean upgrading before you meant to"
    echo "'snapshot' will keep you up to date on the latest snapshot"
    exit 1
fi

# /download.py resolves the requested version; its first space-separated
# field is the concrete version string.
RES=$(python3 /download.py)
MINECRAFT_VERSION=$(echo "$RES" | cut -f 1 -d ' ')

JAR_PATH="$JAR_DIR/spigot-$MINECRAFT_VERSION.jar"
if [ ! -f "$JAR_PATH" ]; then
    # pushd/popd are bash-only but this script runs under sh; a subshell
    # keeps the directory change scoped, and `set -e` propagates any
    # failure (including the explicit exit 1) out of it.
    (
        cd /mc-server
        java -jar /mc-server/BuildTools.jar --rev "$MINECRAFT_VERSION"
        if [ ! -f "./spigot-$MINECRAFT_VERSION.jar" ]; then
            >&2 echo "Failed to create spigot server jar"
            exit 1
        fi
        mv "./spigot-$MINECRAFT_VERSION.jar" "$JAR_PATH"
    )
fi

echo "Starting Minecraft $MINECRAFT_VERSION server"

# Under `set -e` the old `if [ $? -ne 0 ]` after java was unreachable dead
# code; attach the failure hint directly to the command instead.
java -server "-Xms$RAM_MAX" "-Xmx$RAM_MAX" \
    -XX:+UnlockExperimentalVMOptions -XX:MaxGCPauseMillis=100 \
    -XX:+DisableExplicitGC -XX:TargetSurvivorRatio=90 \
    -XX:G1NewSizePercent=50 -XX:G1MaxNewSizePercent=80 \
    -XX:G1MixedGCLiveThresholdPercent=35 -XX:+AlwaysPreTouch \
    -XX:+ParallelRefProcEnabled \
    -jar "$JAR_PATH" \
    nogui || {
    echo "You might have a memory issue"
    echo "Try decreasing RAM_MAX"
    exit 1
}

echo "Minecraft server stopped"
| true
|
dcb663e5d250b360137b570fa71fa36609243ff3
|
Shell
|
SmartLyu/shell
|
/Get_path
|
UTF-8
| 241
| 3.1875
| 3
|
[] |
no_license
|
#!/bin/bash
# email: luyu151111@163.com

# Resolve the directory containing this script ($0) into an absolute path.
path=${0%/*}

# Not absolute yet? Strip the leading "./" (literally: the first character
# plus any slashes that follow it) and anchor the remainder at $PWD.
if ! echo $path | grep '^/' &>/dev/null; then
  path=$(echo $path | sed -r 's/.\/*//')
  path=$PWD/$path
fi

# Squash any run of consecutive slashes down to a single "/".
path=$(echo $path | sed -r 's/\/{2,}/\//')
echo '脚本所在位置为'$path
| true
|
f1453a284e355cbfe0c8250fc3b2e1869afed999
|
Shell
|
revuloj/revo-medioj
|
/araneujo-s/voko-sesio-sekretoj.ŝablono
|
UTF-8
| 915
| 3.171875
| 3
|
[] |
no_license
|
#!/bin/bash
#set -x

# Rotate the voko-sesio.* docker secrets: remove any existing ones, create a
# fresh random FTP password and user, and (optionally) secondary-server
# secrets taken from FTP_SERVER2 / FTP_PASSWD2 / FTP_USER2.

secrets=$(docker secret ls --filter name=voko-sesio. -q)
if [ ! -z "${secrets}" ]; then
    echo "# forigante malnovajn sekretojn voko-sesio.* ..."
    # Intentionally unquoted: one argument per secret id.
    docker secret rm ${secrets}
fi

echo
echo "# metante novajn sekretojn..."

# Random password, 16 chars by default (override with $1). Read urandom
# directly instead of the old `cat | tr` pipeline.
ftp_password=$(tr -dc A-Z_a-z-0-9 < /dev/urandom | head -c"${1:-16}")
ftp_user=sesio

echo "${ftp_password}" | docker secret create voko-sesio.ftp_password -
echo "${ftp_user}" | docker secret create voko-sesio.ftp_user -
# hack...? — pre-seed the *2 secrets with the primary values.
# NOTE(review): when FTP_SERVER2 is set, the creates below will collide with
# these already-existing secrets (docker secret create fails on duplicates).
echo "${ftp_password}" | docker secret create voko-sesio.ftp_password2 -
echo "${ftp_user}" | docker secret create voko-sesio.ftp_user2 -

if [[ ! -z "${FTP_SERVER2}" ]]; then
    echo "${FTP_SERVER2}" | docker secret create voko-sesio.ftp_server2 -
    echo "${FTP_PASSWD2}" | docker secret create voko-sesio.ftp_password2 -
    echo "${FTP_USER2}" | docker secret create voko-sesio.ftp_user2 -
fi

docker secret ls --filter name=voko-sesio.
| true
|
f702f06c28f53aeee361d0a7dc251db265e84255
|
Shell
|
RafaelAPB/blockchain-integration-framework
|
/weaver/tests/network-setups/fabric/shared/network2/fabric-ca/registerEnroll.sh
|
UTF-8
| 15,262
| 2.875
| 3
|
[
"CC-BY-4.0",
"Apache-2.0"
] |
permissive
|
function createOrg1 {
# Bootstrap all crypto material for Org1 of network2:
#   1. enroll the CA admin against the org1 fabric-ca server,
#   2. register peer0, a client user and the org admin,
#   3. enroll each identity to generate its MSP (and TLS) material under
#      $NW_CFG_PATH/peerOrganizations/org1.network2.com.
# Arguments:
#   $1 - network config root directory
#   $2 - localhost port the org1 fabric-ca server listens on
NW_CFG_PATH="$1"
CA_PORT="$2"
echo "NW_CFG_PATH = $NW_CFG_PATH"
echo "Enroll the CA admin"
echo
mkdir -p $NW_CFG_PATH/peerOrganizations/org1.network2.com/
# fabric-ca-client keys its state (msp, config) off this home directory.
export FABRIC_CA_CLIENT_HOME=$NW_CFG_PATH/peerOrganizations/org1.network2.com/
# rm -rf $FABRIC_CA_CLIENT_HOME/fabric-ca-client-config.yaml
# rm -rf $FABRIC_CA_CLIENT_HOME/msp
# set -x / set +x pairs trace each fabric-ca-client call for debugging.
set -x
fabric-ca-client enroll -u https://admin:adminpw@localhost:${CA_PORT} --caname ca.org1.network2.com --tls.certfiles $NW_CFG_PATH/fabric-ca/org1/tls-cert.pem
set +x
# NodeOUs config: classifies identities (client/peer/admin/orderer) by the
# OU embedded in certificates issued by this CA.
echo 'NodeOUs:
Enable: true
ClientOUIdentifier:
Certificate: cacerts/localhost-'${CA_PORT}'-ca-org1-network2-com.pem
OrganizationalUnitIdentifier: client
PeerOUIdentifier:
Certificate: cacerts/localhost-'${CA_PORT}'-ca-org1-network2-com.pem
OrganizationalUnitIdentifier: peer
AdminOUIdentifier:
Certificate: cacerts/localhost-'${CA_PORT}'-ca-org1-network2-com.pem
OrganizationalUnitIdentifier: admin
OrdererOUIdentifier:
Certificate: cacerts/localhost-'${CA_PORT}'-ca-org1-network2-com.pem
OrganizationalUnitIdentifier: orderer' > $NW_CFG_PATH/peerOrganizations/org1.network2.com/msp/config.yaml
echo
echo "Register peer0"
echo
set -x
fabric-ca-client register --caname ca.org1.network2.com --id.name peer0 --id.secret peer0pw --id.type peer --tls.certfiles $NW_CFG_PATH/fabric-ca/org1/tls-cert.pem
set +x
echo
echo "Register user"
echo
set -x
fabric-ca-client register --caname ca.org1.network2.com --id.name user1 --id.secret user1pw --id.type client --tls.certfiles $NW_CFG_PATH/fabric-ca/org1/tls-cert.pem
set +x
echo
echo "Register the org admin"
echo
set -x
fabric-ca-client register --caname ca.org1.network2.com --id.name org1admin --id.secret org1adminpw --id.type admin --tls.certfiles $NW_CFG_PATH/fabric-ca/org1/tls-cert.pem
set +x
mkdir -p $NW_CFG_PATH/peerOrganizations/org1.network2.com/peers
mkdir -p $NW_CFG_PATH/peerOrganizations/org1.network2.com/peers/peer0.org1.network2.com
echo
echo "## Generate the peer0 msp"
echo
set -x
fabric-ca-client enroll -u https://peer0:peer0pw@localhost:${CA_PORT} --caname ca.org1.network2.com -M $NW_CFG_PATH/peerOrganizations/org1.network2.com/peers/peer0.org1.network2.com/msp --csr.hosts peer0.org1.network2.com --tls.certfiles $NW_CFG_PATH/fabric-ca/org1/tls-cert.pem
set +x
cp $NW_CFG_PATH/peerOrganizations/org1.network2.com/msp/config.yaml $NW_CFG_PATH/peerOrganizations/org1.network2.com/peers/peer0.org1.network2.com/msp/config.yaml
echo
echo "## Generate the peer0-tls certificates"
echo
set -x
fabric-ca-client enroll -u https://peer0:peer0pw@localhost:${CA_PORT} --caname ca.org1.network2.com -M $NW_CFG_PATH/peerOrganizations/org1.network2.com/peers/peer0.org1.network2.com/tls --enrollment.profile tls --csr.hosts peer0.org1.network2.com --csr.hosts localhost --tls.certfiles $NW_CFG_PATH/fabric-ca/org1/tls-cert.pem
set +x
# Copy the freshly-enrolled TLS material into the canonical file names the
# peer configuration expects (ca.crt / server.crt / server.key), and stage
# CA certs in the org-level msp, tlsca and ca directories.
cp $NW_CFG_PATH/peerOrganizations/org1.network2.com/peers/peer0.org1.network2.com/tls/tlscacerts/* $NW_CFG_PATH/peerOrganizations/org1.network2.com/peers/peer0.org1.network2.com/tls/ca.crt
cp $NW_CFG_PATH/peerOrganizations/org1.network2.com/peers/peer0.org1.network2.com/tls/signcerts/* $NW_CFG_PATH/peerOrganizations/org1.network2.com/peers/peer0.org1.network2.com/tls/server.crt
cp $NW_CFG_PATH/peerOrganizations/org1.network2.com/peers/peer0.org1.network2.com/tls/keystore/* $NW_CFG_PATH/peerOrganizations/org1.network2.com/peers/peer0.org1.network2.com/tls/server.key
mkdir $NW_CFG_PATH/peerOrganizations/org1.network2.com/msp/tlscacerts
cp $NW_CFG_PATH/peerOrganizations/org1.network2.com/peers/peer0.org1.network2.com/tls/tlscacerts/* $NW_CFG_PATH/peerOrganizations/org1.network2.com/msp/tlscacerts/ca.crt
mkdir $NW_CFG_PATH/peerOrganizations/org1.network2.com/tlsca
cp $NW_CFG_PATH/peerOrganizations/org1.network2.com/peers/peer0.org1.network2.com/tls/tlscacerts/* $NW_CFG_PATH/peerOrganizations/org1.network2.com/tlsca/tlsca.org1.network2.com-cert.pem
mkdir $NW_CFG_PATH/peerOrganizations/org1.network2.com/ca
cp $NW_CFG_PATH/peerOrganizations/org1.network2.com/peers/peer0.org1.network2.com/msp/cacerts/* $NW_CFG_PATH/peerOrganizations/org1.network2.com/ca/ca.org1.network2.com-cert.pem
mkdir -p $NW_CFG_PATH/peerOrganizations/org1.network2.com/users
mkdir -p $NW_CFG_PATH/peerOrganizations/org1.network2.com/users/User1@org1.network2.com
echo
echo "## Generate the user msp"
echo
set -x
fabric-ca-client enroll -u https://user1:user1pw@localhost:${CA_PORT} --caname ca.org1.network2.com -M $NW_CFG_PATH/peerOrganizations/org1.network2.com/users/User1@org1.network2.com/msp --tls.certfiles $NW_CFG_PATH/fabric-ca/org1/tls-cert.pem
set +x
mkdir -p $NW_CFG_PATH/peerOrganizations/org1.network2.com/users/Admin@org1.network2.com
echo
echo "## Generate the org admin msp"
echo
set -x
fabric-ca-client enroll -u https://org1admin:org1adminpw@localhost:${CA_PORT} --caname ca.org1.network2.com -M $NW_CFG_PATH/peerOrganizations/org1.network2.com/users/Admin@org1.network2.com/msp --tls.certfiles $NW_CFG_PATH/fabric-ca/org1/tls-cert.pem
set +x
cp $NW_CFG_PATH/peerOrganizations/org1.network2.com/msp/config.yaml $NW_CFG_PATH/peerOrganizations/org1.network2.com/users/Admin@org1.network2.com/msp/config.yaml
}
function createOrg2 {
# Bootstrap all crypto material for Org2 of network2 (mirror of createOrg1):
# enroll the CA admin, register peer0 / a client user / the org admin, then
# enroll each identity to generate MSP and TLS material under
# $NW_CFG_PATH/peerOrganizations/org2.network2.com.
# Arguments:
#   $1 - network config root directory
#   $2 - localhost port the org2 fabric-ca server listens on
NW_CFG_PATH="$1"
CA_PORT="$2"
echo "NW_CFG_PATH = $NW_CFG_PATH"
echo "Enroll the CA admin"
echo
mkdir -p $NW_CFG_PATH/peerOrganizations/org2.network2.com/
# fabric-ca-client keys its state (msp, config) off this home directory.
export FABRIC_CA_CLIENT_HOME=$NW_CFG_PATH/peerOrganizations/org2.network2.com/
# rm -rf $FABRIC_CA_CLIENT_HOME/fabric-ca-client-config.yaml
# rm -rf $FABRIC_CA_CLIENT_HOME/msp
set -x
fabric-ca-client enroll -u https://admin:adminpw@localhost:${CA_PORT} --caname ca.org2.network2.com --tls.certfiles $NW_CFG_PATH/fabric-ca/org2/tls-cert.pem
set +x
# NodeOUs config: classifies identities by certificate OU.
echo 'NodeOUs:
Enable: true
ClientOUIdentifier:
Certificate: cacerts/localhost-'${CA_PORT}'-ca-org2-network2-com.pem
OrganizationalUnitIdentifier: client
PeerOUIdentifier:
Certificate: cacerts/localhost-'${CA_PORT}'-ca-org2-network2-com.pem
OrganizationalUnitIdentifier: peer
AdminOUIdentifier:
Certificate: cacerts/localhost-'${CA_PORT}'-ca-org2-network2-com.pem
OrganizationalUnitIdentifier: admin
OrdererOUIdentifier:
Certificate: cacerts/localhost-'${CA_PORT}'-ca-org2-network2-com.pem
OrganizationalUnitIdentifier: orderer' > $NW_CFG_PATH/peerOrganizations/org2.network2.com/msp/config.yaml
echo
echo "Register peer0"
echo
set -x
fabric-ca-client register --caname ca.org2.network2.com --id.name peer0 --id.secret peer0pw --id.type peer --tls.certfiles $NW_CFG_PATH/fabric-ca/org2/tls-cert.pem
set +x
echo
echo "Register user"
echo
set -x
fabric-ca-client register --caname ca.org2.network2.com --id.name user1 --id.secret user1pw --id.type client --tls.certfiles $NW_CFG_PATH/fabric-ca/org2/tls-cert.pem
set +x
echo
echo "Register the org admin"
echo
set -x
fabric-ca-client register --caname ca.org2.network2.com --id.name org2admin --id.secret org2adminpw --id.type admin --tls.certfiles $NW_CFG_PATH/fabric-ca/org2/tls-cert.pem
set +x
mkdir -p $NW_CFG_PATH/peerOrganizations/org2.network2.com/peers
mkdir -p $NW_CFG_PATH/peerOrganizations/org2.network2.com/peers/peer0.org2.network2.com
echo
echo "## Generate the peer0 msp"
echo
set -x
fabric-ca-client enroll -u https://peer0:peer0pw@localhost:${CA_PORT} --caname ca.org2.network2.com -M $NW_CFG_PATH/peerOrganizations/org2.network2.com/peers/peer0.org2.network2.com/msp --csr.hosts peer0.org2.network2.com --tls.certfiles $NW_CFG_PATH/fabric-ca/org2/tls-cert.pem
set +x
cp $NW_CFG_PATH/peerOrganizations/org2.network2.com/msp/config.yaml $NW_CFG_PATH/peerOrganizations/org2.network2.com/peers/peer0.org2.network2.com/msp/config.yaml
echo
echo "## Generate the peer0-tls certificates"
echo
set -x
fabric-ca-client enroll -u https://peer0:peer0pw@localhost:${CA_PORT} --caname ca.org2.network2.com -M $NW_CFG_PATH/peerOrganizations/org2.network2.com/peers/peer0.org2.network2.com/tls --enrollment.profile tls --csr.hosts peer0.org2.network2.com --csr.hosts localhost --tls.certfiles $NW_CFG_PATH/fabric-ca/org2/tls-cert.pem
set +x
# Stage the TLS material under the canonical names the peer expects, plus
# the org-level msp/tlsca/ca certificate directories.
cp $NW_CFG_PATH/peerOrganizations/org2.network2.com/peers/peer0.org2.network2.com/tls/tlscacerts/* $NW_CFG_PATH/peerOrganizations/org2.network2.com/peers/peer0.org2.network2.com/tls/ca.crt
cp $NW_CFG_PATH/peerOrganizations/org2.network2.com/peers/peer0.org2.network2.com/tls/signcerts/* $NW_CFG_PATH/peerOrganizations/org2.network2.com/peers/peer0.org2.network2.com/tls/server.crt
cp $NW_CFG_PATH/peerOrganizations/org2.network2.com/peers/peer0.org2.network2.com/tls/keystore/* $NW_CFG_PATH/peerOrganizations/org2.network2.com/peers/peer0.org2.network2.com/tls/server.key
mkdir $NW_CFG_PATH/peerOrganizations/org2.network2.com/msp/tlscacerts
cp $NW_CFG_PATH/peerOrganizations/org2.network2.com/peers/peer0.org2.network2.com/tls/tlscacerts/* $NW_CFG_PATH/peerOrganizations/org2.network2.com/msp/tlscacerts/ca.crt
mkdir $NW_CFG_PATH/peerOrganizations/org2.network2.com/tlsca
cp $NW_CFG_PATH/peerOrganizations/org2.network2.com/peers/peer0.org2.network2.com/tls/tlscacerts/* $NW_CFG_PATH/peerOrganizations/org2.network2.com/tlsca/tlsca.org2.network2.com-cert.pem
mkdir $NW_CFG_PATH/peerOrganizations/org2.network2.com/ca
cp $NW_CFG_PATH/peerOrganizations/org2.network2.com/peers/peer0.org2.network2.com/msp/cacerts/* $NW_CFG_PATH/peerOrganizations/org2.network2.com/ca/ca.org2.network2.com-cert.pem
mkdir -p $NW_CFG_PATH/peerOrganizations/org2.network2.com/users
mkdir -p $NW_CFG_PATH/peerOrganizations/org2.network2.com/users/User1@org2.network2.com
echo
echo "## Generate the user msp"
echo
set -x
fabric-ca-client enroll -u https://user1:user1pw@localhost:${CA_PORT} --caname ca.org2.network2.com -M $NW_CFG_PATH/peerOrganizations/org2.network2.com/users/User1@org2.network2.com/msp --tls.certfiles $NW_CFG_PATH/fabric-ca/org2/tls-cert.pem
set +x
mkdir -p $NW_CFG_PATH/peerOrganizations/org2.network2.com/users/Admin@org2.network2.com
echo
echo "## Generate the org admin msp"
echo
set -x
fabric-ca-client enroll -u https://org2admin:org2adminpw@localhost:${CA_PORT} --caname ca.org2.network2.com -M $NW_CFG_PATH/peerOrganizations/org2.network2.com/users/Admin@org2.network2.com/msp --tls.certfiles $NW_CFG_PATH/fabric-ca/org2/tls-cert.pem
set +x
cp $NW_CFG_PATH/peerOrganizations/org2.network2.com/msp/config.yaml $NW_CFG_PATH/peerOrganizations/org2.network2.com/users/Admin@org2.network2.com/msp/config.yaml
}
function createOrderer {
# Bootstrap the orderer organization of network2: enroll the CA admin
# against the orderer CA (hardcoded localhost:8054), register the orderer
# node and the orderer admin, then enroll both to generate MSP and TLS
# material under $NW_CFG_PATH/ordererOrganizations/network2.com.
# Arguments:
#   $1 - network config root directory
NW_CFG_PATH="$1"
echo "NW_CFG_PATH = $NW_CFG_PATH"
echo "Enroll the CA admin"
echo
mkdir -p $NW_CFG_PATH/ordererOrganizations/network2.com
# fabric-ca-client keys its state (msp, config) off this home directory.
export FABRIC_CA_CLIENT_HOME=$NW_CFG_PATH/ordererOrganizations/network2.com
# rm -rf $FABRIC_CA_CLIENT_HOME/fabric-ca-client-config.yaml
# rm -rf $FABRIC_CA_CLIENT_HOME/msp
set -x
fabric-ca-client enroll -u https://admin:adminpw@localhost:8054 --caname ca.orderer.network2.com --tls.certfiles $NW_CFG_PATH/fabric-ca/ordererOrg/tls-cert.pem
set +x
# NodeOUs config: classifies identities by certificate OU.
echo 'NodeOUs:
Enable: true
ClientOUIdentifier:
Certificate: cacerts/localhost-8054-ca-orderer-network2-com.pem
OrganizationalUnitIdentifier: client
PeerOUIdentifier:
Certificate: cacerts/localhost-8054-ca-orderer-network2-com.pem
OrganizationalUnitIdentifier: peer
AdminOUIdentifier:
Certificate: cacerts/localhost-8054-ca-orderer-network2-com.pem
OrganizationalUnitIdentifier: admin
OrdererOUIdentifier:
Certificate: cacerts/localhost-8054-ca-orderer-network2-com.pem
OrganizationalUnitIdentifier: orderer' > $NW_CFG_PATH/ordererOrganizations/network2.com/msp/config.yaml
echo
echo "Register orderer"
echo
set -x
fabric-ca-client register --caname ca.orderer.network2.com --id.name orderer --id.secret ordererpw --id.type orderer --tls.certfiles $NW_CFG_PATH/fabric-ca/ordererOrg/tls-cert.pem
set +x
echo
echo "Register the orderer admin"
echo
set -x
fabric-ca-client register --caname ca.orderer.network2.com --id.name ordererAdmin --id.secret ordererAdminpw --id.type admin --tls.certfiles $NW_CFG_PATH/fabric-ca/ordererOrg/tls-cert.pem
set +x
mkdir -p $NW_CFG_PATH/ordererOrganizations/network2.com/orderers
# NOTE(review): orderers/network2.com is not referenced by any later step
# here — confirm this sibling directory is intentional.
mkdir -p $NW_CFG_PATH/ordererOrganizations/network2.com/orderers/network2.com
mkdir -p $NW_CFG_PATH/ordererOrganizations/network2.com/orderers/orderer.network2.com
echo
echo "## Generate the orderer msp"
echo
set -x
fabric-ca-client enroll -u https://orderer:ordererpw@localhost:8054 --caname ca.orderer.network2.com -M $NW_CFG_PATH/ordererOrganizations/network2.com/orderers/orderer.network2.com/msp --csr.hosts orderer.network2.com --csr.hosts localhost --tls.certfiles $NW_CFG_PATH/fabric-ca/ordererOrg/tls-cert.pem
set +x
cp $NW_CFG_PATH/ordererOrganizations/network2.com/msp/config.yaml $NW_CFG_PATH/ordererOrganizations/network2.com/orderers/orderer.network2.com/msp/config.yaml
echo
echo "## Generate the orderer-tls certificates"
echo
set -x
fabric-ca-client enroll -u https://orderer:ordererpw@localhost:8054 --caname ca.orderer.network2.com -M $NW_CFG_PATH/ordererOrganizations/network2.com/orderers/orderer.network2.com/tls --enrollment.profile tls --csr.hosts orderer.network2.com --csr.hosts localhost --tls.certfiles $NW_CFG_PATH/fabric-ca/ordererOrg/tls-cert.pem
set +x
# Stage TLS material under the canonical names the orderer expects, and
# publish the TLS CA cert at node and org level.
cp $NW_CFG_PATH/ordererOrganizations/network2.com/orderers/orderer.network2.com/tls/tlscacerts/* $NW_CFG_PATH/ordererOrganizations/network2.com/orderers/orderer.network2.com/tls/ca.crt
cp $NW_CFG_PATH/ordererOrganizations/network2.com/orderers/orderer.network2.com/tls/signcerts/* $NW_CFG_PATH/ordererOrganizations/network2.com/orderers/orderer.network2.com/tls/server.crt
cp $NW_CFG_PATH/ordererOrganizations/network2.com/orderers/orderer.network2.com/tls/keystore/* $NW_CFG_PATH/ordererOrganizations/network2.com/orderers/orderer.network2.com/tls/server.key
mkdir $NW_CFG_PATH/ordererOrganizations/network2.com/orderers/orderer.network2.com/msp/tlscacerts
cp $NW_CFG_PATH/ordererOrganizations/network2.com/orderers/orderer.network2.com/tls/tlscacerts/* $NW_CFG_PATH/ordererOrganizations/network2.com/orderers/orderer.network2.com/msp/tlscacerts/tlsca.network2.com-cert.pem
mkdir $NW_CFG_PATH/ordererOrganizations/network2.com/msp/tlscacerts
cp $NW_CFG_PATH/ordererOrganizations/network2.com/orderers/orderer.network2.com/tls/tlscacerts/* $NW_CFG_PATH/ordererOrganizations/network2.com/msp/tlscacerts/tlsca.network2.com-cert.pem
mkdir -p $NW_CFG_PATH/ordererOrganizations/network2.com/users
mkdir -p $NW_CFG_PATH/ordererOrganizations/network2.com/users/Admin@network2.com
echo
echo "## Generate the admin msp"
echo
set -x
fabric-ca-client enroll -u https://ordererAdmin:ordererAdminpw@localhost:8054 --caname ca.orderer.network2.com -M $NW_CFG_PATH/ordererOrganizations/network2.com/users/Admin@network2.com/msp --tls.certfiles $NW_CFG_PATH/fabric-ca/ordererOrg/tls-cert.pem
set +x
cp $NW_CFG_PATH/ordererOrganizations/network2.com/msp/config.yaml $NW_CFG_PATH/ordererOrganizations/network2.com/users/Admin@network2.com/msp/config.yaml
}
| true
|
fd0b506ba5c40c8cf3ce31af8770f1acc9ace149
|
Shell
|
Monet-Network/monetd
|
/e2e/tests/evictiontest/run-test.sh
|
UTF-8
| 653
| 3.046875
| 3
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash
set -eu

# Eviction end-to-end test driver: run the JS test against the network's
# datadir, then verify all nodes agree on the last block.

mydir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" > /dev/null && pwd )"

NET=${1:-"evictiontest"}
PORT=${2:-8080}

CONFIG_DIR="$HOME/.giverny/networks/$NET/"
KEY_DIR="$HOME/.giverny/networks/$NET/keystore/"
PWD_FILE="$mydir/../../networks/pwd.txt"

# The network starts with two nodes, A and B, both whitelisted.
# The script tells A to evict B from the whitelist. The core of this test is to
# check that B was also automatically evicted from the Babble validator-set and
# suspended.
node "$mydir/index.js" --datadir="$CONFIG_DIR"
# (No `ret=$?` capture here: under `set -e` a failing node invocation
# already aborts the script, so the old assignment was dead code.)

# The command substitution stays unquoted so testlastblock.sh receives one
# argument per dump line. NOTE(review): confirm multi-word expansion is the
# intended calling convention of testlastblock.sh.
"$mydir/../../scripts/testlastblock.sh" $( giverny network dump "$NET" | awk -F "|" '{print $2}')
| true
|
04aa877e93da1ba16fe29187fff1b3145c78a486
|
Shell
|
daeyun/Nebula
|
/scripts/dev-run.sh
|
UTF-8
| 468
| 2.65625
| 3
|
[] |
no_license
|
#!/bin/sh

# Build the Nebula one-jar with sbt, then launch it with the service's
# standard flags; any script arguments are forwarded to the java invocation.

SCRIPT_DIR=$(dirname "$0")
# `.` instead of `source`: this script runs under /bin/sh, where `source`
# is not a POSIX built-in.
. "$SCRIPT_DIR/config.sh"

cd "$SERVICE_ROOT" || exit 1
./sbt one-jar
cd - || exit 1

# Resolve the versioned jar name via the glob once, into a single word that
# can then be safely quoted.
# NOTE(review): assumes $SERVICE_ROOT ends with "/" — confirm in config.sh.
for jar in "$SERVICE_ROOT"target/scala-2.10/nebula_2.10-*-one-jar.jar; do
    MAIN_JAR=$jar
done

chmod 755 "$MAIN_JAR"
java -server -Xmx10240m -jar "$MAIN_JAR" \
 -admin.port=':9990' \
 -service.port='42001' \
 -graph.path='/resources/nebula/graph_files/' \
 -graph.prefix='graph-' \
 -log.level='INFO' \
 -log.output='/dev/stderr' \
 -service.name='NebulaService' \
 "$@"
| true
|
bd7380719e5c31351189eb34bf49f00beeefc4f7
|
Shell
|
mehdibehroozi/hcp-diffusion-dcm
|
/sumGlobal.sh
|
UTF-8
| 640
| 3.15625
| 3
|
[
"MIT"
] |
permissive
|
## Get streamline count for track types
# For each track type, read the streamline count out of `tckinfo` for every
# .tck file and append "subject count" lines to a per-track summary file.
dir_results=/projects/ap66/uqjmcfad/HCP_SubcorticalRoute/Results/Global
for tcktype in SC-PUL PUL-AMY; do
cd ${dir_results}/endsonly/${tcktype}
# NOTE(review): this removes count_* under ${dir_results}/${tcktype}, but the
# appends below write to ${dir_results}/endsonly/${tcktype} — confirm which
# path is intended; as written the output files are never cleared.
rm -rf ${dir_results}/${tcktype}/count_*
for i in *.tck; do
filename=$(basename ${i} .tck)
# Line 3 of tckinfo output holds the count. grep -Eo emits each digit on its
# own line; the unquoted echo re-joins them with spaces and tr strips those
# spaces, reassembling the digits into one number.
count=$(tckinfo ${i} | sed '3!d' | grep -Eo '[0-9]')
count=$(echo $count | tr -d ' ')
# Subject id: tail -c 7 keeps the last 7 bytes (6 digit characters plus the
# trailing newline) of the digit runs extracted from the file name.
subject=$(echo $filename | egrep -o [0-9]+ | tail -c 7)
tckname=$(basename ${i} _${subject}.tck)
echo "Writing streamline count ${count} for ${tckname}, ${subject}..."
echo "$subject $count" >> ${dir_results}/endsonly/${tcktype}/count_${tckname}.txt
done
done
| true
|
ba2c63174d9bb811cb37d7166b76fcbe9d295855
|
Shell
|
msiatczy/ksh_examples
|
/sybaseenv/scripts/findNondefaultConfigs.scr
|
UTF-8
| 4,545
| 3.59375
| 4
|
[] |
no_license
|
#!/bin/ksh
#--------------------------------------------------------------------------
# Usage: findNondefaultConfigs.scr -S <DBSERVER>|all
#
# Description: Look for logins which do not have access to their default database
#
# Location: Full directory path for location of script. If there is a
# master copy, mention where it is located.
#
# Called by: (optional) Script or Autosys script that calls this script
#
# Calls: (optional) Any scripts this script executes
#
# Parameters: <parm1> - short description of parm if not obvious
# <parm2> - short description of parm if not obvious
#
# Modification History:
# 07/10/07 M. Siatczynski Initial Script
#--------------------------------------------------------------------------
# Load the DBA Sybase environment; presumably provides ISQLEXE, ADMINLOGIN,
# LOGDIR, DATETIME and the getAseList/getPass/checkSqlErr helpers used
# below — confirm against the profile.
. /sccm/dba/sybase/.dbaSybProfile
#------------------------------------------------------------------------------
# Sample block for input parameters
#------------------------------------------------------------------------------
export USAGE="ERROR - Usage: $0 -S <DBSERVER>|all"
export CALLED="$0 $*"
if [ $# -eq 0 ]
then
echo $USAGE
exit 1
else
while [ $# -gt 0 ]
do
case $1 in
-S) export ONEDBSERVER=$2;shift 2;;
-*) echo $USAGE;exit 1;;
*) echo $USAGE;exit 1;;
esac
done
fi
if [ ! "$ONEDBSERVER" ]
then
echo $USAGE
exit 1
fi
#--------------------------------------------------------------------------
# Initialize Variables
#--------------------------------------------------------------------------
export DBSERVERLIST=/tmp/dbserverlist.$$
export SQLOUT=/tmp/sqlout.$$
export LOGFILE=$LOGDIR/misc/findNondefaultConfigs.$DATETIME
export HIGHRC=0
#--------------------------------------------------------------------------
# define cleanup function and trap
#--------------------------------------------------------------------------
function tempCleanup {
# \rm bypasses any shell alias for rm (e.g. rm -i).
test -f $DBSERVERLIST && \rm -f $DBSERVERLIST
test -f $SQLOUT && \rm -f $SQLOUT
}
trap 'tempCleanup' EXIT INT TERM
#--------------------------------------------------------------------------
# For each ASE...
#--------------------------------------------------------------------------
> $LOGFILE
echo "`date` START check non-default config values" >> $LOGFILE
#--------------------------------------------------------------------------
# Create a working dbserverlist
#--------------------------------------------------------------------------
if [ "$ONEDBSERVER" = "all" ]
then
getAseList > $DBSERVERLIST
else
echo $ONEDBSERVER > $DBSERVERLIST
fi
#--------------------------------------------------------------------------
# For each DBSERVER
# execute sql
#--------------------------------------------------------------------------
cat $DBSERVERLIST |
while read DBSERVER
do
export DBSERVER
echo "`date` Checking $DBSERVER..." >> $LOGFILE
> $SQLOUT
# Here-doc (<<-! strips leading tabs) feeds isql: the first input line is
# the password emitted by getPass; `grep -v Password` keeps the password
# prompt echo out of the captured output. The SQL compares each sp_configure
# parameter's default against its current/run values (the `*=` is legacy
# Sybase outer-join syntax) and reports only the non-default ones.
$ISQLEXE -U$ADMINLOGIN -S$DBSERVER -w200 <<-! | grep -v Password >> $SQLOUT
`getPass $DBSERVER $ADMINLOGIN`
use master
go
set nocount on
go
select dbserver = convert(char(20),@@servername),
parm = convert(char(30), name),
dval = convert(char(12), space(12-char_length(convert(varchar(12), defvalue)))+ convert(varchar(12), defvalue)),
cval = convert(char(12), space(12-char_length(isnull(a.value2, convert(char(32), a.value)))) + isnull(a.value2, convert(char(32), a.value))),
rval = convert(char(12), space(12-char_length(isnull(b.value2, convert(char(32), b.value)))) + isnull(b.value2, convert(char(32), b.value)))
into #configs
from master.dbo.sysconfigures a,
master.dbo.syscurconfigs b
where
a.config *= b.config
-- ignore config=19 (User Defined Cache)
and parent != 19
and a.config != 19
order by name
go
select DBSERVER=dbserver,Parameter=parm,DefaultVal=dval,CurrVal=cval,RunVal=rval from #configs where dval != cval
go
drop table #configs
go
!
cat $SQLOUT >> $LOGFILE
checkSqlErr $SQLOUT
if [ $? -eq 1 ]
then
echo "$DBSERVER: SQL FAILED" >> $LOGFILE
export HIGHRC=1
continue
else
echo "$DBSERVER: SQL SUCCESSFUL" >> $LOGFILE
fi
done
#--------------------------------------------------------------------------
# Exit with final status
#--------------------------------------------------------------------------
# The "\n" escapes rely on ksh's echo expanding them; bash's default echo
# would print them literally.
if [ $HIGHRC -eq 1 ]
then
echo "\nScript FAILED"
echo "Review output in $LOGFILE\n"
exit 1
else
echo "\nScript SUCCESSFUL"
echo "Review output in $LOGFILE\n"
exit 0
fi
| true
|
23c3237bb1466dfff8f3a10523a69f194d517126
|
Shell
|
adriananeci/k8s-the-hard-way
|
/c.local_vagrant_kubeadm/scripts_kubeadm_local/07_k8s_cni.sh
|
UTF-8
| 1,331
| 3.203125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash

# Install the Calico CNI on a local kubeadm/vagrant cluster and smoke-test
# cluster DNS from a busybox pod.

# KUBECONFIG path-list separator differs per platform (";" on Windows).
# NOTE(review): ${os} is expected to arrive from the environment — confirm
# which earlier script exports it.
config_separator=":"
if [[ ${os} == "windows" ]]
then
config_separator=";"
fi
export KUBECONFIG="~/.kube/config${config_separator}./kubeconfig"
# Merge ./kubeconfig into the user's kubeconfig and select the admin context.
kubectl config view --flatten > ~/.kube/config
kubectl config use-context 'kubernetes-admin@kubernetes'
# Prefer nc; fall back to ncat when plain netcat is not installed.
nc_command="nc"
command -v ${nc_command} >/dev/null 2>&1 || nc_command="ncat"
#Sleep to wait for controller-0 to come up
echo "Waiting for kube api-server to come up ..."
# Ask the vagrant master for its 10.240.x.x address, then poll port 6443
# until the API server accepts connections.
KUBERNETES_PUBLIC_ADDRESS=$(vagrant ssh master -c "ip address show | grep 'inet 10.240' | sed -e 's/^.*inet //' -e 's/\/.*$//' | tr -d '\n'" 2>/dev/null)
while ! ${nc_command} -z ${KUBERNETES_PUBLIC_ADDRESS} 6443
do
sleep 10; echo "Sleeping another 10 seconds ..."
done
kubectl apply -f ../../cni/calico/calico.yaml
kubectl get pods -l k8s-app=kube-dns -n kube-system
# NOTE(review): --generator=run-pod/v1 is deprecated and removed in newer
# kubectl releases; plain `kubectl run` creates a pod there.
kubectl run --generator=run-pod/v1 busybox --image=busybox:1.28 --command -- sleep 3600
kubectl get pods -l run=busybox
POD_NAME=$(kubectl get pods -l run=busybox -o jsonpath="{.items[0].metadata.name}")
# Block until the busybox pod reports the Ready condition.
while [[ $(kubectl get pods -l run=busybox -o jsonpath="{..status.conditions[?(@.type=='Ready')].status}") != "True" ]];
do echo "waiting for pod ${POD_NAME} to become ready!" && sleep 3; done
kubectl exec -ti ${POD_NAME} -- nslookup kubernetes
kubectl get componentstatuses
kubectl get nodes -o wide
| true
|
6984f9d1b2e336123848ef1608a73356d5312941
|
Shell
|
cwida/fsst
|
/paper/evolution.sh
|
UTF-8
| 4,212
| 2.578125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# output format: STCB CCB CR
# STCB: symbol table construction cost in cycles-per-compressed byte (constructing a new ST per 8MB text)
# CCB: compression speed cycles-per-compressed byte
# CR: compression (=size reduction) factor achieved
#
# Each pipeline benchmarks one evolutionary stage of the compressor over
# every file in dbtext/. The inner awk extracts timing fields from a single
# run's output; the outer awk averages them over all k files and appends a
# label naming the algorithm variant.
(for i in dbtext/*; do (./cw-strncmp $i 2>&1) | awk '{ l++; if (l==3) t=$2; if (l==6) c=$2; d=$1}END{print t " " c " " d}'; done) | awk '{t+=$1;c+=$2;d+=$3;k++}END{ print (t/k) " " (c/k) " " d/k " iterative|suffix-array|dynp-matching|strncmp|scalar" }'
(for i in dbtext/*; do (./cw $i 2>&1) | awk '{ l++; if (l==3) t=$2; if (l==6) c=$2; d=$1}END{print t " " c " " d}'; done) | awk '{t+=$1;c+=$2;d+=$3;k++}END{ print (t/k) " " (c/k) " " d/k " iterative|suffix-array|dynp-matching|str-as-long|scalar"}'
(for i in dbtext/*; do (./cw-greedy $i 2>&1) | awk '{ l++; if (l==3) t=$2; if (l==6) c=$2; d=$1}END{print t " " c " " d}'; done) | awk '{t+=$1;c+=$2;d+=$3;k++}END{ print (t/k) " " (c/k) " " d/k " iterative|suffix-array|greedy-match|str-as-long|scalar" }'
(for i in dbtext/*; do (./vcw $i 2>&1) | fgrep -v target | awk '{ l++; if (l==2) t=$2; if (l==4) c=$2; d=$1}END{print t " " c " " d}'; done) | awk '{t+=$1;c+=$2;d+=$3;k++}END{ print (t/k) " " (c/k) " " d/k " bottom-up|binary-search|greedy-match|str-as-long|scalar" }'
(for i in dbtext/*; do (./hcw $i 511 -adaptive 2>&1) | fgrep -v target | awk '{ l++; if (l==2) t=$2; if (l==4) c=$2; d=$1}END{print t " " c " " d}'; done) | awk '{t+=$1;c+=$2;d+=$3;k++}END{ print (t/k) " " (c/k) " " d/k " bottom-up|lossy-hash|greedy-match|str-as-long|branch-scalar" }'
#(for i in dbtext/*; do (./hcw-opt $i 511 -branch 2>&1) | fgrep -v target | awk '{ l++; if (l==2) t=$2; if (l==4) c=$2; d=$1}END{print t " " c " " d}'; done) | awk '{t+=$1;c+=$2;d+=$3;k++}END{ print (t/k) " " (c/k) " " d/k " bottom-up|lossy-hash|greedy-match|str-as-long|branch-scalar|optimized-construction" }'
(for i in dbtext/*; do (./hcw-opt $i 511 -adaptive 2>&1) | fgrep -v target | awk '{ l++; if (l==2) t=$2; if (l==4) c=$2; d=$1}END{print t " " c " " d}'; done) | awk '{t+=$1;c+=$2;d+=$3;k++}END{ print (t/k) " " (c/k) " " d/k " bottom-up|lossy-hash|greedy-match|str-as-long|adaptive-scalar|optimized-construction" }'
(for i in dbtext/*; do (./hcw-opt $i 2>&1) | fgrep -v target | awk '{ l++; if (l==2) t=$2; if (l==4) c=$2; d=$1}END{print t " " c " " d}'; done) | awk '{t+=$1;c+=$2;d+=$3;k++}END{ print (t/k) " " (c/k) " " d/k " bottom-up|lossy-hash|greedy-match|str-as-long|avx512|optimized-construction" }'
# on Intel SKX CPUs, the results look like:
#
# 75.117,160.11,1.97194 iterative|suffix-array|dynp-matching|strncmp|scalar
# \--> 160 cycles per byte produces a very slow compression speed (say ~20MB/s on a 3Ghz CPU)
#
# 73.6948,81.6404,1.97194 iterative|suffix-array|dynp-matching|str-as-long|scalar
# \--> str-as-long (i.e. FSST focusing on 8-byte word symbols) improves compression speed 2x
#
# 74.4996,37.457,1.94764 iterative|suffix-array|greedy-match|str-as-long|scalar
# \--> dynamic programming brought only 3% smaller size. So drop it and gain another 2x compression speed.
#
# 2.10217,19.9739,2.33083 bottom-up|binary-search|greedy-match|str-as-long|scalar
# \--> bottom-up is *really* better in terms of compression factor than iterative with suffix array.
#
# 1.74783,10.7009,2.28103 bottom-up|lossy-hash|greedy-match|str-as-long|scalar-branch
# \--> hashing significantly improves compression speed at only 5% size cost (due to hash collisions)
#
# 1.74783,9.8142,2.28103 bottom-up|lossy-hash|greedy-match|str-as-long|scalar-adaptive
# \--> adaptive use of encoding kernels gives compression speed a small bump
#
# 0.820435,4.12261,2.19227 bottom-up|lossy-hash|greedy-match|str-as-long|avx512|optimized-construction
# \--> symboltable optimizations & AVX512 kick in, resp. for construction time and compression speed.
#
# optimized construction refers to the combination of three changes:
# - reducing the amount of bottom-up passes from 10 to 5 (less learning time, but.. slighty worsens CR)
# - looking at subsamples in early rounds (increasing the sample as the rounds go up). Less compression work.
# - splitting the counters for less cache pressure and aiding fast skipping over counts-of-0
| true
|
79f06fac2c6e95e6cba4dd647c3ee7d2ca51d584
|
Shell
|
BrianElugan99/datawave-muchos
|
/scripts/dw-play-redeploy.sh
|
UTF-8
| 399
| 2.8125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# This script is intended only to simplify your interaction with datawave.yml playbook
# for the purpose of forcing a rebuild and redeploy of datawave on your cluster.
# Note that any script arguments are passed thru to Ansible directly.
readonly SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"

# Fix: quote "$@" so arguments containing whitespace are forwarded to the
# wrapped playbook intact (an unquoted $@ re-splits them on IFS).
"${SCRIPT_DIR}"/dw-play.sh -e '{ "dw_force_redeploy": true }' "$@"
| true
|
53ea3ad23aa392db83ee78f188e47a7c37b1c222
|
Shell
|
cr3/dotfiles
|
/bin/screen-attach
|
UTF-8
| 771
| 3.640625
| 4
|
[] |
no_license
|
#!/bin/sh
# Attach (multi-attach) to the "main" screen session, creating it on demand,
# after wiring up ssh-agent and recording $DISPLAY per connecting host.

# -U option basically means "terminal does not have UTF-8 support"
# Fix: POSIX test uses "="; "==" is a bashism that errors out under strict
# /bin/sh implementations such as dash, so -U was silently ignored there.
if test "$1" = "-U"; then
    unset LC_CTYPE
fi

# Bring in my ssh-agent, so screen will have it available in all windows.
keychain --nocolor --quiet ~/.ssh/id_rsa
. ~/.keychain/"$(hostname)"-sh

# Not a standard environment variable, but I need it for host-dependent
# decisions in .screenrc.
export HOST="$(hostname)"

# Record $DISPLAY for later use, keyed by the client's IP (or our own IP
# for local sessions).
if test "$SSH_CLIENT" != ""; then
    IDXHOST=$(echo "$SSH_CLIENT" | sed 's/ .*//')
else
    IDXHOST=$(hostname -i | sed 's/ //g')
fi
mkdir -p ~/.screen/"$IDXHOST"/
if test "$DISPLAY" != ""; then
    echo "$DISPLAY" > ~/.screen/"$IDXHOST"/display
fi

# multi-attach to main session, creating it if necessary. exec to save a
# process.
exec screen -S main -xRR
| true
|
ccd421791386465e3ac78d2e5e14e3fe656ee882
|
Shell
|
RATDistributedSystems/workload-generator
|
/create-conf.sh
|
UTF-8
| 226
| 3.21875
| 3
|
[] |
no_license
|
#!/bin/bash
# Expand the #PARAM_SERVER_CONFIG# placeholder in an nginx config file with
# one "server host:port;" upstream line per comma-separated address in
# $PARAM_WEBSERVER_ADDR, then restart nginx.
# Usage: create-conf.sh <config-file>
str=""
for i in $(echo $PARAM_WEBSERVER_ADDR | tr "," "\n"); do
    # Prepend each entry; the \n and \t sequences are interpreted by sed in
    # the replacement text, producing indented lines in the final config.
    str="server $i:$PARAM_WEBSERVER_PORT;\n\t\t$str"
done

FILENAME=$1
# Fix: quote the target path so filenames containing spaces or glob
# characters do not break the sed invocation.
sed -i "s/#PARAM_SERVER_CONFIG#/${str}/g" "$FILENAME"
/etc/init.d/nginx restart
| true
|
5ede00c8529762bd1ff8cfc1b91d9eaaffdca052
|
Shell
|
khollbach/config-files
|
/.config/bash/prompt
|
UTF-8
| 1,889
| 4.15625
| 4
|
[] |
no_license
|
#!/bin/bash

# Rebuild $PS1 just before each prompt is shown.
PROMPT_COMMAND=prompt_command

#prompt_contents='\u@\h:\w'
prompt_contents='\w'

# Default accent color for the prompt.
prompt_color=9

# Different prompt color, to tell my machines apart.
if [[ $(hostname) == khollbach-96* ]]; then
    prompt_color=14
fi
# It automatically re-runs each time, just before the prompt is printed.
function prompt_command {
# Reset the terminal title after running any program, since it seems many
# programs set it and then forget to clean it up when they exit.
echo -ne "\e]0;\a"
# Color escape codes.
local color='\[\e[1;38;5;'"$prompt_color"'m\]' # bold text, colorful
local green='\[\e[38;5;2m\]'
local reset='\[\e[0m\]'
# Show the current git branch if you're in a git repo and you've checked
# out anything other than master.
local git_branch=$(parse_git_branch)
if [ -n "$git_branch" ] \
&& [ "$git_branch" != '(master)' ] \
&& [ "$git_branch" != '(main)' ]
then
git_branch=" $green$git_branch$reset"
else
git_branch=""
fi
# Add a line break if $prompt_contents is anything more than '\w', or \w is
# longer than 48 chars, or git branch is showing.
local newline=""
local w=$(dirs +0)
if [[ -n "$prompt_contents" &&
("$prompt_contents" != '\w' || ${#w} -gt 48 || -n "$git_branch")
]]; then
newline="\n"
fi
# Set the prompt; e.g.:
# |
# | ~/some-dir (branch-name)
# | $ _
# or:
# |
# | ~/some-dir$ _
PS1="$reset\n"
PS1="$PS1$color$prompt_contents$reset$git_branch$newline"
PS1="$PS1$color"'\$'"$reset "
}
# Get the current git branch.
#
# Prints "(branch)" for the currently checked-out branch, or nothing when
# not inside a git repository (all git errors are suppressed).
#
# https://stackoverflow.com/questions/15883416/adding-git-branch-on-the-bash-command-prompt
function parse_git_branch {
    command git branch 2> /dev/null | sed -n 's/^\* \(.*\)/(\1)/p'
}
| true
|
4ad865789c0820c5e6bee9c3e54ffca59e4a74f4
|
Shell
|
eumel8/otc_ims
|
/tests/test.sh
|
UTF-8
| 658
| 2.5625
| 3
|
[] |
no_license
|
#!/bin/sh
# Prerequisites:
# Cloud API Credentials
# image disk on OBS ansible011:bionic-server-cloudimg-amd64.vmdk

# Run one scenario: print its banner, then invoke the playbook with the
# given extra-vars. The whole suite aborts on the first failure.
run_scenario() {
    echo "$1"
    ansible-playbook test.yml -e "$2" || exit 1
}

run_scenario "SCENARIO 1: create image" "image_name=ansible-image01 image_url=ansible011:bionic-server-cloudimg-amd64.vmdk image_min_disk=12 localaction=create waitfor=true"
run_scenario "SCENARIO 2: show image" "image_name=ansible-image01 localaction=show"
run_scenario "SCENARIO 3: list images (pagination)" "localaction=list"
run_scenario "SCENARIO 4: delete image" "image_name=ansible-image01 localaction=delete"
| true
|
e136f5e5689a642113cfb69b3e91b5aa4f01b5cc
|
Shell
|
vonLeebpl/WotExplorer
|
/.provision/scripts/php.sh
|
UTF-8
| 3,614
| 2.953125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Vagrant provisioner: installs either HHVM or PHP 7.0 (FPM + extensions).
export LANG=C.UTF-8

# Positional provisioning arguments passed in by the Vagrantfile.
PHP_TIMEZONE=$1   # NOTE(review): captured but not referenced below -- confirm intent
HHVM=$2           # "true" selects the HHVM branch instead of PHP 7.0
PHP_VERSION=$3    # NOTE(review): unused in this script -- confirm
if [[ $HHVM == "true" ]]; then
    echo ">>> Installing HHVM"

    # Get key and add to sources
    wget --quiet -O - http://dl.hhvm.com/conf/hhvm.gpg.key | sudo apt-key add -
    echo deb http://dl.hhvm.com/ubuntu trusty main | sudo tee /etc/apt/sources.list.d/hhvm.list

    # Update
    sudo apt-get update

    # Install HHVM
    # -qq implies -y --force-yes
    sudo apt-get install -qq hhvm

    # Start on system boot
    sudo update-rc.d hhvm defaults

    # Replace PHP with HHVM via symlinking
    sudo /usr/bin/update-alternatives --install /usr/bin/php php /usr/bin/hhvm 60

    sudo service hhvm restart
else
    sudo add-apt-repository -y ppa:ondrej/php
    sudo apt-key update
    sudo apt-get update
    sudo apt-get install -qq php7.0 php7.0-cli php7.0-fpm php7.0-mysql php7.0-pgsql php7.0-sqlite php7.0-curl php7.0-dev php7.0-gd php7.0-intl php7.0-imap php7.0-mbstring php7.0-opcache php7.0-soap php7.0-tidy php7.0-xmlrpc
    sudo apt-get install -qq php-pear php-xdebug

    # Set PHP FPM to listen on TCP instead of Socket
    sudo sed -i "s/listen =.*/listen = 127.0.0.1:9000/" /etc/php/7.0/fpm/pool.d/www.conf

    # Set PHP FPM allowed clients IP address
    sudo sed -i "s/;listen.allowed_clients/listen.allowed_clients/" /etc/php/7.0/fpm/pool.d/www.conf

    # Set run-as user for PHP/7.0-FPM processes to user/group "vagrant"
    # to avoid permission errors from apps writing to files
    sudo sed -i "s/user = www-data/user = vagrant/" /etc/php/7.0/fpm/pool.d/www.conf
    sudo sed -i "s/group = www-data/group = vagrant/" /etc/php/7.0/fpm/pool.d/www.conf

    sudo sed -i "s/listen\.owner.*/listen.owner = vagrant/" /etc/php/7.0/fpm/pool.d/www.conf
    sudo sed -i "s/listen\.group.*/listen.group = vagrant/" /etc/php/7.0/fpm/pool.d/www.conf
    sudo sed -i "s/listen\.mode.*/listen.mode = 0666/" /etc/php/7.0/fpm/pool.d/www.conf

    # NOTE(review): "sudo echo ... >> file" applies sudo to echo only, not to
    # the redirection; this works here presumably because the provisioner
    # already runs with write access to php.ini -- confirm.
    sudo echo ';;;;;;;;;;;;;;;;;;;;;;;;;;' >> /etc/php/7.0/fpm/php.ini
    sudo echo '; Added to enable Xdebug ;' >> /etc/php/7.0/fpm/php.ini
    sudo echo ';;;;;;;;;;;;;;;;;;;;;;;;;;' >> /etc/php/7.0/fpm/php.ini
    sudo echo '' >> /etc/php/7.0/fpm/php.ini
    # sudo echo 'zend_extension="'$(find / -name 'xdebug.so' 2> /dev/null)'"' >> /etc/php/7.0/fpm/php.ini
    sudo echo 'xdebug.default_enable = 1' >> /etc/php/7.0/fpm/php.ini
    sudo echo 'xdebug.idekey = "vagrant"' >> /etc/php/7.0/fpm/php.ini
    sudo echo 'xdebug.remote_enable = 1' >> /etc/php/7.0/fpm/php.ini
    sudo echo 'xdebug.remote_autostart = 0' >> /etc/php/7.0/fpm/php.ini
    sudo echo 'xdebug.remote_port = 9000' >> /etc/php/7.0/fpm/php.ini
    sudo echo 'xdebug.remote_handler=dbgp' >> /etc/php/7.0/fpm/php.ini
    sudo echo 'xdebug.remote_log="/var/log/xdebug/xdebug.log"' >> /etc/php/7.0/fpm/php.ini

    sudo sed -i "s/error_reporting = E_ALL &/error_reporting = E_ALL ;&/" /etc/php/7.0/fpm/php.ini
    sudo sed -i "s/display_errors = Off/display_errors = On/" /etc/php/7.0/fpm/php.ini
    sudo sed -i "s/display_startup_errors = Off/display_startup_errors = On/" /etc/php/7.0/fpm/php.ini
    sudo sed -i "s/track_errors = Off/track_errors = On/" /etc/php/7.0/fpm/php.ini

    # Fix: the replacement text "Europe/Warsaw" contains "/", which broke the
    # original s/// expression (sed aborts with "unknown option to `s'" and
    # the timezone was never set). Use "|" as the delimiter instead.
    # NOTE(review): $PHP_TIMEZONE is accepted as $1 but ignored here -- the
    # timezone is hard-coded; confirm whether the parameter should be used.
    sudo sed -i "s|;date.timezone =|date.timezone = Europe/Warsaw|" /etc/php/7.0/fpm/php.ini

    # Make the CLI SAPI share the FPM configuration via symlinks.
    cd /etc/php/7.0/cli/
    sudo mv php.ini php.ini.original
    sudo mv conf.d conf.d.original
    sudo mv ./pool.d/www.conf ./pool.d/www.conf.original
    sudo ln -s /etc/php/7.0/fpm/php.ini
    sudo ln -s /etc/php/7.0/fpm/conf.d
    cd pool.d/
    sudo ln -s /etc/php/7.0/fpm/pool.d/www.conf

    sudo service php7.0-fpm restart
fi
| true
|
1f6043355d57e6cfa4daf515ac047b9f8471de91
|
Shell
|
fervitor/Atividades_Shell
|
/Lista_1/4.sh
|
UTF-8
| 259
| 2.515625
| 3
|
[] |
no_license
|
#!/bin/bash
# Appends the listings of three directories (named 1, 2 and 3) to
# /tmp/que_lista_linda.txt, announcing the destination first.
d1=1
d2=2
d3=3
# Fix: the original ran `echo -e"..."`, gluing -e onto the text so the flag
# was printed literally instead of being parsed as an option.
echo -e "OS DIRETORIOS SERÂO ENCAMINHADOS PARA O ARQUIVO que_lista_linda.txt NA PASTA TMP"
# The command substitutions are deliberately unquoted so each directory's
# file names are flattened onto a single output line.
echo -e $(ls $d1) >> /tmp/que_lista_linda.txt
echo -e $(ls $d2) >> /tmp/que_lista_linda.txt
echo -e $(ls $d3) >> /tmp/que_lista_linda.txt
| true
|
3479bc496449c96242a6aae1f3dded37f4a5a3cd
|
Shell
|
davidddw/imageBuilder
|
/debian_8.x_kvm_livecloud/scripts/base.sh
|
UTF-8
| 1,058
| 2.65625
| 3
|
[
"Apache-2.0"
] |
permissive
|
# Image-build provisioning for a Debian 8 (jessie) KVM guest.
# Point APT at a fast regional mirror before installing anything.
cat <<'EOF' > /etc/apt/sources.list
deb http://ftp.cn.debian.org/debian/ jessie main contrib non-free
deb-src http://ftp.cn.debian.org/debian/ jessie main contrib non-free
deb http://ftp.cn.debian.org/debian-security/ jessie/updates main
deb-src http://ftp.cn.debian.org/debian-security/ jessie/updates main
EOF

# Bake benchmarking/diagnostic tooling and common services into the image.
apt-get update
apt-get install -y --force-yes chkconfig libglib2.0-0 curl stress tcpdump \
hping3 netperf iperf nmap mtr redis-server openjdk-7-jdk apache2-utils parted \
gdisk nginx sshpass

# Root shell defaults: colored prompt, interactive rm/cp/mv, ls aliases.
cat <<'EOF' > /root/.bashrc
PS1='${debian_chroot:+($debian_chroot)}\[\033[01;31m\]\u@\h\[\033[00m\]:\[\033[01;34m\]\w\[\033[00m\]\$ '
force_color_prompt=yes
alias rm='rm -i'
alias cp='cp -i'
alias mv='mv -i'
# Source global definitions
if [ -f /etc/bashrc ]; then
. /etc/bashrc
fi
alias grep='grep --color=auto'
alias l.='ls -d .* --color=auto'
alias ll='ls -l --color=auto'
alias ls='ls --color=auto'
export EDITOR=vim
export VISUAL=vim
EOF

# Skip reverse-DNS lookups on SSH logins (faster connects).
echo "UseDNS no" >> /etc/ssh/sshd_config
# Force a public resolver inside the image.
sed -i "s/nameserver .*/nameserver 8.8.8.8/" /etc/resolv.conf
| true
|
fa94b014f01698727a09304e023d87a67a13f97e
|
Shell
|
skobba/fdk-tools
|
/search-and-replace/fdk-rename.sh
|
UTF-8
| 428
| 3.578125
| 4
|
[] |
no_license
|
#!/bin/bash
# Batch rename driver: for each "OLD NEW" pair in fdk-rename-list.txt, run
# search-and-replace-in-files.sh over all *.jsx files under $path.
path=~/github_brreg/fdk/applications
replace_list=./fdk-rename-list.txt
file_type="*.jsx"

IFS=$'\n' # make newlines the only separator
set -f # disable globbing
for i in $(cat < "$replace_list"); do
# Split one line into fields on spaces (the IFS override is local to read).
IFS=' ' read -ra VALUES <<< "$i" #Convert string to array
#echo Old: ${VALUES[0]} - New: ${VALUES[1]}
# The unquoted expansions below do not split on spaces (outer IFS is
# newline-only) and do not glob (set -f), so "*.jsx" reaches the helper
# as a single literal argument.
./search-and-replace-in-files.sh $path $file_type ${VALUES[0]} ${VALUES[1]}
done
| true
|
9d56b60b4175ad1df7ddef5416b6e9dc379403c4
|
Shell
|
son-link/ejemplos-dialogos-bash
|
/sino_dialog.sh
|
UTF-8
| 246
| 2.78125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Yes/No dialog example: ask for confirmation and report the user's choice.
# (dialog --yesno exits 0 for "yes", non-zero for "no"/cancel.)
if dialog --title "Pregunta" --yesno "¿Quiere continuar con la operación?" 0 0; then
    dialog --title "Si" --msgbox "Vamos a continuar" 0 0
else
    dialog --title "No" --msgbox "Hasta luego" 0 0
fi
clear
| true
|
f23ec53b0042ce374f06083ca569f90ddc5c34f1
|
Shell
|
SBU-BMI/quip_prad_cancer_detection
|
/prediction_3classes/other/start_color.sh
|
UTF-8
| 811
| 2.515625
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# Launch 8 parallel color-statistics workers (ids 0..7) over the patch
# directory, each writing its own log, then wait for all of them.
source ../conf/variables.sh

cd color
# Fix: worker 3's log file was misnamed "log.color_stats_3.xt" (missing "t")
# in the original copy-pasted list; the loop removes that class of typo.
for i in 0 1 2 3 4 5 6 7; do
    nohup bash color_stats.sh "${PATCH_PATH}" "${i}" 8 &> "${LOG_OUTPUT_FOLDER}/log.color_stats_${i}.txt" &
done
cd ..

wait
exit 0
| true
|
5e06586e3b8186a7487a182cf1268de082c86b9a
|
Shell
|
tuksik/ubuntu-cookbooks
|
/cookbooks/go-server/recipes/install.bash
|
UTF-8
| 1,315
| 3.90625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash

# Install OS-level prerequisites: Go-Server/Go-Agent are Java applications,
# so a headless JRE is required.
function installDependencies()
{
apt-get update
apt-get install -y default-jre-headless
}
# Download and install the Go-Server and Go-Agent Debian packages, start
# both services, then pre-create one working directory per agent.
# Globals (from attributes/default.bash): serverDownloadURL, agentDownloadURL,
# numberOfAgent. Helpers (from lib/util.bash): getTemporaryFile,
# getFileExtension.
function install()
{
# Install
local serverPackageFile="$(getTemporaryFile "$(getFileExtension "${serverDownloadURL}")")"
local agentPackageFile="$(getTemporaryFile "$(getFileExtension "${agentDownloadURL}")")"
curl -L "${serverDownloadURL}" -o "${serverPackageFile}" &&
dpkg -i "${serverPackageFile}" &&
service go-server start
curl -L "${agentDownloadURL}" -o "${agentPackageFile}" &&
dpkg -i "${agentPackageFile}" &&
service go-agent start
rm -f "${serverPackageFile}" "${agentPackageFile}"
# Only Create Go-Agent Folder Structure
# (owned by go:go so the agent processes can write to them)
for ((i = 1; i <= numberOfAgent; i++))
do
local goAgentFolder="/var/lib/go-agent-${i}"
mkdir -p "${goAgentFolder}" &&
chown -R 'go:go' "${goAgentFolder}"
done
}
# Entry point: load shared helpers and attribute defaults, sanity-check the
# environment (root user, free server/agent ports), then install and report.
function main()
{
appPath="$(cd "$(dirname "${0}")" && pwd)"
# Shared helper library and cookbook attributes; abort if either is missing.
source "${appPath}/../../../lib/util.bash" || exit 1
source "${appPath}/../attributes/default.bash" || exit 1
header 'INSTALLING GO-SERVER'
checkRequireRootUser
checkPortRequirement "${serverPort}"
checkPortRequirement "${agentPort}"
installDependencies
install
installCleanUp
displayOpenPorts
}

main "${@}"
| true
|
9747c8404bc0530f65a5977ac32eef6674c0d0fc
|
Shell
|
pombase/fypo
|
/comprehensive_diff/make_comprehensive_diff.sh
|
UTF-8
| 1,017
| 3.28125
| 3
|
[
"CC-BY-4.0"
] |
permissive
|
#!/bin/sh
# Wrapper script for docker.
#
# We map this folder to a docker volume
#
# Produces editdiff.html: a labelled ROBOT diff between the committed
# fypo-edit.owl (git HEAD) and the current working copy.
IMAGE=${IMAGE:-odkfull}
ODK_JAVA_OPTS=-Xmx16G
ODK_DEBUG=${ODK_DEBUG:-no}

TIMECMD=
if [ x$ODK_DEBUG = xyes ]; then
# In debug mode, prefix the containerised command with /usr/bin/time to
# report elapsed time and peak memory.
echo "Running ${IMAGE} with ${ODK_JAVA_OPTS} of memory for ROBOT and Java-based pipeline steps."
TIMECMD="/usr/bin/time -f ### DEBUG STATS ###\nElapsed time: %E\nPeak memory: %M kb"
fi

# Snapshot the last committed edit file for the left-hand side of the diff.
git show HEAD:src/ontology/fypo-edit.owl > fypo-edit-old.owl

docker run -v $PWD/../src/ontology:/work/ontology:ro -v $PWD:/work/comprehensive_diff -w /work/ontology -e ROBOT_JAVA_ARGS="$ODK_JAVA_OPTS" -e JAVA_OPTS="$ODK_JAVA_OPTS" --rm -ti obolibrary/$IMAGE $TIMECMD robot diff --labels True --left ../comprehensive_diff/fypo-edit-old.owl --left-catalog catalog-v001.xml --right fypo-edit.owl -f html -o ../comprehensive_diff/editdiff.html

# Gentle reminder when the caller appears to be doing a repo update/release.
case "$@" in
*update_repo*|*release*)
echo "Please remember to update your ODK image from time to time: https://oboacademy.github.io/obook/howto/odk-update/."
;;
esac
| true
|
3b67fc2aa8c121486fd0c7d419cb229b2a6a799e
|
Shell
|
convox/convox
|
/examples/httpd/scripts/mariadb_check.sh
|
UTF-8
| 873
| 3.609375
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
set -ex -o pipefail

# Connectivity smoke test: parse a database URL of the form
# proto://user:pass@host:port/dbname, run SELECT COUNT(*) against table
# "data" and expect the count 5 to appear in the output.
# Usage: mariadb_check.sh [mysql|mariadb]

dburl=$MARIA_URL

# Fix: default $1 to empty -- the original unquoted `[ $1 == "mysql" ]`
# produced a test syntax error (and, under set -e, aborted the script)
# whenever no argument was supplied.
if [ "${1:-}" == "mysql" ]; then
    dburl=$MYSQL_URL
fi

# Extract the protocol prefix, e.g. "mysql://".
proto="$(echo $dburl | grep :// | sed -e's,^\(.*://\).*,\1,g')"
# remove the protocol
url="$(echo ${dburl/$proto/})"
# extract the user (if any)
userpass="$(echo $url | grep @ | cut -d@ -f1)"
pass="$(echo $userpass | grep : | cut -d: -f2)"
if [ -n "$pass" ]; then
    user="$(echo $userpass | grep : | cut -d: -f1)"
else
    user=$userpass
fi

# extract the host
host_with_port="$(echo ${url/$userpass@/} | cut -d/ -f1)"

# by request - try to extract the port
port="$(echo $host_with_port | sed -e 's,^.*:,:,g' -e 's,.*:\([0-9]*\).*,\1,g' -e 's,[^0-9],,g')"
host="$(echo ${host_with_port} | cut -d: -f1)"

# Database name: everything after the first "/" of the remaining URL.
dbname="$(echo $url | grep / | cut -d/ -f2-)"

# `grep 5` asserts the expected row count; with pipefail a mismatch (or a
# failed connection) makes the whole script exit non-zero.
mariadb -h${host} -P${port} -u${user} -p${pass} -D${dbname} -e 'SELECT COUNT(*) FROM data' | grep 5

echo "check is done"
| true
|
1abbfc02d74fd8c3de29f6ad2ff0331fac240c96
|
Shell
|
brownman/SHELL_STEPS
|
/BANK/round_up.sh
|
UTF-8
| 439
| 3.3125
| 3
|
[] |
no_license
|
# Increment the persisted round counter in $file_count and announce the new
# value via a desktop notification.
round_up(){
    #depend_cmd: notify-send
    #echo "`whoami`:$@" >> $file_log
    count=$(cat "$file_count")
    # The state file must hold a plain non-negative integer; otherwise it is
    # corrupt, so discard it and bail out.
    re='^[0-9]+$'
    if ! [[ $count =~ $re ]] ; then
        echo "error: Not a number" >&2; rm -- "$file_count"; exit 1
    fi
    # Fix: quote $file_count everywhere and replace backticks/`let` with
    # modern $( ) and arithmetic expansion.
    count=$((count + 1))
    echo "$count" > "$file_count"
    notify-send "round" "X $count"
}
# Resolve the location of the counter state file inside the workspace
# directory ($dir_workspace is expected to be set by the environment).
set_env(){
    file_count="${dir_workspace}/count"
}
# Seed the counter file with 1 on first run; leave an existing file alone.
# Fix: quote $file_count so paths with spaces/globs cannot misbehave.
init(){
    [ ! -f "$file_count" ] && { echo 1 > "$file_count"; }
}
# Orchestration: resolve paths, make sure state exists, then bump the counter.
steps(){
set_env
init
round_up
}
steps
| true
|
a432840a20b6685035ce18fff84a2e6e8ab53e9b
|
Shell
|
oeg-upm/gtfs-bench
|
/generation/resources/csvs/distribution.sh
|
UTF-8
| 820
| 3
| 3
|
[
"Apache-2.0"
] |
permissive
|
# Convert one GTFS zip ($1) into CSV/JSON/XML/SQL distributions plus "best"
# and "worst" mixed-format bundles, then move the result to ../../output/.
# $2 (optional): directory to cd into before starting.
declare -a arr=("AGENCY.csv" "TRIPS.csv" "CALENDAR.csv" "FEED_INFO.csv" "FREQUENCIES.csv" "CALENDAR_DATES.csv" "ROUTES.csv" "STOPS.csv" "STOP_TIMES.csv" "SHAPES.csv")
#Move cwd to here if needed
if [ ! -z "$2" ]
then
cd $2
fi
unzip $1.zip -d $1
cd $1
# Derive a JSON (csvjson) and an XML (di-csv2xml) sibling for every CSV.
for j in "${arr[@]}"
do
NAME=`basename $j .csv`
csvjson $j > "$NAME.json"
../di-csv2xml Category -i $j -o "$NAME.xml"
done
# Single-format bundles (the "sql" bundle reuses the raw CSVs).
zip $1-json.zip *.json
zip $1-sql.zip *.csv
zip $1-xml.zip *.xml
cp ../$1.zip $1-csv.zip
# Hand-picked per-file format mixes for the best/worst benchmark cases.
zip $1-best.zip TRIPS.csv SHAPES.csv FREQUENCIES.csv CALENDAR.csv CALENDAR_DATES.csv STOP_TIMES.json STOPS.json FEED_INFO.json
zip $1-worst.zip TRIPS.json SHAPES.csv FREQUENCIES.csv CALENDAR.csv CALENDAR_DATES.json ROUTES.csv STOPS.json FEED_INFO.json
# Keep only the zips, then publish the whole directory to the output tree.
rm *.csv
rm *.xml
rm *.json
cd ..
rm $1.zip
rm -r ../../output/$1/
mv $1 ../../output/
| true
|
1bb0a7ac1ab9c05ca257645bf8c2ea7f7ba8d896
|
Shell
|
NicoPampe/dotFiles
|
/lock
|
UTF-8
| 150
| 2.59375
| 3
|
[] |
no_license
|
#!/bin/bash
# Lock the screen with a random wallpaper picked from ~/.i3/Pictures.
cd ~/.i3/Pictures/ || exit 1
# Fix: with nullglob an unmatched pattern expands to nothing; the original
# array would otherwise contain the literal string "*.png", making i3lock
# fail on a bogus path when no wallpapers exist.
shopt -s nullglob
declare -a arr=(*.png)
(( ${#arr[@]} > 0 )) || exit 1
index=$(( RANDOM % ${#arr[@]} ))
i3lock -i ~/.i3/Pictures/"${arr[$index]}"
| true
|
5db381b8f460b9689e5a38e57384da7a7800b25a
|
Shell
|
viewstools/morph
|
/release.sh
|
UTF-8
| 164
| 2.78125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Bump the package version, commit and tag the release, and push.
# Usage: ./release.sh <major|minor|patch|x.y.z>
set -e

# Fix: quote "$1" and "$VERSION" so an empty/whitespace-containing argument
# or multi-word npm output cannot be re-split; use $( ) over backticks.
VERSION=$(npm version "$1")
echo "$VERSION"
git commit -am "chore: $VERSION"
git tag "$VERSION"
git push
git push --tags
echo "Now run: npm publish"
| true
|
54e97e8c8820a7b233e4ac8d31e6b5c12bbc75c7
|
Shell
|
contours/corenlp
|
/add-doc-ids.sh
|
UTF-8
| 230
| 3.4375
| 3
|
[] |
no_license
|
#! /bin/sh
# Wrap each *.txt file given on the command line in a <doc id="..."> XML
# element, writing the result next to the input as <name>.xml.

# add_doc_id FILE -- emit FILE's contents wrapped in <doc id="basename">.
add_doc_id() {
    filepath=$1
    filename=$(basename "$filepath")
    docid=${filename%.txt}
    # Fix: the original used ${filepath/%txt/xml}, a bash-only substitution
    # that is a syntax error under /bin/sh (e.g. dash). POSIX suffix
    # stripping is used instead; inputs are assumed to end in .txt, as the
    # docid derivation above already assumes.
    outfile=${filepath%.txt}.xml
    echo "<doc id=\"$docid\">" > "$outfile"
    cat "$filepath" >> "$outfile"
    echo "</doc>" >> "$outfile"
}

for filepath
do
    add_doc_id "$filepath"
done
| true
|
6e571985cef9a1f6e5fa20a5e04a346f9b8e1989
|
Shell
|
lboecker/dotfiles
|
/bashrc
|
UTF-8
| 1,982
| 3.5
| 4
|
[
"ISC"
] |
permissive
|
#!/bin/bash
# Interactive-shell setup: terminal behaviour, history, editor/pager,
# aliases, conda, and programmable completion.

# Disable XON/XOFF flow control so Ctrl-S/Ctrl-Q reach applications.
stty -ixon -ixoff
shopt -s checkwinsize
shopt -s histappend

# prompt_command (defined below) rebuilds PS1 before every prompt.
PROMPT_COMMAND=prompt_command
HISTSIZE=100000
HISTCONTROL=ignoreboth

export VISUAL=vim
export PAGER=less

# GPG pinentry needs to know the controlling terminal.
if command -v gpg > /dev/null 2>&1; then
# shellcheck disable=SC2155
export GPG_TTY=$(tty)
fi

alias grep='grep --color=auto'

# Probe for GNU ls's --group-directories-first before aliasing it in.
if ls --group-directories-first /dev/null > /dev/null 2>&1; then
alias ls='LC_COLLATE=C ls --color=auto --group-directories-first'
else
alias ls='LC_COLLATE=C ls --color=auto'
fi

alias ll='ls -Fhl'
alias la='ls -AFhl'

if [[ -f ~/.bash_aliases ]]; then
. ~/.bash_aliases
fi

# Hook up conda/mamba shell integration when installed under ~/conda.
conda_script_dir=~/conda/etc/profile.d
test -f $conda_script_dir/conda.sh && source $conda_script_dir/conda.sh
test -f $conda_script_dir/mamba.sh && source $conda_script_dir/mamba.sh
unset conda_script_dir

# Let less render archives/binaries via lesspipe when available.
if command -v lesspipe > /dev/null 2>&1; then
eval "$(lesspipe)"
fi

# Load ls color definitions (custom ~/.dircolors if present).
if command -v dircolors > /dev/null 2>&1; then
if [[ -r ~/.dircolors ]]; then
eval "$(dircolors -b ~/.dircolors)"
else
eval "$(dircolors -b)"
fi
fi

# Load bash-completion unless it is already active in this shell.
if [[ -z "$BASH_COMPLETION_VERSINFO" ]] &&
[[ -f /usr/share/bash-completion/bash_completion ]]; then
. /usr/share/bash-completion/bash_completion
fi
# Rebuild PS1 before each prompt: conda env, user@host, cwd, current git
# branch, and a status dot colored by the previous command's exit code.
prompt_command() {
# Must be captured first, before any command below clobbers $?.
local exit_status=$?
local branch
local status_color
local prompt_char=$'\u00b7'

# Conda prefix (empty when no environment is active).
PS1="\[\e[1;34m\]$CONDA_PROMPT_MODIFIER\[\e[0m\]"
PS1+="\[\e[1;32m\]\u@\h\[\e[0m\] \[\e[1;34m\]\w\[\e[0m\] "

# Append [branch] when inside a git work tree or .git dir and HEAD is a
# symbolic ref (i.e. not detached).
if [[ "$(git rev-parse --is-inside-work-tree 2>/dev/null)" = "true" ]] ||
[[ "$(git rev-parse --is-inside-git-dir 2>/dev/null)" = "true" ]]; then
branch=$(git symbolic-ref -q --short HEAD)
[[ -n "$branch" ]] && PS1+="\[\e[1;32m\][$branch]\[\e[0m\] "
fi

# Grey dot on success, red dot on failure.
if [[ "$exit_status" -eq 0 ]]; then
status_color="\e[1;90m"
else
status_color="\e[1;31m"
fi
PS1+="\[$status_color\]$prompt_char\[\e[0m\] "

# Mirror user@host:cwd into the terminal title on xterm-alikes and tmux.
if [[ "$TERM" = xterm* ]] || [[ -n "$TMUX" ]]; then
PS1="\[\e]0;\u@\h: \w\a\]$PS1"
fi

# Share history across concurrent shells: append, clear, re-read.
history -a && history -c && history -r
}
| true
|
a85d2097fab8fa7c0f7dd7dbb09ddb13c534fe40
|
Shell
|
sasye93/containerization-samples
|
/target/scala-2.12/containerize/build_20191114_114205/compose/swarm-init.sh
|
UTF-8
| 5,099
| 2.71875
| 3
|
[] |
no_license
|
#!/bin/bash
# Fix: shebang changed from /bin/sh -- the `read -n 1 -s` at the bottom is a
# bash extension and fails under POSIX sh implementations such as dash.
#
# Initialise a Docker Swarm on this node (if needed), recreate the overlay
# networks used by the generated stacks, deploy every stack compose script,
# and print a summary of nodes, stacks and services.

# Make sure this node is a swarm manager; initialise the swarm otherwise.
if ! docker node ls 2>/dev/null | grep -q "Leader"; then
    docker swarm init --advertise-addr eth0
fi

# ensure_network NAME -- recreate the attachable overlay network NAME.
# If an old network exists but cannot be removed (e.g. still in use), keep
# it and warn instead of failing the whole script.
ensure_network() {
    local net=$1
    if docker network inspect "$net" > /dev/null 2>&1; then
        if ! docker network rm "$net" > /dev/null 2>&1; then
            echo "Could not remove network $net. Continuing with the old network. Remove network manually to update it next time."
            return 0
        fi
    fi
    docker network create -d overlay --attachable=true "$net"
}

for net in timeservice containerized_scalaloci_project timeservice_gateway pipeline chat masterworker; do
    ensure_network "$net"
done

echo "---------------------------------------------"
echo ">>> Creating stacks from compose files... <<<"
echo "---------------------------------------------"

# Deploy each stack; abort on the first failing compose script.
for stack_script in \
    stack-thesis.samples.worker.MasterWorker.sh \
    stack-thesis.samples.chat.MultitierApi.sh \
    stack-thesis.listings.APIGateway.timeservice.MultitierApi.sh \
    stack-thesis.samples.timeservice.MultitierApi.sh \
    stack-thesis.listings.APIGateway.gateway.api.ConsumerApi.sh \
    stack-thesis.samples.eval.msa.Pipeline.sh; do
    bash "$stack_script" || exit 1
done

echo "-----------------------"
echo ">>> Nodes in Swarm: <<<"
echo "-----------------------"
docker node ls
docker swarm join-token manager
docker swarm join-token worker

echo "------------------------"
echo ">>> Stacks in Swarm: <<<"
echo "------------------------"
docker stack ls
echo "------------------------"

# List the services of every deployed stack.
for stack in \
    thesis_samples_worker_masterworker \
    thesis_samples_chat_multitierapi \
    thesis_listings_apigateway_timeservice_multitierapi \
    thesis_samples_timeservice_multitierapi \
    thesis_listings_apigateway_gateway_api_consumerapi \
    thesis_samples_eval_msa_pipeline; do
    echo "-----------------"
    echo "Services in stack '$stack':"
    docker stack services "$stack"
done

echo "----------------------------------"
echo ">>> All services in the Swarm: <<<"
echo "----------------------------------"
docker service ls

echo "--------------------------"
echo "Swarm initialization done. Note that you might have to forward ports to your machine to make them accessable if you run Docker inside a VM (e.g. toolbox)."
echo ">> PRESS ANY KEY TO CONTINUE / CLOSE <<"
read -n 1 -s
exit 0
| true
|
ddaa2ae594b0b96cf787ed0471e55aba0b024f5f
|
Shell
|
makiolo/npm-mas-mas
|
/cmaki_scripts/ci.sh
|
UTF-8
| 707
| 2.859375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
set -e

export NPP_CACHE="${NPP_CACHE:-FALSE}"
env | sort

# Start from a clean tree: drop previous build output and dependencies.
for dir in bin artifacts node_modules; do
    if [[ -d "$dir" ]]; then
        rm -Rf "$dir"
    fi
done
if [ -f "artifacts.json" ]; then
    rm artifacts.json
fi

# npm-managed projects drive the build through package.json scripts;
# otherwise fall back to the bundled cmaki helper scripts.
if [ -f "package.json" ]; then
    echo [1/2] compile
    npm install
    echo [2/2] run tests
    npm test
else
    echo [1/2] compile
    ./node_modules/cmaki_scripts/setup.sh && ./node_modules/cmaki_scripts/compile.sh
    echo [2/2] run tests
    ./node_modules/cmaki_scripts/test.sh
fi

# Projects with a cmaki.yml also publish their artifact.
if [ -f "cmaki.yml" ]; then
    echo [3/3] upload artifact
    if [ -f "package.json" ]; then
        npm run upload
    else
        ./node_modules/cmaki_scripts/upload.sh
    fi
fi
| true
|
b8f6d2aaa68dc0608c2b62f64250460c3dec06b4
|
Shell
|
saeidzebardast/dotfiles
|
/.bashrc
|
UTF-8
| 617
| 3.015625
| 3
|
[] |
no_license
|
source ~/.exports

# append to the history file, don't overwrite it
shopt -s histappend

# keep LINES/COLUMNS in sync with the window size after each command
shopt -s checkwinsize

# set a fancy prompt (non-color, unless we know we "want" color)
if [[ "$TERM" == xterm-color ]]; then
    color_prompt=yes
fi

# enable programmable completion features (you don't need to enable
# this, if it's already enabled in /etc/bash.bashrc and /etc/profile
# sources /etc/bash.bashrc).
if [ -f /etc/bash_completion ] && ! shopt -oq posix; then
    source /etc/bash_completion
fi

source ~/.aliases
| true
|
ee0823dd749bfc6fce361239e84b29acf95a8855
|
Shell
|
alejandro1395/GenPhenprimates
|
/Quality_resequenced/Check_premature_STOP_codon/CountStopCodons_JA.sh
|
UTF-8
| 1,565
| 2.890625
| 3
|
[] |
no_license
|
#!/bin/bash
# SLURM array job: each task picks one line (its array index) from
# Group_by_gene_input_JA.txt and counts premature stop codons in the
# corresponding resequenced peptide FASTA.
#SBATCH --array=1-15995
#SBATCH --job-name=CountStop
#SBATCH --output=/dev/null
#SBATCH --error=/dev/null
#SBATCH --time=06:00:00

#Define modules
module purge
module unload gcc/4.9.3-gold
module load gcc/6.3.0
module load PYTHON/3.6.3
module load EMBOSS

#Define PATH argument
SPECIES_IDs=/scratch/devel/avalenzu/PhD_EvoGenom/GenomPhenom200primates/data/Genomes/Annotations/REFS/
BIN=/scratch/devel/avalenzu/PhD_EvoGenom/GenomPhenom200primates/bin/MUSCLE/
INDIR_RESEQUENCED=/scratch/devel/avalenzu/PhD_EvoGenom/GenomPhenom200primates/human_driven_results/Quality_resequenced/Check_premature_STOP_codon/Resequenced_pep_genes_fasta/
OUTDIR=/scratch/devel/avalenzu/PhD_EvoGenom/GenomPhenom200primates/human_driven_results/Quality_resequenced/Check_premature_STOP_codon/StopCodonsCount/
SRC=/scratch/devel/avalenzu/PhD_EvoGenom/GenomPhenom200primates/src/Quality_resequenced/Check_premature_STOP_codon/
TRAITS=/scratch/devel/avalenzu/PhD_EvoGenom/GenomPhenom200primates/data/Phenomes/Primate_Traits/OUTPUT/TraitsPerSpecies.txt
mkdir -p ${OUTDIR}

# Define arguments in each task
# (line N of the input list for array task N: a path of the form
# .../<gene_name>/<species>.<ext>)
ARGUMENT1=`awk -v task_id=$SLURM_ARRAY_TASK_ID 'NR==task_id' ${SRC}Group_by_gene_input_JA.txt`

# Print info of the task
echo $ARGUMENT1

# EXECUTING PART
# gene name = parent directory; species name = file basename before the
# first dot.
gene_name=$(echo $(basename $(dirname $ARGUMENT1)))
mkdir -p ${OUTDIR}${gene_name}
species_name=$(echo $ARGUMENT1 | rev | cut -d'/' -f1 | rev | cut -d \. -f 1)
python ${SRC}CountStopCodons.py ${INDIR_RESEQUENCED}${gene_name}/${species_name}.qual.pep.fa \
${gene_name} \
${OUTDIR}${gene_name}/${species_name}.StopCountTable
| true
|
e3350f68356c3caa9d6b2519475875924c43763e
|
Shell
|
jordanblakey/shell-scripting
|
/loops.sh
|
UTF-8
| 639
| 3.484375
| 3
|
[] |
no_license
|
#!/bin/bash
# Tutorial: bash looping constructs (while, until, C-style for, for-in).
# WHILE LOOPS
# Print 1..10.
num=1
while [ $num -le 10 ]; do
echo $num
num=$((num + 1)) # Inside $(( )) arithmetic, variable names do not need '$'.
done
# Print odd numbers up to 13: 'continue' skips the evens, and 'break'
# leaves the loop once num reaches 15 (so num is 15 afterwards).
num=1
while [ $num -le 20 ]; do # [ ] only sees expanded words, so '$num' is required here; (( )) below resolves bare names itself.
if (( ((num % 2)) == 0 )); then
num=$((num + 1))
continue
fi
if ((num >= 15)); then
break
fi
echo $num
num=$((num + 1))
done
# 'until' loops while the condition is FALSE. num is already 15 here,
# so '15 > 10' holds immediately and this body never runs.
until [ $num -gt 10 ]; do # do/done delimit the body, like braces in C.
echo $num # '$' is required to expand a variable in a command argument.
num=$((num + 1))
done
# FOR LOOPS
for (( i=0; i <= 10; i=i+1)); do # C-style loop.
echo $i
done
# Brace expansion {A..Z} produces each capital letter in turn.
for i in {A..Z}; do
echo $i
done
| true
|
d670529c2592edf573e7af848f685d67e566a0fb
|
Shell
|
unicefuganda/ureport
|
/ci-start-server.sh
|
UTF-8
| 1,143
| 3.53125
| 4
|
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/bash
# Start the ureport Django application as a daemonized uwsgi server on
# port 8001, using the Django settings module given as the first argument.
# Usage: ci-start-server.sh <settings_module>   (e.g. ci_settings)

SETTINGS_FILE=$1

if [[ -z "${SETTINGS_FILE}" ]]; then
    echo -e "\nERROR: You must pass in a settings file to run with, e.g. 'ci_settings'\n"
    # Exit statuses must be 0-255; 'exit -1' is invalid/non-portable.
    exit 1
fi

echo "Ensure you have uwsgi installed (brew install uwsgi)"

cd ureport_project

echo "Starting server daemon with settings [${SETTINGS_FILE}.py] running in [$(pwd)]"

# uwsgi daemonizes and appends to this log file.
if [ ! -d "target" ]; then
    mkdir target
fi
echo -e "\nStarting Server...\n" > target/ureport-server.log

# --plugin-dir=/usr/lib/uwsgi \
# --plugins=python \
# --env DJANGO_SETTINGS_MODULE=ci_settings \
# --chdir=${UREPORT_HOME}/ureport_project \
# http://projects.unbit.it/uwsgi/wiki/Example - Good list of different ways to start uwsgi
uwsgi --master \
     --pp .. \
     --pidfile=target/server.pid \
     --env="DJANGO_SETTINGS_MODULE=ureport_project.${SETTINGS_FILE}" \
     --module=wsgi_app \
     --socket=0.0.0.0:8001 \
     --vacuum \
     --virtualenv=${UREPORT_VIRTUAL_ENV_HOME} \
     -w django_wsgi \
     --daemonize=target/ureport-server.log

#echo $! > target/server.pid
cd ..
| true
|
74ec2ff208a169ab0099577f388d3df49c09bf7f
|
Shell
|
nedbat/dot
|
/.config/shellrc/open.sh
|
UTF-8
| 85
| 2.875
| 3
|
[] |
no_license
|
# Provide an `open` command on systems that ship xdg-open (e.g. Linux),
# mirroring macOS's native `open`. Defined only when xdg-open exists.
if command -v xdg-open >/dev/null; then
  open() { xdg-open "$@"; }
fi
| true
|
88a5c29b6582304ba04a19a2c8ffe45238b00192
|
Shell
|
MartinBasti/actions
|
/markdownlint/action.sh
|
UTF-8
| 135
| 2.671875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# GitHub Action entry point: run markdownlint (mdl) over the checked-out
# repository and propagate its exit status so the step fails on violations.
cd "$GITHUB_WORKSPACE" || exit 1

declare exitstatus=0
mdl -g . || exitstatus=$?
exit "$exitstatus"
| true
|
091471c7b44f0cc503e5cf61f12dc4458e6d2f53
|
Shell
|
pgoultiaev/nomad-demo
|
/vagrant_build.sh
|
UTF-8
| 1,140
| 2.609375
| 3
|
[] |
no_license
|
# Vagrant provisioning: install Docker (legacy apt.dockerproject.org repo for
# Debian jessie), Nomad 0.4.0 and Consul 0.6.4 (+ web UI), register their
# systemd units, and start the 'connectable' service-routing container.
export DEBIAN_FRONTEND=noninteractive
sudo apt-get update
sudo apt-get -qq install apt-transport-https ca-certificates
# Trust the Docker apt repository key, then add the jessie channel.
sudo apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D
echo "deb https://apt.dockerproject.org/repo debian-jessie main" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
sudo apt-get update
sudo apt-get -qq install docker-engine
echo Fetching Nomad consul...
cd /tmp/
sudo curl -sSL https://releases.hashicorp.com/nomad/0.4.0/nomad_0.4.0_linux_amd64.zip -o nomad.zip
sudo curl -sSL https://releases.hashicorp.com/consul/0.6.4/consul_0.6.4_linux_amd64.zip -o consul.zip
sudo curl -sSL https://releases.hashicorp.com/consul/0.6.4/consul_0.6.4_web_ui.zip -o consul_ui.zip
echo Installing ...
sudo unzip nomad.zip -d /usr/bin
sudo unzip consul.zip -d /usr/bin
sudo mkdir -p /lib/consul/ui
sudo unzip consul_ui.zip -d /lib/consul/ui
# Unit files are expected to have been copied into /tmp by the Vagrantfile.
sudo mv /tmp/*.service /lib/systemd/system
sudo systemctl start docker
# Let the 'vagrant' user talk to the Docker daemon without sudo.
sudo gpasswd -a vagrant docker
sudo docker run -d --restart=always --dns '172.17.0.1' --name connectable -v /var/run/docker.sock:/var/run/docker.sock gliderlabs/connectable:master
| true
|
f846ddd42985251c0d60d62b4f7e2338d4e9b30c
|
Shell
|
hpcc-docker-kubernetes/HPCC-Docker-Ansible
|
/hpcc-tools/mon_ips.sh
|
UTF-8
| 3,149
| 3.859375
| 4
|
[] |
no_license
|
#!/bin/bash
# IP-change monitor for an HPCC cluster on Kubernetes: periodically collects
# pod and load-balancer IPs and re-configures HPCC when addresses change.
SCRIPT_DIR=$(dirname $0)
# Print usage and exit non-zero (the script takes no arguments).
function usage()
{
cat <<EOF
Usage: $(basename $0)
EOF
exit 1
}
# Collect current pod/service IPs into $ips_dir and $lbs_dir.
# Pass "-x" to skip collection; otherwise retry up to 3 times, 5s apart.
function collect_ips()
{
if [ -z "$1" ] || [ "$1" != "-x" ]
then
trials=3
while [ $trials -gt 0 ]
do
${SCRIPT_DIR}/get_ips.sh
${SCRIPT_DIR}/get_ips.py -i $ips_dir -l $lbs_dir
[ $? -eq 0 ] && break
trials=$(expr $trials \- 1)
sleep 5
done
fi
}
# Append a timestamped message to $LOG_FILE.
function log()
{
echo "$(date "+%Y-%m-%d_%H-%M-%S") $1 " >> $LOG_FILE 2>&1
}
#------------------------------------------
# Need root or sudo
#
SUDOCMD=
[ $(id -u) -ne 0 ] && SUDOCMD=sudo
#------------------------------------------
# Runtime parameters
#
HPCC_MGR_DIR=/var/lib/hpcc_manager
enabled=${HPCC_MGR_DIR}/enabled
ips_dir=${HPCC_MGR_DIR}/hosts/ips
lbs_dir=${HPCC_MGR_DIR}/hosts/lb-ips
mkdir -p $ips_dir
mkdir -p $lbs_dir
rm -rf ${ips_dir}/*
rm -rf ${lbs_dir}/*
conf_pod_ips=/etc/HPCCSystems/ips
conf_svc_ips=/etc/HPCCSystems/lb-ips
#------------------------------------------
# LOG
#
LOG_DIR=/var/log/hpcc-tools
mkdir -p $LOG_DIR
LOG_DATE=$(date "+%Y-%m-%d")
LOG_FILE=${LOG_DIR}/mon_ips.log
touch ${LOG_FILE}
#exec 2>$LOG_FILE
#set -x
# Optional provider-specific bootstrap (script named after $KUBE_PROVIDER).
if [ -n "$KUBE_PROVIDER" ] && [ "$RUN_PROVIDER_SCRIPT" -eq 1 ]
then
cmd="${SCRIPT_DIR}/providers/$KUBE_PROVIDER"
if [ -e $cmd ]
then
echo "run $cmd"
eval "$cmd"
fi
fi
# Main monitor loop: poll roughly every 5s, rotate the log daily, and
# re-configure HPCC whenever relevant IPs drift from the configured ones.
while [ 1 ]
do
sleep 5
# Rotate the log file once per calendar day.
CUR_LOG_DATE=$(date "+%Y-%m-%d")
if [ "$CUR_LOG_DATE" != "$LOG_DATE" ]
then
mv $LOG_FILE ${LOG_DIR}/mon_ips_${LOG_DATE}.log
LOG_DATE=$CUR_LOG_DATE
touch $LOG_FILE
fi
# Monitor is not enabled, ignored
[ ! -e $enabled ] && sleep 2 && continue
# First time configuration
#if [ ! -e /etc/HPCCSystems/real/environment.xml ]
if [ ! -e /etc/ansible/ips ]
then
log "Configure HPCC cluster at the first time ... "
${SCRIPT_DIR}/config_hpcc.sh >> $LOG_FILE 2>&1
continue
fi
# Collect cluster ips
collect_ips
# Check if any ip changed; esp/roxie-only changes are handled separately
# below (they only need an Ansible inventory refresh).
diff $conf_pod_ips $ips_dir > /tmp/pod_ips_diff.txt
pod_diff_size=$(ls -s /tmp/pod_ips_diff.txt | cut -d' ' -f1)
pod_diff_str=$(cat /tmp/pod_ips_diff.txt | grep diff | grep -v -i esp | grep -v -i roxie)
svc_diff_size=0
if [ -z "$USE_SVR_IPS" ] || [ $USE_SVR_IPS -ne 1 ]
then
diff $conf_svc_ips $lbs_dir > /tmp/svc_ips_diff.txt
svc_diff_size=$(ls -s /tmp/svc_ips_diff.txt | cut -d' ' -f1)
fi
reconfig_hpcc=0
if [ -n "$USE_SVR_IPS" ] && [ $USE_SVR_IPS -ne 0 ] && [ $pod_diff_size -ne 0 ]
then
log "IP(s) changed. Re-configure HPCC cluster ... "
reconfig_hpcc=1
elif [ -n "$pod_diff_str" ] || [ $svc_diff_size -ne 0 ]
then
log "Non esp/roxie ip(s) changed. Re-configure HPCC cluster ... "
reconfig_hpcc=1
fi
# Re-configure
if [ $reconfig_hpcc -ne 0 ]
then
${SCRIPT_DIR}/config_hpcc.sh >> $LOG_FILE 2>&1
elif [ $pod_diff_size -ne 0 ]
then
# Only esp/roxie pods moved: refreshing the Ansible inventory is enough.
log "Only esp/roxie ip(s) changed. Just update Ansible host file ... "
${SCRIPT_DIR}/ansible/setup.sh -d $ips_dir -c /etc/HPCCSystems/hpcc.conf >> $LOG_FILE 2>&1
cp -r $ips_dir/roxie $conf_pod_ips/
cp -r $ips_dir/esp $conf_pod_ips/
fi
done
| true
|
5a1c102b37fb9cf8ef0bfc9885f7afcd4f48eebb
|
Shell
|
crazyguy106/cfclinux
|
/steven/network_research/exercises/day1/script_improved.sh
|
UTF-8
| 2,105
| 4.1875
| 4
|
[] |
no_license
|
#!/bin/bash
# Get ip from user
IP=$1
if [ -z "$IP" ]
then
echo "Please enter an IP address as an argument"
exit
fi
# Check if SSH is open
nmap $IP -p 22 -oG nmap_info.scan
if [ -z "$(cat nmap_info.scan | grep 22 | grep open)" ]
then
echo "[!] Port is closed on $IP, please try again with a new IP"
exit
fi
# Check if user wants to run a Brute force attack
read -p "[*] SSH is found on $IP; would you like to start a BF attack? [Y/n] " input
input=$(echo ${input:-y} | tr '[:upper:]' '[:lower:]')
# Requires user to input y/yes/n/no else, keep requesting user for proper input
until [ "$input" == "y" ] || [ "$input" == "yes" ] || [ "$input" == "n" ] || [ "$input" == "no" ]
do
read -p "[!] You have inputted $input. Please enter either y or n " input
input=$(echo ${input:-y} | tr '[:upper:]' '[:lower:]')
done
if [ "$input" == "n" ] || [ "$input" == "no" ]
then
echo "Thank you for using my program!"
exit
fi
# Ask for login name
read -p "[*] BF attack: Please enter a login name to access " login
# Let user choose to use crunch or password list
read -p "[?] BF attack: Crunch or password list? [c/p/quit] " input
input=$(echo $input | tr '[:upper:]' '[:lower:]')
until [ "$input" == "c" ] || [ "$input" == "p" ] || [ "$input" == "quit" ]
do
read -p "[!] You have inputted $input. Please enter c or p or quit " input
done
case $input in
c)
read -p "[*] Enter min char: " min
read -p "[*] Enter max char: " max
read -p "[*] Enter chars: " chars
crunch $min $max $chars > pass.lst
;;
p)
read -p "[*] Please specity a list to use: " list
cat $list > pass.lst
;;
quit)
echo "Thank you for using my program!"
exit
;;
esac
# Execute hydra attack and save result
hydra -l $login -P pass.lst $IP ssh -vV > hydra_res.txt
# Check if hydra attack was successful and password found
hydra_output=$(cat hydra_res.txt | grep password: | awk '{print $NF}')
if [ -z "$hydra_output" ]
then
echo "A password was not found"
echo "Thank you for using my program!"
else
echo "A password has been found! It is: $hydra_output"
echo "Thank you for using my program!"
fi
| true
|
163c64487c3d697488a9ad2067d70ef95077d932
|
Shell
|
Nabarun21/Anomaly-Detection-for-ECAL-DQM
|
/semi_supervised_learning/test/run29.sh
|
UTF-8
| 1,727
| 2.875
| 3
|
[] |
no_license
|
#!/bin/bash
# SGE job script: trains ECAL-DQM anomaly-detection model v42 (MSE loss,
# adadelta optimizer) on a single 1080Ti GPU node.
##$ -pe smp 24 # 24 cores and 4 GPUs per machine
# so ask for 4 cores to get one GPU
#$ -M ndev@nd.edu
#$ -m abe
#$ -q gpu # Specify queue
#$ -l hostname="qa-1080ti-004" # This job is just going to use one GPU card
#$ -N choose_loss # Specify job name
#$ -o sgeLogs42 # Where to put the output
# Since UGE doesn't have the nice submit file format from HTCondor, we have to define our possible jobs here
echo Starting...
echo `pwd`
echo Initializing environment
# Load the CRC module system if available, then the ML toolchain.
if [ -r /opt/crc/Modules/current/init/bash ];then
source /opt/crc/Modules/current/init/bash
fi
module load tensorflow
module load cudnn
echo '==================================='
pwd
echo '==================================='
#ls -alh
echo '==================================='
#printenv
echo '==================================='
#uname -a
echo '==================================='
#cat /proc/cpuinfo
echo '==================================='
#echo Will run $cmd
echo '==================================='
# Make a working directory
#wd=workingdir_${JOB_NAME}_${QUEUE}_${JOB_ID}
#mkdir -p results/$wd
#pushd results/$wd
#echo $cmd
# Run the training from the repository root so relative paths resolve.
cd /afs/crc.nd.edu/user/n/ndev/DQM_ML/Anomaly-Detection-for-ECAL-DQM/
echo `pwd`
source set_env_preprocess.sh
python /afs/crc.nd.edu/user/n/ndev/DQM_ML/Anomaly-Detection-for-ECAL-DQM/scripts/model_v42.py --model_name v42 --loss_name MSE --opt_name adadelta
cd -
echo '==================================='
#ls -alh
echo '==================================='
# Move the log file into the appropriate results directory, in case something fails later.
#set model = *.tgz #There should only be one of these!
echo Done!
| true
|
024f1463ebcbf30a17061e9957034f0b9f24f67f
|
Shell
|
krishnakumarkp/sdk_generator
|
/git_push.sh
|
UTF-8
| 1,801
| 4.21875
| 4
|
[] |
no_license
|
#!/bin/sh
# Publish a freshly generated PHP SDK: clone the SDK repository, replace its
# contents with the generated code, commit, push, and push a timestamped tag
# of the form <swagger_version>.<YYYYmmdd.HHMMSS>.

SCRIPT="$0"
echo "# START SCRIPT: $SCRIPT"

echo_usage() {
    echo "Usage: $0 -s swagger_version"
    echo ""
    echo "-s swagger_version the tag of the swagger file in git repo"
    echo "-h print help"
}

SWAGGER_VERSION=
# Leading ':' enables silent error reporting: '?' = unknown option,
# ':' = missing required argument. (The previously declared but unhandled
# 'c:' option has been removed.)
while getopts :s:h opt; do
    case $opt in
    s)
        SWAGGER_VERSION=$OPTARG
        ;;
    h)
        echo_usage
        exit 0
        ;;
    :)
        echo "Option -$OPTARG requires an argument"
        echo_usage
        exit 1
        ;;
    \?)
        echo "Unknown option: -$OPTARG"
        echo_usage
        exit 1
        ;;
    esac
done

if [ -z "$SWAGGER_VERSION" ]
then
    echo "No swagger version specified, Can not continue."
    exit 1
fi

COMMIT="true"
CURRENTDATETIME=$(date +"%Y%m%d.%H%M%S")
SDK_TAG="${SWAGGER_VERSION}.${CURRENTDATETIME}"
SDK_GEN_FOLDER="sdk-php"
SDK_REPO_FOLDER="sdk-repo"
SDK_REPO="git@github.com:krishnakumarkp/sdk-php.git"
COMMIT_MSG="Generated at $(date)"
TAG_MSG="Generated at $(date)"

# Remove $1 if it exists as a directory in the current directory.
delete_folder_if_exists(){
    folder=$1
    if [ -d "./$folder" ]; then
        rm -rf "$folder"
        echo "delete $folder "
    fi
}

#checkout the repo for generated code
delete_folder_if_exists "$SDK_REPO_FOLDER"
git clone "$SDK_REPO" "$SDK_REPO_FOLDER"
echo "clone $SDK_REPO_FOLDER"

if [ ! -d "$SDK_REPO_FOLDER" ]; then
    echo "Error: ${SDK_REPO_FOLDER} not found. Can not continue."
    exit 1
fi

if [ ! -d "$SDK_GEN_FOLDER" ]; then
    echo "Error: ${SDK_GEN_FOLDER} not found. Can not continue."
    exit 1
fi

#into the repo
cd "$SDK_REPO_FOLDER" || exit 1

# Clean up the repo folder; remove everything except the .git folder.
# ('.' starting point added: POSIX find requires an explicit path.)
find . -maxdepth 1 ! -name .git ! -name . -exec rm -rv {} \;

#copy the generated code into repo
cp -a "../$SDK_GEN_FOLDER/." .
echo "copy $SDK_GEN_FOLDER to $SDK_REPO_FOLDER "

#start commit
if [ "$COMMIT" = "true" ]; then
    echo "$COMMIT_MSG"
    git add .
    git commit -a -m "$COMMIT_MSG"
    git push origin master
    git tag -a "$SDK_TAG" -m "$TAG_MSG"
    git push origin "$SDK_TAG"
    echo "git pushed"
fi
| true
|
e8e21a0bf82035aaf96b083cf0d86f26e1d8cb98
|
Shell
|
jmchatman/BASH
|
/bash_tutorial/ch_09/reply.sh
|
UTF-8
| 286
| 2.78125
| 3
|
[] |
no_license
|
#!/bin/bash
# reply.sh - demonstrates `read`: without a variable name the input lands in
# the builtin $REPLY; naming a variable leaves $REPLY untouched.
echo
echo -n "What is your favorite vegetable?"
read -r
echo "Your favorite vegetable is $REPLY."
echo
echo -n "What is your favorite fruit?"
read -r fruit
echo "Your favorite fruit is $fruit."
echo "but..."
echo "Value of \$REPLY is still $REPLY."
echo
exit 0
| true
|
2f2a4e38da403e2770f266564ffe579d7c2590f1
|
Shell
|
calestar/my-config
|
/bin/serve_doc
|
UTF-8
| 458
| 3.671875
| 4
|
[] |
no_license
|
#!/bin/bash
# Serve a directory of markdown files over HTTP on port 6419 using the
# fstab/grip Docker image (GitHub-flavoured rendering).
# Usage: serve_doc <path-or-file>

if [ "$#" -ne 1 ]; then
  echo "Expecting exactly one argument: the path (or file) to serve"
  # Exit non-zero on a usage error (the original bare 'exit' returned 0).
  exit 1
fi

# Accept either a directory or a file; serve the file's parent directory.
if [ -d "$1" ]; then
  directory=$1
else
  directory=$(dirname "$1")
fi
directory=$(realpath "$directory")

echo "Serving $directory, will be hosted on:"
# Derive the host IP from DOCKER_HOST (tcp://IP:PORT) and print the URL.
echo $DOCKER_HOST | sed -e 's![^/]*//\([^:]*\).*!http://\1:6419!'

docker run \
	--rm \
	-v "$directory":/data \
	-p 6419:6419 \
	-t \
	-i fstab/grip \
	grip /data/ 0.0.0.0:6419
| true
|
88d63e6630259e254bb98ed4924023642c175b4a
|
Shell
|
iagoleal/dotfiles
|
/.bin/searcher
|
UTF-8
| 381
| 3.46875
| 3
|
[] |
no_license
|
#!/bin/sh
# Fuzzy-find a file under the given folder (ripgrep + fzf with previews)
# and open the selection via xdg-open spawned through herbstclient.
# Usage: searcher <folder>

FOLDER="$(realpath "$1")"

finder() {
  fzf-ueberzogen.sh --prompt="β " --scheme=path --info=inline --no-multi --preview 'fzf-preview.sh {}'
}

spawn() {
  herbstclient spawn xdg-open "$@"
}

# List files relative to FOLDER and let the user pick one.
item=$(cd "$FOLDER" && rg --files | sed "s:^\./::g" | finder)
exit_code=$?
item_path="$FOLDER/$item"

# Open the pick only when fzf exited successfully (a selection was made);
# otherwise propagate fzf's status (e.g. 130 on Esc). Note: '-eq' replaces
# the bashism '==', which is not valid under #!/bin/sh 'test'.
if [ "$exit_code" -eq 0 ]; then
  spawn "$item_path"
else
  exit "$exit_code"
fi
| true
|
e46292fffc4772845ef6ae794221f0b9c7471b07
|
Shell
|
sspeng/HTMBench
|
/benchmark/memcached-set/memcached/compile.sh
|
UTF-8
| 726
| 3.046875
| 3
|
[] |
no_license
|
#!/bin/bash
# Build memcached in one of three modes (first argument):
#   origin - vanilla build at -O3, installed into build_origin/
#   rtm    - build linked against the Intel TSX RTM library in $TSX_ROOT
#   clean  - remove all build artifacts
set -x
CURRENT=$(pwd)
TM_LIB=${TSX_ROOT}/lib/rtm
chmod u+x configure
if [ "$1" == "origin" ]; then
./configure --prefix=$CURRENT/build_origin
sed -i 's/-O2/-O3/g' Makefile
make clean
make -j && make install
mv memcached build_origin/
fi
if [ "$1" == "rtm" ]; then
./configure --prefix=$CURRENT/build_rtm
make clean
# Inject RTM defines, include paths and link flags into the Makefile.
./makefile_editor.py Makefile Makefile.rtm "CPPFLAGS+=-DRTM -I$TM_LIB" "INCLUDES+=-I$TM_LIB" "CFLAGS+=-I$TM_LIB -pthread" "LDFLAGS+=-L$TM_LIB" "LIBS+=-lrtm"
mv Makefile.rtm Makefile
sed -i 's/-O2/-O3/g' Makefile
# Build the RTM support library first, then memcached itself.
cd $TSX_ROOT/lib
make CC=gcc
cd $CURRENT
make -j && make install
#mv memcached build_rtm/
fi
if [ "$1" == "clean" ]; then
make clean
rm -rf build_rtm build_origin
fi
| true
|
38269670b0f8c7dbaf705b2fb93d122260649923
|
Shell
|
kmhofmann/selene
|
/.azure-pipelines/ubuntu-18.04_install_vcpkg_deps.sh
|
UTF-8
| 501
| 2.953125
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Bootstrap vcpkg and install the requested libraries, then prune build
# trees/downloads/packages to keep the CI cache small.
# Usage: $0 <vcpkg_base_dir> <library>...

vcpkg_base_dir=$1
shift
# Keep the library names in an array so values survive word-splitting intact.
vcpkg_libraries=("$@")

echo "vcpkg_base_dir=${vcpkg_base_dir}"
echo "vcpkg_libraries=${vcpkg_libraries[*]}"
echo "CC=${CC}"
echo "CXX=${CXX}"
echo "CXXFLAGS=${CXXFLAGS}"
echo "LDFLAGS=${LDFLAGS}"

# Bootstrap with GCC 8 and neutral flags regardless of the CI environment.
CC=gcc-8 CXX=g++-8 CXXFLAGS="" LDFLAGS="" "${vcpkg_base_dir}/vcpkg/bootstrap-vcpkg.sh"

"${vcpkg_base_dir}/vcpkg/vcpkg" install "${vcpkg_libraries[@]}"

# Remove intermediate artifacts; only the installed tree is needed.
rm -rf "${vcpkg_base_dir}/vcpkg/buildtrees"
rm -rf "${vcpkg_base_dir}/vcpkg/downloads"
rm -rf "${vcpkg_base_dir}/vcpkg/packages"
| true
|
96dc83b80086cf4e47c01111af85b869cfdb1c2c
|
Shell
|
HongboTang/Dpseudoobscura_LinkedSelection
|
/Scripts_AlignmentAndSNPCalling/step5_initialvariants.sh
|
UTF-8
| 586
| 2.90625
| 3
|
[] |
no_license
|
#!/bin/bash
# SLURM job: run GATK 3.8 HaplotypeCaller in GVCF mode over every BAM in the
# working directory, producing one <sample>.g.vcf.gz per input.
#SBATCH --mem=100GB
cd /datacommons/noor2/klk37/BackgroundSelection/BAMS_forGATK
# GATK 3.8 requires Java 8; put it ahead of the system JDK.
PATH=/datacommons/noor/klk37/java/jdk1.8.0_144/bin:$PATH
export PATH
#Create initial variant calls
#ulimit -c unlimited
# The pattern stays literal here; it expands when $FILES is used unquoted
# in the for-loop below.
FILES=*.bam
for BAM in $FILES
do
# Sample ID is the BAM filename up to the first '.'.
ID="$(echo ${BAM} | awk -F'[.]' '{print $1}')"
echo "calling variants for $ID"
OUT="$ID".g.vcf.gz
echo "$OUT"
java -jar /datacommons/noor2/klk37/BackgroundSelection/GATK-3.8-0/GenomeAnalysisTK.jar -T HaplotypeCaller \
-R /datacommons/noor2/klk37/BackgroundSelection/dpse-all-chromosome-r3.04.fasta -I $BAM -o $OUT -ERC GVCF
done
| true
|
838466bafcedc8f81f63424ed04500e1dd829405
|
Shell
|
RNNPredict/LSTM-MXNet-
|
/production/submit-job.sh
|
UTF-8
| 1,891
| 3.296875
| 3
|
[] |
no_license
|
#!/bin/sh
# Submit an LSTM training job to AWS Batch, using the newest ACTIVE revision
# of $JOB_DEFINITION_NAME on queue $JOB_QUEUE_NAME. Hyper-parameters come
# from the environment, with defaults filled in below.
# Must be run from the root of the local git repository.

WKDIR=$(pwd)

if [ ! -d "${WKDIR}/.git" ]; then
    echo 'This script must be executed on local git repository root dir.' 1>&2
    exit 1
fi

if [ -z "$JOB_QUEUE_NAME" ]; then
    echo "Missing environment variable 'JOB_QUEUE_NAME'." 1>&2
    exit 1
fi

if [ -z "$JOB_DEFINITION_NAME" ]; then
    echo "Missing environment variable 'JOB_DEFINITION_NAME'." 1>&2
    exit 1
fi

# Unique job name based on the current epoch second.
JOB_NAME="job-$(date +%s)"

# aws --region $AWS_DEFAULT_REGION batch register-job-definition \
#    --job-definition-name ${JOB_DEFINITION_NAME} \
#    --container-properties file://production/aws/batch/job-mxnet.json \
#    --type container

# Resolve the ARN of the newest ACTIVE revision of the job definition.
JOB_DEFINITION_ARN=$( aws --region $AWS_DEFAULT_REGION batch describe-job-definitions \
    --job-definition-name ${JOB_DEFINITION_NAME} \
    --status ACTIVE \
    | jq -r '.jobDefinitions | max_by(.revision).jobDefinitionArn' \
 ) && echo ${JOB_DEFINITION_ARN}

# Hyper-parameter defaults. POSIX ':=' assigns when the variable is unset
# or empty, matching the original [ -z ... ] checks.
: "${SEQUENCE_LEN:=130}"
: "${LSTM_LAYERS:=3}"
: "${UNITS_IN_CELL:=512}"
: "${BATCH_SIZE:=32}"
: "${LEARNING_EPOCHS:=10}"
: "${LEARNING_RATE:=0.01}"
: "${GPUS:=0}"

aws --region $AWS_DEFAULT_REGION batch submit-job \
    --job-name ${JOB_NAME} --job-queue ${JOB_QUEUE_NAME} \
    --job-definition ${JOB_DEFINITION_ARN} \
    --container-overrides "{\"environment\": [
      { \"name\": \"SEQUENCE_LEN\", \"value\": \"${SEQUENCE_LEN}\"},
      { \"name\": \"LSTM_LAYERS\", \"value\": \"${LSTM_LAYERS}\"},
      { \"name\": \"UNITS_IN_CELL\", \"value\": \"${UNITS_IN_CELL}\"},
      { \"name\": \"BATCH_SIZE\", \"value\": \"${BATCH_SIZE}\"},
      { \"name\": \"LEARNING_EPOCHS\", \"value\": \"${LEARNING_EPOCHS}\"},
      { \"name\": \"LEARNING_RATE\", \"value\": \"${LEARNING_RATE}\"},
      { \"name\": \"GPUS\", \"value\": \"${GPUS}\"}
    ]}"
| true
|
d39ba110c14b9f4586aea5918495b7b40c29ce1f
|
Shell
|
mikalv/mkinitcpio-ykfde
|
/install/ykfde
|
UTF-8
| 352
| 2.6875
| 3
|
[] |
no_license
|
#!/bin/sh
# mkinitcpio install hook for ykfde: bundles the Yubikey full-disk-encryption
# unlock helper and its configuration into the initramfs image.

# Called by mkinitcpio when building the image: add the udev helper binary,
# its udev rule, the config file, the stored challenge, and the usbhid module.
build() {
add_binary /usr/lib/udev/ykfde
add_file /usr/lib/initcpio/udev/20-ykfde.rules /usr/lib/udev/rules.d/20-ykfde.rules
add_file /etc/ykfde.conf
add_file /etc/ykfde-challenge /
add_module 'usbhid'
}

# Shown by 'mkinitcpio -H ykfde'.
help() {
echo "This hook adds support for opening LUKS devices with Yubico key."
echo "Please use command 'ykfde' to prepare."
}
| true
|
bd3bc1d20a0551a27d1dcb9350bf4d655c926b3a
|
Shell
|
lukakerr/dotfiles
|
/bin/git_ssh.sh
|
UTF-8
| 1,124
| 4.3125
| 4
|
[] |
no_license
|
# Change all git repository remote urls to ssh
# After the first run, there should be no output
# if ran again (and no new git repos are created).
# This means all remote origins have been updated

# Rewrite the current repo's origin to git@github.com:<user>/<repo>.git.
function set_url() {
  echo "Updating remote to: " "git@github.com:$1/$2.git"
  git remote set-url origin "git@github.com:$1/$2.git"
}

# Array of directories containing git directories
# e.g:
# ~/dev
#   git_dir_1/
GIT_BASE_DIRS=(
  ~/dev
  ~/CLD
  ~/Ironskinn
  ~
)

# Visit every subdirectory of $1 and update its origin if it is a repo.
function change() {
  for dir in "$1"/*; do
    if [[ -d $dir ]]; then
      cd "$dir" || continue
      set_origin "$dir"
    fi
  done
}

# If the cwd is a git repo whose origin is an http(s) GitHub URL, rewrite
# it to the SSH form. (The unused REPO=$1 assignment has been removed;
# the argument is kept for call-site compatibility.)
function set_origin() {
  if [ -d .git ]; then
    REMOTE=$(git config --get remote.origin.url)

    # Remote is a github url
    if [[ $REMOTE = *"://github.com"* ]]; then
      REPO_NAME=$(basename "$(git rev-parse --show-toplevel)")

      # Matches a github username or organization
      regex="github\.com\/(.*)\/"

      if [[ $REMOTE =~ $regex ]]; then
        USER_NAME=${BASH_REMATCH[1]}
        set_url "$USER_NAME" "$REPO_NAME"
      fi
    fi
  fi
}

for git_dir in "${GIT_BASE_DIRS[@]}"; do
  cd "$git_dir" || continue
  change "$git_dir"
done
| true
|
55a180eefb6c03b2f94824685a86c936cbcd56c4
|
Shell
|
chuckxyu/docker-cent6-mrtg
|
/set.sh
|
UTF-8
| 543
| 2.71875
| 3
|
[] |
no_license
|
#!/bin/bash
# Container entry helper: substitute this host's eth0 IP into the snmpd and
# MRTG configuration templates, start snmpd, verify it answers, then run the
# MRTG setup script.
sleep 2
# Extract the IPv4 address of eth0 (strips the /prefix suffix).
IP=$(ip addr list eth0 | grep "inet " | cut -d' ' -f6 | cut -d/ -f1)
echo "ip address = $IP"
# Replace the %%IPADDRESS%% placeholder in both templates.
sed -ri "s/%%IPADDRESS%%/$IP/" /etc/snmp/snmpd.conf
sed -ri "s/%%IPADDRESS%%/$IP/" /root/mk-mrtg.sh
# Show the substituted lines for a quick sanity check.
cat /etc/snmp/snmpd.conf | grep mynetwork
cat /root/mk-mrtg.sh | grep cfgmaker
echo "Start snmp service.."
service snmpd start
chkconfig snmpd on
sleep 1
echo "Check snmpwalk at $IP."
snmpwalk -v 2c -c public $IP | head -10
#sed -ri "s/%%IPADDRESS%%/$IP/" /root/mk-mrtg.sh
echo "Start mrtg configuration .. "
/root/mk-mrtg.sh
| true
|
263b01b4ee82b5a1d4a78448b21d6712b37ed7d7
|
Shell
|
TapiocaTechnologies/.scripts
|
/ping_hosts
|
UTF-8
| 603
| 3.421875
| 3
|
[] |
no_license
|
#!/bin/bash
# Interactively ping a user-supplied list of hosts and report reachability.

# Banner; fall back to plain text if figlet is not installed.
figlet Tapioca Ping || echo -e "Tapioca Ping"
echo "-------------------------------------------------------------------------------------------"
echo "Please enter a list of IPs / hostnames to ping (ex localhost 127.0.0.1): "
# -r keeps backslashes literal; -a splits the input into an array.
read -r -a serverList

for host in "${serverList[@]}"; do
	echo -e "Pinging $host: "
	# Single probe per host; branch directly on ping's exit status.
	if ping -c 1 "$host"; then
		echo -e "$host ping successful\n
-------------------------------------------------"
	else
		echo -e "$host ping unsuccessful\n
-------------------------------------------------"
	fi
done
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.