blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
2fa8f18c7a553e3e3be861c47fc83e7379ad24b1 | Shell | CycloneAwakening/ZonamaDev | /basebox/scripts/firstboot.d/82home-link | UTF-8 | 341 | 3.546875 | 4 | [
"MIT"
] | permissive | #!/bin/bash
#
# TODO - should we even do this? Doing for now to make it easy but worry about stuff creeping into code with hard coded paths
cd /home
(
l=$(readlink swgemu)
if [ -z "$l" -o "$l" != "${ZDUSER}" ]; then
[ -L swgemu ] && rm swgemu
ln -vfs ${ZDUSER} swgemu && echo "Created /home/swgemu -> ${ZDHOME} symlink"
fi
)
| true |
37d7032491b58e9027ab0289b9358fc9196afe75 | Shell | forksbot/provision | /test/install.test.bats | UTF-8 | 3,856 | 2.90625 | 3 | [] | no_license | #!/usr/bin/env bats
@test "system" {
locale -a | grep -q "en_GB.utf8"
locale -a | grep -q "en_US.utf8"
[ -n "$TRAVIS" ] && skip "travis/docker does not have systemd"
localectl status | grep -q "LANG=en_GB.UTF-8"
localectl status | grep -q "X11 Layout: us"
localectl status | grep -qE "X11 Model: pc10."
localectl status | grep -q "X11 Variant: colemak"
}
# Tests that expected stuff has been installed and are on PATH
@test "apt" {
if [[ $(lsb_release -si) == "Arch" ]]; then
run which chromium
else
run which google-chrome
fi
[ "$status" -eq 0 ]
run which guake
[ "$status" -eq 0 ]
run which cmake
[ "$status" -eq 0 ]
}
@test "llvm" {
run clang --version
[ "$status" -eq 0 ]
echo "$output" && echo "$output" | grep "3.7.1"
# compiled s.t. we have sanitizers
if [[ $(lsb_release -si) == "Arch" ]]; then
run ls /usr/lib/clang/3.7.1/lib/linux/libclang_rt.asan_cxx-x86_64.a
else
run ls /usr/local/lib/clang/3.7.1/lib/linux/libclang_rt.asan_cxx-x86_64.a
fi
[ "$status" -eq 0 ]
# with lldb
run lldb --version
[ "$status" -eq 0 ]
echo "$output" && echo "$output" | grep "3.7.1"
# with analyzer and scan-build
run c++-analyzer --version
[ "$status" -eq 0 ]
run which scan-build
[ "$status" -eq 0 ]
}
@test "profanity" {
run which profanity
[ "$status" -eq 0 ]
run profanity --version
[ "$status" -eq 0 ]
echo "$output"
echo "$output" | grep "OTR support\: Enabled"
if [ -z "$TRAVIS" ]; then
# Won't have desktop support in travis container
echo "$output" | grep "Desktop notification support\: Enabled"
fi
}
@test "node" {
run which node
[ "$status" -eq 0 ]
run node --version
[ "$status" -eq 0 ]
echo "$output" && echo "$output" | grep "v4."
run node -pe process.release.lts
echo "$output" && echo "$output" | grep "Argon"
}
@test "sublime" {
run which subl
[ "$status" -eq 0 ]
}
@test "clone" {
run which arc
[ "$status" -eq 0 ]
run man -w z
[ "$status" -eq 0 ]
run man -w bats
[ "$status" -eq 0 ]
}
@test "dotfiles" {
# Verify that directories are created and dotfiles are linked
[ -d "$HOME/repos/dotfiles" ]
[ -L "$HOME/.aliases" ]
[ -L "$HOME/.bash_profile" ]
[ -L "$HOME/.prompt" ]
[ -L "$HOME/.bashrc" ]
[ -d "$HOME/.config/sublime-text-3/Packages/User" ]
[ -L "$HOME/.config/sublime-text-3/Packages/User" ]
[ -r "$HOME/.config/sublime-text-3/Packages/User/SublimeLinter.sublime-settings" ]
[ -L "$HOME/.clang-format" ]
[ -L "$HOME/.dircolors" ]
[ -L "$HOME/.eslintrc" ]
[ -L "$HOME/.exports" ]
[ -L "$HOME/.functions" ]
[ -L "$HOME/.ghci" ]
[ -L "$HOME/.gitconfig" ]
[ -L "$HOME/.hgrc" ]
[ -L "$HOME/.iface" ]
[ -L "$HOME/.inputrc" ]
[ -L "$HOME/.jshintrc" ]
[ -L "$HOME/.mpdconf" ]
[ -L "$HOME/.nanorc" ]
[ -d "$HOME/.ncmpcpp" ]
[ -L "$HOME/.path" ]
[ -d "$HOME/.templates" ]
[ -L "$HOME/.tmux.conf" ]
[ -L "$HOME/.xprofile" ]
[ -L "$HOME/.Xresources" ]
[ -L "$HOME/.yrcli.json" ]
}
@test "evars" {
[ -n "$TRAVIS" ] && skip "travis/docker shell by design not interactive"
[ "$CXX" = "clang++" ]
}
@test "npm" {
run which badgify
[ "$status" -eq 0 ]
run which pm2
[ "$status" -eq 0 ]
}
@test "pip" {
run which pylint
[ "$status" -eq 0 ]
}
@test "cluxdev" {
[ -n "$TRAVIS" ] && skip "not building + linking all dev modules on travis"
[ -d "$HOME/repos" ]
run which bndg # should have been symlinked
[ "$status" -eq 0 ]
}
@test "secrets" {
[ -n "$TRAVIS" ] && skip "no priveleges to do secrets test on travis"
[ -r "$HOME/.config/Mumble/Mumble.conf" ]
[ -d "$HOME/.ssh" ]
[ -r "$HOME/.ssh/config" ]
[ -d "$HOME/.ssh/.git" ]
[ -r "$HOME/.ssh/.gitignore" ]
[ -d "$HOME/.gnupg" ]
[ -d "$HOME/.gnupg/.git" ]
run gpg --list-keys
echo "$output" && echo "$output" | grep -q "clux"
[ -d "$HOME/repos/dotclux" ]
}
| true |
c5f69a9837d5f6ce5568261ddb02fc0a8282aaf3 | Shell | za-ek/cli | /port-forward | UTF-8 | 885 | 3.921875 | 4 | [
"MIT"
] | permissive | #!/bin/bash
function getAddress {
echo "$1" | cut -d':' -f 1;
}
function getPort {
echo "$1" | cut -d':' -f 2;
}
case "$1" in
h)
printf "\tportforward - Simplification for iptables port forwarding\n";
printf "\t--Usage: portforward tcp|udp|all src dst\n";
printf "\t--Example: portforward tcp eth0:10080 192.168.56.100:80\n";
;;
tcp|udp)
if [ ! -z $2 ] && [ ! -z $3 ]; then
iptables -t nat -A PREROUTING -p $1 -i $(getAddress $2) --dport $(getPort $2) -j DNAT --to-destination $(getAddress $3):$(getPort $3)
iptables -A FORWARD -p $1 -d $(getAddress $3) --dport $(getPort $3) -m state --state NEW,ESTABLISHED,RELATED -j ACCEPT
echo "Done";
else
echo "Missing param";
$0 h;
fi
;;
all)
$0 tcp $2 $3;
$0 udp $2 $3;
;;
*)
read -p "Use TCP? [y/N]" -n 1 -r
echo
if [[ $REPLY =~ ^[Yy]$ ]]
then
$0 tcp $1 $2
fi
;;
esac
exit 0;
| true |
2782f72e4ff741d8e334f859ef7f24cb298f19cb | Shell | fredericoalvares/csar-public-library | /com/easyvirt/tensorflow/linux/scripts/install_tensorflow.sh | UTF-8 | 241 | 3.265625 | 3 | [] | no_license | #!/bin/bash -e
echo "Using pip. Installing tensorflow on unbuntu."
PACKAGE="tensorflow"
if [[ ("$GPU_ENABLED" == "true") ]]; then
PACKAGE="${PACKAGE}-gpu"
fi
PACKAGE="${PACKAGE}==$TF_VERSION"
pip install $PACKAGE
echo "End of $0"
| true |
307e11754ce91c0ad043aa5d8df843edb2e15a87 | Shell | apple/swift-corelibs-foundation | /Darwin/Config/install-swiftmodules.sh | UTF-8 | 1,013 | 3.15625 | 3 | [
"Apache-2.0",
"Swift-exception"
] | permissive | #!/bin/sh
#===----------------------------------------------------------------------===//
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2020 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See http://swift.org/LICENSE.txt for license information
# See http://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
#===----------------------------------------------------------------------===//
set -e
#set -xv
# This only needs to run during installation, but that includes "installapi".
[ "$ACTION" = "installapi" -o "$ACTION" = "install" ] || exit 0
[ "$SKIP_INSTALL" != "YES" ] || exit 0
[ "$SWIFT_INSTALL_MODULES" = "YES" ] || exit 0
srcmodule="${BUILT_PRODUCTS_DIR}/${PRODUCT_NAME}.swiftmodule"
dstpath="${INSTALL_ROOT}/${INSTALL_PATH}/"
if [ ! -d "$srcmodule" ]; then
echo "Cannot find Swift module at $srcmodule" >&2
exit 1
fi
mkdir -p "$dstpath"
cp -r "$srcmodule" "$dstpath"
| true |
6aba298ee524d89ec13b89c2d24dde08789c0954 | Shell | FrancisVega/s-front-workflow | /install.sh | UTF-8 | 2,136 | 4.21875 | 4 | [] | no_license | #!/bin/bash
# Instala el proyecto Gulp Secuoyas.
# Dependencias: nodejs
# Secuoyas 2015
# Programs
APPS=(node bower gulp)
# Symbols
SKULL="\xE2\x98\xA0"
CHECK="\xe2\x9c\x93"
# Colors
colorReset="\x1b[39;49;00m"
colorRed="\x1b[31;01m"
colorGreen="\x1b[32;01m"
colorPirite="\x1b[43;01m]"
# Welcome
echo -e "\nSecuoyas front-end instalation script\n"
# Func. Comprueba si existe un programa.
command_exists () { type "$1" &> /dev/null ; }
# Func. Instala un programa.
install_app () {
# Node
# Si no está instalado node, el script te avisa y para el proceso de instalación.
if [ "$1" = ${APPS[0]} ]; then
echo "Instala node.js con macports, homebrew o desde www.nodejs.org\n";
echo -e "Instalación abortada"
exit
fi
# Bower
# Si no está bower se instala con npm
if [ "$1" = ${APPS[1]} ]; then
read -p "Bower no está en el sistema, ¿instalarlo? (s/n)? " answer
case ${answer:0:1} in
s|S )
echo "npm install -g bower";;
* )
echo No
echo -e "Instalación abortada"
exit;;
esac
fi
# Gulp (global)
# Si no está gulp se instala con npm
if [ "$1" = ${APPS[2]} ]; then
read -p "Gulp no está en el sistema, ¿instalarlo? (s/n)? " answer
case ${answer:0:1} in
s|S )
echo "npm install -g gulp";;
* )
echo No
echo -e "Instalación abortada"
exit;;
esac
fi
}
# Pasamos por todos los programas (node y bower)
for ((i=0; i<${#APPS[*]}; i++));
do
# Si no está instalado, sugemios la instalación
if ! command_exists ${APPS[$i]}; then
echo -e ${APPS[$i]} $colorRed$SKULL $colorReset
install_app ${APPS[$i]};
else
echo -e ${APPS[$i]} $colorGreen$CHECK $colorReset
fi
done
# Con todas las dependencias instaladas, vamos con el proyecto.
echo -e "\nInstalando dependencias de node y bower"
npm install --save-dev --silent
bower install --save
# Bye :)
echo -e "\nParece que todo ha ido bien. Buena suerte :)\n"
| true |
9ee9a7925d287e8ff0e840bd8b528a404edbfef9 | Shell | isholao/gitlist | /scripts/optimize.sh | UTF-8 | 339 | 2.609375 | 3 | [
"MIT",
"BSD-3-Clause"
] | permissive | #!/usr/bin/env bash
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
TOP=$DIR/..
set -e
pushd $TOP
sudo rm -rf ./cache
mkdir -p ./cache
touch ./cache/.gitkeep
chmod 0770 ./cache
composer install
composer install --no-dev
composer dump-autoload --optimize
npm install --unsafe-perm
npm run build --verbose
rm -rf ./node_modules
popd
| true |
2118dc31e380726e17c8be085aaba224aaffa524 | Shell | PedroFSousa/verbose-eureka | /scripts/setup-host/linux/setup-host.sh | UTF-8 | 4,308 | 3.796875 | 4 | [] | no_license | #!/bin/bash
set -e
rollback_dockerd_args () {
if [ ! -z "$need_rollback" ]; then
[ -z "$original_docker_userns_remap" ] && \
docker_daemon_rm_arg "userns-remap" || \
docker_daemon_set_arg "userns-remap" "$original_docker_userns_remap" 1
[ -z "$original_docker_data_root" ] && \
docker_daemon_rm_arg "data-root" || \
docker_daemon_set_arg "data-root" "$original_docker_data_root" 1
echo "Rolled back Docker daemon configuration."
else
echo "No Docker daemon configuration rollback necessary."
fi
}
trap rollback_dockerd_args ERR
daemon_conf_path=/etc/docker/daemon.json
default_data_root=/var/lib/docker
min_data_root_space=100000000
dockremap_id_first=500000
dockremap_id_range=65536
max_user_namespaces=15000
force=0
# formatting
tab_size=4
fold_width=$(echo $(($(stty size | awk '{print $2}' 2>/dev/null) - $tab_size)) || echo 75)
tabs $tab_size
color () { echo -en "\033[1;$1m$2\033[0m"; }
bold () { echo -en "\033[1m$1\033[0m"; }
uline () { echo -en "\033[4m$1\033[0m"; }
title () { echo -en "$(bold "$(uline "$1"):\n")"; }
indent () { printf "%$1s" | tr ' ' '\t'; }
echof () { echo -e "$1" | fold -s -w $fold_width | sed -e "s|^|$(indent $2)|g";}
# log functions
err () { echof "[$(color 31 "_ERR")] $1" $2; return 1; }
info () { echof "[$(color 34 "INFO")] $1" $2; }
warn () { echof "[$(color 33 "WARN")] $1" $2; }
# parse arguments
for i in "$@"
do
case $i in
-f)
force=1
;;
*)
err "Unrecognized argument: $i"
;;
esac
done
# check privileges
[ "$EUID" -ne 0 ] && err "Script must be run with root privileges."
# get os
eval $(grep "^ID=" /etc/os-release || true)
os=$(echo -n $ID)
echof "Detected Linux distro: $os"
[ "$os" != "rhel" ] && [ "$os" != "centos" ] && [ "$os" != "ubuntu" ] && [ "$os" != "debian" ] && err "Unsupported Linux distro: '$os'"
# intro text
info_text="This script will configure this machine to allow for the Coral Docker stack to be deployed.
If you have any questions, please contact us at coral@lists.inesctec.pt."
todos_rhel="- Update all packages on the system (yum update)
\n- Enable repos:\n\t$(color 33 rhel-7-server-extras-rpms)\n\t$(color 33 rhel-7-server-optional-rpms)\n\t$(color 33 rhel-server-rhscl-7-rpms)
\n- Add the Docker CE repo:\n\t$(color 33 https://download.docker.com/linux/centos/docker-ce.repo)
\n- Configure user namespaces in the Linux kernel\n (required for secure isolation of Docker containers)
\n- Install packages (and dependencies):\n\t$(color 33 yum-utils)\n\t$(color 33 rsync)\n\t$(color 33 @development)\n\t$(color 33 rh-python36)\n\t$(color 33 docker-ce)\n\t$(color 33 docker-compose)
\n- Start the Docker daemon and set it to automatically start on boot\n"
todos_centos="- Update all packages on the system (yum update)
\n- Add the Docker CE repo:\n\t$(color 33 https://download.docker.com/linux/centos/docker-ce.repo)
\n- Configure user namespaces in the Linux kernel\n (required for secure isolation of Docker containers)
\n- Install packages (and dependencies):\n\t$(color 33 yum-utils)\n\t$(color 33 rsync)\n\t$(color 33 centos-release-scl)\n\t$(color 33 rh-python36)\n\t$(color 33 docker-ce)\n\t$(color 33 docker-compose)
\n- Start the Docker daemon and set it to automatically start on boot\n"
todos_ubuntu="- Update all packages on the system (apt-get update)
\n- Add the Docker CE repo:\n\t$(color 33 https://download.docker.com/linux/$os stable)
\n- Install packages (and dependencies):\n\t$(color 33 curl)\n\t$(color 33 apt-transport-https)\n\t$(color 33 ca-certificates)\n\t$(color 33 software-properties-common)\n\t$(color 33 gnupg)\n\t$(color 33 python3.6)\n\t$(color 33 docker-ce)\n\t$(color 33 docker-compose)
\n- Add the current user to the docker group and set ownership and permissions on the default volumes directory\n"
todos_debian=$todos_ubuntu
todos=$(echo \$todos_$os)
title "\nINTRO"
echof "$info_text\n" 1
title "YOU ARE ABOUT TO"
echof "$(eval echo $(echo $todos))" 1
echo -e "$(color 32 "Press any key to proceed")\t(Ctrl-C to cancel)\n"
read -n 1 -s -r
# execute setup script for detected os
if [ "$os" = "rhel" ] || [ "$os" = "centos" ]; then source rpm.sh
elif [ "$os" = "ubuntu" ] || [ "$os" = "debian" ]; then source deb.sh
else err "Unsupported Linux distro: '$os'"; fi
# done
info "Host is now ready to deploy the Coral Docker stack.\n"
| true |
1dccbeb57e3fddc8ea6c3757a623ebb080b39cd7 | Shell | cucumberlinux/lfscript | /scripts/lfs-11236/man-pages | UTF-8 | 667 | 3.421875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# The instructions in this file are extracted from
# 'Linux From Scratch 8.1' (SVN-20170428 / r11236) but are modified for use
# with LFScript 4 which installs the software to a fake root directory.
#
# Linux From Scratch is released under the MIT license.
# Copyright (C) 1999-2017, Gerard Beekmans
WGETLIST="https://www.kernel.org/pub/linux/docs/man-pages/man-pages-4.10.tar.xz"
MD5SUMLIST="c76a2844ea70e374fdff9c2e61bbe85b"
###############################################
installation() { # INSTALLING SYSTEM SOFTWARE #
###############################################
make DESTDIR=${FAKEROOT} install
#################
} # END OF FILE #
#################
| true |
c01d3f4393d00a04d8d279cfb6392744cdef491c | Shell | lanyuqingri/yodart | /runtime/lib/cloudapi/login/request.sh | UTF-8 | 1,155 | 3.46875 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
HOST="device-account.rokid.com"
ROKID_MASTER_ID=""
while [ $# -gt 0 ]; do
case "$1" in
-h)
HOST="$2"
shift
;;
-u)
ROKID_MASTER_ID="$2"
shift
;;
--*)
echo "Illegal option $1"
;;
esac
shift $(( $# > 0 ? 1 : 0 ))
done
NOW_EPOCH_TIMESTAMP=`date +%s`
DEVICE_ID=`getprop ro.boot.serialno`
DEVICE_TYPE_ID=`getprop ro.boot.devicetypeid`
__DIRNAME=`dirname $0`
DEVICE_SECRET=`sh ${__DIRNAME}/print-secret.sh`
MY_SIGN="${DEVICE_SECRET}${DEVICE_TYPE_ID}${DEVICE_ID}${NOW_EPOCH_TIMESTAMP}${DEVICE_SECRET}"
RL_SIGN=`echo $MY_SIGN | head -c -1 | md5sum | head -n1 | cut -d " " -f1 | awk '{print toupper($0)}'`
MY_OPTS="
deviceId=$DEVICE_ID
deviceTypeId=$DEVICE_TYPE_ID
namespaces=basic_info,custom_config
time=$NOW_EPOCH_TIMESTAMP
sign=$RL_SIGN
"
if [[ ! -z $ROKID_MASTER_ID ]]; then
MY_OPTS="$MY_OPTS userId=$ROKID_MASTER_ID"
fi
qs_stringify() {
local IFS="$1"; shift; echo "$*";
}
POST_DATA=`qs_stringify '&' $MY_OPTS`
URI="https://$HOST/device/loginV2.do"
curl -D /tmp/LOGIN_HEADER -H "Content-Type: application/x-www-form-urlencoded" -d "$POST_DATA" $URI
| true |
3d5223e1bf94279bf9ccb0babdfdd20ce3f167de | Shell | JBZoo/Cli | /demo/movies/progress-bar.sh | UTF-8 | 2,542 | 3.375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env sh
#
# JBZoo Toolbox - Cli.
#
# This file is part of the JBZoo Toolbox project.
# For the full copyright and license information, please view the LICENSE
# file that was distributed with this source code.
#
# @license MIT
# @copyright Copyright (C) JBZoo.com, All rights reserved.
# @see https://github.com/JBZoo/Cli
#
. demo-magic.sh
cd ..
clear
pei "# In this demo, you will see the basic features of Progress Bar for CLI app."
pei "# ProgressBar helps you perform looping actions and output extra info to profile your app."
pei "# Now we will take a look at its main features."
pei "# See it here './demo/Commands/DemoOutput.php'"
pei "# Let's get started!"
wait
pei ""
pei ""
pei "# At first, let me show you the output of the progress by default."
pei "./my-app progress-bar --case=simple"
wait
pei "clear"
pei "# Different levels of verbosity give different levels of detail."
pei ""
pei ""
pei "# Verbose level (-v)"
pei "./my-app progress-bar --case=messages -v"
wait
pei ""
pei "# Very Verbose level (-vv)"
pei "./my-app progress-bar --case=messages -vv"
wait
pei ""
pei "# Debug level, max (-vvv)"
pei "./my-app progress-bar --case=messages -vvv"
wait
pei "clear"
pei "# You can use any iterated object as a data source for the widget."
pei "# Let's look at an associative array as an example."
pei "./my-app progress-bar --case=array"
pei "# As you can see, you can customize the message. This is useful for logs."
wait
pei "clear"
pei "# You can easily disable progress bar"
pei "./my-app progress-bar --case=messages --no-progress"
wait
pei ""
pei "# Or quickly switch to crontab mode"
pei "./my-app progress-bar --case=messages --output-mode=cron"
wait
pei "clear"
pei ""
pei "# It's ready for ELK Stack (Logstash)."
pei "./my-app progress-bar --case=messages --output-mode=logstash | jq"
wait
pei "clear"
pei "# It is easy to interrupt the execution."
pei "./my-app progress-bar --case=break"
wait
pei "clear"
pei "# If an unexpected error occurs, it will stop execution and display detailed information on the screen."
pei "./my-app progress-bar --case=exception -vv"
wait
pei "clear"
pei "# You can catch all exceptions without interrupting execution."
pei "# And output only one single generic message at the end."
pei "./my-app progress-bar --case=exception-list -vv"
wait
pei "clear"
pei "##############################"
pei "# That's all for this demo. #"
pei "# Have a nice day =) #"
pei "# Thank you! #"
pei "##############################"
| true |
4c9feb9b3a308980ef771872caa23f50e4d85847 | Shell | dwp/aws-pdm-dataset-generation | /steps/source.sh | UTF-8 | 1,466 | 3.359375 | 3 | [] | no_license | #!/bin/bash
###############
# Set Variables
###############
SOURCE_DB="${source_db}"
DATA_LOCATION="${data_location}/$4" #reading s3_prefix as command line argument (4th argument)
DICTIONARY_LOCATION="${dictionary_location}"
SERDE="${serde}"
SOURCE_DIR=/opt/emr/sql/extracted/src/main/resources/scripts/source
(
# Import the logging functions
source /opt/emr/logging.sh
# Import resume step function
source /opt/emr/resume_step.sh
function log_wrapper_message() {
log_pdm_message "$${1}" "source_sql.sh" "$${PID}" "$${@:2}" "Running as: ,$USER"
}
echo "START_RUNNING_SOURCE ......................"
log_wrapper_message "start running source ......................."
#####################
# Run SQL Scripts
#####################
#shellcheck disable=SC2038
# here we are finding SQL files and don't have any non-alphanumeric filenames
if ! find $SOURCE_DIR -name '*.sql' \
| xargs -n1 -P"${processes}" /opt/emr/with_retry.sh hive \
--hivevar source_database="$SOURCE_DB" \
--hivevar data_path="$DATA_LOCATION" \
--hivevar serde="$SERDE" \
--hivevar dictionary_path="$DICTIONARY_LOCATION" -f; then
echo source stage failed >&2
exit 1
fi
echo "FINISHED_RUNNING_SOURCE......................"
log_wrapper_message "finished running source......................."
) >> /var/log/pdm/source_sql.log 2>&1
| true |
52ad53c8c9ae79e09e887a898b9f16495f566c20 | Shell | lingtalfi/lingula | /code/home/tasks.d/tmp_depository_path.sh | UTF-8 | 500 | 3.046875 | 3 | [] | no_license | #!/usr/bin/env bash
############################################################
# GOAL:
# to update the CONFIG[tmp_depository_path] value.
# Doing so allow the user to redefine the depository
# from the config file, which is handful.
############################################################
startTask "tmp_depository_path"
CONFIG[tmp_depository_path]="$VALUE"
log "tmp_depository_path: tmp_depository_path is now set to $VALUE for the current project"
endTask "tmp_depository_path"
| true |
b8e735adfe2ea372ba0980f536371e33dcbfad91 | Shell | upendrasingh1/AkkaRestApiSample | /ScalableRestServiceAkka/src/main/docker/services/build_deploy_gsp_services.sh | UTF-8 | 12,463 | 3.328125 | 3 | [] | no_license | #!/usr/bin/env bash
function wait_for_service()
if [ $# -ne 1 ]
then
echo usage $FUNCNAME "service";
echo e.g: $FUNCNAME docker-proxy
else
serviceName=$1
while true; do
REPLICAS=$(docker service ls | grep -E "(^| )$serviceName( |$)" | awk '{print $3}')
if [[ $REPLICAS == "1/1" || $REPLICAS == "global" ]]; then
break
else
echo "Waiting for the $serviceName service... ($REPLICAS)"
sleep 5
fi
done
fi
echo "Stopping and removing all containers and images from the Configured Docker Swarm"
eval $(docker-machine env node-1)
docker service rm $(docker service ls -q)
docker stop $(docker ps -a -q);docker rm -f $(docker ps -a -q);docker rmi -f $(docker images -q)
eval $(docker-machine env node-2)
docker stop $(docker ps -a -q);docker rm -f $(docker ps -a -q);docker rmi -f $(docker images -q)
eval $(docker-machine env node-3)
docker stop $(docker ps -a -q);docker rm -f $(docker ps -a -q);docker rmi -f $(docker images -q)
eval $(docker-machine env node-4)
docker stop $(docker ps -a -q);docker rm -f $(docker ps -a -q);docker rmi -f $(docker images -q)
eval $(docker-machine env node-1)
docker network rm my-net
echo "Setting environment variables"
export KV_IP=172.31.15.183;export MASTER_IP=172.31.15.181;export SLAVE_IP1=172.31.15.182; export SLAVE_IP2=172.31.6.170
echo "Compile the platform binaries..."
mvn -f ../../../../pom.xml clean package
echo "Build frontend image from the project"
eval $(docker-machine env node-1)
CURL_CA_BUNDLE= docker-compose -f gsp_platform_seed.yml build frontend
echo "Build backend image from the project"
CURL_CA_BUNDLE= docker-compose -f gsp_platform_seed.yml build backend
echo "Build akkaseed image from the project"
CURL_CA_BUNDLE= docker-compose -f gsp_platform_seed.yml build akkaseed
#echo "Compile proxy"
cd /root/proxy
#cd docker-flow-proxy
#export PATH=$PATH:/usr/local/go/bin
#export GOPATH=/usr/local/go/
#go get -d -v -t && go test --cover -v ./... --run UnitTest && go build -v -o docker-flow-proxy
docker build -t proxy .
cd /root/work/tally-gsp-demo/src/main/docker/services
echo "Build haproxy image from the project"
#CURL_CA_BUNDLE= docker-compose -f gsp_platform_seed.yml build proxy
echo "Export images to tar files"
docker save frontend > /tmp/frontend.tar;docker save backend > /tmp/backend.tar;docker save proxy > /tmp/proxy.tar;docker save akkaseed > /tmp/akkaseed.tar
echo "Force enabling "
ssh root@$KV_IP 'update-ca-trust force-enable';ssh root@$MASTER_IP 'update-ca-trust force-enable';ssh root@$SLAVE_IP1 'update-ca-trust force-enable';ssh root@$SLAVE_IP2 'update-ca-trust force-enable'
echo "Exporting the exported tar files to docker swarm"
scp /tmp/akkaseed.tar root@$KV_IP:/root;scp /tmp/akkaseed.tar root@$MASTER_IP:/root;scp /tmp/akkaseed.tar root@$SLAVE_IP1:/root;scp /tmp/akkaseed.tar root@$SLAVE_IP2:/root; \
scp /tmp/proxy.tar root@$KV_IP:/root;scp /tmp/proxy.tar root@$MASTER_IP:/root;scp /tmp/proxy.tar root@$SLAVE_IP1:/root;scp /tmp/proxy.tar root@$SLAVE_IP2:/root; \
scp /tmp/frontend.tar root@$KV_IP:/root;scp /tmp/frontend.tar root@$MASTER_IP:/root;scp /tmp/frontend.tar root@$SLAVE_IP1:/root;scp /tmp/frontend.tar root@$SLAVE_IP2:/root; \
scp /tmp/backend.tar root@$KV_IP:/root;scp /tmp/backend.tar root@$MASTER_IP:/root;scp /tmp/backend.tar root@$SLAVE_IP1:/root;scp /tmp/backend.tar root@$SLAVE_IP2:/root; \
scp /etc/pki/ca-trust/source/anchors/devdockerCA.crt root@$KV_IP:/etc/pki/ca-trust/source/anchors;scp /etc/pki/ca-trust/source/anchors/devdockerCA.crt root@$MASTER_IP:/etc/pki/ca-trust/source/anchors;scp /etc/pki/ca-trust/source/anchors/devdockerCA.crt root@$SLAVE_IP1:/etc/pki/ca-trust/source/anchors;scp /etc/pki/ca-trust/source/anchors/devdockerCA.crt root@$SLAVE_IP2:/etc/pki/ca-trust/source/anchors
echo "Install the certificates"
ssh root@$KV_IP 'update-ca-trust extract';ssh root@$MASTER_IP 'update-ca-trust extract';ssh root@$SLAVE_IP1 'update-ca-trust extract';ssh root@$SLAVE_IP2 'update-ca-trust extract'
echo "Restart the docker"
ssh root@$KV_IP 'service docker restart';ssh root@$MASTER_IP 'service docker restart';ssh root@$SLAVE_IP1 'service docker restart';ssh root@$SLAVE_IP2 'service docker restart'
echo "For sanity once again removing all the images"
ssh root@$KV_IP 'docker rmi akkaseed;docker rmi proxy;docker rmi backend; docker rmi frontend';ssh root@$MASTER_IP 'docker rmi akkaseed;docker rmi proxy;docker rmi backend; docker rmi frontend';ssh root@$SLAVE_IP1 'docker rmi akkaseed;docker rmi proxy;docker rmi backend; docker rmi frontend';ssh root@$SLAVE_IP2 'docker rmi akkaseed;docker rmi proxy;docker rmi backend; docker rmi frontend'
echo "Loading images on the machines"
ssh root@$KV_IP 'docker load < akkaseed.tar;docker load < proxy.tar;docker load < backend.tar;docker load < frontend.tar';ssh root@$MASTER_IP 'docker load < akkaseed.tar;docker load < proxy.tar;docker load < backend.tar;docker load < frontend.tar';ssh root@$SLAVE_IP1 'docker load < akkaseed.tar;docker load < proxy.tar;docker load < backend.tar;docker load < frontend.tar';ssh root@$SLAVE_IP2 'docker load < akkaseed.tar;docker load < proxy.tar;docker load < backend.tar;docker load < frontend.tar'
eval $(docker-machine env node-1)
docker node ls
docker network create --driver overlay --subnet=10.0.9.0/24 my-net
#docker login -u GSPREPO -p tally123 https://ec2-35-154-15-160.ap-south-1.compute.amazonaws.com
echo "First set up Monitoring Services...."
echo "Starting elastic search..."
ssh root@$KV_IP 'sysctl -w vm.max_map_count=262144;'
#docker service create --name elasticsearch \
# --constraint=node.role==manager \
# --network my-net \
# -p 9200:9200 \
# --reserve-memory 800m \
# elasticsearch:latest
#wait_for_service elasticsearch
echo "Deploying logstash and all the nodes...."
ssh root@$KV_IP 'rm -rf /root/docker/logstash;rmdir /root/docker/logstash; mkdir -p /root/docker/logstash;yum -y install wget;pushd /root/docker/logstash; wget https://raw.githubusercontent.com/vfarcic/cloud-provisioning/master/conf/logstash.conf; popd'; \
ssh root@$MASTER_IP 'rm -rf /root/docker/logstash;rmdir /root/docker/logstash; mkdir -p /root/docker/logstash;yum -y install wget;pushd /root/docker/logstash; wget https://raw.githubusercontent.com/vfarcic/cloud-provisioning/master/conf/logstash.conf; popd'; \
ssh root@$SLAVE_IP1 'rm -rf /root/docker/logstash;rmdir /root/docker/logstash; mkdir -p /root/docker/logstash;yum -y install wget;pushd /root/docker/logstash; wget https://raw.githubusercontent.com/vfarcic/cloud-provisioning/master/conf/logstash.conf; popd'; \
ssh root@$SLAVE_IP2 'rm -rf /root/docker/logstash;rmdir /root/docker/logstash; mkdir -p /root/docker/logstash;yum -y install wget;pushd /root/docker/logstash; wget https://raw.githubusercontent.com/vfarcic/cloud-provisioning/master/conf/logstash.conf; popd'
#docker service create --name logstash \
# --mount "type=bind,source=/root/docker/logstash,target=/conf" \
# --network my-net \
# -e LOGSPOUT=ignore \
# --reserve-memory 100m \
# logstash:latest logstash -f /conf/logstash.conf
#wait_for_service logstash
#docker service create --name kibana \
# -p 5601:5601 \
# --network my-net \
# -e ELASTICSEARCH_URL=http://elasticsearch:9200 \
# --reserve-memory 50m \
# --label com.df.notify=true \
# --label com.df.distribute=true \
# --label com.df.servicePath=/app/kibana,/bundles,/elasticsearch,/api,/plugins,/app/timelion \
# --label com.df.port=5601 \
# kibana:latest
#wait_for_service kibana
#docker service create --name logspout \
# --network my-net \
# --mode global \
# --mount "type=bind,source=/var/run/docker.sock,target=/var/run/docker.sock" \
# -e SYSLOG_FORMAT=rfc3164 \
# gliderlabs/logspout syslog://logstash:51415
#wait_for_service logspout
#echo "Start the zookeeper..."
#docker service create --name zookeeper \
# -p 2181 \
# -e ZOO_MY_ID=1 \
# -e ZOO_SERVERS=server.1=zookeeper:2888:3888 \
# --network my-net \
# zookeeper
#echo "Locate zookeeper container and create path /ClusterSystem manually..and pressany key"
#read -p 'Input: ' input
docker service create --name swarm-listener \
--network my-net \
--mount "type=bind,source=/var/run/docker.sock,target=/var/run/docker.sock" \
-e DF_NOTIF_CREATE_SERVICE_URL=http://proxy:8080/v1/docker-flow-proxy/reconfigure \
-e DF_NOTIF_REMOVE_SERVICE_URL=http://proxy:8080/v1/docker-flow-proxy/remove \
--constraint 'node.role==manager' \
vfarcic/docker-flow-swarm-listener
sleep 4
docker service create --name proxy \
-p 80:80 \
-p 443:443 \
-p 8080:8080 \
--network my-net \
-e MODE=swarm \
-e LISTENER_ADDRESS=swarm-listener \
proxy
docker service create --name akkaseed \
--hostname akkaseed \
-p 2552 \
--network my-net \
akkaseed
docker service create --name backend \
--network my-net \
backend
#sleep 2
docker service create --name frontend \
-p 8080 \
--network my-net \
--label com.df.notify=true \
--label com.df.distribute=true \
--label com.df.servicePath=/addition \
--label com.df.port=8080 \
frontend
#docker stack up --compose-file=docker-compose.yml mystack
sleep 2
echo "To start vizualization service"
docker service create \
--name=viz \
--publish=5000:8080/tcp \
--constraint 'node.role==manager' \
--mount=type=bind,src=/var/run/docker.sock,dst=/var/run/docker.sock \
manomarks/visualizer
sleep 8
docker service create \
--publish 9008:9000 \
--limit-cpu 0.5 \
--name portainer-swarm \
--constraint=node.role==manager \
--mount=type=bind,src=/var/run/docker.sock,dst=/var/run/docker.sock \
portainer/portainer \
-H unix:///var/run/docker.sock
#docker \
# service create --name cadvisor \
# --mode global \
# --network my-net \
# --label com.docker.stack.namespace=monitoring \
# --container-label com.docker.stack.namespace=monitoring \
# --mount type=bind,src=/,dst=/rootfs:ro \
# --mount type=bind,src=/var/run,dst=/var/run:rw \
# --mount type=bind,src=/sys,dst=/sys:ro \
# --mount type=bind,src=/var/lib/docker/,dst=/var/lib/docker:ro \
# google/cadvisor:v0.24.1
#wait_for_service cadvisor
#docker \
# service create --name node-exporter \
# --mode global \
# --network my-net \
# --label com.docker.stack.namespace=monitoring \
# --container-label com.docker.stack.namespace=monitoring \
# --mount type=bind,source=/proc,target=/host/proc \
# --mount type=bind,source=/sys,target=/host/sys \
# --mount type=bind,source=/,target=/rootfs \
# --mount type=bind,source=/etc/hostname,target=/etc/host_hostname \
# -e HOST_HOSTNAME=/etc/host_hostname \
# basi/node-exporter \
# -collector.procfs /host/proc \
# -collector.sysfs /host/sys \
# -collector.filesystem.ignored-mount-points "^/(sys|proc|dev|host|etc)($|/)" \
# --collector.textfile.directory /etc/node-exporter/ \
# --collectors.enabled="conntrack,diskstats,entropy,filefd,filesystem,loadavg,mdadm,meminfo,netdev,netstat,stat,textfile,time,vmstat,ipvs"
#wait_for_service node-exporter
#docker \
# service create --name alertmanager \
# --network my-net \
# --label com.docker.stack.namespace=monitoring \
# --container-label com.docker.stack.namespace=monitoring \
# --publish 9093:9093 \
# -e "SLACK_API=https://hooks.slack.com/services/TOKEN-HERE" \
# -e "LOGSTASH_URL=http://logstash:8080/" \
# basi/alertmanager \
# -config.file=/etc/alertmanager/config.yml
#wait_for_service alertmanager
#docker \
# service create \
# --name prometheus \
# --network my-net \
# --label com.docker.stack.namespace=monitoring \
# --container-label com.docker.stack.namespace=monitoring \
# --publish 9090:9090 \
# basi/prometheus-swarm \
# -config.file=/etc/prometheus/prometheus.yml \
# -storage.local.path=/prometheus \
# -web.console.libraries=/etc/prometheus/console_libraries \
# -web.console.templates=/etc/prometheus/consoles \
# -alertmanager.url=http://alertmanager:9093
#wait_for_service prometheus
#docker \
# service create \
# --name grafana \
# --network my-net \
# --label com.docker.stack.namespace=monitoring \
# --container-label com.docker.stack.namespace=monitoring \
# --publish 3000:3000 \
# -e "PROMETHEUS_ENDPOINT=http://prometheus:9090" \
# basi/grafana
#wait_for_service grafana
# Register the "frontend" service with docker-flow-proxy so that requests
# to /addition are routed to it on port 8080 (distribute=true propagates
# the change to all proxy instances).
curl "$(docker-machine ip node-1):8080/v1/docker-flow-proxy/reconfigure?serviceName=frontend&servicePath=/addition&port=8080&distribute=true"
echo "Services initializing..."
sleep 20
| true |
f227a8e95d21c6ec8f648f67b42cb3af558bf1ca | Shell | Gonzalob90/capstone-udacity-devops | /upload_docker.sh | UTF-8 | 277 | 2.890625 | 3 | [] | no_license | #!/usr/bin/env bash
# 1. Create dockerpath
# Docker Hub destination in <user>/<repository> form.
dockerpath=gonzalob90/udacity-capstone-project
# 2. Authenticate & tag
# Interactive hub login, then tag the locally built image for the repo.
docker login
docker tag udacity-capstone-project $dockerpath
echo "Docker ID and Image: $dockerpath"
# Push image to a docker repository
docker push $dockerpath | true |
6af433f9f2a0f94102fe26cd8735acc8a2a9f25d | Shell | waterlu/hyperledger-fabric-java-demo | /network/generate.sh | UTF-8 | 1,318 | 3.109375 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/sh
#
# Copyright IBM Corp All Rights Reserved
#
# SPDX-License-Identifier: Apache-2.0
#
# Regenerate all Hyperledger Fabric artifacts for channel "mychannel":
# crypto material, the orderer genesis block, the channel-creation tx and
# the anchor-peer updates for baiduMSP and jdMSP.
#
# NOTE: the original tested "$?" after each command, but "set -e" aborts
# the script before those tests could ever run, so the error messages
# were unreachable.  "if ! cmd" keeps fail-fast behaviour *and* prints
# the intended message.
set -ev
# remove previous crypto material and config transactions
rm -fr config/*
rm -fr crypto-config/*
if [ ! -d config ]; then
    mkdir config
fi
# generate crypto material
if ! cryptogen generate --config=./crypto-config.yaml; then
  echo "Failed to generate crypto material..."
  exit 1
fi
# generate genesis block for orderer
if ! configtxgen -profile OrdererGenesis -outputBlock ./config/genesis.block; then
  echo "Failed to generate orderer genesis block..."
  exit 1
fi
# generate channel configuration transaction
if ! configtxgen -profile MyChannel -outputCreateChannelTx ./config/mychannel.tx -channelID mychannel; then
  echo "Failed to generate channel configuration transaction..."
  exit 1
fi
# generate anchor peer transaction (one per organisation)
if ! configtxgen -profile MyChannel -outputAnchorPeersUpdate ./config/baiduMSPanchors.tx -channelID mychannel -asOrg baiduMSP; then
  echo "Failed to generate anchor peer update for baiduMSP..."
  exit 1
fi
if ! configtxgen -profile MyChannel -outputAnchorPeersUpdate ./config/jdMSPanchors.tx -channelID mychannel -asOrg jdMSP; then
  echo "Failed to generate anchor peer update for jdMSP..."
  exit 1
fi
| true |
036566fc13459c9deb1f8612e7bdfdc208a3f9a2 | Shell | bibige666/referance_of_wangdao | /其他代码/SpellCheck_by刘昭/bin/test.sh | UTF-8 | 583 | 3.359375 | 3 | [] | no_license | #!/bin/bash
# Control script for the spellCorrect daemon; paths are relative to the
# directory this script is launched from.
cd ../
echo $PWD
cd shellScripts
echo $PWD
# Launch the spellCorrect daemon from its install directory.
function start
{
    #./sleep.sh &
    cd /home/anboqing/spellCorrect/bin/
    ./spellCorrect
}
# Force-kill all running spellCorrect processes.
function stop
{
    cd /home/anboqing/spellCorrect/bin/
    killall -9 spellCorrect
}
# List running spellCorrect processes.
function stat
{
    ps -ef | grep spellCorrect
}
# Stop then start the daemon.
function restart
{
    stop
    start
}
# like switch
# Fix: the original arm was "restart restart ;;" — the missing ")" made
# the whole case statement a syntax error, and no restart function was
# defined at all.
case $1 in
    start) start ;;
    stop) stop ;;
    restart) restart ;;
    stat) stat ;;
    *) echo "Igorn.." ;;
esac
# Clean up a leftover sleep.sh helper process, if one is running.
echo `ps -ef | grep 'sleep.sh'`
#pid=`ps -ef | grep 'sleep.sh' | awk -F' ' '{print $2}'`
pid=$(pidof -x sleep.sh)
echo $pid
# Guard the kill: with an empty pid the original printed a usage error.
if [ -n "$pid" ]; then
    kill -9 $pid
fi
echo $?
#tar -czvf shell.tar.gz shellScripts/
| true |
65b86722e0a0bdda1dd1f24b580aba3302293f31 | Shell | heavysink/repo | /archlinuxcn/ocaml-migrate-parsetree2/PKGBUILD | UTF-8 | 1,672 | 2.59375 | 3 | [] | no_license | # Maintainer: Daniel Peukert <daniel@peukert.cc>
# Contributor: Jakob Gahde <j5lx@fmail.co.uk> (ocaml-migrate-parsetree PKGBUILD)
# Arch build script for the 2.x line of ocaml-migrate-parsetree, named
# "<project>2" so it can coexist with the 1.x package.
_projectname='ocaml-migrate-parsetree'
pkgname="${_projectname}2"
pkgver='2.3.0'
pkgrel=2
pkgdesc='Convert OCaml parsetrees between different major versions - 2.x.x version'
arch=('x86_64' 'i686' 'arm' 'armv6h' 'armv7h' 'aarch64')
url="https://github.com/ocaml-ppx/$_projectname"
license=('custom:LGPL2.1 with linking exception')
depends=('ocaml>=4.02.3')
makedepends=('dune>=2.3.0')
# !strip: do not strip the packaged files.
options=('!strip')
source=(
	"$pkgname-$pkgver-$pkgrel.tar.gz::$url/archive/v$pkgver.tar.gz"
	"$pkgname.diff"
)
sha512sums=('f2000939eee0b2eac93d059292b0bc13aa809c9fe5e54b1e0bf412e41921647e9bc71ef23e0c6fba70e481891ece5a65763743932c69bf278a1036c437313219'
            '0809be6bb40f51cc034debf8083aa2a916aa36768af9e98a8a462e7136eb0e0b2ccced4afc90c1c1b46552689225b35e29b6f20dce549c655a0210da6bfebf63')
_sourcedirectory="$_projectname-$pkgver"
# Apply the local compatibility patch on top of the upstream tarball.
prepare() {
	cd "$srcdir/$_sourcedirectory/"
	patch --forward -p1 < "../$pkgname.diff"
}
build() {
	cd "$srcdir/$_sourcedirectory/"
	dune build --release --verbose
}
# fails because of a circular dependency on this package by lwt
# check() {
# 	cd "$srcdir/$_sourcedirectory/"
# 	dune runtest --release --verbose
# }
# Install with dune, relocate docs to /usr/share, and expose the licence
# via a symlink into the docs directory.
package() {
	cd "$srcdir/$_sourcedirectory/"
	DESTDIR="$pkgdir" dune install --prefix '/usr' --libdir 'lib/ocaml' --release --verbose
	install -dm755 "$pkgdir/usr/share/doc/$pkgname"
	mv "$pkgdir/usr/doc/$pkgname/"* "$pkgdir/usr/share/doc/$pkgname/"
	rm -r "$pkgdir/usr/doc/"
	install -dm755 "$pkgdir/usr/share/licenses/$pkgname"
	ln -sf "/usr/share/doc/$pkgname/LICENSE.md" "$pkgdir/usr/share/licenses/$pkgname/LICENSE.md"
}
| true |
7561768fcd35099ecb0dd8e54cdbfba6ffef332a | Shell | rrialq/shell-scripts | /arch/1-pre-installation.sh | UTF-8 | 1,350 | 3.625 | 4 | [] | no_license | #!/bin/bash
# Arch Linux pre-installation steps (keyboard, network, clock, disk).
# WARNING: partitionTheDisks/formatThePartitions destroy /dev/sda.
# "title" is presumably a helper defined in ./configuration — verify.
. ./configuration
setTheKeyboardLayout() {
    title '1.1 Set the keyboard layout'
    loadkeys es
}
connectToTheInternet() {
    title '1.3 Connect to the Internet'
    ping -c1 archlinux.org
}
updateTheSystemClock() {
    title '1.4 Update the system clock'
    timedatectl set-ntp true
}
# GPT layout on /dev/sda: BIOS-boot (2M), /boot (1512M), / (rest).
partitionTheDisks() {
    title '1.5 Partition the disks'
    title ' * Creating GPT Partition table'
    echo "label: gpt" | sfdisk /dev/sda \
        && title ' * Creating /Bios boot partition: 2M' \
        && echo ',2M,21686148-6449-6E6F-744E-656564454649,*' | sfdisk /dev/sda \
        && title ' * Creating /boot partition: 1512M' \
        && echo ',1512M' | sfdisk -a /dev/sda \
        && title ' * Creating / partition: The rest of the disk' \
        && echo ',,' | sfdisk -a /dev/sda
}
formatThePartitions() {
    title '1.6 Format the partitions'
    title ' * Formatting /dev/sda2'
    mkfs.xfs -f /dev/sda2 \
        && title ' * Formatting /dev/sda3' \
        && mkfs.xfs -f /dev/sda3
}
mountTheFileSystems() {
    title '1.7 Mount the partitions'
    mount /dev/sda3 /mnt \
        && mkdir /mnt/boot \
        && mount /dev/sda2 /mnt/boot
}
# Run every step in order; stop at the first failure.
title '1. Pre-installation'
setTheKeyboardLayout \
    && connectToTheInternet \
    && updateTheSystemClock \
    && partitionTheDisks \
    && formatThePartitions \
    && mountTheFileSystems \
    && printf "\n${0} OK\n"
| true |
8fd297fcde2efff84b381c69ce3182c49ebba7c5 | Shell | arsham/dotfiles | /zsh/.zsh/plugins/kubectl/_kubectl-arguments | UTF-8 | 542 | 3.703125 | 4 | [] | no_license | #!/bin/bash
# Usage:
# k <command> [-c=CONTEXT] [-n=NAMESPACE]
#
# By default it operates on all namespaces in the current context.
_namespace="--all-namespaces"
_context=""
for ARGUMENT in "$@"
do
    # Split KEY=VALUE with parameter expansion instead of echo|cut: no
    # subshells per argument, and a VALUE containing '=' is no longer
    # truncated at the second '='.  An argument without '=' yields the
    # whole string for both KEY and VALUE, matching the old cut output.
    KEY=${ARGUMENT%%=*}
    VALUE=${ARGUMENT#*=}
    case "$KEY" in
        --namespace|-n) _namespace="-n ${VALUE}" ;;
        --context|-c) _context="--context ${VALUE}" ;;
        *)
          echo "Unrecognised option ${KEY}"
          exit 1
        ;;
    esac
done
# vim: ft=zsh:nowrap
| true |
25180296ff34cc812104e3153b2c49231ce56b0d | Shell | dmatlack/crunchbang-config | /dzen/stats.sh | UTF-8 | 2,566 | 3.921875 | 4 | [] | no_license | #! /bin/bash
###########################################################
#
# Generate output for dzen that describes the system
# state (e.g. battery, volume, etc.).
#
###########################################################
DZEN_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
source $DZEN_DIR/style.sh
ICON_PATH="$DZEN_DIR/icons"
ICON_COLOR=${blue}
ICON_DEAD_COLOR=${_black_}
VOLUME_ICON="$ICON_PATH/vol.1.xbm"
WIRELESS_ICON="$ICON_PATH/wireless1.xbm"
CLOCK_FORMAT="%l:%M %p"
SEP="^fg(${_black_})|^fg()"
# Emit the dzen2 escape for icon image $1 drawn in colour $2:
#   ^fg(<colour>)^i(<path>)^fg()
function icon_c {
  printf '^fg(%s)^i(%s)^fg()\n' "$2" "$1"
}
# Convenience wrapper: draw an icon in the default ICON_COLOR.
function icon {
  icon_c "$1" "$ICON_COLOR"
}
# Pipe a 0-100 value into dzen2-gdbar to render a small gauge in
# colour $2 (-nonl keeps it on one line for embedding in the status).
function bar_c {
  echo $1 | dzen2-gdbar -l "" -w 33 -min 0 -max 100 -o -nonl -fg $2 -bg ${_black_}
}
# bar_c with the default (blue) foreground colour.
function bar {
  bar_c $1 ${blue}
}
# Battery segment: charger icon while charging, error icon when the
# state is unknown, battery icon otherwise, then a gauge (red at <=20%).
# Note: the variable name "acpi_ouput" is a typo preserved from the
# original source.
battery() {
  acpi_ouput=$(acpi -b)
  # percentage is the 2nd comma field of `acpi -b`; status is word 3
  percentage=$(echo "$acpi_ouput" | cut -d "," -f 2 | tr -d " %")
  status=$(echo "$acpi_ouput" | cut -d " " -f 3 | tr -d ", ")
  if [ $status = Charging ]; then
    echo -n "$(icon $ICON_PATH/ac14.xbm)"
  elif [ $status = Unknown ]; then
    echo -n "$(icon $ICON_PATH/error1.xbm)"
  else
    echo -n "$(icon $ICON_PATH/battery.full.xbm)"
  fi
  if [ $percentage -le 20 ]; then
    echo "$(bar_c $percentage ${red}) "
  else
    echo "$(bar $percentage) "
  fi
}
# Volume segment: icon plus gauge for the Master mixer channel; the
# icon is drawn in the "dead" colour and the gauge forced to 0 when the
# mixer reports muted (no "[on]" in amixer output).
volume() {
  volume=$(amixer get Master | egrep -o "[0-9]+%" | tr -d "%")
  #The following lines are buttons to change the sound
  #echo -n "^ca(1, amixer -q set Master 5%-)"
  #echo -n "^ca(3, amixer -q set Master 5%+)"
  #echo -n "^ca(2, amixer -q set Master toggle)"
  if [ -z "$(amixer get Master | grep "\[on\]")" ]; then
    # muted
    echo -n "$(icon_c $VOLUME_ICON $ICON_DEAD_COLOR)"
    volume="0"
  else
    echo -n "$(icon $VOLUME_ICON)"
  fi
  echo -n "$(bar $volume)"
  #echo "^ca()^ca()^ca()"
}
# Wireless segment for interface wlan0: when link quality is reported,
# show the ESSID (green if an IP is assigned, red otherwise) plus the
# IP; when there is no signal, show a greyed-out icon.
function wireless {
  quality=$(cat /proc/net/wireless | grep wlan0 | cut -d " " -f 6 | tr -d ".")
  if [ "$quality" ]; then
    echo -n "$(icon $WIRELESS_ICON)"
    essid=$(iwconfig wlan0 | head -1 | cut -d: -f2 | tr -d '\" ')
    ip="$(ifconfig wlan0 | grep "inet addr" | cut -d: -f2 | cut -d" " -f1)"
    if [ -z "$ip" ]; then
      essid_color="${red}"
    else
      essid_color="${_green_}"
    fi
    echo -n " ^fg(${essid_color})$essid^fg() ^fg(${_black_})$ip^fg()"
  else
    echo -n "$(icon_c $WIRELESS_ICON $ICON_DEAD_COLOR)"
    #quality="0"
  fi
  #echo -n "$(bar $quality)"
}
# Current time rendered with CLOCK_FORMAT.  The final expansion is left
# unquoted on purpose: it collapses the padding space produced by the
# space-padded "%l" hour, exactly like the original `echo $(date ...)`.
function clock {
  local stamp
  stamp=$(date +"$CLOCK_FORMAT")
  echo $stamp
}
# Main loop: emit one status line (wireless | battery | volume | clock)
# every half-second for dzen to consume on stdin.
while :; do
  echo -n "$(wireless) $SEP "
  echo -n "$(battery) $SEP "
  echo -n "$(volume) $SEP "
  echo "$(clock) "
  sleep 0.5
done
| true |
f8c2c6ba471344ab0f4a2b5ca7d714cdfa669b4c | Shell | conesphere/idx | /idx-id2file | UTF-8 | 408 | 3.484375 | 3 | [] | no_license | #!/bin/bash
###################################
# retrieves a filename from an ID #
# retrieved filenames are not absolute, because they might be double #
# Reads IDs on stdin, one per line.  Each ID is resolved to a storage
# location via idxpand/idxchkloc (external helpers; IDX_DIR comes from
# the environment) and the first line of that location's "name" file is
# printed, falling back to the raw ID when no usable name exists.
while read -r id
do
  loc="${IDX_DIR}"/$(idxpand "${id}")
  # skip IDs whose location fails validation
  idxchkloc "${loc}" || continue
  if [[ ! -f "${loc}/name" ]]
  then
    name="${id}"
  else
    read -r name < "${loc}/name"
  fi
  # empty name file: fall back to the ID as well
  if [[ -z "${name}" ]]
  then
    name="${id}"
  fi
  echo "${name}"
done
| true |
deef3707fbe7603afd729a561e532cf0dd3884aa | Shell | sovicheacheth/CS522-BigDataProjectFinalPart | /Part6-Spark Project Analyze Apache Log/output.sh~ | UTF-8 | 298 | 3.078125 | 3 | [] | no_license | #!/bin/sh
# Run the Spark LogAnalyzer job locally (4 cores), appending its output
# to output.txt, and report the wall-clock run time.
rm -fr output.txt
STARTTIME=`date +%s.%N`
spark-submit --class "edu.mum.cs522.spark.LogAnalyzer" --master local[4] spark.jar >> output.txt
ENDTIME=`date +%s.%N`
# bc computes the elapsed seconds; awk trims the fraction to 3 digits
TIMEDIFF=`echo "$ENDTIME - $STARTTIME" | bc | awk -F"." '{print $1"."substr($2,1,3)}'`
echo "Execution Time : $TIMEDIFF"
| true |
a8a4ee96db0e9ada37de6a71f22e0144bc418d75 | Shell | evechen0424/CentOS-6-system-info | /system_info.sh | UTF-8 | 705 | 3.1875 | 3 | [] | no_license | #!/bin/bash
# Gather basic hardware/OS facts (CPU, memory, disks, network, release)
# and append a plain-text report to system_info.txt.
# NOTE(review): "realese" (sic) assumes a RedHat-style system
# (/etc/redhat-release) and system_bit relies on `uname -a` field
# position — verify on the target distro.
CPU_model=`cat /proc/cpuinfo |grep "model name"|awk 'BEGIN {FS=":"}; {print $2}'|head -1`
CPU_cores=`cat /proc/cpuinfo|grep "model name"|wc -l`
partition=`df -h`
mem=`free -m`
system_bit=`uname -a |awk '{print $(NF-1)}'`
realese=`cat /etc/redhat-release`
network=`ifconfig`
# Multi-line report written via a single quoted string.
echo "
CPU model name :$CPU_model
CPU cores : $CPU_cores
memory :
$mem
partition :
$partition
network info :
$network
OS : $realese
OS bit : $system_bit
" >> system_info.txt
echo -e "
CPU model name :\e[35m$CPU_model\e[m
CPU cores : \e[35m$CPU_cores\e[m
memory :
\e[35m$mem\e[m
partition :
\e[35m$partition\e[m
network info :
\e[35m$network\e[m
OS : \e[35m$realese\e[m
OS bit : \e[35m$system_bit\e[m
| true |
84275cd063a12e5a7e97ea94954ccc11b947e078 | Shell | goodcjw/CS35L12S | /sample_lab_2/sample1.sh | UTF-8 | 112 | 2.921875 | 3 | [] | no_license | # /bin/bash
# Print the running sum of the integers 0..10, one partial sum per line
# (each line prefixed with a single space).
sum=0
for (( i = 0; i <= 10; i++ ))
do
   sum=$(( sum + i ))
   printf ' %s\n' "$sum"
done
| true |
88659a493bd529cab343ea6be18a5826c8214e4f | Shell | 806572349/Taroco-UI-NEW | /run.sh | UTF-8 | 458 | 2.71875 | 3 | [
"MIT"
] | permissive | #!/bin/sh
# kill old container
# Tear down any previous taroco-ui container/image before rebuilding.
# (grep -i matches the name anywhere in the docker listing output.)
if docker ps | grep -i taroco-ui
then
    docker kill taroco-ui
fi
# remove old container
if docker ps -a | grep -i taroco-ui
then
    docker rm taroco-ui
fi
# remove old images
if docker images | grep docker_taroco-ui:latest
then
    docker rmi docker_taroco-ui:latest
fi
# Unpack the frontend build and bake it into a fresh image.
unzip dist.zip
docker build --rm -t docker_taroco-ui:latest .
docker run -p 80:80 --name taroco-ui -d docker_taroco-ui:latest | true |
1dc255f0238f3d0c7ed62d72ac617c5993a54b0e | Shell | cubgs53/path | /omp.pbs | UTF-8 | 461 | 2.890625 | 3 | [
"MIT"
] | permissive | #!/bin/sh -l
#PBS -l nodes=1:ppn=24
#PBS -l walltime=0:30:00
#PBS -n
#PBS -p 1023
#PBS -j oe
set -eou pipefail
# Run "$EXE -n $N", optionally under Intel VTune (amplxe-cl) when AMPL
# is set.  EXE and N must be passed via "qsub -v"; the ${VAR+x}
# expansions test "is the variable set" without tripping `set -u`.
main() {
    if [ -z ${EXE+x} ] || [ -z ${N+x} ]; then
        echo "usage: qsub run.pbs -N <name> -vEXE=<executable>,N=<n>[,AMPL=]"
        exit -1
    fi
    if [ -z ${AMPL+x} ]; then
        "$EXE" -n "$N"
    else
        # hotspots profile results are written to "<exe>-ampl"
        amplxe-cl -collect hotspots -r "$EXE-ampl" \
            "$EXE" -n "$N"
    fi
}
module load cs5220
cd $PBS_O_WORKDIR
main
| true |
5998895f24f954a8783b31edd42d14a5e580c88c | Shell | deflomu/scripts | /checkSamba.sh | UTF-8 | 231 | 2.953125 | 3 | [] | no_license | #!/bin/sh
# Watchdog for an OpenWrt-style router: if no smbd process is running,
# save the system log (logread) and trigger a Samba restart.
# Count smbd processes; "grep -v grep" excludes the grep itself.
SAMBA_RUNNING=$(ps|grep smbd|grep -v grep|wc -l);
# Fix: the original compared with '==', which is not a valid POSIX
# test(1) operator under /bin/sh (dash), so the restart branch could
# never be taken; '-eq' is the portable numeric comparison.
if [ "$SAMBA_RUNNING" -eq 0 ]; then
	logger -t checkSamba Samba seems not to be running. Saving Log...
	logread > /etc/automatic/SambaDidNotRun.log
	fs -l RestartSamba;
fi;
| true |
7f8d4d1d699e05a5be23064c4ceeb21d70e7c440 | Shell | yongkyung-oh/CMU-Large_Scale_Multimedia_Analysis | /run_download_video.sh | UTF-8 | 161 | 2.71875 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
#!/usr/bin/env bash
input=$1
while IFS= read -r line
do
wget "https://s20-11775-data.s3.amazonaws.com/video/${line}" -P $2
done < "$input" | true |
7db33feee3d343f59cf4c035c16f7a11ad25736d | Shell | akos222/mn-applet | /curr.sh | UTF-8 | 1,247 | 2.96875 | 3 | [] | no_license | #!/bin/bash
# Scrape GPMDP's playback.json and set the isplaying/artist/title
# globals.  (The JSON is parsed with grep/sed, not a JSON parser, so it
# depends on the file's exact formatting.)
songPlaying(){
	isplaying=`cat ~/.config/Google\ Play\ Music\ Desktop\ Player/json_store/playback.json | grep playing | sed 's/\"playing\"\:\ //;s/,//' | sed 's/[^a-z]*//g'`
	artist=`cat ~/.config/Google\ Play\ Music\ Desktop\ Player/json_store/playback.json | grep artist | sed 's/\"artist\":\ "//;s/\",//'`
	title=`cat ~/.config/Google\ Play\ Music\ Desktop\ Player/json_store/playback.json | grep title | sed 's/\"title\":\ "//;s/\",//'`
}
# Wait 3s, then re-read playback.json into newTitle/newArtist/icon so
# the caller can detect a track change; icon holds the albumArt URL.
nextSong(){
	sleep 3
	newTitle=`cat ~/.config/Google\ Play\ Music\ Desktop\ Player/json_store/playback.json | grep title | sed 's/\"title\":\ "//;s/\",//;s/^ *//g'`
	newArtist=`cat ~/.config/Google\ Play\ Music\ Desktop\ Player/json_store/playback.json | grep artist | sed 's/\"artist\":\ "//;s/\",//;s/^ *//g'`
	icon=`cat ~/.config/Google\ Play\ Music\ Desktop\ Player/json_store/playback.json | grep albumArt | sed 's/\"albumArt\":\ "//;s/\"//'`
}
# Print "<artist> - <title>" from the globals set by songPlaying.  The
# expansions stay unquoted on purpose to mirror the original
# word-splitting behaviour.
echoer(){
	local separator='-'
	echo $artist $separator $title
}
# If Google Play Music Desktop Player is not running, show a music-note
# placeholder; otherwise print "artist - title" and, when the track
# changed during the 3-second window, fetch the album art and pop a
# desktop notification.
ison=`ps cax | grep Google`
if [ "$ison" == '' ]; then
	echo "♪♪♪"
else
	songPlaying
	echoer
	nextSong
	echo $newTitle $title
	if [ "$newTitle" != "$title" ]; then
		curl $icon > ~/.tmppic
		notify-send -a "Currently Playing" -i ~/.tmppic "$newArtist" "$newTitle"
	fi
fi
| true |
2334efd246079beac5899e02e03739d87c66303b | Shell | mhbm/Laravel---Entrevista | /install.sh | UTF-8 | 1,537 | 2.96875 | 3 | [] | no_license | #!/bin/bash
#-------------------------------------------------------------------------
#
# @FILESOURCE install.sh
# @AUTHOR Mateus Macedo
# @DATE Seg 15 Setembro
# @VERSION 1.0.0
#
#-------------------------------------------------------------------------
#-------------------------------------------------------------------------
# __BEGIN_MAIN__
# Provision a Laravel dev box: PHP 5.6 + Apache + Composer + SQLite.
# Install PHP 5.6
echo "Instalando o PHP 5.6"
sudo apt-get install python-software-properties
sudo add-apt-repository ppa:ondrej/php
sudo apt-get update
sudo apt-get install -y php5.6 php5.6-mcrypt php5.6-gd
sudo apt-get install php5.6-mbstring
sudo apt-get install php5.6-xml
echo "Fim da Instalação do PHP 5.6"
# Install apache2
echo "Instalando o apache2"
sudo apt-get install apache2 libapache2-mod-php5
echo "Fim da instalação do apache2"
# Install Composer
echo 'Instalando o composer'
curl -sS https://getcomposer.org/installer | php
sudo mv composer.phar /usr/local/bin/composer
# Fix: the original ran "chmod +x /usr/local/bin/compose" (missing the
# final "r"), so the installed composer binary was never made executable.
sudo chmod +x /usr/local/bin/composer
echo 'Fim da instalação do composer'
# Install sqlite
echo 'Instalação do sqlite'
sudo apt-get install sqlite
sudo apt-get install php5.6-sqlite
echo 'Fim da Instalação do sqlite'
echo 'Instalação do phpunit'
sudo apt-get install phpunit
echo 'Fim da instalação do phpunit'
composer global require "laravel/installer"
# Install packages
echo 'Realizando o composer'
composer install
composer require doctrine/dbal
# Create datanase.sqlite
echo 'Criando o database.sqlite'
touch database/database.sqlite
# NOTE(review): 777 is world-writable; 664 plus the web-server group is
# usually sufficient — kept as-is to preserve behaviour.
sudo chmod 777 database/database.sqlite
# __END_MAIN__
| true |
208c4e4f09bdcb4293e2754394d6eec675ff1f04 | Shell | thiagofeijor/hello-world-shell-script | /HelloShell.sh | UTF-8 | 2,020 | 3.9375 | 4 | [] | no_license | read -p "What is your name?" AWR
# number: -eq -ne -gt -ge -lt -le
# string: = != -n -z
# Greet a couple of well-known users by name.
if [ "$AWR" = "Thiago" ] || [ "$AWR" = "sudo" ]
then
	echo "Welcome back, sir!"
fi
# Echo back whatever answer the user typed at the prompt above.
printAnswer () {
	printf 'Your answer: %s\n' "$1"
}
printAnswer $AWR
# Dispatch on the typed answer; "finish" ends the script immediately.
case $AWR in
	[0-9])
		echo "It is a number"
	;;
	Jo)
		echo "Hello Jo"
	;;
	finish)
		exit 0
	;;
esac
# Script/argument summary.  Fixes two bugs in the original line:
#  - "$1" was printed as the program name and "$0" as the first
#    parameter (swapped);
#  - plain echo printed the "\n" sequences literally; printf emits
#    real newlines.
printf 'Program name: %s \nNumber of parameters: %s\nParameters: %s\nFirst parameter: %s\n' "$0" "$#" "$*" "$1"
for param in $*
do
	echo "Param: $param"
done
# Collect kernel/CPU/memory/disk facts for the machine report below.
KERNEL=$(uname -r)
HOSTNAME=$(hostname)
CPUNO=$(cat /proc/cpuinfo |grep "model name"|wc -l)
CPUMODEL=$(cat /proc/cpuinfo |grep "model name"|head -n1|cut -c14-)
MEMTOTAL=$(expr $(cat /proc/meminfo |grep MemTotal|tr -d ' '|cut -d: -f2|tr -d kB) / 1024) # Em MB
FILESYS=$(df -h|egrep -v '(tmpfs|udev)')
UPTIME=$(uptime -s)
# Machine report (labels in Portuguese).
echo "=================================================================="
echo "Relatório da Máquina: $HOSTNAME"
echo "Data/Hora: $(date +%d/%m/%y-%H:%M)"
echo "=================================================================="
echo
echo "Máquina Ativa desde: $UPTIME"
echo
echo "Versão do Kernel: $KERNEL"
echo
echo "CPUs:"
echo "Quantidade de CPUs/Core: $CPUNO"
echo "Modelo da CPU: $CPUMODEL"
echo
echo "Memória Total: $MEMTOTAL MB"
echo
echo "Partições:"
echo "$FILESYS"
echo
echo "=================================================================="
# Optional per-user report when a username is passed as $1.
# NOTE(review): "grep $1 /etc/passwd" matches $1 anywhere in the line,
# so a substring of another account could be picked up — verify.
if [ -n "$1" ]
then
	echo "Start user search: $1"
	# Abort if the user has no home directory.
	ls /home/$1 > /dev/null 2>&1 || { echo "Usuario Inexistente" ; exit 1; }
	USERID=$(grep $1 /etc/passwd|cut -d":" -f3)
	DESC=$(grep $1 /etc/passwd|cut -d":" -f5 | tr -d ,)
	USOHOME=$(du -sh /home/$1|cut -f1)
	echo "=========================================================================="
	echo "Relatório do Usuário: $1"
	echo
	echo "UID: $USERID"
	echo "Nome ou Descrição: $DESC"
	echo
	echo "Total Usado no /home/$1: $USOHOME"
	echo
	echo "Ultimo Login:"
	lastlog -u $1
	echo "=========================================================================="
fi
exit 0
| true |
c4a50d7f63f919423afe644974cf9cc322e08750 | Shell | open-contracting/deploy | /run.py | UTF-8 | 933 | 2.9375 | 3 | [
"MIT"
] | permissive | #!/bin/sh
# https://superuser.com/a/1622435/1803567
"""$(dirname $(readlink $(which salt-ssh) || which salt-ssh))"/bin/python3 - "$@" <<"EOF"""
import os
import socket
import sys
import salt.cli.ssh
import salt.client.ssh
def main() -> None:
    """Port-knock every salt-ssh target, then re-exec the real salt-ssh.

    Parses the salt-ssh CLI configuration to discover the target hosts,
    opens a short-lived TCP connection to port 8255 on each one (the
    port-knock), and finally replaces this process with a normal
    salt-ssh invocation.
    """
    # Replace program name to match Saltfile.
    sys.argv[0] = "salt-ssh"
    # See salt/scripts.py::salt_ssh
    client = salt.cli.ssh.SaltSSH()
    # See salt/cli/ssh.py::SaltSSH
    client.parse_args()
    ssh = salt.client.ssh.SSH(client.config)
    # Port-knock all the targets.
    print("Port-knocking:")
    for name, target in ssh.targets.items():
        print(f"- {target['host']} ({name})")
    for target in ssh.targets.values():
        try:
            # 1-second timeout; connection failures are expected and
            # deliberately ignored — the attempt itself is the knock.
            socket.create_connection((target["host"], 8255), 1)
        except OSError:
            pass
    # Run salt-ssh as usual.
    print("Running...")
    os.execvp("salt-ssh", sys.argv)
if __name__ == "__main__":
    main()
| true |
eb5559930e1d8ca2d1a6b91bb19e74f41b1c43c3 | Shell | WayneJz/COMP9041-18S2 | /Test12/eating.sh | UTF-8 | 120 | 2.75 | 3 | [
"MIT"
] | permissive | #!/bin/sh -e
# From lines containing capital letters in $1: take the second ':' field,
# keep the part before the first comma, strip a leading space and any
# double quotes, then print the unique values numerically sorted.
cat "$1"|egrep "[A-Z]+"|cut -d ":" -f 2|cut -d "," -f 1|sed "s/^ //g"|sed "s/\"//g"|sort -n|uniq|sort -n
| true |
e343193c1bdbc90bd034eca2a6bc3d25f1d2a1f9 | Shell | coxw/dotfiles | /media/cask.sh | UTF-8 | 891 | 2.90625 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Start
echo "`basename $0` starting."
# Link Homebrew casks in `/Applications` rather than `~/Applications`
export HOMEBREW_CASK_OPTS="--appdir=/Applications"
# Ask for the administrator password upfront
sudo -v
# setup taps
brew tap caskroom/versions
brew tap caskroom/fonts
# install cask
brew install caskroom/cask/brew-cask
# install applications
# NOTE(review): vlc appears twice in this list (here and below).
brew cask install kodi
brew cask install vlc
brew cask install java
brew cask install google-chrome
brew cask install transmission
brew cask install vlc
brew cask install crashplan
brew cask install handbrake
brew cask install airserver
brew cask install alfred
brew cask install spotify
# install fonts
brew cask install font-source-code-pro
brew cask install font-source-code-pro-for-powerline
brew cask install font-fira-sans
# cleanup unneeded files
brew cleanup
# Finished
echo "`basename $0` complete."
| true |
57025b130e139fd93b31fc437e28cac288bfd52c | Shell | Araksya-Hambaryan/ITC-9 | /Meliq_Melqonyan/Homeworks/Bash/1.21.05.18/flag.sh | UTF-8 | 312 | 3.0625 | 3 | [] | no_license | #!/bin/bash
# Print the value for a recognised flag (-name or -N); anything else
# prints "error".  The duplicate -name/-N arms of the original are
# merged into a single pattern.
flag() {
	case "$1" in
		-name|-N)
			echo -ne $2
			;;
		*)
			echo "error"
			;;
	esac
}

# Show the prompt line, then dispatch the script arguments to flag().
main() {
	echo -ne "nermuce -name kam -N ev inchvor ban \n"
	flag $1 $2
}

main $1 $2
echo -ne "\n"
| true |
18ab6d3ec455d157175311d5e066a3c6864d4c02 | Shell | mikeslattery/dotfiles | /bin/watchfile | UTF-8 | 477 | 3.765625 | 4 | [] | no_license | #!/bin/bash
# Watch file
# Run a command whenever a file or directory changes.
# Usage: watchfile <file> <command> <args...>
file="$1"
shift
ts=''
# Poll the file's mtime once per second and run the command whenever it
# differs from the previous poll.  Because ts starts empty, the command
# also runs once right after startup.
while true; do
  ots="$ts"
  ts="$(stat -c %Y "$file")"
  [[ "$ts" == "$ots" ]] || "$@"
  sleep 1
done
#TODO:
# --stdin Read stdin for command from file
# --cls Clear screen after each run
# --time Print time before each run
# -- Command follows. Useful it watching multiple files
# Use inotifywait if in path
| true |
0fc475acc563d3d5fefe40340dccd0110dfdaccd | Shell | yuhuanq/dots | /.bashrc | UTF-8 | 1,229 | 2.765625 | 3 | [] | no_license | # .bashrc
# Source global definitions
if [ -f /etc/bashrc ]; then
. /etc/bashrc
fi
# Uncomment the following line if you don't like systemctl's auto-paging feature:
# export SYSTEMD_PAGER=
# User specific aliases and functions
if [ -f ~/.bash_aliases ]; then
. ~/.bash_aliases
fi
#export TERM=xterm-256color
#export TERM=tmux-256color
export EDITOR=vimx
export OPAMEXTERNALSOLVER="cudf_remote_proxy %{input}% %{output}% %{criteria}%"
eval $(opam config env)
# custom prompt green='\e[0;32m'
GREEN='\e[0;32m'
red='\e[0;31m'
RED='\e[1;31m'
blue='\e[0;34m'
BLUE='\e[1;34m'
cyan='\e[0;36m'
CYAN='\e[1;36m'
NC='\e[0m'
#if [ $(id -u) -eq 0 ];
#then
# PS1="┌${RED}[\u]${NC} [\h]$ps1_informer:\[\e[0;32;49m\]\w\[\e[0m \n└> "
#else
# PS1="┌[${GREEN}\u${NC}] [\h]$ps1_informer:\[\e[0;32;49m\]\w\[\e[0m \n└> "
#fi
# lambda prompt
PS1=" \[\e[1;31m\]λ \W \[\e[0m\]"
PATH="$HOME/bin:$PATH"
# uncomment if using gruvbox colorscheme
#source "$HOME/.vim/bundle/gruvbox/gruvbox_256palette.sh"
BASE16_SHELL=$HOME/.config/base16-shell/
[ -n "$PS1" ] && [ -s $BASE16_SHELL/profile_helper.sh ] && eval "$($BASE16_SHELL/profile_helper.sh)"
set -o vi
[ -f ~/.fzf.bash ] && source ~/.fzf.bash
. "$HOME/.cargo/env"
| true |
9a04a2bbd7122686778c1cf22e587e7f7d2ef0b3 | Shell | apallath/parallel_algos | /src/demo_usage/benchmark_mpi_pi.sh | UTF-8 | 598 | 3.0625 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Strong- and weak-scaling benchmark of demo_mpi_pi over several MPI
# rank counts; program output goes to *_output.txt, timing to *_time.txt.
proccts=(1 2 4 8 12 16)
#Strong scaling
# Fixed problem size (1e10 steps) regardless of rank count.
for p in ${proccts[@]}
do
    echo "Running strong $p"
    echo $p >> mpi_pi_strong_output.txt
    echo $p >> mpi_pi_strong_time.txt
    (time mpirun -np $p --use-hwthread-cpus demo_mpi_pi 10000000000) >> mpi_pi_strong_output.txt 2>> mpi_pi_strong_time.txt
done
#Weak scaling
# Problem size grows with the rank count (1e9 steps per process).
for p in ${proccts[@]}
do
    echo "Running weak $p"
    echo $p >> mpi_pi_weak_output.txt
    echo $p >> mpi_pi_weak_time.txt
    steps=$(bc <<< "$p * 1000000000")
    (time mpirun -np $p --use-hwthread-cpus demo_mpi_pi $steps) >> mpi_pi_weak_output.txt 2>> mpi_pi_weak_time.txt
done
| true |
2c6c3c326c2a3366ad8f70406f310c67e6f45b3d | Shell | oswemo/UrlCheck | /ci/scripts/unit-test.sh | UTF-8 | 741 | 3.28125 | 3 | [] | no_license | #!/bin/sh
set -e -x
# Setup the gopath based on current directory.
export GOPATH=$PWD
ls -la
# Now we must move our code from the current directory ./hello-go to $GOPATH/src/github.com/JeffDeCola/hello-go
# NOTE(review): the comment above still references hello-go, but the
# code actually copies ./UrlCheck — comment is stale.
mkdir -p src/github.com/oswemo
cp -R ./UrlCheck src/github.com/oswemo/.
# All set and everything is in the right place for go
echo "Gopath is: " $GOPATH
echo "pwd is: " $PWD
cd src/github.com/oswemo/UrlCheck
ls -lat
make deps
# RUN unit_tests and it shows the percentage coverage
# print to stdout and file using tee
make test | tee test_coverage.txt
# add some whitespace to the begining of each line
sed -i -e 's/^/ /' test_coverage.txt
# Move to coverage-results directory.
mv test_coverage.txt $GOPATH/coverage-results/. | true |
3636445d115a5df7c818f4df7ebd28d5e81b15d9 | Shell | kweerious/rinxter-testbed | /cookbooks/main/templates/default/rinxter.erb | UTF-8 | 705 | 3.59375 | 4 | [] | no_license | #!/bin/bash
### BEGIN INIT INFO
# Provides: rinxter
# Required-Start: $network $syslog mysql tomcat
# Required-Stop: $syslog
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Start daemon at boot time
# Description: Enable service provided by daemon.
### END INIT INFO
RETVAL=0;
RINXTER_BASE="/home/vagrant/rinxter/base"
# Start the Rinxter daemon in the background with both stdout and
# stderr captured in the service log.
start() {
	echo "Starting Rinxter"
	# Fix: the original line was "tstart.sh &2>&1 > .../service.log".
	# The "&" terminated the command, so "2>&1 > file" applied to an
	# empty command and nothing was ever logged.  Redirect first, then
	# background the process.
	$RINXTER_BASE/bin/tstart.sh > $RINXTER_BASE/logs/service.log 2>&1 &
}
# Stop the Rinxter daemon via its shutdown script.
stop() {
	echo "Stopping Rinxter"
	$RINXTER_BASE/bin/tstop.sh
}
restart() {
	stop
	start
}
# Standard SysV-style dispatch on the first argument.
case "$1" in
    start)
        start
        ;;
    stop)
        stop
        ;;
    restart)
        restart
        ;;
    *)
        echo $"Usage: $0 {start|stop|restart}"
        exit 1
esac
exit $RETVAL
| true |
1f2747dbc17513b1ecca56195272acb7ed6cf093 | Shell | mruettgers/docker-doorbell | /files/var/www/localhost/htdocs/ring.sh | UTF-8 | 435 | 2.765625 | 3 | [] | no_license | #!/usr/bin/haserl
content-type: text/plain
OK
<%
export $(cat /doorbell/.env | xargs)
PULSE_SINK=${GET_pulse_sink:-${PULSE_SINK}}
PULSE_VOLUME=${GET_pulse_volume:-""}
DOORBELL_SOUND=${GET_doorbell_sound:-${DOORBELL_SOUND}}
ARGS=""
if [ ! -z "${PULSE_SINK}" ]; then
ARGS="${ARGS} -d ${PULSE_SINK}"
fi
if [ ! -z "${PULSE_VOLUME}" ]; then
ARGS="${ARGS} --volume=${PULSE_VOLUME} "
fi
/usr/bin/paplay ${ARGS} ${DOORBELL_SOUND}
%> | true |
edfffdde8e1e06827feb67674d9afb02711da112 | Shell | LabNeuroCogDevel/anki_mratlas | /mkMasks.bash | UTF-8 | 783 | 3.3125 | 3 | [] | no_license | #!/usr/bin/env bash
set -euo pipefail
trap 'e=$?; [ $e -ne 0 ] && echo "$0 exited in error"' EXIT
cd $(dirname $0)
#
# 20180911 - create flashcards from afni whereami atlas
#
atlas=TT_Daemon
#atlas=CA_N27_ML
[ ! -d masks/$atlas ] && mkdir -p masks/$atlas
# For every non-left, non-Brodmann region listed by the atlas: build a
# mask volume scaled by the region's ROI number and record its centre
# of mass.
whereami -atlas $atlas -show_atlas_code|
   grep -iv Left |
   grep -v Brodman|
   grep : |
   while IFS=":" read side area roi; do
      out=masks/$atlas/${area// /_}
      # skip regions already extracted on a previous run
      [ -r $out.nii.gz ] && continue
      whereami -mask_atlas_region "$atlas:$side:$area" -prefix $out.nii.gz
      3dcalc -overwrite -a $out.nii.gz -expr a*$roi -prefix $out.nii.gz
      3dCM $out.nii.gz > $out.txt
   done
# Combine all per-region masks into one bucket plus a max-projection.
3dbucket -overwrite -prefix masks/$atlas.nii.gz masks/$atlas/*.nii.gz
3dTstat -overwrite -max -prefix masks/$atlas.all.nii.gz masks/$atlas.nii.gz
| true |
85c8f4b63c710db90a8c4bdcc7e2b7f7dc9e93d0 | Shell | danielschwierzeck/u-boot-test-hooks | /bin/u-boot-test-flash | UTF-8 | 370 | 2.84375 | 3 | [
"MIT"
] | permissive | #!/bin/bash
#
# SPDX-License-Identifier: MIT
#
# Hook script for U-Boot pytest framework
#
# Builds a 4 MiB QEMU pflash image: fill the file with 0xFF (erased
# flash), then overlay u-boot.bin at offset 0 without truncating.
set -e
set -x
export LC_ALL=C
board_type=$1
board_identity=$2
qemu_pflash_bin=${U_BOOT_BUILD_DIR}/pflash.bin
uboot_bin=${U_BOOT_BUILD_DIR}/u-boot.bin
dd if=/dev/zero bs=1M count=4 | tr '\000' '\377' > ${qemu_pflash_bin}
dd if=${uboot_bin} of=${qemu_pflash_bin} conv=notrunc
| true |
8e2f50066d46164b08f8e180bd2ab25d903824bb | Shell | chenyingnan/kolla | /docker/elasticsearch/extend_start.sh | UTF-8 | 437 | 3.359375 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Ensure the Kolla log directory exists with mode 755, and give the
# Elasticsearch data directory to the elasticsearch user — each step
# only runs when a change is actually needed.
if [[ ! -d "/var/log/kolla/elasticsearch" ]]; then
    mkdir -p /var/log/kolla/elasticsearch
fi
if [[ $(stat -c %a /var/log/kolla/elasticsearch) != "755" ]]; then
    chmod 755 /var/log/kolla/elasticsearch
fi
# Only update permissions if permissions need to be updated
if [[ $(stat -c %U:%G /var/lib/elasticsearch/data) != "elasticsearch:elasticsearch" ]]; then
    sudo chown elasticsearch: /var/lib/elasticsearch/data
fi
| true |
61f79358b627ec325a2b9a7a8053afe31e71b357 | Shell | Raqui333/dotfiles | /scripts/bar/dwm_status.sh | UTF-8 | 1,072 | 3.640625 | 4 | [] | no_license | #!/bin/bash
# Latest stable kernel version scraped from kernel.org.
# NOTE(review): the NR==92 line-number match is fragile — it breaks
# whenever the page layout changes; verify it still selects the version.
LATEST_KERNEL=$(curl -s "https://www.kernel.org/" | awk '/<strong>/{gsub(/[^0-9.]/,"");if (NR==92) print}')
SPACE=" | "
# Master mixer volume summed over the two front channels.
volume() {
    icon="$ICONS/Volume.xbm"
    command=$(amixer get Master | awk '{gsub(/[][%]/,"")}; /Front Left:/{lf=$5}; /Front Right:/{rf=$5}; END{printf("%i%\n", lf+rf)}')
    echo "$command"
}
# Running kernel, with "latest -> current" shown when an update exists.
kernel() {
    icon="$ICONS/Gentoo.xbm"
    current=$(uname -r)
    latest=$LATEST_KERNEL
    if [[ $current = $latest ]]
    then
        command=$current
    else
        command="$latest -> $current"
    fi
    echo "$command"
}
# Installed package count from the Portage package database.
packages() {
    icon="$ICONS/Packages.xbm"
    command=$(ls -d /var/db/pkg/*/* | wc -l)
    echo "$command"
}
dtime() {
    icon="$ICONS/Relogio.xbm"
    command=$(date +'%b %d, %a %I:%M %P')
    echo "$command"
}
# "used / total" memory from free -h.
mem() {
    icon="$ICONS/Mem.xbm"
    command=$(free -h | awk '/Mem/{print $3, $2}' OFS=' / ')
    echo "$command"
}
temp() {
    icon="$ICONS/Temp.xbm"
    command=$(sensors | awk '/Core/{printf("%i°C\n", $3)}')
    echo "$command"
}
echo "$(temp)$SPACE$(mem)$SPACE$(packages)$SPACE$(kernel)$SPACE$(volume)$SPACE$(dtime)"
| true |
97eabd7ef75b518c59ba96ea0c959292168a6749 | Shell | seanrmurphy/vugu-tdl-swagger | /backend/delete-cognito-configuration.sh | UTF-8 | 720 | 3.125 | 3 | [] | no_license | #! /usr/bin/env bash
# Abort with a non-zero status unless GOFULLSTACKPROFILE (the AWS CLI
# profile used by every command in this script) is set and non-empty.
check_env_vars() {
	if [ -z "$GOFULLSTACKPROFILE" ]
	then
		echo "Environment variable GOFULLSTACKPROFILE not defined...exiting..."
		# Fix: a bare "exit" returned status 0 (the status of the echo
		# above), so callers could not detect the failure.
		exit 1
	fi
}
check_env_vars
# Get the ARN of the user pool
# (jq actually extracts the pool *Id* of the pool named "todo_api")
POOLARN=$(aws cognito-idp list-user-pools --max-results 10 --profile $GOFULLSTACKPROFILE | jq -r '.UserPools[] | select(.Name=="todo_api") | .Id')
# Delete the user pool domain (done before the pool itself; order
# preserved from the original script).
echo 'Deleting user pool domain'
aws cognito-idp delete-user-pool-domain \
    --domain todo-api-client \
    --user-pool-id $POOLARN \
    --profile $GOFULLSTACKPROFILE
# Delete the user pool
echo 'Deleting user pool todo_api (and associated state)'
aws cognito-idp delete-user-pool \
    --user-pool-id $POOLARN \
    --profile $GOFULLSTACKPROFILE
| true |
ffc1c59c132b34a4d16ee57ec1f8cbcbe9c03ad6 | Shell | customme/installer | /nginx/install.sh | UTF-8 | 5,097 | 3.21875 | 3 | [] | no_license | #!/bin/bash
#
# nginx automatic build-and-install script
# Resolve this script's own directory (DIR), then pull in the shared
# helper library and the build configuration.
BASE_DIR=`pwd`
REL_DIR=`dirname $0`
cd $REL_DIR
DIR=`pwd`
cd - > /dev/null
if [[ -f $DIR/common.sh ]]; then
    source $DIR/common.sh
elif [[ -f $DIR/../common/common.sh ]]; then
    source $DIR/../common/common.sh
fi
source $DIR/config.sh
# Initialise: install build/runtime dependencies via yum
function init()
{
    # install dependencies
    yum install -y wget bzip2 unzip
    yum install -y gcc gcc-c++ make automake autoconf
    yum install -y lua-devel gd-devel
}
# 下载安装包
function download()
{
    # Fetch every source archive that is not already present locally.
    # Exit immediately on the first failed download.
    set -e

    # Each component defines a ${NAME}_URL / ${NAME}_PKG pair in
    # config.sh; iterate the pairs via indirect expansion instead of
    # repeating the same if-block nine times. -O pins the output file
    # to ${NAME}_PKG so the existence check above always matches the
    # file wget writes.
    local name url_var pkg_var
    for name in NGINX PCRE OPENSSL ZLIB CACHE_PURGE DEVEL_KIT FORM_INPUT UPSTREAM LUA; do
        url_var="${name}_URL"
        pkg_var="${name}_PKG"
        if [[ ! -f ${!pkg_var} ]]; then
            debug "Wget ${!url_var}"
            wget -c "${!url_var}" -O "${!pkg_var}"
        fi
    done
}
# 创建用户
function create_user()
{
    # Do not exit on error here: the group/user may already exist and
    # that is fine (-f on groupadd also makes it idempotent).
    set +e

    groupadd -f $NGINX_GROUP
    # Service-style account: no home directory, no login shell.
    useradd -M -s /bin/nologin $NGINX_USER -g $NGINX_GROUP
}
# 安装
function install()
{
    init

    # Abort immediately if any build step fails.
    set -e

    # Unpack all source archives next to this script.
    tar -xzf $NGINX_PKG
    tar -xjf $PCRE_PKG
    tar -xzf $OPENSSL_PKG
    tar -xzf $ZLIB_PKG
    tar -xzf $CACHE_PURGE_PKG
    tar -xzf $DEVEL_KIT_PKG
    tar -xzf $FORM_INPUT_PKG
    unzip -o $UPSTREAM_PKG
    tar -xzf $LUA_PKG

    cd $NGINX_NAME

    # Configure: build pcre/openssl/zlib from the unpacked trees and
    # statically add the third-party modules (cache purge, NDK,
    # form-input, upstream check, lua). All *_NAME / *_DIR values come
    # from config.sh.
    ./configure --user=$NGINX_USER --group=$NGINX_GROUP \
    --prefix=$NGINX_INSTALL_DIR \
    --sbin-path=/usr/sbin/nginx \
    --conf-path=$NGINX_CONF_DIR/nginx.conf \
    --http-log-path=$NGINX_LOG_DIR/access.log \
    --error-log-path=$NGINX_LOG_DIR/error.log \
    --pid-path=$NGINX_LOG_DIR/nginx.pid \
    --lock-path=$NGINX_LOG_DIR/nginx.lock \
    --with-poll_module \
    --with-http_ssl_module \
    --with-http_sub_module \
    --with-http_gzip_static_module \
    --with-http_random_index_module \
    --with-http_secure_link_module \
    --with-http_stub_status_module \
    --with-http_realip_module \
    --with-http_image_filter_module \
    --with-pcre=$DIR/$PCRE_NAME \
    --with-openssl=$DIR/$OPENSSL_NAME \
    --with-zlib=$DIR/$ZLIB_NAME \
    --add-module=$DIR/$CACHE_PURGE_NAME \
    --add-module=$DIR/$DEVEL_KIT_NAME \
    --add-module=$DIR/$FORM_INPUT_NAME \
    --add-module=$DIR/$UPSTREAM_NAME \
    --add-module=$DIR/$LUA_NAME \
    --with-ld-opt="-Wl,-rpath,/usr/local/lib"

    # Build
    make
    # Install
    make install

    # Clean up the unpacked source trees (the downloaded archives stay).
    cd - > /dev/null
    rm -rf $NGINX_NAME $PCRE_NAME $OPENSSL_NAME $ZLIB_NAME $CACHE_PURGE_NAME $DEVEL_KIT_NAME $FORM_INPUT_NAME $UPSTREAM_NAME $LUA_NAME
}
# 注册服务
function reg_service()
{
    # Register nginx to start at boot: SysV init script on EL6, systemd
    # unit on EL7. conf_service / conf_service_7 emit the service file
    # text; they are presumably defined in the sourced common.sh or
    # config.sh (not visible in this file) -- confirm.
    if [[ "$SYS_VERSION" =~ 6 ]]; then
        conf_service > /etc/init.d/nginx
        chmod +x /etc/init.d/nginx
        chkconfig --add /etc/init.d/nginx
        chkconfig nginx on
    elif [[ "$SYS_VERSION" =~ 7 ]]; then
        conf_service_7 > /lib/systemd/system/nginx.service
        chmod +x /lib/systemd/system/nginx.service
        systemctl enable nginx.service
    fi
}
# 卸载nginx
function clean_nginx()
{
    # Ask a running nginx to shut down gracefully; ignore the error when
    # it is not running so the rest of the cleanup still proceeds.
    nginx -s quit || true

    # Remove install dir, config dir, log dir and the sbin binary.
    rm -rf $NGINX_INSTALL_DIR $NGINX_CONF_DIR $NGINX_LOG_DIR /usr/sbin/nginx

    if [[ "$SYS_VERSION" =~ 6 ]]; then
        # Disable the service before unregistering it. chkconfig expects
        # the service *name* here, not the init-script path (the original
        # passed /etc/init.d/nginx to --del, which chkconfig rejects).
        chkconfig nginx off
        chkconfig --del nginx
        rm -f /etc/init.d/nginx
    elif [[ "$SYS_VERSION" =~ 7 ]]; then
        systemctl disable nginx.service
        rm -f /lib/systemd/system/nginx.service
    fi
}
# 用法
usage()
{
    # Print command-line help to stdout.
    printf '%s\n' "Usage: $0 [ -c create user ] [ -d download source package ] [ -i install ] [ -r register system service ] [ -u uninstall ] [ -v verbose ]"
}
function main()
{
    # No arguments: print help and fail.
    if [[ $# -eq 0 ]]; then
        usage
        exit 1
    fi

    # Collect option flags; the requested steps run in a fixed order
    # below regardless of the order of the flags.
    # NOTE(review): the optstring accepts 'a' and 's' but no case arm
    # handles them, so those two options are silently ignored.
    while getopts "acdirsuv" name; do
        case "$name" in
            c)
                create_flag=1;;
            d)
                download_flag=1;;
            i)
                install_flag=1;;
            r)
                register_flag=1;;
            u)
                clean_flag=1;;
            v)
                debug_flag=1;;
            ?)
                usage
                exit 1;;
        esac
    done

    # log_fn wraps each step with logging; it is presumably provided by
    # the sourced common.sh (not defined in this file).
    [[ $create_flag ]] && log_fn create_user
    [[ $download_flag ]] && log_fn download
    [[ $install_flag ]] && log_fn install
    [[ $register_flag ]] && log_fn reg_service
    [[ $clean_flag ]] && log_fn clean_nginx
}
main "$@" | true |
9ca82abfdb601182e7aeb4ecac700ec9fe902632 | Shell | javabean/Dockerfiles | /wordpress/bin/wp-changeDomainName.sh | UTF-8 | 4,073 | 3.859375 | 4 | [] | no_license | #!/bin/sh
set -eu
(set -o | grep -q pipefail) && set -o pipefail
(set -o | grep -q posix) && set -o posix
#set -x
print_usage() {
	# Print usage/help text; ${0##*/} is the script's basename.
	cat << EOT
Usage
${0##*/} -f old_fqdn -r new_fqdn
Changes a WordPress instance domain name.
E.g.: sudo -u www-data ${0##*/} -f "beta.example.net" -r "www.example.net"
EOT
}
main() {
	# Where the pre-migration database dump is written.
	local BACKUP_DIR="${BACKUP_DIR:-/tmp}"
	local BACKUP_SUFFIX="`date +%Y%m%d-%H%M%S`"
	# Old and new fully-qualified domain names (set via -f / -r).
	local URL_FIND="${URL_FIND:-}"
	local URL_REPLACE="${URL_REPLACE:-}"

	# Options
	while getopts "b:f:r:" option; do
		case "$option" in
			b) BACKUP_DIR="$OPTARG" ;;
			f) URL_FIND="$OPTARG" ;;
			r) URL_REPLACE="$OPTARG" ;;
			*) print_usage; exit 1 ;;
		esac
	done
	shift $((OPTIND - 1)) # Shift off the options and optional --

	if [ -z "${BACKUP_DIR}" -o -z "${URL_FIND}" -o -z "${URL_REPLACE}" ]; then
		print_usage
		exit 1
	fi
	# Allow only hostname characters. Fix: the class previously read
	# "a-zA_Z" -- the literal characters 'A', '_' and 'Z' instead of the
	# A-Z range -- so domains containing B..Y were wrongly rejected.
	if echo "${URL_FIND}${URL_REPLACE}" | grep -q '[^-a-zA-Z0-9.]'; then
		echo "Aborting: illegal char(s) in ${URL_FIND} or ${URL_REPLACE}"
		exit 1
	fi

	# Read DB connection settings from wp-config.php via wp-cli. A failed
	# `wp` call just yields an empty value; the check below reports it.
	local DB_HOST="$(wp config get --constant=DB_HOST)"
	local DB_USER="$(wp config get --constant=DB_USER)"
	local DB_PASSWORD="$(wp config get --constant=DB_PASSWORD)"
	local DB_NAME="$(wp config get --constant=DB_NAME)"
	local DB_PREFIX="$(wp config get --global=table_prefix)"
	if [ -z "$DB_HOST" -o -z "$DB_USER" -o -z "${DB_NAME}" -o -z "${DB_PREFIX}" ]; then
		echo "Can not read database information from `wp config path`: aborting!"
		exit 1
	fi

	# Make sure wp-migrate-db is installed and active, remembering
	# whether it must be deactivated again afterwards.
	local WP_MIGRATE_DB_WAS_INACTIVE=
	if ! wp plugin is-installed wp-migrate-db ; then
		echo "Installing wp-migrate-db..."
		wp plugin install wp-migrate-db --activate --activate-network
		WP_MIGRATE_DB_WAS_INACTIVE=1
	fi
	if [ `wp plugin get wp-migrate-db --field=status` = "inactive" ]; then
		WP_MIGRATE_DB_WAS_INACTIVE=1
		echo "Activating wp-migrate-db..."
		wp plugin activate wp-migrate-db --network
	fi

	local DB_DEST_BACKUP_FILE="${BACKUP_DIR}/wp-db-backup_${BACKUP_SUFFIX}.sql.gz"
	# local DB_DUMP_FILE="${BACKUP_DIR}/wp-db.sql.gz"
	echo "Backing up WordPress database to ${DB_DEST_BACKUP_FILE}..."
	#mysqldump -u "${DB_USER}" -p"${DB_PASSWORD}" --single-transaction --databases "${DB_NAME}" | gzip > "${DB_DEST_BACKUP_FILE}"
	wp migratedb export "${DB_DEST_BACKUP_FILE}" --skip-replace-guids --exclude-spam --gzip-file

	# Replace the plain (//host), URL-encoded (%2F%2Fhost) and
	# doubly-encoded (%252F%252Fhost) forms of the old domain.
	echo "Converting WordPress database -- ${URL_FIND} -> ${URL_REPLACE}"
	wp migratedb find-replace --find="//${URL_FIND},%2F%2F${URL_FIND},%252F%252F${URL_FIND}" --replace="//${URL_REPLACE},%2F%2F${URL_REPLACE},%252F%252F${URL_REPLACE}" --skip-replace-guids --exclude-spam

	# echo "Dumping and converting WordPress database into ${DB_DUMP_FILE} -- ${URL_FIND} -> ${URL_REPLACE}"
	# wp migratedb export "${DB_DUMP_FILE}" --find="//${URL_FIND},%2F%2F${URL_FIND},%252F%252F${URL_FIND}" --replace="//${URL_REPLACE},%2F%2F${URL_REPLACE},%252F%252F${URL_REPLACE}" --skip-replace-guids --exclude-spam --gzip-file
	#
	# echo "Importing database from ${DB_DUMP_FILE} into ${DB_USER}@${DB_HOST}:${DB_NAME}..."
	# # wait up to 1.5 minutes for the remote DB to be available (use case: startup)
	# #mysqladmin --silent --no-beep --wait=9 --connect_timeout 10 -h "${MYSQL_HOST}" -u "${MYSQL_USER}" -p"${MYSQL_PASSWORD}" ping
	# ( echo "USE \`${DB_NAME}\`;" && zcat "${DB_DUMP_FILE}" ) | mysql --batch --connect-timeout=30 -u "$DB_USER" -p"$DB_PASSWORD" -h "$DB_HOST"

	# Multisite bookkeeping: point source_domain rows at the new FQDN.
	( echo "USE \`${DB_NAME}\`; update ${DB_PREFIX}usermeta set meta_value='${URL_REPLACE}' where meta_key='source_domain' and meta_value='${URL_FIND}'" ) | mysql --batch --connect-timeout=30 -u "${DB_USER}" -p"${DB_PASSWORD}" -h "${DB_HOST}"

	if [ -n "$WP_MIGRATE_DB_WAS_INACTIVE" ]; then
		echo "Deactivating wp-migrate-db..."
		wp plugin deactivate wp-migrate-db --network
	fi

	echo "Flushing WordPress caches..."
	wp cache flush
	# If W3 Total Cache is active, flush its page/CDN caches as well.
	if wp plugin is-installed w3-total-cache ; then
		local W3TC_STATUS=`wp plugin get w3-total-cache --field=status`
		if [ "${W3TC_STATUS}" = "active" -o "${W3TC_STATUS}" = "active-network" ]; then
			wp total-cache flush all
			wp total-cache pgcache_cleanup
			wp total-cache cdn_purge
		fi
	fi
}
main "$@"
| true |
19f8af99b30385d5fe472986066fb813f217650e | Shell | symanli/htmlbutcher | /locale/install.sh | UTF-8 | 134 | 2.734375 | 3 | [] | no_license | #!/bin/sh
# Compile every gettext .po catalog in this directory to .mo and stage it
# as <locale>/htmlbutcher.mo for installation.
for file in *.po; do
	# POSIX sh has no nullglob: an unmatched glob leaves the literal
	# '*.po', which would make msgfmt fail. Skip it explicitly.
	[ -e "$file" ] || continue
	BN=${file%.po}
	# Quote all expansions so locale names with unusual characters work.
	msgfmt -o "$BN.mo" "$file"
	mkdir -p "$BN"
	cp "$BN.mo" "$BN/htmlbutcher.mo"
done
exit 0
| true |
861fcbde7d07800afc01abff57b18a45bf502eaa | Shell | ROAD2018/Audio-Enhancement-with-Deep-Learning | /scripts/train/autoencoder/activations.sh | UTF-8 | 1,055 | 2.515625 | 3 | [] | no_license | #!/bin/bash
# Run the training from the repository root so relative paths resolve.
root=$(git rev-parse --show-toplevel)
export PYTHONPATH="${root}/src"
cd ${root}

# Random seeds may be overridden via the first CLI argument
# (space-separated list); the rest of the sweep grid is fixed.
SEEDS=${1-"123 2112 997"}
ACTIVATIONS="relu elu prelu gelu swish"
LAYERS="5"
KERNELS="15"
CHANNELS="16"

# Train one autoencoder per (seed, layer, kernel, channel, activation)
# combination; the model directory name encodes the configuration.
for seed in ${SEEDS}; do
for layer in ${LAYERS}; do
for kernel in ${KERNELS}; do
for channel in ${CHANNELS}; do
for activation in ${ACTIVATIONS}; do
python3 src/autoencoder/train.py \
--epochs 10 \
--patience 3 \
--batch_size 16 \
\
--train_files 512 \
--save_every_n_steps 100 \
--transformations none \
\
--learning_rate 0.001 \
--num_layers ${layer} \
--channels ${channel} \
--kernel_size ${kernel} \
--random_seed ${seed} \
--activation ${activation} \
--norm batch_norm \
--model_dir "models/autoencoder/activations_${seed}_${activation}_l_${layer}_c_${channel}_k_${kernel}"
done;
done;
done;
done;
done;
51fb88d1f1c1154177e1f14a9f6d377f12703006 | Shell | subho007/dotzsh | /runcoms/zshrc | UTF-8 | 681 | 2.90625 | 3 | [
"MIT"
] | permissive | #
# Executes commands at the start of an interactive session.
#
# Authors:
# Sorin Ionescu <sorin.ionescu@gmail.com>
#
# Before Prezto Source
source $HOME/.zprezto/before.zsh
# Source Prezto.
if [[ -s "${ZDOTDIR:-$HOME}/.zprezto/init.zsh" ]]; then
source "${ZDOTDIR:-$HOME}/.zprezto/init.zsh"
fi
PATH=$HOME/.rvm/bin:$PATH # Add RVM to PATH for scripting
if [[ -s "$HOME/.pythonrc" ]]; then
export PYTHONSTARTUP="$HOME/.pythonrc"
fi
# Load the functions and aliases
source $HOME/.zprezto/zfunctions.zsh
##################### AFTER.zsh ########################
source $HOME/.zprezto/after.zsh
test -e "${HOME}/.iterm2_shell_integration.zsh" && source "${HOME}/.iterm2_shell_integration.zsh"
| true |
94d854d901c4811f7d557b71e7eb4bcbb186adaf | Shell | vsethum/packer-vagrant | /packer/scripts/packages.sh | UTF-8 | 1,248 | 3.15625 | 3 | [] | no_license | #!/usr/bin/env bash
# Vagrant/Packer provisioner: base CLI tools, Java 8 and nvm/node.

# add ppa to install current versions of nodejs
#apt-get install -y python-software-properties software-properties-common
#add-apt-repository -y ppa:chris-lea/node.js
sudo apt-get update
sudo apt-get install -y vim git zip unzip curl wget
#install java
# Skip the whole Java section if a java binary is already on PATH.
if which java >/dev/null; then
echo "skip java 8 installation"
else
echo "java 8 installation"
apt-get -y install python-software-properties software-properties-common
hash -r
add-apt-repository ppa:webupd8team/java
apt-get update && apt-get -y upgrade
# Pre-accept the Oracle license so the installer runs non-interactively.
echo oracle-java8-installer shared/accepted-oracle-license-v1-1 select true | /usr/bin/debconf-set-selections
apt-get -y install oracle-java8-installer
update-alternatives --display java
apt-get -y install oracle-java8-set-default && apt-get clean
apt-get update
# NOTE(review): this also installs OpenJDK 8 right after Oracle Java --
# confirm both runtimes are really wanted.
apt-get -y install openjdk-8-jre-headless
fi
#install nvm
# nvm installs into $HOME/.nvm; source its shell hook so `nvm` works in
# this same provisioning shell.
curl -o- https://raw.githubusercontent.com/creationix/nvm/v0.33.6/install.sh | bash
export NVM_DIR="$HOME/.nvm"
[ -s "$NVM_DIR/nvm.sh" ] && . "$NVM_DIR/nvm.sh" # This loads nvm
nvm install stable
#install node and npm
#curl -sL https://deb.nodesource.com/setup_8.x | sudo -E bash -
#sudo apt-get install -y nodejs
#sudo apt-get install build-essential
9dc7d7b8f4ccf05c1bb91133e204ee4824831d9d | Shell | ouwenpf/Linux-base | /shell脚本/analysi_time.sh | UTF-8 | 1,243 | 3.46875 | 3 | [] | no_license | #!/bin/bash
#
# Extract per-UUID reconnect and first-login log lines for a time window.
# $1: 0 means today, otherwise a YYYYMMDD log directory name.
# $2/$3: window bounds in YYYYMMDDHHMM form (compared as strings).
[ $# -ne 3 ] && echo "Usage $0:请输入三个参数,第一个参数为0(表示当天)格式如:20180101,后面两个参数为日志格式如:201801010000" && exit
if [ -z "$1" -o "$1" != "0" ];then
logdir="/data/www/loginfo/Logs/$1"
else
logdir="/data/www/loginfo/Logs/$(date +%Y%m%d)"
fi
# Bail out when there is no log for the day; otherwise truncate the report.
[ ! -f ${logdir}/log.txt ] && exit || > $logdir/analysi_time.log
a=$2
b=$3
# "uuid-count" pairs for GAME ReConn events inside (a,b), most frequent
# first (field 4 of the quoted record is presumably the UUID -- confirm
# against the log format).
list=`grep 'GAME.*ReConn' $logdir/log.txt|sort -t '-' -k1rn|awk -F '"' '$1>"'$a'" && $1<"'$b'" {print $4}'|awk '{a[$1]++}END{for(i in a)print i,a[i]}'|sort -t " " -k2rn|awk '{OFS="-";print $1,$2}'`
for i in $list
do
UUID=`echo $i|awk -F '-' '{print $1}'`
title=`echo $i|awk '{print $1}'`
echo "================$title==================" >> $logdir/analysi_time.log
# All in-window ReConn lines for this UUID, sorted by timestamp...
grep "GAME.*ReConn" $logdir/log.txt|sort -t '-' -k1rn|grep "$UUID"|awk -F '--' '$1>"'$a'" && $1<"'$b'" {print $0}'|sort -t '-' -k1rn >> $logdir/analysi_time.log
# ...plus every HALL/GAME first-login line for the same UUID.
grep "HALL.*First" $logdir/log.txt|sort -t '-' -k1rn|grep "$UUID" >> $logdir/analysi_time.log
grep "GAME.*First" $logdir/log.txt|sort -t '-' -k1rn|grep "$UUID" >> $logdir/analysi_time.log
#grep "GAME.*$UUID" $logdir/log.txt >> $logdir/analysi_time.log
echo "" >> $logdir/analysi_time.log
done
| true |
9cd7d3d6defb62245a624460314b855b5c0555a0 | Shell | THUSHARTOM/Learn_bash | /filetest.sh | UTF-8 | 297 | 3.5625 | 4 | [] | no_license | #! /bin/bash
# Prompt for a filename and, if it exists and is writable, append stdin
# to it until EOF (Ctrl+D).
echo -e "Enter the name of the file : \c"
# -r keeps backslashes in the typed path literal.
read -r file_name
# Quote the name: with the original unquoted tests, an *empty* answer
# collapsed `[ -f $file_name ]` to the always-true `[ -f ]`, and paths
# with spaces broke the test entirely.
if [ -f "$file_name" ]
then
	if [ -w "$file_name" ]
	then
		echo "Type or press ctrl+d to exit"
		cat >> "$file_name"
	else
		echo "file doesnt have write permission"
	fi
else
	echo "Not Found"
fi
29b75fed76efcfd6b2b17402b4cc0c905385d209 | Shell | njachowski/eggnog | /transpose.sh | UTF-8 | 93 | 2.953125 | 3 | [] | no_license | #!/bin/bash
# Print all whitespace-separated tokens of the file named by $1 on one
# line, joined by commas.
join_tokens() {
	local joined='' tok
	# read -a word-splits each line like the original unquoted `cat`
	# expansion did, but without pathname-expanding tokens such as '*'
	# (the original `for x in $(cat $1)` globbed every token).
	while read -r -a toks; do
		for tok in "${toks[@]}"; do
			joined="$joined,$tok"
		done
	done < "$1"
	# Strip the leading comma; empty input still prints a bare newline,
	# matching the original `echo | cut -c2-`.
	printf '%s\n' "${joined#,}"
}
join_tokens "$@"
| true |
4ccdb65d38322fbea02d79cace4dc5c35b8f870e | Shell | jcarlos2289/semanticMapsKnnSmooting | /respaldar.sh | UTF-8 | 301 | 2.84375 | 3 | [] | no_license | #Script para enviar los .tex del documento hacia el repositorio de bitbucket
#By JCarlos
# Commit everything in the working tree and push it, using $1 as the
# commit message.
if test $# -lt 1
then
	echo "Error: Introduce el Nombre para el Commit" >&2
else
	git add .
	# Quote the message so a multi-word argument stays a single -m value
	# (unquoted, `git commit -m $1` split it into several arguments).
	git commit -m "$1"
	git push -u origin master
	echo "Iniciando respaldo del commit " "$1"
	echo "Respaldo Terminado "
fi
| true |
804ec30520320de4f086d57c4e24b6dbd448f2d5 | Shell | ErikEkstedt/.files | /zsh/.config/zsh/functions.zsh | UTF-8 | 1,416 | 3.46875 | 3 | [] | no_license | # sourced from zshrc
function g() {
	# Case-insensitively filter the directory listing (including
	# dotfiles) for the given pattern. Quoting $1 keeps multi-word
	# patterns intact, and -- protects patterns starting with '-'.
	ls -a | grep -i -- "$1"
}
function print_path() { #{{{
function _print_path() { #{{{
for p in $path; do
echo "$p"
done
} #}}}
_print_path | bat
} #}}}
lfcd () {
tmp="$(mktemp)"
lf -last-dir-path="$tmp" "$@"
if [ -f "$tmp" ]; then
dir="$(cat "$tmp")"
rm -f "$tmp"
if [ -d "$dir" ]; then
if [ "$dir" != "$(pwd)" ]; then
cd "$dir"
fi
fi
fi
} #}}}
function _browser_tab() {
if [ -z $(pgrep -i $BROWSER) ]; then
echo $process "is NOT running."
$BROWSER --new-tab $@ &!
else
echo $process "is running."
$BROWSER --new-tab $@
fi
}
# Movement
vi-cmd-up-line-history() {
zle vi-cmd-mode
zle up-line-or-history
}
zle -N vi-cmd-up-line-history
vi-cmd-down-line-history() {
zle vi-cmd-mode
zle down-line-or-history
}
zle -N vi-cmd-down-line-history
# Tmux
function tns() {
  # Create-or-attach a tmux session named after $1, or after the current
  # directory's basename with dots rewritten (dots clash with tmux's
  # session.window target syntax; ".files" becomes "DOTFiles").
  _session_exists() {
    tmux list-sessions | sed -E 's/:.*$//' | grep -q "^$session_name$"
  }
  _not_in_tmux() {
    [ -z "$TMUX" ]
  }

  if [ -z $1 ]; then
    session_name=$(basename "$PWD" | sed -E 's/[.]/DOT/g' | sed -E 's/DOTfiles/DOTFiles/g')
  else
    session_name=$1
  fi

  # Outside tmux: attach-or-create directly. Inside tmux: create the
  # session detached (TMUX='' bypasses the nesting guard), then switch
  # the current client to it.
  if _not_in_tmux; then
    tmux new-session -As "$session_name"
  else
    if ! _session_exists; then
      (TMUX='' tmux new-session -Ad -s "$session_name")
    fi
    tmux switch-client -t "$session_name"
  fi
}
| true |
42fbfcdacc3e095d9aa15f8cfdb034bd291af34e | Shell | roidayan/ovs-tests | /test-tc-meter.sh | UTF-8 | 2,081 | 3 | 3 | [] | no_license | #!/bin/bash
#
# Test 'dangling' act_police action.
# Use TCP traffic to test per flow rate limit. TC filters will refer to
# the 'dangling' police action
# Bug SW #2707092, metering doesn't work before version xx.30.1602 xx.31.0354 xx.32.0114
my_dir="$(dirname "$0")"
. $my_dir/common.sh
require_module act_police
IP1="7.7.7.1"
IP2="7.7.7.2"
RATE=500
TMPFILE=/tmp/iperf3.log
config_sriov 2
enable_switchdev
require_interfaces REP REP2
unbind_vfs
bind_vfs
function cleanup() {
ip netns del ns0
ip netns del ns1
reset_tc $REP $REP2
sleep 0.5
tc action flush action police
}
trap cleanup EXIT
function config_police() {
config_vf ns0 $VF $REP $IP1
config_vf ns1 $VF2 $REP2 $IP2
reset_tc $REP $REP2
tc action flush action police
tc action add police rate ${RATE}mbit burst 40m conform-exceed drop/pipe
echo "add arp rules"
tc_filter add dev $REP protocol arp parent ffff: prio 1 flower \
action mirred egress redirect dev $REP2
tc_filter add dev $REP2 protocol arp parent ffff: prio 1 flower \
action mirred egress redirect dev $REP
echo "add vf meter rules"
tc_filter add dev $REP prio 2 protocol ip parent ffff: \
flower ip_proto tcp dst_ip $IP2 \
action police index 1 \
mirred egress redirect dev $REP2
tc_filter add dev $REP2 prio 2 protocol ip parent ffff: \
flower ip_proto tcp dst_ip $IP1 \
action police index 1 \
mirred egress redirect dev $REP
fail_if_err
ip link show dev $REP
tc filter show dev $REP ingress
ip link show dev $REP2
tc filter show dev $REP2 ingress
}
function test_tcp() {
title "Test iperf3 tcp $VF($IP1) -> $VF2($IP2)"
ip netns exec ns1 timeout 11 iperf3 -s -D
sleep 0.5
ip netns exec ns0 timeout 11 iperf3 -c $IP2 -t 10 -J c -P2 > $TMPFILE &
sleep 11
killall -9 iperf3 &>/dev/null
sleep 0.5
}
function run() {
title "Test act_police action"
config_police
test_tcp
verify_iperf3_bw $TMPFILE $RATE
}
run
trap - EXIT
cleanup
test_done
| true |
39d532d4a834e2ebce699c5976845ce54752c69a | Shell | lapig-ufg/lapig-maps | /devops/scripts/deploy_ows.sh | UTF-8 | 766 | 3.40625 | 3 | [] | no_license | #!/bin/bash
# Deploy script for LAPIG-MAPS OWS: build into a temp dir, then swap it
# into production and restart the service.
REPOSITORY_DIR="$HOME/repositories/lapig-maps"
TMP_PROD_DIR="/data/lapig-maps/prod_tmp"
PROD_DIR="/data/lapig-maps/prod"
CATALOG_DIR="/data/catalog"

mkdir -p $REPOSITORY_DIR
mkdir -p $TMP_PROD_DIR

# Only the 'prod' target is implemented; any other argument just prints
# "Done !!!" below.
case $1 in
	'prod')
		echo "Deploy LAPIG-MAPS-OWS"
		echo " 1) Updating sources"
		cd $REPOSITORY_DIR
		git pull
		# Stage a fresh copy of the sources in the temp production dir.
		rm -fR $TMP_PROD_DIR/*
		cp -R $REPOSITORY_DIR/src/* $TMP_PROD_DIR/
		echo " 2) Building OWS"
		cd $TMP_PROD_DIR/ows
		npm install
		# Link the shared catalog into the freshly built tree.
		ln -s $CATALOG_DIR data_dir/catalog
		echo " 3) Replacing production enviroment"
		rm -fR $PROD_DIR
		mv $TMP_PROD_DIR $PROD_DIR
		echo " 4) Restarting LAPIG-MAPS"
		sudo /etc/init.d/lapig-maps-ows stop
		sudo /etc/init.d/lapig-maps-ows start
		;;
esac

echo "Done !!!"
| true |
6580a40a7d9dc7dd3f68b85c643a71a9c6c37ee7 | Shell | Qabrix/Bash-Scripts | /system_info.sh | UTF-8 | 4,076 | 3.90625 | 4 | [] | no_license | #!/bin/bash
export LC_NUMERIC="en_US.UTF-8"
convert_bytes() {
if (( $(echo "$1 < 1024" | bc) )); then
printf "%.2f B/s" $1
elif (( $(echo "$1 < 1048576" | bc) )); then
echo "$(echo "scale=2; $1/1024" | bc) KB/s"
else
echo "$(echo "scale=2; $1/1048576" | bc) MB/s"
fi
}
output_data(){
printf "${ORANGE}Current network speed${NC}: ${ARROWDOWN} %s ${ARROWUP} %s\n" "$1" "$2"
printf "${ORANGE}Average network speed${NC}: ${ARROWDOWN} %s ${ARROWUP} %s\n" "$3" "$4"
printf "${ORANGE}Uptime: \n"
printf "${LGREEN}\tDays: ${NC}%d \n" $5
printf "${LGREEN}\tHours: ${NC}%d \n" $6
printf "${LGREEN}\tMinutes: ${NC}%d \n" $7
printf "${LGREEN}\tSeconds: ${NC}%d \n\n" $8
printf "${ORANGE}System load: ${NC}%s\n\n" $9
}
prepare_average_netSpeed() {
	# Push the newest download/upload samples (bytes/s, $1/$2) into the
	# 26-slot history ring buffers and recompute the moving averages.
	# Sets the globals avgDown/avgUp (human-readable strings) plus the
	# raw integer averages avgDowload/avgUpload used below.
	local sum_down=0 sum_up=0 i
	# Shift history one slot to the right, accumulating the kept samples.
	for i in {25..1}; do
		arrayDownload[$i]=${arrayDownload[$i-1]}
		arrayUpload[$i]=${arrayUpload[$i-1]}
		sum_down=$(( sum_down + ${arrayDownload[$i]} ))
		sum_up=$(( sum_up + ${arrayUpload[$i]} ))
	done
	arrayDownload[0]=$1
	arrayUpload[0]=$2
	sum_down=$(( sum_down + $1 ))
	sum_up=$(( sum_up + $2 ))
	# Mean over the full 26-sample window. The original divided the sum
	# by 10, which overstated the average nearly threefold for a 26-slot
	# buffer; shell arithmetic replaces the per-sample bc calls.
	avgDowload=$(( sum_down / 26 ))
	avgUpload=$(( sum_up / 26 ))
	avgDown=$(convert_bytes $avgDowload)
	avgUp=$(convert_bytes $avgUpload)
}
draw_chart() {
#wybieranie download/upload
declare arr
declare DRAWCOLOR
if [ "$1" -eq "0" ]; then
printf "${ORANGE}Download speed: \n\n${NC}"
DRAWCOLOR=$LGREEN
arr=("${arrayDownload[@]}")
elif [ "$1" -eq "1" ]; then
printf "${ORANGE}Upload speed: \n\n${NC}"
DRAWCOLOR=$MAGENTA
arr=("${arrayUpload[@]}")
fi
max=0.0
for n in "${arr[@]}"; do
(( $(echo "$n > $max" | bc) )) && max=$n
done
scaler=`echo "scale=2; $max/13" | bc`
(( $(echo "$max > 0.0" |bc) )) &&
for i in {0..12}; do
legend=$(convert_bytes $max)
printf "${NC}%s " $legend
addSpaces=$((15 - ${#legend}))
for ((c=1; c<$addSpaces; c++)); do
printf " "
done
printf "${DRAWCOLOR}"
for j in {0..25}; do
if (( $(echo "${arr[$j]} >= $max" | bc) )); then
printf "\u2588\u2588 "
else
printf " "
fi
done
max=`echo "$max - $scaler" | bc`
printf "\n"
done
printf "${NC}"
}
main() {
while true; do
sleep 1
clear
downloadMeas2=`awk '/enp0s31f6:/ {print $2}' /proc/net/dev`
uploadMeas2=`awk '/enp0s31f6:/ {print $10}' /proc/net/dev`
downloadInBytes=$(($downloadMeas2 - $downloadMeas1))
uploadInBytes=$(($uploadMeas2 - $uploadMeas1))
download=$(convert_bytes $downloadInBytes)
upload=$(convert_bytes $uploadInBytes)
downloadMeas1=$downloadMeas2
uploadMeas1=$uploadMeas2
timeBoot=`awk '{print int($1)}' /proc/uptime`
upSec=`echo "$timeBoot%60" | bc`
upMin=`echo "($timeBoot/60)%60" | bc`
upHr=`echo "($timeBoot/3600)%24" | bc`
upDays=`echo "($timeBoot/(3600*24))" | bc`
sysLoad=`awk '{print $4}' /proc/loadavg`
prepare_average_netSpeed $downloadInBytes $uploadInBytes
# 1 2 3 4 5 6 7 8 9
output_data "$download" "$upload" "$avgDown" "$avgUp" $upDays $upHr $upMin $upSec $sysLoad
#0 => download 1 => upload
draw_chart 0
draw_chart 1
done
}
LGREEN='\033[1;32m'
ORANGE='\u001b[0;33m'
MAGENTA='\u001b[0;35m'
NC='\033[0m'
ARROWDOWN='\u2193'
ARROWUP='\u2191'
downloadMeas1=`awk '/enp0s31f6:/ {print $2}' /proc/net/dev`
uploadMeas1=`awk '/enp0s31f6:/ {print $10}' /proc/net/dev`
arrayDownload=(0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0)
arrayUpload=(0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0)
declare avgDown
declare avgUp
main | true |
1cdd64753d74f2b7acb8a2f5dc1c52b6e1ecb6bc | Shell | UtsavChokshiCNU/GenSym-Test2 | /src/bundle/java/com/gensym/bundle/mybuild | UTF-8 | 948 | 3.21875 | 3 | [] | no_license | #!/bin/sh
# Builder for keygui.jar
# Mark David, 5/11/01
# This shell script builds a new keygui.jar file
# in the bundle/java/com/gensym/bundle directory.
# How to build and distribute: run this shell
# script (or execute its parts by hand).
# Then put the keygui.jar in the license
# server folder (replacing the previously
# existing one).
#
# It is expected that the distribution is to
# machine with a previously installed license generator.
# The launcher should be a bat file consisting of
# just this command (or one substantially similar):
#
# java -Dlogs="c:/LicenseServer/logs" -Djava.class.path=c:\LicenseServer\keygui.jar LicenseKey c:\LicenseServer\data
#
# To create a new distribution zip, unzip
# bundle/java/license-server.zip
# to a staging directory, replace the keygui.jar
# file, then rezip the staging directory.
echo Building keygui.jar . . .
javac *.java
jar -cfvm ../../../keygui.jar manifest.stub *.class
echo DONE.
| true |
641dc39977b8d22f856b806c5545f7d995f9817b | Shell | AlexandrinaBraguta/Repository_laborator1 | /docker/php-fpm/php-fpm-status.sh | UTF-8 | 226 | 2.640625 | 3 | [] | no_license | #!/bin/bash
export FPMPORT=${1:-9000}
export SCRIPT_NAME=/status
export SCRIPT_FILENAME=/status
export QUERY_STRING='json&full'
export REQUEST_METHOD=GET
(cgi-fcgi -bind -connect 127.0.0.1:${FPMPORT} | awk 'NR>5') || exit 1
| true |
ae87512e239a23bb04239cbd8dd8df63ca9bebb5 | Shell | nayanavenkataramana/ioc-cbc-tools | /cbc_thermal/cbc_thermald_start | UTF-8 | 344 | 2.515625 | 3 | [
"BSD-3-Clause"
] | permissive | #!/bin/bash -e
# Seed the thermal configuration from the packaged default on first run.
if [ ! -e /etc/ioc-cbc-tools/thermal-conf.xml ]; then
	mkdir -p /etc/ioc-cbc-tools/
	cp /usr/share/ioc-cbc-tools/thermal-conf.xml /etc/ioc-cbc-tools/thermal-conf.xml
fi

# Start the CBC thermal bridge in the background, give it time to come
# up, then run thermald in the foreground with the CBC configuration.
/usr/bin/cbc_thermal &
sleep 10
/usr/bin/thermald --no-daemon --ignore-cpuid-check --ignore-default-control --config-file /etc/ioc-cbc-tools/thermal-conf.xml
| true |
c6c9bc1e03498621f638eca1213e7eda6f950c36 | Shell | lululeta2014/mihaib | /lethe/linux-setup/common-gui/chromium.sh | UTF-8 | 1,047 | 3.578125 | 4 | [] | no_license | #!/bin/bash
set -u # exit if using uninitialised variable
set -e # exit if some command in this script fails
trap "echo $0 failed because a command in the script failed" ERR
SCRIPT=`readlink -f "$0"`
DIR=`dirname "$SCRIPT"`
source "$DIR/../sourceme.bash"
if [ "$MB_LSB_ID" == "Debian" ]; then
CHROMIUM_BIN='chromium'
else
CHROMIUM_BIN='chromium-browser'
fi
FORCE_FLAG=''
if [ $# == 1 ]; then
# can't use [$#==1 -a $1=='--force'] because $1 is unbound if $# is 0
if [ $1 == '--force' ]; then
FORCE_FLAG='--force'
fi
fi
rm -rf ~/.config/chromium ~/.cache/chromium
"$DIR"/chromium.py $FORCE_FLAG ~/.config/chromium \
--window-width "$MB_BROWSER_WIDTH" \
--window-height "$MB_BROWSER_HEIGHT" \
--chromium-bin "$CHROMIUM_BIN"
rm -rf ~/.config/chromium-"$MB_BROWSER_ALT_PROFILE" \
~/.cache/chromium-"$MB_BROWSER_ALT_PROFILE"
"$DIR"/chromium.py $FORCE_FLAG ~/.config/chromium-"$MB_BROWSER_ALT_PROFILE" \
--enable-translate \
--window-width "$MB_BROWSER_WIDTH" \
--window-height "$MB_BROWSER_HEIGHT" \
--chromium-bin "$CHROMIUM_BIN"
| true |
f1fe77dc3149fa93f98766a19c4f70474fbb30e3 | Shell | kaos-addict/Scripts | /bin/SystemInfo | UTF-8 | 1,133 | 3.90625 | 4 | [] | no_license | #!/bin/bash
### Imports ###################################################################
source ScriptFunctions
Import OptionParser
### Options ###################################################################
scriptDescription="Display some basic system information. Useful for /etc/issue."
Parse_Options "$@"
### Operation #################################################################
function count_cpus() {
	# Number of logical CPUs, counted as "model name" entries in
	# /proc/cpuinfo. grep -c counts matching lines directly, replacing
	# the grep | wc -l pipeline with a single process.
	grep -ci "model name" /proc/cpuinfo
}
function parse_cpu() {
	# Print the value of key $1 for the $2-th matching /proc/cpuinfo line
	# ($2 is a 1-based CPU index). tr turns the trailing newline into a
	# NUL so callers can embed the value mid-line.
	grep -i "$1" /proc/cpuinfo | head -n $2 | tail -n 1 | cut -d: -f2 | tr "\n" "\0"
}
function parse_mem() {
	# Print the value of key $1 from /proc/meminfo, stripping spaces,
	# the "kB" unit and the newline (via tr's character-set deletion).
	grep -i "$1" /proc/meminfo | cut -d: -f2 | tr " kB\n" "\0"
}
IN="\033[36;22m"
OUT="\033[34;1m"
echo -e "${IN}System........:${OUT}" `uname -mrs`
echo -e "${IN}Compilation...:${OUT}" `uname -v`
for i in $(seq $(count_cpus))
do
echo -e "${IN}Processor.....:${OUT}" ` parse_cpu "model name" $i`
echo -e "${IN} Clock.....:${OUT}" `parse_cpu "cpu MHz" $i` "MHz"
echo -e "${IN} Bogomips..:${OUT}" `parse_cpu "bogomips" $i`
done
echo -e "${IN}Memory........:${OUT}" `parse_mem "MemTotal"`"k"
echo -en "\033[0m"
| true |
8766494d4b4acddba0452efc948ba50de3e57649 | Shell | Someguy123/mssql-docker | /main/extra/entry.sh | UTF-8 | 1,507 | 3.296875 | 3 | [
"X11",
"MIT"
] | permissive | #!/usr/bin/env bash
#########
#
# Docker Entry Point Script for someguy123/mssql
# github.com/Someguy123/mssql-docker
#
# License: MIT / X11
#
#########
. /opt/sg-mssql/colors.sh
echo 'PS1="\[\033[35m\]\t \[\033[32m\]\h\[\033[m\]:\[\033[33;1m\]\w\[\033[m\] # "' >> /root/.bashrc
chsh -s /bin/bash root
msg
msg green "#####################################################################################################################"
msg green "#"
msg green "# Welcome to Someguy123's MSSQL Docker Image ( someguy123/mssql ) "
msg green "#"
msg green "# Microsoft SQL Server tools (MSSQL) should be installed into /opt/mssql-tools/bin"
msg green "#"
msg green "# Copy your DSN files into /opt/sg-mssql/dsns using the placeholder "
msg green "# 'MS_DRIVER' within the 'Driver =' setting."
msg green "#"
msg green "# Example DSN available at: ${BOLD}/opt/sg-mssql/example_dsn.ini"
msg green "#"
msg green "# Run ${BOLD}install_dsns${RESET}${GREEN} to install all DSN files inside of ${BOLD}/opt/sg-mssql/dsns"
msg green "# with automatic placeholder replacement of 'MS_DRIVER' with the name of the current ODBC driver."
msg green "#"
msg green "#####################################################################################################################\n"
msg yellow "\nContents of '/opt/mssql-tools/bin':\n"
ls --color=always -l /opt/mssql-tools/bin
export PATH="/opt/mssql-tools/bin:/opt/sg-mssql/bin:${PATH}"
msg magenta "\n##################################################\n"
bash
| true |
4264a0c60bba6b92461f189555dc15f821648691 | Shell | diogenesrj/s373 | /.git-clone-dir/majestic/etc/netctl/hooks/10-disconnect.sh | UTF-8 | 780 | 3.328125 | 3 | [] | no_license | #!/bin/sh
#
# netctl hook: reload the r8169 NIC driver when the wired profile loses
# its link, and show desktop notifications on (re)connect.
#
# $interface (ex: wlan0)
# $profile (ex: wlan0-essid)
# $action (ex: CONNECT, see wpa_actiond --help)
# $ssid

if [ "$profile" = "eth0" ]
then
	case "$action" in
		# CONNECT, LOST, REESTABLISHED, FAILED, DISCONNECT
		"LOST"|"FAILED"|"DISCONNECT")
			# Work around a flaky link by reloading the driver module.
			rmmod r8169
			modprobe r8169
			# Only notify when an X session is available.
			if [ "$DISPLAY" ]; then
				notify-send "NetCtl" "Modulo de rede wi-fi recarregado..."
			fi
			;;
		"CONNECT")
			if [ "$DISPLAY" ]; then
				notify-send "NetCtl" "Conectado!"
			fi
			;;
		"REESTABLISHED")
			if [ "$DISPLAY" ]; then
				notify-send "NetCtl" "Reconectado!"
			fi
			;;
		#*)
		#	unset http_proxy
		#	;;
	esac
fi
| true |
ed50f845738d3e3723e0b59470ec404cb24a14ca | Shell | cha63506/configs-n900 | /etc/osso-backup/pre-backup.d/mission-control.sh | UTF-8 | 324 | 2.984375 | 3 | [] | no_license | #!/bin/sh
# osso-backup pre-backup hook: only act when the "comm_and_cal" backup
# category is requested; exit successfully for anything else.
echo "$*" | grep -q "comm_and_cal" || exit 0

# Copy to a temporary location so that restore script can
# manually move them to the original one and restart
# the applications using it.
ACCOUNTSDIR=/home/user/.rtcom-accounts
BACKUPDIR=/tmp/.rtcom-accounts

# Replace any stale copy with a fresh archive-mode snapshot (cp -a keeps
# permissions and timestamps).
rm -rf "$BACKUPDIR"
cp -a "$ACCOUNTSDIR" "$BACKUPDIR"
| true |
a18edc04fe0925dbf335dfff23b168496af2567d | Shell | rogue0137/comp-env-setup | /aws_commands.sh | UTF-8 | 532 | 2.703125 | 3 | [] | no_license |
# SSM
# NOTE(review): ${PATH}/${NAME}/${NEW_NAME}/... below are fill-in
# placeholders, not real variables -- and ${PATH} collides with the
# shell's PATH variable; rename before turning these snippets into a
# runnable script.
# get parameters
aws ssm get-parameters-by-path --path ${PATH} --with-decryption
# set new parameter
aws ssm put-parameter --name ${PATH}/${NAME} --value ${NEW_NAME} --type SecureString
# change value
aws ssm put-parameter --name ${PATH}/${NAME} --value ${NEW_NAME} --type SecureString --overwrite
# when setting a link
# NOTE(review): the inline JSON is missing a comma after the "Name" line
# and uses "TYPE" where the AWS CLI skeleton uses "Type" -- verify before use.
aws ssm put-parameter --cli-input-json '{
"Name": ${PATH}/${NAME}
"Value": "${URL}",
"TYPE": "SecureString"
}'
# see logs
awslogs get ${PATH} -s ${TIME}
# example
awslogs get ${PATH} -s '10 minute'
c35e648a601acff7850247eac2a2908911fd7af6 | Shell | petronny/aur3-mirror | /ifshow-git/PKGBUILD | UTF-8 | 803 | 2.96875 | 3 | [] | no_license | # Maintainer: Miguel Paolino <mpaolino at gmail com>
pkgname=ifshow-git
pkgver=20110712
pkgrel=1
pkgdesc="A next-generation tool to display the status of network interfaces"
arch=('i686' 'x86_64')
url="http://github.com/awgn/ifshow"
license=('GPL')
groups=
provides=
depends=('gcc-libs' 'pciutils')
optdepends=('net-tools' 'ethtool')
makedepends=('git' 'cmake' 'gcc')
conflicts=()
replaces=()
backup=()
install=
source=()
md5sums=()
_gitroot="git://github.com/awgn/ifshow.git"
_gitname="ifshow"
build() {
  cd "$srcdir"
  # Clone the upstream repo on first run, fast-forward on later builds.
  if [[ -d "$_gitname" ]] ; then
    git pull origin || return 1
  else
    git clone "$_gitroot" "$_gitname" || return 1
  fi
  cd $_gitname

  cmake ./
  make
  # NOTE(review): installing into $pkgdir from build() is legacy PKGBUILD
  # style; modern makepkg expects this in a separate package() function.
  mkdir -p "$pkgdir/usr/sbin"
  install -o root -g root -m 755 ifshow "$pkgdir/usr/sbin/ifshow"
}
| true |
1d91dc5fe2a58ed6dd214fa4c8b2bca6274be80e | Shell | faustovrz/bugcount | /inst/scripts/analysis.sh | UTF-8 | 449 | 2.9375 | 3 | [] | no_license | #!/bin/bash
# USAGE:
# analysis.sh wf_consolidated.tab

# Input data table (first CLI argument).
DATA=$1
#DATA='wf_consolidated.tab'

# R analysis drivers and the prefixes used for their output/log names.
GLM='./analysis.R'
GLMPREFIX='resistance_analysis'
MM='./blup_heritability.R'
MMPREFIX='blup_analysis'

### Analysis by plant or leaf
# Launch the GLM and mixed-model analyses for each unit in the
# background (nohup + &) with one log file per run; the script returns
# immediately while the four jobs keep running.
for UNIT in plant leaf
do
	nohup bash -c "${GLM} ${UNIT} ${GLMPREFIX} ${DATA}" \
	     > ${UNIT}_${GLMPREFIX}.log 2>&1 &
	nohup bash -c "${MM} ${UNIT} ${MMPREFIX} ${DATA}" \
	     > ${UNIT}_${MMPREFIX}.log 2>&1 &
done
| true |
5d21e43e3f0858bfdf8096666751438de6444a38 | Shell | pulp-platform/hero-support | /linux/zynqlinux/custom_files/mount_nfs.sh | UTF-8 | 196 | 2.796875 | 3 | [] | no_license | #!/bin/sh
# Mount NFS share on bordcomputer if not mounted yet.
# grep -s stays quiet if /proc/mounts is unreadable; a trailing space in
# the pattern avoids matching prefixes like /mnt/nfs2.
if grep -qs '/mnt/nfs ' /proc/mounts; then
	: # already mounted, nothing to do
else
	mount -o nolock -t nfs 129.132.24.199:/home/vogelpi/pulp_on_fpga/share /mnt/nfs
fi
| true |
4b3793f04e5ffa6e42206e9cdddbd426f408bc91 | Shell | tvartom/ansible-collection-serveradmin | /tools/bootstrap.sh | UTF-8 | 5,770 | 3.796875 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Run this script with:
# $ curl https://raw.githubusercontent.com/tvartom/ansible-collection-serveradmin/master/tools/bootstrap.sh -o bootstrap.sh && sudo bash bootstrap.sh
#
# You need a serveradmin-repository with settings for this server.
# E.g.
# pause [PROMPT...]
# Print all arguments as a prompt and block until a line is read on stdin.
pause() {
   read -p "$*"
}
# --- Banner -----------------------------------------------------------------
echo "############################################"
echo "### BOOTSTRAP for Serveradmin by Tvartom ###"
echo "############################################"
echo ""
echo "Bootstrap this server with Ansible and the Serveradmin-repository."
echo ""
# Everything below modifies system state, so refuse to run unprivileged.
if [ "$EUID" -ne 0 ]
  then echo "Please run as root"
  exit
fi
# --- Interactive settings ---------------------------------------------------
echo "### Serveradmin-repository on Github ###"
SA_REPO_HOST="github.com"
read -p "Username or team on $SA_REPO_HOST for owner of serveradmin-repository: " SA_REPO_USER
read -p "Name of ${SA_REPO_USER}'s serveradmin-repository: " SA_REPO_NAME
SA_REPO="git@$SA_REPO_HOST:$SA_REPO_USER/$SA_REPO_NAME.git"
echo $SA_REPO;
echo ""
echo "### Serveradmin branch ###"
read -p "Repository branch [default]: " SA_BRANCH
# If a branch was entered, turn it into a " -b <branch>" suffix for git clone.
SA_BRANCH="${SA_BRANCH:+ -b $SA_BRANCH}"
echo ""
echo "### Serveradmin settings ###"
SA_INVENTORY_NAME_DEFAULT="$(hostname)"
read -p "Name of this server in inventory-file [$SA_INVENTORY_NAME_DEFAULT]: " SA_INVENTORY_NAME
SA_INVENTORY_NAME="${SA_INVENTORY_NAME:-$SA_INVENTORY_NAME_DEFAULT}"
SA_USER_DEFAULT="serveradmin"
# read -p "Name of servadmin-user [$SA_USER_DEFAULT]: " SA_USER
# SA_USER="${SA_USER:-$SA_USER_DEFAULT}"
SA_USER="$SA_USER_DEFAULT"
SA_PATH_DEFAULT="/opt/$SA_USER"
# read -p "Path to serveradmin [$SA_PATH_DEFAULT]: " SA_PATH
# SA_PATH="${SA_PATH:-$SA_PATH_DEFAULT}"
SA_PATH="$SA_PATH_DEFAULT"
echo ""
echo "Update system..."
# Start by updating system
# For AWS, gdisk is missing: https://www.spinics.net/lists/centos-devel/msg18766.html
dnf -y install gdisk
dnf -y update
# --- Service account --------------------------------------------------------
SYSTEM_USER_HOME="/home/system"
echo -n "Make sure '$SYSTEM_USER_HOME' exists... "
mkdir -p "$SYSTEM_USER_HOME"
chmod u=rwx,g=rx,o=rx "$SYSTEM_USER_HOME"
echo -e "Done\n"
SA_USER_HOME="$SYSTEM_USER_HOME/$SA_USER"
if ! id -u $SA_USER > /dev/null 2>&1; then
  echo -n "Creating user '$SA_USER'... "
  useradd --system --create-home --home "$SA_USER_HOME" "$SA_USER"
  echo -e "Done\n"
else
  echo -e "User '$SA_USER' already exists.\n"
fi
usermod -aG adm "$SA_USER"
usermod -aG wheel "$SA_USER"
if grep -q "^$SA_USER" "/etc/sudoers"; then
  echo -e "User '$SA_USER' is already sudoer.\n"
else
  echo -n "Make '$SA_USER' sudoer without password.... "
  echo "$SA_USER ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers
  echo -e "Done\n"
fi
# --- Deploy key -------------------------------------------------------------
mkdir -p "$SA_USER_HOME/.ssh"
chown $SA_USER:$SA_USER "$SA_USER_HOME/.ssh"
chmod u=rwx,g=,o= "$SA_USER_HOME/.ssh"
SA_DEPLOY_KEY="$SA_USER_HOME/.ssh/deploy_${SA_REPO_HOST}_${SA_REPO_USER}_${SA_REPO_NAME}"
SA_DEPLOY_KEY_COMMENT="${SA_USER}@${SA_INVENTORY_NAME} $(date +"%Y-%m-%d")"
if [ ! -f "$SA_DEPLOY_KEY" ]; then
  echo -n "Generating deploy-key... "
  sudo -u $SA_USER \
  ssh-keygen -b 4096 \
  -t rsa \
  -q \
  -N "" \
  -f "$SA_DEPLOY_KEY" \
  -C "$SA_DEPLOY_KEY_COMMENT"
  echo -e "Done\n"
else
  echo -e "Deploy key already exists with fingerprint: $(ssh-keygen -l -E md5 -f "$SA_DEPLOY_KEY" | grep -Po "(?<=MD5:).{47}").\n"
  # "
  # Recover the key comment from the existing public key file.
  SA_DEPLOY_KEY_COMMENT="$(sed -e "s/ssh-rsa \S* \?//" "${SA_DEPLOY_KEY}.pub" 2>/dev/null)"
  # "
fi
# Operator must register the public key on GitHub before we can clone.
echo "Add key as a read-only deploy-key on Github:"
echo "1. Log in as, or as administrator for '$SA_REPO_NAME' on $SA_REPO_HOST."
echo "2. Goto https://$SA_REPO_HOST/$SA_REPO_USER/$SA_REPO_NAME/settings/keys"
echo "3. Press 'Add deploy key'"
echo "4. Fill in:"
echo "   Title:"
echo "$SA_DEPLOY_KEY_COMMENT"
echo "   Key:"
ssh-keygen -y -f "$SA_DEPLOY_KEY"
echo "   Allow write access: No"
echo ""
echo "5. Press 'Add key'"
echo ""
pause 'Press [Enter] when done to continue...'
# --- Tooling ----------------------------------------------------------------
echo -n "Install Git..."
# if [ "$(cat /etc/centos-release | tr -dc '0-9.'|cut -d \. -f1)" = "7" ]; then
#   yum -y install https://centos7.iuscommunity.org/ius-release.rpm
#   yum -y install git2u-all
# else
dnf -y install git
# fi
echo -e "Done\n"
echo "Installing Python3 with pip..."
dnf -y install python3 python3-pip python3-libselinux
echo "Installing Ansible with pip to get latest version..."
sudo -u $SA_USER pip3 install --user ansible
echo -n "Creating /repos for serveradmin... "
mkdir -p "$SA_PATH/repos"
chown -R $SA_USER:$SA_USER "$SA_PATH"
chmod -R u=rwx,g=rwx,o=rx "$SA_PATH"
echo -e "Done\n"
# --- Clone repository and run the bootstrap playbook ------------------------
SA_PATH_REPO="$SA_PATH/repos/serveradmin"
cd "$SA_PATH"
if [ -d "$SA_PATH_REPO/.git" ]; then
  echo "Remove serveradmin-repo to make a clean download."
  rm -r "$SA_PATH_REPO"
fi
echo "Cloning serveradmin-repo..."
# git 2.10+ supports core.sshCommand
sudo -u "$SA_USER" git -c core.sshCommand="ssh -i $SA_DEPLOY_KEY" clone --recursive$SA_BRANCH $SA_REPO "$SA_PATH_REPO"
#else
# sudo -i -u "$SA_USER" -- bash -c "cd $SA_PATH_REPO && git -c core.sshCommand='ssh -i $SA_DEPLOY_KEY' pull --recurse-submodules"
# sudo -i -u "$SA_USER" -- bash -c "cd $SA_PATH_REPO && git -c core.sshCommand='ssh -i $SA_DEPLOY_KEY' submodule update --force --recursive"
#fi
# Write a tiny wrapper playbook that only runs with the 'bootstrap' tag.
PLAYBOOK_TO_RUN="temp_playbook-bootstrap.yml"
echo -e \
"---\n"\
"- import_playbook: collections/ansible_collections/tvartom/serveradmin/playbooks/playbook-serveradmin.yml\n"\
" when: \"'bootstrap' in ansible_run_tags\"" \
| sudo -u $SA_USER tee "$SA_PATH_REPO/$PLAYBOOK_TO_RUN" > /dev/null
sudo -i -u "$SA_USER" -- bash -c "cd $SA_PATH_REPO && ansible-playbook --extra-vars \"target='$SA_INVENTORY_NAME' connection_type='local'\" --tags bootstrap '$PLAYBOOK_TO_RUN'"
echo -e "Done\n"
echo "### Bootstrap is done ###"
echo "1. REBOOT! Run 'sudo reboot'. (Kernel is probably updated)"
echo "2. Relogin as root or a sudo-user specified in serveradmin."
echo "3. Run '<prefix>_ansible_serveradmin' to setup server."
41f434bccca7e5cf042516a512301ac41262fdbc | Shell | IPFR33LY/docker-stack-this | /ARCHIVE/traefik_stack4/runup.sh | UTF-8 | 1,542 | 3.359375 | 3 | [
"GPL-1.0-or-later",
"GPL-3.0-or-later"
] | permissive | #!/usr/bin/env bash
set -o errexit
trap 'echo "Aborting due to errexit on line $LINENO. Exit code: $?" >&2' ERR
set -o errtrace
set -o nounset

###############################################################################
# Functions
###############################################################################

# ensure_network NAME
# Create an attachable, encrypted overlay network if it does not exist yet.
# (Replaces two copy-pasted blocks that both reused the NTW_FRONT variable.)
ensure_network() {
  local NTW_FRONT=$1
  if [ ! "$(docker network ls --filter name="$NTW_FRONT" -q)" ]; then
    docker network create --driver overlay --attachable --opt encrypted "$NTW_FRONT"
    echo "Network: $NTW_FRONT was created."
  else
    echo "Network: $NTW_FRONT already exist."
  fi
}

# setup networks
ensure_network webgateway
ensure_network traefik

# launch the stack
echo; echo;
docker stack deploy --compose-file docker-compose.yml proxy

# List
echo; echo;
echo "docker stack ls ..."
docker stack ls;
echo; echo ; sleep 2

# Follow deployment in real time
#watch docker service ls
MIN=1
MAX=8
echo; echo;
for (( ACTION=MIN; ACTION<=MAX; ACTION++ )); do
  echo
  # Fix: 'of $MAX' used to sit outside the quotes; printed text is unchanged.
  echo "docker service ls | Check $ACTION of $MAX"; echo;
  docker service ls && echo && sleep 2;
done
echo; echo ; sleep 2

# See Traefik logs
echo; echo;
echo "To see Traefik logs type: ";
echo "    docker service logs -f traefik_traefik"; echo;
# | true |
e27f6330c80893d143a3480b17e8282b9a1cd518 | Shell | Shahraaz/S5_Ref | /OS/Temp/ShellSCripting/while.sh | UTF-8 | 114 | 3.171875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Ask how many greetings to print, then print that many numbered lines.
read -p "enter the number" n
count=1
while [ "$count" -le "$n" ]; do
    echo "Welcome $count times."
    count=$((count+1))
done
| true |
f0f54f93cd11431fa2b6bf3124dce3eb8bc7a3e7 | Shell | popiel/Lacuna-Automation | /count_where.sh | UTF-8 | 313 | 3.296875 | 3 | [] | no_license | #!/bin/sh
# Count stack-trace origins in the day's JSON logs.
# $1 - date directory under log/ (defaults to today), $2 - filename suffix filter.
when=$1
[ -z "$when" ] && when=`date +'%Y-%m-%d'`
what=$2
# ls log/$when/*$what | xargs -n 1 get_json |
# Extract a stack for each log file; files that fail to parse are echoed
# to stderr instead.
(for f in `ls log/$when/*$what`; do
  get_json.pl stack $f || echo $f >&2;
done) \
| sed 's/.*at Client.pm line [0123456789]*...t//' | perl -pe '1 while s/\([^()]*\)//;' | sort | uniq -c | sort -n
| true |
b4c772ac88d00840adc445c6671d4c02e52f401e | Shell | OskarPersson/dotfiles | /profile | UTF-8 | 860 | 2.546875 | 3 | [] | no_license | export PS1='\W \u$ '
# Convenience aliases for MAMP's bundled mysql client and the LÖVE runtime.
alias mysql="/Applications/MAMP/Library/bin/mysql --auto-rehash --host=localhost -uroot -proot"
alias love="/Applications/love.app/Contents/MacOS/love"
# Set CLICOLOR if you want Ansi Colors in iTerm2
export CLICOLOR=1
# Set colors to match iTerm2 Terminal Colors
export TERM=xterm-256color
export LSCOLORS=gxBxhxDxfxhxhxhxhxcxcx
# Correct Vim colors
source "$HOME/.vim/bundle/gruvbox/gruvbox_256palette.sh"
# Setting PATH for Python 3.4
# The original version is saved in .profile.pysave
PATH="/Library/Frameworks/Python.framework/Versions/3.4/bin:${PATH}"
export PATH
# Go workspace: binaries built with 'go install' land on PATH.
export GOPATH=$HOME/Git/go/
export GOBIN=$GOPATH/bin
export PATH=$PATH:$GOBIN
# Prefer MAMP's PHP build over the system one.
export MAMP_PHP=/Applications/MAMP/bin/php/php5.6.10/bin
export PATH="$MAMP_PHP:$PATH"
test -e ${HOME}/.iterm2_shell_integration.bash && source ${HOME}/.iterm2_shell_integration.bash
| true |
8fe717e3b5859490a4400f69a64fc9e64c2d9fb8 | Shell | burakbayramli/books | /Python_Scripting_for_Computational_Science_Third_Edition/py/intro/datatrans-eff.sh | UTF-8 | 1,812 | 3.34375 | 3 | [] | no_license | #!/bin/sh -x
# testing the efficiency of reading 100000 (x,y) data points
# from file and writing (x,f(y)) data back to file again
# generate input data file:
python $scripting/src/efficiency/datatrans/xygenerator.py 0:10,0.0001 'x*x' > datatrans.tmp
# run Python scripts:
# Each variant below is timed against the same datatrans.tmp input.
echo "datatrans1.py: plain Python"
time $scripting/src/py/intro/datatrans1.py datatrans.tmp tmp.1
echo "datatrans2.py: Python using plain lists"
time $scripting/src/py/intro/datatrans2.py datatrans.tmp tmp.1
echo "datatrans3a.py: Python w/NumPy arrays and filetable"
time $scripting/src/py/intro/datatrans3a.py datatrans.tmp tmp.1
echo "datatrans3b.py: Python w/NumPy arrays and TableIO"
time $scripting/src/py/intro/datatrans3b.py datatrans.tmp tmp.1
echo "datatrans3c.py: Python w/NumPy arrays and split of file.read()"
time $scripting/src/py/intro/datatrans3c.py datatrans.tmp tmp.1
echo "datatrans3d.py: Python w/NumPy arrays and Scientific.IO.ArrayIO"
time $scripting/src/py/intro/datatrans3d.py datatrans.tmp tmp.1
echo "datatrans1.pl: plain Perl"
time $scripting/src/perl/datatrans1.pl datatrans.tmp tmp.1
echo "datatrans1.tcl: plain Tcl"
time $scripting/src/tcl/datatrans1.tcl datatrans.tmp tmp.1
# Build the compiled reference implementations before timing them.
echo "compiling C and C++ codes in $scripting/src/misc/datatrans"
thisdir=`pwd`
cd $scripting/src/efficiency/datatrans/C
./make.sh
cd ../C++
./make.sh
cd $thisdir
echo
echo "datatrans1.app: plain C"
time $scripting/src/efficiency/datatrans/C/datatrans1.app datatrans.tmp tmp.1
echo "datatrans1.app: plain C++"
time $scripting/src/efficiency/datatrans/C++/datatrans1.app datatrans.tmp tmp.1
time $scripting/src/efficiency/datatrans/C++/datatrans1_eff.app datatrans.tmp tmp.1
# clean up:
#rm -f datatrans.tmp tmp.1 \
rm -f $scripting/src/efficiency/datatrans/C/*.app \
      $scripting/src/efficiency/datatrans/C++/*.app
| true |
905550f5e1263686a3e641dd29ad082b9888f3d8 | Shell | smaeul/impose | /impose.sh | UTF-8 | 7,696 | 3.765625 | 4 | [
"0BSD"
] | permissive | #!/bin/sh -efu
#
# Copyright © 2019 Samuel Holland <samuel@sholland.org>
# SPDX-License-Identifier: 0BSD
#
# Main impose script
#
# Dependencies:
# - POSIX sh + local
# - readlink(1)
#
# Optional dependencies:
# - hostname(1)
# - rsync(1)
# - ssh(1)
# - sudo(8)
#
# Module selected on the command line
CMDLINE_MODULES=
# Display messages in color when this is a positive integer
COLOR=1
# Software identifier
IMPOSE=${0##*/}
# Library script path
LIB=$PWD/lib.sh
# Do not perform any action when this is a positive integer
NO_ACTION=0
# The root of the destination directory hierarchy
ROOT=
# Print verbose messages when this is a positive integer
VERBOSE=0
# Software version
VERSION=0.1
# Shared helpers; debug/notice/warn/die used below are presumed defined there.
. "$LIB"
# version
# Emit "<program> version <number>" on stderr.
version() {
	printf '%s\n' "${IMPOSE} version ${VERSION}" >&2
}
# usage
# Print the version banner followed by a one-line synopsis on stderr.
usage() {
	version
	printf 'usage: %s [-CVchnv] [-R ROOT] [-m MODULE] [HOST...]\n' "$0" >&2
}
# config_parse FILE
# Flatten a host configuration file (whitespace/newline separated module
# names) onto a single stdout line; fails if the file cannot be read.
config_parse() {
	xargs < "$1"
}
# host_get_config HOST...
# Find a per-host configuration under hosts/: for each candidate name try
# the full name, then the part before the first dot; fall back to
# hosts/default. Prints the first existing file and returns 0, otherwise
# returns non-zero.
host_get_config() {
	local file
	local name
	for name; do
		test -n "$name" || continue
		for file in "hosts/${name}" "hosts/${name%%.*}"; do
			debug "host_get_config: trying '${file}'"
			test -f "$file" || continue
			echo "$file"
			return 0
		done
	done
	# No candidate matched; try the catch-all configuration.
	debug "host_get_config: trying 'hosts/default'"
	test -f "hosts/default" && echo "hosts/default"
}
# host_get_self
# Best-effort name of the local machine: prefer the FQDN, fall back to
# the short hostname, and never fail (empty output at worst).
host_get_self() {
	if ! hostname -f 2>/dev/null; then
		hostname 2>/dev/null || :
	fi
}
# host_impose_modules HOST MODULE...
# Copy the impose source tree to HOST with rsync, then re-run impose.sh
# there over ssh with flags rebuilt from the current global settings.
host_impose_modules() {
	local cmd
	local host
	host=$1
	shift
	# Consistency fix: these messages previously read the caller's global
	# $HOST loop variable instead of this function's own $host parameter.
	notice "Running on ${host}"
	debug "${host}: Synchronizing source files"
	rsync -a --delete "${PWD}/" "${host}:/tmp/impose/"
	cmd="./impose.sh $(impose_cmdline "$@")"
	debug "${host}: Running remote command '${cmd}'"
	ssh -ttq "$host" "cd /tmp/impose && ${cmd}"
}
# impose_cmdline MODULE...
# Rebuild an option string that reproduces the current global settings
# (COLOR, NO_ACTION, VERBOSE, ROOT) plus one -m flag per module name, so
# impose.sh can be re-invoked remotely or under sudo with the same state.
impose_cmdline() {
	local args
	local iter
	local mod
	# Color flag first: -c when enabled, -C when disabled.
	if test "$COLOR" -gt 0; then
		args=-c
	else
		args=-C
	fi
	# -n and -v are counters; repeat the flag once per increment.
	iter=0
	while test "$iter" -lt "$NO_ACTION"; do
		args="$args -n"
		iter=$((iter+1))
	done
	iter=0
	while test "$iter" -lt "$VERBOSE"; do
		args="$args -v"
		iter=$((iter+1))
	done
	if test -n "$ROOT"; then
		args="$args -R${ROOT}"
	fi
	for mod; do
		args="$args -m${mod}"
	done
	echo "$args"
}
# impose_modules MODULE...
# Apply each named module to the local machine. Unless in no-action mode,
# the script first re-executes itself as root via sudo. Each module runs
# in a backgrounded subshell so its environment (umask, exports) stays
# contained; the first failing module stops the loop.
impose_modules() {
	if test "$NO_ACTION" -le 0 && test "$(id -u)" -ne 0; then
		if test -n "$(command -v sudo)"; then
			notice "Authenticating via sudo"
			if sudo -u root "$0" $(impose_cmdline "$@"); then
				return
			fi
		fi
		die "Must be running as root to modify the local machine. Try '-n'"
	fi
	for MODULE; do
		(
		MODSRC=${PWD}/modules/${MODULE}
		export COLOR IMPOSE LIB MODSRC MODULE NO_ACTION ROOT VERBOSE
		# Restrictive umask: anything created defaults to root-only access.
		umask 0577
		test -d "$MODSRC" || die "Module '${MODULE}' does not exist"
		notice "Imposing module '${MODULE}'"
		if test -x "${MODSRC}/pre"; then
			debug "${MODULE}: Running pre-apply script"
			(umask 0022; "${MODSRC}/pre")
		fi
		# 'directories' lists: path [perms] [user] [group], one per line.
		if test -f "${MODSRC}/directories"; then
			while read path perms user group; do
				test -n "$path" || continue
				test -z "$perms" && perms=0755
				test -z "$user" && user=root
				test -z "$group" && group=$user
				debug "${MODULE}: Updating directory '${path}'"
				dest=$ROOT$path
				if ! test -d "$dest"; then
					test -e "$dest" && noact rm -f "$dest"
					noact mkdir "$dest"
				fi
				noact chown "${user}:${group}" "$dest"
				noact chmod "$perms" "$dest"
			done < "${MODSRC}/directories"
		fi
		# 'files' uses the same format; the payload is copied via a unique
		# temporary name and then renamed into place.
		if test -f "${MODSRC}/files"; then
			while read path perms user group; do
				test -n "$path" || continue
				test -z "$perms" && perms=0644
				test -z "$user" && user=root
				test -z "$group" && group=$user
				debug "${MODULE}: Updating file '${path}'"
				src=$MODSRC$path
				dest=$ROOT$path
				tmp=${dest%/*}/..impose.$$.${dest##*/}
				if test -h "$src" || test -h "$dest"; then
					# Don't copy if both are symlinks with the same destination
					if test "$(readlink "$src")" = "$(readlink "$dest")"; then
						tmp=$dest
					fi
				else
					# Don't copy if both are regular files with the same contents
					if test -f "$dest" && cmp -s "$src" "$dest"; then
						tmp=$dest
					fi
				fi
				test "$tmp" != "$dest" && noact cp -P "$src" "$tmp"
				noact chown -h "${user}:${group}" "$tmp"
				# Cannot chmod symlinks on Linux
				test -h "$src" || noact chmod "$perms" "$tmp"
				if test "$tmp" != "$dest"; then
					# Remove destination before mv if it is a dir or dir symlink
					if test -d "$dest"; then
						if test -h "$dest"; then
							noact rm "$dest"
						else
							noact rmdir "$dest"
						fi
					fi
					noact mv "$tmp" "$dest"
				fi
			done < "${MODSRC}/files"
		fi
		if test -x "${MODSRC}/post"; then
			debug "${MODULE}: Running post-apply script"
			(umask 0022; "${MODSRC}/post")
		fi
		) &
		wait $! || {
			warn "Failed to impose module '${MODULE}'"
			break
		}
	done
}
# main [ARG...]
# Parse options, then impose the configured modules either on each named
# HOST (remotely unless it is "localhost") or on the local machine when
# no hosts are given.
main() {
	# Disable color automatically when stderr is not a terminal.
	test -t 2 || COLOR=0
	while getopts :CR:Vchm:nv OPTION; do
		case "$OPTION" in
		C) COLOR=0 ;;
		R) ROOT=$OPTARG ;;
		V) version; return 0 ;;
		c) COLOR=1 ;;
		h) usage; return 0 ;;
		m) CMDLINE_MODULES=${CMDLINE_MODULES:+$CMDLINE_MODULES }${OPTARG} ;;
		n) NO_ACTION=$((NO_ACTION+1)) ;;
		v) VERBOSE=$((VERBOSE+1)) ;;
		:) usage; die "Missing argument to -${OPTARG}" ;;
		?) usage; die "Bad option: -${OPTARG}" ;;
		esac
	done
	shift $((OPTIND-1))
	if test -n "$ROOT" && test "$ROOT" = "${ROOT#/}"; then
		die "The argument to -R must be an absolute path"
	fi
	if test "$#" -gt 0; then
		for HOST; do
			if test -n "$CMDLINE_MODULES"; then
				MODULES=$CMDLINE_MODULES
			else
				# Bug fix: these used invalid "let VAR := cmd" syntax;
				# a plain command-substitution assignment captures both
				# the helper's output and its exit status.
				if ! CONFIG=$(host_get_config "$HOST"); then
					warn "Skipping ${HOST}: No configuration found"
					continue
				fi
				if ! MODULES=$(config_parse "$CONFIG"); then
					warn "Skipping ${HOST}: Bad configuration format"
					continue
				fi
				if test -z "$MODULES"; then
					warn "Skipping ${HOST}: Nothing to do"
					continue
				fi
			fi
			if test "${HOST%%.*}" = "localhost"; then
				impose_modules $MODULES
			else
				host_impose_modules "$HOST" $MODULES
			fi
		done
	else
		if test -n "$CMDLINE_MODULES"; then
			MODULES=$CMDLINE_MODULES
		else
			if ! CONFIG=$(host_get_config "$(host_get_self)"); then
				die "No configuration found for the local machine"
			fi
			if ! MODULES=$(config_parse "$CONFIG"); then
				die "Bad configuration format in '${CONFIG}'"
			fi
		fi
		impose_modules $MODULES
	fi
}

main "$@"
| true |
f9308c988c67603b2e086ef026b3fd6b52e90963 | Shell | monotonemonk/arch_svntogit_community- | /redis/repos/community-x86_64/PKGBUILD | UTF-8 | 1,654 | 2.53125 | 3 | [] | no_license | # $Id$
# Maintainer: Sergej Pupykin <pupykin.s+arch@gmail.com>
# Maintainer: Bartłomiej Piotrowski <bpiotrowski@archlinux.org>
# Contributor: Jan-Erik Rediger <badboy at archlinux dot us>
# Contributor: nofxx <x@<nick>.com>
pkgname=redis
pkgver=3.2.7
pkgrel=1
pkgdesc='Advanced key-value store'
arch=('i686' 'x86_64')
url='http://redis.io/'
license=('BSD')
depends=('jemalloc' 'grep' 'shadow')
# Config files preserved across upgrades.
backup=('etc/redis.conf'
        'etc/logrotate.d/redis')
install=redis.install
source=(http://download.redis.io/releases/redis-$pkgver.tar.gz
        redis.service
        redis.logrotate
        redis.conf-sane-defaults.patch
        redis-2.8.11-use-system-jemalloc.patch)
sha256sums=('bf9df3e5374bfe7bfc3386380f9df13d94990011504ef07632b3609bb2836fa9'
            'cceff2a097d9041a0c73caeb5c33e849af783c6a12db866f24b8417ac3ac9d11'
            '8b4c2caabb4f54157ad91ca472423112b1803685ad18ed11b60463d78494df13'
            '22cd3b9f7e9b17647a615d009b50603e7978b0af26c3e2c53560e57573b996ed'
            'b1d2802d7e459799565fb4660e97e57a23de0aa47689656ece4a00d1053dd919')
prepare() {
  # Apply distro default-configuration tweaks before building.
  cd $pkgname-$pkgver
  patch -p1 -i ../redis.conf-sane-defaults.patch
#  patch -p1 -i ../redis-2.8.11-use-system-jemalloc.patch
}
build() {
  make -C $pkgname-$pkgver
}
package() {
  cd $pkgname-$pkgver
  make PREFIX="$pkgdir"/usr install
  install -Dm644 COPYING "$pkgdir"/usr/share/licenses/redis/LICENSE
  install -Dm644 redis.conf "$pkgdir"/etc/redis.conf
  install -Dm644 ../redis.service "$pkgdir"/usr/lib/systemd/system/redis.service
  # files kept for compatibility with installations made before 2.8.13-2
  install -Dm644 ../redis.logrotate "$pkgdir"/etc/logrotate.d/redis
  ln -sf redis-server "$pkgdir"/usr/bin/redis-sentinel
}
| true |
34ffa7c1b063d08fb03dac25df61e8aa6b5fa328 | Shell | Phala-Network/phala-blockchain | /standalone/pruntime/scripts/cluster-state-transfer.sh | UTF-8 | 642 | 3.40625 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/sh
set -e
# Endpoints of the source (WORKER0) and destination (WORKER1) pruntime
# workers, plus where WORKER1 expects downloaded state files on disk.
WORKER0=http://localhost:8000
WORKER1=http://localhost:8001
WORKER1_DATA_DIR=data/storage_files/
echo "Starting"
# Ask the destination worker what state it needs.
REQUEST=$(curl -s $WORKER1/prpc/PhactoryAPI.GenerateClusterStateRequest)
echo "REQUEST: $REQUEST"
# Have the source worker save a matching state snapshot to a file.
STATE_INFO=$(curl -s -d $REQUEST $WORKER0/prpc/PhactoryAPI.SaveClusterState?json)
echo "STATE_INFO: $STATE_INFO"
FILENAME=$(echo $STATE_INFO | jq -r .filename)
echo "FILENAME: $FILENAME"
URL=$WORKER0/download/$FILENAME
DST=$WORKER1_DATA_DIR/$FILENAME
echo "Downloading $URL to $DST"
curl -s $URL -o $DST
echo "Loading state"
# Tell the destination worker to load the snapshot it can now find on disk.
curl -s -d $STATE_INFO $WORKER1/prpc/PhactoryAPI.LoadClusterState?json
echo Done
| true |
06e6494c4bcf3d41c6b0550adb5340a8967121e9 | Shell | jasondebolt/phoenix | /Phoenix/deploy-dev-ec2.sh | UTF-8 | 2,170 | 4 | 4 | [
"MIT"
] | permissive | #!/bin/bash
set -e
# Creates or updates AWS ECS resources required to run one or more ECS tasks/services.
# USAGE:
#   ./deploy-dev-ec2.sh [create | update]
#
# EXAMPLES:
#   ./deploy-dev-ec2.sh create
#   ./deploy-dev-ec2.sh update
# Check for valid arguments
if [ $# -ne 1 ]
  then
    echo "Incorrect number of arguments supplied. Pass in either 'create' or 'update'."
    exit 1
fi
# Convert create/update to uppercase
# NOTE(review): tr is given '/a-z/' and '/A-Z/'; the slashes map to
# themselves so plain words still uppercase correctly — confirm intent.
OP=$(echo $1 | tr '/a-z/' '/A-Z/')
# Pull deployment parameters out of the shared SSM globals params file.
CLOUDFORMATION_ROLE=$(jq -r '.Parameters.IAMRole' template-ssm-globals-macro-params.json)
ORGANIZATION_NAME=$(jq -r '.Parameters.OrganizationName' template-ssm-globals-macro-params.json)
PROJECT_NAME=$(jq -r '.Parameters.ProjectName' template-ssm-globals-macro-params.json)
ENVIRONMENT=`jq -r '.Parameters.Environment' template-ec2-params-dev.json`
STACK_NAME=$PROJECT_NAME-ec2-$ENVIRONMENT
# Change-set names must be unique, so embed a timestamp.
VERSION_ID=$ENVIRONMENT-`date '+%Y-%m-%d-%H%M%S'`
CHANGE_SET_NAME=$VERSION_ID
# Regenerate the dev params file into a format the CloudFormation CLI expects.
python parameters_generator.py template-ec2-params-dev.json cloudformation > temp1.json
# Make macro name unique in the AWS account:
# https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-cloudformation-macro.html#cfn-cloudformation-macro-name
sed "s/PROJECTNAMELambdaMacro/${PROJECT_NAME}LambdaMacro/g" template-ec2.json > temp0.json
# Validate the CloudFormation template before template execution.
aws cloudformation validate-template --template-body file://temp0.json
aws cloudformation create-change-set --stack-name $STACK_NAME \
    --change-set-name $CHANGE_SET_NAME \
    --template-body file://temp0.json \
    --parameters file://temp1.json \
    --change-set-type $OP \
    --capabilities CAPABILITY_NAMED_IAM \
    --role-arn $CLOUDFORMATION_ROLE
aws cloudformation wait change-set-create-complete \
    --change-set-name $CHANGE_SET_NAME --stack-name $STACK_NAME
# Let's automatically execute the change-set for now
aws cloudformation execute-change-set --stack-name $STACK_NAME \
    --change-set-name $CHANGE_SET_NAME
aws cloudformation wait stack-$1-complete --stack-name $STACK_NAME
# Cleanup
rm temp1.json
| true |
0b3b0840fcbeac8a8b34b32d430ea46f3bb069d2 | Shell | ybADMIN/prepare-commit-msg | /prepare-commit-msg | UTF-8 | 759 | 3.5625 | 4 | [] | no_license | #!/usr/bin/env bash
# prepare-commit-msg hook: prefix the commit message with "[#<issue>]",
# where <issue> is the numeric part of the current branch name.
# $1 is the path to the commit-message file supplied by git.

# Merge commits keep their message untouched.
if [[ $(grep -c 'Merge' "$1") -ge 1 ]]; then
    exit 0
fi

FORMAT='[#__value__]'
REGEXP_FOR_SEARCH='\K\d+'

# Branch name -> last path component -> part before the first dash -> digits.
BRANCH_NAME=$(git symbolic-ref --short HEAD)
BRANCH_NAME=${BRANCH_NAME##*/}
BRANCH_NAME=${BRANCH_NAME%%-*}
BRANCH_NAME=$(echo "${BRANCH_NAME}" | grep -oP "${REGEXP_FOR_SEARCH}")

# Bail out unless the extracted token is purely numeric.
BRANCH_NAME_IS_DIGIT=$(echo "${BRANCH_NAME}" | grep -cE '^[0-9]+$')
if [[ ${BRANCH_NAME_IS_DIGIT} -eq 0 ]]; then
    exit 0
fi

TASK_NAME=$(echo "${FORMAT}" | sed "s/__value__/${BRANCH_NAME}/g")
# Bug fix: "$1" was unquoted here and below, breaking on paths with spaces.
BRANCH_IN_COMMIT=$(fgrep -c "${TASK_NAME}" "$1")
if [[ -n "${TASK_NAME}" ]] && ! [[ ${BRANCH_IN_COMMIT} -ge 1 ]]; then
    echo "${TASK_NAME} $(cat "$1")" > "$1"
fi
| true |
c9ff5899b0722c7115e2ffdac394f999bb764639 | Shell | Snaipe/packages | /csptr/arch/git/PKGBUILD | UTF-8 | 611 | 2.875 | 3 | [] | no_license | pkgname=libcsptr-git
# VCS package: pkgver is regenerated by the pkgver() function below.
pkgver=v2.0.1.r0.gff405f2
pkgrel=1
pkgdesc="A smart pointers library for the C programming language"
arch=('i686' 'x86_64')
url="http://github.com/Snaipe/libcsptr.git"
license=('MIT')
makedepends=('git')
provides=('libcsptr')
conflicts=('libcsptr')
source=("$pkgname"::"git://github.com/Snaipe/libcsptr.git")
md5sums=('SKIP')
pkgver() {
  # Derive "tag.rCOUNT.gHASH" from git describe output.
  cd "${srcdir}/${pkgname}"
  git describe --long | sed 's/\([^-]*-g\)/r\1/;s/-/./g'
}
build() {
  # Autotools build into /usr.
  cd ${srcdir}/${pkgname}
  ./autogen.sh
  ./configure --prefix=/usr
  make
}
package() {
  cd ${srcdir}/${pkgname}
  make DESTDIR="$pkgdir/" install
}
| true |
f13a4e2d005d65661771dc5054a8d24f30f99d2a | Shell | IasonManolas/MyDevEnviroment | /setup_dev_env.sh | UTF-8 | 1,444 | 2.78125 | 3 | [] | no_license | #!/bin/bash
# Bootstrap a development environment: vim, tmux, git, zsh/oh-my-zsh,
# plugin managers and powerline fonts.
set -e # exit if a command fails

RED='\033[0;31m'
NC='\033[0m' # No Color

# Fix: this step updates package lists; it previously claimed to install vim.
echo -e "${RED}Updating package lists..${NC}"
sudo apt-get update
echo -e "${RED}Installing vim..${NC}"
sudo apt-get install vim
echo -e "${RED}Installing tmux..${NC}"
sudo apt-get install tmux
echo -e "${RED}Installing git..${NC}"
sudo apt-get install git
echo -e "${RED}Installing zsh..${NC}"
sudo apt-get install zsh
chsh -s $(which zsh)
echo -e "${RED}Installing Oh My Zsh..${NC}"
sh -c "$(curl -fsSL https://raw.githubusercontent.com/robbyrussell/oh-my-zsh/master/tools/install.sh)"
echo -e "${RED}Installing required agnoster fonts..${NC}"
sudo apt-get install fonts-powerline
echo -e "${RED}Cloning vimrc and tmuxrc files..${NC}"
# Fix: 'dotFiles/.*' also matched '.' and '..', making mv fail and
# aborting the whole script under 'set -e'; match real dotfiles only.
mv dotFiles/.[!.]* ~/
rm -rf ../MyDevEnviroment
echo -e "${RED}Installing vim plugin manager..${NC}"
git clone https://github.com/VundleVim/Vundle.vim.git ~/.vim/bundle/Vundle.vim
sudo apt install clang-format
echo -e "${RED}Installing vim plugins listed in ~/.vimrc..${NC}"
vim +PluginInstall +qall
echo -e "${RED}Installing tmux plugin manager..${NC}"
#Install plugins with prefix+I
git clone https://github.com/tmux-plugins/tpm ~/.tmux/plugins/tpm
echo -e "${RED}Installing powerline..${NC}"
sudo apt-get install python-pip
sudo pip install powerline-status
git clone https://github.com/powerline/fonts.git && cd fonts && sh ./install.sh
# (removed a stray vim ex-command ':so %' that had been committed here)
#reload tmux conf!!!
echo -e "${RED}Install tmux plugins by pressing prefix+I ..${NC}"
| true |
6d40c8764333ecd3c6142813377c2a87e38825b7 | Shell | MusikAnimal/pageviews | /bin/symlinks.sh | UTF-8 | 1,000 | 3.171875 | 3 | [
"CC0-1.0",
"MIT"
] | permissive | #!/bin/bash
# This script is used on Toolforge to add necessary symlinks that we don't
# want in version control. They effectively change the document root in lighttpd.
# This needs to be ran after each deploy (and is sourced in deploy.sh).
#
# To use, optionally pass in the name of the app you're currently working with,
# otherwise it will use the app specified in the $PWD:
#   sh setup.sh topviews
if [ -z "$1" ]; then
  app=$(basename "../$PWD")
else
  app=$1
fi
# Point the web root entry points at the selected app's files.
cd public_html
ln -s $app/index.php index.php
ln -s $app/api.php api.php
# Replace fingerprinted asset links (application-*) with the app's own.
rm application-*
ln -s $app/application-* .
ln -s $app/faq faq
ln -s $app/url_structure url_structure
# Shared partials live in public_html; expose them from the home directory.
cd ~
ln -s public_html/_browser_check.php _browser_check.php
ln -s public_html/_data_links.php _data_links.php
ln -s public_html/_footer.php _footer.php
ln -s public_html/_header.php _header.php
ln -s public_html/_head.php _head.php
ln -s public_html/_modals.php _modals.php
ln -s public_html/_output.php _output.php
ln -s public_html/images images
| true |
d971b6027c23276f5fdacf0e2183eba2881e26bd | Shell | chirdxing/WeEvent | /weevent-build/bin/start-all.sh | UTF-8 | 447 | 3.59375 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
current_path=$(pwd)

if [[ -e ${current_path}/install-all.sh ]];then
    echo "Error operation "
    echo "Current path ${current_path} is source code package, only install path can execute start-all.sh "
    exit 1
fi

# Iterate over sub-directories with a glob instead of parsing 'ls -l'
# output; every directory except ./lib is a module with its own launcher.
for dir in */; do
    module=${dir%/}
    [[ -d ${module} ]] || continue    # no sub-directories at all
    if [[ ${module} != "lib" ]];then
        cd "${module}";
        ./"${module}".sh start;
    fi
    cd "${current_path}"
done
| true |
e01b87e87bd15695ef44e5a56b35e0e143290536 | Shell | CYsuncheng/MarkdownNotes | /Code_Backup/CheckAppSize/iOS/checksize.sh | UTF-8 | 2,444 | 3.625 | 4 | [] | no_license | #!/bin/sh
# Compare app-size metrics (package, Assets.car, .bundle files) between two
# unpacked iOS build directories and print a summary table.
base_root_path=$1
target_root_path=$2
echo "上一版本文件的目录:${base_root_path}";
echo "当前版本文件的目录:${target_root_path}";
root_path=$PWD
# grep patterns for the files of interest.
package_file='Package$'
assets_car='Assets.car'
bundle_file='bundle$'
# for path in $1 $2;
# do
# 	cd ${path}
# 	du -ch | grep ${package_file} | awk '{print "package file size = ", $1}'
# 	ls -lh | grep ${assets_car} | awk '{print "Assets.car size = ", $5}'
# 	bundle_size_kb=$(du -ck | grep ${bundle_file} | awk '{sum+=$1} END {print sum}')
# 	bundle_size_Mb=`expr ${bundle_size_kb} / 1024`
# 	echo "all .bundle file size = ${bundle_size_Mb} M"
# done
# --- Previous version ---
cd $1
echo "开始计算..."
package_size_1=$(du -ch | grep ${package_file} | awk '{print $1}')
assets_size_1=$(ls -lh | grep ${assets_car} | awk '{print $5}')
bundle_size_kb_1=$(du -ck | grep ${bundle_file} | awk '{sum+=$1} END {print sum}')
bundle_size_mb_1=`expr ${bundle_size_kb_1} / 1024`
# Derive a "<version>-bundle.txt" file name from the release path.
bundle_file_1=${base_root_path#*LuoJiFM-IOS_release_}
bundle_file_1=${bundle_file_1%/Payload*}"-bundle.txt"
du -ch | grep 'bundle$' > $bundle_file_1
base_bundle_list_file_path=$PWD/$bundle_file_1
echo "生成 bundle 文件列表,目录:" ${base_bundle_list_file_path}
# --- Current version ---
cd $2
package_size_2=$(du -ch | grep ${package_file} | awk '{print $1}')
assets_size_2=$(ls -lh | grep ${assets_car} | awk '{print $5}')
bundle_size_kb_2=$(du -ck | grep ${bundle_file} | awk '{sum+=$1} END {print sum}')
bundle_size_mb_2=`expr ${bundle_size_kb_2} / 1024`
bundle_file_2=${target_root_path#*LuoJiFM-IOS_release_}
bundle_file_2=${bundle_file_2%/Payload*}"-bundle.txt"
du -ch | grep 'bundle$' > $bundle_file_2
target_bundle_list_file_path=$PWD/$bundle_file_2
echo "生成 bundle 文件列表,目录:" ${target_bundle_list_file_path}
# --- Summary table (note: "Pervious" typo is in the printed heading) ---
printf "\n\n\n"
echo "================================================================================"
echo "                              主要文件大小变化对比                                 "
echo "================================================================================"
printf "%-20s %-20s %-20s\n" FileName Pervious Current
printf "%-20s %-20s %-20s\n" Package ${package_size_1} ${package_size_2}
printf "%-20s %-20s %-20s\n" Assets ${assets_size_1} ${assets_size_2}
printf "%-20s %-20s %-20s\n" Bundle ${bundle_size_mb_1}M ${bundle_size_mb_2}M
cd ${root_path}
python3 diffbundle.py ${base_bundle_list_file_path} ${target_bundle_list_file_path} | true |
3c80b6769c6d7d9468452b70c2d14a1ae6bb0699 | Shell | caruccio/openshift-cartridge-worker | /usr/template/worker-start | UTF-8 | 114 | 2.671875 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Record our PID so the cartridge can manage this worker, then emit a
# timestamped heartbeat line every second, forever.
echo $$ > $OPENSHIFT_DATA_DIR/worker.pid
count=1
while sleep 1; do
    echo $(date): loop $count
    count=$((count+1))
done
| true |
422f619cbfce76a9c3903cd4d82887e02e0120f9 | Shell | klcathy/cs111 | /lab1-skeleton/test-1c.sh | UTF-8 | 815 | 2.90625 | 3 | [] | no_license | #!/bin/sh
# CS 111 Test Cases 1C
# echo a > a.txt ; sort b.txt
# cat < a.txt
###############################################################################
# echo abc; echo def
# echo ghi ; echo jkl
###############################################################################
# echo "cats
# dogs
# fish
# birds
# hamsters
# snakes
# rabbits" > animals.txt
# (cat animals.txt | head -n 5 | sort -r; sleep 5; echo testing)
# (sleep 1; echo stuff; sleep 1; echo passed)
# (sleep 3; echo hello world > test.txt; cat test.txt > test2.txt; cat test2.txt)
# echo parallel
###############################################################################
# Emit the integers 1 through 10, one per line — identical output to ten
# consecutive echo statements.
for i in {1..10}; do
	echo "$i"
done
###############################################################################
| true |
5f83455dc91b453eaf748dc30332077ed6cfcd70 | Shell | TomSellers/Fathom | /report.sh | UTF-8 | 243 | 2.53125 | 3 | [] | no_license | #!/bin/bash
#
# report.sh v0.98.01
#
# Output standard nmap scan results from the .nmap file in the ./logs/ dir
#
# Usage: report.sh <target>
#
# Part of the Fathom suite written by Tom Sellers <fathom_at_fadedcode.net>
#
# Requires: a prior scan log at ./logs/<target>.nmap
#
#
#
# Guard against a missing argument: without it the script would try to
# cat "./logs/.nmap" and print a confusing error instead of usage help.
if [ -z "$1" ]; then
	echo "Usage: $(basename "$0") <target>" >&2
	exit 1
fi
cat "./logs/$1.nmap"
| true |
6561139691a9a79d7c40ea0db0749840bb738b9a | Shell | ub-digit/bestall | /docker/build/tag_build_push.sh | UTF-8 | 161 | 2.96875 | 3 | [] | no_license | #!/bin/bash
# Tag the current git revision, push the tag, build the compose images with
# that revision baked in, and push the images.
#
# Usage: tag_build_push.sh <git-revision-tag>

# Usage error: report and exit non-zero. (The original bare `exit` returned
# the status of the preceding echo — i.e. 0 — hiding the failure from any
# caller or CI job.)
if [ -z "$1" ]; then
  echo Usage: $0 git-revision-tag
  exit 1
fi

git tag "$1"
git push origin "$1"
GIT_REVISION="$1" docker-compose build
./push.sh "$1"
| true |
661b9c258d34b216691e15a23c3c1749a8d21e3e | Shell | ricardofunke/makemyenv | /makemyenv/deletemyenv.sh | UTF-8 | 2,034 | 3.8125 | 4 | [] | no_license | #!/bin/bash
set -e
# --- Tear down a per-ticket Vagrant dev environment and its database ---
# Usage: deletemyenv.sh <customer>-<number>
# Counterpart to makemyenv: destroys the VM under ~/tickets/<ticket> and
# drops the DB and user that were provisioned for it on the shared server.
USER_HOME="/home/vagrant"
TICKETS_DIR="${USER_HOME}/tickets"
DB_SERVER='192.168.110.120'
DB_ADM='admin'
DB_PASS='4c2b2cdcbe7f369d3d01a8f3c5202e37'
# A ticket argument is required.
if [[ -z $1 ]]; then
echo 'Error: Please insert a LESA ticket name'
exit 1
fi
# Ticket names must look like "customer-123".
if [[ $1 =~ [a-z0-9]+-[0-9]+ ]]; then
ticket=$1
else
echo "Error: Invalid ticket name: \"$1\""
echo '       Please use a name like: "customer-123"'
exit 1
fi
# Nothing to do when the ticket's environment directory is absent.
if ! [[ -d $TICKETS_DIR/$ticket ]]; then
echo "The environment for $ticket doesn't exist."
echo '  Nothing to do. Exiting...'
exit 1
fi
# DB identifiers are the ticket name with the dash removed ("customer123").
dbuser=${ticket//-/}
dbpass=${ticket//-/}
dbname=${ticket//-/}
# Read the DB flavour from the ticket's Puppet manifest ($db_type = <name>);
# fail loudly when the variable is missing from the manifest.
db="$(grep '$db_type' $TICKETS_DIR/${ticket}/modules/liferay/manifests/init.pp | awk -F'=' '{print $2}' | grep -Eo '[a-z0-9]+')" || (echo 'no $db_type variable'; exit 1)
cd $TICKETS_DIR/$ticket
# Stop the app server gracefully, wait, force-kill, then destroy the VM.
echo 'Stopping App Server gracefully...'
[[ $(vagrant ssh -c 'pkill java') ]] || true # should continue even if it fails
echo 'Waiting for App Server to stop...'
sleep 20
echo 'Killing App Server...'
[[ $(vagrant ssh -c 'pkill -9 java') ]] || true # should continue even if it fails
vagrant destroy -f
# Drop the ticket's database (and user, where applicable) per DB flavour.
case $db in
postgresql)
# better not indent heredocs
PGPASSWORD=$DB_PASS psql -h $DB_SERVER -U $DB_ADM postgres << END
DROP DATABASE ${dbname};
DROP USER ${dbuser};
END
;;
mysql)
# Revoke grants before dropping the per-ticket user.
mysql -h $DB_SERVER -u $DB_ADM -p${DB_PASS} << END
DROP DATABASE ${dbname};
REVOKE ALL PRIVILEGES ON ${dbname}.* FROM '${dbuser}'@'%';
DROP USER ${dbuser};
END
;;
mssql)
# MSSQL uses a fixed sa account rather than a per-ticket user, so only the
# database itself is dropped.
DB_ADM='sa'
DB_PASS='password'
dbuser='sa'
dbpass='password'
isql MSSQLServer $DB_ADM $DB_PASS -b << EOF
DROP DATABASE ${dbname}
EOF
;;
oracle)
# Oracle: drop the schema user, cascading all of its objects.
# NOTE(review): _ORACLE_SCRIPT=true presumably bypasses 12c+ common-user
# naming rules for the drop — confirm.
DB_ADM='SYSTEM'
DB_PASS='password'
sqlplus ${DB_ADM}/${DB_PASS}@${DB_SERVER}/ORCL << END
ALTER SESSION SET "_ORACLE_SCRIPT"=true;
DROP USER ${dbuser} CASCADE;
END
;;
db2)
# DB2: drop the database remotely over ssh using the shared liferay account.
DB_ADM='liferay'
DB_PASS='R3m3mb3r321'
dbuser='liferay'
dbpass='R3m3mb3r321'
sshpass -p ${DB_PASS} ssh ${DB_ADM}@${DB_SERVER} "db2 \"drop db $dbname\""
;;
esac
# Finally remove the ticket's working directory.
cd $TICKETS_DIR
rm -rf $ticket
| true |
cd531419184f50775bb2f1479e0b66dedb8e64b0 | Shell | sri-arjuna/script-tools.tui | /Scripts-in-bin/yumresume | UTF-8 | 637 | 3.34375 | 3 | [] | no_license | #!/bin/bash
#
# sea's Script Tools: The 3rd Generation
# File Description: Resumes pendent or unfinished files to update/remove/modify
script_version=0.7
# Author: Simon A. Erat (sea) <erat.simon AT gmail.com>
# Created (y.m.d): 2011.11.01
# Changed: 2012.08.16
# License: GPL v3
#
# Title
#
source /usr/share/script-tools/st.cfg
tui-title "Yum Resume, using yum-complete-transaction ($script_version)"
#
# Help
#
# A proper if-block instead of the original `test && echo && exit` chain:
# in the chain a failing echo would have skipped the exit, and $0 was left
# unquoted inside the command substitution.
if [ "-h" = "$1" ]; then
	echo -e "$(basename "$0") ($script_version)
\rResumes last transaction
\r"
	exit 99
fi
#
# Display
#
# Prefer sudo; fall back to su when sudo is unavailable or denied.
sudo yum-complete-transaction || su -c yum-complete-transaction
| true |
bf97bf8bffb1e38da3c33321bd60f6e72d357bb5 | Shell | cmg-dev/dotfiles-1 | /install.sh | UTF-8 | 2,470 | 2.984375 | 3 | [] | no_license | echo "Installing all your stuff!"
# Dotfiles bootstrap: symlink configs from ~/.dotfiles into $HOME and run the
# various toolchain installs (brew, pip, npm, terminfo compilation).
# NOTE(review): the plain `ln -s` calls below fail when the target already
# exists, so re-running this script is not idempotent; only the bash section
# uses `ln -svf` (which overwrites) — confirm this is intended.
echo ""
echo ""
echo "---> bash"
# Fetch the git-prompt helper once (skipped when already present).
if [ ! -e ~/.git-prompt.sh ]; then
curl https://raw.githubusercontent.com/git/git/master/contrib/completion/git-prompt.sh -o ~/.git-prompt.sh
fi
if [ ! -e ~/.jg ]; then
# jg bins. Since my bashrc is inspired by his, I am using his binaries as
# well
git clone https://github.com/junegunn/dotfiles.git ~/.jg
fi
ln -svf ~/.dotfiles/bash/bashrc ~/.bashrc
ln -svf ~/.dotfiles/bash/bash_profile ~/.bash_profile
ln -svf ~/.dotfiles/bash/git.bash ~/.git.bash
# scripts
# Link every helper script from the cloned jg repo into ~/bin.
mkdir -p ~/bin
for bin in ~/.jg/bin/*; do
ln -svf $bin ~/bin
done
echo "---> bash done"
# Brew
# Install everything declared in the Brewfile under ~/.dotfiles/brew/.
echo ""
echo "---> Brew"
brew tap Homebrew/bundle
cd ~/.dotfiles/brew/ && brew bundle
cd ~/.dotfiles/
echo "---> Brew done"
echo "---> Post brew"
# Run fzf's bundled post-install script (key bindings / completion).
$(brew --prefix)/opt/fzf/install
echo "---> Post brew done"
echo ""
echo "---> Git"
ln -s ~/.dotfiles/git/gitconfig ~/.gitconfig
ln -s ~/.dotfiles/git/gitignore_global ~/.gitignore_global
echo "---> Git done"
echo ""
echo "---> Python / pip"
# Install declared packages for both python2 and python3 environments.
pip install -r ~/.dotfiles/pip/packages.txt
pip3 install -r ~/.dotfiles/pip/packages3.txt
echo "---> Python / pip done"
echo ""
echo "---> NPM"
python ~/.dotfiles/npm/install.py
echo "---> NPM done"
echo ""
echo "---> tmux"
ln -s ~/.dotfiles/tmux/.tmux.conf ~/.tmux.conf
# Compile the custom tmux terminfo entry.
tic -x ~/.dotfiles/tmux/tmux.terminfo
echo "---> tmux done"
echo ""
echo "---> Alacritty"
ln -s ~/.dotfiles/alacritty/alacritty.yml ~/.config/alacritty.yml
echo "---> Alacritty done"
echo ""
echo "---> vim"
# One shared init.vim serves neovim, vim and gvim via multiple links.
mkdir -p ~/.config/nvim/
ln -s ~/.dotfiles/vim/init.vim ~/.config/nvim/init.vim
ln -s ~/.dotfiles/vim/ginit.vim ~/.config/nvim/ginit.vim
ln -s ~/.dotfiles/vim/UltiSnips/ ~/.config/nvim/UltiSnips
ln -s ~/.dotfiles/vim/init.vim ~/.vimrc
ln -s ~/.dotfiles/vim/init.vim ~/.gvimrc
ln -s ~/.dotfiles/vim/ideavimrc.vim ~/.ideavimrc
ln -s ~/.dotfiles/vim/xvimrc.vim ~/.xvimrc
echo "---> vim done"
echo ""
echo "---> spacemacs"
ln -s ~/.dotfiles/spacemacs/.spacemacs ~/.spacemacs
echo "---> spacemacs done"
echo ""
echo "---> proton"
ln -s ~/.dotfiles/proton/.proton ~/.proton
echo "---> proton done"
echo ""
echo "---> sublimious"
ln -s ~/.dotfiles/sublimious/.sublimious ~/.sublimious
echo "---> sublimious done"
echo ""
echo "---> kwm"
mkdir -p ~/.kwm/
ln -s ~/.dotfiles/kwm/kwmrc ~/.kwm/kwmrc
# NOTE(review): the khd config is linked from ~/.dotfiles/khd/kwmrc — the
# file name looks like a possible copy-paste slip; confirm the source path.
ln -s ~/.dotfiles/khd/kwmrc ~/.kwm/.khdrc
echo "---> kwm done"
echo ""
echo "---> term"
# Compile the italics-capable xterm terminfo entry.
tic -x ~/.dotfiles/term/xterm-256color-italic.terminfo
echo "---> term done"
| true |
9b8607c2fb15a43df1d82be89b3c3640d9830f0f | Shell | mrwangyu2/setting_ubuntu | /functions/configure_rc_local.sh | UTF-8 | 210 | 2.625 | 3 | [] | no_license | #!/bin/bash
# Install a classic rc.local setup on a systemd machine: copies the
# rc-local service unit and the rc.local script into place. Paths are
# relative to ./data, so this must be run from the repository root.
# Relies on the project's echo_message and superuser_do helpers.
function configure_rc_local(){
	# Fixed copy-paste bug: the status line previously read
	# "Starting configure_zsh fucntions".
	echo_message header "Starting configure_rc_local function"
	superuser_do cp ./data/rc-local.service /etc/systemd/system/.
	superuser_do cp ./data/rc.local /etc/.
}
| true |
8c975371780cc1f3311bcd338cf9f32591c17c29 | Shell | koreahong/bigquery | /ch07/02.sh | UTF-8 | 396 | 2.625 | 3 | [] | no_license | read -d '' QUERY_TEXT << EOF
SELECT
start_station_name
, AVG(duration) as duration
, COUNT(duration) as num_trips
FROM \`bigquery-public-data\`.london_bicycles.cycle_hire
GROUP BY start_station_name
ORDER BY num_trips DESC
LIMIT 5
EOF
# Wrap the SQL from $QUERY_TEXT in a BigQuery jobs.query request body.
# `read -d ''` slurps the whole heredoc into the variable; it returns
# non-zero at EOF by design, so this script must not run under `set -e`.
read -d '' request << EOF
{
  "useLegacySql": false,
  "useQueryCache": false,
  "query": \"${QUERY_TEXT}\"
}
EOF
# Collapse newlines to spaces so the JSON body fits on a single line.
# NOTE(review): the trailing "| true" pipes the assignment into true and
# looks like a stray artifact — confirm it is intentional.
request=$(echo "$request" | tr '\n' ' ') | true
213312cee0634e1754e342615fea3648da42628c | Shell | espsofttech-pawan/esqro_new_admin | /application/libraries/blockcypher/php-client/blockcypher/php-client/generate-api.sh | UTF-8 | 779 | 2.984375 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/sh
# Build the SDK's API documentation with ApiGen and force-push it, together
# with the sample pages, to the repo's gh-pages branch. Intended to run from
# Travis CI; $GH_TOKEN must hold a GitHub access token.

# Get ApiGen.phar
wget http://www.apigen.org/apigen.phar
# Generate SDK Docs
php apigen.phar generate --template-theme="bootstrap" -s lib -d ../gh-pages/docs
# Copy Home Page from Master Branch to Gh-Pages folder
cp -r docs/* ../gh-pages/
# Copy samples
cp -r sample ../gh-pages/sample
# As PHP is not allowed in Github
cp sample/index.php ../gh-pages/sample/index.html
# Bail out if the gh-pages checkout is missing: everything below (git init,
# forced push) is destructive and must not run in the wrong directory.
# (The original cd was unchecked.)
cd ../gh-pages || exit 1
# Set identity
git config --global user.email "travis@travis-ci.org"
git config --global user.name "Travis"
# Add branch
git init
git remote add origin https://${GH_TOKEN}@github.com/blockcypher/php-client.git > /dev/null
git checkout -B gh-pages
# Push generated files
git add .
git commit -m "Docs updated by Travis"
git push origin gh-pages -fq > /dev/null
| true |
37afef64240fe2c514620a42fa83dfff108f9380 | Shell | webhead404/thremulation-station | /vagrant/scripts/download-pneuma-linux.sh | UTF-8 | 692 | 3.34375 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Download the Pneuma agent binary and its systemd unit, install them, and
# enable + start the service.
PNEUMA_URL="https://s3.amazonaws.com/operator.payloads.open/payloads/pneuma/pneuma-linux"
PNEUMA_SERVICE_FILE="https://raw.githubusercontent.com/webhead404/thremulation-station/main/vagrant/scripts/pneuma-agent.service"
INSTALL_DIR="/opt/pneuma"
SCRIPTS_DIR="/vagrant"
# Stage Pneuma download in a private temp dir; remember its path so the
# cleanup below removes exactly this directory and nothing else.
TMP_DIR="$(mktemp -d)"
cd "$TMP_DIR"
curl "$PNEUMA_URL" -o pneuma-agent
echo "Pulling service file via dev Github"
curl "$PNEUMA_SERVICE_FILE" -o pneuma-agent.service
# Install the agent binary and its unit file (-p keeps reruns from failing
# when /opt/pneuma already exists).
mkdir -p "$INSTALL_DIR"
cp pneuma-agent "$INSTALL_DIR"
chmod +x "$INSTALL_DIR/pneuma-agent"
cp pneuma-agent.service /etc/systemd/system
# Cleanup temporary directory. The original did `cd ..; rm -rf "$(pwd)"`,
# which deletes the *parent* of the temp dir (usually /tmp itself) — remove
# only the directory we created.
cd /
rm -rf -- "$TMP_DIR"
systemctl enable pneuma-agent
systemctl start pneuma-agent
| true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.