blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
e4abc39c615e606b7b6d3be39988168f9223648c | Shell | HCrane/bash-scripts | /install_fresh.sh | UTF-8 | 4,315 | 4.03125 | 4 | [] | no_license | #!/bin/bash
################################################################################
# Custom standard install programm for freshly installed linux
# Written by Emanuel Moser#
################################################################################

################################################################################
# Design: terminal formatting codes used for the [OK]/[NO] status output
################################################################################
bold=$(tput bold)
normal=$(tput sgr0)
red=$(tput setaf 1)
green=$(tput setaf 2)

################################################################################
# Fetching new package list
# Declare if custom programs from the web will be downloaded
# Declare if Z Shell and Config from IAIK TU GRAZ should be installed
# 1 = true
# 0 = false
################################################################################
apt-get update
CUSTOM_PROGRAMMS=0
ZSH=1

################################################################################
declare -a INSTALL
################################################################################
# Declaration of the packages you want installed
# CUSTOM packages are used for special computers. e.g. not every pc of mine
# needs virtualbox. Can be commented out if not needed
################################################################################
CUSTOM1=virtualbox
CUSTOM2=chromium
CUSTOM3=kde-config-touchpad
INSTALL=( unrar unzip git htop curl firmware-iwlwifi $CUSTOM1 $CUSTOM2 $CUSTOM3 )

# Install every listed package that dpkg does not already report as installed.
# grep -c over the single "Status" line yields 0 or 1, so a plain else branch
# replaces the original redundant second dpkg-query call.
for i in "${INSTALL[@]}"
do
    if [[ $(dpkg-query -W -f='${Status}' "$i" 2>/dev/null | grep -c "ok installed") -eq 0 ]]; then
        echo "[${red}NO${normal}]${bold}$i${normal} will be installed"
        apt-get install "$i" -y
    else
        echo "[${green}OK${normal}]${bold}$i${normal} already installed"
    fi
done

################################################################################
declare -a NAME
declare -a LINK
################################################################################
# Custom software that cannot be found in the package lists because of reasons
# !!!!!!!ATTENTION:The order is important how you add the names and links!!!!!!!
# NAME and LINK are parallel arrays; x tracks the current index into LINK.
################################################################################
if [[ $CUSTOM_PROGRAMMS -eq 1 ]]; then
    NAME=( atom gitkraken )
    LINK=( https://atom.io/download/deb https://release.gitkraken.com/linux/gitkraken-amd64.deb )
    x=0
    for i in "${NAME[@]}"
    do
        if [[ $(dpkg-query -W -f='${Status}' "$i" 2>/dev/null | grep -c "ok installed") -eq 0 ]]; then
            echo "[${red}NO${normal}]${bold}$i${normal} will be installed"
            # creating .deb file with name of package
            filename="${NAME[$x]}.deb"
            wget -O "$filename" "${LINK[$x]}"
            echo "$filename"
            dpkg -i "$filename"
            # pull in any missing dependencies dpkg -i could not resolve
            apt-get -f install -y
            rm -f "$filename"
        else
            echo "[${green}OK${normal}]${bold}$i${normal} already installed"
        fi
        # advance the parallel-array index on every iteration
        x=$((x+1))
    done
fi

################################################################################
# Z shell + grml zshrc setup (enabled via ZSH=1 above)
################################################################################
if [[ $ZSH -eq 1 ]]; then
    # Query "zsh" explicitly here; the original reused the stale loop
    # variable $i, so this branch tested the wrong package.
    if [[ $(dpkg-query -W -f='${Status}' zsh 2>/dev/null | grep -c "ok installed") -eq 0 ]]; then
        echo "[${red}NO${normal}]${bold}zsh${normal} will be installed"
        sudo aptitude install zsh &&
        wget -O ~/.zshrc http://git.grml.org/f/grml-etc-core/etc/zsh/zshrc &&
        chsh -s /usr/bin/zsh
    else
        echo "[${green}OK${normal}]${bold}zsh${normal} already installed"
    fi
fi
################################################################################
| true |
59bd468f58efb36a72fa9375fd8071d98c833600 | Shell | b25723/tmd | /vpn | UTF-8 | 1,590 | 2.75 | 3 | [] | no_license | #!/bin/sh
#!/bin/sh
# Office VPN (vpnclient) link manager:
#  - wired link up (enp0s25)            -> stop the VPN client
#  - no wire and no vpn_office device   -> start VPN, add address + office routes
#  - wifi associated (wlp4s0) + no wire -> start VPN, add address + office routes
# Touch-files /tmp/a, /tmp/b, /tmp/c record which branch ran (debug markers).
set -x
#sleep 5
#export inf=$(cat /proc/net/dev|grep -i vpn_office|awk -F: '{print $1}')
#add a ap connected detection
#write a function to verify ...
#or write a framework-style similar
#IP_SET=$(ip a s enp0s25|grep -i 'inet\b'|awk '{print $2}')
#SSID=$(iwconfig wlp4s0|grep -i essid|awk -FESSID: '{print $2}')
# INF: interface name line from /proc/net/dev if a vpn_office device exists.
# ETH: "yes"/"no" from ethtool's "Link detected:" line for the wired NIC.
INF=$(cat /proc/net/dev|grep -i vpn_office|awk -F: '{print $1}')
ETH=$(sudo ethtool enp0s25|grep Link|awk '{print $3}')
# Branch a: wired link present -> the VPN is not needed; shut it down.
if [ "$ETH" = "yes" ];then
sudo vpnclient stop
#sleep 1
echo 'a point'
touch /tmp/a
fi
# Re-sample after the possible stop above, since it changes both values.
INF=$(cat /proc/net/dev|grep -i vpn_office|awk -F: '{print $1}')
ETH=$(sudo ethtool enp0s25|grep Link|awk '{print $3}')
# Branch b: no wire and no VPN interface yet -> bring the VPN up.
# NOTE(review): awk -F: '{print $1}' keeps the leading whitespace that
# /proc/net/dev pads interface names with, so $INF may never compare equal
# to the bare string "vpn_office" — verify on the target system.
if [ "$INF" != "vpn_office" ] && [ "$ETH" = "no" ];then
sudo vpnclient start
sleep 1
#echo 'nameserver 10.28.1.212' > /etc/resolv.conf
echo 'b point'
touch /tmp/b
sudo ip a a 192.168.30.200/24 dev vpn_office
sudo ip r a 10.24.0.0/16 via 192.168.30.1
sudo ip r a 10.40.0.0/16 via 192.168.30.1
sudo ip r a 10.205.0.0/24 via 192.168.30.1
fi
# Branch c: wifi is associated (ESSID not "off/any") and no wire -> same
# VPN bring-up. If branch b already ran, these commands re-run; the ip
# address/route additions will just report "File exists".
SSID=$(iwconfig wlp4s0|grep -i essid|awk -FESSID: '{print $2}')
INF=$(cat /proc/net/dev|grep -i vpn_office|awk -F: '{print $1}')
ETH=$(sudo ethtool enp0s25|grep Link|awk '{print $3}')
if [ "$SSID" != "off/any" ] && [ "$ETH" = "no" ];then
#if [ "$SSID" = "AP2000" ] && [ "$ETH" = "no" ];then
sudo vpnclient start
sleep 1
#echo 'nameserver 10.28.1.212' > /etc/resolv.conf
echo 'c point'
touch /tmp/c
sudo ip a a 192.168.30.200/24 dev vpn_office
sudo ip r a 10.24.0.0/16 via 192.168.30.1
sudo ip r a 10.40.0.0/16 via 192.168.30.1
sudo ip r a 10.205.0.0/24 via 192.168.30.1
fi
| true |
bc8cf1f8528e27b489200582f66f10a3846bc7d4 | Shell | dpalma9/initMachine | /init.sh | UTF-8 | 6,327 | 3.78125 | 4 | [] | no_license | #/bin/bash!
#**************
# CABECERA
#**************
# Descripcion: Install software to workstation.
# Version: 1.0
# Autor: Daniel Palna
# Fecha creacion/modificacion: 23 Jul 2018
# Entrada: Ninguna
# Salida:
#**************
#**************
# VARS BLOCK
# OPTION: action requested on the command line (update|tools|basic|custom|help)
# root_path: directory the script was launched from; used to locate bundled
#            config files (zsh/, vim/) and as the download target for .deb files
OPTION=$1
root_path=`pwd`
#**************
help() {
# Print the usage banner.
# NB: only the "Uso:" line is sent to stderr; everything else goes to stdout.
echo ""
echo "#####################################"
echo "# Help Menu #"
echo "#####################################"
echo ""
echo "Uso: $0 [update|tools|basic|custom]" >&2
echo ""
echo " -h, -H, --help Execution help"
echo ""
echo "Para ejecutar el script tendremos que hacerlo ejecutándolo con alguna de las opciones descritas"
echo "y proseguir seleccionando opciones de los menus que se nos presentarán."
echo "Ejemplo Uso: "
echo " $0 update"
echo " $0 tools"
echo ""
echo "NOTE: tools option will open a menu to choose which tool you want to install."
echo ""
}
custom() {
    # Show the theming menu and install the selected GTK theme / icon pack.
    # Reads the choice interactively into OPTION (overwrites the global).
    echo ""
    # Fixed: the original had `echo"# Customize menu #"` (no space after
    # echo), which failed with "command not found" instead of printing.
    echo "# Customize menu #"
    echo ""
    echo "1. Numix Gtk theme and icon pack."
    echo "2. X-Arc theme."
    read -p "Choose an option: " OPTION
    case "$OPTION" in
        1)
            sudo add-apt-repository ppa:numix/ppa
            sudo apt-get update
            sudo apt-get install numix-gtk-theme numix-icon-theme-circle
            ;;
        2)
            echo "WARNING: Need to install gnome-themes-standard package. Installing..."
            sudo apt install gnome-themes-standard
            xarcV=1.4.7
            wget https://gitlab.com/LinxGem33/X-Arc-White/uploads/26bccc81678392584149afa3167f8e78/osx-arc-collection_"$xarcV"_amd64.deb
            sudo dpkg -i "$root_path"/osx-arc-collection_"$xarcV"_amd64.deb
            # plain file removal; -R is unnecessary for a single .deb
            rm -f "$root_path"/osx-arc-collection_"$xarcV"_amd64.deb
            ;;
        *) #end of options
            echo "Not valid option!"
            exit 1
            ;;
    esac
}
update() {
# Refresh the package index and upgrade all installed packages
# (-V prints version changes for each upgraded package).
echo "WARNING: Upgrading system..."
sudo apt update
sudo apt -V upgrade
}
tools() {
    # Interactive menu: install one tool per invocation.
    # Reads the choice into OPTION (overwrites the global).
    echo ""
    echo "# Tools Menu #"
    echo ""
    echo "1. Install Google Chrome."
    echo "2. Install zsh and oh-my-zsh."
    echo "3. Install Spotify."
    echo "4. Install Gnome Tweak Tool."
    echo "5. Install tilix and pokemon terminal."
    echo "6. Install Telegram."
    echo "7. Install vundle (vim plugin)."
    echo "8. Install fzf (historical)."
    read -p "Choose an option: " OPTION
    case "$OPTION" in
        1)
            gChrome
            ;;
        2)
            zsh
            custom_zsh
            ;;
        3)
            spotify
            ;;
        4)
            gnomeTool
            ;;
        5)
            echo "(WARNING!) This will be to install python pip 3.6!"
            sudo apt-get install python3-pip
            sudo apt install tilix
            sudo pip3 install git+https://github.com/LazoCoder/Pokemon-Terminal.git
            ;;
        6)
            teleV=1.3.10
            echo "Installing Telegram."
            # Fixed: the original quoted the literal string "teleV" instead
            # of expanding "$teleV", producing a broken download URL and
            # mismatched archive filenames.
            wget https://telegram.org/dl/desktop/linux/tsetup."$teleV".tar.xz
            tar -Jxvf "$root_path"/tsetup."$teleV".tar.xz
            mv "$root_path"/Telegram "$HOME"
            rm tsetup."$teleV".tar.xz
            ;;
        7)
            vundle
            ;;
        8)
            git clone --depth 1 https://github.com/junegunn/fzf.git ~/.fzf
            ~/.fzf/install
            ;;
        *) #fin de las opciones
            echo "Not valid option!"
            exit 1
            ;;
    esac
}
gChrome() {
    # Download and install the current stable Google Chrome .deb.
    # The download filename is fixed by Google, so refer to it directly
    # instead of parsing `ls` output as the original did.
    local pkg="google-chrome-stable_current_amd64.deb"
    echo "Downloading Google Chrome..."
    wget "https://dl.google.com/linux/direct/${pkg}"
    sudo dpkg -i "$pkg"
    echo "Deleting .deb package..."
    rm -f "$pkg"
}
basic() {
    # Install a baseline set of CLI utilities, then add bashtop from its PPA.
    local pkg_list="curl wget apt-transport-https ca-certificates software-properties-common gitk"
    echo "Installing packages: $pkg_list"
    # intentionally unquoted: the list must word-split into separate packages
    sudo apt install -y $pkg_list
    echo "Now, installing bashtop: "
    sudo add-apt-repository ppa:bashtop-monitor/bashtop
    sudo apt update
    sudo apt install bashtop
    echo "Visit: https://github.com/aristocratos/bashtop"
}
zsh() {
    # Install zsh, register it in /etc/shells, make it the login shell,
    # then run the oh-my-zsh installer.
    echo "Initializing zsh env..."
    sudo apt-get install zsh
    # This function is itself named "zsh", so `command -v zsh` would resolve
    # to the function name rather than the binary path (and would have
    # written the bare word "zsh" into /etc/shells). `type -P` looks up the
    # executable on PATH only.
    type -P zsh | sudo tee -a /etc/shells
    chsh -s "$(type -P zsh)" "${USER}"
    sh -c "$(curl -fsSL https://raw.githubusercontent.com/robbyrussell/oh-my-zsh/master/tools/install.sh)"
}
custom_zsh() {
# Copy the repo-bundled zshrc and theme into the user's home and install
# the zsh-autosuggestions plugin.
# Assumes the script was launched from the repo root so that
# $root_path/zsh/{zshrc,dani.zsh-theme} exist, and that oh-my-zsh is
# already installed (see zsh()).
echo "Starting customizing zsh"
cp $root_path/zsh/zshrc $HOME/.zshrc
cp $root_path/zsh/dani.zsh-theme $HOME/.oh-my-zsh/themes/
# re-own the copied files in case the script ran under sudo/root
chown $USER:$USER $HOME/.zshrc
chown -R $USER:$USER $HOME/.oh-my-zsh
echo "Now we're going to install the autosuggestions zsh plugin..."
git clone https://github.com/zsh-users/zsh-autosuggestions ${ZSH_CUSTOM:-~/.oh-my-zsh/custom}/plugins/zsh-autosuggestions
}
vundle() {
# Install the Vundle vim plugin manager and the repo-bundled .vimrc.
# Assumes $root_path/vim/vimrc exists (script launched from the repo root).
echo "Installing Vundle..."
git clone https://github.com/VundleVim/Vundle.vim.git $HOME/.vim/bundle/Vundle.vim
echo "Visit: https://github.com/VundleVim/Vundle.vim"
echo "Copying .vimrc config"
cp $root_path/vim/vimrc $HOME/.vimrc
chown $USER:$USER $HOME/.vimrc
echo "Open vim and execute :PluginInstall"
}
spotify() {
# Install Spotify via snap (the apt-repository route is kept commented
# out below as the previous method).
echo "Installing Spotify..."
#sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 931FF8E79F0876134EDDBDCCA87FF9DF48BF1C90
#echo deb http://repository.spotify.com stable non-free | sudo tee /etc/apt/sources.list.d/spotify.list
#sudo apt-get update && sudo apt install spotify-client
snap install spotify
}
gnomeTool() {
    # Install GNOME Tweaks plus shell extensions, then point the user at a
    # guide for enabling extensions from the browser.
    local extensions_guide="https://itsfoss.com/gnome-shell-extensions/"
    echo "Installing Gnome Tweak Tool..."
    sudo apt install -y gnome-tweak-tool gnome-shell-extensions
    echo "Don't forget to install the necessary plugin in Firefox or Chrome to install plugins."
    sleep 2
    echo "Visit $extensions_guide"
}
#**************
# CUERPO
#**************
#Ayuda del script
# Dispatch on the command-line OPTION. Each valid action runs once and
# breaks out of the loop; an unrecognized option prints help and exits 1.
while true; do
    case "$OPTION" in
        -[hH] | --help)
            help
            break
            ;;
        update)
            update
            break
            ;;
        tools)
            tools
            break
            ;;
        basic)
            basic
            break
            ;;
        custom)
            custom
            break
            ;;
        test)
            echo "test"
            # Fixed: without this break the "test" option spun forever,
            # printing "test" in an infinite loop.
            break
            ;;
        *) #Fin de las opciones
            echo ""
            echo "=========================================================================="
            echo " INFO: Por favor, revisa la ayuda del script para lanzarlo correctamente."
            echo "=========================================================================="
            echo ""
            sleep 1
            help
            exit 1
            ;;
    esac
done
## fin de bucle de lectura inicial
| true |
271fc96e78a27f389cc8c3756c03c2c9240bad49 | Shell | cmoralesdiego/test-environment | /infrastructure/testnet/bin/clean_ethstats.sh | UTF-8 | 231 | 2.703125 | 3 | [
"Apache-2.0"
] | permissive |
#!/bin/bash
set -u
set -e
# Remove the locally checked-out eth-netstats tree, if one exists.
echo "[!!] Run this script from the directory test-environment/infrastructure/testnet/"
netstats_dir="./eth-netstats"
if [[ -d "$netstats_dir" ]]; then
    rm -rf "$netstats_dir"
fi
echo "Removing netstats dependencies"
set +u
set +e
| true |
96f5dfe98411bdf1c83f3435e3b43d4ea98cc59c | Shell | trueos/pcbsd | /src-sh/pcbsd-utils/pc-thinclient/resources/scripts/tcslogin.sh | UTF-8 | 4,228 | 3.703125 | 4 | [] | no_license | #!/bin/sh
PATH="/bin:/sbin:/usr/bin:/usr/sbin:/usr/local/bin" ; export PATH
HOME="/root" ; export HOME
TERM="cons25" ; export TERM
# Clear the screen first
clear
# Check if we are running as an install server
if [ -e "/etc/installserver" ] ; then
/etc/scripts/tcsinstall.sh
halt -p
fi
# Restart moused
/etc/rc.d/moused restart
# Check if given option to use vesa mode at boot splash
xvesa="NO"
v=`/bin/kenv xvesa`
if [ $? -eq 0 ]
then
xvesa=$v
fi
# Figure out which NIC we are using
NIC="`ifconfig | grep '<UP' | grep -v '^lo0' | cut -d ':' -f 1`"
echo "Running on NIC: $NIC"
# Now get the MAC address of this NIC
MAC="`ifconfig ${NIC} | grep ether | cut -d ' ' -f 2`"
# Now check if we have a saved xorg.conf file for this MAC
if [ -e "/mnt/xorg-config/${MAC}.conf" ]
then
if [ "$xvesa" = "YES" ]
then
echo "Running in Failsafe VESA mode."
cp /etc/scripts/cardDetect/XF86Config.compat /etc/X11/xorg.conf
else
echo "Using saved xorg config for $MAC"
cp /mnt/xorg-config/${MAC}.conf /etc/X11/xorg.conf
fi
# Now bring up the X session to query for XDM servers
/usr/local/bin/X -broadcast
reboot
else
echo "No saved xorg-config for this MAC: $MAC"
echo "Do you wish to setup a custom xorg.conf for this system? (Y/N)"
read -t10s response
if [ "${response}" = "Y" -o "${response}" = "y" ]
then
while
z=1
do
dialog --menu 'Select your setup option' 15 55 10 'vesa' 'Use failsafe VESA mode' 'auto' 'Run X auto-detection' 'viconfig' 'Edit xorg.conf with vi' 'test' 'Test Configuration' 'shell' 'Run Shell' 'save' 'Save working config' 'close' 'Close and Reboot' 2>/tmp/ans
ANS=`cat /tmp/ans`
case $ANS in
vesa) cp /etc/scripts/cardDetect/XF86Config.compat /etc/X11/xorg.conf
echo "Copied failsafe 1024x768 VESA config."
echo "[Press Enter to Continue]"
read tmp
;;
auto) X -configure
cat /root/xorg.conf.new >> /etc/X11/xorg.conf
clear
echo "Copied Xorg auto-detected config."
echo "[Press Enter to Continue]"
read tmp
;;
viconfig) vi /etc/X11/xorg.conf
echo "[Press Enter to Continue]"
read tmp
;;
shell) /bin/tcsh ;;
save) echo "Preparing to upload /etc/X11/xorg.conf to the server."
echo "You will need to enter the password for user 'pxeboot' in order to proceed."
echo "[Press Enter to Continue]"
read tmp
cp /etc/X11/xorg.conf /tmp/${MAC}.conf
echo "put /tmp/${MAC}.conf" > /tmp/uploadcmd
sftp pxeboot@192.168.2.2 </tmp/uploadcmd
rm /tmp/uploadcmd >/dev/null 2>/dev/null
echo "[Press Enter to Continue]"
read tmp
;;
test) echo "#!/bin/sh
echo 'Accept this configuration? (Y/N)'
read -t15s response
if [ \"${response}\" = \"Y\" -o \"${response}\" = \"y\" ]
then
touch /tmp/workingX
fi" >/tmp/testX
chmod 755 /tmp/testX
rm /tmp/workingX >/dev/null 2>/dev/null
echo "#!/bin/sh
xv -root -quit -max /etc/scripts/wallpaper.png
xterm -e /bin/sh -c /tmp/testX" > /root/.xinitrc
chmod 755 /root/.xinitrc
echo "Preparing to test X. If the screen does not display, wait 15 seconds and X will exit."
echo "[Press Enter to Continue]"
read tmp
startx
if [ -e "/tmp/testX" ]
then
echo "Congratulations! X has been correctly configured."
echo "Please select 'save working config' to upload the configuration to the server. "
fi
echo "[Press Enter to Continue]"
read tmp
;;
close) reboot ;;
*) ;;
esac
done
else
# User didn't want to config X, run with autodetect
# Now bring up the X session to query for XDM servers
echo "Auto-detecting X settings"
/usr/local/bin/X -configure
cp /root/xorg.conf.new /etc/X11/xorg.conf
/usr/local/bin/X -broadcast
reboot
fi
fi
| true |
c86ccd5c92b4049f9127fcaeed673379f6c45d9e | Shell | ReeceRobinson/X-Plane_Plugin | /cp_win32.sh | UTF-8 | 450 | 3.015625 | 3 | [] | no_license | # /bin/sh
# copy a file to the XP machine for testing
#ftp xp << EOT
#cd /Users/me/Desktop/xplane/Resources/plugins
#bin
#put $1
#EOT
WINDOWS_SHARE="\\\\jb-notebook3\Users\jblie_000\Desktop\X-Plane 10\Resources\plugins"
MOUNTPOINT=/mnt/windows
mountpoint -q $MOUNTPOINT
if [ $? -ne 0 ]; then
sudo mount -t cifs -o user="Jorg Bliesener",uid=jbliesener,gid=jbliesener "$WINDOWS_SHARE" "$MOUNTPOINT"
fi
unzip -o -d "$MOUNTPOINT" target/TeensyControls.zip
| true |
e7b306dfb3f6d27aa718ca69ef7be5a210ca00eb | Shell | PathwayCommons/factoid | /ci.sh | UTF-8 | 2,056 | 4.0625 | 4 | [
"MIT",
"CC0-1.0"
] | permissive | #!/bin/bash
# This is a helper script to set up a very simple CI dev/testing server. It can
# be used with `cron` in order to set up regular builds, e.g. for every 15 minutes:
#
# `crontab -e`
#
# @reboot /home/username/rethinkdb.sh > /home/username/rethinkdb.log
# */15 * * * * /home/username/master.sh > /home/username/master.log
#
# To use this script, create a script per server instance, e.g. `master.sh`:
#
# #!/bin/bash
#
# # Mandatory repo/branch conf
# export REPO=https://github.com/PathwayCommons/factoid.git
# export BRANCH=master
# export JOB_NAME=factoid-master
#
# # Project-specific env vars
# export PORT=3000
#
# ./ci.sh
echo "--"
echo "Starting $JOB_NAME build on"
date
WORKSPACE=/home/`whoami`/$JOB_NAME
WORKSPACE_TMP=/tmp/$JOB_NAME
rm -rf $WORKSPACE_TMP
mkdir -p $WORKSPACE_TMP
cd $WORKSPACE_TMP
# get the repo
git clone $REPO $WORKSPACE_TMP
git checkout $BRANCH
# build
npm install
npm run clean || echo "No clean script found"
export NODE_ENV=production
npm run build || echo "No build script found"
if [ $COMMAND ]
then
npm run $COMMAND
fi
# stop the old screen session
echo "Quitting old screen session..."
screen -S $JOB_NAME -X -p 0 stuff ^C && echo "Sent ^C" || echo "No screen session to ^C"
screen -S $JOB_NAME -X quit && echo "Quit old screen session" || echo "No screen session to stop"
#echo "Waiting a bit to let the old app exit..."
#sleep 30
# swap out old workspace with new one
echo "Replacing workspace..."
mkdir -p /tmp/rm
mv $WORKSPACE /tmp/rm/$JOB_NAME && echo "Moved old workspace to /tmp/rm" || echo "No old workspace to move"
mv $WORKSPACE_TMP $WORKSPACE
cd $WORKSPACE
echo "Replaced workspace"
# start the server in a screen session
echo "Starting new screen session..."
screen -d -m -S $JOB_NAME bash -c "npm run ${START_SCRIPT:-start} 2>&1 | tee ~/$JOB_NAME.screen.log"
echo "New screen session started"
# delete the old workspace files
echo "Deleting old workspace..."
rm -rf /tmp/rm/$JOB_NAME && echo "Old workspace deleted" || echo "No old workspace to delete"
echo "CI script complete"
| true |
bbb4b692ae542bb543bb6fb3f5d9dfa6bdb67c3f | Shell | margro/moservices | /modules/rssex2/rssex2/bin/nph-translate | UTF-8 | 63 | 2.75 | 3 | [] | no_license | #!/bin/sh
if [ -f "$1" ]; then
cat "$1"
rm -f "$1"
fi
| true |
f7566933ce1667f458d564e8ef8e88f16f237279 | Shell | RJVB/DropScript | /DropScript-master/examples/Terminal/NewShellHere.sh | UTF-8 | 1,323 | 3.71875 | 4 | [] | no_license | #!/bin/sh
##
# Open a terminal window and change directory to a folder
#
# Wilfredo Sanchez | wsanchez@wsanchez.net
# Copyright (c) 2001-2002 Wilfredo Sanchez Vega.
# All rights reserved.
#
# Permission to use, copy, modify, and distribute this software for
# any purpose with or without fee is hereby granted, provided that the
# above copyright notice and this permission notice appear in all
# copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHORS DISCLAIM ALL
# WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
# AUTHORS BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR
# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
##
# EXTENSIONS : # Accepted file extentions
# OSTYPES : "fold" # Accepted file types
# ROLE : Viewer # Role (Editor, Viewer, None)
# SERVICEMENU : Terminal/New Shell Here # Name of Service menu item
Destination="$1";
if [ ! -d "${Destination}" ]; then exit 1; fi;
"$(dirname "$0")/terminal" --activate --showtitle false -e "
cd \"${Destination}\" || exit;
clear;
echo \"Working directory is: ${Destination}\";
";
| true |
577c91593d0b455c2086fa8248fd99911c844cf3 | Shell | DamianZaremba/rpm | /update_repo_server.sh | UTF-8 | 1,500 | 3.9375 | 4 | [] | no_license | #!/bin/bash
#############################################################
# Simple bash script to update the repos from git #
#############################################################
BASE_REPO="git://github.com/DamianZaremba/rpm.git";
BASE_DIR="/var/www/vhosts/repo.nodehost.co.uk/";
DUMP_DIR="/home/rpms/%%-TYPE-%%/"
REPOS=( "CentOS-5" );
for repo in ${REPOS[@]};
do
MY_DUMP_DIR=$(echo $DUMP_DIR | sed "s/%%-TYPE-%%/$repo/g");
mkdir -p "$(echo $DUMP_DIR | sed "s/%%-TYPE-%%/$repo/g")../";
test -d "$BASE_DIR/$repo" || mkdir -p "$BASE_DIR/$repo";
if [ -d $MY_DUMP_DIR/.git ];
then
cd $MY_DUMP_DIR
git pull
else
git clone --quiet --progress $BASE_REPO -b $repo $MY_DUMP_DIR;
if [ "$?" != "0" ];
then
# This happens when the branch doesn't exist
rm -rf $MY_DUMP_DIR;
fi
fi
ls $MY_DUMP_DIR
if [ -d $MY_DUMP_DIR/RPMS/ ];
then
cd $MY_DUMP_DIR/RPMS/;
for arch in *;
do
test -d "$MY_DUMP_DIR/RPMS/$arch" || continue;
cd "$MY_DUMP_DIR/RPMS/$arch";
echo "Starting $arch";
rsync -vr --delete *.rpm "$BASE_DIR/$repo/$arch/";
echo "Running createrepo for $repo->$arch"
createrepo -d "$BASE_DIR/$repo/$arch"
echo "Running repoview for $repo->$arch"
repoview -t "$repo - $arch" "$BASE_DIR/$repo/$arch";
echo "Fixing access stuff"
chown -R www-server:www-data "$BASE_DIR/$repo/$arch";
find "$BASE_DIR/$repo/$arch" -type f -exec chmod 640 {} \;
find "$BASE_DIR/$repo/$arch" -type d -exec chmod 750 {} \;
done
fi
done
| true |
3e9775262ba20bb9e04296c57f24334c8c55b16a | Shell | TakahiroMiyaura/AzureMapsIndoorDataSamples | /ARM-Template/postdeploy.sh | UTF-8 | 831 | 2.75 | 3 | [
"MIT"
] | permissive | adtname=$1
rgname=$2
egname=$3
egid=$4
funcappid=$5
echo "adt name: ${adtname}"
echo "rgname:" ${rgname}
echo "egname: ${egname}"
echo "egid: ${egid}"
echo "funcappid: ${funcappid}"
# echo 'installing azure cli extension'
az config set extension.use_dynamic_install=yes_without_prompt
az extension add --name azure-iot -y
# az eventgrid topic create -g $rgname --name $egname -l $location
az dt endpoint create eventgrid --dt-name $adtname --eventgrid-resource-group $rgname --eventgrid-topic $egname --endpoint-name "$egname-ep"
az dt route create --dt-name $adtname --endpoint-name "$egname-ep" --route-name "$egname-rt"
# Create Subscriptions
az eventgrid event-subscription create --name "$egname-ufstate-sub" --source-resource-id $egid --endpoint "$funcappid/functions/updatemapsfeaturestate" --endpoint-type azurefunction | true |
d0a28be800f1ffbb014d5338266dabfbe5ae0d84 | Shell | Blixter/ramverk1-proj | /init-database.bash | UTF-8 | 478 | 3.109375 | 3 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/usr/bin/env bash
# Initialize the application's SQLite database: create the file with open
# permissions, load the two table schemas, then seed the default rows.
db_file="data/db.sqlite"

echo "Creating database file: 'data/db.sqlite'..."
touch "$db_file"
chmod 777 "$db_file"

echo "Creating tables for database..."
sqlite3 "$db_file" < sql/ddl/post_sqlite.sql
sqlite3 "$db_file" < sql/ddl/user_sqlite.sql

echo "Inserting default data...."
sqlite3 "$db_file" < sql/ddl/insert.sql

echo ""
echo "Database successfully initialized."
| true |
c8764d85643534be7d8688a01a99bccd42bed4cf | Shell | alehandru/grafana | /elasticsearch/run | UTF-8 | 1,174 | 3.59375 | 4 | [] | no_license | #!/bin/bash
ES_HOME=/opt/elasticsearch
ES_MIN_MEM=256m
ES_MAX_MEM=1g
DAEMON=${ES_HOME}/bin/elasticsearch
NAME=elasticsearch
DESC=elasticsearch
PID_FILE=/var/run/${NAME}.pid
LOG_DIR=${ES_HOME}/logs
DATA_DIR=${ES_HOME}/data
WORK_DIR=${ES_HOME}/tmp
CONFIG_FILE=${ES_CONFIG}/config/elasticsearch.yml
DAEMON_OPTS=""
ES_JAVA_OPTS="${ES_JAVA_OPTS} -Des.path.home=$ES_HOME -Des.path.logs=$LOG_DIR -Des.path.data=$DATA_DIR -Des.path.work=$WORK_DIR"
ES_USER=elasticsearch
export ES_JAVA_OPTS
test -x $DAEMON || exit 0
set -e
case "$1" in
start)
echo -n "Starting $DESC: "
mkdir -p $LOG_DIR $DATA_DIR $WORK_DIR
if start-stop-daemon --user "${ES_USER}" -c "${ES_USER}" --start --pidfile $PID_FILE --startas $DAEMON -- ${DAEMON_OPTS}
then
echo "started."
else
echo "failed."
fi
;;
stop)
echo -n "Stopping $DESC: "
if start-stop-daemon --stop --pidfile $PID_FILE
then
echo "stopped."
else
echo "failed."
fi
;;
restart|force-reload)
${0} stop
sleep 0.5
${0} start
;;
*)
N=/etc/init.d/$NAME
echo "Usage: $N {start|stop|restart|force-reload}" >&2
exit 1
;;
esac
| true |
42cc6c780a289d09d017be895691f9843e839d53 | Shell | riteshja88/UIC | /fall2012/cs450/ragarw8/hw7/scripts/.svn/text-base/post-process.svn-base | UTF-8 | 6,705 | 3.578125 | 4 | [] | no_license | #!/bin/bash
source "scripts/variables"
#cleanup
#rm logs/*.post1
#rm logs/*.post2 ..done before starting stage2
#Remove 0 size .ping files
for host in `ls logs/*.ping`; do
if [ -s $host ]; then
echo "Do nothing" > /dev/null
else
rm $host
fi
done
#Remove errors
#100% packet loss...ping does not give average value
for host in `ls logs/*.ping`; do
cat $host|sed -e 's/.*100%.*/rtt min\/avg\/max\/mdev = 0.0\/unknown\/0.0\/0.0 ms/g' > $host"2"
mv $host"2" $host
done
#pruning complete
echo "Pruning complete"
date
#Stage1
#{
#will create 3 files for each host with their ips, average rtts and distance
# For every source host's .ping log, emit parallel .post1 files (one line per
# pinged destination): destination ips, average rtts, great-circle distances
# (via scripts/gcdist.pl and data/IP_locations.txt), plus source ip/location
# and destination location columns.
cd logs
for source_host in `ls *.ping`; do
#{
cd ..
# leading dotted-quad of the filename identifies the source host's IP
IP=`echo $source_host|cut -d '.' -f1,2,3,4`
source_host_location=`cat data/IP_locations.txt |grep $IP|awk '{print $2,$3}'`
cat logs/$source_host|grep statistics|awk '{print $2}' > logs/"$source_host""_ips.post1"
cat logs/$source_host|grep avg|awk '{print $4}'|tr '/' ' '|awk '{print $2}' > logs/"$source_host""_averages.post1"
#this loop will calculate the distances of all the other ips with source_host
cd logs
# touch+rm pairs guarantee the append targets below start empty
touch "$source_host""_distances.post1" #to avoid error messages when file is not present
rm "$source_host""_distances.post1"
touch "$source_host""_sips.post1" #to avoid error messages when file is not present
rm "$source_host""_sips.post1"
touch "$source_host""_sloc.post1"
rm "$source_host""_sloc.post1"
touch "$source_host""_dloc.post1"
rm "$source_host""_dloc.post1"
for dest_host in `cat "$source_host""_ips.post1"`; do
IP=`echo $dest_host|cut -d '.' -f1,2,3,4`
dest_host_location=`cat ../data/IP_locations.txt |grep $IP|awk '{print $2,$3}'`
distance=`../scripts/gcdist.pl $source_host_location $dest_host_location`
echo $distance >> "$source_host""_distances.post1"
echo $source_host >> "$source_host""_sips.post1"
echo $source_host_location >> "$source_host""_sloc.post1"
echo $dest_host_location >> "$source_host""_dloc.post1"
done
cd ..
cd logs
#}
done
cd ..
#Prune stage1 data
#Remove 0 size _ips, _averages and _distance files (.post1)
for file in `ls logs/*.post1`; do
if [ -s $file ]; then
echo "Do nothing" > /dev/null
else
rm $file
fi
done
#delete raw data files
#rm logs/*.ping ..dont do this here, it is needed for next loop
echo "Stage1 complete"
#}
rm logs/all_data1.post2 #for Correctness as we are appending data in this loop
date
#Stage2
#will create a file for each host with distance and avg_rtt combining them
# Combine the per-host parallel .post1 files into two aggregates:
#   logs/all_data1.post2 : distance, avg_rtt pairs (all hosts)
#   logs/all_data        : src_ip dst_ip distance avg_rtt src_loc dst_loc
for source_host in `ls logs/*.ping`; do
if [ -s $source_host"_ips.post1" ] ; then
#paste $source_host"_ips.post1" $source_host"_averages.post1" $source_host"_distances.post1" > $source_host.post2
paste $source_host"_distances.post1" $source_host"_averages.post1" >> "logs/all_data1.post2"
paste "$source_host""_sips.post1" $source_host"_ips.post1" $source_host"_distances.post1" $source_host"_averages.post1" $source_host"_sloc.post1" $source_host"_dloc.post1" >> "logs/all_data"
#echo "`wc -l $source_host"_distances.post1"` `wc -l $source_host"_averages.post1"`"
else
echo "Do nothing" > /dev/null
fi
done
#awk add speed to all data
# speed (km/s) = distance_km * 1000 * 2 / rtt_ms  (x2: rtt is round trip)
cat logs/all_data|awk '{print $0,$3*1000*2/$4}' > logs/all_data_speed
sort -t ' ' -k 1 -n logs/all_data1.post2 > logs/all_data2.post2 #sort data based on distance(optional) ...makes it easy to verify (will make the calculation of average for same distance, simple in next step) #done before
#delete stage 1 files
#rm logs/*.post1
echo "Stage2 complete"
date
#Stage 3
#prepare a file with sorted, unique length and rtt pairs
# tr "." -> " " truncates decimals by splitting on the dot; field 1 is then
# the integer distance, field 3 the integer rtt.
cat logs/all_data2.post2 |tr ".\t" " "|cut -d " " -f1,3 > logs/all_data1.post3 #truncate part after decmimal point to approximate
#sort -t ' ' -k 1 -n logs/all_data1.post3 > logs/all_data2.post3 #sort data based on distance(optional) (will make the calculation of average for same distance, simple in next step) #done before
uniq logs/all_data1.post3 > logs/all_data2.post3 #Remove duplicate lines(duplicate distance rtt pairs)
echo "Stage3 complete"
date
#delete stage 2 files
#rm logs/*.post2
#Stage 4(Final stage)
#take averages of rtt for duplicate distance values
#Awk script in readable format
#cat logs/all_data3.post3|awk '
#BEGIN
#{
#	print "BEGIN";distance="none";avg_rtt="none"
#}
#{
#	if(distance != "none")
#	{
#		if($1 == distance)
#		{
#			avg_rtt = avg_rtt + $2;
#			avg_rtt = avg_rtt / 2;
#		}
#		else
#		{
#			print distance,avg_rtt;
#			distance=$1;avg_rtt=$2
#		}
#	}
#	else{
#		distance=$1;avg_rtt=$2
#	}
#}
#END
#{
#	print "END";print distance,avg_rtt
#}
#'
#remove unknown string to avoid unnecessary errors during average calculations
cat logs/all_data2.post3|grep unknown -v > logs/all_data1.post4
# rtt in seconds #cat logs/all_data1.post4|awk ' BEGIN { distance="none";avg_rtt="none" } { if(distance != "none") { if($1 == distance) { avg_rtt = avg_rtt + $2; avg_rtt = int(avg_rtt / 2); } else { print distance,avg_rtt/1000; distance=$1;avg_rtt=$2 } } else{ distance=$1;avg_rtt=$2 } } END { print distance,avg_rtt/1000 } ' > data/post-processed.txt
# collapse consecutive rows with the same distance into a running average
# (input is sorted by distance, so equal distances are adjacent)
cat logs/all_data1.post4|awk ' BEGIN { distance="none";avg_rtt="none" } { if(distance != "none") { if($1 == distance) { avg_rtt = avg_rtt + $2; avg_rtt = int(avg_rtt / 2); } else { print distance,avg_rtt; distance=$1;avg_rtt=$2 } } else{ distance=$1;avg_rtt=$2 } } END { print distance,avg_rtt } ' > data/post-processed.txt
#cat data/post-processed1.txt |tr ".\t" " "|cut -d " " -f1,2 > data/post-processed.txt #truncate part after decmimal point to approximate
#rm data/post-processed1.txt
#Generate CDF table
# cumulative rank per distance, then normalize by the max rank -> CDF in [0,1]
cat data/post-processed.txt | awk '{print $1, NR-1}' > logs/cdf_data.post4
cat logs/cdf_data.post4|awk ' { print $1,$2/max} ' max=`sort -n -k2 logs/cdf_data.post4 |tail -1|cut -f2 -d ' '` > data/cdf.txt
#Calcuate Speed of bits and mean of speed of bits(multiplying by 1000 to convert ms to secs, mutiplying by 2 as it is round trip time and not one way time) UNIT: km/s
cat data/post-processed.txt|awk 'BEGIN {sum=0} {if($2 != 0){speed = $1*1000*2/$2;sum += speed; print $1,$2,speed}} END {print sum/NR}' > data/post-processed_with_speed.txt
#Calculate Standard Deviation
# the last line of ..._with_speed.txt holds the mean; head -n -1 skips it
head -n -1 data/post-processed_with_speed.txt|awk 'BEGIN {sum=0}{print $1,$2,$3,$3-mean; sum+=($3-mean)*($3-mean)} END {print sqrt(sum/NR)}' mean=`tail -1 data/post-processed_with_speed.txt` > data/post-processed_with_speed_variance.txt
echo "Stage4 complete"
date
#Last cleanup
#rm logs/*.ping
| true |
fd01459642a9674f545cb25f39b3f03f3a9e430c | Shell | zenazn/dotfiles | /profile.d/40-git | UTF-8 | 453 | 3.953125 | 4 | [] | no_license | #!/bin/bash
# Jump tables for ~/git
# mkjump (defined elsewhere in these dotfiles) builds a "t" shortcut that
# jumps into subdirectories of ~/git; the heredoc lists extra named
# entries and is intentionally empty here.
mkjump t "$HOME/git" <<EOF
EOF
# Open the current repository's "origin" remote in the browser (macOS
# `open`). Handles git@host:owner/repo(.git) and https:// remote URLs.
gh() {
	local url
	# Propagate git's exit status directly. The original tested $? after an
	# intervening `[ $? != 0 ]` command, which clobbered the status: the
	# subsequent `return $?` always returned 0, silently swallowing the
	# failure.
	url=$(git config --get remote.origin.url) || return $?
	if [ "${url:0:4}" == "git@" ]; then
		# Rewrite SSH form: git@host:owner/repo(.git) -> https://host/owner/repo
		url=$(echo "$url" | sed -E 's|^git@([^:]+):([^.]+)(\.git)?$|https://\1/\2|')
	elif [ "${url:0:8}" == "https://" ]; then
		# Already a web URL; just strip a trailing .git suffix, if any.
		url="${url%.git}"
	else
		echo "What kind of remote URL is $url anyways?"
		return 1
	fi
	open "$url"
}
| true |
965e5b3709ffb4beae9276239e92a27dcbd99cb8 | Shell | robotarmy/rvm | /scripts/functions/logging | UTF-8 | 5,238 | 4.0625 | 4 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
# Logging functions
# check if user wants colors and if output goes to terminal
# rvm_pretty_print_flag:
# - 0|no - disabled always
# - 1|auto - automatic depending if the output goes to terminal (default)
# - 2|force - forced always
# to select which terminal output should be checked use first param:
# - stdout - for stdout (default)
# - stderr - for stderr
# - number - for the given terminal fd
# - else - for both stdout and stderr
# Decide whether coloured ("pretty") output should be produced.
#
# rvm_pretty_print_flag: 0|no = never, 2|force = always,
# 1|auto (default, assigned here if unset) = only when the requested
# stream is a real terminal. Any other flag value answers "yes",
# matching the historical behaviour.
#
# $1 selects the stream checked in auto mode: stdout, stderr, a numeric
# fd, "any" (either std stream), or anything else (both std streams).
rvm_pretty_print()
{
  typeset __flag __target
  __flag="${rvm_pretty_print_flag:=auto}"
  __target="${1:-}"

  case "${__flag}" in
    (0|no)    return 1 ;;
    (2|force) return 0 ;;
  esac
  [[ "${__flag}" == "1" || "${__flag}" == "auto" ]] || return 0

  # auto mode from here on: dumb terminals never get colour codes.
  [[ "${TERM:-dumb}" != "dumb" && "${TERM:-dumb}" != "unknown" ]] || return 1

  case "${__target}" in
    (stdout)      [[ -t 1 ]] ;;
    (stderr)      [[ -t 2 ]] ;;
    ([[:digit:]]) [[ -t ${__target} ]] ;;
    (any)         [[ -t 1 || -t 2 ]] ;;
    (*)           [[ -t 1 && -t 2 ]] ;;
  esac
}
# Initialise the colour escape variables once per shell. Dumb/unknown
# terminals get empty strings (no colour); otherwise the external `color`
# helper script produces the escapes, re-run on `rvm reload`.
case "${TERM:-dumb}" in
  (dumb|unknown)
    rvm_error_clr=""
    rvm_warn_clr=""
    rvm_debug_clr=""
    rvm_notify_clr=""
    rvm_reset_clr=""
    ;;
  (*)
    if
      [[ -z "${rvm_error_clr:-}" || ${rvm_reload_flag:-0} -eq 1 ]]
    then
      export rvm_error_clr rvm_warn_clr rvm_debug_clr rvm_notify_clr rvm_reset_clr
      rvm_error_clr="$( "${rvm_scripts_path:-${rvm_path}/scripts}/color" "${rvm_error_color:-red}" )"
      rvm_warn_clr="$( "${rvm_scripts_path:-${rvm_path}/scripts}/color" "${rvm_warn_color:-yellow}" )"
      rvm_debug_clr="$( "${rvm_scripts_path:-${rvm_path}/scripts}/color" "${rvm_debug_color:-magenta}" )"
      rvm_notify_clr="$( "${rvm_scripts_path:-${rvm_path}/scripts}/color" "${rvm_notify_color:-green}" )"
      rvm_reset_clr="$( "${rvm_scripts_path:-${rvm_path}/scripts}/color" "${rvm_reset_color:-reset}" )"
    fi
    ;;
esac
# Print an error message to stderr, colourised when stderr pretty
# printing is enabled. printf %b honours backslash escapes in the
# message, exactly as before.
rvm_error()
{
  typeset __message="$*"
  if rvm_pretty_print stderr
  then __message="${rvm_error_clr:-}${__message}${rvm_reset_clr:-}"
  fi
  printf "%b" "${__message}\n" >&2
}
# Delegate to the external rvm help script, forwarding all arguments.
rvm_help()
{
  "${rvm_scripts_path}/help" "$@"
}
# Print an error message ($1), then show the relevant help topic
# (remaining arguments are forwarded to rvm_help).
rvm_error_help()
{
  rvm_error "$1"
  shift
  rvm_help "$@"
}
# Print an error message ($1) and terminate the process with exit
# status $2 (default 1).
rvm_fail()
{
  rvm_error "$1"
  exit "${2:-1}"
}
# Print a warning message to stdout, colourised when stdout pretty
# printing is enabled. printf %b honours backslash escapes in the
# message, exactly as before.
rvm_warn()
{
  typeset __message="$*"
  if rvm_pretty_print stdout
  then __message="${rvm_warn_clr:-}${__message}${rvm_reset_clr:-}"
  fi
  printf "%b" "${__message}\n"
}
# Print a debug message to stderr, but only when rvm_debug_flag is set.
# Colourised when stderr pretty printing is enabled.
rvm_debug()
{
  (( ${rvm_debug_flag:-0} )) || return 0
  typeset __message="$*"
  if rvm_pretty_print stderr
  then __message="${rvm_debug_clr:-}${__message}${rvm_reset_clr:-}"
  fi
  printf "%b" "${__message}\n" >&2
}
# Copy stdin to stderr when debugging/tracing is enabled, wrapping each
# line in the debug colour if stdout pretty printing is on. The colour
# escapes are spliced into the awk program text; `\cat` bypasses any
# user alias for cat.
rvm_debug_stream()
{
  (( ${rvm_debug_flag:-0} || ${rvm_trace_flag:-0} )) || return 0
  if rvm_pretty_print stdout
  then \cat - | awk '{print "'"${rvm_debug_clr:-}"'"$0"'"${rvm_reset_clr:-}"'"}'
  else \cat -
  fi >&2
}
# Print an informational message to stdout, colourised when stdout
# pretty printing is enabled. printf %b honours backslash escapes in
# the message, exactly as before.
rvm_log()
{
  typeset __message="$*"
  if rvm_pretty_print stdout
  then __message="${rvm_notify_clr:-}${__message}${rvm_reset_clr:-}"
  fi
  printf "%b" "${__message}\n"
}
# Run a command ($2..) with its output replaced by a progress-dot display
# titled $1, failing if either the command or the dot filter fails.
# PIPESTATUS (bash) / pipestatus (zsh) is inspected because the pipe
# would otherwise hide the command's own exit status.
__rvm_dotted_run()
{
  typeset __message="$1"
  shift
  "$@" | __rvm_dotted "${__message}" || return $?
  __rvm_check_pipestatus ${PIPESTATUS[@]} ${pipestatus[@]} || return $?
}
# Progress-dot filter: print one dot per input line (or per Nth line when
# rvm_show_every_nth_dot is set), with an optional coloured title ($@)
# printed before the first dot — or alone when there was no input at all.
# Probes whether this awk spells its flush builtin `fflush` or `flush`,
# then splices the chosen name (and the title/colour strings) directly
# into the awk program text.
__rvm_dotted()
{
  typeset flush __show_nth_dot
  __show_nth_dot=${rvm_show_every_nth_dot:-1}
  unset rvm_show_every_nth_dot
  if (( __show_nth_dot == 1 ))
  then __show_nth_dot=""
  else __show_nth_dot="if (NR%${__show_nth_dot}==1)"
  fi
  if awk '{fflush;}' <<<EO 2>/dev/null
  then flush=fflush
  else flush=flush
  fi
  if (( $# ))
  then awk '{if (NR==1){printf "'"${rvm_notify_clr:-}$*${rvm_reset_clr:-}"'"}; '"$__show_nth_dot"' printf "."; '$flush';} END{if (NR>0) print ""; else print "'"${rvm_notify_clr:-}$*${rvm_reset_clr:-}"'"}'
  else awk '{printf "."; '$flush';} END{if (NR>0) print ""}'
  fi
}
# Like __rvm_dotted_run, but also append the command's combined output to
# log file $1 via tee while showing progress dots titled $2.
# $1 = log file, $2 = title, $3.. = command to run.
__rvm_log_dotted()
{
  typeset __log_file __message __iterator
  __log_file="$1"
  __message="$2"
  shift 2
  "$@" 2>&1 | tee -a "${__log_file}" | __rvm_dotted "${__message}" || return $?
  __rvm_check_pipestatus ${PIPESTATUS[@]} ${pipestatus[@]} || return $?
}
# Return the first non-zero status among the given pipeline statuses.
# Empty arguments and zeros count as success; with no failures (or no
# arguments at all) the result is 0.
__rvm_check_pipestatus()
{
  typeset __status
  for __status in "$@"
  do
    if [[ -n "${__status}" && "${__status}" != "0" ]]
    then return ${__status}
    fi
  done
  return 0
}
# Run a command ($2..) with rvm_show_every_nth_dot=$1 exported so a
# downstream __rvm_dotted prints one dot per N input lines; the variable
# is unset afterwards and the command's exit status is preserved.
__rvm_every_nth_dot()
{
  typeset result=0
  export rvm_show_every_nth_dot=$1 # show dot for every nth line
  shift
  "$@" || result=$?
  unset rvm_show_every_nth_dot
  return $result
}
# Optionally print a prompt ($1), then block until the user presses any
# single key (silently, no echo). zsh's read spells the single-character
# flag -k where bash spells it -n, hence the probe on ZSH_VERSION.
__rvm_wait_anykey()
{
  if [[ -n "${1:-}" ]]
  then echo "$1"
  fi
  typeset _read_char_flag
  if [[ -n "${ZSH_VERSION:-}" ]]
  then _read_char_flag=k
  else _read_char_flag=n
  fi
  builtin read -${_read_char_flag} 1 -s -r anykey
}
# Print a horizontal rule of '*' characters sized to the terminal width
# (COLUMNS, default 78), capped at 116 columns.
__rvm_table_br()
{
  typeset __cols
  __cols=${COLUMNS:-78}
  if (( __cols > 116 ))
  then __cols=116
  fi
  printf "%${__cols}s\n" "" | tr ' ' '*'
}
# Wrap stdin to $1 columns, breaking at spaces when this platform's
# fold(1) supports -s, falling back to hard wrapping otherwise.
__rvm_fold()
{
  if echo bla | fold -s -w 10 >/dev/null
  then fold -s -w "$1"
  else fold -w "$1"
  fi
}
# Wrap stdin to the table's inner width and frame each line as
# "* <content> *". Inner width is terminal width (capped at 116) minus
# 4 characters for the frame.
__rvm_table_wrap_text()
{
  typeset width=${COLUMNS:-78}
  width=$(( width > 116 ? 116 : width ))
  width=$(( width - 4 )) # "* <content> *"
  __rvm_fold $width | awk -v width=$width '{printf "* %-"width"s *\n", $0}'
}
# Render stdin (or file $2) as a starred table with an optional header.
# Usage: echo text | __rvm_table [header] [file]
# The "${2:--}" default makes \cat read stdin when no file is given.
__rvm_table()
{
  if
    [[ -n "${1:-}" ]]
  then
    __rvm_table_br
    echo "$1" | __rvm_table_wrap_text
  fi
  __rvm_table_br
  \cat "${2:--}" | __rvm_table_wrap_text
  __rvm_table_br
}
| true |
92bf88fe788324e9f0113b0ef50af36e0e312a31 | Shell | rheehot/openSUSE | /packages/g/grep/profile.sh | UTF-8 | 522 | 2.8125 | 3 | [] | no_license | #!/bin/sh
# profiling script for profile-guided-optimizations (PGO)
# must be fully deterministic in what it does for reproducible builds
# should cover most code for good PGO optimization benefit
# See https://github.com/bmwiedemann/theunreproduciblepackage/tree/master/pgo
# for background information on PGO reproducibility
# Freshly built grep binary under test (run from the build tree).
grep=src/grep
# Fixed input corpus — keeps the profiling run deterministic.
t=COPYING
# Discard all match output; only the execution profile matters.
exec > /dev/null
# Exercise the major option/matcher code paths (inverse, case folding,
# BRE/ERE/fixed-string/PCRE engines, counting, listing modes) with one
# literal and one wildcard pattern each.
for param in "" "-v" "-i" "-h" "-H" "-l" "-L" "-q" "-n" "-Z" "-E" "-F" "-P" "-e" "-w" "-c" "-o" ; do
    $grep $param "GNU" $t
    $grep $param "G.*U" $t
done
| true |
7ff16f7a6b1d3e7e1fc905313e6773720a5be575 | Shell | litentry/litentry-node | /scripts/start-devnet.sh | UTF-8 | 958 | 3.765625 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Path of the node binary to run; filled in below.
EXECUTOR=
BINARY=litentry-node
# 1. Locate the project workspace (parent of this script's directory).
SCRIPT_DIR="$(cd -- "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P)"
CWD=$(dirname $SCRIPT_DIR)
# 2. Determine the executor; prefer the release build over debug.
if [[ -f $CWD/target/release/$BINARY ]]
then
    EXECUTOR=$CWD/target/release/$BINARY
elif [[ -f $CWD/target/debug/$BINARY ]]
then
    EXECUTOR=$CWD/target/debug/$BINARY
else
    echo "No available binary found. Exiting..."
    exit 1
fi
# 2.1 Verify the Rust toolchain environment; bail out if the check fails.
. $SCRIPT_DIR/check-rust-env.sh || exit 1
# 3. Execute
echo "Exector: $EXECUTOR"
# Repeatedly SIGTERM every running $BINARY process and poll once per
# second until none remain. Reads the global BINARY variable.
stopNodes() {
    local numOfProcess=-1
    while [ "$numOfProcess" -ne "0" ]; do
        echo "Killing $BINARY ..."
        # Quote the name so it reaches pkill as a single argument.
        pkill -- "$BINARY"
        sleep 1
        # Count surviving processes. `grep -v -c grep` filters out the
        # grep process itself and counts in one step (replaces the
        # unquoted `grep $BINARY | grep -v grep | wc -l` pipeline and
        # the legacy backtick substitution).
        numOfProcess=$(ps aux | grep -- "$BINARY" | grep -v -c grep)
    done
}
# Stop any previously running nodes before launching a fresh dev chain.
stopNodes
echo "Starting dev node ..."
# --tmp: throwaway chain state; --dev: development chain with Alice as
# the authority; RPC/WS exposed on all interfaces with unsafe methods and
# open CORS — acceptable for local development only.
$EXECUTOR --tmp --dev --rpc-external --ws-external --rpc-methods Unsafe --rpc-cors all --alice
| true |
553ab76e403b056cd5775d96f505a1353b106ae4 | Shell | petronny/aur3-mirror | /stem-git/PKGBUILD | UTF-8 | 662 | 2.9375 | 3 | [] | no_license | # Contributor: Spider.007 <archlinux AT spider007 DOT net>
# Package metadata consumed by makepkg.
pkgname=stem-git
pkgver=20130328
pkgrel=1
pkgdesc="Python controller library for Tor"
arch=('i686' 'x86_64')
url="https://stem.torproject.org/"
license=('LGPL3')
makedepends=('git')
optdepends=('tor: you need a tor-server to talk to')
_gitroot=('https://git.torproject.org/stem.git')
_gitname=('stem')

# Clone (or update) the upstream repository, then build and install into
# $pkgdir. NOTE(review): installing inside build() instead of a separate
# package() is legacy AUR3 style; modern PKGBUILDs split these steps.
build() {
  cd $srcdir
  msg "Connecting to githup.com GIT server...."
  if [ -d $_gitname ] ; then
    cd $_gitname && git pull origin && cd ..
    msg "Updated local checkout"
  else
    git clone $_gitroot
  fi
  cd $srcdir/$_gitname
  python setup.py install --root="$pkgdir/" --optimize=1
}
| true |
220e260d34723407138a5c5b2af75d321808c178 | Shell | veekoo/weatherBandit | /concatVideos | UTF-8 | 406 | 2.84375 | 3 | [] | no_license | #!/bin/bash
# Load configuration (provides $pathToFile).
source .config
# List every mp4 clip and prefix each path with the word "file" — the
# line format ffmpeg's concat demuxer expects in its list file.
ls ${pathToFile}/*mp4 | perl -pe 's,(.*),file \1,' > /tmp/files
# Extract the unique per-day name stems (the "<name>-<digits>" part
# before the final "-…") from the clip paths, then for each day: write a
# concat list of that day's clips to /tmp/day and emit an ffmpeg concat
# command into /tmp/s, producing one merged <day>.mp4 per day.
cat /tmp/files | perl -ne 'print "$1\n" if (/.*\/(.*?-\d+)-.*?\.mp4/);' | sort | uniq | xargs -I XXX -n 1 echo grep XXX- /tmp/files \> /tmp/day \; ffmpeg -f concat -safe 0 -protocol_whitelist "file,http,https,tcp,tls" -i /tmp/day -c copy ${pathToFile}/XXX.mp4 > /tmp/s
# Run the generated batch of ffmpeg commands, then clean up temp files.
. /tmp/s
rm -f /tmp/files /tmp/day /tmp/s
| true |
1fd5988d2940b7f46bfdef43b1a8ce1aac1a332a | Shell | breuleux/myia | /scripts/ci-install.sh | UTF-8 | 1,749 | 3.59375 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Trace every command and abort on first failure — useful in CI logs.
set -x
set -e

# Device variant for the conda environment files; --gpu flips this below.
DEV=cpu
# Parse command-line flags; only --gpu is recognised.
while [[ $# -gt 0 ]]; do
    key="$1"

    case $key in
        --gpu)
            DEV=gpu
            shift
            ;;
        *)
            # Consume unrecognised arguments instead of spinning forever:
            # the original only shifted on --gpu, so any other argument
            # left $# unchanged and made this loop run indefinitely.
            shift
            ;;
    esac
done
# Install miniconda once; subsequent CI runs reuse the cached directory.
if [ ! -d $HOME/miniconda ]; then
    wget -nv https://repo.continuum.io/miniconda/Miniconda3-latest-`uname -s`-`uname -m`.sh -O miniconda.sh
    bash miniconda.sh -b -p $HOME/miniconda
fi
export PATH="$HOME/miniconda/bin:$PATH"
hash -r
# Non-interactive conda, then recreate a clean "test" environment.
conda config --set always_yes yes --set changeps1 no
conda update -q conda
conda install pip
conda init
. $HOME/miniconda/etc/profile.d/conda.sh
conda env remove -n test
conda create -n test python=3.7
# Activate conda environment and install poetry.
conda activate test
curl -sSL https://raw.githubusercontent.com/python-poetry/poetry/master/get-poetry.py | python
source $HOME/.poetry/env
# Install myia_utils. It's used later to re-generate conda environment files.
cd myia_utils
poetry install
cd ..
# Install myia, backend and frontend plugins using poetry.
poetry install
cd myia_backend_pytorch
poetry install
cd ../myia_backend_python
poetry install
cd ../myia_backend_relay
poetry install
cd ../myia_frontend_pytorch
poetry install
cd ..
# Complete installation with specific conda packages using environment files.
# Re-generate environment files before using them; device-specific
# components use the environment-${DEV}.yml variants.
./scripts/gen_conda_env_file.sh
conda env update --file environment.yml
cd myia_backend_pytorch
./scripts/gen_conda_env_file.sh
conda env update --file environment-${DEV}.yml
cd ../myia_backend_python
./scripts/gen_conda_env_file.sh
conda env update --file environment.yml
cd ../myia_backend_relay
./scripts/gen_conda_env_file.sh
conda env update --file environment.yml
cd ../myia_frontend_pytorch
./scripts/gen_conda_env_file.sh
conda env update --file environment-${DEV}.yml
cd ..
| true |
bf3715bc6f5c2c6f53480f47405003272517c022 | Shell | skiggety/skiggety-utils | /bin/git-ready | UTF-8 | 1,487 | 3.875 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
. $SKIGGETY_UTILS_DIR/lib/skiggety-utils.bash
# Interactive commit-and-push workflow: stage hunks, confirm, commit,
# pull, push (with confirmation), then show the last few commits.
# Options: -n is forwarded to `git commit -n` (skip hooks).
function main {
    while [[ "$1" == -* ]]; do
        # accept -n for git commit -n
        if [ "$1" == "-n" ]; then
            GIT_COMMIT_OPTIONS="$GIT_COMMIT_OPTIONS $1"
            shift
        # TODO: -m "commit comment"
        else
            exit_with_error "option not supported: $1"
        fi
    done
    # Interactively stage hunks.
    run-or-fail git add -p
    # TODO^4: just exit 0 right here unless there are changes staged
    shellask "Are your changes what you want? (use 'git diff --staged' or 'git vimdiff --staged')" || \
        exit_with_error "Prepare your changes the way you want and then try $(basename $0) again."
    echo
    run-or-fail git commit $GIT_COMMIT_OPTIONS
    # TODO: just exit 0 right here unless there are changes staged (yes, we're checking again because the user might
    # have changed something during shellask
    # TODO^3: if the remote branch doesn't exist yet, we should skip trying git pull, since it will fail anyway
    echo
    git pull
    echo
    # Pushing is destructive-ish, so ask for confirmation first.
    hesitate-and-run-or-fail git push -u
    echo
    git -c color.ui=always --no-pager log --oneline --decorate=short origin/HEAD~~..HEAD -n 3
    echo
    echo "...finished '$(basename "$0")'."
}
# TODO^2: move/reuse?
# Run the given command verbatim, aborting the script (via
# exit_with_error from skiggety-utils) if it fails. "$@" is quoted so
# arguments containing spaces or glob characters are forwarded intact —
# the original unquoted $@ re-split them.
function run-or-fail {
    "$@" || exit_with_error "Command \"$@\" FAILED"
}
# TODO^2: move/reuse?
# Like run-or-fail, but delegates to hesitate-and-run (from
# skiggety-utils) so the user can confirm before the command executes.
# "$@" is quoted so arguments with spaces survive — the original
# unquoted $@ re-split them.
function hesitate-and-run-or-fail {
    hesitate-and-run "$@" || exit_with_error "Command \"$@\" FAILED"
}
# Entry point: run the workflow with all script arguments.
main "$@"
| true |
1653a57989c4f28578ceb021b56660768f0a6155 | Shell | aviralnimbekar/coding-club | /day5/assignments/q5-NumberIntoWords.sh | UTF-8 | 509 | 3.359375 | 3 | [] | no_license | #!/bin/bash -x
# Read a single digit from the user and print its English name.
read -p "Enter a single digit number: " Num
# Pattern matching (rather than the original chain of `(( $Num == N ))`
# arithmetic tests) also handles empty and non-numeric input gracefully:
# arithmetic evaluation raised a syntax error on "" and on values such
# as "abc" or "09" (invalid octal), whereas case simply falls through to
# the catch-all message.
case "$Num" in
    0) echo "Zero" ;;
    1) echo "One" ;;
    2) echo "Two" ;;
    3) echo "Three" ;;
    4) echo "Four" ;;
    5) echo "Five" ;;
    6) echo "Six" ;;
    7) echo "Seven" ;;
    8) echo "Eight" ;;
    9) echo "Nine" ;;
    *) echo "Please enter single digit number" ;;
esac
| true |
6a2f96bbead84ba27f8ff9c05c5b262b487f1873 | Shell | degeable/University-Projects | /Unix for devs/lab5/2.sh | UTF-8 | 285 | 3.28125 | 3 | [] | no_license | #!/bin/sh
# Filter population CSV rows for one country and year range, then plot.
# Usage: 2.sh INPUTFILE COUNTRYCODE START END
# (Exit status 0 on usage is preserved for compatibility with callers.)
if [ "$#" -ne 4 ]; then
    echo "Usage $0 INPUTFILE COUNTRYCODE START END"
    exit 0
fi
# Pass the shell arguments into awk via -v, quoted so values containing
# spaces or glob characters survive word splitting (they were unquoted).
# Column 2 = country code, column 3 = year, column 4 = population.
awk -v code="$2" -v start="$3" -v end="$4" -F ',' '{if(($2 == code) && ($3>=start) && ($3<=end)){print $3 " "$4;}}' "$1" > filtered.data
gnuplot pop.conf
10da1ca1b9aa6bab1c4e1f79e339ffb095df0405 | Shell | LiTianjue/DLP | /managment/depends/bro/build-bro-2.5.sh | UTF-8 | 754 | 2.96875 | 3 | [] | no_license | #!/bin/bash
WD=`pwd`
# Snapshot the installed package list so the cleanup step can diff later.
dpkg -l | awk '{print $2}' | sort > old.txt
# 1. Install the build dependencies.
apt-get update && apt-get upgrade -y && echo `date`
apt-get -y install build-essential git bison flex gawk cmake swig libssl-dev libgeoip-dev libpcap-dev python-dev libcurl4-openssl-dev wget libncurses5-dev ca-certificates --no-install-recommends
# 2. Download and compile Bro.
VER=2.5
${WD}/common/buildbro ${VER} http://www.bro.org/downloads/bro-${VER}.tar.gz
ln -s /usr/local/bro-${VER} /usr/local//bro
#
## Final setup stuff
#
cp ${WD}/common/bro_profile.sh /etc/profile.d/bro.sh
source /etc/profile.d/bro.sh
#
## Cleanup, so docker-squash can do it's thing
#
dpkg -l | awk '{print $2}' | sort > new.txt
apt-get clean
rm -rf /var/lib/apt/lists/* /tmp/*
8553807c80039033df32ab8cc9ce454610b6ba5e | Shell | MNMdiagnostics/NaszeGenomy | /vcf_processing/denovo_extract.sh | UTF-8 | 296 | 2.59375 | 3 | [] | no_license | TRIO=$1
# Proband sample name = third comma-separated member of the trio
# (mother,father,proband). $() replaces the legacy backticks, and all
# expansions are quoted (they were unquoted and subject to splitting).
PROBAND=$(echo "$TRIO" | cut -d, -f3)
MULITSAMPLE_VCF=/tmp/multisample_20210716.dv.bcfnorm.filt.vcf.gz
echo "$TRIO" "$PROBAND"
# Pipeline: 1) keep PASS/unfiltered records restricted to the trio's
# samples, 2) drop sites with no alternate allele among them, 3) keep
# only Mendelian-inconsistent records (de novo candidates) as the
# proband's output VCF.
bcftools view -f "PASS,." -s "$TRIO" -Ou "$MULITSAMPLE_VCF" \
| bcftools view -e 'INFO/AC<1' -Ou \
| bcftools +mendelian -t "$TRIO" -mx -Oz -o "${PROBAND}.mendelian.vcf.gz"
| true |
1430de154f668948cea5933b29e90cc59e3124f8 | Shell | enots227/kafka-custom-sink-connector | /kafka/kafka-start.sh | UTF-8 | 972 | 2.90625 | 3 | [
"MIT"
] | permissive | export SCALA_VERSION="2.13"
export KAFKA_VERSION="2.7.0"
export KAFKA_HOME=/opt/kafka_$SCALA_VERSION-$KAFKA_VERSION
# Kafka Environment Configuration ########
cd $KAFKA_HOME/config
# set zookeeper.connect ===========
FIND="^zookeeper.connect=.*$"
REPLACE="zookeeper.connect=${KAFKA_ZOOKEEPER_CONNECT}"
sed -i "s/${FIND}/${REPLACE}/" server.properties
# set broker.id ===================
FIND="^broker.id=[[:digit:]]*$"
REPLACE="broker.id=${KAFKA_BROKER_ID}"
sed -i "s/${FIND}/${REPLACE}/" server.properties
# set advertised.listeners ========
# ensures / -> \/ (forward slashes escape the sed find and replace)
KAFKA_ADVERTISED_LISTENERS=$(echo $KAFKA_ADVERTISED_LISTENERS | sed "s/\//\\\\\//g")
FIND="^#\{0,1\}advertised.listeners=.*$"
REPLACE="advertised.listeners=${KAFKA_ADVERTISED_LISTENERS}"
sed -i "s/${FIND}/${REPLACE}/" server.properties
# Start Kafka ############################
$KAFKA_HOME/bin/kafka-server-start.sh $KAFKA_HOME/config/server.properties
| true |
b103992eaea0098b345e595509b5f58787dd021a | Shell | hurwitzlab/cyverse-file-download | /stampede/run.sh | UTF-8 | 2,155 | 3.46875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
#SBATCH -A iPlant-Collabs
#SBATCH -p normal
#SBATCH -t 24:00:00
#SBATCH -N 1
#SBATCH -n 1
#SBATCH -J cyvrsdl
module load launcher/3.2
set -u
FILES_LIST=""
DEST_DIR=""
UNCOMPRESS=0
PARAMRUN="$TACC_LAUNCHER_DIR/paramrun"
export LAUNCHER_PLUGIN_DIR="$TACC_LAUNCHER_DIR/plugins"
export LAUNCHER_WORKDIR="$PWD"
export LAUNCHER_RMI="SLURM"
export LAUNCHER_SCHED="interleaved"
# Print the number of lines in the given file; print nothing and return
# non-zero when the argument is not a regular file.
function lc() {
    local file="$1"
    if [[ -f "$file" ]]
    then wc -l "$file" | cut -d ' ' -f 1
    else return 1
    fi
}
# Print usage information and exit with status $1 (default 0).
function USAGE() {
    printf "Usage:\\n  %s -f FILES_LIST -d DEST_DIR\\n\\n" "$(basename "$0")"

    echo "Required arguments:"
    echo " -d DEST_DIR"
    echo " -f FILES_LIST"
    echo
    echo "Options:"
    echo " -z (UNCOMPRESS default $UNCOMPRESS)"
    exit "${1:-0}"
}
# No arguments at all is a usage error.
[[ $# -eq 0 ]] && USAGE 1

# Parse options: -d destination (required), -f file list (required),
# -z uncompress downloaded files, -h help.
while getopts :d:f:zh OPT; do
    case $OPT in
        d)
            DEST_DIR="$OPTARG"
            ;;
        f)
            FILES_LIST="$OPTARG"
            ;;
        z)
            UNCOMPRESS=1
            ;;
        h)
            USAGE
            ;;
        :)
            echo "Error: Option -$OPTARG requires an argument."
            exit 1
            ;;
        \?)
            echo "Error: Invalid option: -${OPTARG:-""}"
            exit 1
    esac
done

if [[ -z "$DEST_DIR" ]]; then
    echo "-d DEST_DIR is required"
    exit 1
fi

if [[ -z "$FILES_LIST" ]]; then
    echo "-f FILES_LIST is required"
    exit 1
fi

if [[ ! -f "$FILES_LIST" ]]; then
    echo "FILES_LIST \"$FILES_LIST\" is not a regular file"
    exit 1
fi

# Create the iRODS destination collection.
imkdir "$DEST_DIR"

# Build the launcher parameter file: one get.sh invocation per input file.
PARAM="$$.param"

i=0
while read -r FILE; do
    i=$((i+1))
    BASENAME=$(basename "$FILE")
    printf "%3d: %s\\n" $i "$BASENAME"
    echo "sh $PWD/get.sh $FILE $DEST_DIR $UNCOMPRESS" >> "$PARAM"
done < "$FILES_LIST"

NJOBS=$(lc "$PARAM")

if [[ $NJOBS -lt 1 ]]; then
    echo "No files to get!"
    exit 1
fi

echo "Will download \"$NJOBS\" to \"$DEST_DIR\""

LAUNCHER_JOB_FILE="$PARAM"

# Cap the per-node process count at 16.
if [[ $NJOBS -gt 16 ]]; then
    LAUNCHER_PPN=16
else
    LAUNCHER_PPN=$NJOBS
fi

export LAUNCHER_JOB_FILE
export LAUNCHER_PPN
$PARAMRUN
echo "Ended LAUNCHER $(date)"

rm "$PARAM"

echo "Done, comments to Ken Youens-Clark kyclark@email.arizona.edu"
| true |
6bf060820a130cb0fb46de73ac9bc68b122ec1ff | Shell | luiarthur/cytof5 | /sims/vb/sim.sh | UTF-8 | 1,077 | 3.421875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Load parallelizer script
source ../sim_study/engine.sh
# Results directory
RESULTS_DIR=$1
# AWS Bucket to store results
AWS_BUCKET=$2
# Maximum number of cores to use
MAX_CORES=15
# STAGGER_TIME in seconds. To avoid mass dumping to disk simultaneously.
STAGGER_TIME=0
# SIMDATA PATH
SIMDAT_PATH=(
[5]="../sim_study/simdata/kills-flowsom/N500/K5/90/simdat.bson"
[10]="../sim_study/simdata/kills-flowsom/N5000/K10/1/simdat.bson"
)
# K
K="5 10"
# Seeds to use
SEEDS=`seq -w 10`
# TODO
BATCHSIZES="100 500 2000"
K_VB="30 10 5"
for bs in $BATCHSIZES; do
for k_vb in $K_VB; do
for k in $K; do
for seed in $SEEDS; do
# Experiment name
EXP_NAME=K${k}/BS${bs}/K_VB${k_vb}/$seed
# Dir for experiment results
EXP_DIR=$RESULTS_DIR/$EXP_NAME/
mkdir -p $EXP_DIR
echo $EXP_DIR
# julia command to run
jlCmd="julia vb_sim.jl $seed $EXP_DIR ${SIMDAT_PATH[$k]} $k_vb $bs"
engine $RESULTS_DIR $AWS_BUCKET $EXP_NAME "$jlCmd" $MAX_CORES $STAGGER_TIME
done
done
done
done
| true |
a5c7a8910614c307851005c98d2041caa4efe249 | Shell | pkgix/pkgix-repo | /pkgs/dev/python2-pip | UTF-8 | 713 | 2.8125 | 3 | [] | no_license |
# pkgix package recipe for python2-pip. source_pkg pulls in the shared
# build helpers (fetch_extract etc.) used by the hook functions below.
source_pkg ".functions/build" "${repo}"

version=1.5.6
description="An easy_install replacement for installing pypi python packages"
depends=("dev/python2" "dev/python2-setuptools")
website="http://www.pip-installer.org"
license=('MIT')

# Always report "not installed" so the package manager (re)installs it.
isinstalled() {
	return 1
}

# This recipe is compatible with every host.
iscompat() {
	return 0
}

# Download + verify the source tarball, then build in place.
build() {
	fetch_extract \
		"http://pypi.python.org/packages/source/p/pip/pip-${version}.tar.gz" \
		"b1a4ae66baf21b7eb05a5e4f37c50c2706fa28ea1f8780ce8efe14dcd9f1726c"
	cd "pip-${version}"
	python2 setup.py build
}

# Install into the staging root and force the scripts' shebangs to the
# env-resolved python2 (setup.py bakes in an absolute interpreter path).
installenv() {
	cd "pip-${version}"
	python2 setup.py install --prefix="${prefix}/usr" --root="${destdir}"
	sed -i 's|^#!/.*$|#!/usr/bin/env python2|' "${dest_prefix}/usr/bin/"*
}

# vim: set ft=sh :
| true |
172387d25596a39617f057e977e77a905710679d | Shell | guodongxiaren/mybin | /mvcgi.sh | UTF-8 | 181 | 2.890625 | 3 | [] | no_license | #! /bin/sh
# Install a CGI script into the macOS Apache CGI directory.
# Usage: mvcgi FILE [SUBDIR]
CGIPATH='/Library/WebServer/CGI-Executables'
if [ $# -eq 2 ];then
    # Create the target subdirectory with sudo as well — CGIPATH is
    # root-owned, so the original unprivileged mkdir failed before the
    # copy ever ran. Expansions are quoted against spaces in paths.
    sudo mkdir -p "$CGIPATH/$2"
    sudo cp "$1" "$CGIPATH/$2"
elif [ $# -eq 1 ];then
    sudo cp "$1" "$CGIPATH/"
fi
| true |
836f1a03583c402c9028e91443d3f1ea71e3bb47 | Shell | ToucanToco/toucan-connectors | /toucan_connectors/install_scripts/mssql_TLSv1_0.sh | UTF-8 | 926 | 3.125 | 3 | [
"BSD-3-Clause"
] | permissive | #!/bin/bash
set -e

# Downgrade the system OpenSSL policy so legacy SQL Server endpoints that
# only speak TLSv1.0 remain reachable: lower SECLEVEL and MinProtocol if
# present, otherwise append a fresh system_default section.
if grep -q 'TLSv1' /etc/ssl/openssl.cnf; then
    if grep -q 'TLSv1.2' /etc/ssl/openssl.cnf; then
        sed -i 's/DEFAULT@SECLEVEL=2/DEFAULT@SECLEVEL=1/g' /etc/ssl/openssl.cnf
        sed -i 's/TLSv1.2/TLSv1.0/g' /etc/ssl/openssl.cnf
    fi
else
    echo "[system_default_sect]" >> /etc/ssl/openssl.cnf
    echo "MinProtocol = TLSv1.0" >> /etc/ssl/openssl.cnf
    echo "DEFAULT@SECLEVEL=1" >> /etc/ssl/openssl.cnf
fi

# Marker file makes the (slow) driver installation below idempotent.
if [[ -e ~/mssql-installed ]]; then
    echo "MSSQL connector dependencies are already installed."
    exit
fi

# Register the Microsoft apt repository for this distro/release and
# install the ODBC driver stack.
apt-get update
apt-get install -fyq gnupg curl
curl https://packages.microsoft.com/keys/microsoft.asc | apt-key add -
source /etc/os-release &&\
    curl "https://packages.microsoft.com/config/${ID}/${VERSION_ID}/prod.list" \
    | tee /etc/apt/sources.list.d/mssql-release.list
apt-get update
ACCEPT_EULA=Y apt-get -y install msodbcsql17 unixodbc-dev
touch ~/mssql-installed
| true |
f4ee6dab04cb208a40ab65664caeb79e9ca19694 | Shell | m-atthieu/hugin-external-scripts | /python.sh | UTF-8 | 2,399 | 3.203125 | 3 | [
"BSD-3-Clause"
] | permissive | # ------------------
# python2.7
# ------------------
# $Id: $
# Copyright (c) 2007, Ippei Ukai
# prepare
source ../scripts/functions.sh
check_SetEnv
# export REPOSITORYDIR="/PATH2HUGIN/mac/ExternalPrograms/repository" \
# ARCHS="ppc i386" \
# ppcTARGET="powerpc-apple-darwin8" \
# i386TARGET="i386-apple-darwin8" \
# ppcMACSDKDIR="/Developer/SDKs/MacOSX10.4u.sdk" \
# i386MACSDKDIR="/Developer/SDKs/MacOSX10.4u.sdk" \
# ppcONLYARG="-mcpu=G3 -mtune=G4" \
# i386ONLYARG="-mfpmath=sse -msse2 -mtune=pentium-m -ftree-vectorize" \
# ppc64ONLYARG="-mcpu=G5 -mtune=G5 -ftree-vectorize" \
# OTHERARGs="";
# -------------------------------
# 20120418.0 hvdw build python as part of Hugin
# -------------------------------
# init
# Abort the build, reporting which step failed.
fail()
{
  printf '%s\n' "** Failed at $1 **"
  exit 1
}
mkdir -p "$REPOSITORYDIR/bin";
mkdir -p "$REPOSITORYDIR/lib";
mkdir -p "$REPOSITORYDIR/include";

# Derive version strings from the source directory name; _PY_MAJ names
# the installed framework (Python27 / Python3).
case $(basename $(pwd)) in
 "Python-2.7.5")
  _PY_VER=2.7
  _PY_MAJ=27
  ;;
 "Python-3.3.2")
  _PY_VER=3.3
  _PY_MAJ=3
  ;;
 *)
  fail "unknown python version"
esac

# compile — x86_64 only, using the toolchain from the environment.

TARGET=$x64TARGET
MACSDKDIR=$x64MACSDKDIR
ARCHARGs="$x64ONLYARG"
OSVERSION="$x64OSVERSION"
CC=$x64CC
CXX=$x64CXX

mkdir -p build-$ARCHS
cd build-$ARCHS

# --with-universal-archs="intel" --enable-universalsdk=$MACSDKDIR
# Specifying both --enable-shared and --enable-framework is not supported
env \
 CC=$CC CXX=$CXX \
 CFLAGS="-isysroot $MACSDKDIR -arch x86_64 $ARCHARGs $OTHERARGs -O3 -dead_strip" \
 CXXFLAGS="-isysroot $MACSDKDIR -arch x86_64 $ARCHARGs $OTHERARGs -O3 -dead_strip" \
 CPPFLAGS="-I$REPOSITORYDIR/include" \
 LDFLAGS="-L$REPOSITORYDIR/lib -mmacosx-version-min=$OSVERSION -dead_strip -prebind" \
 NEXT_ROOT="$MACSDKDIR" \
 ../configure --enable-framework=$REPOSITORYDIR/Frameworks --with-framework-name=Python${_PY_MAJ} \
 --prefix=$REPOSITORYDIR \
 --with-libs='-lz' \
 --enable-toolbox-glue --enable-ipv6 --enable-unicode \
 --with-cxx-main=$CXX \
 || fail "configure step for python ${_PY_VER} multi arch";

make clean;
make || fail "failed at make step of python ${_PY_VER} multi arch";
make install || fail "make install step of python ${_PY_VER} multi arch";
#chmod u+w $REPOSITORYDIR/lib/libpython2.7.dylib
# Drop the bundled test suite to keep the installed framework small.
rm -rf $REPOSITORYDIR/Frameworks/Python${_PY_MAJ}.framework/Versions/${_PY_VER}/lib/python${_PY_VER}/test

# clean
cd ..
rm -rf build-$ARCHS
| true |
f06c124c8920dd7b25e1e0ec2fdaba9421219923 | Shell | arcta/server-setup | /system/tensorflow.sh | UTF-8 | 1,052 | 3.390625 | 3 | [] | no_license | #!/bin/bash
# Resolve the sibling "environment" directory from this script's location.
path=$(cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd | sed -e "s/system/environment/g")

# Install the GPU stack (CUDA 8 + cuDNN 6 + tensorflow-gpu) when an
# NVIDIA adapter is present; otherwise queue plain tensorflow.
if [ "$(lspci | grep VGA | grep -i nvidia)" != "" ]; then
    echo "
    Installing TesorFlow + GPU ..."
    sudo apt install --yes libcupti-dev
    # NOTE(review): removed the literal template line
    #   sudo dpkg -i cuda-repo-<distro>_<version>_<architecture>.deb
    # left over from NVIDIA's docs — the unexpanded <...> placeholders
    # were parsed by the shell as input redirections and always failed
    # at runtime. The concrete Ubuntu 16.04 package is installed below.
    sudo dpkg -i cuda-repo-ubuntu1604_8.0.61-1_amd64.deb
    sudo apt update && sudo apt install --yes cuda
    wget http://developer.download.nvidia.com/compute/redist/cudnn/v6.0/cudnn-8.0-linux-x64-v6.0.tgz
    tar -xzvf cudnn-8.0-linux-x64-v6.0.tgz
    sudo cp -P cuda/include/cudnn.h /usr/local/cuda-8.0/include
    sudo cp -P cuda/lib64/libcudnn* /usr/local/cuda-8.0/lib64/
    sudo chmod a+r /usr/local/cuda-8.0/lib64/libcudnn*
    rm -rf cuda cudnn-8.0-linux-x64-v6.0.tgz
    echo "tensorflow-gpu" >> "$path/install-python-packages.txt"
    echo "[tensorflow] Done: $(nvcc --version)" >> install.log 2>&1
else
    echo "tensorflow" >> "$path/install-python-packages.txt"
    echo "[tensorflow] Done: NO GPU SUPPORT" >> install.log 2>&1
fi
| true |
3f068f73a226074f937cadfd4814e8d3b4e8c7a1 | Shell | manno/dotfiles | /dot_zsh/completion.zsh | UTF-8 | 1,720 | 2.71875 | 3 | [] | no_license | # cache
# Completion cache: speeds up expensive completers (e.g. package lists).
cachedir=$HOME/.zsh/cache/$UID
[ ! -d $cachedir ] && mkdir -p $cachedir
zstyle ':completion:*' use-cache on
zstyle ':completion:*' cache-path $cachedir
# Never offer VCS bookkeeping directories as completions.
zstyle ':completion:*:(all-|)files' ignored-patterns '(|*/)CVS' '(|*/)#.svn'
zstyle ':completion:*:cd:*' ignored-patterns '(*/)#CVS' '(|*/)#.svn'
# Complete ../ and ./ as real directories.
zstyle ':completion:*' special-dirs true
# Host completion for ssh & co., fed from known_hosts (hashed/numeric
# entries filtered out, port/alias suffixes stripped).
typeset -U hosts
if [ -f "$HOME/.ssh/known_hosts" ]; then
  hosts=(${${${${(f)"$(<$HOME/.ssh/known_hosts)"}:#[0-9]*}%%\ *}%%,*})
  zstyle ':completion:*:hosts' hosts $hosts
fi
# Fuzzy completion: exact, then pattern match, then approximate.
zstyle ':completion:*' completer _complete _match _approximate
zstyle ':completion:*:match:*' original only
zstyle ':completion:*:approximate:*' max-errors 1 numeric
# Increase typo tolerance with the length of what was typed.
zstyle -e ':completion:*:approximate:*' \
    max-errors 'reply=($((($#PREFIX+$#SUFFIX)/3))numeric)'
# insert all expansions for expand completer
#zstyle ':completion:*:expand:*' tag-order all-expansions
# Hide internal completion functions (names starting with _).
zstyle ':completion:*:functions' ignored-patterns '_*'
# Selectable menu for kill's process-id completion.
#zstyle ':completion:*:*:*:*:processes' menu yes select
#zstyle ':completion:*:processes' command 'ps -e'
zstyle ':completion:*:*:kill:*' menu yes select
zstyle ':completion:*:kill:*' force-list always
# Drop approximate from the default completer chain; colourise listings
# and match case-insensitively (lowercase matches uppercase).
zstyle ':completion:*' completer _complete _match
zstyle ':completion:*' list-colors ${(s.:.)LS_COLORS}
zstyle ':completion:*' matcher-list '' 'm:{a-z}={A-Z}'
autoload -Uz compinit
compinit -i
#autoload predict-on
#predict-on
# functions to make completion for git aliases work
_git_co() { _git_checkout }
_git_lg() { _git_log }
# FIXME lsd completion is broken
_lsd() { _ls }
| true |
8b59df128da2ab2380aa55885407a31e9f63da66 | Shell | martingutierrezg/zsh-config | /debian_install_zsh.sh | UTF-8 | 2,002 | 3.03125 | 3 | [] | no_license | #!/usr/bin/env sh
# Crude and sequential script to install and configure zsh as the main shell on Debian 10.
# Depends on the .dot files found on the root of this repository.
# Uses fzf, zsh-completions, zsh-autosuggestions, and the powerlevel10k zsh theme.
# To convert bash_history to the zsh equivalent see: https://gist.github.com/muendelezaji/c14722ab66b505a49861b8a74e52b274
# Use like:
# git init . && git remote add origin https://github.com/martingutierrezg/zsh-config.git && git pull origin master && chmod +x ./debian_install_zsh.sh && ./debian_install_zsh.sh

# Install zsh
sudo apt install zsh
# Install fzf
sudo apt install fzf
# Install zsh-completions from SUSE's repositories: register the OBS apt
# source, import its signing key, then install the package.
echo 'deb http://download.opensuse.org/repositories/shells:/zsh-users:/zsh-completions/Debian_Testing/ /' | sudo tee /etc/apt/sources.list.d/shells:zsh-users:zsh-completions.list
curl -fsSL https://download.opensuse.org/repositories/shells:zsh-users:zsh-completions/Debian_Testing/Release.key | gpg --dearmor | sudo tee /etc/apt/trusted.gpg.d/shells_zsh-users_zsh-completions.gpg > /dev/null
sudo apt update
sudo apt install zsh-completions
# Install zsh-autosuggestions from SUSE's repositories (same pattern).
echo 'deb http://download.opensuse.org/repositories/shells:/zsh-users:/zsh-autosuggestions/Debian_Testing/ /' | sudo tee /etc/apt/sources.list.d/shells:zsh-users:zsh-autosuggestions.list
curl -fsSL https://download.opensuse.org/repositories/shells:zsh-users:zsh-autosuggestions/Debian_Testing/Release.key | gpg --dearmor | sudo tee /etc/apt/trusted.gpg.d/shells_zsh-users_zsh-autosuggestions.gpg > /dev/null
sudo apt update
sudo apt install zsh-autosuggestions
# Change the user's default shell to zsh
chsh -s /usr/bin/zsh
# Change root's default shell to zsh
sudo chsh -s /usr/bin/zsh
# Download powerlevel10k theme (the target dir deviates from the official instructions)
# The theme is sourced on line 9 of ~/.zsh.local
git clone --depth=1 https://github.com/romkatv/powerlevel10k.git ~/.powerlevel10k
| true |
809a97127366298576c3f2df5e3583a6a83e43e4 | Shell | othermoon/sonic-field-video | /src/ffmpeg_scripts/lut.sh | UTF-8 | 732 | 2.984375 | 3 | [
"CC0-1.0"
] | permissive | #!/bin/zsh
# Description:
#   Apply a cube LUT to a video via ffmpeg's lut3d filter, working in
#   full-range 32-bit float RGB for precision; audio is copied from a
#   second read of the same input with its timestamps reset.
#
# Args:
#   <video in name> <lut>
#
# Out:
#   <in-cube>.nut
#
# encoding.sh provides $exe (ffmpeg), $enc (encoder args) and get_lut.
. $(dirname "$0")/encoding.sh

lut=$(get_lut $2)

# Build the full ffmpeg command as a string so it can be both displayed
# and persisted to run.sh for later inspection/re-runs.
cmd="${exe} -y -i '${1}' -i '${1}' ${enc} -filter_complex \"
[0:v]
zscale=rin=full:r=full,
format=gbrpf32le,
lut3d=
file='${lut}':
interp=tetrahedral,
zscale=rin=full:r=full
[v];
[1:a]
asetpts=PTS-STARTPTS
[a]\" -map '[v]' -map '[a]' -map_metadata -1 '${1%.*}-cube.nut'"

echo
echo '================================================================================'
echo Will Run ${cmd}
echo '================================================================================'
echo

# Persist and execute the command, then hand the result to the reviewer.
echo $cmd > run.sh
. ./run.sh
. $(dirname "$0")/review.sh "${1%.*}-cube.nut"
| true |
77d6e59d54708f973383c94478d7ee8a070c80fa | Shell | xinxian0458/dcos-vagrant-box | /bin/virtualbox.sh | UTF-8 | 651 | 3.125 | 3 | [
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
set -o errexit
set -o nounset
set -o pipefail

echo '>>> Installing packages required by VBoxGuestAdditions'
# Package list per vagrant-vbguest's RedHat installer:
# https://github.com/dotless-de/vagrant-vbguest/blob/master/lib/vagrant-vbguest/installers/redhat.rb
yum install --assumeyes --tolerant kernel-devel-$(uname -r) gcc binutils make perl bzip2

# The packer/vagrant base box drops the additions ISO (and its version
# marker) in the vagrant home directory; mount it and run the installer.
VBOX_VERSION=$(cat /home/vagrant/.vbox_version)
echo ">>> Installing VBoxGuestAdditions ${VBOX_VERSION}"
cd /tmp
mount -o loop /home/vagrant/VBoxGuestAdditions_${VBOX_VERSION}.iso /mnt
# TODO: fix OpenGL support module installation
# (|| true keeps errexit from aborting on the known OpenGL module failure)
sh /mnt/VBoxLinuxAdditions.run || true
umount /mnt
rm -rf /home/vagrant/VBoxGuestAdditions_*.iso
8b25fdb1009a1493ad20464cfa4fc24123fb9354 | Shell | Wildsong/vagrant-arcgis-enterprise | /resources/scripts/portal_install.sh | UTF-8 | 2,802 | 3.671875 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash -x
# get fully qualified domain name
FQDN="$(hostname --fqdn)"
# install packages required by the Portal installer
sudo apt-get install -y dos2unix fontconfig gettext libice6 libsm6 libxtst6 libxrender1
# extract installation resources
tar -xzf /vagrant/resources/proprietary/ArcGIS_Portal_Linux.tar.gz -C /tmp
# run the setup script (skipped when a previous provision installed it)
if [ -d /opt/arcgis/portal ]; then
    echo "Software already installed."
else
    # determine if prvc or ecp license is being used and save the path
    # NOTE(review): LICENSE_FILE is set but the Setup call below uses
    # portal.prvc directly — confirm whether this is intentional.
    if [ -f "/vagrant/resources/proprietary/server.prvc" ]; then
        LICENSE_FILE="/vagrant/resources/proprietary/server.prvc"
    elif [ -f "/vagrant/resources/proprietary/server.ecp" ]; then
        LICENSE_FILE="/vagrant/resources/proprietary/server.ecp"
    fi
    sudo su -c "/tmp/PortalForArcGIS/Setup -m silent -l yes -a /vagrant/resources/proprietary/portal.prvc -d /opt" arcgis
    # clean out the installation resources
    rm -rf /tmp/PortalForArcGIS
    # copy the startup file to the init.d directory so ArcGIS Portal will know how to start with the instance boot
    sudo cp /opt/arcgis/portal/framework/etc/arcgisportal /etc/init.d/
    # use sed to edit the arcgisportal init.d file so it knows where to find the installtion of server
    sudo sed -e 's/\/arcgis\/portal/\/opt\/arcgis\/portal/' -i /etc/init.d/arcgisportal
    # set ArcGIS Portal to start with the instance boot
    sudo /lib/systemd/systemd-sysv-install enable arcgisportal
fi
# now actually start portal server
echo "Starting portal"
echo "AGSPORTAL = $AGSPORTAL"
sudo systemctl start arcgisportal
echo "Waiting for portal to start."
sleep 10
# Find the newest log file and watch it in the background while waiting.
LOGDIR=/opt/arcgis/portal/usr/arcgisportal/logs/ARCGIS.VM/portal
CURRENTLOG=`sudo ls -1t $LOGDIR | head -1`
echo "Watching $CURRENTLOG"
sudo tail -f "$LOGDIR/$CURRENTLOG" &
echo "Have you considered getting an SSD?"
sleep 10
# Probe the portal admin endpoint (-k: self-signed cert) before setup.
PORTALURL="https://$FQDN:7443/arcgis/portaladmin"
curl --retry 10 -Ss -k $PORTALURL
if [ $? != 0 ]; then
    echo "Portal server not responding on $PORTALURL."
    sleep 10
fi
# use the admin api to set up the server site using the default config-store and directories locations
# NOTE(review): credentials are hard-coded here; fine for a throwaway
# vagrant VM, never for anything reachable from outside.
echo "Configure site via REST"
curl --retry 60 -X POST -k \
    -H "Content-Type: application/x-www-form-urlencoded" \
    -d 'username=admin&password=Esri3801&fullname=Administrator&email=nobody@esri.com&description=The initial admin account&securityQuestionIdx=1&securityQuestionAns=Mumbai&contentStore={"type":"fileStore","provider":"FileSystem","connectionString":"/opt/arcgis/portal/usr/arcgisportal/content"}&f=json' \
    "https://$FQDN:7443/arcgis/portaladmin/createNewSite" >> /tmp/curl.log
if [ $? == 0 ]; then
    echo "Portal is now set up."
else
    echo "Set up failed."
    # Comment out the next line to press on to the next step
    #exit 1
fi
exit 0
| true |
d486e006ce4c0ea7573c9d285ebdc072864bb474 | Shell | FauxFaux/debian-control | /f/fsharp/fsharp_4.0.0.4+dfsg2-2_all/postinst | UTF-8 | 248 | 2.640625 | 3 | [] | no_license | #!/bin/sh
set -e
# Debian maintainer-script fragment generated by debhelper's
# dh_installcliframework; do not edit by hand.
# Automatically added by dh_installcliframework
# On "configure" (install/upgrade), register this package's CLI (.NET/Mono)
# framework with cli-common, if that helper is installed and executable.
if [ "$1" = "configure" ] && [ -x /usr/share/cli-common/framework-package-install ]; then
	/usr/share/cli-common/framework-package-install fsharp
fi
# End automatically added section
| true |
fe937fa099a8565735a971bfb8d4f27ff139720b | Shell | artshumrc/beyond-words | /bin/config | UTF-8 | 577 | 2.78125 | 3 | [
"BSD-2-Clause"
] | permissive | #!/bin/bash
# Shared build configuration: exports the project/image environment variables
# used by the other scripts in bin/.  Intended to be sourced.
SELF_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
pushd "$SELF_DIR/.." > /dev/null
export PROJECT_NAME=beyondwords
export PROJECT_DIR=$(pwd)
mkdir -p "$PROJECT_DIR/.meteor/local/deploy"
# Absolute path of the deploy directory (resolved via cd+pwd; this replaces
# the old unquoted pushd/pwd/popd backtick hack).
export BUILD_DEST=$(cd "$PROJECT_DIR/.meteor/local/deploy" && pwd)
export DEPLOY_ARCH=os.linux.x86_64
export IMAGE_NAME=${PROJECT_NAME}-app
# Image tag derived from git state, e.g. "v1.2-3-gabc123-dirty".
export BUILD_TAG=$(git describe --dirty --always --tags)
export REGISTRY_PREFIX="us.gcr.io/archimedes-01201"
export DEFAULT_IMAGE_TAG="$REGISTRY_PREFIX/$IMAGE_NAME:$BUILD_TAG"
popd > /dev/null
| true |
7e5378b285c72e2ac326d39e6a15b087a4e66a7d | Shell | miyurusankalpa/IPv6Data | /microsoft/script.sh | UTF-8 | 291 | 3.109375 | 3 | [] | no_license | #!/bin/bash
# Enumerate a small slice of IPv6 space (2620:1ec:21..22::0-20) and record
# the DNS SANs of any TLS certificate served on port 443.  Each probed
# address is appended to out.txt, followed by its certificate's "DNS:" lines.
# (The previous version built each command as a string and ran it via eval;
# the commands are now executed directly.)
file='out.txt'
for i1 in {21..22}; do
    for i2 in {0..20}; do
        ip="2620:1ec:$i1::$i2"
        echo "$ip" >> "$file"
        # 1-second connect budget per host; keep only the SAN "DNS:" lines.
        echo -n | timeout 1 openssl s_client -connect "[$ip]:443" \
            | openssl x509 -noout -text | grep DNS: >> "$file"
    done
done
| true |
6e5cbaf51e5ab46a49b8c9cf04e71515ee8b8f9a | Shell | kaci65/alx-system_engineering-devops | /0x04-loops_conditions_and_parsing/6-superstitious_numbers | UTF-8 | 260 | 3.484375 | 3 | [] | no_license | #!/usr/bin/env bash
# Print the numbers 1 through 20; numbers considered unlucky in certain
# countries are followed by a note saying where the superstition comes from.
num=1
until [ "$num" -gt 20 ]; do
    echo "$num"
    case "$num" in
        4)  echo "bad luck from China" ;;
        9)  echo "bad luck from Japan" ;;
        17) echo "bad luck from Italy" ;;
    esac
    num=$((num + 1))
done
| true |
6bf0c1d6b4e52c4475a60955ec61c0fd76817012 | Shell | mo/scripts-server | /lib/deploy-app | UTF-8 | 716 | 3.625 | 4 | [
"MIT"
] | permissive | #!/bin/bash
set -e
set -o pipefail

# Directory this script lives in; the per-step scripts are siblings of it.
SCRIPT_DIR=$(dirname "$0")

# DEBUG=1 turns on shell tracing here and is exported to every step script.
if [ "$DEBUG" == "1" ]; then
    set -x
fi

if [ "$3" == "" ]; then
    echo "usage: $(basename "$0") DEPLOYMENT_CONFIG APP_ID SERVER_IP"
    exit 1
fi

DEPLOYMENT_CONFIG=$1
APP_ID=$2
SERVER_IP=$3

echo -e "\n\n\n====[ installing app: $APP_ID ]=====================================================\n"

# Run one deployment step script, forwarding the standard
# (config, app, server) argument triple.  set -e aborts on any failure.
run-app-step() {
    local STEP_NAME=$1
    export DEBUG
    "$SCRIPT_DIR/$STEP_NAME" "$DEPLOYMENT_CONFIG" "$APP_ID" "$SERVER_IP"
}

run-app-step setup-app-webserver-startup
run-app-step setup-app-misc
run-app-step setup-app-config-file
run-app-step setup-app-in-haproxy
run-app-step setup-app-in-traefik
run-app-step setup-app-in-mysql
run-app-step setup-app-in-postgresql
| true |
41d1b48336e86cd14827f49d14e89b3bd78ef29f | Shell | zhangdongyue/psshcp-exp | /my_pscp | UTF-8 | 151 | 2.78125 | 3 | [] | no_license | #!/bin/sh
# Push one local file to the same remote path on every host listed in a file.
# Usage: my_pscp <host.lst> <localfile> <remotepath>
if [ $# -lt 3 ]
then
    echo "Usage xxx <host.lst> <localfile> <remotepath>"
    # Usage errors should report failure (the old bare "exit" returned 0).
    exit 1
fi
# One host name per line; -r keeps backslashes literal, IFS= keeps whitespace.
while IFS= read -r line
do
    ./scp.exp "$2" "$line:$3"
done < "$1"
| true |
afdb6d26e4614b1ba4845389006a0b84771b339e | Shell | gonzsa04/SistemasOperativos | /FicherosP1/Obligatoria/script.sh | UTF-8 | 1,875 | 3.640625 | 4 | [] | no_license | # poner antes chmod +x ./script.txt en la consola para dar permisos para ejecutar el script
if [ ! -e ./mytar ]; then # miramos si mytar existe y es ejecutable
echo "Mytar no encontrado"
exit 1
elif [ ! -x ./mytar ]; then
echo "Mytar no es ejecutable"
exit 1
fi
if [ -d "tmp" ]; then
rm -rf -- tmp # borramos el directorio tmp si lo encontramos
fi
mkdir tmp # creamos nuevo directorio tmp
cd tmp # nos metemos dentro de el
if [ ! -e file1.txt ]; then
touch file1.txt
echo "Hello World!" > file1.txt # escribimos Hello World! y se lo metemos al file1
fi
if [ ! -e file2.txt ]; then
touch file2.txt
head -10 /etc/passwd > file2.txt # copiamos las 10 primeras líneas del fichero /etc/passwd.
fi
if [ ! -e file3.dat ]; then
touch file3.dat # ponemos un contenido binario aleatorio de 1024 bytes,
head -c 1024 /dev/urandom > file3.dat # tomado del dispositivo/dev/urandom
fi
./../mytar -c -f mytar.mtar file1.txt file2.txt file3.dat #creamos el comprimido con los tres archivos
if [ ! -d out ]; then
mkdir out # creamos out en tmp
fi
cp ./mytar.mtar ./out/mytar.mtar # y copiamos el comprimido en el
cd out
./../../mytar -x -f mytar.mtar # extraemos el contenido
# comparamos si los nuevos archivos extraidos son iguales que los originales
if diff ../file1.txt file1.txt >/dev/null ; then
if diff ../file2.txt file2.txt >/dev/null ; then
if diff ../file3.dat file3.dat >/dev/null ; then
../..
echo "Correct" # si todos estan correctos
exit 0
else # si falla uno de ellos
../..
echo "Uno o mas archivos son diferentes"
exit 1
fi
else
../..
echo "Uno o mas archivos son diferentes"
exit 1
fi
else
../..
echo "Uno o mas archivos son diferentes"
exit 1
fi
| true |
cbde0f17a83cfc4b60ad7a1637a59eab0aa4974b | Shell | INOS-soft/pbi | /modules/print/scribus/pbi.conf | UTF-8 | 665 | 2.671875 | 3 | [
"MIT"
] | permissive | #!/bin/sh
# PBI Build Configuration
# Place over-rides and settings here
# Sourced by the PC-BSD PBI build system to describe this module.
#
# XDG Desktop Menu Spec:
# http://standards.freedesktop.org/menu-spec/menu-spec-1.0.html
##############################################################################
# Program Name
PBI_PROGNAME="Scribus"
# Program Website
PBI_PROGWEB="http://www.scribus.net"
# Program Author / Vendor
PBI_PROGAUTHOR="Scribus Team"
# Default Icon (Relative to %%PBI_APPDIR%% or resources/)
PBI_PROGICON="scribus.png"
# The target port we are building (FreeBSD ports-tree path)
PBI_MAKEPORT="print/scribus/"
# Build key: bump to force a rebuild even when the port itself is unchanged.
PBI_BUILDKEY="01"; export PBI_BUILDKEY
export PBI_PROGNAME PBI_PROGWEB PBI_PROGAUTHOR PBI_PROGICON PBI_MAKEPORT
| true |
c0c065ddc77204152c9f1655c6052ceb53cac9ca | Shell | justinp/git-series | /tests/series_propagate_2.sh | UTF-8 | 650 | 2.625 | 3 | [] | no_license | #!/bin/sh
# Integration test: "git series propagate" across a merge conflict.
# Helper functions (gitcant/gitcan, qgit, readmeShouldBe, shouldDescendFrom,
# wrapup) and the fixture repository come from common-propagate.sh.
. $(dirname $0)/common-propagate.sh
# Propagation must refuse to complete while the merge still conflicts.
gitcant "cleanly propagate if there are conflicts" series propagate 0.1
# Resolve the conflict by hand (keeping the upper-case "C" line) and commit.
( echo a && echo b && echo C && echo d && echo e && echo f ) > README
qgit add README
qgit commit -m "merge manually"
# With the conflict resolved, propagation should now succeed.
gitcan "finish propagating once conflicts are resolved" series propagate 0.1
# Every downstream series (and develop) should contain the resolved "C"
# and descend from series/0.1 after propagation.
qgit checkout series/0.1
readmeShouldBe a b C d e
qgit checkout series/0.2
readmeShouldBe a b C d e f
shouldDescendFrom series/0.1 series/0.2
qgit checkout series/0.3
readmeShouldBe a b C d e f
shouldDescendFrom series/0.1 series/0.3
qgit checkout develop
readmeShouldBe a b C d e f g
shouldDescendFrom series/0.1 develop
wrapup
| true |
b633d04add150ed2526a70b9c3caeedffa27cb79 | Shell | JarateKing/BaseHud | /resource/rename_to_lowercase.sh | UTF-8 | 522 | 3.578125 | 4 | [
"MIT"
] | permissive | #!/bin/sh
# Lower-case every directory name (and then every file name) under the
# current directory, skipping anything on a "fonts" path.
#
# Names are streamed newline-delimited through while/read instead of being
# word-split by an unquoted $(find ...), so paths containing spaces survive.
# (Names that contain embedded newlines are still unsupported.)

# 1) For each directory whose name contains an upper-case letter, create
#    the lower-cased twin and move its regular files across.
find "." -type d | grep '[A-Z]' | grep -v fonts | while IFS= read -r d; do
	newd="$(echo "$d" | tr 'A-Z' 'a-z')"
	mkdir -p "$newd"
	find "$d" -maxdepth 1 -type f | while IFS= read -r f; do
		mv "$f" "$newd"
	done
done

# 2) Remove the now-emptied upper-case originals.
find "." -type d | grep '[A-Z]' | grep -v fonts | while IFS= read -r d; do
	rm -rf "$d"
done

# 3) Lower-case any remaining file names (perl rename).
find "." -type d | grep -v fonts | while IFS= read -r d; do
	find "$d" -maxdepth 1 -type f | grep '[A-Z]' | while IFS= read -r f; do
		rename 'y/A-Z/a-z/' "$f"
	done
done
e9913e362b3f6a698c3e45778473cac54b596967 | Shell | imperialebola2018/ebola-server | /staging/provision/setup-docker.sh | UTF-8 | 1,107 | 3.640625 | 4 | [] | no_license | #!/usr/bin/env bash
set -x

COMPOSE_VERSION=1.17.0

# Install docker-ce from Docker's apt repository unless already present.
# ("command -v" replaces the non-portable "which -a" presence check.)
if command -v docker > /dev/null; then
  echo "docker is already installed"
else
  # If adding a large disk at /mnt/data, then this is useful.
  # The big docker directory is /var/lib/docker - we'll move that
  # out onto the external disk:
  # mkdir -p /mnt/data/docker/var-lib-docker
  # ln -s /mnt/data/docker/var-lib-docker /var/lib/docker
  echo "installing docker"
  curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
  sudo add-apt-repository \
     "deb [arch=amd64] https://download.docker.com/linux/ubuntu \
     $(lsb_release -cs) \
     stable"
  sudo apt-get update
  sudo apt-get install -y docker-ce
  # Let the vagrant user talk to the docker daemon without sudo.
  sudo usermod -aG docker vagrant
fi

# Install the docker-compose binary pinned to COMPOSE_VERSION.
if command -v docker-compose > /dev/null; then
  echo "docker-compose is already installed"
else
  echo "installing docker-compose"
  sudo curl -L \
     "https://github.com/docker/compose/releases/download/$COMPOSE_VERSION/docker-compose-$(uname -s)-$(uname -m)" \
     -o /usr/bin/docker-compose
  sudo chmod +x /usr/bin/docker-compose
fi
| true |
754344bb389cea30424c4772cbeafefc8a832169 | Shell | xiaohesen/imagebuilder | /imagebuilder/3.0/openwrt-imagebuilder-ramips-mt7620/build_dir/target-mipsel_24kc_musl/root-ramips/lib/netifd/proto/3g.sh | UTF-8 | 3,053 | 3.484375 | 3 | [] | no_license | #!/bin/sh
# OpenWrt netifd protocol handler for PPP-over-serial 3G/CDMA modems.
# Normally sourced by netifd (INCLUDE_ONLY set); when run standalone it
# pulls in the netifd proto helpers and registers itself.
[ -n "$INCLUDE_ONLY" ] || {
	NOT_INCLUDED=1
	INCLUDE_ONLY=1
	. ../netifd-proto.sh
	. ./ppp.sh
	init_proto "$@"
}

# Back-off helper: if the modem reports a 2G-class mode (gsm/cdma/tdma),
# count consecutive observations in /tmp/modem.<id>/fail_count and, after
# more than 10, delay the next dial attempt by 120 seconds.
# $1 is the tty device path (e.g. /dev/ttyUSB0).
# NOTE(review): the /tmp/modem.*/signal file appears to be maintained by an
# external monitor and to hold the mode as the 4th '"'-delimited field --
# confirm against the rest of this firmware before relying on it.
handle_2G_mode() {
	local count=0
	# Bare device name: third "/"-separated component of the path.
	local dev=$(echo $1|cut -d '/' -f 3)
	# Per-modem state directory name derived from the platform device id.
	local dir=modem.$(find /sys/devices/platform/ -name $dev |tail -n 1|cut -d '/' -f 8|cut -d ':' -f 1)
	local mode=$(cat /tmp/$dir/signal |cut -d '"' -f 4)
	[ -f /tmp/$dir/fail_count ] && {
		count=$(cat /tmp/$dir/fail_count)
	}
	[ "$mode" = "gsm" -o "$mode" = "cdma" -o "$mode" = "tdma" ] && {
		let count=count+1
		echo $count >/tmp/$dir/fail_count
		[ $count -gt 10 ] && {
			logger modem delay dial,120s
			sleep 120
		}
		return
	}
	# Mode is fine again: clear the failure counter.
	[ -f /tmp/$dir/fail_count ] && {
		rm /tmp/$dir/fail_count
	}
}

# Declare the UCI options this protocol accepts.
proto_3g_init_config() {
	no_device=1
	available=1
	ppp_generic_init_config
	proto_config_add_string "device:device"
	proto_config_add_string "apn"
	proto_config_add_string "service"
	proto_config_add_string "pincode"
	proto_config_add_string "dialnumber"
}

# Bring the interface up: probe/configure the modem via gcom, pick the
# chat script, then hand off to the generic PPP setup.
proto_3g_setup() {
	local interface="$1"
	local chat
	json_get_var device device
	json_get_var apn apn
	json_get_var service service
	json_get_var pincode pincode
	json_get_var dialnumber dialnumber
	[ -n "$dat_device" ] && device=$dat_device
	# Resolve symlinks (e.g. /dev/modem) to the real device node.
	device="$(readlink -f $device)"
	[ -e "$device" ] || {
		proto_set_available "$interface" 0
		return 1
	}
	case "$service" in
	cdma|evdo)
		chat="/etc/chatscripts/evdo.chat"
	;;
	*)
		chat="/etc/chatscripts/3g.chat"
		# Identify the modem vendor and set the vendor-specific AT
		# command that selects the requested radio mode.
		cardinfo=$(gcom -d "$device" -s /etc/gcom/getcardinfo.gcom)
		if echo "$cardinfo" | grep -q Novatel; then
			case "$service" in
				umts_only) CODE=2;;
				gprs_only) CODE=1;;
				*) CODE=0;;
			esac
			export MODE="AT\$NWRAT=${CODE},2"
		elif echo "$cardinfo" | grep -q Option; then
			case "$service" in
				umts_only) CODE=1;;
				gprs_only) CODE=0;;
				*) CODE=3;;
			esac
			export MODE="AT_OPSYS=${CODE}"
		elif echo "$cardinfo" | grep -q "Sierra Wireless"; then
			SIERRA=1
		elif echo "$cardinfo" | grep -qi huawei; then
			case "$service" in
				umts_only) CODE="14,2";;
				gprs_only) CODE="13,1";;
				*) CODE="2,2";;
			esac
			export MODE="AT^SYSCFG=${CODE},3FFFFFFF,2,4"
		fi
		# Unlock the SIM if a PIN was configured; a wrong PIN blocks
		# further restart attempts instead of retrying (and risking a PUK lock).
		if [ -n "$pincode" ]; then
			PINCODE="$pincode" gcom -d "$device" -s /etc/gcom/setpin.gcom || {
				proto_notify_error "$interface" PIN_FAILED
				proto_block_restart "$interface"
				return 1
			}
		fi
		[ -n "$MODE" ] && gcom -d "$device" -s /etc/gcom/setmode.gcom
		# wait for carrier to avoid firmware stability bugs
		[ -n "$SIERRA" ] && {
			gcom -d "$device" -s /etc/gcom/getcarrier.gcom || return 1
		}
		# Standard GPRS dial string when none was configured.
		if [ -z "$dialnumber" ]; then
			dialnumber="*99***1#"
		fi
	;;
	esac
	handle_2G_mode $device
	# Chat invocation; USE_APN is only prefixed when an APN is set.
	connect="${apn:+USE_APN=$apn }DIALNUMBER=$dialnumber /usr/sbin/chat -t5 -v -E -f $chat"
	ppp_generic_setup "$interface" \
		noaccomp \
		nopcomp \
		novj \
		nobsdcomp \
		noauth \
		maxfail 1 \
		set EXTENDPREFIX=1 \
		lock \
		crtscts \
		115200 "$device"
	return 0
}

# Tear the interface down by killing the pppd started above.
proto_3g_teardown() {
	proto_kill_command "$interface"
}

[ -z "$NOT_INCLUDED" ] || add_protocol 3g
| true |
b46d0a0cfdedef537cdad59d53b3d1482dd18eac | Shell | darshanrajgor/ShellScripts | /tutorials/dirtree.sh | UTF-8 | 938 | 3.578125 | 4 | [] | no_license | #!/bin/sh
# Print an ASCII tree of the directory hierarchy below the starting
# directory (arg 1, default: current directory) and count directories.
# "search" recurses using the *current working directory* plus three
# globals: deep (current depth), numdirs (total count) and swfi (set to 1
# once the top-level pass has finished, terminating the driver loop).
search () {
    for dir in `echo *`
    do
        if [ -d "$dir" ] ; then
            # Draw one "| " per level of depth before the entry.
            zz=0
            while [ $zz != $deep ]
            do
                echo -n "| "
                zz=`expr $zz + 1`
            done
            if [ -L "$dir" ] ; then
                # Symlink: show its "-> target" part from ls -l, don't descend.
                echo "+---$dir" `ls -l $dir | sed 's/^.*'$dir' //'`
            else
                echo "+---$dir"
                # Real directory: descend and recurse one level deeper.
                if cd "$dir" ; then
                    deep=`expr $deep + 1`
                    search     # with recursion
                    numdirs=`expr $numdirs + 1`
                fi
            fi
        fi
    done
    # Done with this level: climb back up and decrement the depth.
    cd ..
    if [ "$deep" ] ; then
        swfi=1
    fi
    deep=`expr $deep - 1`
}

# Start in the given directory (or stay where we are).
if [ $# = 0 ] ; then
    cd `pwd`
else
    cd $1
fi

echo "Initial directory = `pwd`"
swfi=0
deep=0
numdirs=0
zz=0

# Drive the recursion until the top-level pass flags completion via swfi.
while [ "$swfi" != 1 ]
do
    search
done

echo "Total directories = $numdirs"

exit 0

# ==> Challenge: try to figure out exactly how this script works.
7e4554913dd46da6c7f2db9147dfde2006709e20 | Shell | kyukhin/tcl2lua | /gain.sh | UTF-8 | 1,089 | 3.140625 | 3 | [] | no_license | #!/bin/bash
# Usage: $0 convert_suite_stderr convert_suite_dst_dir
# Summarize the tcl2lua conversion progress: files that converted fully,
# counts of unconverted expressions (marked "X!" in the output), a
# per-construct and per-file breakdown, and the files one fix away.
# $1 = stderr log of the conversion run, $2 = directory of converted files.

# Files containing zero "X!" markers are fully processed.
echo "fully processed files: $(grep -Rc X\! $2 | grep :0\$ | cut -d: -f1 | sed 's@^dst[^/]\+/@@' | wc -l)"
grep -Rc X\! $2 | grep :0\$ | cut -d: -f1 | sed 's@^dst[^/]\+/@@'
# Sum of the per-line counts of unprocessed expressions (summed with bc).
echo "unprocessed expressions: $((grep -v 'creating Jim interpretor\|\[total cmds\]' $1 | grep 'X!' | sort | cut -d: -f2 | tr '\n' '+'; echo 0) | bc)"
echo
# Per-construct totals, sorted descending by count.
for w in "foreach" "for" "expr" "expr01" "cmd" "capable" "case" "procname"; do
    echo -n "$w "
    (grep -v 'creating Jim interpretor\|\[total cmds\]' $1 | grep 'X!' | sort | grep ${w}\" | cut -d: -f2 | tr '\n' '+'; echo 0) | bc
done | sort -nk2,2 -r | column -t
echo
# Histogram of the specific unconverted commands (X!cmd"name" markers).
echo "unprocessed cmds:"
grep -R X\!cmd $2 | cut -d\! -f2 | cut -d\" -f3 | sort | uniq -c | sort -nk1,1 -r
echo
# Remaining "X!" count per file, busiest files first.
echo "unprocessed expressions per file:"
grep -Rc X\! $2 | grep -v ':0$' | tr : ' ' | sort -nk2,2 -r | column -t
echo
# Files with exactly one unconverted instruction, with that instruction shown.
echo "w/ one unprocessed instr:"
for f in $(grep -Rc X\! $2 | tr : ' ' | sort -nk2,2 -r | column -t | grep ' 1$' | cut -d' ' -f1); do
    echo "$f: $(grep X\! $f | sed 's@^.*X!@@')"
done
| true |
5e1deff93d6d9b98b7e84496b0e8329205afe546 | Shell | meemeer/dotfiles | /.zshrc | UTF-8 | 13,569 | 2.921875 | 3 | [] | no_license | # Created by newuser for 4.3.12
source ~/dotfiles/.zsh/auto-fu.zsh
# auto-fuを初期化する。
zle-line-init() {
auto-fu-init
}
zle -N zle-line-init
zle -N zle-keymap-select auto-fu-zle-keymap-select
# auto-fuをカスタマイズする。
## Enterを押したときは自動補完された部分を利用しない。
afu+cancel-and-accept-line() {
((afu_in_p == 1)) && { afu_in_p=0; BUFFER="$buffer_cur" }
zle afu+accept-line
}
zle -N afu+cancel-and-accept-line
bindkey -M afu "^M" afu+cancel-and-accept-line
# Key bindings: Emacs style.
bindkey -e
# If a command name doesn't exist but matches a directory name, cd into it.
setopt auto_cd
# Make plain cd push onto the directory stack, like pushd.
setopt auto_pushd
# Directories to search for the cd target when it isn't found in the
# current directory.
cdpath=(~)
## Show the directory stack whenever the directory changes.
chpwd_functions=($chpwd_functions dirs)
# History.
HISTFILE=~/.zsh_history
HISTSIZE=1000000
SAVEHIST=$HISTSIZE
# Record start time and duration along with each command line.
setopt extended_history
# Don't add an entry identical to the previous command.
setopt hist_ignore_dups
# Don't add command lines that start with a space.
setopt hist_ignore_space
# Append to the history file immediately.
setopt inc_append_history
# Share history across all running shell processes.
setopt share_history
# Disable Ctrl+S/Ctrl+Q flow control.
setopt NO_flow_control
# Allow substitutions (colors etc.) in prompts.
setopt prompt_subst
# Enable the "%"-escape expansion inside PROMPT.
setopt prompt_percent
# Hide RPROMPT once a line is accepted (keeps copy&paste clean).
setopt transient_rprompt
## Helper for building xterm 256-color codes.
### red: 0-5
### green: 0-5
### blue: 0-5
color256()
{
    # Index into the 6x6x6 color cube: 16 + 36*r + 6*g + b.
    # (The original interleaved "shift" with $1/$2/$3 reads, so green and
    # blue were read from the wrong, already-shifted positions; it also
    # used the deprecated $[ ] arithmetic form.)
    local red=$1
    local green=$2
    local blue=$3

    echo -n $((red * 36 + green * 6 + blue + 16))
}
# Emit the escape sequence selecting a 256-color cube entry as the
# foreground color; arguments (r g b, each 0-5) are passed to color256.
fg256()
{
    printf '\e[38;5;%sm' "$(color256 "$@")"
}
# Emit the escape sequence selecting a 256-color cube entry as the
# background color; arguments (r g b, each 0-5) are passed to color256.
bg256()
{
    printf '\e[48;5;%sm' "$(color256 "$@")"
}
## Prompt construction.
### Target layout:
### -(user@debian)-(0)-<2011/09/01 00:54>------------------------------[/home/user]-
### -[84](0)% [~]
## Also show version-control (vcs_info) status in the right prompt.
autoload -Uz vcs_info
zstyle ':vcs_info:*' formats \
    '(%{%F{white}%K{green}%}%s%{%f%k%})-[%{%F{white}%K{blue}%}%b%{%f%k%}]'
zstyle ':vcs_info:*' actionformats \
    '(%{%F{white}%K{green}%}%s%{%f%k%})-[%{%F{white}%K{blue}%}%b%{%f%k%}|%{%F{white}%K{red}%}%a%{%f%k%}]'
### Left side of the prompt bar.
### %{%B%}...%{%b%}: bold.  %{%F{cyan}%}...%{%f%}: cyan text.
### %n: user name.  %m: short host name.
### %{%B%F{white}%(?.%K{green}.%K{red})%}%?%{%f%k%b%}:
###   last command's exit status (%?), bold white on a green background
###   on success or red on failure (%(?.true.false) switches on $?).
### %{%k%} / %{%f%} / %{%b%}: restore background / foreground / weight.
### %D{%Y/%m/%d %H:%M}: date formatted as "YYYY/MM/DD HH:MM".
prompt_bar_left_self="(%{%B%}%n%{%b%}%{%F{cyan}%}@%{%f%}%{%B%}%m%{%b%})"
prompt_bar_left_status="(%{%B%F{white}%(?.%K{green}.%K{red})%}%?%{%k%f%b%})"
prompt_bar_left_date="<%{%B%}%D{%Y/%m/%d %H:%M}%{%b%}>"
prompt_bar_left="-${prompt_bar_left_self}-${prompt_bar_left_status}-${prompt_bar_left_date}-"
### Right side of the prompt bar.
### %{%B%K{magenta}%F{white}%}...%{%f%k%b%}: bold white on magenta.
### %d: full current directory path (not abbreviated).
prompt_bar_right="-[%{%B%K{magenta}%F{white}%}%d%{%f%k%b%}]-"
### Second-line left prompt.
### %h: history number.  %(1j,(%j),): show "(N jobs)" only when at least
### one job is running.  %#: "%" for normal users, "#" for root.
prompt_left="-[%h]%(1j,(%j),)%{%B%}%#%{%b%} "
## Return the number of characters a prompt format string occupies after
## expansion.  (Multi-byte/Japanese text is not handled correctly.)
count_prompt_characters()
{
    # print:
    #   -P: expand prompt escapes.
    #   -n: no trailing newline.
    # sed:
    #   -e $'s/\e\[[0-9;]*m//g': strip ANSI color escape sequences.
    print -n -P -- "$1" | sed -e $'s/\e\[[0-9;]*m//g' | wc -m
}
## Rebuild PROMPT/RPROMPT (run before every prompt display).
update_prompt()
{
    # Measure the left part of the prompt bar.  It displays the last
    # command's exit status via %?, so this must run first -- before
    # anything else clobbers $?.
    local bar_left_length=$(count_prompt_characters "$prompt_bar_left")
    # Columns remaining for the rest of the bar; $COLUMNS is the
    # terminal width.
    local bar_rest_length=$[COLUMNS - bar_left_length]
    local bar_left="$prompt_bar_left"
    # Copy of the right part with the "%d" (path) placeholder removed ...
    local bar_right_without_path="${prompt_bar_right:s/%d//}"
    # ... so the fixed decoration around the path can be measured.
    local bar_right_without_path_length=$(count_prompt_characters "$bar_right_without_path")
    # Maximum width available for the path itself.
    # $[...]: arithmetic expansion.
    local max_path_length=$[bar_rest_length - bar_right_without_path_length]
    # Constrain "%d" to max_path_length columns:
    #   %d -> %(C,%${max_path_length}<...<%d%<<,)
    # %(x,true,false) expands the true branch when x holds; "C" (current
    # path is under "/") is always true, and only serves to scope the
    # truncation that follows.
    # %N<...<%d%<<: if "%d" exceeds N columns, truncate it and show "..."
    # instead, so the result never exceeds N columns.
    bar_right=${prompt_bar_right:s/%d/%(C,%${max_path_length}<...<%d%<<,)/}
    # A run of "-" characters to fill the gap; deliberately generous in
    # length since it is clamped below.
    local separator="${(l:${bar_rest_length}::-:)}"
    # Clamp the whole right side to the remaining width:
    # %N<<...%<< limits "..." to at most N columns.
    bar_right="%${bar_rest_length}<<${separator}${bar_right}%<<"
    # Assemble: bar line, newline, second-line prompt.
    PROMPT="${bar_left}${bar_right}"$'\n'"${prompt_left}"
    # Right prompt: current directory ("~"-abbreviated where possible),
    # bold white on magenta.
    RPROMPT="[%{%B%F{white}%K{magenta}%}%~%{%k%f%b%}]"
    # Collect version-control information ...
    LANG=C vcs_info >&/dev/null
    # ... and prepend it to the right prompt when available.
    if [ -n "$vcs_info_msg_0_" ]; then
        RPROMPT="${vcs_info_msg_0_}-${RPROMPT}"
    fi
}
## Hook: refresh the prompt before each command prompt is shown.
precmd_functions=($precmd_functions update_prompt)
### Old settings kept for reference (disabled): exit if non-interactive.
#[ $#PROMPT -eq 0 -o $#TERM -eq 0 ] && return
## Old PROMPT/RPROMPT definitions (disabled).
#local GREEN=$'%{\e[1;32m%}'
#local BLUE=$'%{\e[1;34m%}'
#local DEFAULT=$'%{\e[00;m%}'
#
#PROMPT=$BULE'[${USER}@${HOSTNAME}] %(!.#.$) '$DEFAULT
#RPROMPT=$GREEN'[%~]'$DEFAULT
### Autocomplete
autoload -U compinit
#compinit -u
compinit
## Group completion candidates by completion method.
### Display format of the method label:
### %B...%b: bold.
### %d: the completion-method label.
zstyle ':completion:*' format '%B%d%b'
zstyle ':completion:*' group-name ''
## Select completion candidates from a menu.
### select=2: use menu selection, but complete immediately when there
### are fewer than two candidates.
zstyle ':completion:*:default' menu select=2
## Colorize completion candidates.
### "": the empty string means "use the default colors".
zstyle ':completion:*:default' list-colors ""
## Fall back to fuzzier matching when nothing matches.
### m:{a-z}={A-Z}: also match the upper-case counterpart.
### r:|[._-]=*: treat ".", "_", "-" as if preceded by a "*" wildcard.
zstyle ':completion:*' matcher-list 'm:{a-z}={A-Z} r:|[._-]=*'
## Completion strategies, tried in this order:
### _oldlist: reuse the previous completion result.
### _complete: normal completion.
### _match: complete from the candidate list without expanding globs.
### _history: also offer commands from history.
### _ignored: include candidates that were configured to be ignored.
### _approximate: also offer near-matching candidates.
### _prefix: ignore everything after the cursor and complete the prefix.
zstyle ':completion:*' completer \
    _oldlist _complete _match _history _ignored _approximate _prefix
## Cache completion candidates.
zstyle ':completion:*' use-cache yes
## Use verbose completion information.
zstyle ':completion:*' verbose yes
## Use sudo's search path when completing after sudo (disabled).
#zstyle ':completion:sudo:*' environ PATH="$SUDO_PATH:$PATH"
## Complete from the cursor position (mid-word).
setopt complete_in_word
## Complete from the candidate list instead of expanding globs.
setopt glob_complete
## Expand history references automatically while completing.
setopt hist_expand
# Sort numbered file names numerically instead of lexically.
setopt numeric_glob_sort
# No beeping.
setopt no_beep
# Allow completion after "=" in arguments such as --prefix=/usr.
setopt magic_equal_subst
# Append "/" to directory names produced by filename expansion.
setopt mark_dirs
# Treat #, ~ and ^ as globbing (extended glob) characters.
setopt extended_glob
# Make the jobs builtin behave like "jobs -l" by default.
setopt long_list_jobs
# Login/logout
## Watch all users' logins and logouts ...
watch="all"
## ... and report current logins immediately at startup.
log
# Don't exit on Ctrl+D (use exit or logout instead).
setopt ignore_eof
# Words
## Treat "/" as a word separator too (nicer Ctrl+W behavior on paths).
WORDCHARS=${WORDCHARS:s,/,,}
# Aliases
## Pipe into the pager:
### grep -r def *.rb L -> grep -r def *.rb |& lv
alias -g L="|& $PAGER"
## Pipe into grep.
alias -g G='| grep'
## Assorted extras.
alias -g H='| head'
alias -g T='| tail'
alias -g S='| sed'
## Remove for real, bypassing the interactive rm alias below.
alias rr="command rm -rf"
## Confirm destructive file operations.
alias rm="rm -i"
alias cp="cp -i"
alias mv="mv -i"
## pushd/popd shortcuts.
alias pd="pushd"
alias po="popd"
alias la="ls -lhAF --color=auto"
alias ps="ps -fU$(whoami) --forest"
# Window title
## Show the running command plus user, host and current directory.
update_title() {
    local command_line=
    typeset -a command_line
    command_line=${(z)2}
    local command=
    # ${(z)...} word-splits the command line; if that yielded an array,
    # use its first word as the command name, else fall back to the raw line.
    if [ ${(t)command_line} = "array-local" ]; then
        command="$command_line[1]"
    else
        command="$2"
    fi
    # xterm title sequence: ESC ]2; <title> BEL
    print -n -P "\e]2;"
    echo -n "(${command})"
    print -n -P " %n@%m:%~\a"
}
## Only retitle the window when running under X.
if [ -n "$DISPLAY" ]; then
    preexec_functions=($preexec_functions update_title)
fi
| true |
bd907e9d6fc257c6abf89ab6eec9e384ed0079d6 | Shell | Epiconcept-Paris/whiteboard | /deploy/monitorLogs.sh | UTF-8 | 830 | 2.890625 | 3 | [
"BSD-3-Clause"
] | permissive | #!/bin/bash
# Tail the stdout/stderr logs of every running YARN container of the
# application matching $1, all at once.  A remote pipeline on "sparkrunner"
# resolves application -> attempt -> containers and emits one
# "ssh yarn@<host> tail -f <logdir>/<container>" command line per container;
# each is then run in the background with /stderr and /stdout appended.
pids=""
commandlog="logcommand.out"
ssh sparkrunner "/space/hadoop/hadoop_home/bin/yarn application -list | grep RUNNING | grep "$1" | awk '{print \$1}' | xargs /space/hadoop/hadoop_home/bin/yarn applicationattempt -list | grep appattempt | grep RUNNING | awk '{print \$1}' | xargs /space/hadoop/hadoop_home/bin/yarn container -list | grep container_ | awk '{print \$1 \" \" \$10}' | awk '{split(\$1, a, \"_\");split(\$2, b, \":\");split(b[1], c, \"-\");print \"ssh yarn@\" c[1] \".oxa tail -n 10 -f /space/hadoop/hadoop_run/logs/yarn/application_\" a[3] \"_\" a[4] \"/\" \$1 }'"> $commandlog
filename="$commandlog"
# Launch one background tail per stream and remember the PIDs.
while read -r line
do
    $line/stderr &
    pids="$pids $!"
    $line/stdout &
    pids="$pids $!"
done < "$filename"
rm $commandlog
echo "PIDS: oo$pids oo"
# Kill all background tails on Ctrl+C / TERM, then wait on them.
trap "kill -9 $pids" SIGINT SIGTERM
wait $pids
| true |
e95443873a2673a7a985b06b6d4ad36aa6105857 | Shell | surendra-chouhan/Shell-Scripting-Programs | /Arrays/digitsRepeated.sh | UTF-8 | 568 | 3.734375 | 4 | [] | no_license | #!/bin/bash
# Print the numbers in 0..100 whose decimal digits read the same reversed;
# within this range those are exactly the two-digit "repeated digit"
# numbers 11 22 ... 99.

# Build the candidate range 0..100.
for ((j=0; j<=100; j++))
do
    range[j]=$j
done
echo "The range is : " ${range[@]}

# Collect every multi-digit number that equals its own digit reversal.
array=()
for num in "${range[@]}"
do
    # Single-digit numbers are skipped, as in the original exercise.
    if [ $num -lt 10 ]
    then
        continue
    fi
    # Build the decimal reversal of $num as a string (least digit first).
    rev=""
    temp=$num
    while [ $temp -gt 0 ]
    do
        rev="${rev}$(( temp % 10 ))"
        temp=$(( temp / 10 ))
    done
    # Numeric -eq ignores leading zeros in $rev (e.g. 100 -> "001" -> 1).
    if [ $num -eq $rev ]
    then
        array+=("$num")
    fi
done
echo "The digits that are repeated twice are : "${array[@]}
5a076a1e30770fbe64cbb018e40935fa281ef766 | Shell | satishkori/weave-demos | /quartet/scripts/setup-cluster-dev.sh | UTF-8 | 2,856 | 3.453125 | 3 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/bin/bash -xe
## DEVELOPMENT VERSION OF `setup-cluster.sh`, YOU SHOULD PROBABLY
## USE `setup-cluster.sh`, UNLESS YOU KNOW WHAT YOU ARE DOING.
# Creates a 3-node Docker Machine cluster running Swarm with Weave
# networking; dev variant loads locally built weave images from ~/Code/weave.
# Variables like $DOCKER_MACHINE_CREATE, $DOCKER_SWARM_CREATE, $WEAVE and
# $MACHINE_NAME_PREFIX come from defaults.sh.
source $(git rev-parse --show-toplevel)/quartet/scripts/defaults.sh

WEAVE="${WEAVE}-dev"

head_node="${MACHINE_NAME_PREFIX}-1"

## Initial token to keep Machine happy
temp_swarm_dicovery_token="token://$(${DOCKER_SWARM_CREATE})"
swarm_flags="--swarm --swarm-discovery=${temp_swarm_dicovery_token}"
## Actual token to be used with proxied Docker
swarm_dicovery_token="token://$(${DOCKER_SWARM_CREATE})"

# Remote snippet: recover the daemon's --tls* flags from its /proc cmdline.
find_tls_args="cat /proc/\$(pgrep /usr/local/bin/docker)/cmdline | tr '\0' '\n' | grep ^--tls | tr '\n' ' '"

for i in '1' '2' '3'; do
  if [ ${i} = '1' ]; then
    ## The first machine shall be the Swarm master
    $DOCKER_MACHINE_CREATE \
      ${swarm_flags} \
      --swarm-master \
      "${MACHINE_NAME_PREFIX}-${i}"
  else
    ## The rest of machines are Swarm slaves
    $DOCKER_MACHINE_CREATE \
      ${swarm_flags} \
      "${MACHINE_NAME_PREFIX}-${i}"
  fi

  ## This environment variable is respected by Weave,
  ## hence it needs to be exported
  export DOCKER_CLIENT_ARGS="$($DOCKER_MACHINE config)"

  # Dev-only: side-load the locally built weave images onto the new node.
  for c in weave weavedns weaveexec; do
    docker ${DOCKER_CLIENT_ARGS} load -i ~/Code/weave/${c}.tar
  done

  tlsargs=$($DOCKER_MACHINE ssh "${MACHINE_NAME_PREFIX}-${i}" "${find_tls_args}")

  ## We are going to use IPAM, hence we launch it with
  ## the following arguments
  $WEAVE launch -iprange 10.2.3.0/24 -initpeercount 3
  ## WeaveDNS also needs to be launched
  $WEAVE launch-dns "10.9.1.${i}/24" -debug
  ## And now the proxy
  $WEAVE launch-proxy --with-dns --with-ipam ${tlsargs}

  ## Let's connect-up the Weave cluster by telling
  ## each of the node about the head node
  if [ ${i} -gt '1' ]; then
    $WEAVE connect $($DOCKER_MACHINE ip ${head_node})
  fi

  ## Default Weave proxy port is 12375, we shall point
  ## Swarm agents at it next
  weave_proxy_endpoint="$($DOCKER_MACHINE ip):12375"

  ## Now we need restart Swarm agents like this
  $DOCKER ${DOCKER_CLIENT_ARGS} rm -f swarm-agent
  $DOCKER ${DOCKER_CLIENT_ARGS} run -d --name=swarm-agent \
      swarm join \
      --addr ${weave_proxy_endpoint} ${swarm_dicovery_token}
done

## Next we will also restart the Swarm master with the new token
export DOCKER_CLIENT_ARGS=$($DOCKER_MACHINE config ${head_node})

# Go-template that reconstructs the master's run arguments from inspect;
# sed swaps the temporary discovery token for the real one.
swarm_master_args_fmt='-d --name={{.Name}} -p 3376:3376 {{range .HostConfig.Binds}}-v {{.}} {{end}}swarm{{range .Args}} {{.}}{{end}}'

swarm_master_args=$($DOCKER ${DOCKER_CLIENT_ARGS} inspect \
    --format="${swarm_master_args_fmt}" \
    swarm-agent-master \
      | sed "s|${temp_swarm_dicovery_token}|${swarm_dicovery_token}|")

$DOCKER ${DOCKER_CLIENT_ARGS} rm -f swarm-agent-master
$DOCKER ${DOCKER_CLIENT_ARGS} run ${swarm_master_args}

## And make sure Weave cluster setup is complete
$WEAVE status
| true |
9407b65a057060a0bde2aea79fdf3e18e3020215 | Shell | zsxwz/zstermux | /cloud-torrent.sh | UTF-8 | 922 | 2.671875 | 3 | [] | no_license | #!/data/data/com.termux/files/usr/bin/bash
# Termux helper: run cloud-torrent on port 1024, installing it first if needed.

# Make sure GNU screen is available for backgrounding the daemon.
if ! [ -x "$(command -v screen)" ] ; then
    apt install screen -y
fi
# BUG FIX: the binary check previously probed the misspelled name
# "cloud-torrert", which never exists, so the install branch re-downloaded
# the binary on every run.  Check the real name instead.
if [ -x "$(command -v cloud-torrent)" ] ; then
    cd ~
    # Start cloud-torrent detached inside a screen session.
    screen -dmS cloud-torrent cloud-torrent -p 1024
    echo "cloud-torrent已在后台运行,请用浏览器打开访问,localhost:1024"
    am start -a android.intent.action.VIEW -d http://localhost:1024
    cd ~
    sh zs.sh
else
    cd ~
    # Download, unpack and install the arm64 binary into Termux's PATH.
    wget https://github.zsxwz.com/https://github.com/boypt/simple-torrent/releases/download/1.2.11/cloud-torrent_linux_arm64.gz
    gzip -d cloud-torrent_linux_arm64.gz
    chmod +x cloud-torrent_linux_arm64
    mv cloud-torrent_linux_arm64 /data/data/com.termux/files/usr/bin/cloud-torrent
    screen -dmS cloud-torrent cloud-torrent -p 1024
    echo "cloud-torrent已在后台运行,请用浏览器打开访问,localhost:1024"
    echo ""
    am start -a android.intent.action.VIEW -d http://localhost:1024
    cd ~
    sh zs.sh
fi
exit
| true |
5780e2d20daacd6539d4b128ff4a1e7b33274b08 | Shell | boklm/tor-browser-build | /projects/compiler-rt/build | UTF-8 | 1,807 | 2.703125 | 3 | [] | no_license | #!/bin/bash
[% c("var/set_default_env") -%]
distdir=/var/tmp/dist/[% project %]
mkdir -p /var/tmp/dist
tar -C /var/tmp/dist -xf [% c('input_files_by_name/cmake') %]
tar -C /var/tmp/dist -xf [% c('input_files_by_name/ninja') %]
export PATH="/var/tmp/dist/ninja:/var/tmp/dist/cmake/bin:$PATH"
[% pc(c('var/compiler'), 'var/setup', { compiler_tarfile => c('input_files_by_name/' _ c('var/compiler')) }) %]
tar -C /var/tmp/dist -xf [% c('input_files_by_name/binutils') %]
export PATH="/var/tmp/dist/binutils/bin:$PATH"
mkdir -p /var/tmp/build
cd /var/tmp/build
tar -xf $rootdir/[% c('input_files_by_name/clang-source') %]
cd clang-source
export LLVM_HOME=$(pwd)
mkdir build
cd build
installdir=/var/tmp/build/install
mkdir -p $installdir
cmake ../compiler-rt/ -GNinja \
-DCMAKE_INSTALL_PREFIX=$installdir \
-DCMAKE_BUILD_TYPE=Release \
-DCMAKE_SYSTEM_NAME=Android \
-DCMAKE_ANDROID_ARCH_ABI="[% c('var/abi') %]" \
-DCMAKE_ANDROID_NDK="$ANDROID_NDK_HOME" \
-DCMAKE_C_FLAGS="-fuse-ld=lld --rtlib=compiler-rt $defines" \
-DCMAKE_CXX_FLAGS="-fuse-ld=lld --rtlib=compiler-rt $defines" \
-DCMAKE_EXE_LINKER_FLAGS="-L$ANDROID_NDK_HOME/toolchains/llvm/prebuilt/linux-x86_64/[% c('var/libdir') %]" \
-DCOMPILER_RT_BUILD_BUILTINS=ON \
-DCOMPILER_RT_BUILD_LIBFUZZER=OFF \
-DCOMPILER_RT_BUILD_MEMPROF=OFF \
-DCOMPILER_RT_BUILD_ORC=OFF \
-DCOMPILER_RT_BUILD_PROFILE=OFF \
-DCOMPILER_RT_BUILD_SANITIZERS=OFF \
-DCOMPILER_RT_BUILD_XRAY=OFF
ninja -j[% c("num_procs") %] -v install
mkdir -p $distdir/lib/clang/[% c("var/llvm_version") %]/lib/linux/
mv $installdir/lib/linux/libclang_rt.builtins-*-android.a $distdir/lib/clang/[% c("var/llvm_version") %]/lib/linux/
cd /var/tmp/dist
[% c('tar', {
tar_src => [ project ],
tar_args => '-caf ' _ dest_dir _ '/' _ c('filename'),
}) %]
| true |
7baff9c3c610d73fd5a2e73199aa914f86eebd0d | Shell | Grigory-Rylov/andoid_method_trace_recorder | /scripts/integration_test.sh | UTF-8 | 1,212 | 3.546875 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Integration tests of Android Methods Tracer Recorder
set -x

# TODO(geaden): Assign version dynamically
COMMAND="java -jar app/build/libs/android-profiler-2.0.0.jar -m -a com.github.grishberg.testapp.MainActivity -p com.github.grishberg.testapp -t 15"

echo '1. Compile and build latest version of AMTR'
./gradlew app:fatJar

echo '2. Compile and build latest version of testapp'
pushd testapp
./gradlew :app:installDebug
popd

echo '3. Clean test trace files'
rm /tmp/*.trace

echo '4. Start trace for first device'
FIRST_TRACE=/tmp/first.trace
eval "$COMMAND -o $FIRST_TRACE"
test -f "$FIRST_TRACE" || {
  echo 'Failed to create trace file'
  exit 1
}

echo '5. Start trace for device serial'
# Feed the loop via process substitution instead of a pipe so that it runs
# in the current shell: "exit 1" then aborts the whole script rather than a
# throwaway subshell (this fixes the old TODO about execution not stopping).
while read -r line
do
  # Only rows whose second column is "device" name an attached device.
  if [ ! "$line" = "" ] && [ "$(echo "$line" | awk '{print $2}')" = "device" ]
  then
    device=$(echo "$line" | awk '{print $1}')
    SERIAL_TRACE="/tmp/$device.trace"
    eval "$COMMAND -serial $device -o $SERIAL_TRACE"
    test -f "$SERIAL_TRACE" || {
      echo 'Failed to create trace file'
      exit 1
    }
  fi
done < <(adb devices)
echo "PASSED."
| true |
1bc6b439ab14134eb3be5cdc36f1fd0bc6fda3c4 | Shell | oxtopus/nupic-build-matrix | /build-all.sh | UTF-8 | 400 | 3 | 3 | [] | no_license | #!/bin/bash
set -o errexit

# Build the NuPIC Docker image for one platform.
#   $1 - git URL of the nupic repository
#   $2 - context directory containing that platform's Dockerfile
#   $3 - image tag suffix identifying the platform/toolchain
build () {
  git clone --depth=1 -- "$1" "$2/nupic"
  pushd "$2"
  # Tag the image with the cloned commit hash plus the platform suffix.
  docker build --no-cache -t "nupic:$(cd nupic && git rev-parse HEAD)-$3" .
  rm -rf nupic
  popd
}

build "$1" ubuntu/14.04/clang ubuntu-14.04-clang
build "$1" ubuntu/14.04/gcc ubuntu-14.04-gcc
build "$1" debian/jessie/clang debian-jessie-clang
build "$1" centos/7/gcc centos-7-gcc
build "$1" centos/6/gcc centos-6-gcc
| true |
27c9ec6c7f8e2e944738747445162973c88513d3 | Shell | nkjzm/fashion-parsing | /examples/tangseng/convert_h5_to_png.sh | UTF-8 | 940 | 3.203125 | 3 | [
"LicenseRef-scancode-generic-cla",
"BSD-2-Clause",
"LicenseRef-scancode-public-domain",
"BSD-3-Clause"
] | permissive | # Convert data in h5 to appropiate format for CRF
DATASETS=(
"tmm_dataset_sharing"
"fashionista-v1.0"
"fashionista-v0.2"
)
MODELS=(
"attrlog"
"sege-8s"
)
ITERATION=80000
MODEL_DIR="models/"
for dataset in ${DATASETS[@]}; do
if [ ${dataset} = "tmm_dataset_sharing" ]
then
python examples/fashionpose/export.py \
--inputs data/tmm_dataset_sharing/TMM_test.h5 \
--output data/tmm_dataset_sharing/testimages
else
python examples/fashionpose/export.py \
--inputs data/${dataset}/test-1.h5 \
--output data/${dataset}/testimages
fi
for model in ${MODELS[@]}; do
if [ ${dataset} = "tmm_dataset_sharing" ]
then
MODEL_SUFFIX="tmm"
else
MODEL_SUFFIX=${dataset}
fi
python examples/fashionpose/export.py \
--inputs ${MODEL_DIR}${model}-${MODEL_SUFFIX}/test-iter${ITERATION}.h5 \
--output ${MODEL_DIR}${model}-${MODEL_SUFFIX}/mask
done
done | true |
c364bf3a170ed7aed79c5edff08e7b9f6df7d0cc | Shell | yongjinwu/C_Projects | /calculator.sh | UTF-8 | 468 | 3.875 | 4 | [] | no_license | #!/bin/sh
num="Usage -./calculator.sh value1 operator value2\nWhere,\nvalue1: numeric value\nvalue2: numeric value\noperator: one of +,-,/,x"
if [ $# -eq 0 ]; then
echo -e $num
exit
elif [ "$2" == "/" ] && [ "$3" -eq 0 ]; then
echo "Division-by-zero Error!"
exit
elif [ "$2" == "+" ]; then
num=$(( $1 + $3 ))
elif [ "$2" == "-" ]; then
num=$(( $1 - $3 ))
elif [ "$2" == "/" ]; then
num=$(( $1 / $3 ))
elif [ "$2" == "x" ]; then
num=$(( $1 * $3 ))
fi
echo -e $num | true |
1fb867bd76c0c2f3765d05c43bdcfca5566bc95f | Shell | richyen/toolbox | /pg/fdw/mysql_fdw/entrypoint.sh | UTF-8 | 1,090 | 3 | 3 | [] | no_license | #!/bin/bash
if [[ ${HOSTNAME} == 'pg' ]]; then
yum -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
yum -y install centos-release-scl-rh
yum -y install postgresql13-devel centos-release-scl-rh llvm-devel vim git mariadb-devel
yum -y groupinstall development
yum -y install postgresql${PGMAJOR}-contrib
su - postgres -c "pg_ctl -D /var/lib/pgsql/${PGMAJOR}/data start"
git clone https://github.com/enterprisedb/mysql_fdw.git
cd mysql_fdw && make USE_PGXS=1 && make USE_PGXS=1 install
psql -c "create extension mysql_fdw" postgres postgres
psql -c "CREATE SERVER mysql_server FOREIGN DATA WRAPPER mysql_fdw options (host 'mysql');" postgres postgres
psql -c "CREATE USER MAPPING FOR postgres SERVER mysql_server OPTIONS (username 'root', password 'example');" postgres postgres
psql -c "CREATE FOREIGN TABLE fdw_test (time_zone_id int, use_leap_seconds boolean) server mysql_server options (dbname 'mysql', table_name 'time_zone');" postgres postgres
psql -c "select * from fdw_test;"
fi
# Keep things running
tail -f /dev/null
| true |
c959b6d432eff41b5d662b815524d514944cf426 | Shell | bcsflilong/box | /updatenote | UTF-8 | 443 | 2.9375 | 3 | [] | no_license | #!/bin/sh
# 批处理提交本地文件到csdn code
get_char()
{
SAVEDSTTY=`stty -g`
stty -echo
stty cbreak
dd if=/dev/tty bs=1 count=1 2> /dev/null
stty -raw
stty echo
stty $SAVEDSTTY
}
GIT_HOME=/home/tone/公共/git/box
CIMMIT_INFO="`date` 更新文件"
echo $CIMMIT_INFO
cd $GIT_HOME
git status
git pull
git status
git add .
git commit -m "${CIMMIT_INFO}"
#git log
git push origin master
echo "Press any key to continue!"
char=`get_char`
| true |
324e8337e34792c8c3e90c92322f3097075dc026 | Shell | tiagoprn/devops | /shellscripts/databases/pg_dump.sh | UTF-8 | 418 | 3.171875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
HOST="localhost"
PORT=5432
USERNAME="tmc"
PASSWORD="v3t3x2y2"
DATABASE="tmc_financeiro"
ROLE=$USERNAME
FORMAT="c"
FILE="${DATABASE}.${FORMAT}.backup"
UNIX_TIMESTAMP=$(date +%s)
printf "When prompted, type PASSWORD: ${PASSWORD}\n"
pg_dump --host $HOST --port $PORT --username $USERNAME --role $ROLE --format $FORMAT --verbose --file $FILE $DATABASE
tar cfjv $FILE.$UNIX_TIMESTAMP.tar.bz2 $FILE
rm $FILE
| true |
d4958b12e962e729cb11a0a4cde5f41da81b0272 | Shell | hadi1315/linux_shell_scripting | /tutorial_scripts/video_tutorial_05.sh | UTF-8 | 379 | 2.609375 | 3 | [] | no_license | #!/usr/bin/env bash
cat > test_file # creates file with text
# test_file content is entered here...
cat < test_file # prints the content
cat test_file # prints the content
cat > test_file2 # creates file with text
# test_file2 content is entered here..
cat test_file test_file2 > merged_test_files # merge two files
cat < merged_test_files # shows both test files' content
| true |
1515b48b8ea78a349b2e9c5577a76e12c952f3bb | Shell | goFrendiAsgard/stiki-ir-2021-demo-crawler | /coba.sh | UTF-8 | 360 | 2.515625 | 3 | [] | no_license | echo "[" > hasil.json
for KEYWORD in "sepeda" "fixie" "masker"
do
URL="https://api.bukalapak.com/searches/suggestions?word=${KEYWORD}&access_token=pXZBFJn14JRN47ouN4ZIF7vZWfxRDFPi8Vy66_4REBHZLg"
curl --location --request GET "${URL}" \
--header 'Cookie: __cfduid=d812448bce8383a32a3ced455a794b0cd1615366991' >> hasil.json
echo "," >> hasil.json
sleep 1
done
echo "]" >> hasil.json | true |
6534ba8854ec682f36a2bd123318d8fed884f633 | Shell | MarcCoru/crop-type-mapping | /download.sh | UTF-8 | 1,144 | 3.265625 | 3 | [
"MIT"
] | permissive | #!/bin/bash
function downloadBavarianCrops {
mkdir data
cd data
wget https://zenodo.org/record/5707142/files/BavarianCrops.zip
unzip -o BavarianCrops.zip
rm BavarianCrops.zip
cd ..
}
function downloadmodels {
mkdir models
cd models
wget https://zenodo.org/record/5707142/files/models.zip
unzip -o models.zip
rm models.zip
cd ..
}
function downloadnotebookdata {
mkdir data
cd data
wget https://zenodo.org/record/5707142/files/notebookdata.zip
unzip -o notebookdata.zip
rm notebookdata.zip
cd ..
}
function downloaddataduplo {
mkdir models
cd models
wget https://zenodo.org/record/5707142/files/duplo.zip
unzip -o duplo.zip
rm duplo.zip
cd ..
}
if [ "$1" == "dataset" ]; then
downloadBavarianCrops
elif [ "$1" == "models" ]; then
downloadmodels
elif [ "$1" == "notebookdata" ]; then
downloadnotebookdata
elif [ "$1" == "duplo" ]; then
downloaddataduplo
elif [ "$1" == "all" ]; then
downloadBavarianCrops
downloadmodels
downloadnotebookdata
downloaddataduplo
else
echo "please provide 'dataset', 'models','notebookdata', 'duplo', or 'all' as argument"
fi
| true |
36459436d0c376c7b27186d64801abe7d4fef497 | Shell | windystrife/UnrealEngine_NVIDIAGameWorks | /Engine/Build/BatchFiles/Mac/AddEULAToDMG.sh | UTF-8 | 391 | 2.765625 | 3 | [
"MIT",
"LicenseRef-scancode-proprietary-license"
] | permissive | #/bin/sh
DMG_PATH=$1
DMG_TEMP_PATH="${DMG_PATH}.temp.dmg"
EULA_PATH=$2
# Unflatten the image
hdiutil unflatten "$DMG_PATH"
# Append the rez file containing the EULA
xcrun Rez -isysroot /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.9.sdk Carbon.r "$EULA_PATH" -a -o "$DMG_PATH"
# Flatten the image again
hdiutil flatten "$DMG_PATH"
exit $?
| true |
e74a315ab700c6da2c1b7c530e71719d8ceae23d | Shell | dimduj/Drupal-Jmeter | /DrupalStress.sh | UTF-8 | 430 | 2.59375 | 3 | [] | no_license | #!/bin/bash
# The host under test.
HOST=site1.dpi247.dev
# A Drupal username.
USER=toto
# USER's password
PASS='toto'
# A node id to edit.
EDIT_ID=42
# Ramp up by factors of sqrt(2).
#for thread_count in 2 3 4 6 8 11 16 23 32 45 64 91 128 181 256 362 512
for thread_count in 1
do
jmeter.sh -n -t DrupalStress.jmx -Jhost=$HOST -Juser=$USER\
-Jpassword=$PASS -Jthreads=$thread_count -Jedit_id=$EDIT_ID
done | true |
165d148b3f834ec20c9633492d037e7c47b84a11 | Shell | ilventu/aur-mirror | /perl-find-lib/PKGBUILD | UTF-8 | 744 | 2.53125 | 3 | [] | no_license | # Maintainer: xduugu
pkgname=perl-find-lib
pkgver=1.01
pkgrel=1
pkgdesc="Helper to smartly find libs to use in the filesystem tree"
arch=('any')
url="http://search.cpan.org/dist/Find-Lib/"
license=('GPL' 'PerlArtistic')
depends=('perl')
options=('!emptydirs')
source=(http://search.cpan.org/CPAN/authors/id/Y/YA/YANNK/Find-Lib-$pkgver.tar.gz)
md5sums=('8986f9679a88fb265a5e5cf4720f22a1')
sha256sums=('bf2088885b2c4cc660d5cf1c2d5e8330b119b1ee8ead64b018ed16f3ad62e5be')
build() {
cd "$srcdir/Find-Lib-$pkgver" &&
PERL_MM_USE_DEFAULT=1 perl Makefile.PL INSTALLDIRS=vendor &&
make &&
make install DESTDIR="$pkgdir" || return 1
find "$pkgdir" -name '.packlist' -delete
find "$pkgdir" -name '*.pod' -delete
}
# vim:set ts=2 sw=2 et:
| true |
27ebf537e2eeecabe437fd0f206cedbe02402014 | Shell | hhadian/kaldi | /egs/mgb2_arabic/s5/local/mgb_train_lms_extra_pocolm.sh | UTF-8 | 4,819 | 3.703125 | 4 | [
"Apache-2.0",
"LicenseRef-scancode-public-domain"
] | permissive | #!/usr/bin/env bash
# Copyright 2016 Johns Hopkins University (author: Daniel Povey)
# 2017 Vimal Manohar
# Apache 2.0
#
# It is based on the example scripts distributed with PocoLM
# It will first check if pocolm is installed and if not will process with installation
set -e
stage=0
set -o pipefail
set -u
stage=0
dir=data/local/pocolm
cmd=run.pl
echo "$0 $@" # Print the command line for logging
. utils/parse_options.sh || exit 1;
lm_text=$1
mer=$2
if [ $# -ne 2 ]; then
echo "Usage: $0 <lm-text> <mer>"
exit 1
fi
lm_dir=${dir}/data
mkdir -p $dir
. ./path.sh || exit 1; # for KALDI_ROOT
export PATH=$KALDI_ROOT/tools/pocolm/scripts:$PATH
( # First make sure the pocolm toolkit is installed.
cd $KALDI_ROOT/tools || exit 1;
if [ -d pocolm ]; then
echo Not installing the pocolm toolkit since it is already there.
else
echo "$0: Please install the PocoLM toolkit with: "
echo " cd ../../../tools; extras/install_pocolm.sh; cd -"
exit 1;
fi
) || exit 1;
num_dev_sentences=30000
RANDOM=0
if [ $stage -le 0 ]; then
mkdir -p ${dir}/data
mkdir -p ${dir}/data/text
echo "$0: Getting the Data sources"
rm ${dir}/data/text/* 2>/dev/null || true
# Full acoustic transcripts
cat data/train_mer$mer/text | cut -d ' ' -f 2- | \
shuf > ${dir}/train_mer${mer}_text
head -n $num_dev_sentences < ${dir}/train_mer${mer}_text > \
${dir}/data/text/dev.txt
tail -n +$[num_dev_sentences+1] < ${dir}/train_mer${mer}_text | \
gzip -c > \
${dir}/data/text/train_mer${mer}.txt.gz
# Get text from the extra LM corpus
cat $lm_text | gzip -c > ${dir}/data/text/mgb_arabic.txt.gz
cp data/dev_non_overlap/text ${dir}/data/mgb2_dev.txt
fi
if [ $stage -le 1 ]; then
mkdir -p $dir/data/work
if [ ! -f $dir/data/work/word_counts/.done ]; then
get_word_counts.py $dir/data/text $dir/data/work/word_counts
touch $dir/data/work/word_counts/.done
fi
fi
lexicon=data/local/dict/lexicon.txt
[ ! -f $lexicon ] && echo "$0: No such file $lexicon" && exit 1;
if [ $stage -le 2 ]; then
cat $lexicon | awk '{print $1}' > $dir/data/work/wordlist
wordlist_to_vocab.py --unk-symbol="<UNK>" $dir/data/work/wordlist > \
$dir/data/work/vocab_wordlist.txt
touch $dir/data/work/.vocab_wordlist.txt.done
fi
order=4
wordlist=$dir/data/work/wordlist
min_counts="default=5 train_mer${mer}=2"
lm_name="`basename ${wordlist}`_${order}"
if [ -n "${min_counts}" ]; then
lm_name+="_`echo ${min_counts} | tr -s "[:blank:]" "_" | tr "," "." | tr "=" "-"`"
fi
unpruned_lm_dir=${lm_dir}/${lm_name}.pocolm
export PATH=$KALDI_ROOT/tools/pocolm/scripts:$PATH
if [ $stage -le 3 ]; then
echo "$0: training the unpruned LM"
$cmd ${unpruned_lm_dir}/log/train.log \
train_lm.py --wordlist=$wordlist --num-splits=10 --warm-start-ratio=20 \
--limit-unk-history=true \
--fold-dev-into=train_mer$mer \
--min-counts="${min_counts}" \
${dir}/data/text ${order} ${lm_dir}/work ${unpruned_lm_dir}
for x in mgb2_dev; do
$cmd ${unpruned_lm_dir}/log/compute_data_prob_${x}.log \
get_data_prob.py ${dir}/data/${x}.txt ${unpruned_lm_dir}
cat ${unpruned_lm_dir}/log/compute_data_prob_${x}.log | grep -F '[perplexity'
done
fi
if [ $stage -le 4 ]; then
echo "$0: pruning the LM (to larger size)"
# Using 20 million n-grams for a big LM for rescoring purposes.
size=20000000
$cmd ${dir}/data/lm_${order}_prune_big/log/prune_lm.log \
prune_lm_dir.py --target-num-ngrams=$size --initial-threshold=0.02 \
${unpruned_lm_dir} ${dir}/data/lm_${order}_prune_big
for x in mgb2_dev; do
$cmd ${dir}/data/lm_${order}_prune_big/log/compute_data_prob_${x}.log \
get_data_prob.py ${dir}/data/${x}.txt ${dir}/data/lm_${order}_prune_big
cat ${dir}/data/lm_${order}_prune_big/log/compute_data_prob_${x}.log | grep -F '[perplexity'
done
mkdir -p ${dir}/data/arpa
format_arpa_lm.py ${dir}/data/lm_${order}_prune_big | gzip -c > ${dir}/data/arpa/${order}gram_big.arpa.gz
fi
if [ $stage -le 5 ]; then
echo "$0: pruning the LM (to smaller size)"
# Using 2 million n-grams for a smaller LM for graph building. Prune from the
# bigger-pruned LM, it'll be faster.
size=2000000
$cmd ${dir}/data/lm_${order}_prune_small/log/prune_lm.log \
prune_lm_dir.py --target-num-ngrams=$size ${dir}/data/lm_${order}_prune_big \
${dir}/data/lm_${order}_prune_small
for x in mgb2_dev; do
$cmd ${dir}/data/lm_${order}_prune_small/log/compute_data_prob_${x}.log \
get_data_prob.py ${dir}/data/${x}.txt ${dir}/data/lm_${order}_prune_small
cat ${dir}/data/lm_${order}_prune_small/log/compute_data_prob_${x}.log | grep -F '[perplexity'
done
format_arpa_lm.py ${dir}/data/lm_${order}_prune_small | gzip -c > ${dir}/data/arpa/${order}gram_small.arpa.gz
fi
| true |
116d311fe4b262def069e866d8a051c73f1499bc | Shell | Hank0438/NCTU_SA | /hw2-1 | UTF-8 | 387 | 2.96875 | 3 | [] | no_license | #!/bin/sh
# top 5
ls -l -AR | grep ^- | awk '{print $5 "\t" $9}' | sort -n -r | head -n 5 | awk '{cnt++; print cnt ": " $1 " " $2}'
# total directory except ./
ls -l -AR | grep ^d | wc -l | awk '{print "Dir num: " $1}'
# total file num + 1
ls -l -AR | grep ^- | wc -l | awk '{print "File num: " $1}'
# total file size
ls -l -AR | grep ^- | awk '{ total+=$5 }END{ print "Total: " total}'
| true |
e122c2df60cea6f60e31325371f25ee2a223c63b | Shell | GithubcomRoman/OpenVZ_ct_monitoring | /vzubc_custom.sh | UTF-8 | 6,589 | 3.546875 | 4 | [] | no_license | #!/bin/sh
# vzubc - a tool for displaying OpenVZ user beancounters.
# Copyright (C) 2011-2012, Parallels, Inc. All rights reserved.
# Licensed under GNU GPL v2 or later version.
umask 0077
CTIDS=""
FILE=""
WATCH=""
ARGV=$*
# For vzlist to work, make sure /usr/sbin is in $PATH
if ! echo ":${PATH}:" | fgrep -q ':/usr/sbin:'; then
PATH="/usr/sbin:$PATH"
fi
while test $# -gt 0; do
case $1 in
*)
# Try to convert CT name to ID
ID=$(vzlist -H -o ctid "$1" 2>/dev/null)
if test $? -eq 0; then
CTIDS=$(echo $CTIDS $ID)
else
CTIDS=$(echo $CTIDS $1)
fi
;;
esac
shift
done
chk_zeroarg() {
if test -z "$2"; then
echo "Error: option $1 requires an argument" 1>&2
usage 1
fi
}
if test -n "$thr" -a -z "$color" -a -z "$quiet"; then
echo "Error: -qh and -qm only make sense with --quiet or --color"
usage 1
fi
if test -z "$FILE"; then
# No file name given, substitute sane default
FILE=/proc/bc/resources
test -f $FILE || FILE=/proc/user_beancounters
fi
# Test that input file is readable
if test "$FILE" != "-"; then
cat < "$FILE" > /dev/null || exit 1
fi
# Relative/incremental mode preparations and sanity checks
if test -n "$relative" -o -n "$incremental"; then
# Create dir if needed
if ! test -d $STOREDIR; then
mkdir $STOREDIR || exit 1
fi
# Check we can write to it
touch $STOREDIR/ubc.test || exit 1
rm -f $STOREDIR/ubc.test || exit 1
fi
# Re-exec ourselves under watch
test -z "$watch" || exec watch $WATCH_ARGS -- $0 $ARGV -W
cat $FILE | LANG=C awk -v CTIDS=" ${CTIDS} " -v quiet="$quiet" \
-v qheld="$Q_HELD" -v qmaxheld="$Q_MAXHELD" \
-v rel="$relative" -v storedir="$STOREDIR" \
-v inc="$incremental" $AWK_COLORS \
'
#-----formula
function hr(res, v) {
if ((v == 9223372036854775807) || (v == 2147483647) || (v == 0))
return "- ";
i=1
if ((res ~ /pages$/) && (v != 0)) {
v = v*4; i++
}
while (v >= 1024) {
v=v/1024
i++
}
fmt="%d%c"
if (v < 100)
fmt="%.3g%c"
return sprintf(fmt, v, substr(" KMGTPEZY", i, 1))
}
function dp(p, d) {
if ((d == 0) || (d == 9223372036854775807) || (d == 2147483647))
return "- "
r = sprintf("%.1f", p / d * 100);
fmt="%d"
if (r < 10)
fmt="%.1g"
r = sprintf(fmt, r)
if (r == 0)
return "- "
return r "%"
}
function important(id, held, maxheld, barrier, limit, failcnt) {
if (failcnt > 0)
return 2;
if (barrier == 0)
barrier = limit;
if ((barrier == 9223372036854775807) || (barrier == 2147483647))
return 0;
if (held > barrier)
return 2;
if (held > barrier * qheld)
return 1;
if (maxheld > barrier * qmaxheld)
return 1;
return 0;
}
BEGIN {
if (qheld > 1)
qheld /= 100
if (qmaxheld > 1)
qmaxheld /= 100
if (qheld > qmaxheld)
qheld=qmaxheld
bcid=-1
}
/^Version: / {
if ($2 != "2.5") {
print "Error: unknown version:",
$2 > "/dev/stderr"
exit 1
}
next
}
/^[[:space:]]*uid / {
next
}
/^[[:space:]]*dummy/ {
id=""
next
}
/^[[:space:]]*[0-9]+:/ {
header=""
bcid=int($1)
if ((CTIDS !~ /^[ ]*$/) && (CTIDS !~ " " bcid " ")) {
skip=1
next
}
skip=0
#prepare_header()
id=$2
held=$3
maxheld=$4
barrier=$5
limit=$6
failcnt=$7
}
/^[[:space:]]*[a-z]+/ {
id=$1
held=$2
maxheld=$3
barrier=$4
limit=$5
failcnt=$6
}
((id!="") && (!skip)) {
if ((bcid < 0) && (rel || inc)) {
print "Error: can not use relative/incremental" \
" modes: BCID is unknown" > "/dev/stderr"
exit 1
}
newfc=failcnt
store=storedir "/ubc." bcid "." id
if ( (rel) && (failcnt > 0) ) {
f_file=store ".failcnt"
getline oldfc < f_file
if (oldfc > 0)
failcnt=failcnt-oldfc
if (failcnt < 0)
failcnt=newfc
}
save_held=0
dh=0
if (inc) {
d_held=" "
h_file=store ".held"
save_held=1
getline o_held < h_file
if (o_held >= 0) {
dh=held - o_held
sig="+"
if (dh < 0) {
dh=-dh; sig="-"
}
if (dh != 0)
d_held = sprintf("%7s", sig hr(id, dh))
else
save_held=0
}
}
imp=important(id, held, maxheld, barrier, limit, failcnt)
if ((quiet) && (!imp) && (dh==0)) {
id=""
next
}
if (header != "") {
if (printed)
print ""
print header
printed=1
header=""
}
if (imp == 2)
printf c_e;
else if (imp == 1)
printf c_w;
else
printf c_n;
# printf "%13s|%5s%s %4s %4s|%5s %4s %4s|%5s|%5s| %5s\n" c_n,
printf "%13s | %5s%s %4s %4s | %5s %4s %4s | %5s | %5s | %5s\n" c_n,
id,
hr(id, held), d_held, dp(held, barrier), dp(held, limit),
hr(id, maxheld), dp(maxheld, barrier), dp(maxheld, limit),
hr(id, barrier), hr(id, limit), hr("", failcnt)
if ( (rel) && (newfc > 0) ) {
print newfc > f_file
close(f_file)
}
if (save_held) {
print held > h_file
close(h_file)
}
id=""
}
END {
# if (printed)
# printf "----------------------------------------------------------------%s\n", inc ? "-------" : ""
#printf hr(id, held)
}
'
| true |
8b8f5d531c15a071f8c46d7e8df6947a6a039105 | Shell | habibrahmanbd/SSBandTesting | /scripts/evaluate_all.sh | UTF-8 | 402 | 3.40625 | 3 | [] | no_license | #!/bin/bash
#This script will run for all repositories in a file
#rm -rf /repos/*.csv
#python3 json_preprocess.py
#CSV File for result
rm -rf results.csv
touch results.csv
echo 'ProjectName, Covered, Not Covered, Total (Covered + Not Covered), % of Coverage, %Covered, % Not Covered'>>results.csv
repo_list=$1
while IFS= read -r line; do
echo "$line"
./evaluate.sh $line
done < $repo_list
| true |
dfaece7db2641dec025660031d78576016ea9ee7 | Shell | syslogfather/PurpleteamLab | /Vagrant/logger_bootstrap.sh | UTF-8 | 17,023 | 3.40625 | 3 | [] | no_license | #! /usr/bin/env bash
# This is the script that is used to provision the logger host
# Override existing DNS Settings using netplan, but don't do it for Terraform AWS builds
download_palantir_osquery_config() {
if [ -f /opt/osquery-configuration ]; then
echo "[$(date +%H:%M:%S)]: osquery configs have already been downloaded"
else
# Import Palantir osquery configs into Fleet
echo "[$(date +%H:%M:%S)]: Downloading Palantir osquery configs..."
cd /opt && git clone https://github.com/palantir/osquery-configuration.git
fi
}
install_fleet_import_osquery_config() {
if [ -f "/opt/fleet" ]; then
echo "[$(date +%H:%M:%S)]: Fleet is already installed"
else
cd /opt || exit 1
echo "[$(date +%H:%M:%S)]: Installing Fleet..."
if ! grep 'fleet' /etc/hosts; then
echo -e "\n127.0.0.1 fleet" >>/etc/hosts
fi
if ! grep 'logger' /etc/hosts; then
echo -e "\n127.0.0.1 logger" >>/etc/hosts
fi
# Set MySQL username and password, create fleet database
mysql -uroot -e "ALTER USER 'root'@'localhost' IDENTIFIED WITH mysql_native_password BY 'fleet';"
mysql -uroot -pfleet -e "create database fleet;"
# Always download the latest release of Fleet
curl -s https://api.github.com/repos/fleetdm/fleet/releases | grep 'https://github.com' | grep "/fleet.zip" | cut -d ':' -f 2,3 | tr -d '"' | tr -d ' ' | head -1 | wget --progress=bar:force -i -
unzip fleet.zip -d fleet
cp fleet/linux/fleetctl /usr/local/bin/fleetctl && chmod +x /usr/local/bin/fleetctl
cp fleet/linux/fleet /usr/local/bin/fleet && chmod +x /usr/local/bin/fleet
# Prepare the DB
fleet prepare db --mysql_address=127.0.0.1:3306 --mysql_database=fleet --mysql_username=root --mysql_password=fleet
# Copy over the certs and service file
cp /vagrant/resources/fleet/server.* /opt/fleet/
cp /vagrant/resources/fleet/fleet.service /etc/systemd/system/fleet.service
# Create directory for logs
mkdir /var/log/fleet
# Install the service file
/bin/systemctl enable fleet.service
/bin/systemctl start fleet.service
# Start Fleet
echo "[$(date +%H:%M:%S)]: Waiting for fleet service to start..."
while true; do
result=$(curl --silent -k https://127.0.0.1:8412)
if echo "$result" | grep -q setup; then break; fi
sleep 1
done
fleetctl config set --address https://172.16.20.35:8412
fleetctl config set --tls-skip-verify true
fleetctl setup --email admin@purpleteamlab.network --username admin --password 'admin123#' --org-name PurpleteamLab
fleetctl login --email admin@purpleteamlab.network --password 'admin123#'
# Set the enrollment secret to match what we deploy to Windows hosts
mysql -uroot --password=fleet -e 'use fleet; update enroll_secrets set secret = "enrollmentsecret";'
echo "Updated enrollment secret"
# Change the query invervals to reflect a lab environment
# Every hour -> Every 3 minutes
# Every 24 hours -> Every 15 minutes
sed -i 's/interval: 3600/interval: 180/g' osquery-configuration/Fleet/Endpoints/MacOS/osquery.yaml
sed -i 's/interval: 3600/interval: 180/g' osquery-configuration/Fleet/Endpoints/Windows/osquery.yaml
sed -i 's/interval: 28800/interval: 900/g' osquery-configuration/Fleet/Endpoints/MacOS/osquery.yaml
sed -i 's/interval: 28800/interval: 900/g' osquery-configuration/Fleet/Endpoints/Windows/osquery.yaml
# Don't log osquery INFO messages
# Fix snapshot event formatting
fleetctl get options >/tmp/options.yaml
/usr/bin/yq w -i /tmp/options.yaml 'spec.config.options.enroll_secret' 'enrollmentsecret'
/usr/bin/yq w -i /tmp/options.yaml 'spec.config.options.logger_snapshot_event_type' 'true'
fleetctl apply -f /tmp/options.yaml
# Use fleetctl to import YAML files
fleetctl apply -f osquery-configuration/Fleet/Endpoints/MacOS/osquery.yaml
fleetctl apply -f osquery-configuration/Fleet/Endpoints/Windows/osquery.yaml
for pack in osquery-configuration/Fleet/Endpoints/packs/*.yaml; do
fleetctl apply -f "$pack"
done
# Add Splunk monitors for Fleet
# Files must exist before splunk will add a monitor
touch /var/log/fleet/osquery_result
touch /var/log/fleet/osquery_status
/opt/splunk/bin/splunk add monitor "/var/log/fleet/osquery_result" -index osquery -sourcetype 'osquery:json' -auth 'admin:changeme' --accept-license --answer-yes --no-prompt
/opt/splunk/bin/splunk add monitor "/var/log/fleet/osquery_status" -index osquery-status -sourcetype 'osquery:status' -auth 'admin:changeme' --accept-license --answer-yes --no-prompt
fi
}
install_zeek() {
echo "[$(date +%H:%M:%S)]: Installing Zeek..."
# Environment variables
NODECFG=/opt/zeek/etc/node.cfg
if ! grep 'zeek' /etc/apt/sources.list.d/security:zeek.list; then
sh -c "echo 'deb http://download.opensuse.org/repositories/security:/zeek/xUbuntu_18.04/ /' > /etc/apt/sources.list.d/security:zeek.list"
fi
wget -nv https://download.opensuse.org/repositories/security:zeek/xUbuntu_18.04/Release.key -O /tmp/Release.key
apt-key add - </tmp/Release.key &>/dev/null
# Update APT repositories
apt-get -qq -ym update
# Install tools to build and configure Zeek
apt-get -qq -ym install zeek crudini
export PATH=$PATH:/opt/zeek/bin
pip install zkg==2.1.1
zkg refresh
zkg autoconfig
zkg install --force salesforce/ja3
# Load Zeek scripts
echo '
@load protocols/ftp/software
@load protocols/smtp/software
@load protocols/ssh/software
@load protocols/http/software
@load tuning/json-logs
@load policy/integration/collective-intel
@load policy/frameworks/intel/do_notice
@load frameworks/intel/seen
@load frameworks/intel/do_notice
@load frameworks/files/hash-all-files
@load base/protocols/smb
@load policy/protocols/conn/vlan-logging
@load policy/protocols/conn/mac-logging
@load ja3
redef Intel::read_files += {
"/opt/zeek/etc/intel.dat"
};
redef ignore_checksums = T;
' >>/opt/zeek/share/zeek/site/local.zeek
# Configure Zeek
crudini --del $NODECFG zeek
crudini --set $NODECFG manager type manager
crudini --set $NODECFG manager host localhost
crudini --set $NODECFG proxy type proxy
crudini --set $NODECFG proxy host localhost
# Setup $CPUS numbers of Zeek workers
# AWS only has a single interface (eth0), so don't monitor eth0 if we're in AWS
if ! curl -s 169.254.169.254 --connect-timeout 2 >/dev/null; then
# TL;DR of ^^^: if you can't reach the AWS metadata service, you're not running in AWS
# Therefore, it's ok to add this.
crudini --set $NODECFG worker-eth0 type worker
crudini --set $NODECFG worker-eth0 host localhost
crudini --set $NODECFG worker-eth0 interface eth0
crudini --set $NODECFG worker-eth0 lb_method pf_ring
crudini --set $NODECFG worker-eth0 lb_procs "$(nproc)"
fi
crudini --set $NODECFG worker-eth0 type worker
crudini --set $NODECFG worker-eth0 host localhost
crudini --set $NODECFG worker-eth0 interface eth0
crudini --set $NODECFG worker-eth0 lb_method pf_ring
crudini --set $NODECFG worker-eth0 lb_procs "$(nproc)"
# Setup Zeek to run at boot
cp /vagrant/resources/zeek/zeek.service /lib/systemd/system/zeek.service
systemctl enable zeek
systemctl start zeek
# Configure the Splunk inputs
mkdir -p /opt/splunk/etc/apps/Splunk_TA_bro/local && touch /opt/splunk/etc/apps/Splunk_TA_bro/local/inputs.conf
crudini --set /opt/splunk/etc/apps/Splunk_TA_bro/local/inputs.conf monitor:///opt/zeek/spool/manager index zeek
crudini --set /opt/splunk/etc/apps/Splunk_TA_bro/local/inputs.conf monitor:///opt/zeek/spool/manager sourcetype zeek:json
crudini --set /opt/splunk/etc/apps/Splunk_TA_bro/local/inputs.conf monitor:///opt/zeek/spool/manager whitelist '.*\.log$'
crudini --set /opt/splunk/etc/apps/Splunk_TA_bro/local/inputs.conf monitor:///opt/zeek/spool/manager blacklist '.*(communication|stderr)\.log$'
crudini --set /opt/splunk/etc/apps/Splunk_TA_bro/local/inputs.conf monitor:///opt/zeek/spool/manager disabled 0
# Ensure permissions are correct and restart splunk
chown -R splunk:splunk /opt/splunk/etc/apps/Splunk_TA_bro
/opt/splunk/bin/splunk restart
# Verify that Zeek is running
if ! pgrep -f zeek >/dev/null; then
echo "Zeek attempted to start but is not running. Exiting"
exit 1
fi
}
install_velociraptor() {
echo "[$(date +%H:%M:%S)]: Installing Velociraptor..."
if [ ! -d "/opt/velociraptor" ]; then
mkdir /opt/velociraptor
fi
echo "[$(date +%H:%M:%S)]: Attempting to determine the URL for the latest release of Velociraptor"
LATEST_VELOCIRAPTOR_LINUX_URL=$(curl -sL https://github.com/Velocidex/velociraptor/releases/latest | grep linux-amd64 | grep href | head -1 | cut -d '"' -f 2 | sed 's#^#https://github.com#g')
echo "[$(date +%H:%M:%S)]: The URL for the latest release was extracted as $LATEST_VELOCIRAPTOR_LINUX_URL"
echo "[$(date +%H:%M:%S)]: Attempting to download..."
wget -P /opt/velociraptor --progress=bar:force "$LATEST_VELOCIRAPTOR_LINUX_URL"
if [ "$(file /opt/velociraptor/velociraptor*linux-amd64 | grep -c 'ELF 64-bit LSB executable')" -eq 1 ]; then
echo "[$(date +%H:%M:%S)]: Velociraptor successfully downloaded!"
else
echo "[$(date +%H:%M:%S)]: Failed to download the latest version of Velociraptor. Please open a DetectionLab issue on Github."
return
fi
cd /opt/velociraptor || exit 1
mv velociraptor-*-linux-amd64 velociraptor
chmod +x velociraptor
cp /vagrant/resources/velociraptor/server.config.yaml /opt/velociraptor
echo "[$(date +%H:%M:%S)]: Creating Velociraptor dpkg..."
./velociraptor --config /opt/velociraptor/server.config.yaml debian server
echo "[$(date +%H:%M:%S)]: Installing the dpkg..."
if dpkg -i velociraptor_*_server.deb >/dev/null; then
echo "[$(date +%H:%M:%S)]: Installation complete!"
else
echo "[$(date +%H:%M:%S)]: Failed to install the dpkg"
return
fi
}
install_suricata() {
# Run iwr -Uri testmyids.com -UserAgent "BlackSun" in Powershell to generate test alerts from Windows
echo "[$(date +%H:%M:%S)]: Installing Suricata..."
# Install suricata
apt-get -qq -y install suricata crudini
test_suricata_prerequisites
# Install suricata-update
cd /opt || exit 1
git clone https://github.com/OISF/suricata-update.git
cd /opt/suricata-update || exit 1
pip install pyyaml
python setup.py install
cp /vagrant/resources/suricata/suricata.yaml /etc/suricata/suricata.yaml
crudini --set --format=sh /etc/default/suricata '' iface eth0
# update suricata signature sources
suricata-update update-sources
# disable protocol decode as it is duplicative of Zeek
echo re:protocol-command-decode >>/etc/suricata/disable.conf
# enable et-open and attackdetection sources
suricata-update enable-source et/open
suricata-update enable-source ptresearch/attackdetection
# Configure the Splunk inputs
crudini --set /opt/splunk/etc/apps/search/local/inputs.conf monitor:///var/log/suricata index suricata
crudini --set /opt/splunk/etc/apps/search/local/inputs.conf monitor:///var/log/suricata sourcetype suricata:json
crudini --set /opt/splunk/etc/apps/search/local/inputs.conf monitor:///var/log/suricata whitelist 'eve.json'
crudini --set /opt/splunk/etc/apps/search/local/inputs.conf monitor:///var/log/suricata disabled 0
crudini --set /opt/splunk/etc/apps/search/local/props.conf suricata:json TRUNCATE 0
# Update suricata and restart
suricata-update
service suricata stop
service suricata start
sleep 3
# Verify that Suricata is running
if ! pgrep -f suricata >/dev/null; then
echo "Suricata attempted to start but is not running. Exiting"
exit 1
fi
# Configure a logrotate policy for Suricata
cat >/etc/logrotate.d/suricata <<EOF
/var/log/suricata/*.log /var/log/suricata/*.json
{
hourly
rotate 0
missingok
nocompress
size=500M
sharedscripts
postrotate
/bin/kill -HUP \`cat /var/run/suricata.pid 2>/dev/null\` 2>/dev/null || true
endscript
}
EOF
}
test_suricata_prerequisites() {
for package in suricata crudini; do
echo "[$(date +%H:%M:%S)]: [TEST] Validating that $package is correctly installed..."
# Loop through each package using dpkg
if ! dpkg -S $package >/dev/null; then
# If which returns a non-zero return code, try to re-install the package
echo "[-] $package was not found. Attempting to reinstall."
apt-get clean && apt-get -qq update && apt-get install -y $package
if ! which $package >/dev/null; then
# If the reinstall fails, give up
echo "[X] Unable to install $package even after a retry. Exiting."
exit 1
fi
else
echo "[+] $package was successfully installed!"
fi
done
}
# Install Apache Guacamole 1.0.0 from source plus its Tomcat 8 web app.
# Builds guacamole-server under /opt, deploys the .war into Tomcat, copies
# pre-staged config from /vagrant/resources, and enables/starts the services.
# NOTE(review): the 1.0.0 version is hard-coded in the download URLs below.
install_guacamole() {
echo "[$(date +%H:%M:%S)]: Installing Guacamole..."
cd /opt || exit 1
# Build and runtime dependencies (RDP/SSH/image libs + Tomcat 8).
apt-get -qq install -y libcairo2-dev libjpeg62-dev libpng-dev libossp-uuid-dev libfreerdp-dev libpango1.0-dev libssh2-1-dev libssh-dev tomcat8 tomcat8-admin tomcat8-user
# The closer.cgi URL redirects to a nearby Apache mirror for the tarball.
wget --progress=bar:force "http://apache.org/dyn/closer.cgi?action=download&filename=guacamole/1.0.0/source/guacamole-server-1.0.0.tar.gz" -O guacamole-server-1.0.0.tar.gz
tar -xf guacamole-server-1.0.0.tar.gz && cd guacamole-server-1.0.0 || echo "[-] Unable to find the Guacamole folder."
# Build output is discarded; only a summary error is reported on failure.
./configure &>/dev/null && make --quiet &>/dev/null && make --quiet install &>/dev/null || echo "[-] An error occurred while installing Guacamole."
ldconfig
cd /var/lib/tomcat8/webapps || echo "[-] Unable to find the tomcat8/webapps folder."
# Pre-built web application archive served by Tomcat.
wget --progress=bar:force "http://apache.org/dyn/closer.cgi?action=download&filename=guacamole/1.0.0/binary/guacamole-1.0.0.war" -O guacamole.war
mkdir /etc/guacamole
mkdir /etc/guacamole/shares
# NOTE(review): world-writable share directory — presumably intentional for
# lab file exchange; confirm this is acceptable for the deployment.
sudo chmod 777 /etc/guacamole/shares
mkdir /usr/share/tomcat8/.guacamole
# Configuration is staged by Vagrant under /vagrant/resources/guacamole.
cp /vagrant/resources/guacamole/user-mapping.xml /etc/guacamole/
cp /vagrant/resources/guacamole/guacamole.properties /etc/guacamole/
cp /vagrant/resources/guacamole/guacd.service /lib/systemd/system
# Tomcat reads its Guacamole config via symlinks in ~tomcat8/.guacamole.
sudo ln -s /etc/guacamole/guacamole.properties /usr/share/tomcat8/.guacamole/
sudo ln -s /etc/guacamole/user-mapping.xml /usr/share/tomcat8/.guacamole/
sudo ln -s /usr/local/lib/freerdp /usr/lib/x86_64-linux-gnu/
systemctl enable guacd
systemctl enable tomcat8
systemctl start guacd
systemctl start tomcat8
}
# Final provisioning touches: expose Splunk/Zeek binaries to future shells
# and report anonymous usage statistics.
postinstall_tasks() {
# Append Splunk and Zeek to the PATH. The line is written single-quoted so
# that "$PATH" is expanded at login time; the old form baked the build-time
# PATH value into ~/.bashrc verbatim.
echo 'export PATH="$PATH:/opt/splunk/bin:/opt/zeek/bin"' >>~/.bashrc
echo "export SPLUNK_HOME=/opt/splunk" >>~/.bashrc
# Ping DetectionLab server for usage statistics.
# Fixed the URL scheme: "https:/" (single slash) made every request fail.
curl -s -A "DetectionLab-logger" "https://ping.detectionlab.network/logger" || echo "Unable to connect to ping.detectionlab.network"
}
# Wire the Suricata, Fleet/osquery and Zeek log sources into Splunk.
# Suricata/Zeek inputs are written directly with crudini; Fleet monitors are
# added through the splunk CLI. Finishes with a Splunk restart so all inputs
# take effect.
configure_splunk_inputs() {
# Suricata: monitor eve.json, index "suricata", and disable line truncation.
crudini --set /opt/splunk/etc/apps/search/local/inputs.conf monitor:///var/log/suricata index suricata
crudini --set /opt/splunk/etc/apps/search/local/inputs.conf monitor:///var/log/suricata sourcetype suricata:json
crudini --set /opt/splunk/etc/apps/search/local/inputs.conf monitor:///var/log/suricata whitelist 'eve.json'
crudini --set /opt/splunk/etc/apps/search/local/inputs.conf monitor:///var/log/suricata disabled 0
# TRUNCATE 0 = never truncate long JSON events.
crudini --set /opt/splunk/etc/apps/search/local/props.conf suricata:json TRUNCATE 0
# Fleet: osquery result and status streams into separate indexes.
# NOTE(review): credentials 'admin:changeme' are hard-coded here.
/opt/splunk/bin/splunk add monitor "/var/log/fleet/osquery_result" -index osquery -sourcetype 'osquery:json' -auth 'admin:changeme' --accept-license --answer-yes --no-prompt
/opt/splunk/bin/splunk add monitor "/var/log/fleet/osquery_status" -index osquery-status -sourcetype 'osquery:status' -auth 'admin:changeme' --accept-license --answer-yes --no-prompt
# Zeek: create the TA's local inputs.conf, monitor the spool, keep *.log but
# exclude the chatty communication/stderr logs.
mkdir -p /opt/splunk/etc/apps/Splunk_TA_bro/local && touch /opt/splunk/etc/apps/Splunk_TA_bro/local/inputs.conf
crudini --set /opt/splunk/etc/apps/Splunk_TA_bro/local/inputs.conf monitor:///opt/zeek/spool/manager index zeek
crudini --set /opt/splunk/etc/apps/Splunk_TA_bro/local/inputs.conf monitor:///opt/zeek/spool/manager sourcetype zeek:json
crudini --set /opt/splunk/etc/apps/Splunk_TA_bro/local/inputs.conf monitor:///opt/zeek/spool/manager whitelist '.*\.log$'
crudini --set /opt/splunk/etc/apps/Splunk_TA_bro/local/inputs.conf monitor:///opt/zeek/spool/manager blacklist '.*(communication|stderr)\.log$'
crudini --set /opt/splunk/etc/apps/Splunk_TA_bro/local/inputs.conf monitor:///opt/zeek/spool/manager disabled 0
# Ensure permissions are correct and restart splunk
chown -R splunk:splunk /opt/splunk/etc/apps/Splunk_TA_bro
/opt/splunk/bin/splunk restart
}
# Full provisioning sequence for the logger host. The steps run in a fixed
# order; several later steps appear to depend on earlier ones (e.g. Splunk
# is installed before the services whose logs feed it) — the individual
# functions are defined earlier in this file.
main() {
apt_install_prerequisites
modify_motd
test_prerequisites
fix_eth0_static_ip
install_splunk
download_palantir_osquery_config
install_fleet_import_osquery_config
install_velociraptor
install_suricata
install_zeek
install_guacamole
postinstall_tasks
}
# Minimal provisioning mode: install Splunk and configure its log inputs
# only, skipping the rest of the stack (selectable via the CLI dispatch
# at the bottom of this script).
splunk_only() {
install_splunk
configure_splunk_inputs
}
# Allow custom modes via CLI args: the first argument names a function in
# this script (e.g. "splunk_only") to run instead of the full "main" install.
if [ -n "$1" ]; then
# Only invoke the argument when it names a function defined above. This
# replaces the old `eval "$1"`, which executed arbitrary shell code passed
# on the command line.
if declare -f -- "$1" >/dev/null; then
"$1"
else
echo "Unknown mode: $1" >&2
exit 1
fi
else
main
fi
exit 0
| true |
86bcdbe06477d260925768605832f0cbaa14b058 | Shell | florinrusu96/dotfiles | /scripts/util/pbpaste | UTF-8 | 242 | 2.859375 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# pbpaste: print the clipboard contents on macOS, WSL or X11 Linux.
os_name="$(uname)"
if [[ "$os_name" == "Darwin" ]]; then
    # macOS ships a native pbpaste
    pbpaste
elif grep -iq microsoft /proc/version; then
    # WSL: read the Windows clipboard, strip CRs and the trailing newline
    powershell.exe Get-Clipboard | sed 's/\r$//' | sed -z '$ s/\n$//'
else
    # plain Linux/X11: fall back to xclip
    xclip -selection clipboard -out
fi
exit 0
| true |
d38108f3100fda9a96affa5a4de1410c6fa1d8b5 | Shell | agvim/homeshick | /lib/commands/offline.sh | UTF-8 | 571 | 3.234375 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Bundle the homesick environment for offline use ("make") or deploy a
# previously made bundle ("deploy").
# Arguments: $1 - action, "make" or "deploy"; anything else shows help.
# Returns:   $EX_SUCCESS (defined by the surrounding homeshick scripts).
function offline {
	[[ ! $1 ]] && help_err offline
	INITIALPWD=$PWD  # NOTE(review): not read here — presumably used by sourced code; verify
	local action=$1
	if [[ $action == "make" ]]; then
		# Stage a copy of ~/.homesick in a temp dir, let each castle's
		# makeoffline.sh hook contribute to it, then compress it into the cwd.
		# All path expansions are quoted so spaces in $HOME or the temp dir
		# cannot split the commands.
		MAKEFOLDER=$(mktemp -d) && \
		cp -r -P "$HOME/.homesick" "$MAKEFOLDER/" && \
		find "$HOME/.homesick" -mindepth 3 -maxdepth 3 -name 'makeoffline.sh' -exec {} "$MAKEFOLDER" \; && \
		tar -jcf environment.tbz -C "$MAKEFOLDER" . && \
		success "environment.tbz compressed in $PWD" && rm -rf -- "$MAKEFOLDER"
	elif [[ $action == "deploy" ]]; then
		"$HOME/.homesick/repos/homeshick/bin/homeshick" link
	else
		help_err offline
	fi
	return $EX_SUCCESS
}
| true |
e38a12a8ffbc163c64b5b8c6b4a134d0596d9b91 | Shell | arwright3/wtgpipeline | /mask_coadd.sh | UTF-8 | 1,787 | 3.40625 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Either generate the coadd.stars.reg star-mask region file (no 3rd arg) or
# apply a given region file to the coadd flag image.
set -uxv
#adam-use# use to make the coadd.stars.reg file (without input mask file) or to apply a coadd mask/reg file to the coadd flags file
#adam-example# ./mask_coadd.sh MACS1226+21 W-C-RC coadd.stars.reg 2>&1 | tee -a OUT-mask_coadd.stars.log
#adam-example# ./mask_coadd.sh MACS1226+21 W-C-RC coadd.asteroids.reg 2>&1 | tee -a OUT-mask_coadd.asteroids.log
subarudir=/nfs/slac/g/ki/ki18/anja/SUBARU/
cluster=$1
filter=$2
# Optional arguments: region file to apply ($3) and lensing run type ($4).
flagFile=''
if [ "$#" -gt 2 ]; then
    flagFile=$3
fi
lenstype='good'
if [ "$#" -gt 3 ]; then
    lenstype=$4
fi
# progs.ini provides the pipeline tool paths (e.g. $P_WW, $AUTOMASKCONF —
# required under set -u); its chatter goes to /tmp.
. progs.ini > /tmp/progs.out 2>&1
workdir=${subarudir}/${cluster}/masks
# mkdir -p: no-op when the directory already exists (replaces the old
# unquoted [ ! -d $workdir ] test).
mkdir -p "$workdir"
coadd_dir=${subarudir}/${cluster}/${filter}/SCIENCE/coadd_${cluster}_${lenstype}
#if no region file given, then make the coadd.stars.reg file
if [ -z "$flagFile" ]; then
    ./maskstars.sh -i "${coadd_dir}/coadd.fits" \
        -o "${workdir}/coadd.stars.reg" \
        -a USNOB1 \
        -s "${AUTOMASKCONF}/SUBARU_V_15.reg" \
        -p 14.0 -m 0.2 -l 18.5
fi
#if an input region file is given, then apply it to the coadd flag image
if [ -n "$flagFile" ]; then
    base=$(basename "$flagFile" .reg)
    # ww wants " POLYGON" keywords, not ds9's "polygon".
    sed 's/polygon/ POLYGON/g' "${workdir}/${flagFile}" > "${workdir}/${base}.ww.reg"
    ${P_WW} -c lensconf/poly_flag.ww \
        -FLAG_NAMES "${coadd_dir}/coadd.flag.fits" \
        -FLAG_MASKS "0xfff" \
        -FLAG_WMASKS "0x0" \
        -FLAG_OUTFLAGS "1,2,4,8,16,32,64,128,256,512,1024,2048" \
        -POLY_NAMES "${workdir}/${base}.ww.reg" \
        -POLY_OUTFLAGS "4096" \
        -OUTFLAG_NAME "${coadd_dir}/coadd.flag.masked.fits"
    # Keep the original flag image as *.swarp.fits and promote the masked one.
    mv "${coadd_dir}/coadd.flag.fits" "${coadd_dir}/coadd.flag.swarp.fits"
    mv "${coadd_dir}/coadd.flag.masked.fits" "${coadd_dir}/coadd.flag.fits"
fi
| true |
74dcc32a2dbb115dfbd9fc134ef617a98822ce5d | Shell | mbalassi/streaming-performance | /src/test/resources/Performance/spark-remove-files.sh | UTF-8 | 330 | 3.1875 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Remove all *.csv files from an HDFS directory on the configured Hadoop
# master. Usage: spark-remove-files.sh <hadoop directory> <hdfs directory>
thisDir=$(dirname "$0")
thisDir=$(readlink -f "$thisDir")
# load-spark-config.sh is expected to define $hadoopUser and $hadoopMaster.
source "$thisDir/load-spark-config.sh"
hadoopDir=$1
hdfsDir=$2
if [ "$#" -ge 2 ]; then
    echo REMOVING:
    # The remote command is passed as one quoted argument; the *.csv glob is
    # expanded by hdfs on the remote side, not by the local shell.
    ssh "$hadoopUser@$hadoopMaster" "$hadoopDir/bin/hdfs dfs -rm -r $hdfsDir/*.csv"
else
    echo "USAGE:"
    echo "run <hadoop directory> <hdfs directory>"
fi
| true |
9d6f1e7c299e60898140da4f1ed39f10344b6153 | Shell | cseelye/terrain-model | /container_build/build-gdal | UTF-8 | 1,783 | 3.515625 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
set -euo pipefail

# Build GDAL from source against a previously built PROJ, install it into a
# staging root, strip the binaries, and relocate the Python bindings.
# All knobs are overridable from the environment.
GDAL_VERSION=${GDAL_VERSION-3.5.0}
CORES=${CORES-$(getconf _NPROCESSORS_ONLN)}
PROJ_INSTALL_PREFIX=${PROJ_INSTALL_PREFIX-/usr/local}
GCC_ARCH=${GCC_ARCH-$(uname -m)}
DEST_DIR=${GDAL_DEST-/build_gdal}
echo "${0} ENVIRONMENT"
export
echo "================"
# build-proj must be run first with the same environment so that GDAL can find PROJ in ${DEST_DIR}/${PROJ_INSTALL_PREFIX}
# =============================================================================
# Build GDAL
mkdir gdal
curl -LSsf https://github.com/OSGeo/gdal/archive/v${GDAL_VERSION}.tar.gz | tar xz -C gdal --strip-components=1
pushd gdal
./autogen.sh
CFLAGS='-O2 -g0' LDFLAGS="-L/${DEST_DIR}/${PROJ_INSTALL_PREFIX}/lib -linternalproj -s" \
./configure \
    --prefix=/usr/local \
    --sysconfdir=/etc \
    --without-libtool \
    --with-hide-internal-symbols \
    --with-jpeg12 \
    --with-python \
    --with-webp \
    --with-proj=/${DEST_DIR}/${PROJ_INSTALL_PREFIX} \
    --with-libtiff=internal \
    --with-rename-internal-libtiff-symbols \
    --with-geotiff=internal \
    --with-rename-internal-libgeotiff-symbols
make -j${CORES}
make install DESTDIR="${DEST_DIR}"
popd
# Strip the installed binaries. "$f" is quoted so paths containing spaces
# survive; "|| /bin/true" keeps set -e from aborting on files strip rejects.
for f in "${DEST_DIR}"/usr/local/lib/*; do
    ${GCC_ARCH}-linux-gnu-strip -s "${f}" 2>/dev/null || /bin/true
done
for f in "${DEST_DIR}"/usr/local/bin/*; do
    ${GCC_ARCH}-linux-gnu-strip -s "${f}" 2>/dev/null || /bin/true
done
for f in "${DEST_DIR}"/usr/local/lib/python*/site-packages/osgeo/*.so; do
    ${GCC_ARCH}-linux-gnu-strip -s "${f}" 2>/dev/null || /bin/true
done
# Move python libraries into the "system" python location
mkdir --parent "${DEST_DIR}/usr/lib/"
mv "${DEST_DIR}"/usr/local/lib/python* "${DEST_DIR}/usr/lib/"
| true |
40a02e134706019ec9d1b098a80e3913318d7985 | Shell | jonathanwang/dotfiles | /myprofile | UTF-8 | 3,715 | 3.578125 | 4 | [] | no_license | #!/bin/bash
# Include guard: only run this profile once per (sourced) shell.
if [ -z "$DOTFILES" ]; then
    export DOTFILES=yes
else
    return 0
fi
# Operating system specific settings
if [[ "$OSTYPE" == "linux-gnu" ]]; then
    # Linux: enable color support of ls via dircolors
    if [ -x /usr/bin/dircolors ]; then
        test -r ~/.dircolors && eval "$(dircolors -b ~/.dircolors)" || eval "$(dircolors -b)"
        alias ls='ls --color=auto'
    fi
elif [[ "$OSTYPE" == "darwin"* ]]; then
    # Mac OS X: enable color support of ls
    export CLICOLOR=1
    export LSCOLORS=ExGxBxDxCxEgEdxbxgxcxd
    #export LSCOLORS=GxFxCxDxBxegedabagaced
else
    echo "Unrecognized operating system"
fi
# Settings that depend on whether or not the session is over SSH
if [ -n "$SSH_CLIENT" ] || [ -n "$SSH_TTY" ]; then
    # Using SSH: bash prompt with hostname
    export PS1='\[\033[0;0m\]\u@\h\[\033[00m\]:\[\033[0;34m\]\w\[\033[00m\]$ '
else
    # Not using SSH: bash prompt without hostname
    export PS1='\[\033[0;0m\]\u\[\033[00m\]:\[\033[0;34m\]\w\[\033[00m\]$ '
fi
# Set the editor
export EDITOR=vim
# bash git prompt: https://github.com/magicmonty/bash-git-prompt
#
# This prompt is also used even when not in the git repository to get the last command indicator.
# To use the prompt only in repositories, uncomment the line below.
# GIT_PROMPT_ONLY_IN_REPO=1
# Set the beginning of the git prompt to the last command indicator and a color reset plus the current PS1 except with
# the extra '$ ' trimmed from the end so that it can be reinserted at the end.
GIT_PROMPT_START="_LAST_COMMAND_INDICATOR_\[\033[0m\] $(echo ${PS1%??})"
# No leading space in front of the status component.
GIT_PROMPT_LEADING_SPACE=0
# The end of the git prompt is set to the sequence that was removed from PS1.
GIT_PROMPT_END="$ "
# Load the git prompt
source ~/dotfiles/bash-git-prompt/gitprompt.sh
# Git lg (if unset)
git config --global --get alias.lg > /dev/null \
    || git config --global alias.lg "log --color --graph --pretty=format:'%Cred%h%Creset -%C(yellow)%d%Creset %s %Cgreen(%cr) %C(bold blue)<%an>%Creset' --abbrev-commit"
# Git listing of branches by date
alias glist='for ref in $(git for-each-ref --sort=-committerdate --format="%(refname)" refs/heads/ refs/remotes); do git log -n1 $ref --pretty=format:"%Cgreen%cr%Creset %C(yellow)%d%Creset %C(bold blue)<%an>%Creset%n" | cat ; done | awk '"'! a["'$0'"]++'"
# grep aliases
alias grep='grep --color=auto'
alias fgrep='fgrep --color=auto'
alias egrep='egrep --color=auto'
# Don't put duplicate lines in the history.
HISTCONTROL=ignoredups
# Append and combine history from multiple bash sessions.
unset HISTFILESIZE
export HISTSIZE=10000
# Only prepend the old PROMPT_COMMAND (plus a ';') when it is non-empty. The
# previous unconditional "$PROMPT_COMMAND;history -a;" produced a leading ';'
# whenever PROMPT_COMMAND started out unset, which bash rejects as a syntax
# error on every prompt.
export PROMPT_COMMAND="${PROMPT_COMMAND:+$PROMPT_COMMAND;}history -a;"
shopt -s histappend
# Enable programmable completion features (you don't need to enable this, if it's already enabled in /etc/bash.bashrc
# and /etc/profile sources /etc/bash.bashrc).
# if ! shopt -oq posix; then
#   if [ -f /usr/share/bash-completion/bash_completion ]; then
#     . /usr/share/bash-completion/bash_completion
#   elif [ -f /etc/bash_completion ]; then
#     . /etc/bash_completion
#   elif [ -f /opt/local/etc/profile.d/bash_completion.sh ]; then
#     . /opt/local/etc/profile.d/bash_completion.sh
#   fi
# fi
# The pattern "**" used in a pathname expansion context will match all files and zero or more directories and
# subdirectories. Older shells (OS X) might not have this option, so ignore the error there.
shopt -s globstar 2>/dev/null
# Check the window size after each command and, if necessary, update the values of LINES and COLUMNS.
shopt -s checkwinsize
# Disable Ctrl-D to logout of a shell session
set -o ignoreeof
# Re-enable Ctrl-S to forward search commands in the shell
stty -ixon
| true |
1b91cfd679a1adceff46699d09657f320d023b29 | Shell | grengojbo/toolbox | /connect-kubernetes-azure.sh | UTF-8 | 234 | 2.90625 | 3 | [
"BSD-2-Clause"
] | permissive | #!/bin/sh
# Log in to Azure and fetch kubectl credentials for an AKS cluster.
# Usage: connect-kubernetes-azure.sh <user> <password> <resource_group> <cluster>
# NOTE(review): the password is passed on argv and is visible in `ps` output —
# consider `az login` device/interactive flows on shared hosts.
# POSIX [ ] is used instead of [[ ]] because this script runs under #!/bin/sh,
# where the bashism [[ ]] is a syntax error (e.g. on dash).
if [ "$#" -ne 4 ]; then
    echo "Usage: $0 \"<azure_username>\" \"<azure_password>\" \"<resource_group>\" \"<cluster_name>\""
    exit 1
fi
az login -u "$1" -p "$2"
az aks get-credentials --resource-group "$3" --name "$4"
77f1881d57317f9c2c642e8693b18f8bf5048200 | Shell | boukisn/emergentree | /homepi/scripts/processing.sh | UTF-8 | 999 | 3.078125 | 3 | [] | no_license |
# Runs periodically: within the first 30 seconds of each minute, pull the
# *previous* minute's sensor log from the sensor node and run the
# wind/acceleration regression pipeline over it.
SEC=$(date +%S)
LOG=.log
HOME_DIR='/home/pi/emergentree/homepi'
# Timestamp of the previous minute, computed with a single date call so the
# minute, hour and day all roll over correctly. (The old expr-based code
# produced "0-1" at minute 00 and never decremented the hour.)
STAMP=$(date -d "1 minute ago" +%m_%d_%H_%M)
echo "sensor_${STAMP}${LOG}"
if [ "$SEC" -lt 30 ]; then
	# Download sensor data
	sudo python $HOME_DIR/backend/download.py sensor_${STAMP}${LOG} $HOME_DIR/warehouse/sensor_${STAMP}${LOG}
	sudo chown pi:pi $HOME_DIR/warehouse/sensor_${STAMP}${LOG}
	# Log wind speed vs. acceleration
	sudo python $HOME_DIR/processing/slope_test.py $HOME_DIR/warehouse/sensor_${STAMP}${LOG} $HOME_DIR/warehouse/wind$LOG $HOME_DIR/warehouse/output_slopes.log
	# Calculate 1st regression
	sudo python $HOME_DIR/processing/regression.py $HOME_DIR/warehouse/output_slopes.log $HOME_DIR/warehouse/regress.log
	# Calculate 2nd regression
	sudo python $HOME_DIR/processing/standard.py $HOME_DIR/warehouse/regress.log $HOME_DIR/frontend/server/risk.config
	python $HOME_DIR/warehouse/windmaker.py
fi
| true |
bc47d833c22cb4d16d3bc3456bfbe577ce5294df | Shell | tomkendig/bin | /exiftoolCreateMetaShiftToYear.sh | UTF-8 | 1,116 | 3.71875 | 4 | [] | no_license | #!/bin/sh
# Shift the EXIF dates of every *.jpg in a directory so the "Create Date"
# year becomes the given target year. Files with no EXIF create date first
# get all dates seeded from their filesystem modification time.
# Usage: exiftoolCreateMetaShiftToYear.sh <directory> <year>
if [ -z "$1" ] ; then
	echo "First argument is directory, second argument is a year date"
	exit
elif [ -z "$2" ] ; then
	echo "date of form year required"
else
	echo "path used is $1 and year used is $2"
fi
exif=0
noexif=0
# "${f}" is quoted everywhere so photo filenames containing spaces survive.
for f in "${1}"/*.jpg ; do
	if [ -z "`./exiftool "${f}" | grep 'Create Date' | head -1`" ] ; then
		# No EXIF create date: seed all dates from the file's mtime
		# (stat's "Modify" line reshaped into exiftool's Y:M:D H:M:S form).
		./exiftool -P -progress "-AllDates=`stat "${f}" | grep Modify | cut -d. -f1 | cut -d: -f2,3,4 | sed 's/-/:/g'`" "${f}"
		noexif=$(( noexif+1 ))
	else
		exif=$(( exif+1 ))
	fi
	# Shift all dates back by (current create year - target year) years.
	./exiftool -P -progress "-AllDates-=$((`./exiftool "${f}" | grep "Create Date" | head -1 | cut -d: -f2` - ${2})):0:0 0" "${f}"
done
echo "${noexif}/${exif}/$(( $noexif+$exif )) files had exif meta inserted/already had exif data/total files processed"
| true |
7c5807d3c638e6c1c3a94616d95866f039b215ce | Shell | 7aske/scripts | /src/ccheck.sh | UTF-8 | 804 | 3.421875 | 3 | [] | no_license | #!/usr/bin/env bash
# ccheck: scan the current tree for annotation keywords (Bug, Todo, Hack, ...)
# and print a clickable "file://<path>:<match>" line for every hit.
# The pattern is an extended regex, overridable via $CCHECK_PATTERN; note the
# leading '@' — it matches markers written as "@Bug", "@Todo", etc.
CCHECK_PATTERN="${CCHECK_PATTERN:-"@(Refactor|Bug|DeadCode|Incomplete|Cleanup|Warning|CopyPast[ae]|Temporary|Optimization|Note|Todo|Hack)"}"
#printf '\e]8;;http://example.com\e\\This is a link\e]8;;\e\\\n'
_PWD="$(pwd)"

# Print every matching line of one file as "file://$_PWD/<file>:<match>".
# Factored out of the previously duplicated git/find loops.
scan_file() {
	local file=$1 line match
	match="$(grep --color=always -En "$CCHECK_PATTERN" "$file")"
	[ -n "$match" ] || return 0
	while IFS= read -r line; do
		# The match text goes through %s, so '%' or '\' in source lines can
		# no longer corrupt the output (it used to be the printf *format*).
		printf '\e[35mfile://%s/%s\e[0m:%s\n' "$_PWD" "$file" "$line"
	done <<<"$match"
}

# Inside a git worktree scan only tracked files; otherwise scan everything.
# (rev-parse prints "true"/"false"; comparing the output keeps the original
# behavior for bare repositories.)
if [ "$(git rev-parse --is-inside-work-tree 2>/dev/null)" = "true" ]; then
	while IFS= read -r f; do
		scan_file "$f"
	done < <(git ls-files)
else
	while IFS= read -r f; do
		scan_file "$f"
	done < <(find . -type f)
fi
| true |
0d6cff8f3b4c787353a216524f2da43d26805c72 | Shell | jeromelau1984/DevTool | /DevTool/assets/carrobot_dumplog.sh | UTF-8 | 9,302 | 3.25 | 3 | [] | no_license | LOG_DIR=/storage/sdcard0/ileja_logs/
# ===========================================================================
# carrobot_dumplog.sh — collect device state, per-app logs and crash data
# into a timestamped folder under ${LOG_DIR}, trim the logs, tar the result
# and optionally back it up to the external sdcard in developer mode.
# ===========================================================================
LOG_DIR=/storage/sdcard0/ileja_logs/
LOG_DIR_BK=/storage/sdcard0/ileja_logs_bk/
LogFile=/storage/sdcard0/dump_log_info.log
DUMP_LOG_SCRIPT_FILE_EXT=/storage/sdcard1/carrobot_dumplog.sh
PREDEFINE_SCRIPT_FILE=/storage/sdcard1/carrobot.sh
CMD_TYPE_PROP=root.cmd.type
CMD_RUN_PROP=root.cmd.remote
CMD_TYPE_VALUE="aios"
# Append both stdout and stderr of everything below to the dump log.
# (The old "exec 2>&1; exec 1>>$LogFile" order left stderr on the console,
# because stderr was duplicated from stdout *before* stdout was moved.)
exec 1>>"$LogFile" 2>&1
echo "==================================="
echo "============DUMP HEADER============"
echo "DUMP EXEC time : "
date
echo "===========END DUMP HEADER========="
echo "==================================="
# Developer mode is on when either external-sdcard script is present.
get_DeveloperMode() {
	if [ -f "${DUMP_LOG_SCRIPT_FILE_EXT}" ] || [ -f "${PREDEFINE_SCRIPT_FILE}" ]; then
		echo 1
	else
		echo 0
	fi
}
IS_DEVELOP_MODE="$(get_DeveloperMode)"
# Timestamp used for the dump folder and the final tarball name.
get_datetime() {
	date "+%Y.%m.%d_%H.%M.%S"
}
LOG_FILENAME="$(get_datetime)"
LOG_PATH="${LOG_DIR}""${LOG_FILENAME}""/"
# One "<package>|<cache file>|<short name>" entry per app log to collect:
# the cache file is copied to ${LOG_PATH}<short>_AILog.txt and later trimmed
# to <short>.txt. This table replaces ~70 copy/pasted cp/rm/tail lines.
APP_LOG_SPECS="
com.aispeech.aios|AIOSLog.txt|AIOS
com.aispeech.aios|AIOSEngineLog.txt|AIOSEngine
com.ileja.bluetoothext|AIBTLog.txt|AIBT
com.ileja.carrobot|AILog.txt|Launcher
com.ileja.carrobot.aichat|AILog.txt|Chat
com.ileja.carrobot.mapdownloader|AILog.txt|MapDown
com.ileja.carrobot.music|AILog.txt|FM
com.ileja.carrobot.navigation|AILog.txt|Navi
com.ileja.carrobot.phone|AILog.txt|Phone
com.ileja.carrobot.traffic|AILog.txt|Traffic
com.ileja.carrobot.wechat|AILog.txt|WeChat
com.ileja.ailbs|AILog.txt|AILBS
com.ileja.gesture|AILog.txt|Gesture
com.ileja.aicar|AILog.txt|AICar
com.ileja.aicore|AILog.txt|AICore
com.ileja.aitelcomm|AILog.txt|AITelcomm
com.ileja.fotaupgrade|fotaupgrade.log|FotaUpgrade
"
APP_DATA_ROOT=/sdcard/Android/data
# Split one spec entry into the pkg/src/short globals.
parse_spec() {
	pkg=${1%%|*}
	src=${1#*|}; src=${src%%|*}
	short=${1##*|}
}
if [ ! -d "${LOG_DIR}" ]; then
	mkdir "${LOG_DIR}"
	echo "mkdir " "${LOG_DIR}"
fi
if [ ! -d "${LOG_PATH}" ]; then
	mkdir "${LOG_PATH}"
	echo "mkdir " "${LOG_PATH}"
fi
echo "inited==================="
# trigger AIOS to catch/upload its own log first, then give it time to finish
setprop $CMD_TYPE_PROP "$CMD_TYPE_VALUE"
setprop $CMD_RUN_PROP 1
sleep 5
# ----- system-wide state -----
dumpsys cpuinfo > "${LOG_PATH}""cpuinfo.log"
ps -t -p -P > "${LOG_PATH}""ps.log"
logcat -b main -v threadtime -d -t 5000 -N 5000 -f "${LOG_PATH}""main.log"
logcat -b system -v threadtime -d -f "${LOG_PATH}""system.log"
logcat -b events -v threadtime -d -f "${LOG_PATH}""events.log"
logcat -b radio -v threadtime -d -f "${LOG_PATH}""radio.log"
dmesg > "${LOG_PATH}""dmesg.log"
dumpsys batterystats > "${LOG_PATH}""batterystats.log"
dumpsys connectivity > "${LOG_PATH}""connectivity.log"
getprop > "${LOG_PATH}""prop.log"
echo "sys base info==================="
cat /sys/bootinfo/powerup_reason > "${LOG_PATH}""powerup_reason"
cp /data/system/packages.xml "${LOG_PATH}"
# Concatenate a directory's crash files matching a pattern into one report,
# move the report into ${LOG_PATH} and delete the directory's contents
# (the directory itself is kept). Shared by the dropbox and ANR sections.
collect_crash_dir() {
	dir=$1
	pattern=$2
	report=$3
	echo `ls -l $dir | grep "$pattern"` > "${dir}""$report"
	echo "===================\n" >> "${dir}""$report"
	for tmp in `ls $dir | grep "$pattern"`; do
		echo "-------$tmp-------" >> "${dir}""$report"
		cat "${dir}""$tmp" >> "${dir}""$report"
	done
	mv "${dir}""$report" "${LOG_PATH}""$report"
	# delete the contents but keep the directory (":?" guards an empty var)
	rm -rf "${dir:?}"/*
}
echo "sys dropbox info==================="
collect_crash_dir /data/system/dropbox/ "system_app*" dropbox.txt
echo "sys anr info==================="
collect_crash_dir /data/anr/ "traces*" anr.txt
# ----- copy each app's cache log into the dump folder -----
for spec in $APP_LOG_SPECS; do
	parse_spec "$spec"
	cp -r "$APP_DATA_ROOT/$pkg/cache/$src" "${LOG_PATH}""${short}_AILog.txt"
done
cp -r /data/carrobot/obdvolt.log "${LOG_PATH}""obdvolt.log"
echo "cp app log info==================="
echo "cp bt hci_log and hfp_audio_log==================="
cp -r /sdcard/btsnoop_hci.log "${LOG_PATH}""hci_dump.log"
cp -r /sdcard/mtklog/audio_dump "${LOG_PATH}""audio_dump"
sleep 2
cp -r /sdcard/Pictures/Screenshots "${LOG_PATH}"
rm /sdcard/Pictures/Screenshots/*
echo "cp Screenshots info==================="
# ----- remove the on-device originals now that they have been copied -----
for spec in $APP_LOG_SPECS; do
	parse_spec "$spec"
	rm -rf "$APP_DATA_ROOT/$pkg/cache/$src"
done
echo "rm app log info==================="
# A single 40000-line file is roughly 50K after compression.
MIN_LINE=5000
MAX_LINE=40000
# ----- keep only the tail of each collected log, then drop the full copies -----
for spec in $APP_LOG_SPECS; do
	parse_spec "$spec"
	tail -n ${MAX_LINE} "${LOG_PATH}""${short}_AILog.txt" > "${LOG_PATH}""${short}.txt"
done
tail -n ${MAX_LINE} "${LOG_PATH}""dropbox.txt" > "${LOG_PATH}""system_dropbox.txt"
tail -n ${MAX_LINE} "${LOG_PATH}""anr.txt" > "${LOG_PATH}""system_anr_trace.txt"
echo "tail app log info==================="
for spec in $APP_LOG_SPECS; do
	parse_spec "$spec"
	rm "${LOG_PATH}""${short}_AILog.txt"
done
rm "${LOG_PATH}""dropbox.txt"
rm "${LOG_PATH}""anr.txt"
echo "rm old app log info==================="
# ----- compress the dump folder into ${LOG_DIR}/<timestamp>.tar.gz -----
cd "${LOG_DIR}"
tar -cvzf "${LOG_FILENAME}"".tar.gz" "${LOG_FILENAME}"
echo "tar==================="
# backup logs into sdcard when running under developer mode
backupLogs() {
	if [ ! -d "${LOG_DIR_BK}" ]; then
		mkdir "${LOG_DIR_BK}"
		echo "mkdir " "${LOG_DIR_BK}"
	fi
	if [ $IS_DEVELOP_MODE = "1" ]; then
		cp -r "$LOG_DIR." "$LOG_DIR_BK"
	fi
}
rm -rf "${LOG_PATH}"
echo "rm -rf ${LOG_PATH}"
backupLogs
echo "rm log folder==================="
| true |
8ae286f0fd306587b9a9f67f2c6dca7908ac3304 | Shell | informagi/REL | /scripts/download_data.sh | UTF-8 | 209 | 3.453125 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Download one or more data tarballs from gem.cs.ru.nl and unpack them.
# Usage: download_data.sh <target_dir> <name> [<name>...]
set -euo pipefail
TARGET_DIR="$1"
mkdir -p "$TARGET_DIR"
shift
for name in "$@" ; do
    URL="http://gem.cs.ru.nl/${name}.tar.gz"
    echo "Fetching $URL"
    # -f: fail on HTTP errors so tar never unpacks an HTML error page;
    # with pipefail the whole script then aborts instead of continuing.
    curl -f "$URL" | tar xz --directory "$TARGET_DIR"
done
| true |
552bf21a2fba0cd31711c236c82fe109cc029b54 | Shell | patverga/things-look-like-things | /bash/pattern-queries.sh | UTF-8 | 453 | 3.34375 | 3 | [] | no_license | #!/bin/bash
set -e
# Submit one qsub job per pattern line; refuses to queue more work when the
# cluster is already busy, and validates the argument count first.
# Usage: pattern-queries.sh <data> <extractor>
./dont-run-too-many-jobs.sh
./check-args.sh $#
# file where each line contains a thing we want to find things that look like
ROOT=/home/pat/things-look-like-things
PATTERN_FILE=$ROOT/src/main/resources/patterns
while read -r QUERY
do
    # strip '?' in-shell instead of spawning echo|sed per line
    QUERY=${QUERY//\?/}
    ARGUMENTS="--pattern=$QUERY|--data=$1|--extractor=$2 "
    echo "$ARGUMENTS"
    qsub -cwd -l mem_free=32G -v ARGS="$ARGUMENTS " qsub-job.sh
done < "${PATTERN_FILE}"
| true |
a6071ceb6a94fded0ab48beb6d8ec4039f905634 | Shell | wizaplace/drone-ci-git | /git.sh | UTF-8 | 1,184 | 3.84375 | 4 | [
"MIT"
] | permissive | #!/bin/bash
echo "## Setting up .netrc"
echo "machine $DRONE_NETRC_MACHINE" >> "$HOME/.netrc"
echo "login $DRONE_NETRC_USERNAME" >> "$HOME/.netrc"
echo "password $DRONE_NETRC_PASSWORD" >> "$HOME/.netrc"
if [ -z "$DRONE_WORKSPACE" ]; then
DRONE_WORKSPACE=$(pwd)
else
if [ ! -d "$DRONE_WORKSPACE" ]; then
echo "## Creating workspace (${DRONE_WORKSPACE})"
mkdir -p "$DRONE_WORKSPACE"
fi
fi
bareRepoPath="/bareRepo"
mkdir -p "$bareRepoPath"
cd "$bareRepoPath"
if [ ! -d "$bareRepoPath/refs" ]; then
echo "## Cloning bare repo from ${DRONE_REMOTE_URL} into ${bareRepoPath}"
git clone --bare --mirror "$DRONE_REMOTE_URL" .
else
git cat-file -t "$DRONE_COMMIT_SHA" 2>/dev/null 1>/dev/null # test if we have the requested commit locally already
result=$?
if [ $result != 0 ]; then
echo "## Fetching repo updates"
for i in {1..2}; do git fetch && break || sleep 1; done
else
echo "## Repo already has the required ref, nothing to fetch"
fi
fi
echo "## Exporting ${DRONE_COMMIT_SHA} to workspace (${DRONE_WORKSPACE})"
set -e
git archive --format=tar "$DRONE_COMMIT_SHA" | (cd "$DRONE_WORKSPACE" && tar xf -)
| true |
b3cdb1b3795602b8a1de94d548aafc0753aa9dbf | Shell | diepes/LinuxWifiPerformancLogging | /WifiLogSpeed.sh | UTF-8 | 955 | 3.484375 | 3 | [] | no_license | #!/bin/bash
# (c)Pieter E Smit 2011 - GPL-v3
# Monitors performance of wifi card, and dumps stats to file.
# Edit vars below.
dev="wlan0"
pingip="10.0.0.2"
filehistory="wifispeed.txt"
#History
#v1 - inital script
#v2 - more generic add vars
#v3 - add uniq and grep filter, can run more aggressive, only log low speed <100Mb/s
#v4 - shorten output
(
echo "`date +%T` , START - iwconfig monitor dev $dev"
while true; do
sleep 0.1
echo -n `date +%T`
echo -n " , "
echo -n `iwconfig $dev | grep -o " Rate=.* Mb/s \|Link Quality.*" `
echo -n " , "
ping $pingip -c 1 | grep -o " .\{1,3\}% packet loss"
done
) | stdbuf -oL uniq --count --skip-chars=10 \
| grep -v --line-buffered --before-context=1 --after-context=1 \
"Rate=300 Mb\|Rate=2.. Mb\|Rate=1.. Mb\|Rate=121.5 Mb" \
| sed --unbuffered \
-e 's/Link Quality/LQ/g;s/Signal level/Sig/g;s/packet loss/loss/g' \
| tee --append $filehistory
#THE END
| true |
78c032af44153963fc9e9cc38d3650f7aeab7623 | Shell | GenericMappingTools/sandbox | /win32-dev/netcdf-4.2.1.1/libcf/cfcheck/tst_cfcheck.sh | UTF-8 | 363 | 2.84375 | 3 | [
"NetCDF"
] | permissive | #!/bin/sh
# Copyright 2006, Unidata/UCAR. See COPYRIGHT file.
# This is part of the libcf library.
# This shell script runs the cfcheck tests.
# Ed Hartnett, 11/23/06
# $Id$
set -e
echo ""
echo "*** Testing cfcheck."
echo "*** testing that useage works..."
./cfcheck &> useage.txt
#diff -w usage.txt ref_usage.txt
echo "*** All tests of cfcheck passed!"
exit 0
| true |
944ce832f0d654efceeb5a514050f3bcd492fc4b | Shell | Meenapintu/laughing-adventure | /android-studio.sh | UTF-8 | 851 | 3.53125 | 4 | [] | no_license | #!/bin/bash
echo "########### INSTALLING ANDROID STUDIO#########"
echo "Select bin folder of extracted zip"
PATH_BIN=$(zenity --file-selection --directory)
if [ ! -f "$PATH_BIN/studio.sh" ]
then
echo "File $FILE does not contain studio.sh file "
exit
fi
printf ". .. .. .. .. .. .. \n"
sudo echo "[Desktop Entry]
Name=Android Studio
Type=Application
Exec=$PATH_BIN/studio.sh
Terminal=false
Icon=$PATH_BIN/studio.png
Comment=Integrated Development Environment
NoDisplay=false
Categories=Development;IDE;
Name[en]=Android Studio" >/usr/share/applications/"android studio.desktop"
printf ". .. .. .. .. .. .. \n"
sudo desktop-file-install /usr/share/applications/"android studio.desktop"
printf ". .. .. .. .. .. .. \n"
cd /usr/local/bin
sudo rm -rf studio.sh
sudo ln -s $PATH_BIN/studio.sh
printf "\nRUN command studio.sh\n"
| true |
e5dcaefc37c5b8663da6e6a1181e1d81281b90d2 | Shell | AliEdalat/MyBackup-in-bash | /bash/untitled.sh | UTF-8 | 1,415 | 3.390625 | 3 | [] | no_license | entry="/home/ali/workspace/BigNumber/Makefile.bak"
destination="/home/ali/workspace/alibackup"
diractory="/home/ali/workspace/BigNumber"
#if grep -Fxq "$(basename $entry)" "$destination/${diractory##*/}/packup.log"
# then
# code if found
# echo -e "found"
# sed -i '/'"$(basename $entry)"'/c\'"$(basename $entry) $(date '+20%y-%m-%d %H:%M:%S')"'' "$destination/${diractory##*/}/packup.log"
# else
# code if not found
# echo "$(basename $entry) $(date '+20%y-%m-%d %H:%M:%S')" >> "$destination/${diractory##*/}/packup.log"
# fi
#regex="\s+"$(basename $entry)"\s+"
#file_content=$( cat "$destination/${diractory##*/}/packup.log" )
#if [[ " $file_content " =~ $regex ]] # please note the space before and after the file content
# then
# echo "found"
# sed -i '/'"$(basename $entry)"'/c\'"$(basename $entry) $(date '+20%y-%m-%d %H:%M:%S')"'' "$destination/${diractory##*/}/packup.log"
#else
# echo "not found"
# echo "$(basename $entry) $(date '+20%y-%m-%d %H:%M:%S')" >> "$destination/${diractory##*/}/packup.log"
#fi
#while true
#do
# for i
# do
# echo $i
# done
#done
# Look up this file's entry in the backup log and compute how many seconds
# have passed since the recorded backup timestamp.
result=`grep -i "$(basename $entry) " $destination/${diractory##*/}/packup.log`;
echo "$result"
#result="A5.pdf 2017-07-08 14:51:58"
# Fields 2 and 3 of the log line are the date and time of the last backup.
idw="$( cut -d ' ' -f 2 <<< "$result" ) $( cut -d ' ' -f 3 <<< "$result" )"
# Age in seconds: now minus the recorded timestamp (shell arithmetic
# replaces the slower external `expr`).
dif=$(( $(date +%s) - $(date -d "$idw" '+%s') ))
echo "$idw"
echo "$dif"
| true |
fdcfc1bc47cbde2cf757fe70989fb72e59d23b5e | Shell | Sprunjer/Simple-Util | /commit.sh | UTF-8 | 315 | 3.03125 | 3 | [] | no_license | git init
# Stage everything, commit with a user-supplied message, then optionally push.
git add -A
git status
echo "Enter the commit message"
read -r message
# Bug fix: the message must be quoted, otherwise a multi-word message is
# split into extra arguments and 'git commit' fails.
git commit -m "$message"
git log
read -r -n1 -p "Do you want to push to the branch?" choice
# Emit a newline: 'read -n1' returns without echoing one.
echo
case $choice in
	y|Y)
		# Bug fix: the original 'read -n -p ...' is broken — '-n' requires a
		# numeric argument and swallowed '-p', so the prompt/read never worked.
		# Also fixed the 'orgin' typo in the prompt.
		read -r -p "What is origin?" branch
		git push origin "$branch"
		;;
	n|N) echo no ;;
	*) echo dont know ;;
esac
| true |
5d91ab2039455f73cb788b31f65ebc20f4a167a0 | Shell | bschiela/dotrc | /bin/nb | UTF-8 | 419 | 3.484375 | 3 | [] | no_license | #!/bin/bash
# go to research notebook (abort if the directory is missing rather than
# operating on whatever the current directory happens to be)
cd ~/workspace/notebook/research/ || exit 1
# determine which tex file to open; default to "stream" when no arg is given
if [[ $# -eq 0 ]];
	then FILE=stream;
	else FILE=$1;
fi
FILE="tex/$FILE.tex"
# ensure file exists
if [[ ! -f "$FILE" ]];
then
	printf "%s does not exist\n" "$FILE"
	echo "try one of the following:"
	ls ./tex/*.tex
	# exit non-zero so callers can detect the failure
	exit 1;
fi
# open file, ready for note-taking (jump to end, start live preview/compile)
vim "$FILE" "+normal G" +VimtexView +VimtexCompile
| true |
82c0307da6bcd479ad431b9ad6d8b1ea818d55ce | Shell | adrianlzt/cerebro | /monitorizacion/icinga-nagios/check_stale.sh | UTF-8 | 507 | 3.40625 | 3 | [] | no_license | #!/bin/bash
# Nagios-style check: percentage of OK services whose state data has gone
# stale, queried from a Livestatus socket at HOST:PORT.
EXPECTED_ARGS=8
if [ $# -ne $EXPECTED_ARGS ]
then
	echo "Usage: $(basename "$0") -h HOST -p PORT -c CRIT_THRESHOLD -w WARN_THRESHOLD"
	# Bug fix: $E_BADARGS was never defined, so 'exit $E_BADARGS' became a
	# bare 'exit' and reported SUCCESS (status 0) on a usage error — fatal
	# for a monitoring plugin. Exit 2 (UNKNOWN) instead.
	exit 2
fi
# NOTE(review): arguments are taken by fixed position, so the flags must be
# passed exactly in the order shown in the usage line.
HOST=$2
PORT=$4
CRIT=$6
WARN=$8
# Livestatus queries: total services in state OK, and those whose staleness
# indicator is >= 1.5.
TOTAL_SVC=$(echo -e "GET services\nStats: state = 0" | nc "$HOST" "$PORT")
STALED_SVC=$(echo -e "GET services\nStats: state = 0\nFilter: service_staleness >= 1.5" | nc "$HOST" "$PORT")
# Delegate threshold evaluation of the stale percentage to check_generic.pl.
/usr/lib64/nagios/plugins/check_generic.pl -e "echo \"$STALED_SVC*100/$TOTAL_SVC\" | bc -l" -w ">$WARN" -c ">$CRIT" -p pct_staled
| true |
a434080cfb649843e41db1ee72d88598c7f3295f | Shell | parti-coop/catan-ios-app | /bin/keys.sh | UTF-8 | 472 | 3.65625 | 4 | [] | no_license | #!/bin/sh
# Push per-environment config values (parsed from env.yml) into cocoapods-keys.
set -e
if [ $# -eq 0 ]
then
	echo "Usage : $0 [development|production]"
	# Exit non-zero: missing argument is an error, not success.
	exit 1
fi
environment_name=$1
. bin/parse_yaml.sh
# parse_yaml emits variable assignments; eval brings them into this shell.
eval $(parse_yaml ./env.yml "CATAN_CONFIG_")
prefix="CATAN_CONFIG_${environment_name}_"
# Collect the names of all variables belonging to the chosen environment.
env_list=$(set | awk -F "=" '{print $1}' | grep "^${prefix}.*")
# Portability fix: the script's shebang is /bin/sh, but '<<<' (here-string)
# and '${!line}' (indirect expansion) are bashisms that break under dash/ash.
# Feed the list via printf and use a POSIX eval for the indirection instead.
printf '%s\n' "$env_list" | while IFS= read -r line
do
	# Indirect lookup: value of the variable whose *name* is in $line.
	eval "value=\$$line"
	echo "${line#$prefix} ==> $value"
	# Quote key and value so values containing spaces survive intact
	# (the old eval'd unquoted command line would split them).
	bundle exec pod keys set "${line#$prefix}" "$value"
done
bundle exec pod keys generate
| true |
0aca5f8e8a3ae476faf0209f2f7c8bd15bb23ab1 | Shell | oaqa/FlexNeuART | /scripts/data_convert/wikipedia_dpr/download_dpr_passages.sh | UTF-8 | 443 | 3.546875 | 4 | [
"BSD-2-Clause",
"Apache-2.0"
] | permissive | #!/bin/bash -e
# Download the DPR Wikipedia passage split into the given directory, using a
# .SUCCESS marker file to make the download idempotent.
dstDir="$1"
[ "$dstDir" != "" ] || { echo "Specify destination directory (1st arg)" ; exit 1 ; }
[ -d "$dstDir" ] || { echo "Not a directory: $dstDir (1st arg)" ; exit 1 ; }
# Robustness fix: the '-e' in the shebang is lost when the script is run as
# 'bash script.sh', so check cd/wget explicitly — otherwise a failed download
# could still fall through and stamp the SUCCESS marker.
cd "$dstDir" || exit 1
fileName="psgs_w100.tsv.gz"
fileNameSuccess=${fileName}.SUCCESS
if [ -f "$fileNameSuccess" ] ; then
	echo "Already downloaded!"
	exit 0
fi
wget "https://dl.fbaipublicfiles.com/dpr/wikipedia_split/$fileName" || exit 1
# Only mark success after wget completed without error.
touch "$fileNameSuccess"
| true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.