blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
d5b5d3d24bc44a3029e39c5c1ff6f273c7ce3d8d | Shell | idelsink/rc-backups | /FILE/_USER_/bash/.bash_aliases | UTF-8 | 1,370 | 3.046875 | 3 | [
"MIT"
] | permissive | # no fuckups plz
# Safety nets: prompt before destructive file operations.
alias rm='rm -I'
alias cp='cp -i'
alias mv='mv -i'
# make all the things nice!
alias ls='ls --color'
alias ll='ls -lh --color'
alias la='ll -a --color'
alias less='less --raw-control-chars'
# let out my inner child!
alias boop='touch'
# I did an oopsie: re-run the previous command with sudo.
alias fuck='sudo $(history -p \!\!)'
# some bash stuff
alias reload-bashrc='. ~/.bashrc'
alias dirs='dirs -v'
# gdb: suppress the startup banner.
alias gdb='gdb -quiet'
alias arm-none-eabi-gdb='arm-none-eabi-gdb -quiet'
alias xdo='xdg-open'
alias ip='ip -c'
# Adding an extra space behind command
# Makes it possible to expand command and use aliasses
alias watch='watch --color '
# docker
alias docker-rm-containers='docker rm $(docker ps -a -q)' # Delete all containers
alias docker-rm-images='docker rmi $(docker images -q)' # Delete all images
# BUGFIX: previously invoked docker_rm_containers/docker_rm_images
# (underscores), which are never defined — the aliases above use hyphens.
alias docker-rm-all='docker-rm-containers ; docker-rm-images'
# package updates
alias dnfu='sudo dnf upgrade --refresh'
# vpn
alias connect-vpn='. $HOME/scripts/vpn/connect-vpn.sh'
# Set screen commands
alias home-dual='xrandr --output DP-1 --primary --mode 3440x1440 --pos 0x0 --output HDMI-0 --mode 1920x1080 --pos 3440x180'
alias home-single='xrandr --output HDMI-0 --primary --mode 1920x1080 --output DP-1 --mode 1920x1080 --same-as HDMI-0'
# include aliases with specific system aliases
if [ -f ~/.bash_aliases_system ]; then
    . ~/.bash_aliases_system
fi
| true |
5c00b0ac7fa922792caa542516b0947a04cbec80 | Shell | brucesdad13/dotfiles | /scripts/batch-rename | UTF-8 | 407 | 3.765625 | 4 | [] | no_license | #!/bin/bash
# Check that we actually want to do this potentially dangerous operation without review
read -p "Are you sure you want to use this script (considered batch-rename-safe)? " -n 1 -r
echo
if [[ $REPLY =~ ^[Yy]$ ]]
then
	# Loop through matching filenames.
	# "$1" is quoted so a pattern containing spaces or glob characters is
	# matched literally instead of being word-split / expanded again.
	for filename in *"$1"*;
	do
		# With nullglob off, an unmatched glob yields the literal pattern;
		# skip it so we never try to rename a non-existent file.
		[ -e "$filename" ] || continue
		# Rename the file replacing the old pattern with the new
		# (quoting "$1" inside the expansion keeps it a literal substring).
		mv -- "$filename" "${filename//"$1"/$2}";
	done
fi
| true |
c98de893fc8623f71be6547cf956b03e42a7d163 | Shell | mikiair/raspi-gpio3-shutdown | /script/reconfigure | UTF-8 | 273 | 2.8125 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Reconfigure the raspi-gpio3-shutdown service: stop it, install the
# current config file into /etc, restart it and show its status.
echo "Reconfigure raspi-gpio3-shutdown service..."
# Abort immediately if any command below fails.
set -e
# Work relative to the repository root (parent of this script's directory).
cd "$(dirname "$0")/.."
sudo systemctl stop raspi-gpio3-shutdown
sudo cp raspi-gpio3-shutdown.conf /etc
sudo systemctl start raspi-gpio3-shutdown
# NOTE(review): under `set -e`, a non-active service makes `systemctl status`
# return non-zero and the script exits before printing "Done." — confirm
# whether that is intended.
systemctl status raspi-gpio3-shutdown
echo "Done."
| true |
5cf1c436c80e5786b93f27053846a4a562e00631 | Shell | marciniuk/BlackArrow | /blackarrow/.config/polybar/covid-19.sh | UTF-8 | 363 | 2.9375 | 3 | [
"ISC"
] | permissive | #!/bin/sh
# Fetch today's COVID-19 case/death counts for Poland from disease.sh and
# write them to the polybar cache file.
download() {
	stats="$(curl -s "https://disease.sh/v2/countries/Poland")"
	# If jq cannot parse the response (bad/empty payload), bail out and
	# leave the previous cache untouched.
	todaycases="$(echo "$stats" | jq '.todayCases' 2>/dev/null)" || return
	todaydeaths="$(echo "$stats" | jq '.todayDeaths')"
	echo " $todaycases $todaydeaths" > ~/.cache/covid-19
}
# Only refresh when the API host answers a 1-second ping (i.e. we're online).
ping -w 1 corona.lmao.ninja >/dev/null 2>&1 && download
# Print the cached line (possibly stale when offline) for the polybar module.
cat ~/.cache/covid-19
| true |
ad573b398b9e73f9240daadf88378913557d63e2 | Shell | cmeinco/aws-security-group-analysis | /setup.sh | UTF-8 | 535 | 3.65625 | 4 | [] | no_license | #!/usr/bin/env bash
# Install the latest Terraform release into ./bin.
# if terraform does not exist — also check ./bin so that re-running the
# script after a successful install does not download it again.
if [ ! -f ./terraform ] && [ ! -f ./bin/terraform ]; then
    echo "Terraform not found!, going to get the latest and greatest..."
    # Scrape the releases index; the first listed directory is the newest version.
    latest_version=$(curl --silent https://releases.hashicorp.com/terraform/ | grep href | grep terraform | head -1 | awk -F"/" '{print $3}')
    echo "downloading ${latest_version}"
    wget "https://releases.hashicorp.com/terraform/${latest_version}/terraform_${latest_version}_linux_amd64.zip"
    unzip "terraform_${latest_version}_linux_amd64.zip"
fi
# -p: succeed even when ./bin already exists (previous runs create it).
mkdir -p bin
# Only move the binary if a fresh download actually produced it.
[ -f ./terraform ] && mv terraform bin/
| true |
019091b97c87e0b919013f9591661034b72c1d83 | Shell | maunier/host-cli | /sh/zhost-add.sh | UTF-8 | 187 | 3.15625 | 3 | [] | no_license | #!/bin/zsh
# Append comma-separated host entries (passed as $1) to /etc/hosts.
IFS=,
content=$1
hostFile='/etc/hosts'
# With IFS set to ',', the unquoted expansion splits $1 on commas;
# join the pieces into one space-separated string.
for param in ${content[@]}
do
    addStr="$addStr $param"
done
# BSD/macOS sed: -i '.bac' edits in place and keeps a ".bac" backup.
# The '$a\' command appends $addStr after the last line of the hosts file.
sudo sed -i '.bac' "
\$a\\
$addStr
" $hostFile;
# Show the result with line numbers for visual confirmation.
cat -n $hostFile;
| true |
164f1989de2dd497251cb14c508ab7708d43e5ff | Shell | instantlinux/docker-tools | /images/vsftpd/entrypoint.sh | UTF-8 | 1,598 | 3.71875 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/sh -e
# Container entrypoint: set the timezone, create the FTP user, work around
# /dev/stdout logging, generate vsftpd.conf from environment variables and
# finally start vsftpd as a child process.

# POSIX '-n' instead of '! -z'; $TZ quoted so an odd value cannot word-split.
if [ ! -f /etc/timezone ] && [ -n "$TZ" ]; then
  # At first startup, set timezone
  cp "/usr/share/zoneinfo/$TZ" /etc/localtime
  echo "$TZ" >/etc/timezone
fi

# Passive-mode FTP cannot work without an advertised address.
if [ -z "$PASV_ADDRESS" ]; then
  echo "** This container will not run without setting for PASV_ADDRESS **"
  sleep 10
  exit 1
fi

if [ -e "/run/secrets/$FTPUSER_PASSWORD_SECRET" ]; then
  # Create the FTP user; the secret holds a pre-hashed password (chpasswd -e).
  adduser -u $FTPUSER_UID -s /bin/sh -g "ftp user" -D $FTPUSER_NAME
  echo "$FTPUSER_NAME:$(cat "/run/secrets/$FTPUSER_PASSWORD_SECRET")" \
    | chpasswd -e
fi

# There is a vexing problem with permissions of /dev/stdout under
# Docker, gave up trying to fix symlink issues. Here's the workaround.
# BUGFIX: use POSIX '=' instead of the bashism '==' — this script runs
# under /bin/sh, where '==' is an error on dash and non-portable generally.
if [ "$VSFTPD_LOG_FILE" = /dev/stdout ]; then
  VSFTPD_LOG_FILE=/var/log/stdout.txt
  touch "$VSFTPD_LOG_FILE"
  # Stream the log file to the container's real stdout in the background.
  tail -f -n 0 "$VSFTPD_LOG_FILE" &
fi

# Render the vsftpd configuration from environment variables.
cat <<EOF >/etc/vsftpd/vsftpd.conf
anon_mkdir_write_enable=$ANON_MKDIR_WRITE_ENABLE
anon_upload_enable=$ANON_UPLOAD_ENABLE
anonymous_enable=$ANONYMOUS_ENABLE
listen=YES
local_enable=YES
local_umask=$LOCAL_UMASK
log_ftp_protocol=$LOG_FTP_PROTOCOL
nopriv_user=vsftp
pasv_address=$PASV_ADDRESS
pasv_enable=YES
pasv_max_port=$PASV_MAX_PORT
pasv_min_port=$PASV_MIN_PORT
seccomp_sandbox=NO
use_localtime=$USE_LOCALTIME
vsftpd_log_file=$VSFTPD_LOG_FILE
write_enable=$WRITE_ENABLE
xferlog_enable=YES
EOF

# Append any drop-in configuration fragments.
if [ "$(ls -A /etc/vsftpd.d)" ]; then
  cat /etc/vsftpd.d/* >> /etc/vsftpd/vsftpd.conf
fi

# Invoke as a child process; version 3.0.3 crashes if run as PID 1
# See https://github.com/InfrastructureServices/vsftpd/commit/970711fde95bee3de1e4a5e0b557c3132d0c3e3f
vsftpd /etc/vsftpd/vsftpd.conf
| true |
7dcc86b82635cb34c46fac11c12d64e745eb3a1d | Shell | kbingham/vsp-tests | /scripts/logger.sh | UTF-8 | 412 | 3.671875 | 4 | [] | no_license | #!/bin/sh
# Filter: prefix each stdin line with a "[sec.usec]" timestamp (and an
# optional label from $1), echo it, and mirror it into the ftrace marker
# when the kernel exposes one.

# Current kernel time taken from the "now at <ns> nsecs" line of
# /proc/timer_list, formatted as "[seconds.microseconds]".
now() {
	awk '/^now/ {time=$3; printf("[%u.%06u]", time / 1000000000, (time % 1000000000) / 1000) ; exit}' /proc/timer_list
}

# Optional label: renders as " [label]" when $1 is given, empty otherwise.
label=${1:+ [$1]}

TRACE_MARKER=/sys/kernel/debug/tracing/trace_marker

if [ -e $TRACE_MARKER ]; then
	extra_log_files=$TRACE_MARKER
fi

# BUGFIX: IFS= and -r keep leading whitespace and backslashes intact —
# a logger must not mangle the lines it relays.
while IFS= read -r line ; do
	newline="$(now)$label $line"
	echo "$newline"
	for f in $extra_log_files; do
		echo "$newline" >> $f;
	done;
done
| true |
5e70fb13cafcf489ff055fd428a9d5f889143ed5 | Shell | Welasco/labtest | /script.sh | UTF-8 | 6,469 | 3.359375 | 3 | [] | no_license | #!/bin/bash
#############################
# Script Definition
#############################
# Ubuntu lab provisioning: installs .NET Core, Node.js, Python, Apache/PHP,
# Java and Docker. All command output is appended to $logpath.
logpath=/var/log/deploymentscriptlog
#############################
# Upgrading Linux Distribution
#############################
echo "#############################" >> $logpath
echo "Upgrading Linux Distribution" >> $logpath
echo "#############################" >> $logpath
sudo apt-get update >> $logpath
sudo apt-get -y upgrade >> $logpath
echo " " >> $logpath
#############################
# Installing .Net Core
#############################
echo "#############################" >> $logpath
echo "Installing .Net Core" >> $logpath
echo "#############################" >> $logpath
# Register the Microsoft package feed for Ubuntu 18.04, then install the SDK.
wget -q https://packages.microsoft.com/config/ubuntu/18.04/packages-microsoft-prod.deb -O packages-microsoft-prod.deb >> $logpath
sudo dpkg -i packages-microsoft-prod.deb >> $logpath
sudo add-apt-repository universe >> $logpath
sudo apt-get update >> $logpath
sudo apt-get install -y apt-transport-https >> $logpath
sudo apt-get install -y dotnet-sdk-3.0 >> $logpath
echo " " >> $logpath
#############################
# Installing NodeJS
#############################
echo "#############################" >> $logpath
echo "Installing NodeJS" >> $logpath
echo "#############################" >> $logpath
# NodeSource setup script registers the Node 10.x apt repository.
curl -sL https://deb.nodesource.com/setup_10.x | sudo -E bash - >> $logpath
sudo apt-get install -y nodejs >> $logpath
#############################
# Python is already installed
#############################
echo "#############################" >> $logpath
echo "Installing Python" >> $logpath
echo "#############################" >> $logpath
sudo apt-get install -y python-pip >> $logpath
sudo apt-get install -y python3-pip >> $logpath
echo " " >> $logpath
#############################
# Installing Apache and PHP
#############################
# Installing Apache
echo "#############################" >> $logpath
echo "Installing Apache" >> $logpath
echo "#############################" >> $logpath
sudo apt-get install -y apache2 >> $logpath
# Installing PHP
echo "Installing PHP" >> $logpath
sudo apt-get install -y php >> $logpath
sudo systemctl restart apache2.service >> $logpath
# Drop a phpinfo() page into the default web root as a smoke test.
printf '%s\n' '' \
'<?php' \
' phpinfo();' \
'?>' \
'' \
'' > /var/www/html/phpinfo.php
echo " " >> $logpath
#############################
# Installing Java
#############################
echo "#############################" >> $logpath
echo "Installing Java" >> $logpath
echo "#############################" >> $logpath
sudo apt install -y default-jdk >> $logpath
echo " " >> $logpath
#############################
#Install Docker
#############################
echo "#############################" >> $logpath
echo "Installing Docker" >> $logpath
echo "#############################" >> $logpath
wget -qO- https://get.docker.com/ | sh >> $logpath
# NOTE(review): $1 is presumably the admin user name passed by the caller
# (e.g. a VM custom-script extension) — confirm against the invocation.
sudo usermod -aG docker $1
echo " " >> $logpath
#############################
# Preparing DNS Host file
#############################
echo "#############################" >> $logpath
echo "Preparing DNS" >> $logpath
echo "#############################" >> $logpath
# Map contoso.com to loopback so the Apache vhost created later resolves locally.
sudo bash -c "echo 127.0.0.1 contoso.com >> /etc/hosts"
sudo bash -c "echo 127.0.0.1 www.contoso.com >> /etc/hosts"
cat /etc/hosts >> $logpath
echo " " >> $logpath
#############################
# Preparing Code
#############################
echo "#############################" >> $logpath
echo "Preparing Code" >> $logpath
echo "#############################" >> $logpath
sudo apt-get install -y unzip >> $logpath
# Fetch the sample applications bundle and unpack it to /opt/apps.
wget https://github.com/Welasco/labtest/raw/master/oss-labs.zip >> $logpath
unzip oss-labs.zip -d /opt >> $logpath
mv /opt/oss-labs /opt/apps
# Preparing NodeJS Code
echo "Preparing NodeJS App (npm install)" >> $logpath
cd /opt/apps/NodeJSApp
npm install >> $logpath
chmod +x /opt/apps/NodeJSApp/bin/www
echo " " >> $logpath
# Preparing Python Code
echo "Preparing Python App (pip install)" >> $logpath
cd /opt/apps/PythonApp
pip3 install -r requirements.txt >> $logpath
chmod +x /opt/apps/PythonApp/app.py
echo " " >> $logpath
# Preparing Java
echo "Preparing Java App" >> $logpath
cd /opt/apps/JavaApp
chmod +x /opt/apps/JavaApp/mvnw
# build java
# mvnw clean package
echo " " >> $logpath
# Preparing PHP Code
# Setting UP Apache VDir
echo "Preparing PHP Code" >> $logpath
# Write an Apache vhost for contoso.com listening on port 8088.
printf '%s\n' '' \
'<VirtualHost *:8088>' \
' DocumentRoot /var/www/contoso.com' \
' ErrorLog ${APACHE_LOG_DIR}/error.log' \
' CustomLog ${APACHE_LOG_DIR}/access.log combined' \
' <Directory /var/www/contoso.com>' \
' Options Indexes FollowSymLinks MultiViews' \
' AllowOverride All' \
' Require all granted' \
' </Directory>' \
'</VirtualHost>' \
'' \
'' > /etc/apache2/sites-available/contoso.com.conf
cat /etc/apache2/sites-available/contoso.com.conf >> $logpath
sudo mkdir /var/www/contoso.com
sudo cp /var/www/html/phpinfo.php /var/www/contoso.com/phpinfo.php
sudo bash -c "echo Listen 8088 >> /etc/apache2/ports.conf"
cat /etc/apache2/ports.conf >> $logpath
sudo a2enmod rewrite >> $logpath
sudo a2ensite contoso.com.conf >> $logpath
sudo systemctl reload apache2
sudo cp -rR /opt/apps/PHPApp/. /var/www/contoso.com/
sudo service apache2 restart
echo " " >> $logpath
sudo cp -rR /opt/apps/MainApp/. /var/www/html/
#############################
# Loading App Services Daemon
#############################
echo "#############################" >> $logpath
echo "Preparing Apps Daemon" >> $logpath
echo "#############################" >> $logpath
# Install and enable systemd units for the three sample apps.
cp /opt/apps/PythonApp/pythonapp.service /lib/systemd/system
cp /opt/apps/NodeJSApp/nodejsapp.service /lib/systemd/system
cp /opt/apps/JavaApp/javaapp.service /lib/systemd/system
sudo systemctl daemon-reload
sudo systemctl enable nodejsapp
sudo systemctl enable pythonapp
sudo systemctl enable javaapp
sudo service nodejsapp start
sudo service pythonapp start
sudo service javaapp start
sudo service nodejsapp status >> $logpath
sudo service pythonapp status >> $logpath
sudo service javaapp status >> $logpath
echo " " >> $logpath
#############################
# Cleaning Resources
#############################
echo "#############################" >> $logpath
echo "Cleaning Resources" >> $logpath
echo "#############################" >> $logpath
rm -rf /opt/apps/VMTemplate
rm -rf /opt/apps/StaticDesign
rm -rf /opt/apps/.gitignore
rm -rf /opt/apps/README.md
rm -rf /opt/apps/.vscode
ls /opt/apps >> $logpath
3f4308c879ac3c8d4c96cafdbe5564b1fb6754ca | Shell | rgl/rancher-ubuntu-vagrant | /provision-rancher-example-cluster.sh | UTF-8 | 7,836 | 2.8125 | 3 | [] | no_license | #!/bin/bash
source /vagrant/lib.sh

# Positional arguments (each consumed with shift; all optional):
#   $1 registry domain, $2 rancher server domain, $3 k8s version.
registry_domain="${1:-pandora.rancher.test}"; shift || true
rancher_server_domain="${1:-server.rancher.test}"; shift || true
rancher_server_url="https://$rancher_server_domain"
k8s_version="${1:-v1.21.4-rancher1-1}"; shift || true

# Non-default network/DNS settings for the example cluster.
pod_network_cidr='10.62.0.0/16' # default is 10.42.0.0/16.
service_network_cidr='10.63.0.0/16' # default is 10.43.0.0/16.
service_node_port_range='30000-32767' # default is 30000-32767
dns_service_ip_address='10.63.0.10' # default is 10.43.0.10.
cluster_domain='example.domain' # default is cluster.local.
cluster_name='example'
# API token created by an earlier provisioning step.
admin_api_token="$(cat ~/.rancher-admin-api-token)"

# create the cluster.
# NB this JSON can be obtained by observing the network when manually creating a cluster from the rancher UI.
# NB also use the schemas browser at https://server.rancher.test:8443/v3/schemas.
# NB to troubleshoot why the cluster provisioning is failing with something like:
#       cluster c-fhrlt state: provisioning Failed to get job complete status for job rke-network-plugin-deploy-job in namespace kube-system
#    execute:
#       docker ps -a -f status=exited --format '{{.Names}} {{.Command}}' --no-trunc | grep -v /pause | grep rke-network-plugin
#    then get the logs with, e.g.:
#       docker logs k8s_rke-network-plugin-pod_rke-network-plugin-deploy-job-tcm8p_kube-system_ac5adeb3-16ca-417d-b899-f51f14d5c712_0
# see https://server.rancher.test:8443/v3/schemas/cluster
# see https://server.rancher.test:8443/v3/schemas/rancherKubernetesEngineConfig
# see https://server.rancher.test:8443/v3/schemas/rkeConfigServices
# see https://server.rancher.test:8443/v3/schemas/kubeAPIService
# see https://server.rancher.test:8443/v3/schemas/kubeControllerService
# see https://server.rancher.test:8443/v3/schemas/kubeletService
# see https://rancher.com/docs/rancher/v2.x/en/cluster-provisioning/rke-clusters/windows-clusters/
# see docker ps --format '{{.Image}} {{.Names}} {{.Command}}' --no-trunc
# see docker logs kubelet
# see find /opt -type f | grep -v /catalog-cache
# see /etc/cni
echo "creating the cluster..."
# POST the cluster definition to the Rancher v3 API and record its id.
cluster_response="$(wget -qO- \
  --header 'Content-Type: application/json' \
  --header "Authorization: Bearer $admin_api_token" \
  --post-data '{
    "type": "cluster",
    "name": "'$cluster_name'",
    "description": "hello world",
    "dockerRootDir": "/var/lib/docker",
    "enableClusterAlerting": false,
    "enableClusterMonitoring": false,
    "enableNetworkPolicy": false,
    "windowsPreferedCluster": false,
    "rancherKubernetesEngineConfig": {
      "type": "rancherKubernetesEngineConfig",
      "kubernetesVersion": "'$k8s_version'",
      "addonJobTimeout": 45,
      "enableCriDockerd": false,
      "ignoreDockerVersion": true,
      "rotateEncryptionKey": false,
      "sshAgentAuth": false,
      "authentication": {
        "type": "authnConfig",
        "strategy": "x509"
      },
      "dns": {
        "type": "dnsConfig",
        "nodelocal": {
          "type": "nodelocal",
          "ip_address": "",
          "node_selector": null,
          "update_strategy": {}
        }
      },
      "network": {
        "type": "networkConfig",
        "mtu": 0,
        "plugin": "flannel",
        "options": {
          "flannel_backend_type": "host-gw",
          "flannel_iface": "eth1"
        }
      },
      "ingress": {
        "type": "ingressConfig",
        "provider": "nginx",
        "defaultBackend": false,
        "httpPort": 0,
        "httpsPort": 0
      },
      "monitoring": {
        "type": "monitoringConfig",
        "provider": "metrics-server",
        "replicas": 1
      },
      "services": {
        "type": "rkeConfigServices",
        "kubeApi": {
          "type": "kubeAPIService",
          "alwaysPullImages": false,
          "podSecurityPolicy": false,
          "serviceClusterIpRange": "'$service_network_cidr'",
          "serviceNodePortRange": "'$service_node_port_range'",
          "secretsEncryptionConfig": {
            "enabled": false,
            "type": "secretsEncryptionConfig"
          }
        },
        "kubeController": {
          "type": "kubeControllerService",
          "clusterCidr": "'$pod_network_cidr'",
          "serviceClusterIpRange": "'$service_network_cidr'"
        },
        "kubelet": {
          "type": "kubeletService",
          "clusterDnsServer": "'$dns_service_ip_address'",
          "clusterDomain": "'$cluster_domain'"
        },
        "etcd": {
          "creation": "12h",
          "extraArgs": {
            "heartbeat-interval": 500,
            "election-timeout": 5000
          },
          "gid": 0,
          "retention": "72h",
          "snapshot": false,
          "uid": 0,
          "type": "etcdService",
          "backupConfig": {
            "type": "backupConfig",
            "enabled": true,
            "intervalHours": 12,
            "retention": 6,
            "safeTimestamp": false,
            "timeout": 300
          }
        }
      },
      "upgradeStrategy": {
        "maxUnavailableControlplane": "1",
        "maxUnavailableWorker": "10%",
        "drain": "false",
        "nodeDrainInput": {
          "deleteLocalData": false,
          "force": false,
          "gracePeriod": -1,
          "ignoreDaemonSets": true,
          "timeout": 120,
          "type": "nodeDrainInput"
        },
        "maxUnavailableUnit": "percentage"
      }
    },
    "localClusterAuthEndpoint": {
      "enabled": true,
      "type": "localClusterAuthEndpoint"
    },
    "labels": {},
    "annotations": {},
    "agentEnvVars": [],
    "scheduledClusterScan": {
      "enabled": false,
      "scheduleConfig": null,
      "scanConfig": null
    }
  }' \
  "$rancher_server_url/v3/cluster")"
cluster_id="$(jq -r .id <<<"$cluster_response")"
# Persist the id so sibling provisioning scripts can find the cluster.
echo "$cluster_id" >/vagrant/shared/example-cluster-id

# save the registration node commands.
echo "getting the rancher-agent registration command..."
# Retry until the API answers (the server may still be starting up).
while true; do
  cluster_registration_token_response="$(
    wget -qO- \
      --header 'Content-Type: application/json' \
      --header "Authorization: Bearer $admin_api_token" \
      --post-data '{"type":"clusterRegistrationToken","clusterId":"'$cluster_id'"}' \
      "$rancher_server_url/v3/clusterregistrationtoken" || true)"
  [ -n "$cluster_registration_token_response" ] && break || sleep 5
done
# Follow the token's self link to obtain the node registration command.
cluster_registration_token_url="$(jq -r .links.self <<<"$cluster_registration_token_response")"
cluster_registration_response="$(
  wget -qO- \
    --header 'Content-Type: application/json' \
    --header "Authorization: Bearer $admin_api_token" \
    "$cluster_registration_token_url")"
rancher_node_command="$(jq -r .nodeCommand <<<"$cluster_registration_response")"
# Shared with the node VMs so they can register themselves into the cluster.
echo "$rancher_node_command" >/vagrant/shared/rancher-ubuntu-registration-node-command.sh
74eb449946b3338cd441d8209c510f9837c852ad | Shell | NoUniformDay/HyperPooch | /scripts/startUp.sh | UTF-8 | 1,137 | 2.53125 | 3 | [] | no_license | #!/bin/bash
#
# Bash script that initialises network such as
#
# Hyperledger Fabric bootstrap via the local REST API on port 4000:
# enroll a user, create and join a channel, then install chaincode.
echo "POST request Enroll on Org1 ..."
echo
ORG1_TOKEN=$(curl -s -X POST \
  http://localhost:4000/users \
  -H "content-type: application/x-www-form-urlencoded" \
  -d "orgName=org1&username=eric")
echo $ORG1_TOKEN
# Give the enrollment time to settle before the next call.
sleep 5
echo "POST request Create channel ..."
echo
# NOTE(review): '-X' appears to be missing before POST here (compare the
# other curl invocations) — confirm whether this request actually POSTs.
curl -s POST \
  http://localhost:4000/channels \
  -H "content-type: application/x-www-form-urlencoded" \
  -d "channelName=mychannel&channelConfigPath=../artifacts/channel/mychannel.tx&orgName=org1&username=eric"
echo
echo
sleep 5
echo "POST request Join channel on Org1"
echo
curl -s -X POST \
  http://localhost:4000/channels/mychannel/peers \
  -H "content-type: application/x-www-form-urlencoded" \
  -d "peers=peer1&peers=peer2&channelName=mychannel&orgName=org1&username=eric"
echo
echo
echo "POST Install chaincode on Org1"
echo
curl -s -X POST \
  http://localhost:4000/chaincodes \
  -H "content-type: application/x-www-form-urlencoded" \
  -d "peers=peer1&peers=peer2&chaincodeName=mycc&chaincodePath=github.com/chaincode&chaincodeVersion=v0&orgName=org1&username=eric"
echo
echo
207647c2babe34fbdc9d7a983c77e102edb33892 | Shell | KernelFolla/homestead | /scripts/remove-mongodb.sh | UTF-8 | 500 | 3.34375 | 3 | [
"MIT"
] | permissive | while fuser /var/lib/dpkg/lock /var/lib/apt/lists/lock >/dev/null 2>&1 ; do
echo "Waiting for other software managers to finish..."
sleep 2
done
export DEBIAN_FRONTEND=noninteractive
# Remove MongoDB only when dpkg reports the package as installed.
# The substitution is captured and quoted first: the original
# `[ ! $(...) -eq 0 ]` left it unquoted inside a double-negative numeric
# test, which breaks if the pipeline ever emits anything but one number.
installed="$(dpkg-query -W -f='${Status}' mongodb-org 2>/dev/null | grep -c "ok installed")"
if [ "${installed:-0}" -ne 0 ];
then
    apt-get remove -y --purge mongodb-org -qq > /dev/null
    apt-get autoremove -y -qq > /dev/null
    apt-get autoclean -qq > /dev/null
    echo "MongoDB removed with success"
else
    echo "MongoDB already removed"
fi
| true |
5a7755bd860044f90793663aab96472661aafa19 | Shell | lambdaforg/syso | /zestaw4/zad1/script | UTF-8 | 74 | 2.640625 | 3 | [] | no_license | #!/bin/bash
# Print the current time (HH:MM:SS) in an endless loop.
# The original guarded on `[ $valid ]`, which is true for ANY non-empty
# string (even "false") — i.e. it was always an infinite loop; say so
# explicitly with `while true` and use $() instead of backticks.
while true
do
	d=$(date +%H:%M:%S)
	echo "$d"
done
237269e742e3aa6e1d13a896f447b275e6974931 | Shell | pkmoore/CrashSimulator | /test/test_callread.sh | UTF-8 | 418 | 2.953125 | 3 | [] | no_license | #!/bin/sh
# Integration test: record an strace of the 'callread' sample program,
# replay it through CrashSimulator, and check the piped input shows up.
cd ../sample_programs > /dev/null;
echo "asdfasdf" | strace -f -s 9999 -vvvvv -o callread.strace ./callread;
cd .. > /dev/null;
# Replay the recorded trace against the same binary.
OUTPUT=$(python main.py \
-c "['sample_programs/callread']" \
-t sample_programs/callread.strace);
RET=$?;
# The replayed run must echo back (a prefix of) the piped input.
echo $OUTPUT | grep -q "asdfasd";
FOUND=$?;
rm sample_programs/callread.strace;
cd test > /dev/null;
# Fail the test if the simulator exited non-zero or the output was missing.
if [ $RET -ne 0 ] || [ $FOUND -ne 0 ];
then exit 1;
fi
ae9db5b31349e02310d487278e3ee687a443e394 | Shell | Alex-sanda/bam | /src/tool/preprocess_train_data.sh | UTF-8 | 1,677 | 2.921875 | 3 | [] | no_license | main_operation=$1
# Positional arguments 2-7 (argument 1, main_operation, is consumed above).
main_function=$2
main_data=$3
main_category=$4
main_category_num=$5
main_dict_num=$6
main_dict_thre=$7
# Per-split file name prefixes under ../<dataset>/.
train_file_prefix=../${main_data}/${main_category}.train.
dev_file_prefix=../${main_data}/${main_category}.dev.
test_file_prefix=../${main_data}/${main_category}.test.
#<<BLOCK
# Extract style n-grams; for the amazon dataset, additionally filter the
# tf-idf lists with NLTK.
python filter_style_ngrams.py $train_file_prefix $main_category_num $main_function
if [ "$main_data" = "amazon" ]; then
    for((i=0;i < $main_category_num; i++))
    do
        python use_nltk_to_filter.py ../${main_data}/${main_category}.train.${i}.tf_idf
        cp ../${main_data}/${main_category}.train.${i}.tf_idf.filter ../${main_data}/${main_category}.train.${i}.tf_idf
    done
fi
# Preprocess each category's train/dev splits and build the data files.
for((i=0;i < $main_category_num; i++))
do
    python preprocess_train.py ${train_file_prefix}${i} ${train_file_prefix}${i} ${main_function} ${main_dict_num} ${main_dict_thre}
    python preprocess_test.py ${dev_file_prefix}${i} ${train_file_prefix}${i} $main_function $main_dict_num $main_dict_thre
    sh build_data.sh ${train_file_prefix}${i}.data.${main_function} ${dev_file_prefix}${i}.data.${main_function}
done
#BLOCK
train_data_file=../${main_data}/train.data.${main_function}
test_data_file=../${main_data}/test.data.${main_function}
dict_train_file=../${main_data}/zhi.dict.${main_function}
# Keep the previous combined files around as *.old before rebuilding.
mv $train_data_file ${train_data_file}.old
mv $test_data_file ${test_data_file}.old
# Concatenate all per-category splits into single train/test files.
cat ${train_file_prefix}*.data.${main_function}.train >> $train_data_file
cat ${train_file_prefix}*.data.${main_function}.test >> $test_data_file
# Shuffle both; the shuffled test data is appended to the shuffled train data.
python shuffle.py $train_data_file
python shuffle.py $test_data_file
cat ${test_data_file}.shuffle >>${train_data_file}.shuffle
# Build the vocabulary dictionary from the (unshuffled) training data.
python create_dict.py ${train_data_file} $dict_train_file
0d51b065b41113994b29f1cde01dcefb3bdf9e8b | Shell | jfqd/mi-qutic-mail | /copy/opt/local/bin/uptodate | UTF-8 | 3,430 | 2.859375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/bash
# Upgrade pkgsrc packages and restart the mail stack on a SmartOS zone.
# Requires any argument to actually run (guards accidental runs on mx hosts).
if [[ -z "$1" ]]; then
  echo "skip uptodate on mx for now"
  exit 0
fi
# Upgrade all packages and clean the pkgin cache.
/opt/local/bin/pkgin -y ug
/opt/local/bin/pkgin clean
svcadm restart dovecot
svcadm restart postfix
svcadm disable amavisd
svcadm disable fail2ban
pkill fail2ban-server
sleep 10
# Remove a possibly stale socket before re-enabling fail2ban.
rm /var/run/fail2ban.sock || true
svcadm enable fail2ban
pkill amavisd
# Re-apply a local patch to amavisd (marks messages as originating) if the
# package upgrade overwrote it.
if [[ ! `egrep 'msginfo->originating\(c\(.*\)\);' /opt/local/sbin/amavisd` ]]; then
  sed -i \
    "s| Amavis::load_policy_bank(\$_,\$msginfo) for \@bank_names;| Amavis::load_policy_bank(\$_,\$msginfo) for \@bank_names;\n \$msginfo->originating(c(\'originating\'));|" \
    /opt/local/sbin/amavisd
fi
# Regenerate the SMF manifest for amavisd from scratch.
rm /opt/local/lib/svc/manifest/amavisd.xml
cat >> /opt/local/lib/svc/manifest/amavisd.xml << EOF
<?xml version="1.0"?>
<!DOCTYPE service_bundle SYSTEM "/usr/share/lib/xml/dtd/service_bundle.dtd.1">
<service_bundle type="manifest" name="export">
  <service name="pkgsrc/amavisd" type="service" version="1">
    <create_default_instance enabled="false" />
    <single_instance />
    <dependency name='fs-local' grouping='require_all' restart_on='none' type='service'>
      <service_fmri value='svc:/system/filesystem/local' />
    </dependency>
    <dependency name='loopback' grouping='require_all' restart_on='error' type='service'>
      <service_fmri value='svc:/network/loopback:default' />
    </dependency>
    <dependency name='physical' grouping='require_all' restart_on='error' type='service'>
      <service_fmri value='svc:/network/physical:default' />
    </dependency>
    <dependency name='name-services' grouping='require_all' restart_on='refresh' type='service'>
      <service_fmri value='svc:/milestone/name-services' />
    </dependency>
    <dependency name='system-log' grouping='optional_all' restart_on='none' type='service'>
      <service_fmri value='svc:/system/system-log' />
    </dependency>
    <exec_method type="method" name="start" exec="/opt/local/lib/svc/method/amavisd start" timeout_seconds="60" />
    <exec_method type="method" name="stop" exec="/opt/local/lib/svc/method/amavisd stop" timeout_seconds="120" />
    <exec_method type="method" name="refresh" exec="/opt/local/lib/svc/method/amavisd reload" timeout_seconds="60" />
    <exec_method type="method" name="restart" exec="/opt/local/lib/svc/method/amavisd restart" timeout_seconds="60" />
    <property_group name="startd" type="framework">
      <propval name="duration" type="astring" value="contract" />
      <propval name="ignore_error" type="astring" value="core,signal" />
    </property_group>
    <property_group name="application" type="application">
      <propval name="config_file" type="astring" value="/opt/local/etc/amavisd.conf" />
    </property_group>
    <template>
      <common_name>
        <loctext xml:lang="C">Amavisd</loctext>
      </common_name>
    </template>
  </service>
</service_bundle>
EOF
# enable
chown root:amavis /opt/local/etc/amavisd.conf
chmod 0640 /opt/local/etc/amavisd.conf
svccfg import /opt/local/lib/svc/manifest/amavisd.xml
# Refresh SpamAssassin rules and fix ownership of its state directories.
/opt/local/bin/sa-update
chown -R spamd:spamd /var/spamassassin/
chown -R spamd:spamd /opt/local/etc/spamassassin/sa-update-keys
svcadm enable amavisd
# Python 3 compatibility patches for pyzor after the upgrade.
sed -i -e "s/iteritems()/items()/g" /opt/local/bin/pyzor
sed -i -e "s/iteritems()/items()/g" /opt/local/lib/python3.9/site-packages/pyzor/client.py
sed -i -e "s/xrange(length)/range(length)/g" /opt/local/lib/python3.9/site-packages/pyzor/digest.py
svcs -vx
14ba75943ac2592801778a562820e4605a0086fe | Shell | dinfucker/SCRIPT-VPN | /u-d.sh | UTF-8 | 12,966 | 3.078125 | 3 | [] | no_license | #!/usr/bin/env bash
# Functions — coloured status output helpers.
# BUGFIX: "$1" is now quoted; the original left it unquoted between the
# colour escapes, which collapsed runs of whitespace and glob-expanded
# any wildcard characters in the message.
# ok: green informational message.
ok() {
    echo -e "\e[32m$1\e[m"
}
# die: bold magenta step/warning message (does NOT exit, despite the name).
die() {
    echo -e "\e[1;35m$1\e[m"
}
# des: bold red fatal error — prints the message and terminates the script.
des() {
    echo -e "\e[1;31m$1\e[m"
    exit 1
}
# install screenfetch
ok "❯❯❯ install screenfetch"
cd
wget -q https://kguza.net/scrip/u-d/openvpn/menu/screenfetch-dev
mv screenfetch-dev /usr/bin/screenfetch
chmod +x /usr/bin/screenfetch
# Show the system banner on every login shell.
echo "clear" >> .profile
echo "screenfetch" >> .profile
echo "clear" >> .bashrc
echo "screenfetch" >> .bashrc
#<BODY text='ffffff'>
# Base URL for all configuration artifacts downloaded below.
kguza="https://kguza.net/scrip/u-d/openvpn"
#OS
if [[ -e /etc/debian_version ]]; then
VERSION_ID=$(cat /etc/os-release | grep "VERSION_ID")
fi
# Sanity check: must run as root (message is in Thai).
if [[ $(id -g) != "0" ]] ; then
des "❯❯❯ สคริปต์ต้องทำงานเป็น root."
fi
#if [[ ! -e /dev/net/tun ]] ; then
# des "❯❯❯ TUN/TAP อุปกรณ์ไม่พร้อมใช้งาน."
#fi
# Abort if OpenVPN is already installed (dpkg -l exits 0 when present).
dpkg -l openvpn > /dev/null 2>&1
if [[ $? -eq 0 ]]; then
des "❯❯❯ OpenVPN ได้รับการติดตั้งแล้ว"
fi
# IP Address — public IPv4, falling back to the first non-local interface.
SERVER_IP=$(wget -qO- ipv4.icanhazip.com);
if [[ "$SERVER_IP" = "" ]]; then
SERVER_IP=`ifconfig | grep -Eo 'inet (addr:)?([0-9]*\.){3}[0-9]*' | grep -Eo '([0-9]*\.){3}[0-9]*' | grep -v '127.0.0' | grep -v '192.168'`;
fi
echo "$SERVER_IP" > /usr/bin/ipsm
# Install openvpn
die "❯❯❯ apt-get update"
apt-get update -q > /dev/null 2>&1
die "❯❯❯ apt-get install openvpn curl openssl"
apt-get install -qy openvpn curl > /dev/null 2>&1
#die "❯❯❯ Generating CA Config"
# Unpack the pre-built OpenVPN server config (keys/certs) into /.
cd /
wget -q -O ovpn.tar "$kguza/conf/openvpn.tar"
tar xf ovpn.tar
rm ovpn.tar
# Generate the OpenVPN client profiles, embedding the server IP and the
# client key/cert/CA material unpacked above.
# BUGFIX: the original wrote each .ovpn file TWICE with byte-identical
# content (once per here-doc delimiter, EOF1 then KGUZA); the redundant
# second write of each file has been removed — the resulting files on
# disk are unchanged.
cat > /etc/openvpn/KGUZA.ovpn <<EOF1
client
dev tun
proto tcp
port 1194
connect-retry 1
connect-timeout 120
resolv-retry infinite
route-method exe
nobind
persist-key
persist-tun
persist-remote-ip
mute-replay-warnings
verb 2
cipher none
comp-lzo
script-security 3
auth-user-pass
remote $SERVER_IP
http-proxy $SERVER_IP 8080
#http-proxy-option CUSTOM-HEADER Host connect.facebook.net
#http-proxy-option CUSTOM-HEADER Host beetalkmobile.com
<key>
$(cat /etc/openvpn/client-key.pem)
</key>
<cert>
$(cat /etc/openvpn/client-cert.pem)
</cert>
<ca>
$(cat /etc/openvpn/ca.pem)
</ca>
EOF1
# Second profile: embeds credentials and a custom proxy header.
cat > /etc/openvpn/KGUZAZA.ovpn <<EOF1
client 🇹🇭kguza-vpn🇹🇭
verb 3
dev tun
nobind
comp-lzo
proto tcp
persist-key
persist-tun
cipher none
<auth-user-pass>
news
Kguza
</auth-user-pass>
http-proxy $SERVER_IP 8080
remote $SERVER_IP 1194 tcp-client
dhcp-option DNS 1.1.1.1
dhcp-option DNS 1.0.0.1
dhcp-option DOMAIN blinkt.de
dhcp-option DOMAIN www.google.com
dhcp-option DOMAIN www.youtube.com
dhcp-option DOMAIN www.opendns.com
dhcp-option DOMAIN www.facebook.com
http-proxy-option CUSTOM-HEADER X-Online-Host https://anywhere.truevisions.tv
<key>
$(cat /etc/openvpn/client-key.pem)
</key>
<cert>
$(cat /etc/openvpn/client-cert.pem)
</cert>
<ca>
$(cat /etc/openvpn/ca.pem)
</ca>
####KGUZA-VPN###
EOF1
# Restart Service
# NOTE(review): "ok" and "die" are presumably status/progress helpers defined
# earlier in this script (above this excerpt) — confirm their semantics.
ok "❯❯❯ service openvpn restart"
service openvpn restart > /dev/null 2>&1
die "❯❯❯ apt-get install squid3"
#Add Trusty Sources
# Pin the Ubuntu 14.04 (trusty) archive so the exact squid3 3.3.8 build below
# can be installed on newer releases.
touch /etc/apt/sources.list.d/trusty_sources.list > /dev/null 2>&1
echo "deb http://us.archive.ubuntu.com/ubuntu/ trusty main universe" | sudo tee --append /etc/apt/sources.list.d/trusty_sources.list > /dev/null 2>&1
#Update
apt-get update -q > /dev/null 2>&1
#Install Squid
apt-get install -y squid3=3.3.8-1ubuntu6 squid=3.3.8-1ubuntu6 squid3-common=3.3.8-1ubuntu6 > /dev/null 2>&1
#Install missing init.d script
# SECURITY NOTE(review): downloads an init script from an external host and
# installs it as root without any integrity check.
wget -q -O squid3 https://kguza.net/scrip/squid3-3.3.8-1ubuntu6/squid3.sh
cp squid3 /etc/init.d/
chmod +x /etc/init.d/squid3
update-rc.d squid3 defaults
# Keep a pristine copy of the stock config before overwriting it below.
cp /etc/squid3/squid.conf /etc/squid3/squid.conf.orig
# Write a minimal squid.conf: listen on 8080, allow localhost/localnet and an
# "SSH" ACL covering the server's own IP, deny everything else.
# (The config text is a single quoted argument, so $SERVER_IP is expanded.)
echo "http_port 8080
acl localhost src 127.0.0.1/32 ::1
acl to_localhost dst 127.0.0.0/8 0.0.0.0/32 ::1
acl localnet src 10.0.0.0/8
acl localnet src 172.16.0.0/12
acl localnet src 192.168.0.0/16
acl SSL_ports port 443
acl Safe_ports port 80
acl Safe_ports port 21
acl Safe_ports port 443
acl Safe_ports port 70
acl Safe_ports port 210
acl Safe_ports port 1025-65535
acl Safe_ports port 280
acl Safe_ports port 488
acl Safe_ports port 591
acl Safe_ports port 777
acl CONNECT method CONNECT
acl SSH dst $SERVER_IP-$SERVER_IP/255.255.255.255
acl SSH dst 127.0.0.1-127.0.0.1/255.255.255.255
http_access allow SSH
http_access allow localnet
http_access allow localhost
http_access deny all
refresh_pattern ^ftp: 1440 20% 10080
refresh_pattern ^gopher: 1440 0% 1440
refresh_pattern -i (/cgi-bin/|\?) 0 0% 0
refresh_pattern . 0 20% 4320" > /etc/squid3/squid.conf
#Start squid
ok "❯❯❯ service squid3 restart"
service squid3 start > /dev/null 2>&1
#Cleanup
# Remove the downloaded init-script copy from the working directory.
rm squid3
#die "❯❯❯ apt-get install sudo"
#apt-get install -qy sudo > /dev/null 2>&1
# Repurpose the system "news" account as a login user: change its home to
# /home and clear the nologin shell field in /etc/passwd.
sed -i 's/news:x:9:9:news:\/var\/spool\/news:\/usr\/sbin\/nologin/news:x:9:9:news:\/home:/g' /etc/passwd
# SECURITY NOTE(review): hard-coded password for "news", and the account is
# then granted full sudo — this is a significant exposure on a public VPS.
echo news:vpnk | chpasswd
usermod -aG sudo news
#install Nginx
die "❯❯❯ apt-get install nginx"
apt-get install -qy nginx > /dev/null 2>&1
# Drop the distro default site and pull the kguza config + landing pages.
# NOTE(review): $kguza is presumably a base download URL set earlier in this
# script (above this excerpt) — confirm.
rm -f /etc/nginx/sites-enabled/default
rm -f /etc/nginx/sites-available/default
wget -q -O /etc/nginx/nginx.conf "$kguza/conf/nginx.conf"
wget -q -O /etc/nginx/conf.d/vps.conf "$kguza/conf/vps.conf"
mkdir -p /home/vps/public_html/open-on
wget -q -O /home/vps/public_html/open-on/index.php "$kguza/conf/api.txt"
wget -q -O /home/vps/public_html/index.php "$kguza/conf/kguza-vpn.txt"
# SECURITY NOTE(review): publishes phpinfo() to the world; consider removing.
echo "<?php phpinfo( ); ?>" > /home/vps/public_html/info.php
ok "❯❯❯ service nginx restart"
service nginx restart > /dev/null 2>&1
#install php-fpm
# Install the PHP-FPM matching the OS release and switch it from a unix
# socket to TCP 127.0.0.1:9000 (what the nginx config above expects).
# NOTE(review): $VERSION_ID is compared against whole 'VERSION_ID="x"' lines,
# so it presumably holds a raw line grepped from /etc/os-release — confirm
# where it is assigned (above this excerpt).
if [[ "$VERSION_ID" = 'VERSION_ID="7"' || "$VERSION_ID" = 'VERSION_ID="8"' || "$VERSION_ID" = 'VERSION_ID="14.04"' ]]; then
#debian8
die "❯❯❯ apt-get install php"
apt-get install -qy php5-fpm > /dev/null 2>&1
sed -i 's/listen = \/var\/run\/php5-fpm.sock/listen = 127.0.0.1:9000/g' /etc/php5/fpm/pool.d/www.conf
ok "❯❯❯ service php restart"
# NOTE(review): "-q" here is passed to the service script, not to "service";
# it looks accidental — confirm.
service php5-fpm restart -q > /dev/null 2>&1
elif [[ "$VERSION_ID" = 'VERSION_ID="9"' || "$VERSION_ID" = 'VERSION_ID="16.04"' ]]; then
#debian9 Ubuntu16.4
die "❯❯❯ apt-get install php"
apt-get install -qy php7.0-fpm > /dev/null 2>&1
sed -i 's/listen = \/run\/php\/php7.0-fpm.sock/listen = 127.0.0.1:9000/g' /etc/php/7.0/fpm/pool.d/www.conf
ok "❯❯❯ service php restart"
service php7.0-fpm restart > /dev/null 2>&1
fi
# install dropbear
# Secondary SSH daemon on ports 446 and 110 (extra arg), used as the tunnel
# endpoint behind stunnel below.
die "❯❯❯ apt-get install dropbear"
apt-get install -qy dropbear > /dev/null 2>&1
sed -i 's/NO_START=1/NO_START=0/g' /etc/default/dropbear
sed -i 's/DROPBEAR_PORT=22/DROPBEAR_PORT=446/g' /etc/default/dropbear
sed -i 's/DROPBEAR_EXTRA_ARGS=/DROPBEAR_EXTRA_ARGS="-p 110"/g' /etc/default/dropbear
# Allow no-shell accounts (like the "news" user above) to authenticate.
echo "/bin/false" >> /etc/shells
echo "/usr/sbin/nologin" >> /etc/shells
ok "❯❯❯ service dropbear restart"
service dropbear restart > /dev/null 2>&1
# Company details (certificate subject fields).
# NOTE(review): none of these variables are referenced in the rest of this
# excerpt — the stunnel certificate below is assembled from the existing
# OpenVPN key/cert instead. They may be leftovers; confirm before removing.
country=ID
state=Thailand
locality=Tebet
organization=Kguzaza
organizationalunit=IT
commonname=kguza.online
email=wullopkk@gmail.com
# install stunnel
# TLS wrapper: accepts on 444 and forwards to the dropbear instance on 110.
die "❯❯❯ apt-get install ssl"
apt-get install -qy stunnel4 > /dev/null 2>&1
cat > /etc/stunnel/stunnel.conf <<-END
cert = /etc/stunnel/stunnel.pem
client = no
socket = a:SO_REUSEADDR=1
socket = l:TCP_NODELAY=1
socket = r:TCP_NODELAY=1
[dropbear]
accept = 444
connect = 127.0.0.1:110
#[openvpn]
#accept = 465
#connect = 127.0.0.1:443
#[squid3]
#accept = 443
#connect = 127.0.0.1:8080
END
# Build the stunnel certificate by concatenating the OpenVPN client key+cert.
cat /etc/openvpn/client-key.pem /etc/openvpn/client-cert.pem > /etc/stunnel/stunnel.pem
# Configure stunnel to start at boot.
sed -i 's/ENABLED=0/ENABLED=1/g' /etc/default/stunnel4
ok "❯❯❯ service ssl restart"
service stunnel4 restart > /dev/null 2>&1
# Iptables
# NAT/forwarding for the two VPN subnets (10.8.0.0/24 and 10.7.0.0/24).
die "❯❯❯ apt-get install iptables"
apt-get install -qy iptables > /dev/null 2>&1
# NOTE(review): the primary NIC name is guessed from whether vnstat has an
# eth0 database — a fragile heuristic; consider "ip route" detection instead.
if [ -e '/var/lib/vnstat/eth0' ]; then
iptables -t nat -I POSTROUTING -s 10.8.0.0/24 -o eth0 -j MASQUERADE
iptables -t nat -I POSTROUTING -s 10.7.0.0/24 -o eth0 -j MASQUERADE
else
iptables -t nat -I POSTROUTING -s 10.8.0.0/24 -o ens3 -j MASQUERADE
iptables -t nat -I POSTROUTING -s 10.7.0.0/24 -o ens3 -j MASQUERADE
fi
iptables -I FORWARD -s 10.8.0.0/24 -j ACCEPT
iptables -I FORWARD -s 10.7.0.0/24 -j ACCEPT
iptables -I FORWARD -m state --state RELATED,ESTABLISHED -j ACCEPT
iptables -t nat -A POSTROUTING -s 10.8.0.0/24 -j SNAT --to-source $SERVER_IP
iptables -t nat -A POSTROUTING -s 10.7.0.0/24 -j SNAT --to-source $SERVER_IP
# Persist the rules and restore them whenever an interface comes up.
iptables-save > /etc/iptables.conf
cat > /etc/network/if-up.d/iptables <<EOF
#!/bin/sh
iptables-restore < /etc/iptables.conf
EOF
chmod +x /etc/network/if-up.d/iptables
# Enable net.ipv4.ip_forward
sed -i 's|#net.ipv4.ip_forward=1|net.ipv4.ip_forward=1|' /etc/sysctl.conf
echo 1 > /proc/sys/net/ipv4/ip_forward
# setting time
ln -fs /usr/share/zoneinfo/Asia/Bangkok /etc/localtime
# Disable environment passing over SSH.
sed -i 's/AcceptEnv/#AcceptEnv/g' /etc/ssh/sshd_config
# NOTE(review): this sed replaces "Port 22" with itself — a no-op; it looks
# like a port change was intended here. Confirm and fix or remove.
sed -i 's/Port 22/Port 22/g' /etc/ssh/sshd_config
service ssh restart
# Download and run the kguza admin "menu" tool, then install the bwh helper.
ok "❯❯❯ กำลังติดตั้งเมนู "
cd
wget -q -O menu "$kguza/menu/menu"
chmod +x menu
./menu
rm -f menu
wget -q -O /usr/bin/bwh "$kguza/menu/bwh"
chmod +x /usr/bin/bwh
die "❯❯❯ apt-get update"
apt-get update -q > /dev/null 2>&1
service openvpn restart -q > /dev/null 2>&1
# Notify the person running the script via LINE Notify.
# SECURITY NOTE(review): the Bearer token below is hard-coded in a public
# script — it is effectively leaked and should be revoked/parameterised.
IP=$(wget -qO- ipv4.icanhazip.com);
curl -X POST -H 'Authorization: Bearer yEMJ26hu7lNygZIxh4mydKNMgwrCm7Ljs0rFJsjk8Ic' -F 'message='"
Load_file $IP/KGUZA.ovpn "'' https://notify-api.line.me/api/notify > /dev/null 2>&1
echo "ติดตั้งเสร็จเรียบร้อย" > /usr/bin/install_full
# Publish the profiles over HTTP.
# NOTE(review): KGUZA.ovpn is presumably created earlier (above this
# excerpt); and KGUZAZA.ovpn is moved then immediately deleted — the
# mv/rm pair below is a net delete, confirm that is intended.
mv /etc/openvpn/KGUZA.ovpn /home/vps/public_html/KGUZA.ovpn
mv /etc/openvpn/KGUZAZA.ovpn /home/vps/public_html/KGUZAZA.ovpn
rm /home/vps/public_html/KGUZAZA.ovpn
# Final summary banner: vnstat URL, .ovpn download URL and a "type 'menu' to
# use" hint (Thai), then write the completion marker.
# The original had two branches (Debian 7/8 + Ubuntu 14.04 vs Debian 9 +
# Ubuntu 16.04) that printed exactly the same text; they are folded into a
# single condition. Output is unchanged for every previously matched
# $VERSION_ID, and unmatched values still print nothing, as before.
# NOTE(review): $VERSION_ID is compared against whole 'VERSION_ID="x"' lines,
# so it presumably holds a raw line grepped from /etc/os-release — confirm
# where it is set (above this excerpt).
if [[ "$VERSION_ID" = 'VERSION_ID="7"' || "$VERSION_ID" = 'VERSION_ID="8"' || "$VERSION_ID" = 'VERSION_ID="14.04"' || "$VERSION_ID" = 'VERSION_ID="9"' || "$VERSION_ID" = 'VERSION_ID="16.04"' ]]; then
echo " "
echo " ┈╭━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╮"
echo " ┈┣ Vnstat http://$SERVER_IP/vnstat/"
echo " ┈╰━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╯"
echo " ┈╭━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╮"
echo " ┈┣ Load file http://$SERVER_IP/KGUZA.ovpn"
echo " ┈╰━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╯"
echo " ┈╭━━━━━━━━━━━━━━━━━━━━━╮"
echo " ┈┣ พิมพ์ menu เพื่อใช้งาน"
echo " ┈╰━━━━━━━━━━━━━━━━━━━━━╯"
echo " "
fi
# Marker consumed by other kguza tooling to detect a completed install.
echo ok > /etc/openvpn/okport
| true |
3510f0e2f984d99e67b0759065939c7b5adff5c3 | Shell | NickCullen/vici | /setup.sh | UTF-8 | 407 | 2.984375 | 3 | [] | no_license | #!/bin/sh
#setup VICI_HOME var
# Register the VICI_* environment variables (rooted at the current checkout)
# in the user's ~/.bash_profile so new login shells pick them up.
cwd=$(pwd)
#string
export_string="export VICI_HOME=$cwd"
export_tools_string="export VICI_TOOLS=$cwd/Tools/Scripts"
export_python_string="export VICI_PYTHON=$cwd/Tools/Mac/Python"
#add to bash profile
# Create the profile first if it does not exist.
# (Fixed: this used to be 'touch /.bash_profile', which created a stray file
# at the filesystem root instead of the profile appended to below.)
touch ~/.bash_profile
# Quoted so a checkout path containing spaces is written intact.
echo "$export_string" >> ~/.bash_profile
echo "$export_tools_string" >> ~/.bash_profile
echo "$export_python_string" >> ~/.bash_profile
# Show the result so the user can verify what was added.
cat ~/.bash_profile
| true |
69cbc33981260c5ec14265e91ab58a1e7d871946 | Shell | redjax/linux-setup-scripts | /app-install-scripts/fedora/fedora-setup.sh | UTF-8 | 4,140 | 3.171875 | 3 | [] | no_license | #!/bin/bash
# Target user and home directory for the per-user steps later in this script.
usr='jack'
# Fixed: this was single-quoted ('/home/${usr}'), so ${usr} was never
# expanded and every later "${home}/..." path pointed at a literal
# '/home/${usr}' directory.
home="/home/${usr}"
# Clone command used for the dotfiles/themes repos below, invoked as
#   ${gitcl} <url> <dir>
# Fixed: this was gitcl='${gitcl} --verbose' — single-quoted and
# self-referencing, which expanded to the literal, unrunnable word
# '${gitcl}'. "git clone" matches how the variable is used later on.
gitcl="git clone --verbose"
# Setup script for installing a new distro.
# ToDo: Make script distro-aware for running different commands based on
# what type of Linux I'm running the script on.
##########
# Fedora #
##########
# Install Fedy
# SECURITY NOTE(review): pipes a remote installer straight into bash as the
# current (likely root) user, with no integrity check.
sh -c 'curl https://www.folkswithhats.org/installer | bash'
# -------------------------------------------------------------
# Install Brave Browser Dev
# dnf config-manager --add-repo https://brave-browser-rpm-dev.s3.brave.com/x86_64/
# rpm --import https://brave-browser-rpm-dev.s3.brave.com/brave-core-nightly.asc
# dnf install -y brave-browser-dev
# Brave Browser Beta
# NOTE(review): the heading says "Beta" but this is the release repo —
# confirm which channel is intended.
dnf config-manager --add-repo https://brave-browser-rpm-release.s3.brave.com/x86_64/
rpm --import https://brave-browser-rpm-release.s3.brave.com/brave-core.asc
# Fixed: added -y for consistency with every other install in this script;
# without it the unattended run stalls on dnf's confirmation prompt.
dnf install -y brave-browser brave-keyring
# ------------------------------------------------------------
# Install Sublime Text
# -Install GPG Key
rpm -v --import https://download.sublimetext.com/sublimehq-rpm-pub.gpg
# -Select Channel (choose 1)
# -STABLE
dnf config-manager --add-repo https://download.sublimetext.com/rpm/stable/x86_64/sublime-text.repo
# -DEV
#dnf config-manager --add-repo https://download.sublimetext.com/rpm/dev/x86_64/sublime-text.repo
# Update DNF and install Sublime
dnf install -y sublime-text
# --------------------------------------------------------------
# Create Cron jobs
# Create folder in /opt and copy scripts to it
# (-p makes this idempotent; plain mkdir errors on a re-run.)
mkdir -p /opt/backup-scripts
cp -R crontasks/backup-scripts/* /opt/backup-scripts
# Create cron job for backing up Gnome every 3 days at midnight.
# NOTE(review): "crontab -" replaces the whole crontab for this user, not
# just adds a line — confirm that is acceptable on a fresh install.
echo "0 0 */3 * * /opt/backup-scripts/backup-gnome.sh" | crontab -
# Restore Gnome settings on new installation
# Fixed: this used to be './opt/backup-scripts/restore-gnome.sh' — a
# relative path that only works from /, not the absolute script just
# installed above.
/opt/backup-scripts/restore-gnome.sh
# --------------------------------------------------------------
# NeoVim
# Install nvim
dnf install -y neovim
# Configure neovim
# Sourced (".") into this shell; depends on $home being set at the top.
. ${home}/Documents/git/dotfiles/nvim/createvimfiles.sh
# --------------------------------------------------------------
# Install Tmux
dnf install -y tmux
# Create Tmux conf
# NOTE(review): copies into root's ~, not ${home} — confirm which is wanted.
cp ${home}/Documents/git/dotfiles/.tmux.conf ~/
# --------------------------------------------------------------
# Install Terminator
dnf install -y terminator
# --------------------------------------------------------------
# Install tilix
dnf install -y tilix
# --------------------------------------------------------------
# Install TLP, laptop battery saving
dnf install -y tlp tlp-rdw
# Thinkpad-specific
# NOTE(review): these two installs lack -y and will prompt, stalling an
# unattended run — confirm whether that is intentional.
dnf install https://download1.rpmfusion.org/free/fedora/rpmfusion-free-release-$(rpm -E %fedora).noarch.rpm
dnf install http://repo.linrunner.de/fedora/tlp/repos/releases/tlp-release.fc$(rpm -E %fedora).noarch.rpm
# Optional
# akmod-tp_smapi (battery charge threshold, recalibration
# akmod-acpi_call (X220/T420, X230/T430, etc)
# kernel-devel (needed for akmod packages)
dnf install -y akmod-tp_smapi akmod-acpi_call kernel-devel
# --------------------------------------------------------------
# Git install
dnf install -y git
# VSCode Install
rpm --import https://packages.microsoft.com/keys/microsoft.asc
sh -c 'echo -e "[code]\nname=Visual Studio Code\nbaseurl=https://packages.microsoft.com/yumrepos/vscode\nenabled=1\ngpgcheck=1\ngpgkey=https://packages.microsoft.com/keys/microsoft.asc" > /etc/yum.repos.d/vscode.repo'
dnf check-update
dnf install -y code
# --------------------------------------------------------------
# Albert Launcher
# Add the repo signing key.
# (Fixed: the key URL used to sit on the next line after "rpm --import \ #
# Add repo" — the backslash escaped the space and '#' started a comment, so
# rpm ran without the URL and the URL line was then executed as a command.)
rpm --import https://build.opensuse.org/projects/home:manuelschneid3r/public_key
dnf install -y albert
# --------------------------------------------------------------
# Alacarte
dnf install -y alacarte
# --------------------------------------------------------------
# Android Tools (ADB, Fastboot)
dnf install -y android-tools
# --------------------------------------------------------------
# Themes, Fonts, and Icons
# Clone repo
# Uses the $gitcl clone command and $home set at the top of this script.
${gitcl} https://github.com/redjax/jaxlinuxlooks.git ${home}/Documents/git/
# -Themes
. ${home}/Documents/git/jaxlinuxlooks/themesinstall.sh
# -Fonts
. ${home}/Documents/git/jaxlinuxlooks/fontsinstall.sh
| true |
41b8d607fbbaa5867414feca1c58f12ae5327031 | Shell | pepjum/ANALISIS_PROTEOMICA | /TandemPep2Prot_margaret.sh | UTF-8 | 6,665 | 2.921875 | 3 | [] | no_license | #!/bin/bash
# Example invocations:
# ./TandemPep2Prot_mlsanchez.sh /home/nostromo/data/mlsanchez/proteomica/ejemplo_experimento /home/nostromo/data/mlsanchez/proteomica/ejemplo_experimento/MGFFiles/Exp1_M3 /home/nostromo/data/mlsanchez/proteomica/ejemplo_experimento/Tandem_Files/Shotgun_tandem_Exp1_M3.txt uniprot_sprot_2017_12_CRAP.fasta DECOY
# ./TandemPep2Prot_mlsanchez.sh /home/nostromo/data/mlsanchez/proteomica/EMBRIO_15_03_PXD003560-PXD006332/ /home/nostromo/data/mlsanchez/proteomica/EMBRIO_15_03_PXD003560-PXD006332/MGFFiles/PXD006314 /home/nostromo/data/mlsanchez/proteomica/EMBRIO_15_03_PXD003560-PXD006332/Tandem_Files/Shotgun_tandem_PXD006314.txt uniprot_sprot_2017_12_CRAP.fasta DECOY
# ./TandemPep2Prot_mlsanchez.sh /home/margaret/data/mlsanchez/28_PFortes_Shotgun_lncRNA_Feb18 /home/margaret/data/mlsanchez/28_PFortes_Shotgun_lncRNA_Feb18/MGFFiles/Exp1_M3 /home/margaret/data/mlsanchez/28_PFortes_Shotgun_lncRNA_Feb18/Tandem_Files/Shotgun_tandem_Exp1_M3.txt uniprot_sprot_2017_12_CRAP.fasta DECOY
# Positional arguments:
CURRENTFOLDER=$1 # /mnt/beegfs/agarin/dato-activo/03_Analysis/agarin/26_Navarrabiomed_Missing_Ene18/
DATASET=$2 # /mnt/beegfs/agarin/dato-activo/03_Analysis/agarin/26_Navarrabiomed_Missing_Ene18/MGFFiles/349-028
FILENAME=$3 # /mnt/beegfs/agarin/dato-activo/03_Analysis/agarin/26_Navarrabiomed_Missing_Ene18/Tandem_Files/Shotgun_tandem_349-028.txt
DBTARGETFILE=$4 # uniprot_sprot_2017_12.fasta
DECOYID=$5 # DECOY
echo "CURRENTFOLDER --> "$CURRENTFOLDER
echo "DATASET --> "$DATASET
echo "FILENAME --> "$FILENAME
echo "DBTARGETFILE --> "$DBTARGETFILE
echo "DECOYID --> "$DECOYID
datasetname=$(basename "${DATASET}")
# Collect .mgf spectra files under the dataset folder.
# NOTE(review): mgfFiles=( $mgfFiles ) splits on whitespace, so file paths
# containing spaces would break all the counts below.
mgfFiles=$(find "${DATASET}" -maxdepth 2 -iname "*.mgf")
echo "mgfFiles --> "$mgfFiles
mgfFiles=( $mgfFiles )
nMGFFiles=${#mgfFiles[@]}
echo "datasetname --> "$datasetname
echo "nMGFFiles --> "$nMGFFiles
# Derive the X!Tandem output folders (target and decoy) from the MGF folder.
datasetTandemTargetFolder=$( echo $DATASET | sed -e "s/MGFFiles/Tandem_Files/g")
datasetTandemDecoyFolder=$datasetTandemTargetFolder'-D/'
# NOTE(review): this self-assignment is a no-op; it looks like a leftover.
datasetTandemTargetFolder=$datasetTandemTargetFolder
echo "datasetTandemDecoyFolder --> "$datasetTandemDecoyFolder
echo "datasetTandemTargetFolder --> "$datasetTandemTargetFolder
# For both the target and the decoy folder, count each class of pipeline
# output: raw search .tsv, *_corrected.tsv, *_peptides.tsv,
# *_peptides_out.tsv and *_peptides_log.log. Each count (n*) is later
# compared against nMGFFiles to decide which stages still need to run.
TandemTargetFiles=$(find "${datasetTandemTargetFolder}" -iname "*.tsv" -not -name "*_peptides.tsv" -not -name "*_peptides_out.tsv" -not -name "*_corrected.tsv" -not -name "*_peptides_log.log")
echo "TandemTargetFiles --> "$TandemTargetFiles
TandemTargetFiles=( $TandemTargetFiles )
nTandemTargetFiles=${#TandemTargetFiles[@]}
echo "nTandemTargetFiles --> "$nTandemTargetFiles
CorrectedTargetFiles=$(find "${datasetTandemTargetFolder}" -iname "*_corrected.tsv")
echo "CorrectedTargetFiles --> "$CorrectedTargetFiles
CorrectedTargetFiles=( $CorrectedTargetFiles )
nCorrectedTargetFiles=${#CorrectedTargetFiles[@]}
echo "nCorrectedTargetFiles --> "$nCorrectedTargetFiles
PeptideTargetFiles=$(find "${datasetTandemTargetFolder}" -iname "*_peptides.tsv")
echo "PeptideTargetFiles --> "$PeptideTargetFiles
PeptideTargetFiles=( $PeptideTargetFiles )
nPeptideTargetFiles=${#PeptideTargetFiles[@]}
echo "nPeptideTargetFiles --> "$nPeptideTargetFiles
PeptideOutTargetFiles=$(find "${datasetTandemTargetFolder}" -iname "*_peptides_out.tsv")
echo "PeptideOutTargetFiles --> "$PeptideOutTargetFiles
PeptideOutTargetFiles=( $PeptideOutTargetFiles )
nPeptideOutTargetFiles=${#PeptideOutTargetFiles[@]}
echo "nPeptideOutTargetFiles --> "$nPeptideOutTargetFiles
PeptideLogTargetFiles=$(find "${datasetTandemTargetFolder}" -iname "*_peptides_log.log")
echo "PeptideLogTargetFiles --> "$PeptideLogTargetFiles
PeptideLogTargetFiles=( $PeptideLogTargetFiles )
nPeptideLogTargetFiles=${#PeptideLogTargetFiles[@]}
echo "nPeptideLogTargetFiles --> "$nPeptideLogTargetFiles
# Same five counts for the decoy folder.
TandemDecoyFiles=$(find "${datasetTandemDecoyFolder}" -iname "*.tsv" -not -name "*_peptides.tsv" -not -name "*_peptides_out.tsv" -not -name "*_corrected.tsv" -not -name "*_peptides_log.log")
echo "TandemDecoyFiles --> "$TandemDecoyFiles
TandemDecoyFiles=( $TandemDecoyFiles )
nTandemDecoyFiles=${#TandemDecoyFiles[@]}
echo "nTandemDecoyFiles --> "$nTandemDecoyFiles
CorrectedDecoyFiles=$(find "${datasetTandemDecoyFolder}" -iname "*_corrected.tsv")
echo "CorrectedDecoyFiles --> "$CorrectedDecoyFiles
CorrectedDecoyFiles=( $CorrectedDecoyFiles )
nCorrectedDecoyFiles=${#CorrectedDecoyFiles[@]}
echo "nCorrectedDecoyFiles --> "$nCorrectedDecoyFiles
PeptideDecoyFiles=$(find "${datasetTandemDecoyFolder}" -iname "*_peptides.tsv")
echo "PeptideDecoyFiles --> "$PeptideDecoyFiles
PeptideDecoyFiles=( $PeptideDecoyFiles )
nPeptideDecoyFiles=${#PeptideDecoyFiles[@]}
echo "nPeptideDecoyFiles --> "$nPeptideDecoyFiles
PeptideOutDecoyFiles=$(find "${datasetTandemDecoyFolder}" -iname "*_peptides_out.tsv")
echo "PeptideOutDecoyFiles --> "$PeptideOutDecoyFiles
PeptideOutDecoyFiles=( $PeptideOutDecoyFiles )
nPeptideOutDecoyFiles=${#PeptideOutDecoyFiles[@]}
echo "nPeptideOutDecoyFiles --> "$nPeptideOutDecoyFiles
PeptideLogDecoyFiles=$(find "${datasetTandemDecoyFolder}" -iname "*_peptides_log.log")
echo "PeptideLogDecoyFiles --> "$PeptideLogDecoyFiles
PeptideLogDecoyFiles=( $PeptideLogDecoyFiles )
nPeptideLogDecoyFiles=${#PeptideLogDecoyFiles[@]}
echo "nPeptideLogDecoyFiles --> "$nPeptideLogDecoyFiles
# Decide whether the target side still needs PeptideMatch processing:
# target=1 when any derived-file count is short of nMGFFiles; abort entirely
# if the raw search files themselves are missing.
target=0
if [ $nMGFFiles != $nTandemTargetFiles ]; then
echo "There are no search files in the folder. Try agarin"
exit
elif [ $nMGFFiles != $nCorrectedTargetFiles ]; then
target=1
elif [ $nMGFFiles != $nPeptideTargetFiles ]; then
target=1
elif [ $nMGFFiles != $nPeptideOutTargetFiles ]; then
target=1
elif [ $nMGFFiles != $nPeptideLogTargetFiles ]; then
target=1
else
target=0
fi
# Same decision for the decoy side.
decoy=0
if [ $nMGFFiles != $nTandemDecoyFiles ]; then
echo "There are no search files in the folder. Try agarin"
exit
elif [ $nMGFFiles != $nCorrectedDecoyFiles ]; then
decoy=1
elif [ $nMGFFiles != $nPeptideDecoyFiles ]; then
decoy=1
elif [ $nMGFFiles != $nPeptideOutDecoyFiles ]; then
decoy=1
elif [ $nMGFFiles != $nPeptideLogDecoyFiles ]; then
decoy=1
else
decoy=0
fi
# Combine the two flags into TOD and dispatch to the R stage:
#   0 = process both target and decoy, 1 = target only, 2 = decoy only,
#   3 = everything already processed (skip the R script).
if [ $target -eq 1 ]; then
if [ $decoy -eq 1 ]; then
TOD=0
echo "doing PeptideMatch for target and decoy files."
else
TOD=1
echo "doing PeptideMatch for target files. Decoy files are already processed."
fi
else
if [ $decoy -eq 1 ]; then
TOD=2
echo "doing PeptideMatch for decoy files. Target files are already processed."
else
TOD=3
fi
fi
if [ $TOD -ne 3 ]; then
# TOD: 1 = only target needs processing; 2 = only decoy; 0 = both.
Rscript /home/margaret/data/pepe/scripts/ANALISIS_PROTEOMICA/TandemPep2Prot_margaret.R $CURRENTFOLDER $datasetname $FILENAME $DBTARGETFILE $DECOYID $TOD
else
echo "PeptideMatch is already done. Continue."
fi
| true |
52866c079401add3b05bc19aba52ee88c5031e11 | Shell | adervis3/Mithril | /cron.sh | UTF-8 | 740 | 3 | 3 | [
"MIT"
] | permissive | if [[ "$(systemctl is-enabled crond)" != "enabled" ]]; then
systemctl enable crond
echo "cron enabled" >> /tmp/report.txt
fi
# CIS-style cron hardening: lock down ownership/permissions on the crontab
# and cron.* directories, logging each step to /tmp/report.txt.
chown root:root /etc/crontab
chmod og-rwx /etc/crontab
echo "Ensure permissions on /etc/crontab are configured" >> /tmp/report.txt
chown root:root /etc/cron.*
chmod og-rwx /etc/cron.*
echo "Ensure permissions on /etc/cron.* are configured" >> /tmp/report.txt
# Switch from deny-lists to allow-lists for cron and at.
rm /etc/cron.deny
rm /etc/at.deny
touch /etc/cron.allow
touch /etc/at.allow
chmod og-rwx /etc/cron.allow
chmod og-rwx /etc/at.allow
chown root:root /etc/cron.allow
chown root:root /etc/at.allow
echo "Ensure at/cron is restricted to authorized users" >> /tmp/report.txt
# NOTE(review): this recreates the cron.deny just removed above, listing all
# non-root users; most cron implementations ignore cron.deny when cron.allow
# exists, so this line looks redundant — confirm the intended policy.
awk -F: '{print $1}' /etc/passwd | grep -v root > /etc/cron.deny
| true |
1842e8f458837da806c46e50eba599885391f206 | Shell | cgbycroft/UK_biobank | /QC-Scripts/Sample-QC/SelectSNPs/compute-basic-stats.sh | UTF-8 | 1,671 | 2.96875 | 3 | [] | no_license | ################################
# For UKBiobank sample QC pipeline step 1
################################
# Clare Bycroft, May 2016
#
# This script computes basic statistics using plink, on the Combined plink files, after running merge-batches.sh This is before any filtering is done with regards to relatedness etc.
################################
plink=/well/ukbiobank/qcoutput/Src/plink-v1.90b3k-x86_64/plink
############
# NOTE(review): $basedir is never set in this script — it is presumably
# exported by the calling environment/wrapper; confirm.
inputdir=$basedir/data/ByBatch # this is the directory with plink files made by create-plink-subsets.sh
outputdir=$basedir/data/Combined
############
# AUTOSOMES
# freq; hwe; het; missing
$plink --bfile $outputdir/b1__b11-b001__b095-autosome-sampleqc --keep-allele-order --freq --hardy --het gz --missing gz --out $outputdir/b1__b11-b001__b095-autosome-sampleqc
# SEX CHROMOSOMES
# freq; hwe; het; missing
$plink --bfile $outputdir/b1__b11-b001__b095-sexchrom-sampleqc --keep-allele-order --freq --hardy --het gz --missing gz --out $outputdir/b1__b11-b001__b095-sexchrom-sampleqc
# Different sex chroms separately (23=X, 24=Y, 25=XY/PAR, 26=MT in plink).
for i in {23..26};
do \
$plink --bfile $outputdir/b1__b11-b001__b095-sexchrom-sampleqc --keep-allele-order --chr $i --freq --hardy --missing gz --make-bed --out $outputdir/b1__b11-b001__b095-sexchrom-sampleqc-$i
# for heterozygosity, need to trick plink: rewrite the chromosome column to
# "1" in a temp .bim so --het treats the variants as autosomal.
awk '{ print "1",$2,$3,$4,$5,$6'} $outputdir/b1__b11-b001__b095-sexchrom-sampleqc-$i.bim > tmp-$i.bim
$plink --bim tmp-$i.bim --bed $outputdir/b1__b11-b001__b095-sexchrom-sampleqc-$i.bed --fam $outputdir/b1__b11-b001__b095-sexchrom-sampleqc-$i.fam --keep-allele-order --het gz --out $outputdir/b1__b11-b001__b095-sexchrom-sampleqc-$i
done;
| true |
f3b70b378f045986093478bcc6a87e2c993c3005 | Shell | luapower/luapower-all | /.mgit/build.sh | UTF-8 | 468 | 3.875 | 4 | [] | no_license | #!/bin/bash
# build a package on the current platform (or on a specified platform).
# Usage: mgit build REPO [platform]
package="$1"
platform="$2"
# Print all arguments as one error message on stderr and abort.
die() { echo "ERROR: $*" >&2; exit 1; }
usage() { echo "USAGE: mgit build REPO [platform]"; exit; }
[ "$package" ] || usage
# No platform given: auto-detect; abort if the detector script fails.
[ "$platform" ] || platform="$(.mgit/platform.sh)" || die "Unknown platform $platform"
script="csrc/$package/build-$platform.sh"
[ -f "$script" ] || die "Build script not found: $script"
# Quoted so package/platform names with unusual characters don't word-split.
cd "csrc/$package" && "./build-$platform.sh"
| true |
4a335af83fa027fca66a347b6920c45af6bb3604 | Shell | mahbubulhaque/ngscratch | /shell/project-shell.sh | UTF-8 | 4,263 | 2.59375 | 3 | [] | no_license | ENERGIEQ_PROJECT_NAME="energieq"
# Project-name variants used to compose the alias and function names below.
ENERGIEQ_PROJECT_NAME_UNDERSCORE="energieq"
ENERGIEQ_PROJECT_NAME_CAMEL_CASE="energieQ"
# Install the front-end npm dependencies of the given project.
# $1 - project alias prefix (e.g. "energieq"); "<prefix>_fe_dir" must be an
#      alias/function (defined below in this file) that cd's into the
#      front-end checkout, so this file is presumably sourced from an
#      interactive shell where aliases expand.
# Returns 1 if the required parameter is missing.
energieqFrontEndInstall() {
    if [ -z "$1" ]; then
        echo 'null value not allowed as first parameter! You must pass the required parameter(s).' >&2
        # Fixed: this used to be 'return $1'; with $1 empty that is a bare
        # 'return', which propagated echo's status 0 and reported success.
        return 1
    fi
    eval "${1}_fe_dir"
    npm install
}
# Refresh the project's front-end dependencies: install, then update both
# runtime and dev dependencies.
# $1 - project alias prefix. Returns 1 if the parameter is missing.
energieqFrontEndUpdate() {
    if [ -z "$1" ]; then
        echo 'null value not allowed as first parameter! You must pass the required parameter(s).' >&2
        # Fixed: was 'return $1' — a bare return that reported success.
        return 1
    fi
    energieqFrontEndInstall "$1"
    npm update
    npm update --save-dev
}
# Install the project's front-end dependencies and start the dev server.
# $1 - project alias prefix. Returns 1 if the parameter is missing.
energieqFrontEndRun() {
    if [ -z "$1" ]; then
        echo 'null value not allowed as first parameter! You must pass the required parameter(s).' >&2
        # Fixed: was 'return $1' — a bare return that reported success.
        return 1
    fi
    eval "${1}_fe_dir"
    energieqFrontEndInstall "$1"
    npm run start
}
# Switch the project's front-end checkout to a branch, hard-reset it, refresh
# the shell config and start the dev server.
# $1 - project alias prefix; $2 - git branch name.
# Returns 1 if either required parameter is missing.
# NOTE(review): git_f, gitCheckout, gitResetHard and bashRefresh are expected
# to come from elsewhere in this shell setup — confirm they are defined.
energieqFrontEndBranchChange() {
    if [ -z "$1" ]; then
        echo 'null value not allowed as first parameter! You must pass the required parameter(s).' >&2
        # Fixed: was 'return $1' — a bare return that reported success.
        return 1
    fi
    if [ -z "$2" ]; then
        echo 'null value not allowed as second parameter! You must pass the required parameter(s).' >&2
        # Fixed: was 'return $2' — same bare-return bug on this path.
        return 1
    fi
    eval "${1}_fe_dir"
    eval git_f
    gitCheckout "$2"
    gitResetHard
    bashRefresh
    energieqFrontEndRun "$1"
}
# Rebase the project's front-end checkout onto the given branch (via the
# external "git_r" helper) and reinstall dependencies.
# $1 - project alias prefix; $2 - branch to rebase onto.
# Returns 1 if either required parameter is missing.
energieqGitRebase() {
    if [ -z "$1" ]; then
        echo 'null value not allowed as first parameter! You must pass the required parameter(s).' >&2
        # Fixed: was 'return $1' — a bare return that reported success.
        return 1
    fi
    if [ -z "$2" ]; then
        echo 'null value not allowed as second parameter! You must pass the required parameter(s).' >&2
        # Fixed: was 'return $2' — same bare-return bug on this path.
        return 1
    fi
    eval "${1}_fe_dir"
    eval "git_r $2"
    npm install
}
# Build the project's docker-compose services from its front-end directory.
# $1 - project alias prefix. Returns 1 if the parameter is missing.
# NOTE(review): docker_compose_build is expected to be an alias/function
# defined elsewhere in this shell setup — confirm.
energieqDockerComposeBuild() {
    if [ -z "$1" ]; then
        echo 'null value not allowed as first parameter! You must pass the required parameter(s).' >&2
        # Fixed: was 'return $1' — a bare return that reported success.
        return 1
    fi
    eval "${1}_fe_dir"
    docker_compose_build
}
# Bring up the project's docker-compose services from its front-end directory.
# $1 - project alias prefix. Returns 1 if the parameter is missing.
energieqDockerComposeUp() {
    if [ -z "$1" ]; then
        echo 'null value not allowed as first parameter! You must pass the required parameter(s).' >&2
        # Fixed: was 'return $1' — a bare return that reported success.
        return 1
    fi
    eval "${1}_fe_dir"
    docker_compose_up
}
# Build and then bring up the project's docker-compose services, reusing the
# per-project "<prefix>_fe_docker_compose_*" aliases defined below.
# $1 - project alias prefix. Returns 1 if the parameter is missing.
energieqDockerComposeBuildUp() {
    if [ -z "$1" ]; then
        echo 'null value not allowed as first parameter! You must pass the required parameter(s).' >&2
        # Fixed: was 'return $1' — a bare return that reported success.
        return 1
    fi
    eval "${1}_fe_dir"
    eval "${1}_fe_docker_compose_build"
    eval "${1}_fe_docker_compose_up"
}
# Per-project shortcut aliases, all named "<project>_..." via the variables
# at the top of this file. $SYSTEM_ROOT_GIT_REPO_FOLDER is expected from the
# surrounding shell environment.
# NOTE(review): the *_server aliases auto-trust the host keys via ssh-keyscan
# before connecting — convenient but it defeats host-key verification.
alias ${ENERGIEQ_PROJECT_NAME_UNDERSCORE}_fe_ansible_staging="cd $SYSTEM_ROOT_GIT_REPO_FOLDER/conf-mgnt/ansible/ && ansible-playbook -i hosts playbooks/energieq_front_end_staging.yml -vvv"
alias ${ENERGIEQ_PROJECT_NAME_UNDERSCORE}_fe_ansible_production="cd $SYSTEM_ROOT_GIT_REPO_FOLDER/conf-mgnt/ansible/ && ansible-playbook -i hosts playbooks/energieq_front_end_production.yml -vvv"
alias ${ENERGIEQ_PROJECT_NAME_UNDERSCORE}_fe_bc="energieqFrontEndBranchChange $ENERGIEQ_PROJECT_NAME_UNDERSCORE "
alias ${ENERGIEQ_PROJECT_NAME_UNDERSCORE}_fe_bcd="energieqFrontEndBranchChange $ENERGIEQ_PROJECT_NAME_UNDERSCORE develop"
alias ${ENERGIEQ_PROJECT_NAME_UNDERSCORE}_fe_dir="cd $SYSTEM_ROOT_GIT_REPO_FOLDER/energieq-1369-frontend"
alias ${ENERGIEQ_PROJECT_NAME_UNDERSCORE}_fe_docker_compose_build="energieqDockerComposeBuild $ENERGIEQ_PROJECT_NAME_UNDERSCORE"
alias ${ENERGIEQ_PROJECT_NAME_UNDERSCORE}_fe_docker_compose_up="energieqDockerComposeUp $ENERGIEQ_PROJECT_NAME_UNDERSCORE"
alias ${ENERGIEQ_PROJECT_NAME_UNDERSCORE}_fe_docker_compose_up_build="energieqDockerComposeBuildUp $ENERGIEQ_PROJECT_NAME_UNDERSCORE"
alias ${ENERGIEQ_PROJECT_NAME_UNDERSCORE}_fe_git_r="energieqGitRebase $ENERGIEQ_PROJECT_NAME_UNDERSCORE "
alias ${ENERGIEQ_PROJECT_NAME_UNDERSCORE}_fe_git_rd="energieqGitRebase $ENERGIEQ_PROJECT_NAME_UNDERSCORE develop"
alias ${ENERGIEQ_PROJECT_NAME_UNDERSCORE}_fe_start="energieqFrontEndRun $ENERGIEQ_PROJECT_NAME_UNDERSCORE"
alias ${ENERGIEQ_PROJECT_NAME_UNDERSCORE}_fe_update="energieqFrontEndUpdate $ENERGIEQ_PROJECT_NAME_UNDERSCORE"
alias ${ENERGIEQ_PROJECT_NAME_UNDERSCORE}_staging_server="ssh-keyscan -H '18.184.32.140' >> ~/.ssh/known_hosts && chmod 600 ~/.ssh/energieq_front_end_staging.pem && ssh -i ~/.ssh/energieq_front_end_staging.pem ubuntu@18.184.32.140"
alias ${ENERGIEQ_PROJECT_NAME_UNDERSCORE}_production_server="ssh-keyscan -H '35.157.157.32' >> ~/.ssh/known_hosts && chmod 600 ~/.ssh/energieq_front_end_production.pem && ssh -i ~/.ssh/energieq_front_end_production.pem ubuntu@35.157.157.32"
| true |
2e5b935a5c7a2dc542e9da2d2547bb6217ceab06 | Shell | duritong/munin-contrib | /plugins/network/transmission_ratios/tr_ratios | UTF-8 | 2,017 | 3.5 | 4 | [] | no_license | #!/bin/sh
# -*- sh -*-
: <<=cut
=head1 NAME
tr_ratios - monitor transfer ratios of the "transmission" bittorent program
=head1 APPLICABLE SYSTEMS
Any system with "transmission" installed and a transmission daemon running.
=head1 CONFIGURATION
Maybe you need to configure access credentials and connection settings:
[tr_ratios]
env.host localhost
env.port 9091
env.username alice
env.password secret
=head1 MAGIC MARKERS
#%# family=auto
#%# capabilities=autoconf
=head1 AUTHOR
unspecified
=head1 LICENSE
unspecified
=cut
# Daemon connection settings, overridable via the munin plugin environment
# (env.host / env.port / env.username / env.password, see CONFIGURATION).
CONNECTION_ARG="${host:-localhost}:${port:-9091}"
USERNAME="${username:-}"
PASSWORD="${password:-}"
# return a space separated list of transmissions with the following columns:
# * fieldname
# * ratio (in percent)
# * name of the transmissions
# NOTE(review): FIELDWIDTHS in the awk program below is a GNU awk extension;
# under mawk/BSD awk it is ignored and the column parsing silently breaks.
# The shebang is /bin/sh, so "awk" is whatever the system default is —
# confirm gawk is available on target hosts.
request_transmission_stats() {
# Pass credentials only when at least one of them is configured.
if [ -n "$USERNAME$PASSWORD" ]; then
transmission-remote "$CONNECTION_ARG" --auth "$USERNAME:$PASSWORD" --list
else
transmission-remote "$CONNECTION_ARG" --list
fi | awk '
BEGIN { FIELDWIDTHS = "7 4 13 10 7 9 7 13 40" }
NR > 1 {
split($1,torrentid," ")
# remove "*" from the ID of stopped transmissions
sub(/\*/, "", torrentid[1])
if (torrentid[1] != "Sum:") {
split($7,ratio," ")
ratio[1] = ratio[1] * 100
print "ID" torrentid[1], ratio[1], $9
}
}'
}
# "autoconf" probe: munin expects "yes" or "no (reason)" on stdout and exit
# status 0 either way.
if [ "$1" = "autoconf" ]; then
    if [ -n "$(request_transmission_stats 2>/dev/null)" ]; then
        echo "yes"
    else
        # Distinguish "daemon unreachable" from "client not installed".
        # Fixed: used the non-standard 'which'; POSIX 'command -v' is the
        # portable equivalent and is always a shell builtin.
        if command -v transmission-remote >/dev/null; then
            echo "no (failed to connect to daemon)"
        else
            echo "no (missing 'transmission-remote' program)"
        fi
    fi
    exit 0
fi
# "config" mode: emit the graph definition plus one "<ID>.label <name>" line
# per torrent (names transliterated to ASCII, as munin field labels require).
if [ "$1" = "config" ]; then
echo "graph_title Transmission seed ratios"
echo "graph_vlabel Seed ratio %"
echo "graph_category torrent"
echo "graph_info This plugin shows your transmission ratios per torrent"
request_transmission_stats | awk '{print $1 ".label " $3 }' | iconv -f utf-8 -t ascii//translit
exit 0
fi
# Default (fetch) mode: one "<ID>.value <ratio-in-percent>" line per torrent.
request_transmission_stats | awk '{print $1 ".value " $2 }'
| true |
bdfd5caeb72707337bc917f9b0c2f5470ca74ae3 | Shell | BabyMelvin/Aidroid | /fingerprint/op-tee/source/01-my_test/build_ta_my_test_qemu.sh | UTF-8 | 870 | 3.1875 | 3 | [] | no_license | #!/bin/sh
# Build the my_test host application and Trusted Application for OP-TEE on
# QEMU (vexpress / qemu_virt flavor).
CURDIR=$(pwd)
# This expects to be placed as a first-level folder relative to the other
# OP-TEE folders in a setup using the default repo configuration, as
# described by the documentation in optee_os (README.md).
ROOT=${PWD}
ROOT=$(dirname "$ROOT")
# path to the toolchain
export PATH=${ROOT}/toolchains/aarch32/bin:$PATH
# path to the TA-DEV-KIT coming from optee_os
export TA_DEV_KIT_DIR=${ROOT}/optee_os/out/arm/export-ta_arm32
# path to the client library (GP Client API)
export TEEC_EXPORT=${ROOT}/optee_client/out/export
export PLATFORM=vexpress
export PLATFORM_FLAVOR=qemu_virt
# Toolchain prefix for the user-space code (normal world).
# Fixed: was "arm-linux-geueabihf-" — "gnueabihf" was misspelled, naming a
# toolchain prefix that does not exist.
HOST_CROSS_COMPILE=arm-linux-gnueabihf-
# Build the host application. (Guarded cd: don't run make in the wrong dir.)
cd "$CURDIR/host" || exit 1
# Fixed: this referenced $HOST_CORSS_COMPILE (typo), which is unset, so the
# host app was silently built with the native compiler.
make CROSS_COMPILE=$HOST_CROSS_COMPILE "$@"
# Build the Trusted Application.
# NOTE(review): TA_CROSS_COMPILE is never set in this script — presumably
# expected from the environment; confirm or give it a default.
cd "$CURDIR/ta" || exit 1
make CROSS_COMPILE=$TA_CROSS_COMPILE "$@"
| true |
382f65548dd619e5f1c444853b6fa90bbec75706 | Shell | pablo-sa-souza/FastTrackCompasso | /Shell Script/processos-memoria.sh | UTF-8 | 946 | 3.875 | 4 | [] | no_license | #!/bin/bash
# Make sure the ./log output directory exists; -p makes this a no-op when it
# is already there (replaces the old explicit [ ! -d log ] test + mkdir).
mkdir -p log
processos_memoria(){ #cria função processos_memoria()
processos=$(ps -e -o pid --sort -size | head -n 11 | grep [0-9]) #cria variavel processos com a lista de processos.
for pid in $processos #cria variavel pid em processos
do
nome_processo=$(ps -p $pid -o comm= | sed s/' '/'_'/g) #cria a variavel nome_processo utilizando a variavel pid para puxar o nome
echo -n $(date +%F,%H,%M,%S, ) >> log/$nome_processo.log #imprime a data e hora no arquivo log
tamanho_processo=$(ps -p $pid -o size | grep [0-9]) #cria a variavel tamanho_processo com os dados.
echo "$(bc <<< "scale=2;$tamanho_processo/1024") MB" >> log/$nome_processo.log #imprime o tamanho do processo no log
done
}
# Take one sample run and report the overall outcome to the user.
# (Same branch selection as before: success message on exit status 0,
# error message otherwise; user-facing strings kept verbatim.)
if processos_memoria
then
    echo "operação realizada com sucesso"
else
    echo "houve um problema na operação"
fi
| true |
3b513693588d42dfcb19e346d083eb5a34d91ac2 | Shell | husseinmarah/Thali_CordovaPlugin | /build.sh | UTF-8 | 3,865 | 3.578125 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
### START - JXcore Test Server --------......................
### Testing environment prepares separate packages for each node.
### Package builder calls this script with each node's IP address
### Make sure multiple calls to this script file compiles the application file
### END - JXcore Test Server --------
echo ""
echo "start build.sh"
# Absolute directory containing this script (resolves symlinked cwd via -P).
SCRIPT_PATH="$(cd "$(dirname "$0")"; pwd -P)"
# NOTE(review): build-dep.sh presumably defines log_error,
# is_darwin_platform, get_ci_ip_address and running_on_ci used below —
# confirm. It is sourced BEFORE strict mode is enabled, so its own failures
# are not trapped.
source "$SCRIPT_PATH/thali/install/include.sh/build-dep.sh"
set -euo pipefail
trap 'log_error $LINENO' ERR
# The build has sometimes failed with the default value of maximum open
# files per process, which is 256. Try to boost it as workaround.
ulimit -n 1024
echo ""
echo "-- Environment:"
echo "Cordova version: $(cordova -v)"
echo "Node version: $(node -v)"
echo "JXcore version: $(jx -jxv)"
echo "JXcore engine: $(jx -jsv)"
if is_darwin_platform; then
echo "xcodebuild version: $(xcodebuild -version | head -n1)"
fi
echo ""
WORKING_DIR=$(pwd)
# A hack to workaround an issue where the install scripts assume that the
# folder of the Thali Cordova plugin is called exactly Thali_CordovaPlugin,
# but this isn't always the case in the CI.
# https://github.com/thaliproject/Thali_CordovaPlugin/issues/218
THALI_PLUGIN_DIR="${WORKING_DIR}/../Thali_CordovaPlugin"
if [ ! -d "$THALI_PLUGIN_DIR" ]; then
    # Quoted (was unquoted): a working directory containing spaces would
    # otherwise word-split into several cp/cd arguments.
    cp -R . "$THALI_PLUGIN_DIR"
    cd "$THALI_PLUGIN_DIR"
fi
# Run first the tests that can be run on desktop
thali/install/setUpDesktop.sh
cd test/www/jxcore/
# Check if build is running in CI Test Mode
# Flip this to true to run the reduced CI smoke test before the full suite.
CI_TEST_MODE=false
# Fixed: the test used an unquoted $CI_TEST_MODE with the bash-only '==';
# quoting plus POSIX '=' keeps it correct even if the variable were empty
# (the script runs under set -u, where an unset/empty unquoted operand
# would break the [ ] expression).
if [ "$CI_TEST_MODE" = true ]
then
    echo -e "${GREEN_COLOR} Running in CI test mode ${NORMAL_COLOR}"
    node CITestMode.js
fi
echo ""
echo "run desktop tests"
# Under set -e, any failing test run below aborts the whole build.
jx runTests.js --networkType WIFI
jx runTests.js --networkType NATIVE
jx runTests.js --networkType BOTH
jx npm run test-meta
jx runCoordinatedTests.js --networkType NATIVE
jx runCoordinatedTests.js --networkType WIFI
jx runCoordinatedTests.js --networkType BOTH
echo "end desktop tests"
echo ""
# Verify that docs can be generated
#cd $WORKING_DIR/thali/
#jx npm run createPublicDocs
#jx npm run createInternalDocs
# Make sure we are back in the project root folder
# after the test execution
cd $WORKING_DIR
echo ""
echo "remove the previous build result (if any) to start from a clean state."
rm -rf ../ThaliTest
# Either PerfTest_app.js or UnitTest_app.js
TEST_TYPE="UnitTest_app.js"
# IP the coordination server listens on (helper from build-dep.sh).
SERVER_ADDRESS="$(get_ci_ip_address)"
# The line below is really supposed to be 'jx npm run setupUnit -- $SERVER_ADDRESS' but getting the last argument
# passed through npm run and then into sh script seems to be a step too far. Eventually we could use an
# intermediary node.js script to fix this but for now we'll just hack it.
thali/install/setUpTests.sh $TEST_TYPE $SERVER_ADDRESS
if running_on_ci; then
echo ""
echo "start copying builds for CI"
# Make sure we are back in the project root folder
# after the setting up the tests
cd $WORKING_DIR
# Remove the node_modules in the CI environment, because the coordination
# server may have different OS and CPU architecture than the build server
# so modules need to be installed there separately (this is handled by the CI).
rm -rf test/TestServer/node_modules
# A hack workround due to the fact that CI server doesn't allow relative paths outside
# of the original parent folder as a path to the build output binaries.
# https://github.com/thaliproject/Thali_CordovaPlugin/issues/232
echo "copying Android build for CI"
rm -rf android-release-unsigned.apk
cp -R ../ThaliTest/platforms/android/build/outputs/apk/android-release-unsigned.apk android-release-unsigned.apk
echo "copying iOS build for CI"
rm -rf ThaliTest.app
cp -R ../ThaliTest/platforms/ios/build/device/ThaliTest.app ThaliTest.app
echo "end copying builds for CI"
echo ""
fi
echo "end build.sh"
echo ""
| true |
30d89255776cd7dc37557abf36dec15051a5739f | Shell | BrytonLee/pmem-redis | /tests/cce_scipts/remote_server.sh | UTF-8 | 4,305 | 3.40625 | 3 | [] | permissive | #!/bin/bash
socket=$1
core_bind=$2
redis_num=$3
aep_size=$4
aep_device=$5
aof_enable=$6
aof_path=$7
redis_path=$8
server_ip=$9
zeroout=${10}
port_start=$((9000 + $((socket*200))))
#ret=`ps aux | grep -v grep | grep "emon -i /home/dennis/emon/skx-2s-events.txt"|wc -l`
#if [ $ret == 0 ]; then
#/home/dennis/emon/sepdk/src/rmmod-sep
#/home/dennis/emon/sepdk/src/insmod-sep
#echo "/home/dennis/emon/bin64/emon -i /home/dennis/emon/skx-2s-events.txt >> $redis_path/${redis_num}_${aep_size}.emon &"
#/home/dennis/emon/bin64/emon -i /home/dennis/emon/skx-2s-events.txt >> $redis_path/${redis_num}_${aep_size}.emon &
#fi
if [ $aep_device == "no" ]; then
echo "aep-device is not configured, continue to run without aep device."
aep_device=
else
if [ $zeroout = 1 ]; then
echo "zeroout is 1"
#rm $aep_device/* -rf
#$redis_path/creat_aep_file.sh $aep_size $redis_num $port_start $aep_device
#$redis_path/zero-out.sh $aep_size $aep_device
else
echo "zeroout is 0"
rm $aep_device/* -rf
fi
fi
#---------------------------check cpu configuration------------------------------------------
#if [ $core_bind == 1 ]; then
threads=`lscpu |grep -i "thread(s) per core:"|awk -F ":" '{print $2'}|tr -d '[:space:]'`
cores=`lscpu |grep -i "core(s) per socket:"|awk -F ":" '{print $2'}|tr -d '[:space:]'`
if [ "$threads" == "1" ]; then
echo "hyperthread is disabled, please enable it before doing the test."
exit 1
fi
#fi
#--------------------------clean up old files-------------------------------------------------
mkdir -p ${aof_path}
rm ${aof_path}/*.aof -f
#--------------------------start servers------------------------------------------------------
local_thread=`lscpu |grep "NUMA node${socket} CPU(s):"|awk '{print $(nf)}'|awk -F ',' '{print $1}'|awk -F '-' '{print $1}'|awk -F ':' '{print $2}'`
remote_thread=$(($local_thread + $cores*2))
echo "redis_num",$redis_num, "core", $cores, "socket number",$socketnum, "local_thread", ${local_thread}
if [ $core_bind == 1 ]; then
if [ $redis_num -gt $cores ]; then
echo "you're running too many redis servers! each redis server need 2 cpu threads."
exit 1
fi
fi
for (( instances=1; instances <= $redis_num; instances++ ))
do
port=$((port_start+instances))
#aep_start=$((${aep_size} * (${instances}-1)))
aof_name=${instances}.aof
if [ $core_bind == 1 ];then
cores_bind=$((${local_thread}+${instances}-1)),$((${remote_thread} + ${instances}-1))
sockets_config="taskset -c $cores_bind"
else
cores_bind="${local_thread}-$((${local_thread}+${cores}-1)),${remote_thread}-$((${remote_thread} + ${cores}-1))"
sockets_config="taskset -c $cores_bind"
echo
fi
#NUMA="numactl -N $socket -l $sockets_config"
NUMA="numactl -m $socket $sockets_config"
if [ $aof_enable = "disable" ]; then
if [ ! $aep_device ]; then
echo " $NUMA $redis_path/redis-server --bind $server_ip --appendonly no --port ${port} >& /dev/null &"
$NUMA $redis_path/redis-server --bind $server_ip --appendonly no --port ${port} >& /dev/null &
else
echo "$NUMA $redis_path/redis-server --bind $server_ip --appendonly no --port ${port} --nvm-maxcapacity ${aep_size} --nvm-dir ${aep_device} --nvm-threshold 64 >& /dev/null &"
$NUMA $redis_path/redis-server --bind $server_ip --appendonly no --port ${port} --nvm-maxcapacity ${aep_size} --nvm-dir ${aep_device} --nvm-threshold 64 >& /dev/null &
fi
elif [ $aof_enable = "enable" ]; then
if [ ! $aep_device ]; then
echo " $NUMA $redis_path/redis-server --bind $server_ip --appendonly yes --appendfsync always --appendfilename ${instances}.aof --dir ${aof_path} --port ${port} >& /dev/null &"
$NUMA $redis_path/redis-server --bind $server_ip --appendonly yes --appendfsync always --appendfilename ${instances}.aof --dir ${aof_path} --port ${port} >& /dev/null &
else
echo "$NUMA $redis_path/redis-server --bind $server_ip --appendonly yes --appendfsync always --appendfilename ${instances}.aof --port ${port} --pmdir ${aep_device} ${aep_size}g >& /dev/null &"
$NUMA $redis_path/redis-server --bind $server_ip --appendonly yes --appendfsync always --appendfilename ${instances}.aof --port ${port} --pmdir ${aep_device} ${aep_size}g >& /dev/null &
fi
fi
done
| true |
b6d8ddb923914ec6e33c16f7c9fed47d8d1faf44 | Shell | ejo090/versionbattle | /crawl.sh | UTF-8 | 4,559 | 3.484375 | 3 | [] | no_license | #!/bin/bash
## CONFIG
# MAX NUMBER OF PROCESSES FOR PARALLEL PROCESSING
PROC=6
alias db="sudo -u postgres psql 1>/dev/null 2>/dev/null -U postgres -d instances -c "
function scrape() {
DOMAIN=$1
LINK="https://$DOMAIN/about"
RESULT=$(curl -m 5 -k $LINK 2>/dev/null | xmllint --html --xpath "//div/div[1]/div[2]/" - 2>/dev/null | sed -e 's/<[^>]*>//g')
if [ "$RESULT" == "" ];then
REG="TRUE"
else
REG="FALSE"
fi
LINK="https://$DOMAIN/about/more"
API_LINK="https://$DOMAIN/api/v1/instance"
RESULT=$(curl -m 5 -k $LINK 2>/dev/null)
API_RESULT=$(curl -m 5 -kL $API_LINK -w "\ntime=%{time_total} code=%{http_code}" 2>/dev/null)
INSTANCE_FULL_VER=$(echo $API_RESULT | jq -r '.version' 2>/dev/null)
if [[ $INSTANCE_FULL_VER =~ ^([0-9]+\.[0-9]+)\.([0-9]+) ]]; then INSTANCE_SIMPLE_VER=${BASH_REMATCH[1]}; fi
if [[ $INSTANCE_SIMPLE_VER < 1.5 ]] ; then
USERS=$(echo $RESULT | xmllint --html --xpath "/html/body/div/div/div[1]/div[2]/div[1]/strong" - 2>/dev/null | sed -e 's/<[^>]*>//g' | sed -e 's/[, ]//g')
STATUSES=$(echo $RESULT | xmllint --html --xpath "/html/body/div/div/div[1]/div[2]/div[2]/strong" - 2>/dev/null | sed -e 's/<[^>]*>//g' | sed -e 's/[, ]//g')
CONNS=$(echo $RESULT | xmllint --html --xpath "/html/body/div/div/div[1]/div[2]/div[3]/strong" - 2>/dev/null | sed -e 's/<[^>]*>//g' | sed -e 's/[, ]//g')
else
USERS=$(echo $RESULT | xmllint --html --xpath "/html/body/div/div[2]/div/div[1]/div[1]/strong" - 2>/dev/null | sed -e 's/<[^>]*>//g' | sed -e 's/[, ]//g')
STATUSES=$(echo $RESULT | xmllint --html --xpath "/html/body/div/div[2]/div/div[1]/div[2]/strong" - 2>/dev/null | sed -e 's/<[^>]*>//g' | sed -e 's/[, ]//g')
CONNS=$(echo $RESULT | xmllint --html --xpath "/html/body/div/div[2]/div/div[1]/div[3]/strong" - 2>/dev/null | sed -e 's/<[^>]*>//g' | sed -e 's/[, ]//g')
fi
echo "$DOMAIN, $USERS, $STATUSES, $CONNS, $REG" >> "scrape.txt"
}
function crawl() {
DOMAIN=$1
if [ "$DOMAIN" == "" ]; then return 1; fi
LINK="https://$DOMAIN/api/v1/instance"
RESULT=$(curl -6 -m 5 -kL $LINK -w "\ntime=%{time_total} code=%{http_code}" 2>/dev/null)
CODE=$?
VER=$(echo $RESULT | jq -r '.version' 2>/dev/null)
TIME=$(echo "$(echo $RESULT | grep "time=" | sed -r 's/.*time=([0-9]+\.[0-9]+) code=([0-9]{3}$)/\1/') * 1000" | bc)
STATUS=$(echo $RESULT |grep "time="| sed -r 's/.*time=([0-9]+\.[0-9]+) code=([0-9]{3})$/\2/')
# pass v6
if [ "$STATUS" == "200" ]; then
RESULT=$(curl -4 -m 5 -kL $LINK -w "\ntime=%{time_total} code=%{http_code}" 2>/dev/null)
STATUS=$(echo $RESULT |grep "time="| sed -r 's/.*time=([0-9]+\.[0-9]+) code=([0-9]{3})$/\2/')
scrape $DOMAIN
# pass v4/v6
if [ "$STATUS" == "200" ]; then
if [[ ! "$VER" =~ [0-9]+(\.[0-9]+){2} ]]; then
echo "$DOMAIN, Up, 0.0.0, $TIME, v4/v6" >> result.txt
else
echo "$DOMAIN, Up, $VER, $TIME, v4/v6" >> result.txt
fi
# pass v6 only
else
if [[ ! "$VER" =~ [0-9]+(\.[0-9]+){2} ]]; then
echo "$DOMAIN, Up, 0.0.0, $TIME, v6" >> result.txt
else
echo "$DOMAIN, Up, $VER, $TIME, v6" >> result.txt
fi
fi
# cannot pass v6
else
RESULT=$(curl -4 -m 5 -kL $LINK -w "\ntime=%{time_total} code=%{http_code}" 2>/dev/null)
VER=$(echo $RESULT | jq -r '.version' 2>/dev/null)
TIME=$(echo "$(echo $RESULT | grep "time=" | sed -r 's/.*time=([0-9]+\.[0-9]+) code=([0-9]{3}$)/\1/') * 1000" | bc)
STATUS=$(echo $RESULT |grep "time="| sed -r 's/.*time=([0-9]+\.[0-9]+) code=([0-9]{3})$/\2/')
# pass v4 only
if [ "$STATUS" == "200" ]; then
scrape $DOMAIN
if [[ ! "$VER" =~ [0-9]+(\.[0-9]+){2} ]]; then
if [ "$CODE" != "6" ]; then
echo "$DOMAIN, Up, 0.0.0, $TIME, v4/ex" >> result.txt
else
echo "$DOMAIN, Up, 0.0.0, $TIME, v4" >> result.txt
fi
else
if [ "$CODE" != "6" ]; then
echo "$DOMAIN, Up, $VER, $TIME, v4/ex" >> result.txt
else
echo "$DOMAIN, Up, $VER, $TIME, v4" >> result.txt
fi
fi
# cannot connect
else
echo "$DOMAIN, Down, $STATUS" >> result.txt
fi
fi
sort result.txt -o result.txt
}
echo -n > result.txt
echo -n > scrape.txt
export -f crawl
export -f scrape
if [ -f instances.list ]; then
xargs -n1 -P$PROC -I % bash -c "crawl $INSTANCE %" < instances.list
else
curl -s https://instances.mastodon.xyz/instances.json | jq -r '.[].name' > .instances.list
xargs -n1 -P$PROC -I % bash -c "crawl $INSTANCE %" < .instances.list
rm -f .instances.list
fi
| true |
1e39fb04858bea2d59eb0461f54df227a8dbe118 | Shell | tinyloop/eccv16-taxonomy | /run.sh | UTF-8 | 1,575 | 3.734375 | 4 | [
"MIT"
] | permissive | #! /bin/bash
cd ./example
count=($(ls -1 ./ | grep .solverstate | wc -l))
filename=$(date +"%F_%H_%M")
echo $filename
gpu_num=0
gpu_count=($(nvidia-smi -L | wc -l))
if [ $gpu_count -gt 1 ]
then
read -p "You have more than one graphic card. Do you want to see the current process list?(y/n)" answer
case ${answer:0:1} in
y|Y )
nvidia-smi
;;
esac
while :
do
read -p "Enter GPU number : " answer
gpu_num=${answer}
if [ "$gpu_num" -ge 0 -a "$gpu_num" -lt "$gpu_count" ]
then
break
fi
done
fi
echo Using GPU '#'$gpu_num.
if [ $count -ge "1" ]
then
list=($(ls -1 ./*.solverstate | tr '\n' '\0' | xargs -0 -n 1 basename | sort -V -r))
read -p "You have a solverstate. Do you want to continue learning process from the last(y/n)? " answer
case ${answer:0:1} in
y|Y )
../caffe/build/tools/caffe train -solver ./solver.prototxt -gpu $gpu_num -snapshot ./$list &> $filename.log &
;;
* )
../caffe/build/tools/caffe train -solver ./solver.prototxt -gpu $gpu_num &> $filename.log &
;;
esac
else
../caffe/build/tools/caffe train -solver ./solver.prototxt -gpu $gpu_num &> $filename.log &
fi
cd ..
tail -F ./example/$filename.log
#script for future use
#!/bin/bash
#list=$(ls -1 ./regularized_fix/*.solverstate | tr '\n' '\0' | xargs -0 -n 1 basename)
#for file in $list
#do
# echo $file
#done
#files=./regularized_fix/"*.solverstate"
#regex='([0-9]+)\.solverstate'
#for f in $files
#do
# [[ $f =~ $regex ]]
# echo ${BASH_REMATCH[1]}
#done
#list=$(ls -1 ./regularized_fix/*.solverstate | tr '\n' '\0' | xargs -0 -n 1 basename | sort -V)
| true |
ee2f41259c84a908e27e091e442a24967d4d5ba0 | Shell | Security-Onion-Solutions/securityonion-setup | /bin/so-allow | UTF-8 | 5,772 | 3.796875 | 4 | [] | no_license | #!/bin/bash
#
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#########################################
# function to validate IP address
#########################################
is_ip() {
case "$*" in
""|*[!0-9.]*) return 1 ;;
esac
oldIFS=$IFS
IFS='.'
set -- $*
IFS=$oldIFS
[ $# -eq 4 ] || return 1
for ipseg in $1 $2 $3 $4
do
case $ipseg in
*[!0-9]*) return 1 ;;
esac
[ $ipseg -le 255 ] || return 1
done
}
is_cidr() {
[[ "$1" =~ ^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])(\/([0-9]|[1-2][0-9]|3[0-2]))$ ]] && return 0
}
#########################################
# check for root privileges
#########################################
if [[ $(/usr/bin/id -u) -ne 0 ]]; then
echo "This script needs to be run as root. Please try again using sudo."
exit
fi
#########################################
# Check if Elastic is enabled
#########################################
. /etc/nsm/securityonion.conf
if [ "$LOGSTASH_ENABLED" = "yes" ] || [ "$ELASTICSEARCH_ENABLED" = "yes" ] && [ -x /usr/sbin/so-allow-elastic ] ; then
/usr/sbin/so-allow-elastic
exit 0
fi
#########################################
# Prompt user for kind of device
#########################################
echo "This program allows you to add a firewall rule to allow connections from a new IP address."
echo
echo "What kind of communication would you like to allow?"
echo
echo "[a] - analyst - ports 22/tcp, 443/tcp, and 7734/tcp"
echo "[c] - apt-cacher-ng client - port 3142/tcp"
echo "[l] - syslog device - port 514"
echo "[o] - OSSEC/Wazuh agent - port 1514"
echo "[r] - OSSEC/Wazuh registration service - port 1515/tcp"
echo "[s] - Security Onion sensor - 22/tcp, 4505/tcp, 4506/tcp, and 7736/tcp"
echo
echo "If you need to add any ports other than those listed above,"
echo "you can do so using the standard 'ufw' utility."
echo
echo "For more information, please see:"
echo "https://docs.securityonion.net/en/16.04/Firewall"
device="none"
while [ "$device" = "none" ]; do
echo
echo "Please enter your selection:"
read input
case $input in
a)
device="analyst"
proto="proto tcp"
port="22,443,7734"
;;
c)
device="apt-cacher-ng client"
proto="proto tcp"
port="3142"
;;
l)
device="syslog"
proto=""
port="514"
;;
o)
device="OSSEC/Wazuh agent"
proto="proto udp"
port="1514"
;;
r)
device="OSSEC/Wazuh registration service"
proto="proto tcp"
port="1515"
;;
s)
device="Security Onion sensor"
proto="proto tcp"
port="22,4505,4506,7736"
;;
esac
done
#########################################
# Prompt user for IP address
#########################################
valid="no"
while [ "$valid" = "no" ]; do
echo
echo "Configuring firewall for $device..."
echo "Please enter the IP address (or CIDR range) you'd like to allow to connect to port(s): $port"
read address
# Check if CIDR
is_cidr $address && valid="yes"
# Check if IP
[ $? -ne 0 ] && is_ip $address && valid="yes"
done
#########################################
# Confirm rule before adding
#########################################
echo "We're going to allow connections from $address to port(s) $port."
echo
echo "Here's the firewall rule we're about to add:"
rule="sudo ufw allow $proto from $address to any port $port"
echo $rule
echo
if [ "$device" == "analyst" ]; then
echo "We're also whitelisting $address in /var/ossec/etc/ossec.conf to prevent OSSEC Active Response from blocking it. Keep in mind, the OSSEC server will be restarted once configuration is complete."
fi
echo
echo "To continue and add this rule, press Enter."
echo "Otherwise, press Ctrl-c to exit."
read input
#########################################
# Run the command to add the firewall rule
#########################################
$rule
echo "Rule has been added."
echo
echo "Here is the entire firewall ruleset:"
ufw status
echo
if [ "$device" == "analyst" ]; then
if ! grep -q "<white_list>$address</white_list>" /var/ossec/etc/ossec.conf ; then
DATE=`date`
sed -i 's/<\/ossec_config>//' /var/ossec/etc/ossec.conf
sed -i '/^$/N;/^\n$/D' /var/ossec/etc/ossec.conf
echo -e "<!--Address $address added by /usr/sbin/so-allow on "$DATE"-->\n <global>\n <white_list>$address</white_list>\n </global>\n</ossec_config>" >> /var/ossec/etc/ossec.conf
echo "Added whitelist entry for $address in /var/ossec/etc/ossec.conf."
echo
echo "Restarting OSSEC Server..."
service ossec-hids-server restart
fi
fi
echo
| true |
6b52f19e0316e0993dad2f38eb3edf99f22e856b | Shell | antenore/svntogit-community | /pidgin-talkfilters/repos/community-x86_64/PKGBUILD | UTF-8 | 980 | 2.53125 | 3 | [] | no_license | # Maintainer: Connor Behan <connor.behan@gmail.com>
pkgname=pidgin-talkfilters
pkgver=2.7.0
pkgrel=5
pkgdesc="Implements GNU talkfilters in pidgin chats"
arch=('x86_64')
url="https://bitbucket.org/rekkanoryo/purple-plugin-pack"
license=('GPL')
depends=('libpurple' 'talkfilters')
makedepends=('intltool' 'gettext' 'python' 'pidgin')
source=(https://bitbucket.org/rekkanoryo/purple-plugin-pack/downloads/purple-plugin-pack-$pkgver.tar.bz2 python3.patch)
sha256sums=('2bbcf5e778a33968ba7f2864d2a6cb526a8984be3e4151642a583eee8eafb03c'
'842292c95e61fb5a45b30eaed490d29ee67f725b46301e3590feafeb10014980')
build() {
cd "$srcdir"/purple-plugin-pack-$pkgver
patch -Np1 -i ../python3.patch
sed -i -e 's|GETTEXT_PACKAGE=plugin_pack|GETTEXT_PACKAGE=pidgin_talkfilters|'\
configure.ac
autoreconf -vfi
PYTHON=/usr/bin/python \
./configure --prefix=/usr --with-plugins=talkfilters
make
}
package() {
cd "$srcdir"/purple-plugin-pack-$pkgver
make DESTDIR="$pkgdir" install
}
| true |
5fa95a259a00b1aaa41008c00429b46afc8f3c1e | Shell | kaushalyekishor/CodingClubAssignments | /Day-3/settingEnvPath/env.sh | UTF-8 | 120 | 2.71875 | 3 | [] | no_license | #!/bin/bash -x
if [ $usersecret ]
then
echo "path already exists"
else
export usersecret=dH34zJaa23;
echo $usersecret;
fi
| true |
16b71d8e8e2a58869a969a21c1815ddb5ee88715 | Shell | igrep/parallel-dirs | /bin/mrr-exec | UTF-8 | 272 | 3 | 3 | [] | no_license | #!/bin/bash
# 使うときは
# 1. mrr-pathsをPATHに置く。
# 2. 普通に実行するのではなくsource mrr-execとする。
eval $@
IFS_bk=$IFS
IFS='
'
pwd_save=$( pwd )
for dir in $( mrr-paths -ns $pwd_save )
do
cd $dir
eval $@
done
cd $pwd_save
IFS=$IFS_bk
| true |
0efc15dbeaa9579b6ec595ccc2d5958cb36df8f2 | Shell | jwillker/bkp-full-diff-rest | /full.backup.sh | UTF-8 | 1,711 | 3.75 | 4 | [] | no_license | #!/bin/bash
echo "Programa de backup full"
echo " "
dadosfull() {
SRCDIR="/dados/" #diretórios que serão feito backup
DSTDIR=/backup/full #diretório de destino do backup
DATA=`date +%d-%m-%Y` #pega data atual
TIME_BKCP=+15 #número de dias em que será deletado o arquivo de backup
#criar o arquivo full-data.tar no diretório de destino
ARQ=$DSTDIR/full-$DATA.tar.gz
#data de inicio backup
DATAIN=`date +%c`
echo "Data de inicio: $DATAIN"
}
backupfull(){
sync
tar -czvf $ARQ $SRCDIR
if [ $? -eq 0 ] ; then
echo "----------------------------------------"
echo "Backup Full concluído com Sucesso"
DATAFIN=`date +%c`
echo "Data de termino: $DATAFIN"
echo "Backup realizado com sucesso" >> /var/log/backup_full.log
echo "Criado pelo usuário: $USER" >> /var/log/backup_full.log
echo "INICIO: $DATAIN" >> /var/log/backup_full.log
echo "FIM: $DATAFIN" >> /var/log/backup_full.log
echo "-----------------------------------------" >> /var/log/backup_full.log
echo "Backup realizado com sucesso
Criado pelo usuário: $USER
INICIO: $DATAIN
FIM: $DATAFIN
------------------------------------------------" > /backup/log/full.log
echo " "
echo "Log gerado em /var/log/backup_full.log"
else
echo "ERRO! Backup do dia $DATAIN" >> /var/log/backup_full.log
fi
}
procuraedestroifull(){
#apagando arquivos mais antigos (a mais de 20 dias que existe)
find $DSTDIR -name "f*" -ctime $TIME_BKCP -exec rm -f {} ";"
if [ $? -eq 0 ] ; then
echo "Arquivo de backup mais antigo eliminado com sucesso!"
else
echo "Erro durante a busca e destruição do backup antigo!"
fi
}
dadosfull
backupfull
procuraedestroifull
python /backup/send_email_full.py
exit 0
| true |
016c995a61b8130bdcb59db0543f9f10aef90b06 | Shell | geeksScript/geeksScript_kiddie_contest | /question-1/gS_kiddie_script2.sh | UTF-8 | 1,560 | 3.875 | 4 | [] | no_license |
#!/bin/bash
echo "Initialising $(date +%H:%M:%S)"
filename="file"
echo -e "$SHELL\n" > $filename
# Each user in a separated line too
for line in $(cut -d":" -f1 /etc/passwd)
do
echo -e "$line\n" >> $filename
done
echo -e "$(pwd)\n" >> $filename
echo -e "$(whoami)\n" >> $filename
echo "Data saved in the file: $filename"
echo "Number of words: $(cat $filename | wc -m)"
echo "Number of lines in the file: $(cat $filename | wc -l)"
echo "Number of characters in the file: $(cat $filename | wc -c)"
echo "Count of pattern 'file' in the file: $(cat $filename | grep -c file)"
read -p "Introduce a file name: " filename1
for line in $(cat $filename1)
do
case $line in
# Using regex with sed it deletes ";" and "," from the email ID
*.net*) [[ -e dotnet_emails.txt ]] || echo "List of .net domains:" > dotnet_emails.txt ; echo "$line" | sed -e "s/[;,]//g" >> dotnet_emails.txt ;;
*.org*) [[ -e dotorg_emails.txt ]] || echo "List of .org domains:" > dotorg_emails.txt ; echo "$line" | sed -e "s/[;,]//g" >> dotorg_emails.txt ;;
*.com*) [[ -e dotorg_emails.txt ]] || echo "List of .com domains:" > dotcom_emails.txt ; echo "$line" | sed -e "s/[;,]//g" >> dotcom_emails.txt ;;
*) [[ -e rest_of_emails.txt ]] || echo "Leftover emails list:" > rest_of_emails.txt ; echo "$line" | sed -e "s/[;,]//g" >> rest_of_emails.txt ;;
esac
done
echo "All task completed $(date +%H:%M:%S)"
exit 1
| true |
ec04b096b5fedfcb830abc7e312635512f656ae3 | Shell | schrht/aliyun-test-scripts | /storage_pt/cloud_type.sh | UTF-8 | 273 | 3.078125 | 3 | [] | no_license | #!/bin/bash
PATH=~/workspace/bin:/usr/sbin:/usr/local/bin:$PATH
# This script is used to tell the cloud type
grep -i aliyun /etc/cloud/cloud.cfg &>/dev/null
if [ $? -eq 0 ]; then
echo "aliyun"
exit 0
fi
if [ "" = "" ]; then
echo "aws"
else
echo "azure"
fi
exit 0
| true |
45d415bcee687bcbc9c8730252650a3970542f81 | Shell | masashiendou/dotfiles | /autokeyconfig.sh | UTF-8 | 974 | 2.796875 | 3 | [] | no_license | #!/bin/bash
STR=$(xprop -id $(xprop -root 32x '\t$0' _NET_ACTIVE_WINDOW | cut -f 2) WM_CLASS)
MOD1=$(xmodmap -pm|grep Super_R)
MOD2=$(xmodmap -pm|grep mod4)
if [[ $STR == *tmux* ]] && [[ $MOD1 == *"Super_R (0x6c)"* ]]; then
# echo "Current keymap is I3 but current window is TMUX"
# xkbcomp ~/git/dotfiles/tmux-config.xkm $DISPLAY
~/.xkb2/script/tmux-config.sh
# xmodmap .Xmodmap_tmux
elif [[ ! $STR == *tmux* ]] && [[ $MOD1 == *"Super_R (0x86)"* ]]; then
# echo "Current keymap is TMUX but current window is I3"
# xkbcomp ~/git/dotfiles/i3-config.xkm $DISPLAY
#~/.xkb/script/i3-config.sh
xmodmap .Xmodmap_i3
elif [[ $STR == *tmux* ]] && [[ $MOD2 == *"Alt_R (0x86)"* ]]; then
~/.xkb2/script/tmux-config.sh
elif [[ $STR == *tmux* ]] && [[ $MOD2 == *"Alt_R (0x6c)"* ]]; then
~/.xkb2/script/tmux-config.sh
elif [[ ! $STR == *tmux* ]] && [[ $MOD2 == *"Alt_R (0x86)"* ]]; then
~/.xkb/script/i3-config.sh
else
# echo "Current keymap and window are maching !!!"
:
fi
| true |
45a80f63f37fe59c21a9a8cd24b56f01487e6511 | Shell | jcomeauictx/openwrt-freifunk | /rootfs/www/cgi-bin/lan.html | UTF-8 | 6,910 | 3.046875 | 3 | [] | no_license | #!/bin/sh
export DATE="18.4.2010";SCRIPT=${0#/rom}
export TITLE="Admin: LAN"
. ${SCRIPT%/*}/cgi-bin-pre.sh
cat<<EOF
<H1>Admin: LAN</H1>
EOF
if [ "$REQUEST_METHOD" = "POST" ]; then
read QUERY_STRING
fi
if [ -z "$QUERY_STRING" ]; then
#Speedups
lan_proto="$(nvram get lan_proto)"
lan_ipaddr=$(nvram get lan_ipaddr)
lan_netmask=$(nvram get lan_netmask)
dhcp_start=$(nvram get dhcp_start)
dhcp_num=$(nvram get dhcp_num)
test -z "$dhcp_num" && dhcp_num=4
wifi_ipaddr=$(nvram get wifi_ipaddr)
wifi_netmask=$(nvram get wifi_netmask)
ff_lan_proto=$(nvram get ff_lan_proto)
test -z "$ff_lan_proto" && lan_proto=static
case $lan_proto in
dhcp|disabled|olsr)
;;
*)lan_proto=static
;;
esac
cat<<EOF
<FORM ACTION="lan.html" CLASS="form" METHOD="POST"><TABLE CLASS="shadow0" CELLPADDING="0" CELLSPACING="0"><TR><TD><TABLE CLASS="shadow1" CELLPADDING="0" CELLSPACING="0"><TR><TD><TABLE CLASS="shadow2" CELLPADDING="0" CELLSPACING="0"><TR><TD><TABLE BORDER="0"
CLASS="formfixwidth"><TR
TITLE="Determines the operation mode of the four ethernet connectors. If still not configured, the default 'Static' is used.">
<TD>LAN
Protocol:</TD><TD><SELECT NAME="lan_proto" ONCHANGE="this.form.lan_ipaddr.disabled=(0!=this.value.indexOf('static')&&0!=this.value.indexOf('olsr'));this.form.lan_netmask.disabled=(0!=this.value.indexOf('static')&&0!=this.value.indexOf('olsr'));this.form.lan_gateway.disabled=(0!=this.value.indexOf('static')&&0!=this.value.indexOf('olsr'));this.form.dhcp_start.disabled=(0!=this.value.indexOf('static')&&0!=this.value.indexOf('olsr'));this.form.dhcp_num.disabled=(0!=this.value.indexOf('static')&&0!=this.value.indexOf('olsr'));this.form.dhcp_lease.disabled=(0!=this.value.indexOf('static')&&0!=this.value.indexOf('olsr'));">
<OPTION
VALUE='dhcp' $(test "$lan_proto" = "dhcp" && echo "selected=selected")>Query DHCP
server</OPTION>
<OPTION
VALUE='static' $(test "$lan_proto" = "static" && echo "selected=selected")>Static</OPTION>
<OPTION
VALUE='disabled' $(test "$lan_proto" = "disabled" && echo "selected=selected")>Disabled</OPTION>
</SELECT> </TD>
</TR><TR
TITLE="This is the IP address setting of the four ethernet connectors.">
<TD>LAN
IP:</TD><TD><INPUT NAME="lan_ipaddr" SIZE="32" TYPE="TEXT" VALUE="$lan_ipaddr"$(if [ "$lan_proto" != "static" ] && [ "$lan_proto" != "olsr" ];then echo ' disabled="disabled"';fi)></TD>
</TR>
<TR
TITLE="The netmask determines, which IP addresses can be reached directly.">
<TD>LAN
Netmask:</TD><TD><INPUT NAME="lan_netmask" SIZE="32" TYPE="TEXT" VALUE="$lan_netmask"$(if [ "$lan_proto" != "static" ] && [ "$lan_proto" != "olsr" ];then echo ' disabled="disabled"';fi)></TD>
</TR>
<TR
TITLE="Default route for the LAN jacks. This setting is normally left blank">
<TD>LAN
Default Route:</TD><TD><INPUT NAME="lan_gateway" SIZE="32" TYPE="TEXT" VALUE="$(nvram get lan_gateway)"$(if [ "$lan_proto" != "static" ] && [ "$lan_proto" != "olsr" ];then echo ' disabled="disabled"';fi)></TD>
</TR>
<TR
TITLE="Configures static routes with the 'ip:netmask:gatewayip:metric:interface' notation. Example: '10.1.2.0:255.255.255.0:0.0.0.0:1:vlan1'. Separate multiple entries with space.">
<TD>Static Routes:</TD><TD><INPUT NAME="static_route" SIZE="32" TYPE="TEXT" VALUE="$(nvram get static_route)"></TD>
</TR><TR
TITLE="Disables the IP address translation (NAT/Masquerading) between the internal LAN and the wireless network. For special purposes, this option allows the direct communication between the internal LAN and the wireless LAN.">
<TD>Disable NAT:</TD><TD><INPUT NAME="ff_nonat" TYPE="CHECKBOX" VALUE="1"$(if [ "$(nvram get ff_nonat)" = "1" ];then echo ' checked="checked"';fi)></TD>
</TR>
<TR
TITLE="Disables the firewall function. The NAT rules (translation of internal LAN addresses to the IP address of the wireless network) will stay in force.">
<TD>Disable Firewall:</TD><TD><INPUT NAME="fw_disable" TYPE="CHECKBOX" VALUE="1"$(if [ "$(nvram get fw_disable)" = "1" ];then echo ' checked="checked"';fi)></TD>
</TR>
<TR><TD COLSPAN="2">
EOF
if [ "$(test -n "$lan_ipaddr" && ipcalc -n $lan_ipaddr $wifi_netmask|cut -d'=' -f2)" = "$(test -n "$wifi_ipaddr" && ipcalc -n $wifi_ipaddr $wifi_netmask|cut -d'=' -f2)" ]; then
cat<<EOF
<B>Note</B>:
The firewall function between WLAN and LAN is not active, because the
LAN IP address ($lan_ipaddr) is part of the WLAN network
($wifi_ipaddr/$wifi_netmask).
EOF
fi
cat<<EOF
</TD>
</TR>
<TR
TITLE="Starting value for the dynamic allocated IP addresses.">
<TD>DHCP
Start IP:</TD><TD>$(test -n "$lan_ipaddr" && ipcalc -n $lan_ipaddr $lan_netmask|cut -d'=' -f2|cut -d'.' -f1-3).<INPUT NAME="dhcp_start" SIZE="6" TYPE="TEXT" VALUE="${dhcp_start##*.}"$(if [ "$lan_proto" != "static" ] && [ "$lan_proto" != "olsr" ];then echo ' disabled="disabled"';fi)></TD>
</TR>
<TR
TITLE="Count of IP addresses managed by the DHCP server. The sum total of start value and count should be less then 255.">
<TD>DHCP
Number of Users:</TD><TD><INPUT NAME="dhcp_num" SIZE="6" TYPE="TEXT" VALUE="$dhcp_num"$(if [ "$lan_proto" != "static" ] && [ "$lan_proto" != "olsr" ];then echo ' disabled="disabled"';fi)>(DHCP off with "0")</TD>
</TR><TR
TITLE="Time (in seconds) after which the leased IP address may be reallocated. Leave input field empty or enter 0 for the 12h default (43200s).">
<TD>DHCP
Lease Time:</TD><TD><INPUT NAME="dhcp_lease" SIZE="6" TYPE="TEXT" VALUE="$(nvram get dhcp_lease)"$(if [ "$lan_proto" != "static" ] && [ "$lan_proto" != "olsr" ];then echo ' disabled="disabled"';fi)>seconds</TD>
</TR><TR><TD COLSPAN="2"> </TD>
</TR><TR><TD COLSPAN="2"><INPUT NAME="post_lan" TITLE="Apply these settings. The settings are applied with the next restart." VALUE="Apply" TYPE="submit"> <INPUT NAME="post_abort" TITLE="Cancel dialog page." VALUE="Cancel" TYPE="submit"></TD>
</TR>
</TABLE></TD></TR></TABLE></TD></TR></TABLE></TD></TR></TABLE></FORM>
<P><B>Tip</B>:
These settings influence the configuration, which is sent to wired clients
via DHCP. To ensure a convenient network access, you should enter the <B>Host
Name</B> and the internal <B>Domain</B> (-> <A HREF="system.html">System</A>).
</P>
EOF
else
IFS=\&;set ${QUERY_STRING%%[^%&=+-:@-_a-z~]*};unset IFS;eval $*
DIRTY=
if [ -n "$post_lan" ]; then
test -n "$lan_proto" && ff_lan_proto=1
for V in ff_lan_proto lan_proto lan_ipaddr lan_netmask lan_gateway static_route ff_nonat fw_disable dhcp_start dhcp_num dhcp_lease; do
eval "C=\$$V"
C=$(httpd -d "$C")
if [ "$C" != "$(nvram get $V)" ]; then
DIRTY=1
nvram set $V="$C"
fi
done
if checkbridge; then
DIRTY=1
fi
fi
if [ -n "$DIRTY" ]; then
nvram commit>/dev/null 2>&1
cat<<EOF
<TABLE BORDER="0" CLASS="note"><TR><TD>The
changed settings are committed. The settings are active after the next
<A HREF="reset.html">Restart</A>.</TD>
</TR>
</TABLE>
EOF
else
cat<<EOF
<TABLE BORDER="0" CLASS="note"><TR><TD>No settings are
changed.</TD>
</TR>
</TABLE>
EOF
fi
fi
. ${SCRIPT%/*}/cgi-bin-post.sh
| true |
af592ffa861edf3b34e9ebaa1a8a32544a53afdb | Shell | mahmoudimus/.vim | /install.sh | UTF-8 | 171 | 2.6875 | 3 | [] | no_license | #!/bin/bash
#TODO: check if user and home are defined
INSTALL_DIR=$(dirname $0)
ln -s ${INSTALL_DIR}/.vimrc ${HOME}/.vimrc
ln -s ${INSTALL_DIR}/.gvimrc ${HOME}/.gvimrc
| true |
9d2fe872f5ae845894df2c415aa5da8579a00916 | Shell | likwid/dotfiles | /zsh/shims.zsh | UTF-8 | 313 | 2.671875 | 3 | [] | no_license | # rbenv
if which rbenv > /dev/null; then eval "$(rbenv init -)"; fi
# hub
if which hub > /dev/null; then eval "$(hub alias -s)"; fi
# jenv
if which jenv > /dev/null; then eval "$(jenv init -)"; fi
# Pyenv
[ -f ~/.pyenv_profile ] && source ~/.pyenv_profile
# nvm
[ -s ~/.nvm_profile ] && source ~/.nvm_profile
| true |
e85e3b05922ba358c4358f2d27ffc419c8a43ea5 | Shell | phm87/docker-chips | /entry.sh | UTF-8 | 1,786 | 2.96875 | 3 | [] | no_license | #!/bin/bash
# source : https://github.com/jl777/komodo/blob/master/zcutil/docker-entrypoint.sh
#set -ex
# Docker entrypoint for a CHIPS node: generate chips.conf on first run,
# download the blockchain bootstrap, then start chipsd with -reindex.
echo "...Checking chips.conf"
if [ ! -e "$HOME/.chips/chips.conf" ]; then
    mkdir -p $HOME/.chips
    echo "...Creating chips.conf"
    # Unquoted heredoc: ${rpcuser}/${rpcpassword}/${listenip} come from the
    # container environment, and the backtick dd|base64 generates a random
    # rpcpassword at *config-generation* time when none is supplied.
    cat <<EOF > $HOME/.chips/chips.conf
rpcuser=${rpcuser:-chipsrpc}
rpcpassword=${rpcpassword:-`dd if=/dev/urandom bs=33 count=1 2>/dev/null | base64`}
txindex=1
rpcallowip=127.0.0.1
rpcallowip=${listenip:-127.0.0.1}
addnode=5.9.253.195
rpcport=12454
# port=57777
addnode=54.36.126.42
addnode=145.239.149.173
addnode=54.39.53.170
addnode=159.69.23.30
addnode=95.213.238.98
addnode=144.217.10.241
addnode=5.189.232.34
addnode=139.99.125.27
EOF
    cat $HOME/.chips/chips.conf
fi
# ToDo: Needs some rework. I was sick
# NOTE(review): the args array built below is never passed to any command in
# this script — the chipsd invocation further down ignores it. Each branch
# also stores the whole option string as a SINGLE array element, so even if it
# were used it would be passed as one word. Confirm intent before relying on it.
if [ $# -gt 0 ]; then
    args=("$@")
elif [ -z ${assetchain+x} ]; then
    args=("-gen -genproclimit=${genproclimit:-2} -pubkey=${pubkey}")
else
    # TODO : remove this part if needed or remove all args
    args=("-pubkey=${pubkey} -ac_name=${assetchain} -addnode=${seednode}")
fi
echo "****************************************************"
echo "Download bootstrap"
echo "****************************************************"
mkdir -p ~/.chips
cd ~/.chips
wget http://bootstrap3rd.dexstats.info/CHIPS-bootstrap.tar.gz
tar xvzf CHIPS-bootstrap.tar.gz
rm CHIPS-bootstrap.tar.gz
echo "****************************************************"
echo "Bootstrap downloaded"
echo "****************************************************"
echo "Running: chipsd -reindex"
echo "****************************************************"
# exec replaces this shell with chipsd, so chipsd becomes PID 1 of the
# container.
# NOTE(review): everything after this exec (sleep, curl, lightningd screen
# session) is UNREACHABLE dead code — it only runs if exec itself fails.
exec ~/chips3/src/chipsd -reindex
sleep 5000
IP="$(curl ifconfig.me)"
ln_args=("--alias=friendlyalias --ipaddr=${IP} --rgb=555555 --log-level=debug")
screen -dmS lightningd lightningd ${ln_args[@]}
| true |
9bc1c511d62afc973c020e6e7cfdcd5cffbb2df7 | Shell | xionghaihua/docker-redis | /entrypoint.sh | UTF-8 | 639 | 2.65625 | 3 | [] | no_license | #!/bin/bash
# Docker entrypoint: make sure redis-server is resolvable, inject the runtime
# password into redis.conf, then hand control to the container command.

# If redis-server is not on PATH yet, pull in the PATH additions from the
# profile fragment installed with the Redis build.
# (`command -v` replaces the non-portable `which`.)
if ! command -v redis-server >/dev/null 2>&1; then
    source /etc/profile.d/redis.sh
fi
#sysctl -w net.core.somaxconn=1024
#sysctl -w vm.overcommit_memory=1
#echo never > /sys/kernel/mm/transparent_hugepage/enabled
#echo never > /sys/kernel/mm/transparent_hugepage/defrag
#useradd -u 6379 redis
#chown -R redis.redis /data/redis/data
#chown -R redis.redis /opt/apply/redis
#chown -R redis.redis /usr/bin/redis-server
#chown -R redis.redis /usr/bin/redis-cli
#chmod +x /usr/bin/redis-server
#chmod +x /usr/bin/redis-cli
# Replace the commented-out "# requirepass" template line with the password
# taken from the PASSWD environment variable.
# NOTE(review): $PASSWD is interpolated into the sed program, so a password
# containing "/", "&", "'" or a newline would corrupt the config — confirm the
# value is constrained upstream.
sed -i "/^#[[:space:]]\+requirepass/c requirepass '$PASSWD'" /opt/apply/redis/etc/redis.conf
# Run the container's CMD in place of this shell so it receives signals.
exec "$@"
| true |
4dbef25fc5d477511657a290951969f097b6c80f | Shell | BrainsOnBoard/CX_neural_compass_paper | /model_learn/batch_bee.sh | UTF-8 | 1,315 | 3.65625 | 4 | [] | no_license | #!/bin/bash
# Batch driver for beeworld experiments.
#
# Usage: batch_bee.sh TYPE(sing,pano,2bar) SPINEML_2_BRAHMS_PATH OUT_PATH SYSTEMML_PATH
#
# For the chosen stimulus type, runs the SpineML-to-BRAHMS converter once per
# run number, writing each run's output under OUT_PATH/<type>/drosphs<n>/.
if [ "$#" -ne "4" ]; then
    echo "Usage: batch_bee.sh TYPE(sing,pano,2bar) SPINEML_2_BRAHMS_PATH OUT_PATH SYSTEMML_PATH"
    exit 1
fi

# Map the experiment type onto the stimulus name and experiment id that the
# converter expects.
if [ "$1" = "pano" ]; then
    echo "Panorama experiments running"
    stim_type=("pano")
    stim_type_expt=("2")
elif [ "$1" = "sing" ]; then
    echo "Single bar experiments running"
    stim_type=("sing")
    stim_type_expt=("1")
elif [ "$1" = "2bar" ]; then
    echo "Two bar experiments running"
    stim_type=("2bar")
    stim_type_expt=("1")
else
    echo "Unknown experiment type, valide types are pano, sing, and 2bar"
    exit 1
fi

# Refuse to start unless a beeworld process is already running.
# ('[b]eeworld' matches beeworld but never this grep itself; testing the
# pipeline directly replaces the old `…; if [ $? == 0 ]` pattern.)
if ps -ax | grep -q '[b]eeworld'; then
    echo "Running Beeworld found! Continuing..."
else
    echo "Beeworld not running, please run beeworld and try again"
    exit 1
fi

S2B_PATH=$2
OUT_PATH=$3
SYSTEMML_INSTALL_PATH=$4
MDL_DIR=$PWD

run_nums=("1" "2" "3" "4" "5" "6" "7" "8" "9" "10")

for ((r=0;r<${#stim_type[@]};++r)); do
    for ((i=0;i<${#run_nums[@]};++i)); do
        sleep 1
        # Abort rather than run the converter from the wrong directory.
        cd "$S2B_PATH" || exit 1
        PATH=/bin:/usr/bin:/usr/local/bin:${SYSTEMML_INSTALL_PATH}/BRAHMS/bin/brahms/:. BRAHMS_NS=${SYSTEMML_INSTALL_PATH}/Namespace SYSTEMML_INSTALL_PATH=${SYSTEMML_INSTALL_PATH} ./convert_script_s2b -w "$S2B_PATH" -m "${MDL_DIR}" -e "${stim_type_expt[r]}" -o "${OUT_PATH}/${stim_type[r]}/drosphs${run_nums[i]}/"
        cd -
    done
done
| true |
6212c24eda77d5b041403d15e4d5a74d742a119f | Shell | kjbrandstatter/FusionFS-BluegeneP | /jobs/metadata_test.sh | UTF-8 | 252 | 2.9375 | 3 | [] | no_license | #!/bin/bash
# Filesystem metadata micro-benchmark: create and delete 5000 empty files in
# the directory given as $1 and measure the elapsed wall-clock seconds.
DIRNAME=$1

start=$(date +%s)
for i in {1..5000}
do
	echo "doing $i"
	# Quote the path so directories with spaces work; the quoted form also
	# drops the odd $DIRNAME'/'f_$i concatenation of the original.
	touch "$DIRNAME/f_$i"
	rm "$DIRNAME/f_$i"
done
end=$(date +%s)

# Elapsed seconds for the whole create/delete loop.
diff=$(( end - start ))
echo "$DIRNAME $start $end $diff" >> /intrepid-fs0/users/dzhao/persistent/result | true |
5f9350d681e220e9ea5dee12aea5ad65bd7fa5bf | Shell | evanweible-wf/dotfiles | /scripts/includes.sh | UTF-8 | 454 | 2.90625 | 3 | [] | no_license | #!/bin/sh
ZSH='/Users/evanweible/dev/config/dotfiles'

# Terminal message helpers. Each prints a colored status badge; the \r/\033[2K
# variants rewrite the current line so an info/user prompt can be replaced by
# its final success/error status.
#
# FIX: messages were previously embedded in the printf FORMAT string, so a
# message containing '%' or backslashes was mis-rendered (printf injection).
# They are now passed as a %s argument.

# Print a blank line.
br () {
  printf "\n"
}

# In-progress message (blue ".." badge); no trailing newline so the line can
# be rewritten later.
info () {
  printf "[\033[00;34m..\033[0m] %s" "$1"
}

# User prompt (yellow "?" badge), rewriting the current line.
user () {
  printf "\r[\033[0;33m?\033[0m] %s " "$1"
}

# Overwrite the current line with a green OK badge and the message.
success () {
  printf "\r\033[2K[\033[00;32mOK\033[0m] %s\n" "$1"
}

# Overwrite the current line with a red ERR badge and the message.
error () {
  printf "\r\033[2K[\033[00;31mERR\033[0m] %s\n" "$1"
}

# Overwrite the current line with a red FAIL badge, then stop the script.
fail () {
  printf "\r\033[2K[\033[0;31mFAIL\033[0m] %s\n" "$1"
  echo ''
  exit
}

# Run a command, diverting its stderr/stdout to scratch files in the CWD.
run () {
  "$@" 2> "_stderr.txt" 1> "_stdout.txt"
}
| true |
c727699df04f72017d51e9c23e0c9c3c15388cf6 | Shell | peccu/setup-bin | /setup.sh | UTF-8 | 1,423 | 3.28125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Install a set of prebuilt CLI binaries (ripgrep, fd, godit, 9t, fzf) into
# ~/bin by downloading GitHub release archives.
[ -x ~/bin ] || mkdir ~/bin
cd ~/bin

# Download a Rust project's musl release tarball from GitHub and move the
# single binary ($4) into the current directory.
#   $1 user, $2 repo, $3 version tag, $4 binary name inside the archive
# NOTE(review): assigning USER=$1 clobbers the shell's standard $USER
# environment variable for the rest of the script — confirm this is harmless
# here.
function byrust(){
    USER=$1
    REPO=$2
    V=$3
    APP=$4
    POSTFIX=i686-unknown-linux-musl
    PACK=${REPO}-${V}-${POSTFIX}
    EXT=tar.gz
    # Echo the command first for visibility, then actually run it.
    echo curl -LO https://github.com/${USER}/${REPO}/releases/download/${V}/${PACK}.${EXT}
    curl -LO https://github.com/${USER}/${REPO}/releases/download/${V}/${PACK}.${EXT}
    tar xzf ${PACK}.${EXT}
    mv ${PACK}/${APP} ${APP}
    rm ${PACK}.${EXT}
    rm -rf ${PACK}
}

# fzf ships its binary in a flat tgz (no subdirectory), so it gets its own
# installer: extract straight into the current directory.
function fzf_install(){
    V=$1
    PACK=fzf-${V}-linux_amd64
    EXT=tgz
    curl -LO https://github.com/junegunn/fzf-bin/releases/download/${V}/${PACK}.${EXT}
    tar xzf fzf-${V}-linux_amd64.tgz
    rm ${PACK}.${EXT}
}

# Download a Go project's linux_amd64 release zip and unzip it in place.
#   $1 user, $2 repo, $3 version tag
function bygolang(){
    USER=$1
    REPO=$2
    V=$3
    PACK=${REPO}_${V}_linux_amd64.zip
    curl -LO https://github.com/${USER}/${REPO}/releases/download/${V}/${PACK}
    unzip ${PACK}
    rm ${PACK}
}

# ripgrep
VERSION_RIPGREP=11.0.2
which rg >/dev/null || byrust BurntSushi ripgrep $VERSION_RIPGREP rg

# fd
VERSION_FD=v7.2.0
which fd >/dev/null || byrust sharkdp fd $VERSION_FD fd

# godit — built from source inside a golang container, then copied out via the
# bind-mounted CWD.
which godit >/dev/null || docker run --rm -it --name golang -v $(pwd):/cwd -w /cwd golang:1.11 bash -c 'go get github.com/nsf/godit && mv /go/bin/godit /cwd/'

# # micro
# which micro >/dev/null || curl https://getmic.ro | bash

# 9t
VERSION_9t=0.0.2
which 9t >/dev/null || bygolang gongo 9t $VERSION_9t

# fzf
VERSION_fzf=0.18.0
which fzf >/dev/null || fzf_install $VERSION_fzf
| true |
03c7eaada0d72862d832559499d80add620c773f | Shell | binaryphile/concorde | /examples/bin/myscript | UTF-8 | 997 | 3.859375 | 4 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
# Greeting CLI built on the concorde bash framework: prints "Hello, <name>!"
# for each name argument, with options for an alternate greeting (-g) and for
# dropping the exclamation mark (--mellow).
source concorde.bash
# Load the hello() helper from the sibling lib directory (concorde DSL).
$(require_relative ../lib/hello)

# Usage text; `get` captures the heredoc into concorde's $__ variable.
get <<'EOS'
  myscript [options] <name>...<name>

  Outputs "Hello, world!" when run without options.

  Outputs "Hello, <name>!" when provided with a name.
  Multiple names result in multiple greetings, one per line.

  Options:

    --mellow    Don't use an exclamation mark
    -g GREETING Use GREETING instead of "Hello"
EOS
printf -v usage '\n%s\n' "$__"

# Greet every remaining argument (or the empty default) using the parsed
# option hash passed as $1.
myscript_main () {
  # Pull greeting/mellow_flag out of the option hash (concorde `grab` DSL).
  $(grab 'greeting mellow_flag' from "${1:-}"); shift
  local punctuation

  # --mellow swaps the exclamation mark for a period.
  (( mellow_flag )) && punctuation=. || punctuation=''
  hello "$greeting" "${1:-}" "$punctuation"
  (( $# )) && shift
  while (( $# )); do
    hello "$greeting" "$1" "$punctuation"
    shift
  done
}

# When this file is sourced (e.g. for testing), stop here — only run the CLI
# when executed directly.
sourced && return
strict_mode on

# Option table for concorde's parse_options: short, long, argname, help.
get <<'EOS'
  '' --mellow '' "don't use an exclamation mark (flag)"
  -g '' greeting "an alternative greeting to 'Hello'"
EOS
$(parse_options __ "$@") || die "$usage" 0
myscript_main __ "$@"
| true |
15b9a9c0d4e79b4c30decabf10070312e0c32087 | Shell | pirafrank/dotfiles | /zsh/zprezto/runcoms/zprofile | UTF-8 | 502 | 2.59375 | 3 | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | # vim:syntax=zsh
# vim:filetype=zsh
#
# Executes commands at login pre-zshrc.
#
# ".zprofile is meant as an alternative to .zlogin for ksh fans;
# the two are not intended to be used together, although
# this could certainly be done if desired."
#
# Authors:
#   Sorin Ionescu <sorin.ionescu@gmail.com>
#

#
# Browser
#

# On macOS, `open` hands URLs to the default browser.
if [[ "$OSTYPE" == darwin* ]]; then
  export BROWSER='open'
fi

# kitty config dir
export KITTY_CONFIG_DIRECTORY="$HOME/dotfiles/gui_terminals/kitty"

# default RAILS environment
# NOTE(review): RAILS_ENV is assigned but not exported, so child processes
# (including rails itself) will not see it — confirm whether `export` was
# intended.
RAILS_ENV="development"
| true |
485b2929e4e0aadfdb87f9169312cfc659c441bb | Shell | s1van/hLSM | /tests/run_fm.sh | UTF-8 | 1,726 | 3.015625 | 3 | [] | no_license | #!/bin/bash
# Benchmark driver for hLSM "fm" experiments: for each named configuration it
# rebuilds the data store, runs the workload under blktrace-based IO analysis,
# and collects traces/plots/logs into a per-test result directory.
ANALYZER=`dirname $0`/io-analysis.sh;
TESTER=`dirname $0`/test.sh;
CLEANER=`dirname $0`/cache-cleanup.sh;

#######################################
# Run one benchmark: clean caches, trace the given devices while the tester
# executes, then gather PNG/OUT artifacts and logs into the result dir.
# Arguments: $1 trace dir, $2 result dir, $3 tag, $4 conf file,
#            $5 device list (space separated), $6 instance number
# NOTE(review): $STORE is expected to come from the conf file sourced by the
# caller (run_test/run_orig_test) — confirm.
#######################################
run() {
	local TDIR=$1;
	local RDIR=$2;
	local TAG=$3;
	local CONF=$4;
	local DEVICE=$5;
	local INUM=$6;
	echo "run $@";

	TRACE=$TDIR/$TAG;
	# Drop page/buffer caches so each run starts cold.
	$CLEANER -b;
	sleep 2;

	RES=$RDIR/$TAG;
	mkdir -p $RES;
	# io-analysis traces $DEVICE while running the tester command.
	$ANALYZER -d "$DEVICE" -o "$TRACE" -p "$TESTER $CONF $RES $INUM";
	cd $TRACE && cp $TRACE/*.png $RES;
	cd $TRACE && cp $TRACE/*.out $RES;
	cp -f $STORE/LOG $RES;
	cp -f $STORE/1/LOG $RES;
	cp /tmp/hlsm_log $RES;
	#sleep 10;	# skip preloading

	# Keep the tail of each device's parsed block trace for quick inspection.
	for DEV in $DEVICE; do
		blkparse -i $TRACE/$DEV| tail -100 > $RES/${DEV}.out
	done
}

# Rebuild both the primary ($STORE) and secondary ($SEC_STORAGE) stores from
# the pristine $BASE copy, in parallel. All three vars come from the conf file.
mk_store() {
	SROOT=$(dirname $STORE);	#primary store
	rm -rf $STORE $SEC_STORAGE;
	cp -rf $SROOT/$BASE $STORE &
	cp -rf $SROOT/$BASE $SEC_STORAGE &
	wait;
}

# Two-store ("fm") test: source the conf, rebuild both stores, run once.
run_test() {
	local TAG=$1;
	local CONF=$2;
	source $CONF;
	mk_store;
	run $TDIR $RDIR $TAG "$CONF" "$DEVS" 1 &
	wait;
}

# Single-store ("orig"/default LevelDB) test: only the primary store is
# rebuilt before running.
run_orig_test() {
	local TAG=$1;
	local CONF=$2;
	source $CONF;
	SROOT=$(dirname $STORE);	#primary store
	rm -rf $STORE;
	cp -rf $SROOT/$BASE $STORE;
	run $TDIR $RDIR $TAG "$CONF" "$DEVS" 1 &
	wait;
}

# global variables
TDIR=$HOME/store/trace/hlsm/fm;
RDIR=$HOME/store/result/hlsm/fm;
CDIR=`dirname $0`/conf/fm;

# block devices used
D1=sde;
D2=md0;
DEVS="$D1 $D2";

#############
#	r = 4
#############
# Experiment selection: exactly one test is enabled; the rest are kept
# commented for manual toggling.
BASE=r4_base;
#run_test fm_w_a	$CDIR/fm_w_a.conf;
#run_orig_test df_w_a	$CDIR/df_w_a.conf;
run_orig_test df_r	$CDIR/df_r.conf;
#run_orig_test dfh_w_a	$CDIR/dfh_w_a.conf;
#run_orig_test df_max	$CDIR/df_max.conf;
#run_test fm_max	$CDIR/fm_max.conf;
#run_orig_test df_50	$CDIR/df_50.conf;
#run_orig_test df_90	$CDIR/df_90.conf;
#run_orig_test df_r	$CDIR/df_r.conf;
#run_test fm_r	$CDIR/fm_r.conf;
| true |
3b4baabe8ca63f2b050705032f36d8474272761e | Shell | neo900/script_mysql_backup | /backup.sh | UTF-8 | 457 | 3.4375 | 3 | [] | no_license | #!/bin/bash
# Dump every MySQL database listed in /home/dump/mysql_backup/db_name (one
# name per line — confirm the file's format upstream) into a dated directory,
# logging progress to $log.
timestamp=$(date +%F)
user=""
pass=""
log="/home/dump/mysql_backup/backup.log"
DIR="/home/dump/mysql_backup/db_backup"

# -p makes this idempotent if the dated directory already exists.
mkdir -p "$DIR/$timestamp"

echo "Backup start - $(date)" >> "$log"
# Read database names line by line instead of word-splitting `cat` output.
while IFS= read -r db
do
	echo "*** Backup $db start - $(date)" >> "$log"
	mysqldump -u"$user" -p"$pass" -B "$db" > "$DIR/$timestamp/$db.sql"
	echo "*** Backup $db complete - $(date)" >> "$log"
done < /home/dump/mysql_backup/db_name
echo "Backup complete - $(date)" >> "$log"
| true |
2d4a6dd24d649d22567e00bbbd602d3951430c37 | Shell | gman999/repl-kit | /mirror_check.sh | UTF-8 | 2,023 | 3.921875 | 4 | [] | no_license | #!/bin/sh
# check_mirror.sh 20140701 gman
# DEPS wget, tor/torsocks
# Determine integrity of AroundBlocks.info mirrors compared to the local copy:
# bad mirrors are recorded in $store/bad_sites* and the admin is mailed; a
# good mirror refreshes the status page referenced by the iframe.
# To use, add full urls (one per line) to $store/urls.txt. Periodic
# housekeeping of it is fine. Mirror site data is stored in /tmp since it can
# get sloppy.

# variables (all overridable from the environment)
store="${store:-/tmp}" # where the action is
master="${master:-/usr/local/www/nginx/aroundblocks.info}" # master www site
urls=$(cat "$store/urls.txt") # one URL per line
# FIX: this was previously assigned as a literal string, never executed.
sites=$(echo "$urls" | sed 's~http[s]*://~~g') # $urls without http/s scheme
# FIX: the original was missing the "$", so the literal text
# "{iframe_output:-...}" was used as the output path.
iframe_output="${iframe_output:-$master/mirror_status.html}"
tor="${tor:-/usr/local/bin/usewithtor}" # torsocks location
now=$(date "+%Y%m%d-%H:%M:%S")

# Abort when $master has no content at all — nothing to diff against.
# FIX: "[ ! -f $master/* ]" broke as soon as the glob matched more than one
# file; check for a non-empty directory instead.
master_check () {
	if [ -z "$(ls -A "$master" 2>/dev/null)" ]; then
		logger -s "The $master directory is empty. Nothing to diff with."
		exit 1
	else
		logger -s "$master seems populated, at least"
	fi
}

# Mirror every URL into $store; .onion URLs are fetched through tor.
# FIX: the old "[ *.onion ]" test was a non-empty literal and thus always true.
get_site () {
	for url in $urls; do
		case "$url" in
		*.onion*) $tor /usr/local/bin/wget --no-check-certificate -m -P "$store" "$url" ;;
		*)        /usr/local/bin/wget --no-check-certificate -m -P "$store" "$url" ;;
		esac
	done
}

# Diff each mirrored tree against $master; an empty diff file means identical.
diff_site () {
	for site in $sites; do
		/usr/bin/diff -qr "$master" "$store/$site" > "$store/diff_$site"
	done
}

master_check; get_site; diff_site

# A NON-empty diff output means the mirror differs from the master.
# FIX: the original tested "! -s" and so reported identical mirrors as bad
# (and its status line also had an unterminated quote — a syntax error).
for site in $sites; do
	if [ -s "$store/diff_$site" ]; then
		echo " $site" >> "$store/bad_sites"
		echo "<p> $site bad $(date "+%Y%m%d-%H:%M:%S")" >> "$store/bad_sites.txt"
		logger -s "diff problem with $site"
		# NOTE(review): $admin is expected from the environment — confirm.
		cat "$store/notice" | mail -s "Problem with your AroundBlocks.info mirror" "$admin"
	else
		echo "$site good as of $now" > "$iframe_output"
		logger -s "$site good as of $(date)."
	fi
done
true
| true |
12fc5c0b1e6bcb3a9be6d61c8a04b566ab704305 | Shell | scottcunningham/thinklight-notify.sh | /thinklight-notify.sh | UTF-8 | 618 | 3.53125 | 4 | [] | no_license | #!/bin/bash
# thinklight-notify.sh
#
# A utility to flash your Thinkpad's thinklight, designed for IM notifications.
#
# Requires $LIGHTPATH to be writable by your current user.
export LOCKFILE='/tmp/thinklight-lock.lock'
export LIGHTPATH='/sys/class/leds/tpacpi::thinklight/brightness'

# Turn the light off/on by writing the brightness value to sysfs.
function lightoff {
	echo 0 > "$LIGHTPATH"
}

function lighton {
	echo 255 > "$LIGHTPATH"
}

# Play a sound ($1) with mplayer and blink the light three times. A lock file
# prevents overlapping notifications.
function flash {
	# FIX: the original tested the literal filename "LOCKFILE"
	# ([ -f LOCKFILE ]), so the lock never actually prevented overlap.
	if [ -f "$LOCKFILE" ]; then exit 1; fi
	/usr/bin/touch "$LOCKFILE"
	mplayer "$1" &
	for x in $(/usr/bin/seq 0 2); do lighton; sleep 0.75; lightoff; sleep 0.75; done
	/bin/rm "$LOCKFILE"
}

# Quote "$@" so sound file paths containing spaces survive intact.
flash "$@"
| true |
cff39971e071a5c43da08018e49fc84d7e4a9f14 | Shell | kienle1819/script-backupMysql | /backupMysql | UTF-8 | 478 | 3.6875 | 4 | [] | no_license | #!/bin/bash
# Dump each MySQL database listed in $CURRENT_DIR/mysqldb.txt (one name per
# line) to a dated, gzip-compressed file, then prune dumps older than 7 days.
CURRENT_DIR=/root/backup
MYSQL_DIR=$CURRENT_DIR/mysql
MYSQLUSER=root
MYSQLPASSWORD='youroorpasswordhere'
TODAY=$(date +%d_%m_%Y)

# mkdir -p is idempotent, so no existence check is needed.
mkdir -p "$MYSQL_DIR"

while IFS= read -r db
do
	#echo -e "$db";
	# FIX: use the configured $MYSQLUSER instead of a hard-coded "root" so
	# the variable declared above is actually honoured.
	mysqldump -u "$MYSQLUSER" -p"$MYSQLPASSWORD" "$db" | gzip > "$MYSQL_DIR/$db.$TODAY.sql.gz"
	sleep 30
done < "$CURRENT_DIR/mysqldb.txt"

# Delete compressed dumps older than 7 days.
find "$MYSQL_DIR" -type f -mtime +7 -exec rm {} \;
| true |
e8f6d89d4adbdd5c114c7deff44ea176a722fbee | Shell | foodmaher/Teros | /debian_source.sh | UTF-8 | 2,268 | 3.71875 | 4 | [] | no_license | #!/data/data/com.termux/files/usr/bin/bash
# Functions install Debian OS ;;
# Fundebian: install a Debian rootfs under Termux and generate a
# start-debian.sh launcher that enters it via proot. Downloads the rootfs
# tarball for the device architecture on first run, extracts it into
# ./debian-fs, then writes, fixes and runs the launch script.
Fundebian () {
pn="$(pwd)"
#cd "${terp}"
folder=debian-fs
tarball="debian-rootfs.tar.gz"
# An existing debian-fs directory means a previous install: skip the download.
if [ -d "$folder" ]; then
	first=1
	echo "skipping downloading"
fi
if [ "$first" != 1 ]; then
	if [ ! -f $tarball ]; then
		# Pick the rootfs flavour matching this device's dpkg architecture.
		case `dpkg --print-architecture` in
		aarch64)
			archurl="arm64" ;;
		arm)
			archurl="armhf" ;;
		amd64)
			archurl="amd64" ;;
		x86_64)
			archurl="amd64" ;;
		i*86)
			archurl="i386" ;;
		x86)
			archurl="i386" ;;
		*)
			err=3; des="unknown architecture"; echo "$des"; exitf ;;
		esac
		# NOTE(review): this line runs a command named "-O" with URL set in
		# its environment — it looks like a mangled wget invocation
		# (`wget "$URL" -O $tarball`). Presumably dialogwget performs the
		# actual download from $URL/$namepackage; confirm.
		URL="https://raw.githubusercontent.com/EXALAB/AnLinux-Resources/master/Rootfs/Debian/${archurl}/debian-rootfs-${archurl}.tar.gz" -O $tarball
		namepackage="${tarball}"
		dialogwget
	fi
	cur=`pwd`
	mkdir -p "$folder"
	cd "$folder"
	# "||:" swallows any failure of this assignment; dialogtar (defined
	# elsewhere) extracts $filetar into the current directory.
	filetar=${cur}/${tarball}||:
	dialogtar
	cd "$cur"
fi
# Directory of extra proot bind-mount snippets sourced by the launcher.
mkdir -p debian-binds
bin=start-debian.sh
echo "writing launch script"
# "<<-" strips leading tabs; the \$ escapes keep those expansions for the
# generated script's runtime, while unescaped $folder is baked in now.
cat > $bin <<- EOM
#!/bin/bash
cd \$(dirname \$0)
## unset LD_PRELOAD in case termux-exec is installed
unset LD_PRELOAD
command="proot"
command+=" --link2symlink"
command+=" -0"
command+=" -r $folder"
if [ -n "\$(ls -A debian-binds)" ]; then
    for f in debian-binds/* ;do
      . \$f
    done
fi
command+=" -b /dev"
command+=" -b /proc"
command+=" -b debian-fs/tmp:/dev/shm"
## uncomment the following line to have access to the home directory of termux
#command+=" -b /data/data/com.termux/files/home:/root"
## uncomment the following line to mount /sdcard directly to /
#command+=" -b /sdcard"
command+=" -w /root"
command+=" /usr/bin/env -i"
command+=" HOME=/root"
command+=" PATH=/usr/local/sbin:/usr/local/bin:/bin:/usr/bin:/sbin:/usr/sbin:/usr/games:/usr/local/games"
command+=" TERM=\$TERM"
command+=" LANG=C.UTF-8"
command+=" /bin/bash --login"
com="\$@"
if [ -z "\$1" ]; then
    exec \$command
else
    \$command -c "\$com"
fi
EOM
echo "fixing shebang of $bin"
termux-fix-shebang $bin
echo "making $bin executable"
chmod +x $bin
echo "removing image for some space"
dialog --backtitle "PAGE REMOVE ..." --msgbox "Remove ${tarball} ?" 8 30
# NOTE(review): --msgbox returns 0 on OK and 1 on ESC/cancel, so the tarball
# is removed only when the user *dismisses* the dialog — confirm intent.
if [ $? -eq 1 ];then
rm ${tarball}
fi
dialog --backtitle "PAGE END ..." --infobox "You can now launch Debian with the ./${bin} script, and run scrpit command (bash GUI.sh) install" 10 30
clear
./start-debian.sh
}
#errall=1
| true |
04870ab9a1f74b76c12196da44f59b2ba07f327a | Shell | vicboma1/bash-utils-arcade | /anyadir-bat-PCLauncherIni.sh | UTF-8 | 557 | 2.84375 | 3 | [] | no_license | #!/bin/bash
# Append a [name] section for every *.iso* file under the current directory to
# RocketLauncher's PCLauncher.ini (filename literally contains the Windows
# path, backslashes and all).
#
# FIX: the original piped find through `sed 's/ /_/g'` and mapped "_" back to
# " " per file — corrupting any filename that legitimately contained an
# underscore. Reading NUL-delimited paths handles spaces without that hack.
find . -name "*.iso*" -type f -print0 | while IFS= read -r -d '' ori
do
    filename="${ori%.*}"           # strip the extension
    name=${filename#*./}           # strip the leading "./"
    _bat="${name}.bat"
    echo "[${name}]" >> "E:\Hyperspin-1.5\RocketLauncher\Modules\PCLauncher\PCLauncher.ini"
    echo "FadeTitle=Dolphin" >> "E:\Hyperspin-1.5\RocketLauncher\Modules\PCLauncher\PCLauncher.ini"
    echo "Application=..\..\_Arcade\the-eye.eu\public\rom\Nintendo GameCube\Dolphin-x64\\${_bat}" >> "E:\Hyperspin-1.5\RocketLauncher\Modules\PCLauncher\PCLauncher.ini"
done
| true |
c3e41cfe594af6ec839605e5a924e44f7a04f8fa | Shell | k3zi/STM-Server | /start.sh | UTF-8 | 305 | 2.515625 | 3 | [] | no_license | #!/bin/bash
# Spin up one forever-supervised API server instance on each port in the
# 5001-5008 range, then start the forever web dashboard.
for port in {5001..5008}
do
   forever start -w --watchDirectory /home/stm/api/ /home/stm/api/prod-server.js production "$port"
done

# Web UI for the forever-managed processes.
forever start node_modules/forever-webui/app.js
| true |
9688b8ae2b96859d614ffa1c81f594edfffd9818 | Shell | AbdelrahimKoura/Hands-On-Generative-AI-with-Python-and-TensorFlow-2 | /Chapter_2/azure.sh | UTF-8 | 1,124 | 3.25 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Provision an AKS cluster on Azure and install Kubeflow on it:
# install the Azure CLI, log in, create a resource group + GPU node cluster,
# fetch kubectl credentials, then apply the Kubeflow manifest.
#
# SECURITY(review): a real username and password are hard-coded below and
# passed on the `az login` command line (visible in shell history and `ps`).
# These credentials should be rotated and read from the environment or a
# secrets store instead of living in this script.
export AZ_USERNAME="joseph.j.babcock@gmail.com"
export AZ_PASSWD="TapFav53**"
export RESOURCE_GROUP_NAME="KubeTest"
export LOCATION="eastus"
export NAME="KubeTestCluster"
# Standard_ND6 is a GPU VM size; 3 agent nodes.
export AGENT_SIZE="Standard_ND6"
export AGENT_COUNT="3"
export KF_NAME="AZKFTest"
export BASE_DIR=/opt
export KF_DIR=${BASE_DIR}/${KF_NAME}
export CONFIG_URI="https://raw.githubusercontent.com/kubeflow/manifests/v1.0-branch/kfdef/kfctl_k8s_istio.yaml"

echo "===== Install Azure CLI ====="
brew install azure-cli

echo "===== Login to Azure ====="
az login -u ${AZ_USERNAME} -p ${AZ_PASSWD}

echo "===== Create Resource Group ====="
az group create -n ${RESOURCE_GROUP_NAME} -l ${LOCATION}

echo "===== Create AKS Cluster in Resource Group ====="
az aks create -g ${RESOURCE_GROUP_NAME} -n ${NAME} -s ${AGENT_SIZE} -c ${AGENT_COUNT} -l ${LOCATION} --generate-ssh-keys

echo "===== Get Azure Credentials ====="
az aks get-credentials --resource-group ${RESOURCE_GROUP_NAME} --name ${NAME}

echo "===== Install Kubeflow on AKS ====="
# Start from a clean Kubeflow working directory every run.
if [ -d ${KF_DIR} ]; then rm -r -f ${KF_DIR}; fi
mkdir -p ${KF_DIR}
cd ${KF_DIR}
kfctl apply -V -f ${CONFIG_URI}
| true |
13b2f6bd5bb32a17e9170855ba8b57e55e8d40a4 | Shell | pawsong/DICTIONARY | /emphasize.sh | UTF-8 | 1,343 | 4.0625 | 4 | [] | no_license | #!/usr/bin/env bash
# Append an HTML color-emphasis span to a dictionary entry file.
# Usage: emphasize.sh <word> [orange|pupple|mint|blue]
# The entry lives at ./<FirstLetterUppercased>/<Word>.md; unknown or missing
# colors fall back to orange.
ORANGE="<span style='color:#FFCC00; font-weight:bold;'>"
PUPPLE="<span style='color:#A600FF; font-weight:bold;'>"
MINT="<span style='color:#00FFCC; font-weight:bold;'>"
BLUE="<span style='color:#00A6FF; font-weight:bold;'>"

# Echo the first character of $FILE, upper-cased (used as the index dir).
function takePrefix() {
    echo "${FILE:0:1}" | tr '[:lower:]' '[:upper:]'
}

# Echo everything after the first character of $FILE, lower-cased.
function takeWithoutPrefix() {
    echo "${FILE:1}" | tr '[:upper:]' '[:lower:]'
}

# Quote "$1" so a multi-word argument cannot break the test.
if [ -z "${1}" ]; then
    echo '[Error] Invalid File Name'
    exit 1
fi

FILE=$1
COLOR=$2

PREFIX=$(takePrefix)
REMAINDER=$(takeWithoutPrefix)
TITLE=$PREFIX$REMAINDER
DIRECTORY=./$PREFIX/$TITLE.md

# Quoted so titles containing spaces or glob characters work.
if [ ! -e "$DIRECTORY" ]; then
    echo "[Error] Doesn't Exist File"
    exit 1
fi

case "$COLOR" in
    orange)
        echo "Emphasize ORANGE at $DIRECTORY"
        echo "$ORANGE" >> "$DIRECTORY"
        ;;
    pupple)
        echo "Emphasize PUPPLE at $DIRECTORY"
        echo "$PUPPLE" >> "$DIRECTORY"
        ;;
    mint)
        echo "Emphasize MINT at $DIRECTORY"
        echo "$MINT" >> "$DIRECTORY"
        ;;
    blue)
        # FIX: this arm previously printed "Emphasize MINT" by copy-paste.
        echo "Emphasize BLUE at $DIRECTORY"
        echo "$BLUE" >> "$DIRECTORY"
        ;;
    *)
        echo "$ORANGE" >> "$DIRECTORY"
esac
| true |
61b4c1029f179b664b18ff21ad2df33be5c1f8e7 | Shell | paulbdavis/doti3 | /blocks/vpn | UTF-8 | 1,284 | 3.421875 | 3 | [] | no_license | #!/usr/bin/env bash
# i3blocks blocklet: show the active openvpn-client VPN and cycle to the next
# configured VPN on left click. The list of VPN names comes from the sourced
# ~/.config/vpns file, which is expected to populate the `vpns` array.
declare -a vpns
. "$HOME/.config/vpns"

vpn_count=${#vpns[@]}

# Name of the currently active openvpn-client systemd unit, stripped down to
# the bare VPN name (empty when no client unit is active).
vpn_name="$(systemctl list-units --plain --state=active | grep openvpn-client@ | awk '{print $1}' | sed -e 's/openvpn-client@//' -e 's/.service//')"

# Locate the active VPN's position in the configured list (-1 = none active).
index=0
current_index=-1
while [[ "${index}" -lt "${vpn_count}" ]]
do
    if [[ "${vpn_name}" == "${vpns[$index]}" ]]
    then
        current_index="${index}"
    fi
    ((index++))
done

new_index="${current_index}"

# BLOCK_BUTTON is set by i3blocks on click events; button 1 advances to the
# next VPN in the list.
if [[ -n "$BLOCK_BUTTON" ]]
then
    case $BLOCK_BUTTON in
        1) ((new_index++));;
        # 4) ((new_index++));;
        # 5) ((new_index--));;
    esac
fi

# On a change request: wrap the index around the list, stop the old client
# unit, start the new one, then re-read the active VPN name.
if [[ "${new_index}" -ne "${current_index}" ]]
then
    if [[ "${new_index}" -lt 0 ]]
    then
        let "new_index = ${vpn_count} - 1"
    elif [[ "${new_index}" -ge "${vpn_count}" ]]
    then
        new_index=0
    fi
    new_vpn="${vpns[$new_index]}"
    sudo systemctl stop "openvpn-client@${vpn_name}"
    sudo systemctl start "openvpn-client@${new_vpn}"
    vpn_name="$(systemctl list-units --plain --state=active | grep openvpn-client@ | awk '{print $1}' | sed -e 's/openvpn-client@//' -e 's/.service//')"
fi

# Red when disconnected, green when a VPN is up.
# NOTE(review): `lock` is assigned an empty string in both branches — this
# looks like stripped icon glyphs (e.g. a font-awesome lock); confirm against
# the original file encoding.
color="#dca3a3"
lock=""
if [[ -n "$vpn_name" ]]
then
    color="#7f9f7f"
    lock=""
fi

# i3blocks output protocol: full text, short text (blank), color.
echo $lock $vpn_name
echo
echo $color
| true |
82ddf6f36d4dd308bf9274d199ae0a42ae148bf6 | Shell | Mustaavalkosta/android_build_scripts | /include/snapshot.sh | UTF-8 | 3,133 | 3.640625 | 4 | [] | no_license | #!/bin/bash
# Build script for M release
# Builds an unofficial CyanogenMod snapshot for one device, records revision /
# changelog metadata, and uploads the results to the distribution servers.
# Expects CM_VERSION and RELEASE_NAME in the environment; LOCAL_BASE_DIR,
# generate_changelog, generate_revisions and sync_basketbuild are expected to
# be provided by the script that sources this file (NOTE(review): confirm).

# ccache variables
#export USE_CCACHE=1
#export CCACHE_DIR=/home/mustaavalkosta/storage/ccache-3.1.9

# rsync retry count
MAX_RETRIES=10

if [ -z "$CM_VERSION" ]
then
    echo "CM_VERSION is not set."
    exit 0
fi

if [ -z "$RELEASE_NAME" ]
then
    echo "RELEASE_NAME is not set."
    exit 0
fi

# Android source tree root
SOURCE_ROOT=/home/mustaavalkosta/storage/cm/$CM_VERSION/snapshot

#######################################
# Build one device's snapshot: repo-sync the tree, run the CM build, collect
# the zip + revision/changelog files, then upload via rsync/ssh/basketbuild.
# Arguments: $1 - device codename
#######################################
build()
{
    if [ -z "$1" ]
    then
        echo "Insufficient parameters. Usage: $FUNCNAME [device]"
        exit 0
    fi

    # Device
    local DEVICE="$1"

    # Local dirs on codefi.re server (e.g. cm-11-0-unofficial-<device>)
    local PROJECT_DIR="cm-$(echo $CM_VERSION |tr . -)-unofficial-$DEVICE"

    # Main output dir
    local OUTPUT_DIR="$LOCAL_BASE_DIR/$PROJECT_DIR/snapshots"

    # Check if output dirs exist and create them if they don't
    if [ ! -d "$OUTPUT_DIR" ]
    then
        mkdir -p "$OUTPUT_DIR"
    fi

    if [ ! -d "$OUTPUT_DIR/revisions" ]
    then
        mkdir -p "$OUTPUT_DIR/revisions"
    fi

    if [ ! -d "$OUTPUT_DIR/changelogs" ]
    then
        mkdir -p "$OUTPUT_DIR/changelogs"
    fi

    # Run build
    cd "$SOURCE_ROOT"
    repo sync local_manifest --force-sync # update manifest to bring in manifest changes first
    repo sync -j8 -d --force-sync

    # Check for sync error
    if [ $? -ne 0 ]
    then
        exit 1
    fi

    REVISION_TIMESTAMP=$(date -u +"%Y-%m-%d %R %Z")
    source build/envsetup.sh
    lunch cm_$DEVICE-userdebug
    make clean
    # RELEASE_NAME ends up embedded in the zip name via the build id.
    TARGET_UNOFFICIAL_BUILD_ID="$RELEASE_NAME" mka bacon

    # Check for build fail
    if [ $? -eq 0 ]
    then
        cp -v "$SOURCE_ROOT"/out/target/product/$DEVICE/cm-$CM_VERSION-*-UNOFFICIAL-$RELEASE_NAME-$DEVICE.zip* "$OUTPUT_DIR"
        ZIPNAME=`find $SOURCE_ROOT/out/target/product/$DEVICE/cm-$CM_VERSION-*-UNOFFICIAL-$RELEASE_NAME-$DEVICE.zip -exec basename {} .zip \;`
        # Most recent previous revisions file, used as the changelog baseline.
        LAST_REVISIONS=`find $OUTPUT_DIR/revisions -maxdepth 1 -type f | sort | tail -n 1`
        if [ ! -z "$LAST_REVISIONS" ]
        then
            NEW_REVISIONS="$OUTPUT_DIR/revisions/$ZIPNAME.txt"
            CHANGELOG="$OUTPUT_DIR/changelogs/$ZIPNAME.changelog"
            generate_changelog "$LAST_REVISIONS" "$NEW_REVISIONS" "$CHANGELOG" "$REVISION_TIMESTAMP"
        else
            # First build: only record revisions, no changelog to diff.
            NEW_REVISIONS="$OUTPUT_DIR/revisions/$ZIPNAME.txt"
            generate_revisions "$NEW_REVISIONS" "$REVISION_TIMESTAMP"
        fi
        make clean
    else
        echo "##############################################################"
        echo "##                       BUILD FAILED                       ##"
        echo "##############################################################"
        exit 1
    fi

    # Sync with opendesireproject.org
    # `false` primes $? so the retry loop's first iteration always runs.
    i=0
    false
    while [ $? -ne 0 -a $i -lt $MAX_RETRIES ]
    do
        i=$[$i+1]
        rsync -avvru -e ssh --delete --timeout=120 "$LOCAL_BASE_DIR/$PROJECT_DIR" "mustaavalkosta@opendesireproject.org:~/dl.opendesireproject.org/www/"
    done
    # Refresh the OTA index on the remote side.
    ssh mustaavalkosta@opendesireproject.org 'cd ~/ota-scanner/ && python scanner.py'

    # Basketbuild
    sync_basketbuild "$LOCAL_BASE_DIR/$PROJECT_DIR/" "/$PROJECT_DIR"
}
| true |
e803c7c4bbdb451983e0d816244a7d9587b7ef71 | Shell | luzi82/codelog.flask | /s58_aws_deploy.sh | UTF-8 | 749 | 2.984375 | 3 | [] | no_license | #!/bin/bash -e
# Stage a clean copy of the project's Python sources and templates into a
# temp directory, then run `serverless create_domain` + `deploy` from there.
# Expects PROJECT_ROOT_PATH from _env.sh; STAGE defaults to "dev".
. _env.sh

if [ -z "${STAGE+x}" ]; then export STAGE=dev; fi

MY_TMP_DIR_PATH="${PROJECT_ROOT_PATH}/aws.deploy.tmp"

rm -rf "${MY_TMP_DIR_PATH}"
mkdir -p "${MY_TMP_DIR_PATH}"

SERVERLESS="${PROJECT_ROOT_PATH}/aws_env/node_modules/.bin/serverless"
"${SERVERLESS}" --version

. "${PROJECT_ROOT_PATH}/aws_env/venv/bin/activate"

cd "${PROJECT_ROOT_PATH}/src"
cp "${PROJECT_ROOT_PATH}/src/requirements.txt" "${MY_TMP_DIR_PATH}/"
# FIX: `cp --parents \`find ...\`` word-splits any path containing whitespace;
# let find invoke cp per file instead (--parents preserves the ./sub/dir tree).
find . -name '*.py' -exec cp --parents {} "${MY_TMP_DIR_PATH}/" \;
find . -name '*.tmpl' -exec cp --parents {} "${MY_TMP_DIR_PATH}/" \;
cd "${MY_TMP_DIR_PATH}"
cp "${PROJECT_ROOT_PATH}/aws/serverless.yml" "${MY_TMP_DIR_PATH}/"

"${SERVERLESS}" create_domain --stage "${STAGE}"
"${SERVERLESS}" deploy --stage "${STAGE}" -v

cd "${PROJECT_ROOT_PATH}"
rm -rf "${MY_TMP_DIR_PATH}"
| true |
69eaca506a2920fa43f25885c60613c6a38dbcfe | Shell | levismad/BlenderUrlChecker | /script.sh | UTF-8 | 421 | 3.078125 | 3 | [] | no_license | #!/bin/bash
# Walk the [CURRENT, TOTAL] range in windows of 500 and run index.js once per
# window, passing the window bounds via the BARRAMENTO variable (cross-env).
echo "Bash version ${BASH_VERSION}..."
# Show every environment variable whose definition mentions CURRENT.
echo "current: "$(env | grep CURRENT)
# Coerce the CURRENT and TOTAL env vars to integers via arithmetic expansion.
currentStr=$(printenv CURRENT)
CURRENT=$(($currentStr + 0))
currentStr=$(printenv TOTAL)
total=$(($currentStr + 0))
# Batch/window size.
default=500
for (( i=$CURRENT; i<=$total; i+= $default ))
do
echo "current: "$CURRENT
# NOTE(review): CURRENT is advanced to the window end *before* the batch
# runs, so the "current" printed above is the previous window's end value —
# confirm that ordering is intended.
CURRENT=$(($default + $i))
cross-env BARRAMENTO=$i,$CURRENT node index.js
export CURRENT
done | true |
93141948d7d76ff279895a06bd971968adf5c732 | Shell | alejandro1395/GenPhenprimates | /Quality_genome_bottle/CDS_sequences/Individuals_create_input_JA.sh | UTF-8 | 534 | 2.71875 | 3 | [] | no_license | #!/bin/bash
module purge
module load gcc/4.9.3-gold
module load PYTHON/3.6.3

# Collect the per-individual private-CDS tables for every primate species and
# write their paths (one per line) into the job-array input file.
INDIR=/scratch/devel/lkuderna/PGDP/project_NIST/GENE_VARIANTS/
SRC=/scratch/devel/avalenzu/PhD_EvoGenom/GenomPhenom200primates/src/Quality_genome_bottle/CDS_sequences/

#create variable file
touch Individuals_input_JA.txt

# FIX: iterate the glob directly instead of word-splitting `ls` output, so
# paths containing whitespace survive; skip the literal pattern when nothing
# matches.
for filepath in "${INDIR}"*/*private_CDS.tsv;
do [ -e "$filepath" ] || continue
echo "$filepath">> Individuals_input_JA.txt
echo "$filepath"
done
| true |
6fa4e232e369260830d96c0895ec399dc62d2b79 | Shell | tdgfederal/automated-analytics | /run.sh | UTF-8 | 1,428 | 3.453125 | 3 | [] | no_license | PROJECT="automated-analytics"
# Terraform wrapper for the automated-analytics project.
# Usage: ./run.sh <action> <env>   (action: init|plan|apply|destroy)
DEFAULT_ACTION="plan"
BUCKET="tdg-automated-analytics"
REGION="us-east-1"
DEFAULT_ENV="dev"
DEFAULT_VARFILE="terraform.tfvars"
PROJECT_DIR="./"

# First argument selects the action, second selects the environment; both
# fall back to the defaults above.
if [[ -z "${1:-}" ]]; then ACTION=$DEFAULT_ACTION; else ACTION="$1"; fi
# FIX: the original ran `… || ACTION="$2"` here, so a supplied environment
# argument clobbered the requested action and ENV was never set from $2.
if [[ -z "${2:-}" ]]; then ENV=$DEFAULT_ENV; else ENV="$2"; fi
# FIX: DEFAULT_VARFILE was declared but never used, leaving $VARFILE empty in
# the init log line below.
VARFILE="${VARFILE:-$DEFAULT_VARFILE}"

# Initialise terraform with an S3 backend keyed by region/project/env.
function init {
    printf "\e[1;35mRunning Terraform init for $PROJECT $ENV in $REGION with global/$REGION/$VARFILE and $PROJECT_DIR/$ENV/$REGION/$VARFILE\e[0m\n"
    terraform init -backend-config="bucket=$BUCKET" -backend-config="key=$REGION-$PROJECT-$ENV.tfstate" \
        -backend-config="region=$REGION" -backend=true -get=true -input=false \
        -backend-config="encrypt=true" "$PROJECT_DIR"
}

# init, then show the execution plan.
function plan {
    init
    printf "\e[1;35mRunning Terraform plan\e[0m\n"
    terraform plan
}

# plan, then apply the changes.
function apply {
    plan
    printf "\e[1;35mRunning Terraform apply\e[0m\n"
    terraform apply
}

# plan, then destroy the managed resources.
function destroy {
    plan
    printf "\e[1;31mRunning Terraform destroy\e[0m\n"
    terraform destroy "$PROJECT_DIR"
}

# Print CLI help for bad/unknown arguments.
function usage {
    printf "\033[1;31mArgument(s) error\033[0m\n"
    echo "usage   : ./run.sh <action> <env> <region> "
    echo "        : Default project is live. Action can be init, plan, apply, graph, or destroy."
    echo "example : ./run.sh init dev"
}
case "$ACTION" in
"init") init ;;
"plan") plan ;;
"apply") apply ;;
"destroy") destroy ;;
*) usage ;;
esac | true |
fd8ab6e5ae7f40fb32af8728a073e49c9d877cd2 | Shell | lnd3/im-tools | /imlogin.sh | UTF-8 | 9,400 | 2.859375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
source utils.sh
## clean up
removeFileIfExist cookiefile
removeFileIfExist location
removeFileIfExist location2
removeFileIfExist oauth_token
removeFileIfExist pupilids
removeFileIfExist output.txt
##
username=$1
password=$2
if test -z "$username"
then
read -p "Username: " username
fi
if test -z "$password"
then
read -s -p "Password: " password
fi
## Fetch initial redirect first
curl -X GET 'https://hub.infomentor.se' -b cookiefile -c cookiefile -i > output0.txt
cat output0.txt | dos2unix | grep 'Location:'| cut -d' ' -f2 > location
location="https://hub.infomentor.se"$(cat location)
echo "********************************************"
echo " > Extracted location: "$location
echo "********************************************"
## Follow initial redirect
curl -X GET $location -b cookiefile -c cookiefile -i\
| grep -oP "(?<=(oauth_token\" value=\"))[\w+=/]+" > oauth_token
oauth_token=$(cat oauth_token)
echo "********************************************"
echo " > Extracted oauth token: "$oauth_token
echo "********************************************"
## Build auth structure
jsonblob=$(cat data.json)
jsonblob1=${jsonblob/username/$username}
jsonblob2=${jsonblob1/password/$password}
## Initial login page - set the cookies [ASP.NET_SessionId, BIGipServerinfomentor]
curl -X POST 'https://infomentor.se/swedish/production/mentor/' \
-H 'Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3' \
-H 'Accept-Encoding: deflate, br' \
-H 'Accept-Language: sv-SE,sv;q=0.9,en-US;q=0.8,en;q=0.7' \
-H 'Cache-Control: no-cache' \
-H 'Connection: keep-alive' \
-H 'Content-Type: application/x-www-form-urlencoded' \
-H 'Origin: https://hub.infomentor.se' \
-H 'Pragma: no-cache' \
-H 'Referer: https://hub.infomentor.se/Authentication/Authentication/Login?ReturnUrl=%2F' \
-H 'Sec-Fetch-Mode: navigate' \
-H 'Sec-Fetch-Site: same-site' \
-H 'Upgrade-Insecure-Requests: 1' \
-H 'User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.120 Safari/537.36' \
-H 'cache-control: no-cache' \
-d oauth_token=$oauth_token \
-b cookiefile -c cookiefile -o output1.txt
######################################
## Send credentials - sets the cookies [.ASPXAUTH, NotandaUppl]
curl -X POST 'https://infomentor.se/swedish/production/mentor/' \
-H 'Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3' \
-H 'Content-Type: application/x-www-form-urlencoded' \
-H 'Origin: https://infomentor.se' \
-H 'Sec-Fetch-Mode: navigate' \
-H 'Sec-Fetch-User: ?1' \
-H 'Upgrade-Insecure-Requests: 1' \
-H 'User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.120 Safari/537.36' \
-H 'cache-control: no-cache' \
--data-ascii $jsonblob2 \
-b cookiefile -c cookiefile -o output2.txt
## Enable pin page - sets the cookies [984527]
curl -X GET 'https://infomentor.se/Swedish/Production/mentor/Oryggi/PinLogin/EnablePin.aspx' \
-H 'Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3' \
-H 'Accept-Encoding: deflate, br' \
-H 'Accept-Language: sv-SE,sv;q=0.9,en-US;q=0.8,en;q=0.7' \
-H 'Cache-Control: no-cache' \
-H 'Connection: keep-alive' \
-H 'Pragma: no-cache' \
-H 'Referer: https://infomentor.se/swedish/production/mentor/' \
-H 'Sec-Fetch-Mode: navigate' \
-H 'Sec-Fetch-Site: same-origin' \
-H 'Sec-Fetch-User: ?1' \
-H 'Upgrade-Insecure-Requests: 1' \
-H 'User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.120 Safari/537.36' \
-H 'cache-control: no-cache' \
-b cookiefile -c cookiefile -o output3.txt
######################################
## Send dont activate pin
curl -X POST 'https://infomentor.se/Swedish/Production/mentor/Oryggi/PinLogin/EnablePin.aspx' \
-H 'Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3' \
-H 'Accept-Encoding: deflate, br' \
-H 'Accept-Language: sv-SE,sv;q=0.9,en-US;q=0.8,en;q=0.7' \
-H 'Cache-Control: no-cache' \
-H 'Connection: keep-alive' \
-H 'Content-Type: application/x-www-form-urlencoded' \
-H 'Origin: https://infomentor.se' \
-H 'Pragma: no-cache' \
-H 'Referer: https://infomentor.se/Swedish/Production/mentor/Oryggi/PinLogin/EnablePin.aspx' \
-H 'Sec-Fetch-Mode: navigate' \
-H 'Sec-Fetch-Site: same-origin' \
-H 'Sec-Fetch-User: ?1' \
-H 'Upgrade-Insecure-Requests: 1' \
-H 'User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.120 Safari/537.36' \
-H 'cache-control: no-cache' \
-d '__EVENTTARGET=aDontActivatePin&__EVENTARGUMENT=&__VIEWSTATE=%2FwEPDwULLTExNjgzNDAwMjdkZEPHrLmSUp3IKh%2FYk4WyEHsBQdMx&__VIEWSTATEGENERATOR=7189AD5F&__EVENTVALIDATION=%2FwEdAANT4hIcRyCqQMJVzIysT0grY9gRTC512bYsbnJ8gQeUrlnllTXttyQbAlgyFMdw9va%2BKdVQbZxLkS3XlIJc4f5qeOcV0g%3D%3D' \
-b cookiefile -c cookiefile -o output4.txt
## login
curl -X GET 'https://hub.infomentor.se/authentication/authentication/login?apitype=im1&forceOAuth=true' \
-H 'Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3' \
-H 'Accept-Encoding: deflate, br' \
-H 'Accept-Language: sv-SE,sv;q=0.9,en-US;q=0.8,en;q=0.7' \
-H 'Cache-Control: no-cache' \
-H 'Connection: keep-alive' \
-H 'Pragma: no-cache' \
-H 'Referer: https://infomentor.se/Swedish/Production/mentor/Oryggi/PinLogin/EnablePin.aspx' \
-H 'Sec-Fetch-Mode: navigate' \
-H 'Sec-Fetch-Site: same-site' \
-H 'Sec-Fetch-User: ?1' \
-H 'Upgrade-Insecure-Requests: 1' \
-H 'User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.120 Safari/537.36' \
-H 'cache-control: no-cache' \
-b cookiefile -c cookiefile -i -o output5.txt
## mentor - redirecting to login page so auth was unsuccessful previously
curl -X POST 'https://infomentor.se/swedish/production/mentor/' \
-H 'Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3' \
-H 'Accept-Encoding: deflate, br' \
-H 'Accept-Language: sv-SE,sv;q=0.9,en-US;q=0.8,en;q=0.7' \
-H 'Cache-Control: no-cache' \
-H 'Connection: keep-alive' \
-H 'Content-Type: application/x-www-form-urlencoded' \
-H 'Origin: https://hub.infomentor.se' \
-H 'Pragma: no-cache' \
-H 'Referer: https://hub.infomentor.se/authentication/authentication/login?apitype=im1&forceOAuth=true' \
-H 'Sec-Fetch-Mode: navigate' \
-H 'Sec-Fetch-Site: same-site' \
-H 'Sec-Fetch-User: ?1' \
-H 'Upgrade-Insecure-Requests: 1' \
-H 'User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.120 Safari/537.36' \
-H 'cache-control: no-cache' \
-d oauth_token=$oauth_token \
-b cookiefile -c cookiefile -s -i \
-o output6.txt
cat output6.txt | dos2unix | grep 'Location:'| cut -d' ' -f2 > location2
location2=$(cat location2)
echo "********************************************"
echo " > Extracted redirect location: "$location2
echo "********************************************"
if test -z "$location2"
then
echo " > Failed to extract location, exiting..."
exit
fi
## login callback - using the 'location' in the previous response
curl -X GET $(cat location2) \
-H 'Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3' \
-H 'Accept-Encoding: deflate, br' \
-H 'Accept-Language: sv-SE,sv;q=0.9,en-US;q=0.8,en;q=0.7' \
-H 'Cache-Control: no-cache' \
-H 'Connection: keep-alive' \
-H 'Pragma: no-cache' \
-H 'Referer: https://hub.infomentor.se/authentication/authentication/login?apitype=im1&forceOAuth=true' \
-H 'Sec-Fetch-Mode: navigate' \
-H 'Sec-Fetch-Site: same-origin' \
-H 'Sec-Fetch-User: ?1' \
-H 'Upgrade-Insecure-Requests: 1' \
-H 'User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.120 Safari/537.36' \
-H 'cache-control: no-cache' \
-b cookiefile -c cookiefile -o output7.txt
## get pupil links so we can extract pupil id's
curl -X GET \
"https://hub.infomentor.se/#/" \
-H 'Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3' \
-H 'Accept-Encoding: gzip, deflate, br' \
-H 'Accept-Language: sv-SE,sv;q=0.9,en-US;q=0.8,en;q=0.7' \
-H 'Cache-Control: no-cache' \
-H 'Connection: keep-alive' \
-H 'Pragma: no-cache' \
-H 'Referer: https://hub.infomentor.se/authentication/authentication/login?apitype=im1&forceOAuth=true' \
-H 'Sec-Fetch-Mode: navigate' \
-H 'Sec-Fetch-Site: same-origin' \
-H 'Sec-Fetch-User: ?1' \
-H 'Upgrade-Insecure-Requests: 1' \
-H 'User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.120 Safari/537.36' \
-H 'cache-control: no-cache' \
-b cookiefile -c cookiefile \
| gunzip - \
| grep -oP "(?<=(/Account/PupilSwitcher/SwitchPupil/))[0-9]*" \
| sort \
| uniq > pupilids
| true |
c6df12b96bfc389566a3e699b62e961655e626c0 | Shell | josealves94/script_shell_post_install | /RUN_list.sh | UTF-8 | 788 | 3.125 | 3 | [] | no_license | #!/bin/bash
### LIST DES SERVEURS ###
#########################
LIST_SERVERS=$(cat LIST_SSH_Dlebre1_YES.txt)
### SCRIPT A POUSSER ###
########################
FILE_BDD="bdd_serv.txt"
#export FILE_BDD="bdd_serv.txt"
echo '"NOM DU SERVEUR";"NOM DE LA BASE";"SGBD (TYPE ET VERSION)"'
for server in $LIST_SERVERS
do
#echo $server
#echo $server >> $FILE_BDD
#export server="$server"
scp -i /root/.ssh/id_dsa_gs -q -o "BatchMode=yes" audit_bdd.sh $server:/root
scp -i /root/.ssh/id_dsa_gs -q -o "BatchMode=yes" $FILE_BDD $server:/root
ssh -i /root/.ssh/id_dsa_gs $server /root/audit_bdd.sh
ssh -i /root/.ssh/id_dsa_gs $server rm -f /root/audit_bdd.sh
#echo ===============
#echo =============== >> $FILE_BDD
done
| true |
fb9d41b3fd1c694871abffeee6ad2a55fe99c965 | Shell | kurisu/dotfiles | /.bash.d/iterm.sh | UTF-8 | 784 | 3.453125 | 3 | [] | no_license | # $1 = type; 0 - both, 1 - tab, 2 - title
# rest = text
setTerminalText () {
# echo works in bash & zsh
local mode=$1 ; shift
echo -ne "\033]$mode;$@\007"
}
newTerminalWindow () {
osascript &>/dev/null <<EOF
tell application "iTerm"
create window with default profile
end tell
EOF
}
stt_both () { setTerminalText 0 $@; }
stt_tab () { setTerminalText 1 $@; }
stt_title () { setTerminalText 2 $@; }
function jcurl() {
curl "$@" | json | pygmentize -l json
}
export -f jcurl
function jcat() {
cat "$@" | json | pygmentize -l json
}
export -f jcat
function jcurlauth() {
curl -H "Accept: application/json" -H "Content-Type: application/json" -H "X-User-Email: $1" -H "X-User-Token: $2" ${@:3} | json | pygmentize -l json
}
export -f jcurlauth
| true |
9c259f71c415ef6b6b3c3dbb2cd229c7b4d8251b | Shell | auskim/dotfiles | /sources/bashrc | UTF-8 | 1,105 | 3.359375 | 3 | [] | no_license | #!/bin/bash
############
# Setup
############
# Use vim key bindings in bash
set -o vi
############
# Source
############
source <(cat $HOME/.shell/*)
source <(cat $DOT_DIR/bash/*.bash)
############
# Variables
############
# Add data and time to history
export HISTTIMEFORMAT="%d/%m/%y %T "
# Ignore duplicates and commands that start with whitespace
export HISTCONTROL=ignoreboth
# Set history file size
export HISTSIZE=100
# Add color matches to grep by default
export GREP_OPTIONS="--color=auto"
#TODO: figure out termcap color options
# Add color options to less
#export LESS="--RAW-CONTROL-CHARS"
#[[ -f ~/.LESS_TERMCAP ]] && . ~/.LESS_TERMCAP
############
# History
############
# Review commands with history expansion before executing
shopt -s histverify
# Append to the history file instead of overwriting it
shopt -s histappend
# Allow editing of failed substitutions
shopt -s histreedit
# Set history file location
export HISTFILE=$HOME/.shell/.bash_history
##########
# Import custom bashrc
##########
if [ -f $HOME/.bashrc_original ]; then
source $HOME/.bashrc_original
fi
| true |
779e022e1c2abfa4ff43aecf6e83050576ec15b0 | Shell | knighton/fontasy | /fontasy/pre/preproc.sh | UTF-8 | 5,125 | 3.765625 | 4 | [] | no_license | #!/bin/sh
#
# Font dataset preprocessing pipeline.
#
# 1. Find fonts
# 2. Dedupe fonts
# 3. Visualize font properties
# 4. Filter fonts
# 5. List characters.
# 6. Visualize character frequencies.
# 7. Calculate heights.
# 8. Visualize heights.
# 9. Make dataset.
# 10. Visualize dataset.
# 11. Split dataset.
PY=python3
TTF_ROOT=~/dataset/fonts/
PROC_ROOT=data/pre/
FONT_PROP=regular
CHRS=33..127
MIN_FONT_SIZE=1
MAX_FONT_SIZE=64
FONT_SIZE=40
IMG_HEIGHT=64
MAX_ASCENT=46
MAX_DESCENT=18
IMG_WIDTH=48
MIN_FONT_OK_FRAC=0.8
VAL_FRAC=0.1
rm -rf $PROC_ROOT
mkdir -p $PROC_ROOT
on_begin() {
T0=`date +%s.%N`
}
on_end() {
T1=`date +%s.%N`
$PY -c "print('[%8.3f sec] %s' % ($T1 - $T0, '$1'))"
}
# 1. Find fonts.
#
# Traverse the given directory tree and list all font files found, with their
# hashes and font names. Results in a JSONL file with entries:
#
# {
# file: str,
# size: int,
# sha256: str,
# name: str,
# }
on_begin
$PY -m fontasy.pre.find_fonts \
--in $TTF_ROOT \
--out $PROC_ROOT/found_fonts.jsonl
on_end "1. Find fonts"
# 2. Dedupe fonts.
#
# Drop the fonts with hash or name collisions, resulting in a new list (same
# format as previous step).
on_begin
$PY -m fontasy.pre.dedupe_fonts \
--in $PROC_ROOT/found_fonts.jsonl \
--out $PROC_ROOT/deduped_fonts.jsonl \
--out_hash2files $PROC_ROOT/dedupe_hash2files.jsonl \
--out_name2files $PROC_ROOT/dedupe_name2files.jsonl
on_end "2. Dedupe fonts"
# 3. Visualize font properties.
#
# Collect and dump some statistics about the properties of the fonts we have.
#
# Probably want to review and drop unusual properties for distribution sanity.
on_begin
$PY -m fontasy.pre.vis_font_props \
--in $PROC_ROOT/deduped_fonts.jsonl \
--out $PROC_ROOT/font_properties.txt
on_end "3. Visualize font properties"
# 4. Filter fonts.
#
# Just drop all non-regular fonts, resulting in a new list (same format).
on_begin
$PY -m fontasy.pre.filter_fonts \
--in $PROC_ROOT/deduped_fonts.jsonl \
--font_prop $FONT_PROP \
--out $PROC_ROOT/fonts.jsonl
on_end "4. Restrict to regular fonts"
echo
echo Fonts:
F=$PROC_ROOT/found_fonts.jsonl
N=`cat $F | wc -l`
echo - Found $N \($F\)
F=$PROC_ROOT/deduped_fonts.jsonl
N=`cat $F | wc -l`
echo - Deduped $N \($F\)
F=$PROC_ROOT/fonts.jsonl
N=`cat $F | wc -l`
echo - Filtered $N \($F\)
echo
# 5. List characters.
#
# Get the list of supported code points for each font.
on_begin
$PY -m fontasy.pre.list_chars \
--in $PROC_ROOT/fonts.jsonl \
--out $PROC_ROOT/chars.jsonl
on_end "5. List characters per font"
# 6. Visualize character frequencies.
#
# Show the distribution of supported characters in the selected fonts, for
# deciding what broadly-available characters to include in dataset.
on_begin
$PY -m fontasy.pre.vis_char_freqs \
--in $PROC_ROOT/chars.jsonl \
--out_by_char $PROC_ROOT/char_freqs_by_char.jsonl \
--out_by_freq $PROC_ROOT/char_freqs_by_freq.jsonl \
--out_table $PROC_ROOT/char_freqs_table.txt
on_end "6. Visualize characters per font (to decide characters to use)"
# 7. Calculate heights.
#
# Calculate the heights (ascent and descent) for every font for every sane font
# size. This is used to decide ideal font size to use (trading off font size
# and font coverage).
on_begin
$PY -m fontasy.pre.calc_heights \
--in $PROC_ROOT/fonts.jsonl \
--min_font_size $MIN_FONT_SIZE \
--max_font_size $MAX_FONT_SIZE \
--out $PROC_ROOT/heights.i16
on_end "7. Get heights for every font size"
# 8. Visualize heights.
#
# Display visualizations to help determine the optimal font size to use.
on_begin
$PY -m fontasy.pre.vis_heights \
--in $PROC_ROOT/heights.i16 \
--min_font_size $MIN_FONT_SIZE \
--max_font_size $MAX_FONT_SIZE \
--img_height $IMG_HEIGHT \
--out_coverage $PROC_ROOT/heights_coverage.txt \
--out_best $PROC_ROOT/heights_best.csv
on_end "8. Visualize font heights (to decide font size to use)"
# 9. Make dataset.
#
# Draw the glyphs to a binary table.
on_begin
$PY -m fontasy.pre.make_dataset \
--in $PROC_ROOT/fonts.jsonl \
--chars $CHRS \
--font_size $FONT_SIZE \
--max_ascent $MAX_ASCENT \
--max_descent $MAX_DESCENT \
--img_width $IMG_WIDTH \
--min_font_ok_frac $MIN_FONT_OK_FRAC \
--out $PROC_ROOT/dataset/
on_end "9. Make dataset (fonts x chars)"
# 10. Visualize dataset.
#
# Show important distribution information about the dataset created.
on_begin
$PY -m fontasy.pre.vis_dataset \
--in $PROC_ROOT/dataset/ \
--out_font_freqs $PROC_ROOT/dataset_font_freqs.png \
--out_char_freqs $PROC_ROOT/dataset_char_freqs.png \
--out_char_table $PROC_ROOT/dataset_char_table.txt \
--out_heatmap_txt $PROC_ROOT/dataset_heatmap.txt \
--out_heatmap_img $PROC_ROOT/dataset_heatmap.png \
--out_heatmap_img_log10 $PROC_ROOT/dataset_heatmap_log10.png
on_end "10. Analyze dataset distributions"
# 11. Split dataset.
#
# Divide samples into training and validation splits.
on_begin
$PY -m fontasy.pre.split_dataset \
--dataset $PROC_ROOT/dataset/ \
--val_frac $VAL_FRAC
on_end "11. Create dataset splits"
| true |
97cfd3debeb6a74ee693206c398cc683080b2c3c | Shell | amitsagargowda/LinuxSystemProgramming | /Shell/Bank.sh | UTF-8 | 1,921 | 4.375 | 4 | [] | no_license | #! /bin/bash
###############################################################################
# Student ID :
#
# Student Name :
#
# File Name : debit.sh
#
# This script should take two arguments. A filename that points to the account
# balance, and a number that indicates the debit amount. The script should
# reduce the account balance by the debit amount, and save the result.
###############################################################################
BALANCEFILE=$1
DEBITAMT=0
main()
{
#shift filename it is not required
shift
#while loop till all number are read and debited
while [ -n "$1" ]; do
DEBITAMT=$1
#check if balance if valid number
#if valid perform debit operation
if echo "$DEBITAMT" | grep -qE '^[0-9]+$'; then
read balance < $BALANCEFILE
echo "Debiting Amount : \$$DEBITAMT"
newbalance=$((balance - DEBITAMT))
#check if sufficient balance present to debit
#if not throw error and exit
if [ $newbalance -lt 0 ]
then
printf "Insufficient Balance \n"
exit 1
fi
echo $newbalance > $BALANCEFILE
else
echo "Desposit amount is not a valid number"
fi
#shift to next number
shift
done
}
validatefile()
{
# Balance file is required for all operation
# check for it and throw error
if [ ! -f $BALANCEFILE ]
then
printf "$BALANCEFILE file missing\n"
printf "Balance file is required for debit\n"
exit 1
fi
read balance < $BALANCEFILE
#check if balance in balance file is valide
if echo "$balance" | grep -qE '^[0-9]+$'; then
printf ""
else
echo "Account Balance in file is a not a valid number"
exit 1
fi
}
#check if valid number of arguments are passed
if [ $# -lt 2 ]
then
printf "Usage: \n"
printf "\t ./debit.sh <balance file> <desposit amout>\n"
printf "\t Example: ./debit.sh balance.txt 100\n"
exit 1
fi
#validate balance file content
validatefile
#first entry main
main $@
| true |
9d0e49af4a35f097af8a52abe1306b92b646dbf1 | Shell | wearecollins/MMI-Prototypes | /prototypes/_web/MMI_Performance/v3/cvt.sh | UTF-8 | 1,399 | 3.453125 | 3 | [] | no_license | #!/bin/bash
#create videos file
# $1 = dir $2 = name $3 = original file
TIME=4
TOTAL=15
cd $1
# for i in `seq 0 1`;
# do
# T=$(($i*$TIME))
# T2=$((($i+1)*$TIME))
# B=$(($i % 2))
# echo "$2 00:00:$T 00:00:$T2 $T $B" >> videos
# done
mkdir tmp
# split video
# while read vidtime to num bottom; do
# # while read -r video vidtime to num bottom; do
# B=$([ $bottom == 0 ] && echo '0' || echo 'ih/2')
# # file=tmp/$num.webm
# # file=$(printf '%q' "tmp/$num.webm")
# ffmpeg -loglevel 24 -ss $vidtime -i $2.webm -to $to -vf "crop=iw:ih/2:0:$B" tmp/$num.webm;
# done < videos.txt;
ffmpeg -loglevel 24 -ss 00:00:00 -i $2.webm -to 00:00:03 -speed 8 -vf "crop=iw/2:ih/2:0:0" tmp/1.webm;
ffmpeg -loglevel 24 -ss 00:00:03 -i $2.webm -to 00:00:06 -speed 8 -vf "crop=iw/2:ih/2:0:ih/2" tmp/2.webm;
ffmpeg -loglevel 24 -ss 00:00:06 -i $2.webm -to 00:00:09 -speed 8 -vf "crop=iw/2:ih/2:0:0" tmp/3.webm;
ffmpeg -loglevel 24 -ss 00:00:09 -i $2.webm -to 00:00:12 -speed 8 -vf "crop=iw/2:ih/2:0:ih/2" tmp/4.webm;
# ffmpeg -loglevel 24 -ss 00:00:12 -i $2.webm -to 00:00:15 -speed 8 -vf "crop=iw:ih/2:0:0" tmp/5.webm;
# bring it all back 2gether
ffmpeg -loglevel 24 -f concat -i <(for f in tmp/*.webm; do echo "file '$PWD/$f'"; done) $2_cut.webm
echo $3
# add in audio
ffmpeg -loglevel 24 -i $2_cut.webm -i $3 -c copy -map 0:0 -map 1:1 -shortest $2_merged.webm
# # kill everything
# rm videos
rm -r tmp | true |
65889bb9eed7b0166c9858cc563d60bf64a10645 | Shell | gizemuyar/OperatingSystemsLabProjects | /project1.sh | UTF-8 | 667 | 3.875 | 4 | [] | no_license | #!/bin/bash
filename=$1;
if [ -f $filename ]; then
if [ $# -eq 2 ]; then
if [ $2 = "-h" ]; then
printf "$filename";
awk 'BEGIN{FS=""}{for(i=1;i<=NF;i++)count++}END{print " contains "count" letters."}' $filename;
elif [ $2 = "-k" ]; then
printf "$filename";
awk 'BEGIN{FS=" "}{for(i=1;i<=NF;i++)count++}END{print " contains "count" words."}' $filename;
elif [ $2 = "-s" ]; then
printf "$filename";
awk 'BEGIN{FS=FNR}{for(i=1;i<=NF;i++)count++}END{print " contains "count" lines."}' $filename;
else
echo "YOU DID NOT ENTERED A VALID ARGUMENT"
fi
else
echo "THE NUMBER OF ARGUMENTS IS MISSING !"
fi
else
echo "$filename IS NOT EXISTS !"
fi
| true |
00e6bf4d8e28973afdc53bb0aaf393b0eba1b80a | Shell | barnabycolby/barnabycolby.io | /nginx/build.sh | UTF-8 | 1,264 | 4.90625 | 5 | [] | no_license | #!/bin/bash
# This script replaces {{top}} instances in Nginx config files with the first script argument, providing variable-like functionality.
# The generated files are put into a built directory, leaving the originals in tact.
# Exit on failure
set -e
# Check that the script has been called with the first argument
# %/ removes the trailing slash if it exists
TOP="${1%/}"
if [ -z "${TOP}" ]; then
echo "The first argument to this script should be the value of the {{top}} variable."
exit 1
fi
# Check that the script has been called with the second argument
# %/ removes the trailing slash if it exists
NGINX_DIR="${2%/}"
if [ -z "${NGINX_DIR}" ]; then
echo "The second argument to this script should be the path of a directory containing nginx files."
exit 2
fi
# Escape the given argument so that we can pass it to sed
escaped_top_value="$(echo "${TOP}" | sed -e 's/[\/&]/\\&/g')"
# Remake the built directory
nginx_built="${NGINX_DIR}/built"
if [ -d "${nginx_built}" ]; then
rm -r ${nginx_built}
fi
mkdir ${nginx_built}
for nginx_file_path in ${NGINX_DIR}/*.nginx; do
base_nginx_file_path="${nginx_file_path##*/}"
sed -- "s/{{top}}/${escaped_top_value}/g" ${nginx_file_path} > ${nginx_built}/${base_nginx_file_path}
done
| true |
832f547fb65c70423a85200e30978529c22656fd | Shell | shiitake/debian-rice | /polybar/click-handlers/ssid-click.sh | UTF-8 | 374 | 2.734375 | 3 | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | #!/usr/bin/env bash
Title="Access Point"
Body=""
NETSTAT="netstat -i | tr -s ' ' | grep wlp3s0 | cut -d' ' -f "
PROC_NET_DEV="cat /proc/dev/net | tr -s ' ' | grep wlp3s0 | cut -d' ' -f "
Body+="Identifier: ``"
Body+="IP Address:"
Body+="Physical Address:"
Body+="Encryption Type:"
Body+="Frequency: ``"
Body+="MTU: `eval $NETSTAT 2`"
notify-send -u low "$Title" "$Body"
| true |
8191b38588051452133b39b533fd4011132f6aff | Shell | tom-010/good-tests | /deploy_tools/add_ssl_certificate.sh | UTF-8 | 296 | 3 | 3 | [] | no_license | source .env
num=$(echo "${SITENAME}" | awk -F. '{print NF-1}')
if [ "${num}" -gt 1 ] # it is subdomain with more than one dot
then
sudo certbot --nginx -d $SITENAME
else
sudo certbot --nginx -d $SITENAME -d "www.${SITENAME}"
fi
sudo systemctl daemon-reload
sudo systemctl reload nginx
| true |
26c3a0b311a0e44332762da79447d35a2eb66a54 | Shell | lukaspirpamer/FSL-scripts | /rsc/scripts/fs_label2vol.sh | UTF-8 | 2,138 | 3.953125 | 4 | [] | no_license | #!/bin/bash
# Converts Freesurfer label files to NIFTI.
# Written by Andreas Heckel
# University of Heidelberg
# heckelandreas@googlemail.com
# https://github.com/ahheckel
# 07/03/2014
trap 'echo "$0 : An ERROR has occured."' ERR
set -e
Usage() {
echo ""
echo "Usage: `basename $0` <SUBJECTS_DIR> <source-subject> <hemi> <template> <opts|none > <output-dir> <FS-label1 FS-label2 ... >"
echo "Example: `basename $0` ./FS_subj subj01 lh ./FS_sess/subj01/bold/001/ftemplate.nii \"--proj frac 0 1 0.1 --fillthresh 0.3 --reg ./FS_sess/subj01/bold/register.dat\" ./nifti-mni-labels one.label two.label three.label"
echo ""
echo "NOTE: If set to \"none\" <opts> defaults to \"--proj frac 0 1 0.1 --fillthresh 0.5 --identity\""
exit 1
}
[ "$7" = "" ] && Usage
# define vars
sdir="$1" ; shift
src="$1" ; shift
hemi="$1" ; shift
templ="$1" ; shift
opts="$1" ; shift ; if [ x"$opts" = "xnone" ] ; then opts="--fillthresh 0.5 --proj frac 0 1 0.1 --identity" ; fi
outdir="$1" ; shift
labels="" ; while [ _$1 != _ ] ; do
labels="$labels $1"
shift
done
# checks
err=0
if [ ! -d $sdir/$src ] ; then echo "`basename $0` : ERROR: '$sdir/$src' not found..." ; err=1 ; fi
if [ ! -d $sdir/$trg ] ; then echo "`basename $0` : ERROR: '$sdir/$trg' not found..." ; err=1 ; fi
for label in $labels ; do
if [ ! -f $label ] ; then echo "`basename $0` : ERROR: '$label' not found..." ; err=1 ; fi
done
if [ $err -eq 1 ] ; then exit 1 ; fi
label=""
# define exit trap
trap "rm -f $outdir/file.$$ ; exit" EXIT
# create outdir
mkdir -p $outdir
# execute
rm -f $outdir/$(basename $0).cmd
echo "`basename $0` : creating commands in '$outdir/$(basename $0).cmd'..."
for label in $labels ; do
# check
touch $outdir/file.$$ ; if [ -f $(dirname $label)/file.$$ ] ; then echo "`basename $0` : ERROR: input dir. and output dir. are identical - exiting..." ; exit 1 ; fi
# execute
cmd="SUBJECTS_DIR=$sdir ; mri_label2vol --label $label --hemi $hemi --subject $src --temp $templ --o $outdir/$(basename $label).nii.gz $opts"
echo " $cmd" | tee -a $outdir/$(basename $0).cmd
done
. $outdir/$(basename $0).cmd
# done
echo "`basename $0` : done."
| true |
d56c2103945024009b6957753e8ab3c6ee88545b | Shell | wcsyzj/ljohn_ops | /lnmp/lnmp.sh | UTF-8 | 5,241 | 3 | 3 | [] | no_license | !/bin/bash
#
nginx() {
echo "安装前环境准备..."
sleep 3
yum remove nginx mysql mariadb php -y
rpm -ivh http://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
yum groupinstall "Development Tools" "Development Libraries" -y
yum install openssl-devel \
ncurses-devel \
cmake \
pcre-devel \
libxml2-devel \
bzip2-devel \
libcurl-devel \
libmcrypt-devel -y
iptables -F
systemctl stop firewalld.service
systemctl disable firewalld.service
setenforce 0
sed -i '/^SELINUX\>/d' /etc/selinux/config
echo "SELINUX=disabled" >>/etc/selinux/config
echo "Install nginx..."
sleep 3
#编译安装nginx
id nginx &>/dev/null && userdel -r nginx
groupdel nginx
groupadd -r nginx
useradd -r -g nginx nginx
tar xf $PWD/nginx-1.10.3.tar.gz
cd nginx-1.10.3
./configure \
--prefix=$dir/nginx \
--sbin-path=$dir/nginx/sbin/nginx \
--conf-path=/etc/nginx/nginx.conf \
--error-log-path=/var/log/nginx/error.log \
--http-log-path=/var/log/nginx/access.log \
--pid-path=/var/run/nginx/nginx.pid \
--lock-path=/var/lock/nginx.lock \
--user=nginx \
--group=nginx \
--with-http_ssl_module \
--with-http_flv_module \
--with-http_stub_status_module \
--with-http_gzip_static_module \
--http-client-body-temp-path=/var/tmp/nginx/client/ \
--http-proxy-temp-path=/var/tmp/nginx/proxy/ \
--http-fastcgi-temp-path=/var/tmp/nginx/fcgi/ \
--http-uwsgi-temp-path=/var/tmp/nginx/uwsgi \
--http-scgi-temp-path=/var/tmp/nginx/scgi \
--with-pcre
make && make install
mkdir -p /var/tmp/nginx/client
#添加对php的支持
sed -i '65,71s/^[[:space:]]\+#//g' /etc/nginx/nginx.conf
sed -i '45s/index.html/index.php index.html/g' /etc/nginx/nginx.conf
echo "fastcgi_param SCRIPT_FILENAME \$document_root\$fastcgi_script_name;" >> /etc/nginx/fastcgi_params
#添加环境变量
echo "export PATH=$dir/nginx/sbin:$PATH" >/etc/profile.d/nginx.sh
source /etc/profile
nginx
#Nginx测试
if curl 127.0.0.1 &>/dev/null;then
echo "Nginx is SUCCESS!"
else
echo "Nginx is Failure!"
fi
}
mysql () {
echo "Install MySQL..."
sleep 3
#编译安装MySQL
id mysql &>/dev/null && userdel -r mysql
groupadd -r mysql
useradd -g mysql -r -s /sbin/nologin -M -d /mydata/data mysql
tar xf $PWD/mysql-5.5.58.tar.gz
cd $PWD/mysql-5.5.58
cmake \
-DCMAKE_INSTALL_PREFIX=$dir/mysql \
-DMYSQL_DATADIR=/mydata/data \
-DSYSCONFDIR=/etc \
-DWITH_MYISAM_STORAGE_ENGINE=1 \
-DWITH_INNOBASE_STORAGE_ENGINE=1 \
-DWITH_MEMORY_STORAGE_ENGINE=1 \
-DWITH_READLINE=1 \
-DMYSQL_UNIX_ADDR=/var/lib/mysql/mysql.sock \
-DMYSQL_TCP_PORT=3306 \
-DENABLED_LOCAL_INFILE=1 \
-DWITH_PARTITION_STORAGE_ENGINE=1 \
-DEXTRA_CHARSETS=all \
-DDEFAULT_CHARSET=utf8 \
-DDEFAULT_COLLATION=utf8_general_ci \
-DMYSQL_USER=mysql \
-DWITH_DEBUG=0 \
-DWITH_SSL=system
make && make install
#初始化数据库
cd $dir/mysql
chown -R root.mysql ./*
[ ! -d /mydata/data ] && mkdir -p /mydata/data
scripts/mysql_install_db --user=mysql --datadir=/mydata/data/
#修改MySQL参数文件
/usr/bin/cp support-files/my-large.cnf /etc/my.cnf
sed -i '/\[mysqld\]/a datadir= /mydata/data\ninnodb_file_per_table= ON\nskip_name_resolve= ON' /etc/my.cnf
#生成MySQL启动文件
/usr/bin/cp support-files/mysql.server /etc/rc.d/init.d/mysqld
ln -s $dir/mysql/include/ /usr/include/mysql
echo "$dir/mysql/lib/" >/etc/ld.so.conf.d/mysql.conf
ldconfig
#添加MySQL环境变量
echo "export PATH=$dir/mysql/bin:$PATH" >/etc/profile.d/mysql.sh
source /etc/profile
#启动MySQL
/etc/init.d/mysqld start
}
php () {
echo "Install PHP..."
sleep 3
tar xf $PWD/php-5.6.32.tar.bz2
cd $PWD/php-5.6.32
#打补丁,解决编译安装过程中的报错
curl -o php-5.x.x.patch https://mail.gnome.org/archives/xml/2012-August/txtbgxGXAvz4N.txt
patch -p0 -b < ./php-5.x.x.patch
./configure --prefix=$dir/php \
--with-mysql=$dir/mysql \
--with-openssl \
--enable-fpm \
--enable-sockets \
--enable-sysvshm \
--with-mysqli=$dir/mysql/bin/mysql_config \
--enable-mbstring \
--with-freetype-dir \
--with-jpeg-dir \
--with-png-dir \
--with-zlib-dir \
--with-libxml-dir=/usr/include/libxml2/libxml \
--enable-xml \
--with-mhash \
--with-mcrypt \
--with-config-file-path=/etc \
--with-config-file-scan-dir=/etc/php.d \
--with-bz2 \
--with-curl
make && make install
#生成php-fpm启动文件
/usr/bin/cp sapi/fpm/init.d.php-fpm /etc/rc.d/init.d/php-fpm
chmod +x /etc/rc.d/init.d/php-fpm
#修改php参数文件
/usr/bin/cp $dir/php/etc/php-fpm.conf.default $dir/php/etc/php-fpm.conf
sed -i -e '/pm.max_children/d' -e \
'/\<pm.start_servers\>/d' -e \
'/\<pm.min_spare_servers\>/d' -e \
'/\<pm.max_spare_servers\>/d' -e \
'/pid = run\/php-fpm.pid/s/^;//g' $dir/php/etc/php-fpm.conf
cat >>$dir/php/etc/php-fpm.conf <<EOF
pm.max_children = 150
pm.start_servers = 8
pm.min_spare_servers = 5
pm.max_spare_servers = 10
EOF
/etc/init.d/php-fpm start
echo "php install SUCCESS!"
}
#主程序
main () {
PWD=$(pwd)
if [ ! -f $PWD/mysql-5.5.58.tar.gz ] || [ ! -f $PWD/nginx-1.10.3.tar.gz ] || [ ! -f $PWD/php-5.6.32.tar.bz2 ];then
echo "请将安装文件与脚本放在同一目录下!"
exit 1
fi
if [ $# -eq 0 ];then
dir=/usr/local
elif [ $# -eq 1 ];then
dir=$1
if [ ! -d $dir ];then
mkdir -p $dir
fi
else
echo "The parameter is invalid,Please input again and rerun it!"
exit 1
fi
nginx
mysql
php
}
main
| true |
9aecfaf14f6718160fe53d342fc1cd25a427b48d | Shell | real420og/docker-root-xhyve | /contrib/makehdd/makehdd.sh | UTF-8 | 806 | 3.109375 | 3 | [] | no_license | #!/bin/sh
DEV=/dev/vda
MNT=/mnt/vda1
(echo n; echo p; echo 2; echo; echo +1000M; echo w) | fdisk ${DEV}
(echo t; echo 82; echo w) | fdisk ${DEV}
until [ -b "${DEV}2" ]; do
sleep 0.5
done
mkswap -L DOCKERROOT-SWAP ${DEV}2
(echo n; echo p; echo 1; echo; echo; echo w) | fdisk ${DEV}
until [ -b "${DEV}1" ]; do
sleep 0.5
done
mkfs.ext4 -b 4096 -i 4096 -F -L DOCKERROOT-DATA ${DEV}1
mkdir -p ${MNT}
mount -t ext4 ${DEV}1 ${MNT}
mkdir -p ${MNT}/var/lib/docker-root
wget -qO ${MNT}/var/lib/docker-root/profile https://raw.githubusercontent.com/ailispaw/docker-root-xhyve/master/contrib/configs/profile
wget -qO ${MNT}/var/lib/docker-root/start.sh https://raw.githubusercontent.com/ailispaw/docker-root-xhyve/master/contrib/configs/start.sh
chmod +x ${MNT}/var/lib/docker-root/start.sh
sync; sync; sync
| true |
735c6ffc961aae0d85bb12a5189083492a93e836 | Shell | alenahoopoe/urban_data | /Spark/job.spark.sh | UTF-8 | 1,280 | 2.65625 | 3 | [] | no_license | #!/bin/sh
# SLURM batch job: stand up a Spark + Jupyter Lab session on the cluster and
# print the ssh port-forward command the user must run locally to reach it.
#SBATCH --nodes=2
#SBATCH --output="slurm-%j.out"
#SBATCH -t 2-00:00
#SBATCH --job-name=sparktest
#SBATCH --partition=parallel
#SBATCH --ntasks-per-node=16
#SBATCH --exclusive
#SBATCH --no-kill
# Load the toolchain and the project's Python virtualenv.
module load anaconda3/5.2.0 java/1.8.0_131 spark/2.4.6-hd2.7
source ../../../Scripts/venv-urban/bin/activate
module load ant/1.9.6
export JAVA_HOME="/gpfsnyu/packages/java/jdk1.8.0_131"
export SPARK_VERSION="2.4.6-hd2.7"
export SPARK_HOME="/gpfsnyu/packages/spark/${SPARK_VERSION}"
export SPARK_LOCAL_SCRATCH_DIR="/scratch/${USER}/sparkscratch/"
export SPARK_LOCAL_DIR="/tmp/${USER}/spark"
# Tunnel for the Spark UI (port 4040) on this compute node.
ipnip=$(hostname -i)
echo -e " ssh -N -L 4040:$ipnip:4040 $USER@hpc.shanghai.nyu.edu\n"
XDG_RUNTIME_DIR=""
# Random high port for Jupyter to avoid collisions between concurrent jobs.
ipnport=$(shuf -i8000-9999 -n1)
ipnip=$(hostname -i)
echo -e "\n"
echo " Paste ssh command in a terminal on local host (i.e., laptop)"
echo " ------------------------------------------------------------"
echo -e " ssh -N -L $ipnport:$ipnip:$ipnport $USER@hpc.shanghai.nyu.edu\n"
echo " Open this address in a browser on local host; see token below"
echo " ------------------------------------------------------------"
echo -e " localhost:$ipnport \n\n"
# Foreground Jupyter keeps the allocation alive until the job ends.
jupyter-lab --no-browser --port=$ipnport --ip=$ipnip
| true |
c19095b5313b6e2a915646d4d091fbc0af8a032a | Shell | rosrez/bash | /09-host-file.sh | UTF-8 | 136 | 3.328125 | 3 | [] | no_license | #!/bin/bash
# Argument should be formatted as host:path (e.g. "server:/var/log").
# parse_hostfile SPEC: sets HOST (text before the LAST ':') and
# FN (text after the FIRST ':').  For "a:b:c" that yields HOST="a:b",
# FN="b:c" — the same expansions the original used inline.
parse_hostfile() {
# Strip up to and including the first colon -> the file path.
FN=${1#*:}
# Strip from the last colon to the end -> the host part.
HOST=${1%:*}
}
# ${1-} keeps the script safe when no argument is given; the echos are now
# quoted so a value containing glob characters cannot expand (bug fix).
parse_hostfile "${1-}"
echo "HOST is $HOST"
echo "FILE is $FN"
| true |
b52fae4d69e77df09f84fb77247d14d19d02cdf7 | Shell | havering/cs344 | /Block 1/stats | UTF-8 | 7,312 | 4.34375 | 4 | [] | no_license | #!/bin/bash
# Diana O'Haver
# Program 1
# Program to compute averages and medians from an input file of numbers
# rounding and math functions to produce correct averages
# example from http://stackoverflow.com/questions/2395284/round-a-divided-number-in-bash
# round VALUE PLACES: print VALUE rounded to PLACES decimal places.
round() {
	printf "%.$2f" "$1"
}
# math EXPR...: evaluate a floating-point expression with bc.
math() {
	echo "$*" | bc -l
}
# Abort helper used when a column turns out to contain no numbers at all.
function counterIsZero {
	echo "Counter is 0!"
	exit 1
}
# ---- setup and argument validation ----------------------------------------
# grab process id into $ variable
pid=$$
# You should also use the trap command to catch interrupt, hangup, and terminate signals to remove the temporary files if the stats program is terminated unexpectedly.
trap "rm -rf *.${pid}; exit 1" INT HUP TERM
# conditional test to see if data passed in as file or stdin
# uses the number of arguments to determine this
numArgs=$#
if [ "$numArgs" = "1" ];
then
	datafilepath="/dev/stdin"
elif [ "$numArgs" = "2" ];
then
	datafilepath=$2
fi
#########################################
# check for less arguments than expected
#########################################
if [ "$#" -lt 1 ]; # if less than 1 argument provided, output usage
then
	echo "Usage: stats {-rows|-cols} [file]" 1>&2 # send stdout to stderror
	exit 1
fi
#########################################
# check for more arguments than expected
#########################################
if [ "$#" -gt 2 ]; # if more than 2 arguments provided, invalid usage
then
	echo "Usage: stats {-rows|-cols} [file]" 1>&2 # send stdout to stderror
	exit 1
fi
#########################################
# check if valid flag hasn't been passed in
#########################################
if [[ $1 != -r* ]] && [[ $1 != -c* ]];
then
	echo "Usage: stats {-rows|-cols} [file]" 1>&2 # send stdout to stderror
	exit 1
fi
#########################################
# check if file exists
#########################################
# NOTE(review): when reading stdin, datafilepath is /dev/stdin and the -e/-r/-s
# tests below operate on that device node rather than on real file contents.
if [[ ! -e "$datafilepath" ]];
then
	echo "stats: cannot read $datafilepath" 1>&2
	exit 1
fi
#########################################
# check if file is not able to be read
#########################################
if [[ ! -r "$datafilepath" ]];
then
	echo "stats: cannot read $datafilepath" 1>&2
	exit 1
fi
#########################################
# check if file is empty
#########################################
if [[ ! -s "$datafilepath" ]];
then
	echo "Error: file is empty." 1>&2
	exit 1
fi
#########################################
# couldn't get snippet from assignment specs to work, trying boolean
# check for whether user wants rows or columns
#########################################
if [[ $1 == -r* ]];
then
	wantsRows=true
	wantsCols=false
fi
if [[ $1 == -c* ]];
then
	wantsRows=false
	wantsCols=true
fi
#########################################
# Calculations for ROWS
# For each input line: sum the numbers to compute the average, sort them via
# a per-process temp file, and print "average median" (median = middle value).
#########################################
if [ "$wantsRows" = true ];
then
	echo "Average	Median"
	while read i # i is each row
	do
		sum=0 # initialize sum and count to calc average
		count=0
		for num in $i # for each number on the line currently being read in
		do
			tempArray[$count]=$num # assign num to spot in array
			count=$(($count + 1)) # increment count to move through array
			sum=$(($sum + $num)) # continue tracking running total
		done
		# use rounding and math functions from above
		average=$(round $(math "$sum/$count") 0)
		# sort the array; algorithm adapted from http://linuxpoison.blogspot.com/2012/10/bash-script-how-to-sort-array.html
		# declare receiving array: http://www.tldp.org/LDP/abs/html/declareref.html
		declare -a sortArray
		# create a temp file: http://ss64.com/bash/touch.html
		touch tmp.$$ # appending .$$ attaches process ID to temp file - this enables simultaneous runs to track which files should be closed
		# output existing unsorted array to temp file, then sort it using built-in functionality in bash
		for k in "${tempArray[@]}"; do
			echo $k >> tmp.$$
			`sort -g tmp.$$ -o tmp.$$`
		done
		# read in back in to sortArray
		while read line; do
			sortArray=(${sortArray[@]} $line)
		done < tmp.$$
		# remove the temp file
		rm tmp.$$
		# rounding issues in bash seem to be rendering this accurate regardless of number of numbers on line?
		median=$(($count / 2))
		echo "$average	${sortArray[$median]}"
		unset -v sortArray
	done <$datafilepath
fi
#########################################
# Calculations for COLUMNS
# Transpose the input with cut into temp2.$$ (each column becomes a row) so
# the row-averaging strategy above can be reused, then print all averages on
# one line and all medians on the next.
#########################################
if [ "$wantsCols" = true ];
then
	# swap the columns and rows - i.e. rows now contain values previously held by columns, so rows code above can be reused
	# adapted from cut tutorial at: http://www.thegeekstuff.com/2013/06/cut-command-examples/
	k=1 # variable to track column position
	while read line
	do
		proc=$(cut -f$k $datafilepath)
		if [ "$proc" != "" ]; then # if the line is blank, don't keep it
			echo $proc > temp.$$
			cat temp.$$ >> temp2.$$
		fi
		k=$(($k + 1))
	done <$datafilepath
	# remove newline that keeps showing up at EOF
	# example sourced from: http://stackoverflow.com/questions/16365155/removing-a-newline-character-at-the-end-of-a-file
	# truncate -s $(($(stat -c '%s' temp2)-1)) temp2 # this issue disappeared using the grading file instead of the test file
	# temp files to hold averages and medians
	touch avgs.$$
	touch meds.$$
	# then use the same strategy from above to calculate averages and medians
	while read eachRow # in the input file
	do
		summer=0 # initialize sum and count to calc average
		counter=0
		for number in $eachRow
		do
			tempArray2[$counter]=$number # assign num to spot in array
			counter=$(($counter + 1)) # increment count to move through array
			summer=$(($summer + $number)) # continue tracking running total
		done
		if [ "$counter" = 0 ]; # residual error handling from blank line problem above
		then
			counterIsZero
		else
			# use rounding and math functions from above
			average2=$(round $(math "$summer/$counter") 0)
		fi
		# sort the array; algorithm adapted from http://linuxpoison.blogspot.com/2012/10/bash-script-how-to-sort-array.html
		# declare receiving array: http://www.tldp.org/LDP/abs/html/declareref.html
		declare -a sortArray2
		# create a temp file: http://ss64.com/bash/touch.html
		touch tmp2.$$
		# output existing unsorted array to temp file, then sort it
		for f in "${tempArray2[@]}"; do
			echo $f >> tmp2.$$
			`sort -g tmp2.$$ -o tmp2.$$`
		done
		# read in back in to sortArray
		while read line2; do
			sortArray2=(${sortArray2[@]} $line2)
		done < tmp2.$$
		# remove the temp file
		rm tmp2.$$
		# rounding issues in bash seem to be rendering this accurate regardless of number of numbers on line?
		median2=$(($counter / 2))
		echo -n $average2 >> avgs.$$
		echo -n " " >> avgs.$$
		echo -n ${sortArray2[$median2]} >> meds.$$
		echo -n " " >> meds.$$
		unset -v sortArray2
	done < temp2.$$
	# at this point, the averages and medians should be in those temp files
	# so they can just be output to match the specs
	echo "Averages:"
	cat avgs.$$
	printf "\n"
	echo "Medians:"
	cat meds.$$
	printf "\n"
	# clean up temp files
	rm meds.$$
	rm avgs.$$
	rm temp.$$
	rm temp2.$$
fi
| true |
1b61a086d85742eb1661b16afebd29787f15f3fa | Shell | paulczar/devops-austin-docker | /live_demo/docker203.sh | UTF-8 | 366 | 2.515625 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Demo: build the demo/znc image and start a ZNC container from it, echoing
# each docker command before running it.
cd docker203
echo "docker build -t demo/znc ."
docker build -t demo/znc .
# NOTE(review): plain echo does not interpret "\n", so this prints a literal
# backslash-n before the embedded newline; echo -e (or printf) was likely
# intended for the pretty-printed continuation.
echo "docker run -d -e ZNC_USER=paultest -e ZNC_PASS=paultest \n
    -p 6667 -u znc demo/znc start-znc"
ID=$(docker run -d -e ZNC_USER=paultest -e ZNC_PASS=paultest \
    -p 6667 -u znc demo/znc start-znc)
sleep 1
echo "docker logs $ID"
docker logs $ID
cd .. | true |
f1eb98e9a667102adad6b054ac4c3f86f35ab221 | Shell | petronny/aur3-mirror | /binary-clock/PKGBUILD | UTF-8 | 808 | 2.59375 | 3 | [] | no_license | # $Id: pkgbuild-mode.el,v 1.23 2007/10/20 16:02:14 juergen Exp $
# Arch Linux PKGBUILD for binary-clock, built from the upstream git HEAD.
# Maintainer: <ivo@Arch>
pkgname=binary-clock
pkgver=3883e88
pkgrel=3
pkgdesc="Ncurses clock, with time displayed in colourful binary "
arch=('i686' 'x86_64')
url="https://github.com/JohnAnthony/Binary-Clock"
license=('GPL')
groups=()
depends=('ncurses')
makedepends=('git')
optdepends=()
provides=()
conflicts=()
replaces=()
backup=()
options=()
install=
changelog=
noextract=()
source=("git+https://github.com/JohnAnthony/Binary-Clock.git")
md5sums=('SKIP')
_gitname="Binary-Clock"
# VCS pkgver: derive the version from the checked-out commit; dashes from
# `git describe` are not allowed in pkgver, hence the sed substitution.
pkgver() {
  cd $srcdir/$_gitname
  git describe --always | sed 's|-|.|g'
}
# Build and install the single `binclock` binary into /usr/bin.
package() {
  cd $srcdir/$_gitname
  make || return 1
  install -d $pkgdir/usr/bin
  install -m 755 $srcdir/$_gitname/binclock $pkgdir/usr/bin/binclock
}
# vim:set ts=2 sw=2 et:
| true |
b6f6439d2bcb1d092f3a55e227f001ad0fc00488 | Shell | soulflyer/tm | /dottmux/1Left3Right | UTF-8 | 1,107 | 2.71875 | 3 | [] | no_license | #!/bin/bash
#
# Contains commands to add a new window to an existing tmux session
#
# Note that it is best to create the window in detached state (-d option) so that
# the window is complete before displaying it with select-window. tmux messes up the
# initial display if you don't.
# *************************************************************************************
# Build your window here
# *************************************************************************************
tmux split-window -d -h -t $LABEL
tmux split-window -d -h -t $LABEL #'emacsclient -nw . ; bash -l'
tmux split-window -d -v -t $LABEL
# Set the layout by hand then call tmux list-windows to get the incantation for select-layout
# (fixed: the line above had lost its leading "# Se", which turned the comment
# into a stray command named "t" that failed on every run)
tmux select-layout -t $LABEL " 67,204x64,0,0{111x64,0,0,92x64,112,0[92x30,112,0,92x22,112,31,92x10,112,54]}"
tmux select-window -t $LABEL
tmux send-keys -t :$LABEL.3 'ec .' C-m
# emacs client screws up if this sleep is removed or shortened
sleep 3
tmux send-keys -t :$LABEL.2 'ec .' C-m
tmux select-pane -t 2
# *************************************************************************************
| true |
8a8d83cd653c7ed01985a018e4735268bf9e4230 | Shell | hhpack/process | /hhtool | UTF-8 | 570 | 4.15625 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
#
# Reformat source code file
#
format() {
echo "Start formatting the source code."
FILES=`find src test example bin -name "*.hack"`
for FILE in $FILES
do
hackfmt $FILE -i
echo $FILE
done
echo "Source code formatting is complete"
}
#
# Typecheck for hack lang code
#
check() {
# hh_client needs a .hhconfig marker at the project root.
if [ ! -e '.hhconfig' ]; then
touch .hhconfig
fi
hh_client check
}
# Default exit status. Fixed: RETVAL was previously unset on the format/check
# paths, so "exit $RETVAL" expanded to a bare "exit" and silently reused the
# last command's status; the branches now propagate it explicitly.
RETVAL=0
case "$1" in
format)
format
RETVAL=$?
;;
check)
check
RETVAL=$?
;;
*)
echo "Development tool for Hack"
echo $"Usage: $0 {format|check}"
RETVAL=1
esac
exit $RETVAL
| true |
5b3b3901636fd1270bc6920e8d4d202d811e20bc | Shell | badochov/JNP1_1 | /test.sh | UTF-8 | 1,788 | 4.09375 | 4 | [] | no_license | #!/bin/bash
# Test driver: run ./nod against every *.in case under the tests directory
# and diff its stdout/stderr with the matching .out/.err files.
# Usage: test.sh <tests-dir> <project-dir>   (messages are in Polish)
if [[ $# != 2 ]]; then
    echo "Sposób uzytkowania: $0 <ścieżka/do/folderu/z/testami> <ścieżka/do/folderu/z/projektem> " >&2
    exit 1
fi
tests=$(realpath "$1")
project=$(realpath "$2")
# Fraction of tests to run (random sampling). NOTE(review): $3 can never be
# supplied because the $# != 2 check above rejects a third argument, so this
# is always 1 (run everything).
threshold=${3:-1}
if ! [[ -d "$tests" ]]; then
    echo "Podany folder z testami nie istnieje"
    exit 1
fi
if ! [[ -d "$project" ]]; then
    echo "Podany folder z projektem nie istnieje"
    exit 1
fi
# Counters updated by run_test below.
total=0
correct=0
leaked=0
# traverse_folder DIR: run every *.in test in DIR (randomly sampled against
# $threshold), then recurse into each subdirectory.
function traverse_folder() {
    folder="$1"
    shopt -s nullglob
    for f in "$folder"/*.in; do
        # Random value in [0,1) with 8 decimals; test runs iff it is below
        # the sampling threshold.
        rand_float="$(printf '0%s\n' "$(echo "scale=8; $RANDOM/32768" | bc )")"
        if (( $(echo "$rand_float < $threshold" |bc -l) )); then
            run_test "$f"
        fi
    done
    shopt -s nullglob
    for d in "$folder"/*/; do
        echo "$d"
        traverse_folder "$(realpath "$d")"
    done
}
# ANSI color escapes for pass/fail reporting.
RED='\033[0;31m'
GREEN='\033[0;32m'
NOCOLOR='\033[0m'
# run_test INPUT: execute ./nod on INPUT, capture stdout/stderr into the
# shared temp files, and compare them byte-for-byte (cmp) against the
# expected .out/.err siblings, printing a colored verdict and a diff on
# mismatch. Increments the global total/correct counters.
function run_test() {
    input_file="$1"
    output_file=${input_file//.in/.out}
    error_file=${input_file//.in/.err}
    ((total++))
    echo -e "\e[1mTest $f \e[0m"
    ./nod < "$input_file" 1>"$temp_out" 2>"$temp_err"
    if cmp -s "$output_file" "$temp_out" ; then
        echo -ne "${GREEN}stdout ok${NOCOLOR}, "
        if cmp -s "$error_file" "$temp_err" ; then
            echo -ne "${GREEN}stderr ok${NOCOLOR}\n"
            ((correct++))
        else
            echo -ne "${RED}stderr nieprawidlowe${NOCOLOR}\n"
            diff -d "$error_file" "$temp_err"
        fi
    else
        echo -ne "${RED}stdout nieprawidlowe${NOCOLOR}\n"
        diff -d "$output_file" "$temp_out"
    fi
}
# Scratch files for the program's captured stdout/stderr.
temp_out=$(mktemp)
temp_err=$(mktemp)
# Clean up the temp files and the compiled binary on any exit or signal.
# Fixed: the original quoted "$temp_out $temp_err" as ONE word, so the trap
# tried to delete a single file whose name contained a space and leaked both
# real temp files.
trap 'rm -f -- "$temp_out" "$temp_err" nod' INT TERM HUP EXIT
cd "$2" || exit
# Build the program under test, then walk the test tree and report totals.
g++ -Wall -Wextra -O2 -std=c++17 nod.cc -o nod
traverse_folder "$tests"
echo "total: ${total}, correct ${correct}"
| true |
765a988b6b00b5dcf15eede437a1add78ed73bb8 | Shell | bsaig/home-control | /install/install.sh | UTF-8 | 3,562 | 3.65625 | 4 | [] | no_license | #!/bin/sh
#This shell will install all the libraries and packages (including LAMP) to make our webserver & database work
# Fixes applied in review: POSIX-sh compliance (the shebang is /bin/sh, so
# the bash-only `==` test and `read -p` were replaced) and the pyserial
# cleanup path typo (rm targeted pyserial-3.05/ but 3.0.1 was extracted).
#
echo ""
echo "Installation is going to start - Please follow Instruction"
echo ""
# 1 - "SYSTEM UPDATE & UPGRADE" (OK)
#------------------------------------------------
sudo apt-get -y update
sudo apt-get -y upgrade
#------------------------------------------------
# 2 - "MODIFICATION OF TEMP PRIVILEDGE" (OK)
#------------------------------------------------
sudo chown root:root /tmp
sudo chmod 1777 /tmp
#------------------------------------------------
# 3 - "INSTALLATION OF MYSQL SERVER" (OK)
#------------------------------------------------
#-------Will ask for the root password-----------
sudo apt-get -y install mysql-server-5.5
#------------------------------------------------
# 4 - "INSTALLATION OF TOOLS" (OK)
#------------------------------------------------
sudo apt-get -y install minicom
#tar in /home/pi
tar -xzf /home/pi/install/libraries/pyserial-3.0.1.tar.gz
cd pyserial-3.0.1/
sudo python setup.py install
cd
# Fixed: was "pyserial-3.05/", which never matched the extracted directory.
sudo rm -R pyserial-3.0.1/
tar -xzf /home/pi/install/libraries/MinimalModbus-0.7.tar.gz
cd MinimalModbus-0.7/
sudo python setup.py install
cd
sudo rm -R MinimalModbus-0.7/
tar -xzf /home/pi/install/libraries/RPi.GPIO-0.6.1.tar.gz
cd RPi.GPIO-0.6.1/
sudo python setup.py install
cd
sudo rm -r RPi.GPIO-0.6.1/
#------------------------------------------------
# 5 - "INSTALLATION OF NGNIX" (OK)
#------------------------------------------------
sudo apt-get -y install nginx
sudo service nginx start
echo ""
echo "Nginx default page should be acccessible at the server IP"
echo ""
# POSIX fix: dash's read has no -p, so print the prompt separately.
printf "Can you see it ? (y/n) "
read REPLY
if [ "$REPLY" = "n" ]; then
	echo "Check that ngnix was installed correctly."
fi
#------------------------------------------------
# 6 - "INSTALLATION OF PHP-FPM" (OK)
#------------------------------------------------
sudo apt-get -y install php5-fpm
#------------------------------------------------
# 7 - "INSTALLATION AND CONFIGURATION OF PHPMYADMIN" (OK)
#------------------------------------------------
echo ""
echo "Installing PHPmyadmin, when configuration screen appears press TAB then enter if using NGNIX"
echo ""
sudo apt-get -y install phpmyadmin # Press TAB then Enter to choose the configuration
#Passwd: randompassword
# If the phpmyadmin page is not working HTTP 404 follow the steps bellow:
## "First, remove the symlink you just created by running:"
# rm -rf /usr/share/nginx/www
## "That won't delete phpMyAdmin, it'll just delete the symlink. Now we'll create a new one using:"
# sudo ln -s /usr/share/phpmyadmin/ /var/www/html/phpmyadmin
## "Since you've set root to /var/www/html, that's your "home" directory or root path that your server block uses. What the above command does is create a symlink from where the phpMyAdmin files are to your root directory."
## "Once the new symlink is there, you should be able confirm that by running:"
# ls -al /var/www/html
## "That should produce something that looks like:"
# lrwxrwxrwx 1 root root 22 Apr 4 14:31 phpmyadmin -> /usr/share/phpmyadmin/
## "Which means the symlink is valid and should now work when you visit:"
# http://IP_ADDR/phpmyadmin
## "Where IP_ADDR is your IP address."
sudo service php5-fpm restart
#------------------------------------------------
# 8 - "Installation of mysql python"
#------------------------------------------------
sudo apt-get -y install python-mysqldb
#------------------------------------------------
echo ""
echo "INSTALLATION DONE"
echo ""
| true |
55400d033d4c70ac958c40fa690cee51b3758856 | Shell | jkiggins/dot-configs | /dot-local/bin/activ-notify | UTF-8 | 1,277 | 4.0625 | 4 | [] | no_license | #!/bin/bash
# flags
# Verbosity level: 0 = quiet, 1 = info, 2 = debug (default).
VERBOSE=2
# defaults
LOOP=
NOTIFY_ARGS=
ACTIV_ARGS=
ARGS=
# Notification display time in milliseconds (notify-send -t).
TIME_N=5000
# parse command lines
# off limits from notify-send
# -u -t -a -i -c -h -v
# --urgency --expire --app-name --icon --category --hint --version
# Print usage for this wrapper, then delegate to `activ -h` for the options
# that are forwarded verbatim to activ.
show_help(){
    echo "Usage: activ-notify [-l|-loop] title msg"
    echo "Options:"
    echo "  -l -loop       Loop until Ctrl-c"
    echo
    echo "title and msg are passed to notify-send"
    echo "all other options are sent to from activ"
    echo
    activ -h
}
# Fatal-error helper: print a generic message and abort with status 1.
die(){
    echo "Error"
    exit 1
}
# verbose_printf FORMAT [ARGS...]
# Forward to printf only at debug verbosity (VERBOSE >= 2).
verbose_printf(){
    # ${VERBOSE:-0}: an unset or empty VERBOSE no longer breaks the [ ] test
    # (previously the unquoted expansion made `[` error out).
    if [ "${VERBOSE:-0}" -ge 2 ]; then
        printf "$@"
    fi
}
# info_printf FORMAT [ARGS...]
# Forward to printf at info verbosity and above (VERBOSE >= 1).
info_printf(){
    if [ "${VERBOSE:-0}" -ge 1 ]; then
        printf "$@"
    fi
}
# Main: the last two positional args become the notify-send title/message;
# everything before them is scanned for wrapper flags, and unrecognized
# options are forwarded to activ.
ARGS=("$@")
# echo "ARGS(before): ${ARGS[@]}"
# Strip out last two positonal args
# NOTE(review): the escaped \" quotes become literal characters in the array
# elements, and the arrays are later expanded unquoted — titles/messages
# containing spaces will be word-split. Confirm before relying on this.
NOTIFY_ARGS=(-t ${TIME_N} \"${ARGS[-2]}\" \"${ARGS[-1]}\")
unset ARGS[-1]
unset ARGS[-1]
# echo "ARGS(after): ${ARGS[@]}"
# NOTE(review): ${ARGS[*]} unquoted splits any argument containing spaces.
for i in ${ARGS[*]}; do
    case $i in
	-h|-\?|--help)
	    show_help
	    exit
	    ;;
	-l|--loop)
	    LOOP=1
	    ;;
	-v|--verbose)
	    VERBOSE=2
	    ;&
	# ;& above falls through: -v is also appended to ACTIV_ARGS below.
	*)
	    ACTIV_ARGS=(${ACTIV_ARGS[*]} $i)
    esac
done
verbose_printf "NOTIFY_ARGS: %s\n" "${NOTIFY_ARGS[*]}"
verbose_printf "ACTIV_ARGS: %s\n" "${ACTIV_ARGS[*]}"
# Run activ once, notifying on success; repeat forever when -l was given.
while :;
do
    activ ${ACTIV_ARGS[*]} && notify-send ${NOTIFY_ARGS[*]}
    if [ -z "$LOOP" ]; then
	break
    fi
done
| true |
6bc18c6c0d95a034266332a6812254ae241ec5c2 | Shell | whojr01/bdb-provision | /dirshare/ClearDisk.sh | UTF-8 | 534 | 3.609375 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# This script is dangerous since it destroys the partitions on
# the disks /dev/sdb through /dev/sde.
# It lists the target devices, demands the literal confirmation token
# "kanGar0o", then zeroes each disk's first sector and wipes its
# filesystem signatures.
DRIVES="b c d e"
echo
echo This will destroy the contents of the drives listed below.
for i in $DRIVES
do
	ls /dev/sd$i
done
echo "Are you absolutely sure you want to do this enter (kanGar0o)?"
read response
# Anything other than the exact token aborts before any write happens.
[[ $response == kanGar0o ]] || exit 1
for i in $DRIVES
do
	echo "Gone! Wiped disk."
	ls /dev/sd$i
	# Zero the MBR/partition table (first 512 bytes), then remove all
	# filesystem/RAID signatures so the kernel sees a blank disk.
	dd if=/dev/zero of=/dev/sd$i bs=512 count=1
	wipefs -a /dev/sd$i
done
echo "I hope your not crying..."
| true |
e2c80356b15a6ac8ebea32087b88d004e5744dfe | Shell | senorrossie/sh-a8-16mbgamedisk | /tools/dir2ydir.sh | UTF-8 | 749 | 3.828125 | 4 | [] | no_license | #!/bin/bash
#----------
# Generate directories per decenium for the .XEX/.CAR files in current directory (and move files there).
#
# NOTE: The Homesoft collection does not set the modification time to a usable value. Fandal properly sets it.
# Files are bucketed by their modification YEAR: find emits "path;YYYY/name"
# and each record is copied to OUTDIR/YYYY/name.
SRCDIR="${1:-../work/a8_fandal_cz/Binaries/Games}"
OUTDIR="${2:-../media/ATARI/ByYEAR/Fandal/Games}"
printf "Copying files from %s to %s...\n" "${SRCDIR}" "${OUTDIR}"
# Split each find record on ';' into source path and "YYYY/name" destination.
while IFS=$';' read -r src dst; do
	FY=${dst%%/*}
	if [ ! -d ${OUTDIR}/$FY ]; then
		mkdir -p "${OUTDIR}/$FY"
	fi
	#printf "%s. cp -a '%s' '%s'\n" "$FY" "$src" "$dst"
	cp -a "${src}" "${OUTDIR}/${dst}"
# The character class matches .XEX/.CAR (and .ATR-like) extensions case-insensitively.
done < <( find "${SRCDIR}" -type f -name "*.[XxCcAa][EeAaTt][XxRr]" -printf "%p;%TY/%f\n" )
printf " ... DONE!\n"
| true |
4619b399df0c99205850aa56abe218b7e197a265 | Shell | Teichlab/mapcloud | /scripts/10x/utils/cramfastq.sh | UTF-8 | 624 | 3.09375 | 3 | [] | no_license | #!/bin/bash
# Convert a 10x CRAM ($1) back to gzipped FASTQ (R1/R2/I1), choosing the
# conversion path by whether the CRAM header carries alignment (@SQ) lines.
set -eo pipefail
#discriminate between old and new CRAM files based on the presence of @SQ lines in the CRAM header
if [[ `samtools view -H $1 | grep '@SQ' | wc -l` == 0 ]]
then
	#new file, no alignment, can just go straight for FASTQ
	samtools fastq -1 $1\_R1_001.fastq.gz -2 $1\_R2_001.fastq.gz --i1 $1\_I1_001.fastq.gz -n -i --index-format i8 $1
else
	#old file with alignment, needs bamcollate2 incantation
	samtools view -b $1 | bamcollate2 collate=1 reset=1 resetaux=0 auxfilter=RG,BC,QT | samtools fastq -1 $1\_R1_001.fastq.gz -2 $1\_R2_001.fastq.gz --i1 $1\_I1_001.fastq.gz -n -i --index-format i8 -
fi
| true |
8b897f9122b6693c8590d78611780778aee4bbb1 | Shell | daggersdrawn/dotfiles | /home/bin/launcher | UTF-8 | 1,399 | 3.9375 | 4 | [] | no_license | #!/bin/bash
#
# pbrisbin 2010
#
# heavily based on ghost's dmlauncher:
#
#   http://ghost1227.com/tech/software-scripts/a-not-so-simple-dmenu-wrapper/
#
###
# Print usage and abort; used for -h/--help and bad invocations.
message() { echo 'usage: launcher [ -a <executable> ... ] | [ -d <launcher> ... ]'; exit 1; }
# Print an error message and abort with status 1.
errorout() { echo "error: $*"; exit 1; }
# addlauncher APP...: for each named executable found on PATH, create a
# symlink $cache/APP pointing at it (skipping apps already registered).
addlauncher() {
  local app target

  for app in "$@"; do
    # resolve via PATH; skip anything not installed
    # (type -P replaces the external `which`, and resolving once fixes the
    # duplicated, unquoted $(which ...) of the original, which would split
    # on paths containing spaces)
    target=$(type -P "$app") || continue

    # already added
    [[ -L "$cache/$app" ]] && continue

    # add it
    ln -s "$target" "$cache/$app" || errorout "$app: error adding launcher"
  done
}
# dellauncher APP...: drop the cached launcher symlink for each named app;
# names with no registered launcher are silently ignored.
dellauncher() {
  local app

  for app in "$@"; do
    # only act when a launcher symlink actually exists
    if [[ -L "$cache/$app" ]]; then
      rm -f "$cache/$app" || errorout "$app: error removing launcher"
    fi
  done
}
# dmlrun: present the cached launchers (plus the literal "add"/"del"
# actions) in dmenu and act on the selection — "add X"/"del X" re-invoke
# this script with -a/-d, anything else is exec'd directly.
dmlrun() {
  local exe

  exe="$((ls $cache; echo -e "add\ndel") | $dmenu)"

  if [[ -n "$exe" ]]; then
    case "$exe" in
      add\ *) $0 ${exe/add/-a} & ;;
      del\ *) $0 ${exe/del/-d} & ;;
      *)      exec $exe         ;;
    esac
  fi
}
# parse_options: configure dmenu, ensure the cache directory exists, then
# dispatch on the first argument (-a add, -d delete, -h help, default: menu).
parse_options() {
  # DMENU_OPTIONS lets the user override the default case-insensitive flag.
  dmenu="dmenu ${DMENU_OPTIONS:--i}"

  # make cache
  [[ -d "$cache" ]] || mkdir -p "$cache"

  # getopts
  case "$1" in
    -h|--help)  message                   ;;
    -a|--add)   shift; addlauncher "$@"   ;;
    -d|--del)   shift; dellauncher "$@"   ;;
    *)          dmlrun                    ;;
  esac
}

# Launcher symlinks live under the user's XDG cache directory.
cache="$XDG_CACHE_HOME/launcher"

parse_options "$@"
| true |
84ea4cf7d3f9df47b7637e02941c86a455a236cd | Shell | krantim/pol | /infrastructure/packer/centos/scripts/serverspec.sh | UTF-8 | 917 | 3.484375 | 3 | [] | no_license | #!/bin/sh -e
# Packer provisioner: install Ruby/bundler and (when re-enabled) run the
# serverspec rspec suite from /tmp/test, failing the build on test failures.
# Install Ruby
yum -y install ruby ruby-devel rubygems
gem install bundler -v 1.6.2 --no-ri --no-rdoc

cd /tmp/test
bundle install --path vendor/bundle

# NOTE(review): everything after this early exit is dead code until the
# WI-169 ticket re-enables the test run.
echo "WI-169 Tests are currently disabled."
exit 0

echo "Running tests..."
TEST_FILES=`ls ./spec/*_spec.rb | xargs`
TEST_RESULTS_FILE="test-results.txt"
EXEC_MODE=true bundle exec "rspec --format documentation --out ${TEST_RESULTS_FILE} --format documentation ${TEST_FILES}"

# Exit with error code if we have test failures or no test results
# @see https://circleci.com/docs/rspec-wrong-exit-code
if [ -e $TEST_RESULTS_FILE ]; then
    echo "INFO File ${TEST_RESULTS_FILE} exists."
else
    echo "ERROR Could not find test results.Something is wrong. Exiting now."
    exit 1
fi

if grep --quiet "^Failed examples:$" $TEST_RESULTS_FILE; then
    echo "ERROR Found test failures inside the test results. Exiting now."
    exit 1
fi

# Cleanup
rm -r /tmp/test
| true |
9cf5b21989e788151cfae95d4a8e6699c3beac28 | Shell | kiranbhakre/hdp-scripts | /hdp-install/create-mapred-historydirs-onhdfs.sh | UTF-8 | 561 | 2.796875 | 3 | [] | no_license | #!/bin/bash
# Create the MapReduce job-history and YARN log-aggregation directories on
# HDFS with the permissions/ownership HDP expects.
# NOTE(review): the message below says /mapred/history/ but the commands
# actually create /mr-history and /app-logs — confirm which is intended.
echo "Create mapreduce directories on HDFS in /mapred/history/ location"
echo "These commands need to be run as HDFS superuser"
# Each command runs as the hdfs superuser via su.
su - hdfs -c "hadoop fs -mkdir -p /mr-history/tmp"
su - hdfs -c "hadoop fs -chmod -R 1777 /mr-history/tmp"
su - hdfs -c "hadoop fs -mkdir -p /mr-history/done"
su - hdfs -c "hadoop fs -chmod -R 1777 /mr-history/done"
su - hdfs -c "hadoop fs -chown -R mapred:hdfs /mr-history"
su - hdfs -c "hadoop fs -mkdir -p /app-logs"
su - hdfs -c "hadoop fs -chmod -R 1777 /app-logs"
su - hdfs -c "hadoop fs -chown yarn /app-logs"
| true |
c790d74d8d764db59584bcea60cb8684a2663163 | Shell | readex-eu/readex-apps | /production_apps/OPENFOAM/OpenFOAM/scripts_taurus_hsw/run_saf.sh | UTF-8 | 1,294 | 2.84375 | 3 | [
"BSD-3-Clause"
] | permissive | #!/bin/sh
# SLURM job: iteratively run the Score-P-instrumented OpenFOAM simpleFoam
# case and grow an autofilter file until no new filter entries are produced.
# NOTE(review): the shebang is /bin/sh but ${BASH_SOURCE[0]} and `==` below
# are bash-only — confirm SLURM executes this with bash on this cluster.
#SBATCH --time=1:00:00
#SBATCH --nodes=1
#SBATCH --tasks-per-node=24
#SBATCH --cpus-per-task=1
#SBATCH --partition=haswell
#SBATCH --reservation=READEX
#SBATCH -A p_readex
#SBATCH --exclusive
#SBATCH --mem-per-cpu=2500M
#SBATCH --comment="no_monitoring"
#SBATCH -J "SAFfoam"
#SBATCH --mail-type=ALL
#SBATCH --mail-user=ondrej.vysocky@vsb.cz

APP='srun -n 24 simpleFoam -parallel'

# Locate the project root relative to this script (one directory up).
export FM_DIR=$( dirname "${BASH_SOURCE[0]}" )
if [ "$FM_DIR" == "." ]
then
    export FM_DIR=$(pwd)
fi
export FM_DIR=$FM_DIR/..

cd $FM_DIR
source readex_env/set_env_saf.source
source scripts_$READEX_MACHINE/environment.sh

cd $FM_DIR/OpenFOAM-v1612+/
source $FM_DIR/OpenFOAM-v1612+/etc/bashrc
cd ../../motorBike24/

# Score-P: profiling only (no tracing), with the filter file built below.
export SCOREP_ENABLE_TRACING=false
export SCOREP_ENABLE_PROFILING=true
export SCOREP_MPI_ENABLE_GROUPS=ENV
export SCOREP_FILTERING_FILE=scorep.filt

rm -rf scorep-*
rm -f old_scorep.filt
echo "" > scorep.filt

# Repeat: run, regenerate the filter, stop once the filter is stable
# (do_scorep_autofilter_single.sh returns 0).
iteration=0
result=1
while [ $result != 0 ]; do
    iteration=$(($iteration +1))
    echo "result = "$result
    # run the application
    $APP
    echo "ITERATION done."
    $FM_DIR/scripts_$READEX_MACHINE/do_scorep_autofilter_single.sh 0.1
    result=$?
    echo "scorep_autofilter_singe done ($result)."
    # Archive this iteration's profile and filter snapshot.
    mv scorep-* $iteration"_scorep"
    cp scorep.filt $iteration"_scorep.filt"
done
echo "end."
| true |
2b6db3d1cc263d0f5b59e5b6701fa7b343136662 | Shell | golosio/annabell | /crossvalidation/round1/randomize_check/check.sh | UTF-8 | 2,485 | 3.375 | 3 | [] | no_license | cat ../randomize/log_rand.txt | sed 's/^tell/? tell/; s/^\.bx /? bx /' | grep -v '\->' | grep -v '\.' | grep -v '>>' | grep -v 'it is a animal' | grep -v '? what kind of animal' | sed 's/\? which/which/' > tmp1.txt
# Score the randomized-run log: pair each question with the model's answer,
# diff them against the expected answers, and report per-category error
# counts (people / body / skills) plus the overall good/total tally.
cat tmp1.txt | grep -A1 '^?' | grep -v '\-\-' > tmp2.txt
cat tmp2.txt | grep '^?' > questions.txt
cat tmp2.txt | grep -v '^?' > answers.txt
k=0
good=0
err_people=0
err_body=0
err_skills=0
:> diff.txt
# Category boundaries: answers 1..n1 are "people", n1+1..n2 are "body".
n1=$(cat ../randomize/a_people.txt | wc -l)
n2=$(cat ../randomize/a_people.txt ../randomize/a_body.txt | wc -l)
# NOTE: the pipe puts this loop in a subshell, so the counters are exported
# through the *.txt files written below rather than through variables.
cat ../randomize/answers_rand.txt | while read nn l; do
	k=$(expr $k + 1)
	l1=$(head -$k answers.txt | tail -1)
	if [ "$l1" != "$l" ]; then
		echo >> diff.txt
		echo $nn >>diff.txt
		echo $l1 >>diff.txt
		echo $l >>diff.txt
		if [ $nn -le $n1 ]; then
			err_people=$(expr $err_people + 1)
		elif [ $nn -le $n2 ]; then
			err_body=$(expr $err_body + 1)
		else
			err_skills=$(expr $err_skills + 1)
		fi
		echo $err_people > err_people.txt
		echo $err_body > err_body.txt
		echo $err_skills > err_skills.txt
	else
		good=$(expr $good + 1)
		echo $good > good.txt
	fi
done
nxxx=$(cat ../randomize/answers_rand.txt | grep xxx | wc -l)
echo "num. of \"tell me\" questions: $nxxx"
#echo The number of good xxx must be subtracted from the skills errors,
#echo and added to the total number of good answers
#echo edit the file ../randomize/log_rand.txt ,
#echo search the questions that start with \"tell me\"
#echo and check the answer
# check2.sh writes the number of good "tell me" answers into tmp9.txt.
./check2.sh
q_animal=$(cat ../randomize/log_rand.txt | grep '^? what kind of' | wc -l)
good_animal=$(cat ../randomize/log_rand.txt | grep -B1 '^? what kind of' | grep '^it is a animal$' | wc -l)
err_animal=$(expr $q_animal - $good_animal)
echo "it is an animal errors: $err_animal / $q_animal"
tell_me_good=$(cat tmp9.txt)
# Re-read the counters produced inside the subshell loop above.
err_people=$(cat err_people.txt)
err_body=$(cat err_body.txt)
err_skills=$(cat err_skills.txt)
err_skills=$(expr $err_skills - $tell_me_good)
echo -n "people errors: $err_people / "
cat ../randomize/q_people.txt | wc -l
echo -n "body errors: $err_body / "
cat ../randomize/q_body.txt | wc -l
err_skills=$(expr $err_skills + $err_animal)
q_skills1=$(cat ../randomize/q_skills1.txt ../randomize/q_skills2.txt ../randomize/q_skills3.txt | wc -l)
q_skills=$(expr $q_skills1 + $q_animal)
echo "skills errors: $err_skills / $q_skills"
good=$(cat good.txt)
good=$(expr $good + $tell_me_good + $good_animal)
qtot=$(expr $(cat answers.txt | wc -l) + $q_animal)
echo "good: $good / $qtot"
| true |
4456677586f67146e3a9890b4d349dc339aa14ac | Shell | samsquanch01/easy_rom_builder | /olivewood-clone_sources.sh | UTF-8 | 3,188 | 2.828125 | 3 | [] | no_license | #! /bin/bash
## Device Tree
git clone https://github.com/iprouteth0/device_xiaomi_olivewood-1 device/xiaomi/olivewood
git clone https://github.com/Xiaomi-SDM439-Development/android_device_xiaomi_sdm439 device/xiaomi/sdm439-common
## Vendor Tree
git clone https://github.com/Evolution-X-Devices/vendor_xiaomi vendor/xiaomi
git clone https://github.com/iprouteth0/vendor_xiaomi_olivewood_64 vendor/xiaomi/olivewood
git clone https://github.com/Stargazer19/hardware_qcom_display-caf-msm8937 hardware/qcom-caf/msm8937/display
git clone https://github.com/Stargazer19/hardware_qcom_media-caf-msm8937 hardware/qcom-caf/msm8937/media
git clone https://github.com/Stargazer19/hardware_qcom_audio-caf-msm8937 hardware/qcom-caf/msm8937/audio
cp hardware/qcom-caf/msm8996/Android.* hardware/qcom-caf/msm8937/
cat $THIDIR/device_mk_changes-msm8937.txt >> device/xiaomi/olivewood/device.mk
echo "TARGET_KERNEL_CLANG_COMPILE=true" >> device/xiaomi/olivewood/BoardConfig.mk
## Kernel Sources:
## For second kernel source link, add the following;
##
## Add this to BoardConfig.mk or BoardConfigCommon.mk:
##
## TARGET_KERNEL_CLANG_COMPILE := true
#git clone https://github.com/MiCode/Xiaomi_Kernel_OpenSource/tree/olivewood-p-oss -b olivewood-p-oss kernel/xiaomi/sdm439
echo "Please choose which kernel source you would like to use:"
echo "1 for Joel's a10 kernel"
echo "2 for Joel's a11 kernel"
echo "3 for cherry kernel"
echo "4 for RALegacy kernel"
echo "5 for lolz kernel"
read KERNEL_CHOICE
rm -rf kernel/xiaomi/sdm439
if [[ $KERNEL_CHOICE = 1 ]]
then
git clone https://github.com/iprouteth0/android_kernel_xiaomi_sdm439 kernel/xiaomi/sdm439
elif [[ $KERNEL_CHOICE = 2 ]]
then
git clone https://github.com/Xiaomi-SDM439-Development/android_kernel_xiaomi_sdm439 kernel/xiaomi/sdm439
elif [[ $KERNEL_CHOICE = 3 ]]
then
git clone https://github.com/iprouteth0/kernel_cherry_sdm439 kernel/xiaomi/sdm439
elif [[ $KERNEL_CHOICE = 4 ]]
then
git clone https://github.com/iprouteth0/RALegacy_kernel_sdm439 kernel/xiaomi/sdm439
elif [[ $KERNEL_CHOICE = 5 ]]
then
git clone https://github.com/iprouteth0/lolz_kernel_redmi8 kernel/xiaomi/sdm439
elif [[ ! $KERNEL_CHOICE =~ [1-5] ]]
then
git clone https://github.com/iprouteth0/android_kernel_xiaomi_sdm439 kernel/xiaomi/sdm439
fi
## match device tree files to rom tree
cp device/xiaomi/olivewood/lineage_olivewood.mk device/xiaomi/olivewood/$ROMNAME\_olivewood.mk
sed -i "s|vendor/lineage/config|vendor/$VENDOR_CONFIG/config|" device/xiaomi/olivewood/$ROMNAME\_olivewood.mk
sed -i "s|lineage|$ROMNAME|" device/xiaomi/olivewood/$ROMNAME\_olivewood.mk
sed -i "s|lineage|$ROMNAME|" device/xiaomi/olivewood/AndroidProducts.mk
#echo "WITH_GAPPS := true" >> device/xiaomi/olivewood/$ROMNAME\_olivewood.mk
if [[ -d "vendor/$VENDOR_CONFIG/config" ]]
then
if [[ ! -f "vendor/$VENDOR_CONFIG/config/common_full_phone.mk" ]]
then
sed -i "s|common_full_phone.mk|common.mk|" device/xiaomi/olivewood/$ROMNAME\_olivewood.mk
fi
fi
if [[ -d "vendor/$VENDOR_CONFIG/configs" ]]
then
if [[ ! -f "vendor/$VENDOR_CONFIG/configs/common_full_phone.mk" ]]
then
sed -i "s|common_full_phone.mk|common.mk|" device/xiaomi/olivewood/$ROMNAME\_olivewood.mk
fi
fi
| true |
f81850b14a1973a8d5b1cf08cb96c0ffd7e444e8 | Shell | rui314/mold | /test/elf/ifunc-dynamic.sh | UTF-8 | 516 | 2.75 | 3 | [
"MIT",
"Zlib",
"BSD-3-Clause",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | #!/bin/bash
# Linker regression test: GNU ifunc resolution must work in dynamically
# linked executables under both lazy (-z lazy) and eager (-z now) binding.
. $(dirname $0)/common.inc

supports_ifunc || skip

# Compile a program whose foobar() is an ifunc resolved to real_foobar at
# load time. (The heredoc is C source piped to the compiler — keep it
# byte-exact; shell comments inside it would become part of the program.)
cat <<EOF | $CC -o $t/a.o -c -xc -
#include <stdio.h>

__attribute__((ifunc("resolve_foobar")))
static void foobar(void);

static void real_foobar(void) {
  printf("Hello world\n");
}

typedef void Func();

static Func *resolve_foobar(void) {
  return real_foobar;
}

int main() {
  foobar();
}
EOF

# Link with the linker under test (-B.) and run once per binding mode.
$CC -B. -o $t/exe1 $t/a.o -Wl,-z,lazy
$QEMU $t/exe1 | grep -q 'Hello world'

$CC -B. -o $t/exe2 $t/a.o -Wl,-z,now
$QEMU $t/exe2 | grep -q 'Hello world'
| true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.