blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
a5f9af631e678cb55b4147f43c63ff9de5362052 | Shell | CaydenFranklin/AltiusWorkingScripts | /run_fqpy_script.sh | UTF-8 | 764 | 2.515625 | 3 | [] | no_license | #!/bin/bash
# Submit a SLURM batch job (on the node given as $1) that downloads FASTQ
# data via FasterQ-Downloader.py.
# Usage: run_fqpy_script.sh <nodelist> <downloader-arg2>
memSize="40G" # this varies widely with job needs; use ls -lah on your fastq files to estimate what you'll need
OUTDIR="/net/seq/data/projects/SuperIndex/cfranklin/slurm_output" # fine to use your favorite current directory in this case
jobName="cfranklinFastQDownload" # customize as desired
# The heredoc below is UNQUOTED, so $1 and $2 are expanded NOW (at submit
# time) and baked into the job script handed to sbatch; --parsable makes
# sbatch print only the job id, captured in jobID.
jobID=$(sbatch --parsable --partition=queue0 --job-name=$jobName --output=${OUTDIR}/${jobName}.o%j --error=${OUTDIR}/${jobName}.e%j --mem=$memSize --priority=10 --nodelist $1 <<EOF
#!/bin/bash
module load python/3.6.4
module load sratoolkit/2.9.1
module load atlas-lapack/3.10.2
module load numpy/1.11.0
module load scipy/1.0.0
module load pandas/0.19.1
module load pigz/2.3.3
python FasterQ-Downloader.py $1 $2
EOF
)
exit 0 | true |
22cfe2044e523f105debb11e419f2db98ec35b00 | Shell | tchajed/cloud-arch | /build.sh | UTF-8 | 4,715 | 3.34375 | 3 | [
"MIT"
] | permissive | #!/bin/sh
set -e
. ./common.sh
# Prime the sudo credential cache up front so later sudo calls don't prompt
# mid-run.
sudo date > /dev/null
sec "Configuring disk image for the cloud"
msg "Checking that we have Arch's install scripts"
pacman -Qi arch-install-scripts >/dev/null || sudo pacman -S arch-install-scripts # for genfstab and arch-chroot
# Work on a copy so a failed build never corrupts the bootstrapped image.
cp bootstrapped.raw archlinux.raw.tmp
tmp=$(mktemp -d -t arch-cloud-build.XXXXXXXXXX)
tmp=$(readlink -f "$tmp")
# mount.sh loop-mounts the image at $tmp and records the loop device name
# in the .mountpoint file; its absence means the mount failed.
./mount.sh "archlinux.raw.tmp" "$tmp"
if [ ! -e ".mountpoint" ]; then
exit 1
fi
lodev=$(cat .mountpoint)
msg "Generating /etc/fstab for $tmp"
# Use the filesystem UUID of partition 1 so booting doesn't depend on the
# device name the cloud provider assigns.
uuid=$(sudo blkid -o value "/dev/mapper/${lodev}p1" | head -n1)
echo "UUID=$uuid / ext4 rw,relatime,data=ordered 0 1" | sudo tee -a "$tmp/etc/fstab"
msg "Setting up bootloader"
sudo mkdir -p "$tmp/boot/syslinux"
sudo cp -r "$tmp/usr/lib/syslinux/bios"/*.c32 "$tmp/boot/syslinux/"
sudo extlinux --install "$tmp/boot/syslinux"
# Rewrite syslinux.cfg for headless cloud use: serial console, fallback
# kernel entry, short prompt timeout, no graphical UI, root by UUID.
sudo sed -i "1i \\SERIAL 0 115200" "$tmp/boot/syslinux/syslinux.cfg"
sudo sed -i "s@DEFAULT arch@DEFAULT archfallback@" "$tmp/boot/syslinux/syslinux.cfg"
sudo sed -i "s@TIMEOUT 50@TIMEOUT 5@" "$tmp/boot/syslinux/syslinux.cfg"
sudo sed -i "s@PROMPT 0@PROMPT 1@" "$tmp/boot/syslinux/syslinux.cfg"
sudo sed -i "s@^UI @# UI @" "$tmp/boot/syslinux/syslinux.cfg"
sudo sed -i "s@root=/dev/sda3@root=UUID=$uuid console=tty0 console=ttyS0,115200n8@" "$tmp/boot/syslinux/syslinux.cfg"
msg2 "Writing MBR"
# The MBR boot code is exactly 440 bytes; write it to the loop device
# without truncating the partition table that follows it.
sudo dd conv=notrunc bs=440 count=1 "if=$tmp/usr/lib/syslinux/bios/mbr.bin" "of=/dev/${lodev}"
msg "Enabling [multilib]"
# Uncomment the [multilib] section header and the line after it in
# pacman.conf, then refresh the package databases inside the chroot.
sudo sed -i '/#\[multilib\]/,+1s/^#//' "$tmp/etc/pacman.conf"
sudo arch-chroot "$tmp" pacman -Sy
msg "Configuring cloud-init"
# Set up main user: rebrand the stock Ubuntu cloud.cfg defaults for Arch.
msg2 "Configuring default user"
sudo sed -i "s@distro: ubuntu@distro: arch@" "$tmp/etc/cloud/cloud.cfg"
sudo sed -i "s@name: ubuntu@name: arch@" "$tmp/etc/cloud/cloud.cfg"
sudo sed -i "s@gecos: Ubuntu@gecos: Arch@" "$tmp/etc/cloud/cloud.cfg"
sudo sed -i "s@groups: .*@groups: [adm, wheel]@" "$tmp/etc/cloud/cloud.cfg"
sudo sed -i "/sudo:/d" "$tmp/etc/cloud/cloud.cfg"
# Grant the wheel group passwordless sudo (the default user is in wheel).
sudo sed -i '/# %wheel ALL=(ALL) NOPASSWD: ALL/s/^# //' "$tmp/etc/sudoers"
# Set up data sources
msg2 "Setting up data sources"
sudo sed -i "/Example datasource config/i \\datasource_list: [ NoCloud, ConfigDrive, OpenNebula, Azure, AltCloud, OVF, MAAS, GCE, OpenStack, CloudSigma, Ec2, CloudStack, None ]" "$tmp/etc/cloud/cloud.cfg"
# Avoid errors about syslog not existing
# See https://bugs.launchpad.net/cloud-init/+bug/1172983
msg2 "Fixing syslog permissions"
sudo sed -i "/datasource_list/i \\syslog_fix_perms: null" "$tmp/etc/cloud/cloud.cfg"
# Don't start Ubuntu things: strip cloud-init modules that only exist on
# Ubuntu/Debian systems.
msg2 "Disabling unused modules"
sudo sed -i '/emit_upstart/d' "$tmp/etc/cloud/cloud.cfg"
sudo sed -i '/ubuntu-init-switch/d' "$tmp/etc/cloud/cloud.cfg"
sudo sed -i '/grub-dpkg/d' "$tmp/etc/cloud/cloud.cfg"
sudo sed -i '/apt-pipelining/d' "$tmp/etc/cloud/cloud.cfg"
sudo sed -i '/apt-configure/d' "$tmp/etc/cloud/cloud.cfg"
sudo sed -i '/byobu/d' "$tmp/etc/cloud/cloud.cfg"
# Set up network: DHCPv4 on every ethernet-ish interface via
# systemd-networkd, with systemd-resolved providing DNS.
msg "Configuring network"
echo '[Match]
Name=e*
[Network]
DHCP=ipv4
[DHCPv4]
UseHostname=false' | sudo tee "$tmp/etc/systemd/network/dhcp-all.network"
sudo arch-chroot "$tmp" systemctl enable systemd-networkd.service
sudo arch-chroot "$tmp" systemctl enable systemd-resolved.service
sudo ln -sfn /run/systemd/resolve/resolv.conf "$tmp/etc/resolv.conf"
# Make cloud-init wait for an actual network connection, not just the stack.
sudo sed -i 's/network.target/network-online.target/' "$tmp/usr/lib/systemd/system/cloud-init.service"
# Start daemons on boot
msg "Enabling system services"
sudo arch-chroot "$tmp" systemctl enable sshd
sudo arch-chroot "$tmp" systemctl enable cloud-init
sudo arch-chroot "$tmp" systemctl enable cloud-config
sudo arch-chroot "$tmp" systemctl enable cloud-final
# Remove machine ID so it is regenerated on boot
msg "Wiping machine ID"
printf "" | sudo tee "$tmp/etc/machine-id"
printf "" | sudo tee "$tmp/var/lib/dbus/machine-id"
msg "Writing motd"
# We don't want to use the pacman keys in the image, because the private
# component is effectively public (anyone with the image can find out what it
# is). Unfortunately, there is no easy way to run the necessary commands on
# (only) the next boot as far as I'm aware. Instead, we put it in the motd so
# that any user will see it the first time they log into the machine.
# For future reference, the private key can be checked with
#
# sudo gpg --homedir /etc/pacman.d/gnupg -K
#
echo "Welcome to your brand new Arch cloud instance!
Before doing anything else, you should re-key pacman with
# pacman-key --init
# pacman-key --populate archlinux
You might also want to update the system using
# pacman -Syu
(you can change this message by editing /etc/motd)" | sudo tee -a "$tmp/etc/motd"
# Detach the loop device and promote the finished temp image into place.
./unmount.sh "$tmp"
mv archlinux.raw.tmp archlinux.raw
| true |
5eda44e9dd20a011c54084afe3b9cb2933feb007 | Shell | ibmmkeppeler/cont_mq | /mq_tester.sh | UTF-8 | 1,354 | 3.21875 | 3 | [] | no_license | #/bin/bash
# Exercise the IBM MQ REST messaging API: send and then consume a message
# from DEV.QUEUE.1 once per iteration.
HOSTIP="158.176.129.211"
NODEPORT="30183"
# Override the default NodePort and choose how many send/receive cycles to run.
read -p 'NodePort: ' NODEPORT
read -p 'Iterations: ' y
echo "--------------------------------------------------------------------"
echo "| Starting MQ Test |"
echo "--------------------------------------------------------------------"
x=1
# BUG FIX: the condition used to read "[ $x -le y ]", comparing against the
# literal string "y" instead of the iteration count in $y, so the loop body
# never executed correctly.
while [ "$x" -le "$y" ]
do
    sleep 1
    echo "Sending Message @ "$(date "+%H:%M:%S")
    # POST a test message onto DEV.QUEUE.1 via the MQ REST messaging API.
    curl -i -s -k https://${HOSTIP}:${NODEPORT}/ibmmq/rest/v1/messaging/qmgr/QM1/queue/DEV.QUEUE.1/message -X POST -u app:password -H 'ibm-mq-rest-csrf-token: blank' -H 'Content-Type: text/plain;charset=utf-8' -d "${x}: Hello World" # | sed -n 's/^\(ibm-mq-md-messageId\)/\1/p'
    echo "--------------------------------------------------------------------"
    sleep 1
    echo "Receiving Message @ "$(date "+%H:%M:%S")
    # DELETE destructively gets (consumes) the message we just sent.
    curl -i -s -k https://${HOSTIP}:${NODEPORT}/ibmmq/rest/v1/messaging/qmgr/QM1/queue/DEV.QUEUE.1/message -X DELETE -u app:password -H 'ibm-mq-rest-csrf-token: blank' # | tail -1
    echo ""
    echo "--------------------------------------------------------------------"
    x=$(( x + 1 ))
done
echo "--------------------------------------------------------------------"
echo "| Ending MQ Test |"
echo "--------------------------------------------------------------------"
| true |
c92d47ba9f4b1cae4c183ba5b05949830bea2e38 | Shell | chenzihao2/learning | /linux/shell_exercise/statistics.sh | UTF-8 | 568 | 3.703125 | 4 | [] | no_license | #########################################################################
# File Name: statistics.sh
# Author: chenzihao
# Created Time: 2020年06月30日 星期二 18时59分08秒
#########################################################################
#!/bin/bash
# Usage: statistics.sh <file>
# Print "LETTER - COUNT" for each letter a-z in the file, sorted by count
# descending.  Quote "$1" so a missing or space-containing argument doesn't
# break the tests.
if [ -z "$1" ] || [ ! -e "$1" ]
then
	echo 'no file'
	exit
fi
# $1: file to analyse.  Counts are collected in a mktemp-created scratch
# file (the old fixed "tmp.txt" could clobber a file in the caller's cwd),
# then sorted numerically/descending on the "-"-delimited count field.
statistics () {
	local tmpfile
	tmpfile=$(mktemp) || exit 1
	for char in {a..z}
	do
		# grep -o emits one line per match, so wc -l is the occurrence
		# count; tr upper-cases the letter for display.
		echo "$char - $(grep -io "$char" "$1" | wc -l)" | tr /a-z/ /A-Z/ >> "$tmpfile"
	done
	sort -rn -k 2 -t - "$tmpfile"
	rm "$tmpfile"
}
statistics "$1"
| true |
a0a166a4ebbcb0ba38b04efdd64fb117bd775e53 | Shell | zwqjsj0404/hadoop_automation | /sshCopyId.sh | UTF-8 | 434 | 2.703125 | 3 | [] | no_license | #!/bin/bash
# Push the hduser SSH public key to the host given as $1, answering the
# password prompt non-interactively via expect.
/usr/bin/expect <<EOD
spawn ssh-copy-id -i /home/hduser/.ssh/id_rsa.pub hduser@$1
expect "hduser@$1's password:"
send "abcd\r\n"
interact
EOD
# BUG FIX: the next three lines were stray pasted shell-session text
# ("automatic ssh and exit", ">> cat sshAndExit.sh" and a second shebang)
# that was executed as commands and failed at runtime; they are kept only
# as comments for reference.
# automatic ssh and exit
# >> cat sshAndExit.sh
# #!/bin/bash
# Prime known_hosts: SSH once into every host listed in ListOfSlaves
# (accepting its host key) and immediately exit.
filecontent=( `cat "ListOfSlaves" `)
for t in "${filecontent[@]}"
do
ip_addr=$t
#echo $ip_addr
/usr/bin/expect <<EOD
spawn ssh -o "StrictHostKeyChecking no" $ip_addr
expect "hduser@$ip_addr"
send "exit"
interact
EOD
done | true |
61c40520c84624dc495fb294094af7278a333663 | Shell | vlakas/Honey-Net | /iktomi/scripts/extractor.sh | UTF-8 | 1,240 | 2.765625 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Extract the unique source IPs seen in each honeypot port log and assemble
# them into ../FINAL.log, one "PORT NN:" section per port.
# Refactored: the original repeated the same pipeline and the same four
# output lines once per port; a single port list drives both phases now,
# producing byte-identical output in the same order.
cd ../logs || exit 1
ports=(25 80 21 8080 443)
# "connection from <ip>:<port>" -> keep field 3, strip the :port suffix,
# sort and de-duplicate into portNN.log.
for p in "${ports[@]}"; do
    grep -a "connection from" "log${p}.log" | cut -d " " -f 3 | cut -d ":" -f 1 | sort | uniq > "port${p}.log"
done
rm ../FINAL.log 2>/dev/null
# Emit each port's section (header, blank, IP list, blank) and discard the
# intermediate per-port file once it has been consumed.
for p in "${ports[@]}"; do
    echo "PORT ${p}: " >> ../FINAL.log
    echo " " >> ../FINAL.log
    cat "port${p}.log" >> ../FINAL.log
    echo " " >> ../FINAL.log
    rm "port${p}.log"
done
| true |
0323080da7254fc64c676999bb31e3e793a5f0d4 | Shell | sturbi/environment | /linux/bashrc | UTF-8 | 534 | 2.859375 | 3 | [] | no_license | # .bashrc
# Source global definitions
if [ -f /etc/bashrc ]; then
. /etc/bashrc
fi
#User specific aliases and functions
# No wget on this box; emulate "download to current dir" with curl.
alias wget="curl -O"
source ~/.govc.conf
export GOPATH=$HOME/go
export DISPLAY=:0
# Wrap ssh: when running inside tmux, rename the current window to the ssh
# target for the duration of the session, then restore automatic renaming.
# The check walks up to this shell's parent process and tests whether it is
# the tmux server.
ssh() {
if [ "$(ps -p $(ps -p $$ -o ppid=) -o comm=)" = "tmux: server" ]; then
tmux rename-window "$*"
command ssh "$@"
tmux set-window-option automatic-rename "on" 1>/dev/null
else
command ssh "$@"
fi
}
# Use the WSL ssh-pageant bridge as the SSH agent socket.
export SSH_AUTH_SOCK=/data/wsl-ssh-pageant/ssh-agent.sock
| true |
5eab9a8f176c097e5124bff76b04f927dab2a815 | Shell | uc-cdis/gen3-qa | /check-pod-health.sh | UTF-8 | 1,299 | 3.65625 | 4 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
# curl service endpoints; exit failure if any service returns status code != 200
# Service Health Endpoints
commons_name=$KUBECTL_NAMESPACE
if [[ "$KUBECTL_NAMESPACE" == "default" ]]; then
commons_name="qa"
fi
commons_url="https://${commons_name}.planx-pla.net"
indexd="${commons_url}/index/_status"
sheepdog="${commons_url}/api/_status"
peregrine="${commons_url}/peregrine/_status"
portal="${commons_url}/"
fence="${commons_url}/user/jwt/keys"
selenium="selenium-hub:4444/status"
# Pick the endpoint set for this deployment flavor.
# FIX: "$1" is quoted in the -n test below; the old unquoted "[ -n $1 ]"
# collapsed to "[ -n ]" (always true) when no argument was given.
if [ -n "$1" ] && [ "$1" == "dataguids.org" ]; then
health_endpoints=( "$indexd" "$portal" )
elif ! (g3kubectl get pods --no-headers -l app=portal | grep portal) > /dev/null 2>&1; then
# No portal pod deployed in this namespace, so don't probe it.
health_endpoints=( "$sheepdog" "$peregrine" "$fence" )
else
health_endpoints=( "$sheepdog" "$peregrine" "$portal" "$fence" )
fi
# Selenium is only reachable from outside the CI runner hosts.
if [[ "$(hostname)" == *"cdis-github-org"* ]] || [[ "$(hostname)" == *"planx-ci-pipeline"* ]]; then
echo "do not include $selenium in the health check."
else
health_endpoints+=("$selenium")
fi
exit_code=0
# Probe one endpoint; record failure in exit_code if it isn't HTTP 200 and
# print an aligned "Health <url>: <status>" line either way.
checkHealth() {
status=$(curl -s -o /dev/null -w "%{http_code}" "${1}")
if [[ "$status" != "200" ]]; then
exit_code=1
fi
printf 'Health %-60s: %s\n' "${1}" "$status"
}
# Quoted array expansion so endpoints are never word-split or globbed.
for health_endpoint in "${health_endpoints[@]}"; do
checkHealth "${health_endpoint}"
done
exit $exit_code
| true |
e60a0c3a872df4bd74a23f7244433990de10384a | Shell | vicgong/GitRepository | /scripttool/stat_aliveHost.sh | UTF-8 | 189 | 3.234375 | 3 | [] | no_license | #!/bin/bash
# Filename: stat_aliveHost.sh
# Ping-sweep the 192.168.0.0/24 subnet and report which hosts answer.
for ip in 192.168.0.{0..255}; do
	# BUG FIX: output was redirected to "/dev/nul" (typo), which silently
	# created a file named "nul" instead of discarding the ping output.
	# Testing ping directly in the if also avoids the redundant $? check.
	if ping -w 10 -c 10 "$ip" &> /dev/null; then
		echo "$ip is alive"
	else
		echo "$ip is unreachable"
	fi
done
| true |
66689aa7b29f832789d4b5e76345faeaa9478b94 | Shell | pcm2718/collateral-bishop | /collateral-bishop.sh | UTF-8 | 396 | 2.6875 | 3 | [] | no_license | #!/bin/bash
# Launch the collateral-bishop renderer under MPI with a fixed rank count.
THREADS=8
mpirun -np $THREADS bin/collateral-bishop
# The post-processing below (preview with feh, convert to JPEG, and SMS
# notification via mutt) is currently disabled; re-enable once image
# output paths are stable.
#if [ $? -eq 0 ]
#then
# cp tmp/img_0.ppm tmp/feh_tmp.ppm
# feh -. --force-aliasing tmp/feh_tmp.ppm &
# ppmtojpeg tmp/feh_tmp.ppm > tmp/feh_tmp.jpeg
# textme
# echo "Success."
#else
# echo "Image generation failed." | mutt -s "Buddhabrot Notice" -- 4357573639@mms.att.net
# echo "Failure."
#fi
| true |
647adc3075238da191ece86d490bcb7cd9245e70 | Shell | flavio-silva/microservices | /run.sh | UTF-8 | 821 | 2.953125 | 3 | [] | no_license | #!/bin/bash
# Stop and remove the application's docker-compose containers.
function down_app_container() {
    # BUG FIX: the message said "Deleting latest docker image", which
    # describes a different function; report what this step actually does.
    echo "Stopping and removing containers..."
    docker-compose down
}

# Remove the previously built images so the next build starts clean.
function delete_latest_docker_image() {
    echo "Deleting latest docker image..."
    docker rmi -f fiap-microservices:latest
    docker rmi -f frontend:latest
}

# Build the Java API with Maven and the Vue front end with npm.
function build_application() {
    echo "Building java api..."
    mvn clean install -f services/java-user-api
    echo "Building Vue app..."
    cd services/front-end
    npm install
    npm run build
    cd ../..
}

function build_docker_image() {
    echo "Building docker image..."
    docker-compose build
}

# Start the freshly built containers (foreground).
function up_app_container() {
    # BUG FIX: same copy/pasted "Deleting latest docker image" message.
    echo "Starting containers..."
    docker-compose up
}

# Run and time each phase of the rebuild pipeline in order.
time (down_app_container)
time (delete_latest_docker_image)
time (build_application)
time (build_docker_image)
time (up_app_container)
| true |
3dce43964b1fb9fc31ba6aa091ba8b3eb70aa68f | Shell | dsw88/desktop-provisioning | /linux/bootstrap/install-packages | UTF-8 | 1,482 | 3.875 | 4 | [] | no_license | #!/bin/bash
# This script installs all the packages I use for development on a Debian box at FamilySearch

# Install a single apt package non-interactively; abort the whole run if
# the install fails so later steps don't build on a broken base.
function install-package {
    if ! apt-get -y install "$1"; then
        echo "Error installing $1"
        exit 1
    fi
}

# Pretty-print a section header naming the package about to be installed.
function print-header {
    printf "\n\n"
    echo "###############################"
    echo "# Installing $1"
    echo "###############################"
}

#Install Apache2
print-header apache2
install-package apache2

#Install Java7 (OpenJDK)
print-header openjdk-7-jdk
install-package openjdk-7-jdk

#Install Maven
print-header maven
install-package maven

#Install Git
print-header git
install-package git

#Install Ruby
print-header ruby
install-package ruby1.9.1

#Install Vim
print-header vim
install-package vim

#Install other packages
print-header "other-development-tools-packages"
install-package curl
install-package libreadline6-dev
# BUG FIX: this line read "install-pacakge" (typo), so zlib1g-dev was never
# installed and the script kept going despite the failed command.
install-package zlib1g-dev
install-package libssl-dev
install-package libyaml-dev
install-package libsqlite3-dev
install-package sqlite3
install-package libxml2-dev
install-package libxslt1-dev
install-package autoconf
install-package libgdbm-dev
install-package libncurses5-dev
install-package automake
install-package "make"
install-package libtool
install-package bison
install-package libffi-dev
install-package libexpat1-dev
install-package build-essential
install-package terminator

printf "\n\n"
| true |
0dc0bcb84e567bd7ee476ac2f6bbdeb4c3929e73 | Shell | microsoft/accessibility-insights-service | /packages/resource-deployment/scripts/app-insights-create.sh | UTF-8 | 1,473 | 4 | 4 | [
"MIT",
"LicenseRef-scancode-generic-cla"
] | permissive | #!/bin/bash
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# shellcheck disable=SC1090
# Create an Application Insights resource from an ARM template and fetch
# its instrumentation key into $appInsightsKey.
set -eo pipefail
export resourceGroupName
export subscription
export appInsightsKey
# Print usage and terminate; called on bad or missing arguments.
exitWithUsageInfo() {
echo "
Usage: $0 -r <resource group> -s <subscription name or id>
"
exit 1
}
# Read script arguments
while getopts ":r:s:" option; do
case $option in
r) resourceGroupName=${OPTARG} ;;
s) subscription=${OPTARG} ;;
*) exitWithUsageInfo ;;
esac
done
if [[ -z $resourceGroupName ]] || [[ -z $subscription ]]; then
exitWithUsageInfo
exit 1
fi
echo "Installing microsoft.insights extension for azure-cli"
az extension add -n application-insights
echo "Creating Application Insights resource using ARM template"
export resourceName
# Deploy the template and collect the ids of the created resources (TSV,
# one per line) so the helper below can extract the component's name.
resources=$(az deployment group create \
--subscription "$subscription" \
--resource-group "$resourceGroupName" \
--template-file "${0%/*}/../templates/app-insights.template.json" \
--query "properties.outputResources[].id" \
-o tsv)
# Sourced helper sets $resourceName from the resource path list.
. "${0%/*}/get-resource-name-from-resource-paths.sh" -p "Microsoft.insights/components" -r "$resources"
appInsightsName=$resourceName
echo "Successfully created Application Insights '$appInsightsName'"
appInsightsKey=$(az monitor app-insights component show --app "$appInsightsName" --resource-group "$resourceGroupName" --query "instrumentationKey" -o tsv)
echo "App Insights key fetched '$appInsightsKey'"
| true |
f81fae4a172f2907a57862285175fb778ec00269 | Shell | jtimberman/oc-redis | /test/integration/default/bats/verify.bats | UTF-8 | 785 | 2.71875 | 3 | [
"Apache-2.0"
] | permissive | @test "configured for port 6379 (default redis port)" {
grep -qx 'port 6379' /etc/redis/redis.conf
}
# Default instance should answer a PING on the stock host/port.
@test "redis-server is running on 127.0.0.1:6379" {
# by default, redis-cli will use -h 127.0.0.1 and -p 6379
redis-cli ping
}
# Instance configured from the "fig" data bag / recipe, managed by runit.
@test "sets up the redis-fig runit service" {
sv status redis-fig
redis-cli -p 8300 ping
}
# Instance configured entirely through node attributes: custom port,
# pidfile and bind address must all land in newton.conf and be live.
@test "sets up the redis-newton instance w/ attribute config" {
sv status redis-newton
grep -qx 'port 9700' /etc/redis/newton.conf
grep -qx 'pidfile /var/run/redis/redis-server.pid' /etc/redis/newton.conf
grep -qx 'bind 10.0.2.15' /etc/redis/newton.conf
redis-cli -h 10.0.2.15 -p 9700 ping
}
# Hyphenated instance name must map to fig-newton.conf and its own port.
@test "sets up redis-fig-newton instance" {
sv status redis-fig-newton
grep -qx 'port 7600' /etc/redis/fig-newton.conf
redis-cli -p 7600 ping
}
| true |
65ce55176a9cf81d2bf4bc7d65dd41f7ad0b96f0 | Shell | mihirjpatel/kt | /terraform-modules/gcp/modules/gke-k8s/configure_k8s.sh | UTF-8 | 2,863 | 3.578125 | 4 | [] | no_license | #!/bin/bash
set -eu
# Save certificates supplied via environment for the ingress gateway.
echo "$ISTIO_INGRESS_GATEWAY_CERT_PRIVATE_KEY" > artnet.key
echo "$ISTIO_INGRESS_GATEWAY_CERT" > artnet.pem
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# Point gcloud/kubectl at the target GKE cluster.
gcloud --quiet config set project "$GOOGLE_PROJECT_ID"
gcloud --quiet config set compute/zone "$GOOGLE_COMPUTE_ZONE"
gcloud auth activate-service-account --key-file="$GOOGLE_APPLICATION_CREDENTIALS"
gcloud container clusters get-credentials "$CLUSTER_NAME" --zone "$GOOGLE_COMPUTE_ZONE" --project "$GOOGLE_PROJECT_ID"
SERVICE_ACCOUNT_INSTALLED=$(kubectl get serviceaccount -n kube-system | grep -q tiller || echo "false")
if [ "$SERVICE_ACCOUNT_INSTALLED" = "false" ]; then
# Create a service account "Tiller" in Kubernetes that is a cluster admin
kubectl create -f "$SCRIPT_DIR/helm-service-account.yaml"
fi
# Install Tiller Kubernetes as the "Tiller" service account
helm init --service-account tiller
# Wait for Tiller to be available
kubectl rollout status -w deployment/tiller-deploy --namespace=kube-system
# Install the latest Istio
ISTIO_TMP_DIR=$(mktemp -d)
cd "$ISTIO_TMP_DIR"
curl -L https://git.io/getLatestIstio | sh -
ISTIO_PATH="$ISTIO_TMP_DIR/$(ls)"
cd -
# Using NodePort instead of LoadBalancer, for easy port forwarding and integration testing
helm install "$ISTIO_PATH/install/kubernetes/helm/istio" \
--name istio \
--namespace istio-system \
--set ingress.service.type="$INGRESS_SERVICE_TYPE" \
--set gateways.istio-ingressgateway.type="$INGRESS_SERVICE_TYPE" \
--set tracing.service.type="$SERVICE_TYPE" \
--set grafana.enabled="true" \
--set tracing.enabled="true" \
--set servicegraph.enabled="true"
# Get names of all Istio deployments.
# BUG FIX: jq now runs with -r so names come out unquoted (otherwise
# "deployment/\"name\"" is passed to kubectl and the rollout check fails).
DEPLOYMENT_NAMES=$(kubectl get deployment -o=json --namespace istio-system | jq -r ".items[].metadata.name")
# Wait for all Istio services.
# BUG FIX: feed the loop with a here-string; the old 'done <"$DEPLOYMENT_NAMES"'
# tried to open a *file* named by the variable's contents, which aborted the
# script under set -e.
while read -r line; do
echo "Checking deployment of $line"
kubectl rollout status -w "deployment/$line" --namespace=istio-system
done <<<"$DEPLOYMENT_NAMES"
# Labelling namespace default for sidecar injection
kubectl label namespace default istio-injection=enabled
# Showing all namespaces and the sidecar injection status
kubectl get namespace -L istio-injection
# delete secret if exists
kubectl delete secret artnetdev || true
kubectl delete secret database-connstr || true
# create secret for docker registry (read-only pull credentials)
kubectl create secret docker-registry \
artnetdev \
--docker-server "$DOCKER_REGISTRY" \
--docker-email "$DOCKER_USER_RO" \
--docker-username "$DOCKER_USER_RO" \
--docker-password "$DOCKER_PASSWORD_RO"
# create secret for database connection
kubectl create secret generic \
database-connstr --from-literal=connStr="${GALLERY_SQL_CONSTR}"
# save certificate secrets for the Istio ingress gateway
kubectl create -n istio-system secret \
tls istio-ingressgateway-certs \
--key "artnet.key" \
--cert "artnet.pem"
| true |
c9c8184b99484571803b484da7981f705bcbbbf9 | Shell | cchu70/perfect_mapper | /get_polished_kmer_list.sh | UTF-8 | 348 | 2.59375 | 3 | [] | no_license | #!/bin/bash
# script to make the unique kmer and true kmer list from the POLISHED version of the chrX assembly
# Requires the canu module for the meryl/meryl-lookup tools.
module load canu
# make the db (already built previously; command kept for reference)
# meryl count k=21 memory=40 threads=12 chrX.fasta output chrX.meryl
# Dump: the meryl-lookup step below emits "kmer<TAB>count" for every kmer
# of the assembly that occurs at least once in the database.
meryl-lookup -dump -sequence chrX.fasta -mers chrX.meryl -threads 12 -min 1 | awk '$5 >0 {print $4"\t"$5}' > chrX.kmer_list.txt | true |
f36c665048fdacad174fa98ebae4fe5544fc3fae | Shell | SujalAhrodia/ECE-792 | /HW3/Q2/rule.sh | UTF-8 | 113 | 2.53125 | 3 | [] | no_license | #!/bin/bash
# Derive the /24 network of the client address given in $1.
# BUG FIX: the old code took the first six *characters* of the address
# (cut -c-6), which only produced a valid prefix for addresses whose first
# three octets happen to be six characters long (e.g. 10.0.0.x). Stripping
# the final octet works for any dotted-quad.
a="${1%.*}"
ip=$a.0/24
# Masquerade traffic from this subnet that is destined outside it.
iptables -t nat -A POSTROUTING -s $ip ! -d $ip -j MASQUERADE
| true |
e4a952569f7b76d05e1cdeff078e11f7a6165955 | Shell | uesp/uesp-lucenesearch-old | /build | UTF-8 | 791 | 3.3125 | 3 | [] | no_license | #!/bin/bash
# Build a Lucene search index for a MediaWiki database.  With an argument,
# index an existing XML dump; without one, create a fresh dump first.
# config.inc provides $base, $dbname, $mediawiki and $indexes.
source ./config.inc
if [ -n "$1" ]; then
dumpfile="$1"
else
dumps="$base/dumps"
[ -e $dumps ] || mkdir $dumps
dumpfile="$dumps/dump-$dbname.xml"
timestamp=`date -u +%Y-%m-%d`
# Dump from a replica to avoid loading the master DB server.
slave=`php $mediawiki/maintenance/getSlaveServer.php \
$dbname \
--conf $mediawiki/LocalSettings.php \
--aconf $mediawiki/AdminSettings.php`
echo "Dumping $dbname..."
cd $mediawiki && php maintenance/dumpBackup.php \
$dbname \
--conf $mediawiki/LocalSettings.php \
--aconf $mediawiki/AdminSettings.php \
--current \
--server=$slave > $dumpfile
# Record when this wiki was last dumped so search freshness is visible.
[ -e $indexes/status ] || mkdir -p $indexes/status
echo "timestamp=$timestamp" > $indexes/status/$dbname
fi
cd $base &&
java -cp LuceneSearch.jar org.wikimedia.lsearch.importer.BuildAll $dumpfile $dbname
eb3ff4c35a4442f3d6a578214c2c7358cba3c27c | Shell | XGWang0/Suse_testsuite | /tests/qa_test_coreutils/qa_test_coreutils/orig_test_suite/fmt/long-line | UTF-8 | 2,784 | 2.859375 | 3 | [] | no_license | #!/bin/sh
# make sure fmt -s works even on long lines
if test "$VERBOSE" = yes; then
set -x
fmt --version
fi
# Run inside a scratch directory named after this script; the EXIT trap
# removes it while preserving the test's exit status.
pwd=`pwd`
t0=`echo "$0"|sed 's,.*/,,'`.tmp; tmp=$t0/$$
trap 'status=$?; cd $pwd; chmod -R u+rwx $t0; rm -rf $t0 && exit $status' 0
trap '(exit $?); exit $?' 1 2 13 15
framework_failure=0
mkdir -p $tmp || framework_failure=1
cd $tmp || framework_failure=1
# Build the input: a single very long line of "y " tokens (1000 of them,
# with a leading space), i.e. no newline for fmt to reuse.
(echo ' '; yes) | head -n1000 | tr '\n' ' ' > in || framework_failure=1
cat <<\EOF > exp || framework_failure=1
y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y
y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y
y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y
y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y
y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y
y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y
y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y
y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y
y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y
y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y
y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y
y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y
y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y
y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y
y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y
y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y
y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y
y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y
y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y
y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y
y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y
y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y
y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y
y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y
y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y
y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y
y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y
y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y
y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y y
y y
EOF
if test $framework_failure = 1; then
echo "$0: failure in testing framework" 1>&2
(exit 1); exit 1
fi
fail=0
# fmt -s must split the long input line into default-width lines matching
# the expected output captured in "exp".
fmt -s in > out || fail=1
cmp out exp || fail=1
# On mismatch show the diff (ignoring diff's own errors).
test $fail = 1 && diff out exp 2> /dev/null
(exit $fail); exit $fail
| true |
ee8e6c06bdac307427736a49b494dd33fe17ae0a | Shell | iforwms/dotfiles | /scripts/shutdown | UTF-8 | 1,287 | 3.96875 | 4 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/usr/bin/env bash
# Scan every git repo under ~/code for uncommitted or unpushed work before
# allowing a shutdown; only shut down when everything is clean and the
# user confirms.
echo " Checking repos..."
modifiedArray=()
unpushed=()
dirty=false
# Find each .git directory (up to 4 levels deep, following symlinks) and
# inspect its parent working tree.
while read line; do
repoPath=`echo $line | sed "s/\/.git//"`
modified=`git -C $repoPath status -s`
if [[ ! -z "$modified" ]]; then
modifiedArray+=( "$repoPath" )
fi
# "git cherry -v" lists commits not yet on the upstream branch.
unpushed=`git -C $repoPath cherry -v 2>/dev/null`
if [[ ! -z "$unpushed" ]]; then
unpushedArray+=( "$repoPath" )
fi
done < <(find -L $HOME/code -mindepth 1 -maxdepth 4 -type d -name .git -prune)
modifiedLength="${#modifiedArray[@]}"
if ((modifiedLength > 0)); then
dirty=true
echo
echo -e "\033[1;35m" Found $modifiedLength modified repos:"\033[0;37m"
echo
for i in "${modifiedArray[@]}"
do
echo " " $i
done
fi
unpushedLength="${#unpushedArray[@]}"
if ((unpushedLength > 0)); then
dirty=true
echo
echo -e "\033[1;35m" Found $unpushedLength unpushed repos:"\033[0;37m"
echo
for i in "${unpushedArray[@]}"
do
echo " " $i
done
fi
# Only offer to shut down when no repo is dirty; otherwise the listings
# above stand as the reason we refused.
if [[ ! "$dirty" = true ]]; then
read -p "Nothing to push, are you sure you want to shutdown? (y/n) " -n 1 -r
echo
if [[ ! $REPLY =~ ^[Yy]$ ]]
then
echo "Aborting shutdown..."
exit 1
fi
echo "Shutting down..."
sudo shutdown -h now
fi
| true |
5b7241a5332d746c844f0bbd2ab5fb0247b13998 | Shell | jeffersonscientific/aquaplanet | /exp/compile_aquaplanet_mazama.sh | UTF-8 | 4,574 | 3.171875 | 3 | [] | no_license | #!/bin/bash
#SBATCH -n 8
#SBATCH -o compile_AqP.out
#SBATCH -e compile_AqP.err
# unalias *
#set echo
#set -x
#
# Build the idealized_moist aquaplanet model on the Mazama cluster and
# install the results under /share/cees/software.
ROOT_DIR=`pwd`
#--------------------------------------------------------------------------------------------------------
#export platform=gaea.intel # A unique identifier for your platform
PLATFORM=gfdl_ws_64_mazama.intel # A unique identifier for your platform
template=$ROOT_DIR/../bin/mkmf.template.$PLATFORM # path to template for your platform
mkmf=$ROOT_DIR/../bin/mkmf # path to executable mkmf
sourcedir=$ROOT_DIR/../src # path to directory containing model source code
pathnames=$ROOT_DIR/../input/path_names # path to file containing list of source paths
ppdir=$ROOT_DIR/../postprocessing # path to directory containing the tool for combining distributed diagnostic output files
#--------------------------------------------------------------------------------------------------------
execdir=$ROOT_DIR/exec.$PLATFORM # where code is compiled and executable is created
executable=$execdir/idealized_moist.x
#
# Load the Intel 19 / MPICH 3 / netCDF toolchain used for this build.
module purge
module load intel/19
module load mpich_3/
module load netcdf/
module load netcdf-fortran/
#
module load autotools/
module list
#
# Version identifiers used to form the install path below.
COMP="intel19"
MPI="mpich3"
COMP_MPI="${COMP}_${MPI}"
VER="1.0.0"
#
# Legacy Gaea/Cray module setup, kept for reference only.
#source $MODULESHOME/init/csh
#module use -a /ncrc/home2/fms/local/modulefiles
#module unload PrgEnv-pgi PrgEnv-pathscale PrgEnv-intel PrgEnv-gnu PrgEnv-cray
#module unload netcdf fre
#module load PrgEnv-intel/4.0.46
#module swap intel intel/12.1.3.293
#module load netcdf/4.2.0
#module load hdf5/1.8.8
#module list
#--------------------------------------------------------------------------------------------------------
# compile combine tool (mppnccombine merges per-rank diagnostic netCDF files)
echo "ppdir: ${ppdir}"
cd $ppdir
$CC -O -c -I${NETCDF_INC} -I${NETCDF_FORTRAN_INC} mppnccombine.c
# BUG FIX: capture the compiler's status immediately.  The old code ran an
# echo between the compile and the "$?" check, so "$?" always reflected the
# (successful) echo and compile failures were never detected.
rc=$?
echo "*** compiled mppnccompine.c (step 1)"
if [[ $rc -ne 0 ]]; then
	exit 1
fi
# Link the combiner against the netCDF C and Fortran libraries.
$CC -O -o mppnccombine.x -L${NETCDF_LIB} -L${NETCDF_FORTRAN_LIB} -lnetcdf -lnetcdff mppnccombine.o
if [[ $? -ne 0 ]]; then
	exit 1
fi
#--------------------------------------------------------------------------------------------------------
#--------------------------------------------------------------------------------------------------------
# setup directory structure
#if ( ! -d $execdir ) mkdir -p $execdir
if [[ ! -d $execdir ]]; then
mkdir -p $execdir
fi
#
cd $execdir
#--------------------------------------------------------------------------------------------------------
# execute mkmf to create makefile
# cppDefs selects MPI + netCDF builds with large-file support and
# namelists read from internal files.
export cppDefs="-Duse_libMPI -Duse_netCDF -Duse_LARGEFILE -DINTERNAL_FILE_NML -DOVERLOAD_C8"
#$mkmf -a $sourcedir -t $template -p $executable:t -c "$cppDefs" $pathnames $sourcedir/shared/include $sourcedir/shared/mpp/include
$mkmf -a $sourcedir -t $template -p $executable -c "$cppDefs" $pathnames $sourcedir/shared/include $sourcedir/shared/mpp/include
#
if [[ $? -ne 0 ]]; then
echo "ERROR: mkmf failed for idealized_moist model"
exit 1
fi
# --- execute make ---
#make $executable:t
cd $execdir
echo "** ** ** do MAKE now..."
echo "** ** ** ** "
echo "** ** ** ** "
make
# Only comments separate make from the $? test, so the status is intact here.
#if ( $status != 0 ) then
if [[ $? -ne 0 ]]; then
#unset echo
echo "*** STATUS: $? ** $executable"
echo "ERROR: make failed for idealized_moist model"
exit 1
fi
#unset echo
echo "NOTE: make successful for idealized_moist model"
#
# install stuff:
# NOTE: we could set the $executable variable to do this in the compile, except that then we have to go get the postprocessing
# tool and timestamp script (ugh!) anyway, so we'll just copy everything pseudo-manually.
TARGET_DIR="/share/cees/software/aquaplanet/${COMP_MPI}/${VER}"
echo "Compile complete! Now, copy bits to: ${TARGET_DIR}"
# BUG FIX: create the target bin directory unconditionally and test the
# result directly.  The old code only ran mkdir when the directory was
# missing and then checked "$?" with a malformed test ("[[$?" -- missing
# space after "[["), which bash treated as an unknown command, so the copy
# step below was silently skipped.  mkdir -p is a no-op if the directory
# already exists.
if mkdir -p "${TARGET_DIR}/bin"; then
	cp ${ppdir}/mppnccombine.x ${TARGET_DIR}/bin/
	cp ${ROOT_DIR}/../bin/time_stamp.csh ${TARGET_DIR}/bin/
	cp $executable ${TARGET_DIR}/bin
	#
	# optionally?
	cp -r ${ROOT_DIR}/../input ${TARGET_DIR}/sample_input
	cp run_idealized_moist_on_mazama.sh ${TARGET_DIR}/
	#
fi
| true |
6120ab02733af4c02dca50820b3224a4595ad064 | Shell | walterkwon/settings | /scripts/script-project | UTF-8 | 3,990 | 3.453125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
set -euo pipefail
# USAGE
# script-project package hello-world "A CLI to say Hello World"
# script-project {project_type} {name} {description}
# {project_type} can be package, web-app, or native-mobile-app
# {name} should be a slug like hello-world
# {description} should be a short sentence like "A CLI to say Hello World"
task="Generating a project"
script-log state "$task"
# ==================================================================
# VARIABLES
# ==================================================================
# Author identity and template locations substituted into generated files.
template_path=$HOME/drive/settings/scripts/utils/projects/
user_name="trevordmiller"
user_full_name="Trevor D. Miller"
user_email="trevordmiller@icloud.com"
user_website="http://www.trevordmiller.com"
project_year=$(date +"%Y")
# Positional arguments; set -u makes missing arguments fail loudly.
project_type=$1
project_name=$2
project_description=$3
# ==================================================================
# TEMPLATES
# ==================================================================
script-log state "Generating files and folders"
case "$project_type" in
"package" )
mkdir "$project_name"
cd "$project_name"
cp -a "$template_path"/package/. ./
cp -a "$template_path"/shared/. ./
;;
"web-app" )
create-react-app "$project_name"
cd "$project_name"
cp -a "$template_path"/shared/. ./
;;
"native-mobile-app" )
create-react-native-app "$project_name"
cd "$project_name"
cp -a "$template_path"/shared/. ./
;;
esac
# VARIABLE SUBSTITUTION
grep -rl '%substitute' . --exclude-dir=node_modules | xargs sed -i '' \
-e "s|%substitute_user_name|$user_name|g" \
-e "s|%substitute_user_full_name|$user_full_name|g" \
-e "s|%substitute_user_email|$user_email|g" \
-e "s|%substitute_user_website|$user_website|g" \
-e "s|%substitute_project_year|$project_year|g" \
-e "s|%substitute_project_name|$project_name|g" \
-e "s|%substitute_project_description|$project_description|g"
# ==================================================================
# DEPENDENCIES
# ==================================================================
script-log state "Installing dependencies"
case "$project_type" in
"package" )
yarn add --dev babel-cli babel-preset-env eslint jest np
;;
esac
# ==================================================================
# SOURCE CONTROL
# ==================================================================
script-log state "Adding source control"
git init
git add -A
git commit -m "Scaffold project"
script-log action "Enter GitHub password"
curl -u "$user_name" https://api.github.com/user/repos -d "{\"name\":\"$project_name\", \"description\":\"$project_description\"}"
sleep 3
git remote add origin https://github.com/"$user_name"/"$project_name".git
git push -u origin master
# ==================================================================
# CONTINUOUS INTEGRATION & DEPLOYMENT
# ==================================================================
script-log state "Initializing continuous integration and deployment"
travis enable -r "$user_name"/"$project_name"
case "$project_type" in
"package" )
yarn publish
;;
"web-app" )
now
;;
esac
# ==================================================================
# MANUAL SETUP
# ==================================================================
script-log action "MANUAL SETUP"
script-log action "TODO: Protect master branch"
open "https://github.com/$user_name/$project_name/settings/branches"
# ==================================================================
# RESULTS
# ==================================================================
script-log state "Opening results"
open "https://github.com/$user_name/$project_name"
open "https://travis-ci.org/$user_name/$project_name"
case "$project_type" in
"package" )
open "https://www.npmjs.com/package/$project_name"
;;
"web-app" )
open "$(pbpaste)"
;;
esac
# ==================================================================
script-log state "DONE $task"
| true |
b6e1c4c04e09aa8be0e61632b4758e38dbc2e401 | Shell | perfsonar/toolkit | /toolkit/perfsonar-toolkit/scripts/system_environment/disable_zeroconf | UTF-8 | 297 | 2.921875 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
#######################
# Disable zeroconf route. Does not matter if this is running in an
# 'upgrade' or 'new install' context.
#######################
grep NOZEROCONF /etc/sysconfig/network &> /dev/null
if [ $? != 0 ]; then
cat >> /etc/sysconfig/network <<EOF
NOZEROCONF=yes
EOF
fi
| true |
f109fac47c209c5031609553fcaba81fe49df4ce | Shell | scottt/scottt-bin | /prefix | UTF-8 | 107 | 3.171875 | 3 | [] | no_license | #!/bin/sh
case $# in
1) N=$1 ;;
*) printf 'usage: prefix N_CHARACTERS\n'>&2; exit 2;;
esac
cut -c 1-"$N"
| true |
8c33aa09243580f2308476413e5a201879475a82 | Shell | vinriviere/cross-mint-ubuntu | /.github/guess_debemail.sh | UTF-8 | 440 | 2.96875 | 3 | [] | no_license | # This script fragment must be sourced by the main script file
# in order to define the DEBEMAIL variable
# . .github/guess_debemail.sh
# Unset variables which might interfere
# https://manpages.debian.org/jessie/devscripts/debchange.1.fr.html#DESCRIPTION
unset DEBFULLNAME EMAIL
# Guess maintainer name and e-mail from GPG key
export DEBEMAIL=$(gpg --list-secret-keys | sed -n 's/^uid *//p' | sed 's/.*] *//')
echo "DEBEMAIL=$DEBEMAIL"
| true |
23a65f4b415aafc3deca67bf2292fd155883b482 | Shell | humnaawan/3D-galaxies-kavli | /runscripts/get_features/bash_get_illustris_features.sh | UTF-8 | 757 | 2.625 | 3 | [] | no_license | #!/bin/bash
base_dir='/Users/humnaawan/repos/3D-galaxies-kavli/'
for proj in xy yz xz
do
summary_datapath=${base_dir}'data/sum_illustris/'${proj}'/'
for rdecider in 50 #100
do
echo 'Running for '${proj}' for Rdecider = '${rdecider}
shape_datapath=${base_dir}'outputs/illustris_z0.4_shape'${rdecider}'/'
outdir=${base_dir}'outputs/illustris_z0.4_3proj_shape'${rdecider}'_features/'
python /Users/humnaawan/repos/3D-galaxies-kavli/runscripts/get_features/get_features.py \
--summary_datapath=${summary_datapath} \
--shape_datapath=${shape_datapath} --outdir=${outdir} \
--data_tag=${proj} --summed_data --rdecider=${rdecider}
done
done
| true |
336957723ff07471e0e9d1cca21d292ac38260bc | Shell | qingw/dotfiles-3 | /bin/bootstrap-macos.sh | UTF-8 | 1,863 | 3.359375 | 3 | [] | no_license | #!/bin/bash
# check if ssh key exist
if ! [ -f "$HOME/.ssh/id_rsa" ]; then
echo "please copy your ssh key first"
exit 1
fi
# install xcode commandline tools
# if ! [ -d "/Library/Developer/CommandLineTools" ]; then
if ! /usr/sbin/pkgutil --pkg-info com.apple.pkg.CLTools_Executables; then
echo "install Xcode CommandLine Tools"
# This file prompts that Command Line Tools should be installed
sudo /usr/bin/touch /tmp/.com.apple.dt.CommandLineTools.installondemand.in-progress
sudo /usr/sbin/softwareupdate -i "$(/usr/sbin/softwareupdate -l | /usr/bin/grep -B 1 -E "Command Line (Developer|Tools)" | /usr/bin/awk -F"*" '/^ +\\*/ {print $2}' | /usr/bin/sed 's/^ *//' | /usr/bin/tail -n1)"
sudo /bin/rm -f /tmp/.com.apple.dt.CommandLineTools.installondemand.in-progress
# sudo /usr/bin/xcode-select --switch /Library/Developer/CommandLineTools
fi
# if ! [ -d "/Library/Developer/CommandLineTools" ]; then
if ! /usr/sbin/pkgutil --pkg-info com.apple.pkg.CLTools_Executables; then
echo "install Xcode CommandLine Tools failed, Please install it manually"
sudo /usr/bin/xoode-select --install
exit 1
fi
# install homebrew
if ! [ -f "/usr/local/bin/brew" ]; then
/usr/bin/ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
fi
git clone git@github.com:SteamedFish/dotfiles.git
cd ~/dotfiles && git submodule update --init --recursive --remote
# install Xcode
/usr/local/bin/brew install mas
if ! /usr/local/bin/mas account; then
/usr/local/bin/mas signin steamedfish@me.com
fi
xcode_id=$(/usr/local/bin/mas search Xcode | /usr/bin/grep -E '^[0-9]* *Xcode$' | /usr/bin/awk '{print $1}')
/usr/local/bin/mas install ${xcode_id}
# accept Xcode license
sudo xcodebuild -license accept
if [[ "$(kextstat | grep "FakeSMC")" != "" ]]; then
# disable computer sleep
sudo pmset -a sleep 0
fi
| true |
e4f2c6095b8721135467d2dde907af3b9dab8723 | Shell | zhangyancoder/original | /run_multi.sh | UTF-8 | 2,779 | 3.328125 | 3 | [] | no_license | #!/bin/bash
starttime=`date "+%Y_%m_%d_%H_%M_%S"`
single_sleeptime=1
cycle_sleeptime=10
cycle=3
LOGDIR=./log
FIODIR=/home/simth/fio-2.14
iostateip=`cat ./monitor_ip.conf`
collect_basic()
{
rm -rf basicinfo
echo starttime: $starttime >> basicinfo
for node in `cat monitor_ip.conf`;do
echo ============================
echo $node basicinfo is: >> basicinfo
ssh $node sh /home/simth/basicinfo.sh >> basicinfo
done
}
run_withmulticlient()
{
# clear history log
for((i=0;i<${cycle};i++))
do
rm -rf log$i
done
for((i=0;i<${cycle};i++));do
./fullfill.sh
mkdir log${i}
for singleline in `cat run_config`;
do
kill -9 `ps -ef|grep iostat|awk '{print $2}'`
kill -9 `ps -ef|grep sar|grep DEV| awk '{print $2}'`
nohup ./collectstat.sh &
#for singlenode in `cat cacehlist`
#do
#ssh ${singlenode} echo 3 > /proc/sys/vm/drop_caches
#done
str_cmd="${FIODIR}/fio "
j=0
realtime=`date "+%Y_%m_%d_%H_%M_%S"`
str_subfix=`echo $singleline |awk -F "_" '{print $1"_"$2"_"$3"_"$4"_"$5"_"$6"_"$7"_"$8"_"$9}'`
# Set iostat and sar monitor interval to 2 seconds, so total monitor count is duration/2
runtime=`cat ./log/${singleline}|grep "runtime="|awk -F "=" '{print int($2)/10}'`
warmup=`cat log/${singleline}|grep ramp_time=1|awk -F '=' '{print int($2)/10}'`
echo warmup is $warmup ================
runtime=$[runtime + warmup]
echo runtime is: $runtime
for k in $iostateip
do
echo "Test case_log is:" log${i}/${singleline}_${realtime}.log >> /home/simth/log${i}/log_iostate_$k
done
nohup ./iostat_ceph.sh $runtime log${i} &
for fioserver in `cat fioserver_list.conf`
do
str_client="--client $fioserver ${LOGDIR}/${str_subfix}_${j}.config"
str_cmd="${str_cmd} ${str_client}"
j=$[j+1]
done
echo command is: $str_cmd
$str_cmd |tee log${i}/${singleline}_${realtime}.log
echo ===================================
sleep $single_sleeptime
done
cp run_config log${i}/
cp rbdpoolstat.log log${i}/
cp poolsize.log log${i}/
rm -rf rbdpoolstat.log
rm -rf poolsize.log
sleep $cycle_sleeptime
done
kill -9 kill -9 `ps -ef|grep collectstat|awk '{print $2}'`
result_collet()
{
rm -rf result_tmp
mkdir result_tmp
mkdir result_tmp/base
cp log0/* result_tmp/base/
cp run_config result_tmp/
cp pre_multiclient.sh result_tmp/
cp resultcollect.sh result_tmp/
cp total.sh result_tmp/
cd result_tmp/
./total.sh
cp total_result.csv ../
cd ..
}
tar -zcf ${starttime}.tar.gz basicinfo log*
}
echo cycle to run: $cycle
collect_basic
run_withmulticlient
| true |
eb9f8cdc4af53e99bf47d8a27b711f8a8df3b731 | Shell | izderadicka/mybookshelf2 | /deploy/cleanup.sh | UTF-8 | 864 | 3.8125 | 4 | [] | no_license | #!/bin/bash
set -e -x
if [[ ! -f .env ]]; then
echo "init.sh script was not run (no .env file), exiting"
exit 1
fi
cat <<EOF
This script will clean all docker artifacts (images, volumes, containers ...) from deployment.
ALL DATA WILL BE LOST!
Do you want to continue?
EOF
read -p "Enter y to continue: " ans
if [[ $ans != "y" ]]; then
exit 0
fi
export $(cat .env | xargs)
if [[ $MBS2_ENVIRONMENT == developement ]]; then
compose_files=""
elif [[ $MBS2_ENVIRONMENT == stage ]]; then
compose_files="-f docker-compose.yml -f docker-compose-stage.yml"
else
echo Uknown environment - exiting
exit 1
fi
echo Starting cleanup of $MBS2_ENVIRONMENT environment
docker-compose $compose_files down --rmi all -v
docker rmi `docker images -f "reference=mbs2*" -q`
if [[ $MBS2_ENVIRONMENT == stage ]]; then
docker volume rm code
fi
rm .env | true |
c859d565e335d45eea888f54e7947ef5bc937640 | Shell | petronny/aur3-mirror | /tagurit/PKGBUILD | UTF-8 | 582 | 3.09375 | 3 | [] | no_license | # Maintainer: Thomas Dziedzic < gostrc at gmail >
pkgname=tagurit
pkgver=0.1
pkgrel=1
pkgdesc='Watches git repos for new tags.'
arch=('any')
url='https://github.com/gostrc/tagurit'
license=('unknown')
depends=('ruby' 'git')
build() {
local _gitroot='git://github.com/gostrc/tagurit.git'
local _gitname='tagurit'
if [ -d ${_gitname} ] ; then
cd ${_gitname}
git pull
else
git clone ${_gitroot} ${_gitname}
cd ${_gitname}
fi
git checkout 0.1
}
package() {
local _gitname='tagurit'
cd ${_gitname}
install -D tagurit.rb \
${pkgdir}/usr/bin/tagurit
}
| true |
f95aaa7063c84ac5ba24a1dc2ef408663833cebc | Shell | callerc1/dotfiles-old | /rbenv/install.sh | UTF-8 | 361 | 3.5625 | 4 | [] | no_license | #!/bin/sh
#
# Installs -
# rbenv
#
###############################################################################
# Include the general functions
. ./functions/general
# Check for Homebrew
if ! command_exists brew; then
print_error "Nooooooo! Homebrew isn't installed! Can't install rbenv"
else
print_block "Installing rbenv"
brew install rbenv
fi
| true |
2f7619febeeffce30ca3866dedd900d9682c32bf | Shell | koraynilay/linux-custom-scripts | /push.sh | UTF-8 | 325 | 3.296875 | 3 | [] | no_license | #!/bin/sh
git status
git add . -v
git status
#sleep 1
git commit -m "$(date +%Y-%m-%d_%H:%M:%S)"
#sleep 1
notify-send -a GitHub "pushing changes in $PWD..."
git push
if [ $? -eq 0 ];then
notify-send -a GitHub "changes in $PWD pushed"
else
notify-send -u critical -a GitHub "error in push of changes in $PWD"
fi
| true |
782f01373a5a3cb3c6712d9c43fdac0a3535a7a1 | Shell | aesadde/dotfiles | /ARCHIVE/bashrc | UTF-8 | 1,246 | 3.171875 | 3 | [] | no_license | # ===[ Variables ]=== {{{1
OSTYPE="$(uname -s)"
ARCH="$(uname -m)"
DOTF="$HOME/dotfiles"
#1}}}
# ===[ Global options ]=== {{{1
#Append to history file
shopt -s histappend
#Autocorrect typos in path names when using cd
shopt -s cdspell
#expand aliases
shopt -s expand_aliases
#vi editing mode
set -o vi
#Case insensitive globbing for pathname expansion
shopt -s nocaseglob
#Shell colors
export CLICOLOR=1
export LSCOLORS=dxgxcxdxcxegedacagacad
#keeping everything clean. Source all the files
[ -r $DOTF/customFunctions ] && [ -f $DOTF/customFunctions ] && source $DOTF/customFunctions
[[ -f $DOTF/aliases ]] && source $DOTF/aliases
#1}}}
# ===[ OS specific ]=== {{{1
if [ $OSTYPE == "Darwin" ]; then
[[ -f $DOTF/osx.settings ]] && source $DOTF/osx.settings
[[ -f $DOTF/aliases.local ]] && source $DOTF/aliases.local
elif [ $OSTYPE == "Linux" ]; then
export PATH="$HOME/.local/bin:$PATH"
export LD_LIBRARY_PATH="$HOME/local/lib:/lib:/lib64"
export PS1='\e[0;33m\W $\e[0;37m'
elif [ "$(expr substr $OSTYPE 1 10)" == "MINGW32_NT" ]; then
export EDITOR="/c/Program\ Files\ (x86)/Vim/vim74/gvim.exe"
fi
#1}}}
export PATH="$PATH:$HOME/.rvm/bin" # Add RVM to PATH for scripting
[ -f ~/.fzf.bash ] && source ~/.fzf.bash
| true |
14ee5aea550de243c292e7ea4017ce6a2bb2461d | Shell | luballe/raspi-forest | /sw/Papa/scripts/wait_4_host.sh | UTF-8 | 543 | 3.21875 | 3 | [
"MIT"
] | permissive | #!/bin/sh
hostname=$1
path="/home/pi/scripts/"
pipe_sufix="_pipe_status"
pipe=$path$hostname$pipe_sufix
var1=0
if [ ! -p "$pipe" ]
then
msg="No named pipe: "
echo $msg$pipe
exit 1
fi
read var1 < $pipe
#command=$commmand$pipe
#eval $command
#read var1 < /home/pi/scripts/forest2_status
while [ $var1 -eq 0 ]
do
msg=" is not Alive! Trying again in 1 sec..."
echo $hostname$msg
read var1 < $pipe
#eval $command
#read var1 < /home/pi/scripts/forest2_status
#echo $var1
sleep 1
done
msg=" is up!"
echo $hostname$msg
sleep 5
| true |
b13ad48c4b22e3a7f3b9475ab5e99012c6088031 | Shell | MarcSteven/ScriptForXcode | /check_image.sh | UTF-8 | 1,397 | 3.734375 | 4 | [] | no_license | #!/bin/sh
# check_image.sh
#
#
# Created by Marc Zhao on 2017/4/28.
#
function display_code {
ERROR_LOCATION=$(grep -Ron "\[UIImage imageNamed:\s*@\"$1\"\s*\]" $PROJECT_NAME)
if [[ -z $ERROR_LOCATION ]]; then
ERROR_LOCATION=$(grep -Ron "UIImage(named\:\s*\"$1\"\s*)" $PROJECT_NAME)
fi
ERROR_LOCATION=$(echo $ERROR_LOCATION | cut -d ':' -f 1,2)
echo "$ERROR_LOCATION: error: Missing imageset with name $1"
}
function display_img {
local IMG_LOC=$(find "$PROJECT_NAME" -name "$1.imageset" | sed 's/.xcassets\//.xcassets:.\//')
echo "$IMG_LOC/:: error: No more refs to imageset $1"
}
USED_NAMES=()
#find obj-c [UIImage imageNamed:@""]
USED_NAMES+=($(grep -Ron '\[UIImage imageNamed:\s*@"[^"]*"\s*\]' $PROJECT_NAME | cut -d '"' -f 2 | sort -u))
#find swift UIImage(named "")
USED_NAMES+=($(grep -Ron 'UIImage(named\:\s*"[^"]\{1,\}"\s*)' $PROJECT_NAME | cut -d '"' -f 2 | sort -u))
#find images names in assets
PRESENTED_IMAGES=$(find "$PROJECT_NAME" -name *.imageset | grep -v Pods | /usr/bin/sed -e 's/.*\///' -e 's/\.imageset$//' | sort -u)
EXIT_CODE=0
echo "Missing imageset with name:"
for name in $(comm -23 <(printf '%s' "$USED_NAMES") <(printf '%s' "$PRESENTED_IMAGES")); do
show_code $name
EXIT_CODE=1
done
echo "No more refs to imageset:"
for name in $(comm -13 <(printf '%s' "$USED_NAMES") <(printf '%s' "$PRESENTED_IMAGES")); do
show_img $name
EXIT_CODE=1
done
echo
exit $EXIT_CODE
| true |
7d1e4a77ee6df7938b9251f44ba41bae7a349c15 | Shell | UndergroundLabs/ZeroMQ-PHP-Ubuntu-Install | /zeromq-php.sh | UTF-8 | 514 | 3.25 | 3 | [] | no_license | #!/bin/bash
# This script will install ZeroMQ and the PHP language bindings
# on Ubuntu 14. This script will only install php5-cli.
#
# Author: James Jeffery <jameslovescode@gmail.com>
# Always run this on a fresh machine
apt-get update
# Install ZMQ and other required tools
apt-get install -f git libzmq3 libzmq3-dev php5-cli php-pear php5-dev pkg-config
# Install ZMQ via pecl
echo "" | pecl install zmq-beta
# Add the extension
echo "extension=zmq.so" > /etc/php5/cli/conf.d/zmq.ini
# You're all done
echo 'Roger that. All done' | true |
dde5825ee779487fc3e441fb3edf70d281f1fa83 | Shell | heqianying/biosys-analytics | /assignments/02-bash-scripting-grad/hello.sh | UTF-8 | 315 | 3.234375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
GREETINGS=$1
NAME=${2:-Human}
if [[ $# -eq 0 ]]; then
printf "Usage: %s hello.sh GREETING [NAME]\n"
exit 1
elif [[ $# -gt 2 ]]; then
echo "Usage: %s hello.sh GREETING [NAME]\n"
exit 1
elif [[ $2 -eq 0 ]]; then
echo ""$GREETINGS","$NAME"!"
exit 0
else
echo ""$GREETINGS","$NAME"!"
fi
| true |
3fdcb3fc17168f4e08d0c87b4d8876d15cecb628 | Shell | vcodery/Utils | /src/main/sbin/delay.sh | UTF-8 | 789 | 3.71875 | 4 | [] | no_license | #!/bin/bash
###########################################################
### FUNC : Statistical command execution delay
### USAGE : delay.sh your_command
### Example: delay.sh sleep 1s
### BY : vcodery
### DATE : 2019/06/01
###########################################################
# mark start time
starttime=`date +'%Y-%m-%d %H:%M:%S'`
echo "======================[ starting execute at $starttime ]======================"
# your command
cmd=$@
echo "execute cmd : $cmd"
$cmd
# mark end time
endtime=`date +'%Y-%m-%d %H:%M:%S'`
echo "======================[ finished execute at $endtime ]======================"
# count
start_seconds=$(date --date="$starttime" +%s);
end_seconds=$(date --date="$endtime" +%s);
echo "本次运行时间: "$((end_seconds-start_seconds))"s"
| true |
b853c4c63ccdbd995a32bd8e500e65f4939aca09 | Shell | salevajo/benchmark | /fetch.sh | UTF-8 | 183 | 2.515625 | 3 | [] | no_license | #!/bin/bash
cd data
for i in {0..9}
do
echo "Downloading thread$i.zip"
curl http://downloads.digitalcorpora.org/corpora/files/govdocs1/threads/thread$i.zip -o thread$i.zip
done
| true |
891aa65ef1ceb8ed66bca4a14d233a2d406da02e | Shell | LayerDE/Einfuehrung-ins-Betriebssystem-Linux | /exam/2 KonsoleBashscripting/2a.sh | UTF-8 | 72 | 2.546875 | 3 | [] | no_license | #!/bin/bash
if test $b -le 42
then
echo "<=42"
else
echo ">=42"
fi
| true |
d54606164473565db8d442bfae976911de8c215a | Shell | renesas-rcar/meta-renesas | /meta-rcar-adas/recipes-bsp/bsp-config/files/bsp-config_v3m.sh | UTF-8 | 2,325 | 3.015625 | 3 | [
"MIT"
] | permissive | #!/bin/sh
SYSFS_UIO_DRIVER="/sys/bus/platform/drivers/uio_pdrv_genirq/"
SYSFS_VSPD_DRIVER="/sys/bus/platform/drivers/vsp1/"
SYSFS_DU_DRIVER="/sys/bus/platform/drivers/rcar-du/"
SYSFS_CSI_DRIVER="/sys/bus/platform/drivers/rcar-csi2/"
SYSFS_VIN_DRIVER="/sys/bus/platform/drivers/rcar-vin/"
SYSFS_LVDS_DRIVER="/sys/bus/platform/drivers/rcar-lvds/"
VSPD_DEVICE="fea20000.vsp_00"
DU_DEVICE="feb00000.du_00"
CSI_DEVICE="feaa0000.csi_00"
VIN0_DEVICE="e6ef0000.vin_00"
VIN1_DEVICE="e6ef1000.vin_01"
VIN2_DEVICE="e6ef2000.vin_02"
VIN3_DEVICE="e6ef3000.vin_03"
LVDS_DEVICE="feb90000.lvds_00"
if [ "x$1" = "xdefault" ]
then
echo "Setting up default configuration"
# Unbind all devices from the UIO driver
echo $LVDS_DEVICE > $SYSFS_UIO_DRIVER/unbind
echo $VSPD_DEVICE > $SYSFS_UIO_DRIVER/unbind
echo $DU_DEVICE > $SYSFS_UIO_DRIVER/unbind
echo $CSI_DEVICE > $SYSFS_UIO_DRIVER/unbind
echo $VIN0_DEVICE > $SYSFS_UIO_DRIVER/unbind
echo $VIN1_DEVICE > $SYSFS_UIO_DRIVER/unbind
echo $VIN2_DEVICE > $SYSFS_UIO_DRIVER/unbind
echo $VIN3_DEVICE > $SYSFS_UIO_DRIVER/unbind
# BIND them with Linux drivers
echo $LVDS_DEVICE > $SYSFS_LVDSkk_DRIVER/bind
echo $VSPD_DEVICE > $SYSFS_VSPD_DRIVER/bind
echo $DU_DEVICE > $SYSFS_DU_DRIVER/bind
echo $CSI_DEVICE > $SYSFS_CSI_DRIVER/bind
echo $VIN0_DEVICE > $SYSFS_VIN_DRIVER/bind
echo $VIN1_DEVICE > $SYSFS_VIN_DRIVER/bind
echo $VIN2_DEVICE > $SYSFS_VIN_DRIVER/bind
echo $VIN3_DEVICE > $SYSFS_VIN_DRIVER/bind
fi
if [ "x$1" = "xadas" ]
then
echo "Setting up ADAS configuration"
# Unbind all devices from the Linux drivers
echo $DU_DEVICE > $SYSFS_DU_DRIVER/unbind
echo $VSPD_DEVICE > $SYSFS_VSPD_DRIVER/unbind
echo $LVDS_DEVICE > $SYSFS_LVDS_DRIVER/unbind
echo $CSI_DEVICE > $SYSFS_CSI_DRIVER/unbind
echo $VIN0_DEVICE > $SYSFS_VIN_DRIVER/unbind
echo $VIN1_DEVICE > $SYSFS_VIN_DRIVER/unbind
echo $VIN2_DEVICE > $SYSFS_VIN_DRIVER/unbind
echo $VIN3_DEVICE > $SYSFS_VIN_DRIVER/unbind
# BIND them with UIO driver
echo $DU_DEVICE > $SYSFS_UIO_DRIVER/bind
echo $VSPD_DEVICE > $SYSFS_UIO_DRIVER/bind
echo $LVDS_DEVICE > $SYSFS_UIO_DRIVER/bind
echo $CSI_DEVICE > $SYSFS_UIO_DRIVER/bind
echo $VIN0_DEVICE > $SYSFS_UIO_DRIVER/bind
echo $VIN1_DEVICE > $SYSFS_UIO_DRIVER/bind
echo $VIN2_DEVICE > $SYSFS_UIO_DRIVER/bind
echo $VIN3_DEVICE > $SYSFS_UIO_DRIVER/bind
fi
| true |
35cc3fc2266b34731c9836740cc8bb8d7b33755d | Shell | jhcook/miscellaneous | /srechallenge/bootstrap.sh | UTF-8 | 1,277 | 3.703125 | 4 | [] | no_license | #!/usr/bin/env bash
# A simple utility to install cpx-server in Kubernetes.
#
# Requires: Python3 kubectl Docker Minikube
#
# Tested on: macOS Big Sur 11
#
# Author: Justin Cook <jhcook@secnix.com>
set -o nounset
set -o errexit
# Check dependencies in PATH
for cmd in docker kubectl python3 minikube
do which ${cmd} 2>&1 >/dev/null || {
echo "command not found in PATH: ${cmd}" ; exit 1 ; }
done
# Run unit tests
python3 -m unittest discover -s .
# Create Minikube VM
echo "Starting Minikube..."
minikube start --addons registry,ingress --insecure-registry "localhost"
# Build Docker image and push to local registry
eval $(minikube docker-env)
echo "Building cpx_server Docker image..."
cd src
# https://stackoverflow.com/questions/37573476/
# docker-complaining-about-all-proxy-environment-variable-with-proxy-unknown-sch
unset ALL_PROXY
docker build --tag localhost:5000/cpx_server .
docker push localhost:5000/cpx_server
cd $OLDPWD
# Deploy to Kubernetes
if [ ! -d "tmp" ]
then
mkdir tmp
fi
sed -e "s/__DRHOST__/localhost:5000/g" src/cpx-server-deployment.yaml > \
tmp/cpx-server.yaml
cat src/cpx-server-service.yaml >> tmp/cpx-server.yaml
cat src/cpx-server-ingress.yaml >> tmp/cpx-server.yaml
echo "Deploying cps-server components"
kubectl apply -f tmp/cpx-server.yaml | true |
824396c9cd3c9db55deb5e56483a547cdcf9af88 | Shell | shizonic/packages | /net/open-iscsi/files/open-iscsi | UTF-8 | 1,756 | 3.625 | 4 | [
"GPL-2.0-only",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/bin/sh /etc/rc.common
START=50
STOP=50
#USE_PROCD=1
DAEMON=/sbin/iscsid
ADM=/sbin/iscsiadm
PIDFILE=/var/run/iscsid.pid
log()
{
logger -t iscsid -s -p daemon.info "$@"
}
#start_service() {
# procd_open_instance
# procd_set_param command $DAEMON -f -d 8 -p $PIDFILE
# procd_set_param respawn
# procd_close_instance
#}
prepare() {
[ ! -f /etc/iscsi/initiatorname.iscsi ] && logger "Generate initator name ..." \
&& /usr/sbin/iscsi-gen-initiatorname
mkdir -p /var/run/lock/iscsi
}
unmount() {
TARGETS="$(iscsiadm -m session -P 3 | grep disk | awk '{print $4}')"
for VAL in $TARGETS; do
MOUNTED="$(mount | grep $VAL | awk '{print $1}')"
for PART in $MOUNTED; do
log "Umount $PART"
$(umount $PART)
done
done
RETVAL=$?
}
start() {
pgrep -o iscsid && return 1
prepare
$DAEMON -p $PIDFILE
starttargets
}
starttargets() {
log "Setting up iSCSI targets"
$ADM -m node -l
}
stoptargets() {
unmount
if [ $RETVAL -ne 0 ]; then
log "Could not umount all partitions, verify file lock!"
return $RETVAL
fi
log "Disconnecting iSCSI targets"
sync
$ADM -m node -u
}
stop() {
pgrep -o iscsid || return 1
stoptargets
#needs more checks
if [ $RETVAL -ne 0 ]; then
log "Could not stop all targets, try again later"
return $RETVAL
fi
log "Stopping iSCSI initiator service"
$ADM -k 0 2
rm -f $PIDFILE
}
restart() {
stop
if [ $RETVAL -ne 0 ]; then
log "Stopping iSCSI initiator service failed, not starting"
return $RETVAL
fi
start
}
restarttargets() {
stoptargets
if [ $RETVAL -ne 0 ]; then
log "Could not stop all targets, try again later"
return $RETVAL
fi
starttargets
}
| true |
51277deaf5eacfa89daaed937cfb3afa4f80c01b | Shell | rushyang/GTU-OS-Bash-Scripts | /Working/System Scripts/hist_back.sh | UTF-8 | 477 | 3.609375 | 4 | [] | no_license | #! /bin/bash
temp=$(mktemp history.XXXX)
cp "$HOME/.bash_history" "$temp"
# Fetchng today's date.
now=`date +%d%b%Y_%H`
# My History back up directory location
histdir="$HOME/Experiments/AutoHistBackup"
if [[ ! -d $histdir ]]; then
mkdir -p "$histdir"
fi
month=`date +%Y_%b`
path="$histdir/$month"
if [[ -d "$path" ]]; then
mv "$temp" "$path"
mv "$path/$temp" "$path/$now"
else
mkdir "$histdir/$month"
mv "$temp" "$path"
mv "$path/$temp" "$path/$now"
fi
ls "$path/"
| true |
dcac968e8a44c77398f8aa651b3c541e7043e515 | Shell | corps/dotfiles | /profile/path.bash | UTF-8 | 130 | 2.53125 | 3 | [] | no_license | DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
export PATH=$HOME/bin:$PATH
export PATH=$DIR/../bin:$PATH
| true |
52fffea8ba9684dcf3c5a3b2c9eed8671b346f74 | Shell | linuxsquad/IPA_bulk_password_expiration_change | /modifyExpDate.sh | UTF-8 | 2,476 | 3.90625 | 4 | [] | no_license | #!/bin/bash
#
# AUTHOR: linuxsquad
#
# DATE: 2017-03-07
#
# DESCRIPTION: changing user password expiration day in bulk
#
# PRE-REQUISIT: Gain admin privileges prior to running the script (kinit <admin>)
#
# INPUT: -E|e [YYYYMMDD] A new [E]xpiration date, this must be provided if -M option used.
# -M [M]odify the expiration date; if ommitted, script just shows the affected users.
# -T|t "string" String argument used to [T]est when current user passwords expiring, for instance "next week", "tomorrow", "next month", etc. If ommitted, using "next week"
#
# OUTPUT: list of affected users
#
# RELEASE NOTE: 1.0
# 1.1 Command line option for entering a new expiration date
#
# USAGE EXAMPLE:
# 1- Find out all users whos password expires next week:
#
# ./modifyExpDate.sh -t "next week"
#
# 2- Update expiration date to [Oct-01-2020] for all users whos password expires next month:
#
# ./modifyExpDate.sh -t "next month" -M -e "20201001"
typeset TEMPLATE="./modify.ldif"
typeset NEWEXPDATEADD="230101Z"
typeset PASSFILE="./password"
t_string="next week"
while getopts "Mt:T:e:E:" opt
do
case "${opt}" in
M) actionFlag="MDF"
;;
t|T) t_string=${OPTARG}
;;
e|E) t_expdate=${OPTARG}
;;
\?)
echo " ERR01: Invalid option: -${OPTARG}"
exit 1
;;
esac
done
if [ "X"${actionFlag} == "XMDF" ] &&
[[ $(/bin/date +%Y%m%d ) -gt ${t_expdate} ]]
then
echo " ERR02: A new expiration date has to be in a future. Use the following format: [YYYYMMDD]"
exit 1
fi
typeset TESTDATE=$(/bin/date -d "${t_string}" +%Y%m%d)
typeset NEWEXPDATE=${t_expdate}"${NEWEXPDATEADD}"
echo "The following user passwords are expiring prior to the "${TESTDATE}" and will be changed to "${t_expdate:-"[NON SUPPLIED]"}
sleep 5
ipa user-find | grep User\ login | cut -d":" -f2 | while read user
do
# extract password expiration date
expdate=$( ldapsearch -x -h localhost -p 389 -vv uid=$user 2>&1 | grep Expira | sed -E -e 's/^.*\: //' -e 's/[0-9]{6}Z//' )
# if password expiration is within time interval
if [[ ${expdate} < ${TESTDATE} ]]
then
echo -e ${expdate}"\t"${user}
if [ "X"${actionFlag} == "XMDF" ]
then
sed -e "s/\%\%USER\%\%/$user/" -e "s/\%\%EXPIRATIONDATE\%\%/${NEWEXPDATE}/" ${TEMPLATE} > ${TEMPLATE}"."${user}
ldapmodify -x -h localhost -D "cn=Directory Manager" -y ${PASSFILE} -f ${TEMPLATE}"."${user}
fi
fi
wait
done
| true |
27189db58b340095c9c52c7a4293f5ad5f63fd34 | Shell | jirikadlec2/rushvalley | /python/runUpload.sh | UTF-8 | 440 | 3.125 | 3 | [
"MIT"
] | permissive | #! /bin/bash
cd /home/WWO_Admin/decagonUpload/
#get the current date/time for the logfile
now=$(date)
#parse out whitespace
now_mod=${now// /_}
now_mod=${now_mod//__/_}
log=logfiles/$now_mod"_log.log"
#run the transfer and write output to log
./data_transfer.py $now >> $log
#clean up the .dxd files generated
cd dxd/
rm *.dxd
#clean up the logfiles if they're becoming cluttered (only 14 allowed at a time)
cd ../
./clean_logs.py
| true |
081c4654de29a63fad5e0bf20e331daf34c14b1e | Shell | nworbnhoj/gargoyle | /package/plugin-gargoyle-openvpn/files/www/utility/openvpn_upload_client.sh | UTF-8 | 9,973 | 2.96875 | 3 | [] | no_license | #!/usr/bin/haserl --upload-limit=1048576 --upload-dir=/tmp/
<%
# This program is copyright © 2012-2013 Eric Bishop and is distributed under the terms of the GNU GPL
# version 2.0 with a special clarification/exception that permits adapting the program to
# configure proprietary "back end" software provided that all modifications to the web interface
# itself remain covered by the GPL.
# See http://gargoyle-router.com/faq.html#qfoss for more information
eval $( gargoyle_session_validator -c "$COOKIE_hash" -e "$COOKIE_exp" -a "$HTTP_USER_AGENT" -i "$REMOTE_ADDR" -r "login.sh" -t $(uci get gargoyle.global.session_timeout) -b "$COOKIE_browser_time" )
echo "Content-Type: text/html; charset=utf-8"
echo ""
echo '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">'
echo '<html xmlns="http://www.w3.org/1999/xhtml">'
echo '<body>'
ip_to_int()
{
ip=$1
ip_parts=$(echo $ip | sed 's/\./ /g')
mult=256*256*256;
count=0
for p in $ip_parts ; do
count=$(( $count + ($mult*p) ))
mult=$(( $mult/256 ))
done
echo $count
}
int_to_ip()
{
int=$1
ip=""
for m in $((256*256*256)) $((256*256)) 256 1 ; do
next=$(( $int/$m ))
int=$(( $int - ($next*$m) ))
if [ -z "$ip" ] ; then
ip="$next"
else
ip="$ip.$next"
fi
done
echo $ip
}
restart_network=0
old_replace_ip=""
new_replace_ip=""
dir_rand=$(</dev/urandom tr -dc a-z | head -c 12)
tmp_dir="/tmp/vpn_client_upload_$dir_rand"
mkdir -p "$tmp_dir"
cd "$tmp_dir"
client_name=$(uci get openvpn_gargoyle.client.id 2>/dev/null)
if [ -z "$client_name" ] ; then
client_name_rand=$(</dev/urandom tr -dc a-z | head -c 12)
client_name="grouter_client_$client_name_rand"
fi
error=""
tab=$(printf "\t")
ta_direction=""
block_non_openvpn=0
if [ -s "$FORM_openvpn_client_zip_file" ] ; then
is_targz=$(dd if="$FORM_openvpn_client_zip_file" bs=1 count=2 2>/dev/null | hexdump -v -e '1/1 "%02x"')
if [ "x$is_targz" = "x1f8b" ] ; then
tar xzf "$FORM_openvpn_client_zip_file" >/dev/null 2>&1
else
unzip "$FORM_openvpn_client_zip_file" >/dev/null 2>&1
fi
OLD_IFS="$IFS"
IFS=$(printf "\n\r")
files=$(find .)
for f in $files ; do
if [ ! -d "$f" ] ; then mv "$f" . ; fi
done
for f in $files ; do
if [ -d "$f" ] && [ "$f" != "." ] ; then rm -rf "$f" ; fi
done
IFS="$OLD_IFS"
conf_file=$(grep -l "^[$tab ]*ca\|^[$tab ]*cert" * 2>/dev/null | head -n 1)
ca_file=$( egrep "^[$tab ]*ca[$tab ]+" "$conf_file" | sed 's/^.*\///g' | sed 's/[\t ]*$//g' | sed 's/^.*[\t ]//g' )
cert_file=$(egrep "^[$tab ]*cert[$tab ]+" "$conf_file" | sed 's/^.*\///g' | sed 's/[\t ]*$//g' | sed 's/^.*[\t ]//g' )
key_file=$( egrep "^[$tab ]*key[$tab ]+" "$conf_file" | sed 's/^.*\///g' | sed 's/[\t ]*$//g' | sed 's/^.*[\t ]//g' )
ta_file=$( egrep "^[$tab ]*tls\-auth[$tab ]+" "$conf_file" | egrep "^[$tab ]*tls\-auth[$tab ]+" | awk ' { print $2 } ' | sed 's/^.*\///g' )
ta_direction=$( egrep "^[$tab ]*tls\-auth[$tab ]+" "$conf_file" | egrep "^[$tab ]*tls\-auth[$tab ]+" | awk ' { print $3 } ' )
if [ -e ./block_non_openvpn ] ; then
block_non_openvpn=1
fi
if [ -s network ] ; then
expected_ip=$(awk ' $0 ~ /ipaddr/ { print $NF }' network)
expected_mask=$(awk ' $0 ~ /netmask/ { print $NF }' network)
if [ -n "$expected_ip" ] && [ -n "$expected_mask" ] ; then
cur_ip=$(uci get network.lan.ipaddr)
cur_mask=$(uci get network.lan.netmask)
cur_ip_int=$(ip_to_int $cur_ip)
cur_mask_int=$(ip_to_int $cur_mask)
cur_sub_ip_int=$(($cur_ip_int & $cur_mask_int))
cur_sub_ip=$(int_to_ip $cur_sub_ip_int)
exp_ip_int=$(ip_to_int $expected_ip)
exp_mask_int=$(ip_to_int $expected_mask)
cur_test=$(( $cur_mask_int & $cur_ip_int ))
exp_test=$(( $exp_mask_int & $exp_ip_int ))
if [ "$cur_test" != "$exp_test" ] ; then
new_ip_int=$(($exp_ip_int+1))
new_ip=$(int_to_ip $new_ip_int)
if [ "$FORM_net_mismatch_action" = "query" ] ; then
echo "<script type=\"text/javascript\">top.clientNetMismatchQuery(\"$expected_ip/$expected_mask\",\"$cur_sub_ip/$cur_mask\", \"$new_ip\" );</script>"
echo "</body></html>"
cd /tmp
rm -rf "$tmp_dir"
exit
elif [ "$FORM_net_mismatch_action" = "change" ] ; then
old_dns=$(uci get network.lan.dns)
if [ "$old_dns" = "$cur_ip" ] ; then
uci set network.lan.dns="$new_ip"
fi
uci set network.lan.ipaddr="$new_ip"
uci set network.lan.netmask="$expected_mask"
uci commit
restart_network=1
old_replace_ip="$cur_ip"
new_replace_ip="$new_ip"
fi
#do nothing if net_mismatch_action is "keep"
fi
fi
fi
if [ ! -f "$ca_file" ] ; then
error=$(i18n openvpn.uc_CA_f)
elif [ ! -f "$cert_file" ] ; then
error=$(i18n openvpn.uc_crt_f)
elif [ ! -f "$key_file" ] ; then
error=$(i18n openvpn.uc_key_f)
elif [ ! -f "$conf_file" ] ; then
error=$(i18n openvpn.uc_cfg_f)
else
cat "$conf_file" | tr -d "\r" > "${client_name}.conf"
cat "$ca_file" | tr -d "\r" > "${client_name}_ca.crt"
cat "$cert_file" | tr -d "\r" > "${client_name}.crt"
cat "$key_file" | tr -d "\r" > "${client_name}.key"
rm "$conf_file" "$ca_file" "$cert_file" "$key_file"
if [ -f "$ta_file" ] ; then
cat "$ta_file" | tr -d "\r" > "${client_name}_ta.key"
rm "$ta_file"
fi
fi
rm "$FORM_openvpn_client_zip_file"
elif [ -s "$FORM_openvpn_client_conf_file" ] && [ -s "$FORM_openvpn_client_ca_file" ] && [ -s "$FORM_openvpn_client_cert_file" ] && [ -s "$FORM_openvpn_client_key_file" ] ; then
cat "$FORM_openvpn_client_conf_file" | tr -d "\r" > "${client_name}.conf"
cat "$FORM_openvpn_client_ca_file" | tr -d "\r" > "${client_name}_ca.crt"
cat "$FORM_openvpn_client_cert_file" | tr -d "\r" > "${client_name}.crt"
cat "$FORM_openvpn_client_key_file" | tr -d "\r" > "${client_name}.key"
rm "$FORM_openvpn_client_conf_file" "$FORM_openvpn_client_ca_file" "$FORM_openvpn_client_cert_file" "$FORM_openvpn_client_key_file"
if [ -s "$FORM_openvpn_client_ta_key_file" ] ; then
ta_direction=$( egrep "^[$tab ]*tls\-auth[$tab ]+" "${client_name}.conf" | egrep "^[$tab ]*tls\-auth[$tab ]+" | awk ' { print $3 } ' )
cat "$FORM_openvpn_client_ta_key_file" | tr -d "\r" > "${client_name}_ta.key"
rm "$FORM_openvpn_client_ta_key_file"
fi
elif [ -n "$FORM_openvpn_client_conf_text" ] && [ -n "$FORM_openvpn_client_ca_text" ] && [ -n "$FORM_openvpn_client_cert_text" ] && [ -n "$FORM_openvpn_client_key_text" ] ; then
printf "$FORM_openvpn_client_conf_text" | tr -d "\r" > "${client_name}.conf"
printf "$FORM_openvpn_client_ca_text" | tr -d "\r" > "${client_name}_ca.crt"
printf "$FORM_openvpn_client_cert_text" | tr -d "\r" > "${client_name}.crt"
printf "$FORM_openvpn_client_key_text" | tr -d "\r" > "${client_name}.key"
if [ -n "$FORM_openvpn_client_ta_key_text" ] ; then
ta_direction=$( egrep "^[$tab ]*tls\-auth[$tab ]+" "${client_name}.conf" | egrep "^[$tab ]*tls\-auth[$tab ]+" | awk ' { print $3 } ' )
printf "$FORM_openvpn_client_ta_key_text" | tr -d "\r" > "${client_name}_ta.key"
fi
fi
#For client config, ta_direction can be 1 (client) or omitted, but never 0 (server) or anything else
if [ "$ta_direction" != "1" ] ; then
ta_direction=""
else
ta_direction=" 1"
fi
if [ ! -f "${client_name}.conf" ] ; then
error=$(i18n openvpn.uc_cfg_f)
fi
if [ -z "$error" ] ; then
sed -i 's/^[\t ]*ca[\t ].*$/ca \/etc\/openvpn\/'"${client_name}_ca.crt"'/g' "${client_name}.conf"
sed -i 's/^[\t ]*cert[\t ].*$/cert \/etc\/openvpn\/'"${client_name}.crt"'/g' "${client_name}.conf"
sed -i 's/^[\t ]*key[\t ].*$/key \/etc\/openvpn\/'"${client_name}.key"'/g' "${client_name}.conf"
sed -i 's/^[\t ]*status[\t ].*$/status \/var\/openvpn\/current_status/g' "${client_name}.conf"
if [ -f "${client_name}_ta.key" ] ; then
sed -i 's/^[\t ]*tls\-auth[\t ].*$/tls-auth \/etc\/openvpn\/'"${client_name}_ta.key${ta_direction}"'/g' "${client_name}.conf"
fi
#proofreading
use_tap=$(egrep "^[$tab ]*dev[$tab ]+tap" "${client_name}.conf")
if [ -n "$use_tap" ] ; then
error=$(i18n openvpn.uc_TAP_Err)
fi
if [ -z "$error" ] ; then
mv "${client_name}.conf" "${client_name}_ca.crt" "${client_name}.crt" "${client_name}.key" /etc/openvpn/
if [ -e "${client_name}_ta.key" ] ; then
mv "${client_name}_ta.key" /etc/openvpn/
fi
#run constant uci commands
uci set openvpn_gargoyle.server.enabled="false" >/dev/null 2>&1
uci set openvpn_gargoyle.client.enabled="true" >/dev/null 2>&1
uci set openvpn_gargoyle.client.id="$client_name" >/dev/null 2>&1
uci set openvpn.custom_config.config="/etc/openvpn/$client_name.conf" >/dev/null 2>&1
uci set openvpn.custom_config.enabled="1" >/dev/null 2>&1
#block non-openvpn traffic to prevent leak if openvpn quits unexpectedly?
if [ "$block_non_openvpn" = "1" ] ; then
uci set openvpn_gargoyle.@client[0].block_non_openvpn="true"
fi
uci commit
#run other commands passed to script (includes firewall config and openvpn restart)
if [ -n "$FORM_commands" ] ; then
tmp_file="$tmp_dir/tmp.sh"
printf "%s" "$FORM_commands" | tr -d "\r" > "$tmp_file"
sh "$tmp_file"
if [ "$restart_network" = "1" ] && [ -n "$old_replace_ip" ] && [ -n "$new_replace_ip" ] ; then
sh /usr/lib/gargoyle/update_router_ip.sh "$old_replace_ip" "$new_replace_ip"
sh /usr/lib/gargoyle/restart_network.sh
fi
fi
wait_secs=25
have_tun_if=$(ifconfig 2>/dev/null | grep "^tun")
while [ -z "$have_tune_if" ] && [ "$wait_secs" -gt 0 ] ; do
sleep 1
have_tun_if=$(ifconfig 2>/dev/null | grep "^tun")
wait_secs=$(( $wait_secs - 1 ))
done
if [ -z "$have_tun_if" ] ; then
error=$(i18n openvpn.uc_conn_Err)
fi
fi
fi
result="$error"
if [ -z "$error" ] ; then
result="Success"
fi
echo "<script type=\"text/javascript\">top.clientSaved(\"$result\");</script>"
echo "</body></html>"
cd /tmp
rm -rf "$tmp_dir"
%>
| true |
422ef2c157b3b540fa205c1425c5ee6565d9a287 | Shell | to4iki/LensKit | /Scripts/generate-auto-lens.sh | UTF-8 | 444 | 3.9375 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Resolve our own name (for the usage message) and directory (the
# Sourcery templates live next to this script).  $( ) replaces the
# legacy backticks, and "$0" is quoted against paths with spaces.
CMD=$(basename "$0")
DIR=$(dirname "$0")
# Positional arguments: sources to scan, and the code-generation output dir.
SOURCE_DIR=$1
OUTPUT_DIR=$2
if [[ $# -ne 2 ]]; then
  echo "Usage: $CMD <source-dir> <code-generated-dir>" 1>&2
  exit 1
fi
# 'command -v' is the shell-builtin, portable way to test for a tool on
# PATH; 'which' is an external program with unreliable exit codes on
# some platforms.
if command -v sourcery >/dev/null; then
  # Run sourcery
  sourcery --sources "$SOURCE_DIR" --templates "$DIR"/../Templates --output "$OUTPUT_DIR"
else
  echo "Warning: sourcery not installed, download from https://github.com/krzysztofzablocki/Sourcery"
fi
| true |
789edde132cf42bef56882b6674e4fc5e77ff7ad | Shell | DuyNguyen1879/mariaDB-master-to-master | /replication_status.sh | UTF-8 | 519 | 3.171875 | 3 | [] | no_license | #! /bin/bash
# Credentials for the local MariaDB/MySQL master.
# NOTE(review): a password on the command line is visible in 'ps' output
# and shell history -- prefer a protected option file (~/.my.cnf).
PW=P4SSWORD
DBUSER=DB-USER
# Binlog file + executed position as reported by the remote slave (via ssh).
remoteSlave=$(ssh -p22 node-db2 /home/user/mysql_slave_status.sh | grep -E -i "Master_Log_File|Exec_Master_Log_Pos" | awk '{print $2}' | uniq)
# Binlog file + current write position on the local master.
localMaster=$(mysql -u "$DBUSER" -p"$PW" -e "show master status\G" | grep -E "mariadb|Position" | awk '{print $2}')
# printf expands \n portably; the original 'echo -n "...\n"' printed the
# two literal characters backslash-n instead of a newline (plain bash
# echo does not interpret escapes without -e).
printf 'REPLICATION STATUS IS:\n'
if [ "$localMaster" = "$remoteSlave" ]; then echo "OK"; else echo "NOT OK"; fi
printf '\n'
printf 'remote Slave: %s\n' "$remoteSlave"
printf 'local Master: %s\n' "$localMaster"
| true |
c089008e72a3e9447c9d662af296a1967dcbd5f8 | Shell | amin-o/picoctf-2018 | /cryptography/crypto_warmup_1.sh | UTF-8 | 649 | 3.4375 | 3 | [] | no_license | #!/usr/bin/bash
# picoCTF "Crypto Warmup 1": Vigenere-style decryption of a lowercase
# ciphertext against a lowercase key of equal length.
message="llkjmlmpadkkc"
key="thisisalilkey"

# decrypt CIPHERTEXT KEY
# Writes the uppercase plaintext to stdout.  For each position, the key
# letter is subtracted from the cipher letter modulo 26 and the result
# is mapped onto 'A'..'Z'.
decrypt()
{
    local ciphertext=$1 keytext=$2
    local plaintext="" i c_chr k_chr c_int k_int p_int
    for (( i = 0; i < ${#ciphertext}; i++ )); do
        c_chr=${ciphertext:$i:1}
        k_chr=${keytext:$i:1}
        # ASCII codes of the current ciphertext/key characters
        # (printf "%d" "'x" yields the code of x).
        c_int=$(printf '%d' "'$c_chr")
        k_int=$(printf '%d' "'$k_chr")
        # Shift back by the key letter (mod 26), then offset by 65 ('A').
        p_int=$(( ((c_int - k_int + 26) % 26) + 65 ))
        plaintext=${plaintext}$(printf "\\x$(printf '%x' "$p_int")")
    done
    printf '%s' "$plaintext"
}

echo "picoCTF{$(decrypt "$message" "$key")}"
| true |
f5127d7b761b694855edc5ea4cc0ad94d1fa3a46 | Shell | shamazmazum/DPorts | /net/DarwinStreamingServer/files/darwin_streaming_server.in | UTF-8 | 609 | 2.921875 | 3 | [] | no_license | #!/bin/sh
#
# $FreeBSD: head/net/DarwinStreamingServer/files/darwin_streaming_server.in 340872 2014-01-24 00:14:07Z mat $
#
# PROVIDE: darwin_streaming_server
# REQUIRE: streamingadminserver
#
# Default to disabled; a setting in rc.conf overrides this.
darwin_streaming_server_enable=${darwin_streaming_server_enable-"NO"}
# FreeBSD rc framework helpers (run_rc_command, load_rc_config, ...).
. /etc/rc.subr
name=darwin_streaming_server
rcvar=darwin_streaming_server_enable
# %%PREFIX%% is substituted by the ports framework at install time.
command=%%PREFIX%%/sbin/DarwinStreamingServer
load_rc_config ${name}
# If the streaming admin server is enabled (any yes/true/on/1 spelling),
# force this standalone service off -- presumably the admin server takes
# over launching the streaming server; confirm against that rc script.
case "$streamingadminserver_enable" in
[Yy][Ee][Ss]|[Tt][Rr][Uu][Ee]|[Oo][Nn]|1)
darwin_streaming_server_enable="NO"
;;
esac
pidfile=/var/run/DarwinStreamingServer.pid
run_rc_command "$1"
| true |
c4701d758bf60337afea3ef07db85bce04d24695 | Shell | wolfgangbrandl/DB2_restore_test | /util.bash | UTF-8 | 9,821 | 3.59375 | 4 | [] | no_license | #!/usr/bin/bash
#-------------------------------------------------------------------
# stoppt alle Applikationen fuer die mitgegebene DB
#-------------------------------------------------------------------
# create_db DBNAME CONTAINER_ROOT LOG_ROOT
# Creates an automatic-storage DB2 database DBNAME with its tablespace
# containers under CONTAINER_ROOT and its active transaction logs under
# LOG_ROOT.  All directories are created first so CREATE DATABASE and
# the log-path update cannot fail on missing paths.
function create_db ()
{
DBT=$1
CONT=$2
LOG=$3
printf "Anlegen der Source Datenbank %s with Automatic Storage\n" "$DBT"
# Storage layout: data tablespaces, four extra TS_* container trees,
# database metadata, archive-log and active-log directories.
mkdir -p "$CONT"/tablespace/"$DBT"
mkdir -p "$CONT"/TS_U_SPACE/"$DBT"
mkdir -p "$CONT"/TS_M_SPACE/"$DBT"
mkdir -p "$CONT"/TS_B_SPACE/"$DBT"
mkdir -p "$CONT"/TS_N_SPACE/"$DBT"
mkdir -p "$CONT"/metadata/"$DBT"
mkdir -p "$LOG"/log/archlog/"$DBT"
mkdir -p "$LOG"/log/"$DBT"
db2 -v "CREATE DATABASE $DBT
AUTOMATIC STORAGE YES
ON '$CONT/tablespace/$DBT'
DBPATH ON '$CONT/metadata/$DBT'
USING CODESET IBM-850 TERRITORY AT
COLLATE USING IDENTITY
PAGESIZE 4096
DFT_EXTENT_SZ 32
CATALOG TABLESPACE MANAGED BY AUTOMATIC STORAGE
EXTENTSIZE 4
AUTORESIZE YES
INITIALSIZE 32 M
MAXSIZE NONE
TEMPORARY TABLESPACE MANAGED BY AUTOMATIC STORAGE
EXTENTSIZE 32
FILE SYSTEM CACHING
USER TABLESPACE MANAGED BY AUTOMATIC STORAGE
EXTENTSIZE 32
AUTORESIZE YES
INITIALSIZE 32 M
MAXSIZE NONE"
# Move the active transaction logs onto the dedicated log filesystem.
db2 -v "update db cfg for $DBT using newlogpath $LOG/log/$DBT"
# Touch the database once: connect and immediately reset (+o = quiet).
db2 +o connect to "$DBT"
db2 +o connect reset
}
#-------------------------------------------------------------------
# Check Backup
#-------------------------------------------------------------------
# check_backup RC LOGFILE
# Validates the outcome of a "db2 backup" run.  RC is the exit status of
# the backup command and LOGFILE the captured db2 output.  On failure
# the whole script aborts with exit code 8; on success the log file is
# echoed and the backup timestamp is printed as the last output line
# (capture it with command substitution).
check_backup ()
{
RC=$1
File=$2
# Log-file name without its extension, used only in the error message.
# (The original piped the *contents* of $File through awk, which printed
# one token per log line instead of the file's base name.)
basefilename=${File%%.*}
if [ "$RC" -ne 0 ]
then
  printf "BACKUP %s failed\n" "$basefilename"
  exit 8
else
  # db2 reports "... The timestamp for this backup image is : <ts>";
  # the timestamp is the 11th whitespace-separated field of that line.
  base=$(grep "The timestamp for this backup image is" "$File" | awk '{print $11}')
  cat "$File"
  echo "$base"
  # 'return' only accepts a status in 0..255; returning the 14-digit
  # timestamp wrapped it mod 256 into a bogus, usually non-zero status.
  # Callers read the timestamp from stdout instead.
  return 0
fi
}
#-------------------------------------------------------------------
# stoppt alle Applikationen fuer die mitgegebene DBT
#-------------------------------------------------------------------
#-------------------------------------------------------------------
# force PATTERN -- disconnect every db2 application whose entry in
# "db2 list applications" matches PATTERN (normally the database name).
#-------------------------------------------------------------------
function force ()
{
# Column 3 of "db2 list applications" holds the application handle.
db2 list applications | grep "$1" | awk '{print $3}' |
while read -r handle
do
  printf "force Application: %s\n" "$handle"
  db2 force application \("$handle"\)
done
}
#-------------------------------------------------------------------
# check return code
#-------------------------------------------------------------------
#-------------------------------------------------------------------
# check_RC STATUS MESSAGE -- abort the script (exit 8) on a fatal status.
# Status 0 is success and status 2 is treated as a tolerated warning;
# anything else prints "MESSAGE Returns: STATUS" and exits.
#-------------------------------------------------------------------
check_RC ()
{
RC=$1
MSG=$2
# Collapsed the original nested ifs into one compound condition.
if [ "$RC" -ne 0 ] && [ "$RC" -ne 2 ]
then
  printf "%s Returns: %s\n" "$MSG" "$RC"
  exit 8
fi
}
#-------------------------------------------------------------------
# check db2 return code
#-------------------------------------------------------------------
#-------------------------------------------------------------------
# check_sqlcode SQLCODE MESSAGE -- interpret a db2 CLP sqlcode.
# 1271 is reported as a warning (not all tablespaces restored) and
# tolerated; 0 is success; everything else aborts the script (exit 8)
# with a timestamped error line.
#-------------------------------------------------------------------
check_sqlcode ()
{
sqlcode=$1
msg=$2
case "$sqlcode" in
  1271)
    echo "Warning not all tablespaces restored"
    return 0
    ;;
  0)
    # Success -- nothing to report.
    ;;
  *)
    error_msg="$msg rc = $sqlcode"
    printf "Datum: %s Error: %s\n" "$(date)" "$error_msg"
    exit 8
    ;;
esac
}
#-------------------------------------------------------------------
# Befuellen der Tabellen
#-------------------------------------------------------------------
# insert_into_table DBNAME TABLE ROWCOUNT
# Connects to DBNAME and inserts ROWCOUNT rows of random 32-character
# data into TABLE, committing once at the end.  Aborts via check_sqlcode
# when the connect fails.
insert_into_table ()
{
DBT=$1
tablename=$2
maxc=$3
sqlcode=$(db2 +o -ec "connect to $DBT")
check_sqlcode "$sqlcode" "Connect failed "
ccnt=0
pid=$$
while [ "$ccnt" -lt "$maxc" ]; do
  ccnt=$((ccnt + 1))
  # /dev/urandom never blocks; /dev/random could stall the loop while
  # the kernel gathers entropy.  tr restricts the value to [a-zA-Z0-9 ],
  # so it is safe to splice into the SQL literal below.
  obj=$( < /dev/urandom tr -dc 'a-zA-Z0-9 ' | fold -w 32 | head -n 1)
  # +c keeps the unit of work open; the single commit follows the loop.
  db2 +o +c "insert into $tablename (pid,object) values($pid,'$obj')"
done
db2 +o commit
db2 +o connect reset
}
#-------------------------------------------------------------------
# Update der Tabellen
#-------------------------------------------------------------------
# update_table DBNAME TABLE
# Connects to DBNAME and rewrites the 'object' column of every row whose
# current value starts with the same character as a freshly generated
# random 32-character string.
update_table ()
{
DBT=$1
tablename=$2
sqlcode=$(db2 +o -ec "connect to $DBT")
check_sqlcode "$sqlcode" "Connect failed "
# /dev/urandom never blocks (/dev/random could stall waiting for
# entropy); the charset is restricted to [a-zA-Z0-9 ] so the value is
# safe inside the SQL literal.
obj=$( < /dev/urandom tr -dc 'a-zA-Z0-9 ' | fold -w 32 | head -n 1)
short=${obj:0:1}
db2 -v "update $tablename set object='$obj' where object like '$short%'"
}
#-------------------------------------------------------------------
# Versucht die mitgegebene Datenbank zu stoppen und dann zu loeschen
#-------------------------------------------------------------------
# smooth_drop_without_archive_logs DBNAME CONTAINER_ROOT LOG_ROOT
# Forces all applications off DBNAME, drops and uncatalogs it, then
# wipes and recreates its storage directories.  Unlike smooth_drop this
# variant does not touch the archive-log tree.
function smooth_drop_without_archive_logs ()
{
DBT=$1
CONT=$2
LOG=$3
# Guard against empty arguments: the rm -rf below would otherwise hit
# the shared parent directories (e.g. "$CONT/tablespace/").
: "${DBT:?database name required}"
: "${CONT:?container root required}"
: "${LOG:?log root required}"
force "$DBT"
db2 +o connect to "$DBT"
RC=$?
if [ $RC -ne 0 ]
then
# \n added: the original printf left the message without a newline.
printf "Database %s does not exist\n" "$DBT"
db2 uncatalog database "$DBT"
else
db2 QUIESCE DATABASE IMMEDIATE FORCE CONNECTIONS
db2 UNQUIESCE DATABASE
db2 +o connect reset
db2 drop database "$DBT"
db2 uncatalog database "$DBT"
fi
# Wipe and recreate every directory that belonged to the database.
# (Removing the directory itself also clears dotfiles, which the old
# per-directory "rm -rf dir/*" pattern left behind.)
for dir in "$CONT"/tablespace/"$DBT" \
"$CONT"/TS_U_SPACE/"$DBT" \
"$CONT"/TS_M_SPACE/"$DBT" \
"$CONT"/TS_B_SPACE/"$DBT" \
"$CONT"/TS_N_SPACE/"$DBT" \
"$CONT"/metadata/"$DBT" \
"$LOG"/log/"$DBT" \
"$PWD"/logretain
do
rm -rf "$dir"
mkdir -p "$dir"
done
}
#-------------------------------------------------------
# Versucht die mitgegebene Datenbank zu stoppen und dann zu koeschen
#-------------------------------------------------------
# smooth_drop DBNAME CONTAINER_ROOT LOG_ROOT
# Forces all applications off DBNAME, drops and uncatalogs it, then
# wipes and recreates its storage directories including the archive-log
# tree (the only difference from smooth_drop_without_archive_logs).
function smooth_drop ()
{
DBT=$1
CONT=$2
LOG=$3
# Guard against empty arguments: the rm -rf below would otherwise hit
# the shared parent directories (e.g. "$CONT/tablespace/").
: "${DBT:?database name required}"
: "${CONT:?container root required}"
: "${LOG:?log root required}"
force "$DBT"
db2 +o connect to "$DBT"
RC=$?
if [ $RC -ne 0 ]
then
# \n added: the original printf left the message without a newline.
printf "Database %s does not exist\n" "$DBT"
db2 uncatalog database "$DBT"
else
db2 "QUIESCE DATABASE IMMEDIATE FORCE CONNECTIONS"
db2 "UNQUIESCE DATABASE"
db2 +o "connect reset"
db2 drop database "$DBT"
db2 uncatalog database "$DBT"
fi
# Wipe and recreate every directory that belonged to the database.
# (Removing the directory itself also clears dotfiles, which the old
# per-directory "rm -rf dir/*" pattern left behind.)
for dir in "$CONT"/tablespace/"$DBT" \
"$CONT"/TS_U_SPACE/"$DBT" \
"$CONT"/TS_M_SPACE/"$DBT" \
"$CONT"/TS_B_SPACE/"$DBT" \
"$CONT"/TS_N_SPACE/"$DBT" \
"$CONT"/metadata/"$DBT" \
"$LOG"/log/archlog/"$DBT" \
"$LOG"/log/"$DBT" \
"$PWD"/logretain
do
rm -rf "$dir"
mkdir -p "$dir"
done
}
#-------------------------------------------------------
# Monitoring Table content
#-------------------------------------------------------
#-------------------------------------------------------
# mon_table DBNAME -- print the row count and the newest
# 'uptime' value of each of the six QTEST test tables.
#-------------------------------------------------------
function mon_table ()
{
DBT=$1
printf "Table Content\n"
db2 +o connect to "$DBT"
# Counts first, then maxima -- same order as the original hand-written
# statement list; -x suppresses column headers.
for tbl in TABLEA TABLEB TABLEC TABLEG TABLEM TABLEN
do
db2 -x "select '$tbl Count: ' || count(*) from QTEST.$tbl WITH UR"
done
for tbl in TABLEA TABLEB TABLEC TABLEG TABLEM TABLEN
do
db2 -x "select '$tbl Max: ' || max(uptime) from QTEST.$tbl WITH UR"
done
db2 +o connect reset
}
#-------------------------------------------------------
# Monitoring Tablespace State
#-------------------------------------------------------
# mon_tablespace DBNAME
# Prints the name and state of every tablespace of DBNAME.
function mon_tablespace ()
{
DBT=$1
printf "Tablespace State\n"
db2 +o connect to "$DBT"
# -x suppresses column headers; varchar() keeps the columns narrow.
db2 -x "select varchar(TBSP_NAME,20) as TABLESPACE,varchar(TBSP_STATE,15) as STATE from table(sysproc.MON_GET_TABLESPACE('',-1))"
db2 +o connect reset
}
#-------------------------------------------------------
# Monitoring Database Container Path
#-------------------------------------------------------
# mon_container DBNAME
# Prints the partition number, type and path of every database
# storage path reported by ADMIN_LIST_DB_PATHS().
function mon_container ()
{
DBT=$1
printf "Database Containers\n"
db2 +o connect to "$DBT"
db2 -x "SELECT DBPARTITIONNUM, char(TYPE,40), char(PATH,100) FROM TABLE(ADMIN_LIST_DB_PATHS()) AS FILES"
db2 +o connect reset
}
#-------------------------------------------------------
# Create tables generated
#-------------------------------------------------------
# create_table DBNAME TABLENAME TABLESPACE
# Creates the test table $QUAL.TABLENAME in TABLESPACE with an
# always-generated identity key ('ind') and a row-change timestamp
# ('uptime').
# NOTE(review): $QUAL (schema qualifier) is not set in this function --
# it is expected to come from the calling environment; confirm.
function create_table ()
{
DBT=$1
TABLENAME=$2
TABLESPACE=$3
db2 +o connect to "$DBT"
db2 -v "create table $QUAL.$TABLENAME (ind integer not null generated always as identity (start with 1 increment by 1),
pid integer not null default 1,
crtime timestamp not null default current timestamp,
uptime timestamp not null generated always for each row on update as row change timestamp,
object varchar(255) ,
primary key (ind,crtime)
) in $TABLESPACE"
db2 +o connect reset
}
#-------------------------------------------------------
# Create tables non generated
#-------------------------------------------------------
# create_tablenon_generated DBNAME TABLENAME TABLESPACE
# Variant of create_table without generated columns: 'ind' and both
# timestamps are plain defaulted columns instead.
# NOTE(review): this function uses "db2 -x -o" for connect/reset while
# its siblings use "db2 +o" -- looks inconsistent; confirm intent.
function create_tablenon_generated ()
{
DBT=$1
TABLENAME=$2
TABLESPACE=$3
db2 -x -o connect to "$DBT"
db2 -v "create table $QUAL.$TABLENAME (ind integer not null default 100,
pid integer not null default 1,
crtime timestamp not null default current timestamp,
uptime timestamp not null default current timestamp,
object varchar(255) ,
primary key (ind,crtime)
) in $TABLESPACE"
db2 -x -o connect reset
}
| true |
a827bbf49ab5acc6278ef99e6b90b866a06394c4 | Shell | jshelton/newVerify | /oldVerify/vci | UTF-8 | 2,656 | 4.125 | 4 | [
"MIT"
] | permissive | #!/bin/sh
########################
# Name: [vci]
# Created: Tuesday, January 22, 2019
# Source: Joshua Shelton
# Description:
# This will look at the rsync running process to verify images in
# that path and keep running until the rsync process has finished.
#
# Improvements:
# - This currently uses verifyPath. and verifyImport.sh
# Modified:
# 2019-01-22 - Changed name to vci (verify current import)
# First two CLI arguments: an option flag and an optional filename.
_OPTIONS=$1
_FNAME=$2
# Set to "on" to enable the DEBUG/DEBUG_TEST helpers below.
_DEBUG="off"
# Progress log and the external verification script this wrapper drives.
LOG_FILE=$HOME/tmp/verifyLog
VERIFY_SCRIPT_PATH=$HOME/local/bin/verifyPath.sh
# DEBUG CMD [ARGS...]
# Runs CMD with its arguments only when _DEBUG is "on"; otherwise does
# nothing (and returns 1, because the && short-circuits).
function DEBUG()
{
# "$@" keeps each argument intact; the original unquoted $@ re-split
# arguments that contained whitespace.
[ "$_DEBUG" == "on" ] && "$@"
}
# DEBUG_TEST EXPR...
# Evaluates "[ EXPR ]" (via eval, so EXPR may arrive as several words),
# records the outcome in the globals RUN_RESULT (raw exit status) and
# RESULT ("TRUE"/"FALSE"), and echoes the verdict when _DEBUG is "on".
# NOTE(review): eval executes caller-supplied text -- use only with
# trusted, hard-coded test expressions.
function DEBUG_TEST()
{
	eval " [ $@ ] "
	RUN_RESULT=$?
	if [ $RUN_RESULT = 0 ]; then
		RESULT="TRUE"
	else
		RESULT="FALSE"
	fi
	# The && makes the function return 1 when _DEBUG is off.
	[ "$_DEBUG" == "on" ] && echo "TEST [$@]: $RESULT"
}
# usage -- print the script's name, synopsis and description to stdout.
function usage()
{
	# Script Name... (${0##*/} also handles relative invocations such
	# as ./vci, which the previous ${0##/*/} pattern did not match)
	echo "${0##*/}:"
	# Usage for script...
	#REPLACE NEXT LINE
	echo "usage: ${0##*/} "
	echo
	# Description for script... printf expands the leading \t; plain
	# bash echo printed the two characters backslash-t literally.
	#REPLACE NEXT LINE
	printf '\t %s\n' "This script looks in rsync to see if any directories are copying files. It will run verifyPath.sh to verify the destination folder. "
	# ...Description of script..."
	echo
	# Switch descriptions...
	# echo "Options: "
	##REPLACE NEXT LINE
	# echo "-o, --option\t: ..option description..."
	##REPLACE NEXT LINE
	# echo "-o2, --option2\t: ...option description..."
	# echo
	# Tips for using script...
	# echo "Tips:"
	##REPLACE NEXT LINE
	# echo "...Tips for usage..."
}
### Begining of Process Arguments ---
# To get help
if [[ "$_OPTIONS" = "-h" || "$_OPTIONS" = "--help" ]]; then
usage
exit
# Option 1
#elif [[ "$_OPTIONS" = "-o" || "$_OPTIONS" = "--option" ]]; then
# DEBUG echo "option 2"
# Option 2
#elif [[ "$_OPTIONS" = "-o2" || "$_OPTIONS" = "--option2" ]]; then
# DEBUG echo "option 2"
# ...Enter Switches here...
fi
# Default
# if a valid filename is the first argument, then ignore all other switches.
if [[ ! -e $_FNAME && -e $1 ]]; then
_FNAME=$1
fi
### End of Process Arguments ---
### Begin of Program ---
# Keep verifying for as long as an rsync that copies into the Import
# volume is still running.  NOTE(review): screen-scraping 'ps | grep'
# is fragile (pgrep -f would be sturdier), and 'tee $LOG_FILE' without
# -a truncates the log on every message -- confirm if intended.
while ps | grep -v grep | grep 'rsync.*/Volumes/Seagate Pictures 8TB/Import/.*' > /dev/null;
do
# Destination directory: the sed strips everything up to the last " /"
# of the matched rsync command line, leaving its final absolute path.
WORK_DIR="$(ps | grep -v grep | grep 'rsync.*/Volumes/Seagate Pictures 8TB/Import/.*' | head -n 1 | sed -e 's#.* /#/#')";
if [ -d "$WORK_DIR" ];
then
cd "$WORK_DIR";
echo "Verifying Dir: $(pwd)" | tee $LOG_FILE;
$VERIFY_SCRIPT_PATH ;
else
echo "Trouble changing directory: '$WORK_DIR'";
fi;
echo "$(date) - Still Running..." | tee $LOG_FILE ;
sleep 10;
done
# rsync finished: verify the last working directory again.
# NOTE(review): the verify script is deliberately run twice below, but
# the reason is undocumented -- confirm before simplifying.
echo "Last Check" > $LOG_FILE
$VERIFY_SCRIPT_PATH | tee $LOG_FILE
$VERIFY_SCRIPT_PATH | tee $LOG_FILE
echo "Finished Last Check" > $LOG_FILE
### End of Program ---
DEBUG echo "DONE"
| true |
ed009a85dd1520e9a45fee2b5d52102452a114ff | Shell | Ponce/slackbuilds | /network/AdGuardHome/rc.AdGuardHome | UTF-8 | 1,037 | 4.03125 | 4 | [] | no_license | #!/bin/bash
# Start/stop/restart the AdGuard Home
# Daemon binary, its YAML configuration, working directory and PID file.
bin=/usr/sbin/AdGuardHome
config=/etc/AdGuardHome.yaml
workdir=/var/lib/AdGuardHome
pidfile=/run/AdGuardHome.pid
# Launch the daemon in the background unless a PID file says it is
# already running.  All expansions are quoted (the originals were not).
start_AdGuardHome() {
  echo "Starting AdGuard Home... "
  if [ -f "$pidfile" ]; then
    echo "AdGuard Home is already running with PID $(cat "$pidfile")."
    exit 0
  fi
  mkdir -p "$workdir"
  # 0<&- closes stdin and output is discarded so the rc shell can exit
  # without the daemon holding the terminal; the daemon writes its own
  # PID file via --pidfile.
  nohup "$bin" --config "$config" --work-dir "$workdir" --no-check-update \
    --pidfile "$pidfile" 0<&- &>/dev/null &
}
# Send the default signal (TERM) to the PID recorded in the PID file,
# if one exists.  ("Stoppping" typo in the message fixed; expansions
# quoted.)
stop_AdGuardHome() {
  echo "Stopping AdGuard Home... "
  [ -f "$pidfile" ] && kill "$(cat "$pidfile")"
}
restart_AdGuardHome() {
  # Stop, pause one second, then start again.
  stop_AdGuardHome
  sleep 1
  start_AdGuardHome
}
# Report whether the daemon is up, judged solely by the PID file.
# Exits 1 when stopped (conventional rc "status" behaviour).
status_AdGuardHome() {
  if [ ! -f $pidfile ]; then
    echo "AdGuard Home is stopped."
    exit 1
  fi
  echo "AdGuard Home is running with PID $(cat ${pidfile})."
}
# Dispatch on the rc verb passed as $1.
case "$1" in
'start')
  start_AdGuardHome
  ;;
'stop')
  stop_AdGuardHome
  ;;
'restart')
  restart_AdGuardHome
  ;;
'status')
  status_AdGuardHome
  ;;
*)
echo "usage $0 start|stop|restart|status"
esac
| true |
0df011b3b6741f8ea5809d5fcc650a96e770305a | Shell | grahamgilbert/macscripts | /Munki/BinJAMF/installcheck.sh | UTF-8 | 80 | 2.703125 | 3 | [] | no_license | #!/bin/bash
# Munki install-check for the JAMF binary: exit status 0 when
# /usr/sbin/jamf exists, 1 otherwise.
jamf_bin="/usr/sbin/jamf"
if [ ! -f "$jamf_bin" ]
then
	exit 1
fi
exit 0
eda78b5be8512d386e0b3674370308d8664fc6da | Shell | brecht-d-m/amma-bda | /datalab/sources/lint.sh | UTF-8 | 1,143 | 3.421875 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Checks all components.
# Abort early when the environment has not been initialised.
if [ -z "$REPO_DIR" ]; then
  # Single quotes keep the backticks literal: inside the original double
  # quotes they were *executed* as a command substitution, so printing
  # the error message tried to run "source tools/initenv.sh".
  echo 'REPO_DIR is not set. Please run `source tools/initenv.sh` first'
  exit 1
fi

# Components to lint, relative to $REPO_DIR/sources.
SRC_PATHS=(
  "lib/api"
  "lib/datalab"
)

BUILD_DIR=$REPO_DIR/build
LOG_FILE=$BUILD_DIR/lint.log

mkdir -p "$BUILD_DIR"

# Run each component's own lint.sh from inside its directory, mirroring
# all output into the shared log.
for SRC in "${SRC_PATHS[@]}"
do
  echo "Linting $SRC ... " | tee -a "$LOG_FILE"
  SRC_DIR=$REPO_DIR/sources/$SRC
  pushd "$SRC_DIR" >> /dev/null
  ./lint.sh 2>&1 | tee -a "$LOG_FILE"
  popd >> /dev/null
  echo | tee -a "$LOG_FILE"
done

echo "Lint completed." | tee -a "$LOG_FILE"
| true |
c7e5c214a31d5a9b18486d800fd2366403b1a647 | Shell | wangweiX/myDevOps | /linux-script/raspberrypi/ubuntu/20.04/firewall.sh | UTF-8 | 5,938 | 3.59375 | 4 | [
"MIT"
] | permissive | #!/bin/bash
################################################
#
# Script Firewall for INPUT and OUTPUT rules
#
# https://help.ubuntu.com/community/UFW
# https://www.digitalocean.com/community/tutorials/iptables-essentials-common-firewall-rules-and-commands
# https://www.cyberciti.biz/tips/linux-unix-bsd-nginx-webserver-security.html
#
################################################
# Version stamp shown in the startup banner.
version_num=20181010
# ANSI color escapes used with echo -e throughout.
RED="\033[0;31m"
GREEN="\033[0;32m"
NO_COLOR="\033[0m"
# define interfaces IP
# eth0 address with prefix (e.g. 10.0.0.5/24) and the bare IP part.
MY_ETH0_IP_SEG=$(ip addr show eth0 | grep "inet\b" | awk '{print $2}')
MY_ETH0_IP=$(echo ${MY_ETH0_IP_SEG} | cut -d/ -f1)
### Interfaces ###
PUB_IF=$MY_ETH0_IP # public interface
LO_IF="lo" # loopback
VPN_IF="eth1" # vpn / private net
# backup_rules -- dump the current rule set to a timestamped file under
# /etc/iptables_history before anything is changed.
backup_rules() {
echo "Saving iptables rules: "
mkdir -p /etc/iptables_history
IPT_BACKUP_FILE=/etc/iptables_history/iptables.$(date +%y%m%d_%H%M%S)
iptables-save > $IPT_BACKUP_FILE
echo -e "$GREEN Iptables rules saved in $IPT_BACKUP_FILE $NO_COLOR"
}
# clean_iptables -- set permissive INPUT/OUTPUT policies, drop FORWARD,
# then flush and delete all chains (filter, nat and mangle tables).
clean_iptables() {
echo "Cleaning rules - setting policies - flush rules - delete chains: "
iptables -P INPUT ACCEPT
iptables -P OUTPUT ACCEPT
iptables -P FORWARD DROP
iptables --flush # Flush all rules, but keep policies
iptables -t nat --flush # Flush NAT table as well
iptables --delete-chain
iptables -t mangle -F
echo -e "$GREEN Cleaning done. $NO_COLOR"
}
# input_rules -- build the INPUT chain: loopback/intranet traffic,
# established connections and rate-limited ICMP are allowed, selected
# TCP ports are opened to specific peers, everything else is dropped.
input_rules() {
echo -en "Creating rules for allowed INPUT traffic: $RED\n"
# Unlimited lo access
iptables -A INPUT -i lo -j ACCEPT
# Unlimited vpn / pnet access
if ifconfig eth1 &> /dev/null;then
iptables -A INPUT -i eth0 -j ACCEPT
else
# Local traffic - allow all on intranet interface. <<<Apply to VPC environment>>>
iptables -A INPUT -p tcp -m state --state NEW -m tcp -s $MY_ETH0_IP_SEG -j ACCEPT
fi
iptables -A INPUT -p tcp -m state --state NEW -m tcp -s localhost -j ACCEPT
iptables -A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT
# Rate-limited ICMP (10/second), anything beyond the four types dropped.
iptables -A INPUT -p icmp -m icmp --icmp-type echo-request -m limit --limit 10/second -j ACCEPT
iptables -A INPUT -p icmp -m icmp --icmp-type echo-reply -m limit --limit 10/second -j ACCEPT
iptables -A INPUT -p icmp -m icmp --icmp-type time-exceeded -m limit --limit 10/second -j ACCEPT
iptables -A INPUT -p icmp -m icmp --icmp-type destination-unreachable -m limit --limit 10/second -j ACCEPT
iptables -A INPUT -p icmp -j DROP
###### Add the input rules here:
# iptables -A INPUT -p tcp -m state --state NEW -m tcp -s <source_address> --dport <destnation_port> -j ACCEPT
###### Add an end
# The ssh port is opened only to the jump/bastion host (replace xxx.xxx.xxx.xxx).
iptables -A INPUT -p tcp -m state --state NEW,ESTABLISHED -m tcp -s xxx.xxx.xxx.xxx --dport 22 -j ACCEPT
# Ports 80/443 are opened only to the SLB (load balancer).
iptables -A INPUT -p tcp -m state --state NEW,ESTABLISHED -m tcp -s xxx.xxx.xxx.xxx --dport 80 -j ACCEPT
iptables -A INPUT -p tcp -m state --state NEW,ESTABLISHED -m tcp -s xxx.xxx.xxx.xxx --dport 443 -j ACCEPT
# allow your own app port
iptables -A INPUT -p tcp -m state --state NEW,ESTABLISHED -m tcp -s xxx.xxx.xxx.xxx --dport xxx -j ACCEPT
# Default deny for everything not matched above.
iptables -A INPUT -j DROP
echo -e "$GREEN INPUT rules created done. $NO_COLOR"
}
# output_rules -- build the OUTPUT chain mirroring input_rules: local
# and established traffic plus rate-limited ICMP, a whitelist of
# outbound service ports, default deny for the rest.
output_rules() {
echo -en "Creating rules for allowed OUTPUT traffic: $RED\n"
# Unlimited lo access
iptables -A OUTPUT -o lo -j ACCEPT
# Unlimited vpn / pnet access
if ifconfig eth1 &> /dev/null;then
iptables -A OUTPUT -o eth0 -j ACCEPT
else
# Local traffic - allow all on intranet interface. <<<Apply to VPC environment>>>
iptables -A OUTPUT -p tcp -m state --state NEW -m tcp -s $MY_ETH0_IP_SEG -j ACCEPT
fi
iptables -A OUTPUT -p tcp -m state --state NEW -m tcp -s localhost -j ACCEPT
iptables -A OUTPUT -m state --state RELATED,ESTABLISHED -j ACCEPT
# Rate-limited ICMP (10/second); other ICMP types dropped.
iptables -A OUTPUT -p icmp -m icmp --icmp-type echo-request -m limit --limit 10/second -j ACCEPT
iptables -A OUTPUT -p icmp -m icmp --icmp-type echo-reply -m limit --limit 10/second -j ACCEPT
iptables -A OUTPUT -p icmp -m icmp --icmp-type time-exceeded -m limit --limit 10/second -j ACCEPT
iptables -A OUTPUT -p icmp -m icmp --icmp-type destination-unreachable -m limit --limit 10/second -j ACCEPT
iptables -A OUTPUT -p icmp -j DROP
###### Add the output rules here:
# iptables -A OUTPUT -p tcp -m state --state NEW -d <destnation_address> --dport <destnation_port> -j ACCEPT
###### Add an end
# allow DNS-NTP-FTP-HTTP-HTTPS-SMTP
PORTS1="53 123"
for port1 in $PORTS1;do iptables -A OUTPUT -p udp -m state --state NEW --dport $port1 -j ACCEPT;done
PORTS2="22 21 80 443 25"
for port2 in $PORTS2;do iptables -A OUTPUT -p tcp -m state --state NEW --dport $port2 -j ACCEPT;done
# allow your own port
PORTS3="8888 9999 "
for port3 in $PORTS3;do iptables -A OUTPUT -p tcp -m state --state NEW,ESTABLISHED --dport $port3 -j ACCEPT;done
# Default deny for everything not matched above.
iptables -A OUTPUT -j DROP
echo -e "$GREEN OUTPUT rules created done. $NO_COLOR"
}
# save_rules -- persist the rule set to /etc/iptables.conf and make
# sure /etc/rc.local restores it at boot.
save_rules(){
if [ ! -f "/etc/iptables.conf" ]; then
touch /etc/iptables.conf
fi
iptables-save > /etc/iptables.conf
# Append the restore line only once: existing 'exit 0' lines are
# removed first so the appended block ends rc.local with its own exit 0.
grep 'iptables-restore' /etc/rc.local &> /dev/null
if [ $? != 0 ] ; then
sed -i '/exit\s0/d' /etc/rc.local
echo -e "iptables-restore < /etc/iptables.conf\nexit 0" >> /etc/rc.local
fi
echo -e "$GREEN iptables rules saved done. $NO_COLOR"
}
# --- main --- iptables requires root.
if ! id |grep "uid=0(root)" &> /dev/null; then
echo -e "$RED ERROR: You need to run this script as ROOT user $NO_COLOR" >&2
exit 2
fi
if [ "$1" = "-h" ] || [ "$1" = "-H" ] || [ "$1" = "--help" ] || [ "$1" = "--HELP" ]; then
echo "Please run in the root user: bash ubuntu_firewall.sh !!"
exit 2
fi
echo "############################################"
echo $(basename $0)
printf "Version: %s\n" $version_num
echo "############################################"
# Back up the current rules, wipe, rebuild INPUT/OUTPUT, persist.
backup_rules
clean_iptables
input_rules
output_rules
save_rules
echo "############################################"
echo "Done. "
| true |
2c7f91744a3892a45836d527451239106094d20a | Shell | YadominJinta/dotfiles | /zsh/install.sh | UTF-8 | 1,060 | 2.5625 | 3 | [] | no_license | #! /usr/bin/env bash
cd "$HOME"

# Zsh theme (powerlevel10k) and plugins, shallow-cloned into ~/.zsh.
mkdir -p .zsh/themes
mkdir -p .zsh/plugins
echo "Setting up themes"
cd ~/.zsh/themes
git clone --depth=1 https://github.com/romkatv/powerlevel10k
cd ~/.zsh/plugins
git clone --depth=1 https://github.com/zsh-users/zsh-syntax-highlighting
git clone --depth=1 https://github.com/zsh-users/zsh-autosuggestions
git clone --depth=1 https://github.com/zsh-users/zsh-history-substring-search

# Python: use the BFSU PyPI mirror and install a few CLI helpers.
echo "Setting up python"
pip3 config set global.index-url https://mirrors.bfsu.edu.cn/pypi/web/simple
pip3 install -U pip
pip3 install trash-cli
pip3 install thefuck

# Rust toolchain via rustup, pointed at the BFSU crates.io mirror.
echo "Setting up rust"
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh
# Quoting the here-document delimiter makes the body fully literal.
# The original unquoted version kept the backslashes of \' and \" in
# the written file (heredocs only strip backslashes before $, ` and \),
# producing invalid cargo TOML such as: replace-with = \'tuna\'
cat > ~/.cargo/config << 'EOF'
[source.crates-io]
replace-with = 'tuna'
[source.tuna]
registry = "https://mirrors.bfsu.edu.cn/git/crates.io-index.git"
EOF

echo "Setting SDKMAN"
curl -s "https://get.sdkman.io" | bash

echo "Setting Node"
export N_PREFIX="$HOME/.n"
curl -L https://git.io/n-install | bash

# Finally fetch the prepared .zshrc.
curl https://cdn.jsdelivr.net/gh/YadominJinta/dotfiles/zsh/.zshrc -o ~/.zshrc
| true |
308f458cb87f0b1d30423648c5eda41774cc4721 | Shell | Nicholas1126/frida-fuzzer-aio | /frida-android/build.sh | UTF-8 | 943 | 3 | 3 | [
"Apache-2.0"
] | permissive | #/bin/bash
# 1. Enter the python3-android directory and cross-compile a Python 3.9
#    environment for Android (arm64) -- kept here as a manual recipe:
# cd ../python3-android
# ARCH=arm64 ANDROID_API=21 ./build.sh --enable-shared
# mv build python3.9
# tar -czf ../frida-android/python3.9-arm64.tar.gz python3.9
# 2. After building, copy everything to the phone's /data/local/tmp.
# Repackage the frida python egg (a zip) as a tarball for the device.
cd ../frida-android/
unzip frida-14.2.13-py3.8-android-aarch64.egg -d frida-14.2.13-py3.8-android-aarch64
tar -czf frida-14.2.13-py3.8-android-aarch64.tar.gz frida-14.2.13-py3.8-android-aarch64
rm -rf frida-14.2.13-py3.8-android-aarch64
# Bring the prebuilt Python tarballs next to the frida artifacts.
cp ../python3-android/python3.9-arm64.tar.gz .
cp ../python3-android/python3.9-arm.tar.gz .
# Push the frida server, runtimes and helper scripts onto the device.
adb push frida-server-14.2.13-android-arm64 /data/local/tmp
adb push frida-14.2.13-py3.8-android-aarch64.tar.gz /data/local/tmp
adb push python3.9-arm64.tar.gz /data/local/tmp
adb push get-pip.py /data/local/tmp
adb push run.sh /data/local/tmp
echo "Build Done! Please execute run.sh in /data/local/tmp "
38d52a0403115a7a921eccc6b6a12b9f41753509 | Shell | xendk/dais | /entrypoint.sh | UTF-8 | 230 | 2.71875 | 3 | [] | no_license | #!/bin/sh
# We need this script as we can't pass a list of files through Github
# Action argumements, but passes files as a single string. So we need
# this script to split the arguments before passing them to dais.
/src/dais $*
| true |
77c94e3a0145f0bc817f9903930eeb3c8ea3127c | Shell | bool3max/dots | /scripts/blocks/spp_statusbar | UTF-8 | 1,065 | 3.625 | 4 | [] | no_license | #!/bin/sh
# simple shell script that outputs the current spotify status in the form of $ARTIST - $SONGNAME, along with a FontAwesome's play/pause icons
#depends on 'sp' (a shell utility for interacting with Spotify's DBUS MPRIS protocol) and gnu grep
#meant to be used in a status script such as i3blocks or i3status
metadata_raw=$($HOME/.scripts/sp metadata)
if [[ $? = 1 ]]; then
#sp metadata failed, presumably spotify isn't running
echo " Spotify not running..."
exit 0
fi
artist_name=$(grep -Po '(?<=albumArtist\|)(.*)' <<< "$metadata_raw")
song_title=$(grep -Po '(?<=title\|)(.*)' <<< "$metadata_raw")
play_status=$(dbus-send --print-reply --dest=org.mpris.MediaPlayer2.spotify /org/mpris/MediaPlayer2 org.freedesktop.DBus.Properties.Get string:'org.mpris.MediaPlayer2.Player' string:'PlaybackStatus')
final="$artist_name - $song_title"
if [[ $(grep -Po 'Playing' <<< "$play_status") = 'Playing' ]]; then
final=" $final"
elif [[ $(grep -Po 'Paused' <<< "$play_status") = 'Paused' ]]; then
final=" $final"
fi
echo $final
| true |
4e4d076eb2e95863ca4aa8a08dcd520664e3e1cf | Shell | cypayne/palumbi_scripts | /batch-bt2-whitesharks.sh | UTF-8 | 1,241 | 2.921875 | 3 | [] | no_license | #!/bin/bash
# usage: sbatch batch-bt2-whitesharks.sh bt2index path_to_read_files
#
# NOTE(review): the body below had been disabled with Python-style
# triple quotes.  In bash, ''' opens a single-quoted string that only
# closes at the matching ''' near the end of the file, so the whole
# body collapsed into one giant word which the shell then tried to
# execute as a command name ("command not found").  Feeding the text to
# the no-op ':' builtin through a quoted here-document keeps it
# disabled, but cleanly: nothing is expanded, executed, or errors out.
: << 'DISABLED_SBATCH_BODY'
#FQ="${@:3}"
# for i in $FQ; do
#XXX for val in OC-116_S12 OC-1_S7 OC-2_S8 OC-3_S9 OC-66_S11 OC-72_S10 WS10-16_S1 WS10-17_S2 WS10-22_S3 WS11-06_S4 WS12-03_S5 WS12-10_S6
#XXX for val in OC-116_S12 OC-1_S7 OC-3_S9 OC-66_S11 OC-72_S10 WS10-16_S1 WS10-17_S2 WS10-22_S3 WS11-06_S4 WS12-03_S5 WS12-10_S6
for val in OC-2_S8 OC-3_S9 OC-66_S11 OC-72_S10 WS10-16_S1 WS10-17_S2 WS10-22_S3 WS11-06_S4 WS12-03_S5 WS12-10_S6
#for val in OC-116_S12 OC-1_S7
do
fwdlist=""
revlist=""
for i in {1..6}
do
fwdlist="${fwdlist}${val}_L00${i}_forward_paired.fq.gz"
revlist="${revlist}${val}_L00${i}_reverse_paired.fq.gz"
#The if-then command puts commas after every file name 1-5, but not 6
if [ $i -lt 6 ]
then
fwdlist="${fwdlist},"
revlist="${revlist},"
fi
done
echo -e "#!/bin/bash\n#SBATCH -p spalumbi,owners,normal,hns\n#SBATCH --ntasks=1\n#SBATCH --cpus-per-task=16\n#SBATCH -t 24:00:00\n#SBATCH --mem 48000" > TEMPBATCH.sbatch
echo "srun bowtie2 -p 16 --end-to-end -x $1 -1 ${fwdlist} -2 ${revlist} | samtools view -b > ${val}_paired_post_trim_to_white_shark.bam" >> TEMPBATCH.sbatch
sbatch TEMPBATCH.sbatch
done
DISABLED_SBATCH_BODY
| true |
a24c61efed1a874fcbcee29b3e6447d8b7fad704 | Shell | jiwon-choe/Brown-SMCSim | /SMC/UTILS/models/system/gem5_fullsystem_arm7.sh | UTF-8 | 943 | 2.5625 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# print_msg "Loading Host Model: ARMv7 (Cortex-A15)"
# gem5 full-system host model: ARMv7 (Cortex-A15) on the VExpress_EMM
# platform. Meant to be sourced; every setting is exported so child
# processes (gem5, make, cross-compilers) can read it.
# NOTE(review): assumes $M5_PATH and $SMC_WORK_DIR are already set by the
# calling environment -- confirm before sourcing standalone.
export GEM5_NUMCPU=8
export HOST_CLOCK_FREQUENCY_GHz=2
export ARCH_ALIAS="arm7"
export GEM5_PLATFORM=ARM
export GEM5_MACHINETYPE="VExpress_EMM"
# Device tree, kernel and disk image shipped with the gem5 ARM full-system files.
export GEM5_DTBFILE="$M5_PATH/binaries/vexpress.aarch32.ll_20131205.0-gem5.${GEM5_NUMCPU}cpu.dtb"
export GEM5_KERNEL="$M5_PATH/binaries/vmlinux.aarch32.ll_20131205.0-gem5"
export GEM5_DISKIMAGE="$M5_PATH/disks/linux-aarch32-ael.img"
# Cross-compilation settings for building the guest kernel.
export HOST_CROSS_COMPILE=arm-linux-gnueabihf-
export ARCH="arm" # Notice this is case sensitive
export COMPILED_KERNEL_PATH=${SMC_WORK_DIR}/linux_kernel/linux-linaro-tracking-gem5-ll_20131205.0-gem5-a75e551/
export GEM5_CHECKPOINT_LOCATION="NONE"
# Simulated CPU model and cache hierarchy.
export GEM5_CPUTYPE=timing
export L1I_CACHE_SIZE=32kB
export L1D_CACHE_SIZE=64kB
export L2_CACHE_SIZE=2MB
# This is used only for full-system simulation, but for comparison with modelsim we must use fcfs
export DRAM_SCHEDULING_POLICY_GEM5="frfcfs"
| true |
ccf8af586ee1b1d655af3fd90ad90f300c5f06b0 | Shell | lielongxingkong/openstack_logs | /swift/for_puppet/ring.sh | UTF-8 | 1,609 | 3 | 3 | [] | no_license | #!/bin/bash
sed -i s/[[:space:]]//g ./zCloudRing.conf
sudo cp ./zCloudRing.conf /etc/swift/
cd /etc/swift
RING_CONF="./zCloudRing.conf"
sudo swift-ring-builder account.builder create 18 3 1
sudo swift-ring-builder container.builder create 18 3 1
sudo swift-ring-builder object.builder create 18 3 1
while read line; do
name=`echo $line|awk -F '=' '{print $1}'`
value=`echo $line|awk -F '=' '{print $2}'`
case $name in
"proxy_ip")
proxy_ip=`echo $value|awk -F '|' '{print $1}'`
proxy_user=`echo $value|awk -F '|' '{print $2}'`
;;
"datanode")
zone=`echo $value|awk -F '|' '{print $1}'`
ip=`echo $value|awk -F '|' '{print $2}'`
swift_path=`echo $value|awk -F '|' '{print $3}'`
weight=`echo $value|awk -F '|' '{print $4}'`
sudo swift-ring-builder account.builder add $zone-$ip:6002/$swift_path $weight
sudo swift-ring-builder container.builder add $zone-$ip:6001/$swift_path $weight
sudo swift-ring-builder object.builder add $zone-$ip:6000/$swift_path $weight
;;
*)
;;
esac
done < $RING_CONF
sudo swift-ring-builder account.builder
sudo swift-ring-builder container.builder
sudo swift-ring-builder object.builder
sudo swift-ring-builder account.builder rebalance
sudo swift-ring-builder container.builder rebalance
sudo swift-ring-builder object.builder rebalance
while read line; do
name=`echo $line|awk -F '=' '{print $1}'`
value=`echo $line|awk -F '=' '{print $2}'`
case $name in
"datanode")
ip=`echo $value|awk -F '|' '{print $2}' `
user=`echo $value|awk -F '|' '{print $5}' `
# scp /etc/swift/*.gz $user@$ip:/etc/swift/
;;
*)
;;
esac
done < $RING_CONF
sudo rm /etc/swift/zCloudRing.conf
| true |
20c83bdc3d7715ff78c60f962f457971b6e893ba | Shell | kragebein/plexbot | /bot/plugins/pb_extras.sh | UTF-8 | 4,294 | 3.109375 | 3 | [] | no_license | #!/bin/bash -
#===============================================================================
#
# FILE: pb_extras.sh
#
# USAGE: ./pb_extras.sh
#
# DESCRIPTION: This is a collection of many smaller functions that are useful
# but not critical to the operation of plexbot.
# CREATED: 06/12/2019 10:34
# REVISION: ---
#===============================================================================
load $pb/../../master.sh
# plexbot plugin bootstrap (see header above).
# NOTE(review): 'load' and the tt_* / say / user_convert names come from
# master.sh / the bot environment -- they are not defined in this file.
_script="pb_extras.sh"
regex="^[.]plex"
# Query Tautulli for the most recently added item and the current stream
# count. Sets globals: lastadd, lastadd_section ([S]eries vs [F]ilm),
# timesince (Norwegian, e.g. "~2 timer siden"), stream_count.
getplex() {
lastadd_json=$(curl -s "$tt_hostname/api/v2?apikey=$tt_apikey&cmd=get_recently_added&count=1")
# Prefer the parent (show/album) title; fall back to the item title when
# the parent title is empty or a single space.
lastadd=$(echo "$lastadd_json" |jq -r '.response.data.recently_added[0].parent_title')
case "$lastadd" in
'')
lastadd=$(echo "$lastadd_json" |jq -r '.response.data.recently_added[0].title');;
' ')
lastadd=$(echo "$lastadd_json" |jq -r '.response.data.recently_added[0].title');;
esac
# Section id 2 is presumably the TV library -- confirm against the Plex setup.
section=$(echo "$lastadd_json" |jq -r '.response.data.recently_added[0].section_id')
if [ "$section" = "2" ]; then
lastadd_section="[S]"
else
lastadd_section="[F]"
fi
# Age of the newest item, coarsely bucketed into seconds/minutes/hours.
added_at=$(echo "$lastadd_json" |jq -r '.response.data.recently_added[0].added_at')
now="$(date +%s)"
seconds="$(echo $now - $added_at |bc)"
minutes="$(echo $seconds/60 |bc)"
hours="$(echo $minutes/60 |bc)"
if [ "$seconds" -le "60" ]; then timesince="$seconds sekunder siden";fi
if [ "$minutes" -le "60" ]; then timesince="$minutes minutter siden";fi
if [ "$hours" -ge "1" ]; then timesince="~$hours timer siden";fi
activity=$(curl -s "$tt_hostname/api/v2?apikey=$tt_apikey&cmd=get_activity")
stream_count=$(echo "$activity" | jq -r '.response.data.stream_count')
}
# Announce every active Plex stream via 'say', one line per session, and
# record "<index> <rating_key>" pairs in a temp buffer for later commands.
showstreams() {
QUERY="$(curl -s "$tt_hostname/api/v2?apikey=$tt_apikey&cmd=get_activity")"
STREAM_COUNT=$(echo "$QUERY" | jq -r '.response.data.stream_count')
COUNTER=0
COUNT=1
buf="$(mktemp)"
while [ $COUNTER -lt $STREAM_COUNT ]; do
STATE=$(echo "$QUERY" |jq -r ".response.data.sessions[$COUNTER].state")
if [ "$STATE" = "error" ]; then break;fi
STREAM_BIT=$(echo "$QUERY" |jq -r ".response.data.sessions[$COUNTER].bitrate")
STREAM_RES=$(echo "$QUERY" |jq -r ".response.data.sessions[$COUNTER].video_resolution")
STREAM_CALCBIT="$(echo "scale=2; $STREAM_BIT/1000" |bc -l)" # calculate theoretical bandwidth usage (Mbps)
UTIL="$(echo "scale=2; (100*$STREAM_CALCBIT)/50." |bc -l)"
STREAM_CODEC=$(echo "$QUERY" |jq -r ".response.data.sessions[$COUNTER].video_codec")
STREAM_AUDIO=$(echo "$QUERY" |jq -r ".response.data.sessions[$COUNTER].audio_codec")
# Reduce "user@host.domain" style names to the bare user name.
WHO_PLAYS=$(echo "$QUERY" |jq -r ".response.data.sessions[$COUNTER].user" | sed 's/@.*//g' |awk -F "." '{print $1}')
WHAT_PLAYS=$(echo "$QUERY" |jq -r ".response.data.sessions[$COUNTER].full_title")
STREAM_TYPE=$(echo "$QUERY" |jq -r ".response.data.sessions[$COUNTER].video_decision" |sed 's/copy/Direct Stream/g' |sed 's/direct play/Direct Play/' |sed 's/transcode/Transcode/g')
STREAM_PERC=$(echo "$QUERY" |jq -r ".response.data.sessions[$COUNTER].progress_percent")
STREAM_APP=$(echo "$QUERY" |jq -r ".response.data.sessions[$COUNTER].platform")
RATING_KEY=$(echo "$QUERY" |jq -r ".response.data.sessions[$COUNTER].rating_key")
# Transcodes get the fire emoji; direct play/stream gets a thumbs-up.
if [ "$STREAM_TYPE" = "Transcode" ]; then
FLAMIN="🔥"
TRANS_BIT="$(echo "$QUERY" |jq -r ".response.data.sessions[$COUNTER].bitrate")"
TRANS_RES="$(echo "$QUERY" |jq -r ".response.data.sessions[$COUNTER].video_resolution")"
TRANS_CALCBIT="$(echo "scale=2; $STREAM_BIT/1000" |bc -l)"
TRANS_UTIL="$(echo "scale=2; (100*$STREAM_CALCBIT)/50." |bc -l)"
else
FLAMIN="👍"
fi
if [ "$WHO_PLAYS" != "this_user_wont_be_shown_in_the_overview" ]; then
t="$WHO_PLAYS"
user_convert "$WHO_PLAYS"
echo "$t -> $WHO_PLAYS"
say "$who : "
say "$who :#$COUNT:${FLAMIN}$user spiller av $WHAT_PLAYS. ($STREAM_PERC%/100%) [$STREAM_CODEC/$STREAM_AUDIO/${STREAM_BIT}kbps($UTIL%)/${STREAM_RES}]. Bruker $STREAM_TYPE via $STREAM_APP"
echo "$COUNT $RATING_KEY" >> $buf
fi
let COUNTER=COUNTER+1
let COUNT=COUNT+1
# NOTE(review): this mv runs on EVERY iteration, so /tmp/.sbuf ends up
# holding only the last session's entry -- presumably it was meant to sit
# after 'done'. Confirm against whatever reads /tmp/.sbuf.
mv $buf /tmp/.sbuf
done
}
# Dispatch: strip the dots from the incoming bot command and handle ".plex".
case ${cmd//./} in
'plex')
getplex
say "$who :Siste lagt til: $lastadd_section $lastadd ($timesince)"
# No active streams -> short notice; otherwise list every session.
if [ "$stream_count" = "0" ]; then
say "$who :Plex e folketomt! E ingen som stream no."
else
showstreams
fi ;;
esac
| true |
308530c465b2991977de4c2c17cb0a811b5e8f9c | Shell | faun/dotfiles | /shrc/aliases.sh | UTF-8 | 4,421 | 3.328125 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# directory listing & navigation
alias l="ls -lAh"
alias ll='ls -hl'
alias la='ls -a'
alias lla='ls -lah'
alias ..='cd ..'
alias ...='cd .. ; cd ..'
alias q='exit'
# tmux
alias tat='tmux_attach'
alias t='tmux_attach'
alias tmux='tmux_attach'
# ruby
alias bi='bundle install'
alias bu='bundle update'
alias be='bundle exec'
# NOTE(review): history expansion (!!) is generally not re-expanded inside an
# alias body -- verify 'b' and 'lm' below actually work in this shell setup.
alias b='bundle exec !!'
alias r='bundle exec rspec'
# rails
alias tlog='tail -f log/development.log'
alias scaffold='script/generate nifty_scaffold'
alias migrate='rake db:migrate db:test:clone'
alias rst='touch tmp/restart.txt'
alias rdm="rake db:migrate"
alias rdtp="rake db:test:prepare"
alias rsa='rake spec:all'
alias rc='rails console'
alias rs='rails server'
alias rsd='rails server --debugger'
alias spec='/usr/bin/time bundle exec rspec'
# misc
alias retag='ctags -R --exclude=.svn --exclude=.git --exclude=log *'
alias f='find . -iname'
alias ducks='du -cksh * | sort -rn|head -11' # Lists folders and files sizes in the current folder
alias m='more'
alias df='df -h'
alias lm='!! | more'
alias sane='stty sane'
alias py='python'
alias sha1="openssl sha1"
# JavaScript
alias nombom='npm cache verify && bower cache clean && rm -rf node_modules bower_components && npm install && bower install'
alias vim_bundle_install='vim +PlugInstall'
alias vim_bundle_update='vim +PlugUpdate +qall'
alias vim_bundle_clean='vim +PlugClean +qall'
alias vim_bundle_maintenance='vim +PlugInstall +PlugUpdate +PlugClean +qall'
# Prefer neovim when available; EDITOR and the vim alias follow the choice.
if command -v nvim >/dev/null 2>&1; then
VIM_EXE='nvim'
else
VIM_EXE='vim'
fi
export EDITOR="$VIM_EXE"
alias vim="\$VIM_EXE -O"
# It's aliases all the way down
alias local_vim_bundles='$EDITOR $HOME/.bundles.local.vim'
alias local_gitconfig='$EDITOR $HOME/.gitconfig.local'
alias local_shell_conf='$EDITOR $HOME/.local.sh'
alias local_tmux_conf='$EDITOR $HOME/.tmux.local'
alias local_vimrc='$EDITOR $HOME/.vimrc.local'
# Pick a kubectl context interactively with fzf and switch to it.
kcontext() {
kubectl config use-context "$(kubectl config get-contexts -o name | fzf)"
}
# Fuzzy-pick among the Rails migration files (newest first) and open the
# choice in vim.
# NOTE(review): the '|| exit 1' only exits the $( ) subshell, not the calling
# shell; the -n check below is what actually guards the vim call.
migrations() {
local migration_name
migration_name="$(find ./db/migrate/* | sort -nr | fzf --reverse || exit 1)"
if [[ -n $migration_name ]]; then
vim -O "$migration_name"
fi
}
# Print the namespace of the current kubectl context ("default" when unset).
# NOTE(review): 'ns' is not declared local, so it leaks into the caller.
current_namespace() {
local cur_ctx
cur_ctx="$(current_context)"
ns="$(kubectl config view -o=jsonpath="{.contexts[?(@.name==\"${cur_ctx}\")].context.namespace}")"
if [[ -z "${ns}" ]]; then
echo "default"
else
echo "${ns}"
fi
}
# Show a prompt ($1, defaulting to "Are you sure? [y/N]") and read one line
# from stdin. Returns 0 when the reply starts with y or Y, 1 otherwise.
confirm() {
  local prompt="${1:-Are you sure? [y/N]}"
  local reply
  echo "$prompt"
  read -r reply
  case "$reply" in
    [yY]*) return 0 ;;
    *) return 1 ;;
  esac
}
# Print the name of the current kubectl context.
current_context() {
kubectl config view -o=jsonpath='{.current-context}'
}
# Strip ANSI colour codes from stdin. Falls back to npx when the CLI is not
# installed, and kicks off a global install in the background for next time.
strip_ansi() {
if command -v strip-ansi >/dev/null 2>&1; then
strip-ansi
else
npx strip-ansi-cli 2>/dev/null
npm install --global strip-ansi-cli >/dev/null 2>&1 &
fi
}
# fzf pickers that fall back to the current namespace/context when cancelled.
namespace_options() {
kubens | strip_ansi | fzf || current_namespace
}
context_options() {
kubectx | strip_ansi | fzf || current_context
}
# Short kubectl helpers built on the pickers above; ktxd confirms before
# deleting the chosen context from the kubeconfig.
alias k="kubectl"
alias kctx='kubectx'
alias kns='kubens "$(namespace_options)"'
alias ktx='kubectx "$(context_options)"'
alias ktxd='CONTEXT="$(context_options)"; confirm "Delete context $CONTEXT?" && kubectl config unset "contexts.$CONTEXT"'
# Print the name of the newest running pod carrying app=<label>.
kcapp() {
if [[ $# -ne 1 ]]; then
echo "Usage kcapp <app_label>"
return 1
fi
kubectl get pod -l app="$1" \
--sort-by=.status.startTime \
--field-selector=status.phase=Running \
-o=jsonpath='{.items[-1:].metadata.name}' |
tail -1
}
# Print the name of the istio-system pod carrying istio=<label>.
kcin() {
if [[ $# -ne 1 ]]; then
echo "Usage kcin <istio_label>"
return 1
fi
kubectl get -n istio-system pod -l istio="$1" \
-o=jsonpath='{.items[-1:].metadata.name}'
}
# Print the name of the istio-system pod carrying istio-mixer-type=<label>.
kcimt() {
if [[ $# -ne 1 ]]; then
echo "Usage kcimt <istio-mixer-type>"
return 1
fi
kubectl get -n istio-system pod -l istio-mixer-type="$1" \
-o=jsonpath='{.items[-1:].metadata.name}'
}
alias kcistio=kcin
# Print the name of the newest running pod carrying release=<label>.
kcrelease() {
if [[ $# -ne 1 ]]; then
echo "Usage kcrelease <release_label>"
return 1
fi
kubectl get pods -l release="$1" \
--sort-by=.status.startTime \
--field-selector=status.phase=Running \
-o=jsonpath='{.items[-1:].metadata.name}' |
tail -1
}
# Print the name of the pod carrying run=<label>.
# NOTE(review): the usage string says "krun" but the function is kcrun.
kcrun() {
if [[ $# -ne 1 ]]; then
echo "Usage krun <run_label>"
return 1
fi
kubectl get pod -l run="$1" \
-o=jsonpath='{.items[-1:].metadata.name}' |
tail -1
}
# Exec a command interactively inside a specific container of a pod.
# Usage: kcx <pod> <container> <command...>
kcx() {
  local pod container
  if (( $# < 3 )); then
    echo "Usage kcx <pod> <container> [commands]"
    return 1
  fi
  pod=$1
  container=$2
  shift 2
  kubectl exec -it "$pod" -c "$container" -- "$@"
}
| true |
accf47e2352186a1cc4683bf12e5cc88833b28fb | Shell | RonieGSS/cakephp | /bin/bin/mkdocs | UTF-8 | 1,279 | 3.40625 | 3 | [] | no_license | #! /bin/bash
echo '
= = = = = = = = = = = = = = = = = = = = = = = = = =
MkDocs Commands
- - - - - - - - - - - - - - - - - - - - - - - - - -
Select from the following set of commands:
- - - - - - - - - - - - - - - - - - - - - - - - - -
DESCRIPTIONS
- - - - - - - - - - - - - - - - - - - - - - - - - -
1) Mkdocs serve => Build and run mkdocs container
2) Mkdocs build => Build css, html, js files
3) Mkdocs logs => Show container logs
4) Back => Return to bin/gss panel
5) Quit => Close the panel
= = = = = = = = = = = = = = = = = = = = = = = = = =
'
PS3='Please enter the number of your choice: '
mkdocs=("Mkdocs serve" "Mkdocs build" "Mkdocs logs" "Back" "Quit")
cd `dirname $0`/../../
select opt in "${mkdocs[@]}"
do
case $opt in
"Mkdocs serve")
bash ops/docker/mkdocs/bin/serve
break
;;
"Mkdocs build")
bash ops/docker/mkdocs/bin/build
break
;;
"Mkdocs logs")
bash ops/docker/mkdocs/bin/logs
break
;;
"Back")
bash bin/gss
break
;;
"Quit")
break
;;
*) echo "invalid option $REPLY";;
esac
done | true |
6a1e853f96a55d4d1c8c6e675139f56f27c7a2c4 | Shell | Jxrgxn/dotfiles | /.dotfiles/shell/z_login.zsh | UTF-8 | 1,133 | 3.078125 | 3 | [
"MIT"
] | permissive | #-------------------------------------------------------------------------------
#
# shell/z_login.zsh
# Commands to be run for each terminal "login"
#
#-------------------------------------------------------------------------------

# Window title - for Timing.app <https://itunes.apple.com/us/app/timing/id431511738?mt=12>
echo -ne "\e]1;${USER}@${HOST%%.*}:${PWD/#$HOME/~}\a"

# SSH - Print out the fingerprint and comment of the default public key for this user@host
sshkeyfingerprint
if (( $? != 0 )); then
	echo "No SSH key found"
	sshnewkey "${USER}@${HOST}"
fi

# Antigen
# FIX: the path was '~/dev/shell/antigen' in single quotes, so the tilde was
# never expanded -- the -d check always failed and git cloned into a literal
# "~" directory. Use $HOME instead. Also dropped 'local', which is only
# valid inside functions.
antigen_dir="$HOME/dev/shell/antigen"

# Install antigen on first use.
if [ ! -d "$antigen_dir" ]; then
	echo "*** Installing antigen"
	git clone https://github.com/zsh-users/antigen.git "$antigen_dir"
fi

source "$antigen_dir/antigen.zsh"
antigen use oh-my-zsh

# Override the oh-my-zsh 'd' alias
unalias d && alias d='git diff'

# Antigen Bundles
antigen bundle common-aliases
antigen bundle nojhan/liquidprompt
antigen bundle zsh-users/zsh-syntax-highlighting
antigen bundle robbyrussell/oh-my-zsh plugins/ruby

# Antigen Themes
antigen theme gnzh

antigen apply
| true |
104a781254c44f462d699f574b60e1f0853946dc | Shell | StackArch/pkg-iniparse | /PKGBUILD | UTF-8 | 1,520 | 2.953125 | 3 | [] | no_license | # Maintainer: BigfootACA <bigfoot@classfun.cn>
_pyname=iniparse
pkgbase=python-$_pyname
pkgname=(python{,2}-$_pyname)
pkgver=0.5
pkgrel=1
pkgdesc="Accessing and Modifying INI files"
arch=(any)
url="https://github.com/candlepin/python-iniparse"
license=(MIT)
depends=(
python
python-six
python2
python2-six
)
makedepends=(
python-setuptools
python2-setuptools
)
source=(https://pypi.io/packages/source/${_pyname::1}/$_pyname/$_pyname-$pkgver.tar.gz)
md5sums=('2054bab923df21107652d009f2373789')
sha256sums=('932e5239d526e7acb504017bb707be67019ac428a6932368e6851691093aa842')
sha512sums=('b3f10d1b36497c3c5c71cb0a1ac73d74d8944f4ad3b7acc4a4b0246c2f1a20c184d9af20bbb3cb8ec4f57fddfb5e103b92688847debb4200ef0583353d7f9556')
prepare(){
cp -a $_pyname-$pkgver{,-py2}
}
build(){
pushd $_pyname-$pkgver
python setup.py build
popd
pushd $_pyname-$pkgver-py2
python2 setup.py build
popd
}
_package_python(){
depends=(
python
python-six
)
cd $_pyname-$pkgver
python setup.py install --root "$pkgdir" --optimize=1
install -Dm644 LICENSE "$pkgdir"/usr/share/licenses/"$pkgname"/LICENSE
mv "$pkgdir"/usr/share/doc/{${_pyname}*,${pkgname}}
}
_package_python2(){
depends=(
python2
python2-six
)
cd $_pyname-$pkgver-py2
python2 setup.py install --root "$pkgdir" --optimize=1
install -Dm644 LICENSE "$pkgdir"/usr/share/licenses/"$pkgname"/LICENSE
mv "$pkgdir"/usr/share/doc/{${_pyname}*,${pkgname}}
}
eval "package_python-${_pyname}(){ _package_python; }"
eval "package_python2-${_pyname}(){ _package_python2; }"
| true |
7ea266fa6b3237e7e98171029092e170e2457013 | Shell | Lemon080910/xiaomi_3c | /squashfs-root/etc/hotplug.d/iface/20-firewall | UTF-8 | 317 | 3.125 | 3 | [] | no_license | #!/bin/sh
logger -t "${0}-firewall[$$]" "HotPlugEvent: $ACTION of $INTERFACE ($DEVICE)"
[ "$ACTION" = "ifup" ] || exit 0
/etc/init.d/firewall enabled || exit 0
fw3 -q network "$INTERFACE" >/dev/null || exit 0
logger -t firewall "HotPlugEvent: Reloading firewall due to ifup of $INTERFACE ($DEVICE)"
fw3 -q reload
| true |
3a17cc5e8490d41e02bf5e36ebfbeb7b4dfca86f | Shell | shorif2000/aws-ec2-install-scripts | /aws-ec2-rhel8-php73-fpm-server.sh | UTF-8 | 2,241 | 2.609375 | 3 | [] | no_license | #/bin/bash
yum update -y
yum install gcc gcc-c++ make python3-docutils -y
cd ~
wget https://github.com/skvadrik/re2c/releases/download/1.1.1/re2c-1.1.1.tar.gz
tar zxvf re2c-1.1.1.tar.gz
cd re2c-1.1.1
./configure --prefix=/opt/SP/re2c --enable-docs
make clean && make && make install
echo 'export PATH=$PATH:/opt/SP/re2c/bin' >> ~/.bash_profile
cd ~
source .bash_profile
cd ~
wget https://github.com/Kitware/CMake/releases/download/v3.13.3/cmake-3.13.3.tar.gz
tar zxvf cmake-3.13.3.tar.gz
cd cmake-3.13.3
./bootstrap
make clean && make && make DESTDIR=/opt/SP/cmake install
echo 'export PATH=$PATH:/opt/SP/cmake/usr/local/bin' >> ~/.bash_profile
cd ~
source .bash_profile
yum install zlib-devel -y
cd ~
wget https://libzip.org/download/libzip-1.5.1.tar.xz
tar xf libzip-1.5.1.tar.xz
cd libzip-1.5.1
mkdir build
cd build
/opt/SP/cmake/usr/local/bin/cmake ..
make && make test && make install
echo "/usr/local/lib64" >> /etc/ld.so.conf
ldconfig
cd ~
wget https://ftp.gnu.org/gnu/bison/bison-3.5.tar.gz
tar -zxvf bison-3.5.tar.gz
cd bison-3.5
./configure --prefix=/opt/SP/bison-3.5
make clean && make && make install
ln -s /opt/SP/bison-3.5 /opt/SP/bison
echo 'export PATH=$PATH:/opt/SP/bison' >> ~/.bash_profile
cd ~
source .bash_profile
yum install autoconf bzip2-devel curl-devel libpng-devel libzip-devel libzip libxml2-devel openldap-devel gnutls-devel libicu-devel openssl-devel systemd-devel -y
cd ~
wget https://www.php.net/distributions/php-7.3.14.tar.gz
tar -zxvf php-7.3.14.tar.gz
cd php-7.3.14
./buildconf --force
./configure --prefix=/opt/SP/php-7.3.14 \
--enable-fpm \
--with-fpm-systemd \
--enable-zip \
--with-libzip \
--with-zlib \
--with-bz2 \
--with-curl \
--with-gd \
--with-openssl \
--with-ldap \
--with-libdir=lib64 \
--enable-mbstring \
--with-pcre-regex \
--with-oci8=shared,$ORACLE_HOME \
--quiet
make clean && make && make install
cp ~/aws-ec2-install-scripts/assets/services/php /etc/init.d
chown root:root /etc/init.d/php
chmod 611 /etc/init.d/php
chkconfig php on
service php start
netstat -antup | grep -i 7000
yum remove gcc gcc-c++ make python3-docutils bzip2-devel curl-devel libpng-devel libzip-devel libxml2-devel openldap-devel gnutls-devel libicu-devel openssl-devel systemd-devel zlib-devel -y
| true |
532bef0e2f66eeee380a06ae91caf4b6c528ff72 | Shell | jafingerhut/p4-guide | /action-profile-and-selector/run-tests.sh | UTF-8 | 373 | 2.515625 | 3 | [] | no_license | #! /bin/bash
# VPP_P4_INSTAL_DIR should be the directory where you have cloned a
# copy of this repository:
# https://github.com/jafingerhut/VPP_P4
# It contains some Python code import'd by action-profile-tests.py
VPP_P4_INSTALL_DIR=${HOME}/p4-docs/VPP_P4
export PYTHONPATH=${VPP_P4_INSTALL_DIR}:${PYTHONPATH}
./action-profile-tests.py --json action-profile.json
| true |
b5ee099377f5e7e787916b31d7b140f5f77844f7 | Shell | tovrleaf/sh-scripts | /mysqldumps.sh | UTF-8 | 585 | 3.453125 | 3 | [] | no_license | #!/bin/sh
# @author Niko Kivelä <niko@tovrleaf.com>
# @since Sun Dec 13 23:49:23 EEST 2009
# Backup mysql per dump in $HOME/backups/<daynumber>/<dump>
# Run via cron, or manually when feel like it.
DAY=`/bin/date +%u`
function dumpsql()
{
/usr/bin/mysqldump -hlocalhost -u$1 -e -p$2 $1 > $HOME/backups/mysqldump-$1.sql && gzip $HOME/backups/mysqldump-$1.sql
if [ -f $HOME/backups/$DAY/mysqldump-$1.sql.gz ]
then
rm $HOME/backups/$DAY/mysqldump-$1.sql.gz
fi
mv $HOME/backups/mysqldump-$1.sql.gz $HOME/backups/$DAY/mysqldump-$1.sql.gz
}
# dumpsql <database> <password>
| true |
6c92f2120078bdcbe3ba0a0abff19eb20be6f584 | Shell | ahelal/terraform-azure-demo | /bin/deploy-users.sh | UTF-8 | 527 | 3.28125 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Deploy the Terraform "users" stack: source any optional variable files,
# then run terraform init/apply non-interactively. Aborts on first failure.
set -e

DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
ROOT_DIR="${DIR}/../"
VARS_DIR="${ROOT_DIR}/vars"

# Pull in generated and hand-maintained variable files when present.
for vars_file in "${VARS_DIR}/generated_vars.sh" "${VARS_DIR}/terraform_variables.sh"; do
	if [ -f "${vars_file}" ]; then
		echo "sourcing ${vars_file}"
		source "${vars_file}"
	fi
done

cd "${ROOT_DIR}/terraform/users"
terraform init
terraform apply -auto-approve
| true |
a1a7bb522fa96d16f6b70910398f81e6c8375ee4 | Shell | NILGroup/TFG-1819-Biblioteca | /janet-full-install.sh | UTF-8 | 6,405 | 3.390625 | 3 | [] | no_license | #!/bin/bash
DIRECTORY=$(cd `dirname $0` && pwd)
set -e
echo "Programa de instalación de Janet."
echo "-----------------------------------"
if ! [ $(id -u) = 0 ]; then
echo "Este script solo puede ser ejecutado por un superusuario." >&2
exit 1
fi
if [ $SUDO_USER ]; then
real_user=$SUDO_USER
else
real_user=$(whoami)
fi
echo -n "Seguro que quieres instalar Janet (y/n)? "
read answer
if [ "$answer" != "${answer#[Nn]}" ] ;then
exit 0
fi
echo "-----------------------------------"
echo "Comprobando integridad de ficheros"
if [ ! -d "Servidor" ] || [ ! -d "Jarvis" ] || [ ! -f "wskey.conf" ]; then
echo "ERROR! No se localizan los ficheros de instalación." >&2
exit 1
else
echo "Ok"
fi
echo "-----------------------------------"
export DEBIAN_FRONTEND=noninteractive
echo "Actualizando apt..."
apt-get update >/dev/null
echo "Instalando Python 3..."
apt-get install -yq build-essential checkinstall >/dev/null
apt-get install -yq libreadline-gplv2-dev libncursesw5-dev libssl-dev libsqlite3-dev tk-dev libgdbm-dev libc6-dev libbz2-dev zlib1g-dev unzip >/dev/null
apt-get install -yq python3 python3-dev python3-pip >/dev/null
echo "Ok"
echo "-----------------------------------"
echo "Instalando Git..."
apt-get -yq install git-all >/dev/null
echo "Ok"
echo "-----------------------------------"
echo "Instalando MongoDB..."
if ! [ -x "$(command -v mongo)" ]; then
apt-get -yq install dirmngr >/dev/null
apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 9DA31620334BD75D9DCB49F368818C72E52529D4
source /etc/os-release
if [ $ID == 'debian' ]; then
if [ $VERSION_ID == "9" ]; then
echo "deb http://repo.mongodb.org/apt/debian stretch/mongodb-org/4.0 main" > /etc/apt/sources.list.d/mongodb-org-4.0.list
else
echo "deb http://repo.mongodb.org/apt/debian jessie/mongodb-org/4.0 main" > /etc/apt/sources.list.d/mongodb-org-4.0.list
fi
else
if [ $VERSION_ID == "18.04" ]; then
echo "deb http://repo.mongodb.org/apt/ubuntu bionic/mongodb-org/4.0 multiverse" > /etc/apt/sources.list.d/mongodb-org-4.0.list
elif [ $VERSION_ID == "16.04" ]; then
echo "deb http://repo.mongodb.org/apt/ubuntu xenial/mongodb-org/4.0 multiverse" > /etc/apt/sources.list.d/mongodb-org-4.0.list
else
echo "deb http://repo.mongodb.org/apt/ubuntu trusty/mongodb-org/4.0 multiverse" > /etc/apt/sources.list.d/mongodb-org-4.0.list
fi
fi
apt-get update >/dev/null
apt-get install -yq mongodb-org >/dev/null
printf "[Unit]\nDescription=High-performance, schema-free document-oriented database\nAfter=network.target\n\n[Service]\nUser=mongodb\n ExecStart=/usr/bin/mongod --quiet --config /etc/mongod.conf\n\n[Install]\nWantedBy=multi-user.target" > /etc/systemd/system/mongodb.service
systemctl enable mongodb
systemctl start mongodb
fi
echo "Ok"
echo "-----------------------------------"
echo "Creando grupo y usuario..."
if ! id "tfg-biblio" >/dev/null 2>&1; then
useradd -m -d /home/tfg-biblio -s /sbin/nologin -U tfg-biblio
echo "Ok"
else
echo "El usuario 'tfg-biblio' ya existe, continúo..."
fi
echo "-----------------------------------"
echo "Instalando Janet..."
mkdir /home/tfg-biblio/janet
mv Servidor/* /home/tfg-biblio/janet/
mv wskey.conf /home/tfg-biblio/janet/
chown -R tfg-biblio:tfg-biblio /home/tfg-biblio/janet
chmod -R 777 /home/tfg-biblio/janet
echo "Ok"
echo "-----------------------------------"
echo "Instalando dependencias..."
pip3 install -r /home/tfg-biblio/janet/requirements.txt >/dev/null
echo "Ok"
echo "-----------------------------------"
echo "Instalando Jarvis..."
PYT=$(python3 --version 2>&1 | grep -oP '([0-9]).([0-9])')
mkdir /home/tfg-biblio/Jarvis
mv Jarvis/regex_featurizer.py /usr/local/lib/python$PYT/dist-packages/rasa_nlu/featurizers/regex_featurizer.py
mv Jarvis/* /home/tfg-biblio/Jarvis/
chown -R tfg-biblio:tfg-biblio /home/tfg-biblio/Jarvis
chmod -R 777 /home/tfg-biblio/Jarvis
cd /home/tfg-biblio/
python3 -m spacy download es_core_news_sm >/dev/null
echo "Ok"
echo "-----------------------------------"
echo "Preparando Base de datos..."
mongo admin <<EOF
use admin
var user = {
"user" : "rasa",
"pwd" : "Pitonisa46",
roles : [{
"role" : "readWrite",
"db" : "rasa"
}]
}
db.createUser(user);
exit
EOF
mongoimport --db janet --collection localizaciones --file /home/tfg-biblio/janet/bibliotecas.json
mongo <<EOF
use janet
db.localizaciones.createIndex({kw: "text"});
exit
EOF
echo "Ok"
echo "-----------------------------------"
echo "Creando daemons..."
mv /home/tfg-biblio/janet/janet.service /etc/systemd/system/janet.service
mv /home/tfg-biblio/Jarvis/jarvisactions.service /etc/systemd/system/jarvisactions.service
mv /home/tfg-biblio/Jarvis/jarvis.service /etc/systemd/system/jarvis.service
systemctl enable janet.service
systemctl enable jarvisactions.service
systemctl enable jarvis.service
echo "Ok"
echo "-----------------------------------"
echo "Entrenando Jarvis por primera vez, esta operación durará varios minutos..."
cd /home/tfg-biblio/Jarvis/
sudo -u tfg-biblio python3 JarvisMain.py -t all
echo "Ok"
echo "-----------------------------------"
echo "Creando servicio del destructor imperial"
mycron=${TMPDIR:-/tmp}/xyz.$$
trap "rm -f $tmp; exit 1" 0 1 2 3 13 15
echo "*/15 * * * * tfg-biblio python3 /home/tfg-biblio/janet/DestructorImperial.py" >> $mycron
crontab -u tfg-biblio $mycron
rm -f $mycron
echo "Ok"
echo "-----------------------------------"
echo "Arrancando servicios"
systemctl start janet.service
systemctl start jarvisactions.service
systemctl start jarvis.service
echo "-----------------------------------"
echo "Borrando archivos temporales"
if [ -d "$DIRECTORY/Servidor" ]; then rm -Rf $DIRECTORY/Servidor; fi
if [ -d "$DIRECTORY/Jarvis" ]; then rm -Rf $DIRECTORY/Jarvis; fi
if [ -d "$DIRECTORY/Clientes" ]; then rm -Rf $DIRECTORY/Clientes; fi
if [ -f "$DIRECTORY/.gitignore" ]; then rm $DIRECTORY/.gitignore; fi
if [ -f "$DIRECTORY/README.md" ]; then rm $DIRECTORY/README.md; fi
if [ -f "$DIRECTORY/LICENSE.md" ]; then rm $DIRECTORY/LICENSE.md; fi
if [ -f "/home/tfg-biblio/Jarvis/bibliotecas.json" ]; then rm /home/tfg-biblio/Jarvis/bibliotecas.json; fi
echo "Ok"
echo "-----------------------------------"
echo "Instalación realizada con éxito!"
exit 0
| true |
0f4e3311218d932f546f6b745d48e640f566e9bf | Shell | zmjohnson42/dotfiles | /zsh/env.sh | UTF-8 | 1,310 | 2.953125 | 3 | [] | no_license | #!/bin/zsh
# PATHS
export PATH="/usr/local/share/python:/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbina"
export PYENV_ROOT="$HOME/.pyenv";
export PATH="$PYENV_ROOT/bin:$PATH";
eval "$(pyenv init -)";
eval $(direnv hook zsh);
# ALIASES
# dev
alias devdep="bundle exec cap development deploy"
alias pushdep="gp ibp-ghd secure-dev && devdep"
#alias brewup='brew update; brew upgrade; brew prune; brew cleanup; brew doctor'
alias webstat="bundle exec cap development deploy:web:status"
## shell
alias cls="clear"
alias zln="~/.dotfiles/ln.zsh"
## folders
alias dt="cd ~/DialogTech"
alias .f="cd ~/.dotfiles"
alias crbn="cd ~/DialogTech/sandbox/CRBN"
alias sand="cd ~/DialogTech/sandbox"
alias mods="cd ~/DialogTech/sandbox/CRBN/application/modules"
alias org="cd ~/DialogTech/sandbox/Organic/"
alias migra="cd ~/DialogTech/sandbox/LaMigra"
alias ifby="cd ~/DialogTech/sandbox/Ifbyphone"
alias migrate="./bin/migrate migrations:migrate"
## jupyter
alias notes="cd ~/Documents/JupyterNotebooks"
alias jup="jupyter notebook"
## vim
alias v="vim"
## git
alias gs="git status -s"
## functions
vstat () {
vagrant status | awk '{if (NF==3 && NR!=1 && ($2 == "running")) print "\033[32m"($1); else if (NF==3 && NR!=1) print "\033[31m" ($1)}'
}
eval "$(pyenv init -)"
eval "$(pyenv virtualenv-init -)"
## mongo
| true |
05ebff8cd098781d024b49dad326837c525b8a74 | Shell | alvatar/snippets | /command-line/rename_mp3.sh | UTF-8 | 1,077 | 3.78125 | 4 | [] | no_license | #!/bin/bash
DIR=/mnt/usb/TODO/complete
TARGET_NON_STANDARD_DIR=/mnt/usb/TODO
TARGET_RENAMED_DIR=/mnt/usb/music/albums
# The tricky way to get find's output into an array
unset files i
while IFS= read -r -d $'\0' file; do
files[i++]="$file"
ALL=$(basename "$file")
YEAR=$(echo $ALL | grep -o '[1-2][0-9][0-9][0-9]\+')
LABEL=$(echo $ALL | grep -Po '(?<=\().*(?=\))')
ARTIST=$(echo $ALL | awk '{split($0,a," - "); print a[1]}')
ALBUM=$(echo $ALL | awk '{split($0,a," - "); print a[3]}' | sed s/' ([^)]*)'/''/g)
LABEL_LOW=$(echo $LABEL | awk '{print tolower($0)}')
ARTIST_LOW=$(echo $ARTIST | awk '{print tolower($0)}')
ALBUM_LOW=$(echo $ALBUM | awk '{print tolower($0)}')
if [ -z "$YEAR" ] | [ -z "$LABEL" ] | [ -z "$ARTIST" ] | [ -z "$ALBUM" ]; then
printf "ATTENTION! Found non-standard naming: $file --> MOVING\n"
mv "$file" "$TARGET_NON_STANDARD_DIR"
else
NEW_NAME="$YEAR + $ARTIST_LOW + $ALBUM_LOW [$LABEL_LOW]"
mv "$file" "$TARGET_RENAMED_DIR/$NEW_NAME"
fi
done < <(find $DIR -maxdepth 1 -type d ! -path $DIR -type d -print0)
| true |
2c245aa29455e42dcac836f284f51fe1af3ed75b | Shell | bcbrockway/minikube-sandbox | /error-spike.sh | UTF-8 | 146 | 2.765625 | 3 | [] | no_license | #!/bin/bash
MAX=2400
SLEEP=1
for ((i=0; i<=MAX; i++)); do
echo -ne $i / $MAX\\r
echo error | nc minikube 80 > /dev/null
sleep $SLEEP
done
| true |
67e2da177c20b6878c08b49625e621bc615ad6f6 | Shell | eerimoq/monolinux-example-project | /3pp/linux/tools/virtio/ringtest/run-on-all.sh | UTF-8 | 670 | 3.515625 | 4 | [
"Linux-syscall-note",
"GPL-2.0-only",
"MIT"
] | permissive | #!/bin/sh
# SPDX-License-Identifier: GPL-2.0
CPUS_ONLINE=$(lscpu --online -p=cpu|grep -v -e '#')
#use last CPU for host. Why not the first?
#many devices tend to use cpu0 by default so
#it tends to be busier
HOST_AFFINITY=$(echo "${CPUS_ONLINE}"|tail -n 1)
#run command on all cpus
for cpu in $CPUS_ONLINE
do
#Don't run guest and host on same CPU
#It actually works ok if using signalling
if
(echo "$@" | grep -e "--sleep" > /dev/null) || \
test $HOST_AFFINITY '!=' $cpu
then
echo "GUEST AFFINITY $cpu"
"$@" --host-affinity $HOST_AFFINITY --guest-affinity $cpu
fi
done
echo "NO GUEST AFFINITY"
"$@" --host-affinity $HOST_AFFINITY
echo "NO AFFINITY"
"$@"
| true |
e9036c91bb44a6983093c163d76b07a72fdd4ca0 | Shell | milanbojovic/bash_scripts | /Install_script_final.sh | UTF-8 | 3,749 | 3.40625 | 3 | [] | no_license | #!/bin/bash
# Ubuntu post-install provisioning script: registers third-party APT
# repositories (GetDeb/PlayDeb, git, gimp, Java, VLC, ...), installs a
# fixed set of desktop packages, then upgrades everything and cleans up.
# Must be run as root; all highlighted output is appended to $log_file.

# Print all arguments highlighted in green (and log them), then pause.
# Fix: was "$1", which silently dropped extra arguments at several call
# sites (e.g. the two-part startup message and the trailing "!!!").
printGreen(){
  printf "\n%s\n\n" "`tput smso``tput setf 2` $* `tput sgr0`" | tee -a $log_file;
  sleep 2;
};

# Print all arguments highlighted in red (and log them), then pause.
printRed(){
  printf "\n%s\n\n" "`tput smso``tput setf 4` $* `tput sgr0`" | tee -a $log_file;
  sleep 2;
};

# Refresh the package index and install package $1, reporting the result.
install(){
  printGreen "Installing $1:" | tee -a $log_file;
  sleep 2;
  apt-get update;
  apt-get install -y $1;
  status=$?;
  if [ $status -eq 0 ]; then
    printGreen "$1 installation completed successfully";
  else
    printRed "$1 installation FAILED" !!!;
  fi
};

echo '';
log_file=install_script_log;

printGreen "Install script started" "Checking if user has ROOT privileges...";

if [ $UID -eq 0 ]; then
  #Repositories for updating of packages
  printGreen "Downloading GetDeb and PlayDeb";
  sleep 1;
  wget http://archive.getdeb.net/install_deb/getdeb-repository_0.1-1~getdeb1_all.deb http://archive.getdeb.net/install_deb/playdeb_0.3-1~getdeb1_all.deb;
  printGreen "Installing GetDeb";
  sleep 1;
  dpkg -i getdeb-repository_0.1-1~getdeb1_all.deb;
  # Fix: was "pringGreen" (typo), which failed with command-not-found.
  printGreen "Installing PlayDeb";
  sleep 1;
  dpkg -i playdeb_0.3-1~getdeb1_all.deb;
  printGreen "Deleting Downloads";
  rm -f getdeb-repository_0.1-1~getdeb1_all.deb;
  rm -f playdeb_0.3-1~getdeb1_all.deb;
  printGreen "Deleting Downloads GetDeb and PlayDeb - installation compleete";
  sleep 1;
  install apache2;
  install skype;
  install dropbox;
  install tomcat7;
  #Adding GIT repository:
  echo | add-apt-repository ppa:git-core/ppa;
  install git;
  install maven;
  #Adding Gimp repository:
  echo | add-apt-repository ppa:otto-kesselgulasch/gimp;
  install gimp;
  install gimp-data;
  install gimp-plugin-registry;
  install gimp-data-extras;
  install flashplugin-installer;
  #Adding repository JAVA
  echo | add-apt-repository ppa:webupd8team/java;
  install oracle-java7-installer;
  install eclipse;
  #Adding VLC repository:
  echo | add-apt-repository ppa:videolan/stable-daily;
  install vlc;
  printGreen "Installing DVD - encoding support for VLC:";
  sleep 1;
  echo 'deb http://download.videolan.org/pub/debian/stable/ /' | tee -a /etc/apt/sources.list.d/libdvdcss.list;
  echo 'deb-src http://download.videolan.org/pub/debian/stable/ /' | tee -a /etc/apt/sources.list.d/libdvdcss.list;
  wget -O - http://download.videolan.org/pub/debian/videolan-apt.asc | apt-key add -;
  install libxine1-ffmpeg mencoder flac faac faad sox ffmpeg2theora libmpeg2-4 uudeview libmpeg3-1 mpeg3-utils mpegdemux liba52-dev mpeg2dec vorbis-tools id3v2 mpg321 mpg123 libflac++6 totem-mozilla icedax lame libmad0 libjpeg-progs libdvdcss2;
  install ubuntu-wallpapers;
  install ubuntu-restricted-extras;
  # For updates of some existing packages
  printGreen "Adding repository for gnome 3 library updates";
  echo | add-apt-repository -y ppa:gnome3-team/gnome3;
  status=$?;
  if [ $status -eq 0 ]; then
    printGreen "Gnome 3 library repo added successfully";
  else
    printRed "Gnome 3 library repo adding FAILED !!!";
  fi
  # suggestion from "howtoubuntu.org"
  # Fix: the original line was garbled ("ppa:webupd8team/y-ppa-managsudo
  # add-apt-repository -y ppa:webupd8team/javaer") - two commands pasted
  # into one. The Java PPA is already added above, so this adds the
  # remaining Y PPA Manager repository.
  printGreen "Adding repository for Y PPA Manager";
  echo | add-apt-repository -y ppa:webupd8team/y-ppa-manager;
  status=$?;
  if [ $status -eq 0 ]; then
    printGreen "Y PPA Manager repo added successfully";
  else
    printRed "Y PPA Manager repo adding FAILED !!!";
  fi
  #Upgrade all packages which can be upgraded
  printGreen "Updating and upgrading packages !!!";
  apt-get update;
  apt-get -y upgrade;
  ##Cleanup !!!!!!!!!!!!
  printGreen "Cleaning Up";
  printf "\n%s\n\n" "" | tee -a $log_file;
  sleep 2;
  sudo apt-get -f install;
  sudo apt-get autoremove;
  sudo apt-get -y autoclean;
  sudo apt-get -y clean;
  printGreen "Script finished execution. Have a nice DAY!";
  # Fix: the original exited 1 on the success path; report success.
  exit 0;
else
  printRed "Error USER: '$USER' is not root.";
fi

printGreen "Script exiting";
| true |
38289ea2aab239b503c378ac6928e2070a75e457 | Shell | darkoppressor/cheese-engine | /tools/build-system/scripts/data/build-sounds | UTF-8 | 2,171 | 3.890625 | 4 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/bin/bash
# Render every LMMS (.mmpz) and sfxr (.sfxr) sound project found under
# <project>/development/sounds into an Ogg Vorbis file under
# <project>/data/sounds, preserving the subdirectory layout and skipping
# sounds whose output already exists.
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
cd $DIR
PROJECT_DIRECTORY=$1
SOURCE_PROJECT=""
SOUND_DIRECTORY=""
# Recursively render all sound project files below $1.
build_project_sounds () {
    local directory=$1
    for file in $directory/* ; do
        if [ -f "$file" ]; then
            if [[ $file =~ \.mmpz$ ]]; then
                # Output path relative to $SOURCE_PROJECT, extension
                # swapped to .ogg.
                prefix=$SOURCE_PROJECT/
                suffix=.mmpz
                file_name=${file#$prefix}
                file_name=${file_name%$suffix}
                file_name="$file_name.ogg"
                # Bare filename, then the subdirectory part of the path.
                prefix=$directory/
                plain_file_name=${file#$prefix}
                prefix=$SOURCE_PROJECT/
                sound_dir=${file#$prefix}
                sound_dir=${sound_dir%$plain_file_name}
                # If the sound does not already exist at the destination
                if [ ! -e "$SOUND_DIRECTORY/$file_name" ]; then
                    mkdir -p "$SOUND_DIRECTORY/$sound_dir"
                    # NOTE(review): $LMMS is expected to hold the path of
                    # the lmms binary; it is not set in this script -
                    # presumably exported by the build environment.
                    $LMMS --render "$file" --output "$SOUND_DIRECTORY/$file_name" --format 'ogg' --samplerate 44100 --bitrate 160 --interpolation 'sincmedium' --oversampling 1 --loop
                fi
            elif [[ $file =~ \.sfxr$ ]]; then
                # Same path computation as the .mmpz branch above.
                prefix=$SOURCE_PROJECT/
                suffix=.sfxr
                file_name=${file#$prefix}
                file_name=${file_name%$suffix}
                file_name="$file_name.ogg"
                prefix=$directory/
                plain_file_name=${file#$prefix}
                prefix=$SOURCE_PROJECT/
                sound_dir=${file#$prefix}
                sound_dir=${sound_dir%$plain_file_name}
                # If the sound does not already exist at the destination
                if [ ! -e "$SOUND_DIRECTORY/$file_name" ]; then
                    mkdir -p "$SOUND_DIRECTORY/$sound_dir"
                    # Render via a patched sfxr to WAV, encode to Ogg,
                    # then drop the intermediate WAV.
                    wav_file=$(echo "$file_name" | sed -e "s/.ogg$/.wav/")
                    /home/tails/build-server/sfxr-hacked/sfxr "$file" "$SOUND_DIRECTORY/$wav_file"
                    oggenc -q 5 "$SOUND_DIRECTORY/$wav_file" -o "$SOUND_DIRECTORY/$file_name"
                    rm -f "$SOUND_DIRECTORY/$wav_file"
                fi
            fi
        fi
    done
    # Recurse into subdirectories.
    for d in $directory/*/ ; do
        if [ -d "$d" ]; then
            build_project_sounds "$d"
        fi
    done
}
if [ -n "$PROJECT_DIRECTORY" ]; then
    SOURCE_PROJECT="$PROJECT_DIRECTORY/development/sounds"
    SOUND_DIRECTORY="$PROJECT_DIRECTORY/data/sounds"
    # Build all sounds in project source directory recursively
    build_project_sounds "$SOURCE_PROJECT"
else
    echo "build-sounds - build all sound data files for the passed project"
    echo "Usage: build-sounds PROJECT-DIRECTORY"
fi
| true |
0414c999035c2f4e9923c74864a7e712f2e2ffbe | Shell | closescreen/clhousesample | /history_uid_SQL-run-3.sh | UTF-8 | 981 | 3.328125 | 3 | [] | no_license | set +x
set -o pipefail
set -u
# For each file in sqldir that has no "$f.ok.result" beside it, run the
# SQL from the file via clickhouse-client and create "$f.ok.result".
# The SQL files are shared by all servers.
# Result and log files are per-server (suffixed with the hostname).
# Single-threaded.
myserv=`hostname`
day=${1:?DAY!} #"2017-03-20"
sqldir="../../reg_history_uid/$day"
for f in `find $sqldir -name "*.sql"`; do
  #echo "$f..."
  [[ -s "$f.${myserv}.ok.result" ]] && continue # skip if the .ok. marker already exists
  t1=`date +%s`
  cat "$f" | clickhouse-client >>"$f.${myserv}.log" 2>&1
  if [[ $? -eq 0 ]]; then echo ok>>$f.${myserv}.ok.result; else echo NO>>$f.${myserv}.no.result; fi
  t2=`date +%s`
  dt=$(( $t2 - $t1 ))
  # Throttle: sleep for a tenth of the query's wall-clock runtime.
  sl=$(( $dt / 10 ))
  sleep $sl
done
# TODO: old result/log files will eventually need cleaning up.
| true |
0ef09b23ecebd21e4211db725b8de1d2fc8d11b4 | Shell | GoogleChromeLabs/chromeos_smart_card_connector | /third_party/webports/src/src/ports/libsodium/build.sh | UTF-8 | 1,982 | 3.234375 | 3 | [
"LGPL-2.1-only",
"LGPL-2.0-or-later",
"Apache-2.0",
"GPL-2.0-or-later",
"LicenseRef-scancode-mit-old-style",
"MPL-1.1",
"ImageMagick",
"MIT",
"LicenseRef-scancode-warranty-disclaimer",
"GFDL-1.2-only",
"BSD-3-Clause",
"LicenseRef-scancode-other-permissive",
"GPL-1.0-or-later",
"Libpng",
"LicenseRef-scancode-on2-patent",
"LicenseRef-scancode-greg-roelofs",
"LicenseRef-scancode-unknown-license-reference",
"WxWindows-exception-3.1",
"GPL-2.0-only",
"BSL-1.0",
"LGPL-2.0-only",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-unknown"
] | permissive | # Copyright (c) 2011 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

# webports build recipe for libsodium: static non-PIE build plus a
# crypto_box smoke test that is compiled against the fresh library,
# finalized/translated for PNaCl, and run where the host CPU matches.
EXTRA_CONFIGURE_ARGS="--disable-pie --disable-shared"

# Execute the compiled smoke-test binary via its launcher script.
RunTest() {
  naclport_test/crypto_box_test
}

# Install the built library into a scratch dir, compile the crypto_box
# test against it, and run it (twice for PNaCl: x86-32 and x86-64).
TestStep() {
  MakeDir naclport_test/lib
  # the libtool warns "libtool: install: warning: remember to run
  # `libtool --finish pepper_31/toolchain/linux_pnacl/usr/lib'"
  (cd src/libsodium;
   /bin/bash ../../libtool --mode=install /usr/bin/install \
     -c libsodium.la $(cd ../../naclport_test/lib && pwd))
  # PNaCl produces portable bitcode first; other toolchains emit a
  # native executable directly.
  if [[ ${NACL_ARCH} == pnacl ]]; then
    EXT=.bc
  else
    EXT=${NACL_EXEEXT}
  fi
  # on newlib_arm compilation crashed when without -lssp,
  # on other platforms it was ok without it
  LSSP=""
  if [[ ${NACL_ARCH} == arm && ${TOOLCHAIN} == newlib ]]; then
    LSSP="-lssp"
  fi
  INCLUDES="-Isrc/libsodium/include -Isrc/libsodium/include/sodium \
-I${SRC_DIR}/src/libsodium/include \
-I${SRC_DIR}/src/libsodium/include/sodium"
  ${NACLCXX} ${INCLUDES} ${NACLPORTS_CPPFLAGS} ${NACLPORTS_CFLAGS} \
      ${NACLPORTS_LDFLAGS} -o naclport_test/crypto_box_test${EXT} \
      ${START_DIR}/crypto_box_test.c naclport_test/lib/libsodium.a \
      -lnacl_io -lpthread ${LSSP}
  [[ ${NACL_ARCH} == "pnacl" ]] && ${PNACLFINALIZE} \
    -o naclport_test/crypto_box_test${NACL_EXEEXT} naclport_test/crypto_box_test${EXT}
  echo "Running test"
  if [ "${NACL_ARCH}" = "pnacl" ]; then
    # Translate the portable pexe for each x86 flavour and run both.
    local pexe=crypto_box_test${NACL_EXEEXT}
    (cd naclport_test;
     TranslateAndWriteLauncherScript ${pexe} x86-32 crypto_box_test.x86-32${EXT} crypto_box_test)
    RunTest
    (cd naclport_test;
     TranslateAndWriteLauncherScript ${pexe} x86-64 crypto_box_test.x86-64${EXT} crypto_box_test)
    RunTest
    echo "Tests OK"
  elif [ "$(uname -m)" = "${NACL_ARCH_ALT}" ]; then
    # Only run natively-targeted builds when the host CPU matches.
    WriteLauncherScript naclport_test/crypto_box_test crypto_box_test${EXT}
    RunTest
    echo "Tests OK"
  fi
}
| true |
170c4b4863e8445705d3aab420fb734dd8c3c71a | Shell | sumpygump/vimrc | /autoinstall.sh | UTF-8 | 1,295 | 3.921875 | 4 | [] | no_license | #!/bin/sh
# Install sumpygump's vim configuration: clone the vimrc repo into
# $INSTALL_TO, symlink ~/.vimrc and ~/.vim at it, then install plugins
# via vim-plug when the local vim build supports plugins.

INSTALL_TO=~

# Print a message to stderr.
warn() {
    echo "$1" >&2
}

# Print a message to stderr and abort.
die() {
    warn "$1"
    exit 1
}

# Refuse to clobber an existing installation.
# Fix: the original tested the literal path "~/.vim" - tilde is not
# expanded inside double quotes - so those two guards could never fire.
[ -e "$INSTALL_TO/vimrc" ] && die "$INSTALL_TO/vimrc already exists."
[ -e "$HOME/.vim" ] && die "~/.vim already exists."
[ -e "$HOME/.vimrc" ] && die "~/.vimrc already exists."

cd "$INSTALL_TO"
git clone git://github.com/sumpygump/vimrc.git
cd vimrc

echo "----------------"

# symlink_with_checks <source> <target>
# Create symlink <target> -> <source> unless <target> already exists.
symlink_with_checks() {
    sourcepath="$1"
    targetname="$2"
    if [ -e "$targetname" ]; then
        warn "Not symlinking $targetname; already exists"
    else
        ln -s "$sourcepath" "$targetname"
    fi
}

# Symlink ~/.vim and ~/.vimrc
cd ~
symlink_with_checks "$INSTALL_TO/vimrc/vim/vimrc" .vimrc
symlink_with_checks "$INSTALL_TO/vimrc/vim" .vim
touch ~/.vim/user.vim

echo "----------------"

# Plug Install
## Detect if installed version of vim will work
# (test the command directly instead of the old run-then-check-$? form)
if vim --version | grep -q "\+syntax"; then
    vim +PlugInstall +qall
else
    echo "NOTE: installed version of vim doesn't support plugins"
    echo "Skipped install of plugins"
    echo "Perhaps upgrade vim (apt install vim)"
    echo "And then run this:"
    echo "vim +PlugInstall +qall"
fi

echo "----------------"

echo "You should run this:"
echo "sudo apt install exuberant-ctags"
echo
echo "Installed and configured .vim, have fun."
| true |
9fab76afc107faa04e9f22c6b9b7c245d5d5edb2 | Shell | 0xch4z/dotfiles | /zsh/zsh/plugins.zsh | UTF-8 | 152 | 2.734375 | 3 | [
"CC0-1.0"
] | permissive | mkdir -p "$ZSH_PLUGINS"
# ensure_plugin <git-url>
# Clone the given plugin repository into $ZSH_PLUGINS under its own
# name, skipping the clone when a checkout is already present (so
# re-sourcing this file is idempotent, as "ensure" implies).
# Fixes: the original cloned into ~/$ZSH_PLUGINS/ (a tilde prepended to
# an already-usable path) and assigned $repo without using it.
ensure_plugin() {
	local repo="$1"
	local dest="$ZSH_PLUGINS/${repo##*/}"
	[[ -d "$dest" ]] || git clone "$repo" "$dest"
}

ensure_plugin "https://github.com/kutsan/zsh-system-clipboard"
| true |
c76da4c6f42e63e1118b81cf647067814fc87ecb | Shell | bullno1/hakomari | /src/providers/eth/create1 | UTF-8 | 300 | 2.9375 | 3 | [] | no_license | #!/bin/sh -e
# Create a new bare Ethereum key locked with the device passphrase, move
# the resulting keyfile to $1, and display the derived account address.

# Passphrase used to lock the new key (file path supplied via env).
PASSPHRASE=$(cat $HAKOMARI_PASSPHRASE_FILE)

# Wipe /tmp so the freshly generated keyfile is the only secret ethkey
# finds below.  NOTE(review): destructive on shared systems.
rm -rf /tmp/*
hakomari-show "Creating wallet..."
# Generate a new bare key under /tmp, encrypted with the passphrase.
ethkey --secrets-path /tmp --lock ${PASSPHRASE} newbare
# /tmp was emptied above, so the listing is exactly the new key's id.
KEYID=$(ethkey --secrets-path /tmp)
mv /tmp/${KEYID}.json $1
# Read the account address out of the keyfile JSON and show it.
ADDRESS="0x$(jq -r '.address' "$1")"
hakomari-show "Address: ${ADDRESS}"
| true |
c90c768e546070bfb8e1eaa65e9063ae4c6643fd | Shell | ConnerWallace/dotfiles | /bin/sshScanSubnet | UTF-8 | 2,708 | 3.25 | 3 | [] | no_license | #!/bin/bash
set -euo pipefail
IFS=$'\n\t'
# Scan a subnet for hosts with port 22 open (nmap), resolve their mDNS
# names (avahi), and print an ip/hostname/MAC-vendor table.
#requires: yq and avahi-resolve
# yq comes with xq which is jq for xml
# https://github.com/kislyuk/yq
# pip install yq
command -v jq >/dev/null 2>&1 || { echo >&2 "I require jq but it's not installed. Aborting."; exit 1; }
command -v xq >/dev/null 2>&1 || { echo >&2 "I require xq but it's not installed. Aborting."; exit 1; }
command -v avahi-resolve-address >/dev/null 2>&1 || { echo >&2 "I require avahi-resolve-address but it's not installed install 'avahi-utils'. Aborting."; exit 1; }
port=22
show_help () {
    echo "Usage: sshScanSubnet {target specification}"
    echo "    target specification: whatever is valid for nmap"
}
firstArg=${1:-}
if [[ $firstArg == "-h" ]] || [[ $firstArg == "--help" ]]; then
    show_help
    exit 0
fi
if [ $# -eq 0 ]; then
    #no arguments
    # Interactively pick a local subnet from the host's interfaces.
    source sind.sh
    IFS=$'\r\n' GLOBIGNORE='*' command eval 'subnets=($(ip -o -f inet addr show | awk "/scope global/ {print \$2 \" \" \$4}" | column -t))'
    subnetChoice=$(sind "Choose one" "${subnets[@]}")
    subnet=$(echo "${subnets[$subnetChoice]}" | awk '{print $2}')
else
    subnet=$firstArg
fi
echo "scanning $subnet"
# XML scan output -> per-host JSON: [[addr, mac, vendor], port-state].
sudo nmap -Pn -oX - -p$port $subnet > /tmp/nmapOutput
stage2=$(cat /tmp/nmapOutput | xq -c '.nmaprun.host[] | [(.address | if type=="array" then [.[0]."@addr", .[1]."@addr", .[1]."@vendor"] else [."@addr"] end), .ports.port.state."@state"]')
#echo -e "$stage2\n\n"
# Keep hosts with the port open; strip JSON punctuation into CSV rows.
hosts=$(echo "$stage2" | grep open | sed 's/["\[\]]*//g' | sed 's/,open$//' | sed 's/,/, /g')
# Sample data kept for offline testing of the formatting below:
#set +e #read returns nonzero, is fine
#read -r -d '' hosts << EOM
#10.4.4.1,80:2A:A8:8F:44:33,Ubiquiti Networks
#10.4.4.2,10:DD:B1:B3:EA:57,Apple
#10.4.4.6,B8:27:EB:1D:D7:AA,Raspberry Pi Foundation
#10.4.4.89,80:2A:A8:90:48:E5,Ubiquiti Networks
#10.4.4.91,80:2A:A8:90:48:DA,Ubiquiti Networks
#10.4.4.95,B8:27:EB:B8:FD:95,Raspberry Pi Foundation
#10.4.4.96,B8:27:EB:B8:FD:95,Raspberry Pi Foundation
#10.4.4.116,C4:8E:8F:F3:48:EF,Hon Hai Precision Ind.
#10.4.4.137,D4:6E:0E:03:74:0B,Tp-link Technologies
#10.4.4.172,B8:27:EB:7C:0F:E6,Raspberry Pi Foundation
#10.4.4.190,B8:27:EB:4C:4A:E6,Raspberry Pi Foundation
#10.4.4.219,78:2B:CB:8A:79:08,Dell
#10.4.4.198
#10.4.4.221
#EOM
#set -e
#echo "$hosts"
#echo -e "\n\n"
count=$(echo "$hosts" | wc -l);
echo "resolving hostnames for $count hosts"
#get mdns names
while read line ; do
    #line is "ip name"
    #not every host is guaranteed a name
    ip=$(echo $line | cut -d' ' -f1)
    hostname=$(echo $line | cut -d' ' -f2)
    # Splice the resolved name in right after the matching ip.
    hosts="${hosts/$ip/$ip, $hostname}"
done <<< "$(avahi-resolve-address $(echo "$hosts" | cut -d, -f1) 2>/dev/null)"
# Pad rows that got no hostname so every row has the same column count.
echo "$hosts" | perl -pe 's/^([^,]*),(?=[^,]*,[^,]*$)/\1, ,/' | column -t -s,
| true |
d100af311f536fbd50038b194992416ef266cd18 | Shell | chuangxiaomoo/bin | /ma | UTF-8 | 1,204 | 3.78125 | 4 | [] | no_license | #! /bin/bash
# Wrapper around man(1): look up $1 with `man -f`, let the user choose a
# section when several manpages match (accepting a prefix of the section
# name), and write the page to /tmp/.ma instead of stdout when invoked
# from the vimrc mapping (signalled by MANWIDTH=88).
function fn_main()
{
    if [ "$#" -lt 1 ] ; then
        echo "Usage: $0 keyword" && exit 1
    fi
    # call from vimrc map
    if [ "$MANWIDTH" == 88 ] ; then
        f_redirct=/tmp/.ma
    else
        f_redirct=/dev/stdout
    fi
    keyword=$1
    TMP=/tmp/.man
    man -f ${keyword} 2>&1 | tee $TMP
    # Bail out if whatis lookup itself failed (keyword unknown).
    [ ${PIPESTATUS[0]} -ne 0 ] && exit 1
    # only one
    line=`cat $TMP | wc -l`
    [ "${line}" -eq 1 ] && man $keyword > $f_redirct && exit
    # default the 1st
    read -p "Please input manpage index number: " man_index
    while :; do
        # Empty answer: fall back to man's own default section.
        [ "${man_index}" == "" ] && man $keyword > $f_redirct && exit
        # multi choice
        # Section names of all matches, e.g. "1 3p 3perl".
        indexs=`man -f $keyword | awk -F'[ ()]' '{print $3}'`
        # Prefer an exact section match, else fall back to prefix match.
        match_word=`echo ${indexs}| xargs -n 1 | grep "\<$man_index\>"`
        match_open=`echo ${indexs}| xargs -n 1 | grep "\<$man_index"`
        match_lines=${match_word:-${match_open}}
        match_count=`echo $match_lines | wc -w`
        if [ $match_count -eq 1 ]; then
            man $match_lines $keyword > $f_redirct
            exit
        else
            # Ambiguous (or no) match: ask again.
            read -p "Please input a accuracy index number: " man_index
        fi
    done
}
fn_main $@
| true |
02ee6dccbde3bba6c579d318ce7c90f4ce0ea3cd | Shell | BurnsCommaLucas/Resume | /build.bash | UTF-8 | 883 | 2.953125 | 3 | [] | no_license | #!/bin/bash
# Build the resume from resume.tex in three formats: GitHub-flavoured
# markdown (README.md), PDF (pdflatex), and a standalone HTML page
# wrapped with header.html/footer.html.
DIR=$(dirname $(realpath $0))
echo "$DIR"
# build gfm markdown readme
echo '##########################################################'
echo building gfm readme
echo '##########################################################'
pandoc "$DIR/resume.tex" -o "$DIR/README.md" -t gfm
# build pdf
echo '##########################################################'
echo building latex
echo '##########################################################'
pdflatex -file-line-error $DIR/resume.tex
# build html
echo '##########################################################'
echo building html
echo '##########################################################'
pandoc "$DIR/resume.tex" -o "$DIR/resume.html"
# Sandwich the pandoc body between the site header and footer, then
# replace resume.html with the wrapped version.
cat "$DIR/header.html" "$DIR/resume.html" "$DIR/footer.html" >"$DIR/resume.tmp"
#mv -v "$DIR/resume.html" ~/.local/share/Trash/
mv "$DIR/resume.tmp" "$DIR/resume.html"
| true |
90844b7b79b024dd09c221531d732e92d352783d | Shell | Chiasung/ether_ros | /scripts/optimizations/full_dynamic_ticks.sh | UTF-8 | 1,725 | 3.5 | 4 | [] | no_license | #!/bin/sh
# Linux has a number of boot parameters that enhances CPU isolation:
# isolcpus=<cpu set> This parameter specifies a set of CPUs that will be excluded from the Linux scheduler load balancing algorithm. The set is specified as a comma separated list of cpu numbers or ranges. E.g. "0", "1-2" or "0,3-4". The set specification must not contain any spaces. It is definitely recommended to use this parameter if the target kernel lacks support for CPU hotplug.
# nohz_full=<cpu set> A list of CPUs for which full dynamic ticks should be enabled. If the kernel configuration CONFIG_NO_HZ_FULL_ALL was given, then this list will be all CPUs except CPU 0, and this boot option is not needed.
# To achieve isolation in the RT domain (CPU2 and CPU3), use the following parameters:
###
# isolcpus=2,3 nohz_full=2,3
###
# After the system has booted, check the boot messages to verify that full dynamic ticks was enabled, e.g. using the shell command dmesg. Search for entries similar to the following:
# NO_HZ: Full dynticks CPUs: 2-3.
# Also make sure there is an entry similar to the following:
# Experimental no-CBs CPUs: 0-7.
# The no-CB CPU list must include the CPU list for full dynticks.
# When choosing the CPU lists on hardware using simulated CPUs, such as hyperthreads, ensure you include real cores and not half a core. The latter could occur if one hyperthread is in the set of CPUs using full dynamic ticks feature while the other hyperthread on the same core does not. This can cause problems when pinning interrupts to a CPU. The two hyperthreads might also affect each other depending on the load.
##### Generally, because our application is multi-threaded, full dynamic ticks is not encouraged.
| true |
52db1b29a36997543d83dd3b50b8c8725746befb | Shell | saalfeldlab/template-building | /scripts/dataWrangling/runTimeMemStat | UTF-8 | 2,089 | 2.953125 | 3 | [
"BSD-2-Clause"
] | permissive | #!/bin/bash
# Collect per-registration time/memory statistics: for each evaluation
# directory with exactly 20 job outputs, prefix its timeMemStats.csv
# rows with a "<template>,<algorithm>" label and append them to the
# aggregate table $TABLE_FILE.
BASEDIR="/nrs/saalfeld/john/projects/flyChemStainAtlas/all_evals"
#dir_list="\
# JFRCtemplate2010/antsRegDog JFRCtemplate2010/antsRegOwl JFRCtemplate2010/antsRegYang JFRCtemplate2010/cmtkCOG JFRCtemplate2010/cmtkCow JFRCtemplate2010/cmtkHideo \
# JFRC2013_lo/antsRegDog JFRC2013_lo/antsRegOwl JFRC2013_lo/antsRegYang JFRC2013_lo/cmtkCOG JFRC2013_lo/cmtkCow JFRC2013_lo/cmtkHideo \
# TeforBrain_f/antsRegDog TeforBrain_f/antsRegOwl TeforBrain_f/antsRegYang TeforBrain_f/cmtkCOG TeforBrain_f/cmtkCow TeforBrain_f/cmtkHideo \
# F-antsFlip_lo/antsRegDog F-antsFlip_lo/antsRegOwl F-antsFlip_lo/antsRegYang F-antsFlip_lo/cmtkCOG F-antsFlip_lo/cmtkCow F-antsFlip_lo/cmtkHideo \
# F-cmtkFlip_lof/antsRegDog F-cmtkFlip_lof/antsRegOwl F-cmtkFlip_lof/antsRegYang F-cmtkFlip_lof/cmtkCOG F-cmtkFlip_lof/cmtkCow F-cmtkFlip_lof/cmtkHideo \
# indvs/C3_def/antsRegDog8 indvs/C3_def/antsRegOwl indvs/C3_def/antsRegYang indvs/C3_def/cmtkCOG indvs/C3_def/cmtkCow indvs/C3_def/cmtkHideo \
# indvs/D1_def/antsRegDog8 indvs/D1_def/antsRegOwl indvs/D1_def/antsRegYang indvs/D1_def/cmtkCOG indvs/D1_def/cmtkCow indvs/D1_def/cmtkHideo"
dir_list="\
indvs/C3_def/antsRegDog8 indvs/C3_def/antsRegOwl indvs/C3_def/antsRegYang indvs/C3_def/cmtkCOG indvs/C3_def/cmtkCow indvs/C3_def/cmtkHideo \
indvs/D1_def/antsRegDog8 indvs/D1_def/antsRegOwl indvs/D1_def/antsRegYang indvs/D1_def/cmtkCOG indvs/D1_def/cmtkCow indvs/D1_def/cmtkHideo"
#line=3
TABLE_FILE="/nrs/saalfeld/john/projects/flyChemStainAtlas/all_evals/time_mem_data.csv"
for d in $dir_list;
do
    echo $d
    dest_file="${BASEDIR}/${d}/timeMemStats.csv"
    echo "DEST FILE $dest_file"
    # Row label: drop the "indvs/" prefix, turn path separators into commas.
    prefix=$(echo $d | sed -e 's:indvs/::g' -e 's:/:,:g')
    echo "${prefix}"
    # Number of grid-job output files (*.sh.o*) for this experiment.
    N=$(ls -U ${BASEDIR}/${d}/*sh.o* | wc -l)
    echo "N: $N"
    if [[ $N != 20 ]];
    then
        echo "Expect exactly 20 outputs, but found $N, skipping"
        continue
    fi
    #parseRunTimes $dest_file `ls ${BASEDIR}/${d}/*sh.o*`
    sed "s/^/${prefix},/g" $dest_file >> $TABLE_FILE
    echo " "
done
echo " "
echo " "
| true |
00edb78cff5ce8534fede935dc152d7a25d6f450 | Shell | yangtaossr/datasets | /pkgs/ncbi-datasets-cli/conda/build.sh | UTF-8 | 250 | 2.515625 | 3 | [
"LicenseRef-scancode-public-domain"
] | permissive | for name in datasets dataformat; do
    # Build each CLI with a build-event-protocol text file so the output
    # binary's location can be recovered afterwards.
    bazel build --build_event_text_file=$name.bep //src:$name
    # Extract the file:// URI of the completed target from the BEP file.
    bin_name=$(sed -n '/^completed {/,/^}/{s,.*uri: "file://\(.*\)",\1,p}' $name.bep)
    # Install the binary into the conda environment prefix.
    cp "$bin_name" "$PREFIX/bin/"
done
bazel clean
bazel shutdown
| true |
724b4064b3d84859dd9e9fe7233f834b5ba5ba3b | Shell | mouraja/estudos | /python/teste_argumentos.py | UTF-8 | 788 | 3.703125 | 4 | [] | no_license | #!/bin/bash
# Demo of getopts argument parsing: -a/-b/-c take an argument,
# -d/-e/-f are plain flags, -h prints usage.

# Print usage and exit with status 1.
function usage() {
cat <<HELP
sitaxe: $0 [-a <opcao>] [-b <opcao>] [-c <opcao>] [-d] [-e] [-f] [-h];
HELP
exit 1
}

# Fix: the optstring now starts with ':' (silent mode).  Without it,
# getopts prints its own diagnostics and never reports ':' for a missing
# argument, so this script's ':' branch was dead code.
while getopts ":a:b:c:defh" opt;
do
  case $opt in
    a)
      echo "-$opt was triggered, Parameter: $OPTARG" >&2;
      ;;
    b)
      echo "-$opt was triggered, Parameter: $OPTARG" >&2;
      ;;
    c)
      echo "-$opt was triggered, Parameter: $OPTARG" >&2;
      ;;
    d)
      echo "-$opt was triggered, Without Parameter: $OPTARG" >&2;
      ;;
    e)
      echo "-$opt was triggered, Without Parameter: $OPTARG" >&2;
      ;;
    f)
      echo "-$opt was triggered, Without Parameter: $OPTARG" >&2;
      ;;
    h)
      usage;
      ;;
    \?)
      # Fix: in silent mode the offending option character is delivered
      # in OPTARG; $opt is literally '?'.
      echo "Invalid option: -$OPTARG" >&2;
      exit 1;
      ;;
    :)
      # Fix: likewise, the option missing its argument is in OPTARG.
      echo "Option -$OPTARG requires an argument." >&2;
      exit 1;
      ;;
  esac
done
| true |
f787ee533bef6f501072db9c2f10a2d62ce47aeb | Shell | PeiwenWu/Adaptation-Interspeech18 | /adaptation/run_si_train.sh | UTF-8 | 4,647 | 2.90625 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Copyright 2017 Ke Wang
# Kaldi speaker-independent training recipe: stage 0 prepares the data,
# lexicon and language model; stages 1-4 train mono/tri1/tri2/tri3 GMM
# systems, decoding each in a background subshell; stage 5 runs
# TDNN-LSTM neural training.  Stages below $stage are skipped, so an
# interrupted run can be resumed part-way through the pipeline.
# Begin config
stage=5
train_nj=100
decode_nj=50
# End config
. ./cmd.sh
. ./path.sh
. utils/parse_options.sh
train_dir=data/train
test_dir=data/test_50speaker
if [ $stage -le 0 ]; then
  echo ========================================================================
  echo " Data & Lexicon & Language Preparation "
  echo ========================================================================
  # Data Preparation
  lexicon=/home/train03/data/resources/zhongwen.lex
  lm_arpa=/home/train03/data/resources/lmprune/1e_8/lm.upper.arpa.gz
  utils/data/validate_data_dir.sh --no-wav $train_dir || exit 1;
  utils/data/validate_data_dir.sh --no-wav $test_dir || exit 1;
  misc/prep_train_dict.sh $lexicon $train_dir $test_dir || exit 1;
  utils/prepare_lang.sh data/local/dict "<UNK>" data/local/lang data/lang || exit 1;
  # prepare lang dir for test
  misc/prep_test_dict.sh $lexicon data/local/dict data/local/dict_test || exit 1;
  utils/prepare_lang.sh --phone-symbol-table data/lang/phones.txt data/local/dict_test \
    "<UNK>" data/local/lang_test_tmp data/lang_test_tmp || exit 1;
  # Change the LM vocabulary to be the intersection of the current LM vocab and the set
  # of words in the pronunciation lexicon. Also renormalizes the LM by recomputing the
  # backoff weights, and remove those ngrams whose probabilities are lower than the
  # backed-off estimates.
  utils/format_lm_sri.sh --srilm_opts "-subset -prune-lowprobs -unk" \
    data/lang_test_tmp $lm_arpa data/lang_test || exit 1;
fi
if [ $stage -le 1 ]; then
  echo ========================================================================
  echo " MonoPhone Training "
  echo ========================================================================
  steps/train_mono.sh --nj $train_nj --cmd "$train_cmd" data/train data/lang exp/mono
fi
if [ $stage -le 2 ]; then
  echo ========================================================================
  echo " tri1 : Deltas + Delta-Deltas Training & Decoding "
  echo ========================================================================
  steps/align_si.sh --cmd "$train_cmd" --nj $train_nj \
    data/train data/lang exp/mono exp/mono_ali || exit 1;
  steps/train_deltas.sh --cmd "$train_cmd" \
    7000 130000 data/train data/lang exp/mono_ali exp/tri1 || exit 1;
  # Decode tri1
  (
    utils/mkgraph.sh data/lang_test exp/tri1 exp/tri1/graph || exit 1;
    steps/decode.sh --cmd "$decode_bigmem_cmd" --config conf/decode.config --nj $decode_nj \
      exp/tri1/graph $test_dir exp/tri1/decode_test || exit 1;
  ) &
fi
if [ $stage -le 3 ]; then
  echo ========================================================================
  echo " tri2 : Deltas + Delta-Deltas Training & Decoding "
  echo ========================================================================
  # Align tri1
  steps/align_si.sh --cmd "$train_cmd" --nj $train_nj \
    data/train data/lang exp/tri1 exp/tri1_ali || exit 1;
  steps/train_deltas.sh --cmd "$train_cmd" \
    7000 130000 data/train data/lang exp/tri1_ali exp/tri2 || exit 1;
  # Decode tri2
  (
    utils/mkgraph.sh data/lang_test exp/tri2 exp/tri2/graph
    steps/decode.sh --cmd "$decode_bigmem_cmd" --config conf/decode.config --nj $decode_nj \
      exp/tri2/graph $test_dir exp/tri2/decode_test || exit 1;
  ) &
fi
if [ $stage -le 4 ]; then
  echo ========================================================================
  echo " tri3 : LDA + MLLT Training & Decoding "
  echo ========================================================================
  # Triphone wiht LDA+MLLT
  steps/align_si.sh --cmd "$train_cmd" --nj $train_nj \
    data/train data/lang exp/tri2 exp/tri2_ali || exit 1;
  steps/train_lda_mllt.sh --cmd "$train_cmd" \
    7000 130000 data/train data/lang exp/tri2_ali exp/tri3 || exit 1;
  # Decode tri3
  (
    utils/mkgraph.sh data/lang_test exp/tri3 exp/tri3/graph || exit 1;
    steps/decode.sh --cmd "$decode_bigmem_cmd" --config conf/decode.config --nj $decode_nj \
      exp/tri3/graph $test_dir exp/tri3/decode_test || exit 1;
  ) &
fi
if [ $stage -le 5 ]; then
  echo ========================================================================
  echo " NN Training & Decoding "
  echo ========================================================================
  # TDNN + LSTM training
  misc/run_tdnn_lstm.sh --nj 100 --gmm "tri3" --stage -10 \
    --train_set "train" --test_sets "test_50speaker" || exit 1;
fi
96f0febcbe67258667c10e9910d224a553c9952f | Shell | anderson-uchoa/predicting-refactoring-ml | /data-collection/run-cloud.sh | UTF-8 | 1,465 | 3.4375 | 3 | [
"MIT"
] | permissive | #! /bin/bash
# Batch driver for the refactoring analyzer: read a CSV of projects,
# process rows BEGIN..END, run the Java analyzer per project, zip and
# ship the results to a storage machine, then clean up locally.
export IFS=","
if [ "$#" -ne 8 ]; then
    echo "wrong usage" >&2
    exit 1
fi
CLASS="refactoringml.App"
JAR_PATH=predicting-refactoring-ml/data-collection/target/refactoring-analyzer-0.0.1-SNAPSHOT-jar-with-dependencies.jar
REFACTORINGMINER_JAR_PATH=predicting-refactoring-ml/data-collection/lib/RefactoringMiner-20190430.jar
OUTPUT_PATH=output
# Positional arguments: CSV path, row range, DB connection, target host.
PROJECTS_CSV_PATH=$1
BEGIN=$2
END=$3
URL=$4
USER=$5
PWD=$6
STORAGE_MACHINE=$7
THRESHOLD=$8
mkdir $OUTPUT_PATH
echo ""
i=0
# CSV columns (IFS=","): project name, repo URL, dataset label.
cat $PROJECTS_CSV_PATH | while
read PROJECT REPO DATASET; do
	let "i++"
	# Only process rows within the requested [BEGIN, END] window.
	if [ $i -ge $BEGIN -a $i -le $END ]; then
		echo "INIT $i = $PROJECT"
		echo "$i=$PROJECT" >> execution.txt
		OUTPUT_PROJECT_PATH="$OUTPUT_PATH/$PROJECT"
		mkdir $OUTPUT_PROJECT_PATH
		STORAGE_PATH="$OUTPUT_PROJECT_PATH/storage"
		mkdir $STORAGE_PATH
		echo "Running refactoring analyzer"
		java -Xmx800m -Xms350m -cp $REFACTORINGMINER_JAR_PATH:$JAR_PATH $CLASS $DATASET $REPO $STORAGE_PATH $URL $USER $PWD $THRESHOLD >> log.txt 2>> error.txt
		# Only package and upload when the analyzer succeeded.
		if [ $? -eq 0 ]
		then
			echo "Packing the java files"
			mv log.txt $OUTPUT_PROJECT_PATH
			mv error.txt $OUTPUT_PROJECT_PATH
			zip -q -r $DATASET-$PROJECT.zip $OUTPUT_PROJECT_PATH/*
			scp $DATASET-$PROJECT.zip $STORAGE_MACHINE/$DATASET-$PROJECT.zip
			rm $DATASET-$PROJECT.zip
		fi
		echo "Deleting folder"
		rm -rf $OUTPUT_PROJECT_PATH
		rm error.txt log.txt nohup.out
		# Drop temp clones left behind by RefactoringMiner.
		rm -rf /tmp/15*
		echo ""
		echo "#####################"
		echo ""
	fi
done
| true |
4fa6d0e71185fcc6e1e809f82d02d36f75ccd8a5 | Shell | mmlindeboom/always-connected-daemon | /checkconnection.sh | UTF-8 | 984 | 3.578125 | 4 | [] | no_license | #!/bin/bash
# macOS Wi-Fi watchdog: every 20 seconds check whether the airport
# interface is joined to $WIFI_NETWORK_NAME and reconnect if not.
AIRPORT="en1"; #may be en0, use networksetup -listallhardwareports to check
WIFI_NETWORK_NAME="YOUR_WIRELESS_NETWORK_NAME"
WIFI_PASSWORD="YOUR_PASSWORD"
# NOTE(review): captured once at startup, so every "Checking on" line
# shows the script's start time, not the current time.
THE_DATE=$(date)
# Check to see if connected to WIFI_NETWORK_NAME every 20 seconds
# Power-cycle the interface and try to (re)join the configured network.
function connect {
  networksetup -setairportpower $AIRPORT off
  networksetup -setairportpower $AIRPORT on
  sleep 2
  if networksetup -getairportnetwork $AIRPORT | grep -i -a $WIFI_NETWORK_NAME ;
  then
    echo 'Connected!';
  fi
  # Attempt the join; on failure power-cycle once more and fall through
  # so the outer loop retries on its next tick.
  if networksetup -setairportnetwork $AIRPORT $WIFI_NETWORK_NAME $WIFI_PASSWORD | grep -i -a "Failed" ;
  then
    echo 'Failed to connect, just restarting...';
    networksetup -setairportpower $AIRPORT off
    networksetup -setairportpower $AIRPORT on
    sleep 1
  fi
  networksetup -getairportnetwork $AIRPORT
}
while true;
do
  echo 'Checking on' $THE_DATE
  if networksetup -getairportnetwork $AIRPORT | grep -i -a $WIFI_NETWORK_NAME ;
  then
    echo "Connected"
  else
    connect
  fi
  sleep 20
done
| true |
43d30d852d0f5ffb94c8898c8bb4d9691b7d3b84 | Shell | juancarloscavero/aws-practica | /autoUpDown.sh | UTF-8 | 2,035 | 3.234375 | 3 | [] | no_license | #!/bin/bash
# Cron-style EC2 scheduler (eu-west-1): stop instances whose
# "hora_apagado" tag matches the current HH:MM, and start instances
# whose "hora_encendido" tag matches it.
now=$(date +%H:%M)
# Shutdown time configured via the hora_apagado tag (if any).
apagar=$(aws ec2 describe-instances --region eu-west-1 --filters 'Name=tag:hora_apagado,Values=*' | jq -r '.Reservations[].Instances[].Tags[] | select(.Key=="hora_apagado")'| jq -r .Value)
echo $now
echo $apagar
if [ "$apagar" == "$now" ]; then
	id=$(aws ec2 describe-instances --region eu-west-1 --filters "Name=tag:hora_apagado,Values='$apagar'" | jq -r .Reservations[].Instances[].InstanceId)
	echo $id
	status=$(aws ec2 describe-instance-status --instance-id $id --region eu-west-1 | jq -r .InstanceStatuses[0].InstanceState.Name)
	echo $status
	if [ "$status" != "stopped" ]; then
		aws ec2 stop-instances --region eu-west-1 --instance-ids $id
		# Poll until the instance leaves the status listing ("null"
		# here means describe-instance-status no longer reports it).
		while [[ "$status" != "null" ]];
		do
			sleep 3
			echo '...'
			status=$(aws ec2 describe-instance-status --instance-id $id --region eu-west-1 | jq -r .InstanceStatuses[0].InstanceState.Name)
			echo $status
		done
		echo 'Stopped baby!'
	fi
fi
# Startup time configured via the hora_encendido tag (if any).
encender=$(aws ec2 describe-instances --region eu-west-1 --filters 'Name=tag:hora_encendido,Values=*' | jq -r '.Reservations[].Instances[].Tags[] | select(.Key=="hora_encendido")'| jq -r .Value)
if [ "$encender" == "$now" ]; then
	id=$(aws ec2 describe-instances --region eu-west-1 --filters "Name=tag:hora_encendido,Values='$encender'" | jq -r .Reservations[].Instances[].InstanceId)
	echo $id
	status=$(aws ec2 describe-instance-status --instance-id $id --region eu-west-1 | jq -r .InstanceStatuses[0].InstanceState.Name)
	echo $status
	if [ "$status" != "running" ]; then
		aws ec2 start-instances --region eu-west-1 --instance-ids $id
		# Poll until the instance reports "running".
		while [[ "$status" != "running" ]];
		do
			sleep 3
			echo ...
			status=$(aws ec2 describe-instance-status --instance-id $id --region eu-west-1 | jq -r .InstanceStatuses[0].InstanceState.Name)
		done
		echo 'Encendida bitch!!!'
	fi
fi
| true |
76993a07c7f818657a7d79e908f86ff3a1affa1e | Shell | dwhswenson/repo-tools | /install_miniconda.sh | UTF-8 | 1,249 | 3.875 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Usage:
#   source install_miniconda.sh
#
# Note that this must be sourced, not executed. This is because it changes
# environment variables.
#
# There are two environment variables that you can set to change behavior:
# * $CONDA_PY will affect which Python version is default in miniconda, and
#   will also be used by conda to select the Python version. If unset,
#   default is "36".
# * $CONDA_VERSION will select which version of miniconda is installed.
#   If unset, default is "latest".

if [ -z "$CONDA_VERSION" ]
then
    CONDA_VERSION="latest"
fi

if [ -z "$CONDA_PY" ]
then
    CONDA_PY=36
fi

# Major Python version digit, e.g. "3" from "36"; selects Miniconda2/3.
pyV=${CONDA_PY:0:1}
MINICONDA=Miniconda${pyV}-${CONDA_VERSION}-Linux-x86_64.sh
# Scrape the expected MD5 out of the HTML table on the download index
# (4th line after the installer's row, inside a <td>).
MINICONDA_MD5=$(curl -s https://repo.continuum.io/miniconda/ | grep -A3 $MINICONDA | sed -n '4p' | sed -n 's/ *<td>\(.*\)<\/td> */\1/p')
wget https://repo.continuum.io/miniconda/$MINICONDA
# check the MD5 hash; error is mismatch
# NOTE(review): since this file is sourced, `exit 1` terminates the
# caller's shell session on mismatch - presumably intentional here.
if [[ $MINICONDA_MD5 != $(md5sum $MINICONDA | cut -d ' ' -f 1) ]]; then
    echo "Miniconda MD5 mismatch"
    echo "Expected: $MINICONDA_MD5"
    echo "Found: $(md5sum $MINICONDA | cut -d ' ' -f 1)"
    exit 1
fi
# install miniconda and update PATH
# -b: batch (non-interactive) install into the default location.
bash $MINICONDA -b
export PATH=$HOME/miniconda${pyV}/bin:$PATH
| true |
82b6329222765ce02c3d0a2a9669f25cec5850e2 | Shell | cultcode/mon | /LogTruncate.sh | UTF-8 | 1,364 | 3.9375 | 4 | [] | no_license | #!/bin/bash
#set -x
# Rotate a service's log file without restarting it: rename the log to a
# timestamped backup, then send SIGUSR1 to every matching process so it
# reopens (truncates) its log.
# Usage: LogTruncate.sh SERVICE_NAME LOG_FILE
usage () {
  echo "Usage: $0 SERVICE_NAME LOG_FILE"
}
if [ "$#" -ne 2 ];then
  usage;
  exit 1;
fi;
#if [ ! -f "$1" ];then
#  echo "SERVICE_NAME $1 is not existent"
#  usage;
#  exit 1;
#fi;
if [ ! -f "$2" ];then
  echo "LOG_FILE $2 is not existent"
  usage;
  exit 1;
fi;
SERVICE_DIR=$(dirname "$1")
SERVICE_NAME=$(basename "$1")
LOG_FILE=$2
# Backup name: <logfile>.YYYYmmdd_HHMMSS
LOG_FILE_BAK=${LOG_FILE}"."$(date +"%Y%m%d_%H%M%S")
IFS=$'\n'
RETVAL=0
items=""
pids=""
length=0
# Find the service's processes, rotate the log, and signal each PID.
init () {
  # Split ps output on newlines only so each process is one array item.
  IFS_old=$IFS
  IFS=$'\x0A'
  # Match ps lines whose command (after the HH:MM:SS time column) ends
  # with the service name.
  items=(`ps -ef | grep "[0-9]\+:[0-9]\+:[0-9]\+ \S*$SERVICE_NAME\b"`)
  IFS=$IFS_old
  length=${#items[@]}
  if [ "$length" -gt 0 ];then
    # Extract the PID (second ps column) of every matching process.
    for ((i=0; i<$length; i++))
    do
      pids[$i]=`echo ${items[$i]} | awk '{print $2}'`
      echo ${items[$i]}
      echo ${pids[$i]}
    done
  fi;
  # Rotate the log away; the service keeps writing to the old inode
  # until it is signalled below.
  mv $LOG_FILE $LOG_FILE_BAK
  RETVAL=$?
  if [ $RETVAL -eq 0 ];then
    echo "Mv $LOG_FILE $LOG_FILE_BAK Succeed";
  else
    echo "Mv $LOG_FILE $LOG_FILE_BAK Fail";
  fi;
  if [ "${#items}" -le 0 ];then
    echo "$SERVICE_NAME doesn't run"
    return 1;
  fi;
  # Ask each process to reopen its log file.
  for p in ${pids[@]}; do
    kill -USR1 $p
    RETVAL=$?
    if [ $RETVAL -eq 0 ];then
      echo "Truncate $LOG_FILE of Process $p Succeed";
    else
      echo "Truncate $LOG_FILE of Process $p Fail";
    fi;
  done
  return $RETVAL;
}
init;
RETVAL=$?
exit $RETVAL;
| true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.