blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
0d0706298efb93a59b89b159663a8608ef9eb291 | Shell | epicsNSLS2-deploy/Docker-Builder | /build_image.sh | UTF-8 | 2,294 | 4.59375 | 5 | [] | no_license | #!/bin/bash
# Build a Docker image from the Dockerfile in the directory named after it.
# Arguments: $1 - image/directory name (e.g. ubuntu18.04)
# The build runs in a subshell so the caller's working directory is never
# changed, and a failed cd no longer triggers a build from the wrong directory
# (the original unchecked `cd $IMAGE_NAME` did exactly that).
function build_image () {
IMAGE_NAME=$1
(
cd "$IMAGE_NAME" || { echo "ERROR - directory $IMAGE_NAME not found" >&2; exit 1; }
docker build -t "isa/$IMAGE_NAME" .
)
}
# Print usage information for build_image.sh and terminate the script.
function print_help () {
cat <<'USAGE'

USAGE:
   ./build_image.sh help - will display this help message
   ./build_image.sh all - will build all docker images sequentially.
   ./build_image.sh [Distribution Branch] - will build all images for particular distro branch. Ex: debian
   ./build_image.sh [Distribution] - will build a single container image.

   Ex. ./build_image.sh ubuntu18.04
   Ex. ./build_image.sh debian
   Ex. ./build_image.sh all

Supported distributions: [ ubuntu18.04, ubuntu19.04, ubuntu20.04, debian8, debian9, debian10, centos7, centos8 ]

USAGE
exit
}
# Exactly one argument is required: a distribution, a distribution family,
# "all", or "help".
if [ "$#" != "1" ];
then
echo
# Fixed: this message previously named run_container.sh, but it belongs to build_image.sh.
echo "Exactly 1 argument is required for build_image.sh."
print_help
else
TO_RUN=$1
fi
# Check if input parameter is valid
if [ "$TO_RUN" != "help" ];
then
case "$TO_RUN" in
ubuntu18.04|ubuntu19.04|ubuntu20.04|debian8|debian9|debian10|centos7|centos8|ubuntu|debian|centos|all) echo "Valid option $TO_RUN. Starting Docker-Builder...";;
*) echo "ERROR - $TO_RUN is not a supported container"
print_help;;
esac
else
print_help
fi
# Dispatch on the validated argument: build every image, a whole distribution
# family, or the single named image.
case "$TO_RUN" in
all)
    build_image ubuntu18.04
    build_image ubuntu19.04
    build_image ubuntu20.04
    build_image debian8
    build_image debian9
    build_image debian10
    build_image centos7
    build_image centos8
    ;;
debian)
    build_image debian8
    build_image debian9
    build_image debian10
    ;;
ubuntu)
    build_image ubuntu18.04
    build_image ubuntu19.04
    build_image ubuntu20.04
    ;;
centos)
    build_image centos7
    build_image centos8
    ;;
*)
    build_image "$TO_RUN"
    ;;
esac
# Clean up the intermediate images
echo "Removing previous image versions..."
# prune -f removes dangling (untagged) images without prompting for confirmation
docker image prune -f
echo "Docker image created for $TO_RUN. Use docker image ls to see all images."
echo "Run a build with: ./run_container.sh $TO_RUN"
echo "Done."
exit
| true |
3c506b138dc71e08a23abb6b5e2bd27526380cd3 | Shell | justynaBanaszkiewicz12305/UniversityCodesmash | /scripts/setup_account.sh | UTF-8 | 857 | 3.375 | 3 | [] | no_license | #!/usr/bin/env bash
# Create a local user account, copy the course repository into the user's home
# directory, then generate and run a per-user setup script that configures RVM,
# the pinned Ruby version and the project's gem bundle.
#
# Usage (as root): setup_account.sh <username>
username=$1
if [ -z "$username" ]; then
echo "Usage: $0 <username>" >&2
exit 1
fi
repository=spring-2015-uni-code-smash
ruby_version=ruby-2.2.2
chmod_no_public_access=751
user_home="/home/${username}"
user_setup_script="${user_home}/setup.sh"
adduser --gecos "" "${username}"
sudo smbpasswd -a "${username}"
cp -r "./${repository}" "${user_home}"
chown -R "${username}" "${user_home}/${repository}"
# The here-document is deliberately unquoted so ${user_home} etc. expand now.
# \$HOME is escaped so it expands later, when the generated script runs as the
# new user (previously it expanded at generation time to the admin's HOME).
# Also fixed: a stray trailing "]" used to corrupt the rvm script path.
cat > "${user_setup_script}" <<_EOF_
#!/bin/bash
[ -s "\$HOME/.rvm/scripts/rvm" ] && . "\$HOME/.rvm/scripts/rvm"
source /etc/profile
cd ${user_home}
chmod -R ${chmod_no_public_access} ${user_home}
cd ${user_home}/${repository}
rvm --default use ${ruby_version}
rvm user gemsets
gem install bundler
bundle
echo 'Done!'
_EOF_
chmod +x "${user_setup_script}"
chown -R "${username}" "${user_setup_script}"
sudo usermod -a -G rvm "${username}"
sudo -i -u "$username" bash -l "${user_setup_script}"
| true |
2abab353ef3963e74499f30fe3dcea9015aa30b7 | Shell | go-barbarians/barbarian-hive | /opt/barbarian/control/await-llap | UTF-8 | 1,657 | 3.15625 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env mksh
# Copyright 2018 Barbarians.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Usage: await-llap
# Waits until the LLAP service is up and running and exit
# or 360s have passed and abort - whichever is sooner

# llap_field COLUMN
# Print the given whitespace-separated column of the most recent "llap" row
# reported by `yarn application -list` (empty when no such row exists).
llap_field() {
	$HADOOP_HOME/bin/yarn application -list | grep llap | tail -n 1 | nawk -v col="$1" '{ print $col }'
}

# await_field COLUMN EXPECTED TIMEOUT MESSAGE
# Re-check COLUMN every 10 seconds until it equals EXPECTED; if more than
# TIMEOUT seconds pass first, print MESSAGE and abort the whole script.
# (Replaces two copy-pasted polling loops; also uses `exit 1` instead of the
# original non-portable `exit -1`.)
await_field() {
	elapsed=0
	while [ "$(llap_field "$1")" != "$2" ]
	do
		sleep 10
		elapsed=$((elapsed + 10))
		if [ "$elapsed" -gt "$3" ]
		then
			echo "$4"
			exit 1
		fi
	done
}

# Column 6 held the application state in the original (LLAP_STATUS); wait for RUNNING.
await_field 6 RUNNING 360 "timed out waiting for LLAP daemons to be assigned"
# Column 8 held the progress figure (LLAP_RUNSTATE); wait for 100%.
await_field 8 100% 60 "timed out waiting for LLAP daemons to be initialized"

# allow some time for the YARN containers to initialize
sleep 30
echo "LLAP daemons are running"
| true |
1720019015ea1988635704ba87552b5e778eb90b | Shell | tmack91/Sysadmin-Solaris | /mk-flash.sh | UTF-8 | 708 | 3.671875 | 4 | [
"Apache-2.0"
] | permissive | #!/usr/bin/ksh
#
# Create Flash Archive
#
# rob.brown@ioko.com - 28/Sep/2008
#
HOSTNAME=`hostname`
DATE=`date +%H%M-%d-%h-%Y`
FLAR_IMAGE_NAME="${HOSTNAME}-${DATE}"
AUTHOR="rob.brown@ioko.com"
FLAR_DESCRIPTION="Solaris 10 test image"
OUTPUT_DIR="/tmp"
#
# Notes
# * To Exclude a file or directory use -x file/directory
# * If you wish to exclude multiple file/directories then use multiple -x
#   on the same line.
# * By default it uses the -S which skips size checking. If you want a "proper"
#   archive this should be removed. (Takes a bloody age though)
#
# Fix: reuse ${DATE} instead of calling date a second time, so the archive
# file name can no longer differ from the flash image name when the minute
# ticks over between the two calls.
FILENAME="${DATE}.flar"
flarcreate -n ${FLAR_IMAGE_NAME} -S -c \
-a "${AUTHOR}" \
-e "${FLAR_DESCRIPTION}" \
-x /share \
${OUTPUT_DIR}/${FILENAME}
| true |
fc23f3a9f8625c94de76096f76efaf89e078d4db | Shell | Kerusak/xs2a | /scripts/keycloak-dev/add-realm-role.sh | UTF-8 | 991 | 3.125 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Create a realm role in a Keycloak realm through the Admin REST API and print
# the new role's id (first 36 characters, i.e. the UUID) on stdout.
#
# Usage: add-realm-role.sh <hostAndPort> <realm> <adminUser> <adminPassword> <role>
if [ "$#" -ne 5 ]; then
echo "Usage: $0 <hostAndPort> <realm> <adminUser> <adminPassword> <role>" >&2
exit 1
fi
hostAndPort=$1
realm=$2
adminUser=$3
adminPassword=$4
role=$5
# Get and parse access token (password grant against the master realm, admin-cli client)
RESP=$(curl -s -X POST "$hostAndPort/auth/realms/master/protocol/openid-connect/token" \
-H "Content-Type: application/x-www-form-urlencoded" \
-d "username=$adminUser" \
-d "password=$adminPassword" \
-d 'grant_type=password' \
-d 'client_id=admin-cli')
# Crude JSON parsing: keep everything between access_token":" and the next quote
TKN=$(printf '%s' "$RESP" | sed 's/.*access_token":"//g' | sed 's/".*//g')
# Create the role; the Location response header points at the new resource.
role="{\"name\":\"$role\",\"scopeParamRequired\":\"\"}"
# Fix: "$role" is now quoted — unquoted it would be word-split if the role
# name contained whitespace, breaking the request body.
RESP=$(curl -s -i -X POST "$hostAndPort/auth/admin/realms/$realm/roles/" \
-H "Content-Type: application/json;charset=UTF-8" \
-H "Authorization: Bearer $TKN" \
-d "$role" | grep "Location:")
# Strip the "Location: " prefix and any CR/LF from the header value
location=$(printf '%s' "${RESP#* }" | tr -d '\n\r')
RESP=$(curl -s -i -X GET "$location" \
-H "Content-Type: application/json;charset=UTF-8" \
-H "Authorization: Bearer $TKN" )
# The role id is parsed from the tail of the response after id":"
roleId=$(printf '%s' "${RESP##*/}" | tr -d '\n\r')
roleId=${roleId##*id\":\"}
echo ${roleId:0:36}
| true |
d802919bdd59860485a6ffe929ab297f2e33d93e | Shell | Xequals0/Everything-Else | /Computer Security/Assignment4/input.sh | UTF-8 | 1,134 | 2.546875 | 3 | [] | no_license | # CS419 Computer Security: Assignment 4
# Professor Krzyzanowski Spring 2018
# Anirudh Tunoori netid: at813
# Yaowen Zhang netid: yz579
# This is the input script for the protostar challenge
#
# NOTE(review): bash does NOT interpret \xNN escapes inside double quotes, and
# the "+" between adjacent quoted strings is passed through literally as part
# of the argument (shell string concatenation needs no operator). If these
# payloads are meant to deliver raw address bytes, they would need $'...'
# quoting or $(printf ...) — confirm against the intended exploit behaviour.
printf "~~~Beginning The Exploit Input Script For All Challenges~~~\n"
# Stack1: filler bytes followed by the trailing "dcba" intended to land in the
# adjacent variable.
printf "\n"
printf "Running stack1... \n"
echo
./stack1 "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAdcba"
echo "----------------------------------------------------------------------"
# Stack4: filler plus the escape text \xf4\x83\x04\x08 (0x080483f4 if those
# escapes were interpreted — see NOTE above).
printf "Running stack4... \n"
echo
./stack4 "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"+"\xf4\x83\x04\x08"
echo "------------------------------------------------------------------------"
# Format0: "%64d" width specifier pads the output before the target-value bytes.
printf "Running format0... \n"
echo
./format0 "%64d\xef\xbe\xad\xde"
echo "------------------------------------------------------------------------"
# Heap0: heap overflow payload ending in the escape text \x64\x84\x04\x08
# (0x08048464 if interpreted — see NOTE above).
printf "Running heap0... \n"
echo
./heap0 "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA"+"\x64\x84\x04\x08"
echo "------------------------------------------------------------------------"
| true |
993cdf47e1cf088e8fe20d5316b58b39bfa0eda5 | Shell | MobilityDB/MobilityDB-Azure | /automaticClusterDeployment/KubernetesCluster/deployK8SCluster.sh | UTF-8 | 7,846 | 3.828125 | 4 | [] | no_license | #!/bin/bash
################################################################################
# Script Description #
################################################################################
# This script is used to automatically deploy a MobilityDB cluster on the Cloud.
# More specifically, the cluster will be hosted in Microsoft Azure, hence an Azure
# account with a valid subscription is needed to run it. To corectly initialize the
# cluster, the following Configuration tab should be parametrized:
# AzureUsername parameter is used to login your Azure account.
# The default ResourceGroupName, Location and VirtualNetwork values can be used.
# Subscription defines the name of the active Azure subscription.
# VMsNumber determines the number of Worker nodes and VMsSize the size of each machine.
# SSHPublicKeyPath and SSHPrivateKeyPath values specify the location of the ssh private
# and public keys to access the created VMs. By default, the files will be stored in
# ~/.ssh/ directory. Finally, Gitrepo specifies the Github repository from which the
# installation scripts and the rest source files will be found.
################################################################################
# Configuration #
################################################################################
AzureUsername="zas1122@hotmail.com"
ResourceGroupName="TestGroup"
Location="germanywestcentral"
VirtualNetwork="test-vnet"
Subscription="CODE WIT"
VMsNumber=1
VMsSize="Standard_B2s" #Visit https://azure.microsoft.com/en-us/pricing/details/virtual-machines/series/
# to see the full list of available VMs
SSHPublicKeyPath="~/.ssh/id_rsa.pub"
SSHPrivateKeyPath="~/.ssh/id_rsa"
Gitrepo="https://github.com/JimTsesm/MobilityDB-Azure.git"
Service_app_url="http://python-app2"
Service_tenant="18f19e28-1ea1-4b0c-bbc0-cf7538f92d05"
################################################################################
#Login to Azure using Azure CLI
#read -sp "Azure password: " AZ_PASS && echo && az login -u $AzureUsername -p $AZ_PASS
read -sp "Azure Client secret: " AZ_PASS && echo && az login --service-principal -u "$Service_app_url" -p $AZ_PASS --tenant "$Service_tenant"
#Select the desired subscription
az account set --subscription "$Subscription"
#Create a new Resource Group
az group create --name $ResourceGroupName --location $Location
#Create a new Virtual Network
az network vnet create --name $VirtualNetwork --resource-group $ResourceGroupName --subnet-name default
################################################################################
#                            Coordinator Creation                              #
################################################################################
VMName="Coordinator";
#Create a VM for the coordinator (static public IP, default Ubuntu LTS image)
az vm create --name $VMName --resource-group $ResourceGroupName --public-ip-address-allocation static --image "UbuntuLTS" --size $VMsSize --vnet-name $VirtualNetwork --subnet default --admin-username azureuser --generate-ssh-keys;
#Open port 6443 to allow K8S connections
az vm open-port -g $ResourceGroupName -n $VMName --port 6443 --priority 1020;
#Open port 30001 to allow K8S service exposure
az vm open-port -g $ResourceGroupName -n $VMName --port 30001 --priority 1030;
#Clone the github repository to the VM
az vm run-command invoke -g $ResourceGroupName -n $VMName --command-id RunShellScript --scripts "git clone $Gitrepo /home/azureuser/MobilityDB-Azure"
#Execute the installation scripts from the cloned GitHub repository
az vm run-command invoke -g $ResourceGroupName -n $VMName --command-id RunShellScript --scripts "sudo bash /home/azureuser/MobilityDB-Azure/automaticClusterDeployment/KubernetesCluster/installDockerK8s.sh"
az vm run-command invoke -g $ResourceGroupName -n $VMName --command-id RunShellScript --scripts "sudo bash /home/azureuser/MobilityDB-Azure/automaticClusterDeployment/KubernetesCluster/runOnMaster.sh"
az vm run-command invoke -g $ResourceGroupName -n $VMName --command-id RunShellScript --scripts "sudo bash /home/azureuser/MobilityDB-Azure/automaticClusterDeployment/KubernetesCluster/runOnMaster2.sh"
#Get Join token from the logs of the previous command (sudo kubeadm init)
#Operations: cat the log, remove \n and \, get everything after "kubeadm join" until the next \ and finally remove the \
# NOTE(review): the path .../run-command/download/2/stdout hard-codes the
# run-command sequence number; it only holds if exactly the expected number of
# run-commands preceded it — confirm before adding/removing steps above.
JOINCOMMAND=$(az vm run-command invoke -g $ResourceGroupName -n Coordinator --command-id RunShellScript --scripts "sudo cat /var/lib/waagent/run-command/download/2/stdout" | sed 's/\\n/ /g' | sed 's/\\\\/ /g' |grep -o 'kubeadm join.* \[' | sed 's/\[//g' | sed 's/\\t/ /g')
echo "Coordinator Node was successfully deployed."
################################################################################
################################################################################
#                            Workers Creation                                  #
################################################################################
#Create the VMs with the given parameters. Each stage below fans out with "&"
#and then uses "wait" as a barrier before the next stage starts.
for i in $(seq 1 $VMsNumber)
do
VMName="Worker$i";
#Create the VM
az vm create --name $VMName --resource-group $ResourceGroupName --public-ip-address-allocation static --image "UbuntuLTS" --size $VMsSize --vnet-name $VirtualNetwork --subnet default --ssh-key-value $SSHPublicKeyPath --admin-username azureuser &
done
wait #for all the subprocesses of the parallel loop to terminate
for i in $(seq 1 $VMsNumber)
do
VMName="Worker$i";
#Open port 5432 to accept inbound connection from the Citus coordinator
az vm open-port -g $ResourceGroupName -n $VMName --port 5432 --priority 1010 &
done
wait #for all the subprocesses of the parallel loop to terminate
for i in $(seq 1 $VMsNumber)
do
VMName="Worker$i";
#Clone the github repository to the VM
az vm run-command invoke -g $ResourceGroupName -n $VMName --command-id RunShellScript --scripts "git clone $Gitrepo /home/azureuser/MobilityDB-Azure" &
done
wait #for all the subprocesses of the parallel loop to terminate
#Install the required software to every Worker
#The for loop is executed in parallel. This means that every Worker will install the software at the same time.
for i in $(seq 1 $VMsNumber)
do
VMName="Worker$i";
#Execute the installation script from the cloned GitHub repository
az vm run-command invoke -g $ResourceGroupName -n $VMName --command-id RunShellScript --scripts "sudo bash /home/azureuser/MobilityDB-Azure/automaticClusterDeployment/KubernetesCluster/installDockerK8s.sh" &
done
wait #for all the subprocesses of the parallel loop to terminate
#Run the initialization commands in each Worker
for i in $(seq 1 $VMsNumber)
do
VMName="Worker$i";
#Execute the installation script from the cloned GitHub repository
az vm run-command invoke -g $ResourceGroupName -n $VMName --command-id RunShellScript --scripts "sudo bash /home/azureuser/MobilityDB-Azure/automaticClusterDeployment/KubernetesCluster/runOnWorker.sh" &
done
wait #for all the subprocesses of the parallel loop to terminate
echo "Worker Nodes were successfully deployed."
#Add each Worker Node to K8S Cluster, sequentially, using the join command
#captured from the coordinator's kubeadm init log
for i in $(seq 1 $VMsNumber)
do
VMName="Worker$i";
az vm run-command invoke -g $ResourceGroupName -n $VMName --command-id RunShellScript --scripts "$JOINCOMMAND"
done
echo "Worker Nodes were successfully added to the cluster."
################################################################################
################################################################################
#                            MobilityDB Deployment                             #
################################################################################
# The MobilityDB deployment step is currently disabled (commented out):
#az vm run-command invoke -g $ResourceGroupName -n Coordinator --command-id RunShellScript --scripts "bash /home/azureuser/MobilityDB-Azure/KubernetesDeployment/scripts/startK8s.sh"
################################################################################
| true |
f2119667644275781a6529547f0cb09f39359735 | Shell | sds1037681793/ib-web | /WEB-INF/classes/database-backup/backup.sh | UTF-8 | 2,069 | 3.984375 | 4 | [] | no_license | #!/bin/bash
# Purpose: dump every MySQL database on this host (except the listed system
# schemas), compress each dump, and prune old archives.
# Database user name
dbuser='rib'
# Database password
# NOTE(review): the password is passed on the mysql/mysqldump command line,
# where it is visible in `ps` output; consider --defaults-extra-file instead.
dbpasswd='rib'
# Databases that are NOT exported; several names may be listed, separated by
# spaces, e.g.: test test1 test2
exclude_dbname='information_schema mysql performance_schema sys'
# Timestamp used in every file name produced by this run
backtime=`date +%Y%m%d_%H%M%S`
# Directory for the run log
logpath='/home/cando/database-backup/logs'
# Directory for the compressed dumps
datapath='/home/cando/database-backup/data'
# List of every database known to the server
alldatabase=$(mysql -u $dbuser -p$dbpasswd -Bse 'show databases')
if [ ! -d $logpath ];
then
mkdir -p $logpath;
fi
if [ ! -d $datapath ];
then
mkdir -p $datapath;
fi
# Record the start of the run (log message text kept in the original language)
echo $(date "+%Y-%m-%d %H:%M:%S") 备份数据库开始 >> ${logpath}/mysqllog.log
# Dump each database in turn
for database in $alldatabase; do
skipdb=-1;
if [ "$exclude_dbname" != "" ]; then
for db in $exclude_dbname; do
if [ "$database" == "$db" ];then
skipdb=1;
break;
fi
done
fi
if [ "$skipdb" == "-1" ]; then
# Fix: run mysqldump directly instead of capturing it into an unused variable;
# previously the 2>> redirection applied to the assignment, so mysqldump's
# stderr never reached the log file.
mysqldump -u ${dbuser} -p${dbpasswd} ${database} > ${datapath}/${database}_${backtime}.sql 2>> ${logpath}/mysqllog.log;
# On success:
if [ "$?" == 0 ];then
cd $datapath
# Compress the dump to save disk space
tar zcf ${database}_${backtime}.tar.gz ${database}_${backtime}.sql > /dev/null
# Remove the raw .sql file, keeping only the compressed archive
rm -f ${datapath}/${database}_${backtime}.sql
echo $(date "+%Y-%m-%d %H:%M:%S") 数据库 ${database} 备份成功 >> ${logpath}/mysqllog.log
else
# On failure, log it and continue with the next database
echo $(date "+%Y-%m-%d %H:%M:%S") 数据库 ${database} 备份失败 >> ${logpath}/mysqllog.log
fi
fi
done
# Delete backups older than 5 days (adjust the retention period as needed)
find $datapath -mtime +5 -name '*.tar.gz' -exec rm -rf {} \; | true |
9b7efcb496c91617fbb9cc5f47d64b309ec2771b | Shell | brettkolodny/open-runtime-module-library | /scripts/run.sh | UTF-8 | 287 | 2.90625 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
set -e

# First argument is the cargo subcommand; the remaining arguments are passed
# through unchanged to every cargo invocation.
COMMAND=$1
shift

set -x

# NOTE(review): without `shopt -s globstar`, bash expands `**` exactly like
# `*`, so only Cargo.toml files one directory deep are found — presumably all
# member crates live at the top level; confirm before enabling globstar.
for file in **/Cargo.toml; do
	# Skip the packages that are excluded from the per-crate run.
	if [ "$file" == "xtokens/Cargo.toml" ] || [ "$file" == "xcm-support/Cargo.toml" ] || [ "$file" == "unknown-tokens/Cargo.toml" ]
	then
		continue
	fi
	# Fix: "$@" (quoted) preserves arguments containing spaces; the original
	# unquoted $@ would word-split them.
	cargo $COMMAND --manifest-path "$file" "$@";
done
| true |
fea9d2a63505f407f059a2b135325252ad0a0853 | Shell | wilson-tim/stellahomedw | /DWLIVE/hotelpayments/HJload/iacs_hj_load_all.ksh | UTF-8 | 12,298 | 3.078125 | 3 | [] | no_license | #!/usr/bin/ksh
######################################################################################################
#
#  PROGRAM: iacs_hj_load_all.ksh
#
#  DATE  BY DESCRIPTION
#  ----  -- -----------
#
#  12/06/02 JR Initial Version.
#  17/12/02 LA Allocate more memory to java xmlload
#  08/09/03 JR Allocate more memory to Java XML booking load using http2ftp, added -mx256m for booking.xml
#  06/07/05 PB Handle invalid xml files.
#   Set java database connection string dynamically.
#   Increase memory allocation for java booking and booking_accom loads from 256 to 512
#
# Script to load into datawarehouse-iacs tables from xml files provided by Hayes & Jarvis
# Step 1 : Fetch files from remote H&J server using http , this will create .xml files into /data/hotelpayments/XMLFILES
# Step 2 : Reference and Booking files will be loaded first into temp tables using Oracle PutXML
# Step 3 : Reference and Booking data loaded into actual tables using pl sql program
#
# #######################################################################################################
#
# Set ClassPath variable
# Set oracle variables
#
. /home/dw/bin/set_oracle_variables.ksh
. /home/dw/bin/set_java_variables.ksh
# Date stamp (format %Y%b%d, e.g. 2005Jul06) used in every log/zip file name for this run
today_d=`date +%Y%b%d`
# Pick the JDBC connection string matching the current Oracle instance
case $ORACLE_SID in
DWD) java_conn="jdbc:oracle:thin:@dwdev:1521:dwd";;
DWT) java_conn="jdbc:oracle:thin:@dwtest:1521:dwt";;
#DWL) java_conn="jdbc:oracle:thin:@dwlive_en1:1521:dwl";;
DWL) java_conn="jdbc:oracle:thin:@vsvr-orac01:1521:dwl";;
esac
dbase_id=DWLIVE
home="/home/dw/"$dbase_id # Home path
lg_path=$home"/logs/"
f_log_path=$lg_path"hotelpayments" # Path for output from jobs
iacs_path=$home"/hotelpayments/HJload" # Path for the hotel payments sql source files
zip_path="/data/hotelpayments/XMLFILES/" # Path for zip files
echo "IACS Hayes&Jarvis Load started" > $f_log_path/iacshjload_all$today_d.log
date >> $f_log_path/iacshjload_all$today_d.log
########################################################################################################################
# Step 1 : Fetch files from remote H&J server using http , this will create .xml files into /data/hotelpayments/XMLFILES
########################################################################################################################
#it calls TMS - database name hjtmsmis user name iacs/password ip address is 10.8.253.60
echo "Fetching reference and booking data file from H&J server " >> $f_log_path/iacshjload_all$today_d.log
# http2file saves each server-side stored procedure's XML output into a local
# file; the two booking extracts get a bigger java heap (-mx512m).
java http2file hjintranet 80 /sql/?sql=execute+hj_location?root=root /data/hotelpayments/XMLFILES/location.xml >> $f_log_path/iacshjload_all$today_d.log 2>&1
java http2file hjintranet 80 /sql/?sql=execute+hj_property?root=root /data/hotelpayments/XMLFILES/property.xml >> $f_log_path/iacshjload_all$today_d.log 2>&1
java http2file hjintranet 80 /sql/?sql=execute+hj_propertycode?root=root /data/hotelpayments/XMLFILES/propertycode.xml >> $f_log_path/iacshjload_all$today_d.log 2>&1
java -mx512m http2file hjintranet 80 /sql/?sql=execute+hj_booking?root=root /data/hotelpayments/XMLFILES/booking.xml >> $f_log_path/iacshjload_all$today_d.log 2>&1
java -mx512m http2file hjintranet 80 /sql/?sql=execute+hj_bookingaccom?root=root /data/hotelpayments/XMLFILES/bookingaccom.xml >> $f_log_path/iacshjload_all$today_d.log 2>&1
#######################################################################################################################
# Step 2 : Reference and Booking files will be loaded first into temp tables using Oracle PutXML
#######################################################################################################################
date >> $f_log_path/iacshjload_all$today_d.log
echo "Loading Reference data into temporary tables " >> $f_log_path/iacshjload_all$today_d.log
# Check Reference data xml files are ok first: a fetched file containing the
# text "could not find" is an error page rather than a valid XML export.
echo "Checking for invalid Reference data xml files" >> $f_log_path/iacshjload_all$today_d.log
grep -i 'could not find' /data/hotelpayments/XMLFILES/location.xml
grep_status=$?
# Fix (applied throughout): a ";" separator is required between "]]" and a
# same-line "then".
if [[ $grep_status -eq 0 ]]; then
echo " " >> $f_log_path/iacshjload_all$today_d.log
echo "*** ERROR INVALID XML FILE *** "/data/hotelpayments/XMLFILES/location.xml >> $f_log_path/iacshjload_all$today_d.log
echo " " >> $f_log_path/iacshjload_all$today_d.log
fi
grep -i 'could not find' /data/hotelpayments/XMLFILES/property.xml
grep_status=$?
if [[ $grep_status -eq 0 ]]; then
echo " " >> $f_log_path/iacshjload_all$today_d.log
echo "*** ERROR INVALID XML FILE *** "/data/hotelpayments/XMLFILES/property.xml >> $f_log_path/iacshjload_all$today_d.log
echo " " >> $f_log_path/iacshjload_all$today_d.log
fi
grep -i 'could not find' /data/hotelpayments/XMLFILES/propertycode.xml
grep_status=$?
if [[ $grep_status -eq 0 ]]; then
echo " " >> $f_log_path/iacshjload_all$today_d.log
echo "*** ERROR INVALID XML FILE *** "/data/hotelpayments/XMLFILES/propertycode.xml >> $f_log_path/iacshjload_all$today_d.log
echo " " >> $f_log_path/iacshjload_all$today_d.log
fi
java OracleXML putXML -user "iacs/iacs" -conn $java_conn -commitBatch 10 -rowTag LOCATION -dateFormat "dd/MM/yyyy" -fileName /data/hotelpayments/XMLFILES/location.xml temp_xml_location >> $f_log_path/iacshjload_all$today_d.log 2>&1
# Check some rows were loaded
grep -i "rows into temp_xml_location" $f_log_path/iacshjload_all$today_d.log
grep_status=$?
if [[ $grep_status != 0 ]]; then
echo "*** ERROR - MISSING ROWS from location.xml ***" >> $f_log_path/iacshjload_all$today_d.log
fi
java OracleXML putXML -user "iacs/iacs" -conn $java_conn -commitBatch 10 -rowTag PROPERTY -dateFormat "dd/MM/yyyy" -fileName /data/hotelpayments/XMLFILES/property.xml temp_xml_property >> $f_log_path/iacshjload_all$today_d.log 2>&1
# Check some rows were loaded
grep -i "rows into temp_xml_property" $f_log_path/iacshjload_all$today_d.log
grep_status=$?
if [[ $grep_status != 0 ]]; then
echo "*** ERROR - MISSING ROWS from property.xml ***" >> $f_log_path/iacshjload_all$today_d.log
fi
java OracleXML putXML -user "iacs/iacs" -conn $java_conn -commitBatch 10 -rowTag OTOP_PROPERTY -dateFormat "dd/MM/yyyy" -fileName /data/hotelpayments/XMLFILES/propertycode.xml temp_xml_otop_property >> $f_log_path/iacshjload_all$today_d.log 2>&1
# Check some rows were loaded
grep -i "rows into temp_xml_otop_property" $f_log_path/iacshjload_all$today_d.log
grep_status=$?
if [[ $grep_status != 0 ]]; then
echo "*** ERROR - MISSING ROWS from propertycode.xml ***" >> $f_log_path/iacshjload_all$today_d.log
fi
date >> $f_log_path/iacshjload_all$today_d.log
echo "Loading HandJ booking and bookingaccom data into temporary tables " >> $f_log_path/iacshjload_all$today_d.log
# Check Booking data xml files are ok next
echo "Checking for invalid Booking data xml files" >> $f_log_path/iacshjload_all$today_d.log
grep -i 'could not find' /data/hotelpayments/XMLFILES/booking.xml
grep_status=$?
if [[ $grep_status -eq 0 ]]; then
echo " " >> $f_log_path/iacshjload_all$today_d.log
echo "*** ERROR INVALID XML FILE *** "/data/hotelpayments/XMLFILES/booking.xml >> $f_log_path/iacshjload_all$today_d.log
echo " " >> $f_log_path/iacshjload_all$today_d.log
fi
grep -i 'could not find' /data/hotelpayments/XMLFILES/bookingaccom.xml
grep_status=$?
if [[ $grep_status -eq 0 ]]; then
echo " " >> $f_log_path/iacshjload_all$today_d.log
echo "*** ERROR INVALID XML FILE *** "/data/hotelpayments/XMLFILES/bookingaccom.xml >> $f_log_path/iacshjload_all$today_d.log
echo " " >> $f_log_path/iacshjload_all$today_d.log
fi
java -mx512m OracleXML putXML -user "iacs/iacs" -conn $java_conn -commitBatch 10 -rowTag BOOKING -dateFormat "dd/MM/yyyy" -fileName /data/hotelpayments/XMLFILES/booking.xml temp_xml_booking >> $f_log_path/iacshjload_all$today_d.log 2>&1
# Check some rows were loaded
grep -i "rows into temp_xml_booking" $f_log_path/iacshjload_all$today_d.log
grep_status=$?
if [[ $grep_status != 0 ]]; then
echo "*** ERROR - MISSING ROWS from booking.xml ***" >> $f_log_path/iacshjload_all$today_d.log
fi
java -mx512m OracleXML putXML -user "iacs/iacs" -conn $java_conn -commitBatch 10 -rowTag BOOKINGACCOM -dateFormat "dd/MM/yyyy" -fileName /data/hotelpayments/XMLFILES/bookingaccom.xml temp_xml_booking_accom >> $f_log_path/iacshjload_all$today_d.log 2>&1
# Check some rows were loaded
grep -i "rows into temp_xml_booking_accom" $f_log_path/iacshjload_all$today_d.log
grep_status=$?
if [[ $grep_status != 0 ]]; then
echo "*** ERROR - MISSING ROWS from bookingaccom.xml ***" >> $f_log_path/iacshjload_all$today_d.log
fi
date >> $f_log_path/iacshjload_all$today_d.log
#######################################################################################################################
# Step 3 : Reference and Booking data are loaded from the temp tables into the actual tables using a PL/SQL program
########################################################################################################################
echo "Loading H&J reference and Booking data into actual tables" >> $f_log_path/iacshjload_all$today_d.log
sqlplus iacs/iacs @$iacs_path/iacs_hj_load >> $f_log_path/iacshjload_all$today_d.log
date >> $f_log_path/iacshjload_all$today_d.log
#######################################################################################################################
# Now zip these files and move ref and booking file into backup dir
#######################################################################################################################
echo "XML load finished now backing up the files" >> $f_log_path/iacshjload_all$today_d.log
date >> $f_log_path/iacshjload_all$today_d.log
cd $zip_path
for filename in `ls *.xml`
do
zip xmlbkup$today_d.zip $filename >> $f_log_path/iacshjload_all$today_d.log
done
cd $iacs_path
# Take backup of the zip files
mv -f $zip_path/*.zip $zip_path/backup
##############################################################################################
# Delete old backup zip files (find uses -mtime +100, i.e. older than 100 days;
# the previous comment incorrectly said 60 days)
echo "About to delete the following backup files"
find $zip_path/backup $zip_path/backup/*.* -mtime +100 -exec ls -ltr {} \;
find $zip_path/backup $zip_path/backup/*.* -mtime +100 -exec rm -f {} \;
date >>$f_log_path/iacshjload_all$today_d.log
echo "FINISHED Hayes&Jarvis Load!" >> $f_log_path/iacshjload_all$today_d.log &
##############################################################################################
# if Error in xml load then send log file along with no. of records in iacs_general_error table to support team
##############################################################################################
# Check for problems with xml files
grep -i "ERROR" $f_log_path/iacshjload_all$today_d.log
grep_status=$?
if [[ $grep_status != 0 ]]; then
# xml files not invalid but also need to check if rows were loaded
grep -i "MISSING ROWS" $f_log_path/iacshjload_all$today_d.log
grep_status=$?
fi
# Spool today's severity>1 rows from iacs_general_error into errorfile.err
sqlplus -s dw/dbp << !
set heading off
set termout off
set feedback off
set lines 2000
set echo off
set verify off
spool /home/dw/DWLIVE/hotelpayments/HJload/errorfile.err
select datetime_errors_found , parameter1, parameter2, error_code, description1
from iacs.iacs_general_error
where to_char(datetime_errors_found) = to_char(sysdate)
and (description2 <> 'Travelink' OR description2 IS NULL)
and severity > 1;
exit
!
# -s tests that size > 0 bytes
# $grep_status indicates error in xml files
# Fix: the two tests were joined with a single "|" (a pipeline), which
# discarded the result of the first test; "||" is the intended logical OR.
if [[ -s $iacs_path/errorfile.err ]] || [[ $grep_status -eq 0 ]]; then
echo "Batch Program run on machine:" > $iacs_path/mail.lst
hostname >> $iacs_path/mail.lst
cat $f_log_path/iacshjload_all$today_d.log >> $iacs_path/mail.lst
if [[ -s $iacs_path/errorfile.err ]]; then
echo ' Following records from iacs_general_error table are in error :' >> $iacs_path/mail.lst
fi
cat $iacs_path/errorfile.err >> $iacs_path/mail.lst
echo "Error found"
mailx -s "DWHSE side : ERRORS in Hayes & Jarvis booking load into iacs datawarehouse" basds@firstchoice.co.uk < /home/dw/DWLIVE/hotelpayments/HJload/mail.lst
mailx -s "DWHSE side : ERRORS in Hayes & Jarvis booking load into iacs datawarehouse" jon.hollamby@firstchoice.co.uk < /home/dw/DWLIVE/hotelpayments/HJload/mail.lst
fi
| true |
397d9b073c5e8cd98c6be0f7ec5d70eca5a933c4 | Shell | envoyproxy/envoy | /examples/dynamic-config-cp/verify.sh | UTF-8 | 2,864 | 2.734375 | 3 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/bin/bash -e
# Integration test for the dynamic-config-cp sandbox: brings up an Envoy proxy
# plus a go-control-plane xDS server and verifies that cluster configuration is
# delivered over xDS, survives a control-plane outage, and can be updated live.
# Helper functions (run_log, wait_for, responds_with) and the DOCKER_COMPOSE
# array come from verify-common.sh, sourced below.
export NAME=dynamic-config-cp
export UPARGS=" proxy"
# shellcheck source=examples/verify-common.sh
. "$(dirname "${BASH_SOURCE[0]}")/../verify-common.sh"
# Phase 1: only the proxy is up — no dynamic config yet, so the listener port
# should not accept connections and the config dump has only the static cluster.
run_log "Check port 10000 is not open (still shows as succeeded)"
nc -zv localhost 10000 |& grep -v open
run_log "Check the static cluster"
curl -s http://localhost:19000/config_dump \
    | jq -r '.configs[1].static_clusters' \
    | grep 'go-control-plane'
run_log "Check there is no config for dynamic clusters"
curl -s http://localhost:19000/config_dump \
    | jq -r '.configs[1].dynamic_active_clusters // "NO_CLUSTERS"' \
    | grep NO_CLUSTERS
# Phase 2: start the control plane and confirm version 1 of the dynamic
# cluster config (pointing at service1) is pushed to the proxy.
run_log "Bring up go-control-plane"
"${DOCKER_COMPOSE[@]}" up --build -d go-control-plane
wait_for 30 sh -c "${DOCKER_COMPOSE[*]} ps go-control-plane | grep healthy | grep -v unhealthy"
wait_for 10 bash -c "responds_with 'Request served by service1' http://localhost:10000"
run_log "Check for response from service1 backend"
responds_with \
    "Request served by service1" \
    http://localhost:10000
run_log "Check config for active clusters"
curl -s http://localhost:19000/config_dump \
    | jq -r '.configs[1].dynamic_active_clusters' \
    | grep '"version_info": "1"'
curl -s http://localhost:19000/config_dump \
    | jq -r '.configs[1].dynamic_active_clusters' \
    | grep '"address": "service1"'
# Phase 3: stop the control plane — the proxy must keep serving with the
# last-received (version 1) configuration.
run_log "Bring down the control plane"
"${DOCKER_COMPOSE[@]}" stop go-control-plane
wait_for 10 sh -c "\
    curl -s http://localhost:19000/config_dump \
    | jq -r '.configs[1].dynamic_active_clusters' \
    | grep '\"version_info\": \"1\"'"
run_log "Check for continued response from service1 backend"
responds_with \
    "Request served by service1" \
    http://localhost:10000
run_log "Check config for active clusters"
curl -s http://localhost:19000/config_dump \
    | jq -r '.configs[1].dynamic_active_clusters' \
    | grep '"version_info": "1"'
curl -s http://localhost:19000/config_dump \
    | jq -r '.configs[1].dynamic_active_clusters' \
    | grep '"address": "service1"'
# Phase 4: edit the control-plane source (service1 -> service2, version 1 -> 2),
# restart it, and confirm the proxy picks up version 2.
run_log "Edit resource.go"
sed -i'.bak' s/service1/service2/ resource.go
sed -i'.bak2' s/\"1\",/\"2\",/ resource.go
run_log "Bring back up the control plane"
"${DOCKER_COMPOSE[@]}" up --build -d go-control-plane
wait_for 30 sh -c "${DOCKER_COMPOSE[*]} ps go-control-plane | grep healthy | grep -v unhealthy"
run_log "Check for response from service2 backend"
wait_for 5 bash -c "responds_with \
    'Request served by service2' \
    http://localhost:10000"
run_log "Check config for active clusters pointing to service2"
curl -s http://localhost:19000/config_dump \
    | jq -r '.configs[1].dynamic_active_clusters' \
    | grep '"version_info": "2"'
curl -s http://localhost:19000/config_dump \
    | jq -r '.configs[1].dynamic_active_clusters' \
    | grep '"address": "service2"'
# Restore the original control-plane source from the sed backup.
mv resource.go.bak resource.go
| true |
2f999e248699bf08266ac07fc1ec638dcfae5a63 | Shell | NikhilDusane222/Shellscript | /Sequences/2diceadd.sh | UTF-8 | 126 | 2.828125 | 3 | [] | no_license | #!/bin/bash -x
# Roll two six-sided dice and print their sum.
# Each die is a uniform value in 1..6 drawn from bash's $RANDOM.
dice1=$((1+RANDOM%6))
dice2=$((1+RANDOM%6))
sum=$((dice1 + dice2))
# Fix: keep the format string constant instead of interpolating $sum into it
# (printf format strings should never be built from data).
printf 'Sum of both dice is: %d\n' "$sum"
| true |
5018c2395aa075f7c610737fb4ed3a3a60fb6137 | Shell | OpenMandrivaAssociation/vdr-plugin-console | /03_command_from_cli.dpatch | UTF-8 | 3,327 | 2.65625 | 3 | [] | no_license | #! /bin/sh /usr/share/dpatch/dpatch-run
## 03_command_from_cli.dpatch by Thomas Schmidt <tschmidt@debian.org>
##
## All lines beginning with `## DP:' are a description of the patch.
## DP: Added commandline option --command to be able to specify which
## DP: command should be called when starting the plugin.
@DPATCH@
diff -urNad vdr-plugin-console-0.6.0/console.c /tmp/dpep.7Ca0hf/vdr-plugin-console-0.6.0/console.c
--- vdr-plugin-console-0.6.0/console.c 2004-09-11 23:44:25.000000000 +0200
+++ /tmp/dpep.7Ca0hf/vdr-plugin-console-0.6.0/console.c 2005-04-11 19:55:48.583455832 +0200
@@ -60,14 +60,34 @@
const char *cPluginConsole::CommandLineHelp() {
- return NULL;
+ // Return a string that describes all known command line options.
+ return " -c prog, --command=prog specify the programm which is\n"
+ " called when you start the plugin\n";
}
bool cPluginConsole::ProcessArgs(int argc, char *argv[]) {
- return true;
+ // Implement command line argument processing here if applicable.
+ static struct option long_options[] = {
+ { "command", required_argument, NULL, 'c' },
+ { NULL, no_argument, NULL, 0 },
+ };
+
+ int c;
+ while ((c = getopt_long(argc, argv, "c:", long_options, NULL)) != -1) {
+ switch (c) {
+ case 'c':
+ fprintf(stderr, "arg: %s\n", optarg);
+ ConsoleCmd = strdup(optarg);
+ break;
+ default:
+ fprintf(stderr, "arg char: %c\n", c);
+ return false;
+ }
+ }
+ return true;
}
diff -urNad vdr-plugin-console-0.6.0/virtualconsoles.c /tmp/dpep.7Ca0hf/vdr-plugin-console-0.6.0/virtualconsoles.c
--- vdr-plugin-console-0.6.0/virtualconsoles.c 2004-09-11 23:40:18.000000000 +0200
+++ /tmp/dpep.7Ca0hf/vdr-plugin-console-0.6.0/virtualconsoles.c 2005-04-11 20:11:07.817766136 +0200
@@ -20,6 +20,17 @@
#include "i18n.h"
+// This program will be used to realize a console.
+// Be carefull! If your VDR runs as root, then every
+// user can kill your machine if you don't protect
+// the cnsoles.
+// So the default is "/bin/login".
+
+#ifdef CONSOLE_USE_SHELL_INSTEAD_LOGIN
+const char *ConsoleCmd="/bin/sh";
+#else
+const char *ConsoleCmd="/bin/login";
+#endif
cConsConsoles::cConsConsoles()
@@ -128,9 +139,9 @@
int cConsConsoles::CreateConsole() {
- char* const args[] = {PROG_FOR_CONSOLE, NULL};
+ char* const args[] = { (char*)ConsoleCmd, NULL };
- cConsVirtualConsole* p = new cConsVirtualConsole(tr("Console"), PROG_FOR_CONSOLE, args);
+ cConsVirtualConsole* p = new cConsVirtualConsole( tr("Console"), ConsoleCmd, args );
if (p) {
diff -urNad vdr-plugin-console-0.6.0/virtualconsoles.h /tmp/dpep.7Ca0hf/vdr-plugin-console-0.6.0/virtualconsoles.h
--- vdr-plugin-console-0.6.0/virtualconsoles.h 2004-09-11 23:40:18.000000000 +0200
+++ /tmp/dpep.7Ca0hf/vdr-plugin-console-0.6.0/virtualconsoles.h 2005-04-11 19:55:48.584455680 +0200
@@ -21,19 +21,7 @@
-// This program will be used to realize a console.
-// Be carefull! If your VDR runs as root, then every
-// user can kill your machine if you don't protect
-// the cnsoles.
-// So the default is "/bin/login".
-
-#ifdef CONSOLE_USE_SHELL_INSTEAD_LOGIN
-#define PROG_FOR_CONSOLE "/bin/sh"
-#else
-#define PROG_FOR_CONSOLE "/bin/login"
-#endif
-
-
+extern const char *ConsoleCmd;
class cConsVirtualConsole;
| true |
8640d21b4d3b03ff54b01545760d446e61837d83 | Shell | scudiero/tools | /lib/SelectMenuNew.sh | UTF-8 | 7,019 | 3.515625 | 4 | [] | no_license | ## XO NOT AUTOVERSION
#===================================================================================================
# version="2.1.2" # -- dscudiero -- Thu 05/24/2018 @ 9:24:51.78
#===================================================================================================
# Display a selection menu
# SelectMenuNew <MenueItemsArrayName> <returnVariableName> <Prompt text>
# First line of the array is the header, first char of the header is the data delimiter
#
# If first 2 chars of the returnVariableName is 'ID' then will return the ordinal number of the
# response, otherwise the input line responding to the ordinal selected will be returned
#===================================================================================================
# 03-8-16 - dgs - initial
#===================================================================================================
# Copyright 2016 David Scudiero -- all rights reserved.
# All rights reserved
#===================================================================================================
# Display a selection menu built from a delimited array and return the choice.
#   SelectMenuNew [-m] [-r] <arrayName> <returnVarName> [prompt...]
# The array's first element is the header; its first character is the field
# delimiter. If the header's second field starts with "ord"/"key" the ordinal
# values come from the data itself, otherwise they are generated.
# If returnVarName starts with a key match, the matching data line is returned;
# 'x'/'q' returns empty, 'r' returns REFRESHLIST.
# Relies on external helpers: dump, Lower, ColorM, ColorE, Trim, PadChar,
# IsNumeric, Contains, and the global $tabStr.
function SelectMenuNew {
	local menuListArrayName returnVarName menuPrompt allowMultiples=false allowRange=false screenWidth=80
	local i printStr tmpStr length validVals numCols=0 ordinalInData=false
	## Parse arguments
	until [[ -z "$*" ]]; do
		case ${1:0:2} in
			-m ) allowMultiples=true ;;
			-r ) allowRange=true ;;
			* )
				# Positional args: array name first, return variable second,
				# everything after that is appended to the prompt text.
				[[ -z $menuListArrayName ]] && { menuListArrayName=$1[@]; menuListArray=("${!menuListArrayName}"); shift || true; continue; }
				[[ -z $returnVarName ]] && { returnVarName="$1"; shift || true; continue; }
				menuPrompt="$menuPrompt $1"
				;;
		esac
		shift || true
	done
	dump 2 menuListArrayName returnVarName menuPrompt allowMultiples allowRange
	# Use the real terminal width when we have a usable terminal.
	[[ $TERM != '' && $TERM != 'dumb' ]] && { screenWidth=$(stty size </dev/tty); screenWidth="${screenWidth#* }"; }
	## Parse header
	local header="${menuListArray[0]}"
	local delim=${header:0:1}
	# Count delimiter occurrences to get the number of columns.
	for (( i=0; i<=${#header}; i++ )); do
		[[ ${header:$i:1} == $delim ]] && let numCols=numCols+1;
	done
	tmpStr="$(Lower "$(cut -d"$delim" -f2 <<< "$header")")"
	[[ ${tmpStr:0:3} == 'ord' || ${tmpStr:0:3} == 'key' ]] && ordinalInData=true && tmpStr="$(cut -d"$delim" -f2 <<< "$header")" || tmpStr='Ord'
	[[ $menuPrompt == '' ]] && menuPrompt="\n${tabStr}Please enter the ordinal number $(ColorM "($tmpStr)") for an item above (or 'X' to quit) > "
	dump -3 header delim numCols ordinalInData menuPrompt
	## Loop through data and get the max widths of each column
	maxWidths=()
	for record in "${menuListArray[@]}"; do
		record="${record:1}"
		for (( i=1; i<=$numCols; i++ )); do
			local tmpStr="$(echo "$record" | cut -d$delim -f$i)"
			maxWidth=${maxWidths[$i]}
			[[ ${#tmpStr} -gt $maxWidth ]] && maxWidths[$i]=${#tmpStr}
		done
	done
	# Fix: the original ended this one-liner with `done fi` (missing `;`),
	# which is a parse error when the file is sourced.
	if [[ $verboseLevel -ge 3 ]]; then for (( i=1; i<= $numCols; i++ )); do echo '${maxWidths[$i]} = >'${maxWidths[$i]}'<'; done; fi
	## Loop through data and build menu lines
	declare -A menuItems
	local key menuItemsCntr=-1 menuItemsKeys=()
	for record in "${menuListArray[@]}"; do
		record="${record:1}"
		unset menuItem
		# Pad each field to its column's max width so the menu lines up.
		for (( i=1; i<=$numCols; i++ )); do
			local tmpStr="$(echo "$record" | cut -d$delim -f$i)"$(PadChar ' ' $screenWidth)
			maxWidth=${maxWidths[$i]}
			menuItem=$menuItem${tmpStr:0:$maxWidth+1}
		done
		if [[ $ordinalInData == true ]]; then
			# The first field of the row is the key; the rest is the display text.
			key="$(cut -d' ' -f1 <<< $menuItem)"
			menuItem="$(Trim "$(cut -d' ' -f2- <<< "$menuItem")")"
		else
			((menuItemsCntr++)) || true
			key=$menuItemsCntr
		fi
		menuItems[$key]="$menuItem"
		menuItemsKeys+=($key)
		[[ $(IsNumeric "$key") == true ]] && validVals="$validVals,$key"
	done
	# for i in ${menuItemsKeys[@]}; do
	# 	echo -e "\tkey: '$i', value: '${menuItems[$i]}'";
	# done
	# Pause
	## Display menu
	tmpStr=${#menuItemsKeys[@]}
	maxIdxWidth=${#tmpStr}
	## Print header
	unset printStr
	if [[ $ordinalInData == false ]]; then
		printStr="Ord$(PadChar ' ' 10)"
		let length=$maxIdxWidth+2
		printStr=${printStr:0:$length+1}
	else
		[[ ${maxWidths[1]} -lt $maxIdxWidth+2 ]] && let length=$maxIdxWidth+2 || let length=${maxWidths[1]}
	fi
	key="${menuItemsKeys[0]}"
	printStr="${printStr}${menuItems[$key]}"
	[[ $ordinalInData == true ]] && printStr="${key} ${printStr:0:$screenWidth}" || printStr="${printStr:0:$screenWidth}"
	echo -e "\t$(ColorM "$printStr")"
	## Print 'data' rows
	menuItemsKeys=("${menuItemsKeys[@]:1}") ## pop off the first row which contains the header
	for i in ${menuItemsKeys[@]}; do
		menuItem="${menuItems[$i]}"
		tmpStr="(${i})$(PadChar ' ' 10)"
		printStr="$(ColorM "${tmpStr:0:$length}") $menuItem"
		printStr="${printStr:0:$screenWidth}"
		echo -e "\t$printStr"
	done;
	## Print prompt
	echo -ne "$menuPrompt"
	[[ $ordinalInData != true ]] && validVals="{1-$i}" || unset validVals
	## Loop on response
	unset ans retVal invalidVals
	while [[ $ans == '' ]]; do
		read ans; ans=$(Lower $ans)
		[[ ${ans:0:1} == 'x' || ${ans:0:1} == 'q' ]] && eval $returnVarName='' && return 0
		[[ ${ans:0:1} == 'r' ]] && eval $returnVarName='REFRESHLIST' && return 0
		## If ans contains a '-' and allow range is set then expand the range
		if [[ $(Contains "$ans" '-' ) == true && $allowRange == true ]]; then
			local front=${ans%%-*}; lowerIdx=${front: -1}
			local back=${ans##*-}; upperIdx=${back:0:1}
			for ((iix=$lowerIdx+1; iix<$upperIdx; iix++)); do
				front="$front,$iix"
			done
			ans="$front,$back"
		fi
		## Check responses
		foundAll=true
		for token in ${ans//,/ }; do
			# ${menuItems["$token"]+abc} is non-empty only if the key exists.
			if [[ ${menuItems["$token"]+abc} ]]; then
				token="$(Trim "${menuItems[$token]}")";
				retVal="$retVal|$token"
			else
				foundAll=false
			fi
		done
		if [[ $foundAll != true ]]; then
			printf "${tabStr}$(ColorE *Error*) -- Invalid selection, '$ans', valid value in $validVals, please try again > "
			unset ans invalidVals
		fi
	done
	## Return the data in the named variable
	# Multiple selections are joined with '|'; strip a leading separator.
	[[ ${retVal:0:1} == '|' ]] && retVal="${retVal:1}"
	eval $returnVarName=\"$(Trim $retVal)\"
} #SelectMenuNew
#===================================================================================================
# Check-in Log
#===================================================================================================
## Wed Jan 4 13:54:23 CST 2017 - dscudiero - General syncing of dev to prod
## Thu Feb 16 06:59:22 CST 2017 - dscudiero - Added an option to pull the ordinals from the input data
## 04-17-2017 @ 10.31.12 - ("2.0.15") - dscudiero - fix issue when returning data for xxxxId variables
## 04-25-2017 @ 14.40.09 - ("2.0.16") - dscudiero - Remove debug stuff
## 04-26-2018 @ 08:33:54 - 2.0.28 - dscudiero - Remove debug statement
## 05-14-2018 @ 08:29:56 - 2.1.1 - dscudiero - Add ability to specify ranges
## 05-24-2018 @ 09:26:44 - 2.1.2 - dscudiero - Fix spelling
| true |
054e651df7a0d3ffdb7c636ca0bb02c8e5e19adc | Shell | megalnx/slackvirt | /bin/wpa-notify | UTF-8 | 329 | 2.921875 | 3 | [] | no_license | #!/bin/bash
# Desktop notifier hook for wpa_supplicant action events.
#   $1 = interface name, $2 = event name (CONNECTED / DISCONNECTED)
# Shows a libnotify popup on display :0.0 and mirrors the same text in the
# tmux status line. Any other event is silently ignored.
ICON=/usr/share/pixmaps/wpa_gui.png

case "$2" in
  CONNECTED)    msg="$1 connected" ;;
  DISCONNECTED) msg="$1 disconnected" ;;
  *)            msg="" ;;
esac

if [ -n "$msg" ]; then
  DISPLAY=:0.0 notify-send -i "$ICON" "WPA supplicant:" "$msg"
  tmux display-message "$msg"
fi
| true |
4ce76e0ee749e34a023f1e4a971c425e556b80cd | Shell | petronny/aur3-mirror | /dmd2-complete/PKGBUILD | UTF-8 | 2,960 | 2.890625 | 3 | [] | no_license | # Maintainer: Mihail Strashun <m.strashun@gmail.com> aka Volfram
# PKGBUILD for the Digital Mars D2 compiler plus druntime and Phobos,
# built from the upstream dmd zip release.
pkgname=dmd2-complete
pkgver=2.057
pkgrel=1
pkgdesc="The Digital Mars D compiler & Standard Library (D2 version)"
arch=('i686' 'x86_64')
url="http://www.digitalmars.com/d/2.0/"
source=(http://ftp.digitalmars.com/dmd.$pkgver.zip)
md5sums=('531c4b60eb002ea8abbe5c80b2eb677d')
provides=('d-compiler='$pkgver 'dmd2='$pkgver 'libphobos2='$pkgver)
license=('custom')
options=('!strip' 'docs')
conflicts=('libtango' 'dmd2' 'dmd2-bin')
depends=('gcc-libs')
# Map the makepkg target architecture to the MODEL=32/64 value used by the
# upstream posix.mak makefiles.
if [ $CARCH = 'x86_64' ]
then
	archstr="64"
fi
if [ $CARCH = 'i686' ]
then
	archstr="32"
fi
build() {
  # Build and install dmd binary
  cd $srcdir/dmd2/src/dmd
  make -f posix.mak MODEL=$archstr
  install -Dm755 ./dmd $pkgdir/usr/bin/dmd
  # Put the freshly built dmd on PATH so druntime/phobos builds below find it;
  # PATH is restored at the end of build().
  oldpath=$PATH
  export PATH=$PATH:`pwd`
  # Copy additional tools
  install -Dm755 $srcdir/dmd2/linux/bin$archstr/dumpobj $pkgdir/usr/bin/dumpobj
  install -Dm755 $srcdir/dmd2/linux/bin$archstr/obj2asm $pkgdir/usr/bin/obj2asm
  install -Dm755 $srcdir/dmd2/linux/bin$archstr/rdmd $pkgdir/usr/bin/rdmd
  # Generate the compiler configuration with system include/lib paths.
  echo -e "[Environment]\nDFLAGS=-m$archstr -I/usr/include/d -I/usr/include/d/druntime/import -L-L/usr/lib -L-lrt" > $startdir/dmd.conf
  install -Dm644 $startdir/dmd.conf $pkgdir/etc/dmd.conf
  # Copy the license
  install -Dm644 $srcdir/dmd2/license.txt $pkgdir/usr/share/licenses/dmd/COPYING
  # Copy man files
  for x in $srcdir/dmd2/man/man1/*.1; do
    install -Dm644 $x "$pkgdir/usr/share/man/man1/$(basename $x)"
  done
  # NOTE(review): this globs man1/*.5 but installs into man5 — presumably the
  # source glob should be man5/*.5; confirm against the upstream zip layout.
  for x in $srcdir/dmd2/man/man1/*.5; do
    install -Dm644 $x "$pkgdir/usr/share/man/man5/$(basename $x)"
  done
  # Copy documentation
  mkdir -p $pkgdir/usr/share/doc/d/phobos
  docs="$srcdir/dmd2/html/d"
  for x in $(find $docs/*.html $docs/*.gif $docs/*.ico $docs/*.jpg $docs/*.css); do
    install -Dm644 $x "$pkgdir/usr/share/doc/d/$(basename $x)"
  done
  for x in $(find $docs/phobos/*.html $docs/phobos/*.gif $docs/phobos/*.css); do
    install -Dm644 $x "$pkgdir/usr/share/doc/d/phobos/$(basename $x)"
  done
  # Standard library and runtime
  # Build and install druntime
  cd $srcdir/dmd2/src/druntime
  make -f posix.mak MODEL=$archstr
  install -Dm644 ./lib/libdruntime.a $pkgdir/usr/lib/libdruntime.a
  # Build and install standard library binary
  cd $srcdir/dmd2/src/phobos
  make -f posix.mak MODEL=$archstr
  install -Dm644 ./generated/linux/release/$archstr/libphobos2.a $pkgdir/usr/lib/libphobos2.a
  # Install standard library *.d modules
  mkdir -p $pkgdir/usr/include/d
  cd $srcdir/dmd2/src/phobos
  cp -Rf std $pkgdir/usr/include/d
  cp -Rf etc $pkgdir/usr/include/d
  cp -f {crc32,index,unittest}.d $pkgdir/usr/include/d
  # Install druntime *.d modules
  mkdir -p $pkgdir/usr/include/d/druntime
  cd $srcdir/dmd2/src/druntime/
  cp -Rf import $pkgdir/usr/include/d/druntime
  # Copy license
  install -Dm644 $srcdir/dmd2/src/phobos/phoboslicense.txt $pkgdir/usr/share/licenses/$pkgname/LICENSE
  # Restore the PATH that was extended above.
  export PATH=$oldpath
}
| true |
c4dac6209992190150c64534b7751d293c20c523 | Shell | lse/docker-gitolite | /docker-entrypoint.sh | UTF-8 | 1,777 | 3.890625 | 4 | [
"MIT"
] | permissive | #!/bin/sh
# Container entrypoint for the gitolite image.
# When invoked with "sshd" it prepares host keys and sshd_config, runs confd
# once to render /etc/authkeygit/authkeygitrc, fixes ownership of the git
# home, performs first-run gitolite setup from $SSH_KEY, then execs the
# requested command.
# if command is sshd, set it up correctly
if [ "${1}" = 'sshd' ]; then
    set -- dumb-init /usr/sbin/sshd -D -e
    # Setup SSH HostKeys if needed (persisted under /etc/ssh/keys so they
    # survive container recreation).
    for algorithm in rsa dsa ecdsa ed25519
    do
        keyfile=/etc/ssh/keys/ssh_host_${algorithm}_key
        [ -f "$keyfile" ] || ssh-keygen -q -N '' -f "$keyfile" -t "$algorithm"
        grep -q "HostKey $keyfile" /etc/ssh/sshd_config || echo "HostKey $keyfile" >> /etc/ssh/sshd_config
    done
    # Setup AuthorizedKeysCommand if needed
    # (grep -q replaces the original [ -z "$(grep ...)" ] — same result,
    # no command substitution.)
    if ! grep -q '^AuthorizedKeysCommand' /etc/ssh/sshd_config; then
        echo 'AuthorizedKeysCommand /usr/local/bin/auth_key_git' >> /etc/ssh/sshd_config
    fi
    # Setup AuthorizedKeysCommandUser user if needed
    if ! grep -q '^AuthorizedKeysCommandUser' /etc/ssh/sshd_config; then
        echo 'AuthorizedKeysCommandUser root' >> /etc/ssh/sshd_config
    fi
fi
# Render the authkeygit config with confd on first run, or whenever an
# explicit CONFD_CMDLINE is supplied. eval is intentional: CONFD_CMDLINE is
# an operator-provided command line.
if [ ! -f "/etc/authkeygit/authkeygitrc" ] || [ -n "${CONFD_CMDLINE}" ]; then
    mkdir -p /etc/authkeygit
    if ! eval ${CONFD_CMDLINE:-confd -onetime -backend env}; then
        echo "confd failed" >&2
        exit 1
    fi
fi
# Fix permissions at every startup
chown -R git:git ~git
# Setup gitolite admin
if [ ! -f ~git/.ssh/authorized_keys ]; then
    if [ -n "$SSH_KEY" ]; then
        [ -n "$SSH_KEY_NAME" ] || SSH_KEY_NAME=admin
        echo "$SSH_KEY" > "/tmp/$SSH_KEY_NAME.pub"
        su - git -c "gitolite setup -pk \"/tmp/$SSH_KEY_NAME.pub\""
        rm "/tmp/$SSH_KEY_NAME.pub"
    else
        echo "You need to specify SSH_KEY on first run to setup gitolite"
        echo "You can also use SSH_KEY_NAME to specify the key name (optional)"
        echo 'Example: docker run -e SSH_KEY="$(cat ~/.ssh/id_rsa.pub)" -e SSH_KEY_NAME="$(whoami)" jgiannuzzi/gitolite'
        exit 1
    fi
# Check setup at every startup
else
    su - git -c "gitolite setup"
fi
# Hand control to the requested command (sshd by default, see above).
exec "$@"
| true |
a49581aba6fdc0b5da4f95a9724168b8b9381d54 | Shell | gjvanoldenborgh/climexp_data | /FUBData/update.sh | UTF-8 | 922 | 2.828125 | 3 | [] | no_license | #!/bin/sh
# Mirror the CMIP5 solar-irradiance dataset from FU Berlin and convert the
# annual and monthly TSI files into Climate-Explorer-style .dat files
# (leading '#' comment headers), then publish them with copyfiles.sh.
base=ftp://strat50.met.fu-berlin.de/pub/outgoing/_matthes/CMIP5_solardata
# -N: only re-download when the remote file is newer than the local copy.
wget -N $base/TSI_WLS_ann_1610_2008.txt
wget -N $base/spectra_1610_2000a_21Jan09.txt.gz
wget -N $base/spectra_2000_2008a_6May09.txt.gz
wget -N $base/TSI_WLS_mon_1882_2008.txt
wget -N $base/spectra_1882_2000m_17Dec08.txt.gz
wget -N $base/spectra_2000_2008m_6May09.txt.gz
# Decompress alongside the originals, keeping the .gz files for wget -N.
for file in *.txt.gz
do
  gunzip -c $file > `basename $file .gz`
done
# Annual series: keep the first two header lines as comments, add a units
# line, then append the data rows.
# NOTE(review): the sed pattern 's/13..\..... //' appears to strip a column of
# values of the form 13xx.xxxx — presumably a duplicate TSI column; confirm
# against the raw file layout.
head -2 TSI_WLS_ann_1610_2008.txt | sed -e 's/^/# /' > tsi_wls_ann.dat
echo "# TSI [W/m2] Total Solar Irradiance" >> tsi_wls_ann.dat
tail -n +4 TSI_WLS_ann_1610_2008.txt | sed -e 's/13..\..... //' >> tsi_wls_ann.dat
# Monthly series: same treatment, additionally dropping '.0 ' fragments.
head -2 TSI_WLS_mon_1882_2008.txt | sed -e 's/^/# /' > tsi_wls_mon.dat
echo "# TSI [W/m2] Total Solar Irradiance" >> tsi_wls_mon.dat
tail -n +4 TSI_WLS_mon_1882_2008.txt | sed -e 's/\.0 //g' -e 's/13..\..... //' >> tsi_wls_mon.dat
# Publish both generated files.
$HOME/NINO/copyfiles.sh tsi_wls_???.dat
| true |
e472bc341b468aef300e7d8f0eba962a53ff49f8 | Shell | arfc/saltproc | /tools/ci/restore-openmc.sh | UTF-8 | 1,063 | 3.171875 | 3 | [
"BSD-3-Clause"
] | permissive | #!/bin/bash
# CI helper: move cached OpenMC and MCPL build artifacts from the cache
# directories ($HOME/openmc_src, $HOME/mcpl_src) into the system prefix so a
# previously built toolchain can be reused without rebuilding.
set -ex
# Move cached OpenMC libraries to PATH
sudo mv $HOME/openmc_src/bin/openmc /usr/local/bin/openmc
OPENMC_LIBS=(libopenmc.so cmake libpugixml.a pkgconfig)
for LIB in ${OPENMC_LIBS[@]}; do
  sudo mv $HOME/openmc_src/lib/$LIB /usr/local/lib/.
done
# NOTE(review): after the individual entries above were moved out, this moves
# whatever remains of openmc_src/lib as a directory into /usr/local/lib —
# confirm this is intended (it creates /usr/local/lib/lib if anything is left).
sudo mv $HOME/openmc_src/lib/ /usr/local/lib/
#sudo mv $HOME/openmc_src/share/openmc /usr/local/share/openmc
#sudo mv $HOME/openmc_src/share/man /usr/local/share/man
# Header-only deps and OpenMC headers.
INCLUDES=(fmt xtl xtensor gsl gsl-lite openmc pugixml.hpp pugiconfig.hpp)
for I in ${INCLUDES[@]}; do
  sudo mv $HOME/openmc_src/include/$I /usr/local/include/$I
done
# Move MCPL stuff
MCPL_BINARIES=(pymcpltool mcpl-config mcpltool mcpl2ssw ssw2mcpl mcpl2phits phits2mcpl)
for BINARY in ${MCPL_BINARIES[@]}; do
  sudo mv $HOME/mcpl_src/bin/$BINARY /usr/local/bin/.
done
MCPL_LIBS=(libsswmcpl.so libphitsmcpl.so libmcpl.so)
for LIB in ${MCPL_LIBS[@]}; do
  sudo mv $HOME/mcpl_src/lib/$LIB /usr/local/lib/.
done
sudo mv $HOME/mcpl_src/include/mcpl.h /usr/local/include/.
sudo mv $HOME/mcpl_src/share/MCPL /usr/local/share/.
| true |
a254b8578715192104238ebf65e8aeeed8e72123 | Shell | RakeshVaghasiya/cortx-s3server | /scripts/create_auth_jks_password.sh | UTF-8 | 2,750 | 3.203125 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/sh
#
# Copyright (c) 2020 Seagate Technology LLC and/or its Affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For any questions about this software or licensing,
# please email opensource@seagate.com or cortx-questions@seagate.com.
#
# Replace the default password of the S3 auth server's JKS keystore with a
# freshly generated random one, and record it in keystore.properties.
# Runs in two modes depending on where the script lives: cluster deployment
# (under AUTH_INSTALL_PATH/scripts) or a developer VM.
set -e
SCRIPT_PATH=$(readlink -f "$0")
BASEDIR=$(dirname "$SCRIPT_PATH")
AUTH_INSTALL_PATH="/opt/seagate/cortx/auth"
DEV_VM_JKS_DIR="/root/.cortx_s3_auth_jks"
AUTH_KEY_ALIAS="s3auth_pass"
DEFAULT_KEYSTORE_PASSWD="seagate"
DEFAULT_KEY_PASSWD="seagate"
# Select the keystore/properties locations for the current mode and start
# from a pristine copy of the JKS template (which has the default password).
if [[ "$BASEDIR" == "$AUTH_INSTALL_PATH/scripts" ]];
then
  # this is executed for cluster deployment
  AUTH_KEYSTORE_PROPERTIES_FILE="$AUTH_INSTALL_PATH/resources/keystore.properties"
  AUTH_JKS_TEMPLATE_FILE="$AUTH_INSTALL_PATH/scripts/s3authserver.jks_template"
  AUTH_JKS_FILE="$AUTH_INSTALL_PATH/resources/s3authserver.jks"
  cp -f $AUTH_JKS_TEMPLATE_FILE $AUTH_JKS_FILE
else
  # this script executed for dev vm
  mkdir -p $DEV_VM_JKS_DIR
  cp -f $BASEDIR/s3authserver.jks_template $DEV_VM_JKS_DIR/s3authserver.jks
  cp -f $BASEDIR/../auth/resources/keystore.properties $DEV_VM_JKS_DIR/keystore.properties
  AUTH_KEYSTORE_PROPERTIES_FILE=$DEV_VM_JKS_DIR/keystore.properties
  AUTH_JKS_FILE=$DEV_VM_JKS_DIR/s3authserver.jks
fi
# Generate random password for jks keystore
generate_keystore_password(){
  echo "Generating random password for jks keystore used in authserver..."
  # Generate random password: 12 chars drawn from [a-zA-Z0-9@#+].
  # The character set deliberately avoids '/' and '&', so the raw value is
  # safe to splice into the sed replacement strings below.
  new_keystore_passwd=$(cat /dev/urandom | tr -dc 'a-zA-Z0-9@#+' | fold -w 12 | head -n 1)
  # Update keystore password
  # Note JKS store need same password for store password and key password otherwise JKS will not work
  keytool -storepasswd -storepass $DEFAULT_KEYSTORE_PASSWD -new $new_keystore_passwd -keystore $AUTH_JKS_FILE
  keytool -keypasswd --keypass $DEFAULT_KEY_PASSWD -new $new_keystore_passwd -alias $AUTH_KEY_ALIAS -storepass $new_keystore_passwd --keystore $AUTH_JKS_FILE
  # Update keystore.properties file with new password
  sudo sed -i 's/s3KeyStorePassword=.*$/s3KeyStorePassword='$new_keystore_passwd'/g' $AUTH_KEYSTORE_PROPERTIES_FILE
  sudo sed -i 's/s3KeyPassword=.*$/s3KeyPassword='$new_keystore_passwd'/g' $AUTH_KEYSTORE_PROPERTIES_FILE
  echo "jks keystore passwords are updated successfully...."
}
generate_keystore_password
| true |
19169ff3f1328bf505f85a60f85b321e51e821b0 | Shell | aegony/CSS-framework-downloader | /frameworks/susy.sh | UTF-8 | 562 | 3.546875 | 4 | [] | no_license | #!/bin/bash
# Susy
# Downloader module for the Susy CSS framework.
# NOTE(review): this file references $question, $exit_key, $down_complete,
# $cdn_source and $wrong_answer without defining them, and uses `break` at
# top level — it appears to be sourced inside a loop by a parent menu script
# that supplies those variables; confirm against the main downloader script.
# Clone the Susy repo into ./susy and open it in the file manager.
function download() {
	git clone git@github.com:oddbird/susy.git
	cd susy
	open .
}
echo
echo "$question"
read answer
if [ "$answer" = "yes" ]; then
	# Skip the download when a previous clone already exists.
	if [ -d "susy" ]; then
		echo "Susy directory already exist."
		# echo "$exit_key"
		break
	fi
	if [ ! -d "susy" ]; then
		echo "Susy is downloading ..."
		echo
		download
		echo "$down_complete"
		echo
		# Open the project homepage after a successful download.
		open https://oddbird.net/susy
		break
	fi
elif [ "$answer" = "no" ]; then
	echo "$cdn_source"
	echo "dunno why there is no CDN source"
	break
else
	echo "$wrong_answer"
	break
fi
b57cf7169d1d6727863e124d4d7dc444dc08883e | Shell | layereight/i3wm-conf | /touchpad_configure.sh | UTF-8 | 508 | 2.71875 | 3 | [] | no_license | #!/bin/sh
# Configure Synaptics touchpad properties (two-finger + natural scrolling,
# palm detection, tap actions) for the device found by touchpad_id.sh.
script_dir=$(dirname ${0})
. ${script_dir}/touchpad_id.sh
# get_touchpad_id reports the xinput device id via its *return status*, which
# is read from $? below. NOTE(review): exit statuses are limited to 0-255 and
# $(...) would also try to execute any stdout the function produces —
# presumably it prints nothing; confirm against touchpad_id.sh.
$(get_touchpad_id)
TOUCHPAD_ID=$?
#echo TOUCHPAD_ID=$TOUCHPAD_ID
# enable horizontal AND vertical scrolling
xinput --set-prop ${TOUCHPAD_ID} "Synaptics Two-Finger Scrolling" 1 1
# inverted/natural scrolling
xinput --set-prop ${TOUCHPAD_ID} "Synaptics Scrolling Distance" -69 -69
# enable palm detection
xinput --set-prop ${TOUCHPAD_ID} "Synaptics Palm Detection" 1
# Tap actions: right-top/right-bottom/left-top/left-bottom corners, then
# 1/2/3-finger taps (values are xinput button numbers; 0 = disabled).
xinput --set-prop ${TOUCHPAD_ID} "Synaptics Tap Action" 2 3 0 0 1 3 0
| true |
e5327ae2e4bd994dd358a69e26426871274fe3fc | Shell | ilventu/aur-mirror | /python2-pyechonest/PKGBUILD | UTF-8 | 850 | 2.890625 | 3 | [] | no_license | # Maintainer: masutu <masutu dot arch at gmail dot com>
# PKGBUILD for the python2 build of pyechonest (Echo Nest API client).
_modulename=pyechonest
pkgname=python2-${_modulename}
pkgver=4.2.19
pkgrel=1
pkgdesc="An open source Python library for the Echo Nest API."
arch=('any')
url="https://github.com/echonest/${_modulename}"
license=('custom:BSD3')
depends=('python2')
options=(!emptydirs)
source=(https://github.com/downloads/echonest/${_modulename}/${_modulename}-${pkgver}.zip)
md5sums=('fc65dbd5e66e3d5e5092b9423fc6200e')
package() {
  cd "$srcdir/${_modulename}"
  # Rewrite every script shebang to explicitly invoke python2, since this
  # package targets the python2 interpreter.
  for file in $(find . -name '*.py' -print); do
    sed -i 's_^#!.*/usr/bin/python_#!/usr/bin/python2_' $file
    sed -i 's_^#!.*/usr/bin/env.*python_#!/usr/bin/env python2_' $file
  done
  # Install into the package staging dir with .pyo optimization.
  python2 setup.py install --root="$pkgdir/" --optimize=1
  install -Dm644 LICENSE "${pkgdir}/usr/share/licenses/${pkgname}/LICENSE"
}
# vim:set ts=2 sw=2 et:
| true |
65064ddd701610646ad6ddf46251f72406ee1511 | Shell | ci2c/code | /scripts/julien/ictus_dcm.sh~ | UTF-8 | 598 | 3.109375 | 3 | [] | no_license | #!/bin/bash
# Import a DICOM study into the ictus subjects tree.
#   $1 - directory containing one sub-directory per DICOM series
# Dumps identifying tags from the first DICOM file so the operator can choose
# a subject folder name, converts every series to NIfTI with dcm2nii, then
# gzip-compresses the resulting .nii files with pigz.
output=/NAS/dumbo/protocoles/ictus/Data/subjects/
# Abort if the study directory cannot be entered (the original continued and
# would have operated on the wrong directory).
cd "$1" || exit 1
echo "$1"
# First DICOM file found anywhere under the study directory.
first_dicom=$(find . -type f -print -quit)
echo "${first_dicom}"
# Show study date (0008,0020) and patient name/sex/birth date; the sed strips
# each dumped line down to the bracketed tag value.
dcmdump -M +P "0008,0020" +P "0010,0010" +P "0010,0040" "${first_dicom}" +P "0010,0030" | sed -e 's/.*\[\(.*\)\].*/\1/'
echo "Enter patient folder name : "
read -r folder_name
mkdir -p "${output}${folder_name}"
# Iterate over the series directories via a glob instead of parsing `ls`
# output, so names containing spaces are handled correctly.
for serie in "$1"/*
do
	echo "checking ${serie}"
	dcm2nii -x n -r n -g n -o "${output}${folder_name}" "${serie}"/*
	echo gz compression ...
done
pigz -p 8 -v "${output}${folder_name}"/*.nii
| true |
d849762c088e6a69cacc0597508ded4dac883549 | Shell | deas/dotfiles | /hooks/post-up | UTF-8 | 830 | 2.921875 | 3 | [
"MIT"
] | permissive | #!/bin/sh
# touch "$HOME"/.psqlrc.local
#
# if [ -e "$HOME"/.vim/autoload/plug.vim ]; then
# vim -E -s +PlugUpgrade +qa
# else
# curl -fLo "$HOME"/.vim/autoload/plug.vim --create-dirs \
# https://raw.githubusercontent.com/junegunn/vim-plug/master/plug.vim
# fi
# vim -u "$HOME"/.vimrc.bundles +PlugUpdate +PlugClean! +qa
#
# reset -Q
#
# # detect old OS X broken /etc/zshenv and suggest rename
# if grep -qw path_helper /etc/zshenv 2>/dev/null; then
# dir=$(CDPATH= cd -- "$(dirname -- "$0")" && pwd -P)
#
# cat <<MSG >&2
# Warning: \`/etc/zshenv' configuration file on your system may cause unexpected
# PATH changes on subsequent invocations of the zsh shell. The solution is to
# rename the file to \`zprofile':
# sudo mv /etc/{zshenv,zprofile}
#
# (called from ${dir}/post-up:${LINENO})
#
# MSG
# fi
#
| true |
062f51137892f288f965fc1a04c43be435c72059 | Shell | brikeats/SimpleElastix-build | /make_selx.sh | UTF-8 | 1,536 | 3.078125 | 3 | [] | no_license | #!/bin/bash
# Build a manylinux wheel of SimpleElastix (SimpleITK + Elastix) inside a
# PyPA manylinux container and copy the repaired wheel back to the host.
# run this with
# docker run --rm -e PLAT=manylinux2010_x86_64 -v /home/user/local_dir:/io quay.io/pypa/manylinux2010_x86_64 /io/make_selx.sh
# All knobs are overridable through the environment.
IO_DIR=${IO_DIR:-/io}
BASE_DIR=${BASE_DIR:-$IO_DIR}
PLAT=${PLAT:-manylinux2010_x86_64}
PYTHON_TARGET=${PYTHON_TARGET:-cp38-cp38}
MODULE_NAME=${MODULE_NAME:-SimpleITK-Elastix}
MAKE_THREADS=${MAKE_THREADS:-4}
# The manylinux image ships each CPython under /opt/python/<tag>/.
PYTHON_BIN=/opt/python/$PYTHON_TARGET/bin
PYTHON_EXE=$PYTHON_BIN/python
INCLUDES=(/opt/python/$PYTHON_TARGET/include/*) # all subdirs
PYTHON_INCLUDE=${INCLUDES[0]} # get the appropriate include directory
mkdir -p "$BASE_DIR"
cd $BASE_DIR
# Fetch (or refresh) the SimpleElastix sources and start from a clean build dir.
git clone https://github.com/SuperElastix/SimpleElastix.git
cd SimpleElastix
git pull
rm -rf build
mkdir build
cd build
# Configure the SuperBuild, then re-run cmake to enable only the Python
# wrapping against the selected interpreter.
cmake ../SuperBuild
cmake -DBUILD_EXAMPLES:BOOL=OFF \
      -DBUILD_TESTING:BOOL=OFF \
      -DWRAP_CSHARP:BOOL=OFF \
      -DWRAP_JAVA:BOOL=OFF \
      -DWRAP_LUA:BOOL=OFF \
      -DWRAP_R:BOOL=OFF \
      -DWRAP_RUBY:BOOL=OFF \
      -DWRAP_TCL:BOOL=OFF \
      -DWRAP_PYTHON:BOOL=ON \
      -DPYTHON_EXECUTABLE:STRING=$PYTHON_EXE \
      -DPYTHON_INCLUDE_DIR:STRING=$PYTHON_INCLUDE .
make -j$MAKE_THREADS
# copy the setup.py script
cp SimpleITK-build/Wrapping/Python/Packaging/setup.py SimpleITK-build/Wrapping/Python/
cd SimpleITK-build/Wrapping/Python/
# Neutralize the sitkHASH check and rename the distribution to $MODULE_NAME
# before building the wheel.
sed -i.bak -e "s/sitkHASH\s*=\s*[\"'][a-zA-Z0-9]*[\"']/sitkHASH = None/" -e "s/name\s*=\s*[\"']SimpleITK[\"']/name='$MODULE_NAME'/" setup.py
$PYTHON_EXE setup.py bdist_wheel
cd dist
# auditwheel bundles shared libs and retags the wheel for $PLAT.
auditwheel repair --plat $PLAT *.whl
mkdir -p "$IO_DIR"
cp wheelhouse/*.whl "$IO_DIR"
| true |
e6e686770af277f96c3651c2416b460c6f107c61 | Shell | narmender/openbus | /core/src/main/hbase/queryscripts/sessionsPerDay.sh | UTF-8 | 206 | 2.703125 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Report web-service sessions for a given day from the wslog_session HBase table.
#   $1 - day in YYYYMMDD form (used as the "daily:<day>" column qualifier)
#   $2 - optional session row key; when given, only that row is scanned
# Example: ./sessionsPerDay.sh 20131126 [0000z2ur1hruUUG-MhpsITK9JY_:0]
#./sessionsPerDay.sh 20131126 [0000z2ur1hruUUG-MhpsITK9JY_:0]
SESSION=$2
if [ -n "$SESSION" ]; then
  # Single-row lookup: scan just the named session's columns for that day.
  # ($2 is quoted so row keys containing shell metacharacters survive intact.)
  ./scancols.sh wslog_session "$2" "daily:$1"
else
  # No session given: scan every row for that day.
  ./scanrows.sh wslog_session "daily:$1"
fi
| true |
f1f3cbea6615a9c2b23cf29f1c0df6d73303e6d6 | Shell | xsh-lib/aws | /functions/ec2/volume/snapshot/create.sh | UTF-8 | 2,012 | 3.9375 | 4 | [
"MIT"
] | permissive | #? Description:
#? Create snapshot for EC2 EBS volume.
#? The snapshot is tagged after it's created.
#?
#? Usage:
#? @create
#? [-r REGION]
#? -i VOLUME_ID
#?
#? Options:
#? [-r REGION]
#?
#? Region name.
#? Defalt is to use the region in your AWS CLI profile.
#?
#? -i VOLUME_ID
#?
#? EBS volume identifier.
#?
#? @xsh /trap/err -e
#? @subshell
#?
# Create a snapshot of an EC2 EBS volume, then tag it Name=<prefix>-<timestamp>
# where <prefix> is the volume's Name tag, else the attached instance's Name
# tag, else "snapshot". See the file header for option documentation.
function create () {
	declare OPTIND OPTARG opt
	# Region is kept in two forms: short (-r, for xsh helpers) and long
	# (--region, for the aws CLI).
	declare -a region_sopt region_lopt
	declare volume_id
	while getopts r:i: opt; do
		case $opt in
			r)
				region_sopt=(-r "${OPTARG:?}")
				region_lopt=(--region "${OPTARG:?}")
				;;
			i)
				volume_id=$OPTARG
				;;
			*)
				return 255
				;;
		esac
	done
	declare snapshot_id
	# creating snapshot
	printf "creating snapshot for volume: $volume_id ..."
	snapshot_id=$(
		aws "${region_lopt[@]}" \
			--query "SnapshotId" \
			--output text \
			ec2 create-snapshot --volume-id "$volume_id")
	printf " $snapshot_id ... [ok]\n"
	# get the tag `Name` for volume
	declare volume_tag
	volume_tag=$(xsh aws/ec2/tag/get "${region_sopt[@]}" -i "$volume_id" -t Name)
	# get instance id (the instance the volume is attached to, if any)
	declare instance_id
	instance_id=$(
		aws "${region_lopt[@]}" \
			--query "Volumes[].Attachments[].InstanceId" \
			--output text \
			ec2 describe-volumes --volume-ids "$volume_id")
	# get the tag `Name` for instance
	declare instance_tag
	instance_tag=$(xsh aws/ec2/tag/get "${region_sopt[@]}" -i "$instance_id" -t Name)
	# tag the snapshot: prefer the volume's Name, fall back to the
	# instance's Name, then to the literal "snapshot".
	declare ts snapshot_tag
	ts=$(date '+%Y%m%d-%H%M')
	snapshot_tag=${volume_tag:-${instance_tag:-snapshot}}-$ts
	printf "tagging the snapshot: $snapshot_id ..."
	xsh aws/ec2/tag/create "${region_sopt[@]}" -i "$snapshot_id" -t Name -v "$snapshot_tag"
	printf " $snapshot_tag ... [ok]\n"
}
| true |
74fe24c59739c2d88b44b68679dc411f3dccade6 | Shell | altlinux/girar | /bin/girar-check-acl-leader | UTF-8 | 1,624 | 3.9375 | 4 | [] | no_license | #!/bin/sh -efu
# Check that <person> is the (transitive) leader of <item>'s ACL entry for
# <repo>, and is therefore allowed to change that ACL. Superusers always pass.
# ACL files live in <dir> as list.packages.<repo> and list.groups.<repo>;
# each line is "<item> <leader> [members...]". A leader of the form @group is
# resolved recursively through the groups file.
. girar-sh-functions
. shell-quote
. shell-args
if [ "${1-}" = '--help' ]; then
	echo 'Usage: $PROG person item repo dir'
	exit 0
fi
[ "$#" -ge 4 ] || show_usage 'Not enough arguments.'
[ "$#" -le 4 ] || show_usage 'Too many arguments.'
person="$1"; shift
item="$1"; shift
[ -n "$item" ] ||
	fatal 'Empty item'
repository="$1"; shift
dir="$1"; shift
if GIRAR_USER="$person" girar-check-superuser "$repository"; then
	# no further checks for superuser
	exit 0
fi
acl_packages="$dir/list.packages.$repository"
acl_groups="$dir/list.groups.$repository"
# Print the reason on stdout and fail.
deny()
{
	printf '%s\n' "$*"
	exit 1
}
# Items beginning with '@' are groups; everything else is a package/project.
if [ -z "${item##@*}" ]; then
	item_type='Group'
	item_acl="$acl_groups"
else
	item_type='Project'
	item_acl="$acl_packages"
fi
# Look up the item's line in its ACL file; the first owner listed is the leader.
quote_sed_regexp_variable qitem "$item"
owners="$(sed -n "s/^$qitem[[:space:]]\+//p" "$item_acl")"
[ -n "$owners" ] ||
	deny "$item: $item_type not found in ${item_acl##*/}"
leader="${owners%% *}"
[ -n "$leader" ] ||
	deny "$item: $item_type leader not found in ${item_acl##*/}"
# While the leader is itself a group (and not @nobody), resolve it through the
# groups file. $loop accumulates visited leaders to detect group cycles.
loop=
while [ -n "$leader" -a -z "${leader##@*}" -a "$leader" != "@nobody" ]; do
	grp="$leader"
	quote_sed_regexp_variable qitem "$grp"
	owners="$(sed -n "s/^$qitem[[:space:]]\+//p" "$acl_groups")"
	[ -n "$owners" ] ||
		deny "$grp: Group not found in ${acl_groups##*/}"
	leader="${owners%% *}"
	[ -n "$leader" ] ||
		deny "$grp: Group leader not found in ${acl_groups##*/}"
	[ -z "$loop" -o -n "${loop##* $leader *}" ] ||
		deny "$leader: Group loop detected"
	loop=" $loop $leader "
done
# Only the resolved leader may change this ACL.
[ "$leader" = "$person" ] ||
	deny "$item: Permission denied, only $leader is allowed to change this acl"
| true |
f92ab632a59df85c31e4d773d265acc68669a38c | Shell | chandra-pal/vlcc | /BashScripts/copy.sh | UTF-8 | 664 | 3.21875 | 3 | [
"MIT"
] | permissive | #! /bin/bash
#script to copy files from one server to another, then bulk-create users
#from userlist.txt
# Pull: copy remote files into the local home directory.
#scp username@serverip:/sourceaddress/ destinationaddress
#when you are logged into the destination servr
scp ec2-user@52.221.243.74:/home/ec2-user/* /home/ec2-user
#script when you are logged in the source server
scp /home/ec2-user/testfile2 ec2-user@52.221.243.74:/home/ec2-user/
#CHANGE FILE PERMISSION AFTER UPLOADING THE FILES
ssh 52.221.243.74 chmod 644 /home/ec2-user/testfile2
# Create one account per line of userlist.txt.
# A while-read loop (instead of `for i in $(more userlist.txt)`) reads each
# line verbatim without word-splitting or glob expansion of the names.
while IFS= read -r user; do
    [ -n "$user" ] || continue
    echo "$user"
    adduser "$user"
done < userlist.txt
###Create an encrypted password
###You need to create encrypted password using perl crypt():
$ perl -e 'print crypt("password", "salt"),"\n"' | true |
363e9ffff70398ca1404c5ec7651efed5d56b142 | Shell | eoinwoods/RmiPerformance | /runserver.sh | UTF-8 | 433 | 2.8125 | 3 | [] | no_license | #!/bin/bash
# Launch the RMI performance server with the project JAR on the classpath.
# All expansions are quoted so the script also works when the checkout
# lives in a path containing spaces.
dir=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
jarfile="${dir}/build/libs/RmiPerformance.jar"
echo "Using JAR $jarfile for classpath"
# useCodebaseOnly=true is the default from 1.7.0_21 onwards
# http://docs.oracle.com/javase/7/docs/technotes/guides/rmi/enhancements-7.html
java -cp "${jarfile}" \
    -Djava.rmi.server.codebase="file://${jarfile}" \
    -Djava.rmi.server.useCodebaseOnly=true \
    com.artechra.SimpleRmiServer
| true |
573199fea4eba92b0c5bb0bc18c8bbb53016eadf | Shell | nnaabbcc/debian-preseed | /03.rti_dds/03.rti_dds.sh | UTF-8 | 406 | 3.046875 | 3 | [] | no_license |
# Install RTI Connext DDS from the installer archive given as $1.
dds_file=$1
# Quoted test: an empty or space-containing path no longer breaks [ -f ].
if [ -f "$dds_file" ]; then
    echo "installing dds from $dds_file"
else
    echo "please set dds installer file"
    exit 1
fi
# Unpack the installer into a fresh scratch directory.
rm -rf tmp
mkdir tmp
tar xvf "$dds_file" -C tmp
# SECURITY: the sudo password is hardcoded here; prefer configuring
# passwordless sudo for this step instead of embedding a secret.
echo 123456 | sudo -S expect dds.expect
export NDDSHOME=/opt/rti_connext_dds-5.3.0
sudo cp rti_license.dat "$NDDSHOME"
# NOTE(review): mode 655 is unusual (group/other executable, owner not);
# 644 may have been intended -- confirm before changing.
sudo chmod 655 "$NDDSHOME/rti_license.dat"
# Persist NDDSHOME for future shells.
echo "export NDDSHOME=$NDDSHOME" >> "$HOME/.bashrc"
| true |
e2c0833f095957057055c2b182988f653bbe6b70 | Shell | abelsiqueira/pres-poincare-2015 | /conicas/conicas.sh | UTF-8 | 485 | 3.4375 | 3 | [] | no_license | #!/bin/bash
# Build one PDF per line of conicas.list by substituting placeholder
# tokens in a LaTeX template and compiling the result with latexmk.
template=conicas-template.tex
file=conicas.list
# Placeholder names; each input line supplies their values in this order.
vars=(QUADA QUADB QUADC QUADD QUADE QUADF FSET FMIN XVAL YVAL)
rm -f iter*
iter=0
cat $file | while read line
do
  # Split the whitespace-separated line into one value per placeholder.
  line=($line)
  # cat -E marks every line end with '$' so the template survives the
  # newline-flattening word-splitting of the unquoted $output below.
  output=`cat -E $template`
  for i in $(seq 0 $((${#vars[@]}-1)))
  do
    var=${vars[i]}
    val=${line[i]}
    # Replace the first occurrence of the placeholder with its value.
    output=$(echo $output | sed "s/$var/$val/")
  done
  # Restore the newlines that cat -E encoded as '$ ' markers.
  echo $output | sed 's/\$ /\n/g' > tmp.tex
  latexmk -pdf tmp.tex
  mv tmp.pdf iter$iter.pdf
  iter=$((iter+1))
done
rm -f tmp.*
| true |
856632b98516de21c8293870286d392ecda8b00a | Shell | pantasio/todo | /Shell/newshell.sh | UTF-8 | 691 | 2.796875 | 3 | [] | no_license | #!/bin/sh
##################################################
# Name: $1
# Description: Does a backup of your MySQL Database utilizng LVM Snapshot.
# Script Maintainer: Le Duc Hoang
#
# Last Updated: August 8th 2013
##################################################
# Restrict PATH to standard system locations for predictable tool lookup.
PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin:/root/bin
##################################################
# Variables
#
# MySQL credentials used for the backup connection.
user=$LOGNAME
# NOTE(review): hardcoded placeholder password -- replace before real use.
password="password"
# MySQL data directory to snapshot.
datadir="/blah/important/"
# Mount point used for the temporary LVM snapshot.
tmpmountpoint="/mnt/temp_mount"
# Destination directory for finished backups.
dstdir="/blah/backups/mysql_backups/"
##################################################
# Set Level of organization wanted
#
# Date components used to organize backup folders by year/month.
YEAR=`date +%Y`
MONTH=`date +%m`
#DAY=`date +%d`
#TIME=`date +%k%M` | true |
0afbefcab0ad1a04aab342d73cf599844e3d888f | Shell | glbrimhall/bin | /dlog.sh | UTF-8 | 266 | 3.3125 | 3 | [] | no_license | #!/bin/sh
# Follow logs for a docker container, or for the whole compose project
# when a docker-compose.yml is present and no container name was given.
# Load the user's default container name, if configured.
if [ -f "$HOME/bin/docker-default-container.sh" ]; then
  . "$HOME/bin/docker-default-container.sh"
fi
# Fall back to the configured default when no container argument is given.
CONTAINER=${1:-$DEFAULT_CONTAINER}
if [ -f docker-compose.yml ] && [ "$1" = "" ]; then
  docker-compose logs --follow
else
  # Bug fix: use $CONTAINER (which honours DEFAULT_CONTAINER) instead of
  # the raw $1, which was empty when no argument was supplied.
  docker logs --follow "$CONTAINER"
fi
| true |
5707de3a617b651bf8c7c902efd41270e30da7c5 | Shell | garethmidwood/backbot-service | /install.sh | UTF-8 | 3,570 | 4.0625 | 4 | [] | no_license | #!/bin/bash
# Where the install log is written.
INSTALL_DIR=.
LOGFILE='install.log'
# Terminal colours and icons used by the progress display.
ICON_INCOMPLETE_COLOUR=`tput setaf 1`
ICON_COMPLETE_COLOUR=`tput setaf 2`
TEXT_COLOUR=`tput setaf 2`
BAR_COLOUR_COMPLETE=`tput setaf 6`
BAR_COLOUR_REMAINING=`tput setaf 3`
ERROR_COLOUR=`tput setaf 1`
NO_COLOUR=`tput sgr0`
# UTF-8 glyphs (cent sign / archaic Greek letters) used as status icons.
ICON_INCOMPLETE="${BAR_COLOUR_REMAINING}\xc2\xa2${NO_COLOUR}"
ICON_COMPLETE="${ICON_COMPLETE_COLOUR}\xcf\xbe${NO_COLOUR}"
ICON_ERROR="${ERROR_COLOUR}\xcf\xbf${NO_COLOUR}"
# Install locations for the release script and its systemd unit.
BINARY_DIR=/usr/local/bin
SERVICE_DIR=/lib/systemd/system
# Upstream artifacts are fetched into mktemp files, then copied into place.
RELEASE='https://github.com/garethmidwood/backbot-service/raw/master/backbot.sh'
TMP_RELEASE_FILE=$(mktemp)
TARGET_RELEASE_PATH="${BINARY_DIR}/backbot.sh"
SERVICE='https://github.com/garethmidwood/backbot-service/raw/master/backbot.service'
TMP_SERVICE_FILE=$(mktemp)
TARGET_SERVICE_PATH="${SERVICE_DIR}/backbot.service"
# Log a fatal error, print it in red on the progress line, and abort.
# Globals read: ERROR_COLOUR, NO_COLOUR, ICON_ERROR (set in config above).
function err {
    log "FATAL ERROR: ${1}"
    completeLogEntry
    echo -ne "\n"
    echo -ne "- ${ERROR_COLOUR}${1}${NO_COLOUR}\r"
    echo -ne "\n"
    echo -ne "- ${ICON_ERROR} installation failed"
    echo -ne "\n"
    exit 1
}
# Append a message to the install log.
# printf with a quoted argument preserves internal whitespace and never
# interprets the message as echo options (e.g. a message of "-n"),
# both of which the previous unquoted `echo $1` mishandled.
function log {
    printf '%s\n' "$1" >> "${INSTALL_DIR}/${LOGFILE}"
}
# Start a fresh log file (truncating any previous run) with a header.
function initLogEntry {
    touch ${INSTALL_DIR}/${LOGFILE}
    > ${INSTALL_DIR}/${LOGFILE}
    log "============================================="
    log "Installation started at `date`"
    log "============================================="
}
# Terminate the current log entry with a footer and spacing.
function completeLogEntry {
    log "fin"
    log ""
    log ""
}
# Render the progress bar for a completion percentage $1 (0-100).
# The bar is 20 characters wide (percentage / 5); `seq 0 N` yields N+1
# values so printf repeats the glyph N+1 times, and ${VAR:1} trims the
# surplus leading character.
function progress {
    TOTAL=100
    COMPLETE=$1
    REMAINING=$(($TOTAL-$1))
    COMPLETE_CHAR_COUNT=$(($COMPLETE/5))
    REMAINING_CHAR_COUNT=$(($REMAINING/5))
    COMPLETE_CHARS=`eval printf "%0.sϾ" $(seq 0 $COMPLETE_CHAR_COUNT)`
    REMAINING_CHARS=`eval printf "%0.s." $(seq 0 $REMAINING_CHAR_COUNT)`
    # Trailing \r keeps the cursor on the same line so later calls overwrite it.
    echo -ne "- ${ICON_INCOMPLETE} ${BAR_COLOUR_COMPLETE}installing ${BAR_COLOUR_COMPLETE}${COMPLETE_CHARS:1}${BAR_COLOUR_REMAINING}${REMAINING_CHARS:1} ${TEXT_COLOUR}(${COMPLETE}%)${NO_COLOUR}\r"
}
# Abort (via err) unless running as root; shows a transient status line.
function checkSystemRequirements {
    echo -ne "${BAR_COLOUR_REMAINING}Checking system requirements${NO_COLOUR}\r"
    if (( $EUID != 0 )); then
        err "This script must be run as root user"
    fi
}
# ---- main installation sequence -------------------------------------
initLogEntry
progress 5
checkSystemRequirements
progress 10
# Fetch the release script and install it executable into BINARY_DIR.
log "Downloading latest release to $TMP_RELEASE_FILE"
if curl -LsSo $TMP_RELEASE_FILE $RELEASE ; then
    progress 20
    log "Copying release to $TARGET_RELEASE_PATH"
    if cp $TMP_RELEASE_FILE $TARGET_RELEASE_PATH ; then
        progress 30
        log "Successfully downloaded release file. Making read/executable"
        chmod +rx $TARGET_RELEASE_PATH
        progress 40
        log "Script is now executable"
    else
        err "Error when copying release to ${TARGET_RELEASE_PATH}"
    fi
else
    err "Error when downloading release from ${RELEASE}"
fi
# Fetch the systemd unit file and install it into SERVICE_DIR.
log "Downloading service to $TMP_SERVICE_FILE"
if curl -LsSo $TMP_SERVICE_FILE $SERVICE ; then
    progress 50
    log "Copying service to $TARGET_SERVICE_PATH"
    if cp $TMP_SERVICE_FILE $TARGET_SERVICE_PATH ; then
        progress 60
        log "Successfully downloaded service file"
    else
        err "Error when copying service to ${TARGET_SERVICE_PATH}"
    fi
else
    err "Error when downloading service from ${SERVICE}"
fi
log "Creating service user"
# Create the dedicated system account only if it does not exist yet.
if ! id -u backbot; then
    useradd --system backbot
fi
# Ensure systemd is available before enabling the unit.
if ! which systemctl; then
    # TODO: Make this less debian-y
    apt-get update
    apt-get install systemd
fi
log "Enabling service"
systemctl enable backbot
log "Starting service"
systemctl start backbot
progress 100
echo -ne "- ${ICON_COMPLETE} successfully installed"
echo -ne "\n"
log "Installation completed successfully"
completeLogEntry
exit 0
| true |
d75b6afc8ba501795c3dc03426e6782ff17a86c7 | Shell | gialnet/PostgreSQLFunctions | /sql/09_rpc_script.sh | UTF-8 | 1,728 | 3.734375 | 4 | [] | no_license | #!/bin/sh
#
# Antonio Pérez Caballero
# 22 December 2011
# Unpack a release and copy its files into the target folders for the
# "Aguas" and "Mancomunidad" sites. (Comments translated from Spanish;
# the user-visible echo messages are left untouched.)
#
# usage example: ./subir_release.sh release_11_12_22.zip
#
clear
nombre_file=$1
# NOTE(review): this only checks that an argument was given, not that the
# file exists -- confirm whether [ -f "$nombre_file" ] was intended.
if [ -n "$nombre_file" ]
then
    echo 'existe, voy a borrar la versión anterior'
    # Folder name = archive name without its extension.
    carpeta="${nombre_file%.[^.]*}"
    #sudo rm -r $carpeta
    su --session-command="rm -r $carpeta" ec2-user &
else
    echo 'Not found directory'
fi
# unpack the archive
echo "descomprimir $nombre_file"
#sudo unzip -q $nombre_file
su --session-command="unzip -q $nombre_file" ec2-user &
# this still needs to be tested on SmartOS
su - usuario -c "command args"
# remove the previous version from the WEB server
# (all files and folders are removed)
#sudo rm -r /var/www/html/*
su --session-command="rm -r /var/www/html/*" ec2-user &
# Create the "mancomunidad" folder
#sudo mkdir /var/www/html/mm
su --session-command="mkdir /var/www/html/mm" ec2-user &
# copy the Apache rules file to the root directory
#sudo cp /var/www/.htaccess /var/www/html
su --session-command="cp /var/www/.htaccess /var/www/html" ec2-user &
# Copy the new version into the Apache server document root
echo "Publicar el contenido de la carpeta $carpeta"
#sudo cp -R $carpeta/* /var/www/html/
su --session-command="cp -R $carpeta/* /var/www/html/" ec2-user &
#sudo cp -R $carpeta/* /var/www/html/mm/
su --session-command="cp -R $carpeta/* /var/www/html/mm/" ec2-user &
# Adjust the connection user for the Mancomunidad site
#sudo sed -i 's/REGISTRO_AS/REGISTRO_MM/' /var/www/html/mm/php/config/pebi_cn.inc.php
su --session-command="sed -i 's/REGISTRO_AS/REGISTRO_MM/' /var/www/html/mm/php/config/pebi_cn.inc.php" ec2-user &
| true |
9482e35c48eeab0c3edf1a79880b62ff1ea46ec8 | Shell | jrd/compose-systemd | /compose-dirs | UTF-8 | 9,559 | 4.03125 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# vim: set ts=2 sts=2 sw=2:
version=1.6.1
conf=/etc/compose-dirs.conf
# Refuse to run without the system-wide configuration file.
[ -f "$conf" ] || exit 1
# shellcheck disable=SC1090,SC1091
. "$conf"
# The config must define all four mandatory settings, and the compose
# directory must exist; everything below runs relative to it.
[ -n "$compose_dir" ] && [ -n "$compose_user" ] && [ -n "$tmpl_name" ] && [ -n "$deps_file" ] || exit 1
[ -d "$compose_dir" ] || exit 1
cd "$compose_dir"
docker="$(which docker)"
# Prefer the 'docker compose' plugin; fall back to legacy docker-compose.
if "$docker" compose version | grep -q 'Docker Compose'; then
  dc="$docker compose"
else # legacy
  dc="$(which docker-compose)"
fi
# Print the help text; also embeds the effective configuration values.
usage() {
  cat <<EOF
Version $version. By Cyrille Pontvieux, 2020-2023, MIT licence
compose-dirs install OPTIONS ACTION [OPTS]
OPTIONS:
  -h, --help: this help message
  -V, --version: show version
  -v, --verbose: verbose output
ACTION:
  install: install compose-dirs on the system
  update [service]: update systemd unit files (or just the service provided)
  start, stop, reload, restart, status [service]: do the acton on all (or only provided service) systemd unit files
CONFIG:
  compose directory: $compose_dir
  compose user: $compose_user
  systemd base service name: $tmpl_name@.service
  dependencies file: $compose_dir/$deps_file
DEPS FORMAT:
  compose_to_run:compose_dep_1,compose_dep_2
EOF
}
# Install the /usr/local/bin/wait-for-cpu-idle helper (must run as root).
# The helper delays the wrapped command until the average CPU load drops
# below a threshold, so many compose services do not all start at once.
install_wait_for_cpu_idle() {
  set -e
  # shellcheck disable=SC2046
  [ $(id -u) -eq 0 ]
  mkdir -p /usr/local/bin
  # Quoted 'EOF': the embedded Python program is written out verbatim.
  cat > /usr/local/bin/wait-for-cpu-idle <<'EOF'
#!/usr/bin/env -S python3 -u
from os import execvp, cpu_count, getloadavg
from random import randint
from sys import argv, exit, stderr
from time import sleep
THRESHOLD = 0.6
DELAY_SEC = 10
MAX_DELAY_SEC = 60 * 5
NB_PROC = cpu_count()
def get_cpu_load() -> float:
    return getloadavg()[0] / NB_PROC
args = argv[1:]
total_delay = 0
pre_sleep = randint(DELAY_SEC // 2, 2 * DELAY_SEC)
print(f"Waiting {pre_sleep} seconds")
sleep(pre_sleep)
total_delay += pre_sleep
cpu_load = get_cpu_load()
while total_delay < MAX_DELAY_SEC and cpu_load > THRESHOLD:
    print(f"Average CPU load too high ({cpu_load} > {THRESHOLD})")
    print(f"Delay ({DELAY_SEC}\") starting {args}")
    sleep(DELAY_SEC)
    total_delay += DELAY_SEC
    cpu_load = get_cpu_load()
if total_delay < MAX_DELAY_SEC:
    execvp(args[0], args)
else:
    print(f"Max delay of {MAX_DELAY_SEC}\" exceeded, exit with error", file=stderr)
    exit(1)
EOF
  chmod +x /usr/local/bin/wait-for-cpu-idle
}
# Write the systemd template unit $tmpl_name@.service (must run as root)
# and reload systemd so the new template is picked up. The ExecStart /
# ExecReload / ExecStopPost lines differ depending on whether the modern
# 'docker compose' plugin or the legacy docker-compose binary is in use.
install_template() {
  set -e
  # shellcheck disable=SC2046
  [ $(id -u) -eq 0 ]
  # Unquoted EOF: $tmpl_name/$compose_user/$compose_dir expand now, while
  # the %i/%I specifiers are left for systemd to fill per instance.
  cat > "/etc/systemd/system/$tmpl_name@.service" <<EOF
[Unit]
Description=Service for docker compose in %I
BindsTo=docker.service
After=docker.service
[Service]
Type=simple
ProtectSystem=yes
ProtectHome=yes
User=$compose_user
WorkingDirectory=$compose_dir/%I
StandardOutput=journal
StandardError=journal
SyslogIdentifier=compose-%i
SyslogLevel=debug
SyslogLevelPrefix=false
ExecStartPre=@/usr/local/bin/wait-for-cpu-idle compose-wait-%i echo "Ready to start %i"
EOF
  if echo "$dc" | grep -q ' '; then # 'docker compose' case
    cat >> "/etc/systemd/system/$tmpl_name@.service" <<EOF
ExecStart=@$docker compose-%i compose up --no-color --build --remove-orphans
ExecReload=@$docker compose-reload-%i compose up --no-color --build --remove-orphans -d --wait
ExecStopPost=@$docker compose-stop-%i compose down
EOF
  else # legacy 'docker-compose' case
    cat >> "/etc/systemd/system/$tmpl_name@.service" <<EOF
ExecStart=@$dc compose-%i up --no-color --build --remove-orphans
ExecReload=@$dc compose-reload-%i up --no-color --build --remove-orphans -d
ExecStopPost=@$dc compose-stop-%i down
EOF
  fi
  cat >> "/etc/systemd/system/$tmpl_name@.service" <<EOF
TimeoutStartSec=infinity
RestartSec=5
TimeoutStopSec=1min
[Install]
WantedBy=multi-user.target
EOF
  systemctl daemon-reload
}
# List instance names whose $tmpl_name@<i>.service unit is currently
# loaded in systemd, unescaped back to their original names, one per line.
find_loaded_instances() {
  systemctl list-units -q --plain --no-legend --state=loaded --type=service "$tmpl_name@*" | cut -d' ' -f1 | xargs -I @ systemd-escape -u --instance "@"
}
# Emit every instance name declared in the dependencies file: the part
# of each line before the first ':' separator, one name per line.
find_instances() {
  awk -F: '{ print $1 }' "$compose_dir/$deps_file"
}
# Print the sorted dependency names of instance $1, one per line.
# The deps file stores them as "instance:dep1,dep2,...".
find_instance_deps() {
  grep "^$1:" "$compose_dir/$deps_file" \
    | awk -F: '{ print $2 }' \
    | tr ',' '\n' \
    | sort
}
# Compare the declared instances/dependencies with what systemd currently
# knows and print "<to_delete>:<to_update>" (two space-separated lists).
# An optional argument restricts the comparison to those instance names.
find_instances_to_update() {
  limited_instances="$1"
  to_delete=""
  to_update=""
  loaded_instances="$(find_loaded_instances)"
  instances="$(find_instances)"
  if [ -n "$limited_instances" ]; then
    # Keep only the instances the caller asked about.
    loaded_instances=$(for inst in $loaded_instances; do echo "$limited_instances" | grep -q "^$inst\$" && echo "$inst"; done)
    instances=$(for inst in $instances; do echo "$limited_instances" | grep -q "^$inst\$" && echo "$inst"; done)
  fi
  # Loaded in systemd but no longer declared -> scheduled for deletion.
  for i in $loaded_instances; do
    if ! echo "$instances" | grep -q "^$i\$"; then
      to_delete="$to_delete $i"
    fi
  done
  # Declared dependencies differing from the unit's actual ones -> update.
  for i in $instances; do
    i_escaped=$(systemd-escape "$i")
    defined_deps=$(find_instance_deps "$i")
    actual_deps=$(systemctl list-dependencies -q --plain --no-legend "$tmpl_name@$i_escaped.service" | sed -rn "/ *$tmpl_name@/{s/ *$tmpl_name@(.*)\.service/\1/;p}" | sort)
    if [ "$defined_deps" != "$actual_deps" ]; then
      to_update="$to_update $i"
    fi
  done
  echo "$to_delete:$to_update"
}
# Apply the deletions/updates computed by find_instances_to_update:
# disable removed units (and delete their drop-in dirs), (re)write the
# dependency drop-in for changed ones, then reload systemd. Root only.
update() {
  set -e
  # shellcheck disable=SC2046
  [ $(id -u) -eq 0 ]
  instances=$(find_instances_to_update "$opts")
  to_delete=$(echo "$instances" | cut -d: -f1)
  to_update=$(echo "$instances" | cut -d: -f2)
  for i in $to_delete; do
    i_escaped=$(systemd-escape "$i")
    systemctl disable --no-reload "$tmpl_name@$i_escaped.service"
    confd="/etc/systemd/system/$tmpl_name@$i_escaped.service.d"
    if [ -d "$confd" ]; then
      rm -rf "$confd"
    fi
  done
  for i in $to_update; do
    deps=$(find_instance_deps "$i")
    i_escaped=$(systemd-escape "$i")
    confd="/etc/systemd/system/$tmpl_name@$i_escaped.service.d"
    rm -rf "$confd" 2>/dev/null || true
    if [ -n "$deps" ]; then
      mkdir -p "$confd"
      # Build the Requires=/After= lists from the declared dependencies.
      requires=""
      afters="docker.service"
      for dep in $deps; do
        [ -n "$requires" ] && sep=" " || sep=""
        dep_escaped=$(systemd-escape "$dep")
        requires="$requires$sep$tmpl_name@$dep_escaped.service"
        afters="$afters $tmpl_name@$dep_escaped.service"
      done
      # Drop-in ordering this instance after (and with) its dependencies.
      cat > "$confd/deps.conf" <<EOF
[Unit]
Requires=$requires
After=$afters
EOF
    fi
    systemctl enable --no-reload "$tmpl_name@$i_escaped.service"
  done
  systemctl daemon-reload
}
# Print the deps file reordered so every service appears after the
# services it depends on (iterates a one-pass reordering to a fixed point).
# The quoted 'EOF' heredoc feeds the Python program verbatim to python3.
find_order() {
  python3 - "$compose_dir/$deps_file" <<'EOF'
from sys import argv
deps_file = argv[1]
lines = map(lambda l: l.rstrip(), list(open(deps_file)))
def order_deps_first(lines):
    tree = dict([line.split(':') for line in lines])
    prev_svc = []
    new_lines = []
    for svc, deps in tree.items():
        if svc not in prev_svc:
            if deps:
                for dep in deps.split(','):
                    if dep not in prev_svc:
                        prev_svc.append(dep)
                        new_lines.append(':'.join([dep, tree[dep]]))
            prev_svc.append(svc)
            new_lines.append(':'.join([svc, deps]))
    return new_lines
while True:
    ordered_lines = order_deps_first(lines)
    if ordered_lines == lines:
        break
    else:
        lines = ordered_lines
for line in lines:
    print(line)
EOF
}
# Start all declared instances (dependencies first, via find_order), or
# only the instance names passed in $opts. Must run as root.
start() {
  set -e
  # shellcheck disable=SC2046
  [ $(id -u) -eq 0 ]
  if [ -n "$opts" ]; then
    for i in $opts; do
      [ -n "$verbose" ] && echo "Starting $tmpl_name@$i"
      systemctl start --no-block "$tmpl_name@$i.service"
    done
  else
    for i in $(find_order | cut -d: -f1 | xargs -I @ systemd-escape "@"); do
      [ -n "$verbose" ] && echo "Starting $tmpl_name@$i"
      systemctl start --no-block "$tmpl_name@$i.service"
    done
  fi
}
# Stop instances in reverse dependency order (note the trailing tac), or
# only the instance names passed in $opts. Must run as root.
stop() {
  set -e
  # shellcheck disable=SC2046
  [ $(id -u) -eq 0 ]
  if [ -n "$opts" ]; then
    for i in $opts; do
      [ -n "$verbose" ] && echo "Stopping $tmpl_name@$i"
      systemctl stop --no-block "$tmpl_name@$i.service"
    done
  else
    for i in $(find_order | cut -d: -f1 | xargs -I @ systemd-escape "@" | tac); do
      [ -n "$verbose" ] && echo "Stopping $tmpl_name@$i"
      systemctl stop --no-block "$tmpl_name@$i.service"
    done
  fi
}
# Reload instances (reverse dependency order), or only $opts. Root only.
reload() {
  set -e
  # shellcheck disable=SC2046
  [ $(id -u) -eq 0 ]
  if [ -n "$opts" ]; then
    for i in $opts; do
      [ -n "$verbose" ] && echo "Reloading $tmpl_name@$i"
      systemctl reload --no-block "$tmpl_name@$i.service"
    done
  else
    for i in $(find_order | cut -d: -f1 | xargs -I @ systemd-escape "@" | tac); do
      [ -n "$verbose" ] && echo "Reloading $tmpl_name@$i"
      systemctl reload --no-block "$tmpl_name@$i.service"
    done
  fi
}
# Show systemd status for $opts or for all declared instances; with -v
# also include 10 journal lines plus the compose 'ps' output per instance.
status() {
  if [ -n "$opts" ]; then
    instances="$opts"
  else
    instances="$(find_instances)"
  fi
  [ -n "$verbose" ] && lines=10 || lines=0
  for i in $instances; do
    systemctl status --no-pager "--lines=$lines" -o cat "$tmpl_name@$(systemd-escape "$i").service"
    if [ -n "$verbose" ]; then
      (cd "$compose_dir/$i" && $dc ps)
    fi
  done
}
# ---- argument parsing -------------------------------------------------
# Flags may appear anywhere; the first non-flag word becomes the ACTION
# and the remaining words are its OPTS (read by the functions above).
verbose=0
action=
opts=
while [ -n "${1-}" ]; do
  arg="$1"; shift
  if [ "$arg" == "-h" ] || [ "$arg" == "--help" ]; then
    usage
    exit 0
  elif [ "$arg" == "-V" ] || [ "$arg" == "--version" ]; then
    echo "$version"
    exit 0
  elif [ "$arg" == "-v" ] || [ "$arg" == "--verbose" ]; then
    verbose=1
    # Bug fix: without this 'continue', "-v" fell through and was also
    # recorded as the action (or as an opt), breaking verbose invocations.
    continue
  fi
  if [ -z "$action" ]; then
    action="$arg"
  elif [ -z "$opts" ]; then
    opts="$arg"
  else
    opts="$opts $arg"
  fi
done
# Dispatch the requested action; unknown actions print usage and fail.
case "$action" in
  install)
    install_wait_for_cpu_idle
    install_template
    ;;
  update)
    update
    ;;
  start)
    start
    ;;
  stop)
    stop
    ;;
  reload)
    reload
    ;;
  restart)
    stop
    sleep 3
    start
    ;;
  status)
    status
    ;;
  *)
    usage >&2
    exit 1
    ;;
esac
| true |
7efb7b4d7f770003062f097216abeab5fb962378 | Shell | paultag/archvsync | /bin/websync | UTF-8 | 10,081 | 3.671875 | 4 | [] | no_license | #! /bin/bash
# No, we can not deal with sh alone.
set -e
set -u
# ERR traps should be inherited from functions too. (And command
# substitutions and subshells and whatnot, but for us the function is
# the important part here)
set -E
# websync script for Debian
# Based losely on the old websync written by an
# unknown number of different people over the years and ftpsync.
#
# Copyright (C) 2008,2009 Joerg Jaspert <joerg@debian.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; version 2.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
# In case the admin somehow wants to have this script located someplace else,
# he can set BASEDIR, and we will take that. If it is unset we take ${HOME}
# How the admin sets this isn't our place to deal with. One could use a wrapper
# for that. Or pam_env. Or whatever fits in the local setup. :)
BASEDIR=${BASEDIR:-"${HOME}"}
# Script version. DO NOT CHANGE, *unless* you change the master copy maintained
# by Joerg Jaspert and the Debian mirroradm group.
# This is used to track which mirror is using which script version.
VERSION="0815"
# Source our common functions
. "${BASEDIR}/etc/common"
########################################################################
########################################################################
## functions ##
########################################################################
########################################################################
# All the stuff we want to do when we exit, no matter where
# (runs from the EXIT/ERR/signal trap set in the main flow below):
# mail the logs as configured, rotate them with savelog, drop the lock.
cleanup() {
    trap - ERR TERM HUP INT QUIT EXIT
    # all done. Mail the log, exit.
    log "Mirrorsync done";
    if [ -n "${MAILTO}" ]; then
        # In case rsync had something on stderr
        if [ -s "${LOGDIR}/rsync-${NAME}.error" ]; then
            mail -e -s "[${PROGRAM}@$(hostname -s)] ($$) rsync ERROR on $(date +"%Y.%m.%d-%H:%M:%S")" ${MAILTO} < "${LOGDIR}/rsync-${NAME}.error"
        fi
        if [ "x${ERRORSONLY}x" = "xfalsex" ]; then
            # And the normal log
            MAILFILES="${LOG}"
            if [ "x${FULLLOGS}x" = "xtruex" ]; then
                # Someone wants full logs including rsync
                MAILFILES="${MAILFILES} ${LOGDIR}/rsync-${NAME}.log"
            fi
            cat ${MAILFILES} | mail -e -s "[${PROGRAM}@$(hostname -s)] web sync finished on $(date +"%Y.%m.%d-%H:%M:%S")" ${MAILTO}
        fi
    fi
    # Rotate the logs so the next run starts fresh.
    savelog "${LOGDIR}/rsync-${NAME}.log"
    savelog "${LOGDIR}/rsync-${NAME}.error"
    savelog "$LOG" > /dev/null
    rm -f "${LOCK}"
}
# Classify an rsync exit status ($1) with a diagnostic message ($2).
# Returns 0 when the run is acceptable, 2 when a retry after another
# push may help, and 1 (after reporting via error) for a hard failure.
check_rsync() {
    ret=$1
    msg=$2
    # 24 - vanished source files. Ignored, that should be the target of
    # $UPDATEREQUIRED and us re-running. If it's not, uplink is broken
    # anyways. 23/30 are partial-transfer/timeout conditions worth a retry.
    if [[ "${ret}" == 0 || "${ret}" == 24 ]]; then
        return 0
    elif [[ "${ret}" == 23 || "${ret}" == 30 ]]; then
        return 2
    else
        error "ERROR: ${msg}"
        return 1
    fi
}
########################################################################
########################################################################
# As what are we called?
NAME="`basename $0`"
# Now source the config.
. "${BASEDIR}/etc/${NAME}.conf"
########################################################################
# Config options go here. Feel free to overwrite them in the config #
# file if you need to. #
# On debian.org machines the defaults should be ok. #
########################################################################
########################################################################
# There should be nothing to edit here, use the config file #
########################################################################
MIRRORNAME=${MIRRORNAME:-`hostname -f`}
# Where to put logfiles in
LOGDIR=${LOGDIR:-"${BASEDIR}/log"}
# Our own logfile
LOG=${LOG:-"${LOGDIR}/${NAME}.log"}
# Where should we put all the mirrored files?
TO=${TO:-"/org/www.debian.org/www"}
# used by log() and error()
PROGRAM=${PROGRAM:-"${NAME}-$(hostname -s)"}
# Where to send mails about mirroring to?
if [ "x$(hostname -d)x" != "xdebian.orgx" ]; then
    # We are not on a debian.org host
    MAILTO=${MAILTO:-"root"}
else
    # Yay, on a .debian.org host
    MAILTO=${MAILTO:-"mirrorlogs@debian.org"}
fi
# Want errors only or every log?
ERRORSONLY=${ERRORSONLY:-"true"}
# Want full logs, ie. including the rsync one?
FULLLOGS=${FULLLOGS:-"false"}
# How many logfiles to keep
LOGROTATE=${LOGROTATE:-14}
# Our lockfile
LOCK=${LOCK:-"${TO}/Website-Update-in-Progress-${MIRRORNAME}"}
# Do we need another rsync run?
UPDATEREQUIRED="${TO}/Website-Update-Required-${MIRRORNAME}"
# Trace file for mirror stats and checks (make sure we get full hostname)
TRACE=${TRACE:-".project/trace/${MIRRORNAME}"}
# rsync program
RSYNC=${RSYNC:-rsync}
# Rsync filter rules. Used to protect various files we always want to keep, even if we otherwise delete
# excluded files
RSYNC_FILTER=${RSYNC_FILTER:-"--filter=protect_Website-Update-in-Progress-${MIRRORNAME} --filter=protect_${TRACE} --filter=protect_Website-Update-Required-${MIRRORNAME}"}
# Default rsync options for *every* rsync call
RSYNC_OPTIONS=${RSYNC_OPTIONS:-"-prltvHSB8192 --timeout 3600 --stats ${RSYNC_FILTER}"}
# Deletion/update options applied on top of RSYNC_OPTIONS for the mirror run.
RSYNC_OPTIONS2=${RSYNC_OPTIONS2:-"--max-delete=40000 --delay-updates --delete --delete-after --delete-excluded"}
# Which rsync share to use on our upstream mirror?
RSYNC_PATH=${RSYNC_PATH:-"web.debian.org"}
# our username for the rsync share
RSYNC_USER=${RSYNC_USER:-""}
# the password
RSYNC_PASSWORD=${RSYNC_PASSWORD:-""}
# a possible proxy
RSYNC_PROXY=${RSYNC_PROXY:-""}
# General excludes.
EXCLUDE=${EXCLUDE:-"--exclude ${HOSTNAME}"}
# The temp directory used by rsync --delay-updates is not
# world-readable remotely. Always exclude it to avoid errors.
EXCLUDE="${EXCLUDE} --exclude .~tmp~/"
# And site specific excludes, by default its the sponsor stuff that should be local to all (except templates)
SITE_FILTER=${SITE_FILTER:-"--include sponsor.deb.* --exclude sponsor_img.* --exclude sponsor.html --exclude sponsor.*.html --filter=protect_sponsor_img.* --filter=protect_sponsor.html --filter=protect_sponsor.*.html"}
# Hooks
HOOK1=${HOOK1:-""}
HOOK2=${HOOK2:-""}
HOOK3=${HOOK3:-""}
HOOK4=${HOOK4:-""}
# Are we a hub?
HUB=${HUB:-"false"}
# Some sane defaults
cd "${BASEDIR}"
umask 022
# If we are here for the first time, create the
# destination and the trace directory
mkdir -p "${TO}/.project/trace"
# Used to make sure we will have the archive fully and completly synced before
# we stop, even if we get multiple pushes while this script is running.
# Otherwise we can end up with a half-synced archive:
# - get a push
# - sync, while locked
# - get another push. Of course no extra sync run then happens, we are locked.
# - done. Archive not correctly synced, we don't have all the changes from the second push.
touch "${UPDATEREQUIRED}"
# Check to see if another sync is in progress
# (noclobber makes the PID-file creation atomic; a stale lock from a dead
# process is taken over).
if ! ( set -o noclobber; echo "$$" > "${LOCK}") 2> /dev/null; then
    if ! $(kill -0 $(cat ${LOCK}) 2>/dev/null); then
        # Process does either not exist or is not owned by us.
        echo "$$" > "${LOCK}"
    else
        echo "Unable to start rsync, lock file still exists, PID $(cat ${LOCK})"
        exit 1
    fi
fi
trap cleanup EXIT ERR TERM HUP INT QUIT
# Start log by redirecting everything there.
exec >"$LOG" 2>&1 </dev/null
# Look who pushed us and note that in the log.
log "Mirrorsync start"
PUSHFROM="${SSH_CONNECTION%%\ *}"
if [ -n "${PUSHFROM}" ]; then
    log "We got pushed from ${PUSHFROM}"
fi
log "Acquired main lock"
# NOTE(review): `hook $HOOK` only passes the first array element
# ("HOOKNR=n"); hook() from etc/common presumably re-parses it -- confirm.
HOOK=(
HOOKNR=1
HOOKSCR=${HOOK1}
)
hook $HOOK
# Now, we might want to sync from anonymous too.
# This is that deep in this script so hook1 could, if wanted, change things!
if [ -z ${RSYNC_USER} ]; then
    RSYNCPTH="${RSYNC_HOST}"
else
    RSYNCPTH="${RSYNC_USER}@${RSYNC_HOST}"
fi
# Now do the actual mirroring, and run as long as we have an updaterequired file.
export RSYNC_PASSWORD
export RSYNC_PROXY
while [ -e "${UPDATEREQUIRED}" ]; do
    log "Running mirrorsync, update is required, ${UPDATEREQUIRED} exists"
    rm -f "${UPDATEREQUIRED}"
    log "Syncing: ${RSYNC} ${RSYNC_OPTIONS} ${RSYNC_OPTIONS2} ${EXCLUDE} ${SITE_FILTER} ${RSYNCPTH}::${RSYNC_PATH} ${TO}"
    # rsync may fail transiently; capture its status without tripping set -e.
    set +e
    ${RSYNC} ${RSYNC_OPTIONS} ${RSYNC_OPTIONS2} ${EXCLUDE} ${SITE_FILTER} \
        ${RSYNCPTH}::${RSYNC_PATH} "${TO}" >"${LOGDIR}/rsync-${NAME}.log" 2>"${LOGDIR}/rsync-${NAME}.error"
    result=$?
    set -e
    log "Back from rsync with returncode ${result}"
    set +e
    check_rsync $result "Sync went wrong, got errorcode ${result}. Logfile: ${LOG}"
    GO=$?
    set -e
    if [ ${GO} -eq 2 ] && [ -e "${UPDATEREQUIRED}" ]; then
        log "We got error ${result} from rsync, but a second push went in hence ignoring this error for now"
    elif [ ${GO} -ne 0 ]; then
        exit 3
    fi
    HOOK=(
    HOOKNR=2
    HOOKSCR=${HOOK2}
    )
    hook $HOOK
done
# Record the trace file used for mirror monitoring.
mkdir -p "${TO}/.project/trace"
LC_ALL=POSIX LANG=POSIX date -u > "${TO}/${TRACE}"
echo "Used websync version: ${VERSION}" >> "${TO}/${TRACE}"
echo "Running on host: $(hostname -f)" >> "${TO}/${TRACE}"
HOOK=(
HOOKNR=3
HOOKSCR=${HOOK3}
)
hook $HOOK
# If configured as a hub, propagate the push to our downstream mirrors.
if [ x${HUB} = "xtrue" ]; then
    log "Trigger slave mirrors"
    ${BASEDIR}/bin/runmirrors "websync"
    log "Trigger slave done"
    HOOK=(
    HOOKNR=4
    HOOKSCR=${HOOK4}
    )
    hook $HOOK
fi
# All done, rest is done by cleanup hook.
| true |
c9d4d33a348576a7709a9491f36c0a3a2464597b | Shell | robin-31/Shell-Scripting-Program | /Sequences-Problem/diceRange.sh | UTF-8 | 142 | 2.90625 | 3 | [] | no_license | #!/bin/bash -x
# Simulate one dice roll uniformly distributed in [lowerLimit, upperLimit].
lowerLimit=0
upperLimit=6
# Bug fix: the original read the misspelled $lowerLimt (unset), so the
# range size was computed from an empty value instead of lowerLimit.
diff=$(( upperLimit - lowerLimit + 1 ))
# Offset by lowerLimit so the roll generalizes to ranges not starting at 0.
dice=$(( lowerLimit + RANDOM % diff ))
echo "your dice number is : $dice"
| true |
da49e729a8c122c82ca961f4b9176f5b118e73bc | Shell | HelenaNascimento/spyder | /continuous_integration/posix/runtests.sh | UTF-8 | 524 | 3.0625 | 3 | [
"LGPL-3.0-or-later",
"OFL-1.1",
"LGPL-2.1-or-later",
"CC-BY-2.5",
"CC-BY-3.0",
"CC-BY-4.0",
"GPL-1.0-or-later",
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown",
"GPL-3.0-only",
"LGPL-2.1-only",
"LicenseRef-scancode-proprietary-license",
"MIT",
"LGPL-3.0-only",
"Python-2.0",
"GPL-2.0-only",
"Apache-2.0",
"LGPL-2.0-or-later",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/bin/bash
# Activate the CI conda environment before running any tests.
source $HOME/miniconda/etc/profile.d/conda.sh
conda activate test
# We have two kinds of tests:
#
# 1. The new ones, based on pytest
# 2. The old ones, present in the main section
#    at the end of several files.
#
# Notes:
# - We always run our new tests in Travis.
# - Circle runs a mix of both for old Python versions or
#   things we can test in macOS.
# CI_PYTEST selects which suite this job executes.
if [ "$CI_PYTEST" = "true" ]; then
    python bootstrap.py -- --reset
    python runtests.py
else
    ./continuous_integration/posix/modules_test.sh
fi
| true |
07542149f6166c6c65cc24f0c25306b9fbd59356 | Shell | damienSimon/os-installer | /scripts_bash/config/environment_variables_for_ubuntu_on_virtualbox.sh | UTF-8 | 1,128 | 2.640625 | 3 | [] | no_license | #!/bin/bash
# Environment variables for the test Ubuntu 18.04.4 LTS VM installed in
# VirtualBox. (Header translated from French; the variable names are kept
# in French because other scripts read them.)
NOM_VIRTUAL_MACHINE="test-vm-ubuntu-18-04"
REPERTOIRE_VIRTUAL_BOX="/home/dasim/perso/outils/VirtualBox"
# Stock Ubuntu ISO alternative, kept disabled for reference:
#CHEMIN_FICHIER_ISO="${REPERTOIRE_VIRTUAL_BOX}/iso/ubuntu-18.04.4-desktop-amd64.iso"
CHEMIN_FICHIER_ISO="${REPERTOIRE_VIRTUAL_BOX}/iso/test_dasim_iso_perso1.iso"
# Virtual disk image created for the VM.
CHEMIN_FICHIER_VDI="${REPERTOIRE_VIRTUAL_BOX}/VMs/${NOM_VIRTUAL_MACHINE}/${NOM_VIRTUAL_MACHINE}.vdi"
CHEMIN_REPERTOIRE_SCRIPTS_BASH="/home/dasim/perso/developpement/os-installer/scripts_bash"
# Unattended-install templates and the post-install hook command.
SCRIPT_TEMPLATE="${CHEMIN_REPERTOIRE_SCRIPTS_BASH}/UnattendedTemplates_override/ubuntu_preseed_override.cfg"
POST_INSTALL_TEMPLATE="${CHEMIN_REPERTOIRE_SCRIPTS_BASH}/UnattendedTemplates_override/debian_postinstall_override.sh"
POST_INSTALL_COMMAND="/bin/bash ${CHEMIN_REPERTOIRE_SCRIPTS_BASH}/scripts/01_install_docker.sh"
# VM sizing: RAM (MB), video RAM (MB), disk (MB), CPU count.
MEMOIRE_ALLOUEE_VM="8192"
VRAM_ALLOUEE_VM="256"
ESPACE_DISQUE_ALLOUEE_VM="30000"
NB_CPU_VM="2"
# Guest OS account and locale settings used by the preseed template.
OS_USER="dasim"
OS_PASSWORD="${OS_USER}"
OS_LANGUAGE="fr_FR"
OS_LOCALE="${OS_LANGUAGE}"
OS_PAYS="FR"
OS_KEYBOARD="fr"
OS_TIMEZONE="Europe/Paris" | true |
10ba7ea52c9962dd3fb2912fc790ad293a5f56e5 | Shell | ercolessi/O2DataProcessing | /tools/epn/run.sh | UTF-8 | 1,946 | 2.765625 | 3 | [] | no_license | #!/bin/bash
# Configure the EPN workflow-generation environment, then run gen_topo.sh
# to produce the topology XML. Each export below is consumed by gen_topo.sh.
export GEN_TOPO_PARTITION=test # ECS Partition
export DDMODE=processing # DataDistribution mode - possible options: processing, disk, processing-disk, discard
# Use these settings to fetch the Workflow Repository using a hash / tag
#export GEN_TOPO_HASH=1 # Fetch O2DataProcessing repository using a git hash
#export GEN_TOPO_SOURCE=v0.5 # Git hash to fetch
# Use these settings to specify a path to the workflow repository in your home dir
export GEN_TOPO_HASH=0 # Specify path to O2DataProcessing repository
export GEN_TOPO_SOURCE=/home/drohr/alice/O2DataProcessing # Path to O2DataProcessing repository
export GEN_TOPO_LIBRARY_FILE=testing/detectors/TPC/workflows.desc # Topology description library file to load
export GEN_TOPO_WORKFLOW_NAME=ctf-and-display # Name of workflow in topology description library
export WORKFLOW_DETECTORS=ALL # Optional parameter for the workflow: Detectors to run reconstruction for (comma-separated list)
export WORKFLOW_DETECTORS_QC= # Optional parameter for the workflow: Detectors to run QC for
export WORKFLOW_DETECTORS_CALIB= # Optional parameters for the workflow: Detectors to run calibration for
export WORKFLOW_PARAMETERS= # Additional paramters for the workflow
export RECO_NUM_NODES_OVERRIDE=0 # Override the number of EPN compute nodes to use (default is specified in description library file)
export NHBPERTF=256 # Number of HBF per TF
# Generate the topology and capture it for inspection.
/home/epn/pdp/gen_topo.sh > $HOME/gen_topo_output.xml
| true |
194f072677040b20f4851328c9bb2554addcaf52 | Shell | lkiarest/pycart | /docker.sh | UTF-8 | 426 | 3.25 | 3 | [] | no_license | #!/bin/bash
# Build the pycart Docker image and start a container from it.
# Usage: docker.sh [prod]
#   "prod"         -> production settings, bridge networking
#   anything else  -> dev settings, host networking
env_type=$1

case "$env_type" in
  prod)
    setting_file=settings.prod.py
    nettype=bridge
    ;;
  *)
    setting_file=settings.dev.py
    nettype=host
    ;;
esac

# build image
echo building image ...
sudo docker build --build-arg SETTING_FILE=$setting_file -t pycart .

# start container
echo start container ...
sudo docker run -d --name pycart --net $nettype -p 8080:8080 pycart
echo container started. visit http://localhost:8080
| true |
6190bed5bcabda5c1c1d135b3cd57d17263361e4 | Shell | termux/termux-packages | /packages/fish/build.sh | UTF-8 | 1,093 | 2.75 | 3 | [
"Apache-2.0"
] | permissive | TERMUX_PKG_HOMEPAGE=https://fishshell.com/
# Termux build recipe for the fish shell; variables and hooks below are
# consumed by the termux-packages build system.
TERMUX_PKG_DESCRIPTION="The user-friendly command line shell"
TERMUX_PKG_LICENSE="GPL-2.0"
TERMUX_PKG_MAINTAINER="@termux"
TERMUX_PKG_VERSION="3.6.1"
TERMUX_PKG_REVISION=1
TERMUX_PKG_SRCURL=https://github.com/fish-shell/fish-shell/releases/download/$TERMUX_PKG_VERSION/fish-${TERMUX_PKG_VERSION}.tar.xz
TERMUX_PKG_SHA256=55402bb47ca6739d8aba25e41780905b5ce1bce0a5e0dd17dca908b5bc0b49b2
TERMUX_PKG_AUTO_UPDATE=true
# fish calls 'tput' from ncurses-utils, at least when cancelling (Ctrl+C) a command line.
# man is needed since fish calls apropos during command completion.
TERMUX_PKG_DEPENDS="libc++, ncurses, libandroid-support, ncurses-utils, man, bc, pcre2, libandroid-spawn"
TERMUX_PKG_BUILD_IN_SRC=true
TERMUX_PKG_EXTRA_CONFIGURE_ARGS="
-DBUILD_DOCS=OFF
"
# Fold preprocessor flags into CXXFLAGS before CMake configure runs.
termux_step_pre_configure() {
	CXXFLAGS+=" $CPPFLAGS"
}
# Hook Termux's command-not-found helper into fish after installation.
# $TERMUX_PREFIX expands now; \$argv[1] is escaped so fish sees it literally.
termux_step_post_make_install() {
	cat >> $TERMUX_PREFIX/etc/fish/config.fish <<HERE
function __fish_command_not_found_handler --on-event fish_command_not_found
    $TERMUX_PREFIX/libexec/termux/command-not-found \$argv[1]
end
HERE
}
| true |
3797904bb34bcf132228dfe0c6a12c94b6d271e1 | Shell | ChrisTheShark/elasticloader-image | /load_data.sh | UTF-8 | 970 | 3.75 | 4 | [] | no_license | #!/bin/bash
# Create the "vehicles" index in Elasticsearch and bulk-load test documents.
# Exits 0 on success, 1 on any HTTP failure.
echo "Loading index to Elasticsearch."
status=$(curl --write-out %{http_code} --silent --output /dev/null \
    -u elastic:changeme -X PUT http://elasticsearch:9200/vehicles \
    -d @/opt/index.json --header "Content-Type: application/json")

# Allowing 400 as the index may have already been added. Remove
# if use case requires fresh creation each run.
# (The obsolescent `-o` test operator was replaced by `||`; $status quoted.)
if [ "$status" -eq 200 ] || [ "$status" -eq 400 ]
then
    echo "Successfully created index, loading test documents."
    # The stray -v (verbose) flag contradicted --silent and spammed stderr;
    # use plain -X like the first request.
    status=$(curl --write-out %{http_code} --silent --output /dev/null \
        -u elastic:changeme -X PUT http://elasticsearch:9200/vehicles/_bulk \
        --data-binary @/opt/data.json --header "Content-Type: application/json")
    if [ "$status" -eq 200 ]
    then
        echo "Data load complete. Exiting gracefully."
        exit 0
    else
        echo "Index created, data load failed!"
        exit 1
    fi
else
    echo "Could not create index! Received status code $status." >&2
    exit 1
fi
| true |
094b36679626f05ae378329d63509347e7d3a0cf | Shell | fzdxyhr/ads-ha | /src/main/resources/ha/shell/stop_group_mysql.sh | UTF-8 | 1,340 | 3.375 | 3 | [] | no_license | #!/bin/bash
## Host directory the container's MySQL configuration is bind-mounted to
## (translated from the original Chinese comment).
configDir=/home/group/config
## updateMyCnf below rewrites the my.cnf stored in that directory.
function updateMyCnf(){
    ## Regenerate the mysqld config that the container bind-mounts.
    ## A single quoted heredoc replaces the former chain of 14 `echo >>`
    ## calls: one write, $configDir safely quoted, same directives.
    cat > "$configDir/my.cnf" <<'EOF'
[mysqld]
skip-host-cache
skip-name-resolve
pid-file =/var/run/mysqld/mysqld.pid
socket =/var/run/mysqld/mysqld.sock
port=3306
basedir=/usr
datadir=/var/lib/mysql
tmpdir=/tmp
lc-messages-dir=/usr/share/mysql
explicit_defaults_for_timestamp
log-error=/var/log/mysql/error.log
sql_mode=NO_ENGINE_SUBSTITUTION,STRICT_ALL_TABLES
symbolic-links=0
EOF
}
## Install the mysql-client package on the host (translated from Chinese).
## NOTE(review): not invoked anywhere in this script — presumably kept for
## manual/one-off use; confirm before removing.
function execMysqlClient(){
    sudo apt-get update
    sudo apt autoremove mysql-client -y
    sudo apt-get install mysql-client -y
}
function main() {
    ## Rewrite the bind-mounted my.cnf (translated from Chinese).
    updateMyCnf
    ## Restart the container so mysqld picks up the new config; $1 is the
    ## container name passed as the script's first argument (unquoted here).
    docker restart $1
}
# Ensure the bind-mount directory and config file exist, then regenerate the
# config and restart the container named by $1.
myPath="$configDir/"
configFile="${myPath}my.cnf"
mkdir -p "$myPath"
touch "$configFile"
# "$@" (quoted) forwards the script arguments verbatim; the previous unquoted
# $@ would re-split any argument containing whitespace.
main "$@"
exit 0
| true |
1bd573116c96e440fd8e3b1d608969e9e810663e | Shell | Constellation-Labs/constellation | /main-net/modules/grafana/templates/cluster_info_to_targets.sh.tpl | UTF-8 | 321 | 2.59375 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env sh
# Terraform template: ${url} below is interpolated by templatefile(), not by
# the shell.  Fetches cluster info and writes Prometheus file-SD targets.
URL="${url}"

QUERY='
map({
  targets: [(.ip.host + ":9000")],
  labels: {
    alias: .alias,
    id: .id.hex
  }
})
'

# Data must never be used as a printf FORMAT string: '%' or '\' sequences in
# the JSON would be interpreted.  '%s' keeps the payload literal.
RESPONSE=$(curl "$URL") && \
JSON=$(echo "$RESPONSE" | jq '.' | jq "$QUERY") && \
printf '%s' "$JSON" | jq '.' > /home/admin/grafana-dashboard/prometheus/targets/targets.json
| true |
1002446ed6f2ea76783170022f2f0127ebe587a1 | Shell | alexnoz/wasm-worker-api | /test/fixtures/run | UTF-8 | 1,233 | 3.8125 | 4 | [] | no_license | #!/usr/bin/env bash
# Print all arguments in bold yellow, followed by a newline.
echo_yellow () {
  # Keep user text out of the printf format string so '%' and '\' in the
  # message are printed literally instead of being interpreted.
  printf '\033[1;33m%s\033[0m\n' "$*"
}
# Compile main.cc to a WASM module with emscripten, after baking this
# script's absolute directory into the glue code's pre.js.
file="module"
build="build"
name=$(basename "$0")
tempPre="_pre.js"

# Placeholder
pathPH="<%path%>"

# An absolute path to the folder containing this script.
# We need this because this script can be run from any directory.
path=$(echo "$(pwd)/$0" | sed -e "s/$name//" -e 's/\.\///')

# Recreate the build directory from scratch.
test -d $path$build > /dev/null 2>&1 && rm -rf $path$build
mkdir $path$build

pathToMod="$path$build/"

# On Windows shells (msys/cygwin) rewrite the POSIX path to a
# backslash-escaped drive path so Node can resolve the .wasm file.
case "$OSTYPE" in
  msys|cygwin)
    pathToMod=$(echo "$pathToMod" | sed -e 's/^\///' -e 's/\//\\\\\\\\/g' -e 's/^./\0:/');;
esac

# Substitute the placeholder with the absolute path in the `pre.js`,
# contents of which will be inserted at the beginning of the glue code.
# We need this so that the `Module` could find the .wasm file.
sed -e "s|$pathPH|$pathToMod|" < "$path/pre.js" > "$path/$tempPre"

echo_yellow Compiling C++...
# Compilation
emcc --bind "$path/main.cc" -Oz -o "$path/$build/$file.js" --pre-js "$path/$tempPre" \
  -s WASM=1 -s NO_EXIT_RUNTIME=1 -s ALLOW_MEMORY_GROWTH=1 \
  -s "EXPORTED_RUNTIME_METHODS=['cwrap', 'addOnPostRun']" \
  -s "DEFAULT_LIBRARY_FUNCS_TO_INCLUDE=['malloc', 'free']" \
  -s ENVIRONMENT=node

echo_yellow Done
# Remove the generated temporary pre.js.
rm "$path/$tempPre"
| true |
7b1da5d2a375a772fea37cf7adc30409ca4ff78a | Shell | OmarCastro/dotfiles | /.i3/i3blocks/scripts/disk | UTF-8 | 1,647 | 3.390625 | 3 | [] | no_license | #!/bin/sh
# Copyright (C) 2014 Julien Bonjean <julien@bonjean.info>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

# i3blocks disk indicator: prints "free/total" for the filesystem holding
# $BLOCK_INSTANCE (default $HOME) and colors red when free space drops below
# $1 percent.  Left click (BLOCK_BUTTON=1) shows a rofi overview of all
# mounted filesystems, highlighting those at 90%+ usage.
# NOTE(review): despite the /bin/sh shebang this uses bashisms
# (herestrings, arithmetic expansion) — run with bash.

DIR="${BLOCK_INSTANCE:-$HOME}"
ALERT_LOW="${1:-10}" # color will turn red under this value (default: 10%)

case "$BLOCK_BUTTON" in
  1)
    CONTENT=$(df -h | awk '{print $6 " " $4 " / " $2 " " $5}' | tail -n +2 | column -t -N filesystem,free,/,total,use% )
    # Arithmetic expansion instead of the deprecated `let`; width = longest
    # line plus padding.
    WIDTH=$(( $(wc -L <<< "$CONTENT") + 4 ))
    echo "$CONTENT" | tail +2 | awk '/ 9[0-9]%/{print "<span background=\"#ee4835\" foreground=\"#eee8d5\">"$0"</span>"}1' | rofi \
      -dmenu \
      -mesg "$(echo "$CONTENT" | head -n 1 | sed -r 's|([^ ]+)|<b><u>\1</u></b>|g')" \
      -markup \
      -theme "omar-message" \
      -m -3 \
      -theme-str '#window {anchor:southeast; width:'"$WIDTH"'ch;}' \
      -eh 1 \
      -no-fixed-num-lines \
      > /dev/null
    ;;
esac

df -h -P -l "$DIR" | awk -v alert_low="$ALERT_LOW" '
/\/.*/ {
  # full text
  print $4 "/" $2
  # short text
  print $4
  use=$5
  # no need to continue parsing
  exit 0
}
END {
  gsub(/%$/,"",use)
  if (100 - use < alert_low) {
    # color
    print "#FF5555"
  }
}
'
| true |
6581b1b300945bf6a9f823f0bd7cfce000c1cf34 | Shell | Jd-75/torbrowser-launcher-git | /PKGBUILD | UTF-8 | 723 | 2.90625 | 3 | [] | no_license | pkgname=torbrowser-launcher-git
pkgver=v0.1.8
pkgrel=1
pkgdesc="A program to help you download, keep updated, and run the Tor Browser Bundle."
arch=('any')
url="https://github.com/micahflee/torbrowser-launcher"
license=('MIT')
depends=('python2' 'gnupg' 'wmctrl' 'python2-psutil' 'python2-pyliblzma' 'python2-txsocksx' 'python2-service-identity')
makedepends=('git' 'python2-setuptools')
source=("$pkgname"::git://github.com/micahflee/torbrowser-launcher.git)
md5sums=('SKIP')
pkgver() {
cd "$pkgname"
local ver=$(git describe --always)
printf "%s\n" "${ver//-/.}"
}
package() {
cd "$pkgname"
sed -i '1s/python$/python2/' torbrowser-launcher
python2 setup.py install --root=$pkgdir
}
# vim:set ts=2 sw=2 et:
| true |
a8f1408d54dbabc5855cbf1d6a860f65e1054a63 | Shell | p-org/P | /Tst/PortfolioTests/Scripts/deploy_coyote.sh | UTF-8 | 1,997 | 3.671875 | 4 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Compile a P model to C# with pcl, then model-check the resulting DLL with
# PMC and collect statistics.
# Positional args: tool-name, project path, project name, time limit,
# memory limit, then any extra args forwarded to pmc.
toolName=${1}
shift
projectPath=${1}
shift
projectName=${1}
shift
timeMax=${1}
shift
memMax=${1}
shift
# Remaining arguments go to pmc verbatim (intentionally unquoted later so
# they word-split back into separate options).
args=$@
shopt -s expand_aliases
source ~/.bash_profile
outPath="runs/${toolName}/${projectName}"
echo -e "--------------------"
echo -e "Project Name: ${projectName}"
echo -e "Input Path  : ${projectPath}"
echo -e "Output Path : ${outPath}"
projectPath=$(realpath ${projectPath})
# Start from a clean output directory for this run.
if [ -d "${outPath}" ]; then rm -Rf ${outPath}; fi
mkdir -p ${outPath}
echo -e "--------------------"
echo -e "Compiling P Model into C#"
count=`ls -1 ${projectPath}/*.pproj 2>/dev/null | wc -l`
if [ $count != 0 ]
then
echo -e "  Ignoring .pproj File"
fi
# Collect all .p sources (and any C# foreign-function files) under the project.
inputFiles=$(find ${projectPath} -not -path "*/.*" -not -name ".*" -type f -name "*.p")
inputCSharpFiles=""
if [ -d "${projectPath}/PForeign" ]; then
inputCSharpFiles=$(find ${projectPath}/PForeign -not -path "*/.*" -not -name ".*" -type f -name "*.cs")
fi
if [[ ! -z "$inputCSharpFiles" ]]
then
echo -e "  Found CSharp Foreign Functions"
cp ${inputCSharpFiles} ${outPath}
fi
pcl ${inputFiles} -outputDir:${outPath} > ${outPath}/compile.out
# The compiler's stdout is parsed for success and for the generated DLL path.
if grep -q "Build succeeded." ${outPath}/compile.out; then
if grep -q ".dll" ${outPath}/compile.out; then
dllFile=$(grep ".dll" ${outPath}/compile.out | awk 'NF>1{print $NF}')
echo -e "  Done"
else
echo -e "  .dll file not found. Check ${outPath}/compile.out for details"
tail -50 ${outPath}/compile.out
exit
fi
else
echo -e "  Compilation fail. Check ${outPath}/compile.out for details"
tail -50 ${outPath}/compile.out
exit
fi
echo -e "--------------------"
echo -e "Running PMC"
cd ${outPath}
# Tee stdout/stderr to run.out / run.err while still streaming to console.
pmc ${dllFile} -t ${timeMax} ${args} > >(tee -a run.out) 2>> >(tee -a run.err >&2)
cd -
mkdir -p ${outPath}/output
python3 Scripts/coyote_stats.py ${projectName} ${outPath}/run.out > ${outPath}/output/stats-${projectName}.log
mkdir -p stats
python3 Scripts/coyote_compile_results.py --tool ${toolName} ${outPath}/output/stats-${projectName}.log
| true |
b9918137f346b1b8ca067b77e0cfa317c2b2f15e | Shell | saga1015/scripts | /setup-scripts/git-install | UTF-8 | 364 | 2.96875 | 3 | [] | no_license | #!/bin/bash
###############################
#
# Git 1.7.7.3
# http://git-scm.com/
#
###############################

# Abort on any failure: previously every command's status was ignored, so a
# failed download or a FAILED sha1sum verification still led straight to
# `make install`.
set -e

cd /usr/local/src
wget http://git-core.googlecode.com/files/git-1.7.7.3.tar.gz
# sha1sum -c exits non-zero on mismatch, which now stops the script.
echo '382ee40da74a1b4a1875820c0f0a35c9ccd750f8  git-1.7.7.3.tar.gz' | sha1sum -c
tar -xvzf git-1.7.7.3.tar.gz && cd git-1.7.7.3
./configure
make && make install
| true |
a9e123b2df7271bf4c4fc57486e95f96fffa9fdf | Shell | Raytsang123/T-Miner | /fakeddit/init_benign.sh | UTF-8 | 435 | 2.640625 | 3 | [
"MIT"
] | permissive | if [ ! -d "$1" ]; then
mkdir $1
mkdir $1/checkpoints
mkdir $1/data
python split_train_test.py $1 $2
# cp data/train_x.txt data/train_y.txt data/dev_x.txt data/dev_y.txt data/test_x.txt data/test_y.txt $1/data/
cp data/train_def_x.txt data/train_def_y.txt data/dev_def_x.txt data/dev_def_y.txt $1/data/
else
cp data/train_def_x.txt data/train_def_y.txt data/dev_def_x.txt data/dev_def_y.txt $1/data/
fi
| true |
83c9dfb152b34d9745d5622d757353f103c6b4a0 | Shell | wuqunyong/APie | /scripts/make_release.sh | UTF-8 | 1,835 | 3.890625 | 4 | [] | no_license | #! /bin/bash
# Build a release zip: reads Name/Version/Release from the local .spec file,
# stages the release template plus the built RPM and SQL updates in a temp
# dir, substitutes @@placeholders@@, and zips the result next to this script.
set -e # Exit immediately if a simple command exits with a non-zero status.
set -x # activate debugging from here
trap 'echo ">>> Something went wrong. Unable to generate the release package <<<"' ERR
echo "$0"
BASE_DIR="$(cd "$(dirname -- "$0")" ; pwd)"
echo "CurDir: $BASE_DIR"
SPEC=`ls *.spec`
# The spec file defines __name/__ver/__rel macros; awk grabs the 3rd field.
Name=`awk '/__name/{printf $3; exit;}' "$SPEC"`
Version=`awk '/__ver/{printf $3; exit;}' "$SPEC"`
Release=`awk '/__rel/{printf $3; exit;}' "$SPEC"`
ZipName="${Name}-${Version}-${Release}"
PackageName="${ZipName}.x86_64.rpm"
echo "Name: $Name"
echo "Version: $Version"
echo "Release: $Release"
echo "PackageName: $PackageName"
if [ ! -e ./build/x86_64/${PackageName} ]
then
echo "./build/x86_64/${PackageName} not exist"
exit 1
fi
# Refuse to overwrite an existing zip (it is removed, then the script aborts).
if [ -e "$ZipName.x86_64.zip" ]
then
/bin/rm -f "$ZipName.x86_64.zip"
echo "rm -f $ZipName.x86_64.zip"
exit 1
fi
TMP_DIR="`mktemp -d`"
echo "TMP_DIR=$TMP_DIR"
# make sure we won't remove the wrong files
echo $TMP_DIR | grep "^/tmp/" >/dev/null 2>&1 || exit 1
#trap "rm -rf $TMP_DIR" EXIT
mkdir -p "$TMP_DIR/$ZipName"
/bin/cp -rf ./releaseTemplate/* "$TMP_DIR/$ZipName"
/bin/cp -f ./build/x86_64/${PackageName} "$TMP_DIR/$ZipName/res"
/bin/cp -rf ./SQL/updates/* "$TMP_DIR/$ZipName/sql/updates"
# Any first argument means "also ship the base SQL schema".
if [ -n "$1" ]; then
echo "first params:$1"
/bin/cp -rf ./SQL/base/* "$TMP_DIR/$ZipName/sql/base"
else
echo "no params"
fi
pushd "$TMP_DIR"
# Bake the concrete version/release/package name into the staged files.
sed -i "s/@@version@@/${Version}/g" ./$ZipName/config/install.conf
sed -i "s/@@release@@/${Release}/g" ./$ZipName/config/install.conf
sed -i "s/@@package_name@@/${PackageName}/g" ./$ZipName/shell/install.sh
sed -i "s/@@package_name@@/${PackageName}/g" ./$ZipName/shell/upgrade.sh
/usr/bin/zip -r "$ZipName.x86_64.zip" "$ZipName/"
/bin/mv "$ZipName.x86_64.zip" "${BASE_DIR}/"
popd
echo ">>> Generate successfully! <<<"
| true |
f12eb69f94919bff53f64f3f73093502453b238a | Shell | bhuang95/JEDI-AERODA-NUOPCchem-WORKFLOW | /ush/JEDI/run_seasBin_fcst2da_fgat.sh | UTF-8 | 2,188 | 3.21875 | 3 | [] | no_license | #!/bin/ksh
# FGAT pre-processing for aerosol DA: for each restart time in the window,
# back up the fv_tracer restart file and cyclically rename the sea-salt bins
# (seas1..seas5) with ncrename.  Relies on workflow env vars ($NDATE, $CDATE,
# $assim_freq, restart_interval_*, FGAT3D*) exported by the calling job.
set -x
JEDIcrtm=${HOMEgfs}/fix/jedi_crtm_fix_20200413/CRTM_fix/
WorkDir=${DATA:-$pwd/hofx_aod.$$}
RotDir=${ROTDIR:-/scratch1/BMC/gsd-fv3-dev/MAPP_2018/bhuang/JEDI-2020/JEDI-FV3/expRuns/aero_c96_jedi3densvar/dr-data/}
validtime=${CDATE:-"2001010100"}
nexttime=$($NDATE $assim_freq $CDATE)
cdump=${CDUMP:-"gdas"}
itile=${itile:-1}
mem=${imem:-0}
mkdir ${WorkDir}
# mem > 0: ensemble member; mem == 0: ensemble mean; mem == -1: control run.
if [[ ${mem} -gt 0 ]]; then
cdump="enkfgdas"
memdir="mem"`printf %03d $mem`
restart_interval=${restart_interval_enkf}
elif [[ ${mem} -eq 0 ]]; then
cdump="enkfgdas"
memdir="ensmean"
restart_interval=${restart_interval_enkf}
elif [[ ${mem} -eq -1 ]]; then
cdump="gdas"
memdir=""
restart_interval=${restart_interval_cntl}
fi
# Split YYYYMMDDHH into components for path/filename construction.
vyy=$(echo $validtime | cut -c1-4)
vmm=$(echo $validtime | cut -c5-6)
vdd=$(echo $validtime | cut -c7-8)
vhh=$(echo $validtime | cut -c9-10)
vdatestr="${vyy}${vmm}${vdd}.${vhh}0000"
dir_tracer="${RotDir}/${cdump}.${vyy}${vmm}${vdd}/${vhh}/${memdir}/RESTART"
# 3D-FGAT covers [center - assim_freq_half, center + assim_freq_half];
# otherwise only the window center is processed.
if [ ${FGAT3D} == "TRUE" -a ${FGAT3D_onlyCenter} != "TRUE" ]; then
nexttimem3=$($NDATE -$assim_freq_half $nexttime)
nexttimep3=$($NDATE $assim_freq_half $nexttime)
else
nexttimem3=${nexttime}
nexttimep3=${nexttime}
fi
nexttimetmp=${nexttimem3}
while [ ${nexttimetmp} -le ${nexttimep3} ]; do
nyytmp=$(echo $nexttimetmp | cut -c1-4)
nmmtmp=$(echo $nexttimetmp | cut -c5-6)
nddtmp=$(echo $nexttimetmp | cut -c7-8)
nhhtmp=$(echo $nexttimetmp | cut -c9-10)
ndatestrtmp="${nyytmp}${nmmtmp}${nddtmp}.${nhhtmp}0000"
fname_tracer="${ndatestrtmp}.fv_tracer.res.tile${itile}.nc.ges"
fname_tracer_orig="${ndatestrtmp}.fv_tracer.res.tile${itile}.nc.ges_orig"
# Keep an *_orig backup, then shift bins: seasN -> seas(N-1), old seas1
# parked as seas6 and finally renamed to seas5 (cyclic rotation).
/bin/cp -r ${dir_tracer}/${fname_tracer} ${dir_tracer}/${fname_tracer_orig}
ncrename -O -v seas1,seas6 -v seas2,seas1 -v seas3,seas2 -v seas4,seas3 -v seas5,seas4 ${dir_tracer}/${fname_tracer} ${dir_tracer}/${fname_tracer}_tmp
/bin/rm -rf ${dir_tracer}/${fname_tracer}
ncrename -O -v seas6,seas5 ${dir_tracer}/${fname_tracer}_tmp ${dir_tracer}/${fname_tracer}
/bin/rm -rf ${dir_tracer}/${fname_tracer}_tmp
nexttimetmp=$($NDATE +$restart_interval $nexttimetmp)
done
exit $?
| true |
ed5b010d8d097dff7663167685f01223463a381b | Shell | lbechberger/LearningPsychologicalSpaces | /code/shell_scripts/Shapes/experiment_1.sh | UTF-8 | 4,678 | 3.296875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Experiment 1: regression on inception features, locally or on an SGE grid.
# Space-separated "lists" kept in scalar strings and word-split on purpose.
echo 'experiment 1 - inception baseline'
# declare some lists to make code below less repetitive
default_baselines=("--zero")
default_regressors=("--linear")
default_lassos=("0.001 0.002 0.005 0.01 0.02 0.05 0.1 0.2 0.5 1.0 2.0 5.0 10.0")
default_noises=("0.1 0.25 0.55")
default_best_noise=0.1
default_comparison_noises=("0.0 0.1")
default_dims=("1 2 3 5 6 7 8 9 10")
default_targets=("mean median")
default_shuffled_flag="--shuffled"
baselines="${baselines:-$default_baselines}"
regressors="${regressors:-$default_regressors}"
lassos="${lassos:-$default_lassos}"
noises="${inception_noises:-$default_noises}"
best_noise="${best_noise:-$default_best_noise}"
comparison_noises="${comparison_noises:-$default_comparison_noises}"
dims="${dims:-$default_dims}"
targets="${targets:-$default_targets}"
shuffled_flag="${shuffled_flag:-$default_shuffled_flag}"
# no parameter means local execution
if [ "$#" -ne 1 ]
then
echo '[local execution]'
cmd='python -m'
script=code.ml.regression.regression
# parameter 'grid' means execution on grid
elif [ $1 = grid ]
then
echo '[grid execution]'
cmd=qsub
script=code/ml/regression/regression.sge
# all other parameters are not supported
else
echo '[ERROR: argument not supported, exiting now!]'
exit 1
fi
# set up the directory structure
echo '    setting up directory structure'
# NOTE(review): these loops iterate over $default_noises, not the
# overridable $noises — confirm whether the inception_noises override
# was meant to apply here as well.
for noise in $default_noises
do
	mkdir -p 'data/Shapes/ml/experiment_1/noise_'"$noise"'/'
done
mkdir -p 'data/Shapes/ml/experiment_1/noise_0.0/'
# first analyze the 4d space(s)
echo '    noise types for 4D space'
for noise in $default_noises
do
	for target in $targets
	do
		for baseline in $baselines
		do
			echo "        $baseline"
			$cmd $script data/Shapes/ml/dataset/targets.pickle "$target"'_4' 'data/Shapes/ml/dataset/pickle/features_'"$noise"'.pickle' data/Shapes/ml/dataset/pickle/folds.csv 'data/Shapes/ml/experiment_1/noise_'"$noise"'/'"$target"'_4.csv' -s 42 -e data/Shapes/ml/dataset/pickle/features_0.0.pickle $baseline
		done
		for regressor in $regressors
		do
			echo "        $regressor"
			$cmd $script data/Shapes/ml/dataset/targets.pickle "$target"'_4' 'data/Shapes/ml/dataset/pickle/features_'"$noise"'.pickle' data/Shapes/ml/dataset/pickle/folds.csv 'data/Shapes/ml/experiment_1/noise_'"$noise"'/'"$target"'_4.csv' -s 42 -e data/Shapes/ml/dataset/pickle/features_0.0.pickle $shuffled_flag $regressor
		done
	done
done
# compare performance to same train and test noise (either none or best noise setting)
echo '    performance comparison: same train and test noise'
for noise in $comparison_noises
do
	for target in $targets
	do
		for regressor in $regressors
		do
			echo "        $regressor"
			$cmd $script data/Shapes/ml/dataset/targets.pickle "$target"'_4' 'data/Shapes/ml/dataset/pickle/features_'"$noise"'.pickle' data/Shapes/ml/dataset/pickle/folds.csv 'data/Shapes/ml/experiment_1/noise_'"$noise"'/'"$target"'_4_same_noise.csv' -s 42 $regressor
		done
	done
	python -m code.ml.regression.cluster_analysis 'data/Shapes/ml/dataset/pickle/features_'$noise'.pickle' -n 100 -s 42 > 'data/Shapes/ml/experiment_1/noise_'"$noise"'/cluster_analysis.txt'
done
# now run the regression for the other target spaces using the selected noise level (only correct targets)
echo '    other dimensions'
for dim in $dims
do
	for target in $targets
	do
		for baseline in $baselines
		do
			echo "        $baseline"
			$cmd $script data/Shapes/ml/dataset/targets.pickle "$target"'_'"$dim" 'data/Shapes/ml/dataset/pickle/features_'"$best_noise"'.pickle' data/Shapes/ml/dataset/pickle/folds.csv 'data/Shapes/ml/experiment_1/noise_'"$best_noise"'/'"$target"'_'"$dim"'.csv' -s 42 -e data/Shapes/ml/dataset/pickle/features_0.0.pickle $baseline
		done
		for regressor in $regressors
		do
			echo "        $regressor"
			$cmd $script data/Shapes/ml/dataset/targets.pickle "$target"'_'"$dim" 'data/Shapes/ml/dataset/pickle/features_'"$best_noise"'.pickle' data/Shapes/ml/dataset/pickle/folds.csv 'data/Shapes/ml/experiment_1/noise_'"$best_noise"'/'"$target"'_'"$dim"'.csv' -s 42 -e data/Shapes/ml/dataset/pickle/features_0.0.pickle $regressor
		done
	done
done
# finally do a grid search on the lasso regressor for the selected noise level (only correct targets)
echo '    lasso regressor on 4D space(s)'
for target in $targets
do
	for lasso in $lassos
	do
		echo "        lasso $lasso"
		$cmd $script data/Shapes/ml/dataset/targets.pickle "$target"'_4' 'data/Shapes/ml/dataset/pickle/features_'"$best_noise"'.pickle' data/Shapes/ml/dataset/pickle/folds.csv 'data/Shapes/ml/experiment_1/noise_'"$best_noise"'/'"$target"'_4.csv' -s 42 -e data/Shapes/ml/dataset/pickle/features_0.0.pickle --lasso $lasso
	done
done
| true |
a58f1f79288a204c183e5ba05626a81dc45fdb43 | Shell | gboyegadada/ecomm-rest-api | /reset-db.sh | UTF-8 | 201 | 2.53125 | 3 | [
"MIT"
] | permissive | #!/bin/sh
# reset-db.sh
# Drop and recreate the "app" database on $DB_HOST (default: "db"),
# then load the seed schema and data.

db_host="${DB_HOST:-db}"

mysql -h "$db_host" -u "root" -e "DROP DATABASE app; CREATE DATABASE app;"
mysql -h "$db_host" -u "root" < ./database/tshirtshop.sql

echo "DB has been reset."
46b75b861ce7bb22823b618009d4d50123af498b | Shell | puppetlabs/PIE_tools | /splunk/tasks/install_pe.sh | UTF-8 | 1,160 | 3.484375 | 3 | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | #!/bin/bash
# $PT_tar_file
# Abort the whole script with status 2 when the previous step failed.
#   $1 - exit code to check
#   $2 - label used in the error message
is_ok() {
  local rc=$1
  local label=$2
  if (( rc != 0 )); then
    echo -e "[${label}] failed with code [$rc]"
    exit 2
  fi
}
# Unpack the uploaded PE tarball, comment out the console_admin_password
# line in pe.conf, run the installer, then relax autosigning for agents.
# NOTE(review): several messages below contain Unicode "smart quotes" — they
# are runtime strings passed as single arguments and are preserved as-is.
if [ ! -f "${PT_tar_file}" ];then
  echo -n "Failed to find the tar file [${PT_tar_file}]. Check the upload."
  exit 2
fi
tar xvf ${PT_tar_file}
is_ok $? “Error: Failed to untar [${PT_tar_file}]”
# Strip the trailing ".tar.gz" (keeps the first three dot-separated fields).
PE_FILE_NAME=$(echo $PT_tar_file | cut -d\. -f1-3)
cd ${PE_FILE_NAME}
# Comment out console_admin_password unless it is already commented.
consol_admin_password_omit=$(grep '#"console_admin_password' conf.d/pe.conf)
if [ -z "${consol_admin_password_omit}" ];then
  sed -i 's/"console_admin_password/#"console_admin_password/' conf.d/pe.conf
fi
sudo ./puppet-enterprise-installer -c conf.d/pe.conf
is_ok $? “Error: Failed to install Puppet Enterprise. Please check the logs and call Bryan.x ”
## Finalize configuration
echo “Finalize PE install”
sudo puppet agent -t
## Create and configure Certs
# Temporarily open puppet.conf so the append succeeds, then restore perms.
sudo chmod 777 /etc/puppetlabs/puppet/puppet.conf
echo "autosign = true" >> /etc/puppetlabs/puppet/puppet.conf
sudo chmod 755 /etc/puppetlabs/puppet/puppet.conf
sudo yum install -y ruby
echo "I'd restart the master now to be safe!"
| true |
525a8f5be1bcafafefd43abdb6b804b2746a6730 | Shell | eboxyz/dotfiles | /bin/bin/rofi-pass | UTF-8 | 17,747 | 3.484375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# rofi-pass
# (c) 2015 Rasmus Steinke <rasi@xssn.at>
# Command used to re-invoke this script (e.g. when switching modes).
# NOTE(review): taking only the first word of $0 breaks for install paths
# containing spaces — confirm this is intended.
basecommand=$(echo "$0" | gawk '{ print $1 }')
# set default settings
# Shared rofi invocation: case-insensitive matching, fixed width, stable
# ordering; each caller appends its own -dmenu options via "$@".
_rofi () {
    rofi -i -width 700 -no-levenshtein-sort "$@"
}
# Field names looked up inside pass entries.
URL_field='url'
USERNAME_field='user'
AUTOTYPE_field='autotype'

# Behaviour defaults.
delay=2
default_do='menu' # menu, copyPass, typeUser, typePass, copyUser, copyUrl, viewEntry, typeMenu, actionMenu, copyMenu, openUrl
auto_enter='false'
notify='false'
help_color=""
clip=primary
default_user=john_doe
default_user2=mary_ann
# The former duplicate assignment password_length='20' was dead code — it was
# immediately overwritten by this value, which is the effective default.
password_length=12

# Default keybindings (shown in the help menu).
autotype="Alt+1"
type_user="Alt+2"
type_pass="Alt+3"
open_url="Alt+4"
copy_name="Alt+u"
copy_url="Alt+l"
copy_pass="Alt+p"
show="Alt+o"
copy_entry="Alt+2"
type_entry="Alt+1"
copy_menu="Alt+c"
action_menu="Alt+a"
type_menu="Alt+t"
help="Alt+h"
switch="Alt+x"
insert_pass="Alt+n"
# Print all password entry names under ${root}, one per line, with the
# directory prefix kept and the .gpg suffix stripped.
list_passwords() {
  cd "${root}" || exit
  # Without globstar, **/ degenerates to */ and only one directory level was
  # listed; nullglob keeps the output empty instead of a literal "**/*" when
  # the store has no entries.  (Callers invoke this via $(...), i.e. in a
  # subshell, so the shopt does not leak into the main shell.)
  shopt -s nullglob globstar
  passwords=( **/*.gpg )
  for password in "${passwords[@]}"; do
    printf '%s\n' "${password%.gpg}"
  done
}
# Copy stdin into the X selection(s) selected by $clip:
# primary, clipboard, or both.  Unknown values copy nothing.
doClip () {
  case "$clip" in
    primary)   xclip ;;
    clipboard) xclip -selection clipboard ;;
    both)      xclip; xclip -o | xclip -selection clipboard ;;
  esac
}
# Remember the entry that is about to be used so the next invocation can
# preselect it; the cache file is user-readable only.
checkIfPass () {
  local cache="$HOME/.cache/rofi-pass/last_used"
  rm -f "$cache"
  printf '%s: %s\n' "${root}" "$selected_password" > "$cache"
  chmod 600 "$cache"
}
# Auto-type an entry with xdotool.  Without an autotype field: username,
# Tab, password.  With one: interpret each whitespace-separated token —
# :tab/:space/:enter/:delay are keys/pauses, "pass" is the password, any
# other token is typed from the entry's matching field.
autopass () {
  # Record the entry as last used (same as checkIfPass).
  rm -f "$HOME/.cache/rofi-pass/last_used"
  echo "${root}: $selected_password" > "$HOME/.cache/rofi-pass/last_used"
  chmod 600 "$HOME/.cache/rofi-pass/last_used"
  if [[ -z "${stuff["$AUTOTYPE_field"]}" ]]; then
    if [[ "${stuff["${USERNAME_field}"]}" ]]; then
      echo -n "${stuff["${USERNAME_field}"]}" | xdotool type --clearmodifiers --file -
      xdotool key Tab
    fi
    echo -n "${password}" | xdotool type --clearmodifiers --file -
    sleep 1
    if [[ ${auto_enter} == "true" ]]; then
      xdotool key Return
    fi
  else
    # Intentionally unquoted: the autotype string word-splits into tokens.
    for word in ${stuff["$AUTOTYPE_field"]}; do
      if [[ $word == ":tab" ]]; then
        xdotool key Tab;
      elif [[ $word == ":space" ]]; then
        xdotool key space
      elif [[ $word == ":delay" ]]; then
        sleep "${delay}";
      elif [[ $word == ":enter" ]]; then
        xdotool key Return;
      elif [[ $word == "pass" ]]; then
        echo -n "${password}" | xdotool type --clearmodifiers --file -
      else
        echo -n "${stuff[${word}]}" | xdotool type --clearmodifiers --file -
      fi
    done
    if [[ ${auto_enter} == "true" ]]; then
      xdotool key Return
    fi
  fi
  clearUp
}
# Open the entry's url field in $BROWSER.
# NOTE(review): the inline `exit` makes the trailing clearUp unreachable.
openURL () {
  checkIfPass
  $BROWSER "$(pass "$selected_password" | grep "${URL_field}: " | gawk '{sub(/:/,"")}{print $2}1' | head -1)"; exit;
  clearUp
}
# Type the entry's username field into the focused window.
typeUser () {
  checkIfPass
  echo -n "${stuff[${USERNAME_field}]}" | xdotool type --clearmodifiers --file -
  clearUp
}
# Type the password into the focused window, optionally notifying when done.
typePass () {
  checkIfPass
  echo -n "${password}" | xdotool type --clearmodifiers --file -
  # The per-entry "notify" field overrides the global setting: with the
  # global on, anything but an explicit "false" notifies; with it off,
  # only an explicit "true" does.
  if [[ $notify == "true" && "${stuff[notify]}" != "false" ]] \
     || [[ $notify == "false" && "${stuff[notify]}" == "true" ]]; then
    notify-send "rofi-pass" "finished typing password"
  fi
  clearUp
}
# Type an arbitrary field ($typefield, chosen by typeMenu) into the window.
typeField () {
  checkIfPass
  echo -n "${stuff[${typefield}]}" | xdotool type --clearmodifiers --file -
  clearUp
}
# Copy the username field to the selection configured in $clip.
copyUser () {
  checkIfPass
  echo -n "${stuff[${USERNAME_field}]}" | doClip
  clearUp
}
# Copy an arbitrary field ($copyfield, chosen by copyMenu) to the selection.
copyField () {
  checkIfPass
  echo -n "${stuff[${copyfield}]}" | doClip
  clearUp
}
# Copy the url field to the selection configured in $clip.
copyURL () {
  checkIfPass
  echo -n "${stuff[${URL_field}]}" | doClip
  clearUp
}
# Copy the password to the selection; a detached background job clears both
# X selections again after 45 seconds.
copyPass () {
  checkIfPass
  echo -n "$password" | doClip
  notify-send "rofi-pass" "Copied Password\nClearing in 45 seconds"
  $(sleep 45; echo -n "" | xclip; echo "" | xclip -selection clipboard | notify-send "rofi-pass" "Clipboard cleared") &
}
# Show the full entry (all fields) in the showEntry rofi view.
viewEntry () {
  checkIfPass
  showEntry "${selected_password}"
}
# Re-generate the password of the selected entry via `pass generate -i`,
# asking about symbols and length first, then for a final confirmation.
# NOTE(review): answering "No" in askGen recurses into generatePass rather
# than aborting — confirm this flow is intended.
generatePass () {
  askGen () {
    askGenMenu=$(echo -e "Yes\nNo" | rofi -dmenu -p "Generate new Password for ${selected_password}? > ")
    if [[ $askGenMenu == "Yes" ]]; then
      true
    elif [[ $askGenMenu == "No" ]]; then
      generatePass
    fi
  }
  checkIfPass
  symbols=$(echo -e "0 Cancel\n---\n1 Yes\n2 No" | rofi -dmenu -p "Use Symbols? > ")
  if [[ $symbols == "0 Cancel" ]]; then
    mainMenu;
  elif [[ $symbols == "1 Yes" ]]; then
    symbols="";
  elif [[ $symbols == "2 No" ]]; then
    symbols="-n";
  fi
  HELP="<span color='$help_color'>Enter Number or hit Enter to use default length</span>"
  length=$(echo -e "" | _rofi -dmenu -mesg "${HELP}" -p "Password length? (Default: ${password_length}) > ")
  askGen
  # ${symbols} is intentionally unquoted: it is either empty or "-n".
  if [[ $length == "" ]]; then
    pass generate ${symbols} -i "$selected_password" "${password_length}" > /dev/null;
  else
    pass generate ${symbols} -i "$selected_password" "${length}" > /dev/null;
  fi
}
# main Menu
# Entry point: show the password list in rofi (normal or bookmarks mode),
# parse the chosen entry into the associative array `stuff`, then dispatch
# on rofi's exit code (10+N for -kb-custom-N, 1 for cancel, 0 for Enter).
mainMenu () {
  if [[ $1 == "--bmarks" ]]; then
    selected_password="$(list_passwords 2>/dev/null \
      | _rofi -mesg "Bookmarks Mode. ${switch} to switch" \
      -dmenu \
      -kb-custom-1 "Alt+x" \
      -select "$entry" \
      -p "rofi-pass > ")"
    rofi_exit=$?
    if [[ $rofi_exit -eq 1 ]]; then
      exit
    elif [[ $rofi_exit -eq 10 ]]; then
      $(${basecommand})
    elif [[ $rofi_exit -eq 0 ]]; then
      openURL
    fi
  else
    unset selected_password
    HELP="Welcome to rofi-pass. Use <span color='$help_color'>${insert_pass}</span> to create a new pass entry.
Run ${default_do} with <span color='$help_color'>Enter</span>. For more help hit <span color='$help_color'>${help}</span>."
    selected_password="$(list_passwords 2>/dev/null \
      | _rofi -mesg "${HELP}" \
      -dmenu -kb-custom-1 "${autotype}" \
      -kb-custom-2 "${type_user}" \
      -kb-custom-3 "${type_pass}" \
      -kb-custom-4 "${open_url}" \
      -kb-custom-5 "${copy_name}" \
      -kb-custom-6 "${copy_pass}" \
      -kb-custom-7 "${show}" \
      -kb-custom-8 "${copy_url}" \
      -kb-custom-9 "${type_menu}" \
      -kb-custom-14 "${action_menu}" \
      -kb-custom-15 "${copy_menu}" \
      -kb-custom-16 "${help}" \
      -kb-custom-17 "${switch}" \
      -kb-custom-18 "${insert_pass}" \
      -dmenu \
      -select "$entry" \
      -p "rofi-pass > ")"
    rofi_exit=$?
    if [[ $rofi_exit -eq 1 ]]; then
      exit
    fi
    # generate Array of fields
    # password_temp=$(PASSWORD_STORE_DIR="${root}" pass "$selected_password")
    # password="${password_temp%%$'\n'*}"
    # fields="$(echo "${password_temp}" | tail -n +2)"
    # pass_key_value=$(echo "${fields}" | awk '$1 ~ /:$/{$1=$1;print}')
    mapfile -t password_temp < <(PASSWORD_STORE_DIR="${root}" pass "$selected_password")
    password=${password_temp[0]}
    # A first line of "#FILE=<entry>" redirects to another pass entry.
    if [[ ${password} == "#FILE="* ]]; then
      pass_file="${password#*=}"
      mapfile -t password_temp < <(PASSWORD_STORE_DIR="${root}" pass "${pass_file}")
      password=${password_temp[0]}
    fi
    # Keep only "key: value" lines (first awk field ends with a colon).
    fields=$(printf '%s\n' "${password_temp[@]:1}" | awk '$1 ~ /:$/{$1=$1;print}')
    declare -A stuff
    stuff["pass"]=${password}
    if [[ -n $fields ]]; then
      while read -r LINE; do
        _id="${LINE%%: *}"
        _val="${LINE#* }"
        stuff["${_id}"]=${_val}
      done < <(echo "${fields}")
      # Default autotype sequence when the entry does not define one.
      if test "${stuff['autotype']+autotype}"
      then
        :
      else
        stuff["autotype"]="${USERNAME_field} :tab pass"
      fi
    fi
  fi
  pass_content="$(for key in "${!stuff[@]}"; do echo "${key}: ${stuff[$key]}"; done)"
  # actions based on keypresses
  if [[ "${rofi_exit}" -eq 0 ]]; then typeMenu;
  elif [[ "${rofi_exit}" -eq 13 ]]; then openURL;
  elif [[ "${rofi_exit}" -eq 10 ]]; then sleep 0.2; autopass;
  elif [[ "${rofi_exit}" -eq 14 ]]; then copyMenu;
  elif [[ "${rofi_exit}" -eq 11 ]]; then sleep 0.2; typeUser;
  elif [[ "${rofi_exit}" -eq 12 ]]; then sleep 0.2; typePass;
  elif [[ "${rofi_exit}" -eq 17 ]]; then copyURL;
  elif [[ "${rofi_exit}" -eq 16 ]]; then viewEntry;
  elif [[ "${rofi_exit}" -eq 18 ]]; then export default_do="menu"; typeMenu;
  elif [[ "${rofi_exit}" -eq 15 ]]; then copyPass;
  elif [[ "${rofi_exit}" -eq 23 ]]; then actionMenu;
  elif [[ "${rofi_exit}" -eq 25 ]]; then unset selected_password; helpMenu;
  elif [[ "${rofi_exit}" -eq 24 ]]; then copyMenu;
  elif [[ "${rofi_exit}" -eq 26 ]]; then $(${basecommand} --bmarks);
  elif [[ "${rofi_exit}" -eq 27 ]]; then insertPass;
  fi
  clearUp
}
# Scrub secrets from shell state before the next action: blank the values
# first, then drop every variable that held entry data.
clearUp () {
  password=''
  selected_password=''
  unset password selected_password password_temp stuff
}
# Show the keybinding cheat sheet; Escape quits, anything else returns to
# the main menu.
helpMenu () {
  helptext=$(echo -e "${autotype}: Autotype
${type_user}: Type Username
${type_pass}: Type Password
---
${copy_name}: Copy Username
${copy_pass}: Copy Password
${copy_url}: Copy URL
${open_url}: Open URL
${copy_menu}: Copy Custom Field
---
${action_menu}: Edit, Move, Delete, Re-generate Submenu
${show}: Show Password File
${insert_pass}: Insert new Pass Entry
${switch}: Switch Pass/Bookmark Mode" | _rofi -dmenu -mesg "Hint: All hotkeys are configurable in config file" -p "Help > ")
  help_val=$?
  if [[ $help_val -eq 1 ]]; then exit;
  else unset helptext; mainMenu; fi
}
# Dispatch the default Enter action: "menu" lets the user pick which field
# to type, the autotype field triggers autopass, anything else is run as a
# command (e.g. typePass).
typeMenu () {
  if [[ -n $default_do ]]; then
    if [[ $default_do == "menu" ]]; then
      checkIfPass
      typefield=$(printf '%s\n' "${!stuff[@]}" | sort | _rofi -dmenu -p "Choose Field to type > ")
      val=$?
      if [[ $val -eq 1 ]]; then
        exit
      fi
      if [[ $typefield == "" ]]; then
        exit;
      elif [[ $typefield == "password" ]]; then
        typePass
      elif [[ $typefield == "${AUTOTYPE_field}" ]]; then
        autopass
      else
        typeField
      fi
      clearUp
    elif [[ $default_do == "${AUTOTYPE_field}" ]]; then
      autopass
    else
      $(${default_do})
    fi
  fi
}
# Let the user pick any field of the entry and copy it to the selection;
# "pass" uses copyPass (which also schedules the clipboard clear).
copyMenu () {
  checkIfPass
  copyfield=$(printf '%s\n' "${!stuff[@]}" | sort | _rofi -dmenu -p "Choose Field to copy > ")
  val=$?
  if [[ $val -eq 1 ]]; then
    exit;
  fi
  if [[ $copyfield == "pass" ]]; then
    copyPass;
  else
    copyField
  fi
  clearUp
}
# Submenu for entry maintenance: move/delete/edit the password file or
# re-generate its password.
actionMenu () {
  checkIfPass
  action=$(echo -e "< Return\n---\n1 Move Password File\n2 Delete Password File\\n3 Edit Password File\n4 Generate New Password" | _rofi -dmenu -p "Choose Action > ")
  if [[ ${action} == "1 Move Password File" ]]; then
    manageEntry move;
  elif [[ ${action} == "2 Delete Password File" ]]; then
    manageEntry delete;
  elif [[ ${action} == "3 Edit Password File" ]]; then
    manageEntry edit;
  elif [[ ${action} == "4 Generate New Password" ]]; then
    generatePass;
  elif [[ ${action} == "< Return" ]]; then
    mainMenu;
  elif [[ ${action} == "" ]]; then
    exit
  fi
}
# showEntry: display the decrypted entry line-by-line in rofi and copy
# the chosen line (or its value) to the clipboard, auto-clearing the
# clipboard after 45 seconds.
showEntry () {
    if [[ -z $pass_content ]]; then
        # Decrypt the entry; first line is the password proper.
        password_temp=$(PASSWORD_STORE_DIR="${root}" pass "$selected_password")
        password="${password_temp%%$'\n'*}"
        # Remaining "key: value" lines become the stuff[] map.
        pass_key_value=$(echo "${password_temp}" | tail -n+2 | grep ': ')
        declare -A stuff
        while read -r LINE; do
        _id="${LINE%%: *}"
        _val="${LINE#* }"
        stuff["${_id}"]=${_val}
        done < <(echo "${pass_key_value}")
        stuff["pass"]=${password}
        # Provide a default autotype sequence if the entry has none.
        if test "${stuff['autotype']+autotype}"
        then
            :
        else
            stuff["autotype"]="${USERNAME_field} :tab pass"
        fi
        pass_content="$(for key in "${!stuff[@]}"; do echo "${key}: ${stuff[$key]}"; done)"
    fi
    HELP="<span color='${help_color}'>${copy_entry}: Copy Entry</span>"
    bla=$(echo -e "< Return\n${pass_content}" | _rofi -dmenu -mesg "Enter: Copy entry to clipboard" -p "> ")
    rofi_exit=$?
    # Field name is everything before the first ": " of the chosen line.
    word=$(echo "$bla" | gawk -F': ' '{print $1}')
    if [[ ${rofi_exit} -eq 1 ]]; then
        exit
    elif [[ ${rofi_exit} -eq 0 ]]; then
        if [[ ${bla} == "< Return" ]]; then
            mainMenu
        else
            # Copy the field's value; if the map has no value for it,
            # fall back to copying the selected text itself.
            if [[ -z $(echo -n "${stuff[${word}]}") ]]; then
                echo -n "$word" | doClip
            else
                echo -n "${stuff[${word}]}" | doClip
            fi
            notify-send "rofi-pass" "Copied Password\nClearing in 45 seconds"
            # Detached timer wipes both X selections after 45s.
            $(sleep 45; echo -n "" | xclip; echo "" | xclip -selection clipboard | notify-send "rofi-pass" "Clipboard cleared") &
            exit
        fi
    fi
    exit
    # NOTE(review): everything below is unreachable dead code — the
    # function always exits (or recurses into mainMenu) above.
    unset stuff
    unset password
    unset selected_password
    unset password_temp
    unset stuff
    exit
}
# manageEntry ACTION: edit, move or delete the selected password entry.
#   edit   -> open the entry in $EDITOR via `pass edit`
#   move   -> pick a target group (directory) in rofi, then `pass mv`
#   delete -> ask for confirmation, then `pass rm --force`
# Any other (or missing) action just returns to mainMenu.
manageEntry () {
    if [[ "$1" == "edit" ]]; then
        EDITOR=$EDITOR PASSWORD_STORE_DIR="${root}" pass edit "${selected_password}"
        mainMenu
    elif [[ $1 == "move" ]]; then
        cd "${root}" || exit
        selected_password2=$(basename "$selected_password" .gpg)
        # List store sub-directories (deepest first), excluding .git.
        group=$(find -type d -not -iwholename '*.git*' -printf '%d\t%P\n' | sort -r -nk1 | cut -f2- | _rofi -dmenu -p "Choose Group > ")
        if [[ $group == "" ]]; then
            exit
        fi
        PASSWORD_STORE_DIR="${root}" pass mv "$selected_password" "${group}"
        mainMenu
    elif [[ "$1" == "delete" ]]; then
        HELP="<span color='$help_color'>Selected entry: ${selected_password}</span>"
        ask=$(echo -e "Yes\nNo" | _rofi -mesg "${HELP}" -dmenu -p "Are You Sure? > ")
        if [[ "$ask" == "Yes" ]]; then
            PASSWORD_STORE_DIR="${root}" pass rm --force "${selected_password}"
        elif [[ "$ask" == "No" ]]; then
            mainMenu
        elif [[ -z "$ask" ]]; then
            exit
        fi
    else
        mainMenu
    fi
}
# listgpg: list every regular file under the current directory (the
# password store root), skipping hidden files/dirs and stripping the
# leading "./" from each path.
listgpg () {
    find . -type f -not -path '*/\.*' | sed 's|^\./||'
}
# insertPass: interactively create a new password-store entry.
# Pre-fills the URL field from the X selection, asks for a unique name,
# a username and a group, then inserts a template, generates a password
# and opens the entry for editing.
insertPass () {
    # Current X selection is assumed to hold the site URL, if any.
    url=$(xclip -o)
    cd "${root}"
    name="$(listgpg | rofi -dmenu -format 'f' -mesg "Type name, make sure it is unique" -p "> ")"
    # name="$(echo -e "$(list_passwords 2>/dev/null)" | rofi -dmenu -mesg "Type name, make sure it is unique" -p "> ")"
    # $? here is the exit status of the assignment's command substitution
    val=$?
    if [[ $val -eq 1 ]]; then
        exit
    fi
    user=$(echo -e "${default_user2}\n$USER\n${default_user}" | rofi -dmenu -mesg "Chose Username or type" -p "> ")
    val=$?
    if [[ $val -eq 1 ]]; then
        exit
    fi
    # Offer existing store sub-directories as groups, deepest first.
    group=$(echo -e "No Group\n---\n$(find -type d -not -iwholename '*.git*' -printf '%d\t%P\n' | sort -r -nk1 | cut -f2-)" | rofi -dmenu -p "Choose Group > ")
    val=$?
    if [[ $val -eq 1 ]]; then
        exit
    fi
    # Insert template (with URL only if the selection looks like one),
    # then generate the password in-place and open the entry to edit.
    if [[ "$group" == "No Group" ]]; then
        if [[ $url == http* ]]; then
            echo -e "PASS\n---\n${USERNAME_field}: $user\n${URL_field}: $url" | PASSWORD_STORE_DIR="${root}" pass insert -m "${name}" > /dev/null && PASSWORD_STORE_DIR="${root}" pass generate -ni "${name}" "${password_length}" >/dev/null && PASSWORD_STORE_DIR="${root}" pass edit "${name}"
        else
            echo -e "PASS\n---\n${USERNAME_field}: $user" | PASSWORD_STORE_DIR="${root}" pass insert -m "${name}" > /dev/null && PASSWORD_STORE_DIR="${root}" pass generate -ni "${name}" "${password_length}" >/dev/null && PASSWORD_STORE_DIR="${root}" pass edit "${name}"
        fi
    else
        if [[ $url == http* ]]; then
            echo -e "PASS\n---\n${USERNAME_field}: $user\n${URL_field}: $url" | PASSWORD_STORE_DIR="${root}" pass insert -m "${group}/${name}" > /dev/null && PASSWORD_STORE_DIR="${root}" pass generate -ni "${group}/${name}" "${password_length}" >/dev/null && PASSWORD_STORE_DIR="${root}" pass edit "${group}/${name}"
        else
            echo -e "PASS\n---\n${USERNAME_field}: $user" | PASSWORD_STORE_DIR="${root}" pass insert -m "${group}/${name}" > /dev/null && PASSWORD_STORE_DIR="${root}" pass generate -ni "${group}/${name}" "${password_length}" >/dev/null && PASSWORD_STORE_DIR="${root}" pass edit "${group}/${name}"
        fi
    fi
}
# help_msg: print the command-line usage summary to stdout.
help_msg () {
    cat <<'EOF'
rofi-pass (Version: 1.2)

Usage:

--insert         insert new entry to password store
--manage         edit/move/delete entries
--root           set custom root directory
--last-used      highlight last used item
--show-last      show details of last used Entry
--bmarks         run bookmarks Mode

EOF
}
# main: load configuration, resolve the password-store root directory
# and dispatch on the first command-line argument.
main () {
    # enable extended globbing
    shopt -s nullglob globstar
    # check if global config exists and load it
    if [[ -f /etc/rofi-pass.conf ]]; then
        source /etc/rofi-pass.conf
    fi
    # check if local config exists and load it (overrides global)
    if [[ -f "$HOME/.config/rofi-pass/config" ]]; then
        source "$HOME/.config/rofi-pass/config"
    fi
    # create tmp dir
    if [[ ! -d "$HOME/.cache/rofi-pass" ]]; then
        mkdir "$HOME/.cache/rofi-pass"
    fi
    # optional keyboard layout override from config
    if [[ -n $keyboard ]]; then
        setxkbmap ${keyboard}
    fi
    # set help color (default to rofi's normal color if unset)
    if [[ $help_color == "" ]]; then
        help_color=$(rofi -dump-xresources | grep 'rofi.color.normal' | gawk -F ',' '/,/{gsub(/ /, "", $2); print $2}')
    fi
    # check for BROWSER variable, use xdg-open as fallback
    if [[ -z $BROWSER ]]; then
        export BROWSER=xdg-open
    fi
    # Resolve the store root, in priority order: last_used cache (for
    # --last-used/--show-last), --root argument, $root from config,
    # $PASSWORD_STORE_DIR, then the pass default.
    if [[ -r "$HOME/.cache/rofi-pass/last_used" ]] && [[ $1 == "--last-used" || $1 == "--show-last" ]]; then
        export root; root=$(awk -F ': ' '{ print $1 }' "$HOME/.cache/rofi-pass/last_used")
    elif [[ -n "$2" && "$1" == "--root" ]]; then
        export root="${2}"
    elif [[ -n $root ]]; then
        export root="${root}"
    elif [[ -n ${PASSWORD_STORE_DIR} ]]; then
        export root=${PASSWORD_STORE_DIR}
    else
        export root="$HOME/.password-store"
    fi
    export PASSWORD_STORE_DIR="${root}"
    # Dispatch on the first CLI argument; default is the main menu.
    case $1 in
        --insert)
            insertPass
            ;;
        --root)
            mainMenu
            ;;
        --manage)
            manageEntry
            ;;
        --help)
            help_msg
            ;;
        --last-used)
            if [[ -r "$HOME/.cache/rofi-pass/last_used" ]]; then
                entry="$(awk -F ': ' '{ print $2 }' "$HOME/.cache/rofi-pass/last_used")"
            fi
            mainMenu
            ;;
        --show-last)
            if [[ -r "$HOME/.cache/rofi-pass/last_used" ]]; then
                selected_password="$(awk -F ': ' '{ print $2 }' "$HOME/.cache/rofi-pass/last_used")" viewEntry
            else
                mainMenu
            fi
            ;;
        --bmarks)
            mainMenu --bmarks;
            ;;
        *)
            mainMenu
            ;;
    esac
}
main "$@"
| true |
233a8bdc307dbf68b029006b1780e89439376348 | Shell | tayre/config | /.bash_profile | UTF-8 | 1,882 | 3.375 | 3 | [] | no_license | export CLICOLOR=1
# ls colorization scheme for BSD/macOS `ls` (paired with CLICOLOR above)
export LSCOLORS=ExGxBxDxCxEgEdxbxgxcxd

# A few usefull aliases
alias l='ls -la'
alias ll='ls -l'
alias ..='cd ..'
alias ...='cd ../..'
alias ....='cd ../../..'
alias .....='cd ../../../..'

# Create simple aliases for colors (ANSI escape sequences)
export BLACK="\033[01;30m"
export MAGENTA="\033[1;31m"
export ORANGE="\033[1;33m"
export GREEN="\033[1;32m"
export PURPLE="\033[1;35m"
export WHITE="\033[1;37m"
export BOLD=""
export RESET="\033[m"

# Only query terminal capabilities in interactive shells ($- contains
# "i"); tput would fail or pollute output for non-interactive ones.
if [[ $- == *i* ]]
then
	c_cyan=`tput setaf 6`
	c_red=`tput setaf 1`
	c_green=`tput setaf 2`
	c_sgr0=`tput sgr0`
fi
# parse_git_branch: when the current directory is inside a git work
# tree, print "[branch]" plus an arrow marking ahead/behind/diverged
# state; print nothing otherwise.  Used from PS1 below.
parse_git_branch ()
{
    if git rev-parse --git-dir >/dev/null 2>&1
    then
        git_status="$(git status 2> /dev/null)"
        branch_pattern="On branch ([^${IFS}]*)"
        remote_pattern="# Your branch is (.*) of"
        diverge_pattern="# Your branch and (.*) have diverged"
        # add an else if or two here if you want to get more specific
        if [[ ${git_status} =~ ${remote_pattern} ]]; then
            if [[ ${BASH_REMATCH[1]} == "ahead" ]]; then
                remote="↑"
            # Bug fix: this branch used mismatched brackets
            # (`elif [ ... ]]`), which was a syntax error whenever the
            # "behind" case was actually reached.
            elif [[ ${BASH_REMATCH[1]} == "behind" ]]; then
                remote="↓"
            fi
        fi
        if [[ ${git_status} =~ ${diverge_pattern} ]]; then
            # NOTE(review): ":arrow_up_down:" looks like an emoji
            # shortcode; an actual "↕" glyph was probably intended —
            # confirm before changing the displayed string.
            remote=":arrow_up_down:"
        fi
        if [[ ${git_status} =~ ${branch_pattern} ]]; then
            branch=${BASH_REMATCH[1]}
            echo "[${branch}${remote}]"
        fi
    else
        return 0
    fi
}
# branch_color: emit the escape sequence for green when the git work
# tree is clean, red when dirty; emit nothing outside a git repo.
branch_color ()
{
    if git rev-parse --git-dir >/dev/null 2>&1
    then
        git_status="$(git status 2> /dev/null)"
        color=""
        # Bug fix: git <= 2.9.0 prints "working directory clean" but
        # newer versions print "working tree clean"; matching only the
        # old wording made every clean tree show as dirty (red) on
        # modern git.  Accept both phrasings.
        if [[ ${git_status} =~ "working directory clean" || ${git_status} =~ "working tree clean" ]]; then
            color="${c_green}"
        else
            color=${c_red}
        fi
    else
        return 0
    fi
    echo -ne $color
}
# Prompt: working directory, then the current git branch colored by its
# clean/dirty state; \[ \] keep the non-printing sequences out of
# readline's length accounting.
export PS1='\w\[${c_sgr0}\] \[$(branch_color)\]$(parse_git_branch)\[${c_sgr0}\]: '
| true |
5288dcf82806b15f63cfcf6b970f2c67bf5dfe6c | Shell | joao-parana/torque | /scripts/torque_mom_scheduler_run.sh | UTF-8 | 695 | 3.21875 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Container entrypoint: start the pbs scheduler, then exec pbs_mom in
# the foreground (-D) as PID-1 replacement.
# Exit if any command fails
set -e

# VARIABLES
PATH=/usr/sbin:/usr/bin:/sbin:/bin
DAEMON=/usr/sbin/pbs_mom

# Include torque defaults if available
if [ -f /etc/default/torque-mom ]; then
    . /etc/default/torque-mom
elif [ -f /etc/default/torque ]; then
    . /etc/default/torque
fi

# This is always daemonized, so I run it here
/usr/sbin/pbs_sched

## ulimit -l unlimited is needed for mom, otherwise it crashes at startup
## as it tries to lock the memory
## NOTE! This requires to run the container with the --privileged option
# If ulimit fails the subshell returns 1, which under `set -e` aborts
# the whole script after printing the hint.
ulimit -l unlimited || ( echo "You must run the container with the --privileged option" ; exit 1 )
ulimit -s unlimited
# Replace the shell so pbs_mom receives signals directly.
exec "$DAEMON" -D
| true |
3e062e4ef227659ad06db90568507282cf6778fa | Shell | xwzliang/unix_power_tools | /03_working_with_files_and_directories/01_directories_and_files/01_mkdir_dash_p_mkdir_with_parent_directories.bats | UTF-8 | 222 | 2.890625 | 3 | [] | no_license | #!/usr/bin/env bats
# bats test: `mkdir -p` creates the whole missing parent chain at once.
@test "test mkdir -p" {
	# precondition: the scratch directory must not exist yet
	[ ! -d $PWD/test ]
	dir_for_test=$PWD/test/01/02/03
	# mkdir -p will create parent directory if not existed
	run mkdir -p $dir_for_test
	[ -d $dir_for_test ]
	# cleanup (via `run` so a failure here doesn't fail the test)
	run rm -r test
}
| true |
e884f831ad43cb406ef776dc7d8ba8982fb95899 | Shell | mk-fg/fgtk | /desktop/uri_handlers/magnet | UTF-8 | 1,128 | 3.671875 | 4 | [
"WTFPL",
"LicenseRef-scancode-public-domain"
] | permissive | #!/bin/bash
# Magnet-URI handler: classify each link (bittorrent vs misc) via an awk
# coprocess and write each one into a per-type destination directory
# given with --dst-<type> options.
typeset -A dst_dirs
while [[ -n "$1" ]]; do
	case "$1" in
		# --dst-bt DIR, --dst-misc DIR, ... map link type -> directory
		--dst-*)
			dst_type=${1#--dst-}
			shift
			dst_dirs[$dst_type]=$1 ;;
		-x) set -x ;;
		*) break ;;
	esac
	shift
done
[[ -z "$1" ]] && { echo >&2 "Expecting at least one link"; exit 1; }

# Coprocess classifier: for each input line it answers "TYPE NAME".
# A btih magnet link yields "bt <infohash>.magnet"; anything else is
# hashed through md5sum and yields "misc <md5>.magnet".
coproc awk '
	match($0, /\<xt=urn:btih:([0-9a-zA-Z]+)/, a)\
		{printf("bt %s.magnet\n", a[1]); fflush(); next}
	{ print $0 |& "md5sum"; close("md5sum", "to"); "md5sum" |& getline
		printf("misc %s.magnet\n", $1); fflush() }'

err=
for src in "$@"; do
	# Ask the coprocess to classify this link, then read the answer.
	echo "$src"> /dev/fd/"${COPROC[1]}"
	read dst_type dst_basename < /dev/fd/"${COPROC[0]}"
	[[ -n "${dst_dirs[$dst_type]}" ]] && dst="${dst_dirs[$dst_type]}/${dst_basename}"\
		|| { printf >&2 "ERROR: No --dst-$dst_type specified for link: %q\n" "$src"; dst=; }
	[[ -n "$dst" ]] || {
		echo >&2 "ERROR: Failed to classify link, skipping" \
			"(type: ${dst_type}, basename: ${dst_basename})"; err=true; continue; }
	echo "$src" >"$dst" || {
		echo >&2 "ERROR: Failed to write to a destination file (${dst})"; err=true; }
done
# Close both coprocess pipes, reap it, and report overall status.
eval "exec ${COPROC[0]}>&- ${COPROC[1]}>&-"
wait
[[ -z "$err" ]] && exit 0 || exit 1
| true |
2fba86b3f05040bc648d9915448f129acc8f7ed7 | Shell | 369lo/cgs3767-assignment-3 | /assignment3/program3.sh | UTF-8 | 265 | 2.546875 | 3 | [] | no_license | #!/bin/sh
# Network diagnostics demo: prints PATH, DNS lookup for fiu.edu, open
# connections, route to google, and local IPs, then drops into bash.
echo ""
echo $PATH
echo ""
# NOTE(review): `echo -n` is a bashism under #!/bin/sh — behaviour
# depends on the shell actually installed as sh; confirm.
echo -n "FIU's IP:"
dig +short fiu.edu
echo ""
echo "Connections to computer"
netstat -natp
echo ""
echo "Connection to google"
tracepath www.google.com
echo ""
echo "Host IP"
hostname -I
# Hand the terminal over to an interactive bash when the script ends.
/bin/bash
| true |
44069b51f6b416f96df35c81ee1a9b42e5aa2679 | Shell | NHS-digital-website/hippo | /ci-cd/bin/hippo/users-and-groups/users-yaml | UTF-8 | 751 | 3.78125 | 4 | [
"OGL-UK-3.0"
] | permissive | #!/bin/bash
# Directory this script lives in (used to locate the sibling user-yaml
# helper regardless of the caller's working directory).
DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
# main AUTHORS_FILE EDITORS_FILE ADMINS_FILE
# Emit the base users YAML, then one user entry per line of each input
# file (lines are "Name, email" — see process_line).
function main {
    local input_file

    cat "${DIR}/../../../users.base.yaml"

    # All role files get identical treatment, so iterate instead of
    # repeating the same read loop three times (the previous copies
    # were easy to let drift apart).
    for input_file in "$@"; do
        # `|| [ -n "$line" ]` also processes a final line that lacks a
        # trailing newline, which the old loops silently dropped.
        while read -r line || [ -n "$line" ]; do
            process_line "${line}"
        done < "$input_file"
    done
}
# process_line "Name, email": split a CSV-ish record on ", " (space
# optional) and hand the parts to the sibling user-yaml generator.
function process_line {
    local line=$1
    local user_name
    local user_email
    user_name=$(echo "${line}" | awk -F ", ?" '{print $1}')
    user_email=$(echo "${line}" | awk -F ", ?" '{print $2}')
    ${DIR}/user-yaml "${user_name}" "${user_email}"
}
main "$@"
| true |
a0f1f504625a6f378df7112d5f404e43c1d2cc37 | Shell | NickLediet/mouseconn | /bin/preinstall.sh | UTF-8 | 489 | 3.625 | 4 | [] | no_license | #!/usr/bin/env bash
# Install platform-specific bluetooth dependencies.
# Prompt for sudo privs up front so installs don't stall mid-run
sudo -v

########################
## Verify OS
########################
function osx-setup {
    echo 'OSX Detected...'
    echo 'Installing OSX Dependencies'
    brew install blueutil
}

# Bug fix: this function was previously named 'linx-setup' while the
# dispatch below calls 'linux-setup', so the Linux branch always failed
# with "command not found" and never installed anything.
function linux-setup {
    echo 'Linux Detected'
    # TODO: Add check for distro to install with correct package manager
    sudo apt-get install bluetooth bluez libbluetooth-dev libudev-dev -y
}

if [[ "$OSTYPE" =~ ^darwin ]]; then
    osx-setup
else
    linux-setup
fi
| true |
95f226bf2a7fa85512fe1cbd3dadf70f7862a387 | Shell | Interlisp/medley | /scripts/install-diff-filter.sh | UTF-8 | 542 | 3.390625 | 3 | [
"MIT"
] | permissive | #!/bin/sh
## This script installs the `eolconv` git diff filter in order to render diffs
## of files with CR line terminators correctly. The filter is installed locally
## and only affects the medley repository.

# Resolve the directory containing this script (and the filter helper).
SCRIPTPATH="$( cd "$(dirname "$0")" >/dev/null 2>&1 ; pwd -P )"
EOLCONV="$SCRIPTPATH/eolconv.sh"
GITATTRIBUTES="$SCRIPTPATH/../.gitattributes"

chmod +x "$EOLCONV"
# Register the textconv driver locally and route every file through it.
git config --local --add diff.creol.textconv "$EOLCONV"
echo "* diff=creol" >> "$GITATTRIBUTES"

## for good measure
git config --local --add core.autocrlf false
| true |
ba136b0d4be507fb4947cc09d159756685d4fdf6 | Shell | easybe/alpine-reviewboard | /dev/docker-entrypoint.sh | UTF-8 | 525 | 2.8125 | 3 | [
"MIT"
] | permissive | #!/bin/sh
# ReviewBoard dev-container entrypoint: bootstrap the source tree on
# first run, then dispatch on the first argument (server/test/manage),
# defaulting to an interactive ash shell.
set -e

cd /src

# First run only: settings_local.py is created by prepare-dev.py, so
# its absence means the tree hasn't been bootstrapped yet.
if [ ! -f settings_local.py ]; then
    python setup.py install_node_deps
    pip install --no-cache-dir -e .
    # Run prepare-dev.py non-interactively by patching allow_input off.
    sed 's/allow_input=True/allow_input=False/' \
        contrib/internal/prepare-dev.py | python
fi

# exec replaces PID 1 so signals reach the child process directly.
case $1 in
    server)
        shift
        exec contrib/internal/devserver.py "$@"
        ;;
    test)
        shift
        exec reviewboard/manage.py test -- "$@"
        ;;
    manage)
        shift
        exec reviewboard/manage.py "$@"
        ;;
    *)
        exec ash
esac
| true |
f03a65825ed42f29f88e36e8a6b22f28ff1040f7 | Shell | pradeepbhaduria/image-gallery | /services/thumbnail-service/pubsub.sh | UTF-8 | 1,706 | 2.921875 | 3 | [] | no_license | # https://cloud.google.com/run/docs/tutorials/pubsub
# Wire GCS object notifications to the thumbnail Cloud Run service via
# Pub/Sub (see https://cloud.google.com/run/docs/tutorials/pubsub).
export GOOGLE_CLOUD_PROJECT=chitrashala
export TOPIC_NAME=gcs-events
export PROJECT_NUMBER="$(gcloud projects list --filter=${GOOGLE_CLOUD_PROJECT} --format='value(PROJECT_NUMBER)')"
export SERVICE_NAME=thumbnail-service
export SERVICE_URL="$(gcloud run services list --platform managed --filter=${SERVICE_NAME} --format='value(URL)')"
export SERVICE_ACCOUNT=${TOPIC_NAME}-sa
export BUCKET_PICTURES=chitra-bhandar-${GOOGLE_CLOUD_PROJECT}
# export THUMBNAIL_BUCKET=uploaded-pictures-${GOOGLE_CLOUD_PROJECT}

# Create pubsub topic
gcloud pubsub topics create ${TOPIC_NAME}

# Create Pub/Sub notifications when files are stored in the bucket:
# NOTE(review): this hardcodes gs://chitra-bhandar while
# $BUCKET_PICTURES above is "chitra-bhandar-<project>" — one of the two
# is probably stale; confirm which bucket is intended.
gsutil notification create -t ${TOPIC_NAME} -f json gs://chitra-bhandar

# Create a service account to represent the Pub/Sub subscription identity:
gcloud iam service-accounts create ${SERVICE_ACCOUNT} --display-name "Cloud Run Pub/Sub Invoker"

# Give the service account permission to invoke the service:
gcloud run services add-iam-policy-binding ${SERVICE_NAME} \
   --member=serviceAccount:${SERVICE_ACCOUNT}@${GOOGLE_CLOUD_PROJECT}.iam.gserviceaccount.com \
   --role=roles/run.invoker \
   --platform managed \
   --region=us-central1

# Enable Pub/Sub to create authentication tokens in our project:
gcloud projects add-iam-policy-binding ${GOOGLE_CLOUD_PROJECT} \
     --member=serviceAccount:service-${PROJECT_NUMBER}@gcp-sa-pubsub.iam.gserviceaccount.com \
     --role=roles/iam.serviceAccountTokenCreator

# Finally, create a Pub/Sub subscription with the service account:
gcloud pubsub subscriptions create ${TOPIC_NAME}-subscription --topic ${TOPIC_NAME} \
   --push-endpoint=${SERVICE_URL} \
   --push-auth-service-account=${SERVICE_ACCOUNT}@${GOOGLE_CLOUD_PROJECT}.iam.gserviceaccount.com
2b6a092dde44a4d13d6fb089e559e35f4ea2ea91 | Shell | huangweiqing80/dhd_rel_1_206_43569 | /connection_scripts/p2papp_disconnected.sh | UTF-8 | 1,460 | 2.953125 | 3 | [] | no_license | #
# This script is called by p2papp after tearing down a connection between
# the two peers. This script de-initializes the network interface for the
# connection.
#
# Copyright (C) 2010, Broadcom Corporation
# All Rights Reserved.
#
# This is UNPUBLISHED PROPRIETARY SOURCE CODE of Broadcom Corporation;
# the contents of this file may not be disclosed to third parties, copied
# or duplicated in any form, in whole or in part, without the prior
# written permission of Broadcom Corporation.
#
# $Id: p2papp_disconnected.sh,v 1.10 2010/02/24 23:47:36 dlo Exp $
#
# Usage: $1 = peer role ("ap" or "sta"), $2 = network interface name.

echo === Kill existing dhcpd:
# Bug fix: plain `grep dhcpd` also matched the grep's own entry in
# `ps aux`, so an unrelated (already dead) PID got passed to kill.
# The '[d]hcpd' bracket trick matches dhcpd but not the grep itself.
dheth1_pid=`ps aux | grep '[d]hcpd' | awk '{ print $2 }'`
echo kill -9 $dheth1_pid
kill -9 $dheth1_pid

echo === Kill existing dhclient:
dheth1_pid=`ps aux | grep '[d]hclient' | awk '{ print $2 }'`
echo kill -9 $dheth1_pid
kill -9 $dheth1_pid

#
# Actions for the AP peer in a P2P connection
#
if [ "$1" == ap ]; then
	#echo
	#echo === Check that we can no longer ping the peer:
	#echo
	#echo ping -c 1 -W 3 192.168.16.202
	#ping -c 1 -W 3 192.168.16.202

	# Drop the interface's IP address and stop the DHCP server.
	echo ifconfig "$2" 0.0.0.0
	/sbin/ifconfig "$2" 0.0.0.0
	kill $(ps -e | grep '[d]hcpd' | awk '{ print $1 }')
fi

#
# Actions for the STA peer in a P2P connection
#
if [ "$1" == sta ]; then
	#echo
	#echo === Check that we can no longer ping the peer:
	#echo
	#echo ping -c 1 -W 3 192.168.16.1
	#ping -c 1 -W 3 192.168.16.1

	# Drop the interface's IP address and stop the DHCP client.
	echo ifconfig "$2" 0.0.0.0
	/sbin/ifconfig "$2" 0.0.0.0
	kill $(ps -e | grep '[d]hclient' | awk '{ print $1 }')
fi
| true |
a5f8691a2c34df012eb88c104bea4fba043e52e7 | Shell | TumTum/redmine_scheduling_poll | /bin/travisci_exec_test.sh | UTF-8 | 912 | 2.90625 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# CI test runner for the redmine_scheduling_poll plugin: run the plugin
# test suite, then (except on Redmine 3.4) the UI suite under phantomjs.
# inspired by https://gist.github.com/pinzolo/7599006
export REDMINE_LANG=en
export RAILS_ENV=test
retval=0
#REDMINE_VERSION="2.4.0"
PLUGIN_NAME=redmine_scheduling_poll
cd redmine

# Execute test
bundle exec rake redmine:plugins:test NAME=${PLUGIN_NAME}
retval=$?
if [ "$retval" -ne 0 ]; then
  echo "Interrupt executing test."
  exit $retval
fi

# Execute UI test
if [[ "$REDMINE_VERSION" == 3.4.* ]]; then
  echo "FIXME: skip UI Test for redmine 3.4" >&2 # FIXME skip UI Test for redmine 3.4
else
  # Only run the UI suite if the rake task actually exists.
  bundle exec rake -T | grep redmine:plugins:test:ui > /dev/null 2> /dev/null
  if [ "$?" -eq 0 ]; then
    # phantomjs provides the webdriver endpoint for the UI tests.
    phantomjs --webdriver 0.0.0.0:4444 >> phantomjs.log &
    bundle exec rake redmine:plugins:test:ui NAME=${PLUGIN_NAME}
    retval=$?
    killall phantomjs
    cat phantomjs.log
  fi
  if [ "$retval" -ne 0 ]; then
#    echo "Interrupt executing test."
    exit $retval
  fi
fi
exit $retval
| true |
f7b5431382f3aeb20aaa2235232356762fb54614 | Shell | coinmenace/adzcoin-build | /adzcoin-linuxdeployqt.sh | UTF-8 | 2,051 | 3.0625 | 3 | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | #!/bin/bash
#
# Copyright (c) 2017-2018 The Swipp developers
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Recipe for creating a Swipp QT AppImage package

# Create AppDir FHS-like stucture
mkdir -p swipp.AppDir/usr swipp.AppDir/usr/bin
# Used by AppImageKit-checkrt (see below)
mkdir -p swipp.AppDir/usr/optional swipp.AppDir/usr/optional/libstdc++

# Copy files into empty AppDir
cp swipp-qt swipp.AppDir/usr/bin

# Get and run linuxdeployqt
wget -c https://github.com/probonopd/linuxdeployqt/releases/download/continuous/linuxdeployqt-continuous-x86_64.AppImage
chmod a+x linuxdeployqt-continuous-x86_64.AppImage

# Prepare AppDir (bundle the non-Qt shared-library dependencies)
./linuxdeployqt-continuous-x86_64.AppImage swipp.AppDir/usr/bin/swipp-qt -bundle-non-qt-libs

# Workaround to increase compatibility with older systems; see https://github.com/darealshinji/AppImageKit-checkrt for details
rm swipp.AppDir/AppRun
cp /usr/lib/x86_64-linux-gnu/libstdc++.so.6 swipp.AppDir/usr/optional/libstdc++/
wget -c https://github.com/darealshinji/AppImageKit-checkrt/releases/download/continuous/exec-x86_64.so -O swipp.AppDir/usr/optional/exec.so
wget -c https://github.com/darealshinji/AppImageKit-checkrt/releases/download/continuous/AppRun-patched-x86_64 -O swipp.AppDir/AppRun
chmod a+x swipp.AppDir/AppRun

# Copy in desktop descriptor and icon
printf "[Desktop Entry]\nType=Application\nName=swipp-qt\nGenericName=swipp-qt\nComment=Store and transfer Swipp coins\nIcon=swipp\nExec=../usr/bin/swipp-qt\nTerminal=false\nCategories=Network;Finance;" > swipp.AppDir/swipp-qt.desktop
cp src/qt/res/icons/swipp.png swipp.AppDir/

# Manually invoke appimagetool so that the modified AppRun stays intact
./linuxdeployqt-continuous-x86_64.AppImage --appimage-extract
export PATH=$(readlink -f ./squashfs-root/usr/bin):$PATH
./squashfs-root/usr/bin/appimagetool swipp.AppDir swipp-qt-x86_64.AppImage

# Purge temporary files
rm -rf swipp.AppDir linuxdeployqt-continuous-x86_64.AppImage squashfs-root
| true |
333549807acf582af78664c5dbbb13814c970810 | Shell | ebo/netcdf-c | /ncdump/tst_formatx3.sh | UTF-8 | 1,689 | 3.546875 | 4 | [
"BSD-3-Clause"
] | permissive | #!/bin/sh
# Test -K (extended format) output of ncdump for the three classic
# netCDF-3 variants (classic, 64-bit offset, and CDF-5 when enabled).
if test "x$srcdir" = x ; then srcdir=`pwd`; fi
. ../test_common.sh

# This shell script runs the ncdump tests.
# get some config.h parameters
if test -f ${top_builddir}/config.h ; then
  if fgrep -e '#define ENABLE_CDF5 1' ${top_builddir}/config.h >/dev/null ; then
    ENABLE_CDF5=1
  else
    ENABLE_CDF5=0
  fi
else
  echo "Cannot locate config.h"
  exit 1
fi

# This shell script tests the output several previous tests.
# ECODE accumulates failure; individual checks set it to 1 but keep going.
ECODE=0

echo ""
echo "*** Testing extended file format output."
set -e

echo "Test extended format output for a netcdf-3 file"
rm -f tmp_tst_formatx3
${NCGEN} -k nc3 -b -o ./tst_formatx3.nc $srcdir/ref_tst_small.cdl
${NCDUMP} -K tst_formatx3.nc >tmp_tst_formatx3
if ! grep 'classic mode=00000000' <tmp_tst_formatx3 ; then
  echo "*** Fail: extended format for a classic file"
  ECODE=1
fi

echo "Test extended format output for a 64-bit offset netcdf-3 file"
rm -f tmp_tst_formatx3
${NCGEN} -k nc6 -b -o ./tst_formatx3.nc $srcdir/ref_tst_small.cdl
${NCDUMP} -K tst_formatx3.nc >tmp_tst_formatx3
if ! grep '64-bit offset mode=00000200' <tmp_tst_formatx3 ; then
  echo "*** Fail: extended format for a 64-bit classic file"
  ECODE=1
fi

# Only do following test if ENABLE_CDF5 is true.
if test "x$ENABLE_CDF5" = x1 ; then
  echo "Test extended format output for a 64-bit CDF-5 classic file"
  rm -f tmp_tst_formatx3
  ${NCGEN} -k5 -b -o ./tst_formatx3.nc $srcdir/ref_tst_small.cdl
  ${NCDUMP} -K tst_formatx3.nc >tmp_tst_formatx3
  if ! grep -F '64-bit data mode=00000020' <tmp_tst_formatx3 ; then
    echo "*** Fail: extended format for a 64-bit CDF-5 classic file"
    ECODE=1
  fi
fi

rm -f tmp_tst_formatx3 tst_formatx3.nc

exit $ECODE
| true |
4506072a5744e48738564b2291948fdd53e77010 | Shell | jomare1188/NRGSC | /bash_scripts.sh | UTF-8 | 400 | 2.921875 | 3 | [
"CC0-1.0"
] | permissive | main_folder="./"
mkdir -p libraries

## Collect sample names in the folder.
## Bug fix: the old `ls $main_folder | grep Sample_` parsed ls output,
## which is fragile with unusual file names; find emits basenames
## directly and `sort` keeps the ls-like ordering.
find "$main_folder" -maxdepth 1 -name '*Sample_*' -printf '%f\n' | sort > "$main_folder"/samples2.txt

## build txgen.txt: duplicate the transcript-id column of quant.sf
## into a two-column transcript_id/gene_id mapping
cut -f1 "$main_folder"/Sample_19/quant.sf > tmp
paste tmp tmp > "$main_folder"/txgen2.txt
rm tmp
# remove first (header) line
sed -i -e "1d" "$main_folder"/txgen2.txt
# write first line
#sed -i '1s/^/transcript_id gene_id\n/' "$main_folder"/txgen2.txt
##
| true |
ec750a3a06c03dcbe950690a8942cdad8b2e55dc | Shell | dvdtknsn/easi-app | /scripts/release_static | UTF-8 | 2,050 | 3.15625 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
# Build the EASi frontend and sync it to the per-environment static S3
# bucket.  $APP_ENV selects bucket, URLs and Okta/LaunchDarkly IDs.
#
set -eu -o pipefail

case "$APP_ENV" in
  "dev")
    STATIC_S3_BUCKET="$STATIC_S3_BUCKET_DEV"
    EASI_URL="https://dev.easi.cms.gov"
    export REACT_APP_OKTA_DOMAIN="https://test.idp.idm.cms.gov"
    export REACT_APP_OKTA_CLIENT_ID="$OKTA_CLIENT_ID_DEV"
    export REACT_APP_OKTA_SERVER_ID="$OKTA_SERVER_ID_DEV"
    export REACT_APP_LD_CLIENT_ID="$LD_CLIENT_ID_DEV"
    ;;
  "impl")
    STATIC_S3_BUCKET="$STATIC_S3_BUCKET_IMPL"
    EASI_URL="https://impl.easi.cms.gov"
    export REACT_APP_OKTA_DOMAIN="https://impl.idp.idm.cms.gov"
    export REACT_APP_OKTA_CLIENT_ID="$OKTA_CLIENT_ID_IMPL"
    export REACT_APP_OKTA_SERVER_ID="$OKTA_SERVER_ID_IMPL"
    export REACT_APP_LD_CLIENT_ID="$LD_CLIENT_ID_IMPL"
    ;;
  "prod")
    STATIC_S3_BUCKET="$STATIC_S3_BUCKET_PROD"
    EASI_URL="https://easi.cms.gov"
    export REACT_APP_OKTA_DOMAIN="https://idm.cms.gov"
    export REACT_APP_OKTA_CLIENT_ID="$OKTA_CLIENT_ID_PROD"
    export REACT_APP_OKTA_SERVER_ID="$OKTA_SERVER_ID_PROD"
    export REACT_APP_LD_CLIENT_ID="$LD_CLIENT_ID_PROD"
    ;;
  *)
    echo "APP_ENV value not recognized: ${APP_ENV:-unset}"
    echo "Allowed values: 'dev', 'impl', 'prod'"
    exit 1
    ;;
esac

# Derived values consumed by the React build.
export REACT_APP_APP_ENV="$APP_ENV"
export REACT_APP_OKTA_ISSUER="${REACT_APP_OKTA_DOMAIN}/oauth2/${REACT_APP_OKTA_SERVER_ID}"
export REACT_APP_OKTA_REDIRECT_URI="${EASI_URL}/implicit/callback"
export REACT_APP_API_ADDRESS="${EASI_URL}/api/v1"
export REACT_APP_GRAPHQL_ADDRESS="${EASI_URL}/api/graph/query"
# Check if we have any access to the s3 bucket
# Since `s3 ls` returns zero even if the command failed, we assume failure if this command prints anything to stderr
# Bug fix: the previous redirection order (1>/dev/null 2>&1) routed
# stderr into /dev/null as well, so $s3_err was always empty and the
# check could never detect failure.  '2>&1 1>/dev/null' captures stderr
# while still discarding stdout.
s3_err="$(aws s3 ls "$STATIC_S3_BUCKET" 2>&1 1>/dev/null)"
if [[ -z "$s3_err" ]] ; then
    ( set -x -u ;
      yarn install
      yarn run build || exit
      aws s3 sync --no-progress --delete build/ s3://"$STATIC_S3_BUCKET"/
    )
else
    echo "+ aws s3 ls $STATIC_S3_BUCKET"
    echo "$s3_err"
    echo "--"
    echo "Error reading the S3 bucket. Are you authenticated?" 1>&2
    exit 1
fi
| true |
6534fc7bec5819140c6ad582a357dd602145598b | Shell | Yavras/systemy-operacyjne-2 | /lab3_symlinks/zad4.sh | UTF-8 | 206 | 3.46875 | 3 | [] | no_license | #!/bin/bash
# For every symbolic link directly inside the directory given as $1,
# print the `ls -l` line of its target when the target is a regular
# file (long-listing lines starting with '-').
# Fixes: quoted all path expansions (unquoted $i / $TEMP_FILE broke on
# paths containing spaces); removed the unused XXX variable.

if [ $# -eq 1 ]
then
	for i in "$1"/*; do
		if [ -L "$i" ]
		then
			TEMP_FILE=$(readlink "$i")
			ls -l "$TEMP_FILE" | grep ^-
		fi
	done
else echo "Invalid amount of arguments"
fi
| true |
07b743e0c974dcb8f70a46099bfae95b008408eb | Shell | ramezsaeed/dev_process | /scripts/jumpscale_prepare_release.sh | UTF-8 | 1,135 | 3.90625 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Prepare a JumpScale release: bump version numbers in setup.py on a
# new release branch, push it, then merge it back into master.
# Usage: $1 = current version, $2 = new version, $3 = repo path.

# current Git branch
branch=$(git symbolic-ref HEAD | sed -e 's,.*/\(.*\),\1,')

# v1.0.0, v1.5.2, etc.
versionFrom=$1
versionTo=$2
versionLabel=v$versionTo
repo=$3

# establish branch and tag name variables
masterBranch=master
releaseBranch=$versionTo

# Abort if the repo path is wrong — otherwise every git command below
# would run against whatever directory we happen to be in.
cd "$repo" || exit 1
git checkout master
git pull
echo "$repo"

# create the release branch from the master branch
git checkout -b "$releaseBranch" "$masterBranch"

# file in which to update version number
setupFile="setup.py"

# find version number assignment ("= v1.5.5" for example)
# and replace it with newly specified version number.
# Bug fix: the file used to be passed twice to each sed invocation,
# which edited it twice and clobbered the .backup copy.
sed -i.backup -E "s/(JumpScale9[A-Za-z]*)>=$versionFrom/\1>\=$versionTo/" "$setupFile"
sed -i.backup -E "s/version='[0-9.]+'/version\='$versionTo'/" "$setupFile"

# remove backup file created by sed command
rm "$setupFile.backup"

# commit version number increment
git commit -am "Incrementing version number to $versionLabel"
git push -u origin "$releaseBranch"

# merge release branch with the new version number into master
git checkout "$masterBranch"
git merge --no-ff "$releaseBranch"
git push -u origin "$masterBranch"
| true |
634e409ef5124fb82fb20c7e9cc289f0dead8b03 | Shell | archer16/supertux_md | /data/images/imb_creatures_tux_big.sh | UTF-8 | 874 | 2.5625 | 3 | [] | no_license | #!/bin/bash
#==================================================[ create pack for big tux ]==================================================
# Builds the sprite pack for "big tux" using helpers (set_path,
# import_loop, build_pack, create_palette) sourced by the caller.
set_path creatures/tux/big
palette=$target/creatures/tux/_palette.png
import_colors=24

# Frame list, in pack order.
tux_big=(
  "walk-0.png" "walk-1.png" "walk-2.png" "walk-3.png" "walk-4.png" "walk-5.png" "idle-0.png" "idle-1.png" "kick-0.png" "stand-0.png"
  "jump-0.png" "skid-0.png" "duck-0.png" "backflip-0.png" "backflip-1.png" "backflip-2.png" "backflip-3.png" "backflip-4.png" "buttjump-0.png"
)

import_loop "${tux_big[@]}"

#change to image folder
cd $target/$image_path

echo " * Building image pack"
# NOTE(review): "$line" is presumably set by import_loop — confirm.
build_pack _big_pack.png "$line"
create_palette _big_pack.png _big_palette.png

#Create demo animations
# animate backflip 11
# animate walk 6

echo "------------------------------------------------------------"
| true |
8e0adf300e0099e0d6ee1119e8dccee648bcdcd1 | Shell | flovearth/45_Some_Bash_Scripts | /03_add_fixed_number.sh | UTF-8 | 121 | 3.125 | 3 | [] | no_license | #!/bin/bash
# Read a number from the user and print its sum with a fixed base value.
base_value=5

echo "Enter a number: "
# -r keeps backslashes in the input literal instead of treating them as
# escape characters (the usual `read` footgun).
read -r number

total=$((number + base_value))
echo "Total value is: $total"
| true |
d29e8d100060c1b867cbd50ae66c4bab96b292fe | Shell | Vigilox/core-blackberry | /RCSBlackBerry/sign/encode | UTF-8 | 2,680 | 2.796875 | 3 | [] | no_license | #!/bin/sh
# rapc bbb.obf.jar
# sign bbb.cod
#
#
# C:\Program Files\Eclipse BB5.0\plugins\net.rim.ejde.componentpack4.5.0_4.5.0.21\components\bin\rapc.exe
# codename=deliverables\Standard\4.5.0\RCSBlackBerry deliverables\Standard\4.5.0\RCSBlackBerry.rapc
# -sourceroot=Z:\RCSBlackBerry\Sources\Workspace\RCSBlackBerry\src;Z:\RCSBlackBerry\Sources\Workspace\RCSBlackBerry\res
# -import=C:\Program Files\Eclipse BB5.0\plugins\net.rim.ejde.componentpack4.5.0_4.5.0.21\components\lib\net_rim_api.jar
# Z:\RCSBlackBerry\Sources\Workspace\RCSBlackBerry\bin
#
#
RIM43="/Developer/eclipse/plugins/net.rim.ejde.componentpack4.3.0_4.3.0.17/components/lib/"
RIM45="/Developer/eclipse/plugins/net.rim.ejde.componentpack4.5.0_4.5.0.21/components/lib/"
RIM47="/Developer/eclipse/plugins/net.rim.ejde.componentpack4.7.0_4.7.0.53/components/lib/"
RIM50="/Developer/eclipse/plugins/net.rim.ejde.componentpack5.0.0_5.0.0.25/components/lib/"
#RAPC="java -jar ~/Development/BlackBerry/bin/rapc.jar"
PROGUARD="/Users/zeno/Development/Java/proguard4.4/bin/proguard.sh"
#BUILD="BBB_OBF"
#COD="working/BBB.cod"
RAPC="rapc"
CJAR="cleaned.jar"
BASE="net_rim_bb_lib"
LIBBASE="net_rim_bb_lib_base"
COREJAD="$BASE.jad"
COREJAR="$BASE.jar"
CORE="$BASE.cod"
LIBJAR="$LIBBASE.jar"
LIB="$LIBBASE.cod"
OJAR="obf.jar"
CCOD="$BASE.compiled.cod"
SCOD="$BASE.signed.cod"
RIMVER=$RIM45
DIR="4.5.0"
DIST="../../dist"
RIMAPI="$RIMVER/net_rim_api.jar"
BIN=bin
SIGN="/Users/zeno/Projects/RCSBlackBerry/Sources/Workspace/RCSBlackBerry/sign/SignatureTool.jar"
SIGNPASS="KUZ93HQ1"
echo "---------------"
echo "Copy"
mkdir working
cp $DIR/$COREJAD working
cp $DIR/$BASE.csl working/$BASE.signed.csl
cp $BASE.alx working
echo "---------------"
echo "Jar"
cp $DIR/$COREJAR working
cp $DIR/$LIBJAR working/library.jar
pushd working
mkdir jar
pushd jar
jar xf ../$COREJAR
rm $BASE*
#rm -fr tests
jar cf ../$CJAR *
popd
rm $COREJAR
popd
echo "---------------"
echo "Obfuscate"
$PROGUARD @obfuscate.pro
ls -la working/$CJAR working/$OJAR
cp working/$OJAR working/$BASE.jar
echo "---------------"
echo "Preverify"
preverify -classpath $RIMAPI working/$BASE.jar
echo "---------------"
echo "Rapc"
pushd working
$RAPC encode=$BASE jad=$BASE.jad -import=$RIMAPI $BASE.jar
mv $BASE.cod $CCOD
popd
echo "---------------"
echo "Sign"
pushd working
cp $CCOD $SCOD
java -jar $SIGN -p $SIGNPASS -a -c $SCOD
ls -la $CCOD $SCOD
popd
echo "---------------"
echo "Dist"
rm $DIST/*
mkdir $DIST
cp working/$SCOD $DIST/$BASE.cod
cp $DIR/$LIB $DIST/$BASE-2.cod
pushd $DIST
mv $BASE.cod archive.zip
unzip -jLo archive.zip
rm archive.zip
popd
echo "---------------"
echo "Clean"
rm -fr working output
echo $DIST
ls -la $DIST
| true |
126d05a593ecf2885e79fcd9dc91b60ba5327ba9 | Shell | LanderU/micro-ROS-bridge_RPI | /ROS2_Cross-Compilation/cc_script.sh | UTF-8 | 1,078 | 2.75 | 3 | [] | no_license | #!/bin/bash
# Cross-compile ROS2 (crystal) for Raspberry Pi/Raspbian inside a
# Docker toolchain container.
mkdir -p ~/ros2_rpi/ros2_ws/src
cd ~/ros2_rpi/ros2_ws
wget https://raw.githubusercontent.com/ros2/ros2/crystal/ros2.repos
vcs import src < ros2.repos
cd ~/ros2_rpi
git clone https://github.com/micro-ROS/polly.git
git clone https://github.com/micro-ROS/ros2_raspbian_tools.git
cd ~/ros2_rpi/ros2_raspbian_tools
# Build the cross-compiler image from the bootstrap Dockerfile.
cat Dockerfile.bootstrap | docker build -t ros2-raspbian:crosscompiler -
./convert_raspbian_docker.py ros2-raspbian
./export_raspbian_image.py ros2-raspbian:lite ros2_dependencies_crystal.bash ros2-raspbian-rootfs.tar
# Unpack the exported Raspbian rootfs used as the cross sysroot.
mkdir -p ~/ros2_rpi/rpi-root
cd ~/ros2_rpi/ros2_raspbian_tools
sudo tar -C ~/ros2_rpi/rpi-root -xvf ros2-raspbian-rootfs.tar
patch ~/ros2_rpi/ros2_ws/src/ros/resource_retriever/libcurl_vendor/CMakeLists.txt libcurl_vendor.patch
# Run the actual cross-build with workspace, toolchains and sysroot
# mounted into the container.
docker run -it --rm \
        -v ~/ros2_rpi/polly:/polly \
        -v ~/ros2_rpi/ros2_ws:/ros2_ws \
        -v ~/ros2_rpi/ros2_raspbian_tools/build_ros2_crystal.bash:/build_ros2.bash \
        -v ~/ros2_rpi/rpi-root:/raspbian_ros2_root \
        -w /ros2_ws \
        ros2-raspbian:crosscompiler \
        bash /build_ros2.bash
| true |
3f6fe4e979d23838110335a092c070c65fc7b8fc | Shell | TourRadar/base-docker | /install.sh | UTF-8 | 2,797 | 2.65625 | 3 | [] | no_license | #!/usr/bin/env bash
#misc
yum -y install telnet wget pstree bind-utils logwatch psmisc sudo cronie git mc iproute epel-release
#disable selinux
sed -i.bak s/SELINUX=enforcing/SELINUX=disabled/ /etc/selinux/config
setenforce 0
#install PHP 7.1 from the remi repository
# (fix: this line used to read "cd cd ~", which bash rejects with
#  "too many arguments", so the downloads landed in the current directory)
cd ~
wget http://rpms.famillecollet.com/enterprise/remi-release-7.rpm
rpm -Uvh remi-release-7.rpm
yum-config-manager --enable remi
yum-config-manager --enable remi-php71
#app dependencies
yum -y install httpd mod_ssl php php-fpm php-opcache php-common php-cli php-bcmath php-mbstring php-pdo php-process php-xml php-soap php-redis php-mysql
#nodejs
yum -y install nodejs npm composer
npm install -g gulp
npm install -g bower
cd ~
wget http://repo.mysql.com/mysql-community-release-el7-5.noarch.rpm
rpm -ivh mysql-community-release-el7-5.noarch.rpm
rm -f mysql-community-release-el7-5.noarch.rpm
#install MySQL tools
yum -y install mysql
#project setup
#set timezone ;( Here should be UTC but... ehh
rm -f /etc/localtime
ln -s /usr/share/zoneinfo/Europe/Vienna /etc/localtime
#needed to deploy script
yum install java unzip -y
cd ~
mkdir -p /usr/local/share/yui-compressor/
wget https://github.com/downloads/yui/yuicompressor/yuicompressor-2.4.7.zip
unzip yuicompressor-2.4.7.zip
mv ./yuicompressor-2.4.7/build/yuicompressor-2.4.7.jar /usr/local/share/yui-compressor/yui-compressor.jar
rm -rf yuicompressor-2.4.7/
#create TR user
adduser tr
mkdir -p /etc/php_extra
#download new browscap
wget http://browscap.org/stream?q=PHP_BrowsCapINI -O /etc/php_extra/browscap.ini
#fpm config: write the whole pool file at once; the quoted delimiter keeps
#the content literal and byte-identical to the previous echo chain
TRFPMFILE=/etc/php-fpm.d/fpm-tr.conf
cat > "$TRFPMFILE" <<'EOF'
[tr]
user = tr
group = tr
listen = 127.0.0.1:9001
;because of memory leak
pm.max_requests = 100
pm = dynamic
pm.max_children = 50
pm.start_servers = 5
pm.min_spare_servers = 5
pm.max_spare_servers = 35
php_admin_value[error_log] = /var/log/php-fpm/tr-error.log
EOF
#PHPINI
TRPHPINIDILE=/etc/php.d/php-tr.ini
cat > "$TRPHPINIDILE" <<'EOF'
[PHP]
realpath_cache_size=4096K
realpath_cache_ttl=600
upload_max_filesize = 20M
post_max_size = 20M
max_file_uploads = 50

[Date]
date.timezone = "Europe/Prague"

[opcache]
opcache.max_accelerated_files = 20000
[browscap]
browscap = /etc/php_extra/browscap.ini
EOF
| true |
c20c8f56a80fadc141944e582f95dd9afa2e8644 | Shell | henrik-farre/dotfiles | /bin/xneovim | UTF-8 | 621 | 3.625 | 4 | [] | no_license | #!/bin/bash
# export NVIM_LISTEN_ADDRESS=$XDG_RUNTIME_DIR/neovim_socket
# Opens neovim in an termite window.
# Join all CLI arguments into one single-quoted token so they survive being
# embedded in the "bash -ic" command string below.
# NOTE(review): a file name containing a single quote would break this.
FILE=""
if [[ ! -z "$*" ]]; then
  FILE="'$*'"
fi
# Terminal emulator that will host nvim (alternatives kept for reference).
# TERMINAL=wezterm
TERMINAL=termite
# TERMINAL=nvim-gtk
case "$TERMINAL" in
  "termite")
    # Interactive bash (-i) so the user's shell config is loaded before nvim.
    COMMAND=$(printf '/usr/bin/bash -ic "nvim %s"' "$FILE")
    exec termite --icon=nvim.png --class="neovim-editor" --name="Neovim" -e "$COMMAND"
    ;;
  "wezterm")
    # NOTE(review): relies on NVIM_LISTEN_ADDRESS from the environment
    # (the export at the top is commented out) -- confirm it is set elsewhere.
    exec wezterm start --class "neovim-editor" -- /usr/bin/nvim --listen "$NVIM_LISTEN_ADDRESS"
    ;;
  "nvim-gtk")
    export NVIM_GTK_NO_HEADERBAR=1
    exec nvim-gtk
    ;;
esac
| true |
7768c38c36ed38f850d40fa5e3e68176bf1ba6d4 | Shell | medienlampe/eir | /_scripts/getTeamstats.sh | UTF-8 | 514 | 2.765625 | 3 | [
"MIT"
] | permissive | #!/bin/bash
TEAMID=237997
TEAMNAME=Invia
TARGETFOLDER=/root/eir/_data
# Fetch today's Folding@home team dump and decompress it (-k keeps the
# .bz2 so the trailing cleanup removes both via daily_team_summary.*).
wget https://apps.foldingathome.org/daily_team_summary.txt.bz2
bzip2 -dk daily_team_summary.txt.bz2
# Start the score file with line 2 of the dump (presumably the column
# header row); a plain ">" replaces the old rm/touch/append sequence.
head -n 2 daily_team_summary.txt | tail -n1 > teamscore.txt
# Append our team's row; fields are TAB-separated, hence grep -P for \t.
grep -P "$TEAMID\t$TEAMNAME" daily_team_summary.txt >> teamscore.txt
# TSV -> CSV -> JSON for the site's data folder.
sed 's/\t/,/g' < teamscore.txt > teamscore.csv
/usr/local/bin/any-json teamscore.csv > "$TARGETFOLDER/teamscore.json"
rm -rf teamscore.txt teamscore.csv daily_team_summary.* | true |
0a6424e7ccfa5faba1b21a2fa051bb945e14f7ef | Shell | wangyum/Anaconda | /pkgs/libgfortran-3.0.0-1/info/recipe/build.sh | UTF-8 | 443 | 2.921875 | 3 | [
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-unknown"
] | permissive | #!/bin/bash
# Copy the system libgfortran runtime into the package prefix and create
# the soname symlink next to it.  (conda-build provides $PREFIX and $ARCH.)
LIB=$PREFIX/lib
mkdir -p "$LIB"
cd "$LIB" || exit 1
if [ "$ARCH" == '64' ]; then
    cp /usr/local/lib64/libgfortran.so.3.0.0 .
    ln -s libgfortran.so.3.0.0 libgfortran.so.3
elif [ "$ARCH" == '32' ]; then
    cp /usr/local/lib/libgfortran.so.3.0.0 .
    ln -s libgfortran.so.3.0.0 libgfortran.so.3
fi
# On armv7l the library lives in the multiarch path instead.
# NOTE(review): if the 32-bit branch above already ran, this ln -s would
# fail on the existing link -- presumably ARCH is unset on armv7l builds.
if [ "$(uname -m)" == 'armv7l' ]; then
    cp /usr/lib/arm-linux-gnueabihf/libgfortran.so.3.0.0 .
    ln -s libgfortran.so.3.0.0 libgfortran.so.3
fi
| true |
4f6713b767249035dbe3cb1c0f0a5f7ac2d708b9 | Shell | RatanRSur/dotfiles | /.xinitrc | UTF-8 | 874 | 2.71875 | 3 | [] | no_license | #!/bin/bash
userresources=$HOME/.Xresources
sysresources=/etc/X11/xinit/.Xresources
# merge in defaults and keymaps
if [ -f $sysresources ]; then
    xrdb -merge $sysresources
fi
if [ -f "$userresources" ]; then
    xrdb -merge "$userresources"
fi
# start some nice programs
# Source every executable drop-in script the distro ships for X startup.
if [ -d /etc/X11/xinit/xinitrc.d ] ; then
 for f in /etc/X11/xinit/xinitrc.d/?*.sh ; do
  [ -x "$f" ] && . "$f"
 done
 unset f
fi
nm-applet&
# Apply libinput preferences to every input device id that xinput reports.
for id in $(xinput list --id-only); do
    xinput --set-prop $id "libinput Natural Scrolling Enabled" 1
    xinput --set-prop $id "libinput Accel Speed" 0.7
    xinput --set-prop $id "libinput Tapping Enabled" 1
done
xbindkeys
# NOTE(review): "keyboard" and "wallpaper" are presumably user scripts on
# PATH -- confirm they exist in the dotfiles repo.
keyboard
autorandr --change # this should imply wallpaper setting from autorandr postswitch
wallpaper
clight &
# Replace this shell with i3; verbose log goes to a per-session file.
exec i3 -V >> ~/log/i3-$(date -Iseconds).log
#export XDG_SESSION_TYPE=x11
#export GDK_BACKEND=x11
#exec gnome-session
| true |
0ffce77f9fef099e0254e92313f4ca5c9df20564 | Shell | phildkim/pcc_shell | /p3_counts/count.sh | UTF-8 | 826 | 4.25 | 4 | [] | no_license | #!/bin/bash
#
# Bash shell script to count each sentence followed by the number of words.
#
# Copyright (c), Philip D. Kim
#
# INPUT:
# What, me worry? No.
# And you?
# OUTPUT:
# 1 3
# 2 1
# 3 2
# The script takes in the user's input and then output whenever a sentence ends with (!.?) with the number of words.
# Sentence and word tallies; a sentence-ending word is counted by starting
# the next tally at 1 rather than 0.
snum=1
wcount=1
# Hyphens become word separators first, then the shell word-splits the
# whole of stdin on whitespace/newlines.
for token in $(cat | sed 's/-/ /g')
do
  case $token in
    *[.!?])
      # Terminal punctuation: emit "sentence<TAB>words" and reset.
      printf "%d\t%d\n" "$snum" "$wcount"
      wcount=1
      snum=$((snum + 1))
      ;;
    *)
      wcount=$((wcount + 1))
      ;;
  esac
done
#
# Count words in each sentence.
#
# Copyright (c) 2019, Sekhar Ravinutala.
#
# s=1; w=1
# IFS=" -"
# while read line; do
# for word in $line; do
# if [[ $word =~ [?\!.]$ ]]; then
# printf "%s\t%s\n" $s $w
# s=$[$s+1]; w=1
# else
# w=$[$w+1]
# fi
# done
# done | true |
a11603be19a9d482b3fee8ad7fa6f0d4fefce744 | Shell | esol-community/ros_rtl_kinetic | /script/build_kinetic.sh | UTF-8 | 1,013 | 3.28125 | 3 | [] | no_license | #!/bin/bash
set -e
PATH=/opt/aarch64_mcos/bin/:${PATH}
BUILD_TOOLCHAIN_PATH=/opt/aarch64_mcos/aarch64-elf-mcos/cmake/aarch64_elf_mcos_ros_toolchain.cmake
ARC_DIR=/work/share/archives/kinetic
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE:-$0}")"; pwd)"
REPO_BASE=${SCRIPT_DIR}/../../ros_rtl_kinetic
SRC_DIR_KINETIC=${REPO_BASE}/source/ros_kinetic
echo "building the ros kinetic."
if [ -e ${SRC_DIR_KINETIC}/src ]; then
rm -rf ${SRC_DIR_KINETIC}/src
fi
mkdir -p ${SRC_DIR_KINETIC}
cp -af ${REPO_BASE}/patch/* ${SRC_DIR_KINETIC}
pushd ${SRC_DIR_KINETIC} > /dev/null
tar xf ${ARC_DIR}/ros_kinetic_source.tar.xz
../../script/_set_catkin_ignore.sh
patch -u -N -t -p0 < patch_ros_kinetic_emcos.patch
rm -rf build_isolated/ install_isolated/ devel_isolated/
./src/catkin/bin/catkin_make_isolated --install --cmake-args -DCMAKE_FIND_DEBUG_MODE=1 -DCMAKE_TOOLCHAIN_FILE=${BUILD_TOOLCHAIN_PATH} -D_CMAKE_MCOS_ROS_RTL=1 > ${SRC_DIR_KINETIC}/log_ros_catkinmake.log 2>&1
popd
echo "building the ros kinetic is complete."
| true |
77dce0e1e19f3f77c383ce4df28264a114f51a87 | Shell | tma15/dotfiles | /init.zsh | UTF-8 | 1,093 | 3.53125 | 4 | [] | no_license | #!/bin/zsh
success() {
    # Print a green "[ OK ]" status line.  The message is passed as printf
    # data ("%s"), not as part of the format string, so a message containing
    # "%" or backslashes is printed literally instead of being interpreted.
    printf "\r\033[2K  [ \033[00;32mOK\033[0m ] %s\n" "$1"
}
setup_git_submodule() {
    # Initialise/refresh all nested submodules; report only on success.
    git submodule update --init --recursive && \
        success "git submodule update --init --recursive"
}
link_files() {
    # Symlink $1 -> $2; -n avoids following an existing link at the target.
    # Arguments are quoted so paths containing spaces work.
    ln -ns "$1" "$2" && success \
        "linked $1 to $2"
}
install_deno() {
    # https://deno.land/
    # Pinned to v1.32.5 via the official install script.
    curl -fsSL https://deno.land/install.sh | sh -s -- v1.32.5 && \
        success "installed deno"
}
install_dotfiles() {
    # Symlink every tracked dotfile from this repo into $HOME.
    # Prezto: https://github.com/sorin-ionescu/prezto
    # Table-driven instead of eleven copy-pasted calls: "repo-path:home-name".
    local spec src dst
    for spec in \
        "zprezto:.zprezto" \
        "zprezto/runcoms/zlogin:.zlogin" \
        "zprezto/runcoms/zlogout:.zlogout" \
        "zprezto/runcoms/zprofile:.zprofile" \
        "zprezto/runcoms/zshenv:.zshenv" \
        "pyenv:.pyenv" \
        "tmux.conf:.tmux.conf" \
        "vimrc:.vimrc" \
        "vim:.vim" \
        "zshrc:.zshrc" \
        "zpreztorc:.zpreztorc"
    do
        src=${spec%%:*}
        dst=${spec#*:}
        link_files "$(pwd)/$src" "$HOME/$dst"
    done
}
# Top-level install sequence: submodules first, then the symlinks, then
# deno only if it is not already present.
setup_git_submodule
install_dotfiles
if [ ! -e $HOME/.deno/bin/deno ]; then
    install_deno
fi
| true |
ad18a4045e75687beeaca0a073a6a04fb2b3abed | Shell | lampepfl/dotty | /project/scripts/stdlib-bootstrapped-tasty-mima.sh | UTF-8 | 1,127 | 2.9375 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
set -eux
source $(dirname $0)/cmdTestsCommon.inc.sh
TASTY_FROMAT_FILE="tasty/src/dotty/tools/tasty/TastyFormat.scala"
MINOR_TASTY_VERSION_SUPPORTED_BY_TASTY_MIMA=3
# Extract the current minor/experimental TASTy version numbers from the
# Scala source so they can be restored after the run.
MINOR_TASTY_VERSION=$(grep -oE 'val MinorVersion: Int = ([0-9]+)' $TASTY_FROMAT_FILE | grep -oE '[0-9]+')
EXPERIMENTAL_TASTY_VERSION=$(grep -oE 'val ExperimentalVersion: Int = ([0-9]+)' $TASTY_FROMAT_FILE | grep -oE '[0-9]+')
setTastyVersion() {
  # Rewrite MinorVersion ($1) and ExperimentalVersion ($2) in TastyFormat.scala.
  # The file path is now quoted; note this relies on GNU sed's -i behaviour.
  sed -i -E \
    -e "s/val MinorVersion: Int = [0-9]+/val MinorVersion: Int = $1/" \
    -e "s/val ExperimentalVersion: Int = [0-9]+/val ExperimentalVersion: Int = $2/" \
    "$TASTY_FROMAT_FILE"
}
# Pin the TASTy version TASTy-MiMa supports (experimental = 0) for the run.
setTastyVersion $MINOR_TASTY_VERSION_SUPPORTED_BY_TASTY_MIMA 0
# Run stdlib-bootstrapped/tastyMiMaReportIssues using a custom TASTy version.
# We clean before to make sure all sources are recompiled using the new TASTY version.
# We clean after to make sure no other test will use the TASTy generated with this version.
"$SBT" "clean; stdlib-bootstrapped/clean; reload; stdlib-bootstrapped/tastyMiMaReportIssues; clean; stdlib-bootstrapped/clean"
# Restore the versions that were read at the top of the script.
setTastyVersion $MINOR_TASTY_VERSION $EXPERIMENTAL_TASTY_VERSION
| true |
6019955aaf18984992ff7462f450f46bc99b0d1e | Shell | chaos-dremel/mpkg | /chroot_installer/create-livecd.sh | UTF-8 | 8,684 | 3.640625 | 4 | [] | no_license | #!/bin/bash
# This script should be run as root or fakeroot.
set -e
if [ "$DEBUG" != "" ] ; then
set -x
fi
export PATH=${scriptdir}/bin:$PATH
CWD=${scriptdir}/live-elements
# Set alias for chroot to fakechroot. Let's see if it can work such way
# Loading global config
if [ -r "/etc/mklivecd.conf" ] ; then
. /etc/mklivecd.conf
fi
# Loading local user config
if [ -r "${REAL_HOME}/.mklivecd.conf" ] ; then
. ${REAL_HOME}/.mklivecd.conf
fi
scriptdir=${scriptdir:-/usr/share/mklivecd/scripts} # This should be defined in global config at package build time in case if you use another paths
# Helper environments for ISOBUILD
filedir=${startdir}/files
plugindir=${scriptdir}/plugins
# Loading ISOBUILD
. ${startdir}/ISOBUILD
# Defining variables
ARCH=$arch
if [ "$arch" = "" -o "$arch" = "auto" ] ; then
if [ "`uname -m`" = "x86_64" ] ; then
ARCH=x86_64
else
ARCH=x86
fi
fi
LANGS="en ru"
NODE="${BUILD_ROOT}/${iso_name}-${ARCH}"
INITRD_ROOT="${NODE}/boot/initrd-tree"
LIVE_ROOT="${LIVE_BUILD_ROOT}/${iso_name}-${ARCH}"
ISO_FILENAME=${ISO_FILENAME:-${iso_name}-${ARCH}.iso}
# Cleanup
if [ "$skip_stage1" = "" ] ; then
rm -rf "$NODE"
fi
if [ "$skip_stage2" = "" ] ; then
rm -rf "$INITRD_ROOT"
rm -rf "$LIVE_ROOT"
fi
# Let's go :)
if [ "$ARCH" = "x86" ] ; then
BITS=32
LIBDIRSUFFIX=
else
BITS=64
LIBDIRSUFFIX=64
fi
LIST="${startdir}/pkglist"
# Getting online list if needed
if [ "`echo $package_list | grep ^http:`" != "" -o "`echo $package_list | grep ^ftp:`" != "" ] ; then
wget "$package_list" -O "${startdir}/pkglist"
fi
# List-tuning
# Add
if [ ! -z "$add_to_list" ] ; then
for i in $add_to_list ; do
echo $i >> ${LIST}
done
fi
# Remove
if [ ! -z "$remove_from_list" ] ; then
for i in $remove_from_list ; do
sed -i "s/^$i$//g" $LIST
done
fi
# Installation
if [ "$skip_stage1" = "" ] ; then
ARCH=$ARCH LIST="$LIST" NODE="$NODE" REPO="$REPO" ${scriptdir}/install_virtual_machine.sh
# Copy language file switcher to system
cp ${CWD}/langswitch ${NODE}/etc/init.d/
chmod 755 ${NODE}/etc/init.d/langswitch
# Copy video driver switcher to system
cp ${CWD}/videoswitch ${NODE}/etc/init.d/
chmod 755 ${NODE}/etc/init.d/videoswitch
mkdir -p ${NODE}/etc/X11/xorg_drivers
for i in vesa nv nouveau fbdev modesetting ; do
cp ${CWD}/20-$i.conf ${NODE}/etc/X11/xorg_drivers/
done
NODE="$NODE" ${scriptdir}/add_default_services.sh
fi
# Rip out all documentation, includes and static libs
if [ "$remove_docs" = "1" -o "$do_minimize" = "1" ] ; then
rm -rf $NODE/usr/doc
rm -rf $NODE/usr/share/gtk-doc
rm -rf $NODE/usr/share/doc
fi
if [ "$remove_devel" = "1" -o "$do_minimize" = "1" ] ; then
rm -rf $NODE/usr/include
rm -rf $NODE/usr/lib/*.a
rm -rf $NODE/usr/lib/*.la
if [ -d $NODE/usr/lib64 ] ; then
rm -rf $NODE/usr/lib64/*.a
rm -rf $NODE/usr/lib64/*.la
fi
rm -rf $NODE/lib/*.a
if [ -d $NODE/lib64 ] ; then
rm -rf $NODE/lib64/*.a
fi
fi
if [ "$remove_src" = "1" -o "$do_minimize" = "1" ] ; then
rm -rf $NODE/usr/src/SPKG
rm -rf $NODE/usr/src/BuildTrees
rm -rf $NODE/usr/src/SlackBuilds
fi
# Move packages if requested
if [ "$include_used_packages" = "1" ] ; then
mv ${NODE}/var/mpkg/cache ${LIVE_ROOT}/repository
mkdir -p ${LIVE_ROOT}/repository/setup_variants
cat ${LIST} > ${LIVE_ROOT}/repository/setup_variants/LIVE.list
echo "desc: Live system" > ${LIVE_ROOT}/repository/setup_variants/LIVE.desc
echo "full: Install system like this LiveCD" >> ${LIVE_ROOT}/repository/setup_variants/LIVE.desc
echo "/repository/" > ${LIVE_ROOT}/.repository
echo "AGILIA_LIVE" > ${LIVE_ROOT}/.volume_id
mpkg-index ${LIVE_ROOT}/repository
fi
# Cache has to be removed anyway.
rm -rf $NODE/var/mpkg/cache
mkdir -p ${NODE}/var/mpkg/cache
# Copy root stuff. Login as agilia with no password.
# cat $CWD/shadow > $NODE/etc/shadow
# cat $CWD/passwd > $NODE/etc/passwd
cat $CWD/fstab > $NODE/etc/fstab
# Copy X11 keymap
mkdir -p ${NODE}/etc/X11/xorg.conf.d
mkdir -p ${NODE}/etc/X11/xorg_lang
for i in $LANGS ; do
cat $CWD/10-keymap.conf.$i > ${NODE}/etc/X11/xorg_lang/10-keymap.conf.$i
done
# Default symlink to russian
( cd ${NODE}/etc/X11/xorg.conf.d ; ln -s ../10-keymap.conf.ru 10-keymap.conf )
# Set hostname
hostname=${hostname:-agilia}
sed -i s/localhost/$hostname/g ${NODE}/etc/conf.d/hostname
sed -i s/darkstar/$hostname/g ${NODE}/etc/hosts
# Copy patched lang.sh to system
cat ${CWD}/lang.sh > ${NODE}/etc/profile.d/lang.sh
# Remove xinitrc if any and recreate it:
if [ -d ${NODE}/etc/X11/xinit ] ; then
rm -f ${NODE}/etc/X11/xinit/xinitrc
( cd ${NODE}/etc/X11/xinit
for i in xinitrc.* ; do
ln -sf $i xinitrc
done
)
fi
# Copy skel to root dir
rsync -arvh $NODE/etc/skel/ $NODE/root/
# Set root password if defined by ISOBUILD
if [ "$root_password" = "" ] ; then
if [ "$empty_root_password" = "" ] ; then
root_password=root
fi
fi
# Setting root password.
if [ "$empty_root_password" = "" ] ; then
echo -ne "$root_password\n$root_password\n" | chroot $NODE passwd root
else
chroot $NODE passwd -d root
fi
# Add standard user. If not specified, user will be agilia/agilia
if [ "$no_user" = "" ] ; then
if [ "$user_name" = "" ] ; then
user_name=agilia
fi
if [ "$user_password" = "" ] ; then
if [ "$empty_user_password" = "" ] ; then
user_password=agilia
fi
fi
user_groups=${user_groups:-audio,cdrom,floppy,video,netdev,plugdev,power}
if [ "$pseudoroot_user" = "" ] ; then
chroot $NODE /usr/sbin/useradd -d /home/$user_name -m -g users -G $user_groups -s /bin/bash $user_name
else
chroot $NODE /usr/sbin/useradd -o -u 0 -g 0 -d /root -M -s /bin/bash $user_name
fi
if [ "$empty_user_password" = "" ] ; then
echo -ne "$user_password\n$user_password\n" | chroot $NODE passwd $user_name
else
chroot $NODE passwd -d $user_name
fi
fi
# Runlevel: default is 4, but user may specify another one
RUNLEVEL=${default_runlevel:-4}
sed -i s/id:3:initdefault/id:$RUNLEVEL:initdefault/g $NODE/etc/inittab
# Custom actions. May vary for different live systems
custom_actions
OUTPUT=$LIVE_ROOT/fs${BITS}
mkdir -p $OUTPUT
# Creating sfs files
ARCH=$ARCH OUTPUT=$OUTPUT NODE=$NODE COMPRESSOR=gzip BLOCK_SIZE=65536 $CWD/make_rootfs.sh
# Now, initrd
#mkdir -p $INITRD_ROOT
#( cd $INITRD_ROOT && zcat $CWD/initrd$BITS.img | cpio -div )
# Copy kernel modules
cd $NODE/lib/modules
KERNEL_VER=`ls`
MOD_PATH=lib/modules/$KERNEL_VER
chroot $NODE /sbin/mkinitrd -o /boot/initrd$BITS.img -k $KERNEL_VER
cp $NODE/lib$LIBDIRSUFFIX/libm*.so* $INITRD_ROOT/lib$LIBDIRSUFFIX
cp $NODE/lib$LIBDIRSUFFIX/libc*.so* $INITRD_ROOT/lib$LIBDIRSUFFIX
cat $CWD/init > $INITRD_ROOT/init
cd -
rm -rf $INITRD_ROOT/lib/modules
mkdir -p $INITRD_ROOT/$MOD_PATH/
cp $NODE/$MOD_PATH/kernel/fs/squashfs/squashfs.ko $INITRD_ROOT/$MOD_PATH/
cp $NODE/$MOD_PATH/kernel/fs/aufs/aufs.ko $INITRD_ROOT/$MOD_PATH/
cp $NODE/$MOD_PATH/kernel/drivers/virtio/virtio.ko $INITRD_ROOT/$MOD_PATH/
cp $NODE/$MOD_PATH/kernel/drivers/block/virtio_blk.ko $INITRD_ROOT/$MOD_PATH/
rm $INITRD_ROOT/load_kernel_modules
# Generate load_kernel_modules script
for i in "squashfs aufs virtio virtio_blk" ; do
echo "insmod /lib/modules/$KERNEL_VER/$i" >> $INITRD_ROOT/load_kernel_modules
done
# Copy kernel image
mkdir -p $LIVE_ROOT/boot/
cp $NODE/boot/vmlinuz-$KERNEL_VER $LIVE_ROOT/boot/vmlinuz$BITS
# Generating initrd image
#mkinitrd -s $INITRD_ROOT -o $LIVE_ROOT/boot/initrd$BITS.img -k $KERNEL_VER
chroot $NODE /sbin/mkinitrd -o /boot/initrd$BITS.img -k $KERNEL_VER
cp $NODE/boot/initrd$BITS.img $LIVE_ROOT/boot/initrd$BITS.img
# Copying isolinux configs
mkdir -p $LIVE_ROOT/isolinux
cat $CWD/isolinux.cfg | sed s/@ARCH@/$BITS/g | sed "s/@ISO_TITLE@/${iso_title}/g" > $LIVE_ROOT/isolinux/isolinux.cfg
# Multilanguage stuff
for i in $LANGS ; do
cat $CWD/$i.cfg | sed s/@ARCH@/$BITS/g | sed "s/@ISO_TITLE@/${iso_title}/g" > $LIVE_ROOT/isolinux/$i.cfg
done
# ISOLINUX binaries
for i in linux.c32 vesamenu.c32 vesainfo.c32 isolinux.bin chain.c32 ; do
cp /usr/lib${LIBDIRSUFFIX}/syslinux/$i $LIVE_ROOT/isolinux/
done
cp $CWD/grub640.png $LIVE_ROOT/isolinux/
cp $CWD/koi8u_8x16.psf $LIVE_ROOT/isolinux/
# Dracut attempt
#mkdir -p $LIVE_ROOT/LiveOS
#cp $LIVE_ROOT/fs$BITS/rootfs.sfs $LIVE_ROOT/LiveOS/squashfs.img
#cp $NODE/boot/initrd-fallback-$KERNEL_VER.img $LIVE_ROOT/boot/initrd1.img
# END DRACUT
# Pre-iso cleanup
if [ "$no_cleanup" = "" ] ; then
rm -rf "$NODE"
rm -rf "$INITRD_ROOT"
fi
if [ "$no_iso" = "" ] ; then
# Creating ISO
mkdir -p $ISO_OUTPUT
rm -f $ISO_OUTPUT/$ISO_FILENAME
ISO_FILE=$ISO_OUTPUT/$ISO_FILENAME ISO_ROOT=$LIVE_ROOT $CWD/makeiso.sh
fi
# Post-build cleanup
if [ "$no_cleanup" = "" -a "$keep_liveroot" = "" ] ; then
rm -rf "$LIVE_ROOT"
fi
set +e
if [ "$DEBUG" != "" ] ; then
set +x
fi
| true |
b3fb95ab1f388d4b91563fe6ab3aa275e8535425 | Shell | mapbox/mapbox-navigation-ios | /scripts/update-version.sh | UTF-8 | 3,662 | 3.625 | 4 | [
"MIT",
"ISC",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | #!/usr/bin/env bash
# Strict mode: abort on errors, unset variables and mid-pipeline failures.
set -e
set -o pipefail
set -u
# Print a bold cyan progress line to stderr.  printf is used instead of
# "echo -e" so a message containing "%" or a leading "-n" cannot be
# misinterpreted; the message is data ("%s"), not part of the format.
function step { >&2 printf '\033[1m\033[36m* %s\033[0m\n' "$*"; }
# Reset terminal attributes on stderr (invoked from the EXIT trap below).
function finish { printf '\033[0m' >&2; }
function bump_xcode_proj_versions {
    # Increment every target's build number, then set the marketing
    # (user-facing) version to $SHORT_VERSION.
    xcrun agvtool bump -all
    xcrun agvtool new-marketing-version "${SHORT_VERSION}"
}
function agvtool_on {
    # agvtool refuses to run when several .xcodeproj bundles are present,
    # so temporarily park all of them in a throwaway dir, restore only the
    # one named in $1, bump it, then move everything back.
    local PROJ_NAME=$1
    local TMP_DIR=$(uuidgen)
    mkdir $TMP_DIR
    mv *.xcodeproj $TMP_DIR
    mv $TMP_DIR/$PROJ_NAME ./
    bump_xcode_proj_versions
    mv $TMP_DIR/*.xcodeproj ./
    rm -rf $TMP_DIR
}
trap finish EXIT
if [ $# -eq 0 ]; then
    echo "Usage: v<semantic version>"
    exit 1
fi
# Derive the version variants: full semver (leading "v" stripped), the
# short x.y.z part (prerelease suffix dropped) and the x.y minor prefix.
SEM_VERSION=$1
SEM_VERSION=${SEM_VERSION/#v}
SHORT_VERSION=${SEM_VERSION%-*}
MINOR_VERSION=${SEM_VERSION%.*}
YEAR=$(date '+%Y')
step "Version ${SEM_VERSION}"
step "Updating Xcode targets to version ${SHORT_VERSION}…"
# agvtool doesn't work when there are multiple xcodeproj in the directory. So, we temporarily move xcodeproj files aside to fulfill agvtool requirements.
agvtool_on MapboxNavigation-SPM.xcodeproj
agvtool_on MapboxNavigation.xcodeproj
step "Updating CocoaPods podspecs to version ${SEM_VERSION}…"
# Note: sed -i '' is the BSD/macOS in-place syntax used throughout.
find . -type f -name '*.podspec' -exec sed -i '' "s/^ *s.version *=.*$/  s.version = '${SEM_VERSION}'/" {} +
# For prereleases (x.y.z-…), also publish "-pre" variants of the podspecs.
if [[ $SHORT_VERSION != $SEM_VERSION ]]; then
    step "Updating prerelease CocoaPods podspecs…"
    cp MapboxCoreNavigation.podspec MapboxCoreNavigation-pre.podspec
    cp MapboxNavigation.podspec MapboxNavigation-pre.podspec
    sed -i '' -E "s/(\.name *= *\"[^\"]+)\"/\1-pre\"/g; s/(\.dependency *\"MapboxCoreNavigation)\"/\1-pre\"/g" *-pre.podspec
fi
step "Updating CocoaPods installation test fixture…"
cd Tests/CocoaPodsTest/PodInstall/
pod update
cd -
# Refresh the bundled Info.plist copies with package-specific bundle names.
cd Sources/MapboxCoreNavigation/
cp Info.plist Resources/MBXInfo.plist
plutil -replace CFBundleName -string 'MapboxCoreNavigation' Resources/MBXInfo.plist
cd -
cd Sources/MapboxNavigation/
cp Info.plist Resources/MBXInfo.plist
plutil -replace CFBundleName -string 'MapboxNavigation' Resources/MBXInfo.plist
cd -
step "Updating changelog to version ${SHORT_VERSION}…"
sed -i '' -E "s/## *main/## ${SHORT_VERSION}/g" CHANGELOG.md
# Skip updating the installation instructions for patch releases or prereleases.
if [[ $SHORT_VERSION == $SEM_VERSION && $SHORT_VERSION == *.0 ]]; then
    step "Updating readmes to version ${SEM_VERSION}…"
    sed -i '' -E "s/~> *[^']+/~> ${MINOR_VERSION}/g; s/from: \"*[^\"]+/from: \"${SEM_VERSION}/g; s/\`[^\`]+\` as the minimum version/\`${SEM_VERSION}\` as the minimum version/g" README.md custom-navigation.md
elif [[ $SHORT_VERSION != $SEM_VERSION ]]; then
    step "Updating readmes to version ${SEM_VERSION}…"
    sed -i '' -E "s/:tag => 'v[^']+'/:tag => 'v${SEM_VERSION}'/g; s/\"mapbox\/mapbox-navigation-ios\" \"v[^\"]+\"/\"mapbox\/mapbox-navigation-ios\" \"v${SEM_VERSION}\"/g; s/\.exact\\(\"*[^\"]+/.exact(\"${SEM_VERSION}/g" README.md custom-navigation.md
fi
step "Updating copyright year to ${YEAR}…"
sed -i '' -E "s/© ([0-9]{4})[–-][0-9]{4}/© \\1–${YEAR}/g" LICENSE.md docs/jazzy.yml
# Push the version bump on its own branch and open a PR against either
# main (prereleases) or the matching release-vX.Y branch.
BRANCH_NAME="update-version-${SEM_VERSION}"
git checkout -b $BRANCH_NAME
git add .
git commit -m "Update version ${SEM_VERSION}"
git push origin $BRANCH_NAME
if [[ $SEM_VERSION =~ "alpha" || $SEM_VERSION =~ "beta" ]]; then
    BASE_BRANCH_NAME="main"
else
    MAJOR=${SEM_VERSION%%.*}
    MINOR_TMP=${SEM_VERSION#*.}
    MINOR=${MINOR_TMP%%.*}
    BASE_BRANCH_NAME="release-v${MAJOR}.${MINOR}"
fi
brew install gh
GITHUB_TOKEN=$GITHUB_WRITER_TOKEN gh pr create \
    --title "Release v${SEM_VERSION}" \
    --body "Bump version to ${SEM_VERSION}" \
    --base $BASE_BRANCH_NAME \
    --head $BRANCH_NAME
| true |
77aa5b3f7bd39f2b12c30a80a01f5191bbd3c801 | Shell | computersalat/fwbuilder | /build_mxe-w32.sh | UTF-8 | 1,346 | 3.5 | 4 | [] | no_license | #!/bin/bash
# Put the MXE cross-compilation tools first on PATH.
export PATH=/usr/lib/mxe/usr/bin:$PATH
# Dispatch each CLI argument to the matching build stage; "all" runs the
# full configure/compile/package pipeline and stops.
main()
{
    if [ $# -eq 0 ]; then
        usage
        exit
    fi
    while [ "$1" != "" ]; do
        case $1 in
            -h | --help | help | usage )
                usage
                exit
                ;;
            all )
                configure
                compile
                package
                exit
                ;;
            configure )
                configure
                ;;
            compile )
                compile
                ;;
            package )
                package
                ;;
            * )
                usage
                exit 1
                ;;
        esac
        shift
    done
}
usage()
{
    # One-line CLI help; %s carries the actual invocation name ($0).
    printf 'Usage: %s [ all | configure | compile | package ]\n' "$0"
}
configure()
{
    echo "==> Configuring"
    # Register the MXE cross toolchain and its Qt build with qbs.
    qbs setup-toolchains /usr/lib/mxe/usr/bin/i686-w64-mingw32.shared-g++ mingw32
    # Idiomatic status check (was: run then test $?); as before only the
    # second qbs command's status is acted upon.
    if qbs setup-qt /usr/lib/mxe/usr/i686-w64-mingw32.shared/qt5/bin/qmake qt; then
        echo "==> Done configuring"
    else
        exit 1
    fi
}
compile()
{
    echo "==> Compiling"
    # Idiomatic status check (was: run then test $?).
    if qbs release profile:qt; then
        echo "==> Done compiling"
    else
        exit 1
    fi
}
package()
{
    echo "==> Packaging"
    # Build the NSIS Windows installer from the staged install root.
    if makensis release/install-root/fwbuilder.nsi; then
        echo "==> Done packaging"
    else
        exit 1
    fi
}
main "$@"
| true |
7d36616c5378fb90eb6f99879e8321cd2cb28452 | Shell | etiram/test | /testcommit.sh | UTF-8 | 115 | 2.90625 | 3 | [] | no_license | #!/bin/bash
# Stage everything, commit with the first CLI argument as the message
# (quoted, so multi-word messages survive) or "DEFAULT" otherwise, then push.
git add --all
if [ $# -ge 1 ] ; then
    git commit -m "$1"
else
    git commit -m "DEFAULT"
fi
git push
| true |
d7709a98be78eba8c88311677202625f44b8faa7 | Shell | A2PhotonicSensors/skimage_edge_production | /Utilities/install.sh | UTF-8 | 2,785 | 3.578125 | 4 | [] | no_license | #!/usr/bin/env bash
# Log stdout and stderr in installation.log on home directory
echo "Starting installation script on remote odroid. . ."
# Prime the sudo credential cache up front so the remaining commands do not
# each prompt for a password.  `sudo -v` is the idiomatic form; the previous
# `sudo false` cached credentials too, but always exited non-zero.
echo "Sudo-ing"
sudo -v
echo " Loading skimage variables . . . "
source $(dirname $BASH_SOURCE)/skimage_variables.env
echo "Removing $ROOT_DIR/$SOURCE_DIR"
sudo rm -rf "$ROOT_DIR/$SOURCE_DIR"
#echo "Setting time zone"
#sudo timedatectl set-timezone $TZ
sudo apt-get -y update
sudo apt-get -y upgrade
sudo apt-get -y install \
    apt-transport-https \
    ca-certificates \
    curl \
    gnupg-agent \
    software-properties-common \
    inotify-tools \
    git
echo "Installing docker"
# Remove any distro-packaged docker before installing from docker.com.
sudo apt-get -y remove docker docker-engine docker.io containerd runc
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
# Pick the repository architecture: amd64 on x86_64, armhf on the odroid.
if [ `uname -m` = "x86_64" ]
then
    sudo add-apt-repository \
    "deb [arch=amd64] https://download.docker.com/linux/ubuntu \
    $(lsb_release -cs) \
    stable"
else
    sudo add-apt-repository \
    "deb [arch=armhf] https://download.docker.com/linux/ubuntu \
    $(lsb_release -cs) \
    stable"
fi
sudo apt-get -y update
sudo apt-get -y install docker-ce docker-ce-cli containerd.io
sudo groupadd docker
sudo usermod -aG docker $USER
echo "Installing docker-compose"
#sudo apt-get -y install docker-compose
# Pinned docker-compose 1.17.1 binary straight from GitHub releases.
sudo curl -L "https://github.com/docker/compose/releases/download/1.17.1/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
sudo chmod +x /usr/local/bin/docker-compose
echo "Pulling docker image"
sudo docker pull $DOCKER_IMAGE
echo "Making odroid directory "
sudo mkdir -p $ROOT_DIR/$SOURCE_DIR
sudo chown -hR $USER:users $ROOT_DIR
# clone Github repo
echo "Cloning into github repo $GIT_REPO"
git clone $GIT_REPO $ROOT_DIR/$SOURCE_DIR
echo "Github repo has been pulled"
echo "Making data directory "
mkdir -p $ROOT_DIR/$SOURCE_DIR/data
echo "Copying default skimage_parameters.xlsx' from /docs to /data"
cp $ROOT_DIR/$SOURCE_DIR/docs/skimage_parameters.xlsx $ROOT_DIR/$SOURCE_DIR/data/skimage_parameters.xlsx
echo "Copying default my_id.txt' from /docs to /data"
cp $ROOT_DIR/$SOURCE_DIR/docs/my_id.txt $ROOT_DIR/$SOURCE_DIR/data/my_id.txt
echo "Making Logs_SKIMAGE directory if it doesn't already exist"
mkdir -p $ROOT_DIR/$SOURCE_DIR/$SKIMAGE_LOGS_DIR
echo "Setting execute permissions on skimage.sh"
chmod +x $ROOT_DIR/$SOURCE_DIR/skimage.sh
echo "Copying skimage_watchdog.service to /lib/systemd/system"
sudo cp $ROOT_DIR/$SOURCE_DIR/Utilities/skimage_watchdog.service /lib/systemd/system
echo "Reloading systemd daemon and enabling skimage_watchdog service"
sudo systemctl daemon-reload
sudo systemctl enable skimage_watchdog.service
echo "Cleaning apt"
sudo apt -y autoremove | true |
8604214e52d787641423e40a886b4c4f55c81287 | Shell | infothrill/etoy-m221e-gate | /m221egate/start.sh | UTF-8 | 329 | 2.890625 | 3 | [] | no_license | #!/usr/bin/env bash
export PYTHONPATH=$PYTHONPATH:$HOME/src/scrambler-droplet/src/
echo $PYTHONPATH
# "bg" mode: run the gate with the production config, detached, from
# $HOME/var; any other invocation runs it in the foreground with defaults.
if [ "x${1}" == "xbg" ]
then
    # Remember the script's directory before changing into the work dir.
    d=$PWD
    echo "Starting with production config in bg"
    mkdir -p $HOME/var
    cd $HOME/var/
    python $d/start-m221egate.py $d/prod.cfg > /dev/null 2>&1 < /dev/zero &
else
    python start-m221egate.py
fi
| true |
79cd1add9e4955d38134ecf249d97bcf4fd084ca | Shell | nexusventuri/advent_of_code_2018 | /runner.sh | UTF-8 | 366 | 3.53125 | 4 | [] | no_license | #!/bin/bash
echo "Here all the solutions implemented:"
for i in $(find . -maxdepth 1 -type d | grep -v '.git\|\.$')
do
echo "$(basename $i)"
done
echo "Which solution would you like to see?"
read solution_folder
for solution_executable in $(find ./$solution_folder/solution.*)
do
echo "Solution from file '$solution_executable':"
$solution_executable
done
| true |
2581b1d12792fa93f8d8818569788b8ad9c9cc72 | Shell | loleg/dribdat | /deploy-stackscript.sh | UTF-8 | 3,235 | 3.5 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# This is a deployment script for an Ubuntu VPS at Linode.com
# <UDF name="FQDN" Label="Fully qualified domain" example="dribdat.example.com" />
# <UDF name="TIME_ZONE" Label="Server time zone" example="Europe/Zurich" />
# Logs: tail -f /var/log/stackscript.log
# Logs: cat /var/log/stackscript.log
# Log to /var/log/stackscript.log for future troubleshooting
# Logging set up
# Mirror all of this script's stdout and stderr into the log file via tee.
exec 1> >(tee -a "/var/log/stackscript.log") 2>&1
function log {
  # Timestamped progress marker: "### <message> -- MM/DD/YY HH:MM:SS".
  printf '### %s -- %s\n' "$1" "$(date '+%D %T')"
}
# Common bash functions
# "<ssinclude …>" is replaced by Linode's StackScript preprocessor before
# the script runs; it is not shell redirection.
source <ssinclude StackScriptID=1>
log "Common lib loaded"
# Apply harden script
source <ssinclude StackScriptID=394223>
# Update machine
log "Configuring System Updates"
apt-get -o Acquire::ForceIPv4=true update -y
DEBIAN_FRONTEND=noninteractive apt-get -y -o DPkg::options::="--force-confdef" -o DPkg::options::="--force-confold" install grub-pc
apt-get -o Acquire::ForceIPv4=true update -y
## Set hostname, configure apt and perform update/upgrade
log "Setting hostname"
IP=`hostname -I | awk '{print$1}'`
hostnamectl set-hostname $FQDN
echo $IP $FQDN >> /etc/hosts
log "Updating .."
export DEBIAN_FRONTEND=noninteractive
apt-get update -y
## Remove older installations and get set for Docker install
log "Getting ready to install Docker"
sudo apt-get remove docker docker-engine docker.io containerd runc
sudo apt-get update
sudo apt-get install -y \
    apt-transport-https \
    ca-certificates \
    curl \
    make \
    gnupg-agent \
    software-properties-common \
    apache2-utils
log "Installing Docker Engine for $lsb_dist"
# Distro id (e.g. "ubuntu"), lowercased, selects the Docker apt repo below.
lsb_dist="$(. /etc/os-release && echo "$ID")"
lsb_dist="$(echo "$lsb_dist" | tr '[:upper:]' '[:lower:]')"
## Add Docker’s official GPG key
curl -fsSL "https://download.docker.com/linux/$lsb_dist/gpg" | sudo apt-key add -
## Install stable docker as daemon
add-apt-repository \
   "deb [arch=amd64] https://download.docker.com/linux/$lsb_dist \
   $(lsb_release -cs) \
   stable"
apt-get update
apt-get install -y docker-ce docker-ce-cli docker-compose containerd.io
systemctl enable docker
## Set up fail2ban
log "Installing fail2ban"
apt-get install fail2ban -y
# .local copies override the packaged .conf files and survive upgrades.
cd /etc/fail2ban
cp fail2ban.conf fail2ban.local
cp jail.conf jail.local
systemctl start fail2ban
systemctl enable fail2ban
## Set up firewall with defaults ports
log "Configuring firewall"
apt-get install ufw -y
# Default-deny inbound; allow only ssh/http/https.
ufw default allow outgoing
ufw default deny incoming
ufw allow ssh
ufw allow https
ufw allow http
ufw enable
systemctl enable ufw
ufw logging off
## ----------------------------------------------
## Install & configure app
log "Installing dribdat"
mkdir -p /srv
cd /srv
# Generate the app's .env: two random secrets plus the public server URL.
cat <<END >.env
DRIBDAT_SECRET=`dd bs=32 count=1 if="/dev/urandom" | base64 | tr +/ _.`
DRIBDAT_APIKEY=`dd bs=16 count=1 if="/dev/urandom" | base64 | tr +/ _.`
SERVER_URL=$FQDN
END
# Commence installation
git clone https://github.com/dribdat/dribdat.git
mv .env dribdat
cd /srv/dribdat
log "Starting cloud deployment via docker-compose"
# Backgrounded so the StackScript can finish while containers come up.
docker-compose --env-file .env -f docker-compose.yml up -d &
# Open http://$FQDN:1323/init to configure your server
log "After a minute, open: http://$FQDN:1323/init"
## ----------------------------------------------
echo "Installation complete!"
| true |
e1cdd5cbccb0322c8c6097050ab3c54728086b90 | Shell | juselius/kubernetes-actions-runner | /entrypoint.sh | UTF-8 | 1,219 | 3.40625 | 3 | [
"MIT"
] | permissive | #!/bin/bash
set -e
# Prepare the runner's working copy: take ownership of the mount, unpack
# the runner release archive, then remove it.
sudo chown runner:runner .
tar fxz ../runner.tar.gz
sudo rm ../runner.tar.gz
RUNNER_WORKDIR=$HOME/_work
mkdir -p $RUNNER_WORKDIR
[ ! -f auth.json ] && echo "ERROR: No auth config!" && exit 1
# Obtain a registration token either as a GitHub App ("app") or with a
# token scoped to a single repository ("token").
case ${GITHUB_AUTH} in
    app)
        [ ! -f auth.pem ] && echo "ERROR: No pem file!" && exit 1
        export RUNNER_TOKEN=$(./Auth/GetRunnerToken --config auth.json) ;;
    token)
        # NOTE(review): ${GITHUB_REPOSITORY} is unquoted inside [ -z ];
        # it happens to work for empty/unset values, but quoting is safer.
        [ -z ${GITHUB_REPOSITORY} ] && echo "ERROR: No repository!" && exit 1
        export RUNNER_TOKEN=$(./Auth/GetRunnerToken --config auth.json --token --repository ${GITHUB_REPOSITORY}) ;;
    *) echo "ERROR: Guru meditation, unknown error" && exit 1 ;;
esac
if [ ! -z "$LABELS" ]; then
    LABELS="--labels $LABELS"
fi
# Register against a repository when one is given, otherwise the owner/org.
if [ -z "$GITHUB_REPOSITORY" ]; then
    GITHUB_URL=https://github.com/${GITHUB_OWNER}
else
    GITHUB_URL=https://github.com/${GITHUB_REPOSITORY}
fi
./config.sh \
    --name $(hostname) $LABELS \
    --token ${RUNNER_TOKEN} \
    --url ${GITHUB_URL} \
    --work ${RUNNER_WORKDIR} \
    --unattended \
    --replace
# Deregistration command used later by the signal trap below.
echo "$HOME/config.sh remove --unattended --token ${RUNNER_TOKEN}" > $HOME/remove.sh
remove() {
    /bin/sh $HOME/remove.sh
}
# Deregister the runner when the container is signalled.  (Note: SIGKILL (9)
# cannot actually be trapped; listing it here is harmless.)
trap remove 1 2 3 6 9 11 15
# Hand off to the runner, forwarding each container argument separately.
# FIX: "$@" preserves argument boundaries; the previous "$*" collapsed all
# arguments into a single word.
exec ./run.sh "$@"
| true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.