blob_id stringlengths 40 40 | language stringclasses 1 value | repo_name stringlengths 4 115 | path stringlengths 2 970 | src_encoding stringclasses 28 values | length_bytes int64 31 5.38M | score float64 2.52 5.28 | int_score int64 3 5 | detected_licenses listlengths 0 161 | license_type stringclasses 2 values | text stringlengths 31 5.39M | download_success bool 1 class |
|---|---|---|---|---|---|---|---|---|---|---|---|
43159073df972a6d5d6c369cfef948a351e6a775 | Shell | fhaoquan/androidTools | /logtool/git_push.sh | UTF-8 | 870 | 3.796875 | 4 | [] | no_license | #!/bin/bash
# Push the current branch to a named remote, translating the remote's
# configured "projectname" (remote.<name>.projectname in git config)
# into an ssh:// URL on the fixed upstream host.
#
# Usage:
#   git_push.sh                      -> plain "git push"
#   git_push.sh <remote> [options]   -> push to ssh://$up_remote/<project>
echo "========================================="
echo " git_push.sh Script V3.0"
echo "========================================="

# Host that serves every remote repository this wrapper knows about.
up_remote=172.29.0.92

# No arguments: run a plain "git push" and propagate its exit status.
# (The original script exited 1 here, reporting failure even when the
# push succeeded.)
if [ "$#" -eq 0 ];then
	git push
	exit $?
fi

isRepository=false
command="git push"
index=1
for arg in "$@"
do
	echo "Arg #$index = $arg"
	let index+=1
	# The first argument that contains no "-" is treated as the remote name;
	# everything else is forwarded to git push verbatim.
	if [ -z "$(echo $arg | grep "-")" ] && [ $isRepository == false ]; then
		isRepository=true
		if [ -n "$(git remote show | grep $arg)" ];then
			# Look up remote.<name>.projectname and build the ssh URL.
			projectname_pattern="remote.$arg.projectname"
			remote_name=$(git config --list | awk -F "=" '{if($1=="'$projectname_pattern'") print $2}')
			remote_url="ssh://$up_remote/$remote_name"
			command="$command $remote_url"
		else
			echo "fatal: can not found the remote repository '$arg'"
			exit 1;
		fi
	else
		command="$command $arg"
	fi
done

echo "execute command: $command"
$command
# Propagate git's exit status (the original always exited 0, hiding
# push failures from callers).
exit $?
| true |
875a5bb0edfe542879dff826048117c405c97699 | Shell | zchee/zsh-default-completions | /src/Unix/Command/_abcde | UTF-8 | 2,457 | 2.90625 | 3 | [] | no_license | #compdef abcde
# Helper: complete the comma-separated field list accepted by abcde's -s
# option. Only defined if not already provided elsewhere.
(( $+functions[_abcde_fields] )) ||
_abcde_fields(){
_values -s , field year genre
}
# Helper: complete the comma-separated action list accepted by abcde's -a
# option. Only defined if not already provided elsewhere.
(( $+functions[_abcde_actions] )) ||
_abcde_actions(){
_values -s , action cddb cue read getalbumart embedalbumart normalize encode tag move replaygain playlist clean
}
# Option specifications for abcde; -s lets single-letter options stack.
_arguments -s \
'(-t -T -p)-1[encode the whole CD in a single file]' \
'-a[comma-delimited list of actions to perform]:action:_abcde_actions' \
'-b[enable batch mode normalization]' \
'-B[enable automatic embedding of album art with certain containers]' \
'-c[specify an additional configuration file to parse]:config:_files' \
'-C[resume a session for discid when you no longer have the CD available]:discid' \
'-d[CD-ROM block device that contains audio tracks to be read]:cd-rom-file:_files' \
'-D[capture debugging information]' \
'-e[erase information about encoded tracks from the internal status file]' \
'-f[force the removal of the temporary ABCDETEMPDIR directory]' \
"-g[enable lame's --nogap option]" \
'-G[download album art using the getalbumart function]' \
'(- :)-h[get help information]' \
'-j[start a specified number of encoder processes at once]:number' \
'-k[keep the wav files after encoding]' \
'-l[use the low-diskspace algorithm]' \
'-L[use a local CDDB repository]' \
'-m[create DOS-style playlists, modifying the resulting one by adding CRLF line endings those to work]' \
"-n[don't query CDDB database]" \
'-N[non interactive mode]' \
'-o[select output type]:outputtype:(vorbis ogg mp3 flac spx mpc m4a wav wv ape opus mka aiff)' \
"-p[pads track numbers with 0's]" \
'-P[use Unix PIPES to read and encode in one step]' \
'-r[remote encode on this comma-delimited list of machines using distmp3]:hosts:_sequence _hosts' \
'-s[fields to be shown in the CDDB parsed entries]:field:_abcde_fields' \
'-S[set the speed of the CD drive]:speed' \
'-t[start the numbering of the tracks at a given number]:track-number' \
'-T[start the numbering of the tracks at a given number and change internal tag numbering]:track-number' \
'-U[set CDDBPROTO to version 5]' \
'(- :)-v[show the version and exit]' \
'-V[be more verbose]' \
'-x[eject the CD when all tracks have been read]' \
'-X[use an alternative "cue2discid" implementation]:cue2discid' \
'-w[add a comment to the tracks ripped from the CD]:comment' \
"-W[concatenate CD's]:cd-number" \
'-z[debug mode]' \
'*:tracks:'
# vim:ft=zsh
| true |
82e5bca5e2c268b8af01f67038834f3ea4833bf1 | Shell | yeloer/socblox | /units_ve/axi4_l1_mem_unit/sim/scripts/build.sh | UTF-8 | 421 | 3.5 | 4 | [] | no_license | #!/bin/sh
# Parse command-line options. Only "-sim <simulator>" is recognized;
# anything else is an error.
while test -n "$1"; do
	case $1 in
		-sim)
			shift
			SIM=$1
			;;
		-*)
			echo "Error: Unknown option $1" >&2
			exit 1
			;;
		*)
			echo "Error: Unknown argument $1" >&2
			exit 1
			;;
	esac
	shift
done

# Build with one make job per CPU; fall back to 2 when /proc/cpuinfo is
# unavailable (e.g. non-Linux hosts).
CPU_COUNT=2
if test -f /proc/cpuinfo; then
	CPU_COUNT=$(grep -c '^processor' /proc/cpuinfo)
fi

# SIM_DIR is expected to be exported by the calling environment
# (TODO confirm — it is never set in this script).
make SIM=${SIM} -j ${CPU_COUNT} -f ${SIM_DIR}/scripts/Makefile || exit 1
| true |
fa5933e37be598300ab3e84f41d9155023caaf1f | Shell | g-nunia/Coursera-LWB | /guessinggame.sh | UTF-8 | 405 | 3.53125 | 4 | [] | no_license | #!/usr/bin/env bash
# File: guessinggame.sh
# Ask the user to guess how many entries are in the current directory,
# hinting "bigger"/"smaller" until the guess matches.

function guessinggame () {
	# Number of entries in the current directory.
	x=$(ls | wc -l)
	while [[ "$response" -ne $x ]]
	do
		echo "Please guess how many files are in the current directory"
		# -r: keep backslashes in the input literal.
		read -r response
		if [[ "$response" -gt $x ]]
		then
			echo "response is bigger"
		# Fixed: the original tested "response" without the "$", relying on
		# implicit arithmetic variable resolution inside [[ ... ]].
		elif [[ "$response" -lt $x ]]
		then
			echo "response is smaller"
		else
			echo "perfect, goodjob!"
		fi
	done
}

guessinggame
| true |
20a4115e030c4ecf8dcdd9a88c4aa539fb2add23 | Shell | jchdel/nanoerp | /bin/vehicule | UTF-8 | 970 | 3.453125 | 3 | [
"Unlicense"
] | permissive | #!/bin/bash
# Vehicle expense entry point for the nanoerp accounting scripts.
# Pulls shared helpers (_check, _is_number, _usage, _exit) and the
# variables ACTION/PARAMS/FLAG0/ME/name from the sourced library files
# — TODO confirm, they are not defined here.
[ -f $(dirname $0)/../lib/common.inc ] && . $(dirname $0)/../lib/common.inc
. $LIB/bo.inc
. $LIB/compta.inc
# Record a fuel receipt: one journal line plus the matching pair of
# double-entry book lines (credit "a_rembourser", debit the holder).
_carburant(){
_check
# PARAMS = PRICE KM
# These are always receipts from automatic fuel pumps...
# To be reimbursed to the bearer at the counter
AMOUNT=$(echo $PARAMS|cut -d\ -f1)
_is_number $AMOUNT
TEXT=$(echo $PARAMS|cut -d\ -f2)
# Default the free-text field (the KM reading) to "nc" when absent.
[ -z "$TEXT" ] && TEXT="nc"
TIMESTAMP=$(date +%s)
# the journal entry
echo "${TIMESTAMP};D;${name};${AMOUNT};${TEXT}" >> ${JOURNALS}/carburant.csv
# the two book entries
echo "${TIMESTAMP};C;a_rembourser;${AMOUNT};carburant ${name}: ${TEXT}" >> ${BOOK}/6/${ME}s.csv
echo "${TIMESTAMP};D;${ME}s;${AMOUNT};carburant ${name}: ${TEXT}" >> ${BOOK}/5/a_rembourser.csv
}
# Dispatch on ACTION; FLAG1 records whether this script handled it.
FLAG1=1
case $ACTION in
"carburant")
_carburant
;;
"help")
echo "carburant NAME PRICE [KM] : register oil ticket for NAME or ID"
;;
*)
FLAG1=0
;;
esac
# If neither this script nor the common layer (FLAG0) handled the
# action, show usage and fail.
[ $FLAG0 -eq 0 -a $FLAG1 -eq 0 ] && _usage && _exit 1
_exit 0
| true |
1ed67c762a025f9970b74b619ca21df32a3d32ea | Shell | chaolongYin/shell_my | /unexeshell/config/config.sh | UTF-8 | 1,456 | 2.96875 | 3 | [] | no_license | dir_pre="/home"
# Select the home-directory prefix: macOS uses /Users, everything else
# keeps the /home default set above.
if [ `uname` == "Darwin" ]
then
	dir_pre="/Users"
	echo "Darwin env"
else
	echo "other env"
fi
# Root of this dotfiles/config repository checkout.
root_dir="$dir_pre/$USER/shell_my/unexeshell/config"
# ANSI color escapes for highlighted messages (NC = no color).
RED='\033[0;31m'
NC='\033[0m'
echo "try config all config...."
echo "1: config git config....."
echo "try remove exist file...."
# Replace existing dotfiles with symlinks into the repo. Note: plain
# "rm" (no -f) prints an error when the file does not exist yet.
rm ~/.gitconfig
rm ~/.gitignore_global
ln -s $root_dir/git_config/.gitconfig ~/.gitconfig
ln -s $root_dir/git_config/.gitignore_global ~/.gitignore_global
echo "2: config minicom..."
echo "sudo apt install minicom"
sudo apt install minicom
echo "cp minicom config file"
# NOTE(review): relative path — assumes the script is run from the repo
# config directory, unlike the $root_dir paths used elsewhere.
sudo cp minicom_config/minirc.* /etc/minicom/
echo "3: config chrome..."
echo -e "${RED}all to longhaozhang@gmail.com${NC}"
echo "4: config tmux..."
echo "try remove exist file..."
rm ~/.tmux.conf
ln -s $root_dir/tmux_config/.tmux.conf ~/.tmux.conf
echo "5: config gdb..."
echo "try remove exist file..."
rm ~/.gdbinit
ln -s $root_dir/gdb_config/.gdbinit ~/.gdbinit
echo "6: config vim config..."
echo "try remove exist file..."
rm ~/.vimrc
ln -s $root_dir/vim_config/.vimrc ~/.vimrc
echo "cp external_vim_file"
mkdir ~/.vim/
cp $root_dir/vim_config/external_vim_file/* -rf ~/.vim/
echo "7: config usb udev"
sudo cp $root_dir/ubuntu_config/usb_udev_config/*.rules /etc/udev/rules.d/
echo "8: config simsun fonts..."
# Install the SimSun font and rebuild the font caches.
sudo cp $root_dir/ubuntu_config/fonts/simsun.ttc /usr/share/fonts/
sudo mkfontscale
sudo mkfontdir
sudo fc-cache -fsv
echo -e "${RED}9: need config ubuntu config Manually${NC}"
| true |
dbb0139d0be7b0f9012a9763b94fce14b0270e2e | Shell | edersonbrilhante/GloboNetworkAPI | /scripts/navlan | UTF-8 | 588 | 3.28125 | 3 | [
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause"
] | permissive | #!/bin/bash
#!/bin/bash
# navlan logging stub: records each invocation (timestamp + arguments)
# in a world-writable log file, echoes the arguments to stderr, and
# exits successfully.

LOGFILE='/tmp/navlan.log'

# Make sure the log file exists and is writable before appending.
if [ -f "$LOGFILE" ]; then
	if [ ! -w "$LOGFILE" ]; then
		echo "ERRO: Arquivo de log $LOGFILE sem permissoes de escrita."
		exit 1
	fi
else
	touch "$LOGFILE"
	chmod 666 "$LOGFILE"
fi

# Append a timestamped record of this invocation.
echo "$(date): ${*}" >> "$LOGFILE"

# Expected usage:
# navlan -A ambiente(id_ambiente) -N vlans(nome) -R vlans(rede_oct1.rede_oct2.rede_oct3.red_oct4/bloco) -I vlans(num_vlan) --equip <NOME1,NOME2,etc> --cria

# Echo all arguments. Generalized: the original enumerated $1..${11}
# and silently dropped anything beyond eleven parameters.
echo "Parametros: $0 $*" >&2
echo "Teste"
exit 0
| true |
e94485774711d48ba5660a63aa9afe8f23c3341e | Shell | MariXavier/Exerc-cios-Shell-Script | /ex 23.txt | UTF-8 | 140 | 2.890625 | 3 | [] | no_license | #!/bin/bash
clear
# Create a working directory and populate it with 30 empty files
# named bob0 .. bob29.
mkdir -p pasta
# Fixed: the original cd was unchecked; a failure would have created
# the files in the wrong directory. ($[...] arithmetic was also
# deprecated — replaced by a brace-expansion loop.)
cd pasta || exit 1
for contador in {0..29}
do
	touch "bob$contador"
done
| true |
44fcd60846b8772448f2d9b5f2454075b805863b | Shell | miing/mci_build | /core/jenkins.sh | UTF-8 | 9,771 | 2.96875 | 3 | [
"Apache-2.0"
] | permissive | #############################################################################
#
# Copyright (C) 2013 Miing.org <samuel.miing@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#############################################################################
# Placeholder for the "upgrade" goal: currently only announces the step.
# Requires TARGET_CI to be set; otherwise prints an error and bails out.
ci_jenkins_upgrade()
{
	if [ -n "$TARGET_CI" ] ; then
		echo
		echo "CI::Jenkins::Info::Upgrade for site[$TARGET_CI_JENKINS_SITE]"
	else
		echo
		echo "CI::Jenkins::Error::TARGET_CI not set yet"
		return
	fi
}
# Placeholder for the "backup" goal: currently only announces the step.
# Guard clause: TARGET_CI must be set, otherwise report and bail out.
ci_jenkins_backup()
{
	[ -n "$TARGET_CI" ] || {
		echo
		echo "CI::Jenkins::Error::TARGET_CI not set yet"
		return
	}
	echo
	echo "CI::Jenkins::Info::Backup for site[$TARGET_CI_JENKINS_SITE]"
}
# Placeholder for the "custom" goal: currently only announces the step.
# Guard clause: TARGET_CI must be set, otherwise report and bail out.
ci_jenkins_custom()
{
	[ -n "$TARGET_CI" ] || {
		echo
		echo "CI::Jenkins::Error::TARGET_CI not set yet"
		return
	}
	echo
	echo "CI::Jenkins::Info::Customize for site[$TARGET_CI_JENKINS_SITE]"
}
# Post-install configuration: enable admin credentials when security is
# turned on in Jenkins' config.xml, and install the OpenID plugin when
# the site is configured for OpenID authentication. Restarts Jenkins
# only when something actually changed.
ci_jenkins_postconfig()
{
	if [ -z "$TARGET_CI" ] ; then
		echo
		echo "CI::Jenkins::Error::TARGET_CI not set yet"
		return
	fi
	echo
	echo "CI::Jenkins::Info::Postconfigure for site[$TARGET_CI_JENKINS_SITE]"
	local citop=/home/jenkins
	local config=config.xml
	local keys updated=false
	if [ ! -f $citop/$config ] ; then
		echo
		echo "CI::Jenkins::Warning::'$config' not existing yet under '$citop'"
		#return
	fi
	# Configure Admin: when <useSecurity>true is present, restart Jenkins
	# with the configured admin credentials.
	if [ -f $citop/$config ] ; then
		keys=(`grep -i "<useSecurity>true" $citop/$config`)
		if [ "$keys" ] ; then
			if [ "$TARGET_CI_JENKINS_SITE_ADMIN_USER" -a "$TARGET_CI_JENKINS_SITE_ADMIN_PASSWORD" ] ; then
				local ADMIN_USER="$TARGET_CI_JENKINS_SITE_ADMIN_USER"
				local ADMIN_PASSWORD="$TARGET_CI_JENKINS_SITE_ADMIN_PASSWORD"
				sudo /etc/init.d/jenkins restart -u $ADMIN_USER -p $ADMIN_PASSWORD
			fi
		fi
	fi
	# OpenID: fetch the plugin into Jenkins' plugins directory.
	local url plugin_name
	if [ "$TARGET_CI_JENKINS_SITE_AUTH" = "openid" ] ; then
		if [ ! -d $citop/plugins ] ; then
			sudo -u jenkins mkdir $citop/plugins
		fi
		url=http://updates.jenkins-ci.org/latest/openid.hpi
		plugin_name=openid.hpi
		sudo -u jenkins wget --no-check-certificate -O $citop/plugins/$plugin_name $url
		updated=true
	fi
	# Fixed: the original tested [ $updated ], which is true for both
	# "true" and "false" (any non-empty string), so Jenkins was
	# restarted unconditionally.
	if [ "$updated" = "true" ] ; then
		sudo /etc/init.d/jenkins restart
	fi
}
# Download and launch Jenkins for the target site: fetch jenkins.war
# (latest, or the version pinned in TARGET_CI_JENKINS_VERSION_INSTALLED),
# install the init script from the site's config directory, and start
# the daemon if it is not already running.
ci_jenkins_install()
{
	if [ -z "$TARGET_CI" ] ; then
		echo
		echo "CI::Jenkins::Error::TARGET_CI not set yet"
		return
	fi
	echo
	echo "CI::Jenkins::Info::Install for site[$TARGET_CI_JENKINS_SITE]"
	local citop=/home/jenkins
	local keys
	if [ ! -f $citop/bin/jenkins.war ] ; then
		# Create bin/logs dirs under home dir for user jenkins
		if [ ! -d $citop/bin ] ; then
			sudo -u jenkins mkdir $citop/bin
		fi
		if [ ! -d $citop/logs ] ; then
			sudo -u jenkins mkdir $citop/logs
		fi
		# Set bin path to PATH
		keys=(`grep -i "$citop/bin" $citop/.bashrc 2>/dev/null`)
		if [ ! "$keys" ] ; then
			sudo -u jenkins /bin/bash -c "echo 'PATH=$citop/bin:$PATH' >>$citop/.bashrc"
		fi
		# Pick up the up-to-date version of Jenkins
		# from http://mirrors.jenkins-ci.org/war
		# or http://mirrors.jenkins-ci.org/war/latest
		local url
		local jenkins_war=jenkins.war
		if [ -z "$TARGET_CI_JENKINS_VERSION_INSTALLED" ] ; then
			url=http://mirrors.jenkins-ci.org/war/latest
		else
			url=http://mirrors.jenkins-ci.org/war/$TARGET_CI_JENKINS_VERSION_INSTALLED
		fi
		sudo -u jenkins wget -O $citop/bin/$jenkins_war $url/$jenkins_war
	fi
	# Launch Jenkins daemon
	if [ ! -L /etc/init.d/jenkins ] ; then
		if [ ! -f $citop/bin/jenkins.sh ] ; then
			sudo -u jenkins cp $TARGET_SITE_CONFIG/jenkins/jenkins.sh $citop/bin/jenkins.sh
		fi
		sudo ln -snf $citop/bin/jenkins.sh /etc/init.d/jenkins
		if [ ! -f /etc/default/jenkins ] ; then
			sudo cp $TARGET_SITE_CONFIG/jenkins/jenkins /etc/default/jenkins
		fi
		sudo update-rc.d jenkins defaults 90 10
	fi
	# Start the daemon only if no process owned by user "jenkins" exists.
	keys=(`ps -ef | grep -i "^jenkins"`)
	if [ ! "$keys" ] ; then
		sudo /etc/init.d/jenkins start
	fi
}
# Create the system user "jenkins" and configure the fronting HTTP
# server. Currently only apache + https is implemented: generates a
# self-signed certificate, installs the site's virtualhost file,
# enables NameVirtualHost on port 443, and adds a /etc/hosts entry.
ci_jenkins_configure()
{
	if [ -z "$TARGET_CI" ] ; then
		echo
		echo "CI::Jenkins::Error::TARGET_CI not set yet"
		return
	fi
	echo
	echo "CI::Jenkins::Info::Configure for site[$TARGET_CI_JENKINS_SITE]"
	# Add a new user on system which is named jenkins
	if [ ! `id -u jenkins 2>/dev/null` ] ; then
		sudo adduser \
			--system \
			--shell /bin/bash \
			--gecos 'Jenkins Continuous Integration' \
			--group \
			--disabled-password \
			--home /home/jenkins \
			jenkins
	fi
	case $TARGET_CI_JENKINS_HTTPD in
	apache)
		local vhconfig keys
		case $TARGET_CI_JENKINS_HTTPD_SCHEME in
		https)
			sudo a2enmod ssl proxy proxy_http rewrite
			# Only configure when the virtualhost is not installed yet.
			if [ ! -f /etc/apache2/sites-available/$TARGET_CI_JENKINS_SITE ] ; then
				# Generate a self-signed certificate for SSL
				if [ ! -d /etc/apache2/ssl ] ; then
					sudo mkdir /etc/apache2/ssl
				fi
				if [ ! -f /etc/apache2/ssl/$TARGET_SITE.crt -o ! -f /etc/apache2/ssl/$TARGET_SITE.key ] ; then
					local OPENSSL=(`which openssl`)
					sudo $OPENSSL req -new -x509 -days 365 -nodes -out $TARGET_SITE.crt -keyout $TARGET_SITE.key
					sudo mv $TARGET_SITE.crt /etc/apache2/ssl
					sudo mv $TARGET_SITE.key /etc/apache2/ssl
				fi
				# Configure virtualhost
				vhconfig=$TARGET_CI_JENKINS_SITE.$TARGET_CI_JENKINS_HTTPD_SCHEME
				if [ -f $TARGET_SITE_CONFIG/jenkins/$vhconfig ] ; then
					sudo cp $TARGET_SITE_CONFIG/jenkins/$vhconfig /etc/apache2/sites-available/$TARGET_CI_JENKINS_SITE
				else
					echo
					echo "CI::Jenkins::Error::No virtualhost with '$TARGET_CI_JENKINS_HTTPD_SCHEME' on $TARGET_CI_JENKINS_HTTPD"
					return
				fi
				# Enable virtualhost at port 443 for ssl
				keys=(`grep "^[[:space:]]NameVirtualHost \*:443" /etc/apache2/ports.conf`)
				if [ ! "$keys" ] ; then
					sudo sed -i -e "/^<IfModule mod_ssl.c>.*/a\\\tNameVirtualHost \*:443" /etc/apache2/ports.conf
				fi
				# Match host names with IP address
				keys=(`cat /etc/hosts | grep -i -e "^[0-9\.]*[[:space:]]*$TARGET_CI_JENKINS_SITE"`)
				if [ ! "$keys" ] ; then
					sudo bash -c "cat >>/etc/hosts <<EOF
$TARGET_CI_JENKINS_HTTPD_IPADDR $TARGET_CI_JENKINS_SITE
EOF"
				fi
				# Make virtualhost take effect
				sudo a2ensite $TARGET_CI_JENKINS_SITE
				sudo a2dissite default
				sudo /etc/init.d/apache2 restart
			fi
			;;
		*)
			echo
			echo "CI::Jenkins::Error::Invalid httpd scheme: '$TARGET_CI_JENKINS_HTTPD_SCHEME'"
			return
			;;
		esac
		;;
	*)
		echo
		echo "CI::Jenkins::Error::Invalid httpd type: '$TARGET_CI_JENKINS_HTTPD'"
		return
		;;
	esac
}
# Install the prerequisites for a Jenkins site: the configured HTTP
# server, openssh (when serving over https), and a Java runtime.
# NOTE(review): "httpd" here looks like a helper function from a
# sibling script, not the Apache binary — TODO confirm.
ci_jenkins_preinstall()
{
	if [ -z "$TARGET_CI" ] ; then
		echo
		echo "CI::Jenkins::Error::TARGET_CI not set yet"
		return
	fi
	echo
	echo "CI::Jenkins::Info::Preinstall for site[$TARGET_CI_JENKINS_SITE]"
	if [ -n "$TARGET_CI_JENKINS_HTTPD" ] ; then
		httpd $TARGET_CI_JENKINS_HTTPD
	fi
	if [ "$TARGET_CI_JENKINS_HTTPD_SCHEME" = "https" ] ; then
		if [[ ! `which ssh` || ! `which sshd` ]] ; then
			sudo apt-get -y install openssh-client openssh-server
			# After installation of ssh client/server, I would like to generate
			# new ssh public/private key pair, although the key pair may have
			# been already there for some reason.
			# ssh-keygen -t rsa
			ssh-add
		fi
	fi
	# Set up JAVA runtime environment oh which Jenkins runs
	if [ ! `which java` ] ; then
		sudo apt-get -y install openjdk-6-jdk
	fi
}
# Tear down a Jenkins installation: stop the daemon, remove the init
# script and defaults file, drop the apache virtualhost, delete the
# jenkins user and its home directory.
ci_jenkins_clean()
{
	if [ -z "$TARGET_CI" ] ; then
		echo
		echo "CI::Jenkins::Error::TARGET_CI not set yet"
		return
	fi
	echo
	echo "CI::Jenkins::Info::Clean for site[$TARGET_CI_JENKINS_SITE]"
	local keys
	# Stop the daemon only if a process owned by user "jenkins" exists.
	keys=(`ps -ef | grep -i "^jenkins"`)
	if [ "$keys" ] ; then
		sudo /etc/init.d/jenkins stop
	fi
	if [ -L /etc/init.d/jenkins ] ; then
		sudo rm /etc/init.d/jenkins
		if [ -f /etc/default/jenkins ] ; then
			sudo rm /etc/default/jenkins
		fi
	fi
	case $TARGET_CI_JENKINS_HTTPD in
	apache)
		if [ -f /etc/apache2/sites-available/$TARGET_CI_JENKINS_SITE ] ; then
			sudo rm /etc/apache2/sites-available/$TARGET_CI_JENKINS_SITE
		fi
		if [ -L /etc/apache2/sites-enabled/$TARGET_CI_JENKINS_SITE ] ; then
			sudo rm /etc/apache2/sites-enabled/$TARGET_CI_JENKINS_SITE
		fi
		;;
	*)
		echo
		echo "CI::Jenkins::Error::Invalid httpd type: '$TARGET_CI_JENKINS_HTTPD'"
		return
		;;
	esac
	# Remove the dedicated system user and its home directory.
	if [ `id -u jenkins 2>/dev/null` ] ; then
		sudo deluser jenkins
	fi
	if [ -d /home/jenkins ] ; then
		sudo rm -rf /home/jenkins
	fi
}
# Entry point: run every goal listed in TARGET_SITE_GOALS in order.
# "lite" = clean..install; "all" additionally runs postconfig + custom.
ci_jenkins()
{
	if [ -z "$TARGET_CI" ] ; then
		echo
		echo "CI::Jenkins::Error::TARGET_CI not set yet"
		return
	fi
	local goal
	for goal in ${TARGET_SITE_GOALS[@]}
	do
		case $goal in
		clean)
			ci_jenkins_clean
			;;
		preinstall)
			ci_jenkins_preinstall
			;;
		configure)
			ci_jenkins_configure
			;;
		install)
			ci_jenkins_install
			;;
		postconfig)
			ci_jenkins_postconfig
			;;
		custom)
			ci_jenkins_custom
			;;
		backup)
			ci_jenkins_backup
			;;
		upgrade)
			ci_jenkins_upgrade
			;;
		lite)
			ci_jenkins_clean
			ci_jenkins_preinstall
			ci_jenkins_configure
			ci_jenkins_install
			;;
		all)
			ci_jenkins_clean
			ci_jenkins_preinstall
			ci_jenkins_configure
			ci_jenkins_install
			ci_jenkins_postconfig
			ci_jenkins_custom
			;;
		*)
			echo
			echo "CI::Jenkins::Error::Invalid target site goal: '$goal'"
			return
			;;
		esac
	done
}
| true |
f723bedc99f3a20475ab0e8b213bef47b4a3ef45 | Shell | adriansev/eos-deploy | /eos_start_pre.sh | UTF-8 | 3,885 | 3.5 | 4 | [] | no_license | #!/bin/bash
# ----------------------------------------------------------------------
# File: eos_start_pre.sh
# Author: Ivan Arizanovic - ComTrade Solutions Engineering
# ----------------------------------------------------------------------
# ************************************************************************
# * EOS - the CERN Disk Storage System *
# * Copyright (C) 2016 CERN/Switzerland *
# * *
# * This program is free software: you can redistribute it and/or modify *
# * it under the terms of the GNU General Public License as published by *
# * the Free Software Foundation, either version 3 of the License, or *
# * (at your option) any later version. *
# * *
# * This program is distributed in the hope that it will be useful, *
# * but WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
# * GNU General Public License for more details. *
# * *
# * You should have received a copy of the GNU General Public License *
# * along with this program. If not, see <http://www.gnu.org/licenses/>.*
# ************************************************************************
# Load the EOS environment (XRD_ROLES, EOS_FUSE_MOUNTDIR, ...).
. /etc/sysconfig/eos_env
# Start All EOS daemons (all required daemons from config file)
if [ "$1" = "eos-all" ]; then
  if [[ -z "$XRD_ROLES" ]]; then
    echo "<3>Error: No XRD_ROLES variable declared in \"/etc/sysconf/eos_env\""
    exit 1
  fi
  # Launch one eos@<role> unit per role, in parallel.
  for i in ${XRD_ROLES}; do
    systemctl start eos@${i} &
  done
  # Wait for all the daemons to start
  FAIL=0
  for job in `jobs -p`;do
    echo "<5>Waiting for $job ..."
    wait $job || let "FAIL+=1"
  done
  if [ "$FAIL" == "0" ]; then
    exit 0
  else
    exit 1
  fi
fi
# StartPre EOS daemons
if [ "$1" = "eos-start-pre" ]; then
  # $2 is the role (e.g. mgm, mq, fed) being prepared; it must appear
  # in XRD_ROLES.
  if [[ "$XRD_ROLES" == *"$2"* ]]; then
    if [ -e /etc/eos.keytab ]; then
      chown daemon /etc/eos.keytab
      chmod 400 /etc/eos.keytab
    fi
    # Create the runtime directory layout expected by the EOS daemons.
    mkdir -p /var/eos/md /var/eos/report /var/eos/auth /var/eos/stage /var/log/eos /var/spool/eos/core/${2} /var/spool/eos/admin
    chmod 755 /var/eos /var/eos/report
    chmod -R 775 /var/spool/eos
#    chown -R daemon /var/spool/eos
#    find /var/log/eos -maxdepth 1 -type d -exec chown daemon {} \;
#    find /var/eos/ -maxdepth 1 -mindepth 1 -not -path "/var/eos/fs" -not -path "/var/eos/fusex" -type d -exec chown -R daemon {} \;
#    chown daemon /var/eos/auth /var/eos/stage
    setfacl -m default:u:daemon:r /var/eos/auth/
    # Require cmsd for fed daemon
    if [ "$2" = "fed" ]; then
      systemctl start cmsd@clustered
    fi
  else
    echo "<3>Error: Service $2 not in the XRD_ROLES in \"/etc/sysconf/eos_env\""
    exit 1
  fi
fi
# Stop EOS daemons
if [ "$1" = "eos-stop" ]; then
  if [ "$2" = "fed" ]; then
    systemctl stop cmsd@clustered
  fi
fi
# Start EOS Master
# Master/slave state is signalled through marker files in /var/eos.
if [ "$1" = "eos-master" ]; then
  if [[ "$XRD_ROLES" == *"mq"* ]]; then
    touch /var/eos/eos.mq.master
  fi
  if [[ "$XRD_ROLES" == *"mgm"* ]]; then
    touch /var/eos/eos.mgm.rw
  fi
fi
# Start EOS Slave
if [ "$1" = "eos-slave" ]; then
  if [[ "$XRD_ROLES" == *"mq"* ]]; then
    unlink /var/eos/eos.mq.master
  fi
  if [[ "$XRD_ROLES" == *"mgm"* ]]; then
    unlink /var/eos/eos.mgm.rw
  fi
fi
# Start EOS fuse daemons
if [ "$1" = "eosd-start" ]; then
  mkdir -p /var/run/eosd/ /var/run/eosd/credentials/store ${EOS_FUSE_MOUNTDIR}
  chmod 1777 /var/run/eosd/credentials /var/run/eosd/credentials/store
  chmod 755 ${EOS_FUSE_MOUNTDIR}
fi
# Stop EOS fuse daemons
if [ "$1" = "eosd-stop" ]; then
  umount -f ${EOS_FUSE_MOUNTDIR}
fi
| true |
02688019a3ebac913ca12c065504bab06cf20e5d | Shell | plus3it/spel | /spel/scripts/vmware.sh | UTF-8 | 509 | 3.421875 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
# Bail if we are not running inside VMWare.
if [[ "$(virt-what | head -1)" != "vmware" ]]; then
  exit 0
fi

# Install the VMWare Tools from a linux ISO.
echo "installing vmware tools"
#wget http://192.168.0.185/linux.iso -P /tmp
# Loop-mount the tools ISO shipped in the vagrant user's home directory.
mkdir -p /mnt/vmware
mount -o loop /home/vagrant/linux.iso /mnt/vmware
cd /tmp || exit 1
tar xzf /mnt/vmware/VMwareTools-*.tar.gz
umount /mnt/vmware
rm -fr /home/vagrant/linux.iso
# -d: accept the installer's defaults (non-interactive install).
/tmp/vmware-tools-distrib/vmware-install.pl -d
rm -fr /tmp/vmware-tools-distrib
| true |
1b17807a3d8278db0f723101b86424bf310274db | Shell | jia3857/CDPDCTrial | /centosvmCDP.sh | UTF-8 | 7,012 | 2.78125 | 3 | [
"Apache-2.0"
] | permissive | #! /bin/bash
echo "-- Configure user cloudera with passwordless"
useradd cloudera -d /home/cloudera -p cloudera
sudo usermod -aG wheel cloudera
# Grant the cloudera user passwordless sudo by rewriting /etc/sudoers
# from a backup copy.
cp /etc/sudoers /etc/sudoers.bkp
rm -rf /etc/sudoers
sed '/^#includedir.*/a cloudera ALL=(ALL) NOPASSWD: ALL' /etc/sudoers.bkp > /etc/sudoers
echo "-- Configure and optimize the OS"
# Disable transparent hugepages now and on every boot (Cloudera
# recommendation for Hadoop workloads).
echo never > /sys/kernel/mm/transparent_hugepage/enabled
echo never > /sys/kernel/mm/transparent_hugepage/defrag
echo "echo never > /sys/kernel/mm/transparent_hugepage/enabled" >> /etc/rc.d/rc.local
echo "echo never > /sys/kernel/mm/transparent_hugepage/defrag" >> /etc/rc.d/rc.local
# add tuned optimization https://www.cloudera.com/documentation/enterprise/6/6.2/topics/cdh_admin_performance.html
echo "vm.swappiness = 1" >> /etc/sysctl.conf
sysctl vm.swappiness=1
timedatectl set-timezone UTC
echo "-- Install Java OpenJDK8 and other tools"
yum install -y java-1.8.0-openjdk-devel vim wget curl git bind-utils rng-tools
yum install -y epel-release
yum install -y python-pip
# rngd keeps the entropy pool fed (avoids blocking on key generation).
cp /usr/lib/systemd/system/rngd.service /etc/systemd/system/
systemctl daemon-reload
systemctl start rngd
systemctl enable rngd
echo "-- Installing requirements for Stream Messaging Manager"
yum install -y gcc-c++ make
curl -sL https://rpm.nodesource.com/setup_10.x | sudo -E bash -
yum install nodejs -y
npm install forever -g
# Use the AWS link-local NTP server via chrony.
echo "server 169.254.169.123 prefer iburst minpoll 4 maxpoll 4" >> /etc/chrony.conf
systemctl restart chronyd
sudo /etc/init.d/network restart
echo "-- Configure networking"
PUBLIC_IP=`curl https://api.ipify.org/`
#hostnamectl set-hostname `hostname -f`
# Comment out any stale "cloudera"/IPv6 loopback lines, then add a
# fresh hosts entry for this machine.
sed -i$(date +%s).bak '/^[^#]*cloudera/s/^/# /' /etc/hosts
sed -i$(date +%s).bak '/^[^#]*::1/s/^/# /' /etc/hosts
echo "`host cloudera |grep address | awk '{print $4}'` `hostname` `hostname`" >> /etc/hosts
#sed -i "s/HOSTNAME=.*/HOSTNAME=`hostname`/" /etc/sysconfig/network
# Trial setup: firewall and SELinux are disabled outright.
systemctl disable firewalld
systemctl stop firewalld
service firewalld stop
setenforce 0
sed -i 's/SELINUX=.*/SELINUX=disabled/' /etc/selinux/config
echo "Disabling IPv6"
echo "net.ipv6.conf.all.disable_ipv6 = 1
net.ipv6.conf.default.disable_ipv6 = 1
net.ipv6.conf.lo.disable_ipv6 = 1
net.ipv6.conf.eth0.disable_ipv6 = 1" >> /etc/sysctl.conf
sysctl -p
echo "-- Install CM and MariaDB"
# CM 7
cd /
wget https://archive.cloudera.com/cm7/7.1.4/redhat7/yum/cloudera-manager-trial.repo -P /etc/yum.repos.d/
# MariaDB 10.1
cat - >/etc/yum.repos.d/MariaDB.repo <<EOF
[mariadb]
name = MariaDB
baseurl = http://yum.mariadb.org/10.1/centos7-amd64
gpgkey=https://yum.mariadb.org/RPM-GPG-KEY-MariaDB
gpgcheck=1
EOF
yum clean all
rm -rf /var/cache/yum/
yum repolist
## CM
yum install -y cloudera-manager-agent cloudera-manager-daemons cloudera-manager-server
# Point the CM agent at this host: comment out existing server_host /
# listening_ip lines, then insert fresh values above them.
sed -i$(date +%s).bak '/^[^#]*server_host/s/^/# /' /etc/cloudera-scm-agent/config.ini
sed -i$(date +%s).bak '/^[^#]*listening_ip/s/^/# /' /etc/cloudera-scm-agent/config.ini
sed -i$(date +%s).bak "/^# server_host.*/i server_host=$(hostname)" /etc/cloudera-scm-agent/config.ini
sed -i$(date +%s).bak "/^# listening_ip=.*/i listening_ip=$(host cloudera |grep address | awk '{print $4}')" /etc/cloudera-scm-agent/config.ini
service cloudera-scm-agent restart
## MariaDB
yum install -y MariaDB-server MariaDB-client
cat conf/mariadb.config > /etc/my.cnf
echo "--Enable and start MariaDB"
systemctl enable mariadb
systemctl start mariadb
echo "-- Install JDBC connector"
wget https://dev.mysql.com/get/Downloads/Connector-J/mysql-connector-java-5.1.46.tar.gz -P ~
tar zxf ~/mysql-connector-java-5.1.46.tar.gz -C ~
mkdir -p /usr/share/java/
cp ~/mysql-connector-java-5.1.46/mysql-connector-java-5.1.46-bin.jar /usr/share/java/mysql-connector-java.jar
rm -rf ~/mysql-connector-java-5.1.46*
echo "-- Create DBs required by CM"
cd /root/CDPDCTrial
mysql -u root < scripts/create_db.sql
echo "-- Secure MariaDB"
mysql -u root < scripts/secure_mariadb.sql
echo "-- Prepare CM database 'scm'"
/opt/cloudera/cm/schema/scm_prepare_database.sh mysql scm scm cloudera
## PostgreSQL
#yum install -y postgresql-server python-pip
#pip install psycopg2==2.7.5 --ignore-installed
#echo 'LC_ALL="en_US.UTF-8"' >> /etc/locale.conf
#sudo su -l postgres -c "postgresql-setup initdb"
#cat conf/pg_hba.conf > /var/lib/pgsql/data/pg_hba.conf
#cat conf/postgresql.conf > /var/lib/pgsql/data/postgresql.conf
#echo "--Enable and start pgsql"
#systemctl enable postgresql
#systemctl restart postgresql
## PostgreSQL see: https://www.postgresql.org/download/linux/redhat/
yum install -y https://download.postgresql.org/pub/repos/yum/reporpms/EL-7-x86_64/pgdg-redhat-repo-latest.noarch.rpm
yum install -y postgresql96
yum install -y postgresql96-server
pip install psycopg2==2.7.5 --ignore-installed
echo 'LC_ALL="en_US.UTF-8"' >> /etc/locale.conf
/usr/pgsql-9.6/bin/postgresql96-setup initdb
cat /root/CDPDCTrial/conf/pg_hba.conf > /var/lib/pgsql/9.6/data/pg_hba.conf
cat /root/CDPDCTrial/conf/postgresql.conf > /var/lib/pgsql/9.6/data/postgresql.conf
echo "--Enable and start pgsql"
systemctl enable postgresql-9.6
systemctl start postgresql-9.6
echo "-- Create DBs required by CM"
# Ranger and DAS require PostgreSQL rather than MariaDB.
sudo -u postgres psql <<EOF
CREATE DATABASE ranger;
CREATE USER ranger WITH PASSWORD 'cloudera';
GRANT ALL PRIVILEGES ON DATABASE ranger TO ranger;
CREATE DATABASE das;
CREATE USER das WITH PASSWORD 'cloudera';
GRANT ALL PRIVILEGES ON DATABASE das TO das;
EOF
echo "-- Install CSDs"
# install local CSDs
mv ~/*.jar /opt/cloudera/csd/
mv /home/centos/*.jar /opt/cloudera/csd/
chown cloudera-scm:cloudera-scm /opt/cloudera/csd/*
chmod 644 /opt/cloudera/csd/*
echo "-- Install local parcels"
mv ~/*.parcel ~/*.parcel.sha /opt/cloudera/parcel-repo/
mv /home/centos/*.parcel /home/centos/*.parcel.sha /opt/cloudera/parcel-repo/
chown cloudera-scm:cloudera-scm /opt/cloudera/parcel-repo/*
echo "-- Enable passwordless root login via rsa key"
ssh-keygen -f ~/myRSAkey -t rsa -N ""
mkdir ~/.ssh
cat ~/myRSAkey.pub >> ~/.ssh/authorized_keys
chmod 400 ~/.ssh/authorized_keys
ssh-keyscan -H `hostname` >> ~/.ssh/known_hosts
sed -i 's/.*PermitRootLogin.*/PermitRootLogin without-password/' /etc/ssh/sshd_config
systemctl restart sshd
echo "-- Start CM, it takes about 2 minutes to be ready"
systemctl start cloudera-scm-server
# Poll the CM REST API until it answers. Fixed: the original test was
# `[ \`curl ...\` -z ]`, which is malformed (the operand precedes the
# unary -z operator), so the readiness wait never worked.
while [ -z "$(curl -s -X GET -u "admin:admin" http://localhost:7180/api/version)" ]
do
  echo "waiting 10s for CM to come up.."
  sleep 10
done
echo "-- Now CM is started and the next step is to automate using the CM API"
pip install --upgrade pip cm_client
# Substitute this host's FQDN into the cluster template. (The original
# ran this identical sed twice; the duplicate has been removed.)
sed -i "s/YourHostname/`hostname -f`/g" ~/CDPDCTrial/scripts/create_cluster.py
python ~/CDPDCTrial/scripts/create_cluster.py ~/CDPDCTrial/conf/cdpsandbox.json
# Create HDFS home directories for the demo users.
sudo usermod cloudera -G hadoop
sudo -u hdfs hdfs dfs -mkdir /user/cloudera
sudo -u hdfs hdfs dfs -chown cloudera:hadoop /user/cloudera
sudo -u hdfs hdfs dfs -mkdir /user/admin
sudo -u hdfs hdfs dfs -chown admin:hadoop /user/admin
sudo -u hdfs hdfs dfs -chmod -R 0755 /tmp
| true |
ca83a260ff942c3ea1c2bbf8530da29dda61cd57 | Shell | wahlstedtw/noctl-airship-poc | /packages/test_encryptionsolution/function_independent/run.sh | UTF-8 | 1,116 | 2.71875 | 3 | [] | no_license | #!/bin/sh
if [ ! -f ./kustomize ]; then
curl -fsSL https://github.com/kubernetes-sigs/kustomize/releases/download/kustomize%2Fv3.8.1/kustomize_v3.8.1_linux_amd64.tar.gz -o x.tar.gz && tar -zxvf x.tar.gz && rm x.tar.gz
fi
rm sops_functional_tests_key.asc
wget https://raw.githubusercontent.com/mozilla/sops/master/pgp/sops_functional_tests_key.asc
echo build 1
KUSTOMIZE_PLUGIN_HOME=$(pwd) SOPS_IMPORT_PGP=$(cat sops_functional_tests_key.asc) ./kustomize build --enable_alpha_plugins site/site1 > output1.yaml
echo regenerating secrets
# this should be substituted with https://review.opendev.org/c/airship/airshipctl/+/765593 with kustomizeSinkOutputDir
KUSTOMIZE_PLUGIN_HOME=$(pwd) SOPS_IMPORT_PGP=$(cat sops_functional_tests_key.asc) SOPS_PGP_FP='FBC7B9E2A4F9289AC0C1D4843D16CEE4A27381B4' ./kustomize build --enable_alpha_plugins type/type1/secrets_regenerator/ | ./kustomize fn sink site/site1/secrets/generated/
echo build 2
KUSTOMIZE_PLUGIN_HOME=$(pwd) SOPS_IMPORT_PGP=$(cat sops_functional_tests_key.asc) ./kustomize build --enable_alpha_plugins site/site1 > output2.yaml
diff output1.yaml output2.yaml
| true |
ee5f907fca7e29145c270b71e92c709fa8eb29c0 | Shell | SteveSatterfield/HEVf | /idea/src/vtkUtilities/scripts/hev-vtkGaussianSmooth.test.sh | UTF-8 | 1,026 | 3.03125 | 3 | [] | no_license | # ! /bin/sh
# Test cases for hev-vtkGaussianSmooth

INDIR=/usr/local/HEV/idea/src/vtkUtilities/data
OUTDIR=testresults

# Fixed: ensure the output directory exists, and quote all variable
# expansions (the originals were unquoted).
mkdir -p "$OUTDIR"

#remove all files currently present in /testresults/ folder
rm -f "$OUTDIR"/*
echo 'Removed all files in testresults/ folder.'

cp "$INDIR/structPoints.2.noise.vtk" "$OUTDIR/sP2n.vtk"
echo 'Copied structPoints.2.noise.vtk into testresults/sP2n.vtk.'

# test default settings
./hev-vtkGaussianSmooth "$OUTDIR/sP2n.vtk" "$OUTDIR/sP2n.smooth.vtk"
echo 'Created sP2n.smooth.vtk.'

# Run original and smoothed version through hev-vtkContour.
# Contour of smoothed version should look smoother than original.
../vtkContourFilter/hev-vtkContour 0.2 "$OUTDIR/sP2n.vtk" "$OUTDIR/sP2n.CF.vtk"
echo 'Created sP2n.CF.vtk.'
../vtkContourFilter/hev-vtkContour 0.2 "$OUTDIR/sP2n.smooth.vtk" "$OUTDIR/sP2n.smooth.CF.vtk"
echo 'Created sP2n.smooth.CF.vtk.'

# dev 0.8 and rad 0.3:
./hev-vtkGaussianSmooth --stddev 0.8 0.8 0.8 --rfactor 0.3 0.3 0.3 "$OUTDIR/sP2n.vtk" "$OUTDIR/sP2n.dev0.8.rad0.3.vtk"
echo 'Created sP2n.dev0.8.rad0.3.vtk.'

exit
| true |
cf19fd880a26f5568e2ddde61439b5203d63cfc6 | Shell | chris-minsik-son/Git-Subset-Implementation | /test05.sh | UTF-8 | 426 | 3.296875 | 3 | [] | no_license | #!/bin/sh
# TEST05: Remove file that does not exist in working directory but only in index
# Here, the file in the index should be deleted
# Note, you should remove any non-girt files in the working directory before running this test
if test ! -d ".girt"
then
./girt-init
else
rm -r ".girt"
./girt-init
fi
# Initialized empty girt repository in .girt
touch a b
./girt-add a b
rm a
./girt-rm a
ls .girt/index
# b | true |
4feb9e2b7b7732f53c9906bc97fc7908f0354c4b | Shell | kube-object-storage/lib-bucket-provisioner | /hack/go.sh | UTF-8 | 4,090 | 3.4375 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
REPO_ROOT="$(readlink -f $(dirname ${BASH_SOURCE})/../)"
readonly LOCAL_IMPORT="sigs.k8s.io/controller-runtime,github.com/kube-object-storage/lib-bucket-provisioner/"
readonly PKGS="./pkg/..."
# Because generated code exists under $REPO_ROOT/pkg/, it's necessary to filter it out
# This function get all sub packages under $REPO_ROOT/pkg/... except
valid_sub_packages() {
# Exclude packages which should not be edited (pkg/apis/* && pkg/client/.*)
local filteredPkgs="$(awk '!/pkg\/client/' <(go list -f '{{.Dir}}' ./...))"
echo "$filteredPkgs"
}
readonly SUB_PACKAGES=$(valid_sub_packages)
# TODO (copejon) go tools should be staticly defined to a commit hash to enforce parity between dev and CI environment
imports(){
echo "-------- formatting"
(
cd "${REPO_ROOT}"
# Call goimport for each sub package
for sp in ${SUB_PACKAGES}; do
echo "goimports -w -local $LOCAL_IMPORT for packages under $sp"
goimports -w -local "$LOCAL_IMPORT" "$sp"
done
)
}
imports-check(){
echo "-------- checking format"
(
cd "${REPO_ROOT}"
# Call goimport for each sub package
for sp in ${SUB_PACKAGES}; do
goimports -d -e -local "$LOCAL_IMPORT" "$sp"
done
)
}
vet(){
echo "-------- vetting"
(
cd "${REPO_ROOT}"
for sp in ${SUB_PACKAGES}; do
go vet "${sp}"
done
)
}
build(){
echo "-------- compiling"
(
cd "${REPO_ROOT}"
for p in ${PKGS}; do
echo "go build'ing package $p"
go build -a "${p}"
done
)
}
test(){
echo "-------- testing"
(
cd "${REPO_ROOT}"
for p in "${PKGS}"; do
go test -v "${p}"
done
)
}
lint(){
(
cd "${REPO_ROOT}"
for p in "${PKGS}"; do
golangci-lint run "${p}"
done
)
}
linters(){
golangci-lint linters
}
ci-checks(){
echo "-------- beginning preflight checks"
lint
test
build
}
help(){
local msg=\
'
This script accepts the following args:
help print this text
vet run go vet on core project code
imports run goimports with defined import priorities on core project code
(goimports also runs gofmt)
imports-check run goimports but only report errors and diffs
build run go build on core project code
test run unit tests
lint run golangci-lint default linters
linters show enabled and disabled golangci-linters
ci-checks run golangci-lint, test, and build (executed in CI)
For example, to vet and gofmt/imports, run:
$ ./go.sh vet imports
'
printf "%s\n" "${msg}"
}
verify-tool(){
which golangci-lint &> /dev/null || (echo \
'WARNING! golangci-lint not found in PATH.
If you have not installed golangci-lint, you can do so with ANY of the following commands, replacing vX.Y.Z with the release version.
It is recommended you use v1.16.0 for parity with CI.
# binary will be $(go env GOPATH)/bin/golangci-lint
curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(go env GOPATH)/bin vX.Y.Z
# or install it into ./bin/
curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s vX.Y.Z
# In alpine linux (as it does not come with curl by default)
wget -O - -q https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s vX.Y.Z
Releases can be found at https://github.com/golangci/golangci-lint/releases
' && exit 1)
}
main(){
verify-tool
[[ ${#@} -eq 0 ]] && (help; exit 0)
while [[ ${#@} -gt 0 ]]; do
case "$1" in
"vet")
vet
shift 1
;;
"build")
build
shift 1
;;
"imports")
imports
shift 1
;;
"imports-check")
imports-check
shift 1
;;
"test")
test
shift 1
;;
"lint")
lint
shift 1
;;
"ci-checks")
ci-checks
exit 0
;;
"linters")
linters
exit 1
;;
"help"|'h')
help
exit
;;
*)
echo "unrecongnized args: $1"
exit 1
;;
esac
done
}
main ${@}
| true |
8d3f1547103f546beed77c2cf3ded3e585ad85ab | Shell | blu-base/fer_steamreforming_supplements | /cfd-furnace/utils/extractValuesFromLog.sh | UTF-8 | 1,086 | 4.15625 | 4 | [
"CC-BY-4.0"
] | permissive | #!/bin/bash
DELIMITER='\t'
LOGFILE=$1
EXTRACTFILE="${LOGFILE%*.log}_extracted.csv"
HEADER=$(grep '[[:space:]]Iteration' $LOGFILE | tail -1 |tr -s " ")
# Strip leading whitespace
HEADER="${HEADER# }"
# Strip units
HEADER="$(echo "$HEADER" | sed 's/([a-zA-Z\/]*)//g' | tr -s " ")"
LOGVALUES=$(grep '[[:space:]]10000' $LOGFILE | tr -s " ")
if [ -z "$LOGVALUES" ]; then
echo "Iteration 10000 not found"
LOGVALUES=$(tail -n 160 $LOGFILE | head -n1 | tr -s " ")
if [ -z "$LOGVALUES" ]; then
echo "Log file not as expected. Looking for last Log header"
LOGVALUES=$(grep -A 1 '[[:space:]]Iteration' $LOGFILE | tail -1 | tr -s " ")
echo "Using iteration $(echo "$LOGVALUES" | cut -d -f2)"
fi
else
echo "Simulation ran till iteration 10000."
fi
LOGVALUES="${LOGVALUES# }"
if [ -z "$LOGVALUES" ]; then
echo "Failed to recover log's values."
else
echo "Writing log's values to ${EXTRACTFILE}"
## Writing data to file, inserting delimiter.
echo "$HEADER" | tr ' ' $DELIMITER > $EXTRACTFILE
echo "$LOGVALUES" | tr ' ' $DELIMITER >> $EXTRACTFILE
echo "Done."
fi
| true |
337a934d9cc765a49c35e1a8c5d32f873e93860e | Shell | teja14312/dockerfiles | /magento1/usr/local/share/magento1/magento_functions.sh | UTF-8 | 4,787 | 3.40625 | 3 | [
"MIT"
] | permissive | #!/bin/bash
function do_magento_n98_download() {
if [ ! -f bin/n98-magerun.phar ]; then
as_code_owner "mkdir -p bin"
as_code_owner "curl -o bin/n98-magerun.phar https://files.magerun.net/n98-magerun.phar"
fi
}
function do_magento_create_directories() {
mkdir -p "${MAGE_ROOT}/media" "${MAGE_ROOT}/sitemaps" "${MAGE_ROOT}/staging" "${MAGE_ROOT}/var"
}
function do_magento_directory_permissions() {
if [ "$IS_CHOWN_FORBIDDEN" != 'true' ]; then
[ ! -e "${MAGE_ROOT}/app/etc/local.xml" ] || chown -R "${CODE_OWNER}:${APP_GROUP}" "${MAGE_ROOT}/app/etc/local.xml"
chown -R "${APP_USER}:${CODE_GROUP}" "${MAGE_ROOT}/media" "${MAGE_ROOT}/sitemaps" "${MAGE_ROOT}/staging" "${MAGE_ROOT}/var"
chmod -R ug+rw,o-w "${MAGE_ROOT}/media" "${MAGE_ROOT}/sitemaps" "${MAGE_ROOT}/staging" "${MAGE_ROOT}/var"
chmod -R a+r "${MAGE_ROOT}/media" "${MAGE_ROOT}/sitemaps" "${MAGE_ROOT}/staging"
else
[ ! -e "${MAGE_ROOT}/app/etc/local.xml" ] || chmod a+r "${MAGE_ROOT}/app/etc/local.xml"
chmod -R a+rw "${MAGE_ROOT}/media" "${MAGE_ROOT}/sitemaps" "${MAGE_ROOT}/staging" "${MAGE_ROOT}/var"
fi
}
function do_magento_frontend_build() {
if [ -d "$FRONTEND_INSTALL_DIRECTORY" ]; then
mkdir -p pub/static/frontend/
if [ -d "pub/static/frontend/" ] && [ "$IS_CHOWN_FORBIDDEN" != 'true' ]; then
chown -R "${CODE_OWNER}:${CODE_GROUP}" pub/static/frontend/
fi
if [ ! -d "$FRONTEND_INSTALL_DIRECTORY/node_modules" ]; then
as_code_owner "npm install" "$FRONTEND_INSTALL_DIRECTORY"
fi
if [ -z "$GULP_BUILD_THEME_NAME" ]; then
as_code_owner "gulp $FRONTEND_BUILD_ACTION" "$FRONTEND_BUILD_DIRECTORY"
else
as_code_owner "gulp $FRONTEND_BUILD_ACTION --theme='$GULP_BUILD_THEME_NAME'" "$FRONTEND_BUILD_DIRECTORY"
fi
if [ -d "pub/static/frontend/" ] && [ "$IS_CHOWN_FORBIDDEN" != 'true' ]; then
chown -R "${APP_USER}:${APP_GROUP}" pub/static/frontend/
fi
fi
}
function do_replace_core_config_values() (
set +x
local SQL
SQL="DELETE from core_config_data WHERE path LIKE 'web/%base_url';
DELETE from core_config_data WHERE path LIKE 'system/full_page_cache/varnish%';
INSERT INTO core_config_data VALUES (NULL, 'default', '0', 'web/unsecure/base_url', '$PUBLIC_ADDRESS_UNSECURE');
INSERT INTO core_config_data VALUES (NULL, 'default', '0', 'web/secure/base_url', '$PUBLIC_ADDRESS_SECURE');
INSERT INTO core_config_data VALUES (NULL, 'default', '0', 'system/full_page_cache/varnish/access_list', 'varnish');
INSERT INTO core_config_data VALUES (NULL, 'default', '0', 'system/full_page_cache/varnish/backend_host', 'varnish');
INSERT INTO core_config_data VALUES (NULL, 'default', '0', 'system/full_page_cache/varnish/backend_port', '80');
$ADDITIONAL_SETUP_SQL"
echo "Running the following SQL on $DATABASE_HOST.$DATABASE_NAME:"
echo "$SQL"
echo "$SQL" | mysql -h"$DATABASE_HOST" -u"$DATABASE_USER" -p"$DATABASE_PASSWORD" "$DATABASE_NAME"
)
function do_magento_config_cache_enable() {
as_app_user "php /app/bin/n98-magerun.phar cache:enable config" "${MAGE_ROOT}"
}
function do_magento_config_cache_clean() {
as_app_user "php /app/bin/n98-magerun.phar cache:clean config" "${MAGE_ROOT}"
}
function do_magento_system_setup() {
as_app_user "php /app/bin/n98-magerun.phar sys:setup:incremental -n" "${MAGE_ROOT}"
}
function do_magento_reindex() {
(as_app_user "php /app/bin/n98-magerun.phar index:reindex:all" "${MAGE_ROOT}" || echo "Failing indexing to the end, ignoring.") && echo "Indexing successful"
}
function do_magento_cache_flush() {
# Flush magento cache
as_app_user "php bin/n98-magerun.phar cache:flush"
}
function do_magento_create_admin_user() (
if [ "$MAGENTO_CREATE_ADMIN_USER" != 'true' ]; then
return 0
fi
# Create magento admin user
set +e
as_app_user "php /app/bin/n98-magerun.phar admin:user:list | grep -q '$MAGENTO_ADMIN_USERNAME'" "${MAGE_ROOT}"
local HAS_ADMIN_USER=$?
set -e
if [ "$HAS_ADMIN_USER" != 0 ]; then
set +x
echo "Creating admin user '$MAGENTO_ADMIN_USERNAME'"
SENSITIVE="true" as_app_user "php /app/bin/n98-magerun.phar admin:user:create '$MAGENTO_ADMIN_USERNAME' '$MAGENTO_ADMIN_EMAIL' '$MAGENTO_ADMIN_PASSWORD' '$MAGENTO_ADMIN_FORENAME' '$MAGENTO_ADMIN_SURNAME' Administrators" "${MAGE_ROOT}"
fi
)
function do_magento_templating() {
:
}
function do_magento_build() {
do_magento_n98_download
do_magento_create_directories
do_magento_directory_permissions
do_magento_frontend_build
}
function do_magento_development_build() {
do_magento_setup
}
function do_magento_setup() {
do_replace_core_config_values
do_magento_config_cache_enable
do_magento_config_cache_clean
do_magento_system_setup
do_magento_create_admin_user
do_magento_reindex
do_magento_cache_flush
}
| true |
9303310f754ab755b69761b5e36074a53b072af8 | Shell | manctl/mansdk | /cmds/cmd.sh | UTF-8 | 429 | 3.609375 | 4 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | #!/bin/sh
# Usage:
# here=`cd "\`dirname \"$0\"\`";pwd` ; source "$here/cmd.sh" ; cd "$here/.."
function run ()
{
"$@"
}
function ran ()
{
echo "$@"
}
if test -z "$WINDIR"; then
case `uname` in
Darwin*|darwin*)
SED_I="-i .bak"
;;
Linux*|linux*)
SED_I="-i"
;;
esac
else
# Windows
SED_I="-i"
fi
function sed_i ()
{
sed $SED_I "$@"
}
| true |
5fc0bfc5203e11d08f491d5bb1d5cfcf067647de | Shell | gaoyingie/mediasoup-client-android | /mediasoup-client/deps/libmediasoupclient/scripts/test.sh | UTF-8 | 663 | 3.765625 | 4 | [
"MIT",
"ISC"
] | permissive | #!/usr/bin/env bash
set -e
PROJECT_PWD=${PWD}
TEST_BINARY=""
current_dir_name=${PROJECT_PWD##*/}
if [ "${current_dir_name}" != "libmediasoupclient" ] ; then
echo ">>> [ERROR] $(basename $0) must be called from libmediasoupclient/ root directory" >&2
exit 1
fi
# Load common script.
. scripts/common.sh
if [ "$1" == "build" ]; then
# Rebuild.
rm -rf build/
cmake . -Bbuild
fi
# Compile.
cmake --build build
if [ "${OS}" = "Darwin" ]; then
TEST_BINARY=./build/test/test_mediasoupclient.app/Contents/MacOS/test_mediasoupclient
else
TEST_BINARY=./build/test/test_mediasoupclient
fi
echo "runing binary: '${TEST_BINARY}'"
# Run test.
${TEST_BINARY} $@
| true |
6eb0d8d6ef345d8e2a5007ea94eb6e1e7ade58c0 | Shell | cquirosj/kube-alive | /deploy.sh | UTF-8 | 4,152 | 3.796875 | 4 | [
"MIT"
] | permissive | #!/bin/bash
set -e
if [ -z "`kubectl version`" ]; then
echo "kubectl is not installed, aborting"
exit 1
fi
echo "Detecting Kubernetes installation..."
KUBECFG=`kubectl config view --minify=true`
USEINGRESS=false
if echo "${KUBECFG}" | grep "name: minikube" > /dev/zero; then
echo "Kubernetes on Minikube detected!"
else
if echo "${KUBECFG}" | grep -e '^ *server:.*azmk8s\.io:443$' > /dev/zero; then
echo "Kubernetes on AKS detected!"
USEINGRESS=true
else
if echo "${KUBECFG}" | grep "name: gke_" > /dev/zero; then
echo "Kubernetes on GKE detected!"
USEINGRESS=true
else
echo "No specific Kubernetes provider detected, assuming Kubernetes runs on bare metal!"
fi
fi
fi
if [ ${USEINGRESS} = true ]
then
echo "Will use an ingress to enable access to kube-alive."
else
export KUBEALIVE_PUBLICIP=`kubectl config view --minify=true | grep "server: http" | sed 's/ *server: http:\/\///' | sed 's/ *server: https:\/\///' | sed 's/:.*//'`
echo "Will use a Service with external IP ${KUBEALIVE_PUBLICIP} to enable access to kube-alive."
fi
ARCHSUFFIX=
LOCAL=0
if [ $# -eq 1 ] && [ $1 = "local" ]; then
LOCAL=1
if uname -a | grep arm64 > /dev/null; then
ARCHSUFFIX=_arm64v8
else
if uname -a | grep arm > /dev/null; then
ARCHSUFFIX=_arm32v7
else
ARCHSUFFIX=_amd64
fi
fi
echo "Deploying locally for architecture ${ARCHSUFFIX} from deploy/."
if [ ! -n "${KUBEALIVE_DOCKER_REPO}" ]; then
echo "\$KUBEALIVE_DOCKER_REPO not set, aborting."
exit 1
else
echo "Using docker repo \"${KUBEALIVE_DOCKER_REPO}\"."
fi
else
echo "Deploying from github."
if [ ! -n "${KUBEALIVE_DOCKER_REPO}" ]; then
echo "\$KUBEALIVE_DOCKER_REPO not set, using \"kubealive\" as a default."
export KUBEALIVE_DOCKER_REPO=kubealive
else
echo "Using docker repo \"${KUBEALIVE_DOCKER_REPO}\"."
fi
fi
if [ -z "${KUBEALIVE_BRANCH}" ]; then
BRANCH_SUFFIX=
else
BRANCH_SUFFIX="_${KUBEALIVE_BRANCH}"
fi
for service in `echo "namespace
getip
healthcheck
cpuhog
incver
frontend"`; do
if [ ${LOCAL} -eq 1 ]; then
cat "./deploy/${service}.yml" | sed "s/%%KUBEALIVE_DOCKER_REPO%%/${KUBEALIVE_DOCKER_REPO}/" | sed "s/%%ARCHSUFFIX%%/${ARCHSUFFIX}/" | sed "s/%%BRANCH_SUFFIX%%/${BRANCH_SUFFIX}/" | kubectl apply -f -
else
curl -sSL "https://raw.githubusercontent.com/daniel-kun/kube-alive/master/deploy/${service}.yml" | sed "s/%%KUBEALIVE_DOCKER_REPO%%/${KUBEALIVE_DOCKER_REPO}/" | sed "s/%%ARCHSUFFIX%%/${ARCHSUFFIX}/" | sed "s/%%BRANCH_SUFFIX%%/${BRANCH_SUFFIX}/" | kubectl apply -f -
fi
done
if [ ${USEINGRESS} = true ]
then
if [ ${LOCAL} -eq 1 ]; then
kubectl apply -f ./deploy/ingress.yml
else
kubectl apply -f "https://raw.githubusercontent.com/daniel-kun/kube-alive/master/deploy/ingress.yml"
fi
echo "
FINISHED!
If you have an ingress controller installed, you should be able to access kube-alive through the ingresses external IP soon.
THIS CAN TAKE UP TO 10 MINUTES to work properly and requests may result in 500s or 404s in the meantime.
If you don't have an ingress controller installed, yet, you should install one now.
Either using helm:
helm install stable/nginx-ingress
or using the official nginx-ingress docs on
https://github.com/kubernetes/ingress-nginx/blob/master/deploy/README.md
"
else
if [ ${LOCAL} -eq 1 ]; then
cat ./deploy/external-ip.yml | sed "s/%%KUBEALIVE_PUBLICIP%%/${KUBEALIVE_PUBLICIP}/" | kubectl apply -f -
else
curl -sSL "https://raw.githubusercontent.com/daniel-kun/kube-alive/master/deploy/external-ip.yml" | sed "s/%%KUBEALIVE_PUBLICIP%%/${KUBEALIVE_PUBLICIP}/" | kubectl apply -f -
fi
echo "
FINISHED!
You should now be able to access kube-alive at
http://${KUBEALIVE_PUBLICIP}/
"
fi
echo "Also, you can look at all those neat Kubernetes resources that havee been created via
kubectl get all -n kube-alive
"
| true |
5e92df864988dadb4563396fcf54419518a42bb4 | Shell | silvansky/dbssh | /dbsshd.sh | UTF-8 | 1,592 | 3.84375 | 4 | [] | no_license | #!/bin/sh
# WARNING! This method is UNSAFE, I strongly recommend not to use it!
VER="0.1"
DIR=/Users/Valentine/Dropbox/shell_in
TMP_DIR=/Users/Valentine/.tmp_dbssh
DONE_DIR=/Users/Valentine/Dropbox/shell_done
OUT_DIR=/Users/Valentine/Dropbox/shell_out
FILE_MASK=*.sh
OWNER="Valentine Silvansky"
MAIL_LOGIN="v.silvansky"
MAIL_DOMAIN="gmail.com"
MAIL="${MAIL_LOGIN}@${MAIL_DOMAIN}"
TIMEOUT=5
SENDMAIL=false
echo "Starting SSH over Dropbox v$VER daemon with following config:"
echo "*** WARNING! Do not use this script! It is really UNSAFE! ***"
echo " Watch dir: $DIR"
echo " Mask ${FILE_MASK}"
echo " Temporary dir: ${TMP_DIR}"
echo " Output dir: ${OUT_DIR}"
echo " Done dir: ${DONE_DIR}"
echo " Temporary dir: ${TMP_DIR}"
echo " Owner: $OWNER <$MAIL>"
echo " Email notify: $SENDMAIL"
cd $DIR
while true; do
FILES=`find . -name ${FILE_MASK}`
for FILE in $FILES; do
if [ "$FILE" != "" ]; then
echo "Found file $FILE"
DATE=`date +%d-%m-%y`
TIME=`date +%H-%M`
TMP_FILE_NAME=${FILE}_${DATE}_${TIME}.sh
TMP_OUTPUT_NAME=${FILE}_${DATE}_${TIME}_out.txt
mv $FILE ${TMP_DIR}/${TMP_FILE_NAME}
echo "Temporary file: ${TMP_FILE_NAME}"
echo "Temporary output file: ${TMP_OUTPUT_NAME}"
chmod +x ${TMP_DIR}/${TMP_FILE_NAME}
${TMP_DIR}/${TMP_FILE_NAME} > ${TMP_DIR}/${TMP_OUTPUT_NAME} 2>&1
mv ${TMP_DIR}/${TMP_FILE_NAME} ${DONE_DIR}/
mv ${TMP_DIR}/${TMP_OUTPUT_NAME} ${OUT_DIR}/
if $SENDMAIL; then
cat ${OUT_DIR}/${TMP_OUTPUT_NAME} | mail -s "[dbssh] File $FILE launched at $DATE $TIME" $MAIL
fi
fi
done
sleep $TIMEOUT
done | true |
ab487ce585edb4f2b080d27831308ba581972555 | Shell | dotzero/dotfiles | /bin/crlf | UTF-8 | 472 | 4.25 | 4 | [] | no_license | #!/usr/bin/env bash
# Find files with Windows line endings (and convert them to Unix in force mode)
#
# Usage:
# crlf [file]
function _crlf_file() {
grep -q $'\x0D' "$1" && echo "$1" && dos2unix "$1"
}
if [ "$1" == "" ] || [ "$1" == "." ] || [ "$1" == "*" ]; then
# All files
for file in $(find . -type f -not -path "*/.git/*" | xargs file | grep ASCII | cut -d: -f1); do
_crlf_file $file
done
elif [ "$1" != "" ]; then
# Single file
_crlf_file $1
fi
| true |
4533743777717411b9644fcc6141c9946ccdedba | Shell | 0x0916/lfs | /02-build-lfs/18-ncurses.sh | UTF-8 | 1,340 | 3.109375 | 3 | [] | no_license | #!/bin/sh
set -e
NAME=ncurses-6.1
TAR=tar.gz
SOURCE=/sources
cd $SOURCE
rm -fr $SOURCE/$NAME
tar -xf $NAME.$TAR
cd $SOURCE/$NAME
#From LFS
sed -i '/LIBTOOL_INSTALL/d' c++/Makefile.in
./configure --prefix=/usr \
--mandir=/usr/share/man \
--with-shared \
--without-debug \
--without-normal \
--enable-pc-files \
--enable-widec
make -j100
make install
mv -v /usr/lib/libncursesw.so.6* /lib
ln -sfv ../../lib/$(readlink /usr/lib/libncursesw.so) /usr/lib/libncursesw.so
for lib in ncurses form panel menu ; do
rm -vf /usr/lib/lib${lib}.so
echo "INPUT(-l${lib}w)" > /usr/lib/lib${lib}.so
ln -sfv ${lib}w.pc /usr/lib/pkgconfig/${lib}.pc
done
rm -vf /usr/lib/libcursesw.so
echo "INPUT(-lncursesw)" > /usr/lib/libcursesw.so
ln -sfv libncurses.so /usr/lib/libcurses.so
mkdir -v /usr/share/doc/ncurses-6.1
cp -v -R doc/* /usr/share/doc/ncurses-6.1
make distclean
./configure --prefix=/usr \
--with-shared \
--without-normal \
--without-debug \
--without-cxx-binding \
--with-abi-version=5
make sources libs
cp -av lib/lib*.so.5* /usr/lib
#From LFS end
cd $SOURCE
rm -fr $SOURCE/$NAME
| true |
eefc734cc488c9037b276693fc1a7062b7f5f2bf | Shell | EtiennePerot/dotfiles | /backup/backup.sh | UTF-8 | 1,283 | 3.609375 | 4 | [] | no_license | #!/usr/bin/env bash
actualFile="`readlink -f "$0"`"
cd "`dirname "$actualFile"`"
if [[ $EUID -ne 0 ]]; then
exec sudo "$actualFile" "$@"
fi
if [ ! -e conf.sh ]; then
echo "Missing config file at `pwd`/conf.sh."
exit 1
fi
source conf.sh
for target in "${targets[@]}"; do
echo "Backing up to $target..."
includes=()
for include in "${backupFiles[@]}"; do
includes=("${includes[@]}" --include "$include")
done
excludes=()
for exclude in "${excludedFiles[@]}"; do
excludes=("${excludes[@]}" --exclude "$exclude")
done
gpgArgs=''
for extraGpgArg in "${extraGpgArgs[@]}"; do
gpgArgs="$gpgArgs $extraGpgArg"
done
export PASSPHRASE="$encryptionKeyPassphrase"
export SIGN_PASSPHRASE="$signingKeyPassphrase"
duplicity incremental \
--name "$backupName" \
--archive-dir "$archiveDirectory" \
--encrypt-key "$encryptionKey" \
--sign-key "$signingKey" \
--full-if-older-than "$fullEvery" \
--volsize "$volumeSize" \
--exclude "$archiveDirectory" \
"${excludes[@]}" \
"${includes[@]}" \
--exclude '**' \
--gpg-options="$gpgArgs" \
"${extraArgs[@]}" \
/ "$target"
unset SIGN_PASSPHRASE
unset PASSPHRASE
done | true |
46d5d98f2ea46d1e710f028192e3d63f5b3d30e8 | Shell | peterdeli/java | /utils/ppp/src/ITppptool_phonebook_bin_1.6-2/opt/ITppptool/xtra/ppptool | UTF-8 | 105,070 | 3.140625 | 3 | [] | no_license | #!/bin/sh
#comment
#\
PATH=/usr/bin:/bin:/usr/sbin:/sbin:/usr/local/bin:/opt/local/bin:/opt/sfw/bin:/usr/ucb:/opt/local/exe:/opt/local/pkgs/tk:/opt/ITTOtcltk/bin:/usr/openwin/bin:/usr/X11R6/bin
#\
export PATH
#\
umask 022
#\
if [ -z "$DISPLAY" ]; then
#\
echo "`date`: ERROR: DISPLAY environment not set"
#\
exit 1
#\
fi
#\
OS=`uname -s`
#\
umask 022
#\
if [ -z "$USER" ] ;then
#\
echo "USER -> $USER"
#\
echo "no"
#\
exit
#\
if [ $OS = "SunOS" ]; then
#\
if [ -f "/usr/ucb/whoami" ]; then
#\
USER_ID=`/usr/ucb/whoami`
#\
else
#\
USER_ID=`/bin/who am i | awk '{print $1}'`
#\
fi
#\
elif [ $OS = "Linux" ]; then
#\
USER_ID=`whoami`
#\
fi
#\
export USER_ID
#\
else
#\
USER_ID=$USER
#\
export USER_ID
#\
fi
#\
echo "USER_ID: $USER_ID"
#\
CONNECTION_TYPE="ppp"
#\
export CONNECTION_TYPE
#\
PPP_HOME=$HOME/.ppptool
#\
export PPP_HOME
#\
if [ ! -d $PPP_HOME ]; then
#\
mkdir $PPP_HOME >> /tmp/ppp.err.$$ 2>&1
#\
if [ $? -ne 0 ]; then
#\
echo "`date`: Error creating $PPP_HOME"
#\
exit
#\
fi
#\
else
#\
chmod 755 $PPP_HOME # just in case
#\
fi
# exec link target
#\
if [ -h $0 ]; then
#\
cd `dirname $0`
#\
basename=`basename $0`
#\
link_target=`ls -l $basename | awk '{print $NF}'`
#\
target=`basename $link_target`
#\
cd `dirname $link_target`
#\
echo "pppdir = `pwd`"
#\
PKGHOME=`pwd`
#\
export PKGHOME
#\
exec expectk $target "$@"
#\
else
#\
echo "pppdir = `pwd`"
#\
PKGHOME=`pwd`
#\
export PKGHOME
#\
cd `dirname $0`
#\
target=`basename $0`
#\
exec expectk $target "$@"
#\
fi
proc RandomInit { seed } {
global randomSeed
set randomSeed $seed
}
proc Random {} {
global randomSeed
set randomSeed [expr ($randomSeed*9301 + 49297) % 233280]
return [expr $randomSeed/double(233280)]
}
proc RandomRange { range } {
return [expr int([Random]*$range)]
}
proc build_menus { } {
##############################
# Create GUI
##############################
. configure -bg lightGray
menu .menubar -bg lightGray
# attach it to the main window
. config -menu .menubar
global active_account
global accounts
global connection_state
global bulblist
global bulbs
global on_color
global off_color
global bulb_colors
global acct_width 30
#set bulb_colors { yellow red green blue orange violet pink }
#set bulb_colors { yellow red lightGreen black }
#set bulb_colors { black green red #000000 }
set bulb_colors { black red #000000 }
set on_color [lindex $bulb_colors 0]
set off_color [lindex $bulb_colors 1]
global save_accounts_prompt
set save_accounts_prompt "false"
set connection_state "connecting"
###############################
# Create more cascade menus
##############################
foreach m {File Edit Accounts Help} {
# same as 'set File [ menu .. ]'
set $m [menu .menubar.m$m]
if { $m == "Help" } {
# figure out how to put on the right side
#.menubar add cascade -label " " -menu .menubar.mFill
.menubar add cascade -label $m -menu .menubar.m$m
} else {
.menubar add cascade -label $m -menu .menubar.m$m
}
}
##############################
# Add Menu Items
##############################
#puts "\$File = $File"
$File add command -label "Save Accounts" -command {
puts "Save Settings"
set save_accounts_prompt "true"
if { [save_accounts "save"] == 1 } {
set save_accounts_prompt "false"
}
set save_accounts_prompt "false"
}
$File add command -label "View Log" -command {
view_log
}
$File add command -label Quit -command {
puts "Quit"
puts "close_pppd"
close_pppd
global save_accounts_prompt
# Check if files need saving
set save_accounts_prompt "true"
if { [save_accounts "exit"] == 1 } {
set save_accounts_prompt "false"
exit
}
set save_accounts_prompt "false"
}
$Edit add command -label "Manage Accounts" -command {
puts "Manage Accounts"
manage_accounts
}
$Edit add command -label "Edit Preferences" -command {
puts "Edit Preferences"
edit_prefs
}
$Help add command -label "About PPP Tool" -command { puts "About PPP Tool" }
$Help add command -label "PPP Tool Help" -command { puts "PPP Tool Help" }
frame .mid -bg lightGray
label .acct_label -text "Active Account: No Account loaded" -relief groove -bg lightGray
pack .acct_label -in .mid -fill x
#label .color -text "Color" -bg white
#pack .color -in .mid
pack .mid -fill x
# account window display
#set no_acct_txt "No Account loaded"
set no_acct_txt ""
set acct_width 30
set pad_val 3
set descr_bg lightGreen
# name
frame .namef -bg lightGray
label .namef.name_l -text "Account:" -bg $descr_bg -relief groove
label .namef.name_r -text $no_acct_txt -bg lightBlue -width $acct_width -relief groove
pack .namef.name_r -side right -pady [expr $pad_val * 2]
pack .namef.name_l -side right -pady $pad_val
.namef.name_r configure -anchor w
# uid
frame .uidf -bg lightGray
label .uidf.uid_l -text "User ID:" -bg $descr_bg -relief groove
entry .uidf.uid_r -text $no_acct_txt -bg lightYellow -width $acct_width -relief groove
pack .uidf.uid_r -side right -pady $pad_val
pack .uidf.uid_l -side right -pady $pad_val
#.uidf.uid_r configure -anchor w
# passwd
frame .passwdf -bg lightGray
label .passwdf.passwd_l -text "Password: " -bg $descr_bg -relief groove
entry .passwdf.passwd_r -text $no_acct_txt -bg lightYellow -width $acct_width \
-relief groove -show *
pack .passwdf.passwd_r -side right -pady $pad_val
pack .passwdf.passwd_l -side right -pady $pad_val
#.passwdf.passwd_r configure -anchor w
# number
frame .numberf -bg lightGray
label .numberf.number_l -text "Phone Number:" -bg $descr_bg -relief groove
label .numberf.number_r -text $no_acct_txt -bg lightBlue -width $acct_width -relief groove
pack .numberf.number_r -side right -pady $pad_val
pack .numberf.number_l -side right -pady $pad_val
.numberf.number_r configure -anchor w
# domain
# ns1
# ns2
# authtype
frame .authtypef -bg lightGray
label .authtypef.authtype_l -text "Authentication Type: " -bg $descr_bg -relief groove
label .authtypef.authtype_r -text $no_acct_txt -bg lightBlue -width $acct_width -relief groove
pack .authtypef.authtype_r -side right -pady $pad_val
pack .authtypef.authtype_l -side right -pady [expr $pad_val * 2]
.authtypef.authtype_r configure -anchor w
pack .namef -in .mid -anchor w -fill x
pack .uidf -in .mid -anchor w -fill x
pack .passwdf -in .mid -anchor w -fill x
pack .numberf -in .mid -anchor w -fill x
pack .authtypef -in .mid -anchor w -fill x
frame .connect_frame -bg lightGray
button .connect -text "Connect" -width 20 \
-state normal -command connect
if { [info exists active_account] != 1 } {
.connect configure -state disabled
}
pack .connect -in .connect_frame
pack .connect_frame -pady 10
frame .cframe -bg lightGray
canvas .cframe.can -width 220 -height 20 -bg lightGray
set x1 5
set y1 5
set x2 15
set y2 15
foreach can_oval { 0 1 2 3 4 5 6 7 8 9 10 11 } {
.cframe.can create oval $x1 $y1 $x2 $y2 -fill red -width 2 -tag "bulb[set can_oval]"
incr x1 18
set x2 [expr $x1 + 10]
lappend bulblist "bulb[set can_oval]"
set bulbs(bulb[set can_oval],color) [lindex $bulb_colors 0]
#puts "set bulbs(bulb[set can_oval],color) [lindex $bulb_colors 0]"
#puts "bulbs(bulb[set can_oval],color) == $bulbs(bulb[set can_oval],color)"
}
pack .cframe.can
pack .cframe
frame .modembutton_text
foreach modem_text { HS AA CD OH RD SD TR MR RS CS SYN FAX } {
label .modembutton_text._$modem_text -text $modem_text -bg black -fg white -font {times 6}
pack .modembutton_text._$modem_text -side left
}
pack .modembutton_text
frame .footer -bg lightGray
label .footer.footer_l -text "Status:" -relief groove -bg lightGray
label .footer.footer_r -text "No connection" -relief groove -bg lightGray
pack .footer.footer_l -side left
pack .footer.footer_r -side left -fill x -expand 1
.footer.footer_r configure -anchor w -justify left
pack .footer -anchor s -side bottom -fill x -expand 1 -pady 5
global menubar_widget
set menubar_widget .menubar
global acct_widget
set acct_widget $Accounts
add_accounts
set_active
}
proc save_prefs { } {
global ppp_config_file
global ppp_settings
global ppp_settings_saved
global port_speeds
global port_speed
global modem_port
global flow_control
global g_modem_port
global g_flow_control
set save_needed "false"
#set ppp_config_file "$ppp_dir/ppp_config"
#set ppp_settings(init_string) "atz"
#set ppp_settings(connect_string) ""
#set ppp_settings(port_speed) 38400
#set ppp_settings(flow_control) hardware
#set ppp_settings(modem_port) /dev/term/b
#set ppp_settings(ppp_options) ""
foreach { key value } [array get ppp_settings] {
if { $ppp_settings($key) != $ppp_settings_saved($key) } {
set save_needed "true"
break
}
}
if { $save_needed == "true" } {
set config_w [open $ppp_config_file w]
puts "Saving ppp prefs"
foreach { key value } [array get ppp_settings] {
puts "$key\t$value"
puts $config_w "$key\t$value"
set ppp_settings_saved($key) $value
}
puts "ppp Prefs Saved"
close $config_w
} else {
puts "No Save needed for PPP Prefs"
}
}
proc save_accounts {prompt_type} {
# Persist the account list and per-account settings to $account_file when
# anything changed since the last save.
# prompt_type: "exit" -> yes/no/cancel dialog (save then exit / exit
#                        without saving / return to the tool)
#              "save" -> ok/cancel dialog
# Returns 1 when the caller may proceed (saved, nothing to save, or user
# declined the save), 0 when the user cancelled.
# NOTE(review): if prompt_type is neither "exit" nor "save" while a save
# is needed, warning_return is never set and the comparison below would
# raise an error - confirm callers only pass these two values.  Likewise,
# when save_accounts_prompt != "true" a needed save is silently skipped.
puts "proc save_accounts"
global save_accounts_prompt
global env
global accounts
global saved_accounts
global ppp_settings
global ppp_dir
#set ppp_dir "$env(HOME)/.ppptool"
global ppp_config_file
#set ppp_config_file "$ppp_dir/ppp_settings"
# list
global active_account
global account_list
global saved_account_list
global account_keys
global account_file
set save_needed false
set return_status 1
# compare account_list to saved_account_list
if { [llength $account_list] != [llength $saved_account_list] } {
# something changed
# write out accounts array
puts "llength account_list != llength saved_account_list"
set save_needed true
} else {
# number of accounts equal, but may have different names
puts "account_list == saved_account_list"
foreach account $account_list {
if { [lsearch $saved_account_list $account] == -1 } {
puts "lsearch saved_account_list $account == -1"
set save_needed true
break
} else {
puts "lsearch saved_account_list $account != -1"
}
}
}
# Lists match by name; diff the accounts arrays in BOTH directions so
# that changed values, deleted keys, and added keys are all detected.
if { $save_needed == "false" } {
foreach { key value } [array get accounts] {
puts "Key=$key, value=$value"
# we should find a match for each
# if not, something was deleted
puts "info exists saved_accounts($key) :?"
puts "saved_accounts($key) == $value :?"
if { [info exists saved_accounts($key)] != 1 } {
puts "save_needed: info exists saved_accounts($key) != 1"
set save_needed true
break
} elseif { $saved_accounts($key) != $value } {
puts "save_needed: saved_accounts($key) != $value"
set save_needed true
break
}
}
foreach { key value } [array get saved_accounts] {
puts "Key=$key, value=$value"
# we should find a match for each
# if not, something was added
puts "info exists accounts($key) :?"
puts "accounts($key) = $value :?"
if { [info exists accounts($key)] != 1 } {
puts "save_needed: info exists accounts($key) != 1"
set save_needed true
break
} elseif { $accounts($key) != $value } {
puts "save_needed: accounts($key) != $value"
puts "accounts($key) != $value"
set save_needed true
break
}
}
}
if { $save_needed == "true" } {
if { $save_accounts_prompt == "true" } {
if { $prompt_type == "exit" } {
set warning_return [tk_messageBox -default yes \
-type yesnocancel -icon warning \
-message "To save your changes:\n\n\
Click YES to save then exit\n\
Click NO to exit without saving\n\
Click CANCEL to return to PPP Tool\n" \
-title "Unsaved Changes"]
} elseif { $prompt_type == "save" } {
set warning_return [tk_messageBox -default ok \
-type okcancel -icon warning \
-message "Would you like to save your changes? \n\n\
OK to save\n\
CANCEL to return to PPP Tool\n" \
-title "Unsaved Changes"]
}
if {"$warning_return" == "yes" || "$warning_return" == "ok"} {
puts "Account list changed, Saving account info .."
set account_w [open $account_file w]
puts "writing to $account_file:"
# Write every key of every account as "key<TAB>value"; missing keys
# are created as empty strings so the file format stays regular.
foreach account_name $account_list {
foreach key [set account_keys] {
if { [info exists accounts($account_name,$key)] } {
puts "$key\t$accounts($account_name,$key)"
puts $account_w "$key\t$accounts($account_name,$key)"
# set save_accounts to current values
set saved_accounts($account_name,$key) $accounts($account_name,$key)
} else {
set accounts($account_name,$key) ""
puts "$key\t$accounts($account_name,$key)"
puts $account_w "$key\t$accounts($account_name,$key)"
# set save_accounts to current values
set saved_accounts($account_name,$key) ""
}
}
}
# set save_account list to current values
set saved_account_list $account_list
puts "Wrote account file $account_file"
close $account_w
# write prefs
save_prefs
set save_needed "false"
set return_status 1
} elseif {"$warning_return" == "no"} {
set return_status 1
} elseif {"$warning_return" == "cancel"} {
set return_status 0
}
}
} else {
puts "No save needed"
set save_needed "false"
}
return $return_status
}
proc manage_accounts { } {
# Open (or raise) the Account Manager window: a scrolling listbox of
# account names on the left plus Make Active / New / Edit / Delete /
# Close buttons on the right.  The Edit/Delete/Select buttons are
# enabled only while a list entry is selected.
# NOTE: the bind and -command scripts below execute at global scope, so
# "selected_account" set in the ButtonRelease binding is a global shared
# with the button commands.
global accounts account_list active_account
# create win
# add r/l frames
# add text & scroll on left
# add buttons on right
# add account names to scroll text
# map buttons to commands
# create account window w/entry widgets,
# label/entry for each acct field
if { [winfo exists .account_manager_win] } {
catch { wm deiconify .account_manager_win }
catch { wm raise .account_manager_win }
return
}
toplevel .account_manager_win
wm title .account_manager_win "Account Manager"
# Two frames, one for a scrolling list of accounts, the other for the buttons
frame .account_manager_win.account_frame
pack .account_manager_win.account_frame -side left -padx 1m -pady 1m
listbox .account_manager_win.account_list -yscrollcommand \
".account_manager_win.yscroll_bar set" -xscrollcommand \
".account_manager_win.xscroll_bar set"
.account_manager_win.account_list configure -height 10
scrollbar .account_manager_win.yscroll_bar -command \
".account_manager_win.account_list yview" -relief sunken
.account_manager_win.yscroll_bar set 5 5 0 4
scrollbar .account_manager_win.xscroll_bar -command \
".account_manager_win.account_list xview" -relief sunken \
-orient horizontal
.account_manager_win.xscroll_bar set 5 5 0 4
pack .account_manager_win.yscroll_bar -in .account_manager_win.account_frame \
-side left -fill y
pack .account_manager_win.xscroll_bar -in .account_manager_win.account_frame \
-side bottom -fill x
pack .account_manager_win.account_list \
-in .account_manager_win.account_frame -side left
# Single click: remember the selection and enable the per-account buttons;
# clicking empty space disables them again.
bind .account_manager_win.account_list <ButtonRelease-1> {
if { [string length \
[.account_manager_win.account_list curselection]] > 0 } {
set selected_account [list [selection get]]
puts "selected_account = $selected_account"
.account_manager_win.delete_button configure -state normal
.account_manager_win.edit_button configure -state normal
.account_manager_win.select_button configure -state normal
} else {
.account_manager_win.delete_button configure -state disabled
.account_manager_win.edit_button configure -state disabled
.account_manager_win.select_button configure -state disabled
}
}
# Double click: make the account active and close the manager.
bind .account_manager_win.account_list <Double-ButtonPress-1> {
set active_account $selected_account
set_account $active_account
#.account_frame.account_button configure -text $active_account
destroy .account_manager_win
}
# Populate the listbox, skipping accounts flagged DELETED.
foreach account [set account_list] {
if { $accounts($account,status) != "DELETED" } {
.account_manager_win.account_list insert end "$accounts($account,name)"
}
}
frame .account_manager_win.button_frame
pack .account_manager_win.button_frame -padx 1m -pady 1m
button .account_manager_win.select_button -text "Make Active" \
-state disabled -command {
puts "selected_account = $selected_account"
set active_account $selected_account
#.account_frame.account_button configure -text $active_account
# check if swan or not - if not, enable password
set_account $active_account
#destroy .account_manager_win
}
button .account_manager_win.new_button -text New \
-command {
puts "Create New Account"
create_account
}
button .account_manager_win.edit_button -text Edit -state disabled \
-command {
puts "Edit Account $selected_account"
edit_account $selected_account
#.account_manager_win.edit_button configure -state disabled
}
button .account_manager_win.delete_button -text Delete -state disabled \
-command {
puts "Delete Account"
set current_sel [.account_manager_win.account_list curselection]
puts "current selection: $current_sel: len: [string length $current_sel]"
if { [string length $current_sel] != 0 } {
.account_manager_win.account_list delete \
[.account_manager_win.account_list curselection]
#.account_manager_win.account_list delete \
#[.account_manager_win.account_list index $selected_account]
delete_account_menu $selected_account
# if account is active, adjust front panel values
puts "Comparing selected account $selected_account to\
active account $active_account"
if { $selected_account == $active_account } {
set active_account ""
set_account $active_account
}
}
.account_manager_win.edit_button configure -state disabled
.account_manager_win.select_button configure -state disabled
.account_manager_win.delete_button configure -state disabled
}
button .account_manager_win.close_button -text Close \
-command {destroy .account_manager_win}
pack .account_manager_win.select_button .account_manager_win.new_button \
.account_manager_win.edit_button .account_manager_win.delete_button \
.account_manager_win.close_button -in .account_manager_win.button_frame \
-ipadx 2 -ipady 2 -padx 2 -pady 2 -fill x
}
proc delete_account {account_name} {
    # Remove the named account by delegating to delete_account_menu, which
    # takes it out of the Accounts menu and the in-memory bookkeeping.
    global accounts account_list account_keys account_strings
    delete_account_menu $account_name
}
proc edit_account {edited_account_name} {
# Open an editor window (.edit_account) pre-filled with the named
# account's fields.  One row per key in account_keys: radio buttons for
# authtype, checkbuttons for defroute/resolv, entry widgets otherwise.
# "Save Edits" validates required fields, handles account renames
# (replacing the old entry in account_list, the Accounts menu, and the
# manager listbox), rewrites the resolv file, and updates the front
# panel when the edited account is the active one.
# NOTE: widget paths are built dynamically as
# .edit_account.<key>_frame.<key>_<kind> via [set key]; the button
# -command scripts run at global scope and rely on the globals below.
puts "edit_account"
#create window
#name Account1
#uid cbj
#passwd bongo
#number 303 123-4567
#domain craig.com
#ns1 123.456.789.000
#ns2 987.654.321.000
#authtype DES challenge
#defroute 1
global edit_account_name
set edit_account_name $edited_account_name
global accounts
global account_list
global account_keys
global account_strings
global authtype
global defroute
global resolv
global active_account
global required_keys
#.account_manager_win.account_list selection clear 0 end
.account_manager_win.edit_button configure -state disabled
.account_manager_win.delete_button configure -state disabled
set top .edit_account
toplevel [set top]
frame [set top].required_key_frame
label [set top].required_key_frame.required_key -text "Required Fields indicated by '**'"
pack [set top].required_key_frame.required_key
pack [set top].required_key_frame -anchor n
# Build one labelled input row per account key; status and resolv_file
# are internal fields and get no widget.
foreach key [set account_keys] {
if { $key == "status" } { continue }
if { $key == "resolv_file" } { continue }
set value $account_strings($key)
set [set key]_frame [set top].[set key]_frame
frame [set [set key]_frame]
puts "key = $key"
if { $key == "authtype" } {
# radio buttons for
# pap chap challenge card
radiobutton [set [set key]_frame].[set key]_radio_pap -text \
"PAP" -variable authtype -value "pap" -width 11
pack [set [set key]_frame].[set key]_radio_pap -side right
radiobutton [set [set key]_frame].[set key]_radio_chap -text \
"CHAP" -variable authtype -value "chap" -width 11
pack [set [set key]_frame].[set key]_radio_chap -side right
radiobutton [set [set key]_frame].[set key]_radio_token -text \
"Token Card" -variable authtype -value "tokencard" -width 11
pack [set [set key]_frame].[set key]_radio_token -side right
label [set [set key]_frame].[set key]_label -text "** $value"
pack [set [set key]_frame].[set key]_label -side right
set authtype $accounts($edit_account_name,authtype)
} elseif { $key == "defroute" } {
checkbutton [set [set key]_frame].[set key]_check \
-text "Set default route" -anchor w -onvalue "1" \
-offvalue "0" -variable defroute -width 42
set defroute $accounts($edit_account_name,defroute)
pack [set [set key]_frame].[set key]_check -side right
label [set [set key]_frame].[set key]_label -text $value
pack [set [set key]_frame].[set key]_label -side right
} elseif { $key == "resolv" } {
checkbutton [set [set key]_frame].[set key]_check \
-text "Create /etc/resolv.conf from DNS settings" -anchor w -onvalue "1" \
-offvalue "0" -variable resolv -width 42
puts "resolv for account $edit_account_name = \
$accounts($edit_account_name,resolv)"
set resolv $accounts($edit_account_name,resolv)
puts "resolv == $resolv"
pack [set [set key]_frame].[set key]_check -side right
label [set [set key]_frame].[set key]_label -text $value
pack [set [set key]_frame].[set key]_label -side right
} else {
entry [set [set key]_frame].[set key]_entry -width 45
eval [set [set key]_frame].[set key]_entry insert 0 \
\"$accounts($edit_account_name,$key)\"
pack [set [set key]_frame].[set key]_entry -side right
if { [lsearch $required_keys $key] != -1 } {
label [set [set key]_frame].[set key]_label -text "** $value"
} else {
label [set [set key]_frame].[set key]_label -text $value
}
pack [set [set key]_frame].[set key]_label -side right
pack [set [set key]_frame].[set key]_entry -anchor w
}
pack [set [set key]_frame] -fill x -expand 1 -pady 5
}
frame [set top].button_frame
button [set top].button_frame.save -text "Save Edits" -command {
puts "Save Edits"
# get all entries
# Check if 'name' has been changed
# if so, lreplace old account name from account_list
# add new account name
# add to accounts array and account_list
global edit_account
global edit_account_name
global authtype
global defroute
global resolv
global active_account
# check if name is already taken
set new_account_name [list [.edit_account.name_frame.name_entry get]]
if { $new_account_name == "" } {
tk_messageBox -type ok -icon error -title "Missing Account Name" \
-message "Please Enter an Account Name"
return
} elseif { [string length $new_account_name] < 1 } {
tk_messageBox -type ok -icon error -title "Missing Account Name" \
-message "Please Enter an Account Name"
return
}
#set new_account_name {}
# delete_account_flag stays true when the account was renamed (old entry
# must be removed); it is reset to false when the name is unchanged.
set delete_account_flag "true"
foreach key [set account_keys] {
# popup for required fields
if { $key == "status" } { continue }
if { $key == "resolv_file" } { continue }
if { $key == "authtype" } {
if { [string length $authtype] < 1 } {
tk_messageBox -type ok -icon error -title \
"$account_strings($key) Required" -message \
"Please Enter $account_strings($key)"
return
} else {
set edit_account($key) $authtype
}
} elseif { $key == "defroute" } {
set edit_account($key) $defroute
} elseif { $key == "resolv" } {
set edit_account($key) $resolv
} else {
set [set key]_frame .edit_account.[set key]_frame
set key_value [[set [set key]_frame].[set key]_entry get]
puts "924"
if { [lsearch $required_keys $key] != -1 && \
[string length $key_value] < 1 } {
tk_messageBox -type ok -icon error -title \
"$account_strings($key) Required" -message \
"Please Enter $account_strings($key)"
return
}
puts "=====> $key: $key_value"
set edit_account($key) $key_value
if { $key == "name" } {
set new_account_name [list $key_value]
set new_account_string $key_value
set old_account_string $accounts($edit_account_name,name)
set replace_index [lsearch $account_list $edit_account_name]
# Check if name has changed
if { $edit_account_name != $new_account_name } {
puts "replacing $edit_account_name with $new_account_name\
at index $replace_index"
set account_list [lreplace $account_list $replace_index \
$replace_index $new_account_name]
puts "account_list == $account_list"
# save resolv_file name from old account
set edit_account(resolv_file) \
$accounts($edit_account_name,resolv_file)
set edit_account(status) \
$accounts($edit_account_name,status)
} else {
# name has not changed, don't delete from accounts
# in delete_account_menu
set delete_account_flag "false"
}
}
}
}
if { $delete_account_flag == "false" } {
# Same name: overwrite fields in place and refresh the front panel.
foreach { key value } [array get edit_account] {
puts "accounts($new_account_name,$key) = $value"
set accounts($new_account_name,$key) $value
}
set_account $new_account_name
# write resolv file in case changed
write_resolv_file $accounts($new_account_name,name)
} else {
# Renamed: create the new entry, delete the old one from the menu,
# and swap the listbox entry at the old position.
# add to accounts
puts "account name: $new_account_name"
puts "account string: $new_account_string"
set accounts($new_account_name,name) $new_account_string
foreach { key value } [array get edit_account] {
puts "accounts($new_account_name,$key) = $value"
set accounts($new_account_name,$key) $value
}
# write resolv file in case changed
write_resolv_file $accounts($new_account_name,name)
# delete old account
set deleted_index [delete_account_menu $edit_account_name]
# add to Accounts menu and acct manager
add_account_menu $new_account_name $deleted_index
# remove from account list
set account_box_items [.account_manager_win.account_list get 0 end]
foreach item $account_box_items {
if { $item == $old_account_string } {
puts "deleting $item from account list at index \
[lsearch $account_box_items $item]"
.account_manager_win.account_list delete \
[lsearch $account_box_items $item]
}
}
.account_manager_win.account_list insert $replace_index "$accounts($new_account_name,name)"
# if account is active, adjust front panel values
puts "Comparing edited account $edit_account_name to\
active account $active_account"
if { $edit_account_name == $active_account } {
set active_account $new_account_name
set_account $new_account_name
}
set edit_account_name $new_account_name
}
.account_manager_win.edit_button configure -state normal
.account_manager_win.delete_button configure -state normal
destroy .edit_account
}
button [set top].button_frame.close -text "Close" -command {
.account_manager_win.edit_button configure -state normal
.account_manager_win.delete_button configure -state normal
destroy .edit_account
}
pack .edit_account.button_frame.close -side right
pack .edit_account.button_frame.save -side left
pack .edit_account.button_frame -side bottom -pady 10
}
proc create_account { } {
# Open a blank account-creation window (.new_account): one labelled row
# per key in account_keys (radio buttons for authtype, checkbuttons for
# defroute/resolv, entries otherwise).  "Create Account" validates a
# unique, non-empty name plus required fields, appends the account to
# account_list and the accounts array, generates a per-account
# resolv.conf path, writes it, and adds the account to the menu and the
# Account Manager listbox.
# NOTE: widget paths are built dynamically via [set key]; the button
# -command scripts run at global scope (ppp_dir, account_list, etc. are
# resolved as globals there).
#create window
#name Account1
#uid cbj
#passwd bongo
#number 303 123-4567
#domain craig.com
#ns1 123.456.789.000
#ns2 987.654.321.000
#authtype DES challenge
#defroute 1
global accounts
global account_list
global account_keys
global account_strings
global create_authtype
global create_defroute
global create_resolv
set top .new_account
toplevel [set top]
global required_keys
frame [set top].required_key_frame
label [set top].required_key_frame.required_key -text "Required Fields indicated by '**'"
pack [set top].required_key_frame.required_key
pack [set top].required_key_frame -anchor n
# Build one input row per key; status and resolv_file are internal
# fields and get no widget.
foreach key [set account_keys] {
if { $key == "status" } { continue }
if { $key == "resolv_file" } { continue }
set value $account_strings($key)
set [set key]_frame [set top].[set key]_frame
frame [set [set key]_frame]
puts "key = $key"
if { $key == "authtype" } {
# radio buttons for
# pap chap challenge card
radiobutton [set [set key]_frame].[set key]_radio_pap -text \
"PAP" -variable create_authtype -value "pap" -width 12
pack [set [set key]_frame].[set key]_radio_pap -side right
radiobutton [set [set key]_frame].[set key]_radio_chap -text \
"CHAP" -variable create_authtype -value "chap" -width 12
pack [set [set key]_frame].[set key]_radio_chap -side right
radiobutton [set [set key]_frame].[set key]_radio_token -text \
"Token Card" -variable create_authtype -value "tokencard" -width 12
pack [set [set key]_frame].[set key]_radio_token -side right
label [set [set key]_frame].[set key]_label -text "** $value"
pack [set [set key]_frame].[set key]_label -side right
set create_authtype ""
} elseif { $key == "defroute" } {
set create_defroute 0
checkbutton [set [set key]_frame].[set key]_check \
-text "Set default route" -anchor w -onvalue "1" \
-offvalue "0" -variable create_defroute -width 42
pack [set [set key]_frame].[set key]_check -side right
label [set [set key]_frame].[set key]_label -text $value
pack [set [set key]_frame].[set key]_label -side right
} elseif { $key == "resolv" } {
checkbutton [set [set key]_frame].[set key]_check \
-text "Create /etc/resolv.conf from DNS settings" -anchor w -onvalue "1" \
-offvalue "0" -variable create_resolv -width 42
pack [set [set key]_frame].[set key]_check -side right
label [set [set key]_frame].[set key]_label -text $value
pack [set [set key]_frame].[set key]_label -side right
} else {
entry [set [set key]_frame].[set key]_entry -width 45
pack [set [set key]_frame].[set key]_entry -side right
if { [lsearch $required_keys $key] != -1 } {
label [set [set key]_frame].[set key]_label -text "** $value"
} else {
label [set [set key]_frame].[set key]_label -text $value
}
pack [set [set key]_frame].[set key]_label -side right
pack [set [set key]_frame].[set key]_entry -anchor w
}
pack [set [set key]_frame] -fill x -expand 1 -pady 5
}
frame [set top].button_frame
button [set top].button_frame.save -text "Create Account" -command {
# get all entries
# add to accounts array and account_list
global new_account
global create_authtype
global create_defroute
global create_resolv
# check if name is already taken
set new_account_name [list [.new_account.name_frame.name_entry get]]
if { [lsearch $account_list $new_account_name] != -1 } {
tk_messageBox -type ok -icon error -title "Duplicate Account Name" \
-message "Account name $new_account_name already exists"
return
} elseif { $new_account_name == "" } {
tk_messageBox -type ok -icon error -title "Missing Account Name" \
-message "Please Enter an Account Name"
return
} elseif { [string length $new_account_name] < 1 } {
tk_messageBox -type ok -icon error -title "Missing Account Name" \
-message "Please Enter an Account Name"
return
}
set account_name {}
# Collect every field into new_account(), validating required ones.
foreach key [set account_keys] {
puts "-------> Key $key"
if { $key == "status" } { continue }
if { $key == "resolv_file" } { continue }
# get checkbuttons separate from entry boxes
if { $key == "authtype" } {
if { [string length $create_authtype] < 1 } {
tk_messageBox -type ok -icon error -title \
"$account_strings($key) Required" -message \
"Please Enter $account_strings($key)"
return
} else {
set new_account($key) $create_authtype
}
} elseif { $key == "defroute" } {
set new_account($key) $create_defroute
} elseif { $key == "resolv" } {
set new_account($key) $create_resolv
} else {
# entry boxes
set [set key]_frame .new_account.[set key]_frame
set key_value [[set [set key]_frame].[set key]_entry get]
puts "----> key: $key key_value: $key_value"
set new_account($key) $key_value
# required fields
#account_strings(name) "Account Name:"
#account_strings(uid) "User ID:"
#account_strings(passwd) "Password:"
#account_strings(number) "Phone Number:"
#account_strings(domain) "DNS Domain Name:"
#account_strings(ns1) "Account Nameserver #1:"
#account_strings(ns2) "Account Nameserver #2:"
#account_strings(authtype) "Authentication Type:"
#account_strings(defroute) "Default Route:"
if { $key == "name" } {
set account_name [list $key_value]
set account_string $key_value
}
if { [lsearch $required_keys $key] != -1 && [string length $key_value] < 1 } {
tk_messageBox -type ok -icon error -title \
"$account_strings($key) Required" -message \
"Please Enter $account_strings($key)"
return
}
}
}
lappend account_list $account_name
# add to accounts
puts "account name: $account_name"
puts "account string: $account_string"
set accounts($account_name,name) $account_string
foreach { key value } [array get new_account] {
puts "accounts($account_name,$key) = $value"
set accounts($account_name,$key) $value
}
# create resolv.conf name for this account
set resolv_name "[set ppp_dir]/resolv.conf.[RandomRange 1000]"
set accounts($account_name,resolv_file) $resolv_name
puts "resolv.conf file is $accounts($account_name,resolv_file)"
# write a resolv.conf file to the file
write_resolv_file $accounts($account_name,name)
# set status
set accounts($account_name,status) inactive
# add to Accounts menu and acct manager
add_account_menu $account_name "END"
.account_manager_win.account_list insert end "$accounts($account_name,name)"
destroy .new_account
}
button [set top].button_frame.close -text "Close" -command {
destroy .new_account
}
pack .new_account.button_frame.close -side right
pack .new_account.button_frame.save -side left
pack .new_account.button_frame -side bottom -pady 10
}
proc write_resolv_file { account_name } {
    # Write a resolv.conf-style file for the named account, using its
    # domain/search/ns1/ns2 fields from the global accounts array.  The
    # destination path is the account's resolv_file field.
    # Returns 0 on success, -1 when the file cannot be created.
    puts "write_resolv_file"
    global accounts
    # Array keys are indexed by the list form of the account name, matching
    # how the rest of the tool stores accounts.
    set list_name [list $account_name]
    set resolv_file $accounts($list_name,resolv_file)
    set domain $accounts($list_name,domain)
    set search $accounts($list_name,search)
    set ns1 $accounts($list_name,ns1)
    set ns2 $accounts($list_name,ns2)
    puts "Opening resolv_file $resolv_file"
    if { [catch { set resolv_fd [open $resolv_file w] } err] != 0 } {
        puts "Unable to create $resolv_file: $err"
        return -1
    }
    puts $resolv_fd "# resolv.conf file generated by ppptool"
    # Only emit directives whose values are non-empty: the previous
    # version wrote bare "domain"/"nameserver" lines for blank fields,
    # which is not valid resolv.conf syntax.
    if { [string length $domain] > 0 } {
        puts $resolv_fd "domain $domain"
    }
    if { [string length $search] < 1 } {
        # No explicit search list: fall back to the domain, as before.
        if { [string length $domain] > 0 } {
            puts $resolv_fd "search $domain"
        }
    } else {
        puts $resolv_fd "search $search"
    }
    if { [string length $ns1] > 0 } {
        puts $resolv_fd "nameserver $ns1"
    }
    if { [string length $ns2] > 0 } {
        puts $resolv_fd "nameserver $ns2"
    }
    close $resolv_fd
    puts "Created resolv_file $resolv_file"
    return 0
}
proc blink_single {bulb } {
# Advance one canvas "bulb" item to the next colour in bulb_colors and
# reschedule itself via [after] while connection_state is "connecting";
# once the state changes, all bulbs are painted black and the timer
# chain for this bulb stops.
global connection_state
global incr_value
global bulblist
global on_color
global off_color
global bulb_colors
global after_interval
global bulbs
# Random 0-500 ms delay so the bulbs blink out of phase.
set after_interval [RandomRange 500 ]
# Cycle the colour list, wrapping to the first entry after the last.
if { [lsearch $bulb_colors $bulbs($bulb,color)] == [expr [llength $bulb_colors] - 1] } {
set bulbs($bulb,color) [lindex $bulb_colors 0]
} else {
set bulbs($bulb,color) [lindex $bulb_colors [expr [lsearch $bulb_colors $bulbs($bulb,color)] + 1]]
}
# Only repaint on the longer delays; short ones just reschedule, which
# makes the blinking look irregular.
if { $after_interval > 250 } {
.cframe.can itemconfigure $bulb -fill $bulbs($bulb,color)
}
if { $connection_state != "connecting" } {
.cframe.can itemconfigure all -fill black
#after $after_interval blink_single $bulb
} else {
after $after_interval blink_single $bulb
}
}
proc init_blinking_bulbs { } {
# Kick off the modem-panel light show: every bulb except bulb0, bulb3
# and bulb10 gets an initial colour and its own blink_single timer
# chain (each with a random phase).  bulb0/bulb3 stay solid green and
# bulb10 solid yellow.
global connection_state
global incr_value
global bulblist
global bulbs
global on_color
global off_color
global bulb_colors
global after_interval
# set different intervals
foreach bulb $bulblist {
#puts "bulb $bulb"
set after_interval [RandomRange 1000]
# Pick the next colour in the cycle (wrap after the last entry).
if { [lsearch $bulb_colors $bulbs($bulb,color)] == [expr [llength $bulb_colors] - 1] } {
#puts "bulbs($bulb,color) = $bulbs($bulb,color)"
set bulbs($bulb,color) [lindex $bulb_colors 0]
} else {
set bulbs($bulb,color) [lindex $bulb_colors [expr [lsearch $bulb_colors $on_color] + 1]]
}
if { $bulb != "bulb0" && $bulb != "bulb3" && $bulb != "bulb10" } {
#puts "init_blinking: .cframe.can itemconfigure $bulb -fill $bulbs($bulb,color)"
.cframe.can itemconfigure $bulb -fill $bulbs($bulb,color)
#after $after_interval blink_single $bulb
blink_single $bulb
} else {
#puts "Constant: .cframe.can itemconfigure $bulb -fill green"
if { $bulb == "bulb0" || $bulb == "bulb3" } {
.cframe.can itemconfigure $bulb -fill green
} else {
.cframe.can itemconfigure $bulb -fill yellow
}
}
}
}
proc edit_prefs { } {
# Open (or raise) the Preferences window (.prefs): menubuttons for
# modem_port / flow_control / port_speed, entry widgets for
# init_string / connect_string / ppp_options, plus Save/Close buttons.
# Widget paths are built dynamically via [set ppp_setting]; the Save
# button copies widget values back into ppp_settings and calls
# save_prefs.
global ppp_settings
global ppp_settings_saved
global modem_port
global flow_control
global g_modem_port
global g_flow_control
global port_speeds
global port_speed
puts "Modem port = $ppp_settings(modem_port)"
if { [winfo exists .prefs] } {
catch { wm deiconify .prefs }
catch { wm raise .prefs }
return
}
toplevel .prefs
frame .prefs.pref_frame
## Menubutton items ##
foreach ppp_setting { modem_port flow_control port_speed } {
# Button label like "modem port" derived from the setting name.
set button_descr "[lindex [split $ppp_setting '_'] 0] [lindex [split $ppp_setting '_'] 1]"
#set ppp_settings(modem_port) /dev/term/b
frame .prefs.pref_frame.[set ppp_setting]_frame
if { [info exists ppp_settings($ppp_setting)] == 1 } {
puts "$ppp_setting = $ppp_settings($ppp_setting)"
set [set ppp_setting]_button_text $ppp_settings($ppp_setting)
} else {
puts "$ppp_setting = $ppp_settings($ppp_setting)"
set [set ppp_setting]_button_text "Select $button_descr"
}
menubutton .prefs.pref_frame.[set ppp_setting]_frame.[set ppp_setting] \
-text [set [set ppp_setting]_button_text] \
-menu .prefs.pref_frame.[set ppp_setting]_frame.[set ppp_setting].menu \
-relief raised -width 25
pack .prefs.pref_frame.[set ppp_setting]_frame.[set ppp_setting] -side right
label .prefs.pref_frame.[set ppp_setting]_frame.[set ppp_setting]_label -text $button_descr
pack .prefs.pref_frame.[set ppp_setting]_frame.[set ppp_setting]_label -side right
#.prefs.pref_frame.[set ppp_setting]_frame.modem_port configure -anchor w
pack .prefs.pref_frame.[set ppp_setting]_frame -fill x -expand 1 -pady 2
eval { set [set ppp_setting]_menu \
[menu .prefs.pref_frame.[set ppp_setting]_frame.[set ppp_setting].menu -tearoff 0] }
if { $ppp_setting == "modem_port" } {
# NOTE(review): [exec ls /dev | grep ...] raises an error when no
# ttyS* device matches (grep exits non-zero) - confirm this cannot
# happen on the target systems, or wrap in catch.
foreach modem_port [exec ls /dev | grep ttyS\[0-9\] ] {
set modem_port "/dev/[set modem_port]"
puts "[set ppp_setting]_menu add command -label $modem_port -command"
puts "ppp_settings: [array names ppp_settings]"
eval { [set [set ppp_setting]_menu] add radio -label $modem_port -variable modem_port \
-value $modem_port -command {
.prefs.pref_frame.modem_port_frame.modem_port configure -text $modem_port
}
}
}
set modem_port $ppp_settings(modem_port)
} elseif { $ppp_setting == "flow_control" } {
foreach flow_control { hardware software none } {
puts "[set ppp_setting]_menu add command -label $flow_control -command"
puts "ppp_settings: [array names ppp_settings]"
eval { [set [set ppp_setting]_menu] add radio -label $flow_control -variable \
flow_control -value $flow_control -command {
.prefs.pref_frame.flow_control_frame.flow_control \
configure -text $flow_control
}
}
}
set flow_control $ppp_settings(flow_control)
} elseif { $ppp_setting == "port_speed" } {
#port_speeds { 38400 57600 115200 230400 }
foreach port_speed $port_speeds {
puts "[set ppp_setting]_menu add command -label $port_speed -command"
puts "ppp_settings: [array names ppp_settings]"
eval { [set [set ppp_setting]_menu] add radio -label $port_speed \
-variable port_speed -value $port_speed -command {
.prefs.pref_frame.port_speed_frame.port_speed \
configure -text $port_speed
}
}
}
set port_speed $ppp_settings(port_speed)
}
}
#### Entry items ###
# Size the entry widgets (and the menubuttons above) to fit the longest
# current setting value, with a 25-column minimum.
set last_width 0
foreach ppp_setting { init_string connect_string ppp_options } {
puts "ppp_setting $ppp_setting"
set entry_width [string length $ppp_settings($ppp_setting)]
if { $entry_width < 20 } { set entry_width 25 }
if { $entry_width > $last_width } { set last_width $entry_width }
}
if { $entry_width < $last_width } { set entry_width [expr $last_width + 5] }
.prefs.pref_frame.modem_port_frame.modem_port configure -width $entry_width
.prefs.pref_frame.flow_control_frame.flow_control configure -width $entry_width
foreach ppp_setting { init_string connect_string ppp_options } {
frame .prefs.pref_frame.[set ppp_setting]_frame
label .prefs.pref_frame.[set ppp_setting]_frame.[set ppp_setting]_label -text $ppp_setting
entry .prefs.pref_frame.[set ppp_setting]_frame.[set ppp_setting]_entry -width $entry_width
.prefs.pref_frame.[set ppp_setting]_frame.[set ppp_setting]_entry insert 0 $ppp_settings($ppp_setting)
pack .prefs.pref_frame.[set ppp_setting]_frame.[set ppp_setting]_entry -side right
pack .prefs.pref_frame.[set ppp_setting]_frame.[set ppp_setting]_label -side right
pack .prefs.pref_frame.[set ppp_setting]_frame -fill x -expand 1 -pady 2
}
#set ppp_settings(port_speed) 38400
#set ppp_settings(flow_control) hardware
#set ppp_settings(init_string) "atz"
#set ppp_settings(connect_string) ""
#set ppp_settings(ppp_options) ""
pack .prefs.pref_frame -pady 2
frame .prefs.prefbuttons
button .prefs.prefbuttons.prefs_save -text "Save" -command {
global ppp_settings
global ppp_settings_saved
global modem_port
global flow_control
global g_modem_port
global g_flow_control
global port_speeds
global port_speed
puts "Save Prefs"
# Copy entry-widget values and menubutton selections back into
# ppp_settings, then persist via save_prefs.
foreach ppp_setting { init_string connect_string ppp_options } {
set ppp_settings($ppp_setting) \
[.prefs.pref_frame.[set ppp_setting]_frame.[set ppp_setting]_entry get]
puts "saved ppp_setting $ppp_setting as \
[.prefs.pref_frame.[set ppp_setting]_frame.[set ppp_setting]_entry get]"
}
foreach ppp_setting { modem_port flow_control port_speed } {
puts "port_speed == $port_speed"
puts "$ppp_setting = [set [set ppp_setting]]"
set ppp_settings($ppp_setting) [set [set ppp_setting]]
puts "saved ppp_setting $ppp_setting as $ppp_settings($ppp_setting)"
}
save_prefs
## write_global #
#destroy .prefs
wm withdraw .prefs
}
button .prefs.prefbuttons.prefs_close -text "Close" -command { wm withdraw .prefs }
pack .prefs.prefbuttons.prefs_save -side left
pack .prefs.prefbuttons.prefs_close -side right
pack .prefs.prefbuttons -side bottom -pady 5
}
proc log_message { message } {
# Append a message to the log window's text widget and scroll to the end.
# Both calls are wrapped in catch so logging is a silent no-op when the
# log window has not been built yet (or was destroyed).
# Fix: the old code wrapped the widget command in brackets inside catch
# ("catch { [.w insert ...] }"), which re-evaluated the command's result
# as a command, and "catch [.w yview end]" ran yview *before* catch, so
# errors from a missing window escaped uncaught.
catch { .log_win.log_text insert end $message }
catch { .log_win.log_text yview end }
}
proc view_log { } {
# Show (deiconify) the log window; ignore errors if it does not exist.
# Fix: "catch { [wm deiconify .log_win] }" re-evaluated the command's
# result as a command; call wm directly inside catch instead.
catch { wm deiconify .log_win }
}
proc build_log_win { } {
# Build the (initially hidden) "PPP Log" toplevel: a scrollable text
# widget plus a Close button.  Shown later via view_log; text is added
# via log_message / log_messages.
# NOTE(review): the two globals below are declared but not used here.
global accounts
global active_account
toplevel .log_win
wm title .log_win "PPP Log"
# Start withdrawn; view_log deiconifies it on demand.
wm withdraw .log_win
frame .log_win.log_frame -relief flat
text .log_win.log_text -relief sunken -borderwidth 2 \
-yscrollcommand { .log_win.scroll_y set} \
-height 20 -width 65 -setgrid true
scrollbar .log_win.scroll_y -command ".log_win.log_text yview"
pack .log_win.log_text -in .log_win.log_frame -side left -pady 1m -fill both
pack .log_win.scroll_y -in .log_win.log_frame -side right -fill y
frame .log_win.button_frame -relief flat
# Close merely hides the window so its contents are preserved.
button .log_win.close_button -text "Close" -command {\
wm withdraw .log_win
}
pack .log_win.close_button -side bottom -in .log_win.button_frame \
-ipadx 2 -padx 2
pack .log_win.log_frame .log_win.button_frame -side top
}
proc add_accounts { } {
# Populate the accounts menu: append one radio entry per configured
# account, in account_list order.
global menubar_widget
global acct_widget
global color_array
global account_list
global accounts
foreach acct_name $account_list {
add_account_menu $acct_name "END"
}
}
proc set_active { } {
# Find the account whose saved status is "active" and make it current
# (updating the UI via set_account).  Accounts that have no status key
# at all are given an empty ("inactive") status.
global active_account
global accounts
global account_list
foreach name $account_list {
if { ![info exists accounts($name,status)] } {
puts "Setting account $name to 'inactive' status"
set accounts($name,status) ""
continue
}
if { $accounts($name,status) == "active" } {
puts "Setting account $name to 'active' status"
set active_account $name
set_account $name
}
}
}
proc set_account {value } {
# Refresh the main window so it reflects the currently selected account
# (globals: active_account / accounts).  Handles three cases: the active
# account was DELETED, a valid active account, or no active account.
# Finally re-derives each account's "active"/"" status flag.
# NOTE(review): the `value` parameter is never read — the proc works
# entirely from the active_account global; confirm before relying on it.
# display active account
global menubar_widget
global active_account
global color_array
global accounts
global account_list
global acct_width
# Case 1: the active account has been deleted — blank and disable the
# detail fields and the Connect button.
if { [string length $active_account] > 0 && \
$accounts($active_account,status) == "DELETED" } {
.acct_label configure -text "Active Account:"
.namef.name_r configure -text ""
.uidf.uid_r delete 0 end
.uidf.uid_r configure -state disabled
.passwdf.passwd_r configure -state disabled
.numberf.number_r configure -text ""
.authtypef.authtype_r configure -text ""
.connect configure -state disabled
return
}
# Case 2: a valid active account — show its details.
if { [string length $active_account] > 0 } {
.acct_label configure -text \
"Active Account: $accounts($active_account,name)" \
-width [expr \
[string length "Active Account: $accounts($active_account,name)"] + 10]
# Widen the detail widgets if the account name needs more room,
# otherwise fall back to the default width.
set namelabel_width [expr [string length $accounts($active_account,name)] + 5]
if { [.namef.name_r cget -width] < $namelabel_width } {
.namef.name_r configure -width $namelabel_width
.uidf.uid_r configure -width $namelabel_width
.passwdf.passwd_r configure -width $namelabel_width
.numberf.number_r configure -width $namelabel_width
.authtypef.authtype_r configure -width $namelabel_width
} else {
.namef.name_r configure -width $acct_width
.uidf.uid_r configure -width $acct_width
.passwdf.passwd_r configure -width $acct_width
.numberf.number_r configure -width $acct_width
.authtypef.authtype_r configure -width $acct_width
}
.namef.name_r configure -text "$accounts($active_account,name)"
.uidf.uid_r delete 0 end
.uidf.uid_r insert 0 "$accounts($active_account,uid)"
# Tokencard accounts use a one-time response instead of a stored
# password, so the password entry stays disabled for them.
if { $accounts($active_account,authtype) == "tokencard" } {
.uidf.uid_r configure -state normal
.passwdf.passwd_r delete 0 end
.passwdf.passwd_r configure -state disabled
} else {
.uidf.uid_r configure -state normal
.passwdf.passwd_r configure -state normal
.passwdf.passwd_r delete 0 end
.passwdf.passwd_r insert 0 "$accounts($active_account,passwd)"
}
.numberf.number_r configure -text "$accounts($active_account,number)"
.authtypef.authtype_r configure -text "$accounts($active_account,authtype)"
if { [info exists active_account] } {
.connect configure -state normal
} else {
.connect configure -state disabled
}
} else {
# Case 3: no active account — blank the detail fields.
.acct_label configure -text "Active Account:"
.namef.name_r configure -text ""
.uidf.uid_r delete 0 end
.passwdf.passwd_r delete 0 end
.numberf.number_r configure -text ""
.authtypef.authtype_r configure -text ""
.connect configure -state disabled
}
# reset active key
# Mark the chosen account "active" and every other account inactive.
foreach acct $account_list {
if { $acct == $active_account } {
puts "Setting account $acct to 'active' status"
set accounts($active_account,status) "active"
} else {
puts "Setting account $acct to 'inactive' status"
set accounts($acct,status) ""
}
}
}
proc set_menu {value } {
# Relabel entry 3 of the menubar with the supplied text.
global menubar_widget
global active_account
$menubar_widget entryconfigure 3 -label $value
}
proc add_account_menu {name index} {
# Add a radio-button menu entry for account `name`: appended when index
# is the literal "END", otherwise inserted at the given menu index.
# Selecting the entry sets active_account and refreshes the UI.
global menubar_widget
global acct_widget
global active_account
global accounts
if { $index == "END" } {
$acct_widget add radio -variable active_account \
-value $name -label $accounts($name,name) -command { set_account $active_account }
} else {
$acct_widget insert $index radio -variable active_account \
-value $name -label $accounts($name,name) -command { set_account $active_account }
}
}
proc delete_account_menu {name} {
# Delete account `name`: mark every one of its keys DELETED in both the
# live and saved account arrays, remove it from account_list and
# saved_account_list, and delete its menu entry.
# Returns the menu index the entry occupied.
global menubar_widget
global acct_widget
global active_account
global accounts
global account_list
global account_keys
global saved_accounts
# Fix: saved_account_list was assigned at the bottom of this proc without
# a global declaration, so the update only ever touched a proc-local
# variable and was lost on return.
global saved_account_list
# Locate the menu entry by its displayed label.
puts "Looking for menu entry $name"
set index [$acct_widget index $accounts($name,name)]
# Mark (rather than unset) the keys so set_account can recognize a
# deleted active account via the DELETED sentinel.
foreach key $account_keys {
set accounts($name,$key) DELETED
set saved_accounts($name,$key) DELETED
}
# delete account_list entry
set account_list [lreplace $account_list \
[lsearch $account_list $name] \
[lsearch $account_list $name]]
puts "Deleting index $name at index $index"
$acct_widget delete $index
puts "account_list now contains: $account_list"
puts "accounts now contains:"
puts [array get accounts]
set saved_account_list $account_list
# return index
return $index
}
# load global settings
proc load_global { } {
# Load modem/PPP settings from $ppp_dir/ppp_settings into ppp_settings
# and ppp_settings_saved (both keyed by setting name).  Creates the file
# with defaults on first run.  Also publishes the modem port and flow
# control into g_modem_port / g_flow_control for the prefs dialog.
global ppp_dir
global ppp_config_file
global ppp_settings
global ppp_settings_saved
global port_speeds
set port_speeds { 38400 57600 115200 230400 }
global g_modem_port
global g_flow_control
# Defaults, used when no settings file exists yet.
set ppp_settings(init_string) "atz"
set ppp_settings(connect_string) ""
set ppp_settings(port_speed) [lindex $port_speeds 0]
set ppp_settings(flow_control) hardware
set ppp_settings(modem_port) /dev/ttyS0
set ppp_settings(ppp_options) ""
set ppp_settings_saved(init_string) "atz"
set ppp_settings_saved(connect_string) ""
set ppp_settings_saved(port_speed) [lindex $port_speeds 0]
set ppp_settings_saved(flow_control) hardware
set ppp_settings_saved(modem_port) /dev/ttyS0
set ppp_settings_saved(ppp_options) ""
set ppp_config_file "$ppp_dir/ppp_settings"
if { [file exists $ppp_config_file] != 1 } {
puts "Creating $ppp_config_file"
set global_fd [open $ppp_config_file w]
# put in defaults
foreach key [array names ppp_settings] {
puts $global_fd "$key\t$ppp_settings($key)"
}
close $global_fd
} else {
puts "Reading $ppp_config_file"
set ppp_fd [open $ppp_config_file r]
# Each line is "key<TAB>value".
while { [gets $ppp_fd line] != -1 } {
set split_line [split $line "\t"]
set ppp_settings([lindex $split_line 0]) [lindex $split_line 1]
set ppp_settings_saved([lindex $split_line 0]) [lindex $split_line 1]
puts "set ppp_settings([lindex $split_line 0]) [lindex $split_line 1]"
}
close $ppp_fd
}
# Fix: these two assignments were missing the '$', so g_modem_port and
# g_flow_control received the literal strings "ppp_settings(modem_port)"
# and "ppp_settings(flow_control)" instead of the configured values.
set g_modem_port $ppp_settings(modem_port)
set g_flow_control $ppp_settings(flow_control)
#pppopts
# init_string
# connect_string
# port_speed
# flow_control
# modem_port
}
proc load_current_accts { } {
# Load the on-disk accounts file into the "saved" copies
# (saved_account_list / saved_accounts), which are later compared with
# the live copies to detect unsaved edits.
global env
global ppp_dir
# list of saved account names
global saved_account_list
# array (saved_account_name,key)
global saved_accounts
# Fix: the body read $account_file but only saved_account_file was
# declared global, so the file-exists test hit an undefined variable.
global account_file
global account_keys
set saved_account_name ""
if { [file exists $account_file] } {
set saved_account_fd [open $account_file r]
# Each non-empty line is "key<TAB>value"; a "name" line starts a new
# account record.
for { set i 0 } { [gets $saved_account_fd line] != -1 } {incr i} {
if { [string length $line] < 1 } { continue }
set field [split $line "\t"]
# Fix: "string trim $key" discarded its result; keep the trimmed key.
set key [string trim [lindex $field 0]]
puts "Key=$key"
set value [lindex $field 1]
puts "Value=$value"
# Validate against the known account keys.
# Fix: the original tested the undefined $saved_account_keys and
# logged the undefined $acct.
if { [lsearch $account_keys $key] == -1 } {
puts "Invalid key $key found in account file $account_file"
continue
}
if { $key == "name" } {
# New account
puts "Account $value"
set saved_account_name $value
lappend saved_account_list $value
}
puts "Adding key '$key' value '$value' to saved_accounts"
set saved_accounts($saved_account_name,$key) $value
}
close $saved_account_fd
}
# done
puts "Loaded saved_accounts"
}
proc load_accts { } {
# Load the accounts file into both the live structures (account_list /
# accounts) and the saved copies (saved_account_list / saved_accounts),
# then backfill any missing keys with empty strings.
global env
global ppp_dir
# list
global account_list
global saved_account_list
set saved_account_list {}
# array (account_name,key)
global accounts
global saved_accounts
# account file names
global account_file
global account_keys
global active_account
set account_name ""
set previous_name ""
if { [file exists $account_file] } {
set account_fd [open $account_file r]
# parse into acct_array
# Each non-empty line is "key<TAB>value"; a "name" line starts a new
# account record.
for { set i 0 } { [gets $account_fd line] != -1 } {incr i} {
if { [string length $line] < 1 } { continue }
set field [split $line "\t"]
set key [lindex $field 0]
# NOTE(review): the trimmed result is discarded here — the untrimmed
# key is used below; confirm whether trimming was intended.
string trim $key
puts "Key=$key"
set value [lindex $field 1]
puts "Value=$value"
# check key
if { [lsearch $account_keys $key] == -1 } {
puts "Invalid key $key found in account file accounts"
continue
}
# make sure account name is same as file prefix
if { $key == "name" } {
# New account
# first check last account
puts "Account $value"
# NOTE(review): [list $value] wraps the name in a list, so names
# containing spaces get braced — the array keys below then differ
# from the raw name; verify against delete_account_menu lookups.
set account_name [list $value]
set previous_name $account_name
puts "Adding account name $account_name to account_list"
lappend account_list $account_name
puts "Adding account name $account_name to saved_account_list"
lappend saved_account_list $account_name
puts "Adding key '$key' value '$value' to accounts"
set accounts($account_name,$key) $value
puts "Adding key '$key' value '$value' to saved_accounts"
set saved_accounts($account_name,$key) $value
} else {
puts "Adding key '$key' value '$value' to accounts"
set accounts($account_name,$key) $value
puts "Adding key '$key' value '$value' to saved_accounts"
set saved_accounts($account_name,$key) $value
}
}
close $account_fd
# verify
# Backfill: every account must have every known key, even if empty.
puts "verifying accounts"
foreach account $account_list {
foreach key $account_keys {
puts "key $key"
puts "account $account"
if { [info exists accounts($account,$key)] == 0 } {
puts "key $key not found in accounts file"
puts "set accounts($account,$key)"
set accounts($account,$key) ""
}
}
}
# check for resolv_file entries?
}
# done
puts "Loaded accounts"
}
#======================================================================
# Close the PPP tunnel. Log any messages.
#======================================================================
proc close_ppp_tunnel {} {
# Tear down the PPP tunnel process: for pppd connections send SIGTERM to
# the recorded pppd pid; for tip connections send the "~." escape and
# kill stray tip processes.  All of it runs inside catch, so failures
# are silently ignored.
puts "proc close_ppp_tunnel {} "
global network_interface message spawn_id port_id
# NOTE(review): the bare word "global" below is parsed as a *variable
# name* — this declares a global named "global", almost certainly a typo
# for a second global statement; pppd_pid/pppd_id are still declared.
global tip_id connection_type global pppd_pid pppd_id
#set spawn_id $tip_id
# Nothing to do if no connection was ever established.
if { [info exists connection_type] != 1 } { return }
puts "spawn_id = $spawn_id"
catch {
if { $connection_type == "pppd" } {
exp_send ""
puts "Sending kill -TERM $pppd_pid"
catch { exec kill -TERM $pppd_pid }
} else {
# tip connection: "~." is tip's disconnect escape sequence.
exp_send "~."
# NOTE(review): "[EOT]" command-substitutes a (nonexistent) EOT
# command, which errors and aborts this catch body before the pkill
# below runs — probably meant a literal EOT character ("\004").
expect "[EOT]"
# might as well try and close any tips
catch { [exec pkill tip] }
}
catch { exec kill -TERM $pppd_pid }
#close $tip_id
#close $spawn_id
#set kill_pid [exp_pid -i $spawn_id]
#puts "Sending kill to pid $kill_pid"
#exec kill -TERM $kill_pid
}
puts "close_ppp_tunnel done"
}
proc log_messages {message} {
# Append `message` to the log window's text widget and to the on-disk
# log file, translating carriage returns to newlines first.
puts "proc log_messages {message} "
global log_file
# Fix: without -all only the *first* CR was converted, so multi-line
# modem output kept stray \r characters.
regsub -all "\r" $message "\n" newline_message
set message $newline_message
if { [info exists message] == 1 } {
.log_win.log_text insert end "$message\n"
.log_win.log_text yview end
# Fix: [open] raises an error on failure instead of returning "", so
# the old `$fd == ""` test could never trigger; catch the open instead.
if { [catch { open $log_file a } fd] } {
return 0
}
puts $fd "$message\n"
close $fd
}
}
proc des_countdown { } {
# One tick of the DES challenge countdown: update the "Seconds
# Remaining" label and reschedule in 1s via [after], until the dialog is
# destroyed, the countdown is cancelled ("null"), or time runs out.
global seconds_remaining
if { ![winfo exists .des_dialog] } { return }
if { $seconds_remaining == "null" } {
# Countdown cancelled (response sent or dialog dismissed).
return
}
if { $seconds_remaining == 0 } {
.des_dialog.time_remaining configure -text "Challenge Entry Interval Expired!"
return -1
}
.des_dialog.time_remaining configure -text "Seconds Remaining: $seconds_remaining"
incr seconds_remaining -1
after 1000 des_countdown
}
proc down_interface { } {
# Bring the PPP network interface down by driving "ifconfig ... down"
# through a spawned privileged shell (pfsh), then clear the interface
# and IP globals.
global ppp_interface serial_port ip_address
global ifconfig_cmd
global spawn_list
# check if i/f is up
if { [string length $ppp_interface] < 1 } { return }
puts "Taking down interface $ppp_interface"
puts "spawn /usr/bin/pfsh"
# pfsh: profile shell, used to run ifconfig with elevated rights.
spawn /usr/bin/pfsh
lappend spawn_list $spawn_id
# Wait for the shell prompt before and after issuing the command.
expect "\$ "
puts "Sending /usr/sbin/ifconfig $ppp_interface down"
exp_send "/usr/sbin/ifconfig $ppp_interface down\r"
expect "\$ "
set ppp_interface ""
set ip_address ""
close $spawn_id
}
proc close_pppd { } {
# Terminate the pppd daemon.  Preferred path: run the kill_pppd helper
# on /var/run/<interface>.pid.  Fallback (no interface known): signal
# the recorded pppd pid and the expect-spawned process directly, or send
# tip's "~." escape for tip connections.  Finally close every spawn id
# this session opened.
puts "proc close_pppd"
global ppp_interface serial_port port_id spawn_list
global connection_type ip_address spawn_id tip_id
global pppd_pid pppd_id
global kill_pppd
if { [info exists connection_type] != 1 } { return }
if { [info exists ppp_interface] != 1 || \
[string length $ppp_interface] < 1 } {
puts "no ppp_interface found, trying to close spawn_id"
if { $connection_type == "pppd" } {
catch {
exp_send ""
puts "kill -TERM $pppd_pid"
catch { exec kill -TERM $pppd_pid }
# Also signal the process expect spawned for this session.
set kill_pid [exp_pid -i $spawn_id]
puts "Sending kill to pid $kill_pid"
exec kill -TERM $kill_pid
puts "pppd_pid == $pppd_pid"
}
} else {
catch { [exp_send "~.\r"] }
}
} else {
# pppd writes its pid to /var/run/<interface>.pid; kill_pppd is an
# external helper that reads it and signals the daemon.
puts "Looking for /var/run/[set ppp_interface].pid"
set pppd_pid_file "/var/run/[set ppp_interface].pid"
if { [file exists $pppd_pid_file] == 1 } {
puts "Executing $kill_pppd $pppd_pid_file"
exec $kill_pppd $pppd_pid_file
}
}
#puts "exp_send -i $pppd_id logout"
#catch { exp_send -i $pppd_id "logout\r" }
#puts "exp_send -i $pppd_id pkill /usr/sbin/pppd"
#catch { expect -i $pppd_id "#" }
# Close every expect spawn id accumulated during this session.
puts "spawn_ids:"
foreach id $spawn_list {
puts $id
}
foreach id $spawn_list {
puts "close $id"
catch { close $id }
}
# puts "send $port_id atz"
# catch { exp_send -i $port_id "atz\r" }
# catch { expect -i $port_id "OK" }
puts "close_pppd done"
}
proc get_interface_info { ip_address } {
# Determine which network interface carries ip_address by scanning
# ifconfig output, storing the result in the global ppp_interface.
# ifconfig prints the interface name on the line *before* the address
# line, so the previous line is remembered in `save`.
# Fixes: (1) ip_address is a parameter, and declaring it global raised
# "variable ... already exists", so the proc errored on entry; (2) `save`
# was never updated inside the loop, so the interface name could never be
# extracted (cf. the working copy of this scan inside disconnect);
# (3) the command pipe was never closed.
global ppp_interface \
serial_port \
remote_ip_address
puts "find i/f based on $ip_address"
set if_fd [open "|ifconfig"]
set save ""
while { [gets $if_fd line] != -1 } {
puts "i/f line: $line"
if { [string first $ip_address $line] != -1 } {
# The previous line holds "ifname ..." — take its first word.
set ppp_interface [lindex [split $save " "] 0]
puts "ppp_interface: $ppp_interface"
}
set save $line
}
catch { close $if_fd }
}
proc disconnect { reason } {
# Tear down the current PPP session, logging `reason` to the user.
# Restores /etc/resolv.conf if this account manages it, determines the
# PPP interface (directly, or by scanning ifconfig for ip_address), then
# shuts down pppd and the tunnel, and resets the Connect button.
puts "proc disconnect $reason"
global accounts
global active_account
global challenge_entered
global des_response
global seconds_remaining
set seconds_remaining 30
global connection_state
set challenge_entered "false"
global pppd_id pppd_pid tip_id
global link_disconnected
set link_disconnected "false"
global ppp_interface serial_port ip_address
global connection_type
set connection_state "disconnecting"
# connection scripts
#pre_disconnect
log_message "Disconnecting: $reason "
.footer.footer_r configure -text "Disconnecting: $reason" -justify left
# Close the DES challenge dialog if it is still up.
catch { destroy .des_dialog }
global backup_resolv
global manage_resolv
# restore resolv.conf
if { $accounts($active_account,resolv) == 1 } {
puts "restoring file $backup_resolv"
if { [file exists $backup_resolv] == 1 } {
exec $manage_resolv restore $backup_resolv
}
}
if { [info exists ppp_interface] != 1 ||
[string length $ppp_interface] < 1 } {
log_message "Unable to determine ppp interface, trying IP address"
# see if ip_address present
if { [info exists ip_address] != 1 ||
[string length $ip_address] < 1 } {
log_message "Unable to determine ip interface, exit ppptool to reset"
puts "close_pppd (a)"
close_pppd
close_ppp_tunnel
} else {
# Scan ifconfig output: the interface name is on the line before
# the line containing our IP address, hence the `save` variable.
puts "find i/f based on $ip_address"
set if_fd [open "|ifconfig -a"]
set save ""
while { [gets $if_fd line] != -1 } {
puts "i/f line: $line"
if { [string first $ip_address $line] != -1 } {
#puts "found $ip_address in $line"
#puts "looking for i/f name in: $save"
set ppp_interface [lindex [split $save " "] 0]
#puts "ppp_interface: $ppp_interface"
break
}
set save $line
}
close $if_fd
log_message "Taking down interface $ppp_interface"
if { [string length $ppp_interface] > 0 } {
puts "Terminating pppd process (a)"
puts "close_pppd (b)"
close_pppd
close_ppp_tunnel
}
}
} else {
#puts "Taking down interface $ppp_interface"
puts "Terminating pppd process (b)"
#down_interface
puts "close_pppd (c)"
close_pppd
close_ppp_tunnel
}
# Reset the Connect button back to its initial state.
.connect configure -state disabled
.connect configure -text "Connect" -command connect -state normal
puts "disconnect done"
set link_disconnected "true"
# connection script
#post_disconnect
}
proc init_modem { } {
# Initialize the modem: set the serial line up via stty, open the modem
# port as an expect spawn (recorded in the global port_id), then send
# ATZ followed by the configured init string and wait for "OK".
# Returns 0 on success (or NO CARRIER), -1 on error/timeout, and updates
# the global connection_state accordingly.
global connection_state
global baud_rate
global ppp_settings
puts "init_modem"
global spawn_list
global ppp_settings
# Fall back to a factory-profile init string unless the prefs supply one.
set init_string "AT &F1"
if { [info exists ppp_settings(init_string)] && \
[string length $ppp_settings(init_string)] > 2 } {
set init_string $ppp_settings(init_string)
}
puts "set error catch system stty -F $ppp_settings(modem_port) -echoe -echo raw $ppp_settings(port_speed) err"
# NOTE(review): the bracketed [system ...] re-evaluates system's result
# as a command, so this catch likely always returns 1 — which makes the
# `!= 1` test below effectively a no-op.  Confirm the intended polarity
# of this error check before changing it.
set error [catch { [system "stty -F $ppp_settings(modem_port) -echoe -echo raw $ppp_settings(port_speed)"] } err]
if { $error != 1 } {
puts "Error setting baud rate for modem on port $ppp_settings(modem_port)"
return -1
}
####################11111111111111111111####################
# if [spawn -noecho -open [ open $ppp_settings(modem_port) "RDWR NONBLOCK"]] != 0
####################11111111111111111111####################
set timeout 5
puts "open modem"
global port_id
# Close any previously opened modem port before reopening it.
if { [info exists port_id] } {
puts "closing $port_id"
catch { close $port_id }
}
set port_id ""
puts "spawn -noecho -open \[ open $ppp_settings(modem_port) RDWR NONBLOCK\]\]"
if { [spawn -noecho -open [ open $ppp_settings(modem_port) "RDWR NONBLOCK"]] != 0 } {
set connection_state "unconnected"
puts "error initializing modem"
return -1
} else {
lappend spawn_list $spawn_id
puts "set port_id $spawn_id"
set port_id $spawn_id
# Reset the modem, then send the init string once it answers OK.
puts "exp_send -i $port_id ATZ"
exp_send -i $port_id "ATZ\r"
expect {
-i $port_id "OK" {
puts "Sending init string $init_string"
exp_send -i $port_id "$init_string\r"
}
-i $port_id "NO CARRIER" {
puts "NO CARRIER detected"
#catch { close $port_id }
set connection_state "unconnected"
return 0
}
-i $port_id timeout {
set connection_state "unconnected"
puts "-> timeout waiting for OK from AT command <-"
#puts "closing $port_id"
#catch { close $port_id }
return -1
}
}
# Second exchange: wait for the init string's OK.
expect {
-i $port_id "NO CARRIER" {
#catch { close $port_id }
set connection_state "unconnected"
return 0
}
-i $port_id "OK" {
puts "init_modem successful"
set connection_state "connecting"
#catch { close $port_id }
return 0
}
-i $port_id timeout {
set connection_state "unconnected"
puts "timeout waiting for OK from AT command"
#catch { close $port_id }
return -1
}
}
}
}
proc connect { } {
# Start a PPP connection for the active account.  Optionally swaps in a
# per-account resolv.conf (backing up the current one), initializes the
# modem (up to 3 attempts), then dispatches: tokencard accounts get the
# DES challenge dialog and modempool_connect, all others isp_connect.
global accounts
global active_account
global challenge_entered
global des_response
global seconds_remaining
set seconds_remaining 30
global connection_state
set challenge_entered "false"
global ppp_interface serial_port ip_address
# pre-connection scripts
#pre_connect
log_messages "Connecting"
set connection_state "disconnected"
sleep 1
set connection_state "connecting"
init_blinking_bulbs
.footer.footer_r configure -text "Connecting .."
global backup_resolv
set backup_resolv ""
global manage_resolv
#.connect configure -state disabled
if { $accounts($active_account,resolv) == 1 } {
# create resolv.conf, backup resolv.conf
# Swap in the account's resolv.conf, saving the live one so that
# disconnect can restore it.
if { [file exists $accounts($active_account,resolv_file)] == 1 } {
set backup_resolv "/etc/resolv.conf.ppp"
# backup 'to'
exec $manage_resolv backup $backup_resolv
# copy 'from'
exec $manage_resolv copy $accounts($active_account,resolv_file)
} else {
set backup_resolv ""
}
}
# initialize modem
# Retry modem init up to max_retries times before giving up.
set modem_init -1
set max_retries 3
for { set i 1 } { $i <= $max_retries } { incr i } {
if { [init_modem] != 0 } {
.footer.footer_r configure -text "Error Initializing Modem, retrying ($i of $max_retries)"
set modem_init -1
sleep 1
continue
} else {
.footer.footer_r configure -text "Modem Initialized"
set modem_init 1
break
}
}
if { $modem_init != 1 } {
.footer.footer_r configure -text "Error Initializing Modem, check modem and ports"
.connect configure -state normal
return -1
}
init_blinking_bulbs
#.connect configure -text "Connect" -command connect -state normal
set connection_state "connecting"
# The button now acts as a Disconnect button for the rest of the session.
.connect configure -text "Disconnect" -state disabled -command {
disconnect "Disconnected by User 1"
}
if { $accounts($active_account,authtype) == "tokencard" } {
# Tokencard: pop up the DES challenge dialog; the user keys the host
# challenge into their card and sends back the response.
.footer.footer_r configure -text "Connecting .."
toplevel .des_dialog
entry .des_dialog.des_entry -width 20
label .des_dialog.challenge_label -text "Host: Challenge:"
label .des_dialog.instruction_label -text "\n\
Get your DES card ready\n\
Enter your authorization number\n\
Then get ready to key in the Host Number\n\
and Challenge when they appear above\n\n\
Then press 'E' and wait for the response string,\n\
and enter the 'response' from your DES card\n\
into the entry box above"
label .des_dialog.time_remaining -text "Waiting for Remote Connection"
log_messages "Waiting for Remote Connection"
button .des_dialog.des_send_button -text "Send Response" \
-command {
set des_response [.des_dialog.des_entry get]
set challenge_entered "true"
#wm withdraw .des_dialog
set seconds_remaining "null"
destroy .des_dialog
}
button .des_dialog.close -text "Close" \
-command {
#wm iconify .des_dialog
set seconds_remaining "null"
destroy .des_dialog
}
pack .des_dialog.challenge_label -anchor n
pack .des_dialog.des_entry -after .des_dialog.challenge_label
pack .des_dialog.instruction_label -anchor n
pack .des_dialog.time_remaining -anchor n
pack .des_dialog.des_send_button -side left -anchor n
pack .des_dialog.close -side right -anchor n
modempool_connect
} else {
isp_connect
}
}
proc restore_resolv { } {
# Restore /etc/resolv.conf from the backup made at connect time.
# Fix: the original exec'd the undefined variable $restore_resolv; the
# rest of this file restores via "$manage_resolv restore <backup>"
# (see disconnect), so do the same here, guarded by a file-exists check.
global manage_resolv
global backup_resolv
if { [file exists $backup_resolv] == 1 } {
exec $manage_resolv restore $backup_resolv
}
}
proc backup_resolv { } {
# Back up the live /etc/resolv.conf to the path in the global
# backup_resolv before the account's resolv.conf is swapped in.
# Fix: the original ran "exec $backup_resolv", executing a file path as
# a program; connect performs this backup as
# "$manage_resolv backup $backup_resolv", so mirror that here.
# NOTE(review): assumes backup_resolv already holds the target path
# (set in connect) — confirm callers.
global manage_resolv
global backup_resolv
exec $manage_resolv backup $backup_resolv
}
proc create_resolv { } {
# Build a resolv.conf from the active account's DNS settings.
# NOTE(review): this proc appears broken as written and may be dead
# code: $create_resolv is never defined, $account_strings is not
# declared global here, and account_strings elsewhere is keyed only by
# field name (the values live in the accounts array, e.g.
# accounts($active_account,domain)).  Verify before use.
global accounts
global active_account
#account_strings(domain) "DNS Domain Name:"
#account_strings(ns1) "Account Nameserver #1:"
#account_strings(ns2) "Account Nameserver #2:"
exec $create_resolv \
$account_strings($active_account,domain) \
$account_strings($active_account,ns1) \
$account_strings($active_account,ns2)
}
proc monitor_link { } {
# Periodic link health check: verify the PPP interface still appears as
# a gateway/host route to the peer in "netstat -rn" and that the peer
# answers a single ping; reschedules itself every 10s while healthy and
# disconnects (with a reason) on failure.
puts "monitor_link"
global ppp_interface serial_port ip_address remote_ip_address
global link_disconnected
# Fix: connection_state was assigned below without a global declaration,
# so every state change here landed in a throwaway proc-local variable.
global connection_state
if { $link_disconnected == "true" } { return 0 }
set timeout 10
global ip_up_file
global ip_down_file
# global ip_up_array
# read ip-up output file
if { [string length $remote_ip_address] < 6 } {
puts "Unable to monitor link, no valid remote_ip_address"
}
# check that ppp_interface is a gateway, e.g.
#0.0.0.0 192.168.1.1 0.0.0.0 UG 40 0 0 eth0
set save ""
set gateway_list {}
set ppp_gateway "false"
set netstat_fd [open "|netstat -rn"]
while { [gets $netstat_fd line] != -1 } {
if { [string first "UG" $line] != -1 ||
[string first "UH" $line] != -1 } {
# save all gateways
lappend gateway_list $line
if { [string first $ppp_interface $line] != -1 } {
if { [string first $remote_ip_address $line] != -1 } {
#puts "found $remote_ip_address in $line"
set ppp_gateway "true"
}
}
}
}
catch { close $netstat_fd }
#puts "Gateways: [list $gateway_list]"
if { $ppp_gateway == "false" } {
puts "No gateway found for $ppp_interface"
.footer.footer_r config -text "No gateway found for $ppp_interface"
set connection_state "disconnected"
sleep 2
puts "disconnecting .."
set reason "No gateway found for $ppp_interface"
disconnect $reason
return -1
}
# Single ping of the peer; suppress expect's echo while capturing output.
log_user 0
spawn ping -c 1 $remote_ip_address
expect eof
log_user 1
if { [string first \
"1 packets transmitted, 1 received" $expect_out(buffer)] < 0 } {
puts "Cannot reach $remote_ip_address, link may have gone down"
.footer.footer_r config -text "Cannot reach $remote_ip_address, \
link may have gone down"
set connection_state "disconnected"
sleep 2
puts "disconnecting .."
# Fix: disconnect requires a reason argument; the bare call raised a
# wrong-#-args error at exactly the moment the link went down.
disconnect "Cannot reach $remote_ip_address, link may have gone down"
return -1
} else {
set connection_state "connecting"
after 10000 monitor_link
}
}
proc isp_connect { } {
# Dial a regular ISP (PAP/CHAP-style) account: write a pppd options
# file (including a chat dial script), launch pppd in a spawned shell,
# and install an expect_background handler that parses pppd's output to
# track the interface name, local/remote IP addresses, the pppd pid,
# lock-file conflicts, and failure/termination messages.
puts "proc isp_connect { } "
global accounts account_list active_account baud_rate
global env flow_control init_string ip_enabled
global log_file log_messages message modem_port
global nat_transparency_mode network_interface
global password phone_number
global port port_speed spawn_id tokencard_response username
global baud_rate pppd_config_file username
global spawn_list
global connection_state
global pppd_id
global pppd_pid
global ppp_interface serial_port ip_address remote_ip_address
global link_disconnected
set link_disconnected "true"
set remote_ip_address "0.0.0.0"
set connection_state "connecting"
# create connect script
# Example of the generated pppd options file:
# ttyb
# 38400
# debug
# lock
# noproxyarp
# updetach
# usepeerdns
# crtscts
# nodefaultroute
# noipdefault
# user internet.smus.MNGF11D
# password tgrtgr
# connect '/usr/bin/chat -v REPORT CONNECT ABORT BUSY ABORT "NO CARRIER" "" atdt18005904857 "CONNECT"'
.passwdf.passwd_r configure -state normal
global phone_number port baud_rate pppd pppd_config_file env username
global tip_id
global connection_type
set connection_type "pppd"
global ppp_settings
#set phone_number $accounts($active_account,number)
# strip out non-numeric
regsub -all "\[^0-9\]" $accounts($active_account,number) "" phone_number
set baud_rate $ppp_settings(port_speed)
set pppd_config_file "$env(HOME)/.ppptool/connect.script"
set port $ppp_settings(modem_port)
set pppd /usr/sbin/pppd
set pppd_cmd "$pppd file $pppd_config_file"
.footer.footer_r config -text "Connect: Creating chat script"
set fd [open $pppd_config_file w]
# Take uid/password from the entry widgets, writing any edits back into
# the accounts array so they persist for this session.
set username [.uidf.uid_r get]
set password [.passwdf.passwd_r get]
if { $username != $accounts($active_account,uid) } {
set accounts($active_account,uid) $username
}
if { $password != $accounts($active_account,passwd) } {
set accounts($active_account,passwd) $password
}
#set username $accounts($active_account,uid)
#set password $accounts($active_account,passwd)
puts "$phone_number $port $baud_rate $pppd_config_file $username $password"
puts $fd $port
puts $fd $baud_rate
puts $fd debug
puts $fd lock
puts $fd noproxyarp
puts $fd updetach
puts $fd usepeerdns
# flow control
switch $ppp_settings(flow_control) {
"hardware" { puts $fd crtscts }
"software" { puts $fd xonxoff }
"none" { puts $fd nocrtscts }
}
if { $accounts($active_account,defroute) == 1 } {
puts $fd "defaultroute"
} else {
puts $fd "nodefaultroute"
}
puts $fd "noipdefault"
puts $fd "user $username"
puts $fd "password $password"
puts $fd "connect '/usr/sbin/chat -v REPORT CONNECT ABORT BUSY ABORT \"NO CARRIER\" \"\" atdt$phone_number \"CONNECT\"'"
close $fd
set pppd_cmd "$pppd file $pppd_config_file"
puts "$pppd_cmd"
.footer.footer_r config -text "Connecting with Chat script"
set timeout -1
# Launch pppd in the background from a spawned shell so its diagnostics
# stream back through expect.
spawn /bin/sh
lappend spawn_list $spawn_id
set pppd_id $spawn_id
expect "#"
exp_send "$pppd_cmd &\r"
#.connect configure -text "Disconnect" -state normal -command {
# disconnect "Disconnected by User 2"
#}
.footer.footer_r config -text "Starting Process:\n$pppd_cmd" -font {times 10 bold} -justify left
set ctr 0
# Background handler: fires once per line of pppd/chat output and
# updates the UI and globals as connection milestones are reported.
expect_background -i $pppd_id -re "\[^\r]*\r\n" {
if { [string first "Hangup" $expect_out(0,string)] != -1 } {
.footer.footer_r configure -text "Hangup from Modem"
##set connection_state "disconnected"
.connect configure -state normal
}
if { [string first "(SIGHUP)" $expect_out(0,string)] != -1 } {
.footer.footer_r configure -text "Hangup from Modem"
.connect configure -state normal
}
if { [string first "is locked by pid" $expect_out(0,string)] != -1 } {
# Another pppd holds the serial-port lock; find the stale
# /var/run/*.pid file that names that pid and tear it down.
#Device /dev/term/b is locked by pid 6482
#pppd exp6: Device /dev/term/b is locked by pid 6482
set split_line [split $expect_out(0,string) "\[ \t]*"]
set lock_pid [lindex $split_line [expr [llength $split_line] -1]]
puts "Found locking pid $lock_pid, looking for /var/run/ files with pid"
# look for file in /var/run containing pid
foreach f [exec ls /var/run] {
if { [regexp "pid$" $f] == 1 } {
# check if file contains pid
if { [exec cat "/var/run/$f"] == $lock_pid } {
#send file name to close_pppd
set ppp_interface [lindex [split $f "."] 0]
puts "Setting ppp_interface file name to $ppp_interface"
puts "Running close_pppd"
close_pppd
break
}
}
}
}
# look for connection success
# Connect: sppp0 <--> /dev/term/b
if { [string first "Connect" $expect_out(0,string)] != -1 } {
set ppp_interface \
[lindex [split $expect_out(0,string) " \t"] 1]
set serial_port \
[lindex [split $expect_out(0,string) " \t"] 3]
puts "\n==> PPP Interface: $ppp_interface <==\n"
puts "\n==> serial port: $serial_port <==\n"
}
if { [string first "local" $expect_out(0,string)] != -1 && \
[string first "IP address" $expect_out(0,string)] != -1 } {
set ip_address \
[string trim [lindex [split $expect_out(0,string) "\[ ]*"] 4]]
puts "\n==> IP address for interface: $ip_address <==\n"
.footer.footer_r configure \
-text "PPP Connected.\nNetwork Interface: $ppp_interface\nIP address: $ip_address" -anchor w -justify left
.connect configure -state normal -text "Disconnect" -command {
disconnect "Disconnected by User 3"
}
set connection_state "connecting"
}
if { [string first "remote" $expect_out(0,string)] != -1 && \
[string first "IP address" $expect_out(0,string)] != -1 } {
# Remote address is the last milestone: the link is up, so start
# the periodic link monitor.
#remote IP address 32.97.116.34
set remote_ip_address \
[string trim [lindex [split $expect_out(0,string) "\[ ]*"] 3]]
puts "\n==> Remote IP address for interface: $remote_ip_address <==\n"
.footer.footer_r configure \
-text "PPP Connected.\nNetwork Interface: $ppp_interface\nIP address: $ip_address\nRemote IP address: $remote_ip_address" -anchor w -justify left
.connect configure -state normal -text "Disconnect" -command {
disconnect "Disconnected by User 4"
}
set connection_state "connecting"
set link_disconnected "false"
monitor_link
}
# rcvd [IPCP ConfAck id=0x8a <addr 32.100.234.63>]
# local IP address 32.100.234.63
# remote IP address 204.146.246.229
# pppd exp6: rcvd [IPCP ConfAck id=0x8a <addr 32.100.234.63>]
# local IP address 32.100.234.63
# remote IP address 204.146.246.229
# get pid
if { [string first "CONNECT" $expect_out(0,string)] != -1 && \
[string first "started" $expect_out(0,string)] != -1 } {
# Parse pppd's pid from its "... started (pid NNNN)" line so
# close_pppd/close_ppp_tunnel can signal it later.
puts "pppd $spawn_id"
# the pid of the pppd
set pid_line [split $expect_out(0,string) "\[ \t]*"]
puts "pid_line: $pid_line"
# get last string of line
set pid_string [lindex $pid_line end]
puts "pid_string: $pid_string"
set pppd_pid [string trimright [lindex [split $pid_string] 0] ")"]
puts "pppd_pid: $pppd_pid"
.connect configure -state normal
}
puts "pppd $spawn_id: $expect_out(0,string)"
if { [string first "script failed" $expect_out(0,string)] != -1 } {
disconnect $expect_out(0,string)
}
if { [string first "Terminating" $expect_out(0,string)] != -1 } {
disconnect $expect_out(0,string)
}
log_message $expect_out(0,string)
}
#post_connect
}
# modempool_connect -- dial the corporate modem pool over a raw tty
# opened directly with Expect, walk the terminal-server login dialogue
# including the DES-card challenge/response step, and hand the line over
# to pppd once the remote side starts PPP.
# Relies on Tk widgets such as .connect, .footer.footer_r, .uidf.uid_r and
# .des_dialog, and on many globals shared with the rest of the GUI.
proc modempool_connect { } {
puts "proc modempool_connect { } "
global connection_type
global connection_state
global accounts account_list active_account baud_rate \
env flow_control init_string ip_enabled \
log_file log_messages message modem_port \
nat_transparency_mode network_interface \
password phone_number \
port port_speed spawn_id tokencard_response username \
baud_rate pppd_config_file username \
tip_id pkghome ppp_settings challenge_entered \
des_response seconds_remaining link_disconnected \
port_id
set connection_type "tip"
set pppd_started "false"
#set ppp_settings(init_string) "atz"
#set ppp_settings(connect_string) ""
#set ppp_settings(port_speed) 38400
#set ppp_settings(flow_control) hardware
#set ppp_settings(modem_port) /dev/term/b
#set ppp_settings(ppp_options) ""
# Pull the dial parameters from the active account and current settings.
set phone_number $accounts($active_account,number)
set baud_rate $ppp_settings(port_speed)
set port_speed $ppp_settings(port_speed)
set pppd_config_file "$env(HOME)/.ppptool/connect.script"
set port $ppp_settings(modem_port)
#set username $accounts($active_account,uid)
# User id is taken from the entry widget; persist any edit back into the
# account record.
set username [.uidf.uid_r get]
if { $username != $accounts($active_account,uid) } {
set accounts($active_account,uid) $username
}
# don't need passwd field
.passwdf.passwd_r configure -state disabled
.connect configure -state normal
puts "$phone_number $port $baud_rate $pppd_config_file $username"
log_messages "$phone_number $port $baud_rate $pppd_config_file $username"
set timeout 60
# close any tip_id's in case
#### SOLARIS ####
### catch { [exec pkill tip] } ###
#### spawn tip "-$baud_rate" $port ###
#### log_messages "spawn tip -$baud_rate $port" ####
#set tip_id $spawn_id
#### LINUX ####
#puts "Setting baud rate to $baud_rate"
#set error [catch { [system "stty -F $port -echoe -echo raw $baud_rate"] } err]
#if { $error != 1 } {
# puts "Error setting baud rate for modem on port $port"
#}
#for { set i 0 } { $i < 3 } { incr i } {
# if { [init_modem] != 0 } {
# .footer.footer_r configure -text "Error Initializing Modem"
# set modem_init -1
# sleep 1
# continue
# } else {
# .footer.footer_r configure -text "Modem Initialized"
# set modem_init 1
# break
# }
#}
#if { $modem_init != 1 } { return -1 }
init_blinking_bulbs
#.connect configure -text "Connect" -command connect -state normal
set connection_state "connecting"
# Repurpose the Connect button as a Disconnect button while dialing.
.connect configure -text "Disconnect" -state normal -command {
disconnect "Disconnected by User 5"
}
# Close any previously opened modem channel before re-opening the port.
if { [info exists port_id] } {
puts "closing $port_id"
catch { close $port_id }
}
# NOTE(review): the extra brackets make the result of spawn be invoked as
# a command, so catch returns 1 even when spawn succeeds; combined with
# the inverted != 1 test, both success and failure appear to reach the
# else branch. Confirm before relying on the error path here.
if { [catch { [spawn -noecho -open \
[ open $ppp_settings(modem_port) "RDWR NONBLOCK" ]] } err] != 1 } {
#set connection_state "disconnected"
puts "error initializing modem: $err"
return -1
} else {
# get model
exp_send "ati3\r"
expect {
"OK" {
set modem_model $expect_out(buffer)
set modem_model [lindex [split $expect_out(buffer) "\[^\r]*\r\n"] 3]
puts "Modem_model: $modem_model"
}
"ERROR" {
puts "Unable to determine modem model"
}
}
exp_send "AT\r"
}
# Main login/negotiation state machine: each pattern handles one step of
# the modem-pool dialogue and usually loops via exp_continue.
expect {
"Authentication failed." {
puts "Check User ID/Password"
#disconnect
log_message $expect_out(buffer)
log_message "Check User ID/Password"
.footer.footer_r configure -text \
"Authentication failed.\nCheck User ID/Password" -anchor w -justify left
#set connection_state "disconnected"
#close_ppp_tunnel
return
}
"NO CARRIER" {
#set connection_state "disconnected"
log_message $expect_out(buffer)
#close_ppp_tunnel
sleep 2
puts "Modem connection dropped"
#modempool_connect
}
"EOT" {
#set connection_state "disconnected"
puts "Received: $expect_out(buffer)"
#puts "Modem disconnected"
exp_continue
}
"all ports busy" {
#set connection_state "disconnected"
log_message $expect_out(buffer)
exp_send "~.\r"
#catch { close $tip_id }
#catch { close $spawn_id }
#close_ppp_tunnel
#sleep 10
#modempool_connect
}
"NO DIAL TONE" {
#set connection_state "disconnected"
log_message $expect_out(buffer)
puts "NO DIAL TONE"
#close_ppp_tunnel
#sleep 10
#modempool_connect
}
"OK" {
log_message $expect_out(buffer)
# dial
puts "Sending $phone_number"
puts "******************************\n"
puts "  Get your DES card ready"
puts "  Enter your authorization #"
puts "  Then get read to key in:"
puts "  1.) Host number"
puts "  2.) Challenge"
puts "  Then enter the 'response' into the popup"
puts "******************************"
exp_send "atdt $phone_number\r"
exp_continue
}
# auth
Username: {
log_message $expect_out(buffer)
exp_send "$username\r"
exp_continue
}
password: {
log_message $expect_out(buffer)
exp_send "\r"
exp_continue
}
# des card challenge/response
Challenge {
log_message $expect_out(buffer)
exp_continue
}
esponse? {
log_message $expect_out(buffer)
#puts "expect_out(0,string) = $expect_out(0,string)"
#puts "expect_out(buffer) = $expect_out(buffer)"
# pop up entry, display challenge
#Challenge: 9 6024 Response? 0f68pc72
if { [winfo exists .des_dialog] != 1 } { return }
set host_challenge [lindex [split $expect_out(buffer)] 1]
set secret [lindex [split $expect_out(buffer)] 2]
.des_dialog.challenge_label configure -text \
"Host: $host_challenge  Challenge: $secret"
puts "\nhost_challenge=$host_challenge"
puts "secret=$secret"
des_countdown
# Block here until the user types the DES response into the dialog.
tkwait variable challenge_entered
puts "Challenge: $des_response"
exp_send "$des_response\r"
set timeout -1
puts "Continuing .."
exp_continue
}
"\}" {
foreach line [split $expect_out(buffer) "\r\n"] {
puts "PPPD: $line"
if { [string first "Your IP" $line] != -1 } {
set ip_address [lindex [split $line " "] 4]
set ip_address [string trimright $ip_address "."]
puts "IP address: $ip_address"
set mtu [lindex [split $line " "] 7]
puts "MTU: $mtu"
.footer.footer_r configure -text \
"Connected:\nIP address: $ip_address\nMTU: $mtu" -font {times 10 bold} -justify left
break
}
}
# First sight of PPP traffic: start the local pppd exactly once.
# NOTE(review): start_pppd is set globally in init but is not declared
# global in this proc - confirm this code path resolves it.
if { $pppd_started == "false" } {
puts "exec $start_pppd \
$ppp_settings(modem_port) \
$ppp_settings(port_speed)"
eval exec $start_pppd \
$ppp_settings(modem_port) \
$ppp_settings(port_speed)
set pppd_started "true"
#.footer.footer_r configure -text "pppd started"
.connect configure -text "Disconnect" -command {
disconnect "Disconnected by User 6"
}
puts "Reading ip-up output file"
read_ip_up
#ppp_interface serial_port ip_address remote_ip_address port_speed mtu
.footer.footer_r configure -text \
"Connected:\n\
IP address: $ip_address\n\
MTU: $mtu\n\
Port Speed: $port_speed\n\
Remote IP address: $remote_ip_address\n" \
-font {times 10 bold} -justify left
monitor_link
}
log_message $expect_out(buffer)
exp_continue
}
-re "(.*)>" {
puts "ppp prompt received"
log_message $expect_out(buffer)
puts "Sending ppp"
exp_send "ppp\r"
exp_continue
}
timeout {
log_message $expect_out(buffer)
#set connection_state "disconnected"
#close_ppp_tunnel
puts "timeout: try again"
#sleep 2
#modempool_connect
}
eof {
#set connection_state "disconnected"
log_message $expect_out(buffer)
puts "eof: Modem connection closed"
#close_ppp_tunnel
}
}
}
# post_connect -- launch the user-supplied post-connect hook script in
# the background, if one is configured and present.
proc post_connect { } {
global post_connect_script
if { ![file exists $post_connect_script] } {
return
}
puts "exec $post_connect_script"
exec $post_connect_script &
return
}
# pre_connect -- launch the user-supplied pre-connect hook script in the
# background, if one is configured and present.
proc pre_connect { } {
global pre_connect_script
if { ![file exists $pre_connect_script] } {
return
}
puts "exec $pre_connect_script"
exec $pre_connect_script &
return
}
# pre_disconnect -- launch the user-supplied pre-disconnect hook script
# in the background, if one is configured and present.
proc pre_disconnect { } {
global pre_disconnect_script
if { ![file exists $pre_disconnect_script] } {
return
}
puts "exec $pre_disconnect_script"
exec $pre_disconnect_script &
return
}
# post_disconnect -- launch the user-supplied post-disconnect hook
# script in the background, if one is configured and present.
proc post_disconnect { } {
global post_disconnect_script
if { ![file exists $post_disconnect_script] } {
return
}
puts "exec $post_disconnect_script"
exec $post_disconnect_script &
return
}
# init -- one-time program initialization: locate helper scripts, make
# sure /etc/ppp/options exists, define the per-account metadata tables,
# detect the optional /etc/connect hook scripts, load saved settings,
# build the log window and menus, and pre-seed ip_up_array with empty
# defaults for every key the ip-up script may write.
proc init { } {
global spawn_list
global port_opened
set port_opened "false"
global ppp_settings
global env
global ppp_dir
global pkghome
puts "Connection Script Env 'USER_ID': $env(USER_ID)"
puts "Connection Script Env 'CONNECTION_TYPE': $env(CONNECTION_TYPE)"
set pkghome $env(PKGHOME)
global start_pppd
set start_pppd "${pkghome}/start_pppd"
set copy_opts "[set pkghome]/copy_pppopts"
global manage_resolv
set manage_resolv "[set pkghome]/manage_resolv"
#"copy"
#"restore"
#"srcfile=s"
#"backup_file=s"
set ppp_dir "$env(HOME)/.ppptool"
global ppp_config_file
set ppp_config_file "$ppp_dir/ppp_settings"
#set ppp_config_file "$ppp_dir/ppp_config"
global ppp_options_file
set ppp_options_file "/etc/ppp/options"
set ppp_options_template "[set pkghome]/options"
# Ensure /etc/ppp/options exists, creating it from the packaged template
# via the copy_pppopts helper if necessary; abort if that is impossible.
puts "Checking for $ppp_options_file file"
if { [file exists $ppp_options_file] != 1 } {
if { [file exists $ppp_options_template] != 1 } {
puts "Options file $ppp_options_template not found"
exit -1
}
puts "$ppp_options_file file not found, creating .."
if { [file exists $copy_opts] } {
exec $copy_opts
if { [file exists $ppp_options_file] != 1 } {
puts "Create of PPP options file $ppp_options_file failed!"
exit -1
}
} else {
puts "File $copy_opts not found, cannot create $ppp_options_file file"
exit -1
}
}
# list
global active_account
global account_list
set account_list {}
# array (account_name,key)
global accounts
# connection scripts
# User configurable scripts in $HOME/.connect
global pre_connect_script
global post_connect_script
global pre_disconnect_script
global post_disconnect_script
# Each hook is enabled only when the corresponding file is present; an
# empty string disables it.
if [file exists /etc/connect/pre-connect] {
set pre_connect_script /etc/connect/pre-connect
} else {
set pre_connect_script ""
}
if [file exists /etc/connect/post-connect] {
set post_connect_script /etc/connect/post-connect
} else {
set post_connect_script ""
}
if [file exists /etc/connect/pre-disconnect] {
set pre_disconnect_script /etc/connect/pre-disconnect
} else {
set pre_disconnect_script ""
}
if [file exists /etc/connect/post-disconnect] {
set post_disconnect_script /etc/connect/post-disconnect
} else {
set post_disconnect_script ""
}
# Keys stored per account, and the label shown for each in the UI.
global account_keys
set account_keys {\
name\
uid\
passwd\
number\
domain\
search \
ns1\
ns2\
authtype\
defroute\
resolv\
status\
resolv_file\
}
global account_strings
set account_strings(name) "Account Name:"
set account_strings(uid) "User ID:"
set account_strings(passwd) "Password:"
set account_strings(number) "Phone Number:"
set account_strings(domain) "DNS Domain Name:"
set account_strings(search) "DNS Search Domains:"
set account_strings(ns1) "Account Nameserver #1:"
set account_strings(ns2) "Account Nameserver #2:"
set account_strings(authtype) "Authentication Type:"
set account_strings(defroute) "Default Route:"
set account_strings(resolv) "Create /etc/resolv.conf:"
global required_keys
set required_keys { name uid number authtype }
global account_file
set account_file "$ppp_dir/accounts"
RandomInit [pid]
# On SunOS hosts pick fonts available there; the second option wins.
if { [string first "sun" [exec arch]] != -1 } {
option add *font {palatino 12 bold}
option add *font {sun 12 bold}
}
global log_file
set log_file "$ppp_dir/ppp_log"
global kill_pppd
set kill_pppd "[set pkghome]/kill_pppd"
load_global
load_accts
build_log_win
build_menus
# Remove stale ip-up/ip-down output from a previous session so we never
# read old negotiation results.
global ip_up_file
set ip_up_file "/etc/ppp/ip-up.out"
if { [file exists $ip_up_file] } { exec rm $ip_up_file }
global ip_down_file
set ip_down_file "/etc/ppp/ip-down.out"
if { [file exists $ip_down_file] } { exec rm $ip_down_file }
#if_name=$1
#tty_device=$2
#speed=$3
#local_ip=$4
#remote_ip=$5
#ipparam=$6
#DNS1=$DNS1
#DNS2=$DNS2
# Pre-seed every key the ip-up script may report with an empty string so
# later lookups never hit a missing array element.
global ip_up_array
foreach key {
if_name \
tty_device \
speed \
local_ip \
remote_ip \
ipparam \
DNS1 \
DNS2 \
PPPD_PID \
PPPLOGNAME \
SPEED \
IPREMOTE \
IFNAME \
PATH \
PWD \
SHLVL \
DEVICE \
ORIG_UID \
IPLOCAL } {
set ip_up_array($key) ""
}
}
# Build the UI and load configuration before entering the Tk event loop.
init
# read_ip_up -- poll for /etc/ppp/ip-up.out (written by the ip-up script
# once pppd brings the link up), parse its key=value lines into
# ip_up_array, and publish the negotiated parameters through the usual
# globals (ip_address, remote_ip_address, ppp_interface, serial_port).
# Returns 0 on success, -1 if the file never appears or cannot be read.
proc read_ip_up { } {
puts "read_ip_up"
global ip_up_file \
ppp_interface \
serial_port \
ip_address \
remote_ip_address
# Fix: ip_up_array is pre-seeded with empty defaults in init, but this
# proc previously used a proc-local array, so any key missing from the
# output file raised a "no such element in array" error and the init
# defaults were never used. Declare it global so the defaults apply.
global ip_up_array
#if_name=$1
#tty_device=$2
#speed=$3
#local_ip=$4
#remote_ip=$5
#ipparam=$6
#DNS1=$DNS1
#DNS2=$DNS2
# Wait up to ~60 seconds (12 polls, 5s apart) for ip-up to write the file.
for { set i 0 } { $i < 12 } { incr i } {
puts "Checking for $ip_up_file"
if { [file exists $ip_up_file] != 1 } {
sleep 5
} else {
break
}
}
if { [file exists $ip_up_file] != 1 } {
puts "file $ip_up_file not found"
return -1
}
puts "opening file $ip_up_file"
# Guard the open: the file can exist yet be unreadable (permissions).
if { [catch { open $ip_up_file r } ip_fd] } {
puts "unable to open $ip_up_file: $ip_fd"
return -1
}
while { [gets $ip_fd line] != -1 } {
puts $line
set ip_up_array([lindex [split $line "="] 0]) [lindex [split $line "="] 1]
}
close $ip_fd
puts "remote_ip_address $ip_up_array(remote_ip)"
puts "ip_address $ip_up_array(local_ip)"
puts "ppp_interface $ip_up_array(if_name)"
puts "serial_port $ip_up_array(tty_device)"
set remote_ip_address $ip_up_array(remote_ip)
set ip_address $ip_up_array(local_ip)
set ppp_interface $ip_up_array(if_name)
set serial_port $ip_up_array(tty_device)
return 0
}
# serial speed set to 38400 bps
# pppd exp6: serial speed set to 38400 bps
#
# connect option: '/usr/bin/chat -v REPORT CONNECT ABORT BUSY ABORT "NO CARRIER" "" atdt18005904857 "CONNECT"' started (pid 1058)
# pppd exp6: connect option: '/usr/bin/chat -v REPORT CONNECT ABORT BUSY ABORT "NO CARRIER" "" atdt18005904857 "CONNECT"' started (pid 1058)
#
# chat: Feb 26 19:56:09 CONNECT 28800/ARQ/V34/LAPM/V42BIS
# Serial connection established.
# serial speed set to 38400 bps
# Using interface sppp0
# start_ppptool Connect: sppp0 <--> /dev/term/b
# pppd exp6: chat: Feb 26 19:56:09 CONNECT 28800/ARQ/V34/LAPM/V42BIS
# Serial connection established.
# serial speed set to 38400 bps
# Using interface sppp0
# Connect: sppp0 <--> /dev/term/b
#
# sent [LCP ConfReq id=0x91 <asyncmap 0x0> <magic 0x1504974d> <pcomp> <accomp>]
# pppd exp6: sent [LCP ConfReq id=0x91 <asyncmap 0x0> <magic 0x1504974d> <pcomp> <accomp>]
#
# rcvd [LCP ConfAck id=0x91 <asyncmap 0x0> <magic 0x1504974d> <pcomp> <accomp>]
# pppd exp6: rcvd [LCP ConfAck id=0x91 <asyncmap 0x0> <magic 0x1504974d> <pcomp> <accomp>]
#
# rcvd [LCP ConfReq id=0x1 <mru 1500> <asyncmap 0x0> <auth pap> <pcomp> <accomp>]
# sent [LCP ConfAck id=0x1 <mru 1500> <asyncmap 0x0> <auth pap> <pcomp> <accomp>]
# sent [LCP Ident id=0x92 magic=0x1504974d "ppp-2.4.0b1 (Sun Microsystems, Inc., Jul 31 2002 10:08:25)"]
# Authenticating to peer with PAP
# sent [PAP AuthReq id=0x1 user="internet.smus.MERICAA" password=<hidden>]
# pppd exp6: rcvd [LCP ConfReq id=0x1 <mru 1500> <asyncmap 0x0> <auth pap> <pcomp> <accomp>]
# sent [LCP ConfAck id=0x1 <mru 1500> <asyncmap 0x0> <auth pap> <pcomp> <accomp>]
# sent [LCP Ident id=0x92 magic=0x1504974d "ppp-2.4.0b1 (Sun Microsystems, Inc., Jul 31 2002 10:08:25)"]
# Authenticating to peer with PAP
# sent [PAP AuthReq id=0x1 user="internet.smus.MERICAA" password=<hidden>]
#
# sent [PAP AuthReq id=0x2 user="internet.smus.MERICAA" password=<hidden>]
# pppd exp6: sent [PAP AuthReq id=0x2 user="internet.smus.MERICAA" password=<hidden>]
#
# sent [PAP AuthReq id=0x3 user="internet.smus.MERICAA" password=<hidden>]
# pppd exp6: sent [PAP AuthReq id=0x3 user="internet.smus.MERICAA" password=<hidden>]
#
# rcvd [LCP ConfReq id=0x2 <mru 1500> <asyncmap 0x0> <auth pap> <pcomp> <accomp>]
# sent [LCP ConfReq id=0x93 <asyncmap 0x0> <magic 0x90235e21> <pcomp> <accomp>]
# sent [LCP ConfAck id=0x2 <mru 1500> <asyncmap 0x0> <auth pap> <pcomp> <accomp>]
# pppd exp6: rcvd [LCP ConfReq id=0x2 <mru 1500> <asyncmap 0x0> <auth pap> <pcomp> <accomp>]
# sent [LCP ConfReq id=0x93 <asyncmap 0x0> <magic 0x90235e21> <pcomp> <accomp>]
# sent [LCP ConfAck id=0x2 <mru 1500> <asyncmap 0x0> <auth pap> <pcomp> <accomp>]
#
# rcvd [LCP ConfAck id=0x93 <asyncmap 0x0> <magic 0x90235e21> <pcomp> <accomp>]
# sent [LCP Ident id=0x94 magic=0x90235e21 "ppp-2.4.0b1 (Sun Microsystems, Inc., Jul 31 2002 10:08:25)"]
# Authenticating to peer with PAP
# sent [PAP AuthReq id=0x4 user="internet.smus.MERICAA" password=<hidden>]
# pppd exp6: rcvd [LCP ConfAck id=0x93 <asyncmap 0x0> <magic 0x90235e21> <pcomp> <accomp>]
# sent [LCP Ident id=0x94 magic=0x90235e21 "ppp-2.4.0b1 (Sun Microsystems, Inc., Jul 31 2002 10:08:25)"]
# Authenticating to peer with PAP
# sent [PAP AuthReq id=0x4 user="internet.smus.MERICAA" password=<hidden>]
#
# sent [PAP AuthReq id=0x5 user="internet.smus.MERICAA" password=<hidden>]
# pppd exp6: sent [PAP AuthReq id=0x5 user="internet.smus.MERICAA" password=<hidden>]
#
# rcvd [PAP AuthAck id=0x5 ""]
# sent [IPCP ConfReq id=0x88 <addr 0.0.0.0> <compress VJ 0f 01> <ms-dns1 0.0.0.0> <ms-dns2 0.0.0.0>]
# sent [CCP ConfReq id=0x4d <deflate 15> <deflate(old#) 15> <bsd v1 15>]
# pppd exp6: rcvd [PAP AuthAck id=0x5 ""]
# sent [IPCP ConfReq id=0x88 <addr 0.0.0.0> <compress VJ 0f 01> <ms-dns1 0.0.0.0> <ms-dns2 0.0.0.0>]
# sent [CCP ConfReq id=0x4d <deflate 15> <deflate(old#) 15> <bsd v1 15>]
#
# rcvd [IPCP ConfReq id=0x3 <addr 204.146.246.229>]
# sent [IPCP ConfAck id=0x3 <addr 204.146.246.229>]
# pppd exp6: rcvd [IPCP ConfReq id=0x3 <addr 204.146.246.229>]
# sent [IPCP ConfAck id=0x3 <addr 204.146.246.229>]
#
# rcvd [IPCP ConfRej id=0x88 <compress VJ 0f 01> <ms-dns1 0.0.0.0> <ms-dns2 0.0.0.0>]
# sent [IPCP ConfReq id=0x89 <addr 0.0.0.0>]
# pppd exp6: rcvd [IPCP ConfRej id=0x88 <compress VJ 0f 01> <ms-dns1 0.0.0.0> <ms-dns2 0.0.0.0>]
# sent [IPCP ConfReq id=0x89 <addr 0.0.0.0>]
#
# rcvd [LCP ProtRej id=0x4 80 fd 01 4d 00 0f 1a 04 78 00 18 04 78 00 15 03 2f]
# pppd exp6: rcvd [LCP ProtRej id=0x4 80 fd 01 4d 00 0f 1a 04 78 00 18 04 78 00 15 03 2f]
#
# rcvd [IPCP ConfNak id=0x89 <addr 32.100.234.63>]
# sent [IPCP ConfReq id=0x8a <addr 32.100.234.63>]
# pppd exp6: rcvd [IPCP ConfNak id=0x89 <addr 32.100.234.63>]
# sent [IPCP ConfReq id=0x8a <addr 32.100.234.63>]
#
# rcvd [IPCP ConfAck id=0x8a <addr 32.100.234.63>]
# local IP address 32.100.234.63
# remote IP address 204.146.246.229
# pppd exp6: rcvd [IPCP ConfAck id=0x8a <addr 32.100.234.63>]
# local IP address 32.100.234.63
# remote IP address 204.146.246.229
#- ## This won't work, it's not tip
#- "Local command?" {
#-
#- if { [string first "Your IP address is" $expect_out(buffer)] != -1 } {
#- puts "\n**** Found IP: $expect_out(buffer) ****\n"
#- foreach line [split $expect_out(buffer) "\r\n"] {
#- puts "PPPD: $line"
#- if { [string first "Your IP" $line] != -1 } {
#- set ip_address [lindex [split $line " "] 4]
#- set ip_address [string trimright $ip_address "."]
#- puts "IP address: $ip_address"
#- set mtu [lindex [split $line " "] 7]
#- puts "MTU: $mtu"
#- .footer.footer_r configure -text "ppp Connected: $line" -font {times 10 bold} -justify left
#- break
#- }
#- }
#- }
#-
#- log_message $expect_out(buffer)
#- #puts "exp_send /usr/bin/pppd debug lock updetach crtscts defaultroute noipdefault noccp novj\r"
#- #exp_send "/usr/bin/pppd debug lock updetach crtscts defaultroute noipdefault noccp novj\r"
#- puts "exp_send /usr/sbin/pppd debug lock crtscts defaultroute noipdefault noccp novj\r"
#- exp_send "/usr/sbin/pppd debug lock crtscts defaultroute noipdefault noccp novj\r"
#-
#- # best guess that connected is here
#- post_connect
#-
#- exp_continue
#- }
#- proc linux_isp_connect { } {
#- puts "proc linux_isp_connect { } "
#-
#- global accounts account_list active_account baud_rate
#- global env flow_control init_string ip_enabled
#- global log_file log_messages message modem_port
#- global nat_transparency_mode network_interface
#- global password phone_number
#- global port port_speed spawn_id tokencard_response username
#- global baud_rate pppd_config_file username
#-
#- global connection_state
#- global pppd_id
#- global pppd_pid
#- global ppp_interface serial_port ip_address
#- global connection_state
#-
#- .passwdf.passwd_r configure -state normal
#-
#- global phone_number port baud_rate pppd pppd_config_file env username
#- global tip_id
#- global connection_type
#- set connection_type "pppd"
#- global ppp_settings
#-
#-
#- # strip out non-numeric
#- regsub -all "\[^0-9\]" $accounts($active_account,number) "" phone_number
#-
#- set baud_rate $ppp_settings(port_speed)
#- set pppd_config_file "$env(HOME)/.ppptool/connect.script"
#- set port $ppp_settings(modem_port)
#- set pppd /usr/sbin/pppd
#- set pppd_cmd "$pppd file $pppd_config_file"
#-
#- set username [.uidf.uid_r get]
#- set password [.passwdf.passwd_r get]
#-
#- if { $username != $accounts($active_account,uid) } {
#- set accounts($active_account,uid) $username
#- }
#- if { $password != $accounts($active_account,passwd) } {
#- set accounts($active_account,passwd) $password
#- }
#-
#- puts "$phone_number $port $baud_rate $pppd_config_file $username $password"
#-
#- # flow control
#- #switch $ppp_settings(flow_control) {
#- # "hardware" { puts $fd crtscts }
#- # "software" { puts $fd xonxoff }
#- # "none" { puts $fd nocrtscts }
#- #}
#-
#- if { $accounts($active_account,defroute) == 1 } {
#- puts $fd "defaultroute"
#- } else {
#- puts $fd "nodefaultroute"
#- }
#- puts $fd "noipdefault"
#- puts $fd "user $username"
#- puts $fd "password $password"
#-
#- #puts $fd "connect '/usr/sbin/chat -v REPORT CONNECT ABORT BUSY ABORT \"NO CARRIER\" \"\" atdt$phone_number \"CONNECT\"'"
#-
#- #close $fd
#- #set pppd_cmd "$pppd file $pppd_config_file"
#- #puts "$pppd_cmd"
#-
#- .footer.footer_r config -text "Connecting with Chat script"
#-
#- set timeout -1
#- #eval spawn $pppd_cmd
#- #set pppd_id $spawn_id
#-
#- #.footer.footer_r config -text "Starting Process:\n$pppd_cmd" -font {times 10 bold} -justify left
#- set ctr 0
#-
#- #- expect_background -i $spawn_id -re "\[^\r]*\r\n" {
#- #-
#- #- if { [string first "Hangup" $expect_out(0,string)] != -1 } {
#- #- .footer.footer_r configure -text "Hangup from Modem"
#- #- #set connection_state "disconnected"
#- #- .connect configure -state normal
#- #-
#- #- }
#- #- if { [string first "(SIGHUP)" $expect_out(0,string)] != -1 } {
#- #- .footer.footer_r configure -text "Hangup from Modem"
#- #- .connect configure -state normal
#- #- }
#- #-
#- #- if { [string first "is locked by pid" $expect_out(0,string)] != -1 } {
#- #- # get pid and run close_pppd
#- #- #Device /dev/term/b is locked by pid 6482
#- #- #pppd exp6: Device /dev/term/b is locked by pid 6482
#- #-
#- #- set split_line [split $expect_out(0,string) "\[ \t]*"]
#- #- set lock_pid [lindex $split_line [expr [llength $split_line] -1]]
#- #- puts "Found locking pid $lock_pid, looking for /var/run/ files with pid"
#- #- # look for file in /var/run containing pid
#- #- foreach f [exec ls /var/run] {
#- #- if { [regexp "pid$" $f] == 1 } {
#- #- # check if file contains pid
#- #- if { [exec cat "/var/run/$f"] == $lock_pid } {
#- #- #send file name to close_pppd
#- #- set ppp_interface [lindex [split $f "."] 0]
#- #- puts "Setting ppp_interface file name to $ppp_interface"
#- #- puts "Running close_pppd"
#- #- close_pppd
#- #- break
#- #- }
#- #- }
#- #- }
#- #- }
#- #-
#- #- # look for connection success
#- #- # Connect: sppp0 <--> /dev/term/b
#- #- if { [string first "Connect" $expect_out(0,string)] != -1 } {
#- #- set ppp_interface \
#- #- [lindex [split $expect_out(0,string) " \t"] 1]
#- #-
#- #- set serial_port \
#- #- [lindex [split $expect_out(0,string) " \t"] 3]
#- #- puts "\n==> PPP Interface: $ppp_interface <==\n"
#- #- puts "\n==> serial port: $serial_port <==\n"
#- #- }
#- #-
#- #- if { [string first "local" $expect_out(0,string)] != -1 && \
#- #- [string first "IP address" $expect_out(0,string)] != -1 } {
#- #-
#- #- set ip_address \
#- #- [string trim [lindex [split $expect_out(0,string) "\[ ]*"] 4]]
#- #- puts "\n==> IP address for interface: $ip_address <==\n"
#- #- .footer.footer_r configure \
#- #- -text "PPP Connected.\nNetwork Interface: $ppp_interface\nIP address: $ip_address" -anchor w -justify left
#- .connect configure -state normal -text "Disconnect"
#- set connection_state "connected"
#- #- }
#- #-
#- #- # rcvd [IPCP ConfAck id=0x8a <addr 32.100.234.63>]
#- #- # local IP address 32.100.234.63
#- #- # remote IP address 204.146.246.229
#- #- # pppd exp6: rcvd [IPCP ConfAck id=0x8a <addr 32.100.234.63>]
#- #- # local IP address 32.100.234.63
#- #- # remote IP address 204.146.246.229
#- #-
#- #- # get pid
#- #- if { [string first "CONNECT" $expect_out(0,string)] != -1 && \
#- #- [string first "started" $expect_out(0,string)] != -1 } {
#- #-
#- #- puts "pppd $spawn_id"
#- #- # the pid of the pppd
#- #- set pid_line [split $expect_out(0,string) "\[ \t]*"]
#- #- puts "pid_line: $pid_line"
#- #-
#- #- # get last string of line
#- #- set pid_string [lindex $pid_line end]
#- #- puts "pid_string: $pid_string"
#- #-
#- #- set pppd_pid [string trimright [lindex [split $pid_string] 0] ")"]
#- #- puts "pppd_pid: $pppd_pid"
#- #- .connect configure -state normal
#- #-
#- #- }
#- #-
#- #- puts "pppd $spawn_id: $expect_out(0,string)"
#- #- log_message $expect_out(0,string)
#- #- }
#-
#- #post_connect
#- }
| true |
c79b28fab009e004adbd678d068f99c2c14c2180 | Shell | pangqiqiang/operation | /新建文件夹/startApi2.sh | UTF-8 | 1,718 | 3.21875 | 3 | [] | no_license | #!/bin/bash
# Source function library.
. /etc/init.d/functions
# Source networking configuration.
. /etc/sysconfig/network
# retVal -- report the exit status of the command that ran just before
# this function was called. Prints the initscript success/failure marker
# (from /etc/init.d/functions), a newline, and returns the captured
# status so callers can chain on it.
function retVal()
{
    RETVAL=$?
    # Use if/else rather than "test && success || failure": with the
    # short-circuit form, failure would also run whenever success itself
    # returned non-zero.
    if [ $RETVAL -eq 0 ]; then
        success
    else
        failure
    fi
    echo
    return $RETVAL
}
# printHelp -- print a short usage banner naming this script.
function printHelp()
{
    printf '%s\n' "Help:" " $0"
}
# Set Java Env
# Hard-coded JDK install under /usr/local/jdk; classpath picks up the
# standard tools jars.
export JAVA_HOME=/usr/local/jdk
export JAVA_BIN=/usr/local/jdk/bin
export PATH=$PATH:$JAVA_HOME/bin
export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
export JAVA_HOME JAVA_BIN PATH CLASSPATH
# Set JVM Env
# CATALINA_BASE is the per-instance directory (api2); CATALINA_HOME is
# the shared Tomcat install; stdout/stderr goes to the instance log.
export CATALINA_BASE=/home/ecloud/ins/api2
export CATALINA_HOME=/home/ecloud/app/tomcat
export CATALINA_OUT=/data/weblogs/api2/catalina.out
export LD_LIBRARY_PATH=$CATALINA_HOME/lib:/usr/local/apr/lib:/usr/local/apr
# JMX remote port plus the path to the compiled tomcat-native library.
export CATALINA_OPTS="-Dcom.sun.management.jmxremote.port=9213 -Djava.library.path=/home/ecloud/app/tomcat/bin/tomcat-native-1.2.10-src/native/.libs"
# Heap/GC tuning (CMS collector) and unauthenticated JMX; the hostname
# here is the host's public address -- TODO confirm it is still current.
export JAVA_OPTS="-server -Xms2400m -Xmx2400m -Xmn1200m -Xss256k -XX:PermSize=128m -XX:MaxPermSize=128m -Xverify:none -XX:+UseCMSCompactAtFullCollection -XX:CMSFullGCsBeforeCompaction=2 -XX:SurvivorRatio=1 -XX:+UseConcMarkSweepGC -XX:+UseParNewGC -XX:-CMSParallelRemarkEnabled -XX:+DisableExplicitGC -XX:+CMSClassUnloadingEnabled -XX:+UseFastAccessorMethods -XX:+UseCMSInitiatingOccupancyOnly -XX:CMSInitiatingOccupancyFraction=85 -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/home/weblogs/jvm_dump.log -Djava.rmi.server.hostname=114.215.16.36 -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false"
#CATALINA_OPTS JAVA_OPTS >/dev/null 2>&1
# Start tomcat
echo -n $"Starting Tomcat: "
$CATALINA_HOME/bin/startup.sh -config $CATALINA_BASE/conf/server.xml > /dev/null 2>&1
retVal
| true |
5e23261044913960c118d6f850c62d64dde708b8 | Shell | qinghanw/source_code | /linux_command/part2/charpter10/test13.sh | UTF-8 | 87 | 2.59375 | 3 | [] | no_license | #!/bin/bash
var1=10
var2=30
# Use the standard $(( )) arithmetic expansion; the original $[ ] form
# is deprecated in bash. The result (10 * 30 = 300) is unchanged.
var3=$(( var1 * var2 ))
echo "The result is $var3"
exit 5
| true |
0a41c70702f8d1bdbdf835c3c3142ee4006278e0 | Shell | hive4bee/studylinux | /02/05_condition2.sh | UTF-8 | 276 | 3.46875 | 3 | [] | no_license | #!/bin/bash
# Demonstrate an if/elif chain: first probe /etc/passwd for the user,
# then probe for a leftover home directory. grep's and ls's own output
# is intentionally left visible on stdout.
testuser=NoSuchUser
if grep "$testuser" /etc/passwd; then
    echo "The user $testuser exists on this system."
elif ls -d "/home/$testuser"; then
    echo "The user $testuser does not exists on this system."
    echo "However, $testuser has a directory."
fi
08b326df19142de2568606e8d698ef574441de8e | Shell | meedan/watchbot | /scripts/test.sh | UTF-8 | 1,044 | 3.609375 | 4 | [] | no_license | #!/bin/bash
# This script tests all API methods
# Target host is the first argument, defaulting to the local dev server.
HOST=$1
if [ -z "$HOST" ]
then
HOST='http://localhost:3000'
fi
# Mint a fresh API key via the rake task and keep just the token, which
# the task prints as "... token <TOKEN> ..." on its final line.
KEY=$(bundle exec rake watchbot:api_keys:create application=bridge-api | sed 's/.*token \([^ ]\+\) .*/\1/g' | tail -1)
# urlencode STRING
# Percent-encode every character outside the unreserved set
# (ALPHA / DIGIT / "." / "~" / "_" / "-", per RFC 3986) and write the
# result to stdout, without a trailing newline.
function urlencode {
  local length="${#1}"
  local i c
  for (( i = 0; i < length; i++ ))
  do
    c="${1:i:1}"
    case $c in
      # Fix: never use data as the printf format string; print it as a
      # plain %s argument instead (also keeps a lone "-" safe).
      [a-zA-Z0-9.~_-]) printf '%s' "$c" ;;
      # "'$c" makes printf use the character's code point for %02X.
      *) printf '%%%02X' "'$c"
    esac
  done
}
# call VERB PATH [PARAMS]
# Issue an authenticated JSON request against $HOST and pretty-print the
# response body. PARAMS is a JSON string and defaults to "{}".
function call {
verb=$1
path=$2
params=$3
if [ -z "$params" ]
then
params='{}'
fi
echo "Calling: $verb $path with params $params"
# Token auth uses the key minted at startup; python pretty-prints the
# JSON response, and the trailing echo keeps successive calls readable.
curl -s -X $verb "$HOST/$path" \
     -H "Content-Type: application/json" \
     -H "Authorization: Token token=\"$KEY\"" \
     -d "$params" | python -mjson.tool; echo
}
# Exercise each endpoint: create a link, delete it (the URL must be
# percent-encoded into the path), then bulk-create and bulk-delete.
call POST links '{"url":"http://meedan.com"}'
url=$(urlencode http://meedan.com)
call DELETE "links/$url"
call POST links/bulk '{"url1":"http://meedan.com","url2":"http://meedan.com/bridge"}'
call DELETE links/bulk '{"url1":"http://meedan.com","url2":"http://meedan.com/bridge"}'
| true |
527a137ec2ad96278cb3bc9818e29c12f71ff4b9 | Shell | art-in/meteos | /notifications/docker/scripts/build.sh | UTF-8 | 269 | 2.828125 | 3 | [] | no_license | #!/bin/bash
# creates docker image with production build

PROJECT_NAME=meteos-notifications
DOCKER_IMAGE=$PROJECT_NAME

# go to project root (two levels above this script's directory).
# Fix: quote the command substitutions (paths with spaces) and abort on
# a failed cd instead of letting docker build run from the wrong
# directory.
cd "$(dirname "$(realpath "$0")")/../.." || exit 1

# create docker image; do not mask the build's exit status.
docker build . --file ./docker/Dockerfile --tag "$DOCKER_IMAGE"
bbcd52261b256dde4b90de4bbaaf80b4c2b4a187 | Shell | osak/ICFPC2017 | /bin/run_ai.sh | UTF-8 | 328 | 2.90625 | 3 | [] | no_license | #!/bin/bash
# usage run_ai.sh [commit id for ai] [ai args...]
# Runs the punter binary for the given AI build under /var/ai/<commit>/,
# forwarding all remaining arguments to it.

GIT_COMMIT=$1
shift

AI_DIRECTORY="/var/ai/${GIT_COMMIT}/"

SCRIPT=$(readlink -f "$0")
SCRIPT_DIR=$(dirname "$SCRIPT")
ROOT_DIR=$(dirname "$SCRIPT_DIR")

# replace sandstar with latest version
# cp $ROOT_DIR/bin/sandstar.rb $AI_DIRECTORY/bin

# Fix: abort instead of silently running punter from the wrong directory.
cd "${AI_DIRECTORY}" || exit 1
# Fix: quote "$@" so AI arguments containing whitespace are preserved;
# the old unquoted $@ re-split them.
./punter "$@"
| true |
81ba6772c8cb2ea4de43ed2567b116d765d7ddad | Shell | petronny/aur3-mirror | /supl/PKGBUILD | UTF-8 | 831 | 2.5625 | 3 | [] | no_license | # Maintainer: Dan McGee <dan@archlinux.org>
pkgname=supl
pkgver=1.0.6
pkgrel=1
pkgdesc="Retrieve A-GPS data (ephemeris and almanac) from SUPL servers over TCP/IP"
arch=('i686' 'x86_64')
url="http://www.tajuma.com/supl/index.html"
license=('BSD')
depends=('openssl')
# Upstream release tarball plus both checksums for it.
source=(http://downloads.sourceforge.net/project/supl/supl_$pkgver.tar.gz)
md5sums=('72ead03a19fee6ea6ca77577a7ca98dc')
sha256sums=('068dc47ce818ce5634f09a88159df85a6ce3456e2467b11b8c5f8543a99bb347')
build() {
  cd "$srcdir/trunk"
  # --precompiled-asn1 presumably skips regenerating the ASN.1 sources;
  # confirm against the project's configure script.
  ./configure --prefix=/usr --precompiled-asn1
  # Fixes linker errors due to missing -lcrypto
  sed -i -e 's#-lssl#$(shell pkg-config --libs openssl)#' src/Makefile
  make
}
package() {
  cd "$srcdir/trunk"
  # DEB_PREFIX is the project's install-root make variable (despite the
  # name); point it at $pkgdir so files land in the package staging area.
  make DEB_PREFIX="$pkgdir/" install
  install -m644 -D COPYING "$pkgdir/usr/share/licenses/$pkgname/COPYING"
}
# vim:set ts=2 sw=2 et:
| true |
4b6e4ed4d0ba298696153c058990e1476be2f613 | Shell | torshinc/meteor-buildpack-horse | /extra/ffmpeg_install.sh | UTF-8 | 793 | 3.203125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
############################################
# Install ffmpeg
############################################
echo "-----> Install ffmpeg"
FFMPEG_BUILD_DIR=$1
FFMPEG_VENDOR_DIR="vendor"
FFMPEG_DOWNLOAD_URL="https://s3.amazonaws.com/torsh-talent/ffmpeg3.tar.gz"
echo "FFMPEG_BUILD_DIR = " $FFMPEG_BUILD_DIR
echo "DOWNLOAD_URL = " $FFMPEG_DOWNLOAD_URL
cd $FFMPEG_BUILD_DIR
mkdir -p $FFMPEG_VENDOR_DIR
cd $FFMPEG_VENDOR_DIR
curl -L --silent $FFMPEG_DOWNLOAD_URL | tar xz
echo "exporting PATH and LIBRARY_PATH"
FFMPEG_PROFILE_PATH="$FFMPEG_BUILD_DIR/.profile.d/ffmpeg.sh"
mkdir -p $(dirname $FFMPEG_PROFILE_PATH)
echo 'export PATH="$PATH:$HOME/vendor/ffmpeg/bin"' >> $FFMPEG_PROFILE_PATH
echo 'export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:$HOME/vendor/ffmpeg/lib"' >> $FFMPEG_PROFILE_PATH | true |
22be235198a5d34ee638ff0cda320c11993d1b89 | Shell | danielithomas/mac-flow | /scripts/tasks/tasks-add.sh | UTF-8 | 1,090 | 3.65625 | 4 | [] | no_license | #!/bin/bash
# Build the full path of the journal file
# File Format is...
# YEAR-MONTH.md
filename=$(date +%Y-%B).md
logpath=$JOURNAL_DIR/
fullpath=$logpath$filename
# $1 = task type, $2 = target task list, $3.. = free-form description.
tasktype="$1"
tasklist="$2"
# The description is argv[3..] wrapped in single quotes and then stripped
# back out with sed -- presumably to keep leading/trailing spaces intact;
# verify before simplifying.
str="'${@:3}'"
description="$(sed -e "s/^'//" -e "s/'$//" <<< $str)"
#
# Fun icon - used for quick visual scanning when looking at the journal.
#
funicon="🍀"
taskname="Task"
# Map the task type onto its icon and display label; unknown types keep
# the generic defaults above.
case "$tasktype" in
'action')
funicon="⭐️"
taskname="Action"
;;
'idea')
funicon="💡"
taskname="Idea"
;;
'parked')
funicon="📌"
taskname="Park"
;;
esac
#
# Create a new file (if required)
#
sh $MAC_FLOW/journal/journal-create.sh "$fullpath"
#
# Create a new journal entry for today (if required)
#
sh $MAC_FLOW/journal/journal-add-day.sh "$fullpath"
#
# Time to create our todo!
#
journaltask="$funicon **$taskname** - $description"
echo "$journaltask" >> $fullpath
sh $MAC_FLOW/tasks/tasks-create-item.sh "$tasklist" "$description"
echo "- Added to journal: $journaltask " | lolcat
# $(date '+%A, %B %d %Y at %H:%M')
#m365 todo task add -t "$taskdescription" --listName "$tasklist" >> /dev/null
4e9b72997aca012b005828293c4125298925be08 | Shell | webscale-networks/mod_pagespeed | /install/apache_experiment_ga_test.sh | UTF-8 | 1,434 | 2.859375 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/bash
#
# Copyright 2012 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Author: jefftk@google.com (Jeff Kaufman)
#
# Runs all Apache-specific experiment framework tests that depend on AnalyticsID
# being set.
#
# See apache_experiment_test for usage.
#
this_dir=$(dirname $0)
# Pulls in the test harness: start_test, check_from, check_not_from,
# check_failures_and_exit, and the $WGET_DUMP command line.
source "$this_dir/apache_experiment_test.sh" || exit 1
# $1 is the server root handed through by the harness.
EXAMPLE="$1/mod_pagespeed_example"
EXTEND_CACHE="$EXAMPLE/extend_cache.html"
start_test Analytics javascript is added for the experimental group.
# The PageSpeedExperiment cookie selects the experiment bucket; the page
# should echo the bucket id back in its instrumentation snippet.
OUT=$($WGET_DUMP --header='Cookie: PageSpeedExperiment=2' $EXTEND_CACHE)
check_from "$OUT" fgrep -q 'Experiment: 2'
OUT=$($WGET_DUMP --header='Cookie: PageSpeedExperiment=7' $EXTEND_CACHE)
check_from "$OUT" fgrep -q 'Experiment: 7'
start_test Analytics javascript is not added for the no-experiment group.
# Bucket 0 is the control group: no experiment marker may appear at all.
OUT=$($WGET_DUMP --header='Cookie: PageSpeedExperiment=0' $EXTEND_CACHE)
check_not_from "$OUT" fgrep -q 'Experiment:'
check_failures_and_exit
| true |
cab7171e0ef154aff194da4130d37750ccafff5f | Shell | proski/development-tools | /stg-split/stg-split | UTF-8 | 2,379 | 3.859375 | 4 | [] | no_license | #! /bin/sh
# Split the current StGit patch in two.
#
# Run "stg edit --diff", let the user remove parts of the patch, put the
# removed changes to a separate StGit patch, let the user describe it.
#
# To streamline splitting a patch into more than two parts, it is suggested
# to remove all changes except those that belong to a single patch. Then split
# the new top patch the same way.
#
# To fine tune the resulting patches, "stg pop" to the patch to be adjusted,
# edit the code, refresh the patch and run "stg push --set-tree" so that the
# next patch takes the changes in reverse, keeping the final code unchanged.
set -e -u
# Write an error message to stderr and exit with non-zero status.
# Arguments: message words (joined with single spaces).
fatal() {
    # printf is immune to messages that start with "-" or contain
    # backslash escapes, unlike echo.
    printf '%s\n' "$*" >&2
    exit 1
}
# Sanity checks: must be inside a git repo, at a clean worktree, with a
# non-empty StGit patch on top.
if ! git rev-parse HEAD >/dev/null; then
    fatal "Not in a git repository"
fi
if ! top_dir=$(git rev-parse --show-toplevel); then
    fatal "Cannot find top-level git directory"
fi
if ! cd "$top_dir"; then
    fatal "Cannot change to the top-level git directory"
fi
if ! git diff --quiet; then
    fatal "Unsupported case: unstaged changes found"
fi
if ! git diff --quiet --staged; then
    fatal "Unsupported case: staged changes found"
fi
if ! patch_name=$(stg top) >/dev/null; then
    fatal "No current StGit patch detected"
fi
if git diff --quiet HEAD^ HEAD; then
    fatal "Unsupported case: patch $patch_name is empty"
fi
# Generate a unique name for the new patch: strip any trailing "-<n>"
# suffix, then probe "<base>-1", "<base>-2", ... until `stg new` succeeds.
patch_base_name=$(echo "$patch_name" | sed 's/-[0-9]*$//')
i=1
while true; do
    new_patch_name="$patch_base_name-$i"
    if stg new -m "$new_patch_name" "$new_patch_name" 2>/dev/null; then
        break
    fi
    i=$((i+1))
done
# The probe patch only reserved the name; delete it before picking.
stg delete "$new_patch_name" 2>/dev/null
# Copy the top patch to an unapplied copy
if ! stg pick --noapply --name="$new_patch_name" "$patch_name"; then
    # Fallback for StGit 1.x: try --unapplied instead of --noapply
    if ! stg pick --unapplied --name="$new_patch_name" "$patch_name"; then
        fatal "Cannot duplicate patch $patch_name"
    fi
fi
# Edit the first patch; compare commit ids to detect whether the user
# changed anything at all, and trees to detect source changes.
hash_old=$(stg id)
stg edit --diff
hash_new=$(stg id)
if test "$hash_old" = "$hash_new"; then
    echo "No changes made to the patch"
    exit 0
fi
if git diff --quiet "$hash_old" "$hash_new"; then
    echo "No changes made to the sources"
    exit 0
fi
# Push the second patch, keep the final tree: whatever was removed from
# the first patch reappears, in reverse, in the copy.
stg push --set-tree "$new_patch_name"
# Edit the second patch
stg edit --diff
| true |
e1f48ff7abbbfbd2fa9d149ce578201a44f6a876 | Shell | Viv-Crowe/agnostic_fp | /download.sh | UTF-8 | 881 | 3.515625 | 4 | [] | no_license | #!/bin/bash
# Download the latest ChEMBL release files into a wget mirror directory,
# flatten them into the current directory, and print sha256 checksums for
# manual comparison against the published checksums.txt.
# Fail fast: without this, a failed download still fell through to the
# mv/rm cleanup below and could destroy a partial mirror.
set -e
CHEMBL="https://ftp.ebi.ac.uk/pub/databases/chembl/ChEMBLdb/latest"
# Directory tree that `wget -x` creates: the URL without its scheme.
chembl_folder="${CHEMBL##*//}"
files=(
  "checksums.txt"
  "chembl_uniprot_mapping.txt"
  "chembl_28_sqlite.tar.gz"
  "chembl_28.sdf.gz"
  "chembl_28_chemreps.txt.gz"
  "chembl_28.fps.gz"
)
checksum_file="${chembl_folder}/checksums.txt"
for file in "${files[@]}"; do
  # -c resumes partial downloads, -x mirrors the server path locally.
  wget --no-check-certificate -c -x "${CHEMBL}/${file}"
  # Automatic sha256 re-download loop kept disabled, as in the original:
  # if grep "${file}" ${checksum_file}; then
  #     while [[ $(grep "${file}" ${checksum_file} | awk '{print $1}') != $(sha256sum "${chembl_folder}/${file}" | awk '{print $1}') ]]; do
  #         wget --no-check-certificate -c -x "${CHEMBL}/${file}"
  #     done
  # fi
done
# Flatten the mirror into the current directory, then drop the tree.
mv "${chembl_folder}"/* .
rm -r "${chembl_folder%%/*}"
echo "Finished download"
echo "sha256 checksums are:"
sha256sum ./* | sort
echo "sha256 checksums should be:"
sort checksums.txt
| true |
3809e4281df29cf83ebe677294bde27a66efe472 | Shell | GuilhermeRoque/Distributed-monitoring-system | /station/installer.sh | UTF-8 | 1,582 | 2.890625 | 3 | [] | no_license | #!/bin/bash
# Installer for the station monitoring service: copies libraries, daemons
# and systemd units into system locations, then enables and starts them.
# Requires sudo; prime the credential cache up front.
sudo -v

LIB=/usr/lib/python3/dist-packages
BIN=/usr/sbin
SERV=/etc/systemd/system
DATA=/var/lib/station

printf "\nInstalling dependencies into '$LIB/'...\n"
sudo pip3 install --target $LIB -r requirements.txt

printf "\nCopying lib's to '$LIB/'...\n"
sudo cp driver/dht11.py $LIB/
sudo cp driver/driver.py $LIB/
sudo cp driver/sensor.py $LIB/
sudo cp driver/sensorIO.py $LIB/
sudo cp driver/bmp280.py $LIB/
sudo cp interface/amqpConn.py $LIB/
sudo cp interface/consumerAMQP.py $LIB/
sudo cp interface/flaskApp.py $LIB/
sudo cp interface/publisherAMQP.py $LIB/
sudo cp interface/zmqRequest.py $LIB/

printf "\nCopying main's to '$BIN/'...\n"
sudo cp -r driver/mainDriver.py $BIN/
sudo cp -r interface/mainWeb.py $BIN/
sudo cp -r interface/mainPublisherAMQP.py $BIN/
sudo cp -r interface/mainConsumerAMQP.py $BIN/

printf "\nAdding main's permissions...\n"
sudo chmod +x $BIN/mainDriver.py
sudo chmod +x $BIN/mainPublisherAMQP.py
sudo chmod +x $BIN/mainConsumerAMQP.py
sudo chmod +x $BIN/mainWeb.py

printf "\nCopying service files...\n"
sudo cp service/* $SERV/

# printf only writes to stdout; the original's `sudo printf` was redundant.
printf "\nAdding database folder '$DATA/'...\n"
sudo mkdir $DATA

printf "\nAdding permission to services start in system booting...\n"
sudo systemctl enable sensorDriver.service
sudo systemctl enable webApp.service
sudo systemctl enable consumerAMQP.service
sudo systemctl enable publisherAMQP.service

# BUG FIX: the original format string was "\Starting ..." — "\S" is not a
# printf escape, so it printed a literal backslash and lost the newline.
printf "\nStarting services...\n"
sudo systemctl start sensorDriver.service
sudo systemctl start webApp.service
sudo systemctl start consumerAMQP.service
sudo systemctl start publisherAMQP.service
63053594c0b45daea2845c2d509a10464c7b68eb | Shell | ljalil/dotfiles | /polybar/.config/polybar/scripts/bluetooth-utility | UTF-8 | 770 | 3.09375 | 3 | [] | no_license | #!/bin/bash
# Rofi-driven Bluetooth menu for a Polybar module.
# Requires: bluetoothctl, rofi, and the networkmenu.rasi theme below.
# BUG FIX: "~" does not expand inside quotes, so the original passed a
# literal "~/..." theme path to rofi; use $HOME explicitly.
dir="$HOME/.config/polybar/scripts/rofi"
# Second column of the "Powered:" line is "yes" or "no".
powered=$(bluetoothctl show | grep "Powered" | awk '{print $2}')
if [[ $powered == "no" ]]; then
    bt_power_on="Power on"
    bt_close="Close"
    choice=$(echo -e "Power on\nClose" | rofi -dmenu -i -no-fixed-num-lines -theme "$dir/networkmenu.rasi" -p "Bluetooth")
    if [[ $choice == "$bt_power_on" ]]; then
        bluetoothctl power on
    fi
    # BUG FIX: the original tested `[[ $choice==$bt_close ]]` — without
    # spaces that is one non-empty word and therefore always true, so the
    # script exited unconditionally here. Compare properly instead.
    if [[ $choice == "$bt_close" ]]; then
        exit
    fi
fi
if [[ $powered == "yes" ]]; then
    choice=$(echo -e "Scan\nPower off\nClose" | rofi -i -dmenu -no-fixed-num-lines -theme "$dir/networkmenu.rasi" -p "Bluetooth")
fi
#echo $powered | rofi -dmenu -theme $dir/networkmenu.rasi -p "Bluetooth" $powered
#bluetoothctl devices | awk '{print $3}' | rofi -dmenu -theme $dir/networkmenu.rasi -p "Bluetooth"
| true |
7beeb052558dcac29a3a488f22a20f9dcb58b975 | Shell | zeroc0d3/kafka-docker-playground | /connect/connect-cassandra-sink/cassandra-repro-aws-mcs.sh | UTF-8 | 3,321 | 3.4375 | 3 | [
"MIT",
"Apache-2.0"
] | permissive | #!/bin/bash
set -e
# Absolute directory of this script, independent of the caller's cwd.
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"
# Provides log() and other playground helpers.
source ${DIR}/../../scripts/utils.sh
# AWS MCS (Keyspaces) service-specific credentials must be provided.
if [ ! -z "$MCS_USERNAME" ] && [ ! -z "$MCS_PASSWORD" ]
then
     log "MCS credentials are correctly set"
else
     # https://docs.aws.amazon.com/mcs/latest/devguide/accessing.html#ssc
     log "Environment variables MCS_USERNAME and MCS_PASSWORD should be set !"
     log "You can get them using: aws iam create-service-specific-credential --user-name <user> --service-name cassandra.amazonaws.com"
     log "Check ServiceUserName and ServicePassword"
     exit 1
fi
# Optional positional overrides: keyspace and MCS endpoint.
KEYSPACE=${1:-dockerplayground}
CASSANDRA_HOSTNAME=${2:-cassandra.us-east-1.amazonaws.com}
cd ${DIR}/security
log "🔐 Generate keys and certificates used for SSL"
./certs-create.sh
cd ${DIR}
# Bring up the Kafka/Connect playground with this scenario's compose file.
${DIR}/../../environment/plaintext/start.sh "${PWD}/docker-compose.plaintext.aws-mcs.yml"
log "create a topic topic1"
docker exec broker kafka-topics --create --topic topic1 --partitions 1 --replication-factor 1 --zookeeper zookeeper:2181
log "Creating Cassandra Sink connector"
# PUT is idempotent: creates or updates the connector config.
curl -X PUT \
     -H "Content-Type: application/json" \
     --data '{
               "connector.class": "io.confluent.connect.cassandra.CassandraSinkConnector",
               "tasks.max": "1",
               "topics" : "topic1",
               "cassandra.contact.points" : "'"$CASSANDRA_HOSTNAME"'",
               "cassandra.port": "9142",
               "cassandra.keyspace" : "'"$KEYSPACE"'",
               "cassandra.username": "'"$MCS_USERNAME"'",
               "cassandra.password": "'"$MCS_PASSWORD"'",
               "cassandra.ssl.enabled": "true",
               "cassandra.ssl.truststore.path": "/etc/kafka/secrets/kafka.connect.truststore.jks",
               "cassandra.ssl.truststore.password": "confluent",
               "cassandra.consistency.level": "ONE",
               "confluent.license": "",
               "confluent.topic.bootstrap.servers": "broker:9092",
               "confluent.topic.replication.factor": "1",
               "transforms": "createKey",
               "transforms.createKey.fields": "f1",
               "transforms.createKey.type": "org.apache.kafka.connect.transforms.ValueToKey"
          }' \
     http://localhost:8083/connectors/cassandra-mcs-sink/config | jq .
log "Sleep 45 seconds"
sleep 45
log "Sending messages to topic topic1"
seq -f "{\"f1\": \"value%g\"}" 10 | docker exec -i connect kafka-avro-console-producer --broker-list broker:9092 --property schema.registry.url=http://schema-registry:8081 --topic topic1 --property value.schema='{"type":"record","name":"myrecord","fields":[{"name":"f1","type":"string"}]}'
sleep 10
log "Go to your MCS console to verify messages are in AWS MCS cassandra table mydockerplaygroundkeyspace.topic1"
log "SELECT * FROM dockerplayground.topic1;"
log "if there is no data, you might restart the connector"
# docker exec -e CASSANDRA_HOSTNAME="$CASSANDRA_HOSTNAME" -e KEYSPACE="$KEYSPACE" -e MCS_USERNAME="$MCS_USERNAME" -e MCS_PASSWORD="$MCS_PASSWORD" cassandra bash -c "export SSL_CERTFILE=/etc/kafka/secrets/kafka.cassandra.truststore.jks;cqlsh $CASSANDRA_HOSTNAME 9142 -u $MCS_USERNAME -p $MCS_PASSWORD --ssl -e 'select * from mydockerplaygroundkeyspace.topic1;'"
| true |
481aa3bebbc626a9592d271da172f2c5830302ea | Shell | LPTFF/lptff.github.io | /build.sh | UTF-8 | 2,093 | 3.46875 | 3 | [] | no_license | #!/usr/bin/env sh
# Abort on any error the script encounters.
# NOTE(review): this file uses [[ ]] (a bashism); run it with bash even
# though the upstream shebang says `sh`.
set -e
# Detect the current operating system from $OSTYPE.
echo "OSTYPE $OSTYPE"
if [[ "$OSTYPE" == "linux-gnu"* ]]; then
    # Linux
    echo "Detected Linux OS"
    echo "Installing Python..."
    # Install commands for Linux go here.
    sudo apt-get update
    sudo apt-get install python3
elif [[ "$OSTYPE" == "darwin"* ]]; then
    # macOS
    echo "Detected macOS"
    echo "Installing Python..."
    # Install commands for macOS go here.
    brew update
    brew install python@3
elif [[ "$OSTYPE" == "msys"* ]]; then
    # Windows (MSYS) is not supported.
    echo "Unsupported Windows"
    # Install commands for Windows would go here.
    exit 1
else
    # Any other operating system: fall back to the Linux install path.
    echo "Detected deafault OS"
    echo "Installing Python..."
    sudo apt-get update
    sudo apt-get install python3
    # exit 1
fi
# Verify that the installation succeeded.
python_version=$(python --version 2>&1)
echo "python_version $python_version"
if [[ "$python_version" == *"Python"* ]]; then
    echo "Python installation successful"
else
    echo "Python installation failed"
fi
# Install the Python packages the crawlers depend on.
pip install requests
pip install cryptography
pip install pyOpenSSL
pip install certifi
pip install beautifulsoup4
pip install pytz
pip install selenium
# Run the crawler scripts.
python ./src/crawl/welfare.py
python ./src/crawl/douban.py
python ./src/crawl/infzm.py
python ./src/crawl/juejin.py
python ./src/crawl/kuaishou.py
python ./src/crawl/weibo.py
python ./src/crawl/v2ex.py
# Get the current hour in Beijing time, and the local timezone name.
current_hour=$(TZ='Asia/Shanghai' date +"%H")
current_timezone=$(date +"%Z")
# Run the extra crawlers only in the 22:00–04:00 (Beijing time) window.
if [ "$current_hour" -ge 22 ] || [ "$current_hour" -lt 4 ]; then
    echo "北京时间$current_hour,满足条件执行更新特殊脚本"
    python ./src/crawl/leetCode.py
    python ./src/crawl/zhipin.py
else
    echo "北京时间$current_hour,不满足条件执行更新特殊脚本"
fi
| true |
d1e148a78d7aed83e39650aa7ba382cb7ecf72f9 | Shell | achinthau/Scripts | /REGISTRY/service.bak | UTF-8 | 2,195 | 3.359375 | 3 | [] | no_license | #!/bin/bash
# Load registry settings (REPOSITORY_IPURL) from the system config.
. /etc/registry
# Stop the packaged docker service, then launch dockerd manually so the
# local registry at $REPOSITORY_IPURL:5000 is trusted as insecure.
startservice() {
    service docker stop
    nohup /usr/bin/dockerd --insecure-registry $REPOSITORY_IPURL:5000 -H unix:///var/run/docker.sock &
}
# Stop the docker service, then kill any manually launched dockerd.
# getpid echoes "0" when no daemon is running, in which case there is
# nothing to kill.
stopservice() {
    service docker stop
    retval=$( getpid );
    if [ $retval = "0" ];then
        echo "";
    else
        kill $retval;
    fi
    # nohup /usr/bin/dockerd --insecure-registry $REPOSITORY_IPURL:5000 -H unix:///var/run/docker.sock &
}
# Restart dockerd: stop the service, kill any stray daemon, wait a
# moment, then relaunch with the insecure-registry flag.
restartservice() {
    service docker stop
    retval=$( getpid );
    if [ $retval = "0" ];then
        # BUG FIX: the original ran `kill 0`, which signals the entire
        # process group — i.e. this script itself. When no daemon is
        # running there is simply nothing to kill.
        :
    else
        kill $retval;
    fi
    sleep 3;
    nohup /usr/bin/dockerd --insecure-registry $REPOSITORY_IPURL:5000 -H unix:///var/run/docker.sock &
}
# Echo the PID(s) of a running dockerd, or "0" when none is found.
getpid() {
    local pids
    pids=$(pidof dockerd)
    echo ${pids:-0}
}
# Command dispatcher: service registry-docker {start|stop|restart|status}.
# NOTE(review): start/stop/restart all `exit 1` even on success, and the
# stop branch invokes stopservice twice — preserved as-is here.
case "$1" in
start)
    startservice
    # nohup /usr/bin/dockerd --insecure-registry $REPOSITORY_IPURL:5000 -H unix:///var/run/docker.sock &
    retval=$( getpid );
    if [ $retval = "0" ];then
        echo "Docker Service Cannot Start...";
    else
        echo "Docker Service Started " $retval;
    fi
    exit 1
    ;;
stop)
    stopservice
    retval=$( getpid );
    if [ $retval = "0" ];then
        echo "Docker Service Stoped";
    else
        echo "Docker Service Cannot Stoped " $retval;
    fi
    echo "Docker Service Stop..."
    stopservice
    exit 1
    ;;
restart)
    restartservice
    echo "Docker Service Stopping..";
    retval=$( getpid );
    if [ $retval = "0" ];then
        echo "Docker Service Cannot Start...";
    else
        echo "Docker Service Started " $retval;
    fi
    exit 1
    ;;
status)
    # getpid
    retval=$( getpid );
    #echo "directory not created"
    #echo $retval;
    if [ $retval = "0" ];then
        echo "Docker Service Not Running...";
    else
        echo "Docker Service Running " $retval;
    fi
    # check_init
    # status_of_proc -p "$DOCKER_SSD_PIDFILE" "$DOCKERD" "$DOCKER_DESC"
    ;;
*)
    echo "Usage: service docker {start|stop|restart|status}"
    exit 1
    ;;
esac
| true |
23f83c975c91cab1b86ba907fae84c0ab5f83d94 | Shell | seap-udea/micluster | /micluster-terms | UTF-8 | 660 | 4.09375 | 4 | [] | no_license | #!/bin/bash
# Scan a range of cluster terminal addresses with ping and record the
# reachable ones in <script-dir>/terms.list.
#
# Usage: micluster-terms <domain-base> <ini> <number>
dir=$(dirname "$0")
usage="Usage:\n
General: micluster-terms <domain-base> <ini> <number>\n
Example: micluster-terms 10.0.2 231 26
"
base=$1;shift
if [ "x$base" = "x" ];then
    echo "Error: no base provided"
    echo -e "$usage"
    exit 1
fi
ini=$1;shift
num=$1;shift
end=$((ini+num))
echo "Exploring terminals in the range $base.$ini to $base.$end"
echo -n "(press enter to continue)";read
termfile="$dir/terms.list"
# Truncate any previous scan results.
echo -n > "$termfile"
for nt in $(seq "$ini" "$end")
do
    terminal="$base.$nt"
    # BUG FIX: the original pinged the hard-coded prefix 10.0.2.$nt,
    # silently ignoring the <domain-base> argument it just parsed.
    if ping -c 2 "$base.$nt" &> /dev/null;then
        echo "$terminal up"
        echo "$terminal" >> "$termfile"
    else
        echo "$terminal down"
    fi
done
echo "Done."
| true |
cf15dbe9b081306373efd517540e0aa49227303a | Shell | cloudfoundry-community/redis-boshrelease | /jobs/redis/templates/bin/health_check | UTF-8 | 751 | 3.625 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/bash
set -e # exit immediately if a simple command exits with a non-zero status
set -u # report the usage of uninitialized variables
export JOB_DIR=/var/vcap/jobs/redis
# % disk full levels (rendered by BOSH from the job spec properties)
export DISK_CRITICAL_LEVEL=<%= p("health.disk.critical") %>
export DISK_WARNING_LEVEL=<%= p("health.disk.warning") %>
# ${1:-} instead of $1: under `set -u` a missing argument aborted with an
# "unbound variable" error before the usage message below could print.
case ${1:-} in
disk)
    volume=/var/vcap/store
    # Percentage used on the persistent disk, without the trailing '%'.
    persistent_disk_level=$(df | grep "$volume" | awk '{ print $5 }' | sed -e 's/%//')
    echo "Disk level $persistent_disk_level%"
    # Exit codes consumed by the caller: 2 = critical, 1 = warning, 0 = ok.
    if [[ $persistent_disk_level -ge $DISK_CRITICAL_LEVEL ]]; then
        exit 2
    fi
    if [[ $persistent_disk_level -ge $DISK_WARNING_LEVEL ]]; then
        exit 1
    fi
    exit 0
    ;;
*)
    echo "Usage: health_check {disk}"
    ;;
esac
exit 0
| true |
50810bdc9ce3dd636422d89f73705cf6b304cbfe | Shell | lzk90s/release-tool | /core/app.sh | UTF-8 | 6,137 | 3.765625 | 4 | [
"Apache-2.0"
] | permissive | #!/bin/sh
# Print usage help for the environment-variable driven CLI and exit 1.
# Relies on printMsg and $PROGRAM being provided by the sourcing script;
# each line also echoes the variable's current value for diagnostics.
usage() {
    printMsg "Usage: $PROGRAM"
    printMsg "    envs:"
    printMsg "    - RELEASE_VERSION 发布版本号 ($RELEASE_VERSION)"
    printMsg "    - NEXT_DEV_VERSION 下一个开发版本号 ($NEXT_DEV_VERSION)"
    printMsg "    - PUSH 是否推送到远程仓库 ($PUSH)"
    printMsg "    - PROJECTS 工程【可选,若为空,从PROJECT_FILE获取】 ($PROJECTS)"
    printMsg "    - PROJECT_FILE 工程文件【可选,若为空,取projects.txt】 ($PROJECT_FILE)"
    printMsg "    - SEND_RELEASE_LOG 是否发送releaselog ($SEND_RELEASE_LOG)"
    printMsg "    - ONLY_PUSH_CHANGED_PROJECT 是否仅推送变更的工程到远程git服务器 ($ONLY_PUSH_CHANGED_PROJECT)"
    printMsg "    - AUTO_INCREASE_VERSION 是否自动增加版本号 ($AUTO_INCREASE_VERSION)"
    exit 1
}
# Show a live one-line countdown before proceeding.
# Arguments: $1 - number of seconds to wait.
countdown() {
    local remaining=$1
    echo "wait ${remaining} seconds to start ......"
    while [ "$remaining" -gt 0 ]; do
        echo -n "$remaining"
        sleep 1
        remaining=$((remaining - 1))
        echo -ne "\r \r" # clear the current line
    done
}
# Main release routine: read configuration from the environment, release
# every listed project, optionally mail the release log and push the
# results to the remote git server.
# Relies on helpers defined elsewhere in the tool: printMsg, logInfo,
# logError, setLogFile, setDeploySaveDir, htmlRlog*, isLegalVersion,
# isBugfixVersion, getRepoDir, *ReleaseRoutine, hasUnresolvedIssues,
# dumpUnresolvedIssues, checkResult, doPush.
startApp() {
    local gitHost=${GIT_HOST:-""}
    local gitUser=${GIT_USER:-""}
    local gitPassword=${GIT_PASSWORD:-""}
    local releaseVersion=$RELEASE_VERSION
    local nextDevVersion=$NEXT_DEV_VERSION
    local push=${PUSH:-false}
    local projects=${PROJECTS:-}
    local projectFile=${PROJECT_FILE:-}
    local sendReleaseLog=${SEND_RELEASE_LOG:-false}
    local onlyPushChangedProject=${ONLY_PUSH_CHANGED_PROJECT:-false}
    local autoIncreaseVersion=${AUTO_INCREASE_VERSION:-false}
    local workDir=$TOP_DIR/data
    # NOTE(review): credentials end up embedded in the clone URL below.
    local gitBaseurl=http://$gitUser:$gitPassword@$gitHost
    local sendMailAddress=http://181.181.0.158:43234/notifyfile?topic=release-tool
    local checkpointFile=$workDir/checkpoint/checkpoint_$nextDevVersion.txt
    local releaseLogFile=$workDir/releaseLog.html
    local logFile=/dev/stdout
    # If no projects were given externally, read them from the project
    # file, dropping lines that start with '#'.
    if [ -z "$projects" ]; then
        if [ -f "$projectFile" ]; then
            projects=$(cat $projectFile | grep -v '^#' | sort)
        fi
    fi
    mkdir -p $workDir
    setLogFile $logFile
    #setCheckpointFile $checkpointFile
    setDeploySaveDir $workDir
    htmlRlogInit $releaseLogFile
    printMsg "-----------------------------------------------------------------"
    printMsg "gitHost                = $gitHost"
    printMsg "workDir                = $workDir"
    printMsg "releaseVersion         = $releaseVersion"
    printMsg "nextDevVersion         = $nextDevVersion"
    printMsg "push                   = $push"
    printMsg "sendReleaseLog         = $sendReleaseLog"
    printMsg "releaseLogFile         = $releaseLogFile"
    printMsg "projects               = $projects"
    printMsg "projectFile            = $projectFile"
    printMsg "onlyPushChangedProject = $onlyPushChangedProject"
    printMsg "autoIncreaseVersion    = $autoIncreaseVersion"
    printMsg "-----------------------------------------------------------------"
    # Required arguments must be non-empty.
    if
        [ -z "$gitUser" -o -z "$gitPassword" -o \
            -z "$releaseVersion" -o -z "$nextDevVersion" -o \
            -z "$projects" ]
    then
        usage
    fi
    # Validate that both version numbers are well-formed.
    if [ $(isLegalVersion $nextDevVersion) -eq 0 ]; then
        logError "the version $nextDevVersion is illegal!"
        exit 2
    fi
    if [ $(isLegalVersion $releaseVersion) -eq 0 ]; then
        logError "the version $releaseVersion is illegal!"
        exit 2
    fi
    # For a bugfix release, the minor version numbers must match.
    if [ $(isBugfixVersion $releaseVersion) -eq 1 -a "${releaseVersion%.*}" != "${nextDevVersion%.*}" ]; then
        logError "the minor number for $releaseVersion and $nextDevVersion is not equal"
        exit 1
    fi
    # Countdown so the operator can abort before anything happens.
    countdown 3
    htmlRlogBegin
    # Process every project; each line is "<group>,<project>".
    changedProjects=
    for line in $projects; do
        cd ${workDir}
        # Split the line into group and project name.
        local group=$(echo $line | cut -d',' -f1)
        local proj=$(echo $line | cut -d',' -f2)
        if [ -z "$group" -o -z "$proj" -o "$group" == "$proj" ]; then
            logError "invalid line $line"
            exit 3
        fi
        local repoUrl=$gitBaseurl$proj.git
        logInfo "process project $repoUrl"
        local repoDir=$(getRepoDir $repoUrl)
        mkdir -p $repoDir
        cd $repoDir
        if [ "$group" == "service" ]; then
            serviceReleaseRoutine $repoUrl $releaseVersion $nextDevVersion $autoIncreaseVersion $group
        else
            defaultReleaseRoutine $repoUrl $group master
        fi
        # A non-zero routine status marks the project as changed.
        if [ $? -gt 0 ]; then
            changedProjects="$changedProjects $proj"
        fi
    done
    htmlRlogEnd
    # Check whether any unresolved JIRA issues remain.
    if [ $(hasUnresolvedIssues) -gt 0 ]; then
        printMsg "------------------- 未解决的jira问题列表 -------------------"
        dumpUnresolvedIssues
        #exit 4
    fi
    # Send the release log by mail if requested.
    if [ "$sendReleaseLog" != "false" ]; then
        curl -f "file=@$releaseLogFile" $sendMailAddress
        checkResult "failed to send mail"
    fi
    # Push to the remote repositories (all projects, or only the changed
    # ones when ONLY_PUSH_CHANGED_PROJECT is set).
    if [ "$push" = "true" ]; then
        local projectsToPush=$projects
        if [ "$onlyPushChangedProject" = "true" ]; then
            projectsToPush=$changedProjects
        fi
        printMsg "projectsToPush = $projectsToPush"
        for p in $projectsToPush; do
            cd ${workDir}
            local repoUrl=$gitBaseurl$p.git
            logInfo "push project $repoUrl"
            local repoDir=$(getRepoDir $repoUrl)
            mkdir -p $repoDir
            cd $repoDir
            doPush $repoUrl
            checkResult "failed to push project $p"
        done
    fi
    # Collect deployment artifacts.
    logInfo "-------------- all succeed, enjoy:) --------------"
}
| true |
22ff7812dbc993ebac2b27fd54430d0a0a126424 | Shell | stoeffel/chromium-vim | /dist.sh | UTF-8 | 225 | 3.109375 | 3 | [
"MIT"
] | permissive | #!/bin/sh
# Build a release: regenerate the mappings page, copy everything except
# VCS/release artifacts into ./release, and zip it up.
cd "$(dirname "$0")" || exit 1
# [ -e ] instead of [[ -e ]]: the file runs under #!/bin/sh, where [[ is
# a bashism. Remove any previous release directory and zip (release*).
if [ -e release ]; then
  rm -r release*
fi
./pages/create_mappings_page.sh &&
mkdir release &&
# Word-splitting of the find output is intentional: each top-level path
# becomes a separate cp argument (paths with whitespace unsupported).
cp -r $(find . -maxdepth 1 | grep -E -v "^\.$|\.git|release") release &&
zip -r release.zip release
| true |
b2eb4900cf11a9acd88dd9e8d0405c4bf1dbb93a | Shell | sota1235/CNS-Status-Static | /install | UTF-8 | 3,626 | 3.828125 | 4 | [] | no_license | #!/bin/zsh
# variable
LOGIN_NAME=`whoami`
VIEW_PATH="printer"
# functions
function cecho { echo -e "\e[34m$*\e[m"; }
cecho "### Install start ###"
# Make public_html folder
if [ ! -d /home/${LOGIN_NAME}/public_html ]; then
cecho "### Make 'public_html' folder under your home directory ###"
mkdir /home/${LOGIN_NAME}/public_html
fi
# Make public folder for web application
cecho "### Make 'printer' folder under your 'public_html' folder ###"
if [ ! -d /home/${LOGIN_NAME}/public_html/${VIEW_PATH} ]; then
mkdir /home/${LOGIN_NAME}/public_html/${VIEW_PATH}
else
cecho "### The 'printer' folder is already exists ###"
cecho "### Are you sure you want to delete files under 'printer' folder? ###"
cecho "(yes/no)"
while :
do
read CONFIRM
case ${CONFIRM} in
"yes")
cecho "### Delete files and copy some files ###"
rm -r ~/public_html/printer/*
break
;;
"no")
cecho "### Then, enter folder name you like ###"
cecho "ex) 'CNS_printer'"
while :
do
read VIEW_PATH
case VIEW_PATH in
"")
cecho "### Please enter folder name ###"
;;
*)
cecho "### The folder name is ${VIEW_PATH} ###"
break
;;
esac
done
break
;;
*)
cecho "### Plese enter 'yes' or 'no' ###"
;;
esac
done
fi
# Generate files from Login Name and View Path
cecho "### Make files from your login name in 'lib' folder ###"
cp `pwd`/view/js/ajax.js `pwd`/lib/view/js/
cp `pwd`/view/js/jquery-migrate-1.2.1.min.js `pwd`/lib/view/js/
cp `pwd`/view/js/jquery-1.11.0.min.js `pwd`/lib/view/js/
sed s/#{your_login_name}/${LOGIN_NAME}/g `pwd`/view/js/script.js | \
sed s/#{printer}/${VIEW_PATH}/g > `pwd`/lib/view/js/script.js
sed s/#{your_login_name}/${LOGIN_NAME}/g `pwd`/cron/make_json | \
sed s/#{printer}/${VIEW_PATH}/g > `pwd`/lib/cron/make_json
sed s/#{your_login_name}/${LOGIN_NAME}/g `pwd`/static/json.php | \
sed s/#{printer}/${VIEW_PATH}/g > `pwd`/lib/static/json.php
sed s/#{your_login_name}/${LOGIN_NAME}/g \
`pwd`/cron/find_mail > `pwd`/lib/cron/find_mail
# Make files for crontab
cecho "### Make 'src' folder under your home directory### "
if [ ! -d /home/${LOGIN_NAME}/src ]; then
mkdir /home/${LOGIN_NAME}/src
fi
cp `pwd`/lib/cron/find_mail /home/${LOGIN_NAME}/src/
cp `pwd`/lib/cron/make_json /home/${LOGIN_NAME}/src/
chmod 755 /home/${LOGIN_NAME}/src/find_mail
chmod 755 /home/${LOGIN_NAME}/src/make_json
# Set .procmailrc
cecho "### Set .procmailrc ###"
if [ -e /home/${LOGIN_NAME}/.procmailrc ]; then
cecho "### Warming! You seem to set procmail before. You should set .procmailrc yourself or use cron."
else
sed s/#{your_login_name}/${LOGIN_NAME}/g \
`pwd`/procmail/.procmailrc > `pwd`/lib/procmail/.procmailrc
cp `pwd`/lib/procmail/.procmailrc /home/${LOGIN_NAME}/
chmod 644 /home/${LOGIN_NAME}/.procmailrc
fi
cecho "Checking mails"
/home/${LOGIN_NAME}/src/find_mail
python /home/${LOGIN_NAME}/src/make_json
# Copy files to public folder
cecho "### Is installoing... ###"
cp `pwd`/view/index.html ~/public_html/${VIEW_PATH}/
cp -R `pwd`/view/Flat-UI-master ~/public_html/${VIEW_PATH}/
cp -R `pwd`/lib/view/js ~/public_html/${VIEW_PATH}/
cp `pwd`/lib/static/json.php ~/public_html/${VIEW_PATH}/
cecho "### Installation is completed!! ###"
cecho "### You can access to the website ###"
cecho "### http://web.sfc.keio.ac.jp/~${LOGIN_NAME}/${VIEW_PATH}/index.html ###"
| true |
c482edac2b6e3a2889901024ce1eda28194eb4c8 | Shell | Marguelgtz/explorer | /.circleci/publish.sh | UTF-8 | 1,417 | 3.4375 | 3 | [
"Apache-2.0"
] | permissive | #! /bin/bash
# Derive the package version from the CI branch/build number and publish
# the WebGL build as the "decentraland-renderer" npm package.

# Major version encodes the branch: release=3, staging=2, master=1, else 0.
# (Replaces the original's nested-backtick `&&/||` chain.)
case "${CIRCLE_BRANCH:-}" in
  release) MAJOR=3 ;;
  staging) MAJOR=2 ;;
  master)  MAJOR=1 ;;
  *)       MAJOR=0 ;;
esac
export MAJOR
# Minor comes from the `version = "..."` line in Configuration.cs
# (the ".x.y" tail of the version string).
export MINOR=$(grep 'version = "[0-9.]\+' Assets/Scripts/MainScripts/DCL/Configuration/Configuration.cs | grep -o '\.[0-9.]\+')
# npm dist-tag: empty on master (published as "latest"), otherwise the
# branch name with non [a-zA-Z0-9-] characters replaced by '-'.
if [ "${CIRCLE_BRANCH:-}" == "master" ]; then
  TAG=""
else
  TAG=$(echo "$CIRCLE_BRANCH" | sed -e 's/[^a-zA-Z0-9-]/-/g')
fi
export TAG
export BUILD_VERSIONING=${MAJOR}${MINOR}.${CIRCLE_BUILD_NUM}$([ "$TAG" ] && echo "-$TAG")
echo "Building package.json for $BUILD_VERSIONING"
function publish() {
  # Build package.json
  # BUG FIX: abort if the build dir is missing instead of running
  # `npm publish` from whatever directory we happen to be in.
  cd "$BUILD_PATH" || exit 1
  echo "Build path is $BUILD_PATH and cwd is $PWD -- Tag is $TAG"
  echo '{"name": "decentraland-renderer", "version": "'${BUILD_VERSIONING}'", "license": "Apache-2.0", "devDependencies": { "npm": "5.6.0" } }' > package.json
  # Export the name of the package as a file
  echo 'module.exports = "'${BUILD_VERSIONING}'";' > index.js
  # Delete unnecessary files from the build (and old files from Unity 2019)
  rm -f game.js index.html Build/UnityLoader.js Build/unity.json Build/unity.wasm.framework.unityweb Build/unity.wasm.code.unityweb
  # Move all `.unityweb` files into the root build folder
  mv Build/* .
  # Publish on npm
  npx npm publish --tag $([ "$CIRCLE_BRANCH" == "master" ] && echo "latest" || echo $TAG)
}
publish
413d6ef860177fb966a21aff14b13d28659f3604 | Shell | yuga528/network-sec | /network-sec-430.sh | UTF-8 | 11,536 | 3.921875 | 4 | [] | no_license | #!/bin/bash
#
# Shell script to verify the settings on the network, subnet, firewalls and routes.
#
network=stanford-odysseus-net
subNetwork=subnet1
firewallRule1=stanford-odysseus-net-allow-nat-vm-gcp-services
firewallRule2=stanford-odysseus-net-allow-nat-vm-ssh
firewallRule3=stanford-odysseus-net-allow-gcp-access-internal-vm-ssh
firewallRule4=stanford-odysseus-net-allow-dataproc
firewallRule5=stanford-odysseus-net-deny-nat-vm-ftp-dns-smb-ntp-pop-imap
routes=stanford-odysseus-net-nat-vm-route
project_id=$1
network_ip=$2
Usage()
{
echo "Usage : ./ProgramName ProjectID Stanford_Network_IP"
}
project_id=$1
if [ $# -ne 2 ] ; then
echo "Missing mandatory arguments"
Usage
exit 1
fi
if [ "$network_ip" == "" ] ; then
echo "Stanford Network IP is missing"
Usage
exit 1
fi
gcloud compute networks list | egrep --word-regexp $network &> /dev/null
status=`echo $?`
if [[ "$status" -ne "0" ]] ; then
echo "Network doesn't exist"
exit 1
else
echo "Network $network exists"
fi
gcloud compute networks subnets list | egrep --word-regexp $subNetwork &> /dev/null
status=`echo $?`
if [[ "$status" -ne "0" ]] ; then
echo "Subnetwork doesn't exist"
exit 1
else
echo "Subnetwork $subNetwork exists"
fi
echo "---------------------------------------------------"
gcloud compute firewall-rules describe $firewallRule1 &> /dev/null
status=`echo $?`
if [[ "$status" -ne "0" ]]; then
echo "Firewall rule $firewallRule1 for ingress doesn't exist"
else
echo "Firewall rule $firewallRule1 for ingress exists"
rules=$(gcloud compute firewall-rules list --format="table(network, sourceTags.list(), targetTags,
allowed[].map().firewall_rule().list():label=ALLOW)" --filter="name=$firewallRule1" | grep -v NETWORK)
net=$(echo $rules | awk '{print $1}')
if [[ $net == "stanford-odysseus-net" ]]; then
echo "Network is stanford-odysseus-net"
else
echo "Found network $net"
echo "Network is not stanford-odysseus-net"
fi
source_tag=$(echo $rules | awk '{print $2}')
if [[ $source_tag == "gcp-access-internal-vm" ]] ; then
echo "Source tag is gcp-access-internal-vm"
else
echo "Found source tag $source_tag"
echo "Source tag is gcp-access-internal-vm"
fi
target=$(echo $rules | awk '{print $3}' | cut -d "'" -f 2)
if [[ $target == "nat-vm" ]]; then
echo "Target tag is nat-vm"
else
echo "Found target tag is $target"
echo "Target tag is nat-vm"
fi
echo "Allowed ports are tcp:443,tcp:3306,tcp:5432"
opened_ports=( $(echo $rules | awk {'print $4'}))
echo "Opened ports : $opened_ports"
ports=( $(echo $rules | cut -d " " -f 8 | tr ',' ' '))
for i in ${ports[@]}
do
if [ $i == "tcp:443" ] || [ $i == "tcp:3306" ] || [ $i == "tcp:5432" ]; then
echo " $i is allowed"
else
echo " $i is not allowed"
fi
done
fi
echo "---------------------------------------------------"
gcloud compute firewall-rules describe $firewallRule2 &> /dev/null
status=`echo $?`
if [[ "$status" -ne "0" ]]; then
echo "Firewall rule $firewallRule2 for ingress doesn't exist"
else
echo "Firewall rule $firewallRule2 for ingress exists"
rules=$(gcloud compute firewall-rules list --format="table(network, sourceRanges.list(), targetTags,
allowed[].map().firewall_rule().list():label=ALLOW)" --filter="name=$firewallRule2" | grep -v NETWORK)
net=$(echo $rules | awk '{print $1}')
if [[ $net == "stanford-odysseus-net" ]]; then
echo "Network is stanford-odysseus-net"
else
echo "Found network $net"
echo "Network is not stanford-odysseus-net"
fi
source_ip=$(echo $rules | awk '{print $2}')
if [[ $source_ip == "$network_ip" ]] ; then
echo "Found Network IP $network_ip"
else
echo "Unable to find Network IP $network_ip"
fi
target=$(echo $rules | awk '{print $3}' | cut -d "'" -f2)
if [[ $target == "nat-vm" ]]; then
echo "Target tag is $target"
else
echo "Found target tag $target"
echo "Target tag is not nat-vm"
fi
echo "Allowed ports are tcp:22"
opened_ports=( $(echo $rules | awk {'print $4'} ))
echo "Opened ports : $opened_ports"
ports=( $(echo $rules | cut -d " " -f 8 | tr ',' ' ') )
for i in ${ports[@]}
do
if [ $i == "tcp:22" ]; then
echo " $i is allowed"
else
echo " $i is not allowed"
fi
done
fi
echo "---------------------------------------------------"
gcloud compute firewall-rules describe $firewallRule3 &> /dev/null
status=`echo $?`
if [[ "$status" -ne "0" ]]; then
echo "Firewall rule $firewallRule3 for ingress doesn't exist"
else
echo "Firewall rule $firewallRule3 for ingress exists"
rules=$(gcloud compute firewall-rules list --format="table(network, sourceTags.list(), targetTags,
allowed[].map().firewall_rule().list():label=ALLOW)" --filter="name=$firewallRule3" | grep -v NETWORK)
net=$(echo $rules | awk '{print $1}')
if [[ $net == "stanford-odysseus-net" ]]; then
echo "Network is stanford-odysseus-net"
else
echo "Found network $net"
echo "Network is not stanford-odysseus-net"
fi
source_tag=$(echo $rules | awk '{print $2}')
if [[ $source_tag == "nat-vm" ]] ; then
echo "Source tag is nat-vm"
else
echo "Found source tag $source_tag"
echo "Source tag is nat-vm"
fi
target=$(echo $rules | awk '{print $3}' | cut -d "'" -f 2)
if [[ $target == "gcp-access-internal-vm" ]]; then
echo "Target tag is gcp-access-internal-vm"
else
echo "Found target tag is $target"
echo "Target tag is gcp-access-internal-vm"
fi
echo "Allowed ports are tcp:22"
opened_ports=( $(echo $rules | awk {'print $4'}))
echo "Opened ports : $opened_ports"
ports=( $(echo $rules | cut -d " " -f 8 | tr ',' ' ') )
for i in ${ports[@]}
do
if [ $i == "tcp:22" ]; then
echo " $i is allowed"
else
echo " $i is not allowed"
fi
done
fi
echo "---------------------------------------------------"
# Ingress rule $firewallRule4: must exist on stanford-odysseus-net and allow
# all tcp/udp ports from 10.0.0.0/24 to the "service-dataproc" service account.
gcloud compute firewall-rules describe $firewallRule4 &> /dev/null
status=`echo $?`
if [[ "$status" -ne "0" ]]; then
	echo "Firewall rule $firewallRule4 for ingress doesn't exist"
else
	echo "Firewall rule $firewallRule4 for ingress exists"
	# One table row (header stripped); unquoted $rules is word-split on purpose
	# so awk can address positional columns.
	rules=$(gcloud compute firewall-rules list --format="table(network, sourceRanges.list(), targetServiceAccounts.list(),
	allowed[].map().firewall_rule().list():label=ALLOW)" --filter="name=$firewallRule4" | grep -v NETWORK)
	net=$(echo $rules | awk '{print $1}')
	if [[ $net == "stanford-odysseus-net" ]]; then
		echo "Network is stanford-odysseus-net"
	else
		echo "Found network $net"
		echo "Network is not stanford-odysseus-net"
	fi
	source_range=$(echo $rules | awk '{print $2}')
	if [[ $source_range == "10.0.0.0/24" ]] ; then
		echo "Source Range is 10.0.0.0/24"
	else
		echo "Found source range $source_range"
		echo "Source Range is not 10.0.0.0/24"
	fi
	# Service account column looks like an email; keep only the local part.
	target=$(echo $rules | awk '{print $3}' | cut -d'@' -f1)
	if [[ $target == "service-dataproc" ]]; then
		echo "Target service account is service-dataproc"
	else
		echo "Found target service account is $target"
		echo "Target service account is not service-dataproc"
	fi
	echo "Allowed ports are tcp:1-65535,udp:1-65535"
	# NOTE(review): after awk collapses whitespace, `cut -f 8` on a single
	# token is a pass-through; confirm this matches the table layout.
	opened_ports=( $(echo $rules | awk '{print $4}' | cut -d " " -f 8 ))
	echo "Opened ports : $opened_ports"
	ports=( $(echo $rules | awk '{print $4}' | cut -d " " -f 8 | tr ',' ' ') )
	for i in ${ports[@]}
	do
	  if [ $i == "tcp:1-65535" ] || [ $i == "udp:1-65535" ]; then
		echo " $i is allowed"
	  fi
	done
fi
echo "---------------------------------------------------"
# Egress rule $firewallRule5: must exist on stanford-odysseus-net and deny a
# set of well-known ports from the "nat-vm" tag to 0.0.0.0/0.
gcloud compute firewall-rules describe $firewallRule5 &> /dev/null
status=$?
if [[ "$status" -ne "0" ]]; then
	echo "Firewall rule $firewallRule5 for egress doesn't exist"
else
	echo "Firewall rule $firewallRule5 for egress exists"
	rules=$(gcloud compute firewall-rules list --format="table(network, targetTags.list():label=TARGET_TAGS,
	destinationRanges.list():label=DEST_RANGES, denied[].map().firewall_rule().list():label=DENY)" --filter="name=$firewallRule5" | grep -v NETWORK)
	net=$(echo $rules | awk '{print $1}')
	if [[ $net == "stanford-odysseus-net" ]]; then
		echo "Network is stanford-odysseus-net"
	else
		echo "Found network $net"
		echo "Network is not stanford-odysseus-net"
	fi
	target=$(echo $rules | awk {'print $2'})
	if [[ $target == "nat-vm" ]]; then
		echo "Target tag is nat-vm"
	else
		echo "Found target tag is $target"
		echo "Target tag is not nat-vm"
	fi
	dest_range=$(echo $rules | awk {'print $3'})
	if [[ $dest_range == "0.0.0.0/0" ]] ; then
		echo "Destination Range is 0.0.0.0/0"
	else
		# BUGFIX: previously printed the undefined variable $dest.
		echo "Destination range is $dest_range"
	fi
	denied_ports=( $(echo $rules | awk {'print $4'} ))
	echo "Denied ports : $denied_ports"
	# BUGFIX: the loop previously iterated the stale $ports array parsed for
	# the previous (ingress) rule; split THIS rule's DENY list instead.
	ports=( $(echo $rules | awk '{print $4}' | tr ',' ' ') )
	for i in ${ports[@]}
	do
	  if [ $i == "tcp:21" ] || [ $i == "tcp:53" ] || [ $i == "tcp:119" ] || [ $i == "tcp:445" ] || [ $i == "tcp:143" ] || [ $i == "tcp:993" ] || [ $i == "udp:53" ]; then
		echo "$i"
	  fi
	done
fi
echo "---------------------------------------------------"
# Custom route $routes: must send 0.0.0.0/0 for tag "gcp-access-internal-vm"
# through the nat-vm instance with priority 500.
gcloud compute routes describe $routes &> /dev/null
status=$?
if [[ "$status" -ne "0" ]]; then
	echo "Router $routes doesn't exist"
else
	echo "Router $routes exists"
	# $routes is reused here: it was the route name, now it holds the table row.
	routes=$(gcloud compute routes list --format="table(network, destRange, nextHopInstance, tags, priority )" --filter="name=$routes" | grep -v NETWORK)
	# BUGFIX: previously parsed $rules (left over from the firewall checks)
	# instead of the route row in $routes.
	net=$(echo $routes | awk '{print $1}')
	if [[ $net == "stanford-odysseus-net" ]]; then
		echo "Network is stanford-odysseus-net"
	else
		echo "Found network is $net"
		echo "Network is not stanford-odysseus-net"
	fi
	destRange=$(echo $routes | awk {'print $2'})
	if [[ $destRange == "0.0.0.0/0" ]] ; then
		echo "Destination IP Ranges is 0.0.0.0/0 "
	else
		echo "Found destination IP ranges is $destRange"
		echo "Destination IP Ranges is not 0.0.0.0/0"
	fi
	nextHop=$(echo $routes | awk {'print $3'})
	if [[ $nextHop == "https://www.googleapis.com/compute/v1/projects/"$project_id"/zones/us-west1-a/instances/nat-vm" ]]; then
		echo "Next hop is nat-vm"
	else
		echo "Next Hop is $nextHop"
	fi
	tags=$(echo $routes | awk {'print $4'} | cut -d "'" -f 2)
	if [[ $tags == "gcp-access-internal-vm" ]] ; then
		echo "Instance tag is gcp-access-internal-vm"
	else
		echo "Found instance tag is $tags"
		echo "Instance tag is not gcp-access-internal-vm"
	fi
	priority=$(echo $routes | awk {'print $5'})
	if [[ $priority == "500" ]] ; then
		echo "Priority set to 500"
	else
		# BUGFIX: fixed "priotity" typo and the missing "not" in the mismatch message.
		echo "Found priority is $priority"
		echo "Priority is not set to 500"
	fi
fi
exit 0
| true |
2575880e085e55a48cb5a42b1e3e38e65c2b9654 | Shell | caguerra/Burkardt-Fortran-90 | /f_src/asa241_test/asa241_test.sh | UTF-8 | 369 | 2.921875 | 3 | [] | no_license | #! /bin/bash
#
# Build and run the ASA241 test program.
# BUGFIX: the error paths used a bare "exit", which propagates the preceding
# echo's status (0), so failures looked like success to callers/CI.
gfortran -c -Wall asa241_test.f90
if [ $? -ne 0 ]; then
  echo "Compile error."
  exit 1
fi
#
gfortran -o asa241_test asa241_test.o $HOME/lib/asa241.o
if [ $? -ne 0 ]; then
  echo "Load error."
  exit 1
fi
rm asa241_test.o
#
./asa241_test > asa241_test.txt
if [ $? -ne 0 ]; then
  echo "Run error."
  exit 1
fi
rm asa241_test
#
echo "Normal end of execution."
| true |
1214fc5075d6a553abbdcc5181d9b307f65ab79c | Shell | Cardoso1994/dotfiles | /qtile/scripts/autostart.sh | UTF-8 | 1,807 | 2.78125 | 3 | [] | no_license | #!/bin/bash
# Launch "$@" in the background unless a process matching the name in $1 is
# already running (pgrep's PID output is discarded to keep startup quiet).
function run {
  if ! pgrep "$1" >/dev/null ;
  then
    # BUGFIX: quote "$@" so arguments containing spaces stay intact.
    "$@" &
  fi
}
# Qtile autostart: each long-running helper is launched in the background so
# the window manager's startup is not blocked.
#Set your native resolution IF it does not exist in xrandr
#More info in the script
#run $HOME/.config/qtile/scripts/set-screen-resolution-in-virtualbox.sh
#Find out your monitor name with xrandr or arandr (save and you get this line)
#xrandr --output VGA-1 --primary --mode 1360x768 --pos 0x0 --rotate normal
#xrandr --output DP2 --primary --mode 1920x1080 --rate 60.00 --output LVDS1 --off &
#xrandr --output LVDS1 --mode 1366x768 --output DP3 --mode 1920x1080 --right-of LVDS1
#xrandr --output HDMI2 --mode 1920x1080 --pos 1920x0 --rotate normal --output HDMI1 --primary --mode 1920x1080 --pos 0x0 --rotate normal --output VIRTUAL1 --off
#change your keyboard if you need it
#setxkbmap -layout be
# Authentication dialog
/usr/lib/polkit-gnome/polkit-gnome-authentication-agent-1 &
# bluetooth
# exec_always --no-startup-id blueberry-tray &
# network
nm-applet &
# num lock activated
numlockx on &
# redshift (fixed one-shot color temperature, no location polling)
redshift -P -O 4500 &
# tap to click
/home/cardoso/.config/scripts/pad_on_off.sh click &
# volume
# exec --no-startup-id volumeicon
# sets wallpaper
nitrogen --restore &
# udiskie for automounting external drives
udiskie &
# keyboard speed up (300 ms delay, 50 repeats/s)
xset r rate 300 50 &
# compositor
# compton -i 0.98 --config ~/.config/i3/picom.config &
# picom &
###################################################
#########      system applications    #################
###################################################
# BUGFIX: "exec_always" is i3/sway *config* syntax, not a shell command; in a
# bash autostart script the program must be launched directly.
xfce4-power-manager &
# omitted next line to get super key to bring up the menu in xfce and avoid
# error then in i3
# IF xfsettingsd is activated you can not change themes
#exec --no-startup-id xfsettingsd &
/usr/lib/xfce4/notifyd/xfce4-notifyd &
# exec_always --no-startup-id dunst
| true |
80a8120248be12a6be115238392ed06d0799945f | Shell | zgtman/fusion_genes | /star_fusion_all.sh | UTF-8 | 692 | 3.203125 | 3 | [] | no_license | #!/bin/bash
# Run STAR-Fusion on every paired-end sample in the current directory and
# collect the fusion-prediction tables; total runtime is reported at the end.
source /var/anaconda3/etc/profile.d/conda.sh
T="$(date +%s)"
source fusion_genes/config
echo "STAR-FUSION ALL IN WORKFLOW"
conda activate star-fusion_env
# Derive unique sample prefixes by stripping the fixed-length fastq suffix
# ("_L001_R?_001.fastq.gz" is 21 chars; rev/cut/rev trims it from the end).
for i in $(ls *trim*.fastq.gz | rev | cut -c 22- | rev | sort | uniq)
do
name=${i%_L001*}
echo "INFO: Analyzing file: $i"
STAR-Fusion \
--genome_lib_dir $LIB_DIR \
--left_fq ${i}_L001_R1_001.fastq.gz \
--right_fq ${i}_L001_R2_001.fastq.gz \
--output_dir "$name"_star_fusion_output
done
# Rename each prediction table after its sample and move it out of the run dir.
for i in *_fusion_output; do mv $i/star-fusion.fusion_predictions.tsv "$i"/${i%_trim_star*}_fusion_prediction.tsv; done
conda deactivate
T="$(($(date +%s)-T))"
echo "INFO: Time of STAR-FUSION ALL-IN-ONE in seconds: ${T} s"
| true |
b6d0590305e3ab9061d53c3938b6d3c54dc73d1b | Shell | karaage-kkkris/aws_ami_cross_account | /packer/setup.sh | UTF-8 | 1,156 | 3.1875 | 3 | [] | no_license | #!/bin/bash
set -e
# Packer provisioner: install Docker (pinned containerd) and awscli on Ubuntu.
lsb_release -a
echo "===== Installing Docker key ====="
curl -fsSL https://download.docker.com/linux/ubuntu/gpg -o /tmp/docker.key
sudo APT_KEY_DONT_WARN_ON_DANGEROUS_USAGE=DontWarn apt-key add /tmp/docker.key
rm /tmp/docker.key
echo "===== Adding Docker repository ====="
sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
echo "===== Updating repositories ====="
sudo apt-get update
echo "===== Installing APT software ====="
# BUGFIX: software-properties-common was listed twice; duplicate removed.
sudo DEBIAN_FRONTEND=noninteractive apt-get install -y \
  build-essential \
  software-properties-common \
  apt-transport-https \
  ca-certificates \
  curl \
  gnupg-agent \
  python3-pip
# Pin at version 1.4.4-1 since there is a known issue in version 1.4.6-1
# Link: containerd/containerd#5547
sudo apt-get install containerd.io=1.4.4-1
sudo DEBIAN_FRONTEND=noninteractive apt-get install -y \
  docker-ce docker-ce-cli
echo "===== Installing awscli ====="
sudo pip3 install awscli
echo "===== Setting Docker group permissions ====="
sudo usermod -a -G docker ubuntu
echo "Software installation complete"
| true |
bd73f8eddcaac6a0313601c25ea1435d036273ba | Shell | hyperledger/fabric-samples | /full-stack-asset-transfer-guide/check.sh | UTF-8 | 5,748 | 3.671875 | 4 | [
"Apache-2.0",
"CC-BY-4.0"
] | permissive | #!/usr/bin/env bash
# Workshop preflight: every check prints a ✅/⚠️ line and accumulates the
# overall status in EXIT (0 = all tools present and configured).
SUCCESS="✅"
WARN="⚠️ "
EXIT=0
# /tmp/cmdpath captures each tool's resolved path for the report line.
if ! command -v docker &> /tmp/cmdpath
then
    echo "${WARN} Please install Docker; suggested install commands:"
    EXIT=1
else
    echo -e "${SUCCESS} Docker found:\t$(cat /tmp/cmdpath)"
fi
# kubectl: suggest the platform-specific install command when missing, and
# when present verify the client minor version is recent enough (>= 1.24).
KUBECTL_VERSION=v1.24.4 # $(curl -L -s https://dl.k8s.io/release/stable.txt)
if ! command -v kubectl &> /tmp/cmdpath
then
    echo "${WARN} Please install kubectl if you want to use k8s; suggested install commands:"
    if [ $(uname -s) = Darwin ]; then
        if [ $(uname -m) = arm64 ]; then
            echo "curl -LO https://dl.k8s.io/release/${KUBECTL_VERSION}/bin/darwin/arm64/kubectl"
            echo "chmod +x ./kubectl"
            echo "sudo mv ./kubectl /usr/local/bin/kubectl"
            echo "sudo chown root: /usr/local/bin/kubectl"
        else
            echo "curl -LO https://dl.k8s.io/release/${KUBECTL_VERSION}/bin/darwin/amd64/kubectl"
            echo "chmod +x ./kubectl"
            echo "sudo mv ./kubectl /usr/local/bin/kubectl"
            echo "sudo chown root: /usr/local/bin/kubectl"
        fi
    else
        echo "curl -LO https://dl.k8s.io/release/${KUBECTL_VERSION}/bin/linux/amd64/kubectl"
        echo "sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl"
    fi
    EXIT=1
else
    echo -e "${SUCCESS} kubectl found:\t$(cat /tmp/cmdpath)"
    # Extract version strings from the YAML output at fixed column offsets.
    KUBECTL_CLIENT_VERSION=$(kubectl version --client --output=yaml | grep gitVersion | cut -c 15-)
    KUBECTL_CLIENT_MINOR_VERSION=$(kubectl version --client --output=yaml | grep minor | cut -c 11-12)
    if [ "${KUBECTL_CLIENT_MINOR_VERSION}" -lt "24" ]; then
        echo -e "${WARN} Found kubectl client version ${KUBECTL_CLIENT_VERSION}, which may be out of date. Please ensure client version >= ${KUBECTL_VERSION}"
        EXIT=1
    fi
fi
# The next checks all follow the same pattern: if the tool is missing, print
# copy-pasteable install commands (platform-specific where relevant) and mark
# the run as failed via EXIT=1.
# Install kind
KIND_VERSION=0.14.0
if ! command -v kind &> /tmp/cmdpath
then
    echo "${WARN} Please install kind; suggested install commands:"
    echo
    if [ $(uname -s) = Darwin ]; then
        if [ $(uname -m) = arm64 ]; then
            echo "sudo curl --fail --silent --show-error -L https://kind.sigs.k8s.io/dl/v${KIND_VERSION}/kind-darwin-arm64 -o /usr/local/bin/kind"
        else
            echo "sudo curl --fail --silent --show-error -L https://kind.sigs.k8s.io/dl/v${KIND_VERSION}/kind-darwin-amd64 -o /usr/local/bin/kind"
        fi
    else
        echo "sudo curl --fail --silent --show-error -L https://kind.sigs.k8s.io/dl/v${KIND_VERSION}/kind-linux-amd64 -o /usr/local/bin/kind"
    fi
    echo "sudo chmod 755 /usr/local/bin/kind"
    echo
    EXIT=1
else
    echo -e "${SUCCESS} kind found:\t\t$(cat /tmp/cmdpath)"
fi
# Install k9s
K9S_VERSION=0.25.3
if ! command -v k9s &> /tmp/cmdpath
then
    echo "${WARN} Please install k9s; suggested install commands:"
    echo
    if [ $(uname -s) = Darwin ]; then
        if [ $(uname -m) = arm64 ]; then
            echo "curl --fail --silent --show-error -L https://github.com/derailed/k9s/releases/download/v${K9S_VERSION}/k9s_Darwin_arm64.tar.gz -o /tmp/k9s_Darwin_arm64.tar.gz"
            echo "tar -zxf /tmp/k9s_Darwin_arm64.tar.gz -C /usr/local/bin k9s"
        else
            echo "curl --fail --silent --show-error -L https://github.com/derailed/k9s/releases/download/v${K9S_VERSION}/k9s_Darwin_x86_64.tar.gz -o /tmp/k9s_Darwin_x86_64.tar.gz"
            echo "tar -zxf /tmp/k9s_Darwin_x86_64.tar.gz -C /usr/local/bin k9s"
        fi
    else
        echo "curl --fail --silent --show-error -L https://github.com/derailed/k9s/releases/download/v${K9S_VERSION}/k9s_Linux_x86_64.tar.gz -o /tmp/k9s_Linux_x86_64.tar.gz"
        echo "tar -zxf /tmp/k9s_Linux_x86_64.tar.gz -C /usr/local/bin k9s"
    fi
    echo "sudo chown root /usr/local/bin/k9s"
    echo "sudo chmod 755 /usr/local/bin/k9s"
    echo
    EXIT=1
else
    echo -e "${SUCCESS} k9s found:\t\t$(cat /tmp/cmdpath)"
fi
# Install just
JUST_VERSION=1.2.0
if ! command -v just &> /tmp/cmdpath
then
    echo "${WARN} Please install just; suggested install commands:"
    echo "curl --proto '=https' --tlsv1.2 -sSf https://just.systems/install.sh | bash -s -- --tag ${JUST_VERSION} --to /usr/local/bin"
    EXIT=1
else
    echo -e "${SUCCESS} Just found:\t\t$(cat /tmp/cmdpath)"
fi
# Install weft
if ! command -v weft &> /tmp/cmdpath
then
    echo "${WARN} Please install weft; suggested install commands:"
    echo "npm install -g @hyperledger-labs/weft"
    EXIT=1
else
    echo -e "${SUCCESS} weft found:\t\t$(cat /tmp/cmdpath)"
fi
# Install jq
if ! command -v jq &> /tmp/cmdpath
then
    echo "${WARN} Please install jq; suggested install commands:"
    # BUGFIX: the suggestion named the non-existent "apt-update"/"apt-install"
    # commands; the correct invocations are "apt update" / "apt install".
    echo "sudo apt update && sudo apt install -y jq"
    EXIT=1
else
    echo -e "${SUCCESS} jq found:\t\t$(cat /tmp/cmdpath)"
fi
# Fabric "peer" binary: besides checking presence, actually execute it, since
# a binary built for the wrong architecture resolves but fails to run.
if ! command -v peer &> /tmp/cmdpath
then
    echo "${WARN} Please install the peer; suggested install commands:"
    echo "curl -sSL https://raw.githubusercontent.com/hyperledger/fabric/main/scripts/install-fabric.sh | bash -s -- binary"
    echo 'export WORKSHOP_PATH=$(pwd)'
    echo 'export PATH=${WORKSHOP_PATH}/bin:$PATH'
    echo 'export FABRIC_CFG_PATH=${WORKSHOP_PATH}/config'
    EXIT=1
else
    echo -e "${SUCCESS} peer found:\t\t$(cat /tmp/cmdpath)"
    # double-check that the peer binary is compiled for the correct arch. This can occur when installing fabric
    # binaries into a multipass VM, then running the Linux binaries from a Mac or windows Host OS via the volume share.
    peer version &> /dev/null
    rc=$?
    if [ $rc -ne 0 ]; then
        echo -e "${WARN} Could not execute peer. Was it compiled for the correct architecture?"
        peer version
    fi
fi
# Verify that the environment variable named by $1 is set and points to an
# existing directory; print a success/warning line and record failures in EXIT.
function must_declare() {
    local varname=$1
    local value=${!varname}
    if [[ -d "$value" ]]; then
        echo -e "${SUCCESS} ${varname}:\t${value}"
    else
        echo "${WARN} ${varname} must be set to a directory"
        EXIT=1
    fi
}
# The workshop env vars must point at real directories for later steps.
must_declare "FABRIC_CFG_PATH"
must_declare "WORKSHOP_PATH"
# Clean up the scratch file used by the tool checks; ignore if absent.
rm /tmp/cmdpath &> /dev/null
exit $EXIT
| true |
b221f98db35ef4463f27f7b73b3a3e6cedaf869f | Shell | ci2c/code | /scripts/matthieu/DTI_Nb_Fibers_ROI.sh | UTF-8 | 1,468 | 3.421875 | 3 | [] | no_license | #!/bin/bash
# Count the tractography fibres crossing a ROI (MATLAB batch call below) and
# append the result to a per-subject summary file.
if [ $# -lt 6 ]
then
	echo ""
	echo "Usage: DTI_Nb_Fibers_ROI.sh -roi <NameRoi> -subjid <SubjId> -base <DtiNumber> -od <OutputDir> -lmax <NbHarmonic> -Nfiber <NbFibers>"
	echo ""
	echo "  -roi                         : Name of the ROI used for Connectum"
	echo "  -subjid                      : Subject ID"
	echo "  -base                        : Dti number"
	echo "  -od                          : Path to output directory (processing results)"
	echo "  -lmax                        : Maximum harmonic order"
	echo "  -Nfiber                      : Number of fibers generated"
	echo ""
	echo "Usage: DTI_Nb_Fibers_ROI.sh -roi <NameRoi> -subjid <SubjId> -base <DtiNumber> -od <OutputDir> -lmax <NbHarmonic> -Nfiber <NbFibers>"
	echo ""
	exit 1
fi
## I/O management
# NOTE(review): arguments are consumed positionally ($1..$6), yet the usage
# text shows "-flag value" pairs — confirm how callers actually invoke this.
ROI=$1
SUBJ_ID=$2
BASE=$3
OUTPUT_DIR=$4
lmax=$5
Nfiber=$6
if [ ! -e ${OUTPUT_DIR}/${SUBJ_ID}/Connectum_${BASE}/${ROI}_Color.tck ]
then
matlab -nodisplay <<EOF
NbFibres = getVolConnectMatrix('${OUTPUT_DIR}/${SUBJ_ID}/Connectum_${BASE}/r${ROI}b_${BASE}_LAS.nii', '${OUTPUT_DIR}/${SUBJ_ID}/dti_tracto/whole_brain_${lmax}_${Nfiber}_${BASE}.tck', '${OUTPUT_DIR}/${SUBJ_ID}/Connectum_${BASE}', '${ROI}');
fid = fopen('${OUTPUT_DIR}/${SUBJ_ID}/Connectum_${BASE}/NbFibres_${SUBJ_ID}_${BASE}.txt', 'a');
fprintf(fid, '${ROI} : %d fibres\n', NbFibres);
fclose(fid);
% tmp = [${ROI}, num2str(NbFibres)];
% xlswrite('${OUTPUT_DIR}/${SUBJ_ID}/Connectum_${BASE}/NbFibres_${SUBJ_ID}_${BASE}.xls',tmp);
EOF
# echo "${ROI} : ${NbFibres} fibres" >> ${OUTPUT_DIR}/${SUBJ_ID}/Connectum_${BASE}/NbFibres_${SUBJ_ID}_${BASE}.txt
fi | true |
3162e06d98744162d1e7112b621df63a4265e971 | Shell | atareao/dialogos | /kdialog/ejemplo_16.sh | UTF-8 | 201 | 2.953125 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# Show a calendar dialog; kdialog exits with 0 when the user accepts (OK)
# and with a non-zero status when the dialog is cancelled.
respuesta=$(kdialog --calendar "Elige una fecha señalada")
ans=$?
# BUGFIX: the branches were inverted — exit status 0 means a date WAS chosen,
# so the "no date" message must go on the non-zero (cancel) path.
if [ $ans -ne 0 ]
then
    echo "No ha elegido ninguna fecha"
else
    echo "La fecha seleccionada ha sido $respuesta"
fi
| true |
d25df1ed8a0f1091f3b5478db234cf1b185d20ce | Shell | uploadcare/intercom-rank | /bin/post_compile | UTF-8 | 253 | 2.515625 | 3 | [
"MIT",
"GPL-1.0-or-later",
"GPL-3.0-only"
] | permissive | #!/usr/bin/env bash
# Heroku-style post-compile hook: install foreman into an app-local gem dir.
# set terminal to UTF8 (otherwise `gem install foreman` fails)
export LANG=en_CA.UTF-8
# set up ruby env (gems live under ./.ruby-gems so the slug carries them)
mkdir .ruby-gems
export GEM_HOME=$PWD/.ruby-gems
export PATH=$PATH:$PWD/.ruby-gems/bin
# install foreman
gem install foreman
| true |
9abc6c3618bb64752541524b593aebcfd3899999 | Shell | blahs15/UCD-Mammalian-Lab | /bash/runx.sh | UTF-8 | 136 | 3.125 | 3 | [] | no_license | #!/bin/bash
# Run the sorter script a fixed number of times and report the total wall
# time via the `time` keyword around the whole loop.
script=./NSorter.sh
x=20
time {
  # C-style loop instead of spawning `seq` in a subshell.
  for ((i = 1; i <= x; i++)); do
    echo "run $i"
    "$script"
  done
}
echo "$script was run $x times"
fa5f2647628f39d22fb90df6b60f7745c9284eba | Shell | urmyfaith/sinstallation | /preferences/screen_sharing.sh | UTF-8 | 895 | 2.75 | 3 | [] | no_license | ################################################################################
# Screen Sharing
################################################################################
function osx_screen_sharing {
local enabled; if [[ "$1" == "enabled" ]]; then enabled="false"; else enabled="true"; fi
local loaded; if [[ "$1" == "enabled" ]]; then loaded="load"; else loaded="unload"; fi
sudo defaults write /var/db/launchd.db/com.apple.launchd/overrides.plist com.apple.screensharing -dict Disabled -bool $enabled
sudo sh -c "launchctl $loaded /System/Library/LaunchDaemons/com.apple.screensharing.plist 2> /dev/null"
}
function osx_screen_sharing_unencrypted_connection_warnings {
local enabled; if [[ "$1" == "enabled" ]]; then enabled="false"; else enabled="true"; fi
defaults write com.apple.ScreenSharing dontWarnOnVNCEncryption -bool $enabled
}
| true |
6c837fd01d3c011b3dd6800ed125f40ad41ab5ed | Shell | joseluisq/envoy-docker-registry | /docker-entrypoint.sh | UTF-8 | 239 | 2.53125 | 3 | [
"MIT"
] | permissive | #!/bin/sh
set -e
# Normalise the container arguments: a bare config file or a registry
# subcommand/flag is rewritten into a full "registry ..." command line.
case "$1" in
    *.yaml|*.yml) set -- registry serve "$@" ;;
    serve|garbage-collect|help|-*) set -- registry "$@" ;;
esac
# Run the registry in the background ("exec ... &" forks a subshell, so this
# behaves like a plain background job) and keep Envoy in the foreground as
# the container's PID-1 process.
exec "$@" &
/usr/local/bin/envoy -c /etc/envoy-service.yaml --service-cluster ${SERVICE_NAME}
| true |
e6c7b8908a2af331067ce80ec39daf0b17c97c91 | Shell | jknoxville/dotfiles | /.zshrc | UTF-8 | 2,228 | 3.015625 | 3 | [] | no_license | ### Oh-my-zsh Config
# Commented out by default
export ZSH=~/.oh-my-zsh
# Custom theme only looks good with decent UI support.
# When that is available add a .use-custom-oh-my-zsh-theme file to $HOME
# to enable it, otherwise use the default.
if [ -f ~/.use-custom-oh-my-zsh-theme ]; then
ZSH_THEME="jknognoster"
else;
ZSH_THEME="robbyrussell"
fi;
export UPDATE_ZSH_DAYS=14 # Oh-my-zsh auto-update frequency
COMPLETION_WAITING_DOTS="true" # Show auto-complete status indicator
HISTSIZE=100000
SAVEHIST=1000000000
plugins=(git) # Oh-my-zsh plugins
source $ZSH/oh-my-zsh.sh
### Personal Config
export PATH="/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin:~/bin"
export EDITOR=vim
# Start up ssh-agent
eval `ssh-agent -s`
# vim-style keybindings
set -o vi
# vim bindings don't enable Ctrl-R by default. Re-enable it.
bindkey "^R" history-incremental-search-backward
# Aliases
alias zc="$EDITOR ~/.zshrc"
alias sz="source ~/.zshrc"
alias pwgen="openssl rand -base64 15"
alias gettime='date +%s'
alias ta="tmux -CC attach || tmux -CC" # -CC gives you native terminal support (iTerm2)
alias vi="vim"
alias mp="mkdir -p"
# Delete branches that are merged into HEAD
alias gbc='git branch --merged | grep -v "\*" | grep -v master | grep -v dev | xargs -n 1 git branch -d'
### Work Config
workconfig=~/dotfiles/amazon/amazon.zshrc
if [ -f "$workconfig" ]; then
alias amazon.zshrc="$EDITOR $workconfig"
source "$workconfig"
fi
# Include quick file completion
[[ -s "$HOME/.qfc/bin/qfc.sh" ]] && source "$HOME/.qfc/bin/qfc.sh"
# Open the latest (last modified) file with matching prefix
# E.g. latest tail -f myLogFile
latest () {
$@[0,-2] $@[-1]*(om[1])
}
[[ -s /Users/jknox/.autojump/etc/profile.d/autojump.sh ]] && source /Users/jknox/.autojump/etc/profile.d/autojump.sh
autoload -U compinit && compinit -u
# 0 -- vanilla completion (abc => abc)
# # 1 -- smart case completion (abc => Abc)
# # 2 -- word flex completion (abc => A-big-Car)
# # 3 -- full flex completion (abc => ABraCadabra)
zstyle ':completion:*' matcher-list '' \
'm:{a-z\-}={A-Z\_}' \
'r:[^[:alpha:]]||[[:alpha:]]=** r:|=* m:{a-z\-}={A-Z\_}' \
'r:|?=** m:{a-z\-}={A-Z\_}'
# Facebook stuff
source /etc/profile
| true |
bab5dd96a8acd203c35b8347b29e188fdb554f31 | Shell | sam33r/dotfiles | /utils/center-window | UTF-8 | 1,416 | 3.75 | 4 | [
"MIT"
] | permissive | #!/bin/bash
# Center and resize the active window.
# Determine resolution of the current monitor.
# See https://superuser.com/questions/603528/how-to-get-the-current-monitor-resolution-or-monitor-name-lvds-vga1-etc
## Get screen info
screen1=($(xrandr | grep -w connected | sed 's/primary //' | awk -F'[ +]' '{print $1,$3,$4}' | head -n 1))
screen2=($(xrandr | grep -w connected | sed 's/primary //' | awk -F'[ +]' '{print $1,$3,$4}' | tail -n 1))
## Figure out which screen is to the right of which
if [ ${screen1[2]} -eq 0 ]
then
right=(${screen2[@]});
left=(${screen1[@]});
else
right=(${screen1[@]});
left=(${screen2[@]});
fi
# Get window position
pos=$(xwininfo -id $(xdotool getactivewindow) | grep "Absolute upper-left X" | awk '{print $NF}')
# Which screen is this window displayed in? If $pos
# is greater than the offset of the rightmost screen,
# then the window is on the right hand one
if [ "$pos" -gt "${right[2]}" ]
then
res=${right[1]}
echo "${right[0]} : ${right[1]}"
else
res=${left[1]}
echo "${left[0]} : ${left[1]}"
fi
width=$(echo $res | awk -Fx '{print $1}')
height=$(echo $res | awk -Fx '{print $2}')
# Setup window to cover .75x of the width and the height.
left=$(( $width / 8 ))
down=$(( $height / 8 ))
winwidth=$(( ( $width * 3 ) / 4 ))
winheight=$(( ( $height * 3 ) / 4 ))
params="0,$left,$down,$winwidth,$winheight"
wmctrl -r :ACTIVE: -e $params
| true |
9dc2c22ab66ff9fc49bb6052da2bf02d3d101a5d | Shell | aix27249/abuilds | /texlive-langcyrillic/doinst.sh | UTF-8 | 806 | 3.140625 | 3 | [] | no_license | PKGNAME="texlive-core"
UPDMAP="/etc/texmf/web2c/updmap.cfg"
OLDMAPS="/var/lib/texmf/agilia/installedpkgs/$PKGNAME.maps"
SYNCWITHTREES=''
NEWMAPS=`mktemp`
cat <<EOF > $NEWMAPS
Map cmcyr.map
EOF
cat $NEWMAPS >> $UPDMAP
chroot . usr/bin/mktexlsr
chroot . usr/bin/updmap-sys --quiet --nohash
chroot . usr/bin/fmtutil-sys --all 1>/dev/null
if [ -f $OLDMAPS ] ; then
MAPSDIFF=`mktemp`
TOADD=`mktemp`
diff -B -w $OLDMAPS $NEWMAPS | sed 's/\s\+/ /g' > $MAPSDIFF
TOREMOVE=`cat $MAPSDIFF | egrep '^<' | cut -d' ' -f3`
cat $MAPSDIFF | egrep '^>' | sed 's/^> //' > $TOADD
if [ "x$TOREMOVE" != "x" ]; then
for map in $TOREMOVE; do
sed -i "/\s$map/d" $UPDMAP
done
fi
if [ -s $TOADD ]; then
cat $TOADD >> $UPDMAP
fi
else
echo "Warning: file $OLDMAPS not found"
SYNCWITHTREES="--syncwithtrees"
fi
| true |
fabfab5d57da5ea983c3a41d741d4c537c353bec | Shell | shixiang08abc/sogou_exp | /create_train3_dump.sh | UTF-8 | 1,331 | 3.015625 | 3 | [] | no_license | #!/bin/sh
# Relink the search hub and ranking model to the requested configuration,
# restart the services, then request a training dump for the given queries.
searchHubDir=$1
CacheDir=$2
trainDir=$3
datadir=$4
locateip=$5
qohost=$6
basemodel=$7
queryterm=$8
rankmask=$9
# Echo back every parameter so the run is self-documenting in logs.
echo "searchHubDir=$searchHubDir"
echo "CacheDir=$CacheDir"
echo "trainDir=$trainDir"
echo "datadir=$datadir"
echo "locateip=$locateip"
echo "qohost=$qohost"
echo "basemodel=$basemodel"
echo "queryterm=$queryterm"
echo "rankmask=$rankmask"
echo "restart searchHub link $qohost"
cd $searchHubDir/conf
# Point the active config symlink at the requested host profile.
rm searchhub.conf; ln -s $qohost searchhub.conf
cd $searchHubDir
sh restart.sh
sleep 5s
echo "start fangzi link basemodel=$basemodel"
cd $CacheDir/data/base/rerank_data
# Point the active relevance model symlink at the requested base model.
rm relevance.model; ln -s $basemodel relevance.model
cd $CacheDir
sh start_fangzi.sh
# Busy-wait until the error log shows all 4 worker loops have started.
while((1))
do
	lines=`grep loop log/err | wc -l`
	if [ $lines -eq 4 ];then
		echo "loop $lines"
		break;
	fi
done
sleep 30s
echo "`date "+%Y-%m-%d %H:%M:%S"` start create diff dump rankmask=$rankmask"
filedir=$trainDir/RelevanceLTRData/codename/$datadir
if [ ! -d "$filedir" ];then
	mkdir $filedir
fi
dumptrain=$filedir/qs.dump.train.3
echo "send train query reld on new query: $queryterm" >> $filedir/log
date >> $filedir/log
# Remove any stale dump, generate a fresh one, then move it into place.
rm $trainDir/data/qs.dump
cd $trainDir/RelevanceTrain/nlp_exp/script
perl request_train.pl $queryterm $locateip $rankmask
sleep 20s
mv $trainDir/data/qs.dump $dumptrain
| true |
4748306f1653e571fd7dd34c29e6ea5526b81ea4 | Shell | tsanov/jyboss-cli | /create-test-server.sh | UTF-8 | 2,323 | 3.9375 | 4 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env bash
# ******************************************************************
# OS specific support.  $var _must_ be set to either true or false.
# (Only $cygwin is consulted later, for cygpath conversion.)
cygwin=false
darwin=false
os400=false
hpux=false
case "`uname`" in
CYGWIN*) cygwin=true;;
Darwin*) darwin=true;;
OS400*) os400=true;;
HP-UX*) hpux=true;;
esac
# ******************************************************************
# verbose messaging: $1 is a printf format string, the remaining words are
# its arguments; the line is written to stderr with a colored [INFO] prefix.
function msg()
{
    local fmt="$1"
    shift
    printf "[\e[94mINFO\e[39m] ${fmt}\n" $* 1>&2
}
# ******************************************************************
# main line: download a Keycloak server distribution from Maven, unpack it
# into ${JBOSS_HOME}, run the CLI setup, and print the env var to export.
PATH=.:$PATH
if [ -z "${CI_PROJECT_DIR}" ]; then
    export CI_PROJECT_DIR=$(pwd)
fi
# the place where the testing server will be installed
JBOSS_HOME="${CI_PROJECT_DIR}/tmp/server"
# setup maven repo
if [ -z "${M2_REPO}" ]; then
    export M2_REPO=~/.m2/repository
fi
# we can infer coordinates from pom
KEYCLOAK_VERSION=4.8.3.Final
KEYCLOAK_PRODUCT=keycloak
KEYCLOAK_GROUP=org.keycloak
KEYCLOAK_DIST_FORMAT=tar.gz
# RedHat SSO Commercial Distro
#KEYCLOAK_PRODUCT=rh-sso
#KEYCLOAK_GROUP=com.redhat.jboss
#KEYCLOAK_DIST_FORMAT=zip
# Start from a clean server directory on every run.
if [ -d "${JBOSS_HOME}" ]; then
    msg "Remove ${KEYCLOAK_PRODUCT} home"
    rm -rf ${JBOSS_HOME}
fi
mkdir -p ${JBOSS_HOME} 1>&2
msg "Download ${KEYCLOAK_PRODUCT} distribution package"
mvn dependency:get -DgroupId="${KEYCLOAK_GROUP}" \
    -DartifactId="${KEYCLOAK_PRODUCT}-server-dist" \
    -Dversion="${KEYCLOAK_VERSION}" \
    -Dpackaging="${KEYCLOAK_DIST_FORMAT}" 1>&2
msg "Unpack ${KEYCLOAK_PRODUCT}-${KEYCLOAK_VERSION} server"
# ${KEYCLOAK_GROUP/.//} turns the group id into a repo path segment.
tar xzf ${M2_REPO}/${KEYCLOAK_GROUP/.//}/${KEYCLOAK_PRODUCT}-server-dist/${KEYCLOAK_VERSION}/${KEYCLOAK_PRODUCT}-server-dist-${KEYCLOAK_VERSION}.${KEYCLOAK_DIST_FORMAT} \
    --strip-components=1 \
    -C ${JBOSS_HOME} 1>&2
msg '------------------------------------------------------------------------'
msg "Configure ${KEYCLOAK_PRODUCT} server"
${JBOSS_HOME}/bin/jboss-cli.sh --file=keycloak-setup.cli 1>&2
msg '------------------------------------------------------------------------'
# Only this export line goes to stdout so the output can be eval'd/copied.
printf "\n# Set the below variable prior to executing tests\n\n"
if $cygwin; then
    JBOSS_HOME=$(cygpath -aw "${JBOSS_HOME}")
fi
msg 'Server home is %s' "${JBOSS_HOME}"
printf 'export JBOSS_HOME=\"%s\"\n\n' "${JBOSS_HOME}"
| true |
a11dbe2d8fa69a5d6a03fbec456943d83d3f95d0 | Shell | democratic-csi/democratic-csi | /.github/bin/docker-release.sh | UTF-8 | 784 | 3.015625 | 3 | [
"MIT"
] | permissive | #!/bin/bash
# CI release step: log in to Docker Hub and GHCR (passwords via stdin so they
# never appear in argv) and push a multi-platform image when IMAGE_TAG is set.
echo "$DOCKER_PASSWORD" | docker login -u "$DOCKER_USERNAME" --password-stdin
echo "$GHCR_PASSWORD" | docker login ghcr.io -u "$GHCR_USERNAME" --password-stdin
export DOCKER_ORG="democraticcsi"
export DOCKER_PROJECT="democratic-csi"
export DOCKER_REPO="${DOCKER_ORG}/${DOCKER_PROJECT}"
export GHCR_ORG="democratic-csi"
export GHCR_PROJECT="democratic-csi"
export GHCR_REPO="ghcr.io/${GHCR_ORG}/${GHCR_PROJECT}"
if [[ -n "${IMAGE_TAG}" ]]; then
  # -t ${GHCR_REPO}:${IMAGE_TAG}
  docker buildx build --progress plain --pull --push --platform "${DOCKER_BUILD_PLATFORM}" -t ${DOCKER_REPO}:${IMAGE_TAG} \
    --label "org.opencontainers.image.created=$(date -u --iso-8601=seconds)" \
    --label "org.opencontainers.image.revision=${GITHUB_SHA}" \
    .
else
  # No tag supplied: nothing to publish (":" is the shell no-op).
  :
fi
| true |
cd9051f875d0b9e50e93b347d9ad4bf5fc599a01 | Shell | roulis2844sasha/mk808bp | /mk808bp | UTF-8 | 2,251 | 3.640625 | 4 | [] | no_license | #!/bin/bash
# Pretty-print a byte count read from stdin ("<bytes> [ignored...]") using the
# nearest 1024-based unit (B, KB, MB, GB), rounding half-up at each step.
size() {
    while read B dummy; do
        if [ $B -lt 1024 ]; then echo ${B} B; break; fi
        KB=$(( (B + 512) / 1024 ))
        if [ $KB -lt 1024 ]; then echo ${KB} KB; break; fi
        MB=$(( (KB + 512) / 1024 ))
        if [ $MB -lt 1024 ]; then echo ${MB} MB; break; fi
        GB=$(( (MB + 512) / 1024 ))
        if [ $GB -lt 1024 ]; then echo ${GB} GB; break; fi
    done
}
# Show a dialog menu of available image flavours and echo the selection
# (currently only "desktop"; other entries are commented out). dialog draws
# on the tty; the chosen tag comes back on stderr and is captured here.
img() {
	echo $(
		bash -c "$(
			echo -n dialog --menu \
			\"Choose wich USB stick have to be installed\" 22 76 17;
			echo -n \ desktop \"\" ;
			#echo -n \ client \"\" ;
			#echo -n \ server \"\" ;
		)" 2>&1 >/dev/tty
	)
}
# Detect removable USB block devices and echo the chosen device name.
# 0 devices -> exit; 1 device -> auto-select; several -> dialog menu showing
# each stick's size (via the size() helper) and model string.
usb() {
	export USBKEYS=($(
		grep -Hv ^0$ /sys/block/*/removable |
		sed s/removable:.*$/device\\/uevent/ |
		xargs grep -H ^DRIVER=sd |
		sed s/device.uevent.*$/size/ |
		xargs grep -Hv ^0$ |
		cut -d / -f 4
	))
	case ${#USBKEYS[@]} in
	0 ) echo No USB Stick found; exit 0 ;;
	1 ) STICK=$USBKEYS ;;
	* )
		STICK=$(
			bash -c "$(
				echo -n dialog --menu \
				\"Choose wich USB stick have to be installed\" 22 76 17;
				for dev in ${USBKEYS[@]} ;do
					echo -n \ "$dev" \"$(sudo blockdev --getsize64 /dev/$dev | size) - [$(
						sed -e s/\ *$//g </sys/block/$dev/device/model
					)]\" ;
				done
			)" 2>&1 >/dev/tty
		)
		;;
	esac
	[ "$STICK" ] || exit 0
	echo $STICK
}
# "dev" mode: mount an image file ($2) on a loop device for editing with
# gparted, then truncate it to the sector count the user types in.
# Default mode: download + reassemble the chosen image and dd it onto the
# selected USB stick (only if the image fits on the device).
if [ "$1" == 'dev' ]; then
	echo 'Enable devel mode'
	[ "$2" ] || exit 0
	sudo modprobe loop
	loop=`sudo losetup -f`
	sudo losetup $loop $2
	sudo partprobe $loop
	sudo gparted $loop
	sudo losetup -d $loop
	fdisk -l $2
	echo 'End image'
	# The user enters the last used sector (from fdisk output above).
	read end
	[ "$end" ] || exit 0
	truncate --size=$[($end+1)*512] $2
	#split -a 1 -d -b 40M $2 $2.part
else
	sudo apt-get install dialog
	echo 'Install'
	mkdir -p ~/.config/roulis2844sasha/mk808bp
	cd ~/.config/roulis2844sasha/mk808bp
	img=`img`
	# Release images are split into 12 xz parts on GitHub; fetch and rejoin.
	wget https://github.com/roulis2844sasha/mk808bp/raw/master/$img/$img.img.xz.part{00,01,02,03,04,05,06,07,08,09,10,11}
	cat $img.img.xz.part* > $img.img.xz
	unxz $img.img.xz
	if [ -f "$img.img" ]; then
		usb=`usb`
		filesize=$(stat -c%s "$img.img");
		var=`sudo blockdev --getsize64 /dev/$usb`
		# Only write when the image fits on the stick.
		if (( "$filesize" < "$var" )); then
			clear
			echo "Version: $img"
			echo "Usb: $usb"
			sudo dd if="$img.img" of=/dev/$usb
			sudo gparted $usb
		fi
	fi
	rm -r ~/.config/roulis2844sasha/mk808bp
fi
| true |
79d7fdc9be75d3b59efb4d74f029bd99546ad884 | Shell | conradkoh2/mq_release_automation | /create_release/encrypt_all_patch.sh | UTF-8 | 4,966 | 3.984375 | 4 | [] | no_license | #!/bin/bash
#Name: ALL_PATCH
#Version: 1.1
#shell config
set -e
#========================================
#configurations (interactive prompts)
#========================================
echo "Please enter your project prefix";
read prefix;
echo "Enter previous release version";
read base_release_version;
echo "Enter deployment release version";
read new_release_version;
#========================================
#initialize variables
#========================================
base_dir=${PWD}
# NOTE(review): hardcoded credential; appears unused in this section of the
# script — consider removing it or reading it from the environment instead.
password='Ju$td0it'
#generate the release variables
release_folder_name="$prefix""_patch""_t""$new_release_version""_f""$base_release_version"
release_folder="patches"/"$release_folder_name"
patches_folder="$release_folder"/"patch"
#directory checking
if ! [[ -d $patches_folder ]];
then
	mkdir -p "$patches_folder"
else
	echo "Warning: Folder ""$patches_folder"" exists"
fi
#========================================
#function declarations
#========================================
create_release(){
set -e
index_folder_name=1;
index_base_release_version=2;
index_new_release_version=3;
folder_name=$1
base_release_version=$2
new_release_version=$3
echo "[$folder_name] - PATCH t_$new_release_version f_$base_release_version";
#if all required vars have been provided, execute
if ! [[ -z "$folder_name" ]] && ! [[ -z "$base_release_version" ]] && ! [[ -z "$new_release_version" ]];
then
#folder definitions
folder_patch_suffix=_patch
diff_filename=diff.file
#========================================
#remove .diff file if it already exists
#========================================
diff_file=$base_dir/$diff_filename
if [ -f "$diff_file" ]; then
echo "Deleting diff file"
rm -r "$diff_file"
echo "Diff file deleted"
fi
#configure git working directory and compute diffs
cd $base_dir/$folder_name
git fetch
git reset --hard HEAD >/dev/null
git clean -f >/dev/null
git checkout -f release/$base_release_version >/dev/null
git pull >/dev/null
git submodule init >/dev/null
git submodule sync >/dev/null
git submodule update >/dev/null
git checkout -f release/$new_release_version >/dev/null
git pull >/dev/null
git submodule init >/dev/null
git submodule sync >/dev/null
git submodule update >/dev/null
git diff --name-only release/$base_release_version > "$diff_file"
#========================================
#Clean up old release
#========================================
#copy files based on generated diff
source=$base_dir/$folder_name
destination="$base_dir"/"$patches_folder"/"$folder_name""$folder_patch_suffix"_t"$new_release_version"_f"$base_release_version"
#recreate portal folder if it already exists
if [ -d "$destination" ]; then
echo "Removing patch destination folder"
rm -r "$destination"
echo "Patch destination folder removed"
fi
mkdir -p "$destination" #create patch directory
#========================================
#Build the release patch
#========================================
#Copy each of the files and make the directories
set +e #allow errors for diffs, especially since files might have been removed
while IFS= read -r var
do
target_file="$destination"/"$var"
src_file="$source"/"$var"
target_dir=$(dirname "${target_file}")
mkdir -p $target_dir
if [ ! -f $src_file ] && [ ! -d $src_file ];
then
echo "Cannot copy deleted file: $src_file"
else
cp -r $src_file $target_dir
fi
done < "$diff_file"
#cleanup
echo "Cleaning up files"
rm $diff_file
find $destination -name '.DS_Store' -type f -delete
echo "Patch creation completed successfully."
else
echo "Incorrect command format specified";
fi
}
#========================================
#script execution
#========================================
create_release "$prefix"_portal "$base_release_version" "$new_release_version"
echo "$prefix""_portal created successfully";
create_release "$prefix"_db "$base_release_version" "$new_release_version"
echo "$prefix""_db created successfully";
create_release "$prefix"_reporting "$base_release_version" "$new_release_version"
echo "$prefix""_reporting created successfully";
#========================================
#patch summary
#========================================
echo "Release Version:" >> "$base_dir"/"$patches_folder"/"version.txt"
echo "$new_release_version" >> "$base_dir"/"$patches_folder"/"version.txt"
echo "Previous Version:" >> "$base_dir"/"$patches_folder"/"version.txt"
echo "$base_release_version" >> "$base_dir"/"$patches_folder"/"version.txt"
#========================================
#package compression & encryption
#========================================
cd "$base_dir"/"$patches_folder"
zip -P "$password" -r "$base_dir"/"$release_folder"".zip" ./*
rm -rf "$base_dir"/"$release_folder"
echo "Patches created & encrypted. Password for the file is ${password}";
read | true |
77e3bdffd45f271ca1ba4a7e83a03312a4334078 | Shell | mcmillion/.dotfiles | /zsh/.zshrc | UTF-8 | 10,115 | 2.984375 | 3 | [
"MIT"
] | permissive | #==============================================================================
# ENVIRONMENT
#==============================================================================
export LANG=en_US.UTF-8
export TERM=xterm-kitty
export EDITOR='nvim'
# less: -R pass colors through, -F quit if output fits one screen, -X no
# termcap init/deinit (keeps output on screen after quitting).
export LESS='-RFX'
# Put Homebrew (Apple Silicon prefix) on PATH and export its env vars.
eval "$(/opt/homebrew/bin/brew shellenv)"
#==============================================================================
# BASIC ZSH CONFIGURATION
#==============================================================================
# Fix backspace in zsh vi mode
bindkey "^?" backward-delete-char
# Case-Insensitive completion
autoload -Uz compinit
compinit
zstyle ':completion:*' matcher-list 'm:{a-zA-Z}={A-Za-z}' 'r:|=*' 'l:|=* r:|=*'
# Auto-ls when cd-ing into directories
cd () {
  builtin cd "$@";
  ls -aG;
}
#==============================================================================
# PROMPT
#==============================================================================
eval "$(starship init zsh)"
#==============================================================================
# HISTORY
#==============================================================================
HISTSIZE=5000
HISTFILE=~/.zsh_history
SAVEHIST=5000
HISTDUP=erase # Erase duplicates in the history file
setopt appendhistory # Append history to the history file (no overwriting)
setopt sharehistory # Share history across terminals
setopt incappendhistory # Immediately append to the history file, not just when a term is killed
#==============================================================================
# COMMON ALIASES
#==============================================================================
alias dot='cd ~/.dotfiles'
alias reload='source ~/.zshrc'
alias l='ls'
alias ls='ls -aG'
alias ll='ls -lh'
alias cp='cp -iv'
alias mv='mv -iv'
alias rm='rm -v'
alias mkdir='mkdir -pv'
alias ..='cd ..'
alias x='exit'
alias q='exit'
# NOTE(review): --color=always (not auto) keeps ANSI codes when grep output
# is piped into other tools -- verify that is intended.
alias grep='grep --color=always'
#==================================================================================================
# TMUX / VIM
#==================================================================================================
alias home='tmux new-session -A -s home'
# Kill every tmux session: list names (strip trailing colon), kill each.
alias kill_all_tmux_sessions='tmux ls | awk '\''{print $1}'\'' | sed '\''s/://g'\'' | xargs -I{} tmux kill-session -t {}'
alias v='nvim'
alias vi='nvim'
alias vim='nvim'
alias clear_nvim_sessions='rm ~/.local/share/nvim/sessions/*'
#==================================================================================================
# GIT
#==================================================================================================
alias gs='git status'
alias ga='git add . && git status'
alias gc='git commit --verbose && git --no-pager log -n 1'
alias gca='git commit --amend --verbose && git --no-pager log -n 1'
# gl/gpub/gunpub rely on git aliases (l, publish, unpublish) defined in
# the user's gitconfig, not in this file.
alias gl='git l'
alias gb='git branch'
alias gpub='git publish'
alias gunpub='git unpublish'
alias gpu='git push'
alias gpf='git push --force-with-lease'
alias gpl='git pull'
alias grm='gco master && gpl && gco - && git rebase master'
alias grt='gco trunk && gpl && gco - && git rebase trunk'
# Interactive rebase starting just below the commit picked via fcs (below).
alias gri='git rebase -i `fcs`~1'
alias grc='git rebase --continue'
alias gra='git rebase --abort'
# Sync the repo's default branch and prune merged local branches:
#   - discover origin's default branch from refs/remotes/origin/HEAD
#   - update it (git pull when on it, fetch main:main otherwise)
#   - check it out and delete local branches already merged into it
gsync() {
  local main current
  main=$(git symbolic-ref refs/remotes/origin/HEAD | sed 's@^refs/remotes/origin/@@')
  current=$(git rev-parse --abbrev-ref HEAD)
  # Bail out when origin/HEAD is not set (e.g. a clone where the remote
  # HEAD ref was never recorded).
  [ -z "$main" ] && return 1
  if [ "$current" = "$main" ]
  then
    git pull
  else
    # Fast-forward the local default branch without checking it out.
    git fetch origin $main:$main
  fi
  git fetch --all
  git checkout $main
  # Delete branches already merged; the grep keeps the current-branch line
  # ("* <main>") out of the deletion list.
  git branch --merged $main | grep -v "\* $main" | xargs -n 1 git branch -d
}
#==================================================================================================
# LS
#==================================================================================================
export LSCOLORS='ExfxbxdxCxegedabagacad'
#==================================================================================================
# FZF
#==================================================================================================
# Use ripgrep as the default file source (includes hidden files, follows
# symlinks) and a custom base16-style color scheme for fzf.
export FZF_DEFAULT_COMMAND='rg --files --hidden --follow -g "" 3> /dev/null'
export FZF_DEFAULT_OPTS='--height 50% --color=bg:0,fg:7,hl:8,bg+:0,fg+:15,hl+:4,info:9,prompt:4,pointer:4,marker:4,spinner:4,border:3,header:3'
[ -f ~/.fzf.zsh ] && source ~/.fzf.zsh
# Fuzzy git checkout.
#   gco            -> pick a local branch via fzf-tmux
#   gco -a         -> fetch/prune, then pick from local + remote branches
#   gco -b <name>  -> create and check out a new branch
#   gco <branch>   -> plain `git checkout <branch>`
# Fix: the original left a dangling `&&` at the end of both branch-listing
# assignments, immediately before `else`/`fi`. `&&` must be followed by a
# command, so the function failed to parse when .zshrc was sourced.
gco() {
  if [ "$1" = '-a' ] || [ "$1" = '' ]
  then
    # Fuzzy checkout branch
    local branches branch
    if [ "$1" = '-a' ]
    then
      # Fetch remote branches and show remote and local branches
      git fetch origin --prune
      branches=$(git branch --all | grep -v HEAD)
    else
      # Just show local branches
      branches=$(git branch | grep -v HEAD)
    fi
    # Strip the leading markers and any "remotes/<origin>/" prefix before
    # checking out, so a remote pick creates/uses the local branch name.
    branch=$(echo "$branches" | fzf-tmux +m) &&
    git checkout $(echo "$branch" | sed "s/.* //" | sed "s#remotes/[^/]*/##")
  elif [ "$1" = '-b' ]
  then
    if [ -n "$2" ]
    then
      # Create and check out a new branch
      git checkout -b $(echo "$2")
    else
      # Invalid argument
      echo "Error: Branch name is required (gco -b [new_branch_name])"
    fi
  else
    # Checkout local branch
    git checkout $1
  fi
}
# FZF commit SHA (used in gri)
# Prints the abbreviated SHA of a commit picked interactively from
# `git log --oneline` (oldest first, fzf shown newest-first via --tac).
fcs() {
  local commits commit
  commits=$(git log --color=always --pretty=oneline --abbrev-commit --reverse) &&
  commit=$(echo "$commits" | fzf --tac +s +m -e --ansi --reverse) &&
  echo -n $(echo "$commit" | sed "s/ .*//")
}
# FZF kill process
# Pick one or more processes from `ps -ef` and send them a signal;
# defaults to -9 (SIGKILL) unless a signal number is passed as $1.
fkill() {
  local pid
  pid=$(ps -ef | sed 1d | fzf -m | awk '{print $2}')
  if [ "x$pid" != "x" ]
  then
    echo $pid | xargs kill -${1:-9}
  fi
}
# FZF tmuxinator
# Pick a smug project (pre-filtered by $1; -1 auto-selects a sole match)
# and attach to it.
mux() {
  local selected=$(smug list | fzf --prompt="Project: " -m -1 -q "$1")
  if [ -n "$selected" ]; then
    smug start "$selected" -a
  fi
}
#==================================================================================================
# ASDF
#==================================================================================================
source /opt/homebrew/opt/asdf/libexec/asdf.sh
#==================================================================================================
# NPM / PRISMA / T3
#==================================================================================================
alias n='npm run'
alias p='npx prisma'
#==================================================================================================
# RUBY / RAILS
#==================================================================================================
# noglob stops zsh from expanding globs in `bundle exec rake task[arg]` etc.
alias be='noglob bundle exec'
alias migrate='bundle && bin/rails db:migrate && bin/rails db:migrate RAILS_ENV=test'
alias kill_rails_server='kill -9 $(lsof -i tcp:3000 -t)'
#==================================================================================================
# PYTHON
#==================================================================================================
# Work Bootstrap
# Only activated on machines where pyenv is installed (i.e. the work laptop).
if command -v pyenv 1>/dev/null 2>&1; then
  export PYENV_ROOT="$HOME/.pyenv"
  export PATH="$(brew --prefix)/opt/gnu-sed/libexec/gnubin:$PATH"
  export PATH="$PYENV_ROOT/bin:$PATH"
  export PATH="$PATH:/Users/mmcmillion/.local/bin"
  eval "$(pyenv init -)"
  # git -C ~/Developer/Galileo/dotfiles reset --hard origin/master > /dev/null 2>&1
  # git -C ~/Developer/Galileo/dotfiles pull > /dev/null 2>&1
  source ~/Developer/Galileo/dotfiles/bootstrap.sh
fi
#==================================================================================================
# POSTGRES / REDIS / ETC
#==================================================================================================
# Remove a stale postmaster.pid left by a crash, then restart the service.
alias fix_stuck_postgres='rm /opt/homebrew/var/postgres/postmaster.pid; brew services restart postgresql'
#==================================================================================================
# DOCKER
#==================================================================================================
alias d='docker'
alias dc='docker-compose'
#==================================================================================================
# IOS / ANDROID
#==================================================================================================
export ANDROID_HOME=$HOME/Library/Android/sdk
export PATH=$PATH:$ANDROID_HOME/emulator
export PATH=$PATH:$ANDROID_HOME/tools
export PATH=$PATH:$ANDROID_HOME/tools/bin
export PATH=$PATH:$ANDROID_HOME/platform-tools
alias fl='bundle exec fastlane'
#==================================================================================================
# RUST
#==================================================================================================
[ -f "$HOME/.cargo/env" ] && source "$HOME/.cargo/env"
#==================================================================================================
# OSX
#==================================================================================================
alias finder='open -a Finder ./'
alias cleanup_ds="find . -type f -name '*.DS_Store' -ls -delete"
alias cleanup_open_with='/System/Library/Frameworks/CoreServices.framework/Frameworks/LaunchServices.framework/Support/lsregister -kill -r -domain local -domain system -domain user && killall Finder'
alias add_blank_space_to_dock="defaults write com.apple.dock persistent-apps -array-add '{\"tile-type\"=\"spacer-tile\";}' && killall Dock"
alias flush_dns='sudo dscacheutil -flushcache; sudo killall -HUP mDNSResponder'
alias disable_hidden_files_in_finder='defaults write com.apple.finder AppleShowAllFiles NO && killall Finder'
alias enable_hidden_files_in_finder='defaults write com.apple.finder AppleShowAllFiles YES && killall Finder'
alias disable_local_timemachine='sudo tmutil disablelocal'
alias enable_local_timemachine='sudo tmutil enablelocal'
alias disable_key_press_and_hold='defaults write NSGlobalDomain ApplePressAndHoldEnabled -bool false'
alias enable_key_press_and_hold='defaults write NSGlobalDomain ApplePressAndHoldEnabled -bool true'
# Start tmux home session automatically
# Attach to (or create) the "home" session on macOS, unless already inside tmux.
if [[ ("$OSTYPE" == "darwin"*) && (-z "$TMUX") ]]; then
  tmux new-session -A -s home
fi
| true |
ae869323f3e1dd171662c00828bf0fe42d6729d4 | Shell | bioinformagical/scripts-cluster | /cleaning/seqclean.pbs | UTF-8 | 1,553 | 2.5625 | 3 | [] | no_license | #!/bin/sh
# PBS batch job: run seqclean -> cln2qual -> lucy over every 454 FASTA
# file under the project's reads directory.
# Request a run time of 5 hours and 30 minutes
#PBS -l walltime=98:30:00
# Request 1 processor in 1 node
#PBS -l nodes=1:ppn=4
# Request 7600 megabytes memory per processor. ( 48 usable CPUs)
#PBS -N SeqcleanChloro
#PBS -q bigiron
##home=/gpfs/fs3/home/rreid2
# launching Launching build_lmer_table on a single node so it can run till the cows come home
#echo "Launching Repeat Masker"
export PATH=$HOME/bin:$PATH
# Fix: abort if the cd fails instead of running find from the wrong directory.
cd $HOME/blueberry/reads/454Sequences/all/ || exit 1
# Fix: iterate with a pipe into `read -r` rather than `for f in $(find ...)`,
# which word-splits file names; "$file" is quoted throughout.
find ./ -name "*.fasta" | while IFS= read -r file
do
	echo "$file"
	# Fix: seqclean's -s option takes ONE comma-delimited list of screening
	# databases; the original had a space after the comma, which turned the
	# chloroplast DB into a stray positional argument. (Presumably
	# unintended -- verify against the seqclean usage text.)
	/gpfs/fs3/home/rreid2/bin/seqclean/seqclean "$file" -c 4 -v $HOME/blueberry/db/gSe13andUnivec.fasta -s $HOME/blueberry/db/vitisMito.fasta,$HOME/blueberry/db/vitisViniChloro.fasta -o "$file"".clean"
	/gpfs/fs3/home/rreid2/bin/seqclean/cln2qual "$file"".cln" "$file"".qual"
	lucy -o "$file".clean.lucy "$file".qual.clean.lucy "$file"".clean" "$file".qual.clean
done
# Historical invocations kept for reference:
#file=/gpfs/fs3/home/rreid2/blueberry/reads/454Sequences/OLDseqclean/level2/FQ55CRL01.fasta;
#/gpfs/fs3/home/rreid2/bin/seqclean/seqclean $file -c 8 -s /gpfs/fs3/home/rreid2/db/vitis/chloroplast/vitisviniferisChloroplastgenome.fna
#./seqclean ~/blueberry/reads/454Sequences/mira9/split_1.fasta -n 10 -v ~/blueberry/db/vitisViniChloro.fasta, $HOME/blueberry/db/gSe13andUnivec.fasta -l 40 -o test -r rtest
#/gpfs/fs3/home/rreid2/bin/seqclean/seqclean $file -c 16 -v $HOME/blueberry/db/gSe13andUnivec.fasta -s $HOME/blueberry/db/vitisMito.fasta, $HOME/blueberry/db/vitisViniChloro.fasta -l 40 -o ${file%fasta}"clean"
| true |
2aff763cd637b563c0788ee965a2498205d9045a | Shell | kgyrtkirk/networkers | /trunk/doc/fix-fly.bash | UTF-8 | 270 | 3.078125 | 3 | [] | no_license | #!/bin/bash
# Apply "FIXXER" substitution rules to a LaTeX source file.
# Every `% FIXXER <from> <to>` line in flyer.tex becomes a sed rule
# s/<from>/<to>/g applied in place to "$1" (FIXXER lines themselves are
# kept untouched; the previous version is backed up as *.last).
[ "$1" == "flyer.tex" ] && exit 0
# Fix: real usage message (was the placeholder "usage fasdf").
[ ! -e "$1" ] && echo "usage: $0 <file.tex>" && exit 1
# Preview the rules declared by the target file itself.
# Fix: "$1" quoted so paths with spaces work.
grep "^% FIXXER" "$1" | awk '{print "s/"$3"/"$4"/g;"}'
# Fix: $(...) instead of backticks -- the nested double quotes inside the
# old backtick form were fragile.
exp="$(grep "^% FIXXER" flyer.tex | awk '{print "s/"$3"/"$4"/g;"}')"
echo "$exp"
# Print FIXXER lines unchanged (p;d), then run the collected rules in place.
sed -r "/^% FIXXER/{p;d};${exp}" -i.last "$1"
| true |
c410a2fa3b6eccc3df230fa819b81d8d343227eb | Shell | vzaicev/bash | /2021/14_oranskaya/lab02/lab02_z09.sh | UTF-8 | 430 | 3 | 3 | [] | no_license | #!/bin/bash
# Lab exercise 9: download the front page of a well-known portal and
# archive it (wget + tar). With "-task" as $1, print the (Russian) task
# description first.
if [ -n "$1" ] && [ "$1" == "-task" ];
then
echo "Задание"
echo "9. Написать скрипт, который скачивает заглавную страницу "
echo "известного портала и архивирует ее (использовать wget и tar -czvf )."
echo
echo
fi
# Mirror the site into ./www.onliner.by (-k rewrites links for local
# viewing), then pack that directory into onliner.tar.gz.
# NOTE(review): `-l 0` makes the recursion depth unlimited, so this fetches
# far more than the front page -- verify that is intended.
wget -r -l 0 -k https://www.onliner.by && tar -cvzf onliner.tar.gz www.onliner.by
rm -R www.onliner.by | true |
94086ced9b3f52d0790120e620ecaa2ff4683af3 | Shell | evgenyka/artifactory-retention | /runRetention.sh | UTF-8 | 826 | 3.34375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env sh
# Dispatch an Artifactory retention run to the matching Groovy script.
# Usage: runRetention.sh <builds|artifacts|releases> <name> [user] [password]
if [ $# -lt 2 ]
then
    echo "Usage: runRetention.sh <builds|artifacts> <repo-name|build-retention-name> [user] [password]"
    echo "Ex.: runRetention.sh artifacts foobar-dev-local emily sEcrEt123"
    echo "Ex.: runRetention.sh builds builds-without-artifacts robert P4sSw0Rd"
    exit 1
fi
# Reject anything but the three known modes up front.
# NOTE(review): the usage text above does not mention 'releases' even
# though it is accepted here.
if [ "$1" != "builds" ] && [ "$1" != "artifacts" ] && [ "$1" != "releases" ]
then
    echo "first arg must be 'builds' or 'artifacts' or 'releases' (got '$1')"
    exit 1
fi
# One independent `if` per mode; exactly one of them can match.
# NOTE(review): for 'builds'/'artifacts' the script always exits 0 even when
# groovy fails, because the later false `if` tests reset the exit status;
# only the 'releases' branch propagates groovy's status.
if [ "$1" = "builds" ]
then
    groovy scripts/BuildRetention.groovy aql/build/$2.aql result/ $3 "$4"
fi
if [ "$1" = "artifacts" ]
then
    groovy scripts/ArtifactRetention.groovy $2 aql/artifact result/ $3 "$4"
fi
if [ "$1" = "releases" ]
then
    groovy scripts/ReleaseRetention.groovy aql/release/$2.aql result/ $3 "$4"
fi
| true |
d37af5753791d502c0e6c243396c388b7f9b4a62 | Shell | matchalunatic/fun-with-devops | /vm-images/docker-swarm/join-swarm.sh | UTF-8 | 683 | 2.578125 | 3 | [] | no_license | #!/bin/bash
# Join this EC2 instance to a Docker swarm via the aws-swarm-init container.
# 1) Read the instance id and region from the EC2 metadata service
#    (http://instance-data is the link-local metadata alias).
INSTANCE_ID=`wget -qO- http://instance-data/latest/meta-data/instance-id`
# Availability zone minus its trailing letter gives the region name.
REGION=`wget -qO- http://instance-data/latest/meta-data/placement/availability-zone | sed 's/.$//'`
# 2) Turn this instance's EC2 tags into shell assignments (key="value")
#    and source them; this is expected to define $swarm_node_type.
aws ec2 describe-tags --region $REGION --filter "Name=resource-id,Values=$INSTANCE_ID" --output=text | sed -r 's/TAGS\t(.*)\t.*\t.*\t(.*)/\1="\2"/' > /etc/ec2-tags
source /etc/ec2-tags
export NODE_TYPE="${swarm_node_type}"
export DYNAMODB_TABLE="swarm_locking_manager"
# 3) Run the init image with the docker socket and binary mounted so it can
#    perform the swarm join (manager/worker per NODE_TYPE, coordinated via
#    the DynamoDB table) from inside the container.
docker run -v /var/run/docker.sock:/var/run/docker.sock -v /home/ubuntu/join-swarm-entry.sh:/entry.sh -v /usr/bin/docker:/usr/bin/docker -v /var/log:/var/log -e REGION -e DYNAMODB_TABLE -e NODE_TYPE srikalyan/aws-swarm-init
| true |
cd38593d0325f03e00bc8d14ac7aaf4d187cad3f | Shell | azak-azkaran/day-mode | /day-night-cycle | UTF-8 | 577 | 3.390625 | 3 | [
"BSD-3-Clause"
] | permissive | #!/bin/bash
# Switch the desktop between day and night mode based on civil twilight.
TIME="$(date '+%H%M')"
echo "The current time is $TIME"
dbus-launch
# Fix: query find-twilight once so start and end come from the same run
# (two separate invocations could straddle a minute boundary and disagree).
TWILIGHT="$(/home/azak/git/day-mode/find-twilight)"
CIVS="$(printf '%s' "$TWILIGHT" | cut -f1 -d\ )"
CIVE="$(printf '%s' "$TWILIGHT" | cut -f2 -d\ )"
echo "Daylight starts at $CIVS ends at $CIVE"
if [ "$TIME" -ge "$CIVS" ] && [ "$TIME" -le "$CIVE" ] ; then
	echo "Switching to Day mode"
	/home/azak/git/day-mode/day-mode
	RESP=$?
else
	echo "Switching to Night mode"
	/home/azak/git/day-mode/night-mode
	RESP=$?
fi
# Fix: the original `RESP=exec /path/cmd` only set RESP in the child
# process's environment, so `[[ $RESP ]]` was always false and the script
# always reported the same branch; capture the command's exit status instead.
if [ "$RESP" -ne 0 ] ; then
	echo "Switch failure"
else
	echo "Switch sucess"
	# NOTE(review): $DAY_MODE is never set in this script and a child process
	# cannot export into the parent -- verify where this is supposed to come from.
	echo "set to: $DAY_MODE"
fi
| true |
e4879efa19d6605e73ac8f782b71b5c35da40906 | Shell | Valkata154/crontab | /mycrontab-updated.sh | UTF-8 | 14,187 | 4.15625 | 4 | [] | no_license | #!/bin/bash
# Working copy of the user's crontab; every menu action below edits this
# file and re-installs it with `crontab cronCopy`. Deleted on exit.
touch cronCopy
chmod 777 cronCopy
# ------------------
# [Print Crontab Jobs Function]
# --
# -- Context: Prints current crontab jobs in User Friendly Way
# --
# -- Returns: -> 1 when at least one job was printed, 0 when the crontab
# --            is empty (note: inverted vs. shell exit-status convention).
# -- Side effects: overwrites ./cronCopy with the live crontab.
# ------------------
print_crontab_jobs() {
	#update cronCopy with content of crontab
	crontab -l > cronCopy 2>/dev/null;
	#checks for content in cronCopy
	if [ -s cronCopy ]
	then
		#instanciate counter
		count=0
		#pipeline cronCopy content to "read" command
		# Each line splits into the five time fields plus the command ($cm
		# absorbs the rest of the line).
		cat cronCopy | while read min hour day month weekDay cm
		do
			#counter increment
			count=$((count + 1))
			#check for presence of "@" commands & print line
			if (echo "$min" | grep -Eq '@'); then
				#locate "@" line & assign to variable "$p"
				# Re-read the full line by number, since the 6-field split
				# above does not fit @reboot-style entries.
				p=$(cat cronCopy | sed -n "$count"p);
				#read in variable "$p" & break into components
				read preset pre_setcommand <<< "$p"
				#print output
				echo "Command No "$count": "$pre_setcommand""
				echo "Running: "$preset""
				echo ""
			#all other commands, format output line accordingly:
			else
				min=$(format_output_field "$min")
				hour=$(format_output_field "$hour")
				day=$(format_output_field "$day")
				month=$(format_output_field "$month")
				weekDay=$(format_output_field "$weekDay")
				#print output
				echo "Command No "$count": "$cm""
				echo "Running: on "$min" minute/s, "$hour" hour/s, on "$day" day(s) of month, on "$month" month(s), "$weekDay" day(s) of the week"
				echo ""
			fi
		done
		#success
		return 1
	else
		#error
		return 0
	fi
}
# ------------------
# [Format Output Field Function]
# --
# -- Context: Helper that renders a single crontab time field for the
# --          human-readable job listing.
# --
# -- Args: -> $1 ( Output Field )
# --
# -- Returns: -> prints "any" when $1 is the wildcard "*",
# --            otherwise prints "every $1"
# --
# ------------------
format_output_field() {
	case "$1" in
		\*) echo "any" ;;
		*) echo "every $1" ;;
	esac
}
# ------------------
# [Ensure Range Function]
# --
# -- Context: Ensures that numeric $1 argument is in range between arguments $2 and $3
# -- Ensures values are according to the crontab specification
# -- - Lists: "4-8,6-20"..."6,4,6,7"
# -- - Ranges: "4-6","6-7"..."0-23"
# -- - Steps: "*/2","0-23/4"..."*/2,*/5"
# -- OR comabination of the 3 ie: "*/2,*/3,2,3,4,8-12"
# --
# -- Args: -> $1 ( User Argument )
# -- -> $2 ( Range: Lower Bound )
# -- -> $3 ( Range: Upper Bound )
# --
# -- Returns: -> 1 ( True ) | 0 ( False )
# --
# -- NOTE(review): side effect -- appends to a file named `error` in the
# -- CWD and never truncates it, so it grows across calls (removed on exit).
# -- NOTE(review): the inner `echo | while read` loop runs in a subshell,
# -- which is why failures are recorded via the `error` file rather than a
# -- shell variable.
# ------------------
ensure_range() {
	#start error log file with 0 (no errors)
	echo "0" >> error
	#split input values using (,) as delimiters
	#store split values in 'array'
	IFS=',' read -r -a array <<< "$1"
	#iterate over 'array' & validate split values indiviadually
	for element in "${array[@]}"
	do
		#check for split values matchting patterns
		# step (N-M/K or */K), range (N-M), or single number
		if (echo "$element" | grep -Eq '^([0-9]+-[0-9]+|\*)/[0-9]+$|^[0-9]+-[0-9]+$|^[0-9]+$')
		then
			#value range check
			#split all digits in $element into subsequent substrings (individual digits)
			s=$(grep -Eo '[[:alpha:]]+|[0-9]+' <<< "$element")
			#iterate over the split digits in "$s" & validate them indiviadually for range
			echo "$s" | while read -r line;
			do
				#digit value range check, passed arguments $2 & $3
				if [ "$line" -ge "$2" ] && [ "$line" -le "$3" ]
				then
					#success
					continue
				else
					#error
					echo "***** Error: Invalid input, please check regulations and try again *****"
					#record error in log file
					echo "1" >> error
				fi
			done
			continue
		#check month name inputs for month field only
		elif (echo "$element" | grep -Eq '(^Jan$|^Feb$|^Mar$|^Apr$|^May$|^Jun$|^Jul$|^Aug$|^Sep$|^Oct$|^Nov$|^Dec$)') && [ "$3" -eq 12 ]
		then
			continue
		#check weekday name inputs for weekday field only
		elif (echo "$element" | grep -Eq '(^Mon$|^Tue$|^Wed$|^Thu$|^Fri$|^Sat$|^Sun$)') && [ "$3" -eq 6 ]
		then
			continue
		#otherwise error
		else
			echo "***** Error: Invalid input, please check regulations and try again *****"
			return 0
		fi
	done
	#read last line of error log file
	k=$(tail -n 1 error)
	#check for errors if
	#if log file contains num. 1
	if (echo "$k" | grep -Eq "1");
	then
		#error
		return 0
	else
		#success
		return 1
	fi
}
# ------------------
# [Ensure Pre-set Command Function]
# --
# -- Context: Prompts user to insert new crontab job
# --          from the pre-set commands list in crontab
# --
# -- If cron does not exist, it creates it
# -- Returns: -> 1 ( Success ) | 0 ( Failure )
# --            (inverted vs. shell exit-status convention)
# -- Side effects: appends to ./cronCopy and re-installs it via crontab.
# --
insert_crontab_job_pre_set() {
	# -------------------------------------
	# Display pre-set commands menu
	# -------------------------------------
	echo "----------------------"
	echo "Please select from the list of pre-set commands below:"
	echo ""
	echo "@reboot - Run once, at startup"
	echo "@yearly or @annually - Run once a year"
	echo "@monthly - Run once a month"
	echo "@weekly - Run once a week"
	echo "@daily or @midnight - Run once a day"
	echo "@hourly - Run once an hour"
	echo ""
	# NOTE(review): "pre-ser" in the prompt string looks like a typo for
	# "pre-set" (kept as-is: runtime output).
	echo 'Enter pre-ser command to use (ie: @reboot)'; read preset;
	# validate user inputs:
	if (echo "$preset" | grep -Eq '(^@reboot$|^@yearly$|^@annually$|^@monthly$|^@weekly$|^@daily$|^@midnight$|^@hourly$)')
	then
		# Prompt for command input
		echo 'Enter command to install'; read pre_setcommand;
		# Place the command in the crontab file:
		echo "$preset $pre_setcommand" >> cronCopy;
		# Update crontab file
		crontab cronCopy
		# Success
		return 1
	else
		echo "***** Error: Invalid parameters, please check regulations and try again *****"
		# Error
		return 0
	fi
}
# ------------------
# [Insert Crontab Job]
# --
# -- Context: Prompts user to insert new crontab job
# -- If cron does not exist, it creates it
# -- Returns: -> 1 ( Success ) | 0 ( Failure )
# --            (inverted vs. shell exit-status convention; returns 0 as
# --            soon as any field fails validate_input_field)
# -- Side effects: appends to ./cronCopy and re-installs it via crontab.
# --
# ------------------
insert_crontab_job() {
	# Inform User of input validation and criteria
	echo ""
	echo "Your input must be within fields specified range"
	echo "Ranges (4-7) | Lists: (5,6,7,4-20) | Steps: (*/2,0-15/2) are allowed"
	echo ""
	# Prompt for minutes input
	echo 'Enter minutes ( 0 - 59 ) | * for any'; read minutes
	validate_input_field "$minutes" "min"
	if [ ! $? -eq 1 ]
	then
		return 0
	fi
	# Prompt for hours input
	echo 'Enter hour ( 0 - 23 ) | * for any:'; read hour
	validate_input_field "$hour" "hour"
	if [ ! $? -eq 1 ]
	then
		return 0
	fi
	# Prompt for day of month input
	echo 'Enter the day of month ( 1 - 31 ) | * for any:'; read day
	validate_input_field "$day" "day"
	if [ ! $? -eq 1 ]
	then
		return 0
	fi
	# Prompt for month input
	echo 'Enter month ( 1 - 12 ) or dates: Jan, Feb... | * for any:'; read month
	validate_input_field "$month" "month"
	if [ ! $? -eq 1 ]
	then
		return 0
	fi
	# Prompt for weekday input
	echo 'Enter weekday ( 0 - Sun, 6 - Sat ) or dates: Mon, Tue... | * for any:'; read weekDay
	validate_input_field "$weekDay" "weekDay"
	if [ ! $? -eq 1 ]
	then
		return 0
	fi
	# Prompt for task command input
	echo 'Enter command to install'; read user_command
	# Using quotes to catch the asterixes '*'
	echo "$minutes $hour $day $month $weekDay $user_command" >> cronCopy;
	# Update crontab file
	crontab cronCopy
	# Success
	return 1
}
# ------------------
# [Validate Input Field Function]
# --
# -- Context: Validates whether user input is valid for the given crontab field
# --
# -- Args: -> $1 ( User Input )
# --       -> $2 ( Field ) of { 'min', 'hour', 'day', 'month', 'weekDay' }
# --
# -- Returns -> 1 ( True ) | 0 ( False )
# --            (inverted vs. shell exit-status convention)
# --
# ------------------
validate_input_field() {
	# A bare "*" wildcard is accepted for every field.
	if [ "$1" = "*" ]
	then
		return 1
	fi
	# Map the field name onto its numeric bounds, then delegate the actual
	# pattern/range checking to ensure_range with those bounds.
	local lower upper
	case "$2" in
		"min")     lower=0; upper=59 ;;
		"hour")    lower=0; upper=23 ;;
		"day")     lower=1; upper=31 ;;
		"month")   lower=1; upper=12 ;;
		"weekDay") lower=0; upper=6 ;;
		*)
			# Unknown field name: report and fail.
			echo "***** Error: Invalid parameters, please check regulations and try again *****"
			return 0
			;;
	esac
	ensure_range "$1" "$lower" "$upper"
	return $?
}
# - FUNCTION END
# -------------------------------------
# Crontab main menu command redirecting
# -------------------------------------
# Main interactive loop: show the menu, read a selection, dispatch.
# NOTE(review): menu numbers are validated with ensure_range 1..9, so 6-8
# pass validation but fall through to the "not listed" branch below.
while true
do
	# -------------------------------------
	# Display the menu
	# -------------------------------------
	echo "----------------------"
	echo "Welcome to mycrontab!"
	echo "Choose one of the following commands by entering the appropriate number."
	echo "--"
	echo "1. Display all crontab jobs."
	echo "2. Insert a job."
	echo "3. Edit a job."
	echo "4. Remove a job."
	echo "5. Remove all jobs."
	echo "9. Exit."
	echo ""
	#read in user input
	read -p "Select a command number: " num
	echo ""
	# -------------------------------------
	# User menu selection input validation
	# -------------------------------------
	#pass user input to vaidation function
	ensure_range "$num" 1 9
	#check for errors (return status)
	if [ ! $? -eq 1 ]
	then
		continue
	fi
	# -------------------------------------
	# Display all jobs (1)
	# -------------------------------------
	if [ "$num" -eq 1 ]
	then
		#call function to print crontab jobs
		print_crontab_jobs
		#check for errors (return status)
		if [ ! $? -eq 1 ]
		then
			echo "No Jobs to Display"
			echo ""
		fi
		continue
	# -------------------------------------
	# Insert a job (2)
	# -------------------------------------
	elif [ "$num" -eq 2 ]
	then
		#prompt user for custom or pre-set schdeule commands
		echo ""
		echo 'Please select your schedule input type:';
		echo "--"
		echo "1. Custom time (whenever you want)"
		echo "2. Choose from preset times (ie: weekly, daily...)"
		echo ""
		#read in user input
		read -p "Select a command number: " ans
		if [ "$ans" -eq 1 ]
		then
			#call insert job function (custom time selection)
			insert_crontab_job
		elif [ "$ans" -eq 2 ]
		then
			#call insert job function (Pre-set schedule time)
			insert_crontab_job_pre_set
		else
			#error, invalid command selection
			echo "---- Incorrect command selection ----"
			continue
		fi
		#check for errors
		if [ ! $? -eq 1 ]
		then
			continue
		else
			echo ""
			echo "Job inserted"
			echo ""
		fi
		continue
	# -------------------------------------
	# Edit a job (3)
	# -------------------------------------
	elif [ "$num" -eq 3 ]
	then
		#call function to print crontab jobs
		print_crontab_jobs
		#check for errors
		if [ ! $? -eq 1 ]
		then
			echo "*** Job list is empty ***"
			echo ""
			continue
		fi
		#prompt for command number to edit
		read -p "Select command number to be edited: " commandEdit
		#remove the command and update crontab file
		# NOTE(review): the selected job is deleted *before* the replacement
		# is entered; an invalid schedule-type selection or failed validation
		# below leaves the job removed.
		sed -i "$commandEdit"d cronCopy
		crontab cronCopy
		#prompt user for custom or pre-set schdeule commands
		echo ""
		echo 'Please select your schedule time input type:';
		echo "--"
		echo "1. Custom time (whenever you want)"
		echo "2. Choose from preset times (ie: weekly, daily...)"
		echo ""
		#read in user input
		read -p "Select a command number: " ans
		if [ "$ans" -eq 1 ]
		then
			#call insert job function (custom time selection)
			insert_crontab_job
		elif [ "$ans" -eq 2 ]
		then
			#call insert job function (Pre-set schedule time)
			insert_crontab_job_pre_set
		else
			#error, invalid command selection
			echo "---- Incorrect command selection ----"
			continue
		fi
		#check for errors
		if [ ! $? -eq 1 ]
		then
			continue
		else
			echo ""
			echo "Job successfully edited"
			echo ""
		fi
		continue
	# -------------------------------------
	# Remove a job (4)
	# -------------------------------------
	elif [ "$num" -eq 4 ]
	then
		#instanciate counter
		count=0
		#call function to print crontab jobs
		print_crontab_jobs
		#check for errors
		if [ ! $? -eq 1 ]
		then
			echo ""
			echo "*** Job list is empty ***"
			echo ""
			continue
		fi
		#prompt for command number to delete
		read -p "Select command to be deleted: " commandDel
		#remove the command and update crontab file
		sed -i "$commandDel"d cronCopy;
		crontab cronCopy;
		echo "Job deleted successfully."
		echo ""
		continue
	# -------------------------------------
	# Remove all jobs (5)
	# -------------------------------------
	elif [ "$num" -eq 5 ]
	then
		#remove all crontab jobs
		crontab -r >/dev/null 2>&1;
		echo "All jobs removed"
		echo ""
		continue
	# -------------------------------------
	# Exit the while loop (9)
	# -------------------------------------
	elif [ "$num" -eq 9 ]
	then
		break
	# -------------------------------------
	# Error if command is not listed
	# -------------------------------------
	else
		echo "Error: command number $num is not listed."
		echo ""
		continue
	fi
done
# -- END
# Clean up the helper files created by the setup lines and ensure_range.
# delete cronCopy file
rm "cronCopy"
# delete error file
rm "error"
# Exit crontab
echo "Exit successfull!"
| true |
0feeb0166d092a6b8a24a955ebda939768005acc | Shell | truthslayer/macscript | /collect-chrome.sh | UTF-8 | 1,407 | 2.875 | 3 | [] | no_license | #!/bin/bash
# Build a dated working directory, generate Condor submit files for the
# phantomjs ("puppet") and wkhtmltopdf render jobs, submit both, then ship
# the results via awsend.sh and Dropbox-Uploader.
tod=$(date +%Y-%m-%d.%H)
hd="/home1/j/jamiemor";
nc="$hd/html/news-clips";
direct="$hd/html/news-clips-puppet/$tod";
puppet="submitpuppet.vanilla";
puppetff="puppet-filler";
wkhf="wkh-filler";
wkh="submitwhktest.vanilla";
condor="/usr/local/bin/condor_submit";
mkdir "$direct";
chmod 777 "$direct";
cp "$hd/phantomjs-individual.sh" "$direct/";
cp "$hd/wkhtmltopdf-individual.sh" "$direct/";
cp "$hd/collect.sh" "$direct/";
cp "$hd/phantomjs/bin/phantomjs" "$direct/";
cp "$hd/html/news-clips/rasterize.js" "$direct/";
# Fix: the script referenced undefined $phant/$phantf in six places while
# defining $puppet/$puppetff above (mirroring the $wkh/$wkhf pair); use the
# defined names so the puppet submit file is actually created and submitted.
cp "$puppet" "$direct/";
cp "$wkh" "$direct/";
echo "hello" > really.txt;
echo "$hd/awsend.sh $direct $tod" >> "hi0.text";
# Write the Condor submit headers, then append the per-state job entries
# from the *-filler templates.
printf "Executable = phantomjs-individual.sh\\nArguments = http://\$(state) \$(state) $direct\n" > "$puppet";
printf "Executable = wkhtmltopdf-individual.sh\\nArguments = http://\$(state) \$(state) $direct\n" > "$wkh";
cat "$puppetff" >> "$puppet";
cat "$wkhf" >> "$wkh";
"$condor" "$puppet";
"$condor" "$wkh";
# Give the cluster jobs time to finish before collecting.
sleep 200;
chmod 777 "$direct";
cd "$hd";
echo "$hd/awsend.sh $direct $tod" >> hi.text;
"$hd"/awsend.sh "$direct" "$tod";
Dropbox-Uploader/dropbox_uploader.sh upload "$direct" Public/news-clips/"$tod";
# "$hd/shallow-input.sh" "html/news-clips/$tod" "$tod";
#cp change.sh "$direct/";
#cd "$direct";
#./change.sh;
#mv "$direct/*" "$hd/html/news-clips/$direct/";
#cd "$hd/html/news-clips/$direct/";
#find . -name "*.pdf" | xargs -i chmod 777 {};
| true |
98fd417b65ee6b5b99ad61fb4d6e70f8b9ab038b | Shell | MasonJF/SchoolWork | /Year 1 Archive/OSYS1000/pull | UTF-8 | 199 | 3.046875 | 3 | [] | no_license | #!/bin/bash
# Ask for a path relative to $HOME and push it to the Assignment1
# folder on the remote host; refuse empty input.
# -r keeps backslashes in the typed path literal.
read -r -p "Where would you like to Pull: " arg1
if [ -n "$arg1" ]
then
# Quoted so paths containing spaces survive word splitting.
rsync -rq "$HOME/$arg1" it806@142.177.80.67:~/Assignment1
else
echo "You cannot copy to nowhere silly goose! :("
fi
| true |
e91d95fa61a7e434e15450a03a67bf2064d0a802 | Shell | Henrywcj/MLII | /exprmnts/param-search-rnn/scripts/gen-vaid-best.sh | UTF-8 | 480 | 3.21875 | 3 | [
"MIT"
] | permissive | #! /usr/bin/env bash
# For every condor stdout log under the given directory, distill a
# one-line "valid-best" summary: the best (lowest) validation MSE
# entry, the run name, a timestamp prefix, and the log's final line.
for src in $1/stdout-*.txt
do
    fname=${src##*/}      # drop the directory part
    stem=${fname%.txt}    # drop the .txt extension
    echo $stem
    dest=$1/valid-best-$stem.txt
    # lowest MSE line (fields 7-8); newlines folded into spaces so the
    # whole summary stays on one line
    grep MSE $src | cut -d ' ' -f 7,8 | sort | head -n 1 | tr '\n' ' ' > $dest
    echo $stem | tr '\n' ' ' >> $dest
    # first 10 characters of the second-to-last log line (timestamp)
    tail -n 2 $src | head -n 1 | cut -c -10 | tr '\n' ' ' >> $dest
    tail -n 1 $src | tr -d '\n' >> $dest
    echo " " >> $dest
done
| true |
770c645f635f639ac008d03676e6e8e28d227a31 | Shell | spicyjack/lack-old | /common/initscripts/kernel-modules | UTF-8 | 1,422 | 3.75 | 4 | [] | no_license | #!/bin/sh
# $Id: kernel_modules,v 1.14 2009-07-16 00:23:49 brian Exp $
# Copyright (c)2007 Brian Manning <brian at portaboom dot com>
# PLEASE DO NOT E-MAIL THE AUTHOR ABOUT THIS SOFTWARE
# The proper venue for questions is the LACK mailing list at:
# http://groups.google.com/linuxack or <linuxack.googlegroups.com>
# Load every kernel module listed in /etc/modules via modprobe.
# Helper functions (colorize, colorize_nl, check_exit_status) and the
# $S_TIP / $DEBUG_LOG variables come from the sourced lack_functions.sh.
#
# Fall back to the default helper-library path when LACK_FUNCTIONS is
# unset or does not name an existing file.  Bug fix: the unquoted form
# `! [ -e $LACK_FUNCTIONS ]` collapsed to the one-argument test
# `[ -e ]`, which is always true, so the default was never applied.
if [ ! -e "$LACK_FUNCTIONS" ]; then
    LACK_FUNCTIONS="/etc/scripts/lack_functions.sh"
fi # if [ ! -e "$LACK_FUNCTIONS" ]
source "$LACK_FUNCTIONS"
ACTION=$1
BINARY=/sbin/modprobe
# Nothing to do on systems without modprobe.
[ -x "$BINARY" ] || exit 0
BASENAME="kernel-modules"
DESC="Loading kernel modules;"
BINARY_OPTS=""
MODULES_FILE="/etc/modules"
if [ ! -r "$MODULES_FILE" ]; then
    echo "ERROR: modules file $MODULES_FILE not found"
    check_exit_status 1 $BASENAME
fi # if [ ! -r "$MODULES_FILE" ]
case "$ACTION" in
    vars)
        echo "${BASENAME}:"
        exit 0
    ;;
    start)
        colorize_nl $S_TIP "${BASENAME}: $DESC"
        # Skip comment lines; one whitespace-free module name per line.
        for MODULE in $(grep -v "^#" "$MODULES_FILE");
        do
            colorize $S_TIP " - $MODULE"
            # BINARY_OPTS stays unquoted on purpose: an empty value
            # must vanish rather than become an empty argument.
            $BINARY $BINARY_OPTS $MODULE >> $DEBUG_LOG 2>&1
            check_exit_status $? $BASENAME
        done
        colorize_nl $S_TIP "${BASENAME}: All kernel modules in /etc/modules loaded"
    ;;
    stop)
        # noop
        :
    ;;
    restart|force-reload)
        # noop
        :
    ;;
    *)
        echo "Usage: $BASENAME {start|stop|restart|force-reload}" >&2
        exit 3
    ;;
esac
exit 0
# vi: set shiftwidth=4 tabstop=4 filetype=sh :
# the end!
| true |
ab9182369f1f814fc04680b5ab290d212c69747a | Shell | PilotPaul/Gadget-Injector | /test.sh | UTF-8 | 5,690 | 2.6875 | 3 | [] | no_license | ##########################################################################
# File Name: test.sh
# Author: PilotPaul
# mail: ass163@qq.com
# Created Time: Sat 10 Apr 2021 10:14:51 PM CST
#########################################################################
#!/bin/bash
# Fixed PATH so the test run is reproducible; includes the local
# gcc-6.4.0 toolchain directory used by this project.
export PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/src/gcc/gcc-6.4.0
# Optional first argument "leakcheck" wraps every tested command in
# valgrind with full leak reporting.
if [ "$1" = "leakcheck" ]; then
	VALGRIND="valgrind --leak-check=full --show-leak-kinds=all"
	echo "$VALGRIND"
fi
# test functionalities
#echo "=======================1. no argument===================="
#${VALGRIND} inject
#echo "=======================2. with verbose======================="
#${VALGRIND} inject -vvvvv
#echo "=======================3. show version======================="
#${VALGRIND} inject -V
#echo "=======================4. show help======================="
#${VALGRIND} inject -vvvvv -h
#${VALGRIND} inject -h
#echo "=======================5. with verbose and pid======================="
#${VALGRIND} inject -vvvvv `pidof test`
#${VALGRIND} inject `pidof test`
#echo "=======================6. with verbose, pid, output symbols to stdout======================="
#${VALGRIND} inject -O -vvvvv `pidof test`
#${VALGRIND} inject -O `pidof test`
#echo "=======================7. with verbose, pid, output symbols to file======================="
#rm -f log/symbols.log
#${VALGRIND} inject -Olog/symbols.log -vvvvv `pidof test`
#${VALGRIND} inject -Olog/symbols.log `pidof test`
#echo "=======================8. with verbose, pid, bt with name======================="
#${VALGRIND} inject -f invalid -vvvvv `pidof test`
#${VALGRIND} inject -f invalid `pidof test`
#${VALGRIND} inject -f func2 -vvvvv `pidof test`
#${VALGRIND} inject -f func2 `pidof test`
#echo "=======================9. with verbose, pid, bt with addr======================="
#${VALGRIND} inject -f 0x400578454856561 -vvvvv `pidof test`
#${VALGRIND} inject -f 0x400578454856561 `pidof test`
#${VALGRIND} inject -vvvvv -f 0x400580 `pidof test`
#${VALGRIND} inject -f 0x400580 `pidof test`
#${VALGRIND} inject -f 0x40058d `pidof test`
#${VALGRIND} inject -vvvvv -f 0X0 `pidof test`
#${VALGRIND} inject -f 0X0 `pidof test`
#echo "=======================10. with verbose, pid, inject so lib======================="
#${VALGRIND} inject -vvvvv -i /home/code/case/inject_test/libsub.so `pidof test`
#${VALGRIND} inject -i /home/code/case/inject_test/libsub.so `pidof test`
#${VALGRIND} inject -vvvvv -i libsub.so `pidof test`
#${VALGRIND} inject -i libsub.so `pidof test`
#echo "=======================11. with verbose, pid, stub======================="
#${VALGRIND} inject -vvvvv -s "original invalid" `pidof test`
#${VALGRIND} inject -vvvvv -s "original " `pidof test`
#${VALGRIND} inject -vvvvv -s "original" `pidof test`
#${VALGRIND} inject -vvvvv -s "" `pidof test`
#${VALGRIND} inject -vvvvv -s `pidof test`
#${VALGRIND} inject -s "original invalid" `pidof test`
#${VALGRIND} inject -vvvvv -s "invalid original" `pidof test`
#${VALGRIND} inject -s "invalid original" `pidof test`
#${VALGRIND} inject -vvvvv -s "invalid invalid" `pidof test`
#${VALGRIND} inject -s "invalid invalid" `pidof test`
#${VALGRIND} inject -vvvvv -s "original nextfunc" `pidof test`
#${VALGRIND} inject -s "original nextfunc" `pidof test`
#${VALGRIND} inject -vvvvv -i /home/code/case/inject_test/libsub.so -s "original stubfunc" `pidof test`
#${VALGRIND} inject -i /home/code/case/inject_test/libsub.so -s "original stubfunc" `pidof test`
#echo "=======================12. with verbose, pid, dump block======================="
#${VALGRIND} inject -vvvvv -m "0x1234 6" `pidof test`
#${VALGRIND} inject -m "0x1234 6" `pidof test`
#${VALGRIND} inject -vvvvv -m "0x40058d 17" `pidof test`
#${VALGRIND} inject -m "0x40058d 17" `pidof test`
#${VALGRIND} inject -vvvvv -m "0x40058d 0" `pidof test`
#${VALGRIND} inject -m "0x40058d 0" `pidof test`
#${VALGRIND} inject -vvvvv -m "0x40058d 9999999999" `pidof test`
#${VALGRIND} inject -m "0x40058d 9999999999" `pidof test`
#${VALGRIND} inject -vvvvv -m "0x40058d -9999" `pidof test`
#${VALGRIND} inject -m "0x40058d -9999" `pidof test`
#${VALGRIND} inject -vvvvv -m "0x0 -9999" `pidof test`
#${VALGRIND} inject -m "0x0 -9999" `pidof test`
#echo "=======================13. with verbose, pid, set pc======================="
#${VALGRIND} inject -p invalid -vvvvv `pidof test`
#${VALGRIND} inject -p invalid `pidof test`
#${VALGRIND} inject -p func2 -vvvvv `pidof test`
#${VALGRIND} inject -p func2 `pidof test`
#echo "=======================14. with verbose, pid, bt with addr======================="
#${VALGRIND} inject -p 0x400578454856561 -vvvvv `pidof test`
#${VALGRIND} inject -p 0x400578454856561 `pidof test`
#${VALGRIND} inject -vvvvv -p 0x400580 `pidof test`
#${VALGRIND} inject -p 0x400580 `pidof test`
#${VALGRIND} inject -p 0x40058d `pidof test`
#${VALGRIND} inject -vvvvv -p 0X0 `pidof test`
#${VALGRIND} inject -p 0X0 `pidof test`
#echo "=======================15. with verbose, pid, set pc======================="
#${VALGRIND} inject -t invalid -vvvvv `pidof test`
#${VALGRIND} inject -t invalid `pidof test`
#${VALGRIND} inject -t func2 -vvvvv `pidof test`
#${VALGRIND} inject -t func2 `pidof test`
#echo "=======================16. with verbose, pid, bt with addr======================="
#${VALGRIND} inject -t 0x400578454856561 -vvvvv `pidof test`
#${VALGRIND} inject -t 0x400578454856561 `pidof test`
#${VALGRIND} inject -vvvvv -t 0x400580 `pidof test`
#${VALGRIND} inject -t 0x400580 `pidof test`
#${VALGRIND} inject -t 0x40058d `pidof test`
#${VALGRIND} inject -vvvvv -t 0X0 `pidof test`
#${VALGRIND} inject -t 0X0 `pidof test`
# Final marker so log scrapers can tell the suite ran to completion.
echo "=======================done======================="
| true |
336c064122da2276c91a90b7ea7a11017d967869 | Shell | reallistic/cryptostat | /deploy/stop_app | UTF-8 | 119 | 2.859375 | 3 | [] | no_license | #!/bin/bash -xe
# Stop the bitty service through its init script, but only when that
# init script is actually installed on this host.
if test -f "/etc/init.d/bitty"; then
  echo "service found. stopping"
  . /etc/init.d/bitty stop
fi
| true |
046d3409d4910be88a91e1da45c1ad9237f94628 | Shell | ecmwf/eccodes | /tests/grib_statistics.sh | UTF-8 | 2,111 | 2.8125 | 3 | [
"Apache-2.0"
] | permissive | #!/bin/sh
# (C) Copyright 2005- ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
#
# In applying this licence, ECMWF does not waive the privileges and immunities granted to it by
# virtue of its status as an intergovernmental organisation nor does it submit to any jurisdiction.
#
# Shared test helpers: provides ${tools_dir}/${data_dir} and is
# presumably where errexit is enabled so the bare `[ ... ]` checks
# below abort the test on mismatch -- confirm in include.ctest.sh.
. ./include.ctest.sh
files="regular_latlon_surface.grib2 regular_latlon_surface.grib1"
# Exercise grib_filter's max/min/average statistics on tiny 2x2
# fields and compare against the stored reference output.
for file in $files
do
cat >statistics.filter<<EOF
set Ni=2;
set Nj=2;
set decimalPrecision=4;
set values={2.0,2.0,2.0,2.0};
print "values=[values]";
print "max=[max] min=[min] average=[average]";
set values={2.0,5.0,2.0,2.0};
print "values=[values]";
print "max=[max] min=[min] average=[average]";
EOF
${tools_dir}/grib_filter statistics.filter ${data_dir}/$file > statistics.out
diff statistics.out ${data_dir}/statistics.out.good
done
rm -f statistics.out statistics.filter
# GRIB with no missing values but some entries = 9999
# See ECC-478
# ---------------------------------------------------
input=${data_dir}/lfpw.grib1
temp1=temp1.statistics.grib
temp2=temp2.statistics.grib
stats=`${tools_dir}/grib_get -w count=50 -F%.2f -n statistics $input`
[ "$stats" = "10098.00 0.00 1064.19 3066.07 2.57 4.61 0.00" ]
# Scaling values in presence of real 9999 values
# (output must be identical whether or not a missingValue is declared)
${tools_dir}/grib_set -s scaleValuesBy=0.5 $input $temp1
${tools_dir}/grib_set -s missingValue=1.0E34,scaleValuesBy=0.5 $input $temp2
${tools_dir}/grib_compare $temp1 $temp2
# Offsetting values in presence of real 9999 values
${tools_dir}/grib_set -s offsetValuesBy=0.5 $input $temp1
${tools_dir}/grib_set -s missingValue=1.0E34,offsetValuesBy=0.5 $input $temp2
${tools_dir}/grib_compare $temp1 $temp2
# ECC-511
# GRIB2 message from NCEP/GFS with grid_complex_spatial_differencing and
# missingValueManagementUsed. No bitmap but missing values embedded in data
input=${data_dir}/gfs.complex.mvmu.grib2
stats=`${tools_dir}/grib_get -F%.2f -p max,min,avg $input`
[ "$stats" = "2.81 0.00 0.30" ]
rm -f $temp1 $temp2
| true |
92ae300bd0a021a069269b2588ad55043944d970 | Shell | SamuelErickson/Fitzroy_V1_1 | /Old_Unused/startBox.sh | UTF-8 | 706 | 3 | 3 | [] | no_license | #!/usr/bin/env bash
# If this is the first time running this script, you first need to make it executable by user.
# Enter the following command in the linux shell
# chmod u+x startBox.sh
# This shell script runs the commands needed to start one Fitzroy system unit box
# initialize the pippio daemon, which operates the input-output pins
# (pigpiod is the pigpio GPIO daemon and needs root)
sudo pigpiod
# Example: python3 run_environmental_control.py 28 80 0.5 8 30 12 00
# The above command starts a Fitzroy box at 28 C, 80% relative humidity, 50% fan power, with sunrise at 8:30 AM,
# and a sunset 12 hours 00 minutes later at 20:30.
# Run detached inside a named screen session so the controller keeps
# running after logout (here: 28 C, 80% RH, 50% fan, sunrise 14:54,
# sunset 1 h 44 m later).
screen -d -m -S environmental_control bash -c "python3 run_environmental_control.py 28 80 0.5 14 54 1 44"
| true |
41e0f29fdeaecf477f9218a48a7ae70346d1232b | Shell | sirosen/mpcs51083-termination-monitor | /watcher/terminate_ids.sh | UTF-8 | 589 | 4.1875 | 4 | [] | no_license | #!/bin/bash
# Terminate the EC2 instances whose IDs arrive on stdin, one per line.
# loginfo/logerr come from logfuncs.sh next to this script.
current_dir="$(dirname "$0")"
source "$current_dir/logfuncs.sh"
# read from stdin
instance_ids="$(cat -)"
loginfo "Will try to terminate the follwing ids: $(echo -n "$instance_ids" | tr '\n' ',')"
# iterate over the instance ids and terminate them
for id in $instance_ids;
do
    loginfo "Attempting to terminate $id"
    # NOTE(review): --dry-run makes the API only *validate* the call;
    # no instance is actually terminated (and the call reports an
    # "error" even on success). Confirm this flag is intentional.
    out="$(aws --profile mpcs ec2 terminate-instances --dry-run --instance-ids "$id" 2>&1)"
    rc=$?
    if [ $rc -ne 0 ];
    then
        logerr "Could not terminate $id! Output: $out"
    else
        loginfo "Successfully terminated $id"
    fi
done
| true |
fc96a947b2f5bb8fcce01cdd91a2686acccd6d1f | Shell | matthiasdiener/mpich | /mpid/ch_p4mpd/install_ch_p4mpd | UTF-8 | 1,356 | 3.734375 | 4 | [
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-other-permissive"
] | permissive | #! /bin/sh
#
# Install script for the ch_p4mpd device
# This script is called by mpiinstall to install the device.
# It should add commands to the file named by UNINSTALLFILE to remove
# any file that it adds. It creates the mpichboot and mpichstop scripts
# Guard: MAKE and XMODE must come from the calling mpiinstall.
if [ -z "$MAKE" -o -z "$XMODE" ] ; then
    echo "This script must be invoked from mpiinstall"
    exit 1
fi
if [ -f mpid/mpd/Makefile ] ; then
    export XMODE
    # Install the mpd daemon itself (subshell keeps our cwd intact).
    (cd mpid/mpd ; $MAKE install UNINSTALLFILE=$UNINSTALLFILE DESTDIR=$DESTDIR )
    # Generate the mpichboot helper that starts an mpd daemon.
    # The heredoc delimiter is unquoted, so $HOME and ${bindir} below
    # are expanded NOW, at install time, into the generated script.
    rm -f $DESTDIR${bindir}/mpichboot
    cat >$DESTDIR${bindir}/mpichboot <<EOF
#! /bin/sh
if [ ! -s $HOME/.mpdpasswd -a ! -s $HOME/.mpd.conf ] ; then
    echo "In mpichboot:"
    echo "A .mpd.conf file is required before starting an mpd demon."
    echo "See the documentation on mpd in the User Manual."
    exit 1
fi
# Use the -b option to force mpd into the background, orphaned from the
# calling process.
${bindir}/mpd -b &
EOF
    chmod $XMODE $DESTDIR${bindir}/mpichboot
    # Record how to undo this install.
    echo "rm -f ${bindir}/mpichboot" >>$UNINSTALLFILE
    # Generate the matching mpichstop helper.
    rm -f $DESTDIR${bindir}/mpichstop
    cat >$DESTDIR${bindir}/mpichstop <<EOF
#! /bin/sh
${bindir}/mpdallexit
EOF
    chmod $XMODE $DESTDIR${bindir}/mpichstop
    echo "rm -f ${bindir}/mpichstop" >>$UNINSTALLFILE
else
    echo "Could not find Makefile for mpid/mpd!"
    echo "Install not completed"
    exit 1
fi
#
| true |
ee08c31de2ff9c5444f9cdf4cdd97eaeb8a92acb | Shell | daggerok/sonar-quality-gates-build-breaker | /bin/compose-recreate.sh | UTF-8 | 267 | 2.609375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
###
# bash ./bin/compose-recreate.sh
###
# Pass the optional first CLI argument straight through to the helpers.
args=${1:-}
# Locate the project root: one directory above where this script lives.
script_dir=$(dirname "${BASH_SOURCE[0]}")
root_dir=$(cd "${script_dir}/.." && pwd)
# Tear everything down, then create it again, always from the root.
cd "${root_dir}" && bash ./bin/compose-kill.sh ${args}
cd "${root_dir}" && bash ./bin/compose-create.sh ${args}
| true |
fa47c59b1ccb09fa10c8a4e8016cd7c90d59ef09 | Shell | aayush2710/linux-config | /Anaconda/Anaconda.sh | UTF-8 | 1,004 | 2.515625 | 3 | [] | no_license | #Install Anaconda
# Anaconda release to install and the Python version it bundles
# (pythonver is used below when building site-packages paths).
pkgver=2020.11
pythonver=3.8
#Download Conda
wget https://repo.anaconda.com/archive/Anaconda3-$pkgver-Linux-x86_64.sh
bash Anaconda3-$pkgver-Linux-x86_64.sh -b -p $HOME/anaconda3
#Patch Anaconda Navigator
#bash vscode_path.sh
#Conda Init
#source /home/$USER/anaconda3/bin/activate
#conda init
#source ~/.zshrc
#Create Launcher Icons
sudo cp Anaconda.desktop /usr/share/applications
sudo cp Spyder.desktop /usr/share/applications
# Icon paths derive from $pythonver instead of hard-coding python3.8,
# so bumping the version above keeps the launcher icons working.
echo " Icon=/home/$USER/anaconda3/lib/python$pythonver/site-packages/anaconda_navigator/static/images/anaconda-icon-256x256.png" | sudo tee -a /usr/share/applications/Anaconda.desktop
echo " Icon=/home/$USER/anaconda3/lib/python$pythonver/site-packages/anaconda_navigator/static/images/logos/spyder.png" | sudo tee -a /usr/share/applications/Spyder.desktop
#Conda Packages
conda install -y pytorch torchvision cpuonly -c pytorch
conda install -y -c conda-forge keras
conda install -y -c anaconda tensorflow-gpu
# Bug fix: "-c -y" made conda treat "-y" as the channel name; the
# -y flag must come before -c <channel>.
conda install -y -c anaconda h5py
# -y keeps the final update non-interactive like the installs above.
conda update -y --all
| true |
3954949f59e8272ec75bbeee3421e7ead1e1bd7b | Shell | makro/scripts | /repocopy.sh | UTF-8 | 4,673 | 4.15625 | 4 | [] | no_license | #!/bin/bash
#
# Copy emulator code into hardware build area
#
# 12-Dec-2013 Marko Kallinki Copy also overlay files
# 18-Nov-2013 Marko Kallinki Fix root folder handling
# 14-Nov-2013 Marko Kallinki Code refactored (user query)
# 12-Nov-2013 Marko Kallinki Initial version (hardcoded)
#
# Paths and globals used throughout; overlay files live in a parallel
# tree under OVERLAYPATH inside each build area.
OVERLAYPATH="/device/aosp/ara/overlay"
ME="`basename $0`"
USAGEVERBOSE="\
Script to keep emulator and hw build areas in sync
 ${ME} -r - Clear all and sync .repo from other build area (slow!)
 ${ME} -a - Sync current application files from other build area
"
# ${ME} -f <dir> - Source build area. If not given, will be asked.
echo "Repository copy script"
currentdir=$PWD
rootdir=$PWD
#
# Process parameter
#
if [ $# -eq 0 ]; then
    echo "$USAGEVERBOSE"
    exit 0
fi
if [[ $1 = "-h" || $1 = "--help" ]]; then
    echo "$USAGEVERBOSE"
    exit 0
fi
if [ $1 = "-help" ]; then
    echo "Phfff. Either use -h or --help, not -help!"
    exit 1
fi
# -a only makes sense inside an Android build shell, where
# ANDROID_BUILD_TOP is exported by envsetup/lunch.
if [[ $1 == "-a" ]]; then
    if ! [[ $ANDROID_BUILD_TOP ]]; then
        echo -e "\e[00;31mError: You need to be at build shell!\e[00m"
        exit 1
    fi
fi
if [[ $currentdir = *$OVERLAYPATH* ]]; then
    echo -e "\e[00;31mError: You are under overlay. Run this at application directory!\e[00m"
    exit
fi
#
# Get other project path
#
# Interactively pick the sibling directory to sync FROM, unless
# $sourcerepo was already supplied via the environment.
if ! [[ $sourcerepo ]]; then
    if [[ $ANDROID_BUILD_TOP ]]; then
        cd $ANDROID_BUILD_TOP
    fi
    echo
    echo -e "\e[00;31mBuild area to be overwritten: $rootdir\e[00m"
    echo
    echo "Choose the 'emulator' build area to sync from:"
    echo "(If none is correct, exit and use -r <dir> to give proper one)"
    cd ..
    folders=(`ls -d */`)
    num=0
    for f in ${folders[*]}; do
        num=$(($num + 1))
        echo "  $num  $PWD/$f"
    done
    limit=${#folders[*]}
    selection=100
    while [[ $selection -gt $limit ]]; do
        echo "Choose 1 - $limit (0 to quit)"
        read selection
    done
    if [[ $selection -eq 0 ]]; then
        echo "End user quit."
        exit 0
    fi
    selection=$(($selection - 1))
    sourcerepo=$PWD/${folders[$selection]}
    # strip the trailing slash that `ls -d */` appended
    sourcerepo=${sourcerepo%*/}
    cd $currentdir
fi
#
# Timer for background processes
# Shows a live elapsed-seconds counter until the given PID exits.
#   $1     - PID of the background job to monitor
#   $timer - global seconds counter, updated in place by this function
# For jobs that finish almost instantly nothing is printed at all.
#
function waittimer {
    bgid=$1
    # Poll the process directly with `kill -0` instead of parsing
    # `ps | grep $bgid`, which could also match any unrelated PID or
    # line merely containing the digits of $bgid.
    while kill -0 "$bgid" 2>/dev/null; do
        timer=$(($timer + 1))
        tput cuu 1
        echo -e "\e[00;32m$timer seconds elapsed\e[00m   "
        sleep 1
    done
}
#
# Repo clean and sync
#
# -r: hard-reset every git project, replace the local .repo metadata
# with the copy from $sourcerepo, then re-sync the work tree locally.
if [[ $1 == "-r" ]]; then
    starttime=$(date +%s)
    cd $rootdir
    echo -e "\nSyncing repository operation started. Will take quite some time!"
    echo -e "\e[00;32mStep 1: Cleaning current workarea before sync...\e[00m\n"
    if [[ -d .repo ]]; then
        repo forall -c 'git reset --hard HEAD' 2>&1 1>/dev/null &
        waittimer $!
    fi
    tput cuu 1
    echo -e "\e[00;32mStep 2: Deleting old repository...\e[00m\n"
    rm -Rf .repo 2>&1 1>/dev/null &
    waittimer $!
    tput cuu 1
    echo -e "\e[00;32mStep 3: Copying new repository...\e[00m\n"
    cp -r $sourcerepo/.repo $rootdir 2>&1 1>/dev/null &
    waittimer $!
    tput cuu 1
    echo -e "\e[00;32mStep 4: Updating build area files...\e[00m"
    # -l: local-only operation, no network fetch
    repo sync -l
    #2>&1 1>/dev/null &
    #waittimer $!
    elapsed=$(($(date +%s) - $starttime))
    tput cuu 2
    echo "                                       "
    echo -e "\nRepositoy sync from $sourcerepo made."
    echo -e "\e[00;32mSync ended $(date +%T) and took $(($elapsed/60)) minutes $(($elapsed%60)) seconds\e[00m"
    echo
fi
#
# Copy files from 'emulator' application folder
#
# -a: copy the current application directory (and its overlay twin,
# when present) from $sourcerepo into this build area; cp -u copies
# newer files only.
if [[ $1 == "-a" ]]; then
    #
    # Folder tirckery
    #
    appdir1=${PWD##*/}
    appdir2=${PWD##*/}
    cd ..
    relativedir=${PWD#*$ANDROID_BUILD_TOP}/
    rootdir=$ANDROID_BUILD_TOP
    # Ugly exception for root .
    if [[ "$relativedir$appdir1" = "/$rootdir" ]]; then
        appdir1=""
        relativedir=""
    fi
    targetdir="${sourcerepo}${relativedir}$appdir1"
    echo -e "\nCloning emulator '$appdir2' code to hw build area..."
    check=(`find $targetdir | wc -l`)
    echo -e "Checking changes from $check files (+ overlay)\n"
    if [[ $check -gt 300000 ]]; then
        echo -e "\e[00;31mWarning: Will take quite a lot - are you sure [y/n] ?\e[00m"
        read answer
        if ! [[ $answer = Y* || $answer = y* ]]; then
            echo "End user calcelled."
            exit
        fi
    fi
    cd $currentdir
    cd ..
    #echo -e "Now: $PWD\ncp -ur $targetdir/* $appdir2\n"
    cp -ur $targetdir/* $appdir2 2>&1 1>/dev/null &
    waittimer $!
    # Repeat the copy for the overlay tree, if the source has one.
    targetdir="${sourcerepo}$OVERLAYPATH${relativedir}$appdir1"
    if [ -d $targetdir ]; then
        cd $rootdir$OVERLAYPATH${relativedir}
        #echo -e "Now: $PWD\ncp -ur $targetdir/* $appdir2\n"
        cp -ur $targetdir/* $appdir2 2>&1 1>/dev/null &
        waittimer $!
    fi
    tput cuu 1
    echo "Ready                 "
    echo
fi
| true |
da44cc755ccdece5a781dd5c5e3e1ee2fa0fa6be | Shell | karafecho/nih-exposures-api | /docker/run-nih-exposures-local.sh | UTF-8 | 797 | 3.296875 | 3 | [] | no_license | #!/usr/bin/env bash
# Build the nih-exposures image and (re)start its container.
PATH_TO_DOCKERFILE=../server/
LOCAL_PORT=5000
DOCKER_NETWORK=database_default
# Leave empty for plain HTTP; point at a certs directory for HTTPS.
PATH_TO_SSL_CERTS=''

cd "$PATH_TO_DOCKERFILE"
docker build -t nih-exposures .
cd -

# Remove any previous container before starting a fresh one.
docker stop nih-exposures
sleep 2s
docker rm -fv nih-exposures
sleep 2s

# [[ -n ... ]] replaces the double-negative [[ ! -z ... ]].
if [[ -n "${PATH_TO_SSL_CERTS}" ]]; then
    docker run -d --name nih-exposures \
        -p "${LOCAL_PORT}":5000 \
        --network="${DOCKER_NETWORK}" \
        -v "${PATH_TO_SSL_CERTS}":/certs \
        nih-exposures
    echo "NIH Exposures API running at https://localhost:"${LOCAL_PORT}"/v1/ui/#/default"
else
    docker run -d --name nih-exposures \
        -p "${LOCAL_PORT}":5000 \
        --network="${DOCKER_NETWORK}" \
        nih-exposures
    echo "NIH Exposures API running at http://localhost:"${LOCAL_PORT}"/v1/ui/#/default"
fi
exit 0; | true |
9e3c1b2de916bc7ea31aaada16a688f396b6f782 | Shell | dotmpe/user-conf | /script/match-uc.lib.sh | UTF-8 | 421 | 3.59375 | 4 | [] | no_license | # Take any string and return a Regex to match that exact string, see
# match-grep-pattern-test.
# Escapes every character outside the safe set so the result matches
# the input string literally when used as an extended (grep -E / sed
# -E) regex.  The sed binary is read from the global $gsed.
match_grep() # String
{
  # printf instead of echo: echo would swallow inputs such as "-n"
  # or "-e" and may mangle backslashes, corrupting the pattern.
  printf '%s\n' "$1" | $gsed -E 's/([^A-Za-z0-9{}(),?!@+_])/\\\1/g'
}
# To escape filenames and perhaps other values for use as grep literals
match_grep_pattern_test()
{
p_="$(match_grep "$1")"
# test regex
echo "$1" | grep -q "^$p_$" || {
error "cannot build regex for $1: $p_"
return 1
}
}
| true |
b4946afb5f92a3173e6f2d188c17ad321a6b48a8 | Shell | songshu189/django-ec2 | /scripts/downloadbootstrap.sh | UTF-8 | 470 | 3.5 | 4 | [] | no_license | #!/bin/bash
# Fetch a Bootstrap "dist" release and unpack it under ./static.
if [ ! -d static ]; then
    mkdir static
else
    echo "static already exists"
fi
# Version comes from $1, or is asked for interactively when absent.
if [ -z "$1" ]; then
    echo "Please input bootstrap version(like 4.2.1):"
    # -r keeps any backslashes in the typed version literal
    read -r bootstrap_version
else
    bootstrap_version=$1
fi
wget -O bootstrap.zip "https://github.com/twbs/bootstrap/releases/download/v${bootstrap_version}/bootstrap-${bootstrap_version}-dist.zip"
unzip bootstrap.zip
mv "bootstrap-${bootstrap_version}-dist" "static/bootstrap-${bootstrap_version}"
rm bootstrap.zip | true |
cd9e5c65d45bdc8877b1e4acb8e5950882fd39d8 | Shell | alathers/tutorials | /bash/lesson1/1-HelloWorld.sh | UTF-8 | 754 | 3.375 | 3 | [] | no_license | #!/bin/bash # This is called a "shabang", #! is a special character combo when the very first 2 characters in a shell script
###########################
# Adam Lathers
# adam.lathers@citrix.com,alathers@gmail.com
# 7/29/2014
###########################
##########################
# A quick demo of basic script attributes, including using the interpreter definition, and setting permissions to include the execute bit
#
##########################
echo "Hello World" # echo with no path, may or may not work
/bin/echo "Hello World" # echo with full path should always work, as long as the path is correct for the host you're running this on
echo;echo;echo
echo "--------------------------------------------------------------"
echo "Now how's it work?"
# $0 is this script's own path, so this prints the script's source
cat $0
exit
| true |
269fafe7d43dd9a318f4aac65f1dfd225b6bbada | Shell | RaffaeleCanale/scripts-git | /src/modules/merge | UTF-8 | 425 | 3.546875 | 4 | [] | no_license | #!/bin/bash
# Wrapper around `git merge` with two shortcuts.
# NOTE(review): $GIT, the ARGUMENTS array (the module's CLI args) and
# $BRANCH are supplied by the surrounding module framework -- they are
# not defined in this file; verify against the framework's contract.
function moduleRun() {
    # Bare issue number: merge the matching "FUL-<n>" branch.
    if [ $# -gt 0 ] && [[ "$1" =~ ^[0-9]+$ ]]; then
        ARGUMENTS[0]="FUL-$1"
        $GIT merge "${ARGUMENTS[@]}"
    # "parent": refresh the parent branch, then merge it into ours.
    elif [ $# -gt 0 ] && [ "$1" == "parent" ]; then
        parent="`git parent`"
        $GIT checkout "$parent"
        $GIT pull
        $GIT checkout "$BRANCH"
        ARGUMENTS[0]="$parent"
        $GIT merge "${ARGUMENTS[@]}"
    # Anything else: plain pass-through to `git merge`.
    else
        $GIT merge $@
    fi
}
| true |
a775006683e48a8024e971543fa4375ea5245e7b | Shell | alexfinnarn/highlightjs_wysiwyg | /tests/travis-ci/run-js-tests.sh | UTF-8 | 715 | 3.203125 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env bash
# Work from the bundle checkout when one is specified, otherwise from
# the express profile itself.
if [ "${BUNDLE_NAME}" != "null" ]; then
    cd ${ROOT_DIR}/drupal/sites/all/modules/${BUNDLE_NAME}
else
    cd ${ROOT_DIR}/drupal/profiles/express
fi
# Last non-empty line of the two most recent commit messages, kept
# only when it carries the "===js" marker.
EXPRESS_COMMIT_HAS_JS="$(git log -2 --pretty=%B | awk '/./{line=$0} END{print line}' | grep '===js')"
# Run JS tests if merging PR into dev or has JS in it.
if [ "${TRAVIS_EVENT_TYPE}" == "push" ] || [ "${EXPRESS_COMMIT_HAS_JS}" ]; then
    echo "Running Express JS tests..."
    ${ROOT_DIR}/drupal/profiles/express/tests/behat/bin/behat --stop-on-failure --strict --config ${ROOT_DIR}/drupal/profiles/express/tests/behat/behat.travis.yml --verbose --tags ${EXPRESS_JS_BEHAT_TAGS}
    # NOTE(review): `earlyexit` is not defined in this script --
    # presumably a CI-provided helper; verify it exists on the runner.
    earlyexit
else
    echo "Not Running Express JS tests..."
fi
exit 0
| true |
81c86b3a10a69c7e02202484714192dad591cedd | Shell | wisedier/beautiful-dev | /services/bdev/gitlab/deploy.sh | UTF-8 | 703 | 2.65625 | 3 | [] | no_license | #!/bin/bash
# Directory this script lives in (inner substitution quoted so paths
# with spaces survive).
BASEDIR=$(dirname "$(realpath "$0")")

# Generate fresh Rails secrets for GitLab.  The unquoted EOF means the
# $( ... ) substitutions below run NOW and their output is embedded.
# NOTE(review): the key is spelled "signinig" -- GitLab expects
# "openid_connect_signing_key"; confirm before relying on OIDC.
cat << EOF > secrets.yml
production:
  secret_key_base: $(head -c 512 /dev/urandom | LC_CTYPE=C tr -cd 'a-zA-Z0-9' | head -c 128)
  otp_key_base: $(head -c 512 /dev/urandom | LC_CTYPE=C tr -cd 'a-zA-Z0-9' | head -c 128)
  db_key_base: $(head -c 512 /dev/urandom | LC_CTYPE=C tr -cd 'a-zA-Z0-9' | head -c 128)
  openid_connect_signinig_key: |
    $(openssl genrsa 2048 | awk '{print "      " $0}')
EOF

# Bug fix: $(BASEDIR) tried to *execute* a command named "BASEDIR";
# the variable expansion ${BASEDIR} is what was intended.
kubectl create secret generic gitlab-rails-secret \
    --from-file=${BASEDIR}/secrets.yml -o yaml --dry-run \
    | kubectl apply -f -

helm delete --purge gitlab
helm install --name gitlab --namespace bdev -f ${BASEDIR}/values.yaml ${BASEDIR}/helm
| true |
b8e10d6eb8dd3c713d4373534274da3182def90a | Shell | kristen-schneider/exome-bakeoff | /pipeline_metrics/scripts/5-metrics/ari_sb_driver.sh | UTF-8 | 1,104 | 3.53125 | 4 | [] | no_license | #!/usr/bin/env bash
#
#SBATCH -p short
#SBATCH --job-name=strandbias_driver
#SBATCH --ntasks=1
#SBATCH --time=4:00:00
#SBATCH --mem-per-cpu=10G
#SBATCH --output=/Users/krsc0813/exome-bakeoff/bash_scripts/5-metrics/strandbias_driver.out
#SBATCH --error=/Users/krsc0813/exome-bakeoff/bash_scripts/5-metrics/strandbias_driver.err
# PURPOSE: submit one SLURM worker job per pileup .bed file that does
# not yet have an output file, skipping .tbi index files and samples
# whose output already exists.
pileups=$1    # 1. directory of pileup files (used as a prefix below,
              #    so it is expected to carry a trailing slash)
regions=$2    # 2. regions file, forwarded to the worker
out_dir=$3    # 3. output directory, forwarded to the worker

echo "Starting strandbias calculation"
for pileup_file in `ls $pileups`
do
	if [[ $pileup_file == *.bed* ]] && [[ $pileup_file != *.tbi ]]; then
		echo $pileup_file
		# sample name = filename up to the first '.'
		sample=${pileup_file%%.*}
		out_file=${out_dir}${sample}
		# already-processed samples are skipped
		if test -e $out_file; then
			echo "$out_file already exists, so we skip processing it"
		else
			echo "Submitting a job for" $sample
			sbatch ari_sb_worker.sh $sample $pileups$pileup_file $regions $out_dir
		fi
	fi
done
| true |
b28f65e0e1a70f19b4010d6b86d5866844942905 | Shell | stevengm45/holberton-system_engineering-devops | /0x05-processes_and_signals/7-highlander | UTF-8 | 162 | 2.546875 | 3 | [] | no_license | #!/usr/bin/env bash
# This script display its a infinity loop
# Prints a line every 2 seconds forever. SIGTERM is trapped so a plain
# `kill` only prints the trap message and the loop keeps running; only
# an untrappable signal (SIGKILL) can stop it.
trap "echo 'I am invincible!!!'" SIGTERM
while true
do
    echo "To infinity and beyond"
    sleep 2
done
| true |
c0ff0c732b087f0f346375ea77d73c62d8fbaea6 | Shell | denniscaudell/dot-files | /Scripts/ssh-ddnet-svr | UTF-8 | 699 | 3.5625 | 4 | [] | no_license | #!/usr/bin/bash
# Map human-friendly machine names to their Hamachi client IDs.
declare -A MACHINES=( ["maj"]="210-376-482" ["gpu"]="206-985-814" )
MSG_ERR_PAR="Missing non-optional parameter!"
MSG_ERR_OFF="The selected server is offline"
MSG_HELP="Usage: ssh-ddnet-srv USER MACHINE_ID [COMMAND]"

# Both USER and the machine name are mandatory.
[[ -z $1 ]] || [[ -z $2 ]] && echo -e "\n$MSG_ERR_PAR\n$MSG_HELP\n" && exit 1

USER="${1}"
MACHINE_ID="${MACHINES[$2]}"
# The human-readable name is simply $2; it was previously (and
# mistakenly) assigned the same table lookup as MACHINE_ID.
MACHINE_NAME="$2"
# Reject names missing from the MACHINES table up front, instead of
# misreporting them later as "offline".
[[ -z $MACHINE_ID ]] && echo -e "\nUnknown machine '$MACHINE_NAME'\n$MSG_HELP\n" && exit 1

# Look up the machine's current IP (column 4) and ID (column 3) in the
# Hamachi peer list; the header line (NR == 1) is skipped.
IP="$(hamachi list | awk -v mid="${MACHINE_ID}" ' NR > 1 {if( $2 == mid){print $4}}')"
ID="$(hamachi list | awk -v mid="${MACHINE_ID}" ' NR > 1 {if( $2 == mid){print $3}}')"
[[ -z $IP ]] && echo -e "\n${MSG_ERR_OFF}\n" && exit 1

echo -e "\nconnecting to ${USER} @ ${ID}[${IP}]...\n"
# Any remaining arguments are forwarded as the remote command.
ssh ${USER}@${IP} ${@:3}
| true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.