blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
977bd54762b49c5726da91064c4fc51368e0f9f0
|
Shell
|
zzzj1233/shell
|
/06-input-args/11-read-n.sh
|
UTF-8
| 385
| 3.375
| 3
|
[] |
no_license
|
# read -n NUMBER
# NUMBER is the maximum number of characters to accept.
# As soon as the user has typed that many characters, read returns
# immediately -- no Enter key required.
while read -n1 -p "exit? (y / n)" answer
do
case "$answer" in
y | Y)
echo "ok , will shutdown now"
break
;;
n | N)
echo "ok , will continue work"
break
;;
*)
# Anything else: re-show the expected answers and prompt again.
echo "y / n"
;;
esac
done
| true
|
5f6617243874db1698c621b13b75e070534b92e5
|
Shell
|
LuizGsa21/Firedrake
|
/build.sh
|
UTF-8
| 1,472
| 3.609375
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Firedrake build script, to make integration of a remote build server easier
# The whole story is that I use OS X but need Linux to build, so I have a VM that runs Debian. My IDE then
# executes the `remote-build` target to build the kernel
#
# Usage: build.sh [--build | --clean | --run | --debug]
# With no arguments the build is delegated to the remote host described by
# the FIREDRAKE_USER / FIREDRAKE_HOST / FIREDRAKE_PATH environment variables.
if [ $# -eq 0 ]; then
ssh "${FIREDRAKE_USER}@${FIREDRAKE_HOST}" "cd ${FIREDRAKE_PATH}; ./build.sh --build; exit"
exit 0
fi
# Configure the CMake tree in ./build. NOTE: leaves the shell inside ./build.
function configureCMake {
mkdir -p build
cd build
# Custom link rule: compile with clang but link with plain ld.
CMAKE_LINKER="<CMAKE_LINKER> <CMAKE_CXX_LINK_FLAGS> <LINK_FLAGS> <OBJECTS> -o <TARGET> <LINK_LIBRARIES>"
cmake "$(pwd)/.." -DCMAKE_BUILD_TYPE=Debug -DCMAKE_CXX_COMPILER="$(which clang)" -DCMAKE_C_COMPILER="$(which clang)" -DCMAKE_LINKER="$(which ld)" -DCMAKE_CXX_LINK_EXECUTABLE="${CMAKE_LINKER}" -DCMAKE_C_LINK_EXECUTABLE="${CMAKE_LINKER}"
}
# BUGFIX: the option tests below compared against "$@"; with more than one
# argument that is a [ ... ] syntax error. Compare the first argument instead.
if [ "$1" == "--build" ]; then
configureCMake
make
cd .. # configureCMake moves us into the ./build folder
./initrd.py
./boot/image.py
exit 0
fi
if [ "$1" == "--clean" ]; then
configureCMake
make clean
exit 0
fi
QEMU_NET="-net nic,model=rtl8139 -net user"
QEMU_CPU="-cpu core2duo -smp cores=2"
QEMU_ARGS="${QEMU_CPU} ${QEMU_NET} -serial stdio"
if [ "$1" == "--run" ]; then
BASEDIR=$(dirname "$0")
qemu-system-i386 ${QEMU_ARGS} "${BASEDIR}/boot/Firedrake.iso"
exit 0
fi
if [ "$1" == "--debug" ]; then
BASEDIR=$(dirname "$0")
qemu-system-i386 ${QEMU_ARGS} -s -S -D /tmp/qemu.log -d int -no-shutdown -no-reboot "${BASEDIR}/boot/Firedrake.iso" &
sleep 1
exit 0
fi
| true
|
968028d541b526869257e06a3ca2e3083f72b89d
|
Shell
|
remipelhate/dotfiles
|
/bin/dev
|
UTF-8
| 209
| 3.28125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Jump to a development directory under $DEV.
# NOTE: intended to be sourced/eval'd by the caller's shell -- a cd performed
# in a child process cannot change the caller's working directory.
# BUGFIX: the original used unquoted $@, so paths containing spaces (or extra
# arguments) broke the cd; use the first argument, quoted.
case "$1" in
"play") # Navigate to the Playground dev directory
cd "$DEV/Playground" ;;
*) # Navigate to given directory (or root directory if no path was given)
cd "$DEV/$1"
esac
| true
|
53444f530a1bdb882b8a6a0e3236175ae8b59dd2
|
Shell
|
markmo/repo2docker
|
/repo2docker/buildpacks/merge.sh
|
UTF-8
| 1,186
| 3.28125
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/usr/bin/env bash
# Squash-merge the per-user working branch back into the branch the Binder
# session was launched from, then push the result upstream.
commit_message=${1:-"merge devsheds session"}
jupyterhub_user="${JUPYTERHUB_USER}"
# see https://stackoverflow.com/questions/3162385/how-to-split-a-string-in-shell-and-get-the-last-field
original_branch=${BINDER_REQUEST##*/}
branch="europa-${jupyterhub_user##*-}"
echo "commit_message=${commit_message}"
echo "JUPYTERHUB_USER=${JUPYTERHUB_USER}"
echo "BINDER_REQUEST=${BINDER_REQUEST}"
echo "REPO_DIR=${REPO_DIR}"
# BUGFIX: abort if REPO_DIR is unset or the cd fails; otherwise every git
# command below (add/commit/push) would silently run in whatever directory
# the script happened to start in.
cd "${REPO_DIR:?REPO_DIR must be set}" || exit 1
touch .gitignore
# don't commit the .garden directory
if grep -Fxq ".garden" .gitignore; then
echo "entry exists"
else
echo ".garden" >> .gitignore
fi
git add .
git commit -m "catch any changes"
git checkout "${original_branch}"
# Merge the temporary branch
git merge --squash "${branch}"
git commit -m "${commit_message}"
# only delete the working branch if we successfully pushed the merged commits
# TODO keep branches for initial release for safety
git push origin "${original_branch}" #&& git push --delete origin "${branch}" && git branch -D "${branch}"
# TODO redirecting output to a file is not allowed in a restricted shell (rbash)
echo "Pre-stop script executed successfully" #> "/home/jovyan/prestop.log"
| true
|
ad07b7bf919db13820805a4c8b34cfbffb840c22
|
Shell
|
onap/aai-graphadmin
|
/src/main/scripts/updatePropertyTool.sh
|
UTF-8
| 2,566
| 2.9375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/ksh
#
# ============LICENSE_START=======================================================
# org.onap.aai
# ================================================================================
# Copyright © 2017 AT&T Intellectual Property. All rights reserved.
# ================================================================================
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============LICENSE_END=========================================================
#
# ECOMP is a trademark and service mark of AT&T Intellectual Property.
#
# updatePropertyTool.sh -- This tool is used to update properties in corrupt vertices
# in the event that an update or delete occurs to a node simultaneously, resulting
# in inconsistent data. Updating the aai-uri can reset the index and restore
# the GET information on the node.
#
# Parameters:
#
# At least one of following two parameters are required
# The following values are needed to identify the node(s) to be updated
# --filename, -f filename of a .txt extension required with a list of vertexIds. Vertex Ids must be separated line after line in text file.
# --vertexId, -v option that may occur multiple times as entries of a list
#
# --property, -p (required) value to be updated in the corrupted node
# --help, -h (optional) used to display help on usage of the function
#
#
# For example:
#
# updatePropertyTool.sh --filename myFile.txt --vertexId 123 --property myProperty
# updatePropertyTool.sh --filename myFile.txt --vertexId 123 --vertexId 456 --property myProperty
# updatePropertyTool.sh -f myFile.txt --vertexId 123 -v 456 -p myProperty
# updatePropertyTool.sh -f myFile.txt -p -myProperty
# updatePropertyTool.sh -v 123 -v 456 -p -myProperty
# updatePropertyTool.sh -v 123 -p -myProperty
#
# Resolve the directory this script lives in, then run the UpdatePropertyTool
# via the shared common_functions helpers.
COMMON_ENV_PATH=$( cd "$(dirname "$0")" ; pwd -P )
# BUGFIX/robustness: quote expanded paths so the tool still works when
# installed under a directory containing spaces.
. "${COMMON_ENV_PATH}/common_functions.sh"
start_date;
check_user;
source_profile;
# execute_spring_jar is provided by common_functions.sh; forward all CLI args.
execute_spring_jar org.onap.aai.dbgen.UpdatePropertyTool "${PROJECT_HOME}/resources/updatePropertyTool-logback.xml" "$@"
end_date;
exit 0
| true
|
5872f6e93900e1ba3389c1ce2180233ea6cf08a1
|
Shell
|
Malinskiy/AndroidFFmpeg
|
/FFmpegLibrary/jni/fetch_android_deps.sh
|
UTF-8
| 708
| 3.125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Fetch the Android (CyanogenMod gingerbread) system headers and the prebuilt
# libraries needed to link against Android system components.
echo "Fetching Android system headers"
git clone --depth=1 --branch gingerbread-release https://github.com/CyanogenMod/android_frameworks_base.git ./android-source/frameworks/base
git clone --depth=1 --branch gingerbread-release https://github.com/CyanogenMod/android_system_core.git ./android-source/system/core
echo "Fetching Android libraries for linking"
# Download/extract only once: skip when ./android-libs/armeabi already exists.
if [ ! -d "./android-libs/armeabi" ]; then
if [ ! -f "./update-cm-7.0.3-N1-signed.zip" ]; then
wget http://download.cyanogenmod.com/get/update-cm-7.0.3-N1-signed.zip -P./
fi
mkdir -p ./android-libs/armeabi
# Extract only system/lib/* from the ROM zip into the current directory.
unzip ./update-cm-7.0.3-N1-signed.zip system/lib/* -d./
# NOTE(review): armeabi/ was just created, so this produces
# ./android-libs/armeabi/lib -- confirm this nested layout is intended.
mv ./system/lib ./android-libs/armeabi
rmdir ./system
fi
| true
|
e68fec6f0e957aa9e3149b516efaf0d49de36536
|
Shell
|
ylnb91/AutoDeployBash
|
/script/auto-deploy-bash.sh
|
UTF-8
| 12,183
| 3.875
| 4
|
[] |
no_license
|
#!/bin/bash
# *******************************************
# 2014.2.11 first version
#
#
# *******************************************
# Interactive deploy script: validates args, checks the SVN target, asks which
# tomcat cluster instances to deploy to, and (further below) provisions any
# missing instances and reloads nginx.
current=`dirname $0`;
. $current/deploy.properties;
# Require exactly one environment argument: qa or prod.
if [[ $# == 0 || $1 == "help" ]]; then
echo -e "Command syntax error.\nUsage: `basename $0` [qa|prod] \n"; exit 1;
fi
if [[ $1 == "qa" || $1 == "prod" ]]; then
rm -f $current/$1.properties $current/build.properties;
else
echo "Illegal input, only "qa","prod" is allowed."; exit 1;
fi
read -p "Please input the appname you want to deploy:" appname;
read -p "Please input the svn target: (head|branch|tag name)" target;
# Map the user's target keyword onto the SVN repository layout.
case $target in
head ) svn_path_target="trunk"; svn_app_path=$appname;;
branch ) svn_path_target="branches"; svn_app_path=$appname;;
* ) svn_path_target="tags"; svn_app_path=$target;;
esac
# Fill the general properties template, then pull the svn credentials out of it.
sed "s/@appname@/$appname/g; s/@target@/$svn_path_target/g; s/@path@/$svn_app_path/g" $current/general.properties > /tmp/temp-build.properties;
build_svn=`grep svn.repository= /tmp/temp-build.properties | awk -F= '{printf $2}'`;
svn_username=`grep svn.user= /tmp/temp-build.properties | awk -F= '{printf $2}'`;
svn_account=`grep svn.user.pwd= /tmp/temp-build.properties | awk -F= '{printf $2}'`;
svn ls $build_svn --username $svn_username --password $svn_account &> /dev/null;
#exit if the tag name does not exist
if [[ $? != 0 ]]; then
echo "the tag dose not exist."; exit 1;
fi
#classify the clusters
# Ask (up to 3 times) which instances to deploy to; split the answer into
# existing instances (under /usr/local) and instances still to be created.
error_count=0;
while (( $error_count < 3 )); do
string=`ls /usr/local | grep ${appname}-node`;
echo -e "please input the cluster instance name you want to deploy: \c";
[[ -n $string ]] && read -p "`echo $string` | all " clusters || read -p "${appname}-node[0...n] " clusters;
if [[ -z $clusters ]]; then
echo "input error."; ((error_count++)); continue;
fi
dir_all=`ls /usr/local`;
not_exist_clusters=(); exist_clusters=();
# De-duplicate the space-separated answer (awk keeps first occurrence).
clusters=($(awk -vRS=' ' '!a[$1]++' <<< $clusters));
if [[ `echo ${clusters[0]} | awk '{print tolower($1)}'` == "all" ]]; then
exist_clusters=($string);
else
for cluster in ${clusters[*]}; do
for dir in $dir_all; do [[ $cluster == $dir ]] && exist_clusters=(${exist_clusters[*]} $cluster) || not_exist_clusters=(${not_exist_clusters[*]} $cluster); done
done
fi
not_exist_clusters=($(awk -vRS=' ' '!a[$1]++' <<< ${not_exist_clusters[*]}));
break;
done
if (( $error_count > 2 )); then
echo "you must input something, program is going to quit now."; exit 1;
fi
# is_local selects between running nginx commands directly or over ssh.
# nginx_server_ip is expected to come from deploy.properties.
[[ $nginx_server_ip == "localhost" || "$nginx_server_ip" == "127.0.0.1" ]] && is_local="local" || is_local="";
echo "~~~~~~~$is_local";
# create not exist clusters
if [[ ${#not_exist_clusters[*]} != 0 ]]; then
# N(default):create a http instance;
# y: create a https instance;
# cancle: do not create any instances.
read -p "do you want to create a https instance[N/y/cancle]:" is_https;
nginx_path=/usr/local/nginx/conf/vhost;
case `echo $is_https | awk '{print tolower($1)}'` in
cancle ) echo "program exit."; exit 0;;
y ) port_type="https";;
* ) port_type="http";;
esac
# Generate the per-app nginx vhost from the http/https template and install
# it (locally or on the remote nginx host) if it is not present yet.
sed "s/@appname@/$appname/g" $current/${port_type}.conf > $current/${appname}.conf.tmp ;
if [[ $is_local == "local" ]]; then
ls $nginx_path | grep ${appname}.conf &> /dev/null;
if [[ $? != 0 ]]; then
sudo mv -f $current/${appname}.conf.tmp $nginx_path/${appname}.conf;
sudo sed -i "s/include files here/include files here\n include vhost\/$appname.conf;/g" /usr/local/nginx/conf/nginx.conf;
fi
else
ssh -tq $nginx_server_ip "ls $nginx_path | grep ${appname}.conf" &> /dev/null;
if [[ $? != 0 ]]; then
rsync $current/${appname}.conf.tmp ${nginx_server_ip}:~/${appname}.conf.tmp;
ssh -tq $nginx_server_ip "sudo mv -f ~/${appname}.conf.tmp $nginx_path/${appname}.conf; sudo sed -i 's/include files here/include files here\n include vhost\/$appname.conf;/g' /usr/local/nginx/conf/nginx.conf";
fi
fi
rm $current/${appname}.conf.tmp;
fi
# Provision each missing instance: copy the tomcat template, substitute
# ports/app name, register it as a system service and add it to the nginx
# upstream for the app.
for new_app in ${not_exist_clusters[*]}; do
# BUGFIX: -p so the second and later iterations do not error out when
# /tmp/$appname already exists.
mkdir -p /tmp/$appname;
new_tomcat=/tmp/$appname/${new_app};
. $current/deploy.properties;
[[ $port_type == "https" ]] && cp -rf $tomcat_https_template $new_tomcat || cp -rf $tomcat_template $new_tomcat;
sed -i "s/@http.port@/$http_port/g; s/@https.port@/$https_port/g; s/@server.port@/$server_port/g; s/@connector.port@/$connector_port/g; s/@appname@/$appname/g" $new_tomcat/conf/server.xml;
sed -i "s/@appname@/$new_app/g" $new_tomcat/bin/setenv.sh $new_tomcat/lib/pkgconfig/tcnative-1.pc $new_tomcat/bin/myshutdown.sh;
echo "**************************************************************" ;
echo "******* Initing a $port_type instance: $new_app ***********" ;
echo "**************************************************************" ;
[[ $port_type == "https" ]] && port=$https_port || port=$http_port;
# Record (or update) the instance->port mapping in cluster.instance.
repeat_port=`grep $new_app $current/cluster.instance`;
# BUGFIX: the original tested "$#repeat_port", which expands to the
# positional-argument count followed by literal text; ${#repeat_port} is
# the length of the grep result.
if [[ ${#repeat_port} == 0 || $? != 0 ]]; then
echo "$new_app=$port" >> $current/cluster.instance;
else
sed -i "/$new_app=/c $new_app=$port" $current/cluster.instance;
fi
sudo mv -f $new_tomcat /usr/local/$new_app ;
sudo chown -R tomcat:tomcat /usr/local/$new_app ;
sudo chkconfig --add $new_app;
sudo chkconfig --level 2345 $new_app on;
sed "s/@http.port@/$http_port/g; s/@appname@/$new_app/g" $service_template > $current/service.tmp;
sudo mv -f $current/service.tmp /etc/init.d/$new_app;
sudo chmod a+x /etc/init.d/$new_app;
# Bump every *port* entry in deploy.properties so the next instance gets
# the next free ports.
awk -F= '{if($1 ~ /port/) print $1,$2+1 > "deploy.properties";else print $1,$2 > "deploy.properties"}' OFS="=" $current/deploy.properties;
if [[ $is_local == "local" ]]; then
sudo sed -i "s/upstream $appname {/upstream $appname {\n server 127.0.0.1:$port weight=1;/g" /usr/local/nginx/conf/vhost/$appname.conf;
else
remote_ip=`/sbin/ifconfig -a|grep inet|grep -v 127.0.0.1|grep -v inet6|awk '{print $2}'|tr -d "addr:"`;
ssh -tq $nginx_server_ip "sudo sed -i 's/upstream $appname {/upstream $appname {\n server ${remote_ip}:${port} weight=1;/g' /usr/local/nginx/conf/vhost/$appname.conf";
fi
exist_clusters=(${exist_clusters[*]} $new_app);
done
# Validate the nginx configuration before any reload.
# NOTE(review): "service nginx test" assumes a service script that supports a
# "test" action (wrapping nginx -t) -- confirm on the target hosts.
nginx_error="false";
if [[ $is_local == "local" ]]; then
sudo service nginx test;
[[ $? == 0 ]] || nginx_error="true";
else
ssh -tq $nginx_server_ip "sudo service nginx test";
[[ $? == 0 ]] || nginx_error="true";
fi
# reload nginx , effect the new added clusters
if [[ ${#not_exist_clusters[*]} != 0 ]]; then
if [[ $is_local == "local" ]]; then
if [[ $nginx_error == "true" ]]; then
echo "local nginx test error, please check the conf files...";
else
sudo service nginx reload;
fi
else
if [[ $nginx_error == "true" ]]; then
echo "remote ${nginx_server_ip} nginx test error, please check the conf files...";
else
ssh -tq $nginx_server_ip "sudo service nginx reload";
fi
fi
elif [[ ${#exist_clusters[*]} == 0 ]]; then
echo "no instance. the deploy program quits."; exit 1;
fi
# Build the project: finalize build.properties for the first instance, export
# the environment properties from SVN and run ant.
sed "s/@instance@/${exist_clusters[0]}/g" /tmp/temp-build.properties > $current/build.properties;
rm -f /tmp/temp-build.properties;
echo "\n Building Project $appname from $svn_path_target/$svn_app_path ...";
svn export $build_svn/deploy/$1.properties --username $svn_username --password $svn_account;
ant -f $current/build.xml -Dtarget=$1;
rm -f $current/build.properties;
#stop the static nginx agent
# Remove everything between the "#static conf start" and "#static conf end"
# markers in the app's vhost file (the section is re-added after the deploy).
if [[ $is_local == "local" ]]; then
#sudo sed -i "/include vhost\/static.conf/c #include vhost\/static.conf;" /usr/local/nginx/conf/vhost/$appname.conf
startline=`sed -n '/#static conf start/=' /usr/local/nginx/conf/vhost/${appname}.conf | tr -d '\r\n'`;
endline=`sed -n '/#static conf end/=' /usr/local/nginx/conf/vhost/${appname}.conf | tr -d '\r\n'`;
if [[ $startline == 0 || $endline == 0 ]]; then
echo "/usr/local/nginx/conf/vhost/${appname}.conf is invalid; please add static start/end flag."; exit 1;
fi
((startline++));
((endline--));
if [[ $endline -ge $startline ]]; then
sed -i "${startline},${endline}d" /usr/local/nginx/conf/vhost/${appname}.conf;
fi
if [[ $nginx_error == "false" ]]; then sudo service nginx reload;fi
else
startline=`ssh -tq $nginx_server_ip "sed -n '/#static conf start/=' /usr/local/nginx/conf/vhost/${appname}.conf" | tr -d '\r\n'`;
endline=`ssh -tq $nginx_server_ip "sed -n '/#static conf end/=' /usr/local/nginx/conf/vhost/${appname}.conf" | tr -d '\r\n'`;
if [[ $startline == 0 || $endline == 0 ]]; then
echo "/usr/local/nginx/conf/vhost/${appname}.conf is invalid; please add static start/end flag."; exit 1;
fi
((startline++));
((endline--));
if [[ $endline -ge $startline ]]; then
ssh -tq $nginx_server_ip "sudo sed -i '${startline},${endline}d' /usr/local/nginx/conf/vhost/${appname}.conf";
fi
if [[ $nginx_error == "false" ]]; then ssh -tq $nginx_server_ip "sudo service nginx reload";fi
fi
# start all the clusters
# Stop each instance, wipe the old deployment, copy the new war in, start the
# instance again and poll over HTTP(S) until the app answers.
for app in ${exist_clusters[*]}; do
CATALINA_HOME=/usr/local/$app;
sudo service $app stop;
sleep 5;
#Clean history deployment
sudo rm -rf $CATALINA_HOME/webapps/$appname*;
sudo rm -f $CATALINA_HOME/logs/*;
#Clean finished
#distribute war to tomcat cluster
sudo cp $current/build/$appname.war $CATALINA_HOME/webapps/;
sleep 5;
sudo service $app start;
echo "waiting for $app start...";
sleep 5;
port=`grep $app= $current/cluster.instance | awk -F= '{printf $2}'`;
# test starting suc.
if [[ ${#exist_clusters[*]} == 1 ]]; then continue; fi
while true; do
sleep 5;
http_status_code=`curl -s -o /dev/null -I -w '%{http_code}' http://localhost:${port}/$appname`;
https_status_code=`curl -s -k -o /dev/null -I -w '%{http_code}' https://localhost:${port}/$appname`;
((status_code=$http_status_code+$https_status_code));
# BUGFIX: [[ $a < $b ]] compares strings lexicographically ("1198" < "400"
# is true), so a failing 5xx+5xx sum could be reported as success; use an
# arithmetic comparison instead.
if (( status_code < 400 && status_code > 0 )); then
echo "Ping localhost:$port/$appname ===> status: $status_code ${app}: suc"; break;
fi
echo "Ping localhost:$port/$appname ===> status: $status_code, try accessing after 5s......";
done
echo "Deploy $appname at $app finished";
done
# When "all" instances were deployed, refresh the statically-served assets
# (js/css) and re-enable the static section of the nginx vhost.
if [[ `echo ${clusters[0]} | awk '{print tolower($1)}'` == "all" ]]; then
#copy the static files
static_root=/var/www/$appname ;
web_dir=$current/$appname/src/main/webapp;
#open the static agent
# BUGFIX: the original wrote [[ $is_local=="local" ]] (no spaces), which is a
# non-empty-string test that is always true, so the remote branch below could
# never run. Use a real comparison.
if [[ $is_local == "local" ]]; then
sudo mkdir -p $static_root;
sudo rm -rf $static_root/*;
sudo cp -rf $web_dir/js $static_root/js;
sudo cp -rf $web_dir/css $static_root/css;
sed "s/@appname@/$appname/g" static.conf > /tmp/$appname/static.new;
sudo sed -i "/#static conf start/r /tmp/${appname}/static.new" /usr/local/nginx/conf/vhost/$appname.conf;
if [[ $nginx_error == "true" ]]; then
echo "local nginx test error, please check the conf files...";
else
sudo service nginx reload;
fi
else
ssh -tq $nginx_server_ip "sudo rm -rf $static_root/*";
ssh -tq $nginx_server_ip "mkdir -p ~/static";
rsync -a $web_dir/js ${nginx_server_ip}:~/static;
rsync -a $web_dir/css ${nginx_server_ip}:~/static;
ssh -tq $nginx_server_ip "sudo mkdir -p $static_root;sudo rm -rf $static_root/*;sudo mv -f ~/static/js $static_root/js;sudo mv -f ~/static/css $static_root/css; sudo rm -rf ~/static";
sed "s/@appname@/$appname/g" $current/static.conf > /tmp/$appname/static.new;
ssh -tq $nginx_server_ip "sudo sed -i '/#static conf start/r /tmp/${appname}/static.new' /usr/local/nginx/conf/vhost/${appname}.conf";
rm -f /tmp/${appname}/static.new;
[[ $nginx_error == "false" ]] && ssh -tq $nginx_server_ip "sudo service nginx reload";
fi
fi
echo "Auto deploy $appname finished";
# NOTE: $? here is the status of the echo above, so the script effectively
# always exits 0.
exit $?;
| true
|
f16c32505e2f400355fdce49d1332d2692bdb3ec
|
Shell
|
fdloopes/Assistente-samba
|
/banco.sh
|
UTF-8
| 4,439
| 3.375
| 3
|
[] |
no_license
|
#!/bin/bash
# Interactive (dialog-based) assistant for installing and managing a
# PostgreSQL 9.1 server.
# SUDO_ASKPASS points "sudo -A" at a helper script that supplies the password.
export SUDO_ASKPASS="$PWD/minha_senha.sh"
# PostgreSQL 9.1 configuration directory (location of pg_hba.conf).
postgress="/etc/postgresql/9.1/main"
# Show a message box with the given text.
Mensagem() {
dialog --title "Mensagem" --msgbox "$1" 6 50
}
# Check that psql is usable; if not, offer to install postgresql (retrying as
# root when the unprivileged install fails). Exits when the user declines.
TestePostgres(){
clear
dialog --title " AGUARDE.." --infobox '\n TESTANDO DEPENDENCIAS!' 5 30
# Probe the server by creating (and immediately dropping) a scratch database.
if ( sudo -k -A psql -U postgres --quiet -c "CREATE DATABASE teste" >> tmp.tmp &> /dev/null )
then
echo " "
psql -U postgres --quiet -c "DROP DATABASE teste" >> tmp.tmp
else
Mensagem "O Postgres nao esta instalado!"
dialog --title "Pergunta:" --yesno "Deseja instalar o postgres?" 6 46
op=$?
# BUGFIX: "op = 1" ran a command named "op"; shell assignments take no spaces.
test -z "$op" && op=1
if [ $op -eq 0 ]
then
dialog --title " AGUARDE.." --infobox '\n ISTO SO VAI LEVAR ALGUNS MINUTOS' 5 40
if ( apt-get -qy install postgresql > tmp.tmp )
then
dialog --title " AGUARDE.." --infobox '\n TERMINANDO INSTALACAO' 5 30
sleep 2
Mensagem "Postgres instalado com sucesso!"
else
clear
Mensagem "Voce nao tem permissao para executar a instalacao do postgres!"
dialog --title "Pergunta:" --yesno "Deseja instalar o postgres como root?" 6 46
op=$?
# BUGFIX: same "op = 1" fix as above.
test -z "$op" && op=1
if [ $op -eq 0 ]
then
if sudo apt-get -qy install postgresql > tmp.tmp
then
Mensagem "Postgres instalado com sucesso!"
else
echo "postgres nao instalado!"
fi
clear
else
clear
exit
fi
fi
fi
fi
}
# Show the main Postgres menu and store the chosen entry in the global $opcao.
Menu(){
opcao=$(dialog --stdout --no-cancel --backtitle "Menu Postgres" --menu "Faca sua escolha" 15 25 25 \
1 "Editar" \
2 "Autenticacao" \
3 "Iniciar/Parar" \
0 "Voltar" )
# Default to 0 (back) when dialog returned nothing (e.g. ESC).
test -z $opcao && opcao=0
}
# Show the service menu (start/restart/stop), store the choice in $opcao and
# dispatch it immediately via CaseMenuServico.
MenuServico(){
opcao=$(dialog --stdout --no-cancel --backtitle "Menu Servico" --menu "Faca sua escolha" 15 25 25 \
1 "Iniciar" \
2 "Reiniciar" \
3 "Parar" \
0 "voltar" )
# Default to 0 (back) when dialog returned nothing (e.g. ESC).
test -z $opcao && opcao=0
CaseMenuServico
}
# Dispatch the service-menu selection in $opcao to the matching action.
CaseMenuServico() {
case "$opcao" in
1) iniciar ;;
2) restart ;;
3) parar ;;
0) return ;;
esac
}
# Show the active (non-comment) lines of pg_hba.conf in an edit box and save
# the edited text back to the working copy post.tmp.
# NOTE(review): the edits are written to post.tmp only, never back to
# pg_hba.conf itself -- confirm that is the intended behaviour.
listar(){
cat "$postgress/pg_hba.conf" | grep -v "^#" > post.tmp
editar=$( dialog --stdout --editbox "post.tmp" 120 120 )
opcao=$?
clear
if [ $opcao -eq 0 ]
then
# User confirmed: persist the edited buffer.
echo "$editar" > post.tmp
Mensagem "Arquivo salvo com sucesso"
return
else
Mensagem "Arquivo nao salvo"
return
fi
}
# Rebuild pg_hba.conf: show the current rules, ask for the auth method for
# local connections and for the IPv4/IPv6 rules, write a fresh rule file and
# display the result.
autenticar() {
aut="trust"
cat "$postgress/pg_hba.conf" | grep -v "^#" > post.tmp
dialog --title "Configuracao Atual" --backtitle "Configuração Pg_hba.conf" --textbox "post.tmp" 0 0
primeira="local all all "
segunda="host all all 127.0.0.1/32 "
terceira="host all all ::1/128 "
# BUGFIX: the original assignment used unbalanced double quotes, which
# silently dropped the quotes around "local"; single-quote the whole line.
n1='# "local" is for Unix domain socket connections only'
n2="# IPv4 local connections:"
n3="# IPv6 local connections:"
aut=$(dialog --stdout --backtitle "Adicionar novas regras" --inputbox "Informe o nivel de autenticacao Local" 7 50)
op=$?
if [ $op -eq 1 ]
then
return
fi
echo $n1 > post.tmp
echo $primeira $aut >> post.tmp
echo $n2 >> post.tmp
aut=$(dialog --stdout --backtitle "Adicionar novas regras" --inputbox "Informe o nivel de autenticacao das outras redes" 7 60)
# BUGFIX: capture this dialog's exit status; the old code re-tested the
# status of the first dialog, so cancelling here was silently ignored.
op=$?
if [ $op -eq 1 ]
then
return
fi
echo $segunda $aut >> post.tmp
echo $n3 >> post.tmp
echo $terceira $aut >> post.tmp
n4="# Allow replication connections from localhost, by a user with the replication privilege."
n5="#local replication postgres trust"
n6="#host replication postgres 127.0.0.1/32 trust"
n7="#host replication postgres ::1/128 trust"
echo $n4 >> post.tmp
echo $n5 >> post.tmp
echo $n6 >> post.tmp
echo $n7 >> post.tmp
# Install the rebuilt rule set and show it to the user.
cat post.tmp > "$postgress/pg_hba.conf"
sleep 4
dialog --title "Configuracao Atual" --backtitle "Configuração Pg_hba.conf" --textbox "$postgress/pg_hba.conf" 0 0
}
# Start the postgresql service and report the outcome in a message box.
iniciar(){
if service postgresql start &> /dev/null; then
Mensagem "Servico iniciado com sucesso!"
return
fi
Mensagem "Servico nao iniciado!"
}
# Stop the postgresql service and report the outcome in a message box.
parar(){
if service postgresql stop &> /dev/null; then
Mensagem "Servico parado com sucesso!"
return
fi
Mensagem "Impossivel parar o servico!"
}
# Restart the postgresql service and report the outcome in a message box.
restart(){
if service postgresql restart &> /dev/null; then
Mensagem "Servico reiniciado com sucesso!"
return
fi
Mensagem "Servico nao reiniciado!"
}
# Main loop: verify postgres is available, then show the menu until the user
# chooses 0 (back/exit) or presses ESC (dialog exit status 255).
Principal() {
opcao=1
clear
TestePostgres
while [ "$opcao" != 0 ]
do
Menu
case $opcao in
1) clear
listar
;;
2) clear
autenticar
;;
3) clear
MenuServico
;;
255)
clear
# BUGFIX: removed the unreachable "clear" that followed this exit.
exit
;;
esac
done
}
# Entry point: run the interactive menu, then remove the scratch files.
Principal
rm -rf post.tmp
rm -rf tmp.tmp
clear
| true
|
fa594e65b83e84cb2b555488ec1c4a191f567830
|
Shell
|
stephengeller/eleanorbettdotcom
|
/deploy.sh
|
UTF-8
| 180
| 2.6875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Deploy by pushing the local master branch to the heroku remote.
# BUGFIX: the original printed a failure hint but still exited 0, so CI and
# shell callers could not detect the failed deploy; report on stderr and
# propagate a non-zero exit status.
if ! git push heroku master; then
echo "Failed to run 'git push heroku master'. Have you tried using \"heroku login\"? Is the remote set properly?" >&2
exit 1
fi
| true
|
3da6b98ec06167f916f952436175f9489f7be68d
|
Shell
|
jvidalg/kafka-docker-playground
|
/connect/connect-mqtt-source/mqtt-repro-json.sh
|
UTF-8
| 2,888
| 3.03125
| 3
|
[] |
no_license
|
#!/bin/bash
# Reproduction script: start the plaintext playground environment, create an
# MQTT source connector that parses JSON payloads (FromJson SMT with an inline
# schema, plus key extraction), publish one MQTT message and verify it lands
# in the Kafka topic.
set -e
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd )"
source ${DIR}/../../scripts/utils.sh
# Bring up the environment with the json-schema-enabled connect image.
${DIR}/../../environment/plaintext/start.sh "${PWD}/docker-compose.plaintext.kafka-connect-json-schema.yml"
log "Creating MQTT Source connector"
curl -X PUT \
-H "Content-Type: application/json" \
--data '{
"connector.class": "io.confluent.connect.mqtt.MqttSourceConnector",
"tasks.max": "1",
"mqtt.server.uri": "tcp://mosquitto:1883",
"mqtt.topics":"my-mqtt-topic",
"kafka.topic":"mqtt-json-topic",
"mqtt.qos": "2",
"mqtt.username": "myuser",
"mqtt.password": "mypassword",
"value.converter": "org.apache.kafka.connect.json.JsonConverter",
"value.converter.schemas.enable": "false",
"transforms" : "fromJson,createKey,extractInt",
"transforms.fromJson.type" : "com.github.jcustenborder.kafka.connect.json.FromJson$Value",
"transforms.fromJson.json.schema.location" : "Inline",
"transforms.fromJson.json.schema.inline" : "{\n \"$id\": \"https://example.com/person.schema.json\",\n \"$schema\": \"http://json-schema.org/draft-07/schema#\",\n \"title\": \"Person\",\n \"type\": \"object\",\n \"properties\": {\n \"firstName\": {\n \"type\": \"string\",\n \"description\": \"The person first name.\"\n },\n \"lastName\": {\n \"type\": \"string\",\n \"description\": \"The person last name.\"\n },\n \"age\": {\n \"description\": \"Age in years which must be equal to or greater than zero.\",\n \"type\": \"integer\",\n \"minimum\": 0\n }\n }\n}",
"transforms.createKey.type":"org.apache.kafka.connect.transforms.ValueToKey",
"transforms.createKey.fields":"lastName",
"transforms.extractInt.type":"org.apache.kafka.connect.transforms.ExtractField$Key",
"transforms.extractInt.field":"lastName",
"confluent.license": "",
"confluent.topic.bootstrap.servers": "broker:9092",
"confluent.topic.replication.factor": "1"
}' \
http://localhost:8083/connectors/source-mqtt-json/config | jq .
sleep 5
log "Send message to MQTT in my-mqtt-topic topic"
docker exec mosquitto sh -c 'mosquitto_pub -h localhost -p 1883 -u "myuser" -P "mypassword" -t "my-mqtt-topic" -m "{\"lastName\":\"Doe\",\"age\":21,\"firstName\":\"John\"}"'
sleep 5
log "Verify we have received the json data in mqtt-json-topic topic"
# NOTE(review): "-bootstrap-server" below uses a single dash; the tool's usual
# flag is "--bootstrap-server" -- verify the flag is accepted by this image.
timeout 60 docker exec broker kafka-console-consumer -bootstrap-server broker:9092 --topic mqtt-json-topic --from-beginning --max-messages 1 --property print.key=true --property key.separator=,
# Results (Key:Doe):
# Doe,{"age":21,"firstName":"John","lastName":"Doe"}
| true
|
72b9cb076b27ae65659c6488251e60b9626fc424
|
Shell
|
AlaaAbdelkareem/AlaaDB
|
/table/deleteTable.sh
|
UTF-8
| 421
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/bash
# Delete a whole "table" (a colon-separated flat file) or individual rows
# whose first field contains a given word.
echo "enter the table name"
read del
if [ -f "$del" ]
then
select choice in "delete all table" "delete specific row"
do
case $choice in
"delete all table")
rm -f "$del"
echo "done"
exit
;;
"delete specific row")
echo "enter the word to search"
read word
# BUGFIX: the original ran `cut -f1 -d: $del grep "$word"`, so cut treated
# "grep" and the search word as additional file names and the match never
# worked; pipe cut's output into grep instead (-q: only the status matters).
if cut -f1 -d: "$del" | grep -q "$word"
then
sed -i "/$word/d" "$del"
else
echo "can't find this word"
fi
;;
esac
done
else
echo "not found"
fi
| true
|
166ae7f4917e00eee62ed0f706251e24132c9cb2
|
Shell
|
spiralofhope/shell-random
|
/live/sh/scripts/find-newest-file.sh
|
UTF-8
| 1,018
| 3.9375
| 4
|
[] |
no_license
|
#!/usr/bin/env sh
# Find the newest files in this directory and all subdirectories.
# (The original header said "oldest", but `sort -nr` on mtimes lists newest
# first.)
# Sections below are toggled with a here-doc trick: a line like :<<'}' feeds
# everything up to the matching '}' line to the no-op builtin ':', commenting
# the whole block out; prefixing that line with '#' re-enables the block.
#:<<'}' # zsh
{
echo .
# Current directory only
#\echo *(.om[1])
#\echo *(om[1])
#\echo **/*(om[1])
}
#:<<'}' #
{
# Active section: list mtimes via find+stat, newest first. The backtick
# pairs embed "comments" inside the continued command (each is an empty
# command substitution).
\find . -type f -exec \
` # change %Y to %X for the _accessed_ files ` \
\stat -c '%Y %n' {} \; |\
\sort -nr |\
` # var= is the number of files to list. This doesn't seem to work! ` \
\awk -v var="10" 'NR==1,NR==var {print $0}' |\
while read t f; do
d=$( date -d @$t "+%b %d %T %Y" )
\echo "$d -- $f"
done
}
:<<'}' #
{
# Just a single file
DIR='.'
find "$DIR" -type f -printf "%T@ %p\n" |
awk '
BEGIN { recent = 0; file = "" }
{
if ($1 > recent)
{
recent = $1;
file = $0;
}
}
END { print file; }' |
sed 's/^[0-9]*\.[0-9]* //'
}
#:<<'}' # Simple
{
# -t is the newest first
# ls -Art | tail -n1
# Current directory:
# \ls -lAt1r | tail -n1
# Tree, last 10
\ls -lAt1rR | tail -n10
}
| true
|
cbe07e3a55c66a2ed638127ff71422c2f39661a3
|
Shell
|
deponian/scripts
|
/necessary-packages.sh
|
UTF-8
| 4,681
| 3.453125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Strict mode: abort on errors, unset variables and failed pipeline stages.
set -euo pipefail
# Safer word splitting: newline and tab only.
IFS=$'\n\t'
#
# install necessary packages for your distro
#
# Install Debian/Ubuntu packages via apt for the given mode
# (minimal | server | desktop). Recommends are skipped to keep installs lean.
# Arguments: $1 - mode (required; aborts with a message when missing)
deb_packages () {
local mode
# BUGFIX: corrected user-facing typo "parametr" -> "parameter".
mode="${1:?"You have to specify mode as first parameter"}"
apt update
if [[ "${mode}" == "minimal" ]]; then
apt install --no-install-recommends bash bc ccze dnsutils git htop iputils-ping mlocate \
ncdu openssh-server rsync sudo tmux vim zsh
elif [[ "${mode}" == "server" ]]; then
apt install --no-install-recommends anacron apg apt-transport-https bash bc bridge-utils \
bwm-ng ca-certificates ccze cron curl debsums dnsutils ethtool gdisk git gnupg2 htop \
ifupdown iputils-ping ioping iotop iproute2 jid jq less links lsb-release lshw mc mlocate \
mtr-tiny nano ncdu netcat nethogs netmask net-tools nmap openssh-server parted progress \
rsync rsyslog strace sudo sysstat tcpdump telnet tmux traceroute unzip vim vlan wget \
xz-utils zsh zstd
elif [[ "${mode}" == "desktop" ]]; then
apt install --no-install-recommends anacron apg apt-transport-https bash bc bridge-utils \
bwm-ng ca-certificates ccze cron curl debsums dnsutils ethtool gdisk git gnupg2 htop \
ifupdown iputils-ping ioping iotop iproute2 jid jq less links lsb-release lshw mc mlocate \
mtr-tiny nano neovim ncdu netcat nethogs netmask net-tools nmap openssh-server parted \
progress rsync rsyslog strace sudo sysstat tcpdump telnet tmux \
traceroute unzip vim vim-gui-common vlan wget xz-utils zsh zstd
else
# main() validates the mode, so this branch should be unreachable.
echo "Something strange happened." >&2
fi
}
# dnsutils -> bind-utils
# inetutils-ping -> iputils
# cron, anacron -> cronie cronie-anacron
# Install Fedora/CentOS packages via dnf for the given mode
# (minimal | server | desktop). Weak dependencies are disabled.
# Arguments: $1 - mode (required; aborts with a message when missing)
rpm_packages () {
local mode
# BUGFIX: corrected user-facing typo "parametr" -> "parameter".
mode="${1:?"You have to specify mode as first parameter"}"
if [[ "${mode}" == "minimal" ]]; then
dnf --setopt=install_weak_deps=False install bash bc ccze bind-utils git htop iputils \
mlocate ncdu openssh-server rsync sudo tmux vim zsh
elif [[ "${mode}" == "server" ]]; then
dnf --setopt=install_weak_deps=False install cronie-anacron apg bash bc bwm-ng ca-certificates \
ccze cronie curl bind-utils ethtool gdisk git gnupg2 htop iputils ioping iotop jid jq less \
links lshw mc mlocate mtr nano ncdu nethogs netmask net-tools nmap openssh-server \
parted progress rsync rsyslog strace sudo sysstat tcpdump telnet tmux traceroute unzip vim wget \
xz zsh zstd
elif [[ "${mode}" == "desktop" ]]; then
dnf --setopt=install_weak_deps=False install cronie-anacron apg bash bc bwm-ng ca-certificates \
ccze cronie curl bind-utils ethtool gdisk git gnupg2 htop iputils ioping iotop jid jq less links \
lshw mc mlocate mtr nano neovim ncdu nethogs netmask net-tools nmap openssh-server parted progress \
rsync rsyslog strace sudo sysstat tcpdump telnet tmux traceroute unzip \
vim wget xz zsh zstd
else
# main() validates the mode, so this branch should be unreachable.
echo "Something strange happened." >&2
fi
}
# dnsutils -> bind
# openssh-server -> openssh
# cronie-anacron -> cronie
# apg -> not in core repository
# gnupg2 -> gnupg
# jid -> not in core repository
# netmask -> not in core repository
# rsyslog -> not in core repository
# telnet -> inetutils
# Install Arch packages via pacman for the given mode
# (minimal | server | desktop).
# Arguments: $1 - mode (required; aborts with a message when missing)
arch_packages () {
local mode
# BUGFIX: corrected user-facing typo "parametr" -> "parameter".
mode="${1:?"You have to specify mode as first parameter"}"
if [[ "${mode}" == "minimal" ]]; then
pacman -S bash bc ccze bind git htop iputils \
mlocate ncdu neovim openssh rsync sudo tmux vim zsh
elif [[ "${mode}" == "server" ]]; then
pacman -S cronie bash bc bwm-ng ca-certificates \
ccze curl bind ethtool gdisk git gnupg htop iputils ioping iotop jq less \
links lshw mc mlocate mtr nano neovim ncdu nethogs net-tools nmap openssh \
parted progress rsync strace sudo sysstat tcpdump inetutils tmux traceroute unzip vim wget \
xz zsh zstd
elif [[ "${mode}" == "desktop" ]]; then
pacman -S cronie bash bc bwm-ng ca-certificates \
ccze cronie curl bind ethtool gdisk git gnupg htop iputils ioping iotop jq less links \
lshw mc mlocate mtr nano neovim ncdu nethogs net-tools nmap openssh parted progress \
rsync strace sudo sysstat tcpdump inetutils tmux traceroute unzip \
vim wget xz zsh zstd
else
# main() validates the mode, so this branch should be unreachable.
echo "Something strange happened." >&2
fi
}
# Entry point: require root, resolve/validate the mode (prompting until one of
# minimal|server|desktop is given), detect the distro from /etc/os-release and
# dispatch to the matching package function.
main () {
if [[ "$EUID" != 0 ]]
then
echo "Please run as root" >&2
exit 1
fi
# Mode may come from $1; otherwise prompt until a valid value is entered.
mode="${1:-}"
while true; do
if [[ "${mode}" =~ ^(minimal|server|desktop)$ ]]; then
break
else
read -r -p 'Incorrect mode. Choose packages mode from "minimal", "server" and "desktop": ' mode
fi
done
# Extract the ID= value from os-release, e.g. "debian", "fedora", "arch".
os_id="$(sed -n -E -e 's/^ID=(\S*)$/\1/p' /etc/os-release)"
case "${os_id}" in
debian | ubuntu)
deb_packages "${mode}"
;;
fedora | centos)
rpm_packages "${mode}"
;;
arch)
arch_packages "${mode}"
;;
*)
echo "I can't install packages for your system."
;;
esac
}
main "$@"
| true
|
29871ddb1aade265f4017be9d0144f79877ccec4
|
Shell
|
srk0002/AU_BIOL-7180_SPR20_GroupProject
|
/Scripts_old/0_SeqDic.sh
|
UTF-8
| 736
| 2.625
| 3
|
[] |
no_license
|
#!/bin/bash
# Prepare fasta genome reference files by creating the .dict dictionary files
# needed for GATK. BEFORE you begin, you MUST cp the reference .fna file to
# reference.fa for it to work: Picard is sensitive to file extensions and will
# not recognize .fna as a fasta file although the contents are identical.
#######
##Reference Fasta Files
ref1=Necha2_core_fixed_ml
ref2=Fol_v2_core_ml
module load picard/1.79
#
java -Xms2g -Xmx4g -jar /opt/asn/apps/picard_1.79/picard-tools-1.79/CreateSequenceDictionary.jar REFERENCE=$ref1.fasta OUTPUT=$ref1.dict
java -Xms2g -Xmx4g -jar /opt/asn/apps/picard_1.79/picard-tools-1.79/CreateSequenceDictionary.jar REFERENCE=$ref2.fasta OUTPUT=$ref2.dict
# BUGFIX: removed the stray trailing "end" (a csh keyword, not valid bash);
# it produced "end: command not found" and a non-zero exit status.
| true
|
a886adb4dcb56abfb56b83c8c23d71ddb75cc05d
|
Shell
|
edawson/tool_gistic2_wgs
|
/src/link_conf_wrapper.sh
|
UTF-8
| 244
| 3.34375
| 3
|
[] |
no_license
|
#! /usr/bin/env bash
# Run the given command, then create generic symlinks to any output files that
# embed a confidence level (".conf_NN" or ".conf_NN.N") in their names.
eval "$@"
# Create generic symlinks to files that specify the confidence level in their
# names.
for file in *.conf_[0-9][0-9]*; do
    # Without nullglob an unmatched pattern iterates literally, which used to
    # create a broken symlink named after the glob itself; skip that case.
    [[ -e "$file" ]] || continue
    ln -s -- "$file" "$(printf '%s\n' "$file" | sed 's/\.conf_[0-9]\+\(\.[0-9]\+\)\?\././')"
done
| true
|
f577e9205d910e5b6a6c2ffb624fce40e62bc05d
|
Shell
|
VisionmateGitHub/newLinuxMachine
|
/Jenkins.sh
|
UTF-8
| 3,243
| 2.515625
| 3
|
[] |
no_license
|
#!/bin/bash
# Provision a CentOS/RHEL Jenkins build host: system update, git, several
# Oracle JDKs registered via alternatives, multiple Grails versions, Jenkins.
#update OS
sudo yum -y update
#install git-all
cd /opt/
sudo yum -y install git-all
#make sure that unzip is installed
sudo yum -y install unzip
#go to installation directory
cd /opt/
#download jdk1.7.0_71 and unzip
echo You have to download jdk1.7.0_71 manually on http://www.oracle.com/technetwork/java/javase/downloads/java-archive-downloads-javase7-521261.html
#download jdk1.8.0_171 and unzip
echo Downloading jdk1.8.0_171
sudo wget --no-cookies --no-check-certificate --header "Cookie: gpw_e24=http%3A%2F%2Fwww.oracle.com%2F; oraclelicense=accept-securebackup-cookie" "http://download.oracle.com/otn-pub/java/jdk/8u171-b11/512cd62ec5174c3487ac17c61aaa89e8/jdk-8u171-linux-x64.tar.gz"
sudo tar xzf jdk-8u171-linux-x64.tar.gz
#download jdk1.8.0_172 and unzip
echo Downloading jdk1.8.0_172
sudo wget --no-cookies --no-check-certificate --header "Cookie: gpw_e24=http%3A%2F%2Fwww.oracle.com%2F; oraclelicense=accept-securebackup-cookie" "http://download.oracle.com/otn-pub/java/jdk/8u172-b11/a58eab1ec242421181065cdc37240b08/jdk-8u172-linux-x64.tar.gz"
sudo tar xzf jdk-8u172-linux-x64.tar.gz
#install jdk1.8.0_171 with alternatives
cd /opt/jdk1.8.0_171/
sudo alternatives --install /usr/bin/java java /opt/jdk1.8.0_171/bin/java 2
#install jdk1.8.0_172 with alternatives
cd /opt/jdk1.8.0_172/
sudo alternatives --install /usr/bin/java java /opt/jdk1.8.0_172/bin/java 3
#PATH configuration
#jdk1.8.0_171
echo Setting PATH for jdk1.8.0_171 as 2nd alternative
sudo alternatives --install /usr/bin/jar jar /opt/jdk1.8.0_171/bin/jar 2
sudo alternatives --install /usr/bin/javac javac /opt/jdk1.8.0_171/bin/javac 2
sudo alternatives --set jar /opt/jdk1.8.0_171/bin/jar
sudo alternatives --set javac /opt/jdk1.8.0_171/bin/javac
#jdk1.8.0_172
echo Setting PATH for jdk1.8.0_172 as 3rd alternative
sudo alternatives --install /usr/bin/jar jar /opt/jdk1.8.0_172/bin/jar 3
sudo alternatives --install /usr/bin/javac javac /opt/jdk1.8.0_172/bin/javac 3
sudo alternatives --set jar /opt/jdk1.8.0_172/bin/jar
sudo alternatives --set javac /opt/jdk1.8.0_172/bin/javac
echo 3 | sudo alternatives --config java
echo --- Downloading all needed grails versions ---
sudo wget https://github.com/grails/grails-core/releases/download/v2.3.11/grails-2.3.11.zip -P /opt/
sudo wget https://github.com/grails/grails-core/releases/download/v2.4.4/grails-2.4.4.zip -P /opt/
sudo wget https://github.com/grails/grails-core/releases/download/v2.5.6/grails-2.5.6.zip -P /opt/
sudo wget https://github.com/grails/grails-core/releases/download/v3.3.0/grails-3.3.0.zip -P /opt/
sudo wget https://github.com/grails/grails-core/releases/download/v3.3.4/grails-3.3.4.zip -P /opt/
echo --- Unzipping all zips ---
# Return to /opt first: the cwd at this point was still /opt/jdk1.8.0_172, so
# the original unzip found no archives. Also unzip each archive separately:
# "unzip a.zip b.zip" treats b.zip as a member to extract FROM a.zip, not as
# a second archive.
cd /opt/
for zip in grails*.zip; do
    sudo unzip "$zip"
done
#not usefull in this case
#cd /opt/
#sudo bash -c 'export SDKMAN_DIR="/opt/sdkman" && curl -s "https://get.sdkman.io" | bash'
#source "/opt/sdkman/bin/sdkman-init.sh"
#yes | sdk install grails 2.3.11
#no | sdk install grails 2.4.4
#no | sdk install grails 2.5.6
#no | sdk install grails 3.3.0
#no | sdk install grails 3.3.4
cd /opt/
sudo wget -O /etc/yum.repos.d/jenkins.repo http://pkg.jenkins-ci.org/redhat-stable/jenkins.repo
sudo rpm --import https://jenkins-ci.org/redhat/jenkins-ci.org.key
sudo yum -y install jenkins
| true
|
2c7c3882120a28b5c0555e86a8e8fa02ab16f20d
|
Shell
|
kflansburg/py-custom-metrics
|
/scripts/deploy.sh
|
UTF-8
| 631
| 2.65625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Deploy the custom-metrics API server: namespace, self-signed TLS secret,
# app ConfigMap, workload manifests, and the APIService (with the CA bundle
# substituted into the manifest).
kubectl create namespace custom-metrics
echo "Creating self-signed certificate"
./scripts/certs.sh
kubectl create secret tls -n custom-metrics certs --cert=server.crt --key=server.key
kubectl create configmap -n custom-metrics app --from-file=src/
kubectl apply -f manifests/deployment.yaml
kubectl apply -f manifests/service.yaml
# macOS (BSD) base64 disables wrapping with -b=0; GNU coreutils uses --wrap=0.
case $(uname) in
  Darwin)
    b64_opts='-b=0'
    ;;
  *)
    b64_opts='--wrap=0'
esac
# Read the CA directly instead of piping through cat.
CA_BUNDLE=$(base64 "${b64_opts}" < ca.crt)
export CA_BUNDLE
envsubst < manifests/api-service.yaml | kubectl apply -f -
rm server.crt
rm server.key
rm ca.crt
| true
|
71203f99acadf87cdf2a0f90d7983a97a4bdaba0
|
Shell
|
achikin/git-changed
|
/git-changed
|
UTF-8
| 881
| 3.796875
| 4
|
[] |
no_license
|
#!/bin/bash
# Summarise the number of files/lines changed by an author since a point in
# time, using "git log --stat" totals.
# Options:
#   -a|--author EMAIL  author to filter by (default: git config user.email)
#   -s|--since WHEN    git --since expression (default: "midnight")
# Site-wide defaults may be pre-set in /usr/local/etc/git-changed.conf.
if [ -f /usr/local/etc/git-changed.conf ]
then
    source /usr/local/etc/git-changed.conf
fi
# Numeric -gt instead of the original lexicographic [[ $# > 1 ]], which both
# compared strings and silently ignored a final lone argument.
while [[ $# -gt 0 ]]
do
    key="$1"
    case $key in
        -h|--help)
            # flag only, takes no value (the original shifted here too, which
            # wrongly swallowed the argument following -h)
            ;;
        -a|--author)
            author="$2"
            shift # past the option's value
            ;;
        -s|--since)
            since="$2"
            shift # past the option's value
            ;;
        *)
            # unknown option
            ;;
    esac
    shift # past the argument itself
done
if [ -z "$since" ]; then
    since="midnight"
fi
if [ -z "$author" ]; then
    author=$(git config --get user.email)
fi
echo "Changes summary since ${since} for ${author}"
# Quote the author/since values so emails or dates containing spaces survive.
git log --stat --no-merges --author "$author" --since="$since" | awk -F',' '/files? changed/ {
    files += $1
    insertions += $2
    deletions += $3
}
END {
    print "Files Changed: " files
    print "Insertions: " insertions
    print "Deletions: " deletions
    print "Lines changed: " insertions + deletions
}'
| true
|
47ebf56505bc85a43d42e9fb5a78005e8e4c7fc3
|
Shell
|
danielbicho/functional-tests
|
/awpTests.sh
|
UTF-8
| 330
| 3.03125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Run the Arquivo.pt functional test suite headlessly and report pass/fail.
export DISPLAY=:99 # Display port number of Xvfb
# Abort if the test checkout is missing; the original ignored a failed cd and
# would have run ant from whatever directory the script happened to be in.
cd /root/functional-tests/ || exit 1
# A run passes only when ant reports zero failures and zero errors.
plataResult=$(/usr/bin/ant test -Dtestcase=pt.fccn.arquivo.tests.AllTests -Dtest.url=http://arquivo.pt | grep "Failures: 0, Errors: 0")
if [ -z "$plataResult" ]; then
    echo "TESTS FAIL"
else
    echo "TESTS OK"
fi
echo "Result: $plataResult"
| true
|
3d211fce599ed19d165df6b7385c0dde6a7987ca
|
Shell
|
thomasvincent/utilities
|
/Standalone_Scripts/ZenOSS_AddHosts/bash_api_docs/list_servers.sh
|
UTF-8
| 717
| 2.71875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# An example of using the JSON API. Prints out five servers.
#
# $ ./list_devices.sh
# [
#  "austinbot.zenoss.loc (10.87.209.6)",
#  "osx105b.zenoss.loc (10.204.210.24)",
#  "test-aix53-1.zenoss.loc (10.175.210.227)",
#  "test-aix53-2.zenoss.loc (10.175.210.228)",
#  "test-aix61.zenoss.loc (10.175.210.229)"
# ]
#
# Requires jsawk (https://github.com/micha/jsawk)
# and spidermonkey (yum install js)
# and the perl JSON module
. zenoss_api.sh
zenoss_api device_router DeviceRouter getDevices '{"uid": "/zport/dmd/Devices/Server", "limit": 5, "params": {}}'
# Quote $resp so the JSON payload reaches jsawk unmodified; unquoted, word
# splitting collapses its whitespace.
echo "$resp" | jsawk 'return this.result.devices' | jsawk 'return this.name + " (" + this.ipAddressString + ")"' | ./pprint.sh
| true
|
51da1b5974f2418f91a5dedb07f5333ad0ef85a3
|
Shell
|
AaronZLT/CL-EDEN-kernel
|
/samples/external/push_and_test.sh
|
UTF-8
| 1,349
| 3.21875
| 3
|
[] |
no_license
|
# Push freshly built ENN libraries/binaries and sample models to a device via
# adb, then run the external sample test on it.
# Options: -T/-t <target name> (default erd9925), -P/-p use platform build.
if [ -z "$ANDROID_TOP" ]; then
    echo ""
    echo " ## Please set \$ANDROID_TOP as a top of android directory."
    echo "    Example)"
    echo "      $ export ANDROID_TOP=<android dir>  or add this at ~/.bashrc"
    echo ""
    # Signal failure: the original exited 0 here, hiding the error from callers.
    exit 1;
fi
TARGET_NAME="erd9925"
PLATFORM_BUILD=false
while getopts "T:t:Pp" flag; do
    case $flag in
        T) TARGET_NAME=$OPTARG;;
        t) TARGET_NAME=$OPTARG;;
        P) PLATFORM_BUILD=true;;
        p) PLATFORM_BUILD=true;;
    esac
done
PUSH_ORIGIN_VENDOR_PATH="$ANDROID_TOP/out/target/product/$TARGET_NAME/vendor"
echo ""
echo " ### Configurations"
echo ""
echo "  # TARGET_NAME : $TARGET_NAME"
echo "  # ANDROID_TOP : $ANDROID_TOP"
echo "  # PUSH LIB/BIN ORIGIN VENDOR PATH : $PUSH_ORIGIN_VENDOR_PATH"
echo ""
if [ ! -d "$PUSH_ORIGIN_VENDOR_PATH" ]; then
    echo "Invalid target name: ${TARGET_NAME}"
    # Signal failure: the original exited 0 here as well.
    exit 1;
fi
# push
adb root
adb remount
adb shell "mkdir -p /data/vendor/enn/models/pamir/"
adb push sample_nnc/* /data/vendor/enn/models/pamir/
adb push "$PUSH_ORIGIN_VENDOR_PATH/lib" /vendor/
adb push "$PUSH_ORIGIN_VENDOR_PATH/lib64" /vendor/
if [[ ${PLATFORM_BUILD} == true ]]; then
    adb push "$PUSH_ORIGIN_VENDOR_PATH/bin/enn_sample_external" /vendor/bin/
else
    adb push libs/arm64-v8a/enn_sample_external /vendor/bin/
fi
# test
adb shell "enn_sample_external"
| true
|
179501f4a70d235c64f9143e2dcf8e1ca2320066
|
Shell
|
kamleshksingh/Kettle
|
/MKDM/common_funcs/count_recs
|
UTF-8
| 2,142
| 3.375
| 3
|
[] |
no_license
|
#!/bin/ksh
#*****************************************************************************
#** Program : count_recs
#** Original Author : Brian Syptak
#**
#** Description : Generic function to count rows in a table via sqlplus.
#** The parameters to be passed are as shown below:
#** 1) Table name (may include owner i.e. mkdm.ld_usage_tn)
#** 2) OPTIONAL: "PARTITION" or "WHERE" or nothing
#** 3) OPTIONAL: partition name or where clause:
#** i.e. "(P200502)" or "load_date = trunc(sysdate)"
#**
#** Example command lines:
#** Usage 1: count_recs ld_usage_tn
#** Usage 2: count_recs ld_usage_tn PARTITION (P200502)
#** Usage 3: count_recs ld_usage_tn WHERE "load_date = trunc(sysdate)"
#**
#** Revision History: Please do not stray from the example provided.
#**
#** Modfied User
#** Date ID Description
#** MM/DD/YYYY CUID
#** ********** ******* *******************************************************
#** 02/25/2005 bsyptak Initial Checkin
#******************************************************************************
function count_recs
{
# No qualifier: count every row in the table.
if [ $# -eq 1 ]; then
sqlplus -s $ORA_CONNECT << SQLEOT
set feedback off
set verify off
set head off
WHENEVER SQLERROR EXIT SQL.SQLCODE
define TABLE_NAME=$1
select count(*) from &TABLE_NAME;
quit;
SQLEOT
# "$2" is quoted so the test cannot error out or mis-parse when the argument
# is empty or contains whitespace (the original used an unquoted $2).
elif [ "$2" = "PARTITION" ]; then
sqlplus -s $ORA_CONNECT << SQLEOT
set feedback off
set verify off
set head off
WHENEVER SQLERROR EXIT SQL.SQLCODE
define TABLE_NAME=$1
define PARTITION_NAME=$3
select count(*) from &TABLE_NAME partition &PARTITION_NAME ;
quit;
SQLEOT
elif [ "$2" = "WHERE" ]; then
sqlplus -s $ORA_CONNECT << SQLEOT
set feedback off
set verify off
set head off
WHENEVER SQLERROR EXIT SQL.SQLCODE
define TABLE_NAME=$1
define WHERE_CLAUSE=$3
select count(*) from &TABLE_NAME where &WHERE_CLAUSE ;
quit;
SQLEOT
fi
}
| true
|
571e680a0a38ae70faa57280e2392eb3b5ea39ae
|
Shell
|
iustitia/chaos-monkey
|
/scripts/init.sh
|
UTF-8
| 1,063
| 3
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Bring up a local minikube cluster sized for chaos-monkey experiments, then
# disable RBAC on the apiserver (see the workaround note at the bottom).
CPUS=4
RAM=8192
DISK_SIZE=25000mb
if minikube status ; then
    echo "Minikube is already running... Unable to initialize minikube.";
else
    echo "Starting minikube...";
    start_args=(--cpus "$CPUS" --memory "$RAM" --disk-size "$DISK_SIZE")
    # macOS needs the hyperkit driver; elsewhere the default driver is used.
    if [[ $OSTYPE == darwin* ]]; then
        start_args+=(--driver=hyperkit)
    fi
    # Starting without RBAC via
    # --extra-config=apiserver.authorization-mode=AlwaysAllow does not work on
    # this setup, hence the ssh workaround after startup.
    minikube start "${start_args[@]}"
fi
sleep 5
# turn off RBAC - workaround
minikube ssh 'sudo cat /etc/kubernetes/manifests/kube-apiserver.yaml | sed -r "s/--authorization-mode=.+/--authorization-mode=AlwaysAllow/g" | sudo tee /etc/kubernetes/manifests/kube-apiserver.yaml'
| true
|
3e48b60c00a1d7fd17220fae35a1cc58755c53b8
|
Shell
|
PeerStreet/kue-bootstrap
|
/bin/update.sh
|
UTF-8
| 289
| 3.09375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Fetch the latest kue bootstrap library from GitHub and run its update task.

# True when DEBUG=true is exported; used to toggle command tracing.
debug() {
  [[ $DEBUG == "true" ]]
}

# Download bin/<name>.sh from the kue-bootstrap repo and source it.
# Arguments: $1 - script basename (without .sh)
# Returns: non-zero when the download fails.
source_remote() {
  local temp
  temp=$(mktemp) || return 1
  # -f makes curl fail on HTTP errors; the original -s silently sourced the
  # HTML error page on a 404/500.
  if ! curl -fsS -o "$temp" https://raw.githubusercontent.com/PeerStreet/kue-bootstrap/master/bin/${1}.sh; then
    rm -f "$temp"
    echo "Failed to download ${1}.sh" >&2
    return 1
  fi
  source "$temp"
  rm "$temp"
}

main() {
  debug && set -x
  source_remote "kue" || exit 1
  kue::update
}

main "$@"
| true
|
afad112c0f9b5a3049e0e13d15f4ced411e4b3bc
|
Shell
|
frogman1189/low_battery_systemd_script
|
/uninstall.sh
|
UTF-8
| 497
| 3.78125
| 4
|
[] |
no_license
|
#!/bin/bash
# Uninstall the low-battery systemd units. Runs under bash (not sh): the
# original shebang was /bin/sh but arrays are a bashism.
# find script dir incase being run outside install folder
dest="/usr/lib/systemd/system/"
files=( low_battery.timer low_battery.service low_battery.d )
# The original line had unbalanced quotes/parentheses and failed to parse;
# this is the intended "directory containing this script" resolution.
script_dir=$(dirname -- "$(readlink -f -- "${BASH_SOURCE[0]}")")
cd "$script_dir" || exit 1
echo "removing ${files[@]} from $dest"
for file in "${files[@]}"
do
    # check before each removal. There should only be 4 files (low_battery.d
    # contains low_battery.sh), and it feels safer to have this safeguard.
    # Remove the installed copy under $dest; the original passed "$file" and
    # "$dest" as two separate operands, which would have tried to delete the
    # systemd directory itself.
    rm -rdi -- "${dest}${file}"
done
| true
|
810de488845a2768dcb0c2360db8774f89b47a0a
|
Shell
|
epasham/elk
|
/elk-xpack/kibana/kibana-setup.sh
|
UTF-8
| 1,425
| 3.203125
| 3
|
[] |
no_license
|
#!/bin/bash
#kibana setup
#OS requirement: redhat 7
set -x
# Absolute directory of this script; env.conf and certs live next to it.
abs_path=$(cd "$(dirname "$0")" && pwd)
source "$abs_path/../env.conf"
#kibana rpm install: only install the package when the wanted version is absent
rpm -qa | grep "kibana-${ELK_VERSION}"
[ $? -ne 0 ] && sudo rpm --install "${KB_PKG_URL}"
#install x-pack
if [ ! -d "/usr/share/kibana/plugins/x-pack" ]; then
    echo "install kibana x-pack..."
    sudo /usr/share/kibana/bin/kibana-plugin install x-pack -q
else
    echo "x-pack already exist."
fi
[ ! -d /etc/kibana/certs ] && sudo mkdir /etc/kibana/certs
sudo cp "$abs_path"/../certs/out/* /etc/kibana/certs
#config kibana: back up the stock config and write ours from scratch
sudo mv /etc/kibana/kibana.yml /etc/kibana/kibana.yml.bk
sudo touch /etc/kibana/kibana.yml
sudo tee /etc/kibana/kibana.yml <<FFF
server.port: 5601
server.host: 0.0.0.0
server.name: "${KB_NAME}"
elasticsearch.url: "${ES_API_URL}"
kibana.defaultAppId: "${KB_HOME}"
elasticsearch.username: kibana
elasticsearch.password: "${KB_PASSWORD}"
xpack.security.enabled: true
server.ssl.enabled: true
server.ssl.certificate: /etc/kibana/certs/es_http.pem
server.ssl.key: /etc/kibana/certs/es_http.key
elasticsearch.ssl.certificate: /etc/kibana/certs/es_http.pem
elasticsearch.ssl.key: /etc/kibana/certs/es_http.key
elasticsearch.ssl.certificateAuthorities: [ "/etc/kibana/certs/root-ca.pem" ]
elasticsearch.ssl.verificationMode: none
FFF
#start kibana service
sudo /bin/systemctl daemon-reload
sudo /bin/systemctl enable kibana.service
sudo systemctl restart kibana.service
| true
|
d4f0dec1c64887d6303219ec6649d179f4d64095
|
Shell
|
tauri-apps/tauri
|
/.scripts/cargo-check.sh
|
UTF-8
| 1,107
| 3.96875
| 4
|
[
"Apache-2.0",
"CC0-1.0",
"MIT",
"CC-BY-NC-ND-4.0"
] |
permissive
|
#!/usr/bin/env sh
# Copyright 2019-2023 Tauri Programme within The Commons Conservancy
# SPDX-License-Identifier: Apache-2.0
# SPDX-License-Identifier: MIT

# Check the workspace with a set of cargo sub-commands, passed as arguments.
# Allowed sub-commands: check, clippy, fmt, test; when none are given the
# default set "clippy fmt test" is used.

# abort as soon as any of the commands fails
set -e

# fall back to the default sub-command list when called with no arguments
if [ -z "$*" ]; then
  set -- "clippy" "fmt" "test"
fi

# Run one cargo sub-command over the whole workspace. As many features as
# possible are enabled; extra arguments are forwarded to cargo. (The extra
# run covers all crates without mutually exclusive features.)
run() {
  subcmd=$1
  shift 1
  cargo "$subcmd" --workspace --all-targets --all-features "$@"
}

for subcmd in "$@"; do
  case "$subcmd" in
    clippy)
      # deny all warnings so lints fail the build
      run clippy -- -D warnings
      ;;
    fmt)
      echo "[$subcmd] checking formatting"
      cargo +nightly fmt -- --check
      ;;
    check | test)
      run "$subcmd"
      ;;
    *)
      echo "[cargo-check.sh] Unknown cargo sub-command: $subcmd"
      exit 1
      ;;
  esac
done
| true
|
67feced35aaaaaf147f9febc9cc07f1923f62c05
|
Shell
|
mdestombes/minecraft_ftbrev_server
|
/run.sh
|
UTF-8
| 6,428
| 3.875
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Container entrypoint for an FTB Minecraft server: installs the server on
# first run, wires in optional mods, validates the EULA, then runs the server
# inside a tmux session named "minecraft" so INT/TERM can be forwarded to the
# server console as a clean "stop".
echo "************************************************************************"
echo "* Minecraft Server launching... ("`date`")"
echo "************************************************************************"
java -version
# Trap management
# The FIFO blocks the foreground "read" near the bottom, keeping this
# entrypoint alive until a signal handler fires.
[[ -p /tmp/FIFO ]] && rm /tmp/FIFO
mkfifo /tmp/FIFO
export TERM=linux
# Stop management
# Send "stop" to the server console (tmux session "minecraft"), allow a grace
# period for a clean shutdown, then exit the entrypoint.
function stop {
echo -e "\n*************************************************"
echo "* Send stop to Minecraft server"
echo "*************************************************"
# Stopping minecraft server
tmux send-keys -t minecraft "stop" C-m
echo -e "\n*************************************************"
echo "* Minecraft server stopping"
echo "*************************************************"
sleep 10
echo -e "\n*************************************************"
echo "* Minecraft server stoppped"
echo "*************************************************"
exit
}
# Init mods
# One-time copy of optional mod jars (selected via WITH_* env flags) from
# /minecraft/data/plugins into /minecraft/data/mods; guarded by a marker file.
function init_mods {
if [[ ! -f /minecraft/data/mods_initialized ]]; then
echo -e "\n*************************************************"
echo "* Mods installation..."
echo "*************************************************"
if [[ "${WITH_DYNMAP}" == "YES" ]]; then
echo "Add DynMap mod..."
# Copy plugins
cp -f /minecraft/data/plugins/Dynmap-* /minecraft/data/mods
else
echo "Avoiding DynMap mod!"
fi
if [[ "${WITH_BUILDCRAFT}" == "YES" ]]; then
echo "Add Buildcraft mod..."
# Copy plugins
cp -f /minecraft/data/plugins/buildcraft-all-* /minecraft/data/mods
else
echo "Avoiding Buildcraft mod!"
fi
if [[ "${WITH_DYNMAP}" == "YES" ]] && [[ "${WITH_BLOCKSCAN}" == "YES" ]]; then
echo "Add DynMap Blockscan mod..."
# Copy plugins
cp -f /minecraft/data/plugins/DynmapBlockScan-* /minecraft/data/mods
else
echo "Avoiding DynMap Blockscan mod!"
fi
if [[ "${WITH_ENERGY}" == "YES" ]]; then
echo "Add Energy mod..."
# Copy plugins
cp -f /minecraft/data/plugins/energyconverters-* /minecraft/data/mods
else
echo "Avoiding Energy mod!"
fi
touch /minecraft/data/mods_initialized
fi
}
# Check mods launched
# Inspect the server log for "Unable to read the jar file ..." lines to
# report whether each enabled mod actually loaded; triggers the one-time
# Dynmap port configuration on success.
function check_mods {
echo -e "\n*************************************************"
echo "* Mods management..."
echo "*************************************************"
sleep 10
if [[ "${WITH_DYNMAP}" == "YES" ]]; then
if [[ `cat /minecraft/data/logs/latest.log | grep 'Unable to read the jar file Dynmap-'` == "" ]]; then
echo "DynMap mod launched..."
# Dynmap port configuration
init_dynmap
else
echo "DynMap mod launch failed..."
fi
fi
if [[ "${WITH_BUILDCRAFT}" == "YES" ]]; then
if [[ `cat /minecraft/data/logs/latest.log | grep 'Unable to read the jar file buildcraft-all-'` == "" ]]; then
echo "Buildcraft mod launched..."
else
echo "Buildcraft mod launch failed..."
fi
fi
if [[ "${WITH_DYNMAP}" == "YES" ]] && [[ "${WITH_BLOCKSCAN}" == "YES" ]]; then
if [[ `cat /minecraft/data/logs/latest.log | grep 'Unable to read the jar file DynmapBlockScan-'` == "" ]]; then
echo "DynMap Blockscan mod launched..."
else
echo "DynMap Blockscan mod launch failed..."
fi
fi
if [[ "${WITH_ENERGY}" == "YES" ]]; then
if [[ `cat /minecraft/data/logs/latest.log | grep 'Unable to read the jar file energyconverters-'` == "" ]]; then
echo "Energy mod launched..."
else
echo "Energy mod launch failed..."
fi
fi
}
# Init dynmap configuration
# One-time Dynmap setup: wait for Dynmap's first initialization to appear in
# the log, stop the server, rewrite dynmap/configuration.txt from the
# template (substituting MOTD and DYNMAP_PORT), then restart the server.
# Guarded by a marker file so it only runs once per data volume.
function init_dynmap {
if [[ ! -f /minecraft/data/dynmap_initialized ]] && [[ "${WITH_DYNMAP}" == "YES" ]]; then
echo -e "\n*************************************************"
echo "* Specific configuration of Dynmap..."
echo "*************************************************"
echo "Waiting for first intialization..."
# Blockscan's block scanning makes the first start much slower.
if [[ "${WITH_BLOCKSCAN}" == "YES" ]]; then
sleep 180
else
sleep 60
fi
# Poll the log until Dynmap reports "Enabled" or its jar failed to load.
while [[ `cat /minecraft/data/logs/latest.log | grep '\[Dynmap\]: \[Dynmap\] Enabled'` == "" ]] \
&& [[ `cat /minecraft/data/logs/latest.log | grep 'Unable to read the jar file Dynmap'` == "" ]]; do
echo "...Waiting more..."
sleep 10
done
if [[ `cat /minecraft/data/logs/latest.log | grep 'Unable to read the jar file Dynmap'` == "" ]]; then
echo "Stopping Minecraft server..."
# Stopping minecraft server
tmux send-keys -t minecraft "stop" C-m
sleep 60
echo "Upgrade Dynmap config..."
cat /minecraft/bin/dynmap_config.txt | sed \
-e "s:__MOTD__:${MOTD}:g" \
-e "s:__DYNMAP_PORT__:${DYNMAP_PORT}:g" \
> /minecraft/data/dynmap/configuration.txt
echo "Restarting Minecraft server..."
# Launching minecraft server
tmux send-keys -t minecraft "/minecraft/data/ServerStart.sh" C-m
fi
touch /minecraft/data/dynmap_initialized
fi
}
# Install
# First run only: copy the downloaded server distribution into the data dir.
if [[ ! -f /minecraft/data/ServerStart.sh ]]; then
# Copy install
cp -fr /minecraft/downloads/* /minecraft/data
fi
# Includ mods port configuration
init_mods
# Eula License
# Generate eula.txt from the EULA env var, or abort with instructions.
if [[ ! -f /minecraft/data/eula.txt ]]; then
# Check Minecraft license
if [[ "$EULA" != "" ]]; then
echo "# Generated via Docker on $(date)" > /minecraft/data/eula.txt
echo "eula=$EULA" >> /minecraft/data/eula.txt
else
echo ""
echo "Please accept the Minecraft EULA at"
echo " https://account.mojang.com/documents/minecraft_eula"
echo "by adding the following immediately after 'docker run':"
echo " -e EULA=TRUE"
echo "or editing eula.txt to 'eula=true' in your server's data directory."
echo ""
exit 1
fi
fi
# Check server configuration
# Runs configure.py when server.properties is missing OR FORCE_CONFIG=true
# ("a || b && c" groups as "(a || b) && c" in bash, which is the intent here).
[[ ! -f /minecraft/data/server.properties ]] || [[ "${FORCE_CONFIG}" = "true" ]] && python /minecraft/bin/configure.py --config
# Minecraft server session creation
tmux new -s minecraft -c /minecraft/data -d
# Launching minecraft server
tmux send-keys -t minecraft "PATH=$PATH" C-m
tmux send-keys -t minecraft "/minecraft/data/ServerStart.sh" C-m
# Stop server in case of signal INT or TERM
trap stop INT
trap stop TERM
# Block on the FIFO in the background; the foreground "wait" below keeps the
# entrypoint alive so the traps can fire.
read < /tmp/FIFO &
# Check launched mods
check_mods
echo -e "\n*************************************************"
echo "* Minecraft server launched. Wait few minutes..."
echo "*************************************************"
wait
| true
|
b00144cfc36edeb7aa02bdad419736eb926565c8
|
Shell
|
ryansb/brassballs
|
/lib/balls.sh
|
UTF-8
| 616
| 2.828125
| 3
|
[] |
no_license
|
#!/bin/bash
# Bootstrap for the balls framework: establish defaults for all BALLS_*
# settings (respecting any values already set in the environment), then load
# the library modules from $BALLS_LIB.
[[ -z "$BALLS_LIB" ]] && BALLS_LIB=.
[[ -z "$BALLS_CONF" ]] && BALLS_CONF=./config.sh
[[ -z "$BALLS_ROOT" ]] && BALLS_ROOT=$(readlink -f "$(dirname "$BALLS_CONF")/../")
# NOTE: a second fallback '[[ -z "$BALLS_LIB" ]] && BALLS_LIB=$(dirname $0)'
# was dead code (BALLS_LIB is always set above) and has been removed.
[[ -z "$BALLS_TMP" ]] && BALLS_TMP=/tmp/balls
[[ -d "$BALLS_TMP" ]] || mkdir "$BALLS_TMP"
[[ -z "$BALLS_PORT" ]] && BALLS_PORT=3000
[[ -z "$BALLS_VIEWS" ]] && BALLS_VIEWS=$BALLS_ROOT/views
[[ -z "$BALLS_ACTIONS" ]] && BALLS_ACTIONS=$BALLS_ROOT/actions
. "$BALLS_LIB/util.sh"
. "$BALLS_LIB/http.sh"
. "$BALLS_LIB/router.sh"
. "$BALLS_LIB/server.sh"
. "$BALLS_LIB/view.sh"
. "$BALLS_LIB/model.sh"
|
8e5d2db3b4a23107923f8d4661bac3339b92a624
|
Shell
|
diekhans/t2t-chm13-gene-analysis
|
/bin/starAlignPara
|
UTF-8
| 258
| 3.015625
| 3
|
[] |
no_license
|
#!/bin/bash -e
# Wrapper for running under parasol. Makes sure PATH is correct.
# starAlign does atomic install of file, so no need for any error handling
set -beEu -o pipefail
# Pull in the user's environment so PATH matches an interactive shell.
source ~/.bashrc
# Run the real starAlign sitting next to this wrapper, forwarding all args.
binDir=$(dirname $(realpath $0))
exec $binDir/starAlign "$@"
| true
|
7d9e7cecee2d3cb79f1296a9286c4af4b0886c82
|
Shell
|
garbetjie/docker-nginx
|
/fpm/fs/docker-entrypoint.sh
|
UTF-8
| 946
| 3.796875
| 4
|
[] |
no_license
|
#!/bin/sh
# Entrypoint: render nginx config templates from environment variables, then
# start nginx in the foreground. POSIX sh only: the original used [[ ]] and
# $'\n', which are bashisms under a /bin/sh shebang.

# literal newline for building the allow/deny list
nl='
'
# Build up the list of hosts that are allowed to access the FPM status path.
IFS=" "
fpm_status_hosts_formatted=""
for cidr in $FPM_STATUS_HOSTS_ALLOWED; do
    [ -n "$cidr" ] && fpm_status_hosts_formatted="${fpm_status_hosts_formatted}    allow ${cidr};${nl}"
done
for cidr in $FPM_STATUS_HOSTS_DENIED; do
    [ -n "$cidr" ] && fpm_status_hosts_formatted="${fpm_status_hosts_formatted}    deny ${cidr};${nl}"
done
export FPM_STATUS_HOSTS_FORMATTED="$fpm_status_hosts_formatted"

# Replace environment variables in every nginx configuration file, writing
# through a temp file so a partially-rendered config is never left in place.
find /etc/nginx -type f -iname '*.conf' | while IFS= read -r file; do
    envsubst '$FPM_HOST
    $FPM_PORT
    $FPM_STATUS_PATH
    $FPM_STATUS_HOSTS_FORMATTED
    $MAX_REQUEST_SIZE
    $TIMEOUT
    $WEBROOT' < "$file" > "${file}.tmp"
    mv "${file}.tmp" "$file"
done

# Run the nginx server.
exec nginx
| true
|
a6cbca273ea86439cbdda8b1cc47ab61ffd17e06
|
Shell
|
citb30/shell_scripts
|
/03-input-read.sh
|
UTF-8
| 114
| 2.859375
| 3
|
[] |
no_license
|
#!/bin/bash
## Input can be taken from read command.
# -r keeps backslashes in the input literal instead of treating them as
# escape characters.
read -r -p 'Enter your name: ' name
echo "Your Name = $name"
| true
|
923a0a00be0da973ba1b989815dd54d92dce6cfd
|
Shell
|
mateusmanuel/TCC-experiments
|
/clover-history/org.jsoup-jsoup/extract.sh
|
UTF-8
| 800
| 3.28125
| 3
|
[] |
no_license
|
#!/bin/bash
# Decompress per-build Clover XML reports and extract each one's execution
# time and coverage ratio into statistics_execution.csv.

# Unmatched globs expand to nothing instead of the literal pattern, so the
# loops (and the final rm) are safe when no reports are present.
shopt -s nullglob

for FILE in *.xml.gz; do
    gunzip "$FILE"
done

echo "seconds,coverage,time" > statistics_execution.csv

I=2
for FILE in *.xml; do
    # Only the report header (lines 2-4) carries the attributes we need.
    sed -n '2,4p;5q' "$FILE" > "$FILE.out"

    # Delta time = report "generated" timestamp minus project "timestamp"
    # (each has its trailing quote/char stripped with ${VAR%?}).
    END=$(grep -oP "(?<=generated=\")[^ ]+" "$FILE.out")
    END=${END%?}
    START=$(grep -oP "(?<=timestamp=\")[^ ]+" "$FILE.out")
    START=${START%?}
    START=${START%?}
    TOTAL_TIME=$((END - START))

    # Coverage = covered elements / total elements, at 16 decimal places.
    COV_ELEMENTS=$(grep -oP "(?<=coveredelements=\")[^ ]+" "$FILE.out")
    COV_ELEMENTS=${COV_ELEMENTS%?}
    UNCOV_ELEMENTS=$(grep -oP "(?<= elements=\")[^ ]+" "$FILE.out")
    UNCOV_ELEMENTS=${UNCOV_ELEMENTS%?}
    COVERAGE=$(bc -l <<< "scale=16; $COV_ELEMENTS/$UNCOV_ELEMENTS")
    echo "$I,$COVERAGE,$TOTAL_TIME" >> statistics_execution.csv
    ((I+=2))
done

# -f: no error when the loop produced no .out files.
rm -f -- *.out
| true
|
b6784bc80a229d43c98bd6a11569ca010c42112d
|
Shell
|
openstack-charmers/devstack-utils-nova-lxd
|
/common-functions.sh
|
UTF-8
| 7,775
| 4.125
| 4
|
[] |
no_license
|
# bash common functions for various common actions in the devstack scripts
## Determine if the OpenStack VARS are set to access serverstack (or anything else)
# cache the result
unset _OS_VARS
# Exits the whole shell (exit 1) unless OS_REGION_NAME is "serverstack".
# Caches the positive result in _OS_VARS so the env scan runs only once.
function assert_os_vars_are_set {
    local _OS_REGION_NAME
    if [[ -z "$_OS_VARS" ]]; then
        # The OS_VARS need to be set up to serverstack - let's make sure that they are
        _OS_REGION_NAME=$(env | grep OS_REGION_NAME | cut -d "=" -f 2)
        # exit if this isn't running against serverstack
        if [[ "xxx$_OS_REGION_NAME" == "xxxserverstack" ]]; then
            _OS_VARS=1
        else
            echo "OS_VARS are not set for serverstack (OS_REGION_NAME) - exiting"
            exit 1
        fi
    fi
}
## Assert that the ssh vars are set for getting keys to instances, etc.
# Exits the shell when DEVSTACK_KEYPAIR_NAME or DEVSTACK_SSH_IDENTITY is unset.
function assert_ssh_vars_are_set {
    local _exit
    if [[ -z "$DEVSTACK_KEYPAIR_NAME" ]]; then
        echo "the \$DEVSTACK_KEYPAIR_NAME env var is not set"
        _exit=1
    fi
    if [[ -z "$DEVSTACK_SSH_IDENTITY" ]]; then
        echo "the \$DEVSTACK_SSH_IDENTITY for the private key is not set"
        _exit=1
    fi
    if [[ ! -z $_exit ]]; then
        exit 1
    fi
}
## see if we are pre-version 3.0 of the openstack client. Some of the commands change for it.
unset OS_VERSION
# Caches the openstack client's major version digit in the global OS_VERSION.
function which_openstack_version {
    if [[ -z "$OS_VERSION" ]]; then
        local _version=$(openstack --version 2>&1 | awk '{print $2}')
        # take the first character of the version string
        OS_VERSION=${_version:0:1}
    fi
}
## see if an instance exists; pass the variable as the first param
# returns 1 if the instance does exist
# NOTE(review): the return convention is inverted relative to the name —
# 0 means "does NOT exist", 1 means "exists" (as the comment above states);
# callers must test accordingly.
function does_instance_exist {
    assert_os_vars_are_set
    openstack server list -c Name -f value | egrep "^${1}$" 2>&1 > /dev/null
    if [[ "$?" == "1" ]]; then
        return 0
    else
        return 1
    fi
}
## see if an image exists; pass the variable as the first param
# returns 1 if the instance does exist
# NOTE(review): same inverted convention as does_instance_exist —
# returns 0 when the image is absent, 1 when it exists.
function does_image_exist {
    assert_os_vars_are_set
    openstack image show $1 2>&1 > /dev/null
    if [[ "$?" == "1" ]]; then
        return 0
    else
        return 1
    fi
}
## get the public ip for an instance by name or id
# returns $? == 1 if the instance doesn't exist
# Sets the global ${floating_ip}; unsets it when the server has only one
# address. NOTE(review): failure paths use "exit 1", which terminates the
# calling shell, not just this function.
function find_floating_ip_for {
    assert_os_vars_are_set
    local _addrs
    local addresses
    if [[ -z "${1}" ]]; then
        echo "Must pass a server name/id to $0"
        exit 1
    fi
    _addr_line=$(openstack server show ${1} -f shell | grep addresses)
    if [[ "$?" == "1" ]]; then
        exit 1
    fi
    # eval the shell line
    eval "$_addr_line"
    _ifs=$IFS
    IFS=', ' read -r -a _addrs <<< "$_addr_line"
    IFS=$_ifs
    if [[ "${#_addrs[@]}" == "1" ]]; then
        unset floating_ip
    fi
    # the public address is the 2nd column
    floating_ip=$(echo "${_addrs[1]}" | tr -d '"\n')
}
## wait for the server to answer on the ssh port
# $1 is the keyfile
# $2 is the options
# $3 is the username@server details
# NOTE(review): only $1 is actually used — unquoted, so callers apparently
# pass the whole "options + target" string as a single argument and rely on
# word splitting; $2/$3 in the doc above are never referenced. Confirm with
# call sites before changing.
function wait_for_ssh {
    local maxConnectionAttempts=10
    local sleepSeconds=10
    echo "Checking for ssh connection ..."
    local index=1
    while (( $index <= $maxConnectionAttempts )); do
        ssh $1 echo
        case $? in
            0) echo "${index}> Ready"; break ;;
            *) echo "${index} of ${maxConnectionAttempts}> Not ready, waiting ${sleepSeconds} seconds ...";;
        esac
        sleep $sleepSeconds
        (( index+=1 ))
    done
}
## run a remote command on the server
# $1 = identity file of public key
# $2 = user to run at remote command
# $3 = server or IP to run the command on
# $4 = the command to run
# Runs $4 over ssh with host-key checking disabled (throwaway test hosts).
function run_remote_cmd {
    local _identity_file=${1}
    local _user=${2}
    local _server=${3}
    local _cmd=${4}
    local _ssh_options="-i ${_identity_file} -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no"
    ssh ${_ssh_options} ${_user}@${_server} "'${_cmd}'"
}
## add a hostname to the /etc/hosts file; needs sudo
# the hostname is in $1 and the ip address in $2
# note that the env DEVSTACK_MODIFY_ETC_HOSTS needs to be set for this to work
# Any existing line ending in the hostname is removed first, so repeated
# calls replace rather than duplicate the entry.
function add_host_to_hosts {
    local _host_name=$1
    local _ip_address=$2
    if [[ ! -z ${DEVSTACK_MODIFY_ETC_HOSTS} ]]; then
        sudo sed -i "/${_host_name}\$/d" /etc/hosts
        echo "${_ip_address} ${_host_name}" | sudo tee -a /etc/hosts
    fi
}
## see if an key-pair exists; pass the variable as the first param
# returns 1 if the key-pair does exist
# NOTE(review): inverted convention, as with does_instance_exist above.
function does_keypair_exist {
    assert_os_vars_are_set
    openstack keypair list -c Name -f value | egrep "^${1}$" 2>&1 > /dev/null
    if [[ "$?" == "1" ]]; then
        return 0
    else
        return 1
    fi
}
## Returns the net_id for a searchable network name in $1
# The returned value is in the var ${net_id} and ${net_name}
function get_net_name_and_id_for_network {
    assert_os_vars_are_set
    local network_name="${1}"
    local _net
    #net_id=$(openstack network list -c ID -c Name -f value | egrep "${network_name}" | awk '{print $1}' | tr -d '\n')
    _net=$(openstack network list -c ID -c Name -f value | egrep "${network_name}")
    net_name=$(echo "$_net" | awk '{print $2}' | tr -d '\n')
    net_id=$(echo "$_net" | awk '{print $1}' | tr -d '\n')
}
## Returns the name and id of an image when passed something to grep in $1
# The last image (sorted by name) is the one returned from the grep.
# returns ${image_name} and ${image_id}
function get_image_name_and_id_from_grep {
    assert_os_vars_are_set
    local _images
    local image_grep="${1}"
    _images=$(openstack image list -c ID -c Name -f value --limit 1000| grep "$image_grep" | sort -k 2 | tail -1)
    image_name=$(echo "$_images" | awk '{print $2}' | tr -d '\n')
    image_id=$(echo "$_images" | awk '{print $1}' | tr -d '\n')
}
## Ask if you are sure. Question text is in ${1}
# Note, if the DEFAULT_TO_YES environment variable is set to 'yes|y', then assume the response is yes
# returns _yes=1 if yes, else _yes is unset
function are_you_sure {
    unset _yes
    local _default_yes
    _default_yes=${DEFAULT_TO_YES,,} # to lowercase
    if [[ "$_default_yes" =~ ^(yes|y)$ ]]; then
        _yes=1
    else
        read -r -p "${1} [y/N]:" response
        response=${response,,} # to lower case
        if [[ "$response" =~ ^(yes|y)$ ]]; then
            _yes=1
        fi
    fi
}
## Get a floating IP address into ${floating_ip_address}
# exit 1 if no address could be made
# Scans the existing floating IPs for one with no fixed address; when none is
# free, creates a new one on ext_net. Command syntax differs between
# openstack client v2 ("ip floating ...") and v3+ ("floating ip ...").
function get_floating_ip_address {
    local floating_ips
    local _floating_ip
    local _floating_ip_assigned
    local _ifs
    local _cmd_list
    local _cmd_create
    unset floating_ip_address # return value
    assert_os_vars_are_set
    which_openstack_version
    echo "Finding a free Floating IP address"
    _ifs=$IFS
    IFS='
'
    if [[ "$OS_VERSION" == "2" ]]; then
        floating_ips=($(openstack ip floating list -c "Floating IP Address" -c "Fixed IP Address" -f value | sort -k 1))
    else
        floating_ips=($(openstack floating ip list -c "Floating IP Address" -c "Fixed IP Address" -f value | sort -k 1))
    fi
    _floating_ip=
    # An entry whose fixed address column is "None" is unassigned.
    for floating_ip in ${floating_ips[@]}; do
        echo $floating_ip | grep None 2>&1 > /dev/null
        _floating_ip_assigned=$?
        if [[ "xxx$_floating_ip_assigned" == "xxx0" ]]; then
            _floating_ip=$(echo -n "$floating_ip" | awk '{print $1}')
            break
        fi
    done
    IFS=$_ifs
    # if we didn't find the IP then create a new one
    if [ "xxx" == "xxx$_floating_ip" ]; then
        # create a floating IP address
        echo "Didn't find one ... Creating a floating IP address"
        if [[ "$OS_VERSION" == "2" ]]; then
            _floating_ip=$(openstack ip floating create ext_net | grep "^| ip" | awk '{print $4}')
        else
            _floating_ip=$(openstack floating ip create ext_net | grep "^| floating_ip_address" | awk '{print $4}')
        fi
    fi
    if [[ "$?" != "0" ]]; then
        echo "Couldn't create a floating IP"
        exit 1
    fi
    floating_ip_address="${_floating_ip}"
}
## get the status of a server in ${1} .. returned in ${server_status}
# Uses "openstack server show -f shell --prefix=os_" and evals the os_status
# line into the current shell.
function get_server_status {
    echo "Server is ${1}"
    assert_os_vars_are_set
    #local os_status
    echo "---Server is ${1}"
    #eval $(openstack server show "${1}" -f shell | egrep "^status")
    eval $(openstack server show "${1}" -f shell --prefix=os_ | egrep --color=never "^os_status")
    server_status=${os_status}
}
| true
|
9b3b8b1e79a65c6063f9920b16473128f8018fef
|
Shell
|
ywy0318/test_20210224
|
/shell/test_sh.txt
|
UTF-8
| 233
| 3.25
| 3
|
[] |
no_license
|
#!/bin/sh
# Print the name of the running operating system based on uname -s;
# unrecognised systems print "What?".
# BUG FIX: the original compared the unquoted $SYSTEM in [ ... ], which
# errors out if the value is empty or contains whitespace; a quoted case
# statement handles every value safely.
SYSTEM=$(uname -s)
case "$SYSTEM" in
    Linux)
        echo "Linux" ;;
    FreeBSD)
        echo "FreeBSD" ;;
    Solaris)
        echo "Solaris" ;;
    *)
        echo "What?" ;;
esac
| true
|
4db31b1f5d8fc5cb1074f51baa0c5b8162c2c0e6
|
Shell
|
leomazzo/msbb
|
/bbforum-config/init-final.sh
|
UTF-8
| 1,218
| 3.21875
| 3
|
[] |
no_license
|
# Provision a MyBB forum host: add a 512 MB swapfile, install MyBB 1.8.09
# under Apache, apply pre-built configuration, and deploy the management
# system. Must run as root; assumes a RHEL/CentOS-style layout
# (/etc/httpd, service httpd).
# --- Swap: 512 MB file-backed swap, registered in fstab for reboots ---
echo "Adding Swapfile ...."
dd if=/dev/zero of=/swapfile1 bs=1024 count=524288
chown root:root /swapfile1
chmod 0600 /swapfile1
mkswap /swapfile1
swapon /swapfile1
echo "/swapfile1 none swap sw 0 0" >> /etc/fstab
# --- MyBB: download, unpack and install under /var/www/html/bbforum ---
echo "Downloading, Unzipping and doing some settings in BB Forum...."
cd /tmp/
wget https://resources.mybb.com/downloads/mybb_1809.zip
unzip mybb_1809.zip
mv Upload bbforum
cp -r bbforum /var/www/html/
rm -rf Documentation mybb_1809.zip
cd /var/www/html/bbforum
# NOTE(review): deletes httpd.conf line 119 by number (presumably the stock
# DocumentRoot) — fragile across httpd versions; confirm the line content.
sed -i 119d /etc/httpd/conf/httpd.conf
echo 'DocumentRoot "/var/www/html/bbforum"' >> /etc/httpd/conf/httpd.conf
service httpd restart
# Remove the installer and stock config; replace with pre-built versions.
rm -rf /var/www/html/bbforum/install
rm -rf /var/www/html/bbforum/inc/settings.php
rm -rf /var/www/html/bbforum/inc/config.php
cd /var/www/html/bbforum/
cp /tmp/msbb-master/bbforum-config/settings.php /var/www/html/bbforum/inc/settings.php
cp /tmp/msbb-master/bbforum-config/config-final.php /var/www/html/bbforum/inc/config.php
tar -xf /tmp/msbb-master/bbforum-config/themes.tar -C /var/www/html/bbforum/cache
# NOTE(review): world-writable config files (0777) are a security risk —
# confirm this is acceptable for the deployment.
chmod -R 0777 cache uploads inc/settings.php inc/config.php
# --- Management system deployment ---
echo "Deploying Management Systems......"
cd /tmp
cp -r /tmp/msbb-master/msbb /var/www/html/bbforum/
echo "Configuration ended...."
| true
|
c17b4904d7295bab96dc180e73cd115c8f988467
|
Shell
|
prenaux/ham
|
/toolsets/ffmpeg/ffmpeg_to_wav8
|
UTF-8
| 164
| 2.8125
| 3
|
[
"Jam",
"MIT",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash
# Convert a media file to 8-bit unsigned PCM WAV at 22.05 kHz, audio only.
# Usage: ffmpeg_to_wav8 <source> [<destination>]
# When <destination> is omitted, "<source-minus-extension>_wav8.wav" is used.
. ham-bash-lib.sh
SRC="$1"
# Default the destination to the source name with a _wav8.wav suffix.
DST="${2:-${SRC%.*}_wav8.wav}"
set -ex
ffmpeg -i "$SRC" -vn -acodec pcm_u8 -ar 22050 "$DST"
| true
|
4053913eaff5f932c0c8eb4c568c93c18004ca5d
|
Shell
|
owusumichael/covid19-1
|
/extra/pipeline_onlyR1.sh
|
UTF-8
| 3,303
| 3.453125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# R1-only SARS-CoV-2 pipeline: map single-end reads against the reference,
# keep mapped primary alignments, sort/index, compute per-base depth, call
# ivar consensus at minimum depth 1 and 5, align the consensuses with
# augur (mafft) and emit mutation tables plus a per-sample QC report.
# map
for r1 in fastq/raw/*R1*.fastq.gz; do
	mkdir -p onlyR1/BAM
	output=${r1/_R1/}
	bwa mem -v1 -t10 refs/REF_NC_045512.2.fasta "$r1" | samtools view -b - > onlyR1/BAM/$(basename "$output" .fastq.gz).bam
done
# keep mapped reads (-F 260 drops unmapped and secondary alignments)
for file in onlyR1/BAM/*.bam; do
	samtools view -F 260 -h "$file" > onlyR1/BAM/$(basename "$file" .bam).mapped.bam
done
# sort and index bam files
for file in onlyR1/BAM/*.mapped.bam; do
	samtools sort "$file" -o onlyR1/BAM/$(basename "$file" .mapped.bam).mapped.sorted.bam
done
for file in onlyR1/BAM/*.mapped.sorted.bam; do
	samtools index "$file"
done
# depth files (-a includes zero-depth positions, so line count == genome size)
mkdir -p onlyR1/QC/depth
for file in onlyR1/BAM/*.mapped.sorted.bam; do
	samtools depth -a "$file" > onlyR1/QC/depth/$(basename "$file" .mapped.sorted.bam).txt
done
mkdir -p onlyR1/CNS onlyR1/CNS_5
# consensus, once at minimum depth 1 and once at minimum depth 5
for file in onlyR1/BAM/*.mapped.sorted.bam; do
	file_name=$(basename "$file" .mapped.sorted.bam)
	file_name=$( echo "$file_name" | cut -d'_' -f 1 )
	samtools mpileup -A "$file" | ivar consensus -m 1 -p onlyR1/CNS/"$file_name"
	samtools mpileup -A "$file" | ivar consensus -m 5 -p onlyR1/CNS_5/"$file_name"
done
rm onlyR1/CNS/*.qual.txt onlyR1/CNS_5/*.qual.txt
# change fasta headers to the bare sample name
for file in onlyR1/CNS/*.fa*; do
	name=$(basename "$file")
	sed -i "s/>.*/>${name%%.*}/" "$file"
done
for file in onlyR1/CNS_5/*.fa*; do
	name=$(basename "$file")
	sed -i "s/>.*/>${name%%.*}/" "$file"
done
# align with augur (mafft)
mkdir -p onlyR1/alignment
# BUG FIX: the consensus files live under onlyR1/CNS_5, not ./CNS_5.
cat onlyR1/CNS_5/*.fa* > onlyR1/alignment/all_not_aligned.fasta
augur align \
--sequences onlyR1/alignment/all_not_aligned.fasta \
--reference-sequence refs/REF_NC_045512.2.fasta \
--output onlyR1/alignment/all_aligned.fasta
mkdir -p onlyR1/results
python /data/projects/Dana/scripts/covid19/MutTable.py onlyR1/alignment/all_aligned.fasta onlyR1/results/muttable.csv
python /data/projects/Dana/scripts/covid19/variants.py onlyR1/alignment/all_aligned.fasta onlyR1/results/variants.csv
# per-sample QC report
report=onlyR1/QC/report.txt
echo -e "sample\tmapped%\tmappedreads\ttotreads\tcovbases\tcoverage%\tcoverageCNS_5%\tmeandepth\tmaxdepth\tmindepth" > "$report"
for file in onlyR1/BAM/*.mapped.sorted.bam; do
	sample_name=$(basename "$file" .mapped.sorted.bam)
	sample_name=$( echo "$sample_name" | cut -d'_' -f 1 )
	original_bam=${file/.mapped.sorted.bam/.bam}
	tot_reads=$(samtools view -c "$original_bam")
	# numreads, covbases and coverage% columns from samtools coverage
	coverage_stats=( $(samtools coverage -H "$file" | cut -f4,5,6) )
	# bases covered at depth > 5 — the effective breadth of the CNS_5 consensus
	breadth_cns5=$(cut -f3 onlyR1/QC/depth/$(basename "$file" .mapped.sorted.bam).txt | awk '$1>5{c++} END{print c+0}')
	genome_size=$(cat onlyR1/QC/depth/$(basename "$file" .mapped.sorted.bam).txt | wc -l)
	coverage_cns5=$(echo "$breadth_cns5/$genome_size"*100 | bc -l)
	mapped_num=${coverage_stats[0]}
	percentage_mapped=$(awk -v m="$mapped_num" -v t="$tot_reads" 'BEGIN {print (m/t)*100}')
	# mean/max/min depth over positions with non-zero coverage
	depth=$(awk '{if($3==0){next}; if(min==""){min=max=$3}; if($3>max) {max=$3}; if($3< min) {min=$3}; total+=$3; count+=1} END {print total/count"\t"max"\t"min}' onlyR1/QC/depth/$(basename "$file" .mapped.sorted.bam).txt)
	# BUG FIX: the original interpolated the undefined ${depths}, leaving the
	# three depth columns empty in the report; use ${depth} computed above.
	echo -e "${sample_name}\t${percentage_mapped}\t${mapped_num}\t${tot_reads}\t${coverage_stats[1]}\t${coverage_stats[2]}\t${coverage_cns5}\t${depth}" >> "$report"
done
#############################################################################################
| true
|
890481b92284d873a781427c0daa989a8c9b2ecb
|
Shell
|
slaash/scripts
|
/sysinfo_awk.sh
|
UTF-8
| 598
| 3.234375
| 3
|
[] |
no_license
|
#!/bin/bash
# Print a fixed-width header, then every 5 seconds emit one line with free
# memory, used/free swap (from /proc/meminfo) and the 1/5/15-minute load
# averages (from /proc/loadavg). Runs until interrupted.
awk 'BEGIN { printf "%-19s %-16s %-14s %-14s %s\n", "Date", "Free Memory (kB)", "Used Swap (kB)", "Free Swap (kB)", "System Load" }'
while true;
do
# One awk pass over /proc/meminfo picks out MemFree/SwapFree/SwapTotal.
# The '"'"' sequences splice a literal single quote into the awk program so
# the %' (thousands-grouping) printf flag can be used inside single quotes.
meminfo=$(awk '
tolower($1) ~ /memfree/ { memfree = $2 }
tolower($1) ~ /swapfree/ { swapfree = $2 }
tolower($1) ~ /swaptotal/ { swaptotal = $2 }
END { printf "%'"'"'-16d %'"'"'-14d %'"'"'-14d", memfree, swaptotal-swapfree, swapfree }' /proc/meminfo)
# Fields 1-3 of /proc/loadavg are the load averages.
echo "$(date +'%Y/%m/%d %H:%M:%S') ${meminfo} $(cut -d ' ' -f 1,2,3 /proc/loadavg)"
sleep 5
done
| true
|
2aa698f714a0674387771abbe5adc3ff97694cc2
|
Shell
|
Navyasree272/programconstruct
|
/ifelse912.sh
|
UTF-8
| 342
| 3.109375
| 3
|
[] |
no_license
|
#!/bin/bash
# Read a day number (1-7) from standard input and print the matching day
# name; any other input prints " there is no day".
# BUG FIX: the original first line was "#bin/bash -x" — missing the "!" and
# the leading "/", so it was only a comment, not a shebang.
read -r n
echo "you had enter $n"
case "$n" in
	1) echo -n "sunday " ;;
	2) echo -n "monday " ;;
	3) echo -n "tuesday" ;;
	4) echo -n "wednesday " ;;
	5) echo -n "thursday" ;;
	6) echo -n "friday " ;;
	7) echo -n "saturday" ;;
	*) echo -n " there is no day" ;;
esac
echo ""
|
b3b2ce0880534c1f2cb0f797457d63d392afb354
|
Shell
|
ithinkihaveacat/dotfiles
|
/home/.bash_profile
|
UTF-8
| 481
| 3.21875
| 3
|
[
"Apache-2.0"
] |
permissive
|
# -*- sh -*-
# Don't run fish if Android Studio is attempting to retrieve environment variables
# https://youtrack.jetbrains.com/articles/IDEA-A-19/Shell-Environment-Loading
if [ -z "$INTELLIJ_ENVIRONMENT_READER" ]; then
  # Attempt to run fish as login shell, even if bash is technically the
  # login shell.
  # Use the POSIX 'command -v' builtin instead of the external 'which'
  # (SC2230); the temporary PATH assignment lets fish be found in the local
  # prefixes first without changing this shell's PATH.
  FISH=$(PATH="$HOME/local/bin:$HOME/local/homebrew/bin:$PATH" command -v fish)
  if [[ -x "$FISH" ]]; then
    exec env SHELL="$FISH" "$FISH" -i
  fi
fi
| true
|
cf6ceb59e4efa6d7c055442f6e1df147417f93ea
|
Shell
|
hakehuang/skywalker
|
/vte_script/gen_fail_log.sh
|
UTF-8
| 2,464
| 3.46875
| 3
|
[] |
no_license
|
#!/bin/sh -x
# Generate a per-platform XML summary of failed LTP test runs, consumable by
# the fails.xsl stylesheet. The report is written to
# ${path}/${PLATFORM}_failed_status.xml.
# ./gen_fail_log.sh /home/smb/nfs/wb/vte_IMX50-RDP3_d/output IMX50-RDP3 .
#<output fail log path> <platfrom name> <output path>
# Append the raw invocation arguments to a debug log.
echo $* >> /rootfs/wb/.log.txt
PLATFORM=$2
path=$3
#list=$(ls ${1}/*.failed -lrt | awk '{print $8}')
# Collect every *.failed log under ${1} into the space-separated $list.
# NOTE(review): parses ls output with unquoted expansions — breaks on
# filenames containing whitespace; confirm the log names never do.
for i in $(ls ${1}/*.failed -lrt )
do
file=$(echo $i | grep "failed" | wc -l)
if [ $file -gt 0 ] ; then
list=$(echo $list $i)
fi
done
# Floor for the <maxcase> field in the report.
MAXcase=300
# Append one line ($1) to the per-platform XML report.
tofile()
{
echo $1 >> ${path}/${PLATFORM}_failed_status.xml
}
# Create/truncate the per-platform XML report, writing line $1.
create_file()
{
echo $1 > ${path}/${PLATFORM}_failed_status.xml
}
# First pass: derive each run file name from the failed-log name and record
# the largest "TGE" case count seen, so <maxcase> covers all runs.
for i in $list
do
runfile_a=$(basename $i | sed 's/LTP_RUN_ON-//' | sed 's/_log/#/' | cut -d '#' -f 1)
runfile=$(echo $runfile_a | sed 's/_/#/' | cut -d '#' -f 2)
runpath=$(dirname $(dirname $i))/runtest/
total_case=$(cat ${runpath}${runfile} | grep "TGE" | wc -l)
if [ $total_case -gt $MAXcase ]; then
MAXcase=$total_case
fi
done
# XML prologue and summary header.
create_file "<?xml version=\"1.0\" encoding='UTF-8'?>"
tofile "<?xml-stylesheet type=\"text/xsl\" href=\"fails.xsl\"?>"
tofile "<LOG>"
tofile "<title>"
tofile "$2"
tofile "</title>"
tofile "<total>"
tofile $(ls ${1}/*.failed | wc -l)
tofile "</total>"
tofile "<maxcase>"
tofile $MAXcase
tofile "</maxcase>"
# Second pass: one <fail_count> entry per failed log, with its failure
# count, report links, date and total case count.
for i in $list
do
#get date
idatea=$(stat -c %y $i | awk '{print $1}')
idatey=$(echo $idatea| cut -d '-' -f 1)
idatem=$(echo $idatea| cut -d '-' -f 2)
idated=$(echo $idatea| cut -d '-' -f 3)
idate=${idatey}${idatem}${idated}
runfile_a=$(basename $i | sed 's/LTP_RUN_ON-//' | sed 's/_log/#/' | cut -d '#' -f 1)
runfile=$(echo $runfile_a | sed 's/_/#/' | cut -d '#' -f 2)
mac=$(basename $i | sed 's/LTP_RUN_ON-//' | sed 's/_log/#/' | cut -d '#' -f 2 | sed 's/failed/txt/')
resultpath=$(dirname $(dirname $i))/results/
runpath=$(dirname $(dirname $i))/runtest/
resultfile=$(ls $resultpath | grep $runfile | grep $mac | grep $idate)
#if [ ! -z "$resultfile" ]; then
#total_case=$(cat ${resultpath}${resultfile} | grep "TGE" | wc -l)
#else
total_case=$(cat ${runpath}${runfile} | grep "TGE" | wc -l)
#fi
tofile "<fail_count>"
tofile "<count>"
tofile $(cat $i | wc -l)
tofile "</count>"
tofile "<flink>"
tofile "http://shlx12.ap.freescale.net/test_reports/${2}_output/$(basename $i)"
tofile "</flink>"
tofile "<fdate>"
tofile $idate
tofile "</fdate>"
tofile "<total_cases>"
tofile ${total_case}
tofile "</total_cases>"
tofile "<runfile>"
tofile "http://shlx12.ap.freescale.net/test_reports/runtest_${PLATFORM}/${runfile}"
tofile "</runfile>"
tofile "</fail_count>"
done
tofile "</LOG>"
| true
|
4ac2dd0e5149b0ccd2454548944164944c83e95c
|
Shell
|
kimsyversen/dotfiles
|
/macos.sh
|
UTF-8
| 22,458
| 2.859375
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# macOS defaults bootstrap: closes System Preferences, caches sudo
# credentials, then applies hostname and system-wide settings.
# Close any open System Preferences panes, to prevent them from overriding
# settings we’re about to change
osascript -e 'tell application "System Preferences" to quit'
# Ask for the administrator password upfront
sudo -v
# BUG FIX: read without -r interprets backslashes in the typed hostname
# (SC2162); -r keeps the input literal.
read -r -p "Enter hostname # " NAME
###############################################################################
# General UI/UX                                                               #
###############################################################################
# Set computer name (as done via System Preferences → Sharing)
sudo scutil --set ComputerName "$NAME"
sudo scutil --set HostName "$NAME"
sudo scutil --set LocalHostName "$NAME"
sudo defaults write /Library/Preferences/SystemConfiguration/com.apple.smb.server NetBIOSName -string "$NAME"
#Allow install applicatons from everyone. This command enables the option "Anywhere" In Security & Privacy
# NOTE(review): this disables Gatekeeper system-wide — confirm acceptable.
sudo spctl --master-disable
# Increase window resize speed for Cocoa applications
defaults write NSGlobalDomain NSWindowResizeTime -float 0.001
# Disable Resume system-wide
defaults write com.apple.systempreferences NSQuitAlwaysKeepsWindows -bool false
# Reveal IP address, hostname, OS version, etc. when clicking the clock in the login window
sudo defaults write /Library/Preferences/com.apple.loginwindow AdminHostInfo HostName
# Never go into computer sleep mode
sudo systemsetup -setcomputersleep Off > /dev/null
# Automatically open a new Finder window when a volume is mounted
defaults write com.apple.frameworks.diskimages auto-open-ro-root -bool true
defaults write com.apple.frameworks.diskimages auto-open-rw-root -bool true
defaults write com.apple.finder OpenWindowForNewRemovableDisk -bool true
# Menu bar: show remaining battery time (on pre-10.8); hide percentage
defaults write com.apple.menuextra.battery ShowPercent -string "NO"
defaults write com.apple.menuextra.battery ShowTime -string "YES"
# Expand save panel by default
defaults write NSGlobalDomain NSNavPanelExpandedStateForSaveMode -bool true
# Disable the “Are you sure you want to open this application?” dialog
defaults write com.apple.LaunchServices LSQuarantine -bool false
# Always show scrollbars
# Possible values: `WhenScrolling`, `Automatic` and `Always`
defaults write NSGlobalDomain AppleShowScrollBars -string "Always"
###############################################################################
# Trackpad, mouse, keyboard, Bluetooth accessories, and input                 #
###############################################################################
# Trackpad: enable tap to click for this user and for the login screen
defaults write com.apple.driver.AppleBluetoothMultitouch.trackpad Clicking -bool true
defaults -currentHost write NSGlobalDomain com.apple.mouse.tapBehavior -int 1
defaults write NSGlobalDomain com.apple.mouse.tapBehavior -int 1
# Trackpad: map bottom right corner to right-click
defaults write com.apple.driver.AppleBluetoothMultitouch.trackpad TrackpadCornerSecondaryClick -int 2
defaults write com.apple.driver.AppleBluetoothMultitouch.trackpad TrackpadRightClick -bool true
defaults -currentHost write NSGlobalDomain com.apple.trackpad.trackpadCornerClickBehavior -int 1
defaults -currentHost write NSGlobalDomain com.apple.trackpad.enableSecondaryClick -bool true
# Trackpad: swipe between pages with three fingers
defaults write NSGlobalDomain AppleEnableSwipeNavigateWithScrolls -bool true
defaults -currentHost write NSGlobalDomain com.apple.trackpad.threeFingerHorizSwipeGesture -int 1
defaults write com.apple.driver.AppleBluetoothMultitouch.trackpad TrackpadThreeFingerHorizSwipeGesture -int 1
# Disable “natural” (Lion-style) scrolling
# NOTE(review): value 'true' actually ENABLES natural scrolling — the comment
# and the value disagree; confirm which behavior is intended.
defaults write NSGlobalDomain com.apple.swipescrolldirection -bool true
# Increase sound quality for Bluetooth headphones/headsets
defaults write com.apple.BluetoothAudioAgent "Apple Bitpool Min (editable)" -int 40
# Enable full keyboard access for all controls
# (e.g. enable Tab in modal dialogs)
defaults write NSGlobalDomain AppleKeyboardUIMode -int 3
# Enable access for assistive devices
echo -n 'a' | sudo tee /private/var/db/.AccessibilityAPIEnabled > /dev/null 2>&1
sudo chmod 444 /private/var/db/.AccessibilityAPIEnabled
# TODO: avoid GUI password prompt somehow (http://apple.stackexchange.com/q/60476/4408)
#sudo osascript -e 'tell application "System Events" to set UI elements enabled to true'
# Disable press-and-hold for keys in favor of key repeat
defaults write NSGlobalDomain ApplePressAndHoldEnabled -bool false
# Set a blazingly fast keyboard repeat rate
defaults write NSGlobalDomain KeyRepeat -int 1
defaults write NSGlobalDomain InitialKeyRepeat -int 10
# Automatically illuminate built-in MacBook keyboard in low light
# NOTE(review): BezelServices keys only apply to older macOS releases —
# verify they still have effect on the target OS version.
defaults write com.apple.BezelServices kDim -bool true
# Turn off keyboard illumination when computer is not used for 5 minutes
defaults write com.apple.BezelServices kDimTime -int 300
# Disable automatic capitalization as it’s annoying when typing code
defaults write NSGlobalDomain NSAutomaticCapitalizationEnabled -bool false
# Disable smart dashes as they’re annoying when typing code
defaults write NSGlobalDomain NSAutomaticDashSubstitutionEnabled -bool false
# Disable automatic period substitution as it’s annoying when typing code
defaults write NSGlobalDomain NSAutomaticPeriodSubstitutionEnabled -bool false
# Disable smart quotes as they’re annoying when typing code
defaults write NSGlobalDomain NSAutomaticQuoteSubstitutionEnabled -bool false
# Disable auto-correct
defaults write NSGlobalDomain NSAutomaticSpellingCorrectionEnabled -bool false
# Enable subpixel font rendering on non-Apple LCDs
# Reference: https://github.com/kevinSuttle/macOS-Defaults/issues/17#issuecomment-266633501
defaults write NSGlobalDomain AppleFontSmoothing -int 1
# Use scroll gesture with the Ctrl (^) modifier key to zoom
defaults write com.apple.universalaccess closeViewScrollWheelToggle -bool true
defaults write com.apple.universalaccess HIDScrollZoomModifierMask -int 262144
# Follow the keyboard focus while zoomed in
#defaults write com.apple.universalaccess closeViewZoomFollowsFocus -bool true
###############################################################################
# Screen                                                                      #
###############################################################################
# Require password immediately after sleep or screen saver begins
defaults write com.apple.screensaver askForPassword -int 1
defaults write com.apple.screensaver askForPasswordDelay -int 0
# Enable HiDPI display modes (requires restart)
sudo defaults write /Library/Preferences/com.apple.windowserver DisplayResolutionEnabled -bool true
###############################################################################
# Finder                                                                      #
###############################################################################
# Show icons for hard drives, servers, and removable media on the desktop
defaults write com.apple.finder ShowExternalHardDrivesOnDesktop -bool true
defaults write com.apple.finder ShowHardDrivesOnDesktop -bool true
defaults write com.apple.finder ShowMountedServersOnDesktop -bool true
defaults write com.apple.finder ShowRemovableMediaOnDesktop -bool true
# Finder: show hidden files by default
defaults write com.apple.finder AppleShowAllFiles -bool false
# Finder: show all filename extensions
defaults write NSGlobalDomain AppleShowAllExtensions -bool true
# Finder: show status bar
defaults write com.apple.finder ShowStatusBar -bool true
# Finder: show path bar
defaults write com.apple.finder ShowPathbar -bool false
# Finder: allow text selection in Quick Look
defaults write com.apple.finder QLEnableTextSelection -bool true
# Display full POSIX path as Finder window title
defaults write com.apple.finder _FXShowPosixPathInTitle -bool true
# When performing a search, search the current folder by default
defaults write com.apple.finder FXDefaultSearchScope -string "SCcf"
# Disable the warning when changing a file extension
defaults write com.apple.finder FXEnableExtensionChangeWarning -bool false
# Avoid creating .DS_Store files on network volumes
defaults write com.apple.desktopservices DSDontWriteNetworkStores -bool true
# Show item info near icons on the desktop and in other icon views
/usr/libexec/PlistBuddy -c "Set :DesktopViewSettings:IconViewSettings:showItemInfo true" ~/Library/Preferences/com.apple.finder.plist
#/usr/libexec/PlistBuddy -c "Set :FK_StandardViewSettings:IconViewSettings:showItemInfo true" ~/Library/Preferences/com.apple.finder.plist - Removed due to no does not exist error
/usr/libexec/PlistBuddy -c "Set :StandardViewSettings:IconViewSettings:showItemInfo true" ~/Library/Preferences/com.apple.finder.plist
# Show item info to the right of the icons on the desktop
/usr/libexec/PlistBuddy -c "Set DesktopViewSettings:IconViewSettings:labelOnBottom false" ~/Library/Preferences/com.apple.finder.plist
# Enable snap-to-grid for icons on the desktop and in other icon views
/usr/libexec/PlistBuddy -c "Set :DesktopViewSettings:IconViewSettings:arrangeBy grid" ~/Library/Preferences/com.apple.finder.plist
#/usr/libexec/PlistBuddy -c "Set :FK_StandardViewSettings:IconViewSettings:arrangeBy grid" ~/Library/Preferences/com.apple.finder.plist - Removed due to no does not exist error
/usr/libexec/PlistBuddy -c "Set :StandardViewSettings:IconViewSettings:arrangeBy grid" ~/Library/Preferences/com.apple.finder.plist
# Increase grid spacing for icons on the desktop and in other icon views
/usr/libexec/PlistBuddy -c "Set :DesktopViewSettings:IconViewSettings:gridSpacing 100" ~/Library/Preferences/com.apple.finder.plist
#/usr/libexec/PlistBuddy -c "Set :FK_StandardViewSettings:IconViewSettings:gridSpacing 100" ~/Library/Preferences/com.apple.finder.plist - Removed due to no does not exist error
/usr/libexec/PlistBuddy -c "Set :StandardViewSettings:IconViewSettings:gridSpacing 100" ~/Library/Preferences/com.apple.finder.plist
# Increase the size of icons on the desktop and in other icon views
/usr/libexec/PlistBuddy -c "Set :DesktopViewSettings:IconViewSettings:iconSize 80" ~/Library/Preferences/com.apple.finder.plist
#/usr/libexec/PlistBuddy -c "Set :FK_StandardViewSettings:IconViewSettings:iconSize 80" ~/Library/Preferences/com.apple.finder.plist - Removed due to no does not exist error
/usr/libexec/PlistBuddy -c "Set :StandardViewSettings:IconViewSettings:iconSize 80" ~/Library/Preferences/com.apple.finder.plist
# Show the /Volumes folder
sudo chflags nohidden /Volumes
# Use list view in all Finder windows by default
# Four-letter codes for the other view modes: `icnv`, `clmv`, `Flwv`
defaults write com.apple.finder FXPreferredViewStyle -string "Nlsv"
# Disable the warning before emptying the Trash
defaults write com.apple.finder WarnOnEmptyTrash -bool false
# Enable AirDrop over Ethernet and on unsupported Macs running Lion
defaults write com.apple.NetworkBrowser BrowseAllInterfaces -bool true
# Show the ~/Library folder
chflags nohidden ~/Library
# Remove Dropbox’s green checkmark icons in Finder
file=/Applications/Dropbox.app/Contents/Resources/emblem-dropbox-uptodate.icns
[ -e "$file" ] && mv -f "$file" "$file.bak"
# Keep folders on top when sorting by name
defaults write com.apple.finder _FXSortFoldersFirst -bool true
###############################################################################
# Dock, Dashboard                                                             #
###############################################################################
# Don’t show recent applications in Dock
defaults write com.apple.dock show-recents -bool false
# Enable highlight hover effect for the grid view of a stack (Dock)
defaults write com.apple.dock mouse-over-hilite-stack -bool true
# Set the icon size of Dock items to 36 pixels
defaults write com.apple.dock tilesize -int 36
# Enable spring loading for all Dock items
defaults write com.apple.dock enable-spring-load-actions-on-all-items -bool true
# Show indicator lights for open applications in the Dock
defaults write com.apple.dock show-process-indicators -bool true
# Speed up Mission Control animations
defaults write com.apple.dock expose-animation-duration -float 0.1
# Don’t group windows by application in Mission Control
# (i.e. use the old Exposé behavior instead)
defaults write com.apple.dock expose-group-by-app -bool false
# Disable Dashboard
defaults write com.apple.dashboard mcx-disabled -bool true
# Don’t show Dashboard as a Space
defaults write com.apple.dock dashboard-in-overlay -bool true
# Don’t automatically rearrange Spaces based on most recent use
defaults write com.apple.dock mru-spaces -bool false
# Remove the auto-hiding Dock delay
defaults write com.apple.dock autohide-delay -float 0
# Remove the animation when hiding/showing the Dock
defaults write com.apple.dock autohide-time-modifier -float 0
# Enable the 2D Dock
#defaults write com.apple.dock no-glass -bool true
# Automatically hide and show the Dock
defaults write com.apple.dock autohide -bool true
# Make Dock icons of hidden applications translucent
defaults write com.apple.dock showhidden -bool true
# Reset Launchpad
find ~/Library/Application\ Support/Dock -name "*.db" -maxdepth 1 -delete
# Move dock to left side of screen
defaults write com.apple.dock orientation -string "left"
###############################################################################
# Photos                                                                      #
###############################################################################
# Prevent Photos from opening automatically when devices are plugged in
defaults -currentHost write com.apple.ImageCapture disableHotPlug -bool true
###############################################################################
# Messages                                                                    #
###############################################################################
# Disable smart quotes as it’s annoying for messages that contain code
defaults write com.apple.messageshelper.MessageController SOInputLineSettings -dict-add "automaticQuoteSubstitutionEnabled" -bool false
###############################################################################
# Mail                                                                        #
###############################################################################
# Copy email addresses as `foo@example.com` instead of `Foo Bar <foo@example.com>` in Mail.app
defaults write com.apple.mail AddressesIncludeNameOnPasteboard -bool true
# Add the keyboard shortcut ⌘ + Enter to send an email in Mail.app
defaults write com.apple.mail NSUserKeyEquivalents -dict-add "Send" "@\\U21a9"
###############################################################################
# Terminal and iTerm 2                                                        #
###############################################################################
# Only use UTF-8 in Terminal.app
defaults write com.apple.terminal StringEncodings -array 4
# Install theme for Terminal
#open "theme/terminal-profile.terminal"
#sleep 1 # Wait a bit to make sure the theme is loaded
defaults write com.apple.terminal "Default Window Settings" -string "Kim"
defaults write com.apple.terminal "Startup Window Settings" -string "Kim"
# Don’t display the annoying prompt when quitting iTerm
defaults write com.googlecode.iterm2 PromptOnQuit -bool false
# Use the Smyck color scheme by default in Terminal.app (https://github.com/hukl/Smyck-Color-Scheme/)
# The embedded AppleScript opens theme/Smyck.terminal (which registers the
# theme and spawns extra windows), makes it the default profile, then closes
# the extra windows it opened. Do not add shell comments inside the heredoc —
# its body is AppleScript, not bash.
osascript <<EOD
tell application "Terminal"
	local allOpenedWindows
	local initialOpenedWindows
	local windowID
	set themeName to "Smyck"
	(* Store the IDs of all the open terminal windows. *)
	set initialOpenedWindows to id of every window
	(* Open the custom theme so that it gets added to the list
	   of available terminal themes (note: this will open two
	   additional terminal windows). *)
	do shell script "open '$PWD/theme/" & themeName & ".terminal'"
	(* Wait a little bit to ensure that the custom theme is added. *)
	delay 1
	(* Set the custom theme as the default terminal theme. *)
	set default settings to settings set themeName
	(* Get the IDs of all the currently opened terminal windows. *)
	set allOpenedWindows to id of every window
	repeat with windowID in allOpenedWindows
		(* Close the additional windows that were opened in order
		   to add the custom theme to the list of terminal themes. *)
		if initialOpenedWindows does not contain windowID then
			close (every window whose id is windowID)
		(* Change the theme for the initial opened terminal windows
		   to remove the need to close them in order for the custom
		   theme to be applied. *)
		else
			set current settings of tabs of (every window whose id is windowID) to settings set themeName
		end if
	end repeat
end tell
EOD
###############################################################################
# Time Machine                                                                #
###############################################################################
# Prevent Time Machine from prompting to use new hard drives as backup volume
defaults write com.apple.TimeMachine DoNotOfferNewDisksForBackup -bool true
###############################################################################
# Activity Monitor                                                            #
###############################################################################
# Show the main window when launching Activity Monitor
defaults write com.apple.ActivityMonitor OpenMainWindow -bool true
# Visualize CPU usage in the Activity Monitor Dock icon
defaults write com.apple.ActivityMonitor IconType -int 5
# Show all processes in Activity Monitor
defaults write com.apple.ActivityMonitor ShowCategory -int 0
# Sort Activity Monitor results by CPU usage
defaults write com.apple.ActivityMonitor SortColumn -string "CPUUsage"
defaults write com.apple.ActivityMonitor SortDirection -int 0
###############################################################################
# Address Book, Dashboard, iCal, TextEdit, and Disk Utility                   #
###############################################################################
# Enable the debug menu in Address Book
defaults write com.apple.addressbook ABShowDebugMenu -bool true
# Enable Dashboard dev mode (allows keeping widgets on the desktop)
defaults write com.apple.dashboard devmode -bool true
# Enable the debug menu in iCal (pre-10.8)
defaults write com.apple.iCal IncludeDebugMenu -bool true
# Use plain text mode for new TextEdit documents
defaults write com.apple.TextEdit RichText -int 0
# Open and save files as UTF-8 in TextEdit
defaults write com.apple.TextEdit PlainTextEncoding -int 4
defaults write com.apple.TextEdit PlainTextEncodingForWrite -int 4
# Enable the debug menu in Disk Utility
defaults write com.apple.DiskUtility DUDebugMenuEnabled -bool true
defaults write com.apple.DiskUtility advanced-image-options -bool true
# Auto-play videos when opened with QuickTime Player
defaults write com.apple.QuickTimePlayerX MGPlayMovieOnOpen -bool true
###############################################################################
# Google Chrome & Google Chrome Canary                                        #
###############################################################################
# Disable the all too sensitive backswipe on trackpads
defaults write com.google.Chrome AppleEnableSwipeNavigateWithScrolls -bool false
defaults write com.google.Chrome.canary AppleEnableSwipeNavigateWithScrolls -bool false
# Disable the all too sensitive backswipe on Magic Mouse
defaults write com.google.Chrome AppleEnableMouseSwipeNavigateWithScrolls -bool false
defaults write com.google.Chrome.canary AppleEnableMouseSwipeNavigateWithScrolls -bool false
# Use the system-native print preview dialog
defaults write com.google.Chrome DisablePrintPreview -bool true
defaults write com.google.Chrome.canary DisablePrintPreview -bool true
# Expand the print dialog by default
defaults write com.google.Chrome PMPrintingExpandedStateForPrint2 -bool true
defaults write com.google.Chrome.canary PMPrintingExpandedStateForPrint2 -bool true
###############################################################################
# Quick time                                                                  #
###############################################################################
# Remove current list
defaults delete com.apple.QuickTimePlayerX.LSSharedFileList RecentDocuments
#Disable Quicktime listing recent items in the future
defaults write com.apple.QuickTimePlayerX NSRecentDocumentsLimit 0
defaults write com.apple.QuickTimePlayerX.LSSharedFileList RecentDocuments -dict-add MaxAmount 0
###############################################################################
# VLC                                                                         #
###############################################################################
# Remove current list
defaults delete org.videolan.vlc.LSSharedFileList RecentDocuments
# Disable VLC listing recent items
defaults write org.videolan.vlc NSRecentDocumentsLimit 0
defaults write org.videolan.vlc.LSSharedFileList RecentDocuments -dict-add MaxAmount 0
###############################################################################
# Sublime text.                                                               #
###############################################################################
# Copy the Smyck theme to
cp theme/Smyck.tmTheme ~/Library/Application\ Support/Sublime\ Text\ 3/Packages/User/
###############################################################################
# Kill affected applications #
###############################################################################
# Relaunch every application whose preferences were modified above so the
# new settings take effect. killall's "no matching process" noise is
# silenced; apps that are not running are simply skipped.
for app in \
  "Activity Monitor" "Address Book" "Calendar" "cfprefsd" "Contacts" \
  "Dock" "Finder" "Google Chrome Canary" "Google Chrome" "Mail" \
  "Messages" "Opera" "Photos" "Safari" "Spectacle" "SystemUIServer" \
  "Terminal" "iCal"
do
  killall "${app}" &> /dev/null
done
echo "Done. Note that some of these changes require a logout/restart to take effect."
| true
|
e19bff37de8383db9ff5620c8490f253e9e5b4be
|
Shell
|
ldionmarcil/blackarch
|
/packages/indxparse/PKGBUILD
|
UTF-8
| 1,599
| 2.90625
| 3
|
[] |
no_license
|
# This file is part of BlackArch Linux ( http://blackarch.org ).
# See COPYING for license details.
pkgname='indxparse'
pkgver=141.81c037d
pkgrel=3
groups=('blackarch' 'blackarch-forensic')
pkgdesc='A Tool suite for inspecting NTFS artifacts.'
arch=('any')
url='http://www.williballenthin.com/forensics/mft/indxparse/'
license=('APACHE')
depends=('python2' 'wxpython2.8')
makedepends=('git')
source=('git+https://github.com/williballenthin/INDXParse.git')
sha1sums=('SKIP')

# Version string "<commit count>.<short hash>" derived from the checkout.
pkgver() {
  cd "$srcdir/INDXParse"
  printf '%s.%s\n' "$(git rev-list --count HEAD)" "$(git rev-parse --short HEAD)"
}

# _make_launcher <dest> <script>
# Write an executable /usr/bin wrapper that chdirs into the shared install
# directory and execs the given python2 entry-point script, forwarding all
# arguments. Replaces four copy-pasted here-docs.
_make_launcher() {
  local dest="$1" script="$2"
  cat > "$dest" << EOF
#!/bin/sh
cd /usr/share/indxparse
exec python2 $script "\$@"
EOF
  chmod +x "$dest"
}

package() {
  cd "$srcdir/INDXParse"
  mkdir -p "$pkgdir/usr/bin"
  mkdir -p "$pkgdir/usr/share/indxparse"
  mkdir -p "$pkgdir/usr/share/doc/indxparse"
  install -Dm644 -t "$pkgdir/usr/share/doc/indxparse" README CONTRIBUTORS.txt \
    CHANGELOG
  install -Dm644 LICENSE "$pkgdir/usr/share/licenses/indxparse/LICENSE"
  # Docs/license are installed above; drop them before the bulk copy below.
  rm README CONTRIBUTORS.txt CHANGELOG LICENSE
  cp -a * "$pkgdir/usr/share/indxparse"
  # One launcher per entry-point script.
  _make_launcher "$pkgdir/usr/bin/indxparse"     INDXParse.py
  _make_launcher "$pkgdir/usr/bin/mft-indxparse" MFT.py
  _make_launcher "$pkgdir/usr/bin/mftindx"       MFTINDX.py
  _make_launcher "$pkgdir/usr/bin/mftview"       MFTView.py
}
| true
|
e5bc2b9e427b6012550913c4b157115995187920
|
Shell
|
pyzh/rootless-kubernetes
|
/enter-chroot
|
UTF-8
| 2,677
| 3.078125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
set -e
set -o pipefail
# First invocation: USERNS_NAME is unset, so re-exec this script inside a
# fresh user+network namespace via ./bin/unspawn (which presumably sets
# USERNS_NAME for the inner run — confirm against bin/unspawn).
if [[ -z "${USERNS_NAME}" ]]
then
ROOTDIR=$(dirname $(readlink -f "${BASH_SOURCE[0]}"))
cd "${ROOTDIR}"
"./bin/unspawn" -n kube -d localdomain --user --net -- "${BASH_SOURCE[0]}" "$@"
else
# Second invocation (inside the namespace): assemble a chroot under ./bind.

# reset_env NAME=VALUE...
# Un-export every currently exported variable, then export only the given
# assignments, yielding a minimal, explicitly-defined environment.
reset_env() {
export -n $(export -p | grep -Po '(?<=^declare -x )[^=]+')
export "$@"
}
# All mounts below are assembled under this directory.
ROOT="$(pwd)/bind"
# bind_dev NAME
# Bind-mount /dev/NAME from the host into the chroot's /dev, skipping
# devices the host does not have.
bind_dev() {
local name="$1"
if [ -e "/dev/${name}" ]
then
touch "${ROOT}/dev/${name}"
mount -B "/dev/${name}" "${ROOT}/dev/${name}"
fi
}
# mount_cgroup CONTROLLER
# Mount one cgroup-v1 controller hierarchy under the chroot's /sys/fs/cgroup.
mount_cgroup() {
mkdir "${ROOT}/sys/fs/cgroup/$1"
mount -t cgroup -o "$1" cgroup "${ROOT}/sys/fs/cgroup/$1"
}
mount --rbind "$(pwd)/root" "${ROOT}"
# Populate a minimal /dev on a fresh tmpfs.
# taken from http://www.tldp.org/LDP/lfs/LFS-BOOK-6.1.1-HTML/chapter06/devices.html
mount -t tmpfs tmpfs "${ROOT}/dev"
mkdir "${ROOT}/dev/shm"
mkdir "${ROOT}/dev/mqueue"
mount -t mqueue mqueue "${ROOT}/dev/mqueue"
mkdir "${ROOT}/dev/pts"
mount -t devpts -o newinstance,gid=0,mode=600 devpts "${ROOT}/dev/pts"
touch "${ROOT}"/dev/ptmx
mount -B "${ROOT}/dev/pts/ptmx" "${ROOT}/dev/ptmx"
bind_dev console
bind_dev full
bind_dev null
bind_dev zero
bind_dev tty
bind_dev random
bind_dev urandom
# Standard /dev symlinks into procfs.
ln -s /proc/self/fd "${ROOT}/dev/fd"
ln -s /proc/self/fd/0 "${ROOT}/dev/stdin"
ln -s /proc/self/fd/1 "${ROOT}/dev/stdout"
ln -s /proc/self/fd/2 "${ROOT}/dev/stderr"
ln -s /proc/kcore "${ROOT}/dev/core"
touch "${ROOT}/dev/termination-log"
mount -t proc proc "${ROOT}/proc"
mount -t tmpfs tmpfs "${ROOT}/run"
mount -t tmpfs tmpfs "${ROOT}/tmp"
mount -t sysfs sysfs "${ROOT}/sys"
# Per-controller cgroup-v1 hierarchies.
mount -t tmpfs tmpfs "${ROOT}/sys/fs/cgroup"
mount_cgroup pids
mount_cgroup devices
mount_cgroup cpu,cpuacct
mount_cgroup net_cls,net_prio
mount_cgroup cpuset
mount_cgroup blkio
mount_cgroup memory
mount_cgroup freezer
mount_cgroup perf_event
mount_cgroup hugetlb
# Expose the chroot to itself and bind in the project working trees.
mount --rbind "${ROOT}" "${ROOT}/rootfs"
mount --rbind "${ROOT}/run" "${ROOT}/var/run"
mount --rbind "$(pwd)/fakecr" "${ROOT}/root/gopath/src/fakecr"
mount --rbind "$(pwd)/bin" "${ROOT}/root/bin"
mount --rbind "$(pwd)/images" "${ROOT}/root/images"
mount --rbind "$(pwd)/manifests" "${ROOT}/root/manifests"
# NOTE(review): the trailing backslash after ETCDCTL_API=3 makes
# `exec chroot ...` part of reset_env's argument list, so it is handed to
# `export` inside the function rather than executed as a command. That
# looks unintended (the chroot would never be entered) — confirm whether
# the backslash on the ETCDCTL_API line should be removed.
reset_env \
USERNS_NAME="${USERNS_NAME}" \
USERNS_DOMAIN="${USERNS_DOMAIN}" \
XDG_RUNTIME_DIR=/run \
LANG="en_US.UTF8" \
PATH="/bin:/sbin:/usr/bin:/usr/sbin:/root/kubernetes/server/bin:/root/kubernetes/client/bin:/root/gopath/bin:/root/etcd" \
HOME="/root" \
SHELL="${SHELL}" \
TERM="${TERM}" \
GOPATH="/root/gopath:/root/vendor" \
ETCDCTL_API=3 \
exec chroot "$(pwd)/bind" "$@"
fi
| true
|
2c301be7ca7f5978443bebbae98f705e21899c6f
|
Shell
|
taha-eg/docker-wideworldimporters
|
/build.sh
|
UTF-8
| 1,361
| 2.875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Build the WideWorldImporters SQL Server images (OLTP and OLAP variants)
# from the same Dockerfile, selecting the variant via the `arg` build-arg.
# Abort immediately if a build or tag step fails.
set -e
# if [ ! -f WideWorldImporters-Full.bak ]; then
#     echo "Downloading WideWorldImporters OLTP backup file from Microsoft..."
#     wget https://github.com/Microsoft/sql-server-samples/releases/download/wide-world-importers-v1.0/WideWorldImporters-Full.bak -O WideWorldImporters-Full.bak -q
#     echo "Download complete."
# else
#     echo "WideWorldImporters OLTP backup file already downloaded. Skipping."
# fi
# if [ ! -f WideWorldImportersDW-Full.bak ]; then
#     echo "Downloading WideWorldImporters data warehouse backup file from Microsoft..."
#     wget https://github.com/Microsoft/sql-server-samples/releases/download/wide-world-importers-v1.0/WideWorldImportersDW-Full.bak -O WideWorldImportersDW-Full.bak -q
#     echo "Download complete."
# else
#     echo "WideWorldImporters data warehouse backup file already downloaded. Skipping."
# fi
echo "Building OLTP docker image."
docker build . -t taha3azab/mssql-server-linux-wideworldimporters:latest --build-arg arg=oltp
# The OLTP image doubles as :latest; alias it as :oltp as well.
docker tag taha3azab/mssql-server-linux-wideworldimporters:latest taha3azab/mssql-server-linux-wideworldimporters:oltp
echo "Building OLAP docker image."
docker build . -t taha3azab/mssql-server-linux-wideworldimporters:olap --build-arg arg=olap
# Fix: dropped the former `docker tag ...:olap ...:olap`, a no-op that
# retagged the OLAP image with its own name.
| true
|
b2925391fd81c30726783308285b147fd78a75c3
|
Shell
|
AlanYiNew/ObjectDetector
|
/sw/run.sh
|
UTF-8
| 106
| 2.53125
| 3
|
[] |
no_license
|
#!/bin/sh
# Run the detector binary on every sample image, pairing each file in
# ../circle with its same-named counterpart in "../not circle".
# Fix: iterate a glob instead of word-splitting `ls` output, so file
# names containing spaces or glob characters are handled correctly.
for path in ../circle/*; do
    # If the directory is empty the literal pattern survives; skip it.
    [ -e "$path" ] || continue
    file=$(basename "$path")
    ./main "../circle/$file"
    ./main "../not circle/$file"
done
| true
|
521181c11429ccf52b07456c2f51a581fd2f494f
|
Shell
|
kyma-project/kyma
|
/hack/verify-md.sh
|
UTF-8
| 420
| 3.609375
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Regenerate the CRD documentation tables and fail if regeneration leaves
# a git diff, i.e. the checked-in docs are stale.

# Directory containing this script, resolved even when invoked through a
# relative path.
readonly CURRENT_DIR="$( cd "$( dirname "$0" )" && pwd )"

# Fix: quote the path so the script works from a checkout whose path
# contains spaces.
cd "$CURRENT_DIR/table-gen" || exit

echo "Validating CRD documentation tables"
make generate

# Non-empty `git diff` output means `make generate` modified tracked files.
DIFF=$(git diff --exit-code)
if [ -n "${DIFF}" ]; then
  # Plain echo: the messages contain no escapes, so `-e` was a no-op.
  echo "ERROR: CRDs documentation is not up to date"
  echo "Please go to the hack/table-gen, and run 'make generate'"
  exit 1
fi
echo "CRD documentation tables are up to date"
| true
|
0e95ad11fe3330f0d670d946ba988d64d70cd752
|
Shell
|
tharrisoniii/unofficial-uoregon-grad-school-dissertation-latex-markdown-apa-format
|
/optional_additional_files/Get_All_R_Library_Version_Numbers_and_Create_Draft_From_Them.sh
|
UTF-8
| 2,740
| 3.890625
| 4
|
[
"LPPL-1.3c",
"CC-BY-4.0"
] |
permissive
|
#!/bin/bash
# Find all R packages loaded in a codebase
# Jacob Levernier
# 2016
# Released under an MIT license
############################
# Settings
############################
file_to_save="./markdown_draft_examples/R_Package_Version_Numbers_AUTOMATICALLY_GENERATED_DO_NOT_EDIT_MANUALLY.md" # This file should be located in the markdown drafts folder (the script "Markdown_to_LaTeX_PDF_Build_Script.sh" expects that).
code_directory_to_search="/path/to/your/code"
############################
# End Settings
############################
# Get all 'library()' calls, and save them to a file:
grep --recursive --include="*.R*" --only-matching --no-filename "library(.*)" "$code_directory_to_search" > "$file_to_save"
# Also get all 'require()' calls, and append them to the file:
grep --recursive --include="*.R*" --only-matching --no-filename "require(.*)" "$code_directory_to_search" >> "$file_to_save"
# Remove all 'library' and 'require' start-of-line strings:
perl -pi -e 's/^(require|library)//g' "$file_to_save"
# Remove all lines beginning with 'Binary file' (which is a false positive):
perl -pi -e 's/^Binary file.*$//g' "$file_to_save"
# Delete anything after the first encountered closing parenthesis (this assumes that there isn't more than one library() call per line of code in the codebase):
perl -pi -e 's/\).*$//g' "$file_to_save"
# Replace all commas with newlines
perl -pi -e 's/,/\n/g' "$file_to_save"
# Delete all opening parentheses:
perl -pi -e 's/\(//g' "$file_to_save"
# Remove all single- and double-quote marks:
perl -pi -e 's/"//g' "$file_to_save"
perl -pi -e "s/'//g" "$file_to_save"
# Get only unique values from the file.
# Fix: sort reads the file directly (the previous `cat | sort` was a
# useless use of cat).
unique_values=$(sort --unique "$file_to_save")
echo "$unique_values" > "$file_to_save"
# Run the R Script.
# NOTE(review): this absolute, user-specific path will break on any other
# machine — consider making it a Setting above.
markdown_table=$(Rscript "/home/jacoblevernier/Primary_Syncing_Folder/Documents/Files in Transit/Dissertation_Project/Dissertation_Proposal/Written_Report/Get_All_R_Library_Version_Numbers_and_Create_Draft_From_Them_R_Script_Portion.R")
# (Over)Write the final markdown file:
echo -e "% Generated using Get_All_R_Library_Version_Numbers_and_Create_Draft_From_Them.sh\n\n" > "$file_to_save" # Add a comment (which will be ignored by Pandoc) re: the source of the file.
echo "\chapter{R base and library version numbers}" >> "$file_to_save"
#echo "\section{(Generated from package documentation within R)}" >> "$file_to_save"
echo "\begin{center}Version numbers of R base and R packages used in this project. This table was generated automatically from package documentation within R; author names are therefore as the authors wished them to be printed.\end{center}" >> "$file_to_save"
echo -e "\n\n" >> "$file_to_save"
echo "$markdown_table" >> "$file_to_save"
| true
|
ad8b0033a9911eb5921789e0ec1f5266bcad0a80
|
Shell
|
cherki-hamza/42
|
/files/exo/print_pid_args.sh
|
UTF-8
| 264
| 3.25
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Made by db0
# Contact db0company[at]gmail[dot]com
# Website http://db0.fr/
#
# Print this shell's PID, the argument count, and every argument,
# space-separated on one line.
# Fix: iterate "$@" directly instead of `eval echo -n \$$i`, which
# word-split and glob-expanded each argument and was an eval-injection
# hazard for arguments containing shell metacharacters.
echo "PID : $$"
echo "Nombre d'arguments : $#"
echo -n "Les Arguments : "
for arg in "$@"
do
	echo -n "$arg"
	echo -n " "
done
echo ""
| true
|
926601a4cd57518a0533e63e2120cc59dd1decb1
|
Shell
|
marlt/MA_Methods
|
/BLAST.sh
|
UTF-8
| 2,014
| 3.421875
| 3
|
[] |
no_license
|
#!/bin/bash
# BLAST contigs against a DB

QUERY="$1"    # multi fastq file containing query sequences
DB="$2"       # path and basename of the target database
OUT="$3"      # csv file to write results to
TMP=$(dirname "${OUT}")  # Directory, where to save the blast output table

# Write csv headline.
# Fix: the 5th column is the aligned query sequence (`qseq` in the
# -outfmt string below), not a second `qseqid`.
echo "qseqid qlen qstart qend qseq sacc staxid ssciname slen length sstart send score pident evalue qcovs" > "${OUT}"
# run actual blastn command - comparing nucleotides to nucleotides
nice blastn -num_threads 40 -task megablast -query "${QUERY}" -db "${DB}" -out "${TMP}/tmp.csv" -evalue 0.001 -max_target_seqs 1 -outfmt "6 qseqid qlen qstart qend qseq sacc staxid ssciname slen length sstart send score pident evalue qcovs"
# Append below the table header, then drop the intermediate file.
cat "${TMP}/tmp.csv" >> "${OUT}"
rm "${TMP}/tmp.csv"

# blastn parameters:
#    -outfmt 6 means tabular:
#        qseqid Query Seq-id,
#        qlen Query sequence length,
#        ssciname Subject Scientific Name,
#        staxid Subject Taxonomy IDS
#        sacc Subject Accession
#        sseqid Subject Seq-id
#        slen Subject length
#        score Raw score
#        pident Percentage of identical matches
#        evalue Expect value
#        qstart Start of alignment in query
#        qend End of alignment in query
#        qseq Aligned part of query sequence
#        length Alignment length
#        sstart Start of alignment in subject
#        send End of alignment in subject
#        qcovs query coverage on subject
#    -evalue threshold for the value, how likely a sequence of the query seqs length is found on the particular db-size by chance
#    -db specify the path to the target db basename
#    -max_target_seqs number of target seqs with similarity to report
#    -task blast task to execute, algorithms with different sensitivities and alignment settings
#    -num_threads number of available threads to use
#    -query fastq file containing the query sequences
| true
|
eac571a668107ef1f3dd1ec3dad6f6d2f381d70f
|
Shell
|
jrhopper/encryption
|
/encrypt.sh
|
UTF-8
| 1,195
| 4.0625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Hybrid-encrypt a file and upload it to Backblaze B2:
#   1. generate a random one-time password (kept only in memory),
#   2. symmetric-encrypt the file with it (AES-256-CBC),
#   3. encrypt the password itself with the RSA public key,
#   4. upload both ciphertexts.

PUBLIC_KEY="pub-key.pem"
# Fix: take exactly the first argument. The previous `"$@"` joined all
# arguments into one space-separated string, silently producing a bogus
# filename when more than one argument was given.
FILE_TO_ENCRYPT="$1"
B2_BUCKET_NAME="b2demo"

# Ensure there is a file name specified.
if [ -z "$FILE_TO_ENCRYPT" ]; then
  echo "Usage: encrypt.sh <filename>"
  exit 1
fi

# Generate a one-time per-file password from 180 random bytes. It lives
# only in RAM for use by the commands below.
# Fix: strip the newlines base64 inserts every 64 characters, so the
# password is a single line (`-pass stdin` reads only the first line).
ONE_TIME_PASSWORD=$(openssl rand -base64 180 | tr -d '\n')

# Now, encrypt the file. The file is encrypted using symmetrical
# encryption along with the one-time password above.
printf '%s\n' "$ONE_TIME_PASSWORD" | \
  openssl aes-256-cbc -a -salt -pass stdin \
  -in "$FILE_TO_ENCRYPT" -out "$FILE_TO_ENCRYPT.enc"

# Now, encrypt the one-time password using your public key. This
# password was computed in RAM and only written to disk encrypted for
# security. Password is encrypted into a binary format.
printf '%s\n' "$ONE_TIME_PASSWORD" | \
  openssl rsautl -encrypt -pubin -inkey "$PUBLIC_KEY" \
  -out "$FILE_TO_ENCRYPT.key.enc"

# Upload the encrypted file and the encrypted one time password to B2.
b2 upload_file "$B2_BUCKET_NAME" "$FILE_TO_ENCRYPT.enc" "$FILE_TO_ENCRYPT.enc"
b2 upload_file "$B2_BUCKET_NAME" "$FILE_TO_ENCRYPT.key.enc" \
  "$FILE_TO_ENCRYPT.key.enc"
| true
|
c64848d2f00afab438849ec162747292638cf244
|
Shell
|
kennycaiguo/openbsd-X11
|
/xc/programs/fvwm/utils/fvwmrc_convert
|
UTF-8
| 16,537
| 3.546875
| 4
|
[] |
no_license
|
#!/bin/sh
# Some time ago, Martin Kraemer <Martin.Kraemer@mch.sni.de> posted an
# incomplete script to convert fvwm-1 'rc' files to fvwm-2. I've just
# recently fixed and enhanced that script; it's complete (or nearly
# so) now. This should help if you choose to convert.
#
# I've also made a couple of other minor changes to make life easier
# for our users here: I changed the default initialization from "Read
# .fvwmrc" to "Read .fvwm2rc" (in fvwm/fvwmc), and I installed fvwm 2
# as "fvwm2". With these changes, users can easily convert at their
# leisure.
#
# Herewith the script. It's using GNU awk (gawk), but will run with
# any "new" awk (nawk on Suns (SunOS 4, Solaris), awk on most other
# systems). If you do not use gawk, it will be case-sensitive (the
# case of the fvwm commands must match those in the script
# exactly). With gawk, it'll be case-insensitive.
#
# Convert fvwm 1.x configuration file to 2.0
#
# Originally written by Martin Kraemer <Martin.Kraemer@mch.sni.de>
# Corrected, extended, and modified by Grant McDorman <grant@isgtec.com>
# 24 May 95
# Addtional corrections and extensions by Bill Oswald <wamo@vnet.ibm.com>
# 8 Mar 96 thru 18 Jul 96
# Minor corrections to make it easier to customize by Charles Hines 08/01/96
#
echo "fvwmrc-to-fvwm2rc"
# Point this to your gawk/nawk/awk exec:
AWK=/usr/local/bin/gawk
#AWK=/bin/awk
# Fix: quote $AWK and $dest throughout so paths containing spaces work.
if [ ! -x "$AWK" ];then
  echo "$AWK missing, cannot run"
  exit 1
fi
# Input defaults to ~/.fvwmrc, output to ~/.fvwm2rc; "-" writes to stdout.
source=${1:-$HOME/.fvwmrc}
dest=${2:-$HOME/.fvwm2rc}
if [ "$dest" != "-" ] ;then
  echo "Output to $dest"
  # Keep a backup of any pre-existing output file.
  if [ -f "$dest" ] ; then
    mv "$dest" "$dest.bak"
    echo "Saving existing $dest as $dest.bak"
  fi
  # All subsequent stdout (the converted config) goes to the output file.
  exec >"$dest"
fi
cat $source | $AWK '
BEGIN {
printf ("# Trying to compile an old .fvwrc to the new fvwm-2.0 Format\n");
TRUE=1; FALSE=0;
IGNORECASE=TRUE;
hiforecolor=""; dflt["hiforecolor"] = "black";
hibackcolor=""; dflt["hibackcolor"] = "CadetBlue";
hilightcolor = FALSE;
stdforecolor=""; dflt["stdforecolor"] = "black";
stdbackcolor=""; dflt["stdbackcolor"] = "grey70";
menuforecolor=""; dflt["menuforecolor"] = "black";
menubackcolor=""; dflt["menubackcolor"] = "grey70";
menustipplecolor=""; dflt["menustipplecolor"] = "grey40";
font=""; dflt["font"] = "-adobe-helvetica-medium-r-*-*-18-*-*-*-*-*-iso8859-1";
mwmmenus=""; dflt["mwmmenus"] = "fvwm";
menustyle=FALSE;
inpopup=FALSE;
infunction=FALSE;
prefix="";
placement = FALSE; dflt["placement"] = "ActivePlacement, DumbPlacement"
}
/^[ \t]*#/ { # Comment, pass it thru
print $0;
next;
}
/^$/ { # Empty line, pass it thru
print $0;
next;
}
################ Rename common items ##############
/Restart/ && /fvwm/ { gsub("fvwm", "fvwm2"); } # try and leave paths alone
/GoodStuff/ { gsub("GoodStuff", "FvwmButtons"); }
################ Highlight Colors ##############
/^[ \t]*HiBackColor[ \t]*/ {
dflt["hibackcolor"]=hibackcolor=$2;
printf ("#!%s (new command=HilightColor)\n", $0);
if (hibackcolor != "" && hiforecolor != "" && !hilightcolor) {
printf ("\n#Set the foreground and background color for selected windows\n");
printf ("HilightColor %s %s\n", hiforecolor, hibackcolor);
hilightcolor=TRUE;
} else
hilightcolor=FALSE;
next;
}
/^[ \t]*HiForeColor[ \t]*/ {
dflt["hiforecolor"]=hiforecolor=$2;
printf ("#!%s (new command=HilightColor)\n", $0);
if (hibackcolor != "" && hiforecolor != "" && !hilightcolor) {
printf ("\n#Set the foreground and background color for selected windows\n");
printf ("HilightColor %s %s\n", hiforecolor, hibackcolor);
hilightcolor=TRUE;
} else
hilightcolor=FALSE;
next;
}
########## Menu Colors, Style and Font ###########
/^[ \t]*MenuForeColor[ \t]*/ {
dflt["menuforecolor"]=menuforecolor=$2;
printf ("#!%s (new command=MenuStyle)\n", $0);
if (menubackcolor != "" && menuforecolor != "" && menustipplecolor != "" && font != "" && mwmmenus != "" && !menustyle) {
printf ("\n#Set the foreground, background and stipple color and font for menus\n");
printf ("MenuStyle %s %s %s %s %s\n", menuforecolor, menubackcolor, menustipplecolor, font, mwmmenus);
menustyle=TRUE;
} else
menustyle=FALSE;
next;
}
/^[ \t]*MenuBackColor[ \t]*/ {
dflt["menubackcolor"]=menubackcolor=$2;
printf ("#!%s (new command=MenuStyle)\n", $0);
if (menubackcolor != "" && menuforecolor != "" && menustipplecolor != "" && font != "" && mwmmenus != "" && !menustyle) {
printf ("\n#Set the foreground, background and stipple color and font for menus\n");
printf ("MenuStyle %s %s %s %s %s\n", menuforecolor, menubackcolor, menustipplecolor, font, mwmmenus);
menustyle=TRUE;
} else
menustyle=FALSE;
next;
}
/^[ \t]*MenuStippleColor[ \t]*/ {
dflt["menustipplecolor"]=menustipplecolor=$2;
printf ("#!%s (new command=MenuStyle)\n", $0);
if (menubackcolor != "" && menuforecolor != "" && menustipplecolor != "" && font != "" && mwmmenus != "" && !menustyle) {
printf ("\n#Set the foreground, background and stipple color and font for menus\n");
printf ("MenuStyle %s %s %s %s %s\n", menuforecolor, menubackcolor, menustipplecolor, font, mwmmenus);
menustyle=TRUE;
} else
menustyle=FALSE;
next;
}
/^[ \t]*MWMMenus[ \t]*/ {
dflt["mwmmenus"]=mwmmenus="mwm";
printf ("#!%s (new command=MenuStyle)\n", $0);
if (menubackcolor != "" && menuforecolor != "" && menustipplecolor != "" && font != "" && mwmmenus != "" && !menustyle) {
printf ("\n#Set the foreground, background and stipple color and font for menus\n");
printf ("MenuStyle %s %s %s %s %s\n", menuforecolor, menubackcolor, menustipplecolor, font, mwmmenus);
menustyle=TRUE;
} else
menustyle=FALSE;
next;
}
/^[ \t]*Font[ \t]*/ {
dflt["font"]=font=$2;
printf ("#!%s (new command=MenuStyle)\n", $0);
if (menubackcolor != "" && menuforecolor != "" && menustipplecolor != "" && font != "" && mwmmenus != "" && !menustyle) {
printf ("\n#Set the foreground, background and stipple color and font for menus\n");
printf ("MenuStyle %s %s %s %s %s\n", menuforecolor, menubackcolor, menustipplecolor, font, mwmmenus);
menustyle=TRUE;
} else
menustyle=FALSE;
next;
}
# Translate both old ButtonStyle formats to the new format:
/^[ \t]*ButtonStyle[ \t]*/ {
if ($2 == ":") { # new style already
if (NF != $4+4)
print "ERROR: ButtonStyle command incorrect" >"/dev/stderr";
printf ("%s %d %d", $1, $3, $4);
for (i=5; i<=NF; ++i)
printf (" %s", $i);
printf ("\n");
} else {
print "Note: Conversion of old ButtonStyle; values rounded" \
>"/dev/stderr"
printf ("#! Old line was: %s\n", $0);
p=index ($3,"x");
x=substr($3,1,p-1)/2;
y=substr($3,p+1)/2;
printf ("%s %s 5 %dx%d@0 %dx%d@0 %dx%d@0 %dx%d@1 %dx%d@1\n",
$1, $2, 50-x,50+y, 50+x,50+y, 50-x,50-y, 50+x,50-y,
50-x,50+y);
}
next;
}
########## Standard Colors ###########
/^[ \t]*StdForeColor[ \t]*/ {
dflt["stdforecolor"]=stdforecolor=$2;
printf ("#!%s (new command=Style \"*\" Color f/b)\n", $0);
print "Style \"*\" ForeColor " $2;
next;
}
/^[ \t]*StdBackColor[ \t]*/ {
dflt["stdbackcolor"]=stdbackcolor=$2;
printf ("#!%s (new command=Style \"*\" Color f/b)\n", $0);
print "Style \"*\" BackColor " $2;
next;
}
########## Icon Related Stuff ##########
/^[ \t]*IconBox[ \t]*/ { print "Style \"*\" " $0; next; }
/^[ \t]*IconFont[ \t]*/ { print $0; next; }
/^[ \t]*SuppressIcons[ \t]*/ { print "Style \"*\" NoIcon"; next; }
/^[ \t]*StickyIcons[ \t]*/ { print "Style \"*\" StickyIcon"; next; }
/^[ \t]*ModulePath[ \t]*/ { gsub("/fvwm", "/fvwm2"); print $0; next; }
/^[ \t]*PixmapPath[ \t]*/ { gsub("/fvwm", "/fvwm2"); print $0; next; }
/^[ \t]*IconPath[ \t]*/ { gsub("/fvwm", "/fvwm2"); print $0; next; }
# note: Icon must be followed by some white space
/^[ \t]*Icon[ \t]+/ { printf "Style %s Icon %s\n", $2, $3; next; }
########## MWM hints ##########
/^[ \t]*MWMFunctionHints[ \t]*/ { printf ("Style \"*\" MWMFunctions\n"); next; }
/^[ \t]*MWMDecor[ \t]*/ { printf ("Style \"*\" MWMDecor\n"); next; }
/^[ \t]*MWMDecorHints[ \t]*/ { printf ("Style \"*\" MWMDecor\n"); next; }
/^[ \t]*MWMBorders[ \t]*/ { printf ("Style \"*\" MWMBorder\n"); next; }
/^[ \t]*MWMButtons[ \t]*/ { printf ("Style \"*\" MWMButtons\n"); next; }
/^[ \t]*MWMHintOverride[ \t]*/ { printf ("Style \"*\" HintOverride\n"); next; }
########## Placement & Focus styles ##########
/^[ \t]*RandomPlacement[ \t]*/ { print "Style \"*\" " $0; placement = TRUE; next; }
/^[ \t]*SmartPlacement[ \t]*/ { print "Style \"*\" " $0; placement = TRUE; next; }
/^[ \t]*Sticky[ \t]*/ { printf "Style \"%s\" Sticky\n", $2; next; }
/^[ \t]*NoPPosition[ \t]*/ { print "Style \"*\" " $0; next; }
/^[ \t]*ClickToFocus[ \t]*/ { print "Style \"*\" " $0; next; }
/^[ \t]*SloppyFocus[ \t]*/ { print "Style \"*\" " $0; next; }
/^[ \t]*StaysOnTop[ \t]*/ { printf "Style \"%s\" StaysOnTop\n", $2; next; }
/^[ \t]*AutoRaise[ \t]*/ {
print "#! " $0 " (use Module FvwmAuto)";
print "AddToFunc \"InitFunction\" \"I\" Module FvwmAuto " $2;
print "AddToFunc \"RestartFunction\" \"I\" Module FvwmAuto " $2;
next;
}
########## Decoration styles ##########
/^[ \t]*BorderWidth[ \t]*/ { print "Style \"*\" " $0; next; }
/^[ \t]*HandleWidth[ \t]*/ { print "Style \"*\" " $0; next; }
/^[ \t]*DecorateTransients[ \t]*/ { print "Style \"*\" DecorateTransient"; next; }
/^[ \t]*XORvalue[ \t]*/ { print $0; next; }
/^[ \t]*BoundaryWidth[ \t]*/ { printf "Style \"*\" HandleWidth %s\n", $2; next; }
/^[ \t]*NoBoundaryWidth[ \t]*/ { print "Style \"*\" BorderWidth " $2; next; }
/^[ \t]*NoTitle[ \t]*/ { print "Style \"*\" " $0; next; }
/^[ \t]*NoBorder[ \t]*/ { print "Style \"*\" " $0; next; }
########## Etc ##########
/^[ \t]*Lenience[ \t]*/ { print "Style \"*\" " $0; next; }
/^[ \t]*Style[ \t]*/ { print $0; next; }
/^[ \t]*Key[ \t]*/ { print $0; next; }
/^[ \t]*Mouse[ \t]*/ {
if (sub("[ ]Pop[uU]p[ ]", " Menu ")) {
if (!warn["Mouse"]) {
print "Note: Setting mouse bindings to sticky menus">"/dev/stderr";
warn["Mouse"] = TRUE;
}
if (index($6, "\"") == 1) {
gsub("\"", "")
if (NF > 6) {
for (i = 6; i <= NF; i++)
sub($i " ", $i)
}
}
sub("$", " Nop");
}
print $0; next;
}
/^[ \t]*WindowListSkip[ \t]*/ { printf "Style %s WindowListSkip", $2; next; }
/^[ \t]*WindowFont[ \t]*/ { print $0; next; }
/^[ \t]*ClickTime[ \t]*/ { print $0; next; }
/^[ \t]*OpaqueMove[ \t]*/ { print "OpaqueMoveSize " $2; next; }
/^[ \t]*EdgeScroll[ \t]*/ { print $0; next; }
/^[ \t]*EdgeResistance[ \t]*/ { print $0; next; }
/^[ \t]*DeskTopSize[ \t]*/ { print $0; next; }
/^[ \t]*DeskTopScale[ \t]*/ {
printf ("#!%s (new command=*FvwmPagerDeskTopScale <scale>)\n", $0);
print "*FvwmPagerDeskTopScale " $2;
next;
}
/^[ \t]**FvwmButtons[ \t]*/ {
sub("[ ]Swallow[ ]*[^ ]*", "& Exec");
print $0;
if (length($0) > 199)
{
print "Warning: line too long" >"/dev/stderr";
print ">> " $0 >"/dev/stderr";
}
if (!warn["GoodStuff"])
{
print "Note: GoodStuff renamed to FvwmButtons" >"/dev/stderr";
warn["GoodStuff"]=TRUE;
}
next;
}
/^\*/ {
# other Module Configuration commands are passed thru
print $0;
next;
}
# hack: Modules spawned outside of a function, menu, or popup cannot have leading whitespace.
# add these to the initfunction
/^Module[ \t]*/ { printf "AddToFunc \"InitFunction\" \"I\" %s \n", $0; next; }
# hack: function declarations cannot have leading whitespace
/^Function[ \t]*/ {
if (inpopup)
print "ERROR: EndPopup missing\n" NR $0 >"/dev/stderr";
inpopup=FALSE;
if (infunction)
print "ERROR: EndFunction missing\n" NR $0 >"/dev/stderr";
infunction=TRUE;
prefix="AddToFunc " $2;
next;
}
/^[ \t]*EndFunction[ \t]*/ {
if (!infunction)
print "ERROR: EndFunction outside of function" >"/dev/stderr";
infunction=FALSE;
prefix="";
next;
}
# hack: popup declarations cannot have leading whitespace
/^Popup/ {
if (inpopup)
print "ERROR: EndPopup missing\n" NR $0 >"/dev/stderr";
if (infunction)
print "ERROR: EndFunction missing\n" NR $0 >"/dev/stderr";
infunction=FALSE;
inpopup=TRUE;
if (index($2, "\"") == 1) {
tstr = ""
for (i = 2; index(substr($i, 2), "\"") < length($i) && i <= NF; i++)
tstr = sprintf("%s%s", tstr, $i);
prefix = "AddToMenu " substr(tstr, 2, length(tstr) - 2)
} else
prefix="AddToMenu " $2;
next;
}
/^[ \t]*EndPopup[ \t]*/ {
if (!inpopup)
print "ERROR: EndPopup outside of popup" >"/dev/stderr";
inpopup=FALSE;
prefix="";
next;
}
########## Deleted Commands ##########
/^[ \t]*DontMoveOff[ \t]*/ ||
/^[ \t]*BackingStore[ \t]*/ ||
/^[ \t]*AppsBackingStore[ \t]*/ ||
/^[ \t]*SaveUnders[ \t]*/ ||
/^[ \t]*StubbornIcons[ \t]*/ ||
/^[ \t]*StubbornIconPlacement[ \t]*/ ||
/^[ \t]*StubbornPlacement[ \t]*/ ||
/^[ \t]*Cursor[ \t]*/ {
print "#! " $0 " [deleted]";
if (warned[$1]==FALSE) {
print "Warning: " $1 " not in Fvwm2, command dropped" >"/dev/stderr";
warned[$1] = TRUE;
}
next;
}
/^[ \t]*Pager[ \t]*/ {
print "#! " $0 " [deleted]";
if (warned[$1]==FALSE) {
print "Warning: " $1 " omitted, internal pager is obsolete (use FvwmPager)" >"/dev/stderr";
warned[$1] = TRUE;
}
next;
}
/^[ \t]*PagingDefault[ \t]*/ ||
/^[ \t]*TogglePage[ \t]*/ {
print "#! " $0 " (use EdgeScroll 0 0)"; next;
print "Warning: " $1 " not in Fvwm2, use EdgeScroll">"/dev/stderr";
}
########## Old Internal Pager Colors ###########
/^[ \t]*PagerForeColor[ \t]*/ ||
/^[ \t]*PagerBackColor[ \t]*/ {
printf ("#!%s (new command=Style FvwmPager Color fore_color/back_color)\n", $0);
if (warned[$1]==FALSE) {
print "Warning: " $1 " omitted, internal pager is obsolete (use FvwmPager)" >"/dev/stderr";
warned[$1] = TRUE;
}
next;
}
########## Sticky Colors ###########
/^[ \t]*StickyForeColor[ \t]*/ {
printf ("#!%s (no sticky foreground color in fvwm2)\n", $0);
if (warned[$1]==FALSE) {
print "Warning: StickyForeColor not in fvwm2, omitted" > "/dev/stderr"
print " Use the Style command to assign each sticky window the same ForeColor" > "/dev/stderr"
}
next;
}
/^[ \t]*StickyBackColor[ \t]*/ {
printf ("#!%s (no sticky background color in fvwm2)\n", $0);
if (warned[$1]==FALSE) {
print "Warning: StickyBackColor not in fvwm2, omitted" >"/dev/stderr"
print " Use the Style command to assign each sticky window the same BackColor" > "/dev/stderr"
}
next;
}
{
if (infunction) {
#gsub("[ ]PopUp[ ]", " "); }
if ($2 == "\"Motion\"")
context="\"M\"";
else if ($2 == "\"Click\"")
context="\"C\"";
else if ($2 == "\"DoubleClick\"")
context="\"D\"";
else if ($2 == "\"Immediate\"")
context="\"I\"";
else context=$2;
printf "%s", prefix " " context " " $1;
for (i=3; i<=NF; ++i)
printf (" %s", $i);
printf ("\n");
prefix="+ ";
next;
} else if (inpopup) {
# not going to handle escaped quotes
label=$2;
first=3;
quoted=substr(label, 1, 1)=="\"" &&
substr(label, length(label), 1)!="\"";
for (i=3;i<=NF && quoted;i++) {
label=label " " $i;
quoted=substr(label, length(label), 1)!="\"";
first=i + 1;
}
printf ("%s %s %s", prefix, label, $1);
for (i=first; i<=NF; ++i)
printf (" %s", $i);
printf ("\n");
prefix="+ ";
next;
}
if (warned[$1]==FALSE) {
printf ("#!Warning: Keyword \"%s\" not handled yet\n", $1);
warned[$1]=TRUE;
print "Warning: Unknown keyword "$1" passed through">"/dev/stderr";
}
print $0;
next;
}
END {
if (!menustyle) {
printf ("\n#Set the foreground, background and stipple color and font for menus\n");
printf ("MenuStyle %s %s %s %s %s\n", dflt["menuforecolor"], dflt["menubackcolor"], dflt["menustipplecolor"], dflt["font"], dflt["mwmmenus"]);
}
if (!hilightcolor) {
printf ("\n#Set the foreground and background color for selected windows\n");
printf ("HilightColor %s %s\n", dflt["hiforecolor"], dflt["hibackcolor"]);
}
if (!placement) {
printf "# overide default RandomPlacement and SmartPlacement Styles\n";
printf "Style \"*\" %s\n", dflt["placement"];
}
}
'
exit
| true
|
396b72e649f90639690de34c7e87519a8de3cd76
|
Shell
|
autopqc/vagrant-autopqc
|
/provision.sh
|
UTF-8
| 1,271
| 2.671875
| 3
|
[] |
no_license
|
# Vagrant provisioner: install system packages, build the NTL and factory
# libraries from source, compile autopqc with opam, and wire up Proof
# General in Emacs. Steps are order-dependent (later builds link against
# the libraries installed earlier).
export LD_LIBRARY_PATH=/usr/local/lib:/home/vagrant/autopqc/_build/c_src
sudo pacman -Syyu --noconfirm
sudo pacman -S --noconfirm --needed wget base-devel git opam gmp libffi emacs xorg-xauth
# download dependencies
cd ~
wget http://www.shoup.net/ntl/ntl-9.7.1.tar.gz
tar xvf ntl-9.7.1.tar.gz
wget http://www.mathematik.uni-kl.de/ftp/pub/Math/Factory/factory-4.0.2.tar.gz
tar xvf factory-4.0.2.tar.gz
git clone https://github.com/autopqc/autopqc.git autopqc
git clone https://github.com/ZooCrypt/PG.git PG-AutoGnP
# install ntl
cd ~/ntl-9.7.1/src
./configure NTL_GMP_LIP=on SHARED=on
make
sudo make install
# install factory
cd ~/factory-4.0.2
./configure --disable-streamio --without-Singular --disable-static
make
sudo make install
# compile autopqc (pinned to the OCaml version the project builds with)
cd ~/autopqc
opam init -a -y
opam switch create 4.02.3
eval $(opam env)
opam update -y
opam upgrade -y
opam pin add autognp . -n -y
opam install -y autognp --deps-only
make
make test-examples
# setup proof general
cd ~/PG-AutoGnP
make
echo "(load \"~/PG-AutoGnP/generic/proof-site.el\")" > ~/.emacs
echo "(setq autognp-prog-name \"~/autopqc/autognp -emacs\")" >> ~/.emacs
# clean up source trees that are no longer needed after install
cd ~
rm -rf factory* ntl*
# enable X forwarding
sudo sed -i "s/#X11Forwarding no/X11Forwarding yes/g" /etc/ssh/sshd_config
| true
|
db2ccfed2915a32bff61bd997d7c5df863e5427e
|
Shell
|
denis-kol4ev/OraDBA
|
/Scripts/Bash/hosts_for_puppet.sh
|
UTF-8
| 1,996
| 3.484375
| 3
|
[] |
no_license
|
#!/bin/bash
# Export the host list from the Oracle DB and push it to Git for Puppet.
# Each step appends its output to a timestamped log; on failure the log is
# mailed to the admins and the script aborts (see check_error_f).
cd /home/oracle
# Load the Oracle environment (ORACLE_HOME etc.); discard its banner.
. .bash_profile > /dev/null
SCRIPT_NAME=$(basename "$0")
ADDRESS=$(hostname -s)
ADMIN_MAIL1=admin1@company.ru
ADMIN_MAIL2=admin2@company.ru
LOG_DIR=/home/oracle/maint/logs
# One log file per run, stamped with date and time.
LOG_FILE=$LOG_DIR/hosts_for_puppet_$(date +%F-%H-%M.log)
# check_error_f <status> <step-name>
# Record the step outcome in $LOG_FILE. On a non-zero status, also close
# the log, mail it to the admins, and terminate the whole script.
check_error_f () {
  if [[ "$1" -eq 0 ]]; then
    RETURN_STATUS='SUCCEED'
    echo "Step $2 status: $RETURN_STATUS" >> "$LOG_FILE"
    return
  fi
  RETURN_STATUS='FAILED'
  echo "Step $2 status: $RETURN_STATUS" >> "$LOG_FILE"
  echo "***** End of script *****" >> "$LOG_FILE"
  {
    echo "Script: $SCRIPT_NAME"
    echo "Step: $2"
    echo "Status: $RETURN_STATUS"
    echo "Check attachment for details"
  } | mailx -S smtp=relay.company.ru -r $ADDRESS@company.ru -a $LOG_FILE -s "$SCRIPT_NAME status: $RETURN_STATUS" $ADMIN_MAIL1 $ADMIN_MAIL2
  exit
}
# Log the run header.
{
echo "***** Start of script *****"
echo "Date : `date`"
echo "HOST : `hostname`"
echo "LOG  : $LOG_FILE"
} >> $LOG_FILE 2>&1
# NOTE(review): "form" below looks like a typo for "from"; it is a runtime
# string (appears in logs and mail subjects), so left unchanged here.
STEP_NAME="Export form DB"
{
echo
echo "=========================================================="
echo " $(date) Step 1: $STEP_NAME"
echo "=========================================================="
echo
} >> $LOG_FILE 2>&1
# Step 1: run the export SQL; STEP_STATUS captures sqlplus's exit code.
{
cd /home/oracle/maint
sqlplus -s /nolog <<EOF
@hosts_for_puppet.sql
exit
EOF
STEP_STATUS=$?
} >> $LOG_FILE 2>&1
echo "STEP_NAME="$STEP_NAME
echo "STEP_STATUS="$STEP_STATUS
# Aborts (with mail) if the step failed.
check_error_f "$STEP_STATUS" "$STEP_NAME"
STEP_NAME="Push to Git"
{
echo
echo "=========================================================="
echo " $(date) Step 2: $STEP_NAME"
echo "=========================================================="
echo
} >> $LOG_FILE 2>&1
# Step 2: commit and push the exported CSV; STEP_STATUS is the exit code
# of the final `git push`.
{
cd /home/oracle/hosts_for_puppet
git add hosts_for_puppet.csv
git commit -m "New hosts uploaded"
git push origin main
STEP_STATUS=$?
} >> $LOG_FILE 2>&1
echo "STEP_NAME="$STEP_NAME
echo "STEP_STATUS="$STEP_STATUS
check_error_f "$STEP_STATUS" "$STEP_NAME"
echo "***** End of script *****" >> $LOG_FILE
| true
|
f712312def2c504a505e86de28628219fc6675fa
|
Shell
|
alemmer1/16s_Project
|
/align_bwa_mus16s.sh
|
UTF-8
| 392
| 2.859375
| 3
|
[] |
no_license
|
#!/bin/bash
# Usage: align_bwa_mus16s.sh index|align
# "index": build the BWA index for the 16S reference FASTA.
if [ "$1" == "index" ];
then
bwa index ./16SMicrobial_removedNewLines.fasta
fi
# "align": map each nanopore sample's reads against the reference with
# bwa mem in ont2d (Oxford Nanopore 2D) mode, one SAM file per sample.
if [ "$1" == "align" ] ;
then
samples='36 37 38 44 45 46'
dir=/dilithium/Data/Nanopore/Analysis/alemmer/mus16s_samples
ref=./16SMicrobial_removedNewLines.fasta
for sample in $samples
do
bwa mem -x ont2d ${ref} ${dir}/sample.${sample}.202.fq.gz > bwamem_sample.${sample}.202.sam
done
fi
| true
|
e73e496b35e41aa5fd8007331a52f968f6ece559
|
Shell
|
Sergeileduc/bash-tools
|
/casi320
|
UTF-8
| 2,355
| 3.4375
| 3
|
[] |
no_license
|
#!/bin/bash
# This script uploads a picture (jpeg or png) on casimages, using 640 redim
# And return casimages image url in zenity box (needs zenity)
IMG="$1"
ftype=""
# Detect the MIME type of $1 and set $ftype to "jpg" or "png";
# any other type aborts with a zenity error dialog.
function get-file-type {
mimetype=$(file --mime-type -b "$1")
case "$mimetype" in
image/png) ftype="png";;
image/jpeg) ftype="jpg";;
*)
zenity --error --text="Pas le bon format (seulement jpg/jpeg/png)."
exit 1;;
esac
}
# MAIN
get-file-type "$1"
# This first GET initialize "cookie"
curl -s -c "$HOME/.casimagecookie" GET https://www.casimages.com/ -H "Accept: application/json" >/dev/null
# Just changing to dim 320 page.
curl -s 'https://www.casimages.com/ajax/s_ano_resize.php?dim=320' \
-H 'Pragma: no-cache' \
-H 'Sec-Fetch-Site: same-origin' \
-H 'DNT: 1' \
-H 'Accept-Encoding: gzip, deflate, br' \
-H 'Accept-Language: fr-FR,fr;q=0.9,en-US;q=0.8,en;q=0.7' \
-H 'User-Agent: Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.75 Safari/537.36' \
-H 'Sec-Fetch-Mode: cors' \
-H 'Accept: text/plain, */*; q=0.01' \
-H 'Cache-Control: no-cache' \
-H 'X-Requested-With: XMLHttpRequest' \
-b "$HOME/.casimagecookie" \
-H 'Connection: keep-alive' \
-H 'Referer: https://www.casimages.com/' --compressed
# Upload the file; the server response is the image id used below.
img_id=$(curl -s 'https://www.casimages.com/upload_ano_multi.php' \
-H 'User-Agent: Mozilla/5.0 (X11; Linux x86_64; rv:69.0) Gecko/20100101 Firefox/69.0' \
-H 'Cache-Control: no-cache' \
-H 'Connection: keep-alive' \
-H 'Pragma: no-cache' \
--form "Filedata=@${IMG};type=image/${ftype}" \
-H 'Accept: application/json' \
-H 'X-Requested-With: XMLHttpRequest' \
-H 'Referer: https://www.casimages.com/' \
-H 'Accept-Language: fr-FR,fr;q=0.8,en-US;q=0.5,en;q=0.3' --compressed \
-b "$HOME/.casimagecookie")
# echo "${img_id}"
# Curl the result page and regex to find "Grande" url
# There's two Grande resutls -> x is an array
x=($(curl -s "https://www.casimages.com/codes_ano_multi.php?img=${img_id}" | grep -Po "Grande : .*?value='\\K(.*?)>"))
# delete 2 last characters
url=${x[1]::-2}
# Put result in a textfile
echo "[img]$url[/img]" > "$HOME/urlcasi.txt"
# Display result in zenity
zenity --text-info --title="URL casimages" \
--width=800 --height=200 --filename="$HOME/urlcasi.txt"
# Clean up
rm "$HOME/urlcasi.txt"
rm "$HOME/.casimagecookie"
| true
|
cb6ca52091a41c00f7c0a14c49b198b6de5578bb
|
Shell
|
YuanFanBin/dotfile
|
/.xinitrc
|
UTF-8
| 2,183
| 2.828125
| 3
|
[] |
no_license
|
#!/bin/sh
# Copyright: Copyright (C) 2013-2017 YuanFanBin
# License: The MIT License
# Email: yuanfanbin@gmail.com
#-----------------------------------------------------------------------------#
# cp /etc/X11/xinit/xinitrc
# [Reference]
# * [xinitrc - Arch Wiki](https://wiki.archlinux.org/index.php/Xinitrc)
#-----------------------------------------------------------------------------#
#-----------------------------------------------------------------------------#
# Source all system-wide X session startup snippets.
if [ -d /etc/X11/xinit/xinitrc.d ] ; then
for f in /etc/X11/xinit/xinitrc.d/?*.sh ; do
[ -x "$f" ] && . "$f"
done
unset f
fi
#-----------------------------------------------------------------------------#
#-----------------------------------------------------------------------------#
# Session environment (i3 config, Go toolchain, PATH additions).
export I3_CONFIG_DIR="~/.config/i3"
export GOROOT=/usr/lib/go
export GOPATH=$HOME/workspace/golang
export PATH="$PATH:$I3_CONFIG_DIR/bin:$HOME/bin:$GOROOT/bin:$GOPATH/bin"
# NOTE(review): variable is spelled TERMNAL — likely meant TERMINAL; confirm
# what i3/config actually reads before renaming.
export TERMNAL="st"
#-----------------------------------------------------------------------------#
# [Terminal Pinyin]
export GTK_IM_MODULE=fcitx
export QT_IM_MODULE=fcitx
export XMODIFIERS=@im=fcitx
# [Keyboard]
# system .Xmodmap
sysmodmap=/etc/X11/xinit/.Xmodmap
if [ -f $sysmodmap ]; then
xmodmap $sysmodmap
fi
# user .Xmodmap
# [~/.Xmodmap](https://github.com/YuanFanBin/dotfile/.Xmodmap)
usermodmap=$HOME/.Xmodmap
if [ -f "$usermodmap" ]; then
xmodmap "$usermodmap"
fi
# [Mouse]
# Auto-hide the mouse cursor when idle.
unclutter -root -visible &
# [Auto Mount]
udiskie &
# [Input method daemon]
fcitx &
# NOTE(review): the three fcitx exports below duplicate the ones above.
export GTK_IM_MODULE=fcitx
export QT_IM_MODULE=fcitx
export XMODIFIERS=@im=fcitx
# [composite manager]
xcompmgr &
# [Reference]
# * [Xmodmap - Arch Wiki](https://wiki.archlinux.org/index.php/Xmodmap)
# * [Unclutter - Arch Wiki](https://wiki.archlinux.org/index.php/Unclutter)
# * [Udisks - Arch Wiki](https://wiki.archlinux.org/index.php/Udisks)
# * [Fcitx - Arch Wiki](https://wiki.archlinux.org/index.php/Fcitx)
#-----------------------------------------------------------------------------#
# twm &
# xclock -geometry 50x50-1+1 &
# xterm -geometry 80x50+494+51 &
# xterm -geometry 80x20+494-0 &
# exec xterm -geometry 80x66+0+0 -name login
# Regenerate the i3 config then start the window manager (session blocks here).
i3-regenerate
i3
| true
|
4084d733b9366ed352a389285a7f27d4852a2467
|
Shell
|
Satkarni/CDAC
|
/Shell/Assignment1/p10.sh
|
UTF-8
| 155
| 3.3125
| 3
|
[] |
no_license
|
#!/bin/bash
# Read a non-negative integer from stdin and print the sum of its digits.
echo "Enter number"
read -r input
# FIX: the accumulator was never initialised, so an input of 0 (or a
# failed read) printed "Sum of digits:" with an empty value.
sum=0
while [ "$input" -gt 0 ]
do
sum=$((sum + input % 10))
input=$((input / 10))
done
echo "Sum of digits:$sum"
| true
|
b4341c579f5c91556ad2244ca83daedce03e7024
|
Shell
|
iosifv/terminal-toolbelt
|
/sources/bundle/laravel.sh
|
UTF-8
| 1,964
| 3.34375
| 3
|
[
"MIT"
] |
permissive
|
#============================
# Laravel ===================
#============================
# Print a short cheat-sheet of the Laravel helpers defined in this file.
# Relies on `print-status` being defined by another toolbelt source file.
function tb-help-laravel {
print-status "laravel:"
echo "art|artisan "
echo "tinker "
echo "lroute = Searches and lists routes in an app. Takes 2 arguments"
echo "lcache = Deletes all cache in a Laravel app"
echo "llog "
echo "ltail "
echo "sqlite-open "
}
# This is needed in Linux environments,
# OSX works well with just an alias
#----------------------------
# Run `php artisan` forwarding ALL caller arguments.
# FIX: the original passed only `$1 $2 $3` (unquoted), silently dropping
# any argument past the third and breaking arguments containing
# whitespace; "$@" forwards everything verbatim.
function artisan {
    php artisan "$@"
}
alias art='php artisan'
alias tinker='php artisan tinker'
# Search for a certain route
# Eg. sed 's/search for text/replace with this/'
#----------------------------
# lroute <pattern1> <pattern2>
# Lists `artisan route:list` output, strips controller namespaces and
# API-prefix noise with a sed pipeline, then filters by both patterns
# ($1 unquoted, $2 quoted — $2 may be empty to match everything).
function lroute {
php artisan route:list \
| sed 's/App\\Http\\Controllers//' \
| sed 's/api\/v1\//\//' \
| sed 's/api,auth:api,//' \
| sed -E 's/([[:space:]]+).{1}$//' \
| sed -E 's/(\|[[:space:]]+\|)/\-\>/' \
| sed -E 's/([[:space:]]{16}\|)/\|/' \
| sed 's/^(.{10})/x/' \
| grep $1 \
| grep "$2"
}
# Deletes everything cache related
#----------------------------
# Clears a Laravel app's caches: bootstrap cache files, composer autoload,
# artisan view/config caches, then regenerates ide-helper files.
# Must be run from the application root. Uses $C_YELLOW/$C_RESET colour
# variables defined elsewhere in the toolbelt.
function lcache {
echo "${C_YELLOW}Removing bootstrap cache...${C_RESET}"
# touch first so the glob below always matches at least one file.
touch bootstrap/cache/foo.php
rm bootstrap/cache/*.php
echo "${C_YELLOW}Composer stuff...${C_RESET}"
composer dump-autoload
composer install
echo "${C_YELLOW}Removing artisan cache...${C_RESET}"
php artisan cache:clear
php artisan view:clear
php artisan clear-compiled
echo "${C_YELLOW}Recreating ide-helper${C_RESET}"
php artisan ide-helper:generate
php artisan ide-helper:meta
php artisan ide-helper:models -W
}
# Sqlite
#----------------------------
alias sqlite-open='sqlite3 database/database.sqlite'
# Logging stuff
#----------------------------
alias llog='multitail storage/logs/laravel.log'
alias ltail='llog'
# Fastest
#----------------------------
alias fastest='fastest --xml=phpunit.xml'
# NOTE(review): the leading space inside the quotes below is preserved in
# the expansion; confirm it is intentional.
alias vapor=' ~/.composer/vendor/bin/vapor'
| true
|
6f381a6addc083eb4503cd73dc4c41b186d8beb1
|
Shell
|
cliffton/caliper
|
/packages/caliper-samples/network/fabric-v1.4/kafka/launch_orderer.sh
|
UTF-8
| 1,191
| 2.921875
| 3
|
[
"Apache-2.0",
"CC-BY-4.0"
] |
permissive
|
#!/bin/bash
# Launches a Hyperledger Fabric orderer node for the Caliper kafka network.
# Usage: ./launch_orderer.sh ORG_ID
# Rebuilds $HOME/orderer from the checked-in crypto material, exports the
# orderer's environment, then execs the `orderer` binary (blocks).
set -x
export CALIPER_KAFKA=$HOME/caliper/packages/caliper-samples/network/fabric-v1.4/kafka/
function usage {
echo "./launch_orderer.sh ORG_ID"
}
# Check if ORG_NAME passed
# NOTE(review): exits 0 on a missing argument — a non-zero status would be
# more conventional; confirm callers don't rely on 0.
if [ -z $1 ];
then
usage
echo "Please provide the ORG ID!!!"
exit 0
else
ORG_ID=$1
fi
# Start from a clean orderer working tree.
rm -rf $HOME/orderer/*
mkdir -p $HOME/orderer/msp/orderer
mkdir -p $HOME/orderer/ledger
mkdir -p $HOME/orderer/configtx
# Orderer runtime configuration (genesis block, MSP, kafka, ledger paths).
export FABRIC_LOGGING_SPEC="grpc=debug:debug"
export ORDERER_GENERAL_LISTENADDRESS="0.0.0.0"
export ORDERER_GENERAL_GENESISMETHOD="file"
export ORDERER_GENERAL_GENESISFILE="$HOME"/orderer/configtx/genesis.block
export ORDERER_GENERAL_LOCALMSPID="OrdererMSP"
export ORDERER_GENERAL_LOCALMSPDIR="$HOME"/orderer/msp/orderer/msp
export ORDERER_KAFKA_VERBOSE="true"
export ORDERER_FILELEDGER_LOCATION="$HOME"/orderer/ledger
export ORDERER_GENERAL_LOGLEVEL="debug"
export FABRIC_CFG_PATH="$HOME"/orderer
# Stage config and this org's crypto material into the working tree.
cp orderer.yaml "$HOME"/orderer/
cp -r ./config/* "$HOME"/orderer/configtx/
cp -r ./config/crypto-config/ordererOrganizations/example.com/orderers/orderer"$ORG_ID".example.com/* "$HOME"/orderer/msp/orderer/
cd $HOME/go/src/github.com/hyperledger/fabric/
set +x
orderer
| true
|
42a1cd3838655115d5972cf9909fbadfed18909e
|
Shell
|
pedropena/backharddi-ng-kernel
|
/udebs/d-i/partman/partman-base/source/init.d/unsupported
|
UTF-8
| 741
| 3.0625
| 3
|
[] |
no_license
|
#!/bin/sh
# partman init.d hook: warn the user (via debconf) when the default disk
# label for this architecture is unknown or unsupported. Exit 10 tells
# partman to go back; exit 0 continues. Runs once per install (stamp file).
if [ -f /var/lib/partman/supported ]; then
exit 0
fi
. /lib/partman/definitions.sh
[ -d /var/lib/partman ] || mkdir /var/lib/partman
# Create the stamp file so this check is not repeated.
>/var/lib/partman/supported
default_label=$(default_disk_label)
case "$default_label" in
UNKNOWN)
# Ask whether to continue anyway; $RET=false means the user declined.
db_input critical partman/unknown_label
db_go || exit 10
db_get partman/unknown_label
if [ "$RET" = false ]; then
db_reset partman/unknown_label
exit 10
fi
db_reset partman/unknown_label
exit 0
;;
UNSUPPORTED)
db_input critical partman/unsupported_label
db_go || exit 10
db_get partman/unsupported_label
if [ "$RET" = false ]; then
db_reset partman/unsupported_label
exit 10
fi
db_reset partman/unsupported_label
exit 0
;;
*)
# Supported label — nothing to warn about.
exit 0
;;
esac
| true
|
62f5859358f91d25cecf7b9e405e9392a4228950
|
Shell
|
cu-swe4s-fall-2019/test-driven-development-qyang13
|
/gen_data.sh
|
UTF-8
| 60
| 2.53125
| 3
|
[] |
no_license
|
# Emit 1000 lines, each holding two random integers separated by a TAB.
# FIX: the original unquoted `echo $RANDOM$'\t'$RANDOM` let the shell
# word-split on the tab, so echo re-joined the two numbers with a SPACE;
# quoting the expansions preserves the intended tab delimiter.
for i in `seq 1 1000`; do
    echo "$RANDOM"$'\t'"$RANDOM"
done
| true
|
1174cd7eef3d57aee61369dc040f7d1f3829a8c0
|
Shell
|
ntpeters/genesis
|
/common/script-requires.sh
|
UTF-8
| 1,282
| 4.28125
| 4
|
[] |
no_license
|
# Functions to limit script executions

# Abort with status 1 unless the current effective user is root (EUID 0).
function require_root() {
    if [[ $EUID -ne 0 ]]; then
        echo "This script must be run as root!"
        exit 1
    fi
}
# Abort with status 1 unless the calling script was sourced (`.`/`source`)
# rather than executed directly ($0 equals this file only when executed).
function require_sourced() {
    if [ "${BASH_SOURCE[0]}" = "${0}" ]; then
        echo "This script must be sourced, not executed!"
        exit 1
    fi
}
# Abort with status 1 unless $OSTYPE contains $1 (case-insensitive
# substring match); returns 0 when it does.
function require_os() {
    local required_os="${1,,}"
    local current_os="${OSTYPE,,}"
    case "$current_os" in
        *"$required_os"*)
            return 0
            ;;
        *)
            echo "This script must be run on a "$required_os" platform!"
            exit 1
            ;;
    esac
}
# Require a script to be run on a specific linux distribution
# (case-insensitive substring match against `lsb_release -si`).
# NOTE(review): assumes lsb_release is installed — it is optional on many
# distros; confirm availability or guard the call.
function require_distribution() {
local required_platform="${1,,}"
# Ensure this function is only run on linux
require_os "linux"
# Load the current distro
local current_platform=`lsb_release -si`
current_platform="${current_platform,,}"
if [[ "$current_platform" != *"$required_platform"* ]]; then
echo "This script is intended for "$required_platform" only!"
exit 1
fi
return 0
}
# Denotes that this script has been sourced
genesis_script_requires=0
| true
|
e44aaaaebd359a700124216d68305537d021e62e
|
Shell
|
transloadit/uppy
|
/bin/to-gif-hq.sh
|
UTF-8
| 367
| 2.9375
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Convert a video file to a gif.
# `to-gif /path/to/input.mp4 /path/to/output.gif`
# Two passes: first derive an optimal colour palette from the input, then
# render the gif through that palette for higher quality.
palette="/tmp/to-gif-palette.png"
filters="fps=15"
# FIX: $1, $2 and $palette are now quoted so paths containing spaces are
# passed to ffmpeg as single arguments instead of being word-split.
ffmpeg -v warning -i "$1" -vf "$filters,palettegen" -y "$palette"
ffmpeg -v warning -i "$1" -i "$palette" -lavfi "$filters [x]; [x][1:v] paletteuse" -y "$2"
# gifsicle --resize-fit-width 1000 -i animation.gif > animation-1000px.gif
| true
|
8d56fada33d9c6a8253cd048ab210e0d569848b5
|
Shell
|
whsasf/bohem
|
/lib/mx_create_domain.sh
|
UTF-8
| 2,870
| 3.65625
| 4
|
[] |
no_license
|
#!/bin/bash
### NOTE: This script does NOT use MOS.
##
## Create a domain
##
#mx_create_domain.sh <mxhost> <domain>
# Positional args: user host prefix port domain, then optional flags
# --relayhost / --mailrazorgateselector / --mailrazorgatedkimkey.
# Creates the domain over ssh via imldapsh, then applies optional LDAP
# attributes with ldapmodify. Each remote call is retried once after 30s;
# failures are appended to $WORK_DIR/../warnings (env-provided).
set -x
user=$1
host=$2
prefix=$3
port=$4
domain=$5
shift 5
# Parse optional flag/value pairs.
while [ $# -gt 0 ]; do
if [ "$1" = "--relayhost" ]; then
relayhost=$2
shift
elif [ "$1" = "--mailrazorgateselector" ]; then
mailrazorgateselector=$2
shift
elif [ "$1" = "--mailrazorgatedkimkey" ]; then
mailrazorgatedkimkey=$2
shift
else
echo "Don't understand option $1"
exit 1
fi
shift
done
# Split an optional ssh port out of host ("host:port"), default 22.
echo $host | grep -q ':'
if [ $? -eq 0 ]; then
ssh_port=$(echo $host | cut -f 2 -d ':')
host=$(echo $host | cut -f 1 -d ':')
else
ssh_port=22
fi
# Create the domain; retry once after 30s on failure.
ssh -o StrictHostKeyChecking=no -o BatchMode=yes -p $ssh_port $user@$host $prefix/bin/imldapsh -D cn=root -W secret -H ${OWM_CONFSERV1_HOST} -P $port CreateDomain $domain local
if [ $? -ne 0 ]; then
ts=$(date +%d/%m/%Y\ %H:%M:%S)
sleep 30
ssh -o StrictHostKeyChecking=no -o BatchMode=yes -p $ssh_port $user@$host $prefix/bin/imldapsh -D cn=root -W secret -H ${OWM_CONFSERV1_HOST} -P $port CreateDomain $domain local
if [ $? -eq 0 ]; then
echo "WARNING: $ts imldapsh issues adding domain $domain in test $TC_NAME (retry ok)" >> $WORK_DIR/../warnings
else
echo "WARNING: $ts imldapsh issues adding domain $domain in test $TC_NAME (retry also failed)" >> $WORK_DIR/../warnings
fi
fi
# Build the domain DN: "a.b.c" -> "dc=a,dc=b,dc=c".
for f in $(echo $domain | tr '.' ' ')
do
if [ "$domain_dn" = "" ]; then
domain_dn="dc=$f"
else
domain_dn="$domain_dn,dc=$f"
fi
done
# Accumulate optional attribute modifications into tmp.ldif.
rm -f tmp.ldif
if [ "$relayhost" != "" ] ; then
cat >> tmp.ldif <<- EOF
dn: $domain_dn
changetype: modify
add: mailrazorgaterelayhost
mailrazorgaterelayhost: $relayhost
EOF
fi
if [ "$mailrazorgateselector" != "" ] ; then
cat >> tmp.ldif <<- EOF
dn: $domain_dn
changetype: modify
add: mailrazorgateselector
mailrazorgateselector: $mailrazorgateselector
EOF
fi
# Double colon: the DKIM key is supplied base64-encoded (LDIF syntax).
if [ "$mailrazorgatedkimkey" != "" ] ; then
cat >> tmp.ldif <<- EOF
dn: $domain_dn
changetype: modify
add: mailrazorgatedkimkey
mailrazorgatedkimkey:: $mailrazorgatedkimkey
EOF
fi
# Apply the LDIF (if any attribute was requested); retry once after 30s.
if [ -f tmp.ldif ]; then
ssh -o StrictHostKeyChecking=no -o BatchMode=yes -p $ssh_port $user@$host $prefix/bin/ldapmodify -h $MX1_HOST_IP -p $MX1_LDAP_PORT -D $MX1_LDAP_BIND_DN -w $MX1_LDAP_BIND_PASSWORD < tmp.ldif
if [ $? -ne 0 ]; then
ts=$(date +%d/%m/%Y\ %H:%M:%S)
sleep 30
ssh -o StrictHostKeyChecking=no -o BatchMode=yes -p $ssh_port $user@$host $prefix/bin/ldapmodify -h $MX1_HOST_IP -p $MX1_LDAP_PORT -D $MX1_LDAP_BIND_DN -w $MX1_LDAP_BIND_PASSWORD < tmp.ldif
if [ $? -eq 0 ]; then
echo "WARNING: $ts ldapmodify issues adding domain $domain in test $TC_NAME (retry ok)" >> $WORK_DIR/../warnings
else
echo "WARNING: $ts ldapmodify issues adding domain $domain in test $TC_NAME (retry also failed)" >> $WORK_DIR/../warnings
fi
fi
fi
echo "TIME Domain create: $SECONDS"
exit 0
| true
|
231b3e39b16bae98bc3aecfff6324e2b826c5067
|
Shell
|
tanviredu/BASH_LEARNING_TUTORIAL
|
/func3.sh
|
UTF-8
| 816
| 3.9375
| 4
|
[] |
no_license
|
# Prompt the operator for the folder and file names used by the helpers below.
read -p "Enter the folder name : " tmp_folder
read -p "Enter the file name : " tmp_file
folderCreate(){
    ## Create directory $1 unless it already exists (progress is echoed).
    ## FIX: the path is now quoted (names with spaces no longer break the
    ## test/mkdir), the declared local is actually used, and -p creates
    ## missing parents as well.
    local folder=$1
    echo "checking if the folder exixts ......"
    if [ -d "$folder" ]
    then
        echo "Directory exists"
    else
        echo "Creating the Directory"
        mkdir -p "$folder"
    fi
}
fileCreate(){
    ## Create file $2 inside folder $1 unless it already exists.
    ## FIX: arguments are held in quoted locals so folder or file names
    ## containing whitespace no longer break the test or touch.
    local folder=$1
    local file=$2
    echo "checking if file exists"
    if [ -f "$folder/$file" ]
    then
        echo "file exists"
    else
        echo "creating the file"
        touch "$folder/$file"
    fi
}
# Open file $2 inside folder $1 in vim (interactive); prints an error
# when either the folder or the file does not exist.
editFile(){
folder=$1
file=$2
if [ -d $1 ] && [ -f $1/$2 ]
then
vim $1/$2
else
echo "path does not exists"
fi
}
executeScript(){
    ## Mark file $2 inside folder $1 as executable by everyone.
    ## FIX: the path is quoted so whitespace in names no longer breaks chmod.
    ## NOTE(review): 777 is very permissive — kept for compatibility.
    local folder=$1
    local file=$2
    chmod 777 "$folder/$file"
}
# Drive the helpers with the names read above. Extra arguments are ignored
# by functions that only use $1/$2 (folderCreate, for instance, only
# consumes the folder name).
folderCreate $tmp_folder $tmp_file
fileCreate $tmp_folder $tmp_file
editFile $tmp_folder $tmp_file
| true
|
93a6ab36c25bc709c2d91c331cca59eb85622cb4
|
Shell
|
flix-tech/k8s-meetup-talk-2016
|
/apps/helloworld/runtests.sh
|
UTF-8
| 329
| 2.703125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Wait until a helloworld pod is Running, then run the phpunit suite inside
# its php container as the www-data user.
# NOTE(review): this is a busy-wait with no sleep — it will spin a CPU core
# while waiting; consider adding a short sleep in the loop.
while true; do
POD=$(kubectl get pods -l app=helloworld --no-headers --show-all | grep Running | awk '{print $1}')
if [[ ! -z ${POD} ]]
then
break
fi
done
kubectl exec -ti ${POD} -c php -- \
su www-data -c 'XDEBUG_CONFIG="PHPSTORM" php vendor/bin/phpunit -c phpunit.xml.dist'
| true
|
3013db0cd2d924b893a6f6ff1224bd2d0d5b9e30
|
Shell
|
wade1990/git-post-receive-irc-bot
|
/post-receive
|
UTF-8
| 566
| 3.671875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# Print the arguments to stderr prefixed with "fatal:" and exit 1.
die() {
	printf 'fatal: %s\n' "$*" >&2
	exit 1
}
# git post-receive hook: relay the pushed-commit summary to an IRC channel
# via named pipes maintained by the companion `connect` script.
# $1 = network name, $2 = channel name.
[ "$#" -eq 2 ] || die "usage: $0 <network> <channel>"
owndir="$(dirname "$(readlink -f "$0")")"
network="$owndir/irc/$1/in"
channel="$owndir/irc/$1/$2/in"
[ -p "$network" ] || die "$network: not a named pipe; edit $owndir/connect script and reconnect"
# If the channel pipe is missing, ask the bot to join and wait a moment
# for the pipe to appear.
[ -p "$channel" ] || {
echo "git-bot: $1/$2 not yet joined; joining"
echo "/j $2" >"$network"
sleep 1
[ -p "$channel" ] || die "$channel: not a named pipe; edit $owndir/connect script and reconnect"
}
# Feed the hook's stdin (ref updates) through the formatter into the channel.
cat | "$owndir/post-receive-irc" >"$channel"
| true
|
c0d0d965274b7b4c3cf06c8b386448676b3a0d63
|
Shell
|
petronny/aur3-mirror
|
/kde-odf-thumbnail/PKGBUILD
|
UTF-8
| 859
| 2.828125
| 3
|
[] |
no_license
|
# Arch Linux PKGBUILD for the KDE ODF thumbnailer plugin.
pkgname=kde-odf-thumbnail
pkgver=1.0.0
pkgrel=1
pkgdesc="KDE thumbnail-plugin to generate thumbnails for ODF documents in Dolphin/Konqueror"
arch=('i686' 'x86_64')
url="http://kenai.com/projects/kde-odf-thumbnail"
license=('LGPL-2.1')
depends=('qt' 'kdebase-workspace')
makedepends=('cmake' 'automoc4')
install=$pkgname.install
source=(http://kenai.com/projects/kde-odf-thumbnail/downloads/download/$pkgver/$pkgname-$pkgver.tar.gz)
md5sums=('edf694939f4b5d5e97c3fe2f91e7cec3')
# Out-of-source CMake build rooted at the KDE4 prefix.
build() {
cd $srcdir/$pkgname-$pkgver
rm -rf build && mkdir build && cd build
cmake -DCMAKE_INSTALL_PREFIX=`kde4-config --prefix` -DCMAKE_BUILD_TYPE=Release ..
make
}
# Stage the build into $pkgdir and install the license file.
package() {
cd "$srcdir/$pkgname-$pkgver/build"
make DESTDIR="$pkgdir/" install
install -Dm644 $srcdir/$pkgname-$pkgver/LICENSE $pkgdir/usr/share/licenses/$pkgname/COPYING
}
# vim:set ts=2 sw=2 et:
| true
|
5d06355d9a85ae98628c61ab2522ae67e8ca2828
|
Shell
|
xiyuansun/bootcamp009_project
|
/Project2-WebScraping/markschott/bitcoin/misc/rr.sh
|
UTF-8
| 286
| 3.203125
| 3
|
[] |
no_license
|
#!/bin/bash
# For each URL listed in file $1, fetch the page and append its <p>...</p>
# text (tags stripped) to output file $2, wrapped in '[' ... '],' markers.
# A progress counter is appended to check.txt.
i=0
while IFS='' read -r line || [[ -n "$line" ]]; do
    echo $i >> check.txt
    i=$((i+1))
    echo "Curling from url: $line"
    echo '[' >> "$2"
    # FIX: the original passed the file operand `ptest.txt` to grep, which
    # made grep read that file and discard curl's output entirely; grep now
    # filters the fetched page from stdin as the pipeline intends. The URL
    # is also quoted so query strings are not word-split.
    curl "$line" | grep "<p>.*</p>" | sed 's/<p>//g' | sed 's/<\/p>//g' >> "$2"
    echo '],' >> "$2"
done < "$1"
| true
|
898a2e9866f9458aee521070149502ae516526c6
|
Shell
|
Ismailhachimi/trfl
|
/trfl/op_gen_main.sh
|
UTF-8
| 524
| 3.375
| 3
|
[
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Generate a Python shim module (to stdout) exposing custom TensorFlow ops.
# Usage: op_gen_main.sh path/to/op_lib.so op1,op2,...
# NOTE(review): PLATFORM is computed but never used below — confirm whether
# it is consumed by a caller or is dead code.
PLATFORM="$(uname -s | tr 'A-Z' 'a-z')"
set -e
if [ "$#" -ne 2 ]; then
echo "usage: %prog% path/to/op_lib.so op1,op2,..."
exit 1
fi
# Module preamble: load the shared op library.
cat <<EOF
import tensorflow as tf
_op_lib = tf.load_op_library(tf.resource_loader.get_path_to_datafile("$1"))
EOF
# For each comma-separated op name, convert CamelCase to snake_case (via
# an inline Python regex) and emit a module-level alias.
for name in $(echo $2 | tr "," "\n")
do
snake_name=`echo -n $name | python -c "import re, sys; [s] = sys.stdin; sys.stdout.write(re.sub(r'(.)([A-Z])', r'\1_\2', s).lower())"`
echo "$snake_name = _op_lib.$snake_name"
done
# Keep the generated module's namespace clean.
echo "del _op_lib, tf"
| true
|
dce5ad1113a6b92e79a2f4094eabb50ed7876a8d
|
Shell
|
plus3it/tardigrade-ci
|
/tests/make/terraform_lint_failure.bats
|
UTF-8
| 420
| 3.34375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bats
# Bats test: `make terraform/lint` must fail (status 2) and emit a diff when
# a nested .tf file is intentionally mis-formatted (2-space body indent).
TEST_DIR="$(pwd)/terraform_lint_failure"
function setup() {
mkdir -p "$TEST_DIR/top/nested"
cat > "$TEST_DIR/top/nested/main.tf" <<-EOF
variable "foo" {
default = "bar"
}
output "baz" {
value = var.foo
}
EOF
}
@test "terraform/lint: nested file failure" {
run make terraform/lint
[ "$status" -eq 2 ]
[[ "$output" == *"@@ -1,5 +1,5 @@"* ]]
}
# Remove the fixture tree regardless of test outcome.
function teardown() {
rm -rf "$TEST_DIR"
}
| true
|
c00882ffe75142d57e2f96e6dcb31917a6556d1f
|
Shell
|
frontstack/vagrant
|
/setup/setup.sh
|
UTF-8
| 16,722
| 3.78125
| 4
|
[
"WTFPL"
] |
permissive
|
#!/bin/bash
#
# FrontStack VM installation and provisioning script
# @author Tomas Aparicio
# @version 0.5
# @license WTFPL
#
# Download locations and working paths used throughout the script.
szip_url=http://sourceforge.net/projects/frontstack/files/packages/7zip/7z-9.20-x64.tar.gz/download
temporal=/tmp/frontstack
download_dir=$temporal/downloads
output=$temporal/output.log
# INI file next to this script, named after it ("setup.sh" -> "setup.ini").
# NOTE(review): there is a stray '}' after the $(readlink ...) substitution
# below, which ends up in the computed path — confirm and fix upstream.
config_file="$(dirname $(readlink -f "$0")})/$(echo ${0##*/} | sed 's/\.[^\.]*$//').ini"
install_dir='/home/vagrant/frontstack'
# default install options (you can customize them from setup.ini)
bash_profile=1
os_packages='gcc make nano wget'
conf__frontstack__reset_firewall=0
conf__frontstack__format='tar.gz'
conf__frontstack__user='vagrant'
conf__frontstack__download='http://sourceforge.net/projects/frontstack/files/latest/download'
# Abort the whole setup with message $1 when the previous command failed.
check_exit() {
  local status=$?
  if [ $status -ne 0 ]; then
    echo $1 && exit 1
  fi
}
check_sleep() {
  # Non-fatal variant of check_exit: when the previous command failed,
  # print message $1, pause briefly, and let provisioning continue.
  if [ $? -ne 0 ]; then
    echo $1
    # FIX: bash `echo '\n...'` printed a literal backslash-n; printf emits
    # the intended blank line before the message.
    printf '\nContinuing with the provisioning...\n'
    sleep 2
  fi
}
make_dir() {
  # Create directory $1 if it does not exist.
  # FIX: the path is quoted (spaces no longer word-split), and -p creates
  # missing parent directories — backward compatible with the original
  # single-level mkdir.
  if [ ! -d "$1" ]; then
    mkdir -p "$1"
  fi
}
# Print "1" when $1 resolves to a command/function/builtin, "0" otherwise.
exists() {
  if type $1 >/dev/null 2>&1; then
    echo 1
  else
    echo 0
  fi
}
# Recursively hand ownership of $1 to the configured FrontStack user,
# logging chown's output to $output.
set_folder_permissions() {
  chown -R $conf__frontstack__user $1 >> $output
}
# Install the given package(s) using whichever package manager the host
# provides (apt-get, zypper, urpmi, yum or pacman). Once no manager is
# found, $nopkgmanager is set so subsequent calls become no-ops.
install_package() {
  if [ -z $nopkgmanager ]; then
    # Debian, Ubuntu and derivatives (with apt-get)
    if which apt-get &> /dev/null; then
      apt-get install -y "$@"
    # OpenSuse (with zypper)
    elif which zypper &> /dev/null; then
      zypper install -y "$@"
    # Mandriva (with urpmi)
    elif which urpmi &> /dev/null; then
      urpmi "$@"
    # Fedora and CentOS (with yum)
    elif which yum &> /dev/null; then
      yum install -y "$@"
    # ArchLinux (with pacman)
    elif which pacman &> /dev/null; then
      pacman -Sy "$@"
    # Else, if no package manager has been founded
    else
      # Set $nopkgmanager
      nopkgmanager=1
      echo "ERROR: impossible to found a package manager in your system. Install '$@' manually"
    fi
  fi
}
# Append the FrontStack environment loader (and any active proxy settings)
# to the profile file $1, creating it with a bash shebang if missing, then
# hand it to the FrontStack user and mark it executable.
save_bash_profile() {
  # Append http(s)_proxy/no_proxy exports to $1 when they are set in the
  # current environment.
  save_proxy_vars() {
    if [ -f $1 ]; then
      if [ ! -z "$http_proxy" ]; then
        echo "http_proxy=$http_proxy" >> $1
      fi
      if [ ! -z "$https_proxy" ]; then
        echo "https_proxy=$https_proxy" >> $1
      fi
      if [ ! -z "$no_proxy" ]; then
        echo "no_proxy=$no_proxy" >> $1
      fi
    fi
  }
  # chown + chmod the profile file; output is logged to $output.
  set_permissions() {
    chown $conf__frontstack__user $1 >> $output
    chmod +x $1 >> $output
  }
  if [ ! -f $1 ]; then
    echo '#!/bin/bash' > $1
  fi
  # NOTE(review): this feeds the grep match into exists(), i.e. it treats
  # the matched LINE as a command name — and it appends only when the test
  # yields 1, which looks inverted ("append when already present"). Verify
  # the intended guard is "append when $install_dir is NOT yet referenced".
  if [ $(exists `cat $1 | grep "$install_dir"`) -eq 1 ]; then
    echo "if [ -f $install_dir/bash.sh ]; then" >> $1
    echo "  . $install_dir/bash.sh" >> $1
    echo 'fi' >> $1
    # add proxy vars
    save_proxy_vars $1
    set_permissions $1
  fi
}
proxy_auth() {
  # Emit wget proxy credential flags when a proxy user is configured in
  # setup.ini; the output ends with a space so it can be inlined directly
  # into a command line. Prints nothing when no proxy user is set.
  # FIX: the test operand is quoted, so a value containing whitespace no
  # longer makes `[` fail; `-n` replaces the double-negative `! -z`.
  if [ -n "$conf__proxy__user" ]; then
    echo "--proxy-user=$conf__proxy__user --proxy-password=$conf__proxy__password "
  fi
}
# Poll a wget-style log file $1 and display a one-line progress indicator
# (percentage/speed/ETA scraped from the log) until the sentinel file $2
# appears; $2 is expected to contain the downloader's exit code ('0' on
# success, '6' for authentication failure). Exits the script on error.
download_status() {
  if [ -f $1 ]; then
    while : ; do
      sleep 1
      # Latest "NN% ... Xs" progress token from the log.
      local speed=$(echo `cat $1 | grep -oh '\([0-9.]\+[%].*[0-9.][s|m|h|d]\)' | tail -1`)
      echo -n ">> Downloading... $speed"
      # Carriage return keeps the status on a single line.
      echo -n R | tr 'R' '\r'
      if [ -f $2 ]; then
        sleep 1
        local error=$(echo `cat $2`)
        if [ $error != '0' ]; then
          echo
          if [ $error == '6' ]; then
            echo "Server authentication error, configure setup.ini properly. See $output"
          else
            echo "Download error, exit code '$error'. See $output"
          fi
          exit $?
        fi
        break
      fi
    done
  fi
}
# Reset the firewall completely: flush and delete all rules/chains in the
# filter, nat and mangle tables and set the default policies to ACCEPT.
# Requires root. Used when conf__frontstack__reset_firewall is enabled.
iptables_flush() {
  iptables -F
  iptables -X
  iptables -t nat -F
  iptables -t nat -X
  iptables -t mangle -F
  iptables -t mangle -X
  iptables -P INPUT ACCEPT
  iptables -P FORWARD ACCEPT
  iptables -P OUTPUT ACCEPT
}
#
# Copyright (c) 2009 Kevin Porter / Advanced Web Construction Ltd
# (http://coding.tinternet.info, http://webutils.co.uk)
# Copyright (c) 2010-2012 Ruediger Meier <sweet_f_a@gmx.de>
# (https://github.com/rudimeier/)
#
# Simple INI file parser
#
read_ini() {
# Be strict with the prefix, since it's going to be run through eval
check_prefix() {
if ! [[ "${VARNAME_PREFIX}" =~ ^[a-zA-Z_][a-zA-Z0-9_]*$ ]]; then
echo "read_ini: invalid prefix '${VARNAME_PREFIX}'" >&2
return 1
fi
}
check_ini_file() {
if [ ! -r "$INI_FILE" ]; then
echo "read_ini: '${INI_FILE}' doesn't exist or not" \
"readable" >&2
return 1
fi
}
# enable some optional shell behavior (shopt)
pollute_bash() {
if ! shopt -q extglob ; then
SWITCH_SHOPT="${SWITCH_SHOPT} extglob"
fi
if ! shopt -q nocasematch ; then
SWITCH_SHOPT="${SWITCH_SHOPT} nocasematch"
fi
shopt -q -s ${SWITCH_SHOPT}
}
# unset all local functions and restore shopt settings before returning
# from read_ini()
cleanup_bash() {
shopt -q -u ${SWITCH_SHOPT}
unset -f check_prefix check_ini_file pollute_bash cleanup_bash
}
local INI_FILE=""
local INI_SECTION=""
# {{{ START Deal with command line args
# Set defaults
local BOOLEANS=1
local VARNAME_PREFIX=INI
local CLEAN_ENV=0
while [ $# -gt 0 ]
do
case $1 in
--clean | -c )
CLEAN_ENV=1
;;
--booleans | -b )
shift
BOOLEANS=$1
;;
--prefix | -p )
shift
VARNAME_PREFIX=$1
;;
*)
if [ -z "$INI_FILE" ]; then
INI_FILE=$1
else
if [ -z "$INI_SECTION" ]; then
INI_SECTION=$1
fi
fi
;;
esac
shift
done
if [ -z "$INI_FILE" ] && [ "${CLEAN_ENV}" = 0 ]; then
echo -e "Usage: read_ini [-c] [-b 0| -b 1]] [-p PREFIX] FILE"\
"[SECTION]\n or read_ini -c [-p PREFIX]" >&2
cleanup_bash
return 1
fi
if ! check_prefix ; then
cleanup_bash
return 1
fi
local INI_ALL_VARNAME="${VARNAME_PREFIX}__ALL_VARS"
local INI_ALL_SECTION="${VARNAME_PREFIX}__ALL_SECTIONS"
local INI_NUMSECTIONS_VARNAME="${VARNAME_PREFIX}__NUMSECTIONS"
if [ "${CLEAN_ENV}" = 1 ]; then
eval unset "\$${INI_ALL_VARNAME}"
fi
unset ${INI_ALL_VARNAME}
unset ${INI_ALL_SECTION}
unset ${INI_NUMSECTIONS_VARNAME}
if [ -z "$INI_FILE" ]; then
cleanup_bash
return 0
fi
if ! check_ini_file ;then
cleanup_bash
return 1
fi
# Sanitise BOOLEANS - interpret "0" as 0, anything else as 1
if [ "$BOOLEANS" != "0" ]; then
BOOLEANS=1
fi
# }}} END Options
# }}} END Deal with command line args
local LINE_NUM=0
local SECTIONS_NUM=0
local SECTION=""
# IFS is used in "read" and we want to switch it within the loop
local IFS=$' \t\n'
local Iconf__frontstack__OLD="${IFS}"
# we need some optional shell behavior (shopt) but want to restore
# current settings before returning
local SWITCH_SHOPT=""
pollute_bash
while read -r line || [ -n "$line" ]
do
#echo line = "$line"
((LINE_NUM++))
# Skip blank lines and comments
if [ -z "$line" -o "${line:0:1}" = ";" -o "${line:0:1}" = "#" ]; then
continue
fi
# Section marker?
if [[ "${line}" =~ ^\[[a-zA-Z0-9_]{1,}\]$ ]]; then
# Set SECTION var to name of section (strip [ and ] from section marker)
SECTION="${line#[}"
SECTION="${SECTION%]}"
eval "${INI_ALL_SECTION}=\"\${${INI_ALL_SECTION}# } $SECTION\""
((SECTIONS_NUM++))
continue
fi
# Are we getting only a specific section? And are we currently in it?
if [ ! -z "$INI_SECTION" ]; then
if [ "$SECTION" != "$INI_SECTION" ]; then
continue
fi
fi
# Valid var/value line? (check for variable name and then '=')
if ! [[ "${line}" =~ ^[a-zA-Z0-9._]{1,}[[:space:]]*= ]]; then
echo "Error: Invalid line:" >&2
echo " ${LINE_NUM}: $line" >&2
cleanup_bash
return 1
fi
# split line at "=" sign
IFS="="
read -r VAR VAL <<< "${line}"
IFS="${Iconf__frontstack__OLD}"
# delete spaces around the equal sign (using extglob)
VAR="${VAR%%+([[:space:]])}"
VAL="${VAL##+([[:space:]])}"
VAR=$(echo $VAR)
# Construct variable name:
# ${VARNAME_PREFIX}__$SECTION__$VAR
# Or if not in a section:
# ${VARNAME_PREFIX}__$VAR
# In both cases, full stops ('.') are replaced with underscores ('_')
if [ -z "$SECTION" ]; then
VARNAME=${VARNAME_PREFIX}__${VAR//./_}
else
VARNAME=${VARNAME_PREFIX}__${SECTION}__${VAR//./_}
fi
eval "${INI_ALL_VARNAME}=\"\${${INI_ALL_VARNAME}# } ${VARNAME}\""
if [[ "${VAL}" =~ ^\".*\"$ ]]
then
# remove existing double quotes
VAL="${VAL##\"}"
VAL="${VAL%%\"}"
elif [[ "${VAL}" =~ ^\'.*\'$ ]]
then
# remove existing single quotes
VAL="${VAL##\'}"
VAL="${VAL%%\'}"
elif [ "$BOOLEANS" = 1 ]
then
# Value is not enclosed in quotes
# Booleans processing is switched on, check for special boolean
# values and convert
# here we compare case insensitive because
# "shopt nocasematch"
case "$VAL" in
yes | true | on )
VAL=1
;;
no | false | off )
VAL=0
;;
esac
fi
# enclose the value in single quotes and escape any
# single quotes and backslashes that may be in the value
VAL="${VAL//\\/\\\\}"
VAL="\$'${VAL//\'/\'}'"
eval "$VARNAME=$VAL"
done <"${INI_FILE}"
# return also the number of parsed sections
eval "$INI_NUMSECTIONS_VARNAME=$SECTIONS_NUM"
cleanup_bash
}
# check OS architecture: the bundled stack only ships 64 bit binaries
if [ "`uname -m`" != "x86_64" ]; then
echo 'FrontStack only supports 64 bit based OS. Cannot continue' && exit 1
fi
# check if run as root (package installs, chown and setenforce require it)
if [ "`id -u`" -ne 0 ]; then
echo 'You must run the installer like a root user. Cannot continue' && exit 1
fi
# optional first argument: path to an alternative config ini file.
# fix: the original line read '$config_file="$1"', which expands the
# variable and tries to execute the result as a command instead of
# assigning to config_file.
if [ ! -z "$1" ] && [ -f "$1" ]; then
config_file="$1"
fi
# disabling SELinux if enabled; $selinux_status keeps the pre-install
# state so the end of the script can restore it
if [ -f "/usr/sbin/getenforce" ] ; then
selinux_status=`/usr/sbin/getenforce`
/usr/sbin/setenforce 0 2> /dev/null
fi
# read config file
# read_ini -p conf exports every ini key as conf__<section>__<key>;
# check_exit is presumably a die-on-nonzero helper defined earlier in the
# full script (not visible in this chunk) -- confirm.
if [ -f $config_file ]; then
read_ini $config_file -p conf
check_exit "Error while parsing config ini file: $config_file"
# optional [proxy] section: picked up by the wget calls further down
if [ ! -z "$conf__proxy__http_proxy" ]; then
http_proxy=$conf__proxy__http_proxy
fi
if [ ! -z "$conf__proxy__https_proxy" ]; then
https_proxy=$conf__proxy__https_proxy
fi
if [ ! -z "$conf__proxy__no_proxy" ]; then
no_proxy=$conf__proxy__no_proxy
fi
# optional override of the installation directory
if [ ! -z "$conf__frontstack__install" ]; then
install_dir=$conf__frontstack__install
fi
fi
# creates temporal directories
make_dir $temporal
make_dir $download_dir
# a VERSION file inside $install_dir marks a completed install; bail out
# instead of reinstalling over it
if [ -d $install_dir ] && [ -f $install_dir/VERSION ]; then
echo "FrontStack is already installed" && exit 0
fi
# wget is needed for every download below; 'exists' appears to echo 0
# when the command is missing (see its use here) -- confirm against its
# definition earlier in the script
if [ `exists wget` -eq 0 ]; then
install_package wget > /dev/null
fi
# optionally flush firewall rules ([frontstack] reset_firewall = 1)
if [ "$conf__frontstack__reset_firewall" -eq 1 ]; then
iptables_flush
fi
cat <<EOF
-------------------------------------
Welcome to FrontStack
-------------------------------------
Minimal requirements:
* GNU/Linux 64 bit
* 512MB RAM
* 1GB free disk space
* Internet access (HTTP/S)
EOF
# connectivity probe: fetch an arbitrary page through the optional proxy
wget $(proxy_auth) http://yahoo.com -O $download_dir/test.html > $output 2>&1
check_exit "No Internet HTTP connectivity. Check if you are behind a proxy and your authentication credentials. See $output"
rm -f $download_dir/test.*
# Select the decompression tool for the configured package format.
# fix: this block read $conf_frontstack_format (single underscores) while
# read_ini and the rest of the script use $conf__frontstack__format, so
# the 7z/zip and rpm branches could never trigger.
if [ "$conf__frontstack__format" == '7z' ] || [ "$conf__frontstack__format" == 'zip' ]; then
if [ `exists 7z` -eq 0 ]; then
# 7z is not installed: download a standalone build once and cache it
if [ ! -f $temporal/7zip/7z ]; then
echo -n "Downloading 7z... "
wget $(proxy_auth) $szip_url -O $download_dir/7z.tar.gz >> $output 2>&1
check_exit "Error while trying to download 7z. See $output"
echo "done!"
echo -n "Extracting 7z... "
make_dir $temporal/7zip/
tar xvfz $download_dir/7z.tar.gz -C $temporal/7zip/ >> $output 2>&1
check_exit "Error while trying to extract 7z.tar.gz. See $output"
echo "done!"
fi
# fix: set COMPRESSBIN outside the download branch so a copy cached by
# a previous run is also used (it was only assigned right after a
# fresh download before)
COMPRESSBIN=$temporal/7zip/7z
else
# fix: a system-wide 7z left COMPRESSBIN empty before; use it
COMPRESSBIN=$(echo `which 7z`)
fi
elif [ "$conf__frontstack__format" == 'rpm' ]; then
if [ -z $(echo `which rpm`) ]; then
echo 'rpm package not supported, use another. Cannot continue'
exit 1
fi
else
# set default to tar
COMPRESSBIN=$(echo `which tar`)
fi
echo "Downloading lastest version of the FrontStack dev environment"
echo "Note this may take some minutes depending of your connection bandwidth... "
echo
# $download_dir/download doubles as a status channel: the backgrounded
# wget writes its exit code there and download_status polls it
if [ -f $download_dir/download ]; then
rm -f $download_dir/download
fi
# download stack distribution
# wget runs in the background inside a command substitution; whichever
# branch of &&/|| runs, the exit code lands in $download_dir/download
if [ -z "$conf__frontstack__http_user" ]; then
`wget $(proxy_auth) -F $conf__frontstack__download -O $download_dir/frontstack-latest.$conf__frontstack__format > $output 2>&1 && echo $? > $download_dir/download || echo $? > $download_dir/download` &
download_status $output $download_dir/download
else
# same download, adding HTTP basic auth credentials from the config
`wget $(proxy_auth) -F --user=$conf__frontstack__http_user --password=$conf__frontstack__http_password $conf__frontstack__download -O $download_dir/frontstack-latest.$conf__frontstack__format > $output 2>&1 && echo $? > $download_dir/download || echo $? > $download_dir/download` &
download_status $output $download_dir/download
fi
check_exit "Error while trying to download FrontStack. See $output"
if [ $conf__frontstack__format == 'rpm' ]; then
if [ `exists rpm` -eq 0 ]; then
echo "No rpm binary found. Cannot continue" && exit 1
fi
echo -n "Installing RPM... "
rpm -ivh $download_dir/frontstack-latest.$conf__frontstack__format >> $output 2>&1
check_exit "Error while trying to install the RPM. See $output"
echo 'done!'
else
# archive path: COMPRESSBIN was chosen in the format-selection block above
echo -n 'Extracting (this may take some minutes)... '
make_dir $install_dir
if [ $conf__frontstack__format == '7z' ]; then
$COMPRESSBIN e -o$install_dir -y $download_dir/frontstack-latest.$conf__frontstack__format >> $output 2>&1
else
$COMPRESSBIN xvfz $download_dir/frontstack-latest.$conf__frontstack__format -C $install_dir >> $output 2>&1
fi
check_exit "Error while trying to extract FrontStack. See $output"
echo 'done!'
fi
# set file permissions (by default Vagrant uses the root user to run the provisioning tasks/scripts)
if [ ! -z $conf__frontstack__user ]; then
echo "Setting permissions for the '$conf__frontstack__user' user..."
echo
chown -R $conf__frontstack__user $install_dir >> $output
check_exit "Error while trying to set files permissions. See $output"
# load FrontStack environment variables at session startup (.bash_profile, .profile, .bashrc)
if [ $bash_profile -eq 1 ] && [ -d "/home/$conf__frontstack__user" ]; then
save_bash_profile "/home/$conf__frontstack__user/.bash_profile"
fi
fi
# installing OS packages (beta)
# the space-separated config string is split into an array by word splitting
install_packages="${os_packages} ${conf__provision__packages}"
install_packages=( $install_packages )
for pkg in ${install_packages[@]}
do
if [ `exists "$pkg"` -eq 0 ]; then
echo "Installing $pkg..."
install_package $pkg >> $output 2>&1
check_exit "Cannot install the '$pkg' package. See $output"
fi
done
# installing Node.js packages
# NOTE: the re-wrap into a one-element array still relies on the unquoted
# ${arr[@]} expansion below to split the package list on whitespace
if [ ! -z "$conf__provision__npm" ]; then
conf__provision__npm=("$conf__provision__npm")
for pkg in ${conf__provision__npm[@]}
do
echo "Installing Node.js package '$pkg'..."
npm install $pkg >> $output 2>&1
check_sleep "Cannot install the '$pkg' package. See $output"
done
fi
# install Ruby gems
# check_sleep presumably reports the failure without aborting (contrast
# with check_exit) -- confirm against its definition
if [ ! -z "$conf__provision__gem" ]; then
conf__provision__gem=("$conf__provision__gem")
for pkg in ${conf__provision__gem[@]}
do
echo "Installing Ruby gem '$pkg'..."
gem install $pkg >> $output 2>&1
check_sleep "Cannot install the '$pkg' package. See $output"
done
fi
# custom provisioning script
# sourced (not executed) so it can read/modify this installer's variables
if [ ! -z "$conf__provision__script" ] && [ -f $conf__provision__script ]; then
[ ! -x $conf__provision__script ] && chmod +x $conf__provision__script
. "$conf__provision__script"
fi
# exec the custom post-install script
if [ ! -z "$conf__frontstack__post_install" ] && [ -f $conf__frontstack__post_install ]; then
[ ! -x $conf__frontstack__post_install ] && chmod +x $conf__frontstack__post_install
. "$conf__frontstack__post_install"
fi
# setting folders permissions to the vagrant user
if [ -d "${install_dir}/packages" ]; then
set_folder_permissions "${install_dir}/packages"
fi
# remove git required noempty file in the workspace directory
if [ -d "/home/${conf__frontstack__user}/workspace" ]; then
rm -f "/home/${conf__frontstack__user}/workspace/noempty"
fi
# restore the SELinux mode saved before the install.
# fix: the original overwrote $selinux_status with the *current* (already
# permissive) state and ran 'setenforce 1' unconditionally, turning
# SELinux on even on systems where it was disabled beforehand.
if [ -f "/usr/sbin/getenforce" ]; then
if [ "$selinux_status" = "Enforcing" ]; then
/usr/sbin/setenforce 1 2> /dev/null
fi
fi
cat <<EOF
FrontStack installed in: "$install_dir"
To enter in the VM, from the Vagrantfile directory, run:
$ vagrant ssh
EOF
| true
|
c6b659f392ca90971d2861b6048243ab43ca0227
|
Shell
|
apmanomark/app-hello
|
/source/scripts/bash/dev/install_sencha-cmd-64.sh
|
UTF-8
| 565
| 2.65625
| 3
|
[] |
no_license
|
# Download and install Sencha Cmd (64 bit) in unattended mode, then put it
# on the PATH for the remainder of this shell.
SENCHA_CMD_VERSION=5.0.0.160
UNPACK_FILENAME=sencha-cmd-64
INSTALLER_FILENAME=SenchaCmd-${SENCHA_CMD_VERSION}-linux-64
# fix: abort on the first failed step; previously a failed download or
# unzip still fell through to executing a broken installer
set -e
mkdir -p ./unpack/
# -f: fail on HTTP errors so a 404 page is not saved (and run) as the archive
curl -f -o ./unpack/${UNPACK_FILENAME}.run.zip http://cdn.sencha.com/cmd/${SENCHA_CMD_VERSION}/${INSTALLER_FILENAME}.run.zip
unzip -p ./unpack/${UNPACK_FILENAME}.run.zip >./unpack/${UNPACK_FILENAME}.run
rm ./unpack/${UNPACK_FILENAME}.run.zip
chmod +x ./unpack/${UNPACK_FILENAME}.run
./unpack/${UNPACK_FILENAME}.run --mode unattended
rm ./unpack/${UNPACK_FILENAME}.run
# make the freshly installed Cmd callable by later commands in this shell
PATH=$PATH:$HOME/bin/Sencha/Cmd/${SENCHA_CMD_VERSION}/
| true
|
21713beb8b12ad337964cdcb31968a2330dfa728
|
Shell
|
yamaszone/mac
|
/run
|
UTF-8
| 573
| 3.984375
| 4
|
[] |
no_license
|
#!/bin/bash
# Simple script to allow BATS setup and run tests easily
# Print the command overview for this runner and stop the script.
help(){
printf '%s\n' "Usage:"
printf '\t %s\t\t: %s\n' "setup" "Sets up BATS framework if not done already."
printf '\t %s\t: %s\n' "healthcheck" "Run health check."
printf '\t %s\t\t: %s\n' "tests" "Run tests."
printf '\t %s\t\t: %s\n' "help" "Show this help."
exit 0
}
# Run the BATS health-check suite; ./setup first so the framework is
# present (setup is expected to be idempotent -- confirm).
run_healthcheck(){
./setup
bats tests/bats/healthcheck_*
}
# Run the BATS test suite; ./setup first so the framework is present.
run_tests(){
./setup
bats tests/bats/test_*
}
# Dispatch the first CLI argument to the matching action. With no
# argument, show usage (help exits the script).
[[ -z "${1}" ]] && help

case "$1" in
setup) ./setup ;;
hc | healthcheck) run_healthcheck ;;
tests) run_tests ;;
*) help ;; # catch-all also covers an explicit "help" argument
esac
| true
|
4851a5e7e2978d14d511cfb1f77baab5dad844ec
|
Shell
|
srv-code/College-Lab-Exercises
|
/MCA/Sem 1/s.old2
|
UTF-8
| 4,171
| 3.6875
| 4
|
[] |
no_license
|
#!/bin/bash
set_properties() # receives prop-type & prop-val
{
# Set one tool preference for the sourced helper:
#   errcc <color>  -> error_color_code (ANSI escape, stored literally)
#   ed <editor>    -> editor command used by the 'e' alias
# Returns 1 (with a message) on an unknown type or color.
case $1 in
errcc)
case $2 in
red) error_color_code='\033[0;31m' ;;
lred) error_color_code='\033[1;31m' ;;
green) error_color_code='\033[0;32m' ;;
lgreen) error_color_code='\033[1;32m' ;;
yellow) error_color_code='\033[1;33m' ;;
*)
echo Invalid color value \($2\)
return 1
;;
esac
;;
ed)
editor=$2
;;
*)
echo Invalid property type \($1\)
return 1
;;
esac
}
unset_vars() # receives cutoff level
{
# Progressively clear the helper's working variables; a higher cutoff
# unsets more. Levels: 1 source-file vars, 2 program-name vars,
# 3 compiler vars, 100 everything (full reset).
if [ $1 -lt 1 ]; then return; fi
unset src_fname ext
if [ $1 -lt 2 ]; then return; fi
unset prg_name prg_name_len start_idx
if [ $1 -lt 3 ]; then return; fi
unset compiler dir_struct
if [ $1 -lt 100 ]; then return; fi
unset src out
}
show_usage()
{
# One line per invocation form; quoted strings instead of backslash
# escapes, output is byte-identical to the original.
printf '%s\n' 'Usage 1 : For setting aliases : s <program-source-file-path>'
printf '%s\n' 'Usage 2 : For resetting aliases : s [-r|--reset]'
printf '%s\n' 'Usage 3 : For setting properties : s [-s|--set] <property-name> <property-value>'
}
# Main entry. This file is meant to be *sourced* (it uses top-level
# 'return' and defines aliases in the caller's interactive shell).
if [ $# -eq 0 ]; then
show_usage
return
fi
# -r/--reset: drop all aliases and variables this helper created
if [ "$*" == "-r" -o "$*" == "--reset" ]; then
echo Resetting...
unset_vars 100 # clean variable usages
unalias b >& /dev/null # build
unalias r >& /dev/null # run
unalias f >& /dev/null # fill
unalias e >& /dev/null # edit
unalias sd >& /dev/null # show-developer
unalias s >& /dev/null # show
echo [done]
return
fi
# -s/--set <name> <value>: delegate to set_properties
if [ "$1" == "-s" -o "$1" == "--set" ]; then
if [ "$2" == "" -o "$3" == "" ]; then
show_usage
return
fi
set_properties $2 $3
[ $? == 0 ] && echo [done]
return
fi
### Main ###
echo Setting...
[ -z "$editor" ] && editor='subl' # default value
[ -z "$error_color_code" ] && error_color_code='\033[0;31m' # default value
src_fname=$1
# lower-cased extension = text after the last '.'
ext=`echo $src_fname | tr "." "\n" | tail -1 | tr '[:upper:]' '[:lower:]'`
# printf " [ext='%s'] \n" $ext # DEBUG
if [ "$ext" == "c" ]; then
compiler="gcc"
elif [ "$ext" == "cpp" ]; then
compiler="g++"
else
printf "Unsupported file extension found (%s)! \n" "$ext"
unset_vars 3 # clean variable usages
return
fi
# program name = basename without extension
prg_name=`echo $src_fname | tr "/" "\n" | tail -1 | tr "." "\n" | head -1`
# printf " [prg_name='%s'] \n" $prg_name # DEBUG
prg_name_len=`expr ${#prg_name} + ${#ext} + 1`
# printf " [src filename='%s' (len=%d)] \n" $prg_name.$ext $prg_name_len # DEBUG
# a leading 'src/' (any case) is stripped when mirroring into out/
if [ `echo ${src_fname:0:4} | tr '[:upper:]' '[:lower:]'` == 'src/' ]; then
start_idx=4
else
start_idx=0
fi
# printf " [start_idx=%d] \n" $start_idx # DEBUG
# directory part of the path, relative to the (optional) src/ prefix
dir_struct=${src_fname:$start_idx:`expr ${#src_fname} - $prg_name_len - $start_idx`}
# printf " [dir_struct='%s'] \n" $dir_struct # DEBUG
[ ! -d out/$dir_struct ] && mkdir -vp out/$dir_struct
src="$src_fname"
out=out/"$dir_struct""$prg_name".out
[ $start_idx -eq 4 ] && dir_struct=src/$dir_struct
[ ! -d $dir_struct ] && mkdir -vp $dir_struct
# create the source file if it does not exist yet
if [ ! -f "$src_fname" ]; then
touch "$src_fname"
if [ $? != 0 ]; then
unset_vars 3 # clean variable usages
return
fi
echo touch: "created new file '"$src_fname"'"
fi
# single-letter workflow aliases; single quotes defer expansion of
# $compiler/$src/$out/$editor to alias-use time
alias b='$compiler "$src" -o "$out"' # build
alias r='b >& /dev/null; [ $? != 0 ] && echo -e "$error_color_code [Compilation failed] \033[0m" || "$out"' # run
alias f='cat > "$src"' # fill
alias e='$editor "$src"' # edit
alias sd='clear; cat -n "$src"; echo' # show-developer
alias s='clear; cat "$src"; echo' # show
unset_vars 2 # clean variable usages
echo [done]
| true
|
f378094bad16e386d83f3d57c20b87a03db73a63
|
Shell
|
rainik/task4_2
|
/ntp_deploy.sh
|
UTF-8
| 1,609
| 3.96875
| 4
|
[] |
no_license
|
#!/bin/bash
# This script installs ntp in your system, creates cron task which every 5 minutes will check
# if the service is running and its config file is unchanged.
# If the service is stopped it will be launched by cron job and the /etc/ntp.conf will be returned
# in the initial state.
# Author: Serhii itrainik@gmail.com
#
# Directory holding the pristine ntp.conf copy and the watchdog script.
varpath=/opt/ntp
apt-get update && apt-get -y install ntp
# edit the NTP conf file
# (switch the first ubuntu pool server to ua.pool.ntp.org, drop the rest)
sed -i 's/0.ubuntu.pool.ntp.org/ua.pool.ntp.org/' /etc/ntp.conf && sed -i '/ubuntu.pool.ntp.org/d' /etc/ntp.conf
# check the folder existance and copy there our etalon file for the future comparcings
if [ -d "$varpath" ]
then
cp /etc/ntp.conf $varpath/ntp.conf.etalon
else
mkdir -p $varpath && cp /etc/ntp.conf $varpath/ntp.conf.etalon
fi
# create ntp_verify.sh file and add it to the root crontab
# NOTE: the "EOF" delimiter is quoted, so the body below is written
# literally -- $vardiff etc. expand when the cron job runs, not here.
cat <<"EOF" > $varpath/ntp_verify.sh
#!/bin/bash
vardiff=$(diff -u /etc/ntp.conf /opt/ntp/ntp.conf.etalon)
if [[ -z "$vardiff" ]]
then
if pidof ntpd
then
echo "Ntp service is running"
else
echo "Ntp service isn't running. Restarting... " | mail -s "NTP config was changed" root@localhost
/etc/init.d/ntp restart
fi
else
echo -e "NOTICE: /etc/ntp.conf was changed. Calculated diff:\n $vardiff" | mail -s "NTP config was changed" root@localhost
cp /opt/ntp/ntp.conf.etalon /etc/ntp.conf
/etc/init.d/ntp restart
fi
EOF
# makes ntp_verify executable
chmod +x $varpath/ntp_verify.sh
# modifying root cron file
# (replaces root's entire crontab with this single 5-minute entry)
echo -e "*/5 * * * * $varpath/ntp_verify.sh 2>&1" | crontab -u root -
/etc/init.d/cron restart
| true
|
d4da65371a77752a4fd82a343b95e0aac186241a
|
Shell
|
mandad/moos-ivp-manda
|
/missions/2015_portsmouth_M1/launch.sh
|
UTF-8
| 2,689
| 3.671875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#-------------------------------------------------------
# Part 1: Check for and handle command-line arguments
#   [time_warp]      bare integer: simulation time warp factor
#   --just_build/-j  only generate targ_* files, do not launch
#-------------------------------------------------------
TIME_WARP=1
JUST_MAKE="no"
for ARGI; do
if [ "${ARGI}" = "--help" -o "${ARGI}" = "-h" ] ; then
printf "%s [SWITCHES] [time_warp] \n" $0
printf " --just_build, -j \n"
printf " --help, -h \n"
exit 0;
elif [ "${ARGI//[^0-9]/}" = "$ARGI" -a "$TIME_WARP" = 1 ]; then
# purely numeric argument, and warp not set yet
TIME_WARP=$ARGI
elif [ "${ARGI}" = "--just_build" -o "${ARGI}" = "-j" ] ; then
JUST_MAKE="yes"
else
printf "Bad Argument: %s \n" $ARGI
# fix: signal failure on an unrecognized argument (was 'exit 0')
exit 1
fi
done
#-------------------------------------------------------
# Part 2: Create the .moos and .bhv files.
#-------------------------------------------------------
VNAME1="boat1" # The first vehicle community
VNAME2="boat2" # The second vehicle community
VNAME_ASV="ASV"
START_POS1="500,500"
START_POS2="760,-1180"
START_POS_ASV="0,0"
# What is nsplug? Type "nsplug --help" or "nsplug --manual"
# nsplug expands meta_* templates into targ_* files, substituting the
# KEY=VALUE macros given after -f.
nsplug meta_shoreside.moos targ_shoreside.moos -f WARP=$TIME_WARP \
VNAME="shoreside"
nsplug meta_vehicle.moos targ_$VNAME1.moos -f WARP=$TIME_WARP VTYPE=SHIP \
VNAME=$VNAME1 START_POS=$START_POS1 VLENGTH=15 \
VPORT="9001" SHARE_LISTEN="9301"
nsplug meta_vehicle.bhv targ_$VNAME1.bhv -f VNAME=$VNAME1 START_POS=$START_POS1
nsplug meta_vehicle.moos targ_$VNAME2.moos -f WARP=$TIME_WARP VTYPE=SHIP \
VNAME=$VNAME2 START_POS=$START_POS2 VLENGTH=15 \
VPORT="9002" SHARE_LISTEN="9302"
nsplug meta_vehicle.bhv targ_$VNAME2.bhv -f VNAME=$VNAME2 START_POS=$START_POS2
nsplug meta_asv.moos targ_asv.moos -f WARP=$TIME_WARP VTYPE=SHIP \
VNAME=$VNAME_ASV START_POS=$START_POS_ASV VLENGTH=4 \
VPORT="9003" SHARE_LISTEN="9303"
# with --just_build, stop after generating the config files
if [ ${JUST_MAKE} = "yes" ] ; then
exit 0
fi
#-------------------------------------------------------
# Part 3: Launch the processes
# Each MOOS community is started in the background; uMAC then blocks
# until the operator quits, after which all four are killed.
#-------------------------------------------------------
printf "Launching $VNAME1 MOOS Community (WARP=%s) \n" $TIME_WARP
pAntler targ_$VNAME1.moos >& /dev/null &
sleep .5
printf "Launching $VNAME2 MOOS Community (WARP=%s) \n" $TIME_WARP
pAntler targ_$VNAME2.moos >& /dev/null &
sleep .5
printf "Launching $VNAME_ASV MOOS Community (WARP=%s) \n" $TIME_WARP
pAntler targ_asv.moos >& /dev/null &
sleep .5
# fix: $SNAME was never defined anywhere in this script, so the message
# printed an empty community name; name the shoreside community explicitly
printf "Launching shoreside MOOS Community (WARP=%s) \n" $TIME_WARP
pAntler targ_shoreside.moos >& /dev/null &
printf "Done \n"
uMAC targ_shoreside.moos
printf "Killing all processes ... \n"
kill %1 %2 %3 %4
printf "Done killing processes. \n"
| true
|
e995ab21fa8e9d14339d3c3e82c80fa919a729cb
|
Shell
|
google/gvisor-containerd-shim
|
/test/e2e/runtime-handler/usage.sh
|
UTF-8
| 534
| 2.859375
| 3
|
[
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
#!/bin/bash
# A sample script for testing the gvisor-containerd-shim
# using runtime handler.
# -e: abort on error; -x: echo each command for documentation purposes.
set -ex
{ # Step 1: Pull the nginx image
sudo crictl pull nginx
}
{ # Step 2: Create sandbox.json
# the here-doc is written verbatim to sandbox.json (it contains no $ or
# backquotes, so the unquoted EOF delimiter performs no expansions)
cat <<EOF | tee sandbox.json
{
"metadata": {
"name": "nginx-sandbox",
"namespace": "default",
"attempt": 1,
"uid": "hdishd83djaidwnduwk28bcsb"
},
"linux": {
},
"log_directory": "/tmp"
}
EOF
}
{ # Step 3: Create the sandbox
# SANDBOX_ID is unused within this snippet; presumably later steps of the
# full walkthrough consume it -- confirm against the e2e docs
SANDBOX_ID=$(sudo crictl runp --runtime runsc sandbox.json)
}
| true
|
fdad24e60ade693d1ea80ee42da1719041a45d19
|
Shell
|
LaplaceKorea/mimic-cross
|
/target/setup.sh
|
UTF-8
| 1,182
| 2.953125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash -eu
# Target-side setup for mimic-cross: record architectures, overlay the
# host's libraries/tools into this rootfs via symlinks, and install the
# mimic wrapper binaries.
arch >/mimic-cross/arch
HOST_ARCH=$(cat /host/mimic-cross/arch)
# prepend the mimic deploy bin dir to every login shell's PATH
echo PATH=\"/mimic-cross/deploy/bin:"$PATH"\" >/etc/environment
#shellcheck disable=SC2046
# realpath prints both resolved paths; ln -s then links the host lib dir
# into /usr/lib/ -- assumes exactly these two arguments resolve (confirm)
ln -s $(realpath /host/usr/lib/"$HOST_ARCH"-linux-gnu /usr/lib/)
# on distros where /lib is not merged into /usr/lib, link the host libs too
if [[ $(realpath /lib) != /usr/lib ]]; then
ln -s /host/lib/"$HOST_ARCH"-linux-gnu /lib
fi
# the host's x86-64 dynamic loader, needed to run host binaries in here
mkdir -p /lib64
ln -s /host/lib64/ld-linux-x86-64.* /lib64/
ln -s /host/usr/aarch64-linux-gnu /usr
mkdir -p /var/log/mimic-cross
mkdir -p /mimic-cross/deploy/host
mkdir -p /mimic-cross/deploy/target
mkdir -p /mimic-cross/deploy/bin
# install the wrapper commands (apt-get shim + mimic runners)
cp /mimic-cross/bin/apt-get /usr/local/bin/
cp /mimic-cross/bin/mimic-deploy /usr/local/bin/
cp /mimic-cross/bin/mimic-host-run /usr/local/bin/
cp /mimic-cross/bin/mimic-dual-run /usr/local/bin/
# mirror apt sources/keys and DNS config into the host rootfs
[[ "$(ls -A /etc/apt/sources.list.d/)" ]] && cp /etc/apt/sources.list.d/* /host/etc/apt/sources.list.d/
cp /etc/apt/trusted.gpg.d/* /host/etc/apt/trusted.gpg.d/
cp /etc/resolv.conf /host/etc/resolv.conf
# install the same gnupg packages inside the host chroot, then restore
# the host's original resolv.conf
dpkg -l | awk '/gnupg/ {print $2}' | xargs -r /host/"$(which chroot)" /host apt-get install
cp /host/etc/resolv.conf.orig /host/etc/resolv.conf
#shellcheck disable=SC1091
. /mimic-cross/hostize_installed_packages.sh
| true
|
7124e662aa3a93e665ee7edb28cda98a32ae1dd8
|
Shell
|
bobc/R2C2_Firmware
|
/Firmware/tag_version
|
UTF-8
| 169
| 3.515625
| 4
|
[] |
no_license
|
#!/bin/bash
# Tag the current commit as a firmware release (Firmware_vX.Y.Z) and push
# the tag to origin.
if [ "$1" == "" ] ; then
echo "Usage: tag_version x.y.z"
# fix: a missing argument is an error; previously the script exited 0
exit 1
else
echo "Tagging version " $1 "..."
git tag Firmware_v$1
git push origin Firmware_v$1
fi
| true
|
7e249fb41fcc2480ece29434e416b02818e3aab1
|
Shell
|
mangoalx/scripts
|
/androidscripts/usb-switch-test.sh
|
UTF-8
| 1,485
| 3.21875
| 3
|
[] |
no_license
|
#!/system/bin/sh
# Toggle a PCA953x GPIO that switches a USB mux between two mass-storage
# devices, and verify a different device (different filesystem UUID)
# appears on each side of the switch.
TAG="${0##*/}"
insmod /system/lib/modules/gpio-pca953x.ko
# expose GPIO 100 (the mux select line) via sysfs and drive it low
echo 100 > /sys/class/gpio/export
echo out > /sys/class/gpio/gpio100/direction
echo 0 > /sys/class/gpio/gpio100/value
sleep 1 # assume default value was 0 and device is already mounted
# find the removable block device (sdX) and a matching /dev/block entry.
# fix: dropped the useless 'eval' wrappers -- globbing and command
# substitution already happen without them, and eval only added a
# re-parsing hazard.
DEVICE_NAME="$(grep -Hv ^0$ /sys/block/*/removable | sed -n 's/^.*\(sd.\).*/\1/p')"
BLOCK_NAME="$(ls /dev/block | sed -n "s/\(${DEVICE_NAME}.\)/\1/p")"
if [ -n "${BLOCK_NAME}" ]; then
UUID1="$(blkid /dev/block/${BLOCK_NAME} | sed 's/^[^"].*UUID="\([^"]*\)".*/\1/')"
else
UUID1=""
fi
# flip the mux to the second device and wait for automount
echo 1 > /sys/class/gpio/gpio100/value
sleep 4 # wait for automount
DEVICE_NAME="$(grep -Hv ^0$ /sys/block/*/removable | sed -n 's/^.*\(sd.\).*/\1/p')"
BLOCK_NAME="$(ls /dev/block | sed -n "s/\(${DEVICE_NAME}.\)/\1/p")"
if [ -n "${BLOCK_NAME}" ]; then
UUID2="$(blkid /dev/block/${BLOCK_NAME} | sed 's/^[^"].*UUID="\([^"]*\)".*/\1/')"
else
UUID2=""
fi
# a pass requires both devices present and carrying different UUIDs
if [ -n "${UUID1}" ] && [ -n "${UUID2}" ]; then
if [ "${UUID1}" != "${UUID2}" ]; then
echo "USB-SWITCH-TEST: PASS"
else
echo "USB-SWITCH-TEST: FAIL -- Switch failed, same UUID"
fi
elif [ -n "${UUID1}" ]; then
echo "USB-SWITCH-TEST: FAIL -- Device #2 not found"
elif [ -n "${UUID2}" ]; then
echo "USB-SWITCH-TEST: FAIL -- Device #1 not found"
else
echo "USB-SWITCH-TEST: FAIL -- No devices found"
fi
# restore the mux, release the GPIO and unload the driver
echo 0 > /sys/class/gpio/gpio100/value
echo 100 > /sys/class/gpio/unexport
rmmod gpio_pca953x
| true
|
50e5c0aea4703ebaab06c5c5ed8356f835747e1c
|
Shell
|
DennisGoldfarb/MS-Demix
|
/scripts/SLURM_pipeline.sh
|
UTF-8
| 1,738
| 3.046875
| 3
|
[] |
no_license
|
#!/bin/bash
#SBATCH --job-name=demix
#SBATCH --array=1-100
#SBATCH -t 01-00:00:00
#SBATCH --mem=16g
#SBATCH --ntasks=4
#SBATCH --output=/pine/scr/d/e/dennisg/MS-Demix/log/Deconvolution_%A_%a.out
#SBATCH --error=/pine/scr/d/e/dennisg/MS-Demix/log/Deconvolution_%A_%a.err
# Array job: each task processes a strided slice of spectra ($numPer at a
# time), generating a model per batch and evaluating it in MATLAB.
module load matlab
source ./config.sh
mkdir -p ${ROOT_OUT_DIR}
cd ${SOURCE_DIR}/matlab/
# determine number of spectra in file
#NUMSPECTRA=`less $1 | sed -n 's/.*<spectrumList count=\"\([0-9]*\).*/\1/p'`
NUMSPECTRA=1000
#103284
# process each spectrum
numPer=10
# each array task starts at its own offset and strides by tasks*numPer
step=$(($SLURM_ARRAY_TASK_COUNT * $numPer))
start=$((($SLURM_ARRAY_TASK_ID-1) * $numPer + 1))
for i in `seq $start $step $NUMSPECTRA`;
do
echo "STARTING SCRIPT AT:"
echo $i
# generate model
${BUILD_DIR}/ProcessSingleSpectrum ${DATA_DIR}/HELA_2017-10-24_CID_OT.mzML $i $numPer 2 3 4 ${ROOT_OUT_DIR}
for j in `seq 0 $(($numPer-1))`;
do
scanID=$(($i+$j))
echo "CURRENT SCAN ID:"
echo $scanID
# skip scans for which no model was produced (e.g. not an MS2)
if [ ! -f ${ROOT_OUT_DIR}/A_${scanID}.bin ]; then
continue
fi
# use return code to figure out if it was an MS2
#if [[ $? != 0 ]]; then continue; fi
# evaluate model in matlab
algName='NNLS-sparseGroupLasso'
lambda1=0.1
lambda2=0.1
alpha=0.5
deisotope=0
calcPrecursorMass=1
param="demix('"${ROOT_OUT_DIR}"','"${scanID}"','"${algName}"',"${lambda1}","${lambda2}","${alpha}","${deisotope}","${calcPrecursorMass}");quit force"
# fix: $param contains a space ("quit force"); unquoted, word splitting
# passed only the first half to -r and "force" as a stray argument
matlab -nodesktop -nosplash -r "$param"
# execute crux
# clean up
rm ${ROOT_OUT_DIR}/A_${scanID}.bin
rm ${ROOT_OUT_DIR}/b_${scanID}.bin
rm ${ROOT_OUT_DIR}/groupWeights_${scanID}.bin
rm ${ROOT_OUT_DIR}/indices_${scanID}.bin
rm ${ROOT_OUT_DIR}/mz_${scanID}.tab
rm ${ROOT_OUT_DIR}/precursorOptions_${scanID}.tab
done
done
| true
|
0dbf3a77bcc03213f583f26a819abc173114f2e2
|
Shell
|
kvpb/.files
|
/.ubuntu
|
UTF-8
| 18,477
| 2.90625
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
exec 2>> ${HOME}/Temporary/log
set -xv
printf '%s\n' ''
printf ' '; printf '\033[48:2:255:165:0m\033[48;2;255;255;255;0#m%s\033[m\n' ' | | | ,'
printf ' '; printf '\033[48;2;255;165;0m\033[48;2;255;255;255;0#m%s\033[m\n' ' | | |-. | | |^. |- | |'
printf ' '; printf '\033[48;2;255;165;0m\033[48;2;255;255;255;0#m%s\033[m\n' ' . |/| |_/ |/| | | \__ |/|'
printf '%s\n' ''
printf ' LICENSED BY\n'
printf ' ${LICENSOR}\n'
printf '%s\n' ''
printf '%s\n' " K K V V PPP BB ' SS"
printf '%s\n' ' KK V V PPP BBB S'
printf '%s\n' ' K K V P BBB SS'
printf '%s\n' ''
printf '%s\n' ' WARNING: USE THIS SCRIPT AT YOUR OWN RISK.'
printf '%s\n' ''
printf '\033[48;2;255;165;255;0#m%s\033[m\n' " ALL THE COMPUTER PROGRAMS AND SOFTWARE ARE PROVIDED 'AS IS' WITHOUT WARRANTY"
printf '%s\n' ' OF ANY KIND. WE MAKE NO WARRANTIES, EXPRESS OR IMPLIED, THAT THEY ARE FREE'
printf '%s\n' ' OF ERROR, OR ARE CONSISTENT WITH ANY PARTICULAR STANDARD OF MERCHANTABILITY,'
printf '%s\n' ' OR THAT THEY WILL MEET YOUR REQUIREMENTS FOR ANY PARTICULAR APPLICATION. THEY'
printf '%s\n' ' SHOULD NOT BE RELIED ON FOR SOLVING A PROBLEM WHOSE INCORRECT SOLUTION COULD'
printf '%s\n' ' RESULT IN INJURY TO A PERSON OR LOSS OF PROPERTY. IF YOU DO USE THEM'
printf '%s\n' ' IN SUCH A MANNER, IT IS AT YOUR OWN RISK. THE AUTHOR AND PUBLISHER DISCLAIM'
printf '%s\n' ' ALL LIABILITY FOR DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES RESULTING FROM'
printf '%s\n' ' YOUR USE OF THE PROGRAMS.'
printf '%s\n' ''
# Display the title screen with disclaimer.
sudo -v # Ask for an administrator password.
#while true; do sudo -n true; sleep 60; kill -0 "$$" || exit; done 2>/dev/null & # Update the sudo timestamp, until the script is over.
printf '\033[48;2;255;255;0;0#m%s\033[m\n' 'First-Party Settings: Systemwide And User-Specific Preferences'
printf '%s\n' 'Unnamed'
printf '%s\n' 'dash dash/sh boolean false' | sudo debconf-set-selections
sudo DEBIAN_FRONTEND=noninteractive dpkg-reconfigure dash
printf '%s\n' 'dash dash/sh boolean true' | sudo debconf-set-selections
# Set the default system shell.
printf '%s\n' 'Network'
s[0]="${USER^^}" # requires BASH 4+.
s[1]="$(. /etc/os-release && printf '%s' ${NAME})"
[ -z "$(sudo dmesg | grep 'QEMU\|VirtualBox\|VMware\|Parallels' 2>/dev/null)" ] && s[2]='PC' || s[2]='VM'
sudo hostname "${s_0}s${s_1}${s_2}"
unset s
# Set the host name of the operating system.
sh ./bin/patchhosts.sh # Set the static table lookup for hostnames.
printf '%s\n' 'Keyboard'
gsettings set org.gnome.desktop.peripherals.keyboard repeat-interval 20 #
gsettings set org.gnome.desktop.peripherals.keyboard delay 200 #
printf '%s\n' 'Dock'
gsettings set org.gnome.shell.extensions.dash-to-dock dock-position 'LEFT' #
gsettings set org.gnome.shell.extensions.dash-to-dock extend-height false #
gsettings set org.gnome.shell.extensions.dash-to-dock unity-backlit-items false #
gsettings set org.gnome.shell.extensions.dash-to-dock dash-max-icon-size 24 #
gsettings set org.gnome.shell favorite-apps "[
'terminator.desktop',
'org.gnome.Terminal.desktop',
'org.gnome.Nautilus.desktop',
'firefox.desktop',
'gnome-system-monitor_gnome-system-monitor.desktop',
'xpad.desktop',
'code.desktop',
'org.gnome.gedit.desktop',
'libreoffice-writer.desktop',
'gimp.desktop',
'inkscape.desktop',
'vlc.desktop',
'ProtonMail_Bridge.desktop',
'org.gnome.Geary.desktop',
'signal-desktop.desktop',
'org.kde.kaddressbook.desktop',
'transmission-gtk.desktop'
]" #
printf '%s\n' 'Files'
gsettings set org.gtk.Settings.FileChooser show-hidden 'true' #
gsettings set org.gnome.nautilus.preferences search-filter-time-type 'last_modified' #
gsettings set org.gnome.nautilus.preferences default-sort-order 'name' #
gsettings set org.gnome.nautilus.preferences recursive-search 'always' #
gsettings set org.gnome.nautilus.preferences open-folder-on-dnd-hover false #
gsettings set org.gnome.nautilus.preferences default-sort-in-reverse-order false #
gsettings set org.gnome.nautilus.preferences show-hidden-files false #
gsettings set org.gnome.nautilus.preferences tabs-open-position 'after-current-tab' #
gsettings set org.gnome.nautilus.preferences always-use-location-entry false #
gsettings set org.gnome.nautilus.preferences show-image-thumbnails 'never' #
gsettings set org.gnome.nautilus.preferences confirm-trash true #
gsettings set org.gnome.nautilus.preferences search-view 'list-view' #
gsettings set org.gnome.nautilus.preferences thumbnail-limit uint64 10 #
gsettings set org.gnome.nautilus.preferences mouse-back-button 8 #
gsettings set org.gnome.nautilus.preferences click-policy 'double' #
gsettings set org.gnome.nautilus.preferences mouse-forward-button 9 #
gsettings set org.gnome.nautilus.preferences show-create-link true #
gsettings set org.gnome.nautilus.preferences show-directory-item-counts 'always' #
gsettings set org.gnome.nautilus.preferences mouse-use-extra-buttons true #
gsettings set org.gnome.nautilus.preferences executable-text-activation 'display' #
gsettings set org.gnome.nautilus.preferences show-delete-permanently true #
gsettings set org.gnome.nautilus.preferences fts-enabled true #
gsettings set org.gnome.nautilus.preferences default-folder-viewer 'list-view' #
gsettings set org.gnome.nautilus.preferences use-experimental-views false #
gsettings set org.gnome.nautilus.preferences install-mime-activation true #
#gsettings set org.gnome.desktop.background picture-uri 'file:////usr/share/gnome-control-center/pixmaps/noise-texture-light.png' # Set the default desktop background.
gsettings set org.gnome.desktop.background picture-options 'none'
gsettings set org.gnome.desktop.background primary-color '#000000'
# Set the background of the desktop. # I want a permanent terminal emulator as a wallpaper. How do I get that?
printf '\033[48;2;255;255;0;0#m%s\033[m\n' 'Second-Party Settings: User-Specific Filesystem Customs'
# Download .files from GitHub into ${HOME}/.files.
# fix: the 'rm -rf' line had no trailing backslash, so the following
# '&& cd ...' line started a new statement and was a shell syntax error.
cd \
&& mkdir -p ${HOME}/Temporary \
&& wget https://github.com/kvpb/.files/archive/master.zip --directory-prefix=${HOME}/Temporary \
&& unzip ${HOME}/Temporary/master.zip -d ${HOME}/Temporary \
&& mv ${HOME}/Temporary/.files-master ${HOME}/Temporary/.files \
&& mv ${HOME}/Temporary/.files ${HOME}/.files \
&& rm -rf ${HOME}/Temporary/master.zip \
&& cd ${workingdirectory} # NOTE(review): ${workingdirectory} is never assigned in the visible script -- confirm the intended directory
# Download .files/ from GitHub to ${HOME}/.
if [ -d "${HOME}/{Applications,bin,Miscellaneous,Work}" ]
then
if [ -L "${HOME}/{Applications,bin,Miscellaneous,Work}" ]
then
mv "${HOME}/{Applications,bin,Miscellaneous,Work}" ${HOME}/.local/share/Trash/
mkdir "${HOME}/{Applications,bin,Miscellaneous,Work}"
fi
else
mkdir -p "${HOME}/{Applications,bin,Miscellaneous,Work}"
fi
# If Applications, bin, Miscellaneous and Work are not directories in ${HOME}/, substitute symbolic links by or add these directories.
if [ -d "${HOME}/{Documents,Miscellaneous,Music,Pictures,Videos}/Downloads" ]
then
if [ -L "${HOME}/{Documents,Miscellaneous,Music,Pictures,Videos}/Downloads" ]
then
mv "${HOME}/{Documents,Miscellaneous,Music,Pictures,Videos}/Downloads" ${HOME}/.local/share/Trash/
mkdir "${HOME}/{Documents,Miscellaneous,Music,Pictures,Videos}/Downloads"
fi
else
mkdir -p "${HOME}/{Documents,Miscellaneous,Music,Pictures,Videos}/Downloads"
fi
# If Downloads is not a directory in ${HOME}/{Documents,Miscellaneous,Music,Pictures,Videos}/, substitute symbolic links by or add these directories.
if [ -d "${HOME}/Pictures/{DCIM}" ]
then
if [ -L "${HOME}/Pictures/{DCIM}" ]
then
mv "${HOME}/Pictures/{DCIM}" ${HOME}/.local/share/Trash/
mkdir "${HOME}/Pictures/{DCIM}"
fi
else
mkdir -p "${HOME}/Pictures/{DCIM}"
fi
# If DCIM is not a directory in ${HOME}/, substitute symbolic links by or add theis directories.
for i in .inputrc .shrc .shinit .profile .exports .functions .aliases .bashrc .profile .exports .functions .aliases .bashrc .bash_login .bash_profile .bash_logout .bash_prompt .zshrc .zprofile .ssh .vimrc .vim .gitconfig .gitignore_global
do
if [ -e ${i} ]
then
if [ -f ${i} -o -d ${i} ]
then
mv ${i} ${HOME}/.local/share/Trash/
xdg-open ${HOME}/.local/share/Trash/
elif [ -L ${i} ]
then
unlink ${i}
fi
fi
done
# Link the version-controlled dotfiles from ~/.files into ${HOME}.
ln -s "${HOME}/.files/.inputrc" "${HOME}/.inputrc"
#ln -s ${HOME}/.files/.shrc ${HOME}/.shrc
#ln -s ${HOME}/.files/.shinit ${HOME}/.shinit
#ln -s ${HOME}/.files/.profile ${HOME}/.profile
ln -s "${HOME}/.files/.bashrc" "${HOME}/.bashrc"
ln -s "${HOME}/.files/.bash_login" "${HOME}/.bash_login"
ln -s "${HOME}/.files/.bash_profile" "${HOME}/.bash_profile"
ln -s "${HOME}/.files/.bash_logout" "${HOME}/.bash_logout"
ln -s "${HOME}/.files/.bash_prompt" "${HOME}/.bash_prompt"
ln -s "${HOME}/.files/.zshrc" "${HOME}/.zshrc"
ln -s "${HOME}/.files/.zprofile" "${HOME}/.zprofile"
#ln -s ${HOME}/.files/.ssh ${HOME}/.ssh
ln -s "${HOME}/.files/.vimrc" "${HOME}/.vimrc"
ln -s "${HOME}/.files/.vim" "${HOME}/.vim"
ln -s "${HOME}/.files/.gitconfig" "${HOME}/.gitconfig"
ln -s "${HOME}/.files/.gitignore_global" "${HOME}/.gitignore_global"
# Expose each per-category Downloads directory under ~/Downloads.
ln -s "${HOME}/Miscellaneous/Downloads" "${HOME}/Downloads/Miscellaneous"
ln -s "${HOME}/Documents/Downloads" "${HOME}/Downloads/Documents"
ln -s "${HOME}/Pictures/Downloads" "${HOME}/Downloads/Pictures"
# FIX: the directory created above is ~/Videos/Downloads (Ubuntu's XDG
# name), not ~/Movies/Downloads, so link the one that actually exists.
ln -s "${HOME}/Videos/Downloads" "${HOME}/Downloads/Videos"
ln -s "${HOME}/Music/Downloads" "${HOME}/Downloads/Music"
ln -s "${HOME}/Pictures/DCIM" "${HOME}/DCIM"
ln -s "${HOME}/.local/share/Trash" "${HOME}/.Trash"
# Create user-specific custom symbolic links.
:> "${HOME}/.hushlogin" # Write the .hushlogin file in the current user's home folder.
#sed -i '/Icon/d' "/var/lib/AccountsService/${USER}"
#find ${HOME} -mindepth 0 -type f -name Screen\ Shot\ 2015-11-04\ at\ 5.18.34\ PM.png -exec rsync -aHAX {} /home/kvpb/.face/ \;
# Set my user picture.
# --- Third-party software: languages, shells and CLI tooling -----------
# NOTE(review): the SGR sequence below contains "0;0#m", which looks
# malformed (likely a truncated 24-bit background colour) -- confirm the
# intended styling.
printf '\033[48;2;255;255;0;0#m%s\033[m\n' 'Third-Party: Systemwide And User-Specific Xenogenetic Software & Preferences'
printf '%s\n' 'BASH'
# NOTE(review): classic apt-get ignores package arguments to "upgrade";
# "apt-get install --only-upgrade bash" may be what is intended -- confirm.
sudo apt-get upgrade -qq bash # Download and update BASH.
printf '%s\n' 'ZSH'
sudo apt-get install -qq zsh # Download and update ZSH.
printf '%s\n' 'Rust'
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh # Install Rust. https://www.rust-lang.org/tools/install
printf '%s\n' 'Go'
sudo apt-get install -qq golang #golang-go
sudo apt-get install -qq gccgo #gccgo-go
# Download and install Go.
printf '%s\n' 'Python'
sudo apt-get install -qq python3
sudo apt-get remove -qq python-is-python2
sudo apt-get install -qq python-is-python3
sudo apt-get install -qq python2
# Download and install Python 3.10 and 2.7.
#printf '%s\n' 'Ruby'
#
#sudo apt-get install -qq ruby # Download and install Ruby.
printf '%s\n' 'Node.js & NPM'
#sudo apt-get install -y nodejs npm # Download and install Node.js & NPM.
wget -qO- https://deb.nodesource.com/setup_14.x | sudo -E bash -
sudo apt-get install -qq nodejs
# Download and install Node.js 14.x LTS & NPM
printf '%s\n' 'Octave'
sudo apt-get install -qq octave # Download and install Octave.
printf '%s\n' 'Media Transfer Protocol (MTP)'
sudo apt-get install -qq go-mtpfs
sudo apt-get install -qq libmtp
sudo apt-get install -qq mtpfs
sudo apt-get install -qq mtp-tools
sudo apt-get install -qq jmtpfs
# Download and install MTP.
printf '%s\n' 'KVM'
sudo apt-get install -qq kvm # Download and install KVM.
printf '%s\n' 'QEMU'
sudo apt-get install -qq qemu # Download and install QEMU.
printf '%s\n' 'OpenSSH'
sudo apt-get install -qq ssh # Download and install OpenSSH.
printf '%s\n' 'OpenSSL'
sudo apt-get install -qq openssl # Download and install OpenSSL.
printf '%s\n' 'MOSH'
sudo apt-get install -qq mosh # Download and install MOSH.
printf '%s\n' 'Git'
sudo apt-get upgrade -qq git # Download and update Git.
printf '%s\n' 'Wget'
sudo apt-get upgrade -qq wget # Download and update Wget.
printf '%s\n' 'CURL'
sudo apt-get install -qq curl # Download and install CURL.
printf '%s\n' 'XATTR'
sudo apt-get install -qq xattr # Download and install XATTR.
printf '%s\n' 'Vi Improved'
sudo apt-get install -qq vim # Download and install VIM.
printf '%s\n' 'FSWatch'
sudo apt-get install -qq fswatch # Download and install FSWatch.
printf '%s\n' 'Tree'
sudo apt-get install -qq tree # Download and install Tree.
printf '%s\n' 'HTOP'
sudo apt-get install -qq htop # Download and install HTOP.
printf '%s\n' 'The Fuck'
sudo apt-get install -qq thefuck # Download and install The Fuck.
printf '%s\n' 'TMUX'
sudo apt-get install -qq tmux # Download and install TMUX.
printf '%s\n' 'FFMPEG'
sudo apt-get install -qq ffmpeg # Download and install FFMPEG.
printf '%s\n' 'Terminator'
sudo apt-get install -qq terminator # Download and install Terminator.
# --- Third-party software: desktop applications ------------------------
#printf '%s\n' 'VMware Workstation Player'
#
#sudo apt-get install gcc build-essential -y
#chmod +x VMware-Player-14.0.0-6661328.x86_64.bundle
#gksudo bash VMware-Player-14.0.0-6661328.x86_64.bundle
# Download and install VMware Workstation Player. https://help.ubuntu.com/community/VMware/Player
printf '%s\n' 'Visual Studio Code'
sudo snap install --classic code # Download and install VS Code. https://code.visualstudio.com/docs/setup/linux
printf '%s\n' 'Xpad'
sudo apt-get install -qq xpad # Download and install Xpad.
printf '%s\n' 'Joplin'
#JoplinAppImage='Joplin-2.4.9.AppImage'
#JoplinAppImageURL="https://github.com/laurent22/joplin/releases/download/v2.4.9/Joplin-2.4.9.AppImage"
#wget -q ${JoplinAppImageURL}
#chmod +x ${JoplinAppImage}
#mv ${JoplinAppImage} ${HOME}/Applications/
# 'On Linux, the recommended way is to use the following installation script as it will handle the desktop icon too:' https://joplinapp.org/help/#desktop-applications
wget -O - https://raw.githubusercontent.com/laurent22/joplin/dev/Joplin_install_and_update.sh | bash
# 'Important: First, install Node 12+.' https://joplinapp.org/help/#terminal-application
NPM_CONFIG_PREFIX=~/.joplin-bin npm install -g joplin
sudo ln -s ~/.joplin-bin/bin/joplin /usr/bin/joplin
# 'By default, the application binary will be installed under ~/.joplin-bin. You may change this directory if needed. Alternatively, if your npm permissions are setup as described here (Option 2) then simply running npm -g install joplin would work.' https://joplinapp.org/help/#terminal-application
# Download and install Joplin (desktop and terminal clients).
printf '%s\n' 'LibreOffice'
sudo apt-get install -qq libreoffice # Download and install LibreOffice.
printf '%s\n' 'GIMP'
sudo apt-get install -qq gimp # Download and install GIMP.
printf '%s\n' 'Inkscape'
sudo apt-get install -qq inkscape # Download and install Inkscape.
printf '%s\n' 'Handbrake'
sudo apt-get install -qq handbrake # Download and install Handbrake.
printf '%s\n' 'ProtonMail Bridge'
#wget https://protonmail.com/download/protonmail-bridge_1.2.1-1_amd64.deb # 'Download the file to the current working directory.'
#sudo apt-get install debsig-verify debian-keyring # 'Install debsigs to be able to verify the package.'
#gpg --dearmor --output debsig.gpg bridge_pubkey.gpg
#sudo mkdir -p /usr/share/debsig/keyrings/E2C75D68E6234B07
#sudo mv debsig.gpg /usr/share/debsig/keyrings/E2C75D68E6234B07
# 'Import the public key into the keyring.'
#sudo mkdir -p /etc/debsig/policies/E2C75D68E6234B07
#sudo cp bridge.pol /etc/debsig/policies/E2C75D68E6234B07
# 'Install the policy file.'
#if [ $(debsig-verify protonmail-bridge_1.1.6-1_amd64.deb) = "debsig: Verified package from 'Proton Technologies AG (ProtonMail Bridge developers) <bridge@protonmail.ch>' (Proton Technologies AG)" ] # 'Check the DEB file.'
#then
#	sudo apt-get install ./protonmail-bridge_1.1.6-1_amd64.deb
#fi # 'Install the package using [my] package manager.'
#mv ./protonmail-bridge_1.1.6-1_amd64.deb ${HOME}/Downloads/
# Download and install ProtonMail Bridge.
pmbdeb='protonmail-bridge_1.8.10-1_amd64.deb'
pmbdeburl="https://protonmail.com/download/bridge/${pmbdeb}"
wget -q ${pmbdeburl}
sudo apt-get install -qq ./${pmbdeb}
mv ${pmbdeb} ${HOME}/Downloads/
# Download and install ProtonMail Bridge.
printf '%s\n' 'Geary'
sudo apt-get install -qq geary # Download and install Geary.
#printf '%s\n' 'Signal'
#
#wget -O- https://updates.signal.org/desktop/apt/keys.asc | gpg --dearmor > signal-desktop-keyring.gpg
#sudo mv signal-desktop-keyring.gpg /usr/share/keyrings/
# '1. Install our official public software signing key'
#echo 'deb [arch=amd64 signed-by=/usr/share/keyrings/signal-desktop-keyring.gpg] https://updates.signal.org/desktop/apt xenial main' |\
#	sudo tee -a /etc/apt/sources.list.d/signal-xenial.list
# '2. Add our repository to your list of repositories'
#sudo apt-get update && sudo apt-get install signal-desktop # '3. Update your package database and install signal'
# 'Linux (Debian-based) Install Instructions' https://signal.org/en/download/
# Download and install Signal.
#printf '%s\n' 'California'
#
#sudo add-apt-repository -y ppa:yorba/daily-builds
#sudo apt-get update --allow-insecure-repositories
#sudo apt-get install california -y
# Download and install California.
printf '%s\n' 'KAddressBook'
sudo apt-get install -qq akonadi*
sudo apt-get install -qq kaddressbook
# Download and install KAddressBook.
printf '%s\n' 'Transmission'
sudo apt-get install -qq transmission # Download and install Transmission.
#printf '%s\n' 'Dropbox'
#
#if [ "$(uname -m)" = 'i386' -o "$(uname -m)" = 'i686' ]
#then
#	cd ~ && wget -O - "https://www.dropbox.com/download?plat=lnx.x86" | tar xzf -
#	~/.dropbox-dist/dropboxd
#elif [ "$(uname -m)" = 'x86_64' ]
#then
#	cd ~ && wget -O - "https://www.dropbox.com/download?plat=lnx.x86_64" | tar xzf -
#	~/.dropbox-dist/dropboxd
#else
#	:
#fi
# Download and install Dropbox. https://www.dropbox.com/install-linux
#printf '%s\n' ''
#
#sudo apt-get install -qq # Download and install .
printf '%s\n' ''
# FIX: the original wrote '...'# ${litcyan} with no space before '#', so
# the '#' was appended to the printed message instead of starting a
# comment.
printf '%s\n' 'Ubuntu has been configured.' # ${litcyan}
# FIX: use %b so the \e[4m underline escapes in the prompt are
# interpreted instead of being printed literally (%s does not process
# backslash escapes in its argument).
printf '%b\n' 'A reboot is required for some of these changes to take effect. Reboot...?, \e[4my\e[0mes or \e[4mn\e[0mo?' # ${litgreen}
read -r input
# Accept y / yes / yeah in any capitalization; anything else skips the
# reboot.
if [[ ${input} =~ ^([Yy]|[Yy][Ee][Ss]|[Yy][Ee][Aa][Hh])$ ]]
then
	sudo shutdown -r now # Reboot without confirmation.
else
	: # Declined: fall through to the normal exit.
fi
# .ubuntu
# Ubuntu Configuration Script
#
# Karl V. P. B. `kvpb`
# +1 (DDD) DDD-DDDD
# +33 A BB BB BB BB
# local-part@domain
# local-part@domain
# https://www.linkedin.com/in/
# https://twitter.com/
# https://github.com/
# https://vm.tiktok.com//
| true
|
fb3ee05a86e360b7098ec273eeef555385d50292
|
Shell
|
andres-condezo/mydotfiles
|
/.zshrc
|
UTF-8
| 8,334
| 2.71875
| 3
|
[] |
no_license
|
# If you come from bash you might have to change your $PATH.
# export PATH=$HOME/bin:/usr/local/bin:$PATH
# Path to your oh-my-zsh installation.
export ZSH="/home/adrs/.oh-my-zsh"
# Set name of the theme to load --- if set to "random", it will
# load a random theme each time oh-my-zsh is loaded, in which case,
# to know which specific one was loaded, run: echo $RANDOM_THEME
# See https://github.com/ohmyzsh/ohmyzsh/wiki/Themes
ZSH_THEME="spaceship"
#Set spacship prompt
SPACESHIP_PROMPT_ADD_NEWLINE=false
SPACESHIP_PROMPT_SEPARATE_LINE=false
SPACESHIP_USER_SHOW=always
SPACESHIP_PACKAGE_SHOW=false
SPACESHIP_DIR_TRUNC=0
# Set list of themes to pick from when loading at random
# Setting this variable when ZSH_THEME=random will cause zsh to load
# a theme from this variable instead of looking in $ZSH/themes/
# If set to an empty array, this variable will have no effect.
# ZSH_THEME_RANDOM_CANDIDATES=( "robbyrussell" "agnoster" )
# Uncomment the following line to use case-sensitive completion.
# CASE_SENSITIVE="true"
# Uncomment the following line to use hyphen-insensitive completion.
# Case-sensitive completion must be off. _ and - will be interchangeable.
# HYPHEN_INSENSITIVE="true"
# Uncomment the following line to disable bi-weekly auto-update checks.
# DISABLE_AUTO_UPDATE="true"
# Uncomment the following line to automatically update without prompting.
# DISABLE_UPDATE_PROMPT="true"
# Uncomment the following line to change how often to auto-update (in days).
# export UPDATE_ZSH_DAYS=13
# Uncomment the following line if pasting URLs and other text is messed up.
# DISABLE_MAGIC_FUNCTIONS="true"
# Uncomment the following line to disable colors in ls.
# DISABLE_LS_COLORS="true"
# Uncomment the following line to disable auto-setting terminal title.
# DISABLE_AUTO_TITLE="true"
# Uncomment the following line to enable command auto-correction.
# ENABLE_CORRECTION="true"
# Uncomment the following line to display red dots whilst waiting for completion.
COMPLETION_WAITING_DOTS="true"
# Uncomment the following line if you want to disable marking untracked files
# under VCS as dirty. This makes repository status check for large repositories
# much, much faster.
# DISABLE_UNTRACKED_FILES_DIRTY="true"
# Uncomment the following line if you want to change the command execution time
# stamp shown in the history command output.
# You can set one of the optional three formats:
# "mm/dd/yyyy"|"dd.mm.yyyy"|"yyyy-mm-dd"
# or set a custom format using the strftime function format specifications,
# see 'man strftime' for details.
# HIST_STAMPS="mm/dd/yyyy"
# Would you like to use another custom folder than $ZSH/custom?
# ZSH_CUSTOM=/path/to/new-custom-folder
# Which plugins would you like to load?
# Standard plugins can be found in $ZSH/plugins/
# Custom plugins may be added to $ZSH_CUSTOM/plugins/
# Example format: plugins=(rails git textmate ruby lighthouse)
# Add wisely, as too many plugins slow down shell startup.
plugins=(git
zsh-autosuggestions
colored-man-pages
zsh-syntax-highlighting)
source $ZSH/oh-my-zsh.sh
# User configuration
#---------------------------------------------------------------
#------------------- VIM MODE ----------------------------------
# vi mode
bindkey -v
export KEYTIMEOUT=20
# Use vim keys in tab complete menu:
bindkey -M menuselect 'h' vi-backward-char
bindkey -M menuselect 'k' vi-up-line-or-history
bindkey -M menuselect 'l' vi-forward-char
bindkey -M menuselect 'j' vi-down-line-or-history
bindkey -v '^?' backward-delete-char
bindkey -M viins 'ii' vi-cmd-mode
# bindkey jj vi-cmd-mode
# bindkey -s jj '\e'
# VIM_MODE_VICMD_KEY='^D'
# Change cursor shape for different vi modes.
# zle calls this widget whenever the keymap changes: block cursor
# (ESC[1 q) for vicmd/normal mode, beam cursor (ESC[5 q) for insert mode.
# $1 may also carry an explicit 'block'/'beam' request.
function zle-keymap-select {
if [[ ${KEYMAP} == vicmd ]] ||
[[ $1 = 'block' ]]; then
echo -ne '\e[1 q'
elif [[ ${KEYMAP} == main ]] ||
[[ ${KEYMAP} == viins ]] ||
[[ ${KEYMAP} = '' ]] ||
[[ $1 = 'beam' ]]; then
echo -ne '\e[5 q'
fi
}
zle -N zle-keymap-select
# zle-line-init() {
# zle -K viins # initiate `vi insert` as keymap (can be removed if `bindkey -V` has been set elsewhere)
# echo -ne "\e[5 q"
# }
# NOTE(review): the widget below is registered although its definition
# above is commented out, so zle will reference a function that does not
# exist -- confirm this is intentional.
zle -N zle-line-init
echo -ne '\e[5 q' # Use beam shape cursor on startup.
preexec() { echo -ne '\e[5 q' ;} # Use beam shape cursor for each new prompt.
# ci"
autoload -U select-quoted
zle -N select-quoted
for m in visual viopp; do
for c in {a,i}{\',\",\`}; do
bindkey -M $m $c select-quoted
done
done
# ci{, ci(, di{ etc..
autoload -U select-bracketed
zle -N select-bracketed
for m in visual viopp; do
for c in {a,i}${(s..)^:-'()[]{}<>bB'}; do
bindkey -M $m $c select-bracketed
done
done
#------------------- FIN VIM MODE ----------------------------------
#---------------------------------------------------------------
# export MANPATH="/usr/local/man:$MANPATH"
# You may need to manually set your language environment
# export LANG=en_US.UTF-8
#------------------- PREFERRED EDITOR ----------------------------------
#---------------------------------------------------------------
# Preferred editor for local and remote sessions
if [[ -n $SSH_CONNECTION ]]; then
export EDITOR='vim'
else
export EDITOR='nvim'
export VISUAL='nvim'
fi
# Compilation flags
# export ARCHFLAGS="-arch x86_64"
#---------------------------------------------------------------
#------------------- ALIAS ----------------------------------
# Set personal aliases, overriding those provided by oh-my-zsh libs,
# plugins, and themes. Aliases can be placed here, though oh-my-zsh
# users are encouraged to define aliases within the ZSH_CUSTOM folder.
# For a full list of active aliases, run `alias`.
alias ohmyzsh="nvim ~/.oh-my-zsh"
alias c:="cd /mnt/c"
alias d:="cd /mnt/d"
alias e:="cd /mnt/e"
alias cl="clear; ls"
alias clr="clear"
alias webdev="/mnt/d/webDevelopment"
alias proyectos="/home/adrs/proyectos"
alias pz="/mnt/d/Documentos/platzi"
alias lv="nvim -c':e#<1'"
alias vrc="nvim ~/.vimrc"
alias zrc="nvim ~/.zshrc"
alias brc="nvim ~/.bashrc"
alias x=exit
alias cli="clip.exe"
alias clip="tr -d '\n' | clip.exe && echo 'copied to clipboard'"
alias cpwd="pwd | tr -d '\n' | clip.exe && echo 'pwd copied to clipboard'"
alias gdf='/usr/bin/git --git-dir=$HOME/dotfiles/ --work-tree=$HOME'
alias v="nvim"
alias e="emacs"
alias docd='/mnt/d/Documentos'
alias py='python3'
alias ipy='ipython3'
alias hol='cd /mnt/d/Documentos/Holberton'
alias rng='ranger'
#---------------------------------------------------------------
#------------------ FUNCTIONS ----------------------------------
# Compile the C source file named by $1, stash the binary under
# compilers/, and run it from there.
c(){
	local folder="compilers/"
	if [[ ! -d $folder ]]; then
		mkdir "$folder"
	fi
	# FIX: derive the output name with parameter expansion; the original
	# sed pattern stripped ".c" anywhere it appeared in the name, not
	# just the extension.
	local entry=${1%.c}
	# FIX: abort if compilation fails instead of trying to move/run a
	# binary that was never produced.
	cc -o "$entry" "$1" || return
	mv "$entry" "$folder"
	"./${folder}${entry}"
}
# Ctrl-Z toggle widget: with an empty command line, resume the most
# recently suspended job in the foreground; with pending input, stash it
# (push-input) so it is restored at the next prompt.
ctrlz() {
# $#BUFFER is the length of the current zle edit buffer.
if [[ $#BUFFER == 0 ]]; then
fg >/dev/null 2>&1 && zle redisplay
else
zle push-input
fi
}
zle -N ctrlz
bindkey '^Z' ctrlz
# Use ranger to switch directories (bound to Ctrl-P below): ranger writes
# the directory it was last browsing into a temp file, which we cd into
# once it exits.
rngcd () {
	tmp="$(mktemp)"
	ranger --choosedir="$tmp" "$@"
	[ -f "$tmp" ] || return 0
	dir="$(cat "$tmp")"
	rm -f "$tmp"
	# Only change directory when the choice exists and differs from cwd.
	[ -d "$dir" ] && [ "$dir" != "$(pwd)" ] && cd "$dir"
}
bindkey -s '^p' 'rngcd\n'
# Use lf to switch directories (bound to Ctrl-O below): lf records its
# last directory in a temp file, which we cd into once it exits.
lfcd () {
	tmp="$(mktemp)"
	lf -last-dir-path="$tmp" "$@"
	[ -f "$tmp" ] || return 0
	dir="$(cat "$tmp")"
	rm -f "$tmp"
	# Only change directory when the choice exists and differs from cwd.
	[ -d "$dir" ] && [ "$dir" != "$(pwd)" ] && cd "$dir"
}
bindkey -s '^o' 'lfcd\n'
# overload-tab () {
# if (( CURSOR < 1 ))
# then zle your-new-widget
# else zle expand-or-complete
# fi
# }
# zle -N overload-tab
# bindkey $'\t' overload-tab
#----------------------------------------------------------------
#------------------- BINDKEYS ----------------------------------
# bindkey -M menuselect '^M' .accept-line
bindkey '^ ' autosuggest-accept
# Edit line in vim with ctrl-e:
autoload edit-command-line; zle -N edit-command-line
bindkey '^e' edit-command-line
#---------------------------------------------------------------
#------------------- EXPORTS ----------------------------------
# Load fzf keybindings/completions when installed.
[ -f ~/.fzf.zsh ] && source ~/.fzf.zsh
# Color world-writable (ow) and sticky+world-writable (tw) directories
# magenta so they remain readable on dark backgrounds.
LS_COLORS=$LS_COLORS:'tw=01;35:ow=01;35:' ;
export LS_COLORS
# Preload stderred so stderr output is rendered in red
# (user-specific absolute path).
export LD_PRELOAD="/home/adrs/stderred/build/libstderred.so${LD_PRELOAD:+:$LD_PRELOAD}"
| true
|
573010f3c0fd4b2280d0a4e072b6931bb3423356
|
Shell
|
hardik-kgp/Networks_Lab_CS39006
|
/Assignment4/assignment4_partB.sh
|
UTF-8
| 5,632
| 2.890625
| 3
|
[] |
no_license
|
#!/bin/sh
####################################
#### Sriyash Poddar | 18CS30040 ####
#### Assignment 4 Part B ####
####################################
# creating namespaces
# command: sudo ip netns add <name of namespace>
# (restyled: the repeated commands are driven from small data tables so
# each phase of the topology is defined in one place; the command
# sequence and its order are unchanged)
for ns in H1 H2 H3 H4 R1 R2 R3
do
	sudo ip netns add "$ns"
done
# creating v-eth to connect the namspaces
# command: sudo ip link add <veth-name> type veth peer name <veth-peername>
for pair in V1:V2 V3:V4 V5:V6 V7:V8 V9:V10 V11:V12
do
	sudo ip link add "${pair%:*}" type veth peer name "${pair#*:}"
done
# assigning interfaces to the namespaces
# command: sudo ip link set <veth-name> netns <namespace>
ifmap='V1:H1 V3:H2 V10:H3 V12:H4 V2:R1 V4:R1 V5:R1 V6:R2 V7:R2 V8:R3 V9:R3 V11:R3'
for m in $ifmap
do
	sudo ip link set "${m%:*}" netns "${m#*:}"
done
# adding ips to the interfaces
# command: sudo ip -n <namespace> addr add <ip-address/subnet> dev <veth-name>
for a in \
	H1:10.0.10.40/24:V1 H2:10.0.20.40/24:V3 H3:10.0.50.41/24:V10 H4:10.0.60.41/24:V12 \
	R1:10.0.10.41/24:V2 R1:10.0.20.41/24:V4 R1:10.0.30.40/24:V5 \
	R2:10.0.30.41/24:V6 R2:10.0.40.40/24:V7 \
	R3:10.0.40.41/24:V8 R3:10.0.50.40/24:V9 R3:10.0.60.40/24:V11
do
	ns=${a%%:*}; rest=${a#*:}
	sudo ip -n "$ns" addr add "${rest%:*}" dev "${rest##*:}"
done
# bringing veth, bridges and lo up
# command: sudo ip -n <namespace> link set dev <veth-name> up
for m in $ifmap
do
	sudo ip -n "${m#*:}" link set dev "${m%:*}" up
done
for ns in H1 H2 H3 H4 R1 R2 R3
do
	sudo ip -n "$ns" link set dev lo up
done
# adding routes
# command: sudo ip netns exec <namespace> route add <dest-ip> via <route-ip> dev <veth-name>
# table entries: <ns>:<gateway>:<dev>:<comma separated third octets>
for r in \
	H1:10.0.10.41:V1:20,30,40,50,60 \
	H2:10.0.20.41:V3:10,30,40,50,60 \
	H3:10.0.50.40:V10:10,20,30,40,60 \
	H4:10.0.60.40:V12:10,20,30,40,50 \
	R1:10.0.30.41:V5:40,50,60 \
	R2:10.0.30.40:V6:10,20 \
	R2:10.0.40.41:V7:50,60 \
	R3:10.0.40.40:V8:10,20,30
do
	ns=${r%%:*}; rest=${r#*:}
	gw=${rest%%:*}; rest=${rest#*:}
	dev=${rest%%:*}; nets=${rest#*:}
	for n in $(printf '%s' "$nets" | tr ',' ' ')
	do
		sudo ip -n "$ns" route add "10.0.${n}.0/24" via "$gw" dev "$dev"
	done
done
# ip forwarding
# command: sudo ip netns exec <namespace> sysctl -w net.ipv4.ip_forward=1
for ns in R1 R2 R3
do
	sudo ip netns exec "$ns" sysctl -w net.ipv4.ip_forward=1
done
# trace routes
sudo ip netns exec H1 traceroute 10.0.60.41
sudo ip netns exec H3 traceroute 10.0.60.41
sudo ip netns exec H4 traceroute 10.0.20.40
# ping test: every namespace pings the .40 and .41 host of each /24.
# FIX: the original used bash brace expansion ({1..6}) under #!/bin/sh.
# dash does not expand braces, so the loop ran once with the literal
# string "{1..6}" and $((x*10)) failed.  Use a POSIX-compatible list.
for x in 1 2 3 4 5 6
do
	y=$((x*10))
	for ns in H1 H2 H3 H4 R1 R2 R3
	do
		sudo ip netns exec "$ns" ping -c3 "10.0.$y.40"
	done
	for ns in H1 H2 H3 H4 R1 R2 R3
	do
		sudo ip netns exec "$ns" ping -c3 "10.0.$y.41"
	done
done
exit
| true
|
2df694d5eca65ed85c2ac95397fb583689fa2f75
|
Shell
|
henderea/jse
|
/resources/shell_code.sh
|
UTF-8
| 1,462
| 3.625
| 4
|
[] |
no_license
|
# Locate the `jse` executable, activating a Node version via nvm when
# necessary.  Returns 0 when `jse` is on PATH afterwards, 1 otherwise.
function __jse_check {
  if which jse >/dev/null 2>/dev/null; then
    return 0
  fi
  if ! which nvm >/dev/null 2>/dev/null; then
    return 1
  fi
  # nvm is available: pick a Node version, preferring a local .nvmrc.
  if which nvm_find_nvmrc >/dev/null 2>/dev/null; then
    if [[ -f "$(nvm_find_nvmrc)" ]]; then
      nvm use >/dev/null 2>/dev/null
    else
      nvm use default >/dev/null 2>/dev/null
    fi
  else
    nvm use default >/dev/null 2>/dev/null
    nvm use >/dev/null 2>/dev/null
  fi
  # Status of this final lookup is the function's return value.
  which jse >/dev/null 2>/dev/null
}
# Thin wrappers over `jse var get`, silencing stderr.  The trailing
# `|| return $?` in the originals was redundant: a function already
# returns the status of its last command.
function jvg {
  jse var get "$1" 2>/dev/null
}
# Same, but one value per line.
function jvgl {
  jse var get --lines "$1" 2>/dev/null
}
# Same, but shell-escaped output.
function jvge {
  jse var get --escaped "$1" 2>/dev/null
}
# Get all values, shell-escaped, unique and sorted.
# FIX: quote "$@" so arguments containing whitespace are forwarded intact.
function jvgeu {
    jse var get-all --escaped --unique --sort "$@" 2>/dev/null || return $?
}
# Echo then run `jse var add`.
# FIX: pass "$*" as a printf argument instead of interpolating it into
# the format string (a stray '%' in an argument would otherwise be
# interpreted as a conversion), and quote "$@".
function jva {
    printf '> jse var add %s\n' "$*"
    jse var add "$@" || return $?
}
# Echo then run `jse var mod` (same fixes as jva).
function jvm {
    printf '> jse var mod %s\n' "$*"
    jse var mod "$@"
}
# Wrapper around the real `jse` binary that keeps the shell integration
# fresh.  `jse reload` re-evaluates the integration; any other invocation
# first re-syncs if the embedded revision token is stale, then delegates
# to the binary.
function jse {
  if ! __jse_check >/dev/null 2>/dev/null; then
    printf "$(tput bold)$(tput setaf 1)Unable to find jse script$(tput sgr0)\n"
    return 1
  fi
  if [[ "$1" == 'reload' ]]; then
    printf "$(tput dim)Reloading jse shell integration$(tput sgr0)\n" >&2
    eval "$(command jse shell)"
  else
    # '!!JSE_SHELL_REVISION!!' is substituted when this file is generated;
    # a mismatch means the installed integration is out of date.
    if [[ "$(command jse shell --revision)" != '!!JSE_SHELL_REVISION!!' ]]; then
      eval "$(command jse shell)"
    fi
    # FIX: quote "$@" so arguments with spaces reach the binary intact.
    command jse "$@"
    return $?
  fi
}
| true
|
3cf2170f798b0df1d3160a8243dfb79e1c9d70ce
|
Shell
|
yuzhenpeng/egaz
|
/template/6_var_list.tt2
|
UTF-8
| 729
| 2.71875
| 3
|
[] |
no_license
|
#!/bin/bash
# Template Toolkit (TT2) template: every [% ... %] token is substituted
# by the pipeline generator before this script is executed.
# perl [% stopwatch.cmd_line %]
cd [% working_dir %]
sleep 1;
# Recreate the VCF output directory from scratch.
if [ -d [% working_dir %]/[% multi_name %]_vcf ]; then
    rm -fr [% working_dir %]/[% multi_name %]_vcf;
fi;
mkdir -p [% working_dir %]/[% multi_name %]_vcf
#----------------------------#
# var_list
#----------------------------#
# Convert every refined alignment (*.fas[.gz]) to VCF in parallel via
# fas2vcf.pl, using the target genome's chromosome sizes.
find [% working_dir %]/[% multi_name %]_refined -type f -name "*.fas" -or -name "*.fas.gz" \
    | parallel --no-run-if-empty basename {} \
    | parallel --no-run-if-empty -j [% parallel %] \
        perl [% egaz %]/fas2vcf.pl \
            -s [% working_dir %]/Genomes/[% target %]/chr.sizes \
            -i [% working_dir %]/[% multi_name %]_refined/{} \
            -o [% working_dir %]/[% multi_name %]_vcf/{}.vcf
| true
|
3b939ed8af9e3c014840ba622e848f8d0494b391
|
Shell
|
mickmetalholic/osm-server-docker
|
/config/run.sh
|
UTF-8
| 1,841
| 3.578125
| 4
|
[] |
no_license
|
#!/bin/bash
# Entry point for the OSM tile-server container: each command-line
# argument names one of the functions below and is executed in order by
# the dispatch loop at the bottom of this file.

# Fetch shapefiles and compile the CartoCSS project into the Mapnik XML
# (style.xml) consumed by renderd.
installstylesheet() {
echo "Installing stylesheet ..."
cd /root/stylesheet && \
sh ./get-shapefiles.sh && \
carto project.mml > style.xml
echo "Stylesheet installed!"
}
# Create the PostgreSQL 9.4 cluster and link in the snakeoil TLS cert/key.
initdb() {
echo "Initializing postgresql..."
mkdir -p /var/lib/postgresql/9.4/main && chown -R postgres /var/lib/postgresql/
su - postgres -c "/usr/lib/postgresql/9.4/bin/initdb --pgdata /var/lib/postgresql/9.4/main"
ln -s /etc/ssl/certs/ssl-cert-snakeoil.pem /var/lib/postgresql/9.4/main/server.crt
ln -s /etc/ssl/private/ssl-cert-snakeoil.key /var/lib/postgresql/9.4/main/server.key
echo "Postgresql initialized!"
}
# Start the PostgreSQL service.
startdb() {
echo "Starting postgresql..."
service postgresql start
echo "Postgresql Started!"
}
# Create the 'root' database role used as owner of the gis database.
createuser() {
echo "Creating user root..."
su - postgres -c "createuser root"
echo "User root created!"
}
# Create the 'gis' database and load language/extension setup SQL.
createdb() {
echo "Creating database gis..."
su - postgres -c "psql -f /setLang.sql"
su - postgres -c "createdb -E UTF8 -O root gis"
su - postgres -c "psql -s gis -f /addExtensions.sql"
echo "Database gis created!"
}
# One-shot first-run setup: stylesheet, cluster, service, role, database.
initialize() {
echo "Initializing..."
installstylesheet
initdb
startdb
createuser
createdb
echo "Initialize finished!"
}
# Import /data/data.pbf into the 'gis' database (slim mode, 6000 MB
# cache, hstore tags, carto style columns).
import() {
echo "Importing data..."
startdb
osm2pgsql --slim -d gis -C 6000 --hstore -S /root/stylesheet/openstreetmap-carto.style /data/data.pbf
echo "Finished importing data!"
}
# Start the tile-rendering daemon.
startrenderd() {
echo "Starting renderd..."
service renderd start
echo "Renderd started!"
}
# Start the Apache web server that fronts the tile cache.
startapache2() {
echo "Starting apache2..."
service apache2 start
echo "Apache2 started!"
}
# Bring up all services, then block so the container's main process
# stays alive.
start() {
echo "Starting service..."
startdb
startrenderd
startapache2
echo "Service started!"
tail -f /run.sh # never returns; tails this script as an always-present file to keep the container running
}
# Drop into an interactive shell (for debugging the container).
cli() {
echo "Running bash"
/bin/bash
}
# Execute the specified command sequence
# NOTE(review): each CLI argument is executed verbatim as a command, so
# callers fully control what runs here (intended for the functions above).
for arg
do
$arg;
done
exit 0
| true
|
a75a13e0841635a97fd717fc977d42f1d53716b6
|
Shell
|
pigpaxos/pigpaxos
|
/sigmod2021/prc_experiments/start_no_faults.sh
|
UTF-8
| 405
| 2.875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
source common.sh

# For every node listed in common.sh: clear old logs, push a fresh start
# script, and launch a PigPaxos server (algorithm parameters: $1 $2).
node_count=${#nodes[@]}
for (( idx = 1; idx <= node_count; idx++ )); do
	node=${nodes[$idx-1]}
	ssh -i va.pem ubuntu@${node} 'cd /home/ubuntu/paxi/bin/; rm logs/*'
	upload_one ${node} "/home/pigpaxos/go/src/github.com/pigpaxos/pigpaxos/bin/start_bp.sh" "/home/ubuntu/paxi/bin/start_bp.sh"
	server_pigpaxos ${node} $idx $1 $2 false 20
	echo "Started PigPaxos 1.$idx"
done
| true
|
bd5011ea132c7cf5bbfbb440226ebb4d5dfef024
|
Shell
|
NCAR/DART
|
/models/FESOM/shell_scripts/advance_model.template
|
UTF-8
| 3,594
| 3.125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#
#--- advance each ensemble member using a job array
#----------------------------------------------------------------------
# LSF options (set for the NCAR machine "cheyenne")
#
#BSUB -a poe
#BSUB -J advance_fesom[1-ENSEMBLEMEMBERNO]%3 # Name of the job.
#BSUB -o LOG/advance_fesom_%I_%J.out # Appends stdout to file %J.out.
#BSUB -e LOG/advance_fesom_%I_%J.out # Appends stderr to file %J.err.
#BSUB -P fesom # Project ID.
#BSUB -q POEQUEUENAME # queue
#BSUB -R "span[ptile=16]" #
#BSUB -n NUMBEROFCORES #
#BSUB -N #
####BSUB -x #
#
#----------------------------------------------------------------------
# PBS options (set for the NCAR machine "cheyenne")
#
#PBS -N init_ens
#PBS -J 1-ENSEMBLEMEMBERNO
#PBS -l walltime=0:10:00
#PBS -q regular
#PBS -j oe
#PBS -A P86850054
#PBS -l select=1:ncpus=36:mpiprocs=36
#
#----------------------------------------------------------------------
#-- Load Experiment Environment Variables -----------------
. environment.load
# Translate the queueing-specific variables into a common tongue.
# JOBDIR/JOBNAM/JOBIDN/JOBID are the scheduler-neutral names used by the
# rest of this script.
if [[ $SCHEDULER = "lsf" ]] ; then
JOBDIR=${LS_SUBCWD} # directory of this script
JOBNAM=${LSB_JOBNAME} # name of this script
JOBIDN=${LSB_JOBINDEX} # job array index
JOBID=${LSB_JOBID} # job index
EXTENSION=lsf
#-- BEGIN ATHENA CONFIG ----------------------------------
MPIPROGINF=detail
export MPIPROGINF
export LSF_PJL_TYPE="poe"
export MEMORY_AFFINITY=MCM
export MP_WAIT_MODE=poll
export MP_SINGLE_THREAD=yes
export MP_TASK_AFFINITY=MCM
export MP_PGMMODEL=mpmd
# NOTE(review): MP_WAIT_MODE is exported twice with the same value, and
# MP_TASK_AFFINITY twice with different values (MCM above, core below);
# the last assignment wins -- confirm which is intended.
export MP_WAIT_MODE=poll
export MP_POLLING_INTERVAL=30000000
export MP_SHARED_MEMORY=yes
export MP_EUILIB=us
export MP_EUIDEVICE=sn_all
export LDR_CNTRL=TEXTPSIZE=64K@STACKPSIZE=64K@DATAPSIZE=64K
export MP_TASK_AFFINITY=core
#-- END ATHENA CONFIG ------------------------------------
elif [[ ${SCHEDULER} = "pbs" ]] ; then
JOBDIR=${PBS_O_WORKDIR} # directory of this script
JOBNAM=${PBS_JOBNAME} # name of this script
JOBIDN=${PBS_ARRAY_INDEX} # job array index
JOBID=${PBS_JOBID} # job index
TMPDIR=/glade/scratch/$USER/temp # cheyenne-specific
mkdir -p $TMPDIR # cheyenne-specific
EXTENSION=pbs
fi
F_RSVTASK=1; export F_RSVTASK; #THIS CAN BE USEFUL FOR ENSEMBLE. CHECK!
F_ERRCNT=0; export F_ERRCNT
#-- Ensemble required variables ---------------------------
ENSNO=$( echo ${LSB_JOBINDEX} | awk '{ printf("%02d\n", $1) }' )
ENSINFO=${ENSID}${ENSNO};
ENSDIR=${WRKDIR}/${ENSINFO};
cd ${ENSDIR}
#-- Advance FESOM -----------------------------------------
#-- capture the model advance stderr,stdout in a file
${MPIEXEC} ./fesom.x #> LOG/advance_fesom_${JOBIDN}_${JOBID}.out 2>&1
#-- Check if the model advance failed ---------------------
#-- check_ensemble checks the length of restart_file_list.txt against
#-- the number of ensemble members and decides what to do.
CHECKRETURN=$( grep -ir "The model blows up" LOG/advance_fesom_${JOBIDN}_${JOBID}.out )
CHECKSTATUS=$(echo $?); echo ${CHECKSTATUS}
if [ ${CHECKSTATUS} -eq "0" ]; then
echo "${ENSINFO} EXIT 1 :model advance FAILED. ERROR." >> ${CHECKFILE}
else
cd ${FILDIR}
[ ! -f restart_file_list.txt ] && \
echo "${ENSDIR}/${ENSINFO}.${EXPYR}.oce.nc" > restart_file_list.txt || \
echo "${ENSDIR}/${ENSINFO}.${EXPYR}.oce.nc" >> restart_file_list.txt
echo "${ENSINFO} EXIT 0 :is ready to be resubmitted" >> ${CHECKFILE}
fi
| true
|
05b1226692b1091893ef5c194c681f5fce9f8415
|
Shell
|
cmcghan/vagrant-rss
|
/single_installers/install_tulip1.1a.sh
|
UTF-8
| 3,723
| 3.59375
| 4
|
[
"LicenseRef-scancode-warranty-disclaimer",
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
#!/bin/bash -e
# Copyright by California Institute of Technology
# All rights reserved. See LICENSE file at:
# https://github.com/cmcghan/vagrant-rss
#
# in the initialization process for vagrant, this bash script is run as user 'root' from /vagrant
#
# Installs tulip-control v1.1a system-wide, along with its glpk/cvxopt and
# gr1c dependencies and a pinned polytope 0.1.0 (0.1.1 breaks tulip 1.1a).
#
# Note: Ubuntu X-Windows Desktop and ROS indigo are pre-installed
# on the "shadowrobot/ros-indigo-desktop-trusty64" base box
#
echo "Start of install_tulip1.1a.sh script!"
#
# get -f (force) if given
#
# if "-f" is given as the single argument, force recompile/reinstall of all
# compiled-from-source components; otherwise only install what is missing
if [ $# -eq 1 ] && [ "$1" == "-f" ]
then
    echo "-f (force) commandline argument given. Forcing install of all compiled-from-source components."
    FORCE=$1
else
    FORCE=
fi
#
# check for installation
#
# check for version of tulip-control:
# pkg_resources prints the installed version; grep/wc yield 1 when the
# expected '1.1a-dev-unknown-commit' string is present (0 otherwise,
# including when tulip is not installed at all)
TULIP_FOUND=`python -c "import pkg_resources; print(pkg_resources.get_distribution('tulip').version)" | grep -m 1 -o "1.1a-dev-unknown-commit" | wc -l`
if [ $TULIP_FOUND -eq 1 ]
then
    echo "tulip 1.1a already installed!"
fi
# exit script immediately if libraries are already installed
if [ "$FORCE" != "-f" ] && [ $TULIP_FOUND -eq 1 ]
then
    echo "tulip libraries already installed and up-to-date, exiting..."
    exit 0
fi
#
# run installation + upgrades
#
# update all packages, because "gah!" otherwise, especially for 'rosdep' stuff later
sudo apt-get -y update
sudo apt-get -y upgrade
# start in the /root directory
cd ~
# make and move into directory for holding compilation files + downloads
mkdir -p initdeps
cd initdeps
# install glpk and cvxopt:
/vagrant/single_installers/install_glpk_cvxopt.sh $FORCE
# back to compilation/install directory (/root/initdeps)
cd ~/initdeps
# install gr1c:
/vagrant/single_installers/install_gr1c.sh $FORCE
# back to compilation/install directory (/root/initdeps)
cd ~/initdeps
#
# install tulip-control v1.1a system-wide
#
sudo apt-get -y install wget curl # for wget and possible curl use below
sudo apt-get -y install default-jre default-jdk
#polytope 0.1.1 doesn't play nice with tulip-1.1a
#polytope 0.1.0 plays nice with tulip-1.1a
#--> see tulip-1.1a/run_tests.py (will FAIL if polytope 0.1.1 is used)
#note that tulip-1.1a seems to try and install newest version of polytope (0.1.1) from PyPi automatically (even if an older version exists, likely using pip)
#one can download and manually install polytope 0.1.0 ("sudo python setup.py install") and then run ./run_tests.py with it (without recompiling tulip-1.1a), though it does seem slower
# install polytope 0.1.0 system-wide (https://pypi.python.org/pypi/polytope/0.1.0)
# if need to force, then remove old directory first
if [ "$FORCE" == "-f" ]
then
    # BUGFIX: this previously ran 'rm -rf .', which attempts to wipe the
    # entire current download directory (and everything staged in it);
    # only the extracted polytope tree should be removed.
    rm -rf polytope-0.1.0
fi
if [ "$FORCE" == "-f" ] || [ ! -f polytope-0.1.0.tar.gz ]
then
    wget https://pypi.python.org/packages/source/p/polytope/polytope-0.1.0.tar.gz#md5=1eca56d647340acab6314431c568319f
fi
# re-extract whenever the source tree is missing (e.g. after a forced
# removal, or a leftover tarball without its tree), so the 'cd' below
# cannot fail under 'bash -e'
if [ ! -d polytope-0.1.0 ]
then
    tar xvzf polytope-0.1.0.tar.gz
fi
cd polytope-0.1.0
sudo python setup.py install
# install tulip-control v1.1a system-wide
cd ~/initdeps
# if need to force, then remove old directory first
if [ "$FORCE" == "-f" ]
then
    rm -rf tulip-control-1.1a
fi
if [ "$FORCE" == "-f" ] || [ ! -f v1.1a.tar.gz ]
then
    wget https://github.com/tulip-control/tulip-control/archive/v1.1a.tar.gz
    tar xvzf v1.1a.tar.gz
    cd tulip-control-1.1a
    # pin polytope below 0.1.1 inside setup.py's install_requires
    sed -i.orig '290s/polytope >= 0.1.0/polytope >= 0.1.0,<0.1.1/' setup.py
    # 'polytope >= 0.1.0', --> 'polytope >= 0.1.0,<0.1.1', # inside 'install_requires'
    cd ..
fi
cd tulip-control-1.1a
sudo python setup.py install
echo "End of install_tulip1.1a.sh script!"
| true
|
6e4e9cab582effb1f766ddd792d92364d76bb8cf
|
Shell
|
devopsgana1996/devops
|
/shellScripting/exercises/shiftparameters.sh
|
UTF-8
| 614
| 3.828125
| 4
|
[] |
no_license
|
#!/bin/bash
# Purpose: demonstrate how 'shift' discards leading positional parameters.
# Usage:   ./shiftparameters.sh g1 g2 g3 g4
# Author:  Vilas Varghese (restyled)
# START #
# Show how many arguments arrived and what they are.
printf '%s\n' "Total arguments passed are: $#"
printf '%s\n' "The arguments are: $*"
printf '%s\n' "The First Argument is: $1"
# Drop the first two parameters; the old $3 becomes the new $1.
shift 2
printf '%s\n' "The First Argument After Shift 2 is: $1"
# Drop one more parameter.
shift
printf '%s\n' "The First Argument After Shift is: $1"
# END #
| true
|
0411395ae643de125f236a3f57a13b2b4de67958
|
Shell
|
kergoth/mac-game-tools
|
/unnative
|
UTF-8
| 903
| 3.75
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# unnative: for each game bundle passed as an argument, replace the "native
# wrapper" bundle with the original bundle it keeps under
# <Game>/Contents/Resources/<Game>, moving the original back into the games
# directory in the wrapper's place.
#
# NOTE(review): non-interactive bash does not expand aliases, so this alias
# only takes effect under shells that expand aliases in scripts (e.g. dash);
# otherwise 'mv' runs without -v. Confirm the intended interpreter.
alias mv="mv -v"
# Iterate over the positional arguments (each one a game bundle path).
for game; do
    game_base="${game##*/}"
    # The wrapper stores the original bundle under Contents/Resources,
    # named the same as the wrapper itself; bail out on anything else.
    if ! [ -e "$game/Contents/Resources/$game_base" ]; then
        echo >&2 "Warning: unrecognized native game $game"
        continue
    fi
    original="$game/Contents/Resources/$game_base"
    if [ -h "$original/Game" ]; then
        # Resolve the "Game" symlink to the wrapped game directory and delete
        # leftover bxlaunch*.bat launcher scripts inside it.
        orig_game_dir="$original/$(readlink "$original/Game")"
        rm -fv "$orig_game_dir/"bxlaunch*.bat
        if [ -L "$orig_game_dir" ]; then
            # The game dir itself is a symlink: replace it with the real
            # directory it points at, so the bundle becomes self-contained.
            real_game_dir="$(cd "$orig_game_dir" && pwd -P)"
            if [ -e "$real_game_dir" ]; then
                rm "$orig_game_dir"
                mv "$real_game_dir" "$orig_game_dir"
            fi
        fi
    fi
    games_dir="$(dirname "$game")"
    # Swap via a temp dir in the same directory (same filesystem): move the
    # wrapper out of the way, then move the original bundle into its place.
    tmp="$(mktemp -d "$games_dir/${0##*/}.XXXXXX")"
    # NOTE(review): the trap is re-set on every iteration, so on EXIT only the
    # most recently created $tmp would be cleaned up.
    trap 'rm -rf "$tmp"' EXIT INT TERM
    mv "$game" "$tmp/"
    mv "$tmp/${original#$games_dir/}" "$games_dir/"
    rm -rf "$tmp"
done
| true
|
b3206e1ad3825e1cf0803504d32a1d986487fefe
|
Shell
|
diegosoriarios/SemestreIV
|
/SO1/Atividades 1/Exercicio4.sh
|
UTF-8
| 225
| 3.296875
| 3
|
[] |
no_license
|
#!/bin/bash
# Copy files from a source directory into a destination directory, but only
# when the destination copy is missing or older than the source file.
#
# Usage: Exercicio4.sh [src_dir] [dst_dir]
#   src_dir defaults to /home/diego/Documentos/aula
#   dst_dir defaults to ../teste2
#
# Fixes over the original:
#   - 'f= /home/...' assigned an empty string and then tried to execute the
#     path as a command; the source directory is now a proper assignment.
#   - the loop tested "$f" (the directory) instead of each "$file".
#   - '-z' tests string emptiness, not file absence; use '! -e' instead.

# sync_dir SRC DST: copy each regular entry of SRC into DST when the DST
# copy is missing or strictly older than the SRC file.
sync_dir() {
    local src=$1 dst=$2 file base
    # Nothing to do when the source directory does not exist.
    [ -d "$src" ] || return 0
    mkdir -p -- "$dst"
    for file in "$src"/*; do
        [ -e "$file" ] || continue   # unmatched glob stays literal; skip it
        base=${file##*/}
        # Copy when the destination is missing or older than the source.
        if [ ! -e "$dst/$base" ] || [ "$file" -nt "$dst/$base" ]; then
            cp -v -- "$file" "$dst/"
            echo "Arquivo copiado com sucesso"
        fi
        echo "$file"
    done
}

sync_dir "${1:-/home/diego/Documentos/aula}" "${2:-../teste2}"
| true
|
466361b506a0a23df3425d0108eb420d69a24e03
|
Shell
|
davegutz/pyDAGx
|
/pyDAG3/Apps/makeCMD/makeTBLADJ
|
UTF-8
| 2,683
| 3.90625
| 4
|
[
"MIT"
] |
permissive
|
#!bash
# makeTBLADJ
# Wrapper script on Rob Boguski's perl map2tbl and ins2adj
# 11-Aug-2009 DA Gutz Created
#
# Converts *_map.inp inputs into an as.tbl table file (via map2tbl) and
# *_ins.inp inputs into an as.adj adjustment file (via ins2adj).
# NOTE(review): '#!bash' is not an absolute interpreter path; this relies on
# PATH lookup or the script being invoked as 'bash makeTBLADJ' -- confirm.
# Defaults
PGM="ge38"
VER=v1.22
# Initialize
help=0
debug=0
verbose=0
force=0
quitting=0
args=""
DATE=`date +%Y%m%d`
# getopt from /usr/lib/getopt/parse.bash
# Parse short/long options with util-linux getopt, then re-set "$@".
TEMP=`getopt -o dfhp:v: --long debug,force,help,program:,version: \
-n 'makeCMD' -- "$@"`
if [ $? != 0 ] ; then echo "Terminating..." >&2 ; exit 1 ; fi
eval set -- "$TEMP"
while true ; do
case "$1" in
-d|--debug) debug=1; set -x; shift ;;
-f|--force) force=1; shift ;;
-h|--help) help=1; shift ;;
-p|--program) shift; PGM="$1"; shift ;;
-v|--version) shift; VER="$1"; shift ;;
--) shift; break ;;
*) echo "bad option"; quitting=1; exit 1 ;;
esac
done
# Print usage and exit when help was requested (or option parsing flagged a
# quit). Note: the ( ... ) runs the tests in a subshell; the if uses its
# exit status.
if ( test $help -eq "1" || test $quitting -eq "1" )
then
echo "Makes .adj and .tbl files from files like:"
echo "09_ET_01_map.inp and 09_ET_01_ins.inp"
echo "usage"
echo " $0 [options]"
echo " options:"
echo " -d, --debug echo all commands"
echo " -f, --force force program to remake all"
echo " -h, --help this screen"
echo " -p, --program program id [$PGM]"
echo " -v, --version software version [$VER]"
echo " "
exit 0
fi
# Traps
# Clean up the locally-copied perl scripts on exit or signal.
# NOTE(review): signal 9 (KILL) cannot actually be trapped.
trap 'rm -f map2tbl ins2adj .temp;' 0 1 2 9 15
# Dirty hack: make perl scripts local to avoid stupid path games
MAPFIL=`which map2tbl`
if test $? -eq 0
then
cp $MAPFIL .
else
echo "ERROR $0: map2tbl not found. Have you run 'make install' in base level of this package?"
exit 1
fi
INSFIL=`which ins2adj`
if test $? -eq 0
then
cp $INSFIL .
else
echo "ERROR $0: ins2adj not found. Have you run 'make install' in base level of this package?"
exit 1
fi
if test $debug -eq "1"
then
verbose=4
fi
# tbls
# Convert any *_map.inp files into as.tbl.
haveFiles=0
# The backquoted ls runs with all output discarded; the substitution expands
# to nothing, so the if's status is that of ls itself (files present or not).
if `ls *_map.inp >/dev/null 2>&1`
then
echo "Making tbl files from:"
ls *_map.inp
haveFiles=1
fi
if test $haveFiles -eq "1"
then
# Run in the background then immediately wait, so $? reflects perl's status.
perl ./map2tbl as.tbl *_map.inp &
wait $!
if test "$? " == "0 "
then
echo "success with _map.inp"
else
echo "ERROR($0): map2tbl failed"
exit 1
fi
else
echo "No _map.inp files... continuing"
fi
# adjs
# Convert any *_ins.inp files into as.adj (same pattern as the tbls above).
haveFiles=0
if `ls *_ins.inp >/dev/null 2>&1`
then
echo "Making adj files from:"
ls *_ins.inp
haveFiles=1
fi
if test $haveFiles -eq "1"
then
perl ./ins2adj as.adj *_ins.inp &
wait $!
if test "$? " == "0 "
then
echo "success with _ins.inp"
else
echo "ERROR($0): ins2adj failed"
exit 1
fi
else
echo "No _ins.inp files"
fi
# Cleanup and quit
rm -f map2tbl ins2adj
echo "$0: done."
| true
|
bdb0a2a51360f2884536b1e0af160cd32ea711e0
|
Shell
|
kusid/herd-mdl
|
/mdl/src/test/scripts/sh/testSetup.sh
|
UTF-8
| 2,501
| 3.46875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#
# Copyright 2018 herd-mdl contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# testSetup.sh: prepares the MDL test environment and runs the Java test
# setup wrapper. $1 is the deploy properties file.
#
# FIX: the shebang must be the very first line of the file to be honored by
# the kernel; it previously appeared after the license header.
#
# Echo the invocation arguments for the job log.
echo "$@"
# Abort the whole script when the previous command reported a failure.
#   $1 - exit status to inspect
#   $2 - human-readable description of the command that produced it
# On a non-zero status, logs a timestamped error line and exits with code 1;
# otherwise returns 0. (return_code/cmd are left as globals, as before.)
check_error() {
  return_code=${1}
  cmd="$2"
  if [ ${return_code} -ne 0 ]
  then
    echo "$(date "+%m/%d/%Y %H:%M:%S") *** ERROR *** ${cmd} has failed with error $return_code"
    exit 1
  fi
}
# Execute the given command and support resume option
# Echoes the command, runs it via eval, and delegates failure handling to
# check_error using the exit status of the first pipeline stage.
# NOTE(review): $cmd is expanded unquoted here, which word-splits and
# collapses whitespace inside the command string -- callers appear to pass
# simple single-line commands only; confirm before changing quoting.
function execute_cmd {
cmd="${1}"
echo $cmd
eval $cmd
check_error ${PIPESTATUS[0]} "$cmd"
}
#MAIN
# Entry point: $1 is the path to the deploy properties file, which is sourced
# below to provide EnableSSLAndAuth, RegionName, JavaKeyStoreFile, etc.
deployPropertiesFile=$1
# Source the properties
. ${deployPropertiesFile}
execute_cmd "cd /home/ec2-user"
if [ "${EnableSSLAndAuth}" == 'true' ] ; then
#1. add LDAP certificate to trusted store
execute_cmd "aws configure set default.region ${RegionName}"
# Look up the LDAP endpoint published in SSM Parameter Store.
LDAP_SERVER=$(aws ssm get-parameter --name "/mdl/ldap/hostname" --output text --query Parameter.Value)
# NOTE(review): LDAP_BASE_DN is fetched but never used below -- confirm.
LDAP_BASE_DN=$(aws ssm get-parameter --name "/mdl/ldap/base_dn" --output text --query Parameter.Value)
# export LDAP server cert
echo | openssl s_client -connect "$LDAP_SERVER:636" | sed -ne '/-BEGIN CERTIFICATE-/,/-END CERTIFICATE-/p' > ldapserver.pem
# convert LDAP server cert from pem to der
openssl x509 -outform der -in ldapserver.pem -out ldapserver.der
# import LDAP server cert into Java truststore
sudo keytool -import -alias mdl-ldap --keystore /usr/lib/jvm/jre/lib/security/cacerts --storepass changeit -file ldapserver.der -noprompt
# remove temporary LDAP certs
rm -fr ldapserver.pem ldapserver.der
#2. copy certs jks to mdlt deploy host
execute_cmd "aws s3 cp ${JavaKeyStoreFile} certs.jks"
fi
# Build a ':'-separated classpath from the jars directly under ./lib
# (find emits 'path,' entries; sed then turns the commas into ':').
export APP_LIB_JARS=`find lib -maxdepth 1 -type f -name \*.jar -printf '%p,' 2>/dev/null | sed "s/,/:/g"`
# Execute the test cases
execute_cmd "java -DDeployPropertiesFile=$deployPropertiesFile -cp mdlt/lib/mdl-1.0.0-tests.jar:$APP_LIB_JARS org.tsi.mdlt.util.TestWrapper setup"
exit 0
| true
|
d1996377993c44854727673e8b4a4e3657f478b7
|
Shell
|
LawsonDaiki/google-it-automation-with-python
|
/using-python-to-interact-with-the-operating-system/week-six/gather-information.sh
|
UTF-8
| 269
| 2.640625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Print a timestamped snapshot of basic system status: load (uptime),
# memory (free) and logged-in users (who), separated by dashed rules.
line="-------------------------------------------------------------------"

# report_section LABEL COMMAND: print the label, run the command, then a rule.
report_section() {
  echo "$1"
  "$2"
  echo $line
}

echo "Starting time at: $(date)"
echo $line
report_section "UPTIME" uptime
report_section "FREE" free
report_section "WHO" who
echo "Finshing at: $(date)"
| true
|
79c358a0e58295f8f6fd1e53f06f10a5f01ed7b9
|
Shell
|
refola/scripts
|
/filesystem/fix-permissions.sh
|
UTF-8
| 419
| 3.234375
| 3
|
[
"Unlicense",
"LicenseRef-scancode-public-domain"
] |
permissive
|
#!/usr/bin/env bash
# Restore the expected owners and group permissions on the user home
# directories and the shared area. Requires sudo rights.
fix_cmds=(
  "sudo chmod -R g=u /home/kelci"
  "sudo chown -R kelci:kelci /home/kelci"
  "sudo chmod -R g=u /home/mark"
  "sudo chown -R mark:mark /home/mark"
  "sudo chmod -R g=u /home/minecraft"
  "sudo chown -R mark:mark /home/minecraft"
  "sudo chmod -R go=u /shared"
  "sudo chown -R :users /shared"
)
echo "Fixing permissions for proper access to stuff...."
for fix in "${fix_cmds[@]}"; do
  echo "$fix"   # show each command before running it
  eval "$fix"
done
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.