blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
bf8cc88e11af4adba725fbc342f8c0be74f44fe7
|
Shell
|
tpozzi/gc.script
|
/lab-2/getanimal.sh
|
UTF-8
| 310
| 3.5625
| 4
|
[] |
no_license
|
#!/bin/bash
#
# getanimal.sh - prompt for a 1-based index and print the matching
# colour/animal pair. (Fixes the "Gibve an number" prompt typo and
# rejects out-of-range input instead of printing an empty pair.)

colours=("red" "green" "blue")
declare -A animals
animals=([red]="cardinal" [green]="frog" [blue]="lobster")

# describe_choice NUM
#   NUM: 1-based index into colours[].
#   Prints "Index NUM finds a COLOUR ANIMAL" on stdout.
#   Returns 1 (with a message on stderr) when NUM is not an integer in
#   the range 1..${#colours[@]}.
describe_choice() {
  local num=$1
  if ! [[ "$num" =~ ^[0-9]+$ ]] || (( num < 1 || num > ${#colours[@]} )); then
    echo "Please give a number from 1 to ${#colours[@]}" >&2
    return 1
  fi
  local colour=${colours[num - 1]}
  local animal=${animals[$colour]}
  echo "Index $num finds a $colour $animal"
}

read -p "Give a number from 1 to ${#colours[@]}: " num
describe_choice "$num"
| true
|
6c6b7059b6b33cd0974fac414a1b54117b3838c3
|
Shell
|
necmettinolcay/bash-script
|
/fetch_and_deploy.sh
|
UTF-8
| 751
| 3.65625
| 4
|
[] |
no_license
|
#!/bin/bash
#
# fetch_and_deploy.sh - clone a Redmine plugin from the internal git
# server into the plugin directory, then install gems, run the plugin
# migrations and restart Redmine. Must run as the "redmine" user.
#
# Usage: fetch_and_deploy.sh <plugin-name>

GIT_REPO="git@l.example.com.tr:redmine_plugins"
REDMINE_ROOT='/opt/redmine'
PLUGIN_DIR="${REDMINE_ROOT}/plugins"
PLUGIN=$1

# shellcheck disable=SC1090
source "$HOME/.bashrc"

# Guard: must be the redmine user. Exit non-zero so callers can detect
# the failure (the original bare 'exit' returned 0 here).
if [ "$(id -u)" != "$(id -u redmine)" ]; then
echo "script will only work with redmine user " 1>&2
exit 1
fi

# Require a plugin name; without this, the script would try to clone
# the bare "$GIT_REPO/" URL.
if [ -z "$PLUGIN" ]; then
echo "usage: $0 <plugin-name>" 1>&2
exit 1
fi

# Clone the plugin unless it is already present.
function clonePlugin {
if [[ ! -d "$PLUGIN_DIR/$PLUGIN" ]]; then
echo "$PLUGIN is clonning from repo "
# '&&' so we never clone into the wrong directory when cd fails.
cd "$PLUGIN_DIR" && git clone "$GIT_REPO/$PLUGIN"
else
echo "$PLUGIN is exist in plugin directory"
exit 1
fi
}

# Stop Redmine, install gems, migrate the plugin, start Redmine again.
function addPlugin {
service redmine stop
echo "Updating redmine"
cd "$REDMINE_ROOT" || exit 1
bundle install --without postgresql development test sqlite
rake redmine:plugins:migrate RAILS_ENV=production
service redmine start
}

clonePlugin
addPlugin
| true
|
5c8d73a328ba87639a79c2b22773f7f1d37786db
|
Shell
|
AntoineZen/SeS
|
/lab2/3_partition_sd_install.sh
|
UTF-8
| 2,375
| 3.140625
| 3
|
[] |
no_license
|
#!/bin/bash
#
# Partition an SD card and install the complete Odroid XU3 software
# stack: MSDOS partition table, three ext4 partitions, the raw boot
# chain (BL1, BL2, U-Boot, TrustZone firmware), the root filesystem
# image, and the kernel + device tree on the boot partition.
#
# WARNING: destructive — everything on /dev/${TARGET_VOLUME} is erased.
# Double-check TARGET_VOLUME before running.
TARGET_VOLUME=sdb
IMAGEDIR="/home/antoine/workspace/xu3/buildroot/output/images"
echo "Target volume is ${TARGET_VOLUME}"
echo "Image dir is ${IMAGEDIR}"
# Unmount any existing partitions before repartitioning.
# NOTE(review): umount is invoked without sudo here while every other
# privileged command uses sudo — confirm the script is expected to run
# as root, otherwise these umounts silently fail.
if [ -e "/dev/${TARGET_VOLUME}1" ]
then
umount /dev/${TARGET_VOLUME}1
fi
if [ -e "/dev/${TARGET_VOLUME}2" ]
then
umount /dev/${TARGET_VOLUME}2
fi
if [ -e "/dev/${TARGET_VOLUME}3" ]
then
umount /dev/${TARGET_VOLUME}3
fi
# Initialize the first sector to zero, to erase the partition table
sudo dd if=/dev/zero of=/dev/${TARGET_VOLUME} bs=512 count=1
sync
echo "First sector: msdos"
sudo parted /dev/${TARGET_VOLUME} mklabel msdos
# Create the 1st partition (bootfs): sectors 32768-163839 = start 16MB, length 64MB
sudo parted /dev/${TARGET_VOLUME} mkpart primary ext4 32768s 163839s
# Create the 2nd partition (rootfs): start 80MB, length 256MB
sudo parted /dev/${TARGET_VOLUME} mkpart primary ext4 163840s 688127s
# Create the 3rd partition (usrfs): start 336MB, length 256MB
sudo parted /dev/${TARGET_VOLUME} mkpart primary ext4 688128s 1212415s
# Format the 1st, 2nd and 3rd partitions as ext4 with volume labels
sudo mkfs.ext4 /dev/${TARGET_VOLUME}1 -L bootfs
sudo mkfs.ext4 /dev/${TARGET_VOLUME}2 -L rootfs
sudo mkfs.ext4 /dev/${TARGET_VOLUME}3 -L usrfs
sync
# Show partition table
echo "Patition table is"
sudo fdisk -l /dev/${TARGET_VOLUME}
sleep 3
# Unmount again in case something (e.g. a desktop automounter) grabbed
# the freshly formatted partitions during the sleep.
if [ -e "/dev/${TARGET_VOLUME}1" ]
then
umount /dev/${TARGET_VOLUME}1
fi
if [ -e "/dev/${TARGET_VOLUME}2" ]
then
umount /dev/${TARGET_VOLUME}2
fi
if [ -e "/dev/${TARGET_VOLUME}3" ]
then
umount /dev/${TARGET_VOLUME}3
fi
# Copy the firmware (bl1.bin, bl2.bin, tzsw.bin) to the raw sector
# offsets outside the partitions (sectors 1, 31, 63, 719).
echo "Copy bl1"
sudo dd if=${IMAGEDIR}/xu3-bl1.bin of=/dev/${TARGET_VOLUME} bs=512 seek=1
echo "Copy bl2"
sudo dd if=${IMAGEDIR}/xu3-bl2.bin of=/dev/${TARGET_VOLUME} bs=512 seek=31
# Copy u-boot
echo "Copy U-Boot"
sudo dd if=${IMAGEDIR}/u-boot.bin of=/dev/${TARGET_VOLUME} bs=512 seek=63
echo "Copy trustZone"
sudo dd if=${IMAGEDIR}/xu3-tzsw.bin of=/dev/${TARGET_VOLUME} bs=512 seek=719
# Copy rootfs — written raw at sector 163840, i.e. directly over
# partition 2's extent (replaces the empty mkfs.ext4 filesystem above).
echo "Copy rootfs"
sudo dd if=${IMAGEDIR}/rootfs.ext4 of=/dev/${TARGET_VOLUME} bs=512 seek=163840
# Copy kernel & flattened device tree onto the boot partition
sudo mount /dev/${TARGET_VOLUME}1 /mnt
echo "Copy kernel"
sudo cp ${IMAGEDIR}/uImage /mnt/
echo "copy device tree"
sudo cp ${IMAGEDIR}/exynos5422-odroidxu3.dtb /mnt/
sudo umount /dev/${TARGET_VOLUME}1
# Show partition table
echo "Patition table is"
sudo fdisk -l /dev/${TARGET_VOLUME}
| true
|
64e49bd5f44905683933f49164c0ed29221b9910
|
Shell
|
muyunren/AlphaGOZero-python-tensorflow
|
/support/go-NN-master/data/divide_set.sh
|
UTF-8
| 733
| 3.96875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
#
# divide_set.sh - randomly split the .npz minibatch files in a
# directory into train/val/test subdirectories (~10% val, ~10% test,
# the rest train).
#
# Usage: divide_set.sh <directory>

if [ "$#" -ne 1 ] ; then
echo "Must specify a directory."
exit 1
fi
dir=$1
cd "$dir" || exit 1
echo "Going to divide $dir into training, validation, and test sets."

# Unpredictable temp name instead of a fixed /tmp path (the fixed name
# could be clobbered by, or collide with, a concurrent run).
tmp_list=$(mktemp) || exit 1

# Anchor the pattern: an unanchored '.npz' (with '.' as a regex
# metacharacter) also matched names like 'foo_npz_bar'.
ls | grep '\.npz$' | shuf > "$tmp_list"

num_mb=$(wc -l < "$tmp_list")
num_val=$((num_mb / 10))
num_test=$((num_mb / 10))
num_train=$((num_mb - num_val - num_test))
echo "Total number of minibatches is $num_mb"
echo "$num_train to training set."
echo "$num_val to validation set."
echo "$num_test to test set."

mkdir -p train
mkdir -p val
mkdir -p test

# First num_val shuffled names -> val, last num_test -> test,
# everything left behind -> train.
head "-$num_val" "$tmp_list" | while read -r fn ; do
mv "$fn" val
done
tail "-$num_test" "$tmp_list" | while read -r fn ; do
mv "$fn" test
done
mv *.npz train
echo "Done."
rm -f "$tmp_list"
| true
|
726d122f2cc4141a98a68cfaf5a8b90b0098c5a6
|
Shell
|
0xAsh/Snap_Generator
|
/snap_generator.sh
|
UTF-8
| 526
| 3.546875
| 4
|
[] |
no_license
|
#!/bin/bash
#
# snap_generator.sh - build a snap package whose install hook runs an
# arbitrary command, using fpm. Prompts for the payload command and the
# package name, then copies the generated .snap back to the directory
# the script was started from.

RED='\033[0;31m'
NC='\033[0m' # No Color
curdir=$(pwd)

## Variable setting
echo "Enter payload: "
read -r COMMAND
echo -e "Payload is set to: $COMMAND \n"
echo "Enter payload name: "
read -r payload
echo -e "\n"
echo -e "${RED}...Generating Payload...${NC}\n"

## Payload generation
# Build inside a fresh temp dir; abort if mktemp/cd fails so we never
# scribble over the current directory.
cd "$(mktemp -d)" || exit 1
mkdir -p meta/hooks
# NOTE(review): the trailing 'false' makes the install hook exit
# non-zero — presumably intentional (snapd then aborts the install);
# confirm before changing.
printf '#!/bin/sh\n%s; false' "$COMMAND" >meta/hooks/install
chmod +x meta/hooks/install
fpm -n "$payload" -s dir -t snap -a all meta

## Return to original position
cp "$payload"* "$curdir" && cd "$curdir"
| true
|
39f01940bd87f68efde4489d419167d634d59cfb
|
Shell
|
jimpick/nixos-upstream-svn-nixpkgs
|
/pkgs/misc/sane-backends/builder.sh
|
UTF-8
| 230
| 2.671875
| 3
|
[
"MIT"
] |
permissive
|
# Nix stdenv builder for sane-backends: runs the generic build and,
# when udev support is enabled, installs the bundled udev rules file.
source $stdenv/setup
# Hook executed by genericBuild after 'make install'.
postInstall() {
if test "$udevSupport" = "1" ; then
# NOTE(review): 'ensureDir' is the legacy nixpkgs helper (modern
# nixpkgs uses 'mkdir -p') — presumably correct for this nixpkgs
# vintage; confirm.
ensureDir $out/etc/udev/rules.d/
cp tools/udev/libsane.rules $out/etc/udev/rules.d/60-libsane.rules
fi
}
# Register the hook under the name genericBuild looks up.
postInstall=postInstall
genericBuild
| true
|
b917d21adec357b9f1dc50cb1ef1e15358da5345
|
Shell
|
neurosys/Scripts
|
/ffsplit.sh
|
UTF-8
| 775
| 3.53125
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
#
# ffsplit.sh - cut a clip out of a video between two timestamps without
# re-encoding (stream-copies both audio and video).
#ffmpeg -i 2021-06-19-Masina_in_flacari_VICO5939.MP4 -acodec copy -vcodec copy -ss 02:10 -t 00:00:20 flacari_out.mp4
# -to "$end"
# -t "$length"

# Print usage information.
function help() {
echo -e "Usage:\n"
echo -e "\t$(basename "$0") <input file> <start moment> <end moment> <output file>\n\n"
echo -e "Format:"
echo -e "\tstart: 01:23:45"
echo -e "\t  end: 01:23:45"
}

in="$1"
start=$2
end=$3
out="$4"

# checkParam VALUE LABEL - print usage and exit 1 when VALUE is empty.
function checkParam() {
if [[ -z "$1" ]]
then
echo -e "Argument ${2} is missing\n\n"
help
exit 1
fi
}

checkParam "$in" "<input file>"
checkParam "$out" "<output file>"
checkParam "$start" "<start moment>"
checkParam "$end" "<end moment>"

# Quote start/end so the values can never be word-split or globbed.
ffmpeg -i "$in" -acodec copy -vcodec copy -ss "$start" -to "$end" "$out"
| true
|
f71c97a85742e30580b462dde5d09826aa664515
|
Shell
|
bioconda/bioconda-recipes
|
/recipes/perl-xml-libxslt/build.sh
|
UTF-8
| 695
| 3.546875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
if [ `uname -s` == "Darwin" ]; then
# Needed because, despite the message, "LIBS=" alone does not seem to
# enough information for the build system to find libxslt.
export DYLD_FALLBACK_LIBRARY_PATH="${PREFIX}/lib"
fi
# If it has Build.PL use that, otherwise use Makefile.PL
if [ -f Build.PL ]; then
perl Build.PL
./Build
./Build test
# Make sure this goes in site
./Build install --installdirs site
elif [ -f Makefile.PL ]; then
# Make sure this goes in site
perl Makefile.PL INSTALLDIRS=site DEBUG=1
make
make test
make install
else
echo 'Unable to find Build.PL or Makefile.PL. You need to modify build.sh.'
exit 1
fi
| true
|
2da71474615e3b5dcf0580865dd5f0ba9b53e8bf
|
Shell
|
abstractdog/yetus
|
/precommit/src/main/shell/test-patch.d/checkmake.sh
|
UTF-8
| 4,644
| 3.390625
| 3
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"MIT",
"OFL-1.1",
"Python-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# SHELLDOC-IGNORE
add_test_type checkmake
CHECKMAKE_TIMER=0
CHECKMAKE=${CHECKMAKE:-$(command -v checkmake 2>/dev/null)}
function checkmake_usage
{
yetus_add_option "--checkmake=<path>" "path to checkmake executable"
yetus_add_option "--checkmake-config=<path>" "relative path to checkmake config in source tree [default: none]"
}
function checkmake_parse_args
{
local i
for i in "$@"; do
case ${i} in
--checkmake=*)
CHECKMAKE=${i#*=}
;;
--checkmake-config=*)
CHECKMAKE_CONFIG=${i#*=}
;;
esac
done
}
function checkmake_filefilter
{
  local filename=$1

  # Queue the checkmake test when the changed file is a Makefile,
  # either at the repository root (^Makefile$) or in a subdirectory
  # (/Makefile$). The previous version tested the output of a bogus
  # '$(unknown)' command, so the captured $filename was never consulted
  # and the test never triggered.
  if [[ ${filename} =~ /Makefile$ ]] || [[ ${filename} =~ ^Makefile$ ]]; then
    add_test checkmake
  fi
}
function checkmake_precheck
{
if ! verify_command checkmake "${CHECKMAKE}"; then
add_vote_table_v2 0 checkmake "" "checkmake was not available."
delete_test checkmake
fi
}
function checkmake_exec
{
declare i
declare repostatus=$1
declare -a args
echo "Running checkmake against identified Makefiles."
pushd "${BASEDIR}" >/dev/null || return 1
args=('--format={{.LineNumber}}:{{.Rule}}:{{.Violation}}')
if [[ -f "${CHECKMAKE_CONFIG}" ]]; then
args+=("--config=${CHECKMAKE_CONFIG}")
fi
for i in "${CHANGED_FILES[@]}"; do
if [[ ${i} =~ /Makefile$ ]] || [[ ${i} =~ ^Makefile$ ]]; then
if [[ -f ${i} ]]; then
while read -r; do
echo "${i}:${REPLY}" >> "${PATCH_DIR}/${repostatus}-checkmake-result.txt"
done < <("${CHECKMAKE}" "${args[@]}" "${i}")
fi
fi
done
popd >/dev/null || return 1
return 0
}
function checkmake_preapply
{
declare i
declare -a args
if ! verify_needed_test checkmake; then
return 0
fi
big_console_header "checkmake plugin: ${PATCH_BRANCH}"
start_clock
checkmake_exec branch
CHECKMAKE_TIMER=$(stop_clock)
return 0
}
## @description Wrapper to call column_calcdiffs
## @audience private
## @stability evolving
## @replaceable no
## @param branchlog
## @param patchlog
## @return differences
function checkmake_calcdiffs
{
column_calcdiffs "$@"
}
function checkmake_postapply
{
declare i
declare numPrepatch
declare numPostpatch
declare diffPostpatch
declare fixedpatch
declare statstring
if ! verify_needed_test checkmake; then
return 0
fi
big_console_header "checkmake plugin: ${BUILDMODE}"
start_clock
# add our previous elapsed to our new timer
# by setting the clock back
offset_clock "${CHECKMAKE_TIMER}"
checkmake_exec patch
calcdiffs \
"${PATCH_DIR}/branch-checkmake-result.txt" \
"${PATCH_DIR}/patch-checkmake-result.txt" \
checkmake \
> "${PATCH_DIR}/diff-patch-checkmake.txt"
diffPostpatch=$("${AWK}" -F: 'BEGIN {sum=0} 3<NF {sum+=1} END {print sum}' "${PATCH_DIR}/diff-patch-checkmake.txt")
# shellcheck disable=SC2016
numPrepatch=$("${AWK}" -F: 'BEGIN {sum=0} 3<NF {sum+=1} END {print sum}' "${PATCH_DIR}/branch-checkmake-result.txt")
# shellcheck disable=SC2016
numPostpatch=$("${AWK}" -F: 'BEGIN {sum=0} 3<NF {sum+=1} END {print sum}' "${PATCH_DIR}/patch-checkmake-result.txt")
((fixedpatch=numPrepatch-numPostpatch+diffPostpatch))
statstring=$(generic_calcdiff_status "${numPrepatch}" "${numPostpatch}" "${diffPostpatch}" )
if [[ ${diffPostpatch} -gt 0 ]] ; then
add_vote_table_v2 -1 checkmake "@@BASE@@/diff-patch-checkmake.txt" "${BUILDMODEMSG} ${statstring}"
return 1
elif [[ ${fixedpatch} -gt 0 ]]; then
add_vote_table_v2 +1 checkmake "" "${BUILDMODEMSG} ${statstring}"
return 0
fi
add_vote_table_v2 +1 checkmake "" "There were no new checkmake issues."
return 0
}
function checkmake_postcompile
{
declare repostatus=$1
if [[ "${repostatus}" = branch ]]; then
checkmake_preapply
else
checkmake_postapply
fi
}
| true
|
90f7acf83435cae6d43117821322e099994255d9
|
Shell
|
yogeshVU/Interference-Aware-Cluster-Management
|
/shell-scripts/for_loop.sh
|
UTF-8
| 1,398
| 2.53125
| 3
|
[] |
no_license
|
#!/bin/bash
# Run every pair of SPEC CPU2006 workloads (pinned to NUMA nodes via
# numactl) and sample their hardware counters with libpfm's 'task'
# example, writing per-pair result files.
cd /home/spec2006
loadarray=( bzip2 bwaves mcf milc leslie3d namd gobmk lbm libquantum sjeng )
for i in ${loadarray[@]}; do
for j in ${loadarray[@]}; do
load1=$i
load2=$j
# Same workload paired with itself: run one copy alone on node 1.
if [ "$load1" = "$load2" ]
then
numactl --cpubind=1 --membind=1 runspec --config=mytest.cfg --size=ref --noreportable --tune=base --iteration=1 "$load2" &
sleep 2
pid2=$(pgrep $load2)
# NOTE(review): './home/libpfm-...' is a *relative* path — after the cd
# above it resolves to /home/spec2006/home/libpfm-..., which almost
# certainly should be the absolute '/home/libpfm-...'; confirm.
# NOTE(review): this branch writes to result-for-loop"$load2".txt (no
# slash) while the else-branch writes under result-for-loop/ — confirm
# which layout is intended.
./home/libpfm-4.8.0/perf_examples/task -t $pid2 -p -e UNHALTED_CORE_CYCLES -e INSTRUCTION_RETIRED -e LLC_MISSES 1> /home/xiandong-Code-Result/result-for-loop"$load2".txt &
else
# Distinct workloads: co-run both on node 0 and sample each one.
numactl --cpubind=0 --membind=0 runspec --config=mytest.cfg --size=ref --noreportable --tune=base --iteration=1 "$load1" &
numactl --cpubind=0 --membind=0 runspec --config=mytest.cfg --size=ref --noreportable --tune=base --iteration=1 "$load2" &
sleep 2
pid1=$(pgrep $load1)
pid2=$(pgrep $load2)
echo $load1 $pid1
echo $load2 $pid2
./home/libpfm-4.8.0/perf_examples/task -t $pid1 -p -e UNHALTED_CORE_CYCLES -e INSTRUCTION_RETIRED -e LLC_MISSES 1> /home/xiandong-Code-Result/result-for-loop/"$load1"_under_"$load2".txt &
./home/libpfm-4.8.0/perf_examples/task -t $pid2 -p -e UNHALTED_CORE_CYCLES -e INSTRUCTION_RETIRED -e LLC_MISSES 1> /home/xiandong-Code-Result/result-for-loop/"$load2"_under_"$load1".txt &
fi
# Kill leftover SPEC 'base' binaries before the next pair.
# NOTE(review): the same kill is issued three times — presumably to
# catch respawns/races; confirm whether a loop-until-dead was intended.
kill -9 `pgrep base`
kill -9 `pgrep base`
kill -9 `pgrep base`
done
done
| true
|
88bf893167030f09a5db0abc5e364120c06102b1
|
Shell
|
oktbabs/fmw_repo
|
/InstantiationScripts/makeOCFSdisk_OBIE_specific.sh
|
UTF-8
| 8,469
| 3.71875
| 4
|
[] |
no_license
|
#++++++++++++++++++++++++++++++++++++++++++++++
# Script Name : makeOCFS2DiskOnline.sh +
# Written By : Timmy Babayeju +
# From Company : Fujitsu +
# To Company : FSA +
#++++++++++++++++++++++++++++++++++++++++++++++
# DESCRIPTION +
#++++++++++++++++++++++++++++++++++++++++++++++
# This script is specifically for the current +
# OBIEE VM +
# Script provisions a new file system after +
# Template instantiation. +
# The script does the following on each VM: +
# 1) format Disk (2) creates OCFS2 Cluster +
# (3) Configures o2CB Services (4) Makes +
# OCFS2 filesystem (5) Mounts the filesystem +
# and (6) Changes filesystem ownership +
#++++++++++++++++++++++++++++++++++++++++++++++
#!/bin/sh
dos2unix /tmp/appCSVFolder/inputAppProperties.csv
. /tmp/appCSVFolder/inputAppProperties.csv
# LOG AND CSV Folder
LOGFOLDER=/tmp/appPostLaunch/logs
CSVHOME=/tmp/appCSVFolder
myHost=`hostname|cut -d. -f1`
myIPAddr=`/sbin/ip addr|grep inet|grep eth0|awk -F'[/ ]' '{print $6}'`
## This function formats the disk used by the VMs in a cluster
## This function call should only be executed on the first node
echo ${ocfsDisk}
formatDisk_1(){
for (( a = 1 ; a <= 4; a++ ))
do
if [ $a -eq 1 ]
then
sz="+9G"
elif [ $a -eq 2 ]
then
sz="+3G"
elif [ $a -eq 3 ]
then
sz="+3G"
elif [ $a -eq 4 ]
then
sz="+3G"
else
sz=
fi
/sbin/fdisk ${ocfsDisk} <<EOF
n
p
$a
$sz
w
EOF
done
}
## This function part probes the disk used by the VMs in a cluster
## This function call should only be executed on the second VM
partProbeDisk_2(){
/sbin/partprobe ${ocfsDisk} <<EOF
n
p
1
w
EOF
}
## This function creates the cluster configuration file
## All the entries are populated from the $CSVHOME/inputAppProperties.csv
## This function creates the cluster configuration file
## All the entries are populated from the $CSVHOME/inputAppProperties.csv
## The function call should be executed on the Nodes consituting the filesystem cluster
createClusterFile(){
echo "#" >/etc/ocfs2/cluster.conf
echo "## Generated by script : Author - Timmy Babayeju, Fujitsu Limited for FSA" >>/etc/ocfs2/cluster.conf
echo "#" >>/etc/ocfs2/cluster.conf
echo "cluster:" >>/etc/ocfs2/cluster.conf
echo " node_count = $OCFS_NODES_TOTAL" >>/etc/ocfs2/cluster.conf
echo " name = $OCFS_CLUSTER_NAME" >>/etc/ocfs2/cluster.conf
for (( n = 1; n <= $OCFS_NODES_TOTAL; n++ ))
do
if [ $n = 1 ]
then
echo "node:" >>/etc/ocfs2/cluster.conf
echo " ip_port = 5555" >>/etc/ocfs2/cluster.conf
echo " ip_address = $OCFS_NODE_1_IPADDR" >>/etc/ocfs2/cluster.conf
echo " number = $n" >>/etc/ocfs2/cluster.conf
echo " name = $OCFS_NODE_1_NAME" >>/etc/ocfs2/cluster.conf
echo " cluster = $OCFS_CLUSTER_NAME" >>/etc/ocfs2/cluster.conf
else
echo "node:" >>/etc/ocfs2/cluster.conf
echo " ip_port = 5555" >>/etc/ocfs2/cluster.conf
echo " ip_address = $OCFS_NODE_2_IPADDR" >>/etc/ocfs2/cluster.conf
echo " number = $n" >>/etc/ocfs2/cluster.conf
echo " name = $OCFS_NODE_2_NAME" >>/etc/ocfs2/cluster.conf
echo " cluster = $OCFS_CLUSTER_NAME" >>/etc/ocfs2/cluster.conf
fi
done
}
## This function configures the O2CB service to use the cluster
configureServiceO2CB(){
/sbin/service o2cb configure <<!
y
$OCFS_CLUSTER_NAME
!
}
## This function creates the filesystem on each VM
## This function call should be executed on the 2 VMS
createOCFS_FS1(){
sleep 5
for (( p = 1;p <= 4;p++ ))
do
echo "------"
echo " ===>>> Creating OCFS2 file system on ${ocfsDisk}${p} info <<<==="
echo "------"
/sbin/mkfs.ocfs2 --fs-features=discontig-bg ${ocfsDisk}${p} <<EOF
y
EOF
done
}
## This function updates the FSTAB entries
## This function call should be executed on the 2 VMS
# Mount points: partition 1 -> admin root, partition 2 -> aserver
# (shared between both nodes); the mserver mount uses partition 3 on
# node 1 but partition 4 on node 2, giving each node a private mserver
# volume.
fsDir1=/u01/app/oracle/admin
fsDir2=/u01/app/oracle/admin/obi_domain/aserver
mserverDisk=/u01/app/oracle/admin/obi_domain/mserver
# NOTE(review): entries are appended unconditionally, so re-running the
# script duplicates lines in /etc/fstab — confirm whether a
# grep-before-append guard is needed.
updateFSTAB_1(){
if [ "$OCFS_NODE_1_NAME" == "$myHost" ]
then
echo "${ocfsDisk}1 $fsDir1 ocfs2 _netdev,defaults 0 0 " >> /etc/fstab
echo "${ocfsDisk}2 $fsDir2 ocfs2 _netdev,defaults 0 0 " >> /etc/fstab
echo "${ocfsDisk}3 $mserverDisk ocfs2 _netdev,defaults 0 0 " >> /etc/fstab
elif [ "$OCFS_NODE_2_NAME" == "$myHost" ]
then
echo "${ocfsDisk}1 $fsDir1 ocfs2 _netdev,defaults 0 0 " >> /etc/fstab
echo "${ocfsDisk}2 $fsDir2 ocfs2 _netdev,defaults 0 0 " >> /etc/fstab
echo "${ocfsDisk}4 $mserverDisk ocfs2 _netdev,defaults 0 0 " >> /etc/fstab
else
# Host is neither configured node: nothing to do.
echo
echo " <======NOTHING TO DO <======== "
echo
fi
}
## This function call should be executed on the the first VM
mountFS_1(){
echo "------"
mkdir -p $fsDir1
[ -d $fsDir1 ] && /bin/mount $fsDir1
if [ `df -h $fsDir1 >/dev/null 2>&1;echo $?` -eq 0 ];then
mkdir -p $fsDir2
if [ -d $fsDir2 ];then
/bin/mount $fsDir2
else
echo "ERROR : Cannot mount $fsDir2"
exit 1
fi
case $myHost in
$OCFS_NODE_1_NAME)
mkdir -p $mserverDisk
/bin/mount $mserverDisk
;;
$OCFS_NODE_2_NAME)
mkdir -p $mserverDisk
/bin/mount $mserverDisk
;;
esac
else
echo "ERROR : $fsDir1 is not mounted cannot create $fsDir2 and $mserverDisk"
exit 1
fi
echo "------"
}
fsChowner(){
echo "------"
echo " ===>>> Changing /u01/app/oracle/admin filesystem ownership to oracle:dba <<<==="
sudo chown -R oracle:dba /u01
echo "------"
}
## This section tests for which steps should be executed on each VM
if [ "$OCFS_NODE_1_NAME" == "$myHost" ]
then
echo "${OCFS_NODE_1_NAME}()"
{
ocfsDisk=${OCFS_DISK_1_NAME}
formatDisk_1 >$LOGFOLDER/makeOCFS2DiskOnline.log
createClusterFile >>$LOGFOLDER/makeOCFS2DiskOnline.log
configureServiceO2CB >>$LOGFOLDER/makeOCFS2DiskOnline.log
createOCFS_FS1 >>$LOGFOLDER/makeOCFS2DiskOnline.log
updateFSTAB_1 >>$LOGFOLDER/makeOCFS2DiskOnline.log
mountFS_1 >>$LOGFOLDER/makeOCFS2DiskOnline.log
fsChowner >>$LOGFOLDER/makeOCFS2DiskOnline.log
echo " --" >>$LOGFOLDER/makeOCFS2DiskOnline.log
echo " ===>> Disk management successfully completed on $OCFS_NODE_1_NAME <<===" >>$LOGFOLDER/makeOCFS2DiskOnline.log
echo " --" >>$LOGFOLDER/makeOCFS2DiskOnline.log
chown -R oracle:dba $CSVHOME/inputAppProperties.csv >>$LOGFOLDER/makeOCFS2DiskOnline.log
chown -R oracle:dba $LOGFOLDER/makeOCFS2DiskOnline.log
}
elif [ "$OCFS_NODE_2_NAME" == "$myHost" ]
then
echo "${OCFS_NODE_2_NAME}()"
{
ocfsDisk=${OCFS_DISK_2_NAME}
#
# Checking the server heartbeat of Node 2
#
[ `ping -c 1 ${OCFS_NODE_1_IPADDR} >/dev/null;echo $?` -ne 0 ] && echo "ERROR : Cannot contact NODE 1 on PRIVATE LINK" && exit 1
# Executing partProbe on Disk 2
/sbin/partprobe $ocfsDisk >>$LOGFOLDER/makeOCFS2DiskOnline.log
#
##Creating Cluster file
#
createClusterFile >>$LOGFOLDER/makeOCFS2DiskOnline.log
configureServiceO2CB >>$LOGFOLDER/makeOCFS2DiskOnline.log
#
#Checking if the fstab entry exists or not.
#
#[ `grep '/u01/app/oracle/admin' /etc/fstab >/dev/null 2>&1;echo $?` -ne 0 ] && updateFSTAB_1 >>$LOGFOLDER/makeOCFS2DiskOnline.log
#[ `grep '/u01/app/oracle/admin/obi_domain/aserver' /etc/fstab >/dev/null 2>&1;echo $?` -ne 0 ] && updateFSTAB_1 >>$LOGFOLDER/makeOCFS2DiskOnline.log
#[ `grep '/u01/app/oracle/admin/obi_domain/mserver' /etc/fstab >/dev/null 2>&1;echo $?` -ne 0 ] && updateFSTAB_1 >>$LOGFOLDER/makeOCFS2DiskOnline.log
updateFSTAB_1 >>$LOGFOLDER/makeOCFS2DiskOnline.log
mountFS_1 >>$LOGFOLDER/makeOCFS2DiskOnline.log
fsChowner >>$LOGFOLDER/makeOCFS2DiskOnline.log
echo " --" >>$LOGFOLDER/makeOCFS2DiskOnline.log
echo " ===>> Disk management successfully completed on $OCFS_NODE_2_NAME <<===" >>$LOGFOLDER/makeOCFS2DiskOnline.log
echo " --" >>$LOGFOLDER/makeOCFS2DiskOnline.log
}
else
echo " --" >>$LOGFOLDER/makeOCFS2DiskOnline.log
echo " ===>> Cannot mount file system !!. Check File system configuration details on CSV file <<===" >>$LOGFOLDER/makeOCFS2DiskOnline.log
echo " --" >>$LOGFOLDER/makeOCFS2DiskOnline.log
fi
chown -R oracle:dba $CSVHOME/inputAppProperties.csv >>$LOGFOLDER/makeOCFS2DiskOnline.log
chown -R oracle:dba $LOGFOLDER/makeOCFS2DiskOnline.log
| true
|
99036f39a2f88309cee087e3f7e2600f057b7493
|
Shell
|
japersik/itmo_linux_unix_systems_labs
|
/lab16/Lab16-1
|
UTF-8
| 270
| 3.28125
| 3
|
[] |
no_license
|
#!/bin/bash
# List network interfaces that currently have an IPv4 address, printing
# "<iface>:<TAB><addr info>" for each.
#
# Interface names: drop indented continuation lines and blank lines
# from ifconfig output, keep the part before the first ':'.
# NOTE(review): the first pattern "^ *.*$" matches *every* line (zero
# or more spaces followed by anything), so as written LST is always
# empty and the loop never runs — presumably "^ +.*$" (drop only the
# indented lines) was intended; confirm against the original intent.
LST=`ifconfig |grep -vE "^ *.*$"|grep -vE "^ *$"|awk -F : '{ print $1 }'`
for i in $LST; do
# The line immediately after "<iface>: ..." holds the address info.
L=`ifconfig | sed -ne "/$i:.*/{n;p;}"`
if echo $L | grep "inet " >/dev/null; then
echo -en "$i:\t"
echo $L| awk '{ print $1,$2 }'
fi
done
| true
|
55097acdb6d43b699bfe8124fd49ae2f7a8ba70b
|
Shell
|
philippe-vandermoere/skeleton
|
/skeleton/.githooks/pre-push
|
UTF-8
| 728
| 3.765625
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env sh
# This hook is called by git-push and can be used to prevent a push from taking place.
# This hook is called with the following parameters:
# $1 -- Name of the remote to which the push is being done
# $2 -- URL to which the push is being done
set -e

readonly REMOTE_NAME=$1
readonly REMOTE_URL=$2
readonly BRANCH_REGEX='^(master|develop|(release\/[0-9]+.[0-9]+.[0-9]+)|(fix|hotfix|feature)\/[a-zA-Z0-9_-]{5,100})$'
readonly RESET='\033[0;0m'
readonly RED='\033[0;31m'

# check branch name
# POSIX-compatible: the previous version relied on the bash-only [[ ]]
# and on 'echo -e', both of which break when /bin/sh is dash or another
# strict POSIX shell; printf '%b' expands the \033 colour escapes
# portably.
if ! git symbolic-ref HEAD --short | LC_CTYPE=C grep -qE "${BRANCH_REGEX}"; then
    printf '%b\n' "${RED}The branch name must respect the regex '${BRANCH_REGEX}'.${RESET}"
    exit 1
fi

make phpcs
| true
|
a0ab2ea95c76112456d2e81d86ba5ddc31f6eb49
|
Shell
|
cloudfoundry-incubator/kubo-release
|
/jobs/flanneld/templates/bin/flanneld_ctl.erb
|
UTF-8
| 2,979
| 3.609375
| 4
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash -ex
NAME="${0##*/}"
export PATH=/var/vcap/packages/flanneld/:$PATH
<%-
def get_url(server, port)
if link('etcd').p('etcd.dns_suffix', false) != false
node_name = "#{server.name.gsub('_','-')}-#{server.index}"
return "https://#{node_name}.#{link('etcd').p('etcd.dns_suffix')}:#{port}"
else
return "https://#{server.address}:#{port}"
end
end
-%>
RUN_DIR=/var/vcap/sys/run/flanneld
PIDFILE=$RUN_DIR/flanneld.pid
LOG_DIR=/var/vcap/sys/log/flanneld
# shellcheck disable=SC1091
. /var/vcap/packages/pid_utils/pid_utils.sh
setup_directories() {
mkdir -p "$RUN_DIR" "$LOG_DIR"
chown -R vcap:vcap "$RUN_DIR" "$LOG_DIR"
}
send_process_stdout_to_logfile() {
exec 1>> "$LOG_DIR/$NAME.stdout.log"
}
send_process_stderr_to_logfile() {
exec 2>> "$LOG_DIR/$NAME.stderr.log"
}
start_flanneld() {
<% etcd_endpoints = link('etcd').instances.map { |server| get_url(server, 2379) }.join(",") %>
modprobe br_netfilter || true
mkdir -p /dev/net
mknod /dev/net/tun c 10 200 || true
echo 1 > /proc/sys/net/ipv4/ip_forward
mkdir -p /etc/cni/net.d
rm /etc/cni/net.d/10-flannel.conf || true
cat > /etc/cni/net.d/50-flannel.conflist <<EOL
{
"name": "flannel-network",
"cniVersion": "0.3.1",
"plugins": [
{
"type": "flannel",
"delegate": {
"hairpinMode": true,
"isDefaultGateway": true
}
},
{
"type": "portmap",
"capabilities": {
"portMappings": true
}
}
]
}
EOL
<%
network_conf = {'Network' => p('pod-network-cidr'), 'Backend' => {'Type': p('backend-type')}}
if_p('port') { |port| network_conf['Backend']['Port'] = port }
if_p('vni') { |vni| network_conf['Backend']['VNI'] = vni }
%>
/var/vcap/packages/etcdctl/etcdctl \
--endpoints <%= etcd_endpoints %> \
--cert-file /var/vcap/jobs/flanneld/config/etcd-client.crt \
--key-file /var/vcap/jobs/flanneld/config/etcd-client.key \
--ca-file /var/vcap/jobs/flanneld/config/etcd-ca.crt \
set /coreos.com/network/config '<%= JSON.dump(network_conf) %>'
flanneld -etcd-endpoints=<%= etcd_endpoints %> \
--ip-masq \
--etcd-certfile=/var/vcap/jobs/flanneld/config/etcd-client.crt \
--etcd-keyfile=/var/vcap/jobs/flanneld/config/etcd-client.key \
--etcd-cafile=/var/vcap/jobs/flanneld/config/etcd-ca.crt \
1>> $LOG_DIR/flanneld.stdout.log \
2>> $LOG_DIR/flanneld.stderr.log
}
# Stop the flanneld daemon recorded in the pidfile (kill_and_wait comes
# from the sourced pid_utils.sh).
stop_flanneld() {
kill_and_wait "$PIDFILE"
}
# Print the PID stored on the first line of the pidfile.
pid() {
head -1 "$PIDFILE"
}
# Kill every process in flanneld's process group (e.g. log shippers
# attached to the same group).
stop_associated_logging_processes() {
# shellcheck disable=SC2046
pkill -g $(get_group_pid)
}
# Resolve the process-group id of the recorded PID.
get_group_pid() {
ps -ho pgrp "$(pid)"
}
case $1 in
start)
setup_directories
send_process_stdout_to_logfile
send_process_stderr_to_logfile
pid_guard "$PIDFILE" "Flanneld"
echo $$ > $PIDFILE
start_flanneld
;;
stop)
stop_associated_logging_processes
stop_flanneld
;;
*)
echo "Usage: $0 {start|stop}"
;;
esac
| true
|
19184509a1c5cbea87888223c355b0b51b9170b3
|
Shell
|
Contarkos/fancontrol
|
/tools/boot/rootfs/usr/bin/start_app.sh
|
UTF-8
| 3,753
| 3.703125
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Script de démarrage de l'appli
###########################
# Variables #
###########################
LOCAL_BIN=sw_local.bin
LOCAL_DIR=/boot/fancontrol
NFS_DIR=/mnt/nfs
NFS_CONF=nfs_conf
MOD_DIR=/lib/modules/$(uname -r)/kernel/fancontrol
MOD_ADC_DIR=${LOCAL_DIR}
MOD_TIME_DIR=${LOCAL_DIR}
MOD_ADC_NAME=kisr_adc
MOD_TIME_NAME=kisr_time
MOD_ADC=${MOD_ADC_NAME}.ko
MOD_TIME=${MOD_TIME_NAME}.ko
COPY_DIR=${LOCAL_DIR}
NFS_IP=192.168.0.14
###########################
# Fonctions #
###########################
start_flash ()
{
echo " ____________________.__ ";
echo " \______ \______ \__| ";
echo " ______ | _/| ___/ | ______ ";
echo " /_____/ | | \| | | | /_____/ ";
echo " |____|_ /|____| |__| ";
echo " \/ ";
echo " _____.__ .__ ";
echo " _/ ____\ | _____ _____| |__ ";
echo " \ __\| | \__ \ / ___/ | \ ";
echo " | | | |__/ __ \_\___ \| Y \ ";
echo " |__| |____(____ /____ >___| / ";
echo " \/ \/ \/ ";
echo
echo "========================================================"
echo
echo "Setting directory to ${LOCAL_DIR}"
COPY_DIR=${LOCAL_DIR}
}
start_nfs ()
{
echo " ____ ____ ____ ";
echo " ___( _ \\( _ \\(_ _)___ ";
echo "(___)) / )___/ _)(_(___) ";
echo " (_)\\_)(__) (____) ";
echo " _ _ ____ ___ ";
echo " ( \\( )( ___)/ __)";
echo " ) ( )__) \\__ \\";
echo " (_)\\_)(__) (___/";
echo
echo "========================================================"
echo
# Ping de l'adresse du serveur tftp (5 secondes de timeout)
ping ${NFS_IP} -c 1 -W 2
ALIVE=$?
echo
if [[ ${ALIVE} -eq 0 ]];
then
echo "Mounting NFS from ${NFS_IP}"
# Montage du dossier NFS
umount ${NFS_DIR}
mount -t nfs -o rw ${NFS_IP}:/tftpboot ${NFS_DIR}
# Selection du dossier de copie
COPY_DIR=${NFS_DIR}
else
echo "No echo from TFTP Server, using FLASH instead"
COPY_DIR=${LOCAL_DIR}
fi
}
launch_app ()
{
# Suppression des modules
rmmod ${MOD_TIME} ${MOD_ADC}
# Insertion des modules
insmod ${MOD_TIME_DIR}/${MOD_TIME}
insmod ${MOD_ADC_DIR}/${MOD_ADC}
# Demarrage de l'applicatif avec options
echo "Starting...."
echo
/tmp/${LOCAL_BIN}
}
copy_data ()
{
# Copy the application binary into RAM (/tmp) and set its execute bit.
# Falls back to the flash copy (LOCAL_DIR) when COPY_DIR does not hold
# the binary; exits 1 when the copy itself fails.
# Uses POSIX '[' instead of the bash-only '[[' because the script runs
# under #!/bin/sh (often busybox ash on this kind of target).

# Check that the binary is actually present
if [ ! -f "${COPY_DIR}/${LOCAL_BIN}" ]
then
# Fallback on error
echo "No file to launch... Fallback on flash"
COPY_DIR=${LOCAL_DIR}
fi

# Copy the binaries into RAM
echo "Copying the binaries from ${COPY_DIR}..."

# Copy to RAM and set the execute bit; test cp's status directly
# instead of via $?
if cp "${COPY_DIR}/${LOCAL_BIN}" "/tmp/${LOCAL_BIN}"
then
echo "Done !"
chmod +x "/tmp/${LOCAL_BIN}"
else
echo "Error while copying files... Exiting"
exit 1
fi
}
###########################
# Script #
###########################
# Pour avoir le temps de démarrage
echo "Boot time : $(cat /proc/uptime | cut -d ' ' -f1) seconds"
echo "========================================================"
echo
echo " -- Starting APP --"
echo
echo "========================================================"
# Choix de la conf de demarrage
if [[ -f ${LOCAL_DIR}/${NFS_CONF} ]]; then
start_nfs
else
start_flash
fi
# Preparation des donnees
echo "========================================================"
echo
copy_data
# On lance l'appli préparée
echo "========================================================"
echo
launch_app
| true
|
3c1f3d0a0f857cf68928bafa83ab2da15c94f453
|
Shell
|
rytmt/dotfiles
|
/wsl2/ubuntu/bin/re-filter.sh
|
UTF-8
| 2,454
| 4.40625
| 4
|
[] |
no_license
|
#!/bin/sh
# 第一引数: .mailfilter ファイルのパス
# 第二引数: 再フィルタリングメールボックスのパス
arg_err=''
arg_chk=0
# スクリプト引数の個数チェック
if [ $# -ne 2 ]; then
arg_err="${arg_err}引数の個数が正しくありません\n"
arg_chk=$((arg_chk+1))
else # 引数の個数チェックに成功した場合は形式チェックする
if [ ! -f "$1" ]; then
arg_err="${arg_err}$1 という名前のファイルは存在しません\n"
arg_chk=$((arg_chk+1))
else # ファイルが存在する場合は内容のチェックをする
maildrop -V 9 "$1" </dev/null 2>/dev/null
if [ "$?" -ne "0" ]; then
arg_err="${arg_err}$1 のファイル内容に誤りがあります\n"
arg_chk=$((arg_chk+1))
fi
fi
if [ ! -d "$2" ]; then
arg_err="${arg_err}$2 という名前のディレクトリは存在しません\n"
arg_chk=$((arg_chk+1))
fi
fi
if [ ! 'type mf2md.sh' ]; then
arg_err="${arg_err}メールディレクトリ作成スクリプト(mf2md.sh)が存在しません\n"
arg_chk=$((arg_chk+1))
fi
# 引数チェック結果
if [ $arg_chk -ne 0 ]; then
echo "${arg_err}"
exit 1
fi
# 再フィルタリング対象のメールボックス内のメールを一時避難させる
# 退避先ディレクトリの作成
mbox="$(echo $2 | sed 's|/$||g')" # 末尾にスラッシュがあったら削除する
tmpd="/tmp/$(basename $0)_$(basename ${mbox})_$(date '+%Y%m%d-%H%M%S')"
[ -d "${tmpd}" ] || mkdir "${tmpd}"
# 退避実行
echo "1. mail escape start: ${mbox} to ${tmpd}"
echo " ${mbox} : $(find ${mbox} -type f | wc -l) files"
echo " ${tmpd} : $(ls -1 ${tmpd} | wc -l) files"
find "${mbox}" -type f | while read line; do
mv "${line}" "${tmpd}/"
done
echo "2. mail escape finished"
echo " ${mbox} : $(find ${mbox} -type f | wc -l) files"
echo " ${tmpd} : $(ls -1 ${tmpd} | wc -l) files"
# フィルタリング先ディレクトリ作成
echo "3. mail directory make"
mf2md.sh "$1" | grep -F 'created'
# 再フィルタリング実行
echo "4. filtering start"
find "${tmpd}" -type f | while read line; do
cat "${line}" | maildrop "$1"
done
echo "5. filtering finished: $(find ${mbox} -type f | wc -l) files remained in ${mbox}"
# 一時退避先の削除
echo "6. delete ${tmpd} start"
rm -rf "${tmpd}"
echo "7. delete finished"
| true
|
e696d07484665dd54076d730e899cb7382a5a717
|
Shell
|
AaronFeledy/drupal-scripts
|
/common/functions.sh
|
UTF-8
| 567
| 4.21875
| 4
|
[] |
no_license
|
#!/bin/bash
## Common Functions
##
## This script contains common functions for use within other scripts. There's
## no need to source this file as it is sourced by init.sh.
## ------------------------------------------------------------
# Sources a file if it exists.
# Usage: `source_if_exists /path/to/file`
# Returns the sourced file's exit status, or 1 when the file is absent.
source_if_exists() {
  [[ -f "$1" ]] || return 1
  # shellcheck disable=SC1090
  source "$1"
}
# Scripts that require root can call this function to stop execution if user is not root or sudo.
require_root() {
  # Short-circuit form: fall through silently when already root,
  # otherwise delegate the abort to die().
  [[ $UID == 0 ]] || die "This script requires elevated privileges."
}
| true
|
04df3d454490792c53c4d48fe0607a9d77408d46
|
Shell
|
cegamboav/OVM_Scripting
|
/Lun_used_by_vm_mhas_in_server.sh
|
UTF-8
| 2,014
| 3.703125
| 4
|
[] |
no_license
|
#!/bin/bash
#Script to identify if a LUN is used by any running VM in MHAS environment.
# Global flag: 0 = per-VM table header not yet printed (see print_head).
prt_hd=0
# Print the per-VM table header exactly once; the global prt_hd flag
# suppresses repeats until collect_vms resets it for the next VM.
# Arguments: $1 - VM display name
print_head(){
  if [ "$prt_hd" -eq 0 ]; then
    echo "|---------------------------------------|---------------|"
    echo "|VM Name: $1"
    echo "| IBM LUN ID | Status |"
    echo "|---------------------------------------|---------------|"
    prt_hd=1
  fi
}
# Snapshot multipath topology and per-map path status into /tmp scratch
# files consumed by collect_vms (removed later by delete_tmp_data).
collect_multipath_data(){
multipath -ll > /tmp/mltp.txt
multipathd show maps status > /tmp/status.txt
}
# For every running Xen VM (Domain-0 excluded), walk the disk entries in its
# vm.cfg and report any phy-backed device whose multipath map is an IBM LUN.
# The `disks` string acts as a cursor: each iteration strips the first
# quoted device (cut -d "'" -f3-) until no 'phy' entry remains.
collect_vms(){
for i in `xm list|egrep -v 'Domain-0|Name'|awk '{print $1}'`
do
 tot_disks=$(grep disk /OVS/Repositories/*/*/VirtualMachines/$i/vm.cfg)
 vm_name=$(grep OVM_simple_name /OVS/Repositories/*/*/VirtualMachines/$i/vm.cfg|cut -d "'" -f2)
 COUNT=0
 disks=$tot_disks
 while [ $COUNT -eq 0 ]
 do
  dev=$(echo $disks|cut -d "'" -f2)
  dsk=$(echo $dev|cut -d "/" -f4|cut -d ',' -f1)
  phy=$(echo $dev|grep phy|wc -l)
  if [ $phy -eq 1 ]
  then
   # Resolve the device to its multipath map name, then check vendor.
   lun=$(grep $dsk /tmp/mltp.txt|awk '{print $1}')
   is_ibm=$(grep $lun /tmp/mltp.txt|grep IBM|wc -l)
   if [ $is_ibm -eq 1 ]
   then
    print_head $vm_name
    status=$(grep $lun /tmp/status.txt|awk '{print $5}')
    echo "|$lun | $status |"
   fi
   otra=$(echo $disks|cut -d "'" -f3-)
   disks=$otra
   cont=$(echo $otra|grep phy|wc -l)
   if [ $cont -eq 0 ]
   then
    COUNT=1
   fi
  else
   # Non-phy entry: advance the cursor without reporting.
   otra=$(echo $disks|cut -d "'" -f3-)
   disks=$otra
   cont=$(echo $otra|grep phy|wc -l)
   if [ $cont -eq 0 ]
   then
    COUNT=1
   fi
  fi
 done
 prt_hd=0
done
}
# Print the closing table border and the report footer separators.
print_end(){
  printf '%s\n' "|---------------------------------------|---------------|"
  echo
  printf '%s\n' "=================================================================="
  printf '%s\n' "=================================================================="
}
# Remove the scratch files created by collect_multipath_data.
delete_tmp_data(){
  rm -f /tmp/mltp.txt /tmp/status.txt
}
# Main sequence: banner, snapshot multipath state, scan VMs, footer, cleanup.
echo
echo
echo "Checking Server: "`hostname`
echo
collect_multipath_data
collect_vms
print_end
delete_tmp_data
| true
|
8357dd27cef50708a4b492ca209bc33c16ef81ad
|
Shell
|
habitat-sh/builder
|
/.expeditor/scripts/verify/terraform_validate.sh
|
UTF-8
| 1,095
| 4.09375
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
set -euo pipefail
# In the absence of a real Terraform parser, we can use a bit of awk
# to extract the required version from a file.
#
# Given a file with contents like this:
#
# terraform {
# required_version = "0.12.13"
# }
#
# we would extract `0.12.13` (without quotes!).
# Extract the `required_version` value (quotes stripped) from a Terraform
# versions file, e.g. `required_version = "0.12.13"` -> `0.12.13`.
terraform_version() {
  local tf_file="${1}"
  awk 'BEGIN { FS=" *= *"} /required_version/ {gsub("\"","",$2); print $2}' "${tf_file}"
}
# Pin the exact Terraform release declared by the repo itself.
terraform_version="$(terraform_version terraform/versions.tf)"
readonly terraform_version
terraform_artifact="terraform_${terraform_version}_linux_amd64.zip"
readonly terraform_artifact
# Install Terraform
# Subshell keeps the `cd bin` from leaking into the rest of the script.
(
  # We do this so we don't have to contend with the binary and
  # directory names (both "terraform") conflicting.
  mkdir bin
  cd bin
  curl -O "https://releases.hashicorp.com/terraform/${terraform_version}/${terraform_artifact}"
  # leaves a `terraform` binary in the current directory
  unzip "${terraform_artifact}"
)
# Validate the terraform directory
./bin/terraform init terraform
./bin/terraform validate terraform
| true
|
43a804e3523258e3c53604c106d7feae56606413
|
Shell
|
Blanktopia/BlanktopiaResourcePack
|
/pack.sh
|
UTF-8
| 388
| 2.640625
| 3
|
[] |
no_license
|
#!/bin/bash
# Build and deploy the resource pack: minify JSON in place, zip, rename the
# archive with its SHA checksum, rsync to the server, then restore pretty
# JSON. Requires jq, sponge (moreutils), shasum and rsync.
shopt -s globstar
# Minify
for i in **/*.json; do
    jq -c . ${i} | sponge ${i}
done
zip dist/Blanktopia.zip -r assets pack.mcmeta pack.png
# Checksum-suffixed name gives clients a cache-busting artifact name.
mv dist/Blanktopia.zip dist/Blanktopia-$(shasum dist/Blanktopia.zip | cut -d' ' -f1).zip
rsync -iavzhP dist/ weiwen@ssh.piggyp.ink:/opt/nomad/volumes/files/ --rsync-path="sudo rsync"
# Un-minify
for i in **/*.json; do
    jq . ${i} | sponge ${i}
done
| true
|
a7e853637731aa441a50f1b42fabc6a00fdb230a
|
Shell
|
ghj114/OPSK-FOLSOM-Quantum-InstScript
|
/upload-img/glance-upload-ubuntu-raw.sh
|
UTF-8
| 1,401
| 3.140625
| 3
|
[] |
no_license
|
#!/bin/bash
# Upload a raw Ubuntu 12.04 image to Glance, authenticating with a token
# obtained from obtain-token.sh. Expects a `settings` file alongside.

# Import Settings
. settings

#if [ ! -f "vmlinuz-2.6.18-238.el5" ] ; then
#    echo "can not find vmlinuz-2.6.18-238.el5!"
#    exit -1
#fi
#
#if [ ! -f "initrd-2.6.18-238.el5.img" ] ; then
#    echo "can not find initrd-2.6.18-238.el5.img!"
#    exit -1
#fi
#if [ ! -f "centos5.6.final.img" ] ; then
#    echo "can not find centos5.6.final.img!"
#    exit -1
#fi
#if [ ! -f "ubuntu-11.10.img" ] ; then
#    echo "can not find ubuntu-11.10.img!"
#    exit -1
#fi
if [ ! -f "ubuntu-12.04.img" ] ; then
    echo "can not find ubuntu-12.04.img!"
    # BUGFIX: was `exit -1`; -1 is out of range and wraps to 255 — use a
    # conventional failure code instead.
    exit 1
fi

TOKEN=$(./obtain-token.sh)

#echo "Uploading kernel"
#RVAL=`glance -A $TOKEN add name="centos5.6-kernel" is_public=true container_format=aki disk_format=aki < vmlinuz-2.6.18-238.el5`
#KERNEL_ID=`echo $RVAL | cut -d":" -f2 | tr -d " "`
#
#echo "Uploading ramdisk"
#RVAL=`glance -A $TOKEN add name="centos5.6-ramdisk" is_public=true container_format=ari disk_format=ari < initrd-2.6.18-238.el5.img`
#RAMDISK_ID=`echo $RVAL | cut -d":" -f2 | tr -d " "`
#
#echo "Uploading image"
#glance -A $TOKEN add name="centos5.6" is_public=true container_format=ami disk_format=ami kernel_id=$KERNEL_ID ramdisk_id=$RAMDISK_ID < centos5.6.final.img
echo "Uploading raw image"
#glance add -A $TOKEN name="ubuntu-11.10.img" is_public=true disk_format=raw < ubuntu-11.10.img
# Quote the token so an unexpected space/empty value cannot split the command.
glance add -A "$TOKEN" name="ubuntu-12.04.img" is_public=true disk_format=raw < ubuntu-12.04.img
| true
|
9cf84c7c60d908578668de37df3245420549155f
|
Shell
|
jsylvia92/bigdogpitbull
|
/bigdogpitbull.sh
|
UTF-8
| 14,554
| 4.25
| 4
|
[] |
no_license
|
#!/bin/bash
# Interactive wrapper around giant_bomb_cli.py: browse, search and download
# Giant Bomb videos. Persists quality/offset settings in ~/.bigdogpitbull.
RE='^[0-9]+$' # regex for user input error checking
# files for settings storage
CONFIGDIR=~/.bigdogpitbull
CONFIG=~/.bigdogpitbull/config
if [ ! -d "$CONFIGDIR" ]; then
    mkdir $CONFIGDIR
    echo -e "\e[1mCreated config directory\e[0m ~/.bigdogpitbull/"
fi
# separate checks in case the directory exists, but not the file
if [ ! -f "$CONFIG" ]; then
    # Seed defaults: line 1 = quality ("hd"), line 2 = offset ("0").
    exec 3>&1 >$CONFIG
    echo "hd"
    echo "0"
    exec >&3-
    echo -e "\e[1mCreated config file\e[0m ~/.bigdogpitbull/config"
fi
# read from config file
config=() # settings array. [0] is quality, [1] is offset
while read line
do
    config+=("$line")
done < $CONFIG
# Top-level menu loop; dispatches to the feature sub-menus until Quit.
Menu () {
    menuFlag=0 # menu options print if 0 to prevent stdout clutter
    while true;
    do
        if [ "$menuFlag" == 0 ];
        then
            echo -e "\n\e[1mWhat would you like to do?\e[0m"
            echo " 1. Download recent videos"
            echo " 2. Search for videos"
            echo " 3. Settings"
            echo " 4. Quit"
            menuFlag=1
        fi
        read -p $'\e[1mEnter your choice: \e[0m' mainMenu;
        case "$mainMenu" in
            1) RecentDownload
               menuFlag=0
               ;;
            2) PreSearch
               menuFlag=0
               ;;
            3) Settings
               menuFlag=0
               ;;
            4) break
               ;;
            *) echo -e "\e[92mInvalid input, try again.\e[39m"
               ;;
        esac
    done
    echo
}
# Settings sub-menu: quality, offset, and pass-through info commands of
# giant_bomb_cli.py.
Settings () {
    menuFlag=0
    while true;
    do
        if [ "$menuFlag" == 0 ];
        then
            echo -e "\n\e[1mSettings\e[0m"
            echo " 0. Return to main menu"
            echo " 1. Change video quality"
            echo " 2. Change results offset"
            echo " 3. Show video types stored in giant_bomb_cli.py"
            echo " 4. Help menu for giant_bomb_cli.py flags and usage"
            echo " 5. Version number of giant_bomb_cli.py"
            menuFlag=1
        fi
        read -p $'\e[1mEnter your choice: \e[0m' settingsMenu;
        case "$settingsMenu" in
            0) break
               ;;
            1) VideoQuality
               menuFlag=0
               ;;
            2) OffsetResults
               menuFlag=0
               ;;
            3) echo
               ./.giant_bomb_cli.py --dump_video_types
               menuFlag=0
               ;;
            4) echo
               ./.giant_bomb_cli.py --help
               menuFlag=0
               ;;
            5) echo
               ./.giant_bomb_cli.py --version
               menuFlag=0
               ;;
            *) echo -e "\e[92mInvalid input, try again.\e[39m"
               ;;
        esac
    done
}
# Choose download quality (hd/high/low); stores it in config[0] and rewrites
# the config file. The heredoc below is intentionally unindented.
VideoQuality () {
    menuFlag=0
    while true;
    do
        if [ "$menuFlag" == 0 ];
        then
            echo -e "\n\e[1mSet Video Quality\e[0m"
            echo -n " 1. HD"
            if [ "${config[0]}" == "hd" ];
            then
                echo -n " (current setting)"
            fi
            echo -ne "\n 2. High"
            if [ "${config[0]}" == "high" ];
            then
                echo -n " (current setting)"
            fi
            echo -ne "\n 3. Low"
            if [ "${config[0]}" == "low" ];
            then
                echo -n " (current setting)"
            fi
            echo
            menuFlag=1
        fi
        read -p $'\e[1mEnter your choice: \e[0m' qualityOpt;
        case "$qualityOpt" in
            1) config[0]=hd
               ;;
            2) config[0]=high
               ;;
            3) config[0]=low
               ;;
            *) echo -e "\e[92mInvalid input, try again.\e[39m"
               continue
               ;;
        esac
        # write settings to file
        cat > $CONFIG << EOL
${config[0]}
${config[1]}
EOL
        break
    done
}
# Set the results offset (config[1]) after integer validation, then persist
# both settings to the config file.
OffsetResults () {
    echo -e "\n\e[1mSet Results Offset\e[0m"
    echo " Current setting: ${config[1]}"
    while read -p $'\e[1mEnter results offset: \e[0m' qty;
    do
        IntsOnly "$qty"
        retval=$?
        if [ "$retval" == 1 ]; # user entered non-integral input
        then
            echo -e "\e[92mInvalid input, try again.\e[39m"
            continue # therefore, return to start of loop and try again
        else
            config[1]=$qty
            # write settings to file
            cat > $CONFIG << EOL
${config[0]}
${config[1]}
EOL
            return
        fi
    done
}
# List the N most recent videos (honoring the stored offset) and optionally
# download them at the configured quality.
RecentDownload () {
    echo
    while read -p $'\e[1mHow many recent videos do you wish to download? (0 to go back): \e[0m' qty;
    do
        IntsOnly "$qty"
        retval=$?
        if [ "$retval" == 1 ]; # user entered non-integral input
        then
            echo -e "\e[92mInvalid input, try again.\e[39m" >&2;
        elif [ "$retval" == 2 ]; # user entered 0, go back
        then
            return
        elif [ "$retval" == 0 ];
        then
            # Singular/plural phrasing for the confirmation prompt.
            if [ "$qty" == 1 ];
            then
                hotGrammar="this video"
                hotPronoun="it"
            else
                hotGrammar="these videos"
                hotPronoun="them"
            fi
            echo
            ./.giant_bomb_cli.py -l "$qty" --offset "${config[1]}"
            echo -e "\n\e[1mWould you like to download $hotGrammar?\e[0m"
            echo " 1. Yes, download $hotPronoun"
            echo " 2. No, don't download $hotPronoun"
            while read -p $'\e[1mEnter your choice: \e[0m' dl;
            do
                case "$dl" in
                    1) ./.giant_bomb_cli.py -l "$qty" --offset "${config[1]}" --quality "${config[0]}" --download
                       return
                       ;;
                    2) return
                       ;;
                    *) echo -e "\e[92mInvalid input, try again.\e[39m"
                       continue
                       ;;
                esac
            done
        fi
    done
}
# Search-mode chooser: key-terms/video-type search or direct ID lookup.
PreSearch () {
    menuFlag=0 # menu options print if 0 to prevent stdout clutter
    while true;
    do
        if [ "$menuFlag" == 0 ];
        then
            echo -e "\n\e[1mHow would you like to search?\e[0m"
            echo " 0. Cancel"
            echo " 1. By key terms and/or video type"
            echo " 2. By video ID"
            menuFlag=1
        fi
        read -p $'\e[1mEnter your choice: \e[0m' searchOpt;
        case "$searchOpt" in
            0) break
               ;;
            1) Search
               menuFlag=0
               ;;
            2) IDSearch
               menuFlag=0
               ;;
            *) echo -e "\e[92mInvalid input, try again.\e[39m"
               ;;
        esac
    done
}
# Look up one video by numeric ID, show it, then offer a download at the
# configured quality.
IDSearch () {
    while read -p $'\e[1mEnter video ID (0 to go back): \e[0m' vID;
    do
        IntsOnly "$vID"
        retval=$?
        if [ "$retval" == 1 ]; # user entered non-integral input
        then
            echo -e "\e[92mInvalid input, try again.\e[39m"
            continue
        elif [ "$retval" == 2 ]; # if input was 0, go back
        then
            return
        elif [ "$retval" == 0 ]; # if input was a valid integer, proceed to command
        then
            echo
            ./.giant_bomb_cli.py --filter --id "$vID"
        fi
        break
    done
    echo -e "\n\e[1mWould you like to download this video?\e[0m"
    echo " 1. Yes, download it"
    echo " 2. No, don't download it"
    while read -p $'\e[1mEnter your choice: \e[0m' dl;
    do
        case "$dl" in
            1) ./.giant_bomb_cli.py --filter --id "$vID" --quality "${config[0]}" --download
               return
               ;;
            2) return
               ;;
            *) echo -e "\e[92mInvalid input, try again.\e[39m"
               continue
               ;;
        esac
    done
}
# TODO:
#   cry at how stupid big I let this function become
# Nested prompt flow: video-type filter -> search terms -> result count ->
# sort order -> optional download. The numeric `videoType` values are the
# API's category ids (note they are NOT the same as the menu numbers).
Search () {
    menuFlag=0
    while true;
    do
        if [ "$menuFlag" == 0 ];
        then
            echo -e "\n\e[1mWould you like to filter by video type?\e[0m"
            echo " 0. Back to search menu"
            echo " 1. No video type filter"
            echo " 2. Video Reviews"
            echo " 3. Quick Looks"
            echo " 4. TANG"
            echo " 5. Endurance Run"
            echo " 6. Events"
            echo " 7. Trailers"
            echo " 8. Features"
            echo " 9. Premium"
            echo " 10. Extra Life"
            echo " 11. Encyclopedia Bombastica"
            echo " 12. Unfinished"
            echo " 13. Metal Gear Scanlon"
            echo " 14. VinnyVania"
            echo " 15. Breaking Brad"
            echo " 16. Best of Giant Bomb"
            echo " 17. Game Tapes"
            echo " 18. Kerbal: Project B.E.A.S.T."
            menuFlag=1
        fi
        read -p $'\e[1mEnter your choice: \e[0m' videoType
        case "$videoType" in
            0) return
               ;;
            1) videoType=0
               echo -e "You entered: \e[1mNo filter\e[0m"
               ;;
            2) videoType=2
               echo -e "You entered: \e[1mVideo Reviews\e[0m"
               ;;
            3) videoType=3
               echo -e "You entered: \e[1mQuick Looks\e[0m"
               ;;
            4) videoType=4
               echo -e "You entered: \e[1mTANG\e[0m"
               ;;
            5) videoType=5
               echo -e "You entered: \e[1mEndurance Run\e[0m"
               ;;
            6) videoType=6
               echo -e "You entered: \e[1mEvents\e[0m"
               ;;
            7) videoType=7
               echo -e "You entered: \e[1mTrailers\e[0m"
               ;;
            8) videoType=8
               echo -e "You entered: \e[1mFeatures\e[0m"
               ;;
            9) videoType=10
               echo -e "You entered: \e[1mPremium\e[0m"
               ;;
            10) videoType=11
                echo -e "You entered: \e[1mExtra Life\e[0m"
                ;;
            11) videoType=12
                echo -e "You entered: \e[1mEncyclopedia Bombastica\e[0m"
                ;;
            12) videoType=13
                echo -e "You entered: \e[1mUnfinished\e[0m"
                ;;
            13) videoType=17
                echo -e "You entered: \e[1mMetal Gear Scanlon\e[0m"
                ;;
            14) videoType=18
                echo -e "You entered: \e[1mVinnyVania\e[0m"
                ;;
            15) videoType=19
                echo -e "You entered: \e[1mBreaking Brad\e[0m"
                ;;
            16) videoType=20
                echo -e "You entered: \e[1mBest of Giant Bomb\e[0m"
                ;;
            17) videoType=21
                echo -e "You entered: \e[1mGame Tapes\e[0m"
                ;;
            18) videoType=22
                echo -e "You entered: \e[1mKerbal: Project B.E.A.S.T.\e[0m"
                ;;
            *) echo -e "\e[92mInvalid input, try again.\e[39m"
               continue
               ;;
        esac
        while read -p $'\e[1mEnter search terms (0 to go back, q to search menu): \e[0m' searchTerms;
        do
            menuFlag=0 # for if user opts to return to video type filter menu
            if [ "$searchTerms" == "q" ];
            then
                return # quit to menu
            elif [ "$searchTerms" != 0 ]; # if user did not opt to go back, continue
            then
                while read -p $'\e[1mHow many videos would you like to list? (0 to go back, q to search menu): \e[0m' qty;
                do
                    if [ "$qty" == "q" ];
                    then
                        return # quit to menu
                    fi
                    IntsOnly "$qty"
                    retval=$?
                    if [ "$retval" == 1 ]; # user entered non-integral input
                    then
                        echo -e "\e[92mInvalid input, try again.\e[39m"
                        continue
                    elif [ "$retval" == 2 ]; # if input was 0, go back to previous prompt
                    then
                        break
                    elif [ "$retval" == 0 ]; # if input was a valid integer, proceed to command
                    then
                        backFlag=0 # will return to # videos to list if set to 1
                        echo -e "\e[1mSort in...\e[0m"
                        echo " 0. Go back"
                        echo " 1. Ascending order?"
                        echo " 2. Descending order?"
                        while read -p $'\e[1mEnter your choice (q to search menu): \e[0m' ord;
                        do
                            case "$ord" in
                                0) backFlag=1
                                   ;;
                                1) sort="asc"
                                   ;;
                                2) sort="desc"
                                   ;;
                                q) return
                                   ;;
                                *) echo -e "\e[92mInvalid input, try again.\e[39m"
                                   continue
                                   ;;
                            esac
                            break
                        done
                        if [ "$backFlag" == 1 ];
                        then
                            continue # return to # of videos listed prompt
                        fi
                        echo
                        if [ "$videoType" == 0 ]; # if no video type was selected
                        then
                            ./.giant_bomb_cli.py -l "$qty" --filter --name "$searchTerms" --offset "${config[1]}" --sort "$sort"
                        else
                            ./.giant_bomb_cli.py -l "$qty" --filter --name "$searchTerms" --video_type "$videoType" --offset "${config[1]}" --sort "$sort"
                        fi
                        if [ "$qty" == 1 ];
                        then
                            hotGrammar="this video"
                            hotPronoun="it"
                        else
                            hotGrammar="these videos"
                            hotPronoun="them"
                        fi
                        echo -e "\n\e[1mWould you like to download $hotGrammar?\e[0m"
                        echo " 1. Yes, download $hotPronoun"
                        echo " 2. No, don't download $hotPronoun"
                        while read -p $'\e[1mEnter your choice: \e[0m' dl;
                        do
                            case "$dl" in
                                1) DownloadResults "$qty" "$searchTerms" "$sort" "$videoType"
                                   return
                                   ;;
                                2) return
                                   ;;
                                *) echo -e "\e[92mInvalid input, try again.\e[39m"
                                   continue
                                   ;;
                            esac
                        done
                    fi
                    continue # return to list video qty prompt if input is invalid
                done
                continue # user entered 0 at # of videos to list, so return to previous prompt
            else
                break # if user opted to go back during search terms prompt, return to video type list
            fi
        done
        continue # user entered 0; return to filter by video type prompt
    done
}
# Download previously-listed search results.
# Arguments: $1 count, $2 search terms, $3 sort order, $4 video type id
#            ($4 == 0 means "no type filter").
DownloadResults () {
    echo
    if [ "$4" == 0 ]; # if no video type was selected
    then
        ./.giant_bomb_cli.py -l "$1" --filter --name "$2" --sort "$3" --offset "${config[1]}" --quality "${config[0]}" --download
    else
        ./.giant_bomb_cli.py -l "$1" --filter --name "$2" --video_type "$4" --sort "$3" --offset "${config[1]}" --quality "${config[0]}" --download
    fi
}
# Validate that $1 is a non-negative integer.
# Returns: 1 if non-integral, 2 if the input is 0, 0 if a nonzero integer.
IntsOnly () {
    # FIX: everything is local now. The original wrote the script-wide
    # globals `qty` and `retval` (e.g. IDSearch passed $vID and silently
    # clobbered the global qty), and depended on the global regex RE.
    local re='^[0-9]+$'
    local qty=$1
    local retval
    if ! [[ $qty =~ $re ]];
    then
        retval=1
    elif [ "$qty" == 0 ];
    then
        retval=2
    else
        retval=0
    fi
    return "$retval"
}
Menu
| true
|
da7ad24b2ef8d4e60caf495b25ca1db7738e0755
|
Shell
|
krebscode/confmagic
|
/lib/punani
|
UTF-8
| 3,183
| 3.59375
| 4
|
[] |
no_license
|
#!/bin/sh
#include core
## begin punani DB
# Mapping table: _punanidb_<packer>_<generic-name> = distro package name.
# <packer> is the manager name with dashes stripped ("aptget" for apt-get).
# The bare _punanidb_<packer>_ entries exist so resolution of an empty
# package name fails cleanly.
_punanidb_pacman_=
_punanidb_yum_=
_punanidb_aptget_=
_punanidb_pacman_git=git
_punanidb_yum_git=git
_punanidb_aptget_git=git-core
_punanidb_pacman_python2=python2
_punanidb_yum_python2=python
_punanidb_aptget_python2=python
_punanidb_pacman_python3=python
_punanidb_aptget_python3=python3
# NOTE(review): the two hostname pairs below are duplicated verbatim.
_punanidb_pacman_hostname=inetutils
_punanidb_aptget_hostname=hostname
_punanidb_pacman_hostname=inetutils
_punanidb_aptget_hostname=hostname
_punanidb_pacman_make=make
_punanidb_yum_make=make
_punanidb_aptget_make=make
_punanidb_pacman_tinc=tinc
_punanidb_yum_tinc=tinc
_punanidb_aptget_tinc=tinc
_punanidb_pacman_tor=tor
_punanidb_yum_tor=tor
_punanidb_aptget_tor=tor
_punanidb_pacman_nano=nano
_punanidb_yum_nano=nano
_punanidb_aptget_nano=nano
_punanidb_pacman_vim=vim
_punanidb_yum_vim=vim-enhanced
_punanidb_aptget_vim=vim
## end punani DB
# Resolve a generic package name to this distro's package name via the
# _punanidb_* table (indirect lookup through eval).
# Globals:   PACKER (read) - normalized manager name, e.g. "aptget"
# Arguments: $1 - generic package name
# Outputs:   distro-specific name on stdout
# Returns:   non-zero (via `grep .`) when no mapping exists
_punani_resolve_package(){
  : ${PACKER?PACKER is not set,bailing out}
  pkg=${1?please provide package name to resolve}
  eval printf "%s" \"\${_punanidb_${PACKER}_${pkg}-}\" | grep .
}
# Per-package-manager primitives: install / remove / has. Each `has`
# returns 0 iff the package is currently installed.
_punani_aptget_install(){ apt-get -y install "$@" ;}
_punani_aptget_remove(){ apt-get -y remove "$@" ;}
_punani_aptget_has() { dpkg -s "$1" | grep -q "Status: install";}
_punani_yum_install(){ yum -y install "$@" ;}
_punani_yum_remove(){ yum -y remove "$@" ;}
_punani_yum_has() { rpm -qa --qf "%{NAME}\n"| egrep "^${1}\$" >/dev/null ;}
_punani_pacman_install(){ pacman --noconfirm -S --needed "$@" ;}
_punani_pacman_remove(){ pacman -Rcs "$@" ;}
_punani_pacman_has(){ pacman -Q "$1" >/dev/null;}
_punani_brew_install(){ brew install "$@"; }
_punani_brew_remove(){ brew remove "$@";}
_punani_brew_has(){ error "not implemented"; return 1 ;}
# Cross-distro package front end.
# Usage:   punani (install|remove|has) PACKAGE...
# Detects the first available package manager, resolves each generic
# package name through _punani_resolve_package, then dispatches to the
# _punani_<packer>_<action> primitive via eval.
# Relies on helpers from the sourced core: exists, error, info, is_root.
punani(){
  ACTION="$1"; shift
  PKGS="$*"
  for p in apt-get pacman yum brew;do
    # Normalize "apt-get" -> "aptget" to match function/variable names.
    exists "$p" && PACKER=`printf "%s" "$p" | sed 's/-//g'` && break
  done
  [ -z "${PACKER:-}" ] && error "Error 2: no known package manager found; no punani for you!" && return 1
  info "using $PACKER for install"
  [ -z "$PKGS" ] && error "no PACKAGE specified." && ACTION="usage"
  for PKG in $PKGS; do
    RES="`_punani_resolve_package $PKG`"
    test -z "$RES" && error "could not resolve '$PKG'; no punani for you!"&& return 23
    case "$ACTION" in
      install)
        eval _punani_${PACKER}_has $RES && info "$RES already installed, skipping" && continue
        ! is_root && error "punani requires super-user rights for installing" && return 1
        eval _punani_${PACKER}_install $RES || error "cannot install $RES with $PACKER"
        ;;
      remove)
        ! eval _punani_${PACKER}_has $RES && info "$RES not installed, skipping" && continue
        ! is_root && error "punani requires super-user rights for removing" && return 1
        eval _punani_${PACKER}_remove $RES || error "cannot install $RES with $PACKER"
        ;;
      has)
        if eval _punani_${PACKER}_has $RES ;then
          info "$RES is installed"
        else
          info "$RES is not installed"
        fi
        ;;
      *)
        error "usage: punani (install|remove|has) PACKAGE..."
        return 23
    esac
  done
}
| true
|
f36d362610e84b341bd6f4a3cc813197f46f47f4
|
Shell
|
freelancer9977/lab_3
|
/lab_sctipt.sh
|
UTF-8
| 437
| 2.96875
| 3
|
[] |
no_license
|
#!/bin/bash
# Authors : Matthew Spallas
# Date: 4/18/2019
#Problem 1 Code:
# Prompt for a file name and an extended regex, then print matching lines.
echo "Enter file name: "
read -r fname
echo "Enter a regex: "
read -r regex
# FIX: quote both expansions (unquoted input with spaces/globs previously
# split into multiple arguments) and use `grep -E` instead of the
# deprecated `egrep`; `--` protects values starting with '-'.
grep -E -- "$regex" "$fname"
#Make sure to document how you are solving each problem!
#egrep -c '[0-9]{3}-[0-9]{3}-[0-9]{4}' 'regex_practice.txt'
# egrep -c '.*@.*' 'regex_practice.txt
#egrep -o '^303-[0-9]{3}-[0-9]{4}' 'regex_practice.txt'
#egrep -o '.*@geocities.com' 'regex_practice.txt >> email_results.txt'
| true
|
7b84b46c7a385e2689ee7e773b69d0350f317bce
|
Shell
|
rk9109/dotfiles
|
/scripts/float
|
UTF-8
| 302
| 2.921875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
#
# toggle float
#
# Grab the X window id of the currently focused window from the root window.
focused=$(xprop -root _NET_ACTIVE_WINDOW | awk -F' ' '{print $NF}')
# i3 sets I3_FLOATING_WINDOW on floating windows; xprop prints "not found"
# for tiled ones, so absence of the property means "currently tiled".
if xprop -id "${focused}" I3_FLOATING_WINDOW | grep -q "not found"; then
    i3-msg "floating enable, border normal" >> /dev/null
else
    i3-msg "floating disable, border none" >> /dev/null
fi
| true
|
7bbfb2624182a2aa60d17e9f47a64f5ac6390c17
|
Shell
|
amandascm/pdf-csv-conversor
|
/src/scripts/script.sh
|
UTF-8
| 393
| 3.296875
| 3
|
[] |
no_license
|
#!/bin/bash
# Bootstrap a Python virtualenv (first run only) and execute main.py in it.
#install virtualenv (python)
python3 -m pip install --user virtualenv
#check if virtual environment has already been created
DIR=".venv"
if [ ! -d "$DIR" ]; then
    python3 -m venv .venv
    source .venv/bin/activate
    python -m pip install -r requirements.txt
    deactivate
fi
#activate virtual environment and run program
source .venv/bin/activate
python main.py
deactivate
| true
|
ed7601010341c15e3d155b38474286fa4949e981
|
Shell
|
barthouse/linuxfromscratch
|
/scripts/install/install-vim.sh
|
UTF-8
| 927
| 2.890625
| 3
|
[] |
no_license
|
# LFS-style build of vim 7.4: configure/make/test/install, create the vi
# symlinks and a system-wide /etc/vimrc. Shared steps (unpack, logging vars,
# cleanup) come from the sourced dosetup/dotar/docleanup helpers.
PKGNAME=vim
PKGVER=7.4
TAREXT=bz2
DIR="`dirname \"$0\"`"
source $DIR/dosetup.sh
SRCDIR=vim74
source $DIR/dotar.sh
echo 'CONFIG'
# Point vim at a system-wide vimrc before configuring.
echo '#define SYS_VIMRC_FILE "/etc/vimrc"' >> src/feature.h
./configure --prefix=/usr \
    1> $CONFIGLOG 2> $CONFIGERR
echo 'MAKE'
make \
    1> $MAKELOG 2> $MAKEERR
echo 'MAKE TESTS'
make -j1 test \
    1> $TESTLOG 2> $TESTERR
echo 'MAKE INSTALL'
make install \
    1> $INSTALLLOG 2> $INSTALLERR
ln -sv vim /usr/bin/vi \
    1>> $INSTALLLOG 2>> $INSTALLERR
# Mirror the vi symlink into every man-page locale directory.
for L in /usr/share/man/{,*/}man1/vim.1; do
    ln -sv vim.1 $(dirname $L)/vi.1 \
    1>> $INSTALLLOG 2>> $INSTALLERR
done
ln -sv ../vim/vim74/doc /usr/share/doc/vim-7.4 \
    1>> $INSTALLLOG 2>> $INSTALLERR
cat > /etc/vimrc << "EOF"
" Begin /etc/vimrc
set nocompatible
set backspace=2
syntax on
if (&term == "iterm") || (&term == "putty")
    set background=dark
endif
" End /etc/vimrc
EOF
source $DIR/docleanup.sh
| true
|
43be4b18d30c13012bccf154dd6b8d9b63169fc3
|
Shell
|
danebou/decomp-permuter
|
/diff.sh
|
UTF-8
| 304
| 3.421875
| 3
|
[] |
no_license
|
#!/bin/bash
# Word-diff the simplified disassembly of two MIPS object files.
if [ $# != 2 ]; then
    echo "Usage: $0 orig.o new.o"
    exit 1
fi
# FIX: quote "$1"/"$2" (unquoted paths with spaces broke the test and the
# commands below) and replace the deprecated, ambiguous `-o` test operator
# with two tests joined by ||.
if [ ! -f "$1" ] || [ ! -f "$2" ]; then
    echo Source files not readable
    exit 1
fi
TRANSFORM="python3 simplify_objdump.py"
OBJDUMP="mips-linux-gnu-objdump -drz"
# $OBJDUMP/$TRANSFORM are left unquoted on purpose: they hold multi-word
# commands that must word-split.
wdiff -n <($OBJDUMP "$1" | $TRANSFORM) <($OBJDUMP "$2" | $TRANSFORM) || true
| true
|
51be85acb32d279f7eea8e41a628cece0a284592
|
Shell
|
Goldenfreddy0703/configs-scripts
|
/scripts/hue
|
UTF-8
| 285
| 2.9375
| 3
|
[
"LicenseRef-scancode-public-domain"
] |
permissive
|
#!/bin/sh
# shellcheck disable=SC2029
# Make Marina do all the heavy lifting for hue-cli work.
set -eu
# Run the hue tools locally on Marina; from any other host, re-invoke this
# same script on Marina over ssh (args intentionally expand client-side).
if [ "$(hostname)" = "Marina" ]; then
    # First arg "adm" selects the admin CLI; remaining args pass through.
    if [ "$1" = "adm" ]; then
        shift
        /usr/bin/hueadm "$@"
    else
        /usr/bin/hue "$@"
    fi
else ssh marina /home/tiz/xdg/sync/scripts/hue "$@"
fi
| true
|
e9a4104deed2f81a44b0913c25c370988a749387
|
Shell
|
Ozziedood/Linuxclass
|
/midterm.sh
|
UTF-8
| 2,767
| 2.546875
| 3
|
[] |
no_license
|
#!/bin/bash
# Provision midterm environment: department trees under /var/public, skeleton
# directories, and Looney Tunes accounts. Must run as root.
touch "Osvaldo Perez"

# Department project trees.
mkdir -p /var/public/sales/data /var/public/sales/projects
mkdir -p /var/public/techs/data /var/public/techs/projects
mkdir -p /var/public/devops/data /var/public/devops/projects

groupadd sales
groupadd techs
groupadd devops
groupadd looneytunes

# Group-private (770) access to each department tree.
for grp in sales techs devops; do
    chmod -R 770 "/var/public/$grp"
    chgrp -R "$grp" "/var/public/$grp"
done

cp -r /etc/skel /etc/skel-sales
cp -r /etc/skel /etc/skel-techs
cp -r /etc/skel /etc/skel-devops

# BUGFIX: the original echo lines were double-quoted, so $2/$3 expanded at
# write time (to empty strings) and `awk` was fused to its program text,
# producing a broken ipconfig function. A quoted heredoc writes it verbatim.
cat >> /etc/skel-sales/.bashrc << 'EOF'
function ipconfig(){
ip addr | grep -v inet6 | grep -v 127.0.0.1 | awk '{print "IP Address:"$2}'
ip route | grep default | awk '{print "Default Gateway:"$3}'
}
EOF

# BUGFIX: the original printf emitted a literal backslash and unbalanced
# quotes ('That\'s ...'), leaving an unsourceable .bash_profile.
cat >> /etc/skel-techs/.bash_profile << 'EOF'
phrase="That's All Folks"
export phrase
EOF

mkdir -p /home/SALES /home/TECHS /home/DEVOPS

# add_user <group> <HOMEBASE> <login> <full name>
# Creates the account with the department skeleton and default password.
add_user() {
    local grp=$1 base=$2 login=$3 name=$4
    useradd -c "$name" -g "$grp" -G looneytunes -m -d "/home/$base/$login" -k "/etc/skel-$grp" "$login"
    echo Pa11word | passwd --stdin "$login"
}

chgrp -R sales /home/SALES
add_user sales SALES bbunny "Bugs Bunny"
add_user sales SALES tdevil "Tasmanian Devil"
add_user sales SALES mmartian "Marvin the Martian"
add_user sales SALES sylvester "Sylvester"

chgrp -R techs /home/TECHS
add_user techs TECHS ysam "Yosemite Sam"
add_user techs TECHS sgonzales "Speedy Gonzales"
add_user techs TECHS ppew "Pepe Le Pew"
add_user techs TECHS wcoyote "Wile E. Coyote"

chgrp -R devops /home/DEVOPS
add_user devops DEVOPS fleghorn "Foghorn Leghorn"
add_user devops DEVOPS efudd "Elmer Fudd"
add_user devops DEVOPS tweety "Tweety"
add_user devops DEVOPS ppig "Porky Pig"
| true
|
13c5bb25471fa34226edcacea5bf954d77b13715
|
Shell
|
mjw75077/scripts
|
/snap_backup.ksh
|
UTF-8
| 2,327
| 3.34375
| 3
|
[] |
no_license
|
#!/bin/ksh
#
# Name : snap_backup.ksh
#
# Purpose : Backup Database
#
# Usage :
#
# Dependent Files :
#
# History : Created 06/25/2013
#
# Date Name Comments
# Matt Wagner created
###########################################################################
# Print usage synopsis (ksh `print` builtin).
usage()
{
print "USAGE : snap_backup.ksh <dbname>"
}
# Take a SnapManager full backup of $DBNAME, then (only on success) purge
# archive logs older than one hour via RMAN.
# Globals: DBNAME (read); ORACLE_SID/ORACLE_HOME set by sourcing sdb.ksh.
# NOTE(review): <<- strips leading *tabs* only — the heredoc body must stay
# tab-indented (or unindented) for EOT to terminate.
do_backup()
{
. $DBAKSH/sdb.ksh $DBNAME
smo backup create -profile $ORACLE_SID -auto -full -noprotect -retain -hourly -verbose
if [ $? -eq 0 ]
then
$ORACLE_HOME/bin/rman target / <<-EOT
delete force noprompt archivelog all completed before 'sysdate - 1/24';
exit;
EOT
fi
}
# Parse -s <dbname> / -t <action>, run the backup, and redirect the whole
# function body's stdout/stderr to the per-db log/err files (note the
# redirections attached to the closing brace).
main()
{
while getopts s:t: OPTS; do # Capture -s option, strip others
case "$OPTS" in
s) DBNAME=$OPTARG;; # DBNAME to connect
t) BACKUP_ACTION=$OPTARG;; # DBNAME to connect
*) usage
return 2;; # Invalid args
esac
done
shift `expr $OPTIND - 1` # remove argument(s)
print "#################################################################################"
print `date` Begin $PROGRAM_NAME
do_backup
print "#################################################################################"
print `date` End $PROGRAM_NAME
print "#################################################################################"
} >$DBALOG/${PROGRAM_NAME}_${2}.log 2>$DBALOG/${PROGRAM_NAME}_${2}.err
#*******************************************************************************
# START HERE
# Run envoracle to set DBA system variables !!
. ~/.envorabase
export PROGRAM_NAME=`basename $0 .ksh`
export FILE_NAME=$0
export HOSTNAME=`hostname`
# Copy the old log file to the history file
cat $DBALOG/${PROGRAM_NAME}_${2}.log>>$DBALOG/${PROGRAM_NAME}_${2}.log.history 2>/dev/null
main ${1+"$@"}
# Mail the error log if non-empty, otherwise mail the success log.
if [[ -s $DBALOG/${PROGRAM_NAME}_${2}.err ]]
then
# cat $DBALOG/$PROGRAM_NAME.err | $DBAKSH/mail.ksh -s $FILE_NAME
cat $DBALOG/${PROGRAM_NAME}_${2}.err | mail -s $FILE_NAME wagnerm@one.verizon.com
else
cat $DBALOG/${PROGRAM_NAME}_${2}.log | mail -s "$DBNAME - Backup Complete" wagnerm@one.verizon.com
fi
exit $RETURN
| true
|
1460ac700a112f36731d42d31206f34eeadc7bcd
|
Shell
|
titledk/ttools-sitesync-core
|
/lib/dump-current-site.sh
|
UTF-8
| 3,354
| 4.15625
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# This script dumps the current site
# Can be called with the following arguments:
# 1. Type: dump/backup - is this a dump for sync, or a backup
# 2. Environment - if this is called on a server we might need to add the environment name, as specific paths etc. might
# need to be taken into account there - use "LOCAL" for local/default environment
# 3. Dump name - dumps can be named - a named backup will never be automatically deleted - supply "false" for default
# 4. Skip: skipfiles - if the fourth parameter supplied is called "skipfiles", then files will be skipped in the dump
#You need to supply either 'dump' or 'backup' as type
if [ -z "${1}" ]; then
    echo "Please specify which type of dump - dump/backup";
    exit;
fi
DUMPTYPE=$1
BASEDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && cd ../../.. && pwd )";
MODULEDIR="$BASEDIR/ttools/sitesync-core";
#sourcing variables
source $MODULEDIR/lib/vars.sh; #$DUMP_PATH_DEFAULT is defined here
DUMP_PATH=$DUMP_PATH_DEFAULT;
#getting configuration variables
VARS="$BASEDIR/ttools/core/lib/vars.sh"
eval `$VARS`
#Script can be called with a second environment parameter
ENV=LOCAL;
if [ "${2}" ]; then
    ENV=$2;
fi
ENVVARS="$BASEDIR/ttools/core/lib/vars-for-env.sh $ENV"
eval `$ENVVARS`
#specifics for backup type
if [[ "$DUMPTYPE" == "backup" ]]; then
    DUMP_PATH="$BASEDIR/temp/dumps/$BACKUP_NAME";
    #if a backup path has been set for the environment, use that instead
    # (indirect expansion: the env config exposes a per-environment var name)
    backupPathToEval="Environments_"$ENV"_Sitesync_BackupPath"
    if [ "${!backupPathToEval}" != "" ]; then
        DUMP_PATH="${!backupPathToEval}";
        mkdir -p $DUMP_PATH
    fi
    DUMP_NAME=$(date +"%Y-%m-%d_%H-%M%Z");
    #dump name can be called with a third backup name parameter
    #in this case any above settings are overridden
    if [[ "${3}" ]]; then
        if [[ "${3}" != "false" ]]; then
            DUMP_PATH="$BASEDIR/temp/dumps/$BACKUP_NAMED_NAME";
            DUMP_NAME=$(date +"%Y-%m-%d_")$3;
        fi
    fi
fi
#making sure dump path exists
mkdir -p $DUMP_PATH/$DUMP_NAME;
echo "Dumping db and assets to $DUMP_PATH/$DUMP_NAME";
DBNAME="$DUMP_PATH/$DUMP_NAME/$DUMP_DBNAME";
FILESDIR="$DUMP_PATH/$DUMP_NAME/$DUMP_FILESDIR";
# skipping files if requested
if [[ "${4}" == "skipfiles" ]]; then
    FILESDIR='false'
fi
#This is handled by each framework module individually
$BASEDIR/$Sitesync_FrameworkModule/lib/dump-current-site.sh $DBNAME $FILESDIR $ENV
#dump compression has been taken out for now
#echo "...and compressing the dump";
#
#cd $DUMP_PATH/$DUMP_NAME;
#nice -n 19 tar -zcf ../$DUMP_NAME.tar.gz *;
#
##we don't want to keep all the uncompressed versions for backups
##so we'll delete the backup directory, and only keep the tar
#if [[ "$DUMPTYPE" == "backup" ]]; then
#    rm -rf $DUMP_PATH/$DUMP_NAME
#fi
#specifics for backup type - only keep x backups
#default is 6 but can be configured through config.yml
KEEP=$BACKUP_KEEP_DEFAULT;
if [ "$Sitesync_DumpBackupKeep" ]; then
    KEEP=$Sitesync_DumpBackupKeep
fi
if [[ "$DUMPTYPE" == "backup" ]]; then
    #only clean up if the type is backup and no name parameter has been submitted
    if [ -z "${3}" ]; then
        echo ""
        echo "Keeping $KEEP latest backups"
        echo ""
        #regulating...
        KEEP=$(($KEEP+1));
        cd $DUMP_PATH;
        #from http://stackoverflow.com/questions/6024088/linux-save-only-recent-10-folders-and-delete-the-rest
        ls -dt */ | tail -n +$KEEP | xargs rm -rf
    fi
fi
| true
|
7a019403458f3563355b5bd0c35a21d8edde46fc
|
Shell
|
sgricci/BrowserQuest
|
/bin/build.sh
|
UTF-8
| 930
| 3.234375
| 3
|
[] |
no_license
|
#!/bin/bash
# Script to generate an optimized client build of BrowserQuest.
#
# Steps: wipe the previous build dir, run the RequireJS optimizer (r.js),
# then strip everything from the build tree that is not needed at runtime.
#
# Abort on the first failure so that a broken optimizer run does not
# proceed to delete files or move build.txt out of a stale build.
set -e

TOPLEVELDIR="$(dirname "$0")/.."
BUILDDIR="$TOPLEVELDIR/client-build"
PROJECTDIR="$TOPLEVELDIR/client/js"

echo "Deleting previous build directory"
rm -rf "$BUILDDIR"

echo "Building client with RequireJS"
node "$TOPLEVELDIR/bin/r.js" -o "$PROJECTDIR/build.js"

echo "Removing unnecessary js files from the build directory"
# Keep only the bundles/loaders needed at runtime; every other .js file is
# an individual module that the optimizer has already folded into a bundle.
find "$BUILDDIR/js" -type f \
    -not \( -name "game.js" \
            -o -name "home.js" \
            -o -name "log.js" \
            -o -name "require-jquery.js" \
            -o -name "modernizr.js" \
            -o -name "css3-mediaqueries.js" \
            -o -name "mapworker.js" \
            -o -name "detect.js" \
            -o -name "underscore.min.js" \
            -o -name "text.js" \) \
    -delete

echo "Removing sprites directory"
rm -rf "$BUILDDIR/sprites"

echo "Removing config directory"
rm -rf "$BUILDDIR/config"

echo "Moving build.txt to current dir"
mv "$BUILDDIR/build.txt" "$TOPLEVELDIR"

echo "Build complete"
| true
|
d32c8925b896f6985d2b1a90a0fc7ca141305d0e
|
Shell
|
gitGNU/gnu_www-ja
|
/tool/merge-for-gnun.sh
|
UTF-8
| 1,294
| 3.640625
| 4
|
[] |
no_license
|
#! /bin/bash
# Merge origin/master into the current branch with "theirs" conflict
# resolution, automatically recovering when git aborts because local or
# untracked files would be overwritten.
#
# The merge output is parsed line by line with a small state machine:
#   STATE 0  - scanning for one of git's known messages
#   STATE 1  - collecting filenames listed under "Your local changes..."
#   STATE 2  - collecting filenames listed under "untracked working tree files..."
#   STATE 99 - merge succeeded; just echo the remaining output
#
# NOTE(review): the whole `while` loop runs in a subshell (right side of the
# pipe), so STATE / LOCAL_FILES / NEW_FILES updates persist across loop
# iterations but NOT after `done`. The retry logic therefore lives entirely
# inside the loop, and `exit 0` only leaves the subshell — which is the last
# pipeline stage, so it also becomes the script's exit status.
LANG=C
# The two git messages this script reacts to:
# error: Your local changes to the following files would be overwritten by merge:
# error: The following untracked working tree files would be overwritten by merge:
STATE=0
LOCAL_FILES=""
NEW_FILES=""
# `read` with no variable stores the line in REPLY; default IFS strips the
# leading whitespace git uses to indent filenames — the parsing relies on that.
git merge origin/master -s recursive -X theirs 2>&1 | while read; do
if [ $STATE -eq 0 ]; then
if [ "${REPLY}" = "Already up-to-date." ]; then
STATE=99
echo $REPLY
elif [ "${REPLY}" = "Fast-forward" ]; then
STATE=99
echo $REPLY
elif [ "${REPLY}" = "Aborting" ]; then
# Merge was refused: restore the conflicting tracked files, delete the
# conflicting untracked ones, then retry the merge once.
# (Unquoted expansion is intentional — the variables hold space-joined
# filename lists; filenames containing spaces would break here.)
echo "checkout original..."
echo $LOCAL_FILES
git checkout $LOCAL_FILES
echo "Removing ..."
echo "$NEW_FILES"
rm -f $NEW_FILES
git merge origin/master -s recursive -X theirs
exit 0
elif [ "${REPLY:0:25}" = "error: Your local changes" ]; then
STATE=1
elif [ "${REPLY:0:25}" = "error: The following untr" ]; then
STATE=2
else
echo $REPLY
fi
elif [ $STATE -eq 1 ]; then
# Filename list ends at git's "Please ..." advice line.
if [ "${REPLY:0:6}" = "Please" ]; then
STATE=0
else
LOCAL_FILES="$LOCAL_FILES $REPLY";
fi
elif [ $STATE -eq 2 ]; then
if [ "${REPLY:0:6}" = "Please" ]; then
STATE=0
else
NEW_FILES="$NEW_FILES $REPLY";
fi
elif [ $STATE -eq 99 ]; then
echo $REPLY
fi
done
| true
|
07082fd42bf659524ee4471b6b79fe87199610f5
|
Shell
|
OTangTang/ld_note
|
/cheatsheet/ops_doc-master/makefile/cmake/cmake.sh
|
UTF-8
| 45,409
| 3.34375
| 3
|
[] |
no_license
|
cmake_minimum_required(VERSION 2.8.4){
https://github.com/yszheda/wiki/wiki/CMake
CMake : 编译系统生成器
CPack : 包生成器
CTest : 系统检测驱动器
CDash : dashboard收集器
}
? /usr/bin/cmake -P cmake_install.cmake ? 在Makefile中
? /usr/bin/cmake -DCMAKE_INSTALL_DO_STRIP=1 -P cmake_install.cmake ? 在Makefile中
/usr/bin/cmake -DCMAKE_INSTALL_DO_STRIP=1 -P cmake_install.cmake # CMAKE_INSTALL_DO_STRIP=1 和 cmake_install.cmake
CMAKE_INCLUDE_PATH=/home/include cmake .. # export CMAKE_INCLUDE_PATH
CMAKE_LIBRARY_PATH=/home/include cmake .. # export CMAKE_LIBRARY_PATH
cmake -DCMAKE_CXX_COMPILER=xlc -DBUILD_TESTING:BOOL=ON ../foo #CMAKE_CXX_COMPILER=xlc 和 BUILD_TESTING:BOOL=ON
make clean # 清理工程
make distclean # cmake不支持
内部构建与外部构建
进入static目录,运行../configure –enable-static;make会在static目录生成wxGTK的静态库。
进入shared目录,运行../configure –enable-shared;make就会在shared目录生成动态库。
外部构建:
一个最大的好处是,对于原有的工程没有任何影响,所有动作全部发生在编译目录。通过这一点,
也足以说服我们全部采用外部编译方式构建工程。
PROJECT(projectname [CXX] [C] [Java]){
cmake自动创建的环境变量
PROJECT_BINARY_DIR projectname_BINARY_DIR /home/ubuntu/cmake/learning-cmake/hello-world/build
PROJECT_SOURCE_DIR projectname_SOURCE_DIR /home/ubuntu/cmake/learning-cmake/hello-world
PROJECT_BINARY_DIR CMAKE_BINARY_DIR # 工程的根目录
PROJECT_SOURCE_DIR CMAKE_SOURCE_DIR # 运行cmake命令的目录,通常是${PROJECT_SOURCE_DIR}/build
CMAKE_CURRENT_BINARY_DIR # CMakelists的根目录
CMAKE_CURRENT_SOURCE_DIR # target编译目录
PROJECT_NAME # 返回通过PROJECT指令定义的项目名称
指令定义工程名称。并可指定工程支持的语言,支持的语言列表是可以忽略的。
projectname_BINARY_DIR比PROJECT_BINARY_DIR在设计过程中耦合性大些。
作为工程名的HELLO和生成的可执行文件hello是没有任何关系的。
}
SET(VAR [VALUE] [CACHE TYPE DOCSTRING [FORCE]]){
1. SET指令可以用来显式的定义变量即可。
SET(SRC_LIST main.c) 或 SET(SRC_LIST main.c t1.c t2.c)
SET(LIBS ${LIBS} ${LIBNL_LIBS}) # LIBS为原先SET变量;LIBNL_LIBS也为原先SET变量
}
EXECUTABLE_OUTPUT_PATH(可执行文件输出目录){
SET(EXECUTABLE_OUTPUT_PATH ${PROJECT_BINARY_DIR}/bin) # 可执行二进制的输出路径为build/bin
问题是,我应该把这两条指令写在工程的CMakeLists.txt还是src目录下的
CMakeLists.txt,把握一个简单的原则,在哪里ADD_EXECUTABLE或ADD_LIBRARY,
如果需要改变目标存放路径,就在哪里加入上述的定义。
}
LIBRARY_OUTPUT_PATH(中间静态和动态库输出路径){
SET(LIBRARY_OUTPUT_PATH ${PROJECT_BINARY_DIR}/lib) # 库的输出路径为build/lib
如EXECUTABLE_OUTPUT_PATH描述;
}
INSTALL(目标二进制、动态库、静态库以及文件、目录、脚本){
INSTALL指令用于定义安装规则,安装的内容可以包括目标二进制、动态库、静态库以及文件、目录、脚本等。
INSTALL指令包含了各种安装类型,我们需要一个个分开解释:
DESTDIR # 安装到指定的目录
PREFIX # 安装到以PREFIX为前缀的目录 CMAKE_INSTALL_PREFIX
DESTDIR=
install:
mkdir -p $(DESTDIR)/usr/bin
install -m 755 hello $(DESTDIR)/usr/bin
---------------------------------------
DESTDIR=
PREFIX=/usr
install:
mkdir -p $(DESTDIR)/$(PREFIX)/bin
install -m 755 hello $(DESTDIR)/$(PREFIX)/bin
---------------------------------------
# 目的地,权限,配置。
# 不能够在cmake中指定文件名的支持重命名
# 静态库,动态库和可执行程序共享TARGETS这个目标。
TARGETS :ARCHIVE|LIBRARY # 编译生成的文件,ADD_LIBRARY生成的目标
TARGETS :RUNTIME # 编译生成的文件,为ADD_EXECUTABLE生成的目标
FILES : README和INSTALL COPYRIGHT # 帮助,安装和版权相关零碎文件
PROGRAMS:
}
CMAKE_INSTALL_PREFIX(指定安装前缀路径){
CMAKE_INSTALL_PREFIX变量类似于configure – 脚本的 prefix,常见的使用方法看起来是这个样子:
cmake -DCMAKE_INSTALL_PREFIX=/usr .
# cmake中默认定义了DCMAKE_INSTALL_PREFIX选项用实现--prefix的功能。
如果我没有定义CMAKE_INSTALL_PREFIX会安装到什么地方?你可以尝试以下,
cmake ..;make;make install,
你会发现CMAKE_INSTALL_PREFIX的默认定义是/usr/local
}
INSTALL(目标文件的安装){
目标文件的安装:
INSTALL(TARGETS targets...
[[ARCHIVE|LIBRARY|RUNTIME]
[DESTINATION <dir>]
[PERMISSIONS permissions...]
[CONFIGURATIONS
[Debug|Release|...]]
[COMPONENT <component>]
[OPTIONAL]
] [...])
1. 参数中的TARGETS后面跟的就是我们通过ADD_EXECUTABLE或者ADD_LIBRARY定义的
目标文件,可能是可执行二进制、动态库、静态库。
2. 目标类型也就相对应的有三种,ARCHIVE特指静态库,LIBRARY特指动态库,RUNTIME
特指可执行目标二进制。
3. DESTINATION定义了安装的路径,如果路径以/开头,那么指的是绝对路径,这时候
CMAKE_INSTALL_PREFIX其实就无效了。如果你希望使用CMAKE_INSTALL_PREFIX来
定义安装路径,就要写成相对路径,即不要以/开头,那么安装后的路径就是
${CMAKE_INSTALL_PREFIX}/<DESTINATION定义的路径>
举个简单的例子:
INSTALL(TARGETS myrun mylib mystaticlib
RUNTIME DESTINATION bin
LIBRARY DESTINATION lib
ARCHIVE DESTINATION libstatic
)
上面的例子会将:
可执行二进制myrun安装到${CMAKE_INSTALL_PREFIX}/bin目录
动态库libmylib安装到${CMAKE_INSTALL_PREFIX}/lib目录
静态库libmystaticlib安装到${CMAKE_INSTALL_PREFIX}/libstatic目录
特别注意的是你不需要关心TARGETS具体生成的路径,只需要写上TARGETS名称就可以
了。
}
INSTALL(普通文件的安装){
普通文件的安装: # 可以对普通安装的文件重命名。
INSTALL(FILES files... DESTINATION <dir>
[PERMISSIONS permissions...]
[CONFIGURATIONS [Debug|Release|...]]
[COMPONENT <component>]
[RENAME <name>] [OPTIONAL])
可用于安装一般文件,并可以指定访问权限,文件名是此指令所在路径下的相对路径。
如果默认不定义权限PERMISSIONS,安装后的权限为:
OWNER_WRITE, OWNER_READ, GROUP_READ,和WORLD_READ,即644权限。
}
INSTALL(非目标文件的可执行程序安装){
非目标文件的可执行程序安装(比如脚本之类):
INSTALL(PROGRAMS files... DESTINATION <dir>
[PERMISSIONS permissions...]
[CONFIGURATIONS [Debug|Release|...]]
[COMPONENT <component>]
[RENAME <name>] [OPTIONAL])
跟上面的FILES指令使用方法一样,唯一的不同是安装后权限为:
OWNER_EXECUTE, GROUP_EXECUTE, 和WORLD_EXECUTE,即755权限。
安装时CMAKE脚本的执行:
INSTALL([[SCRIPT <file>] [CODE <code>]] [...])
SCRIPT参数用于在安装时调用cmake脚本文件(也就是<abc>.cmake文件)
CODE参数用于执行CMAKE指令,必须以双引号括起来。比如:
INSTALL(CODE "MESSAGE(\"Sample install message.\")")
}
INSTALL(目录的安装){
目录的安装:
INSTALL(DIRECTORY dirs... DESTINATION <dir>
[FILE_PERMISSIONS permissions...]
[DIRECTORY_PERMISSIONS permissions...]
[USE_SOURCE_PERMISSIONS]
[CONFIGURATIONS [Debug|Release|...]]
[COMPONENT <component>]
[[PATTERN <pattern> | REGEX <regex>]
[EXCLUDE] [PERMISSIONS permissions...]] [...])
这里主要介绍其中的DIRECTORY、PATTERN以及PERMISSIONS参数。
DIRECTORY后面连接的是所在Source目录的相对路径,但务必注意:
abc和abc/有很大的区别。
1.1 如果目录名不以/结尾,那么这个目录将被安装为目标路径下的abc,
1.2 如果目录名以/结尾,代表将这个目录中的内容安装到目标路径,但不包括这个目录本身。
2. PATTERN用于使用正则表达式进行过滤,
3. PERMISSIONS用于指定PATTERN过滤后的文件权限。
INSTALL(DIRECTORY icons scripts/ DESTINATION share/myproj
PATTERN "CVS" EXCLUDE
PATTERN "scripts/*"
PERMISSIONS OWNER_EXECUTE OWNER_WRITE OWNER_READ
GROUP_EXECUTE GROUP_READ)
这条指令的执行结果是:
将icons目录安装到<prefix>/share/myproj,将scripts/中的内容安装到
<prefix>/share/myproj
不包含目录名为CVS的目录,对于scripts/* 文件指定权限为OWNER_EXECUTE
OWNER_WRITE OWNER_READ GROUP_EXECUTE GROUP_READ.
}
INSTALL(安装时CMAKE脚本的执行){
安装时CMAKE脚本的执行:
INSTALL([[SCRIPT <file>] | [CODE <code>]] [...])
1. SCRIPT参数用于在安装时调用cmake脚本文件(也就是<abc>.cmake文件)
2. CODE参数用于执行CMAKE指令,必须以双引号括起来。比如:
INSTALL(CODE "MESSAGE(\"Sample install message.\")")
}
MESSAGE([SEND_ERROR | STATUS | FATAL_ERROR] "message to display" ...){
这个指令用于向终端输出用户定义的信息,包含了三种类型:
SEND_ERROR,产生错误,生成过程被跳过。 # 从CMakeList.txt生成makefile过程中跳出
STATUS ,输出前缀为--的信息。 # 显示
FATAL_ERROR,立即终止所有cmake过程. # 从cmake构建工程过程中退出
}
ADD_EXECUTABLE(生成可执行文件){
# ADD_EXECUTABLE(hello ${SRC_LIST})
定义了这个工程会生成一个文件名为hello的可执行文件,相关的源文件是SRC_LIST中定义的源文件列表,
}
ADD_LIBRARY(生成静态库和动态库){
指令ADD_LIBRARY
ADD_LIBRARY(libname [SHARED|STATIC|MODULE]
[EXCLUDE_FROM_ALL]
source1 source2 ... sourceN)
你不需要写全libhello.so,只需要填写hello即可,cmake系统会自动为你生成libhello.X
类型有三种:SHARED,动态库 STATIC,静态库; MODULE,在使用dyld的系统有效,如果不支持dyld,则被当作SHARED对待。
# EXCLUDE_FROM_ALL参数的意思是这个库不会被默认构建,除非有其他的组件依赖或者手工构建。
1. 如何通过ADD_LIBRARY指令构建动态库和静态库。
2. 如何通过SET_TARGET_PROPERTIES同时构建同名的动态库和静态库。
3. 如何通过SET_TARGET_PROPERTIES控制动态库版本
}
ADD_LIBRARY(添加静态库){
ADD_LIBRARY(hello STATIC ${LIBHELLO_SRC})
然后再在build目录进行外部编译,我们会发现,静态库根本没有被构建,仍然只生成了一个动态库。因为hello作为
一个target是不能重名的,所以,静态库构建指令无效。
如果我们把上面的hello修改为hello_static:
ADD_LIBRARY(hello_static STATIC ${LIBHELLO_SRC})
就可以构建一个libhello_static.a的静态库了。
}
add_custom_comand(生成指定的文件(文件组)的生成命令){
add_custom_command: 增加客制化的构建规则到生成的构建系统中。对于add_custom_command,有两种使用形式。
# 第一种是为了生成输出文件,添加一条自定义命令。
# 在编译时拷贝文件之add_custom_comand 和 add_custom_target
add_custom_command(OUTPUT output1 [output2 ...]
COMMAND command1[ARGS] [args1...]
[COMMAND command2 [ARGS] [args2...] ...]
[MAIN_DEPENDENCYdepend]
[DEPENDS[depends...]]
[IMPLICIT_DEPENDS<lang1> depend1 ...]
[WORKING_DIRECTORYdir]
[COMMENT comment] [VERBATIM] [APPEND])
不要同时在多个相互独立的目标中执行上述命令产生相同的文件,主要是为了防止冲突产生。如果有多条命令,
它们将会按顺序执行。ARGS是为了向后兼容,使用过程中可以忽略。MAIN_DEPENDENCY完全是可选的,
第二种形式是为某个目标如库或可执行程序添加一个客制命令。这对于要在构建一个目标之前或之后执行一些操作
非常有用。该命令本身会成为目标的一部分,仅在目标本身被构建时才会执行。如果该目标已经构建,命令将不会执行。
add_custom_command(TARGET target
PRE_BUILD | PRE_LINK| POST_BUILD
COMMAND command1[ARGS] [args1...]
[COMMAND command2[ARGS] [args2...] ...]
[WORKING_DIRECTORYdir]
[COMMENT comment][VERBATIM])
命令执行的时机由如下参数决定:
PRE_BUILD - 命令将会在其他依赖项执行前执行
PRE_LINK - 命令将会在其他依赖项执行完后执行
POST_BUILD - 命令将会在目标构建完后执行。
---------------------------------------
add_custom_command(TARGET dbus-1 POST_BUILD
COMMAND ${CMAKE_COMMAND} -E copy "$<TARGET_FILE:dbus-1>" "$<TARGET_FILE_DIR:dbus-1>/${CMAKE_SHARED_LIBRARY_PREFIX}dbus-1
${CMAKE_SHARED_LIBRARY_SUFFIX}"
COMMENT "Create non versioned dbus-1 library for legacy applications"
)
}
add_custom_target(){
add_custom_target: 增加一个没有输出的目标,使得它总是被构建。 add_custom_target(Name [ALL] [command1 [args1...]]
[COMMAND command2 [args2...] ...]
[DEPENDS depend depend depend ... ]
[WORKING_DIRECTORY dir]
[COMMENT comment] [VERBATIM]
[SOURCES src1 [src2...]])
# 在编译时拷贝文件之add_custom_comand 和 add_custom_target
增加一个指定名字的目标,并执行指定的命令。该目标没有输出文件,总是被认为是过期的,即使是在试图用目标的
名字创建一个文件。使用ADD_CUSTOM_COMMAND命令来创建一个具有依赖项的文件。默认情况下,没有任何目标会依赖该
客制目标。使用ADD_DEPENDENCIES 来添加依赖项或成为别的目标的依赖项。如果指定了ALL选项,那就表明该目标会被
添加到默认的构建目标,使得它每次都被运行。(该命令的名称不能命名为 ALL). 命令和参数都是可选的,如果没有指定,
将会创建一个空目标。如果设置了WORKING_DIRECTORY ,那么该命令将会在指定的目录中运行。如果它是个相对路径,
那它会被解析为相对于当前源码目录对应的构建目录。如果设置了 COMMENT,在构建的时候,该值会被当成信息在执行
该命令之前显示。DEPENDS参数可以是文件和同一目录中的其他客制命令的输出。
如果指定了VERBATIM, 所有传递给命令的参数将会被适当地转义。建议使用该选项。
SOURCES选项指定了包含进该客制目标的额外的源文件。即使这些源文件没有构建规则,但是它们会被增加到IDE的
工程文件中以方便编辑。
add_custom_target(check COMMAND ctest -R ^test-.*) # dbus
add_custom_target(help-options
cmake -LH
WORKING_DIRECTORY ${CMAKE_BINARY_DIR}
) # dbus
add_custom_target(doc
COMMAND ${DOXYGEN_EXECUTABLE} ${CMAKE_BINARY_DIR}/Doxyfile
WORKING_DIRECTORY ${CMAKE_BINARY_DIR}
) # dbus
}
SET_TARGET_PROPERTIES(设置输出的名称){
SET_TARGET_PROPERTIES,其基本语法是:
SET_TARGET_PROPERTIES(target1 target2 ...
PROPERTIES prop1 value1
prop2 value2 ...)
这条指令可以用来设置输出的名称,对于动态库,还可以用来指定动态库版本和API版本。
在本例中,我们需要作的是向lib/CMakeLists.txt中添加一条:
SET_TARGET_PROPERTIES(hello_static PROPERTIES OUTPUT_NAME "hello")
这样,我们就可以同时得到libhello.so/libhello.a两个库了。
-------------------------------------------------------------------------------
1. cmake在构建一个新的target时,会尝试清理掉其他使用这个名字的库,因为,在构建libhello.a时,
就会清理掉libhello.so.
SET_TARGET_PROPERTIES(hello PROPERTIES CLEAN_DIRECT_OUTPUT 1)
SET_TARGET_PROPERTIES(hello_static PROPERTIES CLEAN_DIRECT_OUTPUT 1)
CLEAN_DIRECT_OUTPUT:这时候,我们再次进行构建,会发现build/lib目录中同时生成了libhello.so和libhello.a
-------------------------------------------------------------------------------
动态库版本号
libhello.so.1.2
libhello.so ->libhello.so.1
libhello.so.1->libhello.so.1.2
SET_TARGET_PROPERTIES(hello PROPERTIES VERSION 1.2 SOVERSION 1)
VERSION指代动态库版本,SOVERSION指代API版本。
}
GET_TARGET_PROPERTY(获得输出的名称){
GET_TARGET_PROPERTY(VAR target property)
具体用法如下例,我们向lib/CMakeListst.txt中添加:
GET_TARGET_PROPERTY(OUTPUT_VALUE hello_static OUTPUT_NAME)
MESSAGE(STATUS "This is the hello_static OUTPUT_NAME:" ${OUTPUT_VALUE})
}
INCLUDE_DIRECTORIES([AFTER|BEFORE] [SYSTEM] dir1 dir2 ...){
这条指令可以用来向工程添加多个特定的头文件搜索路径,路径之间用空格分割,如果路径
中包含了空格,可以使用双引号将它括起来,默认的行为是追加到当前的头文件搜索路径的
后面,你可以通过两种方式来进行控制搜索路径添加的方式:
1,CMAKE_INCLUDE_DIRECTORIES_BEFORE,通过SET这个cmake变量为on,可以
将添加的头文件搜索路径放在已有路径的前面。
2,通过AFTER或者BEFORE参数,也可以控制是追加还是置前。
}
LINK_DIRECTORIES(directory1 directory2 ...){
这个指令非常简单,添加非标准的共享库搜索路径,比如,在工程内部同时存在共享库和可
执行二进制,在编译时就需要指定一下这些共享库的路径。这个例子中我们没有用到这个指
令。
如何通过INCLUDE_DIRECTORIES指令加入非标准的头文件搜索路径。
如何通过LINK_DIRECTORIES指令加入非标准的库文件搜索路径。
如果通过TARGET_LINK_LIBRARIES为库或可执行二进制加入库链接。
并解释了如果链接到静态库。
}
TARGET_LINK_LIBRARIES(指定可执行文件链接的动态库){
TARGET_LINK_LIBRARIES(target library1
<debug | optimized> library2
...)
这个指令可以用来为target添加需要链接的共享库,本例中是一个可执行文件,但是同样
可以用于为自己编写的共享库添加共享库链接。
TARGET_LINK_LIBRARIES(main libhello.so) TARGET_LINK_LIBRARIES(main hello)
TARGET_LINK_LIBRARIES(main libhello.a)
}
CMAKE_INCLUDE_PATH(环境变量,非cmake变量){
务必注意,这两个是环境变量而不是cmake变量。
使用方法是要在bash中用export或者在csh中使用set命令设置或者
CMAKE_INCLUDE_PATH=/home/include cmake ..等方式。
export CMAKE_INCLUDE_PATH=/usr/include/hello
}
CMAKE_LIBRARY_PATH(环境变量,非cmake变量){
务必注意,这两个是环境变量而不是cmake变量。
使用方法是要在bash中用export或者在csh中使用set命令设置或者
CMAKE_LIBRARY_PATH=/home/include cmake ..等方式。
}
FIND_LIBRARY(在指定路径中搜索库名称){}
FIND_PATH(在指定路径中搜索文件名){
FIND_PATH用来在指定路径中搜索文件名。
FIND_PATH(myHeader NAMES hello.h PATHS /usr/include /usr/include/hello)
---------------------------------------
export CMAKE_INCLUDE_PATH=/usr/include/hello
然后在头文件中将INCLUDE_DIRECTORIES(/usr/include/hello)替换为:
FIND_PATH(myHeader hello.h)
IF(myHeader)
INCLUDE_DIRECTORIES(${myHeader})
ENDIF(myHeader)
这里我们没有指定路径,但是,cmake仍然可以帮我们找到hello.h存放的路径,就是因
为我们设置了环境变量CMAKE_INCLUDE_PATH
如果你不使用FIND_PATH,CMAKE_INCLUDE_PATH变量的设置是没有作用的,你不能指
望它会直接为编译器命令添加参数-I<CMAKE_INCLUDE_PATH>。
以此为例,CMAKE_LIBRARY_PATH可以用在FIND_LIBRARY中。
同样,因为这些变量直接为FIND_指令所使用,所以所有使用FIND_指令的cmake模块都会受益。
}
ADD_SUBDIRECTORY(source_dir [binary_dir] [EXCLUDE_FROM_ALL]){
source_dir 这个指令用于向当前工程添加存放源文件的子目录,
binary_dir 并可以指定中间二进制和目标二进制存放的位置。
EXCLUDE_FROM_ALL 参数的含义是将这个目录从编译过程中排除,
ADD_SUBDIRECTORY(src)
1. 定义了将src子目录加入工程,并指定编译目标二进制输出路径为bin目录。编译中间结果存放在build/src目录。
add_subdirectory(src bin)
2. 编译中间结果和编译目标二进制存放到bin目录下。
}
SUBDIRS(dir1 dir2...){
SUBDIRS(dir1 dir2...),但是这个指令已经不推荐使用。它可以一次添加多个子目录,
并且,即使外部编译,子目录体系仍然会被保存。
如果我们在上面的例子中将ADD_SUBDIRECTORY (src bin)修改为SUBDIRS(src)。
}
CMAKE(){
1. cmake自定义变量的方式
SET(HELLO_SRC main.c),
2. cmake常用变量:
2.1 CMAKE_BINARY_DIR CMAKE_SOURCE_DIR
PROJECT_BINARY_DIR PROJECT_SOURCE_DIR
<projectname>_BINARY_DIR <projectname>_SOURCE_DIR
2.2 CMAKE_CURRENT_SOURCE_DIR # 当前处理的CMakeLists.txt所在的路径
指的是当前处理的CMakeLists.txt所在的路径,比如上面我们提到的src子目录。
2.3 CMAKE_CURRRENT_BINARY_DIR # target编译目录 使用ADD_SURDIRECTORY(src bin)可以更改此变量的值
如果是in-source编译,它跟CMAKE_CURRENT_SOURCE_DIR一致,如果是out-of-source编译,他指的是target编译目录。
使用我们上面提到的ADD_SUBDIRECTORY(src bin)可以更改这个变量的值。
使用SET(EXECUTABLE_OUTPUT_PATH <新路径>)并不会对这个变量造成影响,它仅仅修改了最终目标文件存放的路径。
2.4 CMAKE_CURRENT_LIST_FILE # 输出调用这个变量的CMakeLists.txt的完整路径
2.5 CMAKE_CURRENT_LIST_LINE # 输出这个变量所在的行
2.6 CMAKE_MODULE_PATH # 定义自己的cmake模块所在的路径
这个变量用来定义自己的cmake模块所在的路径。如果你的工程比较复杂,有可能会自己
编写一些cmake模块,这些cmake模块是随你的工程发布的,为了让cmake在处理
CMakeLists.txt时找到这些模块,你需要通过SET指令,将自己的cmake模块路径设置一下。
比如
SET(CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake)这时候你就可以通过INCLUDE指令来调用自己的模块了。
2.7 EXECUTABLE_OUTPUT_PATH和LIBRARY_OUTPUT_PATH
分别用来重新定义最终结果的存放目录,前面我们已经提到了这两个变量。
2.9 PROJECT_NAME # 返回通过PROJECT指令定义的项目名称。
3.0 CMAKE_ALLOW_LOOSE_LOOP_CONSTRUCTS # 用来控制IF ELSE语句的书写方式
-------------------------------------------------------------------------------
cmake调用环境变量的方式
使用$ENV{NAME}指令就可以调用系统的环境变量了。比如
MESSAGE(STATUS "HOME dir: $ENV{HOME}")
设置环境变量的方式是:
SET(ENV{变量名} 值)
1. CMAKE_INCLUDE_CURRENT_DIR
自动添加CMAKE_CURRENT_BINARY_DIR和CMAKE_CURRENT_SOURCE_DIR到当前处理的CMakeLists.txt。
相当于在每个CMakeLists.txt加入:
INCLUDE_DIRECTORIES(${CMAKE_CURRENT_BINARY_DIR} ${CMAKE_CURRENT_SOURCE_DIR})
2. CMAKE_INCLUDE_DIRECTORIES_PROJECT_BEFORE
将工程提供的头文件目录始终至于系统头文件目录的前面,当你定义的头文件确实跟系统发生冲突时可以提供一些帮助。
3. CMAKE_INCLUDE_PATH和CMAKE_LIBRARY_PATH
-------------------------------------------------------------------------------
系统信息
1. CMAKE_MAJOR_VERSION,CMAKE # 主版本号,比如2.4.6中的2
2. CMAKE_MINOR_VERSION,CMAKE # 次版本号,比如2.4.6中的4
3. CMAKE_PATCH_VERSION,CMAKE # 补丁等级,比如2.4.6 中的6
4. CMAKE_SYSTEM, # 系统名称,比如Linux-2.6.22
5. CMAKE_SYSTEM_NAME # 不包含版本的系统名,比如Linux
6. CMAKE_SYSTEM_VERSION, # 系统版本,比如2.6.22
7. CMAKE_SYSTEM_PROCESSOR, # 处理器名称,比如i686.
8. UNIX, # 在所有的类UNIX平台为TRUE,包括OS X和cygwin
9. WIN32, # 在所有的win32平台为TRUE,包括cygwin
-------------------------------------------------------------------------------
主要的开关选项:
1,CMAKE_ALLOW_LOOSE_LOOP_CONSTRUCTS,用来控制IF ELSE语句的书写方式,在
下一节语法部分会讲到。
2,BUILD_SHARED_LIBS # 这个开关用来控制默认的库编译方式,如果不进行设置,使用ADD_LIBRARY并没有指定库
# 类型的情况下,默认编译生成的库都是静态库。
# 如果SET(BUILD_SHARED_LIBS ON)后,默认生成的为动态库。
3,CMAKE_C_FLAGS # 设置C编译选项,也可以通过指令ADD_DEFINITIONS()添加。
4,CMAKE_CXX_FLAGS # 设置C++编译选项,也可以通过指令ADD_DEFINITIONS()添加。
}
ADD_DEFINITIONS(添加编译选项){
1. ADD_DEFINITIONS
向C/C++编译器添加-D定义,比如:
ADD_DEFINITIONS(-DENABLE_DEBUG -DABC),参数之间用空格分割。
如果你的代码中定义了#ifdef ENABLE_DEBUG #endif,这个代码块就会生效。
如果要添加其他的编译器开关,可以通过CMAKE_C_FLAGS变量和CMAKE_CXX_FLAGS变量设置。
}
ADD_DEPENDENCIES(说明依赖关系){
ADD_DEPENDENCIES(preload-seccomp syscall-names-h)
ADD_DEPENDENCIES(ujail capabilities-names-h)
ADD_DEPENDENCIES(utrace syscall-names-h)
ADD_DEPENDENCIES(preload-seccomp syscall-names-h)
}
cmake(cmake常用指令){
2. ADD_DEPENDENCIES
定义target依赖的其他target,确保在编译本target之前,其他的target已经被构建。
ADD_DEPENDENCIES(target-name depend-target1 depend-target2 ...)
3. ADD_EXECUTABLE、ADD_LIBRARY、ADD_SUBDIRECTORY前面已经介绍过了.
4. ADD_TEST与ENABLE_TESTING指令。
ENABLE_TESTING指令用来控制Makefile是否构建test目标,涉及工程所有目录。语法很简单,没有任何参数,
ENABLE_TESTING(),一般情况这个指令放在工程的主CMakeLists.txt中.
ADD_TEST指令的语法是:
ADD_TEST(testname Exename arg1 arg2 ...)
testname是自定义的test名称,Exename可以是构建的目标文件也可以是外部脚本等
等。后面连接传递给可执行文件的参数。如果没有在同一个CMakeLists.txt中打开
ENABLE_TESTING()指令,任何ADD_TEST都是无效的。
比如我们前面的Helloworld例子,可以在工程主CMakeLists.txt中添加
ADD_TEST(mytest ${PROJECT_BINARY_DIR}/bin/main)
ENABLE_TESTING()
生成Makefile后,就可以运行make test来执行测试了。
8,FILE指令
文件操作指令,基本语法为:
FILE(WRITE filename "message to write"... )
FILE(APPEND filename "message to write"... )
FILE(READ filename variable)
FILE(GLOB variable [RELATIVE path] [globbing expressions]...)
FILE(GLOB_RECURSE variable [RELATIVE path] [globbing expressions]...)
FILE(REMOVE [directory]...)
FILE(REMOVE_RECURSE [directory]...)
FILE(MAKE_DIRECTORY [directory]...)
FILE(RELATIVE_PATH variable directory file)
FILE(TO_CMAKE_PATH path result)
FILE(TO_NATIVE_PATH path result)
这里的语法都比较简单,不在展开介绍了。
9,INCLUDE指令,用来载入CMakeLists.txt文件,也用于载入预定义的cmake模块.
INCLUDE(file1 [OPTIONAL])
INCLUDE(module [OPTIONAL])
OPTIONAL参数的作用是文件不存在也不会产生错误。
你可以指定载入一个文件,如果定义的是一个模块,那么将在CMAKE_MODULE_PATH中搜
索这个模块并载入。
载入的内容将在处理到INCLUDE语句是直接执行。
二,INSTALL指令
INSTALL系列指令已经在前面的章节有非常详细的说明,这里不在赘述,可参考前面的安
装部分。
三,FIND_指令
FIND_系列指令主要包含一下指令:
FIND_FILE(<VAR> name1 path1 path2 ...) # VAR变量代表找到的文件全路径,包含文件名
FIND_LIBRARY(<VAR> name1 path1 path2 ...) # VAR变量表示找到的库全路径,包含库文件名
FIND_PATH(<VAR> name1 path1 path2 ...) # VAR变量代表包含这个文件的路径。
FIND_PROGRAM(<VAR> name1 path1 path2 ...) # VAR变量代表包含这个程序的全路径。
FIND_PACKAGE(<name> [major.minor] [QUIET] [NO_MODULE]
[[REQUIRED|COMPONENTS] [componets...]])
用来调用预定义在CMAKE_MODULE_PATH下的Find<name>.cmake模块,你也可以自己
定义Find<name>模块,通过SET(CMAKE_MODULE_PATH dir)将其放入工程的某个目录
中供工程使用,我们在后面的章节会详细介绍FIND_PACKAGE的使用方法和Find模块的
编写。
FIND_LIBRARY示例:
FIND_LIBRARY(libX X11 /usr/lib)
IF(NOT libX)
MESSAGE(FATAL_ERROR “libX not found”)
ENDIF(NOT libX)
四,控制指令:
1,IF指令,基本语法为:
IF(expression)
# THEN section.
COMMAND1(ARGS ...)
另外一个指令是ELSEIF,总体把握一个原则,凡是出现IF的地方一定要有对应的
ENDIF.出现ELSEIF的地方,ENDIF是可选的。
表达式的使用方法如下:
IF(var),如果变量不是:空,0,N, NO, OFF, FALSE, NOTFOUND或
<var>_NOTFOUND时,表达式为真。
IF(NOT var ),与上述条件相反。
IF(var1 AND var2),当两个变量都为真是为真。
IF(var1 OR var2),当两个变量其中一个为真时为真。
IF(COMMAND cmd),当给定的cmd确实是命令并可以调用是为真。
IF(EXISTS dir)或者IF(EXISTS file),当目录名或者文件名存在时为真。
IF(file1 IS_NEWER_THAN file2),当file1比file2新,或者file1/file2其
中有一个不存在时为真,文件名请使用完整路径。
IF(IS_DIRECTORY dirname),当dirname是目录时,为真。
IF(variable MATCHES regex)
IF(string MATCHES regex)
当给定的变量或者字符串能够匹配正则表达式regex时为真。比如:
IF("hello" MATCHES "ell")
MESSAGE("true")
ENDIF("hello" MATCHES "ell")
IF(variable LESS number)
IF(string LESS number)
IF(variable GREATER number)
IF(string GREATER number)
IF(variable EQUAL number)
IF(string EQUAL number)
数字比较表达式
IF(variable STRLESS string)
IF(string STRLESS string)
IF(variable STRGREATER string)
IF(string STRGREATER string)
IF(variable STREQUAL string)
IF(string STREQUAL string)
按照字母序的排列进行比较.
IF(DEFINED variable),如果变量被定义,为真。
2,WHILE
WHILE指令的语法是:
WHILE(condition)
COMMAND1(ARGS ...)
COMMAND2(ARGS ...)
...
ENDWHILE(condition)
其真假判断条件可以参考IF指令。
3,FOREACH
FOREACH指令的使用方法有三种形式:
1,列表
FOREACH(loop_var arg1 arg2 ...)
COMMAND1(ARGS ...)
COMMAND2(ARGS ...)
...
ENDFOREACH(loop_var)
2,范围
FOREACH(loop_var RANGE total)
ENDFOREACH(loop_var)
3. 范围和步进
FOREACH(loop_var RANGE start stop [step])
ENDFOREACH(loop_var)
}
FIND(){
对于系统预定义的Find<name>.cmake模块,使用方法一般如上例所示:
每一个模块都会定义以下几个变量
• <name>_FOUND
• <name>_INCLUDE_DIR or <name>_INCLUDES
• <name>_LIBRARY or <name>_LIBRARIES
你可以通过<name>_FOUND来判断模块是否被找到,如果没有找到,按照工程的需要关闭
某些特性、给出提醒或者中止编译,上面的例子就是报出致命错误并终止构建。
如果<name>_FOUND为真,则将<name>_INCLUDE_DIR加入INCLUDE_DIRECTORIES,
将<name>_LIBRARY加入TARGET_LINK_LIBRARIES中。
FIND_PACKAGE(<name> [major.minor] [QUIET] [NO_MODULE]
[[REQUIRED|COMPONENTS] [componets...]])
前面的CURL例子中我们使用了最简单的FIND_PACKAGE指令,其实他可以使用多种参数,
QUIET参数,对应与我们编写的FindHELLO 中的HELLO_FIND_QUIETLY,如果不指定
这个参数,就会执行:
MESSAGE(STATUS "Found Hello: ${HELLO_LIBRARY}")
REQUIRED参数,其含义是指这个共享库是否是工程必须的,如果使用了这个参数,说明这
个链接库是必备库,如果找不到这个链接库,则工程不能编译。对应于
FindHELLO.cmake 模块中的HELLO_FIND_REQUIRED变量。
}
option(){
答案当然是有的,强大的CMake为我们准备了--option这个命令,给我们作为默认初始值并且作为定义值的候选。
option(address "This is a option for address" ON)
此时表示,如果用户没有定义过address,那我address的默认值就是ON,如果用户在命令行显示改变过address的值比如为OFF,那么在脚本中address的值就是OFF。
}
synopsis(){
1,变量使用${}方式取值,但是在IF控制语句中是直接使用变量名
2,指令(参数1 参数2...)
参数使用括弧括起,参数之间使用空格或分号分开。
以上面的ADD_EXECUTABLE指令为例,如果存在另外一个func.c源文件,就要写成:
ADD_EXECUTABLE(hello main.c func.c)或者
ADD_EXECUTABLE(hello main.c;func.c)
3,指令是大小写无关的,参数和变量是大小写相关的。但,推荐你全部使用大写指令。
上面的MESSAGE指令我们已经用到了这条规则:
MESSAGE(STATUS "This is BINARY dir" ${HELLO_BINARY_DIR})
也可以写成:
MESSAGE(STATUS "This is BINARY dir ${HELLO_BINARY_DIR}")
}
cross(arm-linux-gcc交叉编译器){
1. 通过在CMakeLists.txt中指定交叉编译器的方法
在CMakeLists.txt一开始加入相关设置:
#告知当前使用的是交叉编译方式,必须配置
SET(CMAKE_SYSTEM_NAME Linux)
#指定C交叉编译器,必须配置
#或交叉编译器使用绝对地址
SET(CMAKE_C_COMPILER "arm-linux-gcc")
#指定C++交叉编译器
SET(CMAKE_CXX_COMPILER "arm-linux-g++")
#不一定需要设置
#指定交叉编译环境安装目录...
# 代表了一系列的相关文件夹路径的根路径的变更,比如你设置了/opt/arm/,所有的Find_xxx.cmake都会优先根据
# 这个路径下的/usr/lib,/lib等进行查找,然后才会去你自己的/usr/lib和/lib进行查找,如果你有一些库是不被
# 包含在/opt/arm里面的,你也可以显示指定多个值给CMAKE_FIND_ROOT_PATH,比如
# set(CMAKE_FIND_ROOT_PATH /opt/arm /opt/inst)
SET(CMAKE_FIND_ROOT_PATH "...")
# 从来不在指定目录下查找工具程序
# NEVER表示不在你CMAKE_FIND_ROOT_PATH下进行查找,
SET(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)
# 只在指定目录下查找库文件
# ONLY表示只在CMAKE_FIND_ROOT_PATH路径下查找,
SET(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)
# 只在指定目录下查找头文件
# BOTH表示先查找这个路径,再查找全局路径,
SET(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE BOTH)
CMAKE_FIND_ROOT_PATH_MODE_LIBRARY: 对FIND_LIBRARY()起作用,表示在链接的时候的库的相关选项,
因此这里需要设置成ONLY来保证我们的库是在交叉环境中找的.
CMAKE_FIND_ROOT_PATH_MODE_INCLUDE: 对FIND_PATH()和FIND_FILE()起作用,一般来说也是ONLY,如果你想改变,
一般也是在相关的FIND命令中增加option来改变局部设置,有NO_CMAKE_FIND_ROOT_PATH,ONLY_CMAKE_FIND_ROOT_PATH,
BOTH_CMAKE_FIND_ROOT_PATH.
BOOST_ROOT: 对于需要boost库的用户来说,相关的boost库路径配置也需要设置,因此这里的路径即ARM下的boost路径,
里面有include和lib。
}
CMAKE_TOOLCHAIN_FILE(){
--------------------------------------- Toolchain-eldk-ppc74xx.cmake
# this one is important
SET(CMAKE_SYSTEM_NAME Linux)
#this one not so much
SET(CMAKE_SYSTEM_VERSION 1)
# specify the cross compiler
SET(CMAKE_C_COMPILER /opt/eldk-2007-01-19/usr/bin/ppc_74xx-gcc)
SET(CMAKE_CXX_COMPILER /opt/eldk-2007-01-19/usr/bin/ppc_74xx-g++)
# where is the target environment
SET(CMAKE_FIND_ROOT_PATH /opt/eldk-2007-01-19/ppc_74xx /home/alex/eldk-ppc74xx-inst)
# search for programs in the build host directories
SET(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)
# for libraries and headers in the target directories
SET(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)
SET(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY)
--------------------------------------- Toolchain-eldk-ppc74xx.cmake
cd build
build$ cmake -DCMAKE_TOOLCHAIN_FILE=~/Toolchain-eldk-ppc74xx.cmake ..
CMake给交叉编译预留了一个很好的变量即CMAKE_TOOLCHAIN_FILE,它定义了一个文件的路径,这个文件即
toolChain,里面set了一系列你需要改变的变量和属性,包括C_COMPILER,CXX_COMPILER,如果用Qt的话需要更改
QT_QMAKE_EXECUTABLE以及如果用BOOST的话需要更改的BOOST_ROOT(具体查看相关Findxxx.cmake里面指定的路径)。
CMake为了不让用户每次交叉编译都要重新输入这些命令,因此它带来toolChain机制,简而言之就是一个cmake脚本,
内嵌了你需要改变以及需要set的所有交叉环境的设置。
}
cmake(获取命令帮助){
1. 获取命令帮助
cmake --help-commands
这个命令将给出所有cmake内置的命令的详细帮助,一般不知道自己要找什么或者想随机翻翻得时候,可以用这个。
另外也可以用如下的办法层层缩小搜索范围:
cmake --help-commands-list
cmake --help-commands-list|grep find
}
cmake(查询变量){
cmake --help-variable-list | grep CMAKE | grep HOST
这里查找所有CMake自己定义的builtin变量;一般和系统平台相关。
如果希望将所有生成的可执行文件、库放在同一的目录下,可以如此做:
# Targets directory
set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${target_dir}/lib)
set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${target_dir}/lib)
set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${target_dir}/bin)
这里的target_dir是一个实现设置好的绝对路径。(CMake里边绝对路径比相对路径更少出问题,如果可能尽量用绝对路径)
}
cmake(属性查询){
属性查询
cmake --help-property-list | grep NAME
Property一般很少需要直接改动,除非你想修改一些默认的行为,譬如修改生成的动态库文件的soname等。
cmake --help-property OUTPUT_NAME
}
cmake(CMake模组){
用于查找常用的模块,譬如boost,bzip2, python等。通过简单的include命令包含预定义的模块,
就可以得到一些模块执行后定义好的变量,非常方便。
# Find boost 1.40
INCLUDE(FindBoost)
find_package(Boost 1.40.0 COMPONENTS thread unit_test_framework)
if(NOT Boost_FOUND)
message(STATUS "BOOST not found, test will not succeed!")
endif()
一般开头部分的解释都相当有用,可满足80%需求:
cmake --help-module FindBoost | head -40
}
cmake(如何根据其生成的中间文件查看一些关键信息){
CMake相比较于autotools的一个优势就在于其生成的中间文件组织的很有序,并且清晰易懂,
不像autotools会生成天书一样的庞然大物(10000+的不鲜见)。
一般CMake对应的Makefile都是有层级结构的,并且会根据你的CMakeLists.txt间的相对结构在binary directory
里边生成相应的目录结构。
譬如对于某一个target,一般binary tree下可以找到一个文件夹: CMakeFiles/.dir/,比如:
skyscribe@skyscribe:~/program/ltesim/bld/dev/simcluster/CMakeFiles/SIMCLUSTER.dir$ ls -l
total 84
-rw-r--r-- 1 skyscribe skyscribe 52533 2009-12-12 12:20 build.make
-rw-r--r-- 1 skyscribe skyscribe 1190 2009-12-12 12:20 cmake_clean.cmake
-rw-r--r-- 1 skyscribe skyscribe 4519 2009-12-12 12:20 DependInfo.cmake
-rw-r--r-- 1 skyscribe skyscribe 94 2009-12-12 12:20 depend.make
-rw-r--r-- 1 skyscribe skyscribe 573 2009-12-12 12:20 flags.make
-rw-r--r-- 1 skyscribe skyscribe 1310 2009-12-12 12:20 link.txt
-rw-r--r-- 1 skyscribe skyscribe 406 2009-12-12 12:20 progress.make
drwxr-xr-x 2 skyscribe skyscribe 4096 2009-12-12 12:20 src
这里,每一个文件都是个很短小的文本文件,内容相当清晰明了。
build.make一般包含中间生成文件的依赖规则,DependInfo.cmake一般包含源代码文件自身的依赖规则。
比较重要的是flags.make和link.txt,前者一般包含了类似于GCC的-I的相关信息,如搜索路径,宏定义等;
后者则包含了最终生成target时候的linkage信息,库搜索路径等。 这些信息在出现问题的时候是个很好的辅助调试手段。
}
cmake(文件查找、路径相关){
include_directories()用于添加头文件的包含搜索路径
cmake --help-command include_directories
link_directories()用于添加查找库文件的搜索路径
cmake --help-command link_directories
}
cmake(库查找){
一般外部库的link方式可以通过两种方法来做,一种是显示添加路径,采用link_directories(),
一种是通过find_library()去查找对应的库的绝对路径。后一种方法是更好的,因为它可以减少不少潜在的冲突。
一般find_library会根据一些默认规则来搜索文件,如果找到,将会set传入的第一个变量参数、否则,
对应的参数不被定义,并且有一个xxx-NOTFOUND被定义;可以通过这种方式来调试库搜索是否成功。
对于库文件的名字而言,动态库搜索的时候会自动搜索libxxx.so (xxx.dll),静态库则是libxxx.a(xxx.lib),
对于动态库和静态库混用的情况,可能会出现一些混乱,需要格外小心;一般尽量做匹配连接。
}
cmake(rpath){
所谓的rpath是和动态库的加载运行相关的。我一般采用如下的方式取代默认添加的rpath:
# RPATH and library search setting
SET(CMAKE_SKIP_BUILD_RPATH FALSE)
SET(CMAKE_BUILD_WITH_INSTALL_RPATH FALSE)
SET(CMAKE_INSTALL_RPATH "${CMAKE_INSTALL_PREFIX}/nesim/lib")
SET(CMAKE_INSTALL_RPATH_USE_LINK_PATH TRUE)
}
cmake(CMake内调用环境变量){
通过$ENV{VAR}访问环境变量;
通过[HKEY_CURRENT_USER\\Software\\path1\\path2;key]访问Windows注册表;
}
cmake(指定编译器){
CMake通过制定模组检查编译器 Modules/CMakeDeterminCCompiler.cmake Modules/CMakeDeterminCXXCompiler.cmake
可以设置环境变量指定编译器,如:$ENV{CC}/$ENV{CXX}
或者使用CMake命令行变量指定,语法:-DCACHE_VAR:TYPE=VALUE
例子: cmake -DCMAKE_CXX_COMPILER=xlc -DBUILD_TESTING:BOOL=ON ../foo
}
cmake(指定编译标志){
设置编译环境变量
LDFLAGS= XXX #设置link标志
CXXFLAGS= XXX #初始化CMAKE_CXX_FLAGS
CFLAGS= XXX #初始化CMAKE_C_FLAGS
}
cmake(CMake的依赖分析){
依赖分析,使用四个文件depend.make,flags.make,build.make,DependInfo.cmake.depend.make
}
cmake(生成目标){
#生成可执行文件
add_executable()
#生成库文件,不指定类型,默认生成静态库
add_library(foo [STATIC|SHARED|MODULE]foo1.c foo2.c)
}
cmake(怎样获得一个目录下的所有源文件){
aux_source_directory(<dir> <variable>)
将dir中所有源文件(不包括头文件)保存到变量variable中,然后可以add_executable
(ss7gw ${variable})这样使用。
}
cmake(怎样指定项目编译目标){
project命令指定}
cmake(怎样添加动态库和静态库){
target_link_libraries命令添加即可
}
cmake(怎样指定头文件与库文件路径){
include_directories与link_directories
可以多次调用以设置多个路径
link_directories仅对其后面的targets起作用
}
cmake(怎样区分debug、release版本){
建立debug/release两目录,分别在其中执行cmake -DCMAKE_BUILD_TYPE=Debug(或Release),需要编译不同版本时进入不同目录执行make即可;
Debug版会使用参数-g;Release版使用-O3 –DNDEBUG
另一种设置方法——例如DEBUG版设置编译参数DDEBUG
IF(DEBUG_mode)
add_definitions(-DDEBUG)
ENDIF()
在执行cmake时增加参数即可,例如cmake -D DEBUG_mode=ON
}
cmake(怎样设置条件编译){
例如debug版设置编译选项DEBUG,并且更改不应改变CMakelist.txt
使用option command,eg:
option(DEBUG_mode "ON for debug or OFF for release" ON)
IF(DEBUG_mode)
add_definitions(-DDEBUG)
ENDIF()
使其生效的方法:首先cmake生成makefile,然后make edit_cache编辑编译选项;Linux下会打开一个文本框,可以更改,该完后再make生成目标文件——emacs不支持make edit_cache;
局限:这种方法不能直接设置生成的makefile,而是必须使用命令在make前设置参数;对于debug、release版本,相当于需要两个目录,分别先cmake一次,然后分别make edit_cache一次;
期望的效果:在执行cmake时直接通过参数指定一个开关项,生成相应的makefile——可以这样做,例如cmake –DDEBUGVERSION=ON
}
cmake(怎样添加编译宏定义){
使用add_definitions命令,见命令部分说明
}
cmake(怎样添加编译依赖项){
用于确保编译目标项目前依赖项必须先构建好
add_dependencies
}
cmake(怎样指定目标文件目录){
建立一个新的目录,在该目录中执行cmake生成Makefile文件,这样编译结果会保存在该目录——类似
SET_TARGET_PROPERTIES(ss7gw PROPERTIES RUNTIME_OUTPUT_DIRECTORY "${BIN_DIR}")
}
cmake(怎样打印make的输出){
make VERBOSE=1
}
| true
|
5e82ff45ea34f010ee3e62a6f14ff77fe67f2746
|
Shell
|
Tubbz-alt/BrainMap-1
|
/scripts/doquant.sh
|
UTF-8
| 1,075
| 2.796875
| 3
|
[] |
no_license
|
#!/bin/bash
#
################
# Creates .cxb file from individual bam + ref_gtf
################
#
# Usage
# doquant.sh samplename bam_file.bam
#
# Submits a cuffquant job to SLURM for one sample. Logs go under
# $LOGBASE/quants/<sample>; quantification output under $QUANT_ROOT/<sample>.

#Setup
QUEUE=serial_requeue #unrestricted #or general OR serial_requeue
NUM_THREADS=8
MAX_MEM=2000          # MB per CPU
RUN_TIME=1000         # minutes
PROJECT_ROOT="/n/rinn_data1/seq/lgoff/Projects/BrainMap"
ALIGN_ROOT=$PROJECT_ROOT/data/bam
QUANT_ROOT=$PROJECT_ROOT/data/quants
BOWTIE_INDEX=$PROJECT_ROOT/data/indexes/mm10/mm10_brainmap
REF_GTF=$PROJECT_ROOT/data/annotation/mm10_gencode_vM2_with_lncRNAs_and_LacZ.gtf
LOGBASE=$PROJECT_ROOT/logs

#ARGUMENTS
if [ $# -ne 2 ]; then
    echo "Usage: $(basename "$0") samplename bam_file.bam" >&2
    exit 1
fi
SAMPLE_NAME=$1
BAMFILE=$2

#Main
LOGDIR=$LOGBASE/quants/$SAMPLE_NAME
mkdir -p "$LOGDIR"
# FIX: the original created $ALIGN_ROOT/$SAMPLE_NAME (a leftover from the
# alignment script) and never created the cuffquant output directory that the
# submitted job actually writes to.
OUTDIR=$QUANT_ROOT/$SAMPLE_NAME
mkdir -p "$OUTDIR"

echo "$SAMPLE_NAME"
sbatch -J "${SAMPLE_NAME}_quant" -t "$RUN_TIME" --mem-per-cpu="$MAX_MEM" -n "$NUM_THREADS" -p "$QUEUE" --mail-type=FAIL \
    --wrap="cuffquant --no-update-check -p $NUM_THREADS -o $QUANT_ROOT/$SAMPLE_NAME $REF_GTF $BAMFILE >$LOGDIR/$SAMPLE_NAME.quant.out 2>$LOGDIR/$SAMPLE_NAME.quant.err" \
    >"$LOGDIR/${SAMPLE_NAME}_slurm.out" 2>"$LOGDIR/${SAMPLE_NAME}_slurm.err"
| true
|
ab6a1f9ddd3552dee895872045b24eda7f5bd70c
|
Shell
|
tajtiattila/joyster
|
/makerelease.sh
|
UTF-8
| 887
| 3.40625
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# Build joyster release zips (32-bit and 64-bit Windows) under ./release.
# Requires: git tree with an annotated tag reachable from HEAD, a Go
# toolchain, the vJoy SDK at $VJOYSDK, and 7z on PATH.
if [ -n "$(git status --porcelain --untracked-files=no)" ]; then
	echo "WARNING: Changed files present"
fi
if [ -n "$(git ls-files --others --exclude-standard)" ]; then
	echo "WARNING: Untracked files present"
fi
git status --short
if [ -z "$(git describe --exact-match HEAD 2>/dev/null)" ]; then
	echo "WARNING: No annotated tag for current version"
fi
PRG=joyster
DIR=release/$PRG
# HEAD must have an annotated tag
# Fix: abort when git describe fails / yields nothing — previously an empty
# $VER silently produced a broken zip name and version string.
VER=$(git describe HEAD) || exit 1
[ -n "$VER" ] || exit 1
rm -rf release
mkdir -p release "$DIR/32bit" "$DIR/64bit"
cp README.md "$DIR"
cp -a misc "$DIR/misc"
VJOYSDK=D:/dev/vJoy204SDK-080714
cp "$VJOYSDK/lib/vJoyInterface.dll" "$DIR/32bit/"
cp "$VJOYSDK/lib/amd64/vJoyInterface.dll" "$DIR/64bit/"
# Cross-compile helper; GOARCH is supplied per-invocation below.
build () {
	go build -ldflags "-X main.Version $VER"
}
GOARCH=386 build && cp "$PRG.exe" "$DIR/32bit/"
GOARCH=amd64 build && cp "$PRG.exe" "$DIR/64bit/"
# Fix: guard the cd so 7z cannot run in the wrong directory.
cd release || exit 1
7z a "joyster-$VER.zip" joyster
| true
|
6b72b243e87e6bd9bcc0601a27ddaeb122c2e0c3
|
Shell
|
bcross/docker-apphost
|
/apps/guacamole.sh
|
UTF-8
| 3,390
| 3.796875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Deploy Apache Guacamole behind nginx with Let's Encrypt certs, backed by
# PostgreSQL, all via docker-compose. Must run as root; bootstraps certbot,
# nginx and docker helpers on first use.
if [ $EUID -ne 0 ]; then
echo "Run this script with elevated privileges."
exit 2
fi
#Script directory
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
#Variables
GUACAMOLE_DOMAIN=guacamole.example.com
CERTBOT_EMAIL=example@example.com
DATA_ROOT=/media/data
#Run docker, certbot, and nginx if not configured already
# Each setup script is expected to drop a "configured" marker file.
if [ ! -f "/opt/certbot/configured" ]; then
echo "Certbot not configured"
$DIR/../setup/certbot.sh
fi
if [ ! -f "/opt/nginx/configured" ]; then
echo "Nginx not configured"
$DIR/../setup/nginx.sh
fi
if [ ! -f "/opt/docker/configured" ]; then
echo "Docker not configured"
$DIR/../setup/docker.sh
fi
echo "------------------"
echo "|Guacamole script|"
echo "------------------"
#Create folder
echo "Creating data folder"
mkdir $DATA_ROOT/guacamole -p
#Add nginx vhost
# NOTE: nginx variables are written as \$var so bash does not expand them;
# only $GUACAMOLE_DOMAIN is substituted into the vhost file.
echo "Adding nginx vhost"
cat > /opt/nginx/vhosts/guacamole.conf <<EOL
server {
    listen 0.0.0.0:8443;
    port_in_redirect off;
    ssl_certificate /bitnami/certs/letsencrypt/$GUACAMOLE_DOMAIN/fullchain.pem;
    ssl_certificate_key /bitnami/certs/letsencrypt/$GUACAMOLE_DOMAIN/privkey.pem;
    server_name $GUACAMOLE_DOMAIN;
    location / {
        proxy_buffering off;
        proxy_http_version 1.1;
        proxy_set_header X-Forwarded-For \$remote_addr;
        proxy_set_header Host \$host;
        proxy_set_header Upgrade \$http_upgrade;
        proxy_set_header Connection \$http_connection;
        proxy_cookie_path /guacamole/ /new-path/;
        resolver 127.0.0.11;
        set \$guacamole guacamole;
        proxy_pass http://\$guacamole:8080/guacamole\$uri\$is_args\$args;
    }
}
EOL
#Create certs with certbot
# Falls back to the pre-existing self-signed pair when issuance fails
# (e.g. domain not publicly reachable).
echo "Creating certs with certbot"
certbot certonly --webroot --email $CERTBOT_EMAIL --agree-tos -w /opt/certbot/www -d $GUACAMOLE_DOMAIN &> /dev/null
if [ $? != 0 ]; then
echo "Cert creation with certbot failed. Defaulting to self-signed."
sed -i "s|ssl_certificate .*|ssl_certificate /bitnami/certs/nginx.crt;|" /opt/nginx/vhosts/guacamole.conf
sed -i "s|ssl_certificate_key .*|ssl_certificate_key /bitnami/certs/nginx.key;|" /opt/nginx/vhosts/guacamole.conf
fi
echo "Copying docker compose file"
cp -r $DIR/../composefiles/guacamole /opt/docker/composefiles
#Modify compose file with data root
# Rewrites named volumes into bind mounts under $DATA_ROOT.
echo "Updating data locations in docker compose file"
sed -i "s|guacamole_data:/guacamole|$DATA_ROOT/guacamole/guacamole_data:/guacamole|" /opt/docker/composefiles/guacamole/docker-compose.yml
sed -i "s|postgresql_data:/bitnami|$DATA_ROOT/guacamole/postgresql_data:/bitnami|" /opt/docker/composefiles/guacamole/docker-compose.yml
#Start guacamole
# Retry loop: the initial `false` forces at least one iteration; compose is
# retried until it exits 0. NOTE(review): `up -d` is invoked twice per
# iteration — looks unintentional but is harmless (idempotent); confirm.
echo "Starting guacamole"
false
while [ $? != 0 ]; do
sleep 2
docker-compose -f "/opt/docker/composefiles/guacamole/docker-compose.yml" up -d &> /dev/null
docker-compose -f "/opt/docker/composefiles/guacamole/docker-compose.yml" up -d &> /dev/null
done
echo "Started. Waiting..."
# Wait for PostgreSQL to come up (pid file appears), then seed the schema.
while [ ! -f $DATA_ROOT/guacamole/postgresql_data/postgresql/data/postmaster.pid ]; do
sleep 5
done
sleep 10
echo "Initializing PostgreSQL database"
docker exec guacamole_guacamole_1 /opt/guacamole/bin/initdb.sh --postgres > $DATA_ROOT/guacamole/postgresql_data/initdb.sql
docker exec -e PGPASSWORD=$(grep -oP "(?<=POSTGRES_PASSWORD=).*" /opt/docker/composefiles/guacamole/docker-compose.yml) guacamole_postgresql_1 psql postgresql://postgres@localhost/guacamole_db -f /bitnami/initdb.sql &> /dev/null
rm $DATA_ROOT/guacamole/postgresql_data/initdb.sql
| true
|
562d4323e9f3ac3bbabd611152d090cf2235ca17
|
Shell
|
lucc/shell
|
/exif2ctime.sh
|
UTF-8
| 159
| 3.109375
| 3
|
[] |
no_license
|
#!/bin/sh
# Set each file's timestamp from its EXIF "Create Date" tag.
# Usage: exif2ctime.sh FILE...
for ARG; do
    # Extract "Create Date : YYYY:mm:dd HH:MM:SS", strip non-digits, and
    # insert the '.' before the seconds to get touch's MMDDhhmm.SS format.
    DATE=$(exiftool -createdate "$ARG" | \
        sed -n '/Create Date *: [0-9: ]\{19\}/{s/[^0-9]//g;s/..$/.&/;p;}')
    # Bug fix: skip files without a usable tag — previously an empty $DATE
    # made `touch -t` treat the filename itself as the timestamp operand.
    [ -n "$DATE" ] && touch -t "$DATE" "$ARG"
done
| true
|
737c17c63d240611b86c84fc20299cd152ef01d2
|
Shell
|
TurboLabIt/zzdislocker
|
/dislocker-compile.sh
|
UTF-8
| 834
| 2.796875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Build and install Dislocker (BitLocker driver) from source on Ubuntu.
# fxHeader/fxEndFooter/rootCheck come from the sourced bash-fx library.
source "/usr/local/turbolab.it/bash-fx/bash-fx.sh"
fxHeader "🚀 Compile Dislocker"
rootCheck
### Universe is required to get some pre-requisites
add-apt-repository universe
apt update
### Pre-requisites
## https://github.com/Aorimn/dislocker/blob/master/INSTALL.md#requirements
apt install gcc cmake make libfuse-dev libmbedtls-dev ruby-dev -y
### Fix for
# CMake Error at /usr/share/cmake-3.22/Modules/FindPackageHandleStandardArgs.cmake:230 (message):
#   Could NOT find PkgConfig (missing: PKG_CONFIG_EXECUTABLE)
apt install pkg-config -y
# Build in a throw-away directory that is recreated on every run.
DSKL_COMPILE_DIR=/tmp/zzdislocker-compile-dir/
rm -rf "${DSKL_COMPILE_DIR}"
mkdir "${DSKL_COMPILE_DIR}"
cd "${DSKL_COMPILE_DIR}"
git clone https://github.com/Aorimn/dislocker.git
cd dislocker
# Configure, build and install; the && chain stops at the first failure.
cmake . && make && make install
# Clean up the build tree regardless of build outcome.
rm -rf "${DSKL_COMPILE_DIR}"
fxEndFooter
| true
|
d997d3a5236126d91b8a196263e0f7e65f684c29
|
Shell
|
TSoftTest/repo
|
/APP3C_JAR/batchs/compileJAR.sh
|
UTF-8
| 556
| 2.9375
| 3
|
[] |
no_license
|
#!/bin/sh
# Compile the AP3C ServiceLayer class and package it into dist/servicesLib.jar.
# Fix: use BASE_DIR instead of reassigning HOME — HOME is exported, so the
# original assignment leaked '.' as $HOME into every child process.
BASE_DIR=.
JAVA_SRC_HOME=$BASE_DIR/src
CLASSES_DIR=$BASE_DIR/classes
DIST_DIR=$BASE_DIR/dist
clear
# Reset output directories. Bug fix: the glob must stay OUTSIDE the quotes;
# the original `rm -rf "$DIR/*"` asked rm for a literal file named '*' and
# deleted nothing, so stale classes leaked into every jar.
if [ -d "$CLASSES_DIR" ]; then
	rm -rf "${CLASSES_DIR:?}"/*
else
	mkdir "$CLASSES_DIR"
fi
if [ -d "$DIST_DIR" ]; then
	rm -rf "${DIST_DIR:?}"/*
else
	mkdir "$DIST_DIR"
fi
# Fix: message used to name MessagePrinter.java, but ServiceLayer.java is built.
echo "Compiling: ServiceLayer.java"
javac -source 1.8 -target 1.8 -encoding utf8 "$JAVA_SRC_HOME/com/tsoft/ap3c/service/ServiceLayer.java" -d "$CLASSES_DIR"
echo "Generating Jar file"
cd "$CLASSES_DIR" || exit 1
zip -r "../$DIST_DIR/servicesLib.jar" *
cd ..
echo "Copying non-java files"
cp resources/* "$DIST_DIR"
| true
|
f04e9928f3aed625e044d5abb52c8bdb36d8ca21
|
Shell
|
molgenis/NGS_Demultiplexing
|
/protocols/Demultiplex.sh
|
UTF-8
| 10,912
| 3.1875
| 3
|
[] |
no_license
|
#MOLGENIS walltime=12:00:00 nodes=1 ppn=2 mem=5gb
#string ngsDir
#string intermediateDir
#string seqType
#list barcode
#string runPrefix
#string compressedDemultiplexedDiscardedFastqFilenameSR
#string compressedDemultiplexedDiscardedFastqFilenamePE1
#string compressedDemultiplexedDiscardedFastqFilenamePE2
#list demultiplexedSampleFastqChecksumFilenamePE1,demultiplexedSampleFastqChecksumFilenamePE2,compressedDemultiplexedSampleFastqFilenamePE1,compressedDemultiplexedSampleFastqFilenamePE2,compressedDemultiplexedSampleFastqFilenameSR
#string filenameSuffixDiscardedReads
#list barcodeType
#string compressedFastqFilenameSR
#string compressedFastqFilenamePE1
#string compressedFastqFilenamePE2
#string run
#string flowcell
#string lane
#string sampleSheet
csv_with_prefix(){
# Join the array passed BY NAME in $1 (e.g. 'arr[@]') into a single
# comma-separated string, prepending $2 to every element, and echo it.
local -a _elems=("${!1}")
local _pre=$2
local _out=''
local _sep=''
local _e
for _e in "${_elems[@]}"
do
_out+="${_sep}${_pre}${_e}"
_sep=','
done
echo "${_out}"
}
csv(){
# Join the array passed BY NAME in $1 (e.g. 'arr[@]') into a single
# comma-separated string and echo it. Elements are used verbatim.
local -a _elems=("${!1}")
# "${arr[*]}" joins with the first character of IFS; scoped to this function.
local IFS=','
echo "${_elems[*]}"
}
_count_reads() {
# Count FastQ reads in a gzipped file (line count / 4) and write the result
# into the caller's variable.
#   $1 - path to a gzipped FastQ file
#   $2 - barcode label (used only for column-width bookkeeping)
#   $3 - NAME of the caller's variable that receives the read count (via eval)
# Side effects: widens the globals longest_read_count_length and
# longest_barcode_length, which _save_log later uses for table alignment.
local _fastq=$1
local _barcode=$2
local -i _lines=$(zcat "${_fastq}" | wc -l)
local -i _reads=$((_lines/4))
if [ ${#_reads} -gt "${longest_read_count_length}" ]; then
longest_read_count_length=${#_reads}
fi
if [ ${#_barcode} -gt "${longest_barcode_length}" ]; then
longest_barcode_length=${#_barcode}
fi
# Write-back by name; $3 must be a valid variable name.
eval "$3=${_reads}"
}
_save_log() {
# Write a formatted demultiplex-statistics table to a log file.
#   $1 - widest barcode width (columns)
#   $2 - widest read-count width (columns)
#   $3 - header text for the count column (e.g. 'Reads' / 'Read Pairs')
#   $4 - total reads, denominator for the percentage column
#   $5 - label printed under the title (run prefix)
#   $6 - output log file path (overwritten)
#   $7 - name of an array holding "barcode:count" items (passed as 'arr[@]')
local -i _fixed_extra_line_length=13
local -i _longest_barcode_length=$1
local -i _longest_read_count_length=$2
local -i _max_line_length=$((_fixed_extra_line_length+_longest_barcode_length+_longest_read_count_length))
local _col_header="$3"
local _prefix='INFO:'
# Build a 'INFO:====...' separator sized to the table width; eval expands the
# computed repeat count inside the brace range.
local _sep=$(echo -n ${_prefix}; eval printf '=%.0s' {1..$_max_line_length}; echo)
local -i _total=$4
local _label="$5"
local _log_file="$6"
local -a _counts=("${!7}")
echo "${_prefix} Demultiplex statistics for:" > "${_log_file}"
echo "${_prefix}     ${_label}" >> "${_log_file}"
echo "${_sep}" >> "${_log_file}"
printf "${_prefix} %${_longest_barcode_length}s: %${_longest_read_count_length}s (%%)\n" 'Barcode' "${_col_header}" >> "${_log_file}"
echo "${_sep}" >> "${_log_file}"
# One row per barcode: split "barcode:count" and compute its percentage.
for _item in "${_counts[@]}"
do
local _barcode=${_item%%:*}
local _count=${_item#*:}
local _percentage
_percentage=$(awk "BEGIN {printf \"%.4f\n\", (($_count/$_total)*100)}")
printf "${_prefix} %${_longest_barcode_length}s: %${_longest_read_count_length}d (%4.1f%%)\n" "${_barcode}" "${_count}" "${_percentage}" >> "${_log_file}"
done
echo "${_sep}" >> "${_log_file}"
}
#
# Initialize script specific vars.
#
# makeTmpDir comes from the sourced Molgenis environment; it sets MC_tmpFile.
makeTmpDir "${intermediateDir}"
fluxDir="${MC_tmpFile}"
#
# For each lane demultiplex rawdata.
#
if [ "${seqType}" == "SR" ]
then
if [[ "${barcode[0]}" == "None" || "${barcode[0]}" == "" ]]
then
# No barcodes used in this lane: Do nothing.
touch "${fluxDir}/${runPrefix}.demultiplex.read_count_check.skipped"
else
#
# Illumina-style demultiplexed files:
#
#  * Do not demultiplex, but
#  * Create a log file with demultiplex statistics.
#
# Check if the files required for the read count check are present.
#
declare label="${runPrefix}"
declare -a read_counts
declare -i total_reads=0
declare -i longest_read_count_length=5
declare -i longest_barcode_length=7
#
# Read counts of the demultiplexed files.
# Note: we actually count lines, which equals reads * 4 for FastQ files.
#
# Discarded reads are counted first under the 'DISCARDED' pseudo-barcode.
declare barcodeD="${filenameSuffixDiscardedReads}"
declare fastq="${ngsDir}/${compressedDemultiplexedDiscardedFastqFilenameSR}"
echo "counting lines for ${ngsDir}/${compressedDemultiplexedDiscardedFastqFilenameSR}"
declare -i reads=-1
_count_reads "${fastq}" "${barcodeD}" 'reads'
read_counts=(${read_counts[@]-} ${barcodeD}:${reads})
((total_reads+=reads))
# Then one count per sample barcode.
((n_elements=${#compressedDemultiplexedSampleFastqFilenameSR[@]}, max_index=n_elements - 1))
for ((fileToCheck = 0; fileToCheck <= max_index; fileToCheck++))
do
barcodeR="${barcode[fileToCheck]}"
echo "processing ${barcodeR}"
fastq="${ngsDir}/${compressedDemultiplexedSampleFastqFilenameSR[fileToCheck]}"
declare -i reads=-1
_count_reads "${fastq}" "${barcodeR}" 'reads'
read_counts=(${read_counts[@]-} ${barcodeR}:${reads})
((total_reads+=reads))
done
declare log="${fluxDir}/${label}.demultiplex.log"
_save_log ${longest_barcode_length} ${longest_read_count_length} 'Reads' ${total_reads} ${label} ${log} 'read_counts[@]'
fi
elif [ "${seqType}" == "PE" ]
then
if [[ "${barcode[0]}" == "None" || "${barcode[0]}" == "" ]]
then
#
# No barcodes used in this lane: Do nothing.
#
touch "${fluxDir}/${runPrefix}.demultiplex.read_count_check.skipped"
else
#
# Illumina-style demultiplexed files:
#
#  * Do not demultiplex, but
#  * Perform a read count check between reads 1 and 2 and
#  * Create a log file with demultiplex statistics.
#
# Check if the files required for the read count check are present.
#
declare label="${runPrefix}"
declare -a read_pair_counts
declare -i total_read_pairs=0
declare -i longest_read_count_length=10
declare -i longest_barcode_length=7
#
# Read count sanity check of the demultiplexed files.
# Note: we actually count lines, which equals reads * 4 for FastQ files.
# For PE data the amount of reads in both files must be the same!
#
declare barcodeD="${filenameSuffixDiscardedReads}"
declare fastq_1="${ngsDir}/${compressedDemultiplexedDiscardedFastqFilenamePE1}"
declare fastq_2="${ngsDir}/${compressedDemultiplexedDiscardedFastqFilenamePE2}"
declare -i reads_1=-1
declare -i reads_2=-2
echo "counting lines in ${fastq_1}"
_count_reads "${fastq_1}" "${barcodeD}" 'reads_1'
echo "counting lines in ${fastq_2}"
_count_reads "${fastq_2}" "${barcodeD}" 'reads_2'
# Mismatched R1/R2 counts indicate corrupt data: leave a marker and abort.
if [[ "${reads_1}" != "${reads_2}" ]]
then
touch "${fluxDir}/${label}_${barcodeD}.read_count_check_for_pairs.FAILED"
echo "FATAL: Number of reads in both ${label}_${barcode} FastQ files not the same!"
exit 1
fi
read_pair_counts=("${read_pair_counts[@]-}" "${barcodeD}":"${reads_1}")
((total_read_pairs+=reads_1))
((n_elements=${#compressedDemultiplexedSampleFastqFilenamePE1[@]}, max_index=n_elements - 1))
for ((fileToCheck = 0; fileToCheck <= max_index; fileToCheck++))
do
barcodeR="${barcode[fileToCheck]}"
fastq_1="${ngsDir}/${compressedDemultiplexedSampleFastqFilenamePE1[fileToCheck]}"
fastq_2="${ngsDir}/${compressedDemultiplexedSampleFastqFilenamePE2[fileToCheck]}"
reads_1=-1
reads_2=-2
_count_reads "${fastq_1}" "${barcodeR}" 'reads_1'
_count_reads "${fastq_2}" "${barcodeR}" 'reads_2'
if [[ "${reads_1}" != "${reads_2}" ]]
then
touch "${fluxDir}/${label}_${barcodeR}.read_count_check_for_pairs.FAILED"
echo "FATAL: Number of reads in both ${label}_${barcode} FastQ files not the same!"
exit 1
fi
read_pair_counts=(${read_pair_counts[@]-} ${barcodeR}:${reads_1})
((total_read_pairs+=reads_1))
done
declare log="${fluxDir}/${label}.demultiplex.log"
_save_log "${longest_barcode_length}" "${longest_read_count_length}" 'Read Pairs' "${total_read_pairs}" "${label}" "${log}" 'read_pair_counts[@]'
touch "${fluxDir}/${runPrefix}.read_count_check_for_pairs.passed"
fi
fi
# Publish results from the scratch dir into the NGS dir.
cd "${ngsDir}" || exit
mv "${fluxDir}/${runPrefix}"* .
echo "moved ${fluxDir}/${runPrefix}* ."
# Extract the per-barcode percentage column and barcode names from the log
# (everything after the DISCARDED line, dropping the trailing separator).
awk '/DISCARDED/{y=1;next}y' ${runPrefix}.demultiplex.log | awk -F '[()]' '{print $2}' | awk '{gsub(/ /,"",$0);print substr($0,1,length($0)-1)}' | sed '$ d' > ${runPrefix}.percentages.tmp
awk '/DISCARDED/{y=1;next}y' ${runPrefix}.demultiplex.log | awk -F ':' '{print $2}' | sed '$ d' > ${runPrefix}.barcodes.tmp
paste -d'\t' "${runPrefix}.barcodes.tmp" "${runPrefix}.percentages.tmp" > "${runPrefix}.barcodesPercentages.tmp"
# Barcodes with 0.0% reads get a .rejected marker for both mates.
awk -v fileName="${runPrefix}" '{if ($2==0.0){print "percentage="$2 > fileName"_"$1"_1.fq.gz.rejected"}}' "${runPrefix}.barcodesPercentages.tmp"
awk -v fileName="${runPrefix}" '{if ($2==0.0){print "percentage="$2 > fileName"_"$1"_2.fq.gz.rejected"}}' "${runPrefix}.barcodesPercentages.tmp"
rm *.tmp
rejectedReads="false"
if ls "${runPrefix}"*_1.fq.gz.rejected
then
rejectedReads="true"
fi
# Derive the mailing-list file name from this script's own name.
SCRIPT_NAME="$(basename ${0})"
SCRIPT_NAME="${SCRIPT_NAME%.*sh}"
SCRIPT_NAME="${SCRIPT_NAME%_*}"
# Integer part of the DISCARDED percentage from the log.
discarded=$(fgrep "DISCARDED" ${runPrefix}.demultiplex.log | awk -F '[()]' '{print $2}' | awk '{gsub(/ /,"",$0);print substr($0,1,length($0)-3)}')
# Abort (and mail, in Dutch) when more than 75% of reads were discarded —
# usually a sign of wrong barcodes.
if [[ "${discarded}" -gt 75 ]]
then
echo "discarded percentage (${discarded}%) is higher than 75 procent, exiting"
if [[ -r ../../../logs/${SCRIPT_NAME}.mailinglist ]]
then
mailingList=$(cat ../../../logs/${SCRIPT_NAME}.mailinglist)
echo -e "Hallo allen,\ndiscarded percentage (${discarded}%) is higher than 75 procent\nDe demultiplexing pipeline is er dan ook mee gestopt, omdat een te hoog percentage\ndiscarded reads vaak een indicatie is dat er iets mis is met de barcodes oid\n\ngroeten van het GCC" | mail -s "${runPrefix} crashed due to too high percentage of discarded reads" "${mailingList}"
fi
exit 1
elif [[ "${rejectedReads}" == "true" ]]
then
# Rejected barcodes: look up the sample IDs in the samplesheet and mail a report.
if [[ -r ../../../logs/${SCRIPT_NAME}.mailinglist ]]
then
mailingList=$(cat ../../../logs/${SCRIPT_NAME}.mailinglist)
echo -e "Hallo allen,\n\nDe volgende barcodes zijn afgekeurd op basis van een te laag percentage reads per barcode):\n" > mailText.txt
for i in ${runPrefix}*_1.fq.gz.rejected
do
[[ -e ${i} ]] || break
percentage="$(awk 'BEGIN {FS":"}{print $2}' "${i}")"
barcodeGrep=$(echo "${i}" | awk 'BEGIN {FS="_"}{print $6}')
echo "grep ${barcodeGrep} ${sampleSheet}"
# Map samplesheet column names to their positions (header is CSV).
declare -a sampleSheetColumnNames=()
declare -A sampleSheetColumnOffsets=()
IFS=',' sampleSheetColumnNames=($(head -1 "${sampleSheet}"))
for (( _offset = 0 ; _offset < ${#sampleSheetColumnNames[@]:-0} ; _offset++ ))
do
sampleSheetColumnOffsets["${sampleSheetColumnNames[${_offset}]}"]="${_offset}"
done
externalSampleIDFieldIndex=$((${sampleSheetColumnOffsets['externalSampleID']} + 1 ))
sampleID=$(grep ${barcodeGrep} ${sampleSheet} | head -1 | awk -v extId="${externalSampleIDFieldIndex}" 'BEGIN {FS=","}{print $extId}')
echo "externalSampleID: ${sampleID}, barcode: ${i}, ${percentage}" >> mailText.txt
done
echo -e "\nAlle lanen voor deze barcode(s) worden niet door de pipeline gehaald.\n\ngroeten,\nGCC" >> mailText.txt
cat mailText.txt
cat mailText.txt | mail -s "er zijn samples afgekeurd van run: ${runPrefix}" "${mailingList}"
fi
else
echo "number of discarded reads is ${discarded}"
fi
cd -
| true
|
f7a2bc9656ff3b0e43295feea6425c8108790128
|
Shell
|
bambam2174/homesbin
|
/waswo
|
UTF-8
| 526
| 3.171875
| 3
|
[] |
no_license
|
#!/bin/zsh
# List matches of $1 across every PATH directory, teeing the result to a
# temp file. Relies on helpers from the user's zsh setup: list_path, list,
# ecko — TODO confirm their contracts; they are not defined here.
OUTPUT_LOG=$TMPDIR/which_${1}_paths
_waswo () {
# Remove any stale scratch file (-i is neutralized by -f).
rm -i -f $TMPDIR/whichpaths
ARRPATH=($(list_path))
ARR_CMDS=()
for p in $ARRPATH
do
# ls -GAFlhGa@ $p/${1}* 2> /dev/null | sed -E "s/${REGEX_LISTLONG} //g" 2> /dev/null | sed -E 's/\*//g' 2> /dev/null | tee -a $TMPDIR/whichpaths
list "${p}" ${1} # | tee -a $TMPDIR/which_${1}_paths
done
# NOTE(review): output is redirected to /dev/stdin here — looks like it
# should be /dev/stderr or plain stdout; confirm intent.
ecko " Output again here…… " 122 207 > /dev/stdin
echo "\${TMPDIR}/which_${1}_paths" > /dev/stdin
}
_waswo ${@} | tee $TMPDIR/which_${1}_paths
| true
|
ceb6a4956b6ec0d211653f8f8a12f7e5bd49b6d0
|
Shell
|
lestrade84/icehouse
|
/Scripts/04-cnode/02-nova_compute.sh
|
UTF-8
| 14,223
| 2.625
| 3
|
[] |
no_license
|
#!/bin/bash
. support-functions
. parameters.cfg
# Variables needed to be defined
check_vars REGION_NAME DOMAIN PUBLIC_DOMAIN IAAS_PASS OS_CTL_NET INBAND_MGMT_NET VIP_HORIZON VIP_MYSQL VIP_KEYSTONE VIP_GLANCE VIP_CINDER VIP_SWIFT VIP_NEUTRON VIP_NOVA VIP_HEAT VIP_MONGO VIP_CEILOMETER VIP_LDAP REPO_SERVER CON_NODE_1_IP CON_NODE_2_IP CON_NODE_3_IP BE_NODE_1_IP BE_NODE_2_IP BE_NODE_3_IP SDN_NET NUAGE_ACTIVE_CONTROLLER NUAGE_STANDBY_CONTROLLER PV_1 PV_2 PV_3 PV_4 VG_NAME LV_NAME LV_NOVA_MOUNT_DIR CINDER_VOLUME_UUID NOVA_12_NIC ROOT_HOME NOVA_HOME
# --------------------------------------------------------------------------------------------------------
# -------------------------                 PRE-REQUISITES                       ---------------------------
# --------------------------------------------------------------------------------------------------------
# Create volume group and logical volume for ephemeral local storage (nova).
# Fix: use the PV_1..PV_4 / VG_NAME / LV_NAME / LV_NOVA_MOUNT_DIR parameters
# that check_vars above requires instead of hard-coded /dev/sdk-n, "VG_nova",
# "nova" and "/var/lib/nova".
# NOTE(review): the /dev/mapper path assumes VG_NAME and LV_NAME contain no
# hyphens (device-mapper doubles '-') — confirm against parameters.cfg.
vgcreate "$VG_NAME" "$PV_1" "$PV_2" "$PV_3" "$PV_4"
lvcreate --type raid10 --nosync -l 100%VG -i 2 -m 1 -n "$LV_NAME" "$VG_NAME"
mkfs.xfs "/dev/mapper/${VG_NAME}-${LV_NAME}"
# Mounting this LV at boot time
mkdir -p "$LV_NOVA_MOUNT_DIR"
echo "/dev/mapper/${VG_NAME}-${LV_NAME} $LV_NOVA_MOUNT_DIR xfs defaults 0 0" >> /etc/fstab
mount -a
# Configuring RED HAT repos (all served locally from $REPO_SERVER, unsigned)
cat > /etc/yum.repos.d/local.repo << EOF_REPO_LOCAL
[rhel-7-server-openstack-5.0-rpms]
name=Red Hat OpenStack Platform 5 - local packages for
baseurl=http://$REPO_SERVER/repos/rhel-7-server-openstack-5.0-rpms/
enabled=1
gpgcheck=0
[rhel-7-server-rpms]
name=Red Hat Enterprise Linux 7 - local packages for
baseurl=http://$REPO_SERVER/repos/rhel-7-server-rpms/
enabled=1
gpgcheck=0
[rhel-7-server-rh-common-rpms]
name=Red Hat Enterprise Linux Common - local packages for
baseurl=http://$REPO_SERVER/repos/rhel-7-server-rh-common-rpms/
enabled=1
gpgcheck=0
[rhel-server-rhscl-7-rpms]
name=Red Hat Software Collections - local packages for
baseurl=http://$REPO_SERVER/repos/rhel-server-rhscl-7-rpms/
enabled=1
gpgcheck=0
[rhel-ha-for-rhel-7-server-rpms]
name=Red Hat Enterprise Linux High Availability - local packages for
baseurl=http://$REPO_SERVER/repos/rhel-ha-for-rhel-7-server-rpms/
enabled=1
gpgcheck=0
[rhel-7-server-optional-rpms]
name=Red Hat Enterprise Linux 7 - optional for
baseurl=http://$REPO_SERVER/repos/rhel-7-server-optional-rpms/
enabled=1
gpgcheck=0
EOF_REPO_LOCAL
# Configuring NUAGE repos
cat > /etc/yum.repos.d/rhel-7-nuage-vrs-rpms.repo << EOF_NUAGE_REPO
[rhel-7-nuage-vrs-rpms]
name=Nuage VRS Software
baseurl=http://$REPO_SERVER/repos/nuage-vrs/
enabled=1
gpgcheck=0
EOF_NUAGE_REPO
# --------------------------------------------------------------------------------------------------------
# -------------------------                 NOVA                       ---------------------------
# --------------------------------------------------------------------------------------------------------
# Installing required software
yum install -y openstack-nova-compute openstack-utils python-cinder openstack-ceilometer-compute python-twisted-core perl-JSON vconfig nuage-openvswitch nuage-metadata-agent
# Enable default services
systemctl enable libvirtd
systemctl start libvirtd
# Drop libvirt's default NAT network; Nuage/Neutron manages networking here.
virsh net-destroy default
virsh net-undefine default
# Configuring nova.conf — HA endpoints go through the VIP_* addresses,
# node-local addresses are derived from the NOVA_12_NIC interface.
openstack-config --set /etc/nova/nova.conf DEFAULT memcached_servers $CON_NODE_1_IP:11211,$CON_NODE_2_IP:11211,$CON_NODE_3_IP:11211
openstack-config --set /etc/nova/nova.conf DEFAULT vncserver_proxyclient_address $(ip addr show dev ${NOVA_12_NIC} scope global | grep inet | sed -e 's#.*inet ##g' -e 's#/.*##g')
openstack-config --set /etc/nova/nova.conf DEFAULT vncserver_listen 0.0.0.0
openstack-config --set /etc/nova/nova.conf DEFAULT novncproxy_host $(ip addr show dev $NOVA_12_NIC scope global | grep inet | sed -e 's#.*inet ##g' -e 's#/.*##g')
openstack-config --set /etc/nova/nova.conf DEFAULT neutron_region_name $REGION_NAME
openstack-config --set /etc/nova/nova.conf DEFAULT novncproxy_base_url http://vnc.${PUBLIC_DOMAIN}:6080/vnc_auto.html
openstack-config --set /etc/nova/nova.conf database connection mysql://nova:${IAAS_PASS}@${VIP_MYSQL}/nova
openstack-config --set /etc/nova/nova.conf database max_retries -1
openstack-config --set /etc/nova/nova.conf database idle_timeout 60
openstack-config --set /etc/nova/nova.conf DEFAULT auth_strategy keystone
openstack-config --set /etc/nova/nova.conf DEFAULT rabbit_ha_queues true
openstack-config --set /etc/nova/nova.conf DEFAULT rabbit_hosts $BE_NODE_1_IP:5672,$BE_NODE_2_IP:5672,$BE_NODE_3_IP:5672
openstack-config --set /etc/nova/nova.conf DEFAULT metadata_host $VIP_NOVA
openstack-config --set /etc/nova/nova.conf DEFAULT metadata_listen $(ip addr show dev $NOVA_12_NIC scope global | grep inet | sed -e 's#.*inet ##g' -e 's#/.*##g')
openstack-config --set /etc/nova/nova.conf DEFAULT metadata_listen_port 8775
openstack-config --set /etc/nova/nova.conf DEFAULT service_neutron_metadata_proxy True
openstack-config --set /etc/nova/nova.conf DEFAULT use_forwarded_for True
openstack-config --set /etc/nova/nova.conf DEFAULT neutron_metadata_proxy_shared_secret metatest
openstack-config --set /etc/nova/nova.conf DEFAULT glance_host $VIP_GLANCE
# Neutron (Nuage-backed) as the network API; security groups handled by nova.
openstack-config --set /etc/nova/nova.conf DEFAULT network_api_class nova.network.neutronv2.api.API
openstack-config --set /etc/nova/nova.conf DEFAULT neutron_url http://$VIP_NEUTRON:9696/
openstack-config --set /etc/nova/nova.conf DEFAULT neutron_admin_tenant_name services
openstack-config --set /etc/nova/nova.conf DEFAULT neutron_admin_username neutron
openstack-config --set /etc/nova/nova.conf DEFAULT neutron_admin_password $IAAS_PASS
openstack-config --set /etc/nova/nova.conf DEFAULT neutron_admin_auth_url http://$VIP_KEYSTONE:35357/v2.0
openstack-config --set /etc/nova/nova.conf DEFAULT firewall_driver nova.virt.firewall.NoopFirewallDriver
openstack-config --set /etc/nova/nova.conf DEFAULT neutron_ovs_bridge alubr0
openstack-config --set /etc/nova/nova.conf DEFAULT libvirt_vif_driver nova.virt.libvirt.vif.LibvirtGenericVIFDriver
openstack-config --set /etc/nova/nova.conf DEFAULT security_group_api nova
openstack-config --set /etc/nova/nova.conf DEFAULT firewall_driver nova.virt.firewall.NoopFirewallDriver
openstack-config --set /etc/nova/nova.conf DEFAULT instance_name_template inst-%08x
openstack-config --set /etc/nova/nova.conf conductor use_local false
# For Live Migration
openstack-config --set /etc/nova/nova.conf libvirt live_migration_flag VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE,VIR_MIGRATE_TUNNELLED
openstack-config --set /etc/nova/nova.conf libvirt live_migration_uri qemu+ssh://nova@%s/system
# Assign bash to nova user
usermod -s /bin/bash nova
# Configure SSH-KEY sharing for computes 'root' -> 'nova'
# NOTE(review): private keys are fetched over plain HTTP from the repo
# server — assumes a trusted management network.
mkdir -p $ROOT_HOME/.ssh/
wget -O $ROOT_HOME/.ssh/id_rsa http://$REPO_SERVER/utils/ssh-live-migration/id_rsa
chmod 600 $ROOT_HOME/.ssh/id_rsa
echo "StrictHostKeyChecking=no" > $ROOT_HOME/.ssh/config
chmod 400 $ROOT_HOME/.ssh/config
# Configure Authorized_Keys for nova user (his home directory is /var/lib/nova)
mkdir -p $NOVA_HOME/.ssh/
chown nova:nova $NOVA_HOME/.ssh/
chmod 700 $NOVA_HOME/.ssh/
wget -O $NOVA_HOME/.ssh/authorized_keys http://$REPO_SERVER/utils/ssh-live-migration/authorized_keys
chmod 600 $NOVA_HOME/.ssh/authorized_keys
# Configure Authorized_Keys for nova user (his home directory is /var/lib/nova)
echo "StrictHostKeyChecking=no" > $NOVA_HOME/.ssh/config
chmod 400 $NOVA_HOME/.ssh/config
wget -O $NOVA_HOME/.ssh/id_rsa http://$REPO_SERVER/utils/ssh-live-migration/id_rsa_nova
chmod 600 $NOVA_HOME/.ssh/id_rsa
chown -R nova:nova $NOVA_HOME/.ssh
# Configure nova to restart after reboot hypervisors
openstack-config --set /etc/nova/nova.conf DEFAULT resume_guests_state_on_host_boot true
# REQUIRED FOR A/A scheduler
openstack-config --set /etc/nova/nova.conf DEFAULT scheduler_host_subset_size 30
openstack-config --set /etc/nova/api-paste.ini filter:authtoken auth_host $VIP_KEYSTONE
openstack-config --set /etc/nova/api-paste.ini filter:authtoken admin_tenant_name services
openstack-config --set /etc/nova/api-paste.ini filter:authtoken admin_user compute
openstack-config --set /etc/nova/api-paste.ini filter:authtoken admin_password $IAAS_PASS
# Nova integration with Ceilometer
openstack-config --set /etc/nova/nova.conf DEFAULT instance_usage_audit True
openstack-config --set /etc/nova/nova.conf DEFAULT instance_usage_audit_period hour
openstack-config --set /etc/nova/nova.conf DEFAULT notify_on_state_change vm_and_task_state
openstack-config --set /etc/nova/nova.conf DEFAULT notification_driver nova.openstack.common.notifier.rpc_notifier
# openstack-config cannot set a repeated key, so the second notification_driver
# line is appended with sed right after the first one.
sed -i -e 's#nova.openstack.common.notifier.rpc_notifier#nova.openstack.common.notifier.rpc_notifier\nnotification_driver = ceilometer.compute.nova_notifier#g' /etc/nova/nova.conf
# Required for Ceilometer
openstack-config --set /etc/ceilometer/ceilometer.conf keystone_authtoken auth_host $VIP_KEYSTONE
openstack-config --set /etc/ceilometer/ceilometer.conf keystone_authtoken auth_port 35357
openstack-config --set /etc/ceilometer/ceilometer.conf keystone_authtoken auth_protocol http
openstack-config --set /etc/ceilometer/ceilometer.conf keystone_authtoken admin_tenant_name services
openstack-config --set /etc/ceilometer/ceilometer.conf keystone_authtoken admin_user ceilometer
openstack-config --set /etc/ceilometer/ceilometer.conf keystone_authtoken admin_password $IAAS_PASS
openstack-config --set /etc/ceilometer/ceilometer.conf DEFAULT memcache_servers $CON_NODE_1_IP:11211,$CON_NODE_2_IP:11211,$CON_NODE_3_IP:11211
openstack-config --set /etc/ceilometer/ceilometer.conf DEFAULT rabbit_ha_queues true
openstack-config --set /etc/ceilometer/ceilometer.conf DEFAULT rabbit_hosts $BE_NODE_1_IP:5672,$BE_NODE_2_IP:5672,$BE_NODE_3_IP:5672
openstack-config --set /etc/ceilometer/ceilometer.conf publisher_rpc metering_secret $IAAS_PASS
openstack-config --set /etc/ceilometer/ceilometer.conf service_credentials os_auth_url http://$VIP_KEYSTONE:5000/v2.0
openstack-config --set /etc/ceilometer/ceilometer.conf service_credentials os_username ceilometer
openstack-config --set /etc/ceilometer/ceilometer.conf service_credentials os_tenant_name services
openstack-config --set /etc/ceilometer/ceilometer.conf service_credentials os_password $IAAS_PASS
openstack-config --set /etc/ceilometer/ceilometer.conf database connection mongodb://$BE_NODE_1_IP,$BE_NODE_2_IP,$BE_NODE_3_IP:27017/ceilometer?replicaSet=ceilometer
openstack-config --set /etc/ceilometer/ceilometer.conf database max_retries -1
openstack-config --set /etc/ceilometer/ceilometer.conf database idle_timeout 60
# We don't apply time_to_live yet, but the following command is recommended (in this example, 432000 seconds -> 5 days).
# openstack-config --set /etc/ceilometer/ceilometer.conf database time_to_live 432000
openstack-config --set /etc/ceilometer/ceilometer.conf api host $(ip addr show dev $NOVA_12_NIC scope global | grep inet | sed -e 's#.*inet ##g' -e 's#/.*##g')
# --------------------------------------------------------------------------------------------------------
# -------------------------                 NUAGE                       ---------------------------
# --------------------------------------------------------------------------------------------------------
# Configure Nuage config files — VRS personality, alubr0 bridge, and the
# active/standby VSC controllers.
echo "PERSONALITY=vrs" >> /etc/default/openvswitch
echo "PLATFORM=kvm" >> /etc/default/openvswitch
echo "DEFAULT_BRIDGE=alubr0" >> /etc/default/openvswitch
echo "CONN_TYPE=tcp" >> /etc/default/openvswitch
echo "ACTIVE_CONTROLLER=$NUAGE_ACTIVE_CONTROLLER" >> /etc/default/openvswitch
echo "STANDBY_CONTROLLER=$NUAGE_STANDBY_CONTROLLER" >> /etc/default/openvswitch
# Configuring Nuage Metadata Agent — proxies instance metadata to nova on the VIP.
echo "METADATA_PORT=9697" >> /etc/default/nuage-metadata-agent
echo "NOVA_METADATA_IP=$VIP_NOVA" >> /etc/default/nuage-metadata-agent
echo "NOVA_METADATA_PORT=8775" >> /etc/default/nuage-metadata-agent
echo "METADATA_PROXY_SHARED_SECRET=metatest" >> /etc/default/nuage-metadata-agent
echo "NOVA_CLIENT_VERSION=2" >> /etc/default/nuage-metadata-agent
echo "NOVA_OS_USERNAME=compute" >> /etc/default/nuage-metadata-agent
echo "NOVA_OS_PASSWORD=$IAAS_PASS" >> /etc/default/nuage-metadata-agent
echo "NOVA_OS_TENANT_NAME=services" >> /etc/default/nuage-metadata-agent
echo "NOVA_OS_AUTH_URL=http://$VIP_KEYSTONE:5000/v2.0" >> /etc/default/nuage-metadata-agent
echo "NUAGE_METADATA_AGENT_START_WITH_OVS=false" >> /etc/default/nuage-metadata-agent
echo "NOVA_API_ENDPOINT_TYPE=publicURL" >> /etc/default/nuage-metadata-agent
# Enable and start required services
systemctl enable openstack-nova-compute
systemctl restart openstack-nova-compute
systemctl enable openstack-ceilometer-compute
systemctl restart openstack-ceilometer-compute
systemctl enable nuage-metadata-agent
systemctl restart nuage-metadata-agent
systemctl enable openvswitch
systemctl restart openvswitch
# --------------------------------------------------------------------------------------------------------
# -------------------------                 CEPH                       ---------------------------
# --------------------------------------------------------------------------------------------------------
# First, install ceph software from ceph-admin node
# Then, execute the following commands:
# Register the cinder 'volumes' key with libvirt so qemu can attach RBD volumes.
wget http://$REPO_SERVER/utils/cinder-volume-keys/secret.xml
wget http://$REPO_SERVER/utils/cinder-volume-keys/client.volumes.key
virsh secret-define --file ~/secret.xml
virsh secret-set-value --secret $CINDER_VOLUME_UUID --base64 $(cat ~/client.volumes.key)
rm -f ~/secret.xml ~/client.volumes.key
# Boot instances directly from RBD (Ceph) instead of local qcow2 files.
openstack-config --set /etc/nova/nova.conf libvirt libvirt_images_type rbd
openstack-config --set /etc/nova/nova.conf libvirt libvirt_images_rbd_pool volumes
openstack-config --set /etc/nova/nova.conf libvirt libvirt_images_rbd_ceph_conf /etc/ceph/ceph.conf
openstack-config --set /etc/nova/nova.conf libvirt libvirt_inject_password false
openstack-config --set /etc/nova/nova.conf libvirt libvirt_inject_key false
openstack-config --set /etc/nova/nova.conf libvirt libvirt_inject_partition -2
systemctl restart openstack-nova-compute
| true
|
3dc62e15d8bbf4571755a473c7717044ee82920d
|
Shell
|
teja624/home
|
/.zsh/modules/aws/lib/sh/api/pinpoint/campaign_create.sh
|
UTF-8
| 253
| 2.640625
| 3
|
[
"Apache-2.0"
] |
permissive
|
# Create an Amazon Pinpoint campaign via the AWS CLI.
#   $1 - Pinpoint application (project) id
#   $2 - write-campaign-request payload (JSON string or file:// URI)
#   $@ - any further arguments are forwarded verbatim to the CLI
# The actual invocation goes through cond_log_and_run (logging wrapper).
# Fixed: the two expansions are now quoted so ids/payloads containing
# spaces or glob characters are passed as single arguments (SC2086).
aws_pinpoint_campaign_create() {
  local application_id="$1"
  local write_campaign_request="$2"
  shift 2
  cond_log_and_run aws pinpoint create-campaign --application-id "$application_id" --write-campaign-request "$write_campaign_request" "$@"
}
| true
|
63d7f8ad7efba4f2870699b25d31be72c6a3bcd1
|
Shell
|
enzote84/nagios-plugins
|
/check_ping2.sh
|
UTF-8
| 2,594
| 3.859375
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/sh
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Check Ping2 Plugin
#
# Version: 1.0
# Author: DEMR
# Support: emedina@enersa.com.ar
#
# Example usage:
#
# ./check_ping2.sh -H1 IP1 -H2 IP2
#
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Nagios return codes
#
STATE_OK=0        # both targets reachable
STATE_WARNING=1   # exactly one target unreachable
STATE_CRITICAL=2  # both targets unreachable
STATE_UNKNOWN=3   # bad usage / argument error
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Plugin info
#
AUTHOR="DEMR"               # shown by -v
VERSION="1.0"               # shown by -v and in the usage text
PROGNAME=$(basename "$0")   # script name for usage output; "$0" quoted (SC2086 fix)
# Print the version banner to stdout: blank line, "Version: X, Author: Y",
# blank line. Uses a single POSIX printf instead of three echo calls.
print_version() {
    printf '\nVersion: %s, Author: %s\n\n' "$VERSION" "$AUTHOR"
}
# Print command-line help (option summary) to stdout.
# Relies on globals PROGNAME and VERSION defined above.
print_usage() {
echo ""
echo "$PROGNAME"
echo "Version: $VERSION"
echo ""
echo "Usage: $PROGNAME [ -H1 ip1 -H2 ip2 ] | [-v | -h]"
echo ""
echo "  -h  Show this page"
echo "  -v  Plugin Version"
echo "  -H1 First IP address"
echo "  -H2 Second IP address"
echo ""
}
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Parse parameters
#
# Make sure the correct number of command line arguments have been supplied
if [ $# -lt 1 ]; then
echo "Insufficient arguments"
print_usage
exit $STATE_UNKNOWN
fi
# Grab the command line arguments
# -H1/-H2 are multi-character flags, so they are parsed with a manual
# while/case loop rather than getopts (which only handles single letters).
# The *_SET flags record that each required address was supplied.
HOSTADDR1_SET=0
HOSTADDR2_SET=0
while [ $# -gt 0 ]; do
case "$1" in
-h)
print_usage
exit $STATE_OK
;;
-v)
print_version
exit $STATE_OK
;;
-H1)
# consume the flag, then take the following word as the address
shift
HOSTADDR1=$1
HOSTADDR1_SET=1
;;
-H2)
shift
HOSTADDR2=$1
HOSTADDR2_SET=1
;;
*)
echo "Unknown argument: $1"
print_usage
exit $STATE_UNKNOWN
;;
esac
shift
done
# Check parameters correctness:
# Both -H1 and -H2 are mandatory; missing either one is a usage error.
if [ $HOSTADDR1_SET -eq 0 ] || [ $HOSTADDR2_SET -eq 0 ]; then
echo "Wrong parameters"
print_usage
exit $STATE_UNKNOWN
fi
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Check ping for each address
#
# PSTATUS: 0 if the host answered at least one of 2 probes, 1 otherwise.
# The awk reads the ping summary line ("X packets transmitted, Y received...")
# and tests the received count -- assumes field 4 is that count; TODO confirm
# for the local ping implementation. If ping emits no "received" line at all,
# PSTATUS ends up empty and the arithmetic below would fail.
PSTATUS1=`ping -c 2 -i 0.2 -q $HOSTADDR1|grep received|awk '{if($4 >= 1) print 0; else print 1;}'`
PSTATUS2=`ping -c 2 -i 0.2 -q $HOSTADDR2|grep received|awk '{if($4 >= 1) print 0; else print 1;}'`
# Pack both results into a 2-bit value: bit1 = host1 down, bit0 = host2 down.
RESULT=$(($PSTATUS1 * 2 + $PSTATUS2))
case $RESULT in
0)
FINAL_STATUS="OK - Both address responding"
RETURN_STATUS=$STATE_OK
;;
1)
FINAL_STATUS="WARNING - $HOSTADDR2 not responding"
RETURN_STATUS=$STATE_WARNING
;;
2)
FINAL_STATUS="WARNING - $HOSTADDR1 not responding"
RETURN_STATUS=$STATE_WARNING
;;
3)
FINAL_STATUS="CRITICAL - $HOSTADDR1 and $HOSTADDR2 not responding"
RETURN_STATUS=$STATE_CRITICAL
;;
esac
# One status line on stdout plus the matching Nagios exit code.
echo $FINAL_STATUS
exit $RETURN_STATUS
| true
|
dc72c0cd45d4152c64e7e274b83254d468eec513
|
Shell
|
wp-pro-club/init
|
/wpi.sh
|
UTF-8
| 1,785
| 3.109375
| 3
|
[] |
no_license
|
#!/bin/bash
# Init - Wp Pro Club
# by DimaMinka (https://dimaminka.com)
# https://github.com/wp-pro-club/init
# Entry point: with an argument, delegate to the updater; otherwise run the
# install pipeline where each stage is gated by a conf_app_setup_* flag
# (presumably defined by lib/app-init.sh -- TODO confirm).
source ${PWD}/lib/app-init.sh
if [[ "$1" ]]
then
# Run the update script
bash ${PWD}/lib/update.sh $1
else
# Run the before_install after setup checking
if [ "$conf_app_setup_shell" == "true" ]; then
bash ${PWD}/lib/before-install.sh
fi
# Run the workflow install after setup checking
# NOTE(review): this gate is "!= false" (default-on), unlike the other
# stages which are "== true" (default-off) -- confirm this is intended.
if [ "$conf_app_setup_workflow" != "false" ]; then
bash ${PWD}/lib/workflow.sh
# Run the env making after setup checking
if [ "$conf_app_setup_env" == "true" ]; then
bash ${PWD}/lib/env.sh
fi
# Run the settings script after setup checking
if [ "$conf_app_setup_settings" == "true" ]; then
bash ${PWD}/lib/settings.sh
fi
# Run the plugins install after setup checking
if [ "$conf_app_setup_mu_plugins" == "true" ]; then
bash ${PWD}/lib/mu-plugins.sh
fi
# Run the plugins install after setup checking
if [ "$conf_app_setup_plugins" == "true" ]; then
bash ${PWD}/lib/plugins-bulk.sh
bash ${PWD}/lib/plugins-single.sh
fi
# Run the theme install after setup checking
if [ "$conf_app_setup_theme" == "true" ]; then
bash ${PWD}/lib/theme.sh
fi
# Run the child theme install after setup checking
if [ "$conf_app_setup_child_theme" == "true" ]; then
bash ${PWD}/lib/child-theme.sh
fi
# Run the extra script after setup checking
if [ "$conf_app_setup_extra" == "true" ]; then
bash ${PWD}/lib/extra.sh
fi
fi
# Run the after_install after setup checking
if [ "$conf_app_setup_shell" == "true" ]; then
bash ${PWD}/lib/after-install.sh
fi
fi
| true
|
e0eaef0fad38323f9edca7a4af79a83a5c4b6531
|
Shell
|
pombredanne/sync-oai
|
/docs/resync-client/test.sh
|
UTF-8
| 1,573
| 3.015625
| 3
|
[] |
no_license
|
#!/bin/bash
# End-to-end smoke test for resync-client: builds a tiny source collection
# under /tmp/rs_test, publishes a sitemap for it (-w), then exercises sync
# (-s), update, and delete propagation (--delete) into a destination dir.
# Destructive: wipes /tmp/rs_test on every run.
echo "### Will run tests on local machine with files in /tmp"
rm -rf /tmp/rs_test
mkdir /tmp/rs_test
mkdir /tmp/rs_test/src
echo "I am file_a" > /tmp/rs_test/src/file_a
echo "I am file_b, bigger than file_a" > /tmp/rs_test/src/file_b
mkdir /tmp/rs_test/dst
echo "### Make sitemap for this local collection"
./resync-client -w /tmp/rs_test/src=/tmp/rs_test/src > /tmp/rs_test/src/sitemap.xml
ls -l /tmp/rs_test/src
echo "### Do resync... (should copy 2 new resources)"
./resync-client -s /tmp/rs_test/src=/tmp/rs_test/dst
ls -l /tmp/rs_test/src /tmp/rs_test/dst
echo "### Do resync again, no changes"
./resync-client -s /tmp/rs_test/src=/tmp/rs_test/dst
echo "### Updating file_a on src"
echo "I am the new version of file_a" > /tmp/rs_test/src/file_a
./resync-client -w /tmp/rs_test/src=/tmp/rs_test/src > /tmp/rs_test/src/sitemap.xml
ls -l /tmp/rs_test/src /tmp/rs_test/dst
echo "### Do resync... (should report 1 changed resource)"
./resync-client -s /tmp/rs_test/src=/tmp/rs_test/dst
ls -l /tmp/rs_test/src /tmp/rs_test/dst
echo "### Delete file_a on src"
rm /tmp/rs_test/src/file_a
./resync-client -w /tmp/rs_test/src=/tmp/rs_test/src > /tmp/rs_test/src/sitemap.xml
ls -l /tmp/rs_test/src /tmp/rs_test/dst
echo "### Do resync... (should report 1 deleted resource, but no update)"
./resync-client -s /tmp/rs_test/src=/tmp/rs_test/dst
ls -l /tmp/rs_test/src /tmp/rs_test/dst
echo "### Do resync... (should report 1 deleted resource, now actually deletes)"
./resync-client -s --delete /tmp/rs_test/src=/tmp/rs_test/dst
ls -l /tmp/rs_test/src /tmp/rs_test/dst
| true
|
7db9914ec5d0e060466ed07cbb2cc661e61e3e47
|
Shell
|
sgothel/jogamp-scripting
|
/maven/projects/jocl-main/pom.sh
|
UTF-8
| 1,665
| 3.296875
| 3
|
[] |
no_license
|
#!/bin/sh
# Generate the jocl-main POM on stdout.
# Usage: pom.sh <version> <platform>...
if [ $# -lt 2 ]
then
echo "usage: version projects+" 1>&2
exit 1
fi
VERSION="$1"
shift
# Remaining arguments are the native-classifier platform names.
PLATFORMS=$@
# Emit the POM preamble. ${VERSION} expands now; the \${...} Maven
# properties are escaped so they reach the output literally.
cat <<EOF
<?xml version="1.0" encoding="UTF-8"?>
<project
xmlns="http://maven.apache.org/POM/4.0.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<!-- -->
<!-- Auto generated by jocl-main.pom.sh, do not edit directly! -->
<!-- -->
<modelVersion>4.0.0</modelVersion>
<groupId>org.jogamp.jocl</groupId>
<artifactId>jocl-main</artifactId>
<version>${VERSION}</version>
<packaging>jar</packaging>
<name>JOCL</name>
<description>Java™ Binding for the OpenCL® API</description>
<url>http://jogamp.org/jocl/www/</url>
<!-- -->
<!-- Explicitly depend on jocl, and all of its native binary jars. -->
<!-- -->
<dependencies>
<dependency>
<groupId>\${project.groupId}</groupId>
<artifactId>jocl</artifactId>
<version>\${project.version}</version>
</dependency>
EOF
# One natives-<platform> classified dependency per platform argument.
for PLATFORM in ${PLATFORMS}
do
cat <<EOF
<dependency>
<groupId>\${project.groupId}</groupId>
<artifactId>jocl</artifactId>
<version>\${project.version}</version>
<classifier>natives-${PLATFORM}</classifier>
</dependency>
EOF
done
cat <<EOF
</dependencies>
EOF
# Shared POM fragment; abort if it is missing so we never emit a broken POM.
cat ../jocl/pom.in || exit 1
cat <<EOF
</project>
EOF
| true
|
a2ed6c11e5aa056b8209a8fdb89a874eec6c9e2b
|
Shell
|
icsy7867/PocketVPN
|
/website/install.sh
|
UTF-8
| 2,022
| 3.515625
| 4
|
[] |
no_license
|
#!/bin/bash
# PocketVPN web-interface installer: installs Apache/PHP/OpenVPN, grants
# www-data passwordless sudo for the VPN helper scripts, deploys the site
# to /var/www/html, and sets a default htpasswd login (admin/1234567890).
# Must be run as root from the unpacked website directory.
apt-get install nano openvpn apache2 php libapache2-mod-php sudo -y
#me="$(whoami)"
#usermod -aG sudo $me
echo "Setting up .htaccess files..."
mkdir /httpaccess
touch /httpaccess/.htpasswd
chown www-data:root /httpaccess/.htpasswd
chmod 770 /httpaccess/.htpasswd
#echo "If you want to include htaccess, first go to the website -> Settings and change the webpassword, and then add this to your apache virtual directory:
# <Directory "/var/www/html">
# AuthType Basic
# AuthName "Restricted Content"
# AuthUserFile /httpaccess/.htpasswd
# Require valid-user
# </Directory>"
echo "Setting permissions..."
# Website files belong to www-data; openvpn tree to root, except the
# pieces the web UI must write (auth, config, connect log).
chown -R www-data:www-data *
chown -R root:root openvpn/*
chown -R www-data:root openvpn/config/*
chown www-data:root openvpn/auth
chown www-data:root openvpn/config
chown www-data:root openvpn/openvpn_connect.log
echo "Setting www-data passwordless sudo privileges to vpn scripts"
echo "www-data ALL = (root) NOPASSWD: /var/www/html/openvpn/connect.sh" >> /etc/sudoers
echo "www-data ALL = (root) NOPASSWD: /var/www/html/openvpn/disconnect.sh" >> /etc/sudoers
echo "www-data ALL = (root) NOPASSWD: /var/www/html/scripts/webpass.sh" >> /etc/sudoers
echo "www-data ALL = (root) NOPASSWD: /usr/bin/htpasswd" >> /etc/sudoers
echo "www-data ALL = (root) NOPASSWD: /bin/ping" >> /etc/sudoers
echo "www-data ALL = (root) NOPASSWD: /var/www/html/scripts/power.sh" >> /etc/sudoers
echo "copying website files over..."
rm /var/www/html/index.html
cp -rp * /var/www/html
# Set default password to 1234567890
echo "Setting web interface password to admin/1234567890"
/usr/bin/htpasswd -b /httpaccess/.htpasswd admin 1234567890
# cp apache conf file to apache sites-available directory
echo "Moving apache2 conf file over... and restarting apache"
cp installation_files/000-default.conf /etc/apache2/sites-available/000-default.conf
#File clean up
echo "Cleaning up files..."
rm /var/www/html/install.sh
rm -rf /var/www/html/installation_files
echo "restarting apache2"
service apache2 restart
| true
|
b23ba2ca701a624dc5cd273c65e6ba4aeeb6f3e4
|
Shell
|
garyo/yalc-test
|
/RUNME.sh
|
UTF-8
| 2,231
| 3.71875
| 4
|
[] |
no_license
|
#! /bin/bash
# Run like this:
#   git clean -fdx && bash RUNME.sh
# OR:
#   git clean -fdx && RUN_YARN=1 USE_PURE=1 bash RUNME.sh
# Adding "RUN_YARN=1" makes it run a "yarn install" after doing the
# yalc setup. Adding USE_PURE=1 or USE_LINK=1 or USE_ADDLINK=1 makes
# yalc use "add --pure", "link", or "add --link" respectively.
# If you have "tree" installed it'll use that for prettier output
############ NOTES:
# no args: works OK
# USE_ADDLINK=1: works OK
# USE_LINK=1: works OK
# USE_PURE=1: fails, because --pure makes yalc do nothing
# RUN_YARN=1: works OK
# RUN_YARN=1 USE_ADDLINK=1: works OK
# RUN_YARN=1 USE_LINK=1: fails because prod node_modules is empty
# RUN_YARN=1 USE_PURE=1: fails because prod node_modules doesn't exist
# "add --pure" and "link" don't change package.json, the others do.
# Only the ones that change package.json work.
set -e
set -x
command -v yalc || echo "Need yalc installed to run this test"
# NOTE(review): this probes "xtree", not "tree", so the tree branch is
# effectively never taken -- confirm whether that is a deliberate disable.
TREE=$(command -v xtree >/dev/null 2>&1 && echo "tree -a" || echo "ls -R -A")
# Clean before starting:
yalc installations clean @garyo-test/common
git restore app/package.json # remove anything yalc added
# Publish common module
cd common
yarn build # does a "yalc publish"
# Update in app
cd -
cd app
# This copies @garyo-test/common into .yalc and symlinks that into node_modules/@garyo-test
# Only need to do this once
if [ -n "$USE_LINK" ]; then
yalc link @garyo-test/common
elif [ -n "$USE_PURE" ]; then
yalc add --pure @garyo-test/common
elif [ -n "$USE_ADDLINK" ]; then
# Also adds a link: dependency to .yalc/@garyo-test/common
yalc add --link @garyo-test/common #
else
yalc add @garyo-test/common
fi
git diff package.json
$TREE . || /bin/true
# If "yalc link" was used above, this creates top-level
# node_modules/@garyo-test/common symlink and _removes_
# @garyo-test/common from app/node_modules!
if [ -n "$RUN_YARN" ]; then
cd ..
yarn
$TREE . -I .git
cd -
fi
yarn build
ts-node index.ts
cd -
# Simulate production build by copying "app" dir to /tmp and building
# without the rest of the monorepo:
PROD="/tmp/PROD-app-$$"
cp -R app "$PROD"
cd "$PROD"
$TREE .
yarn build
node dist/index.js
# clean up
rm -rf "$PROD"
| true
|
548a5fafbeb9dc97c6323c723c15332d0916584a
|
Shell
|
starlingx/root
|
/build-tools/branching/push_tags.sh
|
UTF-8
| 9,779
| 3.8125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#
# Copyright (c) 2020-2022 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
#
# A tool to push the tags, and optional manifest created by
# create_tags.sh to the upstream source.
#
# Arguemens should match those passed to create_tags.sh
# with the exception of '--lockdown'.
#
# Locate this script's directory and pull in the shared git/repo helpers
# (echo_stderr, with_retries, repo_* and git_* functions used below).
PUSH_TAGS_SH_DIR="$(dirname "$(readlink -f "${BASH_SOURCE[0]}" )" )"
source "${PUSH_TAGS_SH_DIR}/../git-repo-utils.sh"
# Print the command-line help to stdout.
usage () {
    echo "push_tags.sh --tag=<tag> [ --remotes=<remotes> ] [ --projects=<projects> ]"
    echo "             [ --exclude-projects=<projects> ]"
    echo "             [ --manifest [ --manifest-file=<manifest.xml> ] [--manifest-prefix <prefix>]]"
    echo "             [ --bypass-gerrit ] [--safe-gerrit-host=<host>]"
    echo "             [ --access-token=<remote>:<token> ] [ --dry-run ]"
    echo " "
    echo "Push a pre-existing git tag into all listed projects, and all projects"
    echo "hosted by all listed remotes, minus excluded projects."
    echo "Lists are comma separated."
    echo ""
    echo "--access-token can be used to supply an access token for direct (non-gerrit) push attempts"
    echo "               to specific remotes.  e.g. github now requires this"
    echo ""
    echo "A manifest push can also be requested."
    echo ""
    echo "--manifest-file may be used to override the manifest file to be updated."
    echo ""
    echo "--safe-gerrit-host allows one to specify host names of gerrit servers"
    echo "that are safe to push reviews to."
}
# Parse command line with util-linux getopt (supports long options).
TEMP=$(getopt -o h,n --long remotes:,projects:,exclude-projects:,tag:,manifest,manifest-file:,manifest-prefix:,bypass-gerrit,safe-gerrit-host:,access-token:,help,dry-run -n 'push_tags.sh' -- "$@")
if [ $? -ne 0 ]; then
    echo_stderr "ERROR: getopt failure"
    usage
    exit 1
fi
eval set -- "$TEMP"
# Defaults: comma-separated list options accumulate into space-separated
# strings; access tokens are keyed by remote name.
HELP=0
DRY_RUN=
MANIFEST=0
BYPASS_GERRIT=0
remotes=""
projects=""
excluded_projects=""
tag=""
manifest=""
manifest_prefix=""
new_manifest=""
repo_root_dir=""
declare -A access_token
safe_gerrit_hosts=()
while true ; do
    case "$1" in
        -h|--help)           HELP=1 ; shift ;;
        -n|--dry-run)        DRY_RUN="--dry-run" ; shift ;;
        --bypass-gerrit)     BYPASS_GERRIT=1 ; shift ;;
        --remotes)           remotes+=$(echo "$2 " | tr ',' ' '); shift 2;;
        --projects)          projects+=$(echo "$2 " | tr ',' ' '); shift 2;;
        --exclude-projects)  excluded_projects+=$(echo "$2 " | tr ',' ' '); shift 2;;
        --tag)               tag=$2; shift 2;;
        --manifest)          MANIFEST=1 ; shift ;;
        --manifest-file)     repo_set_manifest_file "$2"; shift 2;;
        --manifest-prefix)   manifest_prefix=$2; shift 2;;
        --safe-gerrit-host)  safe_gerrit_hosts+=("$2") ; shift 2 ;;
        --access-token)      val=$2
                             # value is "<remote>:<token>"; both parts required
                             at_remote=$(echo "$val" | cut -d ':' -f 1)
                             at_token=$(echo "$val" | cut -d ':' -f 2)
                             if [ -z "$at_token" ]; then
                                 usage
                                 exit 1
                             fi
                             access_token["$at_remote"]="$at_token"
                             shift 2 ;;
        --)                  shift ; break ;;
        *)                   usage; exit 1 ;;
    esac
done
git_set_safe_gerrit_hosts "${safe_gerrit_hosts[@]}"
if [ $HELP -eq 1 ]; then
    usage
    exit 0
fi
# --tag is mandatory.
if [ "$tag" == "" ] ; then
    echo_stderr "ERROR: You must specify a tags"
    usage
    exit 1
fi
# Must be run from inside a repo-managed tree.
repo_root_dir=$(repo_root)
if [ $? -ne 0 ]; then
    echo_stderr "Current directory is not managed by repo."
    exit 1
fi
# For --manifest: the tagged manifest (created earlier by create_tags.sh)
# must already exist next to the current manifest.
if [ $MANIFEST -eq 1 ]; then
    manifest=$(repo_manifest $repo_root_dir)
    if [ $? -ne 0 ]; then
        echo_stderr "failed to find current manifest."
        exit 1
    fi
    if [ ! -f $manifest ]; then
        echo_stderr "manifest file missing '$manifest'."
        exit 1
    fi
    new_manifest="$(dirname $manifest)/${manifest_prefix}${tag}-$(basename $manifest)"
    if [ ! -f $new_manifest ]; then
        echo_stderr "Expected a tagged manifest file already present '$new_manifest'."
        exit 1
    fi
fi
# Every explicitly named project (included or excluded) must be known to repo.
for project in $projects $excluded_projects; do
    if ! repo_is_project $project; then
        echo_stderr "Invalid project: $project"
        echo_stderr "Valid projects are: $(repo_project_list | tr '\n' ' ')"
        exit 1
    fi
done
# Every named remote must be known to repo.
for remote in $remotes; do
    if ! repo_is_remote $remote; then
        echo_stderr "Invalid remote: $remote"
        echo_stderr "Valid remotes are: $(repo_remote_list | tr '\n' ' ')"
        exit 1
    fi
done
# Add projects from listed remotes
if [ "$remotes" != "" ]; then
    projects+="$(repo_project_list $remotes | tr '\n' ' ')"
fi
# If no projects or remotes specified, process ALL projects
if [ "$projects" == "" ] && [ "$remotes" == "" ]; then
    projects="$(repo_project_list)"
fi
# Strip excluded projects from the final project list.
# Fixed: this previously tested and iterated "$exclude_projects", a variable
# that is never set; the option parser stores exclusions in
# "$excluded_projects", so --exclude-projects was silently ignored.
if [ "$projects" != "" ] && [ "$excluded_projects" != "" ]; then
    for project in $excluded_projects; do
        # Remove the project whether it appears mid-list, first, last,
        # or as the only entry (space-separated list).
        projects=$(echo $projects | sed -e "s# $project # #" -e "s#^$project ##" -e "s# $project\$##" -e "s#^$project\$##")
    done
fi
# After remote expansion and exclusions there must be something left to push.
if [ "$projects" == "" ]; then
    echo_stderr "No projects found"
    exit 1
fi
echo "List of projects to be pushed"
echo "============================="
for project in $projects; do
    echo $project
done
echo "============================="
echo
echo "Finding subgits"
# Absolute path of every selected project's working tree.
SUBGITS=$(repo forall $projects -c 'echo '"$repo_root_dir"'/$REPO_PATH')
# Go through all subgits and push the tag; fail if a subgit lacks the tag.
# The whole loop runs in a subshell so any per-repo "exit 1" aborts the
# script via the trailing "|| exit 1".
(
for subgit in $SUBGITS; do
    (
    cd $subgit
    # The tag must already exist locally (created by create_tags.sh).
    tag_check=$(git tag -l $tag)
    if [ "${tag_check}" == "" ]; then
        echo_stderr "ERROR: Expected tag '$tag' to exist in ${subgit}"
        exit 1
    fi
    review_method=$(git_repo_review_method)
    if [ "${review_method}" == "" ]; then
        echo_stderr "ERROR: Failed to determine review method in ${subgit}"
        exit 1
    fi
    remote=$(git_repo_review_remote)
    if [ "${remote}" == "" ]; then
        echo_stderr "ERROR: Failed to determine remote in ${subgit}"
        exit 1
    fi
    if [ "${review_method}" == "gerrit" ]; then
        review_remote=$(git_repo_review_remote)
    else
        review_remote=${remote}
    fi
    if [ "${review_remote}" == "" ]; then
        echo_stderr "ERROR: Failed to determine review_remote in ${subgit}"
        exit 1
    fi
    echo "Pushing tag $tag in ${subgit}"
    if [ "${review_method}" == "gerrit" ] && [ $BYPASS_GERRIT -eq 0 ]; then
        # Gerrit-hosted repo: push the tag to the review remote, retrying.
        echo "git push ${review_remote} ${tag}"
        with_retries -d 45 -t 15 -k 5 5 git push ${review_remote} ${tag} ${DRY_RUN}
    else
        # Direct push; some hosts (e.g. github) require an access token.
        if [ "${access_token[${remote}]}" != "" ]; then
            echo "Trying remote '${remote}' with access token"
            git_set_push_url_with_access_token "${remote}" "${access_token[${remote}]}"
            if [ $? != 0 ]; then
                echo_stderr "ERROR: Failed to set url with access token for remote '${remote}' in ${subgit}"
                exit 1
            fi
        fi
        echo "git push ${remote} ${tag}"
        with_retries -d 45 -t 15 -k 5 2 git push ${remote} ${tag} ${DRY_RUN}
    fi
    if [ $? != 0 ] ; then
        echo_stderr "ERROR: Failed to push tag '${tag}' to remote '${remote}' in ${subgit}"
        exit 1
    fi
    )
done
) || exit 1
# Optionally push the tagged manifest: via "git review" for gerrit-managed
# manifests (tag push deferred until the review merges), or a direct
# branch + tag push otherwise.
if [ $MANIFEST -eq 1 ]; then
    (
    new_manifest_name=$(basename "${new_manifest}")
    new_manifest_dir=$(dirname "${new_manifest}")
    cd "${new_manifest_dir}" || exit 1
    local_branch=$(git_local_branch)
    if [ "${local_branch}" == "" ]; then
        echo_stderr "ERROR: failed to determine local branch in ${new_manifest_dir}"
        exit 1
    fi
    remote_branch=$(git_remote_branch)
    if [ "${remote_branch}" == "" ]; then
        echo_stderr "ERROR: failed to determine remote branch in ${new_manifest_dir}"
        exit 1
    fi
    if [ ! -f ${new_manifest_name} ]; then
        echo_stderr "ERROR: Expected file '${new_manifest_name}' to exist in ${new_manifest_dir}"
        exit 1
    fi
    tag_check=$(git tag -l $tag)
    if [ "${tag_check}" == "" ]; then
        echo_stderr "ERROR: Expected tag '$tag' to exist in ${new_manifest_dir}"
        exit 1
    fi
    review_method=$(git_review_method)
    if [ "${review_method}" == "" ]; then
        echo_stderr "ERROR: Failed to determine review method in ${new_manifest_dir}"
        exit 1
    fi
    remote=$(git_remote)
    if [ "${remote}" == "" ]; then
        echo_stderr "ERROR: Failed to determine remote in ${new_manifest_dir}"
        exit 1
    fi
    review_remote=$(git_review_remote)
    if [ "${review_remote}" == "" ]; then
        echo_stderr "ERROR: Failed to determine review remote in ${new_manifest_dir}"
        exit 1
    fi
    echo "Pushing tag $tag in ${new_manifest_dir}"
    if [ "${review_method}" == "gerrit" ] && [ $BYPASS_GERRIT -eq 0 ]; then
        with_retries -d 45 -t 15 -k 5 5 git review
        if [ $? != 0 ] ; then
            echo_stderr "ERROR: Failed to create git review from ${new_manifest_dir}"
            exit 1
        fi
        # The tag can only be pushed after the review merges.
        echo "When review is merged: please run ..."
        echo "   cd ${new_manifest_dir}"
        echo "   git push ${review_remote} ${tag}"
    else
        echo "git push ${remote} ${local_branch}:${remote_branch}" && \
        with_retries -d 45 -t 15 -k 5 5 git push ${remote} ${local_branch}:${remote_branch} ${DRY_RUN} && \
        echo "git push ${remote} ${tag}:${tag}" && \
        with_retries -d 45 -t 15 -k 5 5 git push ${remote} ${tag}:${tag} ${DRY_RUN}
    fi
    if [ $? != 0 ] ; then
        echo_stderr "ERROR: Failed to push tag '${tag}' to branch ${remote_branch} on remote '${remote}' from ${new_manifest_dir}"
        exit 1
    fi
    ) || exit 1
fi
| true
|
cc317bed791b94081bc04487a257ccf93124dc81
|
Shell
|
sid0sid/docker-idol
|
/docker-entrypoint.sh
|
UTF-8
| 2,781
| 3.203125
| 3
|
[] |
no_license
|
#!/bin/bash
set -e
# Start the SMC connectors in the background, detached from this shell.
function smc_service_start() {
echo "Starting Connectors..."
nohup smc_service -a=start > /dev/null 2> /dev/null &
}
# Stop the SMC connectors (synchronous; requires sudo inside the container).
function smc_service_stop() {
echo "Stopping Connectors..."
sudo smc_service -a=stop
}
# IDOL should be shut down properly
# Stops connectors and every IDOL component in reverse dependency order,
# then terminates the most recent background job ($!).
function shut_down() {
echo "Shutting Down ..."
smc_service_stop
stop-agentstore
stop-cfs
stop-community
stop-category
stop-content
stop-view
stop-licenseserver
stop-find
stop-eductionserver
echo '======================================================================================================================================='
echo 'Thanks for using this container. Any comments/questions at hi@yuntaz.com'
echo '======================================================================================================================================='
kill -s SIGTERM $!
exit 0
}
# Run shut_down on container stop signals and on normal exit.
# NOTE(review): SIGKILL cannot actually be trapped; listing it here is a no-op.
trap "shut_down" SIGKILL SIGTERM SIGHUP SIGINT EXIT
echo 'Starting up'
echo '======================================================================================================================================='
echo 'HPE IDOL is a search engine with machine learning built to handle all kind of information, structured (office docs, databases and more)'
echo 'and unstructured (social media, video, audio and more). To run it, you will need a valid HPE IDOL license which is not provided here.'
echo 'See below how to contact us if you want to see IDOL working. If you are a customer from HPE IDOL, you can use your current IDOL license'
echo 'to test the new version or just to use this software as your license says to do it.'
echo '======================================================================================================================================='
# Start components in dependency order; license server must come first.
# The fixed sleeps give each service time to bind before the next starts.
start-licenseserver
sleep 5
start-agentstore
sleep 5
start-cfs
sleep 5
start-community
sleep 5
start-category
sleep 5
start-content
sleep 5
start-view
sleep 5
start-find
sleep 5
start-eductionserver
sleep 10
#echo 'Adding user: idol password: idol ...'
# Seed a default Find user (idol/idol) with user, BI and admin roles via the
# Community service on port 9030.
curl --silent --output /dev/null -d "action=UserAdd&UserName=idol&Password=idol" http://localhost:9030
curl --silent --output /dev/null -d "action=RoleAddUserToRole&RoleName=FindUser&UserName=idol" http://localhost:9030
curl --silent --output /dev/null -d "action=RoleAddUserToRole&RoleName=FindBI&UserName=idol" http://localhost:9030
curl --silent --output /dev/null -d "action=RoleAddUserToRole&RoleName=FindAdmin&UserName=idol" http://localhost:9030
#echo 'Uploading information to IDOL'
# Create an Articles database on the Content engine (port 9101) and index
# the bundled sample data.
curl --silent --output /dev/null http://localhost:9101/DRECREATEDBASE?DREDbName=Articles
curl --silent --output /dev/null http://localhost:9101/DREADD?/opt/test.idx
su - idol -c '/bin/bash'
# Keep PID 1 alive so the container keeps running until signalled.
while true; do
sleep 1
done
| true
|
1cd910e848e245cab136aac8ebd2467f06bdd6be
|
Shell
|
irontec/rtpproxy
|
/tests/rtp_analyze1
|
UTF-8
| 492
| 3.171875
| 3
|
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/sh
# checksum tests to verify that rtp_analyze routines work correctly
# For each sample session: extract audio, diff the produced stats against
# the golden output, then verify the wav file's sha256 checksum.
# Helper functions/vars (EXTRACTAUDIO, DIFF, report, sha256_verify) come
# from the sibling "functions" file sourced below.
. $(dirname $0)/functions
SNAMES="sess6 c1 weird dups lost srtp1"
for sname in ${SNAMES}
do
  wfile="rtp_analyze_${sname}.wav"
  ${EXTRACTAUDIO} -s rtp_analyze/${sname} "${wfile}" > rtp_analyze_${sname}.tout 2>rtp_analyze_${sname}.tlog
  ${DIFF} rtp_analyze/${sname}.output rtp_analyze_${sname}.tout
  report "checking stats for the ${sname}"
  sha256_verify "${wfile}" rtp_analyze/rtp_analyze1.checksums
done
| true
|
03fd6a5e3f174043e0dfd565012917c2db426722
|
Shell
|
Slifer64/DMP_KF
|
/c++/install/install_armadillo.sh
|
UTF-8
| 1,809
| 3.796875
| 4
|
[] |
no_license
|
#!/bin/bash
# ==================================
# define some colors for output
# ==================================
COLOR_RED="\033[1;31m"
COLOR_GREEN="\033[1;32m"
COLOR_YELLOW="\033[1;33m"
COLOR_BLUE="\033[1;34m"
COLOR_CYAN="\033[1;36m"
COLOR_WHITE="\033[1;37m"
COLOR_RESET="\033[0m"
# NOTE: this script uses "return" and reports via UR_ERROR, so it is meant
# to be sourced by a parent install script that sets INSTALL_SCRIPTS_DIR.
cd $INSTALL_SCRIPTS_DIR/deps
echo -e $COLOR_CYAN"*******************************"$COLOR_RESET
echo -e $COLOR_CYAN"********   Armadillo   ********"$COLOR_RESET
echo -e $COLOR_CYAN"*******************************"$COLOR_RESET
echo -e $COLOR_BLUE"Searching for Armadillo"$COLOR_RESET
# Already installed? Then report success and stop early.
FOUND_ARMADILLO=`find /usr/lib/x86_64-linux-gnu/ -name "libarmadillo.so" -print`
if [ -n "$FOUND_ARMADILLO" ]; then
  echo -e $COLOR_GREEN"Found Armadillo!"$COLOR_RESET
  UR_ERROR=0
  return
else
  echo -e $COLOR_YELLOW"Didn't find Armadillo!"$COLOR_RESET
fi
# Build prerequisites; the trailing echo only runs if both apt steps succeed.
echo -e $COLOR_BLUE"Installing Armadillo Dependencies: cmake, OpenBLAS and LAPACK, wget, xz-utils..."$COLOR_RESET
sudo apt-get update > /dev/null && \
sudo apt-get install -y cmake libopenblas-dev liblapack-dev wget xz-utils > /dev/null && \
echo -e $COLOR_BLUE"Downloading and building the Armadillo (v8.300)"$COLOR_RESET
# Remove any leftover source tree from a previous (possibly partial) build.
# Fixed: the path previously read "armadillo-8.300." (missing the trailing
# "1"), so the stale "armadillo-8.300.1" directory was never removed.
if [ -d armadillo-8.300.1 ]; then
    rm -rf armadillo-8.300.1
fi
# Download, unpack, configure, build and install Armadillo 8.300.1.
# The whole pipeline is chained with && so $? below reflects the first
# failing step (or the final "make install").
wget --no-check-certificate http://sourceforge.net/projects/arma/files/armadillo-8.300.1.tar.xz > /dev/null && \
tar xvf armadillo-8.300.1.tar.xz > /dev/null && \
rm -rf armadillo-8.300.1.tar.xz && \
cd armadillo-8.300.1 && \
echo -e $COLOR_BLUE"Building Armadillo"$COLOR_RESET && \
cmake . && \
make && \
echo -e $COLOR_BLUE"Installing Armadillo"$COLOR_RESET && \
sudo make install > /dev/null
# UR_ERROR is the sourced-script result code consumed by the caller.
if [ $? -eq 0 ]; then
  echo -e $COLOR_GREEN"Armadillo Successfully installed!"$COLOR_RESET
  UR_ERROR=0
else
  echo -e $COLOR_RED"Armadillo installation failed!"$COLOR_RESET
  UR_ERROR=1
fi
|
2c0432a55f7d5fc98b6c8797d58f2d88dab7a6ef
|
Shell
|
kyet/ble_dimmer
|
/rpi/ble_dimmer.sh
|
UTF-8
| 1,044
| 2.71875
| 3
|
[
"Unlicense"
] |
permissive
|
#!/bin/bash
# Drive a BLE dimmer over GATT.
# Usage: ble_dimmer.sh <port: 0=all|1|2> <action: 1=fade up, else fade down>
# Each cmd is a hex payload written to attribute handle 0x25; the inline
# comments give the dimming curve each payload encodes.
DEV_ADDR="00:15:83:00:73:9C"
PORT=$1
ACTION=$2
if [ "$PORT" == "0" ]; then # all
  if [ "$ACTION" == "1" ]; then
    # [y = 1/128x^2] for 5 sec
    cmd="07020f3201018001010402018001010100"
  else
    # [y = -x + 127] for 5 sec
    cmd="05020f32010601ff017f020501ff017f00"
  fi
elif [ "$PORT" == "1" ]; then
  if [ "$ACTION" == "1" ]; then
    # [y = x] for 5 sec
    #cmd="0502093201040101010100"
    # [y = 1/128x^2] for 5 sec
    cmd="0702093201018001010100"
  else
    # [y = -x + 127] for 5 sec
    cmd="05020932010501ff017F00"
  fi
elif [ "$PORT" == "2" ]; then
  if [ "$ACTION" == "1" ]; then
    # [y = x] for 5 sec
    #cmd="0502093202040101010100"
    # [y = 1/128x^2] for 5 sec
    cmd="0702093202018001010100"
  else
    # [y = -x + 127] for 5 sec
    cmd="05020932020501ff017F00"
  fi
else
  exit
fi
# Drive gatttool interactively; the write is issued three times --
# presumably for reliability of unacknowledged writes (TODO confirm).
expect << EOF
spawn gatttool -b $DEV_ADDR -I
send "connect\n"
expect "Connection successful"
expect ">"
send "char-write-cmd 0x25 $cmd\n"
expect ">"
send "char-write-cmd 0x25 $cmd\n"
expect ">"
send "char-write-cmd 0x25 $cmd\n"
expect ">"
exit
EOF
| true
|
0f86d383330882e58e971f3171d0318a4502a40a
|
Shell
|
siers/dotfiles
|
/conf/zsh/.config/zsh/env
|
UTF-8
| 2,664
| 3.015625
| 3
|
[] |
no_license
|
#!/usr/bin/env zsh
# Interactive-shell environment setup: pager/editor, PATH assembly for
# various toolchains, and per-tool initialization (direnv, rbenv, ...).
export MANPAGER='nvim +Man!'
# export PAGER='nvim -R -'
export EDITOR=vim
# export TERM=alacritty
# export TERM=rxvt-unicode-256color # wtf? :D
# Auto-logout idle raw-console (tty) logins after 20 minutes.
if tty | grep -qE '^/dev/tty[0-9]+$'; then
    TMOUT=$(( 60 * 20 ))
    echo "setting \$TMOUT=$TMOUT because of a tty login"
fi
# Build PATH only once per session (guarded by ZSH_PATH_INIT), or again
# once ruby becomes available for the dedup step at the bottom.
if [ -z "$ZSH_PATH_INIT" ] || command -v ruby &> /dev/null
then
    export ZSH_PATH_INIT=1
    [ -e ~/code/bin ] &&
        export PATH="$(find -L ~/code/bin -type d -not -path '*/\.*' | tr '\n' :)$PATH"
    [ -e ~/.local/bin ] &&
        export PATH="$HOME/.local/bin:$PATH"
    [ -e ~/.tfenv/bin ] &&
        export PATH="$(echo ~/.tfenv/bin):$PATH"
    [ -e "$HOME/.nix-profile/etc/profile.d/nix.sh" ] &&
        . "$HOME/.nix-profile/etc/profile.d/nix.sh"
    [ -n "$ZSH_VERSION" ] && # sourced in the bash-run ~/.xprofile
        command -v direnv &> /dev/null && eval "$(direnv hook zsh)"
    #
    [ -n "$ZSH_DARWIN" ] && eval $(docker-machine env default 2> /dev/null)
    if command -v rbenv > /dev/null && [ -d ~/.rbenv ]; then
        # git clone https://github.com/rbenv/rbenv.git ~/.rbenv; git clone https://github.com/rbenv/ruby-build.git ~/.rbenv/plugins/ruby-build
        eval "$(rbenv init -)"
    fi
    if [ -e ~/code/go/pkgs/bin ]; then
        export PATH="$PATH:$HOME/code/go/pkgs/bin"
        export GOPATH="$HOME/code/go/pkgs"
    fi
    if [ -e ~/down/perl5 ]; then
        # generated by cpan
        export PATH="$HOME/down/perl5/bin${PATH:+:${PATH}}"
        export PERL5LIB="$HOME/down/perl5/lib/perl5${PERL5LIB:+:${PERL5LIB}}"
        export PERL_LOCAL_LIB_ROOT="$HOME/down/perl5${PERL_LOCAL_LIB_ROOT:+:${PERL_LOCAL_LIB_ROOT}}"
        export PERL_MB_OPT="--install_base \"/$HOME/down/perl5\""
        export PERL_MM_OPT="INSTALL_BASE=$HOME/down/perl5"
    fi
    if [ -e ~/work/vendor/android ]; then
        export ANDROID_HOME=$HOME/work/vendor/android
        export ANDROID_SDK_HOME=$ANDROID_HOME
        export ANDROID_SDK_ROOT=$ANDROID_HOME
        export PATH=$PATH:$ANDROID_HOME/emulator
        export PATH=$PATH:$ANDROID_HOME/tools
        export PATH=$PATH:$ANDROID_HOME/tools/bin
        export PATH=$PATH:$ANDROID_HOME/platform-tools
    fi
    if command -v avdmanager > /dev/null && [ -n "$ZSH_DARWIN" ]; then
        export ANDROID_SDK_ROOT="/usr/local/share/android-sdk"
        export PATH=$PATH:$ANDROID_SDK_ROOT/emulator
        export PATH=$PATH:$ANDROID_SDK_ROOT/tools
        export PATH=$PATH:$ANDROID_SDK_ROOT/tools/bin
        export PATH=$PATH:$ANDROID_SDK_ROOT/platform-tools
    fi
fi
# Deduplicate PATH entries (requires ruby, hence the guard above).
command -v ruby &> /dev/null && \
    PATH="$(ruby -e 'puts $stdin.read.split(":").uniq.join(":")' <<< $PATH)"
| true
|
d01190d2b6776a59baf8e6e7cccc8f54e9ee39bb
|
Shell
|
Azure/batch-shipyard
|
/recipes/OpenFOAM-TCP-OpenMPI/docker/set_up_sample.sh
|
UTF-8
| 1,590
| 3.1875
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Prepare the OpenFOAM pitzDaily tutorial for an MPI run on an Azure Batch
# pool: copy the case into shared scratch, size the decomposition to the
# pool, decompose the mesh, and export MPI parameters for the run step.
set -e
set -o pipefail
# get mpi ref and set up openfoam env
OPENFOAM_DIR=/opt/OpenFOAM/OpenFOAM-4.0
source /etc/profile.d/modules.sh
module add mpi/openmpi-x86_64
source $OPENFOAM_DIR/etc/bashrc
# copy sample into auto scratch shared area
AUTO_SCRATCH_DIR=$AZ_BATCH_TASK_DIR/auto_scratch
cd $AUTO_SCRATCH_DIR
cp -r $OPENFOAM_DIR/tutorials/incompressible/simpleFoam/pitzDaily .
cp $OPENFOAM_DIR/tutorials/incompressible/simpleFoam/pitzDailyExptInlet/system/decomposeParDict pitzDaily/system/
# get nodes and compute number of processors
IFS=',' read -ra HOSTS <<< "$AZ_BATCH_HOST_LIST"
nodes=${#HOSTS[@]}
ppn=`nproc`
np=$(($nodes * $ppn))
# substitute proper number of subdomains
sed -i -e "s/^numberOfSubdomains 4/numberOfSubdomains $np;/" pitzDaily/system/decomposeParDict
# Prefer a square (root x root) decomposition when np is a perfect square,
# otherwise fall back to ppn x nodes.
# NOTE(review): the "print x if ..." statement requires python 2.
root=`python -c "import math; x=int(math.sqrt($np)); print x if x*x==$np else -1"`
if [ $root -eq -1 ]; then
    sed -i -e "s/\s*n\s*(2 2 1)/    n               ($ppn $nodes 1)/g" pitzDaily/system/decomposeParDict
else
    sed -i -e "s/\s*n\s*(2 2 1)/    n               ($root $root 1)/g" pitzDaily/system/decomposeParDict
fi
# decompose
cd pitzDaily
blockMesh
decomposePar -force
# create hostfile
hostfile="hostfile"
touch $hostfile
>| $hostfile
for node in "${HOSTS[@]}"
do
    echo $node slots=$ppn max-slots=$ppn >> $hostfile
done
# export parameters
# mpienvopts/mpienvopts2 collect WM_*/FOAM_* variable names as a chain of
# "-x VAR" mpirun arguments so the OpenFOAM env reaches remote ranks.
export mpirun=`which mpirun`
export mpienvopts=`echo \`env | grep WM_ | sed -e "s/=.*$//"\` | sed -e "s/ / -x /g"`
export mpienvopts2=`echo \`env | grep FOAM_ | sed -e "s/=.*$//"\` | sed -e "s/ / -x /g"`
export np
export hostfile
| true
|
efce3344568fa9fe0fe3a8ad2ea0b8bbc2d2d168
|
Shell
|
mvrozanti/mint.files
|
/usr-local-bin/switch_to_urxvt
|
UTF-8
| 203
| 2.53125
| 3
|
[] |
no_license
|
#!/bin/bash
if [[ $(wmctrl -l|grep tmux|grep -c -v Waterfox) -gt 0 ]]; then
wmctrl -i -a "$(wmctrl -l|grep tmux|grep -v Waterfox|cut -d' ' -f1)"
else
urxvt -geometry 100x50+1000+100 -e 'tmux'
fi
| true
|
5bf09870634dae19b60f7a03a04c41f46fe7d74a
|
Shell
|
mirek190/x86-android-5.0
|
/ndk/tests/abcc/build-abcc.sh
|
UTF-8
| 5,858
| 3.65625
| 4
|
[] |
no_license
|
#!/bin/sh
#
# Copyright (C) 2013 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Build a abcc package. This exploits build-on-device-toolchain.sh and
# needs SDK help.
#
PROGDIR=`cd $(dirname $0) && pwd`
NDK_BUILDTOOLS_PATH=$PROGDIR/../../build/tools
# prebuilt-common.sh supplies register_var_option/register_option/dump/run/
# fail_panic and friends used throughout this script.
. $NDK_BUILDTOOLS_PATH/prebuilt-common.sh
SDK_DIR=
register_var_option "--sdk-dir=<path>" SDK_DIR "SDK installation directory (Required)"
SDK_TARGET=
register_var_option "--sdk-target=<str>" SDK_TARGET "SDK target for building APK (Use 'android list target' to check)"
NDK_DIR=$ANDROID_NDK_ROOT
register_var_option "--ndk-dir=<path>" NDK_DIR "NDK installation directory"
ABCC_DIR=$PROGDIR
register_var_option "--abcc-dir=<path>" ABCC_DIR "Compiler app directory"
BUILD_DIR=/tmp/ndk-$USER/build
register_var_option "--build-dir=<path>" BUILD_DIR "Specify temporary build dir"
OUT_DIR=/tmp/ndk-$USER/out
register_var_option "--out-dir=<path>" OUT_DIR "Specify output directory directly"
ABIS=
register_var_option "--abis=<target>" ABIS "List which targets you use (comma for split)"
DEFAULT_TMP_SRC_DIR=/tmp/ndk-$USER/ndk-toolchain-source-`date +%s`
SRC_DIR=$DEFAULT_TMP_SRC_DIR
register_var_option "--src-dir=<path>" SRC_DIR "Specify an existing toolchain source"
ONLY_ASSETS=
do_only_assets_option () { ONLY_ASSETS=yes; }
register_option "--only-assets" do_only_assets_option "Build toolchain only under prebuilts/assets/ instead of whole app"
DEBUG=
do_debug_option () { DEBUG=yes; }
register_option "--no-share-system-uid" do_debug_option "Just for testing. Be careful of device directory permission issue!"
TESTING=
do_testing_option () { TESTING=yes; }
register_option "--testing" do_testing_option "Package all prebuilts into abcc for testing"
NO_REBUILD_ASSETS=
do_no_rebuild_assets_option () { NO_REBUILD_ASSETS=yes; }
register_option "--no-rebuild-assets" do_no_rebuild_assets_option "Use existing toolchain prebuilt assets instead of rebuilding them"
register_jobs_option
PROGRAM_PARAMETERS=""
PROGRAM_DESCRIPTION=\
"This script can be used to build abcc, which contains all toolchain
we need for on-device compilation. This script also needs SDK with binaries,
like ant, aapt, android, ...etc, since they are necessary to produce APK."
extract_parameters "$@"
ABIS=$(commas_to_spaces $ABIS)
test -z "$ABIS" && ABIS="$PREBUILT_ABIS"
BUILDTOOLS=$NDK_DIR/build/tools
ABCC_PREBUILT_ASSETS=$ABCC_DIR/prebuilts/assets
ABCC=`basename $ABCC_DIR`
FLAGS=
test "$VERBOSE" = "yes" && FLAGS=$FLAGS" --verbose"
test "$VERBOSE2" = "yes" && FLAGS=$FLAGS" --verbose"
FLAGS="$FLAGS -j$NUM_JOBS"
test "$TESTING" = "yes" && FLAGS=$FLAGS" --testing"
#
# First: Build toolchain assets
#
if [ "$NO_REBUILD_ASSETS" = "yes" ]; then
test -z "`ls $ABCC_PREBUILT_ASSETS 2> /dev/null`" && dump "[WARNING] No toolchain assets found!"
else
test "$SRC_DIR" != "$DEFAULT_TMP_SRC_DIR" && check_toolchain_src_dir "$SRC_DIR"
test "$SRC_DIR" = "$DEFAULT_TMP_SRC_DIR" && run $BUILDTOOLS/download-toolchain-sources.sh $SRC_DIR
run rm -rf $ABCC_PREBUILT_ASSETS/*
for ABI in $ABIS; do
run $BUILDTOOLS/build-on-device-toolchain.sh --ndk-dir=$NDK_DIR --build-dir=$BUILD_DIR --out-dir=$ABCC_PREBUILT_ASSETS/$ABI --abi=$ABI --no-sync $FLAGS $SRC_DIR
fail_panic "Could not build device toolchain."
done
fi
test "$ONLY_ASSETS" = "yes" && exit
#
# Second: Check SDK
#
test -z "$SDK_DIR" && dump "--sdk-dir is required." && exit 1
test ! -f "$SDK_DIR/tools/android" && dump "--sdk-dir is not a valid SDK." && exit 1
# FIX: the error message was missing its "dump" call, so the bare string was
# executed as a command (failing) and the "exit 1" after it never ran.
test `$SDK_DIR/tools/android list target | grep '^id' | wc -l` -eq 0 && dump "Please download at least one target first." && exit 1
# Ask users for SDK configuration
if [ `$SDK_DIR/tools/android list target | grep '^id' | wc -l` -ne 1 ] && [ -z "$SDK_TARGET" ]; then
DEFAULT_TARGET="`$SDK_DIR/tools/android list target | grep '^id' | head -n 1 | awk '{print $4}'`"
echo "* Which target do you want? [$DEFAULT_TARGET]"
for line in "`$SDK_DIR/tools/android list target | grep '^id'`"; do
echo "-- `echo $line | awk '{print $4}'`"
done
echo ""
read SDK_TARGET
test -z "$SDK_TARGET" && SDK_TARGET=$DEFAULT_TARGET
elif [ -z "$SDK_TARGET" ]; then
SDK_TARGET=`$SDK_DIR/tools/android list target | grep '^id' | awk '{print $4}'`
fi
dump "SDK target: $SDK_TARGET"
#
# Third: Build apk
#
run rm -rf $BUILD_DIR
run mkdir -p $BUILD_DIR $OUT_DIR
run cd $BUILD_DIR
run cp -a $ABCC_DIR $ABCC
run cd $BUILD_DIR/$ABCC
run $SDK_DIR/tools/android update project -p . -t "$SDK_TARGET"
if [ $? -ne 0 ]; then
dump "Cannot create build.xml. Abort."
exit 1
fi
# One APK per requested ABI, built against the matching prebuilt asset dir.
for ABI in $ABIS; do
run rm -rf obj libs
run $NDK_DIR/ndk-build -B APP_ABI=$ABI -C jni
fail_panic "Build ndk-build failed. Abort."
if [ "$DEBUG" = "yes" ]; then
run rm -f AndroidManifest.xml
run cp -a AndroidManifest.xml.debug AndroidManifest.xml
run ant debug -Dasset.dir=prebuilts/assets/$ABI
fail_panic "Build dex failed. Abort."
run cp -a bin/$ABCC-debug.apk $OUT_DIR/$ABCC-$ABI.apk
else # DEBUG != yes
run ant release -Dasset.dir=prebuilts/assets/$ABI
fail_panic "Build dex failed. Abort."
run cp -a bin/$ABCC-release-unsigned.apk $OUT_DIR/$ABCC-$ABI-unsigned.apk
fi
done
run cd $OUT_DIR
run rm -rf $BUILD_DIR
dump "Done. Compiler app is under $OUT_DIR"
test "$DEBUG" != "yes" && dump "[WARNING] APK has not been signed nor aligned!"
exit 0
| true
|
389de7b504053a95c7a89881a068f3094285522a
|
Shell
|
jhunkeler/spm_packages
|
/python/build.sh
|
UTF-8
| 2,529
| 3.375
| 3
|
[] |
no_license
|
#!/bin/bash -x
# spm package recipe for CPython. The prepare/build/package functions are
# invoked by the spm build driver, which supplies the _prefix, _pkgdir,
# _runtime and _maxjobs variables — assumed, TODO confirm against the driver.
name=python
version=3.8.2
_basever=${version%.*}
revision=0
sources=(
"https://www.python.org/ftp/python/${version}/Python-${version}.tar.xz"
)
build_depends=(
"grep"
"sed"
"pkgconf"
"xz"
)
depends=(
"bzip2"
"gdbm"
"gzip"
"libexpat"
"libffi"
"ncurses"
"openssl==1.1.1d"
"tar"
"readline"
"sqlite"
"zlib"
)
# Linux-only runtime dependency (libuuid comes from e2fsprogs there).
[[ $(uname) == Linux ]] && depends+=("e2fsprogs")
# Skip tk during a bootstrap build to break the circular dependency.
if [[ -z $bootstrap ]]; then
dep="tk==8.6.9"
build_depends+=($dep)
depends+=($dep)
fi
lib_type=so
# Unpack the source and record the platform's shared-library suffix.
function prepare() {
tar xf Python-${version}.tar.xz
cd Python-${version}
if [[ $(uname -s) == Darwin ]]; then
lib_type=dylib
fi
}
# Configure and compile; headers for ncursesw/uuid live under _runtime.
function build() {
CFLAGS="-I${_runtime}/include/ncursesw ${CFLAGS}"
CFLAGS="-I${_runtime}/include/uuid ${CFLAGS}"
CPPFLAGS="${CFLAGS}"
export CFLAGS
export CPPFLAGS
if [[ $(uname -s) == Darwin ]]; then
CFLAGS="${CFLAGS} -I/usr/X11/include"
LDFLAGS="${LDFLAGS} -L/usr/X11/lib"
darwin_opt="--disable-framework"
fi
./configure \
--prefix="${_prefix}" \
--libdir="${_prefix}/lib" \
${darwin_opt} \
--enable-ipv6 \
--enable-loadable-sqlite-extensions \
--enable-shared \
--with-tcltk-includes="$(pkg-config --cflags tcl) $(pkg-config --cflags tk)" \
--with-tcltk-libs="$(pkg-config --libs tcl) $(pkg-config --libs tk)" \
--with-computed-gotos \
--with-dbmliborder=gdbm:ndbm \
--with-pymalloc \
--with-system-expat \
--without-ensurepip CFLAGS="$CFLAGS" CPPFLAGS="$CPPFLAGS" LDFLAGS="$LDFLAGS"
make -j${_maxjobs}
}
# Stage into _pkgdir, strip caches, and add the conventional unversioned
# symlinks (python, idle, pydoc, ...).
function package() {
make install DESTDIR="${_pkgdir}"
echo "Removing __pycache__ directories..."
find "${_pkgdir}" -name "__pycache__" | xargs rm -rf
ln -s python3 "${_pkgdir}${_prefix}"/bin/python
ln -s python3-config "${_pkgdir}${_prefix}"/bin/python-config
ln -s idle3 "${_pkgdir}${_prefix}"/bin/idle
ln -s pydoc3 "${_pkgdir}${_prefix}"/bin/pydoc
ln -s python${_basever}.1 "${_pkgdir}${_prefix}"/share/man/man1/python.1
# Make the shared library writable so later strip/patch steps can touch it.
if [[ -f "${_pkgdir}${_prefix}"/lib/libpython${_basever}m.${lib_type} ]]; then
chmod 755 "${_pkgdir}${_prefix}"/lib/libpython${_basever}m.${lib_type}
fi
if [[ -f "${_pkgdir}${_prefix}"/lib/libpython${_basever%.*}.${lib_type} ]]; then
chmod 755 "${_pkgdir}${_prefix}"/lib/libpython${_basever%.*}.${lib_type}
fi
}
| true
|
05d0d598cd936de50b84cf8e1e5af202016ed2a1
|
Shell
|
nunet-io/ai-dsl
|
/ontology/tools/sumo_install_system.sh
|
UTF-8
| 1,653
| 2.78125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Install the SUMO/SigmaKEE toolchain for the current user: Tomcat, WordNet,
# the E theorem prover, the SigmaKEE and SUMO repositories, and the shell
# environment entries they require in ~/.bashrc.
# comment out the following lines to turn off line-by-line mode
set -x
trap read debug
# install everything on sumo directory under home
cd ~
mkdir sumo
cd sumo
mkdir workspace
mkdir Programs
cd Programs
wget 'https://archive.apache.org/dist/tomcat/tomcat-8/v8.5.23/bin/apache-tomcat-8.5.23.zip'
wget 'http://wordnetcode.princeton.edu/3.0/WordNet-3.0.tar.gz'
wget 'http://wwwlehre.dhbw-stuttgart.de/~sschulz/WORK/E_DOWNLOAD/V_2.0/E.tgz'
tar -xvzf E.tgz
unzip apache-tomcat-8.5.23.zip
rm apache-tomcat-8.5.23.zip
cd ~/sumo/Programs/apache-tomcat-8.5.23/bin
chmod 777 *
cd ../webapps
chmod 777 *
cd ~/sumo/workspace/
sudo apt-get install git
git clone https://github.com/ontologyportal/sigmakee
git clone https://github.com/ontologyportal/sumo
cd ~
mkdir .sigmakee
cd .sigmakee
mkdir KBs
cp -R ~/sumo/workspace/sumo/* KBs
me="$(whoami)"
cp ~/sumo/workspace/sigmakee/config.xml ~/.sigmakee/KBs
# Point the SigmaKEE config at the current user's home.
sed -i "s/theuser/$me/g" KBs/config.xml
cd ~/sumo/Programs
gunzip WordNet-3.0.tar.gz
tar -xvf WordNet-3.0.tar
cp WordNet-3.0/dict/* ~/.sigmakee/KBs/WordNetMappings/
# Build and install the E theorem prover from source.
cd ~/sumo/Programs/E
sudo apt-get install make
sudo apt-get install gcc
./configure
make
make install
cd ~
sudo apt-get install graphviz
echo "export SIGMA_HOME=~/.sigmakee" >> .bashrc
echo "export SIGMA_SRC=~/sumo/workspace/sigmakee" >> .bashrc
echo "export ONTOLOGYPORTAL_GIT=~/sumo/workspace" >> .bashrc
# FIX: escape the inner $CATALINA_OPTS so the written line appends to the
# variable at shell startup; previously it expanded at install time (usually
# to empty), freezing a stale value into .bashrc.
echo "export CATALINA_OPTS=\"\$CATALINA_OPTS -Xms500M -Xmx7g\"" >> .bashrc
echo "export CATALINA_HOME=~/sumo/Programs/apache-tomcat-8.5.23" >> .bashrc
source .bashrc
cd ~/sumo/workspace/sigmakee
sudo add-apt-repository universe
sudo apt-get update
sudo apt-get install ant
ant
| true
|
b3b87a8f563ce5178f5ba1a35d4c72af4d41f934
|
Shell
|
uhulinux/ub-ubk4
|
/qt5/install
|
UTF-8
| 1,257
| 2.875
| 3
|
[] |
no_license
|
#!/bin/sh -eux
# Install step for the UHU-Linux qt5 package: stage the build into
# UB_INSTALLDIR, scrub build-dir references from .prl files, and add
# /usr/bin symlinks plus pixmaps for the Qt tool .desktop files.
cd $UB_OBJECTDIR
make INSTALL_ROOT="$UB_INSTALLDIR" install
# Drop QMAKE_PRL_BUILD_DIR because reference the build dir
find "$UB_INSTALLDIR/usr/lib" -type f -name '*.prl' \
-exec sed -i -e '/^QMAKE_PRL_BUILD_DIR/d' {} \;
# symlink into /usr/bin for the sake of the .desktop files
ln -s ../lib/qt5/bin/assistant "$UB_INSTALLDIR"/usr/bin/assistant-qt5
ln -s ../lib/qt5/bin/designer "$UB_INSTALLDIR"/usr/bin/designer-qt5
ln -s ../lib/qt5/bin/linguist "$UB_INSTALLDIR"/usr/bin/linguist-qt5
ln -s ../lib/qt5/bin/qdbusviewer "$UB_INSTALLDIR"/usr/bin/qdbusviewer-qt5
# icons for the .desktop files
install -Dm644 $UB_COMPILEDIR/qttools/src/assistant/assistant/images/assistant-128.png \
"$UB_INSTALLDIR"/usr/share/pixmaps/assistant-qt5.png
install -Dm644 $UB_COMPILEDIR/qttools/src/designer/src/designer/images/designer.png \
"$UB_INSTALLDIR"/usr/share/pixmaps/designer-qt5.png
install -Dm644 $UB_COMPILEDIR/qttools/src/linguist/linguist/images/icons/linguist-128-32.png \
"$UB_INSTALLDIR"/usr/share/pixmaps/linguist-qt5.png
install -Dm644 $UB_COMPILEDIR/qttools/src/qdbus/qdbusviewer/images/qdbusviewer-128.png \
"$UB_INSTALLDIR"/usr/share/pixmaps/qdbusviewer-qt5.png
| true
|
ab179a56555f3c4b7b4193149dedba81e997a530
|
Shell
|
glimonta/home
|
/setup_env.sh
|
UTF-8
| 11,117
| 2.84375
| 3
|
[] |
no_license
|
#!/bin/bash
# Personal workstation bootstrap: base packages, git identity, oh-my-zsh and
# the dein.vim plugin manager. The sections below then write the dotfiles.
sudo apt install vim git tmux zsh curl
git config --global user.name "Gabriela Limonta"
git config --global user.email "glimonta@gmail.com"
git config --global core.editor vim
# oh-my-zsh installer, piped from upstream (runs in a child shell).
sh -c "$(curl -fsSL https://raw.githubusercontent.com/robbyrussell/oh-my-zsh/master/tools/install.sh)"
curl https://raw.githubusercontent.com/Shougo/dein.vim/master/bin/installer.sh > installer.sh
sh ./installer.sh ~/.cache/dein
# Write ~/.tmux.conf. NOTE(review): the heredoc delimiter is intentionally
# left unquoted — the backslash-newlines in the "bind r" stanza below rely on
# shell line-joining to produce a single tmux command. This also means any
# $VAR in the payload (e.g. $TERM in a comment) expands at install time —
# confirm that is acceptable before changing.
cat << EOF > ~/.tmux.conf
# Source: https://raw.github.com/tangledhelix/dotfiles/master/tmux.conf
unbind C-a
unbind C-b
set -g prefix C-a
# Ring the bell if any background window rang a bell
set -g bell-action any
# Default termtype. If the rcfile sets $TERM, that overrides this value.
set -g default-terminal screen-256color
# Watch for activity in background windows
setw -g monitor-activity on
# Keep your finger on ctrl, or don't
bind-key ^D detach-client
bind-key ^C new-window
# Create splits and vertical splits
#bind-key v split-window -h
#bind-key ^V split-window -h
#bind-key s split-window
#bind-key ^S split-window
unbind -
unbind _
unbind '\'
unbind '|'
unbind s
unbind C-s
unbind v
unbind C-v
bind-key - split-window
bind-key _ split-window
bind-key s split-window
bind-key C-s split-window
bind-key \ split-window -h
bind-key | split-window -h
bind-key v split-window -h
bind-key C-v split-window -h
# Pane resize in all four directions using vi bindings.
# Can use these raw but I map them to Cmd-Opt-<h,j,k,l> in iTerm2.
# http://tangledhelix.com/blog/2012/04/28/iterm2-keymaps-for-tmux/
bind-key J resize-pane -D
bind-key K resize-pane -U
bind-key H resize-pane -L
bind-key L resize-pane -R
bind-key j previous-window
# Use vi keybindings for tmux commandline input.
# Note that to get command mode you need to hit ESC twice...
set -g status-keys vi
# Use vi keybindings in copy and choice modes
setw -g mode-keys vi
set -g history-limit 65535
setw -g aggressive-resize on
bind R refresh-client
bind a send-key C-a
# easily toggle synchronization (mnemonic: e is for echo)
# sends input to all panes in a given window.
bind e setw synchronize-panes on
bind E setw synchronize-panes off
# set first window to index 1 (not 0) to map more to the keyboard layout...
set -g base-index 1
setw -g pane-base-index 1
# reload tmux config
unbind r
bind r \
source-file ~/.tmux.conf \;\
display 'Reloaded tmux config.'
# pass through xterm keys
set -g xterm-keys on
# color scheme (style based on vim-powerline)
set -g status-left-length 52
set -g status-right-length 451
set -g status-fg white
set -g status-bg colour234
set -g window-status-activity-attr bold
set -g pane-border-fg colour245
set -g pane-active-border-fg colour46
set -g message-fg colour16
set -g message-bg colour221
set -g message-attr bold
set -g status-left '#[fg=black,bg=white,bold] ⧉ #S #[fg=white,bg=black,nobold]'
set -g status-right '#(whoami)@#h %F %T'
set -g window-status-format " ☐ #I #W "
set -g window-status-current-format "#[fg=black,bg=green] ☑ #I#[bold] #W #[fg=white,bg=black,nobold]"
set -g status-interval 1
source-file ~/.tmux.airline.conf
set -sg escape-time 0
EOF
# Write the tmuxline-generated statusbar theme sourced by ~/.tmux.conf.
# The payload contains no $ or backtick characters, so the unquoted
# delimiter is harmless here.
cat << EOF > ~/.tmux.airline.conf
# This tmux statusbar config was created by tmuxline.vim
# on Sun, 16 Mar 2014
set -g status-bg 'colour234'
set -g message-command-fg 'colour231'
set -g status-justify 'left'
set -g status-left-length '100'
set -g status 'on'
set -g pane-active-border-fg 'colour254'
set -g message-bg 'colour31'
set -g status-right-length '100'
set -g status-right-attr 'none'
set -g message-fg 'colour231'
set -g message-command-bg 'colour31'
set -g status-attr 'none'
set -g pane-border-fg 'colour240'
set -g status-left-attr 'none'
setw -g window-status-fg 'colour250'
setw -g window-status-attr 'none'
setw -g window-status-activity-bg 'colour234'
setw -g window-status-activity-attr 'none'
setw -g window-status-activity-fg 'colour250'
setw -g window-status-separator ''
setw -g window-status-bg 'colour234'
set -g status-left '#[fg=colour16,bg=colour254,bold] #S #[fg=colour254,bg=colour234,nobold,nounderscore,noitalics]'
set -g status-right '#[fg=colour236,bg=colour234,nobold,nounderscore,noitalics]#[fg=colour247,bg=colour236] %Y-%m-%d %T #[fg=colour252,bg=colour236,nobold,nounderscore,noitalics]#[fg=colour235,bg=colour252] #(whoami)@#h '
setw -g window-status-format '#[fg=colour244,bg=colour234] #I #[fg=colour250,bg=colour234] #W '
setw -g window-status-current-format '#[fg=colour234,bg=colour31,nobold,nounderscore,noitalics]#[fg=colour117,bg=colour31] #I #[fg=colour231,bg=colour31,bold] #W #[fg=colour31,bg=colour234,nobold,nounderscore,noitalics]'
EOF
# Write ~/.vimrc verbatim.
# FIX: the heredoc delimiter must be quoted. With an unquoted EOF the shell
# performs command substitution inside the payload, so the backticks in
# "map <F10> `[v`[" were executed as a command and the mapping was written
# corrupted. The vimrc content uses no intentional shell expansion, so a
# literal ('EOF') heredoc is correct.
cat << 'EOF' > ~/.vimrc
" Dein.vim config
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
if &compatible
set nocompatible
endif
set runtimepath+=~/.cache/dein/repos/github.com/Shougo/dein.vim
if dein#load_state('~/.cache/dein')
call dein#begin('~/.cache/dein/')
call dein#add('Shougo/neocomplete.vim')
call dein#add('Shougo/vimproc.vim', {'build' : 'make'})
call dein#add('vim-airline/vim-airline')
call dein#add('scrooloose/nerdtree')
call dein#add('jistr/vim-nerdtree-tabs')
call dein#add('luochen1990/rainbow')
call dein#add('chrisbra/csv.vim')
call dein#add('daviddavis/vim-colorpack')
call dein#add('vim-scripts/matchit.zip')
call dein#add('unterzicht/vim-virtualenv')
call dein#add('klen/python-mode')
call dein#add('mhinz/vim-signify')
call dein#add('othree/html5.vim')
call dein#add('pangloss/vim-javascript')
call dein#add('scrooloose/nerdcommenter')
call dein#add('sjl/gundo.vim')
call dein#add('terryma/vim-multiple-cursors')
call dein#add('tpope/vim-fugitive')
call dein#add('tpope/vim-surround')
call dein#add('vim-jp/vim-cpp')
call dein#add('shougo/neocomplete.vim')
call dein#add('scrooloose/syntastic')
" call dein#add('vim-latex/vim-latex')
call dein#end()
call dein#save_state()
endif
"""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
if has("syntax")
syntax enable
endif
if has("autocmd")
filetype plugin indent on
endif
set background=dark
set expandtab
set shiftwidth=2
set softtabstop=0
set tabstop=8
set cryptmethod=blowfish
set hls
set hlsearch
set incsearch
set mouse=a
set nobackup
set notimeout
"set noshowmode
set number
set showcmd
set showmatch
set t_Co=256
set undodir=~/.vim/undo/
set undolevels=16384
set ttimeout
set ttimeoutlen=0
set timeout
set timeoutlen=100000000000000000
let g:netrw_altv = 1
" Fn mappings
map <F2> :set cursorline!<CR>
map <F3> :set cursorcolumn!<CR>
map <F4> :call ToggleWhiteSpaceColor()<CR>
map <F5> :set wrap!<CR>
map <F6> :if exists("g:syntax_on") <Bar> syntax off <Bar> else <Bar> syntax enable <Bar> endif<CR>
map <F7> :set scrollbind!<CR>
map <F8> :noh<CR>
map <F10> `[v`[
map <F11> @@
map <F13> @:
" Vim-airline
set laststatus=2
let g:airline_powerline_fonts = 1
let g:airline#extensions#tabline#enabled = 1
let g:powerline_pycmd = "py3"
" vim-nerdtree-tabs
map <Leader>n <plug>NERDTreeTabsToggle<CR>
" rainbow parentheses
let g:rainbow_active = 1
" neocomplcache
let g:acp_enableAtStartup = 0
let g:neocomplete#enable_at_startup = 1
let g:neocomplete#enable_smart_case = 1
let g:neocomplete#sources#syntax#min_keyword_length = 3
inoremap <expr><TAB> pumvisible() ? "\<C-n>" : "\<TAB>"
autocmd FileType css setlocal omnifunc=csscomplete#CompleteCSS
autocmd FileType html,markdown setlocal omnifunc=htmlcomplete#CompleteTags
autocmd FileType javascript setlocal omnifunc=javascriptcomplete#CompleteJS
autocmd FileType python setlocal omnifunc=pythoncomplete#Complete
autocmd FileType xml setlocal omnifunc=xmlcomplete#CompleteTags
" syntastic
let g:syntastic_c_compiler_options = ' -std=gnu11'
let g:syntastic_cpp_compiler_options = ' -std=gnu++11'
" LaTeX Suite
let g:Tex_ViewRule_pdf = 'evince'
EOF
# Write ~/.zshrc verbatim.
# FIX: the heredoc delimiter must be quoted. With an unquoted EOF, $ZSH (not
# set in this script's environment) expanded to empty, writing the broken
# line "source /oh-my-zsh.sh", and the backticks around `alias` in a comment
# were command-substituted. The payload uses no intentional shell expansion.
cat << 'EOF' > ~/.zshrc
# Path to your oh-my-zsh installation.
export ZSH=/home/gabriela/.oh-my-zsh
# Set name of the theme to load.
# Look in ~/.oh-my-zsh/themes/
# Optionally, if you set this to "random", it'll load a random theme each
# time that oh-my-zsh is loaded.
ZSH_THEME="ys"
# Uncomment the following line to use case-sensitive completion.
# CASE_SENSITIVE="true"
# Uncomment the following line to use hyphen-insensitive completion. Case
# sensitive completion must be off. _ and - will be interchangeable.
# HYPHEN_INSENSITIVE="true"
# Uncomment the following line to disable bi-weekly auto-update checks.
# DISABLE_AUTO_UPDATE="true"
# Uncomment the following line to change how often to auto-update (in days).
# export UPDATE_ZSH_DAYS=13
# Uncomment the following line to disable colors in ls.
# DISABLE_LS_COLORS="true"
# Uncomment the following line to disable auto-setting terminal title.
# DISABLE_AUTO_TITLE="true"
# Uncomment the following line to enable command auto-correction.
# ENABLE_CORRECTION="true"
# Uncomment the following line to display red dots whilst waiting for completion.
# COMPLETION_WAITING_DOTS="true"
# Uncomment the following line if you want to disable marking untracked files
# under VCS as dirty. This makes repository status check for large repositories
# much, much faster.
# DISABLE_UNTRACKED_FILES_DIRTY="true"
# Uncomment the following line if you want to change the command execution time
# stamp shown in the history command output.
# The optional three formats: "mm/dd/yyyy"|"dd.mm.yyyy"|"yyyy-mm-dd"
# HIST_STAMPS="mm/dd/yyyy"
# Would you like to use another custom folder than $ZSH/custom?
# ZSH_CUSTOM=/path/to/new-custom-folder
# Which plugins would you like to load? (plugins can be found in ~/.oh-my-zsh/plugins/*)
# Custom plugins may be added to ~/.oh-my-zsh/custom/plugins/
# Example format: plugins=(rails git textmate ruby lighthouse)
# Add wisely, as too many plugins slow down shell startup.
plugins=(git)
# User configuration
export PATH="/usr/local/bin:/usr/bin:/bin:/usr/local/games:/usr/games:/usr/local/bison/bin/"
# export MANPATH="/usr/local/man:$MANPATH"
source $ZSH/oh-my-zsh.sh
# You may need to manually set your language environment
# export LANG=en_US.UTF-8
# Preferred editor for local and remote sessions
# if [[ -n $SSH_CONNECTION ]]; then
# export EDITOR='vim'
# else
# export EDITOR='mvim'
# fi
# Compilation flags
# export ARCHFLAGS="-arch x86_64"
# ssh
# export SSH_KEY_PATH="~/.ssh/dsa_id"
# Set personal aliases, overriding those provided by oh-my-zsh libs,
# plugins, and themes. Aliases can be placed here, though oh-my-zsh
# users are encouraged to define aliases within the ZSH_CUSTOM folder.
# For a full list of active aliases, run `alias`.
#
# Example aliases
# alias zshconfig="mate ~/.zshrc"
# alias ohmyzsh="mate ~/.oh-my-zsh"
EOF
# Powerline fonts
# Install the Powerline glyph font system-wide and refresh the font cache,
# then generate an SSH key for the configured git identity.
wget https://github.com/Lokaltog/powerline/raw/develop/font/PowerlineSymbols.otf https://github.com/Lokaltog/powerline/raw/develop/font/10-powerline-symbols.conf
sudo mv PowerlineSymbols.otf /usr/share/fonts/
sudo fc-cache -vf
sudo mv 10-powerline-symbols.conf /etc/fonts/conf.d/
echo "Finished setting up. Please open vim and type the command :call dein#install() to install dein.vim plugins"
ssh-keygen -t rsa -b 4096 -C "glimonta@gmail.com"
| true
|
f4ec97a6fff699975722e24231958377e154e706
|
Shell
|
ColtMagri/CSI230
|
/color.sh
|
UTF-8
| 619
| 4.1875
| 4
|
[] |
no_license
|
#!/bin/bash
# Demonstration script: read the user's favorite color and echo it back
# rendered in that color via ANSI escape codes. Same behavior as the classic
# case-statement version, written as a guard-style if/elif chain.
c_red='\033[0;31m'
c_green='\033[0;32m'
c_yellow='\033[0;33m'
c_blue='\033[0;34m'
c_magenta='\033[0;35m'
c_reset='\033[0m'
read -p "What is your favorite color (red, green, blue, yellow or magenta)? " REPLY
# Normalize to upper case so matching is case-insensitive.
REPLY=${REPLY^^}
if [[ "$REPLY" == "RED" ]]; then
    selected_color=$c_red
elif [[ "$REPLY" == "GREEN" ]]; then
    selected_color=$c_green
elif [[ "$REPLY" == "YELLOW" ]]; then
    selected_color=$c_yellow
elif [[ "$REPLY" == "BLUE" ]]; then
    selected_color=$c_blue
elif [[ "$REPLY" == "MAGENTA" ]]; then
    selected_color=$c_magenta
else
    echo "Invalid Output"
    exit 1
fi
echo "${REPLY} selected"
echo -e "${selected_color}Your selected color is ${REPLY}${c_reset}"
| true
|
e29fc91979724f195438b90f96adbad566cd4511
|
Shell
|
n0rmanc/fluentd-elasticsearch-container
|
/scripts/setup_plugins.sh
|
UTF-8
| 1,447
| 3.34375
| 3
|
[] |
no_license
|
#!/bin/bash
# One-shot fluentd configuration generator for a Docker image: writes
# /fluentd/etc/fluent.conf (forward input, rails-log parser filter,
# elasticsearch output, monitor agent) and pushes an index template to ES.
# The /.plugin_setup marker file makes the script idempotent across restarts.
if [ -f /.plugin_setup ]; then
echo "Plugins already setup, /.plugin_setup file exists"
exit 0
fi
echo "Initialized plugin setup"
FLUENT_CONF_FILE=/fluentd/etc/fluent.conf
# Docker-link env vars take precedence over localhost defaults; explicit
# ES_HOST/ES_PORT override both.
DEFAULT_HOST=${ELASTICSEARCH_PORT_9200_TCP_ADDR:-localhost}
DEFAULT_PORT=${ELASTICSEARCH_PORT_9200_TCP_PORT:-9200}
ES_HOST=${ES_HOST:-$DEFAULT_HOST}
ES_PORT=${ES_PORT:-$DEFAULT_PORT}
ES_INDEX=${ES_INDEX:-fluentd}
ES_TYPE=${ES_TYPE:-fluentd}
# NOTE(review): MATCH_PATTERN is set here but never referenced below —
# the <match **.*> block is hard-coded; confirm whether that is intended.
MATCH_PATTERN=${MATCH_PATTERN:-docker.**}
# Clear the origin fluentd.conf
> $FLUENT_CONF_FILE
# Export new config
echo "
<source>
@type forward
@id input1
port 24224
</source>
" >> $FLUENT_CONF_FILE
echo "
<filter **.parse>
type parser
format rails_log_to_time
time_format %Y-%m-%dT%H:%M:%S.%N
key_name log
reserve_data yes
hash_value_field @timestamp
</filter>" >> $FLUENT_CONF_FILE
echo "
<match **.*>
@type copy
# <store>
# @type stdout
# </store>
<store>
@type elasticsearch
host $ES_HOST
port $ES_PORT
logstash_format true
index_name $ES_INDEX
type_name $ES_TYPE
time_key_format %Y-%m-%dT%H:%M:%S.%N
include_tag_key true
</store>
</match>" >> $FLUENT_CONF_FILE
echo "
<source>
@type monitor_agent
bind 0.0.0.0
port 24220
</source>" >> $FLUENT_CONF_FILE
touch /.plugin_setup
echo "Finished setting up plugins on file $FLUENT_CONF_FILE"
# Register the logstash index template with Elasticsearch.
curl -XPUT $ES_HOST:$ES_PORT/_template/template_logstash -d@elasticsearch-fluentd-template.json
| true
|
a311980df7689f1b8af84c95f97af7d985ce60a1
|
Shell
|
jkim10/COMS-4181-Final-Project
|
/scripts/client_gen_key.sh
|
UTF-8
| 288
| 3.75
| 4
|
[] |
no_license
|
#!/bin/bash
# Generate a 2048-bit RSA private key at the given destination path unless a
# file already exists there, then restrict it to owner read-only (0400).
#
# Usage: client_gen_key.sh <dest>
if [ $# -ne 1 ]; then
    echo "Usage: ./client_gen_keypair <dest>"
    exit 1
fi
# Only create private key if it does not exist
FILE=$1
if test -f "$FILE"; then
    echo "$FILE exists."
else
    # Create a private key.
    # Fix: quote the destination so paths containing spaces work.
    openssl genrsa \
        -out "$FILE" 2048
fi
chmod 400 "$FILE"
| true
|
13e3567e53112cbc542b1ba8039a2535e36cb34d
|
Shell
|
mouri11/OS_bash_clg
|
/bashscripts/clg/curdir.sh
|
UTF-8
| 201
| 3.140625
| 3
|
[] |
no_license
|
#!/bin/bash
#A shell script to read a directory name from the terminal and will display
#only the name and permission of the files
clear
read -p "Enter a valid directory name: " direc
# Fix: validate the input and quote it; the unquoted $direc word-split on
# spaces and an invalid name produced raw ls noise instead of a clear error.
if [ ! -d "$direc" ]; then
    echo "Error: '$direc' is not a valid directory" >&2
    exit 1
fi
# -f lists in directory order (no sort) and includes dotfiles.
ls -f -l "$direc"
| true
|
f17ddbad365e6d3e4cbda365036044eec9f1cc12
|
Shell
|
petronny/aur3-mirror
|
/tintwizard-svn/PKGBUILD
|
UTF-8
| 1,158
| 2.703125
| 3
|
[] |
no_license
|
# Maintainer: Ray Griffin <rorgoroth@googlemail.com>
# Contributor: zajca <zajcaa@gmail.com>
# Arch Linux PKGBUILD for tintwizard, checked out from Google Code SVN at a
# pinned revision (pkgver doubles as the SVN revision number).
pkgname=tintwizard-svn
pkgver=209
pkgrel=2
pkgdesc="This project aims to provide an easy way to change the appearance of tint2. Through an easy-to-use graphical user interface, you can generate configs and apply to them tint2."
arch=('any')
url="http://code.google.com/p/tintwizard/"
license=('GPL3')
depends=('python2' 'pygtk')
optdepends=('tint2: tint2 panel' 'tint2-svn: tint2 panel from svn')
makedepends=('subversion')
install=tintwizard.install
provides=('tintwizard')
conflicts=('tintwizard')
source=(tw.sh)
md5sums=('46a3840a03ddb19c87792db57f56193f')
_svnmod="tintwizard"
_svntrunk="http://tintwizard.googlecode.com/svn/trunk/"
# Fetch (or update) the SVN working copy at revision ${pkgver}; tintwizard is
# pure Python, so there is nothing to compile.
build() {
cd ${srcdir}
msg "Getting sources..."
if [ -d ${_svnmod}/.svn ]; then
(cd ${_svnmod} && svn up -r ${pkgver})
else
svn co ${_svntrunk} --config-dir ./ -r ${pkgver} ${_svnmod}
cd ${_svnmod}
fi
msg "SVN checkout done or server timeout"
msg "Starting make..."
}
# Install the checkout under /usr/share and the launcher wrapper as
# /usr/bin/tintwizard.
package() {
mkdir -p $pkgdir/usr/{bin,share}
cp -r $srcdir/tintwizard $pkgdir/usr/share/tintwizard
install -D -m755 $srcdir/tw.sh $pkgdir/usr/bin/tintwizard
}
| true
|
5165a9af12219d0a19d6460d60fa01f4624cd1ca
|
Shell
|
Ahmad-Magdy-Osman/SoftwareDevTools
|
/Bash Scripting/1.sh
|
UTF-8
| 154
| 2.8125
| 3
|
[] |
no_license
|
#!/bin/bash
#Write a shell script that takes any command as a parameter and displays help on that command (e.g. the result of execution of man ).
# Fix: require an argument and quote it, so a missing or unusual argument
# fails with a clear usage message instead of dropping into man's prompt.
if [ $# -lt 1 ]; then
    echo "Usage: $0 <command>" >&2
    exit 1
fi
man "$1"
| true
|
4300159b6d5a7a0e2dbe4cf5897b8c8addb98979
|
Shell
|
FatmaKhaled/cloud_LAb
|
/mainMenu.sh
|
UTF-8
| 667
| 3.453125
| 3
|
[] |
no_license
|
#!/usr/bin/bash
# Top-level menu for a file-based "database" lab exercise. Each choice
# sources a sibling script, then re-enters the menu by calling main again.
# NOTE(review): re-entry is implemented by recursion (main calls main), so
# each selection deepens the call stack — fine for a lab, but a loop would
# be the robust form.
PS3="Select Choice please: "
# Shared helper functions used by the sourced sub-scripts.
. ./mainFunctions.sh
# Ensure the storage root for all databases exists.
if [ ! -d $HOME/dataBase ]
then
mkdir $HOME/dataBase
fi
function main {
sleep .5
clear
select choice in "creat DB" "open DB" "Delete DB" "man page" "exit"
do
case $REPLY in
1) clear
sleep .5
. ./createDB.sh
main
break
;;
2)clear
sleep .5
. ./openDB.sh
main
break
;;
3)clear
sleep .5
. ./deleteDB.sh
main
break
;;
4)clear
man ./manPage.sh
main
break
;;
5) break
;;
*) clear
echo "$REPLY is not the correct choice!"
sleep .5
main
;;
esac
done
}
#clear
main
| true
|
c49e0db39a5de91cd8761e2c48550bd817f93942
|
Shell
|
youran1024/mnp_auto_test
|
/CTSRunner/SHELL/git_handle.sh
|
UTF-8
| 2,401
| 3.859375
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
: '
@File : git_pull.sh
@Author : YouRan
@Contact: YouRan@baidou.com
@Date : 2019-12-05
@Desc : 初始化仓库和更新仓库
'
# Clone or update the "cts" test repository. Usage (positional):
#   $1 ROOT_PATH   directory that contains (or will contain) the cts checkout
#   $2 UPDATE_SIGN "0" to pull an existing checkout
#   $3 ICODE_NAME  iCode user name used in the clone URL (prompted if empty)
# When a pull touches package.json (top-level or smartium/), the matching
# npm install is re-run. User-facing messages are intentionally in Chinese.
ROOT_PATH=$1
UPDATE_SIGN=$2
ICODE_NAME=$3
CTS_PACKAGE="package.json"
SMART_PACKAGE="smartium$CTS_PACKAGE"
REPO_PATH="$ROOT_PATH/cts"
SMART_PATH="$REPO_PATH/smartium"
GIT_BRANCH_UPSTREAM='--set-upstream-to'
# Install npm dependencies in the current directory ("updating npm repo").
function npm_install() {
echo "更新npm仓库,如果长时间等待,请重新执行"
# npm i --chromedriver_cdnurl=http://cdn.npm.taobao.org/dist/chromedriver
npm i --registry=https://npm.taobao.org/mirrors/npm/
npm i --registry=http://registry.npm.baidou-int.com
echo "--------------- Done ----------------"
}
# Clone the cts repo (prompting for the iCode user name if needed) and run
# npm install in both the repo root and smartium/.
function clone() {
if [ -z "$ICODE_NAME" ]; then
echo "缺少用户名,请传入iCode用户名"
echo "举例: ssh://YouRan@icode.baidou.com:8235/baidou/mbd-sqa/cts 则传入 -u YouRan"
read -p '请输入iCode用户名' -r ICODE_NAME
fi
echo "-------------- 克隆仓库 --------------"
echo "仓库存储路径:$ROOT_PATH"
GIT_CLONE="ssh://$ICODE_NAME@icode.baidou.com:8235/baidou/mbd-sqa/cts"
# NOTE(review): WORK_PATH is never set in this script — presumably exported
# by a caller; otherwise this cd exits. TODO confirm.
cd "$WORK_PATH" || exit 1
git clone "$GIT_CLONE"
cd "$REPO_PATH" || exit 1
npm_install
cd "$SMART_PATH" || exit 1
npm_install
}
# git pull, re-running npm install when either package.json changed, and
# attach the local branch to its origin counterpart when git suggests it.
function pull() {
response=$(git pull 2>&1)
result=$(echo "$response" | grep "$CTS_PACKAGE" | head -1)
result=$(echo "$result" | grep "^$CTS_PACKAGE")
if [ -n "$result" ]; then
echo "更新package.json"
# NOTE(review): CTS_PATH is never defined here — likely meant REPO_PATH.
cd "$CTS_PATH" || exit
npm_install
fi
if [[ $response =~ $SMART_PACKAGE ]]; then
echo "更新smartium/package.json"
cd "$SMART_PATH" || exit
npm_install
fi
CURRENT_BRANCH=$(git branch | grep '\*' | sed "s/* //g")
if [[ $response =~ $GIT_BRANCH_UPSTREAM ]]; then
echo "关联远程分支origin/$CURRENT_BRANCH $CURRENT_BRANCH"
git branch --set-upstream-to=origin/"$CURRENT_BRANCH" "$CURRENT_BRANCH"
fi
}
# Entry point: clone if the checkout is missing, otherwise pull when asked.
if [ ! -d "$REPO_PATH" ]; then
clone
else
if [ "$UPDATE_SIGN" == "0" ]; then
echo "-------------- 更新仓库 --------------"
cd "$REPO_PATH" || exit
CURRENT_BRANCH=$(git branch | grep '\*' | sed "s/* //g")
printf "更新分支: \x1B[0;32m%s\n\x1B[0m" "* $CURRENT_BRANCH"
pull
echo "更新完成"
# result=$(git checkout "$CURRENT_BRANCH" 2>&1 | grep 'git pull')
# if [ -n "$result" ]; then
# pull
# else
# echo "已经是最新了"
# fi
fi
fi
| true
|
2cb69878ae5736fb2521bc2451eee0362cf4ba90
|
Shell
|
indigo-dc/Accounting
|
/docker/run_on_entry.sh
|
UTF-8
| 1,598
| 2.734375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Container entrypoint for the APEL accounting REST service: inject runtime
# configuration into Django settings, set up CRL/IGTF maintenance cron jobs,
# start the daemons, then block forever to keep the container alive.
# Replace locally configured variables in apel_rest
#SECRET_KEY
sed -i "s|not_a_secure_secret|$DJANGO_SECRET_KEY|g" /var/www/html/apel_rest/settings.py
#PROVIDERS_URL
sed -i "s|provider_url|$PROVIDERS_URL|g" /var/www/html/apel_rest/settings.py
# IAM_URL
sed -i "s|\['allowed_iams'\]|$IAM_URLS|g" /var/www/html/apel_rest/settings.py
# SERVER_IAM_ID
sed -i "s|server_iam_id|$SERVER_IAM_ID|g" /var/www/html/apel_rest/settings.py
# SERVER_IAM_SECRET
sed -i "s|server_iam_secret|$SERVER_IAM_SECRET|g" /var/www/html/apel_rest/settings.py
# ALLOWED_TO_POST
sed -i "s|\['allowed_to_post'\]|$ALLOWED_TO_POST|g" /var/www/html/apel_rest/settings.py
# BANNED_FROM_POST
sed -i "s|\['banned_from_post'\]|$BANNED_FROM_POST|g" /var/www/html/apel_rest/settings.py
# ALLOWED_FOR_GET
sed -i "s|\['allowed_for_get'\]|$ALLOWED_FOR_GET|g" /var/www/html/apel_rest/settings.py
# NOTE(review): the env values are interpolated into sed scripts verbatim —
# a value containing '|' or sed metacharacters would corrupt settings.py.
# fetch the crl first
fetch-crl
# alter the fetch-crl cron to run regardless of any services
echo "# Cron job running by default every 6 hours, at 45 minutes past the hour
# with +/- 3 minutes sleep.
45 */6 * * * root /usr/sbin/fetch-crl -q -r 360" > /etc/cron.d/fetch-crl
# start apache
/usr/sbin/httpd
# start cron
/usr/sbin/crond
# start at
/usr/sbin/atd
# install IGTF trust bundle 10 minutes after start up
echo "yum -y update ca-policy-egi-core >> /var/log/IGTF-startup-update.log" | at now + 10 min
# set cronjob to update trust bundle every month
echo "0 10 1 * * root yum -y update ca-policy-egi-core >> ~/cronlog 2>&1" >> /etc/cron.d/IGTF-bundle-update
#keep docker running
while true
do
sleep 1
done
| true
|
c9bb759f3fa132fdc20a576dcdcb704712bc060e
|
Shell
|
Constantor/cybernode
|
/images/fullnode/btcd/01build.sh
|
UTF-8
| 587
| 2.8125
| 3
|
[] |
no_license
|
echo --- detecting defaults ---
go version && go env GOROOT GOPATH
echo --- get dependency manager ---
go get -u github.com/Masterminds/glide
echo --- clone btcd sources ---
git clone https://github.com/btcsuite/btcd $GOPATH/src/github.com/btcsuite/btcd
cd $GOPATH/src/github.com/btcsuite/btcd
echo --- fetch dependencies into vendor/ ---
glide install
ls -la $GOPATH/bin
echo --- build all tools found in cmd/ to $GOPATH/bin ---
# static compile to work on any Linux
CGO_ENABLED=0 go install . ./cmd/...
# record version
git rev-parse HEAD > $GOPATH/bin/VERSION
ls -la $GOPATH/bin
| true
|
da473aa9f18e3f69a72fa99f2780250873f26320
|
Shell
|
peter1000/ArchLinux_packages_p
|
/0__UNMAINTAINED__0/python-jinja-git_p/PKGBUILD
|
UTF-8
| 1,236
| 2.703125
| 3
|
[] |
no_license
|
# Maintainer: peter1000 <https://github.com/peter1000>
# Contributor: Evangelos Foutras <evangelos@foutrelis.com>
# Contributor: Peter Baldwin <bald_pete@hotmail.com>
_srcname=jinja2
pkgname=python-jinja-git_p
pkgver=2.8.r11.g9b4b20a #commit=9b4b20a OK
pkgrel=1
pkgdesc="A simple pythonic template language written in Python"
arch=('any')
url="http://jinja.pocoo.org/"
license=('BSD')
makedepends=('git' 'python-setuptools' 'python-markupsafe')
provides=("python-jinja=${pkgver}" "${pkgname}=${pkgver}")
conflicts=('python-jinja' 'python-jinja-git')
source=("${_srcname}::git+https://github.com/mitsuhiko/${_srcname}.git#commit=9b4b20a")
sha256sums=('SKIP')
pkgver() {
cur_prefix=''
cd "${srcdir}/${_srcname}"
local _tag_stripped="$(git describe --abbrev=0 --tags | sed 's/^'${cur_prefix}'//;s/-/./g')"
local _rev="$(($(git rev-list --count HEAD)-$(git rev-list --count $(git describe --abbrev=0 --tags))))"
printf "${_tag_stripped}.r${_rev}.g%s" "$(git rev-parse --short HEAD)"
}
build() {
cd "${srcdir}/${_srcname}"
python setup.py build
}
package() {
cd "${srcdir}/${_srcname}"
python setup.py install --skip-build --root="${pkgdir}" --optimize=1
install -Dm644 LICENSE "${pkgdir}/usr/share/licenses/${pkgname}/LICENSE"
}
| true
|
2a0c53f85fd046d452e878ec593b09fd38c2ca76
|
Shell
|
epic-hans/maglev_hash
|
/buildme.sh
|
UTF-8
| 987
| 3.796875
| 4
|
[] |
no_license
|
#!/bin/bash
# check lib and header file
HDRS_IN='/usr'
LIBS_IN='/usr'
find_dir_of_lib() {
local lib=$(find ${LIBS_IN} -name "lib${1}.a" -o -name "lib${1}.$SO" 2>/dev/null | head -n1)
if [ ! -z "$lib" ]; then
dirname $lib
fi
}
find_dir_of_header() {
find -L ${HDRS_IN} -path "*/$1" | head -n1 | sed "s|$1||g"
}
#gtest
GTEST_LIB=$(find_dir_of_lib gtest)
if [ -z "$GTEST_LIB" ]; then
echo " \$(error \"Fail to find gtest lib\")"
echo "sudo apt-get install -y cmake libgtest-dev && cd /usr/src/gtest && sudo cmake . && sudo make && sudo mv libgtest* /usr/lib/ && cd -"
exit 0
else
GTEST_HDR=$(find_dir_of_header gtest/gtest.h)
if [ -z "$GTEST_HDR" ]; then
echo " \$(error \"Fail to find gtest include\")"
echo "sudo apt-get install -y cmake libgtest-dev && cd /usr/src/gtest && sudo cmake . && sudo make && sudo mv libgtest* /usr/lib/ && cd -"
exit 0
fi
fi
echo $GTEST_LIB " "
echo $GTEST_HDR " "
make
| true
|
4596d9bf7049dffe8c2f6fd0a1c2fe4aa2acba1f
|
Shell
|
heychick/master
|
/文档/无标题文档 1
|
UTF-8
| 1,936
| 2.90625
| 3
|
[] |
no_license
|
#!/bin/bash
conf=/usr/local/nginx/conf/nginx.conf
echo '###################第一步安装开始###################################'
yum -y install gcc pcre-devel openssl-devel >/dev/null && echo '第一步安装成功' || echo '第一步安装失败'
cd /root/lnmp_soft
tar -zxf nginx-1.12.2.tar.gz
cd nginx-1.12.2
./configure --with-http_ssl_module --with-stream --with-http_stub_status_module >/dev/null && make >/dev/null && make install >/dev/null && echo '安装源码包nginx成功' ||echo '安装源码包nginx失败'
echo '###################第一步安装结束###################################'
echo '###################第二步安装相关lnmp软件###################################'
yum -y install mariadb mariadb-server mariadb-devel >/dev/null && echo 'mariadb安装成功' || echo 'mariadb安装失败'
yum -y install php php-fpm php-mysql >/dev/null && echo 'php安装成功' || echo 'php安装失败'
sed -i '65,71s/#//' $conf
sed -i '/SCRIPT_FILENAME/d' $conf
sed -i '/fastcgi_params/s/fastcgi_params/fastcgi.conf/' $conf
echo '###################第三步启动软件###################################'
systemctl restart mariadb && echo "启动成功mariadb" || echo "启动失败mariadb"
systemctl restart php-fpm && echo "启动成功php-fpm" || echo "启动失败php-fpm"
/usr/local/nginx/sbin/nginx && echo "启动成功nginx" || echo "启动失败nginx"
echo "<?php " >/usr/local/nginx/html/test.php
echo "\$i=web1; " >>/usr/local/nginx/html/test.php
echo "echo \$i;" >>/usr/local/nginx/html/test.php
echo "?> " >>/usr/local/nginx/html/test .php
| true
|
06cab1b3f661ee3e9a210cd746d7af0b8a770edd
|
Shell
|
stellar/go
|
/services/horizon/internal/scripts/check_release_hash/check.sh
|
UTF-8
| 2,442
| 3.625
| 4
|
[
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] |
permissive
|
#!/bin/bash
set -e
apt-get clean
apt-get update
apt-get install -y stellar-horizon=$PACKAGE_VERSION
mkdir released
cd released
wget https://github.com/stellar/go/releases/download/$TAG/$TAG-darwin-amd64.tar.gz
wget https://github.com/stellar/go/releases/download/$TAG/$TAG-linux-amd64.tar.gz
wget https://github.com/stellar/go/releases/download/$TAG/$TAG-linux-arm.tar.gz
wget https://github.com/stellar/go/releases/download/$TAG/$TAG-windows-amd64.zip
tar -xvf $TAG-darwin-amd64.tar.gz
tar -xvf $TAG-linux-amd64.tar.gz
tar -xvf $TAG-linux-arm.tar.gz
unzip $TAG-windows-amd64.zip
cd -
# Since Go 1.18 vcs (git) info is added to the binary. One of the values is:
# vcs.modified which determines if git working dir is clean. We need to
# specifically add the files below to .gitignore to make git ignore them.
touch ~/.gitignore
echo -e "check.sh\n" >> ~/.gitignore
echo -e "released/\n" >> ~/.gitignore
git config --global core.excludesFile '~/.gitignore'
git pull origin --tags
git checkout $TAG
# -keep: artifact directories are not removed after packaging
CIRCLE_TAG=$TAG go run -v ./support/scripts/build_release_artifacts -keep
echo "RESULTS"
echo "======="
echo ""
echo "compiled version"
./dist/$TAG-linux-amd64/horizon version
echo "github releases version"
./released/$TAG-linux-amd64/horizon version
echo "debian package version"
stellar-horizon version
echo ""
suffixes=(darwin-amd64 linux-amd64 linux-arm windows-amd64)
for S in "${suffixes[@]}"
do
released=""
dist=""
msg=""
if [ -f "./released/$TAG-$S.tar.gz" ]; then
released=($(shasum -a 256 ./released/$TAG-$S/horizon))
else
# windows
released=($(shasum -a 256 ./released/$TAG-$S/horizon.exe))
fi
if [ -f "./dist/$TAG-$S.tar.gz" ]; then
dist=($(shasum -a 256 ./dist/$TAG-$S/horizon))
else
# windows
dist=($(shasum -a 256 ./dist/$TAG-$S/horizon.exe))
fi
if [ $S == "linux-amd64" ]; then
path=$(which stellar-horizon)
debian=($(shasum -a 256 $path))
if [[ "$released" == "$dist" && "$dist" == "$debian" ]]; then
msg="$TAG-$S ok"
else
msg="$TAG-$S NO MATCH! github=$released compile=$dist debian=$debian"
fi
else
if [ "$released" == "$dist" ]; then
msg="$TAG-$S ok"
else
msg="$TAG-$S NO MATCH! github=$released compile=$dist"
fi
fi
echo $msg
done
| true
|
d1fe3931abb52188bb493dc55bc9730c7371abce
|
Shell
|
epicsdeb/carchivetools
|
/debian/channelarchiver-a2aproxy.postrm
|
UTF-8
| 535
| 3.328125
| 3
|
[
"LicenseRef-scancode-unknown-license-reference",
"EPICS"
] |
permissive
|
#!/bin/sh
#
set -e
# dh_installdeb will replace this with shell code automatically
# generated by other debhelper scripts.
#DEBHELPER#
case "$1" in
purge)
if grep -q 'a2aproxy' /etc/passwd; then
echo "Removing user a2aproxy"
userdel a2aproxy || echo "WARNING: problem removing user a2aproxy"
fi
;;
remove|upgrade|failed-upgrade|abort-install|abort-upgrade|disappear)
;;
*)
echo "postrm called with unknown argument \`$1'" >&2
exit 0
;;
esac
| true
|
bd5088dae73416226762efc580723cc85fd9805f
|
Shell
|
xiangyang1991/shell
|
/datacount/lib/func_count.sh
|
UTF-8
| 618
| 3.875
| 4
|
[] |
no_license
|
#!/bin/bash
count(){
local filedir=$2
local intermediate_result_dir=$1
if [ ! -d "$intermediate_result_dir" ]; then
echo "`date`: $intermediate_result_dir not exist will mkdir $intermediate_result_dir"
mkdir -p $intermediate_result_dir
fi
for source_file in `ls $filedir | grep -v ".ok"`
do
if [ ! -f "$intermediate_result_dir/$source_file" ]; then
echo -e "$(date +%Y-%m-%d\ %H:%M:%S) Add ${source_file}'s result to $intermediate_result_dir"
count_file="$intermediate_result_dir/$source_file"
cat $filedir/$source_file | awk -F "\t" '{print $9}' | sort | grep -v "^$" | uniq -c > $count_file
fi
done
}
| true
|
845a083ca13a80013bc23a6dadfc2315f79544c0
|
Shell
|
mosdeo/HOG-SVM-python
|
/docker_run.sh
|
UTF-8
| 789
| 2.59375
| 3
|
[
"MIT"
] |
permissive
|
# run training and testing on container on the server.
docker stop lky_hog_svm_python && docker rm lky_hog_svm_python
WORKDIR="/Users/lky/code_repos/HOG-SVM-python"
docker run -p 5001:5001 -it --rm \
--name lky_hog_svm_python \
--mount type=bind,source=$WORKDIR/object-detector,target=/app/object-detector,readonly \
--mount type=bind,source=$WORKDIR/data/images,target=/app/images,readonly \
--mount type=bind,source=$WORKDIR/data/features,target=/app/features \
hog_svm_python \
/bin/bash -c "cd object-detector; \
python3 extract_features.py -p ../data/images/pos -n ../data/images/neg; \
python3 train_classifier.py -p ../data/features/pos -n ../data/features/neg -c ONE_CLASS_SVM; \
python3 test_classifier.py -i ../data/images/test"
| true
|
206504bdb52984ed701cf50652ba03ff351f9783
|
Shell
|
Wei-N-Ning/gdbPit
|
/jump/alterCodePath.sh
|
UTF-8
| 1,195
| 3.5625
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# source:
# https://stackoverflow.com/questions/4037308/can-i-use-gdb-to-skip-a-line-without-having-to-type-line-numbers
# http://man7.org/linux/man-pages/man3/system.3.html
# this technique is quite useful in the cases
# where I want to bypass some if-statements
# (to reproduce a bug or to test a fixture)
# or other conditional branches
setUp() {
set -e
rm -rf /tmp/sut
mkdir /tmp/sut
}
# sut is deemed to fail (to emulate a buggy code);
# but with jump and temporary breakpoint I can
# bypass the conditional branch and go to the
# desired code path
buildSUT() {
echo '#include <stdlib.h>
static int a = 100;
int condition(int input) {
if (input > 0) {
return 1;
}
return 0;
}
int compute() {
return -a;
}
int main() {
int factor = compute();
if (! condition(factor)) {
return 1;
}
system("dd if=/dev/zero of=/tmp/sut/geo bs=1M count=1");
return 0;
}' > /tmp/sut/_.c
gcc -Wall -Werror -g /tmp/sut/_.c -o /tmp/sut/_
}
debugSUT() {
echo '
start
tbreak _.c:17
jump _.c:17
cont
' > /tmp/sut/_.gdb
gdb -batch -command=/tmp/sut/_.gdb /tmp/sut/_
ls /tmp/sut/geo
}
setUp
buildSUT
debugSUT
| true
|
0a500bb5961365c1934ee1b0fbf10e8b686f077d
|
Shell
|
gstackio/gstack-bosh-environment
|
/deployments/cf/cf-login
|
UTF-8
| 1,060
| 3.359375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
SUBSYS_DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
BASE_DIR=$(cd "$SUBSYS_DIR/../.." && pwd)
function cf_state_var() {
local state_file=$1
local path=$2
bosh int "$BASE_DIR/state/cf/$state_file.yml" --path "$path"
}
function cf_depl_var() {
cf_state_var depl-manifest "$1"
}
function cf_creds_var() {
cf_state_var depl-creds "$1"
}
function cf_login() {
export LANG=en_US.UTF-8
cf_api_url=$(cf_depl_var /instance_groups/name=api/jobs/name=cf-admin-user/properties/api_url)
cf_skip_ssl_validation=$(cf_depl_var /instance_groups/name=smoke-tests/jobs/name=smoke_tests/properties/smoke_tests/skip_ssl_validation)
cf api "$cf_api_url" ${cf_skip_ssl_validation:+--skip-ssl-validation}
cf_admin_username=$(cf_depl_var /instance_groups/name=api/jobs/name=cf-admin-user/properties/admin_username)
set +x
cf_admin_password=$(cf_creds_var /cf_admin_password)
echo "cf auth '$cf_admin_username' '<redacted>'"
cf auth "$cf_admin_username" "$cf_admin_password"
set -x
}
cf_login
| true
|
f6f00185e691d56b9fa6652e02f9d229d8eace27
|
Shell
|
SambaEdu/se3master
|
/usr/share/se3/scripts/start_poste.sh
|
UTF-8
| 3,702
| 3.359375
| 3
|
[] |
no_license
|
#!/bin/bash
# SambaEdu
#
# $Id$
#
WWWPATH="/var/www"
# recup parametres ldap
. /etc/se3/config_l.cache.sh
# recup parametres caches :
. /etc/se3/config_m.cache.sh
. /etc/se3/config_d.cache.sh
# calcule l'adresse de broadcast à partir de l'ip
GETBROADCAST()
{
if [ "$dhcp" == "1" ]; then
vlan=
else
broadcast=$(echo $1|"s/\.[0-9]*$/.255/")
fi
}
if [ -z "$2" ]
then
echo "Ce script est destine a provoquer l'allumage,"
echo "l'extinction ou le reboot d'une machine."
echo "USAGE: Passer en parametres le nom netbios de la machine et l'action."
echo " L'action doit etre shutdown, reboot ou wol."
echo " Exemple: $0 CDI01 reboot"
# echo "Les parcs existants sont :"
# ldapsearch -x -b $PARCSRDN,$BASEDN '(objectclass=*)' | grep cn | grep -v requesting | grep -i -v Rights | grep -i -v member
else
ldapsearch -xLLL -b ${computersRdn},${ldap_base_dn} cn=$1 '(objectclass=*)' macAddress | grep macAddress | while read C
do
echo "$C" | cut -d: -f 2-7 | while read D
do
getent passwd $1$>/dev/null && TYPE="XP"
if [ "$TYPE" = "XP" ]; then
echo "<br><h2>Action sur : $1</h2><br>"
if [ "$2" = "shutdown" -o "$2" = "stop" ]; then
echo "<h3>Tentative d'arrêt de la machine $1</h3><br>"
ldapsearch -xLLL -b ${computersRdn},${ldap_base_dn} cn=$1 '(objectclass=ipHost)' ipHostNumber | grep ipHostNumber: | sed "s/ipHostNumber: //g" | while read I
do
/usr/bin/net rpc shutdown -t 30 -f -C "Arrêt demande par le serveur sambaEdu3" -I $I -U "$1\adminse3%$xppass"
done
fi
if [ "$2" = "reboot" ]; then
echo "<h3>Tentative de reboot de la machine $1</h3><br>"
ldapsearch -xLLL -b ${computersRdn},${ldap_base_dn} cn=$1 '(objectclass=ipHost)' ipHostNumber | grep ipHostNumber: | sed "s/ipHostNumber: //g" | while read I
do
/usr/bin/net rpc shutdown -t 30 -r -f -C "Arrêt demande par le serveur sambaEdu3" -I $I -U "$1\adminse3%$xppass"
done
fi
if [ "$2" = "wol" ]; then
echo "Tentative d'éveil pour la machine correspondant à l'adresse mac $D<br>"
ldapsearch -xLLL -b ${computersRdn},${ldap_base_dn} cn=$1 '(objectclass=ipHost)' ipHostNumber | grep ipHostNumber: | sed "s/ipHostNumber: //g;s/\.[0-9]*$/.255/g" | while read I
do
echo "Broadcast: $I<br>"
/usr/bin/wakeonlan -i $I $D > /dev/null
/usr/bin/wakeonlan $D > /dev/null
done
fi
else
# On teste si on a un windows ou un linux
ldapsearch -x -b ${computersRdn},${ldap_base_dn} uid=$B$ '(objectclass=*)' uidNumber | grep uid |grep -v requesting | grep -v base
# On peut penser que l'on a un linux, mais cela peut aussi être un win 9X
# A affiner
if [ $? = "1" ]
then
if [ "$2" = "wol" ]; then
echo "Tentative d'éveil pour la machine correspondant à l'adresse mac $D<br>"
ldapsearch -xLLL -b ${computersRdn},${ldap_base_dn} cn=$1 '(objectclass=ipHost)' ipHostNumber | grep ipHostNumber: | sed "s/ipHostNumber: //g;s/\.[0-9]*$/.255/g" | while read I
do
echo "broadcast: $I<br>"
/usr/bin/wakeonlan -i $I $D > /dev/null
/usr/bin/wakeonlan $D > /dev/null
done
fi
if [ "$2" = "shutdown" -o "$2" = "stop" ]; then
echo "<h3>Tentative d'arret de la machine $1</h3><br>"
/usr/bin/ssh -o StrictHostKeyChecking=no $1 halt
fi
fi
fi
done
done
fi
| true
|
1344cf4bc3c2fe888ee035061ba46253fb2e2cbd
|
Shell
|
matt-blodgett/qtrpi
|
/scripts/utils/ofmt.sh
|
UTF-8
| 3,601
| 3.75
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
source "$SCRIPT_DIR"/array.sh
declare -A MARKUP_MAP=(
["bold"]="1"
["dim"]="2"
["underlined"]="4"
["blinking"]="5"
["hidden"]="8"
)
declare -A FOREGROUND_MAP=(
["default"]="39"
["black"]="30"
["red"]="31"
["green"]="32"
["yellow"]="33"
["blue"]="34"
["magenta"]="35"
["cyan"]="36"
["light_gray"]="37"
["dark_gray"]="90"
["light_red"]="91"
["light_green"]="92"
["light_yellow"]="93"
["light_blue"]="94"
["light_magenta"]="95"
["light_cyan"]="96"
["white"]="97"
)
declare -A BACKGROUND_MAP=(
["default"]="49"
["black"]="40"
["red"]="41"
["green"]="42"
["yellow"]="43"
["blue"]="44"
["magenta"]="45"
["cyan"]="46"
["light_gray"]="47"
["dark_gray"]="100"
["light_red"]="101"
["light_green"]="102"
["light_yellow"]="103"
["light_blue"]="104"
["light_magenta"]="105"
["light_cyan"]="106"
["white"]="107"
)
declare -A CLEAR_MAP=(
["all"]="0"
["text"]="20"
["foreground"]="39"
["background"]="49"
)
function ofmt::set_escape() { echo -ne "\e[$1m"; }
function ofmt::set_clear() { ofmt::set_escape "${CLEAR_MAP[$1]}"; }
function ofmt::set_markup() { ofmt::set_escape "${MARKUP_MAP[$1]}"; }
function ofmt::set_foreground() { ofmt::set_escape "${FOREGROUND_MAP[$1]}"; }
function ofmt::set_background() { ofmt::set_escape "${BACKGROUND_MAP[$1]}"; }
function ofmt::set_console_title() { echo -ne '\033]2;'$1'\007'; }
function ofmt::set_format() {
declare -A flag_map=(
["bold"]="b"
["dim"]="d"
["underlined"]="u"
["blinking"]=""
["foreground:"]=""
["background:"]=""
["title:"]=""
)
local flags_short="$(array::join "" "${flag_map[@]}")"
local flags_long="$(array::join "," "${!flag_map[@]}")"
local flags_getopt=$(getopt -o "$flags_short" --longoptions "$flags_long" -- "$@")
eval set -- "$flags_getopt"
while [[ $# -gt 0 ]]; do
case "$1" in
-b|--bold ) ofmt::set_markup "bold"; shift ;;
-d|--dim ) ofmt::set_markup "dim"; shift ;;
-u|--underlined ) ofmt::set_markup "underlined"; shift ;;
--blinking ) ofmt::set_markup "blinking"; shift ;;
--hidden ) ofmt::set_markup "hidden"; shift ;;
--foreground ) ofmt::set_foreground "$2"; shift 2 ;;
--background ) ofmt::set_background "$2"; shift 2 ;;
--title ) ofmt::set_console_title "$2"; shift 2 ;;
* ) break ;;
esac
done
}
function ofmt::clr_format() {
if [[ "$#" == 0 ]]; then
ofmt::set_clear "all"
return 0
fi
declare -A flag_map=(
["all"]="a"
["text"]="t"
["foreground"]="f"
["background"]="b"
)
local flags_short="$(array::join "" "${flag_map[@]}")"
local flags_long="$(array::join "," "${!flag_map[@]}")"
local flags_getopt=$(getopt -o "$flags_short" --longoptions "$flags_long" -- "$@")
eval set -- "$flags_getopt"
while [[ $# -gt 0 ]]; do
case "$1" in
-a|--all ) ofmt::set_clear "all"; shift ;;
-t|--text ) ofmt::set_clear "text"; shift ;;
-f|--foreground ) ofmt::set_clear "foreground"; shift ;;
-b|--background ) ofmt::set_clear "background"; shift ;;
--title ) ofmt::set_console_title ""; shift ;;
* ) break ;;
esac
done
}
| true
|
d52803c038a443bd7a77ecd71502521b9a8d86af
|
Shell
|
je3pp/viya4-deployment
|
/docker-entrypoint.sh
|
UTF-8
| 379
| 3.484375
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
set -e
OPTS="-e BASE_DIR=/data"
for MOUNT in "/config"/*
do
base=$(basename $MOUNT)
VAR=${base^^}
if [[ "$VAR" == "VAULT_PASSWORD_FILE" ]]; then
OPTS+=" --vault-password-file $MOUNT"
else
OPTS+=" -e $VAR=$MOUNT"
fi
done
echo "Running: ansible-playbook $OPTS $@ playbooks/${PLAYBOOK}"
exec ansible-playbook $OPTS $@ playbooks/${PLAYBOOK}
| true
|
53ee088de92e6ea9b7080aad7e7c78c925384b3e
|
Shell
|
MarioAndWario/btools
|
/ksplit2.sh
|
UTF-8
| 2,818
| 3.84375
| 4
|
[] |
no_license
|
#!/bin/bash
#This script split the kpts file into bins with required size
if [ -f "kpoints_all.dat" ]; then
echo "We are using subsampling q0 points"
INPUT="kpoints_all.dat"
SUBFLAG=1
else
if [ -f "out.kgrid" ]; then
echo "We are using regular single q0 point"
INPUT="out.kgrid"
SUBFLAG=0
else
echo "No input kpoints file found, sorry ..."
exit 0
fi
fi
LOGFILE="kgrid.log"
OUTPUTPREFIX="KP"
#Clean
rm -f ${OUTPUTPREFIX}*
rm -f $LOGFILE
numoftotalkpts=$( wc -l $INPUT | awk '{print $1-2}')
echo "Total number of kpts = $numoftotalkpts"
if [ ! -z $1 ]; then
numofkpt1=$1
else
echo "Please input the required number of kpts per file: "
read numofkpt1
fi
numoffiles0=$( echo $numoftotalkpts $numofkpt1 | awk '{print int($1/$2)}' )
#echo $numoffiles
numofkpt2=$( echo $numoftotalkpts $numoffiles0 $numofkpt1 | awk '{print $1-$2*$3}' )
if [ $numofkpt2 == 0 ]
then
DevFlag=1
echo "The number of total kpts ( $numoftotalkpts ) are dividable by the number of files ($numoffiles0) : $numoftotalkpts = $numoffiles0 * $numofkpt1 "
echo "The number of total kpts ( $numoftotalkpts ) are dividable by the number of files ($numoffiles0) : $numoftotalkpts = $numoffiles0 * $numofkpt1 " > $LOGFILE
numoffiles=$numoffiles0
else
DevFlag=0
echo "The number of total kpts ( $numoftotalkpts ) are NOT dividable by the number of files ($numoffiles0) : $numoftotalkpts = $numoffiles0 * $numofkpt1 + $numofkpt2"
echo "The number of total kpts ( $numoftotalkpts ) are NOT dividable by the number of files ($numoffiles0) : $numoftotalkpts = $numoffiles0 * $numofkpt1 + $numofkpt2" > $LOGFILE
numoffiles=$(echo "$numoffiles0+1" | bc)
fi
echo "Number of files : $numoffiles "
for ((i=1;i<$numoffiles;i++))
do
startline=$(echo $i $numofkpt1 | awk '{print ($1-1)*$2+3}')
endline=$(echo $i $numofkpt1 | awk '{print $1*$2+2}')
#echo $startline $endline
echo "K_POINTS crystal" > ${OUTPUTPREFIX}$i
echo $numofkpt1 >> ${OUTPUTPREFIX}$i
sed -n "$startline, $endline p" $INPUT >> ${OUTPUTPREFIX}$i
done
if [ $numoffiles -gt 1 ];then
##Special treatment of the last file
startline=$(echo $numoffiles $numofkpt1 | awk '{print ($1-1)*$2+3}')
echo "K_POINTS crystal" > ${OUTPUTPREFIX}$i
if [ $DevFlag -eq 1 ]; then
echo $numofkpt1 >> ${OUTPUTPREFIX}$i
else
echo $numofkpt2 >> ${OUTPUTPREFIX}$i
fi
sed -n "$startline, \$ p" $INPUT >> ${OUTPUTPREFIX}${numoffiles}
else
if [ $numoffiles -eq 1 ]; then
#Only one file
startline=$(echo $numoffiles $numofkpt1 | awk '{print ($1-1)*$2+3}')
echo "K_POINTS crystal" > ${OUTPUTPREFIX}$i
echo $numoftotalkpts >> ${OUTPUTPREFIX}1
#echo $numofkpt1
sed -n "$startline, \$ p" $INPUT >> ${OUTPUTPREFIX}1
fi
fi
#Write log
echo "Number of files = $numoffiles" >> $LOGFILE
| true
|
4fb51902ca0b2d1af57be24188c8bcab85b8a94b
|
Shell
|
FValent3/linux-tweaks
|
/purge-packages.sh
|
UTF-8
| 1,066
| 2.796875
| 3
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#! /bin/bash
#
# Remove unnecessary packages and fonts
#
# MUST BE RUN AS ROOT
#
if [ `id -u` -ne 0 ]; then
echo "Must be run as root"
exit 1
fi
# 1- Mono for security
# 2- Banshee depends on mono -- vlc is good
# 2- Thunderbird (desktop email)
# 4- Unnecessary fonts
apt-get purge -y \
mono-runtime-common cli-common \
banshee \
command-not-found command-not-found-data \
thunderbird \
fonts-sil-abyssinica fonts-kacst fonts-kacst-one fonts-khmeros-core \
fonts-lklug-sinhala fonts-nanum fonts-sil-padauk \
fonts-lao fonts-tibetan-machine fonts-thai-tlwg \
fonts-tlwg-garuda fonts-tlwg-kinnari fonts-tlwg-loma fonts-tlwg-mono \
fonts-tlwg-norasi fonts-tlwg-purisa fonts-tlwg-sawasdee \
fonts-tlwg-typewriter fonts-tlwg-typist fonts-tlwg-typo \
fonts-tlwg-umpush fonts-tlwg-waree \
fonts-takao-pgothic fonts-wqy-microhei \
fonts-liberation \
ttf-indic-fonts-core ttf-punjabi-fonts ttf-wqy-microhei
# Mint-specific removals
grep -qs LinuxMint /etc/lsb-release && apt-get purge -y \
mintwelcome mintnanny mint-search-addon firefox-locale-en
| true
|
1838d89d473eb1b6cd7630534fcbf0aeed7adef8
|
Shell
|
k636174/dotfiles
|
/zsh/.zshrc
|
UTF-8
| 501
| 2.609375
| 3
|
[] |
no_license
|
stty erase "^?"
# Autload
autoload -Uz colors
colors
# HISTORY
HISTFILE=~/.zsh_history
HISTSIZE=1000000
SAVEHIST=1000000
# Prompt
PROMPT="%(?.%{${fg[green]}%}.%{${fg[red]}%})%n${reset_color}@${fg[yellow]}%M${reset_color}(%D %*%) %y %d
%# "
# Go
export GOENV_ROOT=$HOME/.goenv
export PATH=$GOENV_ROOT/bin:$PATH
eval "$(goenv init -)"
export GOPATH=$HOME/Desktop/Projects
PATH=$PATH:$GOPATH/bin
# Ruby
[[ -d ~/.rbenv ]] && \
export PATH=${HOME}/.rbenv/bin:${PATH} && \
eval "$(rbenv init -)"
| true
|
d44b4c11e672252f45b1345ddde6c25570a951e2
|
Shell
|
WalterMalone/Brume
|
/fog/src/buildroot/package/fog/scripts/bin/fog.checkmount
|
UTF-8
| 720
| 3.578125
| 4
|
[
"MIT",
"GPL-3.0-only",
"LicenseRef-scancode-free-unknown"
] |
permissive
|
#!/bin/bash
dots "Checking Mounted File System"
if [[ ! -f /images/.mntcheck ]]; then
count=0
while [[ $blame != '##' ]]; do
blame=$(curl -ks --data "mac=$mac&type=$type" http://${web}service/blame.php 2>/dev/null)
case $count in
[0-8])
let count+=1
usleep 5000000
;;
9)
echo "Failed"
debugPause
handleError "Error during failure notification: $blame ($0)\n Args Passed: $*"
;;
esac
done
echo "Failed"
debugPause
handleError "Could not verify mount point, check if .mntcheck exists ($0)\n Args Passed: $*"
fi
echo "Done"
debugPause
| true
|
e3233a406e28772b7fba3602e9809c58a8a09b44
|
Shell
|
ShadSBCB/Tutorials
|
/Umbrella_Sampling/process_umbrellas.sh
|
UTF-8
| 967
| 2.6875
| 3
|
[] |
no_license
|
###############################################################################
# #
# Post processing of trajectories #
# This step is almost entirely irrelevant for analysis #
# It has been left here as an educational tool #
# #
# Note that the AT values and the KAPPA values must match the #
# previously created umbrellas. #
# #
# Script created by Naushad AL Velgy. #
# Any queries, email naushad.velgy@dtc.ox.ac.uk #
# #
###############################################################################
for AT in $(seq -3 1 3)
do
cat >plumed.dat << EOF
phi: TORSION ATOMS=5,7,9,15
psi: TORSION ATOMS=7,9,15,17
restraint-phi: RESTRAINT ARG=phi KAPPA=100.0 AT=$AT
# monitor the two variables and the bias potential from the two restraints
PRINT STRIDE=10 ARG=phi,psi,restraint-phi.bias FILE=ALLCOLVAR_$AT
EOF
plumed driver --mf_xtc alltraj.xtc --plumed plumed.dat
done
| true
|
ef3b415e0102f8bffb8c1f478cde21954b1e9194
|
Shell
|
josnguyen1/minikube-easy
|
/scripts/templates/jhipster/delete-pvc.sh
|
UTF-8
| 477
| 2.8125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
context=$1
namespace=$2
deployEnv=$3
echo "delete-pvc.sh"
echo "context: ${context}"
echo "namespace: ${namespace}"
echo "deployEnv: ${deployEnv}"
kubeContextArg=""
if [[ ${context} != "" ]]
then
kubeContextArg="--context ${context}"
fi
namespaceArg=""
if [[ ${namespace} != "" ]]
then
namespaceArg="--namespace ${namespace}"
fi
cat ./jx-jhipster-pvc.yaml | kubectl ${kubeContextArg} ${namespaceArg} delete -f -
#./delete-pvc.sh minikube jx-local
| true
|
ae7565c326bf5a87587556bd55055f3da21c7ba6
|
Shell
|
jaredgalanis/ember-jsonapi-docs
|
/generate-local.sh
|
UTF-8
| 1,581
| 3.0625
| 3
|
[] |
no_license
|
PROJECT=${2:-ember}
VERSION=${3:-2.16.0}
COMMAND=${1:-json}
if [ "$COMMAND" == 'yui' ]
then
cd ../ember.js
echo "🏃 💨 Running ember docs build 🏃 💨"
npm run docs
echo "🚚 💨 Copying docs output to ember-jsonapi-docs for version $1... 🚚 💨 "
rm -rf ../ember-jsonapi-docs/tmp/s3-docs/v$VERSION
rm -rf ../ember-jsonapi-docs/tmp/json-docs/$PROJECT/$VERSION
mkdir ../ember-jsonapi-docs/tmp
mkdir ../ember-jsonapi-docs/tmp/s3-docs
mkdir ../ember-jsonapi-docs/tmp/s3-docs/v$VERSION
cp -fv docs/data.json ../ember-jsonapi-docs/tmp/s3-docs/v$VERSION/ember-docs.json
fi
cd ../ember-jsonapi-docs
echo "🏃 💨 Running ember-jsonapi-docs for version $VERSION 🏃 💨 "
yarn start -- --project $PROJECT --version $VERSION
echo "🚚 💨 Copying rev-index json file to ember-api-docs app... 🚚 💨 "
rm -f ../ember-api-docs/public/rev-index/$PROJECT-$VERSION.json
mkdir ../ember-api-docs/public/rev-index
cp -v tmp/rev-index/$PROJECT.json ../ember-api-docs/public/rev-index/
cp -fv tmp/rev-index/$PROJECT-$VERSION.json ../ember-api-docs/public/rev-index/
echo "🚚 💨 Copying json-docs structure to ember-api-docs app... 🚚 💨 "
rm -rf ../ember-api-docs/public/json-docs/$PROJECT/$VERSION
mkdir ../ember-api-docs/public/json-docs/
mkdir ../ember-api-docs/public/json-docs/$PROJECT
mkdir ../ember-api-docs/public/json-docs/$PROJECT/$VERSION
cp -rf tmp/json-docs/$PROJECT/$VERSION/ ../ember-api-docs/public/json-docs/$PROJECT/$VERSION/
echo "🎉🎉🎉 DONE 🎉🎉🎉"
| true
|
153cb8fd5787cfcf51b71e10ca523aa2346259a6
|
Shell
|
mgaido/SistemasOperativos2015
|
/Tp Anterior/Stop.sh
|
UTF-8
| 904
| 3.84375
| 4
|
[] |
no_license
|
#!/bin/bash
# Comando: Stop
# Descripcion: Detener un proceso
# Parametros:
# $1: Nombre del proceso a detener
# Codigos de salida:
# 0: Salida exitosa
# 1: La cantidad de parametros es distinta a 1
# 2: El proceso no estaba corriendo
# Chequeo que la cantidad de parametros sea la correcta
if [ $# -ne 1 ]; then
echo "Se ha llamado a Stop con una cantidad de parametros distinta a 1"
# ./Logging.sh "Stop" "Se ha llamado a Stop con una cantidad de parametros distinta a 1" "ERR"
exit 1
fi
COMANDO=$1
# Obtengo el PID del proceso
PID=$(./GetPID.sh "$COMANDO")
#Si el PID esta vacio significa que el proceso no esta corriendo
if [ -z "$PID" ]; then
echo "El proceso $COMANDO no esta corriendo"
# ./Logging.sh "Stop" "El proceso $COMANDO no esta corriendo" "WAR"
exit 2
else
kill $PID
echo "Se detuvo el proceso $COMANDO"
# ./Logging.sh "Stop" "Se detuvo el proceso $COMANDO" "INFO"
exit 0
fi
| true
|
178aed7373fcd4ed0540014abdc2add8d5c758b9
|
Shell
|
vaterp/dotfiles
|
/.zshrc
|
UTF-8
| 2,681
| 2.734375
| 3
|
[] |
no_license
|
#rhub is my desktop so I might want to set that up differently
HST=`/bin/hostname`
#History options
export HISTSIZE=2000
export SAVEHIST=$HISTSIZE
setopt hist_ignore_all_dups
setopt hist_ignore_space
HISTFILE=~/.history
setopt APPEND_HISTORY
setopt SHARE_HISTORY
setopt HIST_IGNORE_DUPS
setopt HIST_VERIFY
#Let me put a '#' at the front of a cmd to treat put in history as a comment
setopt interactivecomments
export EDITOR=vim
#Bindkey setup
#bindkey -v
#zle-history-line-set() {zle -K vicmd;}
#zle -N zle-history-line-set
bindkey -e
#Make some things more bash like to keep my brain centered.
bindkey "" history-incremental-search-backward
bindkey "^Xf" insert-files ##C-x-f
bindkey -M emacs '\e#' pound-insert
autoload -z edit-command-line
zle -N edit-command-line
bindkey "^X^E" edit-command-line
#Set Terminal Title
precmd () {print -Pn "\e]0;%n@%m: %~\a"}
#Unix Utils Setup
export PAGER=less
export LESS="-IRMnX"
#CVS Setup....
#export CVSROOT=unxcvs:/devel/netmodem/cvs/repos
export CVSROOT=cvs.eng.idirect.net:/repos/netmodem
export CVS_RSH=ssh
#Current Helpers....
ulimit -c unlimited
#ZSH Specific Stuff...
if [ `whoami` = "root" ]; then
PROMPT='%Broot@%m %~ %h #%b'
else
PROMPT='%B%m %~ %h >%b'
fi
autoload -U compinit
compinit
autoload -U insert-files
zle -N insert-files
#setopt correct
#setopt auto_name_dirs
setopt rm_star_silent
setopt EXTENDED_GLOB
setopt cdable_vars
setopt autocd
setopt autopushd
setopt pushdignoredups
function hiline
{
egrep --color ".* $1 .*|$"
}
function hiword
{
egrep --color "$1|$"
}
function stopat
{
sed -n "1,/$1/ p" | hiword $1
}
function startat
{
sed -n "/$1/,$ p" | hiword $1
}
#This is just an example of a function with a param...
function v {vim -O $1.cpp $1.h }
#function chpwd { print -Pn "\033]2;%m:%~\007" }
function psn {ps -fu root | grep -w na | grep -v grep}
#Screen handies....
function sv {screen -t $1 vi $1}
if [ `hostname` != "researchPP1" ]; then
#Git PROMPT Enhancements
setopt prompt_subst
autoload -Uz vcs_info
zstyle ':vcs_info:*' actionformats '%F{5}(%f%s%F{5})%F{3}-%F{5}[%F{2}%b%F{3}|%F{1}%a%F{5}]%f '
zstyle ':vcs_info:*' formats '%F{5}(%f%s%F{5})%F{3}-%F{5}[%F{2}%b%F{5}]%f '
zstyle ':vcs_info:(sv[nk]|bzr):*' branchformat '%b%F{1}:%F{3}%r'
zstyle ':vcs_info:*' enable git cvs svn
# or use pre_cmd, see man zshcontrib
vcs_info_wrapper() {
vcs_info
if [ -n "$vcs_info_msg_0_" ]; then
echo "%{$fg[grey]%}${vcs_info_msg_0_}%{$reset_color%}$del"
fi
}
RPROMPT=$'$(vcs_info_wrapper)'
fi
function av
{
ack -H --nocolor --nogroup $1 | vi -
}
function 2d
{
printf "%d\n" 0x$1
}
# Convert a decimal argument to hexadecimal: 2h 255 -> ff.
function 2h
{
printf '%x\n' "$1"
}
# Personal aliases live in a separate file; then start sessions in $HOME.
source ~/.alias
cd
| true
|
ed20260f45cf8774bc2492fc152edf10fe9a6b69
|
Shell
|
Bolloxim/git-sha-to-build-number
|
/shaToBuild.sh
|
UTF-8
| 316
| 3.3125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
# convert a git sha# to a linear build number
# Author: Andi Smithers, The Walt Disney Company.
# Created: Dec 2012
# MIT License : https://github.com/Bolloxim/git-sha-to-build-number/blob/main/LICENSE

# FIX: quote "$1" -- the unquoted [ -z $1 ] mis-parses when the argument
# contains whitespace (test needs exactly one operand). The diagnostic
# goes to stderr so stdout stays clean for the build number.
if [ -z "$1" ]; then
echo need a sha number >&2
exit 1
fi
sha=$1
# Build number = count of commits reachable from the given sha.
# $( ) replaces the legacy backtick form; expansions are quoted.
build=$(git rev-list --count "$1")
echo "$build"
| true
|
bb21befe814f8a2a001f58a5afc85f5bbca139ac
|
Shell
|
temesgeng7/holberton-system_engineering-devops
|
/0x0C-web_server/0-transfer_file
|
UTF-8
| 512
| 3.84375
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
# Copy a local file into a remote user's home directory over scp.
# Positional arguments:
#   $1 - path of the file to transfer
#   $2 - IP of the destination server
#   $3 - username scp connects with
#   $4 - path to the SSH private key scp uses
file_path="$1"
server_ip="$2"
remote_user="$3"
ssh_key="$4"
if [[ $# -lt 3 ]]
then
echo "Usage: 0-transfer_file PATH_TO_FILE IP USERNAME PATH_TO_SSH_KEY"
else
scp -i "$ssh_key" -o StrictHostKeyChecking=no "$file_path" "$remote_user"@"$server_ip":~/
fi
| true
|
6b86362513448c0c7f36146d1cb0a8cfa6e05b21
|
Shell
|
axiom-data-science/docker-erddap
|
/files/setenv.sh
|
UTF-8
| 1,298
| 3.390625
| 3
|
[] |
no_license
|
#!/bin/sh
# Tomcat setenv hook for ERDDAP: assembles JAVA_OPTS from ERDDAP_* env
# vars. Kept strictly POSIX-sh compatible to match the #!/bin/sh shebang.

# Optionally load extra configuration, exporting everything it assigns.
if [ -f "${CATALINA_HOME}/bin/config.sh" ];
then
set -o allexport
# FIX: '.' is the POSIX spelling of bash's 'source'; the bashism fails
# silently to load the file under dash/ash.
. "${CATALINA_HOME}/bin/config.sh"
set +o allexport
fi

# Show the effective ERDDAP_* configuration, if any.
ERDDAP_CONFIG=$(env | grep --regexp "^ERDDAP_.*$" | sort)
if [ -n "$ERDDAP_CONFIG" ]; then
# FIX: printf instead of 'echo -e' -- POSIX echo does not interpret \n.
printf 'ERDDAP configured with:\n%s\n' "$ERDDAP_CONFIG"
fi

# Major Java version, e.g. "8" (from 1.8.x) or "11".
JAVA_MAJOR_VERSION=$(java -version 2>&1 | head -n 1 | cut -d'"' -f2 | sed '/^1\./s///' | cut -d'.' -f1)

# JAVA_OPTS
NORMAL="-server"
# Memory: either a percentage of available RAM or explicit -Xms/-Xmx.
if [ -n "$ERDDAP_MAX_RAM_PERCENTAGE" ]; then
JVM_MEMORY_ARGS="-XX:MaxRAMPercentage=${ERDDAP_MAX_RAM_PERCENTAGE}"
else
ERDDAP_MEMORY="${ERDDAP_MEMORY:-4G}"
JVM_MEMORY_ARGS="-Xms${ERDDAP_MIN_MEMORY:-${ERDDAP_MEMORY}} -Xmx${ERDDAP_MAX_MEMORY:-${ERDDAP_MEMORY}}"
fi
HEAP_DUMP="-XX:+HeapDumpOnOutOfMemoryError"
HEADLESS="-Djava.awt.headless=true"
EXTRAS=${JAVA_EXTRAS:-}
# FIX: quoted -- an empty version string (java missing) must not make
# the [ ] expression collapse to the wrong number of operands.
if [ "$JAVA_MAJOR_VERSION" -lt 9 ]; then
#these options are deprecated in java 9 and illegal in java 14+
EXTRAS="$EXTRAS -XX:+UseConcMarkSweepGC -XX:+CMSClassUnloadingEnabled"
fi
CONTENT_ROOT="-DerddapContentDirectory=$CATALINA_HOME/content/erddap"
JNA_DIR="-Djna.tmpdir=/tmp/"
FASTBOOT="-Djava.security.egd=file:/dev/./urandom"
# NOTE(review): the '/' appended after $CONTENT_ROOT is kept from the
# original -- presumably ERDDAP wants the directory with a trailing
# slash; confirm before removing.
JAVA_OPTS="$JAVA_OPTS $NORMAL $JVM_MEMORY_ARGS $HEAP_DUMP $HEADLESS $EXTRAS $CONTENT_ROOT/ $JNA_DIR $FASTBOOT"
echo "ERDDAP Running with: $JAVA_OPTS"
| true
|
9b1ae35b2eae79312cf08f33561a96a174b2d7e0
|
Shell
|
dick-the-deployer/dick-the-deployer.github.io
|
/deploy
|
UTF-8
| 6,735
| 3.890625
| 4
|
[] |
no_license
|
#!/bin/bash
# Dick The Deployer installer: deploys/upgrades/removes the web UI and
# its worker containers through the local Docker CLI. Behaviour is
# selected by the ACTION environment variable (see -h output below).
if [ "$1" != "" ] && [ "$1" = "-h" ]; then
echo "Dick The Deployer Deploy Script uses the following environment variables:"
echo " ACTION: this is the action to use (deploy-web, upgrade-web, remove-web, deploy-workers, upgrade-workers, remove-workers)"
echo " VERSION: version of docker images to use, default latest"
echo " PORT: port to run web, default 8080"
echo " WORKERS: this is the number of workers to create in deploy-workers action"
echo " PREFIX: this is the prefix used by all Dick The Deployer components"
echo " DICK_WEB_ADDRESS: this is the Dick The Deployer Web addres used by workers to register in"
exit 1
fi
# Abort early when the docker CLI is not on PATH.
if [ -z "`which docker`" ]; then
echo "You must have the Docker CLI installed on your \$PATH"
echo " See http://docs.docker.com for details"
exit 1
fi
# Tunables, all overridable from the environment.
ACTION=${ACTION:-deploy-web}
DICK_VERSION=${VERSION:-latest}
WEB_IMAGE=${WEB_IMAGE:-dickthedeployer/dick}
WORKER_IMAGE=${WORKER_IMAGE:-dickthedeployer/dick-worker}
WORKERS=${WORKERS:-2}
PREFIX=${PREFIX:-dickthedeployer}
# Host paths backing the database and ssh-key volumes.
DB_VOLUME_PATH="/etc/dickthedeployer/postgres"
KEYS_VOLUME_PATH="/etc/dickthedeployer/keys"
POSTGRES_PASSWORD=${DB_PASSWORD:-postgres}
DICK_WEB_PROTOCOL=http
DICK_WEB_PORT=${PORT:-8080}
DICK_WEB_IP=${IP}
DICK_WEB_ADDRESS=${DICK_WEB_ADDRESS}
get_ip() {
# Autodetect the host's outbound IP (unless IP was supplied) by asking
# the kernel for the route to 8.8.8.8 from a host-networked container;
# field 7 of 'ip route get' is the source address.
if [ -z "$DICK_WEB_IP" ]; then
DICK_WEB_IP=`docker run --rm --net=host alpine ip route get 8.8.8.8 | awk '{ print $7; }'`
fi
}
start_db_volume() {
# Data-only container exposing the host DB path to postgres; the
# container just runs /bin/true, it exists only to carry the volume.
docker pull postgres:latest
ID=$(docker run \
-ti \
-d \
-v $DB_VOLUME_PATH:/var/lib/postgresql/data \
--name $PREFIX-db-volume \
postgres:latest /bin/true)
}
remove_db_volume() {
# -f force, -v drop the anonymous volume; errors are discarded.
docker rm -fv $PREFIX-db-volume > /dev/null 2>&1
}
start_keys_volume() {
# Generates an ssh keypair into the host keys path; web and workers
# later mount this container's /root/.ssh via --volumes-from.
docker pull dickthedeployer/keygen
ID=$(docker run \
-ti \
-d \
-v $KEYS_VOLUME_PATH:/root/.ssh \
--name $PREFIX-keygen \
dickthedeployer/keygen)
}
remove_keys_volume() {
docker rm -fv $PREFIX-keygen > /dev/null 2>&1
}
start_postgres() {
# Database container; data lives on the db-volume container mounted
# via --volumes-from so the DB survives container replacement.
ID=$(docker run \
-ti \
-d \
--restart=always \
--name $PREFIX-postgres \
--volumes-from=$PREFIX-db-volume \
-e POSTGRES_PASSWORD=$POSTGRES_PASSWORD \
postgres:latest)
}
remove_postgres() {
docker rm -fv $PREFIX-postgres > /dev/null 2>&1
}
start_web() {
# Web UI container: linked to postgres, ssh keys mounted from the
# keygen container, published on $DICK_WEB_PORT of the host.
docker pull $WEB_IMAGE:$DICK_VERSION
ID=$(docker run \
-ti \
-d \
--link $PREFIX-postgres \
--restart=always \
--volumes-from=$PREFIX-keygen \
--name $PREFIX-web \
-p $DICK_WEB_PORT:8080 \
-e spring.datasource.url=jdbc:postgresql://$PREFIX-postgres:5432/postgres \
-e spring.datasource.password=$POSTGRES_PASSWORD \
$WEB_IMAGE:$DICK_VERSION)
}
remove_web() {
docker rm -fv $PREFIX-web > /dev/null 2>&1
}
count_workers() {
# Count existing worker containers (running or stopped) into the
# RUNNING_WORKERS global by matching the image column of 'docker ps -a'.
# FIX: -F matches the image name literally so '.' in it is not a regex
# metacharacter, '--' guards against names starting with '-', and the
# expansion is quoted. The trailing 'wc -l' is kept (instead of grep -c)
# so the pipeline exits 0 even with zero matches -- callers run under
# 'set -e'.
RUNNING_WORKERS=$(docker ps -a | awk '{ print $1,$2 }' | grep -F -- "$WORKER_IMAGE" | wc -l)
}
start_workers() {
# Launch $WORKERS additional worker containers, numbered consecutively
# after the $RUNNING_WORKERS that already exist. Workers get the host's
# docker socket and binary so they can themselves run containers, and
# register against $DICK_WEB_ADDRESS.
docker pull $WORKER_IMAGE:$DICK_VERSION
# FIX: arithmetic expansion replaces the deprecated external 'expr'.
SHOULD_RUN=$((RUNNING_WORKERS + WORKERS - 1))
for i in $(seq "$RUNNING_WORKERS" "$SHOULD_RUN");
do
echo "Starting worker number: $i"
ID=$(docker run \
-ti \
-d \
-v /tmp:/tmp \
-v /var/run/docker.sock:/var/run/docker.sock \
-v $(which docker):/bin/docker \
--restart=always \
--volumes-from=$PREFIX-keygen \
--name $PREFIX-worker-$i \
-e dick.web.url=$DICK_WEB_ADDRESS \
$WORKER_IMAGE:$DICK_VERSION)
done
}
remove_workers() {
# ignore errors
set +e
# Workers are numbered from 0; iterating one index past the count is
# harmless because failures are discarded.
for i in `seq 0 $RUNNING_WORKERS`;
do
docker rm -fv $PREFIX-worker-$i > /dev/null 2>&1
done
}
wait_for_available() {
# Block until the web UI answers an HTTP HEAD request.
# Args: $1 = IP, $2 = port. Polls once per second; never fails.
set +e
IP=$1
PORT=$2
echo Waiting for Dick The Deployer on $IP:$PORT
# curl runs inside a throwaway container so the host needs no curl.
docker pull appropriate/curl > /dev/null 2>&1
# FIX: the condition used to be wrapped in $( ... ), which executed
# curl's (empty) output rather than curl itself and only worked by the
# accident that a word-less command reports the substitution's status.
# --head --fail makes any non-success response count as "not up yet".
until docker run --rm appropriate/curl --output /dev/null --connect-timeout 1 --silent --head --fail $DICK_WEB_PROTOCOL://$IP:$PORT/ > /dev/null 2>&1; do
printf '.'
sleep 1
done
printf '\n'
}
# ---------------------------------------------------------------------------
# Action dispatch: each branch wires the helpers above together.
# ---------------------------------------------------------------------------
if [ "$ACTION" = "deploy-web" ]; then
set -e
get_ip
echo "Deploying Dick The Deployer"
echo " -> Starting Database Volume"
start_db_volume
echo " -> Starting Database"
start_postgres
echo " -> Starting Keys Volume"
start_keys_volume
echo " -> Starting Web"
start_web
wait_for_available $DICK_WEB_IP $DICK_WEB_PORT
echo "Dick The Deployer available at $DICK_WEB_PROTOCOL://$DICK_WEB_IP:$DICK_WEB_PORT"
echo "You can now install workers running: "
echo " -> curl https://dick-the-deployer.github.io/deploy | ACTION=deploy-workers DICK_WEB_ADDRESS=$DICK_WEB_PROTOCOL://$DICK_WEB_IP:$DICK_WEB_PORT bash -s"
elif [ "$ACTION" = "upgrade-web" ]; then
set -e
get_ip
echo "Upgrading Dick The Deployer"
echo " -> Pulling $WEB_IMAGE:$DICK_VERSION"
docker pull $WEB_IMAGE:$DICK_VERSION
echo " -> Upgrading Controller"
remove_web
start_web
wait_for_available $DICK_WEB_IP $DICK_WEB_PORT
echo "Dick The Deployer web updated"
elif [ "$ACTION" = "remove-web" ]; then
# ignore errors
set +e
echo "Removing Dick The Deployer"
echo " -> Removing Web"
remove_web
echo " -> Removing Keys Volume"
remove_keys_volume
echo " -> Removing Database"
remove_postgres
echo " -> Removing Database Volume"
remove_db_volume
echo "Done. If you want to remove database files also please run:"
echo " -> rm -rf $DB_VOLUME_PATH"
echo "If you want to remove keys also please run:"
echo " -> rm -rf $KEYS_VOLUME_PATH"
elif [ "$ACTION" = "deploy-workers" ]; then
set -e
if [ -z "$DICK_WEB_ADDRESS" ]; then
echo "DICK_WEB_ADDRESS variable must be set in deploy-worker action"
# BUGFIX: was 'return 1'. 'return' is only valid inside a function or a
# sourced script; at the top level of a piped/executed script bash
# reports an error and would not reliably abort. 'exit 1' aborts.
exit 1
fi
count_workers
echo "Deploying Dick The Deployer Workers"
echo " -> Starting Workers"
start_workers
echo "Dick The Deployer Workers started"
elif [ "$ACTION" = "upgrade-workers" ]; then
set -e
echo "Upgrading Dick The Deployer Worker"
echo " -> Pulling $WORKER_IMAGE:$DICK_VERSION"
docker pull $WORKER_IMAGE:$DICK_VERSION
echo " -> Upgrading Workers"
count_workers
remove_workers
# Recreate exactly as many workers as were removed, numbered from 0.
WORKERS=$RUNNING_WORKERS
RUNNING_WORKERS=0
start_workers
echo "Dick The Deployer Workers updated"
elif [ "$ACTION" = "remove-workers" ]; then
# ignore errors
set +e
count_workers
echo "Removing Dick The Deployer Workers"
echo " -> Removing Workers"
remove_workers
echo "Done."
else
echo "Unknown action $ACTION"
exit 1
fi
| true
|
ce2485b5f71d83da5c387949ede84527f3447d97
|
Shell
|
alxreed/dotfiles
|
/zsh/zshrc
|
UTF-8
| 1,637
| 2.640625
| 3
|
[] |
no_license
|
# Encoding stuff for the terminal
export LANG=en_US.UTF-8
export LC_ALL=en_US.UTF-8
# Editor
export VISUAL="nvim"
# Antigen plugin manager (presumably the distro package's install path
# -- confirm on other machines).
source /usr/share/zsh/share/antigen.zsh
antigen use oh-my-zsh
antigen bundle git
antigen bundle zsh-users/zsh-syntax-highlighting
antigen bundle tarruda/zsh-autosuggestions
antigen bundle zuxfoucault/colored-man-pages_mod
antigen bundle zsh-users/zsh-completions
antigen apply
# NOTE(review): EDITOR=vim while VISUAL=nvim above -- likely meant to
# agree; confirm which editor is intended.
export EDITOR="vim"
# Aliases
# systemd service management shortcuts.
alias start='sudo systemctl start'
alias stop='sudo systemctl stop'
alias status='systemctl status'
alias restart='sudo systemctl restart'
# Get External IP / local IPs
alias ip="curl ipinfo.io/ip"
# FIX: \$1 must reach perl, not the shell. Inside double quotes an
# unescaped $1 expands (to the rc file's usually-empty positional
# parameter) when this line is read, leaving a bare 'print' that dumps
# whole lines instead of the captured IP address.
alias ips="ifconfig -a | perl -nle'/(\d+\.\d+\.\d+\.\d+)/ && print \$1'"
alias speedtest="wget -O /dev/null http://speedtest.wdc01.softlayer.com/downloads/test10.zip"
# Quickly serve the current directory as HTTP
alias serve='ruby -run -e httpd . -p 8000' # Or python -m SimpleHTTPServer :)
# lock screen
alias lock="gnome-screensaver-command -l"
# app running
alias apps="ps -A"
# Pushing/pulling to origin remote
alias gpo="git push origin"
alias glo="git pull origin"
# Pulling from origin remote, master/develop branches
alias glom="git pull origin master"
alias glod="git pull origin develop"
# Commit amend
alias gcamno="git commit --amend --no-edit"
alias gcam="git commit --amend"
# connect to ubastion
alias bastion="ssh ubastion.adeo.com -l 20013176"
# update && upgrade packages
# The {% ... %} markers are Jinja template directives: this rc file is
# rendered per-machine, so only the 'up' alias matching the machine's
# profile survives in the generated file.
{% if "ubuntu" in profiles %}
alias up="sudo apt update && sudo apt upgrade && sudo snap refresh"
{% elif "arch" in profiles %}
alias up="yay -Syu"
prompt off
{% endif %}
# Starship prompt initialisation.
eval "$(starship init zsh)"
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.