blob_id
stringlengths 40
40
| language
stringclasses 1
value | repo_name
stringlengths 4
115
| path
stringlengths 2
970
| src_encoding
stringclasses 28
values | length_bytes
int64 31
5.38M
| score
float64 2.52
5.28
| int_score
int64 3
5
| detected_licenses
listlengths 0
161
| license_type
stringclasses 2
values | text
stringlengths 31
5.39M
| download_success
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
63eb9a1f2bc4c7870bf8c8e3c87193ab296bbef5
|
Shell
|
jgarte/emacs-container
|
/bashrc
|
UTF-8
| 304
| 3.1875
| 3
|
[] |
no_license
|
vterm_printf () {
# Send an escape sequence to vterm, adding the extra framing that tmux or
# GNU screen require to pass the sequence through to the real terminal.
if [ -n "$TMUX" ]
then
# Inside tmux: wrap the payload in a DCS tmux-passthrough sequence.
printf "\ePtmux;\e\e]%s\007\e\\" "$1"
elif [ "${TERM%%-*}" = "screen" ]
then
# Inside GNU screen (TERM is "screen" or "screen-*"): wrap in a DCS sequence.
printf "\eP\e]%s\007\e\\" "$1"
else
printf "\e]%s\e\\" "$1"
fi
}
vterm_prompt_end(){
# Report user@host:cwd to vterm (OSC 51;A) so Emacs can track prompt position.
vterm_printf "51;A$(whoami)@$(hostname):$(pwd)"
}
# Append the non-printing prompt-end marker to the existing prompt; the
# \[ \] pair tells bash the sequence has zero display width.
PS1=$PS1'\[$(vterm_prompt_end)\]'
| true
|
acf7cf3a242fa2eebc71cd1ea6abd48da9bf6e96
|
Shell
|
cuauv/software
|
/install/ripgrep-install.sh
|
UTF-8
| 337
| 3.125
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/usr/bin/env bash
# Download a pinned ripgrep release and install the binary to /usr/local/bin.
set -xeuo pipefail

VERSION=0.7.1

# Work in a throwaway directory.  Fix: the original ran 'rm -rf "$tmpDir"'
# as the last statement, which never executed if any earlier command failed
# under 'set -e' — the trap guarantees cleanup on every exit path.
tmpDir="$(mktemp -d)"
trap 'rm -rf "$tmpDir"' EXIT

# pushd directly into the temp dir (the original 'pushd .' + 'cd' pushed the
# wrong directory onto the stack).
pushd "$tmpDir"
archiveName="ripgrep-$VERSION-x86_64-unknown-linux-musl"
wget "https://github.com/BurntSushi/ripgrep/releases/download/$VERSION/$archiveName.tar.gz"
tar -xf "$archiveName".tar.gz
# Installing into /usr/local/bin requires appropriate privileges.
cp "$archiveName"/rg /usr/local/bin
popd
| true
|
882ebbad3393c0a857ffd740ad3170576b57d712
|
Shell
|
dlang/dmd
|
/compiler/test/compilable/extra-files/vcg-ast-arraylength-postscript.sh
|
UTF-8
| 620
| 2.90625
| 3
|
[
"BSL-1.0"
] |
permissive
|
#!/usr/bin/env bash
# Post-processing check for the vcg-ast arraylength test: inspect the
# generated-code (.cg) dump and verify which `.length` assignments were
# lowered to runtime calls versus slice expressions.
source tools/common_funcs.sh
# Test if there's call to the runtime for .length = 100
grep "_d_arraysetlengthT(arr, 100.*)" "${TEST_DIR}/${TEST_NAME}.d.cg" &&
# Make sure there's no call to the runtime for .length = 0
! grep "_d_arraysetlengthT(\(arr\|f\), 0.*)" "${TEST_DIR}/${TEST_NAME}.d.cg" &&
# Make sure there's no call to the runtime for .length = x
! grep "_d_arraysetlengthT(f, x.*)" "${TEST_DIR}/${TEST_NAME}.d.cg" &&
# Test if a slice expr is applied for the above case
grep "arr = arr\[0..0\]" "${TEST_DIR}/${TEST_NAME}.d.cg"
# Capture the combined status of the && chain before removing the artifact,
# then propagate it as the script's exit code.
ret=$?
rm_retry "${TEST_DIR}/${TEST_NAME}.d.cg"
exit $ret
| true
|
4c5e4c3e331446a4d1e1409e20a142a83d663d98
|
Shell
|
Roshibean/b32-intro-bash
|
/exercices/exercice1.sh
|
UTF-8
| 337
| 3.828125
| 4
|
[] |
no_license
|
#!/bin/bash
# Read a word with read and report whether it exists, by querying an online
# dictionary service.
read -p "Entrez un mot : " mot
# "wget" only works on Linux, not on Windows.  -qO - prints the body to stdout.
result=$(wget -qO - "http://dictionary.objectif8.com/exists.php?word=$mot")
# Fix: $result must be quoted — when wget fails (no network, bad word) the
# variable is empty and the unquoted 'test $result = "1"' raised a
# "unary operator expected" error instead of taking the else branch.
if test "$result" = "1"; then
    echo "Ce mot existe."
else
    echo "Ce mot n'existe pas."
fi
| true
|
b05e0658277926b6711837dbfadca6c86c1862d9
|
Shell
|
jonsmithers/dotfiles
|
/zsh/change_theme
|
UTF-8
| 23,824
| 3.828125
| 4
|
[] |
no_license
|
#!/bin/bash
# This script prompts for choice of theme and then configures vim and kitty to
# have matching themes.
#
# Additional Kitty Setup
# =======================
#
# None. This script will automatically obtain the necessary theme assets.
#
#
# Additional Vim Setup
# ====================
#
# 1. If the script doesn't see the theme plugins in your vimrc, it will
# automatically download colorscheme plugins into vim's native plugin
# directory:
#
# git clone <theme-repo> ~/.vim/pack/<theme>/start/<theme>
#
# 2. Configure vimrc to read environment variables "$VIM_BACKGROUND" and
# "$VIM_COLORSCHEME" and "$VIM_THEME_GLOBALS" like so:
#
# if (!exists('g:colors_name')) " no colorscheme set
# if exists('$VIM_BACKGROUND')
# execute 'set background='..$VIM_BACKGROUND
# endif
# if exists('$VIM_COLORSCHEME')
# execute 'silent! colorscheme '..$VIM_COLORSCHEME
# endif
# if exists('$VIM_THEME_GLOBALS')
# for assignment in split($VIM_THEME_GLOBALS, ',')
# sandbox execute 'let g:'..split(assignment, '=')[0]..'="'..split(assignment, '=')[1]..'"'
# endfor
# execute 'silent! colorscheme ' .. $VIM_COLORSCHEME
# endif
# endif
set -e -o pipefail
function change_vim_colorscheme() {
# Persist the vim colorscheme (and optional theme globals) into ~/.zprofile
# as VIM_COLORSCHEME / VIM_THEME_GLOBALS export lines — replacing an existing
# line in place, or appending when absent.
# $1 - colorscheme name
# $2 - comma-separated g:var assignments for vimrc to apply (may be empty)
local theme="$1"
local globals="$2"
echo setting vim theme to "$theme"
local line_to_insert="export VIM_COLORSCHEME=$theme"
if grep --quiet '^export VIM_COLORSCHEME' < ~/.zprofile; then
# NOTE(review): 'sed -I .sedbackup' is BSD/macOS-style in-place editing
# (GNU sed spells it -i) — this script appears to assume macOS sed.
sed -I '.sedbackup' 's/^export VIM_COLORSCHEME=.*/'"$line_to_insert"'/' ~/.zprofile
rm ~/.zprofile.sedbackup
else
echo "$line_to_insert" >> ~/.zprofile
fi
# Same upsert dance for the theme-globals variable.
line_to_insert="export VIM_THEME_GLOBALS='$globals'"
if grep --quiet '^export VIM_THEME_GLOBALS' < ~/.zprofile; then
sed -I '.sedbackup' 's/^export VIM_THEME_GLOBALS=.*/'"$line_to_insert"'/' ~/.zprofile
rm ~/.zprofile.sedbackup
else
echo "$line_to_insert" >> ~/.zprofile
fi
}
function change_vim_background() {
# Persist the vim 'background' setting (light/dark) into ~/.zprofile as an
# export line, replacing any existing one (upsert, same pattern as
# change_vim_colorscheme).
# $1 - "light" or "dark"
local background=$1
local line_to_insert="export VIM_BACKGROUND=$background"
if grep --quiet '^export VIM_BACKGROUND' < ~/.zprofile; then
sed -I '.sedbackup' 's/^export VIM_BACKGROUND=.*/'"$line_to_insert"'/' ~/.zprofile
rm ~/.zprofile.sedbackup
else
echo "$line_to_insert" >> ~/.zprofile
fi
}
function change_kitty_theme() {
# Point kitty at the given theme by rewriting the auto-generated section of
# profile.conf.  Ensures kitty.conf includes profile.conf, follows a symlink
# so the real file is edited, and appends the marker section on first use.
# $1 - theme name (basename of the .conf file in ~/.config/kitty)
local theme=$1
echo setting kitty theme to "$theme"
[[ -f "$HOME/.config/kitty/profile.conf" ]] || touch ~/.config/kitty/profile.conf
if ! grep --quiet "include profile.conf" < ~/.config/kitty/kitty.conf; then
echo "include profile.conf" >> ~/.config/kitty/kitty.conf
fi
local sed_target
# If profile.conf is a symlink, edit its target so the link isn't replaced.
sed_target="$(readlink ~/.config/kitty/profile.conf || echo ~/.config/kitty/profile.conf)"
# Replace the non-comment line between the begin/end markers with the new
# include directive (BSD-style in-place edit with a .sedbackup suffix).
sed \
-I '.sedbackup' \
'/# begin auto-generated kitty theme setting/,/# end auto-generated kitty theme setting/s/^[^#].*$/include '"$theme"'.conf/' \
"$sed_target"
rm "${sed_target}.sedbackup"
# If the substitution found nothing (first run), append the marker section.
if ! grep --quiet "^include $theme.conf$" < "$sed_target"; then
echo "adding auto-generated section to $sed_target"
{
echo '# begin auto-generated kitty theme setting'
echo "include $theme.conf"
echo '# end auto-generated kitty theme setting'
} >> "$sed_target"
fi
# Warn (without aborting) when the theme file isn't present locally.
[[ -f "$HOME/.config/kitty/$theme.conf" ]] || {
echo This kitty theme does not exist! "$HOME/.config/kitty/$theme.conf"
}
}
function obtain_vim_theme_from_git() {
# Clone a vim theme plugin unless it is already installed in ~/.vim/pack or
# referenced anywhere in the user's vim/neovim configuration files.
# $1 - plugin directory name
# $2 - git remote URL
local theme_plugin=$1
local git_remote=$2
if [[ -d "$HOME/.vim/pack/$theme_plugin/opt/$theme_plugin" ]]; then
echo found "$theme_plugin" in ~/.vim/pack/
return
elif grep --quiet "$theme_plugin" < ~/.vimrc; then
echo found "$theme_plugin" in your vimrc
return
elif grep --quiet "$theme_plugin" < ~/.config/nvim/lua/plugins.lua; then
echo found "$theme_plugin" in your plugins.lua
return
elif grep --quiet "$theme_plugin" < ~/.config/nvim/init.vim; then
echo found "$theme_plugin" in your init.vim
return
elif grep --quiet "$theme_plugin" < ~/.config/nvim/init.lua; then
echo found "$theme_plugin" in your init.lua
return
else
echo obtaining vim theme from git repo;
# Clones into pack/<plugin>/opt — an opt package, loaded on demand.
git clone "$git_remote" "$HOME/.vim/pack/$theme_plugin/opt/$theme_plugin"
fi
}
function obtain_neovim_theme_from_git() {
# Ensure a neovim theme plugin is present under
# ~/.config/nvim/pack/<plugin>/start/<plugin>, cloning it when missing.
# $1 - plugin directory name
# $2 - git remote URL
local plugin_name=$1
local remote_url=$2
local dest_dir="$HOME/.config/nvim/pack/$plugin_name/start/$plugin_name"
if [[ -d "$dest_dir" ]]; then
echo found "$plugin_name" in ~/.nvim/pack/*/start
return
fi
echo obtaining neovim theme from git repo;
git clone "$remote_url" "$dest_dir"
}
function obtain_kitty_theme_from_curl() {
# Download a kitty theme .conf from a URL unless it is already cached.
# $1 - theme name (file basename); $2 - URL of the raw .conf file
local theme=$1
local curl_url=$2
[[ -f "$HOME/.config/kitty/$theme.conf" ]] || {
echo obtaining kitty theme from url
(
# Subshell with set -x so the fetched URL is echoed; --fail stops an HTTP
# error page from being written as the theme.
# NOTE(review): on failure the redirection still leaves an empty
# $theme.conf behind, which defeats the existence check — confirm intended.
set -x;
curl --fail "$curl_url" > "$HOME/.config/kitty/$theme.conf";
)
}
}
function obtain_kitty_theme_from_kitten() {
# Dump a builtin kitty theme to ~/.config/kitty/<theme>.conf via the
# "themes" kitten, unless the file already exists.
# $1 - theme name as known to the kitten
local theme=$1
local conf_file="$HOME/.config/kitty/$theme.conf"
if [[ ! -f "$conf_file" ]]; then
echo obtaining kitty theme from kitten
kitty +kitten themes --dump-theme "$theme" > "$conf_file"
fi
}
function obtain_kitty_theme_from_github() {
# Fetch a GitHub-theme kitty .conf from the projekt0n/github-theme-contrib
# repository unless it is already cached locally.
# $1 - theme name (file basename in that repo's themes/kitty directory)
local theme=$1
[[ -f "$HOME/.config/kitty/$theme.conf" ]] || {
echo obtaining kitty theme from github/projekt0n/github-nvim-theme;
curl --fail https://raw.githubusercontent.com/projekt0n/github-theme-contrib/main/themes/kitty/"$theme".conf > ~/.config/kitty/"$theme".conf;
}
}
# Theme may be given as $1; otherwise present an interactive picker
# (fzf when installed, the bash 'select' builtin as a fallback).
theme=$1
if [[ -z "$theme" ]]; then
themes=(
"Apprentice Dark"
"Ayu Dark"
"Ayu Light"
"Ayu Mirage"
"Catppuccin Frappe"
"Catppuccin Latte"
"Catppuccin Macchiato"
"Catppuccin Mocha"
"GitHub Dark"
"GitHub Dark Colorblind"
"GitHub Dark High Contrast"
"GitHub Dark Dimmed"
"GitHub Light"
"GitHub Light High Contrast"
"Gruvbox Dark"
"Gruvbox Light"
"Kanagawa Dragon (dark)"
"Kanagawa Lotus (light)"
"Kanagawa Wave (dark)"
"Nightfox Nightfox"
"Nightfox Carbonfox"
"Nightfox Dayfox"
"Nightfox Dawnfox"
"Nightfox Duskfox"
"Nightfox Nordfox"
"Nightfox Terafox"
"One Dark"
"One Light"
"Rose Pine"
"Rose Pine Dawn"
# "Rose Pine Moon"
"Seoul256 Dark"
"Seoul256 Light"
"TokyoNight Night"
"TokyoNight Day"
"TokyoNight Moon"
"TokyoNight Storm"
"Zenbones Dark"
"Zenbones Light"
"Zenbones Neobones Dark"
"Zenbones Neobones Light"
"Zenbones Seoulbones Dark"
"Zenbones Seoulbones Light"
"Zenbones Zenwritten Dark"
"Zenbones Zenwritten Light"
);
PS3=$'\n'"select theme> "
if command -v fzf &> /dev/null; then
# NOTE(review): '$((' here is parsed as '$( (' — command substitution of a
# subshell that joins the array on newlines — not arithmetic.  A space,
# '$( (', would make this unambiguous.
theme=$(( IFS=$'\n'; echo "${themes[*]}" ) | fzf --border=rounded --prompt='Choose a Theme: ')
else
select theme in "${themes[@]}"; do
break
done
fi
fi
case $theme in
"Apprentice Dark")
obtain_neovim_theme_from_git Apprentice git@github.com:romainl/Apprentice.git
change_vim_background dark
change_vim_colorscheme apprentice
obtain_kitty_theme_from_kitten Apprentice
change_kitty_theme Apprentice
;;
"Ayu Dark")
obtain_neovim_theme_from_git ayu-vim git@github.com:ayu-theme/ayu-vim.git
change_vim_background dark
change_vim_colorscheme ayu
obtain_kitty_theme_from_kitten Ayu
change_kitty_theme Ayu
;;
"Ayu Light")
obtain_neovim_theme_from_git ayu-vim git@github.com:ayu-theme/ayu-vim.git
change_vim_background light
change_vim_colorscheme ayu 'ayucolor=light'
obtain_kitty_theme_from_kitten 'Ayu Light'
change_kitty_theme 'Ayu Light'
;;
"Ayu Mirage")
obtain_neovim_theme_from_git ayu-vim git@github.com:ayu-theme/ayu-vim.git
change_vim_background dark
change_vim_colorscheme ayu 'ayucolor=mirage'
obtain_kitty_theme_from_kitten 'Ayu Mirage'
change_kitty_theme 'Ayu Mirage'
;;
"Catppuccin Frappe")
obtain_neovim_theme_from_git catppuccin https://github.com/catppuccin/nvim.git
change_vim_background dark
change_vim_colorscheme catppuccin 'catppuccin_flavour=frappe'
obtain_kitty_theme_from_curl catppuccin_frappe https://raw.githubusercontent.com/catppuccin/kitty/main/themes/frappe.conf
change_kitty_theme catppuccin_frappe
;;
"Catppuccin Latte")
obtain_neovim_theme_from_git catppuccin https://github.com/catppuccin/nvim.git
change_vim_background light
change_vim_colorscheme catppuccin 'catppuccin_flavour=latte'
obtain_kitty_theme_from_curl catppuccin_latte https://raw.githubusercontent.com/catppuccin/kitty/main/themes/latte.conf
change_kitty_theme catppuccin_latte
;;
"Catppuccin Macchiato")
obtain_neovim_theme_from_git catppuccin https://github.com/catppuccin/nvim.git
change_vim_background dark
change_vim_colorscheme catppuccin 'catppuccin_flavour=macchiato'
obtain_kitty_theme_from_curl catppuccin_macchiato https://raw.githubusercontent.com/catppuccin/kitty/main/themes/macchiato.conf
change_kitty_theme catppuccin_macchiato
;;
"Catppuccin Mocha")
obtain_neovim_theme_from_git catppuccin https://github.com/catppuccin/nvim.git
change_vim_background dark
change_vim_colorscheme catppuccin 'catppuccin_flavour=mocha'
obtain_kitty_theme_from_curl catppuccin_mocha https://raw.githubusercontent.com/catppuccin/kitty/main/themes/mocha.conf
change_kitty_theme catppuccin_mocha
;;
"GitHub Dark")
obtain_neovim_theme_from_git github-nvim-theme git@github.com:projekt0n/github-nvim-theme.git
change_vim_background dark
change_vim_colorscheme github_dark
obtain_kitty_theme_from_github github_dark
change_kitty_theme github_dark
;;
"GitHub Dark High Contrast")
obtain_neovim_theme_from_git github-nvim-theme git@github.com:projekt0n/github-nvim-theme.git
change_vim_background dark
change_vim_colorscheme github_dark_high_contrast
obtain_kitty_theme_from_github github_dark_high_contrast
change_kitty_theme github_dark_high_contrast
;;
"GitHub Dark Colorblind")
obtain_neovim_theme_from_git github-nvim-theme git@github.com:projekt0n/github-nvim-theme.git
change_vim_background dark
change_vim_colorscheme github_dark_colorblind
obtain_kitty_theme_from_github github_dark_colorblind
change_kitty_theme github_dark_colorblind
;;
"GitHub Dark Dimmed")
obtain_neovim_theme_from_git github-nvim-theme git@github.com:projekt0n/github-nvim-theme.git
change_vim_background dark
change_vim_colorscheme github_dark_dimmed
obtain_kitty_theme_from_github github_dark_dimmed
change_kitty_theme github_dark_dimmed
;;
"GitHub Light")
obtain_neovim_theme_from_git github-nvim-theme git@github.com:projekt0n/github-nvim-theme.git
# Fix: these are light themes; 'background=dark' contradicted the
# colorscheme (likely copy-pasted from the GitHub Dark arms above).
change_vim_background light
change_vim_colorscheme github_light
obtain_kitty_theme_from_github github_light
change_kitty_theme github_light
;;
"GitHub Light High Contrast")
obtain_neovim_theme_from_git github-nvim-theme git@github.com:projekt0n/github-nvim-theme.git
change_vim_background light
change_vim_colorscheme github_light_high_contrast
obtain_kitty_theme_from_github github_light_high_contrast
change_kitty_theme github_light_high_contrast
;;
"Gruvbox Dark")
obtain_neovim_theme_from_git gruvbox git@github.com:morhetz/gruvbox.git
change_vim_background dark
change_vim_colorscheme gruvbox
obtain_kitty_theme_from_kitten 'Gruvbox Dark Hard'
change_kitty_theme 'Gruvbox Dark Hard'
;;
"Gruvbox Light")
obtain_neovim_theme_from_git gruvbox git@github.com:morhetz/gruvbox.git
change_vim_background light
change_vim_colorscheme gruvbox
obtain_kitty_theme_from_kitten 'Gruvbox Light Hard'
change_kitty_theme 'Gruvbox Light Hard'
;;
"Kanagawa Dragon (dark)")
obtain_neovim_theme_from_git kanagawa.nvim git@github.com:rebelot/kanagawa.nvim.git
change_vim_background dark
change_vim_colorscheme kanagawa-dragon
obtain_kitty_theme_from_curl 'Kanagawa Dragon' https://raw.githubusercontent.com/rebelot/kanagawa.nvim/master/extras/kanagawa_dragon.conf
change_kitty_theme 'Kanagawa Dragon'
;;
"Kanagawa Lotus (light)")
obtain_neovim_theme_from_git kanagawa.nvim git@github.com:rebelot/kanagawa.nvim.git
change_vim_background light
change_vim_colorscheme kanagawa-lotus
obtain_kitty_theme_from_curl 'Kanagawa Lotus' https://raw.githubusercontent.com/rebelot/kanagawa.nvim/master/extras/kanagawa_light.conf
change_kitty_theme 'Kanagawa Lotus'
;;
"Kanagawa Wave (dark)")
obtain_neovim_theme_from_git kanagawa.nvim git@github.com:rebelot/kanagawa.nvim.git
change_vim_background dark
change_vim_colorscheme kanagawa-wave
obtain_kitty_theme_from_curl 'Kanagawa Wave' https://raw.githubusercontent.com/rebelot/kanagawa.nvim/master/extras/kanagawa.conf
change_kitty_theme 'Kanagawa Wave'
;;
"Nightfox Nightfox")
obtain_neovim_theme_from_git nightfox.nvim git@github.com:EdenEast/nightfox.nvim.git
change_vim_background dark
change_vim_colorscheme nightfox
obtain_kitty_theme_from_curl 'nightfox-nightfox' https://raw.githubusercontent.com/EdenEast/nightfox.nvim/main/extra/nightfox/nightfox_kitty.conf
change_kitty_theme 'nightfox-nightfox'
;;
"Nightfox Carbonfox")
obtain_neovim_theme_from_git nightfox.nvim git@github.com:EdenEast/nightfox.nvim.git
change_vim_background dark
change_vim_colorscheme carbonfox
obtain_kitty_theme_from_curl 'nightfox-carbonfox' https://raw.githubusercontent.com/EdenEast/nightfox.nvim/main/extra/carbonfox/nightfox_kitty.conf
change_kitty_theme 'nightfox-carbonfox'
;;
"Nightfox Dayfox")
obtain_neovim_theme_from_git nightfox.nvim git@github.com:EdenEast/nightfox.nvim.git
change_vim_background dark
change_vim_colorscheme dayfox
obtain_kitty_theme_from_curl 'nightfox-dayfox' https://raw.githubusercontent.com/EdenEast/nightfox.nvim/main/extra/dayfox/nightfox_kitty.conf
change_kitty_theme 'nightfox-dayfox'
;;
"Nightfox Dawnfox")
obtain_neovim_theme_from_git nightfox.nvim git@github.com:EdenEast/nightfox.nvim.git
change_vim_background dark
change_vim_colorscheme dawnfox
obtain_kitty_theme_from_curl 'nightfox-dawnfox' https://raw.githubusercontent.com/EdenEast/nightfox.nvim/main/extra/dawnfox/nightfox_kitty.conf
change_kitty_theme 'nightfox-dawnfox'
;;
"Nightfox Duskfox")
obtain_neovim_theme_from_git nightfox.nvim git@github.com:EdenEast/nightfox.nvim.git
change_vim_background dark
change_vim_colorscheme duskfox
obtain_kitty_theme_from_curl 'nightfox-duskfox' https://raw.githubusercontent.com/EdenEast/nightfox.nvim/main/extra/duskfox/nightfox_kitty.conf
change_kitty_theme 'nightfox-duskfox'
;;
"Nightfox Nordfox")
obtain_neovim_theme_from_git nightfox.nvim git@github.com:EdenEast/nightfox.nvim.git
change_vim_background dark
change_vim_colorscheme nordfox
obtain_kitty_theme_from_curl 'nightfox-nordfox' https://raw.githubusercontent.com/EdenEast/nightfox.nvim/main/extra/nordfox/nightfox_kitty.conf
change_kitty_theme 'nightfox-nordfox'
;;
"Nightfox Terafox")
obtain_neovim_theme_from_git nightfox.nvim git@github.com:EdenEast/nightfox.nvim.git
change_vim_background dark
change_vim_colorscheme terafox
obtain_kitty_theme_from_curl 'nightfox-terafox' https://raw.githubusercontent.com/EdenEast/nightfox.nvim/main/extra/terafox/nightfox_kitty.conf
change_kitty_theme 'nightfox-terafox'
;;
"One Dark")
obtain_neovim_theme_from_git onedark.vim git@github.com:joshdick/onedark.vim.git
change_vim_background dark
change_vim_colorscheme onedark
obtain_kitty_theme_from_kitten 'One Dark'
change_kitty_theme 'One Dark'
;;
"One Light")
obtain_neovim_theme_from_git vim-one git@github.com:rakr/vim-one.git
change_vim_background light
change_vim_colorscheme one
obtain_kitty_theme_from_kitten 'One Half Light'
change_kitty_theme 'One Half Light'
;;
"Rose Pine")
obtain_neovim_theme_from_git rose-pine git@github.com:rose-pine/neovim.git
change_vim_background dark
change_vim_colorscheme rose-pine
obtain_kitty_theme_from_curl 'Rose Pine' https://raw.githubusercontent.com/rose-pine/kitty/main/dist/rose-pine.conf
change_kitty_theme 'Rose Pine'
;;
"Rose Pine Dawn")
obtain_neovim_theme_from_git rose-pine git@github.com:rose-pine/neovim.git
change_vim_background light
change_vim_colorscheme rose-pine
obtain_kitty_theme_from_curl 'Rose Pine Dawn' https://raw.githubusercontent.com/rose-pine/kitty/main/dist/rose-pine-dawn.conf
change_kitty_theme 'Rose Pine Dawn'
;;
# "Rose Pine Moon")
# obtain_neovim_theme_from_git rose-pine git@github.com:rose-pine/neovim.git
# change_vim_background dark 'dark_variant=moon'
# change_vim_colorscheme rose-pine
# obtain_kitty_theme_from_curl 'Rose Pine Moon' https://raw.githubusercontent.com/rose-pine/kitty/main/dist/rose-pine-moon.conf
# change_kitty_theme 'Rose Pine Moon'
# ;;
"Seoul256 Dark")
obtain_neovim_theme_from_git seoul256.vim git@github.com:junegunn/seoul256.vim.git
change_vim_background dark
change_vim_colorscheme seoul256
obtain_kitty_theme_from_curl seoul256 https://raw.githubusercontent.com/malikbenkirane/kitty-colors/master/seoul256.kitty-conf
change_kitty_theme seoul256
;;
"Seoul256 Light")
obtain_neovim_theme_from_git seoul256.vim git@github.com:junegunn/seoul256.vim.git
change_vim_background light
change_vim_colorscheme seoul256-light
obtain_kitty_theme_from_curl seoul256-light https://raw.githubusercontent.com/malikbenkirane/kitty-colors/master/seoul256-light.kitty-conf
change_kitty_theme seoul256-light
;;
"TokyoNight Night")
obtain_neovim_theme_from_git tokyonight.nvim git@github.com:folke/tokyonight.nvim.git
change_vim_background dark
change_vim_colorscheme tokyonight-night
obtain_kitty_theme_from_kitten "Tokyo Night"
change_kitty_theme "Tokyo Night"
;;
"TokyoNight Day")
obtain_neovim_theme_from_git tokyonight.nvim git@github.com:folke/tokyonight.nvim.git
change_vim_background light
change_vim_colorscheme tokyonight-day
obtain_kitty_theme_from_kitten "Tokyo Night Day"
change_kitty_theme "Tokyo Night Day"
;;
"TokyoNight Moon")
obtain_neovim_theme_from_git tokyonight.nvim git@github.com:folke/tokyonight.nvim.git
change_vim_background dark
change_vim_colorscheme tokyonight-moon
# Fix: the kitty themes were swapped between these two arms — the Moon arm
# installed "Tokyo Night Storm" and the Storm arm installed "Tokyo Night
# Moon", so kitty and vim ended up with mismatched variants.
obtain_kitty_theme_from_kitten "Tokyo Night Moon"
change_kitty_theme "Tokyo Night Moon"
;;
"TokyoNight Storm")
obtain_neovim_theme_from_git tokyonight.nvim git@github.com:folke/tokyonight.nvim.git
change_vim_background dark
change_vim_colorscheme tokyonight-storm
obtain_kitty_theme_from_kitten "Tokyo Night Storm"
change_kitty_theme "Tokyo Night Storm"
;;
"Zenbones Dark")
obtain_neovim_theme_from_git zenbones.nvim git@github.com:mcchrish/zenbones.nvim.git
change_vim_background dark
change_vim_colorscheme zenbones zenbones_compat=1
obtain_kitty_theme_from_curl zenbones-dark https://raw.githubusercontent.com/mcchrish/zenbones.nvim/main/extras/kitty/zenbones_dark.conf
change_kitty_theme zenbones-dark
;;
"Zenbones Light")
obtain_neovim_theme_from_git zenbones.nvim git@github.com:mcchrish/zenbones.nvim.git
change_vim_background light
change_vim_colorscheme zenbones zenbones_compat=1
obtain_kitty_theme_from_curl zenbones-light https://raw.githubusercontent.com/mcchrish/zenbones.nvim/main/extras/kitty/zenbones_light.conf
change_kitty_theme zenbones-light
;;
"Zenbones Zenwritten Dark")
obtain_neovim_theme_from_git zenbones.nvim git@github.com:mcchrish/zenbones.nvim.git
change_vim_background dark
change_vim_colorscheme zenwritten zenbones_compat=1
obtain_kitty_theme_from_curl zenbones-zenwritten-dark https://raw.githubusercontent.com/mcchrish/zenbones.nvim/main/extras/kitty/zenwritten_dark.conf
change_kitty_theme zenbones-zenwritten-dark
echo TODO: zenbones requires the \'lush\' lua rock \(can be installed manually\)
;;
"Zenbones Zenwritten Light")
obtain_neovim_theme_from_git zenbones.nvim git@github.com:mcchrish/zenbones.nvim.git
change_vim_background light
change_vim_colorscheme zenwritten zenbones_compat=1
obtain_kitty_theme_from_curl zenbones-zenwritten-light https://raw.githubusercontent.com/mcchrish/zenbones.nvim/main/extras/kitty/zenwritten_light.conf
change_kitty_theme zenbones-zenwritten-light
echo TODO: zenbones requires the \'lush\' lua rock \(can be installed manually\)
;;
"Zenbones Neobones Light")
obtain_neovim_theme_from_git zenbones.nvim git@github.com:mcchrish/zenbones.nvim.git
change_vim_background light
change_vim_colorscheme neobones zenbones_compat=1
obtain_kitty_theme_from_curl zenbones-neobones-light https://raw.githubusercontent.com/mcchrish/zenbones.nvim/main/extras/kitty/neobones_light.conf
change_kitty_theme zenbones-neobones-light
echo TODO: zenbones requires the \'lush\' lua rock \(can be installed manually\)
;;
"Zenbones Neobones Dark")
obtain_neovim_theme_from_git zenbones.nvim git@github.com:mcchrish/zenbones.nvim.git
change_vim_background dark
change_vim_colorscheme neobones zenbones_compat=1
obtain_kitty_theme_from_curl zenbones-neobones-dark https://raw.githubusercontent.com/mcchrish/zenbones.nvim/main/extras/kitty/neobones_dark.conf
change_kitty_theme zenbones-neobones-dark
echo TODO: zenbones requires the \'lush\' lua rock \(can be installed manually\)
;;
"Zenbones Seoulbones Light")
obtain_neovim_theme_from_git zenbones.nvim git@github.com:mcchrish/zenbones.nvim.git
change_vim_background light
change_vim_colorscheme seoulbones zenbones_compat=1
obtain_kitty_theme_from_curl zenbones-seoulbones-light https://raw.githubusercontent.com/mcchrish/zenbones.nvim/main/extras/kitty/seoulbones_light.conf
change_kitty_theme zenbones-seoulbones-light
echo TODO: zenbones requires the \'lush\' lua rock \(can be installed manually\)
;;
"Zenbones Seoulbones Dark")
obtain_neovim_theme_from_git zenbones.nvim git@github.com:mcchrish/zenbones.nvim.git
change_vim_background dark
change_vim_colorscheme seoulbones zenbones_compat=1
obtain_kitty_theme_from_curl zenbones-seoulbones-dark https://raw.githubusercontent.com/mcchrish/zenbones.nvim/main/extras/kitty/seoulbones_dark.conf
change_kitty_theme zenbones-seoulbones-dark
echo TODO: zenbones requires the \'lush\' lua rock \(can be installed manually\)
;;
*)
echo "\"$theme\" is not a recognized theme"
;;
esac
| true
|
be30c7fe781aa2de620908e858c351473bd92469
|
Shell
|
kurotetsuka/amaktet
|
/game
|
UTF-8
| 489
| 2.640625
| 3
|
[] |
no_license
|
#!/bin/bash
# Launch the game (kuro.amaktet.Driver) with LWJGL natives on the library
# path and explicit JVM heap bounds.  Extra arguments pass through to Driver.
cp="-cp lib/*:lib/lwjgl/jar/*:jar/*"
natives="-Djava.library.path=lib/lwjgl/native/linux"
#natives="-Djava.library.path=lib/lwjgl/native/macosx"
#natives="-Djava.library.path=lib/lwjgl/native/solaris"
#natives="-Djava.library.path=lib/lwjgl/native/windows"
memory_min="256m"
memory_max="512m"
memory_min_arg="-Xms$memory_min"
# Fix: was "-Xms$memory_max" — that set the *initial* heap a second time and
# never capped the heap; -Xmx is the maximum-heap-size flag.
memory_max_arg="-Xmx$memory_max"
# Invoke java directly instead of expanding a $command string, so quoted
# user arguments in "$@" survive intact.  $cp and $natives are deliberately
# unquoted: they each carry the flag and its value as separate words.
java $cp $natives $memory_min_arg $memory_max_arg \
    kuro.amaktet.Driver "$@"
| true
|
307adcec07e261c848b8d368dc2f1d5de0f68b0e
|
Shell
|
yuanqingmei/NJU-Bras-Auto-Login
|
/bras.sh
|
UTF-8
| 958
| 3.390625
| 3
|
[] |
no_license
|
#!/bin/sh
# NJU Bras portal auto-login client.
# Usage: bras.sh [USERNAME [PASSWORD]]   - log in (prompts for missing creds)
#        bras.sh out | -o | --out        - log out
USERNAME="$1"
PASSWORD="$2"
# Logout mode short-circuits before any login work.
if test "$1" = "out" -o "$1" = "-o" -o "$1" = "--out"; then
curl -m 5 -sS http://p.nju.edu.cn/portal_io/logout
exit
fi
set -e
# Already logged in?  getinfo answers with "reply_code":0 in that case.
INFO=$(curl -m 5 -sS http://p.nju.edu.cn/portal_io/getinfo)
if echo "$INFO" | grep -q '"reply_code":0'; then
echo "$INFO"
exit
fi
# Prompt interactively for any credential not given on the command line.
if test -z "$USERNAME"; then
read -p 'USERNAME: ' USERNAME
fi
if test -z "$PASSWORD"; then
read -s -p 'PASSWORD: ' PASSWORD
echo
fi
# Challenge-response: send ID + md5(ID + hex(password) + challenge), where ID
# is one random byte in hex.  The two PASSWORD reassignments below build that
# value in place, so their order must not change.
# NOTE(review): taking field 10 of the quoted JSON assumes a fixed response
# layout from getchallenge — verify against the portal's actual reply.
CHALLENGE=$(curl -m 5 -sS http://p.nju.edu.cn/portal_io/getchallenge | cut -d'"' -f10)
ID=$(dd if=/dev/urandom count=1 ibs=1 2>/dev/null | xxd -ps)
PASSWORD=${ID}$(echo -n "$PASSWORD" | xxd -ps)${CHALLENGE}
PASSWORD=${ID}$(echo -n "$PASSWORD" | xxd -ps -r | md5sum | cut -d' ' -f1)
INFO=$(curl -m 5 -sS http://p.nju.edu.cn/portal_io/login -d username="$USERNAME" -d password="$PASSWORD" -d challenge="$CHALLENGE")
echo "$INFO"
# Exit non-zero unless the portal confirmed the login ("reply_code":1).
if ! echo "$INFO" | grep -q '"reply_code":1'; then
exit 1
fi
| true
|
a2ba9f92972eaa30783cabca6c801d472897bed6
|
Shell
|
anirudhreddy18/Linux
|
/Exercise4.sh
|
UTF-8
| 454
| 3.84375
| 4
|
[] |
no_license
|
#!/bin/bash
# Create a user account with a comment field and a random password.
# Must be run as root.  $1 is the account name; remaining args form the
# GECOS comment.  Prints the account name, comment, and generated password.

if [[ $USER != root ]]
then
    echo "User cannot be created"
    exit 1
fi

if [[ $# -eq 0 ]]
then
    echo "Please provide an account name in the arguments supplied"
    exit 1
fi

USERNAME="$1"
shift
COMMENT="$@"
echo "$USERNAME"
echo "$COMMENT"

useradd -c "$COMMENT" -m "$USERNAME"
echo "User Created is $USERNAME"

# Random password: hash the epoch time plus $RANDOM, keep 48 hex chars.
PASSWORD=$(date +%s$RANDOM | sha256sum | head -c48)
# Fix: the original piped the *literal string* "PASSWORD" ('echo PASSWORD',
# missing the $), so every account got the password "PASSWORD" while the
# script printed a different, never-applied random value.
echo "$PASSWORD" | passwd --stdin "$USERNAME"
echo "Password is $PASSWORD"
| true
|
a15c217fa779232d8cb10737d8c04c88ee14d7b1
|
Shell
|
DanielTakeshi/DCUR
|
/bash/paper/offline_ant.sh
|
UTF-8
| 2,061
| 2.5625
| 3
|
[
"MIT"
] |
permissive
|
# ------------------------------------------------------------------------------ #
# Offline RL, for curriculum experiments. CoRL 2021 plan.
# https://github.com/CannyLab/spinningup/pull/32
# ------------------------------------------------------------------------------ #
# Experiment launcher: pins each run to CPUs $CPU1-$CPU2 via taskset and
# sweeps curriculum hyperparameters over 5 seeds per setting.
CPU1=0
CPU2=9
ENV=Ant-v3
TYPE=logged
T_SOURCE=ant_td3_act0-1_s50
B_PATH=ant_td3_act0-1/${T_SOURCE}/buffer/final_buffer-maxsize-1000000-steps-1000000-noise-0.1.p
T_PATH=ant_td3_act0-1/${T_SOURCE}/experiments/sigma_predictor-time_prediction-tot-1000000-seed-02.tar
EPOCHS=250
# ------------------------------------------------------------------------------ #
# (May 28) Final buffer. Different choices for the additive curricula, going up to final buffer.
# NOTE: don't have the ones that decrease PREV (yet), want to find best ones here first.
# ------------------------------------------------------------------------------ #
# Additive curricula: fixed PREV window, sweep the NEXT window size.
PREV=1000000
for NEXT in 50000 100000 200000 500000 1000000 ; do
NAME=ant_td3_offline_curriculum_ep_${EPOCHS}_${TYPE}_p_${PREV}_n_${NEXT}_overlap
for SEED in 90 91 92 93 94 ; do
taskset -c ${CPU1}-${CPU2} python spinup/teaching/offline_rl.py \
--env ${ENV} --exp_name ${NAME} --seed ${SEED} -bp ${B_PATH} --t_source ${T_SOURCE} \
--curriculum ${TYPE} --c_prev ${PREV} --c_next ${NEXT} --epochs ${EPOCHS} --overlap
done
done
# ------------------------------------------------------------------------------ #
# (May 28) Different scaling curricula choices, including the concurrent case.
# ------------------------------------------------------------------------------ #
# Scaling curricula: sweep the scale factor instead of the window sizes.
for C_SCALE in 0.50 0.75 1.00 1.10 ; do
NAME=ant_td3_offline_curriculum_ep_${EPOCHS}_${TYPE}_scale_${C_SCALE}t_overlap
for SEED in 90 91 92 93 94 ; do
taskset -c ${CPU1}-${CPU2} python spinup/teaching/offline_rl.py \
--env ${ENV} --exp_name ${NAME} --seed ${SEED} -bp ${B_PATH} --t_source ${T_SOURCE} \
--curriculum ${TYPE} --c_scale ${C_SCALE} --epochs ${EPOCHS} --overlap
done
done
| true
|
6c528a949fb3f0008a2b3f13d9db989049fea534
|
Shell
|
tmeisenh/dotfiles
|
/files/.zsh-tmeisenh-dotfiles/prompt.zsh
|
UTF-8
| 1,864
| 3.203125
| 3
|
[] |
no_license
|
# Z shell configuration file
#
# Author: Travis Meisenheimer <travis@indexoutofbounds.com>
#
# prompt.zsh setup - sets up git, works with zprezto as well
#********************************************************************
# Runs before each prompt: refresh version-control info and set the terminal
# title (user@host cwd date :: time) via an OSC 0 escape.
function prompt_tmeisenh_precmd() {
vcs_info
print -Pn "\033]0;%n@%m%# %~ %W :: %T\a"
}
# Runs before each command: show the command line ($1) in the terminal title.
function prompt_tmeisenh_preexec() {
print -Pn "\033]0;%n@%m%# <$1> %W :: %T\a"
}
# Prompt terminator: '#' for root, a literal '%' for everyone else
# (doubled because the prompt is percent-expanded).
function prompt_char() {
if [ $UID -eq 0 ]; then echo "#"; else echo "%%"; fi
}
# Main entry point: wires up the precmd/preexec hooks, the color palette,
# vcs_info styling, and the PROMPT string itself.
function prompt_tmeisenh_setup() {
setopt LOCAL_OPTIONS
unsetopt XTRACE KSH_ARRAYS
prompt_opts=(cr percent subst)
# Load required functions.
autoload -Uz add-zsh-hook
autoload -Uz vcs_info
autoload -U promptinit && promptinit
#autoload -U colors && colors # colors must be loaded
# Palette referenced by index from $PROMPT below; entry 6 resets bold/color.
_tmeisenh_colors=(
"%B%F{1}" #red
"%B%F{2}" #green
"%B%F{7}" #white
"%B%F{3}" #yellow
"%B%F{4}" #blue
"%b%f" #clear
)
# Add hook for calling git-info before each command.
add-zsh-hook precmd prompt_tmeisenh_precmd
add-zsh-hook preexec prompt_tmeisenh_preexec
# Set vcs_info parameters.
# check-for-changes (disabled by default) can be slow.
# For actionformats/formats: http://zsh.sourceforge.net/Doc/Release/User-Contributions.html#Version-Control-Information
zstyle ':vcs_info:*' enable git svn
zstyle ':vcs_info:*' check-for-changes true
zstyle ':vcs_info:*' unstagedstr '!'
zstyle ':vcs_info:*' stagedstr '+'
zstyle ':vcs_info:*' actionformats "(%b %u%c (%a)) "
zstyle ':vcs_info:*' formats "(%b %u%c) "
zstyle ':vcs_info:*' nvcsformats ""
# trailing space to separate vcs_info from cwd is in the vcs_info zstyles....
# Red for root / green user@ otherwise, then host, time, vcs info, cwd tail,
# and the prompt character.  Single-quoted so it re-expands at display time.
PROMPT='%(!.${_tmeisenh_colors[1]}.${_tmeisenh_colors[2]}%n@)%m ${_tmeisenh_colors[3]}%@ ${_tmeisenh_colors[4]}${vcs_info_msg_0_}${_tmeisenh_colors[5]}%1d $(prompt_char)${_tmeisenh_colors[6]} '
}
prompt_tmeisenh_setup "$@"
| true
|
6c44209248c095c67ee100efe6101af0aa34de89
|
Shell
|
jappjapp/scripts
|
/GitNoRelease/git-mnr
|
UTF-8
| 222
| 3
| 3
|
[] |
no_license
|
#!/bin/sh
# Merge a branch into the current branch only if the candidate branch
# contains no "NORELEASE" markers anywhere in its tracked files.
# $1 - branch to merge from.
MERGE_INTO=$(git rev-parse --abbrev-ref HEAD)
# Fix: was MERGE_FROM=$@ used unquoted — a branch name is a single argument,
# and unquoted expansion made 'git checkout'/'git merge' fragile.
MERGE_FROM="$1"

git checkout "$MERGE_FROM"
LINES=$(git grep -F "NORELEASE")
if [ -n "$LINES" ]; then
    # Refuse to merge: show the offending lines (and stay on the candidate
    # branch so they can be inspected).
    echo "$LINES"
else
    git checkout "$MERGE_INTO"
    git merge "$MERGE_FROM"
fi
| true
|
595113fc3fb2f8edbe0bfc2da31214ab8a72b301
|
Shell
|
stasiaks/.dotfiles
|
/.config/yadm/bootstrap.d/vundle.sh
|
UTF-8
| 323
| 2.578125
| 3
|
[] |
no_license
|
#!/bin/sh
# Bootstrap Vundle (vim plugin manager) and build coc.nvim on first run.

if ! [ -d "$HOME/.vim/bundle/Vundle.vim" ]; then
    echo "Downloading Vundle"
    git clone https://github.com/VundleVim/Vundle.vim.git ~/.vim/bundle/Vundle.vim
    vim +PluginInstall +qall
fi

if ! [ -d "$HOME/.vim/bundle/coc.nvim/build" ]; then
    # Fix: $HOME was unquoted and the cd result unchecked — if the cd failed,
    # 'yarn install' would run in whatever directory the script started in.
    cd "$HOME/.vim/bundle/coc.nvim" || exit 1
    yarn install
    cd -
fi
| true
|
2fc0ccdf759b4386aa7b8ae55f97e9494a8197e1
|
Shell
|
rkarger-google/magic-modules
|
/.ci/containers/terraform-tester/test_terraform.sh
|
UTF-8
| 3,239
| 3.390625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
set -e
version=$1
pr_number=$2
mm_commit_sha=$3
build_id=$4
project_id=$5
build_step=$6
github_username=modular-magician
if [ "$version" == "ga" ]; then
gh_repo=terraform-provider-google
elif [ "$version" == "beta" ]; then
gh_repo=terraform-provider-google-beta
else
echo "no repo, dying."
exit 1
fi
scratch_path=https://$github_username:$GITHUB_TOKEN@github.com/$github_username/$gh_repo
local_path=$GOPATH/src/github.com/terraform-providers/$gh_repo
mkdir -p "$(dirname $local_path)"
git clone $scratch_path $local_path --single-branch --branch "auto-pr-$pr_number" --depth 1
pushd $local_path
post_body=$( jq -n \
--arg context "${gh_repo}-test" \
--arg target_url "https://console.cloud.google.com/cloud-build/builds;region=global/${build_id};step=${build_step}?project=${project_id}" \
--arg state "pending" \
'{context: $context, target_url: $target_url, state: $state}')
curl \
-X POST \
-u "$github_username:$GITHUB_TOKEN" \
-H "Accept: application/vnd.github.v3+json" \
"https://api.github.com/repos/GoogleCloudPlatform/magic-modules/statuses/$mm_commit_sha" \
-d "$post_body"
post_body=$( jq -n \
--arg context "${gh_repo}-lint" \
--arg target_url "https://console.cloud.google.com/cloud-build/builds;region=global/${build_id};step=${build_step}?project=${project_id}" \
--arg state "pending" \
'{context: $context, target_url: $target_url, state: $state}')
curl \
-X POST \
-u "$github_username:$GITHUB_TOKEN" \
-H "Accept: application/vnd.github.v3+json" \
"https://api.github.com/repos/GoogleCloudPlatform/magic-modules/statuses/$mm_commit_sha" \
-d "$post_body"
# Run the build, lint, and test phases without aborting on failure, collecting
# their statuses for the GitHub status posts below.
set +e
make
lint_exit_code=$?
test_exit_code=1
if [ $lint_exit_code -eq 0 ]; then
  # only run lint & tests if the code compiled
  make lint
  lint_exit_code=$?
  make test
  test_exit_code=$?
fi
# Fold the auxiliary checks into the lint status, keeping earlier failures.
# Fix: the original wrote 'lint_exit_code=$lint_exit_code || $?' after each
# step — an assignment always succeeds, so the '||' branch never ran and
# failures of lint, tools, and docscheck were silently reported as success.
make tools || lint_exit_code=$?
make docscheck || lint_exit_code=$?
set -e
if [ $test_exit_code -ne 0 ]; then
test_state="failure"
else
test_state="success"
fi
if [ $lint_exit_code -ne 0 ]; then
lint_state="failure"
else
lint_state="success"
fi
post_body=$( jq -n \
--arg context "${gh_repo}-test" \
--arg target_url "https://console.cloud.google.com/cloud-build/builds;region=global/${build_id};step=${build_step}?project=${project_id}" \
--arg state "${test_state}" \
'{context: $context, target_url: $target_url, state: $state}')
curl \
-X POST \
-u "$github_username:$GITHUB_TOKEN" \
-H "Accept: application/vnd.github.v3+json" \
"https://api.github.com/repos/GoogleCloudPlatform/magic-modules/statuses/$mm_commit_sha" \
-d "$post_body"
post_body=$( jq -n \
--arg context "${gh_repo}-lint" \
--arg target_url "https://console.cloud.google.com/cloud-build/builds;region=global/${build_id};step=${build_step}?project=${project_id}" \
--arg state "${lint_state}" \
'{context: $context, target_url: $target_url, state: $state}')
curl \
-X POST \
-u "$github_username:$GITHUB_TOKEN" \
-H "Accept: application/vnd.github.v3+json" \
"https://api.github.com/repos/GoogleCloudPlatform/magic-modules/statuses/$mm_commit_sha" \
-d "$post_body"
| true
|
173f634169200205c32d9f50d5a99e7ec091fc7a
|
Shell
|
acrdlph/fcr-project
|
/fcr-scripts/apply_and_challenge.sh
|
UTF-8
| 310
| 2.8125
| 3
|
[] |
no_license
|
#!/bin/bash
# Apply a listing, challenge it, buy one side of the decision market, then
# advance chain time and close the listing.
# $1 - listing name
# $2 - "fail" to buy LONG_ACCEPTED (challenge fails); anything else buys
#      SHORT_ACCEPTED (challenge succeeds)
export LISTING=$1

fcr apply $LISTING 1025 --from=1
fcr challenge $LISTING --from=2

# Fix: $2 must be quoted — when the second argument is omitted, the unquoted
# '[ $2 = "fail" ]' collapses to '[ = fail ]' and errors out instead of
# taking the else branch.
if [ "$2" = "fail" ]; then
    fcr buy $LISTING LONG_ACCEPTED 5 --from=3
else
    fcr buy $LISTING SHORT_ACCEPTED 5 --from=3
fi

# Fast-forward the EVM clock past the challenge period before closing.
evm increaseTime 3600
fcr close $LISTING
echo "Applied, challenged, and closed $LISTING"
| true
|
60257d2da7cca4a4bb7f32c9a8a261279b21bf91
|
Shell
|
zuzu59/logomatch
|
/sort_urls.sh
|
UTF-8
| 436
| 3.625
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
#petit script pour faire trier et supprimer les doublons
#zf190327.1615
#test si l'argument est vide
if [ -z "$1" ]
then
echo -e "
Syntax:
./sort_urls.sh data/liste_urls.csv
"
exit
fi
INPUT=$1
OUTPUT=`echo $1 | sed 's/.csv/_unique.csv/g'`
echo $INPUT
echo $OUTPUT
#exit
echo -e "site,url" > $OUTPUT
echo -e "Avant: "`cat $INPUT |wc -l`
sort -u $INPUT >> $OUTPUT
echo -e "Après: "`cat $OUTPUT |wc -l`
| true
|
2316f7b0550703f37d53138008c0123a5608b63f
|
Shell
|
MatthewWilkes/explain.depesz.com
|
/vagrant_bootstrap.sh
|
UTF-8
| 2,090
| 3.59375
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
export PGDATA="/etc/postgresql/10/main"
export APP_USER="explain"
export APP_PASS="explain"
export BASEDIR="/vagrant"
### This script *can* be run on a Docker Ubuntu VM if desired.
### To do so, additional commands need to be run:
# apt-get update
# apt-get -y -qq install make curl libclone-perl lsb-release sudo
# sed -i "s/exit.*/exit 0/" /usr/sbin/policy-rc.d
# export BASEDIR="/explain"
# Set username in explain.json
sed -i "s/\"username\" : \"explain\"/\"username\" : \"${APP_USER}\"/" ${BASEDIR}/explain.json
sed -i "s/\"password\" : \"explain\"/\"password\" : \"${APP_PASS}\"/" ${BASEDIR}/explain.json
# Install dependencies
curl -s -L cpanmin.us | perl - -n Mojolicious
apt-get -y -qq install wget ca-certificates cpanminus libmojolicious-perl libmail-sender-perl libdate-simple-perl libemail-valid-perl libxml-simple-perl libdbd-pg-perl
# Install Pg::Explain
cpanm -q Pg::Explain
# Install Postgres
echo "deb http://apt.postgresql.org/pub/repos/apt/ $(lsb_release -cs)-pgdg main" > /etc/apt/sources.list.d/pgdg.list
wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add -
sudo apt-get -y -qq update
sudo apt-get -y -qq upgrade
sudo apt-get -y -qq install postgresql-10
sed -i -e "s/peer/trust/" ${PGDATA}/pg_hba.conf
sudo -u postgres psql -tc "select pg_reload_conf()"
# Create database
createdb -U postgres explain
# Create user
psql -qU postgres explain -c "CREATE USER ${APP_USER} WITH PASSWORD '${APP_PASS}'"
# Apply patches
psql -q -f ${BASEDIR}/sql/create.sql -U postgres explain
for i in `seq -f "%03g" 1 10`
do
psql -q -f ${BASEDIR}/sql/patch-${i}.sql -U postgres explain
done
# Apply grants
psql -qU postgres explain -c "GRANT ALL ON plans, users TO ${APP_USER}"
psql -qU postgres explain -c "GRANT ALL ON SCHEMA plans TO ${APP_USER}"
for x in `echo {0..9} {A..Z} {a..z}`
do
psql -qU postgres explain -c "GRANT ALL PRIVILEGES ON plans.\"part_${x}\" TO ${APP_USER}"
done
# Done
echo "Your Vagrant box is all set up. Use 'vagrant ssh' to log in, then call '/vagrant/explain.pl daemon' to start the service"
| true
|
299a45d0e19652f7971d90dc2a30fa8408765ddd
|
Shell
|
dansku/balena_pijuice
|
/pijuice/tags_api/get-device-tag.sh
|
UTF-8
| 472
| 2.53125
| 3
|
[] |
no_license
|
TAG_KEY=$1
RESIN_DEVICE_ID=$2
if ! [ "$RESIN_DEVICE_ID" ] ; then
RESIN_DEVICE_ID=$(curl "https://api.balena-cloud.com/v4/device?\$select=id,uuid&\$filter=uuid%20eq%20'$RESIN_DEVICE_UUID'" -H "Authorization: Bearer $RESIN_API_KEY" | jq '.d[0].id')
fi
curl -X GET "https://api.balena-cloud.com/v4/device_tag?\$filter=((device%20eq%20$RESIN_DEVICE_ID)%20and%20(tag_key%20eq%20'$TAG_KEY'))" \
-H "Content-Type:application/json" \
-H "Authorization: Bearer $RESIN_API_KEY"
| true
|
e2ad0c74f2f3e5a8f5b4a124c5900da75c0f2b9e
|
Shell
|
okapies/travis-sandbox
|
/.travis/build-menoh.sh
|
UTF-8
| 1,330
| 3.921875
| 4
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# retrieve arguments
while [[ $# != 0 ]]; do
case $1 in
--)
shift
break
;;
--source-dir)
ARG_SOURCE_DIR="$2"
shift 2
;;
--install-dir)
ARG_INSTALL_DIR="$2"
shift 2
;;
--link-static)
ARG_LINK_STATIC="$2"
shift 2
;;
-*)
err Unknown option \"$1\"
exit
;;
*)
break
;;
esac
done
# validate the arguments
test -n "${ARG_SOURCE_DIR}" || { echo "--source-dir is not specified"; exit 1; }
test -n "${ARG_LINK_STATIC}" || ARG_LINK_STATIC='false'
echo -e "\e[33;1mBuilding Menoh\e[0m"
cd ${ARG_SOURCE_DIR}/menoh
[ -d "build" ] || mkdir -p build
cd build
if [ -n "${ARG_INSTALL_DIR}" ]; then
CMAKE_INSTALL_PREFIX="-DCMAKE_INSTALL_PREFIX=${ARG_INSTALL_DIR}"
fi
if [ "${ARG_LINK_STATIC}" != "true" ]; then
cmake \
-DCMAKE_BUILD_TYPE=Release \
${CMAKE_INSTALL_PREFIX} \
-DENABLE_TEST=ON ..
else
cmake \
-DCMAKE_BUILD_TYPE=Release \
${CMAKE_INSTALL_PREFIX} \
-DLINK_STATIC_LIBGCC=ON \
-DLINK_STATIC_LIBSTDCXX=ON \
-DLINK_STATIC_LIBPROTOBUF=ON \
-DENABLE_TEST=ON \
..
fi
make
| true
|
a9d5bdbc5178280a8972b652bc9c31d6cb27b5f4
|
Shell
|
prplfoundation/prplMesh-tools
|
/klocwork/kw.sh
|
UTF-8
| 5,812
| 3.875
| 4
|
[
"BSD-2-Clause-Patent",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash
# In order to use the script in non interactive mode,
# enter first argument to be the repo name, one of the following: 'framework', 'common', 'controller', 'agent'
#set -x
echo kwcheck --version
echo current folder: `pwd`
echo number of input arguments: "$#"
declare -a REPOS=("framework" "common" "controller" "agent")
declare TOOLCHAIN_PATH
declare URL_PATH
declare REPORT_PATH
declare REPO_PATH
declare ROOT_PATH
declare PLATFORM_TYPE
# set an initial value for the flag
PASSIVE_MODE=false
# read the options
OPTS=`getopt -o p -n 'kw.sh' -- "$@"`
eval set -- "$OPTS"
# extract options and their arguments into variables.
while true ; do
case "$1" in
"") break ;;
-p) PASSIVE_MODE=true; shift; break ;;
* ) break ;;
esac
done
PASSIVE_MODE_OPT=""
if $PASSIVE_MODE ; then
PASSIVE_MODE_OPT="-f PASSIVE_MODE=ON"
echo "PASSIVE_MODE=ON"
else
echo "PASSIVE_MODE=OFF"
fi
################################################################
####################### Local Functions ########################
################################################################
prepare_kw()
{
# Generate input script to klocwork checker
rm -rf _GO_KW
cat > _GO_KW << DONE
#!/bin/bash
cd `pwd`/../../$REPO
echo "starting kw from folder: \`pwd\`"
echo "../tools/maptools.sh build $REPO $PASSIVE_MODE_OPT -c clean make"
../tools/maptools.sh build $REPO $PASSIVE_MODE_OPT -c clean make
exit
DONE
ROOT_PATH=$(realpath `pwd`/../../)
REPO_PATH=$(realpath `pwd`/../../$REPO)
PLATFORM_TYPE=$(grep -Po "(?<=^TARGET_PLATFORM=).*" $(realpath `pwd`/../../external_toolchain.cfg)) # "ugw"/"rdkb"
echo platfrom identified: $PLATFORM_TYPE
REPORT_PATH=$REPO_PATH/kw_reports/$PLATFORM_TYPE
mkdir -p $REPORT_PATH
if [ "$PLATFORM_TYPE" = "rdkb" ]; then
TOOLCHAIN_PATH=$(realpath `pwd`/../../../../atom_rdkbos/build/tmp/work/core2-32-rdk-linux)
URL_PATH="https://klocwork3-jf.devtools.intel.com:8140/Atom-Puma7-RDKB"
elif [ "$PLATFORM_TYPE" = "ugw" ]; then
TOOLCHAIN_PATH=$(grep -Po "(?<=^PLATFORM_BASE_DIR=).*" $(realpath `pwd`/../../external_toolchain.cfg))
URL_PATH="https://klocwork-iind4.devtools.intel.com:8105/UGW_master_grx350_rt"
fi
}
kw()
{
echo Performing KW on: $REPO.
#prepare any build specific and paths before common section
prepare_kw
# Create a klocwork project based on the feeds compilation
rm -rf .kw*/
kwcheck create --url $URL_PATH || { rm -rf .kw*; kwcheck create; echo "*** WARNING: Creating local KW project, not synced with UGW/RDKB *** " ; }
chmod +x _GO_KW
kwshell -s ./_GO_KW
if [ "$PLATFORM_TYPE" = "ugw" ]; then
# Add checkers/overrides that are used by UGW for SDL
git archive --remote=ssh://git@gts-chd.intel.com:29418/sw_ugw/ugw_sw.git HEAD:kw_support/ kw_override.h | tar -x
git archive --remote=ssh://git@gts-chd.intel.com:29418/sw_ugw/ugw_sw.git HEAD:kw_support/ klocwork_database.kb | tar -x
git archive --remote=ssh://git@gts-chd.intel.com:29418/sw_ugw/ugw_sw.git HEAD:kw_support/ analysis_profile.pconf | tar -x
kwcheck import kw_override.h
kwcheck import klocwork_database.kb
kwcheck import analysis_profile.pconf
fi
# Analyze and generate reports
kwcheck run -j auto
echo ""
echo Generating reports...
kwcheck list -F detailed --status 'Analyze','Fix' --report ${REPORT_PATH}/kwreport_all.log
kwcheck list -F detailed --severity 1 --status 'Analyze','Fix' --report ${REPORT_PATH}/kwreport_critical.log
kwcheck list -F detailed --severity 2 --status 'Analyze','Fix' --report ${REPORT_PATH}/kwreport_error.log
kwcheck list -F detailed --severity 3 --status 'Analyze','Fix' --report ${REPORT_PATH}/kwreport_warning.log
kwcheck list -F detailed --severity 4 --status 'Analyze','Fix' --report ${REPORT_PATH}/kwreport_review.log
kwcheck list -F detailed --status 'Ignore','Not a Problem','Defer' --report ${REPORT_PATH}/kwreport_ignore.log
# finalize reports
# remove local prefixes from source controlled reports
declare -a KW_REPORTS=(${REPORT_PATH}/kwreport_all.log ${REPORT_PATH}/kwreport_critical.log ${REPORT_PATH}/kwreport_error.log ${REPORT_PATH}/kwreport_warning.log ${REPORT_PATH}/kwreport_review.log ${REPORT_PATH}/kwreport_ignore.log)
for r in ${KW_REPORTS[@]}; do
cp $r ${r}.tmp
sed -i -e "s/${ROOT_PATH////\\/}\///g" $r # remove local path prefixes from multiap modules
sed -i -e "s/${TOOLCHAIN_PATH////\\/}\///g" $r # remove local path prefixes from external toolchain files
done
# Generate output summary
declare -a KW_TYPES=("1:Critical" "2:Error" "3:Warning" "4:Review")
echo -e "Summary by components:" > ${REPORT_PATH}/kwreport_summary.log
cp ${REPORT_PATH}/kwreport_all.log ${REPORT_PATH}/kwreport_tmp.log
for t in ${KW_TYPES[@]}; do
issue_cnt=`grep -i $t ${REPORT_PATH}/kwreport_all.log | grep -civ "Ignore\|Not a Problem\|Defer"`
echo " $t: $issue_cnt" >> ${REPORT_PATH}/kwreport_summary.log
done
rm ${REPORT_PATH}/kwreport_tmp.log
echo -e "\nLast KW: `date +'%d/%m/%Y %H:%M'`" >> ${REPORT_PATH}/kwreport_summary.log
echo ""
}
################################################################
####################### Script begining ########################
################################################################
# Repo Select
read -p "On which repo do you with to perfrom klocwork? [0-all, 1-framework, 2-common, 3-controller, 4-agent]: " REPO
case $REPO in
"0") REPO="all" ;;
"1") REPO="framework" ;;
"2") REPO="common" ;;
"3") REPO="controller" ;;
"4") REPO="agent" ;;
*)
echo "Error: unrecognized input value:'$REPO'"
exit 128 # Invalid argument to exit
;;
esac
if [ "$REPO" = "all" ]; then
echo "Performing KW on all repos!"
for REPO in ${REPOS[@]}; do
kw
done
else
kw
fi
echo DONE!
| true
|
f102eb062d3e415a8db22c33406a778fed99765e
|
Shell
|
haopeng/sase-advanced
|
/tests/SimplePattern/DomainSize/domainsize.sh
|
UTF-8
| 1,185
| 3.03125
| 3
|
[] |
no_license
|
#!/bin/bash
# this is for domain size experiments
dirname=domainsize_results_$(date +%Y%m%d-%T)
mkdir $dirname
#set the print to 1 if you want to see the details of the results
#set the print to 0 if you DONOT want to see the details of the results
print=1
echo "domainsize 100"
sase $SASE_HOME/tests/SimplePattern/DomainSize/domainsize.query $SASE_HOME/tests/SimplePattern/DomainSize/domainsize100.stream $print >>$dirname/ds100.result
echo "domainsize 500"
sase $SASE_HOME/tests/SimplePattern/DomainSize/domainsize.query $SASE_HOME/tests/SimplePattern/DomainSize/domainsize500.stream $print >>$dirname/ds500.result
echo "domainsize 1000"
sase $SASE_HOME/tests/SimplePattern/DomainSize/domainsize.query $SASE_HOME/tests/SimplePattern/DomainSize/domainsize1000.stream $print >>$dirname/ds1000.result
echo "domainsize 5000"
sase $SASE_HOME/tests/SimplePattern/DomainSize/domainsize.query $SASE_HOME/tests/SimplePattern/DomainSize/domainsize5000.stream $print >>$dirname/ds5000.result
echo "domainsize 1000"
sase $SASE_HOME/tests/SimplePattern/DomainSize/domainsize.query $SASE_HOME/tests/SimplePattern/DomainSize/domainsize10000.stream $print >>$dirname/ds10000.result
| true
|
e4aed051a45eb999f0daa77307c3b70be5424b95
|
Shell
|
EnginesOS/ServiceImageBuilds
|
/images/03.ServiceImages/ftp/home/engines/scripts/services/add_service.sh
|
UTF-8
| 419
| 2.515625
| 3
|
[] |
no_license
|
#!/bin/sh
. /home/engines/functions/checks.sh
required_values="username password folder"
check_required_values
if ! test -z $rw_access
then
if test $rw_access = true
then
access=rw
else
access=ro
fi
else
access=ro
fi
export ftp_gid access username password folder service_handle service_container_name parent_engine
sudo -n /home/engines/scripts/services/sudo/_add_service.sh
| true
|
f919e436a128eed8acc5c78afd44c0dcb42f2708
|
Shell
|
jsirex/bash-it
|
/plugins/available/rvm.plugin.bash
|
UTF-8
| 418
| 3.296875
| 3
|
[] |
no_license
|
# Load RVM, if you are using it
cite about-plugin
about-plugin 'load rvm, if you are using it'
[[ -s "$HOME/.rvm/scripts/rvm" ]] && source "$HOME/.rvm/scripts/rvm"
# Check to make sure that RVM is actually loaded before adding
# the customizations to it.
if [ "$rvm_path" ]
then
# Load the auto-completion script if RVM was loaded.
[[ -r $rvm_path/scripts/completion ]] && . $rvm_path/scripts/completion
fi
| true
|
b4d00e385ec7de42db4a21a99819f976180db118
|
Shell
|
TetragrammatonHermit/zsh-config
|
/completers/_json_pp
|
UTF-8
| 842
| 2.75
| 3
|
[
"MIT"
] |
permissive
|
#compdef json_pp
local -a from_formats
local -a to_formats
local -a json_opts
from_formats=("json" "eval")
to_formats=("null" "json" "dumper")
json_opts=(
"ascii"
"latin1"
"utf8"
"pretty"
"indent"
"space_before"
"space_after"
"relaxed"
"canonical"
"allow_nonref"
"allow_singlequote"
"allow_barekey"
"allow_bignum"
"loose"
"escape_slash"
)
_arguments -n : \
'(-v)-v[Be slightly more verbose]' \
'(-)-V[Prints version and exits]' \
'(-f)-f[Read a file in the given format from STDIN]: :'"($from_formats)" \
'(-t)-t[Write the file in the given format to STDOUT]: :'"($to_formats)" \
'-json_opt[option to JSON::PP(comma separated)]: :'"($json_opts)" \
'*: :()'
# Local Variables:
# mode: Shell-Script
# sh-indentation: 2
# indent-tabs-mode: nil
# sh-basic-offset: 2
# End:
# vim: ft=zsh sw=2 ts=2 et
| true
|
81ae9540ecda4544bb3d347b369857ab21f85f7a
|
Shell
|
AndresHF/DAW
|
/bash/ejercicio4/ej8.sh
|
UTF-8
| 375
| 3.390625
| 3
|
[] |
no_license
|
#!/bin/bash
# 8 Si al script anterior no se le pasa cadena por parametro la pide por teclado, la pedirá cíclicamente hasta que introduzcas algo
word=$@
while $(echo "$word" | egrep ".{0}") 2> /dev/null; do
echo 'Missing param, type any word:'
read word
done
echo "$word" | grep "$(echo $word | rev)" > /dev/null && echo 'palindrome: true' || echo 'palindrome: false'
| true
|
48e27eca4281e7148e091047ea5a473203c2a56c
|
Shell
|
brblisko/IOS-project1
|
/tradelog
|
UTF-8
| 9,310
| 3.390625
| 3
|
[] |
no_license
|
#!/bin/sh
#|=============================================================================
#| Assignment: 1. Uloha
#|
#| Author: Boris Vesely
#| Login: xvesel92
#|
#| Class: IOS - Operacni systemy
#|
#|=============================================================================
export POSIXLY_CORRECT=yes
export LC_NUMERIC=en_US.UTF-8
print_help() {
echo "Usage: tradelog [-h | --help]"
echo " tradelog [FILTER...] [COMMAND] [LOG1 [LOG2 [...]]]"
echo ""
echo "Commands: list-tick – výpis seznamu vyskytujících se burzovních symbolu, tzv. “tickeru”."
echo " profit – výpis celkového zisku z uzavřených pozic."
echo " pos – výpis hodnot aktuálně držených pozic seřazených sestupně dle hodnoty."
echo " last-price – výpis poslední známé ceny pro každý ticker."
echo " hist-ord – výpis histogramu počtu transakcí dle tickeru."
echo " graph-pos – výpis grafu hodnot držených pozic dle tickeru."
echo ""
echo "Filters: -a DATETIME – after: jsou uvažovány pouze záznamy PO tomto datu (bez tohoto data)."
echo " DATETIME je formátu YYYY-MM-DD HH:MM:SS."
echo ""
echo " -b DATETIME – before: jsou uvažovány pouze záznamy PŘED tímto datem (bez tohoto data)."
echo ""
echo " -t TICKER – jsou uvažovány pouze záznamy odpovídající danému tickeru."
echo " Při více výskytech přepínače se bere množina všech uvedených tickerů."
echo ""
echo " -w WIDTH – u výpisu grafů nastavuje jejich šířku, tedy délku nejdelšího řádku na WIDTH."
echo ""
echo " -h a --help vypíšou nápovědu s krátkým popisem každého příkazu a přepínače."
}
# jednoducha funkcia pre vypis na stderr
echoerr() {
echo "$@" 1>&2
}
gzFiles=""
logFiles=""
command=""
width=""
ticker=""
after=""
before=""
pos_function() {
eval "$readInput | awk -F ';' '$filtr'" | sort -t ';' -k2,2 -r | awk -F ';' '
{
if(tickChck=="")
{
tickChck = $2;
cost = $4;
}
if(tickChck != $2)
{
tmp = sprintf("%.2f",sum * cost);
if(length(tmp) > longestNum)
{
longestNum=length(tmp);
}
printf("%0.2f %s\n",sum * cost, tickChck);
tickChck = $2;
cost = $4;
sum = 0;
}
if($3 == "sell")
{
sum -= $6;
}
if($3 == "buy")
{
sum += $6;
}
}
END{printf("%0.2f %s\n",sum * cost, $2);
tmp = sprintf("%.2f",sum * cost);
if(length(tmp) > longestNum){
longestNum=length(tmp);
}
printf("LongestNum %d\n",longestNum);}' |
sort -g | awk '
{
if($1 == "LongestNum")
{
longNum = $2;
}
if($1 != "LongestNum")
{
tmp = sprintf("%.2f", $1);
printf("%-10s: %*s\n", $2, longNum , tmp);
}
}' | sort -t ':' -k2,2 -n -r
}
while [ "$#" -gt 0 ]; do # spracovanie argumentov
case "$1" in
list-tick | profit | pos | last-price | hist-ord | graph-pos)
if [ -n "$command" ]; then
echoerr ERROR - multiple commands
exit 1
fi
command="$1"
shift
;;
-h | --help)
print_help
exit 0
;;
-w)
if [ -n "$width" ]; then
echoerr ERROR - multiple -w used
exit 1
fi
if [ "$2" -lt 0 ]; then
echoerr ERROR - width cannot be minus
exit 1
fi
width="$2"
shift
shift
;;
-a)
if [ -z "$after" ]; then
after="$2"
else
savedDate=$(date -d "$after" +%s)
newDate=$(date -d "$2" +%s)
if [ "$newDate" -gt "$savedDate" ]; then
after="$2"
fi
fi
shift
shift
;;
-b)
if [ -z "$before" ]; then
before="$2"
else
savedDate=$(date -d "$before" +%s)
newDate=$(date -d "$2" +%s)
if [ "$newDate" -lt "$savedDate" ]; then
after="$2"
fi
fi
shift
shift
;;
-t)
if echo "$2" | grep -q " "; then
echoerr ERROR - ticker cannot contain white space
exit 1
fi
if echo "$2" | grep -q ";"; then
echoerr ERROR - ticker cannot contain \;
exit 1
fi
if [ -z "$ticker" ]; then
ticker="\$2 == \"$2\""
else
ticker="$ticker || \$2 == \"$2\""
fi
shift
shift
;;
*)
if echo "$1" | grep -q .gz$; then
gzFiles="$1 $gzFiles"
else
logFiles="$1 $logFiles"
fi
shift
;;
esac
done
if [ -n "$gzFiles" ] && [ -n "$logFiles" ]; then
readInput="gzip -d -c $gzFiles | cat $logFiles -"
fi
if [ -n "$logFiles" ] && [ -z "$gzFiles" ]; then
readInput="cat $logFiles"
fi
if [ -n "$gzFiles" ] && [ -z "$logFiles" ]; then
readInput="gzip -d -c $gzFiles | cat -"
fi
if
[ -z "$gzFiles" ] && [ -z "$logFiles" ]
then
readInput="cat -"
fi
dateFilter=""
if [ -n "$after" ] && [ -n "$before" ]; then
dateFilter="\$1 > \"$after\" && \$1 < \"$before\""
fi
if [ -n "$after" ] && [ -z "$before" ]; then
dateFilter="\$1 > \"$after\""
fi
if [ -n "$before" ] && [ -z "$after" ]; then
dateFilter="\$1 < \"$before\""
fi
filtr="{ print }"
if [ -n "$dateFilter" ] && [ -n "$ticker" ]; then
filtr="(($dateFilter)&&($ticker))"
fi
if [ -n "$dateFilter" ] && [ -z "$ticker" ]; then
filtr="$dateFilter"
fi
if [ -n "$ticker" ] && [ -z "$dateFilter" ]; then
filtr="$ticker"
fi
case "$command" in
list-tick)
eval "$readInput | awk -F ';' '$filtr'" | awk -F ';' '{ print $2 }' | sort | uniq
exit 0
;;
profit)
eval "$readInput | awk -F ';' '$filtr'" | awk -F ';' '
{
if($3 == "sell")
{
sum += $4 * $6;
}
if($3 == "buy")
{
sum -= $4 * $6;
}
}
END{printf("%.2f\n",sum)}'
exit 0
;;
pos)
pos_function
exit 0
;;
last-price)
eval "$readInput | awk -F ';' '$filtr'" | sort -t ';' -k2,2 -r | sort -t ';' -k2,2 -u | awk -F ';' '
{
tmp = sprintf("%.2f", $4 );
if(length(tmp) > longestNum)
{
longestNum=length(tmp);
}
printf("%0.2f %s\n", $4 , $2);
}
END{printf("LongestNum %d\n",longestNum);}' | sort -g | awk '
{
if($1 == "LongestNum")
{
longNum = $2;
}
if($1 != "LongestNum")
{
tmp = sprintf("%.2f", $1);
printf("%-10s: %*s\n", $2, longNum , tmp);
}
}' | sort -t ':' -k1,1
exit 0
;;
hist-ord)
if [ -z "$width" ]; then
eval "$readInput | awk -F ';' '$filtr'" | awk -F ';' '{ print $2 }' | sort | uniq -c | awk '
{
printf("%-10s:", $2);
for(i;i < $1;i++){
if(i==0){
printf(" ");
}
printf("#");
}
i = 0
printf("\n")
}'
else
eval "$readInput | awk -F ';' '$filtr'" | awk -F ';' '{ print $2 }' | sort | uniq -c | sort -g -r |
awk -v mineWidth="$width" '
{
if(NR == 1){
ratio = mineWidth / $1
}
printf("%-10s:", $2);
for(i;i < int($1 * ratio);i++){
if(i==0){
printf(" ");
}
printf("#");
}
i = 0
printf("\n")
}' | sort -t ':' -k1,1
fi
;;
graph-pos)
pos_function | awk -F ':' '{ printf("%0.2f %s\n",$2, $1); }' | sed 's/^-\(.*\)/\1-/' | sort -g -r |
sed 's/^\(.*\)-$/-\1/' | awk -v mineWidth="$width" '
function abs(v) {return v < 0 ? -v : v}
{
if(mineWidth==""){
printf("%-10s:", $2);
for(i;i < int(abs($1 / 1000));i++){
if(i==0){
printf(" ");
}
if($1 > 0)
{
printf("#");
}
else
{
printf("!");
}
}
i = 0
printf("\n")
}
else{
if(NR == 1){
ratio = mineWidth / $1;
}
printf("%-10s:", $2);
for(i;i < int(abs($1 * ratio));i++){
if(i==0){
printf(" ");
}
if($1 > 0)
{
printf("#");
}
else
{
printf("!");
}
}
i = 0
printf("\n")
}
}' | sort -t ':' -k1,1
;;
*)
eval "$readInput | awk -F ';' '$filtr'"
;;
esac
| true
|
b99417d58577f3f5c31c5421985e59d8bc996707
|
Shell
|
pvdvreede/dokku-gitsecret
|
/install
|
UTF-8
| 683
| 3.265625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
set -eo pipefail; [[ $DOKKU_TRACE ]] && set -x
# install git-secret and gnupg
apt-get update
apt-get install -y git-secret gnupg
[[ $(gpg --list-keys "$(hostname)") ]] || {
# generate gpg key for server
cat >foo <<EOF
%echo Generating a basic OpenPGP key
Key-Type: DSA
Key-Length: 1024
Subkey-Type: ELG-E
Subkey-Length: 1024
Name-Real: $(hostname)
Name-Comment: Server key for $(hostname)
Name-Email: server@$(hostname).bar
Expire-Date: 0
Passphrase: ""
# Do a commit here, so that we can later print "done" :-)
%commit
%echo done
EOF
gpg --batch --generate-key foo
rm foo
}
| true
|
8cdec9dd0949cdb0b404602f09160f4a46719400
|
Shell
|
PhearZero/EDDN
|
/contrib/letsencrypt/certbot-common
|
UTF-8
| 1,448
| 3.96875
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
###########################################################################
# Copy a certificate's files into place, with appropriate ownership and
# mode.
#
# $1 - Name of certificate (i.e. letsencrypt directory names).
# $2 - Source Directory
# $3 - Destination filename for fullchain.pem
# $4 - Destination filename for privkey.pem
# $5 - File ownership to set (user:group)
# $6 - File mode to set (as passed to 'chmod')
###########################################################################
copy_cert() {
CERT_NAME="$1"
SRC_DIR="$2"
DST_FILE_FULLCHAIN="$3"
DST_FILE_PRIVKEY="$4"
CERT_NEW_OWNER="$5"
CERT_NEW_PERMS="$6"
echo "${CERT_NAME}: Copying new files into place..."
# Preserve only the mode as it should be 0600, and thus we won't
# temporarily open up the files for *all* users to read,
# BUT don't preserve the timestamp as we want it to be 'now' so
# that a `find ... -newer <this file>` check works later.
cp -v --preserve=mode ${SRC_DIR}/fullchain.pem ${DST_FILE_FULLCHAIN}
cp -v --preserve=mode ${SRC_DIR}/privkey.pem ${DST_FILE_PRIVKEY}
chown -v ${CERT_NEW_OWNER} ${DST_FILE_FULLCHAIN} ${DST_FILE_PRIVKEY}
chmod -v ${CERT_NEW_PERMS} ${DST_FILE_FULLCHAIN} ${DST_FILE_PRIVKEY}
echo "${CERT_NAME}: Copying new files into place DONE"
}
###########################################################################
# vim: :set filetype=sh tabstop=2 shiftwidth=2 expandtab wrapmargin=0 textwidth=0
| true
|
0a259b949e95ca1a6dbaed2ad37f9239c11c9292
|
Shell
|
ci2c/code
|
/scripts/matthieu/PrepProcess_TRACULA.sh
|
UTF-8
| 1,540
| 3.328125
| 3
|
[] |
no_license
|
#!/bin/bash
if [ $# -lt 2 ]
then
echo ""
echo "Usage: PrepProcess_TRACULA.sh InputDir SubjId OutpuDir"
echo ""
echo " InputDir : Input directory containing the rec/par files"
echo " SubjId : Id of the subject treated"
echo " OutpuDir : Output directory containing the bvec & bval files and DTI32.nii"
echo ""
echo "Usage: PrepProcess_TRACULA.sh InputDir SubjId OutpuDir"
echo ""
exit 1
fi
## I/O management
INPUT_DIR=$1
SUBJ_ID=$2
OUTPUT_DIR=$3
## Creation of bvec & bval files from REC/PAR
DTI_PAR=$(ls ${INPUT_DIR}/${SUBJ_ID}/*dti32*.par)
cd ${INPUT_DIR}/${SUBJ_ID}
par2bval_transpose.sh ${DTI_PAR}
## Copy of bvec & bval files and dti32 nifti to output directory
mkdir -p ${OUTPUT_DIR}/${SUBJ_ID}
cp ${INPUT_DIR}/${SUBJ_ID}/*dti32*.bvec ${INPUT_DIR}/${SUBJ_ID}/*dti32*.bval ${INPUT_DIR}/${SUBJ_ID}/*dti32*.nii.gz ${OUTPUT_DIR}/${SUBJ_ID}
gunzip ${OUTPUT_DIR}/${SUBJ_ID}/*dti32*.nii.gz
## Removal of the last mean DTI frame from input .nii file and merge into DTI32.nii
# fslsplit ${OUTPUT_DIR}/${SUBJ_ID}/*dti32*.nii ${OUTPUT_DIR}/${SUBJ_ID}/vol
# rm -f ${OUTPUT_DIR}/${SUBJ_ID}/vol0033*.gz
# fslmerge -a ${OUTPUT_DIR}/${SUBJ_ID}/DTI32.nii ${OUTPUT_DIR}/${SUBJ_ID}/vol00*.gz
# rm -f ${OUTPUT_DIR}/${SUBJ_ID}/vol00*
# gunzip ${OUTPUT_DIR}/${SUBJ_ID}/DTI32.nii.gz
## Pre-processing
trac-all -prep -c ${OUTPUT_DIR}/dmrirc.example
## Ball-and-stick model fit
trac-all -bedp -c ${OUTPUT_DIR}/dmrirc.example
## Reconstructing white-matter pathways
trac-all -path -c ${OUTPUT_DIR}/dmrirc.example
| true
|
e1ebf602161c33742c99f477c5484038d7604d1f
|
Shell
|
KamDob/PythonEiT
|
/Bash/10.sh
|
UTF-8
| 203
| 3.0625
| 3
|
[] |
no_license
|
#!/bin/bash
echo Podaj sciezke:
read path
list= $( find $path -type f | tr " " "\n")
#list= $( find $path -type f )
#sorted=$(echo $list | tr " " "\n")
#echo $sorted
for file in $list
do
echo "> $file"
done
| true
|
65c90d68548cf5b3f5cb324c91e5920e117957ee
|
Shell
|
kervinck/gigatron-rom
|
/Apps/Apple-1/15puz/make.sh
|
UTF-8
| 348
| 2.734375
| 3
|
[
"BSD-2-Clause",
"MIT"
] |
permissive
|
dasm puzz15.asm -lpuzz15.lst -opuzz15.out
# Create puzz15.gcl file
(
echo '{ Generated by: cd 15puz && ./make.sh }'
# Hex dump
od -v -A n -t x1 puzz15.out |
fmt -1 |
tail +3 |
awk -v A=1024 '
NF>0 {
if(A%16==0)print""
if(A%256==0)printf"\n*=$%x\n",A
printf " #$%-2s",$1
A++}
END {print}
'
) > ../puzz15.gcl
| true
|
3f66f36f3384940d3d0df8c085b5ba3fede49dc1
|
Shell
|
ColadaFF/catch-them-all
|
/Docker/Solr/script.sh
|
UTF-8
| 1,176
| 4.0625
| 4
|
[] |
no_license
|
#!/bin/bash
set -e
appSetup () {
mkdir /etc/cier
touch /etc/cier/.alreadysetup
bash install_solr_service.sh /opt/$SOLR.tgz -p 8080 -u root
mkdir -p /var/solr/data/criminals/data/index
chown -R 777 /var/solr/data/criminals
curl "http://localhost:8080/solr/admin/cores?action=CREATE&name=criminals&instanceDir=criminals"
}
appStart () {
[ -f /etc/cier/.alreadysetup ] && echo "Skipping setup..." || appSetup
service=solr
if (( $(ps -ef | grep -v grep | grep $service | wc -l) > 0 ))
then
echo "$service is running!!!"
else
/etc/init.d/$service start
fi
}
appHelp () {
echo "Available options:"
echo " app:start - Starts all services needed for the meteor app to start"
echo " app:setup - First time setup."
echo " app:help - Displays the help"
echo " [command] - Execute the specified linux command eg. /bin/bash."
}
case "$1" in
app:start)
appStart
;;
app:setup)
appSetup
;;
app:help)
appHelp
;;
*)
if [ -x $1 ]; then
$1
else
prog=$(which $1)
if [ -n "${prog}" ] ; then
shift 1
$prog $@
else
appHelp
fi
fi
;;
esac
exit 0
| true
|
3706295238756b960cc554395d71e9fb9f1adfb2
|
Shell
|
hexgnu/dotfiles
|
/bash/bashrc
|
UTF-8
| 2,969
| 3.171875
| 3
|
[] |
no_license
|
#!/bin/bash
stty -ixon # Disable ctrl-s and ctrl-q.
shopt -s autocd #Allows you to cd into directory merely by typing the directory name.
shopt -s checkwinsize
# Avoid duplicates
# HISTCONTROL=ignoredups:erasedups
# When the shell exits, append to the history file instead of overwriting it
# shopt -s histappend
HISTSIZE= HISTFILESIZE= # Infinite history.
export PROMPT_COMMAND=hexgnu_prompt
function termwide {
local GRAY="\[\033[1;30m\]"
local LIGHT_GRAY="\[\033[0;37m\]"
local WHITE="\[\033[1;37m\]"
local NO_COLOUR="\[\033[0m\]"
local CYAN="\[\033[0;36m\]"
local LIGHT_CYAN="\[\033[1;36m\]"
local NO_COLOR="\[\033[0m\]"
local MAGENTA="\[\033[1;35m\]"
local LIGHT_BLUE="\[\033[1;34m\]"
local YELLOW="\[\033[1;33m\]"
case $TERM in
xterm*)
TITLEBAR='\[\033]0;\u@\h:\w\007\]'
;;
*)
TITLEBAR=""
;;
esac
PS1="$TITLEBAR\
$MAGENTA\${PWD}\
$YELLOW (\$(date +%H:%M))\
$LIGHT_GRAY \$${LIGHT_GRAY} "
PS2="$LIGHT_CYAN-$CYAN-$GRAY-$NO_COLOUR "
}
termwide
function hcat {
highlight --out-format=ansi $* 2> /dev/null || /usr/bin/cat $*
}
export EDITOR="nvim"
export VISUAL="nvim"
export TERM='xterm-256color'
alias diff="diff --color=auto" \
dnf="sudo dnf" \
docker-compose='sudo -E docker-compose' \
docker='sudo -E docker' \
gb="git branch | grep '*' | cut -d ' ' -f 2" \
grep="grep --color=auto" \
ls="ls --color=auto" \
pbcopy='xclip -selection clipboard' \
pbpaste='xclip -selection clipboard -o'
command -v nvim >/dev/null && alias vim="nvim" vimdiff="nvim -d" # Use neovim for vim if present.
if [ -f ~/.git-completion.bash ]; then
. ~/.git-completion.bash &> /dev/null
fi
# This fixes the gpg2 problems I was encountering
if [ -e /run/user/1000/gnupg/S.gpg-agent.ssh ] && [ -n "$(pgrep gpg-agent)" ]; then
SSH_AUTH_SOCK=/run/user/1000/gnupg/S.gpg-agent.ssh; export SSH_AUTH_SOCK;
GPG_AGENT_INFO=/run/user/1000/gnupg/S.gpg-agent:0:1; export GPG_AGENT_INFO;
else
eval $(gpg-agent --daemon --enable-ssh-support) &> /dev/null
fi
export GPG_TTY=$(tty)
# >>> conda initialize >>>
# !! Contents within this block are managed by 'conda init' !!
__conda_setup="$('/home/hexgnu/miniconda3/bin/conda' 'shell.bash' 'hook' 2> /dev/null)"
if [ $? -eq 0 ]; then
eval "$__conda_setup"
else
if [ -f "/home/hexgnu/miniconda3/etc/profile.d/conda.sh" ]; then
. "/home/hexgnu/miniconda3/etc/profile.d/conda.sh"
else
export PATH="/home/hexgnu/miniconda3/bin:$PATH"
fi
fi
unset __conda_setup
# <<< conda initialize <<<
_direnv_hook() {
local previous_exit_status=$?;
trap -- '' SIGINT;
eval "$("/usr/bin/direnv" export bash)";
trap - SIGINT;
return $previous_exit_status;
};
if ! [[ "${PROMPT_COMMAND:-}" =~ _direnv_hook ]]; then
PROMPT_COMMAND="_direnv_hook${PROMPT_COMMAND:+;$PROMPT_COMMAND}"
fi
export GIT_ACCESS_TOKEN=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
# PATH=$PATH:/home/hexgnu/.cargo/bin
# PATH=$PATH:/home/hexgnu/go/bin
# source "$HOME/.cargo/env"
| true
|
68857ef91fbe9fc3e73832496f62fd9c2eb2374d
|
Shell
|
theraccoonbear/OHOL
|
/docker/fetch-and-compile.sh
|
UTF-8
| 575
| 2.78125
| 3
|
[] |
no_license
|
SERVER_NUMBER=$((1 + RANDOM % 9))
SOURCE_URL=http://download$SERVER_NUMBER.onehouronelife.com/downloads/OneLife_Live4_UnixSource.tar.gz
rm -fR /opt/src/OneLife
echo Source: $SOURCE_URL
mkdir -p /opt/game
cd /opt/game
curl -o source.tar.gz $SOURCE_URL
tar xvzf source.tar.gz
./OneLife_Live4_UnixSource/pullAndBuildLatest
cd OneLife/server
./configure 1
make
ln -s ../../OneLifeData7/objects .
ln -s ../../OneLifeData7/transitions .
ln -s ../../OneLifeData7/categories .
sed -i 's/1/0/' settings/useStatsServer.ini
sed -i 's/1/0/' settings/requireTicketServerCheck.ini
| true
|
923d93a0a520a1122a1f12d0b12e3104d92a284f
|
Shell
|
SirSertile/dotfileBackup
|
/dotfiles/autobackup.sh
|
UTF-8
| 522
| 3.8125
| 4
|
[] |
no_license
|
#!/bin/bash
echo "Initializing Backup . . . "
DIRNAME="System Backup for "$(hostname)" on "$( date "+%b %d, %Y %H:%M:%S")
echo $DIRNAME
# CUT UP ARGUMENTS INTO A LIST
# STEP 1 RSYNC STUFF
if [ -z "$1" ]
then
ls
echo "Which directory would you like to back up?"
read directory
sudo rsync -aH "$directory" ./"$DIRNAME"
else
sudo rsync -aH $1 ./"$DIRNAME"
fi
# STEP 2 PUSH TO GOOGLE DRIVE
drive push -hidden -no-prompt ./"$DIRNAME"
# STEP 3 DELETE STUFF FROM FOLDER
sudo rm -r ./"$DIRNAME"
echo "Backup Finished"
| true
|
87e8579e87354a55c50039299b2ff9ca3d1e33ab
|
Shell
|
HelloMyGH/Beikeyun_OPIMG_Tools
|
/BKmkop.sh
|
UTF-8
| 4,596
| 3.859375
| 4
|
[] |
no_license
|
#!/bin/bash
#
# Beikeyun (RK3328) OpenWrt image builder.
# Unpacks/mounts an OpenWrt rootfs (rootfs.tar.gz / ext4-factory.img[.gz] /
# root.ext4[.gz]), merges the Armbian board overlay, then produces a
# bootable image: RK3328 bootloader + ext4 /boot partition + btrfs rootfs.
# Must be run as root.
red="\033[31m"
green="\033[32m"
white="\033[0m"
out_dir="./out"
openwrt_dir="./openwrt"
BOOTLOADER_IMG="$PWD/armbian/beikeyun/others/btld-rk3328.bin"
rootfs_dir="/media/rootfs"
loop=
# Gap reserved in front of partition 1 for the bootloader, and /boot size.
SKIP_MB=16
BOOT_MB=128
echo -e "\n贝壳云Openwrt镜像制作工具"
# Root is required for losetup/mount/mkfs below.
if [ $UID -ne 0 ];then
    echo -e "$red \n 错误:请使用root用户或sudo执行此脚本!$white" && exit
fi
# Recreate the working directories from scratch.
if [ -d $out_dir ]; then
    sudo rm -rf $out_dir
fi
mkdir -p $out_dir/openwrt
sudo mkdir -p $rootfs_dir
# Decompress the OpenWrt firmware, whatever supported form it comes in.
cd $openwrt_dir
if [ -f *ext4-factory.img.gz ]; then
    gzip -d *ext4-factory.img.gz
elif [ -f *root.ext4.gz ]; then
    gzip -d *root.ext4.gz
elif [ -f *rootfs.tar.gz ] || [ -f *ext4-factory.img ] || [ -f *root.ext4 ]; then
    [ ]   # already unpacked -- nothing to do
else
    echo -e "$red \n openwrt目录下不存在固件或固件类型不受支持! $white" && exit
fi
# Extract or mount the firmware rootfs.
if [ -f *rootfs.tar.gz ]; then
    sudo tar -xzf *rootfs.tar.gz -C ../$out_dir/openwrt
elif [ -f *ext4-factory.img ]; then
    loop=$(sudo losetup -P -f --show *ext4-factory.img)
    if ! sudo mount -o rw ${loop}p2 $rootfs_dir; then
        echo -e "$red \n 挂载OpenWrt镜像失败! $white" && exit
    fi
elif [ -f *root.ext4 ]; then
    sudo mount -o loop *root.ext4 $rootfs_dir
fi
# Copy the OpenWrt rootfs plus the board overlay into the staging dir.
echo -e "$green \n 提取OpenWrt ROOTFS... $white"
cd ../$out_dir
if df -h | grep $rootfs_dir > /dev/null 2>&1; then
    sudo cp -r $rootfs_dir/* openwrt/ && sync
    sudo umount $rootfs_dir
    [ $loop ] && sudo losetup -d $loop
fi
sudo cp -r ../armbian/beikeyun/rootfs/* openwrt/ && sync
# Ask for the rootfs partition size and sanity-check it against the staging dir.
echo && read -p "请输入ROOTFS分区大小(单位MB),默认256M: " rootfssize
[ $rootfssize ] || rootfssize=256
# NOTE(review): `du -hs` prints human-readable units, so this parse only
# works while the staging dir is MB-sized -- confirm for larger firmwares.
openwrtsize=$(sudo du -hs openwrt | cut -d "M" -f 1)
[ $rootfssize -lt $openwrtsize ] && \
echo -e "$red \n ROOTFS分区最少需要 $openwrtsize MB! $white" && \
exit
echo -e "$green \n 生成空镜像(.img)... $white"
fallocate -l ${rootfssize}MB "$(date +%Y-%m-%d)-openwrt-beikeyun-auto-generate.img"
# Partition the image (MBR: p1 ext4 /boot, p2 btrfs rootfs).
echo -e "$green \n 格式化... $white"
loop=$(sudo losetup -P -f --show *.img)
[ ! $loop ] && \
echo -e "$red \n 格式化失败! $white" && \
exit
sudo parted -s $loop mklabel msdos> /dev/null 2>&1
# /boot partition, starting right after the bootloader gap.
START=$((SKIP_MB * 1024 * 1024))
END=$((BOOT_MB * 1024 * 1024 + START -1))
sudo parted $loop mkpart primary ext4 ${START}b ${END}b >/dev/null 2>&1
# rootfs partition fills the remainder.
START=$((END + 1))
END=$((rootfssize * 1024 * 1024 + START -1))
sudo parted $loop mkpart primary btrfs ${START}b 100%
sudo parted $loop print
# mk boot filesystem (ext4)
BOOT_UUID=$(uuid)
sudo mkfs.ext4 -U ${BOOT_UUID} -L EMMC_BOOT ${loop}p1
echo "BOOT UUID IS $BOOT_UUID"
# mk root filesystem (btrfs)
ROOTFS_UUID=$(uuid)
sudo mkfs.btrfs -U ${ROOTFS_UUID} -L EMMC_ROOTFS1 ${loop}p2
echo "ROOTFS UUID IS $ROOTFS_UUID"
echo "parted ok"
# Write the bootloader: MBR boot code (446 bytes), then the rest after the MBR.
echo $PWD
sudo dd if=${BOOTLOADER_IMG} of=${loop} bs=1 count=446
sudo dd if=${BOOTLOADER_IMG} of=${loop} bs=512 skip=1 seek=1
sudo sync
# Mount both partitions under per-UUID directories.
boot_dir=/media/$BOOT_UUID
rootfs_dir=/media/$ROOTFS_UUID
sudo rm -rf $boot_dir $rootfs_dir
sudo mkdir $boot_dir $rootfs_dir
sudo mount -t ext4 ${loop}p1 $boot_dir
sudo mount -t btrfs -o compress=zstd ${loop}p2 $rootfs_dir
# Write /etc/fstab for the target system.
# BUG FIX: the original wrote the "/" line with the *boot* UUID and the
# "/boot" line with the *rootfs* UUID (swapped), and the second write used
# ">" which truncated the first line instead of appending.
sudo echo "UUID=$ROOTFS_UUID / btrfs compress=zstd 0 1">openwrt/etc/fstab
sudo echo "UUID=$BOOT_UUID /boot ext4 noatime,errors=remount-ro 0 2">>openwrt/etc/fstab
sudo echo "tmpfs /tmp tmpfs defaults,nosuid 0 0">>openwrt/etc/fstab
# Generate the u-boot environment file for the board.
cd ../
sudo rm -rf armbian/beikeyun/boot/armbianEnv.txt
sudo touch armbian/beikeyun/boot/armbianEnv.txt
sudo cat > armbian/beikeyun/boot/armbianEnv.txt <<EOF
verbosity=7
overlay_prefix=rockchip
rootdev=/dev/mmcblk0p2
rootfstype=btrfs
rootflags=compress=zstd
extraargs=usbcore.autosuspend=-1
extraboardargs=
fdtfile=rk3328-beikeyun.dtb
EOF
# Populate the partitions.
sudo cp -r armbian/beikeyun/boot /media/$BOOT_UUID
sudo chown -R root:root $out_dir/openwrt/
sudo mv $out_dir/openwrt/* /media/$ROOTFS_UUID
# Unmount everything and detach the loop device.
if df -h | grep $rootfs_dir > /dev/null 2>&1 ; then
    sudo umount /media/$BOOT_UUID /media/$ROOTFS_UUID
fi
# BUG FIX: the detach guard tested the undefined variable $loopp1, so the
# loop device was never released; test $loop instead.
[ $loop ] && sudo losetup -d $loop
# Remove leftovers.
sudo rm -rf $boot_dir
sudo rm -rf $rootfs_dir
sudo rm -rf $out_dir/openwrt
echo -e "$green \n 制作成功, 输出文件夹 --> $out_dir $white"
| true
|
b82adc27d508c39fc796f68afcd0357ce3df60c7
|
Shell
|
lunar-linux/moonbase-core
|
/utils/util-linux/make-issue
|
UTF-8
| 280
| 3.140625
| 3
|
[] |
no_license
|
#!/bin/bash
# Regenerate /etc/issue and /etc/issue.net from the Lunar release file
# (or a generic banner when the release file is missing).
[ ! -w /etc/ ] && exit 0
for issue_file in /etc/issue /etc/issue.net ; do
  if [ -f /etc/lunar.release ] ; then
    cat /etc/lunar.release > $issue_file
  else
    echo "Lunar Linux" > $issue_file
  fi
  echo "Kernel $(uname -r) on an $(uname -m)" >> $issue_file
  echo "" >> $issue_file
done
| true
|
2b9d45312c429536535752e4830d8ef82fe1c685
|
Shell
|
transferwise/spire
|
/.github/workflows/scripts/push-scratch-images.sh
|
UTF-8
| 980
| 3.90625
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Tag the locally built "*-scratch:latest-local" images for GHCR and push
# them under the org that owns this repository.
# Usage: push-images.sh IMAGETAG
set -e

IMAGETAG="$1"
if [ -z "$IMAGETAG" ]; then
    echo "IMAGETAG not provided!" 1>&2
    echo "Usage: push-images.sh IMAGETAG" 1>&2
    exit 1
fi

# Extracting org name rather than hardcoding allows this
# action to be portable across forks.
# ${GITHUB_REPOSITORY%%/*} strips everything from the first "/" onwards,
# replacing the previous echo|tr|head|tr pipeline with a builtin expansion.
ORGNAME="${GITHUB_REPOSITORY%%/*}"

echo "Pushing images tagged as $IMAGETAG..."
for img in spire-server spire-agent oidc-discovery-provider; do
    ghcrimg="ghcr.io/${ORGNAME}/${img}:${IMAGETAG}"
    # Detect the oidc image and give it a different name for GHCR
    # TODO: Remove this hack and fully rename the image once we move
    # off of GCR.
    if [ "$img" == "oidc-discovery-provider" ]; then
        ghcrimg="ghcr.io/${ORGNAME}/spire-oidc-provider:${IMAGETAG}"
    fi
    echo "Executing: docker tag $img-scratch:latest-local $ghcrimg"
    docker tag "$img"-scratch:latest-local "$ghcrimg"
    echo "Executing: docker push $ghcrimg"
    docker push "$ghcrimg"
done
| true
|
4be2ee318ce628836c104f1d6b7d543d82ff07d6
|
Shell
|
kroniak/dotnext2019-mutation-analysis
|
/scripts/test.coverage.sh
|
UTF-8
| 269
| 2.53125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
set -euo pipefail

# Absolute directory of this script, independent of the caller's CWD.
script_root="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"

# Run the test suite with Coverlet coverage collection, writing an lcov
# report next to the repo root (xunit's own assemblies are excluded).
dotnet test -c Release /p:CollectCoverage=true \
    /p:Exclude="[xunit.*]*" \
    /p:CoverletOutputFormat=lcov /p:CoverletOutput="${script_root}/../lcov" \
    "${script_root}/../test"
| true
|
69672611bea7d29283193fde725bccda171e1374
|
Shell
|
nguyenvanca2110/openptack_liberty_scripts
|
/net-05-glance.sh
|
UTF-8
| 3,783
| 2.78125
| 3
|
[] |
no_license
|
#!/bin/bash
# Install and configure the OpenStack (Liberty) Glance image service on the
# controller node, then upload a CirrOS test image.
# Requires net-config.cfg (provides DEFAULT_PASS) and ~/admin-openrc.sh
# (admin credentials) in the expected locations.
source net-config.cfg
source ~/admin-openrc.sh
install_path=`pwd`
echo "##### INSTALL GLANCE ##### "
apt-get -y install glance python-glanceclient
# Rewrite glance-api.conf from scratch, keeping a one-time .org backup.
filename=/etc/glance/glance-api.conf
test -f $filename.org || cp $filename $filename.org
rm -f $filename
cat << EOF > $filename
[DEFAULT]
notification_driver = noop
verbose = True
[database]
connection = mysql+pymysql://glance:$DEFAULT_PASS@controller/glance
backend = sqlalchemy
[glance_store]
default_store = file
filesystem_store_datadir = /var/lib/glance/images/
[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
auth_plugin = password
project_domain_id = default
user_domain_id = default
project_name = service
username = glance
password = $DEFAULT_PASS
[paste_deploy]
flavor = keystone
EOF
chown glance:glance $filename
# Same treatment for glance-registry.conf.
filename=/etc/glance/glance-registry.conf
test -f $filename.org || cp $filename $filename.org
rm -f $filename
cat << EOF > $filename
[DEFAULT]
notification_driver = noop
verbose = True
[database]
connection = mysql+pymysql://glance:$DEFAULT_PASS@controller/glance
backend = sqlalchemy
[keystone_authtoken]
auth_uri = http://controller:5000
auth_url = http://controller:35357
auth_plugin = password
project_domain_id = default
user_domain_id = default
project_name = service
username = glance
password = $DEFAULT_PASS
[paste_deploy]
flavor = keystone
EOF
chown glance:glance $filename
echo "##### DB SYNC #####"
# Populate the glance DB schema, then restart both services so they pick
# up the new configuration.
glance-manage db_sync
service glance-registry restart
service glance-api restart
apt-get -y install qemu-utils
mkdir -p ~/images
cd ~/images
echo "############ CREATE CIRROS IMAGE ##############"
wget http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-x86_64-disk.img
glance image-create --name "cirros-0.3.4-x86_64" --file cirros-0.3.4-x86_64-disk.img \
--disk-format qcow2 --container-format bare --visibility public --progress
##echo "############ CREATE UBUNTU (UBUNTU/UBUNTU) ##############"
##wget http://uec-images.ubuntu.com/trusty/current/trusty-server-cloudimg-amd64-disk1.img
##
##qemu-img convert -c -O qcow2 trusty-server-cloudimg-amd64-disk1.img trusty-server-cloudimg-amd64-disk1_8GB.qcow2
##qemu-img resize trusty-server-cloudimg-amd64-disk1_8GB.qcow2 +8G
##modprobe nbd
##qemu-nbd -c /dev/nbd0 `pwd`/trusty-server-cloudimg-amd64-disk1_8GB.qcow2
##ls image || mkdir image
##mount /dev/nbd0p1 image
##
##sed -ri 's|(/boot/vmlinuz-.*-generic\s*root=LABEL=cloudimg-rootfs.*)$|\1 ds=nocloud|' image/boot/grub/grub.cfg
##sed -ri 's|^(GRUB_CMDLINE_LINUX_DEFAULT=).*$|\1" ds=nocloud"|' image/etc/default/grub
##sed -ri 's|^#(GRUB_TERMINAL=console)$|\1|' image/etc/default/grub
##
##mkdir -p image/var/lib/cloud/seed/nocloud
##
##tee image/var/lib/cloud/seed/nocloud/meta-data <<EOF
##instance-id: ubuntu
##local-hostname: ubuntu
##EOF
##
##tee image/var/lib/cloud/seed/nocloud/user-data <<EOF
###cloud-config
##password: ubuntu
##chpasswd: { expire: False }
##ssh_pwauth: True
##EOF
##
##sed -ri "s|^(127.0.0.1\s*localhost)$|\1\n127.0.0.1 `cat image/etc/hostname`|" image/etc/hosts
##
##sync
##umount image
##qemu-nbd -d /dev/nbd0
##modprobe -r nbd > /dev/null 2>&1
##
##glance image-create --name "ubuntu-server-14.04" \
## --file trusty-server-cloudimg-amd64-disk1_8GB.qcow2 \
## --disk-format qcow2 --container-format bare --visibility public --progress
##
##glance image-list
## android download
## http://sourceforge.net/projects/androidx86-openstack/?source=typ_redirect
#glance image-create --name "androidx86-4.4" \
# --file androidx86-4.4.qcow2 \
# --disk-format qcow2 --container-format bare --visibility public --progress
rm -rf ~/images
cd $install_path
exit 0
| true
|
d85ca1e179758510a898c0358d25ca5b0a6fddb4
|
Shell
|
dagrayvid/ci-artifacts
|
/testing/model-mesh/00-cleanup.sh
|
UTF-8
| 222
| 2.875
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Tear down the model-mesh test namespaces: the MinIO namespace plus every
# numbered test namespace created by the setup scripts (names from config.sh).
this_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
source "$this_dir/config.sh"

oc delete ns ${MINIO_NS}

for n in $(seq 1 ${NS_COUNT}); do
    oc delete ns/${NS_BASENAME}-${n}
done
| true
|
c5d28f242d67fa4ab355c4230d16cfe2a54ca744
|
Shell
|
wileyj/public-puppet
|
/code/modules/local/sensu/files/etc/sensu/plugins/local/check-mysql-heartbeat.sh
|
UTF-8
| 1,041
| 3.84375
| 4
|
[] |
no_license
|
#!/bin/bash
# Nagios/Sensu-style check of MySQL heartbeat freshness: measures how far
# the heartbeat timestamp (row id=1) lags behind NOW() and maps the lag to
# an exit code.
# Exit codes: 0 = OK (< warn), 1 = WARNING (>= warn), 2 = CRITICAL
# (>= crit, bad arguments, or any query/table failure).
MYSQL="/opt/mysql/product/5.0.77/bin/mysql"
if [ "$#" -ne 7 ]; then
    echo "Usage: $0 warn_thresh crit_thresh mysql_user mysql_passwd mysql_ip mysql_port mysql_db"
    exit 2
fi
WARN=$1
CRIT=$2
USER=$3
PASSWORD=$4
IP=$5
PORT=$6
DB=$7
# First check to see that heartbeat table has exactly one row with id=1.
# Anything different means the table is somehow damaged and an alert
# should be generated
output=$($MYSQL -s -e "SELECT id FROM heartbeat where id=1" -u"$USER" -p"$PASSWORD" -A -h"$IP" -P"$PORT" "$DB" | /bin/grep -v id)
res=$?
if [ $res -ne 0 ]; then
    exit 2
fi
# Lag in whole seconds between the heartbeat timestamp and NOW().
output=$($MYSQL -s -e "SELECT abs(time_to_sec(timediff(ts,now()))) as timedifference FROM heartbeat WHERE id=1" -u"$USER" -p"$PASSWORD" -A -h"$IP" -P"$PORT" "$DB" | /bin/grep -v timedifference)
res=$?
if [ $res -ne 0 ]; then
    exit 2
fi
# "$output" is quoted so an empty query result falls through every numeric
# comparison (each `[ ... ]` errors, i.e. is false) into the CRITICAL
# default below instead of aborting the comparisons outright.
if [ "$output" -ge "$CRIT" ]; then
    return=2
elif [ "$output" -ge "$WARN" ]; then
    return=1
elif [ "$output" -lt "$WARN" ] && [ "$output" -ge 0 ]; then
    return=0
else
    #unknown condition (should never happen)
    return=2
fi
echo $output
exit $return
| true
|
72055ec46d50774585c5571ea754b23220421b14
|
Shell
|
simeonackermann/dc-sparql-experimente
|
/test.sh
|
UTF-8
| 2,385
| 4.3125
| 4
|
[] |
no_license
|
#!/bin/bash
#
# This script runs a Docker Compose environment a given number of times
# run: ./test.sh [number]
#
# @author: Simeon Ackermann (amseon@web.de)
#
# Init the vars
########################
# the result folder
RESULT_DIR=$PWD/Results
# number of runs to perform (default 1; overridden by the first argument)
RUN_TIMES=1
# the prefix for the container names
COMPOSE_PREFIX="sp-ts-"
# title of this project
TITLE="SPARQL Experiments"
# End of init
########################
# Print a short usage hint for this script.
help() {
  echo "Please use: (e.g. ./test.sh [numbers of testcases])"
}
# Entry point: parse the optional run-count argument, verify the Docker
# tooling is available, then execute the test environment the requested
# number of times.
# $1 - "help" for usage, or the number of test runs (default: 1).
function main()
{
echo "_____________________________________________"
echo -e "\n# ${TITLE} Setup #\n"
if [[ "$1" = "help" ]]; then
help
exit 0
fi
if [[ -n $1 ]]; then
RUN_TIMES=$1
fi
echo -e "# Number of tests: ${RUN_TIMES} \n"
# BUG FIX: a stray debugging `exit 0` here made everything below
# unreachable, so no test run was ever executed. It has been removed.
# Do some startup checks
check
#for RUN in {1..$RUN_TIMES}
for (( run=1; run<=$RUN_TIMES; run++ )); do
run $run
done
}
# run N -- execute one full test iteration: clean up stale containers,
# bring the compose environment up, clean up again, then archive the
# Iguana result CSVs into ${RESULT_DIR}/Test-N (plus the zipped raw output).
function run()
{
run=$1
echo -e "# ${TITLE} - Start run ${run}"
echo -e "_____________________________________________\n"
# first may cleanup old containers
cleanup
# start env
docker-compose up
# cleanup again after run
cleanup
# move results
mkdir -p ${RESULT_DIR}/Test-${run}
cp ${RESULT_DIR}/results_0/org.aksw.iguana.testcases.StressTestcase1.0/1/0/*.csv ${RESULT_DIR}/Test-${run}/
rm -rf ${RESULT_DIR}/results_0
mv ${RESULT_DIR}/results_0.zip ${RESULT_DIR}/Test-${run}.zip
# Done ;)
echo "_____________________________________________"
echo -e "\n# ${TITLE} - Run ${run} done"
echo -e "\n# Check your results in ${RESULT_DIR}/Test-${run}\n"
}
# Stop and remove any leftover containers whose names start with
# ${COMPOSE_PREFIX}, so each run begins from a clean slate.
function cleanup()
{
local ids
ids=$(docker ps -a -q --filter "name=${COMPOSE_PREFIX}")
if [[ -n $ids ]]; then
echo "[INFO] Stop our running containers ..."
docker stop $ids
fi
ids=$(docker ps -a -q --filter "name=${COMPOSE_PREFIX}")
if [[ -n $ids ]]; then
echo "[INFO] Delete the containers to cleanup ..."
docker rm -v $ids
fi
}
# Verify the Docker tooling is installed before doing anything destructive.
function check()
{
if ! command -v docker >/dev/null 2>&1; then
echo >&2 "[ERROR] Please install Docker (http://docker.com/) first!"
exit 1
fi
if ! command -v docker-compose >/dev/null 2>&1; then
echo >&2 "[ERROR] Please install Docker Compose (http://docs.docker.com/compose/) first!"
exit 1
fi
# cleanup results dir
# rm -rf ${RESULT_DIR}/*
}
main $1
| true
|
8b4e9f9ad42dd09c1d88c3d18b12270ef701ccb7
|
Shell
|
isnuryusuf/provision-content
|
/content/templates/set-hostname.sh.tmpl
|
UTF-8
| 571
| 3.578125
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#
# This template populates the HOSTNAME of the system in various places.
# It also exports the HOSTNAME variable for use by other templates.
#
# Runs as part of a shell script for kickstart or net-post-install
#
# Required Parameters:
# Optional Parameters:
#
# Parameter YAML format:
#
# Defaults:
#
# {{.Machine.Name}} is expanded by the provisioner's template engine
# before this script runs.
HOSTNAME="{{.Machine.Name}}"
# Legacy RH-style config keeps the hostname in /etc/sysconfig/network.
if [ -f /etc/sysconfig/network ] ; then
sed -i -e "s/HOSTNAME=.*/HOSTNAME=${HOSTNAME}/" /etc/sysconfig/network
fi
# Strip the first label to get the domain part. NOTE(review): if the name
# contains no dot this writes the full hostname -- confirm that is intended.
echo "${HOSTNAME#*.}" >/etc/domainname
echo "$HOSTNAME" >/etc/hostname
hostname "$HOSTNAME"
export HOSTNAME
| true
|
748c50f28d386acd179c70b9adb8ccfa0eb33d99
|
Shell
|
hhanccheng/Quicksetup
|
/webserver/setup.sh
|
UTF-8
| 631
| 2.625
| 3
|
[] |
no_license
|
#! /bin/bash
#Quick configuration of nginx front-end and apache back-end on archlinux
#apache
pacman -Syu apache
systemctl start httpd
#Database
pacman -S mariadb
# Initialize the MariaDB data directory before the first start.
mysql_install_db --user=mysql --basedir=/usr --datadir=/var/lib/mysql
systemctl start mariadb
mysql_secure_installation
#php
pacman -S php php-apache
#nginx
pacman -S nginx
#configuration files -- install the prepared configs over the package defaults
mv httpd.conf /etc/httpd/conf/httpd.conf
mv nginx.conf /etc/nginx/nginx.conf
systemctl enable httpd mariadb nginx
systemctl start httpd mariadb nginx
# Default: nginx index files in /ust/share/nginx/html with port 80, apache index file is in /srv/http with port 8080
| true
|
288d87efa2d1dc448d726c5679ba8bd95013d3d7
|
Shell
|
matalangilbert/mats-bash-scripts
|
/ipchange/email-when-ip-changes.sh
|
UTF-8
| 691
| 3.6875
| 4
|
[] |
no_license
|
#!/bin/bash
# sends an email when external IP changes
# Compares the current external IP (queried from ifconfig.me) with the one
# cached from the previous run; on change, logs it and emails the address
# stored in email.txt, then rotates the cache for the next run.
# Guard the cd so a missing directory can't make the rm/mv below run
# somewhere unexpected.
cd /home/fft/mats-bash-scripts/ipchange/ || exit 1
# Put email address in email.txt
read -r EMAIL < email.txt
touch old_ip.dat
curl -s ifconfig.me > curr_ip.dat
read -r CURR < curr_ip.dat
read -r OLD < old_ip.dat
if [ "$CURR" = "$OLD" ]; then
    echo "$(date) -- ip change script -- IP hasn't changed: $CURR -- doing \
nothing" | xargs -0 logger
else
    echo "$(date) -- ip change script -- New IP is: $CURR. Sending email" | \
        xargs -0 logger
    echo "$(date) -- ip change script -- New Raspberry Pi IP is: $CURR" > temp
    mail -s "Raspberry Pi external IP has changed" "$EMAIL" < temp
fi
# Rotate the cache so the next run compares against today's IP.
rm -f old_ip.dat temp
mv curr_ip.dat old_ip.dat
| true
|
3e350cd42fb6911c6600df64394b3799c653cf2e
|
Shell
|
lujulia/Gaussian-Dynamic-Convolution
|
/train.sh
|
UTF-8
| 307
| 2.5625
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Training scripts for Highway Net.
# Prepare for data.
# NOTE(review): assumes $PAI_DATA_DIR points at the Cityscapes archive on
# HDFS -- confirm against the PAI job configuration.
mkdir -p data/cityscapes && cd data/cityscapes
echo "Data Downloading..."
hdfs dfs -get $PAI_DATA_DIR
echo "Data Unpackaging..."
# -I pigz decompresses with parallel gzip for speed.
tar -I pigz -xf cityscapes_formated.tar
cd ../..
# Start training.
python3 train.py
# Keep the container alive after training finishes.
sleep infinity
| true
|
0dd1d5c0adb0ffdf9e2747802e7e31cf74fe0156
|
Shell
|
akemery/initiation_to_linux
|
/usageoflstouch/run
|
UTF-8
| 491
| 3.390625
| 3
|
[] |
no_license
|
#!/bin/bash
# Grade a student SSH session: run it, extract the commands the student
# typed, and check that "ls -lt | head -n 2" was used.
ssh_student --setup-script "/bin/bash /task/student/fs/start.sh" --teardown-script "/bin/bash history -w"
logs=""
answer=""
if [ -f /task/student/.ssh_logs ]; then
    # Keep only the student's command lines (text after the "$" prompt).
    # BUG FIX: the original ended this pipeline with a dangling "| )",
    # which is a bash syntax error.
    logs=$(grep "worker@" /task/student/.ssh_logs | cut -d'$' -f2)
    # BUG FIX: the original ran `grep ... $logs`, treating the extracted
    # command text as *filenames*. Search the text itself instead; -F
    # matches the pipeline string literally.
    answer=$(printf '%s\n' "$logs" | grep -F "ls -lt | head -n 2")
fi
# BUG FIX: the original test was `[ "$answer" = ""]` (missing space before
# the closing bracket), which always errors.
if [ "$answer" = "" ]; then
    feedback-msg -em "You used the wrong commands : $logs !"
    feedback-result failed
else
    feedback-msg -em "Congratulation you use the right command: $answer"
    feedback-result success
fi
|
4c92107d315afaab5eba8d28b4e09b32f548fcff
|
Shell
|
yabostone/init
|
/unused/ssh-key.sh
|
UTF-8
| 627
| 2.703125
| 3
|
[] |
no_license
|
#!/bin/bash
# add my ssh-key into folders
## Deprecated: no longer used -- replaced by the unified "newauth" key setup.
if [ ! -d ~/.ssh/ ] ; then
mkdir ~/.ssh
fi
apt-get install sshd wget -y
# Keep a backup of the stock sshd configuration before replacing it.
mv /etc/ssh/sshd_config{,.bak}
wget -N --no-check-certificate https://raw.githubusercontent.com/git4xuan/MyScripts/master/SSH/files/id_rsa_2048onekey.pub && mv id_rsa_2048onekey.pub ~/.ssh/authorized_keys
# sshd requires strict permissions on the key file and on ~/.ssh itself.
chmod 600 ~/.ssh/authorized_keys
chmod 700 ~/.ssh/
wget -N --no-check-certificate https://raw.githubusercontent.com/git4xuan/MyScripts/master/SSH/files/sshd_config_sample && mv sshd_config_sample /etc/ssh/sshd_config
/etc/init.d/sshd restart
| true
|
0fd508dd98058e1b1aaa2b0989671db066c5b343
|
Shell
|
wrossmorrow/gslmpiregression
|
/results.sh
|
UTF-8
| 258
| 2.625
| 3
|
[] |
no_license
|
#!/bin/bash
# Dump the saved gslregress benchmark outputs for every
# (P processes, N observations) combination, with K=5 features.
K=5
proc_counts=( 2 4 6 8 )
obs_counts=( 100 1000 10000 100000 1000000 )
for p in "${proc_counts[@]}" ; do
    for n in "${obs_counts[@]}" ; do
        echo "results from job P = ${p}, N = ${n}, K = ${K}"
        cat "gslregress-${p}-${n}-${K}.txt"
        echo " "
    done
done
| true
|
4543350b59baeafaea41c4b841bb48378c0bc7e3
|
Shell
|
vjrj/la-pipelines
|
/sample-all-cluster.sh
|
UTF-8
| 1,075
| 2.890625
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Run the full ALA sampling workflow on the Spark cluster:
#   1. export all lat/longs, 2. sample them against the spatial layers,
#   3. build the sample cache. Prints total wall time at the end.
echo $(date)
# SECONDS is bash's built-in elapsed-time counter; reset it for the summary.
SECONDS=0
echo 'Step 1. Spark job to export all lat longs'
/data/spark/bin/spark-submit \
--name "Export All" \
--num-executors 8 \
--executor-cores 8 \
--executor-memory 16G \
--driver-memory 4G \
--class au.org.ala.pipelines.beam.ExportAllLatLongCSVPipeline \
--master spark://172.30.1.102:7077 \
--driver-java-options "-Dlog4j.configuration=file:/efs-mount-point/log4j.properties" \
/efs-mount-point/pipelines.jar \
--appName="Lat Long export" \
--runner=SparkRunner \
--inputPath=/data/pipelines-data \
--targetPath=/data/pipelines-data
echo 'Step 2. Sample all using sampling.ala.org.au'
java -Xmx8g -Xmx8g -XX:+UseG1GC -cp pipelines/target/pipelines-1.0-SNAPSHOT-shaded.jar au.org.ala.sampling.LayerCrawler
echo 'Step 3. Generate sample cache for all'
java -Xmx8g -Xmx8g -XX:+UseG1GC -cp pipelines/target/pipelines-1.0-SNAPSHOT-shaded.jar au.org.ala.sampling.SamplingCacheBuilder /data/pipelines-sampling/
echo $(date)
duration=$SECONDS
echo "Sampling of all data took $(($duration / 60)) minutes and $(($duration % 60)) seconds."
| true
|
d307c28bb1c58ec25b1b8e9698ae60f9816cc4f4
|
Shell
|
Rajakumaran1106/opensync
|
/src/dm/fut/othr_connect_wifi_client_to_ap_freeze.sh
|
UTF-8
| 4,190
| 3.28125
| 3
|
[
"BSD-2-Clause",
"BSD-3-Clause"
] |
permissive
|
#!/bin/sh
# Copyright (c) 2015, Plume Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#    1. Redistributions of source code must retain the above copyright
#       notice, this list of conditions and the following disclaimer.
#    2. Redistributions in binary form must reproduce the above copyright
#       notice, this list of conditions and the following disclaimer in the
#       documentation and/or other materials provided with the distribution.
#    3. Neither the name of the Plume Design Inc. nor the
#       names of its contributors may be used to endorse or promote products
#       derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL Plume Design Inc. BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# FUT environment loading
# shellcheck disable=SC1091
source /tmp/fut-base/shell/config/default_shell.sh
[ -e "/tmp/fut-base/fut_set_env.sh" ] && source /tmp/fut-base/fut_set_env.sh
source "${FUT_TOPDIR}/shell/lib/othr_lib.sh"
[ -e "${PLATFORM_OVERRIDE_FILE}" ] && source "${PLATFORM_OVERRIDE_FILE}" || raise "${PLATFORM_OVERRIDE_FILE}" -ofm
[ -e "${MODEL_OVERRIDE_FILE}" ] && source "${MODEL_OVERRIDE_FILE}" || raise "${MODEL_OVERRIDE_FILE}" -ofm
tc_name="othr/$(basename "$0")"
manager_setup_file="othr/othr_setup.sh"
bridge="br-home"
# Tokens identifying the two Openflow_Config rows inserted further below.
freeze_src_token=device_freeze_src
freeze_dst_token=device_freeze_dst
usage() {
cat <<usage_string
${tc_name} [-h] arguments
Description:
    - Script adds Openflow rules to freeze the client.
Arguments:
    -h  show this help message
    \$1 (client_mac) : MAC address of client connected to ap: (string)(required)
Testcase procedure:
    - On DEVICE: Run: ./${manager_setup_file} (see ${manager_setup_file} -h)
                 Run: ./${tc_name} <CLIENT-MAC-ADDR>
Script usage example:
   ./${tc_name} a1:b2:c3:d4:e5:f6
usage_string
}
if [ -n "${1}" ]; then
    case "${1}" in
    help | \
    --help | \
    -h)
        usage && exit 1
        ;;
    *)
        ;;
    esac
fi
# On any exit, dump the touched OVSDB tables to aid debugging.
trap '
fut_info_dump_line
print_tables Openflow_Tag Openflow_Config
fut_info_dump_line
' EXIT SIGINT SIGTERM
NARGS=1
[ $# -ne ${NARGS} ] && raise "Requires exactly '${NARGS}' input argument" -arg
client_mac=${1}
log_title "$tc_name: OTHR test - Adding Openflow rules to tables to freeze client"
log "$tc_name: Inserting rules into Openflow tables to freeze the client: $client_mac"
# Tag the client MAC as ${frozen}, then drop all traffic from and to it.
${OVSH} i Openflow_Tag name:=frozen cloud_value:='["set",['$(printf '"%s"' "${client_mac}")']]' &&
    log "$tc_name: Entry inserted to Openflow_Tag for name 'frozen', client MAC '$client_mac' - Success" ||
    raise "FAIL: Failed to add Openflow rules for client $client_mac to Openflow_Tag table" -l "$tc_name" -oe
${OVSH} i Openflow_Config action:=drop bridge:="${bridge}" priority:=200 rule:='dl_src=${frozen}' table:=0 token:=${freeze_src_token} &&
    log "$tc_name: Entry inserted to Openflow_Config for action 'drop' for dl_src rule - Success" ||
    raise "FAIL: Failed to insert to Openflow_Config for action 'drop' for dl_src rule" -l "$tc_name" -oe
${OVSH} i Openflow_Config action:=drop bridge:="${bridge}" priority:=200 rule:='dl_dst=${frozen}' table:=0 token:=${freeze_dst_token} &&
    log "$tc_name: Entry inserted to Openflow_Config for action 'drop' for dl_dst rule - Success" ||
    raise "FAIL: Failed to insert to Openflow_Config for action 'drop' for dl_dst rule" -l "$tc_name" -oe
pass
| true
|
cd015328e8f46bd56c0c8f470933ca3de72c9a3d
|
Shell
|
LibreCrops/cdef
|
/Site/hl.sh
|
UTF-8
| 711
| 2.6875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
#
# Copyright 2015 Gu Zhengxiong <rectigu@gmail.com>
#
# Highlight the given C source files to HTML5 with source-highlight and
# publish the generated pages into the lost-sdk site tree.
# Usage: ./hl.sh file.c [more.c ...]
CSS=sh_emacs.css
DEST=../../../../lost-sdk
# "$@" (not $*) keeps each input filename intact even if it contains spaces.
source-highlight --src-lang=c --gen-references=inline \
                 --out-format=html5 \
                 --lang-def=../lang/c.lang --style-css-file="$CSS" \
                 --header ../header.html --footer ../footer.html \
                 "$@" || exit 233
# Strip line-number anchors from cross-file references.
sed -ri 's/(href=".+?html)#[0-9]+/\1/g' *.html || exit 233
rm -rf "$DEST/files" || exit 233
mkdir -p "$DEST/files" || exit 233
mv *.html "$DEST/files" || exit 233
../index.py "$DEST/files"
# sed -ri 's/ntdll\.h\///g' $OUT/*.html || exit 233
# sed -ri "s/$OUT\///g" $OUT/*.html || exit 233
# perl -00 -i -pe 's/\n<a.+//g' $OUT/*.html || exit 233
| true
|
eaa6f2c8573f4973fabd3b465bd4721d1a825513
|
Shell
|
yudong-tian/dscale
|
/new-Validation/MERRA2-interpolated/interpolate.sh
|
UTF-8
| 1,384
| 3.15625
| 3
|
[] |
no_license
|
# interpolates MERRA2 monthly to B24/B12/B4 monthly
# For each month of 2000-2010, regrid the MERRA2 monthly rain field onto
# three target grids (B24 0.25deg, B12 0.125deg, B4 ~1/24deg) using GrADS'
# re() function, writing big-endian binary (.1gd4r) files.
idir=/home/ytian/Climate-downscaling/new-Validation/MERRA2
odir=/home/ytian/Climate-downscaling/new-Validation/MERRA2-interpolated
cd $odir
mkdir -p B24/monthly
mkdir -p B12/monthly
mkdir -p B4/monthly
for year in `seq 2000 2010`; do
for mon in jan feb mar apr may jun jul aug sep oct nov dec; do
sdate="1 $mon $year"  # 1 feb 2010
# Convert to GrADS time format, e.g. 00:00Z01feb2010.
gtime=`date -u -d "$sdate" +%H:%MZ%d%b%Y`
ifile=$idir/monthly.ctl
echo $gtime
# B24
ofile=$odir/B24/monthly/$mon$year.1gd4r
/home/dao_ops/operations/GrADS/Linux-1.9-rc1-gmao/grads -bl <<EOF
open $ifile
set time $gtime
set gxout fwrite
set fwrite -be -st $ofile
d re(rain, 232, linear, -124.875, 0.25, 112, linear, 25.125, 0.25)
disable fwrite
quit
EOF
# B12
ofile=$odir/B12/monthly/$mon$year.1gd4r
/home/dao_ops/operations/GrADS/Linux-1.9-rc1-gmao/grads -bl <<EOF1
open $ifile
set time $gtime
set gxout fwrite
set fwrite -be -st $ofile
d re(rain, 464, linear, -124.9375, 0.125, 224, linear, 25.0625, 0.125)
disable fwrite
quit
EOF1
# B4
ofile=$odir/B4/monthly/$mon$year.1gd4r
/home/dao_ops/operations/GrADS/Linux-1.9-rc1-gmao/grads -bl <<EOF2
open $ifile
set time $gtime
set gxout fwrite
set fwrite -be -st $ofile
d re(rain, 1405, linear, -125.0, 0.04166667, 621, linear, 24.0833, 0.04166667)
disable fwrite
quit
EOF2
done # mon
done # year
| true
|
20a31d15b9d026933c46fb0d46e3f0533450b7bd
|
Shell
|
pr2-debs/installer-trusty
|
/basestation/.svn/text-base/genimage.svn-base
|
UTF-8
| 1,272
| 3.75
| 4
|
[] |
no_license
|
#!/bin/bash
# Build a customized Ubuntu alternate-install ISO for the PR2 basestation:
# download the stock ISO, overlay local packages (reprepro), a preseed file
# and grub config, regenerate checksums, and write a bootable ISO.
VERSION=12.04.1
IMAGE=ubuntu-$VERSION-alternate-amd64.iso
# Download the ISO once; the subshell keeps the caller's cwd unchanged.
(cd install_dev1; [ -e $IMAGE ] || wget http://releases.ubuntu.com/$VERSION/$IMAGE)
# clean out target directory
IMAGE_DIR=/tmp/image_dir
rm -rf $IMAGE_DIR
mkdir $IMAGE_DIR
# mount disk image
mkdir -p /tmp/cdrom
mount -o loop install_dev1/$IMAGE /tmp/cdrom
# copy disk image to target directory
rsync -a /tmp/cdrom/ $IMAGE_DIR
# unmount disk image
umount /tmp/cdrom
# copy reprepro conf to target and sync packages
cp -r install_dev1/pr2-packages/conf $IMAGE_DIR
(cd $IMAGE_DIR; reprepro --noskipold update precise)
# copy preseed file to target
cp install_dev1/basestation_precise64.preseed $IMAGE_DIR/preseed/ubuntu.seed
# add grub config files
cp install_dev1/grub.cfg $IMAGE_DIR/boot/grub/
# add new sources.list
cp install_dev1/new-sources.list $IMAGE_DIR/
# remove GPG signature (the tree was modified, so the original one is void)
rm $IMAGE_DIR/dists/precise/Release.gpg
# generate md5sums
rm $IMAGE_DIR/md5sum.txt
find $IMAGE_DIR -type f | xargs md5sum >$IMAGE_DIR/md5sum.txt
isofile=/tmp/basestation.iso
genisoimage -R -J -o $isofile -b isolinux/isolinux.bin -c isolinux/boot.cat -no-emul-boot -boot-load-size 4 -boot-info-table $IMAGE_DIR
echo "ISO generated as $isofile:"
du -h $isofile
echo "Image directory:"
du -sh $IMAGE_DIR
| true
|
6d6e38802f0928e72a812c9691dabd26fd8bdf23
|
Shell
|
ChrisCarini/dotfiles
|
/macos/dock.sh
|
UTF-8
| 1,507
| 3.171875
| 3
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
#!/usr/bin/env bash

##
# Make utilities available
#
# Needed because we make use of is-macos-catalina-or-later within this script.
##
DOTFILES_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && cd .. && pwd )"
PATH="$DOTFILES_DIR/bin:$PATH"

# Queue one Dock icon; the Dock is only restarted once at the very end.
add_icon() {
  dockutil --no-restart --add "$1"
}

# Wipe the Dock clean, then rebuild it in the desired order.
dockutil --no-restart --remove all

add_icon "/System/Library/CoreServices/Finder.app"
add_icon "/Applications/Microsoft Outlook.app"
add_icon "/Applications/Google Chrome.app"
# Most recently installed "LI IntelliJ IDEA 20xx" build.
add_icon "$(ls -trd /Applications/LI\ IntelliJ\ IDEA\ 20* | head -n1)"
# Terminal and Activity Monitor moved under /System/Applications in Catalina.
if is-macos-catalina-or-later ; then
  add_icon "/System/Applications/Utilities/Terminal.app"
else
  add_icon "/Applications/Utilities/Terminal.app"
fi
add_icon "/Applications/iTerm.app"
add_icon "/Applications/IntelliJ IDEA CE.app"
add_icon "/Applications/Slack.app"
if is-macos-catalina-or-later ; then
  add_icon "/System/Applications/Utilities/Activity Monitor.app"
else
  add_icon "/Applications/Utilities/Activity Monitor.app"
fi
add_icon "/Applications/Microsoft Teams.app"
add_icon "/Applications/Microsoft OneNote.app"

##
# Restart Dock for icons to take effect.
##
killall Dock
| true
|
6ed3e9b96e0b4c2faee63eb09bf8dfeb9535b0b5
|
Shell
|
iwm911/MISP-dockerized
|
/.ci/30_push_2_registry.sh
|
UTF-8
| 3,824
| 3.75
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/sh
set -e

STARTMSG="[push]"

# version_gt A B  -> true (exit 0) iff version A is strictly greater than B.
# Relies on `sort -V`, so multi-digit components compare numerically, e.g.
#   version_gt 5.100.2 5.1.2   # true
version_gt() { test "$(printf '%s\n' "$@" | sort -V | head -n 1)" != "$1"; }
# func_push REPOSITORY TAG_LIST
# Push each tag in the whitespace-separated TAG_LIST to REPOSITORY.
# Tags containing "dev" are build-local and are never pushed.
func_push() {
    repo="$1"
    tag_list="$2"
    for tag in $tag_list
    do
        case "$tag" in
            *dev*) continue ;;
        esac
        docker push "$repo:$tag"
    done
}
# change directory for make usage
[ -z "$1" ] && echo "$STARTMSG No parameter with the Docker registry URL. Exit now." && exit 1
[ "$1" = "NOT2PUSH" ] && echo "$STARTMSG The NOT2PUSH slug is only for local build and retag not for pushin to docker registries. Exit now." && exit 1
[ -z "$2" ] && echo "$STARTMSG No parameter with the Docker registry username. Exit now." && exit 1
[ -z "$3" ] && echo "$STARTMSG No parameter with the Docker registry password. Exit now." && exit 1
REGISTRY_URL="$1"
REGISTRY_USER="$2"
REGISTRY_PW="$3"
SERVER_TAGS="$(docker images --no-trunc --format '{{.Tag}}={{.ID}}' | grep $(docker inspect misp-server -f '{{.Image}}')|cut -d = -f 1)"
PROXY_TAGS="$(docker images --no-trunc --format '{{.Tag}}={{.ID}}' | grep $(docker inspect misp-proxy -f '{{.Image}}')|cut -d = -f 1)"
ROBOT_TAGS="$(docker images --no-trunc --format '{{.Tag}}={{.ID}}' | grep $(docker inspect misp-robot -f '{{.Image}}')|cut -d = -f 1)"
MODULES_TAGS="$(docker images --no-trunc --format '{{.Tag}}={{.ID}}' | grep $(docker inspect misp-modules -f '{{.Image}}')|cut -d = -f 1)"
#DB_TAGS=$(docker ps -f name=db --format '{{.Image}}'|cut -d : -f 2)
REDIS_TAGS="$(docker images --no-trunc --format '{{.Tag}}={{.ID}}' | grep $(docker inspect misp-redis -f '{{.Image}}')|cut -d = -f 1)"
# Login to Docker registry
[ "$REGISTRY_URL" != "dcso" ] && DOCKER_LOGIN_OUTPUT="$(echo "$REGISTRY_PW" | docker login -u "$REGISTRY_USER" "$REGISTRY_URL" --password-stdin)"
[ "$REGISTRY_URL" = "dcso" ] && DOCKER_LOGIN_OUTPUT="$(echo "$REGISTRY_PW" | docker login -u "$REGISTRY_USER" --password-stdin)"
echo "$DOCKER_LOGIN_OUTPUT"
DOCKER_LOGIN_STATE="$(echo "$DOCKER_LOGIN_OUTPUT" | grep 'Login Succeeded')"
if [ ! -z "$DOCKER_LOGIN_STATE" ]; then
# # retag all existing tags dev 2 public repo
# #$makefile_travis tag REPOURL=$REGISTRY_URL server_tag=${server_tag} proxy_tag=${proxy_tag} robot_tag=${robot_tag} modules_tag=${modules_tag} db_tag=${modules_tag} redis_tag=${modules_tag} postfix_tag=${postfix_tag}
# func_tag "$REGISTRY_URL/misp-dockerized-server" "$SERVER_TAG"
# func_tag "$REGISTRY_URL/misp-dockerized-server" "$SERVER_TAG"
# func_tag "$REGISTRY_URL/misp-dockerized-robot" "$ROBOT_TAG"
# func_tag "$REGISTRY_URL/misp-dockerized-misp-modules" "$MODULES_TAG"
# #func_tag "$REGISTRY_URL/misp-dockerized-db" "$DB_TAG"
# func_tag "$REGISTRY_URL/misp-dockerized-redis" "$REDIS_TAG"
# echo "###########################################" && docker images && echo "###########################################"
# Push all Docker images
#$makefile_travis push REPOURL=$REGISTRY_URL server_tag=${server_tag} proxy_tag=${proxy_tag} robot_tag=${robot_tag} modules_tag=${modules_tag} postfix_tag=${postfix_tag}
func_push "$REGISTRY_URL/misp-dockerized-server" "$SERVER_TAGS"
func_push "$REGISTRY_URL/misp-dockerized-proxy" "$PROXY_TAGS"
func_push "$REGISTRY_URL/misp-dockerized-robot" "$ROBOT_TAGS"
func_push "$REGISTRY_URL/misp-dockerized-misp-modules" "$MODULES_TAGS"
if version_gt "$CURRENT_VERSION" "1.1.0" ; then
func_push "$REGISTRY_URL/misp-dockerized-redis" "$REDIS_TAGS"
fi
#func_push "$REGISTRY_URL/misp-dockerized-db" "$DB_TAGS"
else
echo "$DOCKER_LOGIN_OUTPUT"
exit
fi
echo "$STARTMSG $0 is finished."
| true
|
1abdc8bb7a26210be32690255c3b273f6bbf1832
|
Shell
|
kaoengine/forex-demo
|
/api-services/install.sh
|
UTF-8
| 2,025
| 2.921875
| 3
|
[
"MIT"
] |
permissive
|
# Check a command-line tool and, when an install command is supplied,
# install it if it is missing.
# Arguments: $1 - tool name, $2 - optional install command
ensure_cli() {
    local tool=$1
    local install_cmd=${2:-}
    if ! [ -x "$(command -v "$tool")" ]; then
        echo "Warning: $tool is not installed." >&2
        if [ -n "$install_cmd" ]; then
            $install_cmd
        fi
    else
        echo "Info: $tool is installed." >&2
    fi
}

# Check a Python library by importing it. Libraries such as torch/numpy
# ship no CLI binary, so `command -v` (used by the original script) could
# never detect them. Optionally installs the library when missing.
# Arguments: $1 - module name, $2 - optional install command
ensure_py_module() {
    local module=$1
    local install_cmd=${2:-}
    if ! python3 -c "import ${module}" > /dev/null 2>&1; then
        echo "Warning: ${module} is not installed." >&2
        if [ -n "$install_cmd" ]; then
            $install_cmd
        fi
    else
        echo "Info: ${module} is installed." >&2
    fi
}

ensure_cli pipenv "pip3 install pipenv"
ensure_cli flask          # install disabled in original: pip3 install flask
ensure_py_module joblib   # install disabled in original: pip3 install joblib
ensure_py_module torch    # install disabled in original: pip3 install torch
ensure_py_module numpy    # install disabled in original: pip3 install numpy
ensure_py_module scipy    # install disabled in original: pip3 install scipy
ensure_py_module sklearn "pip3 install sklearn"

# postgresql
# postgres /var/lib/postgresql/13/main /var/log/postgresql/postgresql-13-main.log
if ! [ -x "$(command -v psql)" ]; then
    echo 'Warning: postgresql is not installed.' >&2
    # Add the upstream PGDG apt repository and its signing key, then install.
    sudo sh -c 'echo "deb http://apt.postgresql.org/pub/repos/apt $(lsb_release -cs)-pgdg main" > /etc/apt/sources.list.d/pgdg.list'
    wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add -
    sudo apt-get update
    sudo apt-get -y install postgresql
else
    echo 'Info: postgresql is installed.' >&2
    echo 'Start postgresql...' >&2
    # start postgresql
    # https://manpages.debian.org/buster/postgresql-common/pg_ctlcluster.1.en.html
    sudo pg_ctlcluster 13 main start
fi
| true
|
395ee2b156be69e630d257c29419c620f02d500b
|
Shell
|
sinetoami/dotfiles
|
/home/.bin/kill-process-fzf
|
UTF-8
| 424
| 3.609375
| 4
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# mnemonic: [K]ill [P]rocess
# Usage: kill-process-fzf [SIGNAL]
# Pick one or more processes in fzf and send them SIGNAL (default: 9).
# Shebang changed sh -> bash: the script uses bashisms (`function`,
# `local`, `$UID`) that plain POSIX sh does not guarantee.
function kill_process() {
    local pid
    if [ "$UID" != "0" ]; then
        # non-root: only offer this user's processes
        # NOTE(review): standard fzf env var is FZF_DEFAULT_OPTS; the name
        # below follows the original script — confirm which one is intended.
        pid=$(ps -f -u $UID | sed 1d | eval "fzf ${FZF_DEFAULT_OPTIONS} -m --header='[kill:process]'" | awk '{print $2}')
    else
        pid=$(ps -ef | sed 1d | eval "fzf ${FZF_DEFAULT_OPTIONS} -m --header='[kill:process]'" | awk '{print $2}')
    fi

    if [ "x$pid" != "x" ]; then
        echo $pid | xargs kill -${1:-9}
    fi
}

# Forward the script's arguments so that `kill-process-fzf 15` actually
# sends SIGTERM; the original called kill_process with no args, making
# ${1:-9} inside the function always default to 9.
kill_process "$@"
| true
|
93e61d7a052bc6b60243f915f80fcab1557aca83
|
Shell
|
mergermarket/docker-mysql-s3-backup
|
/assets/sync_to_s3.sh
|
UTF-8
| 1,452
| 4.21875
| 4
|
[] |
no_license
|
#!/usr/bin/env bash
set -e -u -o pipefail

SCRIPT_NAME=$0

# Print usage information to stdout.
help() {
    cat <<EOF
Usage:
S3_BUCKET_NAME=my_bucket \
S3_BUCKET_PATH=/backup/foobar/live \
SYNC_ORIGIN_PATH=/data \
${SCRIPT_NAME}
Other optional variables:
S3_ENDPOINT=... - S3 endpoint to connect to
SYNC_EXCLUDE=/data/dir/* - Directory to exclude
I will copy all the files from /data into the given S3 bucket.
EOF
}

# Reject an unset, empty, or non-directory SYNC_ORIGIN_PATH.
# Fixes two bugs in the original check:
#  - it used '&&', so a NON-empty path that is not a directory passed
#    validation ('||' is required);
#  - under `set -u` an unset variable aborted the script with "unbound
#    variable" before the friendly error could print (use ${VAR:-}).
if [ -z "${SYNC_ORIGIN_PATH:-}" ] || ! [ -d "${SYNC_ORIGIN_PATH}" ]; then
    echo "ERROR: \${SYNC_ORIGIN_PATH}='${SYNC_ORIGIN_PATH:-}' is not a valid directory'"
    exit 1
fi
# Collapse every run of consecutive '/' characters into a single '/'.
# Repeats the pairwise replacement until the string stops changing
# (a fixpoint), so arbitrarily long runs are handled.
remove_double_slashes() {
    local current="$1"
    local previous=""
    while [ "$previous" != "$current" ]; do
        previous="$current"
        current="${previous//\/\//\/}"
    done
    echo "$current"
}
# Mirror SYNC_ORIGIN_PATH to s3://$S3_BUCKET_NAME/$S3_BUCKET_PATH,
# deleting remote objects that no longer exist locally.
# Globals (read): S3_BUCKET_NAME, S3_BUCKET_PATH, SYNC_ORIGIN_PATH,
#                 S3_ENDPOINT (optional), SYNC_EXCLUDE (optional).
do_sync() {
    target_path=s3://$(remove_double_slashes "${S3_BUCKET_NAME}/${S3_BUCKET_PATH}")
    # The ${VAR:+...} expansions are deliberately UNQUOTED so each one
    # splits into its option words when the variable is set and vanishes
    # entirely when it is unset/empty.
    aws s3 sync \
        --delete \
        ${S3_ENDPOINT:+--endpoint-url ${S3_ENDPOINT}} \
        ${SYNC_EXCLUDE:+--exclude "${SYNC_EXCLUDE}"} \
        "${SYNC_ORIGIN_PATH}" \
        "${target_path}"
}
# When running inside ECS/Fargate, fetch temporary credentials from the
# task metadata endpoint (169.254.170.2) and export them so the aws CLI
# can authenticate. No-op outside of ECS (env var unset).
load_aws_credentials(){
    if [ -n "${AWS_CONTAINER_CREDENTIALS_RELATIVE_URI:-}" ]; then
        # jq renders the JSON credentials as `export KEY=VALUE` lines,
        # which eval then applies to the current shell.
        eval $(curl -qs 169.254.170.2$AWS_CONTAINER_CREDENTIALS_RELATIVE_URI | jq -r '"export AWS_ACCESS_KEY_ID=\( .AccessKeyId )\nexport AWS_SECRET_ACCESS_KEY=\( .SecretAccessKey )\nexport AWS_SESSION_TOKEN=\( .Token )"')
    fi
}

load_aws_credentials
# aws s3 sync sometimes doesn't work if the files change, so this gives more chance of success
do_sync || do_sync || do_sync
| true
|
9fa827ccfd17833ebd8b7674b2d1f955612de02e
|
Shell
|
savsmith/pfp_HW5
|
/BellmanFord/hw4/run_vtune.sh
|
UTF-8
| 1,024
| 2.875
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Rebuild the benchmark with the given make arguments and collect VTune
# hardware event counts for both the pull and push pagerank variants.
# Arguments:
#   $1 - extra make arguments (e.g. "CXXFLAGS=-D..."), may be empty
#   $2 - variant label used in the output file name (aos / soa)
#   $3 - input file name under inputs/
# Outputs: per-run logs pr_<variant>_<algo>_<input>.txt (via tee).
function measure_vtune() {
    CXXFLAGS=$1
    VARIANT=$2
    INPUT=$3
    make clean
    # ${CXXFLAGS} intentionally unquoted: an empty first argument must
    # disappear rather than become an empty make target.
    make ${CXXFLAGS}
    for algo in pull push;
    do
        # event-config lists the sampled hardware counters with their
        # sample-after values (clock, FP, retired loads/stores, cache
        # hit/miss levels, TLB misses, branch events).
        amplxe-cl -collect-with runsa -knob event-config=CPU_CLK_UNHALTED.REF_P:sa=133000,CPU_CLK_UNHALTED.THREAD:sa=2000000,FP_COMP_OPS_EXE.X87:sa=2000000,FP_COMP_OPS_EXE.MMX:sa=2000000,FP_COMP_OPS_EXE.SSE_FP:sa=2000000,FP_COMP_OPS_EXE.SSE2_INTEGER:sa=2000000,INST_RETIRED.ANY:sa=2000000,MEM_INST_RETIRED.LOADS:sa=2000000,MEM_INST_RETIRED.STORES:sa=2000000,MEM_LOAD_RETIRED.L1D_HIT:sa=2000000,MEM_LOAD_RETIRED.L2_HIT:sa=200000,MEM_LOAD_RETIRED.LLC_UNSHARED_HIT:sa=200000,MEM_LOAD_RETIRED.LLC_MISS:sa=200000,MEM_LOAD_RETIRED.DTLB_MISS:sa=200000,BR_MISP_EXEC.ANY:sa=20000,BR_INST_EXEC.ANY:sa=20000 -start-paused -analyze-system -app-working-dir . -- ./pagerank inputs/${INPUT} ${algo} 1.0e-20 | tee pr_${VARIANT}_${algo}_${INPUT}.txt;
    done
}
# Run both measurements (array-of-structs and struct-of-arrays builds)
# for every input file. Iterate the glob directly instead of parsing
# `ls` output, which breaks on filenames with whitespace.
for path in inputs/*; do
    f=${path##*/}   # measure_vtune expects the bare file name
    measure_vtune "" "aos" "$f"
    measure_vtune "CXXFLAGS=-DSP2018_CS377P_STRUCT_OF_ARRAYS" "soa" "$f"
done
| true
|
eae68cc9124025d9d86716049162fecf86065081
|
Shell
|
nagyistge/mistral
|
/docker_image_build.sh
|
UTF-8
| 396
| 2.65625
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
# Build the Mistral Docker image and export it as a gzipped tarball.
# Shell options are enabled in the body rather than the shebang: options
# in "#!/bin/bash -xe" are silently dropped when the script is invoked
# as `bash docker_image_build.sh`.
set -xe

# Install Docker with whichever package manager this distro provides.
if [ -x "/usr/bin/apt-get" ]; then
    sudo -E apt-get update
    sudo -E apt-get install -y docker.io apparmor cgroup-lite
elif [ -x "/usr/bin/yum" ]; then
    sudo -E yum install -y docker-io gpg
else
    echo "No supported package manager installed on system. Supported: apt, yum"
    exit 1
fi

sudo docker build -t mistral-docker .
sudo docker save mistral-docker | gzip > mistral-docker.tar.gz
| true
|
4e4cf555ba5fb962209e43557825b28bada8d4c7
|
Shell
|
SylixOS/sylixos
|
/find_disk.sh
|
UTF-8
| 24,840
| 3.765625
| 4
|
[] |
no_license
|
#!/bin/bash
#set -x
cwd=`dirname $0`
en_shield=y
declare -a disk_list
export disk_list=
CFGFILE=$cwd/estuarycfg.json
parse_config $CFGFILE
DISTROS=()
idx=0
idx_en=0
install=`jq -r ".distros[$idx].install" $CFGFILE`
while [ x"$install" != x"null" ];
do
if [ x"yes" = x"$install" ]; then
idx_en=${#DISTROS[@]}
DISTROS[${#DISTROS[@]}]=`jq -r ".distros[$idx].name" $CFGFILE`
echo "$idx_en) ${DISTROS[$idx_en]}"
fi
name=`jq -r ".distros[$idx].name" $CFGFILE`
value=`jq -r ".distros[$idx].install" $CFGFILE`
capacity=`jq -r ".distros[$idx].capacity" $CFGFILE`
case $name in
"Ubuntu")
ubuntu_en=$value
ubuntu_partition_size=$capacity
;;
"OpenSuse")
opensuse_en=$value
opensuse_partition_size=$capacity
;;
"Fedora")
fedora_en=$value
fedora_partition_size=$capacity
;;
"Debian")
debian_en=$value
debian_partition_size=$capacity
;;
*)
;;
esac
let idx=$idx+1
install=`jq -r ".distros[$idx].install" $CFGFILE`
done
read -p 'Please select the target system type: ' type_idx
echo "$type_idx ${DISTROS[$type_idx]}"
target_system_type=${DISTROS[$type_idx]}
# Remember whether we enabled pipefail ourselves so we can restore the
# caller's setting afterwards. The flag is initialised unconditionally:
# the original assigned it only inside the branch, so the later
# `[ $is_disable -eq 1 ]` test failed with a unary-operator error
# whenever pipefail was already on.
is_disable=0
if [ ! -o pipefail ]; then
    set -o pipefail
    is_disable=1
fi
#readarray will add newline in array elements
read -a disk_list <<< $(lsblk | grep '\<disk\>' | awk '{print $1}')
if [ $? ]; then
echo "OK. existing hard-disks are " ${disk_list[@]}
if [ ${#disk_list[@]} -eq 0 ]; then
echo "No any SATA hard disk. Please connect a new one"
exit
fi
else
echo "Get hard-disk information fail"
exit
fi
echo "length of array " ${#disk_list[@]}
if [ $is_disable -eq 1 ]; then
set +o pipefail
fi
#TMP_RANFILE=$(mktemp)
#The root device should be defined in /proc/cmdline, we process it
#1) obtain the root device id
#2) check whether the hard-disk is where the root filesystem resides
for x in $(cat /proc/cmdline); do
case $x in
root=*)
root_para=${x#root=}
echo "root_para "${root_para}
case $root_para in
LABEL=*)
root_id=${root_para#LABEL=}
#case $root_id in
#*/*)
#if sed > /dev/null 2>&1;then
# root_id="$(echo ${root_id} | sed 's,/,\\x2f,g')"
#fi
#;;
#*)
#echo "invalid label " ${root_id}
#;;
#esac
root_id="/dev/disk/by-label/${root_id}"
;;
PARTUUID=*)
root_id="/dev/disk/by-partuuid/${root_para#PARTUUID=}"
root_dev=$(ls -l /dev/disk/by-partuuid | grep ${root_para#PARTUUID=} | awk '{ print $NF }')
root_dev=${root_dev#../../}
;;
/dev/*)
echo "legacy root device " ${root_para}
root_id=${root_para#/dev/}
root_dev=${root_id}
;;
*)
echo "invalid root device " ${root_para}
;;
esac
;;
#we only care about the root=, skip all others
esac
done
echo "final root device " ${root_id} ${root_dev}
##filter out the current root disk..
CUR_RTDEV=""
if [ "$root_dev" != "nfs" ]; then
CUR_RTDEV=$( echo ${root_dev} | sed 's,[0-9]\+,,g')
echo "root disk in using is "$CUR_RTDEV
#org_size=${#disk_list[@]}
elem_idx=0
for now_disk in "${disk_list[@]}"; do
echo "disk_list is $elem_idx ${disk_list[elem_idx]}--"
if (echo ${root_dev} | grep "$now_disk"); then
echo "try to skip " $now_disk
unset disk_list[elem_idx]
#(( ${first_unset:=$elem_idx} ))
#echo "first unset index is "$first_unset
else
if [ $? -gt 1 ]; then
echo "unknow Error occurred!"
exit
fi
fi
(( elem_idx++ ))
done
#remove the invalid array elements
#move_array_unset disk_list 0 "$org_size"
# Compact disk_list[]: move trailing entries into the slots freed by the
# `unset` calls above so indices stay contiguous.
# Fixed: the original wrote `idx \< elem_idx` — a backslash-escaped `<`
# is a syntax error inside (( )) arithmetic (escaping is only needed in
# `[ ]`/word contexts, never in arithmetic).
for (( idx=0; idx < elem_idx; idx++ )); do
    if [ -z "${disk_list[idx]}" ]; then
        if [ -n "${disk_list[(( --elem_idx ))]}" ]; then
            disk_list[idx]=${disk_list[elem_idx]}
            unset disk_list[elem_idx]
        fi
    fi
done
else
[ ${#disk_list[@]} == 0 ] && ( echo "NFS + no_any_disk!"; exit )
fi
export CUR_RTDEV
echo "After filter..length of array ${#disk_list[@]} ${disk_list[0]}--"
#The length of disk_list[] must -gt 0
if [ ${#disk_list[@]} -le 0 ]; then
echo "No idle SATA hard disk. Please plug new one"
exit 1
#disk_list[0]=$(echo ${root_dev} | sed 's,[0-9]\+,,g')
#echo "Or update " ${disk_list[0]} "have risk to demage whole disk!!"
#read -t 20 -p "Please assert[y/n]:" assert_flag
#if [ "$assert_flag" != "y" ]; then
# exit 1
#fi
else
##when there are multiply disks,maybe it is better user decide which to be selected
#But how the user know which one is new plugged??
if [ ${#disk_list[@]} \> 1 ]; then
select newroot_disk in "${disk_list[@]}"; do
if [ -n "$newroot_disk" ]; then
disk_list[$REPLY]=${disk_list[0]}
disk_list[0]=$newroot_disk
break
else
echo "Please try again"
fi
done
fi
fi
echo "will partition disk " ${disk_list[0]}"--"
#ok. The available hard-disks are here now. Just pick one with enough space
#1) check whether parted had been installed
# Probe the network before attempting any installation.
# Fixed: the original ran `command -v ping -c 2 ports.ubuntu.com`, which
# only checks that the ping binary exists and never touches the network.
if ! ping -c 2 ports.ubuntu.com > /dev/null 2>&1; then
    echo "network seems not to be available. Please check it first"
    exit
fi

# Install parted if it is missing. A { } group replaces the original
# ( ) subshell: `exit` inside a subshell only left the subshell, so the
# script carried on after a failed installation.
if ! command -v parted > /dev/null 2>&1; then
    apt-get install parted || { echo "parted installation FAIL!"; exit 1; }
fi
#2) find a partition to save the packages fetched
declare -a part_list
declare -a part_name
part_list_idx=0
#disk_list[0]=$(echo ${disk_list[0]} | sed 's/*[ \t\n]//')
declare -a nonboot_part
if [ -z "$CUR_RTDEV" ]; then
read -a nonboot_part <<< $(sudo parted /dev/${disk_list[0]} print |\
awk '$1 ~ /[0-9]+/ {print $1}' | sort)
else
#for non-nfs, only one root-disk, or not less than two disks. For one root-disk, if we choose it, then
#disk_list[0] is it; for multiple disks, the root-disk will not be in disk_list[].
#read -a nonboot_part <<< $(sudo parted /dev/${disk_list[0]} print |\
# awk '$1 ~ /[0-9]+/ && ! /boot/ {print $1}' | sort)
read -a nonboot_part <<< $(sudo parted /dev/${disk_list[0]} print |\
awk '$1 ~ /[0-9]+/ {print $1}' | sort)
fi
for part_idx in ${nonboot_part[*]}; do
echo "current partition index "${part_idx}
#will exclude the current root and all mounted partitions of first disk
if [ ${disk_list[0]} != ${root_dev%${part_idx}} ]; then
tmp_part="/dev/${disk_list[0]}${part_idx}"
echo "tmporary partition is "$tmp_part
if ( mount | grep "$tmp_part" ); then
#match_str=`(sudo df -ihT | awk '{ if ($1 == tmp_pt) print $NF }' tmp_pt=${tmp_part})`
#echo "match_str "$match_str"--"
#if [ "$match_str" ]; then
echo "partition "$tmp_part " should be kept"
else
echo "partition "$tmp_part " can be removed"
part_list[part_list_idx]=$part_idx
part_name[part_list_idx]=$tmp_part
(( part_list_idx++ ))
fi
fi
done
unset part_idx
part_name[(( part_list_idx++ ))]="all"
part_name[part_list_idx]="exit"
assert_flag=""
while [ "$assert_flag" != "y" ]; do
##Begin to remove the idle partitions
sudo parted "/dev/"${disk_list[0]} print
#only debud
if [ "$en_shield" == "n" ]
then
echo "Please choose the partition to be removed:"
select part_tormv in "${part_name[@]}"; do
echo "select input "$part_tormv
if [ "$part_tormv" == "all" ]; then
echo "all the partitions listed above will be deleted"
elif [ "$part_tormv" == "exit" ]; then
echo "keep all current partitions"
assert_flag="y"
elif [ -n "$part_tormv" ]; then
echo $part_tormv" will be deleted"
else
echo "invalid choice! Please try again"
continue
fi
sel_idx=`expr $REPLY - 1`
break
done
fi
cat << EOM
##############################################################################
Right now, the default installation will be finished.
##############################################################################
EOM
wait_user_choose "all partitions of this Hard Disk will be deleted?" "y|n"
if [ "$assert_flag" == "y" ]; then
part_tormv=all
sel_idx=${#part_list[@]}
full_intallation=yes
else
full_intallation=no
exit 0
fi
echo "sel_idx "$sel_idx "part_list count:"${#part_list[@]} "part_list[0] :"${part_list[0]}
ind=0
if [ $sel_idx != $(( ${#part_list[@]} + 1 )) ]; then
if [ $sel_idx == ${#part_list[@]} ]; then
while [ -v part_list[ind] ]; do
cmd_str="sudo parted "/dev/"${disk_list[0]} rm ${part_list[ind]}"
echo "delete $ind "$cmd_str
eval $cmd_str
(( ind++ ))
done
assert_flag="y"
else
cmd_str="sudo parted "/dev/"${disk_list[0]} rm ${part_list[sel_idx]}"
echo "delete one partition: "$cmd_str
eval $cmd_str
org_size=${#part_name[@]}
unset part_name[sel_idx]
move_array_unset part_name $sel_idx $org_size
#idx=$sel_idx
#(( i=$idx + 1 ))
#while [ $i \< $org_size -a $idx \< $org_size ]; do
#[ -z "${part_name[i]}" ] && { (( i++ )); continue; }
#part_name[idx]=${part_name[i]}
#unset part_name[i]
#(( idx++ ))
#(( i++ ))
#done
echo "new partition is ""${part_name[@]}"
org_size=${#part_list[@]}
unset part_list[sel_idx]
move_array_unset part_list $sel_idx $org_size
echo "new partition id are ${part_list[@]}"
#idx=$sel_idx
#(( i=$idx + 1 ))
#while [ $i \< $org_size -a $idx \< $org_size ]; do
#[ -z "${part_list[i]}" ] && { (( i++ )); continue; }
#part_list[idx]=${part_list[i]}
#unset part_list[i]
#(( idx++ ))
#(( i++ ))
#done
fi
fi
done
#NEWFS_DEV=${disk_list[0]}
## the later two entry is not used again unset them
(( i=${#part_name[@]} - 1 ))
unset part_name[i]
(( i-- ))
unset part_name[i]
if [ "$full_intallation" = "yes" ]; then
#make another partition as the place where the new root filesystem locates
#1) ensure that the disk partition table is gpt
if [ "$(sudo parted /dev/${disk_list[0]} print | \
awk '/Partition / && /Table:/ {print $NF}')" != "gpt" ]; then
echo "All current partitions will be deleted"
if ! ( sudo parted /dev/${disk_list[0]} mklabel gpt ); then
echo "configure ${disk_list[0]} label as gpt FAIL"
exit
fi
fi
boot_id=$(sudo parted /dev/${disk_list[0]} print | awk '$1 ~ /[0-9]+/ && /boot/ {print $1}')
if [ -z "$boot_id" ]; then
echo -n "make boot partition"
##[ ! (sudo parted /dev/${disk_list[0]} mkpart uefi 1 256) ] && ( echo "ERR"; exit ) always said too many parameters
if ! ( sudo parted /dev/${disk_list[0]} mkpart uefi 1 256;set 1 boot on ); then
echo " ERR"
exit
else
echo " OK"
##since UEFI currently only support fat16, we need mkfs.vfat
sudo apt-get install dosfstools -y
mkfs -t vfat /dev/${disk_list[0]}1
#parted /dev/${disk_list[0]} mkfs 1 fat16
[ $? ] || { echo "ERR::mkfs for boot partition FAIL"; exit; }
#sudo parted /dev/${disk_list[0]} set 1 boot on
fi
else
echo "existed boot partition will be updated"
fi
rootfs_start=1
if [ "$ubuntu_en" == "yes" ]; then
ubuntu_partition_size_int=${ubuntu_partition_size%G*}
rootfs_end=$(( rootfs_start + ubuntu_partition_size_int ))
cmd_str="sudo parted /dev/${disk_list[0]} mkpart ubuntu ${rootfs_start}G ${rootfs_end}G"
echo -n "make root partition by "$cmd_str
eval $cmd_str
[ $? ] || { echo " ERR"; exit; }
#get the device id that match with the partition just made
read -a cur_idx <<< $(sudo parted /dev/${disk_list[0]} print | \
grep "ubuntu" | awk '{print $1}' | sort)
echo "root cur_idx is ${cur_idx[*]}"
NEWRT_IDX=${cur_idx[0]}
rootfs_start=$rootfs_end
#we always re-format the root partition
mkfs -t ext3 /dev/${disk_list[0]}$NEWRT_IDX
sudo mkdir $PWD/rootfs
sudo mkdir $PWD/tmp
sudo mount -t ext3 /dev/${disk_list[0]}$NEWRT_IDX rootfs
sudo rm -rf rootfs/*
tar -xzf /sys_setup/distro/$build_PLATFORM/ubuntu$TARGET_ARCH/Ubuntu_"$TARGET_ARCH".tar.gz -C rootfs/
ubuntu_username=""
read -p "Please input the username which you want to create in ubuntu system :" ubuntu_username
if [ -n "$ubuntu_username" ]; then
sudo useradd -m $ubuntu_username
sudo passwd $ubuntu_username
cp -a /home/$ubuntu_username rootfs/home/
sudo chown $ubuntu_username:$ubuntu_username rootfs/home/$ubuntu_username
echo `cat /etc/passwd | grep "$ubuntu_username"` >> rootfs/etc/passwd
echo `cat /etc/group | grep "$ubuntu_username"` >> rootfs/etc/group
echo `cat /etc/shadow | grep "$ubuntu_username"` >> rootfs/etc/shadow
echo `cat /etc/shadow | grep "$ubuntu_username"` >> rootfs/etc/shadow
echo "$ubuntu_username ALL=(ALL:ALL) ALL" >> rootfs/etc/sudoers
userdel -r $ubuntu_username
[ $? ] || { echo "WARNING:: create username FAIL"; }
fi
unset ubuntu_username
sudo umount rootfs
sudo rm -rf rootfs tmp
if [ "$target_system_type" == "Ubuntu" ]; then
rootfs_dev=/dev/${disk_list[0]}$NEWRT_IDX
rootfs_partuuid=`ls -al /dev/disk/by-partuuid/ | grep "${rootfs_dev##*/}" | awk {'print $9'}`
fi
fi
if [ "$fedora_en" == "yes" ]; then
fedora_partition_size_int=${fedora_partition_size%G*}
rootfs_end=$(( rootfs_start + fedora_partition_size_int ))
cmd_str="sudo parted /dev/${disk_list[0]} mkpart fedora ${rootfs_start}G ${rootfs_end}G"
echo -n "make root partition by "$cmd_str
eval $cmd_str
[ $? ] || { echo " ERR"; exit; }
#get the device id that match with the partition just made
read -a cur_idx <<< $(sudo parted /dev/${disk_list[0]} print | \
grep "fedora" | awk '{print $1}' | sort)
echo "root cur_idx is ${cur_idx[*]}"
NEWRT_IDX=${cur_idx[0]}
rootfs_start=$rootfs_end
#we always re-format the root partition
mkfs -t ext3 /dev/${disk_list[0]}$NEWRT_IDX
sudo mkdir $PWD/rootfs
sudo mount -t ext3 /dev/${disk_list[0]}$NEWRT_IDX rootfs
sudo rm -rf rootfs/*
tar -xzf /sys_setup/distro/$build_PLATFORM/fedora$TARGET_ARCH/Fedora_"$TARGET_ARCH".tar.gz -C rootfs/
sudo umount rootfs
sudo rm -rf rootfs
if [ "$target_system_type" == "Fedora" ]; then
rootfs_dev=/dev/${disk_list[0]}$NEWRT_IDX
rootfs_partuuid=`ls -al /dev/disk/by-partuuid/ | grep "${rootfs_dev##*/}" | awk {'print $9'}`
fi
fi
if [ "$debian_en" == "yes" ]; then
debian_partition_size_int=${debian_partition_size%G*}
rootfs_end=$(( rootfs_start + debian_partition_size_int ))
cmd_str="sudo parted /dev/${disk_list[0]} mkpart debian ${rootfs_start}G ${rootfs_end}G"
echo -n "make root partition by "$cmd_str
eval $cmd_str
[ $? ] || { echo " ERR"; exit; }
#get the device id that match with the partition just made
read -a cur_idx <<< $(sudo parted /dev/${disk_list[0]} print | \
grep "debian" | awk '{print $1}' | sort)
echo "root cur_idx is ${cur_idx[*]}"
NEWRT_IDX=${cur_idx[0]}
rootfs_start=$rootfs_end
#we always re-format the root partition
mkfs -t ext3 /dev/${disk_list[0]}$NEWRT_IDX
sudo mkdir $PWD/rootfs
sudo mount -t ext3 /dev/${disk_list[0]}$NEWRT_IDX rootfs
sudo rm -rf rootfs/*
tar -xzf /sys_setup/distro/$build_PLATFORM/debian$TARGET_ARCH/Debian_"$TARGET_ARCH".tar.gz -C rootfs/
sudo umount rootfs
sudo rm -rf rootfs
if [ "$target_system_type" == "Debian" ]; then
rootfs_dev=/dev/${disk_list[0]}$NEWRT_IDX
rootfs_partuuid=`ls -al /dev/disk/by-partuuid/ | grep "${rootfs_dev##*/}" | awk {'print $9'}`
fi
fi
if [ "$opensuse_en" == "yes" ]; then
opensuse_partition_size_int=${opensuse_partition_size%G*}
rootfs_end=$(( rootfs_start + opensuse_partition_size_int ))
cmd_str="sudo parted /dev/${disk_list[0]} mkpart opensuse ${rootfs_start}G ${rootfs_end}G"
echo -n "make root partition by "$cmd_str
eval $cmd_str
[ $? ] || { echo " ERR"; exit; }
#get the device id that match with the partition just made
read -a cur_idx <<< $(sudo parted /dev/${disk_list[0]} print | \
grep "opensuse" | awk '{print $1}' | sort)
echo "root cur_idx is ${cur_idx[*]}"
NEWRT_IDX=${cur_idx[0]}
rootfs_start=$rootfs_end
#we always re-format the root partition
mkfs -t ext3 /dev/${disk_list[0]}$NEWRT_IDX
sudo mkdir $PWD/rootfs
sudo mount -t ext3 /dev/${disk_list[0]}$NEWRT_IDX rootfs
sudo rm -rf rootfs/*
tar -xzf /sys_setup/distro/$build_PLATFORM/opensuse$TARGET_ARCH/OpenSuse_"$TARGET_ARCH".tar.gz -C rootfs/
sudo umount rootfs
sudo rm -rf rootfs
if [ "$target_system_type" == "Opensuse" ]; then
rootfs_dev=/dev/${disk_list[0]}$NEWRT_IDX
rootfs_partuuid=`ls -al /dev/disk/by-partuuid/ | grep "${rootfs_dev##*/}" | awk {'print $9'}`
fi
fi
boot_dev=/dev/${disk_list[0]}1
boot_uuid=`ls -al /dev/disk/by-uuid/ | grep "${boot_dev##*/}" | awk {'print $9'}`
mkdir $PWD/boot
sudo mount -t vfat /dev/${disk_list[0]}1 boot
sudo rm -rf boot/*
sudo cp -r /sys_setup/boot/* boot/
cat > boot/grub.cfg << EOM
#
# Sample GRUB configuration file
#
# Boot automatically after 0 secs.
set timeout=5
# By default, boot the Euler/Linux
set default=${target_system_type}_sata
# For booting GNU/Linux
menuentry "$target_system_type SATA" --id ${target_system_type}_sata {
search --no-floppy --fs-uuid --set ${boot_uuid}
linux /Image rdinit=/init root=PARTUUID=$rootfs_partuuid rootdelay=10 rootfstype=ext4 rw console=ttyS0,115200 earlycon=uart8250,mmio32,0x80300000 ip=::::::dhcp
}
EOM
sudo umount boot
sudo rm -rf boot
exit 0
else
#make another partition as the place where the new root filesystem locates
#1) ensure that the disk partition table is gpt
if [ "$(sudo parted /dev/${disk_list[0]} print | \
awk '/Partition / && /Table:/ {print $NF}')" != "gpt" ]; then
echo "All current partitions will be deleted"
if ! ( sudo parted /dev/${disk_list[0]} mklabel gpt ); then
echo "configure ${disk_list[0]} label as gpt FAIL"
exit
fi
fi
#2) check whether the boot partition exist
boot_id=$(sudo parted /dev/${disk_list[0]} print | awk '$1 ~ /[0-9]+/ && /boot/ {print $1}')
###in D02, if [ -n "$boot_id" -a $boot_id -ne 1 ]; then always warning "too many parameters"
[[ -n "$boot_id" && $boot_id -ne 1 ]] && \
{ echo "boot partition is not first one. will delete it at first"
if ! ( sudo parted /dev/${disk_list[0]} rm $boot_id ); then
echo "ERR:delete /dev/${disk_list[0]}$boot_id FAIL"
exit
fi
}
#recheck does boot exist...
boot_id=$(sudo parted /dev/${disk_list[0]} print | awk '$1 ~ /[0-9]+/ && /boot/ {print $1}')
if [ -z "$boot_id" ]; then
echo -n "make boot partition"
##[ ! (sudo parted /dev/${disk_list[0]} mkpart uefi 1 256) ] && ( echo "ERR"; exit ) always said too many parameters
if ! ( sudo parted /dev/${disk_list[0]} mkpart uefi 1 256;set 1 boot on ); then
echo " ERR"
exit
else
echo " OK"
##since UEFI currently only support fat16, we need mkfs.vfat
sudo apt-get install dosfstools -y
mkfs -t vfat /dev/${disk_list[0]}1
#parted /dev/${disk_list[0]} mkfs 1 fat16
[ $? ] || { echo "ERR::mkfs for boot partition FAIL"; exit; }
#sudo parted /dev/${disk_list[0]} set 1 boot on
fi
else
echo "existed boot partition will be updated"
fi
sel_name=""
#3) make the new root partition
#ROOT_FS="ubuntu"
#get the current partition number list before new creation.
#actually, $ROOT_FS is not necessary. we can find the new created partition still.
read -a old_idx <<< $(sudo parted /dev/${disk_list[0]} print | grep "$ROOT_FS" | awk '{print $1}' | sort)
echo "previous idx list is \"${old_idx[*]}\"${old_idx[*]}"
sudo parted /dev/${disk_list[0]} print free
assert_flag="w"
#while [ "$assert_flag" != "y" -a "$assert_flag" != "n" ]; do
# read -p "Do you want to create a new root partition?(y | n):" assert_flag
#done
wait_user_choose "Create a new root partition?" "y|n"
if [ "$assert_flag" == "y" ]; then
echo "Please carefully configure the start and end of root partition"
cmd_str="sudo parted /dev/${disk_list[0]} mkpart $ROOT_FS 512M 20G"
echo -n "make root partition by "$cmd_str
eval $cmd_str
[ $? ] || { echo " ERR"; exit; }
echo " OK"
#get the device id that match with the partition just made
read -a cur_idx <<< $(sudo parted /dev/${disk_list[0]} print | \
grep "$ROOT_FS" | awk '{print $1}' | sort)
echo "root cur_idx is ${cur_idx[*]}"
# Walk both index lists in parallel; stop at the first position where the
# post-mkpart list diverges from the pre-mkpart list — that slot holds
# the freshly created partition's index.
# Fixed: `\<` inside (( )) arithmetic is a syntax error; plain `<` is
# correct there (no redirection ambiguity exists in arithmetic context).
for (( ind=0; ind < ${#old_idx[*]}; ind++ )); do
    [ "${cur_idx[ind]}" == "${old_idx[ind]}" ] || break
done
NEWRT_IDX=${cur_idx[ind]}
#we always re-format the root partition
mkfs -t ext3 /dev/${disk_list[0]}$NEWRT_IDX
else
para_sel part_name sel_name
#we always re-format the root partition
mkfs -t ext3 $sel_name
NEWRT_IDX=${sel_name##/dev/${disk_list[0]}}
fi
echo "newrt_idx is "$NEWRT_IDX
#we can make this as function later
read -a cur_idx <<< $(sudo parted /dev/${disk_list[0]} print | \
grep "user" | awk '{print $1}' | sort)
echo "user cur_idx is ${cur_idx[*]} ${#cur_idx[@]}"
#we try our best to use less user partitions
assert_flag="hw"
wait_user_choose "Create new user partition?" "y|n"
if [ ${#cur_idx[@]} == 0 ]; then
echo "No any user partitions. Will jump to create new one!"
assert_flag="y"
fi
if [ "$assert_flag" == "y" ]; then
#USRDEV_IDX=${cur_idx[0]}
sudo parted /dev/${disk_list[0]} print free
cmd_str="sudo parted /dev/${disk_list[0]} mkpart user 20G 40G"
echo -n "make user partition by "$cmd_str
eval $cmd_str
[ $? ] || { echo " ERRR"; exit; }
echo " OK"
#only one user partition
read -a cur_idx <<< $(sudo parted /dev/${disk_list[0]} print | \
grep "user" | awk '{print $1}')
USRDEV=${disk_list[0]}${cur_idx[0]}
mkfs -t ext3 /dev/$USRDEV
echo "user partition is $USRDEV"
else
sel_name=""
echo "There are user partitions now."
for (( i=0; i < ${#cur_idx[@]}; (( i++ )) )); do
cur_idx[i]="/dev/${disk_list[0]}${cur_idx[i]}"
done
sudo parted /dev/${disk_list[0]} pr
echo "Must select one idle partition as cache:"
para_sel cur_idx sel_name
##unset the reused partition
for (( i=0; i < ${#part_name[@]}; (( i++ )) )); do
[ "${part_name[i]}" != $sel_name ] && continue
unset part_name[i]
break
done
move_array_unset part_name $i ${#part_name[@]}
USRDEV=${sel_name##/dev/}
echo "user partition is $USRDEV"
wait_user_choose "Is the user partition re-formatted?" "y|n"
[ "$assert_flag" != "y" ] || mkfs -t ext3 /dev/$USRDEV
fi
USRDEV_IDX=${USRDEV##${disk_list[0]}}
echo "USRDEV_IDX is $USRDEV_IDX"
assert_flag=""
read -p "Do you need to create one swap partition?(y/n)" assert_flag
if [ "$assert_flag" == "y" ]; then
sudo parted /dev/${disk_list[0]} print free
sudo parted /dev/${disk_list[0]} mkpart swap linux-swap 40G 50G
[ $? ] || { echo "WARNING:: create swap partition FAIL"; }
fi
fi
#fi
#read -p "Please input the partition size(G or M):" root_size
#while [ -z "$(echo $root_size | awk '/^[0-9]+[GM]$/ {print}')" -o "${root_size:0: -1}" == "0" ]; do
# echo "Invalid input"
# read -p "Please input the partition size(G or M):" root_size
#done
#echo "partition size is "$root_size
NEWFS_DEV=${disk_list[0]}
export NEWRT_IDX
export NEWFS_DEV
rootfs_dev2=/dev/${disk_list[0]}2
rootfs_partuuid=`ls -al /dev/disk/by-partuuid/ | grep "${rootfs_dev2##*/}" | awk {'print $9'}`
sudo mkdir $PWD/boot
sudo mkdir $PWD/rootfs
sudo mkdir $PWD/tmp
sudo mount -t vfat /dev/${disk_list[0]}1 boot
sudo mount -t ext3 /dev/${disk_list[0]}2 rootfs
sudo rm -rf boot/*
sudo rm -rf rootfs/*
sudo cp -a /sys_setup/boot/* boot/
rm -f boot/EFI/GRUB2/grub.cfg
touch tmp/grub.cfg
cat > tmp/grub.cfg << EOM
#
# Sample GRUB configuration file
#
# Boot automatically after 0 secs.
set timeout=5
# By default, boot the Euler/Linux
set default=ubuntu_sata
# For booting GNU/Linux
menuentry "Ubuntu SATA" --id ubuntu_sata {
set root=(hd1,gpt1)
linux /Image rdinit=/init root=PARTUUID=$rootfs_partuuid rootdelay=10 rootfstype=ext4 rw console=ttyS0,115200 earlycon=uart8250,mmio32,0x80300000 ip=:::::eth0:dhcp
devicetree /hip05-d02.dtb
}
EOM
mv tmp/grub.cfg boot/EFI/GRUB2/
#sudo dd if=/sys_setup/distro/$build_PLATFORM/ubuntu$TARGET_ARCH/ubuntu-vivid.img of=/dev/${disk_list[0]}2
if [ "$ubuntu_en" == "yes" ]; then
tar -xzf /sys_setup/distro/$build_PLATFORM/ubuntu$TARGET_ARCH/ubuntu"$TARGET_ARCH"_"$build_PLATFORM".tar.gz -C rootfs/
fi
sudo umount boot rootfs
sudo rm -rf boot rootfs tmp
##OK. Partitions are ready in Hard_disk. Can start the boot, root file-system making
| true
|
646b5d7bfb57c6da3fce207abab16191b6769d91
|
Shell
|
andy2804/transfer-learner
|
/transferlearning/trainer/export_example.sh
|
UTF-8
| 1,058
| 3.328125
| 3
|
[] |
no_license
|
#!/usr/bin/env bash
# Export one or more trained object-detection checkpoints to frozen
# inference graphs with the TensorFlow Object Detection API.
#
# Usage:
#   ./export_example.sh [config_name] ["ckpt1 ckpt2 ..."]
#   e.g. ./export_example.sh name_of_protobuf_config "230503 330981 612000"
cd "$HOME/models/research" || exit 1
export PYTHONPATH=$PYTHONPATH:$(pwd):$(pwd)/slim

# Job_name can't have spaces.
job_name=${1:-name_of_protobuf_config}
# Fall back to the default checkpoint whenever $2 is missing. (The original
# set ckpt="" when only $1 was given, so the loop silently did nothing.)
ckpt=${2:-000001}
gpu=0

# $ckpt is deliberately left unquoted so that a space-separated list
# expands to several checkpoint numbers.
for arg in $ckpt; do
    # Tag the output directory with the step count in thousands
    # (integer division -- no fractional part to strip).
    tag=$((arg / 1000))
    echo "Exporting graph ${job_name}_${arg}"
    # NOTE(review): ${dataset} is never assigned in this script; it must be
    # exported by the caller or the output path collapses to "..._v2__...".
    CUDA_VISIBLE_DEVICES=${gpu} python3 object_detection/export_inference_graph.py \
        --input_type image_tensor \
        --pipeline_config_path "$HOME/WormholeLearning/resources/nets_cfgs/configs/${job_name}.config" \
        --trained_checkpoint_prefix "/media/sdc/andya/wormhole_learning/models/obj_detector_retrain/train_${job_name}/freeze/model.ckpt-${arg}" \
        --output_directory "/media/sdc/andya/wormhole_learning/models/obj_detector_retrain/ssd_inception_v2_${dataset}_${job_name}_${tag}/"
done
| true
|
4002f1f510762dff4d87f3215d6dece26ac8f24f
|
Shell
|
kkaempf/openwbem
|
/test/unit/newtest.sh
|
UTF-8
| 1,078
| 3.984375
| 4
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/sh
# Generate unit-test skeleton files for one or more classes from the
# Generic*TestCases templates, register them in Makefile.am and .cvsignore,
# and add the new sources to version control (CVS if present, else SVN).
#
# Usage: newtest.sh [--full] ClassName...
#   --full   produce a full (non-auto) test with a separate header file.
#
# Fix: the original shebang read "#/bin/sh" (missing '!'), so the kernel
# never recognised an interpreter line.
if [ "x${1:-nothing}" = "x--full" ]; then
	FULL_TESTS=1
	shift
else
	FULL_TESTS=0
fi

if [ $# -le 0 ]; then
	echo "Usage: $(basename "$0") [option] ClassName" >&2
	echo "Where option can be --full to produce a full (non-auto) test." >&2
	exit 1
fi

while [ $# -gt 0 ]; do
	# Refuse to clobber an existing test case for this class.
	if [ -f "$1TestCases.cpp" ] || [ -f "$1TestCases.hpp" ]; then
		echo "Test Case for $1 already exist" >&2
		exit 1
	fi
	if [ "x${FULL_TESTS}" = "x0" ]; then
		sed "s/Generic/$1/g" < GenericAutoTestCases.cpp.tmpl > "$1TestCases.cpp"
		SOURCE_FILES=$1TestCases.cpp
	else
		sed "s/Generic/$1/g" < GenericTestCases.hpp.tmpl > "$1TestCases.hpp"
		sed "s/Generic/$1/g" < GenericTestCases.cpp.tmpl > "$1TestCases.cpp"
		SOURCE_FILES="$1TestCases.cpp $1TestCases.hpp"
	fi
	# Register the new test: append it to BUILT_TESTS and insert a
	# <Class>TestCases_SOURCES entry after the marker comment in Makefile.am.
	sed -e 's~\(BUILT_TESTS = \\\)$~\1\n'"$1"'TestCases \\~g' \
		-e 's~\(# Test case definitions -- DO NOT EDIT THIS COMMENT\)~\1\n'"$1"'TestCases_SOURCES = '"${SOURCE_FILES}"'\n~g' < Makefile.am > tmp && mv tmp Makefile.am
	echo "$1TestCases" >> .cvsignore
	# ${SOURCE_FILES} is intentionally unquoted: it may hold two file names.
	if [ -d CVS ]; then
		cvs add ${SOURCE_FILES}
	else
		svn add ${SOURCE_FILES}
	fi
	shift
done
| true
|
e5238a6c70c1722a5841f173384251f22e5bf41b
|
Shell
|
delkyd/alfheim_linux-PKGBUILDS
|
/4kvideodownloader/PKGBUILD
|
UTF-8
| 2,019
| 2.578125
| 3
|
[] |
no_license
|
# Maintainer: Muflone http://www.muflone.com/contacts/english/
pkgname=4kvideodownloader
pkgver=4.3.2.2215
pkgrel=1
pkgdesc="Quickly download videos from YouTube in high-quality."
arch=('i686' 'x86_64')
url="http://www.4kdownload.com/products/product-videodownloader"
license=('custom:eula')
depends=('qt5-script' 'portaudio' 'ffmpeg2.8')
# Architecture-independent support files shipped next to this PKGBUILD.
source=("${pkgname}.desktop"
        "${pkgname}.png")
# ${pkgver%.*.*} strips the two trailing version components to build the
# upstream download URL (upstream names archives by the short version).
source_i686=("${pkgname}_${pkgver}_i386.tar.bz2"::"https://downloads2.4kdownload.com/app/${pkgname}_${pkgver%.*.*}_i386.tar.bz2")
source_x86_64=("${pkgname}_${pkgver}_amd64.tar.bz2"::"https://downloads2.4kdownload.com/app/${pkgname}_${pkgver%.*.*}_amd64.tar.bz2")
sha256sums=('6ab39088bde330267b43f87878f6bd47a215c732e17d417a99fc23ac4c568952'
            '56b851ef96aade0612f236b8763ccaf2def8acdd49f37bbefdd79e1d5f6e68be')
sha256sums_i686=('a8e2d7981857c9a69ae6ba261052bf27e84a57d30452453093420dbcf156cfaf')
sha256sums_x86_64=('da7ac298ad0d97e1199f2a92f667a433a086363bdff7088b5070caccdf3e46f2')

# Upstream ships prebuilt binaries, so there is no build(): package() only
# stages files into the package tree.
package() {
  # Install desktop file
  install -m 755 -d "${pkgdir}/usr/share/applications"
  install -m 755 -t "${pkgdir}/usr/share/applications" "${pkgname}.desktop"
  # Install icon file
  install -m 755 -d "${pkgdir}/usr/share/pixmaps"
  install -m 644 -t "${pkgdir}/usr/share/pixmaps" "${pkgname}.png"
  # Install files
  cd "${pkgname}"
  install -m 755 -d "${pkgdir}/usr/lib/${pkgname}"
  install -m 755 -t "${pkgdir}/usr/lib/${pkgname}" "${pkgname}-bin"
  install -m 755 -d "${pkgdir}/usr/lib/${pkgname}/audio"
  install -m 755 -t "${pkgdir}/usr/lib/${pkgname}/audio" audio/*
  install -m 755 -d "${pkgdir}/usr/lib/${pkgname}/translation"
  install -m 755 -t "${pkgdir}/usr/lib/${pkgname}/translation" translation/*
  # Install launcher file (symlink into PATH)
  install -m 755 -d "${pkgdir}/usr/bin"
  ln -s "/usr/lib/${pkgname}/${pkgname}-bin" "${pkgdir}/usr/bin/${pkgname}"
  # Install license file
  install -m 755 -d "${pkgdir}/usr/share/licenses/${pkgname}"
  install -m 644 -t "${pkgdir}/usr/share/licenses/${pkgname}" "doc/eula"
}
| true
|
b5fc58f113eb9bcb29334aef26c6fc8387c202d7
|
Shell
|
xiebohan789/test
|
/network.sh
|
UTF-8
| 549
| 2.796875
| 3
|
[] |
no_license
|
#!/bin/bash
# Define and start libvirt NAT networks test10..test20: each gets its own
# bridge (virbrNN), MAC suffix, DNS domain and a DHCP range on
# 192.168.NN.0/24. The XML definition is written to ./testNN first.
for ((i=10 ; i<=20 ;i++)) ;do
echo "
<network>
<name>test$i</name>
<uuid>`uuidgen`</uuid>
<bridge name='virbr$i' stp='on' delay='0'/>
<mac address='52:54:00:75:fc:$i'/>
<domain name='test$i'/>
<ip address='192.168.$i.1' netmask='255.255.255.0'>
<dhcp>
<range start='192.168.$i.2' end='192.168.$i.254'/>
</dhcp>
</ip>
</network>" > test$i
# Fix: run virsh directly instead of `echo \`virsh ...\``, which word-split
# the command output and threw the exit status away.
virsh net-define "test$i"
virsh net-start "test$i"
done
| true
|
1f4bbedc5bcc6eefe7c2f10a6b16fed6c7e78f72
|
Shell
|
lifesfun/ShellScripts
|
/pullNs2
|
UTF-8
| 504
| 2.921875
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/bash
# Mirror the NS2 game files from the remote seed server into a local server
# directory, then refresh the mod-id list from the mirrored MapCycle.json.
# Connection settings ($portNs2, $usrNs2, ...) come from the rc file below.
. /var/lifesfun/serverNs2/.ns2rc
# $1 must name an existing directory under $baseNs2 (the target server).
if [ -n "$1" ] && [ -d "$baseNs2"/"$1" ] ; then
# lftp heredoc: full mirror of ns2/ (deleting stale files, skipping txt and
# hash files), then a cached partial mirror of the shine JSON configs only.
lftp -p "$portNs2" -u "$usrNs2","$passwdNs2" "$ipNs2" << EOF
mirror -r --delete --parallel=10 -x '.*txt' -x '.*hashes' "$remoteNs2"/ns2 "$baseNs2"/"$1"/ns2
mirror --parallel=10 --use-cache -i '.*json' -x '.*txt' "$remoteNs2"/ns2/shine "$baseNs2"/"$1"/ns2/shine
quit 0
EOF
# getModIds is presumably defined by the sourced .ns2rc -- confirm there.
getModIds "$baseNs2"/"$1"/ns2/MapCycle.json
else
echo "You need to specify an existing local directory to pull" >&2 && exit 1
fi
| true
|
0d5ae41a90970d60385d8a38984fb554192ed40a
|
Shell
|
bertvannuffelen/RDFsync
|
/receiver/virtuoso/receiver_cont_sync.sh
|
UTF-8
| 1,008
| 3.15625
| 3
|
[] |
no_license
|
#!/bin/bash
# Synchronise the most recently received data into Virtuoso, re-export the
# DMZ graph as dump files, ship them onward, and hand the sync token back
# to the producer. ENV_* placeholders are substituted at install time.
INSTALLDIR=ENV_INSTALL_DIR/receiver/virtuoso

if [ ! -f ENV_WORK_DIR/sync ]; then
    # No sync token yet: nothing to do this round.
    echo "."
elif [ -f ENV_WORK_DIR/receiving ]; then
    # A previous receive operation is still in progress; wait for it to
    # finish before starting another one.
    echo "."
else
    echo "start"
    # Consume the token and mark this run as in progress.
    rm -f ENV_WORK_DIR/sync
    cp ENV_INSTALL_DIR/receiver/receiver ENV_WORK_DIR/receiving
    echo "upload most recent data"
    $INSTALLDIR/execute-isql.sh $INSTALLDIR/clean_upload.sql
    echo "dump all data"
    rm -rf ENV_VIRTUOSO_DUMP_DIR
    $INSTALLDIR/execute-isql.sh $INSTALLDIR/dump_dmz.sql
    # The fixed sleep approximates "wait for the dump to finish"; the
    # original author noted it should become a poll on a Virtuoso loading
    # table instead.
    sleep 10m
    $INSTALLDIR/transferdumps.sh
    scp ENV_INSTALL_DIR/producer/sync ENV_SCP_TARGET:ENV_SCP_TARGET_DIR
    echo "done"
    rm -f ENV_WORK_DIR/receiving
fi
| true
|
393c0096b84d1f3ef3928af6315309c861cc70e3
|
Shell
|
dice-project/cloudify-oryx2-blueprint
|
/scripts/start_oryx.sh
|
UTF-8
| 1,990
| 2.859375
| 3
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
#!/usr/bin/env bash
# Create the Oryx input/update Kafka topics if they are missing, then start
# the batch, speed and serving layers in detached tmux sessions.
export ORYX_BIN=~/oryx
cd "${ORYX_BIN}" || exit 1

# Blatantly stolen from oryx-run.sh: dump the effective configuration from
# any one of the layer jars and grep the topic/broker settings out of it.
ANY_JAR=$(ls -1 oryx-batch-*.jar oryx-speed-*.jar oryx-serving-*.jar | head -1)
CONFIG_FILE="oryx.conf"
CONFIG_PROPS=$(java -cp ${ANY_JAR} -Dconfig.file=${CONFIG_FILE} com.cloudera.oryx.common.settings.ConfigToProperties)
INPUT_ZK=$(echo "${CONFIG_PROPS}" | grep -E "^oryx\.input-topic\.lock\.master=.+$" | grep -oE "[^=]+$")
INPUT_KAFKA=$(echo "${CONFIG_PROPS}" | grep -E "^oryx\.input-topic\.broker=.+$" | grep -oE "[^=]+$")
INPUT_TOPIC=$(echo "${CONFIG_PROPS}" | grep -E "^oryx\.input-topic\.message\.topic=.+$" | grep -oE "[^=]+$")
UPDATE_ZK=$(echo "${CONFIG_PROPS}" | grep -E "^oryx\.update-topic\.lock\.master=.+$" | grep -oE "[^=]+$")
UPDATE_KAFKA=$(echo "${CONFIG_PROPS}" | grep -E "^oryx\.update-topic\.broker=.+$" | grep -oE "[^=]+$")
UPDATE_TOPIC=$(echo "${CONFIG_PROPS}" | grep -E "^oryx\.update-topic\.message\.topic=.+$" | grep -oE "[^=]+$")

ALL_TOPICS=$(kafka-topics --list --zookeeper ${INPUT_ZK} 2>&1 | grep -vE "^mkdir: cannot create directory")

# Fix: the original `[ -z $(echo ... | grep ...) ]` left the command
# substitution unquoted, so [ failed with "too many arguments" whenever
# grep matched more than one topic name. grep -q performs the same
# presence test robustly. (Also fixed the "topc" typo in the log lines.)
if ! echo "${ALL_TOPICS}" | grep -q "${INPUT_TOPIC}"; then
	ctx logger info "[start_oryx] Creating input topic"
	kafka-topics --zookeeper ${INPUT_ZK} --create --replication-factor 2 --partitions 4 --topic ${INPUT_TOPIC} 2>&1 | grep -vE "^mkdir: cannot create directory"
fi

if ! echo "${ALL_TOPICS}" | grep -q "${UPDATE_TOPIC}"; then
	ctx logger info "[start_oryx] Creating update topic"
	kafka-topics --zookeeper ${UPDATE_ZK} --create --replication-factor 2 --partitions 1 --topic ${UPDATE_TOPIC} 2>&1 | grep -vE "^mkdir: cannot create directory"
	kafka-topics --zookeeper ${UPDATE_ZK} --alter --topic ${UPDATE_TOPIC} --config retention.ms=86400000 --config max.message.bytes=16777216 2>&1 | grep -vE "^mkdir: cannot create directory"
fi

# Each layer keeps its tmux pane open after exit ("read") for post-mortems.
tmux new -d -s batch_layer './oryx-run.sh batch; read'
tmux new -d -s speed_layer './oryx-run.sh speed; read'
tmux new -d -s serving_layer './oryx-run.sh serving; read'
| true
|
d902bde973b5595942d74a021e133f8450c9cc0b
|
Shell
|
pangine/disasm-eval-sources
|
/images/x86-pc/linux/gnu-icc-19.1.1.219/nginx-1.8.0/run.sh
|
UTF-8
| 633
| 2.84375
| 3
|
[] |
no_license
|
#!/bin/bash
# Build nginx-1.8.0 (32-bit) with Intel icc for the disassembly-evaluation
# corpus, then move the produced binaries and build tree into the result
# directories. LINUX_SCRIPTS_DIR, OPTIONS, BIN_DIR, BUILD_DIR and PKG are
# provided by the container environment / the sourced prefix script.
source ${LINUX_SCRIPTS_DIR}/run_prefix.sh
# Rename "opt_report_file" argument with "qopt_report_file"
# (presumably because this icc release only accepts the -q form -- confirm).
sed -i "s~-opt_report_file=~-qopt_report_file=~g" auto/cc/icc
${LINUX_SCRIPTS_DIR}/git_prepare.sh
# 32-bit build; --save-temps keeps intermediate files for later analysis.
./configure --prefix=${HOME}/local --with-cc-opt="${OPTIONS} -m32 --save-temps -g -Wno-error" --with-ld-opt="-m32" --without-http_gzip_module --without-http_rewrite_module
${LINUX_SCRIPTS_DIR}/git_prepare.sh
make install
${LINUX_SCRIPTS_DIR}/git_finish.sh
# Move files to result directories
mv ${HOME}/local/sbin/* ${BIN_DIR}/${PKG}
cd ${HOME}
mv ${PKG} ${BUILD_DIR}/${PKG}
source ${LINUX_SCRIPTS_DIR}/run_suffix.sh
| true
|
db57364b033f9ae84c6e59fa93fb9655bd7b9f65
|
Shell
|
XinZhou-1/NCLscripts
|
/cesm_scripts/Analysis/run_checklist_RossbyWaves.sh
|
UTF-8
| 5,455
| 2.984375
| 3
|
[] |
no_license
|
#!/bin/sh
# Script to calculate variables that are useful for analysing Rossby wave
# behaviour
# NOTE(review): this script uses bash arrays and (( )) loops under a
# "#!/bin/sh" shebang -- works only where /bin/sh is bash; confirm.
cd /home/disk/eos4/rachel/git/NCL/cesm_scripts/Analysis//scripts/
#dir="/home/disk/eos4/rachel/CESM_outfiles/HYAK/"
#dir="/home/disk/eos4/rachel/CESM_outfiles/"
dir="/home/disk/eos4/rachel/CESM_outfiles/AMOC/"
numexps="5"
#exps=("CAM4POP_NoMTR_f19" "CAM4POP_NoMT_f19" "CAM4POP_NoR_f19" "CAM4POP_NoMT_f09" "CAM4POP_NoR_f09" "CAM4POP_NoTopo_f19" "CAM4POP_NoTopo_f09")
exps=("CAM4slab_CTL_f19" "CAM4slab_NoTopo_f19" "CAM4slab_NoMT_f19" "CAM4slab_NoR_f19" "CAM4slab_NoMTR_f19")
#expsctl=("WACCM_f19_CTL" "WACCM_f19_CTL" "WACCM_f19_CTL" "WACCM_f19_CTL" "WACCM_f19_CTL")
dirbase="/home/disk/rachel/CESM_outfiles/"
expsctl=("CESMnoTf19")
start="11"
end="40"
version="122" # 122 for cesm 1.2.2, 106 for cesm 1.0.6
# For Tak-Nak fluxes:
export NCL_startyrC=11
export NCL_nyearsC=20
nsecs="00000" # default = 00000, when running hybrid will be 21600
h2start="01" # default = 01, when running hybrid this will be 02
export NCL_low_bpf=2 # 2 days minimum filter
export NCL_high_bpf=6 # 6 days maximum filter
export NCL_ARG_lonstart=0
export NCL_ARG_lonend=360
export NCL_dirstr="/atm/"
export NCL_Ozone=0
export NCL_Mtrans=1
export NCL_GW=0
export NCL_xrad=0
export NCL_dia=1
export NCL_N_ARGS=$#
export NCL_h2mon="02"
export NCL_dailyfile='h1'
export NCL_ERAlev=0
export NCL_CESMversion=$version
export NCL_nsecs=$nsecs
export NCL_h2start=$h2start
# save command line arguments to environment variable NCL_ARG_#
export NCL_ARG_1=$dir
export NCL_ARG_2=$numexps
# save command line arguments to environment variable NCL_ARG_#
# Export the experiment pairs: NCL_Exp_<i> is the control run,
# NCL_Exp2_<i> the perturbed run at the same index.
for ((index=0; index<=$numexps-1; index++))
do
eval export NCL_Exp_$index=${expsctl[index]} # with topography
eval export NCL_Exp2_$index=${exps[index]} # without topography
done
export NCL_Dirb=$dirbase
# NCL_ARG_3..NCL_ARG_(2+numexps) carry the experiment names.
for ((index=3; index<=2+$numexps; index++))
do
eval export NCL_ARG_$index=${exps[index-3]}
done
eval export NCL_startyr=$start
eval export NCL_endyr=$end
((index++))
echo $index
eval export NCL_ARG_$index=$nsecs
echo NCL_N_ARGS
# --- NCL processing stages; only the active (uncommented) calls run. ---
#echo 'Initial_analysis_means.ncl'
#ncl Initial_analysis_means.ncl # Add variables to monthly resolution files
# including PV, SF, divergences MSE, etc
# then calculate climatological means
# on monthly and annual time resolution
###NOT CURRENTLY USED echo 'Calc_VertGrad.ncl'
###ncl Calc_VertGrad.ncl # Calculate climatological mean vertical gradients
# of omega and T, TH, and omegaT NOT on pressure levels
#echo 'hybrid2pres.ncl'
#ncl hybrid2pres.ncl
#export NCL_ERAlev=0 # if 1 put onto ERAI pressure levels
echo 'hybrid2pres_morelev.ncl'
ncl hybrid2pres_morelev.ncl # convert many variables onto hybrid levels from
# monthly resolution data including caluclation of
# potential temperaturei, PV, etc and vertical
# gradients etc
# Calculates these ON Pressure levels, rather than
# calculating them on hybrid and then converting
#export NCL_ERAlev=1 # if 1 put onto ERAI pressure levels
#echo 'hybrid2pres_morelev.ncl'
#ncl hybrid2pres_morelev.ncl # convert many variables onto hybrid levels from
# Use to get U, V, TH on limited pressure levels
#echo 'hybrid2pres_daily_limlev.ncl'
#ncl hybrid2pres_daily_limlev.ncl
#echo 'Create_Seas_ts.ncl'
#ncl Create_Seas_ts.ncl # create timeseries of all years of monthly data for
# DJF, MAM, JJA and SON
#echo 'hybrid2pres_ts.ncl'
#ncl hybrid2pres_ts.ncl # convert the files created by Create_Seas_ts.ncl
# onto pressure levels specified in this file
## Refractive index
#echo CalcZMKs.ncl
#ncl Calc_ZMKs.ncl
#echo Calc_ZMKa_monthly.ncl
#ncl Calc_ZMKs_monthly.ncl
## Eddy characteristics
#echo 'Calc_Eady_long.ncl'
#ncl Calc_Eady_long.ncl
#echo 'LanczosF_Z850_250.ncl'
#ncl LanczosF_Z850_250.ncl
#echo 'Calc_varZ850.ncl'
#ncl Calc_varZ850.ncl
#export NCL_var="V"
#export NCL_inlev="850"
#echo 'Lanczos bandpass filter'
#ncl LanczosF_bandpass_pres.ncl
#export NCL_inlev="250"
#echo 'Lanczos bandpass filter'
#ncl LanczosF_bandpass_pres.ncl
#echo 'LanczosF_UVT_EKE_EV.ncl'
#ncl LanczosF_UVT_EKE_EV.ncl
#echo 'Calc_EKE_VT.ncl'
#ncl Calc_EKE_VT.ncl
##########
#export NCL_ERAlev=1
#echo 'Calc_EPfluxes.ncl'
#ncl Calc_EPfluxes.ncl
#export NCL_ERAlev=0
#echo 'Calc_EPfluxes.ncl'
#ncl Calc_EPfluxes.ncl
#echo 'Calc_RIdx.ncl'
#ncl Calc_RIdx.ncl
#echo 'Calc_Ks.ncl'
#ncl Calc_Ks.ncl
# Calculate Ks on lat-lon-pressure levels, incuding buoyancy term
#echo Calc_Ks_full.ncl
#ncl Calc_Ks_full.ncl
#ncl Calc_ZMKs_bimonth.ncl
#ncl Calc_QGKS_test.ncl
# Calculate EP fluxes on daily data
#echo Calc_EPfluxes_daily.ncl
#ncl Calc_EPfluxes_daily.ncl
# Calculate wavenumber 1 and 2 EP fluxes on daily data
#ncl Calc_EPfluxes_wave12_daily.ncl
#ncl Calc_EPfluxes_wave2_daily.ncl
#eval export NCL_seas="DJF"
#ncl Calc_TEMcirc_daily.ncl
#eval export NCL_seas="Annual"
#ncl Calc_TEMcirc_daily.ncl
#eval export NCL_seas="JJA"
#ncl Calc_TEMcirc_daily.ncl
#echo 'Calc_TakNak_fluxes.ncl'
#export NCL_season="DJF"
#ncl Calc_TakNak_fluxes.ncl
#export NCL_season="SON"
#ncl Calc_TakNak_fluxes.ncl
#export NCL_season="MAM"
#ncl Calc_TakNak_fluxes.ncl
#export NCL_season="JJA"
#ncl Calc_TakNak_fluxes.ncl
echo 'finished'
| true
|
24dcc2b3867af3e888325031d2e9b72590f5720c
|
Shell
|
QBRC/PIPE-CLIP
|
/lib/runR1.sh
|
UTF-8
| 826
| 3.234375
| 3
|
[
"BSD-3-Clause"
] |
permissive
|
#!/bin/bash
# Sweep (epsilon, step) starting points for the ZTNB model fit, calling the
# R script for each pair until <filename>.Converge.txt exists (non-empty).
#
# Arguments:
#   $1 - base name of the input file (also prefixes the convergence marker)
#   $2 - p-value threshold passed through to ZTNB_tryCatch.R
#
# Fixes: shebang was "#!/bin/sh" although the script relies on bash
# arrays; unused variables r_status and count removed (count only fed a
# commented-out echo).
filename=$1
pvalue=$2
epsilon=(0.01 0.15 0.1)
steps=(0.1 0.08 0.05)

for e in "${epsilon[@]}"; do
	for s in "${steps[@]}"; do
		echo "$e,$s"
		if [ -s "$filename.Converge.txt" ]; then
			# Already converged: nothing to do (keep the legacy
			# blank output line the original printed here).
			echo
		else
			Rscript lib/ZTNB_tryCatch.R "$filename" "$pvalue" "$e" "$s"
		fi
	done
done
| true
|
db34f6e821c1e680242f48a4f8c984d06b9666a5
|
Shell
|
GoogleCloudPlatform/click-to-deploy
|
/docker/kafka/templates/debian11/scripts/docker-entrypoint.sh
|
UTF-8
| 1,289
| 3.28125
| 3
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Container entrypoint: sanitise build-time variables, normalise
# KAFKA_PORT, and exec the requested command (defaulting the broker start
# script when only flags are passed).
set -e

# Clear some variables that we don't want runtime
unset KAFKA_USER KAFKA_UID KAFKA_GROUP KAFKA_GID \
      KAFKA_DOCKER_SCRIPTS KAFKA_DIST_URL KAFKA_SHA512

if [[ "$VERBOSE" == "yes" ]]; then
  set -x
fi

# If KAFKA_PORT is set but not a plain integer (presumably a Docker legacy
# link value like "tcp://..." -- confirm), reset it to the default 9092.
# An *unset* KAFKA_PORT is deliberately left unset.
if [[ -v KAFKA_PORT ]] && ! grep -E -q '^[0-9]+$' <<<"${KAFKA_PORT:-}"; then
  KAFKA_PORT=9092
  export KAFKA_PORT
fi

# when invoked with e.g.: docker run kafka -help
if [ "${1:0:1}" == '-' ]; then
  set -- "$KAFKA_HOME/bin/kafka-server-start.sh" "$@"
fi

# execute command passed in as arguments.
# The Dockerfile has specified the PATH to include
# /opt/kafka/bin (for kafka) and /opt/docker-kafka/scripts (for our scripts
# like create-topics, start-kafka, versions).
exec "$@"
| true
|
bc99764aaaaaed64de2fd652f331d32ea448bd57
|
Shell
|
Mujinzhao/DzAppCenter
|
/build.sh
|
UTF-8
| 657
| 3.359375
| 3
|
[] |
no_license
|
#!/bin/bash
# Package DzAppCenter: stage the distributable files into output/<name>,
# strip comments from the PHP sources, and zip the result under a
# timestamped file name.
product_name="DzAppCenter"
product_version="1.0"
buildtime=$(date +%Y%m%d%H%M%S)
zipfile="$product_name-$product_version-$buildtime.zip"
outdir="output/$product_name"

# Copy every argument (file or directory) into $outdir.
# Fix: "$@" and "$i" are now quoted so paths containing spaces survive
# (the original word-split them).
function cpfiles()
{
    for i in "$@"; do
        cp -r "$i" "$outdir"
    done
}

################################
rm -rf output
mkdir -p "$outdir/data"
################################
cpfiles *.php config index.htm source tool md5 pack template upload
################################
cd "$outdir" || exit 1
# 删除php文件中的所有注释代码 (strip all comments from the PHP files, in place)
../../clear_annotation -r -w
################################
# zip
cd ../ && zip -r "$zipfile" "$product_name"
cd ../
echo 'build success'
exit 0
| true
|
518089e7cd026e915a7a72b1bfabd2ed55b8114d
|
Shell
|
jgerhold/bbb-hetzner-cloud
|
/bbb-config.sh
|
UTF-8
| 1,197
| 2.8125
| 3
|
[] |
no_license
|
#!/bin/bash
# Post-install configuration for BigBlueButton:
#   1. create a Greenlight admin account from the given credentials,
#   2. append lower-bitrate webcam profile defaults to apply-config.sh,
#   3. restart BBB so the changes take effect.
#
# Usage: bbb-config.sh --admin-name NAME --admin-email EMAIL --admin-password PW
while [[ $# -gt 0 ]]
do
key="$1"

case $key in
    --admin-name)
    ADMINNAME="$2"
    shift
    shift
    ;;
    --admin-email)
    ADMINEMAIL="$2"
    shift
    shift
    ;;
    --admin-password)
    ADMINPW="$2"
    shift
    shift
    ;;
    *)
    # Unknown token: skip it.
    shift
    ;;
esac
done

cd greenlight/
# docker exec greenlight-v2 bundle exec rake admin:create
docker exec greenlight-v2 bundle exec rake admin:create["${ADMINNAME}","${ADMINEMAIL}","${ADMINPW}"]

# Heredoc body is appended verbatim to BBB's apply-config hook; it runs on
# every `bbb-conf --restart` and rewrites the HTML5 client camera profiles.
cat << EOF >> /etc/bigbluebutton/bbb-conf/apply-config.sh
echo " - Setting camera defaults"
yq w -i \$HTML5_CONFIG 'public.kurento.cameraProfiles.(id==low).bitrate' 50
yq w -i \$HTML5_CONFIG 'public.kurento.cameraProfiles.(id==medium).bitrate' 75
yq w -i \$HTML5_CONFIG 'public.kurento.cameraProfiles.(id==high).bitrate' 100
yq w -i \$HTML5_CONFIG 'public.kurento.cameraProfiles.(id==hd).bitrate' 100
yq w -i \$HTML5_CONFIG 'public.kurento.cameraProfiles.(id==low).default' true
yq w -i \$HTML5_CONFIG 'public.kurento.cameraProfiles.(id==medium).default' false
yq w -i \$HTML5_CONFIG 'public.kurento.cameraProfiles.(id==high).default' false
yq w -i \$HTML5_CONFIG 'public.kurento.cameraProfiles.(id==hd).default' false
EOF

bbb-conf --restart
| true
|
c49a3d68a19c0c3d9875c9cb5c14f914817d5a85
|
Shell
|
delkyd/alfheim_linux-PKGBUILDS
|
/libjson-rpc-cpp/PKGBUILD
|
UTF-8
| 1,205
| 2.65625
| 3
|
[] |
no_license
|
# Maintainer: Peter Spiess-Knafl <dev@spiessknafl.at>
# Contributor: Daniel Bomar <dbdaniel42@gmail.com>
pkgname=libjson-rpc-cpp
pkgver=1.0.0
pkgrel=2
pkgdesc="C++ framework for json-rpc 1.0 and 2.0"
arch=('i686' 'x86_64')
url="https://github.com/cinemast/libjson-rpc-cpp"
license=('MIT')
depends=('curl' 'argtable' 'jsoncpp' 'libmicrohttpd' 'hiredis')
makedepends=('cmake')
#checkdepends=('libcatch-cpp-headers')
install=libjson-rpc-cpp.install
changelog=ChangeLog
source=('https://github.com/cinemast/libjson-rpc-cpp/archive/v1.0.0.tar.gz')
sha256sums=('888c10f4be145dfe99e007d5298c90764fb73b58effb2c6a3fc522a5b60a18c6')

prepare() {
  cd "${srcdir}"/${pkgname}-${pkgver}
}

# Out-of-source cmake build; tests and examples are disabled.
build() {
  msg2 "Creating build directories"
  mkdir -p $pkgname-$pkgver/build
  cd $pkgname-$pkgver/build
  msg2 "Invoking cmake"
  cmake -DCMAKE_INSTALL_PREFIX=/usr -DCMAKE_INSTALL_LIBDIR=/usr/lib -DBUILD_STATIC_LIBS=TRUE -DCOMPILE_TESTS=FALSE -DCOMPILE_EXAMPLES=FALSE ..
  msg2 "Building the framework"
  make
}

package() {
  cd $pkgname-$pkgver/build
  msg2 "Packing all together"
  make DESTDIR="${pkgdir}" install
  # MIT requires shipping the license text with the package.
  msg2 "Add MIT License to package"
  install -D -m644 "${srcdir}/$pkgname-$pkgver/LICENSE.txt" "${pkgdir}/usr/share/licenses/${pkgname}/LICENSE"
}
| true
|
7ce01a163ab5734034efc7ddf86baa66d3cf1c5e
|
Shell
|
TottiPuc/speech-recognition-with-PNCC
|
/LoadPNCCTest/ExtractPNCCFolder.sh
|
UTF-8
| 1,146
| 3.296875
| 3
|
[] |
no_license
|
#!/bin/bash
################################################
#==============================================#
#####  Christian Dayan Arcos Gordillo ##########
#####      speech recognition          #########
#######      CETUC - PUC - RIO        ##########
#####   christian@cetuc.puc-rio.br      ########
#####    dayan3846@gmail.com.co         ########
#==============================================#
################################################
# Extract PNCC features for every .wav under $IN, writing one .feat file
# per input into $OUT/featuresTest/. PNCC_function.m reads ./tmpIn and
# writes ./tmpOut, so inputs are staged through those fixed names.
OUT=$1/products/htk/pnccAURORA
FUN=$2/LoadPNCCTest
IN=$3

echo "*** Extracting  PNCC from AURORA testing data ***"
# Count with find (recursive) so the total matches the loop below; the old
# `ls $IN*.wav | wc -l` only counted the top-level directory.
nem=$(find "$IN" -name '*.wav' | wc -l)
i=1
find "$IN" -name "*.wav" | while IFS= read -r line
do
	# Basename without extension -> feature file name. Replaces the
	# fragile `cut -d '/' -f 11`, which silently broke for any other
	# directory depth.
	base=${line##*/}
	nam=${base%.wav}.feat
	echo "extracting PNCC from file_${i}_of_${nem}"
	cp "$line" tmpIn
	sai=$OUT/featuresTest/$nam
	octave -q "$FUN/PNCC_function.m"
	mv tmpOut "$sai"
	i=$((i + 1))
	rm tmpIn
done
| true
|
59ecd2c54afcf0b1db0c10047fa00749da6ada53
|
Shell
|
Warren772/secret-agent
|
/replay/pack.sh
|
UTF-8
| 904
| 3.171875
| 3
|
[
"MIT"
] |
permissive
|
# Read the "version" value out of package.json without jq: first line
# containing "version", take the value after ':', strip quotes/comma and
# all whitespace.
PACKAGE_VERSION=$(cat package.json \
  | grep version \
  | head -1 \
  | awk -F: '{ print $2 }' \
  | sed 's/[",]//g' \
  | tr -d '[[:space:]]')

# Package each electron-builder output directory under dist/ into a
# versioned tarball in dist/assets/. Mac builds tar the .app bundle in
# place; linux/windows directories are renamed first so the tarball
# extracts to a versioned folder.
cd dist
mkdir -p assets

if [ -d "./mac" ]; then
  cd mac
  echo "Packing mac"
  tar -czf "../assets/replay-${PACKAGE_VERSION}-mac.tar.gz" SecretAgentReplay.app
  cd ..
fi

if [ -d "./mac-arm64" ]; then
  cd "mac-arm64"
  echo "Packing mac-arm64"
  tar -czf "../assets/replay-${PACKAGE_VERSION}-mac-arm64.tar.gz" SecretAgentReplay.app
  cd ..
fi

if [ -d "./linux-unpacked" ]; then
  echo "Packing linux"
  mv linux-unpacked "replay-${PACKAGE_VERSION}-linux"
  tar -czf "assets/replay-${PACKAGE_VERSION}-linux.tar.gz" "replay-${PACKAGE_VERSION}-linux"
fi

if [ -d "./win-unpacked" ]; then
  echo "Packing windows"
  mv win-unpacked "replay-${PACKAGE_VERSION}-win"
  tar -czf "assets/replay-${PACKAGE_VERSION}-win.tar.gz" "replay-${PACKAGE_VERSION}-win"
fi
| true
|
47bb3f30cb506fea42a1ffcf404ec3406fbda05a
|
Shell
|
yuxiaoba/Serverless-Bechmark
|
/PageRank/Knative/build_image.sh
|
UTF-8
| 294
| 2.578125
| 3
|
[] |
no_license
|
#!/bin/sh
# Build the pagerank function image and push it both to the private Harbor
# registry and, under a second tag, to Docker Hub.
registryurl="harbor.dds-sysu.tech/functions/"
arc="amd"
functionname="pagerank"

# Compose the two fully-qualified image references.
dockerhuburl="yuxiaoba/${functionname}:${arc}"
imageurl="${registryurl}${functionname}:${arc}"

docker build -t "$imageurl" .
docker push "$imageurl"
docker tag "$imageurl" "$dockerhuburl"
docker push "$dockerhuburl"
| true
|
7b011a366e720ce4fd696901699b574bbd125884
|
Shell
|
dbyone/sofa
|
/utils/ci_autotest/rw_testing/randw_sector.scp
|
UTF-8
| 3,016
| 3.71875
| 4
|
[
"Apache-2.0"
] |
permissive
|
#!/bin/bash
#Description: (like as randw_page_32.scp, but is sector-based testing)
# purpose: sector-based testing, random write data to target raw device, and verify data
# procedures: first, use the function randswsr in utest to random write in a range,
# second, sequential read the data and verify, and we set the length of the write io is 32 sectors.
#input parameters:
# 1st: target raw device
# 2nd: amount of data to write (in page unit)
# 3th: SSD page unit is 4KB or 8KB
#########################
#     include files     #
#########################
# These provide log_msg, check_sofa_service, get_disk_size, gDiskSize,
# PATH_TEST_TOOL, test_log_file, test_scfn and the sofa service helpers.
source ../common/config_test_env.sh
source ../common/common_func.sh
source ../common/common_global_var.sh
# Make sure sofa starts from a clean state before the run.
check_sofa_service
retval=$?
if [ "$retval" == 1 ]
then
log_msg "$test_scfn: [INFO] sofa service is running, stop it."
stop_sofasrv
fi
reset_sofasrv
start_sofasrv
#########################
#  setup testing para   #
#########################
# Defaults: whole lfsm device, 4KB pages.
target_dev=lfsm
amount_data=0
to_8k=0
if [ $# -eq 1 ] ; then
target_dev=$1
get_disk_size $target_dev
amount_data=$gDiskSize
elif [ $# -eq 2 ] ; then
target_dev=$1
get_disk_size $target_dev
amount_data=$2
elif [ $# -eq 3 ] ; then
target_dev=$1
get_disk_size $target_dev
amount_data=$2
to_8k=$3
else
target_dev=lfsm
get_disk_size $target_dev
amount_data=$gDiskSize
fi
#if amount_data >= disksize, amount_data = disksize
if [ $amount_data -ge $gDiskSize ] ; then
amount_data=$gDiskSize
fi
log_msg "$test_scfn: [INFO] start testing with target dev = $target_dev, and amount data = $amount_data, to_8k = $to_8k"
#########################
#  set other parameters #
#########################
test_tool=${PATH_TEST_TOOL}/vtest
# Convert pages to sectors: 16 sectors per 8KB page, 8 per 4KB page.
if [ "$to_8k" == 1 ]
then
all_sectors=$(($amount_data*16))
else
all_sectors=$(($amount_data*8))
fi
log_msg "$test_scfn: [INFO] all_sectors = $all_sectors, test_tool = $test_tool"
#########################
#     start testing     #
#########################
err_ret=0
[ -f $test_log_file ] && rm -f $test_log_file
# Fixed seed keeps the random write pattern reproducible across runs.
export utest_seed48=321
sector=32
# Highest valid start sector for a 32-sector write, plus one for the count.
range=$(($all_sectors-$sector))
range=$(($range+1))
log_msg "$test_scfn: [INFO] TEST: Sector Random W Sequential R len=$sector, range=$range"
$test_tool /dev/$target_dev randswsr $range $(($all_sectors/8)) 0 $sector >> $test_log_file
if [ $? -ne 0 ]; then
log_msg "$test_scfn: [ERROR] test error, please check it"
err_ret=1
fi
# Verify: count correct sectors reported in the log and compare to range.
cor_num=`awk -f "../common/check_correct_num.awk" $test_log_file`
err=$(($range-$cor_num))
log_msg "$test_scfn: [INFO] num_of_err=$err num_of_correct=$cor_num"
if [ $err -ne 0 ]; then
log_msg "$test_scfn: [ERROR] data is not correct!!, err = $err"
log_msg "$test_scfn: [ERROR] Random wirte for $sector sectors DATE IS NOT CORRECT!!"
err_ret=1
fi
if [ "$err_ret" == 0 ]
then
log_msg "$test_scfn: [INFO] test result ok"
#TODO notify the caller or jenkins server that test is ok
else
log_msg "$test_scfn: [INFO] test error, err_ret = $err_ret"
exit $err_ret
fi
| true
|
7de662cb5e01433c8cb9550a95dff3ee715abb25
|
Shell
|
tiangehe/envConfig
|
/mac_setup.zsh
|
UTF-8
| 7,535
| 3.09375
| 3
|
[] |
no_license
|
#!/bin/zsh
# Author: MoxHe
# About: Sets up a macOS development environment: zsh, nvim, tmux, iterm2,
#        fzf, powerline, etc.
# Note:  Intended for a fresh macOS install using zsh. It OVERWRITES your
#        existing .zshrc, init.vim, .tmux.conf and powerline configs for tmux.
#        Read through the script, comment out software you don't want, and
#        back up your existing config files before running it.

# Install a Homebrew formula only when the command it provides is missing.
#   $1 - command name probed with `which`
#   $2 - brew formula to install
brew_install_if_missing() {
  local cmd=$1 pkg=$2
  if [ ! $(which $cmd) ]; then
    echo "Installing $pkg..."
    brew install $pkg
  else
    echo "$pkg has already been installed. Skipped"
  fi
}

# Replace $2 with a hard link to $1, removing any pre-existing regular file.
#   $1 - source file in this repo
#   $2 - destination path
hard_link() {
  local src=$1 dst=$2
  if [ -f $dst ]; then
    echo "Removed existing $dst"
    rm $dst
  fi
  echo "Hard link $dst"
  ln $src $dst
}

echo "Start setting up...\n"
echo "Checking software that need to be installed...\n"

# Xcode command line tools are required by Homebrew.
if [ ! $(which xcode-select) ]; then
  echo "Installing xcode-select..."
  xcode-select --install
else
  echo "xcode-select has already been installed. Skipped"
fi

echo "Installing oh-my-zsh..."
sh -c "$(curl -fsSL https://raw.githubusercontent.com/ohmyzsh/ohmyzsh/master/tools/install.sh)"

if [ ! $(which brew) ]; then
  echo "Installing homebrew..."
  /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install.sh)"
else
  echo "Homebrew has already been installed. Skipped"
fi

echo "Updating Homebrew..."
brew update

brew_install_if_missing git git
brew_install_if_missing fzf fzf
brew_install_if_missing rg ripgrep
brew_install_if_missing tmux tmux

echo "Installing zsh-syntax-highlighting..."
brew install zsh-syntax-highlighting

echo "Installing zsh-autosuggestions..."
brew install zsh-autosuggestions

brew_install_if_missing nvim neovim
brew_install_if_missing bat bat
brew_install_if_missing python3 python3

echo "Installing pynvim..."
pip3 install pynvim

echo "Installing powerline..."
pip3 install powerline-status

hard_link ./zsh/.zshrc ~/.zshrc
hard_link ./zsh/.zshenv ~/.zshenv

echo "Sourcing .zshrc"
source ~/.zshrc

echo "Installing nvm..."
curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.35.3/install.sh | bash

# nvm is a shell function; `nvm which current` is empty when no node is active.
if [ ! $(nvm which current) ]; then
  echo "Installing Node.js..."
  nvm install stable
else
  echo "Node.js has already been installed. Skipped"
fi

echo "Installing node neovim..."
sudo npm install -g neovim

# Config directories used by the hard links below.
mkdir -p ~/.config/nvim/after/ftplugin
mkdir -p ~/.config/bat/config
mkdir -p ~/.config/bat/themes
mkdir -p ~/.config/powerline/themes/tmux
mkdir -p ~/.config/powerline/colorschemes/tmux

hard_link ./tmux/.tmux.conf ~/.tmux.conf
hard_link ./nvim/init.vim ~/.config/nvim/init.vim
hard_link ./nvim/after/ftplugin/fugitive.vim ~/.config/nvim/after/ftplugin/fugitive.vim
hard_link ./nvim/coc-settings.json ~/.config/nvim/coc-settings.json

echo "Installing vim plug manager"
sh -c 'curl -fLo "${XDG_DATA_HOME:-$HOME/.local/share}"/nvim/site/autoload/plug.vim --create-dirs \
       https://raw.githubusercontent.com/junegunn/vim-plug/master/plug.vim'

hard_link ./bat/config/bat.conf ~/.config/bat/config/bat.conf
hard_link ./bat/themes/gruvbox.tmTheme ~/.config/bat/themes/gruvbox.tmTheme

echo "Update bat binary cache..."
bat cache --build

echo "Sourcing .zshrc"
source ~/.zshrc

hard_link ./powerline/colors.json ~/.config/powerline/colors.json
hard_link ./powerline/config.json ~/.config/powerline/config.json
hard_link ./powerline/themes/tmux/default.json ~/.config/powerline/themes/tmux/default.json
hard_link ./powerline/colorschemes/gruvbox_dark.json ~/.config/powerline/colorschemes/gruvbox_dark.json
hard_link ./powerline/colorschemes/tmux/gruvbox_dark.json ~/.config/powerline/colorschemes/tmux/gruvbox_dark.json

echo "Installing iterm2..."
brew cask install iterm2

echo "Installing nerd font..."
brew tap homebrew/cask-fonts
brew cask install font-hack-nerd-font

echo "\nDone!"
| true
|
f913a0679a1eeb71ef40fc00730fa217ae88eded
|
Shell
|
chenddcoder/dumpmysql
|
/dumptable.sh
|
UTF-8
| 129
| 2.640625
| 3
|
[] |
no_license
|
# Dump every row of a single table.
# Usage: dumptable.sh <tablename>
if [ $# -eq 0 ]; then
    # `usage` was not a defined command in the original; print a message.
    echo "Usage: $(basename "$0") <tablename>" >&2
    exit 1
fi
TABLE=$1
# NOTE(review): host/user/password/db are hard-coded placeholders, and the
# table name is interpolated unescaped into SQL -- only run with trusted input.
mysql -hhost -uuser -ppass -Ddb << EOF
select * from $TABLE;
EOF
| true
|
415a8ec06c88911fc34c265df13f7743a2b48bef
|
Shell
|
sriharikrishna/enzymescripts
|
/scripts1/results/raja-ser-single-forward/script.sh
|
UTF-8
| 827
| 2.625
| 3
|
[] |
no_license
|
#!/usr/bin/bash
# Launch five core-pinned, single-threaded LULESH runs in parallel and block
# until no matching process remains.

# Poll once per second until user `ubuntu` has no running process whose name
# matches $1.  (The original assigned $1 to string1 via eval but never used it.)
mywait() {
  local name=$1
  local status=-1
  while [ "$status" -ne 0 ]; do
    sleep 1
    status=$(pgrep -uubuntu "$name" | wc -l)
    echo "$status"
  done
}

# One run per core 0-4; each writes its own log file.
taskset -c 0 numactl -i all ~/LULESH-MPI-RAJA/buildomp14/bin/lulesh-v2.0-RAJA-seq.exe -s 64 -i 100 > ser-single_100_64_0.txt &
taskset -c 1 numactl -i all ~/LULESH-MPI-RAJA/buildomp14/bin/lulesh-v2.0-RAJA-seq.exe -s 64 -i 100 > ser-single_100_64_1.txt &
taskset -c 2 numactl -i all ~/LULESH-MPI-RAJA/buildomp14/bin/lulesh-v2.0-RAJA-seq.exe -s 64 -i 100 > ser-single_100_64_2.txt &
taskset -c 3 numactl -i all ~/LULESH-MPI-RAJA/buildomp14/bin/lulesh-v2.0-RAJA-seq.exe -s 64 -i 100 > ser-single_100_64_3.txt &
taskset -c 4 numactl -i all ~/LULESH-MPI-RAJA/buildomp14/bin/lulesh-v2.0-RAJA-seq.exe -s 64 -i 100 > ser-single_100_64_4.txt &
mywait lulesh
| true
|
9b125062c9101ceeb60aa276db3e3bb103a5a340
|
Shell
|
gocros/test
|
/u
|
UTF-8
| 112
| 3.15625
| 3
|
[] |
no_license
|
#!/bin/bash
# Commit all tracked changes with the given message, then push.
# Usage: u "commit message"
if [ $# -eq 1 ]; then
    # Quote the message so it is passed as a single argument.  The original
    # used \'$1\', which embedded literal quote characters in the message and
    # word-split it.
    git commit -a -m "$1"
    git push
else
    echo "Missing commit message!"
fi
| true
|
55423841b702a5bce4194cd4eb58a94a5c026569
|
Shell
|
liaochenlanruo/bioconda-recipes
|
/recipes/r-ngsplot/build.sh
|
UTF-8
| 483
| 2.859375
| 3
|
[
"MIT"
] |
permissive
|
# Conda build script: install ngsplot into a versioned share/ directory and
# expose its bin/ scripts on PATH via symlinks.
outdir=$PREFIX/share/$PKG_NAME-$PKG_VERSION-$PKG_BUILDNUM
mkdir -p $outdir
mkdir -p $PREFIX/bin
# Ship the whole distribution (scripts, annotation database, examples, libs).
cp -R {bin,database,example,galaxy,lib,LICENSE} ${outdir}/
for f in ${outdir}/bin/*; do
ln -s ${f} ${PREFIX}/bin
done
# ngsplot normally finds its own install dir via the NGSPLOT environment
# variable; patch the Python and R sources so the path is baked in instead.
for f in "${outdir}"/bin/*.py ; do
sed -Ei.bak "s|os\.environ\[\"NGSPLOT\"\]|\"${outdir}\"|g" "${f}"
rm "${f}.bak"
done
for f in "${outdir}"/bin/{,backup}/*.r ; do
sed -Ei.bak "s|Sys\.getenv\('NGSPLOT'\)|\"${outdir}\"|g" "${f}"
rm "${f}.bak"
done
| true
|
288e1547d9319bf804d875d217d4189ef969108b
|
Shell
|
pasosdeJesus/Mt77
|
/doc/conf.sh
|
ISO-8859-1
| 10,249
| 3.828125
| 4
|
[
"ISC"
] |
permissive
|
#!/bin/sh
# Generic script to prepare LaTeX tooling and configure the project.
# This source is released to the public domain, 2003. No warranties offered.
# Bug reports may be sent to structio-info@lists.sourceforge.net
# Credits
#  Configuration-variable handling: members of Structio.
#    http://structio.sourceforge.net/
#  Command-line parsing: WWWeb Tide Team
#    http://www.ebbtide.com/Util/ksh_parse.html
#    which is also public domain according to http://www.ebbtide.com/Util/
#    "The following utilities have been written by the members of WWWeb Tide
#    Team. We considered them to be infinitely useful in every day systems and
#    web site administration. So much so, in fact, we have decided to put the
#    sources in the public domain."
# Read configuration variables (seed confv.sh from the template on first run).
if (test ! -f confv.sh) then {
cp confv.empty confv.sh
} fi;
. ./confv.sh
# Load helper functions used during configuration.
. herram/confaux.sh
. herram/misc.sh
# Reconociendo lnea de comandos
BASENAME=$(basename $0)
USAGE="$BASENAME [-v] [-h] [-M] [-p prefijo]"
# Remember: if you add a switch above, don't forget to add it to:
# 1) "Parse command line options" section
# 2) "MANPAGE_HEREDOC" section
# 3) "check for help" section
ARG_COUNT=0 # This nubmer must reflect true argument count
OPT_FLAG=0 # Command line mistake flag
OPT_COUNT=0 # Number of options on the command line
MAN_FLAG=0 # Default: no man pages requited
HELP_FLAG=0 # Default: no help required
VERBOSE_FLAG=0 # Default: no verbose
WARNING=0 # General purpose no fail warning flag
# initialize local variables
vbs=""
prefix="/usr/local"
# Parse command line options
while getopts :p:Mhv arguments
do
# remember treat r: switches with: R_VALE = $OPTARG ;;
case $arguments in
p) prefix=$OPTARG;;
M) MAN_FLAG=1 ;; # Display man page
v) # increment the verboseness count and...
VERBOSE_FLAG=$(($VERBOSE_FLAG+1))
# gather up all "-v" switches
vbs="$vbs -v"
;;
h) HELP_FLAG=1;; # display on-line help
\?) echo "Opcin no reconocida: $OPTARG" >&2 # flag illegal switch
OPT_FLAG=1;;
esac
done
OPT_COUNT=$(($OPTIND-1))
shift $OPT_COUNT
options_help="
-p prefijo Prefijo de la ruta de instalacin (por defecto /usr/local)
-h Presenta ayuda corta
-M Presenta ayuda ms completa
-v Presenta informacin de depuracin durante ejecucin"
# check for man page request
if (test "$MAN_FLAG" = "1" ) then {
if (test "$PAGER" = "" ) then {
if ( test "$VERBOSE_FLAG" -gt "0" ) then {
echo "$BASENAME: Resetting PAGER variable to more" >&2
} fi;
export PAGER=more;
} fi;
$PAGER << MANPAGE_HEREDOC
NOMBRE
$BASENAME - Configura fuentes de $PROYECTO
$USAGE
DESCRIPCIN
Establece el valor de las variables de configuracin y genera
archivos en diversos formatos empleados por las fuentes DocBook
con ayudas de 'repasa' del proyecto $PROYECTO.
* $PRY_DESC
* $URLSITE
Las variables de configuracin y sus valores por defecto estn
en confv.empty (debajo de cada variable hay un comentario con la
descripcin).
Este script modifica el archivo confv.sh (o de no existir lo crea
a partir de confv.empty) y genera los archivos Make.inc y confv.ent
con las variables de configuracin instanciadas.
Para la instanciacin este tiene en cuenta:
* Detecta procesadores para hojas de estilo DocBook, hojas de estilo
y de requerirse verifica sus versiones (Jade, OpenJade, xsltproc)
* Adapta mtodos de generacin (por defecto prefiere emplear xsltproc
para generar HTML, OpenJade para generar PostScript y ps2pdf para
generar PDF).
* Detecta herramientas auxiliares empleadas para la generacin y
operacin (e.g collateindex, dvips, convert, ps2pdf, awk, sed)
* Detecta herraminetas opcionales que pueden servir para la
actualizacin del proyecto en Internet (ncftpput o scp)
* Actualiza fecha del proyecto de algn programa).
Si este script no logra completar alguna deteccin, indicar el
problema, junto con la posible ayuda que se haya configurado en
confv.empty y permitir ingresar directamente la informacin o
cancelar para reanudar posteriormente.
De requerirlo usted puede cambiar directamente los valores detectados
modificando el archivo confv.sh y ejecutando nuevamente ./conf.sh.
OPCIONES
$options_help
EJEMPLOS
./conf.sh
Configura fuentes y deja como prefijo para la ruta de instalacin
"/usr/local"
./conf.sh -p /usr/
Configura fuentes y deja como prefijo para la ruta de instalacin
"/usr"
ESTNDARES
Este script pretende ser portable. Debe cumplir POSIX.
FALLAS
VER TAMBIN
Para mejorar este script o hacer uno similar ver fuentes de
herram/confaux.sh
CRDITOS Y DERECHOS DE REPRODUCCIN
Script de dominio pblico. Sin garantas.
Fuentes disponibles en: http://structio.sourceforge.net/repasa
Puede enviar reportes de problemas a
structio-info@lists.sourceforge.net
Incluye porciones de cdigo dominio pblico escritas por:
Miembros de Structio http://structio.sourceforge.net
WWWeb Tide Team http://www.ebbtide.com/Util/
Puede ver ms detalles sobre los derechos y crditos de este script en
las fuentes.
MANPAGE_HEREDOC
exit 0;
} fi;
# check for help
if (test "$HELP_FLAG" = "1" ) then {
echo " Utilizacin: $USAGE"
cat << HLP_OP
$options_help
HLP_OP
exit 0
} fi;
# check for illegal switches
if (test "$OPT_FLAG" = "1") then {
echo "$BASENAME: Se encontr alguna opcin invalida" >&2
echo "Utilizacin: $USAGE" >&2
exit 1
}
elif (test "$#" != "$ARG_COUNT" ) then {
echo "$BASENAME: se encontraron $# argumentos, pero se esperaban $ARG_COUNT." >&2
echo "Utilizacin: $USAGE" >&2
exit 1;
} fi;
echo "Configurando $PROYECTO $PRY_VERSION";
if (test "$DPLANO" != "si" -a "$prefix" != "") then {
INSBIN="$prefix/bin";
changeVar INSBIN 1;
INSDOC="$prefix/share/doc/$PROYECTO";
changeVar INSDOC 1;
INSDATA="$prefix/share/$PROYECTO";
changeVar INSDATA 1;
} fi;
if (test "$VERBOSE_FLAG" -gt "0") then {
echo "Chequeando y detectando valor de variables de configuracin";
} fi;
check "LATEX" "" "test -x \$LATEX" `which latex 2> /dev/null`
check "PDFLATEX" "" "test -x \$PDFLATEX" `which pdflatex 2> /dev/null`
check "BIBTEX" "" "test -x \$BIBTEX" `which bibtex 2> /dev/null`
check "MAKEINDEX" "" "test -x \$MAKEINDEX" `which makeindex 2> /dev/null`
check "HEVEA" "" "test -x \$HEVEA" `which hevea 2> /dev/null`
# If teTeX is installed could try
# find in `kpsewhich -expand-var='$TEXMFMAIN'`
check "HEVEA_STY" "" "test -f \$HEVEA_STY/hevea.sty" "/usr/local/lib/hevea" "/usr/lib/hevea" "/usr/share/hevea"
check "DVIPS" "" "test -x \$DVIPS" `which dvips 2> /dev/null`
# We would like to call PS2PDF the next one but LaTeX doesn't accept numbers
# in the name of a macro.
check "PSPDF" "" "test -x \$PSPDF" `which ps2pdf 2> /dev/null`
if (test "$ACT_PROC" = "act-ncftpput") then {
check "NCFTPPUT" "optional" "test -x \$NCFTPPUT" `which ncftpput 2> /dev/null`
}
elif (test "$ACT_PROC" = "act-scp") then {
check "SCP" "optional" "test -x \$SCP" `which scp 2> /dev/null`
} fi;
check "CONVERT" "" "test -x \$CONVERT" `which convert 2> /dev/null`
check "AWK" "" "test -x \$AWK" `which awk 2> /dev/null`
check "CP" "" "test -x \$CP" `which cp 2> /dev/null`
check "CVS" "optional" "test -x \$CVS" `which cvs 2> /dev/null`
check "ED" "" "test -x \$ED" `which ed 2> /dev/null`
check "FIND" "" "test -x \$FIND" `which find 2> /dev/null`
check "GZIP" "" "test -x \$GZIP" `which gzip 2> /dev/null`
# Correccin ortografica
check "ISPELL" "optional" "test -x \$ISPELL" `which ispell 2> /dev/null`
check "MAKE" "" "test -x \$MAKE" `which make 2> /dev/null`
check "MV" "" "test -x \$MV" `which mv 2> /dev/null`
check "MKDIR" "" "test -x \$MKDIR" `which mkdir 2> /dev/null`
check "PERL" "optional" "test -x \$PERL" `which perl 2> /dev/null`
check "RM" "" "test -x \$RM" `which rm 2> /dev/null`
check "SED" "" "test -x \$SED" `which sed 2> /dev/null`
check "TAR" "" "test -x \$TAR" `which tar 2> /dev/null`
check "TIDY" "optional" "test -x \$TIDY" `which tidy 2> /dev/null`
check "TOUCH" "" "test -x \$TOUCH" `which touch 2> /dev/null`
check "WTM" "optional" "test -x \$WTM" `which w3m 2> /dev/null` `which lynx 2> /dev/null`
l=`echo $WTM | sed -e "s|.*lynx.*|si|g"`
WTM_OPT="";
if (test "$l" = "si") then {
WTM_OPT="-nolist";
} fi;
changeVar WTM_OPT 1;
check "ZIP" "optional" "test -x \$ZIP" `which zip 2> /dev/null`
BLB_TARGET="";
if (test "$DPLANO" = "si") then {
BLB_TARGET="";
# Objetivo para generar bibliografa, dejar en blanco si no tiene
# IDX_TARGET="";
# Objetivo para generar indice, dejar en blanco si no tiene
} else {
BLB_TARGET="$PROYECTO.blb";
# IDX_TARGET="$PROYECTO.idx";
} fi;
changeVar BLB_TARGET 1;
changeVar IDX_TARGET 1;
FECHA_ACT=`date "+%d/%m/%Y"`;
changeVar FECHA_ACT 1;
m=`date "+%m" | sed -e "s/01/Enero/;s/02/Febrero/;s/03/Marzo/;s/04/Abril/;s/05/Mayo/;s/06/Junio/;s/07/Julio/;s/08/Agosto/;s/09/Septiembre/;s/10/Octubre/;s/11/Noviembre/;s/12/Diciembre/"`
a=`date "+%Y"`
MES_ACT="$m de $a";
changeVar MES_ACT 1;
if (test "$VERBOSE_FLAG" -gt "0") then {
echo "Guardando variables de configuracin";
} fi;
changeConfv;
if (test "$VERBOSE_FLAG" -gt "0") then {
echo "Generando Make.inc";
} fi;
echo "# Algunas variables para el Makefile" > Make.inc;
echo "# Este archivo es generado automticamente por conf.sh. No editar" >> Make.inc;
echo "" >> Make.inc
# Adding configuration variables to Make.inc
addMakeConfv Make.inc;
echo "PREFIX=$prefix" >> Make.inc
if (test "$VERBOSE_FLAG" -gt "0") then {
echo "Generando confv.tex"
} fi;
echo "% Variables de configuracin" > confv.tex
echo "% Este archivo es generado automticamente por conf.sh. No editar " >> confv.tex
addLATeXConfv confv.tex;
if (test "$DPLANO" != "si") then {
if (test "$VERBOSE_FLAG" -gt "0") then {
echo "Creando directorios auxiliares"
} fi;
mkdir -p html
} fi;
if (test "$VERBOSE_FLAG" -gt "0") then {
echo "Cambiando ruta de awk en script"
} fi;
echo ",s|/usr/bin/awk|$AWK|g
w
q
" | ed herram/latex2rep 2> /dev/null
if (test ! -f repasa.hva) then {
ln -s repasa.sty repasa.hva
} fi;
echo "Configuracin completada";
| true
|
7b776370e586440596835baf4e7b82426d93a34c
|
Shell
|
hissssst/arrow_nav
|
/arrow_nav.zsh
|
UTF-8
| 1,031
| 3.1875
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/zsh
# Arrow-key directory navigation for zsh: Alt+arrow (and raw escape variants)
# change directory and redraw the prompt in place via zle widgets.

# Index of the currently selected sibling directory (0-based; zsh arrays
# themselves are 1-based, hence the +1 when indexing below).
ARROW_NAV_INDEX=0

# Cycle forward through the parent's subdirectories.
down() {
ARROW_NAV_DIRS=(../*/)
ARROW_NAV_COUNT=${#ARROW_NAV_DIRS[@]}
ARROW_NAV_INDEX=$(( (ARROW_NAV_INDEX + 1) % ARROW_NAV_COUNT ))
dr=$ARROW_NAV_DIRS[$(( ARROW_NAV_INDEX + 1 ))]
cd $dr
zle reset-prompt
}

# Descend into the first subdirectory of the current directory.
# NOTE(review): with zsh's default `nomatch`, an unmatched */ glob aborts
# before the count check -- confirm `nullglob`/`nonomatch` is set elsewhere.
right() {
ARROW_NAV_DIRS=(*/)
if [[ ${#ARROW_NAV_DIRS[@]} = 0 ]] ; then
echo "no directories here" ;
else
cd $ARROW_NAV_DIRS[1]
ARROW_NAV_INDEX=0
zle reset-prompt ;
fi
}

# Go up one level and reset the sibling cursor.
left() {
cd ..
ARROW_NAV_INDEX=0
zle reset-prompt
}

# Cycle backward through the parent's subdirectories (wraps at the start).
up() {
ARROW_NAV_DIRS=(../*/)
ARROW_NAV_COUNT=${#ARROW_NAV_DIRS[@]}
ARROW_NAV_INDEX=$(( (ARROW_NAV_INDEX - 1) % ARROW_NAV_COUNT ))
if [[ $ARROW_NAV_INDEX = -1 ]] ; then
ARROW_NAV_INDEX=$(( ARROW_NAV_COUNT - 1));
fi
dr=$ARROW_NAV_DIRS[$(( ARROW_NAV_INDEX + 1 ))]
cd $dr
zle reset-prompt
}

# Register the functions as zle widgets so bindkey can invoke them.
zle -N down
zle -N right
zle -N left
zle -N up
# Alt+arrow sequences (xterm-style), plus raw ESC-[ variants.
bindkey '^[[1;3B' down
bindkey '^[[1;3C' right
bindkey '^[[1;3A' up
bindkey '^[[1;3D' left
bindkey '[B' down
bindkey '[C' right
bindkey '[A' up
bindkey '[D' left
| true
|
7d4a02f98b20080416332d4e32ec190717edc3f1
|
Shell
|
marftcorp/xcc_toolchain_scripts
|
/h
|
UTF-8
| 9,832
| 3.625
| 4
|
[
"BSD-2-Clause"
] |
permissive
|
#!/bin/sh -e
# Cross-toolchain build driver: builds binutils + gcc (plus a canned sysroot
# or the mingw-w64 runtime) for the platform named in $PLAT, installing
# everything under inst/$PLAT.
TOP=`pwd`
if test -z "$PLAT" ; then PLAT=win32 ; fi
PREFIX=$TOP/inst/$PLAT

# Source tarballs expected next to the script.
BINUTILSDIST=binutils-2.25.tar.bz2
GCCDIST=gcc-5.1.0.tar.bz2
MINGW64DIST=mingw-w64-v4.0.2.tar.bz2
SYSROOTDIST=sysroot-${PLAT}.tar.xz
MAKEJ=-j8
#- ^^^configure me^^^ ---------------------------------------------------------

# Per-platform: target triple, ordered list of build steps (functions defined
# below, run via eval at the bottom), and extra gcc configure flags.
GCC_CONFIG_FLAGS=""
case $PLAT in
arm7h) TARG=arm-linux-gnueabihf DEPS="sysroot gcc gcc2" GCC_CONFIG_FLAGS=--with-float=hard ;;
bsd64) TARG=x86_64-freebsd9 DEPS="sysroot gcc gcc2" NEWLIB=1 ;;
bsd32) TARG=i686-freebsd9 DEPS="sysroot gcc gcc2" NEWLIB=1 ;;
deb64) TARG=x86_64-linux-gnu DEPS="sysroot gcc gcc2" ;;
u1032) TARG=i686-linux-gnu DEPS="sysroot gcc gcc2" ;;
u1064) TARG=x86_64-linux-gnu DEPS="sysroot gcc gcc2" ;;
deb32) TARG=i686-linux-gnu DEPS="sysroot gcc gcc2" ;;
win64) TARG=x86_64-w64-mingw32 DEPS="mgwinc gcc mgwcrt gcc2" LANGS=",c++" ;;
win32) TARG=i686-w64-mingw32 DEPS="mgwinc gcc mgwcrt gcc2" LANGS=",c++" ;;
esac
# --- binutils ----------------------------------------------------------------
#
# --with-lib-path is the single entry on the default search path for ld (which
# gcc overrides, so this is only used when invoked as ld)
# it looks like --with-libdir and --with-sysroot do nothing useful but we have
# them set to nonexistant places so we can figure it out if we have problems
#
# The cruft in prefix/$(target-alias)/lib (like ldscripts) is controlled by
# the SCRIPTSDIR variable in the Makefile; debian eliminates the ugly
# $(target-alias) by actually patching the Makefile, so there's no good way:
# http://lists.debian.org/debian-embedded/2005/07/msg00007.html
# We get as good as we can without patching by setting TOOLDIR on make.
# Build and install cross binutils for $TARG under $PREFIX.
# Fake libdir/sysroot paths are passed so stray uses are easy to spot;
# the real default ld search path is $PREFIX/sysroot/lib.
binutils() {
rm -fr src/binutils build/binutils
extract "$BINUTILSDIST" src/binutils
(
mkdir -p build/binutils ; cd build/binutils
${TOP}/src/binutils/configure \
--prefix=$PREFIX \
--program-prefix=${PLAT}- \
--target=$TARG \
--with-libdir=$PREFIX/binutilslibdir \
--with-sysroot=$PREFIX/binutilssysroot \
--with-lib-path=$PREFIX/sysroot/lib
TOOLDIR=$PREFIX/binutils # scriptsdir=TOOLDIR/lib/ldscripts
make tooldir=$TOOLDIR ${MAKEJ}
make tooldir=$TOOLDIR install
rm -fr $PREFIX/../lib # This is the libiberty bug commented below
)
}
# --- gcc ---------------------------------------------------------------------
#
# Decent starter on gcc's configure: http://gcc.gnu.org/install/configure.html
# Decent mingw/osx blog: http://www.nathancoulson.com/proj_cross.php
#
# --with-native-system-header-dir
# Override where the cross-compiler will look for headers (normally
# /usr/include) when the cross-compiler runs; must begin with / but is
# nevertheless relative to sysroot
# --with-local-prefix
# Override the location /usr/local/include, the 2nd thing on the include
# search path after native-system-header-dir; relative to sysroot
#
# In the following, the magic words are set on gcc's configure:
# gccexec --exec-prefix=
# gcclib --libdir=
# gcclibexec --libexecdir=
# sysroot --with-sysroot=
#
# GCC's prepends ld's default search path (blibpath) adding its own entries,
# yielding:
# /home/mcq/work/inst/gccexec/i686-freebsd9/lib/libdoesnotexist.a
# /home/mcq/work/inst/gccexec/i686-freebsd9/lib/libdoesnotexist.so
# /home/mcq/work/inst/gcclib/gcc/i686-freebsd9/4.8.1/libdoesnotexist.a
# /home/mcq/work/inst/gcclib/gcc/i686-freebsd9/4.8.1/libdoesnotexist.so
# /home/mcq/work/inst/sysroot/lib/libdoesnotexist.a
# /home/mcq/work/inst/sysroot/lib/libdoesnotexist.so
#
# These are the things that get installed to the deranged GCC nonsense places:
# gccexec/TARG/lib: libgcc_s.so
# gcclib/gcc/TARG/VERS: libgcc.a crtbegin.o libgcov.a plugin/include etc
# gcclibexec/gcc/TARG/VERS: cc1 collect2 lto1 plugin/gengtype etc
#
# We make the paths to the crazy places as short and overlapping as possible by
# setting all to lie on gcc/TARG and for gcc/TARG to be a direct child of PREFIX
# so the searches are clean and ALL of gcc's junk is isolated in gcc/:
# gcc/TARG/VERS/include gcc/TARG/VERS/_.a,_.so
# gcc/TARG/VERS/include-fixed gcc/TARG/lib/_.a,_.so
# sysroot/include sysroot/lib/_.a,_.so
#
# Note: --with-newlib causes libstdc++-v3 to pick up 'newlib' configs rather
# than the mingw32-w64 ones, which leads to complaints about _P etc when
# building libstdc++-v3, so don't have that option for mingw. BSD won't
# build the C compiler without it (errors about can't find <elf.h>). Debian
# is fine without. Ugh. XXX Are we giving BSD the right target?
#
# Note: some mingw builds supply --with-cpu=generic to the gcc passes; us?
# First gcc pass: configure and build/install the bare compiler (all-gcc).
# gcc2 later finishes the full build (libgcc, libstdc++, ...).
gcc() {
  rm -fr build/gcc
  rm -fr src/gcc ; extract "$GCCDIST" src/gcc
  (
    mkdir -p build/gcc ; cd build/gcc
    # Usually cross-gcc looks for a magic directory left by binutils in
    # TOOLDIR/bin containing target ar,as,ld,etc named without prefix. We
    # dislike these pointless copies, so we use X_FOR_TARGET environment
    # variables (absolute paths, or they are silently ignored) plus
    # --with-as/--with-ld so the PLAT-prefixed tools in inst/bin are used
    # both at build time and interned into the generated compiler.
    #
    # BUGFIX: the original used `[ x == y ]`, a bashism that fails under
    # POSIX /bin/sh (e.g. dash): NEWLIB was never expanded to --with-newlib
    # and PTHREAD_FLAGS was never cleared for the mingw targets.
    if [ "$NEWLIB" = "1" ] ; then NEWLIB="--with-newlib" ; else NEWLIB="" ; fi
    PTHREAD_FLAGS=" --enable-threads=posix --enable-tls "
    if [ "$PLAT" = "win32" ] || [ "$PLAT" = "win64" ] ; then
      PTHREAD_FLAGS=" "
    fi
    RANLIB_FOR_TARGET=${PREFIX}/bin/${PLAT}-ranlib \
    AR_FOR_TARGET=${PREFIX}/bin/${PLAT}-ar \
    NM_FOR_TARGET=${PREFIX}/bin/${PLAT}-nm \
    ${TOP}/src/gcc/configure \
      --prefix=$PREFIX \
      --program-prefix=${PLAT}- \
      --target=$TARG \
      --with-sysroot=$PREFIX/sysroot \
      --with-native-system-header-dir=/include \
      --libdir=$PREFIX \
      --libexecdir=$PREFIX \
      --exec-prefix=$PREFIX/gcc \
      --bindir=$PREFIX/bin \
      --with-as=$PREFIX/bin/${PLAT}-as \
      --with-ld=$PREFIX/bin/${PLAT}-ld \
      --enable-languages=c${LANGS} \
      $GCC_CONFIG_FLAGS \
      $PTHREAD_FLAGS \
      --without-headers \
      --disable-multilib \
      --disable-bootstrap \
      --disable-libmudflap \
      --disable-libssp \
      --disable-libquadmath \
      --disable-libatomic \
      --disable-libgomp \
      ${NEWLIB}
    make ${MAKEJ} all-gcc
    make install-gcc
    rm -fr $PREFIX/../lib # This is the libiberty bug commented below
    # XXX: The C++ headers end up in $PLAT/$TARG/include rather than sysroot.
  )
}
# The above gcc make install puts libiberty.a into PREFIX/../lib; it's probably
# one of those ../../../../.. things that expects e.g. libdir to be at
# least one directory below PREFIX. Or something. If PREFIX/../lib already
# exists, it might be important and we shouldn't turd into it. So check.
if test -e $PREFIX/../lib ; then
echo
echo "PROBLEM: $PREFIX/../lib exists"
echo "GCC's install will turd libiberty.a in it. Aborting."
echo
exit 1
fi

# Second gcc pass: finish the full build (libgcc and target libraries) and
# install everything, once the runtime/sysroot pieces are in place.
gcc2() {
(
cd build/gcc
make ${MAKEJ}
make install
rm -fr $PREFIX/../lib # This is the libiberty bug commented below
)
}
# - canned sysroot ------------------------------------------------------------
# Unpack the pre-built sysroot tarball for $PLAT into the install prefix.
sysroot() {
rm -fr ${PREFIX}/sysroot
extract "$SYSROOTDIST" ${PREFIX}/sysroot
}
# - mingw64 -------------------------------------------------------------------
# Install the mingw-w64 Windows API headers into the sysroot (needed before
# the first gcc pass on the win32/win64 targets).
mgwinc() {
rm -fr src/mingw64 build/headers
extract "$MINGW64DIST" src/mingw64
(
mkdir -p build/headers ; cd build/headers
${TOP}/src/mingw64/mingw-w64-headers/configure \
--prefix=$PREFIX/sysroot \
--host=${TARG} \
--with-sysroot=$PREFIX/sysroot \
--program-prefix=${PLAT}-
make ${MAKEJ}
make install
)
}
# Build and install the mingw-w64 C runtime (crt) using the stage-1 compiler.
mgwcrt() {
rm -fr build/crt
(
mkdir -p build/crt ; cd build/crt
${TOP}/src/mingw64/mingw-w64-crt/configure \
--prefix=$PREFIX/sysroot \
--host=${TARG} \
--with-sysroot=$PREFIX/sysroot \
--program-prefix=${PLAT}-
make ${MAKEJ}
make install
)
(
cd ${PREFIX}/sysroot
if [ -d lib32 ] ; then mv lib32 lib ; fi # hardcoded multilib
)
}
# It doesn't look like a 2nd pass to get "the whole mingw" is necessary, so we
# don't do that for now.
# -----------------------------------------------------------------------------
# Remove all build state, unpacked sources, and the install prefix for $PLAT.
clean() {
  rm -fr build src ${PREFIX}
}

# Unpack tarball $1 into directory $2 and apply any platform patches named
# patches/$PLAT/<basename-of-$2>-*.  Paths $1/$2/$i are now quoted so
# directories with spaces don't word-split.
extract() {
  ( mkdir -p "$2" ; cd "$2" ; $TOP/scripts/extract -s "$1" )
  BALL=`basename "$2"`
  PATCHESCMD="$TOP/patches/$PLAT/${BALL}-*"
  # Glob trick: `echo` expands the (deliberately unquoted) pattern; if the
  # expansion differs from the pattern itself, at least one patch exists.
  PATCHES=`echo $PATCHESCMD`
  if [ "$PATCHES" != "$PATCHESCMD" ] ; then
    ( cd "$2" ; for i in $PATCHES ; do patch -p0 < "$i" ; done )
  fi
}

# Slim the install tree: drop docs/headers/duplicate binaries and strip every
# executable.  (find -exec spawns the external `strip` binary, not this
# same-named shell function, so there is no recursion.)
strip() {
  (
    cd $PREFIX
    rm -fr share include lib bin/$TARG-gcc-* binutils/bin
    for i in bin gcc ; do
      find $i -executable -type f -exec strip -x {} 2>/dev/null \;
    done
  )
}

# Smoke-test the toolchain: compile hello-world dynamically and statically
# and report the resulting file types.
testfile() {
  cat > testfile.c << EOF
#include <stdio.h>
int main() { printf("Hello world\n"); }
EOF
  echo ; echo --- dynamically linked hello world ---
  ${PREFIX}/bin/${PLAT}-gcc -o testfile testfile.c
  file testfile
  echo ; echo --- statically linked hello world ---
  ${PREFIX}/bin/${PLAT}-gcc -static -o testfile testfile.c
  file testfile
  rm -f testfile testfile.c
}
# Build pipeline: fresh tree, binutils first, then the platform-specific
# steps listed in $DEPS (function names run via eval), then shrink and
# smoke-test the result.
export PATH=$PREFIX/bin:$PATH
clean
binutils
for i in $DEPS ; do eval $i ; done
strip
testfile
scripts/getpaths ${PREFIX}/bin/${PLAT}-gcc
| true
|
a9c3fc9f25e73747e28e4efac565edc9cf12d4d5
|
Shell
|
bioconda/bioconda-recipes
|
/recipes/dfast_qc/build.sh
|
UTF-8
| 694
| 2.578125
| 3
|
[
"MIT"
] |
permissive
|
#!/bin/sh
APPROOT="${PREFIX}/opt/${PKG_NAME}-${PKG_VERSION}"
mkdir -p ${APPROOT}
mkdir -p ${PREFIX}/bin
cp -Rv ./* ${APPROOT}/
# cp -av dqc $APPROOT/dqc
# cp -v dfast_qc $APPROOT
# cp -v dqc_admin_tools.py $APPROOT
# cp -v initial_setup.sh $APPROOT
# cp -av dqc ${PREFIX}/bin/dqc
# cp -v dfast_qc ${PREFIX}/bin
# cp -v dqc_admin_tools.py ${PREFIX}/bin
# cp -v initial_setup.sh ${PREFIX}/bin
# cd ${PREFIX}/bin
ln -s ${APPROOT}/dfast_qc ${PREFIX}/bin/
ln -s ${APPROOT}/dqc_admin_tools.py ${PREFIX}/bin/
ln -s ${APPROOT}/initial_setup.sh ${PREFIX}/bin/dqc_initial_setup.sh
ln -s ${APPROOT}/dqc_ref_manager.py ${PREFIX}/bin/
# ${PREFIX}/bin/dfast_qc --version
dfast_qc --version
dfast_qc -h
| true
|
2cd20ebcfc77e885e0ca5a45dd3be7b9f40b60bf
|
Shell
|
whothey/dotfiles
|
/custom_scripts/.scripts/toggle_touchpad.sh
|
UTF-8
| 277
| 2.9375
| 3
|
[] |
no_license
|
#!/bin/zsh -e
# Toggle the Synaptics touchpad on/off.  Parses the current TouchpadOff
# value out of `synclient -l`; zsh puts the regex capture group in $match.
if [[ $(synclient -l) =~ 'TouchpadOff[[:space:]]+\=[[:space:]]+([[:digit:]])' ]]; then
    # Flip the current state (1 = disabled, 0 = enabled).
    if [[ $match[1] == '1' ]]; then
        synclient TouchpadOff=0
    else
        synclient TouchpadOff=1
    fi
else
    echo "Couldn't get the 'TouchpadOff' property!"
fi
| true
|
b1fd4e94c2afcc89048c6fc2542d8c5696af40d8
|
Shell
|
yqlbu/pycharm-aarch64
|
/install.sh
|
UTF-8
| 657
| 2.78125
| 3
|
[] |
no_license
|
#!/bin/bash
# Download PyCharm Professional 2019.3.4 (works on aarch64 with the system
# JDK) and add its launcher to PATH via ~/.bashrc.
set -e

cd ~
# PyCharm needs a JDK; -y keeps apt-get non-interactive.
sudo apt-get update && sudo apt-get install -y openjdk-8-jdk
# Quote the URL so '?' is not treated as a glob character.
wget "https://download.jetbrains.com/python/pycharm-professional-2019.3.4.tar.gz?_ga=2.42966822.2056165753.1586158936-1955479096.1586158936" -O pycharm-professional-2019.3.4.tar.gz
tar -xzf pycharm-professional-2019.3.4.tar.gz && cd pycharm-2019.3.4/bin
sudo chmod +x pycharm.sh && mv pycharm.sh pycharm
# The tarball lives in $HOME; the original tried (and silently failed) to
# delete it from inside pycharm-2019.3.4/bin.
rm -f ~/pycharm-professional-2019.3.4.tar.gz
cd ~
# Use $HOME instead of hard-coding /home/$USER.
echo "export PATH=\"$HOME/pycharm-2019.3.4/bin:\$PATH\"" >> .bashrc
echo " *** Congratulations! You have successfully installed pycharm-aarch64 !!"
echo " *** Type 'pycharm' to start using pycharm."
echo " *** Enjoy !!!"
| true
|
7c8c5553edf9fbf63f952d00a8ff9a9bbb81052b
|
Shell
|
5l1v3r1/nmap-auto
|
/nmap-auto.sh
|
UTF-8
| 592
| 3.953125
| 4
|
[] |
no_license
|
#!/bin/bash
# Run an nmap scan and save the result as a timestamped XML file.
# Usage: nmap-auto.sh <mode> <address> <ports> <resultsdir>
#   e.g. nmap-auto.sh sS 10.0.0.5 1-1024 ./results

# Variables
MODE="$1"
ADDRESS="$2"
PORTS="$3"
RESULTSDIR="$4"

echo "This script is set to output to XML format"

# Create the results directory on first use.  (BUGFIX: the original put the
# scan in the else-branch, so creating the directory skipped the scan.)
if [ ! -d "$RESULTSDIR" ]; then
    echo "Sorry $RESULTSDIR does not exist."
    sleep 1
    echo "Creating it now.."
    mkdir -p "$RESULTSDIR"
    echo "$RESULTSDIR created."
fi

# Run the scan; -oX writes XML to a timestamped file.
nmap "-$MODE" -sV -Pn "$ADDRESS" -sC "-p$PORTS" -oX "$RESULTSDIR/$(date "+%Y.%m.%d-%H.%M").xml"
if [ $? -eq 0 ]; then # Checking if nmap command failed or not
    echo "Success"
else
    echo "Failed"
fi
exit
| true
|
716f96bcc60a818facf60c1057a4feb8c26160a1
|
Shell
|
joeytwiddle/jsh
|
/code/shellscript/init/startj-simple.sh
|
UTF-8
| 1,052
| 3.15625
| 3
|
[] |
no_license
|
# @sourceme
# Hmmm. At very least this should call startj -simple, or startj should call this.
# Try to guess the top directory of the j install.
# If all below fails, then you should set it yourself with export JPATH=...; source $JPATH/startj
if [ -z "$JPATH" ]
then
  # First guess: the conventional ~/j checkout.
  if [ -d "$HOME/j" ]
  then export JPATH="$HOME/j"
  # This doesn't work: bash cannot see it unless we call startj direct (no source)
  # NOTE(review): when sourced, $0 is the *calling* shell's name, not this
  # file, so dirname "$0" does not locate this script -- confirm intent.
  elif [ -d "$(dirname "$0")" ]
  then
    export JPATH="$(dirname "$0")"
    echo "startj: guessed JPATH=$JPATH"
  else
    echo "startj: Could not find JPATH. Not starting."
    #env > /tmp/env.out
    # NOTE(review): this file is marked @sourceme; 'exit' in a sourced file
    # terminates the calling shell -- 'return 0' may be intended.
    exit 0
  fi
fi
#export PATH="$JPATH/tools:$PATH"
# Append (rather than prepend) the j tools so they do not shadow system tools.
export PATH="$PATH:$JPATH/tools"
# Although we don't need things like aliases, we might need things like nvm setup.
# But nvm setup is usually done in ~/.bashrc, so we don't need it here.
# In future we may want to split the shell setup into interactive and non-interactive setups.
#[ -f "$JPATH/global.conf" ] && . "$JPATH/global.conf"
#[ -f "$JPATH/local.conf" ] && . "$JPATH/local.conf"
#. javainit
#. hugsinit
| true
|
45e1570964c9b3c2261d0a3fad5b13b34d47cc3f
|
Shell
|
particleman314/ShellLibrary
|
/test/nim_configuration/__get_key_from_configuration.sh
|
UTF-8
| 1,673
| 2.53125
| 3
|
[
"MIT"
] |
permissive
|
#!/usr/bin/env bash
# Test script for __get_key_from_configuration.  Relies on the test
# harness to provide the assert_* / get_element helpers and the four
# TEST_CONFIG_* fixture paths.

# Preconditions: all fixture config files must be defined by the harness.
assert_not_empty "${TEST_CONFIG_BASIC}"
assert_not_empty "${TEST_CONFIG_SIMPLE}"
assert_not_empty "${TEST_CONFIG_MEDIUM}"
assert_not_empty "${TEST_CONFIG_DEEP}"

section='test_config_basic'
key='key1'

# Missing/partial arguments must all fail.
answer=$( __get_key_from_configuration )
assert_failure $?
answer=$( __get_key_from_configuration --cfgsection "${section}" )
assert_failure $?
answer=$( __get_key_from_configuration --key "${key}" )
assert_failure $?
answer=$( __get_key_from_configuration --cfgsection "${section}" --key "${key}" )
assert_failure $?
# Nonexistent config file must fail.
answer=$( __get_key_from_configuration --cfgfile 'blah' --cfgsection "${section}" --key "${key}" )
assert_failure $?
# Existing file but no --cfgsection must fail.
answer=$( __get_key_from_configuration --cfgfile "${TEST_CONFIG_SIMPLE}" --key "${key}" )
assert_failure $?

# Happy path on the basic fixture: answer is a ':'-separated pair.
answer=$( __get_key_from_configuration --cfgfile "${TEST_CONFIG_BASIC}" --cfgsection "${section}" --key "${key}" )
assert_success $?
assert_not_empty "${answer}"
assert_equals 2 $( get_element --data "${answer}" --id 2 --separator ':' )
assert_equals 'Hello' $( get_element --data "${answer}" --id 1 --separator ':' )

# Nested section lookup in the medium-depth fixture.
section='/hub/tunnel/certificates'
key='cert'
answer=$( __get_key_from_configuration --cfgfile "${TEST_CONFIG_MEDIUM}" --cfgsection "${section}" --key "${key}" )
assert_success $?
assert_not_empty "${answer}"
assert_equals 'test.pem' $( get_element --data "${answer}" --id 1 --separator ':' )

# Deeply nested section lookup.
section='/disk/alarm/connections'
key='level'
answer=$( __get_key_from_configuration --cfgfile "${TEST_CONFIG_DEEP}" --cfgsection "${section}" --key "${key}" )
assert_success $?
assert_not_empty "${answer}"
assert_equals 'minor' $( get_element --data "${answer}" --id 1 --separator ':' )
| true
|
fd0401c7d6456a0b52265d06f82b27bcffa057d3
|
Shell
|
shippablejedi/admiral
|
/common/scripts/docker/installRedis.sh
|
UTF-8
| 966
| 3.84375
| 4
|
[
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
#!/bin/bash -e
# Install and start the redis container for admiral.
# Expects the caller to provide: RELEASE, COMPONENT, REDIS_CONFIG_DIR,
# REDIS_DATA_DIR, REDIS_HOST, REDIS_PORT, plus the __process_* and
# __check_service_connection helpers (sourced elsewhere).

export REDIS_IMAGE="374168611083.dkr.ecr.us-east-1.amazonaws.com/redis:$RELEASE"

__cleanup() {
  __process_msg "Removing stale containers"
  # Removal fails when no container exists yet; that is expected.
  sudo docker rm -f "$COMPONENT" || true
}

__run_redis() {
  __process_msg "Running redis container"
  local data_dir_container="/data"
  local config_dir_container="/etc/redis"

  # Build the command as an array instead of a string + eval: this keeps
  # paths containing spaces intact and avoids a second round of shell
  # parsing of the arguments.
  local run_cmd=(sudo docker run
    -d
    -v "$REDIS_CONFIG_DIR:$config_dir_container"
    -v "$REDIS_DATA_DIR:$data_dir_container"
    --publish 6379:6379
    --net=host
    --privileged=true
    --name="$COMPONENT"
    "$REDIS_IMAGE")

  __process_msg "Executing: ${run_cmd[*]}"
  "${run_cmd[@]}"
  __process_msg "Redis container successfully running"
}

__check_redis() {
  __process_msg "Checking redis container status on: $REDIS_HOST:$REDIS_PORT"
  __check_service_connection "$REDIS_HOST" "$REDIS_PORT" "$COMPONENT"
}

main() {
  __process_marker "Installing redis"
  __cleanup
  __run_redis
  __check_redis
}

main
| true
|
b64401735137ebf1cd2f114bc9d807d1e2535a81
|
Shell
|
dakodeon/dotfiles
|
/.local/bin/playlist_mgmt
|
UTF-8
| 3,753
| 4.03125
| 4
|
[] |
no_license
|
#!/bin/env sh
# a script using dmenu to interface with mpd and mpc to create and manage playlists
# functionalities:
# 1. add current song to some playlist(s)
# 2. remove current song from the playlist
# 3. load and play an existing playlist
# 4. add some songs to the current playlist
# 5. select and load songs from database as a playlist
# 6. search current playlist for a song
# 7. show current playing file in file manager (lf)
# Abort immediately when mpd is not running; 23 is this script's generic
# abort status (also used throughout when a dmenu prompt is cancelled).
pgrep mpd > /dev/null || exit 23

# Locations of the mpd playlist files and the music library root.
playlist_dir="$HOME/.local/share/mpd/playlists"
music_dir="$HOME/Music"

# Terminal file manager launched by the -F action.
fileman='lf-ueberzug'
# FUNCTIONS DEFINITIONS
# search playlists
pl_search() {
	# Let the user pick one of the known playlists; $1 is the dmenu prompt.
	mpc lsplaylists | dmenu -i -p "$1" -l 20
}
# search the database
db_search2() { \
	# Interactive database search: first pick a search mode, then a query,
	# and print the matching file paths on stdout.  $1 is the prompt for
	# the final selection menu.  Cancelling any dmenu prompt exits with 23
	# (the function runs in a $(...) subshell at its call sites, so the
	# caller sees the 23 and aborts with its own '|| exit 23').
	searchby="$(printf "artist\ncomposer\nalbum\ndirs\nall" | dmenu -i -l 5 -p "Search database by:")" || exit 23
	case "$searchby" in
		artist|composer|album)
			tag="$searchby"
			query="$(mpc list "$tag" | dmenu -i -l 20 -p "Search by $tag:")" || exit 23
			mpc search "$tag" "$query" | dmenu -i -l 20 -p "$1"
			;;
		dirs)
			# Pick a directory, strip the music-root prefix, then match by filename.
			query="$(find "$music_dir" -type d | dmenu -i -l 20 -p "$1")" || exit 23
			query=${query#"$music_dir/"}
			mpc search filename "$query"
			;;
		all)
			mpc listall | dmenu -i -l 20 -p "$1"
			;;
		*) exit 23 ;;
	esac
}
db_search() {
	# Offer every file in the mpd database for selection; $1 is the prompt.
	mpc listall | dmenu -i -p "$1" -l 20
}
# search current playlist
curr_search() { \
	# Let the user pick an entry from the current queue, take its title
	# part (the text after the last " - "), and print the matching files
	# via 'mpc search title'.  $1 is the dmenu prompt.
	mpc playlist | dmenu -i -l 20 -r -p "$1" | sed 's/.* - //' | xargs -I"{}" mpc search title "{}"
}
add_to_playlist() {
	# Append every newline-separated entry in $1 to the current queue.
	# A read loop replaces the original's 'set -f' / IFS mutation, which
	# changed global shell state and would leak it if the function was
	# interrupted between 'set -f' and 'set +f'.
	while IFS= read -r track; do
		if [ -n "$track" ]; then
			mpc add "$track"
		fi
	done <<EOF
$1
EOF
}
# Snapshot of the currently playing track, captured once at startup:
# now_playing is the raw file path (as mpd reports it), now_playing_fancy
# is a human-readable form produced by the external 'mpc_display' helper.
# NOTE(review): actions below that change the song reuse these values;
# they may be stale unless get_now_playing refreshes them -- confirm.
now_playing="$(mpc -f %file% current)"
now_playing_fancy="$(mpc_display)"
# Dispatch on the requested action.  Every dmenu call aborts the script
# with status 23 when the user cancels.
while getopts ":aDlANsFh" opt; do
	case $opt in
	a) # append the current track to a chosen playlist file
		plist="$(pl_search "Add current playing to playlist:")" || exit 23
		plist_path="$playlist_dir/$plist.m3u"
		if [ -f "$plist_path" ]; then
			# Don't add the same file twice.
			grep -q "$now_playing" "$plist_path" && dunstify " Track is already in playlist \"$plist\"" "$now_playing_fancy" && exit
			echo "$now_playing" >> "$plist_path"
		else
			echo "$now_playing" > "$plist_path"
		fi
		dunstify " Added track to playlist \"$plist\"" "$now_playing_fancy"
		;;
	D) # remove the current track from the playing queue, after confirmation
		yesno="$(printf "Yes\nNo" | dmenu -r -n -i "Really remove current song?")" || exit 23
		case "$yesno" in
			# BUG FIX: dmenu offers "Yes"/"No" but the original matched
			# lowercase "yes", so the deletion could never be confirmed.
			"Yes")
				mpc del 0
				# BUG FIX: the original notification referenced $plist,
				# which is never set in this branch.
				dunstify " Removed current track from the queue" "$now_playing_fancy"
				;;
			*)
				exit 23
				;;
		esac
		;;
	l) # replace the queue with a saved playlist and start playback
		plist="$(pl_search "Load playlist:")" || exit 23
		mpc clear && mpc load "$plist" && mpc play
		dunstify " Playlist loaded" "$plist"
		;;
	A) # append database selections to the current queue
		newlist="$(db_search2 "Select songs to add to the playlist:")" || exit 23
		add_to_playlist "$newlist"
		dunstify " Added some songs to current playlist"
		;;
	N) # replace the queue with database selections
		newlist="$(db_search2 "Select songs to play:")" || exit 23
		mpc clear && add_to_playlist "$newlist" && mpc play && get_now_playing
		# NOTE(review): now_playing_fancy was captured at startup; the
		# notification may show the previous track -- confirm.
		dunstify " Playlist changed. Now playing:" "$now_playing_fancy"
		;;
	s) # jump to a specific song in the current queue
		curr_search "Select song:" | xargs -I"{}" mpc searchplay filename "{}" && get_now_playing
		dunstify " Now playing:" "$now_playing_fancy"
		;;
	F) # reveal the playing file in the terminal file manager
		# $TERMINAL/$fileman left unquoted on purpose: they may carry flags.
		$TERMINAL -e $fileman "$music_dir/$now_playing"
		;;
	h) echo "display a help message" ;;
	esac
done
| true
|
0ad9e5d3509dece8e3839d648052ee6b223605e9
|
Shell
|
eunmin/immutant
|
/etc/bin/bees-ci-build.sh
|
UTF-8
| 1,668
| 3.21875
| 3
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
#!/bin/bash
# This is the bees CI build. Any changes to the build script should be
# here instead of in the bees config.

set -e

# Tool locations and pinned versions for this build.
BIN_DIR="${WORKSPACE}/bin"
WF_DIR="${WORKSPACE}/wildfly-dists"
WF8_VERSION="8.2.0.Final"
WF9_VERSION="9.0.0.Alpha1"
LEIN_VERSION=2.5.1

export PATH="${BIN_DIR}:${PATH}"
# Keep lein's per-user state inside the workspace so builds don't share
# a real home directory.
export WORKSPACE_HOME="${WORKSPACE}/home"
export LEIN_HOME="${WORKSPACE_HOME}/.lein"
export JVM_OPTS="-Dprogress.monitor=false"
# Print a timestamped banner so build phases are easy to spot in the log.
mark() {
  local rule='=============================================='
  printf '\n%s\n%s\n' "$rule" "$1"
  date
  printf '%s\n\n' "$rule"
}
# Start from a clean slate.
rm -rf ${WORKSPACE}/target ${BIN_DIR}

mark "Installing leiningen ${LEIN_VERSION}"
mkdir -p ${BIN_DIR}
cd ${BIN_DIR}
# NOTE(review): --no-check-certificate skips TLS verification of the
# downloaded build tool -- confirm this is still required.
wget --no-check-certificate https://raw.github.com/technomancy/leiningen/${LEIN_VERSION}/bin/lein
chmod +x lein
cd -

mark "Setting up lein profiles"
mkdir -p ${LEIN_HOME}
# Credentials profile provided by the CI host.
cp -f /private/projectodd/auth_profile.clj ${LEIN_HOME}/profiles.clj

rm -rf ${WF_DIR}
mark "Installing WildFly ${WF8_VERSION}"
etc/bin/ci-prep-wildfly.sh ${WF_DIR} ${WF8_VERSION}
mark "Installing WildFly ${WF9_VERSION}"
etc/bin/ci-prep-wildfly.sh ${WF_DIR} ${WF9_VERSION}

mark "Reversioning"
etc/bin/reversion.sh 2.x.incremental.${BUILD_NUMBER}

mark "Starting build + integ run against ${WF8_VERSION}"
export JBOSS_HOME="${WF_DIR}/wildfly-${WF8_VERSION}"
lein with-profile +integs modules all

mark "Starting integs with ${WF9_VERSION}"
export JBOSS_HOME="${WF_DIR}/wildfly-${WF9_VERSION}"
cd integration-tests
lein with-profile +integs all
cd -

mark "Starting deploy build"
lein with-profile +incremental modules deploy

mark "Starting doc build"
lein docs

mark "Done"
| true
|
9b5ffeb80e98c277eefb833bcb7b884129a851ae
|
Shell
|
petronny/aur3-mirror
|
/deepin-vte/PKGBUILD
|
UTF-8
| 1,737
| 2.75
| 3
|
[] |
no_license
|
# Maintainer: Josip Ponjavic <josipponjavic at gmail dot com>
# Maintainer: Xu Fasheng <fasheng.xu[AT]gmail.com>

# PKGBUILD metadata for deepin's patched GTK2 vte.
pkgname=deepin-vte
_pkgname=vte
pkgver=0.28.2
_srcdirname=vte-0.28.2
pkgrel=9
# epoch forces pacman to treat this as newer than any un-epoched vte.
epoch=1
pkgdesc="Virtual Terminal Emulator widget for use with GTK2 - with several patches"
arch=('i686' 'x86_64')
license=('GPL3')
url="https://developer.gnome.org/vte/"
groups=('deepin-extra')
depends=('gtk2')
makedepends=('pygtk' 'intltool' 'gobject-introspection' 'pygobject2-devel')
# Acts as a drop-in replacement for stock vte and the older -plus package.
provides=('vte=0.28.2' 'deepin-vte-plus')
conflicts=('vte' 'deepin-vte-plus')
replaces=('deepin-vte-plus')
options=('!libtool' '!emptydirs')
# Upstream tarball plus the Debian patch series shipped by deepin.
source=("http://packages.linuxdeepin.com/deepin/pool/main/v/vte/vte_0.28.2.orig.tar.xz"
"http://packages.linuxdeepin.com/deepin/pool/main/v/vte/vte_0.28.2-6deepin7~saucy.debian.tar.gz")
sha256sums=('ee52b91ecab31d0a64399896ce0c3515e579ea8ac212a00eb9b0895c58f001fe'
'b63a344541f0feebddac0232a8d7196d22d4819ec32cbd6cb39e33b2ba50e940')
prepare() {
  # Apply every patch listed in the Debian 'series' file, in order.
  local patch_dir="${srcdir}/debian/patches"
  local patch_file
  for patch_file in $(< "${patch_dir}/series"); do
    msg "patching: ${patch_file##*/}"
    # A failing (e.g. already-applied) patch only logs a message.
    patch -Np1 -i "${patch_dir}/${patch_file}" || msg "patch error: ${patch_file##*/}"
  done
}
build() {
  cd "${srcdir}"
  #warning: type-punning to incomplete type might break strict-aliasing rules
  export CFLAGS="$CFLAGS -fno-strict-aliasing"
  # vte 0.28 still builds its python bindings with python2.
  export PYTHON="/usr/bin/python2"
  ./configure --prefix=/usr --sysconfdir=/etc \
      --libexecdir=/usr/lib/vte \
      --localstatedir=/var \
      --disable-static \
      --enable-introspection \
      --with-gtk=2.0
  make
}
package(){
  cd "${srcdir}"
  make DESTDIR="${pkgdir}" install
  # Drop the setuid pty helper; not needed on this distribution.
  rm -f "${pkgdir}"/usr/lib/vte/gnome-pty-helper
}
| true
|
064cd352307c335c778390596fbb50306c58597b
|
Shell
|
nmuncy/Linguistics
|
/sbatch_step1.sh
|
UTF-8
| 1,572
| 3.03125
| 3
|
[] |
no_license
|
#!/bin/bash
# Written by Nathan Muncy on 11/20/17
# SLURM job: convert DICOMs to NIfTI, AC-PC align, and N4 bias-correct
# each subject's T1 under ~/compute/Ben_template.

#SBATCH --time=10:00:00 # walltime
#SBATCH --ntasks=2 # number of processor cores (i.e. tasks)
#SBATCH --nodes=1 # number of nodes
#SBATCH --mem-per-cpu=32gb # memory per CPU core
#SBATCH -J "BenTemp" # job name

# Compatibility variables for PBS. Delete if not needed.
export PBS_NODEFILE=`/fslapps/fslutils/generate_pbs_nodefile`
export PBS_JOBID=$SLURM_JOB_ID
export PBS_O_WORKDIR="$SLURM_SUBMIT_DIR"
export PBS_QUEUE=batch

# Set the max number of threads to use for programs using OpenMP. Should be <= ppn. Does nothing if the program doesn't use OpenMP.
export OMP_NUM_THREADS=$SLURM_CPUS_ON_NODE

workDir=~/compute/Ben_template
rawDir=${workDir}/DICOMs

cd $rawDir
# One directory per scan, named like t1_Luke_Reading_<subject>.
for i in t*; do
    dataDir=${rawDir}/$i
    # Subject dir name = scan dir name with the t1_Luke_Reading_ prefix stripped.
    subjDir=${workDir}/"${i/t1_Luke_Reading_}"
    if [ ! -d $subjDir ]; then
        mkdir $subjDir
    fi

    # construct: DICOM -> NIfTI via dcm2nii (keeps the cropped 'co*' volume)
    # NOTE(review): this guard tests struct_orig.nii.gz but the mv below
    # writes struct_orig.nii, so the conversion re-runs every submission --
    # confirm which extension is intended.
    if [ ! -f ${subjDir}/struct_orig.nii.gz ]; then
        cd $dataDir
        dcm2nii -a y -g n -x y *.dcm
        mv co*.nii ${subjDir}/struct_orig.nii
        rm *.nii
    fi
    cd $subjDir

    # acpc align
    if [ ! -f struct_acpc.nii.gz ]; then
        acpcdetect -M -o struct_acpc.nii.gz -i struct_orig.nii
    fi

    # n4bc: ANTs N4 bias-field correction parameters
    dim=3
    input=struct_acpc.nii.gz
    n4=struct_n4bc.nii.gz
    con=[50x50x50x50,0.0000001]
    shrink=4
    bspline=[200]

    if [ ! -f $n4 ]; then
        N4BiasFieldCorrection \
            -d $dim \
            -i $input \
            -s $shrink \
            -c $con \
            -b $bspline \
            -o $n4
    fi
    cd $rawDir
done
| true
|
634d3d982ca9b5a4e8381c6647bf126db13b9077
|
Shell
|
onecoolx/davinci
|
/tools/win32/msys/local/share/bashdb/lib/info.sh
|
UTF-8
| 4,718
| 3.546875
| 4
|
[] |
no_license
|
# -*- shell-script -*-
# info.sh - Bourne Again Shell Debugger Help Routines
# Copyright (C) 2002, 2003, 2004, 2005, 2006, 2008
# Rocky Bernstein rocky@gnu.org
#
# bashdb is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2, or (at your option) any later
# version.
#
# bashdb is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License along
# with bashdb; see the file COPYING. If not, write to the Free Software
# Foundation, 59 Temple Place, Suite 330, Boston, MA 02111 USA.
# Space-separated list of every recognized "info" subcommand.
typeset -r _Dbg_info_cmds='args breakpoints display files functions line program signals source stack terminal variables warranty'

# Print help for one "info" subcommand ($1); with an empty $1, print the
# list header and recurse once per subcommand in _Dbg_info_cmds.
# $2 is passed as 1 by the recursive calls but is not otherwise read here.
_Dbg_info_help() {
  local -r info_cmd=$1
  # NOTE(review): 'label' is assigned but never used in this function.
  local label=$2

  if [[ -z $info_cmd ]] ; then
    local thing
    _Dbg_msg \
'List of info subcommands:
'
    for thing in $_Dbg_info_cmds ; do
      _Dbg_info_help $thing 1
    done
    return
  fi

  # Each arm accepts every unambiguous abbreviation of its subcommand.
  case $info_cmd in
    ar | arg | args )
      _Dbg_msg \
"info args -- Argument variables (e.g. \$1, \$2, ...) of the current stack frame."
      return 0
      ;;
    b | br | bre | brea | 'break' | breakp | breakpo | breakpoints | \
    w | wa | wat | watc | 'watch' | watchp | watchpo | watchpoints )
      _Dbg_msg \
'info breakpoints -- Status of user-settable breakpoints'
      return 0
      ;;
    disp | displ | displa | display )
      _Dbg_msg \
'info display -- Show all display expressions'
      return 0
      ;;
    fi | file| files | sources )
      _Dbg_msg \
'info files -- Source files in the program'
      return 0
      ;;
    fu | fun| func | funct | functi | functio | function | functions )
      _Dbg_msg \
'info functions -- All function names'
      return 0
      ;;
    l | li| lin | line )
      # NOTE(review): duplicated "and" in the message below.
      _Dbg_msg \
'info line -- list current line number and and file name'
      return 0
      ;;
    p | pr | pro | prog | progr | progra | program )
      _Dbg_msg \
'info program -- Execution status of the program.'
      return 0
      ;;
    h | ha | han | hand | handl | handle | \
    si | sig | sign | signa | signal | signals )
      _Dbg_msg \
'info signals -- What debugger does when program gets various signals'
      return 0
      ;;
    so | sou | sourc | source )
      _Dbg_msg \
'info source -- Information about the current source file'
      return 0
      ;;
    st | sta | stac | stack )
      _Dbg_msg \
'info stack -- Backtrace of the stack'
      return 0
      ;;
    te | ter | term | termi | termin | termina | terminal | tt | tty )
      _Dbg_msg \
'info terminal -- Print terminal device'
      return 0
      ;;
    tr|tra|trac|trace|tracep | tracepo | tracepoi | tracepoint | tracepoints )
      _Dbg_msg \
'info tracepoints -- Status of tracepoints'
      return 0
      ;;
    v | va | var | vari | varia | variab | variabl | variable | variables )
      _Dbg_msg \
'info variables -- All global and static variable names'
      return 0
      ;;
    # NOTE(review): 'w' and 'wa' are already claimed by the breakpoints
    # arm above, so they can never reach this arm.
    w | wa | war | warr | warra | warran | warrant | warranty )
      _Dbg_msg \
'info warranty -- Various kinds of warranty you do not have'
      return 0
      ;;
    * )
      _Dbg_errmsg \
    "Undefined info command: \"$info_cmd\". Try \"help info\"."
  esac
}
# List signal handlers in effect.
# List signal handlers in effect: the EXIT (0) pseudo-signal first, then
# every numbered signal that currently has a trap installed.
function _Dbg_info_signals {
  typeset -i i=0
  typeset signal_name
  typeset handler
  # NOTE(review): stop_flag and print_flag are declared but never used.
  typeset stop_flag
  typeset print_flag

  _Dbg_msg "Signal Stop Print Stack Value"
  # EXIT row; per-signal stop/print/stack settings default to "no*".
  _Dbg_printf_nocr "%-12s %-6s %-7s %-9s " EXIT \
    ${_Dbg_sig_stop[0]:-nostop} ${_Dbg_sig_print[0]:-noprint} \
    ${_Dbg_sig_show_stack[0]:-nostack}

  # This is a horrible hack, but I can't figure out how to get
  # trap -p 0 into a variable; handler=`trap -p 0` doesn't work.
  if [[ -n $_Dbg_tty ]] ; then
    builtin trap -p 0 >>$_Dbg_tty
  else
    builtin trap -p 0
  fi

  # Walk signal numbers upward until 'kill -l' no longer recognizes one.
  while [ 1 ] ; do
    signal_name=$(builtin kill -l $i 2>/dev/null) || break
    handler=$(builtin trap -p $i)
    # Only show signals that actually have a trap installed.
    if [[ -n $handler ]] ; then
      _Dbg_printf "%-12s %-6s %-7s %-9s %-6s" $signal_name \
        ${_Dbg_sig_stop[$i]:-nostop} ${_Dbg_sig_print[$i]:-noprint} \
        ${_Dbg_sig_show_stack[$i]:-nostack} "$handler"
    fi
    ((i++))
  done
}

# This is put at the end so we have something at the end to stop at
# when we debug this. By stopping at the end all of the above functions
# and variables can be tested.
typeset -r _Dbg_info_ver=\
'$Id: info.sh,v 1.3 2008/09/09 02:51:45 rockyb Exp $'
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.