text stringlengths 1 1.05M |
|---|
<reponame>zju-3dv/multi-person3dpose
# encoding: utf-8
"""
@author: <NAME>
@contact: <EMAIL>
"""
import zmq
import multiprocessing as mp
from config import config
from utils.dpflow.serialize import loads, dumps
import dataset
def data_sender(id, name, *args):
    """Worker loop: push serialized training samples onto the named ipc socket.

    Runs forever: repeatedly builds a dataset iterator (worker ids are
    1-based, hence ``id + 1``) and sends each message tagged with this
    worker's id.
    """
    ctx = zmq.Context()
    push_socket = ctx.socket(zmq.PUSH)
    # '@' prefix selects a Linux abstract-namespace ipc endpoint.
    push_socket.connect('ipc://@{}'.format(name))
    print('start data provider {}-{}'.format(name, id))
    while True:
        for msg in dataset.train_dataset(id + 1):
            push_socket.send(dumps([id, msg]))
def provider(nr_proc, name, *args):
    """Spawn ``nr_proc`` data_sender worker processes feeding the named socket.

    Extra positional args are forwarded to each worker. Returns after
    starting the processes; the workers run until the parent exits.
    """
    procs = []
    for proc_id in range(nr_proc):
        w = mp.Process(
            target=data_sender,
            args=(proc_id, name, *args))
        # BUG FIX: was `w.deamon = True` (typo) — it set a meaningless
        # attribute, so workers were non-daemonic and kept the parent
        # process alive on exit.
        w.daemon = True
        procs.append(w)
    for p in procs:
        p.start()
# , dataset.train_dataset()
def receiver(name):
    """Generator yielding messages pulled from the named abstract ipc socket.

    Binds the PULL end of the pipe; the sender id attached by data_sender
    is discarded and only the payload is yielded.
    """
    ctx = zmq.Context()
    pull_socket = ctx.socket(zmq.PULL)
    pull_socket.bind('ipc://@{}'.format(name))
    while True:
        sender_id, payload = loads(pull_socket.recv())
        yield payload
if __name__ == "__main__":
    import time
    # Smoke test: start the providers, then time how long it takes to pull
    # 1000 messages off the pipe.
    provider(config.nr_dpflows, config.program_name)
    dataiter = receiver(config.program_name)
    # BUG FIX: time.clock() was deprecated in 3.3 and removed in Python 3.8;
    # perf_counter() is the portable monotonic replacement.
    # NOTE(review): the measured interval includes the 10 s warm-up sleep —
    # presumably intentional (lets workers fill the pipe); confirm.
    start = time.perf_counter()
    time.sleep(10)
    for i in range(1000):
        hehe = next(dataiter)
    end = time.perf_counter()
    print("read: %f s" % (end - start))
#!/bin/sh -e
# Shotcut RPM package generator: fetches the latest upstream release tarball
# and repackages it as a binary RPM. Requires rpmbuild and curl.
NAME="$(basename $0)"
CWD="$(pwd)"
TMP_PATH="/tmp/.shotcut.$$"
SHOTCUT_VERSION=""
# BUG FIX: `==` inside [ ] is a bashism and fails under dash (/bin/sh on many
# distros); POSIX string comparison uses a single `=`. `command -v` is the
# portable replacement for `which`.
if [ "$(command -v rpmbuild)" = "" ]; then
printf "Unable to find rpmbuild, please use yum or zypper to install the package\n" >&2
exit 1
fi
if [ "$(command -v curl)" = "" ]; then
printf "Unable to find curl, please use yum or zypper to install the package\n" >&2
exit 1
fi
# Print command-line help to stdout.
usage() {
cat << EOF
$NAME: Shotcut RPM package generator tool
Usage: $NAME [OPTIONS]
-h Show help
Example:
$NAME
EOF
}
# Query the Github releases API for the latest Shotcut release and store the
# bare version number (tag without the leading "v") in SHOTCUT_VERSION.
# NOTE(review): the tag is scraped from the raw JSON with sed rather than a
# JSON parser — fragile if Github reformats the payload; confirm it still
# matches before relying on it.
shotcut_set_version() {
local app_version=
local tmp_file="/tmp/.version.$$"
printf "Retrieving latest Shotcut version from Github ... "
curl -sk -X GET 'https://api.github.com/repos/mltframework/shotcut/releases/latest' -o $tmp_file >/dev/null 2>&1
# Extract `"tag_name": "vX.Y.Z",` and strip the "v" prefix.
app_version=$(sed -n 's/.*\"\(tag_name\)\": \"\(.*\)\"\,/\2/p' $tmp_file 2>/dev/null | sed 's/v//g')
rm -f $tmp_file >/dev/null 2>&1
printf "$app_version\n"
SHOTCUT_VERSION=$app_version
}
# Compute the RPM release counter for the given upstream version and export
# it as RPM_REVISION. The counter (persisted in ./.release) increments while
# the upstream version (persisted in ./.version) is unchanged, and resets to
# 0 whenever a new upstream version appears.
shotcut_set_release() {
local new_version="$1"
local old_version=""
local release=""
if [ -r "${CWD}/.version" ]; then
old_version="$(cat ${CWD}/.version)"
else
echo "$new_version" > ${CWD}/.version
fi
if [ -r "${CWD}/.release" ]; then
release=$(cat ${CWD}/.release)
else
release=0
fi
# BUG FIX: `==` inside [ ] is a bashism and errors under dash; POSIX sh
# string comparison uses a single `=`.
if [ "$new_version" = "$old_version" ]; then
release=$(($release + 1))
else
release=0
fi
echo "$new_version" > ${CWD}/.version
echo "$release" > ${CWD}/.release
RPM_REVISION=$release
}
# Parse command-line options (-h only).
while getopts "h" opt; do
case "$opt" in
h)
usage
exit 0
;;
esac
done
shotcut_set_version
# BUG FIX (ordering + bashism): validate the version *before* recording it in
# .version/.release, so a failed Github lookup cannot corrupt the release
# bookkeeping; and use POSIX `=` instead of `==` inside [ ].
if [ "${SHOTCUT_VERSION}" = "" ]; then
printf "Unable to determine version, something went wrong... \n" >&2
exit 1
fi
shotcut_set_release "${SHOTCUT_VERSION}"
SHOTCUT_PATH="/opt/shotcut"
# Upstream tarball names embed the version with dots removed plus a ".02"
# build suffix — TODO confirm against a current release asset name.
SHOTCUT_ALT_VERSION="$(echo ${SHOTCUT_VERSION}.02 | tr -d '.')"
PACKAGE_NAME="shotcut-linux-x86_64-${SHOTCUT_ALT_VERSION}.tar.bz2"
PACKAGE_URL="https://github.com/mltframework/shotcut/releases/download/v${SHOTCUT_VERSION}/${PACKAGE_NAME}"
RPM_ARCH="x86_64"
RPM_PACKAGE_NAME="shotcut"
RPM_PACKAGE="${RPM_PACKAGE_NAME}-${SHOTCUT_VERSION}-${RPM_REVISION}.${RPM_ARCH}.rpm"
RPM_BUILD_PATH="${TMP_PATH}/rpmbuild"
mkdir -p ${TMP_PATH}
# BUG FIX: brace expansion ({BUILD,...}) is a bashism — under dash it would
# create a single literal "{BUILD,...}" directory. Loop over the names.
for d in BUILD BUILDROOT RPMS SOURCES SPECS SRPMS; do
mkdir -p ${RPM_BUILD_PATH}/$d || exit 1
done
printf "Downloading ${PACKAGE_NAME}: "
rc=$(curl -skL -X GET "${PACKAGE_URL}" -o "${RPM_BUILD_PATH}/SOURCES/${PACKAGE_NAME}" -w '%{http_code}')
if [ "$rc" -eq 200 ]; then
printf "done\n"
else
printf "failed\n"
exit 1
fi
printf "Generating ${RPM_PACKAGE_NAME}.spec ...\n"
# Emit the rpm spec. The heredoc is unquoted, so ${...} variables expand now,
# while rpm macros escaped as \$RPM_BUILD_ROOT are left for rpmbuild.
cat << EOF > ${RPM_BUILD_PATH}/SPECS/${RPM_PACKAGE_NAME}.spec
%define _topdir ${RPM_BUILD_PATH}
Name: ${RPM_PACKAGE_NAME}
Version: ${SHOTCUT_VERSION}
Release: ${RPM_REVISION}
Summary: Shotcut is a free, open source, cross-platform video editor.
License: GPL-3.0
Vendor: Shotcut
URL: https://www.shotcut.org
BugURL: https://github.com/mltframework/shotcut/issues
ExcludeArch: noarch
Source: ${PACKAGE_NAME}
Requires(post): coreutils shared-mime-info desktop-file-utils
Requires(postun): shared-mime-info desktop-file-utils
AutoReqProv: no
%if 0%{?suse_version}
Requires: libXss1
%else
Requires: libXScrnSaver
%endif
Packager: Robert Milasan <robert@linux-source.org>
%description
Shotcut is a free, open source, cross-platform video editor for Windows, Mac and Linux.
Major features include support for a wide range of formats; no import required meaning native timeline editing;
Blackmagic Design support for input and preview monitoring; and resolution support to 4k.
%prep
%setup -n Shotcut
%build
%install
mkdir -p \$RPM_BUILD_ROOT/${SHOTCUT_PATH}
cp -afR Shotcut.app/* \$RPM_BUILD_ROOT/${SHOTCUT_PATH}
mkdir -p \$RPM_BUILD_ROOT/usr/bin
ln -sf ${SHOTCUT_PATH}/shotcut \$RPM_BUILD_ROOT/usr/bin/shotcut
mkdir -p \$RPM_BUILD_ROOT/usr/share/applications
install -m 644 Shotcut.app/share/applications/org.shotcut.Shotcut.desktop \$RPM_BUILD_ROOT/usr/share/applications/org.shotcut.Shotcut.desktop
mkdir -p \$RPM_BUILD_ROOT/usr/share/icons/hicolor/64x64/apps
install -m 644 Shotcut.app/share/icons/hicolor/64x64/apps/org.shotcut.Shotcut.png \$RPM_BUILD_ROOT/usr/share/icons/hicolor/64x64/apps/org.shotcut.Shotcut.png
mkdir -p \$RPM_BUILD_ROOT/usr/share/metainfo
install -m 644 Shotcut.app/share/metainfo/org.shotcut.Shotcut.appdata.xml \$RPM_BUILD_ROOT/usr/share/metainfo/org.shotcut.Shotcut.appdata.xml
mkdir -p \$RPM_BUILD_ROOT/usr/share/mime/packages
install -m 644 Shotcut.app/share/mime/packages/org.shotcut.Shotcut.xml \$RPM_BUILD_ROOT/usr/share/mime/packages/org.shotcut.Shotcut.xml
%post
if test -x /usr/bin/update-mime-database; then
/usr/bin/update-mime-database "/usr/share/mime" || true
fi
if test -x /usr/bin/update-desktop-database; then
/usr/bin/update-desktop-database --quiet "/usr/share/applications" || true
fi
if test -x /usr/bin/gtk-update-icon-cache; then
/usr/bin/gtk-update-icon-cache --quiet --force "/usr/share/icons/hicolor" || true
fi
exit 0
%postun
if [ \$1 -eq 0 ]; then
if test -x /usr/bin/gtk-update-icon-cache; then
/usr/bin/gtk-update-icon-cache --quiet --force "/usr/share/icons/hicolor" || true
fi
fi
if [ \$1 -eq 0 ]; then
if test -x /usr/bin/update-desktop-database; then
/usr/bin/update-desktop-database --quiet "/usr/share/applications" || true
fi
fi
if [ \$1 -eq 0 ]; then
if test -x /usr/bin/update-mime-database; then
/usr/bin/update-mime-database "/usr/share/mime" || true
fi
fi
exit 0
%clean
rm -rfv \$RPM_BUILD_ROOT
%files
%defattr(0644, root, root, 0755)
%dir $SHOTCUT_PATH
$SHOTCUT_PATH/*
%attr(755,root,root) $SHOTCUT_PATH/shotcut
%attr(755,root,root) $SHOTCUT_PATH/ffmpeg
%attr(755,root,root) $SHOTCUT_PATH/ffplay
%attr(755,root,root) $SHOTCUT_PATH/ffprobe
%attr(755,root,root) $SHOTCUT_PATH/melt
%attr(755,root,root) $SHOTCUT_PATH/qmelt
%attr(755,root,root) $SHOTCUT_PATH/source-me
%attr(755,root,root) $SHOTCUT_PATH/bin/*
/usr/bin/shotcut
/usr/share/applications/org.shotcut.Shotcut.desktop
/usr/share/icons/hicolor/64x64/apps/org.shotcut.Shotcut.png
/usr/share/metainfo/org.shotcut.Shotcut.appdata.xml
/usr/share/mime/packages/org.shotcut.Shotcut.xml
EOF
printf "Generating RPM package: ${RPM_PACKAGE}\n"
# Run rpmbuild from the SPECS dir in a subshell so the cwd change is local.
( cd ${RPM_BUILD_PATH}/SPECS
rpmbuild -bb --quiet --target=${RPM_ARCH} ${RPM_PACKAGE_NAME}.spec 2>/dev/null
)
# Copy the finished RPM back to where the script was invoked from.
if [ -r "${RPM_BUILD_PATH}/RPMS/${RPM_ARCH}/${RPM_PACKAGE}" ]; then
cp -af ${RPM_BUILD_PATH}/RPMS/${RPM_ARCH}/${RPM_PACKAGE} ${CWD}/${RPM_PACKAGE}
printf "Package generated: ${CWD}/${RPM_PACKAGE}\n"
else
printf "Failed to generate RPM package\n" >&2
exit 1
fi
# Clean up the temporary build tree.
rm -fr ${TMP_PATH}
|
from wtforms import (
StringField,
PasswordField,
BooleanField,
IntegerField,
DateField,
TextAreaField,
SubmitField,
)
from flask_wtf import FlaskForm
from wtforms.validators import InputRequired, Length, EqualTo, Email, Regexp ,Optional, ValidationError
import email_validator
from werkzeug.security import generate_password_hash, check_password_hash
class signup_form(FlaskForm):
    """Sign-up form: e-mail address plus password.

    Note: the class name keeps its original snake_case spelling because it is
    the public interface referenced by the templates/views.
    """

    # Required, must parse as an e-mail address, at most 64 characters.
    email = StringField(validators=[InputRequired(), Email(), Length(1, 64)])
    # Required, 8-72 characters — presumably the upper bound matches the
    # password-hashing backend's input limit; confirm.
    password = PasswordField(validators=[InputRequired(), Length(8, 72)])
    sign_up = SubmitField('Sign Up')

    # Slot for a looked-up user record; populated elsewhere (e.g. by a custom
    # validator) — confirm against callers. (A stray `|` dump artifact was
    # removed from the end of this line.)
    _user = None
<filename>src/main/resources/static/book.js
// Swipe-gesture state: coordinates where the touch started and whether a
// one-finger pan is currently in progress.
var panX = 0
var panY = 0
var swipeStart = false

// Record the starting point of a potential swipe. Only a single touch
// qualifies, and not while the user has a text selection active.
function touchGestureStartPan(event) {
    if (event.touches.length == 1 && window.getSelection().type != "Range") {
        panX = event.touches[0].pageX
        panY = event.touches[0].pageY
        swipeStart = true
    }
}
// Track an in-progress one-finger pan and turn it into a page flip once the
// horizontal movement exceeds the configured fraction of the viewport width
// and the swipe angle is flat enough. Left swipe -> next page, right ->
// previous. The gesture is consumed (swipeStart reset) on the first flip.
function touchGesturePan(event) {
    if (SETTING_SWIPE_PAGE.get() && event.touches.length == 1 && window.getSelection().type != "Range" && swipeStart) {
        let newX = event.touches[0].pageX
        let newY = event.touches[0].pageY
        let deltaX = newX - panX
        let deltaY = newY - panY
        let swipeParameters = computeSwipeParameters(deltaX, deltaY)
        let horizontalThreshold = getViewportWidth() * SETTING_SWIPE_LENGTH.get()
        let verticalMoveValid = swipeParameters.angle < SETTING_SWIPE_ANGLE_THRESHOLD.get()
        if (verticalMoveValid && deltaX < -horizontalThreshold) {
            swipeStart = false
            nextPage()
        } else if (verticalMoveValid && deltaX > horizontalThreshold) {
            swipeStart = false
            previousPage()
        }
    }
}
// Resolve once the image has finished loading — successfully or not. Errors
// resolve as well (with no value) because callers only need to know that
// layout is final, not whether the image displayed.
function imageLoadedPromise(image) {
    return new Promise((resolve) => {
        const settle = function() {
            resolve()
        }
        image.onload = settle
        image.onerror = settle
    })
}
// Resolve with true when the element's content overflows its box (i.e. it
// would need scrolling). Waits for every contained <img> to load first,
// since images change the element's scroll size.
function scrollNecessaryPromise(el) {
    return new Promise((resolve, reject) => {
        var images = el.getElementsByTagName('img')
        var imageCount = images.length
        if (imageCount > 0) {
            let imagePromises = []
            for (var i = 0; i < imageCount; i++) {
                imagePromises.push(imageLoadedPromise(images[i]))
            }
            Promise.all(imagePromises).then(() => {
                if (el.scrollHeight > el.offsetHeight || el.scrollWidth > el.offsetWidth) resolve(true)
                else resolve(false)
            })
        } else {
            if (el.scrollHeight > el.offsetHeight || el.scrollWidth > el.offsetWidth) resolve(true)
            else resolve(false)
        }
    })
}
// Look up the cached page (from document.savedPages) whose [start, end]
// range contains the given position. When withIndex is true the page's
// array index is attached to the returned object. Returns null when no
// cached page covers the position (or no pages are cached at all).
function getPageFor(position, withIndex = false) {
    const savedPages = document.savedPages
    if (savedPages != null) {
        for (let index = 0; index < savedPages.length; index++) {
            const candidate = savedPages[index]
            if (candidate.start <= position && position <= candidate.end) {
                if (withIndex) {
                    candidate.index = index
                }
                return candidate
            }
        }
    }
    return null
}
// Number of pages between the current page and the next <h1> (or the section
// end when no further header exists). Returns undefined when the current
// page is unknown or lies outside the loaded section.
// NOTE(review): getPageFor can return null when page boundaries have not
// been computed yet — startPage/endPage are not null-checked here, so the
// subtraction presumably relies on pages already being cached; confirm.
function getRemainingPagesInChapter() {
    if (document.currentPage
        && document.section != null
        && document.section.start <= document.currentPage.end
        && document.currentPage.end <= document.section.end) {
        let currentNode = document.section.leafAtPosition(document.currentPage.end)
        let nextHeader = currentNode.nextNodeOfName("h1")
        let startPage = getPageFor(document.currentPage.end, true)
        let endPage
        if (nextHeader) {
            endPage = getPageFor(nextHeader.start, true)
        } else {
            endPage = getPageFor(document.section.end, true)
        }
        if (endPage) {
            let pagesLeft = endPage.index - startPage.index
            return pagesLeft
        }
    }
}
// Fetch the book content in [start, end] and pass its HTML to the callback.
// Serves from the already-loaded section when the range fits inside it;
// otherwise downloads the containing section first and caches it on
// document.section. The callback is silently skipped when the downloaded
// section still does not cover the range.
function getContentFor(start, end, callback) {
    if (document.section != null && document.section.start <= start && start <= end && end <= document.section.end) {
        if (callback != null) {
            callback(document.section.copy(start, end).getContent())
        }
    } else {
        downloadSection(start, function(section) {
            document.section = section
            if (document.section.start <= start && start <= end && end <= document.section.end) {
                if (callback != null) {
                    callback(document.section.copy(start, end).getContent())
                }
            }
        })
    }
}
// Render the page containing the given book position. If the tab has been
// idle longer than REFRESH_PAGE_TIME_DIFFERENCE, the server-side progress is
// consulted first and the whole page reloads when reading continued
// elsewhere; otherwise the page is displayed directly. Page boundaries are
// computed on demand, progress is saved, and the layout is re-checked for
// overflow (recomputing pages if the cached boundaries no longer fit).
async function displayPageFor(position) {
    let displayPageForInternal = async function(position) {
        document.lastPageChange = now
        showSpinner()
        let page = getPageFor(position)
        if (page == null) {
            // No cached boundary: kick off pagination and poll until the
            // page covering this position appears.
            computePagesForSection(position)
            page = await getPageForPromise(position)
        }
        getContentFor(page.start, page.end, function(text) {
            var content = document.getElementById("ch_content")
            content.innerHTML = text
            document.currentPage = page
            // if book end is displayed, we mark the book as read
            if (page.end == parseInt(getMeta("bookEnd"))) {
                saveProgress(getMeta("bookId"), page.end)
            } else {
                // don't save progress again if the current progress is on this page
                if (document.currentPosition == undefined || document.currentPosition == 0 || document.currentPosition < page.start || page.end < document.currentPosition) {
                    saveProgress(getMeta("bookId"), page.start)
                }
            }
            updatePositionInput(getPositionPercentage(page.start, page.end))
            updatePagesLeft()
            initializeMode()
            // check if overflow is triggerred on every page display
            scrollNecessaryPromise(content).then(scrollNecessary => {
                if (scrollNecessary) {
                    // Cached page no longer fits (e.g. images sized late):
                    // drop the section's pages and lay out again.
                    resetPagesForSection()
                    displayPageFor(position)
                } else {
                    hideSpinner()
                }
            })
        })
    }
    let now = new Date()
    if (document.lastPageChange == undefined) {
        window.location.reload()
    }
    let difference = now - document.lastPageChange
    if (difference > REFRESH_PAGE_TIME_DIFFERENCE) {
        showSpinner()
        loadProgress(function(currentPosition) {
            document.currentPosition = currentPosition
            if (currentPosition < document.currentPage.start || document.currentPage.end < currentPosition) {
                window.location.reload()
            } else {
                // continue as normal
                displayPageForInternal(position)
            }
        })
    } else {
        displayPageForInternal(position)
    }
}
// Refresh the "N page(s) left in chapter" indicator; clears it when the
// remaining count is unavailable.
function updatePagesLeft() {
    let el = document.getElementById("pagesLeft")
    el.innerHTML = ""
    let remainingPages = getRemainingPagesInChapter()
    if (remainingPages != undefined) {
        let span = document.createElement("span")
        // BUG FIX: `text` was assigned without any declaration, leaking an
        // implicit global (and a ReferenceError in strict mode).
        let text = remainingPages
        if (remainingPages == 1) {
            text += " page "
        } else {
            text += " pages "
        }
        text += "left in chapter"
        span.innerHTML = text
        el.appendChild(span)
    }
}
// Format the reading progress at pageEnd as a percentage string such as
// "42.07%". (pageStart is accepted for interface compatibility but unused.)
function getPositionPercentage(pageStart, pageEnd) {
    var bookSize = parseInt(getMeta("bookEnd"))
    var percentage = (pageEnd / bookSize) * 100.0
    var percentageInteger = Math.floor(percentage)
    var percentageFraction = Math.floor((percentage - percentageInteger) * 100)
    // Build the result as a string from the start instead of relying on
    // number-to-string coercion.
    var text = String(percentageInteger)
    if (percentageFraction > 0) {
        // BUG FIX: the fraction is a two-digit value; without zero padding,
        // 12.05 was rendered as "12.5". padStart keeps the leading zero.
        text += "." + String(percentageFraction).padStart(2, "0")
    }
    text += "%"
    return text
}
// Advance to the page after the current one; no-op at the end of the book
// or before any page has been displayed.
function nextPage() {
    if (document.currentPage != null) {
        if (document.currentPage.end < parseInt(getMeta("bookEnd"))) {
            displayPageFor(document.currentPage.end + 1)
        }
    }
}

// Go back to the page before the current one; no-op at the start of the
// book or before any page has been displayed.
function previousPage() {
    if (document.currentPage != null) {
        if (document.currentPage.start > parseInt(getMeta("bookStart"))) {
            displayPageFor(document.currentPage.start - 1)
        }
    }
}
// On viewport resize: reload the page cache for the new dimensions (the
// cache key includes viewport size), blank the content, and re-render the
// page at the position that was being read.
function handleResize() {
    fixControlSizes()
    if (document.currentPage != null) {
        var position = document.currentPage.start
        loadCache()
        document.currentPage = null
        var content = document.getElementById("ch_content")
        content.innerHTML = ""
        displayPageFor(position)
    }
}
// Download the book section containing the given position, convert the JSON
// payload into a node tree, and hand it to the callback. Non-200 responses
// are routed to reportError.
function downloadSection(position, callback) {
    var xhttp = new XMLHttpRequest()
    xhttp.onreadystatechange = function() {
        // readyState 4 == DONE
        if (this.readyState == 4) {
            if (this.status == 200) {
                var jsonObj = JSON.parse(this.responseText)
                var node = convert(jsonObj)
                if (callback != null) {
                    callback(node)
                }
            } else {
                reportError(this.status + " " + this.responseText)
            }
        }
    }
    xhttp.open("GET", "bookSection?id=" + getMeta("bookId") + "&position=" + position)
    xhttp.send()
}
// Ask the page's service worker to cache the entire book for offline use.
function downloadBookToDevice() {
    if ('serviceWorker' in navigator) {
        var bookId = getMeta("bookId")
        var size = num(getMeta("size"))
        // BUG FIX: controller is null until a service worker actually
        // controls the page (e.g. the first load after registration);
        // posting unguarded threw a TypeError and aborted onload work.
        var controller = navigator.serviceWorker.controller
        if (controller != null) {
            controller.postMessage({type: 'storeBook', bookId: bookId, maxPositions: size, kind: 'book'})
        }
    }
}
// Return the cached section when it covers the position; otherwise start an
// async download (caching the result on document.section) and return null —
// callers must retry after the download lands.
function getSectionFor(position) {
    if (document.section != null && document.section.start <= position && position <= document.section.end) {
        return document.section
    } else {
        downloadSection(position, function(node) {
            document.section = node
        })
        return null
    }
}

// Download the section containing the position and start computing its page
// boundaries (results accumulate in document.savedPages via savePage).
function computePagesForSection(position) {
    downloadSection(position, function(section) {
        compute(section, section.start)
    })
}
// Invalidate the cached page boundaries that overlap the current section
// (e.g. after the layout changed), keeping pages entirely outside it, and
// persist the pruned cache.
function resetPagesForSection() {
    if (document.section) {
        let start = document.section.start
        let end = document.section.end
        // BUG FIX: guard against savedPages being absent (cache never
        // loaded) — calling .filter on null/undefined threw a TypeError.
        let savedPages = document.savedPages || []
        let remainingPages = savedPages.filter(page => page.end < start || end < page.start)
        document.savedPages = remainingPages
        saveCache()
    }
}
// Append a computed page boundary to the in-memory page cache, creating the
// cache array on first use.
function savePage(start, end) {
    if (document.savedPages == null) {
        document.savedPages = []
    }
    document.savedPages.push({start: start, end: end})
}

// Promise that resolves after ms milliseconds.
function timeout(ms) {
    return new Promise((resolve, reject) => {
        window.setTimeout(function() {
            resolve()
        }, ms)
    })
}

// Resolve with the page covering the position, polling every 100 ms until
// the (concurrently running) pagination produces it.
function getPageForPromise(position) {
    return new Promise((resolve, reject) => {
        let page = getPageFor(position)
        if (page == null) {
            timeout(100).then(() => resolve(getPageForPromise(position)))
        } else {
            resolve(page)
        }
    })
}
// Compute page boundaries for a section by trial layout: grow the candidate
// page word-by-word in the hidden "ch_shadow_content" element until it
// overflows, record the last fitting boundary, then recurse (via a short
// timeout, to keep the UI responsive) for the next page. The final page
// triggers a cache save and a pages-left refresh.
async function compute(section, start) {
    let shadowContent = document.getElementById("ch_shadow_content")
    shadowContent.innerHTML = ""
    //let firstEnd = section.findSpaceAfter(start)
    let firstEnd = start
    let end = firstEnd
    let previousEnd = firstEnd
    shadowContent.innerHTML = section.copy(start, end).getContent()
    let overflow = await scrollNecessaryPromise(shadowContent)
    while ((!overflow) && (end < section.end)) {
        previousEnd = end
        end = section.findSpaceAfter(end)
        shadowContent.innerHTML = section.copy(start, end).getContent()
        overflow = await scrollNecessaryPromise(shadowContent)
    }
    // we have a page
    if (end < section.end) {
        // Overflowed: previousEnd was the last position that still fit.
        savePage(start, previousEnd)
        timeout(10).then(() => compute(section, previousEnd + 1))
    } else {
        // Reached the section end without overflow: last page of the section.
        savePage(start, end)
        saveCache()
        updatePagesLeft()
    }
}
// Find the stylesheet loaded from book.css; returns undefined when no such
// sheet exists.
function getBookStyleSheet() {
    let styleSheets = window.document.styleSheets
    for (let i = 0; i < styleSheets.length; i++) {
        // BUG FIX: href is null for inline <style> sheets (such as the one
        // createDynamicStyleSheet injects), and null.endsWith threw a
        // TypeError. Skip sheets without an href.
        let href = styleSheets[i].href
        if (href && href.endsWith("book.css")) {
            return styleSheets[i]
        }
    }
}
// Apply the user's configured dark-mode palette.
function setDarkMode() {
    let background = SETTING_DARK_MODE_BACKGROUND.get()
    let foreground = SETTING_DARK_MODE_FOREGROUND.get()
    setUiColors(foreground, background)
}

// Create an empty <style> element and remember its CSSStyleSheet on
// document.dynamicStyleSheet; setUiColors rewrites its rules at runtime.
function createDynamicStyleSheet() {
    var sheet = (function() {
        var style = document.createElement("style");
        // WebKit hack
        style.appendChild(document.createTextNode(""));
        document.head.appendChild(style);
        return style.sheet;
    })();
    document.dynamicStyleSheet = sheet
}

// Apply the user's configured light-mode palette.
function setLightMode() {
    let background = SETTING_LIGHT_MODE_BACKGROUND.get()
    let foreground = SETTING_LIGHT_MODE_FOREGROUND.get()
    setUiColors(foreground, background)
}

// Replace all rules of the dynamic stylesheet with the given foreground/
// background colors (content area, links, table borders) and recolor the
// browser status bar to match. No-op until createDynamicStyleSheet ran.
function setUiColors(foreground, background) {
    let bookStyleSheet = document.dynamicStyleSheet
    if (bookStyleSheet) {
        while (bookStyleSheet.cssRules.length > 0) bookStyleSheet.deleteRule(0)
        bookStyleSheet.insertRule('#content { color: ' + foreground + '; background-color: ' + background + '; }', 0)
        bookStyleSheet.insertRule('a { color: ' + foreground + '; }', 0)
        bookStyleSheet.insertRule('table, th, td { border-color: ' + foreground + '; }', 0)
        setStatusBarColor(background)
    }
}
// Apply the configured color mode: 0 = always dark, 2 = always light,
// anything else = automatic switching based on the configured day window.
function initializeMode() {
    const mode = SETTING_BOOK_MODE.get()
    if (mode == 0) {
        setDarkMode()
        return
    }
    if (mode == 2) {
        setLightMode()
        return
    }
    // Automatic: dark outside the [dayStart, dayEnd] interval.
    const dayStart = timeStringToDate(SETTING_DAY_START.get())
    const dayEnd = timeStringToDate(SETTING_DAY_END.get())
    const current = new Date()
    const isNight = current < dayStart || dayEnd < current
    if (isNight) {
        setDarkMode()
    } else {
        setLightMode()
    }
}
// Collect the table-of-contents entries (elements with class "ch_chapter")
// as {start, element} records, memoized on document.chapters, and return
// them sorted by start position.
// NOTE(review): the sort mutates the memoized array on every call, and the
// comparator re-parses `start` values that are already numbers — harmless
// but redundant; confirm before relying on insertion order elsewhere.
function getChapters() {
    if (! document.chapters) {
        var chapterElements = document.getElementsByClassName("ch_chapter")
        document.chapters = []
        for (var i = 0; i < chapterElements.length; i++) {
            document.chapters.push({
                start: parseInt(chapterElements[i].getAttribute("ch_position")),
                element: chapterElements[i]
            })
        }
    }
    return document.chapters.sort((a, b) => parseFloat(a.start) - parseFloat(b.start));
}
// Clear the "current chapter" highlight from every table-of-contents entry.
function resetCurrentChapter() {
    // BUG FIX: getElementsByClassName returns a *live* HTMLCollection;
    // removing the class while iterating forward shrank the collection and
    // skipped every other element. Snapshot it into a plain array first.
    var currentChapters = Array.from(document.getElementsByClassName("ch_current"))
    for (var i = 0; i < currentChapters.length; i++) {
        currentChapters[i].classList.remove("ch_current")
    }
}
// Return the TOC entry for the chapter containing the middle of the current
// page: the last chapter whose start precedes that position.
// NOTE(review): when the position precedes the first chapter's start, the
// index stays -1 and chapters[-1] is undefined — callers (expandPathToChapter)
// tolerate null/undefined, so this presumably never renders; confirm.
function getCurrentChapter() {
    var chapters = getChapters()
    // find current chapter
    var currentChapter = -1
    var position = (document.currentPage.start + document.currentPage.end) / 2
    while (currentChapter < chapters.length - 1 && position > chapters[currentChapter + 1].start) {
        currentChapter = currentChapter + 1
    }
    return chapters[currentChapter]
}
// Mark the chapter's TOC entry as current and make every enclosing <ul>
// visible so the entry is reachable in the (collapsed-by-default) tree.
function expandPathToChapter(chapter) {
    if (chapter != null) {
        chapter.element.classList.add("ch_current")
        var current = chapter.element
        while (current != null) {
            if (current.nodeName == "UL") {
                current.style.display = "block"
            }
            current = current.parentElement
        }
    }
}

// Rebuild the TOC sidebar state before showing it: clear the previous
// highlight, collapse everything, then highlight and expand the path to the
// chapter being read.
function prepareBookTools() {
    resetCurrentChapter()
    hideAllSubchapters()
    var chapter = getCurrentChapter()
    expandPathToChapter(chapter)
}
// First nested <ul> under the element (the element's sub-chapter list), or
// null when it has none.
function getChildList(current) {
    var childLists = current.getElementsByTagName("ul")
    if (childLists.length > 0) {
        return childLists[0]
    } else {
        return null
    }
}

// Show/hide the element's sub-chapter list; no-op when there is none.
function toggleChildList(current) {
    var childList = getChildList(current)
    if (childList != null) {
        if (childList.style.display == "none") {
            childList.style.display = "block"
        } else {
            childList.style.display = "none"
        }
    }
}

// Collapse every sub-chapter list in the table of contents.
function hideAllSubchapters() {
    var listItemsWithSubchapters = document.getElementsByClassName("ch_withsubchapters")
    for (var i = 0; i < listItemsWithSubchapters.length; i++) {
        var subchapterList = listItemsWithSubchapters[i].getElementsByTagName("ul")[0]
        subchapterList.style.display = "none"
    }
}
// Wire each expandable TOC entry's label (its first <p>) to toggle its
// sub-chapter list on click; the click does not bubble to the entry itself.
function initializeChapters() {
    var listItemsWithSubchapters = document.getElementsByClassName("ch_withsubchapters")
    for (var i = 0; i < listItemsWithSubchapters.length; i++) {
        var para = listItemsWithSubchapters[i].getElementsByTagName("p")[0]
        para.addEventListener("click", (event) => {
            event.stopPropagation()
            toggleChildList(event.target.parentElement)
        })
    }
}

// One-time TOC setup: attach toggles, then start fully collapsed.
function initTableOfContents() {
    initializeChapters()
    hideAllSubchapters()
}

// Navigate to the position stored on a TOC entry and close the sidebar.
function displayPageForTocEntry(entry) {
    var position = parseInt(entry.getAttribute("ch_position"))
    hideTools()
    displayPageFor(position)
}
// Apply the font-size zoom (in rem). withResize=false skips the re-layout,
// used during initial startup before any page is displayed.
function setZoom(zoom, withResize = true) {
    document.getElementById("content").style["font-size"] = zoom + "rem"
    if (withResize) handleResize()
}

// localStorage key for the page-boundary cache. Includes viewport size,
// zoom and edge settings because any of them changes the pagination.
function getBookPagesCacheKey() {
    var pagesKey = "bookPages_" + getMeta("bookId") + "_" + getViewportWidth() + "_" + getViewportHeight() + "_" + SETTING_BOOK_ZOOM.get() + "_" + SETTING_BOOK_EDGE_HORIZONTAL.get() + "_" + SETTING_BOOK_EDGE_VERTICAL.get()
    return pagesKey
}

// Load the page-boundary cache for the current layout; empty when absent.
function loadCache() {
    var cacheKey = getBookPagesCacheKey()
    var cache = window.localStorage.getItem(cacheKey)
    if (cache != null) {
        document.savedPages = JSON.parse(cache)
    } else {
        document.savedPages = []
    }
}

// Persist the in-memory page-boundary cache for the current layout.
function saveCache() {
    var cacheKey = getBookPagesCacheKey()
    window.localStorage.setItem(cacheKey, JSON.stringify(document.savedPages))
}
// Populate the settings panel with every setting's controller widget and
// register change listeners: color settings re-apply the mode, layout
// settings trigger a re-layout (edge settings after a 1 s delay, presumably
// to let the CSS transition finish — confirm), zoom re-applies font size,
// transparency re-applies the overlay alpha.
function initSettings() {
    let settingsWrapper = document.getElementById('ch_settings')
    settingsWrapper.appendChild(SETTING_BOOK_ZOOM.controller)
    settingsWrapper.appendChild(SETTING_BOOK_MODE.controller)
    settingsWrapper.appendChild(SETTING_DARK_MODE_BACKGROUND.controller)
    settingsWrapper.appendChild(SETTING_DARK_MODE_FOREGROUND.controller)
    settingsWrapper.appendChild(SETTING_LIGHT_MODE_BACKGROUND.controller)
    settingsWrapper.appendChild(SETTING_LIGHT_MODE_FOREGROUND.controller)
    settingsWrapper.appendChild(SETTING_BOOK_EDGE_HORIZONTAL.controller)
    settingsWrapper.appendChild(SETTING_BOOK_EDGE_VERTICAL.controller)
    settingsWrapper.appendChild(SETTING_BOOK_TOOLS_HEIGHT.controller)
    settingsWrapper.appendChild(SETTING_OVERLAY_TRANSPARENCY.controller)
    settingsWrapper.appendChild(SETTING_SWIPE_PAGE.controller)
    settingsWrapper.appendChild(SETTING_SWIPE_LENGTH.controller)
    settingsWrapper.appendChild(SETTING_SWIPE_ANGLE_THRESHOLD.controller)
    SETTING_BOOK_MODE.addListener(initializeMode)
    SETTING_DARK_MODE_BACKGROUND.addListener(initializeMode)
    SETTING_DARK_MODE_FOREGROUND.addListener(initializeMode)
    SETTING_LIGHT_MODE_BACKGROUND.addListener(initializeMode)
    SETTING_LIGHT_MODE_FOREGROUND.addListener(initializeMode)
    SETTING_BOOK_EDGE_HORIZONTAL.addListener(() => setTimeout(handleResize, 1000))
    SETTING_BOOK_EDGE_VERTICAL.addListener(() => setTimeout(handleResize, 1000))
    SETTING_BOOK_TOOLS_HEIGHT.addListener(handleResize)
    SETTING_BOOK_ZOOM.addListener(setZoom)
    SETTING_OVERLAY_TRANSPARENCY.addListener(initAlpha)
    settingsWrapper.appendChild(getRemoveProgressButton())
    settingsWrapper.appendChild(getMarkAsReadButton())
}
// Page bootstrap: apply theme colors, build UI, wire gestures/keyboard/
// click handlers, restore zoom and the page cache, then (after a short
// delay) jump to the saved reading position and start offline caching.
// (A stray `|` dump artifact after the closing brace was removed.)
window.onload = function() {
    document.documentElement.style.setProperty('--accent-color', SETTING_ACCENT_COLOR.get());
    document.documentElement.style.setProperty('--foreground-color', SETTING_FOREGROUND_COLOR.get());
    document.documentElement.style.setProperty('--background-color', SETTING_BACKGROUND_COLOR.get());
    createDynamicStyleSheet()
    fixControlSizes()
    initTableOfContents()
    initSettings()
    initFullscreenButton()
    initBookCollectionLinks()
    // other page controls heights need to be fixed like this too
    enableKeyboardGestures({
        "leftAction": previousPage,
        "rightAction": nextPage,
        "escapeAction": () => toggleTools(true, prepareBookTools)
    })
    document.getElementById("ch_content").addEventListener('touchstart', touchGestureStartPan, false);
    document.getElementById("ch_content").addEventListener('touchmove', touchGesturePan, false);
    document.getElementById("ch_prev").addEventListener("click", (event) => previousPage())
    document.getElementById("ch_next").addEventListener("click", (event) => nextPage())
    document.getElementById("ch_tools_left").addEventListener("click", (event) => toggleTools(true, prepareBookTools))
    document.getElementById("ch_tools_right").addEventListener("click", (event) => toggleTools(false, prepareBookTools))
    document.getElementById("ch_tools_container").addEventListener("click", (event) => hideTools())
    document.getElementById("ch_tools").addEventListener("click", event => event.stopPropagation())
    initializeMode()
    initAlpha()
    // Re-evaluate day/night mode whenever the tab regains focus.
    window.addEventListener("focus", function(event) {
        initializeMode()
    }, false)
    setZoom(SETTING_BOOK_ZOOM.get(), false)
    loadCache()
    document.lastPageChange = new Date()
    timeout(100).then(() => {loadProgress(function(currentPosition) {
        document.currentPosition = currentPosition
        displayPageFor(currentPosition)
    })})
    downloadBookToDevice()
}
# Termux build recipe metadata for cppcheck (static C/C++ code analysis).
TERMUX_PKG_HOMEPAGE=https://github.com/danmar/cppcheck
TERMUX_PKG_DESCRIPTION="tool for static C/C++ code analysis"
TERMUX_PKG_LICENSE="GPL-3.0"
TERMUX_PKG_MAINTAINER="@termux"
TERMUX_PKG_VERSION=2.7.4
TERMUX_PKG_AUTO_UPDATE=true
# Source tarball is pinned by version and checksum; both must be bumped
# together on upgrade.
TERMUX_PKG_SRCURL=https://github.com/danmar/cppcheck/archive/$TERMUX_PKG_VERSION.tar.gz
TERMUX_PKG_SHA256=f0558c497b7807763325f3a821f1c72b743e5d888b037b8d32157dd07d6c26e1
|
<reponame>uditgupta002/StringProblems
import java.util.*;
import java.lang.*;
import java.io.*;
// Reads T test cases from stdin; each case supplies a count line followed by
// a line of space-separated strings. Prints the strings' longest common
// prefix, or "-1" when it is empty.
class LongestCommonPrefix {
    public static void main (String[] args) throws IOException {
        BufferedReader br = new BufferedReader(new InputStreamReader(System.in));
        int testCases = Integer.parseInt(br.readLine());
        for (int t = 0; t < testCases; t++) {
            // The declared count is consumed to keep the input protocol, but
            // the actual token count is authoritative below.
            int length = Integer.parseInt(br.readLine());
            String[] tempArr = br.readLine().split(" ");
            int minLength = Integer.MAX_VALUE;
            // StringBuilder instead of StringBuffer: no concurrent access
            // here, so the unsynchronized variant is the idiomatic choice.
            StringBuilder buf = new StringBuilder();
            for (String str : tempArr) {
                if (str.length() < minLength)
                    minLength = str.length();
            }
            boolean exit = false;
            for (int i = 0; i < minLength && !exit; i++) {
                char currentChar = tempArr[0].charAt(i);
                // BUG FIX: the loop was bounded by the *declared* count,
                // which throws ArrayIndexOutOfBoundsException when the line
                // holds fewer tokens; bound by the real array length (and
                // skip index 0, which is the reference string itself).
                for (int j = 1; j < tempArr.length; j++) {
                    if (tempArr[j].charAt(i) != currentChar) {
                        exit = true;
                        break;
                    }
                }
                if (!exit)
                    buf.append(currentChar);
            }
            System.out.println(buf.length() > 0 ? buf.toString() : "-1");
        }
    }
}
|
#!/bin/bash -e
# Build/test driver for the willani compiler. Run everything from the
# script's own directory so relative paths (src/, asm/, test/) resolve.
SCRIPT_DIR=$(cd "$(dirname "$0")"; pwd)
cd "$SCRIPT_DIR"
# Compile every C source under src/ to assembly with gcc, then assemble and
# statically link the willani binary.
function compile_by_gcc() {
  echo -e "\nCompile souce files by gcc\n"
  # BUG FIX: the old pattern `grep -E '*\.c'` had a stray leading `*` and no
  # end anchor, so any path merely *containing* ".c" (e.g. foo.cpp) matched.
  # Anchor on the .c extension instead.
  find src -type f | grep -E '\.c$' | while read -r C_FILE
  do
    # Derived assembly path: same name with the trailing "c" replaced by "s".
    ASM=$(echo "$C_FILE" | sed 's#c$#s#g')
    echo "gcc -S \"$C_FILE\" -o \"$ASM\""
    gcc -S "$C_FILE" -o "$ASM"
  done
  echo -e "\nAssemble and link\n"
  gcc -static -O0 -o willani src/*.s asm/*.s src/parse/*.s
}
# Self-host step: compile every C source under src/ to assembly with the
# previously built ./willani, then assemble and link with gcc.
function compile_by_self() {
  echo -e "\nCompile souce files by willani\n"
  # BUG FIX: `grep -E '*\.c'` was unanchored (and had a stray leading `*`),
  # matching any path containing ".c"; anchor on the .c extension.
  find src -type f | grep -E '\.c$' | while read -r C_FILE
  do
    ASM=$(echo "$C_FILE" | sed 's#c$#s#g')
    echo "./willani \"$C_FILE\" -o \"$ASM\""
    ./willani "$C_FILE" -o "$ASM"
  done
  echo -e "\nAssemble and link\n"
  gcc -static -O0 -o willani src/*.s asm/*.s src/parse/*.s
}
# Run the three test suites: compile test/test.c with willani, link it with a
# gcc-compiled helper object, execute the result, then run the shell suites.
function test_willani() {
  rm -f test-*
  # Helper object compiled by gcc: provides static_fn for the linked test.
  echo 'int static_fn() { return 5; }' | gcc -xc -c -o test-01.o -
  ./willani test/test.c -o test-00.s
  gcc -static test-00.s test-01.o -o test-00.out
  ./test-00.out
  echo 'test1 is finished!!'
  ./test/test2.sh
  echo 'test2 is finished!!'
  ./test/test3.sh
  echo 'test3 is finished!!'
}
# Dispatch on the first CLI argument: gcc | self | test.
# Any other (or missing) argument is a silent no-op, as before.
case "$1" in
  gcc)  compile_by_gcc ;;
  self) compile_by_self ;;
  test) test_willani ;;
esac
|
#!/bin/bash
# Remove every line containing "apples" from myfile.txt (in place, via a
# temporary file).
# BUG FIXES: plain `read p` mangled backslashes and stripped leading/trailing
# whitespace — use `IFS= read -r`; and `>>` inside the loop appended to any
# pre-existing newfile.txt — redirect the whole loop with `>` instead.
while IFS= read -r line; do
    case $line in
        *apples*) ;;  # drop matching lines
        *) printf '%s\n' "$line" ;;
    esac
done <myfile.txt >newfile.txt
mv newfile.txt myfile.txt
public static String convertToCamelCase(String str){
str = str.trim().toLowerCase()
StringBuilder sb = new StringBuilder();
String[] splitStr = str.split(" ");
for(String s : splitStr){
if(s.length() > 0){
sb.append(s.substring(0, 1).toUpperCase() + s.substring(1) + "");
}
}
return sb.toString();
} |
#!/bin/bash
# This script sets up a Ubuntu host to be able to create the image by
# installing all of the necessary files. It assumes an EC2 host with
# passwordless sudo
# NOTE(review): no `set -e` — a failed download or build silently continues;
# confirm whether that is intentional before adding it.
# Install a bunch of packages we need
read -d '' PACKAGES <<EOT
bc
libtool-bin
gperf
bison
flex
texi2html
texinfo
help2man
gawk
libtool
build-essential
automake
libncurses5-dev
libz-dev
libglib2.0-dev
device-tree-compiler
qemu-user-static
binfmt-support
multistrap
git
lib32z1
lib32ncurses5
lib32bz2-1.0
lib32stdc++6
libgnutls-dev
libssl-dev
kpartx
zerofree
u-boot-tools
EOT
# NOTE(review): libgnutls-dev is a transitional/renamed package on newer
# Ubuntu releases (libgnutls28-dev) — verify on the target release.
sudo apt-get install -y $PACKAGES
# Install up-to-date versions of crosstool and qemu
mkdir tools
cd tools/
wget http://crosstool-ng.org/download/crosstool-ng/crosstool-ng-1.22.0.tar.bz2
tar -xf crosstool-ng-1.22.0.tar.bz2
# NOTE(review): this assumes the tarball extracts to "crosstool-ng/" (no
# version suffix) — confirm against the actual archive layout.
cd crosstool-ng/
./configure --prefix=/opt/crosstool-ng
make
sudo make install
cd ..
wget http://wiki.qemu-project.org/download/qemu-2.8.0.tar.bz2
tar -xf qemu-2.8.0.tar.bz2
cd qemu-2.8.0
# Static user-mode ARM emulation only, installed under /opt/qemu.
./configure --target-list=arm-linux-user --prefix=/opt/qemu --static
make
sudo make install
# Create the symlink that ubuntu expects
cd /opt/qemu/bin
sudo ln -s qemu-arm qemu-arm-static
cd ~
# Create gmake symlink to keep SDK happy
cd /usr/bin
sudo ln -s make gmake
echo 'PATH=/opt/qemu/bin:/opt/crosstool-ng/bin:$PATH' >> ~/.profile
echo "Now install Vivado and SDK version 2016.1 and login again to ensure the enviroment is properly set up"
|
<gh_stars>0
/*
* Copyright 2014-2016 CyberVision, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.kaaproject.kaa.server.common.dao.service;
import org.codehaus.jackson.JsonNode;
import org.codehaus.jackson.map.ObjectMapper;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Ignore;
import org.junit.Test;
import org.kaaproject.kaa.common.dto.ApplicationDto;
import org.kaaproject.kaa.common.dto.HasId;
import org.kaaproject.kaa.common.dto.TenantDto;
import org.kaaproject.kaa.common.dto.ctl.CTLSchemaDto;
import org.kaaproject.kaa.common.dto.ctl.CTLSchemaMetaInfoDto;
import org.kaaproject.kaa.server.common.dao.AbstractTest;
import org.kaaproject.kaa.server.common.dao.exception.DatabaseProcessingException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Set;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
@Ignore("This test should be extended and initialized with proper context in each NoSQL submodule")
/**
 * DAO-level tests for the CTL schema service: CRUD by FQN/version/tenant/app,
 * scope updates, dependency lookups, export formats and concurrent saves.
 * Relies on AbstractTest for the service beans and generate* fixture helpers.
 */
public class CTLServiceImplTest extends AbstractTest {

    // Pool used by the concurrent-save test below.
    private ExecutorService executorService = Executors.newFixedThreadPool(10);

    private TenantDto tenant;
    private ApplicationDto appDto;
    private ApplicationDto appDto2;
    private CTLSchemaDto firstSchema;
    private CTLSchemaDto secondSchema;
    private CTLSchemaDto thirdSchema;
    private CTLSchemaDto fourthSchema;
    private CTLSchemaDto mainSchema;
    private CTLSchemaDto defaultSystemSchema;
    private CTLSchemaDto systemSchema;
    private CTLSchemaDto tenantSchema;
    private CTLSchemaDto tenantSchema2;
    private CTLSchemaDto appSchema;
    private CTLSchemaDto app2Schema;
    private CTLSchemaDto appSchema2;
    private CTLSchemaDto appSchema3;

    private static final String TEST_CTL_SCHEMA_ALPHA = "dao/ctl/alpha.json";
    private static final String TEST_CTL_SCHEMA_ALPHA_FLAT = "dao/ctl/alphaFlat.json";
    private static final String TEST_CTL_SCHEMA_BETA = "dao/ctl/beta.json";
    private static final String TEST_CTL_SCHEMA_GAMMA = "dao/ctl/gamma.json";

    // alpha -> beta -> gamma dependency chain used by the export tests.
    private CTLSchemaDto alpha;
    private CTLSchemaDto beta;
    private CTLSchemaDto gamma;

    /**
     * Rebuilds the fixture graph before every test: a tenant with two apps,
     * a schema with four dependencies, system/tenant/app-scoped schemas with
     * multiple versions, and the alpha/beta/gamma chain.
     */
    @Before
    public void before() throws Exception {
        clearDBData();
        if (tenant == null) {
            tenant = userService.findTenantByName(SUPER_TENANT);
            if (tenant == null) {
                TenantDto tn = new TenantDto();
                tn.setName(SUPER_TENANT);
                tenant = userService.saveTenant(tn);
                appDto = generateApplicationDto(tenant.getId(), "The app 1");
                appDto2 = generateApplicationDto(tenant.getId(), "The app 2");
                // The default system schema is created when the tenant is bootstrapped.
                List<CTLSchemaDto> ctlSchemas = ctlService.findSystemCTLSchemas();
                defaultSystemSchema = ctlSchemas.get(0);
            }
        }
        // mainSchema depends on schemas 1..4; used by the dependency-lookup test.
        Set<CTLSchemaDto> dependency = new HashSet<>();
        firstSchema = ctlService.saveCTLSchema(generateCTLSchemaDto(DEFAULT_FQN+1, tenant.getId(), null, 1));
        dependency.add(firstSchema);
        secondSchema = ctlService.saveCTLSchema(generateCTLSchemaDto(DEFAULT_FQN+2, tenant.getId(), null, 2));
        dependency.add(secondSchema);
        thirdSchema = ctlService.saveCTLSchema(generateCTLSchemaDto(DEFAULT_FQN+3, tenant.getId(), null, 3));
        dependency.add(thirdSchema);
        fourthSchema = ctlService.saveCTLSchema(generateCTLSchemaDto(DEFAULT_FQN+4, tenant.getId(), null, 4));
        dependency.add(fourthSchema);
        mainSchema = generateCTLSchemaDto(DEFAULT_FQN+5, tenant.getId(), null, 7);
        mainSchema.setDependencySet(dependency);
        mainSchema = ctlService.saveCTLSchema(mainSchema);
        // System scope (no tenant), then two versions of the same tenant-scoped FQN.
        systemSchema = ctlService.saveCTLSchema(generateCTLSchemaDto(DEFAULT_FQN+6, null, null, 50));
        tenantSchema = ctlService.saveCTLSchema(generateCTLSchemaDto(DEFAULT_FQN+7, tenant.getId(), null, 77));
        tenantSchema2 = ctlService.saveCTLSchema(generateCTLSchemaDto(DEFAULT_FQN+7, tenant.getId(), null, 78));
        // App-scoped schemas: two versions of FQN+8 in app 1, one in app 2, plus FQN+9.
        CTLSchemaDto unsaved = generateCTLSchemaDto(DEFAULT_FQN+8, tenant.getId(), appDto.getId(), 80);
        appSchema = ctlService.saveCTLSchema(unsaved);
        unsaved = generateCTLSchemaDto(DEFAULT_FQN+8, tenant.getId(), appDto.getId(), 81);
        appSchema2 = ctlService.saveCTLSchema(unsaved);
        unsaved = generateCTLSchemaDto(DEFAULT_FQN+9, tenant.getId(), appDto.getId(), 2);
        appSchema3 = ctlService.saveCTLSchema(unsaved);
        unsaved = generateCTLSchemaDto(DEFAULT_FQN+8, tenant.getId(), appDto2.getId(), 11);
        app2Schema = ctlService.saveCTLSchema(unsaved);
        // gamma has no dependencies; beta depends on gamma; alpha depends on beta.
        gamma = new CTLSchemaDto();
        CTLSchemaMetaInfoDto gammaMetaInfo = new CTLSchemaMetaInfoDto("org.kaaproject.kaa.Gamma", tenant.getId());
        gamma.setMetaInfo(gammaMetaInfo);
        gamma.setVersion(1);
        gamma.setBody(readSchemaFileAsString(TEST_CTL_SCHEMA_GAMMA));
        gamma = ctlService.saveCTLSchema(gamma);
        gamma = ctlService.findCTLSchemaById(gamma.getId());
        beta = new CTLSchemaDto();
        CTLSchemaMetaInfoDto betaMetaInfo = new CTLSchemaMetaInfoDto("org.kaaproject.kaa.Beta", tenant.getId());
        beta.setMetaInfo(betaMetaInfo);
        beta.setVersion(1);
        Set<CTLSchemaDto> betaDependencies = new HashSet<>();
        betaDependencies.add(gamma);
        beta.setDependencySet(betaDependencies);
        beta.setBody(readSchemaFileAsString(TEST_CTL_SCHEMA_BETA));
        beta = ctlService.saveCTLSchema(beta);
        beta = ctlService.findCTLSchemaById(beta.getId());
        alpha = new CTLSchemaDto();
        CTLSchemaMetaInfoDto alphaMetaInfo = new CTLSchemaMetaInfoDto("org.kaaproject.kaa.Alpha", tenant.getId());
        alpha.setMetaInfo(alphaMetaInfo);
        alpha.setVersion(1);
        Set<CTLSchemaDto> alphaDependencies = new HashSet<>();
        alphaDependencies.add(beta);
        alpha.setDependencySet(alphaDependencies);
        alpha.setBody(readSchemaFileAsString(TEST_CTL_SCHEMA_ALPHA));
        alpha = ctlService.saveCTLSchema(alpha);
        alpha = ctlService.findCTLSchemaById(alpha.getId());
    }

    @Test
    public void testRemoveCTLSchemaByFqnAndVerAndTenantIdAndApplicationId() {
        String schemaId = tenantSchema.getId();
        ctlService.removeCTLSchemaByFqnAndVerAndTenantIdAndApplicationId(tenantSchema.getMetaInfo().getFqn(), tenantSchema.getVersion(),
                tenantSchema.getMetaInfo().getTenantId(), tenantSchema.getMetaInfo().getApplicationId());
        Assert.assertNull(ctlService.findCTLSchemaById(schemaId));
    }

    @Test
    public void testRemoveCTLSchemaByFqnAndVerAndWithoutTenantId() {
        String schemaId = systemSchema.getId();
        ctlService.removeCTLSchemaByFqnAndVerAndTenantIdAndApplicationId(systemSchema.getMetaInfo().getFqn(), systemSchema.getVersion(),
                systemSchema.getMetaInfo().getTenantId(), systemSchema.getMetaInfo().getApplicationId());
        Assert.assertNull(ctlService.findCTLSchemaById(schemaId));
    }

    @Test
    public void testFindCTLSchemaByFqnAndVerAndTenantIdAndApplicationId() {
        CTLSchemaMetaInfoDto metaInfo = firstSchema.getMetaInfo();
        CTLSchemaDto found = ctlService.findCTLSchemaByFqnAndVerAndTenantIdAndApplicationId(metaInfo.getFqn(),
                firstSchema.getVersion(), metaInfo.getTenantId(), metaInfo.getApplicationId());
        Assert.assertEquals(firstSchema, found);
    }

    @Test
    public void testFindCTLSchemaById() {
        CTLSchemaDto found = ctlService.findCTLSchemaById(firstSchema.getId());
        Assert.assertEquals(firstSchema, found);
    }

    @Test
    public void testFindSystemCTLSchemas() {
        List<CTLSchemaDto> appSchemas = ctlService.findSystemCTLSchemas();
        Assert.assertEquals(getIdsDto(Arrays.asList(defaultSystemSchema, systemSchema)), getIdsDto(appSchemas));
    }

    @Test
    public void testFindSystemCTLSchemasMetaInfo() {
        List<CTLSchemaMetaInfoDto> appSchemas = ctlService.findSystemCTLSchemasMetaInfo();
        // Order of the service result is unspecified, so sort both sides by id.
        Comparator<HasId> comparator = new Comparator<HasId>() {
            @Override
            public int compare(HasId o1, HasId o2) {
                return o1.getId().compareTo(o2.getId());
            }
        };
        Collections.sort(appSchemas, comparator);
        List<CTLSchemaMetaInfoDto> expectedSchemas = Arrays.asList(defaultSystemSchema.getMetaInfo(), systemSchema.getMetaInfo());
        Collections.sort(expectedSchemas, comparator);
        Assert.assertEquals(expectedSchemas, appSchemas);
    }

    @Test
    public void testFindLatestCTLSchemaByFqn() {
        // Versions 77 and 78 exist for FQN+7; 80 and 81 for FQN+8 in app 1.
        CTLSchemaDto latestTenantScope = ctlService.findLatestCTLSchemaByFqnAndTenantIdAndApplicationId(DEFAULT_FQN+7, tenant.getId(), null);
        Assert.assertEquals(Integer.valueOf(78), latestTenantScope.getVersion());
        CTLSchemaDto latestAppScope = ctlService.findLatestCTLSchemaByFqnAndTenantIdAndApplicationId(DEFAULT_FQN+8, tenant.getId(), appDto.getId());
        Assert.assertEquals(Integer.valueOf(81), latestAppScope.getVersion());
    }

    @Test
    public void testScopeUpdate() {
        // FQN+9 exists only in app 1, so promoting it to tenant scope is allowed.
        CTLSchemaMetaInfoDto metaInfo = appSchema3.getMetaInfo();
        metaInfo.setApplicationId(null);
        ctlService.updateCTLSchemaMetaInfoScope(metaInfo);
        CTLSchemaDto found = ctlService.findCTLSchemaByFqnAndVerAndTenantIdAndApplicationId(metaInfo.getFqn(), appSchema3.getVersion(), metaInfo.getTenantId(), null);
        Assert.assertEquals(appSchema3, found);
    }

    @Test(expected = DatabaseProcessingException.class)
    public void testScopeUpdateForbidden() {
        // FQN+8 exists in two applications, so widening the scope must fail.
        CTLSchemaMetaInfoDto metaInfo = appSchema.getMetaInfo();
        metaInfo.setApplicationId(null);
        ctlService.updateCTLSchemaMetaInfoScope(metaInfo);
    }

    @Test
    public void testFindSiblingsFqns() {
        // The only sibling of FQN+8 in app 1 is the same FQN registered in app 2.
        List<CTLSchemaMetaInfoDto> siblingSchemas =
                ctlService.findSiblingsByFqnTenantIdAndApplicationId(appSchema.getMetaInfo().getFqn(), appSchema.getMetaInfo().getTenantId(), appSchema.getMetaInfo().getApplicationId());
        Assert.assertNotNull(siblingSchemas);
        Assert.assertEquals(1, siblingSchemas.size());
        Assert.assertEquals(app2Schema.getMetaInfo(), siblingSchemas.get(0));
    }

    @Test
    public void testFindCTLSchemaDependentsByFqnVersionTenantId() {
        // mainSchema depends on each of schemas 1..4, and nothing depends on mainSchema.
        List<CTLSchemaDto> appSchemas = ctlService.findCTLSchemaDependents(firstSchema.getMetaInfo().getFqn(), firstSchema.getVersion(),
                tenant.getId(), null);
        Assert.assertEquals(Arrays.asList(mainSchema), appSchemas);
        appSchemas = ctlService.findCTLSchemaDependents(secondSchema.getMetaInfo().getFqn(), secondSchema.getVersion(), tenant.getId(), null);
        Assert.assertEquals(Arrays.asList(mainSchema), appSchemas);
        appSchemas = ctlService.findCTLSchemaDependents(thirdSchema.getMetaInfo().getFqn(), thirdSchema.getVersion(), tenant.getId(), null);
        Assert.assertEquals(Arrays.asList(mainSchema), appSchemas);
        appSchemas = ctlService.findCTLSchemaDependents(fourthSchema.getMetaInfo().getFqn(), fourthSchema.getVersion(), tenant.getId(), null);
        Assert.assertEquals(Arrays.asList(mainSchema), appSchemas);
        appSchemas = ctlService.findCTLSchemaDependents(mainSchema.getMetaInfo().getFqn(), mainSchema.getVersion(), tenant.getId(), null);
        Assert.assertTrue(appSchemas.isEmpty());
    }

    /**
     * Saves 100 schemas concurrently and verifies each one is retrievable.
     */
    @Test
    public void multiThreadCTLSchemaSaveTest() throws InterruptedException, ExecutionException {
        List<Future<CTLSchemaDto>> list = new ArrayList<>();
        for (int i = 0; i < 100; i++) {
            list.add(executorService.submit(new Callable<CTLSchemaDto>() {
                @Override
                public CTLSchemaDto call() {
                    // Any exception is captured by the Future and rethrown from get(),
                    // so the previous catch-Throwable-and-rethrow was a no-op.
                    return ctlService.saveCTLSchema(generateCTLSchemaDto(generateTenantDto().getId()));
                }
            }));
        }
        List<CTLSchemaDto> schemas = new ArrayList<>(list.size());
        for (Future<CTLSchemaDto> future : list) {
            // Future.get() blocks until the task completes; the previous
            // busy-wait on isDone() just burned CPU.
            schemas.add(future.get());
        }
        Assert.assertEquals(100, schemas.size());
        for (CTLSchemaDto schema : schemas) {
            CTLSchemaDto savedSchema = ctlService.findCTLSchemaByFqnAndVerAndTenantIdAndApplicationId(DEFAULT_FQN, 100, schema.getMetaInfo().getTenantId(), null);
            Assert.assertNotNull(savedSchema);
            Assert.assertEquals(schema, savedSchema);
        }
    }

    @Test
    public void testShallowExport() throws Exception {
        // Shallow export must match the original alpha body (references kept).
        ObjectMapper mapper = new ObjectMapper();
        JsonNode expected = mapper.readTree(readSchemaFileAsString(TEST_CTL_SCHEMA_ALPHA));
        JsonNode actual = mapper.readTree(ctlService.shallowExport(alpha).getFileData());
        Assert.assertEquals(expected, actual);
    }

    @Test
    public void testFlatExport() throws Exception {
        // Flat export must equal alpha with beta/gamma inlined.
        ObjectMapper mapper = new ObjectMapper();
        JsonNode expected = mapper.readTree(readSchemaFileAsString(TEST_CTL_SCHEMA_ALPHA_FLAT));
        JsonNode actual = mapper.readTree(ctlService.flatExport(alpha).getFileData());
        Assert.assertEquals(expected, actual);
    }
}
|
<gh_stars>0
import { Link } from "gatsby";
import React from "react";
import Title from "../Typography/Title/Title";
import "./Header.css";
import { colors } from "../../constants/colors";
interface HeaderProps {
siteTitle: string;
}
const Header: React.FC<HeaderProps> = ({ siteTitle }) => (
<header className="Header" style={{ color: colors.accent }}>
<Title><h1>{siteTitle}</h1></Title>
</header>
);
export default Header;
|
#!/bin/sh
# CYBERWATCH SAS - 2017
#
# Security fix for USN-2578-1
#
# Security announcement date: 2015-04-27 00:00:00 UTC
# Script generation date: 2017-01-01 21:04:29 UTC
#
# Operating System: Ubuntu 14.10
# Architecture: i686
#
# Vulnerable packages fix on version:
# - libreoffice-core:1:4.3.7~rc2-0ubuntu1
#
# Last versions recommended by security team:
# - libreoffice-core:1:4.3.7~rc2-0ubuntu1
#
# CVE List:
# - CVE-2014-9093
# - CVE-2015-1774
#
# More details:
# - https://www.cyberwatch.fr/vulnerabilites
#
# Licence: Released under The MIT License (MIT), See LICENSE FILE
# Upgrade libreoffice-core to the patched version from USN-2578-1, non-interactively.
sudo apt-get install --only-upgrade libreoffice-core=1:4.3.7~rc2-0ubuntu1 -y
|
<reponame>RSpace/spree-heroku
# encoding: utf-8

# Heroku compatibility patch: Heroku's dyno filesystem is read-only outside
# tmp, so Spree's asset mirroring can raise in production. In that case the
# failure is deliberately swallowed (best-effort copy).
Spree::FileUtilz.class_eval do
  class << self
    # Patch mirror_files method to be silent when using r/o Heroku FS
    alias_method :mirror_files_old, :mirror_files
    def mirror_files(source, destination, create_backups = false)
      # Outside production behave exactly as before (errors propagate).
      return mirror_files_old(source, destination, create_backups) unless Rails.env == 'production'
      # In production ignore any failure; `rescue true` makes the call a no-op on error.
      mirror_files_old(source, destination, create_backups) rescue true
    end
  end
end
|
# (C) Datadog, Inc. 2018
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from .kyototycoon import KyotoTycoonCheck
from .__about__ import __version__
# Public API of the check package: the check class itself plus the package
# version, for `from ... import *` and introspection tooling.
__all__ = [
    '__version__',
    'KyotoTycoonCheck'
]
|
<filename>basicJava/singletonPattern/kr.co.singleton1/SingletonMainTest.java
package kr.co.singleton1;
public class SingletonMainTest {
	public static void main(String[] args) {
		/*
		 * Singleton class practice
		 *
		 */
		//Singleton obj=new Singleton(); // > compile error: the constructor is private,
		// so instances cannot be created with the new operator.
		//static access > use ClassName.staticFieldOrMethod.
		Singleton obj1=Singleton.getSingle();
		System.out.println(obj1); //kr.co.singleton1.Singleton@15db9742
		Singleton obj2=Singleton.getSingle();
		System.out.println(obj2); //kr.co.singleton1.Singleton@15db9742
		//No matter how many variables (obj1, obj2, obj3...) you print, the value is identical:
		//they all reference the same shared instance.
		if(obj1==obj2){
			System.out.println("같은 객체입니다");
		}else{
			System.out.println("다른 객체입니다.");
		}//if > prints the first message ("same object") since both refs are identical
	}//main
}//class
|
# ubuntu
# Pull-and-restart deploy helper: fetch the remote branch and, only when the
# remote has new commits, pull them and restart the app via npm.
username="your_user_name"
password="your_password"
branch="your_branch_name"
project_dir="your_project_dir"

# NOTE(review): embedding credentials in the URL leaks them into the remote
# config and process listings — prefer an SSH deploy key or credential helper.
url="https://$username:$password@github.com/doong-jo/membership-todo.git"

cd "$project_dir"
git remote set-url origin "$url"
git fetch

# Compare remote and local commit hashes; exit early when already up to date.
# (Consistent $(...) substitution and quoting, unlike the earlier mixed forms.)
origin_hash=$(git rev-parse "origin/$branch")
local_hash=$(git rev-parse "$branch")

if [ "$origin_hash" = "$local_hash" ]; then
    exit
fi

git pull "$url"
sudo npm install
sudo npm start
|
#include <torch/extension.h>
// Backward pass of an attention-score layer: applies the matmul chain rule to
// the upstream gradient to produce gradients for the query, the keys, the
// bias and the linear attention projection.
// NOTE(review): no shape checks are performed here; the exact expected ranks
// and layouts of the inputs must be confirmed against the matching forward
// kernel. grad_bias is literally grad_output reduced over dim 0 (keepdim).
std::vector<at::Tensor> attn_score_backward(
    const at::Tensor &grad_output,   // upstream gradient w.r.t. the scores
    const at::Tensor &attn_query,    // query tensor from the forward pass
    const at::Tensor &attn_keys,     // key tensor from the forward pass (unused here — TODO confirm intended)
    const at::Tensor &bias,          // bias tensor (only its gradient shape derives from grad_output)
    const at::Tensor &linear_attn) { // linear projection applied to the scores
  // Compute gradients of input tensors using chain rule and grad_output
  at::Tensor grad_attn_query = grad_output.matmul(linear_attn.transpose(0, 1));
  at::Tensor grad_attn_keys = grad_output.transpose(0, 1).matmul(attn_query).transpose(0, 1);
  at::Tensor grad_bias = grad_output.sum(0, true);
  at::Tensor grad_linear_attn = grad_output.transpose(0, 1).matmul(attn_query.transpose(0, 1));
  // Order matches the forward inputs: query, keys, bias, linear projection.
  return {grad_attn_query, grad_attn_keys, grad_bias, grad_linear_attn};
}
#!/bin/sh
# CocoaPods "Embed Frameworks" build phase: copies, strips and re-signs the
# built/vendored frameworks into the app bundle.
# NOTE(review): the script uses bash-isms (`function`, arrays, `[[`) under a
# #!/bin/sh shebang — Xcode runs phases through a bash-compatible shell on
# macOS; confirm before invoking from anything else.
set -e
set -u
set -o pipefail
function on_error {
  echo "$(realpath -mq "${0}"):$1: error: Unexpected failure"
}
trap 'on_error $LINENO' ERR
if [ -z ${FRAMEWORKS_FOLDER_PATH+x} ]; then
  # If FRAMEWORKS_FOLDER_PATH is not set, then there's nowhere for us to copy
  # frameworks to, so exit 0 (signalling the script phase was successful).
  exit 0
fi
echo "mkdir -p ${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
mkdir -p "${CONFIGURATION_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
# Defaults and shared locations used by the install_* helpers below.
COCOAPODS_PARALLEL_CODE_SIGN="${COCOAPODS_PARALLEL_CODE_SIGN:-false}"
SWIFT_STDLIB_PATH="${DT_TOOLCHAIN_DIR}/usr/lib/swift/${PLATFORM_NAME}"
BCSYMBOLMAP_DIR="BCSymbolMaps"
# This protects against multiple targets copying the same framework dependency at the same time. The solution
# was originally proposed here: https://lists.samba.org/archive/rsync/2008-February/020158.html
RSYNC_PROTECT_TMP_FILES=(--filter "P .*.??????")
# Copies and strips a vendored framework
# $1: framework path, either relative to BUILT_PRODUCTS_DIR or absolute.
install_framework()
{
  # Resolve the source: prefer the build-products dir, then its basename
  # there, then the literal path.
  if [ -r "${BUILT_PRODUCTS_DIR}/$1" ]; then
    local source="${BUILT_PRODUCTS_DIR}/$1"
  elif [ -r "${BUILT_PRODUCTS_DIR}/$(basename "$1")" ]; then
    local source="${BUILT_PRODUCTS_DIR}/$(basename "$1")"
  elif [ -r "$1" ]; then
    local source="$1"
  fi
  local destination="${TARGET_BUILD_DIR}/${FRAMEWORKS_FOLDER_PATH}"
  if [ -L "${source}" ]; then
    echo "Symlinked..."
    source="$(readlink "${source}")"
  fi
  if [ -d "${source}/${BCSYMBOLMAP_DIR}" ]; then
    # Locate and install any .bcsymbolmaps if present, and remove them from the .framework before the framework is copied
    find "${source}/${BCSYMBOLMAP_DIR}" -name "*.bcsymbolmap"|while read f; do
      echo "Installing $f"
      install_bcsymbolmap "$f" "$destination"
      rm "$f"
    done
    rmdir "${source}/${BCSYMBOLMAP_DIR}"
  fi
  # Use filter instead of exclude so missing patterns don't throw errors.
  echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --links --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${destination}\""
  rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --links --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${destination}"
  # Locate the copied binary (framework layout first, flat layout as fallback),
  # resolving a symlinked binary to its target.
  local basename
  basename="$(basename -s .framework "$1")"
  binary="${destination}/${basename}.framework/${basename}"
  if ! [ -r "$binary" ]; then
    binary="${destination}/${basename}"
  elif [ -L "${binary}" ]; then
    echo "Destination binary is symlinked..."
    dirname="$(dirname "${binary}")"
    binary="${dirname}/$(readlink "${binary}")"
  fi
  # Strip invalid architectures so "fat" simulator / device frameworks work on device
  if [[ "$(file "$binary")" == *"dynamically linked shared library"* ]]; then
    strip_invalid_archs "$binary"
  fi
  # Resign the code if required by the build settings to avoid unstable apps
  code_sign_if_enabled "${destination}/$(basename "$1")"
  # Embed linked Swift runtime libraries. No longer necessary as of Xcode 7.
  if [ "${XCODE_VERSION_MAJOR}" -lt 7 ]; then
    local swift_runtime_libs
    swift_runtime_libs=$(xcrun otool -LX "$binary" | grep --color=never @rpath/libswift | sed -E s/@rpath\\/\(.+dylib\).*/\\1/g | uniq -u)
    for lib in $swift_runtime_libs; do
      echo "rsync -auv \"${SWIFT_STDLIB_PATH}/${lib}\" \"${destination}\""
      rsync -auv "${SWIFT_STDLIB_PATH}/${lib}" "${destination}"
      code_sign_if_enabled "${destination}/${lib}"
    done
  fi
}
# Copies and strips a vendored dSYM
# $1: path to the .dSYM bundle; $2 (optional, default true): whether to warn
# when none of the dSYM's architectures match the current build.
install_dsym() {
  local source="$1"
  warn_missing_arch=${2:-true}
  if [ -r "$source" ]; then
    # Copy the dSYM into the targets temp dir.
    echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${source}\" \"${DERIVED_FILES_DIR}\""
    rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${source}" "${DERIVED_FILES_DIR}"
    local basename
    basename="$(basename -s .dSYM "$source")"
    binary_name="$(ls "$source/Contents/Resources/DWARF")"
    binary="${DERIVED_FILES_DIR}/${basename}.dSYM/Contents/Resources/DWARF/${binary_name}"
    # Strip invalid architectures from the dSYM.
    if [[ "$(file "$binary")" == *"Mach-O "*"dSYM companion"* ]]; then
      strip_invalid_archs "$binary" "$warn_missing_arch"
    fi
    if [[ $STRIP_BINARY_RETVAL == 0 ]]; then
      # Move the stripped file into its final destination.
      # Fix: the echoed command previously showed "${basename}.framework.dSYM",
      # which did not match the "${basename}.dSYM" path actually synced below.
      echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --links --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${DERIVED_FILES_DIR}/${basename}.dSYM\" \"${DWARF_DSYM_FOLDER_PATH}\""
      rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --links --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${DERIVED_FILES_DIR}/${basename}.dSYM" "${DWARF_DSYM_FOLDER_PATH}"
    else
      # The dSYM was not stripped at all, in this case touch a fake folder so the input/output paths from Xcode do not reexecute this script because the file is missing.
      mkdir -p "${DWARF_DSYM_FOLDER_PATH}"
      touch "${DWARF_DSYM_FOLDER_PATH}/${basename}.dSYM"
    fi
  fi
}
# Used as a return value for each invocation of `strip_invalid_archs` function.
STRIP_BINARY_RETVAL=0
# Strip invalid architectures
# $1: Mach-O binary to strip in place; $2 (optional, default true): warn when
# no architecture intersects the current build's ARCHS.
# Sets STRIP_BINARY_RETVAL to 1 when nothing matched (binary left untouched),
# 0 otherwise.
strip_invalid_archs() {
  binary="$1"
  warn_missing_arch=${2:-true}
  # Get architectures for current target binary
  binary_archs="$(lipo -info "$binary" | rev | cut -d ':' -f1 | awk '{$1=$1;print}' | rev)"
  # Intersect them with the architectures we are building for
  intersected_archs="$(echo ${ARCHS[@]} ${binary_archs[@]} | tr ' ' '\n' | sort | uniq -d)"
  # If there are no archs supported by this binary then warn the user
  if [[ -z "$intersected_archs" ]]; then
    if [[ "$warn_missing_arch" == "true" ]]; then
      echo "warning: [CP] Vendored binary '$binary' contains architectures ($binary_archs) none of which match the current build architectures ($ARCHS)."
    fi
    STRIP_BINARY_RETVAL=1
    return
  fi
  # Remove every architecture not in ARCHS, one lipo pass per arch.
  stripped=""
  for arch in $binary_archs; do
    if ! [[ "${ARCHS}" == *"$arch"* ]]; then
      # Strip non-valid architectures in-place
      lipo -remove "$arch" -output "$binary" "$binary"
      stripped="$stripped $arch"
    fi
  done
  if [[ "$stripped" ]]; then
    echo "Stripped $binary of architectures:$stripped"
  fi
  STRIP_BINARY_RETVAL=0
}
# Copies the bcsymbolmap files of a vendored framework
# $1: path to a .bcsymbolmap file; it is synced into BUILT_PRODUCTS_DIR.
install_bcsymbolmap() {
    local bcsymbolmap_path="$1"
    local destination="${BUILT_PRODUCTS_DIR}"
    # Fix: escape the inner quotes so the echoed command matches the rsync
    # invocation below, consistent with the other install_* helpers.
    echo "rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter \"- CVS/\" --filter \"- .svn/\" --filter \"- .git/\" --filter \"- .hg/\" --filter \"- Headers\" --filter \"- PrivateHeaders\" --filter \"- Modules\" \"${bcsymbolmap_path}\" \"${destination}\""
    rsync --delete -av "${RSYNC_PROTECT_TMP_FILES[@]}" --filter "- CVS/" --filter "- .svn/" --filter "- .git/" --filter "- .hg/" --filter "- Headers" --filter "- PrivateHeaders" --filter "- Modules" "${bcsymbolmap_path}" "${destination}"
}
# Signs a framework with the provided identity
# $1: path of the bundle to sign. No-op when signing is disabled or no
# identity is configured.
code_sign_if_enabled() {
  if [ -n "${EXPANDED_CODE_SIGN_IDENTITY:-}" -a "${CODE_SIGNING_REQUIRED:-}" != "NO" -a "${CODE_SIGNING_ALLOWED}" != "NO" ]; then
    # Use the current code_sign_identity
    echo "Code Signing $1 with Identity ${EXPANDED_CODE_SIGN_IDENTITY_NAME}"
    local code_sign_cmd="/usr/bin/codesign --force --sign ${EXPANDED_CODE_SIGN_IDENTITY} ${OTHER_CODE_SIGN_FLAGS:-} --preserve-metadata=identifier,entitlements '$1'"
    if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then
      # Run codesign in the background; the script waits for all jobs at the end.
      code_sign_cmd="$code_sign_cmd &"
    fi
    echo "$code_sign_cmd"
    eval "$code_sign_cmd"
  fi
}
# Both configurations embed the same frameworks, so handle them in one branch;
# any other configuration embeds nothing (as before).
if [[ "$CONFIGURATION" == "Debug" || "$CONFIGURATION" == "Release" ]]; then
  install_framework "${BUILT_PRODUCTS_DIR}/Alamofire/Alamofire.framework"
  install_framework "${BUILT_PRODUCTS_DIR}/Kingfisher/Kingfisher.framework"
fi
# When signing ran in the background, wait for every codesign job to finish.
if [ "${COCOAPODS_PARALLEL_CODE_SIGN}" == "true" ]; then
  wait
fi
|
<filename>pkg/smartcontract/trigger/trigger_type_test.go
package trigger
import (
"testing"
"github.com/stretchr/testify/assert"
)
// TestStringer verifies the human-readable name of each trigger type.
func TestStringer(t *testing.T) {
	cases := []struct {
		trig Type
		want string
	}{
		{System, "System"},
		{Application, "Application"},
		{Verification, "Verification"},
	}
	for _, c := range cases {
		assert.Equal(t, c.want, c.trig.String())
	}
}
// TestEncodeBynary verifies the byte value of each trigger type.
// NOTE(review): "Bynary" is a typo for "Binary"; the name is kept so existing
// -run filters keep matching.
func TestEncodeBynary(t *testing.T) {
	cases := []struct {
		trig Type
		want byte
	}{
		{System, 0x01},
		{Verification, 0x20},
		{Application, 0x40},
	}
	for _, c := range cases {
		assert.Equal(t, c.want, byte(c.trig))
	}
}
// TestDecodeBynary verifies that each byte value converts back to the
// expected trigger type. (Name typo kept intentionally; see TestEncodeBynary.)
func TestDecodeBynary(t *testing.T) {
	cases := []struct {
		want Type
		raw  byte
	}{
		{System, 0x01},
		{Verification, 0x20},
		{Application, 0x40},
	}
	for _, c := range cases {
		assert.Equal(t, c.want, Type(c.raw))
	}
}
|
#!/usr/bin/env bash
# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
# cd to the repo root so the find below scans the whole repository
REPO_ROOT=$(git rev-parse --show-toplevel)
cd "${REPO_ROOT}"
# check for gofmt diffs, skipping vendored code; -print0 / xargs -0 keeps
# filenames containing spaces or newlines intact (plain xargs would split them)
diff=$(find . -name "*.go" -not -path "*/vendor/*" -print0 | xargs -0 gofmt -s -d 2>&1)
if [[ -n "${diff}" ]]; then
  echo "${diff}"
  echo
  echo "Please run hack/update/gofmt.sh"
  exit 1
fi
|
#!/bin/bash
# Create an application role and database on a linked PostgreSQL server.
# DB_PORT_5432_TCP_* look like Docker legacy container-link variables — TODO confirm.
PSQL_IP=$DB_PORT_5432_TCP_ADDR
PSQL_PORT=$DB_PORT_5432_TCP_PORT
# NOTE(review): $APP_USER/$APP_PASS/$APP_DB are interpolated directly into the
# SQL text with no quoting/escaping, so the values must come from a trusted source.
echo "
CREATE ROLE $APP_USER WITH LOGIN PASSWORD '$APP_PASS' VALID UNTIL 'infinity';
CREATE DATABASE $APP_DB WITH ENCODING 'UNICODE' TEMPLATE=template0;" \
| PGPASSWORD="$DB_PASS" psql -h $PSQL_IP -p $PSQL_PORT -d postgres -U $DB_USER -w
|
/*
The MIT License
Copyright (c) 2020 headwire.com, Inc
https://github.com/headwirecom/jsonforms-react-spectrum-renderers
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
const path = require('path');

// Shared webpack build configuration (output to ./lib with source maps).
// NOTE(review): the `loaders` array form on rules is legacy webpack 1/2
// syntax (superseded by `use`) — confirm the pinned webpack version before
// modernising.
module.exports = {
  output: {
    path: path.resolve('./', 'lib'),
  },
  // Enable sourcemaps for debugging webpack's output.
  devtool: 'source-map',
  resolve: {
    // Add '.ts' as resolvable extensions.
    extensions: ['.ts', '.js', '.tsx'],
  },
  module: {
    rules: [
      {
        enforce: 'pre',
        test: /\.js$/,
        exclude: /node_modules/,
        loader: 'source-map-loader',
      },
      {
        test: /\.ts$/, // plain .ts files are handled by
        loaders: ['ts-loader'], // ts-loader alone (no babel step here)
        exclude: /node_modules/, // ignore node_modules
      },
      {
        test: /\.tsx$/, // .tsx files will be processed by
        loaders: ['babel-loader', 'ts-loader'], // first ts-loader, then babel-loader
        exclude: /node_modules/, // ignore node_modules
      },
      {
        test: /\.jsx?$/, // all js and jsx files will be processed by
        loader: 'babel-loader', // babel-loader
        exclude: /node_modules/, // ignore node_modules
      },
      {
        test: /\.css$/,
        use: ['style-loader', 'css-loader'],
      },
    ],
  },
  // When importing a module whose path matches one of the following, just
  // assume a corresponding global variable exists and use that instead.
  // This is important because it allows us to avoid bundling all of our
  // dependencies, which allows browsers to cache those libraries between builds.
  externals: {},
};
|
import React from 'react';
import isEqual from 'lodash/lang/isEqual';
// One page link (or a disabled <span> placeholder) inside the pagination widget.
class PaginationLink extends React.Component {
  componentWillMount() {
    // Bind once up-front instead of creating a fresh closure on every render.
    // NOTE(review): componentWillMount is legacy lifecycle; appropriate for
    // the React.PropTypes-era React this file targets — confirm on upgrade.
    this.handleClick = this.handleClick.bind(this);
  }
  shouldComponentUpdate(nextProps) {
    // Deep-compare props; skip re-render when nothing actually changed.
    return !isEqual(this.props, nextProps);
  }
  handleClick(e) {
    // Forward the click together with the page number this link represents.
    this.props.handleClick(this.props.pageNumber, e);
  }
  render() {
    let {cssClasses, label, ariaLabel, url, isDisabled} = this.props;
    // Disabled links render as a plain <span> with no href/handler.
    let tagName = 'span';
    let attributes = {
      className: cssClasses.link,
      dangerouslySetInnerHTML: {
        __html: label
      }
    };
    // "Enable" the element, by making it a link
    if (!isDisabled) {
      tagName = 'a';
      attributes = {
        ...attributes,
        // NOTE(review): `ariaLabel` is not a standard DOM attribute name
        // (the HTML attribute is `aria-label`) — verify this actually renders.
        ariaLabel,
        href: url,
        onClick: this.handleClick
      };
    }
    let element = React.createElement(tagName, attributes);
    return (
      <li className={cssClasses.item}>
        {element}
      </li>
    );
  }
}
// Labels may be plain strings or numbers; shared required validator.
const stringOrNumberRequired = React.PropTypes.oneOfType([
  React.PropTypes.string,
  React.PropTypes.number
]).isRequired;

PaginationLink.propTypes = {
  ariaLabel: stringOrNumberRequired,
  cssClasses: React.PropTypes.shape({
    item: React.PropTypes.string,
    link: React.PropTypes.string
  }),
  handleClick: React.PropTypes.func.isRequired,
  isDisabled: React.PropTypes.bool,
  label: stringOrNumberRequired,
  pageNumber: React.PropTypes.number,
  url: React.PropTypes.string
};

export default PaginationLink;
|
/* jshint indent: 1 */

// Sequelize model for the DEL_SALEINFO table (deleted/archived sale records).
// NOTE(review): looks machine-generated (sequelize-auto style); the string
// defaultValue literals like '((0))' are database-side default expressions
// (MSSQL-style) — confirm against the actual schema before editing them.
module.exports = function(sequelize, DataTypes) {
	return sequelize.define('delSaleinfo', {
		// Surrogate auto-increment primary key.
		recid: {
			type: DataTypes.INTEGER,
			allowNull: false,
			primaryKey: true,
			autoIncrement: true,
			field: 'RECID'
		},
		salekey: {
			type: DataTypes.INTEGER,
			allowNull: true,
			defaultValue: '((0))',
			field: 'SALEKEY'
		},
		realkey: {
			type: DataTypes.INTEGER,
			allowNull: true,
			defaultValue: '((0))',
			field: 'REALKEY'
		},
		grantee: {
			type: DataTypes.CHAR,
			allowNull: true,
			field: 'GRANTEE'
		},
		grantor: {
			type: DataTypes.CHAR,
			allowNull: true,
			field: 'GRANTOR'
		},
		saledate: {
			type: DataTypes.DATEONLY,
			allowNull: true,
			field: 'SALEDATE'
		},
		deedpage: {
			type: DataTypes.CHAR,
			allowNull: true,
			field: 'DEEDPAGE'
		},
		plotpage: {
			type: DataTypes.CHAR,
			allowNull: true,
			field: 'PLOTPAGE'
		},
		saleprice: {
			type: DataTypes.BIGINT,
			allowNull: true,
			defaultValue: '((0))',
			field: 'SALEPRICE'
		},
		saleclass: {
			type: DataTypes.CHAR,
			allowNull: true,
			field: 'SALECLASS'
		},
		strat: {
			type: DataTypes.INTEGER,
			allowNull: true,
			defaultValue: '((0))',
			field: 'STRAT'
		},
		reason: {
			type: DataTypes.CHAR,
			allowNull: true,
			field: 'REASON'
		},
		qualifier: {
			type: DataTypes.CHAR,
			allowNull: true,
			field: 'QUALIFIER'
		},
		mktval: {
			type: DataTypes.BIGINT,
			allowNull: true,
			defaultValue: '((0))',
			field: 'MKTVAL'
		},
		comment: {
			type: DataTypes.STRING,
			allowNull: true,
			field: 'COMMENT'
		},
		ptd: {
			type: DataTypes.CHAR,
			allowNull: true,
			field: 'PTD'
		},
		// NOTE(review): `exists` shadows Object.prototype-ish naming and EXISTS
		// is a SQL keyword; the explicit `field` mapping keeps it safe here.
		exists: {
			type: DataTypes.BOOLEAN,
			allowNull: true,
			defaultValue: '0',
			field: 'EXISTS'
		},
		pt61Num: {
			type: DataTypes.CHAR,
			allowNull: true,
			field: 'PT61_NUM'
		},
		rett: {
			type: DataTypes.DOUBLE,
			allowNull: true,
			defaultValue: '((0))',
			field: 'RETT'
		},
		instrument: {
			type: DataTypes.CHAR,
			allowNull: true,
			field: 'INSTRUMENT'
		},
		salesadj: {
			type: DataTypes.BIGINT,
			allowNull: true,
			defaultValue: '((0))',
			field: 'SALESADJ'
		},
		netSp: {
			type: DataTypes.BIGINT,
			allowNull: true,
			defaultValue: '((0))',
			field: 'NET_SP'
		},
		mavval: {
			type: DataTypes.BIGINT,
			allowNull: true,
			defaultValue: '((0))',
			field: 'MAVVAL'
		},
		digestVal: {
			type: DataTypes.BOOLEAN,
			allowNull: true,
			defaultValue: '0',
			field: 'DIGEST_VAL'
		},
		adddate: {
			type: DataTypes.DATEONLY,
			allowNull: true,
			field: 'ADDDATE'
		},
		surveysent: {
			type: DataTypes.BOOLEAN,
			allowNull: true,
			field: 'SURVEYSENT'
		},
		surveyreceived: {
			type: DataTypes.BOOLEAN,
			allowNull: true,
			field: 'SURVEYRECEIVED'
		},
		saleacres: {
			type: DataTypes.DOUBLE,
			allowNull: true,
			field: 'SALEACRES'
		}
	}, {
		// Legacy table: explicit name, no createdAt/updatedAt columns.
		tableName: 'DEL_SALEINFO',
		timestamps: false
	});
};
|
#!/bin/bash
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Run every P.CL-* / T.CL-* script in the current tree, writing CSV results
# into results-Latency/.
# Fix: the original pattern '\.\/[P|T].CL-.*\.sh' had two regex bugs —
# [P|T] also matched a literal '|' (pipe is not an alternation operator inside
# a bracket expression), and the unescaped '.' after it matched any character.
find . -regex '\./[PT]\.CL-.*\.sh' -exec {} -o results-Latency/ --csv \;
|
<filename>example/example-service/src/main/java/com/company/example/util/Config.java
package com.company.example.util;
/**
 * System configuration class (currently an empty placeholder; no settings
 * are defined yet).
 *
 * @author 谭海潮
 *
 */
public class Config {
}
|
# Call the m3o search Vote endpoint, authenticating with $M3O_API_TOKEN.
payload='{
  "message": "Launch it!"
}'
curl "https://api.m3o.com/v1/search/Vote" \
  -H "Content-Type: application/json" \
  -H "Authorization: Bearer $M3O_API_TOKEN" \
  -d "$payload"
|
# Register client-node1 in the ansible inventory under a new [clients] group.
{
  echo ''
  echo '[clients]'
  echo 'client-node1'
} >> /home/vagrant/ansible/hosts
# Enable user_config in the ceph-ansible client group vars: the first sed
# expression uncomments the "#user_config: false" line, the second flips
# its value to true — same order as two separate passes.
cd /usr/share/ceph-ansible/group_vars/
cp clients.yml.sample clients.yml
sed -i \
  -e "/^#.*user_config: false/s/^#//" \
  -e "/user_config: false/s/false/true/" \
  /usr/share/ceph-ansible/group_vars/clients.yml
|
#!/bin/sh
# Generate etcd TLS certificates with cfssl.
#
# create_etcd_cert PROFILE
#   Signs a certificate for PROFILE using ca.pem / ca-key.pem and the
#   matching "PROFILE-csr.json" request, writing PROFILE.pem and
#   PROFILE-key.pem via cfssljson.
create_etcd_cert() {
    echo "generate $1 certificates"
    # Quote "$1" consistently so profile names survive word splitting
    # (the original quoted only some of its uses).
    /usr/local/bin/cfssl gencert -ca=ca.pem -ca-key="ca-key.pem" --config="ca-config.json" -profile="$1" "$1-csr.json" | /usr/local/bin/cfssljson -bare "$1"
}
# Self-signed CA first, then the three leaf certificates.
/usr/local/bin/cfssl gencert -initca ca-csr.json | /usr/local/bin/cfssljson -bare ca
create_etcd_cert "client"
create_etcd_cert "peer"
create_etcd_cert "server"
|
#!/bin/sh
# Copyright 2005-2019 ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
#
# In applying this licence, ECMWF does not waive the privileges and immunities granted to it by
# virtue of its status as an intergovernmental organisation nor does it submit to any jurisdiction.
#
# Test: for every input BUFR file, the bufr_split_by_rdbSubtype tool must
# produce the same per-subtype output as a codes_bufr_filter rule keyed on
# [rdbSubtype]; the two result sets are compared with bufr_compare.
. ./include.sh
set -u
#Define a common label for all the tmp files
label="bufr_split_by_rdbSubtype"
temp=$label.temp
fRules=${label}.filter
# Do all the work in a temporary directory
temp_dir=tempdir.${label}
mkdir -p $temp_dir
cd $temp_dir
# Filter rule: write each message to a file named after its rdbSubtype.
cat > $fRules <<EOF
write "out.filter_by_rdbSubtype.[rdbSubtype].bufr";
EOF
bufr_files=`cat ${data_dir}/bufr/bufr_data_files.txt`
for f in ${bufr_files}; do
    fpath=${data_dir}/bufr/$f
    # This will create output files like out.filter_by_rdbSubtype.*
    ${tools_dir}/codes_bufr_filter $fRules $fpath
    # This will create output files like split_rdbSubtype*.bufr
    ${tools_dir}/bufr_split_by_rdbSubtype -v $fpath
    # Pair each filter output with the corresponding split output by the
    # subtype embedded in the file name (third dot-separated field).
    for sp in out.filter_by_rdbSubtype.*; do
        st=`echo $sp | awk -F. '{print $3}'`
        ${tools_dir}/bufr_compare $sp split_rdbSubtype.$st.bufr
    done
    # Remove per-file outputs so the next iteration starts clean.
    rm -f split_rdbSubtype*.bufr
    rm -f out.filter_by_rdbSubtype.*
done
# Clean up
# -------------
cd $test_dir
rm -fr $temp_dir
|
<gh_stars>0
// ============================================================================
//
// Copyright (C) 2006-2021 Talend Inc. - www.talend.com
//
// This source code is available under agreement available at
// %InstallDIR%\features\org.talend.rcp.branding.%PRODUCTNAME%\%PRODUCTNAME%license.txt
//
// You should have received a copy of the agreement
// along with this program; if not, write to Talend SA
// 9 rue Pages 92150 Suresnes, France
//
// ============================================================================
package org.talend.repository.model.migration;
import org.apache.commons.lang.StringUtils;
import java.util.ArrayList;
import java.util.Date;
import java.util.GregorianCalendar;
import java.util.List;
import org.talend.commons.exception.PersistenceException;
import org.talend.core.model.migration.AbstractItemMigrationTask;
import org.talend.core.model.properties.Item;
import org.talend.core.model.properties.Property;
import org.talend.core.model.repository.ERepositoryObjectType;
import org.talend.core.runtime.CoreRuntimePlugin;
import org.talend.core.utils.WorkspaceUtils;
import org.talend.migration.IProjectMigrationTask;
import org.talend.repository.items.importexport.handlers.imports.ImportBasicHandler;
import org.talend.repository.model.IProxyRepositoryFactory;
/**
 * Migration task that sanitises context item labels.
 *
 * <p>For every CONTEXT repository item whose label is not a valid workspace
 * name, the label is rewritten through
 * {@code ImportBasicHandler#getPropertyLabel}, the display name is reset to
 * the trimmed original label, and the item is saved back through the proxy
 * repository factory.</p>
 */
public class ResetItemLabelMigrationTask extends AbstractItemMigrationTask implements IProjectMigrationTask{

    @Override
    public Date getOrder() {
        // Task ordering date: 2017-04-13 12:00 (GregorianCalendar months are 0-based).
        GregorianCalendar orderDate = new GregorianCalendar(2017, 3, 13, 12, 0, 0);
        return orderDate.getTime();
    }

    @Override
    public ExecutionResult execute(Item item) {
        IProxyRepositoryFactory factory = CoreRuntimePlugin.getInstance().getProxyRepositoryFactory();
        ImportBasicHandler labelHandler = new ImportBasicHandler();
        Property itemProperty = item.getProperty();
        // No property, or property without a label: nothing to migrate.
        String rawLabel = itemProperty == null ? null : itemProperty.getLabel();
        if (rawLabel == null) {
            return ExecutionResult.NOTHING_TO_DO;
        }
        try {
            if (!WorkspaceUtils.checkNameIsOK(rawLabel)) {
                // Invalid workspace name: normalise the label, keep the
                // trimmed original as the display name, then persist.
                itemProperty.setLabel(labelHandler.getPropertyLabel(StringUtils.trimToNull(rawLabel)));
                itemProperty.setDisplayName(StringUtils.trimToNull(rawLabel));
                factory.save(item, true);
            }
        } catch (PersistenceException e) {
            return ExecutionResult.FAILURE;
        }
        return ExecutionResult.SUCCESS_WITH_ALERT;
    }

    @Override
    public List<ERepositoryObjectType> getTypes() {
        // Only CONTEXT repository items are handled by this task.
        List<ERepositoryObjectType> handled = new ArrayList<ERepositoryObjectType>();
        handled.add(ERepositoryObjectType.CONTEXT);
        return handled;
    }
}
|
<gh_stars>0
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.h2o.algos
import hex.deeplearning.DeepLearning
import hex.deeplearning.DeepLearningModel.DeepLearningParameters
import hex.schemas.DeepLearningV3.DeepLearningParametersV3
import org.apache.spark.annotation.Since
import org.apache.spark.h2o.H2OContext
import org.apache.spark.ml.h2o.models.H2OMOJOModel
import org.apache.spark.ml.h2o.param.H2OAlgoParams
import org.apache.spark.ml.util._
import org.apache.spark.sql.SQLContext
import water.support.ModelSerializationSupport
/**
  * H2O Deep learning algorithm exposed via Spark ML pipelines.
  *
  * TODO: There are still bunch of parameters defined DeepLearningParameters which need to be ported here
  */
class H2ODeepLearning(parameters: Option[DeepLearningParameters], override val uid: String)
                     (implicit h2oContext: H2OContext, sqlContext: SQLContext)
  extends H2OAlgorithm[DeepLearningParameters, H2OMOJOModel](parameters)
    with H2ODeepLearningParams {

  // Default constructor: no explicit parameters, random "dl"-prefixed uid.
  def this()(implicit h2oContext: H2OContext, sqlContext: SQLContext) = this(None, Identifiable.randomUID("dl"))

  // Used by the MLReader machinery: the implicit contexts are passed explicitly.
  def this(uid: String, hc: H2OContext, sqlContext: SQLContext) = this(None, uid)(hc, sqlContext)

  def this(parameters: DeepLearningParameters)(implicit h2oContext: H2OContext, sqlContext: SQLContext) = this(Option(parameters), Identifiable.randomUID("dl"))

  def this(parameters: DeepLearningParameters, uid: String)(implicit h2oContext: H2OContext, sqlContext: SQLContext) = this(Option(parameters), uid)

  override def defaultFileName: String = H2ODeepLearning.defaultFileName

  // Train the H2O model and wrap the resulting MOJO as a Spark ML model.
  override def trainModel(params: DeepLearningParameters): H2OMOJOModel = {
    val model = new DeepLearning(params).trainModel().get()
    new H2OMOJOModel(ModelSerializationSupport.getMojoData(model))
  }
}
// Companion object: MLReadable support so saved pipeline stages can be reloaded.
object H2ODeepLearning extends MLReadable[H2ODeepLearning] {

  // File name under the model path where the algorithm parameters are persisted.
  private final val defaultFileName = "dl_params"

  @Since("1.6.0")
  override def read: MLReader[H2ODeepLearning] = new H2OAlgorithmReader[H2ODeepLearning, DeepLearningParameters](defaultFileName)

  @Since("1.6.0")
  override def load(path: String): H2ODeepLearning = super.load(path)
}
/**
  * Parameters here can be set as normal and are duplicated to DeepLearningParameters H2O object
  */
trait H2ODeepLearningParams extends H2OAlgoParams[DeepLearningParameters] {

  // Concrete REST schema type backing these parameters.
  type H2O_SCHEMA = DeepLearningParametersV3

  protected def paramTag = reflect.classTag[DeepLearningParameters]

  protected def schemaTag = reflect.classTag[H2O_SCHEMA]

  //
  // Param definitions
  //
  private final val epochs = doubleParam("epochs")
  private final val l1 = doubleParam("l1")
  private final val l2 = doubleParam("l2")
  private final val hidden = intArrayParam("hidden")

  //
  // Default values — seeded from the backing H2O parameters object.
  //
  setDefault(
    epochs -> parameters._epochs,
    l1 -> parameters._l1,
    l2 -> parameters._l2,
    hidden -> parameters._hidden)

  //
  // Getters
  //
  /** @group getParam */
  def getEpochs() = $(epochs)

  /** @group getParam */
  def getL1() = $(l1)

  /** @group getParam */
  def getL2() = $(l2)

  /** @group getParam */
  def getHidden() = $(hidden)

  //
  // Setters
  //
  /** @group setParam */
  def setEpochs(value: Double): this.type = set(epochs, value)

  /** @group setParam */
  def setL1(value: Double): this.type = set(l1, value)

  /** @group setParam */
  def setL2(value: Double): this.type = set(l2, value)

  /** @group setParam */
  def setHidden(value: Array[Int]): this.type = set(hidden, value)

  // Copy the Spark-side param values back onto the H2O parameters object
  // before training.
  def updateParams(): Unit ={
    parameters._epochs = $(epochs)
    parameters._l1 = $(l1)
    parameters._l2 = $(l2)
    parameters._hidden = $(hidden)
  }
}
|
"""The tests for the demo light component."""
import pytest
from homeassistant.components import light
from homeassistant.setup import async_setup_component
from tests.components.light import common
ENTITY_LIGHT = "light.bed_light"
@pytest.fixture(autouse=True)
def setup_comp(hass):
    """Set up demo component."""
    # Autouse: every test in this module runs against the demo light
    # platform; setup is driven synchronously on the test event loop.
    hass.loop.run_until_complete(
        async_setup_component(hass, light.DOMAIN, {"light": {"platform": "demo"}})
    )
async def test_state_attributes(hass):
    """Test light state attributes.

    Exercises xy-color, rgb-color/white-value, color-temp/effect and
    kelvin/brightness-pct turn-on paths of the demo light and checks the
    resulting state attributes.
    """
    await common.async_turn_on(hass, ENTITY_LIGHT, xy_color=(0.4, 0.4), brightness=25)
    state = hass.states.get(ENTITY_LIGHT)
    assert light.is_on(hass, ENTITY_LIGHT)
    # Consistency fix: all assertions now read `actual == expected`
    # (the xy-color assert was previously reversed relative to the rest).
    assert state.attributes.get(light.ATTR_XY_COLOR) == (0.4, 0.4)
    assert state.attributes.get(light.ATTR_BRIGHTNESS) == 25
    assert state.attributes.get(light.ATTR_RGB_COLOR) == (255, 234, 164)
    assert state.attributes.get(light.ATTR_EFFECT) == "rainbow"
    await common.async_turn_on(
        hass, ENTITY_LIGHT, rgb_color=(251, 253, 255), white_value=254
    )
    state = hass.states.get(ENTITY_LIGHT)
    assert state.attributes.get(light.ATTR_WHITE_VALUE) == 254
    # The demo platform round-trips rgb through xy, hence the slight shift.
    assert state.attributes.get(light.ATTR_RGB_COLOR) == (250, 252, 255)
    assert state.attributes.get(light.ATTR_XY_COLOR) == (0.319, 0.326)
    await common.async_turn_on(hass, ENTITY_LIGHT, color_temp=400, effect="none")
    state = hass.states.get(ENTITY_LIGHT)
    assert state.attributes.get(light.ATTR_COLOR_TEMP) == 400
    assert state.attributes.get(light.ATTR_MIN_MIREDS) == 153
    assert state.attributes.get(light.ATTR_MAX_MIREDS) == 500
    assert state.attributes.get(light.ATTR_EFFECT) == "none"
    await common.async_turn_on(hass, ENTITY_LIGHT, kelvin=3000, brightness_pct=50)
    state = hass.states.get(ENTITY_LIGHT)
    # 3000 K ~= 333 mireds; 50% of 255 rounds to 127.
    assert state.attributes.get(light.ATTR_COLOR_TEMP) == 333
    assert state.attributes.get(light.ATTR_BRIGHTNESS) == 127
async def test_turn_off(hass):
    """Test light turn off method."""
    # Turn the light on first so turning it off is observable.
    service_data = {"entity_id": ENTITY_LIGHT}
    await hass.services.async_call("light", "turn_on", service_data, blocking=True)
    assert light.is_on(hass, ENTITY_LIGHT)
    await hass.services.async_call("light", "turn_off", service_data, blocking=True)
    assert not light.is_on(hass, ENTITY_LIGHT)
async def test_turn_off_without_entity_id(hass):
    """Test light turn off all lights."""
    # Targeting "all" addresses every light entity at once.
    all_lights = {"entity_id": "all"}
    await hass.services.async_call("light", "turn_on", all_lights, blocking=True)
    assert light.is_on(hass, ENTITY_LIGHT)
    await hass.services.async_call("light", "turn_off", all_lights, blocking=True)
    assert not light.is_on(hass, ENTITY_LIGHT)
|
import { baseApi } from '../../app/api/base';
// API slice for blog entities, built on the shared base API factory.
export const blogApi = baseApi({
  entityTypes: ['blogs'],
  reducerPath: 'blogs',
  // No custom endpoints: the CRUD resolvers come from baseApi itself.
  resolvers: (builder) => ({})
});
// Generated CRUD hooks for use in React components.
export const {
  useLoadBlogsQuery,
  useLoadPagingBlogsQuery,
  useUpdateBlogsMutation,
  useDeleteBlogsMutation,
  useCreateBlogsMutation
} = blogApi;
// Raw endpoint objects, for imperative dispatch / prefetching.
export const {
  endpoints: { loadBlogs, loadPagingBlogs, updateBlogs, deleteBlogs, createBlogs }
} = blogApi;
|
#!/usr/bin/env sh
# Run the htmlviewer searcher unit tests.
#
# The searcher lives inside the ../htmlviewer script; this wrapper extracts
# the testable portion into a temporary CommonJS module, appends exports for
# every top-level const, and runs jest against it.
set -e
jsdir="${TMPDIR:-/tmp}"
jsfile="$jsdir"/htmlviewer_searcher.js
echo "temporary file: $jsfile"
cd "$(dirname "$0")"
# extract source
# Keep only the SEARCH_ constants and the block between the
# "Testable searcher" markers; the final "d" deletes everything else.
sed '/^const SEARCH_/p
/^ \/\/ Testable searcher/,/^ \/\/ @}/p
d
' ../htmlviewer > "$jsfile"
# Turn every extracted top-level const into a module export so jest can
# import it via NODE_PATH.
exports=$(sed -E '/^ ? ?const/!d;s/^ ? ?const ([a-zA-Z0-9_]+).*/module.exports.\1 = \1;/' "$jsfile")
echo "$exports" >> "$jsfile"
# run tests
NODE_PATH="$jsdir":$NODE_PATH ./node_modules/jest/bin/jest.js "$@" searcher.test.js
|
#!/bin/bash
#
# Copyright IBM Corp All Rights Reserved
#
# SPDX-License-Identifier: Apache-2.0
#
# This script defines the main capabilities of this project
# Map of user-facing modes to the comma-separated list of playbook
# operations each mode expands to (consumed by isValidateCMD / doOp).
declare -A OPNAMES
# The full "up" pipeline, split across two strings for readability.
LINE0='imageget,certgen,netup,netstats,channelcreate,channeljoin,anchorupdate,'
LINE1='profilegen,ccinstall,ccapprove,cccommit,ccinstantiate,discover'
OPNAMES=([up]="$LINE0$LINE1" [netup]='imageget,certgen,netup,netstats' \
  [restart]='netdown,netup' [generate]='certrem,certgen' [configmerge]='configmerge' \
  [orgjoin]='channelquery,configmerge,channelsign,channelupdate' \
  [cleanup]='netdown,filerem' [stats]='netstats' [apprun]='apprun' \
  [down]='netdown' [install]='ccinstall' [approve]='ccapprove' \
  [instantiate]='ccinstantiate' [initialize]='ccinstantiate' \
  [commit]='cccommit' [invoke]='ccinvoke' [create]='channelcreate' \
  [query]='ccquery' [join]='channeljoin' [blockquery]='blockquery' \
  [channelquery]='channelquery' [profilegen]='profilegen' [caliperrun]='caliperrun' \
  [channelsign]='channelsign' [channelupdate]='channelupdate' \
  [portainerup]='portainerup' [portainerdown]='portainerdown' \
  [anchorupdate]='anchorupdate' [explorerup]='explorerup' [explorerdown]='explorerdown' \
  [consoleup]='consoleup' [consoledown]='consoledown' \
  [nodeimport]='nodeimport' [discover]='discover' [imageget]='imageget' [update]='update')
# Print the usage message.
# Mode names here must stay in sync with the keys of OPNAMES above.
function printHelp() {
  echo "Usage: "
  echo "  minifab <mode> [options]"
  echo "    <mode> - one of operations or combination of operations separated by comma"
  echo ""
  echo "      - 'up' - bring up the network and do all default channel and chaincode operations"
  echo "      - 'netup' - bring up the network only"
  echo "      - 'down' - tear down the network"
  echo "      - 'restart' - restart the network"
  echo "      - 'generate' - generate required certificates and genesis block"
  echo "      - 'profilegen' - generate channel based profiles"
  echo "      - 'install' - install chaincode"
  echo "      - 'approve' - approve chaincode"
  echo "      - 'instantiate' - instantiate chaincode for fabric release < 2.0"
  echo "      - 'initialize' - initialize chaincode for fabric release >= 2.0"
  echo "      - 'commit' - commit chaincode for fabric releases greater or equal to 2.0"
  echo "      - 'invoke' - run chaincode invoke method"
  echo "      - 'query' - run chaincode query method"
  echo "      - 'create' - create application channel"
  echo "      - 'join' - join all peers currently in the network to a channel"
  echo "      - 'blockquery' - do channel block query and produce a channel tx json file"
  echo "      - 'channelquery' - do channel query and produce a channel configuration json file"
  echo "      - 'channelsign' - do channel config update signoff"
  echo "      - 'channelupdate' - do channel update with a given new channel configuration json file"
  echo "      - 'anchorupdate' - do channel update which makes all peer nodes anchors for the all orgs"
  echo "      - 'nodeimport' - import external node certs and endpoints"
  echo "      - 'discover' - discover channel endorsement policy"
  echo "      - 'cleanup' - remove all the nodes and cleanup runtime files"
  echo "      - 'stats' - list all nodes and status"
  echo "      - 'explorerup' - start up Hyperledger explorer"
  echo "      - 'explorerdown' - shutdown Hyperledger explorer"
  echo "      - 'portainerup' - start up portainer web management"
  echo "      - 'portainerdown' - shutdown portainer web management"
  echo "      - 'apprun' - (experimental) run chaincode app if there is any"
  echo "      - 'caliperrun' - (experimental) run caliper test"
  echo "      - 'orgjoin' - (experimental) join an org to the current channel"
  echo "      - 'update' - (experimental) update minifabric to the latest version"
  echo ""
  echo "    options:"
  echo "    -c|--channel-name         - channel name to use (defaults to \"mychannel\")"
  echo "    -s|--database-type        - the database backend to use: goleveldb (default) or couchdb"
  echo "    -l|--chaincode-language   - the language of the chaincode: go (default), node, or java"
  echo "    -i|--fabric-release       - the fabric release to be used to launch the network (defaults to \"2.1\")"
  echo "    -n|--chaincode-name       - chaincode name to be installed/instantiated/approved"
  echo "    -b|--block-number         - block number to be queried"
  echo "    -v|--chaincode-version    - chaincode version to be installed"
  echo "    -p|--chaincode-parameters - chaincode instantiation and invocation parameters"
  echo "    -t|--transient-parameters - chaincode instantiation and invocation transient parameters"
  echo "    -r|--chaincode-private    - flag if chaincode processes private data, default is false"
  echo "    -e|--expose-endpoints     - flag if node endpoints should be exposed, default is false"
  echo "    -o|--organization         - organization to be used for org specific operations"
  echo "    -y|--chaincode-policy     - chaincode policy"
  echo "    -d|--init-required        - chaincode initialization flag, default is true"
  echo "    -f|--run-output           - minifabric run time output callback, can be 'minifab'(default), 'default', 'dense'"
  echo "    -h|--help                 - print this message"
  echo
}
# Load persisted option defaults and re-persist the effective values.
# Any option the user did not set on the command line falls back to the
# XX_-prefixed value saved in ./vars/envsettings by a previous run.
function doDefaults() {
  declare -a params=("CHANNEL_NAME" "CC_LANGUAGE" "IMAGETAG" "BLOCK_NUMBER" "CC_VERSION" \
    "CC_NAME" "DB_TYPE" "CC_PARAMETERS" "EXPOSE_ENDPOINTS" "CURRENT_ORG" "TRANSIENT_DATA" \
    "CC_PRIVATE" "CC_POLICY" "CC_INIT_REQUIRED" "RUN_OUTPUT")
  if [ ! -f "./vars/envsettings" ]; then
    cp envsettings vars/envsettings
  fi
  source ./vars/envsettings
  # Quote the array expansion so iteration is robust (values themselves
  # contain no spaces, but the names must never be re-split).
  for value in "${params[@]}"; do
    # ${!value+x} expands to "x" only when the variable named $value is set.
    if [ -z "${!value+x}" ]; then
      # Shell assignment context does not word-split the RHS, so values
      # containing spaces (e.g. CC_PARAMETERS) survive intact.
      eval "$value=\$XX_$value"
    fi
  done
  # Persist the effective values for the next invocation.
  echo "#!/bin/bash" > ./vars/envsettings
  for value in "${params[@]}"; do
    # Fix: the value expansion is now inside double quotes so values with
    # spaces are written as one declaration (they previously word-split).
    # NOTE(review): values containing single quotes still break the saved
    # file — same limitation as the original format.
    echo "declare XX_$value='${!value}'" >> ./vars/envsettings
  done
}
# Run the fabops playbook with the current option set.
# $1 is the comma-separated operation list assembled by isValidateCMD.
# Fix: CHANNEL_NAME was passed twice (-e duplicated); the second copy is removed.
function doOp() {
  ansible-playbook -i hosts \
    -e "mode=$1" -e "hostroot=$hostroot" -e "CC_LANGUAGE=$CC_LANGUAGE" \
    -e "DB_TYPE=$DB_TYPE" -e "CHANNEL_NAME=$CHANNEL_NAME" -e "CC_NAME=$CC_NAME" \
    -e "CC_VERSION=$CC_VERSION" -e "IMAGETAG=$IMAGETAG" \
    -e "CC_PARAMETERS=$CC_PARAMETERS" -e "EXPOSE_ENDPOINTS=$EXPOSE_ENDPOINTS" \
    -e "ADDRS=$ADDRS" -e "CURRENT_ORG=$CURRENT_ORG" -e "BLOCK_NUMBER=$BLOCK_NUMBER" \
    -e "TRANSIENT_DATA=$TRANSIENT_DATA" -e "CC_PRIVATE=$CC_PRIVATE" \
    -e "CC_POLICY=$CC_POLICY" -e "CC_INIT_REQUIRED=$CC_INIT_REQUIRED" fabops.yaml
}
# Accumulates the operation list passed to the playbook; 'optionverify'
# always runs first.
funcparams='optionverify'
# Validate $MODE (a comma-separated list of user modes) and expand it into
# $funcparams via OPNAMES. Prints help and exits on -h/--help or no mode;
# exits 1 on an unknown mode.
function isValidateCMD() {
  # Fix: $MODE is now quoted — unquoted, a value containing spaces made
  # the test expression invalid.
  if [ -z "$MODE" ] || [[ '-h' == "$MODE" ]] || [[ '--help' == "$MODE" ]]; then
    printHelp
    exit
  fi
  # Split on commas without a trailing-newline artifact.
  readarray -td, cmds < <(printf '%s' "$MODE")
  for i in "${cmds[@]}"; do
    # Lower-case the mode name and trim surrounding whitespace.
    key=$(echo "${i,,}"|xargs)
    if [ ! -z "${OPNAMES[$key]}" ]; then
      funcparams="$funcparams","${OPNAMES[$key]}"
    else
      # Fix: corrected the error-message grammar ("is a not supported").
      echo "'"${i}"'"' is not a supported command!'
      exit 1
    fi
  done
  # Nothing expanded means every entry was empty — show usage.
  if [[ $funcparams == 'optionverify' ]]; then
    printHelp
    exit
  fi
}
# Resolve the host-side directory mounted at /home/vars inside the
# "minifab" container, so paths can be handed back to the docker host.
function getRealRootDir() {
  varpath=$(docker inspect --format '{{ range .Mounts }}{{ if eq .Destination "/home/vars" }}{{ .Source }}{{ end }}{{ end }}' minifab)
  # Strip the trailing /vars, then normalise Windows backslashes to slashes.
  hostroot=${varpath%/vars}
  hostroot=${hostroot//\\/\/}
}
# Execute the expanded operation list, timing the whole playbook run.
function startMinifab() {
  # Ansible output style: 'minifab' (default), 'default' or 'dense'.
  export ANSIBLE_STDOUT_CALLBACK=$RUN_OUTPUT
  time doOp $funcparams
}
|
#!/usr/bin/env bash
#
# Description: carries out walkthrough as described in `pdp` documentation,
# and provides alternative workflows, where primer design locations are filtered.
# Usage: walkthrough.sh
# 1. Clean walkthrough output
OUTDIR=tests/walkthrough
# Fix: -f so a clean tree (no leftover *.json) does not print an error;
# the duplicate trailing ${OUTDIR}/*.json in the rm -rf line below was
# redundant with this line and has been dropped.
rm -f ${OUTDIR}/*.json
rm -rf ${OUTDIR}/blastn* ${OUTDIR}/classify* ${OUTDIR}/deduped* ${OUTDIR}/eprimer3* ${OUTDIR}/primersearch* ${OUTDIR}/prodigal ${OUTDIR}/prodigaligr ${OUTDIR}/extract*
# 2. Standard workflow (no filtering)
# Full pdp pipeline: validate config -> fix sequences -> design primers ->
# dedupe -> BLAST screen -> cross-hybridise -> classify -> extract, then
# print the summary tables. Each step's JSON output feeds the next step.
# Validate config file
pdp config --validate ${OUTDIR}/pectoconf.tab
# Fix input sequences
pdp config --fix_sequences ${OUTDIR}/fixed.json \
    --outdir ${OUTDIR}/config \
    ${OUTDIR}/pectoconf.tab
# Design primers
pdp eprimer3 -f \
    --outdir ${OUTDIR}/eprimer3 \
    ${OUTDIR}/fixed.json \
    ${OUTDIR}/with_primers.json
# Remove redundant primers
pdp dedupe -f \
    --dedupedir ${OUTDIR}/deduped \
    ${OUTDIR}/with_primers.json \
    ${OUTDIR}/deduped_primers.json
# Screen deduped primers against BLAST db
pdp blastscreen -f \
    --db ${OUTDIR}/blastdb/e_coli_screen.fna \
    --outdir ${OUTDIR}/blastn \
    ${OUTDIR}/deduped_primers.json \
    ${OUTDIR}/screened.json
# Cross-hybridise primers against input genomes
pdp primersearch -f \
    --outdir ${OUTDIR}/primersearch \
    ${OUTDIR}/screened.json \
    ${OUTDIR}/primersearch.json
# Classify primers
pdp classify -f \
    ${OUTDIR}/primersearch.json \
    ${OUTDIR}/classify
# Extract amplicons
pdp extract -f \
    ${OUTDIR}/primersearch.json \
    ${OUTDIR}/classify/atrosepticum_NCBI_primers.json \
    ${OUTDIR}/extract
# Display results
cat ${OUTDIR}/classify/summary.tab
printf "\n"
cat ${OUTDIR}/extract/atrosepticum_NCBI_primers/distances_summary.tab
printf "\n"
# 3. Only use primers that are located in CDS regions
# Here, we can start from the fixed.json file created above.
# Same pipeline as section 2, but a prodigal filter step restricts primer
# design to coding regions; all outputs use *_cds names/directories.
pdp filter -f \
    --prodigal \
    --outdir ${OUTDIR}/prodigal \
    ${OUTDIR}/fixed.json \
    ${OUTDIR}/filter_prodigal.json
pdp eprimer3 -f \
    --filter \
    --outdir ${OUTDIR}/eprimer3 \
    ${OUTDIR}/filter_prodigal.json \
    ${OUTDIR}/with_cds_primers.json
pdp dedupe -f \
    --dedupedir ${OUTDIR}/deduped_cds \
    ${OUTDIR}/with_cds_primers.json \
    ${OUTDIR}/deduped_cds_primers.json
pdp blastscreen -f \
    --db ${OUTDIR}/blastdb/e_coli_screen.fna \
    --outdir ${OUTDIR}/blastn_cds \
    ${OUTDIR}/deduped_cds_primers.json \
    ${OUTDIR}/screened_cds.json
pdp primersearch -f \
    --outdir ${OUTDIR}/primersearch_cds \
    ${OUTDIR}/screened_cds.json \
    ${OUTDIR}/primersearch_cds.json
pdp classify -f \
    ${OUTDIR}/primersearch_cds.json \
    ${OUTDIR}/classify_cds
pdp extract -f \
    ${OUTDIR}/primersearch_cds.json \
    ${OUTDIR}/classify_cds/atrosepticum_NCBI_primers.json \
    ${OUTDIR}/extract_cds
cat ${OUTDIR}/classify_cds/summary.tab
printf "\n"
cat ${OUTDIR}/extract_cds/atrosepticum_NCBI_primers/distances_summary.tab
printf "\n"
# 4. Only use primers that are located in intergenic regions
# Again, we can start from the fixed.json file created above.
# Same pipeline as section 2, restricted to intergenic regions via the
# --prodigaligr filter; all outputs use *_igr names/directories.
pdp filter -f \
    --prodigaligr \
    --outdir ${OUTDIR}/prodigaligr \
    ${OUTDIR}/fixed.json \
    ${OUTDIR}/filter_prodigaligr.json
pdp eprimer3 -f \
    --filter \
    --outdir ${OUTDIR}/eprimer3 \
    ${OUTDIR}/filter_prodigaligr.json \
    ${OUTDIR}/with_igr_primers.json
pdp dedupe -f \
    --dedupedir ${OUTDIR}/deduped_igr \
    ${OUTDIR}/with_igr_primers.json \
    ${OUTDIR}/deduped_igr_primers.json
pdp blastscreen -f \
    --db ${OUTDIR}/blastdb/e_coli_screen.fna \
    --outdir ${OUTDIR}/blastn_igr \
    ${OUTDIR}/deduped_igr_primers.json \
    ${OUTDIR}/screened_igr.json
pdp primersearch -f \
    --outdir ${OUTDIR}/primersearch_igr \
    ${OUTDIR}/screened_igr.json \
    ${OUTDIR}/primersearch_igr.json
pdp classify -f \
    ${OUTDIR}/primersearch_igr.json \
    ${OUTDIR}/classify_igr
pdp extract -f \
    ${OUTDIR}/primersearch_igr.json \
    ${OUTDIR}/classify_igr/atrosepticum_NCBI_primers.json \
    ${OUTDIR}/extract_igr
cat ${OUTDIR}/classify_igr/summary.tab
printf "\n"
# Bug fix: this section previously displayed the CDS run's distances
# (extract_cds) instead of the intergenic results produced just above.
cat ${OUTDIR}/extract_igr/atrosepticum_NCBI_primers/distances_summary.tab
printf "\n"
|
import React, { Component } from 'react';
export class ModalContact extends Component {
state = {
name: '',
email: '',
message: '',
formEmailSent: false,
loading: false
};
onChange = (e) => {
// e.persist();
this.setState({ [e.target.name]: e.target.value });
};
onSubmit = (e) => {
e.preventDefault();
const { name, email, message } = this.state;
if (email.trim === '' || message.trim() === '') return false;
this.setState({ loading: true });
const { REACT_APP_EMAILJS_RECEIVER: receiverEmail, REACT_APP_EMAILJS_TEMPLATEID: template } = process.env;
this.sendMail(template, name, email, receiverEmail, message);
};
sendMail(templateId, name, email, receiverEmail, message) {
// console.log(name, email, message);
window.emailjs
.send('mailjet', templateId, {
name,
email,
receiverEmail,
message
})
.then((res) => {
this.setState({ formEmailSent: true, loading: false });
})
.catch((err) => console.error('Failed to send message. Error: ', err));
}
closeHandler = (e) => {
this.setState({
name: '',
email: '',
message: '',
formEmailSent: false,
loading: false
});
this.props.handleClose(e);
};
render() {
const { show } = this.props;
const showClass = show ? 'active' : '';
const buttonClass = this.state.loading ? 'btn contact__submit loading' : 'btn contact__submit';
const sendButton = this.state.formEmailSent ? (
<button
className={buttonClass + ' disabled'}
type="submit"
disabled
style={{ color: 'green', fontSize: '1.5rem' }}
>
✔
</button>
) : (
<button className={buttonClass} type="submit">
Send
</button>
);
return (
<div className={`modal ${showClass}`} id="modal-id">
<a className="modal-overlay" aria-label="Close" onClick={this.closeHandler} />
<div className="modal-container">
<div className="modal-body">
<div className="content">
<p style={{ textAlign: 'center' }}>
You can send an email to <strong style={{ fontWeight: 'bold' }}><EMAIL> </strong>or
contact with form below
</p>
<form method="POST" onSubmit={this.onSubmit} className="contact__form" style={{ paddingTop: '10px' }}>
<input
type="text"
name="name"
placeholder="Your name"
value={this.state.name}
onChange={this.onChange}
autoFocus
/>
<input
type="email"
name="email"
placeholder="Your email"
required
value={this.state.email}
onChange={this.onChange}
/>
<textarea
name="message"
placeholder="Your message"
required
value={this.state.message}
onChange={this.onChange}
rows={5}
/>
{sendButton}
</form>
</div>
</div>
<button
name="close"
className="modal__close"
style={{ '--text-color': '#ffffff' }}
onClick={this.closeHandler}
>
✖
</button>
</div>
<style jsx>
{`
modal {
position: fixed;
top: 0;
left: 0;
width: 100%;
height: 100%;
background: rgba(0, 0, 0, 0.6);
z-index: 50;
}
.modal-main {
position: fixed;
background: #ebf2ff;
width: 80%;
height: auto;
top: 50%;
left: 50%;
transform: translate(-50%, -50%);
display: grid;
grid-template-rows: auto 1fr;
max-height: 80vh;
z-index: 69;
padding: 15px;
}
.modal__close {
position: absolute;
right: 5px;
top: 5px;
border: none;
background: none;
cursor: pointer;
}
.modal-container {
position: relative;
}
.display-block {
display: block;
}
.display-none {
display: none;
}
.contact__form {
display: grid;
grid-gap: 6px;
}
.contact__submit {
cursor: pointer;
outline: none;
height: 2rem;
font-weight: bolder;
border-radius: 6px;
background: #fdfeef;
background: -webkit-gradient(linear, left top, left bottom, from(#fdfeef), to(#b1b2a7));
background: -moz-linear-gradient(top, #fdfeef, #b1b2a7);
background: linear-gradient(to bottom, #fdfeef, #b1b2a7);
text-shadow: #ffffff 1px 1px 1px;
}
@media screen and (min-width: 768px) {
.modal-main {
width: 40%;
padding: 30px;
}
}
@media screen and (min-width: 1024px) {
.modal-main {
width: 20%;
padding: 30px;
}
}
`}
</style>
</div>
);
}
}
export default ModalContact;
|
<filename>_/Foodfact/grunt-foodfact-video-5.4/tasks/foodfact.js
var _ = require('lodash');
var path = require('path');
var async = require('async');
var download = require('../lib/download.js');
var parse = require('../lib/parse.js');
/**
 * Grunt multi-task "foodfact": optionally download the foodfact database
 * dumps, then parse/convert each source file to its destination.
 */
module.exports = function(grunt) {
  grunt.registerMultiTask('foodfact', 'Load the foodfact database', function() {
    var stack = [];
    var done = this.async();
    var options = this.options({
      download : true,
      // NOTE(review): 't' is the literal letter t, not a tab character
      // ('\t') — confirm against the format lib/parse.js expects.
      delimiter : 't'
    });
    var urls = this.data.urls || options.urls;
    // Extract the destination directory from a destination file path and
    // create it if missing.
    var prepareDestDir = function prepareDestDir(destinationFile){
      var destDir = path.dirname(destinationFile);
      if (!grunt.file.exists(destDir)) {
        grunt.file.mkdir(destDir);
      }
      return destDir;
    };
    // Run the file parsing/conversion, emitting convert/converted events
    // around the work so listeners can track progress.
    var convert = function convert(source, destination, cb){
      grunt.verbose.writeln('Convert %s to %s', source, destination);
      grunt.event.emit('convert.foodfact', source, destination);
      parse(source, destination, { delimiter : options.delimiter }, function(err){
        if(err){
          return cb(err);
        }
        grunt.event.emit('converted.foodfact', source, destination);
        cb();
      });
    };
    if(options.download){
      // Download mode: fetch the dumps into each destination directory,
      // then convert whatever files match the configured pattern.
      stack = _.map(this.data.files, function(filePattern, dest) {
        var destDir = prepareDestDir(dest);
        return function (cb){
          download(urls, destDir, function(err){
            if(err){
              return cb(err);
            }
            // NOTE(review): convert() invokes cb once per matched file,
            // but an async.parallel task callback must fire exactly once —
            // more than one match would trigger "callback already called".
            grunt.file.expand(filePattern).forEach(function(source){
              convert(source, dest, cb);
            });
          });
        };
      });
    } else {
      // Offline mode: convert the already-present expanded source files.
      stack = this.files.map(function(file){
        prepareDestDir(file.dest);
        return function (cb){
          // NOTE(review): same multi-cb concern as above when a file
          // entry has more than one src.
          file.src.forEach(function(source){
            convert(source, file.dest, cb);
          });
        };
      });
    }
    // Run the stack functions in parallel, failing fast on the first error.
    async.parallel(stack, function(err){
      if(err){
        return done(err);
      }
      grunt.log.ok('%d files converted', stack.length);
      done();
    });
  });
};
|
class FractalTree:
    """
    This class defines a Fractal Tree object.
    """
    def __init__(self, curvelength: float, width: float, angle: float) -> None:
        """
        Arguments:
            curvelength (float): This argument will define the current length of the branches of the tree.
            width (float): This argument will define the width of the branches of the tree.
            angle (float): This argument will define the angle of the tree.
        """
        self.curvelength = curvelength
        self.width = width
        self.angle = angle
    def draw(self) -> None:
        """
        This method will draw the fractal tree.
        """
        # NOTE(review): the docstring is currently the entire body — no
        # drawing logic is implemented yet; calling draw() is a no-op.
<filename>src/vuejsclient/login/AccessPolicy/recover/AccessPolicyRecoverComponent.ts
import { Component } from "vue-property-decorator";
import ModuleAccessPolicy from '../../../../shared/modules/AccessPolicy/ModuleAccessPolicy';
import ModuleParams from '../../../../shared/modules/Params/ModuleParams';
import ModuleSASSSkinConfigurator from '../../../../shared/modules/SASSSkinConfigurator/ModuleSASSSkinConfigurator';
import ModuleSendInBlue from '../../../../shared/modules/SendInBlue/ModuleSendInBlue';
import VueComponentBase from '../../../ts/components/VueComponentBase';
import './AccessPolicyRecoverComponent.scss';
@Component({
    template: require('./AccessPolicyRecoverComponent.pug')
})
/**
 * Password-recovery page: lets a user request a recovery link by email,
 * or — when SMS recovery is enabled — a recovery code by SMS.
 */
export default class AccessPolicyRecoverComponent extends VueComponentBase {

    // Address the recovery link / SMS code is sent to (prefillable via ?email=).
    private email: string = "";
    // Informational message shown after a recovery attempt.
    private message: string = null;
    // Optional logo configured through the SASS skin configurator params.
    private logo_url: string = null;
    // Where already-authenticated users are redirected.
    private redirect_to: string = "/";
    // True when both the SendInBlue SMS param and the recover-by-SMS policy
    // param are enabled.
    private has_sms_activation: boolean = false;

    private async mounted() {
        await this.load_logo_url();
        // Already logged in: the recovery page is pointless, go to the app.
        let logged_id: number = await ModuleAccessPolicy.getInstance().getLoggedUserId();
        if (!!logged_id) {
            window.location = this.redirect_to as any;
        }
        // Prefill the email field from the query string when provided.
        for (let j in this.$route.query) {
            switch (j) {
                case 'email':
                    this.email = this.$route.query[j];
                    break;
            }
        }
        this.has_sms_activation =
            await ModuleParams.getInstance().getParamValueAsBoolean(ModuleSendInBlue.PARAM_NAME_SMS_ACTIVATION) &&
            await ModuleParams.getInstance().getParamValueAsBoolean(ModuleAccessPolicy.PARAM_NAME_CAN_RECOVER_PWD_BY_SMS);
    }

    // Load the configured logo URL; fall back to null when unset/empty.
    private async load_logo_url() {
        this.logo_url = await ModuleParams.getInstance().getParamValue(ModuleSASSSkinConfigurator.MODULE_NAME + '.logo_url');
        if (this.logo_url && (this.logo_url != '""') && (this.logo_url != '')) {
            return;
        }
        this.logo_url = null;
    }

    // Request a recovery link by email, reporting progress via snotify.
    private async recover() {
        let self = this;
        self.snotify.async(self.label('recover.start'), () =>
            new Promise(async (resolve, reject) => {
                if (await ModuleAccessPolicy.getInstance().beginRecover(self.email)) {
                    // Tailor the confirmation text to whether SMS is an option.
                    if (self.has_sms_activation) {
                        self.message = self.label('login.recover.answercansms');
                    } else {
                        self.message = self.label('login.recover.answer');
                    }
                    resolve({
                        body: self.label('recover.ok'),
                        config: {
                            timeout: 10000,
                            showProgressBar: true,
                            closeOnClick: false,
                            pauseOnHover: true,
                        },
                    });
                } else {
                    reject({
                        body: self.label('recover.failed'),
                        config: {
                            timeout: 10000,
                            showProgressBar: true,
                            closeOnClick: false,
                            pauseOnHover: true,
                        },
                    });
                }
            })
        );
    }

    // Request a recovery code by SMS, reporting progress via snotify.
    // NOTE(review): mixes `this` and `self` inside the arrow callbacks;
    // both bind identically here, but recover() uses `self` consistently.
    private async recoversms() {
        let self = this;
        self.snotify.async(self.label('recover.start'), () =>
            new Promise(async (resolve, reject) => {
                if (await ModuleAccessPolicy.getInstance().beginRecoverSMS(this.email)) {
                    this.message = this.label('login.recover.answersms');
                    resolve({
                        body: self.label('recover.oksms'),
                        config: {
                            timeout: 10000,
                            showProgressBar: true,
                            closeOnClick: false,
                            pauseOnHover: true,
                        },
                    });
                } else {
                    reject({
                        body: self.label('recover.failed'),
                        config: {
                            timeout: 10000,
                            showProgressBar: true,
                            closeOnClick: false,
                            pauseOnHover: true,
                        },
                    });
                }
            })
        );
    }
}
#!/bin/bash
## strict check
set -euo pipefail

# Local checkout of the wiki that mkdocs builds and publishes.
DOCDIR='/Users/mtang/Dropbox (Partners HealthCare)/github_repos/CIDC-bioinformatics-computation-wiki'

#### DANGER ####
## MAKE SURE TO HAVE VALID PATH HERE AS SCRIPT WILL NOT CHECK FOR PATH
## rsync may overwrite or worse, delete files on remote node.
if [[ ! -d "$DOCDIR" || ! -x "$DOCDIR" ]]; then
    # Fixed message wording ("does not exist" / "accessible").
    echo -e "\nERROR: DOCDIR does not exist or is not accessible at $DOCDIR\n" >&2
    exit 1
fi

## build docs and publish to the gh-pages branch in one chain; set -e aborts
## the script if any step fails.
cd "$DOCDIR" && \
    mkdocs build --clean && echo -e "\nINFO: Built updated docs\n" && \
    mkdocs gh-deploy --clean -m "published using commit: {sha} and mkdocs {version}"

## Build hook
## curl -X POST -d {} https://api.netlify.com/build_hooks/***
## END ##
#import <Foundation/Foundation.h>

// Demo: format two strings into "Hello World" and log the result.
int main() {
    @autoreleasepool {
        NSString *greeting = @"Hello";
        NSString *subject = @"World";
        // stringWithFormat: joins the parts with a single space.
        NSString *combined = [NSString stringWithFormat:@"%@ %@", greeting, subject];
        NSLog(@"%@", combined);
    }
    return 0;
}
<reponame>Daniel201618/git_learning
package com.cwl.service.part_1;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.TimeUnit;
/**
 * Demonstrates {@link Thread#join()}: the main thread waits for thread1 to
 * finish before running its own print loop, while thread2 keeps running and
 * may interleave with main's output.
 *
 * @author cwl
 * @date 2019/12/17 17:37
 */
public class ThreadJoin {

    public static void main(String[] args) throws InterruptedException {
        // Fix: second worker now gets its own sequence number (was create(1)).
        Thread thread1 = create(1);
        Thread thread2 = create(2);
        List<Thread> list = new ArrayList<>();
        list.add(thread1);
        list.add(thread2);
        for (Thread thread : list) {
            thread.start();
        }
        // Joining every thread would make main wait for all of them:
        //for (Thread thread : list) {
        //    thread.join();
        //}
        // Only wait for thread1; thread2 can still interleave with main below.
        thread1.join();
        for (int i = 0; i < 10; i++) {
            System.out.println(Thread.currentThread().getName() + "#" + i);
            shortSleep();
        }
    }

    /**
     * Builds (but does not start) a worker thread that prints ten counter
     * values, pausing briefly between prints.
     *
     * @param seq sequence number, used to give the thread a readable name
     *            (the original accepted it but never used it)
     * @return the unstarted worker thread
     */
    private static Thread create(int seq) {
        return new Thread(() -> {
            for (int i = 0; i < 10; i++) {
                System.out.println(Thread.currentThread().getName() + "#" + i);
                shortSleep();
            }
        }, "thread-" + seq);
    }

    /** Sleeps ~100 ms; an interrupt is only logged, not re-signalled. */
    private static void shortSleep() {
        try {
            TimeUnit.MILLISECONDS.sleep(100);
        } catch (InterruptedException e) {
            e.printStackTrace();
        }
    }
}
|
# Return the string that occurs most often in list_of_strings.
# Ties are broken arbitrarily; an empty list yields nil.
def find_most_common(list_of_strings)
  tallies = list_of_strings.each_with_object(Hash.new(0)) { |str, acc| acc[str] += 1 }
  winner = tallies.max_by { |_string, occurrences| occurrences }
  winner&.first
end

list_of_strings = ['foo', 'bar', 'foo', 'baz', 'foo', 'qux']
most_common_string = find_most_common(list_of_strings)
puts most_common_string #Output: "foo"
<reponame>bbc/hive_mind<filename>spec/controllers/api/devices_controller_spec.rb
require 'rails_helper'
require 'timecop'
RSpec.describe Api::DevicesController, type: :controller do
# Request payloads shared by the examples below.

# Minimal device payload: only a name.
let(:valid_attributes) {
  {
    name: 'Device 1'
  }
}

# Same payload but with a client-supplied id that matches no existing row.
let(:valid_attributes_with_id) {
  {
    name: 'Device 1',
    id: 987
  }
}

# Devices identified by distinct MAC addresses.
let(:device_with_mac1) {
  {
    name: 'Device 1',
    macs: [ 'aa:bb:cc:dd:ee:01' ]
  }
}

let(:device_with_mac2) {
  {
    name: 'Device 2',
    macs: [ 'aa:bb:cc:dd:ee:02' ]
  }
}

# Controller specs take a session hash; these examples need none.
let(:valid_session) { {} }
# Device registration endpoint: device creation/update, MAC-based identity,
# brand/model/type bookkeeping, and plugin engine hooks.
describe 'POST #register' do
  it 'registers a new device without unique identifier' do
    expect {
      post :register, {device: valid_attributes}, valid_session
    }.to change(Device, :count).by(1)
  end

  it 'does not reregister a known device' do
    device = Device.create! valid_attributes
    expect {
      post :register, {device: valid_attributes.merge(id: device.id)}, valid_session
    }.to change(Device, :count).by(0)
  end

  it 'does not register an unknown device only by id' do
    expect {
      post :register, {device: { id: 99 }}, valid_session
    }.to change(Device, :count).by(0)
  end

  it 'registers two devices with different MACs' do
    post :register, {device: device_with_mac1}, valid_session
    expect {
      post :register, {device: device_with_mac2}, valid_session
    }.to change(Device, :count).by(1)
  end

  it 'identifies device by MAC' do
    post :register, {device: device_with_mac1}, valid_session
    expect {
      post :register, {device: device_with_mac1}, valid_session
    }.to change(Device, :count).by(0)
  end

  # Blank or missing MAC entries must not create empty Mac records.
  it 'does not create an empty MAC for an empty string' do
    post :register, {device: { macs: [ '' ] } }, valid_session
    expect(Device.last.macs.length).to be 0
  end

  it 'does not create an empty MAC for nil value' do
    post :register, {device: { macs: [ nil ] } }, valid_session
    expect(Device.last.macs.length).to be 0
  end

  it 'does not create an empty MAC for missing macs array' do
    post :register, {device: { name: 'Device' } }, valid_session
    expect(Device.last.macs.length).to be 0
  end

  it 'sets the heartbeat' do
    post :register, {device: valid_attributes}, valid_session
    # Freeze time 30s ahead so the heartbeat age is deterministic.
    Timecop.freeze(Time.now + 30) do
      expect(Device.last.seconds_since_heartbeat).to eq 30
    end
  end

  it 'registers a devices with an unknown id' do
    expect {
      post :register, {device: valid_attributes_with_id}, valid_session
    }.to change(Device, :count).by(1)
  end

  # Unrecognised device_type values fall back to a plain Device with no plugin.
  context 'unknown device type' do
    let(:unknown_device_type) {
      {
        device_type: :unknown,
        name: 'Unknown device'
      }
    }

    it 'registeres the unknown device' do
      expect {
        post :register, {device: unknown_device_type}, valid_session
      }.to change(Device, :count).by(1)
    end

    it 'sets the device type as nil' do
      post :register, {device: unknown_device_type}, valid_session
      expect(Device.last.plugin_type).to be_nil
    end

    it 'sets the device data id as nil' do
      post :register, {device: unknown_device_type}, valid_session
      expect(Device.last.plugin_id).to be_nil
    end
  end

  context 'known device type' do
    let(:known_device_type) {
      {
        model: :test_model,
        brand: :test_brand,
        device_type: :generic,
        name: 'Known device',
        extra_data_one: 'Data one',
        extra_data_two: 2
      }
    }
    let(:device_without_name) {
      {
        model: :test_model,
        brand: :test_brand,
        device_type: :generic
      }
    }
    let(:device_with_name) {
      {
        model: :test_model,
        brand: :test_brand,
        device_type: :generic,
        name: 'User defined device name'
      }
    }
    let(:device_with_os) {
      {
        model: :test_model,
        brand: :test_brand,
        operating_system_name: 'Test OS',
        operating_system_version: '1.2.3'
      }
    }

    it 'registeres the known device' do
      expect {
        post :register, {device: known_device_type}, valid_session
      }.to change(Device, :count).by(1)
    end

    it 'sets the device type' do
      post :register, {device: known_device_type}, valid_session
      expect(Device.last.device_type).to eq 'generic'
    end

    # Extra attributes land (stringified) in the plugin's details hash.
    it 'passes through attributes' do
      post :register, {device: known_device_type}, valid_session
      plugin = Device.last.plugin
      expect(plugin.details['extra_data_one']).to eq 'Data one'
      expect(plugin.details['extra_data_two']).to eq '2'
      expect(plugin.details['extra_data_three']).to be_nil
    end

    it 'generates a name from engine' do
      post :register, {device: device_without_name}, valid_session
      expect(Device.last.name).to_not be_nil
    end

    it 'overrides name set by engine' do
      post :register, {device: device_with_name}, valid_session
      expect(Device.last.name).to eq 'User defined device name'
    end

    it 'sets the operating system' do
      post :register, {device: device_with_os}, valid_session
      expect(Device.last.operating_system.name).to eq 'Test OS'
      expect(Device.last.operating_system.version).to eq '1.2.3'
    end

    context 'existing device' do
      let(:device_instance) { Device.new }
      before(:each) {
        device_instance.set_os(name: 'Old OS', version: '1.2.3')
      }

      it 'updates the operating system' do
        post :register, {device: { id: device_instance.id, operating_system_name: 'New OS', operating_system_version: '2.4.6' } }
        device_instance.reload
        expect(device_instance.operating_system.name).to eq 'New OS'
        expect(device_instance.operating_system.version).to eq '2.4.6'
      end

      it 'does not modify the operating system' do
        post :register, {device: { id: device_instance.id } }
        device_instance.reload
        expect(device_instance.operating_system.name).to eq 'Old OS'
        expect(device_instance.operating_system.version).to eq '1.2.3'
      end
    end
  end

  # Brand/Model/DeviceType rows are created lazily and never duplicated.
  context 'device models, types and brands' do
    let(:brand1_model1) {
      {
        name: 'Device 1',
        model: 'Model 1',
        brand: 'Brand 1',
      }
    }
    let(:brand1_model2) {
      {
        name: 'Device 2',
        model: 'Model 2',
        brand: 'Brand 1',
      }
    }
    let(:brand2_model1) {
      {
        name: 'Device 3',
        model: 'Model 1',
        brand: 'Brand 2',
      }
    }
    let(:brand2_model2) {
      {
        name: 'Device 4',
        model: 'Model 2',
        brand: 'Brand 2',
      }
    }
    let(:brand1_model1_type1) {
      {
        name: 'Device 5',
        model: 'Model 1',
        brand: 'Brand 1',
        device_type: 'Type 1',
      }
    }
    let(:brand1_model1_type2) {
      {
        name: 'Device 6',
        model: 'Model 1',
        brand: 'Brand 1',
        device_type: 'Type 2',
      }
    }

    it 'creates a new model' do
      expect {
        post :register, {device: brand1_model1}, valid_session
      }.to change(Model, :count).by(1)
    end

    it 'creates a new brand' do
      expect {
        post :register, {device: brand1_model1}, valid_session
      }.to change(Brand, :count).by(1)
    end

    it 'creates a new model for an existing brand' do
      post :register, {device: brand1_model1}, valid_session
      expect {
        post :register, {device: brand1_model2}, valid_session
      }.to change(Model, :count).by(1)
    end

    it 'creates a new brand with an existing model name' do
      post :register, {device: brand1_model1}, valid_session
      expect {
        post :register, {device: brand2_model1}, valid_session
      }.to change(Brand, :count).by(1)
    end

    it 'it does not recreate a known model' do
      post :register, {device: brand1_model1}, valid_session
      expect {
        post :register, {device: brand1_model1}, valid_session
      }.to change(Model, :count).by(0)
    end

    it 'it does not recreate a known brand for a known model' do
      post :register, {device: brand1_model1}, valid_session
      expect {
        post :register, {device: brand1_model1}, valid_session
      }.to change(Brand, :count).by(0)
    end

    it 'it does not recreate a known brand for a new model' do
      post :register, {device: brand1_model1}, valid_session
      expect {
        post :register, {device: brand1_model2}, valid_session
      }.to change(Brand, :count).by(0)
    end

    it 'creates a new type' do
      expect {
        post :register, {device: brand1_model1_type1}, valid_session
      }.to change(DeviceType, :count).by(1)
    end

    it 'does not recreate an existing type' do
      post :register, {device: brand1_model1_type1}, valid_session
      expect {
        post :register, {device: brand1_model1_type1}, valid_session
      }.to change(DeviceType, :count).by(0)
    end

    it 'creates a new type for an existing model and brand' do
      post :register, {device: brand1_model1_type1}, valid_session
      expect {
        post :register, {device: brand1_model1_type2}, valid_session
      }.to change(DeviceType, :count).by(1)
    end

    it 'creates a new model for an existing model/brand with a new type' do
      post :register, {device: brand1_model1_type1}, valid_session
      expect {
        post :register, {device: brand1_model1_type2}, valid_session
      }.to change(Model, :count).by(1)
    end
  end

  context 'plugin with device identifier method' do
    let(:device) {
      {
        name: 'Device',
        model: 'Model',
        brand: 'Brand',
        device_type: 'Controllermockone',
      }
    }

    # Mock plugin engine that identifies existing devices by a custom
    # 'id_key' characteristic instead of their MAC address.
    module HiveMindControllermockone
      class Plugin < HiveMindGeneric::Plugin
        def self.identify_existing options = {}
          if identifier = HiveMindGeneric::Characteristic.find_by(key: 'id_key', value: options[:id_key])
            identifier.plugin.device
          else
            nil
          end
        end

        def self.plugin_params params
          params.permit(:id_key)
        end
      end
    end

    it 'identifies device based on plugin identifier method' do
      post :register, {device: device.merge(
        id_key: '12468',
        macs: ['aa:aa:aa:aa:aa:01']
      )}, valid_session
      # Same id_key but new MAC: must match the existing device.
      expect {
        post :register, {device: device.merge(
          id_key: '12468',
          macs: ['aa:aa:aa:aa:aa:02']
        )}, valid_session
      }.to change(Device, :count).by(0)
    end

    it 'creates new device with different unique identifier' do
      post :register, {device: device.merge(
        id_key: '12468',
        macs: ['aa:aa:aa:aa:aa:01']
      )}, valid_session
      expect {
        post :register, {device: device.merge(
          id_key: '12469',
          macs: ['aa:aa:aa:aa:aa:02']
        )}, valid_session
      }.to change(Device, :count).by(1)
      # The plugin identifier wins over the (already seen) MAC address.
      expect {
        post :register, {device: device.merge(
          id_key: '12470',
          macs: ['aa:aa:aa:aa:aa:01']
        )}, valid_session
      }.to change(Device, :count).by(1)
    end
  end

  context 'plugin with existing device' do
    let(:device_info) {
      {
        name: 'Device',
        device_type: 'Controllermockupdating',
        macs: [ 'aa:bb:cc:dd:ee:01' ]
      }
    }
    let(:device_no_plugin_info) {
      {
        name: 'Device (no plugin)',
        macs: [ 'aa:bb:cc:dd:ee:02' ]
      }
    }

    # Mock plugin that records (via a class variable) that #update was called.
    module HiveMindControllermockupdating
      class Plugin < HiveMindGeneric::Plugin
        def update(*args)
          @@indicator = 'updated'
        end

        def self.plugin_params args
          {}
        end

        def self.indicator
          @@indicator
        end
      end
    end

    let(:device_type) { DeviceType.new(classification: 'Controllermockupdating') }
    let(:model) { Model.new(device_type: device_type) }
    let(:device) {
      Device.create(
        name: 'Device',
        model: model,
        macs: [ Mac.new(mac:'aa:bb:cc:dd:ee:01')],
        plugin: HiveMindControllermockupdating::Plugin.new,
        plugin_type: 'HiveMindControllermockupdating::Plugin'
      )
    }
    let(:device_no_plugin) {
      Device.create(
        name: 'Device (no plugin)',
        macs: [ Mac.new(mac:'aa:bb:cc:dd:ee:02')],
      )
    }

    it 'calls the plugin update method' do
      post :register, {device: device_info.merge( device_id: device.id )}
      expect(HiveMindControllermockupdating::Plugin.indicator).to eq 'updated'
    end

    it 'updates a devices with no plugin set' do
      post :register, {device: device_no_plugin_info.merge( device_id: device_no_plugin.id )}
      expect(response).to have_http_status(:accepted)
    end
  end
end
# Heartbeat polling: devices report themselves (and optionally other devices)
# alive; active polls also pick up any pending actions.
describe 'PUT #poll' do
  let(:brand) { Brand.create(name: 'Test brand') }
  let(:model) { Model.create(name: 'Test model', brand: brand) }
  let(:device) { Device.create(name: 'Test device', model: model) }
  let(:device2) { Device.create(name: 'Test device 2') }
  let(:device3) { Device.create(name: 'Test device 3') }
  let(:reporting_device) { Device.create(name: 'Reporting device') }

  it 'adds a heartbeat for a known device' do
    expect {
      put :poll, { poll: { id: device.id } }
    }.to change(Heartbeat, :count).by(1)
  end

  it 'sets the heartbeat for the correct device' do
    put :poll, { poll: { id: device.id } }
    expect(Heartbeat.last.device).to eq device
  end

  # When no devices array is given, the poller reports for itself.
  it 'sets the reporting device for a self reporting device' do
    put :poll, { poll: { id: device.id } }
    expect(Heartbeat.last.reporting_device).to eq device
  end

  it 'sets the device of a hearbeat when reported by a different device' do
    put :poll, { poll: { devices: [ device.id ], id: reporting_device.id } }
    expect(Heartbeat.last.device).to eq device
  end

  it 'sets the reporting device as different from the device' do
    put :poll, { poll: { devices: [ device.id ], id: reporting_device.id } }
    expect(Heartbeat.last.reporting_device).to eq reporting_device
  end

  it 'polls multiple devices' do
    expect {
      put :poll, { poll: { devices: [ device.id, device2.id, device3.id ], id: reporting_device.id } }
    }.to change(Heartbeat, :count).by(3)
    expect(Heartbeat.last(3).map{ |h| h.device }).to match [ device, device2, device3 ]
  end

  it 'fails to set a heartbeat for an unknown device' do
    expect {
      put :poll, { poll: { id: -1 } }
    }.to change(Heartbeat, :count).by(0)
  end

  it 'report an unknown device correctly' do
    put :poll, { poll: { id: -1 } }
    expect(response).to have_http_status(:not_found)
  end

  it 'report an unknown reporting device correctly' do
    put :poll, { poll: { devices: [ device.id ], id: -1 } }
    expect(response).to have_http_status(:not_found)
  end

  # Only 'active' polls (the default) deliver pending actions.
  context 'with actions' do
    let(:valid_options) {
      {
        device_id: device.id,
        action_type: 'redirect',
        body: 'http://test_url.com'
      }
    }

    it 'returns an action with the poll response' do
      put :action, { device_action: valid_options }
      put :poll, { poll: { id: device.id, poll_type: 'active' } }
      expect(assigns(:device_action)).to eq DeviceAction.last
    end

    it 'returns an action with the poll response (default poll type)' do
      put :action, { device_action: valid_options }
      put :poll, { poll: { id: device.id } }
      expect(assigns(:device_action)).to eq DeviceAction.last
    end

    it 'does not return an action for a passive poll' do
      put :action, { device_action: valid_options }
      put :poll, { poll: { id: device.id, poll_type: 'passive' } }
      expect(assigns(:device_action)).to be_nil
    end

    # Actions for polled-for devices come back keyed by device id.
    context 'polled by another device' do
      it 'returns an action with the poll response' do
        put :action, { device_action: valid_options }
        put :poll, { poll: { id: device2.id, devices: [ device.id], poll_type: 'active' } }
        expect(assigns(:device_actions)[device.id]).to eq DeviceAction.last
      end

      it 'returns an action with the poll response (default poll type)' do
        put :action, { device_action: valid_options }
        put :poll, { poll: { id: device2.id, devices: [ device.id] } }
        expect(assigns(:device_actions)[device.id]).to eq DeviceAction.last
      end

      it 'does not return an action for a passive poll' do
        put :action, { device_action: valid_options }
        put :poll, { poll: { id: device2.id, devices: [ device.id ], poll_type: 'passive' } }
        expect(assigns(:device_actions)[device.id]).to be_nil
      end
    end
  end
end
# Queueing actions for devices; exact duplicates are rejected until the
# pending action has been executed (delivered via a poll).
describe 'PUT #action' do
  let(:device) { Device.create(name: 'Test device') }
  let(:device2) { Device.create(name: 'Test device 2') }
  let(:valid_options) {
    {
      device_id: device.id,
      action_type: 'redirect',
      body: 'http://test_url.com'
    }
  }
  let(:missing_device_id) {
    {
      action_type: 'redirect',
      body: 'http://test_url.com'
    }
  }
  let(:missing_type) {
    {
      device_id: device.id,
      body: 'http://test_url.com'
    }
  }
  let(:missing_body) {
    {
      device_id: device.id,
      action_type: 'redirect',
    }
  }
  # Variants differing from valid_options in exactly one attribute.
  let(:valid_options_2) {
    {
      device_id: device2.id,
      action_type: 'redirect',
      body: 'http://test_url.com'
    }
  }
  let(:valid_options_3) {
    {
      device_id: device.id,
      action_type: 'display',
      body: 'http://test_url.com'
    }
  }
  let(:valid_options_4) {
    {
      device_id: device.id,
      action_type: 'redirect',
      body: 'http://test_url_2.com'
    }
  }

  it 'adds an action' do
    expect {
      put :action, { device_action: valid_options }
    }.to change(DeviceAction, :count).by 1
  end

  it 'adds an action for the given device' do
    put :action, { device_action: valid_options }
    expect(DeviceAction.last.device).to eq device
  end

  it 'sets the action type and body' do
    put :action, { device_action: valid_options }
    expect(DeviceAction.last.action_type).to eq 'redirect'
    expect(DeviceAction.last.body).to eq 'http://test_url.com'
  end

  # executed_at stays nil until the action is delivered by a poll.
  it 'sets the executed time to nil' do
    put :action, { device_action: valid_options }
    expect(DeviceAction.last.executed_at).to be_nil
  end

  it 'adds a second action for a different device' do
    put :action, { device_action: valid_options }
    expect {
      put :action, { device_action: valid_options_2 }
    }.to change(DeviceAction, :count).by 1
  end

  it 'adds a second action of a different type action' do
    put :action, { device_action: valid_options }
    expect {
      put :action, { device_action: valid_options_3 }
    }.to change(DeviceAction, :count).by 1
  end

  it 'adds a second action with a different body' do
    put :action, { device_action: valid_options }
    expect {
      put :action, { device_action: valid_options_4 }
    }.to change(DeviceAction, :count).by 1
  end

  it 'does not duplicate an action before it is executed' do
    put :action, { device_action: valid_options }
    expect {
      put :action, { device_action: valid_options }
    }.to change(DeviceAction, :count).by 0
    expect(response).to have_http_status(:already_reported)
  end

  # Polling executes pending actions, so the same action may be queued again.
  it 'allows a duplicate action after it has been executed' do
    put :action, { device_action: valid_options }
    put :poll, { poll: { id: device.id } }
    expect {
      put :action, { device_action: valid_options }
    }.to change(DeviceAction, :count).by 1
  end

  it 'does not duplicate a retried action before it is executed' do
    put :action, { device_action: valid_options }
    put :poll, { poll: { id: device.id } }
    put :action, { device_action: valid_options }
    expect {
      put :action, { device_action: valid_options }
    }.to change(DeviceAction, :count).by 0
    expect(response).to have_http_status(:already_reported)
  end

  it 'does not add an action with a missing device id' do
    expect {
      put :action, { device_action: missing_device_id }
    }.to change(DeviceAction, :count).by 0
    expect(response).to have_http_status(:unprocessable_entity)
  end

  it 'does not add an action with a missing type' do
    expect {
      put :action, { device_action: missing_type }
    }.to change(DeviceAction, :count).by 0
    expect(response).to have_http_status(:unprocessable_entity)
  end

  it 'does not add an action with a missing body' do
    expect {
      put :action, { device_action: missing_body }
    }.to change(DeviceAction, :count).by 0
    expect(response).to have_http_status(:unprocessable_entity)
  end
end
# Queue assignment: the supplied hive_queues list fully replaces the device's
# current queue membership (missing/empty list clears it).
describe 'PUT #hive_queues' do
  let(:device) { Device.create(name: 'Test device') }
  let(:hive_queue_1) { HiveQueue.create(name: 'queue_one', description: 'First test queue') }
  let(:hive_queue_2) { HiveQueue.create(name: 'queue_two', description: 'Second test queue') }
  let(:device_with_queue) { Device.create(name: 'Test device', hive_queues: [hive_queue_1]) }

  it 'set a single hive queue for a device' do
    put :hive_queues, {
      device_id: device.id,
      hive_queues: [
        hive_queue_1.name
      ]
    }
    device.reload
    expect(device.hive_queues.length).to be 1
    expect(device.hive_queues[0]).to eq hive_queue_1
  end

  it 'removes a single hive queue from a device' do
    put :hive_queues, {
      device_id: device_with_queue.id,
      hive_queues: []
    }
    device_with_queue.reload
    expect(device_with_queue.hive_queues.length).to be 0
  end

  it 'removes hive queues from a device if nil' do
    put :hive_queues, {
      device_id: device_with_queue.id
    }
    device_with_queue.reload
    expect(device_with_queue.hive_queues.length).to be 0
  end

  it 'change hive queue for a device' do
    put :hive_queues, {
      device_id: device_with_queue.id,
      hive_queues: [
        hive_queue_2.name
      ]
    }
    device_with_queue.reload
    expect(device_with_queue.hive_queues.length).to be 1
    expect(device_with_queue.hive_queues[0]).to eq hive_queue_2
  end

  it 'sets two hive queues for a device' do
    put :hive_queues, {
      device_id: device.id,
      hive_queues: [
        hive_queue_1.name,
        hive_queue_2.name
      ]
    }
    device.reload
    expect(device.hive_queues.length).to be 2
    expect(device.hive_queues.map{ |q| q.name }).to match_array([hive_queue_1.name, hive_queue_2.name])
  end

  # Unknown queue names are created on the fly; blank entries are ignored.
  it 'adds an unknown queue to a device' do
    expect {
      put :hive_queues, {
        device_id: device.id,
        hive_queues: [
          'unknown queue'
        ]
      }
    }.to change(HiveQueue, :count).by 1
    device.reload
    expect(device.hive_queues.length).to be 1
    expect(device.hive_queues[0].name).to eq 'unknown queue'
  end

  it 'silently ignores a nil queue name' do
    expect {
      put :hive_queues, {
        device_id: device.id,
        hive_queues: [
          nil
        ]
      }
    }.to change(HiveQueue, :count).by 0
    device.reload
    expect(device.hive_queues.length).to be 0
  end

  it 'silently ignores an empty queue name' do
    expect {
      put :hive_queues, {
        device_id: device.id,
        hive_queues: [
          ''
        ]
      }
    }.to change(HiveQueue, :count).by 0
    device.reload
    expect(device.hive_queues.length).to be 0
  end
end
# Device state log: posting leveled messages and clearing them per device,
# by level, by component, or by explicit state ids.
describe 'PUT #device_state' do
  let(:device) { Device.create(name: 'Test device') }
  let(:device2) { Device.create(name: 'Test device 2') }

  # Every recognised severity level can be logged.
  ['debug', 'info', 'warn', 'error', 'fatal'].each do |status|
    context "status is '#{status}'" do
      it 'adds an log message' do
        expect {
          put :update_state, {
            device_state: {
              device_id: device.id,
              component: 'Test component',
              state: status,
              message: 'Test message'
            }
          }
        }.to change(DeviceState, :count).by 1
        expect(response).to have_http_status :ok
      end
    end
  end

  it 'fails to set an unknown state' do
    put :update_state, {
      device_state: {
        device_id: device.id,
        component: 'Test component',
        state: 'bad_status',
        message: 'Test message'
      }
    }
    expect(response).to have_http_status :unprocessable_entity
  end

  it 'fails to set the state of an unknown device' do
    # Keep the id of a destroyed device to hit the not-found path.
    id = device.id
    device.destroy
    put :update_state, {
      device_state: {
        device_id: id,
        component: 'Test component',
        state: 'info',
        message: 'Test message'
      }
    }
    expect(response).to have_http_status :unprocessable_entity
  end

  it 'clears the device state for a device' do
    DeviceState.create(device: device, state: 'info')
    DeviceState.create(device: device, state: 'info')
    DeviceState.create(device: device, state: 'info')
    DeviceState.create(device: device, state: 'info')
    DeviceState.create(device: device, state: 'info')
    expect {
      put :update_state, {
        device_state: {
          device_id: device.id,
          state: 'clear'
        }
      }
    }.to change(DeviceState, :count).by -5
  end

  it 'only clears the state of the correct device' do
    DeviceState.create(device: device, state: 'info')
    DeviceState.create(device: device, state: 'info')
    DeviceState.create(device: device, state: 'info')
    DeviceState.create(device: device2, state: 'info')
    DeviceState.create(device: device2, state: 'info')
    expect {
      put :update_state, {
        device_state: {
          device_id: device.id,
          state: 'clear'
        }
      }
    }.to change(DeviceState, :count).by -3
  end

  # Clearing requires a target device; a blanket clear is rejected.
  it 'fails to clear state for all devices' do
    DeviceState.create(device: device, state: 'info')
    DeviceState.create(device: device, state: 'info')
    DeviceState.create(device: device, state: 'info')
    DeviceState.create(device: device2, state: 'info')
    DeviceState.create(device: device2, state: 'info')
    expect {
      put :update_state, {
        device_state: {
          state: 'clear'
        }
      }
    }.to change(DeviceState, :count).by 0
    expect(response).to have_http_status(:unprocessable_entity)
  end

  # 'level' clears that severity and everything below it; nil clears all.
  context 'limit cleared states' do
    before(:each) do
      @debug = DeviceState.create(device: device, state: 'debug')
      @info = DeviceState.create(device: device, state: 'info')
      @warn = DeviceState.create(device: device, state: 'warn')
      @error = DeviceState.create(device: device, state: 'error')
      @fatal = DeviceState.create(device: device, state: 'fatal')
    end

    it 'clears only debug messages' do
      put :update_state, { device_state: { device_id: device.id, state: 'clear', level: 'debug' } }
      expect(device.reload.device_states).to match_array([@info, @warn, @error, @fatal])
    end

    it 'clears only info and debug messages' do
      put :update_state, { device_state: { device_id: device.id, state: 'clear', level: 'info' } }
      expect(device.reload.device_states).to match_array([@warn, @error, @fatal])
    end

    it 'clears only warn, info and debug messages' do
      put :update_state, { device_state: { device_id: device.id, state: 'clear', level: 'warn' } }
      expect(device.reload.device_states).to match_array([@error, @fatal])
    end

    it 'clears only error, warn, info and debug messages' do
      put :update_state, { device_state: { device_id: device.id, state: 'clear', level: 'error' } }
      expect(device.reload.device_states).to match_array([@fatal])
    end

    it 'clears all messages' do
      put :update_state, { device_state: { device_id: device.id, state: 'clear', level: 'fatal' } }
      expect(device.reload.device_states).to match_array([])
    end

    it 'clears all messages with nil level' do
      put :update_state, { device_state: { device_id: device.id, state: 'clear', level: nil } }
      expect(device.reload.device_states).to match_array([])
    end
  end

  it 'clears messages for a component' do
    component_1 = DeviceState.create(device: device, component: 'one', state: 'info')
    component_2 = DeviceState.create(device: device, component: 'two', state: 'info')
    component_3 = DeviceState.create(device: device, component: 'three', state: 'info')
    put :update_state, { device_state: { device_id: device.id, state: 'clear', component: 'one' } }
    expect(device.reload.device_states).to match_array([component_2, component_3])
  end

  it 'clears all messages for a nil component' do
    component_1 = DeviceState.create(device: device, component: 'one', state: 'info')
    component_2 = DeviceState.create(device: device, component: 'two', state: 'info')
    component_3 = DeviceState.create(device: device, component: 'three', state: 'info')
    put :update_state, { device_state: { device_id: device.id, state: 'clear', component: nil } }
    expect(device.reload.device_states.count).to eq 0
  end

  it 'clears a message by state id' do
    state_one = DeviceState.create(device: device, state: 'info')
    state_two = DeviceState.create(device: device, state: 'info')
    state_three = DeviceState.create(device: device, state: 'info')
    put :update_state, { device_state: { state_ids: [ state_one.id ], state: 'clear' } }
    expect(device.reload.device_states).to match_array([state_two, state_three])
  end

  it 'clears multiple messages by state id' do
    state_one = DeviceState.create(device: device, state: 'info')
    state_two = DeviceState.create(device: device, state: 'info')
    state_three = DeviceState.create(device: device, state: 'info')
    put :update_state, { device_state: { state_ids: [ state_one.id, state_two.id ], state: 'clear' } }
    expect(device.reload.device_states).to_not include(state_one)
    expect(device.reload.device_states).to_not include(state_two)
    expect(device.reload.device_states).to match_array([state_three])
  end
end
end
|
import argparse
def parse_args(argv=None):
    """Parse command-line options for the shortcut utility.

    Args:
        argv: Optional list of argument strings. Defaults to ``None``, in
            which case argparse reads ``sys.argv[1:]`` — identical to the
            previous zero-argument behavior; passing a list makes the
            parser unit-testable.

    Returns:
        argparse.Namespace with ``shortcuts`` (list[str] | None) and
        ``test`` (bool) attributes.
    """
    parser = argparse.ArgumentParser(description='Command-line utility for managing shortcuts')
    parser.add_argument('--shortcuts', nargs='+', help='List of shortcuts to run')
    parser.add_argument('--test', action='store_true', help='Run a test for the utility')
    return parser.parse_args(argv)
def run(*shortcuts):
    """Execute each named shortcut in order (currently just logs the name)."""
    for name in shortcuts:
        # Placeholder for real shortcut execution.
        print(f"Running shortcut: {name}")
def run_test():
    """Self-test entry point for the utility (currently just logs)."""
    print("Running test for the utility")
if __name__ == '__main__':
    cli_args = parse_args()
    # --shortcuts takes precedence over --test when both are supplied.
    if cli_args.shortcuts:
        run(*cli_args.shortcuts)
    elif cli_args.test:
        run_test()
let res = 0
// Number of times test() has been invoked. Fix: initialize to 0 — the
// original left it undefined, so index++ produced NaN.
let index = 0

// Recursively add 1 to `res` until it exceeds 10, then return it.
// Fix: the recursive result must be returned, otherwise the original
// top-level call received undefined instead of the final value.
const test = () => {
  const a = 1
  index++
  res = res + a
  console.log('res', res)
  if (res > 10) {
    return res
  }
  // Arrow functions have no own `this`, so a plain call is equivalent to
  // the original `test.call(this)`.
  return test()
}

console.log('test', test())

// ES dynamic-import syntax (kept commented out for reference):
// import('./src/info').then(() => {
//   console.log('hello')
// })
#!/bin/sh
# shellcheck disable=SC2086 # FIXME: fix these globing warnings
set -e

# Print an error message and abort the script.
die() {
    echo "die: $*"
    exit 1
}

#SERENITY_PACKET_LOGGING_ARG="-object filter-dump,id=hue,netdev=breh,file=e1000.pcap"

# Enable KVM acceleration only when /dev/kvm exists and is readable/writable.
[ -e /dev/kvm ] && [ -r /dev/kvm ] && [ -w /dev/kvm ] && SERENITY_VIRT_TECH_ARG="-enable-kvm"

[ -z "$SERENITY_BOCHS_BIN" ] && SERENITY_BOCHS_BIN="bochs"

# To support virtualization acceleration on mac
# we need to use 64-bit qemu
if [ "$(uname)" = "Darwin" ] && [ "$(uname -m)" = "x86_64" ]; then
    [ -z "$SERENITY_QEMU_BIN" ] && SERENITY_QEMU_BIN="qemu-system-x86_64"

    # Prefer the Hypervisor.framework accelerator when this QEMU supports it.
    if $SERENITY_QEMU_BIN --accel help | grep -q hvf; then
        SERENITY_VIRT_TECH_ARG="--accel hvf"
    fi
fi

SCRIPT_DIR="$(dirname "${0}")"

# Prepend the toolchain qemu directory so we pick up QEMU from there
PATH="$SCRIPT_DIR/../Toolchain/Local/qemu/bin:$PATH"

# Also prepend the i686 toolchain directory because that's where most
# people will have their QEMU binaries if they built them before the
# directory was changed to Toolchain/Local/qemu.
PATH="$SCRIPT_DIR/../Toolchain/Local/i686/bin:$PATH"

# Run mode: environment variable wins, otherwise the first CLI argument.
SERENITY_RUN="${SERENITY_RUN:-$1}"

# Pick a default QEMU binary matching the target architecture.
if [ -z "$SERENITY_QEMU_BIN" ]; then
    if [ "$SERENITY_ARCH" = "x86_64" ]; then
        SERENITY_QEMU_BIN="qemu-system-x86_64"
    else
        SERENITY_QEMU_BIN="qemu-system-i386"
    fi
fi

[ -z "$SERENITY_KERNEL_CMDLINE" ] && SERENITY_KERNEL_CMDLINE="hello"

[ -z "$SERENITY_RAM_SIZE" ] && SERENITY_RAM_SIZE=512M

# Under WSL, a Windows-hosted QEMU (/mnt/<drive>/...) needs vmx disabled and
# virtio turned off on the kernel command line.
if command -v wslpath >/dev/null; then
    case "$SERENITY_QEMU_BIN" in
        /mnt/?/*)
            [ -z "$SERENITY_QEMU_CPU" ] && SERENITY_QEMU_CPU="max,vmx=off"
            SERENITY_KERNEL_CMDLINE="$SERENITY_KERNEL_CMDLINE disable_virtio"
    esac
fi

[ -z "$SERENITY_QEMU_CPU" ] && SERENITY_QEMU_CPU="max"

# Choose the disk image for the requested boot flavor; convert the path for
# Windows-hosted QEMU under WSL.
[ -z "$SERENITY_DISK_IMAGE" ] && {
    if [ "$SERENITY_RUN" = qgrub ]; then
        SERENITY_DISK_IMAGE="grub_disk_image"
    elif [ "$SERENITY_RUN" = qextlinux ]; then
        SERENITY_DISK_IMAGE="extlinux_disk_image"
    else
        SERENITY_DISK_IMAGE="_disk_image"
    fi
    if command -v wslpath >/dev/null; then
        case "$SERENITY_QEMU_BIN" in
            /mnt/?/*)
                SERENITY_DISK_IMAGE=$(wslpath -w "$SERENITY_DISK_IMAGE")
                ;;
        esac
    fi
}
if ! command -v "$SERENITY_QEMU_BIN" >/dev/null 2>&1 ; then
die "Please install QEMU version 5.0 or newer or use the Toolchain/BuildQemu.sh script."
fi
SERENITY_QEMU_MIN_REQ_VERSION=5
installed_major_version=$("$SERENITY_QEMU_BIN" -version | head -n 1 | sed -E 's/QEMU emulator version ([1-9][0-9]*|0).*/\1/')
if [ "$installed_major_version" -lt "$SERENITY_QEMU_MIN_REQ_VERSION" ]; then
echo "Required QEMU >= 5.0! Found $($SERENITY_QEMU_BIN -version | head -n 1)"
echo "Please install a newer version of QEMU or use the Toolchain/BuildQemu.sh script."
die
fi
# Clipboard sharing chardev: prefer qemu-vdagent, fall back to spicevmc.
if [ -z "$SERENITY_SPICE" ] && "${SERENITY_QEMU_BIN}" -chardev help | grep -iq qemu-vdagent; then
    SERENITY_SPICE_SERVER_CHARDEV="-chardev qemu-vdagent,clipboard=on,mouse=off,id=vdagent,name=vdagent"
elif "${SERENITY_QEMU_BIN}" -chardev help | grep -iq spicevmc; then
    SERENITY_SPICE_SERVER_CHARDEV="-chardev spicevmc,id=vdagent,name=vdagent"
fi
# Choose a host audio backend per platform: coreaudio (macOS), dsound (WSL),
# sdl when this QEMU advertises it, else PulseAudio.
if [ "$(uname)" = "Darwin" ]; then
    SERENITY_AUDIO_BACKEND="-audiodev coreaudio,id=snd0"
elif command -v wslpath >/dev/null; then
    SERENITY_AUDIO_BACKEND="-audiodev dsound,id=snd0"
elif "$SERENITY_QEMU_BIN" -audio-help 2>&1 | grep -- "-audiodev id=sdl" >/dev/null; then
    SERENITY_AUDIO_BACKEND="-audiodev sdl,id=snd0"
else
    SERENITY_AUDIO_BACKEND="-audiodev pa,id=snd0"
fi
# QEMU 6+ wires the PC speaker via -machine; older versions use -soundhw.
if [ "$installed_major_version" -gt 5 ]; then
    SERENITY_AUDIO_HW="-machine pcspk-audiodev=snd0"
else
    SERENITY_AUDIO_HW="-soundhw pcspk"
fi
SERENITY_SCREENS="${SERENITY_SCREENS:-1}"
if [ "$SERENITY_SPICE" ]; then
    SERENITY_QEMU_DISPLAY_BACKEND="${SERENITY_QEMU_DISPLAY_BACKEND:-spice-app}"
elif command -v wslpath >/dev/null; then
    # QEMU for windows does not like gl=on, so detect if we are building in wsl, and if so, disable it
    # Also, when using the GTK backend we run into this problem: https://github.com/SerenityOS/serenity/issues/7657
    SERENITY_QEMU_DISPLAY_BACKEND="${SERENITY_QEMU_DISPLAY_BACKEND:-sdl,gl=off}"
elif [ $SERENITY_SCREENS -gt 1 ] && "${SERENITY_QEMU_BIN}" --display help | grep -iq sdl; then
    SERENITY_QEMU_DISPLAY_BACKEND="${SERENITY_QEMU_DISPLAY_BACKEND:-sdl,gl=off}"
elif ("${SERENITY_QEMU_BIN}" --display help | grep -iq sdl) && (ldconfig -p | grep -iq virglrenderer); then
    SERENITY_QEMU_DISPLAY_BACKEND="${SERENITY_QEMU_DISPLAY_BACKEND:-sdl,gl=on}"
elif "${SERENITY_QEMU_BIN}" --display help | grep -iq cocoa; then
    # QEMU for OSX seems to only support cocoa
    SERENITY_QEMU_DISPLAY_BACKEND="${SERENITY_QEMU_DISPLAY_BACKEND:-cocoa,gl=off}"
else
    SERENITY_QEMU_DISPLAY_BACKEND="${SERENITY_QEMU_DISPLAY_BACKEND:-gtk,gl=off}"
fi
if [ "$SERENITY_SCREENS" -gt 1 ]; then
    SERENITY_QEMU_DISPLAY_DEVICE="virtio-vga,max_outputs=$SERENITY_SCREENS "
    # QEMU appears to always relay absolute mouse coordinates relative to the screen that the mouse is
    # pointed to, without any way for us to know what screen it was. So, when dealing with multiple
    # displays force using relative coordinates only
    SERENITY_KERNEL_CMDLINE="$SERENITY_KERNEL_CMDLINE vmmouse=off"
else
    SERENITY_QEMU_DISPLAY_DEVICE="VGA,vgamem_mb=64 "
fi
# -s starts QEMU's gdb stub unless the user opted out.
if [ -z "$SERENITY_DISABLE_GDB_SOCKET" ]; then
    SERENITY_EXTRA_QEMU_ARGS="$SERENITY_EXTRA_QEMU_ARGS -s"
fi
if [ -z "$SERENITY_ETHERNET_DEVICE_TYPE" ]; then
    SERENITY_ETHERNET_DEVICE_TYPE="e1000"
fi
# Baseline QEMU invocation shared by the i440FX-style run modes.
# (Comments cannot go inside the quoted argument string itself.)
[ -z "$SERENITY_COMMON_QEMU_ARGS" ] && SERENITY_COMMON_QEMU_ARGS="
$SERENITY_EXTRA_QEMU_ARGS
-m $SERENITY_RAM_SIZE
-cpu $SERENITY_QEMU_CPU
-d guest_errors
-smp 2
-display $SERENITY_QEMU_DISPLAY_BACKEND
-device $SERENITY_QEMU_DISPLAY_DEVICE
-drive file=${SERENITY_DISK_IMAGE},format=raw,index=0,media=disk
-usb
$SERENITY_SPICE_SERVER_CHARDEV
-device virtio-serial,max_ports=2
-chardev stdio,id=stdout,mux=on
-device virtconsole,chardev=stdout
-device isa-debugcon,chardev=stdout
-device virtio-rng-pci
$SERENITY_AUDIO_BACKEND
$SERENITY_AUDIO_HW
-device sb16,audiodev=snd0
-device pci-bridge,chassis_nr=1,id=bridge1 -device $SERENITY_ETHERNET_DEVICE_TYPE,bus=bridge1
-device i82801b11-bridge,bus=bridge1,id=bridge2 -device sdhci-pci,bus=bridge2
-device i82801b11-bridge,id=bridge3 -device sdhci-pci,bus=bridge3
-device ich9-ahci,bus=bridge3
"
# Append SPICE server options when this QEMU supports them.
if "${SERENITY_QEMU_BIN}" -chardev help | grep -iq spice; then
    SERENITY_COMMON_QEMU_ARGS="$SERENITY_COMMON_QEMU_ARGS
-spice port=5930,agent-mouse=off,disable-ticketing=on
-device virtserialport,chardev=vdagent,nr=1
"
fi
# Separate argument set for the Q35 chipset run mode (PCIe topology).
[ -z "$SERENITY_COMMON_QEMU_Q35_ARGS" ] && SERENITY_COMMON_QEMU_Q35_ARGS="
$SERENITY_EXTRA_QEMU_ARGS
-m $SERENITY_RAM_SIZE
-cpu $SERENITY_QEMU_CPU
-machine q35
-d guest_errors
-smp 2
-device pcie-root-port,port=0x10,chassis=1,id=pcie.1,bus=pcie.0,multifunction=on,addr=0x2
-device pcie-root-port,port=0x11,chassis=2,id=pcie.2,bus=pcie.0,addr=0x2.0x1
-device pcie-root-port,port=0x12,chassis=3,id=pcie.3,bus=pcie.0,addr=0x2.0x2
-device pcie-root-port,port=0x13,chassis=4,id=pcie.4,bus=pcie.0,addr=0x2.0x3
-device pcie-root-port,port=0x14,chassis=5,id=pcie.5,bus=pcie.0,addr=0x2.0x4
-device pcie-root-port,port=0x15,chassis=6,id=pcie.6,bus=pcie.0,addr=0x2.0x5
-display $SERENITY_QEMU_DISPLAY_BACKEND
-device $SERENITY_QEMU_DISPLAY_DEVICE
-device secondary-vga
-device bochs-display,bus=pcie.6,addr=0x10.0x0
-device piix3-ide
-drive file=${SERENITY_DISK_IMAGE},format=raw,id=disk,if=none
-device ahci,id=ahci
-device ide-hd,bus=ahci.0,drive=disk,unit=0
-usb
-device virtio-serial
-chardev stdio,id=stdout,mux=on
-device virtconsole,chardev=stdout
-device isa-debugcon,chardev=stdout
-device virtio-rng-pci
$SERENITY_AUDIO_BACKEND
$SERENITY_AUDIO_HW
-device sb16
"
export SDL_VIDEO_X11_DGAMOUSE=0
# Default the build directory to the CWD, then enter it (or die).
: "${SERENITY_BUILD:=.}"
cd -P -- "$SERENITY_BUILD" || die "Could not cd to \"$SERENITY_BUILD\""
# Dispatch on the requested run mode; the default branch is QEMU with
# user-mode networking and several host port forwards.
if [ "$SERENITY_RUN" = "b" ]; then
    # Meta/run.sh b: bochs
    [ -z "$SERENITY_BOCHSRC" ] && {
        # Make sure that SERENITY_SOURCE_DIR is set and not empty
        [ -z "$SERENITY_SOURCE_DIR" ] && die 'SERENITY_SOURCE_DIR not set or empty'
        SERENITY_BOCHSRC="$SERENITY_SOURCE_DIR/Meta/bochsrc"
    }
    "$SERENITY_BOCHS_BIN" -q -f "$SERENITY_BOCHSRC"
elif [ "$SERENITY_RUN" = "qn" ]; then
    # Meta/run.sh qn: qemu without network
    "$SERENITY_QEMU_BIN" \
        $SERENITY_COMMON_QEMU_ARGS \
        -device $SERENITY_ETHERNET_DEVICE_TYPE \
        -kernel Kernel/Prekernel/Prekernel \
        -initrd Kernel/Kernel \
        -append "${SERENITY_KERNEL_CMDLINE}"
elif [ "$SERENITY_RUN" = "qtap" ]; then
    # Meta/run.sh qtap: qemu with tap
    sudo ip tuntap del dev tap0 mode tap || true
    sudo ip tuntap add dev tap0 mode tap user "$(id -u)"
    "$SERENITY_QEMU_BIN" \
        $SERENITY_COMMON_QEMU_ARGS \
        $SERENITY_VIRT_TECH_ARG \
        $SERENITY_PACKET_LOGGING_ARG \
        -netdev tap,ifname=tap0,id=br0 \
        -device $SERENITY_ETHERNET_DEVICE_TYPE,netdev=br0 \
        -kernel Kernel/Prekernel/Prekernel \
        -initrd Kernel/Kernel \
        -append "${SERENITY_KERNEL_CMDLINE}"
    sudo ip tuntap del dev tap0 mode tap
elif [ "$SERENITY_RUN" = "qgrub" ] || [ "$SERENITY_RUN" = "qextlinux" ]; then
    # Meta/run.sh qgrub: qemu with grub/extlinux
    "$SERENITY_QEMU_BIN" \
        $SERENITY_COMMON_QEMU_ARGS \
        $SERENITY_VIRT_TECH_ARG \
        $SERENITY_PACKET_LOGGING_ARG \
        -netdev user,id=breh,hostfwd=tcp:127.0.0.1:8888-10.0.2.15:8888,hostfwd=tcp:127.0.0.1:8823-10.0.2.15:23 \
        -device $SERENITY_ETHERNET_DEVICE_TYPE,netdev=breh
elif [ "$SERENITY_RUN" = "q35" ]; then
    # Meta/run.sh q35: qemu (q35 chipset) with SerenityOS
    echo "Starting SerenityOS with QEMU Q35 machine, Commandline: ${SERENITY_KERNEL_CMDLINE}"
    "$SERENITY_QEMU_BIN" \
        $SERENITY_COMMON_QEMU_Q35_ARGS \
        $SERENITY_VIRT_TECH_ARG \
        -netdev user,id=breh,hostfwd=tcp:127.0.0.1:8888-10.0.2.15:8888,hostfwd=tcp:127.0.0.1:8823-10.0.2.15:23 \
        -device $SERENITY_ETHERNET_DEVICE_TYPE,netdev=breh \
        -kernel Kernel/Prekernel/Prekernel \
        -initrd Kernel/Kernel \
        -append "${SERENITY_KERNEL_CMDLINE}"
elif [ "$SERENITY_RUN" = "ci" ]; then
    # Meta/run.sh ci: qemu in text mode
    echo "Running QEMU in CI"
    "$SERENITY_QEMU_BIN" \
        $SERENITY_EXTRA_QEMU_ARGS \
        $SERENITY_VIRT_TECH_ARG \
        -m $SERENITY_RAM_SIZE \
        -cpu $SERENITY_QEMU_CPU \
        -d guest_errors \
        -no-reboot \
        -smp 2 \
        -drive file=${SERENITY_DISK_IMAGE},format=raw,index=0,media=disk \
        -device ich9-ahci \
        -nographic \
        -display none \
        -debugcon file:debug.log \
        -kernel Kernel/Prekernel/Prekernel \
        -initrd Kernel/Kernel \
        -append "${SERENITY_KERNEL_CMDLINE}"
else
    # Meta/run.sh: qemu with user networking
    "$SERENITY_QEMU_BIN" \
        $SERENITY_COMMON_QEMU_ARGS \
        $SERENITY_VIRT_TECH_ARG \
        $SERENITY_PACKET_LOGGING_ARG \
        -netdev user,id=breh,hostfwd=tcp:127.0.0.1:8888-10.0.2.15:8888,hostfwd=tcp:127.0.0.1:8823-10.0.2.15:23,hostfwd=tcp:127.0.0.1:8000-10.0.2.15:8000,hostfwd=tcp:127.0.0.1:2222-10.0.2.15:22 \
        -device $SERENITY_ETHERNET_DEVICE_TYPE,netdev=breh \
        -kernel Kernel/Prekernel/Prekernel \
        -initrd Kernel/Kernel \
        -append "${SERENITY_KERNEL_CMDLINE}"
fi
|
<reponame>uzelac92/jsPDF-AutoTable
/* eslint-disable @typescript-eslint/no-unused-vars */
// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Object/assign
export function assign<T, U, V, W, X>(
target: T,
s: U,
s1?: V,
s2?: W,
s3?: X
): T & U & V & W & X {
if (target == null) {
throw new TypeError('Cannot convert undefined or null to object')
}
const to = Object(target)
for (let index = 1; index < arguments.length; index++) {
// eslint-disable-next-line prefer-rest-params
const nextSource = arguments[index]
if (nextSource != null) {
// Skip over if undefined or null
for (const nextKey in nextSource) {
// Avoid bugs when hasOwnProperty is shadowed
if (Object.prototype.hasOwnProperty.call(nextSource, nextKey)) {
to[nextKey] = nextSource[nextKey]
}
}
}
}
return to
}
|
#!/bin/bash
# Delete the 'jaspy' account if it exists; the trailing `|| true` keeps the
# script's exit status zero either way. Redirect `id` so its lookup output
# (or "no such user" error) does not pollute the caller's logs.
id jaspy >/dev/null 2>&1 && userdel jaspy || true
|
# Version 1.2
# Copyright (c) 2019 pilisir.tw@gmail.com
# Under MIT Licesne, please go to "https://en.wikipedia.org/wiki/MIT_License" to check license terms.
escapePath() {
    # Backslash-escape shell metacharacters (spaces, quotes, globs, $, etc.)
    # so the result can safely pass through the eval-based commands below.
    resultValue=$(echo "$@" | sed 's@[\]@\\\\@g;s/"/\\"/g;s/\ /\\ /g;s/'"'"'/\'"\'"'/g;s/`/\\`/g;s/:/\\:/g;s/?/\\?/g;s/!/\\!/g;s/</\\</g;s/>/\\>/g;s/|/\\|/g;s/*/\\*/g;s/(/\\(/g;s/)/\\)/g;s/\[/\\[/g;s/\]/\\]/g;s/{/\\{/g;s/}/\\}/g;s/&/\\&/g;s/%/\\%/g;s/\$/\\$/g;s/#/\\#/g;s/~/\\~/g;s/=/\\=/g;s/,/\\,/g;s@[;]@\\;@g;')
    # FIX: quote the expansion — an unquoted echo collapses consecutive
    # spaces and could glob-expand the escaped wildcards.
    echo "$resultValue"
}
zipFileName=""
eachFileName=""
zipFileDir=""
# Read one path per line on stdin, cd next to each item and add it to a zip
# archive named after the first item processed.
while read -r eachItemPath;
do
    eachItemPathEscape=$(escapePath "$eachItemPath")
    eachFileName=$(basename "$eachItemPathEscape")
    # FIX: quote the test operands — unquoted, a name containing spaces (or an
    # empty value) makes [ ... ] receive the wrong number of arguments.
    if [ -z "$zipFileName" ]; then
        zipFileName=$eachFileName
    fi
    if [ -d "$eachItemPath" ]; then
        # Directories are zipped from their parent so archive paths stay relative.
        zipFileDir=$eachItemPathEscape
        cmd="cd $zipFileDir"
        eval $cmd
        cd ../
    else
        zipFileDir=$(dirname "$eachItemPathEscape")
        cmd="cd $zipFileDir"
        eval $cmd
    fi
    # Exclude macOS metadata; `--` guards against names starting with '-'.
    cmd="zip -x */\__MACOSX -x *.DS_Store -r ./$zipFileName.zip -- $eachFileName"
    eval $cmd
done
<gh_stars>0
package pl.coderslab.spring01hibernate.entity.examples;
import pl.coderslab.spring01hibernate.repository.examples.UczenRepository;
import javax.persistence.*;
import java.util.List;
@Entity
public class Nauczyciel {
    // Surrogate primary key generated by the database identity column.
    @Id
    @GeneratedValue(strategy = GenerationType.IDENTITY)
    private long id;
    private String name;
    // Homeroom class ("klasa wychowawcza"); persist/merge cascade to the class.
    @OneToOne(cascade = {CascadeType.PERSIST, CascadeType.MERGE})
    private Klasa klasaWychowawcza;
    // Students taught by this teacher; fetched eagerly with the teacher.
    @ManyToMany(fetch = FetchType.EAGER)
    private List<Uczen> uczniowie;
    public long getId() {
        return id;
    }
    public void setId(long id) {
        this.id = id;
    }
    public String getName() {
        return name;
    }
    public void setName(String name) {
        this.name = name;
    }
    public Klasa getKlasaWychowawcza() {
        return klasaWychowawcza;
    }
    public void setKlasaWychowawcza(Klasa klasaWychowawcza) {
        this.klasaWychowawcza = klasaWychowawcza;
    }
    public List<Uczen> getUczniowie() {
        return uczniowie;
    }
    public void setUczniowie(List<Uczen> uczniowie) {
        this.uczniowie = uczniowie;
    }
    @Override
    public String toString() {
        // NOTE(review): uczniowie is omitted from toString — presumably to keep
        // the output short; confirm this is intentional.
        return "Nauczyciel{" +
                "id=" + id +
                ", name='" + name + '\'' +
                ", klasaWychowawcza=" + klasaWychowawcza +
                '}';
    }
}
|
use App\Tenant;
use Carbon\Carbon;
class TenantFilter
{
    /**
     * Return tenants ordered according to the supplied sort field/direction.
     *
     * NOTE(review): the body is a stub with no return statement — confirm the
     * intended implementation before relying on this method.
     *
     * @param string $sortField field to sort tenants by
     * @param string $orderBy   sort direction — presumably 'asc'/'desc'; verify against callers
     */
    public function getTenants($sortField, $orderBy)
    {
        // Implementation to retrieve tenants based on sortField and orderBy
        // ...
    }
    /**
     * Return tenants whose registered_at falls within the given range.
     *
     * whereBetween is inclusive of both endpoints.
     *
     * @param mixed $startDate inclusive lower bound (date string or Carbon instance)
     * @param mixed $endDate   inclusive upper bound
     * @return mixed collection of Tenant models (result of Eloquent get())
     */
    public function filterByRegistrationDate($startDate, $endDate)
    {
        // Retrieve tenants registered within the specified date range
        $filteredTenants = Tenant::whereBetween('registered_at', [$startDate, $endDate])->get();
        return $filteredTenants;
    }
}
<gh_stars>0
function addBorder(picture) {
  // Wrap every row in '*' and add a top/bottom border sized to the first and
  // last rows respectively. An empty picture stays empty.
  const framed = picture.map((row) => `*${row}*`);
  if (picture.length > 0) {
    framed.unshift("*".repeat(picture[0].length + 2));
    framed.push("*".repeat(picture[picture.length - 1].length + 2));
  }
  return framed;
}
|
#!/bin/bash
# Install the Python dependencies for this training environment.
# --no-cache-dir keeps pip's cache out of the image; `| cat` forces
# non-interactive, plain line output (useful in CI logs).
pip install --user bcolz mxnet tensorboardX matplotlib easydict opencv-python einops --no-cache-dir -U | cat
pip install --user scikit-image imgaug PyTurboJPEG --no-cache-dir -U | cat
pip install --user scikit-learn --no-cache-dir -U | cat
# torch/torchvision pinned to CUDA 11.0 builds from the official wheel index.
pip install torch==1.7.1+cu110 torchvision==0.8.2+cu110 -f https://download.pytorch.org/whl/torch_stable.html --no-cache-dir -U | cat
# NOTE(review): imgaug is installed twice (also two lines above) — likely redundant.
pip install --user termcolor imgaug prettytable --no-cache-dir -U | cat
pip install --user timm==0.3.4 --no-cache-dir -U | cat
|
<reponame>jelly/patternfly-react<filename>packages/react-core/src/components/DatePicker/examples/DatePickerControlledCalendar.tsx<gh_stars>0
import React from 'react';
import { Button, DatePicker } from '@patternfly/react-core';
export const DatePickerControlledCalendar: React.FunctionComponent = () => {
  // Imperative handle to the DatePicker; null until the ref attaches.
  const dateRef = React.useRef(null);
  // Open/close the picker's calendar via the ref's toggleCalendar() method.
  const onClick = () => {
    if (dateRef.current) {
      dateRef.current.toggleCalendar();
    }
  };
  return (
    <React.Fragment>
      <Button onClick={onClick}>Toggle calendar</Button>
      <DatePicker ref={dateRef} />
    </React.Fragment>
  );
};
|
-- Combined Lua script for incrementing key value and setting expiration, and batch deletion of keys
-- KEYS:    keys to delete in batches; KEYS[1] doubles as the counter key.
-- ARGV[1]: expire time (seconds) applied when the counter is first created.
-- ARGV[2]: increment amount for the counter.
local function delBatch(keys, start, batchSize)
    -- Delete up to batchSize keys starting at `start`; returns the deleted count.
    local endIdx = math.min(start + batchSize - 1, #keys)
    return redis.call('del', unpack(keys, start, endIdx))
end
local function incrExpire(key, expireTime, incrementBy)
    local current = redis.call("incrby", key, incrementBy)
    -- Counter equals the increment only on first creation: set the TTL once.
    if tonumber(current) == tonumber(incrementBy) then
        redis.call("expire", key, expireTime)
    end
end
local function combinedScript(keys, start, batchSize, expireTime, incrementBy)
    local res = 0
    -- Delete in chunks — presumably to stay under unpack()/argument limits for
    -- very large KEYS arrays (TODO confirm).
    while start <= #keys do
        res = res + delBatch(keys, start, batchSize)
        start = start + batchSize
    end
    incrExpire(keys[1], expireTime, incrementBy)
    return res
end
return combinedScript(KEYS, 1, 5000, ARGV[1], ARGV[2])
<gh_stars>10-100
# Generated by Django 3.0.8 on 2020-07-31 18:22
from django.db import migrations
class Migration(migrations.Migration):
    """Rename the ``time`` field on the ``email`` model to ``timestamp``."""
    dependencies = [
        ('mail', '0001_initial'),
    ]
    operations = [
        migrations.RenameField(
            model_name='email',
            old_name='time',
            new_name='timestamp',
        ),
    ]
|
// Current user's name, read from the 'username' cookie.
var username = String(getCookie('username'));
$(document).ready(function(){
    // Send button: append the message locally, then broadcast it via socket.io.
    $("#send").click(function(){
        console.log("sent")
        var ul = document.getElementById("messages");
        var li = document.createElement("li");
        // "/showloc" is a chat command that reports the player's coordinates.
        if (document.getElementById("messageinput").value == "/showloc"){
            li.appendChild(document.createTextNode( "*"+username+": "+"x: " + Math.round(player.x) + ", y: " + Math.round(player.y)));
            // document.getElementById(li.id).style.color = 'red'
            ul.appendChild(li);
            socket.emit('usermessage', {
                message: "*"+username+": "+"x: " + Math.round(player.x) + ", y: " + Math.round(player.y)
            })
        }
        else{
            // NOTE(review): the local entry is prefixed with "*" but the emitted
            // message is not — confirm whether this asymmetry is intentional.
            li.appendChild(document.createTextNode( "*"+username+": "+ document.getElementById("messageinput").value));
            ul.appendChild(li);
            socket.emit('usermessage', {
                message: username+": "+ document.getElementById("messageinput").value
            })
        }
        // Clear the input and keep the message list scrolled to the bottom.
        document.getElementById("messageinput").value = "";
        var objDiv = document.getElementById("messages");
        objDiv.scrollTop = objDiv.scrollHeight;
    });
});
// Incoming broadcast messages from the server are appended to the list.
socket.on('chat message', function(msg){
    $('#messages').append($('<li>').text(msg));
    window.scrollTo(0, document.body.scrollHeight);
});
function getCookie(cname) {
    // Look up a cookie's value by name; returns "" when the cookie is absent.
    var prefix = cname + "=";
    var entries = decodeURIComponent(document.cookie).split(';');
    for (var idx = 0; idx < entries.length; idx++) {
        var entry = entries[idx];
        // Strip the leading spaces left over from splitting on ';'.
        while (entry.charAt(0) == ' ') {
            entry = entry.substring(1);
        }
        if (entry.indexOf(prefix) == 0) {
            return entry.substring(prefix.length, entry.length);
        }
    }
    return "";
}
<filename>src/scripts/index.js
// Aggregate entry point: re-export the individual script modules.
const reminder = require('./reminder');
module.exports = {
    reminder
}
<filename>2-resources/__DATA-Structures/Data-Structures-Algos-Codebase-master/ALGO/UNSORTED/Pig Latin.js
//Pig Latin is a way of altering English Words. The rules are as follows:
//If a word begins with a consonant, take the first consonant or consonant cluster, move it to the end of the word, and add "ay" to it.
// If a word begins with a vowel, just add "way" at the end.
function translatePigLatin( str ) {
  // Leading consonant cluster, or null when the word starts with a vowel.
  const clusterMatch = /^[^aeiou]+/.exec( str )
  if ( clusterMatch === null ) {
    return str.concat( "way" )
  }
  const cluster = clusterMatch[ 0 ]
  // Move the cluster to the end and append "ay".
  return str.slice( cluster.length ).concat( cluster, "ay" )
}
console.log( translatePigLatin( "california" ) ) //"aliforniacay"
console.log( translatePigLatin( "algorithm" ) ) //"algorithmway"
console.log( translatePigLatin( "eight" ) ) //"eightway"
console.log( translatePigLatin( "pratikshya" ) ) //"atikshyapray"
//The caret serves two different purposes. It is a special character that denotes "the beginning of a line" and it is a "not" operator inside of []s.
//Matches any character that is not a vowel followed by any number of characters.
#!/bin/sh
. ./build/tfs/common/node.sh
. ./scripts/env.sh
. ./build/tfs/common/common.sh
# Secrets arrive as positional arguments from the build definition.
# NOTE(review): argv is visible in process listings — consider env vars instead.
export VSCODE_MIXIN_PASSWORD="$1"
export AZURE_STORAGE_ACCESS_KEY="$2"
export AZURE_STORAGE_ACCESS_KEY_2="$3"
export MOONCAKE_STORAGE_ACCESS_KEY="$4"
export AZURE_DOCUMENTDB_MASTERKEY="$5"
VSO_PAT="$6"
# Persist the VSO personal-access token so tools that read ~/.netrc can authenticate.
echo "machine monacotools.visualstudio.com password $VSO_PAT" > ~/.netrc
step "Install dependencies" \
    npm install
step "Hygiene" \
    npm run gulp -- hygiene
step "Mix in repository from vscode-distro" \
    npm run gulp -- mixin
step "Install distro dependencies" \
    node build/tfs/common/installDistro.js
step "Build minified & upload source maps" \
    npm run gulp -- vscode-darwin-min upload-vscode-sourcemaps
# step "Create loader snapshot"
#   node build/lib/snapshotLoader.js
step "Run unit tests" \
    ./scripts/test.sh --build --reporter dot
step "Run integration tests" \
    ./scripts/test-integration.sh
step "Publish release" \
    ./build/tfs/darwin/release.sh
|
#pragma once
#include <KAI/Language/Language.h>
|
package be.kwakeroni.parameters.adapter.jmx.api;
import be.kwakeroni.parameters.definition.api.DefinitionVisitor;
import java.util.function.Consumer;
/**
 * Factory for JMX MBeans representing parameter-group definitions; visits a
 * definition to produce a {@code JMXGroupBuilder}.
 * <p>
 * Created by kwakeroni on 09/05/17.
 */
public interface JMXGroupMBeanFactory extends DefinitionVisitor<JMXGroupBuilder> {
    /** Registers this factory with the given registry. */
    public void register(Registry registry);
    /**
     * Unregisters this factory.
     * NOTE(review): the consumer presumably receives the class being removed —
     * confirm against implementations.
     */
    public void unregister(Consumer<Class<?>> registry);
    /** Callback used to register a factory instance under its declared type. */
    @FunctionalInterface
    public static interface Registry {
        public <I extends JMXGroupMBeanFactory> void register(Class<? super I> type, I formatter);
    }
}
|
package app.services;
import app.conf.AppConfig;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.test.context.ActiveProfiles;
import org.springframework.test.context.ContextConfiguration;
import org.springframework.test.context.junit4.SpringJUnit4ClassRunner;
import java.util.Map;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
@RunWith(SpringJUnit4ClassRunner.class)
@ContextConfiguration(classes = { AppConfig.class })
@ActiveProfiles(profiles = {"test"})
public class DemoMovieServiceTest {
    @Autowired
    private DemoMovieService service;
    /**
     * getSelectedParams must include only the parameters the user actually set:
     * unset fields (toYear) and sentinel values ("NOT_SELECTED") are excluded,
     * and numeric fields are returned as strings.
     */
    @Test
    public void onlyParamsSelectedByUserShouldBePresentInMap() {
        SearchParams params = new SearchParams();
        params.setName("The Matrix");
        params.setGenre("NOT_SELECTED");
        params.setFromYear(1992);
        Map<String, String> result = service.getSelectedParams(params);
        assertEquals("The Matrix", result.get("name"));
        assertEquals("1992", result.get("fromYear"));
        assertFalse(result.keySet().contains("toYear"));
        assertFalse(result.keySet().contains("genre"));
        assertFalse(result.values().contains("NOT_SELECTED"));
    }
}
#!/bin/bash
# Copyright 2021 Dialpad, Inc. (Shreekantha Nadig, Riqiang Wang)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
# Prepare a Kaldi-style data directory (wav.scp, utt2spk, spk2utt, text)
# from a source directory containing transcription.txt and an audio folder.
if [ "$#" -ne 3 ]; then
    echo "Usage: $0 <src-dir> <dst-dir> <audio-dir-name>"
    echo "e.g.: $0 downloads/hindi/train/ data/hindi/train/ Audios"
    exit 1
fi
src=$1
dst=$2
audio_dir=$3
if ! which sox >&/dev/null; then
    echo "Please install 'sox' on ALL worker nodes!"
    exit 1
fi
mkdir -p ${dst}
cp ${src}/transcription.txt ${dst}/text.orig
# Strip punctuation that must not appear in training transcripts.
sed -i "s/\.//g" ${dst}/text.orig
sed -i "s/,//g" ${dst}/text.orig
sed -i "s/?//g" ${dst}/text.orig
cat ${dst}/text.orig | awk '{print $1}' > ${dst}/uttids
# Build wav.scp entries that resample each wav to 8 kHz / 16-bit on the fly.
# FIX: truncate wav_cmd first — the old append-only loop left stale lines on
# re-runs, duplicating entries and misaligning the paste below.
: > ${dst}/wav_cmd
for i in `cat ${dst}/uttids`; do echo "sox ${src}/${audio_dir}/${i}.wav -t wav -r 8000 -b 16 - |" >> ${dst}/wav_cmd; done;
paste ${dst}/uttids ${dst}/wav_cmd > ${dst}/wav.scp
# No speaker labels are available: treat every utterance as its own speaker.
paste ${dst}/uttids ${dst}/uttids > ${dst}/utt2spk
utils/utt2spk_to_spk2utt.pl ${dst}/utt2spk > ${dst}/spk2utt
python3 local/get_valid_utts.py ${dst}/text.orig ${dst}/text
utils/fix_data_dir.sh $dst || exit 1
utils/validate_data_dir.sh --no-feats $dst || exit 1
echo "$0: successfully prepared data in $dst"
exit 0
|
#include <iostream>
// Read n from stdin and print the first n Fibonacci numbers, space-separated.
int main() {
    int n;
    std::cin >> n;
    // FIX: the old code printed "0 1 " unconditionally, emitting two numbers
    // even for n == 0 or n == 1. Loop n times so exactly n numbers are printed.
    // long long avoids signed overflow of fib(n) for n up to 92 (int overflows
    // at n == 47).
    long long a = 0, b = 1;
    for (int i = 0; i < n; i++) {
        std::cout << a << " ";
        long long next = a + b;
        a = b;
        b = next;
    }
    return 0;
}
<filename>test/kennel/api_test.rb
# frozen_string_literal: true
require_relative "../test_helper"
SingleCov.covered!
# Exercises Kennel's thin wrapper around the Datadog REST API.
# All HTTP traffic is stubbed with webmock via stub_datadog_request.
describe Kennel::Api do
  let(:api) { Kennel::Api.new("app", "api") }
  # show: GET a single resource; slo/synthetics replies are unwrapped/normalized.
  describe "#show" do
    it "fetches monitor" do
      stub_datadog_request(:get, "monitor/1234")
        .with(body: nil, headers: { "Content-Type" => "application/json" })
        .to_return(body: { bar: "foo" }.to_json)
      api.show("monitor", 1234).must_equal bar: "foo"
    end
    it "fetches slo" do
      stub_datadog_request(:get, "slo/1234").to_return(body: { data: { bar: "foo" } }.to_json)
      api.show("slo", "1234").must_equal bar: "foo"
    end
    it "fetches synthetics test" do
      stub_datadog_request(:get, "synthetics/tests/1234").to_return(body: { public_id: "1234" }.to_json)
      api.show("synthetics/tests", "1234").must_equal id: "1234"
    end
    it "can pass params so external users can filter" do
      stub_datadog_request(:get, "monitor/1234", "&foo=bar")
        .with(body: nil, headers: { "Content-Type" => "application/json" })
        .to_return(body: { bar: "foo" }.to_json)
      api.show("monitor", 1234, foo: "bar").must_equal bar: "foo"
    end
    it "does not ignore 404" do
      stub_datadog_request(:get, "monitor/1234").to_return(status: 404)
      assert_raises RuntimeError do
        api.show("monitor", 1234).must_equal({})
      end.message.must_include "Error 404 during GET"
    end
  end
  # list: GET collections; per-resource wrappers are unwrapped, slo paginates.
  describe "#list" do
    it "fetches monitors" do
      stub_datadog_request(:get, "monitor", "&foo=bar")
        .with(body: nil, headers: { "Content-Type" => "application/json" })
        .to_return(body: [{ bar: "foo" }].to_json)
      api.list("monitor", foo: "bar").must_equal [{ bar: "foo" }]
    end
    it "fetches dashboards" do
      stub_datadog_request(:get, "dashboard")
        .to_return(body: { dashboards: [{ bar: "foo" }] }.to_json)
      api.list("dashboard").must_equal [{ bar: "foo" }]
    end
    it "shows a descriptive failure when request fails" do
      stub_datadog_request(:get, "monitor", "&foo=bar")
        .to_return(status: 300, body: "foo")
      e = assert_raises(RuntimeError) { api.list("monitor", foo: "bar") }
      e.message.must_equal "Error 300 during GET /api/v1/monitor?foo=bar\nfoo"
    end
    it "fetches syntetic tests" do
      stub_datadog_request(:get, "synthetics/tests").to_return(body: { tests: [{ public_id: "123" }] }.to_json)
      api.list("synthetics/tests").must_equal [{ id: "123" }]
    end
    describe "slo" do
      it "paginates" do
        # A full page (1000 items) triggers a request for the next offset.
        stub_datadog_request(:get, "slo", "&limit=1000&offset=0").to_return(body: { data: Array.new(1000) { { bar: "foo" } } }.to_json)
        stub_datadog_request(:get, "slo", "&limit=1000&offset=1000").to_return(body: { data: [{ bar: "foo" }] }.to_json)
        api.list("slo").size.must_equal 1001
      end
      it "fails when pagination would not work" do
        assert_raises(ArgumentError) { api.list("slo", limit: 100) }
        assert_raises(ArgumentError) { api.list("slo", offset: 100) }
      end
    end
  end
  describe "#create" do
    it "creates a monitor" do
      stub_datadog_request(:post, "monitor")
        .with(body: "{\"foo\":\"bar\"}").to_return(body: { bar: "foo" }.to_json)
      api.create("monitor", foo: "bar").must_equal bar: "foo"
    end
    it "shows a descriptive failure when request fails" do
      stub_datadog_request(:post, "monitor")
        .with(body: "{\"foo\":\"bar\"}").to_return(body: { bar: "foo" }.to_json, status: 300)
      e = assert_raises(RuntimeError) { api.create("monitor", foo: "bar") }
      e.message.must_equal <<~TEXT.strip
        Error 300 during POST /api/v1/monitor
        request:
        {
          \"foo\": \"bar\"
        }
        response:
        {\"bar\":\"foo\"}
      TEXT
    end
    it "unwraps slo array reply" do
      stub_datadog_request(:post, "slo").to_return(body: { data: [{ bar: "foo" }] }.to_json)
      api.create("slo", foo: "bar").must_equal bar: "foo"
    end
    it "fixes synthetic test public_id" do
      stub_datadog_request(:post, "synthetics/tests").to_return(body: { public_id: "123" }.to_json)
      api.create("synthetics/tests", foo: "bar").must_equal id: "123"
    end
  end
  describe "#update" do
    it "updates a monitor" do
      stub_datadog_request(:put, "monitor/123")
        .with(body: "{\"foo\":\"bar\"}").to_return(body: { bar: "foo" }.to_json)
      api.update("monitor", 123, foo: "bar").must_equal bar: "foo"
    end
    it "updates a synthetics test" do
      stub_datadog_request(:put, "synthetics/tests/123").to_return(body: { public_id: "123" }.to_json)
      api.update("synthetics/tests", "123", foo: "bar").must_equal id: "123"
    end
  end
  # delete: monitors/dashes use force=true; synthetics uses a POST batch endpoint.
  describe "#delete" do
    it "deletes a monitor" do
      stub_datadog_request(:delete, "monitor/123", "&force=true").to_return(body: "{}")
      api.delete("monitor", 123).must_equal({})
    end
    it "deletes a dash" do
      stub_datadog_request(:delete, "dash/123", "&force=true")
        .with(body: nil).to_return(body: "")
      api.delete("dash", 123).must_equal({})
    end
    it "deletes synthetic" do
      stub_datadog_request(:post, "synthetics/tests/delete")
        .with(body: { public_ids: [123] }.to_json)
        .to_return(body: "")
      api.delete("synthetics/tests", 123).must_equal({})
    end
    it "shows a descriptive failure when request fails, without including api keys" do
      stub_datadog_request(:delete, "monitor/123", "&force=true")
        .with(body: nil).to_return(body: "{}", status: 300)
      e = assert_raises(RuntimeError) { api.delete("monitor", 123) }
      e.message.must_equal <<~TEXT.strip
        Error 300 during DELETE /api/v1/monitor/123?force=true
        {}
      TEXT
    end
    it "ignores 404" do
      stub_datadog_request(:delete, "monitor/123", "&force=true").to_return(status: 404)
      api.delete("monitor", 123).must_equal({})
    end
  end
  # fill_details!: per-item GETs merged into list results, cached by modified_at.
  describe "#fill_details!" do
    in_temp_dir # uses file-cache
    it "does nothing when not needed" do
      api.fill_details!("monitor", {})
    end
    it "fills dashboards" do
      stub_datadog_request(:get, "dashboard/123").to_return(body: { bar: "foo" }.to_json)
      list = [{ id: "123", modified_at: "123" }]
      api.fill_details!("dashboard", list)
      list.must_equal [{ id: "123", modified_at: "123", bar: "foo" }]
    end
    it "caches" do
      show = stub_datadog_request(:get, "dashboard/123").to_return(body: "{}")
      2.times do
        api.fill_details!("dashboard", [{ id: "123", modified_at: "123" }])
      end
      assert_requested show, times: 1
    end
    it "does not cache when modified" do
      show = stub_datadog_request(:get, "dashboard/123").to_return(body: "{}")
      2.times do |i|
        api.fill_details!("dashboard", [{ id: "123", modified_at: i }])
      end
      assert_requested show, times: 2
    end
  end
  # Retry behavior: only GETs are retried, and only on 5xx or timeouts.
  describe "retries" do
    capture_all
    it "does not retry successful" do
      request = stub_datadog_request(:get, "monitor/1234").to_return(body: { bar: "foo" }.to_json)
      api.show("monitor", 1234).must_equal bar: "foo"
      assert_requested request
    end
    it "does not retry other failures" do
      request = stub_datadog_request(:get, "monitor/1234").to_return(body: { bar: "foo" }.to_json, status: 400)
      assert_raises(RuntimeError) { api.show("monitor", 1234) }
      assert_requested request
    end
    it "does not retry non-gets" do
      request = stub_datadog_request(:delete, "monitor/1234", "&force=true")
        .to_return(body: { bar: "foo" }.to_json, status: 400)
      assert_raises(RuntimeError) { api.delete("monitor", 1234) }
      assert_requested request
    end
    it "retries on random get 500 errors" do
      request = stub_datadog_request(:get, "monitor/1234").to_return(
        [
          { status: 500 },
          { status: 200, body: { foo: "bar" }.to_json }
        ]
      )
      api.show("monitor", 1234).must_equal foo: "bar"
      assert_requested request, times: 2
      stderr.string.must_equal "Retrying on server error 500 for /api/v1/monitor/1234\n"
    end
    it "retries on timeout" do
      request = stub_datadog_request(:get, "monitor/1234").to_timeout
      assert_raises Faraday::TimeoutError do
        api.show("monitor", 1234).must_equal foo: "bar"
      end
      assert_requested request, times: 3
      stderr.string.scan(/\d retries left/).must_equal ["1 retries left", "0 retries left"]
    end
    it "fails on repeated errors" do
      request = stub_datadog_request(:get, "monitor/1234").to_return(status: 500)
      e = assert_raises(RuntimeError) { api.show("monitor", 1234) }
      e.message.must_equal "Error 500 during GET /api/v1/monitor/1234\n"
      assert_requested request, times: 2
      stderr.string.must_equal "Retrying on server error 500 for /api/v1/monitor/1234\n"
    end
  end
  # FORCE_GET_CACHE short-circuits repeated GETs via the file cache.
  describe "force get cache" do
    in_temp_dir # uses file-cache
    with_env FORCE_GET_CACHE: "true"
    it "caches" do
      get = stub_datadog_request(:get, "monitor/1234").to_return(body: "{}")
      2.times { api.show("monitor", 1234).must_equal({}) }
      assert_requested get, times: 1
    end
  end
end
|
package io.dronefleet.mavlink.matrixpilot;
import io.dronefleet.mavlink.annotations.MavlinkFieldInfo;
import io.dronefleet.mavlink.annotations.MavlinkMessageBuilder;
import io.dronefleet.mavlink.annotations.MavlinkMessageInfo;
import java.lang.Object;
import java.lang.Override;
import java.lang.String;
import java.util.Objects;
/**
 * Backwards compatible version of SERIAL_UDB_EXTRA F17 format
 */
@MavlinkMessageInfo(
        id = 183,
        crc = 175,
        description = "Backwards compatible version of SERIAL_UDB_EXTRA F17 format"
)
public final class SerialUdbExtraF17 {
    // Immutable payload fields; instances are created only through the Builder.
    private final float sueFeedForward;

    private final float sueTurnRateNav;

    private final float sueTurnRateFbw;

    // Private constructor: callers must use builder() so construction stays in
    // sync with the @MavlinkFieldInfo positions declared below.
    private SerialUdbExtraF17(float sueFeedForward, float sueTurnRateNav, float sueTurnRateFbw) {
        this.sueFeedForward = sueFeedForward;
        this.sueTurnRateNav = sueTurnRateNav;
        this.sueTurnRateFbw = sueTurnRateFbw;
    }

    /**
     * Returns a builder instance for this message.
     */
    @MavlinkMessageBuilder
    public static Builder builder() {
        return new Builder();
    }

    /**
     * SUE Feed Forward Gain
     */
    @MavlinkFieldInfo(
            position = 1,
            unitSize = 4,
            description = "SUE Feed Forward Gain"
    )
    public final float sueFeedForward() {
        return this.sueFeedForward;
    }

    /**
     * SUE Max Turn Rate when Navigating
     */
    @MavlinkFieldInfo(
            position = 2,
            unitSize = 4,
            description = "SUE Max Turn Rate when Navigating"
    )
    public final float sueTurnRateNav() {
        return this.sueTurnRateNav;
    }

    /**
     * SUE Max Turn Rate in Fly By Wire Mode
     */
    @MavlinkFieldInfo(
            position = 3,
            unitSize = 4,
            description = "SUE Max Turn Rate in Fly By Wire Mode"
    )
    public final float sueTurnRateFbw() {
        return this.sueTurnRateFbw;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || !getClass().equals(o.getClass())) return false;
        SerialUdbExtraF17 other = (SerialUdbExtraF17)o;
        // deepEquals autoboxes the floats, so NaN fields compare equal here
        // (Float.equals semantics), unlike the primitive == operator.
        if (!Objects.deepEquals(sueFeedForward, other.sueFeedForward)) return false;
        if (!Objects.deepEquals(sueTurnRateNav, other.sueTurnRateNav)) return false;
        if (!Objects.deepEquals(sueTurnRateFbw, other.sueTurnRateFbw)) return false;
        return true;
    }

    @Override
    public int hashCode() {
        // Standard 31-multiplier accumulation over the boxed field values;
        // consistent with the deepEquals-based equals() above.
        int result = 0;
        result = 31 * result + Objects.hashCode(sueFeedForward);
        result = 31 * result + Objects.hashCode(sueTurnRateNav);
        result = 31 * result + Objects.hashCode(sueTurnRateFbw);
        return result;
    }

    @Override
    public String toString() {
        return "SerialUdbExtraF17{sueFeedForward=" + sueFeedForward
                 + ", sueTurnRateNav=" + sueTurnRateNav
                 + ", sueTurnRateFbw=" + sueTurnRateFbw + "}";
    }

    /** Mutable builder for {@link SerialUdbExtraF17}; field setters return this. */
    public static final class Builder {
        private float sueFeedForward;

        private float sueTurnRateNav;

        private float sueTurnRateFbw;

        /**
         * SUE Feed Forward Gain
         */
        @MavlinkFieldInfo(
                position = 1,
                unitSize = 4,
                description = "SUE Feed Forward Gain"
        )
        public final Builder sueFeedForward(float sueFeedForward) {
            this.sueFeedForward = sueFeedForward;
            return this;
        }

        /**
         * SUE Max Turn Rate when Navigating
         */
        @MavlinkFieldInfo(
                position = 2,
                unitSize = 4,
                description = "SUE Max Turn Rate when Navigating"
        )
        public final Builder sueTurnRateNav(float sueTurnRateNav) {
            this.sueTurnRateNav = sueTurnRateNav;
            return this;
        }

        /**
         * SUE Max Turn Rate in Fly By Wire Mode
         */
        @MavlinkFieldInfo(
                position = 3,
                unitSize = 4,
                description = "SUE Max Turn Rate in Fly By Wire Mode"
        )
        public final Builder sueTurnRateFbw(float sueTurnRateFbw) {
            this.sueTurnRateFbw = sueTurnRateFbw;
            return this;
        }

        /** Builds the immutable message from the values set so far. */
        public final SerialUdbExtraF17 build() {
            return new SerialUdbExtraF17(sueFeedForward, sueTurnRateNav, sueTurnRateFbw);
        }
    }
}
|
/** OSM server classes. */
package io.opensphere.osm.server;
|
def list_sum(list1, list2):
    """Return the element-wise sum of two equal-length lists.

    Args:
        list1: First sequence of addends.
        list2: Second sequence of addends.

    Returns:
        A new list where each element is ``list1[i] + list2[i]``, or
        ``None`` when the lengths differ (preserved legacy contract --
        callers check for ``None`` rather than catching an exception).
    """
    if len(list1) != len(list2):
        return None
    # zip pairs the elements directly; no index bookkeeping needed.
    return [a + b for a, b in zip(list1, list2)]
<gh_stars>1-10
'use strict'
/**
 * Represents a TagRenderer.
 * @constructor
 * @param {Object} options - Options for TagRenderer.
 * Valid options:
 *  - pretty - defines if rendering is pretty or not (defaults to false)
 */
function TagRenderer (options = {}) {
  // Normalize to a strict boolean; `ending` is the per-line terminator used
  // by all render helpers ('\n' in pretty mode, '' otherwise).
  this.pretty = (options.pretty === undefined) ? false : options.pretty
  this.ending = (this.pretty) ? '\n' : ''
}
/**
 * Renders a given tag as opening tag + text + closing tag.
 * @memberof TagRenderer
 * @param {Tag} tag - Tag to be rendered.
 * @param {number} padding - Padding (number of spaces before tag)
 * @returns {string} Rendered tag.
 */
TagRenderer.prototype.render = function (tag, padding = 0) {
  // Text is indented one level deeper than the tag itself (pretty mode only).
  const [tagPadding, textPadding] = this._getPadding(padding)
  const openingTag = this._renderOpeningTag(tag, tagPadding)
  const tagText = this._renderTagText(tag, textPadding)
  const closingTag = this._renderClosingTag(tag, tagPadding)
  return `${openingTag}${tagText}${closingTag}`
}
/**
 * Computes the [tagPadding, textPadding] pair for a nesting level.
 * In pretty mode the tag is indented by `padding` spaces and its text by
 * one more; in compact mode both paddings are empty strings.
 * @param {number} padding - Number of spaces before the tag.
 * @returns {string[]} Two-element array: [tagPadding, textPadding].
 */
TagRenderer.prototype._getPadding = function (padding) {
  if (!this.pretty) {
    return ['', '']
  }
  const tagPad = ' '.repeat(padding)
  return [tagPad, tagPad + ' ']
}
/**
 * Renders the opening tag, e.g. `<div class="x">`, followed by the
 * configured line ending.
 * @param {Tag} tag - Tag whose opening markup is produced.
 * @param {string} tagPadding - Indentation placed before the tag.
 * @returns {string} Opening-tag markup.
 */
TagRenderer.prototype._renderOpeningTag = function (tag, tagPadding) {
  // Attributes are optional; `null` means the tag has none.
  const attrs = (tag.params.attrs !== null) ? ` ${tag.params.attrs}` : ''
  return `${tagPadding}<${tag.name}${attrs}>${this.ending}`
}
/**
 * Renders the tag's inner text, prefixing every line with `textPadding`
 * and suffixing it with the configured line ending.
 * @param {Tag} tag - Tag whose text is rendered (`params.text` may be null).
 * @param {string} textPadding - Indentation placed before each text line.
 * @returns {string} Rendered text, or '' when the tag has no text.
 */
TagRenderer.prototype._renderTagText = function (tag, textPadding) {
  if (tag.params.text === null) {
    return ''
  }
  let rendered = ''
  for (const line of tag.params.text.split('\n')) {
    rendered += `${textPadding}${line}${this.ending}`
  }
  return rendered
}
/**
 * Renders the closing tag (e.g. `</div>`) when the tag requires one;
 * void tags (params.needsClose falsy) produce an empty string.
 * @param {Tag} tag - Tag whose closing markup is produced.
 * @param {string} tagPadding - Indentation placed before the closing tag.
 * @returns {string} Closing-tag markup or ''.
 */
TagRenderer.prototype._renderClosingTag = function (tag, tagPadding) {
  let closingTag = ''
  if (tag.params.needsClose) {
    closingTag += `${tagPadding}</${tag.name}>${this.ending}`
  }
  // TODO: ^^^ Don't add new line if open/closed tag without text
  return closingTag
}
/**
 * Represents a TreeRenderer.
 * @constructor
 * @param {Object} options - Options for TreeRenderer.
 * Valid options:
 *  - pretty - defines if rendering is pretty or not (defaults to false)
 */
function TreeRenderer (options = {}) {
  // `ending` and `pad` collapse to empty strings in compact mode; leaf
  // nodes are delegated to an embedded TagRenderer with the same setting.
  this.pretty = (options.pretty === undefined) ? false : options.pretty
  this.ending = (this.pretty) ? '\n' : ''
  this.pad = (this.pretty) ? ' ' : ''
  this.tagRenderer = new TagRenderer({ pretty: this.pretty })
}
/**
 * Renders a given tree depth-first.
 * Leaf nodes are rendered via TagRenderer; interior nodes emit their own
 * opening/closing markup around the concatenation of their children.
 * @memberof TreeRenderer
 * @param {Node} node - Start Node to render from.
 * @returns {string} Rendered tree.
 */
TreeRenderer.prototype.renderTree = function (node) {
  if (node.children.length === 0) {
    // Leaf: node.level doubles as the indentation depth.
    return this.tagRenderer.render(node.data, node.level)
  }
  const data = []
  for (let i = 0; i < node.children.length; i++) {
    const tag = this.renderTree(node.children[i])
    data.push(tag)
  }
  // Interior node: assumes node.data carries prebuilt openingTag/closingTag
  // strings (unlike leaves, which go through TagRenderer) -- see Node producer.
  return (
    `${this.pad.repeat(node.level)}${node.data.openingTag}${this.ending}` +
    `${data.join('')}` +
    `${this.pad.repeat(node.level)}${node.data.closingTag}${this.ending}`
  )
}
// Public API of this module: the two renderer constructors.
module.exports = { TagRenderer, TreeRenderer }
|
package io.opensphere.core.util.swing;
import java.awt.GridLayout;
import java.awt.Insets;
import java.awt.event.ActionListener;
import java.awt.event.FocusAdapter;
import java.awt.event.FocusEvent;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import javax.swing.JButton;
import javax.swing.JPanel;
import javax.swing.JRootPane;
import javax.swing.SwingUtilities;
import io.opensphere.core.util.collections.New;
/**
 * Generic button panel: lays out a row of equally-sized buttons, remembers
 * which one was last pressed, and re-fires each button press to listeners
 * registered via {@link #addActionListener(ActionListener)}.
 */
public class ButtonPanel extends JPanel
{
    /** The serialVersionUID. */
    private static final long serialVersionUID = 1L;

    /** Button insets that match those of a default JButton. */
    public static final Insets INSETS_DEFAULT = new Insets(5, 14, 5, 14);

    /** Button insets that are moderate. */
    public static final Insets INSETS_MEDIUM = new Insets(4, 12, 4, 12);

    /** Button insets that match those of a JOptionPane. */
    public static final Insets INSETS_JOPTIONPANE = new Insets(2, 8, 2, 8);

    /** OK. */
    public static final String OK = "OK";

    /** Cancel. */
    public static final String CANCEL = "Cancel";

    /** Yes. */
    public static final String YES = "Yes";

    /** No. */
    public static final String NO = "No";

    /** Close. */
    public static final String CLOSE = "Close";

    /** The OK/Cancel options. */
    public static final Collection<String> OK_CANCEL = New.unmodifiableList(OK, CANCEL);

    /** The Yes/No options. */
    public static final Collection<String> YES_NO = New.unmodifiableList(YES, NO);

    /** The button margin. */
    private Insets myButtonMargin = INSETS_MEDIUM;

    /** The selection that was made (label of the last button pressed).
     * Volatile: may be read from a different thread than the EDT that set it. */
    private volatile String mySelection;

    /** Map of button label to button. */
    private final Map<String, JButton> myButtonMap = new HashMap<>();

    /**
     * Constructor. Creates an OK/Cancel panel.
     */
    public ButtonPanel()
    {
        this(OK_CANCEL);
    }

    /**
     * Constructor.
     *
     * @param buttonLabels The button labels
     */
    public ButtonPanel(Collection<String> buttonLabels)
    {
        initialize(buttonLabels);
    }

    /**
     * Constructor.
     *
     * @param buttonLabels The button labels
     */
    public ButtonPanel(String... buttonLabels)
    {
        this(Arrays.asList(buttonLabels));
    }

    /**
     * Adds an <code>ActionListener</code> to the button.
     *
     * @param l the <code>ActionListener</code> to be added
     */
    public void addActionListener(ActionListener l)
    {
        // listenerList is inherited from JComponent; events are re-fired to
        // these listeners by the per-button handler installed in initialize().
        listenerList.add(ActionListener.class, l);
    }

    /**
     * Gets the button for the given button label.
     *
     * @param buttonLabel The button label
     * @return The button, or null
     */
    public JButton getButton(String buttonLabel)
    {
        return myButtonMap.get(buttonLabel);
    }

    /**
     * Getter for selection.
     *
     * @return the selection
     */
    public String getSelection()
    {
        return mySelection;
    }

    /**
     * Setter for buttonMargin. Applies the margin to all existing buttons.
     *
     * @param buttonMargin the buttonMargin
     */
    public void setButtonMargin(Insets buttonMargin)
    {
        myButtonMargin = buttonMargin;
        for (JButton button : myButtonMap.values())
        {
            button.setMargin(myButtonMargin);
        }
    }

    /**
     * Initializes the GUI: one equally-sized IconButton per label, wired to
     * record the selection and re-fire the event to registered listeners.
     *
     * @param buttonLabels The button labels
     */
    private void initialize(Collection<String> buttonLabels)
    {
        setLayout(new GridLayout(1, buttonLabels.size(), 6, 0));
        for (String label : buttonLabels)
        {
            final IconButton button = new IconButton(label);
            button.setFocusPainted(true);
            // Affirmative labels get a check icon, negative ones a cancel icon.
            if (OK.equals(label) || YES.equals(label))
            {
                button.setIcon("/images/check_12x12.png");
            }
            else if (CANCEL.equals(label) || NO.equals(label))
            {
                button.setIcon("/images/cancel_14x14.png");
            }
            button.addActionListener(e ->
            {
                mySelection = e.getActionCommand();
                for (ActionListener listener : getListeners(ActionListener.class))
                {
                    listener.actionPerformed(e);
                }
            });
            // Make the focused button the root pane's default button, so
            // pressing Enter activates whichever button has keyboard focus.
            button.addFocusListener(new FocusAdapter()
            {
                @Override
                public void focusGained(FocusEvent e)
                {
                    JRootPane root = SwingUtilities.getRootPane(button);
                    if (root != null)
                    {
                        root.setDefaultButton(button);
                    }
                }
            });
            add(button);
            myButtonMap.put(label, button);
        }
    }
}
|
import pywingchun
from enum import IntEnum
class Source:
    # String identifiers of supported trading/market-data gateways.
    CTP = "ctp"
    XTP = "xtp"
    OES = "oes"
class Exchange:
    # Exchange codes: SSE/SZE are stock exchanges; SHFE, DCE, CZCE, CFFEX
    # and INE are futures/derivatives exchanges.
    SSE = "SSE"
    SZE = "SZE"
    SHFE = "SHFE"
    DCE = "DCE"
    CZCE = "CZCE"
    CFFEX = "CFFEX"
    INE = "INE"
class Region:
    # ISO-style region codes used to tag markets.
    CN = 'CN'
    HK = 'HK'
class ValuationMethod(IntEnum):
    # Position valuation / lot-matching policy.
    FIFO = 0
    LIFO = 1
    AverageCost = 2
# Re-export the C++-backed enum types from pywingchun so the rest of the
# Python code can import them from this module.
LedgerCategory = pywingchun.constants.LedgerCategory
InstrumentType = pywingchun.constants.InstrumentType
ExecType = pywingchun.constants.ExecType
Side = pywingchun.constants.Side
Offset = pywingchun.constants.Offset
BsFlag = pywingchun.constants.BsFlag
OrderStatus = pywingchun.constants.OrderStatus
Direction = pywingchun.constants.Direction
PriceType = pywingchun.constants.PriceType
VolumeCondition = pywingchun.constants.VolumeCondition
TimeCondition = pywingchun.constants.TimeCondition
MsgType = pywingchun.constants.MsgType

# Terminal order states (as ints): an order in one of these will not change.
AllFinalOrderStatus = [int(OrderStatus.Filled),
                       int(OrderStatus.Error),
                       int(OrderStatus.PartialFilledNotActive),
                       int(OrderStatus.Cancelled)]

# Instrument types that may appear in a stock (cash) account.
InstrumentTypeInStockAccount = [InstrumentType.Stock,
                                InstrumentType.Bond,
                                InstrumentType.Fund,
                                InstrumentType.StockOption,
                                InstrumentType.TechStock,
                                InstrumentType.Index,
                                InstrumentType.Repo]

# All enum types exposed by this module (e.g. for generic serialization).
ENUM_TYPES = [LedgerCategory,
              InstrumentType,
              ExecType,
              Side,
              Offset,
              BsFlag,
              OrderStatus,
              Direction,
              PriceType,
              VolumeCondition,
              TimeCondition,
              MsgType]

# Maps wire/topic names to their MsgType enum values.
MSG_TYPES = {
    "quote": MsgType.Quote,
    "entrust": MsgType.Entrust,
    "transaction": MsgType.Transaction,
    "bar": MsgType.Bar,
    "order_input": MsgType.OrderInput,
    "order_action": MsgType.OrderAction,
    "order": MsgType.Order,
    "trade": MsgType.Trade,
    "position": MsgType.Position,
    "asset": MsgType.Asset,
    "position_detail": MsgType.PositionDetail,
    "instrument": MsgType.Instrument
}
|
<gh_stars>100-1000
'use strict';
// On a badge/card page: sum the remaining card drops across all listed apps
// and show the total; otherwise add a "view on SteamDB" button to the badge
// header. GetOption/GetLocalResource/GetHomepage are extension-provided globals.
const progressInfo = document.querySelectorAll( '.progress_info_bold' );

if( progressInfo.length > 0 )
{
	let apps = 0;
	let drops = 0;
	let match;

	for( let i = 0; i < progressInfo.length; i++ )
	{
		// e.g. "3 card drops remaining"
		match = progressInfo[ i ].textContent.match( /([0-9]+) card drops? remaining/ );

		if( match )
		{
			match = parseInt( match[ 1 ], 10 ) || 0;

			if( match > 0 )
			{
				apps++;
				drops += match;
			}
		}
	}

	if( apps > 0 )
	{
		const text = document.createElement( 'span' );
		text.className = 'steamdb_drops_remaining';
		// ".pageLinks" indicates pagination, so the count only covers this page.
		text.appendChild( document.createTextNode( drops + ' drops remaining across ' + apps + ' apps' + ( document.querySelector( '.pageLinks' ) ? ' on this page' : '' ) ) );

		const container = document.querySelector( '.badge_details_set_favorite' );
		if( container )
		{
			container.insertBefore( text, container.firstChild );
		}
	}
}
else
{
	GetOption( { 'button-gamecards': true }, function( items )
	{
		// User can disable the button via the extension options.
		if( !items[ 'button-gamecards' ] )
		{
			return;
		}

		const profileTexture = document.querySelector( '.profile_small_header_texture' );

		if( !profileTexture )
		{
			return;
		}

		// Extract the numeric badge id from the /badges/<id> URL.
		const badgeUrl = location.pathname.match( /\/badges\/([0-9]+)/ );

		if( !badgeUrl )
		{
			return;
		}

		const badgeid = parseInt( badgeUrl[ 1 ], 10 );

		const container = document.createElement( 'div' );
		container.className = 'profile_small_header_additional steamdb';

		const image = document.createElement( 'img' );
		image.className = 'ico16';
		image.src = GetLocalResource( 'icons/white.svg' );

		const span = document.createElement( 'span' );
		span.dataset.tooltipText = 'View badge on SteamDB';
		span.appendChild( image );

		const link = document.createElement( 'a' );
		link.rel = 'noopener';
		link.className = 'btnv6_blue_hoverfade btn_medium btn_steamdb';
		link.href = GetHomepage() + 'badge/' + badgeid + '/?utm_source=Steam&utm_medium=Steam&utm_campaign=SteamDB%20Extension';
		link.appendChild( span );

		container.insertBefore( link, container.firstChild );
		profileTexture.appendChild( container );
	} );
}
|
#!/bin/env bash
# Copyright 2017-2018 by SDRausty. All rights reserved. 🌎 🌍 🌏 🌐 🗺
# Hosting https://sdrausty.github.io/TermuxArch courtesy https://pages.github.com
# https://sdrausty.github.io/TermuxArch/CONTRIBUTORS Thank you for your help.
# https://sdrausty.github.io/TermuxArch/README has information about this project.
################################################################################
fstnd=""
# Downloads "$file" and its .md5 checksum from the configured mirror, using
# whichever download manager $dm selects (aria2c/axel/wget; curl otherwise).
# The -c/-C flags resume partial downloads where the tool supports it.
ftchit() {
	getmsg
	printdownloadingftchit
	if [[ "$dm" = aria2c ]];then
		aria2c http://"$mirror$path$file".md5
		aria2c -c http://"$mirror$path$file"
	elif [[ "$dm" = axel ]];then
		axel http://"$mirror$path$file".md5
		axel http://"$mirror$path$file"
	elif [[ "$dm" = wget ]];then
		wget "$dmverbose" -N --show-progress http://"$mirror$path$file".md5
		wget "$dmverbose" -c --show-progress http://"$mirror$path$file"
	else
		curl "$dmverbose" -C - --fail --retry 4 -OL http://"$mirror$path$file".md5 -O http://"$mirror$path$file"
	fi
}
# Standard fetch: first resolves the geo-redirecting mirror "$cmirror" to a
# concrete local mirror (parsed out of the tool's redirect output into
# $nmirror), then downloads "$file" plus its .md5 from it. Sets fstnd=1 so
# later steps know the standard path was taken.
ftchstnd() {
	fstnd=1
	getmsg
	printcontacting
	if [[ "$dm" = aria2c ]];then
		# aria2c prints the redirect target; grep it out of the captured log.
		aria2c "$cmirror" | tee /dev/fd/1 > "$tampdir/global2localmirror"
		nmirror="$(grep Redir "$tampdir/global2localmirror" | awk {'print $8'})"
		printdone
		printdownloadingftch
		aria2c http://"$mirror$path$file".md5
		aria2c -c -m 4 http://"$mirror$path$file"
	elif [[ "$dm" = wget ]];then
		# wget reports the redirect as a "Location:" header on stderr.
		wget -v -O/dev/null "$cmirror" 2>"$tampdir/global2localmirror"
		nmirror="$(grep Location "$tampdir/global2localmirror" | awk {'print $2'})"
		printdone
		printdownloadingftch
		wget "$dmverbose" -N --show-progress "$nmirror$path$file".md5
		wget "$dmverbose" -c --show-progress "$nmirror$path$file"
	else
		# curl -v prints the "Location:" header on stderr.
		curl -v "$cmirror" 2>"$tampdir/global2localmirror"
		nmirror="$(grep Location "$tampdir/global2localmirror" | awk {'print $3'})"
		printdone
		printdownloadingftch
		curl "$dmverbose" -C - --fail --retry 4 -OL "$nmirror$path$file".md5 -O "$nmirror$path$file"
	fi
}
# x86 image fetch: downloads md5sums.txt from the mirror, picks the image
# file name out of it (the i686 entry on x86 ABIs, the boot entry otherwise),
# writes that file's checksum line to "$file".md5, then downloads the image.
getimage() {
	printdownloadingx86
	getmsg
	if [[ "$dm" = aria2c ]];then
		aria2c http://"$mirror$path$file".md5
		if [[ "$CPUABI" = "$CPUABIX86" ]];then
			file="$(grep i686 md5sums.txt | awk {'print $2'})"
		else
			file="$(grep boot md5sums.txt | awk {'print $2'})"
		fi
		# Second line of md5sums.txt is the checksum for the selected image.
		sed '2q;d' md5sums.txt > "$file".md5
		rm md5sums.txt
		aria2c -c http://"$mirror$path$file"
	elif [[ "$dm" = axel ]];then
		axel http://"$mirror$path$file".md5
		if [[ "$CPUABI" = "$CPUABIX86" ]];then
			file="$(grep i686 md5sums.txt | awk {'print $2'})"
		else
			file="$(grep boot md5sums.txt | awk {'print $2'})"
		fi
		sed '2q;d' md5sums.txt > "$file".md5
		rm md5sums.txt
		axel http://"$mirror$path$file"
	elif [[ "$dm" = wget ]];then
		wget "$dmverbose" -N --show-progress http://"$mirror${path}"md5sums.txt
		if [[ "$CPUABI" = "$CPUABIX86" ]];then
			file="$(grep i686 md5sums.txt | awk {'print $2'})"
		else
			file="$(grep boot md5sums.txt | awk {'print $2'})"
		fi
		sed '2q;d' md5sums.txt > "$file".md5
		rm md5sums.txt
		printdownloadingx86two
		wget "$dmverbose" -c --show-progress http://"$mirror$path$file"
	else
		curl "$dmverbose" --fail --retry 4 -OL http://"$mirror${path}"md5sums.txt
		if [[ "$CPUABI" = "$CPUABIX86" ]];then
			file="$(grep i686 md5sums.txt | awk {'print $2'})"
		else
			file="$(grep boot md5sums.txt | awk {'print $2'})"
		fi
		sed '2q;d' md5sums.txt > "$file".md5
		rm md5sums.txt
		printdownloadingx86two
		curl "$dmverbose" -C - --fail --retry 4 -OL http://"$mirror$path$file"
	fi
}
# Warns when an unsupported download manager (axel/lftp) was requested and
# curl is being used as the fallback.
# BUG FIX: the message was concatenated onto the printf FORMAT string
# ("FMT""MSG"), so %s expanded to nothing; pass the message as the argument.
getmsg() {
	if [[ "$dm" = axel ]] || [[ "$dm" = lftp ]];then
		printf "\\n\\e[1;32m%s\\n\\n" "The chosen download manager \`$dm\` is being implemented: curl (command line tool and library for transferring data with URLs) alternative https://github.com/curl/curl chosen: DONE"
	fi
}
## EOF
|
package com.report.adapter.persistence.repository;
import com.report.application.domain.vo.CharacterPhrase;
import com.report.application.domain.vo.PlanetName;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.DisplayName;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.InjectMocks;
import org.mockito.Mock;
import org.mockito.junit.jupiter.MockitoExtension;
import static org.junit.jupiter.api.Assertions.assertAll;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.mockito.Mockito.verify;
/**
 * Unit tests for SwapiRepository: verifies that calls are delegated to the
 * mocked SwapiNativeRepository and that null criteria are rejected.
 */
@ExtendWith(MockitoExtension.class)
class SwapiRepositoryTest {

    private static final String PLANET_NAME = "Earth";
    private static final String CHARACTER_PHRASE = "Musk";

    // Shared value objects wrapping the raw strings above; built once in init().
    private static PlanetName planetName;
    private static CharacterPhrase characterPhrase;

    @Mock
    private SwapiNativeRepository nativeRepository;

    @InjectMocks
    private SwapiRepository repository;

    @BeforeAll
    static void init() {
        planetName = new PlanetName(PLANET_NAME);
        characterPhrase = new CharacterPhrase(CHARACTER_PHRASE);
    }

    @Test
    @DisplayName("Deleting SWAPI data")
    void shouldCleanSwapiTables() {
        // When
        repository.deleteSwapiData();

        // Then
        verify(nativeRepository).cleanSwapiTables();
    }

    @Test
    @DisplayName("Getting FilmCharacterRecords, when all arguments are not null")
    void shouldFindAllByCriteria() {
        // When
        repository.getFilmCharacterRecordsThatMeetTheCriteria(planetName, characterPhrase);

        // Then: the raw string values are unwrapped before delegation.
        verify(nativeRepository).findAllByPlanetNameAndCharacterNameThatContainsPhrase(PLANET_NAME, CHARACTER_PHRASE);
    }

    @Test
    @DisplayName("Getting FilmCharacterRecords, when some arguments are null")
    void shouldNotAcceptNullValue() {
        // When & Then: every null combination must throw NullPointerException.
        assertAll(
                () -> assertThrows(NullPointerException.class,
                        () -> repository.getFilmCharacterRecordsThatMeetTheCriteria(null, characterPhrase)
                ),
                () -> assertThrows(NullPointerException.class,
                        () -> repository.getFilmCharacterRecordsThatMeetTheCriteria(planetName, null)
                ),
                () -> assertThrows(NullPointerException.class,
                        () -> repository.getFilmCharacterRecordsThatMeetTheCriteria(null, null)
                )
        );
    }
}
|
<filename>src/publisher.cpp
#include <Interface/SimulationGUI.h>
#include <IO/ROS.h>
// Demo: publish a simulated robot's joint state to a ROS topic and receive
// it back on a second (subscriber) robot via the same topic.
int main(int argc,const char** argv) {
  // Create world
  RobotWorld world;
  SimGUIBackend backend(&world);
  WorldSimulation& sim = backend.sim;

  // Load world file (taken from the command-line arguments)
  if (!backend.LoadAndInitSim(argc,argv)) {
    cerr << "Error loading simulation from command line" << endl;
    return 1;
  }

  // Initialize ROS
  // This must be called before all other ROS calls
  if (!ROSInitialized()) {
    if (!ROSInit()) {
      cerr << "Error initializing ROS" << endl;
    }
    else {
      cout << "ROS initialized!" << endl;
    }
  }

  // Create subscriber and topic. robot_sub is a copy of robot 0 that will be
  // updated from the topic, independent of the simulated robot.
  Robot robot_sub = *world.robots[0]; // dummy robot listener
  const char* topic = "myROSTopic"; // desired topic

  // Subscribe to joint state
  if (!ROSSubscribeJointState(robot_sub,topic)) {
    cerr << "Error subscribing joint state" << endl;
  }

  // Start simulation: step in 0.1 s increments for 3 s of sim time.
  double dt = 0.1;
  while (sim.time < 3) {
    // Advance and update simulation
    sim.Advance(dt);
    sim.UpdateModel();
    cout << sim.time << ":\t";

    // Publish robot's joint state
    if (!ROSPublishJointState(*world.robots[0],topic)) {
      cerr << "Error publishing joint state" << endl;
    }

    // Check for updates (processes pending ROS callbacks)
    if (ROSSubscribeUpdate()) {
      // robot_sub has now been updated from the topic
      cout << "Updated!" << endl;
    }
    else {
      // robot_sub already has the latest information
      cout << "No updates" << endl;
    }
  }

  // Print number of subscribed and published topics
  cout << "Subscribed to "<< ROSNumSubscribedTopics() << " topics" << endl;
  cout << "Published "<< ROSNumPublishedTopics() << " topics" << endl;

  // Shutdown ROS
  // Must call after all other ROS calls to cleanly shutdown ROS
  if (!ROSShutdown()) {
    cerr << "Error shutting down ROS" << endl;
  }
  return 0;
}
|
def find_longest_word(string):
    """Return the longest space-separated word in *string*.

    Ties are broken in favor of the earliest word. Returns '' for an
    empty string.

    Bug fixed: the original seeded its search with a single space
    (length 1), so one-character words could never win and an empty
    input returned ' ' instead of ''.
    """
    words = string.split(" ")
    # split(" ") always yields at least [''], so max() never sees an empty list.
    return max(words, key=len)
# Driver code: demonstrate find_longest_word on a sample sentence.
string = "The quick brown fox jumps over the lazy dog"
longest_word = find_longest_word(string)
print(f"The longest word from given string is: {longest_word}")
#!/usr/bin/env bash
set -e
VERSIONS_FILE="$(dirname $(realpath $0))/../kafka-versions.yaml"
# Gets the default Kafka version and sets "default_kafka_version" variable
# to the corresponding version string.
# Gets the default Kafka version and sets "default_kafka_version" variable
# to the corresponding version string.
# Walks the YAML array by index until yq returns "null" (end of list);
# exits 1 if more than one entry is marked default: true.
function get_default_kafka_version {

    finished=0
    counter=0
    default_kafka_version="null"

    while [ $finished -lt 1 ]
    do
        version="$(yq read $VERSIONS_FILE [${counter}].version)"

        if [ "$version" = "null" ]
        then
            # Ran past the last entry: stop scanning.
            finished=1
        else
            if [ "$(yq read $VERSIONS_FILE [${counter}].default)" = "true" ]
            then
                if [ "$default_kafka_version" = "null" ]
                then
                    default_kafka_version=$version
                    finished=1
                else
                    # We have multiple defaults so there is an error in the versions file
                    >&2 echo "ERROR: There are multiple Kafka versions set as default"
                    unset default_kafka_version
                    exit 1
                fi
            fi

            counter=$((counter+1))
        fi
    done

    # Drop the scratch variables so they don't leak into the caller's shell.
    unset finished
    unset counter
    unset version
}
# Each getter below reads one field from every entry of the versions YAML and
# evals the result into a same-order bash array; `tr '[],' '() '` rewrites the
# JSON array emitted by yq into bash array syntax.

# Sets "versions" to the list of Kafka version strings.
function get_kafka_versions {
    eval versions="$(yq read $VERSIONS_FILE '*.version' -j | tr '[],' '() ')"
}

# Sets "binary_urls" to the per-version download URLs.
function get_kafka_urls {
    eval binary_urls="$(yq read $VERSIONS_FILE '*.url' -j | tr '[],' '() ')"
}

# Sets "zk_versions" to the per-version ZooKeeper versions.
function get_zookeeper_versions {
    eval zk_versions="$(yq read $VERSIONS_FILE '*.zookeeper' -j | tr '[],' '() ')"
}

# Sets "checksums" to the per-version archive checksums.
function get_kafka_checksums {
    eval checksums="$(yq read $VERSIONS_FILE '*.checksum' -j | tr '[],' '() ')"
}

# Sets "libs" to the per-version third-party library version strings.
function get_kafka_third_party_libs {
    eval libs="$(yq read "$VERSIONS_FILE" '*.third-party-libs' -j | tr '[],' '() ')"
}

# Sets "protocols" to the per-version inter-broker protocol versions.
function get_kafka_protocols {
    eval protocols="$(yq read $VERSIONS_FILE '*.protocol' -j | tr '[],' '() ')"
}

# Sets "formats" to the per-version log message format versions.
function get_kafka_formats {
    eval formats="$(yq read $VERSIONS_FILE '*.format' -j | tr '[],' '() ')"
}
# Builds the associative array "version_does_not_support" mapping each Kafka
# version string to its unsupported-features entry from the versions file.
function get_kafka_does_not_support {
    eval does_not_support="$(yq read $VERSIONS_FILE '*.unsupported-features' -j | tr '[],' '() ')"

    get_kafka_versions

    declare -Ag version_does_not_support
    # Arrays are index-aligned: entry i of both came from YAML entry i.
    for i in "${!versions[@]}"
    do
        version_does_not_support[${versions[$i]}]=${does_not_support[$i]}
    done
}
# Parses the Kafka versions file and creates three associative arrays:
# "version_binary_urls": Maps from version string to url from which the kafka source
#                        tar will be downloaded.
# "version_checksums": Maps from version string to sha512 checksum.
# "version_libs": Maps from version string to third party library version string.
function get_version_maps {
    get_kafka_versions
    get_kafka_urls
    get_kafka_checksums
    get_kafka_third_party_libs

    declare -Ag version_binary_urls
    declare -Ag version_checksums
    declare -Ag version_libs

    # All four arrays are index-aligned (each getter preserves YAML order),
    # so index i of every array describes the same Kafka version.
    for i in "${!versions[@]}"
    do
        version_binary_urls[${versions[$i]}]=${binary_urls[$i]}
        version_checksums[${versions[$i]}]=${checksums[$i]}
        version_libs[${versions[$i]}]=${libs[$i]}
    done
}
|
use regex::Regex;
/// Counts `#[test]`-annotated, zero-argument functions in Rust source text.
///
/// The pattern matches `#`, optional whitespace, `[test]`, whitespace, then
/// `fn <name>()` with an empty parameter list. `(?m)` enables multi-line
/// mode and `(?s)` lets `.` span newlines (neither flag affects this
/// particular pattern's classes, but they make the intent explicit).
/// Note: functions with parameters or attributes between `#[test]` and `fn`
/// are not matched.
fn count_test_functions(rust_source_code: &str) -> usize {
    let test_function_pattern = Regex::new(r"(?m)(?s)\#\s*\[test\]\s*fn\s+\w+\s*\(\s*\)").unwrap();
    test_function_pattern.find_iter(rust_source_code).count()
}
import scipy.optimize as opt

# stocks is a list of tuples; each tuple contains the stock ticker,
# current price, and allocated capital.
stocks = [
    ('MSFT', 200.00, 1000),
    ('AMZN', 1800.00, 1000),
    ('AAPL', 500.00, 1000)
]


def objective(allocations):
    """Sum of squared deviations between proposed and target allocations."""
    target_allocations = [s[2] for s in stocks]
    return sum((a - b) ** 2 for a, b in zip(allocations, target_allocations))


def constrain(allocations):
    """Equality constraint: zero when total allocation equals total capital."""
    return sum(allocations) - sum(s[2] for s in stocks)


# Initialize allocations with each stock's currently allocated capital.
allocations = [s[2] for s in stocks]

# Invoke optimization procedure.
# BUG FIX: fmin_slsqp has no `constraints` keyword (that API belongs to
# scipy.optimize.minimize) -- the original call raised TypeError. Equality
# constraints are passed to fmin_slsqp via `eqcons` (a list of functions
# that must equal 0 at a feasible point).
opt_allocations = opt.fmin_slsqp(
    objective,
    allocations,
    eqcons=[constrain],
    bounds=[(0, s[2]) for s in stocks],
    iprint=0,  # suppress per-iteration solver chatter
)
print('Optimized Allocations: ', opt_allocations)
<reponame>ab2005/provider
/*
* Copyright (c) 2015. Seagate Technology PLC. All rights reserved.
*/
package com.seagate.alto.provider.lyve.response;
import com.google.gson.Gson;
import com.google.gson.annotations.Expose;
import com.google.gson.annotations.SerializedName;
public class Location {
@SerializedName("latitude")
@Expose
public Double latitude;
@SerializedName("longitude")
@Expose
public Double longitude;
@Override
public String toString() {
return new Gson().toJson(this);
}
}
|
#!/usr/bin/env sh
set -eu

# Smoke-test the install-dotnet wrapper in each supported invocation mode;
# set -eu aborts on the first failing step or unset variable.
echo 'test default install'
./src/sh/scripts/install-dotnet

echo 'test channel install'
./src/sh/scripts/install-dotnet --channel lts

echo 'test version install'
./src/sh/scripts/install-dotnet --version latest
|
<gh_stars>10-100
// Copy-task configuration: ships the FirebaseUI stylesheet out of
// node_modules into the build output. '{{BUILD}}' is a placeholder
// substituted by the build tooling.
module.exports = {
  copyFirebaseUiCss: {
    src: ['./node_modules/firebaseui/dist/firebaseui.css'],
    dest: '{{BUILD}}'
  }
};
|
<filename>src/containers/Content.js
import React from 'react';
import Carousel from './components/Carousel';
import './css/Content.css';
// Page body container: hosts the image Carousel inside a light-themed,
// bordered wrapper (Bootstrap utility classes plus local Content.css).
function Content(){
  return(
    <div className="content bg-light border-dark">
      <Carousel/>
    </div>
  )
}

export default Content;
|
<gh_stars>10-100
package org.yarnandtail.andhow.valid;
import org.yarnandtail.andhow.api.Validator;
/**
 * A collection of Long validation types.
 * <p>
 * Each nested validator compares a nullable {@code Long} value against a
 * fixed reference; {@code null} values are always invalid.
 *
 * @author ericeverman
 */
public abstract class LngValidator implements Validator<Long> {

    @Override
    public boolean isSpecificationValid() {
        // These validators take no user-supplied specification beyond the
        // reference value, so the specification itself is always valid.
        return true;
    }

    @Override
    public String getInvalidSpecificationMessage() {
        return "THIS VALIDATION IS ALWAYS VALID";
    }

    /**
     * Validate that a long is greater than a specified reference.
     */
    public static class GreaterThan extends LngValidator {

        // private final for consistency with the sibling validators
        // (the field was package-private and mutable before).
        private final long ref;

        public GreaterThan(long ref) {
            this.ref = ref;
        }

        @Override
        public boolean isValid(Long value) {
            // null never validates; otherwise compare against the reference.
            return value != null && value > ref;
        }

        @Override
        public String getTheValueMustDescription() {
            return "be greater than " + Long.toString(ref);
        }
    }

    /**
     * Validate that a long is greater than or equal to a specified reference.
     */
    public static class GreaterThanOrEqualTo extends LngValidator {

        private final long ref;

        public GreaterThanOrEqualTo(long ref) {
            this.ref = ref;
        }

        @Override
        public boolean isValid(Long value) {
            return value != null && value >= ref;
        }

        @Override
        public String getTheValueMustDescription() {
            return "be greater than or equal to " + Long.toString(ref);
        }
    }

    /**
     * Validate that a long is less than a specified reference.
     */
    public static class LessThan extends LngValidator {

        private final long ref;

        public LessThan(long ref) {
            this.ref = ref;
        }

        @Override
        public boolean isValid(Long value) {
            return value != null && value < ref;
        }

        @Override
        public String getTheValueMustDescription() {
            return "be less than " + Long.toString(ref);
        }
    }

    /**
     * Validate that a long is less than or equal to a specified reference.
     */
    public static class LessThanOrEqualTo extends LngValidator {

        private final long ref;

        public LessThanOrEqualTo(long ref) {
            this.ref = ref;
        }

        @Override
        public boolean isValid(Long value) {
            return value != null && value <= ref;
        }

        @Override
        public String getTheValueMustDescription() {
            return "be less than or equal to " + Long.toString(ref);
        }
    }
}
|
#!/bin/sh
# Run the frontend dev server and the test watcher side by side with
# color-coded, labeled output; --kill-others stops the survivor when
# either process exits.
yarn concurrently \
	--prefix-colors blue,green \
	--names FE,TEST \
	--kill-others \
	"yarn start" "yarn test"
|
#!/bin/bash
# Module specific variables go here
# Files: file=/path/to/file
# Arrays: declare -a array_name
# Strings: foo="bar"
# Integers: x=9
###############################################
# Bootstrapping environment setup
###############################################

# Get our working directory
cwd="$(pwd)"

# Define our bootstrapper location (provides report/print_array/percent etc.
# and the shared option variables: restore, change, verbose, log, ...)
bootstrap="${cwd}/tools/bootstrap.sh"

# Bail if it cannot be found
if [ ! -f ${bootstrap} ]; then
  echo "Unable to locate bootstrap; ${bootstrap}" && exit 1
fi

# Load our bootstrap
source ${bootstrap}

###############################################
# Metrics start
###############################################

# Get EPOCH
s_epoch="$(gen_epoch)"

# Create a timestamp
timestamp="$(gen_date)"

# Who is calling? 0 = standalone invocation, 1 = run as part of a stigadm group
caller=$(ps $PPID | grep -c stigadm)

###############################################
# Perform restoration
###############################################

# If ${restore} = 1 go to restoration mode
if [ ${restore} -eq 1 ]; then
  report "Not yet implemented" && exit 1
fi

###############################################
# STIG validation/remediation
###############################################

# Module specific validation code should go here
#  Errors should go in ${errors[@]} array (which on remediation get handled)
#  All inspected items should go in ${inspected[@]} array

# Template stub: report the stigid itself as the (only) error until the
# real validation for this STIG is implemented.
errors=("${stigid}")

# If ${change} = 1
#if [ ${change} -eq 1 ]; then

  # Create the backup env
  #backup_setup_env "${backup_path}"

  # Create a backup (configuration output, file/folder permissions output etc.)
  #bu_configuration "${backup_path}" "${author}" "${stigid}" "$(echo "${array_values[@]}" | tr ' ' '\n')"
  #bu_file "${backup_path}" "${author}" "${stigid}" "${file}"
  #if [ $? -ne 0 ]; then

    # Stop, we require a backup
    #report "Unable to create backup" && exit 1
  #fi

  # Iterate ${errors[@]}
  #for error in ${errors[@]}; do
    # Work to remediate ${error} should go here
  #done
#fi

# Remove dupes
#inspected=( $(remove_duplicates "${inspected[@]}") )

###############################################
# Results for printable report
###############################################

# If ${#errors[@]} > 0
if [ ${#errors[@]} -gt 0 ]; then

  # Set ${results} error message
  #results="Failed validation" UNCOMMENT ONCE WORK COMPLETE!
  results="Not yet implemented!"
fi

# Set ${results} passed message
[ ${#errors[@]} -eq 0 ] && results="Passed validation"

###############################################
# Report generation specifics
###############################################

# Apply some values expected for report footer
[ ${#errors[@]} -eq 0 ] && passed=1 || passed=0
[ ${#errors[@]} -gt 0 ] && failed=1 || failed=0

# Calculate a percentage from applied modules & errors incurred
percentage=$(percent ${passed} ${failed})

# If the caller was only independant (standalone): emit a full report
if [ ${caller} -eq 0 ]; then

  # Show failures
  [ ${#errors[@]} -gt 0 ] && print_array ${log} "errors" "${errors[@]}"

  # Provide detailed results to ${log}
  if [ ${verbose} -eq 1 ]; then

    # Print array of failed & validated items
    [ ${#inspected[@]} -gt 0 ] && print_array ${log} "validated" "${inspected[@]}"
  fi

  # Generate the report
  report "${results}"

  # Display the report
  cat ${log}
else

  # Since we were called from stigadm: emit only the per-module section
  module_header "${results}"

  # Show failures
  [ ${#errors[@]} -gt 0 ] && print_array ${log} "errors" "${errors[@]}"

  # Provide detailed results to ${log}
  if [ ${verbose} -eq 1 ]; then

    # Print array of failed & validated items
    [ ${#inspected[@]} -gt 0 ] && print_array ${log} "validated" "${inspected[@]}"
  fi

  # Finish up the module specific report
  module_footer
fi

###############################################
# Return code for larger report
###############################################

# Return an error/success code (0/1): the number of errors doubles as the
# exit status, so 0 errors => success.
exit ${#errors[@]}
# Date: 2018-09-27
#
# Severity: CAT-II
# Classification: UNCLASSIFIED
# STIG_ID: V0077399
# STIG_Version: SV-92095r2
# Rule_ID: SLES-12-020620
#
# OS: SLES
# Version: 12
# Architecture:
#
# Title: The SUSE operating system must generate audit records for all uses of the chacl command.
# Description: SLES
|
import React from 'react';
import styled from 'styled-components';
// Extra props accepted by the Scan button on top of the native
// <button> attributes (currently none).
interface IProps {}

// 17x17 square <button> that hosts the scan icon SVG.
const Styled = styled.button`
  width: 17px;
  height: 17px;
`;
/**
 * Static 17x17 scan-frame glyph (four corner brackets).
 *
 * Memoized because the output only depends on `props`, which are spread
 * onto the <path> so callers can override fill/className/etc.
 * Fix: props were typed `any`; use the proper SVG path prop type so
 * misuse is caught at compile time (interface unchanged for callers).
 */
const ScanVector = React.memo((props: React.SVGProps<SVGPathElement>) => {
  return (
    <svg width={17} height={17}>
      <path
        d="M15.703 5.86c.508 0 .781-.282.781-.79V3.172c0-1.672-.875-2.547-2.57-2.547h-1.898c-.508 0-.79.273-.79.781 0 .5.282.774.79.774h1.796c.711 0 1.118.375 1.118 1.132V5.07c0 .508.273.79.773.79zm-13.93 0c.508 0 .774-.282.774-.79V3.312c0-.757.398-1.132 1.117-1.132h1.797c.508 0 .789-.274.789-.774 0-.508-.281-.781-.79-.781H3.564c-1.688 0-2.57.867-2.57 2.547V5.07c0 .508.28.79.78.79zm3.688 10.257c.508 0 .789-.281.789-.781s-.281-.781-.79-.781H3.665c-.719 0-1.117-.367-1.117-1.125v-1.758c0-.508-.274-.79-.774-.79-.507 0-.78.282-.78.79v1.898c0 1.672.882 2.547 2.57 2.547H5.46zm8.453 0c1.695 0 2.57-.875 2.57-2.547v-1.898c0-.508-.28-.79-.78-.79-.509 0-.774.282-.774.79v1.758c0 .758-.407 1.125-1.117 1.125h-1.797c-.508 0-.79.28-.79.78s.282.782.79.782h1.898z"
        fill="#000"
        fillRule="nonzero"
        {...props}
      />
    </svg>
  );
});
/**
 * Icon button showing the scan glyph. Accepts all native <button> props.
 *
 * Bug fix: `{...props}` used to be spread AFTER the computed className, so
 * a caller-supplied `className` silently replaced the combined
 * `icon <name>` value. Spreading props first keeps the merge; `type` stays
 * before the spread so callers may still override it, as before.
 */
const Scan = (props: IProps & React.ButtonHTMLAttributes<HTMLButtonElement>) => {
  const { className = '' } = props;
  return (
    <Styled type="button" {...props} className={`icon ${className}`}>
      <ScanVector />
    </Styled>
  );
};

export default Scan;
|
#!/bin/bash
# Copyright 2017, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# echo commands and fail fast so CI logs show exactly what ran
set -ex
# change to grpc repo root
cd $(dirname $0)/../../../..
# provision the Linux build environment (toolchains, docker, etc.)
source tools/internal_ci/helper_scripts/prepare_build_linux_rc
# run the C++ ThreadSanitizer suite: one outer job with 16 inner jobs,
# uploading results to the BigQuery aggregate_results table
tools/run_tests/run_tests_matrix.py -f c++ tsan --inner_jobs 16 -j 1 --internal_ci --bq_result_table aggregate_results
|
#!/usr/bin/env bash
# ########################################################################### #
#
# Generate the debian repository and sign it with a given GPG key
#
# ########################################################################### #
# script version, reported by --version
TMP_PROGRAM_VERSION="0.2";

# ............................................................................ #
# Prerequisites:
# - gnu utils (apt-get install coreutils)
# - find (apt-get install findutils)
# - sed (apt-get install sed)
# - gpg, GPG key creator (apt-get install gnupg2)
# - aptly, debian repo creator (installation: https://www.aptly.info/download/)
# - wget, http download utility (apt-get install wget)
# - jq, json utility (apt-get install jq)
# - haveged, entropy generator (apt-get install haveged)
# TODO: what happens when there is a clobber of the file during attempt to
# download it, do we error out and how?
# ............................................................................ #

# turn on tracing of error, this will bubble up all the error codes
# basically this allows the ERR trap to be inherited by shell functions
set -o errtrace;
# turn on quitting on first error
set -o errexit;
# error out on undefined variables
set -o nounset;
# propagate pipe errors
set -o pipefail;
# debugging
#set -o xtrace;

# ........................................................................... #
# get script name
TMP_SCRIPT_NAME=$(basename "${0}");
# get full path of the script folder
TMP_SCRIPT_FOLDER="$(cd $(dirname $0); pwd)";
# artly plugin display name, extracted from environment otherwise set to ""
ARTLY_PLUGIN=${ARTLY_PLUGIN:-""}
# artly script path, used to print the parent version from print_version
ARTLY_SCRIPT_PATH="${ARTLY_SCRIPT_PATH:-}";

# ........................................................................... #
# variables to store script arguments in
# static defaults are set here
# dynamic ones, which are based on other passed in parameters are set in
# process_script_arguments
# TODO: figure out a better way, probably use -z
TMP_OPTION_OUTPUT_FOLDER="";
TMP_OPTION_REPOSITORY_NAME="";
TMP_OPTION_REPOSITORY_COMPONENT="";
TMP_OPTION_REPOSITORY_DISTRIBUTION="";
TMP_OPTION_SECRET_KEY_FILE="";
# each entry is "<flag>:<locations>" (see process_script_arguments)
declare -a TMP_OPTION_PACKAGE_ENTITIES;
TMP_OPTION_REPOSITORY_ARCHITECTURES="";
TMP_OPTION_REPOSITORY_LABEL="";
TMP_OPTION_REPOSITORY_ORIGIN="";
TMP_OPTION_REPOSITORY_DESCRIPTION="";
TMP_OPTION_PUBLIC_KEY_FILE="";
TMP_OPTION_RECREATE=0;
TMP_OPTION_GPG="";
TMP_OPTION_WORK_FOLDER="";
TMP_OPTION_MACHINE_READABLE=0;
TMP_OPTION_VERBOSE=0;
TMP_OPTION_QUIET=0;
TMP_OPTION_NO_COLOR=0;
TMP_OPTION_DEBUG=0;
TMP_OPTION_VERSION=0;

# per-utility verbosity flags, filled in by validate_and_default_arguments
TMP_WGET_VERBOSITY="";
TMP_GPG_VERBOSITY="";
TMP_RM_VERBOSITY="";
TMP_MKDIR_VERBOSITY="";
TMP_MV_VERBOSITY="";
TMP_CP_VERBOSITY="";
TMP_CHMOD_VERBOSITY="";
TMP_SHRED_VERBOSITY="";

# template for the work folder name
TMP_WORK_FOLDER_NAME_TEMPLATE="/tmp/artly-make-debian-repository.XXXXXXXXXX";
# folder for downloaded packages and copied
TMP_SOURCE_FOLDER="";
# file containing all package entities (folders, files) to import after
# they been scanned and imported
TMP_PACKAGE_LIST_FILE="";
# aptly work, rootdir and config file
# also the log file for the package import process
TMP_APTLY_IMPORT_LOG="";
TMP_APTLY_WORK_FOLDER=""
TMP_APTLY_ROOTDIR_FOLDER="";
TMP_APTLY_CONFIG_FILE="";
# bin folder for the gpg wrapper
TMP_BIN_FOLDER="";
# keyrings file paths, keyid of the imported key file, imported packages count
TMP_PUBLIC_KEY_FILE="";
TMP_KEYRING_FILE="";
TMP_SECRET_KEYRING_FILE="";
TMP_KEYID="";
TMP_IMPORTED_PACKAGES_COUNT="";
# flag to track if we created the output folder, necessary because of the
# error trapping removing the folder when we did not create it
# default to 0 so this way we do not remove the folder
TMP_CREATED_OUTPUT_FOLDER=0;
# ........................................................................... #
# print script usage
# print script usage to stdout
# fix: the summary paragraph used to be a copy-paste from the keyring
# generator ("Generate GPG keyring and import it's private, public and
# keyid...") and described the wrong tool; it now describes this script
function usage {
    local script_display_name;

    # when run as an Artly plugin, show the plugin name instead of the script
    if [ "${ARTLY_PLUGIN}" == "" ]; then
        script_display_name="${TMP_SCRIPT_NAME}";
    else
        script_display_name="${ARTLY_PLUGIN}";
    fi

    echo "\
${script_display_name} - generate and sign Debian package repository

Usage: ${script_display_name} [OPTIONS]

Generate a Debian package repository in the output folder from the given
package files, folders and URLs, and sign it with the given secret GPG key.
Repository parameters can be provided using arguments, though there are
reasonable (though incomplete) defaults.

To understand Debian repository format please read:
https://wiki.debian.org/RepositoryFormat

Options:

    -o, --output-folder <path>
        Repository output folder.

    -n, --name
        Repository name.

    -d, --distribution
        Repository distribution.

    -c, --component
        Repository component.

    -k, --secret-key-file <path>
        Secret key file path. Secret PGP key to sign the repository with.

    -p, --package-location <path>
        Semi-optional, path to local package files or a folders to be scanned
        recursively for packages to import into the repository.
        If you want to specify multiple files of folder quote the argument
        like so: --package-location \"./folder1 /folder2\".
        You can also specify this argument multiple times:
        --package-location \"./folder1 /folder2\" --package-location ./folder3
        IMPORTANT: at least one --package-location or --package-url needs to be
        specified.

    -u, --package-url <url>
        Semi-optional, URL of the package to download and import to the
        repository.
        Same as with --package-location/-p you can specify multiple URLs
        within quotes and can specify --package-url/-u argument multiple times.
        IMPORTANT: at least one --package-location or --package-url needs to be
        specified.

    -a, --architectures
        Optional, repository architectures to publish. By default set to
        'amd64,i386,all,source' which publishes \"amd64\", \"i386\"
        architectures, architecture-independent \"all\" packages and source
        packages. (see Notes Sections for explanation).
        Multiple values should be comma separated without spaces if you do not
        quote them (example -a amd64,i686). Quote the argument \"amd64, i686\"
        if you have spaces.

    --label
        Optional, repository label.

    --origin
        Optional, repository origin.

    --description
        Optional, repository description. If omitted, filled out by aptly
        with it's friendly \"Generated by aptly\" message.

    --public-key <path>
        Optional, file path to the exported public key of the secret key used
        to sign the repository. By default set to \"public.asc\" which means it
        will be placed inside the output folder.
        Common recommended key extension is \".asc\".
        Also the key path must be relative to the output folder and not start
        with \"/\" or contain any \"..\" parts.

    --gpg <gpg path>
        Optional, use the gpg executable specified by <gpg path>. By default
        set to the first gpg executable found on the PATH using \"type -P gpg\"
        command.

    --machine-readable
        Optional, print out colon separated output. This only prints out
        repository information.

    --recreate
        Optional, delete previous output folder by the same name before
        creating it again. Useful when you want to recreate the keys without
        having to do manual removal.

    --work-folder <path>
        Optional, work folder path, needed to generate the keyrings. By default
        the work folder name is created by mktemp using following template
        \"${TMP_WORK_FOLDER_NAME_TEMPLATE}\".

    -v, --verbose
        Optional, turn on verbose output.

    -q, --quiet
        Optional, be as quiet as possible. This really only print the very
        final output and not any \"work in progress\" messages.

    --no-color
        Optional, do not colorize output.

    --debug
        Optional, turn on debug. Currently this means that the work folder is
        not deleted after the script is done so it can be used for inspection.
        Also turn on --verbose/-b option.

    --version
        Print version.

    -h, --help
        show help for this script.

Notes:

    Currently aptly 0.9.7 is unable to publish a repository if all the debian
    packages are architecture independent (a.k.a \"all\" architecture)
    See: https://github.com/smira/aptly/issues/165
    As such we have to force specifying architecture and defaulting it to
    VERY common ones.
";
}
# ........................................................................... #
# ERR EXIT INT TERM signals trap handler which clears the ERR trap, prints out
# script, status code line number of the error then exit
# :{1}: line on which error occured
# :{2}: status code of the errors
# :{3}: signal code of the error
# :globals: ERR EXIT INT TERM
# :error: any errors that occure when trap for ERR EXIT INT TERM is removed
# :trap ERR: clears ERR trap
# :return 0: on success, when no errors occured
# :exit 1: anything code propagated when trap for ERR EXIT INT TERM is removed
# ERR EXIT INT TERM trap handler: clears all traps, reports where the error
# happened (with a stack trace for ERR), removes output/work folders as
# needed and exits non-zero for abnormal conditions
# :{1}: line on which error occurred
# :{2}: status code of the error
# :{3}: signal name that fired (ERR/EXIT/INT/TERM)
function trap_handler {
    # get error line and error description
    local error_line="${1}";
    local exit_code="${2}";
    local error_signal="${3}";

    local frame=0;
    local frame_expression;
    local indent="";

    # clear all traps so we do not hit recursions
    clear_error_traps;

    if [ "${error_signal}" == "ERR" ]; then
        # print out error code
        echo "!! Error in script : $(caller | cut -d' ' -f2)" >&2;
        echo "!! Error exit code : ${exit_code}" >&2;
        echo "!! Error line : ${error_line}" >&2;
        echo "!! Error signal : ${error_signal}" >&2;

        echo "----- begin stack trace ----";
        # turn off errtrace and errexit so we can stop iterating over frames
        # when caller does not return 0 error code
        set +o errtrace;
        set +o errexit;
        while frame_expression=$(caller $frame); do
            echo "${indent}${frame_expression}";
            indent="${indent} ";
            ((frame++));
        done
        # turn exit flags back on
        set -o errtrace;
        set -o errexit;
        echo "----- end stack trace ----";

        echo "Unexpected script error, deleting output and work folders as \
needed.">&2;
        remove_temporary_directories_and_files;
        exit 1;
    elif [ "${error_signal}" == "TERM" ]; then
        echo "Unexpected script termination, deleting output and work folders \
as needed.">&2;
        remove_temporary_directories_and_files;
        exit 1;
    elif [ "${error_signal}" == "INT" ]; then
        echo "Unexpected script interruption, deleting output and work \
folders as needed.">&2;
        remove_temporary_directories_and_files;
    elif [ "${error_signal}" == "EXIT" ]; then
        # only clean up on EXIT when exiting abnormally (non-zero status)
        if [ ${exit_code} -ne 0 ]; then
            echo "Unexpected script exit, deleting output and work \
folders as needed.">&2;
            remove_temporary_directories_and_files;
        fi
    fi
}
# ............................................................................ #
# clear all (ERR EXIT INT TERM) error traps
# Drop every installed trap handler (ERR EXIT INT TERM) so that cleanup code
# cannot re-enter the trap handler recursively.
function clear_error_traps {
    local handled_signal;
    for handled_signal in ERR EXIT INT TERM; do
        trap - "${handled_signal}";
    done
}
# ........................................................................... #
# check commands, parse scripts, and run the install/setup steps
# main entry point: install traps, validate input, then build, sign and
# publish the Debian repository (the call order below is significant)
function begin () {
    # handle script errors by trapping ERR and cleanup signals
    trap 'trap_handler ${LINENO} $? ERR' ERR;
    trap 'trap_handler ${LINENO} $? EXIT' EXIT;
    trap 'trap_handler ${LINENO} $? INT' INT;
    trap 'trap_handler ${LINENO} $? TERM' TERM;

    # check for commands we use and error out if they are not found
    check_commands;

    # process script arguments
    process_script_arguments "$@";

    # run script arguments (--version for example)
    maybe_run_script_arguments;

    # validate script arguments and set defaults
    validate_and_default_arguments;

    # enable color, if supported (unless --no-color was given)
    if [ $TMP_OPTION_NO_COLOR == 1 ]; then
        disable_color_vars;
    else
        maybe_enable_color_vars;
    fi

    # log script paths and various information
    log_script_info;

    # create output/work/aptly/bin folders
    create_folders;

    # download remote packages and build the package import list
    generate_package_list;

    # create aptly config
    create_aptly_config;

    # create new repository
    create_new_repository;

    # import debian packages into the repository
    import_debian_packages_into_repository;

    # create keyrings with an imported secret key file
    create_keyrings;

    # create gpg.conf with keyring information
    create_gpg_config_file;

    # create gpg executable wrapper
    create_gpg_wrapper;

    # publish repository
    publish_repository;

    # export public key
    export_public_key

    # if not debugging remove the work folder
    if [ ${TMP_OPTION_DEBUG} -eq 0 ]; then
        remove_work_folder;
    fi

    # print repository information
    print_repository_information
}
# ........................................................................... #
# checks for all the commands required for setup and activate
# this does not include the python commands we install
# verify every external command this script needs is available, and that
# bash and aptly are at usable versions; aborts otherwise
function check_commands {
    local current_aptly_version;
    local required_aptly_version;

    # check for bash 4 or greater
    cmd_exists_at_least_bash4_or_abort;

    # check gnu getopt
    cmd_exists_gnu_getopt_or_abort;

    # check for a whole set of commands
    cmds_exists_or_abort \
        "echo" "basename" "dirname" "mkdir" "rm" "rev" "cut" "grep" \
        "find" "sed" "shred" "wget" "jq" "aptly";

    # get current aptly version and compare it to the one we want
    current_aptly_version="$(aptly version | sed 's/aptly version: //g')"
    required_aptly_version="0.9.7";
    version_lte "${required_aptly_version}" "${current_aptly_version}" \
    || abort "Aptly version ${current_aptly_version} is below required \
version ${required_aptly_version}" 1;
}
# ........................................................................... #
# get script params and store them
# parse command line arguments via GNU getopt and store them in the
# TMP_OPTION_* globals; exits 2 on any usage error
# fix: the catch-all '*)' case used to print "No arguments given." even
# though a (positional) argument WAS present — it now names the argument
function process_script_arguments {
    local short_args;
    local long_args="";
    local processed_args;

    short_args="o: n: d: c: k: p: u: a: v q h";
    long_args+="output-folder: name: distribution: component: ";
    long_args+="secret-key-file: package-location: package-url: ";
    long_args+="architectures: label: origin: description: public-key: ";
    long_args+="gpg: recreate machine-readable work-folder: verbose quiet ";
    long_args+="debug version help";

    # if no arguments given print usage
    if [ $# -eq 0 ]; then
        # print usage to stderr since no valid command was provided
        clear_error_traps;
        usage 1>&2;
        echo "No arguments given">&2;
        exit 2;
    fi

    # process the arguments, if failed then print out all unknown arguments
    # and exit with code 2
    processed_args=$(get_getopt "${short_args}" "${long_args}" "$@") \
    || {
        clear_error_traps;
        echo "Unknown argument(s) given: ${processed_args}"; \
        exit 2;
    }

    # set the processed arguments into the $@
    eval set -- "${processed_args}";

    # go over the arguments
    while [ $# -gt 0 ]; do
        case "$1" in
            # store output folder path
            --output-folder | -o)
                TMP_OPTION_OUTPUT_FOLDER="${2}";
                shift;
                ;;
            # store repository name
            --name | -n)
                TMP_OPTION_REPOSITORY_NAME="${2}";
                shift;
                ;;
            # store repository distribution
            --distribution | -d)
                TMP_OPTION_REPOSITORY_DISTRIBUTION="${2}";
                shift;
                ;;
            # store repository component
            --component | -c)
                TMP_OPTION_REPOSITORY_COMPONENT="${2}";
                shift;
                ;;
            # store secret key file path
            --secret-key-file | -k)
                TMP_OPTION_SECRET_KEY_FILE="${2}";
                shift;
                ;;
            # store package location in an array, keeping the flag so
            # generate_package_list knows how to treat the value
            --package-location | -p)
                TMP_OPTION_PACKAGE_ENTITIES+=("${1}:${2}")
                shift;
                ;;
            # store package url in an array (same "<flag>:<value>" format)
            --package-url | -u)
                TMP_OPTION_PACKAGE_ENTITIES+=("${1}:${2}")
                shift;
                ;;
            # store repository architectures
            --architectures | -a)
                TMP_OPTION_REPOSITORY_ARCHITECTURES="${2}";
                shift;
                ;;
            # store repository label
            --label)
                TMP_OPTION_REPOSITORY_LABEL="${2}";
                shift;
                ;;
            # store repository origin
            --origin)
                TMP_OPTION_REPOSITORY_ORIGIN="${2}";
                shift;
                ;;
            # store repository description
            --description)
                TMP_OPTION_REPOSITORY_DESCRIPTION="${2}";
                shift;
                ;;
            # store exported public key
            --public-key)
                TMP_OPTION_PUBLIC_KEY_FILE="${2}";
                shift;
                ;;
            # store gpg executable path
            --gpg)
                TMP_OPTION_GPG="${2}";
                shift;
                ;;
            # store recreate flag
            --recreate)
                TMP_OPTION_RECREATE=1;
                ;;
            # store work folder path
            --work-folder)
                TMP_OPTION_WORK_FOLDER="${2}";
                shift
                ;;
            # store machine readable flag
            --machine-readable)
                TMP_OPTION_MACHINE_READABLE=1;
                ;;
            # store verbose flag
            --verbose | -v)
                TMP_OPTION_VERBOSE=1;
                ;;
            # store quiet flag
            --quiet | -q)
                TMP_OPTION_QUIET=1;
                ;;
            # store no color flag
            --no-color)
                TMP_OPTION_NO_COLOR=1;
                ;;
            # store debug flag
            --debug)
                TMP_OPTION_DEBUG=1;
                ;;
            # store version flag
            --version)
                TMP_OPTION_VERSION=1;
                ;;
            # show usage and quit with code 0
            --help | -h)
                usage;
                exit 0;
                ;;
            # argument end marker
            --)
                # pop the marker of the stack
                shift;
                # there should not be any trailing arguments
                if [ "${#}" -gt 0 ]; then
                    # print usage to stderr exit with code 2
                    clear_error_traps;
                    usage 1>&2;
                    echo "Unknown arguments(s) given: ${@}">&2;
                    exit 2;
                else
                    # if it 0 then break the loop, so the shift at the end
                    # of the for loop did not cause an error
                    break;
                fi
                ;;
            # unknown argument: anything that starts with -
            -*)
                # print usage to stderr, exit with code 2
                clear_error_traps;
                usage 1>&2;
                echo "Unknown argument(s) given: ${1}">&2;
                exit 2;
                ;;
            # unexpected positional argument
            *)
                # print usage to stderr, exit with code 2
                clear_error_traps;
                usage 1>&2;
                echo "Unexpected argument(s) given: ${1}">&2;
                exit 2;
                ;;
        esac
        shift;
    done
}
# ............................................................................ #
# run functionality specific only to some arguments.
# these are independent arguments not specific to rest of scrip functionality
# (for example, --version)
# Run functionality tied to standalone arguments that short-circuit the
# normal flow (currently only --version: print version info and stop).
function maybe_run_script_arguments {
    if (( TMP_OPTION_VERSION == 1 )); then
        print_version;
        exit;
    fi
}
# ............................................................................ #
# print out version
# print this script's version (colon-separated when --machine-readable),
# then the parent Artly version if running as an Artly plugin
function print_version {
    local artly_arguments;

    if [ "${TMP_OPTION_MACHINE_READABLE}" -eq 1 ]; then
        echo "artly-make-debian-repository-version:${TMP_PROGRAM_VERSION}";
        artly_arguments="--machine-readable";
    else
        echo "Artly Make Debian Repository version: ${TMP_PROGRAM_VERSION}";
        artly_arguments="";
    fi

    # print out artly version if the script was run as an Artly plugin
    if [ "${ARTLY_SCRIPT_PATH}" != "" ]; then
        "${ARTLY_SCRIPT_PATH}" \
            ${artly_arguments} \
            --version;
    fi
}
# ........................................................................... #
# validate the set script arguments and set all default values that are
# not set at the top of the script when variable containing them are declared
# validate the parsed script arguments and fill in every default that could
# not be set statically at declaration time; aborts with a usage hint when a
# required argument is missing
function validate_and_default_arguments {
    # holds grep's exit status when scanning the public key path for
    # forbidden "/.." ".." or leading "/" parts
    local public_key_check_bad_parts_check;

    # check if repository name is specified, if not abort with message
    if [ "${TMP_OPTION_REPOSITORY_NAME}" == "" ]; then
        abort "Please specify repository name using --name/-n" 1;
    fi

    # check if repository distribution is specified, if not abort with message
    if [ "${TMP_OPTION_REPOSITORY_DISTRIBUTION}" == "" ]; then
        abort \
            "Please specify repository distribution using --distribution/-d" 1;
    fi

    # check if repository component is specified, if not abort with message
    if [ "${TMP_OPTION_REPOSITORY_COMPONENT}" == "" ]; then
        abort "Please specify repository component using --component/-c" 1;
    fi

    # set the default to the common "amd64,i386,all,source"; see usage notes
    # for explanation
    if [ "${TMP_OPTION_REPOSITORY_ARCHITECTURES}" == "" ]; then
        TMP_OPTION_REPOSITORY_ARCHITECTURES="amd64,i386,all,source";
    fi

    # check if keyfile is specified, if not abort with message
    if [ "${TMP_OPTION_SECRET_KEY_FILE}" == "" ]; then
        abort "Please specify secret key file using --secret-key-file/-k" 1;
    fi

    # check if the TMP_OPTION_PACKAGE_ENTITIES is empty, if so then no
    # package entries were specified.
    if [ ${#TMP_OPTION_PACKAGE_ENTITIES[@]} -eq 0 ]; then
        abort "Please specify at least one package source using --package-url \
or --package-location" 1;
    fi

    # check if output folder is specified, if not abort with message
    if [ "${TMP_OPTION_OUTPUT_FOLDER}" == "" ]; then
        abort "Please specify output folder using --output-folder/-o" 1;
    fi

    # create a default work folder using a mktemp and
    # TMP_WORK_FOLDER_NAME_TEMPLATE template
    if [ "${TMP_OPTION_WORK_FOLDER}" == "" ]; then
        TMP_OPTION_WORK_FOLDER="$(\
            mktemp \
                --directory \
                --dry-run \
                ${TMP_WORK_FOLDER_NAME_TEMPLATE}
        )";
    fi

    # default the gpg executable to the first gpg found on the PATH
    # (previous comment said "virtualenv" — copy-paste from another script)
    if [ "${TMP_OPTION_GPG}" == "" ]; then
        TMP_OPTION_GPG="$(type -P gpg)";
    fi
    # check if gpg we have found exists
    cmds_exists_or_abort "${TMP_OPTION_GPG}";

    # public key to export, set default if not set
    # also check for ../
    if [ "${TMP_OPTION_PUBLIC_KEY_FILE}" == "" ]; then
        TMP_OPTION_PUBLIC_KEY_FILE="public.asc";
    else
        # get pipe error code safe (using || true) error code of the grep
        # for bad patterns
        public_key_check_bad_parts_check=$(\
            echo "${TMP_OPTION_PUBLIC_KEY_FILE}" \
            | grep \
                --extended-regexp \
                "/\.\.|\.\./|^/" \
                1>/dev/null \
                2>/dev/null;
            echo $?) \
        || true;
        # if we found the patterns grep would exit with 0 as such abort
        if [ ${public_key_check_bad_parts_check} -eq 0 ]; then
            abort "Please specify exported public key path that does not \
start with / and without \"..\" parts" 1
        fi
    fi
    # set TMP_PUBLIC_KEY_FILE to the full path
    TMP_PUBLIC_KEY_FILE="${TMP_OPTION_OUTPUT_FOLDER}/${TMP_OPTION_PUBLIC_KEY_FILE}";

    # if debug then turn on verbosity
    if [ ${TMP_OPTION_DEBUG} -eq 1 ]; then
        TMP_OPTION_VERBOSE=1;
    fi

    # if verbose then set wget, gpg, rm, mkdir, mv, cp, chmod, shred verbosity
    if [ ${TMP_OPTION_VERBOSE} -eq 1 ]; then
        TMP_WGET_VERBOSITY="--verbose"
        TMP_GPG_VERBOSITY="--verbose";
        TMP_RM_VERBOSITY="--verbose";
        TMP_MKDIR_VERBOSITY="--verbose";
        TMP_MV_VERBOSITY="--verbose";
        TMP_CP_VERBOSITY="--verbose";
        TMP_CHMOD_VERBOSITY="--verbose";
        TMP_SHRED_VERBOSITY="--verbose";
    else
        TMP_WGET_VERBOSITY="";
        TMP_GPG_VERBOSITY="";
        TMP_RM_VERBOSITY="";
        TMP_MKDIR_VERBOSITY="";
        TMP_MV_VERBOSITY="";
        TMP_CP_VERBOSITY="";
        TMP_CHMOD_VERBOSITY="";
        TMP_SHRED_VERBOSITY="";
    fi

    # if quiet, set verbosity to 0 and enforce the quietest options for
    # those utilities that have it (wget, gpg, chmod)
    if [ ${TMP_OPTION_QUIET} -eq 1 ]; then
        TMP_OPTION_VERBOSE=0;
        TMP_WGET_VERBOSITY="--quiet";
        TMP_GPG_VERBOSITY="--quiet";
        TMP_RM_VERBOSITY="";
        TMP_MKDIR_VERBOSITY="";
        TMP_MV_VERBOSITY="";
        TMP_CP_VERBOSITY="";
        TMP_CHMOD_VERBOSITY="--quiet";
        TMP_SHRED_VERBOSITY="";
    fi

    # package staging paths inside the work folder
    TMP_SOURCE_FOLDER="${TMP_OPTION_WORK_FOLDER}/packages_source";
    TMP_PACKAGE_LIST_FILE="${TMP_OPTION_WORK_FOLDER}/package_entities.txt";
    # aptly import log, only available after work folder has been created
    TMP_APTLY_IMPORT_LOG="${TMP_OPTION_WORK_FOLDER}/aptly_import.log"
    # work and rootdir folder and config file for aptly
    TMP_APTLY_WORK_FOLDER="${TMP_OPTION_WORK_FOLDER}/aptly"
    TMP_APTLY_ROOTDIR_FOLDER="${TMP_APTLY_WORK_FOLDER}/rootdir";
    TMP_APTLY_CONFIG_FILE="${TMP_APTLY_WORK_FOLDER}/aptly.conf";
    # home dir used to "sandbox" execution of gpg
    TMP_GPG_HOMEDIR_FOLDER="${TMP_OPTION_WORK_FOLDER}/gpg_homedir";
    # gpg.conf options file
    TMP_GPG_CONF_FILE="${TMP_GPG_HOMEDIR_FOLDER}/gpg.conf";
    # bin folder for the gpg wrapper script
    TMP_BIN_FOLDER="${TMP_OPTION_WORK_FOLDER}/bin";
}
# ........................................................................... #
# log paths and various scripts information
# log every effective setting (verbose mode only) so runs are reproducible
# fix: the description guard used '== ""', which logged the description only
# when it was EMPTY; it now logs only when a description was provided
function log_script_info {
    local package_entity;
    local package_type;
    local package_location;

    log_verbose "Repository Name : ${TMP_OPTION_REPOSITORY_NAME}";
    log_verbose "Repository Distribution : ${TMP_OPTION_REPOSITORY_DISTRIBUTION}";
    log_verbose "Repository Component : ${TMP_OPTION_REPOSITORY_COMPONENT}";
    log_verbose "Repository Architectures : ${TMP_OPTION_REPOSITORY_ARCHITECTURES}";
    log_verbose "Secret Key file : ${TMP_OPTION_SECRET_KEY_FILE}";
    log_verbose "Output folder : ${TMP_OPTION_OUTPUT_FOLDER}";
    log_verbose "Repository Label : ${TMP_OPTION_REPOSITORY_LABEL}";
    log_verbose "Repository Origin : ${TMP_OPTION_REPOSITORY_ORIGIN}";
    if [ "${TMP_OPTION_REPOSITORY_DESCRIPTION}" != "" ]; then
        log_verbose "Repository Description : ${TMP_OPTION_REPOSITORY_DESCRIPTION}";
    fi
    log_verbose "Recreate : $(humanize_bool ${TMP_OPTION_RECREATE})";
    log_verbose "GPG executable : ${TMP_OPTION_GPG}";
    log_verbose "GPG version : $(gpg_version ${TMP_OPTION_GPG})";
    log_verbose "Public Key : ${TMP_PUBLIC_KEY_FILE}";
    log_verbose "Work folder : ${TMP_OPTION_WORK_FOLDER}";
    log_verbose "GPG homedir : ${TMP_GPG_HOMEDIR_FOLDER}";
    log_verbose "gpg.conf : ${TMP_GPG_CONF_FILE}";
    log_verbose "Debug : $(humanize_bool ${TMP_OPTION_DEBUG})";

    # log each package source as "<flag> : <location>"
    for package_entity in "${TMP_OPTION_PACKAGE_ENTITIES[@]}"; do
        package_type="$(echo ${package_entity} | cut -d':' -f1)";
        package_location="$(echo ${package_entity} | cut -d':' -f2-)";
        log_verbose "${package_type} : ${package_location}";
    done
}
# ........................................................................... #
# remove all the temporary folders and files if debug is that
# this removes output and work folder
# remove output and work folders unless debugging; the output folder is only
# removed when this script created it (TMP_CREATED_OUTPUT_FOLDER), so the
# error-path cleanup never deletes a pre-existing user folder
function remove_temporary_directories_and_files {
    # if debug is NOT set "force" remove output and work folder
    # BUT!!! only remove the output folder if we created it. this helps not to
    # remove it when removal is attempted from error handling
    if [ ${TMP_OPTION_DEBUG} -eq 0 ]; then
        if [ ${TMP_CREATED_OUTPUT_FOLDER} -eq 1 ]; then
            # remove the output folder if it exists
            if [ -d "${TMP_OPTION_OUTPUT_FOLDER}" ]; then
                rm \
                    ${TMP_RM_VERBOSITY} \
                    --recursive \
                    "${TMP_OPTION_OUTPUT_FOLDER}";
                log_unquiet "Removed output folder: \
${TMP_OPTION_OUTPUT_FOLDER}";
            fi
        else
            log_unquiet "Did not remove the output folder, \
since we did not create it.";
        fi

        # always remove work folder
        remove_work_folder;
    fi
}
# ........................................................................... #
# create output, work, homedir folder removing them if needed
# also homedir folder permissions are set at 700
# create the output, work, package source, aptly and bin folders, removing
# pre-existing ones as needed
# consistency fix: the source/aptly/rootdir mkdir calls now honour
# ${TMP_MKDIR_VERBOSITY} like every other mkdir in this script
function create_folders {
    # create output folder
    create_output_folder

    # remove work folder if exists, forcing removal if recreate flag is set
    remove_work_folder;
    # create work folder
    mkdir \
        ${TMP_MKDIR_VERBOSITY} \
        --parent \
        "${TMP_OPTION_WORK_FOLDER}";
    log_unquiet "Created work folder: ${TMP_OPTION_WORK_FOLDER}";

    # create source folder for the packages
    mkdir \
        ${TMP_MKDIR_VERBOSITY} \
        --parent \
        "${TMP_SOURCE_FOLDER}";

    # create aptly work folder
    mkdir \
        ${TMP_MKDIR_VERBOSITY} \
        --parent \
        "${TMP_APTLY_WORK_FOLDER}";

    # create aptly rootdir
    mkdir \
        ${TMP_MKDIR_VERBOSITY} \
        --parent \
        "${TMP_APTLY_ROOTDIR_FOLDER}";

    # create bin folder for gpg executable wrapper
    mkdir \
        ${TMP_MKDIR_VERBOSITY} \
        --parent \
        "${TMP_BIN_FOLDER}";
}
# ........................................................................... #
# create output folder, remove it if already exists and recreate is true
# otherwise abort suggesting recreate option
# Create the output folder. If it already exists it is either wiped first
# (--recreate) or the script aborts suggesting the --recreate option.
function create_output_folder {
    if [ -d "${TMP_OPTION_OUTPUT_FOLDER}" ]; then
        if [ ${TMP_OPTION_RECREATE} -eq 1 ]; then
            # wipe the previous output folder before recreating it
            rm \
                ${TMP_RM_VERBOSITY} \
                --recursive \
                "${TMP_OPTION_OUTPUT_FOLDER}";
            log_unquiet "Removed output folder: ${TMP_OPTION_OUTPUT_FOLDER}";
        else
            abort "Output folder already exists: ${TMP_OPTION_OUTPUT_FOLDER}
Consider --recreate option." 1;
        fi
    fi

    # (re)create the output folder and record that this script created it so
    # error-path cleanup knows it is allowed to remove it
    mkdir \
        ${TMP_MKDIR_VERBOSITY} \
        --parent \
        "${TMP_OPTION_OUTPUT_FOLDER}";
    TMP_CREATED_OUTPUT_FOLDER=1;
    log_unquiet "Created output folder: ${TMP_OPTION_OUTPUT_FOLDER}";
}
# ........................................................................... #
# Remove work folder if it is exists
# remove the work folder if it exists, shredding the gpg homedir contents
# first because it holds private keys and keyrings
function remove_work_folder {
    # remove the work for this script folder if exist
    if [ -d "${TMP_OPTION_WORK_FOLDER}" ]; then
        # shred all the files in the gpg homedir folder, cause private keys
        # and keyrings
        shred_recursively \
            "${TMP_SHRED_VERBOSITY}" \
            "${TMP_GPG_HOMEDIR_FOLDER}";
        rm \
            ${TMP_RM_VERBOSITY} \
            --recursive \
            "${TMP_OPTION_WORK_FOLDER}";
        log_unquiet "Shredded and removed work folder: \
${TMP_OPTION_WORK_FOLDER}";
    fi
}
# ........................................................................... #
# retrieve package from various locations into the source folder for import
# do not clobber files if the already exist
# build TMP_PACKAGE_LIST_FILE from the "<flag>:<locations>" entries collected
# by process_script_arguments: local paths are validated and listed as-is
# (aptly scans folders recursively); URLs are downloaded to the source folder
# first (no clobbering of already-downloaded files)
function generate_package_list {
    local package_type;
    local package_locations;
    local package_file;

    # go over each package entity and get it's type and locations
    for package_entities_entry in "${TMP_OPTION_PACKAGE_ENTITIES[@]}"; do
        # split the entry on the first ':' into the flag and its value(s)
        package_type="$(echo ${package_entities_entry} | cut -d':' -f1)";
        package_locations="$(echo ${package_entities_entry} | cut -d':' -f2-)";

        # go over each location and add it to the TMP_PACKAGE_LIST_FILE
        # download files as necessary and do validation on the paths
        for package_location in ${package_locations}; do
            case "${package_type}" in
                # validate the package_location exists and add it the package
                # list. if this is a folder aptly will scan the folder
                # recursively so just add this locations
                --package-location | -p)
                    if [ ! -e "${package_location}" ]; then
                        abort "Following file or folder specified by ${package_type} is missing: ${package_location}" 1;
                    fi
                    echo "${package_location}" >> "${TMP_PACKAGE_LIST_FILE}";
                ;;
                # if a url then download to the source folder, without
                # clobbering existing file.
                # create path to the downloaded package in the source folder
                # and record it into the package list
                --package-url | -u)
                    wget \
                        --no-clobber \
                        --directory-prefix="${TMP_SOURCE_FOLDER}" \
                        "${package_location}" \
                    || abort "URL specified by ${package_type} could not be retrieved: ${package_location}" 1;
                    package_file="${TMP_SOURCE_FOLDER}/$(basename ${package_location})";
                    # record package entity
                    echo "${package_file}" >> "${TMP_PACKAGE_LIST_FILE}";
                ;;
            esac;
        done;
    done;

    # if file does not exist then abort; argument validation already requires
    # at least one package source, so this should never happen
    if [ ! -e "${TMP_PACKAGE_LIST_FILE}" ]; then
        abort "No packages entries available to import" 1;
    fi
}
# ........................................................................... #
# generate default config for aptly using aptly, adjust the rootDir to proper
# pathing (aptly messes it up) and set the architecture
function create_aptly_config {
    # generate a default aptly config into TMP_APTLY_CONFIG_FILE and then
    # rewrite its rootDir setting (aptly generates a wrong one).
    # fix: removed the unused local 'rootdir_lineno' that was never read.
    local dot_aptly_conf_file;
    # note: since there is no option to specify where the config should be
    # generated, aptly always generates it in $HOME/.aptly.conf.
    # additionally, if $HOME/.aptly.conf does not already exist stderr is
    # polluted with a message about creating .aptly.conf, which presents an
    # issue for error logging. As such we first create our own .aptly.conf
    # with an empty json object so it is read when 'aptly config show' is
    # ran, preventing it from polluting stderr. Then we redirect the output
    # of 'aptly config show' to our custom config in TMP_APTLY_CONFIG_FILE.
    # NOTE: dot_aptly_conf_file must stay a local of this function; the
    # callee update_aptly_config_rootdir reads it via bash dynamic scoping.
    dot_aptly_conf_file="${TMP_APTLY_WORK_FOLDER}/.aptly.conf"
    echo "{}" > "${dot_aptly_conf_file}";
    HOME="${TMP_APTLY_WORK_FOLDER}" \
    aptly config show \
    1>"${TMP_APTLY_CONFIG_FILE}" \
    || abort "failed to generate \"aptly.conf\": ${TMP_APTLY_CONFIG_FILE}" 1;
    update_aptly_config_rootdir;
}
# ........................................................................... #
# update aptly.conf rootdir to point TMP_APTLY_ROOTDIR_FOLDER
function update_aptly_config_rootdir {
# remove the dummy .aptly.conf
# NOTE(review): ${dot_aptly_conf_file} is a local of the caller
# (create_aptly_config) and is visible here only through bash's dynamic
# scoping -- this function must only be called from there
rm \
${TMP_RM_VERBOSITY} \
--force \
"${dot_aptly_conf_file}" \
|| abort "failed to remove dummy \".aptly.conf\"" 1;
# rewrite the rootDir key with jq, writing to a temp file first so a jq
# failure does not truncate the real config
cat "${TMP_APTLY_CONFIG_FILE}" \
| jq \
".rootDir=\"${TMP_APTLY_ROOTDIR_FOLDER}\"" \
> "${TMP_APTLY_CONFIG_FILE}.tmp" \
|| abort "failed to update aptly.conf \"rootDir\"" 1;
# atomically move the updated config into place
mv \
${TMP_MV_VERBOSITY} \
--force \
"${TMP_APTLY_CONFIG_FILE}.tmp" \
"${TMP_APTLY_CONFIG_FILE}";
}
# ........................................................................... #
# create repository with components, distribution using aptly
function create_new_repository {
    # aptly offers no quiet/verbose switch of its own, so emulate one:
    # file descriptor 3 feeds stdout in verbose mode and the bit bucket
    # otherwise; every aptly invocation below sends stdout to fd 3
    if [ ${TMP_OPTION_VERBOSE} -eq 1 ]; then
        exec 3>&1;
    else
        exec 3>/dev/null;
    fi
    # create the empty aptly repository with the requested component,
    # distribution and name
    aptly -config="${TMP_APTLY_CONFIG_FILE}" repo create \
        -component="${TMP_OPTION_REPOSITORY_COMPONENT}" \
        -distribution="${TMP_OPTION_REPOSITORY_DISTRIBUTION}" \
        "${TMP_OPTION_REPOSITORY_NAME}" \
        1>&3 \
        || abort "failed to create debian repository" 1;
    # done with the verbosity descriptor
    exec 3>&-;
}
# ........................................................................... #
# add debian packages from origin folder to the repository, replacing existing
# all packages
function import_debian_packages_into_repository {
# aptly has no verbosity setting so we are forced to do redirects;
# point file descriptor 3 at stdout when verbose, /dev/null otherwise
if [ ${TMP_OPTION_VERBOSE} -eq 1 ]; then
exec 3>&1;
else
exec 3>/dev/null;
fi
# go over package sources and add them to the aptly repository
# (one path per line, as recorded in TMP_PACKAGE_LIST_FILE)
while read package_source; do
log_verbose "package source ${package_source}";
# NOTE(review): aptly stdout is appended to the import log rather than
# sent to fd 3; fd 3 is opened above but not used inside this loop
aptly \
-config="${TMP_APTLY_CONFIG_FILE}" \
repo add \
"${TMP_OPTION_REPOSITORY_NAME}" \
"${package_source}" \
1>>"${TMP_APTLY_IMPORT_LOG}" \
|| abort_with_log \
"${TMP_APTLY_IMPORT_LOG}" \
"failed to import packages" 1;
# cat the processing log out if successful and verbose is on
if [ ${TMP_OPTION_VERBOSE} -eq 1 ]; then
cat "${TMP_APTLY_IMPORT_LOG}";
fi
done < "${TMP_PACKAGE_LIST_FILE}";
# close out the verbosity file descriptor
exec 3>&-;
}
# create a gpg keyring using our custome keyring script with our custom config
# and homedir we will specify it to aptly using PATH. this is currently the
# only way to specify a custom gpg "binary" since aptly does not take it as
# parameter TODO: we should do a PR to aptly
function create_keyrings {
    # create a gpg keyring from the configured secret key file using the
    # companion artly-make-keyring.sh script, capture its machine-readable
    # output, then parse it into the TMP_KEYRING_FILE,
    # TMP_SECRET_KEYRING_FILE and TMP_KEYID globals used by the rest of
    # the script.
    # fix: the function declared 'local make_keyring_output_file' but
    # actually used 'make_keyring_info_file', silently leaking that
    # variable into the global scope; the declaration now matches the
    # variable in use. Also removed unused locals keyring_type and
    # keyring_file and made the loop parsing variables local.
    local make_keyring_info_file;
    local config_line;
    local config_key;
    local config_value;
    make_keyring_info_file="${TMP_OPTION_WORK_FOLDER}/keyring.info"
    "${TMP_SCRIPT_FOLDER}/artly-make-keyring.sh" \
        --output-folder "${TMP_GPG_HOMEDIR_FOLDER}" \
        --key-file "${TMP_OPTION_SECRET_KEY_FILE}" \
        --gpg "${TMP_OPTION_GPG}" \
        --recreate \
        --quiet \
        --machine-readable \
        > "${make_keyring_info_file}" \
        || abort "failed to create a keyring from key file: ${TMP_OPTION_SECRET_KEY_FILE}" 1;
    # parse "key:value" lines from the info file
    while read config_line; do
        config_key="$(echo ${config_line} | cut -d':' -f1)";
        config_value="$(echo ${config_line} | cut -d':' -f2)";
        # assign config values to their respective globals based on the key
        case "$config_key" in
            keyring)
                TMP_KEYRING_FILE="${config_value}";
                ;;
            secret-keyring)
                TMP_SECRET_KEYRING_FILE="${config_value}";
                ;;
            imported-keyid)
                TMP_KEYID="${config_value}";
                ;;
        esac;
    done \
        < "${make_keyring_info_file}";
}
# ........................................................................... #
# create gpg.config file.
# force use of the keyring files and keyid we have
# set gpg.config permissions to 600
function create_gpg_config_file {
# in >=gpg2.1 specifying either or both primary-keyring and keyring in the
# config file has proven to be a terrible idea.
#
# For some commands like --list-keys issued during "aptly repo publish"
# by aptly, having both primary-keyring and keyring settings in the config
# causes gpg to exit with exit code 2 (GPG_ERR_UNKNOWN_PACKET)
#
# in another case, also during "aptly repo publish" signing of the Release
# file, the --keyring argument is given to gpg by aptly; it also causes gpg
# to exit with code 2 when either primary-keyring or keyring are specified
# in the config
#
#
# So far no good reason for this has been found;
# the only error message seen is:
#   gpg: keyblock resource '<some path>/gpg_homedir/pubring.kbx': File exists
#
# As such we will avoid coding these settings in the config file until more
# information is forthcoming, and rely on sandboxing due to specification
# of --homedir and the fact that only one key is ever imported into it
#
# Below are the settings that were used before:
#    no-default-keyring
#    primary-keyring ${TMP_KEYRING_FILE}
#    keyring ${TMP_KEYRING_FILE}
#    secret-keyring ${TMP_SECRET_KEYRING_FILE}
#
# Debug note, add before the gpg binary to see the gpg commands issued:
#   strace -f -s99999 -e trace=clone,execve
cat >"${TMP_GPG_CONF_FILE}" <<EOF
default-key ${TMP_KEYID}
EOF
# print out file if verbose
cat_verbose "${TMP_GPG_CONF_FILE}";
# change permission for gpg.conf to 600 for additional safety
chmod \
${TMP_CHMOD_VERBOSITY} \
600 \
"${TMP_GPG_CONF_FILE}";
}
# ........................................................................... #
# create gpg executable to be used by apt publish repo on the PATH
# this script exists mostly because call to the gpg executable is hardcoded
# in aptly to 'gpg'. However currently aptly support for >=gpg 2.1 has some
# issues and not all distros us <gpg 2.1 we need to create a wrapper
# _CUSTOM_GPG_PATH passes in the unmodified PATH which is necessary to look up
# the 'gpg' executable by the same name without getting into an infinite loop
# which would be create if the wrapper which is also name 'gpg' is found on the
# PATH when it is run inside the wrapper.
function create_gpg_wrapper {
    # write a 'gpg' wrapper script into TMP_BIN_FOLDER that forces our
    # config file and homedir; aptly hardcodes the executable name 'gpg',
    # so putting this wrapper first on PATH lets us control gpg behavior.
    # _CUSTOM_GPG_PATH carries the unmodified PATH so the wrapper can look
    # up the real gpg without recursing into itself.
    # fix: arguments are now forwarded as "\$@" (quoted) instead of the
    # unquoted \${@}, so arguments containing whitespace survive intact.
    cat >"${TMP_BIN_FOLDER}/gpg" <<EOF
#!/usr/bin/env bash
PATH="\${_CUSTOM_GPG_PATH}" \\
${TMP_OPTION_GPG} \\
${TMP_GPG_VERBOSITY} \\
--options "${TMP_GPG_CONF_FILE}" \\
--homedir "${TMP_GPG_HOMEDIR_FOLDER}" \\
"\$@";
EOF
    # make the wrapper script executable
    chmod \
        ${TMP_CHMOD_VERBOSITY} \
        u+x \
        "${TMP_BIN_FOLDER}/gpg";
}
# ........................................................................... #
# publish the repository, signing it with secret keyid from GPG_KEYID_FILE
# use our custom built gpg script
function publish_repository {
local formated_architectures;
# aptly has no verbosity setting so we are forced to do redirects;
# point file descriptor 3 at stdout when verbose, /dev/null otherwise
if [ ${TMP_OPTION_VERBOSE} -eq 1 ]; then
exec 3>&1;
else
exec 3>/dev/null;
fi
# strip whitespace from the architectures list
# NOTE(review): the sed expression removes EVERY space in the string,
# not just spaces following commas -- confirm architecture names can
# never legitimately contain spaces
formated_architectures=$(\
echo "${TMP_OPTION_REPOSITORY_ARCHITECTURES}" \
| sed 's/ *//g' \
);
# run aptly publish using our gpg wrapper
# for _CUSTOM_GPG_PATH explanation see create_gpg_wrapper
_CUSTOM_GPG_PATH="${PATH}" \
PATH="${TMP_BIN_FOLDER}:${PATH}" \
aptly \
-config="${TMP_APTLY_CONFIG_FILE}" \
publish repo \
-architectures="${formated_architectures}" \
-keyring="${TMP_KEYRING_FILE}" \
-secret-keyring="${TMP_SECRET_KEYRING_FILE}" \
-gpg-key="${TMP_KEYID}" \
-label="${TMP_OPTION_REPOSITORY_LABEL}" \
-origin="${TMP_OPTION_REPOSITORY_ORIGIN}" \
"${TMP_OPTION_REPOSITORY_NAME}" \
1>&3 \
|| abort "failed to sign and publish the repository" 1;
# if a description is specified then go over the Release and InRelease
# files in the distribution and replace "Description:" lines with
# "Description: " + TMP_OPTION_REPOSITORY_DESCRIPTION
# NOTE(review): the sed replacement uses '/' as delimiter and is not
# escaped, so a description containing '/' or '&' will break or corrupt
# the substitution -- confirm descriptions are restricted upstream
if [ "${TMP_OPTION_REPOSITORY_DESCRIPTION}" != "" ]; then
for release_file in "Release" "InRelease"; do
sed \
--in-place \
"s/^Description:.*/Description: ${TMP_OPTION_REPOSITORY_DESCRIPTION}/" \
"${TMP_APTLY_ROOTDIR_FOLDER}/public/dists/${TMP_OPTION_REPOSITORY_DISTRIBUTION}/${release_file}";
done;
fi
# close out the verbosity file descriptor
exec 3>&-;
# get the count of imported packages and store it in
# TMP_IMPORTED_PACKAGES_COUNT
TMP_IMPORTED_PACKAGES_COUNT="$(get_repository_package_count)";
# copy the "public" repository folder inside the aptly rootdir into the
# repository output folder
cp \
${TMP_CP_VERBOSITY} \
--recursive \
"${TMP_APTLY_ROOTDIR_FOLDER}"/public \
--no-target-directory \
"${TMP_OPTION_OUTPUT_FOLDER}" \
|| abort "failed to copy repository to: ${TMP_OPTION_OUTPUT_FOLDER}" 1;
log_unquiet "Copied repository to output folder: \
${TMP_OPTION_OUTPUT_FOLDER}";
}
# ........................................................................... #
# export public key to public.asc file in output folder
function export_public_key {
    # export the armored public key for TMP_KEYID into TMP_PUBLIC_KEY_FILE
    local gpg_error_log;
    # setup verbosity redirect for stdout using a file descriptor
    if [ ${TMP_OPTION_VERBOSE} -eq 1 ]; then
        exec 3>&1;
    else
        exec 3>/dev/null;
    fi
    gpg_error_log="${TMP_OPTION_WORK_FOLDER}/gpg_error.log";
    # export armored public key using the keyid; use --batch so gpg does
    # not prompt.
    # fix: the command previously ended with '"${TMP_KEYID}"; #\' -- the
    # semicolon terminated the gpg command and the '#\' commented out the
    # continuation, leaving the redirections and '|| abort_with_log' as a
    # separate no-op statement. gpg stderr was never captured and a gpg
    # failure never aborted. the line continuation is restored below.
    "${TMP_OPTION_GPG}" \
        ${TMP_GPG_VERBOSITY} \
        --options "${TMP_GPG_CONF_FILE}" \
        --homedir "${TMP_GPG_HOMEDIR_FOLDER}" \
        --batch \
        --export \
        --armor \
        --output "${TMP_PUBLIC_KEY_FILE}" \
        "${TMP_KEYID}" \
        1>&3 \
        2>"${gpg_error_log}" \
        || abort_with_log \
            "${gpg_error_log}" \
            "failed to export armored public key to ${TMP_PUBLIC_KEY_FILE}" 1;
    # gpg may print 'gpg: WARNING: nothing exported' to stderr yet still
    # exit 0, so additionally verify the key file actually materialized
    if [ ! -f "${TMP_PUBLIC_KEY_FILE}" ]; then
        abort_with_log \
            "${gpg_error_log}" \
            "failed to export armored public key to ${TMP_PUBLIC_KEY_FILE}" 1;
    fi
    # close the verbosity descriptor (consistent with sibling functions)
    exec 3>&-;
}
# ........................................................................... #
# get count of imported packages
function get_repository_package_count {
    # print the number of packages currently in the aptly repository by
    # parsing the "Number of packages: N" line of 'aptly repo show'.
    # fix: 'cut --characters 2' kept only the first digit of the count, so
    # any repository with 10 or more packages reported a truncated number;
    # '--characters 2-' keeps everything after the leading space.
    aptly \
        -config="${TMP_APTLY_CONFIG_FILE}" \
        repo show \
        "${TMP_OPTION_REPOSITORY_NAME}" \
        2>/dev/null \
        | grep "Number of packages: " \
        | cut \
            --delimiter ':' \
            --fields 2 \
        | cut \
            --characters 2-;
}
# ........................................................................... #
# print out repository information
function print_repository_information {
    # print out repository information, either as machine readable
    # "key:value" lines or as human readable aligned text via log_unquiet.
    # fix: removed the dead locals 'keyring' and 'secret_keyring' which
    # were computed from TMP_KEYRING_FILE/TMP_SECRET_KEYRING_FILE but
    # never referenced anywhere in the function.
    if [ ${TMP_OPTION_MACHINE_READABLE} -eq 1 ]; then
        echo "repository-name:${TMP_OPTION_REPOSITORY_NAME}";
        echo "repository-distribution:${TMP_OPTION_REPOSITORY_DISTRIBUTION}";
        echo "repository-component:${TMP_OPTION_REPOSITORY_COMPONENT}";
        echo "repository-architectures:${TMP_OPTION_REPOSITORY_ARCHITECTURES}";
        echo "repository-folder:${TMP_OPTION_OUTPUT_FOLDER}";
        echo "repository-label:${TMP_OPTION_REPOSITORY_LABEL}";
        echo "repository-origin:${TMP_OPTION_REPOSITORY_ORIGIN}";
        echo "repository-package-count:${TMP_IMPORTED_PACKAGES_COUNT}";
        echo "public-key:${TMP_PUBLIC_KEY_FILE}";
    else
        log_unquiet "Repository Name           : ${TMP_OPTION_REPOSITORY_NAME}";
        log_unquiet "Repository Distribution   : ${TMP_OPTION_REPOSITORY_DISTRIBUTION}";
        log_unquiet "Repository Component      : ${TMP_OPTION_REPOSITORY_COMPONENT}";
        log_unquiet "Repository Architectures  : ${TMP_OPTION_REPOSITORY_ARCHITECTURES}";
        log_unquiet "Repository Folder         : ${TMP_OPTION_OUTPUT_FOLDER}";
        log_unquiet "Repository Label          : ${TMP_OPTION_REPOSITORY_LABEL}";
        log_unquiet "Repository Origin         : ${TMP_OPTION_REPOSITORY_ORIGIN}";
        log_unquiet "GPG version               : $(gpg_version ${TMP_OPTION_GPG})";
        log_unquiet "Public Key                : ${TMP_PUBLIC_KEY_FILE}";
        log_unquiet "Repository Package Count  : ${TMP_IMPORTED_PACKAGES_COUNT}";
    fi
}
# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ #
# import utils.sh
# source the shared utils.sh module, aborting with a clear message if it
# is missing.
# fix: removed a stray '$' that was embedded at the end of the
# user-facing error message.
if [ -f "${TMP_SCRIPT_FOLDER}/utils.sh" ]; then
    source "${TMP_SCRIPT_FOLDER}/utils.sh"
else
    echo "
Could not load required '${TMP_SCRIPT_FOLDER}/utils.sh' module.
" >&2;
    exit 1;
fi
# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ #
# script entry point: hand all command line arguments to begin
begin "$@";
|
<filename>modules/coverage-report/src/test/java/org/jooby/EnvEmpyCallbackFeature.java
package org.jooby;
import org.jooby.test.ServerFeature;
import org.junit.Test;
// verifies that registering empty environment callbacks does not break
// routing: two no-op "dev" callbacks surround a single GET route.
// NOTE(review): class name has a typo ("Empy" for "Empty") but renaming
// it would change the public type name, so it is left as-is.
public class EnvEmpyCallbackFeature extends ServerFeature {
// instance initializer: route/callback registration for the test server
{
// no-op callback for the "dev" environment, registered before the route
on("dev", () -> {
});
get("/", () -> "empty");
// second no-op "dev" callback, registered after the route
on("dev", () -> {
});
}
// the route must still respond normally despite the empty callbacks
@Test
public void devCallback() throws Exception {
request().get("/").expect("empty");
}
}
|
# Get the current work dir (PipeSwitch checkout under the invocation dir)
# fix: all path expansions are now quoted so the script survives a working
# directory that contains whitespace.
WORK_DIR="$(pwd)/PipeSwitch"
# Import global variables
source "$WORK_DIR/scripts/config/env.sh"
# Run the figure-7 data collection script with the repo on PYTHONPATH
PYTHONPATH="$PYTHONPATH:$WORK_DIR" python "$WORK_DIR/scripts/figures/figure7/per_layer_no_pipeline_bert_base/remote_run_data.py"
#!/bin/sh
#
# Vivado(TM)
# runme.sh: a Vivado-generated Runs Script for UNIX
# Copyright 1986-2020 Xilinx, Inc. All Rights Reserved.
#
# NOTE(review): this script was generated on Windows -- the PATH values
# below use ';' separators and drive-letter paths, which are invalid on
# UNIX. The unconditional 'exit' directly below is the generator's guard
# against running it unmodified; everything after it is unreachable until
# the paths are fixed by hand.
echo "This script was generated under a different operating system."
echo "Please update the PATH and LD_LIBRARY_PATH variables below, before executing this script"
exit
if [ -z "$PATH" ]; then
PATH=E:/APPZ/Xilinx/Vivado/2020.2/ids_lite/ISE/bin/nt64;E:/APPZ/Xilinx/Vivado/2020.2/ids_lite/ISE/lib/nt64:E:/APPZ/Xilinx/Vivado/2020.2/bin
else
PATH=E:/APPZ/Xilinx/Vivado/2020.2/ids_lite/ISE/bin/nt64;E:/APPZ/Xilinx/Vivado/2020.2/ids_lite/ISE/lib/nt64:E:/APPZ/Xilinx/Vivado/2020.2/bin:$PATH
fi
export PATH
if [ -z "$LD_LIBRARY_PATH" ]; then
LD_LIBRARY_PATH=
else
LD_LIBRARY_PATH=:$LD_LIBRARY_PATH
fi
export LD_LIBRARY_PATH
# implementation run directory for this design
HD_PWD='E:/VUT/4 Semester/DE1/Digital-electronics-1/labs/4.segment/display/display.runs/impl_1'
cd "$HD_PWD"
HD_LOG=runme.log
/bin/touch $HD_LOG
# EAStep: run one tool step through the ISEWrap.sh wrapper, appending all
# output to the run log and exiting on the first failing step
ISEStep="./ISEWrap.sh"
EAStep()
{
$ISEStep $HD_LOG "$@" >> $HD_LOG 2>&1
if [ $? -ne 0 ]
then
exit
fi
}
# pre-commands:
/bin/touch .init_design.begin.rst
EAStep vivado -log hex_7seg.vdi -applog -m64 -product Vivado -messageDb vivado.pb -mode batch -source hex_7seg.tcl -notrace
|
import { mount } from '@vue/test-utils'
import LoginWallet from '@/components/LoginWallet.vue'
import { expect, vi } from 'vitest'

test('Login directly should fail', async () => {
  // stub the toast helper and the router so their calls can be observed
  const messageStub = vi.fn()
  const routerStub = { push: vi.fn() }

  const wrapper = mount(LoginWallet, {
    global: {
      mocks: { $message: messageStub, $router: routerStub }
    }
  })

  // the login button must be rendered with the expected label
  const loginButton = wrapper.find('div.btn>div')
  expect(loginButton.exists()).toBe(true)
  expect(loginButton.text()).toBe('Login to existing wallet')

  // clicking without a wallet should surface exactly one error message
  await loginButton.trigger('click')
  expect(messageStub.mock.calls.length).toBe(1)

  // and the user is routed back to home afterwards
  expect(routerStub.push).toHaveBeenCalledWith('/')
})
|
<gh_stars>10-100
// pull in the individual QUnit test suites (imported for side effects)
import "./test_util";
import "./test_tree";
import "./test_jqtree";
// fail any single test that runs longer than five seconds
QUnit.config.testTimeout = 5000;
|
#!/bin/bash
# exit on first failure and echo every command for CI log visibility
set -ex

# environment diagnostics before the build
ls -lah
pwd
export | sort

# full gradle build, tests and checks without the daemon
./gradlew --no-daemon --info build test check
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.