file_name large_stringlengths 4 140 | prefix large_stringlengths 0 12.1k | suffix large_stringlengths 0 12k | middle large_stringlengths 0 7.51k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
ItemView.js | () {
// Private variables
// These are jQuery objects corresponding to elements
let $mapImage;
let $carpet;
let $backArrow;
let $forwardArrow;
let $avatar;
let $arrowsAndItemOrderNumbers;
let $itemOrderNumbers;
let previousAngle = 0;
let viewModel;
let itemsDetails;
let itemToShowBecauseItIsInTheURL;
let performAnimations;
// Private functions
const cacheJQueryObjects = () => {
$mapImage = $("#MapImage");
$carpet = $('#Carpet');
$arrowsAndItemOrderNumbers = $('#ArrowsAndItemOrderNumbers');
$backArrow = $('#ArrowBack');
$forwardArrow = $('#ArrowForward');
$itemOrderNumbers = $('#ItemOrderNumbers');
$avatar = $('#Avatar');
};
/**
* FIXME
* Now I don't fetch item details from the backend because the index.html file comes with
* them ready to use. I collect the details from the elements.
*
* See server-rendering/writer-home-page-generator.js to know how the details are incorporated
* in the page.
*/
const collectItemDetailsFromMap = () => {
itemsDetails = [];
const itemElements = document.querySelectorAll("[data-nid]");
itemElements.forEach((element) => {
itemsDetails.push({ "nid": element.dataset.nid,
"field_order_number": element.dataset.order,
"title": element.dataset.title,
"field_coordinate_x": element.dataset.xCoord,
"field_coordinate_y": element.dataset.yCoord,
"field_item_type": element.dataset.type,
"path": element.dataset.path });
});
// the viewModel needs to know about the items details as well
viewModel.setItemsDetails(itemsDetails);
}
const moveToStartingPointOfSpiral = () => {
// We are going to move the carpet to the starting point of the spiral
// We set the animation running. The viewModel will take care of closing
// the item content panel, if any. It will also close any contact me form.
viewModel.setAnimationToNextItemRunning(true);
const viewport = viewModel.getViewPort();
// Rotating the carpet to the horizontal position it's supposed to have
// at the starting point of the spiral
$carpet.velocity({ transform: ["rotateZ(" + 0 + "deg)", "rotateZ(" + previousAngle + "deg)"] },
{ duration: 1000, easing: "linear", loop: false});
previousAngle = 0;
const mapImagePosition = $mapImage.position();
const currentTop = Math.round(mapImagePosition.top);
const currentLeft = Math.round(mapImagePosition.left);
let animationDuration = 1500;
// If the carpet is already very near the place it's going to,
// I want to get there very quickly so that the user can
// click on the arrows with no delay
// If I have the animation last 1500ms, the user may click on an arrow and
// nothing happens
if (Math.abs(currentTop - (viewport.height / 2 - 3500)) < 200 &&
Math.abs(currentLeft - (viewport.width / 2 - 3500)) < 200) {
animationDuration = 100;
}
// Now animating the carpet to go to the starting point of the spiral
$mapImage.animate({ top: viewport.height / 2 - 3500 ,
left: viewport.width / 2 - 3500 }, animationDuration, null,
() => {
// console.log('animation to spiral starting point completed');
// Animation completed
viewModel.setAnimationToNextItemRunning(false);
}
);
};
const clickOnArrowHandler = (event) => {
// console.log(event);
// console.log(viewModel.getAnimationToNextItemRunning());
// Only if we are not already flying to the next item, do the following
if (!viewModel.getAnimationToNextItemRunning()) {
let itemToVisitNext;
// Determining the item to visit next
if (!event && itemToShowBecauseItIsInTheURL) {
// This is in the case I have to move directly to an item because it's in the URL
itemToVisitNext = itemToShowBecauseItIsInTheURL;
itemToShowBecauseItIsInTheURL = undefined;
performAnimations = false;
// console.log("clickOnArrowHandler, itemToShowBecauseItIsInTheURL ", itemToShowBecauseItIsInTheURL);
// console.log("performAnimations ", performAnimations);
} else {
// the parameter tells if we are going forward or back
itemToVisitNext = viewModel.getItemToVisitNext(event.target.id === "ArrowForward");
}
if (itemToVisitNext) {
const viewport = viewModel.getViewPort();
// When performing the animation the View Model needs to know so that it
// can tell other views
viewModel.setAnimationToNextItemRunning(true);
// left and top attributes to give to the map to get to the item
const positionItemToVisitNext = { left: viewport.width / 2 - itemToVisitNext.field_coordinate_x,
top: viewport.height / 2 - itemToVisitNext.field_coordinate_y };
const mapImagePosition = $mapImage.position();
const currentTop = Math.round(mapImagePosition.top);
const currentLeft = Math.round(mapImagePosition.left);
// Differences in x and y we need to travel to get to the item from the current position
const delta_x = (currentLeft - positionItemToVisitNext.left);
const delta_y = (currentTop - positionItemToVisitNext.top);
// The angle of the direction we take to get to the item. Used to rotate the carpet accordingly
const angle = Math.atan2(delta_y, delta_x) * (180 / Math.PI);
if (performAnimations) {
// Rotating the carpet
$carpet.velocity({ transform: ["rotateZ(" + angle + "deg)", "rotateZ(" + previousAngle + "deg)"] },
{ duration: 1000, easing: "linear", loop: false});
} else {
// Rotate the carpet with no animation
$carpet.css("transform", "rotateZ(" + angle + "deg)");
}
previousAngle = angle;
const maxDelta = Math.max(Math.abs(delta_x), Math.abs(delta_y));
// This is to make the carpet stop before covering the image
// We don't want the carpet to be over the item's image
const approachingFactor = maxDelta / 100;
const showingItemAtTheEndOfTheAnimation = () => {
viewModel.setAnimationToNextItemRunning(false);
updateItemOrderNumbers(itemToVisitNext);
viewModel.showItem();
}
if (performAnimations) {
$mapImage.animate({ top: positionItemToVisitNext.top + (delta_y / approachingFactor),
left: positionItemToVisitNext.left + (delta_x / approachingFactor)}, 1500, null,
() => {
showingItemAtTheEndOfTheAnimation();
}
);
} else {
$mapImage.css("top", positionItemToVisitNext.top + (delta_y / approachingFactor));
$mapImage.css("left", positionItemToVisitNext.left + (delta_x / approachingFactor));
showingItemAtTheEndOfTheAnimation();
// Now I can finally reset performAnimations to true to restart doing animations
performAnimations = true;
}
}
}
};
/**
* To update the order number of the item currently visited as shown between the arrows.
* The total number of items is shown as well.
*
* @param item
*/
const updateItemOrderNumbers = (item) => {
if (item)
$itemOrderNumbers.html("<span>" + item.field_order_number + "/" + viewModel.getNumberOfItems() + "</span>");
else
$itemOrderNumbers.html("<span>Click right arrow</span>");
};
/**
* This is about registering handlers for standard events like click
* @memberOf ItemView
*/
const setupStandardEventHandlers = () => {
//console.log("binding events");
$backArrow.bind('click', clickOnArrowHandler);
$forwardArrow.bind('click', clickOnArrowHandler);
};
/**
* registerEventHandlers is the standard name for the function that attaches event handlers
* I'm talking about my custom jquery events
* No standard events like click
* @memberOf ItemView
*/
const registerEventHandlers = () => {
// Hide the arrows only on small screens. On large screens keep them.
const hideNavigationArrows = () => {
if (viewModel.itIsASmallScreen())
$arrowsAndItemOrderNumbers.hide();
};
const showNavigationArrows = () => {
if (!$arrowsAndItemOrderNumbers.is(":visible") && $mapImage.is(":visible"))
$arrowsAndItemOrderNumbers.show();
};
// We have to hide the arrows when the item content dialog is showing
viewModel.attachEventHandler('ViewModel.itemcontent.beingshown', hideNavigationArrows);
// We restore the arrows when the item content dialog is hidden
viewModel.attachEventHandler('ViewModel.itemcontent.beinghidden', showNavigationArrows);
viewModel | ItemView | identifier_name | |
ItemView.js |
// Private functions
const cacheJQueryObjects = () => {
$mapImage = $("#MapImage");
$carpet = $('#Carpet');
$arrowsAndItemOrderNumbers = $('#ArrowsAndItemOrderNumbers');
$backArrow = $('#ArrowBack');
$forwardArrow = $('#ArrowForward');
$itemOrderNumbers = $('#ItemOrderNumbers');
$avatar = $('#Avatar');
};
/**
* FIXME
* Now I don't fetch item details from the backend because the index.html file comes with
* them ready to use. I collect the details from the elements.
*
* See server-rendering/writer-home-page-generator.js to know how the details are incorporated
* in the page.
*/
const collectItemDetailsFromMap = () => {
itemsDetails = [];
const itemElements = document.querySelectorAll("[data-nid]");
itemElements.forEach((element) => {
itemsDetails.push({ "nid": element.dataset.nid,
"field_order_number": element.dataset.order,
"title": element.dataset.title,
"field_coordinate_x": element.dataset.xCoord,
"field_coordinate_y": element.dataset.yCoord,
"field_item_type": element.dataset.type,
"path": element.dataset.path });
});
// the viewModel needs to know about the items details as well
viewModel.setItemsDetails(itemsDetails);
}
const moveToStartingPointOfSpiral = () => {
// We are going to move the carpet to the starting point of the spiral
// We set the animation running. The viewModel will take care of closing
// the item content panel, if any. It will also close any contact me form.
viewModel.setAnimationToNextItemRunning(true);
const viewport = viewModel.getViewPort();
// Rotating the carpet to the horizontal position it's supposed to have
// at the starting point of the spiral
$carpet.velocity({ transform: ["rotateZ(" + 0 + "deg)", "rotateZ(" + previousAngle + "deg)"] },
{ duration: 1000, easing: "linear", loop: false});
previousAngle = 0;
const mapImagePosition = $mapImage.position();
const currentTop = Math.round(mapImagePosition.top);
const currentLeft = Math.round(mapImagePosition.left);
let animationDuration = 1500;
// If the carpet is already very near the place it's going to,
// I want to get there very quickly so that the user can
// click on the arrows with no delay
// If I have the animation last 1500ms, the user may click on an arrow and
// nothing happens
if (Math.abs(currentTop - (viewport.height / 2 - 3500)) < 200 &&
Math.abs(currentLeft - (viewport.width / 2 - 3500)) < 200) {
animationDuration = 100;
}
// Now animating the carpet to go to the starting point of the spiral
$mapImage.animate({ top: viewport.height / 2 - 3500 ,
left: viewport.width / 2 - 3500 }, animationDuration, null,
() => {
// console.log('animation to spiral starting point completed');
// Animation completed
viewModel.setAnimationToNextItemRunning(false);
}
);
};
const clickOnArrowHandler = (event) => {
// console.log(event);
// console.log(viewModel.getAnimationToNextItemRunning());
// Only if we are not already flying to the next item, do the following
if (!viewModel.getAnimationToNextItemRunning()) {
let itemToVisitNext;
// Determining the item to visit next
if (!event && itemToShowBecauseItIsInTheURL) {
// This is in the case I have to move directly to an item because it's in the URL
itemToVisitNext = itemToShowBecauseItIsInTheURL;
itemToShowBecauseItIsInTheURL = undefined;
performAnimations = false;
// console.log("clickOnArrowHandler, itemToShowBecauseItIsInTheURL ", itemToShowBecauseItIsInTheURL);
// console.log("performAnimations ", performAnimations);
} else {
// the parameter tells if we are going forward or back
itemToVisitNext = viewModel.getItemToVisitNext(event.target.id === "ArrowForward");
}
if (itemToVisitNext) {
const viewport = viewModel.getViewPort();
// When performing the animation the View Model needs to know so that it
// can tell other views
viewModel.setAnimationToNextItemRunning(true);
// left and top attributes to give to the map to get to the item
const positionItemToVisitNext = { left: viewport.width / 2 - itemToVisitNext.field_coordinate_x,
top: viewport.height / 2 - itemToVisitNext.field_coordinate_y };
const mapImagePosition = $mapImage.position();
const currentTop = Math.round(mapImagePosition.top);
const currentLeft = Math.round(mapImagePosition.left);
// Differences in x and y we need to travel to get to the item from the current position
const delta_x = (currentLeft - positionItemToVisitNext.left);
const delta_y = (currentTop - positionItemToVisitNext.top);
// The angle of the direction we take to get to the item. Used to rotate the carpet accordingly
const angle = Math.atan2(delta_y, delta_x) * (180 / Math.PI);
if (performAnimations) {
// Rotating the carpet
$carpet.velocity({ transform: ["rotateZ(" + angle + "deg)", "rotateZ(" + previousAngle + "deg)"] },
{ duration: 1000, easing: "linear", loop: false});
} else {
// Rotate the carpet with no animation
$carpet.css("transform", "rotateZ(" + angle + "deg)");
}
previousAngle = angle;
const maxDelta = Math.max(Math.abs(delta_x), Math.abs(delta_y));
// This is to make the carpet stop before covering the image
// We don't want the carpet to be over the item's image
const approachingFactor = maxDelta / 100;
const showingItemAtTheEndOfTheAnimation = () => {
viewModel.setAnimationToNextItemRunning(false);
updateItemOrderNumbers(itemToVisitNext);
viewModel.showItem();
}
if (performAnimations) {
$mapImage.animate({ top: positionItemToVisitNext.top + (delta_y / approachingFactor),
left: positionItemToVisitNext.left + (delta_x / approachingFactor)}, 1500, null,
() => {
showingItemAtTheEndOfTheAnimation();
}
);
} else {
$mapImage.css("top", positionItemToVisitNext.top + (delta_y / approachingFactor));
$mapImage.css("left", positionItemToVisitNext.left + (delta_x / approachingFactor));
showingItemAtTheEndOfTheAnimation();
// Now I can finally reset performAnimations to true to restart doing animations
performAnimations = true;
}
}
}
};
/**
* To update the order number of the item currently visited as shown between the arrows.
* The total number of items is shown as well.
*
* @param item
*/
const updateItemOrderNumbers = (item) => {
if (item)
$itemOrderNumbers.html("<span>" + item.field_order_number + "/" + viewModel.getNumberOfItems() + "</span>");
else
$itemOrderNumbers.html("<span>Click right arrow</span>");
};
/**
* This is about registering handlers for standard events like click
* @memberOf ItemView
*/
const setupStandardEventHandlers = () => {
//console.log("binding events");
$backArrow.bind('click', clickOnArrowHandler);
$forwardArrow.bind('click', clickOnArrowHandler);
};
/**
* registerEventHandlers is the standard name for the function that attaches event handlers
* I'm talking about my custom jquery events
* No standard events like click
* @memberOf ItemView
*/
const registerEventHandlers = () => {
// Hide the arrows only on small screens. On large screens keep them.
const hideNavigationArrows = () => {
if (viewModel.itIsASmallScreen())
$arrowsAndItemOrderNumbers.hide();
};
const showNavigationArrows = () => {
if (!$arrowsAndItemOrderNumbers.is(":visible") && $mapImage.is(":visible"))
$arrowsAndItemOrderNumbers.show();
};
// We have to hide the arrows when the item content dialog is showing
viewModel.attachEventHandler('ViewModel.itemcontent.beingshown', hideNavigationArrows);
// We restore the arrows when the item content dialog is hidden
viewModel.attachEventHandler('ViewModel.itemcontent.beinghidden', showNavigationArrows);
viewModel.attachEventHandler | {
// Private variables
// These are jQuery objects corresponding to elements
let $mapImage;
let $carpet;
let $backArrow;
let $forwardArrow;
let $avatar;
let $arrowsAndItemOrderNumbers;
let $itemOrderNumbers;
let previousAngle = 0;
let viewModel;
let itemsDetails;
let itemToShowBecauseItIsInTheURL;
let performAnimations; | identifier_body | |
string_pool.rs | () -> Result<Self, ()> {
let bump = Bump::try_with_capacity(INIT_BLOCK_SIZE).map_err(|_| ())?;
let boxed_bump = Box::try_new(bump).map_err(|_| ())?;
Ok(StringPool(Some(InnerStringPool::new(
boxed_bump,
|bump| RefCell::new(RentedBumpVec(BumpVec::new_in(&bump))),
))))
}
/// # Safety
///
/// The inner type is only ever None in middle of the clear()
/// method. Therefore it is safe to use anywhere else.
fn inner(&self) -> &InnerStringPool {
self.0.as_ref().unwrap_or_else(|| unsafe {
std::hint::unreachable_unchecked()
})
}
/// Determines whether or not the current BumpVec is empty.
pub(crate) fn is_empty(&self) -> bool {
self.inner().rent(|vec| vec.borrow().0.is_empty())
}
/// Determines whether or not the current BumpVec is full.
pub(crate) fn is_full(&self) -> bool {
self.inner().rent(|vec| vec.borrow().is_full())
}
/// Gets the current vec, converts it into an immutable slice,
/// and resets bookkeeping so that it will create a new vec next time.
pub(crate) fn finish_string(&self) -> &[XML_Char] {
self.inner().ref_rent_all(|pool| {
let mut vec = RentedBumpVec(BumpVec::new_in(&pool.bump));
pool.current_bump_vec.replace(vec).0.into_bump_slice()
})
}
/// Gets the current vec, converts it into a slice of cells (with interior mutability),
/// and resets bookkeeping so that it will create a new vec next time.
pub(crate) fn finish_string_cells(&self) -> &[Cell<XML_Char>] {
self.inner().ref_rent_all(|pool| {
let mut vec = RentedBumpVec(BumpVec::new_in(&pool.bump));
let sl = pool.current_bump_vec.replace(vec).0.into_bump_slice_mut();
Cell::from_mut(sl).as_slice_of_cells()
})
}
/// Resets the current bump vec to the beginning
pub(crate) fn clear_current(&self) {
self.inner().rent(|v| v.borrow_mut().0.clear())
}
/// Obtains the length of the current BumpVec.
pub(crate) fn len(&self) -> usize {
self.inner().rent(|vec| vec.borrow().0.len())
}
/// Call callback with an immutable buffer of the current BumpVec. This must
/// be a callback to ensure that we don't (safely) borrow the slice for
/// longer than it stays vaild.
pub(crate) fn current_slice<F, R>(&self, mut callback: F) -> R
where F: FnMut(&[XML_Char]) -> R
{
self.inner().rent(|v| callback(v.borrow().0.as_slice()))
}
/// Call callback with a mutable buffer of the current BumpVec. This must
/// be a callback to ensure that we don't (safely) borrow the slice for
/// longer than it stays vaild.
pub(crate) fn current_mut_slice<F, R>(&self, mut callback: F) -> R
where F: FnMut(&mut [XML_Char]) -> R
{
self.inner().rent(|v| callback(v.borrow_mut().0.as_mut_slice()))
}
/// Unsafe temporary version of `current_slice()`. This needs to be removed
/// when callers are made safe.
pub(crate) unsafe fn current_start(&self) -> *const XML_Char {
self.inner().rent(|v| v.borrow().0.as_ptr())
}
/// Appends a char to the current BumpVec.
pub(crate) fn append_char(&self, c: XML_Char) -> bool {
self.inner().rent(|vec| vec.borrow_mut().append_char(c))
}
/// Overwrites the last char in the current BumpVec.
/// Note that this will panic if empty. This is not an insert
/// operation as it does not shift bytes afterwards.
pub(crate) fn replace_last_char(&self, c: XML_Char) {
self.inner().rent(|buf| {
*buf.borrow_mut()
.0
.last_mut()
.expect("Called replace_last_char() when string was empty") = c;
})
}
/// Decrements the length, panicing if len is 0
pub(crate) fn backtrack(&self) {
self.inner().rent(|vec| vec.borrow_mut().0.pop().expect("Called backtrack() on empty BumpVec"));
}
/// Gets the last character, panicing if len is 0
pub(crate) fn get_last_char(&self) -> XML_Char {
self.inner().rent(|buf| *buf.borrow().0.last().expect("Called get_last_char() when string was empty"))
}
/// Appends an entire C String to the current BumpVec.
pub(crate) unsafe fn append_c_string(&self, mut s: *const XML_Char) -> bool {
self.inner().rent(|vec| {
let mut vec = vec.borrow_mut();
while *s != 0 {
if !vec.append_char(*s) {
return false;
}
s = s.offset(1)
}
true
})
}
/// Resets the current Bump and deallocates its contents.
/// The `inner` method must never be called here as it assumes
/// self.0 is never `None`
pub(crate) fn clear(&mut self) {
let mut inner_pool = self.0.take();
let mut bump = inner_pool.unwrap().into_head();
bump.reset();
inner_pool = Some(InnerStringPool::new(
bump,
|bump| RefCell::new(RentedBumpVec(BumpVec::new_in(&bump))),
));
swap(&mut self.0, &mut inner_pool);
}
pub(crate) fn store_c_string(
&self,
enc: &ENCODING,
buf: ExpatBufRef,
) -> bool {
self.inner().rent(|vec| {
let mut vec = vec.borrow_mut();
if !vec.append(enc, buf) {
return false;
}
if !vec.append_char('\0' as XML_Char) {
return false;
}
true
})
}
pub(crate) fn append(
&self,
enc: &ENCODING,
read_buf: ExpatBufRef,
) -> bool {
self.inner().rent(|vec| vec.borrow_mut().append(enc, read_buf))
}
pub(crate) unsafe fn copy_c_string(
&self,
mut s: *const XML_Char,
) -> Option<&[XML_Char]> {
// self.append_c_string(s);?
let successful = self.inner().rent(|vec| {
let mut vec = vec.borrow_mut();
loop {
if !vec.append_char(*s) {
return false;
}
if *s == 0 {
break;
}
s = s.offset(1);
}
true
});
if !successful {
return None;
}
Some(self.finish_string())
}
pub(crate) unsafe fn copy_c_string_n(
&self,
mut s: *const XML_Char,
mut n: c_int,
) -> Option<&[XML_Char]> {
let successful = self.inner().rent(|vec| {
let mut vec = vec.borrow_mut();
let mut n = n.try_into().unwrap();
if vec.0.try_reserve_exact(n).is_err() {
return false;
};
while n > 0 {
if !vec.append_char(*s) {
return false;
}
n -= 1;
s = s.offset(1)
}
true
});
if !successful {
return None;
}
Some(self.finish_string())
}
}
#[derive(Debug)]
pub(crate) struct RentedBumpVec<'bump>(BumpVec<'bump, XML_Char>);
impl<'bump> RentedBumpVec<'bump> {
fn is_full(&self) -> bool {
self.0.len() == self.0.capacity()
}
fn append<'a>(
&mut self,
enc: &ENCODING,
mut read_buf: ExpatBufRef<'a>,
) -> bool {
loop {
// REXPAT: always reserve at least 4 bytes,
// so at least one character gets converted every iteration
if self.0.try_reserve(read_buf.len().max(4)).is_err() {
return false;
}
let start_len = self.0.len();
let cap = self.0.capacity();
self.0.resize(cap, 0);
let mut write_buf = ExpatBufRefMut::from(&mut self.0[start_len..]);
let write_buf_len = write_buf.len();
let convert_res = XmlConvert!(enc, &mut read_buf, &mut write_buf);
// The write buf shrinks by how much was written to it
let written_size = write_buf_len - write_buf.len();
self.0.truncate(start_len + written_size);
if convert_res == XML_Convert_Result::COMPLETED || | try_new | identifier_name | |
string_pool.rs | = Box::try_new(bump).map_err(|_| ())?;
Ok(StringPool(Some(InnerStringPool::new(
boxed_bump,
|bump| RefCell::new(RentedBumpVec(BumpVec::new_in(&bump))),
))))
}
/// # Safety
///
/// The inner type is only ever None in middle of the clear()
/// method. Therefore it is safe to use anywhere else.
fn inner(&self) -> &InnerStringPool {
self.0.as_ref().unwrap_or_else(|| unsafe {
std::hint::unreachable_unchecked()
})
}
/// Determines whether or not the current BumpVec is empty.
pub(crate) fn is_empty(&self) -> bool {
self.inner().rent(|vec| vec.borrow().0.is_empty())
}
/// Determines whether or not the current BumpVec is full.
pub(crate) fn is_full(&self) -> bool {
self.inner().rent(|vec| vec.borrow().is_full())
}
/// Gets the current vec, converts it into an immutable slice,
/// and resets bookkeeping so that it will create a new vec next time.
pub(crate) fn finish_string(&self) -> &[XML_Char] {
self.inner().ref_rent_all(|pool| {
let mut vec = RentedBumpVec(BumpVec::new_in(&pool.bump));
pool.current_bump_vec.replace(vec).0.into_bump_slice()
})
}
/// Gets the current vec, converts it into a slice of cells (with interior mutability),
/// and resets bookkeeping so that it will create a new vec next time.
pub(crate) fn finish_string_cells(&self) -> &[Cell<XML_Char>] {
self.inner().ref_rent_all(|pool| {
let mut vec = RentedBumpVec(BumpVec::new_in(&pool.bump));
let sl = pool.current_bump_vec.replace(vec).0.into_bump_slice_mut();
Cell::from_mut(sl).as_slice_of_cells()
})
}
/// Resets the current bump vec to the beginning
pub(crate) fn clear_current(&self) {
self.inner().rent(|v| v.borrow_mut().0.clear())
}
/// Obtains the length of the current BumpVec.
pub(crate) fn len(&self) -> usize {
self.inner().rent(|vec| vec.borrow().0.len())
}
/// Call callback with an immutable buffer of the current BumpVec. This must
/// be a callback to ensure that we don't (safely) borrow the slice for
/// longer than it stays vaild.
pub(crate) fn current_slice<F, R>(&self, mut callback: F) -> R
where F: FnMut(&[XML_Char]) -> R
{
self.inner().rent(|v| callback(v.borrow().0.as_slice()))
}
/// Call callback with a mutable buffer of the current BumpVec. This must
/// be a callback to ensure that we don't (safely) borrow the slice for
/// longer than it stays vaild.
pub(crate) fn current_mut_slice<F, R>(&self, mut callback: F) -> R
where F: FnMut(&mut [XML_Char]) -> R
{
self.inner().rent(|v| callback(v.borrow_mut().0.as_mut_slice()))
}
/// Unsafe temporary version of `current_slice()`. This needs to be removed
/// when callers are made safe.
pub(crate) unsafe fn current_start(&self) -> *const XML_Char {
self.inner().rent(|v| v.borrow().0.as_ptr())
}
/// Appends a char to the current BumpVec.
pub(crate) fn append_char(&self, c: XML_Char) -> bool {
self.inner().rent(|vec| vec.borrow_mut().append_char(c))
}
/// Overwrites the last char in the current BumpVec.
/// Note that this will panic if empty. This is not an insert
/// operation as it does not shift bytes afterwards.
pub(crate) fn replace_last_char(&self, c: XML_Char) {
self.inner().rent(|buf| {
*buf.borrow_mut()
.0
.last_mut()
.expect("Called replace_last_char() when string was empty") = c;
})
}
/// Decrements the length, panicing if len is 0
pub(crate) fn backtrack(&self) {
self.inner().rent(|vec| vec.borrow_mut().0.pop().expect("Called backtrack() on empty BumpVec"));
}
/// Gets the last character, panicing if len is 0
pub(crate) fn get_last_char(&self) -> XML_Char {
self.inner().rent(|buf| *buf.borrow().0.last().expect("Called get_last_char() when string was empty"))
}
/// Appends an entire C String to the current BumpVec.
pub(crate) unsafe fn append_c_string(&self, mut s: *const XML_Char) -> bool |
/// Resets the current Bump and deallocates its contents.
/// The `inner` method must never be called here as it assumes
/// self.0 is never `None`
pub(crate) fn clear(&mut self) {
let mut inner_pool = self.0.take();
let mut bump = inner_pool.unwrap().into_head();
bump.reset();
inner_pool = Some(InnerStringPool::new(
bump,
|bump| RefCell::new(RentedBumpVec(BumpVec::new_in(&bump))),
));
swap(&mut self.0, &mut inner_pool);
}
pub(crate) fn store_c_string(
&self,
enc: &ENCODING,
buf: ExpatBufRef,
) -> bool {
self.inner().rent(|vec| {
let mut vec = vec.borrow_mut();
if !vec.append(enc, buf) {
return false;
}
if !vec.append_char('\0' as XML_Char) {
return false;
}
true
})
}
pub(crate) fn append(
&self,
enc: &ENCODING,
read_buf: ExpatBufRef,
) -> bool {
self.inner().rent(|vec| vec.borrow_mut().append(enc, read_buf))
}
pub(crate) unsafe fn copy_c_string(
&self,
mut s: *const XML_Char,
) -> Option<&[XML_Char]> {
// self.append_c_string(s);?
let successful = self.inner().rent(|vec| {
let mut vec = vec.borrow_mut();
loop {
if !vec.append_char(*s) {
return false;
}
if *s == 0 {
break;
}
s = s.offset(1);
}
true
});
if !successful {
return None;
}
Some(self.finish_string())
}
pub(crate) unsafe fn copy_c_string_n(
&self,
mut s: *const XML_Char,
mut n: c_int,
) -> Option<&[XML_Char]> {
let successful = self.inner().rent(|vec| {
let mut vec = vec.borrow_mut();
let mut n = n.try_into().unwrap();
if vec.0.try_reserve_exact(n).is_err() {
return false;
};
while n > 0 {
if !vec.append_char(*s) {
return false;
}
n -= 1;
s = s.offset(1)
}
true
});
if !successful {
return None;
}
Some(self.finish_string())
}
}
#[derive(Debug)]
pub(crate) struct RentedBumpVec<'bump>(BumpVec<'bump, XML_Char>);
impl<'bump> RentedBumpVec<'bump> {
fn is_full(&self) -> bool {
self.0.len() == self.0.capacity()
}
fn append<'a>(
&mut self,
enc: &ENCODING,
mut read_buf: ExpatBufRef<'a>,
) -> bool {
loop {
// REXPAT: always reserve at least 4 bytes,
// so at least one character gets converted every iteration
if self.0.try_reserve(read_buf.len().max(4)).is_err() {
return false;
}
let start_len = self.0.len();
let cap = self.0.capacity();
self.0.resize(cap, 0);
let mut write_buf = ExpatBufRefMut::from(&mut self.0[start_len..]);
let write_buf_len = write_buf.len();
let convert_res = XmlConvert!(enc, &mut read_buf, &mut write_buf);
// The write buf shrinks by how much was written to it
let written_size = write_buf_len - write_buf.len();
self.0.truncate(start_len + written_size);
if convert_res == XML_Convert_Result::COMPLETED || convert_res == XML_Convert_Result::INPUT_INCOMPLETE {
return true;
}
}
}
fn append_char(&mut self, c: XML_Char | {
self.inner().rent(|vec| {
let mut vec = vec.borrow_mut();
while *s != 0 {
if !vec.append_char(*s) {
return false;
}
s = s.offset(1)
}
true
})
} | identifier_body |
string_pool.rs | Vec(BumpVec::new_in(&pool.bump));
let sl = pool.current_bump_vec.replace(vec).0.into_bump_slice_mut();
Cell::from_mut(sl).as_slice_of_cells()
})
}
/// Resets the current bump vec to the beginning
pub(crate) fn clear_current(&self) {
self.inner().rent(|v| v.borrow_mut().0.clear())
}
/// Obtains the length of the current BumpVec.
pub(crate) fn len(&self) -> usize {
self.inner().rent(|vec| vec.borrow().0.len())
}
/// Call callback with an immutable buffer of the current BumpVec. This must
/// be a callback to ensure that we don't (safely) borrow the slice for
/// longer than it stays vaild.
pub(crate) fn current_slice<F, R>(&self, mut callback: F) -> R
where F: FnMut(&[XML_Char]) -> R
{
self.inner().rent(|v| callback(v.borrow().0.as_slice()))
}
/// Call callback with a mutable buffer of the current BumpVec. This must
/// be a callback to ensure that we don't (safely) borrow the slice for
/// longer than it stays vaild.
pub(crate) fn current_mut_slice<F, R>(&self, mut callback: F) -> R
where F: FnMut(&mut [XML_Char]) -> R
{
self.inner().rent(|v| callback(v.borrow_mut().0.as_mut_slice()))
}
/// Unsafe temporary version of `current_slice()`. This needs to be removed
/// when callers are made safe.
pub(crate) unsafe fn current_start(&self) -> *const XML_Char {
self.inner().rent(|v| v.borrow().0.as_ptr())
}
/// Appends a char to the current BumpVec.
pub(crate) fn append_char(&self, c: XML_Char) -> bool {
self.inner().rent(|vec| vec.borrow_mut().append_char(c))
}
/// Overwrites the last char in the current BumpVec.
/// Note that this will panic if empty. This is not an insert
/// operation as it does not shift bytes afterwards.
pub(crate) fn replace_last_char(&self, c: XML_Char) {
self.inner().rent(|buf| {
*buf.borrow_mut()
.0
.last_mut()
.expect("Called replace_last_char() when string was empty") = c;
})
}
/// Decrements the length, panicing if len is 0
pub(crate) fn backtrack(&self) {
self.inner().rent(|vec| vec.borrow_mut().0.pop().expect("Called backtrack() on empty BumpVec"));
}
/// Gets the last character, panicing if len is 0
pub(crate) fn get_last_char(&self) -> XML_Char {
self.inner().rent(|buf| *buf.borrow().0.last().expect("Called get_last_char() when string was empty"))
}
/// Appends an entire C String to the current BumpVec.
pub(crate) unsafe fn append_c_string(&self, mut s: *const XML_Char) -> bool {
self.inner().rent(|vec| {
let mut vec = vec.borrow_mut();
while *s != 0 {
if !vec.append_char(*s) {
return false;
}
s = s.offset(1)
}
true
})
}
/// Resets the current Bump and deallocates its contents.
/// The `inner` method must never be called here as it assumes
/// self.0 is never `None`
pub(crate) fn clear(&mut self) {
let mut inner_pool = self.0.take();
let mut bump = inner_pool.unwrap().into_head();
bump.reset();
inner_pool = Some(InnerStringPool::new(
bump,
|bump| RefCell::new(RentedBumpVec(BumpVec::new_in(&bump))),
));
swap(&mut self.0, &mut inner_pool);
}
pub(crate) fn store_c_string(
&self,
enc: &ENCODING,
buf: ExpatBufRef,
) -> bool {
self.inner().rent(|vec| {
let mut vec = vec.borrow_mut();
if !vec.append(enc, buf) {
return false;
}
if !vec.append_char('\0' as XML_Char) {
return false;
}
true
})
}
pub(crate) fn append(
&self,
enc: &ENCODING,
read_buf: ExpatBufRef,
) -> bool {
self.inner().rent(|vec| vec.borrow_mut().append(enc, read_buf))
}
/// Copies a NUL-terminated C string (terminator included) into the pool
/// and finishes the string, returning the resulting slice, or None if an
/// allocation failed.
pub(crate) unsafe fn copy_c_string(
    &self,
    mut s: *const XML_Char,
) -> Option<&[XML_Char]> {
    let copied = self.inner().rent(|cell| {
        let mut vec = cell.borrow_mut();
        loop {
            let c = *s;
            if !vec.append_char(c) {
                return false;
            }
            // The terminator itself is appended before we stop.
            if c == 0 {
                return true;
            }
            s = s.offset(1);
        }
    });
    if copied {
        Some(self.finish_string())
    } else {
        None
    }
}
/// Copies exactly `n` chars from `s` into the pool and finishes the
/// string, returning the resulting slice, or None if an allocation
/// failed.
///
/// # Panics
/// Panics if `n` is negative.
///
/// # Safety
/// `s` must point to at least `n` readable `XML_Char`s.
pub(crate) unsafe fn copy_c_string_n(
    &self,
    mut s: *const XML_Char,
    n: c_int,
) -> Option<&[XML_Char]> {
    let successful = self.inner().rent(|vec| {
        let mut vec = vec.borrow_mut();
        // Shadow the c_int count with a usize; a negative count is a
        // caller bug and panics here rather than wrapping.
        let mut n: usize = n
            .try_into()
            .expect("negative count passed to copy_c_string_n");
        // Reserve up front so the per-char appends below cannot fail
        // except under genuine allocator exhaustion.
        if vec.0.try_reserve_exact(n).is_err() {
            return false;
        }
        while n > 0 {
            if !vec.append_char(*s) {
                return false;
            }
            n -= 1;
            s = s.offset(1);
        }
        true
    });
    if !successful {
        return None;
    }
    Some(self.finish_string())
}
}
/// Newtype over a bump-allocated vector of `XML_Char`s; the building block
/// the string pool hands out through the rental wrapper.
#[derive(Debug)]
pub(crate) struct RentedBumpVec<'bump>(BumpVec<'bump, XML_Char>);
impl<'bump> RentedBumpVec<'bump> {
    /// True when len == capacity, i.e. the next push would reallocate.
    fn is_full(&self) -> bool {
        self.0.len() == self.0.capacity()
    }
    /// Decodes `read_buf` with `enc`, appending the converted chars.
    /// Returns false if a reservation fails; otherwise loops until the
    /// converter reports the input fully consumed (or incomplete at a
    /// character boundary).
    fn append<'a>(
        &mut self,
        enc: &ENCODING,
        mut read_buf: ExpatBufRef<'a>,
    ) -> bool {
        loop {
            // REXPAT: always reserve at least 4 bytes,
            // so at least one character gets converted every iteration
            if self.0.try_reserve(read_buf.len().max(4)).is_err() {
                return false;
            }
            let start_len = self.0.len();
            let cap = self.0.capacity();
            // Grow to full capacity so the converter has a writable tail;
            // the unused excess is trimmed below once we know how much
            // was actually written.
            self.0.resize(cap, 0);
            let mut write_buf = ExpatBufRefMut::from(&mut self.0[start_len..]);
            let write_buf_len = write_buf.len();
            let convert_res = XmlConvert!(enc, &mut read_buf, &mut write_buf);
            // The write buf shrinks by how much was written to it
            let written_size = write_buf_len - write_buf.len();
            self.0.truncate(start_len + written_size);
            if convert_res == XML_Convert_Result::COMPLETED || convert_res == XML_Convert_Result::INPUT_INCOMPLETE {
                return true;
            }
        }
    }
    /// Pushes one char, returning false if reserving space for it fails.
    fn append_char(&mut self, c: XML_Char) -> bool {
        if self.0.try_reserve(1).is_err() {
            false
        } else {
            self.0.push(c);
            true
        }
    }
}
#[cfg(test)]
mod consts {
    //! Shared character constants for the string-pool unit tests.
    use super::XML_Char;
    pub const A: XML_Char = 'a' as XML_Char;
    pub const B: XML_Char = 'b' as XML_Char;
    pub const C: XML_Char = 'c' as XML_Char;
    pub const D: XML_Char = 'd' as XML_Char;
    pub const NULL: XML_Char = '\0' as XML_Char;
    // NUL-terminated sample string "cddc".
    pub static S: [XML_Char; 5] = [C, D, D, C, NULL];
}
#[test]
fn test_append_char() {
    use consts::*;
    // Chars accumulate in the current (unfinished) string of a fresh pool.
    let mut pool = StringPool::try_new().unwrap();
    assert!(pool.append_char(A));
    pool.current_slice(|s| assert_eq!(s, [A]));
    assert!(pool.append_char(B));
    pool.current_slice(|s| assert_eq!(s, [A, B]));
    // New BumpVec
    pool.finish_string();
    assert!(pool.append_char(C));
    pool.current_slice(|s| assert_eq!(s, [C]));
}
#[test]
fn test_append_string() {
    use consts::*;
    let mut pool = StringPool::try_new().unwrap();
    let mut string = [A, B, C, NULL];
    unsafe {
        // append_c_string stops at, and excludes, the NUL terminator.
        assert!(pool.append_c_string(string.as_mut_ptr()));
    }
    pool.current_slice(|s| assert_eq!(s, [A, B, C]));
}
#[test]
fn test_copy_string() {
use consts::*;
let mut pool = StringPool::try_new().unwrap();
assert!(pool.append_char(A));
pool.current_slice(|s| assert_eq!(s, [A]));
| let new_string = unsafe { | random_line_split | |
string_pool.rs | = Box::try_new(bump).map_err(|_| ())?;
Ok(StringPool(Some(InnerStringPool::new(
boxed_bump,
|bump| RefCell::new(RentedBumpVec(BumpVec::new_in(&bump))),
))))
}
/// # Safety
///
/// The inner type is only ever None in middle of the clear()
/// method. Therefore it is safe to use anywhere else.
fn inner(&self) -> &InnerStringPool {
    match self.0 {
        Some(ref pool) => pool,
        // SAFETY: self.0 is only None transiently inside clear(),
        // which never calls inner().
        None => unsafe { std::hint::unreachable_unchecked() },
    }
}
/// Determines whether or not the current BumpVec is empty.
pub(crate) fn is_empty(&self) -> bool {
    self.inner().rent(|cell| {
        let vec = cell.borrow();
        vec.0.is_empty()
    })
}
/// Determines whether or not the current BumpVec is full.
pub(crate) fn is_full(&self) -> bool {
    self.inner().rent(|cell| {
        let vec = cell.borrow();
        vec.is_full()
    })
}
/// Gets the current vec, converts it into an immutable slice,
/// and resets bookkeeping so that it will create a new vec next time.
pub(crate) fn finish_string(&self) -> &[XML_Char] {
    self.inner().ref_rent_all(|pool| {
        // Swap a fresh empty vec in; the old one becomes the finished,
        // immutable slice. (`vec` is moved, so it needs no `mut`.)
        let vec = RentedBumpVec(BumpVec::new_in(&pool.bump));
        pool.current_bump_vec.replace(vec).0.into_bump_slice()
    })
}
/// Gets the current vec, converts it into a slice of cells (with interior mutability),
/// and resets bookkeeping so that it will create a new vec next time.
pub(crate) fn finish_string_cells(&self) -> &[Cell<XML_Char>] {
    self.inner().ref_rent_all(|pool| {
        // Swap a fresh empty vec in and expose the finished one as a
        // slice of Cells so callers may patch chars in place.
        // (`vec` is moved, so it needs no `mut`.)
        let vec = RentedBumpVec(BumpVec::new_in(&pool.bump));
        let sl = pool.current_bump_vec.replace(vec).0.into_bump_slice_mut();
        Cell::from_mut(sl).as_slice_of_cells()
    })
}
/// Resets the current bump vec to the beginning (length 0, capacity kept).
pub(crate) fn clear_current(&self) {
    self.inner().rent(|cell| {
        cell.borrow_mut().0.clear();
    })
}
/// Obtains the length of the current BumpVec.
pub(crate) fn len(&self) -> usize {
    self.inner().rent(|cell| {
        let vec = cell.borrow();
        vec.0.len()
    })
}
/// Call callback with an immutable buffer of the current BumpVec. This must
/// be a callback to ensure that we don't (safely) borrow the slice for
/// longer than it stays valid.
pub(crate) fn current_slice<F, R>(&self, mut callback: F) -> R
where F: FnMut(&[XML_Char]) -> R
{
    self.inner().rent(|v| callback(v.borrow().0.as_slice()))
}
/// Call callback with a mutable buffer of the current BumpVec. This must
/// be a callback to ensure that we don't (safely) borrow the slice for
/// longer than it stays valid.
pub(crate) fn current_mut_slice<F, R>(&self, mut callback: F) -> R
where F: FnMut(&mut [XML_Char]) -> R
{
    self.inner().rent(|v| callback(v.borrow_mut().0.as_mut_slice()))
}
/// Unsafe temporary version of `current_slice()`. This needs to be removed
/// when callers are made safe.
///
/// # Safety
/// The returned pointer is only valid until the current BumpVec next
/// grows, is finished, or is cleared.
pub(crate) unsafe fn current_start(&self) -> *const XML_Char {
    self.inner().rent(|v| v.borrow().0.as_ptr())
}
/// Appends a single char to the current BumpVec, returning false if the
/// underlying allocation fails.
pub(crate) fn append_char(&self, c: XML_Char) -> bool {
    self.inner().rent(|cell| {
        let mut vec = cell.borrow_mut();
        vec.append_char(c)
    })
}
/// Overwrites the last char in the current BumpVec.
///
/// Panics when the vec is empty. This is a plain overwrite, not an
/// insert: no bytes after the last position are shifted.
pub(crate) fn replace_last_char(&self, c: XML_Char) {
    self.inner().rent(|cell| {
        let mut vec = cell.borrow_mut();
        let last = vec
            .0
            .last_mut()
            .expect("Called replace_last_char() when string was empty");
        *last = c;
    })
}
/// Removes the final char (decrements the length), panicking if len is 0.
pub(crate) fn backtrack(&self) {
    self.inner().rent(|cell| {
        cell.borrow_mut()
            .0
            .pop()
            .expect("Called backtrack() on empty BumpVec")
    });
}
/// Returns a copy of the last character, panicking if len is 0.
pub(crate) fn get_last_char(&self) -> XML_Char {
    self.inner().rent(|cell| {
        let vec = cell.borrow();
        *vec.0
            .last()
            .expect("Called get_last_char() when string was empty")
    })
}
/// Appends an entire C String to the current BumpVec.
pub(crate) unsafe fn append_c_string(&self, mut s: *const XML_Char) -> bool {
self.inner().rent(|vec| {
let mut vec = vec.borrow_mut();
while *s != 0 {
if !vec.append_char(*s) |
s = s.offset(1)
}
true
})
}
/// Resets the current Bump and deallocates its contents.
/// The `inner` method must never be called here as it assumes
/// self.0 is never `None`
pub(crate) fn clear(&mut self) {
    // Temporarily take the pool out of self (leaving None) so the rental
    // struct can be torn down and its Bump allocator recovered.
    let mut inner_pool = self.0.take();
    let mut bump = inner_pool.unwrap().into_head();
    // Frees every allocation made from this Bump in one shot.
    bump.reset();
    // Rebuild the rental pool around the recycled Bump, with a fresh empty
    // BumpVec as the current working buffer.
    inner_pool = Some(InnerStringPool::new(
        bump,
        |bump| RefCell::new(RentedBumpVec(BumpVec::new_in(&bump))),
    ));
    // Restore self.0 to Some(...) before returning.
    swap(&mut self.0, &mut inner_pool);
}
/// Converts `buf` with `enc` into the current BumpVec and then appends a
/// NUL terminator. Returns false if any allocation fails.
pub(crate) fn store_c_string(
    &self,
    enc: &ENCODING,
    buf: ExpatBufRef,
) -> bool {
    self.inner().rent(|cell| {
        let mut vec = cell.borrow_mut();
        // Short-circuits: the terminator is only appended when the
        // conversion itself succeeded.
        vec.append(enc, buf) && vec.append_char('\0' as XML_Char)
    })
}
/// Converts `read_buf` with `enc` and appends the result to the current
/// BumpVec.
pub(crate) fn append(
    &self,
    enc: &ENCODING,
    read_buf: ExpatBufRef,
) -> bool {
    self.inner().rent(|cell| {
        let mut vec = cell.borrow_mut();
        vec.append(enc, read_buf)
    })
}
/// Copies a NUL-terminated C string (terminator included) into the pool
/// and finishes the string, returning the resulting slice, or None if an
/// allocation failed.
pub(crate) unsafe fn copy_c_string(
    &self,
    mut s: *const XML_Char,
) -> Option<&[XML_Char]> {
    let copied = self.inner().rent(|cell| {
        let mut vec = cell.borrow_mut();
        loop {
            let c = *s;
            if !vec.append_char(c) {
                return false;
            }
            // The terminator itself is appended before we stop.
            if c == 0 {
                return true;
            }
            s = s.offset(1);
        }
    });
    if copied {
        Some(self.finish_string())
    } else {
        None
    }
}
/// Copies exactly `n` chars from `s` into the pool and finishes the
/// string, returning the resulting slice, or None if an allocation
/// failed.
///
/// # Panics
/// Panics if `n` is negative.
///
/// # Safety
/// `s` must point to at least `n` readable `XML_Char`s.
pub(crate) unsafe fn copy_c_string_n(
    &self,
    mut s: *const XML_Char,
    n: c_int,
) -> Option<&[XML_Char]> {
    let successful = self.inner().rent(|vec| {
        let mut vec = vec.borrow_mut();
        // Shadow the c_int count with a usize; a negative count is a
        // caller bug and panics here rather than wrapping.
        let mut n: usize = n
            .try_into()
            .expect("negative count passed to copy_c_string_n");
        // Reserve up front so the per-char appends below cannot fail
        // except under genuine allocator exhaustion.
        if vec.0.try_reserve_exact(n).is_err() {
            return false;
        }
        while n > 0 {
            if !vec.append_char(*s) {
                return false;
            }
            n -= 1;
            s = s.offset(1);
        }
        true
    });
    if !successful {
        return None;
    }
    Some(self.finish_string())
}
}
/// Newtype over a bump-allocated vector of `XML_Char`s; the building block
/// the string pool hands out through the rental wrapper.
#[derive(Debug)]
pub(crate) struct RentedBumpVec<'bump>(BumpVec<'bump, XML_Char>);
impl<'bump> RentedBumpVec<'bump> {
/// True when len == capacity, i.e. the next push would reallocate.
fn is_full(&self) -> bool {
    self.0.capacity() == self.0.len()
}
/// Decodes `read_buf` with `enc`, appending the converted chars.
/// Returns false if a reservation fails; otherwise loops until the
/// converter reports the input fully consumed (or incomplete at a
/// character boundary).
fn append<'a>(
    &mut self,
    enc: &ENCODING,
    mut read_buf: ExpatBufRef<'a>,
) -> bool {
    loop {
        // REXPAT: always reserve at least 4 bytes,
        // so at least one character gets converted every iteration
        if self.0.try_reserve(read_buf.len().max(4)).is_err() {
            return false;
        }
        let start_len = self.0.len();
        let cap = self.0.capacity();
        // Grow to full capacity so the converter has a writable tail;
        // the unused excess is trimmed below once we know how much was
        // actually written.
        self.0.resize(cap, 0);
        let mut write_buf = ExpatBufRefMut::from(&mut self.0[start_len..]);
        let write_buf_len = write_buf.len();
        let convert_res = XmlConvert!(enc, &mut read_buf, &mut write_buf);
        // The write buf shrinks by how much was written to it
        let written_size = write_buf_len - write_buf.len();
        self.0.truncate(start_len + written_size);
        if convert_res == XML_Convert_Result::COMPLETED || convert_res == XML_Convert_Result::INPUT_INCOMPLETE {
            return true;
        }
    }
}
fn append_char(&mut self, c: XML_Char | {
return false;
} | conditional_block |
index.js | copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
var app = {
// Application Constructor
// Entry point: wires up lifecycle events and runs the login /
// pending-checkin guards before anything else happens.
initialize: function() {
    this.bindEvents();
    this.make_validations();
},
// Bind Event Listeners
//
// Bind any events that are required on startup. Common events are:
// 'load', 'deviceready', 'offline', and 'online'.
bindEvents: function() {
    // Cordova fires 'deviceready' once native plugins are available.
    document.addEventListener('deviceready', this.onDeviceReady, false);
},
// Session guards: bounce to login when not authenticated, and lock the
// user on the check-in screen when an unfinished check-in is stored.
make_validations: function(){
    // 'is_logged' is persisted as a string (e.g. 'false'), hence the
    // loose comparison against both null and 'false'.
    var value = window.localStorage.getItem("is_logged");
    if (value == null || value == 'false')
        window.location = 'login.html';
    // Check whether a check-in pending completion is stored on the device
    var checkin_nota = window.localStorage.getItem("dados_ultimo_checkin_nota");
    checkin_nota = JSON.parse(checkin_nota);
    if (checkin_nota != null && !checkin_nota.nao_travar_tela_checkin)
        window.location = 'dados_checkin_nota.html';
},
|
// deviceready Event Handler
//
// The scope of 'this' is the event. In order to call the 'receivedEvent'
// function, we must explicitly call 'app.receivedEvent(...);'
onDeviceReady: function() {
    // Load dashboard data once the DOM is ready.
    $(document).ready(function (e) {
        app.load_update_data();
    });
    // Android back button navigates back instead of closing the app.
    document.addEventListener("backbutton", function (e) {
        e.preventDefault();
        util.return_last_page();
    }, false);
    // Background tracking is only started once a first foreground
    // geolocation call has been granted.
    util.getCurrentGeoLocation(
        function(s) {
            app.configureBackgroundGeoLocation();
            app.startBackgroundGeoLocation(); // starts collecting location data
        }, function(error) {
            console.log(error);
            /*
            navigator.notification.confirm('As permissões para uso do GPS não foram concedidas, alguns recursos podem não funcionar corretamente. ' + error,
            function (e) {
            }, 'Atenção', 'OK');*/
        });
    app.setupPush();
    // Navigation buttons: each records the current page on the breadcrumb
    // stack before moving on.
    $("#btnCheckin").click(function () {
        util.add_path_to_breadcrumb('index.html');
        window.location = 'checkin_nota.html';
    });
    $("#btnComprovarNota").click(function () {
        util.add_path_to_breadcrumb('index.html');
        window.location = 'comprovar_nota.html';
    });
    $("#btnNotasIniciadas").click(function () {
        util.add_path_to_breadcrumb('index.html');
        window.location = 'notas_iniciadas_usuario.html';
    });
    // NOTE(review): this button currently reloads index.html; the real
    // destination is commented out below — confirm whether intentional.
    $("#btnTransporteAgendado").click(function () {
        util.add_path_to_breadcrumb('index.html');
        window.location = 'index.html';
        // window.location = 'transporte_agendado.html';
    });
    $("#btnTransporteAgendado1").click(function () {
        util.add_path_to_breadcrumb('index.html');
        window.location = 'transporte_agendado.html';
    });
    $("#btnRealizarColeta").click(function () {
        util.add_path_to_breadcrumb('index.html');
        window.location = 'coleta_escolha.html';
    });
    // Show the logged-in user's name in the header.
    var login = window.localStorage.getItem("login");
    $("#usuario").text(login);
},
load_update_data: function(){
const monthNames = ["01", "02", "03", "04", "05", "06",
"07", "08", "09", "10", "11", "12"];
let dateObj = new Date();
let month = monthNames[dateObj.getMonth()];
let day = String(dateObj.getDate()).padStart(2, '0');
let day1 = String(dateObj.getDate()-1).padStart(2, '0');
let day2 = String(dateObj.getDate()-2).padStart(2, '0');
let year = dateObj.getFullYear();
let output = day + '/'+ month + '/' + year;
let output1 = day1 + '/'+ month + '/' + year;
let output2 = day2 + '/'+ month + '/' + year;
// $('#formDataShow').html('<p>'+output+'</p>');
$('#finalizadaDate').text(output);
$('#finalizadaDate1').text(output1);
$('#finalizadaDate2').text(output2);
let beforeSend = function () {
coreProgress.showPreloader(undefined, 0);
}
let success = function (msg) {
coreDialog.close('.preloader');
console.log(msg);
if (msg.status == "ok") {
$('#entregas').text(msg.data.number_purchases_pendent);
$('#agendamentos').text(msg.data.number_transports_pendent);
$('#finalizadas').text(msg.data.number_delivery_finished);
$('#finalizadas1').text(msg.data.delivery_finished_daybefore);
$('#finalizadas2').text(msg.data.delivery_finished_twodaysBefore);
var config = {
value: msg.data.percent_complet,
text: '%',
durationAnimate: 3000,
padding: '3px',
color: 'white',
trailColor: 'black-opacity-10',
textSize: '50px',
textColor: 'black',
width:'160px',
strokeWidth: '2',
trailWidth:'8',
};
ProgressCircle.create(document.getElementById('progressUserKM'), config);
}
}
let error = function (msg) {
coreDialog.close('.preloader');
console.log(msg);
$('.formDataShow').html('<p>error</p>');
}
var formData = new FormData();
formData.append('username', window.localStorage.getItem("login"));
webservice_access.get_update_data(formData, beforeSend, success, error);
},
// Initializes the push-notification plugin and wires its lifecycle events.
setupPush: function() {
    console.log('calling push init');
    var push = PushNotification.init({
        "android": {
            "senderID": "12345"
        },
        "browser": {},
        "ios": {
            "sound": true,
            "vibration": true,
            "badge": true
        },
        "windows": {}
    });
    console.log('after init');
    // Persist the registration id; it should be re-posted to the server
    // whenever the platform issues a new one.
    push.on('registration', function(data) {
        console.log('registration event: ' + data.registrationId);
        var oldRegId = localStorage.getItem('registrationId');
        if (oldRegId !== data.registrationId) {
            // Save new registration ID
            localStorage.setItem('registrationId', data.registrationId);
            // Post registrationId to your app server as the value has changed
        }
        // Flip the UI from the "waiting" to the "received" state.
        var parentElement = document.getElementById('registration');
        var listeningElement = parentElement.querySelector('.waiting');
        var receivedElement = parentElement.querySelector('.received');
        listeningElement.setAttribute('style', 'display:none;');
        receivedElement.setAttribute('style', 'display:block;');
    });
    push.on('error', function(e) {
        console.log("push error = " + e.message);
    });
    // Surface incoming notifications through a native alert dialog.
    push.on('notification', function(data) {
        console.log('notification event');
        navigator.notification.alert(
            data.message, // message
            null, // callback
            data.title, // title
            'Ok' // buttonName
        );
    });
},
// Starts background location tracking; configureBackgroundGeoLocation()
// must have been called beforehand.
startBackgroundGeoLocation: function() {
    //app.configureBackgroundGeoLocation();
    // Turn ON the background-geolocation system. The user will be tracked whenever they suspend the app.
    window.plugins.backgroundGeoLocation.start();
    // window.plugins.backgroundGeoLocation.delete_all_locations()
},
stopBackgroundGeoLocation: function() {
    // If you wish to turn OFF background-tracking, call the #stop method.
    window.plugins.backgroundGeoLocation.stop();
},
configureBackgroundGeoLocation: function() {
// Your app must execute AT LEAST ONE call for the current position via standard Cordova geolocation,
// in order to prompt the user for Location permission.
window.navigator.geolocation.getCurrentPosition(function(location) {
console.log('Location from Cordova');
});
var bgGeo = window.plugins.backgroundGeoLocation;
var yourAjaxCallback = function(response) {
bgGeo.finish();
};
var callbackFn = function(location) {
console.log('[js] BackgroundGeoLocation callback: ' + location.latitude + ',' + location.longitude);
yourAjaxCallback.call(this);
};
var failureFn = function(error) {
console.log('BackgroundGeoLocation error');
}
var domain = window.localStorage.getItem("domain");
var url = domain + '/scan/example/webservice/ws_portal_transportador.php?request=InsertUserLocation';
var user = window.localStorage.getItem("login");
// BackgroundGeoLocation is highly configurable.
bgGeo.configure(callbackFn, failureFn, {
url: url,
params: {
user: user
},
headers: {
'apiKey': '78asd4546d4sa687e1d1xzlcknhwyhuWMKPSJDpox821 | random_line_split | |
def parseFullManifest(path):
    """Parse a manifest CSV-like file into a list of source descriptors.

    The separator (',', ';' or tab) is sniffed from the first line, which
    must therefore contain the expected number of fields. Lines starting
    with '#' are skipped. Each remaining line yields a dict with keys
    'path', 'group', 'module' and 'subDir'; returns [] when the separator
    cannot be determined.
    """
    sourceList = []
    nbFields = 6
    # Text mode (not 'rb') so str operations work on Python 3 as well.
    with open(path, 'r') as fd:
        # Sniff the separator from the first line, then rewind.
        firstLine = fd.readline()
        sep = ','
        if (len(firstLine.split(sep)) != nbFields):
            sep = ';'
        if (len(firstLine.split(sep)) != nbFields):
            sep = '\t'
        if (len(firstLine.split(sep)) != nbFields):
            print("Unknown separator")
            return sourceList
        fd.seek(0)
        # Parse the file. Note the first line is read again here, so it is
        # expected to be a '#' comment/header line.
        for line in fd:
            if (line.strip()).startswith("#"):
                continue
            words = line.split(sep)
            if (len(words) < (nbFields - 1)):
                print("Wrong number of fields, skipping this line")
                continue
            fullPath = words[0].strip(" ,;\t\n\r")
            groupName = words[2].strip(" ,;\t\n\r")
            moduleName = words[3].strip(" ,;\t\n\r")
            subDir = words[4].strip(" ,;\t\n\r")
            sourceList.append({"path": fullPath, "group": groupName,
                               "module": moduleName, "subDir": subDir})
    return sourceList
def parseDescriptions(path):
    """Parse a '|'-separated description file into {module: description}.

    Lines starting with '#' or not containing exactly one '|' separator
    are ignored; surrounding quotes and whitespace are stripped from both
    fields.
    """
    output = {}
    sep = '|'
    nbFields = 2
    # Text mode (not 'rb') so str operations work on Python 3 as well;
    # 'with' guarantees the handle is closed on every exit path.
    with open(path, 'r') as fd:
        for line in fd:
            if (line.strip()).startswith("#"):
                continue
            words = line.split(sep)
            if len(words) != nbFields:
                continue
            moduleName = words[0].strip(" \"\t\n\r")
            description = words[1].strip(" \"\t\n\r")
            output[moduleName] = description
    return output
if len(sys.argv) < 4:
print("USAGE: {0} monolithic_OTB_PATH OUTPUT_DIR Manifest_Path [module_dep [test_dep [mod_description]]]".format(sys.argv[0]))
print(" monolithic_OTB_PATH : checkout of OTB repository (will not be modified)")
print(" OUTPUT_DIR : output directory where OTB_Modular and OTB_remaining will be created ")
print(" Manifest_Path : path to manifest file, in CSV-like format. Fields are :")
print(" source_path/current_subDir/group/module/subDir/comment")
print(" module_dep : dependencies between modules")
print(" test_dep : additional dependencies for tests")
print(" mod_description : description for each module")
print(" migration_password : password to enable MIGRATION")
sys.exit(-1)
scriptDir = op.dirname(op.abspath(sys.argv[0]))
HeadOfOTBTree = sys.argv[1]
if (HeadOfOTBTree[-1] == '/'):
HeadOfOTBTree = HeadOfOTBTree[0:-1]
OutputDir = sys.argv[2]
HeadOfModularOTBTree = op.join(OutputDir,"OTB_Modular")
ManifestPath = sys.argv[3]
EdgePath = ""
if len(sys.argv) >= 5:
EdgePath = sys.argv[4]
testDependPath = ""
if len(sys.argv) >= 6:
testDependPath = sys.argv[5]
modDescriptionPath = ""
if len(sys.argv) >= 7:
modDescriptionPath = sys.argv[6]
enableMigration = False
if len(sys.argv) >= 8:
migrationPass = sys.argv[7]
if migrationPass == "redbutton":
enableMigration = True
# copy the whole OTB tree over to a temporary dir
HeadOfTempTree = op.join(OutputDir,"OTB_remaining")
if op.isdir(HeadOfTempTree):
shutil.rmtree(HeadOfTempTree)
if op.isdir(HeadOfModularOTBTree):
shutil.rmtree(HeadOfModularOTBTree)
print("Start to copy" + HeadOfOTBTree + " to ./OTB_remaining ...")
shutil.copytree(HeadOfOTBTree,HeadOfTempTree, ignore = shutil.ignore_patterns('.hg','.hg*'))
print("Done copying!")
# checkout OTB-Modular
cmd ='hg clone http://hg.orfeo-toolbox.org/OTB-Modular '+HeadOfModularOTBTree
os.system(cmd)
logDir = op.join(OutputDir,"logs")
if not op.isdir(logDir):
os.makedirs(logDir)
# read the manifest file
print ("moving files from ./OTB_remaining into modules in {0}".format(HeadOfModularOTBTree))
numOfMissingFiles = 0;
missingf = open(op.join(logDir,'missingFiles.log'),'w')
moduleList=[]
moduleDic={}
sourceList = parseFullManifest(ManifestPath)
for source in sourceList:
# build module list
moduleDic[source["module"]] = source["group"]
# create the path
inputfile = op.abspath(op.join(HeadOfTempTree,source["path"]))
outputPath = op.join(op.join(HeadOfModularOTBTree,"Modules"),op.join(source["group"],op.join(source["module"],source["subDir"])))
if not op.isdir(outputPath):
os.makedirs(outputPath)
# copying files to the destination
if op.isfile(inputfile):
if op.isfile(op.join(outputPath,op.basename(inputfile))):
os.remove(op.join(outputPath,op.basename(inputfile)))
shutil.move(inputfile, outputPath)
else:
missingf.write(inputfile+'\n')
numOfMissingFiles = numOfMissingFiles + 1
missingf.close()
print ("listed {0} missing files to logs/missingFiles.log").format(numOfMissingFiles)
moduleList = moduleDic.keys()
# after move, operate a documentation check
for source in sourceList:
outputPath = op.join(op.join(HeadOfModularOTBTree,"Modules"),op.join(source["group"],op.join(source["module"],source["subDir"])))
outputFile = op.join(outputPath,op.basename(source["path"]))
if op.isfile(outputFile):
if op.splitext(outputFile)[1] == ".h":
nextContent = documentationCheck.parserHeader(outputFile,source["module"])
fd = open(outputFile,'wb')
fd.writelines(nextContent)
fd.close()
# get dependencies (if file is present)
dependencies = {}
testDependencies = {}
exDependencies = {}
for mod in moduleList:
dependencies[mod] = []
testDependencies[mod] = []
exDependencies[mod] = []
if op.isfile(EdgePath):
fd = open(EdgePath,'rb')
for line in fd:
words = line.split(',')
if len(words) == 2:
depFrom = words[0].strip(" ,;\t\n\r")
depTo = words[1].strip(" ,;\t\n\r")
if dependencies.has_key(depFrom):
dependencies[depFrom].append(depTo)
else:
print("Bad dependency : "+depFrom+" -> "+depTo)
fd.close()
if op.isfile(testDependPath):
fd = open(testDependPath,'rb')
for line in fd:
words = line.split(',')
if len(words) == 2:
depFrom = words[0].strip(" ,;\t\n\r")
depTo = words[1].strip(" ,;\t\n\r")
if testDependencies.has_key(depFrom):
testDependencies[depFrom].append(depTo)
else:
print("Bad dependency : "+depFrom+" -> "+depTo)
fd.close()
"""
if op.isfile(exDependPath):
fd = open(exDependPath,'rb')
for line in fd:
words = line.split(',')
if len(words) == 2:
depFrom = words[0].strip(" ,;\t\n\r")
depTo = words[1].strip(" ,;\t\n\r")
if exDependencies.has_key(depFrom):
exDependencies[depFrom].append(depTo)
else:
print("Bad dependency : "+depFrom+" -> "+depTo)
fd.close()
"""
modDescriptions = {}
if op.isfile(modDescriptionPath):
modDescriptions = parseDescriptions(modDescriptionPath)
# list the new files
newf = open(op.join(logDir,'newFiles.log'),'w')
for (root, subDirs, files) in os.walk(HeadOfTempTree):
for afile in files:
newf.write(op.join(root, afile)+'\n')
newf.close()
print ("listed new files to logs/newFiles.log")
###########################################################################
print ('creating cmake files for each module (from the template module)')
#moduleList = os.listdir(HeadOfModularOTBTree)
for moduleName in moduleList:
moduleDir = op.join(op.join(HeadOfModularOTBTree,"Modules"),op.join(moduleDic[moduleName],moduleName))
cmakeModName = "OTB"+moduleName
if op.isdir(moduleDir):
# write CMakeLists.txt
filepath = moduleDir+'/CMakeLists.txt'
if not op.isfile(filepath):
o = open(filepath,'w')
if op.isdir(moduleDir+'/src'):
template_cmakelist = op.join(scriptDir,'templateModule/otb-template-module/CMakeLists.txt')
else:
template_cmakelist = op.join(scriptDir,'templateModule/otb-template-module/CMakeLists-nosrc.txt')
for line in open(template_cmak | parseFullManifest | identifier_name | |
modulizer.py | in sourceList:
if op.basename(item["path"]) == op.basename(srcf) and \
moduleName == item["module"]:
appDir = op.basename(op.dirname(item["path"]))
cmakeListPath = op.join(HeadOfOTBTree,op.join("Testing/Applications"),op.join(appDir,"CMakeLists.txt"))
break
# get App tests
if not op.isfile(cmakeListPath):
continue
appList[appName]["test"] = analyseAppManifest.findTestFromApp(cmakeListPath,appName)
# build list of link dependencies
linkLibs = ""
for dep in dependencies[moduleName]:
linkLibs = linkLibs + " ${OTB"+dep+"_LIBRARIES}" + "\n"
filepath = moduleDir+'/app/CMakeLists.txt'
if not op.isfile(filepath):
o = open(filepath,'w')
# define link libraries
o.write("set("+cmakeModName+"_LINK_LIBS\n")
o.write(linkLibs)
o.write(")\n")
for appli in appList:
content = "\notb_create_application(\n"
content += " NAME " + appli + "\n"
content += " SOURCES " + appList[appli]["source"] + "\n"
content += " LINK_LIBRARIES ${${otb-module}_LIBRARIES})\n"
o.write(content)
o.close()
filepath = moduleDir+'/test/CMakeLists.txt'
if not op.isfile(filepath):
o = open(filepath,'w')
o.write("otb_module_test()")
for appli in appList:
if not appList[appli].has_key("test"):
continue
o.write("\n#----------- "+appli+" TESTS ----------------\n")
for test in appList[appli]["test"]:
if test.count("${"):
print "Warning : test name contains a variable : "+test
continue
testcode=appList[appli]["test"][test]
testcode=[s.replace('OTB_TEST_APPLICATION', 'otb_test_application') for s in testcode]
o.writelines(testcode)
o.write("\n")
o.close()
# write test/CMakeLists.txt : done by dispatchTests.py
"""
if op.isdir(moduleDir+'/test'):
cxxFiles = glob.glob(moduleDir+'/test/*.cxx')
cxxFileList='';
for cxxf in cxxFiles:
cxxFileList = cxxFileList+cxxf.split('/')[-1]+'\n'
filepath = moduleDir+'/test/CMakeLists.txt'
if not op.isfile(filepath):
o = open(filepath,'w')
for line in open('./templateModule/otb-template-module/test/CMakeLists.txt','r'):
# TODO : refactor for OTB
words= moduleName.split('-')
moduleNameMod='';
for word in words:
moduleNameMod=moduleNameMod + word.capitalize()
line = line.replace('itkTemplateModule',moduleNameMod)
line = line.replace('itk-template-module',moduleName)
line = line.replace('LIST_OF_CXX_FILES',cxxFileList[0:-1]) #get rid of the last \n
o.write(line);
o.close()
"""
# write otb-module.cmake, which contains dependency info
filepath = moduleDir+'/otb-module.cmake'
if not op.isfile(filepath):
o = open(filepath,'w')
for line in open(op.join(scriptDir,'templateModule/otb-template-module/otb-module.cmake'),'r'):
# replace documentation
if line.find("DESCRIPTION_TO_BE_REPLACED") >= 0:
docString = "\"TBD\""
if moduleName in modDescriptions:
descPos = line.find("DESCRIPTION_TO_BE_REPLACED")
limitChar = 80
docString = "\""+modDescriptions[moduleName]+"\""
curPos = 80 - descPos
while curPos < len(docString):
lastSpace = docString[0:curPos].rfind(' ')
if lastSpace > max(0,curPos-80):
docString = docString[0:lastSpace] + '\n' + docString[lastSpace+1:]
else:
docString = docString[0:curPos] + '\n' + docString[curPos:]
curPos += 81
line = line.replace('DESCRIPTION_TO_BE_REPLACED',docString)
# replace module name
line = line.replace('otb-template-module',cmakeModName)
# replace depend list
dependTagPos = line.find("DEPENDS_TO_BE_REPLACED")
if dependTagPos >= 0:
replacementStr = "DEPENDS"
indentStr = ""
for it in range(dependTagPos+2):
indentStr = indentStr + " "
if len(dependencies[moduleName]) > 0:
deplist = dependencies[moduleName]
else:
deplist = ["Common"]
for dep in deplist:
replacementStr = replacementStr + "\n" + indentStr +"OTB"+ dep
line = line.replace('DEPENDS_TO_BE_REPLACED',replacementStr)
# replace test_depend list
testDependTagPos = line.find("TESTDEP_TO_BE_REPLACED")
if testDependTagPos >= 0:
if moduleName.startswith("App"):
# for application : hardcode TestKernel and CommandLine
indentStr = ""
for it in range(testDependTagPos+2):
indentStr = indentStr + " "
replacementStr = "TEST_DEPENDS\n" + indentStr + "OTBTestKernel\n" + indentStr + "OTBCommandLine"
line = line.replace('TESTDEP_TO_BE_REPLACED',replacementStr)
else:
# standard case
if len(testDependencies[moduleName]) > 0:
indentStr = ""
replacementStr = "TEST_DEPENDS"
for it in range(testDependTagPos+2):
indentStr = indentStr + " "
for dep in testDependencies[moduleName]:
replacementStr = replacementStr + "\n" + indentStr +"OTB"+ dep
line = line.replace('TESTDEP_TO_BE_REPLACED',replacementStr)
else:
line = line.replace('TESTDEP_TO_BE_REPLACED','')
# replace example_depend list
exDependTagPos = line.find("EXDEP_TO_BE_REPLACED")
if exDependTagPos >= 0:
if len(exDependencies[moduleName]) > 0:
indentStr = ""
replacementStr = "EXAMPLE_DEPENDS"
for it in range(exDependTagPos+2):
indentStr = indentStr + " "
for dep in exDependencies[moduleName]:
replacementStr = replacementStr + "\n" + indentStr +"OTB"+ dep
line = line.replace('EXDEP_TO_BE_REPLACED',replacementStr)
else:
line = line.replace('EXDEP_TO_BE_REPLACED','')
o.write(line);
o.close()
# call dispatchTests to fill test/CMakeLists
if op.isfile(testDependPath):
dispatchTests.main(["dispatchTests.py",ManifestPath,HeadOfOTBTree,HeadOfModularOTBTree,testDependPath])
"""
# call dispatchExamples to fill example/CMakeLists
if op.isfile(exDependPath):
dispatchExamples.main(["dispatchExamples.py",ManifestPath,HeadOfOTBTree,HeadOfModularOTBTree,exDependPath])
"""
# examples
for i in sorted(os.listdir(HeadOfTempTree + "/Examples")):
if i == "CMakeLists.txt" or i == "README.txt" or i.startswith("DataRepresentation"):
continue
for j in sorted(os.listdir(HeadOfTempTree + "/Examples/" + i)):
if j == "CMakeLists.txt" or j.startswith("otb"):
continue
command = "mv %s/Examples/%s/%s %s/Examples/%s/%s" % ( HeadOfTempTree, i, j, HeadOfModularOTBTree, i, j)
os.system(command)
for i in sorted(os.listdir(HeadOfTempTree + "/Examples/DataRepresentation")):
if i == "CMakeLists.txt" or i == "README.txt":
continue
for j in sorted(os.listdir(HeadOfTempTree + "/Examples/DataRepresentation/" + i)):
if j == "CMakeLists.txt" or j.startswith("otb"):
continue
command = "mv %s/Examples/DataRepresentation/%s/%s %s/Examples/DataRepresentation/%s/%s" % ( HeadOfTempTree, i, j, HeadOfModularOTBTree, i, j)
os.system(command)
# save version without patches (so that we can regenerate patches later)
os.system( "cp -ar " + op.join(OutputDir,"OTB_Modular") + " " + op.join(OutputDir,"OTB_Modular-nopatch") )
| # apply patches in OTB_Modular
curdir = op.abspath(op.dirname(__file__))
command = "cd " + op.join(OutputDir,"OTB_Modular") + " && patch -p1 < " + curdir + "/patches/otbmodular.patch"
print "Executing " + command | random_line_split | |
modulizer.py |
if len(sys.argv) < 4:
print("USAGE: {0} monolithic_OTB_PATH OUTPUT_DIR Manifest_Path [module_dep [test_dep [mod_description]]]".format(sys.argv[0]))
print(" monolithic_OTB_PATH : checkout of OTB repository (will not be modified)")
print(" OUTPUT_DIR : output directory where OTB_Modular and OTB_remaining will be created ")
print(" Manifest_Path : path to manifest file, in CSV-like format. Fields are :")
print(" source_path/current_subDir/group/module/subDir/comment")
print(" module_dep : dependencies between modules")
print(" test_dep : additional dependencies for tests")
print(" mod_description : description for each module")
print(" migration_password : password to enable MIGRATION")
sys.exit(-1)
scriptDir = op.dirname(op.abspath(sys.argv[0]))
HeadOfOTBTree = sys.argv[1]
if (HeadOfOTBTree[-1] == '/'):
HeadOfOTBTree = HeadOfOTBTree[0:-1]
OutputDir = sys.argv[2]
HeadOfModularOTBTree = op.join(OutputDir,"OTB_Modular")
ManifestPath = sys.argv[3]
EdgePath = ""
if len(sys.argv) >= 5:
EdgePath = sys.argv[4]
testDependPath = ""
if len(sys.argv) >= 6:
testDependPath = sys.argv[5]
modDescriptionPath = ""
if len(sys.argv) >= 7:
modDescriptionPath = sys.argv[6]
enableMigration = False
if len(sys.argv) >= 8:
migrationPass = sys.argv[7]
if migrationPass == "redbutton":
enableMigration = True
# copy the whole OTB tree over to a temporary dir
HeadOfTempTree = op.join(OutputDir,"OTB_remaining")
if op.isdir(HeadOfTempTree):
shutil.rmtree(HeadOfTempTree)
if op.isdir(HeadOfModularOTBTree):
shutil.rmtree(HeadOfModularOTBTree)
print("Start to copy" + HeadOfOTBTree + " to ./OTB_remaining ...")
shutil.copytree(HeadOfOTBTree,HeadOfTempTree, ignore = shutil.ignore_patterns('.hg','.hg*'))
print("Done copying!")
# checkout OTB-Modular
cmd ='hg clone http://hg.orfeo-toolbox.org/OTB-Modular '+HeadOfModularOTBTree
os.system(cmd)
logDir = op.join(OutputDir,"logs")
if not op.isdir(logDir):
os.makedirs(logDir)
# read the manifest file
print ("moving files from ./OTB_remaining into modules in {0}".format(HeadOfModularOTBTree))
numOfMissingFiles = 0;
missingf = open(op.join(logDir,'missingFiles.log'),'w')
moduleList=[]
moduleDic={}
sourceList = parseFullManifest(ManifestPath)
for source in sourceList:
# build module list
moduleDic[source["module"]] = source["group"]
# create the path
inputfile = op.abspath(op.join(HeadOfTempTree,source["path"]))
outputPath = op.join(op.join(HeadOfModularOTBTree,"Modules"),op.join(source["group"],op.join(source["module"],source["subDir"])))
if not op.isdir(outputPath):
os.makedirs(outputPath)
# copying files to the destination
if op.isfile(inputfile):
if op.isfile(op.join(outputPath,op.basename(inputfile))):
os.remove(op.join(outputPath,op.basename(inputfile)))
shutil.move(inputfile, outputPath)
else:
missingf.write(inputfile+'\n')
numOfMissingFiles = numOfMissingFiles + 1
missingf.close()
print ("listed {0} missing files to logs/missingFiles.log").format(numOfMissingFiles)
moduleList = moduleDic.keys()
# after move, operate a documentation check
for source in sourceList:
outputPath = op.join(op.join(HeadOfModularOTBTree,"Modules"),op.join(source["group"],op.join(source["module"],source["subDir"])))
outputFile = op.join(outputPath,op.basename(source["path"]))
if op.isfile(outputFile):
if op.splitext(outputFile)[1] == ".h":
nextContent = documentationCheck.parserHeader(outputFile,source["module"])
fd = open(outputFile,'wb')
fd.writelines(nextContent)
fd.close()
# get dependencies (if file is present)
dependencies = {}
testDependencies = {}
exDependencies = {}
for mod in moduleList:
dependencies[mod] = []
testDependencies[mod] = []
exDependencies[mod] = []
if op.isfile(EdgePath):
fd = open(EdgePath,'rb')
for line in fd:
words = line.split(',')
if len(words) == 2:
depFrom = words[0].strip(" ,;\t\n\r")
depTo = words[1].strip(" ,;\t\n\r")
if dependencies.has_key(depFrom):
dependencies[depFrom].append(depTo)
else:
print("Bad dependency : "+depFrom+" -> "+depTo)
fd.close()
if op.isfile(testDependPath):
fd = open(testDependPath,'rb')
for line in fd:
words = line.split(',')
if len(words) == 2:
depFrom = words[0].strip(" ,;\t\n\r")
depTo = words[1].strip(" ,;\t\n\r")
if testDependencies.has_key(depFrom):
testDependencies[depFrom].append(depTo)
else:
print("Bad dependency : "+depFrom+" -> "+depTo)
fd.close()
"""
if op.isfile(exDependPath):
fd = open(exDependPath,'rb')
for line in fd:
words = line.split(',')
if len(words) == 2:
depFrom = words[0].strip(" ,;\t\n\r")
depTo = words[1].strip(" ,;\t\n\r")
if exDependencies.has_key(depFrom):
exDependencies[depFrom].append(depTo)
else:
print("Bad dependency : "+depFrom+" -> "+depTo)
fd.close()
"""
modDescriptions = {}
if op.isfile(modDescriptionPath):
modDescriptions = parseDescriptions(modDescriptionPath)
# list the new files
newf = open(op.join(logDir,'newFiles.log'),'w')
for (root, subDirs, files) in os.walk(HeadOfTempTree):
for afile in files:
newf.write(op.join(root, afile)+'\n')
newf.close()
print ("listed new files to logs/newFiles.log")
###########################################################################
print ('creating cmake files for each module (from the template module)')
#moduleList = os.listdir(HeadOfModularOTBTree)
for moduleName in moduleList:
moduleDir = op.join(op.join(HeadOfModularOTBTree,"Modules"),op.join(moduleDic[moduleName],moduleName))
cmakeModName = "OTB"+moduleName
if op.isdir(moduleDir):
# write CMakeLists.txt
filepath = moduleDir+'/CMakeLists.txt'
if not op.isfile(filepath):
o = open(filepath,'w')
if op.isdir(moduleDir+'/src'):
template_cmakelist = op.join(scriptDir,'templateModule/otb-template-module/CMakeLists.txt')
else:
template_cmakelist = op.join(scriptDir,'templateModule/otb-template-module/CMakeLists-nosrc.txt')
for line in open(template_cmakelist,'r'):
line = line.replace('otb-template-module',cmakeModName)
o.write(line);
o.close()
# write src/CMakeLists.txt
# list of CXX files
if op.isdir(moduleDir+'/src'):
cxxFiles = glob.glob(moduleDir+'/src/*.cxx')
cxxFileList='';
for cxxf in cxxFiles:
cxxFileList = cxxFileList+' '+cxxf.split('/')[-1]+'\n'
# build list of link dependencies
linkLibs = ""
for dep in dependencies[moduleName]:
#verify if dep is a header-onlymodule
depThirdParty = False
try:
moduleDic[dep]
except KeyError:
# this is a ThirdParty module
depThirdParty = True
if not depThirdParty:
depModuleDir = op.join(op.join(HeadOfModularOTBTree,"Modules"),op.join(moduleDic[dep],dep))
depcxx = glob.glob(depModuleDir+'/src/*.cxx')
if depcxx :
linkLibs = linkLibs + " ${OTB"+dep+"_LIBRARIES}" + "\n"
else:
linkLibs = linkLibs + " ${OTB"+dep+"_LIBRARIES}" + "\n"
if len(linkLib | output = {}
sep = '|'
nbFields = 2
fd = open(path,'rb')
for line in fd:
if (line.strip()).startswith("#"):
continue
words = line.split(sep)
if len(words) != nbFields:
continue
moduleName = words[0].strip(" \"\t\n\r")
description = words[1].strip(" \"\t\n\r")
output[moduleName] = description
fd.close()
return output | identifier_body | |
modulizer.py |
modDescriptionPath = ""
if len(sys.argv) >= 7:
modDescriptionPath = sys.argv[6]
enableMigration = False
if len(sys.argv) >= 8:
migrationPass = sys.argv[7]
if migrationPass == "redbutton":
enableMigration = True
# copy the whole OTB tree over to a temporary dir
HeadOfTempTree = op.join(OutputDir,"OTB_remaining")
if op.isdir(HeadOfTempTree):
shutil.rmtree(HeadOfTempTree)
if op.isdir(HeadOfModularOTBTree):
shutil.rmtree(HeadOfModularOTBTree)
print("Start to copy" + HeadOfOTBTree + " to ./OTB_remaining ...")
shutil.copytree(HeadOfOTBTree,HeadOfTempTree, ignore = shutil.ignore_patterns('.hg','.hg*'))
print("Done copying!")
# checkout OTB-Modular
cmd ='hg clone http://hg.orfeo-toolbox.org/OTB-Modular '+HeadOfModularOTBTree
os.system(cmd)
logDir = op.join(OutputDir,"logs")
if not op.isdir(logDir):
os.makedirs(logDir)
# read the manifest file
print ("moving files from ./OTB_remaining into modules in {0}".format(HeadOfModularOTBTree))
numOfMissingFiles = 0;
missingf = open(op.join(logDir,'missingFiles.log'),'w')
moduleList=[]
moduleDic={}
sourceList = parseFullManifest(ManifestPath)
for source in sourceList:
# build module list
moduleDic[source["module"]] = source["group"]
# create the path
inputfile = op.abspath(op.join(HeadOfTempTree,source["path"]))
outputPath = op.join(op.join(HeadOfModularOTBTree,"Modules"),op.join(source["group"],op.join(source["module"],source["subDir"])))
if not op.isdir(outputPath):
os.makedirs(outputPath)
# copying files to the destination
if op.isfile(inputfile):
if op.isfile(op.join(outputPath,op.basename(inputfile))):
os.remove(op.join(outputPath,op.basename(inputfile)))
shutil.move(inputfile, outputPath)
else:
missingf.write(inputfile+'\n')
numOfMissingFiles = numOfMissingFiles + 1
missingf.close()
print ("listed {0} missing files to logs/missingFiles.log").format(numOfMissingFiles)
moduleList = moduleDic.keys()
# after move, operate a documentation check
for source in sourceList:
outputPath = op.join(op.join(HeadOfModularOTBTree,"Modules"),op.join(source["group"],op.join(source["module"],source["subDir"])))
outputFile = op.join(outputPath,op.basename(source["path"]))
if op.isfile(outputFile):
if op.splitext(outputFile)[1] == ".h":
nextContent = documentationCheck.parserHeader(outputFile,source["module"])
fd = open(outputFile,'wb')
fd.writelines(nextContent)
fd.close()
# get dependencies (if file is present)
dependencies = {}
testDependencies = {}
exDependencies = {}
for mod in moduleList:
dependencies[mod] = []
testDependencies[mod] = []
exDependencies[mod] = []
if op.isfile(EdgePath):
fd = open(EdgePath,'rb')
for line in fd:
words = line.split(',')
if len(words) == 2:
depFrom = words[0].strip(" ,;\t\n\r")
depTo = words[1].strip(" ,;\t\n\r")
if dependencies.has_key(depFrom):
dependencies[depFrom].append(depTo)
else:
print("Bad dependency : "+depFrom+" -> "+depTo)
fd.close()
if op.isfile(testDependPath):
fd = open(testDependPath,'rb')
for line in fd:
words = line.split(',')
if len(words) == 2:
depFrom = words[0].strip(" ,;\t\n\r")
depTo = words[1].strip(" ,;\t\n\r")
if testDependencies.has_key(depFrom):
testDependencies[depFrom].append(depTo)
else:
print("Bad dependency : "+depFrom+" -> "+depTo)
fd.close()
"""
if op.isfile(exDependPath):
fd = open(exDependPath,'rb')
for line in fd:
words = line.split(',')
if len(words) == 2:
depFrom = words[0].strip(" ,;\t\n\r")
depTo = words[1].strip(" ,;\t\n\r")
if exDependencies.has_key(depFrom):
exDependencies[depFrom].append(depTo)
else:
print("Bad dependency : "+depFrom+" -> "+depTo)
fd.close()
"""
modDescriptions = {}
if op.isfile(modDescriptionPath):
modDescriptions = parseDescriptions(modDescriptionPath)
# list the new files
newf = open(op.join(logDir,'newFiles.log'),'w')
for (root, subDirs, files) in os.walk(HeadOfTempTree):
for afile in files:
newf.write(op.join(root, afile)+'\n')
newf.close()
print ("listed new files to logs/newFiles.log")
###########################################################################
print ('creating cmake files for each module (from the template module)')
#moduleList = os.listdir(HeadOfModularOTBTree)
for moduleName in moduleList:
moduleDir = op.join(op.join(HeadOfModularOTBTree,"Modules"),op.join(moduleDic[moduleName],moduleName))
cmakeModName = "OTB"+moduleName
if op.isdir(moduleDir):
# write CMakeLists.txt
filepath = moduleDir+'/CMakeLists.txt'
if not op.isfile(filepath):
o = open(filepath,'w')
if op.isdir(moduleDir+'/src'):
template_cmakelist = op.join(scriptDir,'templateModule/otb-template-module/CMakeLists.txt')
else:
template_cmakelist = op.join(scriptDir,'templateModule/otb-template-module/CMakeLists-nosrc.txt')
for line in open(template_cmakelist,'r'):
line = line.replace('otb-template-module',cmakeModName)
o.write(line);
o.close()
# write src/CMakeLists.txt
# list of CXX files
if op.isdir(moduleDir+'/src'):
cxxFiles = glob.glob(moduleDir+'/src/*.cxx')
cxxFileList='';
for cxxf in cxxFiles:
cxxFileList = cxxFileList+' '+cxxf.split('/')[-1]+'\n'
# build list of link dependencies
linkLibs = ""
for dep in dependencies[moduleName]:
#verify if dep is a header-onlymodule
depThirdParty = False
try:
moduleDic[dep]
except KeyError:
# this is a ThirdParty module
depThirdParty = True
if not depThirdParty:
depModuleDir = op.join(op.join(HeadOfModularOTBTree,"Modules"),op.join(moduleDic[dep],dep))
depcxx = glob.glob(depModuleDir+'/src/*.cxx')
if depcxx :
linkLibs = linkLibs + " ${OTB"+dep+"_LIBRARIES}" + "\n"
else:
linkLibs = linkLibs + " ${OTB"+dep+"_LIBRARIES}" + "\n"
if len(linkLibs) == 0:
linkLibs = " ${OTBITK_LIBRARIES}"
filepath = moduleDir+'/src/CMakeLists.txt'
if not op.isfile(filepath):
o = open(filepath,'w')
for line in open(op.join(scriptDir,'templateModule/otb-template-module/src/CMakeLists.txt'),'r'):
line = line.replace('otb-template-module',cmakeModName)
line = line.replace('LIST_OF_CXX_FILES',cxxFileList[0:-1]) #get rid of the last \n
line = line.replace('LINK_LIBRARIES_TO_BE_REPLACED',linkLibs)
o.write(line);
o.close()
# write app/CMakeLists.txt
if op.isdir(moduleDir+'/app'):
os.mkdir(moduleDir+'/test')
srcFiles = glob.glob(moduleDir+'/app/*.cxx')
srcFiles += glob.glob(moduleDir+'/app/*.h')
appList = {}
for srcf in srcFiles:
# get App name
appName = analyseAppManifest.findApplicationName(srcf)
if len(appName) == 0:
continue
appList[appName] = {"source":op.basename(srcf)}
# get original location
cmakeListPath = ""
for item in sourceList:
if op.basename(item["path"]) == op.basename(srcf) and \
moduleName == item["module"]:
appDir = op.basename(op.dirname(item["path"]))
cmakeListPath = op.join(HeadOfOTBTree,op.join("Testing/Applications"),op.join(appDir,"CMakeLists.txt"))
break
# get App tests
if not op.isfile(cmakeListPath):
continue
appList[appName]["test"] = analyseAppManifest.findTestFromApp(cmakeListPath,appName)
# build list of link dependencies
linkLibs = ""
for dep in dependencies[moduleName]:
linkLibs = linkLibs + | testDependPath = sys.argv[5] | conditional_block | |
scraper-twitter-user.py |
user_isFamous = False
user_location = ""
user_joinDate = ""
user_following = 0
user_followers = 0 | mention_tweet_counter = 0
url_tweet_counter = 0
retweet_counter = 0
tweet_id = 0
tweet_lang = ""
tweet_raw_text = ""
tweet_datetime = None
tweet_mentions = None
tweet_hashtags = None
tweet_owner_id = 0
tweet_retweeter = False
tweet_timestamp = 0
tweet_owner_name = ""
tweet_owner_username = ""
dict_mentions_mutual = defaultdict(lambda: 0)
dict_mentions_user = defaultdict(lambda: 0)
dict_mentions_p = defaultdict(lambda: 0)
dict_hashtag_p = defaultdict(lambda: 0)
dict_retweets = defaultdict(lambda: 0)
dict_mentions = defaultdict(lambda: 0)
dict_hashtag = defaultdict(lambda: 0)
dict_lang_p = defaultdict(lambda: 0)
dict_lang = defaultdict(lambda: 0)
_time_start_ = time.time()
print("\nAccessing %s profile on twitter.com..." % (_USER_))
error = True
while error:
try:
user_url = "/%s/with_replies"
res = ss.get(user_url % (_USER_), redirect=False)
if res.status // 100 != 2:
print("It looks like you're not logged in, I'll try to collect only what is public")
logged_in = False
user_url = "/%s"
document = ss.load(user_url % (_USER_))
if not document:
print("nothing public to bee seen... sorry")
return
error = False
except:
time.sleep(5)
profile = document.select(".ProfileHeaderCard")
# user screenname
_USER_ = profile.select(
".ProfileHeaderCard-screenname"
).then("a").getAttribute("href")
if not _USER_:
return
_USER_ = _USER_[0][1:]
_BASE_DIR_ = "_OUTPUT_/%s/" % (_USER_)
_BASE_PHOTOS = _BASE_DIR_ + "photos/"
_BASE_PHOTOS_PERSONAL = _BASE_PHOTOS + "personal/"
_BASE_PHOTOS_EXTERN = _BASE_PHOTOS + "extern/"
try:
os.makedirs(_BASE_PHOTOS_PERSONAL)
except:
pass
try:
os.makedirs(_BASE_PHOTOS_EXTERN)
except:
pass
# Is Famous
user_isFamous = True if profile.select(".Icon--verified") else False
# Name
user_name = profile.select(".ProfileHeaderCard-name").then("a .ProfileHeaderCard-nameLink").text()
# Biography
user_bio = profile.select(".ProfileHeaderCard-bio").text()
# Location
user_location = profile.select(".ProfileHeaderCard-locationText").text()
# Url
user_url = profile.select(".ProfileHeaderCard-urlText").then("a").getAttribute("title")
user_url = user_url[0] if user_url else ""
# Join Date
user_joinDate = profile.select(".ProfileHeaderCard-joinDateText").getAttribute("title")
user_joinDate = user_joinDate[0] if user_joinDate else ""
profileNav = document.select(".ProfileNav")
# user id
user_id = profileNav.getAttribute("data-user-id")[0]
# tweets
user_tweets = profileNav.select(".ProfileNav-item--tweets").then("a").getAttribute("title")
user_tweets = user_tweets[0].split(" ")[0].replace(",", "") if user_tweets else 0
# following
user_following = profileNav.select(".ProfileNav-item--following").then("a").getAttribute("title")
user_following = user_following[0].split(" ")[0].replace(",", "") if user_following else 0
# followers
user_followers = profileNav.select(".ProfileNav-item--followers").then("a").getAttribute("title")
user_followers = user_followers[0].split(" ")[0].replace(",", "") if user_followers else 0
# favorites
user_favs = profileNav.select(".ProfileNav-item--favorites").then("a").getAttribute("title")
if user_favs:
user_favs = user_favs[0].split(" ")[0].replace(",", "")
else:
user_favs = ""
user_profilePic = document.select(".ProfileAvatar").andThen("img").getAttribute("src")[0]
print("\n> downloading profile picture...")
ss.download(user_profilePic, _BASE_PHOTOS)
print("\n\nAbout to start downloading user's timeline:")
timeline_url = "https://twitter.com/i/profiles/show/%s/timeline/" % (_USER_)
timeline_url += "%s?include_available_features=1&include_entities=1" % ("with_replies" if logged_in else "tweets")
while has_more_items:
try:
print("\n> downloading timeline chunk [ %s of %s tweets so far, max_position=%s]... \n" % (tweet_counter + retweet_counter, user_tweets, min_position))
if not min_position:
r = ss.get(timeline_url)
if not r:
break
else:
r = ss.get(timeline_url + "&max_position=%s" % min_position)
if not r:
break
try:
j = json.loads(r.body)
except:
print("[*] Error while trying to parse the JSON response, aborting...")
has_more_items = False
break
items_html = j["items_html"].encode("utf8")
document = ss.parse(items_html)
items_html = document.select("li")
for node in items_html:
node = node.select("@data-tweet-id")
if node:
node = node[0]
else:
continue
tweet_id = node.getAttribute("data-tweet-id")
tweet_owner_id = node.getAttribute("data-user-id")
tweet_owner_username = node.getAttribute("data-screen-name")
tweet_owner_name = node.getAttribute("data-name")
tweet_retweeter = node.getAttribute("data-retweeter")
tweet_mentions = node.getAttribute("data-mentions")
tweet_mentions = tweet_mentions.split() if tweet_mentions else []
tweet_raw_text = node.select(".tweet-text").text()
tweet_lang = node.select(".tweet-text").getAttribute("lang")
tweet_lang = tweet_lang[0] if tweet_lang else ""
tweet_timestamp = int(node.select("@data-time-ms").getAttribute("data-time")[0])
tweet_hashtags = []
tweet_iscomment = node.getAttribute("data-is-reply-to") == "true"
for node_hashtag in node.select(".twitter-hashtag"):
hashtag = node_hashtag.text().upper().replace("#.\n", "")
tweet_hashtags.append(hashtag)
dict_hashtag[hashtag] += 1
if not tweet_retweeter:
dict_hashtag_p[hashtag] += 1
tweet_links = [link for link in node.select(".tweet-text").then("a").getAttribute("href") if link.startswith("http")]
# updating counters
tweet_owner_username = tweet_owner_username.upper()
for uname in tweet_mentions:
if uname.upper() == _USER_.upper():
dict_mentions_user[tweet_owner_username] += 1
tweet_datetime = datetime.fromtimestamp(tweet_timestamp).replace(tzinfo=_UTC_TIMEZONE_).astimezone(_TIMEZONE_)
for usermen in tweet_mentions:
dict_mentions[usermen.upper()] += 1
if not tweet_retweeter:
dict_mentions_p[usermen.upper()] += 1
dict_lang[tweet_lang] += 1
if tweet_retweeter:
retweet_counter += 1
dict_retweets[tweet_owner_username] += 1
else:
if tweet_owner_id == user_id:
dict_lang_p[tweet_lang] += 1
# updating counters
tweet_counter += 1
if tweet_iscomment:
comments_counter += 1
if len(tweet_mentions):
mention_tweet_counter += 1
if len(tweet_links):
url_tweet_counter += 1
_XML_TWEETS += _XML_TWEET_TEMPLATE % (
tweet_id,
tweet_timestamp,
tweet_lang,
_USER_,
tweet_id,
tweet_raw_text
)
print(
"|%s |%s[%s]%s\t|%s |%s |%s |%s |%s"
%
(
toFixed(tweet_datetime.isoformat(" "), 16),
tweet_id,
tweet_lang,
"r" if tweet_retweeter else ("c" if tweet_iscomment else ""),
toFixed(tweet_owner_id, 10),
toFixed(tweet_owner_username, 16),
toFixed(tweet_owner_name, 19),
toFixed(tweet_mentions + tweet_hashtags, 10),
toFixed(tweet_raw_text, 54) + "..."
)
)
if len(node.select("@data-image-url")):
img_list = node.select("@data-image-url")
len_imgs = len(img_list)
print("\n" + "- " * 61)
if tweet_retweeter:
print("\t> %i extern photo found" % (len_imgs))
|
tweet_counter = 0
comments_counter = 0 | random_line_split |
scraper-twitter-user.py | (user):
_XML_TWEETS = ""
_XML_ = ""
_USER_ = user
logged_in = True
has_more_items = True
min_position = ""
items_html = ""
document = None
i = 0
user_id = 0
user_bio = ""
user_url = ""
user_name = ""
user_favs = 0
user_tweets = 0
user_isFamous = False
user_location = ""
user_joinDate = ""
user_following = 0
user_followers = 0
tweet_counter = 0
comments_counter = 0
mention_tweet_counter = 0
url_tweet_counter = 0
retweet_counter = 0
tweet_id = 0
tweet_lang = ""
tweet_raw_text = ""
tweet_datetime = None
tweet_mentions = None
tweet_hashtags = None
tweet_owner_id = 0
tweet_retweeter = False
tweet_timestamp = 0
tweet_owner_name = ""
tweet_owner_username = ""
dict_mentions_mutual = defaultdict(lambda: 0)
dict_mentions_user = defaultdict(lambda: 0)
dict_mentions_p = defaultdict(lambda: 0)
dict_hashtag_p = defaultdict(lambda: 0)
dict_retweets = defaultdict(lambda: 0)
dict_mentions = defaultdict(lambda: 0)
dict_hashtag = defaultdict(lambda: 0)
dict_lang_p = defaultdict(lambda: 0)
dict_lang = defaultdict(lambda: 0)
_time_start_ = time.time()
print("\nAccessing %s profile on twitter.com..." % (_USER_))
error = True
while error:
try:
user_url = "/%s/with_replies"
res = ss.get(user_url % (_USER_), redirect=False)
if res.status // 100 != 2:
print("It looks like you're not logged in, I'll try to collect only what is public")
logged_in = False
user_url = "/%s"
document = ss.load(user_url % (_USER_))
if not document:
print("nothing public to bee seen... sorry")
return
error = False
except:
time.sleep(5)
profile = document.select(".ProfileHeaderCard")
# user screenname
_USER_ = profile.select(
".ProfileHeaderCard-screenname"
).then("a").getAttribute("href")
if not _USER_:
return
_USER_ = _USER_[0][1:]
_BASE_DIR_ = "_OUTPUT_/%s/" % (_USER_)
_BASE_PHOTOS = _BASE_DIR_ + "photos/"
_BASE_PHOTOS_PERSONAL = _BASE_PHOTOS + "personal/"
_BASE_PHOTOS_EXTERN = _BASE_PHOTOS + "extern/"
try:
os.makedirs(_BASE_PHOTOS_PERSONAL)
except:
pass
try:
os.makedirs(_BASE_PHOTOS_EXTERN)
except:
pass
# Is Famous
user_isFamous = True if profile.select(".Icon--verified") else False
# Name
user_name = profile.select(".ProfileHeaderCard-name").then("a .ProfileHeaderCard-nameLink").text()
# Biography
user_bio = profile.select(".ProfileHeaderCard-bio").text()
# Location
user_location = profile.select(".ProfileHeaderCard-locationText").text()
# Url
user_url = profile.select(".ProfileHeaderCard-urlText").then("a").getAttribute("title")
user_url = user_url[0] if user_url else ""
# Join Date
user_joinDate = profile.select(".ProfileHeaderCard-joinDateText").getAttribute("title")
user_joinDate = user_joinDate[0] if user_joinDate else ""
profileNav = document.select(".ProfileNav")
# user id
user_id = profileNav.getAttribute("data-user-id")[0]
# tweets
user_tweets = profileNav.select(".ProfileNav-item--tweets").then("a").getAttribute("title")
user_tweets = user_tweets[0].split(" ")[0].replace(",", "") if user_tweets else 0
# following
user_following = profileNav.select(".ProfileNav-item--following").then("a").getAttribute("title")
user_following = user_following[0].split(" ")[0].replace(",", "") if user_following else 0
# followers
user_followers = profileNav.select(".ProfileNav-item--followers").then("a").getAttribute("title")
user_followers = user_followers[0].split(" ")[0].replace(",", "") if user_followers else 0
# favorites
user_favs = profileNav.select(".ProfileNav-item--favorites").then("a").getAttribute("title")
if user_favs:
user_favs = user_favs[0].split(" ")[0].replace(",", "")
else:
user_favs = ""
user_profilePic = document.select(".ProfileAvatar").andThen("img").getAttribute("src")[0]
print("\n> downloading profile picture...")
ss.download(user_profilePic, _BASE_PHOTOS)
print("\n\nAbout to start downloading user's timeline:")
timeline_url = "https://twitter.com/i/profiles/show/%s/timeline/" % (_USER_)
timeline_url += "%s?include_available_features=1&include_entities=1" % ("with_replies" if logged_in else "tweets")
while has_more_items:
try:
print("\n> downloading timeline chunk [ %s of %s tweets so far, max_position=%s]... \n" % (tweet_counter + retweet_counter, user_tweets, min_position))
if not min_position:
r = ss.get(timeline_url)
if not r:
break
else:
r = ss.get(timeline_url + "&max_position=%s" % min_position)
if not r:
break
try:
j = json.loads(r.body)
except:
print("[*] Error while trying to parse the JSON response, aborting...")
has_more_items = False
break
items_html = j["items_html"].encode("utf8")
document = ss.parse(items_html)
items_html = document.select("li")
for node in items_html:
node = node.select("@data-tweet-id")
if node:
node = node[0]
else:
continue
tweet_id = node.getAttribute("data-tweet-id")
tweet_owner_id = node.getAttribute("data-user-id")
tweet_owner_username = node.getAttribute("data-screen-name")
tweet_owner_name = node.getAttribute("data-name")
tweet_retweeter = node.getAttribute("data-retweeter")
tweet_mentions = node.getAttribute("data-mentions")
tweet_mentions = tweet_mentions.split() if tweet_mentions else []
tweet_raw_text = node.select(".tweet-text").text()
tweet_lang = node.select(".tweet-text").getAttribute("lang")
tweet_lang = tweet_lang[0] if tweet_lang else ""
tweet_timestamp = int(node.select("@data-time-ms").getAttribute("data-time")[0])
tweet_hashtags = []
tweet_iscomment = node.getAttribute("data-is-reply-to") == "true"
for node_hashtag in node.select(".twitter-hashtag"):
hashtag = node_hashtag.text().upper().replace("#.\n", "")
tweet_hashtags.append(hashtag)
dict_hashtag[hashtag] += 1
if not tweet_retweeter:
dict_hashtag_p[hashtag] += 1
tweet_links = [link for link in node.select(".tweet-text").then("a").getAttribute("href") if link.startswith("http")]
# updating counters
tweet_owner_username = tweet_owner_username.upper()
for uname in tweet_mentions:
if uname.upper() == _USER_.upper():
dict_mentions_user[tweet_owner_username] += 1
tweet_datetime = datetime.fromtimestamp(tweet_timestamp).replace(tzinfo=_UTC_TIMEZONE_).astimezone(_TIMEZONE_)
for usermen in tweet_mentions:
dict_mentions[usermen.upper()] += 1
if not tweet_retweeter:
dict_mentions_p[usermen.upper()] += 1
dict_lang[tweet_lang] += 1
if tweet_retweeter:
retweet_counter += 1
dict_retweets[tweet_owner_username] += 1
else:
if tweet_owner_id == user_id:
dict_lang_p[tweet_lang] += 1
# updating counters
tweet_counter += 1
if tweet_iscomment:
comments_counter += 1
if len(tweet_mentions):
mention_tweet_counter += 1
if len(tweet_links):
url_tweet_counter += 1
_XML_TWEETS += _XML_TWEET_TEMPLATE % (
tweet_id,
tweet_timestamp,
tweet_lang,
_USER_,
tweet_id,
tweet_raw_text
)
print(
"|%s |%s[%s]%s\t|%s |%s |%s |%s |%s"
%
(
toFixed(tweet_datetime.isoformat(" "), 16),
tweet_id,
tweet_lang,
"r" if tweet_retweeter else ("c" if tweet_iscomment else ""),
toFixed(tweet_owner_id, 10),
toFixed(tweet_owner_username, 16),
toFixed(tweet_owner_name, 19),
to | scrapes | identifier_name | |
scraper-twitter-user.py |
user_isFamous = False
user_location = ""
user_joinDate = ""
user_following = 0
user_followers = 0
tweet_counter = 0
comments_counter = 0
mention_tweet_counter = 0
url_tweet_counter = 0
retweet_counter = 0
tweet_id = 0
tweet_lang = ""
tweet_raw_text = ""
tweet_datetime = None
tweet_mentions = None
tweet_hashtags = None
tweet_owner_id = 0
tweet_retweeter = False
tweet_timestamp = 0
tweet_owner_name = ""
tweet_owner_username = ""
dict_mentions_mutual = defaultdict(lambda: 0)
dict_mentions_user = defaultdict(lambda: 0)
dict_mentions_p = defaultdict(lambda: 0)
dict_hashtag_p = defaultdict(lambda: 0)
dict_retweets = defaultdict(lambda: 0)
dict_mentions = defaultdict(lambda: 0)
dict_hashtag = defaultdict(lambda: 0)
dict_lang_p = defaultdict(lambda: 0)
dict_lang = defaultdict(lambda: 0)
_time_start_ = time.time()
print("\nAccessing %s profile on twitter.com..." % (_USER_))
error = True
while error:
try:
user_url = "/%s/with_replies"
res = ss.get(user_url % (_USER_), redirect=False)
if res.status // 100 != 2:
print("It looks like you're not logged in, I'll try to collect only what is public")
logged_in = False
user_url = "/%s"
document = ss.load(user_url % (_USER_))
if not document:
print("nothing public to bee seen... sorry")
return
error = False
except:
time.sleep(5)
profile = document.select(".ProfileHeaderCard")
# user screenname
_USER_ = profile.select(
".ProfileHeaderCard-screenname"
).then("a").getAttribute("href")
if not _USER_:
return
_USER_ = _USER_[0][1:]
_BASE_DIR_ = "_OUTPUT_/%s/" % (_USER_)
_BASE_PHOTOS = _BASE_DIR_ + "photos/"
_BASE_PHOTOS_PERSONAL = _BASE_PHOTOS + "personal/"
_BASE_PHOTOS_EXTERN = _BASE_PHOTOS + "extern/"
try:
os.makedirs(_BASE_PHOTOS_PERSONAL)
except:
pass
try:
os.makedirs(_BASE_PHOTOS_EXTERN)
except:
pass
# Is Famous
user_isFamous = True if profile.select(".Icon--verified") else False
# Name
user_name = profile.select(".ProfileHeaderCard-name").then("a .ProfileHeaderCard-nameLink").text()
# Biography
user_bio = profile.select(".ProfileHeaderCard-bio").text()
# Location
user_location = profile.select(".ProfileHeaderCard-locationText").text()
# Url
user_url = profile.select(".ProfileHeaderCard-urlText").then("a").getAttribute("title")
user_url = user_url[0] if user_url else ""
# Join Date
user_joinDate = profile.select(".ProfileHeaderCard-joinDateText").getAttribute("title")
user_joinDate = user_joinDate[0] if user_joinDate else ""
profileNav = document.select(".ProfileNav")
# user id
user_id = profileNav.getAttribute("data-user-id")[0]
# tweets
user_tweets = profileNav.select(".ProfileNav-item--tweets").then("a").getAttribute("title")
user_tweets = user_tweets[0].split(" ")[0].replace(",", "") if user_tweets else 0
# following
user_following = profileNav.select(".ProfileNav-item--following").then("a").getAttribute("title")
user_following = user_following[0].split(" ")[0].replace(",", "") if user_following else 0
# followers
user_followers = profileNav.select(".ProfileNav-item--followers").then("a").getAttribute("title")
user_followers = user_followers[0].split(" ")[0].replace(",", "") if user_followers else 0
# favorites
user_favs = profileNav.select(".ProfileNav-item--favorites").then("a").getAttribute("title")
if user_favs:
user_favs = user_favs[0].split(" ")[0].replace(",", "")
else:
|
user_profilePic = document.select(".ProfileAvatar").andThen("img").getAttribute("src")[0]
print("\n> downloading profile picture...")
ss.download(user_profilePic, _BASE_PHOTOS)
print("\n\nAbout to start downloading user's timeline:")
timeline_url = "https://twitter.com/i/profiles/show/%s/timeline/" % (_USER_)
timeline_url += "%s?include_available_features=1&include_entities=1" % ("with_replies" if logged_in else "tweets")
while has_more_items:
try:
print("\n> downloading timeline chunk [ %s of %s tweets so far, max_position=%s]... \n" % (tweet_counter + retweet_counter, user_tweets, min_position))
if not min_position:
r = ss.get(timeline_url)
if not r:
break
else:
r = ss.get(timeline_url + "&max_position=%s" % min_position)
if not r:
break
try:
j = json.loads(r.body)
except:
print("[*] Error while trying to parse the JSON response, aborting...")
has_more_items = False
break
items_html = j["items_html"].encode("utf8")
document = ss.parse(items_html)
items_html = document.select("li")
for node in items_html:
node = node.select("@data-tweet-id")
if node:
node = node[0]
else:
continue
tweet_id = node.getAttribute("data-tweet-id")
tweet_owner_id = node.getAttribute("data-user-id")
tweet_owner_username = node.getAttribute("data-screen-name")
tweet_owner_name = node.getAttribute("data-name")
tweet_retweeter = node.getAttribute("data-retweeter")
tweet_mentions = node.getAttribute("data-mentions")
tweet_mentions = tweet_mentions.split() if tweet_mentions else []
tweet_raw_text = node.select(".tweet-text").text()
tweet_lang = node.select(".tweet-text").getAttribute("lang")
tweet_lang = tweet_lang[0] if tweet_lang else ""
tweet_timestamp = int(node.select("@data-time-ms").getAttribute("data-time")[0])
tweet_hashtags = []
tweet_iscomment = node.getAttribute("data-is-reply-to") == "true"
for node_hashtag in node.select(".twitter-hashtag"):
hashtag = node_hashtag.text().upper().replace("#.\n", "")
tweet_hashtags.append(hashtag)
dict_hashtag[hashtag] += 1
if not tweet_retweeter:
dict_hashtag_p[hashtag] += 1
tweet_links = [link for link in node.select(".tweet-text").then("a").getAttribute("href") if link.startswith("http")]
# updating counters
tweet_owner_username = tweet_owner_username.upper()
for uname in tweet_mentions:
if uname.upper() == _USER_.upper():
dict_mentions_user[tweet_owner_username] += 1
tweet_datetime = datetime.fromtimestamp(tweet_timestamp).replace(tzinfo=_UTC_TIMEZONE_).astimezone(_TIMEZONE_)
for usermen in tweet_mentions:
dict_mentions[usermen.upper()] += 1
if not tweet_retweeter:
dict_mentions_p[usermen.upper()] += 1
dict_lang[tweet_lang] += 1
if tweet_retweeter:
retweet_counter += 1
dict_retweets[tweet_owner_username] += 1
else:
if tweet_owner_id == user_id:
dict_lang_p[tweet_lang] += 1
# updating counters
tweet_counter += 1
if tweet_iscomment:
comments_counter += 1
if len(tweet_mentions):
mention_tweet_counter += 1
if len(tweet_links):
url_tweet_counter += 1
_XML_TWEETS += _XML_TWEET_TEMPLATE % (
tweet_id,
tweet_timestamp,
tweet_lang,
_USER_,
tweet_id,
tweet_raw_text
)
print(
"|%s |%s[%s]%s\t|%s |%s |%s |%s |%s"
%
(
toFixed(tweet_datetime.isoformat(" "), 16),
tweet_id,
tweet_lang,
"r" if tweet_retweeter else ("c" if tweet_iscomment else ""),
toFixed(tweet_owner_id, 10),
toFixed(tweet_owner_username, 16),
toFixed(tweet_owner_name, 19),
toFixed(tweet_mentions + tweet_hashtags, 10),
toFixed(tweet_raw_text, 54) + "..."
)
)
if len(node.select("@data-image-url")):
img_list = node.select("@data-image-url")
len_imgs = len(img_list)
print("\n" + "- " * 61)
if tweet_retweeter:
print("\t> %i extern photo found" % (len_imgs | user_favs = "" | conditional_block |
scraper-twitter-user.py |
def sumc(collection):
total = 0
if type(collection) == list or type(collection) == set or type(collection) == tuple:
for e in collection:
total += e
else:
for e in collection:
total += collection[e]
return float(total)
parser = argparse.ArgumentParser(
description='LIDIC Twitter Scraper v.1.1',
epilog=(
"Author: Burdisso Sergio (<sergio.burdisso@gmail.com>), Phd. Student. "
"LIDIC, Department of Computer Science, National University of San Luis"
" (UNSL), San Luis, Argentina."
)
)
parser.add_argument('USER', help="target's twitter user name")
args = parser.parse_args()
# https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
_TIMEZONE_ = tz.gettz('America/Buenos_Aires')
_UTC_TIMEZONE_ = tz.gettz('UTC')
ss.setVerbose(False)
ss.scookies.set("lang", "en")
ss.setUserAgent(ss.UserAgent.CHROME_LINUX)
_XML_TEMPLATE = """\
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<author type="twitter" url="https://twitter.com/%s" id="%s" name="%s" join_date="%s" location="%s" personal_url="%s" tweets="%s" following="%s" followers="%s" favorites="%s" age_group="xx" gender="xx" lang="xx">
<biography>
<![CDATA[%s]]>
</biography>
<documents count="%s">%s
</documents>
</author>"""
_XML_TWEET_TEMPLATE = """
<document id="%s" timestamp="%s" lang="%s" url="https://twitter.com/%s/status/%s"><![CDATA[%s]]></document>
"""
def scrapes(user):
_XML_TWEETS = ""
_XML_ = ""
_USER_ = user
logged_in = True
has_more_items = True
min_position = ""
items_html = ""
document = None
i = 0
user_id = 0
user_bio = ""
user_url = ""
user_name = ""
user_favs = 0
user_tweets = 0
user_isFamous = False
user_location = ""
user_joinDate = ""
user_following = 0
user_followers = 0
tweet_counter = 0
comments_counter = 0
mention_tweet_counter = 0
url_tweet_counter = 0
retweet_counter = 0
tweet_id = 0
tweet_lang = ""
tweet_raw_text = ""
tweet_datetime = None
tweet_mentions = None
tweet_hashtags = None
tweet_owner_id = 0
tweet_retweeter = False
tweet_timestamp = 0
tweet_owner_name = ""
tweet_owner_username = ""
dict_mentions_mutual = defaultdict(lambda: 0)
dict_mentions_user = defaultdict(lambda: 0)
dict_mentions_p = defaultdict(lambda: 0)
dict_hashtag_p = defaultdict(lambda: 0)
dict_retweets = defaultdict(lambda: 0)
dict_mentions = defaultdict(lambda: 0)
dict_hashtag = defaultdict(lambda: 0)
dict_lang_p = defaultdict(lambda: 0)
dict_lang = defaultdict(lambda: 0)
_time_start_ = time.time()
print("\nAccessing %s profile on twitter.com..." % (_USER_))
error = True
while error:
try:
user_url = "/%s/with_replies"
res = ss.get(user_url % (_USER_), redirect=False)
if res.status // 100 != 2:
print("It looks like you're not logged in, I'll try to collect only what is public")
logged_in = False
user_url = "/%s"
document = ss.load(user_url % (_USER_))
if not document:
print("nothing public to bee seen... sorry")
return
error = False
except:
time.sleep(5)
profile = document.select(".ProfileHeaderCard")
# user screenname
_USER_ = profile.select(
".ProfileHeaderCard-screenname"
).then("a").getAttribute("href")
if not _USER_:
return
_USER_ = _USER_[0][1:]
_BASE_DIR_ = "_OUTPUT_/%s/" % (_USER_)
_BASE_PHOTOS = _BASE_DIR_ + "photos/"
_BASE_PHOTOS_PERSONAL = _BASE_PHOTOS + "personal/"
_BASE_PHOTOS_EXTERN = _BASE_PHOTOS + "extern/"
try:
os.makedirs(_BASE_PHOTOS_PERSONAL)
except:
pass
try:
os.makedirs(_BASE_PHOTOS_EXTERN)
except:
pass
# Is Famous
user_isFamous = True if profile.select(".Icon--verified") else False
# Name
user_name = profile.select(".ProfileHeaderCard-name").then("a .ProfileHeaderCard-nameLink").text()
# Biography
user_bio = profile.select(".ProfileHeaderCard-bio").text()
# Location
user_location = profile.select(".ProfileHeaderCard-locationText").text()
# Url
user_url = profile.select(".ProfileHeaderCard-urlText").then("a").getAttribute("title")
user_url = user_url[0] if user_url else ""
# Join Date
user_joinDate = profile.select(".ProfileHeaderCard-joinDateText").getAttribute("title")
user_joinDate = user_joinDate[0] if user_joinDate else ""
profileNav = document.select(".ProfileNav")
# user id
user_id = profileNav.getAttribute("data-user-id")[0]
# tweets
user_tweets = profileNav.select(".ProfileNav-item--tweets").then("a").getAttribute("title")
user_tweets = user_tweets[0].split(" ")[0].replace(",", "") if user_tweets else 0
# following
user_following = profileNav.select(".ProfileNav-item--following").then("a").getAttribute("title")
user_following = user_following[0].split(" ")[0].replace(",", "") if user_following else 0
# followers
user_followers = profileNav.select(".ProfileNav-item--followers").then("a").getAttribute("title")
user_followers = user_followers[0].split(" ")[0].replace(",", "") if user_followers else 0
# favorites
user_favs = profileNav.select(".ProfileNav-item--favorites").then("a").getAttribute("title")
if user_favs:
user_favs = user_favs[0].split(" ")[0].replace(",", "")
else:
user_favs = ""
user_profilePic = document.select(".ProfileAvatar").andThen("img").getAttribute("src")[0]
print("\n> downloading profile picture...")
ss.download(user_profilePic, _BASE_PHOTOS)
print("\n\nAbout to start downloading user's timeline:")
timeline_url = "https://twitter.com/i/profiles/show/%s/timeline/" % (_USER_)
timeline_url += "%s?include_available_features=1&include_entities=1" % ("with_replies" if logged_in else "tweets")
while has_more_items:
try:
print("\n> downloading timeline chunk [ %s of %s tweets so far, max_position=%s]... \n" % (tweet_counter + retweet_counter, user_tweets, min_position))
if not min_position:
r = ss.get(timeline_url)
if not r:
break
else:
r = ss.get(timeline_url + "&max_position=%s" % min_position)
if not r:
break
try:
j = json.loads(r.body)
except:
print("[*] Error while trying to parse the JSON response, aborting...")
has_more_items = False
break
items_html = j["items_html"].encode("utf8")
document = ss.parse(items_html)
items_html = document.select("li")
for node in items_html:
node = node.select("@data-tweet-id")
if node:
node = node[0]
else:
continue
tweet_id = node.getAttribute("data-tweet-id")
tweet_owner_id = node.getAttribute("data-user-id")
tweet_owner_username = node.getAttribute("data-screen-name")
tweet_owner_name = node.getAttribute("data-name")
tweet_retweeter = node.getAttribute("data-retweeter")
tweet_mentions = node.getAttribute("data-mentions")
tweet_mentions = tweet_mentions.split() if tweet_mentions else []
tweet_raw_text = node.select(".tweet-text").text()
tweet_lang = node.select(".tweet-text").getAttribute("lang")
tweet_lang = tweet_lang[0] if tweet_lang else ""
tweet_timestamp = int(node.select("@data-time-ms").getAttribute("data-time")[0])
tweet_hashtags = []
tweet_iscomment = node.getAttribute("data-is-reply-to") == "true"
for node_hashtag in node.select(".twitter-hashtag"):
hashtag = node_hashtag.text().upper().replace("#.\n", "")
tweet_hashtags.append(hashtag)
dict_hashtag[has | if isinstance(strn, list) or type(strn) == int:
strn = str(strn)
return (u"{:<%i}" % (length)).format(strn[:length]) | identifier_body | |
lib.rs | _display;
/// **(internal)** Implements experimental `.bnet` parser for `BooleanNetwork`.
mod _impl_boolean_network_from_bnet;
/// **(internal)** Implements an experimental `.bnet` writer for `BooleanNetwork`.
mod _impl_boolean_network_to_bnet;
/// **(internal)** All methods implemented by the `ExtendedBoolean` object.
mod _impl_extended_boolean;
/// **(internal)** Utility methods for `FnUpdate`.
mod _impl_fn_update;
/// **(internal)** Utility methods for `Parameter`.
mod _impl_parameter;
/// **(internal)** Utility methods for `ParameterId`.
mod _impl_parameter_id;
/// **(internal)** Utility methods for `Regulation`.
mod _impl_regulation;
/// **(internal)** All methods for analysing and manipulating `RegulatoryGraph`.
mod _impl_regulatory_graph;
/// **(internal)** All methods implemented by the `Space` object.
mod _impl_space;
/// **(internal)** Utility methods for `Variable`.
mod _impl_variable;
/// **(internal)** Utility methods for `VariableId`.
mod _impl_variable_id;
// Re-export data structures used for advanced graph algorithms on `RegulatoryGraph`.
pub use _impl_regulatory_graph::signed_directed_graph::SdGraph;
pub use _impl_regulatory_graph::signed_directed_graph::Sign;
/// **(internal)** A regex string of an identifier which we currently allow to appear
/// as a variable or parameter name.
const ID_REGEX_STR: &str = r"[a-zA-Z0-9_]+";
lazy_static! {
/// A regular expression that matches the identifiers allowed as names of
/// Boolean parameters or variables.
static ref ID_REGEX: Regex = Regex::new(ID_REGEX_STR).unwrap();
}
/// A type-safe index of a `Variable` inside a `RegulatoryGraph` (or a `BooleanNetwork`).
///
/// If needed, it can be converted into `usize` for serialisation and safely read
/// again by providing the original `RegulatoryGraph` as context
/// to the `VariableId::try_from_usize`.
///
/// **Warning:** Do not mix type-safe indices between different networks/graphs!
#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct VariableId(usize);
/// A type-safe index of a `Parameter` inside a `BooleanNetwork`.
///
/// If needed, it can be converted into `usize` for serialisation and safely read
/// again by providing the original `BooleanNetwork` as context
/// to the `ParameterId::try_from_usize`.
///
/// **Warning:** Do not mix type-safe indices between different networks!
#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct ParameterId(usize);
/// Possible monotonous effects of a `Regulation` in a `RegulatoryGraph`.
///
/// Activation means positive and inhibition means negative monotonicity.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub enum Monotonicity {
Activation,
Inhibition,
}
/// A Boolean variable of a `RegulatoryGraph` (or a `BooleanNetwork`) with a given `name`.
///
/// `Variable` can be only created by and borrowed from a `RegulatoryGraph`.
/// It has no public constructor.
#[derive(Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct Variable {
name: String,
}
/// An explicit parameter of a `BooleanNetwork`; an uninterpreted Boolean function with a given
/// `name` and `arity`.
///
/// `Parameter` can be only created by and borrowed form the `BooleanNetwork` itself.
/// It has no public constructor.
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
pub struct | {
name: String,
arity: u32,
}
/// Describes an interaction between two `Variables` in a `RegulatoryGraph`
/// (or a `BooleanNetwork`).
///
/// Every regulation can be *monotonous*, and can be set as *observable*:
///
/// - Monotonicity is either *positive* or *negative* and signifies that the influence of the
/// `regulator` on the `target` has to *increase* or *decrease* the `target` value respectively.
/// - If observability is set to `true`, the `regulator` *must* have influence on the outcome
/// of the `target` update function in *some* context. If set to false, this is not enforced
/// (i.e. the `regulator` *can* have an influence on the `target`, but it is not required).
///
/// Regulations can be represented as strings in the
/// form `"regulator_name 'relationship' target_name"`. The 'relationship' starts with `-`, which
/// is followed by `>` for activation (positive monotonicity), `|` for inhibition (negative
/// monotonicity) or `?` for unspecified monotonicity. Finally, an additional `?` at the end
/// of 'relationship' signifies a non-observable regulation. Together, this gives the
/// following options: `->, ->?, -|, -|?, -?, -??`.
///
/// Regulations cannot be created directly, they are only borrowed from a `RegulatoryGraph`
/// or a `BooleanNetwork`.
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
pub struct Regulation {
regulator: VariableId,
target: VariableId,
observable: bool,
monotonicity: Option<Monotonicity>,
}
/// A directed graph representing relationships between a collection of Boolean variables
/// using `Regulations`.
///
/// It can be explored using `regulators`, `targets`, `transitive_regulators`, or
/// `transitive_targets` (for example to determine if two variables depend on each other).
/// We can also compute the SCCs of this graph.
///
/// A regulatory graph can be described using a custom string format. In this format,
/// each line represents a regulation or a comment (starting with `#`).
///
/// Regulations can be represented as strings in the form of
/// `"regulator_name 'relationship' target_name"`. The 'relationship' is one of the arrow strings
/// `->, ->?, -|, -|?, -?, -??`. Here, `>` means activation, `|` is inhibition and `?` is
/// unspecified monotonicity. The last question mark signifies observability — if it is present,
/// the regulation is not necessarily observable. See `Regulation` and tutorial module for a more
/// detailed explanation.
///
/// Example of a `RegulatoryGraph`:
///
/// ```rg
/// # Regulators of a
/// a ->? a
/// b -|? a
///
/// # Regulators of b
/// a -> b
/// b -| b
/// ```
#[derive(Clone, Debug)]
pub struct RegulatoryGraph {
variables: Vec<Variable>,
regulations: Vec<Regulation>,
variable_to_index: HashMap<String, VariableId>,
}
/// Possible binary Boolean operators that can appear in `FnUpdate`.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub enum BinaryOp {
And,
Or,
Xor,
Iff,
Imp,
}
/// A Boolean update function formula which references
/// `Variables` and `Parameters` of a `BooleanNetwork`.
///
/// An update function specifies the evolution rules for one specific `Variable` of a
/// `BooleanNetwork`. The arguments used in the function must be the same as specified
/// by the `RegulatoryGraph` of the network.
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
pub enum FnUpdate {
/// A true/false constant.
Const(bool),
/// References a network variable.
Var(VariableId),
/// References a network parameter (uninterpreted function).
///
/// The variable list are the arguments of the function invocation.
Param(ParameterId, Vec<VariableId>),
/// Negation.
Not(Box<FnUpdate>),
/// Binary boolean operation.
Binary(BinaryOp, Box<FnUpdate>, Box<FnUpdate>),
}
/// A Boolean network, possibly parametrised with uninterpreted Boolean functions.
///
/// The structure of the network is based on an underlying `RegulatoryGraph`. However,
/// compared to a `RegulatoryGraph`, `BooleanNetwork` can have a specific update function
/// given for each variable.
///
/// If the function is not specified (so called *implicit parametrisation*), all admissible
/// Boolean functions are considered in its place. A function can be also only partially
/// specified by using declared *explicit parameters*. These are uninterpreted but named Boolean
/// functions, such that, again, all admissible instantiations of these functions are considered.
/// See crate tutorial to learn more.
///
/// ### Boolean network equivalence
///
/// Please keep in mind that we consider two networks to be equivalent when they share a regulatory
/// graph, and when they have (syntactically) the same update functions and parameters. We do not
/// perform any semantic checks for whether the update functions are functionally equivalent.
///
/// Also keep in mind that the *ordering* of variables and parameters must be shared by equivalent
/// networks. This is because we want to preserve the property that `VariableId` and `ParameterId`
/// objects are interchangeable as log as networks are equivalent.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct BooleanNetwork {
graph: RegulatoryGraph,
parameters: Vec<Parameter>,
update_functions: Vec<Option<FnUpdate>>,
parameter_to_index: HashMap<String, ParameterId>,
}
/// An iterator over all `VariableIds` | Parameter | identifier_name |
lib.rs | u32,
}
/// Describes an interaction between two `Variables` in a `RegulatoryGraph`
/// (or a `BooleanNetwork`).
///
/// Every regulation can be *monotonous*, and can be set as *observable*:
///
/// - Monotonicity is either *positive* or *negative* and signifies that the influence of the
/// `regulator` on the `target` has to *increase* or *decrease* the `target` value respectively.
/// - If observability is set to `true`, the `regulator` *must* have influence on the outcome
/// of the `target` update function in *some* context. If set to false, this is not enforced
/// (i.e. the `regulator` *can* have an influence on the `target`, but it is not required).
///
/// Regulations can be represented as strings in the
/// form `"regulator_name 'relationship' target_name"`. The 'relationship' starts with `-`, which
/// is followed by `>` for activation (positive monotonicity), `|` for inhibition (negative
/// monotonicity) or `?` for unspecified monotonicity. Finally, an additional `?` at the end
/// of 'relationship' signifies a non-observable regulation. Together, this gives the
/// following options: `->, ->?, -|, -|?, -?, -??`.
///
/// Regulations cannot be created directly, they are only borrowed from a `RegulatoryGraph`
/// or a `BooleanNetwork`.
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
pub struct Regulation {
regulator: VariableId,
target: VariableId,
observable: bool,
monotonicity: Option<Monotonicity>,
}
/// A directed graph representing relationships between a collection of Boolean variables
/// using `Regulations`.
///
/// It can be explored using `regulators`, `targets`, `transitive_regulators`, or
/// `transitive_targets` (for example to determine if two variables depend on each other).
/// We can also compute the SCCs of this graph.
///
/// A regulatory graph can be described using a custom string format. In this format,
/// each line represents a regulation or a comment (starting with `#`).
///
/// Regulations can be represented as strings in the form of
/// `"regulator_name 'relationship' target_name"`. The 'relationship' is one of the arrow strings
/// `->, ->?, -|, -|?, -?, -??`. Here, `>` means activation, `|` is inhibition and `?` is
/// unspecified monotonicity. The last question mark signifies observability — if it is present,
/// the regulation is not necessarily observable. See `Regulation` and tutorial module for a more
/// detailed explanation.
///
/// Example of a `RegulatoryGraph`:
///
/// ```rg
/// # Regulators of a
/// a ->? a
/// b -|? a
///
/// # Regulators of b
/// a -> b
/// b -| b
/// ```
#[derive(Clone, Debug)]
pub struct RegulatoryGraph {
variables: Vec<Variable>,
regulations: Vec<Regulation>,
variable_to_index: HashMap<String, VariableId>,
}
/// Possible binary Boolean operators that can appear in `FnUpdate`.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub enum BinaryOp {
And,
Or,
Xor,
Iff,
Imp,
}
/// A Boolean update function formula which references
/// `Variables` and `Parameters` of a `BooleanNetwork`.
///
/// An update function specifies the evolution rules for one specific `Variable` of a
/// `BooleanNetwork`. The arguments used in the function must be the same as specified
/// by the `RegulatoryGraph` of the network.
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
pub enum FnUpdate {
/// A true/false constant.
Const(bool),
/// References a network variable.
Var(VariableId),
/// References a network parameter (uninterpreted function).
///
/// The variable list are the arguments of the function invocation.
Param(ParameterId, Vec<VariableId>),
/// Negation.
Not(Box<FnUpdate>),
/// Binary boolean operation.
Binary(BinaryOp, Box<FnUpdate>, Box<FnUpdate>),
}
/// A Boolean network, possibly parametrised with uninterpreted Boolean functions.
///
/// The structure of the network is based on an underlying `RegulatoryGraph`. However,
/// compared to a `RegulatoryGraph`, `BooleanNetwork` can have a specific update function
/// given for each variable.
///
/// If the function is not specified (so called *implicit parametrisation*), all admissible
/// Boolean functions are considered in its place. A function can be also only partially
/// specified by using declared *explicit parameters*. These are uninterpreted but named Boolean
/// functions, such that, again, all admissible instantiations of these functions are considered.
/// See crate tutorial to learn more.
///
/// ### Boolean network equivalence
///
/// Please keep in mind that we consider two networks to be equivalent when they share a regulatory
/// graph, and when they have (syntactically) the same update functions and parameters. We do not
/// perform any semantic checks for whether the update functions are functionally equivalent.
///
/// Also keep in mind that the *ordering* of variables and parameters must be shared by equivalent
/// networks. This is because we want to preserve the property that `VariableId` and `ParameterId`
/// objects are interchangeable as log as networks are equivalent.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct BooleanNetwork {
graph: RegulatoryGraph,
parameters: Vec<Parameter>,
update_functions: Vec<Option<FnUpdate>>,
parameter_to_index: HashMap<String, ParameterId>,
}
/// An iterator over all `VariableIds` of a `RegulatoryGraph` (or a `BooleanNetwork`).
pub type VariableIdIterator = Map<Range<usize>, fn(usize) -> VariableId>;
/// An iterator over all `ParameterIds` of a `BooleanNetwork`.
pub type ParameterIdIterator = Map<Range<usize>, fn(usize) -> ParameterId>;
/// An iterator over all `Regulations` of a `RegulatoryGraph`.
pub type RegulationIterator<'a> = std::slice::Iter<'a, Regulation>;
/// An enum representing the possible state of each variable when describing a hypercube.
#[derive(Copy, Clone, Eq, PartialEq, Hash)]
pub enum ExtendedBoolean {
Zero,
One,
Any,
}
/// `Space` represents a hypercube (multi-dimensional rectangle) in the Boolean state space.
///
/// Keep in mind that there is no way of representing an empty hypercube at the moment. So any API
/// that can take/return an empty set has to use `Option<Space>` or something similar.
#[derive(Clone, PartialEq, Eq, Hash, Debug)]
pub struct Space(Vec<ExtendedBoolean>);
/// Annotations are "meta" objects that can be declared as part of AEON models to add additional
/// properties that are not directly recognized by the main AEON toolbox.
///
/// Annotations are comments which start with `#!`. After the `#!` "preamble", each annotation
/// can contains a "path prefix" with path segments separated using `:` (path segments can be
/// surrounded by white space that is automatically trimmed). Based on these path
/// segments, the parser will create an annotation tree. If there are multiple annotations with
/// the same path, their values are concatenated using newlines.
///
/// For example, annotations can be used to describe model layout:
///
/// ```text
/// #! layout : var_1 : 10,20
/// #! layout : var_2 : 14,-3
/// ```
///
/// Another usage for annotations are extra properties enforced on the model behaviour, for
/// example through CTL:
/// ```test
/// #! property : AG (problem => AF apoptosis)
/// ```
///
/// Obviously, you can also use annotations to specify model metadata:
/// ```text
/// #! name: My Awesome Model
/// #! description: This model describes ...
/// #! description:var_1: This variable describes ...
/// ```
///
/// You can use "empty" path (e.g. `#! is_multivalued`), and you can use an empty annotation
/// value with a non-empty path (e.g. `#!is_multivalued:var_1:`). Though this is not particularly
/// encouraged: it is better to just have `var_1` as the annotation value if you can do that.
/// An exception to this may be a case where `is_multivalued:var_1:` has an "optional" value and
/// you want to express that while the "key" is provided, but the "value" is missing. Similarly, for
/// the sake of completeness, it is technically allowed to use empty path names (e.g. `a::b:value`
/// translates to `["a", "", "b"] = "value"`), but it is discouraged.
///
/// Note that the path segments should only contain alphanumeric characters and underscores,
/// but can be escaped using backticks (`` ` ``; other backticks in path segments are not allowed).
/// Similarly, annotation values cannot contain colons (path segment separators) or backticks,
/// unless escaped with `` #`ACTUAL_STRING`# ``. You can also use escaping if you wish to
/// retain whitespace around annotation values. As mentioned, multi-line values can be split
/// into multiple annotation comments.
#[derive(PartialEq, Eq, Clone)] | random_line_split | ||
broker.go | : opts.codec,
onError: opts.errorHandler,
routing: newRoutingTable(opts),
pubsub: newPubSub(pubsub, opts),
leaving: make(chan struct{}),
pendingReplies: make(map[uint64]pendingReply),
messageHandlers: opts.messageHandlers,
requestHandlers: opts.requestHandlers,
}
if err := b.pubsub.subscribe(ctx, nodeStream(b.clique, b.routing.local), "", b.processCliqueProtocol); err != nil {
b.pubsub.shutdown(ctx)
return nil, err
}
if err := b.pubsub.subscribe(ctx, b.clique, "", b.processCliqueProtocol); err != nil {
b.pubsub.shutdown(ctx)
return nil, err
}
for stream := range b.messageHandlers {
if err := b.pubsub.subscribe(ctx, stream, b.clique, b.processMessage); err != nil {
b.pubsub.shutdown(ctx)
return nil, err
}
}
for stream := range b.requestHandlers {
if err := b.pubsub.subscribe(ctx, stream, b.clique, b.processRequest); err != nil {
b.pubsub.shutdown(ctx)
return nil, err
}
}
join := marshalJoin(join{sender: b.routing.local})
if err := b.broadcast(ctx, join); err != nil {
b.pubsub.shutdown(ctx)
return nil, err
}
b.wg.Add(1)
go b.stabilize(opts.stabilization.Interval)
return b, nil
}
// Close notifies all clique members about a leaving broker and
// disconnects from the pub/sub system.
func (b *Broker) Close() error {
return b.shutdown(context.Background(), func() error { return nil })
}
// Shutdown gracefully shuts down the broker. It notifies all clique
// members about a leaving broker and waits until all messages and
// requests are processed. If the given context expires before, the
// context's error will be returned.
func (b *Broker) Shutdown(ctx context.Context) error {
return b.shutdown(ctx, func() error {
ticker := time.NewTicker(250 * time.Millisecond)
defer ticker.Stop()
for atomic.LoadUint64(&b.messagesInFlight) != 0 || atomic.LoadUint64(&b.requestsInFlight) != 0 {
select {
case <-ctx.Done():
return ctx.Err()
case <-ticker.C:
}
}
return nil
})
}
// Publish forwards the message directly to the pub/sub system.
// If the message does not contain any partition key, the message will
// be processed by a random broker within a clique.
// All binary data of the passed message needs to be valid only during
// the method call.
func (b *Broker) Publish(ctx context.Context, msg Message) error {
if b.isShuttingDown() {
return ErrClosed
}
if err := msg.validate(); err != nil {
return err
}
return b.pubsub.send(ctx, msg.Stream, b.codec.EncodeMessage(msg))
}
// Request sends a request message and waits for its response.
// If the message has no partition key, the request will be processed
// by a random broker within a clique.
// All binary data of the passed message needs to be valid only during
// the method call.
func (b *Broker) Request(ctx context.Context, request Message) (Message, error) {
if b.isShuttingDown() {
return Message{}, ErrClosed
}
if err := request.validate(); err != nil {
return Message{}, err
}
reply := b.awaitReply(ctx, b.routing.local, b.reqTimeout, func(ctx context.Context, id uint64) error {
return b.pubsub.send(ctx, request.Stream, marshalMsg(msg{
id: id,
reply: []byte(nodeStream(b.clique, b.routing.local)),
stream: []byte(request.Stream),
pkey: request.PartitionKey,
data: request.Data,
}))
})
return Message{Data: reply.data}, reply.err
}
func (b *Broker) processMessage(ctx context.Context, stream string, data []byte) {
atomic.AddUint64(&b.messagesInFlight, 1)
defer atomic.AddUint64(&b.messagesInFlight, ^uint64(0))
decoded, err := b.codec.DecodeMessage(stream, data)
if err != nil {
b.onError(errorf("decode message: %v", err))
return
}
b.forwardMsg(ctx, msg{
stream: []byte(decoded.Stream),
pkey: decoded.PartitionKey,
data: decoded.Data,
})
}
func (b *Broker) processRequest(ctx context.Context, stream string, data []byte) {
atomic.AddUint64(&b.requestsInFlight, 1)
defer atomic.AddUint64(&b.requestsInFlight, ^uint64(0))
f, err := unmarshalFrame(data)
switch {
case err != nil:
b.onError(errorf("request subscription: %v", err))
return
case f.typ() != frameTypeMsg:
b.onError(errorf("unexpected request frame type: %s", f.typ()))
return
}
msg, err := unmarshalMsg(f)
if err != nil {
b.onError(errorf("unmarshal msg: %v", err))
return
}
b.forwardMsg(ctx, msg)
}
func (b *Broker) processCliqueProtocol(ctx context.Context, stream string, data []byte) {
f, err := unmarshalFrame(data)
if err != nil {
b.onError(errorf("clique subscription: %v", err))
return
}
switch f.typ() {
case frameTypeJoin:
b.handleJoin(ctx, f)
case frameTypeLeave:
b.handleLeave(f)
case frameTypeInfo:
b.handleInfo(f)
case frameTypePing:
b.handlePing(ctx, f)
case frameTypeFwd:
b.handleFwd(ctx, f)
case frameTypeAck:
b.handleAck(f)
default:
b.onError(errorf("unexpected clique frame type: %s", f.typ()))
}
}
func (b *Broker) handleJoin(ctx context.Context, f frame) {
join, err := unmarshalJoin(f)
switch {
case err != nil:
b.onError(errorf("unmarshal join: %v", err))
return
case join.sender == b.routing.local:
return
}
neighbors := b.routing.neighbors()
b.routing.registerKey(join.sender)
err = b.sendTo(ctx, join.sender, marshalInfo(info{neighbors: neighbors}))
if err != nil {
b.onError(errorf("send info: %v", err))
}
}
func (b *Broker) handleLeave(f frame) {
leave, err := unmarshalLeave(f)
if err != nil {
b.onError(errorf("unmarshal leave: %v", err))
return
}
b.routing.unregister(leave.node)
}
func (b *Broker) handleInfo(f frame) {
info, err := unmarshalInfo(f)
if err != nil {
b.onError(errorf("unmarshal info: %v", err))
return
}
b.routing.registerKeys(info.neighbors)
b.notifyReply(info.id, reply{})
}
func (b *Broker) handlePing(ctx context.Context, f frame) {
ping, err := unmarshalPing(f)
if err != nil {
b.onError(errorf("unmarshal ping: %v", err))
return
}
err = b.sendTo(ctx, ping.sender, marshalInfo(info{
id: ping.id,
neighbors: b.routing.neighbors(),
}))
if err != nil {
b.onError(errorf("send info: %v", err))
}
}
func (b *Broker) handleFwd(ctx context.Context, f frame) {
fwd, err := unmarshalFwd(f)
if err != nil {
b.onError(errorf("unmarshal fwd: %v", err))
return
}
err = b.sendTo(ctx, fwd.ack, marshalAck(ack{id: fwd.id}))
if err != nil {
b.onError(errorf("send ack: %v", err))
}
b.forwardMsg(ctx, fwd.msg)
}
func (b *Broker) handleAck(frame frame) {
ack, err := unmarshalAck(frame)
if err != nil {
b.onError(errorf("unmarshal ack: %v", err))
return
}
b.notifyReply(ack.id, reply{data: ack.data})
}
func (b *Broker) forwardMsg(ctx context.Context, msg msg) {
partition := KeyFromBytes(msg.pkey)
if len(msg.pkey) == 0 {
b.dispatchMsg(ctx, msg, partition)
return
}
for {
succ := b.routing.successor(partition)
if succ == b.routing.local {
b.dispatchMsg(ctx, msg, partition)
return
}
reply := b.awaitReply(ctx, succ, b.ackTimeout, func(ctx context.Context, id uint64) error {
return b.sendTo(ctx, succ, marshalFwd(fwd{
id: id,
ack: b.routing.local,
msg: msg,
}))
})
if reply.err == nil {
return
} else if reply.err != ErrTimeout {
b.onError(reply.err)
return
}
// The node was suspected and removed from the
// valid keys. We look for the next successor
// to handle the message.
}
}
func (b *Broker) | dispatchMsg | identifier_name | |
broker.go | ackTimeout: opts.ackTimeout,
reqTimeout: opts.reqTimeout,
codec: opts.codec,
onError: opts.errorHandler,
routing: newRoutingTable(opts),
pubsub: newPubSub(pubsub, opts),
leaving: make(chan struct{}),
pendingReplies: make(map[uint64]pendingReply),
messageHandlers: opts.messageHandlers,
requestHandlers: opts.requestHandlers,
}
if err := b.pubsub.subscribe(ctx, nodeStream(b.clique, b.routing.local), "", b.processCliqueProtocol); err != nil {
b.pubsub.shutdown(ctx)
return nil, err
}
if err := b.pubsub.subscribe(ctx, b.clique, "", b.processCliqueProtocol); err != nil {
b.pubsub.shutdown(ctx)
return nil, err
}
for stream := range b.messageHandlers {
if err := b.pubsub.subscribe(ctx, stream, b.clique, b.processMessage); err != nil {
b.pubsub.shutdown(ctx)
return nil, err
}
}
for stream := range b.requestHandlers {
if err := b.pubsub.subscribe(ctx, stream, b.clique, b.processRequest); err != nil {
b.pubsub.shutdown(ctx)
return nil, err
}
}
join := marshalJoin(join{sender: b.routing.local})
if err := b.broadcast(ctx, join); err != nil {
b.pubsub.shutdown(ctx)
return nil, err
}
b.wg.Add(1)
go b.stabilize(opts.stabilization.Interval)
return b, nil
}
// Close notifies all clique members about a leaving broker and
// disconnects from the pub/sub system.
func (b *Broker) Close() error {
return b.shutdown(context.Background(), func() error { return nil })
}
// Shutdown gracefully shuts down the broker. It notifies all clique
// members about a leaving broker and waits until all messages and
// requests are processed. If the given context expires before, the
// context's error will be returned.
func (b *Broker) Shutdown(ctx context.Context) error {
return b.shutdown(ctx, func() error {
ticker := time.NewTicker(250 * time.Millisecond)
defer ticker.Stop()
for atomic.LoadUint64(&b.messagesInFlight) != 0 || atomic.LoadUint64(&b.requestsInFlight) != 0 {
select {
case <-ctx.Done():
return ctx.Err()
case <-ticker.C:
}
}
return nil
})
}
// Publish forwards the message directly to the pub/sub system.
// If the message does not contain any partition key, the message will
// be processed by a random broker within a clique.
// All binary data of the passed message needs to be valid only during
// the method call.
func (b *Broker) Publish(ctx context.Context, msg Message) error {
if b.isShuttingDown() {
return ErrClosed
}
if err := msg.validate(); err != nil {
return err
}
return b.pubsub.send(ctx, msg.Stream, b.codec.EncodeMessage(msg))
}
// Request sends a request message and waits for its response.
// If the message has no partition key, the request will be processed
// by a random broker within a clique.
// All binary data of the passed message needs to be valid only during
// the method call.
func (b *Broker) Request(ctx context.Context, request Message) (Message, error) {
if b.isShuttingDown() {
return Message{}, ErrClosed
}
if err := request.validate(); err != nil {
return Message{}, err
}
reply := b.awaitReply(ctx, b.routing.local, b.reqTimeout, func(ctx context.Context, id uint64) error {
return b.pubsub.send(ctx, request.Stream, marshalMsg(msg{
id: id,
reply: []byte(nodeStream(b.clique, b.routing.local)),
stream: []byte(request.Stream),
pkey: request.PartitionKey,
data: request.Data,
}))
})
return Message{Data: reply.data}, reply.err
}
func (b *Broker) processMessage(ctx context.Context, stream string, data []byte) {
atomic.AddUint64(&b.messagesInFlight, 1)
defer atomic.AddUint64(&b.messagesInFlight, ^uint64(0))
decoded, err := b.codec.DecodeMessage(stream, data)
if err != nil {
b.onError(errorf("decode message: %v", err))
return
}
b.forwardMsg(ctx, msg{
stream: []byte(decoded.Stream),
pkey: decoded.PartitionKey,
data: decoded.Data,
})
}
func (b *Broker) processRequest(ctx context.Context, stream string, data []byte) {
atomic.AddUint64(&b.requestsInFlight, 1)
defer atomic.AddUint64(&b.requestsInFlight, ^uint64(0))
f, err := unmarshalFrame(data)
switch {
case err != nil:
b.onError(errorf("request subscription: %v", err))
return
case f.typ() != frameTypeMsg:
b.onError(errorf("unexpected request frame type: %s", f.typ()))
return
}
msg, err := unmarshalMsg(f)
if err != nil {
b.onError(errorf("unmarshal msg: %v", err))
return
}
b.forwardMsg(ctx, msg)
}
func (b *Broker) processCliqueProtocol(ctx context.Context, stream string, data []byte) {
f, err := unmarshalFrame(data)
if err != nil {
b.onError(errorf("clique subscription: %v", err))
return
}
switch f.typ() {
case frameTypeJoin:
b.handleJoin(ctx, f)
case frameTypeLeave:
b.handleLeave(f)
case frameTypeInfo:
b.handleInfo(f)
case frameTypePing:
b.handlePing(ctx, f)
case frameTypeFwd:
b.handleFwd(ctx, f)
case frameTypeAck:
b.handleAck(f)
default:
b.onError(errorf("unexpected clique frame type: %s", f.typ()))
}
}
func (b *Broker) handleJoin(ctx context.Context, f frame) {
join, err := unmarshalJoin(f)
switch {
case err != nil:
b.onError(errorf("unmarshal join: %v", err))
return
case join.sender == b.routing.local:
return
}
neighbors := b.routing.neighbors()
b.routing.registerKey(join.sender)
err = b.sendTo(ctx, join.sender, marshalInfo(info{neighbors: neighbors}))
if err != nil {
b.onError(errorf("send info: %v", err))
}
}
func (b *Broker) handleLeave(f frame) {
leave, err := unmarshalLeave(f)
if err != nil {
b.onError(errorf("unmarshal leave: %v", err))
return
}
b.routing.unregister(leave.node)
}
func (b *Broker) handleInfo(f frame) {
info, err := unmarshalInfo(f)
if err != nil {
b.onError(errorf("unmarshal info: %v", err))
return
}
b.routing.registerKeys(info.neighbors)
b.notifyReply(info.id, reply{})
}
func (b *Broker) handlePing(ctx context.Context, f frame) {
ping, err := unmarshalPing(f)
if err != nil {
b.onError(errorf("unmarshal ping: %v", err))
return
}
err = b.sendTo(ctx, ping.sender, marshalInfo(info{
id: ping.id,
neighbors: b.routing.neighbors(),
}))
if err != nil {
b.onError(errorf("send info: %v", err))
}
}
func (b *Broker) handleFwd(ctx context.Context, f frame) {
fwd, err := unmarshalFwd(f)
if err != nil {
b.onError(errorf("unmarshal fwd: %v", err))
return
}
err = b.sendTo(ctx, fwd.ack, marshalAck(ack{id: fwd.id}))
if err != nil {
b.onError(errorf("send ack: %v", err))
}
b.forwardMsg(ctx, fwd.msg)
}
func (b *Broker) handleAck(frame frame) {
ack, err := unmarshalAck(frame)
if err != nil {
b.onError(errorf("unmarshal ack: %v", err))
return
}
b.notifyReply(ack.id, reply{data: ack.data})
}
func (b *Broker) forwardMsg(ctx context.Context, msg msg) {
partition := KeyFromBytes(msg.pkey)
if len(msg.pkey) == 0 {
b.dispatchMsg(ctx, msg, partition)
return
} | b.dispatchMsg(ctx, msg, partition)
return
}
reply := b.awaitReply(ctx, succ, b.ackTimeout, func(ctx context.Context, id uint64) error {
return b.sendTo(ctx, succ, marshalFwd(fwd{
id: id,
ack: b.routing.local,
msg: msg,
}))
})
if reply.err == nil {
return
} else if reply.err != ErrTimeout {
b.onError(reply.err)
return
}
// The node was suspected and removed from the
// valid keys. We look for |
for {
succ := b.routing.successor(partition)
if succ == b.routing.local { | random_line_split |
broker.go | ackTimeout: opts.ackTimeout,
reqTimeout: opts.reqTimeout,
codec: opts.codec,
onError: opts.errorHandler,
routing: newRoutingTable(opts),
pubsub: newPubSub(pubsub, opts),
leaving: make(chan struct{}),
pendingReplies: make(map[uint64]pendingReply),
messageHandlers: opts.messageHandlers,
requestHandlers: opts.requestHandlers,
}
if err := b.pubsub.subscribe(ctx, nodeStream(b.clique, b.routing.local), "", b.processCliqueProtocol); err != nil {
b.pubsub.shutdown(ctx)
return nil, err
}
if err := b.pubsub.subscribe(ctx, b.clique, "", b.processCliqueProtocol); err != nil {
b.pubsub.shutdown(ctx)
return nil, err
}
for stream := range b.messageHandlers {
if err := b.pubsub.subscribe(ctx, stream, b.clique, b.processMessage); err != nil {
b.pubsub.shutdown(ctx)
return nil, err
}
}
for stream := range b.requestHandlers {
if err := b.pubsub.subscribe(ctx, stream, b.clique, b.processRequest); err != nil {
b.pubsub.shutdown(ctx)
return nil, err
}
}
join := marshalJoin(join{sender: b.routing.local})
if err := b.broadcast(ctx, join); err != nil {
b.pubsub.shutdown(ctx)
return nil, err
}
b.wg.Add(1)
go b.stabilize(opts.stabilization.Interval)
return b, nil
}
// Close notifies all clique members about a leaving broker and
// disconnects from the pub/sub system.
func (b *Broker) Close() error |
// Shutdown gracefully shuts down the broker. It notifies all clique
// members about a leaving broker and waits until all messages and
// requests are processed. If the given context expires before, the
// context's error will be returned.
func (b *Broker) Shutdown(ctx context.Context) error {
return b.shutdown(ctx, func() error {
ticker := time.NewTicker(250 * time.Millisecond)
defer ticker.Stop()
for atomic.LoadUint64(&b.messagesInFlight) != 0 || atomic.LoadUint64(&b.requestsInFlight) != 0 {
select {
case <-ctx.Done():
return ctx.Err()
case <-ticker.C:
}
}
return nil
})
}
// Publish forwards the message directly to the pub/sub system.
// If the message does not contain any partition key, the message will
// be processed by a random broker within a clique.
// All binary data of the passed message needs to be valid only during
// the method call.
func (b *Broker) Publish(ctx context.Context, msg Message) error {
if b.isShuttingDown() {
return ErrClosed
}
if err := msg.validate(); err != nil {
return err
}
return b.pubsub.send(ctx, msg.Stream, b.codec.EncodeMessage(msg))
}
// Request sends a request message and waits for its response.
// If the message has no partition key, the request will be processed
// by a random broker within a clique.
// All binary data of the passed message needs to be valid only during
// the method call.
func (b *Broker) Request(ctx context.Context, request Message) (Message, error) {
if b.isShuttingDown() {
return Message{}, ErrClosed
}
if err := request.validate(); err != nil {
return Message{}, err
}
reply := b.awaitReply(ctx, b.routing.local, b.reqTimeout, func(ctx context.Context, id uint64) error {
return b.pubsub.send(ctx, request.Stream, marshalMsg(msg{
id: id,
reply: []byte(nodeStream(b.clique, b.routing.local)),
stream: []byte(request.Stream),
pkey: request.PartitionKey,
data: request.Data,
}))
})
return Message{Data: reply.data}, reply.err
}
func (b *Broker) processMessage(ctx context.Context, stream string, data []byte) {
atomic.AddUint64(&b.messagesInFlight, 1)
defer atomic.AddUint64(&b.messagesInFlight, ^uint64(0))
decoded, err := b.codec.DecodeMessage(stream, data)
if err != nil {
b.onError(errorf("decode message: %v", err))
return
}
b.forwardMsg(ctx, msg{
stream: []byte(decoded.Stream),
pkey: decoded.PartitionKey,
data: decoded.Data,
})
}
func (b *Broker) processRequest(ctx context.Context, stream string, data []byte) {
atomic.AddUint64(&b.requestsInFlight, 1)
defer atomic.AddUint64(&b.requestsInFlight, ^uint64(0))
f, err := unmarshalFrame(data)
switch {
case err != nil:
b.onError(errorf("request subscription: %v", err))
return
case f.typ() != frameTypeMsg:
b.onError(errorf("unexpected request frame type: %s", f.typ()))
return
}
msg, err := unmarshalMsg(f)
if err != nil {
b.onError(errorf("unmarshal msg: %v", err))
return
}
b.forwardMsg(ctx, msg)
}
func (b *Broker) processCliqueProtocol(ctx context.Context, stream string, data []byte) {
f, err := unmarshalFrame(data)
if err != nil {
b.onError(errorf("clique subscription: %v", err))
return
}
switch f.typ() {
case frameTypeJoin:
b.handleJoin(ctx, f)
case frameTypeLeave:
b.handleLeave(f)
case frameTypeInfo:
b.handleInfo(f)
case frameTypePing:
b.handlePing(ctx, f)
case frameTypeFwd:
b.handleFwd(ctx, f)
case frameTypeAck:
b.handleAck(f)
default:
b.onError(errorf("unexpected clique frame type: %s", f.typ()))
}
}
func (b *Broker) handleJoin(ctx context.Context, f frame) {
join, err := unmarshalJoin(f)
switch {
case err != nil:
b.onError(errorf("unmarshal join: %v", err))
return
case join.sender == b.routing.local:
return
}
neighbors := b.routing.neighbors()
b.routing.registerKey(join.sender)
err = b.sendTo(ctx, join.sender, marshalInfo(info{neighbors: neighbors}))
if err != nil {
b.onError(errorf("send info: %v", err))
}
}
func (b *Broker) handleLeave(f frame) {
leave, err := unmarshalLeave(f)
if err != nil {
b.onError(errorf("unmarshal leave: %v", err))
return
}
b.routing.unregister(leave.node)
}
func (b *Broker) handleInfo(f frame) {
info, err := unmarshalInfo(f)
if err != nil {
b.onError(errorf("unmarshal info: %v", err))
return
}
b.routing.registerKeys(info.neighbors)
b.notifyReply(info.id, reply{})
}
func (b *Broker) handlePing(ctx context.Context, f frame) {
ping, err := unmarshalPing(f)
if err != nil {
b.onError(errorf("unmarshal ping: %v", err))
return
}
err = b.sendTo(ctx, ping.sender, marshalInfo(info{
id: ping.id,
neighbors: b.routing.neighbors(),
}))
if err != nil {
b.onError(errorf("send info: %v", err))
}
}
func (b *Broker) handleFwd(ctx context.Context, f frame) {
fwd, err := unmarshalFwd(f)
if err != nil {
b.onError(errorf("unmarshal fwd: %v", err))
return
}
err = b.sendTo(ctx, fwd.ack, marshalAck(ack{id: fwd.id}))
if err != nil {
b.onError(errorf("send ack: %v", err))
}
b.forwardMsg(ctx, fwd.msg)
}
func (b *Broker) handleAck(frame frame) {
ack, err := unmarshalAck(frame)
if err != nil {
b.onError(errorf("unmarshal ack: %v", err))
return
}
b.notifyReply(ack.id, reply{data: ack.data})
}
func (b *Broker) forwardMsg(ctx context.Context, msg msg) {
partition := KeyFromBytes(msg.pkey)
if len(msg.pkey) == 0 {
b.dispatchMsg(ctx, msg, partition)
return
}
for {
succ := b.routing.successor(partition)
if succ == b.routing.local {
b.dispatchMsg(ctx, msg, partition)
return
}
reply := b.awaitReply(ctx, succ, b.ackTimeout, func(ctx context.Context, id uint64) error {
return b.sendTo(ctx, succ, marshalFwd(fwd{
id: id,
ack: b.routing.local,
msg: msg,
}))
})
if reply.err == nil {
return
} else if reply.err != ErrTimeout {
b.onError(reply.err)
return
}
// The node was suspected and removed from the
// valid keys. We look | {
return b.shutdown(context.Background(), func() error { return nil })
} | identifier_body |
broker.go | ackTimeout: opts.ackTimeout,
reqTimeout: opts.reqTimeout,
codec: opts.codec,
onError: opts.errorHandler,
routing: newRoutingTable(opts),
pubsub: newPubSub(pubsub, opts),
leaving: make(chan struct{}),
pendingReplies: make(map[uint64]pendingReply),
messageHandlers: opts.messageHandlers,
requestHandlers: opts.requestHandlers,
}
if err := b.pubsub.subscribe(ctx, nodeStream(b.clique, b.routing.local), "", b.processCliqueProtocol); err != nil {
b.pubsub.shutdown(ctx)
return nil, err
}
if err := b.pubsub.subscribe(ctx, b.clique, "", b.processCliqueProtocol); err != nil {
b.pubsub.shutdown(ctx)
return nil, err
}
for stream := range b.messageHandlers {
if err := b.pubsub.subscribe(ctx, stream, b.clique, b.processMessage); err != nil {
b.pubsub.shutdown(ctx)
return nil, err
}
}
for stream := range b.requestHandlers {
if err := b.pubsub.subscribe(ctx, stream, b.clique, b.processRequest); err != nil {
b.pubsub.shutdown(ctx)
return nil, err
}
}
join := marshalJoin(join{sender: b.routing.local})
if err := b.broadcast(ctx, join); err != nil {
b.pubsub.shutdown(ctx)
return nil, err
}
b.wg.Add(1)
go b.stabilize(opts.stabilization.Interval)
return b, nil
}
// Close notifies all clique members about a leaving broker and
// disconnects from the pub/sub system.
func (b *Broker) Close() error {
return b.shutdown(context.Background(), func() error { return nil })
}
// Shutdown gracefully shuts down the broker. It notifies all clique
// members about a leaving broker and waits until all messages and
// requests are processed. If the given context expires before, the
// context's error will be returned.
func (b *Broker) Shutdown(ctx context.Context) error {
return b.shutdown(ctx, func() error {
ticker := time.NewTicker(250 * time.Millisecond)
defer ticker.Stop()
for atomic.LoadUint64(&b.messagesInFlight) != 0 || atomic.LoadUint64(&b.requestsInFlight) != 0 {
select {
case <-ctx.Done():
return ctx.Err()
case <-ticker.C:
}
}
return nil
})
}
// Publish forwards the message directly to the pub/sub system.
// If the message does not contain any partition key, the message will
// be processed by a random broker within a clique.
// All binary data of the passed message needs to be valid only during
// the method call.
func (b *Broker) Publish(ctx context.Context, msg Message) error {
if b.isShuttingDown() {
return ErrClosed
}
if err := msg.validate(); err != nil {
return err
}
return b.pubsub.send(ctx, msg.Stream, b.codec.EncodeMessage(msg))
}
// Request sends a request message and waits for its response.
// If the message has no partition key, the request will be processed
// by a random broker within a clique.
// All binary data of the passed message needs to be valid only during
// the method call.
func (b *Broker) Request(ctx context.Context, request Message) (Message, error) {
if b.isShuttingDown() {
return Message{}, ErrClosed
}
if err := request.validate(); err != nil {
return Message{}, err
}
reply := b.awaitReply(ctx, b.routing.local, b.reqTimeout, func(ctx context.Context, id uint64) error {
return b.pubsub.send(ctx, request.Stream, marshalMsg(msg{
id: id,
reply: []byte(nodeStream(b.clique, b.routing.local)),
stream: []byte(request.Stream),
pkey: request.PartitionKey,
data: request.Data,
}))
})
return Message{Data: reply.data}, reply.err
}
func (b *Broker) processMessage(ctx context.Context, stream string, data []byte) {
atomic.AddUint64(&b.messagesInFlight, 1)
defer atomic.AddUint64(&b.messagesInFlight, ^uint64(0))
decoded, err := b.codec.DecodeMessage(stream, data)
if err != nil {
b.onError(errorf("decode message: %v", err))
return
}
b.forwardMsg(ctx, msg{
stream: []byte(decoded.Stream),
pkey: decoded.PartitionKey,
data: decoded.Data,
})
}
func (b *Broker) processRequest(ctx context.Context, stream string, data []byte) {
atomic.AddUint64(&b.requestsInFlight, 1)
defer atomic.AddUint64(&b.requestsInFlight, ^uint64(0))
f, err := unmarshalFrame(data)
switch {
case err != nil:
b.onError(errorf("request subscription: %v", err))
return
case f.typ() != frameTypeMsg:
b.onError(errorf("unexpected request frame type: %s", f.typ()))
return
}
msg, err := unmarshalMsg(f)
if err != nil {
b.onError(errorf("unmarshal msg: %v", err))
return
}
b.forwardMsg(ctx, msg)
}
func (b *Broker) processCliqueProtocol(ctx context.Context, stream string, data []byte) {
f, err := unmarshalFrame(data)
if err != nil {
b.onError(errorf("clique subscription: %v", err))
return
}
switch f.typ() {
case frameTypeJoin:
b.handleJoin(ctx, f)
case frameTypeLeave:
b.handleLeave(f)
case frameTypeInfo:
b.handleInfo(f)
case frameTypePing:
b.handlePing(ctx, f)
case frameTypeFwd:
b.handleFwd(ctx, f)
case frameTypeAck:
b.handleAck(f)
default:
b.onError(errorf("unexpected clique frame type: %s", f.typ()))
}
}
func (b *Broker) handleJoin(ctx context.Context, f frame) {
join, err := unmarshalJoin(f)
switch {
case err != nil:
b.onError(errorf("unmarshal join: %v", err))
return
case join.sender == b.routing.local:
return
}
neighbors := b.routing.neighbors()
b.routing.registerKey(join.sender)
err = b.sendTo(ctx, join.sender, marshalInfo(info{neighbors: neighbors}))
if err != nil {
b.onError(errorf("send info: %v", err))
}
}
func (b *Broker) handleLeave(f frame) {
leave, err := unmarshalLeave(f)
if err != nil {
b.onError(errorf("unmarshal leave: %v", err))
return
}
b.routing.unregister(leave.node)
}
func (b *Broker) handleInfo(f frame) {
info, err := unmarshalInfo(f)
if err != nil {
b.onError(errorf("unmarshal info: %v", err))
return
}
b.routing.registerKeys(info.neighbors)
b.notifyReply(info.id, reply{})
}
func (b *Broker) handlePing(ctx context.Context, f frame) {
ping, err := unmarshalPing(f)
if err != nil |
err = b.sendTo(ctx, ping.sender, marshalInfo(info{
id: ping.id,
neighbors: b.routing.neighbors(),
}))
if err != nil {
b.onError(errorf("send info: %v", err))
}
}
func (b *Broker) handleFwd(ctx context.Context, f frame) {
fwd, err := unmarshalFwd(f)
if err != nil {
b.onError(errorf("unmarshal fwd: %v", err))
return
}
err = b.sendTo(ctx, fwd.ack, marshalAck(ack{id: fwd.id}))
if err != nil {
b.onError(errorf("send ack: %v", err))
}
b.forwardMsg(ctx, fwd.msg)
}
func (b *Broker) handleAck(frame frame) {
ack, err := unmarshalAck(frame)
if err != nil {
b.onError(errorf("unmarshal ack: %v", err))
return
}
b.notifyReply(ack.id, reply{data: ack.data})
}
func (b *Broker) forwardMsg(ctx context.Context, msg msg) {
partition := KeyFromBytes(msg.pkey)
if len(msg.pkey) == 0 {
b.dispatchMsg(ctx, msg, partition)
return
}
for {
succ := b.routing.successor(partition)
if succ == b.routing.local {
b.dispatchMsg(ctx, msg, partition)
return
}
reply := b.awaitReply(ctx, succ, b.ackTimeout, func(ctx context.Context, id uint64) error {
return b.sendTo(ctx, succ, marshalFwd(fwd{
id: id,
ack: b.routing.local,
msg: msg,
}))
})
if reply.err == nil {
return
} else if reply.err != ErrTimeout {
b.onError(reply.err)
return
}
// The node was suspected and removed from the
// valid keys. We look | {
b.onError(errorf("unmarshal ping: %v", err))
return
} | conditional_block |
FilesManager.ts | // Already exists
}
}
private getAppSourceFile(classId: string) {
const fullPath =
classId === "index"
? path.resolve(APP_DIR, "index.ts")
: path.resolve(APP_DIR, classId, `index.ts`);
const sourceFile = this.project.getSourceFile(fullPath);
if (sourceFile) {
sourceFile.refreshFromFileSystemSync();
return sourceFile;
}
return this.project.addSourceFileAtPath(fullPath);
}
private async ensureConfigurationDir() {
const configDir = path.resolve(CONFIGURATION_DIR);
try {
await fs.promises.mkdir(configDir);
} catch {
// Already exists
}
try {
const metadata = await fs.promises.readFile(
path.resolve(configDir, "metadata.json")
);
this.metadata = JSON.parse(new TextDecoder("utf-8").decode(metadata));
} catch {
// No file, we will write it later
}
}
private async ensureContainerEntry() {
const entryFile = path.resolve(APP_DIR, "index.ts");
try {
await fs.promises.stat(entryFile);
} catch {
// We do not have the file, lets write it
await this.writePrettyFile(
entryFile,
`import { Container } from '${LIBRARY_IMPORT}'
export const container = new Container({}, { devtool: process.env.NODE_ENV === 'development' && !window.opener ? "localhost:5051" : undefined })
`
);
}
}
private extractClass(classId: string) {
const node = this.getAppSourceFile(classId);
const classNode = ast.getClassNode(node, classId);
const mixins = ast.getClassMixins(classNode);
const injectors = ast.getInjectors(classNode);
const properties = ast.getProperties(classNode);
const methods = ast.getMethods(classNode);
const observables = ast.getObservables(classNode);
properties.forEach((property) => {
if (observables.observable.includes(property.name)) {
property.type = "observable";
} else if (observables.computed.includes(property.name)) |
});
methods.forEach((property) => {
if (observables.action.includes(property.name)) {
property.type = "action";
}
});
return {
classId,
mixins,
injectors,
properties,
methods,
};
}
private async getClass(fileName: string): Promise<ExtractedClass> {
const classId = this.getClassIdFromFileName(fileName);
return this.extractClass(classId);
}
private getClassIdFromFileName(fileName: string) {
return path.dirname(fileName).split(path.sep).pop()!;
}
private async getClasses() {
const appDir = path.resolve(APP_DIR)!;
try {
const directories = (await fs.promises.readdir(appDir)).filter(
(file) => file !== "index.ts" && !file.endsWith(".ts")
);
return directories.reduce<{
[key: string]: ExtractedClass;
}>((aggr, directory) => {
const classId = directory;
aggr[classId] = this.extractClass(classId);
return aggr;
}, {});
} catch {
return {};
}
}
/*
This is where we map files to nodes and their metadata. Things
like position and the ID of the node.
*/
async addMetadata({
classId,
x,
y,
}: {
classId: string;
x: number;
y: number;
}) {
this.metadata[classId] = {
x,
y,
};
this.writeMetadata();
}
async writeMetadata() {
const file = path.resolve(CONFIGURATION_DIR, "metadata.json")!;
await this.writePrettyFile(file, JSON.stringify(this.metadata, null, 2));
}
/*
This method writes the initial file content
*/
async writeClass(classId: string) {
const file = path.resolve(APP_DIR, classId, "index.ts")!;
await this.writeClassToEntryFile(classId);
await this.writePrettyFile(
file,
`import { Feature } from 'reactive-app'
export interface ${classId} extends Feature {}
export class ${classId} {
static mixins = ["Feature"];
}`
);
}
private async writeClassToEntryFile(classId: string) {
const sourceFile = this.getAppSourceFile("index");
sourceFile.addImportDeclaration({
moduleSpecifier: `./${classId}`,
namedImports: [classId],
});
sourceFile
.getVariableDeclaration("container")
?.getInitializer()
?.transform((traversal) => {
const node = traversal.visitChildren();
if (
ts.isObjectLiteralExpression(node) &&
ts.isNewExpression(node.parent) &&
node.parent.arguments![0] === node
) {
return ts.factory.createObjectLiteralExpression(
[
...node.properties,
ts.factory.createShorthandPropertyAssignment(classId, undefined),
],
undefined
);
}
return node;
});
sourceFile.saveSync();
}
/*
This method adds injections. The type of injection will be part of
the payload, either "singleton" or "factory"
*/
async inject({
fromClassId,
toClassId,
asFactory,
}: {
fromClassId: string;
toClassId: string;
asFactory: boolean;
}) {
const sourceFile = this.getAppSourceFile(toClassId);
ast.addImportDeclaration(sourceFile, LIBRARY_IMPORT, "TFeature");
ast.addImportDeclaration(
sourceFile,
`../${fromClassId}`,
fromClassId,
true
);
const classNode = sourceFile.getClass(toClassId);
if (!classNode) {
throw new Error("Can not find class node");
}
const name = asFactory
? `create${fromClassId}`
: fromClassId[0].toLocaleLowerCase() + fromClassId.substr(1);
classNode.insertProperty(1, {
name,
hasExclamationToken: true,
isReadonly: true,
type: `TFeature<typeof ${fromClassId}>`,
trailingTrivia: writeLineBreak,
});
ast.updateInjectFeatures(classNode, (config) => {
config.addProperty({
name,
kind: StructureKind.PropertyAssignment,
initializer: `"${fromClassId}"`,
});
});
sourceFile.saveSync();
}
async removeInjection(fromClassId: string, toClassId: string) {
const sourceFile = this.getAppSourceFile(toClassId);
ast.removeImportDeclaration(sourceFile, `../${fromClassId}`);
const classNode = sourceFile.getClass(toClassId);
if (!classNode) {
throw new Error("Can not find class node");
}
classNode
.getProperty((property) => {
const name = property.getName();
return (
name === this.getInjectName(fromClassId) ||
name === this.getInjectFactoryName(fromClassId)
);
})
?.remove();
ast.updateInjectFeatures(classNode, (config) => {
const property = config.getProperty((property) => {
if (!Node.isPropertyAssignment(property)) {
return false;
}
const initializer = property.getInitializer();
if (!Node.isStringLiteral(initializer)) {
return false;
}
return JSON.parse(initializer.getText()) === fromClassId;
});
property?.remove();
});
sourceFile.saveSync();
}
async toggleMakeObservableProperty(
classId: string,
name: string,
value?: "observable" | "computed" | "action"
) {
const sourceFile = this.getAppSourceFile(classId);
const classNode = sourceFile.getClass(classId)!;
ast.updateMakeObservable(classNode, (config) => {
if (value) {
config.addProperty({
name,
kind: StructureKind.PropertyAssignment,
initializer: `"${value}"`,
});
} else {
const property = config.getProperty(name);
property?.remove();
}
});
sourceFile.saveSync();
}
async toggleMixin(classId: string, mixin: Mixin) {
const sourceFile = this.getAppSourceFile(classId);
switch (mixin) {
case "View":
case "Factory":
ast.toggleImportDeclaration(sourceFile, LIBRARY_IMPORT, mixin);
ast.toggleMixinInterface(sourceFile, classId, mixin);
ast.toggleMixin(sourceFile, classId, mixin);
break;
case "StateMachine":
ast.toggleImportDeclaration(sourceFile, LIBRARY_IMPORT, mixin);
ast.toggleMixinInterface(
sourceFile,
classId,
"StateMachine<TContext, TEvent>"
);
const contextType = sourceFile.getTypeAlias("TContext");
const messageType = sourceFile.getTypeAlias("TEvent");
const classInterface = sourceFile.getInterface(classId);
const clas = sourceFile.getClass(classId)!;
const state = clas.getProperty("state");
const onMessage = clas.getMethod("onMessage");
if (state && onMessage && contextType && messageType) {
state.remove();
contextType.remove();
messageType.remove();
onMessage.remove();
} else {
const interfaceNodeIndex = classInterface!.getChildIndex();
sourceFile.insertTypeAlias(interfaceNodeIndex, {
name: "TEvent",
isExported: true,
type: '{ type: "SOMETHING_HAPPEN | {
property.type = "computed";
} | conditional_block |
FilesManager.ts | // Already exists
}
}
private getAppSourceFile(classId: string) {
const fullPath =
classId === "index"
? path.resolve(APP_DIR, "index.ts")
: path.resolve(APP_DIR, classId, `index.ts`);
const sourceFile = this.project.getSourceFile(fullPath);
if (sourceFile) {
sourceFile.refreshFromFileSystemSync();
return sourceFile;
}
return this.project.addSourceFileAtPath(fullPath);
}
private async ensureConfigurationDir() {
const configDir = path.resolve(CONFIGURATION_DIR);
try {
await fs.promises.mkdir(configDir);
} catch {
// Already exists
}
try {
const metadata = await fs.promises.readFile(
path.resolve(configDir, "metadata.json")
);
this.metadata = JSON.parse(new TextDecoder("utf-8").decode(metadata));
} catch {
// No file, we will write it later
}
}
private async ensureContainerEntry() {
const entryFile = path.resolve(APP_DIR, "index.ts");
try {
await fs.promises.stat(entryFile);
} catch {
// We do not have the file, lets write it
await this.writePrettyFile(
entryFile,
`import { Container } from '${LIBRARY_IMPORT}'
export const container = new Container({}, { devtool: process.env.NODE_ENV === 'development' && !window.opener ? "localhost:5051" : undefined })
`
);
}
}
private extractClass(classId: string) {
const node = this.getAppSourceFile(classId);
const classNode = ast.getClassNode(node, classId);
const mixins = ast.getClassMixins(classNode);
const injectors = ast.getInjectors(classNode);
const properties = ast.getProperties(classNode);
const methods = ast.getMethods(classNode);
const observables = ast.getObservables(classNode);
properties.forEach((property) => {
if (observables.observable.includes(property.name)) {
property.type = "observable";
} else if (observables.computed.includes(property.name)) {
property.type = "computed";
}
});
methods.forEach((property) => {
if (observables.action.includes(property.name)) {
property.type = "action";
}
});
return {
classId,
mixins,
injectors,
properties,
methods,
};
}
private async getClass(fileName: string): Promise<ExtractedClass> {
const classId = this.getClassIdFromFileName(fileName);
return this.extractClass(classId);
}
private getClassIdFromFileName(fileName: string) {
return path.dirname(fileName).split(path.sep).pop()!;
}
private async getClasses() {
const appDir = path.resolve(APP_DIR)!;
try {
const directories = (await fs.promises.readdir(appDir)).filter(
(file) => file !== "index.ts" && !file.endsWith(".ts")
);
return directories.reduce<{
[key: string]: ExtractedClass;
}>((aggr, directory) => {
const classId = directory;
aggr[classId] = this.extractClass(classId);
return aggr;
}, {});
} catch {
return {};
}
}
/*
This is where we map files to nodes and their metadata. Things
like position and the ID of the node.
*/
async addMetadata({
classId,
x,
y,
}: {
classId: string;
x: number;
y: number;
}) {
this.metadata[classId] = {
x,
y,
};
this.writeMetadata();
}
async writeMetadata() {
const file = path.resolve(CONFIGURATION_DIR, "metadata.json")!;
await this.writePrettyFile(file, JSON.stringify(this.metadata, null, 2));
}
/*
This method writes the initial file content
*/
async writeClass(classId: string) {
const file = path.resolve(APP_DIR, classId, "index.ts")!;
await this.writeClassToEntryFile(classId);
await this.writePrettyFile(
file,
`import { Feature } from 'reactive-app'
export interface ${classId} extends Feature {}
export class ${classId} {
static mixins = ["Feature"];
}`
);
}
private async writeClassToEntryFile(classId: string) {
const sourceFile = this.getAppSourceFile("index");
sourceFile.addImportDeclaration({
moduleSpecifier: `./${classId}`,
namedImports: [classId],
});
sourceFile
.getVariableDeclaration("container")
?.getInitializer()
?.transform((traversal) => {
const node = traversal.visitChildren();
if (
ts.isObjectLiteralExpression(node) &&
ts.isNewExpression(node.parent) &&
node.parent.arguments![0] === node
) {
return ts.factory.createObjectLiteralExpression(
[
...node.properties,
ts.factory.createShorthandPropertyAssignment(classId, undefined),
],
undefined
);
}
return node;
});
sourceFile.saveSync();
}
/*
This method adds injections. The type of injection will be part of
the payload, either "singleton" or "factory"
*/
async inject({
fromClassId,
toClassId,
asFactory,
}: {
fromClassId: string;
toClassId: string;
asFactory: boolean;
}) |
classNode.insertProperty(1, {
name,
hasExclamationToken: true,
isReadonly: true,
type: `TFeature<typeof ${fromClassId}>`,
trailingTrivia: writeLineBreak,
});
ast.updateInjectFeatures(classNode, (config) => {
config.addProperty({
name,
kind: StructureKind.PropertyAssignment,
initializer: `"${fromClassId}"`,
});
});
sourceFile.saveSync();
}
async removeInjection(fromClassId: string, toClassId: string) {
const sourceFile = this.getAppSourceFile(toClassId);
ast.removeImportDeclaration(sourceFile, `../${fromClassId}`);
const classNode = sourceFile.getClass(toClassId);
if (!classNode) {
throw new Error("Can not find class node");
}
classNode
.getProperty((property) => {
const name = property.getName();
return (
name === this.getInjectName(fromClassId) ||
name === this.getInjectFactoryName(fromClassId)
);
})
?.remove();
ast.updateInjectFeatures(classNode, (config) => {
const property = config.getProperty((property) => {
if (!Node.isPropertyAssignment(property)) {
return false;
}
const initializer = property.getInitializer();
if (!Node.isStringLiteral(initializer)) {
return false;
}
return JSON.parse(initializer.getText()) === fromClassId;
});
property?.remove();
});
sourceFile.saveSync();
}
async toggleMakeObservableProperty(
classId: string,
name: string,
value?: "observable" | "computed" | "action"
) {
const sourceFile = this.getAppSourceFile(classId);
const classNode = sourceFile.getClass(classId)!;
ast.updateMakeObservable(classNode, (config) => {
if (value) {
config.addProperty({
name,
kind: StructureKind.PropertyAssignment,
initializer: `"${value}"`,
});
} else {
const property = config.getProperty(name);
property?.remove();
}
});
sourceFile.saveSync();
}
async toggleMixin(classId: string, mixin: Mixin) {
const sourceFile = this.getAppSourceFile(classId);
switch (mixin) {
case "View":
case "Factory":
ast.toggleImportDeclaration(sourceFile, LIBRARY_IMPORT, mixin);
ast.toggleMixinInterface(sourceFile, classId, mixin);
ast.toggleMixin(sourceFile, classId, mixin);
break;
case "StateMachine":
ast.toggleImportDeclaration(sourceFile, LIBRARY_IMPORT, mixin);
ast.toggleMixinInterface(
sourceFile,
classId,
"StateMachine<TContext, TEvent>"
);
const contextType = sourceFile.getTypeAlias("TContext");
const messageType = sourceFile.getTypeAlias("TEvent");
const classInterface = sourceFile.getInterface(classId);
const clas = sourceFile.getClass(classId)!;
const state = clas.getProperty("state");
const onMessage = clas.getMethod("onMessage");
if (state && onMessage && contextType && messageType) {
state.remove();
contextType.remove();
messageType.remove();
onMessage.remove();
} else {
const interfaceNodeIndex = classInterface!.getChildIndex();
sourceFile.insertTypeAlias(interfaceNodeIndex, {
name: "TEvent",
isExported: true,
type: '{ type: "SOMETHING_HAPPEN | {
const sourceFile = this.getAppSourceFile(toClassId);
ast.addImportDeclaration(sourceFile, LIBRARY_IMPORT, "TFeature");
ast.addImportDeclaration(
sourceFile,
`../${fromClassId}`,
fromClassId,
true
);
const classNode = sourceFile.getClass(toClassId);
if (!classNode) {
throw new Error("Can not find class node");
}
const name = asFactory
? `create${fromClassId}`
: fromClassId[0].toLocaleLowerCase() + fromClassId.substr(1); | identifier_body |
FilesManager.ts | {
const classId = this.getClassIdFromFileName(fileName);
return this.extractClass(classId);
}
private getClassIdFromFileName(fileName: string) {
return path.dirname(fileName).split(path.sep).pop()!;
}
private async getClasses() {
const appDir = path.resolve(APP_DIR)!;
try {
const directories = (await fs.promises.readdir(appDir)).filter(
(file) => file !== "index.ts" && !file.endsWith(".ts")
);
return directories.reduce<{
[key: string]: ExtractedClass;
}>((aggr, directory) => {
const classId = directory;
aggr[classId] = this.extractClass(classId);
return aggr;
}, {});
} catch {
return {};
}
}
/*
This is where we map files to nodes and their metadata. Things
like position and the ID of the node.
*/
async addMetadata({
classId,
x,
y,
}: {
classId: string;
x: number;
y: number;
}) {
this.metadata[classId] = {
x,
y,
};
this.writeMetadata();
}
async writeMetadata() {
const file = path.resolve(CONFIGURATION_DIR, "metadata.json")!;
await this.writePrettyFile(file, JSON.stringify(this.metadata, null, 2));
}
/*
This method writes the initial file content
*/
async writeClass(classId: string) {
const file = path.resolve(APP_DIR, classId, "index.ts")!;
await this.writeClassToEntryFile(classId);
await this.writePrettyFile(
file,
`import { Feature } from 'reactive-app'
export interface ${classId} extends Feature {}
export class ${classId} {
static mixins = ["Feature"];
}`
);
}
private async writeClassToEntryFile(classId: string) {
const sourceFile = this.getAppSourceFile("index");
sourceFile.addImportDeclaration({
moduleSpecifier: `./${classId}`,
namedImports: [classId],
});
sourceFile
.getVariableDeclaration("container")
?.getInitializer()
?.transform((traversal) => {
const node = traversal.visitChildren();
if (
ts.isObjectLiteralExpression(node) &&
ts.isNewExpression(node.parent) &&
node.parent.arguments![0] === node
) {
return ts.factory.createObjectLiteralExpression(
[
...node.properties,
ts.factory.createShorthandPropertyAssignment(classId, undefined),
],
undefined
);
}
return node;
});
sourceFile.saveSync();
}
/*
This method adds injections. The type of injection will be part of
the payload, either "singleton" or "factory"
*/
async inject({
fromClassId,
toClassId,
asFactory,
}: {
fromClassId: string;
toClassId: string;
asFactory: boolean;
}) {
const sourceFile = this.getAppSourceFile(toClassId);
ast.addImportDeclaration(sourceFile, LIBRARY_IMPORT, "TFeature");
ast.addImportDeclaration(
sourceFile,
`../${fromClassId}`,
fromClassId,
true
);
const classNode = sourceFile.getClass(toClassId);
if (!classNode) {
throw new Error("Can not find class node");
}
const name = asFactory
? `create${fromClassId}`
: fromClassId[0].toLocaleLowerCase() + fromClassId.substr(1);
classNode.insertProperty(1, {
name,
hasExclamationToken: true,
isReadonly: true,
type: `TFeature<typeof ${fromClassId}>`,
trailingTrivia: writeLineBreak,
});
ast.updateInjectFeatures(classNode, (config) => {
config.addProperty({
name,
kind: StructureKind.PropertyAssignment,
initializer: `"${fromClassId}"`,
});
});
sourceFile.saveSync();
}
async removeInjection(fromClassId: string, toClassId: string) {
const sourceFile = this.getAppSourceFile(toClassId);
ast.removeImportDeclaration(sourceFile, `../${fromClassId}`);
const classNode = sourceFile.getClass(toClassId);
if (!classNode) {
throw new Error("Can not find class node");
}
classNode
.getProperty((property) => {
const name = property.getName();
return (
name === this.getInjectName(fromClassId) ||
name === this.getInjectFactoryName(fromClassId)
);
})
?.remove();
ast.updateInjectFeatures(classNode, (config) => {
const property = config.getProperty((property) => {
if (!Node.isPropertyAssignment(property)) {
return false;
}
const initializer = property.getInitializer();
if (!Node.isStringLiteral(initializer)) {
return false;
}
return JSON.parse(initializer.getText()) === fromClassId;
});
property?.remove();
});
sourceFile.saveSync();
}
async toggleMakeObservableProperty(
classId: string,
name: string,
value?: "observable" | "computed" | "action"
) {
const sourceFile = this.getAppSourceFile(classId);
const classNode = sourceFile.getClass(classId)!;
ast.updateMakeObservable(classNode, (config) => {
if (value) {
config.addProperty({
name,
kind: StructureKind.PropertyAssignment,
initializer: `"${value}"`,
});
} else {
const property = config.getProperty(name);
property?.remove();
}
});
sourceFile.saveSync();
}
async toggleMixin(classId: string, mixin: Mixin) {
const sourceFile = this.getAppSourceFile(classId);
switch (mixin) {
case "View":
case "Factory":
ast.toggleImportDeclaration(sourceFile, LIBRARY_IMPORT, mixin);
ast.toggleMixinInterface(sourceFile, classId, mixin);
ast.toggleMixin(sourceFile, classId, mixin);
break;
case "StateMachine":
ast.toggleImportDeclaration(sourceFile, LIBRARY_IMPORT, mixin);
ast.toggleMixinInterface(
sourceFile,
classId,
"StateMachine<TContext, TEvent>"
);
const contextType = sourceFile.getTypeAlias("TContext");
const messageType = sourceFile.getTypeAlias("TEvent");
const classInterface = sourceFile.getInterface(classId);
const clas = sourceFile.getClass(classId)!;
const state = clas.getProperty("state");
const onMessage = clas.getMethod("onMessage");
if (state && onMessage && contextType && messageType) {
state.remove();
contextType.remove();
messageType.remove();
onMessage.remove();
} else {
const interfaceNodeIndex = classInterface!.getChildIndex();
sourceFile.insertTypeAlias(interfaceNodeIndex, {
name: "TEvent",
isExported: true,
type: '{ type: "SOMETHING_HAPPENED" }',
trailingTrivia: writeLineBreak,
});
sourceFile.insertTypeAlias(interfaceNodeIndex, {
name: "TContext",
isExported: true,
type: '{ state: "FOO" } | { state: "BAR" }',
});
clas.addProperty({
name: "context",
type: "TContext",
initializer: `{ state: "FOO" }`,
});
const onEvent = clas.addMethod({
name: "onEvent",
parameters: [
{
name: "event",
type: "TEvent",
},
],
statements: `
return this.transition(this.context, event, {
FOO: {
SOMETHING_HAPPENED: () => ({ state: "BAR" })
},
BAR: {
SOMETHING_HAPPENED: () => ({ state: "FOO" })
}
})
`,
trailingTrivia: writeLineBreak,
});
onEvent.toggleModifier("protected", true);
}
ast.toggleMixin(sourceFile, classId, mixin);
ast.updateMakeObservable(clas, (config) => {
config.addProperty({
name: "context",
initializer: '"observable"',
kind: StructureKind.PropertyAssignment,
});
});
break;
}
sourceFile.saveSync();
}
async deleteClass(classId: string) {
const directory = path.resolve(APP_DIR, classId);
await fs.promises.rmdir(directory, { recursive: true });
}
async renameClass(fromClassId: string, toClassId: string) {
await this.addMetadata({
classId: toClassId,
...this.metadata[fromClassId],
});
const fromClassPath = path.resolve(APP_DIR, fromClassId, "index.ts");
const toClassPath = path.resolve(APP_DIR, toClassId, "index.ts");
const fs = this.project.getFileSystem();
const contents = fs.readFileSync(fromClassPath);
const sourceFile = this.project.createSourceFile(toClassPath, contents);
const classDefinition = sourceFile.getClass(fromClassId)!;
const classInterface = sourceFile.getInterface(fromClassId)!;
classDefinition.rename(toClassId);
classInterface.rename(toClassId);
fs.mkdirSync(path.resolve(APP_DIR, toClassId));
fs.writeFileSync(toClassPath, sourceFile.print());
await this.writeClassToEntryFile(toClassId);
await this.deleteClass(fromClassId);
}
async | initialize | identifier_name | |
FilesManager.ts | await fs.promises.mkdir(path.dirname(fileName));
} catch {
// Already exists
}
return fs.promises.writeFile(
fileName,
new TextEncoder().encode(
prettier.format(content, {
...prettierConfig,
parser: path.extname(fileName) === ".json" ? "json" : "typescript",
})
)
);
}
private async ensureAppDir() {
try {
await fs.promises.mkdir(path.resolve(APP_DIR));
} catch {
// Already exists
}
}
private getAppSourceFile(classId: string) {
const fullPath =
classId === "index"
? path.resolve(APP_DIR, "index.ts")
: path.resolve(APP_DIR, classId, `index.ts`);
const sourceFile = this.project.getSourceFile(fullPath);
if (sourceFile) {
sourceFile.refreshFromFileSystemSync();
return sourceFile;
}
return this.project.addSourceFileAtPath(fullPath);
}
private async ensureConfigurationDir() {
const configDir = path.resolve(CONFIGURATION_DIR);
try {
await fs.promises.mkdir(configDir);
} catch {
// Already exists
}
try {
const metadata = await fs.promises.readFile(
path.resolve(configDir, "metadata.json")
);
this.metadata = JSON.parse(new TextDecoder("utf-8").decode(metadata));
} catch {
// No file, we will write it later
}
}
private async ensureContainerEntry() {
const entryFile = path.resolve(APP_DIR, "index.ts");
try {
await fs.promises.stat(entryFile);
} catch {
// We do not have the file, lets write it
await this.writePrettyFile(
entryFile,
`import { Container } from '${LIBRARY_IMPORT}'
export const container = new Container({}, { devtool: process.env.NODE_ENV === 'development' && !window.opener ? "localhost:5051" : undefined })
`
);
}
}
private extractClass(classId: string) {
const node = this.getAppSourceFile(classId);
const classNode = ast.getClassNode(node, classId);
const mixins = ast.getClassMixins(classNode);
const injectors = ast.getInjectors(classNode);
const properties = ast.getProperties(classNode);
const methods = ast.getMethods(classNode);
const observables = ast.getObservables(classNode);
properties.forEach((property) => {
if (observables.observable.includes(property.name)) {
property.type = "observable";
} else if (observables.computed.includes(property.name)) {
property.type = "computed";
}
});
methods.forEach((property) => {
if (observables.action.includes(property.name)) {
property.type = "action";
}
});
return {
classId,
mixins,
injectors,
properties,
methods,
};
}
private async getClass(fileName: string): Promise<ExtractedClass> {
const classId = this.getClassIdFromFileName(fileName);
return this.extractClass(classId);
}
private getClassIdFromFileName(fileName: string) {
return path.dirname(fileName).split(path.sep).pop()!;
}
private async getClasses() {
const appDir = path.resolve(APP_DIR)!;
try {
const directories = (await fs.promises.readdir(appDir)).filter(
(file) => file !== "index.ts" && !file.endsWith(".ts")
);
return directories.reduce<{
[key: string]: ExtractedClass;
}>((aggr, directory) => {
const classId = directory;
aggr[classId] = this.extractClass(classId);
return aggr;
}, {});
} catch {
return {};
}
}
/*
This is where we map files to nodes and their metadata. Things
like position and the ID of the node.
*/
async addMetadata({
classId,
x,
y,
}: {
classId: string;
x: number;
y: number;
}) {
this.metadata[classId] = {
x,
y,
};
this.writeMetadata();
}
async writeMetadata() {
const file = path.resolve(CONFIGURATION_DIR, "metadata.json")!;
await this.writePrettyFile(file, JSON.stringify(this.metadata, null, 2));
}
/*
This method writes the initial file content
*/
async writeClass(classId: string) {
const file = path.resolve(APP_DIR, classId, "index.ts")!;
await this.writeClassToEntryFile(classId);
await this.writePrettyFile(
file,
`import { Feature } from 'reactive-app'
export interface ${classId} extends Feature {}
export class ${classId} {
static mixins = ["Feature"];
}`
);
}
private async writeClassToEntryFile(classId: string) {
const sourceFile = this.getAppSourceFile("index");
sourceFile.addImportDeclaration({
moduleSpecifier: `./${classId}`,
namedImports: [classId],
});
sourceFile
.getVariableDeclaration("container")
?.getInitializer()
?.transform((traversal) => {
const node = traversal.visitChildren();
if (
ts.isObjectLiteralExpression(node) &&
ts.isNewExpression(node.parent) &&
node.parent.arguments![0] === node
) {
return ts.factory.createObjectLiteralExpression(
[
...node.properties,
ts.factory.createShorthandPropertyAssignment(classId, undefined),
],
undefined
);
}
return node;
});
sourceFile.saveSync();
}
/*
This method adds injections. The type of injection will be part of
the payload, either "singleton" or "factory"
*/
async inject({
fromClassId,
toClassId,
asFactory,
}: {
fromClassId: string;
toClassId: string;
asFactory: boolean;
}) {
const sourceFile = this.getAppSourceFile(toClassId);
ast.addImportDeclaration(sourceFile, LIBRARY_IMPORT, "TFeature");
ast.addImportDeclaration(
sourceFile,
`../${fromClassId}`,
fromClassId,
true
);
const classNode = sourceFile.getClass(toClassId);
if (!classNode) {
throw new Error("Can not find class node");
}
const name = asFactory
? `create${fromClassId}`
: fromClassId[0].toLocaleLowerCase() + fromClassId.substr(1);
classNode.insertProperty(1, {
name,
hasExclamationToken: true,
isReadonly: true,
type: `TFeature<typeof ${fromClassId}>`,
trailingTrivia: writeLineBreak,
});
ast.updateInjectFeatures(classNode, (config) => {
config.addProperty({
name,
kind: StructureKind.PropertyAssignment,
initializer: `"${fromClassId}"`,
});
});
sourceFile.saveSync();
}
async removeInjection(fromClassId: string, toClassId: string) {
const sourceFile = this.getAppSourceFile(toClassId);
ast.removeImportDeclaration(sourceFile, `../${fromClassId}`);
const classNode = sourceFile.getClass(toClassId);
if (!classNode) {
throw new Error("Can not find class node");
}
classNode
.getProperty((property) => {
const name = property.getName();
return (
name === this.getInjectName(fromClassId) ||
name === this.getInjectFactoryName(fromClassId)
);
})
?.remove();
ast.updateInjectFeatures(classNode, (config) => {
const property = config.getProperty((property) => {
if (!Node.isPropertyAssignment(property)) {
return false;
}
const initializer = property.getInitializer();
if (!Node.isStringLiteral(initializer)) {
return false;
}
return JSON.parse(initializer.getText()) === fromClassId;
});
property?.remove();
});
sourceFile.saveSync();
}
async toggleMakeObservableProperty(
classId: string,
name: string,
value?: "observable" | "computed" | "action"
) {
const sourceFile = this.getAppSourceFile(classId);
const classNode = sourceFile.getClass(classId)!;
ast.updateMakeObservable(classNode, (config) => {
if (value) {
config.addProperty({
name,
kind: StructureKind.PropertyAssignment,
initializer: `"${value}"`,
});
} else {
const property = config.getProperty(name);
property?.remove();
}
});
sourceFile.saveSync();
}
async toggleMixin(classId: string, mixin: Mixin) {
const sourceFile = this.getAppSourceFile(classId);
switch (mixin) {
case "View":
case "Factory":
ast.toggleImportDeclaration(sourceFile, LIBRARY_IMPORT, mixin);
ast.toggleMixinInterface(sourceFile, classId, mixin);
ast.toggleMixin(sourceFile, classId, mixin);
break;
case "StateMachine":
ast.toggleImportDeclaration(sourceFile, LIBRARY_IMPORT, mixin);
ast.toggleMixinInterface(
sourceFile,
classId,
"StateMachine<TContext, TEvent>"
);
const contextType = sourceFile.getTypeAlias("TContext");
const messageType = sourceFile.getTypeAlias("TEvent");
const classInterface = sourceFile | private async writePrettyFile(fileName: string, content: string) {
try { | random_line_split | |
specs.py |
event = (
("evid", 1, "i4", "i8", 1, 8),
("evname", 2, "c15", "a15", 10, 24),
("prefor", 3, "i4", "i8", 26, 33),
("auth", 4, "c15", "al5", 35, 49),
("commid", 5, "i4", "i8", 51, 58),
("lddate", 6, "date", "al7", 60, 76),
)
gregion = (
("gm", 1, "i4", "i8", 1, 8),
("gmame", 2, "c40", "a40", 10, 49),
("lddate", 3, "date", "a17", 51, 67),
)
instrument = (
("inid", 1, "i4", "i8", 1, 8),
("insname", 2, "c50", "a50", 10, 59),
("instype", 3, "c6", "a6", 61, 66),
("band", 4, "c1", "a1", 68, 68),
("digital", 5, "c1", "a1", 70, 70),
("sarnprate", 6, "f4", "fll.7", 72, 82),
("ncalib", 7, "f4", "fl6.6", 84, 99),
("ncalper", 8, "f4", "fl6.6", 101, 116),
("dir", 9, "c64", "a64", 118, 181),
("dfile", 10, "c32", "a32", 183, 214),
("rsptype", 11, "c6", "a6", 216, 221),
("lddate", 12, "date", "a17", 223, 239),
)
lastid = (
("keyname", 1, "c15", "a15", 1, 15),
("keyvalue", 2, "i4", "i8", 17, 24),
("lddate", 3, "date", "a17", 26, 42),
)
netmag = (
("magid", 1, "i4", "i8", 1, 8),
("net", 2, "c8", "a8", 10, 17),
("orid", 3, "i4", "i8", 19, 26),
("evid", 4, "i4", "i8", 28, 35),
("magtype", 5, "c6", "a6", 37, 42),
("nsta", 6, "i4", "i8", 44, 51),
("magnitude", 7, "f4", "f7.2", 53, 59),
("uncertainty", 8, "f4", "f7.2", 61, 67),
("auth", 9, "c15", "a15", 69, 83),
("commid", 10, "i4", "i8", 85, 92),
("lddate", 11, "date", "al7", 94, 110),
)
network = (
("net", 1, "c8", "a8", 1, 8),
("netname", 2, "c80", "a80", 10, 89),
("nettype", 3, "c4", "a4", 91, 94),
("auth", 4, "ciS", "al5", 96, 110),
("corrunid", 5, "i4", "i8", 112, 119),
("lddate", 6, "date", "a17", 121, 137),
)
origerr = (
("orid", 1, "i4", "i8", 1, 8),
("sxx", 2, "f4", "fl5.4", 10, 24),
("syy", 3, "f4", "f15.4", 26, 40),
("szz", 4, "f4", "f15.4", 42, 56),
("stt", 5, "f4", "fl5.4", 58, 72),
("sxy", 6, "f4", "f15.4", 74, 88),
("sxz", 7, "f4", "f15.4", 90, 104),
("syz", 8, "f4", "f15.4", 106, 120),
("stx", 9, "f4", "f15.4", 122, 136),
("sty", 10, "f4", "f15.4", 138, 152),
("stz", 11, "f4", "fl5.4", 154, 168),
("sdobs", 12, "f4", "f9.4", 170, 178),
("smajax", 13, "f4", "!9.4", 180, 188),
("sminax", 14, "f4", "f9.4", 190, 198),
("strike", 15, "f4", "f6.2", 200, 205),
("sdepth", 16, "f4", "f9.4", 207, 215),
("stime", 17, "f4", "f8.2", 217, 224),
("eonf", 18, "f4", "f5.3", 226, 230),
("commid", 19, "i4", "i8", 232, 239),
("lddate", 20, "date", "a17", 241, 257),
)
origin = (
("lat", 1, "f4", "f9.4", 1, 9),
("lon", 2, "f4", "f9.4", 11, 19),
("depth", 3, "f4", "f9.4", 21, 29),
("time", 4, "f8", "fl7.5", 31, 47),
("orid", 5, "i4", "i8", 49, 56),
("evid", 6, "i4", "i8", 58, 65),
("jdate", 7, "i4", "i8", 67, 74),
("nass", 8, "i4", "i4", 76, 79),
("ndef", 9, "i4", "i4", 81, 84),
("ndp", 10, "i4", "i4", 86, 89),
("gm", 11, "i4", "i8", 91, 98),
("sm", 12, "i4", "i8", 100, 107),
("etype", 13, "c7", "a7", 109, 115),
("depdp", 14, "f4", "f9.4", 117, 125),
("dtype", 15, "c1", "a1", 127, 127),
("mb", 16, "f4", "f7.2", 129, 135),
("mbid", 17, "i4", "i8 | ("lddate", 19, "date", "al7", 136, 152),
) | random_line_split | |
traverser.go |
SendTimeout time.Duration
}
func NewTraverser(cfg TraverserConfig) *traverser {
// Include request id in all logging.
logger := log.WithFields(log.Fields{"requestId": cfg.Chain.RequestId()})
// Channels used to communicate between traverser + reaper(s)
doneJobChan := make(chan proto.Job)
runJobChan := make(chan proto.Job)
runnerRepo := runner.NewRepo() // needed for traverser + reaper factory
reaperFactory := &ChainReaperFactory{
Chain: cfg.Chain,
ChainRepo: cfg.ChainRepo,
RMClient: cfg.RMClient,
RMCTries: reaperTries,
RMCRetryWait: reaperRetryWait,
Logger: logger,
DoneJobChan: doneJobChan,
RunJobChan: runJobChan,
RunnerRepo: runnerRepo,
}
return &traverser{
reaperFactory: reaperFactory,
logger: logger,
chain: cfg.Chain,
chainRepo: cfg.ChainRepo,
rf: cfg.RunnerFactory,
runnerRepo: runnerRepo,
shutdownChan: cfg.ShutdownChan,
runJobChan: runJobChan,
doneJobChan: doneJobChan,
doneChan: make(chan struct{}),
rmc: cfg.RMClient,
stopMux: &sync.RWMutex{},
stopTimeout: cfg.StopTimeout,
sendTimeout: cfg.SendTimeout,
}
}
// Run runs all jobs in the chain and blocks until the chain finishes running, is
// stopped, or is suspended.
func (t *traverser) Run() {
t.logger.Infof("chain traverser started")
defer t.logger.Infof("chain traverser done")
defer t.chainRepo.Remove(t.chain.RequestId())
// Start a goroutine to run jobs. This consumes from the runJobChan. When
// jobs are done, they will be sent to the doneJobChan, which the job reapers
// consume from.
go t.runJobs()
// Find all the jobs we can start running. For a new job chain (not suspended),
// this'll end up being just the first job in the chain.
jobsToRun := t.chain.RunnableJobs()
// Add the first jobs to runJobChan.
for _, job := range jobsToRun {
t.logger.Infof("sending initial job (%s) to runJobChan", job.Id)
if t.chain.IsSequenceStartJob(job.Id) {
// Starting a sequence, so increment sequence try count.
t.chain.IncrementSequenceTries(job.Id)
seqLogger := t.logger.WithFields(log.Fields{"sequence_id": job.SequenceId, "sequence_try": t.chain.SequenceTries(job.Id)})
seqLogger.Info("starting try of sequence")
}
t.runJobChan <- job
}
// Start a goroutine to reap done jobs. The running reaper consumes from
// doneJobChan and sends the next jobs to be run to runJobChan.
runningReaperChan := make(chan struct{})
t.reaper = t.reaperFactory.MakeRunning()
go func() {
defer close(runningReaperChan) // indicate reaper is done (see select below)
defer close(t.runJobChan) // stop runJobs goroutine
t.reaper.Run()
}()
// Wait for running reaper to be done or traverser to be shut down.
select {
case <-runningReaperChan:
// If running reaper is done because traverser was stopped, we will
// wait for Stop() to finish. Otherwise, the chain finished normally
// (completed or failed) and we can return right away.
//
// We don't check if the chain was suspended, since that can only
// happen via the other case in this select.
t.stopMux.Lock()
if !t.stopped {
t.stopMux.Unlock()
return
}
t.stopMux.Unlock()
case <-t.shutdownChan:
// The Job Runner is shutting down. Stop the running reaper and suspend
// the job chain, to be resumed later by another Job Runner.
t.shutdown()
}
// Traverser is being stopped or shut down - wait for that to finish before
// returning.
select {
case <-t.doneChan:
// Stopped/shutdown successfully - nothing left to do.
return
case <-time.After(20 * time.Second):
// Failed to stop/shutdown in a reasonable amount of time.
// Log the failure and return.
t.logger.Warnf("stopping or suspending the job chain took too long. Exiting...")
return
}
}
// Stop stops the running job chain by switching the running chain reaper for a
// stopped chain reaper and stopping all currently running jobs. Stop blocks until
// all jobs have finished and the stopped reaper has send the chain's final state
// to the RM.
func (t *traverser) Stop() error {
// Don't do anything if the traverser has already been stopped or suspended.
t.stopMux.Lock()
defer t.stopMux.Unlock()
if t.stopped {
return nil
} else if t.suspended {
return ErrShuttingDown
}
t.stopped = true
t.logger.Infof("stopping traverser and all jobs")
// Stop the current reaper and start running a reaper for stopped chains. This
// reaper saves jobs' states (but doesn't enqueue any more jobs to run) and
// sends the chain's final state to the RM when all jobs have stopped running.
t.reaper.Stop() // blocks until running reaper is done stopping
stoppedReaperChan := make(chan struct{})
t.reaper = t.reaperFactory.MakeStopped()
go func() {
defer close(stoppedReaperChan)
t.reaper.Run()
}()
// Stop all job runners in the runner repo. Do this after switching to the
// stopped reaper so that when the jobs finish and are sent on doneJobChan,
// they are reaped correctly.
err := t.stopRunningJobs()
if err != nil {
// Don't return the error yet - we still want to wait for the stop
// reaper to be done.
err = fmt.Errorf("traverser was stopped, but encountered an error in the process: %s", err)
}
// Wait for the stopped reaper to finish. If it takes too long, some jobs
// haven't respond quickly to being stopped. Stop waiting for these jobs by
// stopping the stopped reaper.
select {
case <-stoppedReaperChan:
case <-time.After(t.stopTimeout):
t.logger.Warnf("timed out waiting for jobs to stop - stopping reaper")
t.reaper.Stop()
}
close(t.doneChan)
return err
}
// Status returns the status of currently running jobs in the chain.
func (t *traverser) Status() (proto.JobChainStatus, error) {
t.logger.Infof("getting the status of all running jobs")
activeRunners, err := t.runnerRepo.Items()
if err != nil {
return proto.JobChainStatus{}, err
}
runningJobs := t.chain.Running()
status := make([]proto.JobStatus, len(runningJobs))
i := 0
for jobId, jobStatus := range runningJobs {
runner := activeRunners[jobId]
if runner == nil {
// The job finished between the call to chain.Running() and now,
// so it's runner no longer exists in the runner.Repo.
jobStatus.Status = "(finished)"
} else {
jobStatus.Status = runner.Status()
}
status[i] = jobStatus
i++
}
jcStatus := proto.JobChainStatus{
RequestId: t.chain.RequestId(),
JobStatuses: status,
}
return jcStatus, nil
}
// -------------------------------------------------------------------------- //
// runJobs loops on the runJobChan, and runs each job that comes through the
// channel. When the job is done, it sends the job out through the doneJobChan.
func (t *traverser) runJobs() | {
// Run all jobs that come in on runJobChan. The loop exits when runJobChan
// is closed after the running reaper finishes.
for job := range t.runJobChan {
// Explicitly pass the job into the func, or all goroutines would share
// the same loop "job" variable.
go func(job proto.Job) {
jLogger := t.logger.WithFields(log.Fields{"job_id": job.Id, "sequence_id": job.SequenceId, "sequence_try": t.chain.SequenceTries(job.Id)})
// Always send the finished job to doneJobChan to be reaped. If the
// reaper isn't reaping any more jobs (if this job took too long to
// finish after being stopped), sending to doneJobChan won't be
// possible - timeout after a while so we don't leak this goroutine.
defer func() {
select {
case t.doneJobChan <- job:
case <-time.After(t.sendTimeout):
jLogger.Warnf("timed out sending job to doneJobChan")
}
// Remove the job's runner from the repo (if it was ever added) | identifier_body | |
traverser.go | (jobChain *proto.JobChain) (Traverser, error) {
// Convert/wrap chain from proto to Go object.
chain := NewChain(jobChain, make(map[string]uint), make(map[string]uint), make(map[string]uint))
return f.make(chain)
}
// MakeFromSJC makes a Traverser from a suspended job chain.
func (f *traverserFactory) MakeFromSJC(sjc *proto.SuspendedJobChain) (Traverser, error) {
// Convert/wrap chain from proto to Go object.
chain := NewChain(sjc.JobChain, sjc.SequenceTries, sjc.TotalJobTries, sjc.LatestRunJobTries)
return f.make(chain)
}
// Creates a new Traverser from a chain. Used for both new and resumed chains.
func (f *traverserFactory) make(chain *Chain) (Traverser, error) {
// Add chain to repo. This used to save the chain in Redis, if configured,
// but now it's only an in-memory map. The only functionality it serves is
// preventing this JR instance from running the same job chain.
if err := f.chainRepo.Add(chain); err != nil {
return nil, fmt.Errorf("error adding job chain: %s", err)
}
// Create and return a traverser for the chain. The traverser is responsible
// for the chain: running, cleaning up, removing from repo when done, etc.
// And traverser and chain have the same lifespan: traverser is done when
// chain is done.
cfg := TraverserConfig{
Chain: chain,
ChainRepo: f.chainRepo,
RunnerFactory: f.rf,
RMClient: f.rmc,
ShutdownChan: f.shutdownChan,
StopTimeout: defaultTimeout,
SendTimeout: defaultTimeout,
}
return NewTraverser(cfg), nil
}
// -------------------------------------------------------------------------- //
type traverser struct {
reaperFactory ReaperFactory
reaper JobReaper
shutdownChan chan struct{} // indicates JR is shutting down
runJobChan chan proto.Job // jobs to be run
doneJobChan chan proto.Job // jobs that are done
doneChan chan struct{} // closed when traverser finishes running
stopMux *sync.RWMutex // lock around checks to stopped
stopped bool // has traverser been stopped
suspended bool // has traverser been suspended
chain *Chain
chainRepo Repo // stores all currently running chains
rf runner.Factory
runnerRepo runner.Repo // stores actively running jobs
rmc rm.Client
logger *log.Entry
stopTimeout time.Duration // Time to wait for jobs to stop
sendTimeout time.Duration // Time to wait for a job to send on doneJobChan.
}
type TraverserConfig struct {
Chain *Chain
ChainRepo Repo
RunnerFactory runner.Factory
RMClient rm.Client
ShutdownChan chan struct{}
StopTimeout time.Duration
SendTimeout time.Duration
}
func NewTraverser(cfg TraverserConfig) *traverser {
// Include request id in all logging.
logger := log.WithFields(log.Fields{"requestId": cfg.Chain.RequestId()})
// Channels used to communicate between traverser + reaper(s)
doneJobChan := make(chan proto.Job)
runJobChan := make(chan proto.Job)
runnerRepo := runner.NewRepo() // needed for traverser + reaper factory
reaperFactory := &ChainReaperFactory{
Chain: cfg.Chain,
ChainRepo: cfg.ChainRepo,
RMClient: cfg.RMClient,
RMCTries: reaperTries,
RMCRetryWait: reaperRetryWait,
Logger: logger,
DoneJobChan: doneJobChan,
RunJobChan: runJobChan,
RunnerRepo: runnerRepo,
}
return &traverser{
reaperFactory: reaperFactory,
logger: logger,
chain: cfg.Chain,
chainRepo: cfg.ChainRepo,
rf: cfg.RunnerFactory,
runnerRepo: runnerRepo,
shutdownChan: cfg.ShutdownChan,
runJobChan: runJobChan,
doneJobChan: doneJobChan,
doneChan: make(chan struct{}),
rmc: cfg.RMClient,
stopMux: &sync.RWMutex{},
stopTimeout: cfg.StopTimeout,
sendTimeout: cfg.SendTimeout,
}
}
// Run runs all jobs in the chain and blocks until the chain finishes running, is
// stopped, or is suspended.
func (t *traverser) Run() {
t.logger.Infof("chain traverser started")
defer t.logger.Infof("chain traverser done")
defer t.chainRepo.Remove(t.chain.RequestId())
// Start a goroutine to run jobs. This consumes from the runJobChan. When
// jobs are done, they will be sent to the doneJobChan, which the job reapers
// consume from.
go t.runJobs()
// Find all the jobs we can start running. For a new job chain (not suspended),
// this'll end up being just the first job in the chain.
jobsToRun := t.chain.RunnableJobs()
// Add the first jobs to runJobChan.
for _, job := range jobsToRun {
t.logger.Infof("sending initial job (%s) to runJobChan", job.Id)
if t.chain.IsSequenceStartJob(job.Id) {
// Starting a sequence, so increment sequence try count.
t.chain.IncrementSequenceTries(job.Id)
seqLogger := t.logger.WithFields(log.Fields{"sequence_id": job.SequenceId, "sequence_try": t.chain.SequenceTries(job.Id)})
seqLogger.Info("starting try of sequence")
}
t.runJobChan <- job
}
// Start a goroutine to reap done jobs. The running reaper consumes from
// doneJobChan and sends the next jobs to be run to runJobChan.
runningReaperChan := make(chan struct{})
t.reaper = t.reaperFactory.MakeRunning()
go func() {
defer close(runningReaperChan) // indicate reaper is done (see select below)
defer close(t.runJobChan) // stop runJobs goroutine
t.reaper.Run()
}()
// Wait for running reaper to be done or traverser to be shut down.
select {
case <-runningReaperChan:
// If running reaper is done because traverser was stopped, we will
// wait for Stop() to finish. Otherwise, the chain finished normally
// (completed or failed) and we can return right away.
//
// We don't check if the chain was suspended, since that can only
// happen via the other case in this select.
t.stopMux.Lock()
if !t.stopped {
t.stopMux.Unlock()
return
}
t.stopMux.Unlock()
case <-t.shutdownChan:
// The Job Runner is shutting down. Stop the running reaper and suspend
// the job chain, to be resumed later by another Job Runner.
t.shutdown()
}
// Traverser is being stopped or shut down - wait for that to finish before
// returning.
select {
case <-t.doneChan:
// Stopped/shutdown successfully - nothing left to do.
return
case <-time.After(20 * time.Second):
// Failed to stop/shutdown in a reasonable amount of time.
// Log the failure and return.
t.logger.Warnf("stopping or suspending the job chain took too long. Exiting...")
return
}
}
// Stop stops the running job chain by switching the running chain reaper for a
// stopped chain reaper and stopping all currently running jobs. Stop blocks until
// all jobs have finished and the stopped reaper has send the chain's final state
// to the RM.
func (t *traverser) Stop() error {
// Don't do anything if the traverser has already been stopped or suspended.
t.stopMux.Lock()
defer t.stopMux.Unlock()
if t.stopped {
return nil
} else if t.suspended {
return ErrShuttingDown
}
t.stopped = true
t.logger.Infof("stopping traverser and all jobs")
// Stop the current reaper and start running a reaper for stopped chains. This
// reaper saves jobs' states (but doesn't enqueue any more jobs to run) and
// sends the chain's final state to the RM when all jobs have stopped running.
t.reaper.Stop() // blocks until running reaper is done stopping
stoppedReaperChan := make(chan struct{})
t.reaper = t.reaperFactory.MakeStopped()
go func() {
defer close(stoppedReaperChan)
t.reaper.Run()
}()
// Stop all job runners in the runner repo. Do this after switching to the
// stopped reaper so that when the jobs finish and are sent on doneJobChan,
// they are reaped correctly.
err := t.stopRunningJobs()
if err != nil {
// Don't return the error yet - we still want to wait for the stop
// reaper to be done.
err = fmt.Errorf | Make | identifier_name | |
traverser.go | .
// And traverser and chain have the same lifespan: traverser is done when
// chain is done.
cfg := TraverserConfig{
Chain: chain,
ChainRepo: f.chainRepo,
RunnerFactory: f.rf,
RMClient: f.rmc,
ShutdownChan: f.shutdownChan,
StopTimeout: defaultTimeout,
SendTimeout: defaultTimeout,
}
return NewTraverser(cfg), nil
}
// -------------------------------------------------------------------------- //
type traverser struct {
reaperFactory ReaperFactory
reaper JobReaper
shutdownChan chan struct{} // indicates JR is shutting down
runJobChan chan proto.Job // jobs to be run
doneJobChan chan proto.Job // jobs that are done
doneChan chan struct{} // closed when traverser finishes running
stopMux *sync.RWMutex // lock around checks to stopped
stopped bool // has traverser been stopped
suspended bool // has traverser been suspended
chain *Chain
chainRepo Repo // stores all currently running chains
rf runner.Factory
runnerRepo runner.Repo // stores actively running jobs
rmc rm.Client
logger *log.Entry
stopTimeout time.Duration // Time to wait for jobs to stop
sendTimeout time.Duration // Time to wait for a job to send on doneJobChan.
}
type TraverserConfig struct {
Chain *Chain
ChainRepo Repo
RunnerFactory runner.Factory
RMClient rm.Client
ShutdownChan chan struct{}
StopTimeout time.Duration
SendTimeout time.Duration
}
func NewTraverser(cfg TraverserConfig) *traverser {
// Include request id in all logging.
logger := log.WithFields(log.Fields{"requestId": cfg.Chain.RequestId()})
// Channels used to communicate between traverser + reaper(s)
doneJobChan := make(chan proto.Job)
runJobChan := make(chan proto.Job)
runnerRepo := runner.NewRepo() // needed for traverser + reaper factory
reaperFactory := &ChainReaperFactory{
Chain: cfg.Chain,
ChainRepo: cfg.ChainRepo,
RMClient: cfg.RMClient,
RMCTries: reaperTries,
RMCRetryWait: reaperRetryWait,
Logger: logger,
DoneJobChan: doneJobChan,
RunJobChan: runJobChan,
RunnerRepo: runnerRepo,
}
return &traverser{
reaperFactory: reaperFactory,
logger: logger,
chain: cfg.Chain,
chainRepo: cfg.ChainRepo,
rf: cfg.RunnerFactory,
runnerRepo: runnerRepo,
shutdownChan: cfg.ShutdownChan,
runJobChan: runJobChan,
doneJobChan: doneJobChan,
doneChan: make(chan struct{}),
rmc: cfg.RMClient,
stopMux: &sync.RWMutex{},
stopTimeout: cfg.StopTimeout,
sendTimeout: cfg.SendTimeout,
}
}
// Run runs all jobs in the chain and blocks until the chain finishes running, is
// stopped, or is suspended.
func (t *traverser) Run() {
t.logger.Infof("chain traverser started")
defer t.logger.Infof("chain traverser done")
defer t.chainRepo.Remove(t.chain.RequestId())
// Start a goroutine to run jobs. This consumes from the runJobChan. When
// jobs are done, they will be sent to the doneJobChan, which the job reapers
// consume from.
go t.runJobs()
// Find all the jobs we can start running. For a new job chain (not suspended),
// this'll end up being just the first job in the chain.
jobsToRun := t.chain.RunnableJobs()
// Add the first jobs to runJobChan.
for _, job := range jobsToRun {
t.logger.Infof("sending initial job (%s) to runJobChan", job.Id)
if t.chain.IsSequenceStartJob(job.Id) {
// Starting a sequence, so increment sequence try count.
t.chain.IncrementSequenceTries(job.Id)
seqLogger := t.logger.WithFields(log.Fields{"sequence_id": job.SequenceId, "sequence_try": t.chain.SequenceTries(job.Id)})
seqLogger.Info("starting try of sequence")
}
t.runJobChan <- job
}
// Start a goroutine to reap done jobs. The running reaper consumes from
// doneJobChan and sends the next jobs to be run to runJobChan.
runningReaperChan := make(chan struct{})
t.reaper = t.reaperFactory.MakeRunning()
go func() {
defer close(runningReaperChan) // indicate reaper is done (see select below)
defer close(t.runJobChan) // stop runJobs goroutine
t.reaper.Run()
}()
// Wait for running reaper to be done or traverser to be shut down.
select {
case <-runningReaperChan:
// If running reaper is done because traverser was stopped, we will
// wait for Stop() to finish. Otherwise, the chain finished normally
// (completed or failed) and we can return right away.
//
// We don't check if the chain was suspended, since that can only
// happen via the other case in this select.
t.stopMux.Lock()
if !t.stopped {
t.stopMux.Unlock()
return
}
t.stopMux.Unlock()
case <-t.shutdownChan:
// The Job Runner is shutting down. Stop the running reaper and suspend
// the job chain, to be resumed later by another Job Runner.
t.shutdown()
}
// Traverser is being stopped or shut down - wait for that to finish before
// returning.
select {
case <-t.doneChan:
// Stopped/shutdown successfully - nothing left to do.
return
case <-time.After(20 * time.Second):
// Failed to stop/shutdown in a reasonable amount of time.
// Log the failure and return.
t.logger.Warnf("stopping or suspending the job chain took too long. Exiting...")
return
}
}
// Stop stops the running job chain by switching the running chain reaper for a
// stopped chain reaper and stopping all currently running jobs. Stop blocks until
// all jobs have finished and the stopped reaper has send the chain's final state
// to the RM.
func (t *traverser) Stop() error {
// Don't do anything if the traverser has already been stopped or suspended.
t.stopMux.Lock()
defer t.stopMux.Unlock()
if t.stopped {
return nil
} else if t.suspended {
return ErrShuttingDown
}
t.stopped = true
t.logger.Infof("stopping traverser and all jobs")
// Stop the current reaper and start running a reaper for stopped chains. This
// reaper saves jobs' states (but doesn't enqueue any more jobs to run) and
// sends the chain's final state to the RM when all jobs have stopped running.
t.reaper.Stop() // blocks until running reaper is done stopping
stoppedReaperChan := make(chan struct{})
t.reaper = t.reaperFactory.MakeStopped()
go func() {
defer close(stoppedReaperChan)
t.reaper.Run()
}()
// Stop all job runners in the runner repo. Do this after switching to the
// stopped reaper so that when the jobs finish and are sent on doneJobChan,
// they are reaped correctly.
err := t.stopRunningJobs()
if err != nil {
// Don't return the error yet - we still want to wait for the stop
// reaper to be done.
err = fmt.Errorf("traverser was stopped, but encountered an error in the process: %s", err)
}
// Wait for the stopped reaper to finish. If it takes too long, some jobs
// haven't respond quickly to being stopped. Stop waiting for these jobs by
// stopping the stopped reaper.
select {
case <-stoppedReaperChan:
case <-time.After(t.stopTimeout):
t.logger.Warnf("timed out waiting for jobs to stop - stopping reaper")
t.reaper.Stop()
}
close(t.doneChan)
return err
}
// Status returns the status of currently running jobs in the chain.
func (t *traverser) Status() (proto.JobChainStatus, error) {
t.logger.Infof("getting the status of all running jobs")
activeRunners, err := t.runnerRepo.Items()
if err != nil {
return proto.JobChainStatus{}, err
}
runningJobs := t.chain.Running()
status := make([]proto.JobStatus, len(runningJobs))
i := 0
for jobId, jobStatus := range runningJobs | {
runner := activeRunners[jobId]
if runner == nil {
// The job finished between the call to chain.Running() and now,
// so it's runner no longer exists in the runner.Repo.
jobStatus.Status = "(finished)"
} else {
jobStatus.Status = runner.Status()
}
status[i] = jobStatus
i++
} | conditional_block | |
traverser.go | ) and we can return right away.
//
// We don't check if the chain was suspended, since that can only
// happen via the other case in this select.
t.stopMux.Lock()
if !t.stopped {
t.stopMux.Unlock()
return
}
t.stopMux.Unlock()
case <-t.shutdownChan:
// The Job Runner is shutting down. Stop the running reaper and suspend
// the job chain, to be resumed later by another Job Runner.
t.shutdown()
}
// Traverser is being stopped or shut down - wait for that to finish before
// returning.
select {
case <-t.doneChan:
// Stopped/shutdown successfully - nothing left to do.
return
case <-time.After(20 * time.Second):
// Failed to stop/shutdown in a reasonable amount of time.
// Log the failure and return.
t.logger.Warnf("stopping or suspending the job chain took too long. Exiting...")
return
}
}
// Stop stops the running job chain by switching the running chain reaper for a
// stopped chain reaper and stopping all currently running jobs. Stop blocks until
// all jobs have finished and the stopped reaper has send the chain's final state
// to the RM.
func (t *traverser) Stop() error {
// Don't do anything if the traverser has already been stopped or suspended.
t.stopMux.Lock()
defer t.stopMux.Unlock()
if t.stopped {
return nil
} else if t.suspended {
return ErrShuttingDown
}
t.stopped = true
t.logger.Infof("stopping traverser and all jobs")
// Stop the current reaper and start running a reaper for stopped chains. This
// reaper saves jobs' states (but doesn't enqueue any more jobs to run) and
// sends the chain's final state to the RM when all jobs have stopped running.
t.reaper.Stop() // blocks until running reaper is done stopping
stoppedReaperChan := make(chan struct{})
t.reaper = t.reaperFactory.MakeStopped()
go func() {
defer close(stoppedReaperChan)
t.reaper.Run()
}()
// Stop all job runners in the runner repo. Do this after switching to the
// stopped reaper so that when the jobs finish and are sent on doneJobChan,
// they are reaped correctly.
err := t.stopRunningJobs()
if err != nil {
// Don't return the error yet - we still want to wait for the stop
// reaper to be done.
err = fmt.Errorf("traverser was stopped, but encountered an error in the process: %s", err)
}
// Wait for the stopped reaper to finish. If it takes too long, some jobs
// haven't respond quickly to being stopped. Stop waiting for these jobs by
// stopping the stopped reaper.
select {
case <-stoppedReaperChan:
case <-time.After(t.stopTimeout):
t.logger.Warnf("timed out waiting for jobs to stop - stopping reaper")
t.reaper.Stop()
}
close(t.doneChan)
return err
}
// Status returns the status of currently running jobs in the chain.
func (t *traverser) Status() (proto.JobChainStatus, error) {
t.logger.Infof("getting the status of all running jobs")
activeRunners, err := t.runnerRepo.Items()
if err != nil {
return proto.JobChainStatus{}, err
}
runningJobs := t.chain.Running()
status := make([]proto.JobStatus, len(runningJobs))
i := 0
for jobId, jobStatus := range runningJobs {
runner := activeRunners[jobId]
if runner == nil {
// The job finished between the call to chain.Running() and now,
// so it's runner no longer exists in the runner.Repo.
jobStatus.Status = "(finished)"
} else {
jobStatus.Status = runner.Status()
}
status[i] = jobStatus
i++
}
jcStatus := proto.JobChainStatus{
RequestId: t.chain.RequestId(),
JobStatuses: status,
}
return jcStatus, nil
}
// -------------------------------------------------------------------------- //
// runJobs loops on the runJobChan, and runs each job that comes through the
// channel. When the job is done, it sends the job out through the doneJobChan.
func (t *traverser) runJobs() {
// Run all jobs that come in on runJobChan. The loop exits when runJobChan
// is closed after the running reaper finishes.
for job := range t.runJobChan {
// Explicitly pass the job into the func, or all goroutines would share
// the same loop "job" variable.
go func(job proto.Job) {
jLogger := t.logger.WithFields(log.Fields{"job_id": job.Id, "sequence_id": job.SequenceId, "sequence_try": t.chain.SequenceTries(job.Id)})
// Always send the finished job to doneJobChan to be reaped. If the
// reaper isn't reaping any more jobs (if this job took too long to
// finish after being stopped), sending to doneJobChan won't be
// possible - timeout after a while so we don't leak this goroutine.
defer func() {
select {
case t.doneJobChan <- job:
case <-time.After(t.sendTimeout):
jLogger.Warnf("timed out sending job to doneJobChan")
}
// Remove the job's runner from the repo (if it was ever added)
// AFTER sending it to doneJobChan. This avoids a race condition
// when the stopped + suspended reapers check if the runnerRepo
// is empty.
t.runnerRepo.Remove(job.Id)
}()
// Retrieve job and sequence try info from the chain for the Runner.
sequenceTries := t.chain.SequenceTries(job.Id) // used in job logs
totalJobTries := t.chain.TotalTries(job.Id) // used in job logs
// When resuming a stopped job, only try the job
// [allowed tries - tries before being stopped] times, so the total
// number of times the job is tried (during this sequence try) stays
// correct. The job's last try (the try it was stopped on) doesn't
// count, so subtract 1 if it was tried at least once before
// being stopped.
triesBeforeStopped := uint(0)
if job.State == proto.STATE_STOPPED {
triesBeforeStopped = t.chain.LatestRunTries(job.Id)
if triesBeforeStopped > 0 {
triesBeforeStopped--
}
}
runner, err := t.rf.Make(job, t.chain.RequestId(), totalJobTries, triesBeforeStopped, sequenceTries)
if err != nil {
// Problem creating the job runner - treat job as failed.
// Send a JobLog to the RM so that it knows this job failed.
job.State = proto.STATE_FAIL
err = fmt.Errorf("problem creating job runner: %s", err)
t.sendJL(job, err)
return
}
// Add the runner to the repo. Runners in the repo are used
// by the Status, Stop, and shutdown methods on the traverser.
t.runnerRepo.Set(job.Id, runner)
// Bail out if Stop was called or traverser shut down. It is
// important that this check happens AFTER the runner is added to
// the repo. Otherwise if Stop gets called after this check but
// before the runner is added to the repo, there will be nothing to
// stop the job from running.
//
// We don't lock stopMux around this check and runner.Run. It's okay if
// there's a small chance for the runner to be run after the traverser
// gets stopped or shut down - it'll just return after trying the job
// once.
if t.stopped {
job.State = proto.STATE_STOPPED
// Send a JL to the RM so that it knows this job was stopped.
// Add 1 to the total job tries, since this is used for keeping
// job logs unique.
t.chain.AddJobTries(job.Id, 1)
err = fmt.Errorf("not starting job because traverser has already been stopped")
t.sendJL(job, err)
return
} else if t.suspended {
job.State = proto.STATE_STOPPED
// Don't send a JL because this job will be resumed later,
// and don't include this try in the total # of tries (only
// set job tries for the latest run).
t.chain.SetLatestRunJobTries(job.Id, 1)
return
}
// Run the job. This is a blocking operation that could take a long time.
jLogger.Infof("running job")
t.chain.SetJobState(job.Id, proto.STATE_RUNNING)
ret := runner.Run(job.Data)
t.chain.AddJobTries(job.Id, ret.Tries)
job.State = ret.FinalState | random_line_split | ||
table.go | 5MB of IO:
// 1MB read from this level
// 10-12MB read from next level (boundaries may be misaligned)
// 10-12MB written to next level
// This implies that 25 seeks cost the same as the compaction
// of 1MB of data. I.e., one seek costs approximately the
// same as the compaction of 40KB of data. We are a little
// conservative and allow approximately one seek for every 16KB
// of data before triggering a compaction.
f.seekLeft = int32(size / 16384)
if f.seekLeft < 100 {
f.seekLeft = 100
}
return f
}
func tableFileFromRecord(r atRecord) *tFile {
return newTableFile(storage.FileDesc{Type: storage.TypeTable, Num: r.num}, r.size, r.imin, r.imax)
}
// tFiles hold multiple tFile.
type tFiles []*tFile
func (tf tFiles) Len() int { return len(tf) }
func (tf tFiles) Swap(i, j int) { tf[i], tf[j] = tf[j], tf[i] }
func (tf tFiles) nums() string {
x := "[ "
for i, f := range tf {
if i != 0 {
x += ", "
}
x += fmt.Sprint(f.fd.Num)
}
x += " ]"
return x
}
// Returns true if i smallest key is less than j.
// This used for sort by key in ascending order.
func (tf tFiles) lessByKey(icmp *iComparer, i, j int) bool |
// Returns true if i file number is greater than j.
// This used for sort by file number in descending order.
func (tf tFiles) lessByNum(i, j int) bool {
return tf[i].fd.Num > tf[j].fd.Num
}
// Sorts tables by key in ascending order.
func (tf tFiles) sortByKey(icmp *iComparer) {
sort.Sort(&tFilesSortByKey{tFiles: tf, icmp: icmp})
}
// Sorts tables by file number in descending order.
func (tf tFiles) sortByNum() {
sort.Sort(&tFilesSortByNum{tFiles: tf})
}
// Returns sum of all tables size.
func (tf tFiles) size() (sum int64) {
for _, t := range tf {
sum += t.size
}
return sum
}
// Searches smallest index of tables whose its smallest
// key is after or equal with given key.
func (tf tFiles) searchMin(icmp *iComparer, ikey internalKey) int {
return sort.Search(len(tf), func(i int) bool {
return icmp.Compare(tf[i].imin, ikey) >= 0
})
}
// Searches smallest index of tables whose its largest
// key is after or equal with given key.
func (tf tFiles) searchMax(icmp *iComparer, ikey internalKey) int {
return sort.Search(len(tf), func(i int) bool {
return icmp.Compare(tf[i].imax, ikey) >= 0
})
}
// Returns true if given key range overlaps with one or more
// tables key range. If unsorted is true then binary search will not be used.
func (tf tFiles) overlaps(icmp *iComparer, umin, umax []byte, unsorted bool) bool {
if unsorted {
// Check against all files.
for _, t := range tf {
if t.overlaps(icmp, umin, umax) {
return true
}
}
return false
}
i := 0
if len(umin) > 0 {
// Find the earliest possible internal key for min.
i = tf.searchMax(icmp, makeInternalKey(nil, umin, keyMaxSeq, keyTypeSeek))
}
if i >= len(tf) {
// Beginning of range is after all files, so no overlap.
return false
}
return !tf[i].before(icmp, umax)
}
// Returns tables whose its key range overlaps with given key range.
// Range will be expanded if ukey found hop across tables.
// If overlapped is true then the search will be restarted if umax
// expanded.
// The dst content will be overwritten.
func (tf tFiles) getOverlaps(dst tFiles, icmp *iComparer, umin, umax []byte, overlapped bool) tFiles {
dst = dst[:0]
for i := 0; i < len(tf); {
t := tf[i]
if t.overlaps(icmp, umin, umax) {
if umin != nil && icmp.uCompare(t.imin.ukey(), umin) < 0 {
umin = t.imin.ukey()
dst = dst[:0]
i = 0
continue
} else if umax != nil && icmp.uCompare(t.imax.ukey(), umax) > 0 {
umax = t.imax.ukey()
// Restart search if it is overlapped.
if overlapped {
dst = dst[:0]
i = 0
continue
}
}
dst = append(dst, t)
}
i++
}
return dst
}
// Returns tables key range.
func (tf tFiles) getRange(icmp *iComparer) (imin, imax internalKey) {
for i, t := range tf {
if i == 0 {
imin, imax = t.imin, t.imax
continue
}
if icmp.Compare(t.imin, imin) < 0 {
imin = t.imin
}
if icmp.Compare(t.imax, imax) > 0 {
imax = t.imax
}
}
return
}
// Creates iterator index from tables.
func (tf tFiles) newIndexIterator(tops *tOps, icmp *iComparer, slice *util.Range, ro *opt.ReadOptions) iterator.IteratorIndexer {
if slice != nil {
var start, limit int
if slice.Start != nil {
start = tf.searchMax(icmp, internalKey(slice.Start))
}
if slice.Limit != nil {
limit = tf.searchMin(icmp, internalKey(slice.Limit))
} else {
limit = tf.Len()
}
tf = tf[start:limit]
}
return iterator.NewArrayIndexer(&tFilesArrayIndexer{
tFiles: tf,
tops: tops,
icmp: icmp,
slice: slice,
ro: ro,
})
}
// Tables iterator index.
type tFilesArrayIndexer struct {
tFiles
tops *tOps
icmp *iComparer
slice *util.Range
ro *opt.ReadOptions
}
func (a *tFilesArrayIndexer) Search(key []byte) int {
return a.searchMax(a.icmp, internalKey(key))
}
func (a *tFilesArrayIndexer) Get(i int) iterator.Iterator {
if i == 0 || i == a.Len()-1 {
return a.tops.newIterator(a.tFiles[i], a.slice, a.ro)
}
return a.tops.newIterator(a.tFiles[i], nil, a.ro)
}
// Helper type for sortByKey.
type tFilesSortByKey struct {
tFiles
icmp *iComparer
}
func (x *tFilesSortByKey) Less(i, j int) bool {
return x.lessByKey(x.icmp, i, j)
}
// Helper type for sortByNum.
type tFilesSortByNum struct {
tFiles
}
func (x *tFilesSortByNum) Less(i, j int) bool {
return x.lessByNum(i, j)
}
// Table operations.
type tOps struct {
s *session
noSync bool
evictRemoved bool
cache *cache.Cache
bcache *cache.Cache
bpool *util.BufferPool
}
// Creates an empty table and returns table writer.
func (t *tOps) create() (*tWriter, error) {
fd := storage.FileDesc{Type: storage.TypeTable, Num: t.s.allocFileNum()}
fw, err := t.s.stor.Create(fd)
if err != nil {
return nil, err
}
return &tWriter{
t: t,
fd: fd,
w: fw,
tw: table.NewWriter(fw, t.s.o.Options),
}, nil
}
// Builds table from src iterator.
func (t *tOps) createFrom(src iterator.Iterator) (f *tFile, n int, err error) {
w, err := t.create()
if err != nil {
return
}
defer func() {
if err != nil {
w.drop()
}
}()
for src.Next() {
err = w.append(src.Key(), src.Value())
if err != nil {
return
}
}
err = src.Error()
if err != nil {
return
}
n = w.tw.EntriesLen()
f, err | {
a, b := tf[i], tf[j]
n := icmp.Compare(a.imin, b.imin)
if n == 0 {
return a.fd.Num < b.fd.Num
}
return n < 0
} | identifier_body |
table.go | 25MB of IO:
// 1MB read from this level
// 10-12MB read from next level (boundaries may be misaligned)
// 10-12MB written to next level
// This implies that 25 seeks cost the same as the compaction
// of 1MB of data. I.e., one seek costs approximately the
// same as the compaction of 40KB of data. We are a little
// conservative and allow approximately one seek for every 16KB
// of data before triggering a compaction.
f.seekLeft = int32(size / 16384)
if f.seekLeft < 100 {
f.seekLeft = 100
}
return f
}
func tableFileFromRecord(r atRecord) *tFile {
return newTableFile(storage.FileDesc{Type: storage.TypeTable, Num: r.num}, r.size, r.imin, r.imax)
}
// tFiles hold multiple tFile.
type tFiles []*tFile
func (tf tFiles) Len() int { return len(tf) }
func (tf tFiles) Swap(i, j int) { tf[i], tf[j] = tf[j], tf[i] }
func (tf tFiles) nums() string {
x := "[ "
for i, f := range tf {
if i != 0 {
x += ", "
}
x += fmt.Sprint(f.fd.Num)
}
x += " ]"
return x
}
// Returns true if i smallest key is less than j.
// This used for sort by key in ascending order.
func (tf tFiles) lessByKey(icmp *iComparer, i, j int) bool {
a, b := tf[i], tf[j]
n := icmp.Compare(a.imin, b.imin)
if n == 0 {
return a.fd.Num < b.fd.Num
}
return n < 0
}
// Returns true if i file number is greater than j.
// This used for sort by file number in descending order.
func (tf tFiles) lessByNum(i, j int) bool {
return tf[i].fd.Num > tf[j].fd.Num
}
// Sorts tables by key in ascending order.
func (tf tFiles) sortByKey(icmp *iComparer) {
sort.Sort(&tFilesSortByKey{tFiles: tf, icmp: icmp})
}
// Sorts tables by file number in descending order.
func (tf tFiles) sortByNum() {
sort.Sort(&tFilesSortByNum{tFiles: tf})
}
// Returns sum of all tables size.
func (tf tFiles) size() (sum int64) {
for _, t := range tf {
sum += t.size
}
return sum
}
// Searches smallest index of tables whose its smallest
// key is after or equal with given key.
func (tf tFiles) searchMin(icmp *iComparer, ikey internalKey) int {
return sort.Search(len(tf), func(i int) bool {
return icmp.Compare(tf[i].imin, ikey) >= 0
})
}
// Searches smallest index of tables whose its largest
// key is after or equal with given key.
func (tf tFiles) searchMax(icmp *iComparer, ikey internalKey) int {
return sort.Search(len(tf), func(i int) bool {
return icmp.Compare(tf[i].imax, ikey) >= 0
})
}
// Returns true if given key range overlaps with one or more
// tables key range. If unsorted is true then binary search will not be used.
func (tf tFiles) overlaps(icmp *iComparer, umin, umax []byte, unsorted bool) bool {
if unsorted {
// Check against all files.
for _, t := range tf {
if t.overlaps(icmp, umin, umax) {
return true
}
}
return false
}
i := 0
if len(umin) > 0 {
// Find the earliest possible internal key for min.
i = tf.searchMax(icmp, makeInternalKey(nil, umin, keyMaxSeq, keyTypeSeek))
}
if i >= len(tf) {
// Beginning of range is after all files, so no overlap.
return false
}
return !tf[i].before(icmp, umax)
}
// Returns tables whose its key range overlaps with given key range.
// Range will be expanded if ukey found hop across tables.
// If overlapped is true then the search will be restarted if umax
// expanded.
// The dst content will be overwritten.
func (tf tFiles) getOverlaps(dst tFiles, icmp *iComparer, umin, umax []byte, overlapped bool) tFiles {
dst = dst[:0]
for i := 0; i < len(tf); {
t := tf[i]
if t.overlaps(icmp, umin, umax) {
if umin != nil && icmp.uCompare(t.imin.ukey(), umin) < 0 {
umin = t.imin.ukey()
dst = dst[:0]
i = 0
continue
} else if umax != nil && icmp.uCompare(t.imax.ukey(), umax) > 0 {
umax = t.imax.ukey()
// Restart search if it is overlapped.
if overlapped {
dst = dst[:0]
i = 0
continue
}
}
dst = append(dst, t)
}
i++
}
return dst
}
// Returns tables key range.
func (tf tFiles) getRange(icmp *iComparer) (imin, imax internalKey) {
for i, t := range tf {
if i == 0 {
imin, imax = t.imin, t.imax
continue
}
if icmp.Compare(t.imin, imin) < 0 {
imin = t.imin
}
if icmp.Compare(t.imax, imax) > 0 {
imax = t.imax
}
}
return
}
// Creates iterator index from tables.
func (tf tFiles) newIndexIterator(tops *tOps, icmp *iComparer, slice *util.Range, ro *opt.ReadOptions) iterator.IteratorIndexer {
if slice != nil {
var start, limit int
if slice.Start != nil {
start = tf.searchMax(icmp, internalKey(slice.Start))
}
if slice.Limit != nil {
limit = tf.searchMin(icmp, internalKey(slice.Limit))
} else {
limit = tf.Len()
}
tf = tf[start:limit]
}
return iterator.NewArrayIndexer(&tFilesArrayIndexer{
tFiles: tf,
tops: tops,
icmp: icmp,
slice: slice,
ro: ro,
})
}
// Tables iterator index.
type tFilesArrayIndexer struct {
tFiles
tops *tOps
icmp *iComparer
slice *util.Range
ro *opt.ReadOptions
}
func (a *tFilesArrayIndexer) Search(key []byte) int {
return a.searchMax(a.icmp, internalKey(key))
}
func (a *tFilesArrayIndexer) Get(i int) iterator.Iterator {
if i == 0 || i == a.Len()-1 {
return a.tops.newIterator(a.tFiles[i], a.slice, a.ro)
}
return a.tops.newIterator(a.tFiles[i], nil, a.ro)
}
// Helper type for sortByKey.
type tFilesSortByKey struct {
tFiles
icmp *iComparer
}
func (x *tFilesSortByKey) Less(i, j int) bool {
return x.lessByKey(x.icmp, i, j)
}
// Helper type for sortByNum. | }
func (x *tFilesSortByNum) Less(i, j int) bool {
return x.lessByNum(i, j)
}
// Table operations.
type tOps struct {
s *session
noSync bool
evictRemoved bool
cache *cache.Cache
bcache *cache.Cache
bpool *util.BufferPool
}
// Creates an empty table and returns table writer.
func (t *tOps) create() (*tWriter, error) {
fd := storage.FileDesc{Type: storage.TypeTable, Num: t.s.allocFileNum()}
fw, err := t.s.stor.Create(fd)
if err != nil {
return nil, err
}
return &tWriter{
t: t,
fd: fd,
w: fw,
tw: table.NewWriter(fw, t.s.o.Options),
}, nil
}
// Builds table from src iterator.
func (t *tOps) createFrom(src iterator.Iterator) (f *tFile, n int, err error) {
w, err := t.create()
if err != nil {
return
}
defer func() {
if err != nil {
w.drop()
}
}()
for src.Next() {
err = w.append(src.Key(), src.Value())
if err != nil {
return
}
}
err = src.Error()
if err != nil {
return
}
n = w.tw.EntriesLen()
f, err = w | type tFilesSortByNum struct {
tFiles | random_line_split |
table.go | 25MB of IO:
// 1MB read from this level
// 10-12MB read from next level (boundaries may be misaligned)
// 10-12MB written to next level
// This implies that 25 seeks cost the same as the compaction
// of 1MB of data. I.e., one seek costs approximately the
// same as the compaction of 40KB of data. We are a little
// conservative and allow approximately one seek for every 16KB
// of data before triggering a compaction.
f.seekLeft = int32(size / 16384)
if f.seekLeft < 100 {
f.seekLeft = 100
}
return f
}
func tableFileFromRecord(r atRecord) *tFile {
return newTableFile(storage.FileDesc{Type: storage.TypeTable, Num: r.num}, r.size, r.imin, r.imax)
}
// tFiles hold multiple tFile.
type tFiles []*tFile
func (tf tFiles) Len() int { return len(tf) }
func (tf tFiles) Swap(i, j int) { tf[i], tf[j] = tf[j], tf[i] }
func (tf tFiles) | () string {
x := "[ "
for i, f := range tf {
if i != 0 {
x += ", "
}
x += fmt.Sprint(f.fd.Num)
}
x += " ]"
return x
}
// Returns true if i smallest key is less than j.
// This used for sort by key in ascending order.
func (tf tFiles) lessByKey(icmp *iComparer, i, j int) bool {
a, b := tf[i], tf[j]
n := icmp.Compare(a.imin, b.imin)
if n == 0 {
return a.fd.Num < b.fd.Num
}
return n < 0
}
// Returns true if i file number is greater than j.
// This used for sort by file number in descending order.
func (tf tFiles) lessByNum(i, j int) bool {
return tf[i].fd.Num > tf[j].fd.Num
}
// Sorts tables by key in ascending order.
func (tf tFiles) sortByKey(icmp *iComparer) {
sort.Sort(&tFilesSortByKey{tFiles: tf, icmp: icmp})
}
// Sorts tables by file number in descending order.
func (tf tFiles) sortByNum() {
sort.Sort(&tFilesSortByNum{tFiles: tf})
}
// Returns sum of all tables size.
func (tf tFiles) size() (sum int64) {
for _, t := range tf {
sum += t.size
}
return sum
}
// Searches smallest index of tables whose its smallest
// key is after or equal with given key.
func (tf tFiles) searchMin(icmp *iComparer, ikey internalKey) int {
return sort.Search(len(tf), func(i int) bool {
return icmp.Compare(tf[i].imin, ikey) >= 0
})
}
// Searches smallest index of tables whose its largest
// key is after or equal with given key.
func (tf tFiles) searchMax(icmp *iComparer, ikey internalKey) int {
return sort.Search(len(tf), func(i int) bool {
return icmp.Compare(tf[i].imax, ikey) >= 0
})
}
// Returns true if given key range overlaps with one or more
// tables key range. If unsorted is true then binary search will not be used.
func (tf tFiles) overlaps(icmp *iComparer, umin, umax []byte, unsorted bool) bool {
if unsorted {
// Check against all files.
for _, t := range tf {
if t.overlaps(icmp, umin, umax) {
return true
}
}
return false
}
i := 0
if len(umin) > 0 {
// Find the earliest possible internal key for min.
i = tf.searchMax(icmp, makeInternalKey(nil, umin, keyMaxSeq, keyTypeSeek))
}
if i >= len(tf) {
// Beginning of range is after all files, so no overlap.
return false
}
return !tf[i].before(icmp, umax)
}
// Returns tables whose its key range overlaps with given key range.
// Range will be expanded if ukey found hop across tables.
// If overlapped is true then the search will be restarted if umax
// expanded.
// The dst content will be overwritten.
func (tf tFiles) getOverlaps(dst tFiles, icmp *iComparer, umin, umax []byte, overlapped bool) tFiles {
dst = dst[:0]
for i := 0; i < len(tf); {
t := tf[i]
if t.overlaps(icmp, umin, umax) {
if umin != nil && icmp.uCompare(t.imin.ukey(), umin) < 0 {
umin = t.imin.ukey()
dst = dst[:0]
i = 0
continue
} else if umax != nil && icmp.uCompare(t.imax.ukey(), umax) > 0 {
umax = t.imax.ukey()
// Restart search if it is overlapped.
if overlapped {
dst = dst[:0]
i = 0
continue
}
}
dst = append(dst, t)
}
i++
}
return dst
}
// Returns tables key range.
func (tf tFiles) getRange(icmp *iComparer) (imin, imax internalKey) {
for i, t := range tf {
if i == 0 {
imin, imax = t.imin, t.imax
continue
}
if icmp.Compare(t.imin, imin) < 0 {
imin = t.imin
}
if icmp.Compare(t.imax, imax) > 0 {
imax = t.imax
}
}
return
}
// Creates iterator index from tables.
func (tf tFiles) newIndexIterator(tops *tOps, icmp *iComparer, slice *util.Range, ro *opt.ReadOptions) iterator.IteratorIndexer {
if slice != nil {
var start, limit int
if slice.Start != nil {
start = tf.searchMax(icmp, internalKey(slice.Start))
}
if slice.Limit != nil {
limit = tf.searchMin(icmp, internalKey(slice.Limit))
} else {
limit = tf.Len()
}
tf = tf[start:limit]
}
return iterator.NewArrayIndexer(&tFilesArrayIndexer{
tFiles: tf,
tops: tops,
icmp: icmp,
slice: slice,
ro: ro,
})
}
// Tables iterator index.
type tFilesArrayIndexer struct {
tFiles
tops *tOps
icmp *iComparer
slice *util.Range
ro *opt.ReadOptions
}
func (a *tFilesArrayIndexer) Search(key []byte) int {
return a.searchMax(a.icmp, internalKey(key))
}
func (a *tFilesArrayIndexer) Get(i int) iterator.Iterator {
if i == 0 || i == a.Len()-1 {
return a.tops.newIterator(a.tFiles[i], a.slice, a.ro)
}
return a.tops.newIterator(a.tFiles[i], nil, a.ro)
}
// Helper type for sortByKey.
type tFilesSortByKey struct {
tFiles
icmp *iComparer
}
func (x *tFilesSortByKey) Less(i, j int) bool {
return x.lessByKey(x.icmp, i, j)
}
// Helper type for sortByNum.
type tFilesSortByNum struct {
tFiles
}
func (x *tFilesSortByNum) Less(i, j int) bool {
return x.lessByNum(i, j)
}
// Table operations.
type tOps struct {
s *session
noSync bool
evictRemoved bool
cache *cache.Cache
bcache *cache.Cache
bpool *util.BufferPool
}
// Creates an empty table and returns table writer.
func (t *tOps) create() (*tWriter, error) {
fd := storage.FileDesc{Type: storage.TypeTable, Num: t.s.allocFileNum()}
fw, err := t.s.stor.Create(fd)
if err != nil {
return nil, err
}
return &tWriter{
t: t,
fd: fd,
w: fw,
tw: table.NewWriter(fw, t.s.o.Options),
}, nil
}
// Builds table from src iterator.
func (t *tOps) createFrom(src iterator.Iterator) (f *tFile, n int, err error) {
w, err := t.create()
if err != nil {
return
}
defer func() {
if err != nil {
w.drop()
}
}()
for src.Next() {
err = w.append(src.Key(), src.Value())
if err != nil {
return
}
}
err = src.Error()
if err != nil {
return
}
n = w.tw.EntriesLen()
f, err | nums | identifier_name |
table.go | .Num
}
// Sorts tables by key in ascending order.
func (tf tFiles) sortByKey(icmp *iComparer) {
sort.Sort(&tFilesSortByKey{tFiles: tf, icmp: icmp})
}
// Sorts tables by file number in descending order.
func (tf tFiles) sortByNum() {
sort.Sort(&tFilesSortByNum{tFiles: tf})
}
// Returns sum of all tables size.
func (tf tFiles) size() (sum int64) {
for _, t := range tf {
sum += t.size
}
return sum
}
// Searches smallest index of tables whose its smallest
// key is after or equal with given key.
func (tf tFiles) searchMin(icmp *iComparer, ikey internalKey) int {
return sort.Search(len(tf), func(i int) bool {
return icmp.Compare(tf[i].imin, ikey) >= 0
})
}
// Searches smallest index of tables whose its largest
// key is after or equal with given key.
func (tf tFiles) searchMax(icmp *iComparer, ikey internalKey) int {
return sort.Search(len(tf), func(i int) bool {
return icmp.Compare(tf[i].imax, ikey) >= 0
})
}
// Returns true if given key range overlaps with one or more
// tables key range. If unsorted is true then binary search will not be used.
func (tf tFiles) overlaps(icmp *iComparer, umin, umax []byte, unsorted bool) bool {
if unsorted {
// Check against all files.
for _, t := range tf {
if t.overlaps(icmp, umin, umax) {
return true
}
}
return false
}
i := 0
if len(umin) > 0 {
// Find the earliest possible internal key for min.
i = tf.searchMax(icmp, makeInternalKey(nil, umin, keyMaxSeq, keyTypeSeek))
}
if i >= len(tf) {
// Beginning of range is after all files, so no overlap.
return false
}
return !tf[i].before(icmp, umax)
}
// Returns tables whose its key range overlaps with given key range.
// Range will be expanded if ukey found hop across tables.
// If overlapped is true then the search will be restarted if umax
// expanded.
// The dst content will be overwritten.
func (tf tFiles) getOverlaps(dst tFiles, icmp *iComparer, umin, umax []byte, overlapped bool) tFiles {
dst = dst[:0]
for i := 0; i < len(tf); {
t := tf[i]
if t.overlaps(icmp, umin, umax) {
if umin != nil && icmp.uCompare(t.imin.ukey(), umin) < 0 {
umin = t.imin.ukey()
dst = dst[:0]
i = 0
continue
} else if umax != nil && icmp.uCompare(t.imax.ukey(), umax) > 0 {
umax = t.imax.ukey()
// Restart search if it is overlapped.
if overlapped {
dst = dst[:0]
i = 0
continue
}
}
dst = append(dst, t)
}
i++
}
return dst
}
// Returns tables key range.
func (tf tFiles) getRange(icmp *iComparer) (imin, imax internalKey) {
for i, t := range tf {
if i == 0 {
imin, imax = t.imin, t.imax
continue
}
if icmp.Compare(t.imin, imin) < 0 {
imin = t.imin
}
if icmp.Compare(t.imax, imax) > 0 {
imax = t.imax
}
}
return
}
// Creates iterator index from tables.
func (tf tFiles) newIndexIterator(tops *tOps, icmp *iComparer, slice *util.Range, ro *opt.ReadOptions) iterator.IteratorIndexer {
if slice != nil {
var start, limit int
if slice.Start != nil {
start = tf.searchMax(icmp, internalKey(slice.Start))
}
if slice.Limit != nil {
limit = tf.searchMin(icmp, internalKey(slice.Limit))
} else {
limit = tf.Len()
}
tf = tf[start:limit]
}
return iterator.NewArrayIndexer(&tFilesArrayIndexer{
tFiles: tf,
tops: tops,
icmp: icmp,
slice: slice,
ro: ro,
})
}
// Tables iterator index.
type tFilesArrayIndexer struct {
tFiles
tops *tOps
icmp *iComparer
slice *util.Range
ro *opt.ReadOptions
}
func (a *tFilesArrayIndexer) Search(key []byte) int {
return a.searchMax(a.icmp, internalKey(key))
}
func (a *tFilesArrayIndexer) Get(i int) iterator.Iterator {
if i == 0 || i == a.Len()-1 {
return a.tops.newIterator(a.tFiles[i], a.slice, a.ro)
}
return a.tops.newIterator(a.tFiles[i], nil, a.ro)
}
// Helper type for sortByKey.
type tFilesSortByKey struct {
tFiles
icmp *iComparer
}
func (x *tFilesSortByKey) Less(i, j int) bool {
return x.lessByKey(x.icmp, i, j)
}
// Helper type for sortByNum.
type tFilesSortByNum struct {
tFiles
}
func (x *tFilesSortByNum) Less(i, j int) bool {
return x.lessByNum(i, j)
}
// Table operations.
type tOps struct {
s *session
noSync bool
evictRemoved bool
cache *cache.Cache
bcache *cache.Cache
bpool *util.BufferPool
}
// Creates an empty table and returns table writer.
func (t *tOps) create() (*tWriter, error) {
fd := storage.FileDesc{Type: storage.TypeTable, Num: t.s.allocFileNum()}
fw, err := t.s.stor.Create(fd)
if err != nil {
return nil, err
}
return &tWriter{
t: t,
fd: fd,
w: fw,
tw: table.NewWriter(fw, t.s.o.Options),
}, nil
}
// Builds table from src iterator.
func (t *tOps) createFrom(src iterator.Iterator) (f *tFile, n int, err error) {
w, err := t.create()
if err != nil {
return
}
defer func() {
if err != nil {
w.drop()
}
}()
for src.Next() {
err = w.append(src.Key(), src.Value())
if err != nil {
return
}
}
err = src.Error()
if err != nil {
return
}
n = w.tw.EntriesLen()
f, err = w.finish()
return
}
// Opens table. It returns a cache handle, which should
// be released after use.
func (t *tOps) open(f *tFile) (ch *cache.Handle, err error) {
ch = t.cache.Get(0, uint64(f.fd.Num), func() (size int, value cache.Value) {
var r storage.Reader
r, err = t.s.stor.Open(f.fd)
if err != nil {
return 0, nil
}
var bcache *cache.NamespaceGetter
if t.bcache != nil {
bcache = &cache.NamespaceGetter{Cache: t.bcache, NS: uint64(f.fd.Num)}
}
var tr *table.Reader
tr, err = table.NewReader(r, f.size, f.fd, bcache, t.bpool, t.s.o.Options)
if err != nil {
r.Close()
return 0, nil
}
return 1, tr
})
if ch == nil && err == nil {
err = ErrClosed
}
return
}
// Finds key/value pair whose key is greater than or equal to the
// given key.
func (t *tOps) find(f *tFile, key []byte, ro *opt.ReadOptions) (rkey, rvalue []byte, err error) {
ch, err := t.open(f)
if err != nil {
return nil, nil, err
}
defer ch.Release()
return ch.Value().(*table.Reader).Find(key, true, ro)
}
// Finds key that is greater than or equal to the given key.
func (t *tOps) findKey(f *tFile, key []byte, ro *opt.ReadOptions) (rkey []byte, err error) {
ch, err := t.open(f)
if err != nil {
return nil, err
}
defer ch.Release()
return ch.Value().(*table.Reader).FindKey(key, true, ro)
}
// Returns approximate offset of the given key.
func (t *tOps) offsetOf(f *tFile, key []byte) (offset int64, err error) {
ch, err := t.open(f)
if err != nil | {
return
} | conditional_block | |
main.go | "How often a heartbeat should be sent")
entriesPerMsg = flag.Uint64("entriespermsg", 64, "Entries per Appendentries message")
catchupMultiplier = flag.Uint64("catchupmultiplier", 1024, "How many more times entries per message allowed during catch up")
cache = flag.Int("cache", 1024*1024*64, "How many entries should be kept in memory") // ~1GB @ 16bytes per entry.
maxgrpc = flag.Int("maxgrpc", 128<<20, "Max GRPC message size") // ~128MB.
checkQuorum = flag.Bool("checkquorum", false, "Require a quorum of responses to a heartbeat to retain leadership")
order = flag.Bool("ordergorums", true, "Force ordering of per node RPCs with Gorums")
)
func main() {
var (
id = flag.Uint64("id", 0, "server ID")
servers = flag.String("servers", ":9201,:9202,:9203,:9204,:9205,:9206,:9207", "comma separated list of server addresses")
cluster = flag.String("cluster", "1,2,3", "comma separated list of server ids to form cluster with, [1 >= id <= len(servers)]")
backend = flag.String("backend", "gorums", "Raft backend to use [gorums|etcd|hashicorp]")
)
flag.Parse()
rand.Seed(time.Now().UnixNano())
if *id == 0 {
fmt.Print("-id argument is required\n\n")
flag.Usage()
os.Exit(1)
}
nodes := strings.Split(*servers, ",")
if len(nodes) == 0 {
fmt.Print("-server argument is required\n\n")
flag.Usage()
os.Exit(1)
}
selected := strings.Split(*cluster, ",")
var ids []uint64
for _, sid := range selected {
id, err := strconv.ParseUint(sid, 10, 64)
if err != nil {
fmt.Print("could not parse -cluster argument\n\n")
flag.Usage()
os.Exit(1)
}
if id <= 0 || id > uint64(len(nodes)) {
fmt.Print("invalid -cluster argument\n\n")
flag.Usage()
os.Exit(1)
}
ids = append(ids, id)
}
if len(ids) == 0 {
fmt.Print("-cluster argument is required\n\n")
flag.Usage()
os.Exit(1)
}
if len(ids) > len(nodes) {
fmt.Print("-cluster specifies too many servers\n\n")
flag.Usage()
os.Exit(1)
}
if *entriesPerMsg < 1 {
fmt.Print("-entriespermsg must be atleast 1\n\n")
flag.Usage()
os.Exit(1)
}
if *catchupMultiplier < 1 {
fmt.Print("-catchupmultiplier must be atleast 1\n\n")
flag.Usage()
os.Exit(1)
}
logger := logrus.New()
logFile, err := os.OpenFile(
fmt.Sprintf("%s%sraft%.2d.log", os.TempDir(), string(filepath.Separator), *id),
os.O_CREATE|os.O_TRUNC|os.O_APPEND|os.O_WRONLY, 0600,
)
if err != nil {
logger.Fatal(err)
}
logger.Hooks.Add(NewLogToFileHook(logFile))
if *bench |
grpclog.SetLogger(logger)
lis, err := net.Listen("tcp", nodes[*id-1])
if err != nil {
logger.Fatal(err)
}
grpcServer := grpc.NewServer(grpc.MaxMsgSize(*maxgrpc))
if *serverMetrics {
go func() {
http.Handle("/metrics", promhttp.Handler())
logger.Fatal(http.ListenAndServe(fmt.Sprintf(":590%d", *id), nil))
}()
}
lat := raft.NewLatency()
event := raft.NewEvent()
var once sync.Once
writeData := func() {
lat.Write(fmt.Sprintf("./latency-%v.csv", time.Now().UnixNano()))
event.Write(fmt.Sprintf("./event-%v.csv", time.Now().UnixNano()))
}
c := make(chan os.Signal, 1)
signal.Notify(c, os.Interrupt)
go func() {
<-c
event.Record(raft.EventTerminated)
once.Do(writeData)
os.Exit(1)
}()
defer func() {
once.Do(writeData)
}()
switch *backend {
case bgorums:
rungorums(logger, lis, grpcServer, *id, ids, nodes, lat, event)
case betcd:
runetcd(logger, lis, grpcServer, *id, ids, nodes, lat, event)
case bhashicorp:
runhashicorp(logger, lis, grpcServer, *id, ids, nodes, lat, event)
}
}
func runhashicorp(
logger logrus.FieldLogger,
lis net.Listener, grpcServer *grpc.Server,
id uint64, ids []uint64, nodes []string,
lat *raft.Latency, event *raft.Event,
) {
servers := make([]hashic.Server, len(nodes))
for i, addr := range nodes {
host, port, err := net.SplitHostPort(addr)
if err != nil {
logger.Fatal(err)
}
p, _ := strconv.Atoi(port)
addr = host + ":" + strconv.Itoa(p-100)
suffrage := hashic.Voter
if !contains(uint64(i+1), ids) {
suffrage = hashic.Nonvoter
}
servers[i] = hashic.Server{
Suffrage: suffrage,
ID: hashic.ServerID(addr),
Address: hashic.ServerAddress(addr),
}
}
addr, err := net.ResolveTCPAddr("tcp", string(servers[id-1].Address))
if err != nil {
logger.Fatal(err)
}
trans, err := hashic.NewTCPTransport(string(servers[id-1].Address), addr, len(nodes)+1, 10*time.Second, os.Stderr)
if err != nil {
logger.Fatal(err)
}
path := fmt.Sprintf("hashicorp%.2d.bolt", id)
overwrite := !*recover
// Check if file already exists.
if _, err := os.Stat(path); err != nil {
if os.IsNotExist(err) {
// We don't need to overwrite a file that doesn't exist.
overwrite = false
} else {
// If we are unable to verify the existence of the file,
// there is probably a permission problem.
logger.Fatal(err)
}
}
if overwrite {
if err := os.Remove(path); err != nil {
logger.Fatal(err)
}
}
logs, err := raftboltdb.NewBoltStore(path)
if err != nil {
logger.Fatal(err)
}
cachedlogs, err := hashic.NewLogCache(*cache, logs)
snaps := hashic.NewInmemSnapshotStore()
cfg := &hashic.Config{
LocalID: servers[id-1].ID,
ProtocolVersion: hashic.ProtocolVersionMax,
HeartbeatTimeout: *electionTimeout,
ElectionTimeout: *electionTimeout,
CommitTimeout: *heartbeatTimeout,
MaxAppendEntries: int(*entriesPerMsg),
ShutdownOnRemove: true,
TrailingLogs: math.MaxUint64,
SnapshotInterval: 120 * time.Hour,
SnapshotThreshold: math.MaxUint64,
LeaderLeaseTimeout: *electionTimeout,
}
leaderOut := make(chan struct{})
node := hraft.NewRaft(
logger, NewStore(), cfg, servers,
trans, cachedlogs, hashic.NewInmemStore(), snaps,
ids, lat, event, leaderOut, id, *checkQuorum,
)
service := NewService(logger, node, leaderOut)
rkvpb.RegisterRKVServer(grpcServer, service)
logger.Fatal(grpcServer.Serve(lis))
}
func runetcd(
logger logrus.FieldLogger,
lis net.Listener, grpcServer *grpc.Server,
id uint64, ids []uint64, nodes []string,
lat *raft.Latency, event *raft.Event,
) {
peers := make([]etcdraft.Peer, len(ids))
for i, nid := range ids {
addr := nodes[i]
host, port, err := net.SplitHostPort(addr)
if err != nil {
logger.Fatal(err)
}
p, _ := strconv.Atoi(port)
ur, err := url.Parse("http://" + addr)
ur.Host = host + ":" + strconv.Itoa(p-100)
if err != nil {
logger.Fatal(err)
}
peers[i] = etcdraft.Peer{
ID: nid,
Context: []byte(ur.String()),
}
}
dir := fmt.Sprintf("etcdwal%.2d", id)
switch {
case wal.Exist(dir) && !*recover:
if err := os.RemoveAll | {
logger.Out = ioutil.Discard
grpc.EnableTracing = false
} | conditional_block |
main.go | "How often a heartbeat should be sent")
entriesPerMsg = flag.Uint64("entriespermsg", 64, "Entries per Appendentries message")
catchupMultiplier = flag.Uint64("catchupmultiplier", 1024, "How many more times entries per message allowed during catch up")
cache = flag.Int("cache", 1024*1024*64, "How many entries should be kept in memory") // ~1GB @ 16bytes per entry.
maxgrpc = flag.Int("maxgrpc", 128<<20, "Max GRPC message size") // ~128MB.
checkQuorum = flag.Bool("checkquorum", false, "Require a quorum of responses to a heartbeat to retain leadership")
order = flag.Bool("ordergorums", true, "Force ordering of per node RPCs with Gorums")
)
func main() {
var (
id = flag.Uint64("id", 0, "server ID")
servers = flag.String("servers", ":9201,:9202,:9203,:9204,:9205,:9206,:9207", "comma separated list of server addresses")
cluster = flag.String("cluster", "1,2,3", "comma separated list of server ids to form cluster with, [1 >= id <= len(servers)]")
backend = flag.String("backend", "gorums", "Raft backend to use [gorums|etcd|hashicorp]")
)
flag.Parse()
rand.Seed(time.Now().UnixNano())
if *id == 0 {
fmt.Print("-id argument is required\n\n")
flag.Usage()
os.Exit(1)
}
nodes := strings.Split(*servers, ",")
if len(nodes) == 0 {
fmt.Print("-server argument is required\n\n")
flag.Usage()
os.Exit(1)
}
selected := strings.Split(*cluster, ",")
var ids []uint64
for _, sid := range selected {
id, err := strconv.ParseUint(sid, 10, 64)
if err != nil {
fmt.Print("could not parse -cluster argument\n\n")
flag.Usage()
os.Exit(1)
}
if id <= 0 || id > uint64(len(nodes)) {
fmt.Print("invalid -cluster argument\n\n")
flag.Usage()
os.Exit(1)
}
ids = append(ids, id)
}
if len(ids) == 0 {
fmt.Print("-cluster argument is required\n\n")
flag.Usage()
os.Exit(1)
}
if len(ids) > len(nodes) {
fmt.Print("-cluster specifies too many servers\n\n")
flag.Usage()
os.Exit(1)
}
if *entriesPerMsg < 1 {
fmt.Print("-entriespermsg must be atleast 1\n\n")
flag.Usage()
os.Exit(1)
}
if *catchupMultiplier < 1 {
fmt.Print("-catchupmultiplier must be atleast 1\n\n")
flag.Usage()
os.Exit(1)
}
logger := logrus.New()
logFile, err := os.OpenFile(
fmt.Sprintf("%s%sraft%.2d.log", os.TempDir(), string(filepath.Separator), *id),
os.O_CREATE|os.O_TRUNC|os.O_APPEND|os.O_WRONLY, 0600,
)
if err != nil {
logger.Fatal(err)
}
logger.Hooks.Add(NewLogToFileHook(logFile))
if *bench {
logger.Out = ioutil.Discard
grpc.EnableTracing = false
}
grpclog.SetLogger(logger)
lis, err := net.Listen("tcp", nodes[*id-1])
if err != nil {
logger.Fatal(err)
}
grpcServer := grpc.NewServer(grpc.MaxMsgSize(*maxgrpc))
if *serverMetrics {
go func() {
http.Handle("/metrics", promhttp.Handler())
logger.Fatal(http.ListenAndServe(fmt.Sprintf(":590%d", *id), nil))
}()
}
lat := raft.NewLatency()
event := raft.NewEvent()
var once sync.Once
writeData := func() {
lat.Write(fmt.Sprintf("./latency-%v.csv", time.Now().UnixNano()))
event.Write(fmt.Sprintf("./event-%v.csv", time.Now().UnixNano()))
}
c := make(chan os.Signal, 1)
signal.Notify(c, os.Interrupt)
go func() {
<-c
event.Record(raft.EventTerminated)
once.Do(writeData)
os.Exit(1)
}()
defer func() {
once.Do(writeData)
}()
switch *backend {
case bgorums:
rungorums(logger, lis, grpcServer, *id, ids, nodes, lat, event)
case betcd:
runetcd(logger, lis, grpcServer, *id, ids, nodes, lat, event)
case bhashicorp:
runhashicorp(logger, lis, grpcServer, *id, ids, nodes, lat, event)
}
}
func runhashicorp(
logger logrus.FieldLogger,
lis net.Listener, grpcServer *grpc.Server,
id uint64, ids []uint64, nodes []string,
lat *raft.Latency, event *raft.Event,
) | }
}
addr, err := net.ResolveTCPAddr("tcp", string(servers[id-1].Address))
if err != nil {
logger.Fatal(err)
}
trans, err := hashic.NewTCPTransport(string(servers[id-1].Address), addr, len(nodes)+1, 10*time.Second, os.Stderr)
if err != nil {
logger.Fatal(err)
}
path := fmt.Sprintf("hashicorp%.2d.bolt", id)
overwrite := !*recover
// Check if file already exists.
if _, err := os.Stat(path); err != nil {
if os.IsNotExist(err) {
// We don't need to overwrite a file that doesn't exist.
overwrite = false
} else {
// If we are unable to verify the existence of the file,
// there is probably a permission problem.
logger.Fatal(err)
}
}
if overwrite {
if err := os.Remove(path); err != nil {
logger.Fatal(err)
}
}
logs, err := raftboltdb.NewBoltStore(path)
if err != nil {
logger.Fatal(err)
}
cachedlogs, err := hashic.NewLogCache(*cache, logs)
snaps := hashic.NewInmemSnapshotStore()
cfg := &hashic.Config{
LocalID: servers[id-1].ID,
ProtocolVersion: hashic.ProtocolVersionMax,
HeartbeatTimeout: *electionTimeout,
ElectionTimeout: *electionTimeout,
CommitTimeout: *heartbeatTimeout,
MaxAppendEntries: int(*entriesPerMsg),
ShutdownOnRemove: true,
TrailingLogs: math.MaxUint64,
SnapshotInterval: 120 * time.Hour,
SnapshotThreshold: math.MaxUint64,
LeaderLeaseTimeout: *electionTimeout,
}
leaderOut := make(chan struct{})
node := hraft.NewRaft(
logger, NewStore(), cfg, servers,
trans, cachedlogs, hashic.NewInmemStore(), snaps,
ids, lat, event, leaderOut, id, *checkQuorum,
)
service := NewService(logger, node, leaderOut)
rkvpb.RegisterRKVServer(grpcServer, service)
logger.Fatal(grpcServer.Serve(lis))
}
func runetcd(
logger logrus.FieldLogger,
lis net.Listener, grpcServer *grpc.Server,
id uint64, ids []uint64, nodes []string,
lat *raft.Latency, event *raft.Event,
) {
peers := make([]etcdraft.Peer, len(ids))
for i, nid := range ids {
addr := nodes[i]
host, port, err := net.SplitHostPort(addr)
if err != nil {
logger.Fatal(err)
}
p, _ := strconv.Atoi(port)
ur, err := url.Parse("http://" + addr)
ur.Host = host + ":" + strconv.Itoa(p-100)
if err != nil {
logger.Fatal(err)
}
peers[i] = etcdraft.Peer{
ID: nid,
Context: []byte(ur.String()),
}
}
dir := fmt.Sprintf("etcdwal%.2d", id)
switch {
case wal.Exist(dir) && !*recover:
if err := os.RemoveAll(dir | {
servers := make([]hashic.Server, len(nodes))
for i, addr := range nodes {
host, port, err := net.SplitHostPort(addr)
if err != nil {
logger.Fatal(err)
}
p, _ := strconv.Atoi(port)
addr = host + ":" + strconv.Itoa(p-100)
suffrage := hashic.Voter
if !contains(uint64(i+1), ids) {
suffrage = hashic.Nonvoter
}
servers[i] = hashic.Server{
Suffrage: suffrage,
ID: hashic.ServerID(addr),
Address: hashic.ServerAddress(addr), | identifier_body |
main.go | "How often a heartbeat should be sent")
entriesPerMsg = flag.Uint64("entriespermsg", 64, "Entries per Appendentries message")
catchupMultiplier = flag.Uint64("catchupmultiplier", 1024, "How many more times entries per message allowed during catch up")
cache = flag.Int("cache", 1024*1024*64, "How many entries should be kept in memory") // ~1GB @ 16bytes per entry.
maxgrpc = flag.Int("maxgrpc", 128<<20, "Max GRPC message size") // ~128MB.
checkQuorum = flag.Bool("checkquorum", false, "Require a quorum of responses to a heartbeat to retain leadership")
order = flag.Bool("ordergorums", true, "Force ordering of per node RPCs with Gorums")
)
func | () {
var (
id = flag.Uint64("id", 0, "server ID")
servers = flag.String("servers", ":9201,:9202,:9203,:9204,:9205,:9206,:9207", "comma separated list of server addresses")
cluster = flag.String("cluster", "1,2,3", "comma separated list of server ids to form cluster with, [1 >= id <= len(servers)]")
backend = flag.String("backend", "gorums", "Raft backend to use [gorums|etcd|hashicorp]")
)
flag.Parse()
rand.Seed(time.Now().UnixNano())
if *id == 0 {
fmt.Print("-id argument is required\n\n")
flag.Usage()
os.Exit(1)
}
nodes := strings.Split(*servers, ",")
if len(nodes) == 0 {
fmt.Print("-server argument is required\n\n")
flag.Usage()
os.Exit(1)
}
selected := strings.Split(*cluster, ",")
var ids []uint64
for _, sid := range selected {
id, err := strconv.ParseUint(sid, 10, 64)
if err != nil {
fmt.Print("could not parse -cluster argument\n\n")
flag.Usage()
os.Exit(1)
}
if id <= 0 || id > uint64(len(nodes)) {
fmt.Print("invalid -cluster argument\n\n")
flag.Usage()
os.Exit(1)
}
ids = append(ids, id)
}
if len(ids) == 0 {
fmt.Print("-cluster argument is required\n\n")
flag.Usage()
os.Exit(1)
}
if len(ids) > len(nodes) {
fmt.Print("-cluster specifies too many servers\n\n")
flag.Usage()
os.Exit(1)
}
if *entriesPerMsg < 1 {
fmt.Print("-entriespermsg must be atleast 1\n\n")
flag.Usage()
os.Exit(1)
}
if *catchupMultiplier < 1 {
fmt.Print("-catchupmultiplier must be atleast 1\n\n")
flag.Usage()
os.Exit(1)
}
logger := logrus.New()
logFile, err := os.OpenFile(
fmt.Sprintf("%s%sraft%.2d.log", os.TempDir(), string(filepath.Separator), *id),
os.O_CREATE|os.O_TRUNC|os.O_APPEND|os.O_WRONLY, 0600,
)
if err != nil {
logger.Fatal(err)
}
logger.Hooks.Add(NewLogToFileHook(logFile))
if *bench {
logger.Out = ioutil.Discard
grpc.EnableTracing = false
}
grpclog.SetLogger(logger)
lis, err := net.Listen("tcp", nodes[*id-1])
if err != nil {
logger.Fatal(err)
}
grpcServer := grpc.NewServer(grpc.MaxMsgSize(*maxgrpc))
if *serverMetrics {
go func() {
http.Handle("/metrics", promhttp.Handler())
logger.Fatal(http.ListenAndServe(fmt.Sprintf(":590%d", *id), nil))
}()
}
lat := raft.NewLatency()
event := raft.NewEvent()
var once sync.Once
writeData := func() {
lat.Write(fmt.Sprintf("./latency-%v.csv", time.Now().UnixNano()))
event.Write(fmt.Sprintf("./event-%v.csv", time.Now().UnixNano()))
}
c := make(chan os.Signal, 1)
signal.Notify(c, os.Interrupt)
go func() {
<-c
event.Record(raft.EventTerminated)
once.Do(writeData)
os.Exit(1)
}()
defer func() {
once.Do(writeData)
}()
switch *backend {
case bgorums:
rungorums(logger, lis, grpcServer, *id, ids, nodes, lat, event)
case betcd:
runetcd(logger, lis, grpcServer, *id, ids, nodes, lat, event)
case bhashicorp:
runhashicorp(logger, lis, grpcServer, *id, ids, nodes, lat, event)
}
}
func runhashicorp(
logger logrus.FieldLogger,
lis net.Listener, grpcServer *grpc.Server,
id uint64, ids []uint64, nodes []string,
lat *raft.Latency, event *raft.Event,
) {
servers := make([]hashic.Server, len(nodes))
for i, addr := range nodes {
host, port, err := net.SplitHostPort(addr)
if err != nil {
logger.Fatal(err)
}
p, _ := strconv.Atoi(port)
addr = host + ":" + strconv.Itoa(p-100)
suffrage := hashic.Voter
if !contains(uint64(i+1), ids) {
suffrage = hashic.Nonvoter
}
servers[i] = hashic.Server{
Suffrage: suffrage,
ID: hashic.ServerID(addr),
Address: hashic.ServerAddress(addr),
}
}
addr, err := net.ResolveTCPAddr("tcp", string(servers[id-1].Address))
if err != nil {
logger.Fatal(err)
}
trans, err := hashic.NewTCPTransport(string(servers[id-1].Address), addr, len(nodes)+1, 10*time.Second, os.Stderr)
if err != nil {
logger.Fatal(err)
}
path := fmt.Sprintf("hashicorp%.2d.bolt", id)
overwrite := !*recover
// Check if file already exists.
if _, err := os.Stat(path); err != nil {
if os.IsNotExist(err) {
// We don't need to overwrite a file that doesn't exist.
overwrite = false
} else {
// If we are unable to verify the existence of the file,
// there is probably a permission problem.
logger.Fatal(err)
}
}
if overwrite {
if err := os.Remove(path); err != nil {
logger.Fatal(err)
}
}
logs, err := raftboltdb.NewBoltStore(path)
if err != nil {
logger.Fatal(err)
}
cachedlogs, err := hashic.NewLogCache(*cache, logs)
snaps := hashic.NewInmemSnapshotStore()
cfg := &hashic.Config{
LocalID: servers[id-1].ID,
ProtocolVersion: hashic.ProtocolVersionMax,
HeartbeatTimeout: *electionTimeout,
ElectionTimeout: *electionTimeout,
CommitTimeout: *heartbeatTimeout,
MaxAppendEntries: int(*entriesPerMsg),
ShutdownOnRemove: true,
TrailingLogs: math.MaxUint64,
SnapshotInterval: 120 * time.Hour,
SnapshotThreshold: math.MaxUint64,
LeaderLeaseTimeout: *electionTimeout,
}
leaderOut := make(chan struct{})
node := hraft.NewRaft(
logger, NewStore(), cfg, servers,
trans, cachedlogs, hashic.NewInmemStore(), snaps,
ids, lat, event, leaderOut, id, *checkQuorum,
)
service := NewService(logger, node, leaderOut)
rkvpb.RegisterRKVServer(grpcServer, service)
logger.Fatal(grpcServer.Serve(lis))
}
func runetcd(
logger logrus.FieldLogger,
lis net.Listener, grpcServer *grpc.Server,
id uint64, ids []uint64, nodes []string,
lat *raft.Latency, event *raft.Event,
) {
peers := make([]etcdraft.Peer, len(ids))
for i, nid := range ids {
addr := nodes[i]
host, port, err := net.SplitHostPort(addr)
if err != nil {
logger.Fatal(err)
}
p, _ := strconv.Atoi(port)
ur, err := url.Parse("http://" + addr)
ur.Host = host + ":" + strconv.Itoa(p-100)
if err != nil {
logger.Fatal(err)
}
peers[i] = etcdraft.Peer{
ID: nid,
Context: []byte(ur.String()),
}
}
dir := fmt.Sprintf("etcdwal%.2d", id)
switch {
case wal.Exist(dir) && !*recover:
if err := os.RemoveAll(dir | main | identifier_name |
main.go | sid := range selected {
id, err := strconv.ParseUint(sid, 10, 64)
if err != nil {
fmt.Print("could not parse -cluster argument\n\n")
flag.Usage()
os.Exit(1)
}
if id <= 0 || id > uint64(len(nodes)) {
fmt.Print("invalid -cluster argument\n\n")
flag.Usage()
os.Exit(1)
}
ids = append(ids, id)
}
if len(ids) == 0 {
fmt.Print("-cluster argument is required\n\n")
flag.Usage()
os.Exit(1)
}
if len(ids) > len(nodes) {
fmt.Print("-cluster specifies too many servers\n\n")
flag.Usage()
os.Exit(1)
}
if *entriesPerMsg < 1 {
fmt.Print("-entriespermsg must be atleast 1\n\n")
flag.Usage()
os.Exit(1)
}
if *catchupMultiplier < 1 {
fmt.Print("-catchupmultiplier must be atleast 1\n\n")
flag.Usage()
os.Exit(1)
}
logger := logrus.New()
logFile, err := os.OpenFile(
fmt.Sprintf("%s%sraft%.2d.log", os.TempDir(), string(filepath.Separator), *id),
os.O_CREATE|os.O_TRUNC|os.O_APPEND|os.O_WRONLY, 0600,
)
if err != nil {
logger.Fatal(err)
}
logger.Hooks.Add(NewLogToFileHook(logFile))
if *bench {
logger.Out = ioutil.Discard
grpc.EnableTracing = false
}
grpclog.SetLogger(logger)
lis, err := net.Listen("tcp", nodes[*id-1])
if err != nil {
logger.Fatal(err)
}
grpcServer := grpc.NewServer(grpc.MaxMsgSize(*maxgrpc))
if *serverMetrics {
go func() {
http.Handle("/metrics", promhttp.Handler())
logger.Fatal(http.ListenAndServe(fmt.Sprintf(":590%d", *id), nil))
}()
}
lat := raft.NewLatency()
event := raft.NewEvent()
var once sync.Once
writeData := func() {
lat.Write(fmt.Sprintf("./latency-%v.csv", time.Now().UnixNano()))
event.Write(fmt.Sprintf("./event-%v.csv", time.Now().UnixNano()))
}
c := make(chan os.Signal, 1)
signal.Notify(c, os.Interrupt)
go func() {
<-c
event.Record(raft.EventTerminated)
once.Do(writeData)
os.Exit(1)
}()
defer func() {
once.Do(writeData)
}()
switch *backend {
case bgorums:
rungorums(logger, lis, grpcServer, *id, ids, nodes, lat, event)
case betcd:
runetcd(logger, lis, grpcServer, *id, ids, nodes, lat, event)
case bhashicorp:
runhashicorp(logger, lis, grpcServer, *id, ids, nodes, lat, event)
}
}
func runhashicorp(
logger logrus.FieldLogger,
lis net.Listener, grpcServer *grpc.Server,
id uint64, ids []uint64, nodes []string,
lat *raft.Latency, event *raft.Event,
) {
servers := make([]hashic.Server, len(nodes))
for i, addr := range nodes {
host, port, err := net.SplitHostPort(addr)
if err != nil {
logger.Fatal(err)
}
p, _ := strconv.Atoi(port)
addr = host + ":" + strconv.Itoa(p-100)
suffrage := hashic.Voter
if !contains(uint64(i+1), ids) {
suffrage = hashic.Nonvoter
}
servers[i] = hashic.Server{
Suffrage: suffrage,
ID: hashic.ServerID(addr),
Address: hashic.ServerAddress(addr),
}
}
addr, err := net.ResolveTCPAddr("tcp", string(servers[id-1].Address))
if err != nil {
logger.Fatal(err)
}
trans, err := hashic.NewTCPTransport(string(servers[id-1].Address), addr, len(nodes)+1, 10*time.Second, os.Stderr)
if err != nil {
logger.Fatal(err)
}
path := fmt.Sprintf("hashicorp%.2d.bolt", id)
overwrite := !*recover
// Check if file already exists.
if _, err := os.Stat(path); err != nil {
if os.IsNotExist(err) {
// We don't need to overwrite a file that doesn't exist.
overwrite = false
} else {
// If we are unable to verify the existence of the file,
// there is probably a permission problem.
logger.Fatal(err)
}
}
if overwrite {
if err := os.Remove(path); err != nil {
logger.Fatal(err)
}
}
logs, err := raftboltdb.NewBoltStore(path)
if err != nil {
logger.Fatal(err)
}
cachedlogs, err := hashic.NewLogCache(*cache, logs)
snaps := hashic.NewInmemSnapshotStore()
cfg := &hashic.Config{
LocalID: servers[id-1].ID,
ProtocolVersion: hashic.ProtocolVersionMax,
HeartbeatTimeout: *electionTimeout,
ElectionTimeout: *electionTimeout,
CommitTimeout: *heartbeatTimeout,
MaxAppendEntries: int(*entriesPerMsg),
ShutdownOnRemove: true,
TrailingLogs: math.MaxUint64,
SnapshotInterval: 120 * time.Hour,
SnapshotThreshold: math.MaxUint64,
LeaderLeaseTimeout: *electionTimeout,
}
leaderOut := make(chan struct{})
node := hraft.NewRaft(
logger, NewStore(), cfg, servers,
trans, cachedlogs, hashic.NewInmemStore(), snaps,
ids, lat, event, leaderOut, id, *checkQuorum,
)
service := NewService(logger, node, leaderOut)
rkvpb.RegisterRKVServer(grpcServer, service)
logger.Fatal(grpcServer.Serve(lis))
}
func runetcd(
logger logrus.FieldLogger,
lis net.Listener, grpcServer *grpc.Server,
id uint64, ids []uint64, nodes []string,
lat *raft.Latency, event *raft.Event,
) {
peers := make([]etcdraft.Peer, len(ids))
for i, nid := range ids {
addr := nodes[i]
host, port, err := net.SplitHostPort(addr)
if err != nil {
logger.Fatal(err)
}
p, _ := strconv.Atoi(port)
ur, err := url.Parse("http://" + addr)
ur.Host = host + ":" + strconv.Itoa(p-100)
if err != nil {
logger.Fatal(err)
}
peers[i] = etcdraft.Peer{
ID: nid,
Context: []byte(ur.String()),
}
}
dir := fmt.Sprintf("etcdwal%.2d", id)
switch {
case wal.Exist(dir) && !*recover:
if err := os.RemoveAll(dir); err != nil {
logger.Fatal(err)
}
fallthrough
case !wal.Exist(dir):
if err := os.Mkdir(dir, 0750); err != nil {
logger.Fatalf("rkvd: cannot create dir for wal (%v)", err)
}
w, err := wal.Create(dir, nil)
if err != nil {
logger.Fatalf("rkvd: create wal error (%v)", err)
}
w.Close()
}
walsnap := walpb.Snapshot{}
w, err := wal.Open(dir, walsnap)
if err != nil {
logger.Fatalf("rkvd: error loading wal (%v)", err)
}
_, st, ents, err := w.ReadAll()
if err != nil {
log.Fatalf("rkvd: failed to read WAL (%v)", err)
}
storage := etcdraft.NewMemoryStorage()
storage.SetHardState(st)
storage.Append(ents)
leaderOut := make(chan struct{})
node := etcd.NewRaft(
logger,
NewStore(),
storage,
w,
&etcdraft.Config{
ID: id,
ElectionTick: int(*electionTimeout / *heartbeatTimeout),
HeartbeatTick: 1,
Storage: storage,
MaxSizePerMsg: *entriesPerMsg,
// etcdserver says: Never overflow the rafthttp buffer,
// which is 4096. We keep the same constant.
MaxInflightMsgs: 4096 / 8,
CheckQuorum: *checkQuorum,
PreVote: true,
Logger: logger,
},
peers,
*heartbeatTimeout,
!contains(id, ids),
nodes,
lat, event,
leaderOut,
)
service := NewService(logger, node, leaderOut)
rkvpb.RegisterRKVServer(grpcServer, service)
go func() { | logger.Fatal(grpcServer.Serve(lis)) | random_line_split | |
post_trees.rs | &self,
worker_id: usize,
) -> (Stream<G, StatUpdate>, Stream<G, RecommendationUpdate>) {
let mut state: PostTreesState = PostTreesState::new(worker_id);
let mut builder = OperatorBuilder::new("PostTrees".to_owned(), self.scope());
let mut input = builder.new_input(self, Pipeline);
// declare two output streams, one for each downstream operator
let (mut stat_output, stat_stream) = builder.new_output();
let (mut rec_output, rec_stream) = builder.new_output();
builder.build(move |_| {
let mut buf = Vec::new();
move |_frontiers| {
input.for_each(|time, data| {
data.swap(&mut buf);
for event in buf.drain(..) {
// update the post trees
let (opt_target_id, opt_root_post_id) = state.update_post_tree(&event);
// check if the root post_id has been already received
match opt_root_post_id {
Some(root_post_id) => {
if let ID::Post(pid) = root_post_id {
state.append_output_updates(&event, pid);
} else {
panic!("expect ID::Post, got ID::Comment");
}
// check whether we can pop some stuff out of the ooo map
if let Some(_) = event.id() {
state.process_ooo_events(&event);
}
}
None => {
state.push_ooo_event(event, opt_target_id.unwrap());
}
};
// state.dump();
}
let mut stat_handle = stat_output.activate();
let mut rec_handle = rec_output.activate();
let mut stat_session = stat_handle.session(&time);
let mut rec_session = rec_handle.session(&time);
// emit stat updates as output
for stat_update in state.pending_stat_updates.drain(..) {
stat_session.give(stat_update);
}
// emit recommendation updates as output
for rec_update in state.pending_rec_updates.drain(..) {
rec_session.give(rec_update);
}
// check we if we can clean some old events from the ooo queue
state.clean_ooo_events(*time.time());
});
}
});
// return the two output streams
(stat_stream, rec_stream)
}
}
#[derive(Debug)]
struct Node {
person_id: u64, // "creator" of the event
root_post_id: ID,
}
/// State associated with the `post_trees` operator
struct PostTreesState {
worker_id: usize,
// event ID --> post ID it refers to (root of the tree)
root_of: HashMap<ID, Node>,
// out-of-order events: id of missing event --> event that depends on it
ooo_events: HashMap<ID, Vec<Event>>,
// updates to be sent on the stat output stream
pending_stat_updates: Vec<StatUpdate>,
// updates to be sent on the recommendation output stream
pending_rec_updates: Vec<RecommendationUpdate>,
}
impl PostTreesState {
fn new(worker_id: usize) -> PostTreesState {
PostTreesState {
worker_id: worker_id,
root_of: HashMap::<ID, Node>::new(),
ooo_events: HashMap::<ID, Vec<Event>>::new(),
pending_stat_updates: Vec::new(),
pending_rec_updates: Vec::new(),
}
}
/// given an event, try to match it to some post tree
fn update_post_tree(&mut self, event: &Event) -> (Option<ID>, Option<ID>) {
match event {
Event::Post(post) => {
let node = Node { person_id: post.person_id, root_post_id: post.post_id };
self.root_of.insert(post.post_id, node);
(None, Some(post.post_id))
}
Event::Like(like) => {
// likes are not stored in the tree
let post_id = match self.root_of.get(&like.post_id) {
Some(_) => Some(like.post_id), // can only like a post
None => None,
};
(Some(like.post_id), post_id)
}
Event::Comment(comment) => {
let reply_to_id = comment.reply_to_post_id.or(comment.reply_to_comment_id).unwrap();
if let Some(root_node) = self.root_of.get(&reply_to_id) {
let root_post_id = root_node.root_post_id;
let node = Node { person_id: comment.person_id, root_post_id: root_post_id };
self.root_of.insert(comment.comment_id, node);
(Some(reply_to_id), Some(root_post_id))
} else {
(Some(reply_to_id), None)
}
}
}
}
/// process events that have `root_event` as their target post,
/// recursively process the newly inserted events
fn process_ooo_events(&mut self, root_event: &Event) {
let id = root_event.id().unwrap();
if let Some(events) = self.ooo_events.remove(&id) {
println!("-- {} for id = {:?}", "process_ooo_events".bold().yellow(), id);
let mut new_events = Vec::new();
for event in events {
let (opt_target_id, opt_root_post_id) = self.update_post_tree(&event);
assert!(opt_target_id.unwrap() == id, "wtf");
let root_post_id =
opt_root_post_id.expect("[process_ooo_events] root_post_id is None");
// only use this event if its timestamp is greater or equal to the parent's.
if event.timestamp() >= root_event.timestamp() {
self.append_output_updates(&event, root_post_id.u64());
if let Some(_) = event.id() {
new_events.push(event);
}
}
}
// adding events might unlock other ooo events
for event in new_events.drain(..) {
self.process_ooo_events(&event);
}
}
}
/// insert an event into the out-of-order queue
fn push_ooo_event(&mut self, event: Event, target_id: ID) {
self.ooo_events.entry(target_id).or_insert(Vec::new()).push(event);
}
/// remove all old events from the out-of-order queue
/// (including Reply events there were not meant to be received by this worker)
fn clean_ooo_events(&mut self, timestamp: u64) {
self.ooo_events = self
.ooo_events
.clone()
.into_iter()
.filter(|(_, events)| events.iter().all(|event| event.timestamp() > timestamp))
.collect::<HashMap<_, _>>();
}
/// generate all output updates for the current event
fn append_output_updates(&mut self, event: &Event, root_post_id: u64) {
self.append_stat_update(&event, root_post_id);
self.append_rec_update(&event, root_post_id);
}
/// given an event (and the current state of the post trees),
/// generate a new stat update and append it to the pending list
fn append_stat_update(&mut self, event: &Event, root_post_id: u64) {
let update_type = match event {
Event::Post(_) => StatUpdateType::Post,
Event::Like(_) => StatUpdateType::Like,
Event::Comment(comment) => {
if comment.reply_to_post_id != None {
StatUpdateType::Comment
} else {
StatUpdateType::Reply
}
}
};
let update = StatUpdate {
update_type: update_type,
post_id: root_post_id,
person_id: event.person_id(),
timestamp: event.timestamp(),
};
self.pending_stat_updates.push(update);
}
/// given an event (and the current state of the post trees),
/// generate a new recommendation update and append it to the pending list
fn append_rec_update(&mut self, event: &Event, root_post_id: u64) {
if let Event::Post(post) = event {
// a new post with some tags has been created into a forum
let update = RecommendationUpdate::Post {
timestamp: event.timestamp(),
person_id: event.person_id(),
forum_id: post.forum_id,
tags: post.tags.clone(),
};
self.pending_rec_updates.push(update)
} else if let Event::Comment(_) = event {
let to_person_id = self.root_of.get(&ID::Post(root_post_id)).unwrap().person_id;
let update = RecommendationUpdate::Comment {
timestamp: event.timestamp(),
from_person_id: event.person_id(),
to_person_id: to_person_id,
};
self.pending_rec_updates.push(update)
} else if let Event::Like(_) = event {
let to_person_id = self.root_of.get(&ID::Post(root_post_id)).unwrap().person_id;
let update = RecommendationUpdate::Like {
timestamp: event.timestamp(),
from_person_id: event.person_id(),
to_person_id: to_person_id,
};
self.pending_rec_updates.push(update)
}
}
#[allow(dead_code)]
fn dump(&self) {
println!(
"{}",
format!(
"{} {}",
format!("[W{}]", self.worker_id).bold().blue(),
"Current state".bold().blue()
)
);
println!(" root_of -- {:?}", self.root_of);
self.dump_ooo_events(2);
}
fn | dump_ooo_events | identifier_name | |
post_trees.rs | ` operator
/// that implements query 1
/// 2) RecommendationUpdates: will be fed into the `friend_recommendation`
/// operator that implements query 2
///
/// In case of multiple workers, an upstream `exchange` operator
/// will partition the events by root post id. Thus this operator
/// will handle only a subset of the posts.
///
/// "Reply to comments" events are broadcasted to all workers
/// as they don't carry the root post id in the payload.
///
/// When the `post_trees` operator receives an Reply event that
/// cannot match to any currently received comment, it stores
/// it in an out-of-order (ooo) queue. When the maximum bounded delay
/// has expired, old events in the ooo queue are discarded
/// (including events do not belong to the posts handled by this worker)
///
pub trait PostTrees<G: Scope> {
fn post_trees(
&self,
worker_id: usize,
) -> (Stream<G, StatUpdate>, Stream<G, RecommendationUpdate>);
}
impl<G: Scope<Timestamp = u64>> PostTrees<G> for Stream<G, Event> {
fn post_trees(
&self,
worker_id: usize,
) -> (Stream<G, StatUpdate>, Stream<G, RecommendationUpdate>) {
let mut state: PostTreesState = PostTreesState::new(worker_id);
let mut builder = OperatorBuilder::new("PostTrees".to_owned(), self.scope());
let mut input = builder.new_input(self, Pipeline);
// declare two output streams, one for each downstream operator
let (mut stat_output, stat_stream) = builder.new_output();
let (mut rec_output, rec_stream) = builder.new_output();
builder.build(move |_| {
let mut buf = Vec::new();
move |_frontiers| {
input.for_each(|time, data| {
data.swap(&mut buf);
for event in buf.drain(..) {
// update the post trees
let (opt_target_id, opt_root_post_id) = state.update_post_tree(&event);
// check if the root post_id has been already received
match opt_root_post_id {
Some(root_post_id) => {
if let ID::Post(pid) = root_post_id {
state.append_output_updates(&event, pid);
} else {
panic!("expect ID::Post, got ID::Comment");
}
// check whether we can pop some stuff out of the ooo map
if let Some(_) = event.id() {
state.process_ooo_events(&event);
}
}
None => {
state.push_ooo_event(event, opt_target_id.unwrap());
}
};
// state.dump();
}
let mut stat_handle = stat_output.activate();
let mut rec_handle = rec_output.activate();
let mut stat_session = stat_handle.session(&time);
let mut rec_session = rec_handle.session(&time);
// emit stat updates as output
for stat_update in state.pending_stat_updates.drain(..) {
stat_session.give(stat_update);
}
// emit recommendation updates as output
for rec_update in state.pending_rec_updates.drain(..) {
rec_session.give(rec_update);
}
// check we if we can clean some old events from the ooo queue
state.clean_ooo_events(*time.time());
});
}
});
// return the two output streams
(stat_stream, rec_stream)
}
}
#[derive(Debug)]
struct Node {
person_id: u64, // "creator" of the event
root_post_id: ID,
}
/// State associated with the `post_trees` operator
struct PostTreesState {
worker_id: usize,
// event ID --> post ID it refers to (root of the tree)
root_of: HashMap<ID, Node>,
// out-of-order events: id of missing event --> event that depends on it
ooo_events: HashMap<ID, Vec<Event>>,
// updates to be sent on the stat output stream
pending_stat_updates: Vec<StatUpdate>,
// updates to be sent on the recommendation output stream
pending_rec_updates: Vec<RecommendationUpdate>,
}
impl PostTreesState {
fn new(worker_id: usize) -> PostTreesState {
PostTreesState {
worker_id: worker_id,
root_of: HashMap::<ID, Node>::new(),
ooo_events: HashMap::<ID, Vec<Event>>::new(),
pending_stat_updates: Vec::new(),
pending_rec_updates: Vec::new(),
}
}
/// given an event, try to match it to some post tree
fn update_post_tree(&mut self, event: &Event) -> (Option<ID>, Option<ID>) | let node = Node { person_id: comment.person_id, root_post_id: root_post_id };
self.root_of.insert(comment.comment_id, node);
(Some(reply_to_id), Some(root_post_id))
} else {
(Some(reply_to_id), None)
}
}
}
}
/// process events that have `root_event` as their target post,
/// recursively process the newly inserted events
fn process_ooo_events(&mut self, root_event: &Event) {
let id = root_event.id().unwrap();
if let Some(events) = self.ooo_events.remove(&id) {
println!("-- {} for id = {:?}", "process_ooo_events".bold().yellow(), id);
let mut new_events = Vec::new();
for event in events {
let (opt_target_id, opt_root_post_id) = self.update_post_tree(&event);
assert!(opt_target_id.unwrap() == id, "wtf");
let root_post_id =
opt_root_post_id.expect("[process_ooo_events] root_post_id is None");
// only use this event if its timestamp is greater or equal to the parent's.
if event.timestamp() >= root_event.timestamp() {
self.append_output_updates(&event, root_post_id.u64());
if let Some(_) = event.id() {
new_events.push(event);
}
}
}
// adding events might unlock other ooo events
for event in new_events.drain(..) {
self.process_ooo_events(&event);
}
}
}
/// insert an event into the out-of-order queue
fn push_ooo_event(&mut self, event: Event, target_id: ID) {
self.ooo_events.entry(target_id).or_insert(Vec::new()).push(event);
}
/// remove all old events from the out-of-order queue
/// (including Reply events there were not meant to be received by this worker)
fn clean_ooo_events(&mut self, timestamp: u64) {
self.ooo_events = self
.ooo_events
.clone()
.into_iter()
.filter(|(_, events)| events.iter().all(|event| event.timestamp() > timestamp))
.collect::<HashMap<_, _>>();
}
/// generate all output updates for the current event
fn append_output_updates(&mut self, event: &Event, root_post_id: u64) {
self.append_stat_update(&event, root_post_id);
self.append_rec_update(&event, root_post_id);
}
/// given an event (and the current state of the post trees),
/// generate a new stat update and append it to the pending list
fn append_stat_update(&mut self, event: &Event, root_post_id: u64) {
let update_type = match event {
Event::Post(_) => StatUpdateType::Post,
Event::Like(_) => StatUpdateType::Like,
Event::Comment(comment) => {
if comment.reply_to_post_id != None {
StatUpdateType::Comment
} else {
StatUpdateType::Reply
}
}
};
let update = StatUpdate {
update_type: update_type,
post_id: root_post_id,
person_id: event.person_id(),
timestamp: event.timestamp(),
};
self.pending_stat_updates.push(update);
}
/// given an event (and the current state of the post trees),
/// generate a new recommendation update and append it to the pending list
fn append_rec_update(&mut self, event: &Event, root_post_id: u64) {
if let Event::Post(post) = event {
// a new post with some tags has been created into a forum
let update = RecommendationUpdate::Post {
timestamp: event.timestamp(),
person_id: event.person_id(),
forum_id: post.forum_id,
tags: post.tags.clone(),
};
self.pending_rec_updates | {
match event {
Event::Post(post) => {
let node = Node { person_id: post.person_id, root_post_id: post.post_id };
self.root_of.insert(post.post_id, node);
(None, Some(post.post_id))
}
Event::Like(like) => {
// likes are not stored in the tree
let post_id = match self.root_of.get(&like.post_id) {
Some(_) => Some(like.post_id), // can only like a post
None => None,
};
(Some(like.post_id), post_id)
}
Event::Comment(comment) => {
let reply_to_id = comment.reply_to_post_id.or(comment.reply_to_comment_id).unwrap();
if let Some(root_node) = self.root_of.get(&reply_to_id) {
let root_post_id = root_node.root_post_id; | identifier_body |
post_trees.rs | _posts` operator
/// that implements query 1
/// 2) RecommendationUpdates: will be fed into the `friend_recommendation`
/// operator that implements query 2
/// | /// will handle only a subset of the posts.
///
/// "Reply to comments" events are broadcasted to all workers
/// as they don't carry the root post id in the payload.
///
/// When the `post_trees` operator receives an Reply event that
/// cannot match to any currently received comment, it stores
/// it in an out-of-order (ooo) queue. When the maximum bounded delay
/// has expired, old events in the ooo queue are discarded
/// (including events do not belong to the posts handled by this worker)
///
pub trait PostTrees<G: Scope> {
fn post_trees(
&self,
worker_id: usize,
) -> (Stream<G, StatUpdate>, Stream<G, RecommendationUpdate>);
}
impl<G: Scope<Timestamp = u64>> PostTrees<G> for Stream<G, Event> {
fn post_trees(
&self,
worker_id: usize,
) -> (Stream<G, StatUpdate>, Stream<G, RecommendationUpdate>) {
let mut state: PostTreesState = PostTreesState::new(worker_id);
let mut builder = OperatorBuilder::new("PostTrees".to_owned(), self.scope());
let mut input = builder.new_input(self, Pipeline);
// declare two output streams, one for each downstream operator
let (mut stat_output, stat_stream) = builder.new_output();
let (mut rec_output, rec_stream) = builder.new_output();
builder.build(move |_| {
let mut buf = Vec::new();
move |_frontiers| {
input.for_each(|time, data| {
data.swap(&mut buf);
for event in buf.drain(..) {
// update the post trees
let (opt_target_id, opt_root_post_id) = state.update_post_tree(&event);
// check if the root post_id has been already received
match opt_root_post_id {
Some(root_post_id) => {
if let ID::Post(pid) = root_post_id {
state.append_output_updates(&event, pid);
} else {
panic!("expect ID::Post, got ID::Comment");
}
// check whether we can pop some stuff out of the ooo map
if let Some(_) = event.id() {
state.process_ooo_events(&event);
}
}
None => {
state.push_ooo_event(event, opt_target_id.unwrap());
}
};
// state.dump();
}
let mut stat_handle = stat_output.activate();
let mut rec_handle = rec_output.activate();
let mut stat_session = stat_handle.session(&time);
let mut rec_session = rec_handle.session(&time);
// emit stat updates as output
for stat_update in state.pending_stat_updates.drain(..) {
stat_session.give(stat_update);
}
// emit recommendation updates as output
for rec_update in state.pending_rec_updates.drain(..) {
rec_session.give(rec_update);
}
// check we if we can clean some old events from the ooo queue
state.clean_ooo_events(*time.time());
});
}
});
// return the two output streams
(stat_stream, rec_stream)
}
}
#[derive(Debug)]
struct Node {
person_id: u64, // "creator" of the event
root_post_id: ID,
}
/// State associated with the `post_trees` operator
struct PostTreesState {
worker_id: usize,
// event ID --> post ID it refers to (root of the tree)
root_of: HashMap<ID, Node>,
// out-of-order events: id of missing event --> event that depends on it
ooo_events: HashMap<ID, Vec<Event>>,
// updates to be sent on the stat output stream
pending_stat_updates: Vec<StatUpdate>,
// updates to be sent on the recommendation output stream
pending_rec_updates: Vec<RecommendationUpdate>,
}
impl PostTreesState {
fn new(worker_id: usize) -> PostTreesState {
PostTreesState {
worker_id: worker_id,
root_of: HashMap::<ID, Node>::new(),
ooo_events: HashMap::<ID, Vec<Event>>::new(),
pending_stat_updates: Vec::new(),
pending_rec_updates: Vec::new(),
}
}
/// given an event, try to match it to some post tree
fn update_post_tree(&mut self, event: &Event) -> (Option<ID>, Option<ID>) {
match event {
Event::Post(post) => {
let node = Node { person_id: post.person_id, root_post_id: post.post_id };
self.root_of.insert(post.post_id, node);
(None, Some(post.post_id))
}
Event::Like(like) => {
// likes are not stored in the tree
let post_id = match self.root_of.get(&like.post_id) {
Some(_) => Some(like.post_id), // can only like a post
None => None,
};
(Some(like.post_id), post_id)
}
Event::Comment(comment) => {
let reply_to_id = comment.reply_to_post_id.or(comment.reply_to_comment_id).unwrap();
if let Some(root_node) = self.root_of.get(&reply_to_id) {
let root_post_id = root_node.root_post_id;
let node = Node { person_id: comment.person_id, root_post_id: root_post_id };
self.root_of.insert(comment.comment_id, node);
(Some(reply_to_id), Some(root_post_id))
} else {
(Some(reply_to_id), None)
}
}
}
}
/// process events that have `root_event` as their target post,
/// recursively process the newly inserted events
fn process_ooo_events(&mut self, root_event: &Event) {
let id = root_event.id().unwrap();
if let Some(events) = self.ooo_events.remove(&id) {
println!("-- {} for id = {:?}", "process_ooo_events".bold().yellow(), id);
let mut new_events = Vec::new();
for event in events {
let (opt_target_id, opt_root_post_id) = self.update_post_tree(&event);
assert!(opt_target_id.unwrap() == id, "wtf");
let root_post_id =
opt_root_post_id.expect("[process_ooo_events] root_post_id is None");
// only use this event if its timestamp is greater or equal to the parent's.
if event.timestamp() >= root_event.timestamp() {
self.append_output_updates(&event, root_post_id.u64());
if let Some(_) = event.id() {
new_events.push(event);
}
}
}
// adding events might unlock other ooo events
for event in new_events.drain(..) {
self.process_ooo_events(&event);
}
}
}
/// insert an event into the out-of-order queue
fn push_ooo_event(&mut self, event: Event, target_id: ID) {
self.ooo_events.entry(target_id).or_insert(Vec::new()).push(event);
}
/// remove all old events from the out-of-order queue
/// (including Reply events there were not meant to be received by this worker)
fn clean_ooo_events(&mut self, timestamp: u64) {
self.ooo_events = self
.ooo_events
.clone()
.into_iter()
.filter(|(_, events)| events.iter().all(|event| event.timestamp() > timestamp))
.collect::<HashMap<_, _>>();
}
/// generate all output updates for the current event
fn append_output_updates(&mut self, event: &Event, root_post_id: u64) {
self.append_stat_update(&event, root_post_id);
self.append_rec_update(&event, root_post_id);
}
/// given an event (and the current state of the post trees),
/// generate a new stat update and append it to the pending list
fn append_stat_update(&mut self, event: &Event, root_post_id: u64) {
let update_type = match event {
Event::Post(_) => StatUpdateType::Post,
Event::Like(_) => StatUpdateType::Like,
Event::Comment(comment) => {
if comment.reply_to_post_id != None {
StatUpdateType::Comment
} else {
StatUpdateType::Reply
}
}
};
let update = StatUpdate {
update_type: update_type,
post_id: root_post_id,
person_id: event.person_id(),
timestamp: event.timestamp(),
};
self.pending_stat_updates.push(update);
}
/// given an event (and the current state of the post trees),
/// generate a new recommendation update and append it to the pending list
fn append_rec_update(&mut self, event: &Event, root_post_id: u64) {
if let Event::Post(post) = event {
// a new post with some tags has been created into a forum
let update = RecommendationUpdate::Post {
timestamp: event.timestamp(),
person_id: event.person_id(),
forum_id: post.forum_id,
tags: post.tags.clone(),
};
self.pending_rec_updates.push | /// In case of multiple workers, an upstream `exchange` operator
/// will partition the events by root post id. Thus this operator | random_line_split |
lock-rpc-server.go | attempted on a read locked entity: %s (%d read locks active)", args.Name, len(lri))
}
if l.removeEntry(args.Name, args.UID, &lri) {
return nil
}
return fmt.Errorf("Unlock unable to find corresponding lock for uid: %s", args.UID)
}
// RLock - rpc handler for read lock operation.
func (l *lockServer) RLock(args *LockArgs, reply *bool) error {
l.mutex.Lock()
defer l.mutex.Unlock()
if err := l.verifyArgs(args); err != nil {
return err
}
var lri []lockRequesterInfo
lri, *reply = l.lockMap[args.Name]
if !*reply { // No locks held on the given name, so claim (first) read lock
l.lockMap[args.Name] = []lockRequesterInfo{{writer: false, node: args.Node, rpcPath: args.RPCPath, uid: args.UID, timestamp: time.Now(), timeLastCheck: time.Now()}}
*reply = true
} else {
if *reply = !isWriteLock(lri); *reply { // Unless there is a write lock
l.lockMap[args.Name] = append(l.lockMap[args.Name], lockRequesterInfo{writer: false, node: args.Node, rpcPath: args.RPCPath, uid: args.UID, timestamp: time.Now(), timeLastCheck: time.Now()})
}
}
return nil
}
// RUnlock - rpc handler for read unlock operation.
func (l *lockServer) RUnlock(args *LockArgs, reply *bool) error {
l.mutex.Lock()
defer l.mutex.Unlock()
if err := l.verifyArgs(args); err != nil {
return err
}
var lri []lockRequesterInfo
if lri, *reply = l.lockMap[args.Name]; !*reply { // No lock is held on the given name
return fmt.Errorf("RUnlock attempted on an unlocked entity: %s", args.Name)
}
if *reply = !isWriteLock(lri); !*reply { // A write-lock is held, cannot release a read lock
return fmt.Errorf("RUnlock attempted on a write locked entity: %s", args.Name)
}
if l.removeEntry(args.Name, args.UID, &lri) {
return nil
}
return fmt.Errorf("RUnlock unable to find corresponding read lock for uid: %s", args.UID)
}
// Active - rpc handler for active lock status.
func (l *lockServer) Active(args *LockArgs, reply *bool) error {
l.mutex.Lock()
defer l.mutex.Unlock()
if err := l.verifyArgs(args); err != nil {
return err
}
var lri []lockRequesterInfo
if lri, *reply = l.lockMap[args.Name]; !*reply {
return nil // No lock is held on the given name so return false
}
// Check whether uid is still active
for _, entry := range lri {
if *reply = entry.uid == args.UID; *reply {
return nil // When uid found return true
}
}
return nil // None found so return false
}
// removeEntry either, based on the uid of the lock message, removes a single entry from the
// lockRequesterInfo array or the whole array from the map (in case of a write lock or last read lock)
func (l *lockServer) removeEntry(name, uid string, lri *[]lockRequesterInfo) bool {
// Find correct entry to remove based on uid
for index, entry := range *lri {
if entry.uid == uid {
if len(*lri) == 1 {
delete(l.lockMap, name) // Remove the (last) lock
} else {
// Remove the appropriate read lock
*lri = append((*lri)[:index], (*lri)[index+1:]...)
l.lockMap[name] = *lri
}
return true
}
}
return false
}
// nameLockRequesterInfoPair is a helper type for lock maintenance
type nameLockRequesterInfoPair struct {
name string
lri lockRequesterInfo
}
// getLongLivedLocks returns locks that are older than a certain time and
// have not been 'checked' for validity too soon enough
func getLongLivedLocks(m map[string][]lockRequesterInfo, interval time.Duration) []nameLockRequesterInfoPair {
rslt := []nameLockRequesterInfoPair{}
for name, lriArray := range m {
for idx := range lriArray {
// Check whether enough time has gone by since last check
if time.Since(lriArray[idx].timeLastCheck) >= interval {
rslt = append(rslt, nameLockRequesterInfoPair{name: name, lri: lriArray[idx]})
lriArray[idx].timeLastCheck = time.Now()
}
}
}
return rslt
}
// lockMaintenance loops over locks that have been active for some time and checks back
// with the original server whether it is still alive or not
func (l *lockServer) lockMaintenance(interval time.Duration) {
l.mutex.Lock()
// get list of locks to check
nlripLongLived := getLongLivedLocks(l.lockMap, interval)
l.mutex.Unlock()
for _, nlrip := range nlripLongLived {
c := newClient(nlrip.lri.node, nlrip.lri.rpcPath)
var active bool
// Call back to original server verify whether the lock is still active (based on name & uid)
if err := c.Call("Dsync.Active", &LockArgs{Name: nlrip.name, UID: nlrip.lri.uid}, &active); err != nil {
// We failed to connect back to the server that originated the lock, this can either be due to
// - server at client down
// - some network error (and server is up normally)
//
// We will ignore the error, and we will retry later to get resolve on this lock
c.Close()
} else {
c.Close()
if !active { // The lock is no longer active at server that originated the lock
// so remove the lock from the map
l.mutex.Lock()
// Check if entry is still in map (could have been removed altogether by 'concurrent' (R)Unlock of last entry)
if lri, ok := l.lockMap[nlrip.name]; ok {
if !l.removeEntry(nlrip.name, nlrip.lri.uid, &lri) {
// Remove failed, in case it is a:
if nlrip.lri.writer {
// Writer: this should never happen as the whole (mapped) entry should have been deleted
log.Errorln("Lock maintenance failed to remove entry for write lock (should never happen)", nlrip.name, nlrip.lri, lri)
} else {
// Reader: this can happen if multiple read locks were active and the one we are looking for
// has been released concurrently (so it is fine)
}
} else {
// remove went okay, all is fine
}
}
l.mutex.Unlock()
}
}
}
}
// Initialize distributed lock.
func initDistributedNSLock(mux *router.Router, serverConfig serverCmdConfig) {
lockServers := newLockServers(serverConfig)
registerStorageLockers(mux, lockServers)
}
// Create one lock server for every local storage rpc server.
func newLockServers(serverConfig serverCmdConfig) (lockServers []*lockServer) {
// Initialize posix storage API.
exports := serverConfig.disks
ignoredExports := serverConfig.ignoredDisks
// Save ignored disks in a map
skipDisks := make(map[string]bool)
for _, ignoredExport := range ignoredExports {
skipDisks[ignoredExport] = true
}
for _, export := range exports {
if skipDisks[export] {
continue
}
if isLocalStorage(export) {
if idx := strings.LastIndex(export, ":"); idx != -1 {
export = export[idx+1:]
}
// Create handler for lock RPCs
locker := &lockServer{
rpcPath: export,
mutex: sync.Mutex{},
lockMap: make(map[string][]lockRequesterInfo),
timestamp: time.Now().UTC(),
}
// Start loop for stale lock maintenance
go func() {
// Start with random sleep time, so as to avoid "synchronous checks" between servers
time.Sleep(time.Duration(rand.Float64() * float64(lockMaintenanceLoop)))
for {
time.Sleep(lockMaintenanceLoop)
locker.lockMaintenance(lockCheckValidityInterval)
}
}()
lockServers = append(lockServers, locker)
}
}
return lockServers
}
// registerStorageLockers - register locker rpc handlers for net/rpc library clients
func registerStorageLockers(mux *router.Router, lockServers []*lockServer) | {
for _, lockServer := range lockServers {
lockRPCServer := rpc.NewServer()
lockRPCServer.RegisterName("Dsync", lockServer)
lockRouter := mux.PathPrefix(reservedBucket).Subrouter()
lockRouter.Path(path.Join("/lock", lockServer.rpcPath)).Handler(lockRPCServer)
}
} | identifier_body | |
lock-rpc-server.go | the supplied value.
func (l *LockArgs) SetTimestamp(tstamp time.Time) {
l.Timestamp = tstamp
}
// lockRequesterInfo stores various info from the client for each lock that is requested
type lockRequesterInfo struct {
writer bool // Bool whether write or read lock
node string // Network address of client claiming lock
rpcPath string // RPC path of client claiming lock
uid string // Uid to uniquely identify request of client
timestamp time.Time // Timestamp set at the time of initialization
timeLastCheck time.Time // Timestamp for last check of validity of lock
}
// isWriteLock returns whether the lock is a write or read lock
func isWriteLock(lri []lockRequesterInfo) bool {
return len(lri) == 1 && lri[0].writer
}
// lockServer is type for RPC handlers
type lockServer struct {
rpcPath string
mutex sync.Mutex
lockMap map[string][]lockRequesterInfo
timestamp time.Time // Timestamp set at the time of initialization. Resets naturally on minio server restart.
}
func (l *lockServer) verifyArgs(args *LockArgs) error {
if !l.timestamp.Equal(args.Timestamp) {
return errInvalidTimestamp
}
if !isRPCTokenValid(args.Token) {
return errInvalidToken
}
return nil
}
/// Distributed lock handlers
// LoginHandler - handles LoginHandler RPC call.
func (l *lockServer) LoginHandler(args *RPCLoginArgs, reply *RPCLoginReply) error {
jwt, err := newJWT(defaultTokenExpiry)
if err != nil {
return err
}
if err = jwt.Authenticate(args.Username, args.Password); err != nil {
return err
}
token, err := jwt.GenerateToken(args.Username)
if err != nil {
return err
}
reply.Token = token
reply.Timestamp = l.timestamp
return nil
}
// Lock - rpc handler for (single) write lock operation.
func (l *lockServer) Lock(args *LockArgs, reply *bool) error {
l.mutex.Lock()
defer l.mutex.Unlock()
if err := l.verifyArgs(args); err != nil {
return err
}
_, *reply = l.lockMap[args.Name]
if !*reply { // No locks held on the given name, so claim write lock
l.lockMap[args.Name] = []lockRequesterInfo{{writer: true, node: args.Node, rpcPath: args.RPCPath, uid: args.UID, timestamp: time.Now(), timeLastCheck: time.Now()}}
}
*reply = !*reply // Negate *reply to return true when lock is granted or false otherwise
return nil
}
// Unlock - rpc handler for (single) write unlock operation.
func (l *lockServer) Unlock(args *LockArgs, reply *bool) error {
l.mutex.Lock()
defer l.mutex.Unlock()
if err := l.verifyArgs(args); err != nil {
return err
}
var lri []lockRequesterInfo
lri, *reply = l.lockMap[args.Name]
if !*reply { // No lock is held on the given name
return fmt.Errorf("Unlock attempted on an unlocked entity: %s", args.Name)
}
if *reply = isWriteLock(lri); !*reply { // Unless it is a write lock
return fmt.Errorf("Unlock attempted on a read locked entity: %s (%d read locks active)", args.Name, len(lri))
}
if l.removeEntry(args.Name, args.UID, &lri) {
return nil
}
return fmt.Errorf("Unlock unable to find corresponding lock for uid: %s", args.UID)
}
// RLock - rpc handler for read lock operation.
func (l *lockServer) RLock(args *LockArgs, reply *bool) error {
l.mutex.Lock()
defer l.mutex.Unlock()
if err := l.verifyArgs(args); err != nil {
return err
}
var lri []lockRequesterInfo
lri, *reply = l.lockMap[args.Name]
if !*reply { // No locks held on the given name, so claim (first) read lock
l.lockMap[args.Name] = []lockRequesterInfo{{writer: false, node: args.Node, rpcPath: args.RPCPath, uid: args.UID, timestamp: time.Now(), timeLastCheck: time.Now()}}
*reply = true
} else {
if *reply = !isWriteLock(lri); *reply { // Unless there is a write lock
l.lockMap[args.Name] = append(l.lockMap[args.Name], lockRequesterInfo{writer: false, node: args.Node, rpcPath: args.RPCPath, uid: args.UID, timestamp: time.Now(), timeLastCheck: time.Now()})
}
}
return nil
}
// RUnlock - rpc handler for read unlock operation.
func (l *lockServer) RUnlock(args *LockArgs, reply *bool) error {
l.mutex.Lock()
defer l.mutex.Unlock()
if err := l.verifyArgs(args); err != nil {
return err
}
var lri []lockRequesterInfo
if lri, *reply = l.lockMap[args.Name]; !*reply { // No lock is held on the given name
return fmt.Errorf("RUnlock attempted on an unlocked entity: %s", args.Name)
}
if *reply = !isWriteLock(lri); !*reply { // A write-lock is held, cannot release a read lock
return fmt.Errorf("RUnlock attempted on a write locked entity: %s", args.Name)
}
if l.removeEntry(args.Name, args.UID, &lri) {
return nil
}
return fmt.Errorf("RUnlock unable to find corresponding read lock for uid: %s", args.UID)
}
// Active - rpc handler for active lock status.
func (l *lockServer) | (args *LockArgs, reply *bool) error {
l.mutex.Lock()
defer l.mutex.Unlock()
if err := l.verifyArgs(args); err != nil {
return err
}
var lri []lockRequesterInfo
if lri, *reply = l.lockMap[args.Name]; !*reply {
return nil // No lock is held on the given name so return false
}
// Check whether uid is still active
for _, entry := range lri {
if *reply = entry.uid == args.UID; *reply {
return nil // When uid found return true
}
}
return nil // None found so return false
}
// removeEntry either, based on the uid of the lock message, removes a single entry from the
// lockRequesterInfo array or the whole array from the map (in case of a write lock or last read lock)
func (l *lockServer) removeEntry(name, uid string, lri *[]lockRequesterInfo) bool {
// Find correct entry to remove based on uid
for index, entry := range *lri {
if entry.uid == uid {
if len(*lri) == 1 {
delete(l.lockMap, name) // Remove the (last) lock
} else {
// Remove the appropriate read lock
*lri = append((*lri)[:index], (*lri)[index+1:]...)
l.lockMap[name] = *lri
}
return true
}
}
return false
}
// nameLockRequesterInfoPair is a helper type for lock maintenance
type nameLockRequesterInfoPair struct {
name string
lri lockRequesterInfo
}
// getLongLivedLocks returns locks that are older than a certain time and
// have not been 'checked' for validity too soon enough
func getLongLivedLocks(m map[string][]lockRequesterInfo, interval time.Duration) []nameLockRequesterInfoPair {
rslt := []nameLockRequesterInfoPair{}
for name, lriArray := range m {
for idx := range lriArray {
// Check whether enough time has gone by since last check
if time.Since(lriArray[idx].timeLastCheck) >= interval {
rslt = append(rslt, nameLockRequesterInfoPair{name: name, lri: lriArray[idx]})
lriArray[idx].timeLastCheck = time.Now()
}
}
}
return rslt
}
// lockMaintenance loops over locks that have been active for some time and checks back
// with the original server whether it is still alive or not
func (l *lockServer) lockMaintenance(interval time.Duration) {
l.mutex.Lock()
// get list of locks to check
nlripLongLived := getLongLivedLocks(l.lockMap, interval)
l.mutex.Unlock()
for _, nlrip := range nlripLongLived {
c := newClient(nlrip.lri.node, nlrip.lri.rpcPath)
var active bool
// Call back to original server verify whether the lock is still active (based on name & uid)
if err := c.Call("Dsync.Active", &LockArgs{Name: nlrip.name, UID: nlrip.lri.uid}, &active); err != nil {
// We failed to connect back to the server that originated the lock, this can either be due to
// - server at client down
// - some network error (and server is up normally)
//
// We will ignore the error, and we will retry later to get resolve on this lock
c.Close | Active | identifier_name |
lock-rpc-server.go | RPC call.
func (l *lockServer) LoginHandler(args *RPCLoginArgs, reply *RPCLoginReply) error {
jwt, err := newJWT(defaultTokenExpiry)
if err != nil {
return err
}
if err = jwt.Authenticate(args.Username, args.Password); err != nil {
return err
}
token, err := jwt.GenerateToken(args.Username)
if err != nil {
return err
}
reply.Token = token
reply.Timestamp = l.timestamp
return nil
}
// Lock - rpc handler for (single) write lock operation.
func (l *lockServer) Lock(args *LockArgs, reply *bool) error {
l.mutex.Lock()
defer l.mutex.Unlock()
if err := l.verifyArgs(args); err != nil {
return err
}
_, *reply = l.lockMap[args.Name]
if !*reply { // No locks held on the given name, so claim write lock
l.lockMap[args.Name] = []lockRequesterInfo{{writer: true, node: args.Node, rpcPath: args.RPCPath, uid: args.UID, timestamp: time.Now(), timeLastCheck: time.Now()}}
}
*reply = !*reply // Negate *reply to return true when lock is granted or false otherwise
return nil
}
// Unlock - rpc handler for (single) write unlock operation.
func (l *lockServer) Unlock(args *LockArgs, reply *bool) error {
l.mutex.Lock()
defer l.mutex.Unlock()
if err := l.verifyArgs(args); err != nil {
return err
}
var lri []lockRequesterInfo
lri, *reply = l.lockMap[args.Name]
if !*reply { // No lock is held on the given name
return fmt.Errorf("Unlock attempted on an unlocked entity: %s", args.Name)
}
if *reply = isWriteLock(lri); !*reply { // Unless it is a write lock
return fmt.Errorf("Unlock attempted on a read locked entity: %s (%d read locks active)", args.Name, len(lri))
}
if l.removeEntry(args.Name, args.UID, &lri) {
return nil
}
return fmt.Errorf("Unlock unable to find corresponding lock for uid: %s", args.UID)
}
// RLock - rpc handler for read lock operation.
func (l *lockServer) RLock(args *LockArgs, reply *bool) error {
l.mutex.Lock()
defer l.mutex.Unlock()
if err := l.verifyArgs(args); err != nil {
return err
}
var lri []lockRequesterInfo
lri, *reply = l.lockMap[args.Name]
if !*reply { // No locks held on the given name, so claim (first) read lock
l.lockMap[args.Name] = []lockRequesterInfo{{writer: false, node: args.Node, rpcPath: args.RPCPath, uid: args.UID, timestamp: time.Now(), timeLastCheck: time.Now()}}
*reply = true
} else {
if *reply = !isWriteLock(lri); *reply { // Unless there is a write lock
l.lockMap[args.Name] = append(l.lockMap[args.Name], lockRequesterInfo{writer: false, node: args.Node, rpcPath: args.RPCPath, uid: args.UID, timestamp: time.Now(), timeLastCheck: time.Now()})
}
}
return nil
}
// RUnlock - rpc handler for read unlock operation.
func (l *lockServer) RUnlock(args *LockArgs, reply *bool) error {
l.mutex.Lock()
defer l.mutex.Unlock()
if err := l.verifyArgs(args); err != nil {
return err
}
var lri []lockRequesterInfo
if lri, *reply = l.lockMap[args.Name]; !*reply { // No lock is held on the given name
return fmt.Errorf("RUnlock attempted on an unlocked entity: %s", args.Name)
}
if *reply = !isWriteLock(lri); !*reply { // A write-lock is held, cannot release a read lock
return fmt.Errorf("RUnlock attempted on a write locked entity: %s", args.Name)
}
if l.removeEntry(args.Name, args.UID, &lri) {
return nil
}
return fmt.Errorf("RUnlock unable to find corresponding read lock for uid: %s", args.UID)
}
// Active - rpc handler for active lock status.
func (l *lockServer) Active(args *LockArgs, reply *bool) error {
l.mutex.Lock()
defer l.mutex.Unlock()
if err := l.verifyArgs(args); err != nil {
return err
}
var lri []lockRequesterInfo
if lri, *reply = l.lockMap[args.Name]; !*reply {
return nil // No lock is held on the given name so return false
}
// Check whether uid is still active
for _, entry := range lri {
if *reply = entry.uid == args.UID; *reply {
return nil // When uid found return true
}
}
return nil // None found so return false
}
// removeEntry either, based on the uid of the lock message, removes a single entry from the
// lockRequesterInfo array or the whole array from the map (in case of a write lock or last read lock)
func (l *lockServer) removeEntry(name, uid string, lri *[]lockRequesterInfo) bool {
// Find correct entry to remove based on uid
for index, entry := range *lri {
if entry.uid == uid {
if len(*lri) == 1 {
delete(l.lockMap, name) // Remove the (last) lock
} else {
// Remove the appropriate read lock
*lri = append((*lri)[:index], (*lri)[index+1:]...)
l.lockMap[name] = *lri
}
return true
}
}
return false
}
// nameLockRequesterInfoPair is a helper type for lock maintenance
type nameLockRequesterInfoPair struct {
name string
lri lockRequesterInfo
}
// getLongLivedLocks returns locks that are older than a certain time and
// have not been 'checked' for validity too soon enough
func getLongLivedLocks(m map[string][]lockRequesterInfo, interval time.Duration) []nameLockRequesterInfoPair {
rslt := []nameLockRequesterInfoPair{}
for name, lriArray := range m {
for idx := range lriArray {
// Check whether enough time has gone by since last check
if time.Since(lriArray[idx].timeLastCheck) >= interval {
rslt = append(rslt, nameLockRequesterInfoPair{name: name, lri: lriArray[idx]})
lriArray[idx].timeLastCheck = time.Now()
}
}
}
return rslt
}
// lockMaintenance loops over locks that have been active for some time and checks back
// with the original server whether it is still alive or not
func (l *lockServer) lockMaintenance(interval time.Duration) {
l.mutex.Lock()
// get list of locks to check
nlripLongLived := getLongLivedLocks(l.lockMap, interval)
l.mutex.Unlock()
for _, nlrip := range nlripLongLived {
c := newClient(nlrip.lri.node, nlrip.lri.rpcPath)
var active bool
// Call back to original server verify whether the lock is still active (based on name & uid)
if err := c.Call("Dsync.Active", &LockArgs{Name: nlrip.name, UID: nlrip.lri.uid}, &active); err != nil {
// We failed to connect back to the server that originated the lock, this can either be due to
// - server at client down
// - some network error (and server is up normally)
//
// We will ignore the error, and we will retry later to get resolve on this lock
c.Close()
} else {
c.Close()
if !active { // The lock is no longer active at server that originated the lock
// so remove the lock from the map
l.mutex.Lock()
// Check if entry is still in map (could have been removed altogether by 'concurrent' (R)Unlock of last entry)
if lri, ok := l.lockMap[nlrip.name]; ok {
if !l.removeEntry(nlrip.name, nlrip.lri.uid, &lri) {
// Remove failed, in case it is a:
if nlrip.lri.writer {
// Writer: this should never happen as the whole (mapped) entry should have been deleted
log.Errorln("Lock maintenance failed to remove entry for write lock (should never happen)", nlrip.name, nlrip.lri, lri)
} else {
// Reader: this can happen if multiple read locks were active and the one we are looking for
// has been released concurrently (so it is fine)
}
} else {
// remove went okay, all is fine
}
}
l.mutex.Unlock()
}
}
}
} |
// Initialize distributed lock.
func initDistributedNSLock(mux *router.Router, serverConfig serverCmdConfig) {
lockServers := newLockServers(serverConfig)
registerStorageLockers(mux, lockServers) | random_line_split | |
lock-rpc-server.go | the supplied value.
func (l *LockArgs) SetTimestamp(tstamp time.Time) {
l.Timestamp = tstamp
}
// lockRequesterInfo stores various info from the client for each lock that is requested
type lockRequesterInfo struct {
writer bool // Bool whether write or read lock
node string // Network address of client claiming lock
rpcPath string // RPC path of client claiming lock
uid string // Uid to uniquely identify request of client
timestamp time.Time // Timestamp set at the time of initialization
timeLastCheck time.Time // Timestamp for last check of validity of lock
}
// isWriteLock returns whether the lock is a write or read lock
func isWriteLock(lri []lockRequesterInfo) bool {
return len(lri) == 1 && lri[0].writer
}
// lockServer is type for RPC handlers
type lockServer struct {
rpcPath string
mutex sync.Mutex
lockMap map[string][]lockRequesterInfo
timestamp time.Time // Timestamp set at the time of initialization. Resets naturally on minio server restart.
}
func (l *lockServer) verifyArgs(args *LockArgs) error {
if !l.timestamp.Equal(args.Timestamp) {
return errInvalidTimestamp
}
if !isRPCTokenValid(args.Token) {
return errInvalidToken
}
return nil
}
/// Distributed lock handlers
// LoginHandler - handles LoginHandler RPC call.
func (l *lockServer) LoginHandler(args *RPCLoginArgs, reply *RPCLoginReply) error {
jwt, err := newJWT(defaultTokenExpiry)
if err != nil {
return err
}
if err = jwt.Authenticate(args.Username, args.Password); err != nil {
return err
}
token, err := jwt.GenerateToken(args.Username)
if err != nil {
return err
}
reply.Token = token
reply.Timestamp = l.timestamp
return nil
}
// Lock - rpc handler for (single) write lock operation.
func (l *lockServer) Lock(args *LockArgs, reply *bool) error {
l.mutex.Lock()
defer l.mutex.Unlock()
if err := l.verifyArgs(args); err != nil {
return err
}
_, *reply = l.lockMap[args.Name]
if !*reply { // No locks held on the given name, so claim write lock
l.lockMap[args.Name] = []lockRequesterInfo{{writer: true, node: args.Node, rpcPath: args.RPCPath, uid: args.UID, timestamp: time.Now(), timeLastCheck: time.Now()}}
}
*reply = !*reply // Negate *reply to return true when lock is granted or false otherwise
return nil
}
// Unlock - rpc handler for (single) write unlock operation.
func (l *lockServer) Unlock(args *LockArgs, reply *bool) error {
l.mutex.Lock()
defer l.mutex.Unlock()
if err := l.verifyArgs(args); err != nil {
return err
}
var lri []lockRequesterInfo
lri, *reply = l.lockMap[args.Name]
if !*reply { // No lock is held on the given name
return fmt.Errorf("Unlock attempted on an unlocked entity: %s", args.Name)
}
if *reply = isWriteLock(lri); !*reply { // Unless it is a write lock
return fmt.Errorf("Unlock attempted on a read locked entity: %s (%d read locks active)", args.Name, len(lri))
}
if l.removeEntry(args.Name, args.UID, &lri) {
return nil
}
return fmt.Errorf("Unlock unable to find corresponding lock for uid: %s", args.UID)
}
// RLock - rpc handler for read lock operation.
func (l *lockServer) RLock(args *LockArgs, reply *bool) error {
l.mutex.Lock()
defer l.mutex.Unlock()
if err := l.verifyArgs(args); err != nil {
return err
}
var lri []lockRequesterInfo
lri, *reply = l.lockMap[args.Name]
if !*reply { // No locks held on the given name, so claim (first) read lock
l.lockMap[args.Name] = []lockRequesterInfo{{writer: false, node: args.Node, rpcPath: args.RPCPath, uid: args.UID, timestamp: time.Now(), timeLastCheck: time.Now()}}
*reply = true
} else {
if *reply = !isWriteLock(lri); *reply { // Unless there is a write lock
l.lockMap[args.Name] = append(l.lockMap[args.Name], lockRequesterInfo{writer: false, node: args.Node, rpcPath: args.RPCPath, uid: args.UID, timestamp: time.Now(), timeLastCheck: time.Now()})
}
}
return nil
}
// RUnlock - rpc handler for read unlock operation.
func (l *lockServer) RUnlock(args *LockArgs, reply *bool) error {
l.mutex.Lock()
defer l.mutex.Unlock()
if err := l.verifyArgs(args); err != nil {
return err
}
var lri []lockRequesterInfo
if lri, *reply = l.lockMap[args.Name]; !*reply { // No lock is held on the given name
return fmt.Errorf("RUnlock attempted on an unlocked entity: %s", args.Name)
}
if *reply = !isWriteLock(lri); !*reply { // A write-lock is held, cannot release a read lock
return fmt.Errorf("RUnlock attempted on a write locked entity: %s", args.Name)
}
if l.removeEntry(args.Name, args.UID, &lri) {
return nil
}
return fmt.Errorf("RUnlock unable to find corresponding read lock for uid: %s", args.UID)
}
// Active - rpc handler for active lock status.
func (l *lockServer) Active(args *LockArgs, reply *bool) error {
l.mutex.Lock()
defer l.mutex.Unlock()
if err := l.verifyArgs(args); err != nil {
return err
}
var lri []lockRequesterInfo
if lri, *reply = l.lockMap[args.Name]; !*reply {
return nil // No lock is held on the given name so return false
}
// Check whether uid is still active
for _, entry := range lri {
if *reply = entry.uid == args.UID; *reply {
return nil // When uid found return true
}
}
return nil // None found so return false
}
// removeEntry either, based on the uid of the lock message, removes a single entry from the
// lockRequesterInfo array or the whole array from the map (in case of a write lock or last read lock)
func (l *lockServer) removeEntry(name, uid string, lri *[]lockRequesterInfo) bool {
// Find correct entry to remove based on uid
for index, entry := range *lri {
if entry.uid == uid {
if len(*lri) == 1 | else {
// Remove the appropriate read lock
*lri = append((*lri)[:index], (*lri)[index+1:]...)
l.lockMap[name] = *lri
}
return true
}
}
return false
}
// nameLockRequesterInfoPair is a helper type for lock maintenance
type nameLockRequesterInfoPair struct {
name string
lri lockRequesterInfo
}
// getLongLivedLocks returns locks that are older than a certain time and
// have not been 'checked' for validity too soon enough
func getLongLivedLocks(m map[string][]lockRequesterInfo, interval time.Duration) []nameLockRequesterInfoPair {
rslt := []nameLockRequesterInfoPair{}
for name, lriArray := range m {
for idx := range lriArray {
// Check whether enough time has gone by since last check
if time.Since(lriArray[idx].timeLastCheck) >= interval {
rslt = append(rslt, nameLockRequesterInfoPair{name: name, lri: lriArray[idx]})
lriArray[idx].timeLastCheck = time.Now()
}
}
}
return rslt
}
// lockMaintenance loops over locks that have been active for some time and checks back
// with the original server whether it is still alive or not
func (l *lockServer) lockMaintenance(interval time.Duration) {
l.mutex.Lock()
// get list of locks to check
nlripLongLived := getLongLivedLocks(l.lockMap, interval)
l.mutex.Unlock()
for _, nlrip := range nlripLongLived {
c := newClient(nlrip.lri.node, nlrip.lri.rpcPath)
var active bool
// Call back to original server verify whether the lock is still active (based on name & uid)
if err := c.Call("Dsync.Active", &LockArgs{Name: nlrip.name, UID: nlrip.lri.uid}, &active); err != nil {
// We failed to connect back to the server that originated the lock, this can either be due to
// - server at client down
// - some network error (and server is up normally)
//
// We will ignore the error, and we will retry later to get resolve on this lock
c.Close | {
delete(l.lockMap, name) // Remove the (last) lock
} | conditional_block |
worker.rs | .blob_store.clone(),
options.bootstrap.location.clone(),
),
deno_fetch::deno_fetch::init_ops_and_esm::<PermissionsContainer>(
deno_fetch::Options {
user_agent: options.bootstrap.user_agent.clone(),
root_cert_store_provider: options.root_cert_store_provider.clone(),
unsafely_ignore_certificate_errors: options
.unsafely_ignore_certificate_errors
.clone(),
file_fetch_handler: Rc::new(deno_fetch::FsFetchHandler),
..Default::default()
},
),
deno_cache::deno_cache::init_ops_and_esm::<SqliteBackedCache>(
create_cache,
),
deno_websocket::deno_websocket::init_ops_and_esm::<PermissionsContainer>(
options.bootstrap.user_agent.clone(),
options.root_cert_store_provider.clone(),
options.unsafely_ignore_certificate_errors.clone(),
),
deno_webstorage::deno_webstorage::init_ops_and_esm(
options.origin_storage_dir.clone(),
),
deno_crypto::deno_crypto::init_ops_and_esm(options.seed),
deno_broadcast_channel::deno_broadcast_channel::init_ops_and_esm(
options.broadcast_channel.clone(),
unstable,
),
deno_ffi::deno_ffi::init_ops_and_esm::<PermissionsContainer>(unstable),
deno_net::deno_net::init_ops_and_esm::<PermissionsContainer>(
options.root_cert_store_provider.clone(),
unstable,
options.unsafely_ignore_certificate_errors.clone(),
),
deno_tls::deno_tls::init_ops_and_esm(),
deno_kv::deno_kv::init_ops_and_esm(
MultiBackendDbHandler::remote_or_sqlite::<PermissionsContainer>(
options.origin_storage_dir.clone(),
),
unstable,
),
deno_napi::deno_napi::init_ops_and_esm::<PermissionsContainer>(),
deno_http::deno_http::init_ops_and_esm::<DefaultHttpPropertyExtractor>(),
deno_io::deno_io::init_ops_and_esm(Some(options.stdio)),
deno_fs::deno_fs::init_ops_and_esm::<PermissionsContainer>(
unstable,
options.fs.clone(),
),
deno_node::deno_node::init_ops_and_esm::<PermissionsContainer>(
options.npm_resolver,
options.fs,
),
// Ops from this crate
ops::runtime::deno_runtime::init_ops_and_esm(main_module.clone()),
ops::worker_host::deno_worker_host::init_ops_and_esm(
options.create_web_worker_cb.clone(),
options.format_js_error_fn.clone(),
),
ops::fs_events::deno_fs_events::init_ops_and_esm(),
ops::os::deno_os::init_ops_and_esm(exit_code.clone()),
ops::permissions::deno_permissions::init_ops_and_esm(),
ops::process::deno_process::init_ops_and_esm(),
ops::signal::deno_signal::init_ops_and_esm(),
ops::tty::deno_tty::init_ops_and_esm(),
ops::http::deno_http_runtime::init_ops_and_esm(),
deno_permissions_worker::init_ops_and_esm(
permissions,
unstable,
enable_testing_features,
),
runtime::init_ops_and_esm(),
];
for extension in &mut extensions {
#[cfg(not(feature = "__runtime_js_sources"))]
{
extension.js_files = std::borrow::Cow::Borrowed(&[]);
extension.esm_files = std::borrow::Cow::Borrowed(&[]);
extension.esm_entry_point = None;
}
#[cfg(feature = "__runtime_js_sources")]
{
use crate::shared::maybe_transpile_source;
for source in extension.esm_files.to_mut() {
maybe_transpile_source(source).unwrap();
}
for source in extension.js_files.to_mut() {
maybe_transpile_source(source).unwrap();
}
}
}
extensions.extend(std::mem::take(&mut options.extensions));
#[cfg(all(feature = "include_js_files_for_snapshotting", feature = "dont_create_runtime_snapshot", not(feature = "__runtime_js_sources")))]
options.startup_snapshot.as_ref().expect("Sources are not embedded, snapshotting was disabled and a user snapshot was not provided.");
// Clear extension modules from the module map, except preserve `node:*`
// modules.
let preserve_snapshotted_modules =
Some(SUPPORTED_BUILTIN_NODE_MODULES_WITH_PREFIX);
let mut js_runtime = JsRuntime::new(RuntimeOptions {
module_loader: Some(options.module_loader.clone()),
startup_snapshot: options
.startup_snapshot
.or_else(crate::js::deno_isolate_init),
create_params: options.create_params,
source_map_getter: options.source_map_getter,
get_error_class_fn: options.get_error_class_fn,
shared_array_buffer_store: options.shared_array_buffer_store.clone(),
compiled_wasm_module_store: options.compiled_wasm_module_store.clone(),
extensions,
preserve_snapshotted_modules,
inspector: options.maybe_inspector_server.is_some(),
is_main: true,
..Default::default()
});
if let Some(server) = options.maybe_inspector_server.clone() {
server.register_inspector(
main_module.to_string(),
&mut js_runtime,
options.should_break_on_first_statement
|| options.should_wait_for_inspector_session,
);
// Put inspector handle into the op state so we can put a breakpoint when
// executing a CJS entrypoint.
let op_state = js_runtime.op_state();
let inspector = js_runtime.inspector();
op_state.borrow_mut().put(inspector);
}
let bootstrap_fn_global = {
let context = js_runtime.main_context();
let scope = &mut js_runtime.handle_scope();
let context_local = v8::Local::new(scope, context);
let global_obj = context_local.global(scope);
let bootstrap_str =
v8::String::new_external_onebyte_static(scope, b"bootstrap").unwrap();
let bootstrap_ns: v8::Local<v8::Object> = global_obj
.get(scope, bootstrap_str.into())
.unwrap()
.try_into()
.unwrap();
let main_runtime_str =
v8::String::new_external_onebyte_static(scope, b"mainRuntime").unwrap();
let bootstrap_fn =
bootstrap_ns.get(scope, main_runtime_str.into()).unwrap();
let bootstrap_fn =
v8::Local::<v8::Function>::try_from(bootstrap_fn).unwrap();
v8::Global::new(scope, bootstrap_fn)
};
Self {
js_runtime,
should_break_on_first_statement: options.should_break_on_first_statement,
should_wait_for_inspector_session: options
.should_wait_for_inspector_session,
exit_code,
bootstrap_fn_global: Some(bootstrap_fn_global),
}
}
pub fn bootstrap(&mut self, options: &BootstrapOptions) {
let scope = &mut self.js_runtime.handle_scope();
let args = options.as_v8(scope);
let bootstrap_fn = self.bootstrap_fn_global.take().unwrap();
let bootstrap_fn = v8::Local::new(scope, bootstrap_fn);
let undefined = v8::undefined(scope);
bootstrap_fn.call(scope, undefined.into(), &[args]).unwrap();
}
/// See [JsRuntime::execute_script](deno_core::JsRuntime::execute_script)
pub fn execute_script(
&mut self,
script_name: &'static str,
source_code: ModuleCode,
) -> Result<v8::Global<v8::Value>, AnyError> {
self.js_runtime.execute_script(script_name, source_code)
}
/// Loads and instantiates specified JavaScript module as "main" module.
pub async fn preload_main_module(
&mut self,
module_specifier: &ModuleSpecifier,
) -> Result<ModuleId, AnyError> {
self
.js_runtime
.load_main_module(module_specifier, None)
.await
}
/// Loads and instantiates specified JavaScript module as "side" module.
pub async fn preload_side_module(
&mut self,
module_specifier: &ModuleSpecifier,
) -> Result<ModuleId, AnyError> {
self
.js_runtime
.load_side_module(module_specifier, None)
.await
}
/// Executes specified JavaScript module.
pub async fn evaluate_module(
&mut self,
id: ModuleId,
) -> Result<(), AnyError> {
self.wait_for_inspector_session();
let mut receiver = self.js_runtime.mod_evaluate(id);
tokio::select! {
// Not using biased mode leads to non-determinism for relatively simple
// programs.
biased;
maybe_result = &mut receiver => {
debug!("received module evaluate {:#?}", maybe_result);
maybe_result.expect("Module evaluation result not provided.")
}
event_loop_result = self.run_event_loop(false) => {
event_loop_result?;
let maybe_result = receiver.await;
maybe_result.expect("Module evaluation result not provided.")
}
}
}
/// Loads, instantiates and executes specified JavaScript module.
pub async fn execute_side_module(
&mut self,
module_specifier: &ModuleSpecifier,
) -> Result<(), AnyError> {
let id = self.preload_side_module(module_specifier).await?;
self.evaluate_module(id).await
}
/// Loads, instantiates and executes specified JavaScript module.
///
/// This module will have "import.meta.main" equal to true.
pub async fn | execute_main_module | identifier_name | |
worker.rs | <ops::worker_host::CreateWebWorkerCb>,
pub format_js_error_fn: Option<Arc<FormatJsErrorFn>>,
/// Source map reference for errors.
pub source_map_getter: Option<Box<dyn SourceMapGetter>>,
pub maybe_inspector_server: Option<Arc<InspectorServer>>,
// If true, the worker will wait for inspector session and break on first
// statement of user code. Takes higher precedence than
// `should_wait_for_inspector_session`.
pub should_break_on_first_statement: bool,
// If true, the worker will wait for inspector session before executing
// user code.
pub should_wait_for_inspector_session: bool,
/// Allows to map error type to a string "class" used to represent
/// error in JavaScript.
pub get_error_class_fn: Option<GetErrorClassFn>,
pub cache_storage_dir: Option<std::path::PathBuf>,
pub origin_storage_dir: Option<std::path::PathBuf>,
pub blob_store: Arc<BlobStore>,
pub broadcast_channel: InMemoryBroadcastChannel,
/// The store to use for transferring SharedArrayBuffers between isolates.
/// If multiple isolates should have the possibility of sharing
/// SharedArrayBuffers, they should use the same [SharedArrayBufferStore]. If
/// no [SharedArrayBufferStore] is specified, SharedArrayBuffer can not be
/// serialized.
pub shared_array_buffer_store: Option<SharedArrayBufferStore>,
/// The store to use for transferring `WebAssembly.Module` objects between
/// isolates.
/// If multiple isolates should have the possibility of sharing
/// `WebAssembly.Module` objects, they should use the same
/// [CompiledWasmModuleStore]. If no [CompiledWasmModuleStore] is specified,
/// `WebAssembly.Module` objects cannot be serialized.
pub compiled_wasm_module_store: Option<CompiledWasmModuleStore>,
pub stdio: Stdio,
}
impl Default for WorkerOptions {
fn default() -> Self | root_cert_store_provider: Default::default(),
npm_resolver: Default::default(),
blob_store: Default::default(),
extensions: Default::default(),
startup_snapshot: Default::default(),
create_params: Default::default(),
bootstrap: Default::default(),
stdio: Default::default(),
}
}
}
impl MainWorker {
pub fn bootstrap_from_options(
main_module: ModuleSpecifier,
permissions: PermissionsContainer,
options: WorkerOptions,
) -> Self {
let bootstrap_options = options.bootstrap.clone();
let mut worker = Self::from_options(main_module, permissions, options);
worker.bootstrap(&bootstrap_options);
worker
}
pub fn from_options(
main_module: ModuleSpecifier,
permissions: PermissionsContainer,
mut options: WorkerOptions,
) -> Self {
deno_core::extension!(deno_permissions_worker,
options = {
permissions: PermissionsContainer,
unstable: bool,
enable_testing_features: bool,
},
state = |state, options| {
state.put::<PermissionsContainer>(options.permissions);
state.put(ops::UnstableChecker { unstable: options.unstable });
state.put(ops::TestingFeaturesEnabled(options.enable_testing_features));
},
);
// Permissions: many ops depend on this
let unstable = options.bootstrap.unstable;
let enable_testing_features = options.bootstrap.enable_testing_features;
let exit_code = ExitCode(Arc::new(AtomicI32::new(0)));
let create_cache = options.cache_storage_dir.map(|storage_dir| {
let create_cache_fn = move || SqliteBackedCache::new(storage_dir.clone());
CreateCache(Arc::new(create_cache_fn))
});
// NOTE(bartlomieju): ordering is important here, keep it in sync with
// `runtime/build.rs`, `runtime/web_worker.rs` and `cli/build.rs`!
let mut extensions = vec![
// Web APIs
deno_webidl::deno_webidl::init_ops_and_esm(),
deno_console::deno_console::init_ops_and_esm(),
deno_url::deno_url::init_ops_and_esm(),
deno_web::deno_web::init_ops_and_esm::<PermissionsContainer>(
options.blob_store.clone(),
options.bootstrap.location.clone(),
),
deno_fetch::deno_fetch::init_ops_and_esm::<PermissionsContainer>(
deno_fetch::Options {
user_agent: options.bootstrap.user_agent.clone(),
root_cert_store_provider: options.root_cert_store_provider.clone(),
unsafely_ignore_certificate_errors: options
.unsafely_ignore_certificate_errors
.clone(),
file_fetch_handler: Rc::new(deno_fetch::FsFetchHandler),
..Default::default()
},
),
deno_cache::deno_cache::init_ops_and_esm::<SqliteBackedCache>(
create_cache,
),
deno_websocket::deno_websocket::init_ops_and_esm::<PermissionsContainer>(
options.bootstrap.user_agent.clone(),
options.root_cert_store_provider.clone(),
options.unsafely_ignore_certificate_errors.clone(),
),
deno_webstorage::deno_webstorage::init_ops_and_esm(
options.origin_storage_dir.clone(),
),
deno_crypto::deno_crypto::init_ops_and_esm(options.seed),
deno_broadcast_channel::deno_broadcast_channel::init_ops_and_esm(
options.broadcast_channel.clone(),
unstable,
),
deno_ffi::deno_ffi::init_ops_and_esm::<PermissionsContainer>(unstable),
deno_net::deno_net::init_ops_and_esm::<PermissionsContainer>(
options.root_cert_store_provider.clone(),
unstable,
options.unsafely_ignore_certificate_errors.clone(),
),
deno_tls::deno_tls::init_ops_and_esm(),
deno_kv::deno_kv::init_ops_and_esm(
MultiBackendDbHandler::remote_or_sqlite::<PermissionsContainer>(
options.origin_storage_dir.clone(),
),
unstable,
),
deno_napi::deno_napi::init_ops_and_esm::<PermissionsContainer>(),
deno_http::deno_http::init_ops_and_esm::<DefaultHttpPropertyExtractor>(),
deno_io::deno_io::init_ops_and_esm(Some(options.stdio)),
deno_fs::deno_fs::init_ops_and_esm::<PermissionsContainer>(
unstable,
options.fs.clone(),
),
deno_node::deno_node::init_ops_and_esm::<PermissionsContainer>(
options.npm_resolver,
options.fs,
),
// Ops from this crate
ops::runtime::deno_runtime::init_ops_and_esm(main_module.clone()),
ops::worker_host::deno_worker_host::init_ops_and_esm(
options.create_web_worker_cb.clone(),
options.format_js_error_fn.clone(),
),
ops::fs_events::deno_fs_events::init_ops_and_esm(),
ops::os::deno_os::init_ops_and_esm(exit_code.clone()),
ops::permissions::deno_permissions::init_ops_and_esm(),
ops::process::deno_process::init_ops_and_esm(),
ops::signal::deno_signal::init_ops_and_esm(),
ops::tty::deno_tty::init_ops_and_esm(),
ops::http::deno_http_runtime::init_ops_and_esm(),
deno_permissions_worker::init_ops_and_esm(
permissions,
unstable,
enable_testing_features,
),
runtime::init_ops_and_esm(),
];
for extension in &mut extensions {
#[cfg(not(feature = "__runtime_js_sources"))]
{
extension.js_files = std::borrow::Cow::Borrowed(&[]);
extension.esm_files = std::borrow::Cow::Borrowed(&[]);
extension.esm_entry_point = None;
}
#[cfg(feature = "__runtime_js_sources")]
{
use crate::shared::maybe_transpile_source;
for source in extension.esm_files.to_mut() {
maybe_transpile_source(source).unwrap();
}
for source in extension.js_files.to_mut() {
maybe_transpile_source(source).unwrap();
}
}
}
extensions.extend(std::mem::take(&mut options.extensions));
#[cfg(all(feature = "include_js_files_for_snapshotting", feature = "dont_create_runtime_snapshot", not(feature = "__runtime_js_sources")))]
options.startup_snapshot.as_ref().expect("Sources are not embedded, snapshotting was disabled and a user snapshot was not provided.");
// Clear extension modules from the module map, except preserve `node:*`
// modules.
let preserve_snapshotted_modules =
Some(SUPPORTED_BUILTIN_NODE_MODULES_WITH_PREFIX);
let mut | {
Self {
create_web_worker_cb: Arc::new(|_| {
unimplemented!("web workers are not supported")
}),
fs: Arc::new(deno_fs::RealFs),
module_loader: Rc::new(FsModuleLoader),
seed: None,
unsafely_ignore_certificate_errors: Default::default(),
should_break_on_first_statement: Default::default(),
should_wait_for_inspector_session: Default::default(),
compiled_wasm_module_store: Default::default(),
shared_array_buffer_store: Default::default(),
maybe_inspector_server: Default::default(),
format_js_error_fn: Default::default(),
get_error_class_fn: Default::default(),
origin_storage_dir: Default::default(),
cache_storage_dir: Default::default(),
broadcast_channel: Default::default(),
source_map_getter: Default::default(), | identifier_body |
worker.rs | possibility of sharing
/// SharedArrayBuffers, they should use the same [SharedArrayBufferStore]. If
/// no [SharedArrayBufferStore] is specified, SharedArrayBuffer can not be
/// serialized.
pub shared_array_buffer_store: Option<SharedArrayBufferStore>,
/// The store to use for transferring `WebAssembly.Module` objects between
/// isolates.
/// If multiple isolates should have the possibility of sharing
/// `WebAssembly.Module` objects, they should use the same
/// [CompiledWasmModuleStore]. If no [CompiledWasmModuleStore] is specified,
/// `WebAssembly.Module` objects cannot be serialized.
pub compiled_wasm_module_store: Option<CompiledWasmModuleStore>,
pub stdio: Stdio,
}
impl Default for WorkerOptions {
fn default() -> Self {
Self {
create_web_worker_cb: Arc::new(|_| {
unimplemented!("web workers are not supported")
}),
fs: Arc::new(deno_fs::RealFs),
module_loader: Rc::new(FsModuleLoader),
seed: None,
unsafely_ignore_certificate_errors: Default::default(),
should_break_on_first_statement: Default::default(),
should_wait_for_inspector_session: Default::default(),
compiled_wasm_module_store: Default::default(),
shared_array_buffer_store: Default::default(),
maybe_inspector_server: Default::default(),
format_js_error_fn: Default::default(),
get_error_class_fn: Default::default(),
origin_storage_dir: Default::default(),
cache_storage_dir: Default::default(),
broadcast_channel: Default::default(),
source_map_getter: Default::default(),
root_cert_store_provider: Default::default(),
npm_resolver: Default::default(),
blob_store: Default::default(),
extensions: Default::default(),
startup_snapshot: Default::default(),
create_params: Default::default(),
bootstrap: Default::default(),
stdio: Default::default(),
}
}
}
impl MainWorker {
pub fn bootstrap_from_options(
main_module: ModuleSpecifier,
permissions: PermissionsContainer,
options: WorkerOptions,
) -> Self {
let bootstrap_options = options.bootstrap.clone();
let mut worker = Self::from_options(main_module, permissions, options);
worker.bootstrap(&bootstrap_options);
worker
}
pub fn from_options(
main_module: ModuleSpecifier,
permissions: PermissionsContainer,
mut options: WorkerOptions,
) -> Self {
deno_core::extension!(deno_permissions_worker,
options = {
permissions: PermissionsContainer,
unstable: bool,
enable_testing_features: bool,
},
state = |state, options| {
state.put::<PermissionsContainer>(options.permissions);
state.put(ops::UnstableChecker { unstable: options.unstable });
state.put(ops::TestingFeaturesEnabled(options.enable_testing_features));
},
);
// Permissions: many ops depend on this
let unstable = options.bootstrap.unstable;
let enable_testing_features = options.bootstrap.enable_testing_features;
let exit_code = ExitCode(Arc::new(AtomicI32::new(0)));
let create_cache = options.cache_storage_dir.map(|storage_dir| {
let create_cache_fn = move || SqliteBackedCache::new(storage_dir.clone());
CreateCache(Arc::new(create_cache_fn))
});
// NOTE(bartlomieju): ordering is important here, keep it in sync with
// `runtime/build.rs`, `runtime/web_worker.rs` and `cli/build.rs`!
let mut extensions = vec![
// Web APIs
deno_webidl::deno_webidl::init_ops_and_esm(),
deno_console::deno_console::init_ops_and_esm(),
deno_url::deno_url::init_ops_and_esm(),
deno_web::deno_web::init_ops_and_esm::<PermissionsContainer>(
options.blob_store.clone(),
options.bootstrap.location.clone(),
),
deno_fetch::deno_fetch::init_ops_and_esm::<PermissionsContainer>(
deno_fetch::Options {
user_agent: options.bootstrap.user_agent.clone(),
root_cert_store_provider: options.root_cert_store_provider.clone(),
unsafely_ignore_certificate_errors: options
.unsafely_ignore_certificate_errors
.clone(),
file_fetch_handler: Rc::new(deno_fetch::FsFetchHandler),
..Default::default()
},
),
deno_cache::deno_cache::init_ops_and_esm::<SqliteBackedCache>(
create_cache,
),
deno_websocket::deno_websocket::init_ops_and_esm::<PermissionsContainer>(
options.bootstrap.user_agent.clone(),
options.root_cert_store_provider.clone(),
options.unsafely_ignore_certificate_errors.clone(),
),
deno_webstorage::deno_webstorage::init_ops_and_esm(
options.origin_storage_dir.clone(),
),
deno_crypto::deno_crypto::init_ops_and_esm(options.seed),
deno_broadcast_channel::deno_broadcast_channel::init_ops_and_esm(
options.broadcast_channel.clone(),
unstable,
),
deno_ffi::deno_ffi::init_ops_and_esm::<PermissionsContainer>(unstable),
deno_net::deno_net::init_ops_and_esm::<PermissionsContainer>(
options.root_cert_store_provider.clone(),
unstable,
options.unsafely_ignore_certificate_errors.clone(),
),
deno_tls::deno_tls::init_ops_and_esm(),
deno_kv::deno_kv::init_ops_and_esm(
MultiBackendDbHandler::remote_or_sqlite::<PermissionsContainer>(
options.origin_storage_dir.clone(),
),
unstable,
),
deno_napi::deno_napi::init_ops_and_esm::<PermissionsContainer>(),
deno_http::deno_http::init_ops_and_esm::<DefaultHttpPropertyExtractor>(),
deno_io::deno_io::init_ops_and_esm(Some(options.stdio)),
deno_fs::deno_fs::init_ops_and_esm::<PermissionsContainer>(
unstable,
options.fs.clone(),
),
deno_node::deno_node::init_ops_and_esm::<PermissionsContainer>(
options.npm_resolver,
options.fs,
),
// Ops from this crate
ops::runtime::deno_runtime::init_ops_and_esm(main_module.clone()),
ops::worker_host::deno_worker_host::init_ops_and_esm(
options.create_web_worker_cb.clone(),
options.format_js_error_fn.clone(),
),
ops::fs_events::deno_fs_events::init_ops_and_esm(),
ops::os::deno_os::init_ops_and_esm(exit_code.clone()),
ops::permissions::deno_permissions::init_ops_and_esm(),
ops::process::deno_process::init_ops_and_esm(),
ops::signal::deno_signal::init_ops_and_esm(),
ops::tty::deno_tty::init_ops_and_esm(),
ops::http::deno_http_runtime::init_ops_and_esm(),
deno_permissions_worker::init_ops_and_esm(
permissions,
unstable,
enable_testing_features,
),
runtime::init_ops_and_esm(),
];
for extension in &mut extensions {
#[cfg(not(feature = "__runtime_js_sources"))]
{
extension.js_files = std::borrow::Cow::Borrowed(&[]);
extension.esm_files = std::borrow::Cow::Borrowed(&[]);
extension.esm_entry_point = None;
}
#[cfg(feature = "__runtime_js_sources")]
{
use crate::shared::maybe_transpile_source;
for source in extension.esm_files.to_mut() {
maybe_transpile_source(source).unwrap();
}
for source in extension.js_files.to_mut() {
maybe_transpile_source(source).unwrap();
}
}
}
extensions.extend(std::mem::take(&mut options.extensions));
#[cfg(all(feature = "include_js_files_for_snapshotting", feature = "dont_create_runtime_snapshot", not(feature = "__runtime_js_sources")))]
options.startup_snapshot.as_ref().expect("Sources are not embedded, snapshotting was disabled and a user snapshot was not provided.");
// Clear extension modules from the module map, except preserve `node:*`
// modules.
let preserve_snapshotted_modules =
Some(SUPPORTED_BUILTIN_NODE_MODULES_WITH_PREFIX);
let mut js_runtime = JsRuntime::new(RuntimeOptions {
module_loader: Some(options.module_loader.clone()),
startup_snapshot: options
.startup_snapshot
.or_else(crate::js::deno_isolate_init),
create_params: options.create_params,
source_map_getter: options.source_map_getter,
get_error_class_fn: options.get_error_class_fn,
shared_array_buffer_store: options.shared_array_buffer_store.clone(),
compiled_wasm_module_store: options.compiled_wasm_module_store.clone(),
extensions,
preserve_snapshotted_modules,
inspector: options.maybe_inspector_server.is_some(),
is_main: true,
..Default::default()
});
if let Some(server) = options.maybe_inspector_server.clone() | {
server.register_inspector(
main_module.to_string(),
&mut js_runtime,
options.should_break_on_first_statement
|| options.should_wait_for_inspector_session,
);
// Put inspector handle into the op state so we can put a breakpoint when
// executing a CJS entrypoint.
let op_state = js_runtime.op_state();
let inspector = js_runtime.inspector();
op_state.borrow_mut().put(inspector);
} | conditional_block | |
worker.rs | Arc<ops::worker_host::CreateWebWorkerCb>,
pub format_js_error_fn: Option<Arc<FormatJsErrorFn>>,
/// Source map reference for errors.
pub source_map_getter: Option<Box<dyn SourceMapGetter>>,
pub maybe_inspector_server: Option<Arc<InspectorServer>>,
// If true, the worker will wait for inspector session and break on first
// statement of user code. Takes higher precedence than
// `should_wait_for_inspector_session`.
pub should_break_on_first_statement: bool,
// If true, the worker will wait for inspector session before executing
// user code.
pub should_wait_for_inspector_session: bool,
/// Allows to map error type to a string "class" used to represent
/// error in JavaScript.
pub get_error_class_fn: Option<GetErrorClassFn>,
pub cache_storage_dir: Option<std::path::PathBuf>,
pub origin_storage_dir: Option<std::path::PathBuf>,
pub blob_store: Arc<BlobStore>,
pub broadcast_channel: InMemoryBroadcastChannel,
/// The store to use for transferring SharedArrayBuffers between isolates.
/// If multiple isolates should have the possibility of sharing
/// SharedArrayBuffers, they should use the same [SharedArrayBufferStore]. If
/// no [SharedArrayBufferStore] is specified, SharedArrayBuffer can not be
/// serialized.
pub shared_array_buffer_store: Option<SharedArrayBufferStore>,
/// The store to use for transferring `WebAssembly.Module` objects between
/// isolates.
/// If multiple isolates should have the possibility of sharing
/// `WebAssembly.Module` objects, they should use the same
/// [CompiledWasmModuleStore]. If no [CompiledWasmModuleStore] is specified,
/// `WebAssembly.Module` objects cannot be serialized.
pub compiled_wasm_module_store: Option<CompiledWasmModuleStore>,
pub stdio: Stdio,
}
impl Default for WorkerOptions {
fn default() -> Self {
Self {
create_web_worker_cb: Arc::new(|_| {
unimplemented!("web workers are not supported")
}),
fs: Arc::new(deno_fs::RealFs),
module_loader: Rc::new(FsModuleLoader),
seed: None,
unsafely_ignore_certificate_errors: Default::default(),
should_break_on_first_statement: Default::default(),
should_wait_for_inspector_session: Default::default(),
compiled_wasm_module_store: Default::default(),
shared_array_buffer_store: Default::default(),
maybe_inspector_server: Default::default(),
format_js_error_fn: Default::default(),
get_error_class_fn: Default::default(),
origin_storage_dir: Default::default(),
cache_storage_dir: Default::default(),
broadcast_channel: Default::default(),
source_map_getter: Default::default(),
root_cert_store_provider: Default::default(),
npm_resolver: Default::default(),
blob_store: Default::default(),
extensions: Default::default(),
startup_snapshot: Default::default(),
create_params: Default::default(),
bootstrap: Default::default(),
stdio: Default::default(),
}
}
}
impl MainWorker {
pub fn bootstrap_from_options(
main_module: ModuleSpecifier,
permissions: PermissionsContainer,
options: WorkerOptions,
) -> Self {
let bootstrap_options = options.bootstrap.clone();
let mut worker = Self::from_options(main_module, permissions, options);
worker.bootstrap(&bootstrap_options);
worker
}
pub fn from_options(
main_module: ModuleSpecifier,
permissions: PermissionsContainer,
mut options: WorkerOptions,
) -> Self {
deno_core::extension!(deno_permissions_worker,
options = { | state = |state, options| {
state.put::<PermissionsContainer>(options.permissions);
state.put(ops::UnstableChecker { unstable: options.unstable });
state.put(ops::TestingFeaturesEnabled(options.enable_testing_features));
},
);
// Permissions: many ops depend on this
let unstable = options.bootstrap.unstable;
let enable_testing_features = options.bootstrap.enable_testing_features;
let exit_code = ExitCode(Arc::new(AtomicI32::new(0)));
let create_cache = options.cache_storage_dir.map(|storage_dir| {
let create_cache_fn = move || SqliteBackedCache::new(storage_dir.clone());
CreateCache(Arc::new(create_cache_fn))
});
// NOTE(bartlomieju): ordering is important here, keep it in sync with
// `runtime/build.rs`, `runtime/web_worker.rs` and `cli/build.rs`!
let mut extensions = vec![
// Web APIs
deno_webidl::deno_webidl::init_ops_and_esm(),
deno_console::deno_console::init_ops_and_esm(),
deno_url::deno_url::init_ops_and_esm(),
deno_web::deno_web::init_ops_and_esm::<PermissionsContainer>(
options.blob_store.clone(),
options.bootstrap.location.clone(),
),
deno_fetch::deno_fetch::init_ops_and_esm::<PermissionsContainer>(
deno_fetch::Options {
user_agent: options.bootstrap.user_agent.clone(),
root_cert_store_provider: options.root_cert_store_provider.clone(),
unsafely_ignore_certificate_errors: options
.unsafely_ignore_certificate_errors
.clone(),
file_fetch_handler: Rc::new(deno_fetch::FsFetchHandler),
..Default::default()
},
),
deno_cache::deno_cache::init_ops_and_esm::<SqliteBackedCache>(
create_cache,
),
deno_websocket::deno_websocket::init_ops_and_esm::<PermissionsContainer>(
options.bootstrap.user_agent.clone(),
options.root_cert_store_provider.clone(),
options.unsafely_ignore_certificate_errors.clone(),
),
deno_webstorage::deno_webstorage::init_ops_and_esm(
options.origin_storage_dir.clone(),
),
deno_crypto::deno_crypto::init_ops_and_esm(options.seed),
deno_broadcast_channel::deno_broadcast_channel::init_ops_and_esm(
options.broadcast_channel.clone(),
unstable,
),
deno_ffi::deno_ffi::init_ops_and_esm::<PermissionsContainer>(unstable),
deno_net::deno_net::init_ops_and_esm::<PermissionsContainer>(
options.root_cert_store_provider.clone(),
unstable,
options.unsafely_ignore_certificate_errors.clone(),
),
deno_tls::deno_tls::init_ops_and_esm(),
deno_kv::deno_kv::init_ops_and_esm(
MultiBackendDbHandler::remote_or_sqlite::<PermissionsContainer>(
options.origin_storage_dir.clone(),
),
unstable,
),
deno_napi::deno_napi::init_ops_and_esm::<PermissionsContainer>(),
deno_http::deno_http::init_ops_and_esm::<DefaultHttpPropertyExtractor>(),
deno_io::deno_io::init_ops_and_esm(Some(options.stdio)),
deno_fs::deno_fs::init_ops_and_esm::<PermissionsContainer>(
unstable,
options.fs.clone(),
),
deno_node::deno_node::init_ops_and_esm::<PermissionsContainer>(
options.npm_resolver,
options.fs,
),
// Ops from this crate
ops::runtime::deno_runtime::init_ops_and_esm(main_module.clone()),
ops::worker_host::deno_worker_host::init_ops_and_esm(
options.create_web_worker_cb.clone(),
options.format_js_error_fn.clone(),
),
ops::fs_events::deno_fs_events::init_ops_and_esm(),
ops::os::deno_os::init_ops_and_esm(exit_code.clone()),
ops::permissions::deno_permissions::init_ops_and_esm(),
ops::process::deno_process::init_ops_and_esm(),
ops::signal::deno_signal::init_ops_and_esm(),
ops::tty::deno_tty::init_ops_and_esm(),
ops::http::deno_http_runtime::init_ops_and_esm(),
deno_permissions_worker::init_ops_and_esm(
permissions,
unstable,
enable_testing_features,
),
runtime::init_ops_and_esm(),
];
for extension in &mut extensions {
#[cfg(not(feature = "__runtime_js_sources"))]
{
extension.js_files = std::borrow::Cow::Borrowed(&[]);
extension.esm_files = std::borrow::Cow::Borrowed(&[]);
extension.esm_entry_point = None;
}
#[cfg(feature = "__runtime_js_sources")]
{
use crate::shared::maybe_transpile_source;
for source in extension.esm_files.to_mut() {
maybe_transpile_source(source).unwrap();
}
for source in extension.js_files.to_mut() {
maybe_transpile_source(source).unwrap();
}
}
}
extensions.extend(std::mem::take(&mut options.extensions));
#[cfg(all(feature = "include_js_files_for_snapshotting", feature = "dont_create_runtime_snapshot", not(feature = "__runtime_js_sources")))]
options.startup_snapshot.as_ref().expect("Sources are not embedded, snapshotting was disabled and a user snapshot was not provided.");
// Clear extension modules from the module map, except preserve `node:*`
// modules.
let preserve_snapshotted_modules =
Some(SUPPORTED_BUILTIN_NODE_MODULES_WITH_PREFIX);
let mut | permissions: PermissionsContainer,
unstable: bool,
enable_testing_features: bool,
}, | random_line_split |
views.py | grouped_entities
@route('/archives/')
def archives():
all_threads = Thread.query \
.filter(Thread.community_id == g.community.id) \
.order_by(Thread.created_at.desc()).all()
grouped_threads = group_monthly(all_threads)
return render_template('forum/archives.html',
grouped_threads=grouped_threads)
@route('/attachments/')
def attachments():
# XXX: there is probably a way to optimize this and the big loop below...
all_threads = Thread.query \
.filter(Thread.community_id == g.community.id) \
.options(joinedload('posts')) \
.options(joinedload('posts.attachments')) \
.order_by(Thread.created_at.desc()).all()
posts_with_attachments = []
for thread in all_threads:
for post in thread.posts:
if getattr(post, 'attachments', None):
posts_with_attachments.append(post)
posts_with_attachments.sort(key=lambda post: post.created_at)
posts_with_attachments.reverse()
grouped_posts = group_monthly(posts_with_attachments)
return render_template('forum/attachments.html',
grouped_posts=grouped_posts)
class BaseThreadView(object):
Model = Thread
Form = ThreadForm
pk = 'thread_id'
base_template = 'community/_base.html'
def can_send_by_mail(self):
return (g.community.type == 'participative'
or g.community.has_permission(current_user, 'manage'))
def prepare_args(self, args, kwargs):
args, kwargs = super(BaseThreadView, self).prepare_args(args, kwargs)
self.send_by_email = False
if not self.can_send_by_mail() and 'send_by_email' in self.form:
# remove from html form and avoid validation errors
del self.form['send_by_email']
return args, kwargs
def index_url(self):
return url_for(".index", community_id=g.community.slug)
def view_url(self):
return url_for(self.obj)
class ThreadView(BaseThreadView, views.ObjectView):
methods = ['GET', 'HEAD']
Form = PostForm
template = 'forum/thread.html'
@property
def template_kwargs(self):
kw = super(ThreadView, self).template_kwargs
kw['thread'] = self.obj
kw['is_closed'] = self.obj.closed
return kw
thread_view = ThreadView.as_view('thread')
default_view(forum, Post, None, kw_func=post_kw_view_func)(thread_view)
default_view(forum, Thread, 'thread_id', kw_func=default_view_kw)(thread_view)
route('/<int:thread_id>/')(thread_view)
route('/<int:thread_id>/attachments')(
ThreadView.as_view('thread_attachments',
template='forum/thread_attachments.html')
)
class ThreadCreate(BaseThreadView, views.ObjectCreate):
POST_BUTTON = ButtonAction('form', 'create', btn_class='primary',
title=_l(u'Post this message'))
def init_object(self, args, kwargs):
args, kwargs = super(ThreadCreate, self).init_object(args, kwargs)
self.thread = self.obj
return args, kwargs
def before_populate_obj(self):
del self.form['attachments']
self.message_body = self.form.message.data
del self.form['message']
if 'send_by_email' in self.form:
self.send_by_email = (self.can_send_by_mail()
and self.form.send_by_email.data)
del self.form['send_by_email']
def after_populate_obj(self):
if self.thread.community is None:
self.thread.community = g.community._model
self.post = self.thread.create_post(body_html=self.message_body)
obj_meta = self.post.meta.setdefault('abilian.sbe.forum', {})
obj_meta['origin'] = u'web'
obj_meta['send_by_email'] = self.send_by_email
session = sa.orm.object_session(self.thread)
uploads = current_app.extensions['uploads']
for handle in request.form.getlist('attachments'):
fileobj = uploads.get_file(current_user, handle)
if fileobj is None:
continue
meta = uploads.get_metadata(current_user, handle)
name = meta.get('filename', handle)
mimetype = meta.get('mimetype', None)
if not isinstance(name, unicode):
name = unicode(name, encoding='utf-8', errors='ignore')
if not name:
continue
attachment = PostAttachment(name=name)
attachment.post = self.post
with fileobj.open('rb') as f:
attachment.set_content(f.read(), mimetype)
session.add(attachment)
def commit_success(self):
if self.send_by_email:
task = send_post_by_email.delay(self.post.id)
meta = self.post.meta.setdefault('abilian.sbe.forum', {})
meta['send_post_by_email_task'] = task.id
self.post.meta.changed()
session = sa.orm.object_session(self.post)
session.commit()
@property
def activity_target(self):
return self.thread.community
def get_form_buttons(self, *args, **kwargs):
return [self.POST_BUTTON, views.object.CANCEL_BUTTON]
route('/new_thread/')(ThreadCreate.as_view('new_thread',
view_endpoint='.thread'))
class ThreadPostCreate(ThreadCreate):
"""
Add a new post to a thread
"""
methods = ['POST']
Form = PostForm
Model = Post
def init_object(self, args, kwargs):
# we DO want to skip ThreadCreate.init_object. hence super is not based on
# ThreadPostCreate
args, kwargs = super(ThreadCreate, self).init_object(args, kwargs)
thread_id = kwargs.pop(self.pk, None)
self.thread = Thread.query.get(thread_id)
return args, kwargs
def after_populate_obj(self):
super(ThreadPostCreate, self).after_populate_obj()
session = sa.orm.object_session(self.obj)
session.expunge(self.obj)
self.obj = self.post
route('/<int:thread_id>/')(ThreadPostCreate.as_view('thread_post',
view_endpoint='.thread'))
class ThreadDelete(BaseThreadView, views.ObjectDelete):
methods = ['POST']
_message_success = _(u'Thread "{title}" deleted.')
def message_success(self):
return unicode(self._message_success).format(title=self.obj.title)
route('/<int:thread_id>/delete')(ThreadDelete.as_view('thread_delete'))
class ThreadCloseView(BaseThreadView, views.object.BaseObjectView):
"""
Close / Re-open a thread
"""
methods = ['POST']
_VALID_ACTIONS = {u'close', u'reopen'}
CLOSED_MSG = _l(u'The thread is now closed for edition and new '
u'contributions.')
REOPENED_MSG = _l(u'The thread is now re-opened for edition and new '
u'contributions.')
def prepare_args(self, args, kwargs):
args, kwargs = super(ThreadCloseView, self).prepare_args(args, kwargs)
action = kwargs['action'] = request.form.get('action')
if action not in self._VALID_ACTIONS:
raise BadRequest(u'Unknown action: {!r}'.format(action))
return args, kwargs
def post(self, action=None):
is_closed = (action == u'close')
self.obj.closed = is_closed
sa.orm.object_session(self.obj).commit()
msg = self.CLOSED_MSG if is_closed else self.REOPENED_MSG
flash(unicode(msg))
return self.redirect(url_for(self.obj))
route('/<int:thread_id>/close')(ThreadCloseView.as_view('thread_close'))
class ThreadPostEdit(BaseThreadView, views.ObjectEdit):
Form = PostEditForm
Model = Post
pk = 'object_id'
def can_send_by_mail(self):
# post edit: don't notify every time
return False
def init_object(self, args, kwargs):
# we DO want to skip ThreadCreate.init_object. hence super is not based on
# ThreadPostCreate
args, kwargs = super(ThreadPostEdit, self).init_object(args, kwargs)
thread_id = kwargs.pop('thread_id', None)
self.thread = self.obj.thread
assert thread_id == self.thread.id
return args, kwargs
def get_form_kwargs(self):
kwargs = super(ThreadPostEdit, self).get_form_kwargs()
kwargs['message'] = self.obj.body_html
return kwargs
def before_populate_obj(self):
self.message_body = self.form.message.data
del self.form['message']
self.reason = self.form.reason.data
self.send_by_email = False
if 'send_by_email' in self.form:
del self.form['send_by_email']
self.attachments_to_remove = self.form['attachments'].delete_files_index
del self.form['attachments']
def after_populate_obj(self):
session = sa.orm.object_session(self.obj)
uploads = current_app.extensions['uploads']
self.obj.body_html = self.message_body
obj_meta = self.obj.meta.setdefault('abilian.sbe.forum', {})
history = obj_meta.setdefault('history', [])
history.append(dict(user_id=current_user.id,
user=unicode(current_user),
date=utc_dt(datetime.utcnow()).isoformat(),
reason=self.form.reason.data,))
self.obj.meta['abilian.sbe.forum'] = obj_meta # trigger change for SA
attachments_to_remove = []
for idx in self.attachments_to_remove:
try:
idx = int(idx)
except ValueError:
continue
if idx > len(self.obj.attachments):
| continue | conditional_block | |
views.py | = u'post_{:d}'.format(obj.id)
return kw
@forum.url_value_preprocessor
def init_forum_values(endpoint, values):
g.current_tab = 'forum'
g.breadcrumb.append(
nav.BreadcrumbItem(label=_l(u'Conversations'),
url=nav.Endpoint('forum.index',
community_id=g.community.slug)))
@route('/')
def index():
query = Thread.query \
.filter(Thread.community_id == g.community.id) \
.order_by(Thread.created_at.desc())
has_more = query.count() > MAX_THREADS
threads = query.limit(MAX_THREADS).all()
return render_template("forum/index.html",
threads=threads, has_more=has_more)
def group_monthly(entities_list):
# We're using Python's groupby instead of SA's group_by here
# because it's easier to support both SQLite and Postgres this way.
def grouper(entity):
return entity.created_at.year, entity.created_at.month
def format_month(year, month):
month = format_date(date(year, month, 1), "MMMM").capitalize()
return u"%s %s" % (month, year)
grouped_entities = groupby(entities_list, grouper)
grouped_entities = [(format_month(year, month), list(entities))
for (year, month), entities in grouped_entities]
return grouped_entities
@route('/archives/')
def archives():
all_threads = Thread.query \
.filter(Thread.community_id == g.community.id) \
.order_by(Thread.created_at.desc()).all()
grouped_threads = group_monthly(all_threads)
return render_template('forum/archives.html',
grouped_threads=grouped_threads)
@route('/attachments/')
def attachments():
# XXX: there is probably a way to optimize this and the big loop below...
all_threads = Thread.query \
.filter(Thread.community_id == g.community.id) \
.options(joinedload('posts')) \
.options(joinedload('posts.attachments')) \
.order_by(Thread.created_at.desc()).all()
posts_with_attachments = []
for thread in all_threads:
for post in thread.posts:
if getattr(post, 'attachments', None):
posts_with_attachments.append(post)
posts_with_attachments.sort(key=lambda post: post.created_at)
posts_with_attachments.reverse()
grouped_posts = group_monthly(posts_with_attachments)
return render_template('forum/attachments.html',
grouped_posts=grouped_posts)
class BaseThreadView(object):
Model = Thread
Form = ThreadForm
pk = 'thread_id'
base_template = 'community/_base.html'
def can_send_by_mail(self):
return (g.community.type == 'participative'
or g.community.has_permission(current_user, 'manage'))
def prepare_args(self, args, kwargs):
args, kwargs = super(BaseThreadView, self).prepare_args(args, kwargs)
self.send_by_email = False
if not self.can_send_by_mail() and 'send_by_email' in self.form:
# remove from html form and avoid validation errors
del self.form['send_by_email']
return args, kwargs
def index_url(self):
return url_for(".index", community_id=g.community.slug)
def view_url(self):
return url_for(self.obj)
class ThreadView(BaseThreadView, views.ObjectView):
methods = ['GET', 'HEAD']
Form = PostForm
template = 'forum/thread.html'
@property
def template_kwargs(self):
kw = super(ThreadView, self).template_kwargs
kw['thread'] = self.obj
kw['is_closed'] = self.obj.closed
return kw
thread_view = ThreadView.as_view('thread')
default_view(forum, Post, None, kw_func=post_kw_view_func)(thread_view)
default_view(forum, Thread, 'thread_id', kw_func=default_view_kw)(thread_view)
route('/<int:thread_id>/')(thread_view)
route('/<int:thread_id>/attachments')(
ThreadView.as_view('thread_attachments',
template='forum/thread_attachments.html')
)
class ThreadCreate(BaseThreadView, views.ObjectCreate):
POST_BUTTON = ButtonAction('form', 'create', btn_class='primary',
title=_l(u'Post this message'))
def init_object(self, args, kwargs):
args, kwargs = super(ThreadCreate, self).init_object(args, kwargs)
self.thread = self.obj
return args, kwargs
def | (self):
del self.form['attachments']
self.message_body = self.form.message.data
del self.form['message']
if 'send_by_email' in self.form:
self.send_by_email = (self.can_send_by_mail()
and self.form.send_by_email.data)
del self.form['send_by_email']
def after_populate_obj(self):
if self.thread.community is None:
self.thread.community = g.community._model
self.post = self.thread.create_post(body_html=self.message_body)
obj_meta = self.post.meta.setdefault('abilian.sbe.forum', {})
obj_meta['origin'] = u'web'
obj_meta['send_by_email'] = self.send_by_email
session = sa.orm.object_session(self.thread)
uploads = current_app.extensions['uploads']
for handle in request.form.getlist('attachments'):
fileobj = uploads.get_file(current_user, handle)
if fileobj is None:
continue
meta = uploads.get_metadata(current_user, handle)
name = meta.get('filename', handle)
mimetype = meta.get('mimetype', None)
if not isinstance(name, unicode):
name = unicode(name, encoding='utf-8', errors='ignore')
if not name:
continue
attachment = PostAttachment(name=name)
attachment.post = self.post
with fileobj.open('rb') as f:
attachment.set_content(f.read(), mimetype)
session.add(attachment)
def commit_success(self):
if self.send_by_email:
task = send_post_by_email.delay(self.post.id)
meta = self.post.meta.setdefault('abilian.sbe.forum', {})
meta['send_post_by_email_task'] = task.id
self.post.meta.changed()
session = sa.orm.object_session(self.post)
session.commit()
@property
def activity_target(self):
return self.thread.community
def get_form_buttons(self, *args, **kwargs):
return [self.POST_BUTTON, views.object.CANCEL_BUTTON]
route('/new_thread/')(ThreadCreate.as_view('new_thread',
view_endpoint='.thread'))
class ThreadPostCreate(ThreadCreate):
"""
Add a new post to a thread
"""
methods = ['POST']
Form = PostForm
Model = Post
def init_object(self, args, kwargs):
# we DO want to skip ThreadCreate.init_object. hence super is not based on
# ThreadPostCreate
args, kwargs = super(ThreadCreate, self).init_object(args, kwargs)
thread_id = kwargs.pop(self.pk, None)
self.thread = Thread.query.get(thread_id)
return args, kwargs
def after_populate_obj(self):
super(ThreadPostCreate, self).after_populate_obj()
session = sa.orm.object_session(self.obj)
session.expunge(self.obj)
self.obj = self.post
route('/<int:thread_id>/')(ThreadPostCreate.as_view('thread_post',
view_endpoint='.thread'))
class ThreadDelete(BaseThreadView, views.ObjectDelete):
methods = ['POST']
_message_success = _(u'Thread "{title}" deleted.')
def message_success(self):
return unicode(self._message_success).format(title=self.obj.title)
route('/<int:thread_id>/delete')(ThreadDelete.as_view('thread_delete'))
class ThreadCloseView(BaseThreadView, views.object.BaseObjectView):
"""
Close / Re-open a thread
"""
methods = ['POST']
_VALID_ACTIONS = {u'close', u'reopen'}
CLOSED_MSG = _l(u'The thread is now closed for edition and new '
u'contributions.')
REOPENED_MSG = _l(u'The thread is now re-opened for edition and new '
u'contributions.')
def prepare_args(self, args, kwargs):
args, kwargs = super(ThreadCloseView, self).prepare_args(args, kwargs)
action = kwargs['action'] = request.form.get('action')
if action not in self._VALID_ACTIONS:
raise BadRequest(u'Unknown action: {!r}'.format(action))
return args, kwargs
def post(self, action=None):
is_closed = (action == u'close')
self.obj.closed = is_closed
sa.orm.object_session(self.obj).commit()
msg = self.CLOSED_MSG if is_closed else self.REOPENED_MSG
flash(unicode(msg))
return self.redirect(url_for(self.obj))
route('/<int:thread_id>/close')(ThreadCloseView.as_view('thread_close'))
class ThreadPostEdit(BaseThreadView, views.ObjectEdit):
Form = PostEditForm
Model = Post
pk = 'object_id'
def can_send_by_mail(self):
# post edit: don't notify every time
return False
def init_object(self, args, kwargs):
# we DO want to skip ThreadCreate.init_object. hence super is not based on
# ThreadPostCreate
args, kwargs = super(ThreadPostEdit, self).init_object(args, kwargs)
thread_id = kwargs.pop('thread_id', None)
self.thread = self.obj.thread
assert thread_id == self.thread.id
return args, kwargs | before_populate_obj | identifier_name |
views.py | filter(Thread.community_id == g.community.id) \
.order_by(Thread.created_at.desc()).all()
grouped_threads = group_monthly(all_threads)
return render_template('forum/archives.html',
grouped_threads=grouped_threads)
@route('/attachments/')
def attachments():
# XXX: there is probably a way to optimize this and the big loop below...
all_threads = Thread.query \
.filter(Thread.community_id == g.community.id) \
.options(joinedload('posts')) \
.options(joinedload('posts.attachments')) \
.order_by(Thread.created_at.desc()).all()
posts_with_attachments = []
for thread in all_threads:
for post in thread.posts:
if getattr(post, 'attachments', None):
posts_with_attachments.append(post)
posts_with_attachments.sort(key=lambda post: post.created_at)
posts_with_attachments.reverse()
grouped_posts = group_monthly(posts_with_attachments)
return render_template('forum/attachments.html',
grouped_posts=grouped_posts)
class BaseThreadView(object):
Model = Thread
Form = ThreadForm
pk = 'thread_id'
base_template = 'community/_base.html'
def can_send_by_mail(self):
return (g.community.type == 'participative'
or g.community.has_permission(current_user, 'manage'))
def prepare_args(self, args, kwargs):
args, kwargs = super(BaseThreadView, self).prepare_args(args, kwargs)
self.send_by_email = False
if not self.can_send_by_mail() and 'send_by_email' in self.form:
# remove from html form and avoid validation errors
del self.form['send_by_email']
return args, kwargs
def index_url(self):
return url_for(".index", community_id=g.community.slug)
def view_url(self):
return url_for(self.obj)
class ThreadView(BaseThreadView, views.ObjectView):
methods = ['GET', 'HEAD']
Form = PostForm
template = 'forum/thread.html'
@property
def template_kwargs(self):
kw = super(ThreadView, self).template_kwargs
kw['thread'] = self.obj
kw['is_closed'] = self.obj.closed
return kw
thread_view = ThreadView.as_view('thread')
default_view(forum, Post, None, kw_func=post_kw_view_func)(thread_view)
default_view(forum, Thread, 'thread_id', kw_func=default_view_kw)(thread_view)
route('/<int:thread_id>/')(thread_view)
route('/<int:thread_id>/attachments')(
ThreadView.as_view('thread_attachments',
template='forum/thread_attachments.html')
)
class ThreadCreate(BaseThreadView, views.ObjectCreate):
POST_BUTTON = ButtonAction('form', 'create', btn_class='primary',
title=_l(u'Post this message'))
def init_object(self, args, kwargs):
args, kwargs = super(ThreadCreate, self).init_object(args, kwargs)
self.thread = self.obj
return args, kwargs
def before_populate_obj(self):
del self.form['attachments']
self.message_body = self.form.message.data
del self.form['message']
if 'send_by_email' in self.form:
self.send_by_email = (self.can_send_by_mail()
and self.form.send_by_email.data)
del self.form['send_by_email']
def after_populate_obj(self):
if self.thread.community is None:
self.thread.community = g.community._model
self.post = self.thread.create_post(body_html=self.message_body)
obj_meta = self.post.meta.setdefault('abilian.sbe.forum', {})
obj_meta['origin'] = u'web'
obj_meta['send_by_email'] = self.send_by_email
session = sa.orm.object_session(self.thread)
uploads = current_app.extensions['uploads']
for handle in request.form.getlist('attachments'):
fileobj = uploads.get_file(current_user, handle)
if fileobj is None:
continue
meta = uploads.get_metadata(current_user, handle)
name = meta.get('filename', handle)
mimetype = meta.get('mimetype', None)
if not isinstance(name, unicode):
name = unicode(name, encoding='utf-8', errors='ignore')
if not name:
continue
attachment = PostAttachment(name=name)
attachment.post = self.post
with fileobj.open('rb') as f:
attachment.set_content(f.read(), mimetype)
session.add(attachment)
def commit_success(self):
if self.send_by_email:
task = send_post_by_email.delay(self.post.id)
meta = self.post.meta.setdefault('abilian.sbe.forum', {})
meta['send_post_by_email_task'] = task.id
self.post.meta.changed()
session = sa.orm.object_session(self.post)
session.commit()
@property
def activity_target(self):
return self.thread.community
def get_form_buttons(self, *args, **kwargs):
return [self.POST_BUTTON, views.object.CANCEL_BUTTON]
route('/new_thread/')(ThreadCreate.as_view('new_thread',
view_endpoint='.thread'))
class ThreadPostCreate(ThreadCreate):
"""
Add a new post to a thread
"""
methods = ['POST']
Form = PostForm
Model = Post
def init_object(self, args, kwargs):
# we DO want to skip ThreadCreate.init_object. hence super is not based on
# ThreadPostCreate
args, kwargs = super(ThreadCreate, self).init_object(args, kwargs)
thread_id = kwargs.pop(self.pk, None)
self.thread = Thread.query.get(thread_id)
return args, kwargs
def after_populate_obj(self):
super(ThreadPostCreate, self).after_populate_obj()
session = sa.orm.object_session(self.obj)
session.expunge(self.obj)
self.obj = self.post
route('/<int:thread_id>/')(ThreadPostCreate.as_view('thread_post',
view_endpoint='.thread'))
class ThreadDelete(BaseThreadView, views.ObjectDelete):
methods = ['POST']
_message_success = _(u'Thread "{title}" deleted.')
def message_success(self):
return unicode(self._message_success).format(title=self.obj.title)
route('/<int:thread_id>/delete')(ThreadDelete.as_view('thread_delete'))
class ThreadCloseView(BaseThreadView, views.object.BaseObjectView):
"""
Close / Re-open a thread
"""
methods = ['POST']
_VALID_ACTIONS = {u'close', u'reopen'}
CLOSED_MSG = _l(u'The thread is now closed for edition and new '
u'contributions.')
REOPENED_MSG = _l(u'The thread is now re-opened for edition and new '
u'contributions.')
def prepare_args(self, args, kwargs):
args, kwargs = super(ThreadCloseView, self).prepare_args(args, kwargs)
action = kwargs['action'] = request.form.get('action')
if action not in self._VALID_ACTIONS:
raise BadRequest(u'Unknown action: {!r}'.format(action))
return args, kwargs
def post(self, action=None):
is_closed = (action == u'close')
self.obj.closed = is_closed
sa.orm.object_session(self.obj).commit()
msg = self.CLOSED_MSG if is_closed else self.REOPENED_MSG
flash(unicode(msg))
return self.redirect(url_for(self.obj))
route('/<int:thread_id>/close')(ThreadCloseView.as_view('thread_close'))
class ThreadPostEdit(BaseThreadView, views.ObjectEdit):
Form = PostEditForm
Model = Post
pk = 'object_id'
def can_send_by_mail(self):
# post edit: don't notify every time
return False
def init_object(self, args, kwargs):
# we DO want to skip ThreadCreate.init_object. hence super is not based on
# ThreadPostCreate
args, kwargs = super(ThreadPostEdit, self).init_object(args, kwargs)
thread_id = kwargs.pop('thread_id', None)
self.thread = self.obj.thread
assert thread_id == self.thread.id
return args, kwargs
def get_form_kwargs(self):
kwargs = super(ThreadPostEdit, self).get_form_kwargs()
kwargs['message'] = self.obj.body_html
return kwargs
def before_populate_obj(self):
self.message_body = self.form.message.data
del self.form['message']
self.reason = self.form.reason.data
self.send_by_email = False
if 'send_by_email' in self.form:
del self.form['send_by_email']
self.attachments_to_remove = self.form['attachments'].delete_files_index
del self.form['attachments']
def after_populate_obj(self):
session = sa.orm.object_session(self.obj)
uploads = current_app.extensions['uploads']
self.obj.body_html = self.message_body
obj_meta = self.obj.meta.setdefault('abilian.sbe.forum', {})
history = obj_meta.setdefault('history', [])
history.append(dict(user_id=current_user.id,
user=unicode(current_user),
date=utc_dt(datetime.utcnow()).isoformat(),
reason=self.form.reason.data,))
self.obj.meta['abilian.sbe.forum'] = obj_meta # trigger change for SA
attachments_to_remove = []
for idx in self.attachments_to_remove:
try:
idx = int(idx)
except ValueError:
continue
if idx > len(self.obj.attachments):
continue
| attachments_to_remove.append(self.obj.attachments[idx])
for att in attachments_to_remove: | random_line_split | |
views.py | = u'post_{:d}'.format(obj.id)
return kw
@forum.url_value_preprocessor
def init_forum_values(endpoint, values):
g.current_tab = 'forum'
g.breadcrumb.append(
nav.BreadcrumbItem(label=_l(u'Conversations'),
url=nav.Endpoint('forum.index',
community_id=g.community.slug)))
@route('/')
def index():
query = Thread.query \
.filter(Thread.community_id == g.community.id) \
.order_by(Thread.created_at.desc())
has_more = query.count() > MAX_THREADS
threads = query.limit(MAX_THREADS).all()
return render_template("forum/index.html",
threads=threads, has_more=has_more)
def group_monthly(entities_list):
# We're using Python's groupby instead of SA's group_by here
# because it's easier to support both SQLite and Postgres this way.
def grouper(entity):
return entity.created_at.year, entity.created_at.month
def format_month(year, month):
|
grouped_entities = groupby(entities_list, grouper)
grouped_entities = [(format_month(year, month), list(entities))
for (year, month), entities in grouped_entities]
return grouped_entities
@route('/archives/')
def archives():
all_threads = Thread.query \
.filter(Thread.community_id == g.community.id) \
.order_by(Thread.created_at.desc()).all()
grouped_threads = group_monthly(all_threads)
return render_template('forum/archives.html',
grouped_threads=grouped_threads)
@route('/attachments/')
def attachments():
# XXX: there is probably a way to optimize this and the big loop below...
all_threads = Thread.query \
.filter(Thread.community_id == g.community.id) \
.options(joinedload('posts')) \
.options(joinedload('posts.attachments')) \
.order_by(Thread.created_at.desc()).all()
posts_with_attachments = []
for thread in all_threads:
for post in thread.posts:
if getattr(post, 'attachments', None):
posts_with_attachments.append(post)
posts_with_attachments.sort(key=lambda post: post.created_at)
posts_with_attachments.reverse()
grouped_posts = group_monthly(posts_with_attachments)
return render_template('forum/attachments.html',
grouped_posts=grouped_posts)
class BaseThreadView(object):
Model = Thread
Form = ThreadForm
pk = 'thread_id'
base_template = 'community/_base.html'
def can_send_by_mail(self):
return (g.community.type == 'participative'
or g.community.has_permission(current_user, 'manage'))
def prepare_args(self, args, kwargs):
args, kwargs = super(BaseThreadView, self).prepare_args(args, kwargs)
self.send_by_email = False
if not self.can_send_by_mail() and 'send_by_email' in self.form:
# remove from html form and avoid validation errors
del self.form['send_by_email']
return args, kwargs
def index_url(self):
return url_for(".index", community_id=g.community.slug)
def view_url(self):
return url_for(self.obj)
class ThreadView(BaseThreadView, views.ObjectView):
methods = ['GET', 'HEAD']
Form = PostForm
template = 'forum/thread.html'
@property
def template_kwargs(self):
kw = super(ThreadView, self).template_kwargs
kw['thread'] = self.obj
kw['is_closed'] = self.obj.closed
return kw
thread_view = ThreadView.as_view('thread')
default_view(forum, Post, None, kw_func=post_kw_view_func)(thread_view)
default_view(forum, Thread, 'thread_id', kw_func=default_view_kw)(thread_view)
route('/<int:thread_id>/')(thread_view)
route('/<int:thread_id>/attachments')(
ThreadView.as_view('thread_attachments',
template='forum/thread_attachments.html')
)
class ThreadCreate(BaseThreadView, views.ObjectCreate):
POST_BUTTON = ButtonAction('form', 'create', btn_class='primary',
title=_l(u'Post this message'))
def init_object(self, args, kwargs):
args, kwargs = super(ThreadCreate, self).init_object(args, kwargs)
self.thread = self.obj
return args, kwargs
def before_populate_obj(self):
del self.form['attachments']
self.message_body = self.form.message.data
del self.form['message']
if 'send_by_email' in self.form:
self.send_by_email = (self.can_send_by_mail()
and self.form.send_by_email.data)
del self.form['send_by_email']
def after_populate_obj(self):
if self.thread.community is None:
self.thread.community = g.community._model
self.post = self.thread.create_post(body_html=self.message_body)
obj_meta = self.post.meta.setdefault('abilian.sbe.forum', {})
obj_meta['origin'] = u'web'
obj_meta['send_by_email'] = self.send_by_email
session = sa.orm.object_session(self.thread)
uploads = current_app.extensions['uploads']
for handle in request.form.getlist('attachments'):
fileobj = uploads.get_file(current_user, handle)
if fileobj is None:
continue
meta = uploads.get_metadata(current_user, handle)
name = meta.get('filename', handle)
mimetype = meta.get('mimetype', None)
if not isinstance(name, unicode):
name = unicode(name, encoding='utf-8', errors='ignore')
if not name:
continue
attachment = PostAttachment(name=name)
attachment.post = self.post
with fileobj.open('rb') as f:
attachment.set_content(f.read(), mimetype)
session.add(attachment)
def commit_success(self):
if self.send_by_email:
task = send_post_by_email.delay(self.post.id)
meta = self.post.meta.setdefault('abilian.sbe.forum', {})
meta['send_post_by_email_task'] = task.id
self.post.meta.changed()
session = sa.orm.object_session(self.post)
session.commit()
@property
def activity_target(self):
return self.thread.community
def get_form_buttons(self, *args, **kwargs):
return [self.POST_BUTTON, views.object.CANCEL_BUTTON]
route('/new_thread/')(ThreadCreate.as_view('new_thread',
view_endpoint='.thread'))
class ThreadPostCreate(ThreadCreate):
"""
Add a new post to a thread
"""
methods = ['POST']
Form = PostForm
Model = Post
def init_object(self, args, kwargs):
# we DO want to skip ThreadCreate.init_object. hence super is not based on
# ThreadPostCreate
args, kwargs = super(ThreadCreate, self).init_object(args, kwargs)
thread_id = kwargs.pop(self.pk, None)
self.thread = Thread.query.get(thread_id)
return args, kwargs
def after_populate_obj(self):
super(ThreadPostCreate, self).after_populate_obj()
session = sa.orm.object_session(self.obj)
session.expunge(self.obj)
self.obj = self.post
route('/<int:thread_id>/')(ThreadPostCreate.as_view('thread_post',
view_endpoint='.thread'))
class ThreadDelete(BaseThreadView, views.ObjectDelete):
methods = ['POST']
_message_success = _(u'Thread "{title}" deleted.')
def message_success(self):
return unicode(self._message_success).format(title=self.obj.title)
route('/<int:thread_id>/delete')(ThreadDelete.as_view('thread_delete'))
class ThreadCloseView(BaseThreadView, views.object.BaseObjectView):
"""
Close / Re-open a thread
"""
methods = ['POST']
_VALID_ACTIONS = {u'close', u'reopen'}
CLOSED_MSG = _l(u'The thread is now closed for edition and new '
u'contributions.')
REOPENED_MSG = _l(u'The thread is now re-opened for edition and new '
u'contributions.')
def prepare_args(self, args, kwargs):
args, kwargs = super(ThreadCloseView, self).prepare_args(args, kwargs)
action = kwargs['action'] = request.form.get('action')
if action not in self._VALID_ACTIONS:
raise BadRequest(u'Unknown action: {!r}'.format(action))
return args, kwargs
def post(self, action=None):
is_closed = (action == u'close')
self.obj.closed = is_closed
sa.orm.object_session(self.obj).commit()
msg = self.CLOSED_MSG if is_closed else self.REOPENED_MSG
flash(unicode(msg))
return self.redirect(url_for(self.obj))
route('/<int:thread_id>/close')(ThreadCloseView.as_view('thread_close'))
class ThreadPostEdit(BaseThreadView, views.ObjectEdit):
Form = PostEditForm
Model = Post
pk = 'object_id'
def can_send_by_mail(self):
# post edit: don't notify every time
return False
def init_object(self, args, kwargs):
# we DO want to skip ThreadCreate.init_object. hence super is not based on
# ThreadPostCreate
args, kwargs = super(ThreadPostEdit, self).init_object(args, kwargs)
thread_id = kwargs.pop('thread_id', None)
self.thread = self.obj.thread
assert thread_id == self.thread.id
return args, kwargs | month = format_date(date(year, month, 1), "MMMM").capitalize()
return u"%s %s" % (month, year) | identifier_body |
gpu_controller.go | =configmaps,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups="",resources=pods,verbs=get;list;watch;create;update;patch;delete
func (r *GpuReconciler) Reconcile(ctx context.Context, _ ctrl.Request) (ctrl.Result, error) {
nodeList := &corev1.NodeList{}
if err := r.List(ctx, nodeList, client.MatchingFields{NodeIndexKey: GPU}); err != nil {
r.Logger.Error(err, "failed to get node list")
return ctrl.Result{}, err
}
podList := &corev1.PodList{}
if err := r.List(ctx, podList, client.MatchingFields{PodIndexKey: GPU}); err != nil {
r.Logger.Error(err, "failed to get pod list")
return ctrl.Result{}, err
}
return r.applyGPUInfoCM(ctx, nodeList, podList, nil)
}
func (r *GpuReconciler) applyGPUInfoCM(ctx context.Context, nodeList *corev1.NodeList, podList *corev1.PodList, clientSet *kubernetes.Clientset) (ctrl.Result, error) {
/*
"nodeMap": {
"sealos-poc-gpu-master-0":{},
"sealos-poc-gpu-node-1":{"gpu.count":"1","gpu.memory":"15360","gpu.product":"Tesla-T4"}}
}
*/
nodeMap := make(map[string]map[string]string)
var nodeName string
// get the GPU product, GPU memory, GPU allocatable number on the node
for _, node := range nodeList.Items {
nodeName = node.Name
if _, ok := nodeMap[nodeName]; !ok {
nodeMap[nodeName] = make(map[string]string)
}
gpuProduct, ok1 := node.Labels[NvidiaGPUProduct]
gpuMemory, ok2 := node.Labels[NvidiaGPUMemory]
gpuCount, ok3 := node.Status.Allocatable[NvidiaGPU]
if !ok1 || !ok2 || !ok3 {
continue
}
nodeMap[nodeName][GPUProduct] = gpuProduct
nodeMap[nodeName][GPUMemory] = gpuMemory
nodeMap[nodeName][GPUCount] = gpuCount.String()
}
// get the number of GPU used by pods that are using GPU
for _, pod := range podList.Items {
phase := pod.Status.Phase
if phase == corev1.PodSucceeded {
continue
}
nodeName = pod.Spec.NodeName
_, ok1 := nodeMap[nodeName]
gpuProduct, ok2 := pod.Spec.NodeSelector[NvidiaGPUProduct]
if !ok1 || !ok2 {
continue
}
containers := pod.Spec.Containers
for _, container := range containers {
gpuCount, ok := container.Resources.Limits[NvidiaGPU]
if !ok {
continue
}
r.Logger.V(1).Info("pod using GPU", "name", pod.Name, "namespace", pod.Namespace, "gpuCount", gpuCount, "gpuProduct", gpuProduct)
oldCount, err := strconv.ParseInt(nodeMap[nodeName][GPUCount], 10, 64)
if err != nil {
r.Logger.Error(err, "failed to parse gpu.count string to int64")
return ctrl.Result{}, err
}
newCount := oldCount - gpuCount.Value()
nodeMap[nodeName][GPUCount] = strconv.FormatInt(newCount, 10)
}
}
// marshal node map to JSON string
nodeMapBytes, err := json.Marshal(nodeMap)
if err != nil {
r.Logger.Error(err, "failed to marshal node map to JSON string")
return ctrl.Result{}, err
}
nodeMapStr := string(nodeMapBytes)
// create or update gpu-info configmap
configmap := &corev1.ConfigMap{}
if clientSet != nil {
configmap, err = clientSet.CoreV1().ConfigMaps(GPUInfoNameSpace).Get(ctx, GPUInfo, metaV1.GetOptions{})
} else {
err = r.Get(ctx, types.NamespacedName{Name: GPUInfo, Namespace: GPUInfoNameSpace}, configmap)
}
if errors.IsNotFound(err) {
configmap = &corev1.ConfigMap{
ObjectMeta: metaV1.ObjectMeta{
Name: GPUInfo,
Namespace: GPUInfoNameSpace,
},
Data: map[string]string{
GPU: nodeMapStr,
},
}
if err := r.Create(ctx, configmap); err != nil {
r.Logger.Error(err, "failed to create gpu-info configmap")
return ctrl.Result{}, err
}
} else if err != nil {
r.Logger.Error(err, "failed to get gpu-info configmap")
return ctrl.Result{}, err
}
if configmap.Data == nil {
configmap.Data = map[string]string{}
}
if configmap.Data[GPU] != nodeMapStr {
configmap.Data[GPU] = nodeMapStr
if err := r.Update(ctx, configmap); err != nil && !errors.IsConflict(err) {
r.Logger.Error(err, "failed to update gpu-info configmap")
return ctrl.Result{}, err
}
}
r.Logger.V(1).Info("gpu-info configmap status", "gpu", configmap.Data[GPU])
return ctrl.Result{}, nil
}
func (r *GpuReconciler) initGPUInfoCM(ctx context.Context, clientSet *kubernetes.Clientset) error {
// filter for nodes that have GPU
req1, _ := labels.NewRequirement(NvidiaGPUProduct, selection.Exists, []string{})
req2, _ := labels.NewRequirement(NvidiaGPUMemory, selection.Exists, []string{})
selector := labels.NewSelector().Add(*req1, *req2)
listOpts := metaV1.ListOptions{
LabelSelector: selector.String(),
}
nodeList, err := clientSet.CoreV1().Nodes().List(ctx, listOpts)
if err != nil {
return err
}
podList := &corev1.PodList{}
for _, item := range nodeList.Items {
list, err := clientSet.CoreV1().Pods("").List(context.TODO(), metaV1.ListOptions{
FieldSelector: fields.OneTermEqualSelector("spec.nodeName", item.Name).String(),
})
if err != nil {
return err
}
podList.Items = append(podList.Items, list.Items...)
}
_, err = r.applyGPUInfoCM(ctx, nodeList, podList, clientSet)
return err
}
// SetupWithManager sets up the controller with the Manager.
func (r *GpuReconciler) SetupWithManager(mgr ctrl.Manager) error {
r.Logger = ctrl.Log.WithName("gpu-controller")
r.Logger.V(1).Info("starting gpu controller")
// use clientSet to get resources from the API Server, not from Informer's cache
clientSet, err := kubernetes.NewForConfig(mgr.GetConfig())
if err != nil {
r.Logger.Error(err, "failed to init")
return nil
}
// init node-gpu-info configmap
r.Logger.V(1).Info("initializing node-gpu-info configmap")
if err := r.initGPUInfoCM(context.Background(), clientSet); err != nil {
return err
}
// build index for node which have GPU
if err := mgr.GetFieldIndexer().IndexField(context.Background(), &corev1.Node{}, NodeIndexKey, func(rawObj client.Object) []string {
node := rawObj.(*corev1.Node)
if _, ok := node.Labels[NvidiaGPUProduct]; !ok {
return nil
}
return []string{GPU}
}); err != nil {
return err
}
// build index for pod which use GPU
if err := mgr.GetFieldIndexer().IndexField(context.Background(), &corev1.Pod{}, PodIndexKey, func(rawObj client.Object) []string {
pod := rawObj.(*corev1.Pod)
if _, ok := pod.Spec.NodeSelector[NvidiaGPUProduct]; !ok {
return nil
}
if pod.Status.Phase == corev1.PodSucceeded {
return nil
}
return []string{GPU}
}); err != nil {
return err
}
return ctrl.NewControllerManagedBy(mgr).
For(&corev1.Pod{}, builder.WithPredicates(predicate.Funcs{
CreateFunc: func(event event.CreateEvent) bool {
return useGPU(event.Object)
},
UpdateFunc: func(event event.UpdateEvent) bool {
_, ok := event.ObjectNew.(*corev1.Pod).Spec.NodeSelector[NvidiaGPUProduct]
if !ok {
return false
}
phaseOld := event.ObjectOld.(*corev1.Pod).Status.Phase
phaseNew := event.ObjectNew.(*corev1.Pod).Status.Phase
return phaseOld != phaseNew
},
DeleteFunc: func(event event.DeleteEvent) bool {
return useGPU(event.Object)
},
})).
Watches(&source.Kind{Type: &corev1.Node{}}, &handler.EnqueueRequestForObject{}, builder.WithPredicates(predicate.Funcs{
CreateFunc: func(event event.CreateEvent) bool {
return hasGPU(event.Object)
}, | UpdateFunc: func(event event.UpdateEvent) bool { | random_line_split | |
gpu_controller.go | 8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/source"
)
type GpuReconciler struct {
client.Client
Scheme *runtime.Scheme
Logger logr.Logger
}
const (
GPU = "gpu"
GPUInfo = "node-gpu-info"
GPUInfoNameSpace = "node-system"
NvidiaGPUProduct = "nvidia.com/gpu.product"
NvidiaGPUMemory = "nvidia.com/gpu.memory"
NvidiaGPU corev1.ResourceName = "nvidia.com/gpu"
GPUProduct = "gpu.product"
GPUCount = "gpu.count"
GPUMemory = "gpu.memory"
NodeIndexKey = "node"
PodIndexKey = "pod"
)
//+kubebuilder:rbac:groups=node.k8s.io,resources=gpus,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=node.k8s.io,resources=gpus/status,verbs=get;update;patch
//+kubebuilder:rbac:groups=node.k8s.io,resources=gpus/finalizers,verbs=update
//+kubebuilder:rbac:groups="",resources=nodes,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups="",resources=nodes/status,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups="",resources=configmaps,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups="",resources=pods,verbs=get;list;watch;create;update;patch;delete
func (r *GpuReconciler) Reconcile(ctx context.Context, _ ctrl.Request) (ctrl.Result, error) {
nodeList := &corev1.NodeList{}
if err := r.List(ctx, nodeList, client.MatchingFields{NodeIndexKey: GPU}); err != nil {
r.Logger.Error(err, "failed to get node list")
return ctrl.Result{}, err
}
podList := &corev1.PodList{}
if err := r.List(ctx, podList, client.MatchingFields{PodIndexKey: GPU}); err != nil {
r.Logger.Error(err, "failed to get pod list")
return ctrl.Result{}, err
}
return r.applyGPUInfoCM(ctx, nodeList, podList, nil)
}
func (r *GpuReconciler) applyGPUInfoCM(ctx context.Context, nodeList *corev1.NodeList, podList *corev1.PodList, clientSet *kubernetes.Clientset) (ctrl.Result, error) {
/*
"nodeMap": {
"sealos-poc-gpu-master-0":{},
"sealos-poc-gpu-node-1":{"gpu.count":"1","gpu.memory":"15360","gpu.product":"Tesla-T4"}}
}
*/
nodeMap := make(map[string]map[string]string)
var nodeName string
// get the GPU product, GPU memory, GPU allocatable number on the node
for _, node := range nodeList.Items {
nodeName = node.Name
if _, ok := nodeMap[nodeName]; !ok {
nodeMap[nodeName] = make(map[string]string)
}
gpuProduct, ok1 := node.Labels[NvidiaGPUProduct]
gpuMemory, ok2 := node.Labels[NvidiaGPUMemory]
gpuCount, ok3 := node.Status.Allocatable[NvidiaGPU]
if !ok1 || !ok2 || !ok3 {
continue
}
nodeMap[nodeName][GPUProduct] = gpuProduct
nodeMap[nodeName][GPUMemory] = gpuMemory
nodeMap[nodeName][GPUCount] = gpuCount.String()
}
// get the number of GPU used by pods that are using GPU
for _, pod := range podList.Items {
phase := pod.Status.Phase
if phase == corev1.PodSucceeded {
continue
}
nodeName = pod.Spec.NodeName
_, ok1 := nodeMap[nodeName]
gpuProduct, ok2 := pod.Spec.NodeSelector[NvidiaGPUProduct]
if !ok1 || !ok2 {
continue
}
containers := pod.Spec.Containers
for _, container := range containers {
gpuCount, ok := container.Resources.Limits[NvidiaGPU]
if !ok {
continue
}
r.Logger.V(1).Info("pod using GPU", "name", pod.Name, "namespace", pod.Namespace, "gpuCount", gpuCount, "gpuProduct", gpuProduct)
oldCount, err := strconv.ParseInt(nodeMap[nodeName][GPUCount], 10, 64)
if err != nil {
r.Logger.Error(err, "failed to parse gpu.count string to int64")
return ctrl.Result{}, err
}
newCount := oldCount - gpuCount.Value()
nodeMap[nodeName][GPUCount] = strconv.FormatInt(newCount, 10)
}
}
// marshal node map to JSON string
nodeMapBytes, err := json.Marshal(nodeMap)
if err != nil {
r.Logger.Error(err, "failed to marshal node map to JSON string")
return ctrl.Result{}, err
}
nodeMapStr := string(nodeMapBytes)
// create or update gpu-info configmap
configmap := &corev1.ConfigMap{}
if clientSet != nil {
configmap, err = clientSet.CoreV1().ConfigMaps(GPUInfoNameSpace).Get(ctx, GPUInfo, metaV1.GetOptions{})
} else {
err = r.Get(ctx, types.NamespacedName{Name: GPUInfo, Namespace: GPUInfoNameSpace}, configmap)
}
if errors.IsNotFound(err) {
configmap = &corev1.ConfigMap{
ObjectMeta: metaV1.ObjectMeta{
Name: GPUInfo,
Namespace: GPUInfoNameSpace,
},
Data: map[string]string{
GPU: nodeMapStr,
},
}
if err := r.Create(ctx, configmap); err != nil {
r.Logger.Error(err, "failed to create gpu-info configmap")
return ctrl.Result{}, err
}
} else if err != nil {
r.Logger.Error(err, "failed to get gpu-info configmap")
return ctrl.Result{}, err
}
if configmap.Data == nil {
configmap.Data = map[string]string{}
}
if configmap.Data[GPU] != nodeMapStr |
r.Logger.V(1).Info("gpu-info configmap status", "gpu", configmap.Data[GPU])
return ctrl.Result{}, nil
}
func (r *GpuReconciler) initGPUInfoCM(ctx context.Context, clientSet *kubernetes.Clientset) error {
// filter for nodes that have GPU
req1, _ := labels.NewRequirement(NvidiaGPUProduct, selection.Exists, []string{})
req2, _ := labels.NewRequirement(NvidiaGPUMemory, selection.Exists, []string{})
selector := labels.NewSelector().Add(*req1, *req2)
listOpts := metaV1.ListOptions{
LabelSelector: selector.String(),
}
nodeList, err := clientSet.CoreV1().Nodes().List(ctx, listOpts)
if err != nil {
return err
}
podList := &corev1.PodList{}
for _, item := range nodeList.Items {
list, err := clientSet.CoreV1().Pods("").List(context.TODO(), metaV1.ListOptions{
FieldSelector: fields.OneTermEqualSelector("spec.nodeName", item.Name).String(),
})
if err != nil {
return err
}
podList.Items = append(podList.Items, list.Items...)
}
_, err = r.applyGPUInfoCM(ctx, nodeList, podList, clientSet)
return err
}
// SetupWithManager sets up the controller with the Manager.
func (r *GpuReconciler) SetupWithManager(mgr ctrl.Manager) error {
r.Logger = ctrl.Log.WithName("gpu-controller")
r.Logger.V(1).Info("starting gpu controller")
// use clientSet to get resources from the API Server, not from Informer's cache
clientSet, err := kubernetes.NewForConfig(mgr.GetConfig())
if err != nil {
r.Logger.Error(err, "failed to init")
return nil
}
// init node-gpu-info configmap
r.Logger.V(1).Info("initializing node-gpu-info configmap")
if err := r.initGPUInfoCM(context.Background(), clientSet); err != nil {
return err
}
// build index for node which have GPU
if err := mgr.GetFieldIndexer().IndexField(context.Background(), &corev1.Node{}, NodeIndexKey, func(rawObj client.Object) []string {
node := rawObj.(* | {
configmap.Data[GPU] = nodeMapStr
if err := r.Update(ctx, configmap); err != nil && !errors.IsConflict(err) {
r.Logger.Error(err, "failed to update gpu-info configmap")
return ctrl.Result{}, err
}
} | conditional_block |
gpu_controller.go | 8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/source"
)
type GpuReconciler struct {
client.Client
Scheme *runtime.Scheme
Logger logr.Logger
}
const (
GPU = "gpu"
GPUInfo = "node-gpu-info"
GPUInfoNameSpace = "node-system"
NvidiaGPUProduct = "nvidia.com/gpu.product"
NvidiaGPUMemory = "nvidia.com/gpu.memory"
NvidiaGPU corev1.ResourceName = "nvidia.com/gpu"
GPUProduct = "gpu.product"
GPUCount = "gpu.count"
GPUMemory = "gpu.memory"
NodeIndexKey = "node"
PodIndexKey = "pod"
)
//+kubebuilder:rbac:groups=node.k8s.io,resources=gpus,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=node.k8s.io,resources=gpus/status,verbs=get;update;patch
//+kubebuilder:rbac:groups=node.k8s.io,resources=gpus/finalizers,verbs=update
//+kubebuilder:rbac:groups="",resources=nodes,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups="",resources=nodes/status,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups="",resources=configmaps,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups="",resources=pods,verbs=get;list;watch;create;update;patch;delete
func (r *GpuReconciler) Reconcile(ctx context.Context, _ ctrl.Request) (ctrl.Result, error) {
nodeList := &corev1.NodeList{}
if err := r.List(ctx, nodeList, client.MatchingFields{NodeIndexKey: GPU}); err != nil {
r.Logger.Error(err, "failed to get node list")
return ctrl.Result{}, err
}
podList := &corev1.PodList{}
if err := r.List(ctx, podList, client.MatchingFields{PodIndexKey: GPU}); err != nil {
r.Logger.Error(err, "failed to get pod list")
return ctrl.Result{}, err
}
return r.applyGPUInfoCM(ctx, nodeList, podList, nil)
}
func (r *GpuReconciler) applyGPUInfoCM(ctx context.Context, nodeList *corev1.NodeList, podList *corev1.PodList, clientSet *kubernetes.Clientset) (ctrl.Result, error) | }
nodeMap[nodeName][GPUProduct] = gpuProduct
nodeMap[nodeName][GPUMemory] = gpuMemory
nodeMap[nodeName][GPUCount] = gpuCount.String()
}
// get the number of GPU used by pods that are using GPU
for _, pod := range podList.Items {
phase := pod.Status.Phase
if phase == corev1.PodSucceeded {
continue
}
nodeName = pod.Spec.NodeName
_, ok1 := nodeMap[nodeName]
gpuProduct, ok2 := pod.Spec.NodeSelector[NvidiaGPUProduct]
if !ok1 || !ok2 {
continue
}
containers := pod.Spec.Containers
for _, container := range containers {
gpuCount, ok := container.Resources.Limits[NvidiaGPU]
if !ok {
continue
}
r.Logger.V(1).Info("pod using GPU", "name", pod.Name, "namespace", pod.Namespace, "gpuCount", gpuCount, "gpuProduct", gpuProduct)
oldCount, err := strconv.ParseInt(nodeMap[nodeName][GPUCount], 10, 64)
if err != nil {
r.Logger.Error(err, "failed to parse gpu.count string to int64")
return ctrl.Result{}, err
}
newCount := oldCount - gpuCount.Value()
nodeMap[nodeName][GPUCount] = strconv.FormatInt(newCount, 10)
}
}
// marshal node map to JSON string
nodeMapBytes, err := json.Marshal(nodeMap)
if err != nil {
r.Logger.Error(err, "failed to marshal node map to JSON string")
return ctrl.Result{}, err
}
nodeMapStr := string(nodeMapBytes)
// create or update gpu-info configmap
configmap := &corev1.ConfigMap{}
if clientSet != nil {
configmap, err = clientSet.CoreV1().ConfigMaps(GPUInfoNameSpace).Get(ctx, GPUInfo, metaV1.GetOptions{})
} else {
err = r.Get(ctx, types.NamespacedName{Name: GPUInfo, Namespace: GPUInfoNameSpace}, configmap)
}
if errors.IsNotFound(err) {
configmap = &corev1.ConfigMap{
ObjectMeta: metaV1.ObjectMeta{
Name: GPUInfo,
Namespace: GPUInfoNameSpace,
},
Data: map[string]string{
GPU: nodeMapStr,
},
}
if err := r.Create(ctx, configmap); err != nil {
r.Logger.Error(err, "failed to create gpu-info configmap")
return ctrl.Result{}, err
}
} else if err != nil {
r.Logger.Error(err, "failed to get gpu-info configmap")
return ctrl.Result{}, err
}
if configmap.Data == nil {
configmap.Data = map[string]string{}
}
if configmap.Data[GPU] != nodeMapStr {
configmap.Data[GPU] = nodeMapStr
if err := r.Update(ctx, configmap); err != nil && !errors.IsConflict(err) {
r.Logger.Error(err, "failed to update gpu-info configmap")
return ctrl.Result{}, err
}
}
r.Logger.V(1).Info("gpu-info configmap status", "gpu", configmap.Data[GPU])
return ctrl.Result{}, nil
}
func (r *GpuReconciler) initGPUInfoCM(ctx context.Context, clientSet *kubernetes.Clientset) error {
// filter for nodes that have GPU
req1, _ := labels.NewRequirement(NvidiaGPUProduct, selection.Exists, []string{})
req2, _ := labels.NewRequirement(NvidiaGPUMemory, selection.Exists, []string{})
selector := labels.NewSelector().Add(*req1, *req2)
listOpts := metaV1.ListOptions{
LabelSelector: selector.String(),
}
nodeList, err := clientSet.CoreV1().Nodes().List(ctx, listOpts)
if err != nil {
return err
}
podList := &corev1.PodList{}
for _, item := range nodeList.Items {
list, err := clientSet.CoreV1().Pods("").List(context.TODO(), metaV1.ListOptions{
FieldSelector: fields.OneTermEqualSelector("spec.nodeName", item.Name).String(),
})
if err != nil {
return err
}
podList.Items = append(podList.Items, list.Items...)
}
_, err = r.applyGPUInfoCM(ctx, nodeList, podList, clientSet)
return err
}
// SetupWithManager sets up the controller with the Manager.
func (r *GpuReconciler) SetupWithManager(mgr ctrl.Manager) error {
r.Logger = ctrl.Log.WithName("gpu-controller")
r.Logger.V(1).Info("starting gpu controller")
// use clientSet to get resources from the API Server, not from Informer's cache
clientSet, err := kubernetes.NewForConfig(mgr.GetConfig())
if err != nil {
r.Logger.Error(err, "failed to init")
return nil
}
// init node-gpu-info configmap
r.Logger.V(1).Info("initializing node-gpu-info configmap")
if err := r.initGPUInfoCM(context.Background(), clientSet); err != nil {
return err
}
// build index for node which have GPU
if err := mgr.GetFieldIndexer().IndexField(context.Background(), &corev1.Node{}, NodeIndexKey, func(rawObj client.Object) []string {
node := rawObj.(*corev1 | {
/*
"nodeMap": {
"sealos-poc-gpu-master-0":{},
"sealos-poc-gpu-node-1":{"gpu.count":"1","gpu.memory":"15360","gpu.product":"Tesla-T4"}}
}
*/
nodeMap := make(map[string]map[string]string)
var nodeName string
// get the GPU product, GPU memory, GPU allocatable number on the node
for _, node := range nodeList.Items {
nodeName = node.Name
if _, ok := nodeMap[nodeName]; !ok {
nodeMap[nodeName] = make(map[string]string)
}
gpuProduct, ok1 := node.Labels[NvidiaGPUProduct]
gpuMemory, ok2 := node.Labels[NvidiaGPUMemory]
gpuCount, ok3 := node.Status.Allocatable[NvidiaGPU]
if !ok1 || !ok2 || !ok3 {
continue | identifier_body |
gpu_controller.go | 8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/event"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/predicate"
"sigs.k8s.io/controller-runtime/pkg/source"
)
type GpuReconciler struct {
client.Client
Scheme *runtime.Scheme
Logger logr.Logger
}
const (
GPU = "gpu"
GPUInfo = "node-gpu-info"
GPUInfoNameSpace = "node-system"
NvidiaGPUProduct = "nvidia.com/gpu.product"
NvidiaGPUMemory = "nvidia.com/gpu.memory"
NvidiaGPU corev1.ResourceName = "nvidia.com/gpu"
GPUProduct = "gpu.product"
GPUCount = "gpu.count"
GPUMemory = "gpu.memory"
NodeIndexKey = "node"
PodIndexKey = "pod"
)
//+kubebuilder:rbac:groups=node.k8s.io,resources=gpus,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=node.k8s.io,resources=gpus/status,verbs=get;update;patch
//+kubebuilder:rbac:groups=node.k8s.io,resources=gpus/finalizers,verbs=update
//+kubebuilder:rbac:groups="",resources=nodes,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups="",resources=nodes/status,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups="",resources=configmaps,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups="",resources=pods,verbs=get;list;watch;create;update;patch;delete
func (r *GpuReconciler) Reconcile(ctx context.Context, _ ctrl.Request) (ctrl.Result, error) {
nodeList := &corev1.NodeList{}
if err := r.List(ctx, nodeList, client.MatchingFields{NodeIndexKey: GPU}); err != nil {
r.Logger.Error(err, "failed to get node list")
return ctrl.Result{}, err
}
podList := &corev1.PodList{}
if err := r.List(ctx, podList, client.MatchingFields{PodIndexKey: GPU}); err != nil {
r.Logger.Error(err, "failed to get pod list")
return ctrl.Result{}, err
}
return r.applyGPUInfoCM(ctx, nodeList, podList, nil)
}
func (r *GpuReconciler) | (ctx context.Context, nodeList *corev1.NodeList, podList *corev1.PodList, clientSet *kubernetes.Clientset) (ctrl.Result, error) {
/*
"nodeMap": {
"sealos-poc-gpu-master-0":{},
"sealos-poc-gpu-node-1":{"gpu.count":"1","gpu.memory":"15360","gpu.product":"Tesla-T4"}}
}
*/
nodeMap := make(map[string]map[string]string)
var nodeName string
// get the GPU product, GPU memory, GPU allocatable number on the node
for _, node := range nodeList.Items {
nodeName = node.Name
if _, ok := nodeMap[nodeName]; !ok {
nodeMap[nodeName] = make(map[string]string)
}
gpuProduct, ok1 := node.Labels[NvidiaGPUProduct]
gpuMemory, ok2 := node.Labels[NvidiaGPUMemory]
gpuCount, ok3 := node.Status.Allocatable[NvidiaGPU]
if !ok1 || !ok2 || !ok3 {
continue
}
nodeMap[nodeName][GPUProduct] = gpuProduct
nodeMap[nodeName][GPUMemory] = gpuMemory
nodeMap[nodeName][GPUCount] = gpuCount.String()
}
// get the number of GPU used by pods that are using GPU
for _, pod := range podList.Items {
phase := pod.Status.Phase
if phase == corev1.PodSucceeded {
continue
}
nodeName = pod.Spec.NodeName
_, ok1 := nodeMap[nodeName]
gpuProduct, ok2 := pod.Spec.NodeSelector[NvidiaGPUProduct]
if !ok1 || !ok2 {
continue
}
containers := pod.Spec.Containers
for _, container := range containers {
gpuCount, ok := container.Resources.Limits[NvidiaGPU]
if !ok {
continue
}
r.Logger.V(1).Info("pod using GPU", "name", pod.Name, "namespace", pod.Namespace, "gpuCount", gpuCount, "gpuProduct", gpuProduct)
oldCount, err := strconv.ParseInt(nodeMap[nodeName][GPUCount], 10, 64)
if err != nil {
r.Logger.Error(err, "failed to parse gpu.count string to int64")
return ctrl.Result{}, err
}
newCount := oldCount - gpuCount.Value()
nodeMap[nodeName][GPUCount] = strconv.FormatInt(newCount, 10)
}
}
// marshal node map to JSON string
nodeMapBytes, err := json.Marshal(nodeMap)
if err != nil {
r.Logger.Error(err, "failed to marshal node map to JSON string")
return ctrl.Result{}, err
}
nodeMapStr := string(nodeMapBytes)
// create or update gpu-info configmap
configmap := &corev1.ConfigMap{}
if clientSet != nil {
configmap, err = clientSet.CoreV1().ConfigMaps(GPUInfoNameSpace).Get(ctx, GPUInfo, metaV1.GetOptions{})
} else {
err = r.Get(ctx, types.NamespacedName{Name: GPUInfo, Namespace: GPUInfoNameSpace}, configmap)
}
if errors.IsNotFound(err) {
configmap = &corev1.ConfigMap{
ObjectMeta: metaV1.ObjectMeta{
Name: GPUInfo,
Namespace: GPUInfoNameSpace,
},
Data: map[string]string{
GPU: nodeMapStr,
},
}
if err := r.Create(ctx, configmap); err != nil {
r.Logger.Error(err, "failed to create gpu-info configmap")
return ctrl.Result{}, err
}
} else if err != nil {
r.Logger.Error(err, "failed to get gpu-info configmap")
return ctrl.Result{}, err
}
if configmap.Data == nil {
configmap.Data = map[string]string{}
}
if configmap.Data[GPU] != nodeMapStr {
configmap.Data[GPU] = nodeMapStr
if err := r.Update(ctx, configmap); err != nil && !errors.IsConflict(err) {
r.Logger.Error(err, "failed to update gpu-info configmap")
return ctrl.Result{}, err
}
}
r.Logger.V(1).Info("gpu-info configmap status", "gpu", configmap.Data[GPU])
return ctrl.Result{}, nil
}
func (r *GpuReconciler) initGPUInfoCM(ctx context.Context, clientSet *kubernetes.Clientset) error {
// filter for nodes that have GPU
req1, _ := labels.NewRequirement(NvidiaGPUProduct, selection.Exists, []string{})
req2, _ := labels.NewRequirement(NvidiaGPUMemory, selection.Exists, []string{})
selector := labels.NewSelector().Add(*req1, *req2)
listOpts := metaV1.ListOptions{
LabelSelector: selector.String(),
}
nodeList, err := clientSet.CoreV1().Nodes().List(ctx, listOpts)
if err != nil {
return err
}
podList := &corev1.PodList{}
for _, item := range nodeList.Items {
list, err := clientSet.CoreV1().Pods("").List(context.TODO(), metaV1.ListOptions{
FieldSelector: fields.OneTermEqualSelector("spec.nodeName", item.Name).String(),
})
if err != nil {
return err
}
podList.Items = append(podList.Items, list.Items...)
}
_, err = r.applyGPUInfoCM(ctx, nodeList, podList, clientSet)
return err
}
// SetupWithManager sets up the controller with the Manager.
func (r *GpuReconciler) SetupWithManager(mgr ctrl.Manager) error {
r.Logger = ctrl.Log.WithName("gpu-controller")
r.Logger.V(1).Info("starting gpu controller")
// use clientSet to get resources from the API Server, not from Informer's cache
clientSet, err := kubernetes.NewForConfig(mgr.GetConfig())
if err != nil {
r.Logger.Error(err, "failed to init")
return nil
}
// init node-gpu-info configmap
r.Logger.V(1).Info("initializing node-gpu-info configmap")
if err := r.initGPUInfoCM(context.Background(), clientSet); err != nil {
return err
}
// build index for node which have GPU
if err := mgr.GetFieldIndexer().IndexField(context.Background(), &corev1.Node{}, NodeIndexKey, func(rawObj client.Object) []string {
node := rawObj.(*corev | applyGPUInfoCM | identifier_name |
ThriftHiveMock.js | ({
taskTrackers: 1,
mapTasks: 0,
reduceTasks: 0,
maxMapTasks: 2,
maxReduceTasks: 2,
state: 2
});
};
var idlist = ['hadoop_20110408154949_8b2be199-02ae-40fe-9492-d197ade572f2',
'hadoop_20110408154949_8b2be199-03ff-40fe-9492-d197ade572f2',
'hadoop_20110408154949_8b2be199-04bc-40fe-9492-d197ade572f2',
'hadoop_20110408154949_8b2be199-05db-40fe-9492-d197ade572f2',
'hadoop_20110408154949_8b2be199-06fc-40fe-9492-d197ade572f2'];
var queryId = function(){ return choose(idlist); };
var operator = function(num){
var o = new queryplan_types.Operator({
operatorId: 'TS_12' + num,
operatorType: Math.floor(Math.random() * 10),
operatorAttributes: null,
operatorCounters: null,
done: true,
started: true
});
return o;
};
var operatorGraph = function(ops){
var al = [];
for (var i = 0; i < ops.length - 1; i++){
al.push(new queryplan_types.Adjacency({ node: ops[i].operatorId, children: [ ops[i+1].operatorId ], adjacencyType: 0 }));
}
return new queryplan_types.Graph({
nodeType: 0,
roots: null,
adjacencyList: al
});
};
var task = function(stage,mapreduce,operators){
var ops = [];
for (var i = 0; i < operators; i++){ ops.push(operator(i)); }
return new queryplan_types.Task({
taskId: 'Stage-' + stage + '_' + mapreduce,
taskType: (mapreduce == 'MAP' ? 0 : 1),
taskAttributes: null,
taskCounters: null,
operatorList: ops,
operatorGraph: operatorGraph(ops),
done: true,
started: true
});
};
var stage = function(stage){
var cntr_map = 'CNTR_NAME_Stage-' + stage + '_MAP_PROGRESS';
var cntr_reduce = 'CNTR_NAME_Stage-' + stage + '_REDUCE_PROGRESS';
var counters = {};
counters[cntr_map] = 100;
counters[cntr_reduce] = 100;
return new queryplan_types.Stage({
stageId: 'Stage-' + stage,
stageType: 3,
stageAttributes: null,
stageCounters: counters,
taskList: [task(stage,'MAP',3), task(stage,'REDUCE',1)],
done: true,
started: true
});
};
exports.query_plan = function(querystring){
if (querystring == undefined){
return new queryplan_types.QueryPlan({});
}
var query = new queryplan_types.Query({
queryId: queryId(),
queryType: null,
queryAttributes: { queryString: querystring },
queryCounters: null,
stageList: [stage(1), stage(2)],
stageGraph: new queryplan_types.Graph({
nodeType: 1,
roots: null,
adjacencyList: [ new queryplan_types.Adjacency({ node: 'Stage-1', children: [ 'Stage-2' ], adjacencyType: 0 }) ]
}),
done: true,
started: true
});
return new queryplan_types.QueryPlan({ queries: [query], done: false, started: false });
};
var columns = function(query){
var match = /select (.*) from .*/im.exec(query);
if (! match)
throw new Error('query field definition invalid!');
return match[1].split(/, /).map(function(s){return s.trim();});
};
var columninfo = function(column){
var name = column;
var type = 'string';
var ex = undefined;
var match = /as ([_a-zA-Z0-9]*)$/im.exec(column);
if (match){
name = match[1];
}
if (/^count/im.exec(column)) {
type = 'bigint';
ex = 'count';
}
else if (/^(sum|avg|min|max)/im.exec(column)) {
type = 'bigint';
ex = 'aggr';
}
else if (/id$/.exec(name)) {
type = 'bigint';
ex = 'id';
}
if (/^"(.*)"$/.exec(name)) {
name = /^"(.*)"$/.exec(name)[1];
ex = "strcopy";
}
else if (name == 'yyyymmdd') {
ex = 'date';
}
else if (name == 'hhmm' || name == 'hhmmss') {
ex = 'time';
}
else if (/name$/i.exec(name)) {
ex = 'name';
}
else if (/kana$/i.exec(name)) {
ex = 'kana';
}
return {name: name, type: type, ex: ex};
};
exports.schema = function(query){
if (! query) {
return new hive_metastore_types.Schema({});
}
if (/^show (databases|tables|partitions)/i.exec(query)) {
return new hive_metastore_types.Schema({
fieldSchemas: [new hive_metastore_types.FieldSchema({name: 'name', type: 'string', comment: undefined})],
properties: null
});
}
if (/^describe/i.exec(query)) {
return new hive_metastore_types.Schema({
fieldSchemas: [
new hive_metastore_types.FieldSchema({name: 'col_name', type: 'string', comment: 'from deserializer'}),
new hive_metastore_types.FieldSchema({name: 'data_type', type: 'string', comment: 'from deserializer'}),
new hive_metastore_types.FieldSchema({name: 'comment', type: 'string', comment: 'from deserializer'})
],
properties: null
});
}
var cols = columns(query.split('\n').join(' '));
return new hive_metastore_types.Schema({
fieldSchemas: cols.map(function(c){
var i = columninfo(c);
return new hive_metastore_types.FieldSchema({name: i.name, type: i.type, comment: undefined});
}),
properties: null
});
};
var generateValue = function(colinfo){
function pad(n){return n<10 ? '0'+n : n;}
switch(colinfo.ex) {
case 'strcopy':
return colinfo.name;
case 'date':
var d1 = new Date((new Date()).getTime() - random_num(50) * 86400 * 1000);
return '' + d1.getFullYear() + pad(d1.getMonth()+1) + pad(d1.getDate());
case 'time':
var d2 = new Date((new Date()).getTime() - random_num(12 * 60) * 60 * 1000);
return '' + pad(d2.getHours()) + pad(d2.getMinutes());
case 'id':
return random_num(500);
case 'aggr':
return random_num(10000);
case 'count':
return random_num(2000);
case 'name':
return random_name(random_num(10));
case 'kana':
return random_kana(random_num(10));
}
if (colinfo.type == 'string'){
return random_string(random_num(50));
}
return random_num(100);
};
var generate_tablename = exports.generate_tablename = function(){
var part_depth = choose([1,1,2,2,3,4]);
var name = '';
for (var i = 0; i < part_depth; i++) {
if (name.length > 0)
name += '_';
name += random_alphabetname(3) + random_num(3);
}
return name;
};
var generate_subtree = exports.generate_subtree = function(subtree_label, parent) {
var parent_part = parent ? parent + '/' : '';
var current_depth_label = subtree_label;
var children_label = null;
if (subtree_label.indexOf('_') > -1) {
var separator = subtree_label.indexOf('_');
current_depth_label = subtree_label.su | bstring(0, separator);
children_label = subtree_label.substring(separator + 1);
}
var matched = /^([a-z]+)(\d+)$/.exec(current_depth_label);
var fieldname = matched[1 | conditional_block | |
ThriftHiveMock.js | ', 'せ', 'そ', 'た', 'ち', 'つ', 'て', 'と',
'な', 'に', 'ぬ', 'ね', 'の', 'は', 'ひ', 'ふ', 'へ', 'ほ',
'ま', 'み', 'む', 'め', 'も', 'や', 'ゆ', 'よ',
'ら', 'り', 'る', 'れ', 'ろ', 'わ', 'を', 'ん'
];
var random_num = function(max){ return Math.floor(Math.random() * max) + 1; };
var random_index = function(max){ return Math.floor(Math.random() * max); };
var choose = function(list){
return list[random_index(list.length)];
};
var random_string = function(len){
var ret = '';
for (var i = 0; i < len; i++){ ret += choose(chars); }
return ret;
};
var random_name = function(len){
var ret = '';
for (var i = 0; i < len; i++){ ret += choose(namechars); }
return ret;
};
var random_kana = function(len){
var ret = '';
for (var i = 0; i < len; i++){ ret += choose(kana_chars); }
return ret;
};
var random_alphabetname = function(len){
var ret = '';
for (var i = 0; i < len; i++){ ret += choose(alphabet_namechars); }
return ret;
};
exports.cluster_status = function(){
return new hive_service_types.HiveClusterStatus({
taskTrackers: 1,
mapTasks: 0,
reduceTasks: 0,
maxMapTasks: 2,
maxReduceTasks: 2,
state: 2
});
};
var idlist = ['hadoop_20110408154949_8b2be199-02ae-40fe-9492-d197ade572f2',
'hadoop_20110408154949_8b2be199-03ff-40fe-9492-d197ade572f2',
'hadoop_20110408154949_8b2be199-04bc-40fe-9492-d197ade572f2',
'hadoop_20110408154949_8b2be199-05db-40fe-9492-d197ade572f2',
'hadoop_20110408154949_8b2be199-06fc-40fe-9492-d197ade572f2'];
var queryId = function(){ return choose(idlist); };
var operator = function(num){
var o = new queryplan_types.Operator({
operatorId: 'TS_12' + num,
operatorType: Math.floor(Math.random() * 10),
operatorAttributes: null,
operatorCounters: null,
done: true,
started: true
});
return o;
};
var operatorGraph = function(ops){
var al = [];
for (var i = 0; i < ops.length - 1; i++){
al.push(new queryplan_types.Adjacency({ node: ops[i].operatorId, children: [ ops[i+1].operatorId ], adjacencyType: 0 }));
}
return new queryplan_types.Graph({
nodeType: 0,
roots: null,
adjacencyList: al
});
};
var task = function(stage,mapreduce,operators){
var ops = [];
for (var i = 0; i < operators; i++){ ops.push(operator(i)); }
return new queryplan_types.Task({
taskId: 'Stage-' + stage + '_' + mapreduce,
taskType: (mapreduce == 'MAP' ? 0 : 1),
taskAttributes: null,
taskCounters: null,
operatorList: ops,
operatorGraph: operatorGraph(ops),
done: true,
started: true
});
};
var stage = function(stage){
var cntr_map = 'CNTR_NAME_Stage-' + stage + '_MAP_PROGRESS';
var cntr_reduce = 'CNTR_NAME_Stage-' + stage + '_REDUCE_PROGRESS';
var counters = {};
counters[cntr_map] = 100;
counters[cntr_reduce] = 100;
return new queryplan_types.Stage({
stageId: 'Stage-' + stage,
stageType: 3,
stageAttributes: null,
stageCounters: counters,
taskList: [task(stage,'MAP',3), task(stage,'REDUCE',1)],
done: true,
started: true
});
};
exports.query_plan = function(querystring){
if (querystring == undefined){
return new queryplan_types.QueryPlan({});
}
var query = new queryplan_types.Query({
queryId: queryId(),
queryType: null,
queryAttributes: { queryString: querystring },
queryCounters: null,
stageList: [stage(1), stage(2)],
stageGraph: new queryplan_types.Graph({
nodeType: 1,
roots: null,
adjacencyList: [ new queryplan_types.Adjacency({ node: 'Stage-1', children: [ 'Stage-2' ], adjacencyType: 0 }) ]
}),
done: true,
started: true
});
return new queryplan_types.QueryPlan({ queries: [query], done: false, started: false });
};
var columns = function(query){
var match = /select (.*) from .*/im.exec(query);
if (! match)
throw new Error('query field definition invalid!');
return match[1].split(/, /).map(function(s){return s.trim();});
};
var columninfo = function(column){
var name = column;
var type = 'string';
var ex = undefined;
var match = /as ([_a-zA-Z0-9]*)$/im.exec(column);
if (match){
name = match[1];
}
if (/^count/im.exec(column)) {
type = 'bigint';
ex = 'count';
}
else if (/^(sum|avg|min|max)/im.exec(column)) {
type = 'bigint';
ex = 'aggr';
}
else if (/id$/.exec(name)) {
type = 'bigint';
ex = 'id';
}
if (/^"(.*)"$/.exec(name)) {
name = /^"(.*)"$/.exec(name)[1];
ex = "strcopy";
}
else if (name == 'yyyymmdd') {
ex = 'date';
}
else if (name == 'hhmm' || name == 'hhmmss') {
ex = 'time';
}
else if (/name$/i.exec(name)) {
ex = 'name';
}
else if (/kana$/i.exec(name)) {
ex = 'kana';
}
return {name: name, type: type, ex: ex};
};
exports.schema = function(query){
if (! query) {
return new hive_metastore_types.Schema({});
}
if (/^show (databases|tables|partitions)/i.exec(query)) {
return new hive_metastore_types.Schema({
fieldSchemas: [new hive_metastore_types.FieldSchema({name: 'name', type: 'string', comment: undefined})],
properties: null
});
}
if (/^describe/i.exec(query)) {
return new hive_metastore_types.Schema({
fieldSchemas: [
new hive_metastore_types.FieldSchema({name: 'col_name', type: 'string', comment: 'from deserializer'}),
new hive_metastore_types.FieldSchema({name: 'data_type', type: 'string', comment: 'from deserializer'}),
new hive_metastore_types.FieldSchema({name: 'comment', type: 'string', comment: 'from deserializer'})
],
properties: null
});
}
var cols = columns(query.split('\n').join(' '));
return new hive_metastore_types.Schema({
fieldSchemas: cols.map(function(c){
var i = columninfo(c);
return new hive_metastore_types.FieldSchema({name: i.name, type: i.type, comment: undefined});
}),
properties: null
});
};
var generateValue = function(colinfo){
function pad(n){return n<10 ? '0'+n : n;}
switch(colinfo.ex) {
case 'strcopy':
return colinfo | me;
case 'date':
var d1 = new Date((new Date()).getTime() - random_num(50) * 86400 * 1000);
return '' + d1.getFullYear() + pad(d1.getMonth()+1) + pad(d1.getDate());
case 'time':
var d2 = new Date((new Date()).getTime() - random_num(12 * 60) * 60 * 1000);
return '' + pad(d2.getHours()) | .na | identifier_name |
ThriftHiveMock.js | ', 'せ', 'そ', 'た', 'ち', 'つ', 'て', 'と',
'な', 'に', 'ぬ', 'ね', 'の', 'は', 'ひ', 'ふ', 'へ', 'ほ',
'ま', 'み', 'む', 'め', 'も', 'や', 'ゆ', 'よ',
'ら', 'り', 'る', 'れ', 'ろ', 'わ', 'を', 'ん'
];
var random_num = function(max){ return Math.floor(Math.random() * max) + 1; };
var random_index = function(max){ return Math.floor(Math.random() * max); };
var choose = function(list){
return list[random_index(list.length)];
};
var random_string = function(len){
var ret = '';
for (var i = 0; i < len; i++){ ret += choose(chars); }
return ret;
};
var random_name = function(len){
var ret = '';
for (var i = 0; i < len; i++){ ret += choose(namechars); }
return ret;
};
var random_kana = function(len){
var ret = '';
for (var i = 0; i < len; i++){ ret += choose(kana_chars); }
return ret;
};
var random_alphabetname = function(len){
var ret = '';
for (var i = 0; i < len; i++){ ret += choose(alphabet_namechars); }
return ret;
};
exports.cluster_status = function(){
return new hive_service_types.HiveClusterStatus({
taskTrackers: 1,
mapTasks: 0,
reduceTasks: 0,
maxMapTasks: 2,
maxReduceTasks: 2,
state: 2
});
};
var idlist = ['hadoop_20110408154949_8b2be199-02ae-40fe-9492-d197ade572f2',
'hadoop_20110408154949_8b2be199-03ff-40fe-9492-d197ade572f2',
'hadoop_20110408154949_8b2be199-04bc-40fe-9492-d197ade572f2',
'hadoop_20110408154949_8b2be199-05db-40fe-9492-d197ade572f2',
'hadoop_20110408154949_8b2be199-06fc-40fe-9492-d197ade572f2'];
var queryId = function(){ return choose(idlist); };
var operator = function(num){
var o = new queryplan_types.Operator({
operatorId: 'TS_12' + num,
operatorType: Math.floor(Math.random() * 10),
operatorAttributes: null,
operatorCounters: null,
done: true,
started: true
});
return o;
};
var operatorGraph = function(ops){
var al = [];
for (var i = 0; i < ops.length - 1; i++){
al.push(new queryplan_types.Adjacency({ node: ops[i].operatorId, children: [ ops[i+1].operatorId ], adjacencyType: 0 }));
}
return new queryplan_types.Graph({
nodeType: 0,
roots: null,
adjacencyList: al
});
};
var task = function(stage,mapreduce,operators){
var ops = [];
for (var i = 0; i < operators; i++){ ops.push(operator(i)); }
return new queryplan_types.Task({
taskId: 'Stage-' + stage + '_' + mapreduce,
taskType: (mapreduce == 'MAP' ? 0 : 1),
taskAttributes: null,
taskCounters: null,
operatorList: ops,
operatorGraph: operatorGraph(ops),
done: true,
started: true
});
};
var stage = function(stage){
var cntr_map = 'CNTR_NAME_Stage-' + stage + '_MAP_PROGRESS';
var cntr_reduce = 'CNTR_NAME_Stage-' + stage + '_REDUCE_PROGRESS';
var counters = {};
counters[cntr_map] = 100;
counters[cntr_reduce] = 100;
return new queryplan_types.Stage({
stageId: 'Stage-' + stage,
stageType: 3,
stageAttributes: null,
stageCounters: counters,
taskList: [task(stage,'MAP',3), task(stage,'REDUCE',1)],
done: true,
started: true
});
};
exports.query_plan = function(querystring){
if (querystring == undefined){
return new queryplan_types.QueryPlan({});
}
var query = new queryplan_types.Query({
queryId: queryId(),
queryType: null,
queryAttributes: { queryString: querystring },
queryCounters: null,
stageList: [stage(1), stage(2)],
stageGraph: new queryplan_types.Graph({
nodeType: 1,
roots: null,
adjacencyList: [ new queryplan_types.Adjacency({ node: 'Stage-1', children: [ 'Stage-2' ], adjacencyType: 0 }) ]
}),
done: true,
started: true
});
return new queryplan_types.QueryPlan({ queries: [query], done: false, started: false });
};
var columns = function(query){
var match = /select (.*) from .*/im.exec(query);
if (! match)
throw new Error('query field definition invalid!');
return match[1].split(/, /).map(function(s){return s.trim();});
};
var columninfo = function(column){
var name = column;
var type = 'string';
var ex = undefined;
var match = /as ([_a-zA-Z0-9]*)$/im.exec(column);
if (match){
name = match[1];
}
if (/^count/im.exec(column)) {
type = 'bigint';
ex = 'count';
}
else if (/^(sum|avg|min|max)/im.exec(column)) {
type = 'bigint';
ex = 'aggr';
}
else if (/id$/.exec(name)) {
type = 'bigint';
ex = 'id';
}
if (/^"(.*)"$/.exec(name)) {
name = /^"(.*)"$/.exec(name)[1];
ex = "strcopy";
}
else if (name == 'yyyymmdd') {
ex = 'date';
}
else if (name == 'hhmm' || name == 'hhmmss') {
ex = 'time';
}
else if (/name$/i.exec(name)) {
ex = 'name';
}
else if (/kana$/i.exec(name)) {
ex = 'kana';
}
return {name: name, type: type, ex: ex};
};
exports.schema = function(query){
if (! query) {
return new hive_metastore_types.Schema({});
}
if (/^show (databases|tables|partitions)/i.exec(query)) {
return new hive_metastore_types.Schema({
fieldSchemas: [new hive_metastore_types.FieldSchema({name: 'name', type: 'string', comment: undefined})],
properties: null
});
}
if (/^describe/i.exec(query)) {
return new hive_metastore_types.Schema({
fieldSchemas: [
new hive_metastore_types.FieldSchema({name: 'col_name', type: 'string', comment: 'from deserializer'}),
new hive_metastore_types.FieldSchema({name: 'data_type', type: 'string', comment: 'from deserializer'}),
new hive_metastore_types.FieldSchema({name: 'comment', type: 'string', comment: 'from deserializer'})
],
properties: null
});
}
var cols = columns(query.split('\n').join(' '));
return new hive_metastore_types.Schema({
fieldSchemas: cols.map(function(c){
var i = columninfo(c);
return new hive_metastore_types.FieldSchema({name: i.name, type: i.type, comment: undefined});
}),
properties: null
});
};
var generateValue = function(colinfo){
function pad(n){return n<10 ? '0'+n : n;}
switch(colinfo.ex) {
case 'strcopy':
return colinfo.name; | = new Date((new Date()).getTime() - random_num(50) * 86400 * 1000);
return '' + d1.getFullYear() + pad(d1.getMonth()+1) + pad(d1.getDate());
case 'time':
var d2 = new Date((new Date()).getTime() - random_num(12 * 60) * 60 * 1000);
return '' + pad(d2.getHours()) + |
case 'date':
var d1 | identifier_body |
ThriftHiveMock.js | す', 'せ', 'そ', 'た', 'ち', 'つ', 'て', 'と',
'な', 'に', 'ぬ', 'ね', 'の', 'は', 'ひ', 'ふ', 'へ', 'ほ',
'ま', 'み', 'む', 'め', 'も', 'や', 'ゆ', 'よ',
'ら', 'り', 'る', 'れ', 'ろ', 'わ', 'を', 'ん'
];
var random_num = function(max){ return Math.floor(Math.random() * max) + 1; };
var random_index = function(max){ return Math.floor(Math.random() * max); };
var choose = function(list){
return list[random_index(list.length)];
};
var random_string = function(len){
var ret = '';
for (var i = 0; i < len; i++){ ret += choose(chars); }
return ret;
};
var random_name = function(len){
var ret = '';
for (var i = 0; i < len; i++){ ret += choose(namechars); }
return ret;
};
var random_kana = function(len){
var ret = '';
for (var i = 0; i < len; i++){ ret += choose(kana_chars); }
return ret;
};
var random_alphabetname = function(len){
var ret = '';
for (var i = 0; i < len; i++){ ret += choose(alphabet_namechars); }
return ret;
};
exports.cluster_status = function(){
return new hive_service_types.HiveClusterStatus({
taskTrackers: 1,
mapTasks: 0,
reduceTasks: 0,
maxMapTasks: 2,
maxReduceTasks: 2,
state: 2
});
};
var idlist = ['hadoop_20110408154949_8b2be199-02ae-40fe-9492-d197ade572f2',
'hadoop_20110408154949_8b2be199-03ff-40fe-9492-d197ade572f2',
'hadoop_20110408154949_8b2be199-04bc-40fe-9492-d197ade572f2',
'hadoop_20110408154949_8b2be199-05db-40fe-9492-d197ade572f2',
'hadoop_20110408154949_8b2be199-06fc-40fe-9492-d197ade572f2'];
var queryId = function(){ return choose(idlist); };
var operator = function(num){
var o = new queryplan_types.Operator({
operatorId: 'TS_12' + num,
operatorType: Math.floor(Math.random() * 10),
operatorAttributes: null,
operatorCounters: null,
done: true,
started: true
});
return o;
};
var operatorGraph = function(ops){
var al = [];
for (var i = 0; i < ops.length - 1; i++){
al.push(new queryplan_types.Adjacency({ node: ops[i].operatorId, children: [ ops[i+1].operatorId ], adjacencyType: 0 }));
}
return new queryplan_types.Graph({
nodeType: 0,
roots: null,
adjacencyList: al
});
};
var task = function(stage,mapreduce,operators){
var ops = [];
for (var i = 0; i < operators; i++){ ops.push(operator(i)); }
return new queryplan_types.Task({
taskId: 'Stage-' + stage + '_' + mapreduce,
taskType: (mapreduce == 'MAP' ? 0 : 1),
taskAttributes: null,
taskCounters: null,
operatorList: ops,
operatorGraph: operatorGraph(ops),
done: true,
started: true
});
};
var stage = function(stage){
var cntr_map = 'CNTR_NAME_Stage-' + stage + '_MAP_PROGRESS';
var cntr_reduce = 'CNTR_NAME_Stage-' + stage + '_REDUCE_PROGRESS';
var counters = {};
counters[cntr_map] = 100;
counters[cntr_reduce] = 100;
return new queryplan_types.Stage({
stageId: 'Stage-' + stage,
stageType: 3,
stageAttributes: null,
stageCounters: counters,
taskList: [task(stage,'MAP',3), task(stage,'REDUCE',1)],
done: true,
started: true
});
};
exports.query_plan = function(querystring){
if (querystring == undefined){
return new queryplan_types.QueryPlan({});
}
var query = new queryplan_types.Query({
queryId: queryId(),
queryType: null,
queryAttributes: { queryString: querystring },
queryCounters: null,
stageList: [stage(1), stage(2)],
stageGraph: new queryplan_types.Graph({
nodeType: 1,
roots: null,
adjacencyList: [ new queryplan_types.Adjacency({ node: 'Stage-1', children: [ 'Stage-2' ], adjacencyType: 0 }) ]
}),
done: true,
started: true
});
return new queryplan_types.QueryPlan({ queries: [query], done: false, started: false });
};
var columns = function(query){
var match = /select (.*) from .*/im.exec(query);
if (! match)
throw new Error('query field definition invalid!');
return match[1].split(/, /).map(function(s){return s.trim();});
};
var columninfo = function(column){
var name = column;
var type = 'string';
var ex = undefined;
var match = /as ([_a-zA-Z0-9]*)$/im.exec(column);
if (match){ | type = 'bigint';
ex = 'count';
}
else if (/^(sum|avg|min|max)/im.exec(column)) {
type = 'bigint';
ex = 'aggr';
}
else if (/id$/.exec(name)) {
type = 'bigint';
ex = 'id';
}
if (/^"(.*)"$/.exec(name)) {
name = /^"(.*)"$/.exec(name)[1];
ex = "strcopy";
}
else if (name == 'yyyymmdd') {
ex = 'date';
}
else if (name == 'hhmm' || name == 'hhmmss') {
ex = 'time';
}
else if (/name$/i.exec(name)) {
ex = 'name';
}
else if (/kana$/i.exec(name)) {
ex = 'kana';
}
return {name: name, type: type, ex: ex};
};
exports.schema = function(query){
if (! query) {
return new hive_metastore_types.Schema({});
}
if (/^show (databases|tables|partitions)/i.exec(query)) {
return new hive_metastore_types.Schema({
fieldSchemas: [new hive_metastore_types.FieldSchema({name: 'name', type: 'string', comment: undefined})],
properties: null
});
}
if (/^describe/i.exec(query)) {
return new hive_metastore_types.Schema({
fieldSchemas: [
new hive_metastore_types.FieldSchema({name: 'col_name', type: 'string', comment: 'from deserializer'}),
new hive_metastore_types.FieldSchema({name: 'data_type', type: 'string', comment: 'from deserializer'}),
new hive_metastore_types.FieldSchema({name: 'comment', type: 'string', comment: 'from deserializer'})
],
properties: null
});
}
var cols = columns(query.split('\n').join(' '));
return new hive_metastore_types.Schema({
fieldSchemas: cols.map(function(c){
var i = columninfo(c);
return new hive_metastore_types.FieldSchema({name: i.name, type: i.type, comment: undefined});
}),
properties: null
});
};
var generateValue = function(colinfo){
function pad(n){return n<10 ? '0'+n : n;}
switch(colinfo.ex) {
case 'strcopy':
return colinfo.name;
case 'date':
var d1 = new Date((new Date()).getTime() - random_num(50) * 86400 * 1000);
return '' + d1.getFullYear() + pad(d1.getMonth()+1) + pad(d1.getDate());
case 'time':
var d2 = new Date((new Date()).getTime() - random_num(12 * 60) * 60 * 1000);
return '' + pad(d2.getHours()) + | name = match[1];
}
if (/^count/im.exec(column)) { | random_line_split |
query.go | in it, or the specific txHash does not have a label, an empty
// string and no error are returned.
func (s *Store) TxLabel(ns walletdb.ReadBucket, txHash chainhash.Hash) (string,
error) {
label, err := FetchTxLabel(ns, txHash)
switch err {
// If there are no saved labels yet (the bucket has not been created) or
// there is not a label for this particular tx, we ignore the error.
case ErrNoLabelBucket:
fallthrough
case ErrTxLabelNotFound:
return "", nil
// If we found the label, we return it.
case nil:
return label, nil
}
// Otherwise, another error occurred while looking uo the label, so we
// return it.
return "", err
}
// TxDetails looks up all recorded details regarding a transaction with some
// hash. In case of a hash collision, the most recent transaction with a
// matching hash is returned.
//
// Not finding a transaction with this hash is not an error. In this case,
// a nil TxDetails is returned.
func (s *Store) TxDetails(ns walletdb.ReadBucket, txHash *chainhash.Hash) (*TxDetails, error) {
// First, check whether there exists an unmined transaction with this
// hash. Use it if found.
v := existsRawUnmined(ns, txHash[:])
if v != nil {
return s.unminedTxDetails(ns, txHash, v)
}
// Otherwise, if there exists a mined transaction with this matching
// hash, skip over to the newest and begin fetching all details.
k, v := latestTxRecord(ns, txHash)
if v == nil {
// not found
return nil, nil
}
return s.minedTxDetails(ns, txHash, k, v)
}
// UniqueTxDetails looks up all recorded details for a transaction recorded
// mined in some particular block, or an unmined transaction if block is nil.
//
// Not finding a transaction with this hash from this block is not an error. In
// this case, a nil TxDetails is returned.
func (s *Store) UniqueTxDetails(ns walletdb.ReadBucket, txHash *chainhash.Hash,
block *Block) (*TxDetails, error) {
if block == nil {
v := existsRawUnmined(ns, txHash[:])
if v == nil {
return nil, nil
}
return s.unminedTxDetails(ns, txHash, v)
}
k, v := existsTxRecord(ns, txHash, block)
if v == nil {
return nil, nil
}
return s.minedTxDetails(ns, txHash, k, v)
}
// rangeUnminedTransactions executes the function f with TxDetails for every
// unmined transaction. f is not executed if no unmined transactions exist.
// Error returns from f (if any) are propigated to the caller. Returns true
// (signaling breaking out of a RangeTransactions) iff f executes and returns
// true.
func (s *Store) rangeUnminedTransactions(ns walletdb.ReadBucket, f func([]TxDetails) (bool, error)) (bool, error) {
var details []TxDetails
err := ns.NestedReadBucket(bucketUnmined).ForEach(func(k, v []byte) error {
if len(k) < 32 {
str := fmt.Sprintf("%s: short key (expected %d "+
"bytes, read %d)", bucketUnmined, 32, len(k))
return storeError(ErrData, str, nil)
}
var txHash chainhash.Hash
copy(txHash[:], k)
detail, err := s.unminedTxDetails(ns, &txHash, v)
if err != nil {
return err
}
// Because the key was created while foreach-ing over the
// bucket, it should be impossible for unminedTxDetails to ever
// successfully return a nil details struct.
details = append(details, *detail)
return nil
})
if err == nil && len(details) > 0 {
return f(details)
}
return false, err
}
// rangeBlockTransactions executes the function f with TxDetails for every block
// between heights begin and end (reverse order when end > begin) until f
// returns true, or the transactions from block is processed. Returns true iff
// f executes and returns true.
func (s *Store) rangeBlockTransactions(ns walletdb.ReadBucket, begin, end int32,
f func([]TxDetails) (bool, error)) (bool, error) {
// Mempool height is considered a high bound.
if begin < 0 {
begin = int32(^uint32(0) >> 1)
}
if end < 0 {
end = int32(^uint32(0) >> 1)
}
var blockIter blockIterator
var advance func(*blockIterator) bool
if begin < end {
// Iterate in forwards order
blockIter = makeReadBlockIterator(ns, begin)
advance = func(it *blockIterator) bool {
if !it.next() {
return false
}
return it.elem.Height <= end
}
} else {
// Iterate in backwards order, from begin -> end.
blockIter = makeReadBlockIterator(ns, begin)
advance = func(it *blockIterator) bool {
if !it.prev() {
return false
}
return end <= it.elem.Height
}
}
var details []TxDetails
for advance(&blockIter) {
block := &blockIter.elem
if cap(details) < len(block.transactions) {
details = make([]TxDetails, 0, len(block.transactions))
} else {
details = details[:0]
}
for _, txHash := range block.transactions {
k := keyTxRecord(&txHash, &block.Block)
v := existsRawTxRecord(ns, k)
if v == nil {
str := fmt.Sprintf("missing transaction %v for "+
"block %v", txHash, block.Height)
return false, storeError(ErrData, str, nil)
}
detail, err := s.minedTxDetails(ns, &txHash, k, v)
if err != nil {
return false, err
}
details = append(details, *detail)
}
// Every block record must have at least one transaction, so it
// is safe to call f.
brk, err := f(details)
if err != nil || brk {
return brk, err
}
}
return false, blockIter.err
}
// RangeTransactions runs the function f on all transaction details between
// blocks on the best chain over the height range [begin,end]. The special
// height -1 may be used to also include unmined transactions. If the end
// height comes before the begin height, blocks are iterated in reverse order
// and unmined transactions (if any) are processed first.
//
// The function f may return an error which, if non-nil, is propagated to the
// caller. Additionally, a boolean return value allows exiting the function
// early without reading any additional transactions early when true.
//
// All calls to f are guaranteed to be passed a slice with more than zero
// elements. The slice may be reused for multiple blocks, so it is not safe to
// use it after the loop iteration it was acquired.
func (s *Store) RangeTransactions(ns walletdb.ReadBucket, begin, end int32,
f func([]TxDetails) (bool, error)) error {
var addedUnmined bool
if begin < 0 {
brk, err := s.rangeUnminedTransactions(ns, f)
if err != nil || brk {
return err
}
addedUnmined = true
}
brk, err := s.rangeBlockTransactions(ns, begin, end, f)
if err == nil && !brk && !addedUnmined && end < 0 {
_, err = s.rangeUnminedTransactions(ns, f)
}
return err
}
// PreviousPkScripts returns a slice of previous output scripts for each credit
// output this transaction record debits from.
func (s *Store) PreviousPkScripts(ns walletdb.ReadBucket, rec *TxRecord, block *Block) ([][]byte, error) {
var pkScripts [][]byte
if block == nil {
for _, input := range rec.MsgTx.TxIn {
prevOut := &input.PreviousOutPoint
// Input may spend a previous unmined output, a
// mined output (which would still be marked
// unspent), or neither.
v := existsRawUnmined(ns, prevOut.Hash[:])
if v != nil {
// Ensure a credit exists for this
// unmined transaction before including
// the output script.
k := canonicalOutPoint(&prevOut.Hash, prevOut.Index)
if existsRawUnminedCredit(ns, k) == nil {
continue | }
pkScript, err := fetchRawTxRecordPkScript(
prevOut.Hash[:], v, prevOut.Index)
if err != nil { | random_line_split | |
query.go | Tx.TxOut) {
str := "saved credit index exceeds number of outputs"
return nil, storeError(ErrData, str, nil)
}
// The credit iterator does not record whether this credit was
// spent by an unmined transaction, so check that here.
if !credIter.elem.Spent {
k := canonicalOutPoint(txHash, credIter.elem.Index)
spent := existsRawUnminedInput(ns, k) != nil
credIter.elem.Spent = spent
}
details.Credits = append(details.Credits, credIter.elem)
}
if credIter.err != nil {
return nil, credIter.err
}
debIter := makeReadDebitIterator(ns, recKey)
for debIter.next() {
if int(debIter.elem.Index) >= len(details.MsgTx.TxIn) {
str := "saved debit index exceeds number of inputs"
return nil, storeError(ErrData, str, nil)
}
details.Debits = append(details.Debits, debIter.elem)
}
if debIter.err != nil {
return nil, debIter.err
}
// Finally, we add the transaction label to details.
details.Label, err = s.TxLabel(ns, *txHash)
if err != nil {
return nil, err
}
return &details, nil
}
// unminedTxDetails fetches the TxDetails for the unmined transaction with the
// hash txHash and the passed unmined record value.
func (s *Store) unminedTxDetails(ns walletdb.ReadBucket, txHash *chainhash.Hash, v []byte) (*TxDetails, error) {
details := TxDetails{
Block: BlockMeta{Block: Block{Height: -1}},
}
err := readRawTxRecord(txHash, v, &details.TxRecord)
if err != nil {
return nil, err
}
it := makeReadUnminedCreditIterator(ns, txHash)
for it.next() {
if int(it.elem.Index) >= len(details.MsgTx.TxOut) {
str := "saved credit index exceeds number of outputs"
return nil, storeError(ErrData, str, nil)
}
// Set the Spent field since this is not done by the iterator.
it.elem.Spent = existsRawUnminedInput(ns, it.ck) != nil
details.Credits = append(details.Credits, it.elem)
}
if it.err != nil {
return nil, it.err
}
// Debit records are not saved for unmined transactions. Instead, they
// must be looked up for each transaction input manually. There are two
// kinds of previous credits that may be debited by an unmined
// transaction: mined unspent outputs (which remain marked unspent even
// when spent by an unmined transaction), and credits from other unmined
// transactions. Both situations must be considered.
for i, output := range details.MsgTx.TxIn {
opKey := canonicalOutPoint(&output.PreviousOutPoint.Hash,
output.PreviousOutPoint.Index)
credKey := existsRawUnspent(ns, opKey)
if credKey != nil {
v := existsRawCredit(ns, credKey)
amount, err := fetchRawCreditAmount(v)
if err != nil {
return nil, err
}
details.Debits = append(details.Debits, DebitRecord{
Amount: amount,
Index: uint32(i),
})
continue
}
v := existsRawUnminedCredit(ns, opKey)
if v == nil {
continue
}
amount, err := fetchRawCreditAmount(v)
if err != nil {
return nil, err
}
details.Debits = append(details.Debits, DebitRecord{
Amount: amount,
Index: uint32(i),
})
}
// Finally, we add the transaction label to details.
details.Label, err = s.TxLabel(ns, *txHash)
if err != nil {
return nil, err
}
return &details, nil
}
// TxLabel looks up a transaction label for the txHash provided. If the store
// has no labels in it, or the specific txHash does not have a label, an empty
// string and no error are returned.
func (s *Store) TxLabel(ns walletdb.ReadBucket, txHash chainhash.Hash) (string,
error) {
label, err := FetchTxLabel(ns, txHash)
switch err {
// If there are no saved labels yet (the bucket has not been created) or
// there is not a label for this particular tx, we ignore the error.
case ErrNoLabelBucket:
fallthrough
case ErrTxLabelNotFound:
return "", nil
// If we found the label, we return it.
case nil:
return label, nil
}
// Otherwise, another error occurred while looking uo the label, so we
// return it.
return "", err
}
// TxDetails looks up all recorded details regarding a transaction with some
// hash. In case of a hash collision, the most recent transaction with a
// matching hash is returned.
//
// Not finding a transaction with this hash is not an error. In this case,
// a nil TxDetails is returned.
func (s *Store) TxDetails(ns walletdb.ReadBucket, txHash *chainhash.Hash) (*TxDetails, error) {
// First, check whether there exists an unmined transaction with this
// hash. Use it if found.
v := existsRawUnmined(ns, txHash[:])
if v != nil {
return s.unminedTxDetails(ns, txHash, v)
}
// Otherwise, if there exists a mined transaction with this matching
// hash, skip over to the newest and begin fetching all details.
k, v := latestTxRecord(ns, txHash)
if v == nil {
// not found
return nil, nil
}
return s.minedTxDetails(ns, txHash, k, v)
}
// UniqueTxDetails looks up all recorded details for a transaction recorded
// mined in some particular block, or an unmined transaction if block is nil.
//
// Not finding a transaction with this hash from this block is not an error. In
// this case, a nil TxDetails is returned.
func (s *Store) UniqueTxDetails(ns walletdb.ReadBucket, txHash *chainhash.Hash,
block *Block) (*TxDetails, error) {
if block == nil {
v := existsRawUnmined(ns, txHash[:])
if v == nil {
return nil, nil
}
return s.unminedTxDetails(ns, txHash, v)
}
k, v := existsTxRecord(ns, txHash, block)
if v == nil {
return nil, nil
}
return s.minedTxDetails(ns, txHash, k, v)
}
// rangeUnminedTransactions executes the function f with TxDetails for every
// unmined transaction. f is not executed if no unmined transactions exist.
// Error returns from f (if any) are propigated to the caller. Returns true
// (signaling breaking out of a RangeTransactions) iff f executes and returns
// true.
func (s *Store) rangeUnminedTransactions(ns walletdb.ReadBucket, f func([]TxDetails) (bool, error)) (bool, error) {
var details []TxDetails
err := ns.NestedReadBucket(bucketUnmined).ForEach(func(k, v []byte) error {
if len(k) < 32 {
str := fmt.Sprintf("%s: short key (expected %d "+
"bytes, read %d)", bucketUnmined, 32, len(k))
return storeError(ErrData, str, nil)
}
var txHash chainhash.Hash
copy(txHash[:], k)
detail, err := s.unminedTxDetails(ns, &txHash, v)
if err != nil {
return err
}
// Because the key was created while foreach-ing over the
// bucket, it should be impossible for unminedTxDetails to ever
// successfully return a nil details struct.
details = append(details, *detail)
return nil
})
if err == nil && len(details) > 0 {
return f(details)
}
return false, err
}
// rangeBlockTransactions executes the function f with TxDetails for every block
// between heights begin and end (reverse order when end > begin) until f
// returns true, or the transactions from block is processed. Returns true iff
// f executes and returns true.
func (s *Store) rangeBlockTransactions(ns walletdb.ReadBucket, begin, end int32,
f func([]TxDetails) (bool, error)) (bool, error) {
// Mempool height is considered a high bound.
if begin < 0 {
begin = int32(^uint32(0) >> 1)
}
if end < 0 |
var blockIter blockIterator
var advance func(*blockIterator) bool
if begin | {
end = int32(^uint32(0) >> 1)
} | conditional_block |
query.go | Tx.TxOut) {
str := "saved credit index exceeds number of outputs"
return nil, storeError(ErrData, str, nil)
}
// The credit iterator does not record whether this credit was
// spent by an unmined transaction, so check that here.
if !credIter.elem.Spent {
k := canonicalOutPoint(txHash, credIter.elem.Index)
spent := existsRawUnminedInput(ns, k) != nil
credIter.elem.Spent = spent
}
details.Credits = append(details.Credits, credIter.elem)
}
if credIter.err != nil {
return nil, credIter.err
}
debIter := makeReadDebitIterator(ns, recKey)
for debIter.next() {
if int(debIter.elem.Index) >= len(details.MsgTx.TxIn) {
str := "saved debit index exceeds number of inputs"
return nil, storeError(ErrData, str, nil)
}
details.Debits = append(details.Debits, debIter.elem)
}
if debIter.err != nil {
return nil, debIter.err
}
// Finally, we add the transaction label to details.
details.Label, err = s.TxLabel(ns, *txHash)
if err != nil {
return nil, err
}
return &details, nil
}
// unminedTxDetails fetches the TxDetails for the unmined transaction with the
// hash txHash and the passed unmined record value.
func (s *Store) unminedTxDetails(ns walletdb.ReadBucket, txHash *chainhash.Hash, v []byte) (*TxDetails, error) {
details := TxDetails{
Block: BlockMeta{Block: Block{Height: -1}},
}
err := readRawTxRecord(txHash, v, &details.TxRecord)
if err != nil {
return nil, err
}
it := makeReadUnminedCreditIterator(ns, txHash)
for it.next() {
if int(it.elem.Index) >= len(details.MsgTx.TxOut) {
str := "saved credit index exceeds number of outputs"
return nil, storeError(ErrData, str, nil)
}
// Set the Spent field since this is not done by the iterator.
it.elem.Spent = existsRawUnminedInput(ns, it.ck) != nil
details.Credits = append(details.Credits, it.elem)
}
if it.err != nil {
return nil, it.err
}
// Debit records are not saved for unmined transactions. Instead, they
// must be looked up for each transaction input manually. There are two
// kinds of previous credits that may be debited by an unmined
// transaction: mined unspent outputs (which remain marked unspent even
// when spent by an unmined transaction), and credits from other unmined
// transactions. Both situations must be considered.
for i, output := range details.MsgTx.TxIn {
opKey := canonicalOutPoint(&output.PreviousOutPoint.Hash,
output.PreviousOutPoint.Index)
credKey := existsRawUnspent(ns, opKey)
if credKey != nil {
v := existsRawCredit(ns, credKey)
amount, err := fetchRawCreditAmount(v)
if err != nil {
return nil, err
}
details.Debits = append(details.Debits, DebitRecord{
Amount: amount,
Index: uint32(i),
})
continue
}
v := existsRawUnminedCredit(ns, opKey)
if v == nil {
continue
}
amount, err := fetchRawCreditAmount(v)
if err != nil {
return nil, err
}
details.Debits = append(details.Debits, DebitRecord{
Amount: amount,
Index: uint32(i),
})
}
// Finally, we add the transaction label to details.
details.Label, err = s.TxLabel(ns, *txHash)
if err != nil {
return nil, err
}
return &details, nil
}
// TxLabel looks up a transaction label for the txHash provided. If the store
// has no labels in it, or the specific txHash does not have a label, an empty
// string and no error are returned.
func (s *Store) TxLabel(ns walletdb.ReadBucket, txHash chainhash.Hash) (string,
error) {
label, err := FetchTxLabel(ns, txHash)
switch err {
// If there are no saved labels yet (the bucket has not been created) or
// there is not a label for this particular tx, we ignore the error.
case ErrNoLabelBucket:
fallthrough
case ErrTxLabelNotFound:
return "", nil
// If we found the label, we return it.
case nil:
return label, nil
}
// Otherwise, another error occurred while looking uo the label, so we
// return it.
return "", err
}
// TxDetails looks up all recorded details regarding a transaction with some
// hash. In case of a hash collision, the most recent transaction with a
// matching hash is returned.
//
// Not finding a transaction with this hash is not an error. In this case,
// a nil TxDetails is returned.
func (s *Store) TxDetails(ns walletdb.ReadBucket, txHash *chainhash.Hash) (*TxDetails, error) |
// UniqueTxDetails looks up all recorded details for a transaction recorded
// mined in some particular block, or an unmined transaction if block is nil.
//
// Not finding a transaction with this hash from this block is not an error. In
// this case, a nil TxDetails is returned.
func (s *Store) UniqueTxDetails(ns walletdb.ReadBucket, txHash *chainhash.Hash,
block *Block) (*TxDetails, error) {
if block == nil {
v := existsRawUnmined(ns, txHash[:])
if v == nil {
return nil, nil
}
return s.unminedTxDetails(ns, txHash, v)
}
k, v := existsTxRecord(ns, txHash, block)
if v == nil {
return nil, nil
}
return s.minedTxDetails(ns, txHash, k, v)
}
// rangeUnminedTransactions executes the function f with TxDetails for every
// unmined transaction. f is not executed if no unmined transactions exist.
// Error returns from f (if any) are propigated to the caller. Returns true
// (signaling breaking out of a RangeTransactions) iff f executes and returns
// true.
func (s *Store) rangeUnminedTransactions(ns walletdb.ReadBucket, f func([]TxDetails) (bool, error)) (bool, error) {
var details []TxDetails
err := ns.NestedReadBucket(bucketUnmined).ForEach(func(k, v []byte) error {
if len(k) < 32 {
str := fmt.Sprintf("%s: short key (expected %d "+
"bytes, read %d)", bucketUnmined, 32, len(k))
return storeError(ErrData, str, nil)
}
var txHash chainhash.Hash
copy(txHash[:], k)
detail, err := s.unminedTxDetails(ns, &txHash, v)
if err != nil {
return err
}
// Because the key was created while foreach-ing over the
// bucket, it should be impossible for unminedTxDetails to ever
// successfully return a nil details struct.
details = append(details, *detail)
return nil
})
if err == nil && len(details) > 0 {
return f(details)
}
return false, err
}
// rangeBlockTransactions executes the function f with TxDetails for every block
// between heights begin and end (reverse order when end > begin) until f
// returns true, or the transactions from block is processed. Returns true iff
// f executes and returns true.
func (s *Store) rangeBlockTransactions(ns walletdb.ReadBucket, begin, end int32,
f func([]TxDetails) (bool, error)) (bool, error) {
// Mempool height is considered a high bound.
if begin < 0 {
begin = int32(^uint32(0) >> 1)
}
if end < 0 {
end = int32(^uint32(0) >> 1)
}
var blockIter blockIterator
var advance func(*blockIterator) bool
if begin | {
// First, check whether there exists an unmined transaction with this
// hash. Use it if found.
v := existsRawUnmined(ns, txHash[:])
if v != nil {
return s.unminedTxDetails(ns, txHash, v)
}
// Otherwise, if there exists a mined transaction with this matching
// hash, skip over to the newest and begin fetching all details.
k, v := latestTxRecord(ns, txHash)
if v == nil {
// not found
return nil, nil
}
return s.minedTxDetails(ns, txHash, k, v)
} | identifier_body |
query.go | , they
// must be looked up for each transaction input manually. There are two
// kinds of previous credits that may be debited by an unmined
// transaction: mined unspent outputs (which remain marked unspent even
// when spent by an unmined transaction), and credits from other unmined
// transactions. Both situations must be considered.
for i, output := range details.MsgTx.TxIn {
opKey := canonicalOutPoint(&output.PreviousOutPoint.Hash,
output.PreviousOutPoint.Index)
credKey := existsRawUnspent(ns, opKey)
if credKey != nil {
v := existsRawCredit(ns, credKey)
amount, err := fetchRawCreditAmount(v)
if err != nil {
return nil, err
}
details.Debits = append(details.Debits, DebitRecord{
Amount: amount,
Index: uint32(i),
})
continue
}
v := existsRawUnminedCredit(ns, opKey)
if v == nil {
continue
}
amount, err := fetchRawCreditAmount(v)
if err != nil {
return nil, err
}
details.Debits = append(details.Debits, DebitRecord{
Amount: amount,
Index: uint32(i),
})
}
// Finally, we add the transaction label to details.
details.Label, err = s.TxLabel(ns, *txHash)
if err != nil {
return nil, err
}
return &details, nil
}
// TxLabel looks up a transaction label for the txHash provided. If the store
// has no labels in it, or the specific txHash does not have a label, an empty
// string and no error are returned.
func (s *Store) TxLabel(ns walletdb.ReadBucket, txHash chainhash.Hash) (string,
error) {
label, err := FetchTxLabel(ns, txHash)
switch err {
// If there are no saved labels yet (the bucket has not been created) or
// there is not a label for this particular tx, we ignore the error.
case ErrNoLabelBucket:
fallthrough
case ErrTxLabelNotFound:
return "", nil
// If we found the label, we return it.
case nil:
return label, nil
}
// Otherwise, another error occurred while looking uo the label, so we
// return it.
return "", err
}
// TxDetails looks up all recorded details regarding a transaction with some
// hash. In case of a hash collision, the most recent transaction with a
// matching hash is returned.
//
// Not finding a transaction with this hash is not an error. In this case,
// a nil TxDetails is returned.
func (s *Store) TxDetails(ns walletdb.ReadBucket, txHash *chainhash.Hash) (*TxDetails, error) {
// First, check whether there exists an unmined transaction with this
// hash. Use it if found.
v := existsRawUnmined(ns, txHash[:])
if v != nil {
return s.unminedTxDetails(ns, txHash, v)
}
// Otherwise, if there exists a mined transaction with this matching
// hash, skip over to the newest and begin fetching all details.
k, v := latestTxRecord(ns, txHash)
if v == nil {
// not found
return nil, nil
}
return s.minedTxDetails(ns, txHash, k, v)
}
// UniqueTxDetails looks up all recorded details for a transaction recorded
// mined in some particular block, or an unmined transaction if block is nil.
//
// Not finding a transaction with this hash from this block is not an error. In
// this case, a nil TxDetails is returned.
func (s *Store) UniqueTxDetails(ns walletdb.ReadBucket, txHash *chainhash.Hash,
block *Block) (*TxDetails, error) {
if block == nil {
v := existsRawUnmined(ns, txHash[:])
if v == nil {
return nil, nil
}
return s.unminedTxDetails(ns, txHash, v)
}
k, v := existsTxRecord(ns, txHash, block)
if v == nil {
return nil, nil
}
return s.minedTxDetails(ns, txHash, k, v)
}
// rangeUnminedTransactions executes the function f with TxDetails for every
// unmined transaction. f is not executed if no unmined transactions exist.
// Error returns from f (if any) are propagated to the caller. Returns true
// (signaling breaking out of a RangeTransactions) iff f executes and returns
// true.
func (s *Store) rangeUnminedTransactions(ns walletdb.ReadBucket, f func([]TxDetails) (bool, error)) (bool, error) {
	var details []TxDetails
	forEachErr := ns.NestedReadBucket(bucketUnmined).ForEach(func(k, v []byte) error {
		// Keys in the unmined bucket are raw transaction hashes and
		// must therefore be at least 32 bytes long.
		if len(k) < 32 {
			str := fmt.Sprintf("%s: short key (expected %d "+
				"bytes, read %d)", bucketUnmined, 32, len(k))
			return storeError(ErrData, str, nil)
		}

		var hash chainhash.Hash
		copy(hash[:], k)
		detail, err := s.unminedTxDetails(ns, &hash, v)
		if err != nil {
			return err
		}

		// The key was produced by iterating over the bucket itself, so
		// a successful unminedTxDetails call cannot yield a nil detail.
		details = append(details, *detail)
		return nil
	})
	if forEachErr != nil || len(details) == 0 {
		return false, forEachErr
	}
	return f(details)
}
// rangeBlockTransactions executes the function f with TxDetails for every block
// between heights begin and end (in reverse order when begin > end) until f
// returns true, or the transactions from block is processed. Returns true iff
// f executes and returns true.
func (s *Store) rangeBlockTransactions(ns walletdb.ReadBucket, begin, end int32,
	f func([]TxDetails) (bool, error)) (bool, error) {
	// Mempool height is considered a high bound: a negative bound means
	// "up to (or down to) the highest representable block height".
	if begin < 0 {
		begin = int32(^uint32(0) >> 1)
	}
	if end < 0 {
		end = int32(^uint32(0) >> 1)
	}
	var blockIter blockIterator
	var advance func(*blockIterator) bool
	if begin < end {
		// Iterate in forwards order
		blockIter = makeReadBlockIterator(ns, begin)
		advance = func(it *blockIterator) bool {
			if !it.next() {
				return false
			}
			return it.elem.Height <= end
		}
	} else {
		// Iterate in backwards order, from begin -> end.
		blockIter = makeReadBlockIterator(ns, begin)
		advance = func(it *blockIterator) bool {
			if !it.prev() {
				return false
			}
			return end <= it.elem.Height
		}
	}
	var details []TxDetails
	for advance(&blockIter) {
		block := &blockIter.elem
		// Reuse the details slice across blocks when its capacity
		// allows, to avoid re-allocating for every block.
		if cap(details) < len(block.transactions) {
			details = make([]TxDetails, 0, len(block.transactions))
		} else {
			details = details[:0]
		}
		for _, txHash := range block.transactions {
			k := keyTxRecord(&txHash, &block.Block)
			v := existsRawTxRecord(ns, k)
			if v == nil {
				// A block record indexes transactions that must
				// exist; a missing record means the database is
				// corrupt.
				str := fmt.Sprintf("missing transaction %v for "+
					"block %v", txHash, block.Height)
				return false, storeError(ErrData, str, nil)
			}
			detail, err := s.minedTxDetails(ns, &txHash, k, v)
			if err != nil {
				return false, err
			}
			details = append(details, *detail)
		}
		// Every block record must have at least one transaction, so it
		// is safe to call f.
		brk, err := f(details)
		if err != nil || brk {
			return brk, err
		}
	}
	return false, blockIter.err
}
// RangeTransactions runs the function f on all transaction details between
// blocks on the best chain over the height range [begin,end]. The special
// height -1 may be used to also include unmined transactions. If the end
// height comes before the begin height, blocks are iterated in reverse order
// and unmined transactions (if any) are processed first.
//
// The function f may return an error which, if non-nil, is propagated to the
// caller. Additionally, a boolean return value allows exiting the function
// early without reading any additional transactions early when true.
//
// All calls to f are guaranteed to be passed a slice with more than zero
// elements. The slice may be reused for multiple blocks, so it is not safe to
// use it after the loop iteration it was acquired.
func (s *Store) | RangeTransactions | identifier_name | |
settings.py | according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
# STATIC_ROOT = '' # ** moved lower in this file
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
# ** moved to configs/env/*
# STATICFILES_DIRS = (
# "/root/static_source",
# )
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY_FN = 'django'
SECRET_KEY = get_key(SECRET_KEY_FN)
# List of callable that know how to import templates from various sources.
# TEMPLATE_LOADERS = (
# 'django.template.loaders.filesystem.Loader',
# 'django.template.loaders.app_directories.Loader',
# )
# AUTH_USER_MODEL = 'breeze.OrderedUser'
# AUTH_USER_MODEL = 'breeze.CustomUser' # FIXME
INSTALLED_APPS = [
'suit',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'bootstrap_toolkit',
'breeze.apps.Config',
'shiny.apps.Config',
'dbviewer.apps.Config',
'compute.apps.Config',
'down.apps.Config',
# 'south',
'gunicorn',
'mathfilters',
# 'django_auth0', # moved to config/auth0.py
'hello_auth.apps.Config',
'api.apps.Config',
'webhooks.apps.Config',
'utilz.apps.Config',
'django_requestlogging',
'django.contrib.admindocs',
'django_extensions'
]
MIDDLEWARE_CLASSES = [
'breeze.middlewares.BreezeAwake',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
# 'django.middleware.doc.XViewMiddleware',
'breeze.middlewares.JobKeeper',
'breeze.middlewares.CheckUserProfile',
'breeze.middlewares.ContextualRequest',
'django_requestlogging.middleware.LogSetupMiddleware',
'breeze.middlewares.DataDog' if ENABLE_DATADOG else 'breeze.middlewares.Empty',
'breeze.middlewares.RemoteFW' if ENABLE_REMOTE_FW else 'breeze.middlewares.Empty',
'rollbar.contrib.django.middleware.RollbarNotifierMiddleware' if ENABLE_ROLLBAR else 'breeze.middlewares.Empty',
]
# ** AUTHENTICATION_BACKENDS moved to specific auth config files (config/env/auth/*)
# ** AUTH0_* moved to config/env/auth/auth0.py
SSH_TUNNEL_HOST = 'breeze-ssh'
SSH_TUNNEL_PORT = '2222'
# SSH_TUNNEL_TEST_URL = 'breeze-ssh'
# ROOT_URLCONF = 'isbio.urls'
APPEND_SLASH = True
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'isbio.wsgi.application'
# provide our profile model
AUTH_PROFILE_MODULE = 'breeze.UserProfile'
# allow on the fly creation of guest user accounts
AUTH_ALLOW_GUEST = False # allow anonymous visitor to login as disposable guests
GUEST_INSTITUTE_ID = 3 # guest institute
GUEST_EXPIRATION_TIME = 24 * 60 # expiration time of inactive guests in minutes
GUEST_FIRST_NAME = 'guest'
GUEST_GROUP_NAME = GUEST_FIRST_NAME.capitalize() + 's'
ALL_GROUP_NAME = 'Registered users'
RESTRICT_GUEST_TO_SPECIFIC_VIEWS = True
DEFAULT_LOGIN_URL = '/login_page'
FORCE_DEFAULT_LOGIN_URL = True
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': USUAL_LOG_FORMAT,
'datefmt': USUAL_DATE_FORMAT,
},
},
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'default': {
'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler',
'filename': LOG_PATH,
'maxBytes': 1024 * 1024 * 5, # 5 MB
'backupCount': 10,
'formatter': 'standard',
},
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
},
'loggers': {
'': {
'handlers': ['default'],
'level': logging.INFO,
'propagate': True
},
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
class DomainList(object):
	# Known host names / IPs for each deployment environment.  The first
	# entry of each list is the canonical domain for that environment.
	CLOUD_PROD = ['breeze.fimm.fi', '13.79.158.135', ]
	CLOUD_DEV = ['breeze-dev.northeurope.cloudapp.azure.com', '52.164.209.61', ]
	FIMM_PH = ['breeze-newph.fimm.fi', 'breeze-ph.fimm.fi', ]
	FIMM_DEV = ['breeze-dev.fimm.fi', ]
	FIMM_PROD = ['breeze-fimm.fimm.fi', 'breeze-new.fimm.fi', ]

	@classmethod
	def get_current_domain(cls):
		"""Return the canonical domain for the current run environment.

		Raises RuntimeError if the environment is neither AzureCloud nor
		FIMM (previously this path crashed with an unhelpful
		UnboundLocalError because `domain` was never assigned).
		"""
		from isbio.config import RUN_ENV_CLASS, ConfigEnvironments, MODE_PROD, DEV_MODE, PHARMA_MODE
		if RUN_ENV_CLASS is ConfigEnvironments.AzureCloud:
			domain = cls.CLOUD_DEV if DEV_MODE else cls.CLOUD_PROD
		elif RUN_ENV_CLASS is ConfigEnvironments.FIMM:
			domain = cls.FIMM_PROD if MODE_PROD else cls.FIMM_PH if PHARMA_MODE else cls.FIMM_DEV
		else:
			raise RuntimeError('Unsupported run environment: %r' % (RUN_ENV_CLASS,))
		return domain[0]
DEBUG = False
VERBOSE = False
SQL_DUMP = False
# APPEND_SLASH = True
ADMINS = (
('Clement FIERE', 'clement.fiere@helsinki.fi'),
)
# root of the Breeze django project folder, includes 'venv', 'static' folder copy, isbio, logs
SOURCE_ROOT = recur(3, os.path.dirname, os.path.realpath(__file__)) + '/'
DJANGO_ROOT = recur(2, os.path.dirname, os.path.realpath(__file__)) + '/'
TEMPLATE_FOLDER = DJANGO_ROOT + 'templates/' # source templates (not HTML ones)
DJANGO_AUTH_MODEL_BACKEND_PY_PATH = 'django.contrib.auth.backends.ModelBackend'
# CAS_NG_BACKEND_PY_PATH = 'my_django.cas_ng_custom.CASBackend'
AUTH0_BACKEND_PY_PATH = 'django_auth0.auth_backend.Auth0Backend'
AUTH0_CUSTOM_BACKEND_PY_PATH = 'custom_auth0.auth_backend.Auth0Backend'
os.environ['MAIL'] = '/var/mail/dbychkov' # FIXME obsolete
CONSOLE_DATE_F = "%d/%b/%Y %H:%M:%S"
# auto-sensing if running on dev or prod, for dynamic environment configuration
# FIXME broken in docker container
FULL_HOST_NAME = socket.gethostname()
HOST_NAME = str.split(FULL_HOST_NAME, '.')[0]
# Do not move: defined here because some utils functions use it.
FIMM_NETWORK = '128.214.0.0/16'
from config import *
# Super User on breeze can Access all data
SU_ACCESS_OVERRIDE = True
PROJECT_PATH = PROJECT_FOLDER + BREEZE_FOLDER
if not os.path.isdir(PROJECT_PATH):
PROJECT_FOLDER = '/%s/' % PROJECT_FOLDER_NAME
PROD_PATH = '%s%s' % (PROJECT_FOLDER, BREEZE_FOLDER)
R_ENGINE_SUB_PATH = 'R/bin/R ' # FIXME LEGACY ONLY
R_ENGINE_PATH = PROD_PATH + R_ENGINE_SUB_PATH
if not os.path.isfile( R_ENGINE_PATH.strip()):
| PROJECT_FOLDER = '/%s/' % PROJECT_FOLDER_NAME
R_ENGINE_PATH = PROD_PATH + R_ENGINE_SUB_PATH # FIXME Legacy | conditional_block | |
settings.py | container
FULL_HOST_NAME = socket.gethostname()
HOST_NAME = str.split(FULL_HOST_NAME, '.')[0]
# Do not move: defined here because some utils functions use it.
FIMM_NETWORK = '128.214.0.0/16'
from config import *
# Super User on breeze can Access all data
SU_ACCESS_OVERRIDE = True
PROJECT_PATH = PROJECT_FOLDER + BREEZE_FOLDER
if not os.path.isdir(PROJECT_PATH):
PROJECT_FOLDER = '/%s/' % PROJECT_FOLDER_NAME
PROD_PATH = '%s%s' % (PROJECT_FOLDER, BREEZE_FOLDER)
R_ENGINE_SUB_PATH = 'R/bin/R ' # FIXME LEGACY ONLY
R_ENGINE_PATH = PROD_PATH + R_ENGINE_SUB_PATH
if not os.path.isfile( R_ENGINE_PATH.strip()):
PROJECT_FOLDER = '/%s/' % PROJECT_FOLDER_NAME
R_ENGINE_PATH = PROD_PATH + R_ENGINE_SUB_PATH # FIXME Legacy
PROJECT_FHRB_PM_PATH = '/%s/fhrb_pm/' % PROJECT_FOLDER_NAME
JDBC_BRIDGE_PATH = PROJECT_FHRB_PM_PATH + 'bin/start-jdbc-bridge' # Every other path has a trailing /
TEMP_FOLDER = SOURCE_ROOT + 'tmp/'
####
# 'db' folder, containing : reports, scripts, jobs, datasets, pipelines, upload_temp
####
DATA_TEMPLATES_FN = 'mould/'
RE_RUN_SH = SOURCE_ROOT + 're_run.sh'
MEDIA_ROOT = PROJECT_PATH + 'db/'
RORA_LIB = PROJECT_PATH + 'RORALib/'
UPLOAD_FOLDER = MEDIA_ROOT + 'upload_temp/'
DATASETS_FOLDER = MEDIA_ROOT + 'datasets/'
STATIC_ROOT = SOURCE_ROOT + 'static_source/' # static files for the website
DJANGO_CONFIG_FOLDER = SOURCE_ROOT + 'config/' # Where to store secrets and deployment conf
MOULD_FOLDER = MEDIA_ROOT + DATA_TEMPLATES_FN
NO_TAG_XML = TEMPLATE_FOLDER + 'notag.xml'
SH_LOG_FOLDER = '.log'
GENERAL_SH_BASE_NAME = 'run_job'
GENERAL_SH_NAME = '%s.sh' % GENERAL_SH_BASE_NAME
GENERAL_SH_CONF_NAME = '%s_conf.sh' % GENERAL_SH_BASE_NAME
DOCKER_SH_NAME = 'run.sh'
REPORTS_CACHE_INTERNAL_URL = '/cached/reports/'
INCOMPLETE_RUN_FN = '.INCOMPLETE_RUN'
FAILED_FN = '.failed'
SUCCESS_FN = '.done'
R_DONE_FN = '.sub_done'
# ** moved to config/execution/sge.py
# SGE_QUEUE_NAME = 'breeze.q' # monitoring only
# ** moved to config/env/azure_cloud.py
# DOCKER_HUB_PASS_FILE = SOURCE_ROOT + 'docker_repo'
# AZURE_PASS_FILE = SOURCE_ROOT + 'azure_pwd' # moved to config/env/azure_cloud.py
#
# ComputeTarget configs
#
# TODO config
# 13/05/2016
CONFIG_FN = 'configs/'
CONFIG_PATH = MEDIA_ROOT + CONFIG_FN
# 19/04/2016
TARGET_CONFIG_FN = 'target/'
TARGET_CONFIG_PATH = CONFIG_PATH + TARGET_CONFIG_FN
# 08/06/2016
DEFAULT_TARGET_ID = BREEZE_TARGET_ID
# 13/05/2016
EXEC_CONFIG_FN = 'exec/'
EXEC_CONFIG_PATH = CONFIG_PATH + EXEC_CONFIG_FN
# 13/05/2016
ENGINE_CONFIG_FN = 'engine/'
ENGINE_CONFIG_PATH = CONFIG_PATH + ENGINE_CONFIG_FN
# 23/05/2016
SWAP_FN = 'swap/'
SWAP_PATH = MEDIA_ROOT + SWAP_FN
# 21/02/2017
SHINY_SECRET_KEY_FN = 'shiny'
SHINY_SECRET = get_key(SHINY_SECRET_KEY_FN) # Warning : shiny_secret must be at least 32 char long.
ENC_SESSION_ID_COOKIE_NAME = get_md5('seed')
##
# Report config
##
BOOTSTRAP_SH_TEMPLATE = TEMPLATE_FOLDER + GENERAL_SH_NAME
BOOTSTRAP_SH_CONF_TEMPLATE = TEMPLATE_FOLDER + GENERAL_SH_CONF_NAME
DOCKER_BOOTSTRAP_SH_TEMPLATE = TEMPLATE_FOLDER + DOCKER_SH_NAME
NOZZLE_TEMPLATE_FOLDER = TEMPLATE_FOLDER + 'nozzle_templates/'
TAGS_TEMPLATE_PATH = NOZZLE_TEMPLATE_FOLDER + 'tag.R'
NOZZLE_REPORT_TEMPLATE_PATH = NOZZLE_TEMPLATE_FOLDER + 'report.R'
NOZZLE_REPORT_FN = 'report'
RSCRIPTS_FN = 'scripts/'
RSCRIPTS_PATH = MEDIA_ROOT + RSCRIPTS_FN
REPORT_TYPE_FN = 'pipelines/'
REPORT_TYPE_PATH = MEDIA_ROOT + REPORT_TYPE_FN
REPORTS_FN = 'reports/'
REPORTS_PATH = '%s%s' % (MEDIA_ROOT, REPORTS_FN)
REPORTS_SH = GENERAL_SH_NAME
REPORTS_FM_FN = 'transfer_to_fm.txt'
R_FILE_NAME_BASE = 'script'
R_FILE_NAME = R_FILE_NAME_BASE + '.r'
R_OUT_EXT = '.Rout'
##
# Jobs configs
##
SCRIPT_CODE_HEADER_FN = 'header.R'
SCRIPT_HEADER_DEF_CONTENT = '# write your header here...'
SCRIPT_CODE_BODY_FN = 'body.R'
SCRIPT_BODY_DEF_CONTENT = '# copy and paste main code here...'
SCRIPT_FORM_FN = 'form.xml'
SCRIPT_TEMPLATE_FOLDER = TEMPLATE_FOLDER + 'script_templates/'
SCRIPT_TEMPLATE_PATH = SCRIPT_TEMPLATE_FOLDER + 'script.R'
JOBS_FN = 'jobs/'
JOBS_PATH = '%s%s' % (MEDIA_ROOT, JOBS_FN)
JOBS_SH = '_config.sh'
#
# WATCHER RELATED CONFIG
#
# FIXME make this target_config specific
WATCHER_DB_REFRESH = 2 # number of seconds to wait before refreshing reports from DB
WATCHER_PROC_REFRESH = 2 # number of seconds to wait before refreshing processes
#
# SHINY RELATED CONFIG
#
from shiny.settings import * # FIXME obsolete
FOLDERS_LST = [TEMPLATE_FOLDER, SHINY_REPORT_TEMPLATE_PATH, SHINY_REPORTS, SHINY_TAGS,
NOZZLE_TEMPLATE_FOLDER, SCRIPT_TEMPLATE_FOLDER, JOBS_PATH, REPORT_TYPE_PATH, REPORTS_PATH, RSCRIPTS_PATH, MEDIA_ROOT,
STATIC_ROOT, TARGET_CONFIG_PATH, EXEC_CONFIG_PATH, ENGINE_CONFIG_PATH]
##
# System Autocheck config
##
# this is used to avoid a 504 Gateway time-out from nginx, which is currently set to 600 sec = 10 min
# LONG_POLL_TIME_OUT_REFRESH = 540 # 9 minutes
# set to 50 sec to avoid time-out on breeze.fimm.fi
LONG_POLL_TIME_OUT_REFRESH = 50 # FIXME obsolete
# SGE_MASTER_FILE = '/var/lib/gridengine/default/common/act_qmaster' # FIXME obsolete
# SGE_MASTER_IP = '192.168.67.2' # FIXME obsolete
# DOTM_SERVER_IP = '128.214.64.5' # FIXME obsolete
# RORA_SERVER_IP = '192.168.0.219' # FIXME obsolete
# FILE_SERVER_IP = '192.168.0.107' # FIXME obsolete
SPECIAL_CODE_FOLDER = PROJECT_PATH + 'code/'
FS_SIG_FILE = PROJECT_PATH + 'fs_sig.md5'
FS_LIST_FILE = PROJECT_PATH + 'fs_checksums.json'
FOLDERS_TO_CHECK = [TEMPLATE_FOLDER, SHINY_TAGS, REPORT_TYPE_PATH, # SHINY_REPORTS,SPECIAL_CODE_FOLDER ,
RSCRIPTS_PATH, MOULD_FOLDER, STATIC_ROOT, DATASETS_FOLDER]
# STATIC URL MAPPINGS
# STATIC_URL = '/static/'
# MEDIA_URL = '/media/'
MOULD_URL = MEDIA_URL + DATA_TEMPLATES_FN
# number of seconds after which a job that has not received an sgeid should be marked as aborted or re-run
NO_SGEID_EXPIRY = 30
# FIXME obsolete
TMP_CSC_TAITO_MOUNT = '/mnt/csc-taito/'
TMP_CSC_TAITO_REPORT_PATH = 'breeze/'
TMP_CSC_TAITO_REMOTE_CHROOT = '/homeappl/home/clement/'
# mail config
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = 'breeze.fimm@gmail.com'
EMAIL_HOST_PASSWORD = get_key('gmail')
EMAIL_PORT = '587'
# EMAIL_SUBJECT_PREFIX = '[' + FULL_HOST_NAME + '] '
EMAIL_SUBJECT_PREFIX = '[' + BREEZE_TITLE + '] '
EMAIL_USE_TLS = True
EMAIL_SENDER = 'Breeze PMS'
#
# END OF CONFIG
# RUN-MODE SPECIFICS FOLLOWING
# ** NO CONFIGURATION CONST BEYOND THIS POINT **
#
# ** moved to config/env/*
# if prod mode then auto disable DEBUG, for safety
# if MODE_PROD or PHARMA_MODE:
# SHINY_MODE = 'remote'
# SHINY_LOCAL_ENABLE = False
# DEBUG = False
# VERBOSE = False
# ** DEV logging config moved to config/env/dev.py
# FIXME obsolete
if ENABLE_ROLLBAR:
try:
import rollbar
BASE_DIR = SOURCE_ROOT
ROLLBAR = {
'access_token': '00f2bf2c84ce40aa96842622c6ffe97d',
'environment': 'development' if DEBUG else 'production',
'root': BASE_DIR,
}
rollbar.init(**ROLLBAR)
except Exception:
ENABLE_ROLLBAR = False
logging.getLogger().error('Unable to init rollbar')
pass
def make_run_file():
| f = open('running', 'w+')
f.write(str(datetime.now().strftime(USUAL_DATE_FORMAT)))
f.close() | identifier_body | |
settings.py | set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
# STATIC_ROOT = '' # ** moved lower in this file
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
# ** moved to configs/env/*
# STATICFILES_DIRS = (
# "/root/static_source",
# )
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY_FN = 'django'
SECRET_KEY = get_key(SECRET_KEY_FN)
# List of callable that know how to import templates from various sources.
# TEMPLATE_LOADERS = (
# 'django.template.loaders.filesystem.Loader',
# 'django.template.loaders.app_directories.Loader',
# )
# AUTH_USER_MODEL = 'breeze.OrderedUser'
# AUTH_USER_MODEL = 'breeze.CustomUser' # FIXME
INSTALLED_APPS = [
'suit',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'bootstrap_toolkit',
'breeze.apps.Config',
'shiny.apps.Config',
'dbviewer.apps.Config',
'compute.apps.Config',
'down.apps.Config',
# 'south',
'gunicorn',
'mathfilters',
# 'django_auth0', # moved to config/auth0.py
'hello_auth.apps.Config',
'api.apps.Config',
'webhooks.apps.Config',
'utilz.apps.Config',
'django_requestlogging',
'django.contrib.admindocs',
'django_extensions'
]
MIDDLEWARE_CLASSES = [
'breeze.middlewares.BreezeAwake',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
# 'django.middleware.doc.XViewMiddleware',
'breeze.middlewares.JobKeeper',
'breeze.middlewares.CheckUserProfile',
'breeze.middlewares.ContextualRequest',
'django_requestlogging.middleware.LogSetupMiddleware',
'breeze.middlewares.DataDog' if ENABLE_DATADOG else 'breeze.middlewares.Empty',
'breeze.middlewares.RemoteFW' if ENABLE_REMOTE_FW else 'breeze.middlewares.Empty',
'rollbar.contrib.django.middleware.RollbarNotifierMiddleware' if ENABLE_ROLLBAR else 'breeze.middlewares.Empty',
]
# ** AUTHENTICATION_BACKENDS moved to specific auth config files (config/env/auth/*)
# ** AUTH0_* moved to config/env/auth/auth0.py
SSH_TUNNEL_HOST = 'breeze-ssh'
SSH_TUNNEL_PORT = '2222'
# SSH_TUNNEL_TEST_URL = 'breeze-ssh'
# ROOT_URLCONF = 'isbio.urls'
APPEND_SLASH = True
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'isbio.wsgi.application'
# provide our profile model
AUTH_PROFILE_MODULE = 'breeze.UserProfile'
# allow on the fly creation of guest user accounts
AUTH_ALLOW_GUEST = False # allow anonymous visitor to login as disposable guests
GUEST_INSTITUTE_ID = 3 # guest institute
GUEST_EXPIRATION_TIME = 24 * 60 # expiration time of inactive guests in minutes
GUEST_FIRST_NAME = 'guest'
GUEST_GROUP_NAME = GUEST_FIRST_NAME.capitalize() + 's'
ALL_GROUP_NAME = 'Registered users'
RESTRICT_GUEST_TO_SPECIFIC_VIEWS = True
DEFAULT_LOGIN_URL = '/login_page'
FORCE_DEFAULT_LOGIN_URL = True
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': USUAL_LOG_FORMAT,
'datefmt': USUAL_DATE_FORMAT,
},
},
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'default': {
'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler',
'filename': LOG_PATH,
'maxBytes': 1024 * 1024 * 5, # 5 MB
'backupCount': 10,
'formatter': 'standard',
},
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
},
'loggers': {
'': {
'handlers': ['default'],
'level': logging.INFO,
'propagate': True
},
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
| CLOUD_PROD = ['breeze.fimm.fi', '13.79.158.135', ]
CLOUD_DEV = ['breeze-dev.northeurope.cloudapp.azure.com', '52.164.209.61', ]
FIMM_PH = ['breeze-newph.fimm.fi', 'breeze-ph.fimm.fi', ]
FIMM_DEV = ['breeze-dev.fimm.fi', ]
FIMM_PROD = ['breeze-fimm.fimm.fi', 'breeze-new.fimm.fi', ]
@classmethod
def get_current_domain(cls):
from isbio.config import RUN_ENV_CLASS, ConfigEnvironments, MODE_PROD, DEV_MODE, PHARMA_MODE
if RUN_ENV_CLASS is ConfigEnvironments.AzureCloud:
domain = cls.CLOUD_DEV if DEV_MODE else cls.CLOUD_PROD
elif RUN_ENV_CLASS is ConfigEnvironments.FIMM:
domain = cls.FIMM_PROD if MODE_PROD else cls.FIMM_PH if PHARMA_MODE else cls.FIMM_DEV
return domain[0]
DEBUG = False
VERBOSE = False
SQL_DUMP = False
# APPEND_SLASH = True
ADMINS = (
('Clement FIERE', 'clement.fiere@helsinki.fi'),
)
# root of the Breeze django project folder, includes 'venv', 'static' folder copy, isbio, logs
SOURCE_ROOT = recur(3, os.path.dirname, os.path.realpath(__file__)) + '/'
DJANGO_ROOT = recur(2, os.path.dirname, os.path.realpath(__file__)) + '/'
TEMPLATE_FOLDER = DJANGO_ROOT + 'templates/' # source templates (not HTML ones)
DJANGO_AUTH_MODEL_BACKEND_PY_PATH = 'django.contrib.auth.backends.ModelBackend'
# CAS_NG_BACKEND_PY_PATH = 'my_django.cas_ng_custom.CASBackend'
AUTH0_BACKEND_PY_PATH = 'django_auth0.auth_backend.Auth0Backend'
AUTH0_CUSTOM_BACKEND_PY_PATH = 'custom_auth0.auth_backend.Auth0Backend'
os.environ['MAIL'] = '/var/mail/dbychkov' # FIXME obsolete
CONSOLE_DATE_F = "%d/%b/%Y %H:%M:%S"
# auto-sensing if running on dev or prod, for dynamic environment configuration
# FIXME broken in docker container
FULL_HOST_NAME = socket.gethostname()
HOST_NAME = str.split(FULL_HOST_NAME, '.')[0]
# Do not move: defined here because some utils functions use it.
FIMM_NETWORK = '128.214.0.0/16'
from config import *
# Super User on breeze can Access all data
SU_ACCESS_OVERRIDE = True
PROJECT_PATH = PROJECT_FOLDER + BREEZE_FOLDER
if not os.path.isdir(PROJECT_PATH):
PROJECT_FOLDER = '/%s/' % PROJECT_FOLDER_NAME
PROD_PATH = '%s%s' % (PROJECT_FOLDER, BREEZE_FOLDER)
R_ENGINE_SUB_PATH = 'R/bin/R ' # FIXME LEGACY ONLY
R_ENGINE_PATH | }
}
class DomainList(object): | random_line_split |
settings.py | _SH = SOURCE_ROOT + 're_run.sh'
MEDIA_ROOT = PROJECT_PATH + 'db/'
RORA_LIB = PROJECT_PATH + 'RORALib/'
UPLOAD_FOLDER = MEDIA_ROOT + 'upload_temp/'
DATASETS_FOLDER = MEDIA_ROOT + 'datasets/'
STATIC_ROOT = SOURCE_ROOT + 'static_source/' # static files for the website
DJANGO_CONFIG_FOLDER = SOURCE_ROOT + 'config/' # Where to store secrets and deployment conf
MOULD_FOLDER = MEDIA_ROOT + DATA_TEMPLATES_FN
NO_TAG_XML = TEMPLATE_FOLDER + 'notag.xml'
SH_LOG_FOLDER = '.log'
GENERAL_SH_BASE_NAME = 'run_job'
GENERAL_SH_NAME = '%s.sh' % GENERAL_SH_BASE_NAME
GENERAL_SH_CONF_NAME = '%s_conf.sh' % GENERAL_SH_BASE_NAME
DOCKER_SH_NAME = 'run.sh'
REPORTS_CACHE_INTERNAL_URL = '/cached/reports/'
INCOMPLETE_RUN_FN = '.INCOMPLETE_RUN'
FAILED_FN = '.failed'
SUCCESS_FN = '.done'
R_DONE_FN = '.sub_done'
# ** moved to config/execution/sge.py
# SGE_QUEUE_NAME = 'breeze.q' # monitoring only
# ** moved to config/env/azure_cloud.py
# DOCKER_HUB_PASS_FILE = SOURCE_ROOT + 'docker_repo'
# AZURE_PASS_FILE = SOURCE_ROOT + 'azure_pwd' # moved to config/env/azure_cloud.py
#
# ComputeTarget configs
#
# TODO config
# 13/05/2016
CONFIG_FN = 'configs/'
CONFIG_PATH = MEDIA_ROOT + CONFIG_FN
# 19/04/2016
TARGET_CONFIG_FN = 'target/'
TARGET_CONFIG_PATH = CONFIG_PATH + TARGET_CONFIG_FN
# 08/06/2016
DEFAULT_TARGET_ID = BREEZE_TARGET_ID
# 13/05/2016
EXEC_CONFIG_FN = 'exec/'
EXEC_CONFIG_PATH = CONFIG_PATH + EXEC_CONFIG_FN
# 13/05/2016
ENGINE_CONFIG_FN = 'engine/'
ENGINE_CONFIG_PATH = CONFIG_PATH + ENGINE_CONFIG_FN
# 23/05/2016
SWAP_FN = 'swap/'
SWAP_PATH = MEDIA_ROOT + SWAP_FN
# 21/02/2017
SHINY_SECRET_KEY_FN = 'shiny'
SHINY_SECRET = get_key(SHINY_SECRET_KEY_FN) # Warning : shiny_secret must be at least 32 char long.
ENC_SESSION_ID_COOKIE_NAME = get_md5('seed')
##
# Report config
##
BOOTSTRAP_SH_TEMPLATE = TEMPLATE_FOLDER + GENERAL_SH_NAME
BOOTSTRAP_SH_CONF_TEMPLATE = TEMPLATE_FOLDER + GENERAL_SH_CONF_NAME
DOCKER_BOOTSTRAP_SH_TEMPLATE = TEMPLATE_FOLDER + DOCKER_SH_NAME
NOZZLE_TEMPLATE_FOLDER = TEMPLATE_FOLDER + 'nozzle_templates/'
TAGS_TEMPLATE_PATH = NOZZLE_TEMPLATE_FOLDER + 'tag.R'
NOZZLE_REPORT_TEMPLATE_PATH = NOZZLE_TEMPLATE_FOLDER + 'report.R'
NOZZLE_REPORT_FN = 'report'
RSCRIPTS_FN = 'scripts/'
RSCRIPTS_PATH = MEDIA_ROOT + RSCRIPTS_FN
REPORT_TYPE_FN = 'pipelines/'
REPORT_TYPE_PATH = MEDIA_ROOT + REPORT_TYPE_FN
REPORTS_FN = 'reports/'
REPORTS_PATH = '%s%s' % (MEDIA_ROOT, REPORTS_FN)
REPORTS_SH = GENERAL_SH_NAME
REPORTS_FM_FN = 'transfer_to_fm.txt'
R_FILE_NAME_BASE = 'script'
R_FILE_NAME = R_FILE_NAME_BASE + '.r'
R_OUT_EXT = '.Rout'
##
# Jobs configs
##
SCRIPT_CODE_HEADER_FN = 'header.R'
SCRIPT_HEADER_DEF_CONTENT = '# write your header here...'
SCRIPT_CODE_BODY_FN = 'body.R'
SCRIPT_BODY_DEF_CONTENT = '# copy and paste main code here...'
SCRIPT_FORM_FN = 'form.xml'
SCRIPT_TEMPLATE_FOLDER = TEMPLATE_FOLDER + 'script_templates/'
SCRIPT_TEMPLATE_PATH = SCRIPT_TEMPLATE_FOLDER + 'script.R'
JOBS_FN = 'jobs/'
JOBS_PATH = '%s%s' % (MEDIA_ROOT, JOBS_FN)
JOBS_SH = '_config.sh'
#
# WATCHER RELATED CONFIG
#
# FIXME make this target_config specific
WATCHER_DB_REFRESH = 2 # number of seconds to wait before refreshing reports from DB
WATCHER_PROC_REFRESH = 2 # number of seconds to wait before refreshing processes
#
# SHINY RELATED CONFIG
#
from shiny.settings import * # FIXME obsolete
FOLDERS_LST = [TEMPLATE_FOLDER, SHINY_REPORT_TEMPLATE_PATH, SHINY_REPORTS, SHINY_TAGS,
NOZZLE_TEMPLATE_FOLDER, SCRIPT_TEMPLATE_FOLDER, JOBS_PATH, REPORT_TYPE_PATH, REPORTS_PATH, RSCRIPTS_PATH, MEDIA_ROOT,
STATIC_ROOT, TARGET_CONFIG_PATH, EXEC_CONFIG_PATH, ENGINE_CONFIG_PATH]
##
# System Autocheck config
##
# this is used to avoid a 504 Gateway time-out from nginx, which is currently set to 600 sec = 10 min
# LONG_POLL_TIME_OUT_REFRESH = 540 # 9 minutes
# set to 50 sec to avoid time-out on breeze.fimm.fi
LONG_POLL_TIME_OUT_REFRESH = 50 # FIXME obsolete
# SGE_MASTER_FILE = '/var/lib/gridengine/default/common/act_qmaster' # FIXME obsolete
# SGE_MASTER_IP = '192.168.67.2' # FIXME obsolete
# DOTM_SERVER_IP = '128.214.64.5' # FIXME obsolete
# RORA_SERVER_IP = '192.168.0.219' # FIXME obsolete
# FILE_SERVER_IP = '192.168.0.107' # FIXME obsolete
SPECIAL_CODE_FOLDER = PROJECT_PATH + 'code/'
FS_SIG_FILE = PROJECT_PATH + 'fs_sig.md5'
FS_LIST_FILE = PROJECT_PATH + 'fs_checksums.json'
FOLDERS_TO_CHECK = [TEMPLATE_FOLDER, SHINY_TAGS, REPORT_TYPE_PATH, # SHINY_REPORTS,SPECIAL_CODE_FOLDER ,
RSCRIPTS_PATH, MOULD_FOLDER, STATIC_ROOT, DATASETS_FOLDER]
# STATIC URL MAPPINGS
# STATIC_URL = '/static/'
# MEDIA_URL = '/media/'
MOULD_URL = MEDIA_URL + DATA_TEMPLATES_FN
# number of seconds after which a job that has not received an sgeid should be marked as aborted or re-run
NO_SGEID_EXPIRY = 30
# FIXME obsolete
TMP_CSC_TAITO_MOUNT = '/mnt/csc-taito/'
TMP_CSC_TAITO_REPORT_PATH = 'breeze/'
TMP_CSC_TAITO_REMOTE_CHROOT = '/homeappl/home/clement/'
# mail config
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = 'breeze.fimm@gmail.com'
EMAIL_HOST_PASSWORD = get_key('gmail')
EMAIL_PORT = '587'
# EMAIL_SUBJECT_PREFIX = '[' + FULL_HOST_NAME + '] '
EMAIL_SUBJECT_PREFIX = '[' + BREEZE_TITLE + '] '
EMAIL_USE_TLS = True
EMAIL_SENDER = 'Breeze PMS'
#
# END OF CONFIG
# RUN-MODE SPECIFICS FOLLOWING
# ** NO CONFIGURATION CONST BEYOND THIS POINT **
#
# ** moved to config/env/*
# if prod mode then auto disable DEBUG, for safety
# if MODE_PROD or PHARMA_MODE:
# SHINY_MODE = 'remote'
# SHINY_LOCAL_ENABLE = False
# DEBUG = False
# VERBOSE = False
# ** DEV logging config moved to config/env/dev.py
# FIXME obsolete
if ENABLE_ROLLBAR:
try:
import rollbar
BASE_DIR = SOURCE_ROOT
ROLLBAR = {
'access_token': '00f2bf2c84ce40aa96842622c6ffe97d',
'environment': 'development' if DEBUG else 'production',
'root': BASE_DIR,
}
rollbar.init(**ROLLBAR)
except Exception:
ENABLE_ROLLBAR = False
logging.getLogger().error('Unable to init rollbar')
pass
def make_run_file():
	"""Write a `running` marker file containing the current timestamp.

	The marker lets the next settings load distinguish a fresh start from a
	reload (see the os.path.isfile('running') check that follows).  Using a
	context manager guarantees the file handle is closed even if the write
	raises, which the previous open/write/close sequence did not.
	"""
	with open('running', 'w+') as f:
		f.write(str(datetime.now().strftime(USUAL_DATE_FORMAT)))
# FIXME obsolete
if os.path.isfile('running'):
# First time
print '__breeze__started__'
logging.info('__breeze__started__')
os.remove('running')
else:
make_run_file()
# Second time
time.sleep(1)
print '__breeze__load/reload__'
logging.info('__breeze__load/reload__')
print 'source home : ' + SOURCE_ROOT
logging.debug('source home : ' + SOURCE_ROOT)
print 'project home : ' + PROJECT_PATH
logging.debug('project home : ' + PROJECT_PATH)
print 'Logging on %s\nSettings loaded. Running branch %s, mode %s on %s' % \
(TermColoring.bold(LOG_PATH), TermColoring.ok_blue(git.get_branch_from_fs(SOURCE_ROOT)), TermColoring.ok_blue(
TermColoring.bold(RUN_MODE)), TermColoring.ok_blue(FULL_HOST_NAME))
git_stat = git.get_status()
print git_stat
logging.info('Settings loaded. Running %s on %s' % (RUN_MODE, FULL_HOST_NAME))
logging.info(git_stat)
from api import code_v1
code_v1.do_self_git_pull()
if PHARMA_MODE:
print TermColoring.bold('RUNNING WITH PHARMA')
print('debug mode is %s' % ('ON' if DEBUG else 'OFF'))
# FIXME obsolete
def | project_folder_path | identifier_name | |
sign-up.js | `, {
email: email,
})
.then((res) => {
setVerifyCode(res.data.data.emailcode);
})
.catch((err) => {});
// 오류는 중복되거나 또는 서버 오류
setIsSend(true);
};
const verify = (code) => {
axios
.post(
`${process.env.REACT_APP_SERVER_URL}/sign/email-verification?code=${verifyCode}`,
{
emailCode: code,
}
)
.then((res) => {
setIsVerify(true);
})
.catch((err) => {});
};
const onCreate = async (data) => {
// axios 요청 성공 시2
await axios
.post(`${process.env.REACT_APP_SERVER_URL}/sign/signup`, {
nickname: data.nickName,
email: data.email,
password: data.password,
})
.then((res) => {});
await axios
.post(`${process.env.REACT_APP_SERVER_URL}/sign/signin`, {
email: data.email,
password: data.password,
})
.then((res) => {
dispatch(
userLogin({
isLogin: true,
email: user.email,
nickName: res.data.nickname,
accessToken: res.data.accessToken,
profileblob: res.data.profileblob,
})
);
})
.catch((err) => {
});
};
const handleChange = (e) => {
setUser({
...user,
[e.target.name]: e.target.value,
});
};
const handleSubmit = (e) => {
e.preventDefault();
if (
chkPW(user.password) === "통과" &&
isVerify === true &&
user.nickName !== undefined
) {
let code = onCreate(user);
if (code === "409") {
setMessage("이미 등록되어 있는 이메일입니다");
}
if (code === "500") {
setMessage("서버 오류 입니다");
} else {
setUser({
email: "",
verifyEmail: "",
password: "",
rePassword: "",
nickName: "",
});
setIsVerify(false);
props.exit();
}
} else {
setMessage("비번이나 이메일 인증을 확인해주세요");
}
};
function chkPW(pw) {
let num = pw.search(/[0-9]/g);
let eng = pw.search(/[a-z]/gi);
let spe = pw.search(/[`~!@@#$%^&*|₩₩₩'₩";:₩/?]/gi);
if (user.password) {
if (pw.length < 8 || pw.length > 20) {
return "8자리 ~ 20자리 이내로 입력해주세요.";
} else if (pw.search(/\s/) != -1) {
return "비밀번호는 공백 없이 입력해주세요.";
} else if (num < 0 || eng < 0 || spe < 0) {
return "영문,숫자, 특수문자를 혼합하여 입력해주세요.";
} else {
return "통과";
}
}
}
return (
<Temp>
<Exit onClick={props.exit}>
<FontAwesomeIcon color={"white"} icon={faTimes} />
</Exit>
<SignUpForm ref={size} onSubmit={handleSubmit}>
<Logo>
<img src="/image/logo.svg" />
</Logo>
<InputBox>
<EmailIcon>
<Align>
<EmailInput
name="email"
value={user.email}
placeholder="email"
onChange={handleChange}
/>
<EmailButton type="button" onClick={() => send(user.email)}>
인증
</EmailButton>
</Align>
<Message>
{isSend && !isVerify ? (
<div style={{ color: `${theme.colors.green}` }}>
이메일 인증 코드가 발송되었습니다.
</div>
) : (
""
)}
</Message>
</EmailIcon>
<EmailIcon>
<Align>
<EmailInput
name="verifyEmail"
value={user.verifyEmail}
placeholder="email code"
onChange={handleChange}
/>
<VerifyButton
type="button"
onClick={() => verify(user.verifyEmail)}
>
확인
</VerifyButton>
</Align>
<Message> | ) : isSend && user.verifyEmail ? (
"인증 되지 않았습니다"
) : (
""
)}
</Message>
</EmailIcon>
<Repassword>
<RepasswordInput
name="password"
value={user.password}
type="password"
placeholder="password"
onChange={handleChange}
/>
<Message>
{chkPW(user.password) === "통과" ? (
<div style={{ color: `${theme.colors.green}` }}>통과</div>
) : (
chkPW(user.password)
)}
</Message>
</Repassword>
<Repassword>
<RepasswordInput
name="rePassword"
type="password"
value={user.repassword}
placeholder="password confirm"
onChange={handleChange}
/>
<Message>
{user.repassword !== "" && user.password ? (
user.password === user.rePassword ? (
<div style={{ color: `${theme.colors.green}` }}>
비밀번호가 일치합니다.
</div>
) : (
"비밀번호가 일치 하지 않습니다"
)
) : (
""
)}
</Message>
</Repassword>
<Message>{message}</Message>
<LoginInput
name="nickName"
value={user.nickName}
placeholder="nickname"
onChange={handleChange}
/>
</InputBox>
<SignupButton type="submit">회원가입</SignupButton>
</SignUpForm>
</Temp>
);
}
const Temp = styled.div`
width: 100vw;
max-width: 100%;
height: calc(100vh - 3.45rem);
max-height: calc(100vh - 3.45rem);
background-color: rgba(0, 0, 0, 0.4);
z-index: 3;
display: flex;
justify-content: center;
align-items: center;
position: absolute;
`;
const Exit = styled.div`
position: absolute;
top: 1rem;
right: 1rem;
font-size: 2rem;
`;
const SignUpForm = styled.form`
display: flex;
flex-direction: column;
align-items: center;
transform: scale(0);
width: 26.25rem;
height: 35.563rem;
background-color: white;
border-radius: 1rem;
padding: 0 3.688rem;
`;
const Logo = styled.div`
display: flex;
justify-content: center;
align-items: center;
width: 100%;
height: 2.188rem;
margin-top: 1rem;
> img {
width: 100%;
height: 100%;
}
`;
const InputBox = styled.div`
margin-top: 2.8rem;
width: 100%;
height: 20rem;
display: flex;
flex-direction: column;
align-items: center;
`;
const Repassword = styled.div`
width: 100%;
display: flex;
flex-direction: column;
height: 4.375rem;
align-items: center;
`;
const RepasswordInput = styled.input`
width: 100%;
height: 3.063rem;
border-radius: 0.5rem;
border: 0.5px solid #bbbbbb;
text-align: left;
text-indent: 1rem;
background-image: url("/image/lock.svg");
background-repeat: no-repeat;
background-position: 96% 50%;
background-size: 25px;
font-size: ${theme.fonts.size.base};
color: ${theme.colors.darkgrey};
::placeholder {
color: #989898;
}
:focus {
outline: none;
}
`;
const EmailIcon = styled.div`
display: flex;
flex-direction: column;
justify-content: space-between;
width: 100%;
height: 4.375rem;
`;
const Align = styled.div`
display: flex;
gap: 0.625rem;
`;
const EmailInput = styled.input`
width: 100%;
height: 3.063rem;
border-radius: 0.5rem;
border: 0.5px solid #bbbbbb;
text-align: left;
text-indent: 1rem;
background-image: url("/image/email.svg");
background-repeat: no-repeat;
background-position: 96% 50%;
background-size: 25px;
font-size: ${theme.fonts.size.base};
color: ${theme.colors | {isVerify ? (
<div style={{ color: `${theme.colors.green}` }}>
인증되었습니다
</div> | random_line_split |
sign-up.js | `, {
email: email,
})
.then((res) => {
setVerifyCode(res.data.data.emailcode);
})
.catch((err) => {});
// 오류는 중복되거나 또는 서버 오류
setIsSend(true);
};
const verify = (code) => {
axios
.post(
`${process.env.REACT_APP_SERVER_URL}/sign/email-verification?code=${verifyCode}`,
{
emailCode: code,
}
)
.then((res) => {
setIsVerify(true);
})
.catch((err) => {});
};
const onCreate = async (data) => {
// axios 요청 성공 시2
await axios
.post(`${process.env.REACT_APP_SERVER_URL}/sign/signup`, {
nickname: data.nickName,
email: data.email,
password: data.password,
})
.then((res) => {});
await axios
.post(`${process.env.REACT_APP_SERVER_URL}/sign/signin`, {
email: data.email,
password: data.password,
})
.then((res) => {
dispatch(
userLogin({
isLogin: true,
email: user.email,
nickName: res.data.nickname,
accessToken: res.data.accessToken,
profileblob: res.data.profileblob,
})
);
})
.catch((err) => {
});
};
const handleChange = (e) => {
setUser({
...user,
[e.target.name]: e.target.value,
});
};
const handleSubmit = (e) => {
e.preventDefault();
if (
chkPW(user.password) === "통과" &&
isVerify === true &&
user.nickName !== undefined
) {
let code = onCreate(user);
if (code === "409") {
setMessage("이미 등록되어 있는 이메일입니다");
}
if (code === "500") {
setMessage("서버 오류 입니다");
} else {
setUser({
email: "",
verifyEmail: "",
password: "",
rePassword: "",
nickName: "",
});
setIsVerify(false);
props.exit();
}
} else {
setMessage("비번이나 이메일 인증을 확인해주세요");
}
};
function chkPW(pw) {
let num = pw.search(/[0-9]/g);
let eng = pw.search(/[a-z]/gi);
let spe = pw.search(/[`~!@@#$%^&*|₩₩₩'₩";:₩/?]/gi);
if ( |
</Logo>
<InputBox>
<EmailIcon>
<Align>
<EmailInput
name="email"
value={user.email}
placeholder="email"
onChange={handleChange}
/>
<EmailButton type="button" onClick={() => send(user.email)}>
인증
</EmailButton>
</Align>
<Message>
{isSend && !isVerify ? (
<div style={{ color: `${theme.colors.green}` }}>
이메일 인증 코드가 발송되었습니다.
</div>
) : (
""
)}
</Message>
</EmailIcon>
<EmailIcon>
<Align>
<EmailInput
name="verifyEmail"
value={user.verifyEmail}
placeholder="email code"
onChange={handleChange}
/>
<VerifyButton
type="button"
onClick={() => verify(user.verifyEmail)}
>
확인
</VerifyButton>
</Align>
<Message>
{isVerify ? (
<div style={{ color: `${theme.colors.green}` }}>
인증되었습니다
</div>
) : isSend && user.verifyEmail ? (
"인증 되지 않았습니다"
) : (
""
)}
</Message>
</EmailIcon>
<Repassword>
<RepasswordInput
name="password"
value={user.password}
type="password"
placeholder="password"
onChange={handleChange}
/>
<Message>
{chkPW(user.password) === "통과" ? (
<div style={{ color: `${theme.colors.green}` }}>통과</div>
) : (
chkPW(user.password)
)}
</Message>
</Repassword>
<Repassword>
<RepasswordInput
name="rePassword"
type="password"
value={user.repassword}
placeholder="password confirm"
onChange={handleChange}
/>
<Message>
{user.repassword !== "" && user.password ? (
user.password === user.rePassword ? (
<div style={{ color: `${theme.colors.green}` }}>
비밀번호가 일치합니다.
</div>
) : (
"비밀번호가 일치 하지 않습니다"
)
) : (
""
)}
</Message>
</Repassword>
<Message>{message}</Message>
<LoginInput
name="nickName"
value={user.nickName}
placeholder="nickname"
onChange={handleChange}
/>
</InputBox>
<SignupButton type="submit">회원가입</SignupButton>
</SignUpForm>
</Temp>
);
}
const Temp = styled.div`
width: 100vw;
max-width: 100%;
height: calc(100vh - 3.45rem);
max-height: calc(100vh - 3.45rem);
background-color: rgba(0, 0, 0, 0.4);
z-index: 3;
display: flex;
justify-content: center;
align-items: center;
position: absolute;
`;
const Exit = styled.div`
position: absolute;
top: 1rem;
right: 1rem;
font-size: 2rem;
`;
const SignUpForm = styled.form`
display: flex;
flex-direction: column;
align-items: center;
transform: scale(0);
width: 26.25rem;
height: 35.563rem;
background-color: white;
border-radius: 1rem;
padding: 0 3.688rem;
`;
const Logo = styled.div`
display: flex;
justify-content: center;
align-items: center;
width: 100%;
height: 2.188rem;
margin-top: 1rem;
> img {
width: 100%;
height: 100%;
}
`;
const InputBox = styled.div`
margin-top: 2.8rem;
width: 100%;
height: 20rem;
display: flex;
flex-direction: column;
align-items: center;
`;
const Repassword = styled.div`
width: 100%;
display: flex;
flex-direction: column;
height: 4.375rem;
align-items: center;
`;
const RepasswordInput = styled.input`
width: 100%;
height: 3.063rem;
border-radius: 0.5rem;
border: 0.5px solid #bbbbbb;
text-align: left;
text-indent: 1rem;
background-image: url("/image/lock.svg");
background-repeat: no-repeat;
background-position: 96% 50%;
background-size: 25px;
font-size: ${theme.fonts.size.base};
color: ${theme.colors.darkgrey};
::placeholder {
color: #989898;
}
:focus {
outline: none;
}
`;
const EmailIcon = styled.div`
display: flex;
flex-direction: column;
justify-content: space-between;
width: 100%;
height: 4.375rem;
`;
const Align = styled.div`
display: flex;
gap: 0.625rem;
`;
const EmailInput = styled.input`
width: 100%;
height: 3.063rem;
border-radius: 0.5rem;
border: 0.5px solid #bbbbbb;
text-align: left;
text-indent: 1rem;
background-image: url("/image/email.svg");
background-repeat: no-repeat;
background-position: 96% 50%;
background-size: 25px;
font-size: ${theme.fonts.size.base};
color: ${theme.colors | user.password) {
if (pw.length < 8 || pw.length > 20) {
return "8자리 ~ 20자리 이내로 입력해주세요.";
} else if (pw.search(/\s/) != -1) {
return "비밀번호는 공백 없이 입력해주세요.";
} else if (num < 0 || eng < 0 || spe < 0) {
return "영문,숫자, 특수문자를 혼합하여 입력해주세요.";
} else {
return "통과";
}
}
}
return (
<Temp>
<Exit onClick={props.exit}>
<FontAwesomeIcon color={"white"} icon={faTimes} />
</Exit>
<SignUpForm ref={size} onSubmit={handleSubmit}>
<Logo>
<img src="/image/logo.svg" /> | identifier_body |
sign-up.js | `, {
email: email,
})
.then((res) => {
setVerifyCode(res.data.data.emailcode);
})
.catch((err) => {});
// 오류는 중복되거나 또는 서버 오류
setIsSend(true);
};
const verify = (code) => {
axios
.post(
`${process.env.REACT_APP_SERVER_URL}/sign/email-verification?code=${verifyCode}`,
{
emailCode: code,
}
)
.then((res) => {
setIsVerify(true);
})
.catch((err) => {});
};
const onCreate = async (data) => {
// axios 요청 성공 시2
await axios
.post(`${process.env.REACT_APP_SERVER_URL}/sign/signup`, {
nickname: data.nickName,
email: data.email,
password: data.password,
})
.then((res) => {});
await axios
.post(`${process.env.REACT_APP_SERVER_URL}/sign/signin`, {
email: data.email,
password: data.password,
})
.then((res) => {
dispatch(
userLogin({
isLogin: true,
email: user.email,
nickName: res.data.nickname,
accessToken: res.data.accessToken,
profileblob: res.data.profileblob,
})
);
})
.catch((err) => {
});
};
const handleChange = (e) => {
setUser({
...user,
[e.target.name]: e.target.value,
});
};
const handleSubmit = (e) => {
e.preventDefault();
if (
chkPW(user.password) === "통과" &&
isVerify === true &&
user.nickName !== undefined
) {
let code = onCreate(user);
if (code === "409") {
setMessage("이미 등록되어 있는 이메일입니다");
}
if (code === "500") {
setMessage("서버 오류 입니다");
} else {
setUser({
email: "",
verifyEmail: "",
password: "",
rePassword: "",
nickName: "",
});
setIsVerify(false);
props.exit();
}
} else {
setMessage("비번이나 이메일 인증을 확인해주세요");
}
};
function chkPW(pw) {
let num = pw.search(/[0-9]/g);
let eng = pw.search(/[a-z]/gi);
let spe = pw.search(/[`~!@@#$%^&*|₩₩₩'₩";:₩/?]/gi); | if (user.password) {
if (pw.length < 8 || pw.length > 20) {
return "8자리 ~ 20자리 이내로 입력해주세요.";
} else if (pw.search(/\s/) != -1) {
return "비밀번호는 공백 없이 입력해주세요.";
} else if (num < 0 || eng < 0 || spe < 0) {
return "영문,숫자, 특수문자를 혼합하여 입력해주세요.";
} else {
return "통과";
}
}
}
return (
<Temp>
<Exit onClick={props.exit}>
<FontAwesomeIcon color={"white"} icon={faTimes} />
</Exit>
<SignUpForm ref={size} onSubmit={handleSubmit}>
<Logo>
<img src="/image/logo.svg" />
</Logo>
<InputBox>
<EmailIcon>
<Align>
<EmailInput
name="email"
value={user.email}
placeholder="email"
onChange={handleChange}
/>
<EmailButton type="button" onClick={() => send(user.email)}>
인증
</EmailButton>
</Align>
<Message>
{isSend && !isVerify ? (
<div style={{ color: `${theme.colors.green}` }}>
이메일 인증 코드가 발송되었습니다.
</div>
) : (
""
)}
</Message>
</EmailIcon>
<EmailIcon>
<Align>
<EmailInput
name="verifyEmail"
value={user.verifyEmail}
placeholder="email code"
onChange={handleChange}
/>
<VerifyButton
type="button"
onClick={() => verify(user.verifyEmail)}
>
확인
</VerifyButton>
</Align>
<Message>
{isVerify ? (
<div style={{ color: `${theme.colors.green}` }}>
인증되었습니다
</div>
) : isSend && user.verifyEmail ? (
"인증 되지 않았습니다"
) : (
""
)}
</Message>
</EmailIcon>
<Repassword>
<RepasswordInput
name="password"
value={user.password}
type="password"
placeholder="password"
onChange={handleChange}
/>
<Message>
{chkPW(user.password) === "통과" ? (
<div style={{ color: `${theme.colors.green}` }}>통과</div>
) : (
chkPW(user.password)
)}
</Message>
</Repassword>
<Repassword>
<RepasswordInput
name="rePassword"
type="password"
value={user.repassword}
placeholder="password confirm"
onChange={handleChange}
/>
<Message>
{user.repassword !== "" && user.password ? (
user.password === user.rePassword ? (
<div style={{ color: `${theme.colors.green}` }}>
비밀번호가 일치합니다.
</div>
) : (
"비밀번호가 일치 하지 않습니다"
)
) : (
""
)}
</Message>
</Repassword>
<Message>{message}</Message>
<LoginInput
name="nickName"
value={user.nickName}
placeholder="nickname"
onChange={handleChange}
/>
</InputBox>
<SignupButton type="submit">회원가입</SignupButton>
</SignUpForm>
</Temp>
);
}
const Temp = styled.div`
width: 100vw;
max-width: 100%;
height: calc(100vh - 3.45rem);
max-height: calc(100vh - 3.45rem);
background-color: rgba(0, 0, 0, 0.4);
z-index: 3;
display: flex;
justify-content: center;
align-items: center;
position: absolute;
`;
const Exit = styled.div`
position: absolute;
top: 1rem;
right: 1rem;
font-size: 2rem;
`;
const SignUpForm = styled.form`
display: flex;
flex-direction: column;
align-items: center;
transform: scale(0);
width: 26.25rem;
height: 35.563rem;
background-color: white;
border-radius: 1rem;
padding: 0 3.688rem;
`;
const Logo = styled.div`
display: flex;
justify-content: center;
align-items: center;
width: 100%;
height: 2.188rem;
margin-top: 1rem;
> img {
width: 100%;
height: 100%;
}
`;
const InputBox = styled.div`
margin-top: 2.8rem;
width: 100%;
height: 20rem;
display: flex;
flex-direction: column;
align-items: center;
`;
const Repassword = styled.div`
width: 100%;
display: flex;
flex-direction: column;
height: 4.375rem;
align-items: center;
`;
const RepasswordInput = styled.input`
width: 100%;
height: 3.063rem;
border-radius: 0.5rem;
border: 0.5px solid #bbbbbb;
text-align: left;
text-indent: 1rem;
background-image: url("/image/lock.svg");
background-repeat: no-repeat;
background-position: 96% 50%;
background-size: 25px;
font-size: ${theme.fonts.size.base};
color: ${theme.colors.darkgrey};
::placeholder {
color: #989898;
}
:focus {
outline: none;
}
`;
const EmailIcon = styled.div`
display: flex;
flex-direction: column;
justify-content: space-between;
width: 100%;
height: 4.375rem;
`;
const Align = styled.div`
display: flex;
gap: 0.625rem;
`;
const EmailInput = styled.input`
width: 100%;
height: 3.063rem;
border-radius: 0.5rem;
border: 0.5px solid #bbbbbb;
text-align: left;
text-indent: 1rem;
background-image: url("/image/email.svg");
background-repeat: no-repeat;
background-position: 96% 50%;
background-size: 25px;
font-size: ${theme.fonts.size.base};
color: ${theme | identifier_name | |
run_this_code_CACSSEOUL.py | .add_argument('--folder_name' ,help='ex model/fundus_300/folder_name/0 .. logs/fundus_300/folder_name/0 , type2/folder_name/0')
args=parser.parse_args()
print 'aug : ' , args.use_aug
print 'aug_lv1 : ' , args.use_aug_lv1
print 'actmap : ' , args.use_actmap
print 'use_l2_loss: ' , args.use_l2_loss
print 'weight_decay' , args.weight_decay
print 'BN : ' , args.use_BN
print 'Init Learning rate ' , args.init_lr
print 'Decay step for learning rate, ',args.lr_decay_step
print 'optimizer : ', args.optimizer
print 'use nesterov : ',args.use_nesterov
print 'random crop size : ',args.random_crop_resize
print 'batch size : ',args.batch_size
print 'max iter : ',args.max_iter
print 'data dir : ',args.data_dir
def count_trainable_params():
total_parameters = 0
for variable in tf.trainable_variables():
shape = variable.get_shape()
variable_parametes = 1
for dim in shape:
variable_parametes *= dim.value
total_parameters += variable_parametes
print("Total training params: %.1fM" % (total_parameters / 1e6))
def cls2onehot(cls , depth):
labs=np.zeros([len(cls) , depth])
for i,c in enumerate(cls):
labs[i,c]=1
return labs
def reconstruct_tfrecord_rawdata(tfrecord_path):
debug_flag_lv0 = True
debug_flag_lv1 = True
if __debug__ == debug_flag_lv0:
print 'debug start | batch.py | class tfrecord_batch | reconstruct_tfrecord_rawdata '
print 'now Reconstruct Image Data please wait a second'
reconstruct_image = []
# caution record_iter is generator
record_iter = tf.python_io.tf_record_iterator(path=tfrecord_path)
ret_img_list = []
ret_lab_list = []
ret_filename_list = []
for i, str_record in enumerate(record_iter):
msg = '\r -progress {0}'.format(i)
sys.stdout.write(msg)
sys.stdout.flush()
example = tf.train.Example()
example.ParseFromString(str_record)
height = int(example.features.feature['height'].int64_list.value[0])
width = int(example.features.feature['width'].int64_list.value[0])
raw_image = (example.features.feature['raw_image'].bytes_list.value[0])
label = int(example.features.feature['label'].int64_list.value[0])
filename = (example.features.feature['filename'].bytes_list.value[0])
image = np.fromstring(raw_image, dtype=np.uint8)
image = image.reshape((height, width, -1))
ret_img_list.append(image)
ret_lab_list.append(label)
ret_filename_list.append(filename)
ret_img = np.asarray(ret_img_list)
ret_lab = np.asarray(ret_lab_list)
if debug_flag_lv1 == True:
print ''
print 'images shape : ', np.shape(ret_img)
print 'labels shape : ', np.shape(ret_lab)
print 'length of filenames : ', len(ret_filename_list)
return ret_img, ret_lab, ret_filename_list
# pickle 형태로 저장되어 있는 데이터를 불러옵니다.
imgs_list=[]
root_dir =args.data_dir
#Load Train imgs ,labs , Test imgs , labs
"""
train_imgs , train_labs , train_fnames = reconstruct_tfrecord_rawdata(os.path.join(root_dir , 'train.tfrecord'))
test_imgs , test_labs , test_fnames = reconstruct_tfrecord_rawdata(os.path.join(root_dir , 'test.tfrecord'))
"""
names = ['normal_train.npy' , 'normal_test.npy' ,'abnormal_train.npy' , 'abnormal_test.npy']
normal_train_imgs , normal_test_imgs, abnormal_train_imgs , abnormal_test_imgs, =\
map( lambda name : np.load(os.path.join(root_dir ,name)) , names)
NORMAL = 0
ABNORMAL = 1
normal_train_labs=np.zeros([len(normal_train_imgs) , 2])
normal_train_labs[:,NORMAL]=1
abnormal_train_labs=np.zeros([len(abnormal_train_imgs) , 2])
abnormal_train_labs[:,ABNORMAL]=1
normal_test_labs=np.zeros([len(normal_test_imgs) , 2])
normal_test_labs[:,NORMAL]=1
abnormal_test_labs=np.zeros([len(abnormal_test_imgs) , 2])
abnormal_test_labs[:,ABNORMAL]=1
print 'Normal Training Data shape : {}'.format(np.shape(normal_train_imgs))
print 'ABNormal Training Data shape : {}'.format(np.shape(abnormal_train_imgs))
print 'Normal Test Data shape : {}'.format(np.shape(normal_test_imgs))
print 'ABNormal Test Data shape : {}'.format(np.shape(abnormal_test_imgs))
print 'Normal Training Labels shape : {}'.format(np.shape(normal_train_labs))
print 'ABNormal Training Labelsshape : {}'.format(np.shape(abnormal_train_labs))
print 'Normal Test Labelsshape : {}'.format(np.shape(normal_test_labs))
print 'ABNormal Test Labels shape : {}'.format(np.shape(abnormal_test_labs))
# normal 과 abnormal 의 balance 을 맞춥니다
train_imgs = np.vstack([normal_train_imgs , abnormal_train_imgs ,abnormal_train_imgs,abnormal_train_imgs,\
abnormal_train_imgs,abnormal_train_imgs,abnormal_train_imgs])
train_labs = np.vstack([normal_train_labs , abnormal_train_labs ,abnormal_train_labs ,abnormal_train_labs,\
abnormal_train_labs ,abnormal_train_labs ,abnormal_train_labs])
test_imgs = np.vstack([normal_test_imgs , abnormal_test_imgs])
test_labs = np.vstack([normal_test_labs, abnormal_test_labs])
print 'Train Images Shape : {} '.format(np.shape(train_imgs))
print 'Train Labels Shape : {} '.format(np.shape(train_labs))
print 'Test Images Shape : {} '.format(np.shape(test_imgs))
print 'Test Labels Shape : {} '.format(np.shape(test_labs))
# Apply Clahe
if args.use_clahe:
print 'Apply clahe ....'
import matplotlib.pyplot as plt
train_imgs= map(aug.clahe_equalized, train_imgs)
test_imgs = map(aug.clahe_equalized, test_imgs)
train_imgs , test_imgs = map(np.asarray , [train_imgs , test_imgs])
#normalize
print np.shape(test_labs)
if np.max(test_imgs) > 1:
#train_imgs=train_imgs/255.
test_imgs=test_imgs/255.
print 'test_imgs max :', np.max(test_imgs)
h,w,ch=train_imgs.shape[1:]
print h,w,ch
n_classes=np.shape(train_labs)[-1]
print 'the # classes : {}'.format(n_classes)
x_ , y_ , cam_ind, lr_ , is_training ,global_step = model.define_inputs(shape=[None, h ,w, ch ] , n_classes=n_classes )
logits=model.build_graph(x_=x_ , y_=y_ , cam_ind= cam_ind , is_training=is_training , aug_flag=args.use_aug,\
actmap_flag=args.use_actmap , model=args.vgg_model,random_crop_resize=args.random_crop_resize, \
bn = args.use_BN)
lr_op= tf.train.exponential_decay(args.init_lr, global_step , decay_steps=int(args.max_iter / args.lr_decay_step ), decay_rate=0.96,
staircase=False)
train_op, accuracy_op , loss_op , pred_op = \
model.train_algorithm(args.optimizer, logits=logits, labels=y_, learning_rate=lr_op, l2_loss=args.use_l2_loss,
weight_decay=args.weight_decay)
log_count =0;
while True:
logs_root_path='./logs/{}'.format(args.folder_name )
try:
os.makedirs(logs_root_path)
except Exception as e :
print e
pass;
print logs_root_path
logs_path=os.path.join( logs_root_path , str(log_count))
if not os.path.isdir(logs_path):
os.mkdir(logs_path)
break;
else:
log_count+=1
sess, saver , summary_writer =model.sess_start(logs_path)
model_count =0;
while True:
models_root_path='./models/{}'.format(args.folder_name)
try:
os.makedirs(models_root_path)
except Exception as e:
print e
pass;
models_path=os.path.join(models_root_path , str(model_count))
if not os.path.isdir(models_path):
os.mkdir(models_path)
break;
else:
model_count+=1
best_acc_root = os.path.join(models_path, 'best_acc')
best_loss_root = os.path.join(models_path, 'best_loss')
os.mkdir(best_acc_root)
os.mkdir(best_loss_root)
print 'Logs savedir: {}'.format(logs_path)
print 'Model savedir : {}'.format(models_path)
min_loss = 1000.
max_acc = 0.
max_iter=args.max_iter
ckpt=100
batch_size=args.batch_size
start_time=0
train_acc=0
train_val=0
train_loss=1000.
share=len(test_labs)/batch_size
remainder=len(test_labs)/batch_size
def show_progress(step, max_iter):
msg = '\r progress {}/{}'.format(step, max_iter)
| sys.stdout.write(msg)
sys.stdout.flush()
count_trainable_params()
for step in range(max_i | identifier_body | |
run_this_code_CACSSEOUL.py | : ',args.data_dir
def count_trainable_params():
total_parameters = 0
for variable in tf.trainable_variables():
shape = variable.get_shape()
variable_parametes = 1
for dim in shape:
variable_parametes *= dim.value
total_parameters += variable_parametes
print("Total training params: %.1fM" % (total_parameters / 1e6))
def cls2onehot(cls , depth):
labs=np.zeros([len(cls) , depth])
for i,c in enumerate(cls):
labs[i,c]=1
return labs
def reconstruct_tfrecord_rawdata(tfrecord_path):
debug_flag_lv0 = True
debug_flag_lv1 = True
if __debug__ == debug_flag_lv0:
print 'debug start | batch.py | class tfrecord_batch | reconstruct_tfrecord_rawdata '
print 'now Reconstruct Image Data please wait a second'
reconstruct_image = []
# caution record_iter is generator
record_iter = tf.python_io.tf_record_iterator(path=tfrecord_path)
ret_img_list = []
ret_lab_list = []
ret_filename_list = []
for i, str_record in enumerate(record_iter):
msg = '\r -progress {0}'.format(i)
sys.stdout.write(msg)
sys.stdout.flush()
example = tf.train.Example()
example.ParseFromString(str_record)
height = int(example.features.feature['height'].int64_list.value[0])
width = int(example.features.feature['width'].int64_list.value[0])
raw_image = (example.features.feature['raw_image'].bytes_list.value[0])
label = int(example.features.feature['label'].int64_list.value[0])
filename = (example.features.feature['filename'].bytes_list.value[0])
image = np.fromstring(raw_image, dtype=np.uint8)
image = image.reshape((height, width, -1))
ret_img_list.append(image)
ret_lab_list.append(label)
ret_filename_list.append(filename)
ret_img = np.asarray(ret_img_list)
ret_lab = np.asarray(ret_lab_list)
if debug_flag_lv1 == True:
print ''
print 'images shape : ', np.shape(ret_img)
print 'labels shape : ', np.shape(ret_lab)
print 'length of filenames : ', len(ret_filename_list)
return ret_img, ret_lab, ret_filename_list
# pickle 형태로 저장되어 있는 데이터를 불러옵니다.
imgs_list=[]
root_dir =args.data_dir
#Load Train imgs ,labs , Test imgs , labs
"""
train_imgs , train_labs , train_fnames = reconstruct_tfrecord_rawdata(os.path.join(root_dir , 'train.tfrecord'))
test_imgs , test_labs , test_fnames = reconstruct_tfrecord_rawdata(os.path.join(root_dir , 'test.tfrecord'))
"""
names = ['normal_train.npy' , 'normal_test.npy' ,'abnormal_train.npy' , 'abnormal_test.npy']
normal_train_imgs , normal_test_imgs, abnormal_train_imgs , abnormal_test_imgs, =\
map( lambda name : np.load(os.path.join(root_dir ,name)) , names)
NORMAL = 0
ABNORMAL = 1
normal_train_labs=np.zeros([len(normal_train_imgs) , 2])
normal_train_labs[:,NORMAL]=1
abnormal_train_labs=np.zeros([len(abnormal_train_imgs) , 2])
abnormal_train_labs[:,ABNORMAL]=1
normal_test_labs=np.zeros([len(normal_test_imgs) , 2])
normal_test_labs[:,NORMAL]=1
abnormal_test_labs=np.zeros([len(abnormal_test_imgs) , 2])
abnormal_test_labs[:,ABNORMAL]=1
print 'Normal Training Data shape : {}'.format(np.shape(normal_train_imgs))
print 'ABNormal Training Data shape : {}'.format(np.shape(abnormal_train_imgs))
print 'Normal Test Data shape : {}'.format(np.shape(normal_test_imgs))
print 'ABNormal Test Data shape : {}'.format(np.shape(abnormal_test_imgs))
print 'Normal Training Labels shape : {}'.format(np.shape(normal_train_labs))
print 'ABNormal Training Labelsshape : {}'.format(np.shape(abnormal_train_labs))
print 'Normal Test Labelsshape : {}'.format(np.shape(normal_test_labs))
print 'ABNormal Test Labels shape : {}'.format(np.shape(abnormal_test_labs))
# normal 과 abnormal 의 balance 을 맞춥니다
train_imgs = np.vstack([normal_train_imgs , abnormal_train_imgs ,abnormal_train_imgs,abnormal_train_imgs,\
abnormal_train_imgs,abnormal_train_imgs,abnormal_train_imgs])
train_labs = np.vstack([normal_train_labs , abnormal_train_labs ,abnormal_train_labs ,abnormal_train_labs,\
abnormal_train_labs ,abnormal_train_labs ,abnormal_train_labs])
test_imgs = np.vstack([normal_test_imgs , abnormal_test_imgs])
test_labs = np.vstack([normal_test_labs, abnormal_test_labs])
print 'Train Images Shape : {} '.format(np.shape(train_imgs))
print 'Train Labels Shape : {} '.format(np.shape(train_labs))
print 'Test Images Shape : {} '.format(np.shape(test_imgs))
print 'Test Labels Shape : {} '.format(np.shape(test_labs))
# Apply Clahe
if args.use_clahe:
print 'Apply clahe ....'
import matplotlib.pyplot as plt
train_imgs= map(aug.clahe_equalized, train_imgs)
test_imgs = map(aug.clahe_equalized, test_imgs)
train_imgs , test_imgs = map(np.asarray , [train_imgs , test_imgs])
#normalize
print np.shape(test_labs)
if np.max(test_imgs) > 1:
#train_imgs=train_imgs/255.
test_imgs=test_imgs/255.
print 'test_imgs max :', np.max(test_imgs)
h,w,ch=train_imgs.shape[1:]
print h,w,ch
n_classes=np.shape(train_labs)[-1]
print 'the # classes : {}'.format(n_classes)
x_ , y_ , cam_ind, lr_ , is_training ,global_step = model.define_inputs(shape=[None, h ,w, ch ] , n_classes=n_classes )
logits=model.build_graph(x_=x_ , y_=y_ , cam_ind= cam_ind , is_training=is_training , aug_flag=args.use_aug,\
actmap_flag=args.use_actmap , model=args.vgg_model,random_crop_resize=args.random_crop_resize, \
bn = args.use_BN)
lr_op= tf.train.exponential_decay(args.init_lr, global_step , decay_steps=int(args.max_iter / args.lr_decay_step ), decay_rate=0.96,
staircase=False)
train_op, accuracy_op , loss_op , pred_op = \
model.train_algorithm(args.optimizer, logits=logits, labels=y_, learning_rate=lr_op, l2_loss=args.use_l2_loss,
weight_decay=args.weight_decay)
log_count =0;
while True:
logs_root_path='./logs/{}'.format(args.folder_name )
try:
os.makedirs(logs_root_path)
except Exception as e :
print e
pass;
print logs_root_path
logs_path=os.path.join( logs_root_path , str(log_count))
if not os.path.isdir(logs_path):
os.mkdir(logs_path)
break;
else:
log_count+=1
sess, saver , summary_writer =model.sess_start(logs_path)
model_count =0;
while True:
models_root_path='./models/{}'.format(args.folder_name)
try:
os.makedirs(models_root_path)
except Exception as e:
print e
pass;
models_path=os.path.join(models_root_path , str(model_count))
if not os.path.isdir(models_path):
os.mkdir(models_path)
break;
else:
model_count+=1
best_acc_root = os.path.join(models_path, 'best_acc')
best_loss_root = os.path.join(models_path, 'best_loss')
os.mkdir(best_acc_root)
os.mkdir(best_loss_root)
print 'Logs savedir: {}'.format(logs_path)
print 'Model savedir : {}'.format(models_path)
min_loss = 1000.
max_acc = 0.
max_iter=args.max_iter
ckpt=100
batch_size=args.batch_size
start_time=0
train_acc=0
train_val=0
train_loss=1000.
share=len(test_labs)/batch_size
remainder=len(test_labs)/batch_size
def show_progress(step, max_iter):
msg = '\r progress {}/{}'.format(step, max_iter)
sys.stdout.write(msg)
sys.stdout.flush()
count_trainable_params()
for step in range(max_iter):
if step % ckpt==0:
""" #### testing ### """
print '### Testing ###'
test_fetches = [ accuracy_op, loss_op, pred_op , lr_op]
val_acc_mean , val_loss_mean , pred_all = [] , [] , []
for i in range(share): #여기서 테스트 셋을 sess.run()할수 있게 쪼갭니다
test_feedDict = {x_: test_imgs[i * batch_size:(i + 1) * batch_size],
| y_: test_labs[i * batch_size:(i + 1) * batch_size], is_training: False, global_step: step}
val_acc, val_loss, pred, learning_rate = sess.run(fetches=test_fetches, feed_dict=test_feedDict)
val_acc_mean.append(val_acc)
val_loss_mean.append(val_loss)
pred_all.append(pred)
val_acc_mean=np.mean(np.asarray(val_acc_mean))
val_acc_mean=np.me | conditional_block | |
run_this_code_CACSSEOUL.py | ='init learning rate ')
parser.add_argument('--lr_decay_step' ,type=int , help='decay step for learning rate')
parser.add_argument('--folder_name' ,help='ex model/fundus_300/folder_name/0 .. logs/fundus_300/folder_name/0 , type2/folder_name/0')
args=parser.parse_args()
print 'aug : ' , args.use_aug
print 'aug_lv1 : ' , args.use_aug_lv1
print 'actmap : ' , args.use_actmap
print 'use_l2_loss: ' , args.use_l2_loss
print 'weight_decay' , args.weight_decay
print 'BN : ' , args.use_BN
print 'Init Learning rate ' , args.init_lr
print 'Decay step for learning rate, ',args.lr_decay_step
print 'optimizer : ', args.optimizer
print 'use nesterov : ',args.use_nesterov
print 'random crop size : ',args.random_crop_resize
print 'batch size : ',args.batch_size
print 'max iter : ',args.max_iter
print 'data dir : ',args.data_dir
def count_trainable_params():
total_parameters = 0
for variable in tf.trainable_variables():
shape = variable.get_shape()
variable_parametes = 1
for dim in shape:
variable_parametes *= dim.value
total_parameters += variable_parametes
print("Total training params: %.1fM" % (total_parameters / 1e6))
def cls2onehot(cls , depth):
labs=np.zeros([len(cls) , depth])
for i,c in enumerate(cls):
labs[i,c]=1
return labs
def reconstruct_tfrecord_rawdata(tfrecord_path):
debug_flag_lv0 = True
debug_flag_lv1 = True
if __debug__ == debug_flag_lv0:
print 'debug start | batch.py | class tfrecord_batch | reconstruct_tfrecord_rawdata '
print 'now Reconstruct Image Data please wait a second'
reconstruct_image = []
# caution record_iter is generator
record_iter = tf.python_io.tf_record_iterator(path=tfrecord_path)
ret_img_list = []
ret_lab_list = []
ret_filename_list = []
for i, str_record in enumerate(record_iter):
msg = '\r -progress {0}'.format(i)
sys.stdout.write(msg)
sys.stdout.flush()
example = tf.train.Example()
example.ParseFromString(str_record)
height = int(example.features.feature['height'].int64_list.value[0])
width = int(example.features.feature['width'].int64_list.value[0])
raw_image = (example.features.feature['raw_image'].bytes_list.value[0])
label = int(example.features.feature['label'].int64_list.value[0])
filename = (example.features.feature['filename'].bytes_list.value[0])
image = np.fromstring(raw_image, dtype=np.uint8)
image = image.reshape((height, width, -1))
ret_img_list.append(image)
ret_lab_list.append(label)
ret_filename_list.append(filename)
ret_img = np.asarray(ret_img_list)
ret_lab = np.asarray(ret_lab_list)
if debug_flag_lv1 == True:
print ''
print 'images shape : ', np.shape(ret_img)
print 'labels shape : ', np.shape(ret_lab)
print 'length of filenames : ', len(ret_filename_list)
return ret_img, ret_lab, ret_filename_list
# pickle 형태로 저장되어 있는 데이터를 불러옵니다.
imgs_list=[]
root_dir =args.data_dir
#Load Train imgs ,labs , Test imgs , labs
"""
train_imgs , train_labs , train_fnames = reconstruct_tfrecord_rawdata(os.path.join(root_dir , 'train.tfrecord'))
test_imgs , test_labs , test_fnames = reconstruct_tfrecord_rawdata(os.path.join(root_dir , 'test.tfrecord'))
"""
names = ['normal_train.npy' , 'normal_test.npy' ,'abnormal_train.npy' , 'abnormal_test.npy']
normal_train_imgs , normal_test_imgs, abnormal_train_imgs , abnormal_test_imgs, =\
map( lambda name : np.load(os.path.join(root_dir ,name)) , names)
NORMAL = 0
ABNORMAL = 1
normal_train_labs=np.zeros([len(normal_train_imgs) , 2])
normal_train_labs[:,NORMAL]=1
abnormal_train_labs=np.zeros([len(abnormal_train_imgs) , 2])
abnormal_train_labs[:,ABNORMAL]=1
normal_test_labs=np.zeros([len(normal_test_imgs) , 2])
normal_test_labs[:,NORMAL]=1
abnormal_test_labs=np.zeros([len(abnormal_test_imgs) , 2])
abnormal_test_labs[:,ABNORMAL]=1
print 'Normal Training Data shape : {}'.format(np.shape(normal_train_imgs))
print 'ABNormal Training Data shape : {}'.format(np.shape(abnormal_train_imgs))
print 'Normal Test Data shape : {}'.format(np.shape(normal_test_imgs))
print 'ABNormal Test Data shape : {}'.format(np.shape(abnormal_test_imgs))
print 'Normal Training Labels shape : {}'.format(np.shape(normal_train_labs))
print 'ABNormal Training Labelsshape : {}'.format(np.shape(abnormal_train_labs))
print 'Normal Test Labelsshape : {}'.format(np.shape(normal_test_labs))
print 'ABNormal Test Labels shape : {}'.format(np.shape(abnormal_test_labs))
# normal 과 abnormal 의 balance 을 맞춥니다
train_imgs = np.vstack([normal_train_imgs , abnormal_train_imgs ,abnormal_train_imgs,abnormal_train_imgs,\
abnormal_train_imgs,abnormal_train_imgs,abnormal_train_imgs])
train_labs = np.vstack([normal_train_labs , abnormal_train_labs ,abnormal_train_labs ,abnormal_train_labs,\
abnormal_train_labs ,abnormal_train_labs ,abnormal_train_labs])
test_imgs = np.vstack([normal_test_imgs , abnormal_test_imgs])
test_labs = np.vstack([normal_test_labs, abnormal_test_labs])
print 'Train Images Shape : {} '.format(np.shape(train_imgs))
print 'Train Labels Shape : {} '.format(np.shape(train_labs))
print 'Test Images Shape : {} '.format(np.shape(test_imgs))
print 'Test Labels Shape : {} '.format(np.shape(test_labs))
# Apply Clahe
if args.use_clahe:
print 'Apply clahe ....'
import matplotlib.pyplot as plt
train_imgs= map(aug.clahe_equalized, train_imgs)
test_imgs = map(aug.clahe_equalized, test_imgs)
train_imgs , test_imgs = map(np.asarray , [train_imgs , test_imgs])
#normalize
print np.shape(test_labs)
if np.max(test_imgs) > 1:
#train_imgs=train_imgs/255.
test_imgs=test_imgs/255.
print 'test_imgs max :', np.max(test_imgs)
h,w,ch=train_imgs.shape[1:]
print h,w,ch
n_classes=np.shape(train_labs)[-1]
print 'the # classes : {}'.format(n_classes)
x_ , y_ , cam_ind, lr_ , is_training ,global_step = model.define_inputs(shape=[None, h ,w, ch ] , n_classes=n_classes )
logits=model.build_graph(x_=x_ , y_=y_ , cam_ind= cam_ind , is_training=is_training , aug_flag=args.use_aug,\
actmap_flag=args.use_actmap , model=args.vgg_model,random_crop_resize=args.random_crop_resize, \
bn = args.use_BN)
lr_op= tf.train.exponential_decay(args.init_lr, global_step , decay_steps=int(args.max_iter / args.lr_decay_step ), decay_rate=0.96,
staircase=False)
train_op, accuracy_op , loss_op , pred_op = \
model.train_algorithm(args.optimizer, logits=logits, labels=y_, learning_rate=lr_op, l2_loss=args.use_l2_loss,
weight_decay=args.weight_decay)
log_count =0;
while True:
logs_root_path='./logs/{}'.format(args.folder_name )
try:
os.makedirs(logs_root_path)
except Exception as e :
print e
pass;
print logs_root_path
logs_path=os.path.join( logs_root_path , str(log_count))
if not os.path.isdir(logs_path):
os.mkdir(logs_path)
break;
else:
log_count+=1
sess, saver , summary_writer =model.sess_start(logs_path)
model_count =0;
while True:
models_root_path='./models/{}'.format(args.folder_name)
try:
os.makedirs(models_root_path)
except Exception as e:
print e
pass;
models_path=os.path.join(models_root_path , str(model_count))
if not os.path.isdir(models_path):
os.mkdir(models_path)
break;
else:
model_count+=1
best_acc_root = os.path.join(models_path, 'best_acc')
best_loss_root = os.path.join(models_path, 'best_loss')
os.mkdir(best_acc_root)
os.mkdir(best_loss_root)
print 'Logs savedir: {}'.format(logs_path)
print 'Model savedir : {}'.format(models_path)
min_loss = 1000.
max_acc = 0.
max_iter=args.max_iter
ckpt=100
batch_size=args.batch_size
start_time=0
train_acc=0
train_val=0
train_loss=1000.
share=len(test_labs)/batch_size
remainder=len(test_labs)/batch_size
def show_progress(step, max_iter):
msg = '\r progr | ess {}/{}'.fo | identifier_name | |
run_this_code_CACSSEOUL.py | parser.add_argument('--clahe' , dest='use_clahe', action='store_true' , help='augmentation')
parser.add_argument('--no_clahe' , dest='use_clahe', action='store_false' , help='augmentation')
parser.add_argument('--actmap', dest='use_actmap' ,action='store_true')
parser.add_argument('--no_actmap', dest='use_actmap', action='store_false')
parser.add_argument('--random_crop_resize' , '-r', type = int , help='if you use random crop resize , you can choice randdom crop ')
parser.add_argument('--batch_size' ,'-b' , type=int , help='batch size')
parser.add_argument('--max_iter', '-i' , type=int , help='iteration')
parser.add_argument('--l2_loss', dest='use_l2_loss', action='store_true' ,help='l2 loss true or False')
parser.add_argument('--no_l2_loss', dest='use_l2_loss', action='store_false' ,help='l2 loss true or False')
parser.add_argument('--weight_decay', type = float , help='L2 weight decay ')
parser.add_argument('--vgg_model' ,'-m' , choices=['vgg_11','vgg_13','vgg_16', 'vgg_19'])
parser.add_argument('--BN' , dest='use_BN' , action='store_true' , help = 'bn True or not')
parser.add_argument('--no_BN',dest='use_BN' , action = 'store_false', help = 'bn True or not')
parser.add_argument('--data_dir' , help='the folder where the data is saved ' )
parser.add_argument('--init_lr' , type = float , help='init learning rate ')
parser.add_argument('--lr_decay_step' ,type=int , help='decay step for learning rate')
parser.add_argument('--folder_name' ,help='ex model/fundus_300/folder_name/0 .. logs/fundus_300/folder_name/0 , type2/folder_name/0')
args=parser.parse_args()
print 'aug : ' , args.use_aug
print 'aug_lv1 : ' , args.use_aug_lv1
print 'actmap : ' , args.use_actmap
print 'use_l2_loss: ' , args.use_l2_loss
print 'weight_decay' , args.weight_decay
print 'BN : ' , args.use_BN
print 'Init Learning rate ' , args.init_lr
print 'Decay step for learning rate, ',args.lr_decay_step
print 'optimizer : ', args.optimizer
print 'use nesterov : ',args.use_nesterov
print 'random crop size : ',args.random_crop_resize
print 'batch size : ',args.batch_size
print 'max iter : ',args.max_iter
print 'data dir : ',args.data_dir
def count_trainable_params():
total_parameters = 0
for variable in tf.trainable_variables():
shape = variable.get_shape()
variable_parametes = 1
for dim in shape:
variable_parametes *= dim.value
total_parameters += variable_parametes
print("Total training params: %.1fM" % (total_parameters / 1e6))
def cls2onehot(cls , depth):
labs=np.zeros([len(cls) , depth])
for i,c in enumerate(cls):
labs[i,c]=1
return labs
def reconstruct_tfrecord_rawdata(tfrecord_path):
debug_flag_lv0 = True
debug_flag_lv1 = True
if __debug__ == debug_flag_lv0:
print 'debug start | batch.py | class tfrecord_batch | reconstruct_tfrecord_rawdata '
print 'now Reconstruct Image Data please wait a second'
reconstruct_image = []
# caution record_iter is generator
record_iter = tf.python_io.tf_record_iterator(path=tfrecord_path)
ret_img_list = []
ret_lab_list = []
ret_filename_list = []
for i, str_record in enumerate(record_iter):
msg = '\r -progress {0}'.format(i)
sys.stdout.write(msg)
sys.stdout.flush()
example = tf.train.Example()
example.ParseFromString(str_record)
height = int(example.features.feature['height'].int64_list.value[0])
width = int(example.features.feature['width'].int64_list.value[0])
raw_image = (example.features.feature['raw_image'].bytes_list.value[0])
label = int(example.features.feature['label'].int64_list.value[0])
filename = (example.features.feature['filename'].bytes_list.value[0])
image = np.fromstring(raw_image, dtype=np.uint8)
image = image.reshape((height, width, -1))
ret_img_list.append(image)
ret_lab_list.append(label)
ret_filename_list.append(filename)
ret_img = np.asarray(ret_img_list)
ret_lab = np.asarray(ret_lab_list)
if debug_flag_lv1 == True:
print ''
print 'images shape : ', np.shape(ret_img)
print 'labels shape : ', np.shape(ret_lab)
print 'length of filenames : ', len(ret_filename_list)
return ret_img, ret_lab, ret_filename_list
# pickle 형태로 저장되어 있는 데이터를 불러옵니다.
imgs_list=[]
root_dir =args.data_dir
#Load Train imgs ,labs , Test imgs , labs
"""
train_imgs , train_labs , train_fnames = reconstruct_tfrecord_rawdata(os.path.join(root_dir , 'train.tfrecord'))
test_imgs , test_labs , test_fnames = reconstruct_tfrecord_rawdata(os.path.join(root_dir , 'test.tfrecord'))
"""
names = ['normal_train.npy' , 'normal_test.npy' ,'abnormal_train.npy' , 'abnormal_test.npy']
normal_train_imgs , normal_test_imgs, abnormal_train_imgs , abnormal_test_imgs, =\
map( lambda name : np.load(os.path.join(root_dir ,name)) , names)
NORMAL = 0
ABNORMAL = 1
normal_train_labs=np.zeros([len(normal_train_imgs) , 2])
normal_train_labs[:,NORMAL]=1
abnormal_train_labs=np.zeros([len(abnormal_train_imgs) , 2])
abnormal_train_labs[:,ABNORMAL]=1
normal_test_labs=np.zeros([len(normal_test_imgs) , 2])
normal_test_labs[:,NORMAL]=1
abnormal_test_labs=np.zeros([len(abnormal_test_imgs) , 2])
abnormal_test_labs[:,ABNORMAL]=1
print 'Normal Training Data shape : {}'.format(np.shape(normal_train_imgs))
print 'ABNormal Training Data shape : {}'.format(np.shape(abnormal_train_imgs))
print 'Normal Test Data shape : {}'.format(np.shape(normal_test_imgs))
print 'ABNormal Test Data shape : {}'.format(np.shape(abnormal_test_imgs))
print 'Normal Training Labels shape : {}'.format(np.shape(normal_train_labs))
print 'ABNormal Training Labelsshape : {}'.format(np.shape(abnormal_train_labs))
print 'Normal Test Labelsshape : {}'.format(np.shape(normal_test_labs))
print 'ABNormal Test Labels shape : {}'.format(np.shape(abnormal_test_labs))
# normal 과 abnormal 의 balance 을 맞춥니다
train_imgs = np.vstack([normal_train_imgs , abnormal_train_imgs ,abnormal_train_imgs,abnormal_train_imgs,\
abnormal_train_imgs,abnormal_train_imgs,abnormal_train_imgs])
train_labs = np.vstack([normal_train_labs , abnormal_train_labs ,abnormal_train_labs ,abnormal_train_labs,\
abnormal_train_labs ,abnormal_train_labs ,abnormal_train_labs])
test_imgs = np.vstack([normal_test_imgs , abnormal_test_imgs])
test_labs = np.vstack([normal_test_labs, abnormal_test_labs])
print 'Train Images Shape : {} '.format(np.shape(train_imgs))
print 'Train Labels Shape : {} '.format(np.shape(train_labs))
print 'Test Images Shape : {} '.format(np.shape(test_imgs))
print 'Test Labels Shape : {} '.format(np.shape(test_labs))
# Apply Clahe
if args.use_clahe:
print 'Apply clahe ....'
import matplotlib.pyplot as plt
train_imgs= map(aug.clahe_equalized, train_imgs)
test_imgs = map(aug.clahe_equalized, test_imgs)
train_imgs , test_imgs = map(np.asarray , [train_imgs , test_imgs])
#normalize
print np.shape(test_labs)
if np.max(test_imgs) > 1:
#train_imgs=train_imgs/255.
test_imgs=test_imgs/255.
print 'test_imgs max :', np.max(test_imgs)
h,w,ch=train_imgs.shape[1:]
print h,w,ch
n_classes=np.shape(train_labs)[-1]
print 'the # classes : {}'.format(n_classes)
x_ , y_ , cam_ind, lr_ , is_training ,global_step = model.define_inputs(shape=[None, h ,w, ch ] , n_classes=n_classes )
logits=model.build_graph(x_=x_ , y_=y_ , cam_ind= cam_ind , is_training=is_training , aug_flag=args.use_aug,\
actmap_flag=args.use_actmap , model=args.vgg_model,random_crop_resize=args.random_crop_resize, \
bn = args.use_BN)
lr_op= tf.train.exponential_decay(args.init_lr, global_step , decay_steps=int(args.max_iter / args.lr_decay_step ), decay_rate=0.96,
staircase=False)
train_op, accuracy_op , loss_op , pred_op = \
model.train_algorithm(args.optimizer, logits=logits, | random_line_split | ||
DailyCP.py | 201314",
"appVersion": "8.1.13", "model": "红星一号量子计算机", "lon": 0.0, "systemVersion": "初号机", "lat": 0.0}
self.session.headers.update(
{"Cpdaily-Extension": self.encrypt(json.dumps(extension))})
self.setHostBySchoolName(schoolName)
def setHostBySchoolName(self, schoolName):
ret = self.request(
"https://static.campushoy.com/apicache/tenantListSort")
school = [j for i in ret["data"]
for j in i["datas"] if j["name"] == schoolName]
if len(school) == 0:
print("不支持的学校或者学校名称错误,以下是支持的学校列表")
print(ret)
exit()
ret = self.request(
"https://mobile.campushoy.com/v6/config/guest/tenant/info?ids={ids}".format(ids=school[0]["id"]))
self.loginUrl = ret["data"][0]["ampUrl"]
if ret == "":
print("学校并没有申请入驻今日校园平台")
exit()
print("{name}的登录地址{url}".format(name=schoolName, url=self.loginUrl))
self.host = re.findall(r"//(.*?)/", self.loginUrl)[0]
def encrypt(self, text):
k = pyDes.des(self.key, pyDes.CBC, b"\x01\x02\x03\x04\x05\x06\x07\x08",
pad=None, padmode=pyDes.PAD_PKCS5)
ret = k.encrypt(text)
return base64.b64encode(ret).decode()
def passwordEncrypt(self, text: str, key: str):
def pad(s): return s + (len(key) - len(s) %
len(key)) * chr(len(key) - len(s) % len(key))
def unpad(s): return s[:-ord(s[len(s) - 1:])]
text = pad(
"TdEEGazAXQMBzEAisrYaxRRax5kmnMJnpbKxcE6jxQfWRwP2J78adKYm8WzSkfXJ"+text).encode("utf-8")
aes = AES.new(str.encode(key), AES.MODE_CBC,
str.encode("ya8C45aRrBEn8sZH"))
return base64.b64encode(aes.encrypt(text))
def request(self, url: str, body=None, parseJson=True, JsonBody=True, Referer=None):
url = url.format(host=self.host)
if Referer != None:
self.session.headers.update({"Referer": Referer})
if body == None:
ret = self.session.get(url)
else:
self.session.headers.update(
{"Content-Type": ("application/json" if JsonBody else "application/x-www-form-urlencoded")})
ret = self.session.post(url, data=(
json.dumps(body) if JsonBody else body))
if parseJson:
return json.loads(ret.text)
else:
return ret
def decrypt(self, text):
k = pyDes.des(self.key, pyDes.CBC, b"\x01\x02\x03\x04\x05\x06\x07\x08",
pad=None, padmode=pyDes.PAD_PKCS5)
ret = k.decrypt(base64.b64decode(text))
return ret.decode()
def checkNeedCaptcha(self, username):
url = "https://{host}/iap/checkNeedCaptcha?username={username}".format(
host=self.host, username=username)
ret = self.session.get(url)
ret = json.loads(ret.text)
return ret["needCaptcha"]
def generateCaptcha(self):
# url = "https://{host}/iap/generateCaptcha?ltId={client}&codeType=2".format(host=self.host,client=self.client)
# ret = self.session.get(url)
# return ret.content
pass
def getBasicInfo(self):
return self.request("https://{host}/iap/tenant/basicInfo", "{}")
def login(self, username, password, captcha=""):
if "campusphere" in self.loginUrl:
return self.loginIAP(username, password, captcha)
else:
return self.loginAuthserver(username, password, captcha)
def loginIAP(self, username, password, captcha=""):
self.session.headers.update({"X-Requested-With": "XMLHttpRequest"})
ret = self.session.get(
"https://{host}/iap/l | else:
return False
def checkNeedCaptchaAuthServer(self, username):
ret = self.request("http://{host}/authserver/needCaptcha.html?username={
username}&pwdEncrypt2=pwdEncryptSalt".format(
username=username), parseJson=False).text
return ret == "true"
def loginAuthserver(self, username, password, captcha=""):
ret = self.request(self.loginUrl, parseJson=False)
body = dict(re.findall(
r'''<input type="hidden" name="(.*?)" value="(.*?)"''', ret.text))
salt = dict(re.findall(
r'''<input type="hidden" id="(.*?)" value="(.*?)"''', ret.text))
body["username"] = username
body["dllt"] = "userNamePasswordLogin"
if "pwdDefaultEncryptSalt" in salt.keys():
body["password"] = self.passwordEncrypt(
password, salt["pwdDefaultEncryptSalt"])
else:
body["password"] = password
ret = self.request(ret.url, body, False, False,
Referer=self.loginUrl).url
print(self.session.cookies)
print("本函数不一定能用。")
return True
def getCollectorList(self):
body = {
"pageSize": 10,
"pageNumber": 1
}
ret = self.request(
"https://{host}/wec-counselor-collector-apps/stu/collector/queryCollectorProcessingList", body)
return ret["datas"]["rows"]
def getNoticeList(self):
body = {
"pageSize": 10,
"pageNumber": 1
}
ret = self.request(
"https://{host}/wec-counselor-stu-apps/stu/notice/queryProcessingNoticeList", body)
return ret["datas"]["rows"]
def confirmNotice(self, wid):
body = {
"wid": wid
}
ret = self.request(
"https://{host}/wec-counselor-stu-apps/stu/notice/confirmNotice", body)
print(ret["message"])
return ret["message"] == "SUCCESS"
def getCollectorDetail(self, collectorWid):
body = {
"collectorWid": collectorWid
}
return self.request("https://{host}/wec-counselor-collector-apps/stu/collector/detailCollector", body)["datas"]
def getCollectorFormFiled(self, formWid, collectorWid):
body = {
"pageSize": 50,
"pageNumber": 1,
"formWid": formWid,
"collectorWid": collectorWid
}
return self.request("https://{host}/wec-counselor-collector-apps/stu/collector/getFormFields", body)["datas"]["rows"]
def submitCollectorForm(self, formWid, collectWid, schoolTaskWid, rows, address):
body = {
"formWid": formWid,
"collectWid": collectWid,
"schoolTaskWid": schoolTaskWid,
"form": rows,
"address": address
}
ret = self.request(
"https://{host}/wec-counselor-collector-apps/stu/collector/submitForm", body)
print(ret["message"])
return ret["message"] == "SUCCESS"
def autoFill(self, rows):
for item in rows:
index = 0
while index < len(item["fieldItems"]):
if item["fieldItems"][index]["isSelected"] == 1:
index = index + 1
else:
item["fieldItems"].pop(index)
def getFormCharac(self, detail):
ret = self.request(detail["content"], parseJson=False, JsonBody=False)
return hashlib.md5(ret.content).digest().hex()
def autoComplete(self, address, dbpath):
collectList = self.getCollectorList()
print(collectList | ogin?service=https://{host}/portal/login".format(host=self.host)).url
client = ret[ret.find("=")+1:]
ret = self.request("https://{host}/iap/security/lt",
"lt={client}".format(client=client), True, False)
client = ret["result"]["_lt"]
# self.encryptSalt = ret["result"]["_encryptSalt"]
body = {
"username": username,
"password": password,
"lt": client,
"captcha": captcha,
"rememberMe": "true",
"dllt": "",
"mobile": ""
}
ret = self.request("https://{host}/iap/doLogin", body, True, False)
if ret["resultCode"] == "REDIRECT":
self.session.get(ret["url"])
return True | identifier_body |
DailyCP.py | 201314",
"appVersion": "8.1.13", "model": "红星一号量子计算机", "lon": 0.0, "systemVersion": "初号机", "lat": 0.0}
self.session.headers.update(
{"Cpdaily-Extension": self.encrypt(json.dumps(extension))})
self.setHostBySchoolName(schoolName)
def setHostBySchoolName(self, schoolName):
ret = self.request(
"https://static.campushoy.com/apicache/tenantListSort")
school = [j for i in ret["data"]
for j in i["datas"] if j["name"] == schoolName]
if len(school) == 0:
print("不支持的学校或者学校名称错误,以下是支持的学校列表")
print(ret)
exit()
ret = self.request(
"https://mobile.campushoy.com/v6/config/guest/tenant/info?ids={ids}".format(ids=school[0]["id"]))
self.loginUrl = ret["data"][0]["ampUrl"]
if ret == "":
print("学校并没有申请入驻今日校园平台")
exit()
print("{name}的登录地址{url}".format(name=schoo | lf.loginUrl)[0]
def encrypt(self, text):
k = pyDes.des(self.key, pyDes.CBC, b"\x01\x02\x03\x04\x05\x06\x07\x08",
pad=None, padmode=pyDes.PAD_PKCS5)
ret = k.encrypt(text)
return base64.b64encode(ret).decode()
def passwordEncrypt(self, text: str, key: str):
def pad(s): return s + (len(key) - len(s) %
len(key)) * chr(len(key) - len(s) % len(key))
def unpad(s): return s[:-ord(s[len(s) - 1:])]
text = pad(
"TdEEGazAXQMBzEAisrYaxRRax5kmnMJnpbKxcE6jxQfWRwP2J78adKYm8WzSkfXJ"+text).encode("utf-8")
aes = AES.new(str.encode(key), AES.MODE_CBC,
str.encode("ya8C45aRrBEn8sZH"))
return base64.b64encode(aes.encrypt(text))
def request(self, url: str, body=None, parseJson=True, JsonBody=True, Referer=None):
url = url.format(host=self.host)
if Referer != None:
self.session.headers.update({"Referer": Referer})
if body == None:
ret = self.session.get(url)
else:
self.session.headers.update(
{"Content-Type": ("application/json" if JsonBody else "application/x-www-form-urlencoded")})
ret = self.session.post(url, data=(
json.dumps(body) if JsonBody else body))
if parseJson:
return json.loads(ret.text)
else:
return ret
def decrypt(self, text):
k = pyDes.des(self.key, pyDes.CBC, b"\x01\x02\x03\x04\x05\x06\x07\x08",
pad=None, padmode=pyDes.PAD_PKCS5)
ret = k.decrypt(base64.b64decode(text))
return ret.decode()
def checkNeedCaptcha(self, username):
url = "https://{host}/iap/checkNeedCaptcha?username={username}".format(
host=self.host, username=username)
ret = self.session.get(url)
ret = json.loads(ret.text)
return ret["needCaptcha"]
def generateCaptcha(self):
# url = "https://{host}/iap/generateCaptcha?ltId={client}&codeType=2".format(host=self.host,client=self.client)
# ret = self.session.get(url)
# return ret.content
pass
def getBasicInfo(self):
return self.request("https://{host}/iap/tenant/basicInfo", "{}")
def login(self, username, password, captcha=""):
if "campusphere" in self.loginUrl:
return self.loginIAP(username, password, captcha)
else:
return self.loginAuthserver(username, password, captcha)
def loginIAP(self, username, password, captcha=""):
self.session.headers.update({"X-Requested-With": "XMLHttpRequest"})
ret = self.session.get(
"https://{host}/iap/login?service=https://{host}/portal/login".format(host=self.host)).url
client = ret[ret.find("=")+1:]
ret = self.request("https://{host}/iap/security/lt",
"lt={client}".format(client=client), True, False)
client = ret["result"]["_lt"]
# self.encryptSalt = ret["result"]["_encryptSalt"]
body = {
"username": username,
"password": password,
"lt": client,
"captcha": captcha,
"rememberMe": "true",
"dllt": "",
"mobile": ""
}
ret = self.request("https://{host}/iap/doLogin", body, True, False)
if ret["resultCode"] == "REDIRECT":
self.session.get(ret["url"])
return True
else:
return False
def checkNeedCaptchaAuthServer(self, username):
ret = self.request("http://{host}/authserver/needCaptcha.html?username={username}&pwdEncrypt2=pwdEncryptSalt".format(
username=username), parseJson=False).text
return ret == "true"
def loginAuthserver(self, username, password, captcha=""):
ret = self.request(self.loginUrl, parseJson=False)
body = dict(re.findall(
r'''<input type="hidden" name="(.*?)" value="(.*?)"''', ret.text))
salt = dict(re.findall(
r'''<input type="hidden" id="(.*?)" value="(.*?)"''', ret.text))
body["username"] = username
body["dllt"] = "userNamePasswordLogin"
if "pwdDefaultEncryptSalt" in salt.keys():
body["password"] = self.passwordEncrypt(
password, salt["pwdDefaultEncryptSalt"])
else:
body["password"] = password
ret = self.request(ret.url, body, False, False,
Referer=self.loginUrl).url
print(self.session.cookies)
print("本函数不一定能用。")
return True
def getCollectorList(self):
body = {
"pageSize": 10,
"pageNumber": 1
}
ret = self.request(
"https://{host}/wec-counselor-collector-apps/stu/collector/queryCollectorProcessingList", body)
return ret["datas"]["rows"]
def getNoticeList(self):
body = {
"pageSize": 10,
"pageNumber": 1
}
ret = self.request(
"https://{host}/wec-counselor-stu-apps/stu/notice/queryProcessingNoticeList", body)
return ret["datas"]["rows"]
def confirmNotice(self, wid):
body = {
"wid": wid
}
ret = self.request(
"https://{host}/wec-counselor-stu-apps/stu/notice/confirmNotice", body)
print(ret["message"])
return ret["message"] == "SUCCESS"
def getCollectorDetail(self, collectorWid):
body = {
"collectorWid": collectorWid
}
return self.request("https://{host}/wec-counselor-collector-apps/stu/collector/detailCollector", body)["datas"]
def getCollectorFormFiled(self, formWid, collectorWid):
body = {
"pageSize": 50,
"pageNumber": 1,
"formWid": formWid,
"collectorWid": collectorWid
}
return self.request("https://{host}/wec-counselor-collector-apps/stu/collector/getFormFields", body)["datas"]["rows"]
def submitCollectorForm(self, formWid, collectWid, schoolTaskWid, rows, address):
body = {
"formWid": formWid,
"collectWid": collectWid,
"schoolTaskWid": schoolTaskWid,
"form": rows,
"address": address
}
ret = self.request(
"https://{host}/wec-counselor-collector-apps/stu/collector/submitForm", body)
print(ret["message"])
return ret["message"] == "SUCCESS"
def autoFill(self, rows):
for item in rows:
index = 0
while index < len(item["fieldItems"]):
if item["fieldItems"][index]["isSelected"] == 1:
index = index + 1
else:
item["fieldItems"].pop(index)
def getFormCharac(self, detail):
ret = self.request(detail["content"], parseJson=False, JsonBody=False)
return hashlib.md5(ret.content).digest().hex()
def autoComplete(self, address, dbpath):
collectList = self.getCollectorList()
print(col | lName, url=self.loginUrl))
self.host = re.findall(r"//(.*?)/", se | conditional_block |
DailyCP.py | 201314",
"appVersion": "8.1.13", "model": "红星一号量子计算机", "lon": 0.0, "systemVersion": "初号机", "lat": 0.0}
self.session.headers.update(
{"Cpdaily-Extension": self.encrypt(json.dumps(extension))})
self.setHostBySchoolName(schoolName)
def setHostBySchoolName(self, schoolName):
| (
"https://static.campushoy.com/apicache/tenantListSort")
school = [j for i in ret["data"]
for j in i["datas"] if j["name"] == schoolName]
if len(school) == 0:
print("不支持的学校或者学校名称错误,以下是支持的学校列表")
print(ret)
exit()
ret = self.request(
"https://mobile.campushoy.com/v6/config/guest/tenant/info?ids={ids}".format(ids=school[0]["id"]))
self.loginUrl = ret["data"][0]["ampUrl"]
if ret == "":
print("学校并没有申请入驻今日校园平台")
exit()
print("{name}的登录地址{url}".format(name=schoolName, url=self.loginUrl))
self.host = re.findall(r"//(.*?)/", self.loginUrl)[0]
def encrypt(self, text):
k = pyDes.des(self.key, pyDes.CBC, b"\x01\x02\x03\x04\x05\x06\x07\x08",
pad=None, padmode=pyDes.PAD_PKCS5)
ret = k.encrypt(text)
return base64.b64encode(ret).decode()
def passwordEncrypt(self, text: str, key: str):
def pad(s): return s + (len(key) - len(s) %
len(key)) * chr(len(key) - len(s) % len(key))
def unpad(s): return s[:-ord(s[len(s) - 1:])]
text = pad(
"TdEEGazAXQMBzEAisrYaxRRax5kmnMJnpbKxcE6jxQfWRwP2J78adKYm8WzSkfXJ"+text).encode("utf-8")
aes = AES.new(str.encode(key), AES.MODE_CBC,
str.encode("ya8C45aRrBEn8sZH"))
return base64.b64encode(aes.encrypt(text))
def request(self, url: str, body=None, parseJson=True, JsonBody=True, Referer=None):
url = url.format(host=self.host)
if Referer != None:
self.session.headers.update({"Referer": Referer})
if body == None:
ret = self.session.get(url)
else:
self.session.headers.update(
{"Content-Type": ("application/json" if JsonBody else "application/x-www-form-urlencoded")})
ret = self.session.post(url, data=(
json.dumps(body) if JsonBody else body))
if parseJson:
return json.loads(ret.text)
else:
return ret
def decrypt(self, text):
k = pyDes.des(self.key, pyDes.CBC, b"\x01\x02\x03\x04\x05\x06\x07\x08",
pad=None, padmode=pyDes.PAD_PKCS5)
ret = k.decrypt(base64.b64decode(text))
return ret.decode()
def checkNeedCaptcha(self, username):
url = "https://{host}/iap/checkNeedCaptcha?username={username}".format(
host=self.host, username=username)
ret = self.session.get(url)
ret = json.loads(ret.text)
return ret["needCaptcha"]
def generateCaptcha(self):
# url = "https://{host}/iap/generateCaptcha?ltId={client}&codeType=2".format(host=self.host,client=self.client)
# ret = self.session.get(url)
# return ret.content
pass
def getBasicInfo(self):
return self.request("https://{host}/iap/tenant/basicInfo", "{}")
def login(self, username, password, captcha=""):
if "campusphere" in self.loginUrl:
return self.loginIAP(username, password, captcha)
else:
return self.loginAuthserver(username, password, captcha)
def loginIAP(self, username, password, captcha=""):
self.session.headers.update({"X-Requested-With": "XMLHttpRequest"})
ret = self.session.get(
"https://{host}/iap/login?service=https://{host}/portal/login".format(host=self.host)).url
client = ret[ret.find("=")+1:]
ret = self.request("https://{host}/iap/security/lt",
"lt={client}".format(client=client), True, False)
client = ret["result"]["_lt"]
# self.encryptSalt = ret["result"]["_encryptSalt"]
body = {
"username": username,
"password": password,
"lt": client,
"captcha": captcha,
"rememberMe": "true",
"dllt": "",
"mobile": ""
}
ret = self.request("https://{host}/iap/doLogin", body, True, False)
if ret["resultCode"] == "REDIRECT":
self.session.get(ret["url"])
return True
else:
return False
def checkNeedCaptchaAuthServer(self, username):
ret = self.request("http://{host}/authserver/needCaptcha.html?username={username}&pwdEncrypt2=pwdEncryptSalt".format(
username=username), parseJson=False).text
return ret == "true"
def loginAuthserver(self, username, password, captcha=""):
ret = self.request(self.loginUrl, parseJson=False)
body = dict(re.findall(
r'''<input type="hidden" name="(.*?)" value="(.*?)"''', ret.text))
salt = dict(re.findall(
r'''<input type="hidden" id="(.*?)" value="(.*?)"''', ret.text))
body["username"] = username
body["dllt"] = "userNamePasswordLogin"
if "pwdDefaultEncryptSalt" in salt.keys():
body["password"] = self.passwordEncrypt(
password, salt["pwdDefaultEncryptSalt"])
else:
body["password"] = password
ret = self.request(ret.url, body, False, False,
Referer=self.loginUrl).url
print(self.session.cookies)
print("本函数不一定能用。")
return True
def getCollectorList(self):
body = {
"pageSize": 10,
"pageNumber": 1
}
ret = self.request(
"https://{host}/wec-counselor-collector-apps/stu/collector/queryCollectorProcessingList", body)
return ret["datas"]["rows"]
def getNoticeList(self):
body = {
"pageSize": 10,
"pageNumber": 1
}
ret = self.request(
"https://{host}/wec-counselor-stu-apps/stu/notice/queryProcessingNoticeList", body)
return ret["datas"]["rows"]
def confirmNotice(self, wid):
body = {
"wid": wid
}
ret = self.request(
"https://{host}/wec-counselor-stu-apps/stu/notice/confirmNotice", body)
print(ret["message"])
return ret["message"] == "SUCCESS"
def getCollectorDetail(self, collectorWid):
body = {
"collectorWid": collectorWid
}
return self.request("https://{host}/wec-counselor-collector-apps/stu/collector/detailCollector", body)["datas"]
def getCollectorFormFiled(self, formWid, collectorWid):
body = {
"pageSize": 50,
"pageNumber": 1,
"formWid": formWid,
"collectorWid": collectorWid
}
return self.request("https://{host}/wec-counselor-collector-apps/stu/collector/getFormFields", body)["datas"]["rows"]
def submitCollectorForm(self, formWid, collectWid, schoolTaskWid, rows, address):
body = {
"formWid": formWid,
"collectWid": collectWid,
"schoolTaskWid": schoolTaskWid,
"form": rows,
"address": address
}
ret = self.request(
"https://{host}/wec-counselor-collector-apps/stu/collector/submitForm", body)
print(ret["message"])
return ret["message"] == "SUCCESS"
def autoFill(self, rows):
for item in rows:
index = 0
while index < len(item["fieldItems"]):
if item["fieldItems"][index]["isSelected"] == 1:
index = index + 1
else:
item["fieldItems"].pop(index)
def getFormCharac(self, detail):
ret = self.request(detail["content"], parseJson=False, JsonBody=False)
return hashlib.md5(ret.content).digest().hex()
def autoComplete(self, address, dbpath):
collectList = self.getCollectorList()
print(collectList)
| ret = self.request | identifier_name |
DailyCP.py | .MODE_CBC,
str.encode("ya8C45aRrBEn8sZH"))
return base64.b64encode(aes.encrypt(text))
def request(self, url: str, body=None, parseJson=True, JsonBody=True, Referer=None):
url = url.format(host=self.host)
if Referer != None:
self.session.headers.update({"Referer": Referer})
if body == None:
ret = self.session.get(url)
else:
self.session.headers.update(
{"Content-Type": ("application/json" if JsonBody else "application/x-www-form-urlencoded")})
ret = self.session.post(url, data=(
json.dumps(body) if JsonBody else body))
if parseJson:
return json.loads(ret.text)
else:
return ret
def decrypt(self, text):
k = pyDes.des(self.key, pyDes.CBC, b"\x01\x02\x03\x04\x05\x06\x07\x08",
pad=None, padmode=pyDes.PAD_PKCS5)
ret = k.decrypt(base64.b64decode(text))
return ret.decode()
def checkNeedCaptcha(self, username):
url = "https://{host}/iap/checkNeedCaptcha?username={username}".format(
host=self.host, username=username)
ret = self.session.get(url)
ret = json.loads(ret.text)
return ret["needCaptcha"]
def generateCaptcha(self):
# url = "https://{host}/iap/generateCaptcha?ltId={client}&codeType=2".format(host=self.host,client=self.client)
# ret = self.session.get(url)
# return ret.content
pass
def getBasicInfo(self):
return self.request("https://{host}/iap/tenant/basicInfo", "{}")
def login(self, username, password, captcha=""):
if "campusphere" in self.loginUrl:
return self.loginIAP(username, password, captcha)
else:
return self.loginAuthserver(username, password, captcha)
def loginIAP(self, username, password, captcha=""):
self.session.headers.update({"X-Requested-With": "XMLHttpRequest"})
ret = self.session.get(
"https://{host}/iap/login?service=https://{host}/portal/login".format(host=self.host)).url
client = ret[ret.find("=")+1:]
ret = self.request("https://{host}/iap/security/lt",
"lt={client}".format(client=client), True, False)
client = ret["result"]["_lt"]
# self.encryptSalt = ret["result"]["_encryptSalt"]
body = {
"username": username,
"password": password,
"lt": client,
"captcha": captcha,
"rememberMe": "true",
"dllt": "",
"mobile": ""
}
ret = self.request("https://{host}/iap/doLogin", body, True, False)
if ret["resultCode"] == "REDIRECT":
self.session.get(ret["url"])
return True
else:
return False
def checkNeedCaptchaAuthServer(self, username):
ret = self.request("http://{host}/authserver/needCaptcha.html?username={username}&pwdEncrypt2=pwdEncryptSalt".format(
username=username), parseJson=False).text
return ret == "true"
def loginAuthserver(self, username, password, captcha=""):
ret = self.request(self.loginUrl, parseJson=False)
body = dict(re.findall(
r'''<input type="hidden" name="(.*?)" value="(.*?)"''', ret.text))
salt = dict(re.findall(
r'''<input type="hidden" id="(.*?)" value="(.*?)"''', ret.text))
body["username"] = username
body["dllt"] = "userNamePasswordLogin"
if "pwdDefaultEncryptSalt" in salt.keys():
body["password"] = self.passwordEncrypt(
password, salt["pwdDefaultEncryptSalt"])
else:
body["password"] = password
ret = self.request(ret.url, body, False, False,
Referer=self.loginUrl).url
print(self.session.cookies)
print("本函数不一定能用。")
return True
def getCollectorList(self):
body = {
"pageSize": 10,
"pageNumber": 1
}
ret = self.request(
"https://{host}/wec-counselor-collector-apps/stu/collector/queryCollectorProcessingList", body)
return ret["datas"]["rows"]
def getNoticeList(self):
body = {
"pageSize": 10,
"pageNumber": 1
}
ret = self.request(
"https://{host}/wec-counselor-stu-apps/stu/notice/queryProcessingNoticeList", body)
return ret["datas"]["rows"]
def confirmNotice(self, wid):
body = {
"wid": wid
}
ret = self.request(
"https://{host}/wec-counselor-stu-apps/stu/notice/confirmNotice", body)
print(ret["message"])
return ret["message"] == "SUCCESS"
def getCollectorDetail(self, collectorWid):
body = {
"collectorWid": collectorWid
}
return self.request("https://{host}/wec-counselor-collector-apps/stu/collector/detailCollector", body)["datas"]
def getCollectorFormFiled(self, formWid, collectorWid):
body = {
"pageSize": 50,
"pageNumber": 1,
"formWid": formWid,
"collectorWid": collectorWid
}
return self.request("https://{host}/wec-counselor-collector-apps/stu/collector/getFormFields", body)["datas"]["rows"]
def submitCollectorForm(self, formWid, collectWid, schoolTaskWid, rows, address):
body = {
"formWid": formWid,
"collectWid": collectWid,
"schoolTaskWid": schoolTaskWid,
"form": rows,
"address": address
}
ret = self.request(
"https://{host}/wec-counselor-collector-apps/stu/collector/submitForm", body)
print(ret["message"])
return ret["message"] == "SUCCESS"
def autoFill(self, rows):
for item in rows:
index = 0
while index < len(item["fieldItems"]):
if item["fieldItems"][index]["isSelected"] == 1:
index = index + 1
else:
item["fieldItems"].pop(index)
def getFormCharac(self, detail):
ret = self.request(detail["content"], parseJson=False, JsonBody=False)
return hashlib.md5(ret.content).digest().hex()
def autoComplete(self, address, dbpath):
collectList = self.getCollectorList()
print(collectList)
for item in collectList:
# if item["isHandled"] == True:continue
detail = self.getCollectorDetail(item["wid"])
form = self.getCollectorFormFiled(
detail["collector"]["formWid"], detail["collector"]["wid"])
formpath = "{dbpath}/{charac}.json".format(
charac=self.getFormCharac(item), dbpath=dbpath)
if os.path.exists(formpath):
with open(formpath, "rb") as file:
def find(l, key_valueList: list):
for item in l:
b = True
for k_v in key_valueList:
if item[k_v[0]] != k_v[1]:
b = False
if b:
return item
return None
newForm = form
form = json.loads(file.read().decode("utf-8"))
for item in newForm:
l = find(form, [['title', item['title']], [
'description', item['description']]])
item['value'] = l['value']
for fieldItemsList in item['fieldItems']:
field = find(l['fieldItems'], [
['content', fieldItemsList['content']]])
fieldItemsList['isSelected'] = field['isSelected']
form = newForm
self.autoFill(form)
self.submitCollectorForm(detail["collector"]["formWid"], detail["collector"]
["wid"], detail["collector"]["schoolTaskWid"], form, address)
else:
with open(formpath, "wb") as file:
file.write(json.dumps(
form, ensure_ascii=False).encode("utf-8"))
print("请手动填写{formpath},之后重新运行脚本".format(formpath=formpath))
exit()
confirmList = self.getNoticeList()
print(confirmList)
for item in confirmList:
self.confirmNotice(item["noticeWid"])
if __name__ == "__main__":
if len(sys.argv) != 6:
print("python3 DailyCp.py 学校全名 学号 密码 定位地址 formdb文件夹绝对路径")
exit()
app = DailyCP(sys.argv[1])
if not app.login(sys.argv[2], sys.argv[3]):
exit() | app.autoComplete(sys.argv[4], sys.argv[5])
# Author:HuangXu,FengXinYang,ZhouYuYang. | random_line_split | |
yolo.py | 1:, p] == 0)[0][0]
velocity_and_view_time[0,p]=video_fps
velocity_and_view_time[1,p] = (np.array(polygon_dist_list_vel_mode)[p] * np.array(pixel_to_dist_ratio)[p] * 3.6) / (number_of_frames[0][p] / video_fps)
if(itr_number>=num_of_frames_for_mean):
mean_of_points_in_polygon=np.round(np.transpose(car_for_each_polygon_list)[:,0:num_of_frames_for_mean].sum(axis=1)/num_of_frames_for_mean).astype(int)
mean_polygon_density=np.sum(polygon_density[0:10],axis=0)/num_of_frames_for_mean
else:
mean_of_points_in_polygon =number_of_point_in_polygons[0]
mean_polygon_density=polygon_density[-1]
draw = ImageDraw.Draw(image)
font_number_of_vehicles = font
font_number_of_vehicles.size = 40
rectangle_width=int(image.size[0] / 7)
space_between_rect=0
if len(polygon_list)>1:
space_between_rect = int((image.size[0] - len(polygon_list)*rectangle_width-40)/(len(polygon_list)-1))
if th_mode == "counting":
mean_polygon=mean_of_points_in_polygon
elif th_mode == "density":
mean_polygon = mean_polygon_density
else:
mean_polygon = velocity_and_view_time[1,:]
for c in range(len(polygon_list)):
R,G,B=color_result(mean_polygon[c],th_low,th_high)
draw.rectangle([tuple([10+c*(rectangle_width+space_between_rect), 60]), tuple([10 + c*(rectangle_width+space_between_rect)+rectangle_width, 60 + 40])], fill=(R, G, B))
draw.rectangle([tuple([10 + c * (rectangle_width + space_between_rect)+rectangle_width, 60]),tuple([10 + c * (rectangle_width + space_between_rect)+rectangle_width+20, 60 + 40])],fill=polygon_color_list[c])
if th_mode == "counting":
draw.text([10+c*(rectangle_width+space_between_rect), 65], "vehicles:" + str(mean_polygon[c]), fill=(0, 0, 0),font=font_number_of_vehicles)
elif th_mode == "density":
draw.text([10 + c * (rectangle_width + space_between_rect), 65],'density:' + str(int(mean_polygon[c] * 100)) + '%', fill=(0, 0, 0), font=font_number_of_vehicles)
else:
if (mean_polygon[c]!=0) or (velocity_and_view_time[0,c]>0):
draw.text([10 + c * (rectangle_width + space_between_rect), 65],'velocity:' + str(mean_polygon[c]) + 'kmh', fill=(0, 0, 0),font=font_number_of_vehicles)
velocity_and_view_time[0,c]-=1
if velocity_and_view_time[0,c] ==0:
velocity_and_view_time[1,c]=0
else:
draw.text([10 + c * (rectangle_width + space_between_rect), 65], 'velocity:', fill=(0, 0, 0),font=font_number_of_vehicles)
del draw
end = timer()
fps=round(1/(end - start))
return fps, out_prediction, image
def close_session(self):
self.sess.close()
def detect_video(yolo, video_path,th_mode,th_low,th_high, define_regions, output_path="",input_path=""):
car_for_each_polygon_list=[]
polygon_density=[]
pixel_to_dist_ratio=[]
polygon_dist_list_vel_mode=[]
velocity_and_view_time = np.zeros((2, len(polygon_color_list)), dtype=int)
vid = cv2.VideoCapture(video_path)
file_name = input_path[:input_path.rfind(".")] + ".txt"
if not vid.isOpened():
raise IOError("Couldn't open webcam or video")
video_FourCC = cv2.VideoWriter_fourcc(*'mp4v')
video_fps = vid.get(cv2.CAP_PROP_FPS)
video_size = (int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)), int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT)))
isOutput = True if output_path != "" else False
if isOutput:
print('Processing {} with frame size {} '.format(os.path.basename(video_path), video_size))
out = cv2.VideoWriter(output_path, video_FourCC, video_fps, video_size)
n=0
first_frame=True
global poly_index
poly_index=0
while vid.isOpened():
n+=1
return_value, frame = vid.read()
if first_frame==True:
if define_regions == 1:
if th_mode=="velocity":
cv2.putText(frame, "Please define velocity regions (BR,BL,TL,TR) order", (int(frame.shape[1] / 4), 40), cv2.FONT_HERSHEY_SIMPLEX,
1, (255, 0, 0), 2)
else:
cv2.putText(frame,"Please define regions",(int(frame.shape[1]/3),40),cv2.FONT_HERSHEY_SIMPLEX,1,(255,0,0),2)
cv2.namedWindow('first_frame', cv2.WINDOW_NORMAL)
cv2.setMouseCallback('first_frame', Mouse_Callback,param=frame)
while (1):
cv2.imshow('first_frame', frame/255)
k=cv2.waitKey(10)
if k==97:
polygon_list.append(int(len(right_clicks)-np.sum(polygon_list)))
pts = np.array(right_clicks[ np.sum(polygon_list[0:poly_index], dtype=int):np.sum(polygon_list[0:poly_index],dtype=int) + polygon_list[poly_index]], np.int32)
B, G, R = polygon_color_list[poly_index]
cv2.polylines(frame, [pts], True, (R,G,B), thickness=2)
if th_mode == "velocity":
polygon_dist_list_vel_mode.append(np.linalg.norm( ((pts[0,:]+pts[1,:])/2) - ((pts[2,:]+pts[3,:])/2)))
cv2.imshow('first_frame', frame / 255)
k = cv2.waitKey(10)
pixel_to_dist_ratio.append(float(input('Please insert pixel to real distance ratio:')))
poly_index+=1
if k == 27:
polygon_list.append(int(len(right_clicks) - np.sum(polygon_list)))
pts = np.array(right_clicks[np.sum(polygon_list[0:poly_index], dtype=int):np.sum(polygon_list[0:poly_index],dtype=int) + polygon_list[poly_index]], np.int32)
B, G, R = polygon_color_list[poly_index]
cv2.polylines(frame, [pts], True, (R, G, B), thickness=2)
cv2.imshow('first_frame', frame / 255)
cv2.waitKey(10)
if th_mode == "velocity":
polygon_dist_list_vel_mode.append(np.linalg.norm(((pts[0, :] + pts[1, :]) / 2) - ((pts[2, :] + pts[3, :]) / 2)))
pixel_to_dist_ratio.append(float(input('Please insert pixel to real distance ratio:')))
cv2.destroyAllWindows()
break
with open(file_name, "w") as txt_file:
txt_file.write(str(right_clicks)+'\n')
txt_file.write(str(polygon_list))
if th_mode=="velocity":
txt_file.write('\n'+str(pixel_to_dist_ratio)+'\n')
txt_file.write(str(polygon_dist_list_vel_mode))
txt_file.close()
else:
with open(file_name, "r") as txt_file:
right_clicks.extend(ast.literal_eval(txt_file.readline()))
polygon_list.extend(ast.literal_eval(txt_file.readline()))
if th_mode=="velocity":
pixel_to_dist_ratio.extend(ast.literal_eval(txt_file.readline()))
polygon_dist_list_vel_mode.extend(ast.literal_eval(txt_file.readline()))
first_frame = False
if not return_value:
break
frame = frame[:,:,::-1]
image = Image.fromarray(frame)
fps_, out_pred, image = yolo.detect_image(image,n,th_mode,car_for_each_polygon_list,polygon_density,pixel_to_dist_ratio,polygon_dist_list_vel_mode,video_fps,velocity_and_view_time,th_low,th_high)
result = np.asarray(image)
fps = "FPS: " + str(fps_)
cv2.putText(result, text=fps, org=(3, 15), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
fontScale=0.50, color=(255, 0, 0), thickness=2)
if isOutput:
out.write(result[:,:,::-1])
vid.release()
out.release()
def Mouse_Callback(event, x, y, flags ,params):
if event==cv2.EVENT_LBUTTONDBLCLK:
global right_clicks
right_clicks.append([x,y])
B,G,R=polygon_color_list[poly_index]
if poly_index==0:
| pts=np.array(right_clicks) | conditional_block | |
yolo.py | (cls, n):
if n in cls._defaults:
return cls._defaults[n]
else:
return "Unrecognized attribute name '" + n + "'"
def __init__(self, **kwargs):
self.__dict__.update(self._defaults)
self.__dict__.update(kwargs)
self.class_names = self._get_class()
self.anchors = self._get_anchors()
self.sess = K.get_session()
self.boxes, self.scores, self.classes = self.generate()
def _get_class(self):
classes_path = os.path.expanduser(self.classes_path)
with open(classes_path) as f:
class_names = f.readlines()
class_names = [c.strip() for c in class_names]
return class_names
def _get_anchors(self):
anchors_path = os.path.expanduser(self.anchors_path)
with open(anchors_path) as f:
anchors = f.readline()
anchors = [float(x) for x in anchors.split(',')]
return np.array(anchors).reshape(-1, 2)
def generate(self):
model_path = os.path.expanduser(self.model_path)
assert model_path.endswith('.h5'), 'Keras model or weights must be a .h5 file.'
start = timer()
num_anchors = len(self.anchors)
num_classes = len(self.class_names)
self.yolo_model = yolo_body(Input(shape=(None,None,3)), num_anchors//3, num_classes)
self.yolo_model.load_weights(self.model_path)
end = timer()
print('{} model, anchors, and classes loaded in {:.2f}sec.'.format(model_path, end-start))
self.colors = ['GreenYellow']
self.input_image_shape = K.placeholder(shape=(2, ))
boxes, scores, classes = evaluation(self.yolo_model.output, self.anchors,
len(self.class_names), self.input_image_shape,
score_threshold=self.score, iou_threshold=self.iou)
return boxes, scores, classes
def detect_image(self, image,itr_number,th_mode,car_for_each_polygon_list,polygon_density,pixel_to_dist_ratio,polygon_dist_list_vel_mode,video_fps,velocity_and_view_time,th_low,th_high):
start = timer()
num_of_frames_for_mean=10
number_of_point_in_polygons=np.zeros((1,len(polygon_list)),dtype=int)
vehicles_area_in_polygon = np.zeros((1, len(polygon_list)), dtype=float)
number_of_frames=np.zeros((1,len(polygon_list)),dtype=int)
if self.model_image_size != (None, None):
assert self.model_image_size[0]%32 == 0, 'Multiples of 32 required'
assert self.model_image_size[1]%32 == 0, 'Multiples of 32 required'
boxed_image = letterbox_image(image, tuple(reversed(self.model_image_size)))
image_data = np.array(boxed_image, dtype='float32')
image_data /= 255.
image_data = np.expand_dims(image_data, 0)
out_boxes, out_scores, out_classes = self.sess.run(
[self.boxes, self.scores, self.classes],
feed_dict={
self.yolo_model.input: image_data,
self.input_image_shape: [image.size[1], image.size[0]],
K.learning_phase(): 0
})
if(itr_number==1):
for i in range(len(polygon_list)):
pts = np.array(right_clicks[np.sum(polygon_list[0:i], dtype=int):np.sum(polygon_list[0:i], dtype=int) + polygon_list[i]], np.int32)
a1 = np.empty((polygon_list[i],), dtype=object)
a1[:] = [tuple(j) for j in pts]
polygon = Polygon(a1.tolist())
polygon_area_list.append(polygon.area)
Polygon_object_list.append(polygon)
for i in range(len(polygon_list)):
pts = np.array(right_clicks[np.sum(polygon_list[0:i],dtype=int):np.sum(polygon_list[0:i],dtype=int)+polygon_list[i]], np.int32)
image=cv2.polylines(np.array(image), [pts], True, polygon_color_list[i],thickness=2)
image = Image.fromarray(image)
out_prediction = []
font_path = os.path.join(os.path.dirname(__file__),'font/FiraMono-Medium.otf')
font = ImageFont.truetype(font=font_path,
size=np.floor(3e-2 * image.size[1] + 0.5).astype('int32'))
thickness = (image.size[0] + image.size[1]) // 400
for i, c in reversed(list(enumerate(out_classes))):
box = out_boxes[i]
curr_box_x_center=(box[3]+box[1])/2
curr_box_y_center=(box[2]+box[0])/2
point=Point(curr_box_x_center,curr_box_y_center)
index=np.where([poly.contains(point) for poly in Polygon_object_list])[0]
if len(index)==0:
continue
number_of_point_in_polygons[0,index]+=1
curr_area=(box[3]-box[1])*(box[2]-box[0])
if 2400<curr_area<=4500:
curr_area*=0.8
elif (4500<curr_area<=9000):
curr_area *= 0.7
elif (9000<curr_area<=13000):
curr_area *= 0.6
elif (13000<curr_area<=18000):
curr_area *= 0.5
elif (curr_area>18000):
curr_area *= 0.4
vehicles_area_in_polygon[0,index]+=curr_area
score = out_scores[i]
draw = ImageDraw.Draw(image)
top, left, bottom, right = box
top = max(0, np.floor(top + 0.5).astype('int32'))
left = max(0, np.floor(left + 0.5).astype('int32'))
bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))
right = min(image.size[0], np.floor(right + 0.5).astype('int32'))
if top > image.size[1] or right > image.size[0]:
continue
out_prediction.append([left, top, right, bottom, c, score])
for i in range(thickness):
draw.rectangle(
[left + i, top + i, right - i, bottom - i],
outline=polygon_color_list[index[0]])
del draw
car_for_each_polygon_list.insert(0,number_of_point_in_polygons[0])
polygon_density.insert(0,vehicles_area_in_polygon[0] / polygon_area_list)
if th_mode=="velocity" and itr_number>1:
for p in range(len(polygon_list)):
if car_for_each_polygon_list[0][p]==0 and car_for_each_polygon_list[1][p]==1:
number_of_frames[0,p]=np.where(np.array(car_for_each_polygon_list)[1:, p] == 0)[0][0]
velocity_and_view_time[0,p]=video_fps
velocity_and_view_time[1,p] = (np.array(polygon_dist_list_vel_mode)[p] * np.array(pixel_to_dist_ratio)[p] * 3.6) / (number_of_frames[0][p] / video_fps)
if(itr_number>=num_of_frames_for_mean):
mean_of_points_in_polygon=np.round(np.transpose(car_for_each_polygon_list)[:,0:num_of_frames_for_mean].sum(axis=1)/num_of_frames_for_mean).astype(int)
mean_polygon_density=np.sum(polygon_density[0:10],axis=0)/num_of_frames_for_mean
else:
mean_of_points_in_polygon =number_of_point_in_polygons[0]
mean_polygon_density=polygon_density[-1]
draw = ImageDraw.Draw(image)
font_number_of_vehicles = font
font_number_of_vehicles.size = 40
rectangle_width=int(image.size[0] / 7)
space_between_rect=0
if len(polygon_list)>1:
space_between_rect = int((image.size[0] - len(polygon_list)*rectangle_width-40)/(len(polygon_list)-1))
if th_mode == "counting":
mean_polygon=mean_of_points_in_polygon
elif th_mode == "density":
mean_polygon = mean_polygon_density
else:
mean_polygon = velocity_and_view_time[1,:]
for c in range(len(polygon_list)):
R,G,B=color_result(mean_polygon[c],th_low,th_high)
draw.rectangle([tuple([10+c*(rectangle_width+space_between_rect), 60]), tuple([10 + c*(rectangle_width+space_between_rect)+rectangle_width, 60 + 40])], fill=(R, G, B))
draw.rectangle([tuple([10 + c * (rectangle_width + space_between_rect)+rectangle_width, 60]),tuple([10 + c * (rectangle_width + space_between_rect)+rectangle_width+20, 60 + 40])],fill=polygon_color_list[c])
if th_mode == | get_defaults | identifier_name | |
yolo.py | start = timer()
num_anchors = len(self.anchors)
num_classes = len(self.class_names)
self.yolo_model = yolo_body(Input(shape=(None,None,3)), num_anchors//3, num_classes)
self.yolo_model.load_weights(self.model_path)
end = timer()
print('{} model, anchors, and classes loaded in {:.2f}sec.'.format(model_path, end-start))
self.colors = ['GreenYellow']
self.input_image_shape = K.placeholder(shape=(2, ))
boxes, scores, classes = evaluation(self.yolo_model.output, self.anchors,
len(self.class_names), self.input_image_shape,
score_threshold=self.score, iou_threshold=self.iou)
return boxes, scores, classes
def detect_image(self, image,itr_number,th_mode,car_for_each_polygon_list,polygon_density,pixel_to_dist_ratio,polygon_dist_list_vel_mode,video_fps,velocity_and_view_time,th_low,th_high):
start = timer()
num_of_frames_for_mean=10
number_of_point_in_polygons=np.zeros((1,len(polygon_list)),dtype=int)
vehicles_area_in_polygon = np.zeros((1, len(polygon_list)), dtype=float)
number_of_frames=np.zeros((1,len(polygon_list)),dtype=int)
if self.model_image_size != (None, None):
assert self.model_image_size[0]%32 == 0, 'Multiples of 32 required'
assert self.model_image_size[1]%32 == 0, 'Multiples of 32 required'
boxed_image = letterbox_image(image, tuple(reversed(self.model_image_size)))
image_data = np.array(boxed_image, dtype='float32')
image_data /= 255.
image_data = np.expand_dims(image_data, 0)
out_boxes, out_scores, out_classes = self.sess.run(
[self.boxes, self.scores, self.classes],
feed_dict={
self.yolo_model.input: image_data,
self.input_image_shape: [image.size[1], image.size[0]],
K.learning_phase(): 0
})
if(itr_number==1):
for i in range(len(polygon_list)):
pts = np.array(right_clicks[np.sum(polygon_list[0:i], dtype=int):np.sum(polygon_list[0:i], dtype=int) + polygon_list[i]], np.int32)
a1 = np.empty((polygon_list[i],), dtype=object)
a1[:] = [tuple(j) for j in pts]
polygon = Polygon(a1.tolist())
polygon_area_list.append(polygon.area)
Polygon_object_list.append(polygon)
for i in range(len(polygon_list)):
pts = np.array(right_clicks[np.sum(polygon_list[0:i],dtype=int):np.sum(polygon_list[0:i],dtype=int)+polygon_list[i]], np.int32)
image=cv2.polylines(np.array(image), [pts], True, polygon_color_list[i],thickness=2)
image = Image.fromarray(image)
out_prediction = []
font_path = os.path.join(os.path.dirname(__file__),'font/FiraMono-Medium.otf')
font = ImageFont.truetype(font=font_path,
size=np.floor(3e-2 * image.size[1] + 0.5).astype('int32'))
thickness = (image.size[0] + image.size[1]) // 400
for i, c in reversed(list(enumerate(out_classes))):
box = out_boxes[i]
curr_box_x_center=(box[3]+box[1])/2
curr_box_y_center=(box[2]+box[0])/2
point=Point(curr_box_x_center,curr_box_y_center)
index=np.where([poly.contains(point) for poly in Polygon_object_list])[0]
if len(index)==0:
continue
number_of_point_in_polygons[0,index]+=1
curr_area=(box[3]-box[1])*(box[2]-box[0])
if 2400<curr_area<=4500:
curr_area*=0.8
elif (4500<curr_area<=9000):
curr_area *= 0.7
elif (9000<curr_area<=13000):
curr_area *= 0.6
elif (13000<curr_area<=18000):
curr_area *= 0.5
elif (curr_area>18000):
curr_area *= 0.4
vehicles_area_in_polygon[0,index]+=curr_area
score = out_scores[i]
draw = ImageDraw.Draw(image)
top, left, bottom, right = box
top = max(0, np.floor(top + 0.5).astype('int32'))
left = max(0, np.floor(left + 0.5).astype('int32'))
bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))
right = min(image.size[0], np.floor(right + 0.5).astype('int32'))
if top > image.size[1] or right > image.size[0]:
continue
out_prediction.append([left, top, right, bottom, c, score])
for i in range(thickness):
draw.rectangle(
[left + i, top + i, right - i, bottom - i],
outline=polygon_color_list[index[0]])
del draw
car_for_each_polygon_list.insert(0,number_of_point_in_polygons[0])
polygon_density.insert(0,vehicles_area_in_polygon[0] / polygon_area_list)
if th_mode=="velocity" and itr_number>1:
for p in range(len(polygon_list)):
if car_for_each_polygon_list[0][p]==0 and car_for_each_polygon_list[1][p]==1:
number_of_frames[0,p]=np.where(np.array(car_for_each_polygon_list)[1:, p] == 0)[0][0]
velocity_and_view_time[0,p]=video_fps
velocity_and_view_time[1,p] = (np.array(polygon_dist_list_vel_mode)[p] * np.array(pixel_to_dist_ratio)[p] * 3.6) / (number_of_frames[0][p] / video_fps)
if(itr_number>=num_of_frames_for_mean):
mean_of_points_in_polygon=np.round(np.transpose(car_for_each_polygon_list)[:,0:num_of_frames_for_mean].sum(axis=1)/num_of_frames_for_mean).astype(int)
mean_polygon_density=np.sum(polygon_density[0:10],axis=0)/num_of_frames_for_mean
else:
mean_of_points_in_polygon =number_of_point_in_polygons[0]
mean_polygon_density=polygon_density[-1]
draw = ImageDraw.Draw(image)
font_number_of_vehicles = font
font_number_of_vehicles.size = 40
rectangle_width=int(image.size[0] / 7)
space_between_rect=0
if len(polygon_list)>1:
space_between_rect = int((image.size[0] - len(polygon_list)*rectangle_width-40)/(len(polygon_list)-1))
if th_mode == "counting":
mean_polygon=mean_of_points_in_polygon
elif th_mode == "density":
mean_polygon = mean_polygon_density
else:
mean_polygon = velocity_and_view_time[1,:]
for c in range(len(polygon_list)): | if th_mode == "counting":
draw.text([10+c*(rectangle_width+space_between_rect), 65], "vehicles:" + str(mean_polygon[c]), fill=(0, 0, 0),font=font_number_of_vehicles)
elif th_mode == "density":
draw.text([10 + c * (rectangle_width + space_between_rect), 65],'density:' + str(int(mean_polygon[c] * 100)) + '%', fill=(0, 0, 0), font=font_number_of_vehicles)
else:
if (mean_polygon[c]!=0) or (velocity_and_view_time[0,c]>0):
draw.text([10 + c * (rectangle_width + space_between_rect), 65],'velocity:' + str(mean_polygon[c]) + 'kmh', fill=(0, 0, 0),font=font_number_of_vehicles)
velocity_and_view_time[0,c]-=1
if velocity_and_view_time[0,c] ==0:
velocity_and_view_time[1,c]=0
else:
draw.text([10 + c * (rectangle_width + space_between_rect), 65], 'velocity:', fill=(0, 0, 0),font=font_number_of | R,G,B=color_result(mean_polygon[c],th_low,th_high)
draw.rectangle([tuple([10+c*(rectangle_width+space_between_rect), 60]), tuple([10 + c*(rectangle_width+space_between_rect)+rectangle_width, 60 + 40])], fill=(R, G, B))
draw.rectangle([tuple([10 + c * (rectangle_width + space_between_rect)+rectangle_width, 60]),tuple([10 + c * (rectangle_width + space_between_rect)+rectangle_width+20, 60 + 40])],fill=polygon_color_list[c]) | random_line_split |
yolo.py |
def detect_image(self, image,itr_number,th_mode,car_for_each_polygon_list,polygon_density,pixel_to_dist_ratio,polygon_dist_list_vel_mode,video_fps,velocity_and_view_time,th_low,th_high):
start = timer()
num_of_frames_for_mean=10
number_of_point_in_polygons=np.zeros((1,len(polygon_list)),dtype=int)
vehicles_area_in_polygon = np.zeros((1, len(polygon_list)), dtype=float)
number_of_frames=np.zeros((1,len(polygon_list)),dtype=int)
if self.model_image_size != (None, None):
assert self.model_image_size[0]%32 == 0, 'Multiples of 32 required'
assert self.model_image_size[1]%32 == 0, 'Multiples of 32 required'
boxed_image = letterbox_image(image, tuple(reversed(self.model_image_size)))
image_data = np.array(boxed_image, dtype='float32')
image_data /= 255.
image_data = np.expand_dims(image_data, 0)
out_boxes, out_scores, out_classes = self.sess.run(
[self.boxes, self.scores, self.classes],
feed_dict={
self.yolo_model.input: image_data,
self.input_image_shape: [image.size[1], image.size[0]],
K.learning_phase(): 0
})
if(itr_number==1):
for i in range(len(polygon_list)):
pts = np.array(right_clicks[np.sum(polygon_list[0:i], dtype=int):np.sum(polygon_list[0:i], dtype=int) + polygon_list[i]], np.int32)
a1 = np.empty((polygon_list[i],), dtype=object)
a1[:] = [tuple(j) for j in pts]
polygon = Polygon(a1.tolist())
polygon_area_list.append(polygon.area)
Polygon_object_list.append(polygon)
for i in range(len(polygon_list)):
pts = np.array(right_clicks[np.sum(polygon_list[0:i],dtype=int):np.sum(polygon_list[0:i],dtype=int)+polygon_list[i]], np.int32)
image=cv2.polylines(np.array(image), [pts], True, polygon_color_list[i],thickness=2)
image = Image.fromarray(image)
out_prediction = []
font_path = os.path.join(os.path.dirname(__file__),'font/FiraMono-Medium.otf')
font = ImageFont.truetype(font=font_path,
size=np.floor(3e-2 * image.size[1] + 0.5).astype('int32'))
thickness = (image.size[0] + image.size[1]) // 400
for i, c in reversed(list(enumerate(out_classes))):
box = out_boxes[i]
curr_box_x_center=(box[3]+box[1])/2
curr_box_y_center=(box[2]+box[0])/2
point=Point(curr_box_x_center,curr_box_y_center)
index=np.where([poly.contains(point) for poly in Polygon_object_list])[0]
if len(index)==0:
continue
number_of_point_in_polygons[0,index]+=1
curr_area=(box[3]-box[1])*(box[2]-box[0])
if 2400<curr_area<=4500:
curr_area*=0.8
elif (4500<curr_area<=9000):
curr_area *= 0.7
elif (9000<curr_area<=13000):
curr_area *= 0.6
elif (13000<curr_area<=18000):
curr_area *= 0.5
elif (curr_area>18000):
curr_area *= 0.4
vehicles_area_in_polygon[0,index]+=curr_area
score = out_scores[i]
draw = ImageDraw.Draw(image)
top, left, bottom, right = box
top = max(0, np.floor(top + 0.5).astype('int32'))
left = max(0, np.floor(left + 0.5).astype('int32'))
bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))
right = min(image.size[0], np.floor(right + 0.5).astype('int32'))
if top > image.size[1] or right > image.size[0]:
continue
out_prediction.append([left, top, right, bottom, c, score])
for i in range(thickness):
draw.rectangle(
[left + i, top + i, right - i, bottom - i],
outline=polygon_color_list[index[0]])
del draw
car_for_each_polygon_list.insert(0,number_of_point_in_polygons[0])
polygon_density.insert(0,vehicles_area_in_polygon[0] / polygon_area_list)
if th_mode=="velocity" and itr_number>1:
for p in range(len(polygon_list)):
if car_for_each_polygon_list[0][p]==0 and car_for_each_polygon_list[1][p]==1:
number_of_frames[0,p]=np.where(np.array(car_for_each_polygon_list)[1:, p] == 0)[0][0]
velocity_and_view_time[0,p]=video_fps
velocity_and_view_time[1,p] = (np.array(polygon_dist_list_vel_mode)[p] * np.array(pixel_to_dist_ratio)[p] * 3.6) / (number_of_frames[0][p] / video_fps)
if(itr_number>=num_of_frames_for_mean):
mean_of_points_in_polygon=np.round(np.transpose(car_for_each_polygon_list)[:,0:num_of_frames_for_mean].sum(axis=1)/num_of_frames_for_mean).astype(int)
mean_polygon_density=np.sum(polygon_density[0:10],axis=0)/num_of_frames_for_mean
else:
mean_of_points_in_polygon =number_of_point_in_polygons[0]
mean_polygon_density=polygon_density[-1]
draw = ImageDraw.Draw(image)
font_number_of_vehicles = font
font_number_of_vehicles.size = 40
rectangle_width=int(image.size[0] / 7)
space_between_rect=0
if len(polygon_list)>1:
space_between_rect = int((image.size[0] - len(polygon_list)*rectangle_width-40)/(len(polygon_list)-1))
if th_mode == "counting":
mean_polygon=mean_of_points_in_polygon
elif th_mode == "density":
mean_polygon = mean_polygon_density
else:
mean_polygon = velocity_and_view_time[1,:]
for c in range(len(polygon_list)):
R,G,B=color_result(mean_polygon[c],th_low,th_high)
draw.rectangle([tuple([10+c*(rectangle_width+space_between_rect), 60]), tuple([10 + c*(rectangle_width+space_between_rect)+rectangle_width, 60 + 40])], fill=(R, G, B))
draw.rectangle([tuple([10 + c * (rectangle_width + space_between_rect)+rectangle_width, 60]),tuple([10 + c * (rectangle_width + space_between_rect)+rectangle_width+20, 60 + 40])],fill=polygon_color_list[c])
if th_mode == "counting":
draw.text([10+c*(rectangle_width+space_between_rect), 65], "vehicles:" + str(mean_polygon[c]), fill=(0, 0, 0),font=font_number_of_vehicles)
elif th_mode == "density":
draw.text([10 + c * (rectangle_width + space_between_rect), 65],'density:' + str(int(mean_polygon[c] * 100)) + '%', fill=(0, 0, 0), font=font_number_of_vehicles)
else:
if (mean_polygon[c]!=0) or (velocity_and_view_time[0,c]>0):
draw.text([10 + c * (rectangle_width + space_between_rect), 65],'velocity:' + str(mean_polygon[c]) + 'kmh', fill=(0, 0, 0),font=font_number_of_vehicles)
velocity_and_view_time[0,c]-=1
if velocity_and_view_time[0,c] ==0:
velocity_and_view_time[1,c]=0
else:
draw.text([ | model_path = os.path.expanduser(self.model_path)
assert model_path.endswith('.h5'), 'Keras model or weights must be a .h5 file.'
start = timer()
num_anchors = len(self.anchors)
num_classes = len(self.class_names)
self.yolo_model = yolo_body(Input(shape=(None,None,3)), num_anchors//3, num_classes)
self.yolo_model.load_weights(self.model_path)
end = timer()
print('{} model, anchors, and classes loaded in {:.2f}sec.'.format(model_path, end-start))
self.colors = ['GreenYellow']
self.input_image_shape = K.placeholder(shape=(2, ))
boxes, scores, classes = evaluation(self.yolo_model.output, self.anchors,
len(self.class_names), self.input_image_shape,
score_threshold=self.score, iou_threshold=self.iou)
return boxes, scores, classes | identifier_body | |
install.rs | ;
use reqwest::Url;
use std::io::prelude::*;
use std::path::Path;
use std::path::PathBuf;
use std::process::Command;
use thiserror::Error;
#[derive(Error, Debug)]
pub enum FrumError {
#[error(transparent)]
HttpError(#[from] reqwest::Error),
#[error(transparent)]
IoError(#[from] std::io::Error),
#[error("Can't find the number of cores")]
FromUtf8Error(#[from] std::string::FromUtf8Error),
#[error("Can't extract the file: {source:?}")]
ExtractError { source: ExtractError },
#[error("The downloaded archive is empty")]
TarIsEmpty,
#[error("Can't find version: {version}")]
VersionNotFound { version: InputVersion },
#[error("Can't list the remote versions: {source:?}")]
CantListRemoteVersions { source: reqwest::Error },
#[error("Version already installed at {path:?}")]
VersionAlreadyInstalled { path: PathBuf },
#[error("Can't find version in dotfiles. Please provide a version manually to the command.")]
CantInferVersion,
#[error("The requested version is not installable: {version}")]
NotInstallableVersion { version: Version },
#[error("Can't build Ruby: {stderr}")]
CantBuildRuby { stderr: String },
}
pub struct Install {
pub version: Option<InputVersion>,
pub configure_opts: Vec<String>,
}
impl crate::command::Command for Install {
type Error = FrumError;
fn apply(&self, config: &FrumConfig) -> Result<(), Self::Error> {
let current_version = self
.version
.clone()
.or_else(|| get_user_version_for_directory(std::env::current_dir().unwrap()))
.ok_or(FrumError::CantInferVersion)?;
let version = match current_version.clone() {
InputVersion::Full(Version::Semver(v)) => Version::Semver(v),
InputVersion::Full(Version::System) => {
return Err(FrumError::NotInstallableVersion {
version: Version::System,
})
}
current_version => {
let available_versions = crate::remote_ruby_index::list(&config.ruby_build_mirror)
.map_err(|source| FrumError::CantListRemoteVersions { source })?
.drain(..)
.map(|x| x.version)
.collect::<Vec<_>>();
current_version
.to_version(&available_versions)
.ok_or(FrumError::VersionNotFound {
version: current_version,
})?
.clone()
}
};
let installations_dir = config.versions_dir();
let installation_dir = PathBuf::from(&installations_dir).join(version.to_string());
if installation_dir.exists() {
return Err(FrumError::VersionAlreadyInstalled {
path: installation_dir,
});
}
let url = package_url(config.ruby_build_mirror.clone(), &version);
outln!(config#Info, "{} Downloading {}", "==>".green(), format!("{}", url).green());
let response = reqwest::blocking::get(url)?;
if response.status() == 404 {
return Err(FrumError::VersionNotFound {
version: current_version,
});
}
outln!(config#Info, "{} Extracting {}", "==>".green(), archive(&version).green());
let temp_installations_dir = installations_dir.join(".downloads");
std::fs::create_dir_all(&temp_installations_dir).map_err(FrumError::IoError)?;
let temp_dir = tempfile::TempDir::new_in(&temp_installations_dir)
.expect("Can't generate a temp directory");
extract_archive_into(&temp_dir, response)?;
outln!(config#Info, "{} Building {}", "==>".green(), format!("Ruby {}", current_version).green());
let installed_directory = std::fs::read_dir(&temp_dir)
.map_err(FrumError::IoError)?
.next()
.ok_or(FrumError::TarIsEmpty)?
.map_err(FrumError::IoError)?;
let installed_directory = installed_directory.path();
build_package(
&installed_directory,
&installation_dir,
&self.configure_opts,
)?;
if !config.default_version_dir().exists() {
debug!("Use {} as the default version", current_version);
create_alias(&config, "default", &version).map_err(FrumError::IoError)?;
}
Ok(())
}
}
fn extract_archive_into<P: AsRef<Path>>(
path: P,
response: reqwest::blocking::Response,
) -> Result<(), FrumError> {
#[cfg(unix)]
let extractor = archive::tar_xz::TarXz::new(response);
#[cfg(windows)]
let extractor = archive::zip::Zip::new(response);
extractor
.extract_into(path)
.map_err(|source| FrumError::ExtractError { source })?;
Ok(())
}
fn package_url(mirror_url: Url, version: &Version) -> Url {
debug!("pakage url");
Url::parse(&format!(
"{}/{}/{}",
mirror_url.as_str().trim_end_matches('/'),
match version {
Version::Semver(version) => format!("{}.{}", version.major, version.minor),
_ => unreachable!(),
},
archive(version),
))
.unwrap()
}
#[cfg(unix)]
fn archive(version: &Version) -> String {
format!("ruby-{}.tar.xz", version)
}
#[cfg(windows)]
fn archive(version: &Version) -> String {
format!("ruby-{}.zip", version)
}
#[allow(clippy::unnecessary_wraps)]
fn openssl_dir() -> Result<String, FrumError> {
#[cfg(target_os = "macos")]
return Ok(String::from_utf8_lossy(
&Command::new("brew")
.arg("--prefix")
.arg("openssl")
.output()
.map_err(FrumError::IoError)?
.stdout,
)
.trim()
.to_string());
#[cfg(not(target_os = "macos"))]
return Ok("/usr/local".to_string());
}
fn build_package(
current_dir: &Path,
installed_dir: &Path,
configure_opts: &[String],
) -> Result<(), FrumError> {
debug!("./configure {}", configure_opts.join(" "));
let mut command = Command::new("sh");
command
.arg("configure")
.arg(format!("--prefix={}", installed_dir.to_str().unwrap()))
.args(configure_opts);
// Provide a default value for --with-openssl-dir
if !configure_opts
.iter()
.any(|opt| opt.starts_with("--with-openssl-dir"))
{
command.arg(format!("--with-openssl-dir={}", openssl_dir()?));
}
let configure = command
.current_dir(¤t_dir)
.output()
.map_err(FrumError::IoError)?;
if !configure.status.success() {
return Err(FrumError::CantBuildRuby {
stderr: format!(
"configure failed: {}",
String::from_utf8_lossy(&configure.stderr).to_string()
),
});
};
debug!("make -j {}", num_cpus::get().to_string());
let make = Command::new("make")
.arg("-j")
.arg(num_cpus::get().to_string())
.current_dir(¤t_dir)
.output()
.map_err(FrumError::IoError)?;
if !make.status.success() {
return Err(FrumError::CantBuildRuby {
stderr: format!(
"make failed: {}",
String::from_utf8_lossy(&make.stderr).to_string()
),
});
};
debug!("make install");
let make_install = Command::new("make")
.arg("install")
.current_dir(¤t_dir)
.output()
.map_err(FrumError::IoError)?;
if !make_install.status.success() | ;
Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
use crate::command::Command;
use crate::config::FrumConfig;
use crate::version::Version;
use tempfile::tempdir;
#[test]
fn test_install_second_version() {
let config = FrumConfig {
base_dir: Some(tempdir().unwrap().path().to_path_buf()),
..Default::default()
};
Install {
version: Some(InputVersion::Full(Version::Semver(
semver::Version::parse("2.7.0").unwrap(),
))),
configure_opts: vec![],
}
.apply(&config)
.expect("Can't install 2.7.0");
Install {
version: Some(InputVersion::Full(Version::Semver(
semver::Version::parse("2.6.4").unwrap(),
))),
configure_opts: vec![],
}
.apply(&config)
.expect("Can't install 2.6.4");
assert_eq!(
std::fs::read_link(&config.default_version_dir())
.unwrap()
.components()
.last(),
Some | {
return Err(FrumError::CantBuildRuby {
stderr: format!(
"make install: {}",
String::from_utf8_lossy(&make_install.stderr).to_string()
),
});
} | conditional_block |
install.rs | Command;
use thiserror::Error;
#[derive(Error, Debug)]
pub enum FrumError {
#[error(transparent)]
HttpError(#[from] reqwest::Error),
#[error(transparent)]
IoError(#[from] std::io::Error),
#[error("Can't find the number of cores")]
FromUtf8Error(#[from] std::string::FromUtf8Error),
#[error("Can't extract the file: {source:?}")]
ExtractError { source: ExtractError },
#[error("The downloaded archive is empty")]
TarIsEmpty,
#[error("Can't find version: {version}")]
VersionNotFound { version: InputVersion },
#[error("Can't list the remote versions: {source:?}")]
CantListRemoteVersions { source: reqwest::Error },
#[error("Version already installed at {path:?}")]
VersionAlreadyInstalled { path: PathBuf },
#[error("Can't find version in dotfiles. Please provide a version manually to the command.")]
CantInferVersion,
#[error("The requested version is not installable: {version}")]
NotInstallableVersion { version: Version },
#[error("Can't build Ruby: {stderr}")]
CantBuildRuby { stderr: String },
}
pub struct Install {
pub version: Option<InputVersion>,
pub configure_opts: Vec<String>,
}
impl crate::command::Command for Install {
type Error = FrumError;
fn apply(&self, config: &FrumConfig) -> Result<(), Self::Error> {
let current_version = self
.version
.clone()
.or_else(|| get_user_version_for_directory(std::env::current_dir().unwrap()))
.ok_or(FrumError::CantInferVersion)?;
let version = match current_version.clone() {
InputVersion::Full(Version::Semver(v)) => Version::Semver(v),
InputVersion::Full(Version::System) => {
return Err(FrumError::NotInstallableVersion {
version: Version::System,
})
}
current_version => {
let available_versions = crate::remote_ruby_index::list(&config.ruby_build_mirror)
.map_err(|source| FrumError::CantListRemoteVersions { source })?
.drain(..)
.map(|x| x.version)
.collect::<Vec<_>>();
current_version
.to_version(&available_versions)
.ok_or(FrumError::VersionNotFound {
version: current_version,
})?
.clone()
}
};
let installations_dir = config.versions_dir();
let installation_dir = PathBuf::from(&installations_dir).join(version.to_string());
if installation_dir.exists() {
return Err(FrumError::VersionAlreadyInstalled {
path: installation_dir,
});
}
let url = package_url(config.ruby_build_mirror.clone(), &version);
outln!(config#Info, "{} Downloading {}", "==>".green(), format!("{}", url).green());
let response = reqwest::blocking::get(url)?;
if response.status() == 404 {
return Err(FrumError::VersionNotFound {
version: current_version,
});
}
outln!(config#Info, "{} Extracting {}", "==>".green(), archive(&version).green());
let temp_installations_dir = installations_dir.join(".downloads");
std::fs::create_dir_all(&temp_installations_dir).map_err(FrumError::IoError)?;
let temp_dir = tempfile::TempDir::new_in(&temp_installations_dir)
.expect("Can't generate a temp directory");
extract_archive_into(&temp_dir, response)?;
outln!(config#Info, "{} Building {}", "==>".green(), format!("Ruby {}", current_version).green());
let installed_directory = std::fs::read_dir(&temp_dir)
.map_err(FrumError::IoError)?
.next()
.ok_or(FrumError::TarIsEmpty)?
.map_err(FrumError::IoError)?;
let installed_directory = installed_directory.path();
build_package(
&installed_directory,
&installation_dir,
&self.configure_opts,
)?;
if !config.default_version_dir().exists() {
debug!("Use {} as the default version", current_version);
create_alias(&config, "default", &version).map_err(FrumError::IoError)?;
}
Ok(())
}
}
fn extract_archive_into<P: AsRef<Path>>(
path: P,
response: reqwest::blocking::Response,
) -> Result<(), FrumError> {
#[cfg(unix)]
let extractor = archive::tar_xz::TarXz::new(response);
#[cfg(windows)]
let extractor = archive::zip::Zip::new(response);
extractor
.extract_into(path)
.map_err(|source| FrumError::ExtractError { source })?;
Ok(())
}
fn package_url(mirror_url: Url, version: &Version) -> Url {
debug!("pakage url");
Url::parse(&format!(
"{}/{}/{}",
mirror_url.as_str().trim_end_matches('/'),
match version {
Version::Semver(version) => format!("{}.{}", version.major, version.minor),
_ => unreachable!(),
},
archive(version),
))
.unwrap()
}
#[cfg(unix)]
fn archive(version: &Version) -> String {
format!("ruby-{}.tar.xz", version)
}
#[cfg(windows)]
fn archive(version: &Version) -> String {
format!("ruby-{}.zip", version)
}
#[allow(clippy::unnecessary_wraps)]
fn openssl_dir() -> Result<String, FrumError> {
#[cfg(target_os = "macos")]
return Ok(String::from_utf8_lossy(
&Command::new("brew")
.arg("--prefix")
.arg("openssl")
.output()
.map_err(FrumError::IoError)?
.stdout,
)
.trim()
.to_string());
#[cfg(not(target_os = "macos"))]
return Ok("/usr/local".to_string());
}
fn build_package(
current_dir: &Path,
installed_dir: &Path,
configure_opts: &[String],
) -> Result<(), FrumError> {
debug!("./configure {}", configure_opts.join(" "));
let mut command = Command::new("sh");
command
.arg("configure")
.arg(format!("--prefix={}", installed_dir.to_str().unwrap()))
.args(configure_opts);
// Provide a default value for --with-openssl-dir
if !configure_opts
.iter()
.any(|opt| opt.starts_with("--with-openssl-dir"))
{
command.arg(format!("--with-openssl-dir={}", openssl_dir()?));
}
let configure = command
.current_dir(¤t_dir)
.output()
.map_err(FrumError::IoError)?;
if !configure.status.success() {
return Err(FrumError::CantBuildRuby {
stderr: format!(
"configure failed: {}",
String::from_utf8_lossy(&configure.stderr).to_string()
),
});
};
debug!("make -j {}", num_cpus::get().to_string());
let make = Command::new("make")
.arg("-j")
.arg(num_cpus::get().to_string())
.current_dir(¤t_dir)
.output()
.map_err(FrumError::IoError)?;
if !make.status.success() {
return Err(FrumError::CantBuildRuby {
stderr: format!(
"make failed: {}",
String::from_utf8_lossy(&make.stderr).to_string()
),
});
};
debug!("make install");
let make_install = Command::new("make")
.arg("install")
.current_dir(¤t_dir)
.output()
.map_err(FrumError::IoError)?;
if !make_install.status.success() {
return Err(FrumError::CantBuildRuby {
stderr: format!(
"make install: {}",
String::from_utf8_lossy(&make_install.stderr).to_string()
),
});
};
Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
use crate::command::Command;
use crate::config::FrumConfig;
use crate::version::Version;
use tempfile::tempdir;
#[test]
fn test_install_second_version() {
let config = FrumConfig {
base_dir: Some(tempdir().unwrap().path().to_path_buf()),
..Default::default()
};
Install {
version: Some(InputVersion::Full(Version::Semver(
semver::Version::parse("2.7.0").unwrap(),
))),
configure_opts: vec![],
}
.apply(&config)
.expect("Can't install 2.7.0");
Install {
version: Some(InputVersion::Full(Version::Semver(
semver::Version::parse("2.6.4").unwrap(),
))),
configure_opts: vec![],
}
.apply(&config)
.expect("Can't install 2.6.4");
assert_eq!(
std::fs::read_link(&config.default_version_dir())
.unwrap()
.components()
.last(),
Some(std::path::Component::Normal(std::ffi::OsStr::new("2.7.0")))
);
}
#[test]
fn | test_install_default_version | identifier_name | |
install.rs | use log::debug;
use reqwest::Url;
use std::io::prelude::*;
use std::path::Path;
use std::path::PathBuf;
use std::process::Command;
use thiserror::Error;
#[derive(Error, Debug)]
pub enum FrumError {
#[error(transparent)]
HttpError(#[from] reqwest::Error),
#[error(transparent)]
IoError(#[from] std::io::Error),
#[error("Can't find the number of cores")]
FromUtf8Error(#[from] std::string::FromUtf8Error),
#[error("Can't extract the file: {source:?}")]
ExtractError { source: ExtractError },
#[error("The downloaded archive is empty")]
TarIsEmpty,
#[error("Can't find version: {version}")]
VersionNotFound { version: InputVersion },
#[error("Can't list the remote versions: {source:?}")]
CantListRemoteVersions { source: reqwest::Error },
#[error("Version already installed at {path:?}")]
VersionAlreadyInstalled { path: PathBuf },
#[error("Can't find version in dotfiles. Please provide a version manually to the command.")]
CantInferVersion,
#[error("The requested version is not installable: {version}")]
NotInstallableVersion { version: Version },
#[error("Can't build Ruby: {stderr}")]
CantBuildRuby { stderr: String },
}
pub struct Install {
pub version: Option<InputVersion>,
pub configure_opts: Vec<String>,
}
impl crate::command::Command for Install {
type Error = FrumError;
fn apply(&self, config: &FrumConfig) -> Result<(), Self::Error> {
let current_version = self
.version
.clone()
.or_else(|| get_user_version_for_directory(std::env::current_dir().unwrap()))
.ok_or(FrumError::CantInferVersion)?;
let version = match current_version.clone() {
InputVersion::Full(Version::Semver(v)) => Version::Semver(v),
InputVersion::Full(Version::System) => {
return Err(FrumError::NotInstallableVersion {
version: Version::System,
})
}
current_version => {
let available_versions = crate::remote_ruby_index::list(&config.ruby_build_mirror)
.map_err(|source| FrumError::CantListRemoteVersions { source })?
.drain(..)
.map(|x| x.version)
.collect::<Vec<_>>();
current_version
.to_version(&available_versions)
.ok_or(FrumError::VersionNotFound {
version: current_version,
})?
.clone()
}
};
let installations_dir = config.versions_dir();
let installation_dir = PathBuf::from(&installations_dir).join(version.to_string());
if installation_dir.exists() {
return Err(FrumError::VersionAlreadyInstalled {
path: installation_dir,
});
}
let url = package_url(config.ruby_build_mirror.clone(), &version);
outln!(config#Info, "{} Downloading {}", "==>".green(), format!("{}", url).green());
let response = reqwest::blocking::get(url)?;
if response.status() == 404 {
return Err(FrumError::VersionNotFound {
version: current_version,
});
}
outln!(config#Info, "{} Extracting {}", "==>".green(), archive(&version).green());
let temp_installations_dir = installations_dir.join(".downloads");
std::fs::create_dir_all(&temp_installations_dir).map_err(FrumError::IoError)?;
let temp_dir = tempfile::TempDir::new_in(&temp_installations_dir)
.expect("Can't generate a temp directory");
extract_archive_into(&temp_dir, response)?;
outln!(config#Info, "{} Building {}", "==>".green(), format!("Ruby {}", current_version).green());
let installed_directory = std::fs::read_dir(&temp_dir)
.map_err(FrumError::IoError)?
.next()
.ok_or(FrumError::TarIsEmpty)?
.map_err(FrumError::IoError)?;
let installed_directory = installed_directory.path();
build_package(
&installed_directory,
&installation_dir,
&self.configure_opts,
)?;
if !config.default_version_dir().exists() {
debug!("Use {} as the default version", current_version);
create_alias(&config, "default", &version).map_err(FrumError::IoError)?;
}
Ok(())
}
}
fn extract_archive_into<P: AsRef<Path>>(
path: P,
response: reqwest::blocking::Response,
) -> Result<(), FrumError> {
#[cfg(unix)]
let extractor = archive::tar_xz::TarXz::new(response);
#[cfg(windows)]
let extractor = archive::zip::Zip::new(response);
extractor
.extract_into(path)
.map_err(|source| FrumError::ExtractError { source })?;
Ok(())
}
fn package_url(mirror_url: Url, version: &Version) -> Url {
debug!("pakage url");
Url::parse(&format!(
"{}/{}/{}",
mirror_url.as_str().trim_end_matches('/'),
match version {
Version::Semver(version) => format!("{}.{}", version.major, version.minor),
_ => unreachable!(),
},
archive(version),
))
.unwrap()
}
#[cfg(unix)]
fn archive(version: &Version) -> String {
format!("ruby-{}.tar.xz", version)
}
#[cfg(windows)]
fn archive(version: &Version) -> String {
format!("ruby-{}.zip", version)
}
#[allow(clippy::unnecessary_wraps)]
fn openssl_dir() -> Result<String, FrumError> {
#[cfg(target_os = "macos")]
return Ok(String::from_utf8_lossy(
&Command::new("brew")
.arg("--prefix")
.arg("openssl")
.output()
.map_err(FrumError::IoError)?
.stdout,
)
.trim()
.to_string());
#[cfg(not(target_os = "macos"))]
return Ok("/usr/local".to_string());
}
fn build_package(
current_dir: &Path,
installed_dir: &Path,
configure_opts: &[String],
) -> Result<(), FrumError> {
debug!("./configure {}", configure_opts.join(" "));
let mut command = Command::new("sh");
command
.arg("configure")
.arg(format!("--prefix={}", installed_dir.to_str().unwrap()))
.args(configure_opts);
// Provide a default value for --with-openssl-dir
if !configure_opts
.iter()
.any(|opt| opt.starts_with("--with-openssl-dir"))
{
command.arg(format!("--with-openssl-dir={}", openssl_dir()?));
}
let configure = command
.current_dir(¤t_dir)
.output()
.map_err(FrumError::IoError)?;
if !configure.status.success() {
return Err(FrumError::CantBuildRuby {
stderr: format!(
"configure failed: {}",
String::from_utf8_lossy(&configure.stderr).to_string()
),
});
};
debug!("make -j {}", num_cpus::get().to_string());
let make = Command::new("make")
.arg("-j")
.arg(num_cpus::get().to_string())
.current_dir(¤t_dir)
.output()
.map_err(FrumError::IoError)?;
if !make.status.success() {
return Err(FrumError::CantBuildRuby {
stderr: format!(
"make failed: {}",
String::from_utf8_lossy(&make.stderr).to_string()
),
});
};
debug!("make install");
let make_install = Command::new("make")
.arg("install")
.current_dir(¤t_dir)
.output()
.map_err(FrumError::IoError)?;
if !make_install.status.success() {
return Err(FrumError::CantBuildRuby {
stderr: format!(
"make install: {}",
String::from_utf8_lossy(&make_install.stderr).to_string()
),
});
};
Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
use crate::command::Command;
use crate::config::FrumConfig;
use crate::version::Version;
use tempfile::tempdir;
#[test]
fn test_install_second_version() {
let config = FrumConfig {
base_dir: Some(tempdir().unwrap().path().to_path_buf()),
..Default::default()
};
Install {
version: Some(InputVersion::Full(Version::Semver(
semver::Version::parse("2.7.0").unwrap(),
))),
configure_opts: vec![],
}
.apply(&config)
.expect("Can't install 2.7.0");
Install {
version: Some(InputVersion::Full(Version::Semver(
semver::Version::parse("2.6.4").unwrap(),
))),
configure_opts: vec![],
}
.apply(&config)
.expect("Can't install 2.6.4");
assert_eq!(
std::fs::read_link(&config.default_version_dir())
.unwrap()
. | use colored::Colorize; | random_line_split | |
install.rs | ;
use reqwest::Url;
use std::io::prelude::*;
use std::path::Path;
use std::path::PathBuf;
use std::process::Command;
use thiserror::Error;
#[derive(Error, Debug)]
pub enum FrumError {
#[error(transparent)]
HttpError(#[from] reqwest::Error),
#[error(transparent)]
IoError(#[from] std::io::Error),
#[error("Can't find the number of cores")]
FromUtf8Error(#[from] std::string::FromUtf8Error),
#[error("Can't extract the file: {source:?}")]
ExtractError { source: ExtractError },
#[error("The downloaded archive is empty")]
TarIsEmpty,
#[error("Can't find version: {version}")]
VersionNotFound { version: InputVersion },
#[error("Can't list the remote versions: {source:?}")]
CantListRemoteVersions { source: reqwest::Error },
#[error("Version already installed at {path:?}")]
VersionAlreadyInstalled { path: PathBuf },
#[error("Can't find version in dotfiles. Please provide a version manually to the command.")]
CantInferVersion,
#[error("The requested version is not installable: {version}")]
NotInstallableVersion { version: Version },
#[error("Can't build Ruby: {stderr}")]
CantBuildRuby { stderr: String },
}
pub struct Install {
pub version: Option<InputVersion>,
pub configure_opts: Vec<String>,
}
impl crate::command::Command for Install {
type Error = FrumError;
fn apply(&self, config: &FrumConfig) -> Result<(), Self::Error> {
let current_version = self
.version
.clone()
.or_else(|| get_user_version_for_directory(std::env::current_dir().unwrap()))
.ok_or(FrumError::CantInferVersion)?;
let version = match current_version.clone() {
InputVersion::Full(Version::Semver(v)) => Version::Semver(v),
InputVersion::Full(Version::System) => {
return Err(FrumError::NotInstallableVersion {
version: Version::System,
})
}
current_version => {
let available_versions = crate::remote_ruby_index::list(&config.ruby_build_mirror)
.map_err(|source| FrumError::CantListRemoteVersions { source })?
.drain(..)
.map(|x| x.version)
.collect::<Vec<_>>();
current_version
.to_version(&available_versions)
.ok_or(FrumError::VersionNotFound {
version: current_version,
})?
.clone()
}
};
let installations_dir = config.versions_dir();
let installation_dir = PathBuf::from(&installations_dir).join(version.to_string());
if installation_dir.exists() {
return Err(FrumError::VersionAlreadyInstalled {
path: installation_dir,
});
}
let url = package_url(config.ruby_build_mirror.clone(), &version);
outln!(config#Info, "{} Downloading {}", "==>".green(), format!("{}", url).green());
let response = reqwest::blocking::get(url)?;
if response.status() == 404 {
return Err(FrumError::VersionNotFound {
version: current_version,
});
}
outln!(config#Info, "{} Extracting {}", "==>".green(), archive(&version).green());
let temp_installations_dir = installations_dir.join(".downloads");
std::fs::create_dir_all(&temp_installations_dir).map_err(FrumError::IoError)?;
let temp_dir = tempfile::TempDir::new_in(&temp_installations_dir)
.expect("Can't generate a temp directory");
extract_archive_into(&temp_dir, response)?;
outln!(config#Info, "{} Building {}", "==>".green(), format!("Ruby {}", current_version).green());
let installed_directory = std::fs::read_dir(&temp_dir)
.map_err(FrumError::IoError)?
.next()
.ok_or(FrumError::TarIsEmpty)?
.map_err(FrumError::IoError)?;
let installed_directory = installed_directory.path();
build_package(
&installed_directory,
&installation_dir,
&self.configure_opts,
)?;
if !config.default_version_dir().exists() {
debug!("Use {} as the default version", current_version);
create_alias(&config, "default", &version).map_err(FrumError::IoError)?;
}
Ok(())
}
}
fn extract_archive_into<P: AsRef<Path>>(
path: P,
response: reqwest::blocking::Response,
) -> Result<(), FrumError> |
fn package_url(mirror_url: Url, version: &Version) -> Url {
debug!("pakage url");
Url::parse(&format!(
"{}/{}/{}",
mirror_url.as_str().trim_end_matches('/'),
match version {
Version::Semver(version) => format!("{}.{}", version.major, version.minor),
_ => unreachable!(),
},
archive(version),
))
.unwrap()
}
#[cfg(unix)]
fn archive(version: &Version) -> String {
format!("ruby-{}.tar.xz", version)
}
#[cfg(windows)]
fn archive(version: &Version) -> String {
format!("ruby-{}.zip", version)
}
#[allow(clippy::unnecessary_wraps)]
fn openssl_dir() -> Result<String, FrumError> {
#[cfg(target_os = "macos")]
return Ok(String::from_utf8_lossy(
&Command::new("brew")
.arg("--prefix")
.arg("openssl")
.output()
.map_err(FrumError::IoError)?
.stdout,
)
.trim()
.to_string());
#[cfg(not(target_os = "macos"))]
return Ok("/usr/local".to_string());
}
fn build_package(
current_dir: &Path,
installed_dir: &Path,
configure_opts: &[String],
) -> Result<(), FrumError> {
debug!("./configure {}", configure_opts.join(" "));
let mut command = Command::new("sh");
command
.arg("configure")
.arg(format!("--prefix={}", installed_dir.to_str().unwrap()))
.args(configure_opts);
// Provide a default value for --with-openssl-dir
if !configure_opts
.iter()
.any(|opt| opt.starts_with("--with-openssl-dir"))
{
command.arg(format!("--with-openssl-dir={}", openssl_dir()?));
}
let configure = command
.current_dir(¤t_dir)
.output()
.map_err(FrumError::IoError)?;
if !configure.status.success() {
return Err(FrumError::CantBuildRuby {
stderr: format!(
"configure failed: {}",
String::from_utf8_lossy(&configure.stderr).to_string()
),
});
};
debug!("make -j {}", num_cpus::get().to_string());
let make = Command::new("make")
.arg("-j")
.arg(num_cpus::get().to_string())
.current_dir(¤t_dir)
.output()
.map_err(FrumError::IoError)?;
if !make.status.success() {
return Err(FrumError::CantBuildRuby {
stderr: format!(
"make failed: {}",
String::from_utf8_lossy(&make.stderr).to_string()
),
});
};
debug!("make install");
let make_install = Command::new("make")
.arg("install")
.current_dir(¤t_dir)
.output()
.map_err(FrumError::IoError)?;
if !make_install.status.success() {
return Err(FrumError::CantBuildRuby {
stderr: format!(
"make install: {}",
String::from_utf8_lossy(&make_install.stderr).to_string()
),
});
};
Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
use crate::command::Command;
use crate::config::FrumConfig;
use crate::version::Version;
use tempfile::tempdir;
#[test]
fn test_install_second_version() {
let config = FrumConfig {
base_dir: Some(tempdir().unwrap().path().to_path_buf()),
..Default::default()
};
Install {
version: Some(InputVersion::Full(Version::Semver(
semver::Version::parse("2.7.0").unwrap(),
))),
configure_opts: vec![],
}
.apply(&config)
.expect("Can't install 2.7.0");
Install {
version: Some(InputVersion::Full(Version::Semver(
semver::Version::parse("2.6.4").unwrap(),
))),
configure_opts: vec![],
}
.apply(&config)
.expect("Can't install 2.6.4");
assert_eq!(
std::fs::read_link(&config.default_version_dir())
.unwrap()
.components()
.last(),
Some | {
#[cfg(unix)]
let extractor = archive::tar_xz::TarXz::new(response);
#[cfg(windows)]
let extractor = archive::zip::Zip::new(response);
extractor
.extract_into(path)
.map_err(|source| FrumError::ExtractError { source })?;
Ok(())
} | identifier_body |
fse_encoder.go | return fmt.Errorf("internal error: expected cumul[s.symbolLen] (%d) == tableSize (%d)", cumul[s.symbolLen], tableSize)
}
cumul[s.symbolLen] = int16(tableSize) + 1
}
// Spread symbols
s.zeroBits = false
{
step := tableStep(tableSize)
tableMask := tableSize - 1
var position uint32
// if any symbol > largeLimit, we may have 0 bits output.
largeLimit := int16(1 << (s.actualTableLog - 1))
for ui, v := range s.norm[:s.symbolLen] {
symbol := byte(ui)
if v > largeLimit {
s.zeroBits = true
}
for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ {
tableSymbol[position] = symbol
position = (position + step) & tableMask
for position > highThreshold {
position = (position + step) & tableMask
} /* Low proba area */
}
}
// Check if we have gone through all positions
if position != 0 {
return errors.New("position!=0")
}
}
// Build table
table := s.ct.stateTable
{
tsi := int(tableSize)
for u, v := range tableSymbol {
// TableU16 : sorted by symbol order; gives next state value
table[cumul[v]] = uint16(tsi + u)
cumul[v]++
}
}
// Build Symbol Transformation Table
{
total := int16(0)
symbolTT := s.ct.symbolTT[:s.symbolLen]
tableLog := s.actualTableLog
tl := (uint32(tableLog) << 16) - (1 << tableLog)
for i, v := range s.norm[:s.symbolLen] {
switch v {
case 0:
case -1, 1:
symbolTT[i].deltaNbBits = tl
symbolTT[i].deltaFindState = total - 1
total++
default:
maxBitsOut := uint32(tableLog) - highBit(uint32(v-1))
minStatePlus := uint32(v) << maxBitsOut
symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus
symbolTT[i].deltaFindState = total - v
total += v
}
}
if total != int16(tableSize) {
return fmt.Errorf("total mismatch %d (got) != %d (want)", total, tableSize)
}
}
return nil
}
var rtbTable = [...]uint32{0, 473195, 504333, 520860, 550000, 700000, 750000, 830000}
func (s *fseEncoder) | (val byte) {
s.allocCtable()
s.actualTableLog = 0
s.ct.stateTable = s.ct.stateTable[:1]
s.ct.symbolTT[val] = symbolTransform{
deltaFindState: 0,
deltaNbBits: 0,
}
if debugEncoder {
println("setRLE: val", val, "symbolTT", s.ct.symbolTT[val])
}
s.rleVal = val
s.useRLE = true
}
// setBits will set output bits for the transform.
// if nil is provided, the number of bits is equal to the index.
func (s *fseEncoder) setBits(transform []byte) {
if s.reUsed || s.preDefined {
return
}
if s.useRLE {
if transform == nil {
s.ct.symbolTT[s.rleVal].outBits = s.rleVal
s.maxBits = s.rleVal
return
}
s.maxBits = transform[s.rleVal]
s.ct.symbolTT[s.rleVal].outBits = s.maxBits
return
}
if transform == nil {
for i := range s.ct.symbolTT[:s.symbolLen] {
s.ct.symbolTT[i].outBits = uint8(i)
}
s.maxBits = uint8(s.symbolLen - 1)
return
}
s.maxBits = 0
for i, v := range transform[:s.symbolLen] {
s.ct.symbolTT[i].outBits = v
if v > s.maxBits {
// We could assume bits always going up, but we play safe.
s.maxBits = v
}
}
}
// normalizeCount will normalize the count of the symbols so
// the total is equal to the table size.
// If successful, compression tables will also be made ready.
func (s *fseEncoder) normalizeCount(length int) error {
if s.reUsed {
return nil
}
s.optimalTableLog(length)
var (
tableLog = s.actualTableLog
scale = 62 - uint64(tableLog)
step = (1 << 62) / uint64(length)
vStep = uint64(1) << (scale - 20)
stillToDistribute = int16(1 << tableLog)
largest int
largestP int16
lowThreshold = (uint32)(length >> tableLog)
)
if s.maxCount == length {
s.useRLE = true
return nil
}
s.useRLE = false
for i, cnt := range s.count[:s.symbolLen] {
// already handled
// if (count[s] == s.length) return 0; /* rle special case */
if cnt == 0 {
s.norm[i] = 0
continue
}
if cnt <= lowThreshold {
s.norm[i] = -1
stillToDistribute--
} else {
proba := (int16)((uint64(cnt) * step) >> scale)
if proba < 8 {
restToBeat := vStep * uint64(rtbTable[proba])
v := uint64(cnt)*step - (uint64(proba) << scale)
if v > restToBeat {
proba++
}
}
if proba > largestP {
largestP = proba
largest = i
}
s.norm[i] = proba
stillToDistribute -= proba
}
}
if -stillToDistribute >= (s.norm[largest] >> 1) {
// corner case, need another normalization method
err := s.normalizeCount2(length)
if err != nil {
return err
}
if debugAsserts {
err = s.validateNorm()
if err != nil {
return err
}
}
return s.buildCTable()
}
s.norm[largest] += stillToDistribute
if debugAsserts {
err := s.validateNorm()
if err != nil {
return err
}
}
return s.buildCTable()
}
// Secondary normalization method.
// To be used when primary method fails.
func (s *fseEncoder) normalizeCount2(length int) error {
const notYetAssigned = -2
var (
distributed uint32
total = uint32(length)
tableLog = s.actualTableLog
lowThreshold = total >> tableLog
lowOne = (total * 3) >> (tableLog + 1)
)
for i, cnt := range s.count[:s.symbolLen] {
if cnt == 0 {
s.norm[i] = 0
continue
}
if cnt <= lowThreshold {
s.norm[i] = -1
distributed++
total -= cnt
continue
}
if cnt <= lowOne {
s.norm[i] = 1
distributed++
total -= cnt
continue
}
s.norm[i] = notYetAssigned
}
toDistribute := (1 << tableLog) - distributed
if (total / toDistribute) > lowOne {
// risk of rounding to zero
lowOne = (total * 3) / (toDistribute * 2)
for i, cnt := range s.count[:s.symbolLen] {
if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) {
s.norm[i] = 1
distributed++
total -= cnt
continue
}
}
toDistribute = (1 << tableLog) - distributed
}
if distributed == uint32(s.symbolLen)+1 {
// all values are pretty poor;
// probably incompressible data (should have already been detected);
// find max, then give all remaining points to max
var maxV int
var maxC uint32
for i, cnt := range s.count[:s.symbolLen] {
if cnt > | setRLE | identifier_name |
fse_encoder.go | return fmt.Errorf("internal error: expected cumul[s.symbolLen] (%d) == tableSize (%d)", cumul[s.symbolLen], tableSize)
}
cumul[s.symbolLen] = int16(tableSize) + 1
}
// Spread symbols
s.zeroBits = false
{
step := tableStep(tableSize)
tableMask := tableSize - 1
var position uint32
// if any symbol > largeLimit, we may have 0 bits output.
largeLimit := int16(1 << (s.actualTableLog - 1))
for ui, v := range s.norm[:s.symbolLen] {
symbol := byte(ui)
if v > largeLimit {
s.zeroBits = true
}
for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ {
tableSymbol[position] = symbol
position = (position + step) & tableMask
for position > highThreshold {
position = (position + step) & tableMask
} /* Low proba area */
}
}
// Check if we have gone through all positions
if position != 0 {
return errors.New("position!=0")
}
}
// Build table
table := s.ct.stateTable
{
tsi := int(tableSize)
for u, v := range tableSymbol {
// TableU16 : sorted by symbol order; gives next state value
table[cumul[v]] = uint16(tsi + u)
cumul[v]++
}
}
// Build Symbol Transformation Table
{
total := int16(0)
symbolTT := s.ct.symbolTT[:s.symbolLen]
tableLog := s.actualTableLog
tl := (uint32(tableLog) << 16) - (1 << tableLog)
for i, v := range s.norm[:s.symbolLen] {
switch v {
case 0:
case -1, 1:
symbolTT[i].deltaNbBits = tl
symbolTT[i].deltaFindState = total - 1
total++
default:
maxBitsOut := uint32(tableLog) - highBit(uint32(v-1))
minStatePlus := uint32(v) << maxBitsOut
symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus
symbolTT[i].deltaFindState = total - v
total += v
}
}
if total != int16(tableSize) {
return fmt.Errorf("total mismatch %d (got) != %d (want)", total, tableSize)
}
}
return nil
}
var rtbTable = [...]uint32{0, 473195, 504333, 520860, 550000, 700000, 750000, 830000}
func (s *fseEncoder) setRLE(val byte) {
s.allocCtable()
s.actualTableLog = 0
s.ct.stateTable = s.ct.stateTable[:1]
s.ct.symbolTT[val] = symbolTransform{
deltaFindState: 0,
deltaNbBits: 0,
}
if debugEncoder {
println("setRLE: val", val, "symbolTT", s.ct.symbolTT[val])
}
s.rleVal = val
s.useRLE = true
}
// setBits will set output bits for the transform.
// if nil is provided, the number of bits is equal to the index.
func (s *fseEncoder) setBits(transform []byte) | }
s.maxBits = 0
for i, v := range transform[:s.symbolLen] {
s.ct.symbolTT[i].outBits = v
if v > s.maxBits {
// We could assume bits always going up, but we play safe.
s.maxBits = v
}
}
}
// normalizeCount will normalize the count of the symbols so
// the total is equal to the table size.
// If successful, compression tables will also be made ready.
func (s *fseEncoder) normalizeCount(length int) error {
if s.reUsed {
return nil
}
s.optimalTableLog(length)
var (
tableLog = s.actualTableLog
scale = 62 - uint64(tableLog)
step = (1 << 62) / uint64(length)
vStep = uint64(1) << (scale - 20)
stillToDistribute = int16(1 << tableLog)
largest int
largestP int16
lowThreshold = (uint32)(length >> tableLog)
)
if s.maxCount == length {
s.useRLE = true
return nil
}
s.useRLE = false
for i, cnt := range s.count[:s.symbolLen] {
// already handled
// if (count[s] == s.length) return 0; /* rle special case */
if cnt == 0 {
s.norm[i] = 0
continue
}
if cnt <= lowThreshold {
s.norm[i] = -1
stillToDistribute--
} else {
proba := (int16)((uint64(cnt) * step) >> scale)
if proba < 8 {
restToBeat := vStep * uint64(rtbTable[proba])
v := uint64(cnt)*step - (uint64(proba) << scale)
if v > restToBeat {
proba++
}
}
if proba > largestP {
largestP = proba
largest = i
}
s.norm[i] = proba
stillToDistribute -= proba
}
}
if -stillToDistribute >= (s.norm[largest] >> 1) {
// corner case, need another normalization method
err := s.normalizeCount2(length)
if err != nil {
return err
}
if debugAsserts {
err = s.validateNorm()
if err != nil {
return err
}
}
return s.buildCTable()
}
s.norm[largest] += stillToDistribute
if debugAsserts {
err := s.validateNorm()
if err != nil {
return err
}
}
return s.buildCTable()
}
// Secondary normalization method.
// To be used when primary method fails.
func (s *fseEncoder) normalizeCount2(length int) error {
const notYetAssigned = -2
var (
distributed uint32
total = uint32(length)
tableLog = s.actualTableLog
lowThreshold = total >> tableLog
lowOne = (total * 3) >> (tableLog + 1)
)
for i, cnt := range s.count[:s.symbolLen] {
if cnt == 0 {
s.norm[i] = 0
continue
}
if cnt <= lowThreshold {
s.norm[i] = -1
distributed++
total -= cnt
continue
}
if cnt <= lowOne {
s.norm[i] = 1
distributed++
total -= cnt
continue
}
s.norm[i] = notYetAssigned
}
toDistribute := (1 << tableLog) - distributed
if (total / toDistribute) > lowOne {
// risk of rounding to zero
lowOne = (total * 3) / (toDistribute * 2)
for i, cnt := range s.count[:s.symbolLen] {
if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) {
s.norm[i] = 1
distributed++
total -= cnt
continue
}
}
toDistribute = (1 << tableLog) - distributed
}
if distributed == uint32(s.symbolLen)+1 {
// all values are pretty poor;
// probably incompressible data (should have already been detected);
// find max, then give all remaining points to max
var maxV int
var maxC uint32
for i, cnt := range s.count[:s.symbolLen] {
if cnt > max | {
if s.reUsed || s.preDefined {
return
}
if s.useRLE {
if transform == nil {
s.ct.symbolTT[s.rleVal].outBits = s.rleVal
s.maxBits = s.rleVal
return
}
s.maxBits = transform[s.rleVal]
s.ct.symbolTT[s.rleVal].outBits = s.maxBits
return
}
if transform == nil {
for i := range s.ct.symbolTT[:s.symbolLen] {
s.ct.symbolTT[i].outBits = uint8(i)
}
s.maxBits = uint8(s.symbolLen - 1)
return | identifier_body |
fse_encoder.go | u := byte(ui) // one less than reference
if v == -1 {
// Low proba symbol
cumul[u+1] = cumul[u] + 1
tableSymbol[highThreshold] = u
highThreshold--
} else {
cumul[u+1] = cumul[u] + v
}
}
// Encode last symbol separately to avoid overflowing u
u := int(s.symbolLen - 1)
v := s.norm[s.symbolLen-1]
if v == -1 {
// Low proba symbol
cumul[u+1] = cumul[u] + 1
tableSymbol[highThreshold] = byte(u)
highThreshold--
} else {
cumul[u+1] = cumul[u] + v
}
if uint32(cumul[s.symbolLen]) != tableSize {
return fmt.Errorf("internal error: expected cumul[s.symbolLen] (%d) == tableSize (%d)", cumul[s.symbolLen], tableSize)
}
cumul[s.symbolLen] = int16(tableSize) + 1
}
// Spread symbols
s.zeroBits = false
{
step := tableStep(tableSize)
tableMask := tableSize - 1
var position uint32
// if any symbol > largeLimit, we may have 0 bits output.
largeLimit := int16(1 << (s.actualTableLog - 1))
for ui, v := range s.norm[:s.symbolLen] {
symbol := byte(ui)
if v > largeLimit {
s.zeroBits = true
}
for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ {
tableSymbol[position] = symbol
position = (position + step) & tableMask
for position > highThreshold {
position = (position + step) & tableMask
} /* Low proba area */
}
}
// Check if we have gone through all positions
if position != 0 {
return errors.New("position!=0")
}
}
// Build table
table := s.ct.stateTable
{
tsi := int(tableSize)
for u, v := range tableSymbol {
// TableU16 : sorted by symbol order; gives next state value
table[cumul[v]] = uint16(tsi + u)
cumul[v]++
}
}
// Build Symbol Transformation Table
{
total := int16(0)
symbolTT := s.ct.symbolTT[:s.symbolLen]
tableLog := s.actualTableLog
tl := (uint32(tableLog) << 16) - (1 << tableLog)
for i, v := range s.norm[:s.symbolLen] {
switch v {
case 0:
case -1, 1:
symbolTT[i].deltaNbBits = tl
symbolTT[i].deltaFindState = total - 1
total++
default:
maxBitsOut := uint32(tableLog) - highBit(uint32(v-1))
minStatePlus := uint32(v) << maxBitsOut
symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus
symbolTT[i].deltaFindState = total - v
total += v
}
}
if total != int16(tableSize) {
return fmt.Errorf("total mismatch %d (got) != %d (want)", total, tableSize)
}
}
return nil
}
var rtbTable = [...]uint32{0, 473195, 504333, 520860, 550000, 700000, 750000, 830000}
func (s *fseEncoder) setRLE(val byte) {
s.allocCtable()
s.actualTableLog = 0
s.ct.stateTable = s.ct.stateTable[:1]
s.ct.symbolTT[val] = symbolTransform{
deltaFindState: 0,
deltaNbBits: 0,
}
if debugEncoder {
println("setRLE: val", val, "symbolTT", s.ct.symbolTT[val])
}
s.rleVal = val
s.useRLE = true
}
// setBits will set output bits for the transform.
// if nil is provided, the number of bits is equal to the index.
func (s *fseEncoder) setBits(transform []byte) {
if s.reUsed || s.preDefined {
return
}
if s.useRLE {
if transform == nil {
s.ct.symbolTT[s.rleVal].outBits = s.rleVal
s.maxBits = s.rleVal
return
}
s.maxBits = transform[s.rleVal]
s.ct.symbolTT[s.rleVal].outBits = s.maxBits
return
}
if transform == nil {
for i := range s.ct.symbolTT[:s.symbolLen] {
s.ct.symbolTT[i].outBits = uint8(i)
}
s.maxBits = uint8(s.symbolLen - 1)
return
}
s.maxBits = 0
for i, v := range transform[:s.symbolLen] {
s.ct.symbolTT[i].outBits = v
if v > s.maxBits {
// We could assume bits always going up, but we play safe.
s.maxBits = v
}
}
}
// normalizeCount will normalize the count of the symbols so
// the total is equal to the table size.
// If successful, compression tables will also be made ready.
func (s *fseEncoder) normalizeCount(length int) error {
if s.reUsed {
return nil
}
s.optimalTableLog(length)
var (
tableLog = s.actualTableLog
scale = 62 - uint64(tableLog)
step = (1 << 62) / uint64(length)
vStep = uint64(1) << (scale - 20)
stillToDistribute = int16(1 << tableLog)
largest int
largestP int16
lowThreshold = (uint32)(length >> tableLog)
)
if s.maxCount == length {
s.useRLE = true
return nil
}
s.useRLE = false
for i, cnt := range s.count[:s.symbolLen] {
// already handled
// if (count[s] == s.length) return 0; /* rle special case */
if cnt == 0 {
s.norm[i] = 0
continue
}
if cnt <= lowThreshold {
s.norm[i] = -1
stillToDistribute--
} else {
proba := (int16)((uint64(cnt) * step) >> scale)
if proba < 8 {
restToBeat := vStep * uint64(rtbTable[proba])
v := uint64(cnt)*step - (uint64(proba) << scale)
if v > restToBeat {
proba++
}
}
if proba > largestP {
largestP = proba
largest = i
}
s.norm[i] = proba
stillToDistribute -= proba
}
}
if -stillToDistribute >= (s.norm[largest] >> 1) {
// corner case, need another normalization method
err := s.normalizeCount2(length)
if err != nil {
return err
}
if debugAsserts {
err = s.validateNorm()
if err != nil {
return err
}
}
return s.buildCTable()
}
s.norm[largest] += stillToDistribute
if debugAsserts {
err := s.validateNorm()
if err != nil {
return err
}
}
return s.buildCTable()
}
// Secondary normalization method.
// To be used when primary method fails.
func (s *fseEncoder) normalizeCount2(length int) error {
const notYetAssigned = -2
var (
distributed uint32
total = uint32(length)
tableLog = s.actualTableLog
lowThreshold = total >> tableLog
lowOne = (total * 3) >> (tableLog + 1)
)
for i, cnt := range s.count[:s.symbolLen] {
if cnt == 0 {
s.norm[i] = 0
continue
}
if cnt <= lowThreshold {
s.norm[i] = -1
distributed++
total -= cnt
continue
}
if cnt <= lowOne {
s.norm[i] = 1
distributed++
total -= cnt | cumul[0] = 0
for ui, v := range s.norm[:s.symbolLen-1] { | random_line_split | |
fse_encoder.go |
s.ct.symbolTT = s.ct.symbolTT[:256]
}
// buildCTable will populate the compression table so it is ready to be used.
func (s *fseEncoder) buildCTable() error {
tableSize := uint32(1 << s.actualTableLog)
highThreshold := tableSize - 1
var cumul [256]int16
s.allocCtable()
tableSymbol := s.ct.tableSymbol[:tableSize]
// symbol start positions
{
cumul[0] = 0
for ui, v := range s.norm[:s.symbolLen-1] {
u := byte(ui) // one less than reference
if v == -1 {
// Low proba symbol
cumul[u+1] = cumul[u] + 1
tableSymbol[highThreshold] = u
highThreshold--
} else {
cumul[u+1] = cumul[u] + v
}
}
// Encode last symbol separately to avoid overflowing u
u := int(s.symbolLen - 1)
v := s.norm[s.symbolLen-1]
if v == -1 {
// Low proba symbol
cumul[u+1] = cumul[u] + 1
tableSymbol[highThreshold] = byte(u)
highThreshold--
} else {
cumul[u+1] = cumul[u] + v
}
if uint32(cumul[s.symbolLen]) != tableSize {
return fmt.Errorf("internal error: expected cumul[s.symbolLen] (%d) == tableSize (%d)", cumul[s.symbolLen], tableSize)
}
cumul[s.symbolLen] = int16(tableSize) + 1
}
// Spread symbols
s.zeroBits = false
{
step := tableStep(tableSize)
tableMask := tableSize - 1
var position uint32
// if any symbol > largeLimit, we may have 0 bits output.
largeLimit := int16(1 << (s.actualTableLog - 1))
for ui, v := range s.norm[:s.symbolLen] {
symbol := byte(ui)
if v > largeLimit {
s.zeroBits = true
}
for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ {
tableSymbol[position] = symbol
position = (position + step) & tableMask
for position > highThreshold {
position = (position + step) & tableMask
} /* Low proba area */
}
}
// Check if we have gone through all positions
if position != 0 {
return errors.New("position!=0")
}
}
// Build table
table := s.ct.stateTable
{
tsi := int(tableSize)
for u, v := range tableSymbol {
// TableU16 : sorted by symbol order; gives next state value
table[cumul[v]] = uint16(tsi + u)
cumul[v]++
}
}
// Build Symbol Transformation Table
{
total := int16(0)
symbolTT := s.ct.symbolTT[:s.symbolLen]
tableLog := s.actualTableLog
tl := (uint32(tableLog) << 16) - (1 << tableLog)
for i, v := range s.norm[:s.symbolLen] {
switch v {
case 0:
case -1, 1:
symbolTT[i].deltaNbBits = tl
symbolTT[i].deltaFindState = total - 1
total++
default:
maxBitsOut := uint32(tableLog) - highBit(uint32(v-1))
minStatePlus := uint32(v) << maxBitsOut
symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus
symbolTT[i].deltaFindState = total - v
total += v
}
}
if total != int16(tableSize) {
return fmt.Errorf("total mismatch %d (got) != %d (want)", total, tableSize)
}
}
return nil
}
var rtbTable = [...]uint32{0, 473195, 504333, 520860, 550000, 700000, 750000, 830000}
func (s *fseEncoder) setRLE(val byte) {
s.allocCtable()
s.actualTableLog = 0
s.ct.stateTable = s.ct.stateTable[:1]
s.ct.symbolTT[val] = symbolTransform{
deltaFindState: 0,
deltaNbBits: 0,
}
if debugEncoder {
println("setRLE: val", val, "symbolTT", s.ct.symbolTT[val])
}
s.rleVal = val
s.useRLE = true
}
// setBits will set output bits for the transform.
// if nil is provided, the number of bits is equal to the index.
func (s *fseEncoder) setBits(transform []byte) {
if s.reUsed || s.preDefined {
return
}
if s.useRLE {
if transform == nil {
s.ct.symbolTT[s.rleVal].outBits = s.rleVal
s.maxBits = s.rleVal
return
}
s.maxBits = transform[s.rleVal]
s.ct.symbolTT[s.rleVal].outBits = s.maxBits
return
}
if transform == nil {
for i := range s.ct.symbolTT[:s.symbolLen] {
s.ct.symbolTT[i].outBits = uint8(i)
}
s.maxBits = uint8(s.symbolLen - 1)
return
}
s.maxBits = 0
for i, v := range transform[:s.symbolLen] {
s.ct.symbolTT[i].outBits = v
if v > s.maxBits {
// We could assume bits always going up, but we play safe.
s.maxBits = v
}
}
}
// normalizeCount will normalize the count of the symbols so
// the total is equal to the table size.
// If successful, compression tables will also be made ready.
func (s *fseEncoder) normalizeCount(length int) error {
if s.reUsed {
return nil
}
s.optimalTableLog(length)
var (
tableLog = s.actualTableLog
scale = 62 - uint64(tableLog)
step = (1 << 62) / uint64(length)
vStep = uint64(1) << (scale - 20)
stillToDistribute = int16(1 << tableLog)
largest int
largestP int16
lowThreshold = (uint32)(length >> tableLog)
)
if s.maxCount == length {
s.useRLE = true
return nil
}
s.useRLE = false
for i, cnt := range s.count[:s.symbolLen] {
// already handled
// if (count[s] == s.length) return 0; /* rle special case */
if cnt == 0 {
s.norm[i] = 0
continue
}
if cnt <= lowThreshold {
s.norm[i] = -1
stillToDistribute--
} else {
proba := (int16)((uint64(cnt) * step) >> scale)
if proba < 8 {
restToBeat := vStep * uint64(rtbTable[proba])
v := uint64(cnt)*step - (uint64(proba) << scale)
if v > restToBeat {
proba++
}
}
if proba > largestP {
largestP = proba
largest = i
}
s.norm[i] = proba
stillToDistribute -= proba
}
}
if -stillToDistribute >= (s.norm[largest] >> 1) {
// corner case, need another normalization method
err := s.normalizeCount2(length)
if err != nil {
return err
}
if debugAsserts {
err = s.validateNorm()
if err != nil {
return err
}
}
return s.buildCTable()
}
s.norm[largest] += stillToDistribute
if debugAsserts {
err := s.validateNorm()
if err != nil {
return err
}
}
return s.buildCTable()
}
// Secondary normalization method.
// To be used when primary method fails.
func (s *fseEncoder) normalizeCount2(length int) error {
const notYetAssigned = -2
var (
distributed uint32
total = uint32(length)
tableLog = s.actualTable | {
s.ct.symbolTT = make([]symbolTransform, 256)
} | conditional_block | |
resolve_recovers_from_http_errors.rs | HttpResponder>(
pkg: Package,
responder: H,
failure_error: fidl_fuchsia_pkg::ResolveError,
) {
let env = TestEnvBuilder::new().build().await;
let repo = Arc::new(
RepositoryBuilder::from_template_dir(EMPTY_REPO_PATH)
.add_package(&pkg)
.build()
.await
.unwrap(),
);
let pkg_url = format!("fuchsia-pkg://test/{}", pkg.name());
let should_fail = responder::AtomicToggle::new(true);
let served_repository = repo
.server()
.response_overrider(responder::Toggleable::new(&should_fail, responder))
.response_overrider(responder::Filter::new(
responder::is_range_request,
responder::StaticResponseCode::server_error(),
))
.start()
.unwrap();
env.register_repo(&served_repository).await;
// First resolve fails with the expected error.
assert_matches!(env.resolve_package(&pkg_url).await, Err(error) if error == failure_error);
// Disabling the custom responder allows the subsequent resolves to succeed.
should_fail.unset();
let (package_dir, _resolved_context) =
env.resolve_package(&pkg_url).await.expect("package to resolve");
pkg.verify_contents(&package_dir).await.expect("correct package contents");
env.stop().await;
}
#[fuchsia::test]
async fn second_resolve_succeeds_when_far_404() {
let pkg = make_pkg_with_extra_blobs("second_resolve_succeeds_when_far_404", 1).await;
let path_to_override = format!("/blobs/{}", pkg.meta_far_merkle_root());
verify_resolve_fails_then_succeeds(
pkg,
responder::ForPath::new(path_to_override, responder::StaticResponseCode::not_found()),
fidl_fuchsia_pkg::ResolveError::UnavailableBlob,
)
.await
}
#[fuchsia::test]
async fn second_resolve_succeeds_when_blob_404() {
let pkg = make_pkg_with_extra_blobs("second_resolve_succeeds_when_blob_404", 1).await;
let path_to_override = format!(
"/blobs/{}",
MerkleTree::from_reader(
extra_blob_contents("second_resolve_succeeds_when_blob_404", 0).as_slice()
)
.expect("merkle slice")
.root()
);
verify_resolve_fails_then_succeeds(
pkg,
responder::ForPath::new(path_to_override, responder::StaticResponseCode::not_found()),
fidl_fuchsia_pkg::ResolveError::UnavailableBlob,
)
.await
}
#[fuchsia::test]
async fn second_resolve_succeeds_when_far_errors_mid_download() {
let pkg = PackageBuilder::new("second_resolve_succeeds_when_far_errors_mid_download")
.add_resource_at(
"meta/large_file",
vec![0; FILE_SIZE_LARGE_ENOUGH_TO_TRIGGER_HYPER_BATCHING].as_slice(),
)
.build()
.await
.unwrap();
let path_to_override = format!("/blobs/{}", pkg.meta_far_merkle_root());
verify_resolve_fails_then_succeeds(
pkg,
responder::ForPath::new(path_to_override, responder::OneByteShortThenError),
fidl_fuchsia_pkg::ResolveError::UnavailableBlob,
)
.await
}
#[fuchsia::test]
async fn second_resolve_succeeds_when_blob_errors_mid_download() |
#[fuchsia::test]
async fn second_resolve_succeeds_disconnect_before_far_complete() {
let pkg = PackageBuilder::new("second_resolve_succeeds_disconnect_before_far_complete")
.add_resource_at(
"meta/large_file",
vec![0; FILE_SIZE_LARGE_ENOUGH_TO_TRIGGER_HYPER_BATCHING].as_slice(),
)
.build()
.await
.unwrap();
let path_to_override = format!("/blobs/{}", pkg.meta_far_merkle_root());
verify_resolve_fails_then_succeeds(
pkg,
responder::ForPath::new(path_to_override, responder::OneByteShortThenDisconnect),
fidl_fuchsia_pkg::ResolveError::UnavailableBlob,
)
.await
}
#[fuchsia::test]
async fn second_resolve_succeeds_disconnect_before_blob_complete() {
let blob = vec![0; FILE_SIZE_LARGE_ENOUGH_TO_TRIGGER_HYPER_BATCHING];
let pkg = PackageBuilder::new("second_resolve_succeeds_disconnect_before_blob_complete")
.add_resource_at("blobbity/blob", blob.as_slice())
.build()
.await
.unwrap();
let path_to_override = format!(
"/blobs/{}",
MerkleTree::from_reader(blob.as_slice()).expect("merkle slice").root()
);
verify_resolve_fails_then_succeeds(
pkg,
responder::ForPath::new(path_to_override, responder::OneByteShortThenDisconnect),
fidl_fuchsia_pkg::ResolveError::UnavailableBlob,
)
.await
}
#[fuchsia::test]
async fn second_resolve_succeeds_when_far_corrupted() {
let pkg = make_pkg_with_extra_blobs("second_resolve_succeeds_when_far_corrupted", 1).await;
let path_to_override = format!("/blobs/{}", pkg.meta_far_merkle_root());
verify_resolve_fails_then_succeeds(
pkg,
responder::ForPath::new(path_to_override, responder::OneByteFlipped),
fidl_fuchsia_pkg::ResolveError::Io,
)
.await
}
#[fuchsia::test]
async fn second_resolve_succeeds_when_blob_corrupted() {
let pkg = make_pkg_with_extra_blobs("second_resolve_succeeds_when_blob_corrupted", 1).await;
let blob = extra_blob_contents("second_resolve_succeeds_when_blob_corrupted", 0);
let path_to_override = format!(
"/blobs/{}",
MerkleTree::from_reader(blob.as_slice()).expect("merkle slice").root()
);
verify_resolve_fails_then_succeeds(
pkg,
responder::ForPath::new(path_to_override, responder::OneByteFlipped),
fidl_fuchsia_pkg::ResolveError::Io,
)
.await
}
#[fuchsia::test]
async fn second_resolve_succeeds_when_tuf_metadata_update_fails() {
// pkg-resolver uses tuf::client::Client::with_trusted_root_keys to create its TUF client.
// That method will only retrieve the specified version of the root metadata (1 for these
// tests), with the rest of the metadata being retrieved during the first update. This means
// that hanging all attempts for 2.snapshot.json metadata will allow tuf client creation to
// succeed but still fail tuf client update.
// We want to specifically verify recovery from update failure because if creation fails,
// pkg-resolver will not make a Repository object, so the next resolve attempt would try again
// from scratch, but if update fails, pkg-resolver will keep its Repository object which
// contains a rust-tuf client in a possibly invalid state, and we want to verify that
// pkg-resolver calls update on the client again and that this update recovers the client.
let pkg = PackageBuilder::new("second_resolve_succeeds_when_tuf_metadata_update_fails")
.build()
.await
.unwrap();
verify_resolve_fails_then_succeeds(
pkg,
responder::ForPath::new("/2.snapshot.json", responder::OneByteShortThenDisconnect),
fidl_fuchsia_pkg::ResolveError::Internal,
)
.await
}
// The hyper clients used by the pkg-resolver to download blobs and TUF metadata sometimes end up
// waiting on operations on their TCP connections that will never return (e.g. because of an
// upstream network partition). To detect this, the pkg-resolver wraps the hyper client response
// futures with timeout futures. To recover from this, the pkg-resolver drops the hyper client
// response futures when the timeouts are hit. This recovery plan requires that dropping the hyper
// response future causes hyper to close the underlying TCP connection and create a new one the
// next time hyper is asked to perform a network operation. This assumption holds for http1, but
// not for http2.
//
// This test verifies the "dropping a hyper response future prevents the underlying connection
// from being reused" requirement. It does so by verifying that if a resolve fails due to a blob
// download timeout and the resolve is retried, the retry will cause pkg-resolver to make an
// additional TCP connection to the blob mirror.
//
// This test uses https because the test exists to catch | {
let blob = vec![0; FILE_SIZE_LARGE_ENOUGH_TO_TRIGGER_HYPER_BATCHING];
let pkg = PackageBuilder::new("second_resolve_succeeds_when_blob_errors_mid_download")
.add_resource_at("blobbity/blob", blob.as_slice())
.build()
.await
.unwrap();
let path_to_override = format!(
"/blobs/{}",
MerkleTree::from_reader(blob.as_slice()).expect("merkle slice").root()
);
verify_resolve_fails_then_succeeds(
pkg,
responder::ForPath::new(path_to_override, responder::OneByteShortThenError),
fidl_fuchsia_pkg::ResolveError::UnavailableBlob,
)
.await
} | identifier_body |
resolve_recovers_from_http_errors.rs | HttpResponder>(
pkg: Package,
responder: H,
failure_error: fidl_fuchsia_pkg::ResolveError,
) {
let env = TestEnvBuilder::new().build().await;
let repo = Arc::new(
RepositoryBuilder::from_template_dir(EMPTY_REPO_PATH)
.add_package(&pkg)
.build()
.await
.unwrap(),
);
let pkg_url = format!("fuchsia-pkg://test/{}", pkg.name());
let should_fail = responder::AtomicToggle::new(true);
let served_repository = repo
.server()
.response_overrider(responder::Toggleable::new(&should_fail, responder))
.response_overrider(responder::Filter::new(
responder::is_range_request,
responder::StaticResponseCode::server_error(),
))
.start()
.unwrap();
env.register_repo(&served_repository).await;
// First resolve fails with the expected error.
assert_matches!(env.resolve_package(&pkg_url).await, Err(error) if error == failure_error);
// Disabling the custom responder allows the subsequent resolves to succeed.
should_fail.unset();
let (package_dir, _resolved_context) =
env.resolve_package(&pkg_url).await.expect("package to resolve");
pkg.verify_contents(&package_dir).await.expect("correct package contents");
env.stop().await;
}
#[fuchsia::test]
async fn second_resolve_succeeds_when_far_404() {
let pkg = make_pkg_with_extra_blobs("second_resolve_succeeds_when_far_404", 1).await;
let path_to_override = format!("/blobs/{}", pkg.meta_far_merkle_root());
verify_resolve_fails_then_succeeds(
pkg,
responder::ForPath::new(path_to_override, responder::StaticResponseCode::not_found()),
fidl_fuchsia_pkg::ResolveError::UnavailableBlob,
)
.await
}
#[fuchsia::test]
async fn second_resolve_succeeds_when_blob_404() {
let pkg = make_pkg_with_extra_blobs("second_resolve_succeeds_when_blob_404", 1).await;
let path_to_override = format!(
"/blobs/{}",
MerkleTree::from_reader(
extra_blob_contents("second_resolve_succeeds_when_blob_404", 0).as_slice()
)
.expect("merkle slice")
.root()
);
verify_resolve_fails_then_succeeds(
pkg,
responder::ForPath::new(path_to_override, responder::StaticResponseCode::not_found()),
fidl_fuchsia_pkg::ResolveError::UnavailableBlob,
)
.await
}
#[fuchsia::test]
async fn second_resolve_succeeds_when_far_errors_mid_download() {
let pkg = PackageBuilder::new("second_resolve_succeeds_when_far_errors_mid_download")
.add_resource_at(
"meta/large_file",
vec![0; FILE_SIZE_LARGE_ENOUGH_TO_TRIGGER_HYPER_BATCHING].as_slice(),
)
.build()
.await
.unwrap();
let path_to_override = format!("/blobs/{}", pkg.meta_far_merkle_root());
verify_resolve_fails_then_succeeds(
pkg,
responder::ForPath::new(path_to_override, responder::OneByteShortThenError),
fidl_fuchsia_pkg::ResolveError::UnavailableBlob,
)
.await
}
#[fuchsia::test]
async fn second_resolve_succeeds_when_blob_errors_mid_download() {
let blob = vec![0; FILE_SIZE_LARGE_ENOUGH_TO_TRIGGER_HYPER_BATCHING];
let pkg = PackageBuilder::new("second_resolve_succeeds_when_blob_errors_mid_download")
.add_resource_at("blobbity/blob", blob.as_slice())
.build()
.await
.unwrap();
let path_to_override = format!(
"/blobs/{}",
MerkleTree::from_reader(blob.as_slice()).expect("merkle slice").root()
);
verify_resolve_fails_then_succeeds(
pkg,
responder::ForPath::new(path_to_override, responder::OneByteShortThenError),
fidl_fuchsia_pkg::ResolveError::UnavailableBlob,
)
.await
}
#[fuchsia::test]
async fn second_resolve_succeeds_disconnect_before_far_complete() {
let pkg = PackageBuilder::new("second_resolve_succeeds_disconnect_before_far_complete")
.add_resource_at(
"meta/large_file",
vec![0; FILE_SIZE_LARGE_ENOUGH_TO_TRIGGER_HYPER_BATCHING].as_slice(),
)
.build()
.await
.unwrap();
let path_to_override = format!("/blobs/{}", pkg.meta_far_merkle_root());
verify_resolve_fails_then_succeeds(
pkg,
responder::ForPath::new(path_to_override, responder::OneByteShortThenDisconnect),
fidl_fuchsia_pkg::ResolveError::UnavailableBlob,
)
.await
}
#[fuchsia::test]
async fn second_resolve_succeeds_disconnect_before_blob_complete() {
let blob = vec![0; FILE_SIZE_LARGE_ENOUGH_TO_TRIGGER_HYPER_BATCHING];
let pkg = PackageBuilder::new("second_resolve_succeeds_disconnect_before_blob_complete")
.add_resource_at("blobbity/blob", blob.as_slice())
.build()
.await
.unwrap();
let path_to_override = format!(
"/blobs/{}",
MerkleTree::from_reader(blob.as_slice()).expect("merkle slice").root()
);
verify_resolve_fails_then_succeeds(
pkg,
responder::ForPath::new(path_to_override, responder::OneByteShortThenDisconnect),
fidl_fuchsia_pkg::ResolveError::UnavailableBlob,
)
.await
}
#[fuchsia::test]
async fn second_resolve_succeeds_when_far_corrupted() {
let pkg = make_pkg_with_extra_blobs("second_resolve_succeeds_when_far_corrupted", 1).await;
let path_to_override = format!("/blobs/{}", pkg.meta_far_merkle_root());
verify_resolve_fails_then_succeeds(
pkg,
responder::ForPath::new(path_to_override, responder::OneByteFlipped),
fidl_fuchsia_pkg::ResolveError::Io,
)
.await
}
#[fuchsia::test]
async fn second_resolve_succeeds_when_blob_corrupted() {
let pkg = make_pkg_with_extra_blobs("second_resolve_succeeds_when_blob_corrupted", 1).await;
let blob = extra_blob_contents("second_resolve_succeeds_when_blob_corrupted", 0);
let path_to_override = format!(
"/blobs/{}",
MerkleTree::from_reader(blob.as_slice()).expect("merkle slice").root()
);
verify_resolve_fails_then_succeeds(
pkg,
responder::ForPath::new(path_to_override, responder::OneByteFlipped),
fidl_fuchsia_pkg::ResolveError::Io,
)
.await
}
#[fuchsia::test]
async fn second_resolve_succeeds_when_tuf_metadata_update_fails() {
// pkg-resolver uses tuf::client::Client::with_trusted_root_keys to create its TUF client.
// That method will only retrieve the specified version of the root metadata (1 for these
// tests), with the rest of the metadata being retrieved during the first update. This means
// that hanging all attempts for 2.snapshot.json metadata will allow tuf client creation to
// succeed but still fail tuf client update.
// We want to specifically verify recovery from update failure because if creation fails,
// pkg-resolver will not make a Repository object, so the next resolve attempt would try again
// from scratch, but if update fails, pkg-resolver will keep its Repository object which
// contains a rust-tuf client in a possibly invalid state, and we want to verify that
// pkg-resolver calls update on the client again and that this update recovers the client. | .await
.unwrap();
verify_resolve_fails_then_succeeds(
pkg,
responder::ForPath::new("/2.snapshot.json", responder::OneByteShortThenDisconnect),
fidl_fuchsia_pkg::ResolveError::Internal,
)
.await
}
// The hyper clients used by the pkg-resolver to download blobs and TUF metadata sometimes end up
// waiting on operations on their TCP connections that will never return (e.g. because of an
// upstream network partition). To detect this, the pkg-resolver wraps the hyper client response
// futures with timeout futures. To recover from this, the pkg-resolver drops the hyper client
// response futures when the timeouts are hit. This recovery plan requires that dropping the hyper
// response future causes hyper to close the underlying TCP connection and create a new one the
// next time hyper is asked to perform a network operation. This assumption holds for http1, but
// not for http2.
//
// This test verifies the "dropping a hyper response future prevents the underlying connection
// from being reused" requirement. It does so by verifying that if a resolve fails due to a blob
// download timeout and the resolve is retried, the retry will cause pkg-resolver to make an
// additional TCP connection to the blob mirror.
//
// This test uses https because the test exists to catch changes to | let pkg = PackageBuilder::new("second_resolve_succeeds_when_tuf_metadata_update_fails")
.build() | random_line_split |
resolve_recovers_from_http_errors.rs | HttpResponder>(
pkg: Package,
responder: H,
failure_error: fidl_fuchsia_pkg::ResolveError,
) {
let env = TestEnvBuilder::new().build().await;
let repo = Arc::new(
RepositoryBuilder::from_template_dir(EMPTY_REPO_PATH)
.add_package(&pkg)
.build()
.await
.unwrap(),
);
let pkg_url = format!("fuchsia-pkg://test/{}", pkg.name());
let should_fail = responder::AtomicToggle::new(true);
let served_repository = repo
.server()
.response_overrider(responder::Toggleable::new(&should_fail, responder))
.response_overrider(responder::Filter::new(
responder::is_range_request,
responder::StaticResponseCode::server_error(),
))
.start()
.unwrap();
env.register_repo(&served_repository).await;
// First resolve fails with the expected error.
assert_matches!(env.resolve_package(&pkg_url).await, Err(error) if error == failure_error);
// Disabling the custom responder allows the subsequent resolves to succeed.
should_fail.unset();
let (package_dir, _resolved_context) =
env.resolve_package(&pkg_url).await.expect("package to resolve");
pkg.verify_contents(&package_dir).await.expect("correct package contents");
env.stop().await;
}
#[fuchsia::test]
async fn second_resolve_succeeds_when_far_404() {
let pkg = make_pkg_with_extra_blobs("second_resolve_succeeds_when_far_404", 1).await;
let path_to_override = format!("/blobs/{}", pkg.meta_far_merkle_root());
verify_resolve_fails_then_succeeds(
pkg,
responder::ForPath::new(path_to_override, responder::StaticResponseCode::not_found()),
fidl_fuchsia_pkg::ResolveError::UnavailableBlob,
)
.await
}
#[fuchsia::test]
async fn | () {
let pkg = make_pkg_with_extra_blobs("second_resolve_succeeds_when_blob_404", 1).await;
let path_to_override = format!(
"/blobs/{}",
MerkleTree::from_reader(
extra_blob_contents("second_resolve_succeeds_when_blob_404", 0).as_slice()
)
.expect("merkle slice")
.root()
);
verify_resolve_fails_then_succeeds(
pkg,
responder::ForPath::new(path_to_override, responder::StaticResponseCode::not_found()),
fidl_fuchsia_pkg::ResolveError::UnavailableBlob,
)
.await
}
#[fuchsia::test]
async fn second_resolve_succeeds_when_far_errors_mid_download() {
let pkg = PackageBuilder::new("second_resolve_succeeds_when_far_errors_mid_download")
.add_resource_at(
"meta/large_file",
vec![0; FILE_SIZE_LARGE_ENOUGH_TO_TRIGGER_HYPER_BATCHING].as_slice(),
)
.build()
.await
.unwrap();
let path_to_override = format!("/blobs/{}", pkg.meta_far_merkle_root());
verify_resolve_fails_then_succeeds(
pkg,
responder::ForPath::new(path_to_override, responder::OneByteShortThenError),
fidl_fuchsia_pkg::ResolveError::UnavailableBlob,
)
.await
}
#[fuchsia::test]
async fn second_resolve_succeeds_when_blob_errors_mid_download() {
let blob = vec![0; FILE_SIZE_LARGE_ENOUGH_TO_TRIGGER_HYPER_BATCHING];
let pkg = PackageBuilder::new("second_resolve_succeeds_when_blob_errors_mid_download")
.add_resource_at("blobbity/blob", blob.as_slice())
.build()
.await
.unwrap();
let path_to_override = format!(
"/blobs/{}",
MerkleTree::from_reader(blob.as_slice()).expect("merkle slice").root()
);
verify_resolve_fails_then_succeeds(
pkg,
responder::ForPath::new(path_to_override, responder::OneByteShortThenError),
fidl_fuchsia_pkg::ResolveError::UnavailableBlob,
)
.await
}
#[fuchsia::test]
async fn second_resolve_succeeds_disconnect_before_far_complete() {
let pkg = PackageBuilder::new("second_resolve_succeeds_disconnect_before_far_complete")
.add_resource_at(
"meta/large_file",
vec![0; FILE_SIZE_LARGE_ENOUGH_TO_TRIGGER_HYPER_BATCHING].as_slice(),
)
.build()
.await
.unwrap();
let path_to_override = format!("/blobs/{}", pkg.meta_far_merkle_root());
verify_resolve_fails_then_succeeds(
pkg,
responder::ForPath::new(path_to_override, responder::OneByteShortThenDisconnect),
fidl_fuchsia_pkg::ResolveError::UnavailableBlob,
)
.await
}
#[fuchsia::test]
async fn second_resolve_succeeds_disconnect_before_blob_complete() {
let blob = vec![0; FILE_SIZE_LARGE_ENOUGH_TO_TRIGGER_HYPER_BATCHING];
let pkg = PackageBuilder::new("second_resolve_succeeds_disconnect_before_blob_complete")
.add_resource_at("blobbity/blob", blob.as_slice())
.build()
.await
.unwrap();
let path_to_override = format!(
"/blobs/{}",
MerkleTree::from_reader(blob.as_slice()).expect("merkle slice").root()
);
verify_resolve_fails_then_succeeds(
pkg,
responder::ForPath::new(path_to_override, responder::OneByteShortThenDisconnect),
fidl_fuchsia_pkg::ResolveError::UnavailableBlob,
)
.await
}
#[fuchsia::test]
async fn second_resolve_succeeds_when_far_corrupted() {
let pkg = make_pkg_with_extra_blobs("second_resolve_succeeds_when_far_corrupted", 1).await;
let path_to_override = format!("/blobs/{}", pkg.meta_far_merkle_root());
verify_resolve_fails_then_succeeds(
pkg,
responder::ForPath::new(path_to_override, responder::OneByteFlipped),
fidl_fuchsia_pkg::ResolveError::Io,
)
.await
}
#[fuchsia::test]
async fn second_resolve_succeeds_when_blob_corrupted() {
let pkg = make_pkg_with_extra_blobs("second_resolve_succeeds_when_blob_corrupted", 1).await;
let blob = extra_blob_contents("second_resolve_succeeds_when_blob_corrupted", 0);
let path_to_override = format!(
"/blobs/{}",
MerkleTree::from_reader(blob.as_slice()).expect("merkle slice").root()
);
verify_resolve_fails_then_succeeds(
pkg,
responder::ForPath::new(path_to_override, responder::OneByteFlipped),
fidl_fuchsia_pkg::ResolveError::Io,
)
.await
}
#[fuchsia::test]
async fn second_resolve_succeeds_when_tuf_metadata_update_fails() {
// pkg-resolver uses tuf::client::Client::with_trusted_root_keys to create its TUF client.
// That method will only retrieve the specified version of the root metadata (1 for these
// tests), with the rest of the metadata being retrieved during the first update. This means
// that hanging all attempts for 2.snapshot.json metadata will allow tuf client creation to
// succeed but still fail tuf client update.
// We want to specifically verify recovery from update failure because if creation fails,
// pkg-resolver will not make a Repository object, so the next resolve attempt would try again
// from scratch, but if update fails, pkg-resolver will keep its Repository object which
// contains a rust-tuf client in a possibly invalid state, and we want to verify that
// pkg-resolver calls update on the client again and that this update recovers the client.
let pkg = PackageBuilder::new("second_resolve_succeeds_when_tuf_metadata_update_fails")
.build()
.await
.unwrap();
verify_resolve_fails_then_succeeds(
pkg,
responder::ForPath::new("/2.snapshot.json", responder::OneByteShortThenDisconnect),
fidl_fuchsia_pkg::ResolveError::Internal,
)
.await
}
// The hyper clients used by the pkg-resolver to download blobs and TUF metadata sometimes end up
// waiting on operations on their TCP connections that will never return (e.g. because of an
// upstream network partition). To detect this, the pkg-resolver wraps the hyper client response
// futures with timeout futures. To recover from this, the pkg-resolver drops the hyper client
// response futures when the timeouts are hit. This recovery plan requires that dropping the hyper
// response future causes hyper to close the underlying TCP connection and create a new one the
// next time hyper is asked to perform a network operation. This assumption holds for http1, but
// not for http2.
//
// This test verifies the "dropping a hyper response future prevents the underlying connection
// from being reused" requirement. It does so by verifying that if a resolve fails due to a blob
// download timeout and the resolve is retried, the retry will cause pkg-resolver to make an
// additional TCP connection to the blob mirror.
//
// This test uses https because the test exists to catch changes | second_resolve_succeeds_when_blob_404 | identifier_name |
value_textbox.rs | during editing;
/// this can be used to report errors further back up the tree.
pub struct ValueTextBox<T> {
child: TextBox<String>,
formatter: Box<dyn Formatter<T>>,
callback: Option<Box<dyn ValidationDelegate>>,
is_editing: bool,
validate_while_editing: bool,
update_data_while_editing: bool,
/// the last data that this textbox saw or created.
/// This is used to determine when a change to the data is originating
/// elsewhere in the application, which we need to special-case
last_known_data: Option<T>,
force_selection: Option<Selection>,
old_buffer: String,
buffer: String,
}
/// A type that can be registered to receive callbacks as the state of a
/// [`ValueTextBox`] changes.
pub trait ValidationDelegate {
/// Called with a [`TextBoxEvent`] whenever the validation state of a
/// [`ValueTextBox`] changes.
fn event(&mut self, ctx: &mut EventCtx, event: TextBoxEvent, current_text: &str);
}
/// Events sent to a [`ValidationDelegate`].
pub enum TextBoxEvent {
/// The textbox began editing.
Began,
/// An edit occured which was considered valid by the [`Formatter`].
Changed,
/// An edit occured which was rejected by the [`Formatter`].
PartiallyInvalid(ValidationError),
/// The user attempted to finish editing, but the input was not valid.
Invalid(ValidationError),
/// The user finished editing, with valid input.
Complete,
/// Editing was cancelled.
Cancel,
}
impl TextBox<String> {
/// Turn this `TextBox` into a [`ValueTextBox`], using the [`Formatter`] to
/// manage the value.
///
/// For simple value formatting, you can use the [`ParseFormatter`].
///
/// [`ValueTextBox`]: ValueTextBox
/// [`Formatter`]: crate::text::format::Formatter
/// [`ParseFormatter`]: crate::text::format::ParseFormatter
pub fn with_formatter<T: Data>(
self,
formatter: impl Formatter<T> + 'static,
) -> ValueTextBox<T> {
ValueTextBox::new(self, formatter)
}
}
impl<T: Data> ValueTextBox<T> {
/// Create a new `ValueTextBox` from a normal [`TextBox`] and a [`Formatter`].
///
/// [`TextBox`]: crate::widget::TextBox
/// [`Formatter`]: crate::text::format::Formatter
pub fn new(mut child: TextBox<String>, formatter: impl Formatter<T> + 'static) -> Self {
child.text_mut().borrow_mut().send_notification_on_return = true;
child.text_mut().borrow_mut().send_notification_on_cancel = true;
child.handles_tab_notifications = false;
ValueTextBox {
child,
formatter: Box::new(formatter),
callback: None,
is_editing: false, | last_known_data: None,
validate_while_editing: true,
update_data_while_editing: false,
old_buffer: String::new(),
buffer: String::new(),
force_selection: None,
}
}
/// Builder-style method to set an optional [`ValidationDelegate`] on this
/// textbox.
pub fn delegate(mut self, delegate: impl ValidationDelegate + 'static) -> Self {
self.callback = Some(Box::new(delegate));
self
}
/// Builder-style method to set whether or not this text box validates
/// its contents during editing.
///
/// If `true` (the default) edits that fail validation
/// ([`Formatter::validate_partial_input`]) will be rejected. If `false`,
/// those edits will be accepted, and the text box will be updated.
pub fn validate_while_editing(mut self, validate: bool) -> Self {
self.validate_while_editing = validate;
self
}
/// Builder-style method to set whether or not this text box updates the
/// incoming data during editing.
///
/// If `false` (the default) the data is only updated when editing completes.
pub fn update_data_while_editing(mut self, flag: bool) -> Self {
self.update_data_while_editing = flag;
self
}
fn complete(&mut self, ctx: &mut EventCtx, data: &mut T) -> bool {
match self.formatter.value(&self.buffer) {
Ok(new_data) => {
*data = new_data;
self.buffer = self.formatter.format(data);
self.is_editing = false;
ctx.request_update();
self.send_event(ctx, TextBoxEvent::Complete);
true
}
Err(err) => {
if self.child.text().can_write() {
if let Some(inval) = self
.child
.text_mut()
.borrow_mut()
.set_selection(Selection::new(0, self.buffer.len()))
{
ctx.invalidate_text_input(inval);
}
}
self.send_event(ctx, TextBoxEvent::Invalid(err));
// our content isn't valid
// ideally we would flash the background or something
false
}
}
}
fn cancel(&mut self, ctx: &mut EventCtx, data: &T) {
self.is_editing = false;
self.buffer = self.formatter.format(data);
ctx.request_update();
ctx.resign_focus();
self.send_event(ctx, TextBoxEvent::Cancel);
}
fn begin(&mut self, ctx: &mut EventCtx, data: &T) {
self.is_editing = true;
self.buffer = self.formatter.format_for_editing(data);
self.last_known_data = Some(data.clone());
ctx.request_update();
self.send_event(ctx, TextBoxEvent::Began);
}
fn send_event(&mut self, ctx: &mut EventCtx, event: TextBoxEvent) {
if let Some(delegate) = self.callback.as_mut() {
delegate.event(ctx, event, &self.buffer)
}
}
}
impl<T: Data + std::fmt::Debug> Widget<T> for ValueTextBox<T> {
#[instrument(
name = "ValueTextBox",
level = "trace",
skip(self, ctx, event, data, env)
)]
fn event(&mut self, ctx: &mut EventCtx, event: &Event, data: &mut T, env: &Env) {
if matches!(event, Event::Command(cmd) if cmd.is(BEGIN_EDITING)) {
return self.begin(ctx, data);
}
if self.is_editing {
// if we reject an edit we want to reset the selection
let pre_sel = if self.child.text().can_read() {
Some(self.child.text().borrow().selection())
} else {
None
};
match event {
// this is caused by an external focus change, like the mouse being clicked
// elsewhere.
Event::Command(cmd) if cmd.is(COMPLETE_EDITING) => {
if !self.complete(ctx, data) {
self.cancel(ctx, data);
}
return;
}
Event::Notification(cmd) if cmd.is(TextComponent::TAB) => {
ctx.set_handled();
ctx.request_paint();
if self.complete(ctx, data) {
ctx.focus_next();
}
return;
}
Event::Notification(cmd) if cmd.is(TextComponent::BACKTAB) => {
ctx.request_paint();
ctx.set_handled();
if self.complete(ctx, data) {
ctx.focus_prev();
}
return;
}
Event::Notification(cmd) if cmd.is(TextComponent::RETURN) => {
ctx.set_handled();
if self.complete(ctx, data) {
ctx.resign_focus();
}
return;
}
Event::Notification(cmd) if cmd.is(TextComponent::CANCEL) => {
ctx.set_handled();
self.cancel(ctx, data);
return;
}
event => {
self.child.event(ctx, event, &mut self.buffer, env);
}
}
// if an edit occured, validate it with the formatter
// notifications can arrive before update, so we always ignore them
if !matches!(event, Event::Notification(_)) && self.buffer != self.old_buffer {
let mut validation = self
.formatter
.validate_partial_input(&self.buffer, &self.child.text().borrow().selection());
if self.validate_while_editing {
let new_buf = match (validation.text_change.take(), validation.is_err()) {
(Some(new_text), _) => {
// be helpful: if the formatter is misbehaved, log it.
if self
.formatter
.validate_partial_input(&new_text, &Selection::caret(0))
.is_err()
{
tracing::warn!(
"formatter replacement text does not validate: '{}'",
&new_text
);
None
} else {
Some(new_text)
}
}
(None, true) => Some(self.old_buffer.clone()),
_ => None,
};
let new_sel = match (validation.selection_change.take(), validation.is_err()) {
(Some(new_sel), _) => Some(new_sel),
(None, true) if pre_sel.is_some() => pre_sel,
_ => None,
};
if let Some(new_buf) = new_buf {
self.buffer = new_buf;
}
self.force_selection = new_sel;
if self.update_data_while_editing && !validation.is_err() {
if let Ok(new_data) = self.formatter.value(&self.buffer) {
*data = new_data;
self.last_known_data = Some(data.clone | random_line_split | |
value_textbox.rs | during editing;
/// this can be used to report errors further back up the tree.
pub struct ValueTextBox<T> {
child: TextBox<String>,
formatter: Box<dyn Formatter<T>>,
callback: Option<Box<dyn ValidationDelegate>>,
is_editing: bool,
validate_while_editing: bool,
update_data_while_editing: bool,
/// the last data that this textbox saw or created.
/// This is used to determine when a change to the data is originating
/// elsewhere in the application, which we need to special-case
last_known_data: Option<T>,
force_selection: Option<Selection>,
old_buffer: String,
buffer: String,
}
/// A type that can be registered to receive callbacks as the state of a
/// [`ValueTextBox`] changes.
pub trait ValidationDelegate {
/// Called with a [`TextBoxEvent`] whenever the validation state of a
/// [`ValueTextBox`] changes.
fn event(&mut self, ctx: &mut EventCtx, event: TextBoxEvent, current_text: &str);
}
/// Events sent to a [`ValidationDelegate`].
pub enum TextBoxEvent {
/// The textbox began editing.
Began,
/// An edit occured which was considered valid by the [`Formatter`].
Changed,
/// An edit occured which was rejected by the [`Formatter`].
PartiallyInvalid(ValidationError),
/// The user attempted to finish editing, but the input was not valid.
Invalid(ValidationError),
/// The user finished editing, with valid input.
Complete,
/// Editing was cancelled.
Cancel,
}
impl TextBox<String> {
/// Turn this `TextBox` into a [`ValueTextBox`], using the [`Formatter`] to
/// manage the value.
///
/// For simple value formatting, you can use the [`ParseFormatter`].
///
/// [`ValueTextBox`]: ValueTextBox
/// [`Formatter`]: crate::text::format::Formatter
/// [`ParseFormatter`]: crate::text::format::ParseFormatter
pub fn with_formatter<T: Data>(
self,
formatter: impl Formatter<T> + 'static,
) -> ValueTextBox<T> {
ValueTextBox::new(self, formatter)
}
}
impl<T: Data> ValueTextBox<T> {
/// Create a new `ValueTextBox` from a normal [`TextBox`] and a [`Formatter`].
///
/// [`TextBox`]: crate::widget::TextBox
/// [`Formatter`]: crate::text::format::Formatter
pub fn new(mut child: TextBox<String>, formatter: impl Formatter<T> + 'static) -> Self {
child.text_mut().borrow_mut().send_notification_on_return = true;
child.text_mut().borrow_mut().send_notification_on_cancel = true;
child.handles_tab_notifications = false;
ValueTextBox {
child,
formatter: Box::new(formatter),
callback: None,
is_editing: false,
last_known_data: None,
validate_while_editing: true,
update_data_while_editing: false,
old_buffer: String::new(),
buffer: String::new(),
force_selection: None,
}
}
/// Builder-style method to set an optional [`ValidationDelegate`] on this
/// textbox.
pub fn delegate(mut self, delegate: impl ValidationDelegate + 'static) -> Self {
self.callback = Some(Box::new(delegate));
self
}
/// Builder-style method to set whether or not this text box validates
/// its contents during editing.
///
/// If `true` (the default) edits that fail validation
/// ([`Formatter::validate_partial_input`]) will be rejected. If `false`,
/// those edits will be accepted, and the text box will be updated.
pub fn validate_while_editing(mut self, validate: bool) -> Self {
self.validate_while_editing = validate;
self
}
/// Builder-style method to set whether or not this text box updates the
/// incoming data during editing.
///
/// If `false` (the default) the data is only updated when editing completes.
pub fn update_data_while_editing(mut self, flag: bool) -> Self {
self.update_data_while_editing = flag;
self
}
fn complete(&mut self, ctx: &mut EventCtx, data: &mut T) -> bool {
match self.formatter.value(&self.buffer) {
Ok(new_data) => {
*data = new_data;
self.buffer = self.formatter.format(data);
self.is_editing = false;
ctx.request_update();
self.send_event(ctx, TextBoxEvent::Complete);
true
}
Err(err) => {
if self.child.text().can_write() {
if let Some(inval) = self
.child
.text_mut()
.borrow_mut()
.set_selection(Selection::new(0, self.buffer.len()))
{
ctx.invalidate_text_input(inval);
}
}
self.send_event(ctx, TextBoxEvent::Invalid(err));
// our content isn't valid
// ideally we would flash the background or something
false
}
}
}
fn | (&mut self, ctx: &mut EventCtx, data: &T) {
self.is_editing = false;
self.buffer = self.formatter.format(data);
ctx.request_update();
ctx.resign_focus();
self.send_event(ctx, TextBoxEvent::Cancel);
}
fn begin(&mut self, ctx: &mut EventCtx, data: &T) {
self.is_editing = true;
self.buffer = self.formatter.format_for_editing(data);
self.last_known_data = Some(data.clone());
ctx.request_update();
self.send_event(ctx, TextBoxEvent::Began);
}
fn send_event(&mut self, ctx: &mut EventCtx, event: TextBoxEvent) {
if let Some(delegate) = self.callback.as_mut() {
delegate.event(ctx, event, &self.buffer)
}
}
}
impl<T: Data + std::fmt::Debug> Widget<T> for ValueTextBox<T> {
#[instrument(
name = "ValueTextBox",
level = "trace",
skip(self, ctx, event, data, env)
)]
fn event(&mut self, ctx: &mut EventCtx, event: &Event, data: &mut T, env: &Env) {
if matches!(event, Event::Command(cmd) if cmd.is(BEGIN_EDITING)) {
return self.begin(ctx, data);
}
if self.is_editing {
// if we reject an edit we want to reset the selection
let pre_sel = if self.child.text().can_read() {
Some(self.child.text().borrow().selection())
} else {
None
};
match event {
// this is caused by an external focus change, like the mouse being clicked
// elsewhere.
Event::Command(cmd) if cmd.is(COMPLETE_EDITING) => {
if !self.complete(ctx, data) {
self.cancel(ctx, data);
}
return;
}
Event::Notification(cmd) if cmd.is(TextComponent::TAB) => {
ctx.set_handled();
ctx.request_paint();
if self.complete(ctx, data) {
ctx.focus_next();
}
return;
}
Event::Notification(cmd) if cmd.is(TextComponent::BACKTAB) => {
ctx.request_paint();
ctx.set_handled();
if self.complete(ctx, data) {
ctx.focus_prev();
}
return;
}
Event::Notification(cmd) if cmd.is(TextComponent::RETURN) => {
ctx.set_handled();
if self.complete(ctx, data) {
ctx.resign_focus();
}
return;
}
Event::Notification(cmd) if cmd.is(TextComponent::CANCEL) => {
ctx.set_handled();
self.cancel(ctx, data);
return;
}
event => {
self.child.event(ctx, event, &mut self.buffer, env);
}
}
// if an edit occured, validate it with the formatter
// notifications can arrive before update, so we always ignore them
if !matches!(event, Event::Notification(_)) && self.buffer != self.old_buffer {
let mut validation = self
.formatter
.validate_partial_input(&self.buffer, &self.child.text().borrow().selection());
if self.validate_while_editing {
let new_buf = match (validation.text_change.take(), validation.is_err()) {
(Some(new_text), _) => {
// be helpful: if the formatter is misbehaved, log it.
if self
.formatter
.validate_partial_input(&new_text, &Selection::caret(0))
.is_err()
{
tracing::warn!(
"formatter replacement text does not validate: '{}'",
&new_text
);
None
} else {
Some(new_text)
}
}
(None, true) => Some(self.old_buffer.clone()),
_ => None,
};
let new_sel = match (validation.selection_change.take(), validation.is_err()) {
(Some(new_sel), _) => Some(new_sel),
(None, true) if pre_sel.is_some() => pre_sel,
_ => None,
};
if let Some(new_buf) = new_buf {
self.buffer = new_buf;
}
self.force_selection = new_sel;
if self.update_data_while_editing && !validation.is_err() {
if let Ok(new_data) = self.formatter.value(&self.buffer) {
*data = new_data;
self.last_known_data = Some(data | cancel | identifier_name |
value_textbox.rs | during editing;
/// this can be used to report errors further back up the tree.
pub struct ValueTextBox<T> {
child: TextBox<String>,
formatter: Box<dyn Formatter<T>>,
callback: Option<Box<dyn ValidationDelegate>>,
is_editing: bool,
validate_while_editing: bool,
update_data_while_editing: bool,
/// the last data that this textbox saw or created.
/// This is used to determine when a change to the data is originating
/// elsewhere in the application, which we need to special-case
last_known_data: Option<T>,
force_selection: Option<Selection>,
old_buffer: String,
buffer: String,
}
/// A type that can be registered to receive callbacks as the state of a
/// [`ValueTextBox`] changes.
pub trait ValidationDelegate {
/// Called with a [`TextBoxEvent`] whenever the validation state of a
/// [`ValueTextBox`] changes.
fn event(&mut self, ctx: &mut EventCtx, event: TextBoxEvent, current_text: &str);
}
/// Events sent to a [`ValidationDelegate`].
pub enum TextBoxEvent {
/// The textbox began editing.
Began,
/// An edit occured which was considered valid by the [`Formatter`].
Changed,
/// An edit occured which was rejected by the [`Formatter`].
PartiallyInvalid(ValidationError),
/// The user attempted to finish editing, but the input was not valid.
Invalid(ValidationError),
/// The user finished editing, with valid input.
Complete,
/// Editing was cancelled.
Cancel,
}
impl TextBox<String> {
/// Turn this `TextBox` into a [`ValueTextBox`], using the [`Formatter`] to
/// manage the value.
///
/// For simple value formatting, you can use the [`ParseFormatter`].
///
/// [`ValueTextBox`]: ValueTextBox
/// [`Formatter`]: crate::text::format::Formatter
/// [`ParseFormatter`]: crate::text::format::ParseFormatter
pub fn with_formatter<T: Data>(
self,
formatter: impl Formatter<T> + 'static,
) -> ValueTextBox<T> {
ValueTextBox::new(self, formatter)
}
}
impl<T: Data> ValueTextBox<T> {
/// Create a new `ValueTextBox` from a normal [`TextBox`] and a [`Formatter`].
///
/// [`TextBox`]: crate::widget::TextBox
/// [`Formatter`]: crate::text::format::Formatter
pub fn new(mut child: TextBox<String>, formatter: impl Formatter<T> + 'static) -> Self {
child.text_mut().borrow_mut().send_notification_on_return = true;
child.text_mut().borrow_mut().send_notification_on_cancel = true;
child.handles_tab_notifications = false;
ValueTextBox {
child,
formatter: Box::new(formatter),
callback: None,
is_editing: false,
last_known_data: None,
validate_while_editing: true,
update_data_while_editing: false,
old_buffer: String::new(),
buffer: String::new(),
force_selection: None,
}
}
/// Builder-style method to set an optional [`ValidationDelegate`] on this
/// textbox.
pub fn delegate(mut self, delegate: impl ValidationDelegate + 'static) -> Self {
self.callback = Some(Box::new(delegate));
self
}
/// Builder-style method to set whether or not this text box validates
/// its contents during editing.
///
/// If `true` (the default) edits that fail validation
/// ([`Formatter::validate_partial_input`]) will be rejected. If `false`,
/// those edits will be accepted, and the text box will be updated.
pub fn validate_while_editing(mut self, validate: bool) -> Self {
self.validate_while_editing = validate;
self
}
/// Builder-style method to set whether or not this text box updates the
/// incoming data during editing.
///
/// If `false` (the default) the data is only updated when editing completes.
pub fn update_data_while_editing(mut self, flag: bool) -> Self {
self.update_data_while_editing = flag;
self
}
fn complete(&mut self, ctx: &mut EventCtx, data: &mut T) -> bool | }
self.send_event(ctx, TextBoxEvent::Invalid(err));
// our content isn't valid
// ideally we would flash the background or something
false
}
}
}
fn cancel(&mut self, ctx: &mut EventCtx, data: &T) {
self.is_editing = false;
self.buffer = self.formatter.format(data);
ctx.request_update();
ctx.resign_focus();
self.send_event(ctx, TextBoxEvent::Cancel);
}
fn begin(&mut self, ctx: &mut EventCtx, data: &T) {
self.is_editing = true;
self.buffer = self.formatter.format_for_editing(data);
self.last_known_data = Some(data.clone());
ctx.request_update();
self.send_event(ctx, TextBoxEvent::Began);
}
fn send_event(&mut self, ctx: &mut EventCtx, event: TextBoxEvent) {
if let Some(delegate) = self.callback.as_mut() {
delegate.event(ctx, event, &self.buffer)
}
}
}
impl<T: Data + std::fmt::Debug> Widget<T> for ValueTextBox<T> {
#[instrument(
name = "ValueTextBox",
level = "trace",
skip(self, ctx, event, data, env)
)]
fn event(&mut self, ctx: &mut EventCtx, event: &Event, data: &mut T, env: &Env) {
if matches!(event, Event::Command(cmd) if cmd.is(BEGIN_EDITING)) {
return self.begin(ctx, data);
}
if self.is_editing {
// if we reject an edit we want to reset the selection
let pre_sel = if self.child.text().can_read() {
Some(self.child.text().borrow().selection())
} else {
None
};
match event {
// this is caused by an external focus change, like the mouse being clicked
// elsewhere.
Event::Command(cmd) if cmd.is(COMPLETE_EDITING) => {
if !self.complete(ctx, data) {
self.cancel(ctx, data);
}
return;
}
Event::Notification(cmd) if cmd.is(TextComponent::TAB) => {
ctx.set_handled();
ctx.request_paint();
if self.complete(ctx, data) {
ctx.focus_next();
}
return;
}
Event::Notification(cmd) if cmd.is(TextComponent::BACKTAB) => {
ctx.request_paint();
ctx.set_handled();
if self.complete(ctx, data) {
ctx.focus_prev();
}
return;
}
Event::Notification(cmd) if cmd.is(TextComponent::RETURN) => {
ctx.set_handled();
if self.complete(ctx, data) {
ctx.resign_focus();
}
return;
}
Event::Notification(cmd) if cmd.is(TextComponent::CANCEL) => {
ctx.set_handled();
self.cancel(ctx, data);
return;
}
event => {
self.child.event(ctx, event, &mut self.buffer, env);
}
}
// if an edit occured, validate it with the formatter
// notifications can arrive before update, so we always ignore them
if !matches!(event, Event::Notification(_)) && self.buffer != self.old_buffer {
let mut validation = self
.formatter
.validate_partial_input(&self.buffer, &self.child.text().borrow().selection());
if self.validate_while_editing {
let new_buf = match (validation.text_change.take(), validation.is_err()) {
(Some(new_text), _) => {
// be helpful: if the formatter is misbehaved, log it.
if self
.formatter
.validate_partial_input(&new_text, &Selection::caret(0))
.is_err()
{
tracing::warn!(
"formatter replacement text does not validate: '{}'",
&new_text
);
None
} else {
Some(new_text)
}
}
(None, true) => Some(self.old_buffer.clone()),
_ => None,
};
let new_sel = match (validation.selection_change.take(), validation.is_err()) {
(Some(new_sel), _) => Some(new_sel),
(None, true) if pre_sel.is_some() => pre_sel,
_ => None,
};
if let Some(new_buf) = new_buf {
self.buffer = new_buf;
}
self.force_selection = new_sel;
if self.update_data_while_editing && !validation.is_err() {
if let Ok(new_data) = self.formatter.value(&self.buffer) {
*data = new_data;
self.last_known_data = Some(data | {
match self.formatter.value(&self.buffer) {
Ok(new_data) => {
*data = new_data;
self.buffer = self.formatter.format(data);
self.is_editing = false;
ctx.request_update();
self.send_event(ctx, TextBoxEvent::Complete);
true
}
Err(err) => {
if self.child.text().can_write() {
if let Some(inval) = self
.child
.text_mut()
.borrow_mut()
.set_selection(Selection::new(0, self.buffer.len()))
{
ctx.invalidate_text_input(inval);
} | identifier_body |
has_loc.rs | ned::Spanned;
use syn::Attribute;
use syn::Data;
use syn::DataEnum;
use syn::DataStruct;
use syn::DeriveInput;
use syn::Error;
use syn::Lit;
use syn::Meta;
use syn::NestedMeta;
use syn::Result;
use syn::Variant;
use crate::simple_type::SimpleType;
use crate::util::InterestingFields;
/// Builds a HasLoc impl.
///
/// The build rules are as follows:
/// - For a struct it just looks for a field with a type of LocId.
/// - For an enum it does a match on each variant.
/// - For either tuple variants or struct variants it looks for a field with a
/// type of LocId.
/// - For a tuple variant with a single non-LocId type and calls `.loc_id()`
/// on that field.
/// - Otherwise you can specify `#[has_loc(n)]` where `n` is the index of the
/// field to call `.loc_id()` on. `#[has_loc(n)]` can also be used on the
/// whole enum to provide a default index.
///
pub(crate) fn | (input: TokenStream) -> Result<TokenStream> {
let input = syn::parse2::<DeriveInput>(input)?;
match &input.data {
Data::Enum(data) => build_has_loc_enum(&input, data),
Data::Struct(data) => build_has_loc_struct(&input, data),
Data::Union(_) => Err(Error::new(input.span(), "Union not handled")),
}
}
fn field_might_contain_buried_loc_id(ty: &SimpleType<'_>) -> bool {
if let Some(ident) = ty.get_ident() {
!(ident == "BlockId"
|| ident == "ClassId"
|| ident == "ConstId"
|| ident == "ValueId"
|| ident == "LocalId"
|| ident == "MethodId"
|| ident == "ParamId"
|| ident == "VarId"
|| ident == "usize"
|| ident == "u32")
} else {
true
}
}
fn build_has_loc_struct(input: &DeriveInput, data: &DataStruct) -> Result<TokenStream> {
// struct Foo {
// ...
// loc: LocId,
// }
let struct_name = &input.ident;
let default_select_field = handle_has_loc_attr(&input.attrs)?;
let loc_field = if let Some(f) = default_select_field {
match f.kind {
FieldKind::Named(name) => {
let name = name.to_string();
let field = data
.fields
.iter()
.find(|field| field.ident.as_ref().map_or(false, |id| id == &name))
.ok_or_else(|| Error::new(input.span(), format!("Field '{name}' not found")))?
.ident
.as_ref()
.unwrap();
quote!(#field.loc_id())
}
FieldKind::None => todo!(),
FieldKind::Numbered(_) => todo!(),
}
} else {
let field = data
.fields
.iter()
.enumerate()
.map(|(i, field)| (i, field, SimpleType::from_type(&field.ty)))
.find(|(_, _, ty)| ty.is_based_on("LocId"));
let (idx, field, _) =
field.ok_or_else(|| Error::new(input.span(), "No field with type LocId found"))?;
if let Some(ident) = field.ident.as_ref() {
ident.to_token_stream()
} else {
syn::Index::from(idx).to_token_stream()
}
};
let (impl_generics, ty_generics, where_clause) = input.generics.split_for_impl();
let output = quote!(impl #impl_generics HasLoc for #struct_name #ty_generics #where_clause {
fn loc_id(&self) -> LocId {
self.#loc_field
}
});
Ok(output)
}
fn get_select_field<'a>(
variant: &'a Variant,
default_select_field: &Option<Field<'a>>,
) -> Result<Option<Field<'a>>> {
if let Some(f) = handle_has_loc_attr(&variant.attrs)? {
return Ok(Some(f));
}
if let Some(f) = default_select_field.as_ref() {
return Ok(Some(f.clone()));
}
let mut interesting_fields = InterestingFields::None;
for (idx, field) in variant.fields.iter().enumerate() {
let ty = SimpleType::from_type(&field.ty);
if ty.is_based_on("LocId") {
let kind = if let Some(ident) = field.ident.as_ref() {
// Bar { .., loc: LocId }
FieldKind::Named(Cow::Borrowed(ident))
} else {
// Bar(.., LocId)
FieldKind::Numbered(idx)
};
return Ok(Some(Field { kind, ty }));
} else if field_might_contain_buried_loc_id(&ty) {
// Report the type as 'unknown' because it's not a type that's
// related to LocId.
interesting_fields.add(idx, field.ident.as_ref(), SimpleType::Unknown);
}
}
match interesting_fields {
InterestingFields::None => {
let kind = FieldKind::None;
let ty = SimpleType::Unknown;
Ok(Some(Field { kind, ty }))
}
InterestingFields::One(idx, ident, ty) => {
// There's only a single field that could possibly contain a buried
// LocId.
let kind = ident.map_or_else(
|| FieldKind::Numbered(idx),
|id| FieldKind::Named(Cow::Borrowed(id)),
);
Ok(Some(Field { kind, ty }))
}
InterestingFields::Many => Ok(None),
}
}
fn build_has_loc_enum(input: &DeriveInput, data: &DataEnum) -> Result<TokenStream> {
// enum Foo {
// Bar(.., LocId),
// Baz { .., loc: LocId },
// }
let default_select_field = handle_has_loc_attr(&input.attrs)?;
let enum_name = &input.ident;
let mut variants: Vec<TokenStream> = Vec::new();
for variant in data.variants.iter() {
let select_field = get_select_field(variant, &default_select_field)?;
if let Some(select_field) = select_field {
push_handler(&mut variants, enum_name, variant, select_field);
} else {
return Err(Error::new(
variant.span(),
format!("LocId field not found in variant {}", variant.ident,),
));
}
}
let (impl_generics, ty_generics, where_clause) = input.generics.split_for_impl();
let output = quote!(impl #impl_generics HasLoc for #enum_name #ty_generics #where_clause {
fn loc_id(&self) -> LocId {
match self {
#(#variants),*
}
}
});
Ok(output)
}
#[derive(Clone)]
struct Field<'a> {
kind: FieldKind<'a>,
ty: SimpleType<'a>,
}
#[derive(Clone)]
enum FieldKind<'a> {
Named(Cow<'a, Ident>),
None,
Numbered(usize),
}
fn push_handler(
variants: &mut Vec<TokenStream>,
enum_name: &Ident,
variant: &Variant,
field: Field<'_>,
) {
let variant_name = &variant.ident;
let reference = match (&field.kind, &field.ty) {
(FieldKind::None, _) => quote!(LocId::NONE),
(_, SimpleType::Unknown) => quote!(f.loc_id()),
(_, SimpleType::Unit(_)) => quote!(*f),
(_, SimpleType::Array(_))
| (_, SimpleType::BoxedSlice(_))
| (_, SimpleType::RefSlice(_))
| (_, SimpleType::Slice(_)) => {
todo!("Unhandled type: {:?}", field.ty)
}
};
let params = match field.kind {
FieldKind::Named(id) => {
quote!( { #id: f, .. })
}
FieldKind::None => match &variant.fields {
syn::Fields::Named(_) => quote!({ .. }),
syn::Fields::Unnamed(_) => quote!((..)),
syn::Fields::Unit => TokenStream::default(),
},
FieldKind::Numbered(idx) => {
let mut fields = Vec::new();
for (field_idx, _) in variant.fields.iter().enumerate() {
if field_idx == idx {
fields.push(quote!(f));
} else {
fields.push(quote!(_));
}
}
quote!((#(#fields),*))
}
};
variants.push(quote!(#enum_name::#variant_name #params => #reference));
}
fn handle_has_loc_attr(attrs: &[Attribute]) -> Result<Option<Field<'_>>> {
for attr in attrs {
if attr.path.is_ident("has_loc") {
let meta = attr.parse_meta()?;
match meta {
Meta::Path(path) => {
return Err(Error::new(path.span(), "Arguments expected"));
}
Meta::List(list) => {
// has_loc(A, B, C)
if list.nested.len() != 1 {
return Err(Error::new(list.span(), "Only one argument expected"));
}
match &list.nested | build_has_loc | identifier_name |
has_loc.rs | syn::Meta;
use syn::NestedMeta;
use syn::Result;
use syn::Variant;
use crate::simple_type::SimpleType;
use crate::util::InterestingFields;
/// Builds a HasLoc impl.
///
/// The build rules are as follows:
/// - For a struct it just looks for a field with a type of LocId.
/// - For an enum it does a match on each variant.
/// - For either tuple variants or struct variants it looks for a field with a
/// type of LocId.
/// - For a tuple variant with a single non-LocId type and calls `.loc_id()`
/// on that field.
/// - Otherwise you can specify `#[has_loc(n)]` where `n` is the index of the
/// field to call `.loc_id()` on. `#[has_loc(n)]` can also be used on the
/// whole enum to provide a default index.
///
pub(crate) fn build_has_loc(input: TokenStream) -> Result<TokenStream> {
let input = syn::parse2::<DeriveInput>(input)?;
match &input.data {
Data::Enum(data) => build_has_loc_enum(&input, data),
Data::Struct(data) => build_has_loc_struct(&input, data),
Data::Union(_) => Err(Error::new(input.span(), "Union not handled")),
}
}
fn field_might_contain_buried_loc_id(ty: &SimpleType<'_>) -> bool {
if let Some(ident) = ty.get_ident() {
!(ident == "BlockId"
|| ident == "ClassId"
|| ident == "ConstId"
|| ident == "ValueId"
|| ident == "LocalId"
|| ident == "MethodId"
|| ident == "ParamId"
|| ident == "VarId"
|| ident == "usize"
|| ident == "u32")
} else {
true
}
}
fn build_has_loc_struct(input: &DeriveInput, data: &DataStruct) -> Result<TokenStream> {
// struct Foo {
// ...
// loc: LocId,
// }
let struct_name = &input.ident;
let default_select_field = handle_has_loc_attr(&input.attrs)?;
let loc_field = if let Some(f) = default_select_field {
match f.kind {
FieldKind::Named(name) => {
let name = name.to_string();
let field = data
.fields
.iter()
.find(|field| field.ident.as_ref().map_or(false, |id| id == &name))
.ok_or_else(|| Error::new(input.span(), format!("Field '{name}' not found")))?
.ident
.as_ref()
.unwrap();
quote!(#field.loc_id())
}
FieldKind::None => todo!(),
FieldKind::Numbered(_) => todo!(),
}
} else {
let field = data
.fields
.iter()
.enumerate()
.map(|(i, field)| (i, field, SimpleType::from_type(&field.ty)))
.find(|(_, _, ty)| ty.is_based_on("LocId"));
let (idx, field, _) =
field.ok_or_else(|| Error::new(input.span(), "No field with type LocId found"))?;
if let Some(ident) = field.ident.as_ref() {
ident.to_token_stream()
} else {
syn::Index::from(idx).to_token_stream()
}
};
let (impl_generics, ty_generics, where_clause) = input.generics.split_for_impl();
let output = quote!(impl #impl_generics HasLoc for #struct_name #ty_generics #where_clause {
fn loc_id(&self) -> LocId {
self.#loc_field
}
});
Ok(output)
}
fn get_select_field<'a>(
variant: &'a Variant,
default_select_field: &Option<Field<'a>>,
) -> Result<Option<Field<'a>>> {
if let Some(f) = handle_has_loc_attr(&variant.attrs)? {
return Ok(Some(f));
}
if let Some(f) = default_select_field.as_ref() {
return Ok(Some(f.clone()));
}
let mut interesting_fields = InterestingFields::None;
for (idx, field) in variant.fields.iter().enumerate() {
let ty = SimpleType::from_type(&field.ty);
if ty.is_based_on("LocId") {
let kind = if let Some(ident) = field.ident.as_ref() {
// Bar { .., loc: LocId }
FieldKind::Named(Cow::Borrowed(ident))
} else {
// Bar(.., LocId)
FieldKind::Numbered(idx)
};
return Ok(Some(Field { kind, ty }));
} else if field_might_contain_buried_loc_id(&ty) {
// Report the type as 'unknown' because it's not a type that's
// related to LocId.
interesting_fields.add(idx, field.ident.as_ref(), SimpleType::Unknown);
}
}
match interesting_fields {
InterestingFields::None => {
let kind = FieldKind::None;
let ty = SimpleType::Unknown;
Ok(Some(Field { kind, ty }))
}
InterestingFields::One(idx, ident, ty) => {
// There's only a single field that could possibly contain a buried
// LocId.
let kind = ident.map_or_else(
|| FieldKind::Numbered(idx),
|id| FieldKind::Named(Cow::Borrowed(id)),
);
Ok(Some(Field { kind, ty }))
}
InterestingFields::Many => Ok(None),
}
}
fn build_has_loc_enum(input: &DeriveInput, data: &DataEnum) -> Result<TokenStream> {
// enum Foo {
// Bar(.., LocId),
// Baz { .., loc: LocId },
// }
let default_select_field = handle_has_loc_attr(&input.attrs)?;
let enum_name = &input.ident;
let mut variants: Vec<TokenStream> = Vec::new();
for variant in data.variants.iter() {
let select_field = get_select_field(variant, &default_select_field)?;
if let Some(select_field) = select_field {
push_handler(&mut variants, enum_name, variant, select_field);
} else {
return Err(Error::new(
variant.span(),
format!("LocId field not found in variant {}", variant.ident,),
));
}
}
let (impl_generics, ty_generics, where_clause) = input.generics.split_for_impl();
let output = quote!(impl #impl_generics HasLoc for #enum_name #ty_generics #where_clause {
fn loc_id(&self) -> LocId {
match self {
#(#variants),*
}
}
});
Ok(output)
}
#[derive(Clone)]
struct Field<'a> {
kind: FieldKind<'a>,
ty: SimpleType<'a>,
}
#[derive(Clone)]
enum FieldKind<'a> {
Named(Cow<'a, Ident>),
None,
Numbered(usize),
}
fn push_handler(
variants: &mut Vec<TokenStream>,
enum_name: &Ident,
variant: &Variant,
field: Field<'_>,
) {
let variant_name = &variant.ident;
let reference = match (&field.kind, &field.ty) {
(FieldKind::None, _) => quote!(LocId::NONE),
(_, SimpleType::Unknown) => quote!(f.loc_id()),
(_, SimpleType::Unit(_)) => quote!(*f),
(_, SimpleType::Array(_))
| (_, SimpleType::BoxedSlice(_))
| (_, SimpleType::RefSlice(_))
| (_, SimpleType::Slice(_)) => {
todo!("Unhandled type: {:?}", field.ty)
}
};
let params = match field.kind {
FieldKind::Named(id) => {
quote!( { #id: f, .. })
}
FieldKind::None => match &variant.fields {
syn::Fields::Named(_) => quote!({ .. }),
syn::Fields::Unnamed(_) => quote!((..)),
syn::Fields::Unit => TokenStream::default(),
},
FieldKind::Numbered(idx) => {
let mut fields = Vec::new();
for (field_idx, _) in variant.fields.iter().enumerate() {
if field_idx == idx {
fields.push(quote!(f));
} else {
fields.push(quote!(_));
}
}
quote!((#(#fields),*))
}
};
variants.push(quote!(#enum_name::#variant_name #params => #reference));
}
fn handle_has_loc_attr(attrs: &[Attribute]) -> Result<Option<Field<'_>>> {
for attr in attrs {
if attr.path.is_ident("has_loc") {
let meta = attr.parse_meta()?;
match meta {
Meta::Path(path) => {
return Err(Error::new(path.span(), "Arguments expected"));
}
Meta::List(list) => {
// has_loc(A, B, C)
if list.nested.len() != 1 {
return Err(Error::new(list.span(), "Only one argument expected"));
}
match &list.nested[0] {
NestedMeta::Lit(Lit::Int(i)) => {
return Ok(Some(Field {
kind: FieldKind::Numbered(i.base10_parse()?),
ty: SimpleType::Unknown, | random_line_split |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.