file_name: large_string (lengths 4 to 140)
prefix: large_string (lengths 0 to 39k)
suffix: large_string (lengths 0 to 36.1k)
middle: large_string (lengths 0 to 29.4k)
fim_type: large_string (4 values)
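Each row is one fill-in-the-middle example: concatenating prefix, middle, and suffix reproduces the source file named in file_name, and fim_type records what kind of span was held out (the four values appearing in the rows below are conditional_block, identifier_name, identifier_body, and random_line_split). The short sketch below shows how a row's columns fit together; it assumes rows are available as plain Python dicts with these columns, and both the tiny example row and the FIM sentinel tokens are illustrative stand-ins, not values taken from this dataset.

def reassemble(row):
    # Concatenating the three text columns yields the original file contents.
    return row["prefix"] + row["middle"] + row["suffix"]

def to_fim_prompt(row):
    # One common prompt layout for fill-in-the-middle training; the sentinel
    # tokens here are illustrative, not prescribed by this dataset.
    return "<fim_prefix>" + row["prefix"] + "<fim_suffix>" + row["suffix"] + "<fim_middle>"

example = {
    "file_name": "ItemView.js",
    "fim_type": "identifier_name",  # other values: conditional_block, identifier_body, random_line_split
    "prefix": "function ",
    "middle": "ItemView",
    "suffix": "() { /* ... */ }",
}

assert reassemble(example) == "function ItemView() { /* ... */ }"
print(to_fim_prompt(example))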
ItemView.js
/** * @file View that builds the map by showing the items' images at their specified locations. */ "use strict"; /** * @class ItemView */ function ItemView() { // Private variables // These are jQuery objects corresponding to elements let $mapImage; let $carpet; let $backArrow; let $forwardArrow; let $avatar; let $arrowsAndItemOrderNumbers; let $itemOrderNumbers; let previousAngle = 0; let viewModel; let itemsDetails; let itemToShowBecauseItIsInTheURL; let performAnimations; // Private functions const cacheJQueryObjects = () => { $mapImage = $("#MapImage"); $carpet = $('#Carpet'); $arrowsAndItemOrderNumbers = $('#ArrowsAndItemOrderNumbers'); $backArrow = $('#ArrowBack'); $forwardArrow = $('#ArrowForward'); $itemOrderNumbers = $('#ItemOrderNumbers'); $avatar = $('#Avatar'); }; /** * FIXME * Now I don't fetch item details from the backend because the index.html file comes with * them ready to use. I collect the details from the elements. * * See server-rendering/writer-home-page-generator.js to know how the details are incorporated * in the page. */ const collectItemDetailsFromMap = () => { itemsDetails = []; const itemElements = document.querySelectorAll("[data-nid]"); itemElements.forEach((element) => { itemsDetails.push({ "nid": element.dataset.nid, "field_order_number": element.dataset.order, "title": element.dataset.title, "field_coordinate_x": element.dataset.xCoord, "field_coordinate_y": element.dataset.yCoord, "field_item_type": element.dataset.type, "path": element.dataset.path }); }); // the viewModel needs to know about the items details as well viewModel.setItemsDetails(itemsDetails); } const moveToStartingPointOfSpiral = () => { // We are going to move the carpet to the starting point of the spiral // We set the animation running. The viewModel will take care of closing // the item content panel, if any. It will also close any contact me form. 
viewModel.setAnimationToNextItemRunning(true); const viewport = viewModel.getViewPort(); // Rotating the carpet to the horizontal position it's supposed to have // at the starting point of the spiral $carpet.velocity({ transform: ["rotateZ(" + 0 + "deg)", "rotateZ(" + previousAngle + "deg)"] }, { duration: 1000, easing: "linear", loop: false}); previousAngle = 0; const mapImagePosition = $mapImage.position(); const currentTop = Math.round(mapImagePosition.top); const currentLeft = Math.round(mapImagePosition.left); let animationDuration = 1500; // If the carpet is already very near the place it's going to, // I want to get there very quickly so that the user can // click on the arrows with no delay // If I have the animation last 1500ms, the user may click on an arrow and // nothing happens if (Math.abs(currentTop - (viewport.height / 2 - 3500)) < 200 && Math.abs(currentLeft - (viewport.width / 2 - 3500)) < 200) { animationDuration = 100; } // Now animating the carpet to go to the starting point of the spiral $mapImage.animate({ top: viewport.height / 2 - 3500 , left: viewport.width / 2 - 3500 }, animationDuration, null, () => { // console.log('animation to spiral starting point completed'); // Animation completed viewModel.setAnimationToNextItemRunning(false); } ); }; const clickOnArrowHandler = (event) => { // console.log(event); // console.log(viewModel.getAnimationToNextItemRunning()); // Only if we are not already flying to the next item, do the following if (!viewModel.getAnimationToNextItemRunning()) { let itemToVisitNext; // Determining the item to visit next if (!event && itemToShowBecauseItIsInTheURL) { // This is in the case I have to move directly to an item because it's in the URL itemToVisitNext = itemToShowBecauseItIsInTheURL; itemToShowBecauseItIsInTheURL = undefined; performAnimations = false; // console.log("clickOnArrowHandler, itemToShowBecauseItIsInTheURL ", itemToShowBecauseItIsInTheURL); // console.log("performAnimations ", performAnimations); } else { // the parameter tells if we are going forward or back itemToVisitNext = viewModel.getItemToVisitNext(event.target.id === "ArrowForward"); } if (itemToVisitNext)
} }; /** * To update the order number of the item currently visited as shown between the arrows. * The total number of items is shown as well. * * @param item */ const updateItemOrderNumbers = (item) => { if (item) $itemOrderNumbers.html("<span>" + item.field_order_number + "/" + viewModel.getNumberOfItems() + "</span>"); else $itemOrderNumbers.html("<span>Click right arrow</span>"); }; /** * This is about registering handlers for standard events like click * @memberOf ItemView */ const setupStandardEventHandlers = () => { //console.log("binding events"); $backArrow.bind('click', clickOnArrowHandler); $forwardArrow.bind('click', clickOnArrowHandler); }; /** * registerEventHandlers is the standard name for the function that attaches event handlers * I'm talking about my custom jquery events * No standard events like click * @memberOf ItemView */ const registerEventHandlers = () => { // Hide the arrows only on small screens. On large screens keep them. const hideNavigationArrows = () => { if (viewModel.itIsASmallScreen()) $arrowsAndItemOrderNumbers.hide(); }; const showNavigationArrows = () => { if (!$arrowsAndItemOrderNumbers.is(":visible") && $mapImage.is(":visible")) $arrowsAndItemOrderNumbers.show(); }; // We have to hide the arrows when the item content dialog is showing viewModel.attachEventHandler('ViewModel.itemcontent.beingshown', hideNavigationArrows); // We restore the arrows when the item content dialog is hidden viewModel.attachEventHandler('ViewModel.itemcontent.beinghidden', showNavigationArrows); viewModel.attachEventHandler('ViewModel.contactme.beingshown', hideNavigationArrows); viewModel.attachEventHandler('ViewModel.contactme.beinghidden', showNavigationArrows); // Going to the home page. Have to hide the map, reset some variables, center the (hidden) map and more viewModel.attachEventHandler('ViewModel.home.goto', () => { // Going to the home, hiding everything and resetting some variables // Moving the map back to the center $mapImage.css({top: "calc(-3500px + 50vh)", left: "calc(-3500px + 50vw)"}); // Rotating back the carpet to horizontal direction $carpet.velocity({ transform: ["rotateZ(" + 0 + "deg)", "rotateZ(" + previousAngle + "deg)"] }, { duration: 1000, easing: "linear", loop: false}); $mapImage.hide(); $carpet.hide(); $arrowsAndItemOrderNumbers.hide(); $avatar.hide(); itemToShowBecauseItIsInTheURL = undefined; previousAngle = 0; updateItemOrderNumbers(null); }); // @see ViewModel::requestItemsDetailsFromModel viewModel.attachEventHandler('ViewModel.map.show', () => { if (!itemsDetails) { // Exception! There is a bug here! Sentry.captureMessage("itemsDetails not defined! ViewModel.map.show --- ItemView"); } $mapImage.show(); $carpet.show(); $arrowsAndItemOrderNumbers.css('display', 'flex'); $avatar.show(); if (document.location.pathname === "/web-writer-tech-and-humanity") { moveToStartingPointOfSpiral(); } else { // If we are showing a specific item, we need to move the carpet to it itemToShowBecauseItIsInTheURL = viewModel.getItemToShowBecauseItIsInTheURL(); if (itemToShowBecauseItIsInTheURL) { // When showing an item because the user landed directly on the item's url, we simulate a click on an arrow // that will move the carpet to the item clickOnArrowHandler(); } } }); }; return { init: (viewModelToUse) => { viewModel = viewModelToUse; performAnimations = true; cacheJQueryObjects(); setupStandardEventHandlers(); registerEventHandlers(); collectItemDetailsFromMap(); } } }
{ const viewport = viewModel.getViewPort(); // When performing the animation the View Model needs to know so that it // can tell other views viewModel.setAnimationToNextItemRunning(true); // left and top attributes to give to the map to get to the item const positionItemToVisitNext = { left: viewport.width / 2 - itemToVisitNext.field_coordinate_x, top: viewport.height / 2 - itemToVisitNext.field_coordinate_y }; const mapImagePosition = $mapImage.position(); const currentTop = Math.round(mapImagePosition.top); const currentLeft = Math.round(mapImagePosition.left); // Differences in x and y we need to travel to get to the item from the current position const delta_x = (currentLeft - positionItemToVisitNext.left); const delta_y = (currentTop - positionItemToVisitNext.top); // The angle of the direction we take to get to the item. Used to rotate the carpet accordingly const angle = Math.atan2(delta_y, delta_x) * (180 / Math.PI); if (performAnimations) { // Rotating the carpet $carpet.velocity({ transform: ["rotateZ(" + angle + "deg)", "rotateZ(" + previousAngle + "deg)"] }, { duration: 1000, easing: "linear", loop: false}); } else { // Rotate the carpet with no animation $carpet.css("transform", "rotateZ(" + angle + "deg)"); } previousAngle = angle; const maxDelta = Math.max(Math.abs(delta_x), Math.abs(delta_y)); // This is to make the carpet stop before covering the image // We don't want the carpet to be over the item's image const approachingFactor = maxDelta / 100; const showingItemAtTheEndOfTheAnimation = () => { viewModel.setAnimationToNextItemRunning(false); updateItemOrderNumbers(itemToVisitNext); viewModel.showItem(); } if (performAnimations) { $mapImage.animate({ top: positionItemToVisitNext.top + (delta_y / approachingFactor), left: positionItemToVisitNext.left + (delta_x / approachingFactor)}, 1500, null, () => { showingItemAtTheEndOfTheAnimation(); } ); } else { $mapImage.css("top", positionItemToVisitNext.top + (delta_y / approachingFactor)); $mapImage.css("left", positionItemToVisitNext.left + (delta_x / approachingFactor)); showingItemAtTheEndOfTheAnimation(); // Now I can finally reset performAnimations to true to restart doing animations performAnimations = true; } }
conditional_block
ItemView.js
/** * @file View that builds the map by showing the items' images at their specified locations. */ "use strict"; /** * @class ItemView */ function
() { // Private variables // These are jQuery objects corresponding to elements let $mapImage; let $carpet; let $backArrow; let $forwardArrow; let $avatar; let $arrowsAndItemOrderNumbers; let $itemOrderNumbers; let previousAngle = 0; let viewModel; let itemsDetails; let itemToShowBecauseItIsInTheURL; let performAnimations; // Private functions const cacheJQueryObjects = () => { $mapImage = $("#MapImage"); $carpet = $('#Carpet'); $arrowsAndItemOrderNumbers = $('#ArrowsAndItemOrderNumbers'); $backArrow = $('#ArrowBack'); $forwardArrow = $('#ArrowForward'); $itemOrderNumbers = $('#ItemOrderNumbers'); $avatar = $('#Avatar'); }; /** * FIXME * Now I don't fetch item details from the backend because the index.html file comes with * them ready to use. I collect the details from the elements. * * See server-rendering/writer-home-page-generator.js to know how the details are incorporated * in the page. */ const collectItemDetailsFromMap = () => { itemsDetails = []; const itemElements = document.querySelectorAll("[data-nid]"); itemElements.forEach((element) => { itemsDetails.push({ "nid": element.dataset.nid, "field_order_number": element.dataset.order, "title": element.dataset.title, "field_coordinate_x": element.dataset.xCoord, "field_coordinate_y": element.dataset.yCoord, "field_item_type": element.dataset.type, "path": element.dataset.path }); }); // the viewModel needs to know about the items details as well viewModel.setItemsDetails(itemsDetails); } const moveToStartingPointOfSpiral = () => { // We are going to move the carpet to the starting point of the spiral // We set the animation running. The viewModel will take care of closing // the item content panel, if any. It will also close any contact me form. viewModel.setAnimationToNextItemRunning(true); const viewport = viewModel.getViewPort(); // Rotating the carpet to the horizontal position it's supposed to have // at the starting point of the spiral $carpet.velocity({ transform: ["rotateZ(" + 0 + "deg)", "rotateZ(" + previousAngle + "deg)"] }, { duration: 1000, easing: "linear", loop: false}); previousAngle = 0; const mapImagePosition = $mapImage.position(); const currentTop = Math.round(mapImagePosition.top); const currentLeft = Math.round(mapImagePosition.left); let animationDuration = 1500; // If the carpet is already very near the place it's going to, // I want to get there very quickly so that the user can // click on the arrows with no delay // If I have the animation last 1500ms, the user may click on an arrow and // nothing happens if (Math.abs(currentTop - (viewport.height / 2 - 3500)) < 200 && Math.abs(currentLeft - (viewport.width / 2 - 3500)) < 200) { animationDuration = 100; } // Now animating the carpet to go to the starting point of the spiral $mapImage.animate({ top: viewport.height / 2 - 3500 , left: viewport.width / 2 - 3500 }, animationDuration, null, () => { // console.log('animation to spiral starting point completed'); // Animation completed viewModel.setAnimationToNextItemRunning(false); } ); }; const clickOnArrowHandler = (event) => { // console.log(event); // console.log(viewModel.getAnimationToNextItemRunning()); // Only if we are not already flying to the next item, do the following if (!viewModel.getAnimationToNextItemRunning()) { let itemToVisitNext; // Determining the item to visit next if (!event && itemToShowBecauseItIsInTheURL) { // This is in the case I have to move directly to an item because it's in the URL itemToVisitNext = itemToShowBecauseItIsInTheURL; itemToShowBecauseItIsInTheURL = undefined; 
performAnimations = false; // console.log("clickOnArrowHandler, itemToShowBecauseItIsInTheURL ", itemToShowBecauseItIsInTheURL); // console.log("performAnimations ", performAnimations); } else { // the parameter tells if we are going forward or back itemToVisitNext = viewModel.getItemToVisitNext(event.target.id === "ArrowForward"); } if (itemToVisitNext) { const viewport = viewModel.getViewPort(); // When performing the animation the View Model needs to know so that it // can tell other views viewModel.setAnimationToNextItemRunning(true); // left and top attributes to give to the map to get to the item const positionItemToVisitNext = { left: viewport.width / 2 - itemToVisitNext.field_coordinate_x, top: viewport.height / 2 - itemToVisitNext.field_coordinate_y }; const mapImagePosition = $mapImage.position(); const currentTop = Math.round(mapImagePosition.top); const currentLeft = Math.round(mapImagePosition.left); // Differences in x and y we need to travel to get to the item from the current position const delta_x = (currentLeft - positionItemToVisitNext.left); const delta_y = (currentTop - positionItemToVisitNext.top); // The angle of the direction we take to get to the item. Used to rotate the carpet accordingly const angle = Math.atan2(delta_y, delta_x) * (180 / Math.PI); if (performAnimations) { // Rotating the carpet $carpet.velocity({ transform: ["rotateZ(" + angle + "deg)", "rotateZ(" + previousAngle + "deg)"] }, { duration: 1000, easing: "linear", loop: false}); } else { // Rotate the carpet with no animation $carpet.css("transform", "rotateZ(" + angle + "deg)"); } previousAngle = angle; const maxDelta = Math.max(Math.abs(delta_x), Math.abs(delta_y)); // This is to make the carpet stop before covering the image // We don't want the carpet to be over the item's image const approachingFactor = maxDelta / 100; const showingItemAtTheEndOfTheAnimation = () => { viewModel.setAnimationToNextItemRunning(false); updateItemOrderNumbers(itemToVisitNext); viewModel.showItem(); } if (performAnimations) { $mapImage.animate({ top: positionItemToVisitNext.top + (delta_y / approachingFactor), left: positionItemToVisitNext.left + (delta_x / approachingFactor)}, 1500, null, () => { showingItemAtTheEndOfTheAnimation(); } ); } else { $mapImage.css("top", positionItemToVisitNext.top + (delta_y / approachingFactor)); $mapImage.css("left", positionItemToVisitNext.left + (delta_x / approachingFactor)); showingItemAtTheEndOfTheAnimation(); // Now I can finally reset performAnimations to true to restart doing animations performAnimations = true; } } } }; /** * To update the order number of the item currently visited as shown between the arrows. * The total number of items is shown as well. * * @param item */ const updateItemOrderNumbers = (item) => { if (item) $itemOrderNumbers.html("<span>" + item.field_order_number + "/" + viewModel.getNumberOfItems() + "</span>"); else $itemOrderNumbers.html("<span>Click right arrow</span>"); }; /** * This is about registering handlers for standard events like click * @memberOf ItemView */ const setupStandardEventHandlers = () => { //console.log("binding events"); $backArrow.bind('click', clickOnArrowHandler); $forwardArrow.bind('click', clickOnArrowHandler); }; /** * registerEventHandlers is the standard name for the function that attaches event handlers * I'm talking about my custom jquery events * No standard events like click * @memberOf ItemView */ const registerEventHandlers = () => { // Hide the arrows only on small screens. On large screens keep them. 
const hideNavigationArrows = () => { if (viewModel.itIsASmallScreen()) $arrowsAndItemOrderNumbers.hide(); }; const showNavigationArrows = () => { if (!$arrowsAndItemOrderNumbers.is(":visible") && $mapImage.is(":visible")) $arrowsAndItemOrderNumbers.show(); }; // We have to hide the arrows when the item content dialog is showing viewModel.attachEventHandler('ViewModel.itemcontent.beingshown', hideNavigationArrows); // We restore the arrows when the item content dialog is hidden viewModel.attachEventHandler('ViewModel.itemcontent.beinghidden', showNavigationArrows); viewModel.attachEventHandler('ViewModel.contactme.beingshown', hideNavigationArrows); viewModel.attachEventHandler('ViewModel.contactme.beinghidden', showNavigationArrows); // Going to the home page. Have to hide the map, reset some variables, center the (hidden) map and more viewModel.attachEventHandler('ViewModel.home.goto', () => { // Going to the home, hiding everything and resetting some variables // Moving the map back to the center $mapImage.css({top: "calc(-3500px + 50vh)", left: "calc(-3500px + 50vw)"}); // Rotating back the carpet to horizontal direction $carpet.velocity({ transform: ["rotateZ(" + 0 + "deg)", "rotateZ(" + previousAngle + "deg)"] }, { duration: 1000, easing: "linear", loop: false}); $mapImage.hide(); $carpet.hide(); $arrowsAndItemOrderNumbers.hide(); $avatar.hide(); itemToShowBecauseItIsInTheURL = undefined; previousAngle = 0; updateItemOrderNumbers(null); }); // @see ViewModel::requestItemsDetailsFromModel viewModel.attachEventHandler('ViewModel.map.show', () => { if (!itemsDetails) { // Exception! There is a bug here! Sentry.captureMessage("itemsDetails not defined! ViewModel.map.show --- ItemView"); } $mapImage.show(); $carpet.show(); $arrowsAndItemOrderNumbers.css('display', 'flex'); $avatar.show(); if (document.location.pathname === "/web-writer-tech-and-humanity") { moveToStartingPointOfSpiral(); } else { // If we are showing a specific item, we need to move the carpet to it itemToShowBecauseItIsInTheURL = viewModel.getItemToShowBecauseItIsInTheURL(); if (itemToShowBecauseItIsInTheURL) { // When showing an item because the user landed directly on the item's url, we simulate a click on an arrow // that will move the carpet to the item clickOnArrowHandler(); } } }); }; return { init: (viewModelToUse) => { viewModel = viewModelToUse; performAnimations = true; cacheJQueryObjects(); setupStandardEventHandlers(); registerEventHandlers(); collectItemDetailsFromMap(); } } }
ItemView
identifier_name
ItemView.js
/** * @file View that builds the map by showing the items' images at their specified locations. */ "use strict"; /** * @class ItemView */ function ItemView()
{ // Private variables // These are jQuery objects corresponding to elements let $mapImage; let $carpet; let $backArrow; let $forwardArrow; let $avatar; let $arrowsAndItemOrderNumbers; let $itemOrderNumbers; let previousAngle = 0; let viewModel; let itemsDetails; let itemToShowBecauseItIsInTheURL; let performAnimations; // Private functions const cacheJQueryObjects = () => { $mapImage = $("#MapImage"); $carpet = $('#Carpet'); $arrowsAndItemOrderNumbers = $('#ArrowsAndItemOrderNumbers'); $backArrow = $('#ArrowBack'); $forwardArrow = $('#ArrowForward'); $itemOrderNumbers = $('#ItemOrderNumbers'); $avatar = $('#Avatar'); }; /** * FIXME * Now I don't fetch item details from the backend because the index.html file comes with * them ready to use. I collect the details from the elements. * * See server-rendering/writer-home-page-generator.js to know how the details are incorporated * in the page. */ const collectItemDetailsFromMap = () => { itemsDetails = []; const itemElements = document.querySelectorAll("[data-nid]"); itemElements.forEach((element) => { itemsDetails.push({ "nid": element.dataset.nid, "field_order_number": element.dataset.order, "title": element.dataset.title, "field_coordinate_x": element.dataset.xCoord, "field_coordinate_y": element.dataset.yCoord, "field_item_type": element.dataset.type, "path": element.dataset.path }); }); // the viewModel needs to know about the items details as well viewModel.setItemsDetails(itemsDetails); } const moveToStartingPointOfSpiral = () => { // We are going to move the carpet to the starting point of the spiral // We set the animation running. The viewModel will take care of closing // the item content panel, if any. It will also close any contact me form. viewModel.setAnimationToNextItemRunning(true); const viewport = viewModel.getViewPort(); // Rotating the carpet to the horizontal position it's supposed to have // at the starting point of the spiral $carpet.velocity({ transform: ["rotateZ(" + 0 + "deg)", "rotateZ(" + previousAngle + "deg)"] }, { duration: 1000, easing: "linear", loop: false}); previousAngle = 0; const mapImagePosition = $mapImage.position(); const currentTop = Math.round(mapImagePosition.top); const currentLeft = Math.round(mapImagePosition.left); let animationDuration = 1500; // If the carpet is already very near the place it's going to, // I want to get there very quickly so that the user can // click on the arrows with no delay // If I have the animation last 1500ms, the user may click on an arrow and // nothing happens if (Math.abs(currentTop - (viewport.height / 2 - 3500)) < 200 && Math.abs(currentLeft - (viewport.width / 2 - 3500)) < 200) { animationDuration = 100; } // Now animating the carpet to go to the starting point of the spiral $mapImage.animate({ top: viewport.height / 2 - 3500 , left: viewport.width / 2 - 3500 }, animationDuration, null, () => { // console.log('animation to spiral starting point completed'); // Animation completed viewModel.setAnimationToNextItemRunning(false); } ); }; const clickOnArrowHandler = (event) => { // console.log(event); // console.log(viewModel.getAnimationToNextItemRunning()); // Only if we are not already flying to the next item, do the following if (!viewModel.getAnimationToNextItemRunning()) { let itemToVisitNext; // Determining the item to visit next if (!event && itemToShowBecauseItIsInTheURL) { // This is in the case I have to move directly to an item because it's in the URL itemToVisitNext = itemToShowBecauseItIsInTheURL; itemToShowBecauseItIsInTheURL = undefined; 
performAnimations = false; // console.log("clickOnArrowHandler, itemToShowBecauseItIsInTheURL ", itemToShowBecauseItIsInTheURL); // console.log("performAnimations ", performAnimations); } else { // the parameter tells if we are going forward or back itemToVisitNext = viewModel.getItemToVisitNext(event.target.id === "ArrowForward"); } if (itemToVisitNext) { const viewport = viewModel.getViewPort(); // When performing the animation the View Model needs to know so that it // can tell other views viewModel.setAnimationToNextItemRunning(true); // left and top attributes to give to the map to get to the item const positionItemToVisitNext = { left: viewport.width / 2 - itemToVisitNext.field_coordinate_x, top: viewport.height / 2 - itemToVisitNext.field_coordinate_y }; const mapImagePosition = $mapImage.position(); const currentTop = Math.round(mapImagePosition.top); const currentLeft = Math.round(mapImagePosition.left); // Differences in x and y we need to travel to get to the item from the current position const delta_x = (currentLeft - positionItemToVisitNext.left); const delta_y = (currentTop - positionItemToVisitNext.top); // The angle of the direction we take to get to the item. Used to rotate the carpet accordingly const angle = Math.atan2(delta_y, delta_x) * (180 / Math.PI); if (performAnimations) { // Rotating the carpet $carpet.velocity({ transform: ["rotateZ(" + angle + "deg)", "rotateZ(" + previousAngle + "deg)"] }, { duration: 1000, easing: "linear", loop: false}); } else { // Rotate the carpet with no animation $carpet.css("transform", "rotateZ(" + angle + "deg)"); } previousAngle = angle; const maxDelta = Math.max(Math.abs(delta_x), Math.abs(delta_y)); // This is to make the carpet stop before covering the image // We don't want the carpet to be over the item's image const approachingFactor = maxDelta / 100; const showingItemAtTheEndOfTheAnimation = () => { viewModel.setAnimationToNextItemRunning(false); updateItemOrderNumbers(itemToVisitNext); viewModel.showItem(); } if (performAnimations) { $mapImage.animate({ top: positionItemToVisitNext.top + (delta_y / approachingFactor), left: positionItemToVisitNext.left + (delta_x / approachingFactor)}, 1500, null, () => { showingItemAtTheEndOfTheAnimation(); } ); } else { $mapImage.css("top", positionItemToVisitNext.top + (delta_y / approachingFactor)); $mapImage.css("left", positionItemToVisitNext.left + (delta_x / approachingFactor)); showingItemAtTheEndOfTheAnimation(); // Now I can finally reset performAnimations to true to restart doing animations performAnimations = true; } } } }; /** * To update the order number of the item currently visited as shown between the arrows. * The total number of items is shown as well. * * @param item */ const updateItemOrderNumbers = (item) => { if (item) $itemOrderNumbers.html("<span>" + item.field_order_number + "/" + viewModel.getNumberOfItems() + "</span>"); else $itemOrderNumbers.html("<span>Click right arrow</span>"); }; /** * This is about registering handlers for standard events like click * @memberOf ItemView */ const setupStandardEventHandlers = () => { //console.log("binding events"); $backArrow.bind('click', clickOnArrowHandler); $forwardArrow.bind('click', clickOnArrowHandler); }; /** * registerEventHandlers is the standard name for the function that attaches event handlers * I'm talking about my custom jquery events * No standard events like click * @memberOf ItemView */ const registerEventHandlers = () => { // Hide the arrows only on small screens. On large screens keep them. 
const hideNavigationArrows = () => { if (viewModel.itIsASmallScreen()) $arrowsAndItemOrderNumbers.hide(); }; const showNavigationArrows = () => { if (!$arrowsAndItemOrderNumbers.is(":visible") && $mapImage.is(":visible")) $arrowsAndItemOrderNumbers.show(); }; // We have to hide the arrows when the item content dialog is showing viewModel.attachEventHandler('ViewModel.itemcontent.beingshown', hideNavigationArrows); // We restore the arrows when the item content dialog is hidden viewModel.attachEventHandler('ViewModel.itemcontent.beinghidden', showNavigationArrows); viewModel.attachEventHandler('ViewModel.contactme.beingshown', hideNavigationArrows); viewModel.attachEventHandler('ViewModel.contactme.beinghidden', showNavigationArrows); // Going to the home page. Have to hide the map, reset some variables, center the (hidden) map and more viewModel.attachEventHandler('ViewModel.home.goto', () => { // Going to the home, hiding everything and resetting some variables // Moving the map back to the center $mapImage.css({top: "calc(-3500px + 50vh)", left: "calc(-3500px + 50vw)"}); // Rotating back the carpet to horizontal direction $carpet.velocity({ transform: ["rotateZ(" + 0 + "deg)", "rotateZ(" + previousAngle + "deg)"] }, { duration: 1000, easing: "linear", loop: false}); $mapImage.hide(); $carpet.hide(); $arrowsAndItemOrderNumbers.hide(); $avatar.hide(); itemToShowBecauseItIsInTheURL = undefined; previousAngle = 0; updateItemOrderNumbers(null); }); // @see ViewModel::requestItemsDetailsFromModel viewModel.attachEventHandler('ViewModel.map.show', () => { if (!itemsDetails) { // Exception! There is a bug here! Sentry.captureMessage("itemsDetails not defined! ViewModel.map.show --- ItemView"); } $mapImage.show(); $carpet.show(); $arrowsAndItemOrderNumbers.css('display', 'flex'); $avatar.show(); if (document.location.pathname === "/web-writer-tech-and-humanity") { moveToStartingPointOfSpiral(); } else { // If we are showing a specific item, we need to move the carpet to it itemToShowBecauseItIsInTheURL = viewModel.getItemToShowBecauseItIsInTheURL(); if (itemToShowBecauseItIsInTheURL) { // When showing an item because the user landed directly on the item's url, we simulate a click on an arrow // that will move the carpet to the item clickOnArrowHandler(); } } }); }; return { init: (viewModelToUse) => { viewModel = viewModelToUse; performAnimations = true; cacheJQueryObjects(); setupStandardEventHandlers(); registerEventHandlers(); collectItemDetailsFromMap(); } } }
identifier_body
string_pool.rs
use crate::expat_external_h::XML_Char; use crate::lib::xmlparse::{ExpatBufRef, ExpatBufRefMut, XmlConvert}; use crate::lib::xmltok::{ENCODING, XML_Convert_Result}; use bumpalo::Bump; use bumpalo::collections::vec::Vec as BumpVec; use fallible_collections::FallibleBox; use libc::c_int; use std::cell::{Cell, RefCell}; use std::convert::TryInto; use std::mem::swap; pub const INIT_BLOCK_SIZE: usize = 1024; rental! { mod rental_pool { use super::*; #[rental(debug)] pub(crate) struct InnerStringPool { // The rental crate requires that all fields but the last one // implement `StableDeref`, which means we need to wrap it // in a `Box` bump: Box<Bump>, current_bump_vec: RefCell<RentedBumpVec<'bump>>, } } } use rental_pool::InnerStringPool; /// A StringPool has the purpose of allocating distinct strings and then /// handing them off to be referenced either temporarily or for the entire length /// of the pool. pub(crate) struct StringPool(Option<InnerStringPool>); impl StringPool { pub(crate) fn
() -> Result<Self, ()> { let bump = Bump::try_with_capacity(INIT_BLOCK_SIZE).map_err(|_| ())?; let boxed_bump = Box::try_new(bump).map_err(|_| ())?; Ok(StringPool(Some(InnerStringPool::new( boxed_bump, |bump| RefCell::new(RentedBumpVec(BumpVec::new_in(&bump))), )))) } /// # Safety /// /// The inner type is only ever None in middle of the clear() /// method. Therefore it is safe to use anywhere else. fn inner(&self) -> &InnerStringPool { self.0.as_ref().unwrap_or_else(|| unsafe { std::hint::unreachable_unchecked() }) } /// Determines whether or not the current BumpVec is empty. pub(crate) fn is_empty(&self) -> bool { self.inner().rent(|vec| vec.borrow().0.is_empty()) } /// Determines whether or not the current BumpVec is full. pub(crate) fn is_full(&self) -> bool { self.inner().rent(|vec| vec.borrow().is_full()) } /// Gets the current vec, converts it into an immutable slice, /// and resets bookkeeping so that it will create a new vec next time. pub(crate) fn finish_string(&self) -> &[XML_Char] { self.inner().ref_rent_all(|pool| { let mut vec = RentedBumpVec(BumpVec::new_in(&pool.bump)); pool.current_bump_vec.replace(vec).0.into_bump_slice() }) } /// Gets the current vec, converts it into a slice of cells (with interior mutability), /// and resets bookkeeping so that it will create a new vec next time. pub(crate) fn finish_string_cells(&self) -> &[Cell<XML_Char>] { self.inner().ref_rent_all(|pool| { let mut vec = RentedBumpVec(BumpVec::new_in(&pool.bump)); let sl = pool.current_bump_vec.replace(vec).0.into_bump_slice_mut(); Cell::from_mut(sl).as_slice_of_cells() }) } /// Resets the current bump vec to the beginning pub(crate) fn clear_current(&self) { self.inner().rent(|v| v.borrow_mut().0.clear()) } /// Obtains the length of the current BumpVec. pub(crate) fn len(&self) -> usize { self.inner().rent(|vec| vec.borrow().0.len()) } /// Call callback with an immutable buffer of the current BumpVec. This must /// be a callback to ensure that we don't (safely) borrow the slice for /// longer than it stays vaild. pub(crate) fn current_slice<F, R>(&self, mut callback: F) -> R where F: FnMut(&[XML_Char]) -> R { self.inner().rent(|v| callback(v.borrow().0.as_slice())) } /// Call callback with a mutable buffer of the current BumpVec. This must /// be a callback to ensure that we don't (safely) borrow the slice for /// longer than it stays vaild. pub(crate) fn current_mut_slice<F, R>(&self, mut callback: F) -> R where F: FnMut(&mut [XML_Char]) -> R { self.inner().rent(|v| callback(v.borrow_mut().0.as_mut_slice())) } /// Unsafe temporary version of `current_slice()`. This needs to be removed /// when callers are made safe. pub(crate) unsafe fn current_start(&self) -> *const XML_Char { self.inner().rent(|v| v.borrow().0.as_ptr()) } /// Appends a char to the current BumpVec. pub(crate) fn append_char(&self, c: XML_Char) -> bool { self.inner().rent(|vec| vec.borrow_mut().append_char(c)) } /// Overwrites the last char in the current BumpVec. /// Note that this will panic if empty. This is not an insert /// operation as it does not shift bytes afterwards. 
pub(crate) fn replace_last_char(&self, c: XML_Char) { self.inner().rent(|buf| { *buf.borrow_mut() .0 .last_mut() .expect("Called replace_last_char() when string was empty") = c; }) } /// Decrements the length, panicing if len is 0 pub(crate) fn backtrack(&self) { self.inner().rent(|vec| vec.borrow_mut().0.pop().expect("Called backtrack() on empty BumpVec")); } /// Gets the last character, panicing if len is 0 pub(crate) fn get_last_char(&self) -> XML_Char { self.inner().rent(|buf| *buf.borrow().0.last().expect("Called get_last_char() when string was empty")) } /// Appends an entire C String to the current BumpVec. pub(crate) unsafe fn append_c_string(&self, mut s: *const XML_Char) -> bool { self.inner().rent(|vec| { let mut vec = vec.borrow_mut(); while *s != 0 { if !vec.append_char(*s) { return false; } s = s.offset(1) } true }) } /// Resets the current Bump and deallocates its contents. /// The `inner` method must never be called here as it assumes /// self.0 is never `None` pub(crate) fn clear(&mut self) { let mut inner_pool = self.0.take(); let mut bump = inner_pool.unwrap().into_head(); bump.reset(); inner_pool = Some(InnerStringPool::new( bump, |bump| RefCell::new(RentedBumpVec(BumpVec::new_in(&bump))), )); swap(&mut self.0, &mut inner_pool); } pub(crate) fn store_c_string( &self, enc: &ENCODING, buf: ExpatBufRef, ) -> bool { self.inner().rent(|vec| { let mut vec = vec.borrow_mut(); if !vec.append(enc, buf) { return false; } if !vec.append_char('\0' as XML_Char) { return false; } true }) } pub(crate) fn append( &self, enc: &ENCODING, read_buf: ExpatBufRef, ) -> bool { self.inner().rent(|vec| vec.borrow_mut().append(enc, read_buf)) } pub(crate) unsafe fn copy_c_string( &self, mut s: *const XML_Char, ) -> Option<&[XML_Char]> { // self.append_c_string(s);? 
let successful = self.inner().rent(|vec| { let mut vec = vec.borrow_mut(); loop { if !vec.append_char(*s) { return false; } if *s == 0 { break; } s = s.offset(1); } true }); if !successful { return None; } Some(self.finish_string()) } pub(crate) unsafe fn copy_c_string_n( &self, mut s: *const XML_Char, mut n: c_int, ) -> Option<&[XML_Char]> { let successful = self.inner().rent(|vec| { let mut vec = vec.borrow_mut(); let mut n = n.try_into().unwrap(); if vec.0.try_reserve_exact(n).is_err() { return false; }; while n > 0 { if !vec.append_char(*s) { return false; } n -= 1; s = s.offset(1) } true }); if !successful { return None; } Some(self.finish_string()) } } #[derive(Debug)] pub(crate) struct RentedBumpVec<'bump>(BumpVec<'bump, XML_Char>); impl<'bump> RentedBumpVec<'bump> { fn is_full(&self) -> bool { self.0.len() == self.0.capacity() } fn append<'a>( &mut self, enc: &ENCODING, mut read_buf: ExpatBufRef<'a>, ) -> bool { loop { // REXPAT: always reserve at least 4 bytes, // so at least one character gets converted every iteration if self.0.try_reserve(read_buf.len().max(4)).is_err() { return false; } let start_len = self.0.len(); let cap = self.0.capacity(); self.0.resize(cap, 0); let mut write_buf = ExpatBufRefMut::from(&mut self.0[start_len..]); let write_buf_len = write_buf.len(); let convert_res = XmlConvert!(enc, &mut read_buf, &mut write_buf); // The write buf shrinks by how much was written to it let written_size = write_buf_len - write_buf.len(); self.0.truncate(start_len + written_size); if convert_res == XML_Convert_Result::COMPLETED || convert_res == XML_Convert_Result::INPUT_INCOMPLETE { return true; } } } fn append_char(&mut self, c: XML_Char) -> bool { if self.0.try_reserve(1).is_err() { false } else { self.0.push(c); true } } } #[cfg(test)] mod consts { use super::XML_Char; pub const A: XML_Char = 'a' as XML_Char; pub const B: XML_Char = 'b' as XML_Char; pub const C: XML_Char = 'c' as XML_Char; pub const D: XML_Char = 'd' as XML_Char; pub const NULL: XML_Char = '\0' as XML_Char; pub static S: [XML_Char; 5] = [C, D, D, C, NULL]; } #[test] fn test_append_char() { use consts::*; let mut pool = StringPool::try_new().unwrap(); assert!(pool.append_char(A)); pool.current_slice(|s| assert_eq!(s, [A])); assert!(pool.append_char(B)); pool.current_slice(|s| assert_eq!(s, [A, B])); // New BumpVec pool.finish_string(); assert!(pool.append_char(C)); pool.current_slice(|s| assert_eq!(s, [C])); } #[test] fn test_append_string() { use consts::*; let mut pool = StringPool::try_new().unwrap(); let mut string = [A, B, C, NULL]; unsafe { assert!(pool.append_c_string(string.as_mut_ptr())); } pool.current_slice(|s| assert_eq!(s, [A, B, C])); } #[test] fn test_copy_string() { use consts::*; let mut pool = StringPool::try_new().unwrap(); assert!(pool.append_char(A)); pool.current_slice(|s| assert_eq!(s, [A])); let new_string = unsafe { pool.copy_c_string(S.as_ptr()) }; assert_eq!(new_string.unwrap(), [A, C, D, D, C, NULL]); assert!(pool.append_char(B)); pool.current_slice(|s| assert_eq!(s, [B])); let new_string2 = unsafe { pool.copy_c_string_n(S.as_ptr(), 4) }; assert_eq!(new_string2.unwrap(), [B, C, D, D, C]); } #[test] fn test_store_c_string() { use consts::*; use crate::lib::xmlparse::XmlGetInternalEncoding; let mut pool = StringPool::try_new().unwrap(); let enc = XmlGetInternalEncoding(); let read_buf = unsafe { ExpatBufRef::new(S.as_ptr(), S.as_ptr().add(3)) }; assert!(pool.store_c_string(enc, read_buf)); let string = pool.finish_string(); assert_eq!(&*string, &[C, D, D, NULL]); 
assert!(pool.append_char(A)); pool.current_slice(|s| assert_eq!(s, [A])); // No overlap between buffers: assert_eq!(&*string, &[C, D, D, NULL]); assert!(pool.append_char(A)); pool.current_slice(|s| assert_eq!(s, [A, A])); // Force reallocation: pool.inner().rent(|v| v.borrow_mut().0.resize(2, 0)); assert!(pool.store_c_string(enc, read_buf)); let s = pool.finish_string(); assert_eq!(s, [A, A, C, D, D, NULL]); }
try_new
identifier_name
string_pool.rs
use crate::expat_external_h::XML_Char; use crate::lib::xmlparse::{ExpatBufRef, ExpatBufRefMut, XmlConvert}; use crate::lib::xmltok::{ENCODING, XML_Convert_Result}; use bumpalo::Bump; use bumpalo::collections::vec::Vec as BumpVec; use fallible_collections::FallibleBox; use libc::c_int; use std::cell::{Cell, RefCell}; use std::convert::TryInto; use std::mem::swap; pub const INIT_BLOCK_SIZE: usize = 1024; rental! { mod rental_pool { use super::*; #[rental(debug)] pub(crate) struct InnerStringPool { // The rental crate requires that all fields but the last one // implement `StableDeref`, which means we need to wrap it // in a `Box` bump: Box<Bump>, current_bump_vec: RefCell<RentedBumpVec<'bump>>, } } } use rental_pool::InnerStringPool; /// A StringPool has the purpose of allocating distinct strings and then /// handing them off to be referenced either temporarily or for the entire length /// of the pool. pub(crate) struct StringPool(Option<InnerStringPool>); impl StringPool { pub(crate) fn try_new() -> Result<Self, ()> { let bump = Bump::try_with_capacity(INIT_BLOCK_SIZE).map_err(|_| ())?; let boxed_bump = Box::try_new(bump).map_err(|_| ())?; Ok(StringPool(Some(InnerStringPool::new( boxed_bump, |bump| RefCell::new(RentedBumpVec(BumpVec::new_in(&bump))), )))) } /// # Safety /// /// The inner type is only ever None in middle of the clear() /// method. Therefore it is safe to use anywhere else. fn inner(&self) -> &InnerStringPool { self.0.as_ref().unwrap_or_else(|| unsafe { std::hint::unreachable_unchecked() }) } /// Determines whether or not the current BumpVec is empty. pub(crate) fn is_empty(&self) -> bool { self.inner().rent(|vec| vec.borrow().0.is_empty()) } /// Determines whether or not the current BumpVec is full. pub(crate) fn is_full(&self) -> bool { self.inner().rent(|vec| vec.borrow().is_full()) } /// Gets the current vec, converts it into an immutable slice, /// and resets bookkeeping so that it will create a new vec next time. pub(crate) fn finish_string(&self) -> &[XML_Char] { self.inner().ref_rent_all(|pool| { let mut vec = RentedBumpVec(BumpVec::new_in(&pool.bump)); pool.current_bump_vec.replace(vec).0.into_bump_slice() }) } /// Gets the current vec, converts it into a slice of cells (with interior mutability), /// and resets bookkeeping so that it will create a new vec next time. pub(crate) fn finish_string_cells(&self) -> &[Cell<XML_Char>] { self.inner().ref_rent_all(|pool| { let mut vec = RentedBumpVec(BumpVec::new_in(&pool.bump)); let sl = pool.current_bump_vec.replace(vec).0.into_bump_slice_mut(); Cell::from_mut(sl).as_slice_of_cells() }) } /// Resets the current bump vec to the beginning pub(crate) fn clear_current(&self) { self.inner().rent(|v| v.borrow_mut().0.clear()) } /// Obtains the length of the current BumpVec. pub(crate) fn len(&self) -> usize { self.inner().rent(|vec| vec.borrow().0.len()) } /// Call callback with an immutable buffer of the current BumpVec. This must /// be a callback to ensure that we don't (safely) borrow the slice for /// longer than it stays vaild. pub(crate) fn current_slice<F, R>(&self, mut callback: F) -> R where F: FnMut(&[XML_Char]) -> R { self.inner().rent(|v| callback(v.borrow().0.as_slice())) } /// Call callback with a mutable buffer of the current BumpVec. This must /// be a callback to ensure that we don't (safely) borrow the slice for /// longer than it stays vaild. 
pub(crate) fn current_mut_slice<F, R>(&self, mut callback: F) -> R where F: FnMut(&mut [XML_Char]) -> R { self.inner().rent(|v| callback(v.borrow_mut().0.as_mut_slice())) } /// Unsafe temporary version of `current_slice()`. This needs to be removed /// when callers are made safe. pub(crate) unsafe fn current_start(&self) -> *const XML_Char { self.inner().rent(|v| v.borrow().0.as_ptr()) } /// Appends a char to the current BumpVec. pub(crate) fn append_char(&self, c: XML_Char) -> bool { self.inner().rent(|vec| vec.borrow_mut().append_char(c)) } /// Overwrites the last char in the current BumpVec. /// Note that this will panic if empty. This is not an insert /// operation as it does not shift bytes afterwards. pub(crate) fn replace_last_char(&self, c: XML_Char) { self.inner().rent(|buf| { *buf.borrow_mut() .0 .last_mut() .expect("Called replace_last_char() when string was empty") = c; }) } /// Decrements the length, panicing if len is 0 pub(crate) fn backtrack(&self) { self.inner().rent(|vec| vec.borrow_mut().0.pop().expect("Called backtrack() on empty BumpVec")); } /// Gets the last character, panicing if len is 0 pub(crate) fn get_last_char(&self) -> XML_Char { self.inner().rent(|buf| *buf.borrow().0.last().expect("Called get_last_char() when string was empty")) } /// Appends an entire C String to the current BumpVec. pub(crate) unsafe fn append_c_string(&self, mut s: *const XML_Char) -> bool
/// Resets the current Bump and deallocates its contents. /// The `inner` method must never be called here as it assumes /// self.0 is never `None` pub(crate) fn clear(&mut self) { let mut inner_pool = self.0.take(); let mut bump = inner_pool.unwrap().into_head(); bump.reset(); inner_pool = Some(InnerStringPool::new( bump, |bump| RefCell::new(RentedBumpVec(BumpVec::new_in(&bump))), )); swap(&mut self.0, &mut inner_pool); } pub(crate) fn store_c_string( &self, enc: &ENCODING, buf: ExpatBufRef, ) -> bool { self.inner().rent(|vec| { let mut vec = vec.borrow_mut(); if !vec.append(enc, buf) { return false; } if !vec.append_char('\0' as XML_Char) { return false; } true }) } pub(crate) fn append( &self, enc: &ENCODING, read_buf: ExpatBufRef, ) -> bool { self.inner().rent(|vec| vec.borrow_mut().append(enc, read_buf)) } pub(crate) unsafe fn copy_c_string( &self, mut s: *const XML_Char, ) -> Option<&[XML_Char]> { // self.append_c_string(s);? let successful = self.inner().rent(|vec| { let mut vec = vec.borrow_mut(); loop { if !vec.append_char(*s) { return false; } if *s == 0 { break; } s = s.offset(1); } true }); if !successful { return None; } Some(self.finish_string()) } pub(crate) unsafe fn copy_c_string_n( &self, mut s: *const XML_Char, mut n: c_int, ) -> Option<&[XML_Char]> { let successful = self.inner().rent(|vec| { let mut vec = vec.borrow_mut(); let mut n = n.try_into().unwrap(); if vec.0.try_reserve_exact(n).is_err() { return false; }; while n > 0 { if !vec.append_char(*s) { return false; } n -= 1; s = s.offset(1) } true }); if !successful { return None; } Some(self.finish_string()) } } #[derive(Debug)] pub(crate) struct RentedBumpVec<'bump>(BumpVec<'bump, XML_Char>); impl<'bump> RentedBumpVec<'bump> { fn is_full(&self) -> bool { self.0.len() == self.0.capacity() } fn append<'a>( &mut self, enc: &ENCODING, mut read_buf: ExpatBufRef<'a>, ) -> bool { loop { // REXPAT: always reserve at least 4 bytes, // so at least one character gets converted every iteration if self.0.try_reserve(read_buf.len().max(4)).is_err() { return false; } let start_len = self.0.len(); let cap = self.0.capacity(); self.0.resize(cap, 0); let mut write_buf = ExpatBufRefMut::from(&mut self.0[start_len..]); let write_buf_len = write_buf.len(); let convert_res = XmlConvert!(enc, &mut read_buf, &mut write_buf); // The write buf shrinks by how much was written to it let written_size = write_buf_len - write_buf.len(); self.0.truncate(start_len + written_size); if convert_res == XML_Convert_Result::COMPLETED || convert_res == XML_Convert_Result::INPUT_INCOMPLETE { return true; } } } fn append_char(&mut self, c: XML_Char) -> bool { if self.0.try_reserve(1).is_err() { false } else { self.0.push(c); true } } } #[cfg(test)] mod consts { use super::XML_Char; pub const A: XML_Char = 'a' as XML_Char; pub const B: XML_Char = 'b' as XML_Char; pub const C: XML_Char = 'c' as XML_Char; pub const D: XML_Char = 'd' as XML_Char; pub const NULL: XML_Char = '\0' as XML_Char; pub static S: [XML_Char; 5] = [C, D, D, C, NULL]; } #[test] fn test_append_char() { use consts::*; let mut pool = StringPool::try_new().unwrap(); assert!(pool.append_char(A)); pool.current_slice(|s| assert_eq!(s, [A])); assert!(pool.append_char(B)); pool.current_slice(|s| assert_eq!(s, [A, B])); // New BumpVec pool.finish_string(); assert!(pool.append_char(C)); pool.current_slice(|s| assert_eq!(s, [C])); } #[test] fn test_append_string() { use consts::*; let mut pool = StringPool::try_new().unwrap(); let mut string = [A, B, C, NULL]; unsafe { 
assert!(pool.append_c_string(string.as_mut_ptr())); } pool.current_slice(|s| assert_eq!(s, [A, B, C])); } #[test] fn test_copy_string() { use consts::*; let mut pool = StringPool::try_new().unwrap(); assert!(pool.append_char(A)); pool.current_slice(|s| assert_eq!(s, [A])); let new_string = unsafe { pool.copy_c_string(S.as_ptr()) }; assert_eq!(new_string.unwrap(), [A, C, D, D, C, NULL]); assert!(pool.append_char(B)); pool.current_slice(|s| assert_eq!(s, [B])); let new_string2 = unsafe { pool.copy_c_string_n(S.as_ptr(), 4) }; assert_eq!(new_string2.unwrap(), [B, C, D, D, C]); } #[test] fn test_store_c_string() { use consts::*; use crate::lib::xmlparse::XmlGetInternalEncoding; let mut pool = StringPool::try_new().unwrap(); let enc = XmlGetInternalEncoding(); let read_buf = unsafe { ExpatBufRef::new(S.as_ptr(), S.as_ptr().add(3)) }; assert!(pool.store_c_string(enc, read_buf)); let string = pool.finish_string(); assert_eq!(&*string, &[C, D, D, NULL]); assert!(pool.append_char(A)); pool.current_slice(|s| assert_eq!(s, [A])); // No overlap between buffers: assert_eq!(&*string, &[C, D, D, NULL]); assert!(pool.append_char(A)); pool.current_slice(|s| assert_eq!(s, [A, A])); // Force reallocation: pool.inner().rent(|v| v.borrow_mut().0.resize(2, 0)); assert!(pool.store_c_string(enc, read_buf)); let s = pool.finish_string(); assert_eq!(s, [A, A, C, D, D, NULL]); }
{
    self.inner().rent(|vec| {
        let mut vec = vec.borrow_mut();
        while *s != 0 {
            if !vec.append_char(*s) {
                return false;
            }
            s = s.offset(1)
        }
        true
    })
}
identifier_body
string_pool.rs
use crate::expat_external_h::XML_Char; use crate::lib::xmlparse::{ExpatBufRef, ExpatBufRefMut, XmlConvert}; use crate::lib::xmltok::{ENCODING, XML_Convert_Result}; use bumpalo::Bump; use bumpalo::collections::vec::Vec as BumpVec; use fallible_collections::FallibleBox; use libc::c_int; use std::cell::{Cell, RefCell}; use std::convert::TryInto; use std::mem::swap; pub const INIT_BLOCK_SIZE: usize = 1024; rental! { mod rental_pool { use super::*; #[rental(debug)] pub(crate) struct InnerStringPool { // The rental crate requires that all fields but the last one // implement `StableDeref`, which means we need to wrap it // in a `Box` bump: Box<Bump>, current_bump_vec: RefCell<RentedBumpVec<'bump>>, } } } use rental_pool::InnerStringPool; /// A StringPool has the purpose of allocating distinct strings and then /// handing them off to be referenced either temporarily or for the entire length /// of the pool. pub(crate) struct StringPool(Option<InnerStringPool>); impl StringPool { pub(crate) fn try_new() -> Result<Self, ()> { let bump = Bump::try_with_capacity(INIT_BLOCK_SIZE).map_err(|_| ())?; let boxed_bump = Box::try_new(bump).map_err(|_| ())?; Ok(StringPool(Some(InnerStringPool::new( boxed_bump, |bump| RefCell::new(RentedBumpVec(BumpVec::new_in(&bump))), )))) } /// # Safety /// /// The inner type is only ever None in middle of the clear() /// method. Therefore it is safe to use anywhere else. fn inner(&self) -> &InnerStringPool { self.0.as_ref().unwrap_or_else(|| unsafe { std::hint::unreachable_unchecked() }) } /// Determines whether or not the current BumpVec is empty. pub(crate) fn is_empty(&self) -> bool { self.inner().rent(|vec| vec.borrow().0.is_empty()) } /// Determines whether or not the current BumpVec is full. pub(crate) fn is_full(&self) -> bool { self.inner().rent(|vec| vec.borrow().is_full()) } /// Gets the current vec, converts it into an immutable slice, /// and resets bookkeeping so that it will create a new vec next time. pub(crate) fn finish_string(&self) -> &[XML_Char] { self.inner().ref_rent_all(|pool| { let mut vec = RentedBumpVec(BumpVec::new_in(&pool.bump)); pool.current_bump_vec.replace(vec).0.into_bump_slice() }) } /// Gets the current vec, converts it into a slice of cells (with interior mutability), /// and resets bookkeeping so that it will create a new vec next time. pub(crate) fn finish_string_cells(&self) -> &[Cell<XML_Char>] { self.inner().ref_rent_all(|pool| { let mut vec = RentedBumpVec(BumpVec::new_in(&pool.bump)); let sl = pool.current_bump_vec.replace(vec).0.into_bump_slice_mut(); Cell::from_mut(sl).as_slice_of_cells() }) } /// Resets the current bump vec to the beginning pub(crate) fn clear_current(&self) { self.inner().rent(|v| v.borrow_mut().0.clear()) } /// Obtains the length of the current BumpVec. pub(crate) fn len(&self) -> usize { self.inner().rent(|vec| vec.borrow().0.len()) } /// Call callback with an immutable buffer of the current BumpVec. This must /// be a callback to ensure that we don't (safely) borrow the slice for /// longer than it stays vaild. pub(crate) fn current_slice<F, R>(&self, mut callback: F) -> R where F: FnMut(&[XML_Char]) -> R { self.inner().rent(|v| callback(v.borrow().0.as_slice())) } /// Call callback with a mutable buffer of the current BumpVec. This must /// be a callback to ensure that we don't (safely) borrow the slice for /// longer than it stays vaild. 
pub(crate) fn current_mut_slice<F, R>(&self, mut callback: F) -> R where F: FnMut(&mut [XML_Char]) -> R { self.inner().rent(|v| callback(v.borrow_mut().0.as_mut_slice())) } /// Unsafe temporary version of `current_slice()`. This needs to be removed /// when callers are made safe. pub(crate) unsafe fn current_start(&self) -> *const XML_Char { self.inner().rent(|v| v.borrow().0.as_ptr()) } /// Appends a char to the current BumpVec. pub(crate) fn append_char(&self, c: XML_Char) -> bool { self.inner().rent(|vec| vec.borrow_mut().append_char(c)) } /// Overwrites the last char in the current BumpVec. /// Note that this will panic if empty. This is not an insert /// operation as it does not shift bytes afterwards. pub(crate) fn replace_last_char(&self, c: XML_Char) { self.inner().rent(|buf| { *buf.borrow_mut() .0 .last_mut() .expect("Called replace_last_char() when string was empty") = c; }) } /// Decrements the length, panicing if len is 0 pub(crate) fn backtrack(&self) { self.inner().rent(|vec| vec.borrow_mut().0.pop().expect("Called backtrack() on empty BumpVec")); } /// Gets the last character, panicing if len is 0 pub(crate) fn get_last_char(&self) -> XML_Char { self.inner().rent(|buf| *buf.borrow().0.last().expect("Called get_last_char() when string was empty")) } /// Appends an entire C String to the current BumpVec. pub(crate) unsafe fn append_c_string(&self, mut s: *const XML_Char) -> bool { self.inner().rent(|vec| { let mut vec = vec.borrow_mut(); while *s != 0 { if !vec.append_char(*s) { return false; } s = s.offset(1) } true }) } /// Resets the current Bump and deallocates its contents. /// The `inner` method must never be called here as it assumes /// self.0 is never `None` pub(crate) fn clear(&mut self) { let mut inner_pool = self.0.take(); let mut bump = inner_pool.unwrap().into_head(); bump.reset(); inner_pool = Some(InnerStringPool::new( bump, |bump| RefCell::new(RentedBumpVec(BumpVec::new_in(&bump))), )); swap(&mut self.0, &mut inner_pool); } pub(crate) fn store_c_string( &self, enc: &ENCODING, buf: ExpatBufRef, ) -> bool { self.inner().rent(|vec| { let mut vec = vec.borrow_mut(); if !vec.append(enc, buf) { return false; } if !vec.append_char('\0' as XML_Char) { return false; } true }) } pub(crate) fn append( &self, enc: &ENCODING, read_buf: ExpatBufRef, ) -> bool { self.inner().rent(|vec| vec.borrow_mut().append(enc, read_buf)) } pub(crate) unsafe fn copy_c_string( &self, mut s: *const XML_Char, ) -> Option<&[XML_Char]> { // self.append_c_string(s);? 
let successful = self.inner().rent(|vec| { let mut vec = vec.borrow_mut(); loop { if !vec.append_char(*s) { return false; } if *s == 0 { break; } s = s.offset(1); } true }); if !successful { return None; } Some(self.finish_string()) } pub(crate) unsafe fn copy_c_string_n( &self, mut s: *const XML_Char, mut n: c_int, ) -> Option<&[XML_Char]> { let successful = self.inner().rent(|vec| { let mut vec = vec.borrow_mut(); let mut n = n.try_into().unwrap(); if vec.0.try_reserve_exact(n).is_err() { return false; }; while n > 0 { if !vec.append_char(*s) { return false; } n -= 1; s = s.offset(1) } true }); if !successful { return None; } Some(self.finish_string()) } } #[derive(Debug)] pub(crate) struct RentedBumpVec<'bump>(BumpVec<'bump, XML_Char>); impl<'bump> RentedBumpVec<'bump> { fn is_full(&self) -> bool { self.0.len() == self.0.capacity() } fn append<'a>( &mut self, enc: &ENCODING, mut read_buf: ExpatBufRef<'a>, ) -> bool { loop { // REXPAT: always reserve at least 4 bytes, // so at least one character gets converted every iteration if self.0.try_reserve(read_buf.len().max(4)).is_err() { return false; } let start_len = self.0.len(); let cap = self.0.capacity(); self.0.resize(cap, 0); let mut write_buf = ExpatBufRefMut::from(&mut self.0[start_len..]); let write_buf_len = write_buf.len(); let convert_res = XmlConvert!(enc, &mut read_buf, &mut write_buf); // The write buf shrinks by how much was written to it let written_size = write_buf_len - write_buf.len(); self.0.truncate(start_len + written_size); if convert_res == XML_Convert_Result::COMPLETED || convert_res == XML_Convert_Result::INPUT_INCOMPLETE { return true; } } } fn append_char(&mut self, c: XML_Char) -> bool { if self.0.try_reserve(1).is_err() { false } else { self.0.push(c); true } } } #[cfg(test)] mod consts { use super::XML_Char; pub const A: XML_Char = 'a' as XML_Char; pub const B: XML_Char = 'b' as XML_Char; pub const C: XML_Char = 'c' as XML_Char; pub const D: XML_Char = 'd' as XML_Char; pub const NULL: XML_Char = '\0' as XML_Char; pub static S: [XML_Char; 5] = [C, D, D, C, NULL]; } #[test] fn test_append_char() { use consts::*; let mut pool = StringPool::try_new().unwrap(); assert!(pool.append_char(A)); pool.current_slice(|s| assert_eq!(s, [A])); assert!(pool.append_char(B)); pool.current_slice(|s| assert_eq!(s, [A, B])); // New BumpVec pool.finish_string(); assert!(pool.append_char(C)); pool.current_slice(|s| assert_eq!(s, [C])); } #[test] fn test_append_string() { use consts::*; let mut pool = StringPool::try_new().unwrap(); let mut string = [A, B, C, NULL]; unsafe { assert!(pool.append_c_string(string.as_mut_ptr())); } pool.current_slice(|s| assert_eq!(s, [A, B, C])); } #[test] fn test_copy_string() { use consts::*; let mut pool = StringPool::try_new().unwrap(); assert!(pool.append_char(A)); pool.current_slice(|s| assert_eq!(s, [A]));
pool.copy_c_string(S.as_ptr()) }; assert_eq!(new_string.unwrap(), [A, C, D, D, C, NULL]); assert!(pool.append_char(B)); pool.current_slice(|s| assert_eq!(s, [B])); let new_string2 = unsafe { pool.copy_c_string_n(S.as_ptr(), 4) }; assert_eq!(new_string2.unwrap(), [B, C, D, D, C]); } #[test] fn test_store_c_string() { use consts::*; use crate::lib::xmlparse::XmlGetInternalEncoding; let mut pool = StringPool::try_new().unwrap(); let enc = XmlGetInternalEncoding(); let read_buf = unsafe { ExpatBufRef::new(S.as_ptr(), S.as_ptr().add(3)) }; assert!(pool.store_c_string(enc, read_buf)); let string = pool.finish_string(); assert_eq!(&*string, &[C, D, D, NULL]); assert!(pool.append_char(A)); pool.current_slice(|s| assert_eq!(s, [A])); // No overlap between buffers: assert_eq!(&*string, &[C, D, D, NULL]); assert!(pool.append_char(A)); pool.current_slice(|s| assert_eq!(s, [A, A])); // Force reallocation: pool.inner().rent(|v| v.borrow_mut().0.resize(2, 0)); assert!(pool.store_c_string(enc, read_buf)); let s = pool.finish_string(); assert_eq!(s, [A, A, C, D, D, NULL]); }
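// Minimal usage sketch (illustrative only, mirroring the tests above; uses the
// test consts A and B):
//
//     let pool = StringPool::try_new().unwrap();
//     assert!(pool.append_char(A));
//     assert!(pool.append_char(B));
//     let s = pool.finish_string(); // s: &[XML_Char], borrowed from the pool's Bump
//     assert_eq!(s, [A, B]);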
let new_string = unsafe {
random_line_split
string_pool.rs
use crate::expat_external_h::XML_Char; use crate::lib::xmlparse::{ExpatBufRef, ExpatBufRefMut, XmlConvert}; use crate::lib::xmltok::{ENCODING, XML_Convert_Result}; use bumpalo::Bump; use bumpalo::collections::vec::Vec as BumpVec; use fallible_collections::FallibleBox; use libc::c_int; use std::cell::{Cell, RefCell}; use std::convert::TryInto; use std::mem::swap; pub const INIT_BLOCK_SIZE: usize = 1024; rental! { mod rental_pool { use super::*; #[rental(debug)] pub(crate) struct InnerStringPool { // The rental crate requires that all fields but the last one // implement `StableDeref`, which means we need to wrap it // in a `Box` bump: Box<Bump>, current_bump_vec: RefCell<RentedBumpVec<'bump>>, } } } use rental_pool::InnerStringPool; /// A StringPool has the purpose of allocating distinct strings and then /// handing them off to be referenced either temporarily or for the entire length /// of the pool. pub(crate) struct StringPool(Option<InnerStringPool>); impl StringPool { pub(crate) fn try_new() -> Result<Self, ()> { let bump = Bump::try_with_capacity(INIT_BLOCK_SIZE).map_err(|_| ())?; let boxed_bump = Box::try_new(bump).map_err(|_| ())?; Ok(StringPool(Some(InnerStringPool::new( boxed_bump, |bump| RefCell::new(RentedBumpVec(BumpVec::new_in(&bump))), )))) } /// # Safety /// /// The inner type is only ever None in middle of the clear() /// method. Therefore it is safe to use anywhere else. fn inner(&self) -> &InnerStringPool { self.0.as_ref().unwrap_or_else(|| unsafe { std::hint::unreachable_unchecked() }) } /// Determines whether or not the current BumpVec is empty. pub(crate) fn is_empty(&self) -> bool { self.inner().rent(|vec| vec.borrow().0.is_empty()) } /// Determines whether or not the current BumpVec is full. pub(crate) fn is_full(&self) -> bool { self.inner().rent(|vec| vec.borrow().is_full()) } /// Gets the current vec, converts it into an immutable slice, /// and resets bookkeeping so that it will create a new vec next time. pub(crate) fn finish_string(&self) -> &[XML_Char] { self.inner().ref_rent_all(|pool| { let mut vec = RentedBumpVec(BumpVec::new_in(&pool.bump)); pool.current_bump_vec.replace(vec).0.into_bump_slice() }) } /// Gets the current vec, converts it into a slice of cells (with interior mutability), /// and resets bookkeeping so that it will create a new vec next time. pub(crate) fn finish_string_cells(&self) -> &[Cell<XML_Char>] { self.inner().ref_rent_all(|pool| { let mut vec = RentedBumpVec(BumpVec::new_in(&pool.bump)); let sl = pool.current_bump_vec.replace(vec).0.into_bump_slice_mut(); Cell::from_mut(sl).as_slice_of_cells() }) } /// Resets the current bump vec to the beginning pub(crate) fn clear_current(&self) { self.inner().rent(|v| v.borrow_mut().0.clear()) } /// Obtains the length of the current BumpVec. pub(crate) fn len(&self) -> usize { self.inner().rent(|vec| vec.borrow().0.len()) } /// Call callback with an immutable buffer of the current BumpVec. This must /// be a callback to ensure that we don't (safely) borrow the slice for /// longer than it stays vaild. pub(crate) fn current_slice<F, R>(&self, mut callback: F) -> R where F: FnMut(&[XML_Char]) -> R { self.inner().rent(|v| callback(v.borrow().0.as_slice())) } /// Call callback with a mutable buffer of the current BumpVec. This must /// be a callback to ensure that we don't (safely) borrow the slice for /// longer than it stays vaild. 
pub(crate) fn current_mut_slice<F, R>(&self, mut callback: F) -> R where F: FnMut(&mut [XML_Char]) -> R { self.inner().rent(|v| callback(v.borrow_mut().0.as_mut_slice())) } /// Unsafe temporary version of `current_slice()`. This needs to be removed /// when callers are made safe. pub(crate) unsafe fn current_start(&self) -> *const XML_Char { self.inner().rent(|v| v.borrow().0.as_ptr()) } /// Appends a char to the current BumpVec. pub(crate) fn append_char(&self, c: XML_Char) -> bool { self.inner().rent(|vec| vec.borrow_mut().append_char(c)) } /// Overwrites the last char in the current BumpVec. /// Note that this will panic if empty. This is not an insert /// operation as it does not shift bytes afterwards. pub(crate) fn replace_last_char(&self, c: XML_Char) { self.inner().rent(|buf| { *buf.borrow_mut() .0 .last_mut() .expect("Called replace_last_char() when string was empty") = c; }) } /// Decrements the length, panicing if len is 0 pub(crate) fn backtrack(&self) { self.inner().rent(|vec| vec.borrow_mut().0.pop().expect("Called backtrack() on empty BumpVec")); } /// Gets the last character, panicing if len is 0 pub(crate) fn get_last_char(&self) -> XML_Char { self.inner().rent(|buf| *buf.borrow().0.last().expect("Called get_last_char() when string was empty")) } /// Appends an entire C String to the current BumpVec. pub(crate) unsafe fn append_c_string(&self, mut s: *const XML_Char) -> bool { self.inner().rent(|vec| { let mut vec = vec.borrow_mut(); while *s != 0 { if !vec.append_char(*s)
s = s.offset(1) } true }) } /// Resets the current Bump and deallocates its contents. /// The `inner` method must never be called here as it assumes /// self.0 is never `None` pub(crate) fn clear(&mut self) { let mut inner_pool = self.0.take(); let mut bump = inner_pool.unwrap().into_head(); bump.reset(); inner_pool = Some(InnerStringPool::new( bump, |bump| RefCell::new(RentedBumpVec(BumpVec::new_in(&bump))), )); swap(&mut self.0, &mut inner_pool); } pub(crate) fn store_c_string( &self, enc: &ENCODING, buf: ExpatBufRef, ) -> bool { self.inner().rent(|vec| { let mut vec = vec.borrow_mut(); if !vec.append(enc, buf) { return false; } if !vec.append_char('\0' as XML_Char) { return false; } true }) } pub(crate) fn append( &self, enc: &ENCODING, read_buf: ExpatBufRef, ) -> bool { self.inner().rent(|vec| vec.borrow_mut().append(enc, read_buf)) } pub(crate) unsafe fn copy_c_string( &self, mut s: *const XML_Char, ) -> Option<&[XML_Char]> { // self.append_c_string(s);? let successful = self.inner().rent(|vec| { let mut vec = vec.borrow_mut(); loop { if !vec.append_char(*s) { return false; } if *s == 0 { break; } s = s.offset(1); } true }); if !successful { return None; } Some(self.finish_string()) } pub(crate) unsafe fn copy_c_string_n( &self, mut s: *const XML_Char, mut n: c_int, ) -> Option<&[XML_Char]> { let successful = self.inner().rent(|vec| { let mut vec = vec.borrow_mut(); let mut n = n.try_into().unwrap(); if vec.0.try_reserve_exact(n).is_err() { return false; }; while n > 0 { if !vec.append_char(*s) { return false; } n -= 1; s = s.offset(1) } true }); if !successful { return None; } Some(self.finish_string()) } } #[derive(Debug)] pub(crate) struct RentedBumpVec<'bump>(BumpVec<'bump, XML_Char>); impl<'bump> RentedBumpVec<'bump> { fn is_full(&self) -> bool { self.0.len() == self.0.capacity() } fn append<'a>( &mut self, enc: &ENCODING, mut read_buf: ExpatBufRef<'a>, ) -> bool { loop { // REXPAT: always reserve at least 4 bytes, // so at least one character gets converted every iteration if self.0.try_reserve(read_buf.len().max(4)).is_err() { return false; } let start_len = self.0.len(); let cap = self.0.capacity(); self.0.resize(cap, 0); let mut write_buf = ExpatBufRefMut::from(&mut self.0[start_len..]); let write_buf_len = write_buf.len(); let convert_res = XmlConvert!(enc, &mut read_buf, &mut write_buf); // The write buf shrinks by how much was written to it let written_size = write_buf_len - write_buf.len(); self.0.truncate(start_len + written_size); if convert_res == XML_Convert_Result::COMPLETED || convert_res == XML_Convert_Result::INPUT_INCOMPLETE { return true; } } } fn append_char(&mut self, c: XML_Char) -> bool { if self.0.try_reserve(1).is_err() { false } else { self.0.push(c); true } } } #[cfg(test)] mod consts { use super::XML_Char; pub const A: XML_Char = 'a' as XML_Char; pub const B: XML_Char = 'b' as XML_Char; pub const C: XML_Char = 'c' as XML_Char; pub const D: XML_Char = 'd' as XML_Char; pub const NULL: XML_Char = '\0' as XML_Char; pub static S: [XML_Char; 5] = [C, D, D, C, NULL]; } #[test] fn test_append_char() { use consts::*; let mut pool = StringPool::try_new().unwrap(); assert!(pool.append_char(A)); pool.current_slice(|s| assert_eq!(s, [A])); assert!(pool.append_char(B)); pool.current_slice(|s| assert_eq!(s, [A, B])); // New BumpVec pool.finish_string(); assert!(pool.append_char(C)); pool.current_slice(|s| assert_eq!(s, [C])); } #[test] fn test_append_string() { use consts::*; let mut pool = StringPool::try_new().unwrap(); let mut string = [A, B, C, NULL]; unsafe { 
assert!(pool.append_c_string(string.as_mut_ptr())); } pool.current_slice(|s| assert_eq!(s, [A, B, C])); } #[test] fn test_copy_string() { use consts::*; let mut pool = StringPool::try_new().unwrap(); assert!(pool.append_char(A)); pool.current_slice(|s| assert_eq!(s, [A])); let new_string = unsafe { pool.copy_c_string(S.as_ptr()) }; assert_eq!(new_string.unwrap(), [A, C, D, D, C, NULL]); assert!(pool.append_char(B)); pool.current_slice(|s| assert_eq!(s, [B])); let new_string2 = unsafe { pool.copy_c_string_n(S.as_ptr(), 4) }; assert_eq!(new_string2.unwrap(), [B, C, D, D, C]); } #[test] fn test_store_c_string() { use consts::*; use crate::lib::xmlparse::XmlGetInternalEncoding; let mut pool = StringPool::try_new().unwrap(); let enc = XmlGetInternalEncoding(); let read_buf = unsafe { ExpatBufRef::new(S.as_ptr(), S.as_ptr().add(3)) }; assert!(pool.store_c_string(enc, read_buf)); let string = pool.finish_string(); assert_eq!(&*string, &[C, D, D, NULL]); assert!(pool.append_char(A)); pool.current_slice(|s| assert_eq!(s, [A])); // No overlap between buffers: assert_eq!(&*string, &[C, D, D, NULL]); assert!(pool.append_char(A)); pool.current_slice(|s| assert_eq!(s, [A, A])); // Force reallocation: pool.inner().rent(|v| v.borrow_mut().0.resize(2, 0)); assert!(pool.store_c_string(enc, read_buf)); let s = pool.finish_string(); assert_eq!(s, [A, A, C, D, D, NULL]); }
{ return false; }
conditional_block
index.js
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ var app = { // Application Constructor initialize: function() { this.bindEvents(); this.make_validations(); }, // Bind Event Listeners // // Bind any events that are required on startup. Common events are: // 'load', 'deviceready', 'offline', and 'online'. bindEvents: function() { document.addEventListener('deviceready', this.onDeviceReady, false); }, make_validations: function(){ var value = window.localStorage.getItem("is_logged"); if (value == null || value == 'false') window.location = 'login.html'; //check whether there is a pending (unfinished) check-in stored on the device var checkin_nota = window.localStorage.getItem("dados_ultimo_checkin_nota"); checkin_nota = JSON.parse(checkin_nota); if (checkin_nota != null && !checkin_nota.nao_travar_tela_checkin) window.location = 'dados_checkin_nota.html'; },
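// Note on the flow above: make_validations() sends the user to login.html unless the
// "is_logged" flag is set, and resumes a pending check-in stored under
// "dados_ultimo_checkin_nota" unless its nao_travar_tela_checkin flag is set.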
// deviceready Event Handler // // The scope of 'this' is the event. In order to call the 'receivedEvent' // function, we must explicitly call 'app.receivedEvent(...);' onDeviceReady: function() { $(document).ready(function (e) { app.load_update_data(); }); document.addEventListener("backbutton", function (e) { e.preventDefault(); util.return_last_page(); }, false); util.getCurrentGeoLocation( function(s) { app.configureBackgroundGeoLocation(); app.startBackgroundGeoLocation(); //inicia a coleta de dados de localização }, function(error) { console.log(error); /* navigator.notification.confirm('As permissões para uso do GPS não foram concedidas, alguns recursos podem não funcionar corretamente. ' + error, function (e) { }, 'Atenção', 'OK');*/ }); app.setupPush(); $("#btnCheckin").click(function () { util.add_path_to_breadcrumb('index.html'); window.location = 'checkin_nota.html'; }); $("#btnComprovarNota").click(function () { util.add_path_to_breadcrumb('index.html'); window.location = 'comprovar_nota.html'; }); $("#btnNotasIniciadas").click(function () { util.add_path_to_breadcrumb('index.html'); window.location = 'notas_iniciadas_usuario.html'; }); $("#btnTransporteAgendado").click(function () { util.add_path_to_breadcrumb('index.html'); window.location = 'index.html'; // window.location = 'transporte_agendado.html'; }); $("#btnTransporteAgendado1").click(function () { util.add_path_to_breadcrumb('index.html'); window.location = 'transporte_agendado.html'; }); $("#btnRealizarColeta").click(function () { util.add_path_to_breadcrumb('index.html'); window.location = 'coleta_escolha.html'; }); var login = window.localStorage.getItem("login"); $("#usuario").text(login); }, load_update_data: function(){ const monthNames = ["01", "02", "03", "04", "05", "06", "07", "08", "09", "10", "11", "12"]; let dateObj = new Date(); let month = monthNames[dateObj.getMonth()]; let day = String(dateObj.getDate()).padStart(2, '0'); let day1 = String(dateObj.getDate()-1).padStart(2, '0'); let day2 = String(dateObj.getDate()-2).padStart(2, '0'); let year = dateObj.getFullYear(); let output = day + '/'+ month + '/' + year; let output1 = day1 + '/'+ month + '/' + year; let output2 = day2 + '/'+ month + '/' + year; // $('#formDataShow').html('<p>'+output+'</p>'); $('#finalizadaDate').text(output); $('#finalizadaDate1').text(output1); $('#finalizadaDate2').text(output2); let beforeSend = function () { coreProgress.showPreloader(undefined, 0); } let success = function (msg) { coreDialog.close('.preloader'); console.log(msg); if (msg.status == "ok") { $('#entregas').text(msg.data.number_purchases_pendent); $('#agendamentos').text(msg.data.number_transports_pendent); $('#finalizadas').text(msg.data.number_delivery_finished); $('#finalizadas1').text(msg.data.delivery_finished_daybefore); $('#finalizadas2').text(msg.data.delivery_finished_twodaysBefore); var config = { value: msg.data.percent_complet, text: '%', durationAnimate: 3000, padding: '3px', color: 'white', trailColor: 'black-opacity-10', textSize: '50px', textColor: 'black', width:'160px', strokeWidth: '2', trailWidth:'8', }; ProgressCircle.create(document.getElementById('progressUserKM'), config); } } let error = function (msg) { coreDialog.close('.preloader'); console.log(msg); $('.formDataShow').html('<p>error</p>'); } var formData = new FormData(); formData.append('username', window.localStorage.getItem("login")); webservice_access.get_update_data(formData, beforeSend, success, error); }, setupPush: function() { console.log('calling push init'); var push = 
PushNotification.init({ "android": { "senderID": "12345" }, "browser": {}, "ios": { "sound": true, "vibration": true, "badge": true }, "windows": {} }); console.log('after init'); push.on('registration', function(data) { console.log('registration event: ' + data.registrationId); var oldRegId = localStorage.getItem('registrationId'); if (oldRegId !== data.registrationId) { // Save new registration ID localStorage.setItem('registrationId', data.registrationId); // Post registrationId to your app server as the value has changed } var parentElement = document.getElementById('registration'); var listeningElement = parentElement.querySelector('.waiting'); var receivedElement = parentElement.querySelector('.received'); listeningElement.setAttribute('style', 'display:none;'); receivedElement.setAttribute('style', 'display:block;'); }); push.on('error', function(e) { console.log("push error = " + e.message); }); push.on('notification', function(data) { console.log('notification event'); navigator.notification.alert( data.message, // message null, // callback data.title, // title 'Ok' // buttonName ); }); }, startBackgroundGeoLocation: function() { //app.configureBackgroundGeoLocation(); // Turn ON the background-geolocation system. The user will be tracked whenever they suspend the app. window.plugins.backgroundGeoLocation.start(); // window.plugins.backgroundGeoLocation.delete_all_locations() }, stopBackgroundGeoLocation: function() { // If you wish to turn OFF background-tracking, call the #stop method. window.plugins.backgroundGeoLocation.stop(); }, configureBackgroundGeoLocation: function() { // Your app must execute AT LEAST ONE call for the current position via standard Cordova geolocation, // in order to prompt the user for Location permission. window.navigator.geolocation.getCurrentPosition(function(location) { console.log('Location from Cordova'); }); var bgGeo = window.plugins.backgroundGeoLocation; var yourAjaxCallback = function(response) { bgGeo.finish(); }; var callbackFn = function(location) { console.log('[js] BackgroundGeoLocation callback: ' + location.latitude + ',' + location.longitude); yourAjaxCallback.call(this); }; var failureFn = function(error) { console.log('BackgroundGeoLocation error'); } var domain = window.localStorage.getItem("domain"); var url = domain + '/scan/example/webservice/ws_portal_transportador.php?request=InsertUserLocation'; var user = window.localStorage.getItem("login"); // BackgroundGeoLocation is highly configurable. bgGeo.configure(callbackFn, failureFn, { url: url, params: { user: user }, headers: { 'apiKey': '78asd4546d4sa687e1d1xzlcknhwyhuWMKPSJDpox8213njdOWnxxipW58547' }, desiredAccuracy: 0, stationaryRadius: 50, distanceFilter: 50, notificationTitle: 'Portal do Transportador', notificationText: 'Localização Ativa', activityType: "AutomotiveNavigation", debug: false, stopOnTerminate: true, persistLocation: false }); } };
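// Note: onDeviceReady() runs configureBackgroundGeoLocation() and then
// startBackgroundGeoLocation() from the getCurrentGeoLocation success callback, so the
// plugin already has its callback, failure handler and url/params/headers set when
// start() begins tracking.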
random_line_split
modulizer.py
#!/usr/bin/python #========================================================================== # # Copyright Insight Software Consortium # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0.txt # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # #==========================================================================*/ # This script is used to automate the modularization process. The following # steps are included: # 1. Move the files in the monolithic ITK into modules of the modularized ITK. # A manifest text file that lists all the files and their destinations is # required to run the script.By default, the manifest file is named as # "Manifest.txt" in the same directory of this script. # 2. Create CMake Files and put them into modules. # Modified by Guillaume Pasero <guillaume.pasero@c-s.fr> # add dependencies in otb-module.cmake # To run it, type ./modulizer.py OTB_PATH Manifest_PATH # from the otb-modulizer root directory. print "*************************************************************************" print "WARNINGs! This modularization script is still in its experimental stage." print "Current OTB users should not run this script." print "*************************************************************************" import shutil import os.path as op import re import sys import os import stat import glob import documentationCheck import analyseAppManifest import dispatchTests import dispatchExamples from subprocess import call def
(path): sourceList = [] nbFields = 6 fd = open(path,'rb') # skip first line and detect separator firstLine = fd.readline() sep = ',' if (len(firstLine.split(sep)) != nbFields): sep = ';' if (len(firstLine.split(sep)) != nbFields): sep = '\t' if (len(firstLine.split(sep)) != nbFields): print "Unknown separator" return sourceList fd.seek(0) # parse file for line in fd: if (line.strip()).startswith("#"): continue words = line.split(sep) if (len(words) < (nbFields-1)): print "Wrong number of fields, skipping this line" continue fullPath = words[0].strip(" ,;\t\n\r") groupName = words[2].strip(" ,;\t\n\r") moduleName = words[3].strip(" ,;\t\n\r") subDir = words[4].strip(" ,;\t\n\r") sourceName = op.basename(fullPath) sourceList.append({"path":fullPath, "group":groupName, "module":moduleName, "subDir":subDir}) fd.close() return sourceList def parseDescriptions(path): output = {} sep = '|' nbFields = 2 fd = open(path,'rb') for line in fd: if (line.strip()).startswith("#"): continue words = line.split(sep) if len(words) != nbFields: continue moduleName = words[0].strip(" \"\t\n\r") description = words[1].strip(" \"\t\n\r") output[moduleName] = description fd.close() return output if len(sys.argv) < 4: print("USAGE: {0} monolithic_OTB_PATH OUTPUT_DIR Manifest_Path [module_dep [test_dep [mod_description]]]".format(sys.argv[0])) print(" monolithic_OTB_PATH : checkout of OTB repository (will not be modified)") print(" OUTPUT_DIR : output directory where OTB_Modular and OTB_remaining will be created ") print(" Manifest_Path : path to manifest file, in CSV-like format. Fields are :") print(" source_path/current_subDir/group/module/subDir/comment") print(" module_dep : dependencies between modules") print(" test_dep : additional dependencies for tests") print(" mod_description : description for each module") print(" migration_password : password to enable MIGRATION") sys.exit(-1) scriptDir = op.dirname(op.abspath(sys.argv[0])) HeadOfOTBTree = sys.argv[1] if (HeadOfOTBTree[-1] == '/'): HeadOfOTBTree = HeadOfOTBTree[0:-1] OutputDir = sys.argv[2] HeadOfModularOTBTree = op.join(OutputDir,"OTB_Modular") ManifestPath = sys.argv[3] EdgePath = "" if len(sys.argv) >= 5: EdgePath = sys.argv[4] testDependPath = "" if len(sys.argv) >= 6: testDependPath = sys.argv[5] modDescriptionPath = "" if len(sys.argv) >= 7: modDescriptionPath = sys.argv[6] enableMigration = False if len(sys.argv) >= 8: migrationPass = sys.argv[7] if migrationPass == "redbutton": enableMigration = True # copy the whole OTB tree over to a temporary dir HeadOfTempTree = op.join(OutputDir,"OTB_remaining") if op.isdir(HeadOfTempTree): shutil.rmtree(HeadOfTempTree) if op.isdir(HeadOfModularOTBTree): shutil.rmtree(HeadOfModularOTBTree) print("Start to copy" + HeadOfOTBTree + " to ./OTB_remaining ...") shutil.copytree(HeadOfOTBTree,HeadOfTempTree, ignore = shutil.ignore_patterns('.hg','.hg*')) print("Done copying!") # checkout OTB-Modular cmd ='hg clone http://hg.orfeo-toolbox.org/OTB-Modular '+HeadOfModularOTBTree os.system(cmd) logDir = op.join(OutputDir,"logs") if not op.isdir(logDir): os.makedirs(logDir) # read the manifest file print ("moving files from ./OTB_remaining into modules in {0}".format(HeadOfModularOTBTree)) numOfMissingFiles = 0; missingf = open(op.join(logDir,'missingFiles.log'),'w') moduleList=[] moduleDic={} sourceList = parseFullManifest(ManifestPath) for source in sourceList: # build module list moduleDic[source["module"]] = source["group"] # create the path inputfile = op.abspath(op.join(HeadOfTempTree,source["path"])) outputPath 
= op.join(op.join(HeadOfModularOTBTree,"Modules"),op.join(source["group"],op.join(source["module"],source["subDir"]))) if not op.isdir(outputPath): os.makedirs(outputPath) # copying files to the destination if op.isfile(inputfile): if op.isfile(op.join(outputPath,op.basename(inputfile))): os.remove(op.join(outputPath,op.basename(inputfile))) shutil.move(inputfile, outputPath) else: missingf.write(inputfile+'\n') numOfMissingFiles = numOfMissingFiles + 1 missingf.close() print ("listed {0} missing files to logs/missingFiles.log").format(numOfMissingFiles) moduleList = moduleDic.keys() # after move, operate a documentation check for source in sourceList: outputPath = op.join(op.join(HeadOfModularOTBTree,"Modules"),op.join(source["group"],op.join(source["module"],source["subDir"]))) outputFile = op.join(outputPath,op.basename(source["path"])) if op.isfile(outputFile): if op.splitext(outputFile)[1] == ".h": nextContent = documentationCheck.parserHeader(outputFile,source["module"]) fd = open(outputFile,'wb') fd.writelines(nextContent) fd.close() # get dependencies (if file is present) dependencies = {} testDependencies = {} exDependencies = {} for mod in moduleList: dependencies[mod] = [] testDependencies[mod] = [] exDependencies[mod] = [] if op.isfile(EdgePath): fd = open(EdgePath,'rb') for line in fd: words = line.split(',') if len(words) == 2: depFrom = words[0].strip(" ,;\t\n\r") depTo = words[1].strip(" ,;\t\n\r") if dependencies.has_key(depFrom): dependencies[depFrom].append(depTo) else: print("Bad dependency : "+depFrom+" -> "+depTo) fd.close() if op.isfile(testDependPath): fd = open(testDependPath,'rb') for line in fd: words = line.split(',') if len(words) == 2: depFrom = words[0].strip(" ,;\t\n\r") depTo = words[1].strip(" ,;\t\n\r") if testDependencies.has_key(depFrom): testDependencies[depFrom].append(depTo) else: print("Bad dependency : "+depFrom+" -> "+depTo) fd.close() """ if op.isfile(exDependPath): fd = open(exDependPath,'rb') for line in fd: words = line.split(',') if len(words) == 2: depFrom = words[0].strip(" ,;\t\n\r") depTo = words[1].strip(" ,;\t\n\r") if exDependencies.has_key(depFrom): exDependencies[depFrom].append(depTo) else: print("Bad dependency : "+depFrom+" -> "+depTo) fd.close() """ modDescriptions = {} if op.isfile(modDescriptionPath): modDescriptions = parseDescriptions(modDescriptionPath) # list the new files newf = open(op.join(logDir,'newFiles.log'),'w') for (root, subDirs, files) in os.walk(HeadOfTempTree): for afile in files: newf.write(op.join(root, afile)+'\n') newf.close() print ("listed new files to logs/newFiles.log") ########################################################################### print ('creating cmake files for each module (from the template module)') #moduleList = os.listdir(HeadOfModularOTBTree) for moduleName in moduleList: moduleDir = op.join(op.join(HeadOfModularOTBTree,"Modules"),op.join(moduleDic[moduleName],moduleName)) cmakeModName = "OTB"+moduleName if op.isdir(moduleDir): # write CMakeLists.txt filepath = moduleDir+'/CMakeLists.txt' if not op.isfile(filepath): o = open(filepath,'w') if op.isdir(moduleDir+'/src'): template_cmakelist = op.join(scriptDir,'templateModule/otb-template-module/CMakeLists.txt') else: template_cmakelist = op.join(scriptDir,'templateModule/otb-template-module/CMakeLists-nosrc.txt') for line in open(template_cmakelist,'r'): line = line.replace('otb-template-module',cmakeModName) o.write(line); o.close() # write src/CMakeLists.txt # list of CXX files if op.isdir(moduleDir+'/src'): cxxFiles = 
glob.glob(moduleDir+'/src/*.cxx') cxxFileList=''; for cxxf in cxxFiles: cxxFileList = cxxFileList+' '+cxxf.split('/')[-1]+'\n' # build list of link dependencies linkLibs = "" for dep in dependencies[moduleName]: #verify if dep is a header-onlymodule depThirdParty = False try: moduleDic[dep] except KeyError: # this is a ThirdParty module depThirdParty = True if not depThirdParty: depModuleDir = op.join(op.join(HeadOfModularOTBTree,"Modules"),op.join(moduleDic[dep],dep)) depcxx = glob.glob(depModuleDir+'/src/*.cxx') if depcxx : linkLibs = linkLibs + " ${OTB"+dep+"_LIBRARIES}" + "\n" else: linkLibs = linkLibs + " ${OTB"+dep+"_LIBRARIES}" + "\n" if len(linkLibs) == 0: linkLibs = " ${OTBITK_LIBRARIES}" filepath = moduleDir+'/src/CMakeLists.txt' if not op.isfile(filepath): o = open(filepath,'w') for line in open(op.join(scriptDir,'templateModule/otb-template-module/src/CMakeLists.txt'),'r'): line = line.replace('otb-template-module',cmakeModName) line = line.replace('LIST_OF_CXX_FILES',cxxFileList[0:-1]) #get rid of the last \n line = line.replace('LINK_LIBRARIES_TO_BE_REPLACED',linkLibs) o.write(line); o.close() # write app/CMakeLists.txt if op.isdir(moduleDir+'/app'): os.mkdir(moduleDir+'/test') srcFiles = glob.glob(moduleDir+'/app/*.cxx') srcFiles += glob.glob(moduleDir+'/app/*.h') appList = {} for srcf in srcFiles: # get App name appName = analyseAppManifest.findApplicationName(srcf) if len(appName) == 0: continue appList[appName] = {"source":op.basename(srcf)} # get original location cmakeListPath = "" for item in sourceList: if op.basename(item["path"]) == op.basename(srcf) and \ moduleName == item["module"]: appDir = op.basename(op.dirname(item["path"])) cmakeListPath = op.join(HeadOfOTBTree,op.join("Testing/Applications"),op.join(appDir,"CMakeLists.txt")) break # get App tests if not op.isfile(cmakeListPath): continue appList[appName]["test"] = analyseAppManifest.findTestFromApp(cmakeListPath,appName) # build list of link dependencies linkLibs = "" for dep in dependencies[moduleName]: linkLibs = linkLibs + " ${OTB"+dep+"_LIBRARIES}" + "\n" filepath = moduleDir+'/app/CMakeLists.txt' if not op.isfile(filepath): o = open(filepath,'w') # define link libraries o.write("set("+cmakeModName+"_LINK_LIBS\n") o.write(linkLibs) o.write(")\n") for appli in appList: content = "\notb_create_application(\n" content += " NAME " + appli + "\n" content += " SOURCES " + appList[appli]["source"] + "\n" content += " LINK_LIBRARIES ${${otb-module}_LIBRARIES})\n" o.write(content) o.close() filepath = moduleDir+'/test/CMakeLists.txt' if not op.isfile(filepath): o = open(filepath,'w') o.write("otb_module_test()") for appli in appList: if not appList[appli].has_key("test"): continue o.write("\n#----------- "+appli+" TESTS ----------------\n") for test in appList[appli]["test"]: if test.count("${"): print "Warning : test name contains a variable : "+test continue testcode=appList[appli]["test"][test] testcode=[s.replace('OTB_TEST_APPLICATION', 'otb_test_application') for s in testcode] o.writelines(testcode) o.write("\n") o.close() # write test/CMakeLists.txt : done by dispatchTests.py """ if op.isdir(moduleDir+'/test'): cxxFiles = glob.glob(moduleDir+'/test/*.cxx') cxxFileList=''; for cxxf in cxxFiles: cxxFileList = cxxFileList+cxxf.split('/')[-1]+'\n' filepath = moduleDir+'/test/CMakeLists.txt' if not op.isfile(filepath): o = open(filepath,'w') for line in open('./templateModule/otb-template-module/test/CMakeLists.txt','r'): # TODO : refactor for OTB words= moduleName.split('-') moduleNameMod=''; for word in 
words: moduleNameMod=moduleNameMod + word.capitalize() line = line.replace('itkTemplateModule',moduleNameMod) line = line.replace('itk-template-module',moduleName) line = line.replace('LIST_OF_CXX_FILES',cxxFileList[0:-1]) #get rid of the last \n o.write(line); o.close() """ # write otb-module.cmake, which contains dependency info filepath = moduleDir+'/otb-module.cmake' if not op.isfile(filepath): o = open(filepath,'w') for line in open(op.join(scriptDir,'templateModule/otb-template-module/otb-module.cmake'),'r'): # replace documentation if line.find("DESCRIPTION_TO_BE_REPLACED") >= 0: docString = "\"TBD\"" if moduleName in modDescriptions: descPos = line.find("DESCRIPTION_TO_BE_REPLACED") limitChar = 80 docString = "\""+modDescriptions[moduleName]+"\"" curPos = 80 - descPos while curPos < len(docString): lastSpace = docString[0:curPos].rfind(' ') if lastSpace > max(0,curPos-80): docString = docString[0:lastSpace] + '\n' + docString[lastSpace+1:] else: docString = docString[0:curPos] + '\n' + docString[curPos:] curPos += 81 line = line.replace('DESCRIPTION_TO_BE_REPLACED',docString) # replace module name line = line.replace('otb-template-module',cmakeModName) # replace depend list dependTagPos = line.find("DEPENDS_TO_BE_REPLACED") if dependTagPos >= 0: replacementStr = "DEPENDS" indentStr = "" for it in range(dependTagPos+2): indentStr = indentStr + " " if len(dependencies[moduleName]) > 0: deplist = dependencies[moduleName] else: deplist = ["Common"] for dep in deplist: replacementStr = replacementStr + "\n" + indentStr +"OTB"+ dep line = line.replace('DEPENDS_TO_BE_REPLACED',replacementStr) # replace test_depend list testDependTagPos = line.find("TESTDEP_TO_BE_REPLACED") if testDependTagPos >= 0: if moduleName.startswith("App"): # for application : hardcode TestKernel and CommandLine indentStr = "" for it in range(testDependTagPos+2): indentStr = indentStr + " " replacementStr = "TEST_DEPENDS\n" + indentStr + "OTBTestKernel\n" + indentStr + "OTBCommandLine" line = line.replace('TESTDEP_TO_BE_REPLACED',replacementStr) else: # standard case if len(testDependencies[moduleName]) > 0: indentStr = "" replacementStr = "TEST_DEPENDS" for it in range(testDependTagPos+2): indentStr = indentStr + " " for dep in testDependencies[moduleName]: replacementStr = replacementStr + "\n" + indentStr +"OTB"+ dep line = line.replace('TESTDEP_TO_BE_REPLACED',replacementStr) else: line = line.replace('TESTDEP_TO_BE_REPLACED','') # replace example_depend list exDependTagPos = line.find("EXDEP_TO_BE_REPLACED") if exDependTagPos >= 0: if len(exDependencies[moduleName]) > 0: indentStr = "" replacementStr = "EXAMPLE_DEPENDS" for it in range(exDependTagPos+2): indentStr = indentStr + " " for dep in exDependencies[moduleName]: replacementStr = replacementStr + "\n" + indentStr +"OTB"+ dep line = line.replace('EXDEP_TO_BE_REPLACED',replacementStr) else: line = line.replace('EXDEP_TO_BE_REPLACED','') o.write(line); o.close() # call dispatchTests to fill test/CMakeLists if op.isfile(testDependPath): dispatchTests.main(["dispatchTests.py",ManifestPath,HeadOfOTBTree,HeadOfModularOTBTree,testDependPath]) """ # call dispatchExamples to fill example/CMakeLists if op.isfile(exDependPath): dispatchExamples.main(["dispatchExamples.py",ManifestPath,HeadOfOTBTree,HeadOfModularOTBTree,exDependPath]) """ # examples for i in sorted(os.listdir(HeadOfTempTree + "/Examples")): if i == "CMakeLists.txt" or i == "README.txt" or i.startswith("DataRepresentation"): continue for j in sorted(os.listdir(HeadOfTempTree + "/Examples/" + i)): if 
j == "CMakeLists.txt" or j.startswith("otb"): continue command = "mv %s/Examples/%s/%s %s/Examples/%s/%s" % ( HeadOfTempTree, i, j, HeadOfModularOTBTree, i, j) os.system(command) for i in sorted(os.listdir(HeadOfTempTree + "/Examples/DataRepresentation")): if i == "CMakeLists.txt" or i == "README.txt": continue for j in sorted(os.listdir(HeadOfTempTree + "/Examples/DataRepresentation/" + i)): if j == "CMakeLists.txt" or j.startswith("otb"): continue command = "mv %s/Examples/DataRepresentation/%s/%s %s/Examples/DataRepresentation/%s/%s" % ( HeadOfTempTree, i, j, HeadOfModularOTBTree, i, j) os.system(command) # save version without patches (so that we can regenerate patches later) os.system( "cp -ar " + op.join(OutputDir,"OTB_Modular") + " " + op.join(OutputDir,"OTB_Modular-nopatch") ) # apply patches in OTB_Modular curdir = op.abspath(op.dirname(__file__)) command = "cd " + op.join(OutputDir,"OTB_Modular") + " && patch -p1 < " + curdir + "/patches/otbmodular.patch" print "Executing " + command os.system( command ) # remove Copyright files we don't want to touch later os.system( "rm -rf %s" % (op.join(HeadOfTempTree,"Copyright") ) ) os.system( "rm -rf %s" % (op.join(HeadOfTempTree,"RELEASE_NOTES.txt") ) ) os.system( "rm -rf %s" % (op.join(HeadOfTempTree,"README") ) ) # PREPARE MIGRATION COMMIT ON A CLONE OF ORIGINAL CHECKOUT if enableMigration: print("Executing migration on a clone of original checkout") HeadOfTempTree = op.abspath(HeadOfTempTree) OutputDir = op.abspath(OutputDir) # clone original checkout outputModular = op.join(OutputDir,"OTB_Modular") outputMigration = op.join(OutputDir,"OTB_Migration") if op.exists(outputMigration): os.removedirs(outputMigration) command = ["cp","-ar",HeadOfOTBTree,outputMigration] call(command) os.chdir(outputMigration) # walk through OTB_Remaining and delete corresponding files in OTB checkout print("DELETE STEP...") for dirPath, dirNames, fileNames in os.walk(HeadOfTempTree): currentSourceDir = dirPath.replace(HeadOfTempTree,'.') for fileName in fileNames: if op.exists(op.join(currentSourceDir,fileName)): command = ["hg","remove",op.join(currentSourceDir,fileName)] call(command) else: print("Unknown file : "+op.join(currentSourceDir,fileName)) command = ['hg','commit','-m','ENH: Remove files not necessary after modularization'] call(command) # walk through manifest and rename files print("MOVE STEP...") for source in sourceList: outputPath = op.join("./Modules",op.join(source["group"],op.join(source["module"],source["subDir"]))) command = ['hg','rename',source["path"],op.join(outputPath,op.basename(source["path"]))] call(command) command = ['hg','commit','-m','ENH: Move source and test files into their respective module'] call(command) # add new files from OTB_Modular (files from OTB-Modular repo + generated files) print("ADD STEP...") for dirPath, dirNames, fileNames in os.walk(outputModular): currentSourceDir = dirPath.replace(outputModular,'.') if currentSourceDir.startswith("./.hg"): print("skip .hg") continue for fileName in fileNames: # skip hg files if fileName.startswith(".hg"): continue targetFile = op.join(currentSourceDir,fileName) if not op.exists(targetFile): if not op.exists(currentSourceDir): command = ["mkdir","-p",currentSourceDir] call(command) shutil.copy(op.join(dirPath,fileName),targetFile) command = ['hg','add'] call(command) command = ['hg','commit','-m','ENH: Add new files for modular build system'] call(command) # apply patches on OTB Checkout print("PATCH STEP...") for dirPath, dirNames, fileNames in os.walk(outputModular): 
currentSourceDir = dirPath.replace(outputModular,'.') if currentSourceDir.startswith("./.hg"): continue for fileName in fileNames: # skip hg files if fileName.startswith(".hg"): continue targetFile = op.join(currentSourceDir,fileName) if op.exists(targetFile): command = ['cp',op.join(dirPath,fileName),targetFile] call(command) command = ['hg','commit','-m','ENH: Apply patches necessary after modularization'] call(command)
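# Illustrative example (hypothetical row, not taken from an actual Manifest file):
# with the 6 fields source_path/current_subDir/group/module/subDir/comment, a line such as
#   Code/Common/otbImage.h,Common,Core,Common,include,image class
# is parsed by parseFullManifest() into
#   {"path": "Code/Common/otbImage.h", "group": "Core", "module": "Common", "subDir": "include"}
# (the separator is auto-detected among ',', ';' and '\t' from the first line).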
parseFullManifest
identifier_name
modulizer.py
#!/usr/bin/python #========================================================================== # # Copyright Insight Software Consortium # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0.txt # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # #==========================================================================*/ # This script is used to automate the modularization process. The following # steps are included: # 1. Move the files in the monolithic ITK into modules of the modularized ITK. # A manifest text file that lists all the files and their destinations is # required to run the script.By default, the manifest file is named as # "Manifest.txt" in the same directory of this script. # 2. Create CMake Files and put them into modules. # Modified by Guillaume Pasero <guillaume.pasero@c-s.fr> # add dependencies in otb-module.cmake # To run it, type ./modulizer.py OTB_PATH Manifest_PATH # from the otb-modulizer root directory. print "*************************************************************************" print "WARNINGs! This modularization script is still in its experimental stage." print "Current OTB users should not run this script." print "*************************************************************************" import shutil import os.path as op import re import sys import os import stat import glob import documentationCheck import analyseAppManifest import dispatchTests import dispatchExamples from subprocess import call def parseFullManifest(path): sourceList = [] nbFields = 6 fd = open(path,'rb') # skip first line and detect separator firstLine = fd.readline() sep = ',' if (len(firstLine.split(sep)) != nbFields): sep = ';' if (len(firstLine.split(sep)) != nbFields): sep = '\t' if (len(firstLine.split(sep)) != nbFields): print "Unknown separator" return sourceList fd.seek(0) # parse file for line in fd: if (line.strip()).startswith("#"): continue words = line.split(sep) if (len(words) < (nbFields-1)): print "Wrong number of fields, skipping this line" continue fullPath = words[0].strip(" ,;\t\n\r") groupName = words[2].strip(" ,;\t\n\r") moduleName = words[3].strip(" ,;\t\n\r") subDir = words[4].strip(" ,;\t\n\r") sourceName = op.basename(fullPath) sourceList.append({"path":fullPath, "group":groupName, "module":moduleName, "subDir":subDir}) fd.close() return sourceList def parseDescriptions(path): output = {} sep = '|' nbFields = 2 fd = open(path,'rb') for line in fd: if (line.strip()).startswith("#"): continue words = line.split(sep) if len(words) != nbFields: continue moduleName = words[0].strip(" \"\t\n\r") description = words[1].strip(" \"\t\n\r") output[moduleName] = description fd.close() return output if len(sys.argv) < 4: print("USAGE: {0} monolithic_OTB_PATH OUTPUT_DIR Manifest_Path [module_dep [test_dep [mod_description]]]".format(sys.argv[0])) print(" monolithic_OTB_PATH : checkout of OTB repository (will not be modified)") print(" OUTPUT_DIR : output directory where OTB_Modular and OTB_remaining will be created ") print(" Manifest_Path : path to manifest file, in CSV-like format. 
Fields are :") print(" source_path/current_subDir/group/module/subDir/comment") print(" module_dep : dependencies between modules") print(" test_dep : additional dependencies for tests") print(" mod_description : description for each module") print(" migration_password : password to enable MIGRATION") sys.exit(-1) scriptDir = op.dirname(op.abspath(sys.argv[0])) HeadOfOTBTree = sys.argv[1] if (HeadOfOTBTree[-1] == '/'): HeadOfOTBTree = HeadOfOTBTree[0:-1] OutputDir = sys.argv[2] HeadOfModularOTBTree = op.join(OutputDir,"OTB_Modular") ManifestPath = sys.argv[3] EdgePath = "" if len(sys.argv) >= 5: EdgePath = sys.argv[4] testDependPath = "" if len(sys.argv) >= 6: testDependPath = sys.argv[5] modDescriptionPath = "" if len(sys.argv) >= 7: modDescriptionPath = sys.argv[6] enableMigration = False if len(sys.argv) >= 8: migrationPass = sys.argv[7] if migrationPass == "redbutton": enableMigration = True # copy the whole OTB tree over to a temporary dir HeadOfTempTree = op.join(OutputDir,"OTB_remaining") if op.isdir(HeadOfTempTree): shutil.rmtree(HeadOfTempTree) if op.isdir(HeadOfModularOTBTree): shutil.rmtree(HeadOfModularOTBTree) print("Start to copy" + HeadOfOTBTree + " to ./OTB_remaining ...") shutil.copytree(HeadOfOTBTree,HeadOfTempTree, ignore = shutil.ignore_patterns('.hg','.hg*')) print("Done copying!") # checkout OTB-Modular cmd ='hg clone http://hg.orfeo-toolbox.org/OTB-Modular '+HeadOfModularOTBTree os.system(cmd) logDir = op.join(OutputDir,"logs") if not op.isdir(logDir): os.makedirs(logDir) # read the manifest file print ("moving files from ./OTB_remaining into modules in {0}".format(HeadOfModularOTBTree)) numOfMissingFiles = 0; missingf = open(op.join(logDir,'missingFiles.log'),'w') moduleList=[] moduleDic={} sourceList = parseFullManifest(ManifestPath) for source in sourceList: # build module list moduleDic[source["module"]] = source["group"] # create the path inputfile = op.abspath(op.join(HeadOfTempTree,source["path"])) outputPath = op.join(op.join(HeadOfModularOTBTree,"Modules"),op.join(source["group"],op.join(source["module"],source["subDir"]))) if not op.isdir(outputPath): os.makedirs(outputPath) # copying files to the destination if op.isfile(inputfile): if op.isfile(op.join(outputPath,op.basename(inputfile))): os.remove(op.join(outputPath,op.basename(inputfile))) shutil.move(inputfile, outputPath) else: missingf.write(inputfile+'\n') numOfMissingFiles = numOfMissingFiles + 1 missingf.close() print ("listed {0} missing files to logs/missingFiles.log").format(numOfMissingFiles) moduleList = moduleDic.keys() # after move, operate a documentation check for source in sourceList: outputPath = op.join(op.join(HeadOfModularOTBTree,"Modules"),op.join(source["group"],op.join(source["module"],source["subDir"]))) outputFile = op.join(outputPath,op.basename(source["path"])) if op.isfile(outputFile): if op.splitext(outputFile)[1] == ".h": nextContent = documentationCheck.parserHeader(outputFile,source["module"]) fd = open(outputFile,'wb') fd.writelines(nextContent) fd.close() # get dependencies (if file is present) dependencies = {} testDependencies = {} exDependencies = {} for mod in moduleList: dependencies[mod] = [] testDependencies[mod] = [] exDependencies[mod] = [] if op.isfile(EdgePath): fd = open(EdgePath,'rb') for line in fd: words = line.split(',') if len(words) == 2: depFrom = words[0].strip(" ,;\t\n\r") depTo = words[1].strip(" ,;\t\n\r") if dependencies.has_key(depFrom): dependencies[depFrom].append(depTo) else: print("Bad dependency : "+depFrom+" -> "+depTo) fd.close() if 
op.isfile(testDependPath): fd = open(testDependPath,'rb') for line in fd: words = line.split(',') if len(words) == 2: depFrom = words[0].strip(" ,;\t\n\r") depTo = words[1].strip(" ,;\t\n\r") if testDependencies.has_key(depFrom): testDependencies[depFrom].append(depTo) else: print("Bad dependency : "+depFrom+" -> "+depTo) fd.close() """ if op.isfile(exDependPath): fd = open(exDependPath,'rb') for line in fd: words = line.split(',') if len(words) == 2: depFrom = words[0].strip(" ,;\t\n\r") depTo = words[1].strip(" ,;\t\n\r") if exDependencies.has_key(depFrom): exDependencies[depFrom].append(depTo) else: print("Bad dependency : "+depFrom+" -> "+depTo) fd.close() """ modDescriptions = {} if op.isfile(modDescriptionPath): modDescriptions = parseDescriptions(modDescriptionPath) # list the new files newf = open(op.join(logDir,'newFiles.log'),'w') for (root, subDirs, files) in os.walk(HeadOfTempTree): for afile in files: newf.write(op.join(root, afile)+'\n') newf.close() print ("listed new files to logs/newFiles.log") ########################################################################### print ('creating cmake files for each module (from the template module)') #moduleList = os.listdir(HeadOfModularOTBTree) for moduleName in moduleList: moduleDir = op.join(op.join(HeadOfModularOTBTree,"Modules"),op.join(moduleDic[moduleName],moduleName)) cmakeModName = "OTB"+moduleName if op.isdir(moduleDir): # write CMakeLists.txt filepath = moduleDir+'/CMakeLists.txt' if not op.isfile(filepath): o = open(filepath,'w') if op.isdir(moduleDir+'/src'): template_cmakelist = op.join(scriptDir,'templateModule/otb-template-module/CMakeLists.txt') else: template_cmakelist = op.join(scriptDir,'templateModule/otb-template-module/CMakeLists-nosrc.txt') for line in open(template_cmakelist,'r'): line = line.replace('otb-template-module',cmakeModName) o.write(line); o.close() # write src/CMakeLists.txt # list of CXX files if op.isdir(moduleDir+'/src'): cxxFiles = glob.glob(moduleDir+'/src/*.cxx') cxxFileList=''; for cxxf in cxxFiles: cxxFileList = cxxFileList+' '+cxxf.split('/')[-1]+'\n' # build list of link dependencies linkLibs = "" for dep in dependencies[moduleName]: #verify if dep is a header-onlymodule depThirdParty = False try: moduleDic[dep] except KeyError: # this is a ThirdParty module depThirdParty = True if not depThirdParty: depModuleDir = op.join(op.join(HeadOfModularOTBTree,"Modules"),op.join(moduleDic[dep],dep)) depcxx = glob.glob(depModuleDir+'/src/*.cxx') if depcxx : linkLibs = linkLibs + " ${OTB"+dep+"_LIBRARIES}" + "\n" else: linkLibs = linkLibs + " ${OTB"+dep+"_LIBRARIES}" + "\n" if len(linkLibs) == 0: linkLibs = " ${OTBITK_LIBRARIES}" filepath = moduleDir+'/src/CMakeLists.txt' if not op.isfile(filepath): o = open(filepath,'w') for line in open(op.join(scriptDir,'templateModule/otb-template-module/src/CMakeLists.txt'),'r'): line = line.replace('otb-template-module',cmakeModName) line = line.replace('LIST_OF_CXX_FILES',cxxFileList[0:-1]) #get rid of the last \n line = line.replace('LINK_LIBRARIES_TO_BE_REPLACED',linkLibs) o.write(line); o.close() # write app/CMakeLists.txt if op.isdir(moduleDir+'/app'): os.mkdir(moduleDir+'/test') srcFiles = glob.glob(moduleDir+'/app/*.cxx') srcFiles += glob.glob(moduleDir+'/app/*.h') appList = {} for srcf in srcFiles: # get App name appName = analyseAppManifest.findApplicationName(srcf) if len(appName) == 0: continue appList[appName] = {"source":op.basename(srcf)} # get original location cmakeListPath = "" for item in sourceList: if op.basename(item["path"]) == 
op.basename(srcf) and \ moduleName == item["module"]: appDir = op.basename(op.dirname(item["path"])) cmakeListPath = op.join(HeadOfOTBTree,op.join("Testing/Applications"),op.join(appDir,"CMakeLists.txt")) break # get App tests if not op.isfile(cmakeListPath): continue appList[appName]["test"] = analyseAppManifest.findTestFromApp(cmakeListPath,appName) # build list of link dependencies linkLibs = "" for dep in dependencies[moduleName]: linkLibs = linkLibs + " ${OTB"+dep+"_LIBRARIES}" + "\n" filepath = moduleDir+'/app/CMakeLists.txt' if not op.isfile(filepath): o = open(filepath,'w') # define link libraries o.write("set("+cmakeModName+"_LINK_LIBS\n") o.write(linkLibs) o.write(")\n") for appli in appList: content = "\notb_create_application(\n" content += " NAME " + appli + "\n" content += " SOURCES " + appList[appli]["source"] + "\n" content += " LINK_LIBRARIES ${${otb-module}_LIBRARIES})\n" o.write(content) o.close() filepath = moduleDir+'/test/CMakeLists.txt' if not op.isfile(filepath): o = open(filepath,'w') o.write("otb_module_test()") for appli in appList: if not appList[appli].has_key("test"): continue o.write("\n#----------- "+appli+" TESTS ----------------\n") for test in appList[appli]["test"]: if test.count("${"): print "Warning : test name contains a variable : "+test continue testcode=appList[appli]["test"][test] testcode=[s.replace('OTB_TEST_APPLICATION', 'otb_test_application') for s in testcode] o.writelines(testcode) o.write("\n") o.close() # write test/CMakeLists.txt : done by dispatchTests.py """ if op.isdir(moduleDir+'/test'): cxxFiles = glob.glob(moduleDir+'/test/*.cxx') cxxFileList=''; for cxxf in cxxFiles: cxxFileList = cxxFileList+cxxf.split('/')[-1]+'\n' filepath = moduleDir+'/test/CMakeLists.txt' if not op.isfile(filepath): o = open(filepath,'w') for line in open('./templateModule/otb-template-module/test/CMakeLists.txt','r'): # TODO : refactor for OTB words= moduleName.split('-') moduleNameMod=''; for word in words: moduleNameMod=moduleNameMod + word.capitalize() line = line.replace('itkTemplateModule',moduleNameMod) line = line.replace('itk-template-module',moduleName) line = line.replace('LIST_OF_CXX_FILES',cxxFileList[0:-1]) #get rid of the last \n o.write(line); o.close() """ # write otb-module.cmake, which contains dependency info filepath = moduleDir+'/otb-module.cmake' if not op.isfile(filepath): o = open(filepath,'w') for line in open(op.join(scriptDir,'templateModule/otb-template-module/otb-module.cmake'),'r'): # replace documentation if line.find("DESCRIPTION_TO_BE_REPLACED") >= 0: docString = "\"TBD\"" if moduleName in modDescriptions: descPos = line.find("DESCRIPTION_TO_BE_REPLACED") limitChar = 80 docString = "\""+modDescriptions[moduleName]+"\"" curPos = 80 - descPos while curPos < len(docString): lastSpace = docString[0:curPos].rfind(' ') if lastSpace > max(0,curPos-80): docString = docString[0:lastSpace] + '\n' + docString[lastSpace+1:] else: docString = docString[0:curPos] + '\n' + docString[curPos:] curPos += 81 line = line.replace('DESCRIPTION_TO_BE_REPLACED',docString) # replace module name line = line.replace('otb-template-module',cmakeModName) # replace depend list dependTagPos = line.find("DEPENDS_TO_BE_REPLACED") if dependTagPos >= 0: replacementStr = "DEPENDS" indentStr = "" for it in range(dependTagPos+2): indentStr = indentStr + " " if len(dependencies[moduleName]) > 0: deplist = dependencies[moduleName] else: deplist = ["Common"] for dep in deplist: replacementStr = replacementStr + "\n" + indentStr +"OTB"+ dep line = 
line.replace('DEPENDS_TO_BE_REPLACED',replacementStr) # replace test_depend list testDependTagPos = line.find("TESTDEP_TO_BE_REPLACED") if testDependTagPos >= 0: if moduleName.startswith("App"): # for application : hardcode TestKernel and CommandLine indentStr = "" for it in range(testDependTagPos+2): indentStr = indentStr + " " replacementStr = "TEST_DEPENDS\n" + indentStr + "OTBTestKernel\n" + indentStr + "OTBCommandLine" line = line.replace('TESTDEP_TO_BE_REPLACED',replacementStr) else: # standard case if len(testDependencies[moduleName]) > 0: indentStr = "" replacementStr = "TEST_DEPENDS" for it in range(testDependTagPos+2): indentStr = indentStr + " " for dep in testDependencies[moduleName]: replacementStr = replacementStr + "\n" + indentStr +"OTB"+ dep line = line.replace('TESTDEP_TO_BE_REPLACED',replacementStr) else: line = line.replace('TESTDEP_TO_BE_REPLACED','') # replace example_depend list exDependTagPos = line.find("EXDEP_TO_BE_REPLACED") if exDependTagPos >= 0: if len(exDependencies[moduleName]) > 0: indentStr = "" replacementStr = "EXAMPLE_DEPENDS" for it in range(exDependTagPos+2): indentStr = indentStr + " " for dep in exDependencies[moduleName]: replacementStr = replacementStr + "\n" + indentStr +"OTB"+ dep line = line.replace('EXDEP_TO_BE_REPLACED',replacementStr) else: line = line.replace('EXDEP_TO_BE_REPLACED','') o.write(line); o.close() # call dispatchTests to fill test/CMakeLists if op.isfile(testDependPath): dispatchTests.main(["dispatchTests.py",ManifestPath,HeadOfOTBTree,HeadOfModularOTBTree,testDependPath]) """ # call dispatchExamples to fill example/CMakeLists if op.isfile(exDependPath): dispatchExamples.main(["dispatchExamples.py",ManifestPath,HeadOfOTBTree,HeadOfModularOTBTree,exDependPath]) """ # examples for i in sorted(os.listdir(HeadOfTempTree + "/Examples")): if i == "CMakeLists.txt" or i == "README.txt" or i.startswith("DataRepresentation"): continue for j in sorted(os.listdir(HeadOfTempTree + "/Examples/" + i)): if j == "CMakeLists.txt" or j.startswith("otb"): continue command = "mv %s/Examples/%s/%s %s/Examples/%s/%s" % ( HeadOfTempTree, i, j, HeadOfModularOTBTree, i, j) os.system(command) for i in sorted(os.listdir(HeadOfTempTree + "/Examples/DataRepresentation")): if i == "CMakeLists.txt" or i == "README.txt": continue for j in sorted(os.listdir(HeadOfTempTree + "/Examples/DataRepresentation/" + i)): if j == "CMakeLists.txt" or j.startswith("otb"): continue command = "mv %s/Examples/DataRepresentation/%s/%s %s/Examples/DataRepresentation/%s/%s" % ( HeadOfTempTree, i, j, HeadOfModularOTBTree, i, j) os.system(command) # save version without patches (so that we can regenerate patches later) os.system( "cp -ar " + op.join(OutputDir,"OTB_Modular") + " " + op.join(OutputDir,"OTB_Modular-nopatch") )
os.system( command ) # remove Copyright files we don't want to touch later os.system( "rm -rf %s" % (op.join(HeadOfTempTree,"Copyright") ) ) os.system( "rm -rf %s" % (op.join(HeadOfTempTree,"RELEASE_NOTES.txt") ) ) os.system( "rm -rf %s" % (op.join(HeadOfTempTree,"README") ) ) # PREPARE MIGRATION COMMIT ON A CLONE OF ORIGINAL CHECKOUT if enableMigration: print("Executing migration on a clone of original checkout") HeadOfTempTree = op.abspath(HeadOfTempTree) OutputDir = op.abspath(OutputDir) # clone original checkout outputModular = op.join(OutputDir,"OTB_Modular") outputMigration = op.join(OutputDir,"OTB_Migration") if op.exists(outputMigration): os.removedirs(outputMigration) command = ["cp","-ar",HeadOfOTBTree,outputMigration] call(command) os.chdir(outputMigration) # walk through OTB_Remaining and delete corresponding files in OTB checkout print("DELETE STEP...") for dirPath, dirNames, fileNames in os.walk(HeadOfTempTree): currentSourceDir = dirPath.replace(HeadOfTempTree,'.') for fileName in fileNames: if op.exists(op.join(currentSourceDir,fileName)): command = ["hg","remove",op.join(currentSourceDir,fileName)] call(command) else: print("Unknown file : "+op.join(currentSourceDir,fileName)) command = ['hg','commit','-m','ENH: Remove files not necessary after modularization'] call(command) # walk through manifest and rename files print("MOVE STEP...") for source in sourceList: outputPath = op.join("./Modules",op.join(source["group"],op.join(source["module"],source["subDir"]))) command = ['hg','rename',source["path"],op.join(outputPath,op.basename(source["path"]))] call(command) command = ['hg','commit','-m','ENH: Move source and test files into their respective module'] call(command) # add new files from OTB_Modular (files from OTB-Modular repo + generated files) print("ADD STEP...") for dirPath, dirNames, fileNames in os.walk(outputModular): currentSourceDir = dirPath.replace(outputModular,'.') if currentSourceDir.startswith("./.hg"): print("skip .hg") continue for fileName in fileNames: # skip hg files if fileName.startswith(".hg"): continue targetFile = op.join(currentSourceDir,fileName) if not op.exists(targetFile): if not op.exists(currentSourceDir): command = ["mkdir","-p",currentSourceDir] call(command) shutil.copy(op.join(dirPath,fileName),targetFile) command = ['hg','add'] call(command) command = ['hg','commit','-m','ENH: Add new files for modular build system'] call(command) # apply patches on OTB Checkout print("PATCH STEP...") for dirPath, dirNames, fileNames in os.walk(outputModular): currentSourceDir = dirPath.replace(outputModular,'.') if currentSourceDir.startswith("./.hg"): continue for fileName in fileNames: # skip hg files if fileName.startswith(".hg"): continue targetFile = op.join(currentSourceDir,fileName) if op.exists(targetFile): command = ['cp',op.join(dirPath,fileName),targetFile] call(command) command = ['hg','commit','-m','ENH: Apply patches necessary after modularization'] call(command)
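# Illustrative invocation (hypothetical paths and file names):
#   ./modulizer.py ~/src/OTB /tmp/out Manifest.txt module_dep.csv test_dep.csv descriptions.txt
# This populates /tmp/out/OTB_Modular, /tmp/out/OTB_remaining, /tmp/out/OTB_Modular-nopatch
# and /tmp/out/logs/; passing "redbutton" as an extra argument additionally prepares
# the migration clone in /tmp/out/OTB_Migration.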
# apply patches in OTB_Modular
curdir = op.abspath(op.dirname(__file__))
command = "cd " + op.join(OutputDir,"OTB_Modular") + " && patch -p1 < " + curdir + "/patches/otbmodular.patch"
print "Executing " + command
random_line_split
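The otb-module.cmake generation above fills the DEPENDS / TEST_DEPENDS / EXAMPLE_DEPENDS placeholders by building an indent string from the column position of the tag on the template line. A minimal stand-alone sketch of that substitution pattern; the helper name and arguments are illustrative and are not part of modulizer.py:

# Hypothetical helper illustrating the placeholder substitution used for
# DEPENDS_TO_BE_REPLACED / TESTDEP_TO_BE_REPLACED / EXDEP_TO_BE_REPLACED:
# entries are indented two columns past where the tag sits on the template line.
def replace_dep_tag(line, tag, keyword, deps, prefix="OTB"):
    pos = line.find(tag)
    if pos < 0:
        return line                     # tag not on this line
    if not deps:
        return line.replace(tag, "")    # no dependencies: drop the tag
    indent = " " * (pos + 2)
    block = keyword + "".join("\n" + indent + prefix + dep for dep in deps)
    return line.replace(tag, block)

# Example: expanding TEST_DEPENDS for an application module.
print(replace_dep_tag("  TESTDEP_TO_BE_REPLACED",
                      "TESTDEP_TO_BE_REPLACED",
                      "TEST_DEPENDS",
                      ["TestKernel", "CommandLine"]))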
modulizer.py
#!/usr/bin/python #========================================================================== # # Copyright Insight Software Consortium # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0.txt # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # #==========================================================================*/ # This script is used to automate the modularization process. The following # steps are included: # 1. Move the files in the monolithic ITK into modules of the modularized ITK. # A manifest text file that lists all the files and their destinations is # required to run the script.By default, the manifest file is named as # "Manifest.txt" in the same directory of this script. # 2. Create CMake Files and put them into modules. # Modified by Guillaume Pasero <guillaume.pasero@c-s.fr> # add dependencies in otb-module.cmake # To run it, type ./modulizer.py OTB_PATH Manifest_PATH # from the otb-modulizer root directory. print "*************************************************************************" print "WARNINGs! This modularization script is still in its experimental stage." print "Current OTB users should not run this script." print "*************************************************************************" import shutil import os.path as op import re import sys import os import stat import glob import documentationCheck import analyseAppManifest import dispatchTests import dispatchExamples from subprocess import call def parseFullManifest(path): sourceList = [] nbFields = 6 fd = open(path,'rb') # skip first line and detect separator firstLine = fd.readline() sep = ',' if (len(firstLine.split(sep)) != nbFields): sep = ';' if (len(firstLine.split(sep)) != nbFields): sep = '\t' if (len(firstLine.split(sep)) != nbFields): print "Unknown separator" return sourceList fd.seek(0) # parse file for line in fd: if (line.strip()).startswith("#"): continue words = line.split(sep) if (len(words) < (nbFields-1)): print "Wrong number of fields, skipping this line" continue fullPath = words[0].strip(" ,;\t\n\r") groupName = words[2].strip(" ,;\t\n\r") moduleName = words[3].strip(" ,;\t\n\r") subDir = words[4].strip(" ,;\t\n\r") sourceName = op.basename(fullPath) sourceList.append({"path":fullPath, "group":groupName, "module":moduleName, "subDir":subDir}) fd.close() return sourceList def parseDescriptions(path):
if len(sys.argv) < 4: print("USAGE: {0} monolithic_OTB_PATH OUTPUT_DIR Manifest_Path [module_dep [test_dep [mod_description]]]".format(sys.argv[0])) print(" monolithic_OTB_PATH : checkout of OTB repository (will not be modified)") print(" OUTPUT_DIR : output directory where OTB_Modular and OTB_remaining will be created ") print(" Manifest_Path : path to manifest file, in CSV-like format. Fields are :") print(" source_path/current_subDir/group/module/subDir/comment") print(" module_dep : dependencies between modules") print(" test_dep : additional dependencies for tests") print(" mod_description : description for each module") print(" migration_password : password to enable MIGRATION") sys.exit(-1) scriptDir = op.dirname(op.abspath(sys.argv[0])) HeadOfOTBTree = sys.argv[1] if (HeadOfOTBTree[-1] == '/'): HeadOfOTBTree = HeadOfOTBTree[0:-1] OutputDir = sys.argv[2] HeadOfModularOTBTree = op.join(OutputDir,"OTB_Modular") ManifestPath = sys.argv[3] EdgePath = "" if len(sys.argv) >= 5: EdgePath = sys.argv[4] testDependPath = "" if len(sys.argv) >= 6: testDependPath = sys.argv[5] modDescriptionPath = "" if len(sys.argv) >= 7: modDescriptionPath = sys.argv[6] enableMigration = False if len(sys.argv) >= 8: migrationPass = sys.argv[7] if migrationPass == "redbutton": enableMigration = True # copy the whole OTB tree over to a temporary dir HeadOfTempTree = op.join(OutputDir,"OTB_remaining") if op.isdir(HeadOfTempTree): shutil.rmtree(HeadOfTempTree) if op.isdir(HeadOfModularOTBTree): shutil.rmtree(HeadOfModularOTBTree) print("Start to copy" + HeadOfOTBTree + " to ./OTB_remaining ...") shutil.copytree(HeadOfOTBTree,HeadOfTempTree, ignore = shutil.ignore_patterns('.hg','.hg*')) print("Done copying!") # checkout OTB-Modular cmd ='hg clone http://hg.orfeo-toolbox.org/OTB-Modular '+HeadOfModularOTBTree os.system(cmd) logDir = op.join(OutputDir,"logs") if not op.isdir(logDir): os.makedirs(logDir) # read the manifest file print ("moving files from ./OTB_remaining into modules in {0}".format(HeadOfModularOTBTree)) numOfMissingFiles = 0; missingf = open(op.join(logDir,'missingFiles.log'),'w') moduleList=[] moduleDic={} sourceList = parseFullManifest(ManifestPath) for source in sourceList: # build module list moduleDic[source["module"]] = source["group"] # create the path inputfile = op.abspath(op.join(HeadOfTempTree,source["path"])) outputPath = op.join(op.join(HeadOfModularOTBTree,"Modules"),op.join(source["group"],op.join(source["module"],source["subDir"]))) if not op.isdir(outputPath): os.makedirs(outputPath) # copying files to the destination if op.isfile(inputfile): if op.isfile(op.join(outputPath,op.basename(inputfile))): os.remove(op.join(outputPath,op.basename(inputfile))) shutil.move(inputfile, outputPath) else: missingf.write(inputfile+'\n') numOfMissingFiles = numOfMissingFiles + 1 missingf.close() print ("listed {0} missing files to logs/missingFiles.log").format(numOfMissingFiles) moduleList = moduleDic.keys() # after move, operate a documentation check for source in sourceList: outputPath = op.join(op.join(HeadOfModularOTBTree,"Modules"),op.join(source["group"],op.join(source["module"],source["subDir"]))) outputFile = op.join(outputPath,op.basename(source["path"])) if op.isfile(outputFile): if op.splitext(outputFile)[1] == ".h": nextContent = documentationCheck.parserHeader(outputFile,source["module"]) fd = open(outputFile,'wb') fd.writelines(nextContent) fd.close() # get dependencies (if file is present) dependencies = {} testDependencies = {} exDependencies = {} for mod in moduleList: 
dependencies[mod] = [] testDependencies[mod] = [] exDependencies[mod] = [] if op.isfile(EdgePath): fd = open(EdgePath,'rb') for line in fd: words = line.split(',') if len(words) == 2: depFrom = words[0].strip(" ,;\t\n\r") depTo = words[1].strip(" ,;\t\n\r") if dependencies.has_key(depFrom): dependencies[depFrom].append(depTo) else: print("Bad dependency : "+depFrom+" -> "+depTo) fd.close() if op.isfile(testDependPath): fd = open(testDependPath,'rb') for line in fd: words = line.split(',') if len(words) == 2: depFrom = words[0].strip(" ,;\t\n\r") depTo = words[1].strip(" ,;\t\n\r") if testDependencies.has_key(depFrom): testDependencies[depFrom].append(depTo) else: print("Bad dependency : "+depFrom+" -> "+depTo) fd.close() """ if op.isfile(exDependPath): fd = open(exDependPath,'rb') for line in fd: words = line.split(',') if len(words) == 2: depFrom = words[0].strip(" ,;\t\n\r") depTo = words[1].strip(" ,;\t\n\r") if exDependencies.has_key(depFrom): exDependencies[depFrom].append(depTo) else: print("Bad dependency : "+depFrom+" -> "+depTo) fd.close() """ modDescriptions = {} if op.isfile(modDescriptionPath): modDescriptions = parseDescriptions(modDescriptionPath) # list the new files newf = open(op.join(logDir,'newFiles.log'),'w') for (root, subDirs, files) in os.walk(HeadOfTempTree): for afile in files: newf.write(op.join(root, afile)+'\n') newf.close() print ("listed new files to logs/newFiles.log") ########################################################################### print ('creating cmake files for each module (from the template module)') #moduleList = os.listdir(HeadOfModularOTBTree) for moduleName in moduleList: moduleDir = op.join(op.join(HeadOfModularOTBTree,"Modules"),op.join(moduleDic[moduleName],moduleName)) cmakeModName = "OTB"+moduleName if op.isdir(moduleDir): # write CMakeLists.txt filepath = moduleDir+'/CMakeLists.txt' if not op.isfile(filepath): o = open(filepath,'w') if op.isdir(moduleDir+'/src'): template_cmakelist = op.join(scriptDir,'templateModule/otb-template-module/CMakeLists.txt') else: template_cmakelist = op.join(scriptDir,'templateModule/otb-template-module/CMakeLists-nosrc.txt') for line in open(template_cmakelist,'r'): line = line.replace('otb-template-module',cmakeModName) o.write(line); o.close() # write src/CMakeLists.txt # list of CXX files if op.isdir(moduleDir+'/src'): cxxFiles = glob.glob(moduleDir+'/src/*.cxx') cxxFileList=''; for cxxf in cxxFiles: cxxFileList = cxxFileList+' '+cxxf.split('/')[-1]+'\n' # build list of link dependencies linkLibs = "" for dep in dependencies[moduleName]: #verify if dep is a header-onlymodule depThirdParty = False try: moduleDic[dep] except KeyError: # this is a ThirdParty module depThirdParty = True if not depThirdParty: depModuleDir = op.join(op.join(HeadOfModularOTBTree,"Modules"),op.join(moduleDic[dep],dep)) depcxx = glob.glob(depModuleDir+'/src/*.cxx') if depcxx : linkLibs = linkLibs + " ${OTB"+dep+"_LIBRARIES}" + "\n" else: linkLibs = linkLibs + " ${OTB"+dep+"_LIBRARIES}" + "\n" if len(linkLibs) == 0: linkLibs = " ${OTBITK_LIBRARIES}" filepath = moduleDir+'/src/CMakeLists.txt' if not op.isfile(filepath): o = open(filepath,'w') for line in open(op.join(scriptDir,'templateModule/otb-template-module/src/CMakeLists.txt'),'r'): line = line.replace('otb-template-module',cmakeModName) line = line.replace('LIST_OF_CXX_FILES',cxxFileList[0:-1]) #get rid of the last \n line = line.replace('LINK_LIBRARIES_TO_BE_REPLACED',linkLibs) o.write(line); o.close() # write app/CMakeLists.txt if op.isdir(moduleDir+'/app'): 
os.mkdir(moduleDir+'/test') srcFiles = glob.glob(moduleDir+'/app/*.cxx') srcFiles += glob.glob(moduleDir+'/app/*.h') appList = {} for srcf in srcFiles: # get App name appName = analyseAppManifest.findApplicationName(srcf) if len(appName) == 0: continue appList[appName] = {"source":op.basename(srcf)} # get original location cmakeListPath = "" for item in sourceList: if op.basename(item["path"]) == op.basename(srcf) and \ moduleName == item["module"]: appDir = op.basename(op.dirname(item["path"])) cmakeListPath = op.join(HeadOfOTBTree,op.join("Testing/Applications"),op.join(appDir,"CMakeLists.txt")) break # get App tests if not op.isfile(cmakeListPath): continue appList[appName]["test"] = analyseAppManifest.findTestFromApp(cmakeListPath,appName) # build list of link dependencies linkLibs = "" for dep in dependencies[moduleName]: linkLibs = linkLibs + " ${OTB"+dep+"_LIBRARIES}" + "\n" filepath = moduleDir+'/app/CMakeLists.txt' if not op.isfile(filepath): o = open(filepath,'w') # define link libraries o.write("set("+cmakeModName+"_LINK_LIBS\n") o.write(linkLibs) o.write(")\n") for appli in appList: content = "\notb_create_application(\n" content += " NAME " + appli + "\n" content += " SOURCES " + appList[appli]["source"] + "\n" content += " LINK_LIBRARIES ${${otb-module}_LIBRARIES})\n" o.write(content) o.close() filepath = moduleDir+'/test/CMakeLists.txt' if not op.isfile(filepath): o = open(filepath,'w') o.write("otb_module_test()") for appli in appList: if not appList[appli].has_key("test"): continue o.write("\n#----------- "+appli+" TESTS ----------------\n") for test in appList[appli]["test"]: if test.count("${"): print "Warning : test name contains a variable : "+test continue testcode=appList[appli]["test"][test] testcode=[s.replace('OTB_TEST_APPLICATION', 'otb_test_application') for s in testcode] o.writelines(testcode) o.write("\n") o.close() # write test/CMakeLists.txt : done by dispatchTests.py """ if op.isdir(moduleDir+'/test'): cxxFiles = glob.glob(moduleDir+'/test/*.cxx') cxxFileList=''; for cxxf in cxxFiles: cxxFileList = cxxFileList+cxxf.split('/')[-1]+'\n' filepath = moduleDir+'/test/CMakeLists.txt' if not op.isfile(filepath): o = open(filepath,'w') for line in open('./templateModule/otb-template-module/test/CMakeLists.txt','r'): # TODO : refactor for OTB words= moduleName.split('-') moduleNameMod=''; for word in words: moduleNameMod=moduleNameMod + word.capitalize() line = line.replace('itkTemplateModule',moduleNameMod) line = line.replace('itk-template-module',moduleName) line = line.replace('LIST_OF_CXX_FILES',cxxFileList[0:-1]) #get rid of the last \n o.write(line); o.close() """ # write otb-module.cmake, which contains dependency info filepath = moduleDir+'/otb-module.cmake' if not op.isfile(filepath): o = open(filepath,'w') for line in open(op.join(scriptDir,'templateModule/otb-template-module/otb-module.cmake'),'r'): # replace documentation if line.find("DESCRIPTION_TO_BE_REPLACED") >= 0: docString = "\"TBD\"" if moduleName in modDescriptions: descPos = line.find("DESCRIPTION_TO_BE_REPLACED") limitChar = 80 docString = "\""+modDescriptions[moduleName]+"\"" curPos = 80 - descPos while curPos < len(docString): lastSpace = docString[0:curPos].rfind(' ') if lastSpace > max(0,curPos-80): docString = docString[0:lastSpace] + '\n' + docString[lastSpace+1:] else: docString = docString[0:curPos] + '\n' + docString[curPos:] curPos += 81 line = line.replace('DESCRIPTION_TO_BE_REPLACED',docString) # replace module name line = line.replace('otb-template-module',cmakeModName) # 
replace depend list dependTagPos = line.find("DEPENDS_TO_BE_REPLACED") if dependTagPos >= 0: replacementStr = "DEPENDS" indentStr = "" for it in range(dependTagPos+2): indentStr = indentStr + " " if len(dependencies[moduleName]) > 0: deplist = dependencies[moduleName] else: deplist = ["Common"] for dep in deplist: replacementStr = replacementStr + "\n" + indentStr +"OTB"+ dep line = line.replace('DEPENDS_TO_BE_REPLACED',replacementStr) # replace test_depend list testDependTagPos = line.find("TESTDEP_TO_BE_REPLACED") if testDependTagPos >= 0: if moduleName.startswith("App"): # for application : hardcode TestKernel and CommandLine indentStr = "" for it in range(testDependTagPos+2): indentStr = indentStr + " " replacementStr = "TEST_DEPENDS\n" + indentStr + "OTBTestKernel\n" + indentStr + "OTBCommandLine" line = line.replace('TESTDEP_TO_BE_REPLACED',replacementStr) else: # standard case if len(testDependencies[moduleName]) > 0: indentStr = "" replacementStr = "TEST_DEPENDS" for it in range(testDependTagPos+2): indentStr = indentStr + " " for dep in testDependencies[moduleName]: replacementStr = replacementStr + "\n" + indentStr +"OTB"+ dep line = line.replace('TESTDEP_TO_BE_REPLACED',replacementStr) else: line = line.replace('TESTDEP_TO_BE_REPLACED','') # replace example_depend list exDependTagPos = line.find("EXDEP_TO_BE_REPLACED") if exDependTagPos >= 0: if len(exDependencies[moduleName]) > 0: indentStr = "" replacementStr = "EXAMPLE_DEPENDS" for it in range(exDependTagPos+2): indentStr = indentStr + " " for dep in exDependencies[moduleName]: replacementStr = replacementStr + "\n" + indentStr +"OTB"+ dep line = line.replace('EXDEP_TO_BE_REPLACED',replacementStr) else: line = line.replace('EXDEP_TO_BE_REPLACED','') o.write(line); o.close() # call dispatchTests to fill test/CMakeLists if op.isfile(testDependPath): dispatchTests.main(["dispatchTests.py",ManifestPath,HeadOfOTBTree,HeadOfModularOTBTree,testDependPath]) """ # call dispatchExamples to fill example/CMakeLists if op.isfile(exDependPath): dispatchExamples.main(["dispatchExamples.py",ManifestPath,HeadOfOTBTree,HeadOfModularOTBTree,exDependPath]) """ # examples for i in sorted(os.listdir(HeadOfTempTree + "/Examples")): if i == "CMakeLists.txt" or i == "README.txt" or i.startswith("DataRepresentation"): continue for j in sorted(os.listdir(HeadOfTempTree + "/Examples/" + i)): if j == "CMakeLists.txt" or j.startswith("otb"): continue command = "mv %s/Examples/%s/%s %s/Examples/%s/%s" % ( HeadOfTempTree, i, j, HeadOfModularOTBTree, i, j) os.system(command) for i in sorted(os.listdir(HeadOfTempTree + "/Examples/DataRepresentation")): if i == "CMakeLists.txt" or i == "README.txt": continue for j in sorted(os.listdir(HeadOfTempTree + "/Examples/DataRepresentation/" + i)): if j == "CMakeLists.txt" or j.startswith("otb"): continue command = "mv %s/Examples/DataRepresentation/%s/%s %s/Examples/DataRepresentation/%s/%s" % ( HeadOfTempTree, i, j, HeadOfModularOTBTree, i, j) os.system(command) # save version without patches (so that we can regenerate patches later) os.system( "cp -ar " + op.join(OutputDir,"OTB_Modular") + " " + op.join(OutputDir,"OTB_Modular-nopatch") ) # apply patches in OTB_Modular curdir = op.abspath(op.dirname(__file__)) command = "cd " + op.join(OutputDir,"OTB_Modular") + " && patch -p1 < " + curdir + "/patches/otbmodular.patch" print "Executing " + command os.system( command ) # remove Copyright files we don't want to touch later os.system( "rm -rf %s" % (op.join(HeadOfTempTree,"Copyright") ) ) os.system( "rm -rf %s" % 
(op.join(HeadOfTempTree,"RELEASE_NOTES.txt") ) ) os.system( "rm -rf %s" % (op.join(HeadOfTempTree,"README") ) ) # PREPARE MIGRATION COMMIT ON A CLONE OF ORIGINAL CHECKOUT if enableMigration: print("Executing migration on a clone of original checkout") HeadOfTempTree = op.abspath(HeadOfTempTree) OutputDir = op.abspath(OutputDir) # clone original checkout outputModular = op.join(OutputDir,"OTB_Modular") outputMigration = op.join(OutputDir,"OTB_Migration") if op.exists(outputMigration): os.removedirs(outputMigration) command = ["cp","-ar",HeadOfOTBTree,outputMigration] call(command) os.chdir(outputMigration) # walk through OTB_Remaining and delete corresponding files in OTB checkout print("DELETE STEP...") for dirPath, dirNames, fileNames in os.walk(HeadOfTempTree): currentSourceDir = dirPath.replace(HeadOfTempTree,'.') for fileName in fileNames: if op.exists(op.join(currentSourceDir,fileName)): command = ["hg","remove",op.join(currentSourceDir,fileName)] call(command) else: print("Unknown file : "+op.join(currentSourceDir,fileName)) command = ['hg','commit','-m','ENH: Remove files not necessary after modularization'] call(command) # walk through manifest and rename files print("MOVE STEP...") for source in sourceList: outputPath = op.join("./Modules",op.join(source["group"],op.join(source["module"],source["subDir"]))) command = ['hg','rename',source["path"],op.join(outputPath,op.basename(source["path"]))] call(command) command = ['hg','commit','-m','ENH: Move source and test files into their respective module'] call(command) # add new files from OTB_Modular (files from OTB-Modular repo + generated files) print("ADD STEP...") for dirPath, dirNames, fileNames in os.walk(outputModular): currentSourceDir = dirPath.replace(outputModular,'.') if currentSourceDir.startswith("./.hg"): print("skip .hg") continue for fileName in fileNames: # skip hg files if fileName.startswith(".hg"): continue targetFile = op.join(currentSourceDir,fileName) if not op.exists(targetFile): if not op.exists(currentSourceDir): command = ["mkdir","-p",currentSourceDir] call(command) shutil.copy(op.join(dirPath,fileName),targetFile) command = ['hg','add'] call(command) command = ['hg','commit','-m','ENH: Add new files for modular build system'] call(command) # apply patches on OTB Checkout print("PATCH STEP...") for dirPath, dirNames, fileNames in os.walk(outputModular): currentSourceDir = dirPath.replace(outputModular,'.') if currentSourceDir.startswith("./.hg"): continue for fileName in fileNames: # skip hg files if fileName.startswith(".hg"): continue targetFile = op.join(currentSourceDir,fileName) if op.exists(targetFile): command = ['cp',op.join(dirPath,fileName),targetFile] call(command) command = ['hg','commit','-m','ENH: Apply patches necessary after modularization'] call(command)
    output = {}
    sep = '|'
    nbFields = 2
    fd = open(path,'rb')
    for line in fd:
        if (line.strip()).startswith("#"):
            continue
        words = line.split(sep)
        if len(words) != nbFields:
            continue
        moduleName = words[0].strip(" \"\t\n\r")
        description = words[1].strip(" \"\t\n\r")
        output[moduleName] = description
    fd.close()
    return output
identifier_body
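The manifest reader in modulizer.py (parseFullManifest) probes ',', ';' and a tab against the first line to pick the field separator, then keeps path, group, module and subDir for every non-comment row. A compact sketch of the same behaviour using the csv module; the function name is hypothetical and this is not the code the script runs:

import csv

NB_FIELDS = 6

def parse_manifest_sketch(path):
    # Sniff the separator from the first line, then collect one record
    # per data row, mirroring what parseFullManifest() does.
    sources = []
    with open(path) as fd:
        first_line = fd.readline()
        sep = next((s for s in (",", ";", "\t")
                    if len(first_line.split(s)) == NB_FIELDS), None)
        if sep is None:
            print("Unknown separator")
            return sources
        fd.seek(0)
        for words in csv.reader(fd, delimiter=sep):
            if not words or words[0].strip().startswith("#"):
                continue
            if len(words) < NB_FIELDS - 1:
                continue
            cleaned = [w.strip(" ,;\t\n\r") for w in words]
            sources.append({"path": cleaned[0], "group": cleaned[2],
                            "module": cleaned[3], "subDir": cleaned[4]})
    return sources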
modulizer.py
#!/usr/bin/python #========================================================================== # # Copyright Insight Software Consortium # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0.txt # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # #==========================================================================*/ # This script is used to automate the modularization process. The following # steps are included: # 1. Move the files in the monolithic ITK into modules of the modularized ITK. # A manifest text file that lists all the files and their destinations is # required to run the script.By default, the manifest file is named as # "Manifest.txt" in the same directory of this script. # 2. Create CMake Files and put them into modules. # Modified by Guillaume Pasero <guillaume.pasero@c-s.fr> # add dependencies in otb-module.cmake # To run it, type ./modulizer.py OTB_PATH Manifest_PATH # from the otb-modulizer root directory. print "*************************************************************************" print "WARNINGs! This modularization script is still in its experimental stage." print "Current OTB users should not run this script." print "*************************************************************************" import shutil import os.path as op import re import sys import os import stat import glob import documentationCheck import analyseAppManifest import dispatchTests import dispatchExamples from subprocess import call def parseFullManifest(path): sourceList = [] nbFields = 6 fd = open(path,'rb') # skip first line and detect separator firstLine = fd.readline() sep = ',' if (len(firstLine.split(sep)) != nbFields): sep = ';' if (len(firstLine.split(sep)) != nbFields): sep = '\t' if (len(firstLine.split(sep)) != nbFields): print "Unknown separator" return sourceList fd.seek(0) # parse file for line in fd: if (line.strip()).startswith("#"): continue words = line.split(sep) if (len(words) < (nbFields-1)): print "Wrong number of fields, skipping this line" continue fullPath = words[0].strip(" ,;\t\n\r") groupName = words[2].strip(" ,;\t\n\r") moduleName = words[3].strip(" ,;\t\n\r") subDir = words[4].strip(" ,;\t\n\r") sourceName = op.basename(fullPath) sourceList.append({"path":fullPath, "group":groupName, "module":moduleName, "subDir":subDir}) fd.close() return sourceList def parseDescriptions(path): output = {} sep = '|' nbFields = 2 fd = open(path,'rb') for line in fd: if (line.strip()).startswith("#"): continue words = line.split(sep) if len(words) != nbFields: continue moduleName = words[0].strip(" \"\t\n\r") description = words[1].strip(" \"\t\n\r") output[moduleName] = description fd.close() return output if len(sys.argv) < 4: print("USAGE: {0} monolithic_OTB_PATH OUTPUT_DIR Manifest_Path [module_dep [test_dep [mod_description]]]".format(sys.argv[0])) print(" monolithic_OTB_PATH : checkout of OTB repository (will not be modified)") print(" OUTPUT_DIR : output directory where OTB_Modular and OTB_remaining will be created ") print(" Manifest_Path : path to manifest file, in CSV-like format. 
Fields are :") print(" source_path/current_subDir/group/module/subDir/comment") print(" module_dep : dependencies between modules") print(" test_dep : additional dependencies for tests") print(" mod_description : description for each module") print(" migration_password : password to enable MIGRATION") sys.exit(-1) scriptDir = op.dirname(op.abspath(sys.argv[0])) HeadOfOTBTree = sys.argv[1] if (HeadOfOTBTree[-1] == '/'): HeadOfOTBTree = HeadOfOTBTree[0:-1] OutputDir = sys.argv[2] HeadOfModularOTBTree = op.join(OutputDir,"OTB_Modular") ManifestPath = sys.argv[3] EdgePath = "" if len(sys.argv) >= 5: EdgePath = sys.argv[4] testDependPath = "" if len(sys.argv) >= 6:
modDescriptionPath = "" if len(sys.argv) >= 7: modDescriptionPath = sys.argv[6] enableMigration = False if len(sys.argv) >= 8: migrationPass = sys.argv[7] if migrationPass == "redbutton": enableMigration = True # copy the whole OTB tree over to a temporary dir HeadOfTempTree = op.join(OutputDir,"OTB_remaining") if op.isdir(HeadOfTempTree): shutil.rmtree(HeadOfTempTree) if op.isdir(HeadOfModularOTBTree): shutil.rmtree(HeadOfModularOTBTree) print("Start to copy" + HeadOfOTBTree + " to ./OTB_remaining ...") shutil.copytree(HeadOfOTBTree,HeadOfTempTree, ignore = shutil.ignore_patterns('.hg','.hg*')) print("Done copying!") # checkout OTB-Modular cmd ='hg clone http://hg.orfeo-toolbox.org/OTB-Modular '+HeadOfModularOTBTree os.system(cmd) logDir = op.join(OutputDir,"logs") if not op.isdir(logDir): os.makedirs(logDir) # read the manifest file print ("moving files from ./OTB_remaining into modules in {0}".format(HeadOfModularOTBTree)) numOfMissingFiles = 0; missingf = open(op.join(logDir,'missingFiles.log'),'w') moduleList=[] moduleDic={} sourceList = parseFullManifest(ManifestPath) for source in sourceList: # build module list moduleDic[source["module"]] = source["group"] # create the path inputfile = op.abspath(op.join(HeadOfTempTree,source["path"])) outputPath = op.join(op.join(HeadOfModularOTBTree,"Modules"),op.join(source["group"],op.join(source["module"],source["subDir"]))) if not op.isdir(outputPath): os.makedirs(outputPath) # copying files to the destination if op.isfile(inputfile): if op.isfile(op.join(outputPath,op.basename(inputfile))): os.remove(op.join(outputPath,op.basename(inputfile))) shutil.move(inputfile, outputPath) else: missingf.write(inputfile+'\n') numOfMissingFiles = numOfMissingFiles + 1 missingf.close() print ("listed {0} missing files to logs/missingFiles.log").format(numOfMissingFiles) moduleList = moduleDic.keys() # after move, operate a documentation check for source in sourceList: outputPath = op.join(op.join(HeadOfModularOTBTree,"Modules"),op.join(source["group"],op.join(source["module"],source["subDir"]))) outputFile = op.join(outputPath,op.basename(source["path"])) if op.isfile(outputFile): if op.splitext(outputFile)[1] == ".h": nextContent = documentationCheck.parserHeader(outputFile,source["module"]) fd = open(outputFile,'wb') fd.writelines(nextContent) fd.close() # get dependencies (if file is present) dependencies = {} testDependencies = {} exDependencies = {} for mod in moduleList: dependencies[mod] = [] testDependencies[mod] = [] exDependencies[mod] = [] if op.isfile(EdgePath): fd = open(EdgePath,'rb') for line in fd: words = line.split(',') if len(words) == 2: depFrom = words[0].strip(" ,;\t\n\r") depTo = words[1].strip(" ,;\t\n\r") if dependencies.has_key(depFrom): dependencies[depFrom].append(depTo) else: print("Bad dependency : "+depFrom+" -> "+depTo) fd.close() if op.isfile(testDependPath): fd = open(testDependPath,'rb') for line in fd: words = line.split(',') if len(words) == 2: depFrom = words[0].strip(" ,;\t\n\r") depTo = words[1].strip(" ,;\t\n\r") if testDependencies.has_key(depFrom): testDependencies[depFrom].append(depTo) else: print("Bad dependency : "+depFrom+" -> "+depTo) fd.close() """ if op.isfile(exDependPath): fd = open(exDependPath,'rb') for line in fd: words = line.split(',') if len(words) == 2: depFrom = words[0].strip(" ,;\t\n\r") depTo = words[1].strip(" ,;\t\n\r") if exDependencies.has_key(depFrom): exDependencies[depFrom].append(depTo) else: print("Bad dependency : "+depFrom+" -> "+depTo) fd.close() """ modDescriptions = {} if 
op.isfile(modDescriptionPath): modDescriptions = parseDescriptions(modDescriptionPath) # list the new files newf = open(op.join(logDir,'newFiles.log'),'w') for (root, subDirs, files) in os.walk(HeadOfTempTree): for afile in files: newf.write(op.join(root, afile)+'\n') newf.close() print ("listed new files to logs/newFiles.log") ########################################################################### print ('creating cmake files for each module (from the template module)') #moduleList = os.listdir(HeadOfModularOTBTree) for moduleName in moduleList: moduleDir = op.join(op.join(HeadOfModularOTBTree,"Modules"),op.join(moduleDic[moduleName],moduleName)) cmakeModName = "OTB"+moduleName if op.isdir(moduleDir): # write CMakeLists.txt filepath = moduleDir+'/CMakeLists.txt' if not op.isfile(filepath): o = open(filepath,'w') if op.isdir(moduleDir+'/src'): template_cmakelist = op.join(scriptDir,'templateModule/otb-template-module/CMakeLists.txt') else: template_cmakelist = op.join(scriptDir,'templateModule/otb-template-module/CMakeLists-nosrc.txt') for line in open(template_cmakelist,'r'): line = line.replace('otb-template-module',cmakeModName) o.write(line); o.close() # write src/CMakeLists.txt # list of CXX files if op.isdir(moduleDir+'/src'): cxxFiles = glob.glob(moduleDir+'/src/*.cxx') cxxFileList=''; for cxxf in cxxFiles: cxxFileList = cxxFileList+' '+cxxf.split('/')[-1]+'\n' # build list of link dependencies linkLibs = "" for dep in dependencies[moduleName]: #verify if dep is a header-onlymodule depThirdParty = False try: moduleDic[dep] except KeyError: # this is a ThirdParty module depThirdParty = True if not depThirdParty: depModuleDir = op.join(op.join(HeadOfModularOTBTree,"Modules"),op.join(moduleDic[dep],dep)) depcxx = glob.glob(depModuleDir+'/src/*.cxx') if depcxx : linkLibs = linkLibs + " ${OTB"+dep+"_LIBRARIES}" + "\n" else: linkLibs = linkLibs + " ${OTB"+dep+"_LIBRARIES}" + "\n" if len(linkLibs) == 0: linkLibs = " ${OTBITK_LIBRARIES}" filepath = moduleDir+'/src/CMakeLists.txt' if not op.isfile(filepath): o = open(filepath,'w') for line in open(op.join(scriptDir,'templateModule/otb-template-module/src/CMakeLists.txt'),'r'): line = line.replace('otb-template-module',cmakeModName) line = line.replace('LIST_OF_CXX_FILES',cxxFileList[0:-1]) #get rid of the last \n line = line.replace('LINK_LIBRARIES_TO_BE_REPLACED',linkLibs) o.write(line); o.close() # write app/CMakeLists.txt if op.isdir(moduleDir+'/app'): os.mkdir(moduleDir+'/test') srcFiles = glob.glob(moduleDir+'/app/*.cxx') srcFiles += glob.glob(moduleDir+'/app/*.h') appList = {} for srcf in srcFiles: # get App name appName = analyseAppManifest.findApplicationName(srcf) if len(appName) == 0: continue appList[appName] = {"source":op.basename(srcf)} # get original location cmakeListPath = "" for item in sourceList: if op.basename(item["path"]) == op.basename(srcf) and \ moduleName == item["module"]: appDir = op.basename(op.dirname(item["path"])) cmakeListPath = op.join(HeadOfOTBTree,op.join("Testing/Applications"),op.join(appDir,"CMakeLists.txt")) break # get App tests if not op.isfile(cmakeListPath): continue appList[appName]["test"] = analyseAppManifest.findTestFromApp(cmakeListPath,appName) # build list of link dependencies linkLibs = "" for dep in dependencies[moduleName]: linkLibs = linkLibs + " ${OTB"+dep+"_LIBRARIES}" + "\n" filepath = moduleDir+'/app/CMakeLists.txt' if not op.isfile(filepath): o = open(filepath,'w') # define link libraries o.write("set("+cmakeModName+"_LINK_LIBS\n") o.write(linkLibs) o.write(")\n") for appli in 
appList: content = "\notb_create_application(\n" content += " NAME " + appli + "\n" content += " SOURCES " + appList[appli]["source"] + "\n" content += " LINK_LIBRARIES ${${otb-module}_LIBRARIES})\n" o.write(content) o.close() filepath = moduleDir+'/test/CMakeLists.txt' if not op.isfile(filepath): o = open(filepath,'w') o.write("otb_module_test()") for appli in appList: if not appList[appli].has_key("test"): continue o.write("\n#----------- "+appli+" TESTS ----------------\n") for test in appList[appli]["test"]: if test.count("${"): print "Warning : test name contains a variable : "+test continue testcode=appList[appli]["test"][test] testcode=[s.replace('OTB_TEST_APPLICATION', 'otb_test_application') for s in testcode] o.writelines(testcode) o.write("\n") o.close() # write test/CMakeLists.txt : done by dispatchTests.py """ if op.isdir(moduleDir+'/test'): cxxFiles = glob.glob(moduleDir+'/test/*.cxx') cxxFileList=''; for cxxf in cxxFiles: cxxFileList = cxxFileList+cxxf.split('/')[-1]+'\n' filepath = moduleDir+'/test/CMakeLists.txt' if not op.isfile(filepath): o = open(filepath,'w') for line in open('./templateModule/otb-template-module/test/CMakeLists.txt','r'): # TODO : refactor for OTB words= moduleName.split('-') moduleNameMod=''; for word in words: moduleNameMod=moduleNameMod + word.capitalize() line = line.replace('itkTemplateModule',moduleNameMod) line = line.replace('itk-template-module',moduleName) line = line.replace('LIST_OF_CXX_FILES',cxxFileList[0:-1]) #get rid of the last \n o.write(line); o.close() """ # write otb-module.cmake, which contains dependency info filepath = moduleDir+'/otb-module.cmake' if not op.isfile(filepath): o = open(filepath,'w') for line in open(op.join(scriptDir,'templateModule/otb-template-module/otb-module.cmake'),'r'): # replace documentation if line.find("DESCRIPTION_TO_BE_REPLACED") >= 0: docString = "\"TBD\"" if moduleName in modDescriptions: descPos = line.find("DESCRIPTION_TO_BE_REPLACED") limitChar = 80 docString = "\""+modDescriptions[moduleName]+"\"" curPos = 80 - descPos while curPos < len(docString): lastSpace = docString[0:curPos].rfind(' ') if lastSpace > max(0,curPos-80): docString = docString[0:lastSpace] + '\n' + docString[lastSpace+1:] else: docString = docString[0:curPos] + '\n' + docString[curPos:] curPos += 81 line = line.replace('DESCRIPTION_TO_BE_REPLACED',docString) # replace module name line = line.replace('otb-template-module',cmakeModName) # replace depend list dependTagPos = line.find("DEPENDS_TO_BE_REPLACED") if dependTagPos >= 0: replacementStr = "DEPENDS" indentStr = "" for it in range(dependTagPos+2): indentStr = indentStr + " " if len(dependencies[moduleName]) > 0: deplist = dependencies[moduleName] else: deplist = ["Common"] for dep in deplist: replacementStr = replacementStr + "\n" + indentStr +"OTB"+ dep line = line.replace('DEPENDS_TO_BE_REPLACED',replacementStr) # replace test_depend list testDependTagPos = line.find("TESTDEP_TO_BE_REPLACED") if testDependTagPos >= 0: if moduleName.startswith("App"): # for application : hardcode TestKernel and CommandLine indentStr = "" for it in range(testDependTagPos+2): indentStr = indentStr + " " replacementStr = "TEST_DEPENDS\n" + indentStr + "OTBTestKernel\n" + indentStr + "OTBCommandLine" line = line.replace('TESTDEP_TO_BE_REPLACED',replacementStr) else: # standard case if len(testDependencies[moduleName]) > 0: indentStr = "" replacementStr = "TEST_DEPENDS" for it in range(testDependTagPos+2): indentStr = indentStr + " " for dep in testDependencies[moduleName]: replacementStr = 
replacementStr + "\n" + indentStr +"OTB"+ dep line = line.replace('TESTDEP_TO_BE_REPLACED',replacementStr) else: line = line.replace('TESTDEP_TO_BE_REPLACED','') # replace example_depend list exDependTagPos = line.find("EXDEP_TO_BE_REPLACED") if exDependTagPos >= 0: if len(exDependencies[moduleName]) > 0: indentStr = "" replacementStr = "EXAMPLE_DEPENDS" for it in range(exDependTagPos+2): indentStr = indentStr + " " for dep in exDependencies[moduleName]: replacementStr = replacementStr + "\n" + indentStr +"OTB"+ dep line = line.replace('EXDEP_TO_BE_REPLACED',replacementStr) else: line = line.replace('EXDEP_TO_BE_REPLACED','') o.write(line); o.close() # call dispatchTests to fill test/CMakeLists if op.isfile(testDependPath): dispatchTests.main(["dispatchTests.py",ManifestPath,HeadOfOTBTree,HeadOfModularOTBTree,testDependPath]) """ # call dispatchExamples to fill example/CMakeLists if op.isfile(exDependPath): dispatchExamples.main(["dispatchExamples.py",ManifestPath,HeadOfOTBTree,HeadOfModularOTBTree,exDependPath]) """ # examples for i in sorted(os.listdir(HeadOfTempTree + "/Examples")): if i == "CMakeLists.txt" or i == "README.txt" or i.startswith("DataRepresentation"): continue for j in sorted(os.listdir(HeadOfTempTree + "/Examples/" + i)): if j == "CMakeLists.txt" or j.startswith("otb"): continue command = "mv %s/Examples/%s/%s %s/Examples/%s/%s" % ( HeadOfTempTree, i, j, HeadOfModularOTBTree, i, j) os.system(command) for i in sorted(os.listdir(HeadOfTempTree + "/Examples/DataRepresentation")): if i == "CMakeLists.txt" or i == "README.txt": continue for j in sorted(os.listdir(HeadOfTempTree + "/Examples/DataRepresentation/" + i)): if j == "CMakeLists.txt" or j.startswith("otb"): continue command = "mv %s/Examples/DataRepresentation/%s/%s %s/Examples/DataRepresentation/%s/%s" % ( HeadOfTempTree, i, j, HeadOfModularOTBTree, i, j) os.system(command) # save version without patches (so that we can regenerate patches later) os.system( "cp -ar " + op.join(OutputDir,"OTB_Modular") + " " + op.join(OutputDir,"OTB_Modular-nopatch") ) # apply patches in OTB_Modular curdir = op.abspath(op.dirname(__file__)) command = "cd " + op.join(OutputDir,"OTB_Modular") + " && patch -p1 < " + curdir + "/patches/otbmodular.patch" print "Executing " + command os.system( command ) # remove Copyright files we don't want to touch later os.system( "rm -rf %s" % (op.join(HeadOfTempTree,"Copyright") ) ) os.system( "rm -rf %s" % (op.join(HeadOfTempTree,"RELEASE_NOTES.txt") ) ) os.system( "rm -rf %s" % (op.join(HeadOfTempTree,"README") ) ) # PREPARE MIGRATION COMMIT ON A CLONE OF ORIGINAL CHECKOUT if enableMigration: print("Executing migration on a clone of original checkout") HeadOfTempTree = op.abspath(HeadOfTempTree) OutputDir = op.abspath(OutputDir) # clone original checkout outputModular = op.join(OutputDir,"OTB_Modular") outputMigration = op.join(OutputDir,"OTB_Migration") if op.exists(outputMigration): os.removedirs(outputMigration) command = ["cp","-ar",HeadOfOTBTree,outputMigration] call(command) os.chdir(outputMigration) # walk through OTB_Remaining and delete corresponding files in OTB checkout print("DELETE STEP...") for dirPath, dirNames, fileNames in os.walk(HeadOfTempTree): currentSourceDir = dirPath.replace(HeadOfTempTree,'.') for fileName in fileNames: if op.exists(op.join(currentSourceDir,fileName)): command = ["hg","remove",op.join(currentSourceDir,fileName)] call(command) else: print("Unknown file : "+op.join(currentSourceDir,fileName)) command = ['hg','commit','-m','ENH: Remove files not necessary after 
modularization'] call(command) # walk through manifest and rename files print("MOVE STEP...") for source in sourceList: outputPath = op.join("./Modules",op.join(source["group"],op.join(source["module"],source["subDir"]))) command = ['hg','rename',source["path"],op.join(outputPath,op.basename(source["path"]))] call(command) command = ['hg','commit','-m','ENH: Move source and test files into their respective module'] call(command) # add new files from OTB_Modular (files from OTB-Modular repo + generated files) print("ADD STEP...") for dirPath, dirNames, fileNames in os.walk(outputModular): currentSourceDir = dirPath.replace(outputModular,'.') if currentSourceDir.startswith("./.hg"): print("skip .hg") continue for fileName in fileNames: # skip hg files if fileName.startswith(".hg"): continue targetFile = op.join(currentSourceDir,fileName) if not op.exists(targetFile): if not op.exists(currentSourceDir): command = ["mkdir","-p",currentSourceDir] call(command) shutil.copy(op.join(dirPath,fileName),targetFile) command = ['hg','add'] call(command) command = ['hg','commit','-m','ENH: Add new files for modular build system'] call(command) # apply patches on OTB Checkout print("PATCH STEP...") for dirPath, dirNames, fileNames in os.walk(outputModular): currentSourceDir = dirPath.replace(outputModular,'.') if currentSourceDir.startswith("./.hg"): continue for fileName in fileNames: # skip hg files if fileName.startswith(".hg"): continue targetFile = op.join(currentSourceDir,fileName) if op.exists(targetFile): command = ['cp',op.join(dirPath,fileName),targetFile] call(command) command = ['hg','commit','-m','ENH: Apply patches necessary after modularization'] call(command)
testDependPath = sys.argv[5]
conditional_block
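The migration steps above (DELETE, MOVE, ADD, PATCH) drive Mercurial through subprocess.call and never inspect the return codes. A small wrapper of the kind one could use for that pattern; it is an illustration, not a helper from modulizer.py, and the paths in the usage example are placeholders:

import subprocess
import sys

def run(cmd, cwd=None):
    # Echo the command, run it, and stop on a non-zero exit code instead of
    # silently continuing with the next migration step.
    print("Executing: " + " ".join(cmd))
    ret = subprocess.call(cmd, cwd=cwd)
    if ret != 0:
        sys.exit("Command failed (%d): %s" % (ret, " ".join(cmd)))

# Example mirroring the MOVE step (placeholder paths, requires an hg checkout).
run(["hg", "rename", "Code/Common/otbImage.h",
     "Modules/Core/Common/include/otbImage.h"])
run(["hg", "commit", "-m",
     "ENH: Move source and test files into their respective module"])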
scraper-twitter-user.py
#!/usr/bin/python from __future__ import print_function import solidscraper as ss import traceback import argparse import codecs # utf-8 text files import json import os from collections import defaultdict from datetime import datetime from datetime import timedelta from dateutil import tz import time def toFixed(strn, length): if isinstance(strn, list) or type(strn) == int: strn = str(strn) return (u"{:<%i}" % (length)).format(strn[:length]) def sumc(collection): total = 0 if type(collection) == list or type(collection) == set or type(collection) == tuple: for e in collection: total += e else: for e in collection: total += collection[e] return float(total) parser = argparse.ArgumentParser( description='LIDIC Twitter Scraper v.1.1', epilog=( "Author: Burdisso Sergio (<sergio.burdisso@gmail.com>), Phd. Student. " "LIDIC, Department of Computer Science, National University of San Luis" " (UNSL), San Luis, Argentina." ) ) parser.add_argument('USER', help="target's twitter user name") args = parser.parse_args() # https://en.wikipedia.org/wiki/List_of_tz_database_time_zones _TIMEZONE_ = tz.gettz('America/Buenos_Aires') _UTC_TIMEZONE_ = tz.gettz('UTC') ss.setVerbose(False) ss.scookies.set("lang", "en") ss.setUserAgent(ss.UserAgent.CHROME_LINUX) _XML_TEMPLATE = """\ <?xml version="1.0" encoding="UTF-8" standalone="no"?> <author type="twitter" url="https://twitter.com/%s" id="%s" name="%s" join_date="%s" location="%s" personal_url="%s" tweets="%s" following="%s" followers="%s" favorites="%s" age_group="xx" gender="xx" lang="xx"> <biography> <![CDATA[%s]]> </biography> <documents count="%s">%s </documents> </author>""" _XML_TWEET_TEMPLATE = """ <document id="%s" timestamp="%s" lang="%s" url="https://twitter.com/%s/status/%s"><![CDATA[%s]]></document> """ def scrapes(user): _XML_TWEETS = "" _XML_ = "" _USER_ = user logged_in = True has_more_items = True min_position = "" items_html = "" document = None i = 0 user_id = 0 user_bio = "" user_url = "" user_name = "" user_favs = 0 user_tweets = 0 user_isFamous = False user_location = "" user_joinDate = "" user_following = 0 user_followers = 0
mention_tweet_counter = 0 url_tweet_counter = 0 retweet_counter = 0 tweet_id = 0 tweet_lang = "" tweet_raw_text = "" tweet_datetime = None tweet_mentions = None tweet_hashtags = None tweet_owner_id = 0 tweet_retweeter = False tweet_timestamp = 0 tweet_owner_name = "" tweet_owner_username = "" dict_mentions_mutual = defaultdict(lambda: 0) dict_mentions_user = defaultdict(lambda: 0) dict_mentions_p = defaultdict(lambda: 0) dict_hashtag_p = defaultdict(lambda: 0) dict_retweets = defaultdict(lambda: 0) dict_mentions = defaultdict(lambda: 0) dict_hashtag = defaultdict(lambda: 0) dict_lang_p = defaultdict(lambda: 0) dict_lang = defaultdict(lambda: 0) _time_start_ = time.time() print("\nAccessing %s profile on twitter.com..." % (_USER_)) error = True while error: try: user_url = "/%s/with_replies" res = ss.get(user_url % (_USER_), redirect=False) if res.status // 100 != 2: print("It looks like you're not logged in, I'll try to collect only what is public") logged_in = False user_url = "/%s" document = ss.load(user_url % (_USER_)) if not document: print("nothing public to bee seen... sorry") return error = False except: time.sleep(5) profile = document.select(".ProfileHeaderCard") # user screenname _USER_ = profile.select( ".ProfileHeaderCard-screenname" ).then("a").getAttribute("href") if not _USER_: return _USER_ = _USER_[0][1:] _BASE_DIR_ = "_OUTPUT_/%s/" % (_USER_) _BASE_PHOTOS = _BASE_DIR_ + "photos/" _BASE_PHOTOS_PERSONAL = _BASE_PHOTOS + "personal/" _BASE_PHOTOS_EXTERN = _BASE_PHOTOS + "extern/" try: os.makedirs(_BASE_PHOTOS_PERSONAL) except: pass try: os.makedirs(_BASE_PHOTOS_EXTERN) except: pass # Is Famous user_isFamous = True if profile.select(".Icon--verified") else False # Name user_name = profile.select(".ProfileHeaderCard-name").then("a .ProfileHeaderCard-nameLink").text() # Biography user_bio = profile.select(".ProfileHeaderCard-bio").text() # Location user_location = profile.select(".ProfileHeaderCard-locationText").text() # Url user_url = profile.select(".ProfileHeaderCard-urlText").then("a").getAttribute("title") user_url = user_url[0] if user_url else "" # Join Date user_joinDate = profile.select(".ProfileHeaderCard-joinDateText").getAttribute("title") user_joinDate = user_joinDate[0] if user_joinDate else "" profileNav = document.select(".ProfileNav") # user id user_id = profileNav.getAttribute("data-user-id")[0] # tweets user_tweets = profileNav.select(".ProfileNav-item--tweets").then("a").getAttribute("title") user_tweets = user_tweets[0].split(" ")[0].replace(",", "") if user_tweets else 0 # following user_following = profileNav.select(".ProfileNav-item--following").then("a").getAttribute("title") user_following = user_following[0].split(" ")[0].replace(",", "") if user_following else 0 # followers user_followers = profileNav.select(".ProfileNav-item--followers").then("a").getAttribute("title") user_followers = user_followers[0].split(" ")[0].replace(",", "") if user_followers else 0 # favorites user_favs = profileNav.select(".ProfileNav-item--favorites").then("a").getAttribute("title") if user_favs: user_favs = user_favs[0].split(" ")[0].replace(",", "") else: user_favs = "" user_profilePic = document.select(".ProfileAvatar").andThen("img").getAttribute("src")[0] print("\n> downloading profile picture...") ss.download(user_profilePic, _BASE_PHOTOS) print("\n\nAbout to start downloading user's timeline:") timeline_url = "https://twitter.com/i/profiles/show/%s/timeline/" % (_USER_) timeline_url += "%s?include_available_features=1&include_entities=1" % ("with_replies" if 
logged_in else "tweets") while has_more_items: try: print("\n> downloading timeline chunk [ %s of %s tweets so far, max_position=%s]... \n" % (tweet_counter + retweet_counter, user_tweets, min_position)) if not min_position: r = ss.get(timeline_url) if not r: break else: r = ss.get(timeline_url + "&max_position=%s" % min_position) if not r: break try: j = json.loads(r.body) except: print("[*] Error while trying to parse the JSON response, aborting...") has_more_items = False break items_html = j["items_html"].encode("utf8") document = ss.parse(items_html) items_html = document.select("li") for node in items_html: node = node.select("@data-tweet-id") if node: node = node[0] else: continue tweet_id = node.getAttribute("data-tweet-id") tweet_owner_id = node.getAttribute("data-user-id") tweet_owner_username = node.getAttribute("data-screen-name") tweet_owner_name = node.getAttribute("data-name") tweet_retweeter = node.getAttribute("data-retweeter") tweet_mentions = node.getAttribute("data-mentions") tweet_mentions = tweet_mentions.split() if tweet_mentions else [] tweet_raw_text = node.select(".tweet-text").text() tweet_lang = node.select(".tweet-text").getAttribute("lang") tweet_lang = tweet_lang[0] if tweet_lang else "" tweet_timestamp = int(node.select("@data-time-ms").getAttribute("data-time")[0]) tweet_hashtags = [] tweet_iscomment = node.getAttribute("data-is-reply-to") == "true" for node_hashtag in node.select(".twitter-hashtag"): hashtag = node_hashtag.text().upper().replace("#.\n", "") tweet_hashtags.append(hashtag) dict_hashtag[hashtag] += 1 if not tweet_retweeter: dict_hashtag_p[hashtag] += 1 tweet_links = [link for link in node.select(".tweet-text").then("a").getAttribute("href") if link.startswith("http")] # updating counters tweet_owner_username = tweet_owner_username.upper() for uname in tweet_mentions: if uname.upper() == _USER_.upper(): dict_mentions_user[tweet_owner_username] += 1 tweet_datetime = datetime.fromtimestamp(tweet_timestamp).replace(tzinfo=_UTC_TIMEZONE_).astimezone(_TIMEZONE_) for usermen in tweet_mentions: dict_mentions[usermen.upper()] += 1 if not tweet_retweeter: dict_mentions_p[usermen.upper()] += 1 dict_lang[tweet_lang] += 1 if tweet_retweeter: retweet_counter += 1 dict_retweets[tweet_owner_username] += 1 else: if tweet_owner_id == user_id: dict_lang_p[tweet_lang] += 1 # updating counters tweet_counter += 1 if tweet_iscomment: comments_counter += 1 if len(tweet_mentions): mention_tweet_counter += 1 if len(tweet_links): url_tweet_counter += 1 _XML_TWEETS += _XML_TWEET_TEMPLATE % ( tweet_id, tweet_timestamp, tweet_lang, _USER_, tweet_id, tweet_raw_text ) print( "|%s |%s[%s]%s\t|%s |%s |%s |%s |%s" % ( toFixed(tweet_datetime.isoformat(" "), 16), tweet_id, tweet_lang, "r" if tweet_retweeter else ("c" if tweet_iscomment else ""), toFixed(tweet_owner_id, 10), toFixed(tweet_owner_username, 16), toFixed(tweet_owner_name, 19), toFixed(tweet_mentions + tweet_hashtags, 10), toFixed(tweet_raw_text, 54) + "..." ) ) if len(node.select("@data-image-url")): img_list = node.select("@data-image-url") len_imgs = len(img_list) print("\n" + "- " * 61) if tweet_retweeter: print("\t> %i extern photo found" % (len_imgs)) imgs_base_path = _BASE_PHOTOS_EXTERN else: print("\t> %i personal photo(s) found" % (len_imgs)) imgs_base_path = _BASE_PHOTOS_PERSONAL for elem in img_list: img_url = elem.getAttribute("data-image-url") print("\t\tdownloading photo from %s... 
\n" % (img_url)) ss.download(img_url, imgs_base_path) print("- " * 61 + " \n") elif node.getAttribute("data-card2-type") == "player": print("\n" + "- " * 61) video = ss.load("https://twitter.com/i/cards/tfw/v1/%s?cardname=player&earned=true"%(tweet_id)) video_title = video.select(".TwitterCard-title").text() video_url = video.select("iframe").getAttribute("src")[0] print("\t> new video '%s' found [ %s ]" % (video_title, video_url)) print("\n" + "- " * 61) min_position = tweet_id has_more_items = j["has_more_items"] and items_html i += 1 except Exception as e: print("------------------------------------------") traceback.print_stack() print("[ error: %s ]" % str(e)) print("[ trying again... ]") time.sleep(5) print("\nprocess finished successfully! =D -- time:", timedelta(seconds=time.time() - _time_start_) , " --") _XML_ = _XML_TEMPLATE % ( _USER_, user_id, user_name, user_joinDate, user_location, user_url, user_tweets, user_following, user_followers, user_favs, user_bio.replace("\r\n", ""), tweet_counter, _XML_TWEETS ) fxml = codecs.open("%s%s.xml" % (_BASE_DIR_, _USER_), "w", "utf-8") fxml.write(_XML_) fxml.close() personal_lang = "" for t in sorted(dict_lang_p.items(), key=lambda k: -k[1])[:2]: personal_lang += "\t%s: %.2f%% (%s)" % (t[0], t[1] / sumc(dict_lang_p) * 100 , t[1]) lang = "" for t in sorted(dict_lang.items(), key=lambda k: -k[1])[:2]: lang += "\t%s: %.2f%% (%s)" % (t[0], t[1] / sumc(dict_lang) * 100, t[1]) mentions_user = "" for t in sorted(dict_mentions_user.items(), key=lambda k: -k[1]): mentions_user += "\t%s(%s)" % t personal_hashtags = "" for t in sorted(dict_hashtag_p.items(), key=lambda k: -k[1]): personal_hashtags += "\t%s: %s\n" % t hashtags = "" for t in sorted(dict_hashtag.items(), key=lambda k: -k[1]): hashtags += "\t%s: %s\n" % t personal_mentions = "" for t in sorted(dict_mentions_p.items(), key=lambda k: -k[1]): personal_mentions += "\t%s: %s\n" % t if t[0] in dict_mentions_user: dict_mentions_mutual[t[0]] += 1 mentions_mutual = "" for t in sorted(dict_mentions_mutual.items(), key=lambda k: -k[1]): mentions_mutual += "\t%s(%s)" % t mentions = "" for t in sorted(dict_mentions.items(), key=lambda k: -k[1]): mentions += "\t%s: %s\n" % t retweets = "" for t in sorted(dict_retweets.items(), key=lambda k: -k[1]): retweets += "\t%s: %s\n" % t output = """ \n\n Overview: --------- Id: %s Name: %s Join Date: %s Biography: %s Location: %s Url: %s Is Famous: %s Tweets: %s Following: %s Followers: %s Favorites: %s Number of tweets captured: %s (%s tweets / %s retweets) -------------------------- > Personal language: %s > Total language: %s > People who has mentioned him: %s > Mutual Mentions: %s > Personal Hashtags: %s > Total Hashtags: %s > Personal Mentions: %s > Total Mentions: %s > Retweets: %s """ try: output = output % ( user_id, user_name, user_joinDate, user_bio.replace("\r\n", ""), user_location, user_url, user_isFamous, user_tweets, user_following, user_followers, user_favs, tweet_counter + retweet_counter, tweet_counter, retweet_counter, personal_lang, lang, mentions_user, mentions_mutual, personal_hashtags, hashtags, personal_mentions, mentions, retweets ) except: pass print(output) print("[ tweets saved in %s%s.xml ]" % (_BASE_DIR_, _USER_)) print("[ profile picture saved in %s ]" % _BASE_PHOTOS) print("[ images uploaded by the user saved in %s ]" % _BASE_PHOTOS_PERSONAL) print("[ images retweeted by the user saved in %s ]" % _BASE_PHOTOS_EXTERN) print("[ finised ]\n") ss.scookies.load() document = ss.load("https://twitter.com/login") if 
document.select("title").text().startswith("Login"): params = { "session[username_or_email]": "", # <- your user "session[password]": "", # <- your password "authenticity_token": document.select("@name=authenticity_token").getAttribute("value")[0], "scribe_log": "", "redirect_after_login": "", "remember_me": "1" } ss.post("/sessions", params) ss.scookies.save() if __name__ == "__main__": scrapes(args.USER)
    tweet_counter = 0
    comments_counter = 0
random_line_split
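scraper-twitter-user.py accumulates its per-user statistics (mentions, hashtags, languages, retweets) in defaultdict(lambda: 0) counters and ranks them with key=lambda k: -k[1] when printing the report. The same count-and-rank pattern with collections.Counter, shown on made-up data:

from collections import Counter

# Toy stand-in for the per-tweet mention lists extracted from the
# "data-mentions" attribute; the real script walks the parsed timeline HTML.
mention_lists = [["alice", "bob"], ["alice"], ["carol", "alice"]]

mentions = Counter()
for tweet_mentions in mention_lists:
    mentions.update(name.upper() for name in tweet_mentions)

# Most-frequent-first report, equivalent to sorted(d.items(), key=lambda k: -k[1]).
for name, count in mentions.most_common():
    print("\t%s: %s" % (name, count))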
scraper-twitter-user.py
#!/usr/bin/python from __future__ import print_function import solidscraper as ss import traceback import argparse import codecs # utf-8 text files import json import os from collections import defaultdict from datetime import datetime from datetime import timedelta from dateutil import tz import time def toFixed(strn, length): if isinstance(strn, list) or type(strn) == int: strn = str(strn) return (u"{:<%i}" % (length)).format(strn[:length]) def sumc(collection): total = 0 if type(collection) == list or type(collection) == set or type(collection) == tuple: for e in collection: total += e else: for e in collection: total += collection[e] return float(total) parser = argparse.ArgumentParser( description='LIDIC Twitter Scraper v.1.1', epilog=( "Author: Burdisso Sergio (<sergio.burdisso@gmail.com>), Phd. Student. " "LIDIC, Department of Computer Science, National University of San Luis" " (UNSL), San Luis, Argentina." ) ) parser.add_argument('USER', help="target's twitter user name") args = parser.parse_args() # https://en.wikipedia.org/wiki/List_of_tz_database_time_zones _TIMEZONE_ = tz.gettz('America/Buenos_Aires') _UTC_TIMEZONE_ = tz.gettz('UTC') ss.setVerbose(False) ss.scookies.set("lang", "en") ss.setUserAgent(ss.UserAgent.CHROME_LINUX) _XML_TEMPLATE = """\ <?xml version="1.0" encoding="UTF-8" standalone="no"?> <author type="twitter" url="https://twitter.com/%s" id="%s" name="%s" join_date="%s" location="%s" personal_url="%s" tweets="%s" following="%s" followers="%s" favorites="%s" age_group="xx" gender="xx" lang="xx"> <biography> <![CDATA[%s]]> </biography> <documents count="%s">%s </documents> </author>""" _XML_TWEET_TEMPLATE = """ <document id="%s" timestamp="%s" lang="%s" url="https://twitter.com/%s/status/%s"><![CDATA[%s]]></document> """ def
(user): _XML_TWEETS = "" _XML_ = "" _USER_ = user logged_in = True has_more_items = True min_position = "" items_html = "" document = None i = 0 user_id = 0 user_bio = "" user_url = "" user_name = "" user_favs = 0 user_tweets = 0 user_isFamous = False user_location = "" user_joinDate = "" user_following = 0 user_followers = 0 tweet_counter = 0 comments_counter = 0 mention_tweet_counter = 0 url_tweet_counter = 0 retweet_counter = 0 tweet_id = 0 tweet_lang = "" tweet_raw_text = "" tweet_datetime = None tweet_mentions = None tweet_hashtags = None tweet_owner_id = 0 tweet_retweeter = False tweet_timestamp = 0 tweet_owner_name = "" tweet_owner_username = "" dict_mentions_mutual = defaultdict(lambda: 0) dict_mentions_user = defaultdict(lambda: 0) dict_mentions_p = defaultdict(lambda: 0) dict_hashtag_p = defaultdict(lambda: 0) dict_retweets = defaultdict(lambda: 0) dict_mentions = defaultdict(lambda: 0) dict_hashtag = defaultdict(lambda: 0) dict_lang_p = defaultdict(lambda: 0) dict_lang = defaultdict(lambda: 0) _time_start_ = time.time() print("\nAccessing %s profile on twitter.com..." % (_USER_)) error = True while error: try: user_url = "/%s/with_replies" res = ss.get(user_url % (_USER_), redirect=False) if res.status // 100 != 2: print("It looks like you're not logged in, I'll try to collect only what is public") logged_in = False user_url = "/%s" document = ss.load(user_url % (_USER_)) if not document: print("nothing public to bee seen... sorry") return error = False except: time.sleep(5) profile = document.select(".ProfileHeaderCard") # user screenname _USER_ = profile.select( ".ProfileHeaderCard-screenname" ).then("a").getAttribute("href") if not _USER_: return _USER_ = _USER_[0][1:] _BASE_DIR_ = "_OUTPUT_/%s/" % (_USER_) _BASE_PHOTOS = _BASE_DIR_ + "photos/" _BASE_PHOTOS_PERSONAL = _BASE_PHOTOS + "personal/" _BASE_PHOTOS_EXTERN = _BASE_PHOTOS + "extern/" try: os.makedirs(_BASE_PHOTOS_PERSONAL) except: pass try: os.makedirs(_BASE_PHOTOS_EXTERN) except: pass # Is Famous user_isFamous = True if profile.select(".Icon--verified") else False # Name user_name = profile.select(".ProfileHeaderCard-name").then("a .ProfileHeaderCard-nameLink").text() # Biography user_bio = profile.select(".ProfileHeaderCard-bio").text() # Location user_location = profile.select(".ProfileHeaderCard-locationText").text() # Url user_url = profile.select(".ProfileHeaderCard-urlText").then("a").getAttribute("title") user_url = user_url[0] if user_url else "" # Join Date user_joinDate = profile.select(".ProfileHeaderCard-joinDateText").getAttribute("title") user_joinDate = user_joinDate[0] if user_joinDate else "" profileNav = document.select(".ProfileNav") # user id user_id = profileNav.getAttribute("data-user-id")[0] # tweets user_tweets = profileNav.select(".ProfileNav-item--tweets").then("a").getAttribute("title") user_tweets = user_tweets[0].split(" ")[0].replace(",", "") if user_tweets else 0 # following user_following = profileNav.select(".ProfileNav-item--following").then("a").getAttribute("title") user_following = user_following[0].split(" ")[0].replace(",", "") if user_following else 0 # followers user_followers = profileNav.select(".ProfileNav-item--followers").then("a").getAttribute("title") user_followers = user_followers[0].split(" ")[0].replace(",", "") if user_followers else 0 # favorites user_favs = profileNav.select(".ProfileNav-item--favorites").then("a").getAttribute("title") if user_favs: user_favs = user_favs[0].split(" ")[0].replace(",", "") else: user_favs = "" user_profilePic = 
document.select(".ProfileAvatar").andThen("img").getAttribute("src")[0] print("\n> downloading profile picture...") ss.download(user_profilePic, _BASE_PHOTOS) print("\n\nAbout to start downloading user's timeline:") timeline_url = "https://twitter.com/i/profiles/show/%s/timeline/" % (_USER_) timeline_url += "%s?include_available_features=1&include_entities=1" % ("with_replies" if logged_in else "tweets") while has_more_items: try: print("\n> downloading timeline chunk [ %s of %s tweets so far, max_position=%s]... \n" % (tweet_counter + retweet_counter, user_tweets, min_position)) if not min_position: r = ss.get(timeline_url) if not r: break else: r = ss.get(timeline_url + "&max_position=%s" % min_position) if not r: break try: j = json.loads(r.body) except: print("[*] Error while trying to parse the JSON response, aborting...") has_more_items = False break items_html = j["items_html"].encode("utf8") document = ss.parse(items_html) items_html = document.select("li") for node in items_html: node = node.select("@data-tweet-id") if node: node = node[0] else: continue tweet_id = node.getAttribute("data-tweet-id") tweet_owner_id = node.getAttribute("data-user-id") tweet_owner_username = node.getAttribute("data-screen-name") tweet_owner_name = node.getAttribute("data-name") tweet_retweeter = node.getAttribute("data-retweeter") tweet_mentions = node.getAttribute("data-mentions") tweet_mentions = tweet_mentions.split() if tweet_mentions else [] tweet_raw_text = node.select(".tweet-text").text() tweet_lang = node.select(".tweet-text").getAttribute("lang") tweet_lang = tweet_lang[0] if tweet_lang else "" tweet_timestamp = int(node.select("@data-time-ms").getAttribute("data-time")[0]) tweet_hashtags = [] tweet_iscomment = node.getAttribute("data-is-reply-to") == "true" for node_hashtag in node.select(".twitter-hashtag"): hashtag = node_hashtag.text().upper().replace("#.\n", "") tweet_hashtags.append(hashtag) dict_hashtag[hashtag] += 1 if not tweet_retweeter: dict_hashtag_p[hashtag] += 1 tweet_links = [link for link in node.select(".tweet-text").then("a").getAttribute("href") if link.startswith("http")] # updating counters tweet_owner_username = tweet_owner_username.upper() for uname in tweet_mentions: if uname.upper() == _USER_.upper(): dict_mentions_user[tweet_owner_username] += 1 tweet_datetime = datetime.fromtimestamp(tweet_timestamp).replace(tzinfo=_UTC_TIMEZONE_).astimezone(_TIMEZONE_) for usermen in tweet_mentions: dict_mentions[usermen.upper()] += 1 if not tweet_retweeter: dict_mentions_p[usermen.upper()] += 1 dict_lang[tweet_lang] += 1 if tweet_retweeter: retweet_counter += 1 dict_retweets[tweet_owner_username] += 1 else: if tweet_owner_id == user_id: dict_lang_p[tweet_lang] += 1 # updating counters tweet_counter += 1 if tweet_iscomment: comments_counter += 1 if len(tweet_mentions): mention_tweet_counter += 1 if len(tweet_links): url_tweet_counter += 1 _XML_TWEETS += _XML_TWEET_TEMPLATE % ( tweet_id, tweet_timestamp, tweet_lang, _USER_, tweet_id, tweet_raw_text ) print( "|%s |%s[%s]%s\t|%s |%s |%s |%s |%s" % ( toFixed(tweet_datetime.isoformat(" "), 16), tweet_id, tweet_lang, "r" if tweet_retweeter else ("c" if tweet_iscomment else ""), toFixed(tweet_owner_id, 10), toFixed(tweet_owner_username, 16), toFixed(tweet_owner_name, 19), toFixed(tweet_mentions + tweet_hashtags, 10), toFixed(tweet_raw_text, 54) + "..." 
) ) if len(node.select("@data-image-url")): img_list = node.select("@data-image-url") len_imgs = len(img_list) print("\n" + "- " * 61) if tweet_retweeter: print("\t> %i extern photo found" % (len_imgs)) imgs_base_path = _BASE_PHOTOS_EXTERN else: print("\t> %i personal photo(s) found" % (len_imgs)) imgs_base_path = _BASE_PHOTOS_PERSONAL for elem in img_list: img_url = elem.getAttribute("data-image-url") print("\t\tdownloading photo from %s... \n" % (img_url)) ss.download(img_url, imgs_base_path) print("- " * 61 + " \n") elif node.getAttribute("data-card2-type") == "player": print("\n" + "- " * 61) video = ss.load("https://twitter.com/i/cards/tfw/v1/%s?cardname=player&earned=true"%(tweet_id)) video_title = video.select(".TwitterCard-title").text() video_url = video.select("iframe").getAttribute("src")[0] print("\t> new video '%s' found [ %s ]" % (video_title, video_url)) print("\n" + "- " * 61) min_position = tweet_id has_more_items = j["has_more_items"] and items_html i += 1 except Exception as e: print("------------------------------------------") traceback.print_stack() print("[ error: %s ]" % str(e)) print("[ trying again... ]") time.sleep(5) print("\nprocess finished successfully! =D -- time:", timedelta(seconds=time.time() - _time_start_) , " --") _XML_ = _XML_TEMPLATE % ( _USER_, user_id, user_name, user_joinDate, user_location, user_url, user_tweets, user_following, user_followers, user_favs, user_bio.replace("\r\n", ""), tweet_counter, _XML_TWEETS ) fxml = codecs.open("%s%s.xml" % (_BASE_DIR_, _USER_), "w", "utf-8") fxml.write(_XML_) fxml.close() personal_lang = "" for t in sorted(dict_lang_p.items(), key=lambda k: -k[1])[:2]: personal_lang += "\t%s: %.2f%% (%s)" % (t[0], t[1] / sumc(dict_lang_p) * 100 , t[1]) lang = "" for t in sorted(dict_lang.items(), key=lambda k: -k[1])[:2]: lang += "\t%s: %.2f%% (%s)" % (t[0], t[1] / sumc(dict_lang) * 100, t[1]) mentions_user = "" for t in sorted(dict_mentions_user.items(), key=lambda k: -k[1]): mentions_user += "\t%s(%s)" % t personal_hashtags = "" for t in sorted(dict_hashtag_p.items(), key=lambda k: -k[1]): personal_hashtags += "\t%s: %s\n" % t hashtags = "" for t in sorted(dict_hashtag.items(), key=lambda k: -k[1]): hashtags += "\t%s: %s\n" % t personal_mentions = "" for t in sorted(dict_mentions_p.items(), key=lambda k: -k[1]): personal_mentions += "\t%s: %s\n" % t if t[0] in dict_mentions_user: dict_mentions_mutual[t[0]] += 1 mentions_mutual = "" for t in sorted(dict_mentions_mutual.items(), key=lambda k: -k[1]): mentions_mutual += "\t%s(%s)" % t mentions = "" for t in sorted(dict_mentions.items(), key=lambda k: -k[1]): mentions += "\t%s: %s\n" % t retweets = "" for t in sorted(dict_retweets.items(), key=lambda k: -k[1]): retweets += "\t%s: %s\n" % t output = """ \n\n Overview: --------- Id: %s Name: %s Join Date: %s Biography: %s Location: %s Url: %s Is Famous: %s Tweets: %s Following: %s Followers: %s Favorites: %s Number of tweets captured: %s (%s tweets / %s retweets) -------------------------- > Personal language: %s > Total language: %s > People who has mentioned him: %s > Mutual Mentions: %s > Personal Hashtags: %s > Total Hashtags: %s > Personal Mentions: %s > Total Mentions: %s > Retweets: %s """ try: output = output % ( user_id, user_name, user_joinDate, user_bio.replace("\r\n", ""), user_location, user_url, user_isFamous, user_tweets, user_following, user_followers, user_favs, tweet_counter + retweet_counter, tweet_counter, retweet_counter, personal_lang, lang, mentions_user, mentions_mutual, personal_hashtags, hashtags, 
personal_mentions, mentions, retweets ) except: pass print(output) print("[ tweets saved in %s%s.xml ]" % (_BASE_DIR_, _USER_)) print("[ profile picture saved in %s ]" % _BASE_PHOTOS) print("[ images uploaded by the user saved in %s ]" % _BASE_PHOTOS_PERSONAL) print("[ images retweeted by the user saved in %s ]" % _BASE_PHOTOS_EXTERN) print("[ finised ]\n") ss.scookies.load() document = ss.load("https://twitter.com/login") if document.select("title").text().startswith("Login"): params = { "session[username_or_email]": "", # <- your user "session[password]": "", # <- your password "authenticity_token": document.select("@name=authenticity_token").getAttribute("value")[0], "scribe_log": "", "redirect_after_login": "", "remember_me": "1" } ss.post("/sessions", params) ss.scookies.save() if __name__ == "__main__": scrapes(args.USER)
scrapes
identifier_name
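The scraper above reads each tweet's epoch timestamp from the `data-time` attribute and converts it from UTC into the configured `America/Buenos_Aires` zone before printing it. A minimal sketch of that conversion, using only the standard library plus `python-dateutil` (the surrounding `solidscraper` module is assumed to be a local helper and is not needed here); passing `tz=` to `fromtimestamp` avoids depending on the host machine's local zone:

```python
from datetime import datetime
from dateutil import tz

_TIMEZONE_ = tz.gettz('America/Buenos_Aires')
_UTC_TIMEZONE_ = tz.gettz('UTC')

def to_local_time(unix_seconds):
    # Interpret the raw epoch value as UTC, then convert it to the target zone.
    return datetime.fromtimestamp(unix_seconds, tz=_UTC_TIMEZONE_).astimezone(_TIMEZONE_)

if __name__ == "__main__":
    # Example: 2017-07-14 02:40:00 UTC prints as 23:40:00 the previous day in Buenos Aires.
    print(to_local_time(1500000000).isoformat(" "))
```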
scraper-twitter-user.py
#!/usr/bin/python from __future__ import print_function import solidscraper as ss import traceback import argparse import codecs # utf-8 text files import json import os from collections import defaultdict from datetime import datetime from datetime import timedelta from dateutil import tz import time def toFixed(strn, length): if isinstance(strn, list) or type(strn) == int: strn = str(strn) return (u"{:<%i}" % (length)).format(strn[:length]) def sumc(collection): total = 0 if type(collection) == list or type(collection) == set or type(collection) == tuple: for e in collection: total += e else: for e in collection: total += collection[e] return float(total) parser = argparse.ArgumentParser( description='LIDIC Twitter Scraper v.1.1', epilog=( "Author: Burdisso Sergio (<sergio.burdisso@gmail.com>), Phd. Student. " "LIDIC, Department of Computer Science, National University of San Luis" " (UNSL), San Luis, Argentina." ) ) parser.add_argument('USER', help="target's twitter user name") args = parser.parse_args() # https://en.wikipedia.org/wiki/List_of_tz_database_time_zones _TIMEZONE_ = tz.gettz('America/Buenos_Aires') _UTC_TIMEZONE_ = tz.gettz('UTC') ss.setVerbose(False) ss.scookies.set("lang", "en") ss.setUserAgent(ss.UserAgent.CHROME_LINUX) _XML_TEMPLATE = """\ <?xml version="1.0" encoding="UTF-8" standalone="no"?> <author type="twitter" url="https://twitter.com/%s" id="%s" name="%s" join_date="%s" location="%s" personal_url="%s" tweets="%s" following="%s" followers="%s" favorites="%s" age_group="xx" gender="xx" lang="xx"> <biography> <![CDATA[%s]]> </biography> <documents count="%s">%s </documents> </author>""" _XML_TWEET_TEMPLATE = """ <document id="%s" timestamp="%s" lang="%s" url="https://twitter.com/%s/status/%s"><![CDATA[%s]]></document> """ def scrapes(user): _XML_TWEETS = "" _XML_ = "" _USER_ = user logged_in = True has_more_items = True min_position = "" items_html = "" document = None i = 0 user_id = 0 user_bio = "" user_url = "" user_name = "" user_favs = 0 user_tweets = 0 user_isFamous = False user_location = "" user_joinDate = "" user_following = 0 user_followers = 0 tweet_counter = 0 comments_counter = 0 mention_tweet_counter = 0 url_tweet_counter = 0 retweet_counter = 0 tweet_id = 0 tweet_lang = "" tweet_raw_text = "" tweet_datetime = None tweet_mentions = None tweet_hashtags = None tweet_owner_id = 0 tweet_retweeter = False tweet_timestamp = 0 tweet_owner_name = "" tweet_owner_username = "" dict_mentions_mutual = defaultdict(lambda: 0) dict_mentions_user = defaultdict(lambda: 0) dict_mentions_p = defaultdict(lambda: 0) dict_hashtag_p = defaultdict(lambda: 0) dict_retweets = defaultdict(lambda: 0) dict_mentions = defaultdict(lambda: 0) dict_hashtag = defaultdict(lambda: 0) dict_lang_p = defaultdict(lambda: 0) dict_lang = defaultdict(lambda: 0) _time_start_ = time.time() print("\nAccessing %s profile on twitter.com..." % (_USER_)) error = True while error: try: user_url = "/%s/with_replies" res = ss.get(user_url % (_USER_), redirect=False) if res.status // 100 != 2: print("It looks like you're not logged in, I'll try to collect only what is public") logged_in = False user_url = "/%s" document = ss.load(user_url % (_USER_)) if not document: print("nothing public to bee seen... 
sorry") return error = False except: time.sleep(5) profile = document.select(".ProfileHeaderCard") # user screenname _USER_ = profile.select( ".ProfileHeaderCard-screenname" ).then("a").getAttribute("href") if not _USER_: return _USER_ = _USER_[0][1:] _BASE_DIR_ = "_OUTPUT_/%s/" % (_USER_) _BASE_PHOTOS = _BASE_DIR_ + "photos/" _BASE_PHOTOS_PERSONAL = _BASE_PHOTOS + "personal/" _BASE_PHOTOS_EXTERN = _BASE_PHOTOS + "extern/" try: os.makedirs(_BASE_PHOTOS_PERSONAL) except: pass try: os.makedirs(_BASE_PHOTOS_EXTERN) except: pass # Is Famous user_isFamous = True if profile.select(".Icon--verified") else False # Name user_name = profile.select(".ProfileHeaderCard-name").then("a .ProfileHeaderCard-nameLink").text() # Biography user_bio = profile.select(".ProfileHeaderCard-bio").text() # Location user_location = profile.select(".ProfileHeaderCard-locationText").text() # Url user_url = profile.select(".ProfileHeaderCard-urlText").then("a").getAttribute("title") user_url = user_url[0] if user_url else "" # Join Date user_joinDate = profile.select(".ProfileHeaderCard-joinDateText").getAttribute("title") user_joinDate = user_joinDate[0] if user_joinDate else "" profileNav = document.select(".ProfileNav") # user id user_id = profileNav.getAttribute("data-user-id")[0] # tweets user_tweets = profileNav.select(".ProfileNav-item--tweets").then("a").getAttribute("title") user_tweets = user_tweets[0].split(" ")[0].replace(",", "") if user_tweets else 0 # following user_following = profileNav.select(".ProfileNav-item--following").then("a").getAttribute("title") user_following = user_following[0].split(" ")[0].replace(",", "") if user_following else 0 # followers user_followers = profileNav.select(".ProfileNav-item--followers").then("a").getAttribute("title") user_followers = user_followers[0].split(" ")[0].replace(",", "") if user_followers else 0 # favorites user_favs = profileNav.select(".ProfileNav-item--favorites").then("a").getAttribute("title") if user_favs: user_favs = user_favs[0].split(" ")[0].replace(",", "") else:
user_profilePic = document.select(".ProfileAvatar").andThen("img").getAttribute("src")[0] print("\n> downloading profile picture...") ss.download(user_profilePic, _BASE_PHOTOS) print("\n\nAbout to start downloading user's timeline:") timeline_url = "https://twitter.com/i/profiles/show/%s/timeline/" % (_USER_) timeline_url += "%s?include_available_features=1&include_entities=1" % ("with_replies" if logged_in else "tweets") while has_more_items: try: print("\n> downloading timeline chunk [ %s of %s tweets so far, max_position=%s]... \n" % (tweet_counter + retweet_counter, user_tweets, min_position)) if not min_position: r = ss.get(timeline_url) if not r: break else: r = ss.get(timeline_url + "&max_position=%s" % min_position) if not r: break try: j = json.loads(r.body) except: print("[*] Error while trying to parse the JSON response, aborting...") has_more_items = False break items_html = j["items_html"].encode("utf8") document = ss.parse(items_html) items_html = document.select("li") for node in items_html: node = node.select("@data-tweet-id") if node: node = node[0] else: continue tweet_id = node.getAttribute("data-tweet-id") tweet_owner_id = node.getAttribute("data-user-id") tweet_owner_username = node.getAttribute("data-screen-name") tweet_owner_name = node.getAttribute("data-name") tweet_retweeter = node.getAttribute("data-retweeter") tweet_mentions = node.getAttribute("data-mentions") tweet_mentions = tweet_mentions.split() if tweet_mentions else [] tweet_raw_text = node.select(".tweet-text").text() tweet_lang = node.select(".tweet-text").getAttribute("lang") tweet_lang = tweet_lang[0] if tweet_lang else "" tweet_timestamp = int(node.select("@data-time-ms").getAttribute("data-time")[0]) tweet_hashtags = [] tweet_iscomment = node.getAttribute("data-is-reply-to") == "true" for node_hashtag in node.select(".twitter-hashtag"): hashtag = node_hashtag.text().upper().replace("#.\n", "") tweet_hashtags.append(hashtag) dict_hashtag[hashtag] += 1 if not tweet_retweeter: dict_hashtag_p[hashtag] += 1 tweet_links = [link for link in node.select(".tweet-text").then("a").getAttribute("href") if link.startswith("http")] # updating counters tweet_owner_username = tweet_owner_username.upper() for uname in tweet_mentions: if uname.upper() == _USER_.upper(): dict_mentions_user[tweet_owner_username] += 1 tweet_datetime = datetime.fromtimestamp(tweet_timestamp).replace(tzinfo=_UTC_TIMEZONE_).astimezone(_TIMEZONE_) for usermen in tweet_mentions: dict_mentions[usermen.upper()] += 1 if not tweet_retweeter: dict_mentions_p[usermen.upper()] += 1 dict_lang[tweet_lang] += 1 if tweet_retweeter: retweet_counter += 1 dict_retweets[tweet_owner_username] += 1 else: if tweet_owner_id == user_id: dict_lang_p[tweet_lang] += 1 # updating counters tweet_counter += 1 if tweet_iscomment: comments_counter += 1 if len(tweet_mentions): mention_tweet_counter += 1 if len(tweet_links): url_tweet_counter += 1 _XML_TWEETS += _XML_TWEET_TEMPLATE % ( tweet_id, tweet_timestamp, tweet_lang, _USER_, tweet_id, tweet_raw_text ) print( "|%s |%s[%s]%s\t|%s |%s |%s |%s |%s" % ( toFixed(tweet_datetime.isoformat(" "), 16), tweet_id, tweet_lang, "r" if tweet_retweeter else ("c" if tweet_iscomment else ""), toFixed(tweet_owner_id, 10), toFixed(tweet_owner_username, 16), toFixed(tweet_owner_name, 19), toFixed(tweet_mentions + tweet_hashtags, 10), toFixed(tweet_raw_text, 54) + "..." 
) ) if len(node.select("@data-image-url")): img_list = node.select("@data-image-url") len_imgs = len(img_list) print("\n" + "- " * 61) if tweet_retweeter: print("\t> %i extern photo found" % (len_imgs)) imgs_base_path = _BASE_PHOTOS_EXTERN else: print("\t> %i personal photo(s) found" % (len_imgs)) imgs_base_path = _BASE_PHOTOS_PERSONAL for elem in img_list: img_url = elem.getAttribute("data-image-url") print("\t\tdownloading photo from %s... \n" % (img_url)) ss.download(img_url, imgs_base_path) print("- " * 61 + " \n") elif node.getAttribute("data-card2-type") == "player": print("\n" + "- " * 61) video = ss.load("https://twitter.com/i/cards/tfw/v1/%s?cardname=player&earned=true"%(tweet_id)) video_title = video.select(".TwitterCard-title").text() video_url = video.select("iframe").getAttribute("src")[0] print("\t> new video '%s' found [ %s ]" % (video_title, video_url)) print("\n" + "- " * 61) min_position = tweet_id has_more_items = j["has_more_items"] and items_html i += 1 except Exception as e: print("------------------------------------------") traceback.print_stack() print("[ error: %s ]" % str(e)) print("[ trying again... ]") time.sleep(5) print("\nprocess finished successfully! =D -- time:", timedelta(seconds=time.time() - _time_start_) , " --") _XML_ = _XML_TEMPLATE % ( _USER_, user_id, user_name, user_joinDate, user_location, user_url, user_tweets, user_following, user_followers, user_favs, user_bio.replace("\r\n", ""), tweet_counter, _XML_TWEETS ) fxml = codecs.open("%s%s.xml" % (_BASE_DIR_, _USER_), "w", "utf-8") fxml.write(_XML_) fxml.close() personal_lang = "" for t in sorted(dict_lang_p.items(), key=lambda k: -k[1])[:2]: personal_lang += "\t%s: %.2f%% (%s)" % (t[0], t[1] / sumc(dict_lang_p) * 100 , t[1]) lang = "" for t in sorted(dict_lang.items(), key=lambda k: -k[1])[:2]: lang += "\t%s: %.2f%% (%s)" % (t[0], t[1] / sumc(dict_lang) * 100, t[1]) mentions_user = "" for t in sorted(dict_mentions_user.items(), key=lambda k: -k[1]): mentions_user += "\t%s(%s)" % t personal_hashtags = "" for t in sorted(dict_hashtag_p.items(), key=lambda k: -k[1]): personal_hashtags += "\t%s: %s\n" % t hashtags = "" for t in sorted(dict_hashtag.items(), key=lambda k: -k[1]): hashtags += "\t%s: %s\n" % t personal_mentions = "" for t in sorted(dict_mentions_p.items(), key=lambda k: -k[1]): personal_mentions += "\t%s: %s\n" % t if t[0] in dict_mentions_user: dict_mentions_mutual[t[0]] += 1 mentions_mutual = "" for t in sorted(dict_mentions_mutual.items(), key=lambda k: -k[1]): mentions_mutual += "\t%s(%s)" % t mentions = "" for t in sorted(dict_mentions.items(), key=lambda k: -k[1]): mentions += "\t%s: %s\n" % t retweets = "" for t in sorted(dict_retweets.items(), key=lambda k: -k[1]): retweets += "\t%s: %s\n" % t output = """ \n\n Overview: --------- Id: %s Name: %s Join Date: %s Biography: %s Location: %s Url: %s Is Famous: %s Tweets: %s Following: %s Followers: %s Favorites: %s Number of tweets captured: %s (%s tweets / %s retweets) -------------------------- > Personal language: %s > Total language: %s > People who has mentioned him: %s > Mutual Mentions: %s > Personal Hashtags: %s > Total Hashtags: %s > Personal Mentions: %s > Total Mentions: %s > Retweets: %s """ try: output = output % ( user_id, user_name, user_joinDate, user_bio.replace("\r\n", ""), user_location, user_url, user_isFamous, user_tweets, user_following, user_followers, user_favs, tweet_counter + retweet_counter, tweet_counter, retweet_counter, personal_lang, lang, mentions_user, mentions_mutual, personal_hashtags, hashtags, 
personal_mentions, mentions, retweets ) except: pass print(output) print("[ tweets saved in %s%s.xml ]" % (_BASE_DIR_, _USER_)) print("[ profile picture saved in %s ]" % _BASE_PHOTOS) print("[ images uploaded by the user saved in %s ]" % _BASE_PHOTOS_PERSONAL) print("[ images retweeted by the user saved in %s ]" % _BASE_PHOTOS_EXTERN) print("[ finised ]\n") ss.scookies.load() document = ss.load("https://twitter.com/login") if document.select("title").text().startswith("Login"): params = { "session[username_or_email]": "", # <- your user "session[password]": "", # <- your password "authenticity_token": document.select("@name=authenticity_token").getAttribute("value")[0], "scribe_log": "", "redirect_after_login": "", "remember_me": "1" } ss.post("/sessions", params) ss.scookies.save() if __name__ == "__main__": scrapes(args.USER)
user_favs = ""
conditional_block
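The completed line above (`user_favs = ""`) is the fallback branch used when the favorites entry is missing from the profile navigation bar. The extraction pattern around it — take the first `title` attribute, keep the leading number, drop the thousands separators — can be sketched on its own; the helper name below is illustrative and not part of the scraper:

```python
def parse_nav_count(titles, default=""):
    # `titles` mirrors what profileNav.select(...).getAttribute("title") returns:
    # a list such as ["1,234 Favorites"], or an empty list when the item is absent.
    if not titles:
        return default
    # Keep only the leading number and strip the thousands separators.
    return titles[0].split(" ")[0].replace(",", "")

assert parse_nav_count(["12,345 Tweets"]) == "12345"
assert parse_nav_count([]) == ""
```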
scraper-twitter-user.py
#!/usr/bin/python from __future__ import print_function import solidscraper as ss import traceback import argparse import codecs # utf-8 text files import json import os from collections import defaultdict from datetime import datetime from datetime import timedelta from dateutil import tz import time def toFixed(strn, length):
def sumc(collection): total = 0 if type(collection) == list or type(collection) == set or type(collection) == tuple: for e in collection: total += e else: for e in collection: total += collection[e] return float(total) parser = argparse.ArgumentParser( description='LIDIC Twitter Scraper v.1.1', epilog=( "Author: Burdisso Sergio (<sergio.burdisso@gmail.com>), Phd. Student. " "LIDIC, Department of Computer Science, National University of San Luis" " (UNSL), San Luis, Argentina." ) ) parser.add_argument('USER', help="target's twitter user name") args = parser.parse_args() # https://en.wikipedia.org/wiki/List_of_tz_database_time_zones _TIMEZONE_ = tz.gettz('America/Buenos_Aires') _UTC_TIMEZONE_ = tz.gettz('UTC') ss.setVerbose(False) ss.scookies.set("lang", "en") ss.setUserAgent(ss.UserAgent.CHROME_LINUX) _XML_TEMPLATE = """\ <?xml version="1.0" encoding="UTF-8" standalone="no"?> <author type="twitter" url="https://twitter.com/%s" id="%s" name="%s" join_date="%s" location="%s" personal_url="%s" tweets="%s" following="%s" followers="%s" favorites="%s" age_group="xx" gender="xx" lang="xx"> <biography> <![CDATA[%s]]> </biography> <documents count="%s">%s </documents> </author>""" _XML_TWEET_TEMPLATE = """ <document id="%s" timestamp="%s" lang="%s" url="https://twitter.com/%s/status/%s"><![CDATA[%s]]></document> """ def scrapes(user): _XML_TWEETS = "" _XML_ = "" _USER_ = user logged_in = True has_more_items = True min_position = "" items_html = "" document = None i = 0 user_id = 0 user_bio = "" user_url = "" user_name = "" user_favs = 0 user_tweets = 0 user_isFamous = False user_location = "" user_joinDate = "" user_following = 0 user_followers = 0 tweet_counter = 0 comments_counter = 0 mention_tweet_counter = 0 url_tweet_counter = 0 retweet_counter = 0 tweet_id = 0 tweet_lang = "" tweet_raw_text = "" tweet_datetime = None tweet_mentions = None tweet_hashtags = None tweet_owner_id = 0 tweet_retweeter = False tweet_timestamp = 0 tweet_owner_name = "" tweet_owner_username = "" dict_mentions_mutual = defaultdict(lambda: 0) dict_mentions_user = defaultdict(lambda: 0) dict_mentions_p = defaultdict(lambda: 0) dict_hashtag_p = defaultdict(lambda: 0) dict_retweets = defaultdict(lambda: 0) dict_mentions = defaultdict(lambda: 0) dict_hashtag = defaultdict(lambda: 0) dict_lang_p = defaultdict(lambda: 0) dict_lang = defaultdict(lambda: 0) _time_start_ = time.time() print("\nAccessing %s profile on twitter.com..." % (_USER_)) error = True while error: try: user_url = "/%s/with_replies" res = ss.get(user_url % (_USER_), redirect=False) if res.status // 100 != 2: print("It looks like you're not logged in, I'll try to collect only what is public") logged_in = False user_url = "/%s" document = ss.load(user_url % (_USER_)) if not document: print("nothing public to bee seen... 
sorry") return error = False except: time.sleep(5) profile = document.select(".ProfileHeaderCard") # user screenname _USER_ = profile.select( ".ProfileHeaderCard-screenname" ).then("a").getAttribute("href") if not _USER_: return _USER_ = _USER_[0][1:] _BASE_DIR_ = "_OUTPUT_/%s/" % (_USER_) _BASE_PHOTOS = _BASE_DIR_ + "photos/" _BASE_PHOTOS_PERSONAL = _BASE_PHOTOS + "personal/" _BASE_PHOTOS_EXTERN = _BASE_PHOTOS + "extern/" try: os.makedirs(_BASE_PHOTOS_PERSONAL) except: pass try: os.makedirs(_BASE_PHOTOS_EXTERN) except: pass # Is Famous user_isFamous = True if profile.select(".Icon--verified") else False # Name user_name = profile.select(".ProfileHeaderCard-name").then("a .ProfileHeaderCard-nameLink").text() # Biography user_bio = profile.select(".ProfileHeaderCard-bio").text() # Location user_location = profile.select(".ProfileHeaderCard-locationText").text() # Url user_url = profile.select(".ProfileHeaderCard-urlText").then("a").getAttribute("title") user_url = user_url[0] if user_url else "" # Join Date user_joinDate = profile.select(".ProfileHeaderCard-joinDateText").getAttribute("title") user_joinDate = user_joinDate[0] if user_joinDate else "" profileNav = document.select(".ProfileNav") # user id user_id = profileNav.getAttribute("data-user-id")[0] # tweets user_tweets = profileNav.select(".ProfileNav-item--tweets").then("a").getAttribute("title") user_tweets = user_tweets[0].split(" ")[0].replace(",", "") if user_tweets else 0 # following user_following = profileNav.select(".ProfileNav-item--following").then("a").getAttribute("title") user_following = user_following[0].split(" ")[0].replace(",", "") if user_following else 0 # followers user_followers = profileNav.select(".ProfileNav-item--followers").then("a").getAttribute("title") user_followers = user_followers[0].split(" ")[0].replace(",", "") if user_followers else 0 # favorites user_favs = profileNav.select(".ProfileNav-item--favorites").then("a").getAttribute("title") if user_favs: user_favs = user_favs[0].split(" ")[0].replace(",", "") else: user_favs = "" user_profilePic = document.select(".ProfileAvatar").andThen("img").getAttribute("src")[0] print("\n> downloading profile picture...") ss.download(user_profilePic, _BASE_PHOTOS) print("\n\nAbout to start downloading user's timeline:") timeline_url = "https://twitter.com/i/profiles/show/%s/timeline/" % (_USER_) timeline_url += "%s?include_available_features=1&include_entities=1" % ("with_replies" if logged_in else "tweets") while has_more_items: try: print("\n> downloading timeline chunk [ %s of %s tweets so far, max_position=%s]... 
\n" % (tweet_counter + retweet_counter, user_tweets, min_position)) if not min_position: r = ss.get(timeline_url) if not r: break else: r = ss.get(timeline_url + "&max_position=%s" % min_position) if not r: break try: j = json.loads(r.body) except: print("[*] Error while trying to parse the JSON response, aborting...") has_more_items = False break items_html = j["items_html"].encode("utf8") document = ss.parse(items_html) items_html = document.select("li") for node in items_html: node = node.select("@data-tweet-id") if node: node = node[0] else: continue tweet_id = node.getAttribute("data-tweet-id") tweet_owner_id = node.getAttribute("data-user-id") tweet_owner_username = node.getAttribute("data-screen-name") tweet_owner_name = node.getAttribute("data-name") tweet_retweeter = node.getAttribute("data-retweeter") tweet_mentions = node.getAttribute("data-mentions") tweet_mentions = tweet_mentions.split() if tweet_mentions else [] tweet_raw_text = node.select(".tweet-text").text() tweet_lang = node.select(".tweet-text").getAttribute("lang") tweet_lang = tweet_lang[0] if tweet_lang else "" tweet_timestamp = int(node.select("@data-time-ms").getAttribute("data-time")[0]) tweet_hashtags = [] tweet_iscomment = node.getAttribute("data-is-reply-to") == "true" for node_hashtag in node.select(".twitter-hashtag"): hashtag = node_hashtag.text().upper().replace("#.\n", "") tweet_hashtags.append(hashtag) dict_hashtag[hashtag] += 1 if not tweet_retweeter: dict_hashtag_p[hashtag] += 1 tweet_links = [link for link in node.select(".tweet-text").then("a").getAttribute("href") if link.startswith("http")] # updating counters tweet_owner_username = tweet_owner_username.upper() for uname in tweet_mentions: if uname.upper() == _USER_.upper(): dict_mentions_user[tweet_owner_username] += 1 tweet_datetime = datetime.fromtimestamp(tweet_timestamp).replace(tzinfo=_UTC_TIMEZONE_).astimezone(_TIMEZONE_) for usermen in tweet_mentions: dict_mentions[usermen.upper()] += 1 if not tweet_retweeter: dict_mentions_p[usermen.upper()] += 1 dict_lang[tweet_lang] += 1 if tweet_retweeter: retweet_counter += 1 dict_retweets[tweet_owner_username] += 1 else: if tweet_owner_id == user_id: dict_lang_p[tweet_lang] += 1 # updating counters tweet_counter += 1 if tweet_iscomment: comments_counter += 1 if len(tweet_mentions): mention_tweet_counter += 1 if len(tweet_links): url_tweet_counter += 1 _XML_TWEETS += _XML_TWEET_TEMPLATE % ( tweet_id, tweet_timestamp, tweet_lang, _USER_, tweet_id, tweet_raw_text ) print( "|%s |%s[%s]%s\t|%s |%s |%s |%s |%s" % ( toFixed(tweet_datetime.isoformat(" "), 16), tweet_id, tweet_lang, "r" if tweet_retweeter else ("c" if tweet_iscomment else ""), toFixed(tweet_owner_id, 10), toFixed(tweet_owner_username, 16), toFixed(tweet_owner_name, 19), toFixed(tweet_mentions + tweet_hashtags, 10), toFixed(tweet_raw_text, 54) + "..." ) ) if len(node.select("@data-image-url")): img_list = node.select("@data-image-url") len_imgs = len(img_list) print("\n" + "- " * 61) if tweet_retweeter: print("\t> %i extern photo found" % (len_imgs)) imgs_base_path = _BASE_PHOTOS_EXTERN else: print("\t> %i personal photo(s) found" % (len_imgs)) imgs_base_path = _BASE_PHOTOS_PERSONAL for elem in img_list: img_url = elem.getAttribute("data-image-url") print("\t\tdownloading photo from %s... 
\n" % (img_url)) ss.download(img_url, imgs_base_path) print("- " * 61 + " \n") elif node.getAttribute("data-card2-type") == "player": print("\n" + "- " * 61) video = ss.load("https://twitter.com/i/cards/tfw/v1/%s?cardname=player&earned=true"%(tweet_id)) video_title = video.select(".TwitterCard-title").text() video_url = video.select("iframe").getAttribute("src")[0] print("\t> new video '%s' found [ %s ]" % (video_title, video_url)) print("\n" + "- " * 61) min_position = tweet_id has_more_items = j["has_more_items"] and items_html i += 1 except Exception as e: print("------------------------------------------") traceback.print_stack() print("[ error: %s ]" % str(e)) print("[ trying again... ]") time.sleep(5) print("\nprocess finished successfully! =D -- time:", timedelta(seconds=time.time() - _time_start_) , " --") _XML_ = _XML_TEMPLATE % ( _USER_, user_id, user_name, user_joinDate, user_location, user_url, user_tweets, user_following, user_followers, user_favs, user_bio.replace("\r\n", ""), tweet_counter, _XML_TWEETS ) fxml = codecs.open("%s%s.xml" % (_BASE_DIR_, _USER_), "w", "utf-8") fxml.write(_XML_) fxml.close() personal_lang = "" for t in sorted(dict_lang_p.items(), key=lambda k: -k[1])[:2]: personal_lang += "\t%s: %.2f%% (%s)" % (t[0], t[1] / sumc(dict_lang_p) * 100 , t[1]) lang = "" for t in sorted(dict_lang.items(), key=lambda k: -k[1])[:2]: lang += "\t%s: %.2f%% (%s)" % (t[0], t[1] / sumc(dict_lang) * 100, t[1]) mentions_user = "" for t in sorted(dict_mentions_user.items(), key=lambda k: -k[1]): mentions_user += "\t%s(%s)" % t personal_hashtags = "" for t in sorted(dict_hashtag_p.items(), key=lambda k: -k[1]): personal_hashtags += "\t%s: %s\n" % t hashtags = "" for t in sorted(dict_hashtag.items(), key=lambda k: -k[1]): hashtags += "\t%s: %s\n" % t personal_mentions = "" for t in sorted(dict_mentions_p.items(), key=lambda k: -k[1]): personal_mentions += "\t%s: %s\n" % t if t[0] in dict_mentions_user: dict_mentions_mutual[t[0]] += 1 mentions_mutual = "" for t in sorted(dict_mentions_mutual.items(), key=lambda k: -k[1]): mentions_mutual += "\t%s(%s)" % t mentions = "" for t in sorted(dict_mentions.items(), key=lambda k: -k[1]): mentions += "\t%s: %s\n" % t retweets = "" for t in sorted(dict_retweets.items(), key=lambda k: -k[1]): retweets += "\t%s: %s\n" % t output = """ \n\n Overview: --------- Id: %s Name: %s Join Date: %s Biography: %s Location: %s Url: %s Is Famous: %s Tweets: %s Following: %s Followers: %s Favorites: %s Number of tweets captured: %s (%s tweets / %s retweets) -------------------------- > Personal language: %s > Total language: %s > People who has mentioned him: %s > Mutual Mentions: %s > Personal Hashtags: %s > Total Hashtags: %s > Personal Mentions: %s > Total Mentions: %s > Retweets: %s """ try: output = output % ( user_id, user_name, user_joinDate, user_bio.replace("\r\n", ""), user_location, user_url, user_isFamous, user_tweets, user_following, user_followers, user_favs, tweet_counter + retweet_counter, tweet_counter, retweet_counter, personal_lang, lang, mentions_user, mentions_mutual, personal_hashtags, hashtags, personal_mentions, mentions, retweets ) except: pass print(output) print("[ tweets saved in %s%s.xml ]" % (_BASE_DIR_, _USER_)) print("[ profile picture saved in %s ]" % _BASE_PHOTOS) print("[ images uploaded by the user saved in %s ]" % _BASE_PHOTOS_PERSONAL) print("[ images retweeted by the user saved in %s ]" % _BASE_PHOTOS_EXTERN) print("[ finised ]\n") ss.scookies.load() document = ss.load("https://twitter.com/login") if 
document.select("title").text().startswith("Login"): params = { "session[username_or_email]": "", # <- your user "session[password]": "", # <- your password "authenticity_token": document.select("@name=authenticity_token").getAttribute("value")[0], "scribe_log": "", "redirect_after_login": "", "remember_me": "1" } ss.post("/sessions", params) ss.scookies.save() if __name__ == "__main__": scrapes(args.USER)
    if isinstance(strn, list) or type(strn) == int:
        strn = str(strn)
    return (u"{:<%i}" % (length)).format(strn[:length])
identifier_body
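For reference, the `toFixed` body reconstructed above coerces lists and integers to `str`, then left-aligns the text in a column of exactly `length` characters, truncating when needed — this is what keeps the per-tweet log lines aligned. A standalone sketch with a few usage checks (renamed `to_fixed` here to avoid clashing with the original):

```python
def to_fixed(value, length):
    # Coerce non-string values (mention/hashtag lists, numeric ids) to text,
    # then pad or truncate so the result is exactly `length` characters wide.
    if isinstance(value, (list, int)):
        value = str(value)
    return (u"{:<%i}" % length).format(value[:length])

assert to_fixed("abc", 5) == "abc  "
assert to_fixed("abcdefgh", 5) == "abcde"
assert len(to_fixed(1234567890123, 10)) == 10
```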
lib.rs
//! A library for analysis of Boolean networks. As of now, the library supports: //! - Regulatory graphs with monotonicity and observability constraints. //! - Boolean networks, possibly with partially unknown and parametrised update functions. //! - Full SBML-qual support for import/export as well as custom string format `.aeon`. //! - Fully symbolic asynchronous state-space generator using BDDs (great overall performance). //! - Semi-symbolic state-space generator, using BDDs used only for the network parameters //! (allows state-level parallelism for smaller networks). //! //! For a quick introduction to Boolean networks and their symbolic manipulation, you can //! check out our [tutorial module](./tutorial/index.html). //! #[macro_use] extern crate lazy_static; extern crate core; use regex::Regex; use std::collections::HashMap; use std::iter::Map; use std::ops::Range; pub mod async_graph; pub mod bdd_params; pub mod biodivine_std; pub mod fixed_points; pub mod sbml; #[cfg(feature = "solver-z3")] pub mod solver_context; pub mod symbolic_async_graph; pub mod tutorial; /// **(internal)** Implements `.aeon` parser for `BooleanNetwork` and `RegulatoryGraph` objects. mod _aeon_parser; /// **(internal)** Methods for manipulating `ModelAnnotation` objects. mod _impl_annotations; /// **(internal)** Utility methods for `BinaryOp`. mod _impl_binary_op; /// **(internal)** Utility methods for `BooleanNetwork`. mod _impl_boolean_network; /// **(internal)** `BooleanNetwork` to `.aeon` string. mod _impl_boolean_network_display; /// **(internal)** Implements experimental `.bnet` parser for `BooleanNetwork`. mod _impl_boolean_network_from_bnet; /// **(internal)** Implements an experimental `.bnet` writer for `BooleanNetwork`. mod _impl_boolean_network_to_bnet; /// **(internal)** All methods implemented by the `ExtendedBoolean` object. mod _impl_extended_boolean; /// **(internal)** Utility methods for `FnUpdate`. mod _impl_fn_update; /// **(internal)** Utility methods for `Parameter`. mod _impl_parameter; /// **(internal)** Utility methods for `ParameterId`. mod _impl_parameter_id; /// **(internal)** Utility methods for `Regulation`. mod _impl_regulation; /// **(internal)** All methods for analysing and manipulating `RegulatoryGraph`. mod _impl_regulatory_graph; /// **(internal)** All methods implemented by the `Space` object. mod _impl_space; /// **(internal)** Utility methods for `Variable`. mod _impl_variable; /// **(internal)** Utility methods for `VariableId`. mod _impl_variable_id; // Re-export data structures used for advanced graph algorithms on `RegulatoryGraph`. pub use _impl_regulatory_graph::signed_directed_graph::SdGraph; pub use _impl_regulatory_graph::signed_directed_graph::Sign; /// **(internal)** A regex string of an identifier which we currently allow to appear /// as a variable or parameter name. const ID_REGEX_STR: &str = r"[a-zA-Z0-9_]+"; lazy_static! { /// A regular expression that matches the identifiers allowed as names of /// Boolean parameters or variables. static ref ID_REGEX: Regex = Regex::new(ID_REGEX_STR).unwrap(); } /// A type-safe index of a `Variable` inside a `RegulatoryGraph` (or a `BooleanNetwork`). /// /// If needed, it can be converted into `usize` for serialisation and safely read /// again by providing the original `RegulatoryGraph` as context /// to the `VariableId::try_from_usize`. /// /// **Warning:** Do not mix type-safe indices between different networks/graphs! 
#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] pub struct VariableId(usize); /// A type-safe index of a `Parameter` inside a `BooleanNetwork`. /// /// If needed, it can be converted into `usize` for serialisation and safely read /// again by providing the original `BooleanNetwork` as context /// to the `ParameterId::try_from_usize`. /// /// **Warning:** Do not mix type-safe indices between different networks! #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] pub struct ParameterId(usize); /// Possible monotonous effects of a `Regulation` in a `RegulatoryGraph`. /// /// Activation means positive and inhibition means negative monotonicity. #[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)] pub enum Monotonicity { Activation, Inhibition, } /// A Boolean variable of a `RegulatoryGraph` (or a `BooleanNetwork`) with a given `name`. /// /// `Variable` can be only created by and borrowed from a `RegulatoryGraph`. /// It has no public constructor. #[derive(Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] pub struct Variable { name: String, } /// An explicit parameter of a `BooleanNetwork`; an uninterpreted Boolean function with a given /// `name` and `arity`. /// /// `Parameter` can be only created by and borrowed form the `BooleanNetwork` itself. /// It has no public constructor. #[derive(Clone, Debug, Eq, Hash, PartialEq)] pub struct
{ name: String, arity: u32, } /// Describes an interaction between two `Variables` in a `RegulatoryGraph` /// (or a `BooleanNetwork`). /// /// Every regulation can be *monotonous*, and can be set as *observable*: /// /// - Monotonicity is either *positive* or *negative* and signifies that the influence of the /// `regulator` on the `target` has to *increase* or *decrease* the `target` value respectively. /// - If observability is set to `true`, the `regulator` *must* have influence on the outcome /// of the `target` update function in *some* context. If set to false, this is not enforced /// (i.e. the `regulator` *can* have an influence on the `target`, but it is not required). /// /// Regulations can be represented as strings in the /// form `"regulator_name 'relationship' target_name"`. The 'relationship' starts with `-`, which /// is followed by `>` for activation (positive monotonicity), `|` for inhibition (negative /// monotonicity) or `?` for unspecified monotonicity. Finally, an additional `?` at the end /// of 'relationship' signifies a non-observable regulation. Together, this gives the /// following options: `->, ->?, -|, -|?, -?, -??`. /// /// Regulations cannot be created directly, they are only borrowed from a `RegulatoryGraph` /// or a `BooleanNetwork`. #[derive(Clone, Debug, Eq, Hash, PartialEq)] pub struct Regulation { regulator: VariableId, target: VariableId, observable: bool, monotonicity: Option<Monotonicity>, } /// A directed graph representing relationships between a collection of Boolean variables /// using `Regulations`. /// /// It can be explored using `regulators`, `targets`, `transitive_regulators`, or /// `transitive_targets` (for example to determine if two variables depend on each other). /// We can also compute the SCCs of this graph. /// /// A regulatory graph can be described using a custom string format. In this format, /// each line represents a regulation or a comment (starting with `#`). /// /// Regulations can be represented as strings in the form of /// `"regulator_name 'relationship' target_name"`. The 'relationship' is one of the arrow strings /// `->, ->?, -|, -|?, -?, -??`. Here, `>` means activation, `|` is inhibition and `?` is /// unspecified monotonicity. The last question mark signifies observability — if it is present, /// the regulation is not necessarily observable. See `Regulation` and tutorial module for a more /// detailed explanation. /// /// Example of a `RegulatoryGraph`: /// /// ```rg /// # Regulators of a /// a ->? a /// b -|? a /// /// # Regulators of b /// a -> b /// b -| b /// ``` #[derive(Clone, Debug)] pub struct RegulatoryGraph { variables: Vec<Variable>, regulations: Vec<Regulation>, variable_to_index: HashMap<String, VariableId>, } /// Possible binary Boolean operators that can appear in `FnUpdate`. #[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)] pub enum BinaryOp { And, Or, Xor, Iff, Imp, } /// A Boolean update function formula which references /// `Variables` and `Parameters` of a `BooleanNetwork`. /// /// An update function specifies the evolution rules for one specific `Variable` of a /// `BooleanNetwork`. The arguments used in the function must be the same as specified /// by the `RegulatoryGraph` of the network. #[derive(Clone, Debug, Eq, Hash, PartialEq)] pub enum FnUpdate { /// A true/false constant. Const(bool), /// References a network variable. Var(VariableId), /// References a network parameter (uninterpreted function). /// /// The variable list are the arguments of the function invocation. 
Param(ParameterId, Vec<VariableId>), /// Negation. Not(Box<FnUpdate>), /// Binary boolean operation. Binary(BinaryOp, Box<FnUpdate>, Box<FnUpdate>), } /// A Boolean network, possibly parametrised with uninterpreted Boolean functions. /// /// The structure of the network is based on an underlying `RegulatoryGraph`. However, /// compared to a `RegulatoryGraph`, `BooleanNetwork` can have a specific update function /// given for each variable. /// /// If the function is not specified (so called *implicit parametrisation*), all admissible /// Boolean functions are considered in its place. A function can be also only partially /// specified by using declared *explicit parameters*. These are uninterpreted but named Boolean /// functions, such that, again, all admissible instantiations of these functions are considered. /// See crate tutorial to learn more. /// /// ### Boolean network equivalence /// /// Please keep in mind that we consider two networks to be equivalent when they share a regulatory /// graph, and when they have (syntactically) the same update functions and parameters. We do not /// perform any semantic checks for whether the update functions are functionally equivalent. /// /// Also keep in mind that the *ordering* of variables and parameters must be shared by equivalent /// networks. This is because we want to preserve the property that `VariableId` and `ParameterId` /// objects are interchangeable as log as networks are equivalent. #[derive(Clone, Debug, Eq, PartialEq)] pub struct BooleanNetwork { graph: RegulatoryGraph, parameters: Vec<Parameter>, update_functions: Vec<Option<FnUpdate>>, parameter_to_index: HashMap<String, ParameterId>, } /// An iterator over all `VariableIds` of a `RegulatoryGraph` (or a `BooleanNetwork`). pub type VariableIdIterator = Map<Range<usize>, fn(usize) -> VariableId>; /// An iterator over all `ParameterIds` of a `BooleanNetwork`. pub type ParameterIdIterator = Map<Range<usize>, fn(usize) -> ParameterId>; /// An iterator over all `Regulations` of a `RegulatoryGraph`. pub type RegulationIterator<'a> = std::slice::Iter<'a, Regulation>; /// An enum representing the possible state of each variable when describing a hypercube. #[derive(Copy, Clone, Eq, PartialEq, Hash)] pub enum ExtendedBoolean { Zero, One, Any, } /// `Space` represents a hypercube (multi-dimensional rectangle) in the Boolean state space. /// /// Keep in mind that there is no way of representing an empty hypercube at the moment. So any API /// that can take/return an empty set has to use `Option<Space>` or something similar. #[derive(Clone, PartialEq, Eq, Hash, Debug)] pub struct Space(Vec<ExtendedBoolean>); /// Annotations are "meta" objects that can be declared as part of AEON models to add additional /// properties that are not directly recognized by the main AEON toolbox. /// /// Annotations are comments which start with `#!`. After the `#!` "preamble", each annotation /// can contains a "path prefix" with path segments separated using `:` (path segments can be /// surrounded by white space that is automatically trimmed). Based on these path /// segments, the parser will create an annotation tree. If there are multiple annotations with /// the same path, their values are concatenated using newlines. /// /// For example, annotations can be used to describe model layout: /// /// ```text /// #! layout : var_1 : 10,20 /// #! 
layout : var_2 : 14,-3 /// ``` /// /// Another usage for annotations are extra properties enforced on the model behaviour, for /// example through CTL: /// ```test /// #! property : AG (problem => AF apoptosis) /// ``` /// /// Obviously, you can also use annotations to specify model metadata: /// ```text /// #! name: My Awesome Model /// #! description: This model describes ... /// #! description:var_1: This variable describes ... /// ``` /// /// You can use "empty" path (e.g. `#! is_multivalued`), and you can use an empty annotation /// value with a non-empty path (e.g. `#!is_multivalued:var_1:`). Though this is not particularly /// encouraged: it is better to just have `var_1` as the annotation value if you can do that. /// An exception to this may be a case where `is_multivalued:var_1:` has an "optional" value and /// you want to express that while the "key" is provided, but the "value" is missing. Similarly, for /// the sake of completeness, it is technically allowed to use empty path names (e.g. `a::b:value` /// translates to `["a", "", "b"] = "value"`), but it is discouraged. /// /// Note that the path segments should only contain alphanumeric characters and underscores, /// but can be escaped using backticks (`` ` ``; other backticks in path segments are not allowed). /// Similarly, annotation values cannot contain colons (path segment separators) or backticks, /// unless escaped with `` #`ACTUAL_STRING`# ``. You can also use escaping if you wish to /// retain whitespace around annotation values. As mentioned, multi-line values can be split /// into multiple annotation comments. #[derive(PartialEq, Eq, Clone)] pub struct ModelAnnotation { value: Option<String>, inner: HashMap<String, ModelAnnotation>, }
Parameter
identifier_name
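The `Regulation` documentation in the `lib.rs` excerpt above spells out the textual arrow forms `->, ->?, -|, -|?, -?, -??`. Purely as an illustration of that grammar (this is not the crate's parser, and Python is used only to stay consistent with the other examples in this collection), a regulation line can be split into its parts like so:

```python
import re

# Illustrative parser for lines such as "a ->? b" or "b -| b".
# The identifier pattern mirrors the crate's ID_REGEX ([a-zA-Z0-9_]+).
_REGULATION = re.compile(
    r"^\s*(?P<regulator>[a-zA-Z0-9_]+)\s*"
    r"-(?P<monotonicity>[>|?])(?P<non_observable>\??)\s*"
    r"(?P<target>[a-zA-Z0-9_]+)\s*$"
)

def parse_regulation(line):
    m = _REGULATION.match(line)
    if not m:
        return None
    mono = {">": "activation", "|": "inhibition", "?": None}[m.group("monotonicity")]
    return {
        "regulator": m.group("regulator"),
        "target": m.group("target"),
        "monotonicity": mono,
        # A trailing '?' means the regulation is *not* required to be observable.
        "observable": m.group("non_observable") != "?",
    }

assert parse_regulation("a ->? a") == {
    "regulator": "a", "target": "a", "monotonicity": "activation", "observable": False,
}
assert parse_regulation("b -| b")["monotonicity"] == "inhibition"
```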
lib.rs
//! A library for analysis of Boolean networks. As of now, the library supports: //! - Regulatory graphs with monotonicity and observability constraints. //! - Boolean networks, possibly with partially unknown and parametrised update functions. //! - Full SBML-qual support for import/export as well as custom string format `.aeon`. //! - Fully symbolic asynchronous state-space generator using BDDs (great overall performance). //! - Semi-symbolic state-space generator, using BDDs used only for the network parameters //! (allows state-level parallelism for smaller networks). //! //! For a quick introduction to Boolean networks and their symbolic manipulation, you can //! check out our [tutorial module](./tutorial/index.html). //! #[macro_use] extern crate lazy_static; extern crate core; use regex::Regex; use std::collections::HashMap; use std::iter::Map; use std::ops::Range; pub mod async_graph; pub mod bdd_params; pub mod biodivine_std; pub mod fixed_points; pub mod sbml; #[cfg(feature = "solver-z3")] pub mod solver_context; pub mod symbolic_async_graph; pub mod tutorial; /// **(internal)** Implements `.aeon` parser for `BooleanNetwork` and `RegulatoryGraph` objects. mod _aeon_parser; /// **(internal)** Methods for manipulating `ModelAnnotation` objects. mod _impl_annotations; /// **(internal)** Utility methods for `BinaryOp`. mod _impl_binary_op; /// **(internal)** Utility methods for `BooleanNetwork`. mod _impl_boolean_network; /// **(internal)** `BooleanNetwork` to `.aeon` string. mod _impl_boolean_network_display; /// **(internal)** Implements experimental `.bnet` parser for `BooleanNetwork`. mod _impl_boolean_network_from_bnet; /// **(internal)** Implements an experimental `.bnet` writer for `BooleanNetwork`. mod _impl_boolean_network_to_bnet; /// **(internal)** All methods implemented by the `ExtendedBoolean` object. mod _impl_extended_boolean; /// **(internal)** Utility methods for `FnUpdate`. mod _impl_fn_update; /// **(internal)** Utility methods for `Parameter`. mod _impl_parameter; /// **(internal)** Utility methods for `ParameterId`. mod _impl_parameter_id; /// **(internal)** Utility methods for `Regulation`. mod _impl_regulation; /// **(internal)** All methods for analysing and manipulating `RegulatoryGraph`. mod _impl_regulatory_graph; /// **(internal)** All methods implemented by the `Space` object. mod _impl_space; /// **(internal)** Utility methods for `Variable`. mod _impl_variable; /// **(internal)** Utility methods for `VariableId`. mod _impl_variable_id; // Re-export data structures used for advanced graph algorithms on `RegulatoryGraph`. pub use _impl_regulatory_graph::signed_directed_graph::SdGraph; pub use _impl_regulatory_graph::signed_directed_graph::Sign; /// **(internal)** A regex string of an identifier which we currently allow to appear /// as a variable or parameter name. const ID_REGEX_STR: &str = r"[a-zA-Z0-9_]+"; lazy_static! { /// A regular expression that matches the identifiers allowed as names of /// Boolean parameters or variables. static ref ID_REGEX: Regex = Regex::new(ID_REGEX_STR).unwrap(); } /// A type-safe index of a `Variable` inside a `RegulatoryGraph` (or a `BooleanNetwork`). /// /// If needed, it can be converted into `usize` for serialisation and safely read /// again by providing the original `RegulatoryGraph` as context /// to the `VariableId::try_from_usize`. /// /// **Warning:** Do not mix type-safe indices between different networks/graphs! 
#[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] pub struct VariableId(usize); /// A type-safe index of a `Parameter` inside a `BooleanNetwork`. /// /// If needed, it can be converted into `usize` for serialisation and safely read /// again by providing the original `BooleanNetwork` as context /// to the `ParameterId::try_from_usize`. /// /// **Warning:** Do not mix type-safe indices between different networks! #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] pub struct ParameterId(usize); /// Possible monotonous effects of a `Regulation` in a `RegulatoryGraph`. /// /// Activation means positive and inhibition means negative monotonicity. #[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)] pub enum Monotonicity { Activation, Inhibition, } /// A Boolean variable of a `RegulatoryGraph` (or a `BooleanNetwork`) with a given `name`. /// /// `Variable` can be only created by and borrowed from a `RegulatoryGraph`. /// It has no public constructor. #[derive(Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] pub struct Variable { name: String, } /// An explicit parameter of a `BooleanNetwork`; an uninterpreted Boolean function with a given /// `name` and `arity`. /// /// `Parameter` can be only created by and borrowed form the `BooleanNetwork` itself. /// It has no public constructor. #[derive(Clone, Debug, Eq, Hash, PartialEq)] pub struct Parameter { name: String, arity: u32, } /// Describes an interaction between two `Variables` in a `RegulatoryGraph` /// (or a `BooleanNetwork`). /// /// Every regulation can be *monotonous*, and can be set as *observable*: /// /// - Monotonicity is either *positive* or *negative* and signifies that the influence of the /// `regulator` on the `target` has to *increase* or *decrease* the `target` value respectively. /// - If observability is set to `true`, the `regulator` *must* have influence on the outcome /// of the `target` update function in *some* context. If set to false, this is not enforced /// (i.e. the `regulator` *can* have an influence on the `target`, but it is not required). /// /// Regulations can be represented as strings in the /// form `"regulator_name 'relationship' target_name"`. The 'relationship' starts with `-`, which /// is followed by `>` for activation (positive monotonicity), `|` for inhibition (negative /// monotonicity) or `?` for unspecified monotonicity. Finally, an additional `?` at the end /// of 'relationship' signifies a non-observable regulation. Together, this gives the /// following options: `->, ->?, -|, -|?, -?, -??`. /// /// Regulations cannot be created directly, they are only borrowed from a `RegulatoryGraph` /// or a `BooleanNetwork`. #[derive(Clone, Debug, Eq, Hash, PartialEq)] pub struct Regulation { regulator: VariableId, target: VariableId, observable: bool, monotonicity: Option<Monotonicity>, } /// A directed graph representing relationships between a collection of Boolean variables /// using `Regulations`. /// /// It can be explored using `regulators`, `targets`, `transitive_regulators`, or /// `transitive_targets` (for example to determine if two variables depend on each other). /// We can also compute the SCCs of this graph. /// /// A regulatory graph can be described using a custom string format. In this format, /// each line represents a regulation or a comment (starting with `#`). /// /// Regulations can be represented as strings in the form of /// `"regulator_name 'relationship' target_name"`. 
The 'relationship' is one of the arrow strings /// `->, ->?, -|, -|?, -?, -??`. Here, `>` means activation, `|` is inhibition and `?` is /// unspecified monotonicity. The last question mark signifies observability — if it is present, /// the regulation is not necessarily observable. See `Regulation` and tutorial module for a more /// detailed explanation. /// /// Example of a `RegulatoryGraph`: /// /// ```rg /// # Regulators of a /// a ->? a /// b -|? a /// /// # Regulators of b /// a -> b /// b -| b /// ``` #[derive(Clone, Debug)] pub struct RegulatoryGraph { variables: Vec<Variable>, regulations: Vec<Regulation>, variable_to_index: HashMap<String, VariableId>, } /// Possible binary Boolean operators that can appear in `FnUpdate`. #[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)] pub enum BinaryOp { And, Or, Xor, Iff, Imp, } /// A Boolean update function formula which references /// `Variables` and `Parameters` of a `BooleanNetwork`. /// /// An update function specifies the evolution rules for one specific `Variable` of a /// `BooleanNetwork`. The arguments used in the function must be the same as specified /// by the `RegulatoryGraph` of the network. #[derive(Clone, Debug, Eq, Hash, PartialEq)] pub enum FnUpdate { /// A true/false constant. Const(bool), /// References a network variable. Var(VariableId), /// References a network parameter (uninterpreted function). /// /// The variable list are the arguments of the function invocation. Param(ParameterId, Vec<VariableId>), /// Negation. Not(Box<FnUpdate>), /// Binary boolean operation. Binary(BinaryOp, Box<FnUpdate>, Box<FnUpdate>), } /// A Boolean network, possibly parametrised with uninterpreted Boolean functions. /// /// The structure of the network is based on an underlying `RegulatoryGraph`. However, /// compared to a `RegulatoryGraph`, `BooleanNetwork` can have a specific update function /// given for each variable. /// /// If the function is not specified (so called *implicit parametrisation*), all admissible /// Boolean functions are considered in its place. A function can be also only partially /// specified by using declared *explicit parameters*. These are uninterpreted but named Boolean /// functions, such that, again, all admissible instantiations of these functions are considered. /// See crate tutorial to learn more. /// /// ### Boolean network equivalence /// /// Please keep in mind that we consider two networks to be equivalent when they share a regulatory /// graph, and when they have (syntactically) the same update functions and parameters. We do not /// perform any semantic checks for whether the update functions are functionally equivalent. /// /// Also keep in mind that the *ordering* of variables and parameters must be shared by equivalent /// networks. This is because we want to preserve the property that `VariableId` and `ParameterId` /// objects are interchangeable as log as networks are equivalent. #[derive(Clone, Debug, Eq, PartialEq)] pub struct BooleanNetwork { graph: RegulatoryGraph, parameters: Vec<Parameter>, update_functions: Vec<Option<FnUpdate>>, parameter_to_index: HashMap<String, ParameterId>, } /// An iterator over all `VariableIds` of a `RegulatoryGraph` (or a `BooleanNetwork`). pub type VariableIdIterator = Map<Range<usize>, fn(usize) -> VariableId>; /// An iterator over all `ParameterIds` of a `BooleanNetwork`. pub type ParameterIdIterator = Map<Range<usize>, fn(usize) -> ParameterId>; /// An iterator over all `Regulations` of a `RegulatoryGraph`. 
pub type RegulationIterator<'a> = std::slice::Iter<'a, Regulation>; /// An enum representing the possible state of each variable when describing a hypercube. #[derive(Copy, Clone, Eq, PartialEq, Hash)] pub enum ExtendedBoolean { Zero, One, Any, } /// `Space` represents a hypercube (multi-dimensional rectangle) in the Boolean state space. /// /// Keep in mind that there is no way of representing an empty hypercube at the moment. So any API /// that can take/return an empty set has to use `Option<Space>` or something similar. #[derive(Clone, PartialEq, Eq, Hash, Debug)] pub struct Space(Vec<ExtendedBoolean>); /// Annotations are "meta" objects that can be declared as part of AEON models to add additional /// properties that are not directly recognized by the main AEON toolbox. /// /// Annotations are comments which start with `#!`. After the `#!` "preamble", each annotation /// can contain a "path prefix" with path segments separated using `:` (path segments can be /// surrounded by white space that is automatically trimmed). Based on these path /// segments, the parser will create an annotation tree. If there are multiple annotations with /// the same path, their values are concatenated using newlines. /// /// For example, annotations can be used to describe model layout: /// /// ```text /// #! layout : var_1 : 10,20 /// #! layout : var_2 : 14,-3 /// ``` /// /// Another usage for annotations is extra properties enforced on the model behaviour, for /// example through CTL: /// ```text /// #! property : AG (problem => AF apoptosis) /// ``` /// /// Obviously, you can also use annotations to specify model metadata: /// ```text /// #! name: My Awesome Model /// #! description: This model describes ... /// #! description:var_1: This variable describes ... /// ``` /// /// You can use an "empty" path (e.g. `#! is_multivalued`), and you can use an empty annotation /// value with a non-empty path (e.g. `#!is_multivalued:var_1:`). Though this is not particularly /// encouraged: it is better to just have `var_1` as the annotation value if you can do that. /// An exception to this may be a case where `is_multivalued:var_1:` has an "optional" value and /// you want to express that the "key" is provided but the "value" is missing. Similarly, for /// the sake of completeness, it is technically allowed to use empty path names (e.g. `a::b:value` /// translates to `["a", "", "b"] = "value"`), but it is discouraged. /// /// Note that the path segments should only contain alphanumeric characters and underscores, /// but can be escaped using backticks (`` ` ``; other backticks in path segments are not allowed). /// Similarly, annotation values cannot contain colons (path segment separators) or backticks, /// unless escaped with `` #`ACTUAL_STRING`# ``. You can also use escaping if you wish to /// retain whitespace around annotation values. As mentioned, multi-line values can be split /// into multiple annotation comments. #[derive(PartialEq, Eq, Clone)]
pub struct ModelAnnotation { value: Option<String>, inner: HashMap<String, ModelAnnotation>, }
random_line_split
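The regulation format documented above maps each arrow string to a (monotonicity, observability) pair. As a quick illustration of that mapping only, here is a minimal Go sketch; the `parseArrow` helper is hypothetical and not part of the crate shown above.

```go
// Hypothetical helper that interprets the six documented arrow strings
// `->, ->?, -|, -|?, -?, -??`: `>` = activation, `|` = inhibition,
// `?` = unspecified monotonicity, and a trailing `?` marks the regulation
// as not necessarily observable.
package main

import "fmt"

func parseArrow(arrow string) (monotonicity string, observable bool, err error) {
	switch arrow {
	case "->":
		return "activation", true, nil
	case "->?":
		return "activation", false, nil
	case "-|":
		return "inhibition", true, nil
	case "-|?":
		return "inhibition", false, nil
	case "-?":
		return "unspecified", true, nil
	case "-??":
		return "unspecified", false, nil
	default:
		return "", false, fmt.Errorf("unknown arrow %q", arrow)
	}
}

func main() {
	m, obs, _ := parseArrow("-|?")
	fmt.Println(m, obs) // inhibition false
}
```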
broker.go
package flow import ( "context" "sync" "sync/atomic" "time" ) // MessageHandler represents a callback function for handling incoming messages. // The binary parts of the passed message should be assumed to be valid // only during the function call. It is the handler's responsibility to // copy the data which should be reused. type MessageHandler func(context.Context, Message) // RequestHandler represents a callback function for handling incoming requests. // The binary parts of the passed message should be assumed to be valid // only during the function call. It is the handler's responsibility to // copy the data which should be reused. type RequestHandler func(context.Context, Message) Message type reply struct { data []byte err error } type pendingReply struct { receiver Key replych chan reply timer *time.Timer } // Broker represents a single node within a clique. It enables // the publishing and subscribing capabilities of the pub/sub // system. Each subscribed message is handled by the responsible // broker, which is determined by the respective node key within a // clique. type Broker struct { messagesInFlight uint64 requestsInFlight uint64 shuttingDown uint64 partitionLocks []sync.Mutex clique string ackTimeout time.Duration reqTimeout time.Duration codec Codec onError func(error) routing routingTable pubsub pubsub id uint64 wg sync.WaitGroup leaving chan struct{} pendingRepliesMtx sync.Mutex pendingReplies map[uint64]pendingReply // id => pending reply messageHandlers map[string]MessageHandler // stream => message handler requestHandlers map[string]RequestHandler // stream => request handler } // NewBroker creates a new broker which uses the pub/sub system // for publishing messages and subscribing to streams. // // Because pubsub could possibly be shared between multiple brokers, // the caller of this function is responsible for closing all // connections to the pub/sub system. func NewBroker(ctx context.Context, pubsub PubSub, o ...Option) (*Broker, error) { opts := defaultOptions() if err := opts.apply(o...); err != nil { return nil, err } b := &Broker{ partitionLocks: opts.partitionLocks, clique: opts.clique, ackTimeout: opts.ackTimeout, reqTimeout: opts.reqTimeout, codec: opts.codec, onError: opts.errorHandler, routing: newRoutingTable(opts), pubsub: newPubSub(pubsub, opts), leaving: make(chan struct{}), pendingReplies: make(map[uint64]pendingReply), messageHandlers: opts.messageHandlers, requestHandlers: opts.requestHandlers, } if err := b.pubsub.subscribe(ctx, nodeStream(b.clique, b.routing.local), "", b.processCliqueProtocol); err != nil { b.pubsub.shutdown(ctx) return nil, err } if err := b.pubsub.subscribe(ctx, b.clique, "", b.processCliqueProtocol); err != nil { b.pubsub.shutdown(ctx) return nil, err } for stream := range b.messageHandlers { if err := b.pubsub.subscribe(ctx, stream, b.clique, b.processMessage); err != nil { b.pubsub.shutdown(ctx) return nil, err } } for stream := range b.requestHandlers { if err := b.pubsub.subscribe(ctx, stream, b.clique, b.processRequest); err != nil { b.pubsub.shutdown(ctx) return nil, err } } join := marshalJoin(join{sender: b.routing.local}) if err := b.broadcast(ctx, join); err != nil { b.pubsub.shutdown(ctx) return nil, err } b.wg.Add(1) go b.stabilize(opts.stabilization.Interval) return b, nil } // Close notifies all clique members about a leaving broker and // disconnects from the pub/sub system. 
func (b *Broker) Close() error { return b.shutdown(context.Background(), func() error { return nil }) } // Shutdown gracefully shuts down the broker. It notifies all clique // members about a leaving broker and waits until all messages and // requests are processed. If the given context expires before, the // context's error will be returned. func (b *Broker) Shutdown(ctx context.Context) error { return b.shutdown(ctx, func() error { ticker := time.NewTicker(250 * time.Millisecond) defer ticker.Stop() for atomic.LoadUint64(&b.messagesInFlight) != 0 || atomic.LoadUint64(&b.requestsInFlight) != 0 { select { case <-ctx.Done(): return ctx.Err() case <-ticker.C: } } return nil }) } // Publish forwards the message directly to the pub/sub system. // If the message does not contain any partition key, the message will // be processed by a random broker within a clique. // All binary data of the passed message needs to be valid only during // the method call. func (b *Broker) Publish(ctx context.Context, msg Message) error { if b.isShuttingDown() { return ErrClosed } if err := msg.validate(); err != nil { return err } return b.pubsub.send(ctx, msg.Stream, b.codec.EncodeMessage(msg)) } // Request sends a request message and waits for its response. // If the message has no partition key, the request will be processed // by a random broker within a clique. // All binary data of the passed message needs to be valid only during // the method call. func (b *Broker) Request(ctx context.Context, request Message) (Message, error) { if b.isShuttingDown() { return Message{}, ErrClosed } if err := request.validate(); err != nil { return Message{}, err } reply := b.awaitReply(ctx, b.routing.local, b.reqTimeout, func(ctx context.Context, id uint64) error { return b.pubsub.send(ctx, request.Stream, marshalMsg(msg{ id: id, reply: []byte(nodeStream(b.clique, b.routing.local)), stream: []byte(request.Stream), pkey: request.PartitionKey, data: request.Data, })) }) return Message{Data: reply.data}, reply.err } func (b *Broker) processMessage(ctx context.Context, stream string, data []byte) { atomic.AddUint64(&b.messagesInFlight, 1) defer atomic.AddUint64(&b.messagesInFlight, ^uint64(0)) decoded, err := b.codec.DecodeMessage(stream, data) if err != nil { b.onError(errorf("decode message: %v", err)) return } b.forwardMsg(ctx, msg{ stream: []byte(decoded.Stream), pkey: decoded.PartitionKey, data: decoded.Data, }) } func (b *Broker) processRequest(ctx context.Context, stream string, data []byte) { atomic.AddUint64(&b.requestsInFlight, 1) defer atomic.AddUint64(&b.requestsInFlight, ^uint64(0)) f, err := unmarshalFrame(data) switch { case err != nil: b.onError(errorf("request subscription: %v", err)) return case f.typ() != frameTypeMsg: b.onError(errorf("unexpected request frame type: %s", f.typ())) return } msg, err := unmarshalMsg(f) if err != nil { b.onError(errorf("unmarshal msg: %v", err)) return } b.forwardMsg(ctx, msg) } func (b *Broker) processCliqueProtocol(ctx context.Context, stream string, data []byte) { f, err := unmarshalFrame(data) if err != nil { b.onError(errorf("clique subscription: %v", err)) return } switch f.typ() { case frameTypeJoin: b.handleJoin(ctx, f) case frameTypeLeave: b.handleLeave(f) case frameTypeInfo: b.handleInfo(f) case frameTypePing: b.handlePing(ctx, f) case frameTypeFwd: b.handleFwd(ctx, f) case frameTypeAck: b.handleAck(f) default: b.onError(errorf("unexpected clique frame type: %s", f.typ())) } } func (b *Broker) handleJoin(ctx context.Context, f frame) { join, err := unmarshalJoin(f) 
switch { case err != nil: b.onError(errorf("unmarshal join: %v", err)) return case join.sender == b.routing.local: return } neighbors := b.routing.neighbors() b.routing.registerKey(join.sender) err = b.sendTo(ctx, join.sender, marshalInfo(info{neighbors: neighbors})) if err != nil { b.onError(errorf("send info: %v", err)) } } func (b *Broker) handleLeave(f frame) { leave, err := unmarshalLeave(f) if err != nil { b.onError(errorf("unmarshal leave: %v", err)) return } b.routing.unregister(leave.node) } func (b *Broker) handleInfo(f frame) { info, err := unmarshalInfo(f) if err != nil { b.onError(errorf("unmarshal info: %v", err)) return } b.routing.registerKeys(info.neighbors) b.notifyReply(info.id, reply{}) } func (b *Broker) handlePing(ctx context.Context, f frame) { ping, err := unmarshalPing(f) if err != nil { b.onError(errorf("unmarshal ping: %v", err)) return } err = b.sendTo(ctx, ping.sender, marshalInfo(info{ id: ping.id, neighbors: b.routing.neighbors(), })) if err != nil { b.onError(errorf("send info: %v", err)) } } func (b *Broker) handleFwd(ctx context.Context, f frame) { fwd, err := unmarshalFwd(f) if err != nil { b.onError(errorf("unmarshal fwd: %v", err)) return } err = b.sendTo(ctx, fwd.ack, marshalAck(ack{id: fwd.id})) if err != nil { b.onError(errorf("send ack: %v", err)) } b.forwardMsg(ctx, fwd.msg) } func (b *Broker) handleAck(frame frame) { ack, err := unmarshalAck(frame) if err != nil { b.onError(errorf("unmarshal ack: %v", err)) return } b.notifyReply(ack.id, reply{data: ack.data}) } func (b *Broker) forwardMsg(ctx context.Context, msg msg) { partition := KeyFromBytes(msg.pkey) if len(msg.pkey) == 0 { b.dispatchMsg(ctx, msg, partition) return } for { succ := b.routing.successor(partition) if succ == b.routing.local { b.dispatchMsg(ctx, msg, partition) return } reply := b.awaitReply(ctx, succ, b.ackTimeout, func(ctx context.Context, id uint64) error { return b.sendTo(ctx, succ, marshalFwd(fwd{ id: id, ack: b.routing.local, msg: msg, })) }) if reply.err == nil { return } else if reply.err != ErrTimeout { b.onError(reply.err) return } // The node was suspected and removed from the // valid keys. We look for the next successor // to handle the message. } } func (b *Broker)
(ctx context.Context, msg msg, partition Key) { var lock, slot = sync.Locker(nullLock{}), -1 if len(msg.pkey) != 0 && len(b.partitionLocks) != 0 { slot = int(partition % Key(len(b.partitionLocks))) lock = &b.partitionLocks[slot] } var reply reply if h := b.messageHandlers[string(msg.stream)]; h != nil { lock.Lock() h(ctx, Message{ Stream: string(msg.stream), PartitionKey: msg.pkey, Data: msg.data, slot: slot, }) lock.Unlock() } else if h := b.requestHandlers[string(msg.stream)]; h != nil { lock.Lock() resp := h(ctx, Message{ Stream: string(msg.stream), PartitionKey: msg.pkey, Data: msg.data, slot: slot, }) lock.Unlock() reply.data = resp.Data } else { return } if len(msg.reply) != 0 { ack := ack{id: msg.id, data: reply.data} err := b.pubsub.send(ctx, string(msg.reply), marshalAck(ack)) if err != nil { b.onError(errorf("send ack: %v", err)) } } } func (b *Broker) awaitReply(ctx context.Context, receiver Key, timeout time.Duration, send func(context.Context, uint64) error) reply { id := atomic.AddUint64(&b.id, 1) replych := make(chan reply, 1) b.pendingRepliesMtx.Lock() b.pendingReplies[id] = pendingReply{ receiver: receiver, replych: replych, timer: time.AfterFunc(timeout, func() { b.notifyReply(id, reply{err: ErrTimeout}) }), } b.pendingRepliesMtx.Unlock() if err := send(ctx, id); err != nil { b.notifyReply(id, reply{err: err}) } select { case reply := <-replych: return reply case <-ctx.Done(): reply := reply{err: ctx.Err()} b.notifyReply(id, reply) return reply } } func (b *Broker) notifyReply(id uint64, reply reply) { b.pendingRepliesMtx.Lock() pending, has := b.pendingReplies[id] delete(b.pendingReplies, id) b.pendingRepliesMtx.Unlock() if has { pending.timer.Stop() if reply.err == ErrTimeout && pending.receiver != b.routing.local { b.routing.suspect(pending.receiver) } pending.replych <- reply } } func (b *Broker) sendTo(ctx context.Context, target Key, f frame) error { return b.pubsub.send(ctx, nodeStream(b.clique, target), f) } func (b *Broker) broadcast(ctx context.Context, f frame) error { return b.pubsub.send(ctx, b.clique, f) } func (b *Broker) isShuttingDown() bool { return atomic.LoadUint64(&b.shuttingDown) != 0 } func (b *Broker) shutdown(ctx context.Context, wait func() error) error { atomic.StoreUint64(&b.shuttingDown, 1) close(b.leaving) leave := marshalLeave(leave{node: b.routing.local}) err := b.broadcast(ctx, leave) if waitErr := wait(); waitErr != nil { err = waitErr } b.pubsub.shutdown(ctx) // cancel pending replies b.pendingRepliesMtx.Lock() ids := make([]uint64, 0, len(b.pendingReplies)) for id := range b.pendingReplies { ids = append(ids, id) } b.pendingRepliesMtx.Unlock() for _, id := range ids { b.notifyReply(id, reply{err: ErrClosed}) } b.wg.Wait() return err } func (b *Broker) stabilize(interval time.Duration) { defer b.wg.Done() ping := ping{sender: b.routing.local} ticker := time.NewTicker(interval) defer ticker.Stop() var frame frame stabs := make([]Key, 1+b.routing.stabilizerCount) // successor + stabilizers for { select { case <-b.leaving: return case <-ticker.C: } nstabs := b.routing.stabilizers(stabs) for i := 0; i < nstabs; i++ { stab := stabs[i] reply := b.awaitReply(context.Background(), stab, b.ackTimeout, func(ctx context.Context, id uint64) error { ping.id = id frame = marshalPing(ping, frame) return b.sendTo(ctx, stab, frame) }) if reply.err != nil && reply.err != ErrClosed && reply.err != ErrTimeout { b.onError(errorf("stabilization: %v", reply.err)) } } } } func nodeStream(clique string, node Key) string { buf := alloc(len(clique)+1+2*keySize, 
nil) n := copy(buf, clique) buf[n] = '.' node.writeString(buf[n+1:]) return string(buf) } type nullLock struct{} func (l nullLock) Lock() {} func (l nullLock) Unlock() {}
dispatchMsg
identifier_name
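The awaitReply/notifyReply pair above implements a pending-reply registry: each outgoing frame gets a fresh id, a buffered channel and a timeout timer, and whichever of ack or timeout arrives first resolves the wait. Below is a stripped-down, self-contained sketch of that pattern; the names and types are simplified and are not the broker's actual API.

```go
// Minimal sketch of a pending-reply registry with a timeout: register a
// buffered channel under a fresh id, arm a timer, and let whichever event
// arrives first (reply or timeout) win.
package main

import (
	"errors"
	"fmt"
	"sync"
	"time"
)

var errTimeout = errors.New("timeout")

type replyWaiter struct {
	mtx     sync.Mutex
	nextID  uint64
	pending map[uint64]chan error
}

func newReplyWaiter() *replyWaiter {
	return &replyWaiter{pending: make(map[uint64]chan error)}
}

// await registers a pending reply, arms a timer and blocks until either
// notify is called for the id or the timeout fires. The channel is buffered,
// so the "losing" writer never blocks.
func (w *replyWaiter) await(timeout time.Duration, send func(id uint64)) error {
	w.mtx.Lock()
	w.nextID++
	id := w.nextID
	ch := make(chan error, 1)
	w.pending[id] = ch
	w.mtx.Unlock()

	timer := time.AfterFunc(timeout, func() { w.notify(id, errTimeout) })
	defer timer.Stop()

	send(id)
	return <-ch
}

// notify resolves the pending reply exactly once; later calls are no-ops
// because the id has already been removed from the map.
func (w *replyWaiter) notify(id uint64, err error) {
	w.mtx.Lock()
	ch, ok := w.pending[id]
	delete(w.pending, id)
	w.mtx.Unlock()
	if ok {
		ch <- err
	}
}

func main() {
	w := newReplyWaiter()
	// Simulate an ack arriving before the timeout.
	err := w.await(100*time.Millisecond, func(id uint64) {
		go w.notify(id, nil)
	})
	fmt.Println("reply:", err) // reply: <nil>
}
```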
broker.go
package flow import ( "context" "sync" "sync/atomic" "time" ) // MessageHandler represents a callback function for handling incoming messages. // The binary parts of the passed message should be assumed to be valid // only during the function call. It is the handler's responsibility to // copy the data which should be reused. type MessageHandler func(context.Context, Message) // RequestHandler represents a callback function for handling incoming requests. // The binary parts of the passed message should be assumed to be valid // only during the function call. It is the handler's responsibility to // copy the data which should be reused. type RequestHandler func(context.Context, Message) Message type reply struct { data []byte err error } type pendingReply struct { receiver Key replych chan reply timer *time.Timer } // Broker represents a single node within a clique. It enables // the publishing and subscribing capabilities of the pub/sub // system. Each subscribed message is handled by the responsible // broker, which is determined by the respective node key within a // clique. type Broker struct { messagesInFlight uint64 requestsInFlight uint64 shuttingDown uint64 partitionLocks []sync.Mutex clique string ackTimeout time.Duration reqTimeout time.Duration codec Codec onError func(error) routing routingTable pubsub pubsub id uint64 wg sync.WaitGroup leaving chan struct{} pendingRepliesMtx sync.Mutex pendingReplies map[uint64]pendingReply // id => pending reply messageHandlers map[string]MessageHandler // stream => message handler requestHandlers map[string]RequestHandler // stream => request handler } // NewBroker creates a new broker which uses the pub/sub system // for publishing messages and subscribing to streams. // // Because pubsub could possibly be shared between multiple brokers, // the caller of this function is responsible for closing all // connections to the pub/sub system. func NewBroker(ctx context.Context, pubsub PubSub, o ...Option) (*Broker, error) { opts := defaultOptions() if err := opts.apply(o...); err != nil { return nil, err } b := &Broker{ partitionLocks: opts.partitionLocks, clique: opts.clique, ackTimeout: opts.ackTimeout, reqTimeout: opts.reqTimeout, codec: opts.codec, onError: opts.errorHandler, routing: newRoutingTable(opts), pubsub: newPubSub(pubsub, opts), leaving: make(chan struct{}), pendingReplies: make(map[uint64]pendingReply), messageHandlers: opts.messageHandlers, requestHandlers: opts.requestHandlers, } if err := b.pubsub.subscribe(ctx, nodeStream(b.clique, b.routing.local), "", b.processCliqueProtocol); err != nil { b.pubsub.shutdown(ctx) return nil, err } if err := b.pubsub.subscribe(ctx, b.clique, "", b.processCliqueProtocol); err != nil { b.pubsub.shutdown(ctx) return nil, err } for stream := range b.messageHandlers { if err := b.pubsub.subscribe(ctx, stream, b.clique, b.processMessage); err != nil { b.pubsub.shutdown(ctx) return nil, err } } for stream := range b.requestHandlers { if err := b.pubsub.subscribe(ctx, stream, b.clique, b.processRequest); err != nil { b.pubsub.shutdown(ctx) return nil, err } } join := marshalJoin(join{sender: b.routing.local}) if err := b.broadcast(ctx, join); err != nil { b.pubsub.shutdown(ctx) return nil, err } b.wg.Add(1) go b.stabilize(opts.stabilization.Interval) return b, nil } // Close notifies all clique members about a leaving broker and // disconnects from the pub/sub system. 
func (b *Broker) Close() error { return b.shutdown(context.Background(), func() error { return nil }) } // Shutdown gracefully shuts down the broker. It notifies all clique // members about a leaving broker and waits until all messages and // requests are processed. If the given context expires before, the // context's error will be returned. func (b *Broker) Shutdown(ctx context.Context) error { return b.shutdown(ctx, func() error { ticker := time.NewTicker(250 * time.Millisecond) defer ticker.Stop() for atomic.LoadUint64(&b.messagesInFlight) != 0 || atomic.LoadUint64(&b.requestsInFlight) != 0 { select { case <-ctx.Done(): return ctx.Err() case <-ticker.C: } } return nil }) } // Publish forwards the message directly to the pub/sub system. // If the message does not contain any partition key, the message will // be processed by a random broker within a clique. // All binary data of the passed message needs to be valid only during // the method call. func (b *Broker) Publish(ctx context.Context, msg Message) error { if b.isShuttingDown() { return ErrClosed } if err := msg.validate(); err != nil { return err } return b.pubsub.send(ctx, msg.Stream, b.codec.EncodeMessage(msg)) } // Request sends a request message and waits for its response. // If the message has no partition key, the request will be processed // by a random broker within a clique. // All binary data of the passed message needs to be valid only during // the method call. func (b *Broker) Request(ctx context.Context, request Message) (Message, error) { if b.isShuttingDown() { return Message{}, ErrClosed } if err := request.validate(); err != nil { return Message{}, err } reply := b.awaitReply(ctx, b.routing.local, b.reqTimeout, func(ctx context.Context, id uint64) error { return b.pubsub.send(ctx, request.Stream, marshalMsg(msg{ id: id, reply: []byte(nodeStream(b.clique, b.routing.local)), stream: []byte(request.Stream), pkey: request.PartitionKey, data: request.Data, })) }) return Message{Data: reply.data}, reply.err } func (b *Broker) processMessage(ctx context.Context, stream string, data []byte) { atomic.AddUint64(&b.messagesInFlight, 1) defer atomic.AddUint64(&b.messagesInFlight, ^uint64(0)) decoded, err := b.codec.DecodeMessage(stream, data) if err != nil { b.onError(errorf("decode message: %v", err)) return } b.forwardMsg(ctx, msg{ stream: []byte(decoded.Stream), pkey: decoded.PartitionKey, data: decoded.Data, }) } func (b *Broker) processRequest(ctx context.Context, stream string, data []byte) { atomic.AddUint64(&b.requestsInFlight, 1) defer atomic.AddUint64(&b.requestsInFlight, ^uint64(0)) f, err := unmarshalFrame(data) switch { case err != nil: b.onError(errorf("request subscription: %v", err)) return case f.typ() != frameTypeMsg: b.onError(errorf("unexpected request frame type: %s", f.typ())) return } msg, err := unmarshalMsg(f) if err != nil { b.onError(errorf("unmarshal msg: %v", err)) return } b.forwardMsg(ctx, msg) } func (b *Broker) processCliqueProtocol(ctx context.Context, stream string, data []byte) { f, err := unmarshalFrame(data) if err != nil { b.onError(errorf("clique subscription: %v", err)) return } switch f.typ() { case frameTypeJoin: b.handleJoin(ctx, f) case frameTypeLeave: b.handleLeave(f) case frameTypeInfo: b.handleInfo(f) case frameTypePing: b.handlePing(ctx, f) case frameTypeFwd: b.handleFwd(ctx, f) case frameTypeAck: b.handleAck(f) default: b.onError(errorf("unexpected clique frame type: %s", f.typ())) } } func (b *Broker) handleJoin(ctx context.Context, f frame) { join, err := unmarshalJoin(f) 
switch { case err != nil: b.onError(errorf("unmarshal join: %v", err)) return case join.sender == b.routing.local: return } neighbors := b.routing.neighbors() b.routing.registerKey(join.sender) err = b.sendTo(ctx, join.sender, marshalInfo(info{neighbors: neighbors})) if err != nil { b.onError(errorf("send info: %v", err)) } } func (b *Broker) handleLeave(f frame) { leave, err := unmarshalLeave(f) if err != nil { b.onError(errorf("unmarshal leave: %v", err)) return } b.routing.unregister(leave.node) } func (b *Broker) handleInfo(f frame) { info, err := unmarshalInfo(f) if err != nil { b.onError(errorf("unmarshal info: %v", err)) return } b.routing.registerKeys(info.neighbors) b.notifyReply(info.id, reply{}) } func (b *Broker) handlePing(ctx context.Context, f frame) { ping, err := unmarshalPing(f) if err != nil { b.onError(errorf("unmarshal ping: %v", err)) return } err = b.sendTo(ctx, ping.sender, marshalInfo(info{ id: ping.id, neighbors: b.routing.neighbors(), })) if err != nil { b.onError(errorf("send info: %v", err)) } } func (b *Broker) handleFwd(ctx context.Context, f frame) { fwd, err := unmarshalFwd(f) if err != nil { b.onError(errorf("unmarshal fwd: %v", err)) return } err = b.sendTo(ctx, fwd.ack, marshalAck(ack{id: fwd.id})) if err != nil { b.onError(errorf("send ack: %v", err)) } b.forwardMsg(ctx, fwd.msg) } func (b *Broker) handleAck(frame frame) { ack, err := unmarshalAck(frame) if err != nil { b.onError(errorf("unmarshal ack: %v", err)) return } b.notifyReply(ack.id, reply{data: ack.data}) } func (b *Broker) forwardMsg(ctx context.Context, msg msg) { partition := KeyFromBytes(msg.pkey) if len(msg.pkey) == 0 { b.dispatchMsg(ctx, msg, partition) return }
b.dispatchMsg(ctx, msg, partition) return } reply := b.awaitReply(ctx, succ, b.ackTimeout, func(ctx context.Context, id uint64) error { return b.sendTo(ctx, succ, marshalFwd(fwd{ id: id, ack: b.routing.local, msg: msg, })) }) if reply.err == nil { return } else if reply.err != ErrTimeout { b.onError(reply.err) return } // The node was suspected and removed from the // valid keys. We look for the next successor // to handle the message. } } func (b *Broker) dispatchMsg(ctx context.Context, msg msg, partition Key) { var lock, slot = sync.Locker(nullLock{}), -1 if len(msg.pkey) != 0 && len(b.partitionLocks) != 0 { slot = int(partition % Key(len(b.partitionLocks))) lock = &b.partitionLocks[slot] } var reply reply if h := b.messageHandlers[string(msg.stream)]; h != nil { lock.Lock() h(ctx, Message{ Stream: string(msg.stream), PartitionKey: msg.pkey, Data: msg.data, slot: slot, }) lock.Unlock() } else if h := b.requestHandlers[string(msg.stream)]; h != nil { lock.Lock() resp := h(ctx, Message{ Stream: string(msg.stream), PartitionKey: msg.pkey, Data: msg.data, slot: slot, }) lock.Unlock() reply.data = resp.Data } else { return } if len(msg.reply) != 0 { ack := ack{id: msg.id, data: reply.data} err := b.pubsub.send(ctx, string(msg.reply), marshalAck(ack)) if err != nil { b.onError(errorf("send ack: %v", err)) } } } func (b *Broker) awaitReply(ctx context.Context, receiver Key, timeout time.Duration, send func(context.Context, uint64) error) reply { id := atomic.AddUint64(&b.id, 1) replych := make(chan reply, 1) b.pendingRepliesMtx.Lock() b.pendingReplies[id] = pendingReply{ receiver: receiver, replych: replych, timer: time.AfterFunc(timeout, func() { b.notifyReply(id, reply{err: ErrTimeout}) }), } b.pendingRepliesMtx.Unlock() if err := send(ctx, id); err != nil { b.notifyReply(id, reply{err: err}) } select { case reply := <-replych: return reply case <-ctx.Done(): reply := reply{err: ctx.Err()} b.notifyReply(id, reply) return reply } } func (b *Broker) notifyReply(id uint64, reply reply) { b.pendingRepliesMtx.Lock() pending, has := b.pendingReplies[id] delete(b.pendingReplies, id) b.pendingRepliesMtx.Unlock() if has { pending.timer.Stop() if reply.err == ErrTimeout && pending.receiver != b.routing.local { b.routing.suspect(pending.receiver) } pending.replych <- reply } } func (b *Broker) sendTo(ctx context.Context, target Key, f frame) error { return b.pubsub.send(ctx, nodeStream(b.clique, target), f) } func (b *Broker) broadcast(ctx context.Context, f frame) error { return b.pubsub.send(ctx, b.clique, f) } func (b *Broker) isShuttingDown() bool { return atomic.LoadUint64(&b.shuttingDown) != 0 } func (b *Broker) shutdown(ctx context.Context, wait func() error) error { atomic.StoreUint64(&b.shuttingDown, 1) close(b.leaving) leave := marshalLeave(leave{node: b.routing.local}) err := b.broadcast(ctx, leave) if waitErr := wait(); waitErr != nil { err = waitErr } b.pubsub.shutdown(ctx) // cancel pending replies b.pendingRepliesMtx.Lock() ids := make([]uint64, 0, len(b.pendingReplies)) for id := range b.pendingReplies { ids = append(ids, id) } b.pendingRepliesMtx.Unlock() for _, id := range ids { b.notifyReply(id, reply{err: ErrClosed}) } b.wg.Wait() return err } func (b *Broker) stabilize(interval time.Duration) { defer b.wg.Done() ping := ping{sender: b.routing.local} ticker := time.NewTicker(interval) defer ticker.Stop() var frame frame stabs := make([]Key, 1+b.routing.stabilizerCount) // successor + stabilizers for { select { case <-b.leaving: return case <-ticker.C: } nstabs := 
b.routing.stabilizers(stabs) for i := 0; i < nstabs; i++ { stab := stabs[i] reply := b.awaitReply(context.Background(), stab, b.ackTimeout, func(ctx context.Context, id uint64) error { ping.id = id frame = marshalPing(ping, frame) return b.sendTo(ctx, stab, frame) }) if reply.err != nil && reply.err != ErrClosed && reply.err != ErrTimeout { b.onError(errorf("stabilization: %v", reply.err)) } } } } func nodeStream(clique string, node Key) string { buf := alloc(len(clique)+1+2*keySize, nil) n := copy(buf, clique) buf[n] = '.' node.writeString(buf[n+1:]) return string(buf) } type nullLock struct{} func (l nullLock) Lock() {} func (l nullLock) Unlock() {}
for { succ := b.routing.successor(partition) if succ == b.routing.local {
random_line_split
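nodeStream above derives a broker-private stream name by joining the clique name and the hex-encoded node key with a dot, alongside the shared broadcast stream named after the clique itself. A tiny sketch of the same naming scheme follows; the key bytes and their length are assumptions, not the library's exact layout.

```go
// Sketch of clique-scoped stream naming: "<clique>.<hex-encoded node key>".
package main

import (
	"encoding/hex"
	"fmt"
)

// nodeStream gives every node a private stream next to the shared
// broadcast stream ("clique").
func nodeStream(clique string, key []byte) string {
	return clique + "." + hex.EncodeToString(key)
}

func main() {
	fmt.Println(nodeStream("jobs", []byte{0xab, 0x12})) // jobs.ab12
}
```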
broker.go
package flow import ( "context" "sync" "sync/atomic" "time" ) // MessageHandler represents a callback function for handling incoming messages. // The binary parts of the passed message should be assumed to be valid // only during the function call. It is the handler's responsibility to // copy the data which should be reused. type MessageHandler func(context.Context, Message) // RequestHandler represents a callback function for handling incoming requests. // The binary parts of the passed message should be assumed to be valid // only during the function call. It is the handler's responsibility to // copy the data which should be reused. type RequestHandler func(context.Context, Message) Message type reply struct { data []byte err error } type pendingReply struct { receiver Key replych chan reply timer *time.Timer } // Broker represents a single node within a clique. It enables // the publishing and subscribing capabilities of the pub/sub // system. Each subscribed message is handled by the responsible // broker, which is determined by the respective node key within a // clique. type Broker struct { messagesInFlight uint64 requestsInFlight uint64 shuttingDown uint64 partitionLocks []sync.Mutex clique string ackTimeout time.Duration reqTimeout time.Duration codec Codec onError func(error) routing routingTable pubsub pubsub id uint64 wg sync.WaitGroup leaving chan struct{} pendingRepliesMtx sync.Mutex pendingReplies map[uint64]pendingReply // id => pending reply messageHandlers map[string]MessageHandler // stream => message handler requestHandlers map[string]RequestHandler // stream => request handler } // NewBroker creates a new broker which uses the pub/sub system // for publishing messages and subscribing to streams. // // Because pubsub could possibly be shared between multiple brokers, // the caller of this function is responsible for closing all // connections to the pub/sub system. func NewBroker(ctx context.Context, pubsub PubSub, o ...Option) (*Broker, error) { opts := defaultOptions() if err := opts.apply(o...); err != nil { return nil, err } b := &Broker{ partitionLocks: opts.partitionLocks, clique: opts.clique, ackTimeout: opts.ackTimeout, reqTimeout: opts.reqTimeout, codec: opts.codec, onError: opts.errorHandler, routing: newRoutingTable(opts), pubsub: newPubSub(pubsub, opts), leaving: make(chan struct{}), pendingReplies: make(map[uint64]pendingReply), messageHandlers: opts.messageHandlers, requestHandlers: opts.requestHandlers, } if err := b.pubsub.subscribe(ctx, nodeStream(b.clique, b.routing.local), "", b.processCliqueProtocol); err != nil { b.pubsub.shutdown(ctx) return nil, err } if err := b.pubsub.subscribe(ctx, b.clique, "", b.processCliqueProtocol); err != nil { b.pubsub.shutdown(ctx) return nil, err } for stream := range b.messageHandlers { if err := b.pubsub.subscribe(ctx, stream, b.clique, b.processMessage); err != nil { b.pubsub.shutdown(ctx) return nil, err } } for stream := range b.requestHandlers { if err := b.pubsub.subscribe(ctx, stream, b.clique, b.processRequest); err != nil { b.pubsub.shutdown(ctx) return nil, err } } join := marshalJoin(join{sender: b.routing.local}) if err := b.broadcast(ctx, join); err != nil { b.pubsub.shutdown(ctx) return nil, err } b.wg.Add(1) go b.stabilize(opts.stabilization.Interval) return b, nil } // Close notifies all clique members about a leaving broker and // disconnects from the pub/sub system. func (b *Broker) Close() error
// Shutdown gracefully shuts down the broker. It notifies all clique // members about a leaving broker and waits until all messages and // requests are processed. If the given context expires before, the // context's error will be returned. func (b *Broker) Shutdown(ctx context.Context) error { return b.shutdown(ctx, func() error { ticker := time.NewTicker(250 * time.Millisecond) defer ticker.Stop() for atomic.LoadUint64(&b.messagesInFlight) != 0 || atomic.LoadUint64(&b.requestsInFlight) != 0 { select { case <-ctx.Done(): return ctx.Err() case <-ticker.C: } } return nil }) } // Publish forwards the message directly to the pub/sub system. // If the message does not contain any partition key, the message will // be processed by a random broker within a clique. // All binary data of the passed message needs to be valid only during // the method call. func (b *Broker) Publish(ctx context.Context, msg Message) error { if b.isShuttingDown() { return ErrClosed } if err := msg.validate(); err != nil { return err } return b.pubsub.send(ctx, msg.Stream, b.codec.EncodeMessage(msg)) } // Request sends a request message and waits for its response. // If the message has no partition key, the request will be processed // by a random broker within a clique. // All binary data of the passed message needs to be valid only during // the method call. func (b *Broker) Request(ctx context.Context, request Message) (Message, error) { if b.isShuttingDown() { return Message{}, ErrClosed } if err := request.validate(); err != nil { return Message{}, err } reply := b.awaitReply(ctx, b.routing.local, b.reqTimeout, func(ctx context.Context, id uint64) error { return b.pubsub.send(ctx, request.Stream, marshalMsg(msg{ id: id, reply: []byte(nodeStream(b.clique, b.routing.local)), stream: []byte(request.Stream), pkey: request.PartitionKey, data: request.Data, })) }) return Message{Data: reply.data}, reply.err } func (b *Broker) processMessage(ctx context.Context, stream string, data []byte) { atomic.AddUint64(&b.messagesInFlight, 1) defer atomic.AddUint64(&b.messagesInFlight, ^uint64(0)) decoded, err := b.codec.DecodeMessage(stream, data) if err != nil { b.onError(errorf("decode message: %v", err)) return } b.forwardMsg(ctx, msg{ stream: []byte(decoded.Stream), pkey: decoded.PartitionKey, data: decoded.Data, }) } func (b *Broker) processRequest(ctx context.Context, stream string, data []byte) { atomic.AddUint64(&b.requestsInFlight, 1) defer atomic.AddUint64(&b.requestsInFlight, ^uint64(0)) f, err := unmarshalFrame(data) switch { case err != nil: b.onError(errorf("request subscription: %v", err)) return case f.typ() != frameTypeMsg: b.onError(errorf("unexpected request frame type: %s", f.typ())) return } msg, err := unmarshalMsg(f) if err != nil { b.onError(errorf("unmarshal msg: %v", err)) return } b.forwardMsg(ctx, msg) } func (b *Broker) processCliqueProtocol(ctx context.Context, stream string, data []byte) { f, err := unmarshalFrame(data) if err != nil { b.onError(errorf("clique subscription: %v", err)) return } switch f.typ() { case frameTypeJoin: b.handleJoin(ctx, f) case frameTypeLeave: b.handleLeave(f) case frameTypeInfo: b.handleInfo(f) case frameTypePing: b.handlePing(ctx, f) case frameTypeFwd: b.handleFwd(ctx, f) case frameTypeAck: b.handleAck(f) default: b.onError(errorf("unexpected clique frame type: %s", f.typ())) } } func (b *Broker) handleJoin(ctx context.Context, f frame) { join, err := unmarshalJoin(f) switch { case err != nil: b.onError(errorf("unmarshal join: %v", err)) return case join.sender == 
b.routing.local: return } neighbors := b.routing.neighbors() b.routing.registerKey(join.sender) err = b.sendTo(ctx, join.sender, marshalInfo(info{neighbors: neighbors})) if err != nil { b.onError(errorf("send info: %v", err)) } } func (b *Broker) handleLeave(f frame) { leave, err := unmarshalLeave(f) if err != nil { b.onError(errorf("unmarshal leave: %v", err)) return } b.routing.unregister(leave.node) } func (b *Broker) handleInfo(f frame) { info, err := unmarshalInfo(f) if err != nil { b.onError(errorf("unmarshal info: %v", err)) return } b.routing.registerKeys(info.neighbors) b.notifyReply(info.id, reply{}) } func (b *Broker) handlePing(ctx context.Context, f frame) { ping, err := unmarshalPing(f) if err != nil { b.onError(errorf("unmarshal ping: %v", err)) return } err = b.sendTo(ctx, ping.sender, marshalInfo(info{ id: ping.id, neighbors: b.routing.neighbors(), })) if err != nil { b.onError(errorf("send info: %v", err)) } } func (b *Broker) handleFwd(ctx context.Context, f frame) { fwd, err := unmarshalFwd(f) if err != nil { b.onError(errorf("unmarshal fwd: %v", err)) return } err = b.sendTo(ctx, fwd.ack, marshalAck(ack{id: fwd.id})) if err != nil { b.onError(errorf("send ack: %v", err)) } b.forwardMsg(ctx, fwd.msg) } func (b *Broker) handleAck(frame frame) { ack, err := unmarshalAck(frame) if err != nil { b.onError(errorf("unmarshal ack: %v", err)) return } b.notifyReply(ack.id, reply{data: ack.data}) } func (b *Broker) forwardMsg(ctx context.Context, msg msg) { partition := KeyFromBytes(msg.pkey) if len(msg.pkey) == 0 { b.dispatchMsg(ctx, msg, partition) return } for { succ := b.routing.successor(partition) if succ == b.routing.local { b.dispatchMsg(ctx, msg, partition) return } reply := b.awaitReply(ctx, succ, b.ackTimeout, func(ctx context.Context, id uint64) error { return b.sendTo(ctx, succ, marshalFwd(fwd{ id: id, ack: b.routing.local, msg: msg, })) }) if reply.err == nil { return } else if reply.err != ErrTimeout { b.onError(reply.err) return } // The node was suspected and removed from the // valid keys. We look for the next successor // to handle the message. 
} } func (b *Broker) dispatchMsg(ctx context.Context, msg msg, partition Key) { var lock, slot = sync.Locker(nullLock{}), -1 if len(msg.pkey) != 0 && len(b.partitionLocks) != 0 { slot = int(partition % Key(len(b.partitionLocks))) lock = &b.partitionLocks[slot] } var reply reply if h := b.messageHandlers[string(msg.stream)]; h != nil { lock.Lock() h(ctx, Message{ Stream: string(msg.stream), PartitionKey: msg.pkey, Data: msg.data, slot: slot, }) lock.Unlock() } else if h := b.requestHandlers[string(msg.stream)]; h != nil { lock.Lock() resp := h(ctx, Message{ Stream: string(msg.stream), PartitionKey: msg.pkey, Data: msg.data, slot: slot, }) lock.Unlock() reply.data = resp.Data } else { return } if len(msg.reply) != 0 { ack := ack{id: msg.id, data: reply.data} err := b.pubsub.send(ctx, string(msg.reply), marshalAck(ack)) if err != nil { b.onError(errorf("send ack: %v", err)) } } } func (b *Broker) awaitReply(ctx context.Context, receiver Key, timeout time.Duration, send func(context.Context, uint64) error) reply { id := atomic.AddUint64(&b.id, 1) replych := make(chan reply, 1) b.pendingRepliesMtx.Lock() b.pendingReplies[id] = pendingReply{ receiver: receiver, replych: replych, timer: time.AfterFunc(timeout, func() { b.notifyReply(id, reply{err: ErrTimeout}) }), } b.pendingRepliesMtx.Unlock() if err := send(ctx, id); err != nil { b.notifyReply(id, reply{err: err}) } select { case reply := <-replych: return reply case <-ctx.Done(): reply := reply{err: ctx.Err()} b.notifyReply(id, reply) return reply } } func (b *Broker) notifyReply(id uint64, reply reply) { b.pendingRepliesMtx.Lock() pending, has := b.pendingReplies[id] delete(b.pendingReplies, id) b.pendingRepliesMtx.Unlock() if has { pending.timer.Stop() if reply.err == ErrTimeout && pending.receiver != b.routing.local { b.routing.suspect(pending.receiver) } pending.replych <- reply } } func (b *Broker) sendTo(ctx context.Context, target Key, f frame) error { return b.pubsub.send(ctx, nodeStream(b.clique, target), f) } func (b *Broker) broadcast(ctx context.Context, f frame) error { return b.pubsub.send(ctx, b.clique, f) } func (b *Broker) isShuttingDown() bool { return atomic.LoadUint64(&b.shuttingDown) != 0 } func (b *Broker) shutdown(ctx context.Context, wait func() error) error { atomic.StoreUint64(&b.shuttingDown, 1) close(b.leaving) leave := marshalLeave(leave{node: b.routing.local}) err := b.broadcast(ctx, leave) if waitErr := wait(); waitErr != nil { err = waitErr } b.pubsub.shutdown(ctx) // cancel pending replies b.pendingRepliesMtx.Lock() ids := make([]uint64, 0, len(b.pendingReplies)) for id := range b.pendingReplies { ids = append(ids, id) } b.pendingRepliesMtx.Unlock() for _, id := range ids { b.notifyReply(id, reply{err: ErrClosed}) } b.wg.Wait() return err } func (b *Broker) stabilize(interval time.Duration) { defer b.wg.Done() ping := ping{sender: b.routing.local} ticker := time.NewTicker(interval) defer ticker.Stop() var frame frame stabs := make([]Key, 1+b.routing.stabilizerCount) // successor + stabilizers for { select { case <-b.leaving: return case <-ticker.C: } nstabs := b.routing.stabilizers(stabs) for i := 0; i < nstabs; i++ { stab := stabs[i] reply := b.awaitReply(context.Background(), stab, b.ackTimeout, func(ctx context.Context, id uint64) error { ping.id = id frame = marshalPing(ping, frame) return b.sendTo(ctx, stab, frame) }) if reply.err != nil && reply.err != ErrClosed && reply.err != ErrTimeout { b.onError(errorf("stabilization: %v", reply.err)) } } } } func nodeStream(clique string, node Key) string { buf := 
alloc(len(clique)+1+2*keySize, nil) n := copy(buf, clique) buf[n] = '.' node.writeString(buf[n+1:]) return string(buf) } type nullLock struct{} func (l nullLock) Lock() {} func (l nullLock) Unlock() {}
{ return b.shutdown(context.Background(), func() error { return nil }) }
identifier_body
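dispatchMsg above serialises handlers per partition by mapping the partition key onto a fixed pool of mutexes, so handlers for the same partition run one at a time while different partitions stay concurrent. Here is a self-contained sketch of that slot-selection idea; the FNV hashing of the raw key bytes is an assumption, since the broker derives its numeric Key differently.

```go
// Sketch of per-partition locking: map a partition key to one of N mutexes.
package main

import (
	"fmt"
	"hash/fnv"
	"sync"
)

type dispatcher struct {
	partitionLocks []sync.Mutex
}

// dispatch runs handle under the lock that the partition key hashes to.
// Without a key (or without configured locks) it runs unserialised.
func (d *dispatcher) dispatch(pkey []byte, handle func()) {
	if len(pkey) == 0 || len(d.partitionLocks) == 0 {
		handle()
		return
	}
	h := fnv.New64a()
	h.Write(pkey)
	slot := int(h.Sum64() % uint64(len(d.partitionLocks)))
	d.partitionLocks[slot].Lock()
	defer d.partitionLocks[slot].Unlock()
	handle()
}

func main() {
	d := &dispatcher{partitionLocks: make([]sync.Mutex, 8)}
	d.dispatch([]byte("order-42"), func() { fmt.Println("handled") })
}
```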
broker.go
package flow import ( "context" "sync" "sync/atomic" "time" ) // MessageHandler represents a callback function for handling incoming messages. // The binary parts of the passed message should be assumed to be valid // only during the function call. It is the handler's responsibility to // copy the data which should be reused. type MessageHandler func(context.Context, Message) // RequestHandler represents a callback function for handling incoming requests. // The binary parts of the passed message should be assumed to be valid // only during the function call. It is the handler's responsibility to // copy the data which should be reused. type RequestHandler func(context.Context, Message) Message type reply struct { data []byte err error } type pendingReply struct { receiver Key replych chan reply timer *time.Timer } // Broker represents a single node within a clique. It enables // the publishing and subscribing capabilities of the pub/sub // system. Each subscribed message is handled by the responsible // broker, which is determined by the respective node key within a // clique. type Broker struct { messagesInFlight uint64 requestsInFlight uint64 shuttingDown uint64 partitionLocks []sync.Mutex clique string ackTimeout time.Duration reqTimeout time.Duration codec Codec onError func(error) routing routingTable pubsub pubsub id uint64 wg sync.WaitGroup leaving chan struct{} pendingRepliesMtx sync.Mutex pendingReplies map[uint64]pendingReply // id => pending reply messageHandlers map[string]MessageHandler // stream => message handler requestHandlers map[string]RequestHandler // stream => request handler } // NewBroker creates a new broker which uses the pub/sub system // for publishing messages and subscribing to streams. // // Because pubsub could possibly be shared between multiple brokers, // the caller of this function is responsible for closing all // connections to the pub/sub system. func NewBroker(ctx context.Context, pubsub PubSub, o ...Option) (*Broker, error) { opts := defaultOptions() if err := opts.apply(o...); err != nil { return nil, err } b := &Broker{ partitionLocks: opts.partitionLocks, clique: opts.clique, ackTimeout: opts.ackTimeout, reqTimeout: opts.reqTimeout, codec: opts.codec, onError: opts.errorHandler, routing: newRoutingTable(opts), pubsub: newPubSub(pubsub, opts), leaving: make(chan struct{}), pendingReplies: make(map[uint64]pendingReply), messageHandlers: opts.messageHandlers, requestHandlers: opts.requestHandlers, } if err := b.pubsub.subscribe(ctx, nodeStream(b.clique, b.routing.local), "", b.processCliqueProtocol); err != nil { b.pubsub.shutdown(ctx) return nil, err } if err := b.pubsub.subscribe(ctx, b.clique, "", b.processCliqueProtocol); err != nil { b.pubsub.shutdown(ctx) return nil, err } for stream := range b.messageHandlers { if err := b.pubsub.subscribe(ctx, stream, b.clique, b.processMessage); err != nil { b.pubsub.shutdown(ctx) return nil, err } } for stream := range b.requestHandlers { if err := b.pubsub.subscribe(ctx, stream, b.clique, b.processRequest); err != nil { b.pubsub.shutdown(ctx) return nil, err } } join := marshalJoin(join{sender: b.routing.local}) if err := b.broadcast(ctx, join); err != nil { b.pubsub.shutdown(ctx) return nil, err } b.wg.Add(1) go b.stabilize(opts.stabilization.Interval) return b, nil } // Close notifies all clique members about a leaving broker and // disconnects from the pub/sub system. 
func (b *Broker) Close() error { return b.shutdown(context.Background(), func() error { return nil }) } // Shutdown gracefully shuts down the broker. It notifies all clique // members about a leaving broker and waits until all messages and // requests are processed. If the given context expires before, the // context's error will be returned. func (b *Broker) Shutdown(ctx context.Context) error { return b.shutdown(ctx, func() error { ticker := time.NewTicker(250 * time.Millisecond) defer ticker.Stop() for atomic.LoadUint64(&b.messagesInFlight) != 0 || atomic.LoadUint64(&b.requestsInFlight) != 0 { select { case <-ctx.Done(): return ctx.Err() case <-ticker.C: } } return nil }) } // Publish forwards the message directly to the pub/sub system. // If the message does not contain any partition key, the message will // be processed by a random broker within a clique. // All binary data of the passed message needs to be valid only during // the method call. func (b *Broker) Publish(ctx context.Context, msg Message) error { if b.isShuttingDown() { return ErrClosed } if err := msg.validate(); err != nil { return err } return b.pubsub.send(ctx, msg.Stream, b.codec.EncodeMessage(msg)) } // Request sends a request message and waits for its response. // If the message has no partition key, the request will be processed // by a random broker within a clique. // All binary data of the passed message needs to be valid only during // the method call. func (b *Broker) Request(ctx context.Context, request Message) (Message, error) { if b.isShuttingDown() { return Message{}, ErrClosed } if err := request.validate(); err != nil { return Message{}, err } reply := b.awaitReply(ctx, b.routing.local, b.reqTimeout, func(ctx context.Context, id uint64) error { return b.pubsub.send(ctx, request.Stream, marshalMsg(msg{ id: id, reply: []byte(nodeStream(b.clique, b.routing.local)), stream: []byte(request.Stream), pkey: request.PartitionKey, data: request.Data, })) }) return Message{Data: reply.data}, reply.err } func (b *Broker) processMessage(ctx context.Context, stream string, data []byte) { atomic.AddUint64(&b.messagesInFlight, 1) defer atomic.AddUint64(&b.messagesInFlight, ^uint64(0)) decoded, err := b.codec.DecodeMessage(stream, data) if err != nil { b.onError(errorf("decode message: %v", err)) return } b.forwardMsg(ctx, msg{ stream: []byte(decoded.Stream), pkey: decoded.PartitionKey, data: decoded.Data, }) } func (b *Broker) processRequest(ctx context.Context, stream string, data []byte) { atomic.AddUint64(&b.requestsInFlight, 1) defer atomic.AddUint64(&b.requestsInFlight, ^uint64(0)) f, err := unmarshalFrame(data) switch { case err != nil: b.onError(errorf("request subscription: %v", err)) return case f.typ() != frameTypeMsg: b.onError(errorf("unexpected request frame type: %s", f.typ())) return } msg, err := unmarshalMsg(f) if err != nil { b.onError(errorf("unmarshal msg: %v", err)) return } b.forwardMsg(ctx, msg) } func (b *Broker) processCliqueProtocol(ctx context.Context, stream string, data []byte) { f, err := unmarshalFrame(data) if err != nil { b.onError(errorf("clique subscription: %v", err)) return } switch f.typ() { case frameTypeJoin: b.handleJoin(ctx, f) case frameTypeLeave: b.handleLeave(f) case frameTypeInfo: b.handleInfo(f) case frameTypePing: b.handlePing(ctx, f) case frameTypeFwd: b.handleFwd(ctx, f) case frameTypeAck: b.handleAck(f) default: b.onError(errorf("unexpected clique frame type: %s", f.typ())) } } func (b *Broker) handleJoin(ctx context.Context, f frame) { join, err := unmarshalJoin(f) 
switch { case err != nil: b.onError(errorf("unmarshal join: %v", err)) return case join.sender == b.routing.local: return } neighbors := b.routing.neighbors() b.routing.registerKey(join.sender) err = b.sendTo(ctx, join.sender, marshalInfo(info{neighbors: neighbors})) if err != nil { b.onError(errorf("send info: %v", err)) } } func (b *Broker) handleLeave(f frame) { leave, err := unmarshalLeave(f) if err != nil { b.onError(errorf("unmarshal leave: %v", err)) return } b.routing.unregister(leave.node) } func (b *Broker) handleInfo(f frame) { info, err := unmarshalInfo(f) if err != nil { b.onError(errorf("unmarshal info: %v", err)) return } b.routing.registerKeys(info.neighbors) b.notifyReply(info.id, reply{}) } func (b *Broker) handlePing(ctx context.Context, f frame) { ping, err := unmarshalPing(f) if err != nil
err = b.sendTo(ctx, ping.sender, marshalInfo(info{ id: ping.id, neighbors: b.routing.neighbors(), })) if err != nil { b.onError(errorf("send info: %v", err)) } } func (b *Broker) handleFwd(ctx context.Context, f frame) { fwd, err := unmarshalFwd(f) if err != nil { b.onError(errorf("unmarshal fwd: %v", err)) return } err = b.sendTo(ctx, fwd.ack, marshalAck(ack{id: fwd.id})) if err != nil { b.onError(errorf("send ack: %v", err)) } b.forwardMsg(ctx, fwd.msg) } func (b *Broker) handleAck(frame frame) { ack, err := unmarshalAck(frame) if err != nil { b.onError(errorf("unmarshal ack: %v", err)) return } b.notifyReply(ack.id, reply{data: ack.data}) } func (b *Broker) forwardMsg(ctx context.Context, msg msg) { partition := KeyFromBytes(msg.pkey) if len(msg.pkey) == 0 { b.dispatchMsg(ctx, msg, partition) return } for { succ := b.routing.successor(partition) if succ == b.routing.local { b.dispatchMsg(ctx, msg, partition) return } reply := b.awaitReply(ctx, succ, b.ackTimeout, func(ctx context.Context, id uint64) error { return b.sendTo(ctx, succ, marshalFwd(fwd{ id: id, ack: b.routing.local, msg: msg, })) }) if reply.err == nil { return } else if reply.err != ErrTimeout { b.onError(reply.err) return } // The node was suspected and removed from the // valid keys. We look for the next successor // to handle the message. } } func (b *Broker) dispatchMsg(ctx context.Context, msg msg, partition Key) { var lock, slot = sync.Locker(nullLock{}), -1 if len(msg.pkey) != 0 && len(b.partitionLocks) != 0 { slot = int(partition % Key(len(b.partitionLocks))) lock = &b.partitionLocks[slot] } var reply reply if h := b.messageHandlers[string(msg.stream)]; h != nil { lock.Lock() h(ctx, Message{ Stream: string(msg.stream), PartitionKey: msg.pkey, Data: msg.data, slot: slot, }) lock.Unlock() } else if h := b.requestHandlers[string(msg.stream)]; h != nil { lock.Lock() resp := h(ctx, Message{ Stream: string(msg.stream), PartitionKey: msg.pkey, Data: msg.data, slot: slot, }) lock.Unlock() reply.data = resp.Data } else { return } if len(msg.reply) != 0 { ack := ack{id: msg.id, data: reply.data} err := b.pubsub.send(ctx, string(msg.reply), marshalAck(ack)) if err != nil { b.onError(errorf("send ack: %v", err)) } } } func (b *Broker) awaitReply(ctx context.Context, receiver Key, timeout time.Duration, send func(context.Context, uint64) error) reply { id := atomic.AddUint64(&b.id, 1) replych := make(chan reply, 1) b.pendingRepliesMtx.Lock() b.pendingReplies[id] = pendingReply{ receiver: receiver, replych: replych, timer: time.AfterFunc(timeout, func() { b.notifyReply(id, reply{err: ErrTimeout}) }), } b.pendingRepliesMtx.Unlock() if err := send(ctx, id); err != nil { b.notifyReply(id, reply{err: err}) } select { case reply := <-replych: return reply case <-ctx.Done(): reply := reply{err: ctx.Err()} b.notifyReply(id, reply) return reply } } func (b *Broker) notifyReply(id uint64, reply reply) { b.pendingRepliesMtx.Lock() pending, has := b.pendingReplies[id] delete(b.pendingReplies, id) b.pendingRepliesMtx.Unlock() if has { pending.timer.Stop() if reply.err == ErrTimeout && pending.receiver != b.routing.local { b.routing.suspect(pending.receiver) } pending.replych <- reply } } func (b *Broker) sendTo(ctx context.Context, target Key, f frame) error { return b.pubsub.send(ctx, nodeStream(b.clique, target), f) } func (b *Broker) broadcast(ctx context.Context, f frame) error { return b.pubsub.send(ctx, b.clique, f) } func (b *Broker) isShuttingDown() bool { return atomic.LoadUint64(&b.shuttingDown) != 0 } func (b *Broker) 
shutdown(ctx context.Context, wait func() error) error { atomic.StoreUint64(&b.shuttingDown, 1) close(b.leaving) leave := marshalLeave(leave{node: b.routing.local}) err := b.broadcast(ctx, leave) if waitErr := wait(); waitErr != nil { err = waitErr } b.pubsub.shutdown(ctx) // cancel pending replies b.pendingRepliesMtx.Lock() ids := make([]uint64, 0, len(b.pendingReplies)) for id := range b.pendingReplies { ids = append(ids, id) } b.pendingRepliesMtx.Unlock() for _, id := range ids { b.notifyReply(id, reply{err: ErrClosed}) } b.wg.Wait() return err } func (b *Broker) stabilize(interval time.Duration) { defer b.wg.Done() ping := ping{sender: b.routing.local} ticker := time.NewTicker(interval) defer ticker.Stop() var frame frame stabs := make([]Key, 1+b.routing.stabilizerCount) // successor + stabilizers for { select { case <-b.leaving: return case <-ticker.C: } nstabs := b.routing.stabilizers(stabs) for i := 0; i < nstabs; i++ { stab := stabs[i] reply := b.awaitReply(context.Background(), stab, b.ackTimeout, func(ctx context.Context, id uint64) error { ping.id = id frame = marshalPing(ping, frame) return b.sendTo(ctx, stab, frame) }) if reply.err != nil && reply.err != ErrClosed && reply.err != ErrTimeout { b.onError(errorf("stabilization: %v", reply.err)) } } } } func nodeStream(clique string, node Key) string { buf := alloc(len(clique)+1+2*keySize, nil) n := copy(buf, clique) buf[n] = '.' node.writeString(buf[n+1:]) return string(buf) } type nullLock struct{} func (l nullLock) Lock() {} func (l nullLock) Unlock() {}
{ b.onError(errorf("unmarshal ping: %v", err)) return }
conditional_block
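Shutdown above drains in-flight work by polling atomic counters on a ticker until they reach zero, or giving up when the caller's context expires. A minimal sketch of that drain loop in isolation:

```go
// Sketch of a graceful drain loop: wait for an atomic in-flight counter to
// hit zero, polling on a ticker, unless the context expires first.
package main

import (
	"context"
	"fmt"
	"sync/atomic"
	"time"
)

func drain(ctx context.Context, inFlight *uint64) error {
	ticker := time.NewTicker(250 * time.Millisecond)
	defer ticker.Stop()
	for atomic.LoadUint64(inFlight) != 0 {
		select {
		case <-ctx.Done():
			return ctx.Err() // caller's deadline wins over a clean drain
		case <-ticker.C:
		}
	}
	return nil
}

func main() {
	var inFlight uint64 = 1
	go func() {
		time.Sleep(300 * time.Millisecond)
		atomic.AddUint64(&inFlight, ^uint64(0)) // decrement
	}()
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	fmt.Println(drain(ctx, &inFlight)) // <nil>
}
```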
FilesManager.ts
import * as fs from "fs"; import * as path from "path"; import * as chokidar from "chokidar"; import * as prettier from "prettier"; import { Node, Project, StructureKind } from "ts-morph"; import * as ts from "typescript"; import { APP_DIR, CONFIGURATION_DIR, LIBRARY_IMPORT, } from "../common/constants"; import { ClassMetadata, ExtractedClass, Mixin } from "../common/types"; import * as ast from "./ast-utils"; import { TsMorphFs, writeLineBreak } from "./TsMorphFs"; let prettierConfig = {}; try { prettierConfig = JSON.parse( fs.readFileSync(path.resolve(".prettierrc")).toString("utf-8") ); } catch { // No worries, just using defaults } export class FilesManager { private filesWatcher: chokidar.FSWatcher | undefined; private project: Project = new Project({ fileSystem: new TsMorphFs(prettierConfig), }); metadata: { [name: string]: ClassMetadata; } = {}; classes: { [name: string]: ExtractedClass; } = {}; private getInjectName(classId: string) { return classId[0].toLowerCase() + classId.substr(1); } private getInjectFactoryName(classId: string) { return `create${classId}`; } private async writePrettyFile(fileName: string, content: string) { try { await fs.promises.mkdir(path.dirname(fileName)); } catch { // Already exists } return fs.promises.writeFile( fileName, new TextEncoder().encode( prettier.format(content, { ...prettierConfig, parser: path.extname(fileName) === ".json" ? "json" : "typescript", }) ) ); } private async ensureAppDir() { try { await fs.promises.mkdir(path.resolve(APP_DIR)); } catch { // Already exists } } private getAppSourceFile(classId: string) { const fullPath = classId === "index" ? path.resolve(APP_DIR, "index.ts") : path.resolve(APP_DIR, classId, `index.ts`); const sourceFile = this.project.getSourceFile(fullPath); if (sourceFile) { sourceFile.refreshFromFileSystemSync(); return sourceFile; } return this.project.addSourceFileAtPath(fullPath); } private async ensureConfigurationDir() { const configDir = path.resolve(CONFIGURATION_DIR); try { await fs.promises.mkdir(configDir); } catch { // Already exists } try { const metadata = await fs.promises.readFile( path.resolve(configDir, "metadata.json") ); this.metadata = JSON.parse(new TextDecoder("utf-8").decode(metadata)); } catch { // No file, we will write it later } } private async ensureContainerEntry() { const entryFile = path.resolve(APP_DIR, "index.ts"); try { await fs.promises.stat(entryFile); } catch { // We do not have the file, lets write it await this.writePrettyFile( entryFile, `import { Container } from '${LIBRARY_IMPORT}' export const container = new Container({}, { devtool: process.env.NODE_ENV === 'development' && !window.opener ? "localhost:5051" : undefined }) ` ); } } private extractClass(classId: string) { const node = this.getAppSourceFile(classId); const classNode = ast.getClassNode(node, classId); const mixins = ast.getClassMixins(classNode); const injectors = ast.getInjectors(classNode); const properties = ast.getProperties(classNode); const methods = ast.getMethods(classNode); const observables = ast.getObservables(classNode); properties.forEach((property) => { if (observables.observable.includes(property.name)) { property.type = "observable"; } else if (observables.computed.includes(property.name))
}); methods.forEach((property) => { if (observables.action.includes(property.name)) { property.type = "action"; } }); return { classId, mixins, injectors, properties, methods, }; } private async getClass(fileName: string): Promise<ExtractedClass> { const classId = this.getClassIdFromFileName(fileName); return this.extractClass(classId); } private getClassIdFromFileName(fileName: string) { return path.dirname(fileName).split(path.sep).pop()!; } private async getClasses() { const appDir = path.resolve(APP_DIR)!; try { const directories = (await fs.promises.readdir(appDir)).filter( (file) => file !== "index.ts" && !file.endsWith(".ts") ); return directories.reduce<{ [key: string]: ExtractedClass; }>((aggr, directory) => { const classId = directory; aggr[classId] = this.extractClass(classId); return aggr; }, {}); } catch { return {}; } } /* This is where we map files to nodes and their metadata. Things like position and the ID of the node. */ async addMetadata({ classId, x, y, }: { classId: string; x: number; y: number; }) { this.metadata[classId] = { x, y, }; this.writeMetadata(); } async writeMetadata() { const file = path.resolve(CONFIGURATION_DIR, "metadata.json")!; await this.writePrettyFile(file, JSON.stringify(this.metadata, null, 2)); } /* This method writes the initial file content */ async writeClass(classId: string) { const file = path.resolve(APP_DIR, classId, "index.ts")!; await this.writeClassToEntryFile(classId); await this.writePrettyFile( file, `import { Feature } from 'reactive-app' export interface ${classId} extends Feature {} export class ${classId} { static mixins = ["Feature"]; }` ); } private async writeClassToEntryFile(classId: string) { const sourceFile = this.getAppSourceFile("index"); sourceFile.addImportDeclaration({ moduleSpecifier: `./${classId}`, namedImports: [classId], }); sourceFile .getVariableDeclaration("container") ?.getInitializer() ?.transform((traversal) => { const node = traversal.visitChildren(); if ( ts.isObjectLiteralExpression(node) && ts.isNewExpression(node.parent) && node.parent.arguments![0] === node ) { return ts.factory.createObjectLiteralExpression( [ ...node.properties, ts.factory.createShorthandPropertyAssignment(classId, undefined), ], undefined ); } return node; }); sourceFile.saveSync(); } /* This method adds injections. The type of injection will be part of the payload, either "singleton" or "factory" */ async inject({ fromClassId, toClassId, asFactory, }: { fromClassId: string; toClassId: string; asFactory: boolean; }) { const sourceFile = this.getAppSourceFile(toClassId); ast.addImportDeclaration(sourceFile, LIBRARY_IMPORT, "TFeature"); ast.addImportDeclaration( sourceFile, `../${fromClassId}`, fromClassId, true ); const classNode = sourceFile.getClass(toClassId); if (!classNode) { throw new Error("Can not find class node"); } const name = asFactory ? 
`create${fromClassId}` : fromClassId[0].toLocaleLowerCase() + fromClassId.substr(1); classNode.insertProperty(1, { name, hasExclamationToken: true, isReadonly: true, type: `TFeature<typeof ${fromClassId}>`, trailingTrivia: writeLineBreak, }); ast.updateInjectFeatures(classNode, (config) => { config.addProperty({ name, kind: StructureKind.PropertyAssignment, initializer: `"${fromClassId}"`, }); }); sourceFile.saveSync(); } async removeInjection(fromClassId: string, toClassId: string) { const sourceFile = this.getAppSourceFile(toClassId); ast.removeImportDeclaration(sourceFile, `../${fromClassId}`); const classNode = sourceFile.getClass(toClassId); if (!classNode) { throw new Error("Can not find class node"); } classNode .getProperty((property) => { const name = property.getName(); return ( name === this.getInjectName(fromClassId) || name === this.getInjectFactoryName(fromClassId) ); }) ?.remove(); ast.updateInjectFeatures(classNode, (config) => { const property = config.getProperty((property) => { if (!Node.isPropertyAssignment(property)) { return false; } const initializer = property.getInitializer(); if (!Node.isStringLiteral(initializer)) { return false; } return JSON.parse(initializer.getText()) === fromClassId; }); property?.remove(); }); sourceFile.saveSync(); } async toggleMakeObservableProperty( classId: string, name: string, value?: "observable" | "computed" | "action" ) { const sourceFile = this.getAppSourceFile(classId); const classNode = sourceFile.getClass(classId)!; ast.updateMakeObservable(classNode, (config) => { if (value) { config.addProperty({ name, kind: StructureKind.PropertyAssignment, initializer: `"${value}"`, }); } else { const property = config.getProperty(name); property?.remove(); } }); sourceFile.saveSync(); } async toggleMixin(classId: string, mixin: Mixin) { const sourceFile = this.getAppSourceFile(classId); switch (mixin) { case "View": case "Factory": ast.toggleImportDeclaration(sourceFile, LIBRARY_IMPORT, mixin); ast.toggleMixinInterface(sourceFile, classId, mixin); ast.toggleMixin(sourceFile, classId, mixin); break; case "StateMachine": ast.toggleImportDeclaration(sourceFile, LIBRARY_IMPORT, mixin); ast.toggleMixinInterface( sourceFile, classId, "StateMachine<TContext, TEvent>" ); const contextType = sourceFile.getTypeAlias("TContext"); const messageType = sourceFile.getTypeAlias("TEvent"); const classInterface = sourceFile.getInterface(classId); const clas = sourceFile.getClass(classId)!; const state = clas.getProperty("state"); const onMessage = clas.getMethod("onMessage"); if (state && onMessage && contextType && messageType) { state.remove(); contextType.remove(); messageType.remove(); onMessage.remove(); } else { const interfaceNodeIndex = classInterface!.getChildIndex(); sourceFile.insertTypeAlias(interfaceNodeIndex, { name: "TEvent", isExported: true, type: '{ type: "SOMETHING_HAPPENED" }', trailingTrivia: writeLineBreak, }); sourceFile.insertTypeAlias(interfaceNodeIndex, { name: "TContext", isExported: true, type: '{ state: "FOO" } | { state: "BAR" }', }); clas.addProperty({ name: "context", type: "TContext", initializer: `{ state: "FOO" }`, }); const onEvent = clas.addMethod({ name: "onEvent", parameters: [ { name: "event", type: "TEvent", }, ], statements: ` return this.transition(this.context, event, { FOO: { SOMETHING_HAPPENED: () => ({ state: "BAR" }) }, BAR: { SOMETHING_HAPPENED: () => ({ state: "FOO" }) } }) `, trailingTrivia: writeLineBreak, }); onEvent.toggleModifier("protected", true); } ast.toggleMixin(sourceFile, classId, mixin); 
ast.updateMakeObservable(clas, (config) => { config.addProperty({ name: "context", initializer: '"observable"', kind: StructureKind.PropertyAssignment, }); }); break; } sourceFile.saveSync(); } async deleteClass(classId: string) { const directory = path.resolve(APP_DIR, classId); await fs.promises.rmdir(directory, { recursive: true }); } async renameClass(fromClassId: string, toClassId: string) { await this.addMetadata({ classId: toClassId, ...this.metadata[fromClassId], }); const fromClassPath = path.resolve(APP_DIR, fromClassId, "index.ts"); const toClassPath = path.resolve(APP_DIR, toClassId, "index.ts"); const fs = this.project.getFileSystem(); const contents = fs.readFileSync(fromClassPath); const sourceFile = this.project.createSourceFile(toClassPath, contents); const classDefinition = sourceFile.getClass(fromClassId)!; const classInterface = sourceFile.getInterface(fromClassId)!; classDefinition.rename(toClassId); classInterface.rename(toClassId); fs.mkdirSync(path.resolve(APP_DIR, toClassId)); fs.writeFileSync(toClassPath, sourceFile.print()); await this.writeClassToEntryFile(toClassId); await this.deleteClass(fromClassId); } async initialize(listeners: { onClassChange: (e: ExtractedClass) => void; onClassCreate: (e: ExtractedClass) => void; onClassDelete: (name: string) => void; }) { await this.ensureConfigurationDir(); await this.ensureAppDir(); await this.ensureContainerEntry(); this.classes = await this.getClasses(); this.filesWatcher = chokidar.watch(`${path.resolve(APP_DIR)}/*/index.ts`, { ignoreInitial: true, }); this.filesWatcher.on("all", async (eventType, fileName) => { if (eventType === "change") { const updatedClass = await this.getClass(fileName); this.classes[updatedClass.classId] = updatedClass; listeners.onClassChange(updatedClass); } else if (eventType === "add") { const createdClass = await this.getClass(fileName); this.classes[createdClass.classId] = createdClass; listeners.onClassCreate(createdClass); } else if (eventType === "unlink") { const classId = this.getClassIdFromFileName(fileName); delete this.classes[classId]; delete this.metadata[classId]; const file = path.resolve(CONFIGURATION_DIR, "metadata.json")!; await this.writePrettyFile( file, JSON.stringify(this.metadata, null, 2) ); const sourceFile = this.getAppSourceFile("index"); ast.removeImportDeclaration(sourceFile, `./${classId}`); sourceFile .getVariableDeclaration("container") ?.getInitializer() ?.transform((traversal) => { const node = traversal.visitChildren(); if ( ts.isObjectLiteralExpression(node) && ts.isNewExpression(node.parent) && node.parent.arguments![0] === node ) { return ts.factory.createObjectLiteralExpression( node.properties.filter( (property) => !property.name || !ts.isIdentifier(property.name) || property.name.escapedText !== classId ), undefined ); } return node; }); sourceFile.saveSync(); listeners.onClassDelete(classId); } }); } getMetadata() { return this.metadata; } dispose() { this.filesWatcher?.close(); } }
{ property.type = "computed"; }
conditional_block
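The conditional block masked in this record is the classification step of extractClass: properties are tagged "observable" or "computed", and methods "action", depending on which bucket of the makeObservable configuration their name appears in. As a standalone sketch (the member shapes below are assumptions, not the real ExtractedClass types), the logic is:

```typescript
// Classify extracted members by membership in the observables buckets.
type Member = { name: string; type?: "observable" | "computed" | "action" };
type Observables = { observable: string[]; computed: string[]; action: string[] };

function classifyMembers(
  properties: Member[],
  methods: Member[],
  observables: Observables
) {
  for (const property of properties) {
    if (observables.observable.includes(property.name)) {
      property.type = "observable";
    } else if (observables.computed.includes(property.name)) {
      property.type = "computed";
    }
  }
  for (const method of methods) {
    if (observables.action.includes(method.name)) {
      method.type = "action";
    }
  }
  return { properties, methods };
}

// Example: a "count" observable, a derived "double", and an "increment" action.
classifyMembers(
  [{ name: "count" }, { name: "double" }],
  [{ name: "increment" }],
  { observable: ["count"], computed: ["double"], action: ["increment"] }
);
```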
FilesManager.ts
import * as fs from "fs"; import * as path from "path"; import * as chokidar from "chokidar"; import * as prettier from "prettier"; import { Node, Project, StructureKind } from "ts-morph"; import * as ts from "typescript"; import { APP_DIR, CONFIGURATION_DIR, LIBRARY_IMPORT, } from "../common/constants"; import { ClassMetadata, ExtractedClass, Mixin } from "../common/types"; import * as ast from "./ast-utils"; import { TsMorphFs, writeLineBreak } from "./TsMorphFs"; let prettierConfig = {}; try { prettierConfig = JSON.parse( fs.readFileSync(path.resolve(".prettierrc")).toString("utf-8") ); } catch { // No worries, just using defaults } export class FilesManager { private filesWatcher: chokidar.FSWatcher | undefined; private project: Project = new Project({ fileSystem: new TsMorphFs(prettierConfig), }); metadata: { [name: string]: ClassMetadata; } = {}; classes: { [name: string]: ExtractedClass; } = {}; private getInjectName(classId: string) { return classId[0].toLowerCase() + classId.substr(1); } private getInjectFactoryName(classId: string) { return `create${classId}`; } private async writePrettyFile(fileName: string, content: string) { try { await fs.promises.mkdir(path.dirname(fileName)); } catch { // Already exists } return fs.promises.writeFile( fileName, new TextEncoder().encode( prettier.format(content, { ...prettierConfig, parser: path.extname(fileName) === ".json" ? "json" : "typescript", }) ) ); } private async ensureAppDir() { try { await fs.promises.mkdir(path.resolve(APP_DIR)); } catch { // Already exists } } private getAppSourceFile(classId: string) { const fullPath = classId === "index" ? path.resolve(APP_DIR, "index.ts") : path.resolve(APP_DIR, classId, `index.ts`); const sourceFile = this.project.getSourceFile(fullPath); if (sourceFile) { sourceFile.refreshFromFileSystemSync(); return sourceFile; } return this.project.addSourceFileAtPath(fullPath); } private async ensureConfigurationDir() { const configDir = path.resolve(CONFIGURATION_DIR); try { await fs.promises.mkdir(configDir); } catch { // Already exists } try { const metadata = await fs.promises.readFile( path.resolve(configDir, "metadata.json") ); this.metadata = JSON.parse(new TextDecoder("utf-8").decode(metadata)); } catch { // No file, we will write it later } } private async ensureContainerEntry() { const entryFile = path.resolve(APP_DIR, "index.ts"); try { await fs.promises.stat(entryFile); } catch { // We do not have the file, lets write it await this.writePrettyFile( entryFile, `import { Container } from '${LIBRARY_IMPORT}' export const container = new Container({}, { devtool: process.env.NODE_ENV === 'development' && !window.opener ? 
"localhost:5051" : undefined }) ` ); } } private extractClass(classId: string) { const node = this.getAppSourceFile(classId); const classNode = ast.getClassNode(node, classId); const mixins = ast.getClassMixins(classNode); const injectors = ast.getInjectors(classNode); const properties = ast.getProperties(classNode); const methods = ast.getMethods(classNode); const observables = ast.getObservables(classNode); properties.forEach((property) => { if (observables.observable.includes(property.name)) { property.type = "observable"; } else if (observables.computed.includes(property.name)) { property.type = "computed"; } }); methods.forEach((property) => { if (observables.action.includes(property.name)) { property.type = "action"; } }); return { classId, mixins, injectors, properties, methods, }; } private async getClass(fileName: string): Promise<ExtractedClass> { const classId = this.getClassIdFromFileName(fileName); return this.extractClass(classId); } private getClassIdFromFileName(fileName: string) { return path.dirname(fileName).split(path.sep).pop()!; } private async getClasses() { const appDir = path.resolve(APP_DIR)!; try { const directories = (await fs.promises.readdir(appDir)).filter( (file) => file !== "index.ts" && !file.endsWith(".ts") ); return directories.reduce<{ [key: string]: ExtractedClass; }>((aggr, directory) => { const classId = directory; aggr[classId] = this.extractClass(classId); return aggr; }, {}); } catch { return {}; } } /* This is where we map files to nodes and their metadata. Things like position and the ID of the node. */ async addMetadata({ classId, x, y, }: { classId: string; x: number; y: number; }) { this.metadata[classId] = { x, y, }; this.writeMetadata(); } async writeMetadata() { const file = path.resolve(CONFIGURATION_DIR, "metadata.json")!; await this.writePrettyFile(file, JSON.stringify(this.metadata, null, 2)); } /* This method writes the initial file content */ async writeClass(classId: string) { const file = path.resolve(APP_DIR, classId, "index.ts")!; await this.writeClassToEntryFile(classId); await this.writePrettyFile( file, `import { Feature } from 'reactive-app' export interface ${classId} extends Feature {} export class ${classId} { static mixins = ["Feature"]; }` ); } private async writeClassToEntryFile(classId: string) { const sourceFile = this.getAppSourceFile("index"); sourceFile.addImportDeclaration({ moduleSpecifier: `./${classId}`, namedImports: [classId], }); sourceFile .getVariableDeclaration("container") ?.getInitializer() ?.transform((traversal) => { const node = traversal.visitChildren(); if ( ts.isObjectLiteralExpression(node) && ts.isNewExpression(node.parent) && node.parent.arguments![0] === node ) { return ts.factory.createObjectLiteralExpression( [ ...node.properties, ts.factory.createShorthandPropertyAssignment(classId, undefined), ], undefined ); } return node; }); sourceFile.saveSync(); } /* This method adds injections. The type of injection will be part of the payload, either "singleton" or "factory" */ async inject({ fromClassId, toClassId, asFactory, }: { fromClassId: string; toClassId: string; asFactory: boolean; })
async removeInjection(fromClassId: string, toClassId: string) { const sourceFile = this.getAppSourceFile(toClassId); ast.removeImportDeclaration(sourceFile, `../${fromClassId}`); const classNode = sourceFile.getClass(toClassId); if (!classNode) { throw new Error("Can not find class node"); } classNode .getProperty((property) => { const name = property.getName(); return ( name === this.getInjectName(fromClassId) || name === this.getInjectFactoryName(fromClassId) ); }) ?.remove(); ast.updateInjectFeatures(classNode, (config) => { const property = config.getProperty((property) => { if (!Node.isPropertyAssignment(property)) { return false; } const initializer = property.getInitializer(); if (!Node.isStringLiteral(initializer)) { return false; } return JSON.parse(initializer.getText()) === fromClassId; }); property?.remove(); }); sourceFile.saveSync(); } async toggleMakeObservableProperty( classId: string, name: string, value?: "observable" | "computed" | "action" ) { const sourceFile = this.getAppSourceFile(classId); const classNode = sourceFile.getClass(classId)!; ast.updateMakeObservable(classNode, (config) => { if (value) { config.addProperty({ name, kind: StructureKind.PropertyAssignment, initializer: `"${value}"`, }); } else { const property = config.getProperty(name); property?.remove(); } }); sourceFile.saveSync(); } async toggleMixin(classId: string, mixin: Mixin) { const sourceFile = this.getAppSourceFile(classId); switch (mixin) { case "View": case "Factory": ast.toggleImportDeclaration(sourceFile, LIBRARY_IMPORT, mixin); ast.toggleMixinInterface(sourceFile, classId, mixin); ast.toggleMixin(sourceFile, classId, mixin); break; case "StateMachine": ast.toggleImportDeclaration(sourceFile, LIBRARY_IMPORT, mixin); ast.toggleMixinInterface( sourceFile, classId, "StateMachine<TContext, TEvent>" ); const contextType = sourceFile.getTypeAlias("TContext"); const messageType = sourceFile.getTypeAlias("TEvent"); const classInterface = sourceFile.getInterface(classId); const clas = sourceFile.getClass(classId)!; const state = clas.getProperty("state"); const onMessage = clas.getMethod("onMessage"); if (state && onMessage && contextType && messageType) { state.remove(); contextType.remove(); messageType.remove(); onMessage.remove(); } else { const interfaceNodeIndex = classInterface!.getChildIndex(); sourceFile.insertTypeAlias(interfaceNodeIndex, { name: "TEvent", isExported: true, type: '{ type: "SOMETHING_HAPPENED" }', trailingTrivia: writeLineBreak, }); sourceFile.insertTypeAlias(interfaceNodeIndex, { name: "TContext", isExported: true, type: '{ state: "FOO" } | { state: "BAR" }', }); clas.addProperty({ name: "context", type: "TContext", initializer: `{ state: "FOO" }`, }); const onEvent = clas.addMethod({ name: "onEvent", parameters: [ { name: "event", type: "TEvent", }, ], statements: ` return this.transition(this.context, event, { FOO: { SOMETHING_HAPPENED: () => ({ state: "BAR" }) }, BAR: { SOMETHING_HAPPENED: () => ({ state: "FOO" }) } }) `, trailingTrivia: writeLineBreak, }); onEvent.toggleModifier("protected", true); } ast.toggleMixin(sourceFile, classId, mixin); ast.updateMakeObservable(clas, (config) => { config.addProperty({ name: "context", initializer: '"observable"', kind: StructureKind.PropertyAssignment, }); }); break; } sourceFile.saveSync(); } async deleteClass(classId: string) { const directory = path.resolve(APP_DIR, classId); await fs.promises.rmdir(directory, { recursive: true }); } async renameClass(fromClassId: string, toClassId: string) { await this.addMetadata({ 
classId: toClassId, ...this.metadata[fromClassId], }); const fromClassPath = path.resolve(APP_DIR, fromClassId, "index.ts"); const toClassPath = path.resolve(APP_DIR, toClassId, "index.ts"); const fs = this.project.getFileSystem(); const contents = fs.readFileSync(fromClassPath); const sourceFile = this.project.createSourceFile(toClassPath, contents); const classDefinition = sourceFile.getClass(fromClassId)!; const classInterface = sourceFile.getInterface(fromClassId)!; classDefinition.rename(toClassId); classInterface.rename(toClassId); fs.mkdirSync(path.resolve(APP_DIR, toClassId)); fs.writeFileSync(toClassPath, sourceFile.print()); await this.writeClassToEntryFile(toClassId); await this.deleteClass(fromClassId); } async initialize(listeners: { onClassChange: (e: ExtractedClass) => void; onClassCreate: (e: ExtractedClass) => void; onClassDelete: (name: string) => void; }) { await this.ensureConfigurationDir(); await this.ensureAppDir(); await this.ensureContainerEntry(); this.classes = await this.getClasses(); this.filesWatcher = chokidar.watch(`${path.resolve(APP_DIR)}/*/index.ts`, { ignoreInitial: true, }); this.filesWatcher.on("all", async (eventType, fileName) => { if (eventType === "change") { const updatedClass = await this.getClass(fileName); this.classes[updatedClass.classId] = updatedClass; listeners.onClassChange(updatedClass); } else if (eventType === "add") { const createdClass = await this.getClass(fileName); this.classes[createdClass.classId] = createdClass; listeners.onClassCreate(createdClass); } else if (eventType === "unlink") { const classId = this.getClassIdFromFileName(fileName); delete this.classes[classId]; delete this.metadata[classId]; const file = path.resolve(CONFIGURATION_DIR, "metadata.json")!; await this.writePrettyFile( file, JSON.stringify(this.metadata, null, 2) ); const sourceFile = this.getAppSourceFile("index"); ast.removeImportDeclaration(sourceFile, `./${classId}`); sourceFile .getVariableDeclaration("container") ?.getInitializer() ?.transform((traversal) => { const node = traversal.visitChildren(); if ( ts.isObjectLiteralExpression(node) && ts.isNewExpression(node.parent) && node.parent.arguments![0] === node ) { return ts.factory.createObjectLiteralExpression( node.properties.filter( (property) => !property.name || !ts.isIdentifier(property.name) || property.name.escapedText !== classId ), undefined ); } return node; }); sourceFile.saveSync(); listeners.onClassDelete(classId); } }); } getMetadata() { return this.metadata; } dispose() { this.filesWatcher?.close(); } }
{ const sourceFile = this.getAppSourceFile(toClassId); ast.addImportDeclaration(sourceFile, LIBRARY_IMPORT, "TFeature"); ast.addImportDeclaration( sourceFile, `../${fromClassId}`, fromClassId, true ); const classNode = sourceFile.getClass(toClassId); if (!classNode) { throw new Error("Can not find class node"); } const name = asFactory ? `create${fromClassId}` : fromClassId[0].toLocaleLowerCase() + fromClassId.substr(1); classNode.insertProperty(1, { name, hasExclamationToken: true, isReadonly: true, type: `TFeature<typeof ${fromClassId}>`, trailingTrivia: writeLineBreak, }); ast.updateInjectFeatures(classNode, (config) => { config.addProperty({ name, kind: StructureKind.PropertyAssignment, initializer: `"${fromClassId}"`, }); }); sourceFile.saveSync(); }
identifier_body
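The identifier body in this record is the inject method, which derives the injected property name from the class id in the same way as getInjectName and getInjectFactoryName: a singleton is exposed under the camelCased class id, a factory under create&lt;ClassId&gt;. A minimal sketch of that convention (the helper name is illustrative, not from the source):

```typescript
// Derive the property name used for an injection.
function injectionName(classId: string, asFactory: boolean): string {
  return asFactory
    ? `create${classId}`
    : classId[0].toLowerCase() + classId.substr(1);
}

injectionName("Router", false); // "router"
injectionName("Router", true);  // "createRouter"
```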
FilesManager.ts
import * as fs from "fs"; import * as path from "path"; import * as chokidar from "chokidar"; import * as prettier from "prettier"; import { Node, Project, StructureKind } from "ts-morph"; import * as ts from "typescript"; import { APP_DIR, CONFIGURATION_DIR, LIBRARY_IMPORT, } from "../common/constants"; import { ClassMetadata, ExtractedClass, Mixin } from "../common/types"; import * as ast from "./ast-utils"; import { TsMorphFs, writeLineBreak } from "./TsMorphFs"; let prettierConfig = {}; try { prettierConfig = JSON.parse( fs.readFileSync(path.resolve(".prettierrc")).toString("utf-8") ); } catch { // No worries, just using defaults } export class FilesManager { private filesWatcher: chokidar.FSWatcher | undefined; private project: Project = new Project({ fileSystem: new TsMorphFs(prettierConfig), }); metadata: { [name: string]: ClassMetadata; } = {}; classes: { [name: string]: ExtractedClass; } = {}; private getInjectName(classId: string) { return classId[0].toLowerCase() + classId.substr(1); } private getInjectFactoryName(classId: string) { return `create${classId}`; } private async writePrettyFile(fileName: string, content: string) { try { await fs.promises.mkdir(path.dirname(fileName)); } catch { // Already exists } return fs.promises.writeFile( fileName, new TextEncoder().encode( prettier.format(content, { ...prettierConfig, parser: path.extname(fileName) === ".json" ? "json" : "typescript", }) ) ); } private async ensureAppDir() { try { await fs.promises.mkdir(path.resolve(APP_DIR)); } catch { // Already exists } } private getAppSourceFile(classId: string) { const fullPath = classId === "index" ? path.resolve(APP_DIR, "index.ts") : path.resolve(APP_DIR, classId, `index.ts`); const sourceFile = this.project.getSourceFile(fullPath); if (sourceFile) { sourceFile.refreshFromFileSystemSync(); return sourceFile; } return this.project.addSourceFileAtPath(fullPath); } private async ensureConfigurationDir() { const configDir = path.resolve(CONFIGURATION_DIR); try { await fs.promises.mkdir(configDir); } catch { // Already exists } try { const metadata = await fs.promises.readFile( path.resolve(configDir, "metadata.json") ); this.metadata = JSON.parse(new TextDecoder("utf-8").decode(metadata)); } catch { // No file, we will write it later } } private async ensureContainerEntry() { const entryFile = path.resolve(APP_DIR, "index.ts"); try { await fs.promises.stat(entryFile); } catch { // We do not have the file, lets write it await this.writePrettyFile( entryFile, `import { Container } from '${LIBRARY_IMPORT}' export const container = new Container({}, { devtool: process.env.NODE_ENV === 'development' && !window.opener ? 
"localhost:5051" : undefined }) ` ); } } private extractClass(classId: string) { const node = this.getAppSourceFile(classId); const classNode = ast.getClassNode(node, classId); const mixins = ast.getClassMixins(classNode); const injectors = ast.getInjectors(classNode); const properties = ast.getProperties(classNode); const methods = ast.getMethods(classNode); const observables = ast.getObservables(classNode); properties.forEach((property) => { if (observables.observable.includes(property.name)) { property.type = "observable"; } else if (observables.computed.includes(property.name)) { property.type = "computed"; } }); methods.forEach((property) => { if (observables.action.includes(property.name)) { property.type = "action"; } }); return { classId, mixins, injectors, properties, methods, }; } private async getClass(fileName: string): Promise<ExtractedClass> { const classId = this.getClassIdFromFileName(fileName); return this.extractClass(classId); } private getClassIdFromFileName(fileName: string) { return path.dirname(fileName).split(path.sep).pop()!; } private async getClasses() { const appDir = path.resolve(APP_DIR)!; try { const directories = (await fs.promises.readdir(appDir)).filter( (file) => file !== "index.ts" && !file.endsWith(".ts") ); return directories.reduce<{ [key: string]: ExtractedClass; }>((aggr, directory) => { const classId = directory; aggr[classId] = this.extractClass(classId); return aggr; }, {}); } catch { return {}; } } /* This is where we map files to nodes and their metadata. Things like position and the ID of the node. */ async addMetadata({ classId, x, y, }: { classId: string; x: number; y: number; }) { this.metadata[classId] = { x, y, }; this.writeMetadata(); } async writeMetadata() { const file = path.resolve(CONFIGURATION_DIR, "metadata.json")!; await this.writePrettyFile(file, JSON.stringify(this.metadata, null, 2)); } /* This method writes the initial file content */ async writeClass(classId: string) { const file = path.resolve(APP_DIR, classId, "index.ts")!; await this.writeClassToEntryFile(classId); await this.writePrettyFile( file, `import { Feature } from 'reactive-app' export interface ${classId} extends Feature {} export class ${classId} { static mixins = ["Feature"]; }` ); } private async writeClassToEntryFile(classId: string) { const sourceFile = this.getAppSourceFile("index"); sourceFile.addImportDeclaration({ moduleSpecifier: `./${classId}`, namedImports: [classId], }); sourceFile .getVariableDeclaration("container") ?.getInitializer() ?.transform((traversal) => { const node = traversal.visitChildren(); if ( ts.isObjectLiteralExpression(node) && ts.isNewExpression(node.parent) && node.parent.arguments![0] === node ) { return ts.factory.createObjectLiteralExpression( [ ...node.properties, ts.factory.createShorthandPropertyAssignment(classId, undefined), ], undefined ); } return node; }); sourceFile.saveSync(); } /* This method adds injections. The type of injection will be part of the payload, either "singleton" or "factory" */ async inject({ fromClassId, toClassId, asFactory, }: { fromClassId: string; toClassId: string; asFactory: boolean; }) { const sourceFile = this.getAppSourceFile(toClassId); ast.addImportDeclaration(sourceFile, LIBRARY_IMPORT, "TFeature"); ast.addImportDeclaration( sourceFile, `../${fromClassId}`, fromClassId, true ); const classNode = sourceFile.getClass(toClassId); if (!classNode) { throw new Error("Can not find class node"); } const name = asFactory ? 
`create${fromClassId}` : fromClassId[0].toLocaleLowerCase() + fromClassId.substr(1); classNode.insertProperty(1, { name, hasExclamationToken: true, isReadonly: true, type: `TFeature<typeof ${fromClassId}>`, trailingTrivia: writeLineBreak, }); ast.updateInjectFeatures(classNode, (config) => { config.addProperty({ name, kind: StructureKind.PropertyAssignment, initializer: `"${fromClassId}"`, }); }); sourceFile.saveSync(); } async removeInjection(fromClassId: string, toClassId: string) { const sourceFile = this.getAppSourceFile(toClassId); ast.removeImportDeclaration(sourceFile, `../${fromClassId}`); const classNode = sourceFile.getClass(toClassId); if (!classNode) { throw new Error("Can not find class node"); } classNode .getProperty((property) => { const name = property.getName(); return ( name === this.getInjectName(fromClassId) || name === this.getInjectFactoryName(fromClassId) ); }) ?.remove(); ast.updateInjectFeatures(classNode, (config) => { const property = config.getProperty((property) => { if (!Node.isPropertyAssignment(property)) { return false; } const initializer = property.getInitializer(); if (!Node.isStringLiteral(initializer)) { return false; } return JSON.parse(initializer.getText()) === fromClassId; }); property?.remove(); }); sourceFile.saveSync(); } async toggleMakeObservableProperty( classId: string, name: string, value?: "observable" | "computed" | "action" ) { const sourceFile = this.getAppSourceFile(classId); const classNode = sourceFile.getClass(classId)!; ast.updateMakeObservable(classNode, (config) => { if (value) { config.addProperty({ name, kind: StructureKind.PropertyAssignment, initializer: `"${value}"`, }); } else { const property = config.getProperty(name); property?.remove(); } }); sourceFile.saveSync(); } async toggleMixin(classId: string, mixin: Mixin) { const sourceFile = this.getAppSourceFile(classId); switch (mixin) { case "View": case "Factory": ast.toggleImportDeclaration(sourceFile, LIBRARY_IMPORT, mixin); ast.toggleMixinInterface(sourceFile, classId, mixin); ast.toggleMixin(sourceFile, classId, mixin); break; case "StateMachine": ast.toggleImportDeclaration(sourceFile, LIBRARY_IMPORT, mixin); ast.toggleMixinInterface( sourceFile, classId, "StateMachine<TContext, TEvent>" ); const contextType = sourceFile.getTypeAlias("TContext"); const messageType = sourceFile.getTypeAlias("TEvent"); const classInterface = sourceFile.getInterface(classId); const clas = sourceFile.getClass(classId)!; const state = clas.getProperty("state"); const onMessage = clas.getMethod("onMessage"); if (state && onMessage && contextType && messageType) { state.remove(); contextType.remove(); messageType.remove(); onMessage.remove(); } else { const interfaceNodeIndex = classInterface!.getChildIndex(); sourceFile.insertTypeAlias(interfaceNodeIndex, { name: "TEvent", isExported: true, type: '{ type: "SOMETHING_HAPPENED" }', trailingTrivia: writeLineBreak, }); sourceFile.insertTypeAlias(interfaceNodeIndex, { name: "TContext", isExported: true, type: '{ state: "FOO" } | { state: "BAR" }', }); clas.addProperty({ name: "context", type: "TContext", initializer: `{ state: "FOO" }`, }); const onEvent = clas.addMethod({ name: "onEvent", parameters: [ { name: "event", type: "TEvent", }, ], statements: ` return this.transition(this.context, event, { FOO: { SOMETHING_HAPPENED: () => ({ state: "BAR" }) }, BAR: { SOMETHING_HAPPENED: () => ({ state: "FOO" }) } }) `, trailingTrivia: writeLineBreak, }); onEvent.toggleModifier("protected", true); } ast.toggleMixin(sourceFile, classId, mixin); 
ast.updateMakeObservable(clas, (config) => { config.addProperty({ name: "context", initializer: '"observable"', kind: StructureKind.PropertyAssignment, }); }); break; } sourceFile.saveSync(); } async deleteClass(classId: string) { const directory = path.resolve(APP_DIR, classId); await fs.promises.rmdir(directory, { recursive: true }); } async renameClass(fromClassId: string, toClassId: string) { await this.addMetadata({ classId: toClassId, ...this.metadata[fromClassId], }); const fromClassPath = path.resolve(APP_DIR, fromClassId, "index.ts"); const toClassPath = path.resolve(APP_DIR, toClassId, "index.ts"); const fs = this.project.getFileSystem(); const contents = fs.readFileSync(fromClassPath); const sourceFile = this.project.createSourceFile(toClassPath, contents); const classDefinition = sourceFile.getClass(fromClassId)!; const classInterface = sourceFile.getInterface(fromClassId)!; classDefinition.rename(toClassId); classInterface.rename(toClassId); fs.mkdirSync(path.resolve(APP_DIR, toClassId)); fs.writeFileSync(toClassPath, sourceFile.print()); await this.writeClassToEntryFile(toClassId); await this.deleteClass(fromClassId); } async
(listeners: { onClassChange: (e: ExtractedClass) => void; onClassCreate: (e: ExtractedClass) => void; onClassDelete: (name: string) => void; }) { await this.ensureConfigurationDir(); await this.ensureAppDir(); await this.ensureContainerEntry(); this.classes = await this.getClasses(); this.filesWatcher = chokidar.watch(`${path.resolve(APP_DIR)}/*/index.ts`, { ignoreInitial: true, }); this.filesWatcher.on("all", async (eventType, fileName) => { if (eventType === "change") { const updatedClass = await this.getClass(fileName); this.classes[updatedClass.classId] = updatedClass; listeners.onClassChange(updatedClass); } else if (eventType === "add") { const createdClass = await this.getClass(fileName); this.classes[createdClass.classId] = createdClass; listeners.onClassCreate(createdClass); } else if (eventType === "unlink") { const classId = this.getClassIdFromFileName(fileName); delete this.classes[classId]; delete this.metadata[classId]; const file = path.resolve(CONFIGURATION_DIR, "metadata.json")!; await this.writePrettyFile( file, JSON.stringify(this.metadata, null, 2) ); const sourceFile = this.getAppSourceFile("index"); ast.removeImportDeclaration(sourceFile, `./${classId}`); sourceFile .getVariableDeclaration("container") ?.getInitializer() ?.transform((traversal) => { const node = traversal.visitChildren(); if ( ts.isObjectLiteralExpression(node) && ts.isNewExpression(node.parent) && node.parent.arguments![0] === node ) { return ts.factory.createObjectLiteralExpression( node.properties.filter( (property) => !property.name || !ts.isIdentifier(property.name) || property.name.escapedText !== classId ), undefined ); } return node; }); sourceFile.saveSync(); listeners.onClassDelete(classId); } }); } getMetadata() { return this.metadata; } dispose() { this.filesWatcher?.close(); } }
initialize
identifier_name
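The masked identifier here is initialize, which wires a chokidar watcher so that add, change, and unlink events on */index.ts files drive the class listeners. A rough sketch of that wiring follows, assuming chokidar's standard "all" event; the listener names and the watchClasses helper are illustrative.

```typescript
import * as chokidar from "chokidar";
import * as path from "path";

// Watch each class directory's index.ts and map file events to callbacks.
function watchClasses(appDir: string, listeners: {
  onChange: (file: string) => void;
  onCreate: (file: string) => void;
  onDelete: (classId: string) => void;
}) {
  const watcher = chokidar.watch(`${path.resolve(appDir)}/*/index.ts`, {
    ignoreInitial: true, // only react to changes after startup
  });
  watcher.on("all", (eventType, fileName) => {
    if (eventType === "change") {
      listeners.onChange(fileName);
    } else if (eventType === "add") {
      listeners.onCreate(fileName);
    } else if (eventType === "unlink") {
      // the class id is the directory that contained index.ts
      listeners.onDelete(path.dirname(fileName).split(path.sep).pop()!);
    }
  });
  return watcher; // call watcher.close() to dispose
}
```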
FilesManager.ts
import * as fs from "fs"; import * as path from "path"; import * as chokidar from "chokidar"; import * as prettier from "prettier"; import { Node, Project, StructureKind } from "ts-morph"; import * as ts from "typescript"; import { APP_DIR, CONFIGURATION_DIR, LIBRARY_IMPORT, } from "../common/constants"; import { ClassMetadata, ExtractedClass, Mixin } from "../common/types"; import * as ast from "./ast-utils"; import { TsMorphFs, writeLineBreak } from "./TsMorphFs"; let prettierConfig = {}; try { prettierConfig = JSON.parse( fs.readFileSync(path.resolve(".prettierrc")).toString("utf-8") ); } catch { // No worries, just using defaults } export class FilesManager { private filesWatcher: chokidar.FSWatcher | undefined; private project: Project = new Project({ fileSystem: new TsMorphFs(prettierConfig), }); metadata: { [name: string]: ClassMetadata; } = {}; classes: { [name: string]: ExtractedClass; } = {}; private getInjectName(classId: string) { return classId[0].toLowerCase() + classId.substr(1); } private getInjectFactoryName(classId: string) { return `create${classId}`; }
await fs.promises.mkdir(path.dirname(fileName)); } catch { // Already exists } return fs.promises.writeFile( fileName, new TextEncoder().encode( prettier.format(content, { ...prettierConfig, parser: path.extname(fileName) === ".json" ? "json" : "typescript", }) ) ); } private async ensureAppDir() { try { await fs.promises.mkdir(path.resolve(APP_DIR)); } catch { // Already exists } } private getAppSourceFile(classId: string) { const fullPath = classId === "index" ? path.resolve(APP_DIR, "index.ts") : path.resolve(APP_DIR, classId, `index.ts`); const sourceFile = this.project.getSourceFile(fullPath); if (sourceFile) { sourceFile.refreshFromFileSystemSync(); return sourceFile; } return this.project.addSourceFileAtPath(fullPath); } private async ensureConfigurationDir() { const configDir = path.resolve(CONFIGURATION_DIR); try { await fs.promises.mkdir(configDir); } catch { // Already exists } try { const metadata = await fs.promises.readFile( path.resolve(configDir, "metadata.json") ); this.metadata = JSON.parse(new TextDecoder("utf-8").decode(metadata)); } catch { // No file, we will write it later } } private async ensureContainerEntry() { const entryFile = path.resolve(APP_DIR, "index.ts"); try { await fs.promises.stat(entryFile); } catch { // We do not have the file, lets write it await this.writePrettyFile( entryFile, `import { Container } from '${LIBRARY_IMPORT}' export const container = new Container({}, { devtool: process.env.NODE_ENV === 'development' && !window.opener ? "localhost:5051" : undefined }) ` ); } } private extractClass(classId: string) { const node = this.getAppSourceFile(classId); const classNode = ast.getClassNode(node, classId); const mixins = ast.getClassMixins(classNode); const injectors = ast.getInjectors(classNode); const properties = ast.getProperties(classNode); const methods = ast.getMethods(classNode); const observables = ast.getObservables(classNode); properties.forEach((property) => { if (observables.observable.includes(property.name)) { property.type = "observable"; } else if (observables.computed.includes(property.name)) { property.type = "computed"; } }); methods.forEach((property) => { if (observables.action.includes(property.name)) { property.type = "action"; } }); return { classId, mixins, injectors, properties, methods, }; } private async getClass(fileName: string): Promise<ExtractedClass> { const classId = this.getClassIdFromFileName(fileName); return this.extractClass(classId); } private getClassIdFromFileName(fileName: string) { return path.dirname(fileName).split(path.sep).pop()!; } private async getClasses() { const appDir = path.resolve(APP_DIR)!; try { const directories = (await fs.promises.readdir(appDir)).filter( (file) => file !== "index.ts" && !file.endsWith(".ts") ); return directories.reduce<{ [key: string]: ExtractedClass; }>((aggr, directory) => { const classId = directory; aggr[classId] = this.extractClass(classId); return aggr; }, {}); } catch { return {}; } } /* This is where we map files to nodes and their metadata. Things like position and the ID of the node. 
*/ async addMetadata({ classId, x, y, }: { classId: string; x: number; y: number; }) { this.metadata[classId] = { x, y, }; this.writeMetadata(); } async writeMetadata() { const file = path.resolve(CONFIGURATION_DIR, "metadata.json")!; await this.writePrettyFile(file, JSON.stringify(this.metadata, null, 2)); } /* This method writes the initial file content */ async writeClass(classId: string) { const file = path.resolve(APP_DIR, classId, "index.ts")!; await this.writeClassToEntryFile(classId); await this.writePrettyFile( file, `import { Feature } from 'reactive-app' export interface ${classId} extends Feature {} export class ${classId} { static mixins = ["Feature"]; }` ); } private async writeClassToEntryFile(classId: string) { const sourceFile = this.getAppSourceFile("index"); sourceFile.addImportDeclaration({ moduleSpecifier: `./${classId}`, namedImports: [classId], }); sourceFile .getVariableDeclaration("container") ?.getInitializer() ?.transform((traversal) => { const node = traversal.visitChildren(); if ( ts.isObjectLiteralExpression(node) && ts.isNewExpression(node.parent) && node.parent.arguments![0] === node ) { return ts.factory.createObjectLiteralExpression( [ ...node.properties, ts.factory.createShorthandPropertyAssignment(classId, undefined), ], undefined ); } return node; }); sourceFile.saveSync(); } /* This method adds injections. The type of injection will be part of the payload, either "singleton" or "factory" */ async inject({ fromClassId, toClassId, asFactory, }: { fromClassId: string; toClassId: string; asFactory: boolean; }) { const sourceFile = this.getAppSourceFile(toClassId); ast.addImportDeclaration(sourceFile, LIBRARY_IMPORT, "TFeature"); ast.addImportDeclaration( sourceFile, `../${fromClassId}`, fromClassId, true ); const classNode = sourceFile.getClass(toClassId); if (!classNode) { throw new Error("Can not find class node"); } const name = asFactory ? 
`create${fromClassId}` : fromClassId[0].toLocaleLowerCase() + fromClassId.substr(1); classNode.insertProperty(1, { name, hasExclamationToken: true, isReadonly: true, type: `TFeature<typeof ${fromClassId}>`, trailingTrivia: writeLineBreak, }); ast.updateInjectFeatures(classNode, (config) => { config.addProperty({ name, kind: StructureKind.PropertyAssignment, initializer: `"${fromClassId}"`, }); }); sourceFile.saveSync(); } async removeInjection(fromClassId: string, toClassId: string) { const sourceFile = this.getAppSourceFile(toClassId); ast.removeImportDeclaration(sourceFile, `../${fromClassId}`); const classNode = sourceFile.getClass(toClassId); if (!classNode) { throw new Error("Can not find class node"); } classNode .getProperty((property) => { const name = property.getName(); return ( name === this.getInjectName(fromClassId) || name === this.getInjectFactoryName(fromClassId) ); }) ?.remove(); ast.updateInjectFeatures(classNode, (config) => { const property = config.getProperty((property) => { if (!Node.isPropertyAssignment(property)) { return false; } const initializer = property.getInitializer(); if (!Node.isStringLiteral(initializer)) { return false; } return JSON.parse(initializer.getText()) === fromClassId; }); property?.remove(); }); sourceFile.saveSync(); } async toggleMakeObservableProperty( classId: string, name: string, value?: "observable" | "computed" | "action" ) { const sourceFile = this.getAppSourceFile(classId); const classNode = sourceFile.getClass(classId)!; ast.updateMakeObservable(classNode, (config) => { if (value) { config.addProperty({ name, kind: StructureKind.PropertyAssignment, initializer: `"${value}"`, }); } else { const property = config.getProperty(name); property?.remove(); } }); sourceFile.saveSync(); } async toggleMixin(classId: string, mixin: Mixin) { const sourceFile = this.getAppSourceFile(classId); switch (mixin) { case "View": case "Factory": ast.toggleImportDeclaration(sourceFile, LIBRARY_IMPORT, mixin); ast.toggleMixinInterface(sourceFile, classId, mixin); ast.toggleMixin(sourceFile, classId, mixin); break; case "StateMachine": ast.toggleImportDeclaration(sourceFile, LIBRARY_IMPORT, mixin); ast.toggleMixinInterface( sourceFile, classId, "StateMachine<TContext, TEvent>" ); const contextType = sourceFile.getTypeAlias("TContext"); const messageType = sourceFile.getTypeAlias("TEvent"); const classInterface = sourceFile.getInterface(classId); const clas = sourceFile.getClass(classId)!; const state = clas.getProperty("state"); const onMessage = clas.getMethod("onMessage"); if (state && onMessage && contextType && messageType) { state.remove(); contextType.remove(); messageType.remove(); onMessage.remove(); } else { const interfaceNodeIndex = classInterface!.getChildIndex(); sourceFile.insertTypeAlias(interfaceNodeIndex, { name: "TEvent", isExported: true, type: '{ type: "SOMETHING_HAPPENED" }', trailingTrivia: writeLineBreak, }); sourceFile.insertTypeAlias(interfaceNodeIndex, { name: "TContext", isExported: true, type: '{ state: "FOO" } | { state: "BAR" }', }); clas.addProperty({ name: "context", type: "TContext", initializer: `{ state: "FOO" }`, }); const onEvent = clas.addMethod({ name: "onEvent", parameters: [ { name: "event", type: "TEvent", }, ], statements: ` return this.transition(this.context, event, { FOO: { SOMETHING_HAPPENED: () => ({ state: "BAR" }) }, BAR: { SOMETHING_HAPPENED: () => ({ state: "FOO" }) } }) `, trailingTrivia: writeLineBreak, }); onEvent.toggleModifier("protected", true); } ast.toggleMixin(sourceFile, classId, mixin); 
ast.updateMakeObservable(clas, (config) => { config.addProperty({ name: "context", initializer: '"observable"', kind: StructureKind.PropertyAssignment, }); }); break; } sourceFile.saveSync(); } async deleteClass(classId: string) { const directory = path.resolve(APP_DIR, classId); await fs.promises.rmdir(directory, { recursive: true }); } async renameClass(fromClassId: string, toClassId: string) { await this.addMetadata({ classId: toClassId, ...this.metadata[fromClassId], }); const fromClassPath = path.resolve(APP_DIR, fromClassId, "index.ts"); const toClassPath = path.resolve(APP_DIR, toClassId, "index.ts"); const fs = this.project.getFileSystem(); const contents = fs.readFileSync(fromClassPath); const sourceFile = this.project.createSourceFile(toClassPath, contents); const classDefinition = sourceFile.getClass(fromClassId)!; const classInterface = sourceFile.getInterface(fromClassId)!; classDefinition.rename(toClassId); classInterface.rename(toClassId); fs.mkdirSync(path.resolve(APP_DIR, toClassId)); fs.writeFileSync(toClassPath, sourceFile.print()); await this.writeClassToEntryFile(toClassId); await this.deleteClass(fromClassId); } async initialize(listeners: { onClassChange: (e: ExtractedClass) => void; onClassCreate: (e: ExtractedClass) => void; onClassDelete: (name: string) => void; }) { await this.ensureConfigurationDir(); await this.ensureAppDir(); await this.ensureContainerEntry(); this.classes = await this.getClasses(); this.filesWatcher = chokidar.watch(`${path.resolve(APP_DIR)}/*/index.ts`, { ignoreInitial: true, }); this.filesWatcher.on("all", async (eventType, fileName) => { if (eventType === "change") { const updatedClass = await this.getClass(fileName); this.classes[updatedClass.classId] = updatedClass; listeners.onClassChange(updatedClass); } else if (eventType === "add") { const createdClass = await this.getClass(fileName); this.classes[createdClass.classId] = createdClass; listeners.onClassCreate(createdClass); } else if (eventType === "unlink") { const classId = this.getClassIdFromFileName(fileName); delete this.classes[classId]; delete this.metadata[classId]; const file = path.resolve(CONFIGURATION_DIR, "metadata.json")!; await this.writePrettyFile( file, JSON.stringify(this.metadata, null, 2) ); const sourceFile = this.getAppSourceFile("index"); ast.removeImportDeclaration(sourceFile, `./${classId}`); sourceFile .getVariableDeclaration("container") ?.getInitializer() ?.transform((traversal) => { const node = traversal.visitChildren(); if ( ts.isObjectLiteralExpression(node) && ts.isNewExpression(node.parent) && node.parent.arguments![0] === node ) { return ts.factory.createObjectLiteralExpression( node.properties.filter( (property) => !property.name || !ts.isIdentifier(property.name) || property.name.escapedText !== classId ), undefined ); } return node; }); sourceFile.saveSync(); listeners.onClassDelete(classId); } }); } getMetadata() { return this.metadata; } dispose() { this.filesWatcher?.close(); } }
private async writePrettyFile(fileName: string, content: string) { try {
random_line_split
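The split line in this record belongs to writePrettyFile, which formats content with prettier before writing it and chooses the parser from the file extension. A simplified sketch of that idea (using mkdir with the recursive option instead of the try/catch in the source, and an empty prettier config):

```typescript
import * as fs from "fs";
import * as path from "path";
import * as prettier from "prettier";

// Format content with prettier, then write it, creating the directory first.
async function writeFormatted(fileName: string, content: string) {
  await fs.promises.mkdir(path.dirname(fileName), { recursive: true });
  const formatted = await prettier.format(content, {
    parser: path.extname(fileName) === ".json" ? "json" : "typescript",
  });
  await fs.promises.writeFile(fileName, formatted);
}
```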
specs.py
""" Specs for the various files in the CSS 3.0 schema Each tuple of tuples defines the name, storage type, external format, and position of each field. The schema was taken from here: http://jkmacc-lanl.github.io/pisces/data/Anderson1990.pdf """ affiliation = ( ("net", 1, "c8", "aS", 1, 8), ("sta", 2, "c6", "a6", 10, 15), ("lddate", 3, "date", "a17", 17, 33), ) arrival = ( ("sta", 1, "c6", "a6", 1, 6), ("time", 2, "f8", "fl7.5", 8, 24), ("arid", 3, "i4", "i8", 26, 33), ("jdate", 4, "i4", "i8", 35, 42), ("stassid", 5, "i4", "i8", 44, 51), ("chanid", 6, "i4", "i8", 53, 60), ("chan", 7, "c8", "a8", 62, 69), ("iphase", 8, "c8", "a8", 71, 78), ("stype", 9, "c1", "a1", 80, 80), ("deltim", 10, "f4", "f6.3", 82, 87), ("azimuth", 11, "f4", "f7.2", 89, 95), ("delaz", 12, "f4", "f7.2", 97, 103), ("slow", 13, "f4", "f7.2", 105, 111), ("delslo", 14, "f4", "f7.2", 113, 119), ("ema", 15, "f4", "f7.2", 121, 127), ("rect", 16, "f4", "f7.3", 129, 135), ("amp", 17, "f4", "flO.l", 137, 146), ("per", 18, "f4", "f7.2", 148, 154), ("logat", 19, "f4", "f7.2", 156, 162), ("clip", 20, "c1", "al", 164, 164), ("fm", 21, "c2", "a2", 166, 167), ("snr", 22, "f4", "fl0.2", 169, 178), ("qual", 23, "c1", "a1", 180, 180), ("auth", 24, "c15", "a15", 182, 196), ("commid", 25, "i4", "i8", 198, 205), ("lddate", 26, "date", "a17", 207, 223), ) assoc = ( ("arid", 1, "i4", "i8", 1, 8), ("orid", 2, "i4", "i8", 10, 17), ("sta", 3, "c6", "a6", 19, 24), ("phase", 4, "c8", "a8", 26, 33), ("belief", 5, "f4", "f4.2", 35, 38), ("delta", 6, "f4", "f8.3", 40, 47), ("seaz", 7, "f4", "f7.2", 49, 55), ("esaz", 8, "f4", "f7.2", 57, 63), ("timeres", 9, "f4", "f8.3", 65, 72), ("timedef", 10, "c1", "a1", 74, 74), ("azres", 11, "f4", "f7.1", 76, 82), ("azdef", 12, "cl", "al", 84, 84), ("stores", 13, "f4", "f7.2", 86, 92), ("slodef", 14, "c1", "a1", 94, 94), ("emares", 15, "f4", "f7.1", 96, 102), ("wgt", 16, "f4", "f6.3", 104, 109), ("vmodel", 17, "c15", "al5", 111, 125), ("conunid", 18, "i4", "ig", 127, 134),
event = ( ("evid", 1, "i4", "i8", 1, 8), ("evname", 2, "c15", "a15", 10, 24), ("prefor", 3, "i4", "i8", 26, 33), ("auth", 4, "c15", "al5", 35, 49), ("commid", 5, "i4", "i8", 51, 58), ("lddate", 6, "date", "al7", 60, 76), ) gregion = ( ("gm", 1, "i4", "i8", 1, 8), ("gmame", 2, "c40", "a40", 10, 49), ("lddate", 3, "date", "a17", 51, 67), ) instrument = ( ("inid", 1, "i4", "i8", 1, 8), ("insname", 2, "c50", "a50", 10, 59), ("instype", 3, "c6", "a6", 61, 66), ("band", 4, "c1", "a1", 68, 68), ("digital", 5, "c1", "a1", 70, 70), ("sarnprate", 6, "f4", "fll.7", 72, 82), ("ncalib", 7, "f4", "fl6.6", 84, 99), ("ncalper", 8, "f4", "fl6.6", 101, 116), ("dir", 9, "c64", "a64", 118, 181), ("dfile", 10, "c32", "a32", 183, 214), ("rsptype", 11, "c6", "a6", 216, 221), ("lddate", 12, "date", "a17", 223, 239), ) lastid = ( ("keyname", 1, "c15", "a15", 1, 15), ("keyvalue", 2, "i4", "i8", 17, 24), ("lddate", 3, "date", "a17", 26, 42), ) netmag = ( ("magid", 1, "i4", "i8", 1, 8), ("net", 2, "c8", "a8", 10, 17), ("orid", 3, "i4", "i8", 19, 26), ("evid", 4, "i4", "i8", 28, 35), ("magtype", 5, "c6", "a6", 37, 42), ("nsta", 6, "i4", "i8", 44, 51), ("magnitude", 7, "f4", "f7.2", 53, 59), ("uncertainty", 8, "f4", "f7.2", 61, 67), ("auth", 9, "c15", "a15", 69, 83), ("commid", 10, "i4", "i8", 85, 92), ("lddate", 11, "date", "al7", 94, 110), ) network = ( ("net", 1, "c8", "a8", 1, 8), ("netname", 2, "c80", "a80", 10, 89), ("nettype", 3, "c4", "a4", 91, 94), ("auth", 4, "ciS", "al5", 96, 110), ("corrunid", 5, "i4", "i8", 112, 119), ("lddate", 6, "date", "a17", 121, 137), ) origerr = ( ("orid", 1, "i4", "i8", 1, 8), ("sxx", 2, "f4", "fl5.4", 10, 24), ("syy", 3, "f4", "f15.4", 26, 40), ("szz", 4, "f4", "f15.4", 42, 56), ("stt", 5, "f4", "fl5.4", 58, 72), ("sxy", 6, "f4", "f15.4", 74, 88), ("sxz", 7, "f4", "f15.4", 90, 104), ("syz", 8, "f4", "f15.4", 106, 120), ("stx", 9, "f4", "f15.4", 122, 136), ("sty", 10, "f4", "f15.4", 138, 152), ("stz", 11, "f4", "fl5.4", 154, 168), ("sdobs", 12, "f4", "f9.4", 170, 178), ("smajax", 13, "f4", "!9.4", 180, 188), ("sminax", 14, "f4", "f9.4", 190, 198), ("strike", 15, "f4", "f6.2", 200, 205), ("sdepth", 16, "f4", "f9.4", 207, 215), ("stime", 17, "f4", "f8.2", 217, 224), ("eonf", 18, "f4", "f5.3", 226, 230), ("commid", 19, "i4", "i8", 232, 239), ("lddate", 20, "date", "a17", 241, 257), ) origin = ( ("lat", 1, "f4", "f9.4", 1, 9), ("lon", 2, "f4", "f9.4", 11, 19), ("depth", 3, "f4", "f9.4", 21, 29), ("time", 4, "f8", "fl7.5", 31, 47), ("orid", 5, "i4", "i8", 49, 56), ("evid", 6, "i4", "i8", 58, 65), ("jdate", 7, "i4", "i8", 67, 74), ("nass", 8, "i4", "i4", 76, 79), ("ndef", 9, "i4", "i4", 81, 84), ("ndp", 10, "i4", "i4", 86, 89), ("gm", 11, "i4", "i8", 91, 98), ("sm", 12, "i4", "i8", 100, 107), ("etype", 13, "c7", "a7", 109, 115), ("depdp", 14, "f4", "f9.4", 117, 125), ("dtype", 15, "c1", "a1", 127, 127), ("mb", 16, "f4", "f7.2", 129, 135), ("mbid", 17, "i4", "i8", 137, 144), ("ms", 18, "f4", "f7.2", 146, 152), ("msid", 19, "i4", "i8", 154, 161), ("ml", 20, "f4", "f7.2", 163, 169), ("mlid", 21, "i4", "i8", 171, 178), ("algorithm", 22, "ciS", "a15", 180, 194), ("auth", 23, "c15", "a15", 196, 210), ("conmlid", 24, "i4", "i8", 212, 219), ("lddate", 25, "date", "a17", 221, 237), ) remark = ( ("commid", 1, "i4", "i8", 1, 8), ("lineno", 2, "i4", "i8", 10, 17), ("remark", 3, "c80", "a80", 19, 98), ("lddate", 4, "date", "a17", 100, 116), ) sensor = ( ("sta", 1, "c6", "a6", 1, 6), ("chan", 2, "c8", "a8", 8, 15), ("time", 3, "f8", "fl7.5", 17, 33), ("endtime", 4, "f8", "fl7.5", 35, 51), ("inid", 5, 
"i4", "i8", 53, 60), ("chanid", 6, "i4", "i8", 62, 69), ("jdate", 7, "i4", "i8", 71, 78), ("calratio", 8, "f4", "fl6.6", 80, 95), ("calper", 9, "f4", "fl6.6", 97, 112), ("tshift", 10, "f4", "f6.2", 114, 119), ("instant", 11, "c1", "a1", 121, 121), ("lddatc", 12, "date", "a17", 123, 139), ) site = ( ("sta", 1, "c6", "a6", 1, 6), ("ondate", 2, "i4", "i8", 8, 15), ("offdate", 3, "i4", "i8", 17, 24), ("lat", 4, "f4", "f9.4", 26, 34), ("lon", 5, "f4", "f9.4", 36, 44), ("elev", 6, "f4", "f9.4", 46, 54), ("staname", 7, "c50", "a50", 56, 105), ("statype", 8, "c4", "a4", 107, 110), ("refsta", 9, "c6", "a6", 112, 117), ("dnorth", 10, "f4", "t9.4", 119, 127), ("deast", 11, "f4", "t9.4", 129, 137), ("lddate", 12, "date", "a17", 139, 155), ) sitechan = ( ("sta", 1, "c6", "a6", 1, 6), ("chan", 2, "c8", "a8", 8, 15), ("ondate", 3, "i4", "i8", 17, 24), ("chanid", 4, "i4", "i8", 26, 33), ("offdate", 5, "i4", "i8", 35, 42), ("ctype", 6, "c4", "a4", 44, 47), ("edepth", 7, "f4", "f9.4", 49, 57), ("hang", 8, "f4", "f6.1", 59, 64), ("vang", 9, "f4", "f6.1", 66, 71), ("descrip", 10, "cSO", "a50", 73, 122), ("lddate", 11, "date", "al7", 124, 140), ) sregion = ( ("sm", 1, "i4", "i8", 1, 8), ("smame", 2, "c40", "a40", 10, 49), ("lddate", 3, "date", "a17", 51, 67), ) stamag = ( ("magid", 1, "i4", "i8", 1, 8), ("sta", 2, "c6", "a6", 10, 15), ("arid", 3, "i4", "i8", 17, 24), ("orid", 4, "i4", "i8", 26, 33), ("evid", 5, "i4", "i8", 35, 42), ("phase", 6, "c8", "a8", 44, 51), ("magtype", 7, "c6", "a6", 53, 58), ("magnitude", 8, "f4", "f7.2", 60, 66), ("uncertainty", 9, "f4", "f7.2", 68, 74), ("auth", 10, "c15", "a15", 76, 90), ("corrunid", 11, "i4", "i8", 92, 99), ("lddate", 12, "date", "a17", 101, 117), ) stassoc = ( ("stassid", 1, "i4", "i8", 1, 8), ("sta", 2, "c6", "a6", 10, 15), ("etype", 3, "c7", "a7", 17, 23), ("location", 4, "c32", "a32", 25, 56), ("dist", 5, "f4", "f7.2", 58, 64), ("azimuth", 6, "f4", "f7.2", 66, 72), ("Iat", 7, "f4", "f9.4", 74, 82), ("Ion", 8, "f4", "f9.4", 84, 92), ("depth", 9, "f4", "f9.4", 94, 102), ("time", 10, "f8", "fl7.5", 104, 120), ("imb", 11, "f4", "f7.2", 122, 128), ("ims", 12, "f4", "f7.2", 130, 136), ("iml", 13, "f4", "f7.2", 138, 144), ("auth", 14, "c15", "a15", 146, 160), ("corrunid", 15, "i4", "i8", 162, 169), ("lddate", 16, "date", "a17", 171, 187), ) wfdisc = ( ("sta", 1, "c6", "a6", 1, 11), ("chan", 2, "c8", "a8", 8, 15), ("time", 3, "f8", "fl7.5", 17, 33), ("wfid", 4, "i4", "i8", 35, 42), ("chanid", 5, "i4", "i8", 44, 51), ("jdate", 6, "i4", "i8", 53, 60), ("endtime", 7, "f8", "fl7.5", 62, 78), ("nsamp", 8, "i4", "i8", 80, 87), ("sarnprate", 9, "f4", "fll.7", 89, 99), ("calib", 10, "f4", "fl6.6", 101, 116), ("calper", 11, "f4", "fl6.6", 118, 133), ("instype", 12, "c6", "a6", 135, 140), ("segtype", 13, "c1", "a1", 142, 142), ("datatype", 14, "c2", "a2", 144, 145), ("clip", 15, "cl", "a1", 147, 147), ("dir", 16, "c64", "a64", 149, 212), ("dfile", 17, "c32", "a32", 214, 245), ("foff", 18, "i4", "ilO", 247, 256), ("commid", 19, "i4", "i8", 258, 265), ("lddate", 20, "date", "a17", 267, 283), ) wftag = ( ("tagname", 1, "c8", "a8", 1, 8), ("tagid", 2, "i4", "i8", 10, 17), ("wfid", 3, "i4", "i8", 19, 26), ("lddate", 4, "date", "a17", 28, 44), ) wftape = ( ("sta", 1, "c6", "a6", 1, 6), ("chan", 2, "c8", "a8", 8, 15), ("time", 3, "t13", "fl7.5", 17, 33), ("wfid", 4, "i4", "i8", 35, 42), ("chanid", 5, "i4", "i8", 44, 51), ("jdate", 6, "i4", "i8", 53, 60), ("endtime", 7, "f8", "fl7.5", 62, 78), ("nsamp", 8, "i4", "i8", 80, 87), ("samprate", 9, "f4", "fll.7", 89, 99), ("calib", 10, "f4", 
"fl6.6", 101, 116), ("calper", 11, "f4", "fl6.6", 118, 133), ("instype", 12, "c6", "a6", 135, 140), ("segtype", 13, "c1", "a1", 142, 142), ("datatype", 14, "c2", "a2", 144, 145), ("clip", 15, "c1", "a1", 147, 147), ("dir", 16, "c64", "a64", 149, 212), ("dfi1e", 17, "c32", "a32", 214, 245), ("volname", 18, "c6", "a6", 247, 252), ("tapefile", 19, "i4", "iS", 254, 258), ("tapeblock", 20, "i4", "i5", 260, 264), ("commid", 21, "i4", "i8", 266, 273), ("lddate", 22, "date", "a17", 275, 291), )
("lddate", 19, "date", "al7", 136, 152), )
random_line_split
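The tuples above follow the CSS 3.0 flat-file convention: each entry gives a field's name, ordinal position, internal storage type, external text format, and 1-based start/end character columns. As a rough illustration of how those (start, end) pairs can be used, here is a small, self-contained Go sketch that slices a few site-table fields out of one fixed-width record; the column struct, the cut helper, and the sample record line are hypothetical and not part of any code in this file.

```go
package main

import (
	"fmt"
	"strings"
)

// column mirrors one entry in the schema tuples above: a field name plus the
// 1-based, inclusive start/end character positions of that field in a
// fixed-width record. The struct, helper, and sample record are hypothetical.
type column struct {
	name       string
	start, end int
}

// cut slices one field out of a fixed-width line, tolerating short lines.
func cut(line string, c column) string {
	if c.start > len(line) {
		return ""
	}
	end := c.end
	if end > len(line) {
		end = len(line)
	}
	return strings.TrimSpace(line[c.start-1 : end])
}

func main() {
	// A few columns of the site table, copied from the tuple definitions above.
	site := []column{
		{"sta", 1, 6}, {"ondate", 8, 15}, {"lat", 26, 34}, {"lon", 36, 44},
	}
	// Hypothetical site record, padded to match the column layout above.
	rec := "KONO    1987001       -1   59.6491    9.5982"
	for _, c := range site {
		fmt.Printf("%-8s %q\n", c.name, cut(rec, c))
	}
}
```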
traverser.go
// Copyright 2017-2018, Square, Inc. package chain import ( "fmt" "sync" "time" log "github.com/Sirupsen/logrus" "github.com/square/spincycle/job-runner/runner" "github.com/square/spincycle/proto" rm "github.com/square/spincycle/request-manager" "github.com/square/spincycle/retry" ) var ( // Returned when Stop is called but the chain has already been suspended. ErrShuttingDown = fmt.Errorf("chain not stopped because traverser is shutting down") ) const ( // Default timeout used by traverser factory for traverser's stopTimeout // and sendTimeout. defaultTimeout = 10 * time.Second // Number of times to attempt sending a job log to the RM. jobLogTries = 3 // Time to wait between attempts to send a job log to RM. jobLogRetryWait = 500 * time.Millisecond // Number of times to attempt sending chain state / SJC to RM in Reaper. reaperTries = 5 // Time to wait between tries to send chain state/SJC to RM. reaperRetryWait = 500 * time.Millisecond ) // A Traverser provides the ability to run a job chain while respecting the // dependencies between the jobs. type Traverser interface { // Run traverses a job chain and runs all of the jobs in it. It starts by // running the first job in the chain, and then, if the job completed, // successfully, running its adjacent jobs. This process continues until there // are no more jobs to run, or until the Stop method is called on the traverser. Run() // Stop makes a traverser stop traversing its job chain. It also sends a stop // signal to all of the jobs that a traverser is running. // // It returns an error if it fails to stop all running jobs. Stop() error // Status gets the status of all running and failed jobs. Since a job can only // run when all of its ancestors have completed, the state of the entire chain // can be inferred from this information - every job in the chain before a // running or failed job must be complete, and every job in the chain after a // running or failed job must be pending. // // It returns an error if it fails to get the status of all running jobs. Status() (proto.JobChainStatus, error) } // A TraverserFactory makes a new Traverser. type TraverserFactory interface { Make(*proto.JobChain) (Traverser, error) MakeFromSJC(*proto.SuspendedJobChain) (Traverser, error) } type traverserFactory struct { chainRepo Repo rf runner.Factory rmc rm.Client shutdownChan chan struct{} } func NewTraverserFactory(cr Repo, rf runner.Factory, rmc rm.Client, shutdownChan chan struct{}) TraverserFactory { return &traverserFactory{ chainRepo: cr, rf: rf, rmc: rmc, shutdownChan: shutdownChan, } } // Make makes a Traverser for the job chain. The chain is first validated // and saved to the chain repo. func (f *traverserFactory) Make(jobChain *proto.JobChain) (Traverser, error) { // Convert/wrap chain from proto to Go object. chain := NewChain(jobChain, make(map[string]uint), make(map[string]uint), make(map[string]uint)) return f.make(chain) } // MakeFromSJC makes a Traverser from a suspended job chain. func (f *traverserFactory) MakeFromSJC(sjc *proto.SuspendedJobChain) (Traverser, error) { // Convert/wrap chain from proto to Go object. chain := NewChain(sjc.JobChain, sjc.SequenceTries, sjc.TotalJobTries, sjc.LatestRunJobTries) return f.make(chain) } // Creates a new Traverser from a chain. Used for both new and resumed chains. func (f *traverserFactory) make(chain *Chain) (Traverser, error) { // Add chain to repo. This used to save the chain in Redis, if configured, // but now it's only an in-memory map. 
The only functionality it serves is // preventing this JR instance from running the same job chain. if err := f.chainRepo.Add(chain); err != nil { return nil, fmt.Errorf("error adding job chain: %s", err) } // Create and return a traverser for the chain. The traverser is responsible // for the chain: running, cleaning up, removing from repo when done, etc. // And traverser and chain have the same lifespan: traverser is done when // chain is done. cfg := TraverserConfig{ Chain: chain, ChainRepo: f.chainRepo, RunnerFactory: f.rf, RMClient: f.rmc, ShutdownChan: f.shutdownChan, StopTimeout: defaultTimeout, SendTimeout: defaultTimeout, } return NewTraverser(cfg), nil } // -------------------------------------------------------------------------- // type traverser struct { reaperFactory ReaperFactory reaper JobReaper shutdownChan chan struct{} // indicates JR is shutting down runJobChan chan proto.Job // jobs to be run doneJobChan chan proto.Job // jobs that are done doneChan chan struct{} // closed when traverser finishes running stopMux *sync.RWMutex // lock around checks to stopped stopped bool // has traverser been stopped suspended bool // has traverser been suspended chain *Chain chainRepo Repo // stores all currently running chains rf runner.Factory runnerRepo runner.Repo // stores actively running jobs rmc rm.Client logger *log.Entry stopTimeout time.Duration // Time to wait for jobs to stop sendTimeout time.Duration // Time to wait for a job to send on doneJobChan. } type TraverserConfig struct { Chain *Chain ChainRepo Repo RunnerFactory runner.Factory RMClient rm.Client ShutdownChan chan struct{} StopTimeout time.Duration SendTimeout time.Duration } func NewTraverser(cfg TraverserConfig) *traverser { // Include request id in all logging. logger := log.WithFields(log.Fields{"requestId": cfg.Chain.RequestId()}) // Channels used to communicate between traverser + reaper(s) doneJobChan := make(chan proto.Job) runJobChan := make(chan proto.Job) runnerRepo := runner.NewRepo() // needed for traverser + reaper factory reaperFactory := &ChainReaperFactory{ Chain: cfg.Chain, ChainRepo: cfg.ChainRepo, RMClient: cfg.RMClient, RMCTries: reaperTries, RMCRetryWait: reaperRetryWait, Logger: logger, DoneJobChan: doneJobChan, RunJobChan: runJobChan, RunnerRepo: runnerRepo, } return &traverser{ reaperFactory: reaperFactory, logger: logger, chain: cfg.Chain, chainRepo: cfg.ChainRepo, rf: cfg.RunnerFactory, runnerRepo: runnerRepo, shutdownChan: cfg.ShutdownChan, runJobChan: runJobChan, doneJobChan: doneJobChan, doneChan: make(chan struct{}), rmc: cfg.RMClient, stopMux: &sync.RWMutex{}, stopTimeout: cfg.StopTimeout, sendTimeout: cfg.SendTimeout, } } // Run runs all jobs in the chain and blocks until the chain finishes running, is // stopped, or is suspended. func (t *traverser) Run() { t.logger.Infof("chain traverser started") defer t.logger.Infof("chain traverser done") defer t.chainRepo.Remove(t.chain.RequestId()) // Start a goroutine to run jobs. This consumes from the runJobChan. When // jobs are done, they will be sent to the doneJobChan, which the job reapers // consume from. go t.runJobs() // Find all the jobs we can start running. For a new job chain (not suspended), // this'll end up being just the first job in the chain. jobsToRun := t.chain.RunnableJobs() // Add the first jobs to runJobChan. for _, job := range jobsToRun { t.logger.Infof("sending initial job (%s) to runJobChan", job.Id) if t.chain.IsSequenceStartJob(job.Id) { // Starting a sequence, so increment sequence try count. 
t.chain.IncrementSequenceTries(job.Id) seqLogger := t.logger.WithFields(log.Fields{"sequence_id": job.SequenceId, "sequence_try": t.chain.SequenceTries(job.Id)}) seqLogger.Info("starting try of sequence") } t.runJobChan <- job } // Start a goroutine to reap done jobs. The running reaper consumes from // doneJobChan and sends the next jobs to be run to runJobChan. runningReaperChan := make(chan struct{}) t.reaper = t.reaperFactory.MakeRunning() go func() { defer close(runningReaperChan) // indicate reaper is done (see select below) defer close(t.runJobChan) // stop runJobs goroutine t.reaper.Run() }() // Wait for running reaper to be done or traverser to be shut down. select { case <-runningReaperChan: // If running reaper is done because traverser was stopped, we will // wait for Stop() to finish. Otherwise, the chain finished normally // (completed or failed) and we can return right away. // // We don't check if the chain was suspended, since that can only // happen via the other case in this select. t.stopMux.Lock() if !t.stopped { t.stopMux.Unlock() return } t.stopMux.Unlock() case <-t.shutdownChan: // The Job Runner is shutting down. Stop the running reaper and suspend // the job chain, to be resumed later by another Job Runner. t.shutdown() } // Traverser is being stopped or shut down - wait for that to finish before // returning. select { case <-t.doneChan: // Stopped/shutdown successfully - nothing left to do. return case <-time.After(20 * time.Second): // Failed to stop/shutdown in a reasonable amount of time. // Log the failure and return. t.logger.Warnf("stopping or suspending the job chain took too long. Exiting...") return } } // Stop stops the running job chain by switching the running chain reaper for a // stopped chain reaper and stopping all currently running jobs. Stop blocks until // all jobs have finished and the stopped reaper has send the chain's final state // to the RM. func (t *traverser) Stop() error { // Don't do anything if the traverser has already been stopped or suspended. t.stopMux.Lock() defer t.stopMux.Unlock() if t.stopped { return nil } else if t.suspended { return ErrShuttingDown } t.stopped = true t.logger.Infof("stopping traverser and all jobs") // Stop the current reaper and start running a reaper for stopped chains. This // reaper saves jobs' states (but doesn't enqueue any more jobs to run) and // sends the chain's final state to the RM when all jobs have stopped running. t.reaper.Stop() // blocks until running reaper is done stopping stoppedReaperChan := make(chan struct{}) t.reaper = t.reaperFactory.MakeStopped() go func() { defer close(stoppedReaperChan) t.reaper.Run() }() // Stop all job runners in the runner repo. Do this after switching to the // stopped reaper so that when the jobs finish and are sent on doneJobChan, // they are reaped correctly. err := t.stopRunningJobs() if err != nil { // Don't return the error yet - we still want to wait for the stop // reaper to be done. err = fmt.Errorf("traverser was stopped, but encountered an error in the process: %s", err) } // Wait for the stopped reaper to finish. If it takes too long, some jobs // haven't respond quickly to being stopped. Stop waiting for these jobs by // stopping the stopped reaper. select { case <-stoppedReaperChan: case <-time.After(t.stopTimeout): t.logger.Warnf("timed out waiting for jobs to stop - stopping reaper") t.reaper.Stop() } close(t.doneChan) return err } // Status returns the status of currently running jobs in the chain. 
func (t *traverser) Status() (proto.JobChainStatus, error) { t.logger.Infof("getting the status of all running jobs") activeRunners, err := t.runnerRepo.Items() if err != nil { return proto.JobChainStatus{}, err } runningJobs := t.chain.Running() status := make([]proto.JobStatus, len(runningJobs)) i := 0 for jobId, jobStatus := range runningJobs { runner := activeRunners[jobId] if runner == nil { // The job finished between the call to chain.Running() and now, // so it's runner no longer exists in the runner.Repo. jobStatus.Status = "(finished)" } else { jobStatus.Status = runner.Status() } status[i] = jobStatus i++ } jcStatus := proto.JobChainStatus{ RequestId: t.chain.RequestId(), JobStatuses: status, } return jcStatus, nil } // -------------------------------------------------------------------------- // // runJobs loops on the runJobChan, and runs each job that comes through the // channel. When the job is done, it sends the job out through the doneJobChan. func (t *traverser) runJobs()
// sendJL sends a job log to the Request Manager. func (t *traverser) sendJL(job proto.Job, err error) { jLogger := t.logger.WithFields(log.Fields{"job_id": job.Id}) jl := proto.JobLog{ RequestId: t.chain.RequestId(), JobId: job.Id, Name: job.Name, Type: job.Type, Try: t.chain.TotalTries(job.Id), SequenceId: job.SequenceId, SequenceTry: t.chain.SequenceTries(job.Id), StartedAt: 0, // zero because the job never ran FinishedAt: 0, State: job.State, Exit: 1, Error: err.Error(), } if err != nil { jl.Error = err.Error() } // Send the JL to the RM. err = retry.Do(jobLogTries, jobLogRetryWait, func() error { return t.rmc.CreateJL(t.chain.RequestId(), jl) }, nil, ) if err != nil { jLogger.Errorf("problem sending job log (%#v) to the Request Manager: %s", jl, err) return } } // shutdown suspends the running chain by switching the running chain reaper for a // suspended chain reaper and stopping all currently running jobs. Once all jobs // have finished, the suspended reaper informs the RM about the suspended chain by // sending a SuspendedJobChain. // // When a Job Runner is shutting down, all of its traversers are shut down and their // running job chains suspended. The Request Manager can later resume these job // chains by sending them to a running Job Runner instance. func (t *traverser) shutdown() { // Don't do anything if the traverser has already been stopped or suspended. t.stopMux.Lock() defer t.stopMux.Unlock() if t.stopped || t.suspended { return } t.suspended = true t.logger.Info("suspending job chain - stopping all jobs") // Stop the current reaper and start running a reaper for suspended chains. This // reaper saves jobs' states and prepares the chain to be resumed later, but // doesn't enqueue any more jobs to run. When all jobs have stopped running, // it sends the SuspendedJobChain to the RM (or the final state if the // chain was completed or failed). t.reaper.Stop() // blocks until running reaper is done stopping suspendedReaperChan := make(chan struct{}) t.reaper = t.reaperFactory.MakeSuspended() go func() { defer close(suspendedReaperChan) t.reaper.Run() }() // Stop all job runners in the runner repo. Do this after switching to the // suspended reaper so that when the jobs finish and are sent on doneJobChan, // they are reaped correctly. err := t.stopRunningJobs() if err != nil { t.logger.Errorf("problem suspending job chain: %s", err) } // Wait for suspended reaper to finish. If it takes too long, some jobs // haven't respond quickly to being stopped. Stop waiting for these jobs by // stopping the suspended reaper. select { case <-suspendedReaperChan: case <-time.After(t.stopTimeout): t.logger.Warnf("timed out waiting for jobs to stop - stopping reaper") t.reaper.Stop() } close(t.doneChan) } // stopRunningJobs stops all currently running jobs. func (t *traverser) stopRunningJobs() error { // Get all of the active runners for this traverser from the repo. Only runners // that are in the repo will be stopped. activeRunners, err := t.runnerRepo.Items() if err != nil { return fmt.Errorf("problem retrieving job runners from repo: %s", err) } // Call Stop on each runner. Use goroutines in case some jobs don't return from // Stop() quickly. 
var runnerWG sync.WaitGroup hadError := false for jobId, activeRunner := range activeRunners { runnerWG.Add(1) go func(runner runner.Runner) { defer runnerWG.Done() err := runner.Stop() if err != nil { t.logger.Errorf("problem stopping job runner (job id = %s): %s", jobId, err) hadError = true } }(activeRunner) } // If there was an error when stopping at least one of the jobs, return it. runnerWG.Wait() if hadError { return fmt.Errorf("problem stopping one or more job runners - see logs for more info") } return nil }
{ // Run all jobs that come in on runJobChan. The loop exits when runJobChan // is closed after the running reaper finishes. for job := range t.runJobChan { // Explicitly pass the job into the func, or all goroutines would share // the same loop "job" variable. go func(job proto.Job) { jLogger := t.logger.WithFields(log.Fields{"job_id": job.Id, "sequence_id": job.SequenceId, "sequence_try": t.chain.SequenceTries(job.Id)}) // Always send the finished job to doneJobChan to be reaped. If the // reaper isn't reaping any more jobs (if this job took too long to // finish after being stopped), sending to doneJobChan won't be // possible - timeout after a while so we don't leak this goroutine. defer func() { select { case t.doneJobChan <- job: case <-time.After(t.sendTimeout): jLogger.Warnf("timed out sending job to doneJobChan") } // Remove the job's runner from the repo (if it was ever added) // AFTER sending it to doneJobChan. This avoids a race condition // when the stopped + suspended reapers check if the runnerRepo // is empty. t.runnerRepo.Remove(job.Id) }() // Retrieve job and sequence try info from the chain for the Runner. sequenceTries := t.chain.SequenceTries(job.Id) // used in job logs totalJobTries := t.chain.TotalTries(job.Id) // used in job logs // When resuming a stopped job, only try the job // [allowed tries - tries before being stopped] times, so the total // number of times the job is tried (during this sequence try) stays // correct. The job's last try (the try it was stopped on) doesn't // count, so subtract 1 if it was tried at least once before // being stopped. triesBeforeStopped := uint(0) if job.State == proto.STATE_STOPPED { triesBeforeStopped = t.chain.LatestRunTries(job.Id) if triesBeforeStopped > 0 { triesBeforeStopped-- } } runner, err := t.rf.Make(job, t.chain.RequestId(), totalJobTries, triesBeforeStopped, sequenceTries) if err != nil { // Problem creating the job runner - treat job as failed. // Send a JobLog to the RM so that it knows this job failed. job.State = proto.STATE_FAIL err = fmt.Errorf("problem creating job runner: %s", err) t.sendJL(job, err) return } // Add the runner to the repo. Runners in the repo are used // by the Status, Stop, and shutdown methods on the traverser. t.runnerRepo.Set(job.Id, runner) // Bail out if Stop was called or traverser shut down. It is // important that this check happens AFTER the runner is added to // the repo. Otherwise if Stop gets called after this check but // before the runner is added to the repo, there will be nothing to // stop the job from running. // // We don't lock stopMux around this check and runner.Run. It's okay if // there's a small chance for the runner to be run after the traverser // gets stopped or shut down - it'll just return after trying the job // once. if t.stopped { job.State = proto.STATE_STOPPED // Send a JL to the RM so that it knows this job was stopped. // Add 1 to the total job tries, since this is used for keeping // job logs unique. t.chain.AddJobTries(job.Id, 1) err = fmt.Errorf("not starting job because traverser has already been stopped") t.sendJL(job, err) return } else if t.suspended { job.State = proto.STATE_STOPPED // Don't send a JL because this job will be resumed later, // and don't include this try in the total # of tries (only // set job tries for the latest run). t.chain.SetLatestRunJobTries(job.Id, 1) return } // Run the job. This is a blocking operation that could take a long time. 
jLogger.Infof("running job") t.chain.SetJobState(job.Id, proto.STATE_RUNNING) ret := runner.Run(job.Data) t.chain.AddJobTries(job.Id, ret.Tries) job.State = ret.FinalState }(job) } }
identifier_body
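The runJobs body above consumes jobs from runJobChan, runs each one in its own goroutine, and reports completion on doneJobChan with a send timeout so the goroutine cannot leak once reaping stops. The sketch below is a stripped-down, stdlib-only illustration of that channel pattern; the job type and runAll function are invented for the example and are not spincycle code.

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// job and runAll are invented for this illustration; they are not spincycle types.
type job struct{ id string }

// runAll mimics the shape of runJobs above: consume work from runChan, run each
// item in its own goroutine, and report completion on doneChan, giving up after
// sendTimeout so a goroutine cannot leak if nothing is reaping anymore.
func runAll(runChan <-chan job, doneChan chan<- job, sendTimeout time.Duration) {
	var wg sync.WaitGroup
	for j := range runChan { // exits when runChan is closed
		wg.Add(1)
		go func(j job) { // pass j explicitly so each goroutine gets its own copy
			defer wg.Done()
			time.Sleep(10 * time.Millisecond) // stand-in for runner.Run
			select {
			case doneChan <- j:
			case <-time.After(sendTimeout):
				fmt.Printf("timed out sending job %s to doneChan\n", j.id)
			}
		}(j)
	}
	wg.Wait()
}

func main() {
	runChan := make(chan job)
	doneChan := make(chan job)
	go runAll(runChan, doneChan, time.Second)
	go func() {
		for _, id := range []string{"a", "b", "c"} {
			runChan <- job{id: id}
		}
		close(runChan)
	}()
	for i := 0; i < 3; i++ {
		fmt.Println("reaped", (<-doneChan).id)
	}
}
```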
traverser.go
// Copyright 2017-2018, Square, Inc. package chain import ( "fmt" "sync" "time" log "github.com/Sirupsen/logrus" "github.com/square/spincycle/job-runner/runner" "github.com/square/spincycle/proto" rm "github.com/square/spincycle/request-manager" "github.com/square/spincycle/retry" ) var ( // Returned when Stop is called but the chain has already been suspended. ErrShuttingDown = fmt.Errorf("chain not stopped because traverser is shutting down") ) const ( // Default timeout used by traverser factory for traverser's stopTimeout // and sendTimeout. defaultTimeout = 10 * time.Second // Number of times to attempt sending a job log to the RM. jobLogTries = 3 // Time to wait between attempts to send a job log to RM. jobLogRetryWait = 500 * time.Millisecond // Number of times to attempt sending chain state / SJC to RM in Reaper. reaperTries = 5 // Time to wait between tries to send chain state/SJC to RM. reaperRetryWait = 500 * time.Millisecond ) // A Traverser provides the ability to run a job chain while respecting the // dependencies between the jobs. type Traverser interface { // Run traverses a job chain and runs all of the jobs in it. It starts by // running the first job in the chain, and then, if the job completed, // successfully, running its adjacent jobs. This process continues until there // are no more jobs to run, or until the Stop method is called on the traverser. Run() // Stop makes a traverser stop traversing its job chain. It also sends a stop // signal to all of the jobs that a traverser is running. // // It returns an error if it fails to stop all running jobs. Stop() error // Status gets the status of all running and failed jobs. Since a job can only // run when all of its ancestors have completed, the state of the entire chain // can be inferred from this information - every job in the chain before a // running or failed job must be complete, and every job in the chain after a // running or failed job must be pending. // // It returns an error if it fails to get the status of all running jobs. Status() (proto.JobChainStatus, error) } // A TraverserFactory makes a new Traverser. type TraverserFactory interface { Make(*proto.JobChain) (Traverser, error) MakeFromSJC(*proto.SuspendedJobChain) (Traverser, error) } type traverserFactory struct { chainRepo Repo rf runner.Factory rmc rm.Client shutdownChan chan struct{} } func NewTraverserFactory(cr Repo, rf runner.Factory, rmc rm.Client, shutdownChan chan struct{}) TraverserFactory { return &traverserFactory{ chainRepo: cr, rf: rf, rmc: rmc, shutdownChan: shutdownChan, } } // Make makes a Traverser for the job chain. The chain is first validated // and saved to the chain repo. func (f *traverserFactory)
(jobChain *proto.JobChain) (Traverser, error) { // Convert/wrap chain from proto to Go object. chain := NewChain(jobChain, make(map[string]uint), make(map[string]uint), make(map[string]uint)) return f.make(chain) } // MakeFromSJC makes a Traverser from a suspended job chain. func (f *traverserFactory) MakeFromSJC(sjc *proto.SuspendedJobChain) (Traverser, error) { // Convert/wrap chain from proto to Go object. chain := NewChain(sjc.JobChain, sjc.SequenceTries, sjc.TotalJobTries, sjc.LatestRunJobTries) return f.make(chain) } // Creates a new Traverser from a chain. Used for both new and resumed chains. func (f *traverserFactory) make(chain *Chain) (Traverser, error) { // Add chain to repo. This used to save the chain in Redis, if configured, // but now it's only an in-memory map. The only functionality it serves is // preventing this JR instance from running the same job chain. if err := f.chainRepo.Add(chain); err != nil { return nil, fmt.Errorf("error adding job chain: %s", err) } // Create and return a traverser for the chain. The traverser is responsible // for the chain: running, cleaning up, removing from repo when done, etc. // And traverser and chain have the same lifespan: traverser is done when // chain is done. cfg := TraverserConfig{ Chain: chain, ChainRepo: f.chainRepo, RunnerFactory: f.rf, RMClient: f.rmc, ShutdownChan: f.shutdownChan, StopTimeout: defaultTimeout, SendTimeout: defaultTimeout, } return NewTraverser(cfg), nil } // -------------------------------------------------------------------------- // type traverser struct { reaperFactory ReaperFactory reaper JobReaper shutdownChan chan struct{} // indicates JR is shutting down runJobChan chan proto.Job // jobs to be run doneJobChan chan proto.Job // jobs that are done doneChan chan struct{} // closed when traverser finishes running stopMux *sync.RWMutex // lock around checks to stopped stopped bool // has traverser been stopped suspended bool // has traverser been suspended chain *Chain chainRepo Repo // stores all currently running chains rf runner.Factory runnerRepo runner.Repo // stores actively running jobs rmc rm.Client logger *log.Entry stopTimeout time.Duration // Time to wait for jobs to stop sendTimeout time.Duration // Time to wait for a job to send on doneJobChan. } type TraverserConfig struct { Chain *Chain ChainRepo Repo RunnerFactory runner.Factory RMClient rm.Client ShutdownChan chan struct{} StopTimeout time.Duration SendTimeout time.Duration } func NewTraverser(cfg TraverserConfig) *traverser { // Include request id in all logging. 
logger := log.WithFields(log.Fields{"requestId": cfg.Chain.RequestId()}) // Channels used to communicate between traverser + reaper(s) doneJobChan := make(chan proto.Job) runJobChan := make(chan proto.Job) runnerRepo := runner.NewRepo() // needed for traverser + reaper factory reaperFactory := &ChainReaperFactory{ Chain: cfg.Chain, ChainRepo: cfg.ChainRepo, RMClient: cfg.RMClient, RMCTries: reaperTries, RMCRetryWait: reaperRetryWait, Logger: logger, DoneJobChan: doneJobChan, RunJobChan: runJobChan, RunnerRepo: runnerRepo, } return &traverser{ reaperFactory: reaperFactory, logger: logger, chain: cfg.Chain, chainRepo: cfg.ChainRepo, rf: cfg.RunnerFactory, runnerRepo: runnerRepo, shutdownChan: cfg.ShutdownChan, runJobChan: runJobChan, doneJobChan: doneJobChan, doneChan: make(chan struct{}), rmc: cfg.RMClient, stopMux: &sync.RWMutex{}, stopTimeout: cfg.StopTimeout, sendTimeout: cfg.SendTimeout, } } // Run runs all jobs in the chain and blocks until the chain finishes running, is // stopped, or is suspended. func (t *traverser) Run() { t.logger.Infof("chain traverser started") defer t.logger.Infof("chain traverser done") defer t.chainRepo.Remove(t.chain.RequestId()) // Start a goroutine to run jobs. This consumes from the runJobChan. When // jobs are done, they will be sent to the doneJobChan, which the job reapers // consume from. go t.runJobs() // Find all the jobs we can start running. For a new job chain (not suspended), // this'll end up being just the first job in the chain. jobsToRun := t.chain.RunnableJobs() // Add the first jobs to runJobChan. for _, job := range jobsToRun { t.logger.Infof("sending initial job (%s) to runJobChan", job.Id) if t.chain.IsSequenceStartJob(job.Id) { // Starting a sequence, so increment sequence try count. t.chain.IncrementSequenceTries(job.Id) seqLogger := t.logger.WithFields(log.Fields{"sequence_id": job.SequenceId, "sequence_try": t.chain.SequenceTries(job.Id)}) seqLogger.Info("starting try of sequence") } t.runJobChan <- job } // Start a goroutine to reap done jobs. The running reaper consumes from // doneJobChan and sends the next jobs to be run to runJobChan. runningReaperChan := make(chan struct{}) t.reaper = t.reaperFactory.MakeRunning() go func() { defer close(runningReaperChan) // indicate reaper is done (see select below) defer close(t.runJobChan) // stop runJobs goroutine t.reaper.Run() }() // Wait for running reaper to be done or traverser to be shut down. select { case <-runningReaperChan: // If running reaper is done because traverser was stopped, we will // wait for Stop() to finish. Otherwise, the chain finished normally // (completed or failed) and we can return right away. // // We don't check if the chain was suspended, since that can only // happen via the other case in this select. t.stopMux.Lock() if !t.stopped { t.stopMux.Unlock() return } t.stopMux.Unlock() case <-t.shutdownChan: // The Job Runner is shutting down. Stop the running reaper and suspend // the job chain, to be resumed later by another Job Runner. t.shutdown() } // Traverser is being stopped or shut down - wait for that to finish before // returning. select { case <-t.doneChan: // Stopped/shutdown successfully - nothing left to do. return case <-time.After(20 * time.Second): // Failed to stop/shutdown in a reasonable amount of time. // Log the failure and return. t.logger.Warnf("stopping or suspending the job chain took too long. 
Exiting...") return } } // Stop stops the running job chain by switching the running chain reaper for a // stopped chain reaper and stopping all currently running jobs. Stop blocks until // all jobs have finished and the stopped reaper has send the chain's final state // to the RM. func (t *traverser) Stop() error { // Don't do anything if the traverser has already been stopped or suspended. t.stopMux.Lock() defer t.stopMux.Unlock() if t.stopped { return nil } else if t.suspended { return ErrShuttingDown } t.stopped = true t.logger.Infof("stopping traverser and all jobs") // Stop the current reaper and start running a reaper for stopped chains. This // reaper saves jobs' states (but doesn't enqueue any more jobs to run) and // sends the chain's final state to the RM when all jobs have stopped running. t.reaper.Stop() // blocks until running reaper is done stopping stoppedReaperChan := make(chan struct{}) t.reaper = t.reaperFactory.MakeStopped() go func() { defer close(stoppedReaperChan) t.reaper.Run() }() // Stop all job runners in the runner repo. Do this after switching to the // stopped reaper so that when the jobs finish and are sent on doneJobChan, // they are reaped correctly. err := t.stopRunningJobs() if err != nil { // Don't return the error yet - we still want to wait for the stop // reaper to be done. err = fmt.Errorf("traverser was stopped, but encountered an error in the process: %s", err) } // Wait for the stopped reaper to finish. If it takes too long, some jobs // haven't respond quickly to being stopped. Stop waiting for these jobs by // stopping the stopped reaper. select { case <-stoppedReaperChan: case <-time.After(t.stopTimeout): t.logger.Warnf("timed out waiting for jobs to stop - stopping reaper") t.reaper.Stop() } close(t.doneChan) return err } // Status returns the status of currently running jobs in the chain. func (t *traverser) Status() (proto.JobChainStatus, error) { t.logger.Infof("getting the status of all running jobs") activeRunners, err := t.runnerRepo.Items() if err != nil { return proto.JobChainStatus{}, err } runningJobs := t.chain.Running() status := make([]proto.JobStatus, len(runningJobs)) i := 0 for jobId, jobStatus := range runningJobs { runner := activeRunners[jobId] if runner == nil { // The job finished between the call to chain.Running() and now, // so it's runner no longer exists in the runner.Repo. jobStatus.Status = "(finished)" } else { jobStatus.Status = runner.Status() } status[i] = jobStatus i++ } jcStatus := proto.JobChainStatus{ RequestId: t.chain.RequestId(), JobStatuses: status, } return jcStatus, nil } // -------------------------------------------------------------------------- // // runJobs loops on the runJobChan, and runs each job that comes through the // channel. When the job is done, it sends the job out through the doneJobChan. func (t *traverser) runJobs() { // Run all jobs that come in on runJobChan. The loop exits when runJobChan // is closed after the running reaper finishes. for job := range t.runJobChan { // Explicitly pass the job into the func, or all goroutines would share // the same loop "job" variable. go func(job proto.Job) { jLogger := t.logger.WithFields(log.Fields{"job_id": job.Id, "sequence_id": job.SequenceId, "sequence_try": t.chain.SequenceTries(job.Id)}) // Always send the finished job to doneJobChan to be reaped. 
If the // reaper isn't reaping any more jobs (if this job took too long to // finish after being stopped), sending to doneJobChan won't be // possible - timeout after a while so we don't leak this goroutine. defer func() { select { case t.doneJobChan <- job: case <-time.After(t.sendTimeout): jLogger.Warnf("timed out sending job to doneJobChan") } // Remove the job's runner from the repo (if it was ever added) // AFTER sending it to doneJobChan. This avoids a race condition // when the stopped + suspended reapers check if the runnerRepo // is empty. t.runnerRepo.Remove(job.Id) }() // Retrieve job and sequence try info from the chain for the Runner. sequenceTries := t.chain.SequenceTries(job.Id) // used in job logs totalJobTries := t.chain.TotalTries(job.Id) // used in job logs // When resuming a stopped job, only try the job // [allowed tries - tries before being stopped] times, so the total // number of times the job is tried (during this sequence try) stays // correct. The job's last try (the try it was stopped on) doesn't // count, so subtract 1 if it was tried at least once before // being stopped. triesBeforeStopped := uint(0) if job.State == proto.STATE_STOPPED { triesBeforeStopped = t.chain.LatestRunTries(job.Id) if triesBeforeStopped > 0 { triesBeforeStopped-- } } runner, err := t.rf.Make(job, t.chain.RequestId(), totalJobTries, triesBeforeStopped, sequenceTries) if err != nil { // Problem creating the job runner - treat job as failed. // Send a JobLog to the RM so that it knows this job failed. job.State = proto.STATE_FAIL err = fmt.Errorf("problem creating job runner: %s", err) t.sendJL(job, err) return } // Add the runner to the repo. Runners in the repo are used // by the Status, Stop, and shutdown methods on the traverser. t.runnerRepo.Set(job.Id, runner) // Bail out if Stop was called or traverser shut down. It is // important that this check happens AFTER the runner is added to // the repo. Otherwise if Stop gets called after this check but // before the runner is added to the repo, there will be nothing to // stop the job from running. // // We don't lock stopMux around this check and runner.Run. It's okay if // there's a small chance for the runner to be run after the traverser // gets stopped or shut down - it'll just return after trying the job // once. if t.stopped { job.State = proto.STATE_STOPPED // Send a JL to the RM so that it knows this job was stopped. // Add 1 to the total job tries, since this is used for keeping // job logs unique. t.chain.AddJobTries(job.Id, 1) err = fmt.Errorf("not starting job because traverser has already been stopped") t.sendJL(job, err) return } else if t.suspended { job.State = proto.STATE_STOPPED // Don't send a JL because this job will be resumed later, // and don't include this try in the total # of tries (only // set job tries for the latest run). t.chain.SetLatestRunJobTries(job.Id, 1) return } // Run the job. This is a blocking operation that could take a long time. jLogger.Infof("running job") t.chain.SetJobState(job.Id, proto.STATE_RUNNING) ret := runner.Run(job.Data) t.chain.AddJobTries(job.Id, ret.Tries) job.State = ret.FinalState }(job) } } // sendJL sends a job log to the Request Manager. 
func (t *traverser) sendJL(job proto.Job, err error) { jLogger := t.logger.WithFields(log.Fields{"job_id": job.Id}) jl := proto.JobLog{ RequestId: t.chain.RequestId(), JobId: job.Id, Name: job.Name, Type: job.Type, Try: t.chain.TotalTries(job.Id), SequenceId: job.SequenceId, SequenceTry: t.chain.SequenceTries(job.Id), StartedAt: 0, // zero because the job never ran FinishedAt: 0, State: job.State, Exit: 1, Error: err.Error(), } if err != nil { jl.Error = err.Error() } // Send the JL to the RM. err = retry.Do(jobLogTries, jobLogRetryWait, func() error { return t.rmc.CreateJL(t.chain.RequestId(), jl) }, nil, ) if err != nil { jLogger.Errorf("problem sending job log (%#v) to the Request Manager: %s", jl, err) return } } // shutdown suspends the running chain by switching the running chain reaper for a // suspended chain reaper and stopping all currently running jobs. Once all jobs // have finished, the suspended reaper informs the RM about the suspended chain by // sending a SuspendedJobChain. // // When a Job Runner is shutting down, all of its traversers are shut down and their // running job chains suspended. The Request Manager can later resume these job // chains by sending them to a running Job Runner instance. func (t *traverser) shutdown() { // Don't do anything if the traverser has already been stopped or suspended. t.stopMux.Lock() defer t.stopMux.Unlock() if t.stopped || t.suspended { return } t.suspended = true t.logger.Info("suspending job chain - stopping all jobs") // Stop the current reaper and start running a reaper for suspended chains. This // reaper saves jobs' states and prepares the chain to be resumed later, but // doesn't enqueue any more jobs to run. When all jobs have stopped running, // it sends the SuspendedJobChain to the RM (or the final state if the // chain was completed or failed). t.reaper.Stop() // blocks until running reaper is done stopping suspendedReaperChan := make(chan struct{}) t.reaper = t.reaperFactory.MakeSuspended() go func() { defer close(suspendedReaperChan) t.reaper.Run() }() // Stop all job runners in the runner repo. Do this after switching to the // suspended reaper so that when the jobs finish and are sent on doneJobChan, // they are reaped correctly. err := t.stopRunningJobs() if err != nil { t.logger.Errorf("problem suspending job chain: %s", err) } // Wait for suspended reaper to finish. If it takes too long, some jobs // haven't respond quickly to being stopped. Stop waiting for these jobs by // stopping the suspended reaper. select { case <-suspendedReaperChan: case <-time.After(t.stopTimeout): t.logger.Warnf("timed out waiting for jobs to stop - stopping reaper") t.reaper.Stop() } close(t.doneChan) } // stopRunningJobs stops all currently running jobs. func (t *traverser) stopRunningJobs() error { // Get all of the active runners for this traverser from the repo. Only runners // that are in the repo will be stopped. activeRunners, err := t.runnerRepo.Items() if err != nil { return fmt.Errorf("problem retrieving job runners from repo: %s", err) } // Call Stop on each runner. Use goroutines in case some jobs don't return from // Stop() quickly. var runnerWG sync.WaitGroup hadError := false for jobId, activeRunner := range activeRunners { runnerWG.Add(1) go func(runner runner.Runner) { defer runnerWG.Done() err := runner.Stop() if err != nil { t.logger.Errorf("problem stopping job runner (job id = %s): %s", jobId, err) hadError = true } }(activeRunner) } // If there was an error when stopping at least one of the jobs, return it. 
runnerWG.Wait() if hadError { return fmt.Errorf("problem stopping one or more job runners - see logs for more info") } return nil }
Make
identifier_name
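sendJL above pushes job logs to the Request Manager through retry.Do(jobLogTries, jobLogRetryWait, fn, nil). The spincycle retry package itself is not reproduced here; the standalone helper below only sketches the tries/wait/callback shape of that call, under the assumption that it retries until the callback returns nil or the attempts are exhausted.

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// do is a standalone stand-in for the retry.Do(tries, wait, fn, nil) calls above;
// it is not spincycle's retry package, only an illustration of that call shape.
func do(tries int, wait time.Duration, fn func() error) error {
	var err error
	for i := 0; i < tries; i++ {
		if err = fn(); err == nil {
			return nil
		}
		if i < tries-1 {
			time.Sleep(wait)
		}
	}
	return fmt.Errorf("gave up after %d tries: %w", tries, err)
}

func main() {
	attempts := 0
	err := do(3, 100*time.Millisecond, func() error {
		attempts++
		if attempts < 3 {
			return errors.New("request manager unavailable") // simulated transient failure
		}
		return nil
	})
	fmt.Println("attempts:", attempts, "err:", err)
}
```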
traverser.go
// Copyright 2017-2018, Square, Inc. package chain import ( "fmt" "sync" "time" log "github.com/Sirupsen/logrus" "github.com/square/spincycle/job-runner/runner" "github.com/square/spincycle/proto" rm "github.com/square/spincycle/request-manager" "github.com/square/spincycle/retry" ) var ( // Returned when Stop is called but the chain has already been suspended. ErrShuttingDown = fmt.Errorf("chain not stopped because traverser is shutting down") ) const ( // Default timeout used by traverser factory for traverser's stopTimeout // and sendTimeout. defaultTimeout = 10 * time.Second // Number of times to attempt sending a job log to the RM. jobLogTries = 3 // Time to wait between attempts to send a job log to RM. jobLogRetryWait = 500 * time.Millisecond // Number of times to attempt sending chain state / SJC to RM in Reaper. reaperTries = 5 // Time to wait between tries to send chain state/SJC to RM. reaperRetryWait = 500 * time.Millisecond ) // A Traverser provides the ability to run a job chain while respecting the // dependencies between the jobs. type Traverser interface { // Run traverses a job chain and runs all of the jobs in it. It starts by // running the first job in the chain, and then, if the job completed, // successfully, running its adjacent jobs. This process continues until there // are no more jobs to run, or until the Stop method is called on the traverser. Run() // Stop makes a traverser stop traversing its job chain. It also sends a stop // signal to all of the jobs that a traverser is running. // // It returns an error if it fails to stop all running jobs. Stop() error // Status gets the status of all running and failed jobs. Since a job can only // run when all of its ancestors have completed, the state of the entire chain // can be inferred from this information - every job in the chain before a // running or failed job must be complete, and every job in the chain after a // running or failed job must be pending. // // It returns an error if it fails to get the status of all running jobs. Status() (proto.JobChainStatus, error) } // A TraverserFactory makes a new Traverser. type TraverserFactory interface { Make(*proto.JobChain) (Traverser, error) MakeFromSJC(*proto.SuspendedJobChain) (Traverser, error) } type traverserFactory struct { chainRepo Repo rf runner.Factory rmc rm.Client shutdownChan chan struct{} } func NewTraverserFactory(cr Repo, rf runner.Factory, rmc rm.Client, shutdownChan chan struct{}) TraverserFactory { return &traverserFactory{ chainRepo: cr, rf: rf, rmc: rmc, shutdownChan: shutdownChan, } } // Make makes a Traverser for the job chain. The chain is first validated // and saved to the chain repo. func (f *traverserFactory) Make(jobChain *proto.JobChain) (Traverser, error) { // Convert/wrap chain from proto to Go object. chain := NewChain(jobChain, make(map[string]uint), make(map[string]uint), make(map[string]uint)) return f.make(chain) } // MakeFromSJC makes a Traverser from a suspended job chain. func (f *traverserFactory) MakeFromSJC(sjc *proto.SuspendedJobChain) (Traverser, error) { // Convert/wrap chain from proto to Go object. chain := NewChain(sjc.JobChain, sjc.SequenceTries, sjc.TotalJobTries, sjc.LatestRunJobTries) return f.make(chain) } // Creates a new Traverser from a chain. Used for both new and resumed chains. func (f *traverserFactory) make(chain *Chain) (Traverser, error) { // Add chain to repo. This used to save the chain in Redis, if configured, // but now it's only an in-memory map. 
The only functionality it serves is // preventing this JR instance from running the same job chain. if err := f.chainRepo.Add(chain); err != nil { return nil, fmt.Errorf("error adding job chain: %s", err) } // Create and return a traverser for the chain. The traverser is responsible // for the chain: running, cleaning up, removing from repo when done, etc. // And traverser and chain have the same lifespan: traverser is done when // chain is done. cfg := TraverserConfig{ Chain: chain, ChainRepo: f.chainRepo, RunnerFactory: f.rf, RMClient: f.rmc, ShutdownChan: f.shutdownChan, StopTimeout: defaultTimeout, SendTimeout: defaultTimeout, } return NewTraverser(cfg), nil } // -------------------------------------------------------------------------- // type traverser struct { reaperFactory ReaperFactory reaper JobReaper shutdownChan chan struct{} // indicates JR is shutting down runJobChan chan proto.Job // jobs to be run doneJobChan chan proto.Job // jobs that are done doneChan chan struct{} // closed when traverser finishes running stopMux *sync.RWMutex // lock around checks to stopped stopped bool // has traverser been stopped suspended bool // has traverser been suspended chain *Chain chainRepo Repo // stores all currently running chains rf runner.Factory runnerRepo runner.Repo // stores actively running jobs rmc rm.Client logger *log.Entry stopTimeout time.Duration // Time to wait for jobs to stop sendTimeout time.Duration // Time to wait for a job to send on doneJobChan. } type TraverserConfig struct { Chain *Chain ChainRepo Repo RunnerFactory runner.Factory RMClient rm.Client ShutdownChan chan struct{} StopTimeout time.Duration SendTimeout time.Duration } func NewTraverser(cfg TraverserConfig) *traverser { // Include request id in all logging. logger := log.WithFields(log.Fields{"requestId": cfg.Chain.RequestId()}) // Channels used to communicate between traverser + reaper(s) doneJobChan := make(chan proto.Job) runJobChan := make(chan proto.Job) runnerRepo := runner.NewRepo() // needed for traverser + reaper factory reaperFactory := &ChainReaperFactory{ Chain: cfg.Chain, ChainRepo: cfg.ChainRepo, RMClient: cfg.RMClient, RMCTries: reaperTries, RMCRetryWait: reaperRetryWait, Logger: logger, DoneJobChan: doneJobChan, RunJobChan: runJobChan, RunnerRepo: runnerRepo, } return &traverser{ reaperFactory: reaperFactory, logger: logger, chain: cfg.Chain, chainRepo: cfg.ChainRepo, rf: cfg.RunnerFactory, runnerRepo: runnerRepo, shutdownChan: cfg.ShutdownChan, runJobChan: runJobChan, doneJobChan: doneJobChan, doneChan: make(chan struct{}), rmc: cfg.RMClient, stopMux: &sync.RWMutex{}, stopTimeout: cfg.StopTimeout, sendTimeout: cfg.SendTimeout, } } // Run runs all jobs in the chain and blocks until the chain finishes running, is // stopped, or is suspended. func (t *traverser) Run() { t.logger.Infof("chain traverser started") defer t.logger.Infof("chain traverser done") defer t.chainRepo.Remove(t.chain.RequestId()) // Start a goroutine to run jobs. This consumes from the runJobChan. When // jobs are done, they will be sent to the doneJobChan, which the job reapers // consume from. go t.runJobs() // Find all the jobs we can start running. For a new job chain (not suspended), // this'll end up being just the first job in the chain. jobsToRun := t.chain.RunnableJobs() // Add the first jobs to runJobChan. for _, job := range jobsToRun { t.logger.Infof("sending initial job (%s) to runJobChan", job.Id) if t.chain.IsSequenceStartJob(job.Id) { // Starting a sequence, so increment sequence try count. 
t.chain.IncrementSequenceTries(job.Id) seqLogger := t.logger.WithFields(log.Fields{"sequence_id": job.SequenceId, "sequence_try": t.chain.SequenceTries(job.Id)}) seqLogger.Info("starting try of sequence") } t.runJobChan <- job } // Start a goroutine to reap done jobs. The running reaper consumes from // doneJobChan and sends the next jobs to be run to runJobChan. runningReaperChan := make(chan struct{}) t.reaper = t.reaperFactory.MakeRunning() go func() { defer close(runningReaperChan) // indicate reaper is done (see select below) defer close(t.runJobChan) // stop runJobs goroutine t.reaper.Run() }() // Wait for running reaper to be done or traverser to be shut down. select { case <-runningReaperChan: // If running reaper is done because traverser was stopped, we will // wait for Stop() to finish. Otherwise, the chain finished normally // (completed or failed) and we can return right away. // // We don't check if the chain was suspended, since that can only // happen via the other case in this select. t.stopMux.Lock() if !t.stopped { t.stopMux.Unlock() return } t.stopMux.Unlock() case <-t.shutdownChan: // The Job Runner is shutting down. Stop the running reaper and suspend // the job chain, to be resumed later by another Job Runner. t.shutdown() } // Traverser is being stopped or shut down - wait for that to finish before // returning. select { case <-t.doneChan: // Stopped/shutdown successfully - nothing left to do. return case <-time.After(20 * time.Second): // Failed to stop/shutdown in a reasonable amount of time. // Log the failure and return. t.logger.Warnf("stopping or suspending the job chain took too long. Exiting...") return } } // Stop stops the running job chain by switching the running chain reaper for a // stopped chain reaper and stopping all currently running jobs. Stop blocks until // all jobs have finished and the stopped reaper has send the chain's final state // to the RM. func (t *traverser) Stop() error { // Don't do anything if the traverser has already been stopped or suspended. t.stopMux.Lock() defer t.stopMux.Unlock() if t.stopped { return nil } else if t.suspended { return ErrShuttingDown } t.stopped = true t.logger.Infof("stopping traverser and all jobs") // Stop the current reaper and start running a reaper for stopped chains. This // reaper saves jobs' states (but doesn't enqueue any more jobs to run) and // sends the chain's final state to the RM when all jobs have stopped running. t.reaper.Stop() // blocks until running reaper is done stopping stoppedReaperChan := make(chan struct{}) t.reaper = t.reaperFactory.MakeStopped() go func() { defer close(stoppedReaperChan) t.reaper.Run() }() // Stop all job runners in the runner repo. Do this after switching to the // stopped reaper so that when the jobs finish and are sent on doneJobChan, // they are reaped correctly. err := t.stopRunningJobs() if err != nil { // Don't return the error yet - we still want to wait for the stop // reaper to be done. err = fmt.Errorf("traverser was stopped, but encountered an error in the process: %s", err) } // Wait for the stopped reaper to finish. If it takes too long, some jobs // haven't respond quickly to being stopped. Stop waiting for these jobs by // stopping the stopped reaper. select { case <-stoppedReaperChan: case <-time.After(t.stopTimeout): t.logger.Warnf("timed out waiting for jobs to stop - stopping reaper") t.reaper.Stop() } close(t.doneChan) return err } // Status returns the status of currently running jobs in the chain. 
func (t *traverser) Status() (proto.JobChainStatus, error) { t.logger.Infof("getting the status of all running jobs") activeRunners, err := t.runnerRepo.Items() if err != nil { return proto.JobChainStatus{}, err } runningJobs := t.chain.Running() status := make([]proto.JobStatus, len(runningJobs)) i := 0 for jobId, jobStatus := range runningJobs
jcStatus := proto.JobChainStatus{ RequestId: t.chain.RequestId(), JobStatuses: status, } return jcStatus, nil } // -------------------------------------------------------------------------- // // runJobs loops on the runJobChan, and runs each job that comes through the // channel. When the job is done, it sends the job out through the doneJobChan. func (t *traverser) runJobs() { // Run all jobs that come in on runJobChan. The loop exits when runJobChan // is closed after the running reaper finishes. for job := range t.runJobChan { // Explicitly pass the job into the func, or all goroutines would share // the same loop "job" variable. go func(job proto.Job) { jLogger := t.logger.WithFields(log.Fields{"job_id": job.Id, "sequence_id": job.SequenceId, "sequence_try": t.chain.SequenceTries(job.Id)}) // Always send the finished job to doneJobChan to be reaped. If the // reaper isn't reaping any more jobs (if this job took too long to // finish after being stopped), sending to doneJobChan won't be // possible - timeout after a while so we don't leak this goroutine. defer func() { select { case t.doneJobChan <- job: case <-time.After(t.sendTimeout): jLogger.Warnf("timed out sending job to doneJobChan") } // Remove the job's runner from the repo (if it was ever added) // AFTER sending it to doneJobChan. This avoids a race condition // when the stopped + suspended reapers check if the runnerRepo // is empty. t.runnerRepo.Remove(job.Id) }() // Retrieve job and sequence try info from the chain for the Runner. sequenceTries := t.chain.SequenceTries(job.Id) // used in job logs totalJobTries := t.chain.TotalTries(job.Id) // used in job logs // When resuming a stopped job, only try the job // [allowed tries - tries before being stopped] times, so the total // number of times the job is tried (during this sequence try) stays // correct. The job's last try (the try it was stopped on) doesn't // count, so subtract 1 if it was tried at least once before // being stopped. triesBeforeStopped := uint(0) if job.State == proto.STATE_STOPPED { triesBeforeStopped = t.chain.LatestRunTries(job.Id) if triesBeforeStopped > 0 { triesBeforeStopped-- } } runner, err := t.rf.Make(job, t.chain.RequestId(), totalJobTries, triesBeforeStopped, sequenceTries) if err != nil { // Problem creating the job runner - treat job as failed. // Send a JobLog to the RM so that it knows this job failed. job.State = proto.STATE_FAIL err = fmt.Errorf("problem creating job runner: %s", err) t.sendJL(job, err) return } // Add the runner to the repo. Runners in the repo are used // by the Status, Stop, and shutdown methods on the traverser. t.runnerRepo.Set(job.Id, runner) // Bail out if Stop was called or traverser shut down. It is // important that this check happens AFTER the runner is added to // the repo. Otherwise if Stop gets called after this check but // before the runner is added to the repo, there will be nothing to // stop the job from running. // // We don't lock stopMux around this check and runner.Run. It's okay if // there's a small chance for the runner to be run after the traverser // gets stopped or shut down - it'll just return after trying the job // once. if t.stopped { job.State = proto.STATE_STOPPED // Send a JL to the RM so that it knows this job was stopped. // Add 1 to the total job tries, since this is used for keeping // job logs unique. 
t.chain.AddJobTries(job.Id, 1) err = fmt.Errorf("not starting job because traverser has already been stopped") t.sendJL(job, err) return } else if t.suspended { job.State = proto.STATE_STOPPED // Don't send a JL because this job will be resumed later, // and don't include this try in the total # of tries (only // set job tries for the latest run). t.chain.SetLatestRunJobTries(job.Id, 1) return } // Run the job. This is a blocking operation that could take a long time. jLogger.Infof("running job") t.chain.SetJobState(job.Id, proto.STATE_RUNNING) ret := runner.Run(job.Data) t.chain.AddJobTries(job.Id, ret.Tries) job.State = ret.FinalState }(job) } } // sendJL sends a job log to the Request Manager. func (t *traverser) sendJL(job proto.Job, err error) { jLogger := t.logger.WithFields(log.Fields{"job_id": job.Id}) jl := proto.JobLog{ RequestId: t.chain.RequestId(), JobId: job.Id, Name: job.Name, Type: job.Type, Try: t.chain.TotalTries(job.Id), SequenceId: job.SequenceId, SequenceTry: t.chain.SequenceTries(job.Id), StartedAt: 0, // zero because the job never ran FinishedAt: 0, State: job.State, Exit: 1, Error: err.Error(), } if err != nil { jl.Error = err.Error() } // Send the JL to the RM. err = retry.Do(jobLogTries, jobLogRetryWait, func() error { return t.rmc.CreateJL(t.chain.RequestId(), jl) }, nil, ) if err != nil { jLogger.Errorf("problem sending job log (%#v) to the Request Manager: %s", jl, err) return } } // shutdown suspends the running chain by switching the running chain reaper for a // suspended chain reaper and stopping all currently running jobs. Once all jobs // have finished, the suspended reaper informs the RM about the suspended chain by // sending a SuspendedJobChain. // // When a Job Runner is shutting down, all of its traversers are shut down and their // running job chains suspended. The Request Manager can later resume these job // chains by sending them to a running Job Runner instance. func (t *traverser) shutdown() { // Don't do anything if the traverser has already been stopped or suspended. t.stopMux.Lock() defer t.stopMux.Unlock() if t.stopped || t.suspended { return } t.suspended = true t.logger.Info("suspending job chain - stopping all jobs") // Stop the current reaper and start running a reaper for suspended chains. This // reaper saves jobs' states and prepares the chain to be resumed later, but // doesn't enqueue any more jobs to run. When all jobs have stopped running, // it sends the SuspendedJobChain to the RM (or the final state if the // chain was completed or failed). t.reaper.Stop() // blocks until running reaper is done stopping suspendedReaperChan := make(chan struct{}) t.reaper = t.reaperFactory.MakeSuspended() go func() { defer close(suspendedReaperChan) t.reaper.Run() }() // Stop all job runners in the runner repo. Do this after switching to the // suspended reaper so that when the jobs finish and are sent on doneJobChan, // they are reaped correctly. err := t.stopRunningJobs() if err != nil { t.logger.Errorf("problem suspending job chain: %s", err) } // Wait for suspended reaper to finish. If it takes too long, some jobs // haven't respond quickly to being stopped. Stop waiting for these jobs by // stopping the suspended reaper. select { case <-suspendedReaperChan: case <-time.After(t.stopTimeout): t.logger.Warnf("timed out waiting for jobs to stop - stopping reaper") t.reaper.Stop() } close(t.doneChan) } // stopRunningJobs stops all currently running jobs. 
func (t *traverser) stopRunningJobs() error { // Get all of the active runners for this traverser from the repo. Only runners // that are in the repo will be stopped. activeRunners, err := t.runnerRepo.Items() if err != nil { return fmt.Errorf("problem retrieving job runners from repo: %s", err) } // Call Stop on each runner. Use goroutines in case some jobs don't return from // Stop() quickly. var runnerWG sync.WaitGroup hadError := false for jobId, activeRunner := range activeRunners { runnerWG.Add(1) go func(runner runner.Runner) { defer runnerWG.Done() err := runner.Stop() if err != nil { t.logger.Errorf("problem stopping job runner (job id = %s): %s", jobId, err) hadError = true } }(activeRunner) } // If there was an error when stopping at least one of the jobs, return it. runnerWG.Wait() if hadError { return fmt.Errorf("problem stopping one or more job runners - see logs for more info") } return nil }
{ runner := activeRunners[jobId] if runner == nil { // The job finished between the call to chain.Running() and now, // so its runner no longer exists in the runner.Repo. jobStatus.Status = "(finished)" } else { jobStatus.Status = runner.Status() } status[i] = jobStatus i++ }
conditional_block
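The two pre-run branches in runJobs above differ only in bookkeeping: a stopped chain reports the skipped job to the Request Manager and counts the try, while a suspended chain stays silent so the job can be retried cleanly on resume. The following is an illustrative summary of that decision only; preRunAction is a local name, not part of the package API.

package main

import "fmt"

// preRunAction summarizes what runJobs does with a job that has not started yet
// when the traverser is already stopped or suspended: stopped -> mark STOPPED,
// count one try, send a job log; suspended -> mark STOPPED, record one try for
// the latest run only, and send no job log because the job will be resumed.
func preRunAction(stopped, suspended bool) string {
	switch {
	case stopped:
		return "STATE_STOPPED: add 1 to total tries, send job log to RM"
	case suspended:
		return "STATE_STOPPED: set latest-run tries to 1, no job log (job resumes later)"
	default:
		return "run the job"
	}
}

func main() {
	fmt.Println(preRunAction(true, false))
	fmt.Println(preRunAction(false, true))
	fmt.Println(preRunAction(false, false))
}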
traverser.go
// Copyright 2017-2018, Square, Inc. package chain import ( "fmt" "sync" "time" log "github.com/Sirupsen/logrus" "github.com/square/spincycle/job-runner/runner" "github.com/square/spincycle/proto" rm "github.com/square/spincycle/request-manager" "github.com/square/spincycle/retry" ) var ( // Returned when Stop is called but the chain has already been suspended. ErrShuttingDown = fmt.Errorf("chain not stopped because traverser is shutting down") ) const ( // Default timeout used by traverser factory for traverser's stopTimeout // and sendTimeout. defaultTimeout = 10 * time.Second // Number of times to attempt sending a job log to the RM. jobLogTries = 3 // Time to wait between attempts to send a job log to RM. jobLogRetryWait = 500 * time.Millisecond // Number of times to attempt sending chain state / SJC to RM in Reaper. reaperTries = 5 // Time to wait between tries to send chain state/SJC to RM. reaperRetryWait = 500 * time.Millisecond ) // A Traverser provides the ability to run a job chain while respecting the // dependencies between the jobs. type Traverser interface { // Run traverses a job chain and runs all of the jobs in it. It starts by // running the first job in the chain, and then, if the job completed, // successfully, running its adjacent jobs. This process continues until there // are no more jobs to run, or until the Stop method is called on the traverser. Run() // Stop makes a traverser stop traversing its job chain. It also sends a stop // signal to all of the jobs that a traverser is running. // // It returns an error if it fails to stop all running jobs. Stop() error // Status gets the status of all running and failed jobs. Since a job can only // run when all of its ancestors have completed, the state of the entire chain // can be inferred from this information - every job in the chain before a // running or failed job must be complete, and every job in the chain after a // running or failed job must be pending. // // It returns an error if it fails to get the status of all running jobs. Status() (proto.JobChainStatus, error) } // A TraverserFactory makes a new Traverser. type TraverserFactory interface { Make(*proto.JobChain) (Traverser, error) MakeFromSJC(*proto.SuspendedJobChain) (Traverser, error) } type traverserFactory struct { chainRepo Repo rf runner.Factory rmc rm.Client shutdownChan chan struct{} } func NewTraverserFactory(cr Repo, rf runner.Factory, rmc rm.Client, shutdownChan chan struct{}) TraverserFactory { return &traverserFactory{ chainRepo: cr, rf: rf, rmc: rmc, shutdownChan: shutdownChan, } } // Make makes a Traverser for the job chain. The chain is first validated // and saved to the chain repo. func (f *traverserFactory) Make(jobChain *proto.JobChain) (Traverser, error) { // Convert/wrap chain from proto to Go object. chain := NewChain(jobChain, make(map[string]uint), make(map[string]uint), make(map[string]uint)) return f.make(chain) } // MakeFromSJC makes a Traverser from a suspended job chain. func (f *traverserFactory) MakeFromSJC(sjc *proto.SuspendedJobChain) (Traverser, error) { // Convert/wrap chain from proto to Go object. chain := NewChain(sjc.JobChain, sjc.SequenceTries, sjc.TotalJobTries, sjc.LatestRunJobTries) return f.make(chain) } // Creates a new Traverser from a chain. Used for both new and resumed chains. func (f *traverserFactory) make(chain *Chain) (Traverser, error) { // Add chain to repo. This used to save the chain in Redis, if configured, // but now it's only an in-memory map. 
The only functionality it serves is // preventing this JR instance from running the same job chain. if err := f.chainRepo.Add(chain); err != nil { return nil, fmt.Errorf("error adding job chain: %s", err) } // Create and return a traverser for the chain. The traverser is responsible // for the chain: running, cleaning up, removing from repo when done, etc. // And traverser and chain have the same lifespan: traverser is done when // chain is done. cfg := TraverserConfig{ Chain: chain, ChainRepo: f.chainRepo, RunnerFactory: f.rf, RMClient: f.rmc, ShutdownChan: f.shutdownChan, StopTimeout: defaultTimeout, SendTimeout: defaultTimeout, } return NewTraverser(cfg), nil } // -------------------------------------------------------------------------- // type traverser struct { reaperFactory ReaperFactory reaper JobReaper shutdownChan chan struct{} // indicates JR is shutting down runJobChan chan proto.Job // jobs to be run doneJobChan chan proto.Job // jobs that are done doneChan chan struct{} // closed when traverser finishes running stopMux *sync.RWMutex // lock around checks to stopped stopped bool // has traverser been stopped suspended bool // has traverser been suspended chain *Chain chainRepo Repo // stores all currently running chains rf runner.Factory runnerRepo runner.Repo // stores actively running jobs rmc rm.Client logger *log.Entry stopTimeout time.Duration // Time to wait for jobs to stop sendTimeout time.Duration // Time to wait for a job to send on doneJobChan. } type TraverserConfig struct { Chain *Chain ChainRepo Repo RunnerFactory runner.Factory RMClient rm.Client ShutdownChan chan struct{} StopTimeout time.Duration SendTimeout time.Duration } func NewTraverser(cfg TraverserConfig) *traverser { // Include request id in all logging. logger := log.WithFields(log.Fields{"requestId": cfg.Chain.RequestId()}) // Channels used to communicate between traverser + reaper(s) doneJobChan := make(chan proto.Job) runJobChan := make(chan proto.Job) runnerRepo := runner.NewRepo() // needed for traverser + reaper factory reaperFactory := &ChainReaperFactory{ Chain: cfg.Chain, ChainRepo: cfg.ChainRepo, RMClient: cfg.RMClient, RMCTries: reaperTries, RMCRetryWait: reaperRetryWait, Logger: logger, DoneJobChan: doneJobChan, RunJobChan: runJobChan, RunnerRepo: runnerRepo, } return &traverser{ reaperFactory: reaperFactory, logger: logger, chain: cfg.Chain, chainRepo: cfg.ChainRepo, rf: cfg.RunnerFactory, runnerRepo: runnerRepo, shutdownChan: cfg.ShutdownChan, runJobChan: runJobChan, doneJobChan: doneJobChan, doneChan: make(chan struct{}), rmc: cfg.RMClient, stopMux: &sync.RWMutex{}, stopTimeout: cfg.StopTimeout, sendTimeout: cfg.SendTimeout, } } // Run runs all jobs in the chain and blocks until the chain finishes running, is // stopped, or is suspended. func (t *traverser) Run() { t.logger.Infof("chain traverser started") defer t.logger.Infof("chain traverser done") defer t.chainRepo.Remove(t.chain.RequestId()) // Start a goroutine to run jobs. This consumes from the runJobChan. When // jobs are done, they will be sent to the doneJobChan, which the job reapers // consume from. go t.runJobs() // Find all the jobs we can start running. For a new job chain (not suspended), // this'll end up being just the first job in the chain. jobsToRun := t.chain.RunnableJobs() // Add the first jobs to runJobChan. for _, job := range jobsToRun { t.logger.Infof("sending initial job (%s) to runJobChan", job.Id) if t.chain.IsSequenceStartJob(job.Id) { // Starting a sequence, so increment sequence try count. 
t.chain.IncrementSequenceTries(job.Id) seqLogger := t.logger.WithFields(log.Fields{"sequence_id": job.SequenceId, "sequence_try": t.chain.SequenceTries(job.Id)}) seqLogger.Info("starting try of sequence") } t.runJobChan <- job } // Start a goroutine to reap done jobs. The running reaper consumes from // doneJobChan and sends the next jobs to be run to runJobChan. runningReaperChan := make(chan struct{}) t.reaper = t.reaperFactory.MakeRunning() go func() { defer close(runningReaperChan) // indicate reaper is done (see select below) defer close(t.runJobChan) // stop runJobs goroutine t.reaper.Run() }() // Wait for running reaper to be done or traverser to be shut down. select { case <-runningReaperChan: // If running reaper is done because traverser was stopped, we will // wait for Stop() to finish. Otherwise, the chain finished normally // (completed or failed) and we can return right away. // // We don't check if the chain was suspended, since that can only // happen via the other case in this select. t.stopMux.Lock() if !t.stopped { t.stopMux.Unlock() return } t.stopMux.Unlock() case <-t.shutdownChan: // The Job Runner is shutting down. Stop the running reaper and suspend // the job chain, to be resumed later by another Job Runner. t.shutdown() } // Traverser is being stopped or shut down - wait for that to finish before // returning. select { case <-t.doneChan: // Stopped/shutdown successfully - nothing left to do. return case <-time.After(20 * time.Second): // Failed to stop/shutdown in a reasonable amount of time. // Log the failure and return. t.logger.Warnf("stopping or suspending the job chain took too long. Exiting...") return } } // Stop stops the running job chain by switching the running chain reaper for a // stopped chain reaper and stopping all currently running jobs. Stop blocks until // all jobs have finished and the stopped reaper has send the chain's final state // to the RM. func (t *traverser) Stop() error { // Don't do anything if the traverser has already been stopped or suspended. t.stopMux.Lock() defer t.stopMux.Unlock() if t.stopped { return nil } else if t.suspended { return ErrShuttingDown } t.stopped = true t.logger.Infof("stopping traverser and all jobs") // Stop the current reaper and start running a reaper for stopped chains. This // reaper saves jobs' states (but doesn't enqueue any more jobs to run) and // sends the chain's final state to the RM when all jobs have stopped running. t.reaper.Stop() // blocks until running reaper is done stopping stoppedReaperChan := make(chan struct{}) t.reaper = t.reaperFactory.MakeStopped() go func() { defer close(stoppedReaperChan) t.reaper.Run() }() // Stop all job runners in the runner repo. Do this after switching to the // stopped reaper so that when the jobs finish and are sent on doneJobChan, // they are reaped correctly. err := t.stopRunningJobs() if err != nil { // Don't return the error yet - we still want to wait for the stop // reaper to be done. err = fmt.Errorf("traverser was stopped, but encountered an error in the process: %s", err) } // Wait for the stopped reaper to finish. If it takes too long, some jobs // haven't respond quickly to being stopped. Stop waiting for these jobs by // stopping the stopped reaper. select { case <-stoppedReaperChan: case <-time.After(t.stopTimeout): t.logger.Warnf("timed out waiting for jobs to stop - stopping reaper") t.reaper.Stop() } close(t.doneChan) return err } // Status returns the status of currently running jobs in the chain. 
func (t *traverser) Status() (proto.JobChainStatus, error) { t.logger.Infof("getting the status of all running jobs") activeRunners, err := t.runnerRepo.Items() if err != nil { return proto.JobChainStatus{}, err } runningJobs := t.chain.Running() status := make([]proto.JobStatus, len(runningJobs)) i := 0 for jobId, jobStatus := range runningJobs { runner := activeRunners[jobId] if runner == nil { // The job finished between the call to chain.Running() and now, // so it's runner no longer exists in the runner.Repo. jobStatus.Status = "(finished)" } else { jobStatus.Status = runner.Status() } status[i] = jobStatus i++ } jcStatus := proto.JobChainStatus{ RequestId: t.chain.RequestId(), JobStatuses: status, } return jcStatus, nil } // -------------------------------------------------------------------------- // // runJobs loops on the runJobChan, and runs each job that comes through the // channel. When the job is done, it sends the job out through the doneJobChan. func (t *traverser) runJobs() { // Run all jobs that come in on runJobChan. The loop exits when runJobChan // is closed after the running reaper finishes. for job := range t.runJobChan { // Explicitly pass the job into the func, or all goroutines would share // the same loop "job" variable. go func(job proto.Job) { jLogger := t.logger.WithFields(log.Fields{"job_id": job.Id, "sequence_id": job.SequenceId, "sequence_try": t.chain.SequenceTries(job.Id)}) // Always send the finished job to doneJobChan to be reaped. If the // reaper isn't reaping any more jobs (if this job took too long to // finish after being stopped), sending to doneJobChan won't be // possible - timeout after a while so we don't leak this goroutine. defer func() { select { case t.doneJobChan <- job: case <-time.After(t.sendTimeout): jLogger.Warnf("timed out sending job to doneJobChan") } // Remove the job's runner from the repo (if it was ever added) // AFTER sending it to doneJobChan. This avoids a race condition // when the stopped + suspended reapers check if the runnerRepo // is empty. t.runnerRepo.Remove(job.Id) }() // Retrieve job and sequence try info from the chain for the Runner. sequenceTries := t.chain.SequenceTries(job.Id) // used in job logs totalJobTries := t.chain.TotalTries(job.Id) // used in job logs // When resuming a stopped job, only try the job // [allowed tries - tries before being stopped] times, so the total // number of times the job is tried (during this sequence try) stays // correct. The job's last try (the try it was stopped on) doesn't // count, so subtract 1 if it was tried at least once before // being stopped. triesBeforeStopped := uint(0) if job.State == proto.STATE_STOPPED { triesBeforeStopped = t.chain.LatestRunTries(job.Id) if triesBeforeStopped > 0 { triesBeforeStopped-- } } runner, err := t.rf.Make(job, t.chain.RequestId(), totalJobTries, triesBeforeStopped, sequenceTries) if err != nil { // Problem creating the job runner - treat job as failed. // Send a JobLog to the RM so that it knows this job failed. job.State = proto.STATE_FAIL err = fmt.Errorf("problem creating job runner: %s", err) t.sendJL(job, err) return } // Add the runner to the repo. Runners in the repo are used // by the Status, Stop, and shutdown methods on the traverser. t.runnerRepo.Set(job.Id, runner) // Bail out if Stop was called or traverser shut down. It is // important that this check happens AFTER the runner is added to // the repo. 
Otherwise if Stop gets called after this check but // before the runner is added to the repo, there will be nothing to // stop the job from running. // // We don't lock stopMux around this check and runner.Run. It's okay if // there's a small chance for the runner to be run after the traverser // gets stopped or shut down - it'll just return after trying the job // once. if t.stopped { job.State = proto.STATE_STOPPED // Send a JL to the RM so that it knows this job was stopped. // Add 1 to the total job tries, since this is used for keeping // job logs unique. t.chain.AddJobTries(job.Id, 1) err = fmt.Errorf("not starting job because traverser has already been stopped") t.sendJL(job, err) return } else if t.suspended { job.State = proto.STATE_STOPPED // Don't send a JL because this job will be resumed later, // and don't include this try in the total # of tries (only // set job tries for the latest run). t.chain.SetLatestRunJobTries(job.Id, 1) return } // Run the job. This is a blocking operation that could take a long time. jLogger.Infof("running job") t.chain.SetJobState(job.Id, proto.STATE_RUNNING) ret := runner.Run(job.Data) t.chain.AddJobTries(job.Id, ret.Tries) job.State = ret.FinalState
}(job) } } // sendJL sends a job log to the Request Manager. func (t *traverser) sendJL(job proto.Job, err error) { jLogger := t.logger.WithFields(log.Fields{"job_id": job.Id}) jl := proto.JobLog{ RequestId: t.chain.RequestId(), JobId: job.Id, Name: job.Name, Type: job.Type, Try: t.chain.TotalTries(job.Id), SequenceId: job.SequenceId, SequenceTry: t.chain.SequenceTries(job.Id), StartedAt: 0, // zero because the job never ran FinishedAt: 0, State: job.State, Exit: 1, Error: err.Error(), } if err != nil { jl.Error = err.Error() } // Send the JL to the RM. err = retry.Do(jobLogTries, jobLogRetryWait, func() error { return t.rmc.CreateJL(t.chain.RequestId(), jl) }, nil, ) if err != nil { jLogger.Errorf("problem sending job log (%#v) to the Request Manager: %s", jl, err) return } } // shutdown suspends the running chain by switching the running chain reaper for a // suspended chain reaper and stopping all currently running jobs. Once all jobs // have finished, the suspended reaper informs the RM about the suspended chain by // sending a SuspendedJobChain. // // When a Job Runner is shutting down, all of its traversers are shut down and their // running job chains suspended. The Request Manager can later resume these job // chains by sending them to a running Job Runner instance. func (t *traverser) shutdown() { // Don't do anything if the traverser has already been stopped or suspended. t.stopMux.Lock() defer t.stopMux.Unlock() if t.stopped || t.suspended { return } t.suspended = true t.logger.Info("suspending job chain - stopping all jobs") // Stop the current reaper and start running a reaper for suspended chains. This // reaper saves jobs' states and prepares the chain to be resumed later, but // doesn't enqueue any more jobs to run. When all jobs have stopped running, // it sends the SuspendedJobChain to the RM (or the final state if the // chain was completed or failed). t.reaper.Stop() // blocks until running reaper is done stopping suspendedReaperChan := make(chan struct{}) t.reaper = t.reaperFactory.MakeSuspended() go func() { defer close(suspendedReaperChan) t.reaper.Run() }() // Stop all job runners in the runner repo. Do this after switching to the // suspended reaper so that when the jobs finish and are sent on doneJobChan, // they are reaped correctly. err := t.stopRunningJobs() if err != nil { t.logger.Errorf("problem suspending job chain: %s", err) } // Wait for suspended reaper to finish. If it takes too long, some jobs // haven't respond quickly to being stopped. Stop waiting for these jobs by // stopping the suspended reaper. select { case <-suspendedReaperChan: case <-time.After(t.stopTimeout): t.logger.Warnf("timed out waiting for jobs to stop - stopping reaper") t.reaper.Stop() } close(t.doneChan) } // stopRunningJobs stops all currently running jobs. func (t *traverser) stopRunningJobs() error { // Get all of the active runners for this traverser from the repo. Only runners // that are in the repo will be stopped. activeRunners, err := t.runnerRepo.Items() if err != nil { return fmt.Errorf("problem retrieving job runners from repo: %s", err) } // Call Stop on each runner. Use goroutines in case some jobs don't return from // Stop() quickly. 
var runnerWG sync.WaitGroup
	var hadErrorMux sync.Mutex
	hadError := false
	for jobId, activeRunner := range activeRunners {
		runnerWG.Add(1)
		// Pass jobId and the runner into each goroutine so the loop variables
		// aren't shared, and guard hadError since it's written concurrently.
		go func(jobId string, runner runner.Runner) {
			defer runnerWG.Done()
			err := runner.Stop()
			if err != nil {
				t.logger.Errorf("problem stopping job runner (job id = %s): %s", jobId, err)
				hadErrorMux.Lock()
				hadError = true
				hadErrorMux.Unlock()
			}
		}(jobId, activeRunner)
	}

	// If there was an error when stopping at least one of the jobs, return it.
	runnerWG.Wait()
	if hadError {
		return fmt.Errorf("problem stopping one or more job runners - see logs for more info")
	}
	return nil
}
random_line_split
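The try accounting in runJobs above, for resuming a job that was stopped mid-run, is easier to follow as a standalone calculation: the try the job was stopped on does not count against it, so one try is subtracted when it had run at least once. The sketch below mirrors that comment only; triesBeforeStopped and latestRunTries are local names introduced here for illustration.

package main

import "fmt"

// triesBeforeStopped mirrors the accounting described in runJobs: when a job
// resumes from STATE_STOPPED, the runner is told it has already used
// (latest-run tries - 1) tries, floored at zero.
func triesBeforeStopped(wasStopped bool, latestRunTries uint) uint {
	if !wasStopped {
		return 0
	}
	if latestRunTries > 0 {
		return latestRunTries - 1
	}
	return 0
}

func main() {
	// A job stopped during its 3rd try resumes with 2 tries already consumed,
	// so a limit of N tries per run leaves it N-2 more attempts.
	fmt.Println(triesBeforeStopped(true, 3))  // 2
	fmt.Println(triesBeforeStopped(true, 0))  // 0 (never ran before being stopped)
	fmt.Println(triesBeforeStopped(false, 5)) // 0 (job was not stopped)
}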
table.go
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com> // All rights reserved. // // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. package leveldb import ( "fmt" "sort" "sync/atomic" "github.com/syndtr/goleveldb/leveldb/cache" "github.com/syndtr/goleveldb/leveldb/iterator" "github.com/syndtr/goleveldb/leveldb/opt" "github.com/syndtr/goleveldb/leveldb/storage" "github.com/syndtr/goleveldb/leveldb/table" "github.com/syndtr/goleveldb/leveldb/util" ) // tFile holds basic information about a table. type tFile struct { fd storage.FileDesc seekLeft int32 size int64 imin, imax internalKey } // Returns true if given key is after largest key of this table. func (t *tFile) after(icmp *iComparer, ukey []byte) bool { return ukey != nil && icmp.uCompare(ukey, t.imax.ukey()) > 0 } // Returns true if given key is before smallest key of this table. func (t *tFile) before(icmp *iComparer, ukey []byte) bool { return ukey != nil && icmp.uCompare(ukey, t.imin.ukey()) < 0 } // Returns true if given key range overlaps with this table key range. func (t *tFile) overlaps(icmp *iComparer, umin, umax []byte) bool { return !t.after(icmp, umin) && !t.before(icmp, umax) } // Cosumes one seek and return current seeks left. func (t *tFile) consumeSeek() int32 { return atomic.AddInt32(&t.seekLeft, -1) } // Creates new tFile. func newTableFile(fd storage.FileDesc, size int64, imin, imax internalKey) *tFile { f := &tFile{ fd: fd, size: size, imin: imin, imax: imax, } // We arrange to automatically compact this file after // a certain number of seeks. Let's assume: // (1) One seek costs 10ms // (2) Writing or reading 1MB costs 10ms (100MB/s) // (3) A compaction of 1MB does 25MB of IO: // 1MB read from this level // 10-12MB read from next level (boundaries may be misaligned) // 10-12MB written to next level // This implies that 25 seeks cost the same as the compaction // of 1MB of data. I.e., one seek costs approximately the // same as the compaction of 40KB of data. We are a little // conservative and allow approximately one seek for every 16KB // of data before triggering a compaction. f.seekLeft = int32(size / 16384) if f.seekLeft < 100 { f.seekLeft = 100 } return f } func tableFileFromRecord(r atRecord) *tFile { return newTableFile(storage.FileDesc{Type: storage.TypeTable, Num: r.num}, r.size, r.imin, r.imax) } // tFiles hold multiple tFile. type tFiles []*tFile func (tf tFiles) Len() int { return len(tf) } func (tf tFiles) Swap(i, j int) { tf[i], tf[j] = tf[j], tf[i] } func (tf tFiles) nums() string { x := "[ " for i, f := range tf { if i != 0 { x += ", " } x += fmt.Sprint(f.fd.Num) } x += " ]" return x } // Returns true if i smallest key is less than j. // This used for sort by key in ascending order. func (tf tFiles) lessByKey(icmp *iComparer, i, j int) bool
// Returns true if i file number is greater than j. // This used for sort by file number in descending order. func (tf tFiles) lessByNum(i, j int) bool { return tf[i].fd.Num > tf[j].fd.Num } // Sorts tables by key in ascending order. func (tf tFiles) sortByKey(icmp *iComparer) { sort.Sort(&tFilesSortByKey{tFiles: tf, icmp: icmp}) } // Sorts tables by file number in descending order. func (tf tFiles) sortByNum() { sort.Sort(&tFilesSortByNum{tFiles: tf}) } // Returns sum of all tables size. func (tf tFiles) size() (sum int64) { for _, t := range tf { sum += t.size } return sum } // Searches smallest index of tables whose its smallest // key is after or equal with given key. func (tf tFiles) searchMin(icmp *iComparer, ikey internalKey) int { return sort.Search(len(tf), func(i int) bool { return icmp.Compare(tf[i].imin, ikey) >= 0 }) } // Searches smallest index of tables whose its largest // key is after or equal with given key. func (tf tFiles) searchMax(icmp *iComparer, ikey internalKey) int { return sort.Search(len(tf), func(i int) bool { return icmp.Compare(tf[i].imax, ikey) >= 0 }) } // Returns true if given key range overlaps with one or more // tables key range. If unsorted is true then binary search will not be used. func (tf tFiles) overlaps(icmp *iComparer, umin, umax []byte, unsorted bool) bool { if unsorted { // Check against all files. for _, t := range tf { if t.overlaps(icmp, umin, umax) { return true } } return false } i := 0 if len(umin) > 0 { // Find the earliest possible internal key for min. i = tf.searchMax(icmp, makeInternalKey(nil, umin, keyMaxSeq, keyTypeSeek)) } if i >= len(tf) { // Beginning of range is after all files, so no overlap. return false } return !tf[i].before(icmp, umax) } // Returns tables whose its key range overlaps with given key range. // Range will be expanded if ukey found hop across tables. // If overlapped is true then the search will be restarted if umax // expanded. // The dst content will be overwritten. func (tf tFiles) getOverlaps(dst tFiles, icmp *iComparer, umin, umax []byte, overlapped bool) tFiles { dst = dst[:0] for i := 0; i < len(tf); { t := tf[i] if t.overlaps(icmp, umin, umax) { if umin != nil && icmp.uCompare(t.imin.ukey(), umin) < 0 { umin = t.imin.ukey() dst = dst[:0] i = 0 continue } else if umax != nil && icmp.uCompare(t.imax.ukey(), umax) > 0 { umax = t.imax.ukey() // Restart search if it is overlapped. if overlapped { dst = dst[:0] i = 0 continue } } dst = append(dst, t) } i++ } return dst } // Returns tables key range. func (tf tFiles) getRange(icmp *iComparer) (imin, imax internalKey) { for i, t := range tf { if i == 0 { imin, imax = t.imin, t.imax continue } if icmp.Compare(t.imin, imin) < 0 { imin = t.imin } if icmp.Compare(t.imax, imax) > 0 { imax = t.imax } } return } // Creates iterator index from tables. func (tf tFiles) newIndexIterator(tops *tOps, icmp *iComparer, slice *util.Range, ro *opt.ReadOptions) iterator.IteratorIndexer { if slice != nil { var start, limit int if slice.Start != nil { start = tf.searchMax(icmp, internalKey(slice.Start)) } if slice.Limit != nil { limit = tf.searchMin(icmp, internalKey(slice.Limit)) } else { limit = tf.Len() } tf = tf[start:limit] } return iterator.NewArrayIndexer(&tFilesArrayIndexer{ tFiles: tf, tops: tops, icmp: icmp, slice: slice, ro: ro, }) } // Tables iterator index. 
type tFilesArrayIndexer struct { tFiles tops *tOps icmp *iComparer slice *util.Range ro *opt.ReadOptions } func (a *tFilesArrayIndexer) Search(key []byte) int { return a.searchMax(a.icmp, internalKey(key)) } func (a *tFilesArrayIndexer) Get(i int) iterator.Iterator { if i == 0 || i == a.Len()-1 { return a.tops.newIterator(a.tFiles[i], a.slice, a.ro) } return a.tops.newIterator(a.tFiles[i], nil, a.ro) } // Helper type for sortByKey. type tFilesSortByKey struct { tFiles icmp *iComparer } func (x *tFilesSortByKey) Less(i, j int) bool { return x.lessByKey(x.icmp, i, j) } // Helper type for sortByNum. type tFilesSortByNum struct { tFiles } func (x *tFilesSortByNum) Less(i, j int) bool { return x.lessByNum(i, j) } // Table operations. type tOps struct { s *session noSync bool evictRemoved bool cache *cache.Cache bcache *cache.Cache bpool *util.BufferPool } // Creates an empty table and returns table writer. func (t *tOps) create() (*tWriter, error) { fd := storage.FileDesc{Type: storage.TypeTable, Num: t.s.allocFileNum()} fw, err := t.s.stor.Create(fd) if err != nil { return nil, err } return &tWriter{ t: t, fd: fd, w: fw, tw: table.NewWriter(fw, t.s.o.Options), }, nil } // Builds table from src iterator. func (t *tOps) createFrom(src iterator.Iterator) (f *tFile, n int, err error) { w, err := t.create() if err != nil { return } defer func() { if err != nil { w.drop() } }() for src.Next() { err = w.append(src.Key(), src.Value()) if err != nil { return } } err = src.Error() if err != nil { return } n = w.tw.EntriesLen() f, err = w.finish() return } // Opens table. It returns a cache handle, which should // be released after use. func (t *tOps) open(f *tFile) (ch *cache.Handle, err error) { ch = t.cache.Get(0, uint64(f.fd.Num), func() (size int, value cache.Value) { var r storage.Reader r, err = t.s.stor.Open(f.fd) if err != nil { return 0, nil } var bcache *cache.NamespaceGetter if t.bcache != nil { bcache = &cache.NamespaceGetter{Cache: t.bcache, NS: uint64(f.fd.Num)} } var tr *table.Reader tr, err = table.NewReader(r, f.size, f.fd, bcache, t.bpool, t.s.o.Options) if err != nil { r.Close() return 0, nil } return 1, tr }) if ch == nil && err == nil { err = ErrClosed } return } // Finds key/value pair whose key is greater than or equal to the // given key. func (t *tOps) find(f *tFile, key []byte, ro *opt.ReadOptions) (rkey, rvalue []byte, err error) { ch, err := t.open(f) if err != nil { return nil, nil, err } defer ch.Release() return ch.Value().(*table.Reader).Find(key, true, ro) } // Finds key that is greater than or equal to the given key. func (t *tOps) findKey(f *tFile, key []byte, ro *opt.ReadOptions) (rkey []byte, err error) { ch, err := t.open(f) if err != nil { return nil, err } defer ch.Release() return ch.Value().(*table.Reader).FindKey(key, true, ro) } // Returns approximate offset of the given key. func (t *tOps) offsetOf(f *tFile, key []byte) (offset int64, err error) { ch, err := t.open(f) if err != nil { return } defer ch.Release() return ch.Value().(*table.Reader).OffsetOf(key) } // Creates an iterator from the given table. func (t *tOps) newIterator(f *tFile, slice *util.Range, ro *opt.ReadOptions) iterator.Iterator { ch, err := t.open(f) if err != nil { return iterator.NewEmptyIterator(err) } iter := ch.Value().(*table.Reader).NewIterator(slice, ro) iter.SetReleaser(ch) return iter } // Removes table from persistent storage. It waits until // no one use the the table. 
func (t *tOps) remove(f *tFile) { t.cache.Delete(0, uint64(f.fd.Num), func() { if err := t.s.stor.Remove(f.fd); err != nil { t.s.logf("table@remove removing @%d %q", f.fd.Num, err) } else { t.s.logf("table@remove removed @%d", f.fd.Num) } if t.evictRemoved && t.bcache != nil { t.bcache.EvictNS(uint64(f.fd.Num)) } }) } // Closes the table ops instance. It will close all tables, // regadless still used or not. func (t *tOps) close() { t.bpool.Close() t.cache.Close() if t.bcache != nil { t.bcache.CloseWeak() } } // Creates new initialized table ops instance. func newTableOps(s *session) *tOps { var ( cacher cache.Cacher bcache *cache.Cache bpool *util.BufferPool ) if s.o.GetOpenFilesCacheCapacity() > 0 { cacher = cache.NewLRU(s.o.GetOpenFilesCacheCapacity()) } if !s.o.GetDisableBlockCache() { var bcacher cache.Cacher if s.o.GetBlockCacheCapacity() > 0 { bcacher = s.o.GetBlockCacher().New(s.o.GetBlockCacheCapacity()) } bcache = cache.NewCache(bcacher) } if !s.o.GetDisableBufferPool() { bpool = util.NewBufferPool(s.o.GetBlockSize() + 5) } return &tOps{ s: s, noSync: s.o.GetNoSync(), evictRemoved: s.o.GetBlockCacheEvictRemoved(), cache: cache.NewCache(cacher), bcache: bcache, bpool: bpool, } } // tWriter wraps the table writer. It keep track of file descriptor // and added key range. type tWriter struct { t *tOps fd storage.FileDesc w storage.Writer tw *table.Writer first, last []byte } // Append key/value pair to the table. func (w *tWriter) append(key, value []byte) error { if w.first == nil { w.first = append([]byte{}, key...) } w.last = append(w.last[:0], key...) return w.tw.Append(key, value) } // Returns true if the table is empty. func (w *tWriter) empty() bool { return w.first == nil } // Closes the storage.Writer. func (w *tWriter) close() { if w.w != nil { w.w.Close() w.w = nil } } // Finalizes the table and returns table file. func (w *tWriter) finish() (f *tFile, err error) { defer w.close() err = w.tw.Close() if err != nil { return } if !w.t.noSync { err = w.w.Sync() if err != nil { return } } f = newTableFile(w.fd, int64(w.tw.BytesLen()), internalKey(w.first), internalKey(w.last)) return } // Drops the table. func (w *tWriter) drop() { w.close() w.t.s.stor.Remove(w.fd) w.t.s.reuseFileNum(w.fd.Num) w.tw = nil w.first = nil w.last = nil }
{ a, b := tf[i], tf[j] n := icmp.Compare(a.imin, b.imin) if n == 0 { return a.fd.Num < b.fd.Num } return n < 0 }
identifier_body
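The seek-allowance heuristic in newTableFile above works out to roughly one allowed seek per 16KB of table data, with a floor of 100, based on the estimate that ~25 seeks cost about as much as compacting 1MB. A standalone sketch of that arithmetic follows; seekAllowance is a local name used only for illustration, not part of the package.

package main

import "fmt"

// seekAllowance mirrors the formula in newTableFile: one seek per 16KB of
// table data (a conservative version of "one seek per ~40KB"), with a
// minimum of 100 seeks before compaction is considered.
func seekAllowance(size int64) int32 {
	n := int32(size / 16384)
	if n < 100 {
		n = 100
	}
	return n
}

func main() {
	fmt.Println(seekAllowance(2 * 1024 * 1024)) // 2MB table -> 128 seeks
	fmt.Println(seekAllowance(1 * 1024 * 1024)) // 1MB table -> 64, clamped to 100
	fmt.Println(seekAllowance(64 * 1024))       // small table -> floor of 100
}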
table.go
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com> // All rights reserved. // // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. package leveldb import ( "fmt" "sort" "sync/atomic" "github.com/syndtr/goleveldb/leveldb/cache" "github.com/syndtr/goleveldb/leveldb/iterator" "github.com/syndtr/goleveldb/leveldb/opt" "github.com/syndtr/goleveldb/leveldb/storage" "github.com/syndtr/goleveldb/leveldb/table" "github.com/syndtr/goleveldb/leveldb/util" ) // tFile holds basic information about a table. type tFile struct { fd storage.FileDesc seekLeft int32 size int64 imin, imax internalKey } // Returns true if given key is after largest key of this table. func (t *tFile) after(icmp *iComparer, ukey []byte) bool { return ukey != nil && icmp.uCompare(ukey, t.imax.ukey()) > 0 } // Returns true if given key is before smallest key of this table. func (t *tFile) before(icmp *iComparer, ukey []byte) bool { return ukey != nil && icmp.uCompare(ukey, t.imin.ukey()) < 0 } // Returns true if given key range overlaps with this table key range. func (t *tFile) overlaps(icmp *iComparer, umin, umax []byte) bool { return !t.after(icmp, umin) && !t.before(icmp, umax) } // Cosumes one seek and return current seeks left. func (t *tFile) consumeSeek() int32 { return atomic.AddInt32(&t.seekLeft, -1) } // Creates new tFile. func newTableFile(fd storage.FileDesc, size int64, imin, imax internalKey) *tFile { f := &tFile{ fd: fd, size: size, imin: imin, imax: imax, } // We arrange to automatically compact this file after // a certain number of seeks. Let's assume: // (1) One seek costs 10ms // (2) Writing or reading 1MB costs 10ms (100MB/s) // (3) A compaction of 1MB does 25MB of IO: // 1MB read from this level // 10-12MB read from next level (boundaries may be misaligned) // 10-12MB written to next level // This implies that 25 seeks cost the same as the compaction // of 1MB of data. I.e., one seek costs approximately the // same as the compaction of 40KB of data. We are a little // conservative and allow approximately one seek for every 16KB // of data before triggering a compaction. f.seekLeft = int32(size / 16384) if f.seekLeft < 100 { f.seekLeft = 100 } return f } func tableFileFromRecord(r atRecord) *tFile { return newTableFile(storage.FileDesc{Type: storage.TypeTable, Num: r.num}, r.size, r.imin, r.imax) } // tFiles hold multiple tFile. type tFiles []*tFile func (tf tFiles) Len() int { return len(tf) } func (tf tFiles) Swap(i, j int) { tf[i], tf[j] = tf[j], tf[i] } func (tf tFiles) nums() string { x := "[ " for i, f := range tf { if i != 0 { x += ", " } x += fmt.Sprint(f.fd.Num) } x += " ]" return x } // Returns true if i smallest key is less than j. // This used for sort by key in ascending order. func (tf tFiles) lessByKey(icmp *iComparer, i, j int) bool { a, b := tf[i], tf[j] n := icmp.Compare(a.imin, b.imin) if n == 0 { return a.fd.Num < b.fd.Num } return n < 0 } // Returns true if i file number is greater than j. // This used for sort by file number in descending order. func (tf tFiles) lessByNum(i, j int) bool { return tf[i].fd.Num > tf[j].fd.Num } // Sorts tables by key in ascending order. func (tf tFiles) sortByKey(icmp *iComparer) { sort.Sort(&tFilesSortByKey{tFiles: tf, icmp: icmp}) } // Sorts tables by file number in descending order. func (tf tFiles) sortByNum() { sort.Sort(&tFilesSortByNum{tFiles: tf}) } // Returns sum of all tables size. 
func (tf tFiles) size() (sum int64) { for _, t := range tf { sum += t.size } return sum } // Searches smallest index of tables whose its smallest // key is after or equal with given key. func (tf tFiles) searchMin(icmp *iComparer, ikey internalKey) int { return sort.Search(len(tf), func(i int) bool { return icmp.Compare(tf[i].imin, ikey) >= 0 }) } // Searches smallest index of tables whose its largest // key is after or equal with given key. func (tf tFiles) searchMax(icmp *iComparer, ikey internalKey) int { return sort.Search(len(tf), func(i int) bool { return icmp.Compare(tf[i].imax, ikey) >= 0 }) } // Returns true if given key range overlaps with one or more // tables key range. If unsorted is true then binary search will not be used. func (tf tFiles) overlaps(icmp *iComparer, umin, umax []byte, unsorted bool) bool { if unsorted { // Check against all files. for _, t := range tf { if t.overlaps(icmp, umin, umax) { return true } } return false } i := 0 if len(umin) > 0 { // Find the earliest possible internal key for min. i = tf.searchMax(icmp, makeInternalKey(nil, umin, keyMaxSeq, keyTypeSeek)) } if i >= len(tf) { // Beginning of range is after all files, so no overlap. return false } return !tf[i].before(icmp, umax) } // Returns tables whose its key range overlaps with given key range. // Range will be expanded if ukey found hop across tables. // If overlapped is true then the search will be restarted if umax // expanded. // The dst content will be overwritten. func (tf tFiles) getOverlaps(dst tFiles, icmp *iComparer, umin, umax []byte, overlapped bool) tFiles { dst = dst[:0] for i := 0; i < len(tf); { t := tf[i] if t.overlaps(icmp, umin, umax) { if umin != nil && icmp.uCompare(t.imin.ukey(), umin) < 0 { umin = t.imin.ukey() dst = dst[:0] i = 0 continue } else if umax != nil && icmp.uCompare(t.imax.ukey(), umax) > 0 { umax = t.imax.ukey() // Restart search if it is overlapped. if overlapped { dst = dst[:0] i = 0 continue } } dst = append(dst, t) } i++ } return dst } // Returns tables key range. func (tf tFiles) getRange(icmp *iComparer) (imin, imax internalKey) { for i, t := range tf { if i == 0 { imin, imax = t.imin, t.imax continue } if icmp.Compare(t.imin, imin) < 0 { imin = t.imin } if icmp.Compare(t.imax, imax) > 0 { imax = t.imax } } return } // Creates iterator index from tables. func (tf tFiles) newIndexIterator(tops *tOps, icmp *iComparer, slice *util.Range, ro *opt.ReadOptions) iterator.IteratorIndexer { if slice != nil { var start, limit int if slice.Start != nil { start = tf.searchMax(icmp, internalKey(slice.Start)) } if slice.Limit != nil { limit = tf.searchMin(icmp, internalKey(slice.Limit)) } else { limit = tf.Len() } tf = tf[start:limit] } return iterator.NewArrayIndexer(&tFilesArrayIndexer{ tFiles: tf, tops: tops, icmp: icmp, slice: slice, ro: ro, }) } // Tables iterator index. type tFilesArrayIndexer struct { tFiles tops *tOps icmp *iComparer slice *util.Range ro *opt.ReadOptions } func (a *tFilesArrayIndexer) Search(key []byte) int { return a.searchMax(a.icmp, internalKey(key)) } func (a *tFilesArrayIndexer) Get(i int) iterator.Iterator { if i == 0 || i == a.Len()-1 { return a.tops.newIterator(a.tFiles[i], a.slice, a.ro) } return a.tops.newIterator(a.tFiles[i], nil, a.ro) } // Helper type for sortByKey. type tFilesSortByKey struct { tFiles icmp *iComparer } func (x *tFilesSortByKey) Less(i, j int) bool { return x.lessByKey(x.icmp, i, j) } // Helper type for sortByNum.
} func (x *tFilesSortByNum) Less(i, j int) bool { return x.lessByNum(i, j) } // Table operations. type tOps struct { s *session noSync bool evictRemoved bool cache *cache.Cache bcache *cache.Cache bpool *util.BufferPool } // Creates an empty table and returns table writer. func (t *tOps) create() (*tWriter, error) { fd := storage.FileDesc{Type: storage.TypeTable, Num: t.s.allocFileNum()} fw, err := t.s.stor.Create(fd) if err != nil { return nil, err } return &tWriter{ t: t, fd: fd, w: fw, tw: table.NewWriter(fw, t.s.o.Options), }, nil } // Builds table from src iterator. func (t *tOps) createFrom(src iterator.Iterator) (f *tFile, n int, err error) { w, err := t.create() if err != nil { return } defer func() { if err != nil { w.drop() } }() for src.Next() { err = w.append(src.Key(), src.Value()) if err != nil { return } } err = src.Error() if err != nil { return } n = w.tw.EntriesLen() f, err = w.finish() return } // Opens table. It returns a cache handle, which should // be released after use. func (t *tOps) open(f *tFile) (ch *cache.Handle, err error) { ch = t.cache.Get(0, uint64(f.fd.Num), func() (size int, value cache.Value) { var r storage.Reader r, err = t.s.stor.Open(f.fd) if err != nil { return 0, nil } var bcache *cache.NamespaceGetter if t.bcache != nil { bcache = &cache.NamespaceGetter{Cache: t.bcache, NS: uint64(f.fd.Num)} } var tr *table.Reader tr, err = table.NewReader(r, f.size, f.fd, bcache, t.bpool, t.s.o.Options) if err != nil { r.Close() return 0, nil } return 1, tr }) if ch == nil && err == nil { err = ErrClosed } return } // Finds key/value pair whose key is greater than or equal to the // given key. func (t *tOps) find(f *tFile, key []byte, ro *opt.ReadOptions) (rkey, rvalue []byte, err error) { ch, err := t.open(f) if err != nil { return nil, nil, err } defer ch.Release() return ch.Value().(*table.Reader).Find(key, true, ro) } // Finds key that is greater than or equal to the given key. func (t *tOps) findKey(f *tFile, key []byte, ro *opt.ReadOptions) (rkey []byte, err error) { ch, err := t.open(f) if err != nil { return nil, err } defer ch.Release() return ch.Value().(*table.Reader).FindKey(key, true, ro) } // Returns approximate offset of the given key. func (t *tOps) offsetOf(f *tFile, key []byte) (offset int64, err error) { ch, err := t.open(f) if err != nil { return } defer ch.Release() return ch.Value().(*table.Reader).OffsetOf(key) } // Creates an iterator from the given table. func (t *tOps) newIterator(f *tFile, slice *util.Range, ro *opt.ReadOptions) iterator.Iterator { ch, err := t.open(f) if err != nil { return iterator.NewEmptyIterator(err) } iter := ch.Value().(*table.Reader).NewIterator(slice, ro) iter.SetReleaser(ch) return iter } // Removes table from persistent storage. It waits until // no one use the the table. func (t *tOps) remove(f *tFile) { t.cache.Delete(0, uint64(f.fd.Num), func() { if err := t.s.stor.Remove(f.fd); err != nil { t.s.logf("table@remove removing @%d %q", f.fd.Num, err) } else { t.s.logf("table@remove removed @%d", f.fd.Num) } if t.evictRemoved && t.bcache != nil { t.bcache.EvictNS(uint64(f.fd.Num)) } }) } // Closes the table ops instance. It will close all tables, // regadless still used or not. func (t *tOps) close() { t.bpool.Close() t.cache.Close() if t.bcache != nil { t.bcache.CloseWeak() } } // Creates new initialized table ops instance. 
func newTableOps(s *session) *tOps { var ( cacher cache.Cacher bcache *cache.Cache bpool *util.BufferPool ) if s.o.GetOpenFilesCacheCapacity() > 0 { cacher = cache.NewLRU(s.o.GetOpenFilesCacheCapacity()) } if !s.o.GetDisableBlockCache() { var bcacher cache.Cacher if s.o.GetBlockCacheCapacity() > 0 { bcacher = s.o.GetBlockCacher().New(s.o.GetBlockCacheCapacity()) } bcache = cache.NewCache(bcacher) } if !s.o.GetDisableBufferPool() { bpool = util.NewBufferPool(s.o.GetBlockSize() + 5) } return &tOps{ s: s, noSync: s.o.GetNoSync(), evictRemoved: s.o.GetBlockCacheEvictRemoved(), cache: cache.NewCache(cacher), bcache: bcache, bpool: bpool, } } // tWriter wraps the table writer. It keep track of file descriptor // and added key range. type tWriter struct { t *tOps fd storage.FileDesc w storage.Writer tw *table.Writer first, last []byte } // Append key/value pair to the table. func (w *tWriter) append(key, value []byte) error { if w.first == nil { w.first = append([]byte{}, key...) } w.last = append(w.last[:0], key...) return w.tw.Append(key, value) } // Returns true if the table is empty. func (w *tWriter) empty() bool { return w.first == nil } // Closes the storage.Writer. func (w *tWriter) close() { if w.w != nil { w.w.Close() w.w = nil } } // Finalizes the table and returns table file. func (w *tWriter) finish() (f *tFile, err error) { defer w.close() err = w.tw.Close() if err != nil { return } if !w.t.noSync { err = w.w.Sync() if err != nil { return } } f = newTableFile(w.fd, int64(w.tw.BytesLen()), internalKey(w.first), internalKey(w.last)) return } // Drops the table. func (w *tWriter) drop() { w.close() w.t.s.stor.Remove(w.fd) w.t.s.reuseFileNum(w.fd.Num) w.tw = nil w.first = nil w.last = nil }
type tFilesSortByNum struct { tFiles
random_line_split
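For the sorted, non-overlapping case, tFiles.overlaps above reduces to one binary search plus one comparison: find the first table whose largest key is at or after umin, then check that its smallest key is not past umax. The simplified string-key mirror below ignores the internal-key encoding and the unbounded-range handling of the original; all names here are illustrative.

package main

import (
	"fmt"
	"sort"
)

type span struct{ min, max string }

// overlapsSorted mirrors tFiles.overlaps for the sorted case: binary-search
// the first span whose max >= umin, then check it does not start after umax.
func overlapsSorted(spans []span, umin, umax string) bool {
	i := sort.Search(len(spans), func(i int) bool { return spans[i].max >= umin })
	if i >= len(spans) {
		return false // query range starts after every span
	}
	return spans[i].min <= umax
}

func main() {
	level := []span{{"a", "c"}, {"e", "g"}, {"k", "n"}}
	fmt.Println(overlapsSorted(level, "f", "h")) // true: hits {"e","g"}
	fmt.Println(overlapsSorted(level, "h", "j")) // false: falls in the gap
	fmt.Println(overlapsSorted(level, "p", "z")) // false: past the last span
}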
table.go
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com> // All rights reserved. // // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. package leveldb import ( "fmt" "sort" "sync/atomic" "github.com/syndtr/goleveldb/leveldb/cache" "github.com/syndtr/goleveldb/leveldb/iterator" "github.com/syndtr/goleveldb/leveldb/opt" "github.com/syndtr/goleveldb/leveldb/storage" "github.com/syndtr/goleveldb/leveldb/table" "github.com/syndtr/goleveldb/leveldb/util" ) // tFile holds basic information about a table. type tFile struct { fd storage.FileDesc seekLeft int32 size int64 imin, imax internalKey } // Returns true if given key is after largest key of this table. func (t *tFile) after(icmp *iComparer, ukey []byte) bool { return ukey != nil && icmp.uCompare(ukey, t.imax.ukey()) > 0 } // Returns true if given key is before smallest key of this table. func (t *tFile) before(icmp *iComparer, ukey []byte) bool { return ukey != nil && icmp.uCompare(ukey, t.imin.ukey()) < 0 } // Returns true if given key range overlaps with this table key range. func (t *tFile) overlaps(icmp *iComparer, umin, umax []byte) bool { return !t.after(icmp, umin) && !t.before(icmp, umax) } // Cosumes one seek and return current seeks left. func (t *tFile) consumeSeek() int32 { return atomic.AddInt32(&t.seekLeft, -1) } // Creates new tFile. func newTableFile(fd storage.FileDesc, size int64, imin, imax internalKey) *tFile { f := &tFile{ fd: fd, size: size, imin: imin, imax: imax, } // We arrange to automatically compact this file after // a certain number of seeks. Let's assume: // (1) One seek costs 10ms // (2) Writing or reading 1MB costs 10ms (100MB/s) // (3) A compaction of 1MB does 25MB of IO: // 1MB read from this level // 10-12MB read from next level (boundaries may be misaligned) // 10-12MB written to next level // This implies that 25 seeks cost the same as the compaction // of 1MB of data. I.e., one seek costs approximately the // same as the compaction of 40KB of data. We are a little // conservative and allow approximately one seek for every 16KB // of data before triggering a compaction. f.seekLeft = int32(size / 16384) if f.seekLeft < 100 { f.seekLeft = 100 } return f } func tableFileFromRecord(r atRecord) *tFile { return newTableFile(storage.FileDesc{Type: storage.TypeTable, Num: r.num}, r.size, r.imin, r.imax) } // tFiles hold multiple tFile. type tFiles []*tFile func (tf tFiles) Len() int { return len(tf) } func (tf tFiles) Swap(i, j int) { tf[i], tf[j] = tf[j], tf[i] } func (tf tFiles)
() string { x := "[ " for i, f := range tf { if i != 0 { x += ", " } x += fmt.Sprint(f.fd.Num) } x += " ]" return x } // Returns true if i smallest key is less than j. // This used for sort by key in ascending order. func (tf tFiles) lessByKey(icmp *iComparer, i, j int) bool { a, b := tf[i], tf[j] n := icmp.Compare(a.imin, b.imin) if n == 0 { return a.fd.Num < b.fd.Num } return n < 0 } // Returns true if i file number is greater than j. // This used for sort by file number in descending order. func (tf tFiles) lessByNum(i, j int) bool { return tf[i].fd.Num > tf[j].fd.Num } // Sorts tables by key in ascending order. func (tf tFiles) sortByKey(icmp *iComparer) { sort.Sort(&tFilesSortByKey{tFiles: tf, icmp: icmp}) } // Sorts tables by file number in descending order. func (tf tFiles) sortByNum() { sort.Sort(&tFilesSortByNum{tFiles: tf}) } // Returns sum of all tables size. func (tf tFiles) size() (sum int64) { for _, t := range tf { sum += t.size } return sum } // Searches smallest index of tables whose its smallest // key is after or equal with given key. func (tf tFiles) searchMin(icmp *iComparer, ikey internalKey) int { return sort.Search(len(tf), func(i int) bool { return icmp.Compare(tf[i].imin, ikey) >= 0 }) } // Searches smallest index of tables whose its largest // key is after or equal with given key. func (tf tFiles) searchMax(icmp *iComparer, ikey internalKey) int { return sort.Search(len(tf), func(i int) bool { return icmp.Compare(tf[i].imax, ikey) >= 0 }) } // Returns true if given key range overlaps with one or more // tables key range. If unsorted is true then binary search will not be used. func (tf tFiles) overlaps(icmp *iComparer, umin, umax []byte, unsorted bool) bool { if unsorted { // Check against all files. for _, t := range tf { if t.overlaps(icmp, umin, umax) { return true } } return false } i := 0 if len(umin) > 0 { // Find the earliest possible internal key for min. i = tf.searchMax(icmp, makeInternalKey(nil, umin, keyMaxSeq, keyTypeSeek)) } if i >= len(tf) { // Beginning of range is after all files, so no overlap. return false } return !tf[i].before(icmp, umax) } // Returns tables whose its key range overlaps with given key range. // Range will be expanded if ukey found hop across tables. // If overlapped is true then the search will be restarted if umax // expanded. // The dst content will be overwritten. func (tf tFiles) getOverlaps(dst tFiles, icmp *iComparer, umin, umax []byte, overlapped bool) tFiles { dst = dst[:0] for i := 0; i < len(tf); { t := tf[i] if t.overlaps(icmp, umin, umax) { if umin != nil && icmp.uCompare(t.imin.ukey(), umin) < 0 { umin = t.imin.ukey() dst = dst[:0] i = 0 continue } else if umax != nil && icmp.uCompare(t.imax.ukey(), umax) > 0 { umax = t.imax.ukey() // Restart search if it is overlapped. if overlapped { dst = dst[:0] i = 0 continue } } dst = append(dst, t) } i++ } return dst } // Returns tables key range. func (tf tFiles) getRange(icmp *iComparer) (imin, imax internalKey) { for i, t := range tf { if i == 0 { imin, imax = t.imin, t.imax continue } if icmp.Compare(t.imin, imin) < 0 { imin = t.imin } if icmp.Compare(t.imax, imax) > 0 { imax = t.imax } } return } // Creates iterator index from tables. 
func (tf tFiles) newIndexIterator(tops *tOps, icmp *iComparer, slice *util.Range, ro *opt.ReadOptions) iterator.IteratorIndexer { if slice != nil { var start, limit int if slice.Start != nil { start = tf.searchMax(icmp, internalKey(slice.Start)) } if slice.Limit != nil { limit = tf.searchMin(icmp, internalKey(slice.Limit)) } else { limit = tf.Len() } tf = tf[start:limit] } return iterator.NewArrayIndexer(&tFilesArrayIndexer{ tFiles: tf, tops: tops, icmp: icmp, slice: slice, ro: ro, }) } // Tables iterator index. type tFilesArrayIndexer struct { tFiles tops *tOps icmp *iComparer slice *util.Range ro *opt.ReadOptions } func (a *tFilesArrayIndexer) Search(key []byte) int { return a.searchMax(a.icmp, internalKey(key)) } func (a *tFilesArrayIndexer) Get(i int) iterator.Iterator { if i == 0 || i == a.Len()-1 { return a.tops.newIterator(a.tFiles[i], a.slice, a.ro) } return a.tops.newIterator(a.tFiles[i], nil, a.ro) } // Helper type for sortByKey. type tFilesSortByKey struct { tFiles icmp *iComparer } func (x *tFilesSortByKey) Less(i, j int) bool { return x.lessByKey(x.icmp, i, j) } // Helper type for sortByNum. type tFilesSortByNum struct { tFiles } func (x *tFilesSortByNum) Less(i, j int) bool { return x.lessByNum(i, j) } // Table operations. type tOps struct { s *session noSync bool evictRemoved bool cache *cache.Cache bcache *cache.Cache bpool *util.BufferPool } // Creates an empty table and returns table writer. func (t *tOps) create() (*tWriter, error) { fd := storage.FileDesc{Type: storage.TypeTable, Num: t.s.allocFileNum()} fw, err := t.s.stor.Create(fd) if err != nil { return nil, err } return &tWriter{ t: t, fd: fd, w: fw, tw: table.NewWriter(fw, t.s.o.Options), }, nil } // Builds table from src iterator. func (t *tOps) createFrom(src iterator.Iterator) (f *tFile, n int, err error) { w, err := t.create() if err != nil { return } defer func() { if err != nil { w.drop() } }() for src.Next() { err = w.append(src.Key(), src.Value()) if err != nil { return } } err = src.Error() if err != nil { return } n = w.tw.EntriesLen() f, err = w.finish() return } // Opens table. It returns a cache handle, which should // be released after use. func (t *tOps) open(f *tFile) (ch *cache.Handle, err error) { ch = t.cache.Get(0, uint64(f.fd.Num), func() (size int, value cache.Value) { var r storage.Reader r, err = t.s.stor.Open(f.fd) if err != nil { return 0, nil } var bcache *cache.NamespaceGetter if t.bcache != nil { bcache = &cache.NamespaceGetter{Cache: t.bcache, NS: uint64(f.fd.Num)} } var tr *table.Reader tr, err = table.NewReader(r, f.size, f.fd, bcache, t.bpool, t.s.o.Options) if err != nil { r.Close() return 0, nil } return 1, tr }) if ch == nil && err == nil { err = ErrClosed } return } // Finds key/value pair whose key is greater than or equal to the // given key. func (t *tOps) find(f *tFile, key []byte, ro *opt.ReadOptions) (rkey, rvalue []byte, err error) { ch, err := t.open(f) if err != nil { return nil, nil, err } defer ch.Release() return ch.Value().(*table.Reader).Find(key, true, ro) } // Finds key that is greater than or equal to the given key. func (t *tOps) findKey(f *tFile, key []byte, ro *opt.ReadOptions) (rkey []byte, err error) { ch, err := t.open(f) if err != nil { return nil, err } defer ch.Release() return ch.Value().(*table.Reader).FindKey(key, true, ro) } // Returns approximate offset of the given key. 
func (t *tOps) offsetOf(f *tFile, key []byte) (offset int64, err error) { ch, err := t.open(f) if err != nil { return } defer ch.Release() return ch.Value().(*table.Reader).OffsetOf(key) } // Creates an iterator from the given table. func (t *tOps) newIterator(f *tFile, slice *util.Range, ro *opt.ReadOptions) iterator.Iterator { ch, err := t.open(f) if err != nil { return iterator.NewEmptyIterator(err) } iter := ch.Value().(*table.Reader).NewIterator(slice, ro) iter.SetReleaser(ch) return iter } // Removes table from persistent storage. It waits until // no one use the the table. func (t *tOps) remove(f *tFile) { t.cache.Delete(0, uint64(f.fd.Num), func() { if err := t.s.stor.Remove(f.fd); err != nil { t.s.logf("table@remove removing @%d %q", f.fd.Num, err) } else { t.s.logf("table@remove removed @%d", f.fd.Num) } if t.evictRemoved && t.bcache != nil { t.bcache.EvictNS(uint64(f.fd.Num)) } }) } // Closes the table ops instance. It will close all tables, // regadless still used or not. func (t *tOps) close() { t.bpool.Close() t.cache.Close() if t.bcache != nil { t.bcache.CloseWeak() } } // Creates new initialized table ops instance. func newTableOps(s *session) *tOps { var ( cacher cache.Cacher bcache *cache.Cache bpool *util.BufferPool ) if s.o.GetOpenFilesCacheCapacity() > 0 { cacher = cache.NewLRU(s.o.GetOpenFilesCacheCapacity()) } if !s.o.GetDisableBlockCache() { var bcacher cache.Cacher if s.o.GetBlockCacheCapacity() > 0 { bcacher = s.o.GetBlockCacher().New(s.o.GetBlockCacheCapacity()) } bcache = cache.NewCache(bcacher) } if !s.o.GetDisableBufferPool() { bpool = util.NewBufferPool(s.o.GetBlockSize() + 5) } return &tOps{ s: s, noSync: s.o.GetNoSync(), evictRemoved: s.o.GetBlockCacheEvictRemoved(), cache: cache.NewCache(cacher), bcache: bcache, bpool: bpool, } } // tWriter wraps the table writer. It keep track of file descriptor // and added key range. type tWriter struct { t *tOps fd storage.FileDesc w storage.Writer tw *table.Writer first, last []byte } // Append key/value pair to the table. func (w *tWriter) append(key, value []byte) error { if w.first == nil { w.first = append([]byte{}, key...) } w.last = append(w.last[:0], key...) return w.tw.Append(key, value) } // Returns true if the table is empty. func (w *tWriter) empty() bool { return w.first == nil } // Closes the storage.Writer. func (w *tWriter) close() { if w.w != nil { w.w.Close() w.w = nil } } // Finalizes the table and returns table file. func (w *tWriter) finish() (f *tFile, err error) { defer w.close() err = w.tw.Close() if err != nil { return } if !w.t.noSync { err = w.w.Sync() if err != nil { return } } f = newTableFile(w.fd, int64(w.tw.BytesLen()), internalKey(w.first), internalKey(w.last)) return } // Drops the table. func (w *tWriter) drop() { w.close() w.t.s.stor.Remove(w.fd) w.t.s.reuseFileNum(w.fd.Num) w.tw = nil w.first = nil w.last = nil }
nums
identifier_name
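The tFilesSortByKey and tFilesSortByNum wrappers above exist only to pair the Len/Swap methods of tFiles with a specific Less so package sort can be used. A simplified, self-contained mirror of that pattern is sketched below; the file/files/byKey/byNum names are stand-ins, not the package's own types.

package main

import (
	"fmt"
	"sort"
)

// file is a stand-in for tFile, carrying a file number and a key.
type file struct {
	num int64
	key string
}

type files []*file

func (f files) Len() int      { return len(f) }
func (f files) Swap(i, j int) { f[i], f[j] = f[j], f[i] }

// byKey mirrors tFilesSortByKey: ascending by key, ties broken by file number.
type byKey struct{ files }

func (x byKey) Less(i, j int) bool {
	a, b := x.files[i], x.files[j]
	if a.key == b.key {
		return a.num < b.num
	}
	return a.key < b.key
}

// byNum mirrors tFilesSortByNum: descending by file number.
type byNum struct{ files }

func (x byNum) Less(i, j int) bool { return x.files[i].num > x.files[j].num }

func main() {
	fs := files{{3, "b"}, {1, "c"}, {2, "a"}}
	sort.Sort(byKey{fs})
	fmt.Println(fs[0].key, fs[1].key, fs[2].key) // a b c
	sort.Sort(byNum{fs})
	fmt.Println(fs[0].num, fs[1].num, fs[2].num) // 3 2 1
}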
table.go
// Copyright (c) 2012, Suryandaru Triandana <syndtr@gmail.com> // All rights reserved. // // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. package leveldb import ( "fmt" "sort" "sync/atomic" "github.com/syndtr/goleveldb/leveldb/cache" "github.com/syndtr/goleveldb/leveldb/iterator" "github.com/syndtr/goleveldb/leveldb/opt" "github.com/syndtr/goleveldb/leveldb/storage" "github.com/syndtr/goleveldb/leveldb/table" "github.com/syndtr/goleveldb/leveldb/util" ) // tFile holds basic information about a table. type tFile struct { fd storage.FileDesc seekLeft int32 size int64 imin, imax internalKey } // Returns true if given key is after largest key of this table. func (t *tFile) after(icmp *iComparer, ukey []byte) bool { return ukey != nil && icmp.uCompare(ukey, t.imax.ukey()) > 0 } // Returns true if given key is before smallest key of this table. func (t *tFile) before(icmp *iComparer, ukey []byte) bool { return ukey != nil && icmp.uCompare(ukey, t.imin.ukey()) < 0 } // Returns true if given key range overlaps with this table key range. func (t *tFile) overlaps(icmp *iComparer, umin, umax []byte) bool { return !t.after(icmp, umin) && !t.before(icmp, umax) } // Cosumes one seek and return current seeks left. func (t *tFile) consumeSeek() int32 { return atomic.AddInt32(&t.seekLeft, -1) } // Creates new tFile. func newTableFile(fd storage.FileDesc, size int64, imin, imax internalKey) *tFile { f := &tFile{ fd: fd, size: size, imin: imin, imax: imax, } // We arrange to automatically compact this file after // a certain number of seeks. Let's assume: // (1) One seek costs 10ms // (2) Writing or reading 1MB costs 10ms (100MB/s) // (3) A compaction of 1MB does 25MB of IO: // 1MB read from this level // 10-12MB read from next level (boundaries may be misaligned) // 10-12MB written to next level // This implies that 25 seeks cost the same as the compaction // of 1MB of data. I.e., one seek costs approximately the // same as the compaction of 40KB of data. We are a little // conservative and allow approximately one seek for every 16KB // of data before triggering a compaction. f.seekLeft = int32(size / 16384) if f.seekLeft < 100 { f.seekLeft = 100 } return f } func tableFileFromRecord(r atRecord) *tFile { return newTableFile(storage.FileDesc{Type: storage.TypeTable, Num: r.num}, r.size, r.imin, r.imax) } // tFiles hold multiple tFile. type tFiles []*tFile func (tf tFiles) Len() int { return len(tf) } func (tf tFiles) Swap(i, j int) { tf[i], tf[j] = tf[j], tf[i] } func (tf tFiles) nums() string { x := "[ " for i, f := range tf { if i != 0 { x += ", " } x += fmt.Sprint(f.fd.Num) } x += " ]" return x } // Returns true if i smallest key is less than j. // This used for sort by key in ascending order. func (tf tFiles) lessByKey(icmp *iComparer, i, j int) bool { a, b := tf[i], tf[j] n := icmp.Compare(a.imin, b.imin) if n == 0 { return a.fd.Num < b.fd.Num } return n < 0 } // Returns true if i file number is greater than j. // This used for sort by file number in descending order. func (tf tFiles) lessByNum(i, j int) bool { return tf[i].fd.Num > tf[j].fd.Num } // Sorts tables by key in ascending order. func (tf tFiles) sortByKey(icmp *iComparer) { sort.Sort(&tFilesSortByKey{tFiles: tf, icmp: icmp}) } // Sorts tables by file number in descending order. func (tf tFiles) sortByNum() { sort.Sort(&tFilesSortByNum{tFiles: tf}) } // Returns sum of all tables size. 
func (tf tFiles) size() (sum int64) { for _, t := range tf { sum += t.size } return sum } // Searches smallest index of tables whose its smallest // key is after or equal with given key. func (tf tFiles) searchMin(icmp *iComparer, ikey internalKey) int { return sort.Search(len(tf), func(i int) bool { return icmp.Compare(tf[i].imin, ikey) >= 0 }) } // Searches smallest index of tables whose its largest // key is after or equal with given key. func (tf tFiles) searchMax(icmp *iComparer, ikey internalKey) int { return sort.Search(len(tf), func(i int) bool { return icmp.Compare(tf[i].imax, ikey) >= 0 }) } // Returns true if given key range overlaps with one or more // tables key range. If unsorted is true then binary search will not be used. func (tf tFiles) overlaps(icmp *iComparer, umin, umax []byte, unsorted bool) bool { if unsorted { // Check against all files. for _, t := range tf { if t.overlaps(icmp, umin, umax) { return true } } return false } i := 0 if len(umin) > 0 { // Find the earliest possible internal key for min. i = tf.searchMax(icmp, makeInternalKey(nil, umin, keyMaxSeq, keyTypeSeek)) } if i >= len(tf) { // Beginning of range is after all files, so no overlap. return false } return !tf[i].before(icmp, umax) } // Returns tables whose its key range overlaps with given key range. // Range will be expanded if ukey found hop across tables. // If overlapped is true then the search will be restarted if umax // expanded. // The dst content will be overwritten. func (tf tFiles) getOverlaps(dst tFiles, icmp *iComparer, umin, umax []byte, overlapped bool) tFiles { dst = dst[:0] for i := 0; i < len(tf); { t := tf[i] if t.overlaps(icmp, umin, umax) { if umin != nil && icmp.uCompare(t.imin.ukey(), umin) < 0 { umin = t.imin.ukey() dst = dst[:0] i = 0 continue } else if umax != nil && icmp.uCompare(t.imax.ukey(), umax) > 0 { umax = t.imax.ukey() // Restart search if it is overlapped. if overlapped { dst = dst[:0] i = 0 continue } } dst = append(dst, t) } i++ } return dst } // Returns tables key range. func (tf tFiles) getRange(icmp *iComparer) (imin, imax internalKey) { for i, t := range tf { if i == 0 { imin, imax = t.imin, t.imax continue } if icmp.Compare(t.imin, imin) < 0 { imin = t.imin } if icmp.Compare(t.imax, imax) > 0 { imax = t.imax } } return } // Creates iterator index from tables. func (tf tFiles) newIndexIterator(tops *tOps, icmp *iComparer, slice *util.Range, ro *opt.ReadOptions) iterator.IteratorIndexer { if slice != nil { var start, limit int if slice.Start != nil { start = tf.searchMax(icmp, internalKey(slice.Start)) } if slice.Limit != nil { limit = tf.searchMin(icmp, internalKey(slice.Limit)) } else { limit = tf.Len() } tf = tf[start:limit] } return iterator.NewArrayIndexer(&tFilesArrayIndexer{ tFiles: tf, tops: tops, icmp: icmp, slice: slice, ro: ro, }) } // Tables iterator index. type tFilesArrayIndexer struct { tFiles tops *tOps icmp *iComparer slice *util.Range ro *opt.ReadOptions } func (a *tFilesArrayIndexer) Search(key []byte) int { return a.searchMax(a.icmp, internalKey(key)) } func (a *tFilesArrayIndexer) Get(i int) iterator.Iterator { if i == 0 || i == a.Len()-1 { return a.tops.newIterator(a.tFiles[i], a.slice, a.ro) } return a.tops.newIterator(a.tFiles[i], nil, a.ro) } // Helper type for sortByKey. type tFilesSortByKey struct { tFiles icmp *iComparer } func (x *tFilesSortByKey) Less(i, j int) bool { return x.lessByKey(x.icmp, i, j) } // Helper type for sortByNum. 
type tFilesSortByNum struct { tFiles } func (x *tFilesSortByNum) Less(i, j int) bool { return x.lessByNum(i, j) } // Table operations. type tOps struct { s *session noSync bool evictRemoved bool cache *cache.Cache bcache *cache.Cache bpool *util.BufferPool } // Creates an empty table and returns table writer. func (t *tOps) create() (*tWriter, error) { fd := storage.FileDesc{Type: storage.TypeTable, Num: t.s.allocFileNum()} fw, err := t.s.stor.Create(fd) if err != nil { return nil, err } return &tWriter{ t: t, fd: fd, w: fw, tw: table.NewWriter(fw, t.s.o.Options), }, nil } // Builds table from src iterator. func (t *tOps) createFrom(src iterator.Iterator) (f *tFile, n int, err error) { w, err := t.create() if err != nil { return } defer func() { if err != nil { w.drop() } }() for src.Next() { err = w.append(src.Key(), src.Value()) if err != nil { return } } err = src.Error() if err != nil { return } n = w.tw.EntriesLen() f, err = w.finish() return } // Opens table. It returns a cache handle, which should // be released after use. func (t *tOps) open(f *tFile) (ch *cache.Handle, err error) { ch = t.cache.Get(0, uint64(f.fd.Num), func() (size int, value cache.Value) { var r storage.Reader r, err = t.s.stor.Open(f.fd) if err != nil { return 0, nil } var bcache *cache.NamespaceGetter if t.bcache != nil { bcache = &cache.NamespaceGetter{Cache: t.bcache, NS: uint64(f.fd.Num)} } var tr *table.Reader tr, err = table.NewReader(r, f.size, f.fd, bcache, t.bpool, t.s.o.Options) if err != nil { r.Close() return 0, nil } return 1, tr }) if ch == nil && err == nil { err = ErrClosed } return } // Finds key/value pair whose key is greater than or equal to the // given key. func (t *tOps) find(f *tFile, key []byte, ro *opt.ReadOptions) (rkey, rvalue []byte, err error) { ch, err := t.open(f) if err != nil { return nil, nil, err } defer ch.Release() return ch.Value().(*table.Reader).Find(key, true, ro) } // Finds key that is greater than or equal to the given key. func (t *tOps) findKey(f *tFile, key []byte, ro *opt.ReadOptions) (rkey []byte, err error) { ch, err := t.open(f) if err != nil { return nil, err } defer ch.Release() return ch.Value().(*table.Reader).FindKey(key, true, ro) } // Returns approximate offset of the given key. func (t *tOps) offsetOf(f *tFile, key []byte) (offset int64, err error) { ch, err := t.open(f) if err != nil
defer ch.Release() return ch.Value().(*table.Reader).OffsetOf(key) } // Creates an iterator from the given table. func (t *tOps) newIterator(f *tFile, slice *util.Range, ro *opt.ReadOptions) iterator.Iterator { ch, err := t.open(f) if err != nil { return iterator.NewEmptyIterator(err) } iter := ch.Value().(*table.Reader).NewIterator(slice, ro) iter.SetReleaser(ch) return iter } // Removes table from persistent storage. It waits until // no one use the the table. func (t *tOps) remove(f *tFile) { t.cache.Delete(0, uint64(f.fd.Num), func() { if err := t.s.stor.Remove(f.fd); err != nil { t.s.logf("table@remove removing @%d %q", f.fd.Num, err) } else { t.s.logf("table@remove removed @%d", f.fd.Num) } if t.evictRemoved && t.bcache != nil { t.bcache.EvictNS(uint64(f.fd.Num)) } }) } // Closes the table ops instance. It will close all tables, // regadless still used or not. func (t *tOps) close() { t.bpool.Close() t.cache.Close() if t.bcache != nil { t.bcache.CloseWeak() } } // Creates new initialized table ops instance. func newTableOps(s *session) *tOps { var ( cacher cache.Cacher bcache *cache.Cache bpool *util.BufferPool ) if s.o.GetOpenFilesCacheCapacity() > 0 { cacher = cache.NewLRU(s.o.GetOpenFilesCacheCapacity()) } if !s.o.GetDisableBlockCache() { var bcacher cache.Cacher if s.o.GetBlockCacheCapacity() > 0 { bcacher = s.o.GetBlockCacher().New(s.o.GetBlockCacheCapacity()) } bcache = cache.NewCache(bcacher) } if !s.o.GetDisableBufferPool() { bpool = util.NewBufferPool(s.o.GetBlockSize() + 5) } return &tOps{ s: s, noSync: s.o.GetNoSync(), evictRemoved: s.o.GetBlockCacheEvictRemoved(), cache: cache.NewCache(cacher), bcache: bcache, bpool: bpool, } } // tWriter wraps the table writer. It keep track of file descriptor // and added key range. type tWriter struct { t *tOps fd storage.FileDesc w storage.Writer tw *table.Writer first, last []byte } // Append key/value pair to the table. func (w *tWriter) append(key, value []byte) error { if w.first == nil { w.first = append([]byte{}, key...) } w.last = append(w.last[:0], key...) return w.tw.Append(key, value) } // Returns true if the table is empty. func (w *tWriter) empty() bool { return w.first == nil } // Closes the storage.Writer. func (w *tWriter) close() { if w.w != nil { w.w.Close() w.w = nil } } // Finalizes the table and returns table file. func (w *tWriter) finish() (f *tFile, err error) { defer w.close() err = w.tw.Close() if err != nil { return } if !w.t.noSync { err = w.w.Sync() if err != nil { return } } f = newTableFile(w.fd, int64(w.tw.BytesLen()), internalKey(w.first), internalKey(w.last)) return } // Drops the table. func (w *tWriter) drop() { w.close() w.t.s.stor.Remove(w.fd) w.t.s.reuseFileNum(w.fd.Num) w.tw = nil w.first = nil w.last = nil }
{ return }
conditional_block
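The `newTableFile` comment in this sample derives its seek allowance from rough cost estimates: a 1MB compaction does about 25MB of IO, so one seek costs roughly as much as compacting 40KB, and the code conservatively grants one seek per 16KB with a floor of 100. A quick worked check of that clamp, mirroring the `size / 16384` formula from the sample:

```go
package main

import "fmt"

// seekAllowance mirrors the seekLeft initialization shown above:
// one seek is granted per 16KB of table data, but never fewer than 100.
func seekAllowance(size int64) int32 {
	left := int32(size / 16384)
	if left < 100 {
		left = 100
	}
	return left
}

func main() {
	for _, size := range []int64{64 << 10, 2 << 20, 64 << 20} {
		fmt.Printf("%10d bytes -> %d seeks before compaction\n", size, seekAllowance(size))
	}
	// Small tables (64KB here) hit the floor of 100; a 2MB table gets
	// 128 seeks; a 64MB table gets 4096.
}
```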
main.go
package main import ( "flag" "fmt" "io/ioutil" "log" "math" "math/rand" "net" "net/http" "net/url" "os" "os/signal" "path/filepath" "strconv" "strings" "sync" "time" "github.com/Sirupsen/logrus" "github.com/prometheus/client_golang/prometheus/promhttp" etcdraft "github.com/coreos/etcd/raft" "github.com/coreos/etcd/wal" "github.com/coreos/etcd/wal/walpb" hashic "github.com/hashicorp/raft" "github.com/hashicorp/raft-boltdb" "github.com/relab/raft" "github.com/relab/raft/raftgorums" gorums "github.com/relab/raft/raftgorums/gorumspb" etcd "github.com/relab/rkv/cmd/rkvd/raftimpl/etcd" hraft "github.com/relab/rkv/cmd/rkvd/raftimpl/hashicorp" "github.com/relab/rkv/rkvpb" "google.golang.org/grpc" "google.golang.org/grpc/grpclog" ) const ( bgorums = "gorums" betcd = "etcd" bhashicorp = "hashicorp" ) var ( bench = flag.Bool("quiet", false, "Silence log output") recover = flag.Bool("recover", false, "Recover from stable storage") batch = flag.Bool("batch", true, "Enable batching") serverMetrics = flag.Bool("servermetrics", true, "Enable server-side metrics") electionTimeout = flag.Duration("election", time.Second, "How long servers wait before starting an election") heartbeatTimeout = flag.Duration("heartbeat", 20*time.Millisecond, "How often a heartbeat should be sent") entriesPerMsg = flag.Uint64("entriespermsg", 64, "Entries per Appendentries message") catchupMultiplier = flag.Uint64("catchupmultiplier", 1024, "How many more times entries per message allowed during catch up") cache = flag.Int("cache", 1024*1024*64, "How many entries should be kept in memory") // ~1GB @ 16bytes per entry. maxgrpc = flag.Int("maxgrpc", 128<<20, "Max GRPC message size") // ~128MB. checkQuorum = flag.Bool("checkquorum", false, "Require a quorum of responses to a heartbeat to retain leadership") order = flag.Bool("ordergorums", true, "Force ordering of per node RPCs with Gorums") ) func main() { var ( id = flag.Uint64("id", 0, "server ID") servers = flag.String("servers", ":9201,:9202,:9203,:9204,:9205,:9206,:9207", "comma separated list of server addresses") cluster = flag.String("cluster", "1,2,3", "comma separated list of server ids to form cluster with, [1 >= id <= len(servers)]") backend = flag.String("backend", "gorums", "Raft backend to use [gorums|etcd|hashicorp]") ) flag.Parse() rand.Seed(time.Now().UnixNano()) if *id == 0 { fmt.Print("-id argument is required\n\n") flag.Usage() os.Exit(1) } nodes := strings.Split(*servers, ",") if len(nodes) == 0 { fmt.Print("-server argument is required\n\n") flag.Usage() os.Exit(1) } selected := strings.Split(*cluster, ",") var ids []uint64 for _, sid := range selected { id, err := strconv.ParseUint(sid, 10, 64) if err != nil { fmt.Print("could not parse -cluster argument\n\n") flag.Usage() os.Exit(1) } if id <= 0 || id > uint64(len(nodes)) { fmt.Print("invalid -cluster argument\n\n") flag.Usage() os.Exit(1) } ids = append(ids, id) } if len(ids) == 0 { fmt.Print("-cluster argument is required\n\n") flag.Usage() os.Exit(1) } if len(ids) > len(nodes) { fmt.Print("-cluster specifies too many servers\n\n") flag.Usage() os.Exit(1) } if *entriesPerMsg < 1 { fmt.Print("-entriespermsg must be atleast 1\n\n") flag.Usage() os.Exit(1) } if *catchupMultiplier < 1 { fmt.Print("-catchupmultiplier must be atleast 1\n\n") flag.Usage() os.Exit(1) } logger := logrus.New() logFile, err := os.OpenFile( fmt.Sprintf("%s%sraft%.2d.log", os.TempDir(), string(filepath.Separator), *id), os.O_CREATE|os.O_TRUNC|os.O_APPEND|os.O_WRONLY, 0600, ) if err != nil { logger.Fatal(err) } 
logger.Hooks.Add(NewLogToFileHook(logFile)) if *bench
grpclog.SetLogger(logger) lis, err := net.Listen("tcp", nodes[*id-1]) if err != nil { logger.Fatal(err) } grpcServer := grpc.NewServer(grpc.MaxMsgSize(*maxgrpc)) if *serverMetrics { go func() { http.Handle("/metrics", promhttp.Handler()) logger.Fatal(http.ListenAndServe(fmt.Sprintf(":590%d", *id), nil)) }() } lat := raft.NewLatency() event := raft.NewEvent() var once sync.Once writeData := func() { lat.Write(fmt.Sprintf("./latency-%v.csv", time.Now().UnixNano())) event.Write(fmt.Sprintf("./event-%v.csv", time.Now().UnixNano())) } c := make(chan os.Signal, 1) signal.Notify(c, os.Interrupt) go func() { <-c event.Record(raft.EventTerminated) once.Do(writeData) os.Exit(1) }() defer func() { once.Do(writeData) }() switch *backend { case bgorums: rungorums(logger, lis, grpcServer, *id, ids, nodes, lat, event) case betcd: runetcd(logger, lis, grpcServer, *id, ids, nodes, lat, event) case bhashicorp: runhashicorp(logger, lis, grpcServer, *id, ids, nodes, lat, event) } } func runhashicorp( logger logrus.FieldLogger, lis net.Listener, grpcServer *grpc.Server, id uint64, ids []uint64, nodes []string, lat *raft.Latency, event *raft.Event, ) { servers := make([]hashic.Server, len(nodes)) for i, addr := range nodes { host, port, err := net.SplitHostPort(addr) if err != nil { logger.Fatal(err) } p, _ := strconv.Atoi(port) addr = host + ":" + strconv.Itoa(p-100) suffrage := hashic.Voter if !contains(uint64(i+1), ids) { suffrage = hashic.Nonvoter } servers[i] = hashic.Server{ Suffrage: suffrage, ID: hashic.ServerID(addr), Address: hashic.ServerAddress(addr), } } addr, err := net.ResolveTCPAddr("tcp", string(servers[id-1].Address)) if err != nil { logger.Fatal(err) } trans, err := hashic.NewTCPTransport(string(servers[id-1].Address), addr, len(nodes)+1, 10*time.Second, os.Stderr) if err != nil { logger.Fatal(err) } path := fmt.Sprintf("hashicorp%.2d.bolt", id) overwrite := !*recover // Check if file already exists. if _, err := os.Stat(path); err != nil { if os.IsNotExist(err) { // We don't need to overwrite a file that doesn't exist. overwrite = false } else { // If we are unable to verify the existence of the file, // there is probably a permission problem. 
logger.Fatal(err) } } if overwrite { if err := os.Remove(path); err != nil { logger.Fatal(err) } } logs, err := raftboltdb.NewBoltStore(path) if err != nil { logger.Fatal(err) } cachedlogs, err := hashic.NewLogCache(*cache, logs) snaps := hashic.NewInmemSnapshotStore() cfg := &hashic.Config{ LocalID: servers[id-1].ID, ProtocolVersion: hashic.ProtocolVersionMax, HeartbeatTimeout: *electionTimeout, ElectionTimeout: *electionTimeout, CommitTimeout: *heartbeatTimeout, MaxAppendEntries: int(*entriesPerMsg), ShutdownOnRemove: true, TrailingLogs: math.MaxUint64, SnapshotInterval: 120 * time.Hour, SnapshotThreshold: math.MaxUint64, LeaderLeaseTimeout: *electionTimeout, } leaderOut := make(chan struct{}) node := hraft.NewRaft( logger, NewStore(), cfg, servers, trans, cachedlogs, hashic.NewInmemStore(), snaps, ids, lat, event, leaderOut, id, *checkQuorum, ) service := NewService(logger, node, leaderOut) rkvpb.RegisterRKVServer(grpcServer, service) logger.Fatal(grpcServer.Serve(lis)) } func runetcd( logger logrus.FieldLogger, lis net.Listener, grpcServer *grpc.Server, id uint64, ids []uint64, nodes []string, lat *raft.Latency, event *raft.Event, ) { peers := make([]etcdraft.Peer, len(ids)) for i, nid := range ids { addr := nodes[i] host, port, err := net.SplitHostPort(addr) if err != nil { logger.Fatal(err) } p, _ := strconv.Atoi(port) ur, err := url.Parse("http://" + addr) ur.Host = host + ":" + strconv.Itoa(p-100) if err != nil { logger.Fatal(err) } peers[i] = etcdraft.Peer{ ID: nid, Context: []byte(ur.String()), } } dir := fmt.Sprintf("etcdwal%.2d", id) switch { case wal.Exist(dir) && !*recover: if err := os.RemoveAll(dir); err != nil { logger.Fatal(err) } fallthrough case !wal.Exist(dir): if err := os.Mkdir(dir, 0750); err != nil { logger.Fatalf("rkvd: cannot create dir for wal (%v)", err) } w, err := wal.Create(dir, nil) if err != nil { logger.Fatalf("rkvd: create wal error (%v)", err) } w.Close() } walsnap := walpb.Snapshot{} w, err := wal.Open(dir, walsnap) if err != nil { logger.Fatalf("rkvd: error loading wal (%v)", err) } _, st, ents, err := w.ReadAll() if err != nil { log.Fatalf("rkvd: failed to read WAL (%v)", err) } storage := etcdraft.NewMemoryStorage() storage.SetHardState(st) storage.Append(ents) leaderOut := make(chan struct{}) node := etcd.NewRaft( logger, NewStore(), storage, w, &etcdraft.Config{ ID: id, ElectionTick: int(*electionTimeout / *heartbeatTimeout), HeartbeatTick: 1, Storage: storage, MaxSizePerMsg: *entriesPerMsg, // etcdserver says: Never overflow the rafthttp buffer, // which is 4096. We keep the same constant. 
MaxInflightMsgs: 4096 / 8, CheckQuorum: *checkQuorum, PreVote: true, Logger: logger, }, peers, *heartbeatTimeout, !contains(id, ids), nodes, lat, event, leaderOut, ) service := NewService(logger, node, leaderOut) rkvpb.RegisterRKVServer(grpcServer, service) go func() { logger.Fatal(grpcServer.Serve(lis)) }() host, port, err := net.SplitHostPort(nodes[id-1]) if err != nil { logger.Fatal(err) } p, _ := strconv.Atoi(port) selflis := host + ":" + strconv.Itoa(p-100) lishttp, err := net.Listen("tcp", selflis) if err != nil { logger.Fatal(err) } logger.Fatal(http.Serve(lishttp, node.Handler())) } func rungorums( logger logrus.FieldLogger, lis net.Listener, grpcServer *grpc.Server, id uint64, ids []uint64, nodes []string, lat *raft.Latency, event *raft.Event, ) { storage, err := raft.NewFileStorage(fmt.Sprintf("db%.2d.bolt", id), !*recover) if err != nil { logger.Fatal(err) } storageWithCache := raft.NewCacheStorage(storage, *cache) leaderOut := make(chan struct{}) node := raftgorums.NewRaft(NewStore(), &raftgorums.Config{ ID: id, Servers: nodes, InitialCluster: ids, Batch: *batch, Storage: storageWithCache, ElectionTimeout: *electionTimeout, HeartbeatTimeout: *heartbeatTimeout, EntriesPerMsg: *entriesPerMsg, CatchupMultiplier: *catchupMultiplier, Logger: logger, CheckQuorum: *checkQuorum, MetricsEnabled: true, }, lat, event, leaderOut) service := NewService(logger, node, leaderOut) rkvpb.RegisterRKVServer(grpcServer, service) go func() { logger.Fatal(grpcServer.Serve(lis)) }() opts := []gorums.ManagerOption{ gorums.WithGrpcDialOptions( grpc.WithBlock(), grpc.WithInsecure(), grpc.WithTimeout(raftgorums.TCPConnect*time.Millisecond)), } if *order { opts = append(opts, gorums.WithNodeOrdering()) } logger.Fatal(node.Run(grpcServer, opts...)) } func contains(x uint64, xs []uint64) bool { for _, y := range xs { if y == x { return true } } return false }
{ logger.Out = ioutil.Discard grpc.EnableTracing = false }
conditional_block
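The `main` function in this sample treats `-cluster` as a comma-separated list of 1-based server indices and rejects anything that does not parse or that falls outside `1..len(servers)`. A condensed sketch of that validation follows; the helper name `parseCluster` is made up for illustration and does not appear in the original program:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseCluster condenses the -cluster checks from the sample: every id
// must parse as an unsigned integer and fall within 1..numServers.
func parseCluster(cluster string, numServers int) ([]uint64, error) {
	var ids []uint64
	for _, sid := range strings.Split(cluster, ",") {
		id, err := strconv.ParseUint(sid, 10, 64)
		if err != nil {
			return nil, fmt.Errorf("could not parse -cluster argument: %v", err)
		}
		if id == 0 || id > uint64(numServers) {
			return nil, fmt.Errorf("invalid -cluster id %d", id)
		}
		ids = append(ids, id)
	}
	if len(ids) == 0 || len(ids) > numServers {
		return nil, fmt.Errorf("-cluster must name between 1 and %d servers", numServers)
	}
	return ids, nil
}

func main() {
	ids, err := parseCluster("1,2,3", 7)
	fmt.Println(ids, err) // [1 2 3] <nil>
}
```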
main.go
package main import ( "flag" "fmt" "io/ioutil" "log" "math" "math/rand" "net" "net/http" "net/url" "os" "os/signal" "path/filepath" "strconv" "strings" "sync" "time" "github.com/Sirupsen/logrus" "github.com/prometheus/client_golang/prometheus/promhttp" etcdraft "github.com/coreos/etcd/raft" "github.com/coreos/etcd/wal" "github.com/coreos/etcd/wal/walpb" hashic "github.com/hashicorp/raft" "github.com/hashicorp/raft-boltdb" "github.com/relab/raft" "github.com/relab/raft/raftgorums" gorums "github.com/relab/raft/raftgorums/gorumspb" etcd "github.com/relab/rkv/cmd/rkvd/raftimpl/etcd" hraft "github.com/relab/rkv/cmd/rkvd/raftimpl/hashicorp" "github.com/relab/rkv/rkvpb" "google.golang.org/grpc" "google.golang.org/grpc/grpclog" ) const ( bgorums = "gorums" betcd = "etcd" bhashicorp = "hashicorp" ) var ( bench = flag.Bool("quiet", false, "Silence log output") recover = flag.Bool("recover", false, "Recover from stable storage") batch = flag.Bool("batch", true, "Enable batching") serverMetrics = flag.Bool("servermetrics", true, "Enable server-side metrics") electionTimeout = flag.Duration("election", time.Second, "How long servers wait before starting an election") heartbeatTimeout = flag.Duration("heartbeat", 20*time.Millisecond, "How often a heartbeat should be sent") entriesPerMsg = flag.Uint64("entriespermsg", 64, "Entries per Appendentries message") catchupMultiplier = flag.Uint64("catchupmultiplier", 1024, "How many more times entries per message allowed during catch up") cache = flag.Int("cache", 1024*1024*64, "How many entries should be kept in memory") // ~1GB @ 16bytes per entry. maxgrpc = flag.Int("maxgrpc", 128<<20, "Max GRPC message size") // ~128MB. checkQuorum = flag.Bool("checkquorum", false, "Require a quorum of responses to a heartbeat to retain leadership") order = flag.Bool("ordergorums", true, "Force ordering of per node RPCs with Gorums") ) func main() { var ( id = flag.Uint64("id", 0, "server ID") servers = flag.String("servers", ":9201,:9202,:9203,:9204,:9205,:9206,:9207", "comma separated list of server addresses") cluster = flag.String("cluster", "1,2,3", "comma separated list of server ids to form cluster with, [1 >= id <= len(servers)]") backend = flag.String("backend", "gorums", "Raft backend to use [gorums|etcd|hashicorp]") ) flag.Parse() rand.Seed(time.Now().UnixNano()) if *id == 0 { fmt.Print("-id argument is required\n\n") flag.Usage() os.Exit(1) } nodes := strings.Split(*servers, ",") if len(nodes) == 0 { fmt.Print("-server argument is required\n\n") flag.Usage() os.Exit(1) } selected := strings.Split(*cluster, ",") var ids []uint64 for _, sid := range selected { id, err := strconv.ParseUint(sid, 10, 64) if err != nil { fmt.Print("could not parse -cluster argument\n\n") flag.Usage() os.Exit(1) } if id <= 0 || id > uint64(len(nodes)) { fmt.Print("invalid -cluster argument\n\n") flag.Usage() os.Exit(1) } ids = append(ids, id) } if len(ids) == 0 { fmt.Print("-cluster argument is required\n\n") flag.Usage() os.Exit(1) } if len(ids) > len(nodes) { fmt.Print("-cluster specifies too many servers\n\n") flag.Usage() os.Exit(1) } if *entriesPerMsg < 1 { fmt.Print("-entriespermsg must be atleast 1\n\n") flag.Usage() os.Exit(1) } if *catchupMultiplier < 1 { fmt.Print("-catchupmultiplier must be atleast 1\n\n") flag.Usage() os.Exit(1) } logger := logrus.New() logFile, err := os.OpenFile( fmt.Sprintf("%s%sraft%.2d.log", os.TempDir(), string(filepath.Separator), *id), os.O_CREATE|os.O_TRUNC|os.O_APPEND|os.O_WRONLY, 0600, ) if err != nil { logger.Fatal(err) } 
logger.Hooks.Add(NewLogToFileHook(logFile)) if *bench { logger.Out = ioutil.Discard grpc.EnableTracing = false } grpclog.SetLogger(logger) lis, err := net.Listen("tcp", nodes[*id-1]) if err != nil { logger.Fatal(err) } grpcServer := grpc.NewServer(grpc.MaxMsgSize(*maxgrpc)) if *serverMetrics { go func() { http.Handle("/metrics", promhttp.Handler()) logger.Fatal(http.ListenAndServe(fmt.Sprintf(":590%d", *id), nil)) }() } lat := raft.NewLatency() event := raft.NewEvent() var once sync.Once writeData := func() { lat.Write(fmt.Sprintf("./latency-%v.csv", time.Now().UnixNano())) event.Write(fmt.Sprintf("./event-%v.csv", time.Now().UnixNano())) } c := make(chan os.Signal, 1) signal.Notify(c, os.Interrupt) go func() { <-c event.Record(raft.EventTerminated) once.Do(writeData) os.Exit(1) }() defer func() { once.Do(writeData) }() switch *backend { case bgorums: rungorums(logger, lis, grpcServer, *id, ids, nodes, lat, event) case betcd: runetcd(logger, lis, grpcServer, *id, ids, nodes, lat, event) case bhashicorp: runhashicorp(logger, lis, grpcServer, *id, ids, nodes, lat, event) } } func runhashicorp( logger logrus.FieldLogger, lis net.Listener, grpcServer *grpc.Server, id uint64, ids []uint64, nodes []string, lat *raft.Latency, event *raft.Event, )
func runetcd( logger logrus.FieldLogger, lis net.Listener, grpcServer *grpc.Server, id uint64, ids []uint64, nodes []string, lat *raft.Latency, event *raft.Event, ) { peers := make([]etcdraft.Peer, len(ids)) for i, nid := range ids { addr := nodes[i] host, port, err := net.SplitHostPort(addr) if err != nil { logger.Fatal(err) } p, _ := strconv.Atoi(port) ur, err := url.Parse("http://" + addr) ur.Host = host + ":" + strconv.Itoa(p-100) if err != nil { logger.Fatal(err) } peers[i] = etcdraft.Peer{ ID: nid, Context: []byte(ur.String()), } } dir := fmt.Sprintf("etcdwal%.2d", id) switch { case wal.Exist(dir) && !*recover: if err := os.RemoveAll(dir); err != nil { logger.Fatal(err) } fallthrough case !wal.Exist(dir): if err := os.Mkdir(dir, 0750); err != nil { logger.Fatalf("rkvd: cannot create dir for wal (%v)", err) } w, err := wal.Create(dir, nil) if err != nil { logger.Fatalf("rkvd: create wal error (%v)", err) } w.Close() } walsnap := walpb.Snapshot{} w, err := wal.Open(dir, walsnap) if err != nil { logger.Fatalf("rkvd: error loading wal (%v)", err) } _, st, ents, err := w.ReadAll() if err != nil { log.Fatalf("rkvd: failed to read WAL (%v)", err) } storage := etcdraft.NewMemoryStorage() storage.SetHardState(st) storage.Append(ents) leaderOut := make(chan struct{}) node := etcd.NewRaft( logger, NewStore(), storage, w, &etcdraft.Config{ ID: id, ElectionTick: int(*electionTimeout / *heartbeatTimeout), HeartbeatTick: 1, Storage: storage, MaxSizePerMsg: *entriesPerMsg, // etcdserver says: Never overflow the rafthttp buffer, // which is 4096. We keep the same constant. MaxInflightMsgs: 4096 / 8, CheckQuorum: *checkQuorum, PreVote: true, Logger: logger, }, peers, *heartbeatTimeout, !contains(id, ids), nodes, lat, event, leaderOut, ) service := NewService(logger, node, leaderOut) rkvpb.RegisterRKVServer(grpcServer, service) go func() { logger.Fatal(grpcServer.Serve(lis)) }() host, port, err := net.SplitHostPort(nodes[id-1]) if err != nil { logger.Fatal(err) } p, _ := strconv.Atoi(port) selflis := host + ":" + strconv.Itoa(p-100) lishttp, err := net.Listen("tcp", selflis) if err != nil { logger.Fatal(err) } logger.Fatal(http.Serve(lishttp, node.Handler())) } func rungorums( logger logrus.FieldLogger, lis net.Listener, grpcServer *grpc.Server, id uint64, ids []uint64, nodes []string, lat *raft.Latency, event *raft.Event, ) { storage, err := raft.NewFileStorage(fmt.Sprintf("db%.2d.bolt", id), !*recover) if err != nil { logger.Fatal(err) } storageWithCache := raft.NewCacheStorage(storage, *cache) leaderOut := make(chan struct{}) node := raftgorums.NewRaft(NewStore(), &raftgorums.Config{ ID: id, Servers: nodes, InitialCluster: ids, Batch: *batch, Storage: storageWithCache, ElectionTimeout: *electionTimeout, HeartbeatTimeout: *heartbeatTimeout, EntriesPerMsg: *entriesPerMsg, CatchupMultiplier: *catchupMultiplier, Logger: logger, CheckQuorum: *checkQuorum, MetricsEnabled: true, }, lat, event, leaderOut) service := NewService(logger, node, leaderOut) rkvpb.RegisterRKVServer(grpcServer, service) go func() { logger.Fatal(grpcServer.Serve(lis)) }() opts := []gorums.ManagerOption{ gorums.WithGrpcDialOptions( grpc.WithBlock(), grpc.WithInsecure(), grpc.WithTimeout(raftgorums.TCPConnect*time.Millisecond)), } if *order { opts = append(opts, gorums.WithNodeOrdering()) } logger.Fatal(node.Run(grpcServer, opts...)) } func contains(x uint64, xs []uint64) bool { for _, y := range xs { if y == x { return true } } return false }
{ servers := make([]hashic.Server, len(nodes)) for i, addr := range nodes { host, port, err := net.SplitHostPort(addr) if err != nil { logger.Fatal(err) } p, _ := strconv.Atoi(port) addr = host + ":" + strconv.Itoa(p-100) suffrage := hashic.Voter if !contains(uint64(i+1), ids) { suffrage = hashic.Nonvoter } servers[i] = hashic.Server{ Suffrage: suffrage, ID: hashic.ServerID(addr), Address: hashic.ServerAddress(addr), } } addr, err := net.ResolveTCPAddr("tcp", string(servers[id-1].Address)) if err != nil { logger.Fatal(err) } trans, err := hashic.NewTCPTransport(string(servers[id-1].Address), addr, len(nodes)+1, 10*time.Second, os.Stderr) if err != nil { logger.Fatal(err) } path := fmt.Sprintf("hashicorp%.2d.bolt", id) overwrite := !*recover // Check if file already exists. if _, err := os.Stat(path); err != nil { if os.IsNotExist(err) { // We don't need to overwrite a file that doesn't exist. overwrite = false } else { // If we are unable to verify the existence of the file, // there is probably a permission problem. logger.Fatal(err) } } if overwrite { if err := os.Remove(path); err != nil { logger.Fatal(err) } } logs, err := raftboltdb.NewBoltStore(path) if err != nil { logger.Fatal(err) } cachedlogs, err := hashic.NewLogCache(*cache, logs) snaps := hashic.NewInmemSnapshotStore() cfg := &hashic.Config{ LocalID: servers[id-1].ID, ProtocolVersion: hashic.ProtocolVersionMax, HeartbeatTimeout: *electionTimeout, ElectionTimeout: *electionTimeout, CommitTimeout: *heartbeatTimeout, MaxAppendEntries: int(*entriesPerMsg), ShutdownOnRemove: true, TrailingLogs: math.MaxUint64, SnapshotInterval: 120 * time.Hour, SnapshotThreshold: math.MaxUint64, LeaderLeaseTimeout: *electionTimeout, } leaderOut := make(chan struct{}) node := hraft.NewRaft( logger, NewStore(), cfg, servers, trans, cachedlogs, hashic.NewInmemStore(), snaps, ids, lat, event, leaderOut, id, *checkQuorum, ) service := NewService(logger, node, leaderOut) rkvpb.RegisterRKVServer(grpcServer, service) logger.Fatal(grpcServer.Serve(lis)) }
identifier_body
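Both the hashicorp and etcd backends in this sample derive a second, backend-internal address from each gRPC address by subtracting 100 from the port, so `:9201` maps to `:9101`. A minimal sketch of that derivation; the helper name `internalAddr` is hypothetical and only reproduces the `host + ":" + strconv.Itoa(p-100)` pattern seen in `runhashicorp` and `runetcd`:

```go
package main

import (
	"fmt"
	"net"
	"strconv"
)

// internalAddr places the Raft transport 100 ports below the public
// gRPC listener, matching the port arithmetic in the sample.
func internalAddr(addr string) (string, error) {
	host, port, err := net.SplitHostPort(addr)
	if err != nil {
		return "", err
	}
	p, err := strconv.Atoi(port)
	if err != nil {
		return "", err
	}
	return host + ":" + strconv.Itoa(p-100), nil
}

func main() {
	a, _ := internalAddr("127.0.0.1:9201")
	fmt.Println(a) // 127.0.0.1:9101
}
```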
main.go
package main import ( "flag" "fmt" "io/ioutil" "log" "math" "math/rand" "net" "net/http" "net/url" "os" "os/signal" "path/filepath" "strconv" "strings" "sync" "time" "github.com/Sirupsen/logrus" "github.com/prometheus/client_golang/prometheus/promhttp" etcdraft "github.com/coreos/etcd/raft" "github.com/coreos/etcd/wal" "github.com/coreos/etcd/wal/walpb" hashic "github.com/hashicorp/raft" "github.com/hashicorp/raft-boltdb" "github.com/relab/raft" "github.com/relab/raft/raftgorums" gorums "github.com/relab/raft/raftgorums/gorumspb" etcd "github.com/relab/rkv/cmd/rkvd/raftimpl/etcd" hraft "github.com/relab/rkv/cmd/rkvd/raftimpl/hashicorp" "github.com/relab/rkv/rkvpb" "google.golang.org/grpc" "google.golang.org/grpc/grpclog" ) const ( bgorums = "gorums" betcd = "etcd" bhashicorp = "hashicorp" ) var ( bench = flag.Bool("quiet", false, "Silence log output") recover = flag.Bool("recover", false, "Recover from stable storage") batch = flag.Bool("batch", true, "Enable batching") serverMetrics = flag.Bool("servermetrics", true, "Enable server-side metrics") electionTimeout = flag.Duration("election", time.Second, "How long servers wait before starting an election") heartbeatTimeout = flag.Duration("heartbeat", 20*time.Millisecond, "How often a heartbeat should be sent") entriesPerMsg = flag.Uint64("entriespermsg", 64, "Entries per Appendentries message") catchupMultiplier = flag.Uint64("catchupmultiplier", 1024, "How many more times entries per message allowed during catch up") cache = flag.Int("cache", 1024*1024*64, "How many entries should be kept in memory") // ~1GB @ 16bytes per entry. maxgrpc = flag.Int("maxgrpc", 128<<20, "Max GRPC message size") // ~128MB. checkQuorum = flag.Bool("checkquorum", false, "Require a quorum of responses to a heartbeat to retain leadership") order = flag.Bool("ordergorums", true, "Force ordering of per node RPCs with Gorums") ) func
() { var ( id = flag.Uint64("id", 0, "server ID") servers = flag.String("servers", ":9201,:9202,:9203,:9204,:9205,:9206,:9207", "comma separated list of server addresses") cluster = flag.String("cluster", "1,2,3", "comma separated list of server ids to form cluster with, [1 >= id <= len(servers)]") backend = flag.String("backend", "gorums", "Raft backend to use [gorums|etcd|hashicorp]") ) flag.Parse() rand.Seed(time.Now().UnixNano()) if *id == 0 { fmt.Print("-id argument is required\n\n") flag.Usage() os.Exit(1) } nodes := strings.Split(*servers, ",") if len(nodes) == 0 { fmt.Print("-server argument is required\n\n") flag.Usage() os.Exit(1) } selected := strings.Split(*cluster, ",") var ids []uint64 for _, sid := range selected { id, err := strconv.ParseUint(sid, 10, 64) if err != nil { fmt.Print("could not parse -cluster argument\n\n") flag.Usage() os.Exit(1) } if id <= 0 || id > uint64(len(nodes)) { fmt.Print("invalid -cluster argument\n\n") flag.Usage() os.Exit(1) } ids = append(ids, id) } if len(ids) == 0 { fmt.Print("-cluster argument is required\n\n") flag.Usage() os.Exit(1) } if len(ids) > len(nodes) { fmt.Print("-cluster specifies too many servers\n\n") flag.Usage() os.Exit(1) } if *entriesPerMsg < 1 { fmt.Print("-entriespermsg must be atleast 1\n\n") flag.Usage() os.Exit(1) } if *catchupMultiplier < 1 { fmt.Print("-catchupmultiplier must be atleast 1\n\n") flag.Usage() os.Exit(1) } logger := logrus.New() logFile, err := os.OpenFile( fmt.Sprintf("%s%sraft%.2d.log", os.TempDir(), string(filepath.Separator), *id), os.O_CREATE|os.O_TRUNC|os.O_APPEND|os.O_WRONLY, 0600, ) if err != nil { logger.Fatal(err) } logger.Hooks.Add(NewLogToFileHook(logFile)) if *bench { logger.Out = ioutil.Discard grpc.EnableTracing = false } grpclog.SetLogger(logger) lis, err := net.Listen("tcp", nodes[*id-1]) if err != nil { logger.Fatal(err) } grpcServer := grpc.NewServer(grpc.MaxMsgSize(*maxgrpc)) if *serverMetrics { go func() { http.Handle("/metrics", promhttp.Handler()) logger.Fatal(http.ListenAndServe(fmt.Sprintf(":590%d", *id), nil)) }() } lat := raft.NewLatency() event := raft.NewEvent() var once sync.Once writeData := func() { lat.Write(fmt.Sprintf("./latency-%v.csv", time.Now().UnixNano())) event.Write(fmt.Sprintf("./event-%v.csv", time.Now().UnixNano())) } c := make(chan os.Signal, 1) signal.Notify(c, os.Interrupt) go func() { <-c event.Record(raft.EventTerminated) once.Do(writeData) os.Exit(1) }() defer func() { once.Do(writeData) }() switch *backend { case bgorums: rungorums(logger, lis, grpcServer, *id, ids, nodes, lat, event) case betcd: runetcd(logger, lis, grpcServer, *id, ids, nodes, lat, event) case bhashicorp: runhashicorp(logger, lis, grpcServer, *id, ids, nodes, lat, event) } } func runhashicorp( logger logrus.FieldLogger, lis net.Listener, grpcServer *grpc.Server, id uint64, ids []uint64, nodes []string, lat *raft.Latency, event *raft.Event, ) { servers := make([]hashic.Server, len(nodes)) for i, addr := range nodes { host, port, err := net.SplitHostPort(addr) if err != nil { logger.Fatal(err) } p, _ := strconv.Atoi(port) addr = host + ":" + strconv.Itoa(p-100) suffrage := hashic.Voter if !contains(uint64(i+1), ids) { suffrage = hashic.Nonvoter } servers[i] = hashic.Server{ Suffrage: suffrage, ID: hashic.ServerID(addr), Address: hashic.ServerAddress(addr), } } addr, err := net.ResolveTCPAddr("tcp", string(servers[id-1].Address)) if err != nil { logger.Fatal(err) } trans, err := hashic.NewTCPTransport(string(servers[id-1].Address), addr, len(nodes)+1, 10*time.Second, os.Stderr) if err != 
nil { logger.Fatal(err) } path := fmt.Sprintf("hashicorp%.2d.bolt", id) overwrite := !*recover // Check if file already exists. if _, err := os.Stat(path); err != nil { if os.IsNotExist(err) { // We don't need to overwrite a file that doesn't exist. overwrite = false } else { // If we are unable to verify the existence of the file, // there is probably a permission problem. logger.Fatal(err) } } if overwrite { if err := os.Remove(path); err != nil { logger.Fatal(err) } } logs, err := raftboltdb.NewBoltStore(path) if err != nil { logger.Fatal(err) } cachedlogs, err := hashic.NewLogCache(*cache, logs) snaps := hashic.NewInmemSnapshotStore() cfg := &hashic.Config{ LocalID: servers[id-1].ID, ProtocolVersion: hashic.ProtocolVersionMax, HeartbeatTimeout: *electionTimeout, ElectionTimeout: *electionTimeout, CommitTimeout: *heartbeatTimeout, MaxAppendEntries: int(*entriesPerMsg), ShutdownOnRemove: true, TrailingLogs: math.MaxUint64, SnapshotInterval: 120 * time.Hour, SnapshotThreshold: math.MaxUint64, LeaderLeaseTimeout: *electionTimeout, } leaderOut := make(chan struct{}) node := hraft.NewRaft( logger, NewStore(), cfg, servers, trans, cachedlogs, hashic.NewInmemStore(), snaps, ids, lat, event, leaderOut, id, *checkQuorum, ) service := NewService(logger, node, leaderOut) rkvpb.RegisterRKVServer(grpcServer, service) logger.Fatal(grpcServer.Serve(lis)) } func runetcd( logger logrus.FieldLogger, lis net.Listener, grpcServer *grpc.Server, id uint64, ids []uint64, nodes []string, lat *raft.Latency, event *raft.Event, ) { peers := make([]etcdraft.Peer, len(ids)) for i, nid := range ids { addr := nodes[i] host, port, err := net.SplitHostPort(addr) if err != nil { logger.Fatal(err) } p, _ := strconv.Atoi(port) ur, err := url.Parse("http://" + addr) ur.Host = host + ":" + strconv.Itoa(p-100) if err != nil { logger.Fatal(err) } peers[i] = etcdraft.Peer{ ID: nid, Context: []byte(ur.String()), } } dir := fmt.Sprintf("etcdwal%.2d", id) switch { case wal.Exist(dir) && !*recover: if err := os.RemoveAll(dir); err != nil { logger.Fatal(err) } fallthrough case !wal.Exist(dir): if err := os.Mkdir(dir, 0750); err != nil { logger.Fatalf("rkvd: cannot create dir for wal (%v)", err) } w, err := wal.Create(dir, nil) if err != nil { logger.Fatalf("rkvd: create wal error (%v)", err) } w.Close() } walsnap := walpb.Snapshot{} w, err := wal.Open(dir, walsnap) if err != nil { logger.Fatalf("rkvd: error loading wal (%v)", err) } _, st, ents, err := w.ReadAll() if err != nil { log.Fatalf("rkvd: failed to read WAL (%v)", err) } storage := etcdraft.NewMemoryStorage() storage.SetHardState(st) storage.Append(ents) leaderOut := make(chan struct{}) node := etcd.NewRaft( logger, NewStore(), storage, w, &etcdraft.Config{ ID: id, ElectionTick: int(*electionTimeout / *heartbeatTimeout), HeartbeatTick: 1, Storage: storage, MaxSizePerMsg: *entriesPerMsg, // etcdserver says: Never overflow the rafthttp buffer, // which is 4096. We keep the same constant. 
MaxInflightMsgs: 4096 / 8, CheckQuorum: *checkQuorum, PreVote: true, Logger: logger, }, peers, *heartbeatTimeout, !contains(id, ids), nodes, lat, event, leaderOut, ) service := NewService(logger, node, leaderOut) rkvpb.RegisterRKVServer(grpcServer, service) go func() { logger.Fatal(grpcServer.Serve(lis)) }() host, port, err := net.SplitHostPort(nodes[id-1]) if err != nil { logger.Fatal(err) } p, _ := strconv.Atoi(port) selflis := host + ":" + strconv.Itoa(p-100) lishttp, err := net.Listen("tcp", selflis) if err != nil { logger.Fatal(err) } logger.Fatal(http.Serve(lishttp, node.Handler())) } func rungorums( logger logrus.FieldLogger, lis net.Listener, grpcServer *grpc.Server, id uint64, ids []uint64, nodes []string, lat *raft.Latency, event *raft.Event, ) { storage, err := raft.NewFileStorage(fmt.Sprintf("db%.2d.bolt", id), !*recover) if err != nil { logger.Fatal(err) } storageWithCache := raft.NewCacheStorage(storage, *cache) leaderOut := make(chan struct{}) node := raftgorums.NewRaft(NewStore(), &raftgorums.Config{ ID: id, Servers: nodes, InitialCluster: ids, Batch: *batch, Storage: storageWithCache, ElectionTimeout: *electionTimeout, HeartbeatTimeout: *heartbeatTimeout, EntriesPerMsg: *entriesPerMsg, CatchupMultiplier: *catchupMultiplier, Logger: logger, CheckQuorum: *checkQuorum, MetricsEnabled: true, }, lat, event, leaderOut) service := NewService(logger, node, leaderOut) rkvpb.RegisterRKVServer(grpcServer, service) go func() { logger.Fatal(grpcServer.Serve(lis)) }() opts := []gorums.ManagerOption{ gorums.WithGrpcDialOptions( grpc.WithBlock(), grpc.WithInsecure(), grpc.WithTimeout(raftgorums.TCPConnect*time.Millisecond)), } if *order { opts = append(opts, gorums.WithNodeOrdering()) } logger.Fatal(node.Run(grpcServer, opts...)) } func contains(x uint64, xs []uint64) bool { for _, y := range xs { if y == x { return true } } return false }
main
identifier_name
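The etcd configuration in this sample sets `ElectionTick` to `electionTimeout / heartbeatTimeout` while keeping `HeartbeatTick` at 1, so with the default flags (1s election timeout, 20ms heartbeat) a follower waits 50 heartbeat ticks before starting an election. A quick check of that arithmetic:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Defaults taken from the flag definitions in the sample.
	election := time.Second
	heartbeat := 20 * time.Millisecond

	// Matches ElectionTick: int(*electionTimeout / *heartbeatTimeout).
	electionTick := int(election / heartbeat)
	fmt.Println(electionTick) // 50 ticks, with HeartbeatTick fixed at 1
}
```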
main.go
package main import ( "flag" "fmt" "io/ioutil" "log" "math" "math/rand" "net" "net/http" "net/url" "os" "os/signal" "path/filepath" "strconv" "strings" "sync" "time" "github.com/Sirupsen/logrus" "github.com/prometheus/client_golang/prometheus/promhttp" etcdraft "github.com/coreos/etcd/raft" "github.com/coreos/etcd/wal" "github.com/coreos/etcd/wal/walpb" hashic "github.com/hashicorp/raft" "github.com/hashicorp/raft-boltdb" "github.com/relab/raft" "github.com/relab/raft/raftgorums" gorums "github.com/relab/raft/raftgorums/gorumspb" etcd "github.com/relab/rkv/cmd/rkvd/raftimpl/etcd" hraft "github.com/relab/rkv/cmd/rkvd/raftimpl/hashicorp" "github.com/relab/rkv/rkvpb" "google.golang.org/grpc" "google.golang.org/grpc/grpclog" ) const ( bgorums = "gorums" betcd = "etcd" bhashicorp = "hashicorp" ) var ( bench = flag.Bool("quiet", false, "Silence log output") recover = flag.Bool("recover", false, "Recover from stable storage") batch = flag.Bool("batch", true, "Enable batching") serverMetrics = flag.Bool("servermetrics", true, "Enable server-side metrics") electionTimeout = flag.Duration("election", time.Second, "How long servers wait before starting an election") heartbeatTimeout = flag.Duration("heartbeat", 20*time.Millisecond, "How often a heartbeat should be sent") entriesPerMsg = flag.Uint64("entriespermsg", 64, "Entries per Appendentries message") catchupMultiplier = flag.Uint64("catchupmultiplier", 1024, "How many more times entries per message allowed during catch up") cache = flag.Int("cache", 1024*1024*64, "How many entries should be kept in memory") // ~1GB @ 16bytes per entry. maxgrpc = flag.Int("maxgrpc", 128<<20, "Max GRPC message size") // ~128MB. checkQuorum = flag.Bool("checkquorum", false, "Require a quorum of responses to a heartbeat to retain leadership") order = flag.Bool("ordergorums", true, "Force ordering of per node RPCs with Gorums") ) func main() { var ( id = flag.Uint64("id", 0, "server ID") servers = flag.String("servers", ":9201,:9202,:9203,:9204,:9205,:9206,:9207", "comma separated list of server addresses") cluster = flag.String("cluster", "1,2,3", "comma separated list of server ids to form cluster with, [1 >= id <= len(servers)]") backend = flag.String("backend", "gorums", "Raft backend to use [gorums|etcd|hashicorp]") ) flag.Parse() rand.Seed(time.Now().UnixNano()) if *id == 0 { fmt.Print("-id argument is required\n\n") flag.Usage() os.Exit(1) } nodes := strings.Split(*servers, ",") if len(nodes) == 0 { fmt.Print("-server argument is required\n\n") flag.Usage() os.Exit(1) } selected := strings.Split(*cluster, ",") var ids []uint64 for _, sid := range selected { id, err := strconv.ParseUint(sid, 10, 64) if err != nil { fmt.Print("could not parse -cluster argument\n\n") flag.Usage() os.Exit(1) } if id <= 0 || id > uint64(len(nodes)) { fmt.Print("invalid -cluster argument\n\n") flag.Usage() os.Exit(1) } ids = append(ids, id) } if len(ids) == 0 { fmt.Print("-cluster argument is required\n\n") flag.Usage() os.Exit(1) } if len(ids) > len(nodes) { fmt.Print("-cluster specifies too many servers\n\n") flag.Usage() os.Exit(1) } if *entriesPerMsg < 1 { fmt.Print("-entriespermsg must be atleast 1\n\n") flag.Usage() os.Exit(1) } if *catchupMultiplier < 1 { fmt.Print("-catchupmultiplier must be atleast 1\n\n") flag.Usage() os.Exit(1) } logger := logrus.New() logFile, err := os.OpenFile( fmt.Sprintf("%s%sraft%.2d.log", os.TempDir(), string(filepath.Separator), *id), os.O_CREATE|os.O_TRUNC|os.O_APPEND|os.O_WRONLY, 0600, ) if err != nil { logger.Fatal(err) } 
logger.Hooks.Add(NewLogToFileHook(logFile)) if *bench { logger.Out = ioutil.Discard grpc.EnableTracing = false } grpclog.SetLogger(logger) lis, err := net.Listen("tcp", nodes[*id-1]) if err != nil { logger.Fatal(err) } grpcServer := grpc.NewServer(grpc.MaxMsgSize(*maxgrpc)) if *serverMetrics { go func() { http.Handle("/metrics", promhttp.Handler()) logger.Fatal(http.ListenAndServe(fmt.Sprintf(":590%d", *id), nil)) }() } lat := raft.NewLatency() event := raft.NewEvent() var once sync.Once writeData := func() { lat.Write(fmt.Sprintf("./latency-%v.csv", time.Now().UnixNano())) event.Write(fmt.Sprintf("./event-%v.csv", time.Now().UnixNano())) } c := make(chan os.Signal, 1) signal.Notify(c, os.Interrupt) go func() { <-c event.Record(raft.EventTerminated) once.Do(writeData) os.Exit(1) }() defer func() { once.Do(writeData) }() switch *backend { case bgorums: rungorums(logger, lis, grpcServer, *id, ids, nodes, lat, event) case betcd: runetcd(logger, lis, grpcServer, *id, ids, nodes, lat, event) case bhashicorp: runhashicorp(logger, lis, grpcServer, *id, ids, nodes, lat, event) } } func runhashicorp( logger logrus.FieldLogger, lis net.Listener, grpcServer *grpc.Server, id uint64, ids []uint64, nodes []string, lat *raft.Latency, event *raft.Event, ) { servers := make([]hashic.Server, len(nodes)) for i, addr := range nodes { host, port, err := net.SplitHostPort(addr) if err != nil { logger.Fatal(err) } p, _ := strconv.Atoi(port) addr = host + ":" + strconv.Itoa(p-100) suffrage := hashic.Voter if !contains(uint64(i+1), ids) { suffrage = hashic.Nonvoter } servers[i] = hashic.Server{ Suffrage: suffrage, ID: hashic.ServerID(addr), Address: hashic.ServerAddress(addr), } } addr, err := net.ResolveTCPAddr("tcp", string(servers[id-1].Address)) if err != nil { logger.Fatal(err) } trans, err := hashic.NewTCPTransport(string(servers[id-1].Address), addr, len(nodes)+1, 10*time.Second, os.Stderr) if err != nil { logger.Fatal(err) } path := fmt.Sprintf("hashicorp%.2d.bolt", id) overwrite := !*recover // Check if file already exists. if _, err := os.Stat(path); err != nil { if os.IsNotExist(err) { // We don't need to overwrite a file that doesn't exist. overwrite = false } else { // If we are unable to verify the existence of the file, // there is probably a permission problem. 
logger.Fatal(err) } } if overwrite { if err := os.Remove(path); err != nil { logger.Fatal(err) } } logs, err := raftboltdb.NewBoltStore(path) if err != nil { logger.Fatal(err) } cachedlogs, err := hashic.NewLogCache(*cache, logs) snaps := hashic.NewInmemSnapshotStore() cfg := &hashic.Config{ LocalID: servers[id-1].ID, ProtocolVersion: hashic.ProtocolVersionMax, HeartbeatTimeout: *electionTimeout, ElectionTimeout: *electionTimeout, CommitTimeout: *heartbeatTimeout, MaxAppendEntries: int(*entriesPerMsg), ShutdownOnRemove: true, TrailingLogs: math.MaxUint64, SnapshotInterval: 120 * time.Hour, SnapshotThreshold: math.MaxUint64, LeaderLeaseTimeout: *electionTimeout, } leaderOut := make(chan struct{}) node := hraft.NewRaft( logger, NewStore(), cfg, servers, trans, cachedlogs, hashic.NewInmemStore(), snaps, ids, lat, event, leaderOut, id, *checkQuorum, ) service := NewService(logger, node, leaderOut) rkvpb.RegisterRKVServer(grpcServer, service) logger.Fatal(grpcServer.Serve(lis)) } func runetcd( logger logrus.FieldLogger, lis net.Listener, grpcServer *grpc.Server, id uint64, ids []uint64, nodes []string, lat *raft.Latency, event *raft.Event, ) { peers := make([]etcdraft.Peer, len(ids)) for i, nid := range ids { addr := nodes[i] host, port, err := net.SplitHostPort(addr) if err != nil { logger.Fatal(err) } p, _ := strconv.Atoi(port) ur, err := url.Parse("http://" + addr) ur.Host = host + ":" + strconv.Itoa(p-100) if err != nil { logger.Fatal(err) } peers[i] = etcdraft.Peer{ ID: nid, Context: []byte(ur.String()), } } dir := fmt.Sprintf("etcdwal%.2d", id) switch { case wal.Exist(dir) && !*recover: if err := os.RemoveAll(dir); err != nil { logger.Fatal(err) } fallthrough case !wal.Exist(dir): if err := os.Mkdir(dir, 0750); err != nil { logger.Fatalf("rkvd: cannot create dir for wal (%v)", err) } w, err := wal.Create(dir, nil) if err != nil { logger.Fatalf("rkvd: create wal error (%v)", err) } w.Close() } walsnap := walpb.Snapshot{} w, err := wal.Open(dir, walsnap) if err != nil { logger.Fatalf("rkvd: error loading wal (%v)", err) } _, st, ents, err := w.ReadAll() if err != nil { log.Fatalf("rkvd: failed to read WAL (%v)", err) } storage := etcdraft.NewMemoryStorage() storage.SetHardState(st) storage.Append(ents) leaderOut := make(chan struct{}) node := etcd.NewRaft( logger, NewStore(), storage, w, &etcdraft.Config{ ID: id, ElectionTick: int(*electionTimeout / *heartbeatTimeout), HeartbeatTick: 1, Storage: storage, MaxSizePerMsg: *entriesPerMsg, // etcdserver says: Never overflow the rafthttp buffer, // which is 4096. We keep the same constant. MaxInflightMsgs: 4096 / 8, CheckQuorum: *checkQuorum, PreVote: true, Logger: logger, }, peers, *heartbeatTimeout, !contains(id, ids), nodes, lat, event, leaderOut, ) service := NewService(logger, node, leaderOut) rkvpb.RegisterRKVServer(grpcServer, service) go func() {
}() host, port, err := net.SplitHostPort(nodes[id-1]) if err != nil { logger.Fatal(err) } p, _ := strconv.Atoi(port) selflis := host + ":" + strconv.Itoa(p-100) lishttp, err := net.Listen("tcp", selflis) if err != nil { logger.Fatal(err) } logger.Fatal(http.Serve(lishttp, node.Handler())) } func rungorums( logger logrus.FieldLogger, lis net.Listener, grpcServer *grpc.Server, id uint64, ids []uint64, nodes []string, lat *raft.Latency, event *raft.Event, ) { storage, err := raft.NewFileStorage(fmt.Sprintf("db%.2d.bolt", id), !*recover) if err != nil { logger.Fatal(err) } storageWithCache := raft.NewCacheStorage(storage, *cache) leaderOut := make(chan struct{}) node := raftgorums.NewRaft(NewStore(), &raftgorums.Config{ ID: id, Servers: nodes, InitialCluster: ids, Batch: *batch, Storage: storageWithCache, ElectionTimeout: *electionTimeout, HeartbeatTimeout: *heartbeatTimeout, EntriesPerMsg: *entriesPerMsg, CatchupMultiplier: *catchupMultiplier, Logger: logger, CheckQuorum: *checkQuorum, MetricsEnabled: true, }, lat, event, leaderOut) service := NewService(logger, node, leaderOut) rkvpb.RegisterRKVServer(grpcServer, service) go func() { logger.Fatal(grpcServer.Serve(lis)) }() opts := []gorums.ManagerOption{ gorums.WithGrpcDialOptions( grpc.WithBlock(), grpc.WithInsecure(), grpc.WithTimeout(raftgorums.TCPConnect*time.Millisecond)), } if *order { opts = append(opts, gorums.WithNodeOrdering()) } logger.Fatal(node.Run(grpcServer, opts...)) } func contains(x uint64, xs []uint64) bool { for _, y := range xs { if y == x { return true } } return false }
logger.Fatal(grpcServer.Serve(lis))
random_line_split
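`main` in this sample writes the latency and event CSVs exactly once, whether the process returns normally or is interrupted, by wrapping the write in a `sync.Once` and triggering it both from a deferred call and from a signal-handler goroutine. Below is a stripped-down sketch of that shutdown pattern, with the CSV writes replaced by a placeholder print:

```go
package main

import (
	"fmt"
	"os"
	"os/signal"
	"sync"
)

func main() {
	var once sync.Once
	writeData := func() {
		// In the sample this writes latency-*.csv and event-*.csv.
		fmt.Println("flushing measurements")
	}

	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt)
	go func() {
		<-c
		once.Do(writeData) // flush before exiting on Ctrl-C
		os.Exit(1)
	}()
	defer func() {
		once.Do(writeData) // flush on normal return as well
	}()

	// ... run the selected Raft backend here ...
}
```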
post_trees.rs
use std::collections::HashMap; use timely::dataflow::channels::pact::Pipeline; use timely::dataflow::operators::generic::builder_rc::OperatorBuilder; use timely::dataflow::{Scope, Stream}; use colored::*; use crate::event::{Event, ID}; use crate::operators::active_posts::StatUpdate; use crate::operators::active_posts::StatUpdateType; use crate::operators::friend_recommendations::RecommendationUpdate; /// Given a stream of events, group them in connected components /// based on the root post id that they refer to. /// In other words, build the tree of events for each post. /// /// The operator emits 2 streams as output: /// 1) StatUpdates: will be fed into the `active_posts` operator /// that implements query 1 /// 2) RecommendationUpdates: will be fed into the `friend_recommendation` /// operator that implements query 2 /// /// In case of multiple workers, an upstream `exchange` operator /// will partition the events by root post id. Thus this operator /// will handle only a subset of the posts. /// /// "Reply to comments" events are broadcasted to all workers /// as they don't carry the root post id in the payload. /// /// When the `post_trees` operator receives an Reply event that /// cannot match to any currently received comment, it stores /// it in an out-of-order (ooo) queue. When the maximum bounded delay /// has expired, old events in the ooo queue are discarded /// (including events do not belong to the posts handled by this worker) /// pub trait PostTrees<G: Scope> { fn post_trees( &self, worker_id: usize, ) -> (Stream<G, StatUpdate>, Stream<G, RecommendationUpdate>); } impl<G: Scope<Timestamp = u64>> PostTrees<G> for Stream<G, Event> { fn post_trees( &self, worker_id: usize, ) -> (Stream<G, StatUpdate>, Stream<G, RecommendationUpdate>) { let mut state: PostTreesState = PostTreesState::new(worker_id); let mut builder = OperatorBuilder::new("PostTrees".to_owned(), self.scope()); let mut input = builder.new_input(self, Pipeline); // declare two output streams, one for each downstream operator let (mut stat_output, stat_stream) = builder.new_output(); let (mut rec_output, rec_stream) = builder.new_output(); builder.build(move |_| { let mut buf = Vec::new(); move |_frontiers| { input.for_each(|time, data| { data.swap(&mut buf); for event in buf.drain(..) { // update the post trees let (opt_target_id, opt_root_post_id) = state.update_post_tree(&event); // check if the root post_id has been already received match opt_root_post_id { Some(root_post_id) => { if let ID::Post(pid) = root_post_id { state.append_output_updates(&event, pid); } else { panic!("expect ID::Post, got ID::Comment"); } // check whether we can pop some stuff out of the ooo map if let Some(_) = event.id() { state.process_ooo_events(&event); } } None => { state.push_ooo_event(event, opt_target_id.unwrap()); } }; // state.dump(); } let mut stat_handle = stat_output.activate(); let mut rec_handle = rec_output.activate(); let mut stat_session = stat_handle.session(&time); let mut rec_session = rec_handle.session(&time); // emit stat updates as output for stat_update in state.pending_stat_updates.drain(..) { stat_session.give(stat_update); } // emit recommendation updates as output for rec_update in state.pending_rec_updates.drain(..) 
{ rec_session.give(rec_update); } // check we if we can clean some old events from the ooo queue state.clean_ooo_events(*time.time()); }); } }); // return the two output streams (stat_stream, rec_stream) } } #[derive(Debug)] struct Node { person_id: u64, // "creator" of the event root_post_id: ID, } /// State associated with the `post_trees` operator struct PostTreesState { worker_id: usize, // event ID --> post ID it refers to (root of the tree) root_of: HashMap<ID, Node>, // out-of-order events: id of missing event --> event that depends on it ooo_events: HashMap<ID, Vec<Event>>, // updates to be sent on the stat output stream pending_stat_updates: Vec<StatUpdate>, // updates to be sent on the recommendation output stream pending_rec_updates: Vec<RecommendationUpdate>, } impl PostTreesState { fn new(worker_id: usize) -> PostTreesState { PostTreesState { worker_id: worker_id, root_of: HashMap::<ID, Node>::new(), ooo_events: HashMap::<ID, Vec<Event>>::new(), pending_stat_updates: Vec::new(), pending_rec_updates: Vec::new(), } } /// given an event, try to match it to some post tree fn update_post_tree(&mut self, event: &Event) -> (Option<ID>, Option<ID>) { match event { Event::Post(post) => { let node = Node { person_id: post.person_id, root_post_id: post.post_id }; self.root_of.insert(post.post_id, node); (None, Some(post.post_id)) } Event::Like(like) => { // likes are not stored in the tree let post_id = match self.root_of.get(&like.post_id) { Some(_) => Some(like.post_id), // can only like a post None => None, }; (Some(like.post_id), post_id) } Event::Comment(comment) => { let reply_to_id = comment.reply_to_post_id.or(comment.reply_to_comment_id).unwrap(); if let Some(root_node) = self.root_of.get(&reply_to_id) { let root_post_id = root_node.root_post_id; let node = Node { person_id: comment.person_id, root_post_id: root_post_id }; self.root_of.insert(comment.comment_id, node); (Some(reply_to_id), Some(root_post_id)) } else { (Some(reply_to_id), None) } } } } /// process events that have `root_event` as their target post, /// recursively process the newly inserted events fn process_ooo_events(&mut self, root_event: &Event) { let id = root_event.id().unwrap(); if let Some(events) = self.ooo_events.remove(&id) { println!("-- {} for id = {:?}", "process_ooo_events".bold().yellow(), id); let mut new_events = Vec::new(); for event in events { let (opt_target_id, opt_root_post_id) = self.update_post_tree(&event); assert!(opt_target_id.unwrap() == id, "wtf"); let root_post_id = opt_root_post_id.expect("[process_ooo_events] root_post_id is None"); // only use this event if its timestamp is greater or equal to the parent's. if event.timestamp() >= root_event.timestamp() { self.append_output_updates(&event, root_post_id.u64()); if let Some(_) = event.id() { new_events.push(event); } } } // adding events might unlock other ooo events for event in new_events.drain(..) 
{ self.process_ooo_events(&event); } } } /// insert an event into the out-of-order queue fn push_ooo_event(&mut self, event: Event, target_id: ID) { self.ooo_events.entry(target_id).or_insert(Vec::new()).push(event); } /// remove all old events from the out-of-order queue /// (including Reply events there were not meant to be received by this worker) fn clean_ooo_events(&mut self, timestamp: u64) { self.ooo_events = self .ooo_events .clone() .into_iter() .filter(|(_, events)| events.iter().all(|event| event.timestamp() > timestamp)) .collect::<HashMap<_, _>>(); } /// generate all output updates for the current event fn append_output_updates(&mut self, event: &Event, root_post_id: u64) { self.append_stat_update(&event, root_post_id); self.append_rec_update(&event, root_post_id); } /// given an event (and the current state of the post trees), /// generate a new stat update and append it to the pending list fn append_stat_update(&mut self, event: &Event, root_post_id: u64) { let update_type = match event { Event::Post(_) => StatUpdateType::Post, Event::Like(_) => StatUpdateType::Like, Event::Comment(comment) => { if comment.reply_to_post_id != None { StatUpdateType::Comment } else { StatUpdateType::Reply } } }; let update = StatUpdate { update_type: update_type, post_id: root_post_id, person_id: event.person_id(), timestamp: event.timestamp(), }; self.pending_stat_updates.push(update); } /// given an event (and the current state of the post trees), /// generate a new recommendation update and append it to the pending list fn append_rec_update(&mut self, event: &Event, root_post_id: u64) { if let Event::Post(post) = event { // a new post with some tags has been created into a forum let update = RecommendationUpdate::Post { timestamp: event.timestamp(), person_id: event.person_id(), forum_id: post.forum_id, tags: post.tags.clone(), }; self.pending_rec_updates.push(update) } else if let Event::Comment(_) = event { let to_person_id = self.root_of.get(&ID::Post(root_post_id)).unwrap().person_id; let update = RecommendationUpdate::Comment { timestamp: event.timestamp(), from_person_id: event.person_id(), to_person_id: to_person_id, }; self.pending_rec_updates.push(update) } else if let Event::Like(_) = event { let to_person_id = self.root_of.get(&ID::Post(root_post_id)).unwrap().person_id; let update = RecommendationUpdate::Like { timestamp: event.timestamp(), from_person_id: event.person_id(), to_person_id: to_person_id, }; self.pending_rec_updates.push(update) } } #[allow(dead_code)] fn dump(&self) { println!( "{}", format!( "{} {}", format!("[W{}]", self.worker_id).bold().blue(), "Current state".bold().blue() ) ); println!(" root_of -- {:?}", self.root_of); self.dump_ooo_events(2); } fn
(&self, num_spaces: usize) { let spaces = " ".repeat(num_spaces); println!("{}---- ooo_events", spaces); for (post_id, events) in self.ooo_events.iter() { println!( "{}{:?} -- \n{} {}", spaces, post_id, spaces, events .iter() .map(|e| e.to_string()) .collect::<Vec<_>>() .join(&format!("\n{} ", spaces)) ); } println!("{}----", spaces); } }
dump_ooo_events
identifier_name
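The out-of-order handling in the record above (`push_ooo_event` / `process_ooo_events`) is the central trick: an event whose target has not arrived yet is parked under the missing ID, and once that target shows up the whole chain of waiting events is flushed recursively. The following is a minimal, self-contained sketch of that idea written in Go purely for illustration; the type and function names (Event, process, attach) are invented, and the per-event timestamp check that the Rust code applies before emitting is omitted.

package main

import "fmt"

// Event is a simplified stand-in: it knows its own ID and the ID of the
// event it targets (0 means "this is a post", i.e. it has no parent).
type Event struct {
	ID       uint64
	TargetID uint64
}

var (
	known = map[uint64]bool{}    // IDs already attached to some post tree
	ooo   = map[uint64][]Event{} // missing ID -> events waiting for it
)

// process attaches an event if its target is already known,
// otherwise parks it in the out-of-order queue.
func process(ev Event) {
	if ev.TargetID != 0 && !known[ev.TargetID] {
		ooo[ev.TargetID] = append(ooo[ev.TargetID], ev)
		return
	}
	attach(ev)
}

// attach marks the event as known and recursively flushes anything that was
// waiting for it, mirroring what process_ooo_events does in the Rust code.
func attach(ev Event) {
	known[ev.ID] = true
	fmt.Println("emitted", ev.ID)
	for _, waiting := range ooo[ev.ID] {
		attach(waiting)
	}
	delete(ooo, ev.ID)
}

func main() {
	process(Event{ID: 2, TargetID: 1}) // comment arrives before its post: buffered
	process(Event{ID: 3, TargetID: 2}) // reply to the buffered comment: buffered
	process(Event{ID: 1, TargetID: 0}) // the post arrives: both are flushed in order
}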
post_trees.rs
use std::collections::HashMap; use timely::dataflow::channels::pact::Pipeline; use timely::dataflow::operators::generic::builder_rc::OperatorBuilder; use timely::dataflow::{Scope, Stream}; use colored::*; use crate::event::{Event, ID}; use crate::operators::active_posts::StatUpdate; use crate::operators::active_posts::StatUpdateType; use crate::operators::friend_recommendations::RecommendationUpdate; /// Given a stream of events, group them in connected components /// based on the root post id that they refer to. /// In other words, build the tree of events for each post. /// /// The operator emits 2 streams as output: /// 1) StatUpdates: will be fed into the `active_posts` operator /// that implements query 1 /// 2) RecommendationUpdates: will be fed into the `friend_recommendation` /// operator that implements query 2 /// /// In case of multiple workers, an upstream `exchange` operator /// will partition the events by root post id. Thus this operator /// will handle only a subset of the posts. /// /// "Reply to comments" events are broadcasted to all workers /// as they don't carry the root post id in the payload. /// /// When the `post_trees` operator receives an Reply event that /// cannot match to any currently received comment, it stores /// it in an out-of-order (ooo) queue. When the maximum bounded delay /// has expired, old events in the ooo queue are discarded /// (including events do not belong to the posts handled by this worker) /// pub trait PostTrees<G: Scope> { fn post_trees( &self, worker_id: usize, ) -> (Stream<G, StatUpdate>, Stream<G, RecommendationUpdate>); } impl<G: Scope<Timestamp = u64>> PostTrees<G> for Stream<G, Event> { fn post_trees( &self, worker_id: usize, ) -> (Stream<G, StatUpdate>, Stream<G, RecommendationUpdate>) { let mut state: PostTreesState = PostTreesState::new(worker_id); let mut builder = OperatorBuilder::new("PostTrees".to_owned(), self.scope()); let mut input = builder.new_input(self, Pipeline); // declare two output streams, one for each downstream operator let (mut stat_output, stat_stream) = builder.new_output(); let (mut rec_output, rec_stream) = builder.new_output(); builder.build(move |_| { let mut buf = Vec::new(); move |_frontiers| { input.for_each(|time, data| { data.swap(&mut buf); for event in buf.drain(..) { // update the post trees let (opt_target_id, opt_root_post_id) = state.update_post_tree(&event); // check if the root post_id has been already received match opt_root_post_id { Some(root_post_id) => { if let ID::Post(pid) = root_post_id { state.append_output_updates(&event, pid); } else { panic!("expect ID::Post, got ID::Comment"); } // check whether we can pop some stuff out of the ooo map if let Some(_) = event.id() { state.process_ooo_events(&event); } } None => { state.push_ooo_event(event, opt_target_id.unwrap()); } }; // state.dump(); } let mut stat_handle = stat_output.activate(); let mut rec_handle = rec_output.activate(); let mut stat_session = stat_handle.session(&time); let mut rec_session = rec_handle.session(&time); // emit stat updates as output for stat_update in state.pending_stat_updates.drain(..) { stat_session.give(stat_update); } // emit recommendation updates as output for rec_update in state.pending_rec_updates.drain(..) 
{ rec_session.give(rec_update); } // check we if we can clean some old events from the ooo queue state.clean_ooo_events(*time.time()); }); } }); // return the two output streams (stat_stream, rec_stream) } } #[derive(Debug)] struct Node { person_id: u64, // "creator" of the event root_post_id: ID, } /// State associated with the `post_trees` operator struct PostTreesState { worker_id: usize, // event ID --> post ID it refers to (root of the tree) root_of: HashMap<ID, Node>, // out-of-order events: id of missing event --> event that depends on it ooo_events: HashMap<ID, Vec<Event>>, // updates to be sent on the stat output stream pending_stat_updates: Vec<StatUpdate>, // updates to be sent on the recommendation output stream pending_rec_updates: Vec<RecommendationUpdate>, } impl PostTreesState { fn new(worker_id: usize) -> PostTreesState { PostTreesState { worker_id: worker_id, root_of: HashMap::<ID, Node>::new(), ooo_events: HashMap::<ID, Vec<Event>>::new(), pending_stat_updates: Vec::new(), pending_rec_updates: Vec::new(), } } /// given an event, try to match it to some post tree fn update_post_tree(&mut self, event: &Event) -> (Option<ID>, Option<ID>)
/// process events that have `root_event` as their target post, /// recursively process the newly inserted events fn process_ooo_events(&mut self, root_event: &Event) { let id = root_event.id().unwrap(); if let Some(events) = self.ooo_events.remove(&id) { println!("-- {} for id = {:?}", "process_ooo_events".bold().yellow(), id); let mut new_events = Vec::new(); for event in events { let (opt_target_id, opt_root_post_id) = self.update_post_tree(&event); assert!(opt_target_id.unwrap() == id, "wtf"); let root_post_id = opt_root_post_id.expect("[process_ooo_events] root_post_id is None"); // only use this event if its timestamp is greater or equal to the parent's. if event.timestamp() >= root_event.timestamp() { self.append_output_updates(&event, root_post_id.u64()); if let Some(_) = event.id() { new_events.push(event); } } } // adding events might unlock other ooo events for event in new_events.drain(..) { self.process_ooo_events(&event); } } } /// insert an event into the out-of-order queue fn push_ooo_event(&mut self, event: Event, target_id: ID) { self.ooo_events.entry(target_id).or_insert(Vec::new()).push(event); } /// remove all old events from the out-of-order queue /// (including Reply events there were not meant to be received by this worker) fn clean_ooo_events(&mut self, timestamp: u64) { self.ooo_events = self .ooo_events .clone() .into_iter() .filter(|(_, events)| events.iter().all(|event| event.timestamp() > timestamp)) .collect::<HashMap<_, _>>(); } /// generate all output updates for the current event fn append_output_updates(&mut self, event: &Event, root_post_id: u64) { self.append_stat_update(&event, root_post_id); self.append_rec_update(&event, root_post_id); } /// given an event (and the current state of the post trees), /// generate a new stat update and append it to the pending list fn append_stat_update(&mut self, event: &Event, root_post_id: u64) { let update_type = match event { Event::Post(_) => StatUpdateType::Post, Event::Like(_) => StatUpdateType::Like, Event::Comment(comment) => { if comment.reply_to_post_id != None { StatUpdateType::Comment } else { StatUpdateType::Reply } } }; let update = StatUpdate { update_type: update_type, post_id: root_post_id, person_id: event.person_id(), timestamp: event.timestamp(), }; self.pending_stat_updates.push(update); } /// given an event (and the current state of the post trees), /// generate a new recommendation update and append it to the pending list fn append_rec_update(&mut self, event: &Event, root_post_id: u64) { if let Event::Post(post) = event { // a new post with some tags has been created into a forum let update = RecommendationUpdate::Post { timestamp: event.timestamp(), person_id: event.person_id(), forum_id: post.forum_id, tags: post.tags.clone(), }; self.pending_rec_updates.push(update) } else if let Event::Comment(_) = event { let to_person_id = self.root_of.get(&ID::Post(root_post_id)).unwrap().person_id; let update = RecommendationUpdate::Comment { timestamp: event.timestamp(), from_person_id: event.person_id(), to_person_id: to_person_id, }; self.pending_rec_updates.push(update) } else if let Event::Like(_) = event { let to_person_id = self.root_of.get(&ID::Post(root_post_id)).unwrap().person_id; let update = RecommendationUpdate::Like { timestamp: event.timestamp(), from_person_id: event.person_id(), to_person_id: to_person_id, }; self.pending_rec_updates.push(update) } } #[allow(dead_code)] fn dump(&self) { println!( "{}", format!( "{} {}", format!("[W{}]", self.worker_id).bold().blue(), "Current 
state".bold().blue() ) ); println!(" root_of -- {:?}", self.root_of); self.dump_ooo_events(2); } fn dump_ooo_events(&self, num_spaces: usize) { let spaces = " ".repeat(num_spaces); println!("{}---- ooo_events", spaces); for (post_id, events) in self.ooo_events.iter() { println!( "{}{:?} -- \n{} {}", spaces, post_id, spaces, events .iter() .map(|e| e.to_string()) .collect::<Vec<_>>() .join(&format!("\n{} ", spaces)) ); } println!("{}----", spaces); } }
{ match event { Event::Post(post) => { let node = Node { person_id: post.person_id, root_post_id: post.post_id }; self.root_of.insert(post.post_id, node); (None, Some(post.post_id)) } Event::Like(like) => { // likes are not stored in the tree let post_id = match self.root_of.get(&like.post_id) { Some(_) => Some(like.post_id), // can only like a post None => None, }; (Some(like.post_id), post_id) } Event::Comment(comment) => { let reply_to_id = comment.reply_to_post_id.or(comment.reply_to_comment_id).unwrap(); if let Some(root_node) = self.root_of.get(&reply_to_id) { let root_post_id = root_node.root_post_id; let node = Node { person_id: comment.person_id, root_post_id: root_post_id }; self.root_of.insert(comment.comment_id, node); (Some(reply_to_id), Some(root_post_id)) } else { (Some(reply_to_id), None) } } } }
identifier_body
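The middle of this record is the body of `update_post_tree`, whose main job is to keep `root_of` flat: every comment is recorded against the ID of the root post, not of its direct parent, so a later reply resolves its tree with a single lookup instead of a walk (likes are never inserted, since nothing can reply to a like). A toy Go sketch of that flattening, with made-up numeric IDs:

package main

import "fmt"

func main() {
	// rootOf maps an event ID to the root post ID of its tree.
	rootOf := map[uint64]uint64{}

	rootOf[10] = 10         // a post is its own root
	rootOf[20] = rootOf[10] // comment 20 on post 10: stored with the root, not the parent
	rootOf[30] = rootOf[20] // reply 30 to comment 20: one lookup, no tree walk

	fmt.Println(rootOf[30]) // 10
}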
post_trees.rs
use std::collections::HashMap; use timely::dataflow::channels::pact::Pipeline; use timely::dataflow::operators::generic::builder_rc::OperatorBuilder; use timely::dataflow::{Scope, Stream}; use colored::*; use crate::event::{Event, ID}; use crate::operators::active_posts::StatUpdate; use crate::operators::active_posts::StatUpdateType; use crate::operators::friend_recommendations::RecommendationUpdate; /// Given a stream of events, group them in connected components /// based on the root post id that they refer to. /// In other words, build the tree of events for each post. /// /// The operator emits 2 streams as output: /// 1) StatUpdates: will be fed into the `active_posts` operator /// that implements query 1 /// 2) RecommendationUpdates: will be fed into the `friend_recommendation` /// operator that implements query 2 ///
/// will handle only a subset of the posts. /// /// "Reply to comments" events are broadcasted to all workers /// as they don't carry the root post id in the payload. /// /// When the `post_trees` operator receives an Reply event that /// cannot match to any currently received comment, it stores /// it in an out-of-order (ooo) queue. When the maximum bounded delay /// has expired, old events in the ooo queue are discarded /// (including events do not belong to the posts handled by this worker) /// pub trait PostTrees<G: Scope> { fn post_trees( &self, worker_id: usize, ) -> (Stream<G, StatUpdate>, Stream<G, RecommendationUpdate>); } impl<G: Scope<Timestamp = u64>> PostTrees<G> for Stream<G, Event> { fn post_trees( &self, worker_id: usize, ) -> (Stream<G, StatUpdate>, Stream<G, RecommendationUpdate>) { let mut state: PostTreesState = PostTreesState::new(worker_id); let mut builder = OperatorBuilder::new("PostTrees".to_owned(), self.scope()); let mut input = builder.new_input(self, Pipeline); // declare two output streams, one for each downstream operator let (mut stat_output, stat_stream) = builder.new_output(); let (mut rec_output, rec_stream) = builder.new_output(); builder.build(move |_| { let mut buf = Vec::new(); move |_frontiers| { input.for_each(|time, data| { data.swap(&mut buf); for event in buf.drain(..) { // update the post trees let (opt_target_id, opt_root_post_id) = state.update_post_tree(&event); // check if the root post_id has been already received match opt_root_post_id { Some(root_post_id) => { if let ID::Post(pid) = root_post_id { state.append_output_updates(&event, pid); } else { panic!("expect ID::Post, got ID::Comment"); } // check whether we can pop some stuff out of the ooo map if let Some(_) = event.id() { state.process_ooo_events(&event); } } None => { state.push_ooo_event(event, opt_target_id.unwrap()); } }; // state.dump(); } let mut stat_handle = stat_output.activate(); let mut rec_handle = rec_output.activate(); let mut stat_session = stat_handle.session(&time); let mut rec_session = rec_handle.session(&time); // emit stat updates as output for stat_update in state.pending_stat_updates.drain(..) { stat_session.give(stat_update); } // emit recommendation updates as output for rec_update in state.pending_rec_updates.drain(..) 
{ rec_session.give(rec_update); } // check we if we can clean some old events from the ooo queue state.clean_ooo_events(*time.time()); }); } }); // return the two output streams (stat_stream, rec_stream) } } #[derive(Debug)] struct Node { person_id: u64, // "creator" of the event root_post_id: ID, } /// State associated with the `post_trees` operator struct PostTreesState { worker_id: usize, // event ID --> post ID it refers to (root of the tree) root_of: HashMap<ID, Node>, // out-of-order events: id of missing event --> event that depends on it ooo_events: HashMap<ID, Vec<Event>>, // updates to be sent on the stat output stream pending_stat_updates: Vec<StatUpdate>, // updates to be sent on the recommendation output stream pending_rec_updates: Vec<RecommendationUpdate>, } impl PostTreesState { fn new(worker_id: usize) -> PostTreesState { PostTreesState { worker_id: worker_id, root_of: HashMap::<ID, Node>::new(), ooo_events: HashMap::<ID, Vec<Event>>::new(), pending_stat_updates: Vec::new(), pending_rec_updates: Vec::new(), } } /// given an event, try to match it to some post tree fn update_post_tree(&mut self, event: &Event) -> (Option<ID>, Option<ID>) { match event { Event::Post(post) => { let node = Node { person_id: post.person_id, root_post_id: post.post_id }; self.root_of.insert(post.post_id, node); (None, Some(post.post_id)) } Event::Like(like) => { // likes are not stored in the tree let post_id = match self.root_of.get(&like.post_id) { Some(_) => Some(like.post_id), // can only like a post None => None, }; (Some(like.post_id), post_id) } Event::Comment(comment) => { let reply_to_id = comment.reply_to_post_id.or(comment.reply_to_comment_id).unwrap(); if let Some(root_node) = self.root_of.get(&reply_to_id) { let root_post_id = root_node.root_post_id; let node = Node { person_id: comment.person_id, root_post_id: root_post_id }; self.root_of.insert(comment.comment_id, node); (Some(reply_to_id), Some(root_post_id)) } else { (Some(reply_to_id), None) } } } } /// process events that have `root_event` as their target post, /// recursively process the newly inserted events fn process_ooo_events(&mut self, root_event: &Event) { let id = root_event.id().unwrap(); if let Some(events) = self.ooo_events.remove(&id) { println!("-- {} for id = {:?}", "process_ooo_events".bold().yellow(), id); let mut new_events = Vec::new(); for event in events { let (opt_target_id, opt_root_post_id) = self.update_post_tree(&event); assert!(opt_target_id.unwrap() == id, "wtf"); let root_post_id = opt_root_post_id.expect("[process_ooo_events] root_post_id is None"); // only use this event if its timestamp is greater or equal to the parent's. if event.timestamp() >= root_event.timestamp() { self.append_output_updates(&event, root_post_id.u64()); if let Some(_) = event.id() { new_events.push(event); } } } // adding events might unlock other ooo events for event in new_events.drain(..) 
{ self.process_ooo_events(&event); } } } /// insert an event into the out-of-order queue fn push_ooo_event(&mut self, event: Event, target_id: ID) { self.ooo_events.entry(target_id).or_insert(Vec::new()).push(event); } /// remove all old events from the out-of-order queue /// (including Reply events there were not meant to be received by this worker) fn clean_ooo_events(&mut self, timestamp: u64) { self.ooo_events = self .ooo_events .clone() .into_iter() .filter(|(_, events)| events.iter().all(|event| event.timestamp() > timestamp)) .collect::<HashMap<_, _>>(); } /// generate all output updates for the current event fn append_output_updates(&mut self, event: &Event, root_post_id: u64) { self.append_stat_update(&event, root_post_id); self.append_rec_update(&event, root_post_id); } /// given an event (and the current state of the post trees), /// generate a new stat update and append it to the pending list fn append_stat_update(&mut self, event: &Event, root_post_id: u64) { let update_type = match event { Event::Post(_) => StatUpdateType::Post, Event::Like(_) => StatUpdateType::Like, Event::Comment(comment) => { if comment.reply_to_post_id != None { StatUpdateType::Comment } else { StatUpdateType::Reply } } }; let update = StatUpdate { update_type: update_type, post_id: root_post_id, person_id: event.person_id(), timestamp: event.timestamp(), }; self.pending_stat_updates.push(update); } /// given an event (and the current state of the post trees), /// generate a new recommendation update and append it to the pending list fn append_rec_update(&mut self, event: &Event, root_post_id: u64) { if let Event::Post(post) = event { // a new post with some tags has been created into a forum let update = RecommendationUpdate::Post { timestamp: event.timestamp(), person_id: event.person_id(), forum_id: post.forum_id, tags: post.tags.clone(), }; self.pending_rec_updates.push(update) } else if let Event::Comment(_) = event { let to_person_id = self.root_of.get(&ID::Post(root_post_id)).unwrap().person_id; let update = RecommendationUpdate::Comment { timestamp: event.timestamp(), from_person_id: event.person_id(), to_person_id: to_person_id, }; self.pending_rec_updates.push(update) } else if let Event::Like(_) = event { let to_person_id = self.root_of.get(&ID::Post(root_post_id)).unwrap().person_id; let update = RecommendationUpdate::Like { timestamp: event.timestamp(), from_person_id: event.person_id(), to_person_id: to_person_id, }; self.pending_rec_updates.push(update) } } #[allow(dead_code)] fn dump(&self) { println!( "{}", format!( "{} {}", format!("[W{}]", self.worker_id).bold().blue(), "Current state".bold().blue() ) ); println!(" root_of -- {:?}", self.root_of); self.dump_ooo_events(2); } fn dump_ooo_events(&self, num_spaces: usize) { let spaces = " ".repeat(num_spaces); println!("{}---- ooo_events", spaces); for (post_id, events) in self.ooo_events.iter() { println!( "{}{:?} -- \n{} {}", spaces, post_id, spaces, events .iter() .map(|e| e.to_string()) .collect::<Vec<_>>() .join(&format!("\n{} ", spaces)) ); } println!("{}----", spaces); } }
/// In case of multiple workers, an upstream `exchange` operator /// will partition the events by root post id. Thus this operator
random_line_split
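`clean_ooo_events` leans on the bounded-delay guarantee: once the operator's time has moved past an event's timestamp plus the maximum delay, a still-missing target will never arrive on this worker (it was either never produced or routed to another worker), so the buffered entry can be discarded. Below is a rough Go equivalent of the filter it applies, using a simplified event type assumed only for this example:

package main

import "fmt"

// timedEvent carries only what the cleanup needs.
type timedEvent struct {
	id        uint64
	timestamp uint64
}

// cleanOOO keeps an entry only if every buffered event is newer than the
// watermark; otherwise the whole entry is dropped, mirroring the Rust filter.
func cleanOOO(ooo map[uint64][]timedEvent, watermark uint64) {
	for missingID, events := range ooo {
		allNewer := true
		for _, ev := range events {
			if ev.timestamp <= watermark {
				allNewer = false
				break
			}
		}
		if !allNewer {
			delete(ooo, missingID) // deleting while ranging over a map is safe in Go
		}
	}
}

func main() {
	ooo := map[uint64][]timedEvent{
		1: {{id: 2, timestamp: 50}},  // stale: its target will never arrive here
		3: {{id: 4, timestamp: 120}}, // still within the bounded delay
	}
	cleanOOO(ooo, 100)
	fmt.Println(len(ooo)) // 1
}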
lock-rpc-server.go
/* * Minio Cloud Storage, (C) 2016 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package cmd import ( "fmt" "math/rand" "net/rpc" "path" "strings" "sync" "time" router "github.com/gorilla/mux" ) const lockRPCPath = "/minio/lock" const lockMaintenanceLoop = 1 * time.Minute const lockCheckValidityInterval = 2 * time.Minute // LockArgs besides lock name, holds Token and Timestamp for session // authentication and validation server restart. type LockArgs struct { Name string Token string Timestamp time.Time Node string RPCPath string UID string } // SetToken - sets the token to the supplied value. func (l *LockArgs) SetToken(token string) { l.Token = token } // SetTimestamp - sets the timestamp to the supplied value. func (l *LockArgs) SetTimestamp(tstamp time.Time) { l.Timestamp = tstamp } // lockRequesterInfo stores various info from the client for each lock that is requested type lockRequesterInfo struct { writer bool // Bool whether write or read lock node string // Network address of client claiming lock rpcPath string // RPC path of client claiming lock uid string // Uid to uniquely identify request of client timestamp time.Time // Timestamp set at the time of initialization timeLastCheck time.Time // Timestamp for last check of validity of lock } // isWriteLock returns whether the lock is a write or read lock func isWriteLock(lri []lockRequesterInfo) bool { return len(lri) == 1 && lri[0].writer } // lockServer is type for RPC handlers type lockServer struct { rpcPath string mutex sync.Mutex lockMap map[string][]lockRequesterInfo timestamp time.Time // Timestamp set at the time of initialization. Resets naturally on minio server restart. } func (l *lockServer) verifyArgs(args *LockArgs) error { if !l.timestamp.Equal(args.Timestamp) { return errInvalidTimestamp } if !isRPCTokenValid(args.Token) { return errInvalidToken } return nil } /// Distributed lock handlers // LoginHandler - handles LoginHandler RPC call. func (l *lockServer) LoginHandler(args *RPCLoginArgs, reply *RPCLoginReply) error { jwt, err := newJWT(defaultTokenExpiry) if err != nil { return err } if err = jwt.Authenticate(args.Username, args.Password); err != nil { return err } token, err := jwt.GenerateToken(args.Username) if err != nil { return err } reply.Token = token reply.Timestamp = l.timestamp return nil } // Lock - rpc handler for (single) write lock operation. func (l *lockServer) Lock(args *LockArgs, reply *bool) error { l.mutex.Lock() defer l.mutex.Unlock() if err := l.verifyArgs(args); err != nil { return err } _, *reply = l.lockMap[args.Name] if !*reply { // No locks held on the given name, so claim write lock l.lockMap[args.Name] = []lockRequesterInfo{{writer: true, node: args.Node, rpcPath: args.RPCPath, uid: args.UID, timestamp: time.Now(), timeLastCheck: time.Now()}} } *reply = !*reply // Negate *reply to return true when lock is granted or false otherwise return nil } // Unlock - rpc handler for (single) write unlock operation. 
func (l *lockServer) Unlock(args *LockArgs, reply *bool) error { l.mutex.Lock() defer l.mutex.Unlock() if err := l.verifyArgs(args); err != nil { return err } var lri []lockRequesterInfo lri, *reply = l.lockMap[args.Name] if !*reply { // No lock is held on the given name return fmt.Errorf("Unlock attempted on an unlocked entity: %s", args.Name) } if *reply = isWriteLock(lri); !*reply { // Unless it is a write lock return fmt.Errorf("Unlock attempted on a read locked entity: %s (%d read locks active)", args.Name, len(lri)) } if l.removeEntry(args.Name, args.UID, &lri) { return nil } return fmt.Errorf("Unlock unable to find corresponding lock for uid: %s", args.UID) } // RLock - rpc handler for read lock operation. func (l *lockServer) RLock(args *LockArgs, reply *bool) error { l.mutex.Lock() defer l.mutex.Unlock() if err := l.verifyArgs(args); err != nil { return err } var lri []lockRequesterInfo lri, *reply = l.lockMap[args.Name] if !*reply { // No locks held on the given name, so claim (first) read lock l.lockMap[args.Name] = []lockRequesterInfo{{writer: false, node: args.Node, rpcPath: args.RPCPath, uid: args.UID, timestamp: time.Now(), timeLastCheck: time.Now()}} *reply = true } else { if *reply = !isWriteLock(lri); *reply { // Unless there is a write lock l.lockMap[args.Name] = append(l.lockMap[args.Name], lockRequesterInfo{writer: false, node: args.Node, rpcPath: args.RPCPath, uid: args.UID, timestamp: time.Now(), timeLastCheck: time.Now()}) } } return nil } // RUnlock - rpc handler for read unlock operation. func (l *lockServer) RUnlock(args *LockArgs, reply *bool) error { l.mutex.Lock() defer l.mutex.Unlock() if err := l.verifyArgs(args); err != nil { return err } var lri []lockRequesterInfo if lri, *reply = l.lockMap[args.Name]; !*reply { // No lock is held on the given name return fmt.Errorf("RUnlock attempted on an unlocked entity: %s", args.Name) } if *reply = !isWriteLock(lri); !*reply { // A write-lock is held, cannot release a read lock return fmt.Errorf("RUnlock attempted on a write locked entity: %s", args.Name) } if l.removeEntry(args.Name, args.UID, &lri) { return nil } return fmt.Errorf("RUnlock unable to find corresponding read lock for uid: %s", args.UID) } // Active - rpc handler for active lock status. func (l *lockServer) Active(args *LockArgs, reply *bool) error { l.mutex.Lock() defer l.mutex.Unlock() if err := l.verifyArgs(args); err != nil { return err } var lri []lockRequesterInfo if lri, *reply = l.lockMap[args.Name]; !*reply { return nil // No lock is held on the given name so return false } // Check whether uid is still active for _, entry := range lri { if *reply = entry.uid == args.UID; *reply { return nil // When uid found return true } } return nil // None found so return false } // removeEntry either, based on the uid of the lock message, removes a single entry from the // lockRequesterInfo array or the whole array from the map (in case of a write lock or last read lock) func (l *lockServer) removeEntry(name, uid string, lri *[]lockRequesterInfo) bool { // Find correct entry to remove based on uid for index, entry := range *lri { if entry.uid == uid { if len(*lri) == 1 { delete(l.lockMap, name) // Remove the (last) lock } else { // Remove the appropriate read lock *lri = append((*lri)[:index], (*lri)[index+1:]...) 
l.lockMap[name] = *lri } return true } } return false } // nameLockRequesterInfoPair is a helper type for lock maintenance type nameLockRequesterInfoPair struct { name string lri lockRequesterInfo } // getLongLivedLocks returns locks that are older than a certain time and // have not been 'checked' for validity too soon enough func getLongLivedLocks(m map[string][]lockRequesterInfo, interval time.Duration) []nameLockRequesterInfoPair { rslt := []nameLockRequesterInfoPair{} for name, lriArray := range m { for idx := range lriArray { // Check whether enough time has gone by since last check if time.Since(lriArray[idx].timeLastCheck) >= interval { rslt = append(rslt, nameLockRequesterInfoPair{name: name, lri: lriArray[idx]}) lriArray[idx].timeLastCheck = time.Now() } } } return rslt } // lockMaintenance loops over locks that have been active for some time and checks back // with the original server whether it is still alive or not func (l *lockServer) lockMaintenance(interval time.Duration) { l.mutex.Lock() // get list of locks to check nlripLongLived := getLongLivedLocks(l.lockMap, interval) l.mutex.Unlock() for _, nlrip := range nlripLongLived { c := newClient(nlrip.lri.node, nlrip.lri.rpcPath) var active bool // Call back to original server verify whether the lock is still active (based on name & uid) if err := c.Call("Dsync.Active", &LockArgs{Name: nlrip.name, UID: nlrip.lri.uid}, &active); err != nil { // We failed to connect back to the server that originated the lock, this can either be due to // - server at client down // - some network error (and server is up normally) // // We will ignore the error, and we will retry later to get resolve on this lock c.Close() } else { c.Close() if !active { // The lock is no longer active at server that originated the lock // so remove the lock from the map l.mutex.Lock() // Check if entry is still in map (could have been removed altogether by 'concurrent' (R)Unlock of last entry) if lri, ok := l.lockMap[nlrip.name]; ok { if !l.removeEntry(nlrip.name, nlrip.lri.uid, &lri) { // Remove failed, in case it is a: if nlrip.lri.writer { // Writer: this should never happen as the whole (mapped) entry should have been deleted log.Errorln("Lock maintenance failed to remove entry for write lock (should never happen)", nlrip.name, nlrip.lri, lri) } else { // Reader: this can happen if multiple read locks were active and the one we are looking for // has been released concurrently (so it is fine) } } else { // remove went okay, all is fine } } l.mutex.Unlock() } } } } // Initialize distributed lock. func initDistributedNSLock(mux *router.Router, serverConfig serverCmdConfig) { lockServers := newLockServers(serverConfig) registerStorageLockers(mux, lockServers) } // Create one lock server for every local storage rpc server. func newLockServers(serverConfig serverCmdConfig) (lockServers []*lockServer) { // Initialize posix storage API. 
exports := serverConfig.disks ignoredExports := serverConfig.ignoredDisks // Save ignored disks in a map skipDisks := make(map[string]bool) for _, ignoredExport := range ignoredExports { skipDisks[ignoredExport] = true } for _, export := range exports { if skipDisks[export] { continue } if isLocalStorage(export) { if idx := strings.LastIndex(export, ":"); idx != -1 { export = export[idx+1:] } // Create handler for lock RPCs locker := &lockServer{ rpcPath: export, mutex: sync.Mutex{}, lockMap: make(map[string][]lockRequesterInfo), timestamp: time.Now().UTC(), } // Start loop for stale lock maintenance go func() { // Start with random sleep time, so as to avoid "synchronous checks" between servers time.Sleep(time.Duration(rand.Float64() * float64(lockMaintenanceLoop))) for { time.Sleep(lockMaintenanceLoop) locker.lockMaintenance(lockCheckValidityInterval) } }() lockServers = append(lockServers, locker) } } return lockServers } // registerStorageLockers - register locker rpc handlers for net/rpc library clients func registerStorageLockers(mux *router.Router, lockServers []*lockServer)
{ for _, lockServer := range lockServers { lockRPCServer := rpc.NewServer() lockRPCServer.RegisterName("Dsync", lockServer) lockRouter := mux.PathPrefix(reservedBucket).Subrouter() lockRouter.Path(path.Join("/lock", lockServer.rpcPath)).Handler(lockRPCServer) } }
identifier_body
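`registerStorageLockers` (the middle of this record) exposes each lockServer under the RPC service name "Dsync", so a client drives it with ordinary net/rpc calls. The sketch below is hypothetical: it dials a plain TCP endpoint with rpc.Dial rather than the HTTP route the server actually mounts, it omits the token and timestamp that verifyArgs would demand, and the address and UID are invented. It only shows the call shape.

package main

import (
	"log"
	"net/rpc"
	"time"
)

// LockArgs mirrors the server-side struct; exported field names must match
// so the gob codec can encode them.
type LockArgs struct {
	Name      string
	Token     string
	Timestamp time.Time
	Node      string
	RPCPath   string
	UID       string
}

func main() {
	client, err := rpc.Dial("tcp", "localhost:9000") // illustrative address
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	args := &LockArgs{Name: "bucket/object", UID: "example-uid", Node: "localhost:9000"}
	var granted bool
	// "Dsync.Lock" = the service name registered via RegisterName + the method name.
	if err := client.Call("Dsync.Lock", args, &granted); err != nil {
		log.Fatal(err)
	}
	log.Println("write lock granted:", granted)
}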
lock-rpc-server.go
/* * Minio Cloud Storage, (C) 2016 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package cmd import ( "fmt" "math/rand" "net/rpc" "path" "strings" "sync" "time" router "github.com/gorilla/mux" ) const lockRPCPath = "/minio/lock" const lockMaintenanceLoop = 1 * time.Minute const lockCheckValidityInterval = 2 * time.Minute // LockArgs besides lock name, holds Token and Timestamp for session // authentication and validation server restart. type LockArgs struct { Name string Token string Timestamp time.Time Node string RPCPath string UID string } // SetToken - sets the token to the supplied value. func (l *LockArgs) SetToken(token string) { l.Token = token } // SetTimestamp - sets the timestamp to the supplied value. func (l *LockArgs) SetTimestamp(tstamp time.Time) { l.Timestamp = tstamp } // lockRequesterInfo stores various info from the client for each lock that is requested type lockRequesterInfo struct { writer bool // Bool whether write or read lock node string // Network address of client claiming lock rpcPath string // RPC path of client claiming lock uid string // Uid to uniquely identify request of client timestamp time.Time // Timestamp set at the time of initialization timeLastCheck time.Time // Timestamp for last check of validity of lock } // isWriteLock returns whether the lock is a write or read lock func isWriteLock(lri []lockRequesterInfo) bool { return len(lri) == 1 && lri[0].writer } // lockServer is type for RPC handlers type lockServer struct { rpcPath string mutex sync.Mutex lockMap map[string][]lockRequesterInfo timestamp time.Time // Timestamp set at the time of initialization. Resets naturally on minio server restart. } func (l *lockServer) verifyArgs(args *LockArgs) error { if !l.timestamp.Equal(args.Timestamp) { return errInvalidTimestamp } if !isRPCTokenValid(args.Token) { return errInvalidToken } return nil } /// Distributed lock handlers // LoginHandler - handles LoginHandler RPC call. func (l *lockServer) LoginHandler(args *RPCLoginArgs, reply *RPCLoginReply) error { jwt, err := newJWT(defaultTokenExpiry) if err != nil { return err } if err = jwt.Authenticate(args.Username, args.Password); err != nil { return err } token, err := jwt.GenerateToken(args.Username) if err != nil { return err } reply.Token = token reply.Timestamp = l.timestamp return nil } // Lock - rpc handler for (single) write lock operation. func (l *lockServer) Lock(args *LockArgs, reply *bool) error { l.mutex.Lock() defer l.mutex.Unlock() if err := l.verifyArgs(args); err != nil { return err } _, *reply = l.lockMap[args.Name] if !*reply { // No locks held on the given name, so claim write lock l.lockMap[args.Name] = []lockRequesterInfo{{writer: true, node: args.Node, rpcPath: args.RPCPath, uid: args.UID, timestamp: time.Now(), timeLastCheck: time.Now()}} } *reply = !*reply // Negate *reply to return true when lock is granted or false otherwise return nil } // Unlock - rpc handler for (single) write unlock operation. 
func (l *lockServer) Unlock(args *LockArgs, reply *bool) error { l.mutex.Lock() defer l.mutex.Unlock() if err := l.verifyArgs(args); err != nil { return err } var lri []lockRequesterInfo lri, *reply = l.lockMap[args.Name] if !*reply { // No lock is held on the given name return fmt.Errorf("Unlock attempted on an unlocked entity: %s", args.Name) } if *reply = isWriteLock(lri); !*reply { // Unless it is a write lock return fmt.Errorf("Unlock attempted on a read locked entity: %s (%d read locks active)", args.Name, len(lri)) } if l.removeEntry(args.Name, args.UID, &lri) { return nil } return fmt.Errorf("Unlock unable to find corresponding lock for uid: %s", args.UID) } // RLock - rpc handler for read lock operation. func (l *lockServer) RLock(args *LockArgs, reply *bool) error { l.mutex.Lock() defer l.mutex.Unlock() if err := l.verifyArgs(args); err != nil { return err } var lri []lockRequesterInfo lri, *reply = l.lockMap[args.Name] if !*reply { // No locks held on the given name, so claim (first) read lock l.lockMap[args.Name] = []lockRequesterInfo{{writer: false, node: args.Node, rpcPath: args.RPCPath, uid: args.UID, timestamp: time.Now(), timeLastCheck: time.Now()}} *reply = true } else { if *reply = !isWriteLock(lri); *reply { // Unless there is a write lock l.lockMap[args.Name] = append(l.lockMap[args.Name], lockRequesterInfo{writer: false, node: args.Node, rpcPath: args.RPCPath, uid: args.UID, timestamp: time.Now(), timeLastCheck: time.Now()}) } } return nil } // RUnlock - rpc handler for read unlock operation. func (l *lockServer) RUnlock(args *LockArgs, reply *bool) error { l.mutex.Lock() defer l.mutex.Unlock() if err := l.verifyArgs(args); err != nil { return err } var lri []lockRequesterInfo if lri, *reply = l.lockMap[args.Name]; !*reply { // No lock is held on the given name return fmt.Errorf("RUnlock attempted on an unlocked entity: %s", args.Name) } if *reply = !isWriteLock(lri); !*reply { // A write-lock is held, cannot release a read lock return fmt.Errorf("RUnlock attempted on a write locked entity: %s", args.Name) } if l.removeEntry(args.Name, args.UID, &lri) { return nil } return fmt.Errorf("RUnlock unable to find corresponding read lock for uid: %s", args.UID) } // Active - rpc handler for active lock status. func (l *lockServer)
(args *LockArgs, reply *bool) error { l.mutex.Lock() defer l.mutex.Unlock() if err := l.verifyArgs(args); err != nil { return err } var lri []lockRequesterInfo if lri, *reply = l.lockMap[args.Name]; !*reply { return nil // No lock is held on the given name so return false } // Check whether uid is still active for _, entry := range lri { if *reply = entry.uid == args.UID; *reply { return nil // When uid found return true } } return nil // None found so return false } // removeEntry either, based on the uid of the lock message, removes a single entry from the // lockRequesterInfo array or the whole array from the map (in case of a write lock or last read lock) func (l *lockServer) removeEntry(name, uid string, lri *[]lockRequesterInfo) bool { // Find correct entry to remove based on uid for index, entry := range *lri { if entry.uid == uid { if len(*lri) == 1 { delete(l.lockMap, name) // Remove the (last) lock } else { // Remove the appropriate read lock *lri = append((*lri)[:index], (*lri)[index+1:]...) l.lockMap[name] = *lri } return true } } return false } // nameLockRequesterInfoPair is a helper type for lock maintenance type nameLockRequesterInfoPair struct { name string lri lockRequesterInfo } // getLongLivedLocks returns locks that are older than a certain time and // have not been 'checked' for validity too soon enough func getLongLivedLocks(m map[string][]lockRequesterInfo, interval time.Duration) []nameLockRequesterInfoPair { rslt := []nameLockRequesterInfoPair{} for name, lriArray := range m { for idx := range lriArray { // Check whether enough time has gone by since last check if time.Since(lriArray[idx].timeLastCheck) >= interval { rslt = append(rslt, nameLockRequesterInfoPair{name: name, lri: lriArray[idx]}) lriArray[idx].timeLastCheck = time.Now() } } } return rslt } // lockMaintenance loops over locks that have been active for some time and checks back // with the original server whether it is still alive or not func (l *lockServer) lockMaintenance(interval time.Duration) { l.mutex.Lock() // get list of locks to check nlripLongLived := getLongLivedLocks(l.lockMap, interval) l.mutex.Unlock() for _, nlrip := range nlripLongLived { c := newClient(nlrip.lri.node, nlrip.lri.rpcPath) var active bool // Call back to original server verify whether the lock is still active (based on name & uid) if err := c.Call("Dsync.Active", &LockArgs{Name: nlrip.name, UID: nlrip.lri.uid}, &active); err != nil { // We failed to connect back to the server that originated the lock, this can either be due to // - server at client down // - some network error (and server is up normally) // // We will ignore the error, and we will retry later to get resolve on this lock c.Close() } else { c.Close() if !active { // The lock is no longer active at server that originated the lock // so remove the lock from the map l.mutex.Lock() // Check if entry is still in map (could have been removed altogether by 'concurrent' (R)Unlock of last entry) if lri, ok := l.lockMap[nlrip.name]; ok { if !l.removeEntry(nlrip.name, nlrip.lri.uid, &lri) { // Remove failed, in case it is a: if nlrip.lri.writer { // Writer: this should never happen as the whole (mapped) entry should have been deleted log.Errorln("Lock maintenance failed to remove entry for write lock (should never happen)", nlrip.name, nlrip.lri, lri) } else { // Reader: this can happen if multiple read locks were active and the one we are looking for // has been released concurrently (so it is fine) } } else { // remove went okay, all is fine } } l.mutex.Unlock() } 
} } } // Initialize distributed lock. func initDistributedNSLock(mux *router.Router, serverConfig serverCmdConfig) { lockServers := newLockServers(serverConfig) registerStorageLockers(mux, lockServers) } // Create one lock server for every local storage rpc server. func newLockServers(serverConfig serverCmdConfig) (lockServers []*lockServer) { // Initialize posix storage API. exports := serverConfig.disks ignoredExports := serverConfig.ignoredDisks // Save ignored disks in a map skipDisks := make(map[string]bool) for _, ignoredExport := range ignoredExports { skipDisks[ignoredExport] = true } for _, export := range exports { if skipDisks[export] { continue } if isLocalStorage(export) { if idx := strings.LastIndex(export, ":"); idx != -1 { export = export[idx+1:] } // Create handler for lock RPCs locker := &lockServer{ rpcPath: export, mutex: sync.Mutex{}, lockMap: make(map[string][]lockRequesterInfo), timestamp: time.Now().UTC(), } // Start loop for stale lock maintenance go func() { // Start with random sleep time, so as to avoid "synchronous checks" between servers time.Sleep(time.Duration(rand.Float64() * float64(lockMaintenanceLoop))) for { time.Sleep(lockMaintenanceLoop) locker.lockMaintenance(lockCheckValidityInterval) } }() lockServers = append(lockServers, locker) } } return lockServers } // registerStorageLockers - register locker rpc handlers for net/rpc library clients func registerStorageLockers(mux *router.Router, lockServers []*lockServer) { for _, lockServer := range lockServers { lockRPCServer := rpc.NewServer() lockRPCServer.RegisterName("Dsync", lockServer) lockRouter := mux.PathPrefix(reservedBucket).Subrouter() lockRouter.Path(path.Join("/lock", lockServer.rpcPath)).Handler(lockRPCServer) } }
Active
identifier_name
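Behind `Active` (the middle of this record) and the Lock/RLock handlers sits one invariant on lockMap: a name maps either to exactly one writer entry or to one-or-more reader entries, and isWriteLock distinguishes the two. A small standalone sketch of that invariant, with the types trimmed down to the fields that matter:

package main

import "fmt"

type lockRequesterInfo struct {
	writer bool
	uid    string
}

func isWriteLock(lri []lockRequesterInfo) bool {
	return len(lri) == 1 && lri[0].writer
}

func main() {
	lockMap := map[string][]lockRequesterInfo{}

	// Two RLocks on the same name: readers accumulate in the slice.
	lockMap["obj"] = append(lockMap["obj"], lockRequesterInfo{writer: false, uid: "r1"})
	lockMap["obj"] = append(lockMap["obj"], lockRequesterInfo{writer: false, uid: "r2"})
	fmt.Println(isWriteLock(lockMap["obj"])) // false: shared read lock

	// A write lock is only granted when no entry exists for the name at all.
	if _, held := lockMap["other"]; !held {
		lockMap["other"] = []lockRequesterInfo{{writer: true, uid: "w1"}}
	}
	fmt.Println(isWriteLock(lockMap["other"])) // true: exclusive
}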
lock-rpc-server.go
/* * Minio Cloud Storage, (C) 2016 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package cmd import ( "fmt" "math/rand" "net/rpc" "path" "strings" "sync" "time" router "github.com/gorilla/mux" ) const lockRPCPath = "/minio/lock" const lockMaintenanceLoop = 1 * time.Minute const lockCheckValidityInterval = 2 * time.Minute // LockArgs besides lock name, holds Token and Timestamp for session // authentication and validation server restart. type LockArgs struct { Name string Token string Timestamp time.Time Node string RPCPath string UID string } // SetToken - sets the token to the supplied value. func (l *LockArgs) SetToken(token string) { l.Token = token } // SetTimestamp - sets the timestamp to the supplied value. func (l *LockArgs) SetTimestamp(tstamp time.Time) { l.Timestamp = tstamp } // lockRequesterInfo stores various info from the client for each lock that is requested type lockRequesterInfo struct { writer bool // Bool whether write or read lock node string // Network address of client claiming lock rpcPath string // RPC path of client claiming lock uid string // Uid to uniquely identify request of client timestamp time.Time // Timestamp set at the time of initialization timeLastCheck time.Time // Timestamp for last check of validity of lock } // isWriteLock returns whether the lock is a write or read lock func isWriteLock(lri []lockRequesterInfo) bool { return len(lri) == 1 && lri[0].writer } // lockServer is type for RPC handlers type lockServer struct { rpcPath string mutex sync.Mutex lockMap map[string][]lockRequesterInfo timestamp time.Time // Timestamp set at the time of initialization. Resets naturally on minio server restart. } func (l *lockServer) verifyArgs(args *LockArgs) error { if !l.timestamp.Equal(args.Timestamp) { return errInvalidTimestamp } if !isRPCTokenValid(args.Token) { return errInvalidToken } return nil } /// Distributed lock handlers // LoginHandler - handles LoginHandler RPC call. func (l *lockServer) LoginHandler(args *RPCLoginArgs, reply *RPCLoginReply) error { jwt, err := newJWT(defaultTokenExpiry) if err != nil { return err } if err = jwt.Authenticate(args.Username, args.Password); err != nil { return err } token, err := jwt.GenerateToken(args.Username) if err != nil { return err } reply.Token = token reply.Timestamp = l.timestamp return nil } // Lock - rpc handler for (single) write lock operation. func (l *lockServer) Lock(args *LockArgs, reply *bool) error { l.mutex.Lock() defer l.mutex.Unlock() if err := l.verifyArgs(args); err != nil { return err } _, *reply = l.lockMap[args.Name] if !*reply { // No locks held on the given name, so claim write lock l.lockMap[args.Name] = []lockRequesterInfo{{writer: true, node: args.Node, rpcPath: args.RPCPath, uid: args.UID, timestamp: time.Now(), timeLastCheck: time.Now()}} } *reply = !*reply // Negate *reply to return true when lock is granted or false otherwise return nil } // Unlock - rpc handler for (single) write unlock operation. 
func (l *lockServer) Unlock(args *LockArgs, reply *bool) error { l.mutex.Lock() defer l.mutex.Unlock() if err := l.verifyArgs(args); err != nil { return err } var lri []lockRequesterInfo lri, *reply = l.lockMap[args.Name] if !*reply { // No lock is held on the given name return fmt.Errorf("Unlock attempted on an unlocked entity: %s", args.Name) } if *reply = isWriteLock(lri); !*reply { // Unless it is a write lock return fmt.Errorf("Unlock attempted on a read locked entity: %s (%d read locks active)", args.Name, len(lri)) } if l.removeEntry(args.Name, args.UID, &lri) { return nil } return fmt.Errorf("Unlock unable to find corresponding lock for uid: %s", args.UID) } // RLock - rpc handler for read lock operation. func (l *lockServer) RLock(args *LockArgs, reply *bool) error { l.mutex.Lock() defer l.mutex.Unlock() if err := l.verifyArgs(args); err != nil { return err } var lri []lockRequesterInfo lri, *reply = l.lockMap[args.Name] if !*reply { // No locks held on the given name, so claim (first) read lock l.lockMap[args.Name] = []lockRequesterInfo{{writer: false, node: args.Node, rpcPath: args.RPCPath, uid: args.UID, timestamp: time.Now(), timeLastCheck: time.Now()}} *reply = true } else { if *reply = !isWriteLock(lri); *reply { // Unless there is a write lock l.lockMap[args.Name] = append(l.lockMap[args.Name], lockRequesterInfo{writer: false, node: args.Node, rpcPath: args.RPCPath, uid: args.UID, timestamp: time.Now(), timeLastCheck: time.Now()}) } } return nil } // RUnlock - rpc handler for read unlock operation. func (l *lockServer) RUnlock(args *LockArgs, reply *bool) error { l.mutex.Lock() defer l.mutex.Unlock() if err := l.verifyArgs(args); err != nil { return err } var lri []lockRequesterInfo if lri, *reply = l.lockMap[args.Name]; !*reply { // No lock is held on the given name return fmt.Errorf("RUnlock attempted on an unlocked entity: %s", args.Name) } if *reply = !isWriteLock(lri); !*reply { // A write-lock is held, cannot release a read lock return fmt.Errorf("RUnlock attempted on a write locked entity: %s", args.Name) } if l.removeEntry(args.Name, args.UID, &lri) { return nil } return fmt.Errorf("RUnlock unable to find corresponding read lock for uid: %s", args.UID) } // Active - rpc handler for active lock status. func (l *lockServer) Active(args *LockArgs, reply *bool) error { l.mutex.Lock() defer l.mutex.Unlock() if err := l.verifyArgs(args); err != nil { return err } var lri []lockRequesterInfo if lri, *reply = l.lockMap[args.Name]; !*reply { return nil // No lock is held on the given name so return false } // Check whether uid is still active for _, entry := range lri { if *reply = entry.uid == args.UID; *reply { return nil // When uid found return true } } return nil // None found so return false } // removeEntry either, based on the uid of the lock message, removes a single entry from the // lockRequesterInfo array or the whole array from the map (in case of a write lock or last read lock) func (l *lockServer) removeEntry(name, uid string, lri *[]lockRequesterInfo) bool { // Find correct entry to remove based on uid for index, entry := range *lri { if entry.uid == uid { if len(*lri) == 1 { delete(l.lockMap, name) // Remove the (last) lock } else { // Remove the appropriate read lock *lri = append((*lri)[:index], (*lri)[index+1:]...) 
l.lockMap[name] = *lri } return true } } return false } // nameLockRequesterInfoPair is a helper type for lock maintenance type nameLockRequesterInfoPair struct { name string lri lockRequesterInfo } // getLongLivedLocks returns locks that are older than a certain time and // have not been 'checked' for validity too soon enough func getLongLivedLocks(m map[string][]lockRequesterInfo, interval time.Duration) []nameLockRequesterInfoPair { rslt := []nameLockRequesterInfoPair{} for name, lriArray := range m { for idx := range lriArray { // Check whether enough time has gone by since last check if time.Since(lriArray[idx].timeLastCheck) >= interval { rslt = append(rslt, nameLockRequesterInfoPair{name: name, lri: lriArray[idx]}) lriArray[idx].timeLastCheck = time.Now() } } } return rslt } // lockMaintenance loops over locks that have been active for some time and checks back // with the original server whether it is still alive or not func (l *lockServer) lockMaintenance(interval time.Duration) { l.mutex.Lock() // get list of locks to check nlripLongLived := getLongLivedLocks(l.lockMap, interval) l.mutex.Unlock() for _, nlrip := range nlripLongLived { c := newClient(nlrip.lri.node, nlrip.lri.rpcPath) var active bool // Call back to original server verify whether the lock is still active (based on name & uid) if err := c.Call("Dsync.Active", &LockArgs{Name: nlrip.name, UID: nlrip.lri.uid}, &active); err != nil { // We failed to connect back to the server that originated the lock, this can either be due to // - server at client down // - some network error (and server is up normally) // // We will ignore the error, and we will retry later to get resolve on this lock c.Close() } else { c.Close() if !active { // The lock is no longer active at server that originated the lock // so remove the lock from the map l.mutex.Lock() // Check if entry is still in map (could have been removed altogether by 'concurrent' (R)Unlock of last entry) if lri, ok := l.lockMap[nlrip.name]; ok { if !l.removeEntry(nlrip.name, nlrip.lri.uid, &lri) { // Remove failed, in case it is a: if nlrip.lri.writer { // Writer: this should never happen as the whole (mapped) entry should have been deleted log.Errorln("Lock maintenance failed to remove entry for write lock (should never happen)", nlrip.name, nlrip.lri, lri) } else { // Reader: this can happen if multiple read locks were active and the one we are looking for // has been released concurrently (so it is fine) } } else { // remove went okay, all is fine } } l.mutex.Unlock() } } } }
} // Create one lock server for every local storage rpc server. func newLockServers(serverConfig serverCmdConfig) (lockServers []*lockServer) { // Initialize posix storage API. exports := serverConfig.disks ignoredExports := serverConfig.ignoredDisks // Save ignored disks in a map skipDisks := make(map[string]bool) for _, ignoredExport := range ignoredExports { skipDisks[ignoredExport] = true } for _, export := range exports { if skipDisks[export] { continue } if isLocalStorage(export) { if idx := strings.LastIndex(export, ":"); idx != -1 { export = export[idx+1:] } // Create handler for lock RPCs locker := &lockServer{ rpcPath: export, mutex: sync.Mutex{}, lockMap: make(map[string][]lockRequesterInfo), timestamp: time.Now().UTC(), } // Start loop for stale lock maintenance go func() { // Start with random sleep time, so as to avoid "synchronous checks" between servers time.Sleep(time.Duration(rand.Float64() * float64(lockMaintenanceLoop))) for { time.Sleep(lockMaintenanceLoop) locker.lockMaintenance(lockCheckValidityInterval) } }() lockServers = append(lockServers, locker) } } return lockServers } // registerStorageLockers - register locker rpc handlers for net/rpc library clients func registerStorageLockers(mux *router.Router, lockServers []*lockServer) { for _, lockServer := range lockServers { lockRPCServer := rpc.NewServer() lockRPCServer.RegisterName("Dsync", lockServer) lockRouter := mux.PathPrefix(reservedBucket).Subrouter() lockRouter.Path(path.Join("/lock", lockServer.rpcPath)).Handler(lockRPCServer) } }
// Initialize distributed lock. func initDistributedNSLock(mux *router.Router, serverConfig serverCmdConfig) { lockServers := newLockServers(serverConfig) registerStorageLockers(mux, lockServers)
random_line_split
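Each lockServer created by newLockServers runs its stale-lock sweep in a goroutine that first sleeps a random fraction of the loop interval, so that peers do not all probe each other at the same instant. The stripped-down Go sketch below shows only that loop skeleton; the intervals are shortened and the sweep body replaced by a log line so the example terminates, whereas the real server calls lockMaintenance with the validity interval and never exits.

package main

import (
	"log"
	"math/rand"
	"time"
)

// Shortened for the sketch; the server uses 1 minute and 2 minutes respectively.
const (
	maintenanceLoop  = 200 * time.Millisecond
	validityInterval = 400 * time.Millisecond
)

func main() {
	// Random initial delay in [0, maintenanceLoop) -- the same expression the
	// server uses to avoid synchronized checks between peers.
	time.Sleep(time.Duration(rand.Float64() * float64(maintenanceLoop)))

	for i := 0; i < 3; i++ { // the real loop never exits
		time.Sleep(maintenanceLoop)
		// Here the real server would re-validate long-lived locks by calling
		// back the node that originated each lock (Dsync.Active).
		log.Println("sweep", i, "- re-validate locks idle for", validityInterval)
	}
}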
lock-rpc-server.go
/* * Minio Cloud Storage, (C) 2016 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package cmd import ( "fmt" "math/rand" "net/rpc" "path" "strings" "sync" "time" router "github.com/gorilla/mux" ) const lockRPCPath = "/minio/lock" const lockMaintenanceLoop = 1 * time.Minute const lockCheckValidityInterval = 2 * time.Minute // LockArgs besides lock name, holds Token and Timestamp for session // authentication and validation server restart. type LockArgs struct { Name string Token string Timestamp time.Time Node string RPCPath string UID string } // SetToken - sets the token to the supplied value. func (l *LockArgs) SetToken(token string) { l.Token = token } // SetTimestamp - sets the timestamp to the supplied value. func (l *LockArgs) SetTimestamp(tstamp time.Time) { l.Timestamp = tstamp } // lockRequesterInfo stores various info from the client for each lock that is requested type lockRequesterInfo struct { writer bool // Bool whether write or read lock node string // Network address of client claiming lock rpcPath string // RPC path of client claiming lock uid string // Uid to uniquely identify request of client timestamp time.Time // Timestamp set at the time of initialization timeLastCheck time.Time // Timestamp for last check of validity of lock } // isWriteLock returns whether the lock is a write or read lock func isWriteLock(lri []lockRequesterInfo) bool { return len(lri) == 1 && lri[0].writer } // lockServer is type for RPC handlers type lockServer struct { rpcPath string mutex sync.Mutex lockMap map[string][]lockRequesterInfo timestamp time.Time // Timestamp set at the time of initialization. Resets naturally on minio server restart. } func (l *lockServer) verifyArgs(args *LockArgs) error { if !l.timestamp.Equal(args.Timestamp) { return errInvalidTimestamp } if !isRPCTokenValid(args.Token) { return errInvalidToken } return nil } /// Distributed lock handlers // LoginHandler - handles LoginHandler RPC call. func (l *lockServer) LoginHandler(args *RPCLoginArgs, reply *RPCLoginReply) error { jwt, err := newJWT(defaultTokenExpiry) if err != nil { return err } if err = jwt.Authenticate(args.Username, args.Password); err != nil { return err } token, err := jwt.GenerateToken(args.Username) if err != nil { return err } reply.Token = token reply.Timestamp = l.timestamp return nil } // Lock - rpc handler for (single) write lock operation. func (l *lockServer) Lock(args *LockArgs, reply *bool) error { l.mutex.Lock() defer l.mutex.Unlock() if err := l.verifyArgs(args); err != nil { return err } _, *reply = l.lockMap[args.Name] if !*reply { // No locks held on the given name, so claim write lock l.lockMap[args.Name] = []lockRequesterInfo{{writer: true, node: args.Node, rpcPath: args.RPCPath, uid: args.UID, timestamp: time.Now(), timeLastCheck: time.Now()}} } *reply = !*reply // Negate *reply to return true when lock is granted or false otherwise return nil } // Unlock - rpc handler for (single) write unlock operation. 
func (l *lockServer) Unlock(args *LockArgs, reply *bool) error { l.mutex.Lock() defer l.mutex.Unlock() if err := l.verifyArgs(args); err != nil { return err } var lri []lockRequesterInfo lri, *reply = l.lockMap[args.Name] if !*reply { // No lock is held on the given name return fmt.Errorf("Unlock attempted on an unlocked entity: %s", args.Name) } if *reply = isWriteLock(lri); !*reply { // Unless it is a write lock return fmt.Errorf("Unlock attempted on a read locked entity: %s (%d read locks active)", args.Name, len(lri)) } if l.removeEntry(args.Name, args.UID, &lri) { return nil } return fmt.Errorf("Unlock unable to find corresponding lock for uid: %s", args.UID) } // RLock - rpc handler for read lock operation. func (l *lockServer) RLock(args *LockArgs, reply *bool) error { l.mutex.Lock() defer l.mutex.Unlock() if err := l.verifyArgs(args); err != nil { return err } var lri []lockRequesterInfo lri, *reply = l.lockMap[args.Name] if !*reply { // No locks held on the given name, so claim (first) read lock l.lockMap[args.Name] = []lockRequesterInfo{{writer: false, node: args.Node, rpcPath: args.RPCPath, uid: args.UID, timestamp: time.Now(), timeLastCheck: time.Now()}} *reply = true } else { if *reply = !isWriteLock(lri); *reply { // Unless there is a write lock l.lockMap[args.Name] = append(l.lockMap[args.Name], lockRequesterInfo{writer: false, node: args.Node, rpcPath: args.RPCPath, uid: args.UID, timestamp: time.Now(), timeLastCheck: time.Now()}) } } return nil } // RUnlock - rpc handler for read unlock operation. func (l *lockServer) RUnlock(args *LockArgs, reply *bool) error { l.mutex.Lock() defer l.mutex.Unlock() if err := l.verifyArgs(args); err != nil { return err } var lri []lockRequesterInfo if lri, *reply = l.lockMap[args.Name]; !*reply { // No lock is held on the given name return fmt.Errorf("RUnlock attempted on an unlocked entity: %s", args.Name) } if *reply = !isWriteLock(lri); !*reply { // A write-lock is held, cannot release a read lock return fmt.Errorf("RUnlock attempted on a write locked entity: %s", args.Name) } if l.removeEntry(args.Name, args.UID, &lri) { return nil } return fmt.Errorf("RUnlock unable to find corresponding read lock for uid: %s", args.UID) } // Active - rpc handler for active lock status. func (l *lockServer) Active(args *LockArgs, reply *bool) error { l.mutex.Lock() defer l.mutex.Unlock() if err := l.verifyArgs(args); err != nil { return err } var lri []lockRequesterInfo if lri, *reply = l.lockMap[args.Name]; !*reply { return nil // No lock is held on the given name so return false } // Check whether uid is still active for _, entry := range lri { if *reply = entry.uid == args.UID; *reply { return nil // When uid found return true } } return nil // None found so return false } // removeEntry either, based on the uid of the lock message, removes a single entry from the // lockRequesterInfo array or the whole array from the map (in case of a write lock or last read lock) func (l *lockServer) removeEntry(name, uid string, lri *[]lockRequesterInfo) bool { // Find correct entry to remove based on uid for index, entry := range *lri { if entry.uid == uid { if len(*lri) == 1
else { // Remove the appropriate read lock *lri = append((*lri)[:index], (*lri)[index+1:]...) l.lockMap[name] = *lri } return true } } return false } // nameLockRequesterInfoPair is a helper type for lock maintenance type nameLockRequesterInfoPair struct { name string lri lockRequesterInfo } // getLongLivedLocks returns locks that are older than a certain time and // have not been 'checked' for validity too soon enough func getLongLivedLocks(m map[string][]lockRequesterInfo, interval time.Duration) []nameLockRequesterInfoPair { rslt := []nameLockRequesterInfoPair{} for name, lriArray := range m { for idx := range lriArray { // Check whether enough time has gone by since last check if time.Since(lriArray[idx].timeLastCheck) >= interval { rslt = append(rslt, nameLockRequesterInfoPair{name: name, lri: lriArray[idx]}) lriArray[idx].timeLastCheck = time.Now() } } } return rslt } // lockMaintenance loops over locks that have been active for some time and checks back // with the original server whether it is still alive or not func (l *lockServer) lockMaintenance(interval time.Duration) { l.mutex.Lock() // get list of locks to check nlripLongLived := getLongLivedLocks(l.lockMap, interval) l.mutex.Unlock() for _, nlrip := range nlripLongLived { c := newClient(nlrip.lri.node, nlrip.lri.rpcPath) var active bool // Call back to original server verify whether the lock is still active (based on name & uid) if err := c.Call("Dsync.Active", &LockArgs{Name: nlrip.name, UID: nlrip.lri.uid}, &active); err != nil { // We failed to connect back to the server that originated the lock, this can either be due to // - server at client down // - some network error (and server is up normally) // // We will ignore the error, and we will retry later to get resolve on this lock c.Close() } else { c.Close() if !active { // The lock is no longer active at server that originated the lock // so remove the lock from the map l.mutex.Lock() // Check if entry is still in map (could have been removed altogether by 'concurrent' (R)Unlock of last entry) if lri, ok := l.lockMap[nlrip.name]; ok { if !l.removeEntry(nlrip.name, nlrip.lri.uid, &lri) { // Remove failed, in case it is a: if nlrip.lri.writer { // Writer: this should never happen as the whole (mapped) entry should have been deleted log.Errorln("Lock maintenance failed to remove entry for write lock (should never happen)", nlrip.name, nlrip.lri, lri) } else { // Reader: this can happen if multiple read locks were active and the one we are looking for // has been released concurrently (so it is fine) } } else { // remove went okay, all is fine } } l.mutex.Unlock() } } } } // Initialize distributed lock. func initDistributedNSLock(mux *router.Router, serverConfig serverCmdConfig) { lockServers := newLockServers(serverConfig) registerStorageLockers(mux, lockServers) } // Create one lock server for every local storage rpc server. func newLockServers(serverConfig serverCmdConfig) (lockServers []*lockServer) { // Initialize posix storage API. 
exports := serverConfig.disks ignoredExports := serverConfig.ignoredDisks // Save ignored disks in a map skipDisks := make(map[string]bool) for _, ignoredExport := range ignoredExports { skipDisks[ignoredExport] = true } for _, export := range exports { if skipDisks[export] { continue } if isLocalStorage(export) { if idx := strings.LastIndex(export, ":"); idx != -1 { export = export[idx+1:] } // Create handler for lock RPCs locker := &lockServer{ rpcPath: export, mutex: sync.Mutex{}, lockMap: make(map[string][]lockRequesterInfo), timestamp: time.Now().UTC(), } // Start loop for stale lock maintenance go func() { // Start with random sleep time, so as to avoid "synchronous checks" between servers time.Sleep(time.Duration(rand.Float64() * float64(lockMaintenanceLoop))) for { time.Sleep(lockMaintenanceLoop) locker.lockMaintenance(lockCheckValidityInterval) } }() lockServers = append(lockServers, locker) } } return lockServers } // registerStorageLockers - register locker rpc handlers for net/rpc library clients func registerStorageLockers(mux *router.Router, lockServers []*lockServer) { for _, lockServer := range lockServers { lockRPCServer := rpc.NewServer() lockRPCServer.RegisterName("Dsync", lockServer) lockRouter := mux.PathPrefix(reservedBucket).Subrouter() lockRouter.Path(path.Join("/lock", lockServer.rpcPath)).Handler(lockRPCServer) } }
{ delete(l.lockMap, name) // Remove the (last) lock }
conditional_block
worker.rs
// Copyright 2018-2023 the Deno authors. All rights reserved. MIT license. use std::pin::Pin; use std::rc::Rc; use std::sync::atomic::AtomicI32; use std::sync::atomic::Ordering::Relaxed; use std::sync::Arc; use std::task::Context; use std::task::Poll; use deno_broadcast_channel::InMemoryBroadcastChannel; use deno_cache::CreateCache; use deno_cache::SqliteBackedCache; use deno_core::ascii_str; use deno_core::error::AnyError; use deno_core::error::JsError; use deno_core::futures::Future; use deno_core::v8; use deno_core::CompiledWasmModuleStore; use deno_core::Extension; use deno_core::FsModuleLoader; use deno_core::GetErrorClassFn; use deno_core::JsRuntime; use deno_core::LocalInspectorSession; use deno_core::ModuleCode; use deno_core::ModuleId; use deno_core::ModuleLoader; use deno_core::ModuleSpecifier; use deno_core::RuntimeOptions; use deno_core::SharedArrayBufferStore; use deno_core::Snapshot; use deno_core::SourceMapGetter; use deno_fs::FileSystem; use deno_http::DefaultHttpPropertyExtractor; use deno_io::Stdio; use deno_kv::dynamic::MultiBackendDbHandler; use deno_node::SUPPORTED_BUILTIN_NODE_MODULES_WITH_PREFIX; use deno_tls::RootCertStoreProvider; use deno_web::BlobStore; use log::debug; use crate::inspector_server::InspectorServer; use crate::ops; use crate::permissions::PermissionsContainer; use crate::shared::runtime; use crate::BootstrapOptions; pub type FormatJsErrorFn = dyn Fn(&JsError) -> String + Sync + Send; #[derive(Clone, Default)] pub struct ExitCode(Arc<AtomicI32>); impl ExitCode { pub fn get(&self) -> i32 { self.0.load(Relaxed) } pub fn set(&mut self, code: i32) { self.0.store(code, Relaxed); } } /// This worker is created and used by almost all /// subcommands in Deno executable. /// /// It provides ops available in the `Deno` namespace. /// /// All `WebWorker`s created during program execution /// are descendants of this worker. pub struct MainWorker { pub js_runtime: JsRuntime, should_break_on_first_statement: bool, should_wait_for_inspector_session: bool, exit_code: ExitCode, bootstrap_fn_global: Option<v8::Global<v8::Function>>, } pub struct WorkerOptions { pub bootstrap: BootstrapOptions, /// JsRuntime extensions, not to be confused with ES modules. /// /// Extensions register "ops" and JavaScript sources provided in `js` or `esm` /// configuration. If you are using a snapshot, then extensions shouldn't /// provide JavaScript sources that were already snapshotted. pub extensions: Vec<Extension>, /// V8 snapshot that should be loaded on startup. pub startup_snapshot: Option<Snapshot>, /// Optional isolate creation parameters, such as heap limits. pub create_params: Option<v8::CreateParams>, pub unsafely_ignore_certificate_errors: Option<Vec<String>>, pub root_cert_store_provider: Option<Arc<dyn RootCertStoreProvider>>, pub seed: Option<u64>, pub fs: Arc<dyn FileSystem>, /// Implementation of `ModuleLoader` which will be /// called when V8 requests to load ES modules. /// /// If not provided runtime will error if code being /// executed tries to load modules. pub module_loader: Rc<dyn ModuleLoader>, pub npm_resolver: Option<Arc<dyn deno_node::NpmResolver>>, // Callbacks invoked when creating new instance of WebWorker pub create_web_worker_cb: Arc<ops::worker_host::CreateWebWorkerCb>, pub format_js_error_fn: Option<Arc<FormatJsErrorFn>>, /// Source map reference for errors. 
pub source_map_getter: Option<Box<dyn SourceMapGetter>>, pub maybe_inspector_server: Option<Arc<InspectorServer>>, // If true, the worker will wait for inspector session and break on first // statement of user code. Takes higher precedence than // `should_wait_for_inspector_session`. pub should_break_on_first_statement: bool, // If true, the worker will wait for inspector session before executing // user code. pub should_wait_for_inspector_session: bool, /// Allows to map error type to a string "class" used to represent /// error in JavaScript. pub get_error_class_fn: Option<GetErrorClassFn>, pub cache_storage_dir: Option<std::path::PathBuf>, pub origin_storage_dir: Option<std::path::PathBuf>, pub blob_store: Arc<BlobStore>, pub broadcast_channel: InMemoryBroadcastChannel, /// The store to use for transferring SharedArrayBuffers between isolates. /// If multiple isolates should have the possibility of sharing /// SharedArrayBuffers, they should use the same [SharedArrayBufferStore]. If /// no [SharedArrayBufferStore] is specified, SharedArrayBuffer can not be /// serialized. pub shared_array_buffer_store: Option<SharedArrayBufferStore>, /// The store to use for transferring `WebAssembly.Module` objects between /// isolates. /// If multiple isolates should have the possibility of sharing /// `WebAssembly.Module` objects, they should use the same /// [CompiledWasmModuleStore]. If no [CompiledWasmModuleStore] is specified, /// `WebAssembly.Module` objects cannot be serialized. pub compiled_wasm_module_store: Option<CompiledWasmModuleStore>, pub stdio: Stdio, } impl Default for WorkerOptions { fn default() -> Self { Self { create_web_worker_cb: Arc::new(|_| { unimplemented!("web workers are not supported") }), fs: Arc::new(deno_fs::RealFs), module_loader: Rc::new(FsModuleLoader), seed: None, unsafely_ignore_certificate_errors: Default::default(), should_break_on_first_statement: Default::default(), should_wait_for_inspector_session: Default::default(), compiled_wasm_module_store: Default::default(), shared_array_buffer_store: Default::default(), maybe_inspector_server: Default::default(), format_js_error_fn: Default::default(), get_error_class_fn: Default::default(), origin_storage_dir: Default::default(), cache_storage_dir: Default::default(), broadcast_channel: Default::default(), source_map_getter: Default::default(), root_cert_store_provider: Default::default(), npm_resolver: Default::default(), blob_store: Default::default(), extensions: Default::default(), startup_snapshot: Default::default(), create_params: Default::default(), bootstrap: Default::default(), stdio: Default::default(), } } } impl MainWorker { pub fn bootstrap_from_options( main_module: ModuleSpecifier, permissions: PermissionsContainer, options: WorkerOptions, ) -> Self { let bootstrap_options = options.bootstrap.clone(); let mut worker = Self::from_options(main_module, permissions, options); worker.bootstrap(&bootstrap_options); worker } pub fn from_options( main_module: ModuleSpecifier, permissions: PermissionsContainer, mut options: WorkerOptions, ) -> Self { deno_core::extension!(deno_permissions_worker, options = { permissions: PermissionsContainer, unstable: bool, enable_testing_features: bool, }, state = |state, options| { state.put::<PermissionsContainer>(options.permissions); state.put(ops::UnstableChecker { unstable: options.unstable }); state.put(ops::TestingFeaturesEnabled(options.enable_testing_features)); }, ); // Permissions: many ops depend on this let unstable = options.bootstrap.unstable; let 
enable_testing_features = options.bootstrap.enable_testing_features; let exit_code = ExitCode(Arc::new(AtomicI32::new(0))); let create_cache = options.cache_storage_dir.map(|storage_dir| { let create_cache_fn = move || SqliteBackedCache::new(storage_dir.clone()); CreateCache(Arc::new(create_cache_fn)) }); // NOTE(bartlomieju): ordering is important here, keep it in sync with // `runtime/build.rs`, `runtime/web_worker.rs` and `cli/build.rs`! let mut extensions = vec![ // Web APIs deno_webidl::deno_webidl::init_ops_and_esm(), deno_console::deno_console::init_ops_and_esm(), deno_url::deno_url::init_ops_and_esm(), deno_web::deno_web::init_ops_and_esm::<PermissionsContainer>( options.blob_store.clone(), options.bootstrap.location.clone(), ), deno_fetch::deno_fetch::init_ops_and_esm::<PermissionsContainer>( deno_fetch::Options { user_agent: options.bootstrap.user_agent.clone(), root_cert_store_provider: options.root_cert_store_provider.clone(), unsafely_ignore_certificate_errors: options .unsafely_ignore_certificate_errors .clone(), file_fetch_handler: Rc::new(deno_fetch::FsFetchHandler), ..Default::default() }, ), deno_cache::deno_cache::init_ops_and_esm::<SqliteBackedCache>( create_cache, ), deno_websocket::deno_websocket::init_ops_and_esm::<PermissionsContainer>( options.bootstrap.user_agent.clone(), options.root_cert_store_provider.clone(), options.unsafely_ignore_certificate_errors.clone(), ), deno_webstorage::deno_webstorage::init_ops_and_esm( options.origin_storage_dir.clone(), ), deno_crypto::deno_crypto::init_ops_and_esm(options.seed), deno_broadcast_channel::deno_broadcast_channel::init_ops_and_esm( options.broadcast_channel.clone(), unstable, ), deno_ffi::deno_ffi::init_ops_and_esm::<PermissionsContainer>(unstable), deno_net::deno_net::init_ops_and_esm::<PermissionsContainer>( options.root_cert_store_provider.clone(), unstable, options.unsafely_ignore_certificate_errors.clone(), ), deno_tls::deno_tls::init_ops_and_esm(), deno_kv::deno_kv::init_ops_and_esm( MultiBackendDbHandler::remote_or_sqlite::<PermissionsContainer>( options.origin_storage_dir.clone(), ), unstable, ), deno_napi::deno_napi::init_ops_and_esm::<PermissionsContainer>(), deno_http::deno_http::init_ops_and_esm::<DefaultHttpPropertyExtractor>(), deno_io::deno_io::init_ops_and_esm(Some(options.stdio)), deno_fs::deno_fs::init_ops_and_esm::<PermissionsContainer>( unstable, options.fs.clone(), ), deno_node::deno_node::init_ops_and_esm::<PermissionsContainer>( options.npm_resolver, options.fs, ), // Ops from this crate ops::runtime::deno_runtime::init_ops_and_esm(main_module.clone()), ops::worker_host::deno_worker_host::init_ops_and_esm( options.create_web_worker_cb.clone(), options.format_js_error_fn.clone(), ), ops::fs_events::deno_fs_events::init_ops_and_esm(), ops::os::deno_os::init_ops_and_esm(exit_code.clone()), ops::permissions::deno_permissions::init_ops_and_esm(), ops::process::deno_process::init_ops_and_esm(), ops::signal::deno_signal::init_ops_and_esm(), ops::tty::deno_tty::init_ops_and_esm(), ops::http::deno_http_runtime::init_ops_and_esm(), deno_permissions_worker::init_ops_and_esm( permissions, unstable, enable_testing_features, ), runtime::init_ops_and_esm(), ]; for extension in &mut extensions { #[cfg(not(feature = "__runtime_js_sources"))] { extension.js_files = std::borrow::Cow::Borrowed(&[]); extension.esm_files = std::borrow::Cow::Borrowed(&[]); extension.esm_entry_point = None; } #[cfg(feature = "__runtime_js_sources")] { use crate::shared::maybe_transpile_source; for source in extension.esm_files.to_mut() 
{ maybe_transpile_source(source).unwrap(); } for source in extension.js_files.to_mut() { maybe_transpile_source(source).unwrap(); } } } extensions.extend(std::mem::take(&mut options.extensions)); #[cfg(all(feature = "include_js_files_for_snapshotting", feature = "dont_create_runtime_snapshot", not(feature = "__runtime_js_sources")))] options.startup_snapshot.as_ref().expect("Sources are not embedded, snapshotting was disabled and a user snapshot was not provided."); // Clear extension modules from the module map, except preserve `node:*` // modules. let preserve_snapshotted_modules = Some(SUPPORTED_BUILTIN_NODE_MODULES_WITH_PREFIX); let mut js_runtime = JsRuntime::new(RuntimeOptions { module_loader: Some(options.module_loader.clone()), startup_snapshot: options .startup_snapshot .or_else(crate::js::deno_isolate_init), create_params: options.create_params, source_map_getter: options.source_map_getter, get_error_class_fn: options.get_error_class_fn, shared_array_buffer_store: options.shared_array_buffer_store.clone(), compiled_wasm_module_store: options.compiled_wasm_module_store.clone(), extensions, preserve_snapshotted_modules, inspector: options.maybe_inspector_server.is_some(), is_main: true, ..Default::default() }); if let Some(server) = options.maybe_inspector_server.clone() { server.register_inspector( main_module.to_string(), &mut js_runtime, options.should_break_on_first_statement || options.should_wait_for_inspector_session, ); // Put inspector handle into the op state so we can put a breakpoint when // executing a CJS entrypoint. let op_state = js_runtime.op_state(); let inspector = js_runtime.inspector(); op_state.borrow_mut().put(inspector); } let bootstrap_fn_global = { let context = js_runtime.main_context(); let scope = &mut js_runtime.handle_scope(); let context_local = v8::Local::new(scope, context); let global_obj = context_local.global(scope); let bootstrap_str = v8::String::new_external_onebyte_static(scope, b"bootstrap").unwrap(); let bootstrap_ns: v8::Local<v8::Object> = global_obj .get(scope, bootstrap_str.into()) .unwrap() .try_into() .unwrap(); let main_runtime_str = v8::String::new_external_onebyte_static(scope, b"mainRuntime").unwrap(); let bootstrap_fn = bootstrap_ns.get(scope, main_runtime_str.into()).unwrap(); let bootstrap_fn = v8::Local::<v8::Function>::try_from(bootstrap_fn).unwrap(); v8::Global::new(scope, bootstrap_fn) }; Self { js_runtime, should_break_on_first_statement: options.should_break_on_first_statement, should_wait_for_inspector_session: options .should_wait_for_inspector_session, exit_code, bootstrap_fn_global: Some(bootstrap_fn_global), } } pub fn bootstrap(&mut self, options: &BootstrapOptions) { let scope = &mut self.js_runtime.handle_scope(); let args = options.as_v8(scope); let bootstrap_fn = self.bootstrap_fn_global.take().unwrap(); let bootstrap_fn = v8::Local::new(scope, bootstrap_fn); let undefined = v8::undefined(scope); bootstrap_fn.call(scope, undefined.into(), &[args]).unwrap(); } /// See [JsRuntime::execute_script](deno_core::JsRuntime::execute_script) pub fn execute_script( &mut self, script_name: &'static str, source_code: ModuleCode, ) -> Result<v8::Global<v8::Value>, AnyError> { self.js_runtime.execute_script(script_name, source_code) } /// Loads and instantiates specified JavaScript module as "main" module. 
pub async fn preload_main_module( &mut self, module_specifier: &ModuleSpecifier, ) -> Result<ModuleId, AnyError> { self .js_runtime .load_main_module(module_specifier, None) .await } /// Loads and instantiates specified JavaScript module as "side" module. pub async fn preload_side_module( &mut self, module_specifier: &ModuleSpecifier, ) -> Result<ModuleId, AnyError> { self .js_runtime .load_side_module(module_specifier, None) .await } /// Executes specified JavaScript module. pub async fn evaluate_module( &mut self, id: ModuleId, ) -> Result<(), AnyError> { self.wait_for_inspector_session(); let mut receiver = self.js_runtime.mod_evaluate(id); tokio::select! { // Not using biased mode leads to non-determinism for relatively simple // programs. biased; maybe_result = &mut receiver => { debug!("received module evaluate {:#?}", maybe_result); maybe_result.expect("Module evaluation result not provided.") } event_loop_result = self.run_event_loop(false) => { event_loop_result?; let maybe_result = receiver.await; maybe_result.expect("Module evaluation result not provided.") } } } /// Loads, instantiates and executes specified JavaScript module. pub async fn execute_side_module( &mut self, module_specifier: &ModuleSpecifier, ) -> Result<(), AnyError> { let id = self.preload_side_module(module_specifier).await?; self.evaluate_module(id).await } /// Loads, instantiates and executes specified JavaScript module. /// /// This module will have "import.meta.main" equal to true. pub async fn
( &mut self, module_specifier: &ModuleSpecifier, ) -> Result<(), AnyError> { let id = self.preload_main_module(module_specifier).await?; self.evaluate_module(id).await } fn wait_for_inspector_session(&mut self) { if self.should_break_on_first_statement { self .js_runtime .inspector() .borrow_mut() .wait_for_session_and_break_on_next_statement(); } else if self.should_wait_for_inspector_session { self.js_runtime.inspector().borrow_mut().wait_for_session(); } } /// Create new inspector session. This function panics if Worker /// was not configured to create inspector. pub async fn create_inspector_session(&mut self) -> LocalInspectorSession { self.js_runtime.maybe_init_inspector(); self.js_runtime.inspector().borrow().create_local_session() } pub fn poll_event_loop( &mut self, cx: &mut Context, wait_for_inspector: bool, ) -> Poll<Result<(), AnyError>> { self.js_runtime.poll_event_loop(cx, wait_for_inspector) } pub async fn run_event_loop( &mut self, wait_for_inspector: bool, ) -> Result<(), AnyError> { self.js_runtime.run_event_loop(wait_for_inspector).await } /// A utility function that runs provided future concurrently with the event loop. /// /// Useful when using a local inspector session. pub async fn with_event_loop<'a, T>( &mut self, mut fut: Pin<Box<dyn Future<Output = T> + 'a>>, ) -> T { loop { tokio::select! { biased; result = &mut fut => { return result; } _ = self.run_event_loop(false) => {} }; } } /// Return exit code set by the executed code (either in main worker /// or one of child web workers). pub fn exit_code(&self) -> i32 { self.exit_code.get() } /// Dispatches "load" event to the JavaScript runtime. /// /// Does not poll event loop, and thus not await any of the "load" event handlers. pub fn dispatch_load_event( &mut self, script_name: &'static str, ) -> Result<(), AnyError> { self.js_runtime.execute_script( script_name, // NOTE(@bartlomieju): not using `globalThis` here, because user might delete // it. Instead we're using global `dispatchEvent` function which will // used a saved reference to global scope. ascii_str!("dispatchEvent(new Event('load'))"), )?; Ok(()) } /// Dispatches "unload" event to the JavaScript runtime. /// /// Does not poll event loop, and thus not await any of the "unload" event handlers. pub fn dispatch_unload_event( &mut self, script_name: &'static str, ) -> Result<(), AnyError> { self.js_runtime.execute_script( script_name, // NOTE(@bartlomieju): not using `globalThis` here, because user might delete // it. Instead we're using global `dispatchEvent` function which will // used a saved reference to global scope. ascii_str!("dispatchEvent(new Event('unload'))"), )?; Ok(()) } /// Dispatches "beforeunload" event to the JavaScript runtime. Returns a boolean /// indicating if the event was prevented and thus event loop should continue /// running. pub fn dispatch_beforeunload_event( &mut self, script_name: &'static str, ) -> Result<bool, AnyError> { let value = self.js_runtime.execute_script( script_name, // NOTE(@bartlomieju): not using `globalThis` here, because user might delete // it. Instead we're using global `dispatchEvent` function which will // used a saved reference to global scope. ascii_str!( "dispatchEvent(new Event('beforeunload', { cancelable: true }));" ), )?; let local_value = value.open(&mut self.js_runtime.handle_scope()); Ok(local_value.is_false()) } }
execute_main_module
identifier_name
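
The completion above (execute_main_module) is part of MainWorker's public surface, together with bootstrap_from_options, run_event_loop and exit_code, all defined in the prefix/suffix of this example. The following minimal driver sketch is an editorial illustration, not part of the dataset rows or the Deno sources; it assumes the deno_runtime/deno_core crate paths implied by the imports above, a tokio runtime, an existing ./main.ts, and that PermissionsContainer::allow_all() is available as a constructor.

use deno_core::ModuleSpecifier;
use deno_runtime::permissions::PermissionsContainer;
use deno_runtime::worker::{MainWorker, WorkerOptions};

#[tokio::main]
async fn main() -> Result<(), deno_core::error::AnyError> {
    // Assumption: ./main.ts exists relative to the current working directory.
    let path = std::env::current_dir()?.join("main.ts");
    let main_module = ModuleSpecifier::from_file_path(&path).unwrap();

    // bootstrap_from_options = from_options + bootstrap, as defined above.
    let mut worker = MainWorker::bootstrap_from_options(
        main_module.clone(),
        PermissionsContainer::allow_all(), // assumed helper constructor
        WorkerOptions::default(),
    );

    // Load and evaluate the module (import.meta.main == true), then drive the
    // event loop until pending ops settle.
    worker.execute_main_module(&main_module).await?;
    worker.run_event_loop(false).await?;

    std::process::exit(worker.exit_code());
}
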
worker.rs
// Copyright 2018-2023 the Deno authors. All rights reserved. MIT license. use std::pin::Pin; use std::rc::Rc; use std::sync::atomic::AtomicI32; use std::sync::atomic::Ordering::Relaxed; use std::sync::Arc; use std::task::Context; use std::task::Poll; use deno_broadcast_channel::InMemoryBroadcastChannel; use deno_cache::CreateCache; use deno_cache::SqliteBackedCache; use deno_core::ascii_str; use deno_core::error::AnyError; use deno_core::error::JsError; use deno_core::futures::Future; use deno_core::v8; use deno_core::CompiledWasmModuleStore; use deno_core::Extension; use deno_core::FsModuleLoader; use deno_core::GetErrorClassFn; use deno_core::JsRuntime; use deno_core::LocalInspectorSession; use deno_core::ModuleCode; use deno_core::ModuleId; use deno_core::ModuleLoader; use deno_core::ModuleSpecifier; use deno_core::RuntimeOptions; use deno_core::SharedArrayBufferStore; use deno_core::Snapshot; use deno_core::SourceMapGetter; use deno_fs::FileSystem; use deno_http::DefaultHttpPropertyExtractor; use deno_io::Stdio; use deno_kv::dynamic::MultiBackendDbHandler; use deno_node::SUPPORTED_BUILTIN_NODE_MODULES_WITH_PREFIX; use deno_tls::RootCertStoreProvider; use deno_web::BlobStore; use log::debug; use crate::inspector_server::InspectorServer; use crate::ops; use crate::permissions::PermissionsContainer; use crate::shared::runtime; use crate::BootstrapOptions; pub type FormatJsErrorFn = dyn Fn(&JsError) -> String + Sync + Send; #[derive(Clone, Default)] pub struct ExitCode(Arc<AtomicI32>); impl ExitCode { pub fn get(&self) -> i32 { self.0.load(Relaxed) } pub fn set(&mut self, code: i32) { self.0.store(code, Relaxed); } } /// This worker is created and used by almost all /// subcommands in Deno executable. /// /// It provides ops available in the `Deno` namespace. /// /// All `WebWorker`s created during program execution /// are descendants of this worker. pub struct MainWorker { pub js_runtime: JsRuntime, should_break_on_first_statement: bool, should_wait_for_inspector_session: bool, exit_code: ExitCode, bootstrap_fn_global: Option<v8::Global<v8::Function>>, } pub struct WorkerOptions { pub bootstrap: BootstrapOptions, /// JsRuntime extensions, not to be confused with ES modules. /// /// Extensions register "ops" and JavaScript sources provided in `js` or `esm` /// configuration. If you are using a snapshot, then extensions shouldn't /// provide JavaScript sources that were already snapshotted. pub extensions: Vec<Extension>, /// V8 snapshot that should be loaded on startup. pub startup_snapshot: Option<Snapshot>, /// Optional isolate creation parameters, such as heap limits. pub create_params: Option<v8::CreateParams>, pub unsafely_ignore_certificate_errors: Option<Vec<String>>, pub root_cert_store_provider: Option<Arc<dyn RootCertStoreProvider>>, pub seed: Option<u64>, pub fs: Arc<dyn FileSystem>, /// Implementation of `ModuleLoader` which will be /// called when V8 requests to load ES modules. /// /// If not provided runtime will error if code being /// executed tries to load modules. pub module_loader: Rc<dyn ModuleLoader>, pub npm_resolver: Option<Arc<dyn deno_node::NpmResolver>>, // Callbacks invoked when creating new instance of WebWorker pub create_web_worker_cb: Arc<ops::worker_host::CreateWebWorkerCb>, pub format_js_error_fn: Option<Arc<FormatJsErrorFn>>, /// Source map reference for errors. 
pub source_map_getter: Option<Box<dyn SourceMapGetter>>, pub maybe_inspector_server: Option<Arc<InspectorServer>>, // If true, the worker will wait for inspector session and break on first // statement of user code. Takes higher precedence than // `should_wait_for_inspector_session`. pub should_break_on_first_statement: bool, // If true, the worker will wait for inspector session before executing // user code. pub should_wait_for_inspector_session: bool, /// Allows to map error type to a string "class" used to represent /// error in JavaScript. pub get_error_class_fn: Option<GetErrorClassFn>, pub cache_storage_dir: Option<std::path::PathBuf>, pub origin_storage_dir: Option<std::path::PathBuf>, pub blob_store: Arc<BlobStore>, pub broadcast_channel: InMemoryBroadcastChannel, /// The store to use for transferring SharedArrayBuffers between isolates. /// If multiple isolates should have the possibility of sharing /// SharedArrayBuffers, they should use the same [SharedArrayBufferStore]. If /// no [SharedArrayBufferStore] is specified, SharedArrayBuffer can not be /// serialized. pub shared_array_buffer_store: Option<SharedArrayBufferStore>, /// The store to use for transferring `WebAssembly.Module` objects between /// isolates. /// If multiple isolates should have the possibility of sharing /// `WebAssembly.Module` objects, they should use the same /// [CompiledWasmModuleStore]. If no [CompiledWasmModuleStore] is specified, /// `WebAssembly.Module` objects cannot be serialized. pub compiled_wasm_module_store: Option<CompiledWasmModuleStore>, pub stdio: Stdio, } impl Default for WorkerOptions { fn default() -> Self
} impl MainWorker { pub fn bootstrap_from_options( main_module: ModuleSpecifier, permissions: PermissionsContainer, options: WorkerOptions, ) -> Self { let bootstrap_options = options.bootstrap.clone(); let mut worker = Self::from_options(main_module, permissions, options); worker.bootstrap(&bootstrap_options); worker } pub fn from_options( main_module: ModuleSpecifier, permissions: PermissionsContainer, mut options: WorkerOptions, ) -> Self { deno_core::extension!(deno_permissions_worker, options = { permissions: PermissionsContainer, unstable: bool, enable_testing_features: bool, }, state = |state, options| { state.put::<PermissionsContainer>(options.permissions); state.put(ops::UnstableChecker { unstable: options.unstable }); state.put(ops::TestingFeaturesEnabled(options.enable_testing_features)); }, ); // Permissions: many ops depend on this let unstable = options.bootstrap.unstable; let enable_testing_features = options.bootstrap.enable_testing_features; let exit_code = ExitCode(Arc::new(AtomicI32::new(0))); let create_cache = options.cache_storage_dir.map(|storage_dir| { let create_cache_fn = move || SqliteBackedCache::new(storage_dir.clone()); CreateCache(Arc::new(create_cache_fn)) }); // NOTE(bartlomieju): ordering is important here, keep it in sync with // `runtime/build.rs`, `runtime/web_worker.rs` and `cli/build.rs`! let mut extensions = vec![ // Web APIs deno_webidl::deno_webidl::init_ops_and_esm(), deno_console::deno_console::init_ops_and_esm(), deno_url::deno_url::init_ops_and_esm(), deno_web::deno_web::init_ops_and_esm::<PermissionsContainer>( options.blob_store.clone(), options.bootstrap.location.clone(), ), deno_fetch::deno_fetch::init_ops_and_esm::<PermissionsContainer>( deno_fetch::Options { user_agent: options.bootstrap.user_agent.clone(), root_cert_store_provider: options.root_cert_store_provider.clone(), unsafely_ignore_certificate_errors: options .unsafely_ignore_certificate_errors .clone(), file_fetch_handler: Rc::new(deno_fetch::FsFetchHandler), ..Default::default() }, ), deno_cache::deno_cache::init_ops_and_esm::<SqliteBackedCache>( create_cache, ), deno_websocket::deno_websocket::init_ops_and_esm::<PermissionsContainer>( options.bootstrap.user_agent.clone(), options.root_cert_store_provider.clone(), options.unsafely_ignore_certificate_errors.clone(), ), deno_webstorage::deno_webstorage::init_ops_and_esm( options.origin_storage_dir.clone(), ), deno_crypto::deno_crypto::init_ops_and_esm(options.seed), deno_broadcast_channel::deno_broadcast_channel::init_ops_and_esm( options.broadcast_channel.clone(), unstable, ), deno_ffi::deno_ffi::init_ops_and_esm::<PermissionsContainer>(unstable), deno_net::deno_net::init_ops_and_esm::<PermissionsContainer>( options.root_cert_store_provider.clone(), unstable, options.unsafely_ignore_certificate_errors.clone(), ), deno_tls::deno_tls::init_ops_and_esm(), deno_kv::deno_kv::init_ops_and_esm( MultiBackendDbHandler::remote_or_sqlite::<PermissionsContainer>( options.origin_storage_dir.clone(), ), unstable, ), deno_napi::deno_napi::init_ops_and_esm::<PermissionsContainer>(), deno_http::deno_http::init_ops_and_esm::<DefaultHttpPropertyExtractor>(), deno_io::deno_io::init_ops_and_esm(Some(options.stdio)), deno_fs::deno_fs::init_ops_and_esm::<PermissionsContainer>( unstable, options.fs.clone(), ), deno_node::deno_node::init_ops_and_esm::<PermissionsContainer>( options.npm_resolver, options.fs, ), // Ops from this crate ops::runtime::deno_runtime::init_ops_and_esm(main_module.clone()), ops::worker_host::deno_worker_host::init_ops_and_esm( 
options.create_web_worker_cb.clone(), options.format_js_error_fn.clone(), ), ops::fs_events::deno_fs_events::init_ops_and_esm(), ops::os::deno_os::init_ops_and_esm(exit_code.clone()), ops::permissions::deno_permissions::init_ops_and_esm(), ops::process::deno_process::init_ops_and_esm(), ops::signal::deno_signal::init_ops_and_esm(), ops::tty::deno_tty::init_ops_and_esm(), ops::http::deno_http_runtime::init_ops_and_esm(), deno_permissions_worker::init_ops_and_esm( permissions, unstable, enable_testing_features, ), runtime::init_ops_and_esm(), ]; for extension in &mut extensions { #[cfg(not(feature = "__runtime_js_sources"))] { extension.js_files = std::borrow::Cow::Borrowed(&[]); extension.esm_files = std::borrow::Cow::Borrowed(&[]); extension.esm_entry_point = None; } #[cfg(feature = "__runtime_js_sources")] { use crate::shared::maybe_transpile_source; for source in extension.esm_files.to_mut() { maybe_transpile_source(source).unwrap(); } for source in extension.js_files.to_mut() { maybe_transpile_source(source).unwrap(); } } } extensions.extend(std::mem::take(&mut options.extensions)); #[cfg(all(feature = "include_js_files_for_snapshotting", feature = "dont_create_runtime_snapshot", not(feature = "__runtime_js_sources")))] options.startup_snapshot.as_ref().expect("Sources are not embedded, snapshotting was disabled and a user snapshot was not provided."); // Clear extension modules from the module map, except preserve `node:*` // modules. let preserve_snapshotted_modules = Some(SUPPORTED_BUILTIN_NODE_MODULES_WITH_PREFIX); let mut js_runtime = JsRuntime::new(RuntimeOptions { module_loader: Some(options.module_loader.clone()), startup_snapshot: options .startup_snapshot .or_else(crate::js::deno_isolate_init), create_params: options.create_params, source_map_getter: options.source_map_getter, get_error_class_fn: options.get_error_class_fn, shared_array_buffer_store: options.shared_array_buffer_store.clone(), compiled_wasm_module_store: options.compiled_wasm_module_store.clone(), extensions, preserve_snapshotted_modules, inspector: options.maybe_inspector_server.is_some(), is_main: true, ..Default::default() }); if let Some(server) = options.maybe_inspector_server.clone() { server.register_inspector( main_module.to_string(), &mut js_runtime, options.should_break_on_first_statement || options.should_wait_for_inspector_session, ); // Put inspector handle into the op state so we can put a breakpoint when // executing a CJS entrypoint. 
let op_state = js_runtime.op_state(); let inspector = js_runtime.inspector(); op_state.borrow_mut().put(inspector); } let bootstrap_fn_global = { let context = js_runtime.main_context(); let scope = &mut js_runtime.handle_scope(); let context_local = v8::Local::new(scope, context); let global_obj = context_local.global(scope); let bootstrap_str = v8::String::new_external_onebyte_static(scope, b"bootstrap").unwrap(); let bootstrap_ns: v8::Local<v8::Object> = global_obj .get(scope, bootstrap_str.into()) .unwrap() .try_into() .unwrap(); let main_runtime_str = v8::String::new_external_onebyte_static(scope, b"mainRuntime").unwrap(); let bootstrap_fn = bootstrap_ns.get(scope, main_runtime_str.into()).unwrap(); let bootstrap_fn = v8::Local::<v8::Function>::try_from(bootstrap_fn).unwrap(); v8::Global::new(scope, bootstrap_fn) }; Self { js_runtime, should_break_on_first_statement: options.should_break_on_first_statement, should_wait_for_inspector_session: options .should_wait_for_inspector_session, exit_code, bootstrap_fn_global: Some(bootstrap_fn_global), } } pub fn bootstrap(&mut self, options: &BootstrapOptions) { let scope = &mut self.js_runtime.handle_scope(); let args = options.as_v8(scope); let bootstrap_fn = self.bootstrap_fn_global.take().unwrap(); let bootstrap_fn = v8::Local::new(scope, bootstrap_fn); let undefined = v8::undefined(scope); bootstrap_fn.call(scope, undefined.into(), &[args]).unwrap(); } /// See [JsRuntime::execute_script](deno_core::JsRuntime::execute_script) pub fn execute_script( &mut self, script_name: &'static str, source_code: ModuleCode, ) -> Result<v8::Global<v8::Value>, AnyError> { self.js_runtime.execute_script(script_name, source_code) } /// Loads and instantiates specified JavaScript module as "main" module. pub async fn preload_main_module( &mut self, module_specifier: &ModuleSpecifier, ) -> Result<ModuleId, AnyError> { self .js_runtime .load_main_module(module_specifier, None) .await } /// Loads and instantiates specified JavaScript module as "side" module. pub async fn preload_side_module( &mut self, module_specifier: &ModuleSpecifier, ) -> Result<ModuleId, AnyError> { self .js_runtime .load_side_module(module_specifier, None) .await } /// Executes specified JavaScript module. pub async fn evaluate_module( &mut self, id: ModuleId, ) -> Result<(), AnyError> { self.wait_for_inspector_session(); let mut receiver = self.js_runtime.mod_evaluate(id); tokio::select! { // Not using biased mode leads to non-determinism for relatively simple // programs. biased; maybe_result = &mut receiver => { debug!("received module evaluate {:#?}", maybe_result); maybe_result.expect("Module evaluation result not provided.") } event_loop_result = self.run_event_loop(false) => { event_loop_result?; let maybe_result = receiver.await; maybe_result.expect("Module evaluation result not provided.") } } } /// Loads, instantiates and executes specified JavaScript module. pub async fn execute_side_module( &mut self, module_specifier: &ModuleSpecifier, ) -> Result<(), AnyError> { let id = self.preload_side_module(module_specifier).await?; self.evaluate_module(id).await } /// Loads, instantiates and executes specified JavaScript module. /// /// This module will have "import.meta.main" equal to true. 
pub async fn execute_main_module( &mut self, module_specifier: &ModuleSpecifier, ) -> Result<(), AnyError> { let id = self.preload_main_module(module_specifier).await?; self.evaluate_module(id).await } fn wait_for_inspector_session(&mut self) { if self.should_break_on_first_statement { self .js_runtime .inspector() .borrow_mut() .wait_for_session_and_break_on_next_statement(); } else if self.should_wait_for_inspector_session { self.js_runtime.inspector().borrow_mut().wait_for_session(); } } /// Create new inspector session. This function panics if Worker /// was not configured to create inspector. pub async fn create_inspector_session(&mut self) -> LocalInspectorSession { self.js_runtime.maybe_init_inspector(); self.js_runtime.inspector().borrow().create_local_session() } pub fn poll_event_loop( &mut self, cx: &mut Context, wait_for_inspector: bool, ) -> Poll<Result<(), AnyError>> { self.js_runtime.poll_event_loop(cx, wait_for_inspector) } pub async fn run_event_loop( &mut self, wait_for_inspector: bool, ) -> Result<(), AnyError> { self.js_runtime.run_event_loop(wait_for_inspector).await } /// A utility function that runs provided future concurrently with the event loop. /// /// Useful when using a local inspector session. pub async fn with_event_loop<'a, T>( &mut self, mut fut: Pin<Box<dyn Future<Output = T> + 'a>>, ) -> T { loop { tokio::select! { biased; result = &mut fut => { return result; } _ = self.run_event_loop(false) => {} }; } } /// Return exit code set by the executed code (either in main worker /// or one of child web workers). pub fn exit_code(&self) -> i32 { self.exit_code.get() } /// Dispatches "load" event to the JavaScript runtime. /// /// Does not poll event loop, and thus not await any of the "load" event handlers. pub fn dispatch_load_event( &mut self, script_name: &'static str, ) -> Result<(), AnyError> { self.js_runtime.execute_script( script_name, // NOTE(@bartlomieju): not using `globalThis` here, because user might delete // it. Instead we're using global `dispatchEvent` function which will // used a saved reference to global scope. ascii_str!("dispatchEvent(new Event('load'))"), )?; Ok(()) } /// Dispatches "unload" event to the JavaScript runtime. /// /// Does not poll event loop, and thus not await any of the "unload" event handlers. pub fn dispatch_unload_event( &mut self, script_name: &'static str, ) -> Result<(), AnyError> { self.js_runtime.execute_script( script_name, // NOTE(@bartlomieju): not using `globalThis` here, because user might delete // it. Instead we're using global `dispatchEvent` function which will // used a saved reference to global scope. ascii_str!("dispatchEvent(new Event('unload'))"), )?; Ok(()) } /// Dispatches "beforeunload" event to the JavaScript runtime. Returns a boolean /// indicating if the event was prevented and thus event loop should continue /// running. pub fn dispatch_beforeunload_event( &mut self, script_name: &'static str, ) -> Result<bool, AnyError> { let value = self.js_runtime.execute_script( script_name, // NOTE(@bartlomieju): not using `globalThis` here, because user might delete // it. Instead we're using global `dispatchEvent` function which will // used a saved reference to global scope. ascii_str!( "dispatchEvent(new Event('beforeunload', { cancelable: true }));" ), )?; let local_value = value.open(&mut self.js_runtime.handle_scope()); Ok(local_value.is_false()) } }
{ Self { create_web_worker_cb: Arc::new(|_| { unimplemented!("web workers are not supported") }), fs: Arc::new(deno_fs::RealFs), module_loader: Rc::new(FsModuleLoader), seed: None, unsafely_ignore_certificate_errors: Default::default(), should_break_on_first_statement: Default::default(), should_wait_for_inspector_session: Default::default(), compiled_wasm_module_store: Default::default(), shared_array_buffer_store: Default::default(), maybe_inspector_server: Default::default(), format_js_error_fn: Default::default(), get_error_class_fn: Default::default(), origin_storage_dir: Default::default(), cache_storage_dir: Default::default(), broadcast_channel: Default::default(), source_map_getter: Default::default(), root_cert_store_provider: Default::default(), npm_resolver: Default::default(), blob_store: Default::default(), extensions: Default::default(), startup_snapshot: Default::default(), create_params: Default::default(), bootstrap: Default::default(), stdio: Default::default(), } }
identifier_body
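
The completion for this example is the body of Default for WorkerOptions. A practical consequence, shown in this hedged sketch (an editorial addition; crate paths assumed, field names taken from the struct definition above), is that embedders can override only the fields they care about via struct-update syntax:

use std::rc::Rc;

use deno_core::FsModuleLoader;
use deno_runtime::worker::WorkerOptions;

fn custom_worker_options() -> WorkerOptions {
    WorkerOptions {
        // Explicit module loader; FsModuleLoader is also the default above.
        module_loader: Rc::new(FsModuleLoader),
        // Seed forwarded to the deno_crypto extension in from_options.
        seed: Some(42),
        // Every other field keeps its Default::default() value.
        ..Default::default()
    }
}
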
worker.rs
// Copyright 2018-2023 the Deno authors. All rights reserved. MIT license. use std::pin::Pin; use std::rc::Rc; use std::sync::atomic::AtomicI32; use std::sync::atomic::Ordering::Relaxed; use std::sync::Arc; use std::task::Context; use std::task::Poll; use deno_broadcast_channel::InMemoryBroadcastChannel; use deno_cache::CreateCache; use deno_cache::SqliteBackedCache; use deno_core::ascii_str; use deno_core::error::AnyError; use deno_core::error::JsError; use deno_core::futures::Future; use deno_core::v8; use deno_core::CompiledWasmModuleStore; use deno_core::Extension; use deno_core::FsModuleLoader; use deno_core::GetErrorClassFn; use deno_core::JsRuntime; use deno_core::LocalInspectorSession; use deno_core::ModuleCode; use deno_core::ModuleId; use deno_core::ModuleLoader; use deno_core::ModuleSpecifier; use deno_core::RuntimeOptions; use deno_core::SharedArrayBufferStore; use deno_core::Snapshot; use deno_core::SourceMapGetter; use deno_fs::FileSystem; use deno_http::DefaultHttpPropertyExtractor; use deno_io::Stdio; use deno_kv::dynamic::MultiBackendDbHandler; use deno_node::SUPPORTED_BUILTIN_NODE_MODULES_WITH_PREFIX; use deno_tls::RootCertStoreProvider; use deno_web::BlobStore; use log::debug; use crate::inspector_server::InspectorServer; use crate::ops; use crate::permissions::PermissionsContainer; use crate::shared::runtime; use crate::BootstrapOptions; pub type FormatJsErrorFn = dyn Fn(&JsError) -> String + Sync + Send; #[derive(Clone, Default)] pub struct ExitCode(Arc<AtomicI32>); impl ExitCode { pub fn get(&self) -> i32 { self.0.load(Relaxed) } pub fn set(&mut self, code: i32) { self.0.store(code, Relaxed); } } /// This worker is created and used by almost all /// subcommands in Deno executable. /// /// It provides ops available in the `Deno` namespace. /// /// All `WebWorker`s created during program execution /// are descendants of this worker. pub struct MainWorker { pub js_runtime: JsRuntime, should_break_on_first_statement: bool, should_wait_for_inspector_session: bool, exit_code: ExitCode, bootstrap_fn_global: Option<v8::Global<v8::Function>>, } pub struct WorkerOptions { pub bootstrap: BootstrapOptions, /// JsRuntime extensions, not to be confused with ES modules. /// /// Extensions register "ops" and JavaScript sources provided in `js` or `esm` /// configuration. If you are using a snapshot, then extensions shouldn't /// provide JavaScript sources that were already snapshotted. pub extensions: Vec<Extension>, /// V8 snapshot that should be loaded on startup. pub startup_snapshot: Option<Snapshot>, /// Optional isolate creation parameters, such as heap limits. pub create_params: Option<v8::CreateParams>, pub unsafely_ignore_certificate_errors: Option<Vec<String>>, pub root_cert_store_provider: Option<Arc<dyn RootCertStoreProvider>>, pub seed: Option<u64>, pub fs: Arc<dyn FileSystem>, /// Implementation of `ModuleLoader` which will be /// called when V8 requests to load ES modules. /// /// If not provided runtime will error if code being /// executed tries to load modules. pub module_loader: Rc<dyn ModuleLoader>, pub npm_resolver: Option<Arc<dyn deno_node::NpmResolver>>, // Callbacks invoked when creating new instance of WebWorker pub create_web_worker_cb: Arc<ops::worker_host::CreateWebWorkerCb>, pub format_js_error_fn: Option<Arc<FormatJsErrorFn>>, /// Source map reference for errors. 
pub source_map_getter: Option<Box<dyn SourceMapGetter>>, pub maybe_inspector_server: Option<Arc<InspectorServer>>, // If true, the worker will wait for inspector session and break on first // statement of user code. Takes higher precedence than // `should_wait_for_inspector_session`. pub should_break_on_first_statement: bool, // If true, the worker will wait for inspector session before executing // user code. pub should_wait_for_inspector_session: bool, /// Allows to map error type to a string "class" used to represent /// error in JavaScript. pub get_error_class_fn: Option<GetErrorClassFn>, pub cache_storage_dir: Option<std::path::PathBuf>, pub origin_storage_dir: Option<std::path::PathBuf>, pub blob_store: Arc<BlobStore>, pub broadcast_channel: InMemoryBroadcastChannel, /// The store to use for transferring SharedArrayBuffers between isolates. /// If multiple isolates should have the possibility of sharing /// SharedArrayBuffers, they should use the same [SharedArrayBufferStore]. If /// no [SharedArrayBufferStore] is specified, SharedArrayBuffer can not be /// serialized. pub shared_array_buffer_store: Option<SharedArrayBufferStore>, /// The store to use for transferring `WebAssembly.Module` objects between /// isolates. /// If multiple isolates should have the possibility of sharing /// `WebAssembly.Module` objects, they should use the same /// [CompiledWasmModuleStore]. If no [CompiledWasmModuleStore] is specified, /// `WebAssembly.Module` objects cannot be serialized. pub compiled_wasm_module_store: Option<CompiledWasmModuleStore>, pub stdio: Stdio, } impl Default for WorkerOptions { fn default() -> Self { Self { create_web_worker_cb: Arc::new(|_| { unimplemented!("web workers are not supported") }), fs: Arc::new(deno_fs::RealFs), module_loader: Rc::new(FsModuleLoader), seed: None, unsafely_ignore_certificate_errors: Default::default(), should_break_on_first_statement: Default::default(), should_wait_for_inspector_session: Default::default(), compiled_wasm_module_store: Default::default(), shared_array_buffer_store: Default::default(), maybe_inspector_server: Default::default(), format_js_error_fn: Default::default(), get_error_class_fn: Default::default(), origin_storage_dir: Default::default(), cache_storage_dir: Default::default(), broadcast_channel: Default::default(), source_map_getter: Default::default(), root_cert_store_provider: Default::default(), npm_resolver: Default::default(), blob_store: Default::default(), extensions: Default::default(), startup_snapshot: Default::default(), create_params: Default::default(), bootstrap: Default::default(), stdio: Default::default(), } } } impl MainWorker { pub fn bootstrap_from_options( main_module: ModuleSpecifier, permissions: PermissionsContainer, options: WorkerOptions, ) -> Self { let bootstrap_options = options.bootstrap.clone(); let mut worker = Self::from_options(main_module, permissions, options); worker.bootstrap(&bootstrap_options); worker } pub fn from_options( main_module: ModuleSpecifier, permissions: PermissionsContainer, mut options: WorkerOptions, ) -> Self { deno_core::extension!(deno_permissions_worker, options = { permissions: PermissionsContainer, unstable: bool, enable_testing_features: bool, }, state = |state, options| { state.put::<PermissionsContainer>(options.permissions); state.put(ops::UnstableChecker { unstable: options.unstable }); state.put(ops::TestingFeaturesEnabled(options.enable_testing_features)); }, ); // Permissions: many ops depend on this let unstable = options.bootstrap.unstable; let 
enable_testing_features = options.bootstrap.enable_testing_features; let exit_code = ExitCode(Arc::new(AtomicI32::new(0))); let create_cache = options.cache_storage_dir.map(|storage_dir| { let create_cache_fn = move || SqliteBackedCache::new(storage_dir.clone()); CreateCache(Arc::new(create_cache_fn)) }); // NOTE(bartlomieju): ordering is important here, keep it in sync with // `runtime/build.rs`, `runtime/web_worker.rs` and `cli/build.rs`! let mut extensions = vec![ // Web APIs deno_webidl::deno_webidl::init_ops_and_esm(), deno_console::deno_console::init_ops_and_esm(), deno_url::deno_url::init_ops_and_esm(), deno_web::deno_web::init_ops_and_esm::<PermissionsContainer>( options.blob_store.clone(), options.bootstrap.location.clone(), ), deno_fetch::deno_fetch::init_ops_and_esm::<PermissionsContainer>( deno_fetch::Options { user_agent: options.bootstrap.user_agent.clone(), root_cert_store_provider: options.root_cert_store_provider.clone(), unsafely_ignore_certificate_errors: options .unsafely_ignore_certificate_errors .clone(), file_fetch_handler: Rc::new(deno_fetch::FsFetchHandler), ..Default::default() }, ), deno_cache::deno_cache::init_ops_and_esm::<SqliteBackedCache>( create_cache, ), deno_websocket::deno_websocket::init_ops_and_esm::<PermissionsContainer>( options.bootstrap.user_agent.clone(), options.root_cert_store_provider.clone(), options.unsafely_ignore_certificate_errors.clone(), ), deno_webstorage::deno_webstorage::init_ops_and_esm( options.origin_storage_dir.clone(), ), deno_crypto::deno_crypto::init_ops_and_esm(options.seed), deno_broadcast_channel::deno_broadcast_channel::init_ops_and_esm( options.broadcast_channel.clone(), unstable, ), deno_ffi::deno_ffi::init_ops_and_esm::<PermissionsContainer>(unstable), deno_net::deno_net::init_ops_and_esm::<PermissionsContainer>( options.root_cert_store_provider.clone(), unstable, options.unsafely_ignore_certificate_errors.clone(), ), deno_tls::deno_tls::init_ops_and_esm(), deno_kv::deno_kv::init_ops_and_esm( MultiBackendDbHandler::remote_or_sqlite::<PermissionsContainer>( options.origin_storage_dir.clone(), ), unstable, ), deno_napi::deno_napi::init_ops_and_esm::<PermissionsContainer>(), deno_http::deno_http::init_ops_and_esm::<DefaultHttpPropertyExtractor>(), deno_io::deno_io::init_ops_and_esm(Some(options.stdio)), deno_fs::deno_fs::init_ops_and_esm::<PermissionsContainer>( unstable, options.fs.clone(), ), deno_node::deno_node::init_ops_and_esm::<PermissionsContainer>( options.npm_resolver, options.fs, ), // Ops from this crate ops::runtime::deno_runtime::init_ops_and_esm(main_module.clone()), ops::worker_host::deno_worker_host::init_ops_and_esm( options.create_web_worker_cb.clone(), options.format_js_error_fn.clone(), ), ops::fs_events::deno_fs_events::init_ops_and_esm(), ops::os::deno_os::init_ops_and_esm(exit_code.clone()), ops::permissions::deno_permissions::init_ops_and_esm(), ops::process::deno_process::init_ops_and_esm(), ops::signal::deno_signal::init_ops_and_esm(), ops::tty::deno_tty::init_ops_and_esm(), ops::http::deno_http_runtime::init_ops_and_esm(), deno_permissions_worker::init_ops_and_esm( permissions, unstable, enable_testing_features, ), runtime::init_ops_and_esm(), ]; for extension in &mut extensions { #[cfg(not(feature = "__runtime_js_sources"))] { extension.js_files = std::borrow::Cow::Borrowed(&[]); extension.esm_files = std::borrow::Cow::Borrowed(&[]); extension.esm_entry_point = None; } #[cfg(feature = "__runtime_js_sources")] { use crate::shared::maybe_transpile_source; for source in extension.esm_files.to_mut() 
{ maybe_transpile_source(source).unwrap(); } for source in extension.js_files.to_mut() { maybe_transpile_source(source).unwrap(); } } } extensions.extend(std::mem::take(&mut options.extensions)); #[cfg(all(feature = "include_js_files_for_snapshotting", feature = "dont_create_runtime_snapshot", not(feature = "__runtime_js_sources")))] options.startup_snapshot.as_ref().expect("Sources are not embedded, snapshotting was disabled and a user snapshot was not provided."); // Clear extension modules from the module map, except preserve `node:*` // modules. let preserve_snapshotted_modules = Some(SUPPORTED_BUILTIN_NODE_MODULES_WITH_PREFIX); let mut js_runtime = JsRuntime::new(RuntimeOptions { module_loader: Some(options.module_loader.clone()), startup_snapshot: options .startup_snapshot .or_else(crate::js::deno_isolate_init), create_params: options.create_params, source_map_getter: options.source_map_getter, get_error_class_fn: options.get_error_class_fn, shared_array_buffer_store: options.shared_array_buffer_store.clone(), compiled_wasm_module_store: options.compiled_wasm_module_store.clone(), extensions, preserve_snapshotted_modules, inspector: options.maybe_inspector_server.is_some(), is_main: true, ..Default::default() }); if let Some(server) = options.maybe_inspector_server.clone()
let bootstrap_fn_global = { let context = js_runtime.main_context(); let scope = &mut js_runtime.handle_scope(); let context_local = v8::Local::new(scope, context); let global_obj = context_local.global(scope); let bootstrap_str = v8::String::new_external_onebyte_static(scope, b"bootstrap").unwrap(); let bootstrap_ns: v8::Local<v8::Object> = global_obj .get(scope, bootstrap_str.into()) .unwrap() .try_into() .unwrap(); let main_runtime_str = v8::String::new_external_onebyte_static(scope, b"mainRuntime").unwrap(); let bootstrap_fn = bootstrap_ns.get(scope, main_runtime_str.into()).unwrap(); let bootstrap_fn = v8::Local::<v8::Function>::try_from(bootstrap_fn).unwrap(); v8::Global::new(scope, bootstrap_fn) }; Self { js_runtime, should_break_on_first_statement: options.should_break_on_first_statement, should_wait_for_inspector_session: options .should_wait_for_inspector_session, exit_code, bootstrap_fn_global: Some(bootstrap_fn_global), } } pub fn bootstrap(&mut self, options: &BootstrapOptions) { let scope = &mut self.js_runtime.handle_scope(); let args = options.as_v8(scope); let bootstrap_fn = self.bootstrap_fn_global.take().unwrap(); let bootstrap_fn = v8::Local::new(scope, bootstrap_fn); let undefined = v8::undefined(scope); bootstrap_fn.call(scope, undefined.into(), &[args]).unwrap(); } /// See [JsRuntime::execute_script](deno_core::JsRuntime::execute_script) pub fn execute_script( &mut self, script_name: &'static str, source_code: ModuleCode, ) -> Result<v8::Global<v8::Value>, AnyError> { self.js_runtime.execute_script(script_name, source_code) } /// Loads and instantiates specified JavaScript module as "main" module. pub async fn preload_main_module( &mut self, module_specifier: &ModuleSpecifier, ) -> Result<ModuleId, AnyError> { self .js_runtime .load_main_module(module_specifier, None) .await } /// Loads and instantiates specified JavaScript module as "side" module. pub async fn preload_side_module( &mut self, module_specifier: &ModuleSpecifier, ) -> Result<ModuleId, AnyError> { self .js_runtime .load_side_module(module_specifier, None) .await } /// Executes specified JavaScript module. pub async fn evaluate_module( &mut self, id: ModuleId, ) -> Result<(), AnyError> { self.wait_for_inspector_session(); let mut receiver = self.js_runtime.mod_evaluate(id); tokio::select! { // Not using biased mode leads to non-determinism for relatively simple // programs. biased; maybe_result = &mut receiver => { debug!("received module evaluate {:#?}", maybe_result); maybe_result.expect("Module evaluation result not provided.") } event_loop_result = self.run_event_loop(false) => { event_loop_result?; let maybe_result = receiver.await; maybe_result.expect("Module evaluation result not provided.") } } } /// Loads, instantiates and executes specified JavaScript module. pub async fn execute_side_module( &mut self, module_specifier: &ModuleSpecifier, ) -> Result<(), AnyError> { let id = self.preload_side_module(module_specifier).await?; self.evaluate_module(id).await } /// Loads, instantiates and executes specified JavaScript module. /// /// This module will have "import.meta.main" equal to true. 
pub async fn execute_main_module( &mut self, module_specifier: &ModuleSpecifier, ) -> Result<(), AnyError> { let id = self.preload_main_module(module_specifier).await?; self.evaluate_module(id).await } fn wait_for_inspector_session(&mut self) { if self.should_break_on_first_statement { self .js_runtime .inspector() .borrow_mut() .wait_for_session_and_break_on_next_statement(); } else if self.should_wait_for_inspector_session { self.js_runtime.inspector().borrow_mut().wait_for_session(); } } /// Create new inspector session. This function panics if Worker /// was not configured to create inspector. pub async fn create_inspector_session(&mut self) -> LocalInspectorSession { self.js_runtime.maybe_init_inspector(); self.js_runtime.inspector().borrow().create_local_session() } pub fn poll_event_loop( &mut self, cx: &mut Context, wait_for_inspector: bool, ) -> Poll<Result<(), AnyError>> { self.js_runtime.poll_event_loop(cx, wait_for_inspector) } pub async fn run_event_loop( &mut self, wait_for_inspector: bool, ) -> Result<(), AnyError> { self.js_runtime.run_event_loop(wait_for_inspector).await } /// A utility function that runs provided future concurrently with the event loop. /// /// Useful when using a local inspector session. pub async fn with_event_loop<'a, T>( &mut self, mut fut: Pin<Box<dyn Future<Output = T> + 'a>>, ) -> T { loop { tokio::select! { biased; result = &mut fut => { return result; } _ = self.run_event_loop(false) => {} }; } } /// Return exit code set by the executed code (either in main worker /// or one of child web workers). pub fn exit_code(&self) -> i32 { self.exit_code.get() } /// Dispatches "load" event to the JavaScript runtime. /// /// Does not poll event loop, and thus not await any of the "load" event handlers. pub fn dispatch_load_event( &mut self, script_name: &'static str, ) -> Result<(), AnyError> { self.js_runtime.execute_script( script_name, // NOTE(@bartlomieju): not using `globalThis` here, because user might delete // it. Instead we're using global `dispatchEvent` function which will // used a saved reference to global scope. ascii_str!("dispatchEvent(new Event('load'))"), )?; Ok(()) } /// Dispatches "unload" event to the JavaScript runtime. /// /// Does not poll event loop, and thus not await any of the "unload" event handlers. pub fn dispatch_unload_event( &mut self, script_name: &'static str, ) -> Result<(), AnyError> { self.js_runtime.execute_script( script_name, // NOTE(@bartlomieju): not using `globalThis` here, because user might delete // it. Instead we're using global `dispatchEvent` function which will // used a saved reference to global scope. ascii_str!("dispatchEvent(new Event('unload'))"), )?; Ok(()) } /// Dispatches "beforeunload" event to the JavaScript runtime. Returns a boolean /// indicating if the event was prevented and thus event loop should continue /// running. pub fn dispatch_beforeunload_event( &mut self, script_name: &'static str, ) -> Result<bool, AnyError> { let value = self.js_runtime.execute_script( script_name, // NOTE(@bartlomieju): not using `globalThis` here, because user might delete // it. Instead we're using global `dispatchEvent` function which will // used a saved reference to global scope. ascii_str!( "dispatchEvent(new Event('beforeunload', { cancelable: true }));" ), )?; let local_value = value.open(&mut self.js_runtime.handle_scope()); Ok(local_value.is_false()) } }
{ server.register_inspector( main_module.to_string(), &mut js_runtime, options.should_break_on_first_statement || options.should_wait_for_inspector_session, ); // Put inspector handle into the op state so we can put a breakpoint when // executing a CJS entrypoint. let op_state = js_runtime.op_state(); let inspector = js_runtime.inspector(); op_state.borrow_mut().put(inspector); }
conditional_block
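Each record in this dump stores a source file split into a prefix, a suffix, and an elided middle, with fim_type naming what kind of span the middle is (here, the body of a conditional block). Below is a minimal Python sketch of how such a record could be reassembled for inspection, assuming the column names shown in this dump; the helper and the toy record are illustrative only.

# Hypothetical helper for this dump's record layout: file_name, prefix,
# suffix, middle, fim_type. Concatenating prefix + middle + suffix yields
# the original file text.
def reassemble(record):
    return record["prefix"] + record["middle"] + record["suffix"]

toy_record = {
    "file_name": "worker.rs",
    "prefix": "if let Some(server) = options.maybe_inspector_server.clone() ",
    "middle": "{ /* register inspector, stash handle in op state */ }",
    "suffix": "\n    let bootstrap_fn_global = { /* ... */ };",
    "fim_type": "conditional_block",
}
print(reassemble(toy_record))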
worker.rs
// Copyright 2018-2023 the Deno authors. All rights reserved. MIT license. use std::pin::Pin; use std::rc::Rc; use std::sync::atomic::AtomicI32; use std::sync::atomic::Ordering::Relaxed; use std::sync::Arc; use std::task::Context; use std::task::Poll; use deno_broadcast_channel::InMemoryBroadcastChannel; use deno_cache::CreateCache; use deno_cache::SqliteBackedCache; use deno_core::ascii_str; use deno_core::error::AnyError; use deno_core::error::JsError; use deno_core::futures::Future; use deno_core::v8; use deno_core::CompiledWasmModuleStore; use deno_core::Extension; use deno_core::FsModuleLoader; use deno_core::GetErrorClassFn; use deno_core::JsRuntime; use deno_core::LocalInspectorSession; use deno_core::ModuleCode; use deno_core::ModuleId; use deno_core::ModuleLoader; use deno_core::ModuleSpecifier; use deno_core::RuntimeOptions; use deno_core::SharedArrayBufferStore; use deno_core::Snapshot; use deno_core::SourceMapGetter; use deno_fs::FileSystem; use deno_http::DefaultHttpPropertyExtractor; use deno_io::Stdio; use deno_kv::dynamic::MultiBackendDbHandler; use deno_node::SUPPORTED_BUILTIN_NODE_MODULES_WITH_PREFIX; use deno_tls::RootCertStoreProvider; use deno_web::BlobStore; use log::debug; use crate::inspector_server::InspectorServer; use crate::ops; use crate::permissions::PermissionsContainer; use crate::shared::runtime; use crate::BootstrapOptions; pub type FormatJsErrorFn = dyn Fn(&JsError) -> String + Sync + Send; #[derive(Clone, Default)] pub struct ExitCode(Arc<AtomicI32>); impl ExitCode { pub fn get(&self) -> i32 { self.0.load(Relaxed) } pub fn set(&mut self, code: i32) { self.0.store(code, Relaxed); } } /// This worker is created and used by almost all /// subcommands in Deno executable. /// /// It provides ops available in the `Deno` namespace. /// /// All `WebWorker`s created during program execution /// are descendants of this worker. pub struct MainWorker { pub js_runtime: JsRuntime, should_break_on_first_statement: bool, should_wait_for_inspector_session: bool, exit_code: ExitCode, bootstrap_fn_global: Option<v8::Global<v8::Function>>, } pub struct WorkerOptions { pub bootstrap: BootstrapOptions, /// JsRuntime extensions, not to be confused with ES modules. /// /// Extensions register "ops" and JavaScript sources provided in `js` or `esm` /// configuration. If you are using a snapshot, then extensions shouldn't /// provide JavaScript sources that were already snapshotted. pub extensions: Vec<Extension>, /// V8 snapshot that should be loaded on startup. pub startup_snapshot: Option<Snapshot>, /// Optional isolate creation parameters, such as heap limits. pub create_params: Option<v8::CreateParams>, pub unsafely_ignore_certificate_errors: Option<Vec<String>>, pub root_cert_store_provider: Option<Arc<dyn RootCertStoreProvider>>, pub seed: Option<u64>, pub fs: Arc<dyn FileSystem>, /// Implementation of `ModuleLoader` which will be /// called when V8 requests to load ES modules. /// /// If not provided runtime will error if code being /// executed tries to load modules. pub module_loader: Rc<dyn ModuleLoader>, pub npm_resolver: Option<Arc<dyn deno_node::NpmResolver>>, // Callbacks invoked when creating new instance of WebWorker pub create_web_worker_cb: Arc<ops::worker_host::CreateWebWorkerCb>, pub format_js_error_fn: Option<Arc<FormatJsErrorFn>>, /// Source map reference for errors. 
pub source_map_getter: Option<Box<dyn SourceMapGetter>>, pub maybe_inspector_server: Option<Arc<InspectorServer>>, // If true, the worker will wait for inspector session and break on first // statement of user code. Takes higher precedence than // `should_wait_for_inspector_session`. pub should_break_on_first_statement: bool, // If true, the worker will wait for inspector session before executing // user code. pub should_wait_for_inspector_session: bool, /// Allows to map error type to a string "class" used to represent /// error in JavaScript. pub get_error_class_fn: Option<GetErrorClassFn>, pub cache_storage_dir: Option<std::path::PathBuf>, pub origin_storage_dir: Option<std::path::PathBuf>, pub blob_store: Arc<BlobStore>, pub broadcast_channel: InMemoryBroadcastChannel, /// The store to use for transferring SharedArrayBuffers between isolates. /// If multiple isolates should have the possibility of sharing /// SharedArrayBuffers, they should use the same [SharedArrayBufferStore]. If /// no [SharedArrayBufferStore] is specified, SharedArrayBuffer can not be /// serialized. pub shared_array_buffer_store: Option<SharedArrayBufferStore>, /// The store to use for transferring `WebAssembly.Module` objects between /// isolates. /// If multiple isolates should have the possibility of sharing /// `WebAssembly.Module` objects, they should use the same /// [CompiledWasmModuleStore]. If no [CompiledWasmModuleStore] is specified, /// `WebAssembly.Module` objects cannot be serialized. pub compiled_wasm_module_store: Option<CompiledWasmModuleStore>, pub stdio: Stdio, } impl Default for WorkerOptions { fn default() -> Self { Self { create_web_worker_cb: Arc::new(|_| { unimplemented!("web workers are not supported") }), fs: Arc::new(deno_fs::RealFs), module_loader: Rc::new(FsModuleLoader), seed: None, unsafely_ignore_certificate_errors: Default::default(), should_break_on_first_statement: Default::default(), should_wait_for_inspector_session: Default::default(), compiled_wasm_module_store: Default::default(), shared_array_buffer_store: Default::default(), maybe_inspector_server: Default::default(), format_js_error_fn: Default::default(), get_error_class_fn: Default::default(), origin_storage_dir: Default::default(), cache_storage_dir: Default::default(), broadcast_channel: Default::default(), source_map_getter: Default::default(), root_cert_store_provider: Default::default(), npm_resolver: Default::default(), blob_store: Default::default(), extensions: Default::default(), startup_snapshot: Default::default(), create_params: Default::default(), bootstrap: Default::default(), stdio: Default::default(), } } } impl MainWorker { pub fn bootstrap_from_options( main_module: ModuleSpecifier, permissions: PermissionsContainer, options: WorkerOptions, ) -> Self { let bootstrap_options = options.bootstrap.clone(); let mut worker = Self::from_options(main_module, permissions, options); worker.bootstrap(&bootstrap_options); worker } pub fn from_options( main_module: ModuleSpecifier, permissions: PermissionsContainer, mut options: WorkerOptions, ) -> Self { deno_core::extension!(deno_permissions_worker, options = {
state = |state, options| { state.put::<PermissionsContainer>(options.permissions); state.put(ops::UnstableChecker { unstable: options.unstable }); state.put(ops::TestingFeaturesEnabled(options.enable_testing_features)); }, ); // Permissions: many ops depend on this let unstable = options.bootstrap.unstable; let enable_testing_features = options.bootstrap.enable_testing_features; let exit_code = ExitCode(Arc::new(AtomicI32::new(0))); let create_cache = options.cache_storage_dir.map(|storage_dir| { let create_cache_fn = move || SqliteBackedCache::new(storage_dir.clone()); CreateCache(Arc::new(create_cache_fn)) }); // NOTE(bartlomieju): ordering is important here, keep it in sync with // `runtime/build.rs`, `runtime/web_worker.rs` and `cli/build.rs`! let mut extensions = vec![ // Web APIs deno_webidl::deno_webidl::init_ops_and_esm(), deno_console::deno_console::init_ops_and_esm(), deno_url::deno_url::init_ops_and_esm(), deno_web::deno_web::init_ops_and_esm::<PermissionsContainer>( options.blob_store.clone(), options.bootstrap.location.clone(), ), deno_fetch::deno_fetch::init_ops_and_esm::<PermissionsContainer>( deno_fetch::Options { user_agent: options.bootstrap.user_agent.clone(), root_cert_store_provider: options.root_cert_store_provider.clone(), unsafely_ignore_certificate_errors: options .unsafely_ignore_certificate_errors .clone(), file_fetch_handler: Rc::new(deno_fetch::FsFetchHandler), ..Default::default() }, ), deno_cache::deno_cache::init_ops_and_esm::<SqliteBackedCache>( create_cache, ), deno_websocket::deno_websocket::init_ops_and_esm::<PermissionsContainer>( options.bootstrap.user_agent.clone(), options.root_cert_store_provider.clone(), options.unsafely_ignore_certificate_errors.clone(), ), deno_webstorage::deno_webstorage::init_ops_and_esm( options.origin_storage_dir.clone(), ), deno_crypto::deno_crypto::init_ops_and_esm(options.seed), deno_broadcast_channel::deno_broadcast_channel::init_ops_and_esm( options.broadcast_channel.clone(), unstable, ), deno_ffi::deno_ffi::init_ops_and_esm::<PermissionsContainer>(unstable), deno_net::deno_net::init_ops_and_esm::<PermissionsContainer>( options.root_cert_store_provider.clone(), unstable, options.unsafely_ignore_certificate_errors.clone(), ), deno_tls::deno_tls::init_ops_and_esm(), deno_kv::deno_kv::init_ops_and_esm( MultiBackendDbHandler::remote_or_sqlite::<PermissionsContainer>( options.origin_storage_dir.clone(), ), unstable, ), deno_napi::deno_napi::init_ops_and_esm::<PermissionsContainer>(), deno_http::deno_http::init_ops_and_esm::<DefaultHttpPropertyExtractor>(), deno_io::deno_io::init_ops_and_esm(Some(options.stdio)), deno_fs::deno_fs::init_ops_and_esm::<PermissionsContainer>( unstable, options.fs.clone(), ), deno_node::deno_node::init_ops_and_esm::<PermissionsContainer>( options.npm_resolver, options.fs, ), // Ops from this crate ops::runtime::deno_runtime::init_ops_and_esm(main_module.clone()), ops::worker_host::deno_worker_host::init_ops_and_esm( options.create_web_worker_cb.clone(), options.format_js_error_fn.clone(), ), ops::fs_events::deno_fs_events::init_ops_and_esm(), ops::os::deno_os::init_ops_and_esm(exit_code.clone()), ops::permissions::deno_permissions::init_ops_and_esm(), ops::process::deno_process::init_ops_and_esm(), ops::signal::deno_signal::init_ops_and_esm(), ops::tty::deno_tty::init_ops_and_esm(), ops::http::deno_http_runtime::init_ops_and_esm(), deno_permissions_worker::init_ops_and_esm( permissions, unstable, enable_testing_features, ), runtime::init_ops_and_esm(), ]; for extension in &mut extensions { 
#[cfg(not(feature = "__runtime_js_sources"))] { extension.js_files = std::borrow::Cow::Borrowed(&[]); extension.esm_files = std::borrow::Cow::Borrowed(&[]); extension.esm_entry_point = None; } #[cfg(feature = "__runtime_js_sources")] { use crate::shared::maybe_transpile_source; for source in extension.esm_files.to_mut() { maybe_transpile_source(source).unwrap(); } for source in extension.js_files.to_mut() { maybe_transpile_source(source).unwrap(); } } } extensions.extend(std::mem::take(&mut options.extensions)); #[cfg(all(feature = "include_js_files_for_snapshotting", feature = "dont_create_runtime_snapshot", not(feature = "__runtime_js_sources")))] options.startup_snapshot.as_ref().expect("Sources are not embedded, snapshotting was disabled and a user snapshot was not provided."); // Clear extension modules from the module map, except preserve `node:*` // modules. let preserve_snapshotted_modules = Some(SUPPORTED_BUILTIN_NODE_MODULES_WITH_PREFIX); let mut js_runtime = JsRuntime::new(RuntimeOptions { module_loader: Some(options.module_loader.clone()), startup_snapshot: options .startup_snapshot .or_else(crate::js::deno_isolate_init), create_params: options.create_params, source_map_getter: options.source_map_getter, get_error_class_fn: options.get_error_class_fn, shared_array_buffer_store: options.shared_array_buffer_store.clone(), compiled_wasm_module_store: options.compiled_wasm_module_store.clone(), extensions, preserve_snapshotted_modules, inspector: options.maybe_inspector_server.is_some(), is_main: true, ..Default::default() }); if let Some(server) = options.maybe_inspector_server.clone() { server.register_inspector( main_module.to_string(), &mut js_runtime, options.should_break_on_first_statement || options.should_wait_for_inspector_session, ); // Put inspector handle into the op state so we can put a breakpoint when // executing a CJS entrypoint. 
let op_state = js_runtime.op_state(); let inspector = js_runtime.inspector(); op_state.borrow_mut().put(inspector); } let bootstrap_fn_global = { let context = js_runtime.main_context(); let scope = &mut js_runtime.handle_scope(); let context_local = v8::Local::new(scope, context); let global_obj = context_local.global(scope); let bootstrap_str = v8::String::new_external_onebyte_static(scope, b"bootstrap").unwrap(); let bootstrap_ns: v8::Local<v8::Object> = global_obj .get(scope, bootstrap_str.into()) .unwrap() .try_into() .unwrap(); let main_runtime_str = v8::String::new_external_onebyte_static(scope, b"mainRuntime").unwrap(); let bootstrap_fn = bootstrap_ns.get(scope, main_runtime_str.into()).unwrap(); let bootstrap_fn = v8::Local::<v8::Function>::try_from(bootstrap_fn).unwrap(); v8::Global::new(scope, bootstrap_fn) }; Self { js_runtime, should_break_on_first_statement: options.should_break_on_first_statement, should_wait_for_inspector_session: options .should_wait_for_inspector_session, exit_code, bootstrap_fn_global: Some(bootstrap_fn_global), } } pub fn bootstrap(&mut self, options: &BootstrapOptions) { let scope = &mut self.js_runtime.handle_scope(); let args = options.as_v8(scope); let bootstrap_fn = self.bootstrap_fn_global.take().unwrap(); let bootstrap_fn = v8::Local::new(scope, bootstrap_fn); let undefined = v8::undefined(scope); bootstrap_fn.call(scope, undefined.into(), &[args]).unwrap(); } /// See [JsRuntime::execute_script](deno_core::JsRuntime::execute_script) pub fn execute_script( &mut self, script_name: &'static str, source_code: ModuleCode, ) -> Result<v8::Global<v8::Value>, AnyError> { self.js_runtime.execute_script(script_name, source_code) } /// Loads and instantiates specified JavaScript module as "main" module. pub async fn preload_main_module( &mut self, module_specifier: &ModuleSpecifier, ) -> Result<ModuleId, AnyError> { self .js_runtime .load_main_module(module_specifier, None) .await } /// Loads and instantiates specified JavaScript module as "side" module. pub async fn preload_side_module( &mut self, module_specifier: &ModuleSpecifier, ) -> Result<ModuleId, AnyError> { self .js_runtime .load_side_module(module_specifier, None) .await } /// Executes specified JavaScript module. pub async fn evaluate_module( &mut self, id: ModuleId, ) -> Result<(), AnyError> { self.wait_for_inspector_session(); let mut receiver = self.js_runtime.mod_evaluate(id); tokio::select! { // Not using biased mode leads to non-determinism for relatively simple // programs. biased; maybe_result = &mut receiver => { debug!("received module evaluate {:#?}", maybe_result); maybe_result.expect("Module evaluation result not provided.") } event_loop_result = self.run_event_loop(false) => { event_loop_result?; let maybe_result = receiver.await; maybe_result.expect("Module evaluation result not provided.") } } } /// Loads, instantiates and executes specified JavaScript module. pub async fn execute_side_module( &mut self, module_specifier: &ModuleSpecifier, ) -> Result<(), AnyError> { let id = self.preload_side_module(module_specifier).await?; self.evaluate_module(id).await } /// Loads, instantiates and executes specified JavaScript module. /// /// This module will have "import.meta.main" equal to true. 
pub async fn execute_main_module( &mut self, module_specifier: &ModuleSpecifier, ) -> Result<(), AnyError> { let id = self.preload_main_module(module_specifier).await?; self.evaluate_module(id).await } fn wait_for_inspector_session(&mut self) { if self.should_break_on_first_statement { self .js_runtime .inspector() .borrow_mut() .wait_for_session_and_break_on_next_statement(); } else if self.should_wait_for_inspector_session { self.js_runtime.inspector().borrow_mut().wait_for_session(); } } /// Create new inspector session. This function panics if Worker /// was not configured to create inspector. pub async fn create_inspector_session(&mut self) -> LocalInspectorSession { self.js_runtime.maybe_init_inspector(); self.js_runtime.inspector().borrow().create_local_session() } pub fn poll_event_loop( &mut self, cx: &mut Context, wait_for_inspector: bool, ) -> Poll<Result<(), AnyError>> { self.js_runtime.poll_event_loop(cx, wait_for_inspector) } pub async fn run_event_loop( &mut self, wait_for_inspector: bool, ) -> Result<(), AnyError> { self.js_runtime.run_event_loop(wait_for_inspector).await } /// A utility function that runs provided future concurrently with the event loop. /// /// Useful when using a local inspector session. pub async fn with_event_loop<'a, T>( &mut self, mut fut: Pin<Box<dyn Future<Output = T> + 'a>>, ) -> T { loop { tokio::select! { biased; result = &mut fut => { return result; } _ = self.run_event_loop(false) => {} }; } } /// Return exit code set by the executed code (either in main worker /// or one of child web workers). pub fn exit_code(&self) -> i32 { self.exit_code.get() } /// Dispatches "load" event to the JavaScript runtime. /// /// Does not poll event loop, and thus not await any of the "load" event handlers. pub fn dispatch_load_event( &mut self, script_name: &'static str, ) -> Result<(), AnyError> { self.js_runtime.execute_script( script_name, // NOTE(@bartlomieju): not using `globalThis` here, because user might delete // it. Instead we're using global `dispatchEvent` function which will // used a saved reference to global scope. ascii_str!("dispatchEvent(new Event('load'))"), )?; Ok(()) } /// Dispatches "unload" event to the JavaScript runtime. /// /// Does not poll event loop, and thus not await any of the "unload" event handlers. pub fn dispatch_unload_event( &mut self, script_name: &'static str, ) -> Result<(), AnyError> { self.js_runtime.execute_script( script_name, // NOTE(@bartlomieju): not using `globalThis` here, because user might delete // it. Instead we're using global `dispatchEvent` function which will // used a saved reference to global scope. ascii_str!("dispatchEvent(new Event('unload'))"), )?; Ok(()) } /// Dispatches "beforeunload" event to the JavaScript runtime. Returns a boolean /// indicating if the event was prevented and thus event loop should continue /// running. pub fn dispatch_beforeunload_event( &mut self, script_name: &'static str, ) -> Result<bool, AnyError> { let value = self.js_runtime.execute_script( script_name, // NOTE(@bartlomieju): not using `globalThis` here, because user might delete // it. Instead we're using global `dispatchEvent` function which will // used a saved reference to global scope. ascii_str!( "dispatchEvent(new Event('beforeunload', { cancelable: true }));" ), )?; let local_value = value.open(&mut self.js_runtime.handle_scope()); Ok(local_value.is_false()) } }
permissions: PermissionsContainer, unstable: bool, enable_testing_features: bool, },
random_line_split
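The doc comment on dispatch_beforeunload_event in the record above describes the contract this sketch illustrates: dispatching a cancelable event reports whether a listener prevented it, and a prevented "beforeunload" keeps the event loop running. This is a hypothetical miniature event target written purely for illustration, not Deno's implementation or its JavaScript runtime API.

class Event(object):
    def __init__(self, name, cancelable=False):
        self.name, self.cancelable, self.prevented = name, cancelable, False

    def prevent_default(self):
        if self.cancelable:
            self.prevented = True

class EventTarget(object):
    def __init__(self):
        self.listeners = {}

    def add_event_listener(self, name, fn):
        self.listeners.setdefault(name, []).append(fn)

    def dispatch_event(self, event):
        for fn in self.listeners.get(event.name, []):
            fn(event)
        # Mirrors DOM dispatchEvent: False means a cancelable event was
        # prevented, which is what dispatch_beforeunload_event checks for.
        return not event.prevented

target = EventTarget()
target.add_event_listener("beforeunload", lambda e: e.prevent_default())
print(target.dispatch_event(Event("beforeunload", cancelable=True)))  # False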
views.py
# coding=utf-8 """ Forum views """ from __future__ import absolute_import, print_function from datetime import datetime, date from itertools import groupby from urllib import quote import sqlalchemy as sa from sqlalchemy.orm import joinedload from werkzeug.exceptions import NotFound, BadRequest from flask import ( current_app, g, make_response, render_template, request, flash, ) from flask_babel import format_date from flask_login import current_user from abilian.i18n import _, _l from abilian.core.util import utc_dt from abilian.sbe.apps.communities.blueprint import Blueprint from abilian.sbe.apps.communities.views import default_view_kw from abilian.web import nav, url_for, views from abilian.web.action import ButtonAction from abilian.web.views import default_view from .forms import PostForm, ThreadForm, PostEditForm from .models import Post, PostAttachment, Thread from .tasks import send_post_by_email # TODO: move to config MAX_THREADS = 30 forum = Blueprint("forum", __name__, url_prefix="/forum", template_folder="templates") route = forum.route def post_kw_view_func(kw, obj, obj_type, obj_id, **kwargs): """ kwargs for Post default view """ kw = default_view_kw(kw, obj.thread, obj_type, obj_id, **kwargs) kw['thread_id'] = obj.thread_id kw['_anchor'] = u'post_{:d}'.format(obj.id) return kw @forum.url_value_preprocessor def init_forum_values(endpoint, values): g.current_tab = 'forum' g.breadcrumb.append( nav.BreadcrumbItem(label=_l(u'Conversations'), url=nav.Endpoint('forum.index', community_id=g.community.slug))) @route('/') def index(): query = Thread.query \ .filter(Thread.community_id == g.community.id) \ .order_by(Thread.created_at.desc()) has_more = query.count() > MAX_THREADS threads = query.limit(MAX_THREADS).all() return render_template("forum/index.html", threads=threads, has_more=has_more) def group_monthly(entities_list): # We're using Python's groupby instead of SA's group_by here # because it's easier to support both SQLite and Postgres this way. def grouper(entity): return entity.created_at.year, entity.created_at.month def format_month(year, month): month = format_date(date(year, month, 1), "MMMM").capitalize() return u"%s %s" % (month, year) grouped_entities = groupby(entities_list, grouper) grouped_entities = [(format_month(year, month), list(entities)) for (year, month), entities in grouped_entities] return grouped_entities @route('/archives/') def archives(): all_threads = Thread.query \ .filter(Thread.community_id == g.community.id) \ .order_by(Thread.created_at.desc()).all() grouped_threads = group_monthly(all_threads) return render_template('forum/archives.html', grouped_threads=grouped_threads) @route('/attachments/') def attachments(): # XXX: there is probably a way to optimize this and the big loop below... 
all_threads = Thread.query \ .filter(Thread.community_id == g.community.id) \ .options(joinedload('posts')) \ .options(joinedload('posts.attachments')) \ .order_by(Thread.created_at.desc()).all() posts_with_attachments = [] for thread in all_threads: for post in thread.posts: if getattr(post, 'attachments', None): posts_with_attachments.append(post) posts_with_attachments.sort(key=lambda post: post.created_at) posts_with_attachments.reverse() grouped_posts = group_monthly(posts_with_attachments) return render_template('forum/attachments.html', grouped_posts=grouped_posts) class BaseThreadView(object): Model = Thread Form = ThreadForm pk = 'thread_id' base_template = 'community/_base.html' def can_send_by_mail(self): return (g.community.type == 'participative' or g.community.has_permission(current_user, 'manage')) def prepare_args(self, args, kwargs): args, kwargs = super(BaseThreadView, self).prepare_args(args, kwargs) self.send_by_email = False if not self.can_send_by_mail() and 'send_by_email' in self.form: # remove from html form and avoid validation errors del self.form['send_by_email'] return args, kwargs def index_url(self): return url_for(".index", community_id=g.community.slug) def view_url(self): return url_for(self.obj) class ThreadView(BaseThreadView, views.ObjectView): methods = ['GET', 'HEAD'] Form = PostForm template = 'forum/thread.html' @property def template_kwargs(self): kw = super(ThreadView, self).template_kwargs kw['thread'] = self.obj kw['is_closed'] = self.obj.closed return kw thread_view = ThreadView.as_view('thread') default_view(forum, Post, None, kw_func=post_kw_view_func)(thread_view) default_view(forum, Thread, 'thread_id', kw_func=default_view_kw)(thread_view) route('/<int:thread_id>/')(thread_view) route('/<int:thread_id>/attachments')( ThreadView.as_view('thread_attachments', template='forum/thread_attachments.html') ) class ThreadCreate(BaseThreadView, views.ObjectCreate): POST_BUTTON = ButtonAction('form', 'create', btn_class='primary', title=_l(u'Post this message')) def init_object(self, args, kwargs): args, kwargs = super(ThreadCreate, self).init_object(args, kwargs) self.thread = self.obj return args, kwargs def before_populate_obj(self): del self.form['attachments'] self.message_body = self.form.message.data del self.form['message'] if 'send_by_email' in self.form: self.send_by_email = (self.can_send_by_mail() and self.form.send_by_email.data) del self.form['send_by_email'] def after_populate_obj(self): if self.thread.community is None: self.thread.community = g.community._model self.post = self.thread.create_post(body_html=self.message_body) obj_meta = self.post.meta.setdefault('abilian.sbe.forum', {}) obj_meta['origin'] = u'web' obj_meta['send_by_email'] = self.send_by_email session = sa.orm.object_session(self.thread) uploads = current_app.extensions['uploads'] for handle in request.form.getlist('attachments'): fileobj = uploads.get_file(current_user, handle) if fileobj is None: continue meta = uploads.get_metadata(current_user, handle) name = meta.get('filename', handle) mimetype = meta.get('mimetype', None) if not isinstance(name, unicode): name = unicode(name, encoding='utf-8', errors='ignore') if not name: continue attachment = PostAttachment(name=name) attachment.post = self.post with fileobj.open('rb') as f: attachment.set_content(f.read(), mimetype) session.add(attachment) def commit_success(self): if self.send_by_email: task = send_post_by_email.delay(self.post.id) meta = self.post.meta.setdefault('abilian.sbe.forum', {}) 
meta['send_post_by_email_task'] = task.id self.post.meta.changed() session = sa.orm.object_session(self.post) session.commit() @property def activity_target(self): return self.thread.community def get_form_buttons(self, *args, **kwargs): return [self.POST_BUTTON, views.object.CANCEL_BUTTON] route('/new_thread/')(ThreadCreate.as_view('new_thread', view_endpoint='.thread')) class ThreadPostCreate(ThreadCreate): """ Add a new post to a thread """ methods = ['POST'] Form = PostForm Model = Post def init_object(self, args, kwargs): # we DO want to skip ThreadCreate.init_object. hence super is not based on # ThreadPostCreate args, kwargs = super(ThreadCreate, self).init_object(args, kwargs) thread_id = kwargs.pop(self.pk, None) self.thread = Thread.query.get(thread_id) return args, kwargs def after_populate_obj(self): super(ThreadPostCreate, self).after_populate_obj() session = sa.orm.object_session(self.obj) session.expunge(self.obj) self.obj = self.post route('/<int:thread_id>/')(ThreadPostCreate.as_view('thread_post', view_endpoint='.thread')) class ThreadDelete(BaseThreadView, views.ObjectDelete): methods = ['POST'] _message_success = _(u'Thread "{title}" deleted.') def message_success(self): return unicode(self._message_success).format(title=self.obj.title) route('/<int:thread_id>/delete')(ThreadDelete.as_view('thread_delete')) class ThreadCloseView(BaseThreadView, views.object.BaseObjectView): """ Close / Re-open a thread """ methods = ['POST'] _VALID_ACTIONS = {u'close', u'reopen'} CLOSED_MSG = _l(u'The thread is now closed for edition and new ' u'contributions.') REOPENED_MSG = _l(u'The thread is now re-opened for edition and new ' u'contributions.') def prepare_args(self, args, kwargs): args, kwargs = super(ThreadCloseView, self).prepare_args(args, kwargs) action = kwargs['action'] = request.form.get('action') if action not in self._VALID_ACTIONS: raise BadRequest(u'Unknown action: {!r}'.format(action)) return args, kwargs def post(self, action=None): is_closed = (action == u'close') self.obj.closed = is_closed sa.orm.object_session(self.obj).commit() msg = self.CLOSED_MSG if is_closed else self.REOPENED_MSG flash(unicode(msg)) return self.redirect(url_for(self.obj)) route('/<int:thread_id>/close')(ThreadCloseView.as_view('thread_close')) class ThreadPostEdit(BaseThreadView, views.ObjectEdit): Form = PostEditForm Model = Post pk = 'object_id' def can_send_by_mail(self): # post edit: don't notify every time return False def init_object(self, args, kwargs): # we DO want to skip ThreadCreate.init_object. 
hence super is not based on # ThreadPostCreate args, kwargs = super(ThreadPostEdit, self).init_object(args, kwargs) thread_id = kwargs.pop('thread_id', None) self.thread = self.obj.thread assert thread_id == self.thread.id return args, kwargs def get_form_kwargs(self): kwargs = super(ThreadPostEdit, self).get_form_kwargs() kwargs['message'] = self.obj.body_html return kwargs def before_populate_obj(self): self.message_body = self.form.message.data del self.form['message'] self.reason = self.form.reason.data self.send_by_email = False if 'send_by_email' in self.form: del self.form['send_by_email'] self.attachments_to_remove = self.form['attachments'].delete_files_index del self.form['attachments'] def after_populate_obj(self): session = sa.orm.object_session(self.obj) uploads = current_app.extensions['uploads'] self.obj.body_html = self.message_body obj_meta = self.obj.meta.setdefault('abilian.sbe.forum', {}) history = obj_meta.setdefault('history', []) history.append(dict(user_id=current_user.id, user=unicode(current_user), date=utc_dt(datetime.utcnow()).isoformat(), reason=self.form.reason.data,)) self.obj.meta['abilian.sbe.forum'] = obj_meta # trigger change for SA attachments_to_remove = [] for idx in self.attachments_to_remove: try: idx = int(idx) except ValueError: continue if idx > len(self.obj.attachments):
attachments_to_remove.append(self.obj.attachments[idx]) for att in attachments_to_remove: session.delete(att) for handle in request.form.getlist('attachments'): fileobj = uploads.get_file(current_user, handle) if fileobj is None: continue meta = uploads.get_metadata(current_user, handle) name = meta.get('filename', handle) mimetype = meta.get('mimetype', None) if not isinstance(name, unicode): name = unicode(name, encoding='utf-8', errors='ignore') if not name: continue attachment = PostAttachment(name=name, post=self.obj) with fileobj.open('rb') as f: attachment.set_content(f.read(), mimetype) session.add(attachment) route('/<int:thread_id>/<int:object_id>/edit')( ThreadPostEdit.as_view('post_edit') ) def attachment_kw_view_func(kw, obj, obj_type, obj_id, **kwargs): post = obj.post kw = default_view_kw(kw, post.thread, obj_type, obj_id, **kwargs) kw['thread_id'] = post.thread_id kw['post_id'] = post.id return kw @route('/<int:thread_id>/posts/<int:post_id>/attachment/<int:attachment_id>') @default_view(forum, PostAttachment, 'attachment_id', kw_func=attachment_kw_view_func) def attachment_download(thread_id, post_id, attachment_id): thread = Thread.query.get(thread_id) post = Post.query.get(post_id) attachment = PostAttachment.query.get(attachment_id) if (not (thread and post and attachment) or post.thread is not thread or attachment.post is not post): raise NotFound() response = make_response(attachment.content) response.headers['content-length'] = attachment.content_length response.headers['content-type'] = attachment.content_type content_disposition = ( 'attachment;filename="{}"'.format(quote(attachment.name.encode('utf8'))) ) response.headers['content-disposition'] = content_disposition return response
continue
conditional_block
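The group_monthly helper in the views.py record above groups already-ordered entities by (year, month) with itertools.groupby rather than a SQL GROUP BY. Here is a standalone sketch of the same technique on bare dates; the real helper operates on Thread/Post objects via created_at and formats the label with flask_babel.format_date, which is simplified below.

from datetime import date
from itertools import groupby

def group_monthly_sketch(dates):
    # groupby only merges adjacent items, so the input must already be
    # ordered by date (the views order threads by created_at first).
    grouped = []
    for (year, month), items in groupby(dates, lambda d: (d.year, d.month)):
        grouped.append(("%04d-%02d" % (year, month), list(items)))
    return grouped

print(group_monthly_sketch([date(2023, 5, 20), date(2023, 5, 2), date(2023, 4, 30)]))
# [('2023-05', [date(2023, 5, 20), date(2023, 5, 2)]), ('2023-04', [date(2023, 4, 30)])]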
views.py
# coding=utf-8 """ Forum views """ from __future__ import absolute_import, print_function from datetime import datetime, date from itertools import groupby from urllib import quote import sqlalchemy as sa from sqlalchemy.orm import joinedload from werkzeug.exceptions import NotFound, BadRequest from flask import ( current_app, g, make_response, render_template, request, flash, ) from flask_babel import format_date from flask_login import current_user from abilian.i18n import _, _l from abilian.core.util import utc_dt from abilian.sbe.apps.communities.blueprint import Blueprint from abilian.sbe.apps.communities.views import default_view_kw from abilian.web import nav, url_for, views from abilian.web.action import ButtonAction from abilian.web.views import default_view from .forms import PostForm, ThreadForm, PostEditForm from .models import Post, PostAttachment, Thread from .tasks import send_post_by_email # TODO: move to config MAX_THREADS = 30 forum = Blueprint("forum", __name__, url_prefix="/forum", template_folder="templates") route = forum.route def post_kw_view_func(kw, obj, obj_type, obj_id, **kwargs): """ kwargs for Post default view """ kw = default_view_kw(kw, obj.thread, obj_type, obj_id, **kwargs) kw['thread_id'] = obj.thread_id kw['_anchor'] = u'post_{:d}'.format(obj.id) return kw @forum.url_value_preprocessor def init_forum_values(endpoint, values): g.current_tab = 'forum' g.breadcrumb.append( nav.BreadcrumbItem(label=_l(u'Conversations'), url=nav.Endpoint('forum.index', community_id=g.community.slug))) @route('/') def index(): query = Thread.query \ .filter(Thread.community_id == g.community.id) \ .order_by(Thread.created_at.desc()) has_more = query.count() > MAX_THREADS threads = query.limit(MAX_THREADS).all() return render_template("forum/index.html", threads=threads, has_more=has_more) def group_monthly(entities_list): # We're using Python's groupby instead of SA's group_by here # because it's easier to support both SQLite and Postgres this way. def grouper(entity): return entity.created_at.year, entity.created_at.month def format_month(year, month): month = format_date(date(year, month, 1), "MMMM").capitalize() return u"%s %s" % (month, year) grouped_entities = groupby(entities_list, grouper) grouped_entities = [(format_month(year, month), list(entities)) for (year, month), entities in grouped_entities] return grouped_entities @route('/archives/') def archives(): all_threads = Thread.query \ .filter(Thread.community_id == g.community.id) \ .order_by(Thread.created_at.desc()).all() grouped_threads = group_monthly(all_threads) return render_template('forum/archives.html', grouped_threads=grouped_threads) @route('/attachments/') def attachments(): # XXX: there is probably a way to optimize this and the big loop below... 
all_threads = Thread.query \ .filter(Thread.community_id == g.community.id) \ .options(joinedload('posts')) \ .options(joinedload('posts.attachments')) \ .order_by(Thread.created_at.desc()).all() posts_with_attachments = [] for thread in all_threads: for post in thread.posts: if getattr(post, 'attachments', None): posts_with_attachments.append(post) posts_with_attachments.sort(key=lambda post: post.created_at) posts_with_attachments.reverse() grouped_posts = group_monthly(posts_with_attachments) return render_template('forum/attachments.html', grouped_posts=grouped_posts) class BaseThreadView(object): Model = Thread Form = ThreadForm pk = 'thread_id' base_template = 'community/_base.html' def can_send_by_mail(self): return (g.community.type == 'participative' or g.community.has_permission(current_user, 'manage')) def prepare_args(self, args, kwargs): args, kwargs = super(BaseThreadView, self).prepare_args(args, kwargs) self.send_by_email = False if not self.can_send_by_mail() and 'send_by_email' in self.form: # remove from html form and avoid validation errors del self.form['send_by_email'] return args, kwargs def index_url(self): return url_for(".index", community_id=g.community.slug) def view_url(self): return url_for(self.obj) class ThreadView(BaseThreadView, views.ObjectView): methods = ['GET', 'HEAD'] Form = PostForm template = 'forum/thread.html' @property def template_kwargs(self): kw = super(ThreadView, self).template_kwargs kw['thread'] = self.obj kw['is_closed'] = self.obj.closed return kw thread_view = ThreadView.as_view('thread') default_view(forum, Post, None, kw_func=post_kw_view_func)(thread_view) default_view(forum, Thread, 'thread_id', kw_func=default_view_kw)(thread_view) route('/<int:thread_id>/')(thread_view) route('/<int:thread_id>/attachments')( ThreadView.as_view('thread_attachments', template='forum/thread_attachments.html') ) class ThreadCreate(BaseThreadView, views.ObjectCreate): POST_BUTTON = ButtonAction('form', 'create', btn_class='primary', title=_l(u'Post this message')) def init_object(self, args, kwargs): args, kwargs = super(ThreadCreate, self).init_object(args, kwargs) self.thread = self.obj return args, kwargs def
(self): del self.form['attachments'] self.message_body = self.form.message.data del self.form['message'] if 'send_by_email' in self.form: self.send_by_email = (self.can_send_by_mail() and self.form.send_by_email.data) del self.form['send_by_email'] def after_populate_obj(self): if self.thread.community is None: self.thread.community = g.community._model self.post = self.thread.create_post(body_html=self.message_body) obj_meta = self.post.meta.setdefault('abilian.sbe.forum', {}) obj_meta['origin'] = u'web' obj_meta['send_by_email'] = self.send_by_email session = sa.orm.object_session(self.thread) uploads = current_app.extensions['uploads'] for handle in request.form.getlist('attachments'): fileobj = uploads.get_file(current_user, handle) if fileobj is None: continue meta = uploads.get_metadata(current_user, handle) name = meta.get('filename', handle) mimetype = meta.get('mimetype', None) if not isinstance(name, unicode): name = unicode(name, encoding='utf-8', errors='ignore') if not name: continue attachment = PostAttachment(name=name) attachment.post = self.post with fileobj.open('rb') as f: attachment.set_content(f.read(), mimetype) session.add(attachment) def commit_success(self): if self.send_by_email: task = send_post_by_email.delay(self.post.id) meta = self.post.meta.setdefault('abilian.sbe.forum', {}) meta['send_post_by_email_task'] = task.id self.post.meta.changed() session = sa.orm.object_session(self.post) session.commit() @property def activity_target(self): return self.thread.community def get_form_buttons(self, *args, **kwargs): return [self.POST_BUTTON, views.object.CANCEL_BUTTON] route('/new_thread/')(ThreadCreate.as_view('new_thread', view_endpoint='.thread')) class ThreadPostCreate(ThreadCreate): """ Add a new post to a thread """ methods = ['POST'] Form = PostForm Model = Post def init_object(self, args, kwargs): # we DO want to skip ThreadCreate.init_object. 
hence super is not based on # ThreadPostCreate args, kwargs = super(ThreadCreate, self).init_object(args, kwargs) thread_id = kwargs.pop(self.pk, None) self.thread = Thread.query.get(thread_id) return args, kwargs def after_populate_obj(self): super(ThreadPostCreate, self).after_populate_obj() session = sa.orm.object_session(self.obj) session.expunge(self.obj) self.obj = self.post route('/<int:thread_id>/')(ThreadPostCreate.as_view('thread_post', view_endpoint='.thread')) class ThreadDelete(BaseThreadView, views.ObjectDelete): methods = ['POST'] _message_success = _(u'Thread "{title}" deleted.') def message_success(self): return unicode(self._message_success).format(title=self.obj.title) route('/<int:thread_id>/delete')(ThreadDelete.as_view('thread_delete')) class ThreadCloseView(BaseThreadView, views.object.BaseObjectView): """ Close / Re-open a thread """ methods = ['POST'] _VALID_ACTIONS = {u'close', u'reopen'} CLOSED_MSG = _l(u'The thread is now closed for edition and new ' u'contributions.') REOPENED_MSG = _l(u'The thread is now re-opened for edition and new ' u'contributions.') def prepare_args(self, args, kwargs): args, kwargs = super(ThreadCloseView, self).prepare_args(args, kwargs) action = kwargs['action'] = request.form.get('action') if action not in self._VALID_ACTIONS: raise BadRequest(u'Unknown action: {!r}'.format(action)) return args, kwargs def post(self, action=None): is_closed = (action == u'close') self.obj.closed = is_closed sa.orm.object_session(self.obj).commit() msg = self.CLOSED_MSG if is_closed else self.REOPENED_MSG flash(unicode(msg)) return self.redirect(url_for(self.obj)) route('/<int:thread_id>/close')(ThreadCloseView.as_view('thread_close')) class ThreadPostEdit(BaseThreadView, views.ObjectEdit): Form = PostEditForm Model = Post pk = 'object_id' def can_send_by_mail(self): # post edit: don't notify every time return False def init_object(self, args, kwargs): # we DO want to skip ThreadCreate.init_object. 
hence super is not based on # ThreadPostCreate args, kwargs = super(ThreadPostEdit, self).init_object(args, kwargs) thread_id = kwargs.pop('thread_id', None) self.thread = self.obj.thread assert thread_id == self.thread.id return args, kwargs def get_form_kwargs(self): kwargs = super(ThreadPostEdit, self).get_form_kwargs() kwargs['message'] = self.obj.body_html return kwargs def before_populate_obj(self): self.message_body = self.form.message.data del self.form['message'] self.reason = self.form.reason.data self.send_by_email = False if 'send_by_email' in self.form: del self.form['send_by_email'] self.attachments_to_remove = self.form['attachments'].delete_files_index del self.form['attachments'] def after_populate_obj(self): session = sa.orm.object_session(self.obj) uploads = current_app.extensions['uploads'] self.obj.body_html = self.message_body obj_meta = self.obj.meta.setdefault('abilian.sbe.forum', {}) history = obj_meta.setdefault('history', []) history.append(dict(user_id=current_user.id, user=unicode(current_user), date=utc_dt(datetime.utcnow()).isoformat(), reason=self.form.reason.data,)) self.obj.meta['abilian.sbe.forum'] = obj_meta # trigger change for SA attachments_to_remove = [] for idx in self.attachments_to_remove: try: idx = int(idx) except ValueError: continue if idx > len(self.obj.attachments): continue attachments_to_remove.append(self.obj.attachments[idx]) for att in attachments_to_remove: session.delete(att) for handle in request.form.getlist('attachments'): fileobj = uploads.get_file(current_user, handle) if fileobj is None: continue meta = uploads.get_metadata(current_user, handle) name = meta.get('filename', handle) mimetype = meta.get('mimetype', None) if not isinstance(name, unicode): name = unicode(name, encoding='utf-8', errors='ignore') if not name: continue attachment = PostAttachment(name=name, post=self.obj) with fileobj.open('rb') as f: attachment.set_content(f.read(), mimetype) session.add(attachment) route('/<int:thread_id>/<int:object_id>/edit')( ThreadPostEdit.as_view('post_edit') ) def attachment_kw_view_func(kw, obj, obj_type, obj_id, **kwargs): post = obj.post kw = default_view_kw(kw, post.thread, obj_type, obj_id, **kwargs) kw['thread_id'] = post.thread_id kw['post_id'] = post.id return kw @route('/<int:thread_id>/posts/<int:post_id>/attachment/<int:attachment_id>') @default_view(forum, PostAttachment, 'attachment_id', kw_func=attachment_kw_view_func) def attachment_download(thread_id, post_id, attachment_id): thread = Thread.query.get(thread_id) post = Post.query.get(post_id) attachment = PostAttachment.query.get(attachment_id) if (not (thread and post and attachment) or post.thread is not thread or attachment.post is not post): raise NotFound() response = make_response(attachment.content) response.headers['content-length'] = attachment.content_length response.headers['content-type'] = attachment.content_type content_disposition = ( 'attachment;filename="{}"'.format(quote(attachment.name.encode('utf8'))) ) response.headers['content-disposition'] = content_disposition return response
before_populate_obj
identifier_name
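ThreadPostEdit.after_populate_obj in the records above resolves user-submitted attachment indexes, skipping values that fail int() or fall outside the attachment list, before deleting the selected attachments. Below is a small sketch of that selection step, assuming plain lists; note the original guard is `if idx > len(...)`, whereas this variant uses an inclusive `0 <= idx < len(...)` check so an index equal to the length, or a negative one, is skipped instead of indexing out of range.

def select_by_indexes(attachments, raw_indexes):
    selected = []
    for raw in raw_indexes:
        try:
            idx = int(raw)
        except ValueError:
            continue
        if not 0 <= idx < len(attachments):
            continue
        selected.append(attachments[idx])
    return selected

print(select_by_indexes(["a.txt", "b.png"], ["1", "5", "x", "-1"]))  # ['b.png']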
views.py
# coding=utf-8 """ Forum views """ from __future__ import absolute_import, print_function from datetime import datetime, date from itertools import groupby from urllib import quote import sqlalchemy as sa from sqlalchemy.orm import joinedload from werkzeug.exceptions import NotFound, BadRequest from flask import ( current_app, g, make_response, render_template, request, flash, ) from flask_babel import format_date from flask_login import current_user from abilian.i18n import _, _l from abilian.core.util import utc_dt from abilian.sbe.apps.communities.blueprint import Blueprint from abilian.sbe.apps.communities.views import default_view_kw from abilian.web import nav, url_for, views from abilian.web.action import ButtonAction from abilian.web.views import default_view from .forms import PostForm, ThreadForm, PostEditForm from .models import Post, PostAttachment, Thread from .tasks import send_post_by_email # TODO: move to config MAX_THREADS = 30 forum = Blueprint("forum", __name__, url_prefix="/forum", template_folder="templates") route = forum.route def post_kw_view_func(kw, obj, obj_type, obj_id, **kwargs): """ kwargs for Post default view """ kw = default_view_kw(kw, obj.thread, obj_type, obj_id, **kwargs) kw['thread_id'] = obj.thread_id kw['_anchor'] = u'post_{:d}'.format(obj.id) return kw @forum.url_value_preprocessor def init_forum_values(endpoint, values): g.current_tab = 'forum' g.breadcrumb.append( nav.BreadcrumbItem(label=_l(u'Conversations'), url=nav.Endpoint('forum.index', community_id=g.community.slug))) @route('/') def index(): query = Thread.query \ .filter(Thread.community_id == g.community.id) \ .order_by(Thread.created_at.desc()) has_more = query.count() > MAX_THREADS threads = query.limit(MAX_THREADS).all() return render_template("forum/index.html", threads=threads, has_more=has_more) def group_monthly(entities_list): # We're using Python's groupby instead of SA's group_by here # because it's easier to support both SQLite and Postgres this way. def grouper(entity): return entity.created_at.year, entity.created_at.month def format_month(year, month): month = format_date(date(year, month, 1), "MMMM").capitalize() return u"%s %s" % (month, year) grouped_entities = groupby(entities_list, grouper) grouped_entities = [(format_month(year, month), list(entities)) for (year, month), entities in grouped_entities] return grouped_entities @route('/archives/') def archives(): all_threads = Thread.query \ .filter(Thread.community_id == g.community.id) \ .order_by(Thread.created_at.desc()).all() grouped_threads = group_monthly(all_threads) return render_template('forum/archives.html', grouped_threads=grouped_threads) @route('/attachments/') def attachments(): # XXX: there is probably a way to optimize this and the big loop below... 
all_threads = Thread.query \ .filter(Thread.community_id == g.community.id) \ .options(joinedload('posts')) \ .options(joinedload('posts.attachments')) \ .order_by(Thread.created_at.desc()).all() posts_with_attachments = [] for thread in all_threads: for post in thread.posts: if getattr(post, 'attachments', None): posts_with_attachments.append(post) posts_with_attachments.sort(key=lambda post: post.created_at) posts_with_attachments.reverse() grouped_posts = group_monthly(posts_with_attachments) return render_template('forum/attachments.html', grouped_posts=grouped_posts) class BaseThreadView(object): Model = Thread Form = ThreadForm pk = 'thread_id' base_template = 'community/_base.html' def can_send_by_mail(self): return (g.community.type == 'participative' or g.community.has_permission(current_user, 'manage')) def prepare_args(self, args, kwargs): args, kwargs = super(BaseThreadView, self).prepare_args(args, kwargs) self.send_by_email = False if not self.can_send_by_mail() and 'send_by_email' in self.form: # remove from html form and avoid validation errors del self.form['send_by_email'] return args, kwargs def index_url(self): return url_for(".index", community_id=g.community.slug) def view_url(self): return url_for(self.obj) class ThreadView(BaseThreadView, views.ObjectView): methods = ['GET', 'HEAD'] Form = PostForm template = 'forum/thread.html' @property def template_kwargs(self): kw = super(ThreadView, self).template_kwargs kw['thread'] = self.obj kw['is_closed'] = self.obj.closed return kw thread_view = ThreadView.as_view('thread') default_view(forum, Post, None, kw_func=post_kw_view_func)(thread_view) default_view(forum, Thread, 'thread_id', kw_func=default_view_kw)(thread_view) route('/<int:thread_id>/')(thread_view) route('/<int:thread_id>/attachments')( ThreadView.as_view('thread_attachments', template='forum/thread_attachments.html') ) class ThreadCreate(BaseThreadView, views.ObjectCreate): POST_BUTTON = ButtonAction('form', 'create', btn_class='primary', title=_l(u'Post this message')) def init_object(self, args, kwargs): args, kwargs = super(ThreadCreate, self).init_object(args, kwargs) self.thread = self.obj return args, kwargs def before_populate_obj(self): del self.form['attachments'] self.message_body = self.form.message.data del self.form['message'] if 'send_by_email' in self.form: self.send_by_email = (self.can_send_by_mail() and self.form.send_by_email.data) del self.form['send_by_email'] def after_populate_obj(self): if self.thread.community is None: self.thread.community = g.community._model self.post = self.thread.create_post(body_html=self.message_body) obj_meta = self.post.meta.setdefault('abilian.sbe.forum', {}) obj_meta['origin'] = u'web' obj_meta['send_by_email'] = self.send_by_email session = sa.orm.object_session(self.thread) uploads = current_app.extensions['uploads'] for handle in request.form.getlist('attachments'): fileobj = uploads.get_file(current_user, handle) if fileobj is None: continue meta = uploads.get_metadata(current_user, handle) name = meta.get('filename', handle) mimetype = meta.get('mimetype', None) if not isinstance(name, unicode): name = unicode(name, encoding='utf-8', errors='ignore') if not name: continue attachment = PostAttachment(name=name) attachment.post = self.post with fileobj.open('rb') as f: attachment.set_content(f.read(), mimetype) session.add(attachment) def commit_success(self): if self.send_by_email: task = send_post_by_email.delay(self.post.id) meta = self.post.meta.setdefault('abilian.sbe.forum', {}) 
meta['send_post_by_email_task'] = task.id self.post.meta.changed() session = sa.orm.object_session(self.post) session.commit() @property def activity_target(self): return self.thread.community def get_form_buttons(self, *args, **kwargs): return [self.POST_BUTTON, views.object.CANCEL_BUTTON] route('/new_thread/')(ThreadCreate.as_view('new_thread', view_endpoint='.thread')) class ThreadPostCreate(ThreadCreate): """ Add a new post to a thread """ methods = ['POST'] Form = PostForm Model = Post def init_object(self, args, kwargs): # we DO want to skip ThreadCreate.init_object. hence super is not based on # ThreadPostCreate args, kwargs = super(ThreadCreate, self).init_object(args, kwargs) thread_id = kwargs.pop(self.pk, None) self.thread = Thread.query.get(thread_id) return args, kwargs def after_populate_obj(self): super(ThreadPostCreate, self).after_populate_obj() session = sa.orm.object_session(self.obj) session.expunge(self.obj) self.obj = self.post route('/<int:thread_id>/')(ThreadPostCreate.as_view('thread_post', view_endpoint='.thread')) class ThreadDelete(BaseThreadView, views.ObjectDelete): methods = ['POST'] _message_success = _(u'Thread "{title}" deleted.') def message_success(self): return unicode(self._message_success).format(title=self.obj.title) route('/<int:thread_id>/delete')(ThreadDelete.as_view('thread_delete')) class ThreadCloseView(BaseThreadView, views.object.BaseObjectView): """ Close / Re-open a thread """ methods = ['POST'] _VALID_ACTIONS = {u'close', u'reopen'} CLOSED_MSG = _l(u'The thread is now closed for edition and new ' u'contributions.') REOPENED_MSG = _l(u'The thread is now re-opened for edition and new ' u'contributions.') def prepare_args(self, args, kwargs): args, kwargs = super(ThreadCloseView, self).prepare_args(args, kwargs) action = kwargs['action'] = request.form.get('action') if action not in self._VALID_ACTIONS: raise BadRequest(u'Unknown action: {!r}'.format(action)) return args, kwargs def post(self, action=None): is_closed = (action == u'close') self.obj.closed = is_closed sa.orm.object_session(self.obj).commit() msg = self.CLOSED_MSG if is_closed else self.REOPENED_MSG flash(unicode(msg)) return self.redirect(url_for(self.obj)) route('/<int:thread_id>/close')(ThreadCloseView.as_view('thread_close')) class ThreadPostEdit(BaseThreadView, views.ObjectEdit): Form = PostEditForm Model = Post pk = 'object_id' def can_send_by_mail(self): # post edit: don't notify every time return False def init_object(self, args, kwargs): # we DO want to skip ThreadCreate.init_object. 
hence super is not based on # ThreadPostCreate args, kwargs = super(ThreadPostEdit, self).init_object(args, kwargs) thread_id = kwargs.pop('thread_id', None) self.thread = self.obj.thread assert thread_id == self.thread.id return args, kwargs def get_form_kwargs(self): kwargs = super(ThreadPostEdit, self).get_form_kwargs() kwargs['message'] = self.obj.body_html return kwargs def before_populate_obj(self): self.message_body = self.form.message.data del self.form['message'] self.reason = self.form.reason.data self.send_by_email = False if 'send_by_email' in self.form: del self.form['send_by_email'] self.attachments_to_remove = self.form['attachments'].delete_files_index del self.form['attachments'] def after_populate_obj(self): session = sa.orm.object_session(self.obj) uploads = current_app.extensions['uploads'] self.obj.body_html = self.message_body obj_meta = self.obj.meta.setdefault('abilian.sbe.forum', {}) history = obj_meta.setdefault('history', []) history.append(dict(user_id=current_user.id, user=unicode(current_user), date=utc_dt(datetime.utcnow()).isoformat(), reason=self.form.reason.data,)) self.obj.meta['abilian.sbe.forum'] = obj_meta # trigger change for SA attachments_to_remove = [] for idx in self.attachments_to_remove: try: idx = int(idx) except ValueError: continue if idx > len(self.obj.attachments): continue
session.delete(att) for handle in request.form.getlist('attachments'): fileobj = uploads.get_file(current_user, handle) if fileobj is None: continue meta = uploads.get_metadata(current_user, handle) name = meta.get('filename', handle) mimetype = meta.get('mimetype', None) if not isinstance(name, unicode): name = unicode(name, encoding='utf-8', errors='ignore') if not name: continue attachment = PostAttachment(name=name, post=self.obj) with fileobj.open('rb') as f: attachment.set_content(f.read(), mimetype) session.add(attachment) route('/<int:thread_id>/<int:object_id>/edit')( ThreadPostEdit.as_view('post_edit') ) def attachment_kw_view_func(kw, obj, obj_type, obj_id, **kwargs): post = obj.post kw = default_view_kw(kw, post.thread, obj_type, obj_id, **kwargs) kw['thread_id'] = post.thread_id kw['post_id'] = post.id return kw @route('/<int:thread_id>/posts/<int:post_id>/attachment/<int:attachment_id>') @default_view(forum, PostAttachment, 'attachment_id', kw_func=attachment_kw_view_func) def attachment_download(thread_id, post_id, attachment_id): thread = Thread.query.get(thread_id) post = Post.query.get(post_id) attachment = PostAttachment.query.get(attachment_id) if (not (thread and post and attachment) or post.thread is not thread or attachment.post is not post): raise NotFound() response = make_response(attachment.content) response.headers['content-length'] = attachment.content_length response.headers['content-type'] = attachment.content_type content_disposition = ( 'attachment;filename="{}"'.format(quote(attachment.name.encode('utf8'))) ) response.headers['content-disposition'] = content_disposition return response
attachments_to_remove.append(self.obj.attachments[idx]) for att in attachments_to_remove:
random_line_split
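attachment_download in the views.py records builds a Content-Disposition header by UTF-8 encoding the attachment name and percent-quoting it with urllib.quote. A minimal sketch of that header construction follows, kept in the Python 2 style of the module (on Python 3 the same function lives in urllib.parse).

from urllib import quote  # Python 2, as imported in views.py

def content_disposition(filename):
    # Encode to UTF-8 bytes first, then percent-quote for the header value.
    return 'attachment;filename="{}"'.format(quote(filename.encode('utf8')))

print(content_disposition(u'rapport \xe9t\xe9.pdf'))
# attachment;filename="rapport%20%C3%A9t%C3%A9.pdf"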
views.py
# coding=utf-8 """ Forum views """ from __future__ import absolute_import, print_function from datetime import datetime, date from itertools import groupby from urllib import quote import sqlalchemy as sa from sqlalchemy.orm import joinedload from werkzeug.exceptions import NotFound, BadRequest from flask import ( current_app, g, make_response, render_template, request, flash, ) from flask_babel import format_date from flask_login import current_user from abilian.i18n import _, _l from abilian.core.util import utc_dt from abilian.sbe.apps.communities.blueprint import Blueprint from abilian.sbe.apps.communities.views import default_view_kw from abilian.web import nav, url_for, views from abilian.web.action import ButtonAction from abilian.web.views import default_view from .forms import PostForm, ThreadForm, PostEditForm from .models import Post, PostAttachment, Thread from .tasks import send_post_by_email # TODO: move to config MAX_THREADS = 30 forum = Blueprint("forum", __name__, url_prefix="/forum", template_folder="templates") route = forum.route def post_kw_view_func(kw, obj, obj_type, obj_id, **kwargs): """ kwargs for Post default view """ kw = default_view_kw(kw, obj.thread, obj_type, obj_id, **kwargs) kw['thread_id'] = obj.thread_id kw['_anchor'] = u'post_{:d}'.format(obj.id) return kw @forum.url_value_preprocessor def init_forum_values(endpoint, values): g.current_tab = 'forum' g.breadcrumb.append( nav.BreadcrumbItem(label=_l(u'Conversations'), url=nav.Endpoint('forum.index', community_id=g.community.slug))) @route('/') def index(): query = Thread.query \ .filter(Thread.community_id == g.community.id) \ .order_by(Thread.created_at.desc()) has_more = query.count() > MAX_THREADS threads = query.limit(MAX_THREADS).all() return render_template("forum/index.html", threads=threads, has_more=has_more) def group_monthly(entities_list): # We're using Python's groupby instead of SA's group_by here # because it's easier to support both SQLite and Postgres this way. def grouper(entity): return entity.created_at.year, entity.created_at.month def format_month(year, month):
grouped_entities = groupby(entities_list, grouper) grouped_entities = [(format_month(year, month), list(entities)) for (year, month), entities in grouped_entities] return grouped_entities @route('/archives/') def archives(): all_threads = Thread.query \ .filter(Thread.community_id == g.community.id) \ .order_by(Thread.created_at.desc()).all() grouped_threads = group_monthly(all_threads) return render_template('forum/archives.html', grouped_threads=grouped_threads) @route('/attachments/') def attachments(): # XXX: there is probably a way to optimize this and the big loop below... all_threads = Thread.query \ .filter(Thread.community_id == g.community.id) \ .options(joinedload('posts')) \ .options(joinedload('posts.attachments')) \ .order_by(Thread.created_at.desc()).all() posts_with_attachments = [] for thread in all_threads: for post in thread.posts: if getattr(post, 'attachments', None): posts_with_attachments.append(post) posts_with_attachments.sort(key=lambda post: post.created_at) posts_with_attachments.reverse() grouped_posts = group_monthly(posts_with_attachments) return render_template('forum/attachments.html', grouped_posts=grouped_posts) class BaseThreadView(object): Model = Thread Form = ThreadForm pk = 'thread_id' base_template = 'community/_base.html' def can_send_by_mail(self): return (g.community.type == 'participative' or g.community.has_permission(current_user, 'manage')) def prepare_args(self, args, kwargs): args, kwargs = super(BaseThreadView, self).prepare_args(args, kwargs) self.send_by_email = False if not self.can_send_by_mail() and 'send_by_email' in self.form: # remove from html form and avoid validation errors del self.form['send_by_email'] return args, kwargs def index_url(self): return url_for(".index", community_id=g.community.slug) def view_url(self): return url_for(self.obj) class ThreadView(BaseThreadView, views.ObjectView): methods = ['GET', 'HEAD'] Form = PostForm template = 'forum/thread.html' @property def template_kwargs(self): kw = super(ThreadView, self).template_kwargs kw['thread'] = self.obj kw['is_closed'] = self.obj.closed return kw thread_view = ThreadView.as_view('thread') default_view(forum, Post, None, kw_func=post_kw_view_func)(thread_view) default_view(forum, Thread, 'thread_id', kw_func=default_view_kw)(thread_view) route('/<int:thread_id>/')(thread_view) route('/<int:thread_id>/attachments')( ThreadView.as_view('thread_attachments', template='forum/thread_attachments.html') ) class ThreadCreate(BaseThreadView, views.ObjectCreate): POST_BUTTON = ButtonAction('form', 'create', btn_class='primary', title=_l(u'Post this message')) def init_object(self, args, kwargs): args, kwargs = super(ThreadCreate, self).init_object(args, kwargs) self.thread = self.obj return args, kwargs def before_populate_obj(self): del self.form['attachments'] self.message_body = self.form.message.data del self.form['message'] if 'send_by_email' in self.form: self.send_by_email = (self.can_send_by_mail() and self.form.send_by_email.data) del self.form['send_by_email'] def after_populate_obj(self): if self.thread.community is None: self.thread.community = g.community._model self.post = self.thread.create_post(body_html=self.message_body) obj_meta = self.post.meta.setdefault('abilian.sbe.forum', {}) obj_meta['origin'] = u'web' obj_meta['send_by_email'] = self.send_by_email session = sa.orm.object_session(self.thread) uploads = current_app.extensions['uploads'] for handle in request.form.getlist('attachments'): fileobj = uploads.get_file(current_user, handle) if fileobj is 
None: continue meta = uploads.get_metadata(current_user, handle) name = meta.get('filename', handle) mimetype = meta.get('mimetype', None) if not isinstance(name, unicode): name = unicode(name, encoding='utf-8', errors='ignore') if not name: continue attachment = PostAttachment(name=name) attachment.post = self.post with fileobj.open('rb') as f: attachment.set_content(f.read(), mimetype) session.add(attachment) def commit_success(self): if self.send_by_email: task = send_post_by_email.delay(self.post.id) meta = self.post.meta.setdefault('abilian.sbe.forum', {}) meta['send_post_by_email_task'] = task.id self.post.meta.changed() session = sa.orm.object_session(self.post) session.commit() @property def activity_target(self): return self.thread.community def get_form_buttons(self, *args, **kwargs): return [self.POST_BUTTON, views.object.CANCEL_BUTTON] route('/new_thread/')(ThreadCreate.as_view('new_thread', view_endpoint='.thread')) class ThreadPostCreate(ThreadCreate): """ Add a new post to a thread """ methods = ['POST'] Form = PostForm Model = Post def init_object(self, args, kwargs): # we DO want to skip ThreadCreate.init_object. hence super is not based on # ThreadPostCreate args, kwargs = super(ThreadCreate, self).init_object(args, kwargs) thread_id = kwargs.pop(self.pk, None) self.thread = Thread.query.get(thread_id) return args, kwargs def after_populate_obj(self): super(ThreadPostCreate, self).after_populate_obj() session = sa.orm.object_session(self.obj) session.expunge(self.obj) self.obj = self.post route('/<int:thread_id>/')(ThreadPostCreate.as_view('thread_post', view_endpoint='.thread')) class ThreadDelete(BaseThreadView, views.ObjectDelete): methods = ['POST'] _message_success = _(u'Thread "{title}" deleted.') def message_success(self): return unicode(self._message_success).format(title=self.obj.title) route('/<int:thread_id>/delete')(ThreadDelete.as_view('thread_delete')) class ThreadCloseView(BaseThreadView, views.object.BaseObjectView): """ Close / Re-open a thread """ methods = ['POST'] _VALID_ACTIONS = {u'close', u'reopen'} CLOSED_MSG = _l(u'The thread is now closed for edition and new ' u'contributions.') REOPENED_MSG = _l(u'The thread is now re-opened for edition and new ' u'contributions.') def prepare_args(self, args, kwargs): args, kwargs = super(ThreadCloseView, self).prepare_args(args, kwargs) action = kwargs['action'] = request.form.get('action') if action not in self._VALID_ACTIONS: raise BadRequest(u'Unknown action: {!r}'.format(action)) return args, kwargs def post(self, action=None): is_closed = (action == u'close') self.obj.closed = is_closed sa.orm.object_session(self.obj).commit() msg = self.CLOSED_MSG if is_closed else self.REOPENED_MSG flash(unicode(msg)) return self.redirect(url_for(self.obj)) route('/<int:thread_id>/close')(ThreadCloseView.as_view('thread_close')) class ThreadPostEdit(BaseThreadView, views.ObjectEdit): Form = PostEditForm Model = Post pk = 'object_id' def can_send_by_mail(self): # post edit: don't notify every time return False def init_object(self, args, kwargs): # we DO want to skip ThreadCreate.init_object. 
hence super is not based on # ThreadPostCreate args, kwargs = super(ThreadPostEdit, self).init_object(args, kwargs) thread_id = kwargs.pop('thread_id', None) self.thread = self.obj.thread assert thread_id == self.thread.id return args, kwargs def get_form_kwargs(self): kwargs = super(ThreadPostEdit, self).get_form_kwargs() kwargs['message'] = self.obj.body_html return kwargs def before_populate_obj(self): self.message_body = self.form.message.data del self.form['message'] self.reason = self.form.reason.data self.send_by_email = False if 'send_by_email' in self.form: del self.form['send_by_email'] self.attachments_to_remove = self.form['attachments'].delete_files_index del self.form['attachments'] def after_populate_obj(self): session = sa.orm.object_session(self.obj) uploads = current_app.extensions['uploads'] self.obj.body_html = self.message_body obj_meta = self.obj.meta.setdefault('abilian.sbe.forum', {}) history = obj_meta.setdefault('history', []) history.append(dict(user_id=current_user.id, user=unicode(current_user), date=utc_dt(datetime.utcnow()).isoformat(), reason=self.form.reason.data,)) self.obj.meta['abilian.sbe.forum'] = obj_meta # trigger change for SA attachments_to_remove = [] for idx in self.attachments_to_remove: try: idx = int(idx) except ValueError: continue if idx > len(self.obj.attachments): continue attachments_to_remove.append(self.obj.attachments[idx]) for att in attachments_to_remove: session.delete(att) for handle in request.form.getlist('attachments'): fileobj = uploads.get_file(current_user, handle) if fileobj is None: continue meta = uploads.get_metadata(current_user, handle) name = meta.get('filename', handle) mimetype = meta.get('mimetype', None) if not isinstance(name, unicode): name = unicode(name, encoding='utf-8', errors='ignore') if not name: continue attachment = PostAttachment(name=name, post=self.obj) with fileobj.open('rb') as f: attachment.set_content(f.read(), mimetype) session.add(attachment) route('/<int:thread_id>/<int:object_id>/edit')( ThreadPostEdit.as_view('post_edit') ) def attachment_kw_view_func(kw, obj, obj_type, obj_id, **kwargs): post = obj.post kw = default_view_kw(kw, post.thread, obj_type, obj_id, **kwargs) kw['thread_id'] = post.thread_id kw['post_id'] = post.id return kw @route('/<int:thread_id>/posts/<int:post_id>/attachment/<int:attachment_id>') @default_view(forum, PostAttachment, 'attachment_id', kw_func=attachment_kw_view_func) def attachment_download(thread_id, post_id, attachment_id): thread = Thread.query.get(thread_id) post = Post.query.get(post_id) attachment = PostAttachment.query.get(attachment_id) if (not (thread and post and attachment) or post.thread is not thread or attachment.post is not post): raise NotFound() response = make_response(attachment.content) response.headers['content-length'] = attachment.content_length response.headers['content-type'] = attachment.content_type content_disposition = ( 'attachment;filename="{}"'.format(quote(attachment.name.encode('utf8'))) ) response.headers['content-disposition'] = content_disposition return response
month = format_date(date(year, month, 1), "MMMM").capitalize()
return u"%s %s" % (month, year)
identifier_body
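The identifier_body middle here is the body of format_month inside group_monthly. A self-contained sketch of the same monthly grouping follows, using plain dictionaries instead of the Thread model (the data is made up); note that itertools.groupby only merges adjacent keys, which is why the query is ordered by created_at first.

from datetime import date
from itertools import groupby

def group_monthly(items):
    # items must already be sorted (here: newest first), because groupby
    # only groups adjacent elements sharing the same key
    def grouper(item):
        return item["created_at"].year, item["created_at"].month
    return [((year, month), list(group))
            for (year, month), group in groupby(items, grouper)]

threads = [
    {"title": "t1", "created_at": date(2023, 5, 9)},
    {"title": "t2", "created_at": date(2023, 5, 2)},
    {"title": "t3", "created_at": date(2023, 4, 28)},
]
for (year, month), group in group_monthly(threads):
    print(year, month, len(group))   # 2023 5 2 / 2023 4 1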
gpu_controller.go
/* Copyright 2023. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package controllers import ( "context" "encoding/json" "strconv" "github.com/go-logr/logr" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metaV1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/selection" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/kubernetes" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/source" ) type GpuReconciler struct { client.Client Scheme *runtime.Scheme Logger logr.Logger } const ( GPU = "gpu" GPUInfo = "node-gpu-info" GPUInfoNameSpace = "node-system" NvidiaGPUProduct = "nvidia.com/gpu.product" NvidiaGPUMemory = "nvidia.com/gpu.memory" NvidiaGPU corev1.ResourceName = "nvidia.com/gpu" GPUProduct = "gpu.product" GPUCount = "gpu.count" GPUMemory = "gpu.memory" NodeIndexKey = "node" PodIndexKey = "pod" ) //+kubebuilder:rbac:groups=node.k8s.io,resources=gpus,verbs=get;list;watch;create;update;patch;delete //+kubebuilder:rbac:groups=node.k8s.io,resources=gpus/status,verbs=get;update;patch //+kubebuilder:rbac:groups=node.k8s.io,resources=gpus/finalizers,verbs=update //+kubebuilder:rbac:groups="",resources=nodes,verbs=get;list;watch;create;update;patch;delete //+kubebuilder:rbac:groups="",resources=nodes/status,verbs=get;list;watch;create;update;patch;delete //+kubebuilder:rbac:groups="",resources=configmaps,verbs=get;list;watch;create;update;patch;delete //+kubebuilder:rbac:groups="",resources=pods,verbs=get;list;watch;create;update;patch;delete func (r *GpuReconciler) Reconcile(ctx context.Context, _ ctrl.Request) (ctrl.Result, error) { nodeList := &corev1.NodeList{} if err := r.List(ctx, nodeList, client.MatchingFields{NodeIndexKey: GPU}); err != nil { r.Logger.Error(err, "failed to get node list") return ctrl.Result{}, err } podList := &corev1.PodList{} if err := r.List(ctx, podList, client.MatchingFields{PodIndexKey: GPU}); err != nil { r.Logger.Error(err, "failed to get pod list") return ctrl.Result{}, err } return r.applyGPUInfoCM(ctx, nodeList, podList, nil) } func (r *GpuReconciler) applyGPUInfoCM(ctx context.Context, nodeList *corev1.NodeList, podList *corev1.PodList, clientSet *kubernetes.Clientset) (ctrl.Result, error) { /* "nodeMap": { "sealos-poc-gpu-master-0":{}, "sealos-poc-gpu-node-1":{"gpu.count":"1","gpu.memory":"15360","gpu.product":"Tesla-T4"}} } */ nodeMap := make(map[string]map[string]string) var nodeName string // get the GPU product, GPU memory, GPU allocatable number on the node for _, node := range nodeList.Items { nodeName = node.Name if _, ok := nodeMap[nodeName]; !ok { nodeMap[nodeName] = make(map[string]string) } gpuProduct, ok1 := node.Labels[NvidiaGPUProduct] gpuMemory, ok2 := node.Labels[NvidiaGPUMemory] gpuCount, ok3 := 
node.Status.Allocatable[NvidiaGPU] if !ok1 || !ok2 || !ok3 { continue } nodeMap[nodeName][GPUProduct] = gpuProduct nodeMap[nodeName][GPUMemory] = gpuMemory nodeMap[nodeName][GPUCount] = gpuCount.String() } // get the number of GPU used by pods that are using GPU for _, pod := range podList.Items { phase := pod.Status.Phase if phase == corev1.PodSucceeded { continue } nodeName = pod.Spec.NodeName _, ok1 := nodeMap[nodeName] gpuProduct, ok2 := pod.Spec.NodeSelector[NvidiaGPUProduct] if !ok1 || !ok2 { continue } containers := pod.Spec.Containers for _, container := range containers { gpuCount, ok := container.Resources.Limits[NvidiaGPU] if !ok { continue } r.Logger.V(1).Info("pod using GPU", "name", pod.Name, "namespace", pod.Namespace, "gpuCount", gpuCount, "gpuProduct", gpuProduct) oldCount, err := strconv.ParseInt(nodeMap[nodeName][GPUCount], 10, 64) if err != nil { r.Logger.Error(err, "failed to parse gpu.count string to int64") return ctrl.Result{}, err } newCount := oldCount - gpuCount.Value() nodeMap[nodeName][GPUCount] = strconv.FormatInt(newCount, 10) } } // marshal node map to JSON string nodeMapBytes, err := json.Marshal(nodeMap) if err != nil { r.Logger.Error(err, "failed to marshal node map to JSON string") return ctrl.Result{}, err } nodeMapStr := string(nodeMapBytes) // create or update gpu-info configmap configmap := &corev1.ConfigMap{} if clientSet != nil { configmap, err = clientSet.CoreV1().ConfigMaps(GPUInfoNameSpace).Get(ctx, GPUInfo, metaV1.GetOptions{}) } else { err = r.Get(ctx, types.NamespacedName{Name: GPUInfo, Namespace: GPUInfoNameSpace}, configmap) } if errors.IsNotFound(err) { configmap = &corev1.ConfigMap{ ObjectMeta: metaV1.ObjectMeta{ Name: GPUInfo, Namespace: GPUInfoNameSpace, }, Data: map[string]string{ GPU: nodeMapStr, }, } if err := r.Create(ctx, configmap); err != nil { r.Logger.Error(err, "failed to create gpu-info configmap") return ctrl.Result{}, err } } else if err != nil { r.Logger.Error(err, "failed to get gpu-info configmap") return ctrl.Result{}, err } if configmap.Data == nil { configmap.Data = map[string]string{} } if configmap.Data[GPU] != nodeMapStr { configmap.Data[GPU] = nodeMapStr if err := r.Update(ctx, configmap); err != nil && !errors.IsConflict(err) { r.Logger.Error(err, "failed to update gpu-info configmap") return ctrl.Result{}, err } } r.Logger.V(1).Info("gpu-info configmap status", "gpu", configmap.Data[GPU]) return ctrl.Result{}, nil } func (r *GpuReconciler) initGPUInfoCM(ctx context.Context, clientSet *kubernetes.Clientset) error { // filter for nodes that have GPU req1, _ := labels.NewRequirement(NvidiaGPUProduct, selection.Exists, []string{}) req2, _ := labels.NewRequirement(NvidiaGPUMemory, selection.Exists, []string{}) selector := labels.NewSelector().Add(*req1, *req2) listOpts := metaV1.ListOptions{ LabelSelector: selector.String(), } nodeList, err := clientSet.CoreV1().Nodes().List(ctx, listOpts) if err != nil { return err } podList := &corev1.PodList{} for _, item := range nodeList.Items { list, err := clientSet.CoreV1().Pods("").List(context.TODO(), metaV1.ListOptions{ FieldSelector: fields.OneTermEqualSelector("spec.nodeName", item.Name).String(), }) if err != nil { return err } podList.Items = append(podList.Items, list.Items...) } _, err = r.applyGPUInfoCM(ctx, nodeList, podList, clientSet) return err } // SetupWithManager sets up the controller with the Manager. 
func (r *GpuReconciler) SetupWithManager(mgr ctrl.Manager) error { r.Logger = ctrl.Log.WithName("gpu-controller") r.Logger.V(1).Info("starting gpu controller") // use clientSet to get resources from the API Server, not from Informer's cache clientSet, err := kubernetes.NewForConfig(mgr.GetConfig()) if err != nil { r.Logger.Error(err, "failed to init") return nil } // init node-gpu-info configmap r.Logger.V(1).Info("initializing node-gpu-info configmap") if err := r.initGPUInfoCM(context.Background(), clientSet); err != nil { return err } // build index for node which have GPU if err := mgr.GetFieldIndexer().IndexField(context.Background(), &corev1.Node{}, NodeIndexKey, func(rawObj client.Object) []string { node := rawObj.(*corev1.Node) if _, ok := node.Labels[NvidiaGPUProduct]; !ok { return nil } return []string{GPU} }); err != nil { return err } // build index for pod which use GPU if err := mgr.GetFieldIndexer().IndexField(context.Background(), &corev1.Pod{}, PodIndexKey, func(rawObj client.Object) []string { pod := rawObj.(*corev1.Pod) if _, ok := pod.Spec.NodeSelector[NvidiaGPUProduct]; !ok { return nil } if pod.Status.Phase == corev1.PodSucceeded { return nil } return []string{GPU} }); err != nil { return err } return ctrl.NewControllerManagedBy(mgr). For(&corev1.Pod{}, builder.WithPredicates(predicate.Funcs{ CreateFunc: func(event event.CreateEvent) bool { return useGPU(event.Object) }, UpdateFunc: func(event event.UpdateEvent) bool { _, ok := event.ObjectNew.(*corev1.Pod).Spec.NodeSelector[NvidiaGPUProduct] if !ok { return false } phaseOld := event.ObjectOld.(*corev1.Pod).Status.Phase phaseNew := event.ObjectNew.(*corev1.Pod).Status.Phase return phaseOld != phaseNew }, DeleteFunc: func(event event.DeleteEvent) bool { return useGPU(event.Object) }, })). Watches(&source.Kind{Type: &corev1.Node{}}, &handler.EnqueueRequestForObject{}, builder.WithPredicates(predicate.Funcs{ CreateFunc: func(event event.CreateEvent) bool { return hasGPU(event.Object) },
oldVal, oldOk := event.ObjectOld.(*corev1.Node).Status.Allocatable[NvidiaGPU] newVal, newOk := event.ObjectNew.(*corev1.Node).Status.Allocatable[NvidiaGPU] return oldOk && newOk && oldVal != newVal }, DeleteFunc: func(event event.DeleteEvent) bool { return hasGPU(event.Object) }, })). Complete(r) } func useGPU(obj client.Object) bool { _, ok := obj.(*corev1.Pod).Spec.NodeSelector[NvidiaGPUProduct] return ok } func hasGPU(obj client.Object) bool { _, ok1 := obj.(*corev1.Node).Labels[NvidiaGPUMemory] _, ok2 := obj.(*corev1.Node).Labels[NvidiaGPUProduct] _, ok3 := obj.(*corev1.Node).Status.Allocatable[NvidiaGPU] return ok1 && ok2 && ok3 }
UpdateFunc: func(event event.UpdateEvent) bool {
random_line_split
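The reconciler in this record starts from each node's allocatable nvidia.com/gpu and subtracts the GPU limits of every pod scheduled on it that has not yet succeeded. A plain-Python sketch of that bookkeeping with made-up node and pod data (the real Go code reads these values from node status and container resource limits):

# Made-up inputs standing in for node.Status.Allocatable and pod container limits.
allocatable = {"gpu-node-1": 4, "gpu-node-2": 2}
pods = [
    {"node": "gpu-node-1", "phase": "Running",   "gpu_limit": 1},
    {"node": "gpu-node-1", "phase": "Succeeded", "gpu_limit": 2},  # finished, ignored
    {"node": "gpu-node-2", "phase": "Running",   "gpu_limit": 2},
]

remaining = dict(allocatable)
for pod in pods:
    if pod["phase"] == "Succeeded":
        continue                       # succeeded pods no longer hold GPUs
    if pod["node"] in remaining:
        remaining[pod["node"]] -= pod["gpu_limit"]

print(remaining)   # {'gpu-node-1': 3, 'gpu-node-2': 0}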
gpu_controller.go
/* Copyright 2023. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package controllers import ( "context" "encoding/json" "strconv" "github.com/go-logr/logr" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metaV1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/selection" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/kubernetes" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/source" ) type GpuReconciler struct { client.Client Scheme *runtime.Scheme Logger logr.Logger } const ( GPU = "gpu" GPUInfo = "node-gpu-info" GPUInfoNameSpace = "node-system" NvidiaGPUProduct = "nvidia.com/gpu.product" NvidiaGPUMemory = "nvidia.com/gpu.memory" NvidiaGPU corev1.ResourceName = "nvidia.com/gpu" GPUProduct = "gpu.product" GPUCount = "gpu.count" GPUMemory = "gpu.memory" NodeIndexKey = "node" PodIndexKey = "pod" ) //+kubebuilder:rbac:groups=node.k8s.io,resources=gpus,verbs=get;list;watch;create;update;patch;delete //+kubebuilder:rbac:groups=node.k8s.io,resources=gpus/status,verbs=get;update;patch //+kubebuilder:rbac:groups=node.k8s.io,resources=gpus/finalizers,verbs=update //+kubebuilder:rbac:groups="",resources=nodes,verbs=get;list;watch;create;update;patch;delete //+kubebuilder:rbac:groups="",resources=nodes/status,verbs=get;list;watch;create;update;patch;delete //+kubebuilder:rbac:groups="",resources=configmaps,verbs=get;list;watch;create;update;patch;delete //+kubebuilder:rbac:groups="",resources=pods,verbs=get;list;watch;create;update;patch;delete func (r *GpuReconciler) Reconcile(ctx context.Context, _ ctrl.Request) (ctrl.Result, error) { nodeList := &corev1.NodeList{} if err := r.List(ctx, nodeList, client.MatchingFields{NodeIndexKey: GPU}); err != nil { r.Logger.Error(err, "failed to get node list") return ctrl.Result{}, err } podList := &corev1.PodList{} if err := r.List(ctx, podList, client.MatchingFields{PodIndexKey: GPU}); err != nil { r.Logger.Error(err, "failed to get pod list") return ctrl.Result{}, err } return r.applyGPUInfoCM(ctx, nodeList, podList, nil) } func (r *GpuReconciler) applyGPUInfoCM(ctx context.Context, nodeList *corev1.NodeList, podList *corev1.PodList, clientSet *kubernetes.Clientset) (ctrl.Result, error) { /* "nodeMap": { "sealos-poc-gpu-master-0":{}, "sealos-poc-gpu-node-1":{"gpu.count":"1","gpu.memory":"15360","gpu.product":"Tesla-T4"}} } */ nodeMap := make(map[string]map[string]string) var nodeName string // get the GPU product, GPU memory, GPU allocatable number on the node for _, node := range nodeList.Items { nodeName = node.Name if _, ok := nodeMap[nodeName]; !ok { nodeMap[nodeName] = make(map[string]string) } gpuProduct, ok1 := node.Labels[NvidiaGPUProduct] gpuMemory, ok2 := node.Labels[NvidiaGPUMemory] gpuCount, ok3 := 
node.Status.Allocatable[NvidiaGPU] if !ok1 || !ok2 || !ok3 { continue } nodeMap[nodeName][GPUProduct] = gpuProduct nodeMap[nodeName][GPUMemory] = gpuMemory nodeMap[nodeName][GPUCount] = gpuCount.String() } // get the number of GPU used by pods that are using GPU for _, pod := range podList.Items { phase := pod.Status.Phase if phase == corev1.PodSucceeded { continue } nodeName = pod.Spec.NodeName _, ok1 := nodeMap[nodeName] gpuProduct, ok2 := pod.Spec.NodeSelector[NvidiaGPUProduct] if !ok1 || !ok2 { continue } containers := pod.Spec.Containers for _, container := range containers { gpuCount, ok := container.Resources.Limits[NvidiaGPU] if !ok { continue } r.Logger.V(1).Info("pod using GPU", "name", pod.Name, "namespace", pod.Namespace, "gpuCount", gpuCount, "gpuProduct", gpuProduct) oldCount, err := strconv.ParseInt(nodeMap[nodeName][GPUCount], 10, 64) if err != nil { r.Logger.Error(err, "failed to parse gpu.count string to int64") return ctrl.Result{}, err } newCount := oldCount - gpuCount.Value() nodeMap[nodeName][GPUCount] = strconv.FormatInt(newCount, 10) } } // marshal node map to JSON string nodeMapBytes, err := json.Marshal(nodeMap) if err != nil { r.Logger.Error(err, "failed to marshal node map to JSON string") return ctrl.Result{}, err } nodeMapStr := string(nodeMapBytes) // create or update gpu-info configmap configmap := &corev1.ConfigMap{} if clientSet != nil { configmap, err = clientSet.CoreV1().ConfigMaps(GPUInfoNameSpace).Get(ctx, GPUInfo, metaV1.GetOptions{}) } else { err = r.Get(ctx, types.NamespacedName{Name: GPUInfo, Namespace: GPUInfoNameSpace}, configmap) } if errors.IsNotFound(err) { configmap = &corev1.ConfigMap{ ObjectMeta: metaV1.ObjectMeta{ Name: GPUInfo, Namespace: GPUInfoNameSpace, }, Data: map[string]string{ GPU: nodeMapStr, }, } if err := r.Create(ctx, configmap); err != nil { r.Logger.Error(err, "failed to create gpu-info configmap") return ctrl.Result{}, err } } else if err != nil { r.Logger.Error(err, "failed to get gpu-info configmap") return ctrl.Result{}, err } if configmap.Data == nil { configmap.Data = map[string]string{} } if configmap.Data[GPU] != nodeMapStr
r.Logger.V(1).Info("gpu-info configmap status", "gpu", configmap.Data[GPU]) return ctrl.Result{}, nil } func (r *GpuReconciler) initGPUInfoCM(ctx context.Context, clientSet *kubernetes.Clientset) error { // filter for nodes that have GPU req1, _ := labels.NewRequirement(NvidiaGPUProduct, selection.Exists, []string{}) req2, _ := labels.NewRequirement(NvidiaGPUMemory, selection.Exists, []string{}) selector := labels.NewSelector().Add(*req1, *req2) listOpts := metaV1.ListOptions{ LabelSelector: selector.String(), } nodeList, err := clientSet.CoreV1().Nodes().List(ctx, listOpts) if err != nil { return err } podList := &corev1.PodList{} for _, item := range nodeList.Items { list, err := clientSet.CoreV1().Pods("").List(context.TODO(), metaV1.ListOptions{ FieldSelector: fields.OneTermEqualSelector("spec.nodeName", item.Name).String(), }) if err != nil { return err } podList.Items = append(podList.Items, list.Items...) } _, err = r.applyGPUInfoCM(ctx, nodeList, podList, clientSet) return err } // SetupWithManager sets up the controller with the Manager. func (r *GpuReconciler) SetupWithManager(mgr ctrl.Manager) error { r.Logger = ctrl.Log.WithName("gpu-controller") r.Logger.V(1).Info("starting gpu controller") // use clientSet to get resources from the API Server, not from Informer's cache clientSet, err := kubernetes.NewForConfig(mgr.GetConfig()) if err != nil { r.Logger.Error(err, "failed to init") return nil } // init node-gpu-info configmap r.Logger.V(1).Info("initializing node-gpu-info configmap") if err := r.initGPUInfoCM(context.Background(), clientSet); err != nil { return err } // build index for node which have GPU if err := mgr.GetFieldIndexer().IndexField(context.Background(), &corev1.Node{}, NodeIndexKey, func(rawObj client.Object) []string { node := rawObj.(*corev1.Node) if _, ok := node.Labels[NvidiaGPUProduct]; !ok { return nil } return []string{GPU} }); err != nil { return err } // build index for pod which use GPU if err := mgr.GetFieldIndexer().IndexField(context.Background(), &corev1.Pod{}, PodIndexKey, func(rawObj client.Object) []string { pod := rawObj.(*corev1.Pod) if _, ok := pod.Spec.NodeSelector[NvidiaGPUProduct]; !ok { return nil } if pod.Status.Phase == corev1.PodSucceeded { return nil } return []string{GPU} }); err != nil { return err } return ctrl.NewControllerManagedBy(mgr). For(&corev1.Pod{}, builder.WithPredicates(predicate.Funcs{ CreateFunc: func(event event.CreateEvent) bool { return useGPU(event.Object) }, UpdateFunc: func(event event.UpdateEvent) bool { _, ok := event.ObjectNew.(*corev1.Pod).Spec.NodeSelector[NvidiaGPUProduct] if !ok { return false } phaseOld := event.ObjectOld.(*corev1.Pod).Status.Phase phaseNew := event.ObjectNew.(*corev1.Pod).Status.Phase return phaseOld != phaseNew }, DeleteFunc: func(event event.DeleteEvent) bool { return useGPU(event.Object) }, })). Watches(&source.Kind{Type: &corev1.Node{}}, &handler.EnqueueRequestForObject{}, builder.WithPredicates(predicate.Funcs{ CreateFunc: func(event event.CreateEvent) bool { return hasGPU(event.Object) }, UpdateFunc: func(event event.UpdateEvent) bool { oldVal, oldOk := event.ObjectOld.(*corev1.Node).Status.Allocatable[NvidiaGPU] newVal, newOk := event.ObjectNew.(*corev1.Node).Status.Allocatable[NvidiaGPU] return oldOk && newOk && oldVal != newVal }, DeleteFunc: func(event event.DeleteEvent) bool { return hasGPU(event.Object) }, })). 
Complete(r) } func useGPU(obj client.Object) bool { _, ok := obj.(*corev1.Pod).Spec.NodeSelector[NvidiaGPUProduct] return ok } func hasGPU(obj client.Object) bool { _, ok1 := obj.(*corev1.Node).Labels[NvidiaGPUMemory] _, ok2 := obj.(*corev1.Node).Labels[NvidiaGPUProduct] _, ok3 := obj.(*corev1.Node).Status.Allocatable[NvidiaGPU] return ok1 && ok2 && ok3 }
{
	configmap.Data[GPU] = nodeMapStr
	if err := r.Update(ctx, configmap); err != nil && !errors.IsConflict(err) {
		r.Logger.Error(err, "failed to update gpu-info configmap")
		return ctrl.Result{}, err
	}
}
conditional_block
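The conditional_block middle is the write path: the configmap is rewritten only when the serialized node map differs from what is already stored, and conflict errors from concurrent updates are tolerated. A small Python sketch of the same change-detection idea against an in-memory dict (the apply_gpu_info helper is hypothetical; no Kubernetes client is involved):

import json

def apply_gpu_info(store, node_map):
    # Serialize deterministically so equal maps compare equal as strings.
    serialized = json.dumps(node_map, sort_keys=True)
    if store.get("gpu") == serialized:
        return False           # unchanged: skip the (expensive) update call
    store["gpu"] = serialized  # here the real controller would call Update()
    return True

store = {}
print(apply_gpu_info(store, {"node-1": {"gpu.count": "1"}}))  # True  (created)
print(apply_gpu_info(store, {"node-1": {"gpu.count": "1"}}))  # False (no change)
print(apply_gpu_info(store, {"node-1": {"gpu.count": "0"}}))  # True  (updated)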
gpu_controller.go
/* Copyright 2023. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package controllers import ( "context" "encoding/json" "strconv" "github.com/go-logr/logr" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metaV1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/selection" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/kubernetes" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/source" ) type GpuReconciler struct { client.Client Scheme *runtime.Scheme Logger logr.Logger } const ( GPU = "gpu" GPUInfo = "node-gpu-info" GPUInfoNameSpace = "node-system" NvidiaGPUProduct = "nvidia.com/gpu.product" NvidiaGPUMemory = "nvidia.com/gpu.memory" NvidiaGPU corev1.ResourceName = "nvidia.com/gpu" GPUProduct = "gpu.product" GPUCount = "gpu.count" GPUMemory = "gpu.memory" NodeIndexKey = "node" PodIndexKey = "pod" ) //+kubebuilder:rbac:groups=node.k8s.io,resources=gpus,verbs=get;list;watch;create;update;patch;delete //+kubebuilder:rbac:groups=node.k8s.io,resources=gpus/status,verbs=get;update;patch //+kubebuilder:rbac:groups=node.k8s.io,resources=gpus/finalizers,verbs=update //+kubebuilder:rbac:groups="",resources=nodes,verbs=get;list;watch;create;update;patch;delete //+kubebuilder:rbac:groups="",resources=nodes/status,verbs=get;list;watch;create;update;patch;delete //+kubebuilder:rbac:groups="",resources=configmaps,verbs=get;list;watch;create;update;patch;delete //+kubebuilder:rbac:groups="",resources=pods,verbs=get;list;watch;create;update;patch;delete func (r *GpuReconciler) Reconcile(ctx context.Context, _ ctrl.Request) (ctrl.Result, error) { nodeList := &corev1.NodeList{} if err := r.List(ctx, nodeList, client.MatchingFields{NodeIndexKey: GPU}); err != nil { r.Logger.Error(err, "failed to get node list") return ctrl.Result{}, err } podList := &corev1.PodList{} if err := r.List(ctx, podList, client.MatchingFields{PodIndexKey: GPU}); err != nil { r.Logger.Error(err, "failed to get pod list") return ctrl.Result{}, err } return r.applyGPUInfoCM(ctx, nodeList, podList, nil) } func (r *GpuReconciler) applyGPUInfoCM(ctx context.Context, nodeList *corev1.NodeList, podList *corev1.PodList, clientSet *kubernetes.Clientset) (ctrl.Result, error)
func (r *GpuReconciler) initGPUInfoCM(ctx context.Context, clientSet *kubernetes.Clientset) error { // filter for nodes that have GPU req1, _ := labels.NewRequirement(NvidiaGPUProduct, selection.Exists, []string{}) req2, _ := labels.NewRequirement(NvidiaGPUMemory, selection.Exists, []string{}) selector := labels.NewSelector().Add(*req1, *req2) listOpts := metaV1.ListOptions{ LabelSelector: selector.String(), } nodeList, err := clientSet.CoreV1().Nodes().List(ctx, listOpts) if err != nil { return err } podList := &corev1.PodList{} for _, item := range nodeList.Items { list, err := clientSet.CoreV1().Pods("").List(context.TODO(), metaV1.ListOptions{ FieldSelector: fields.OneTermEqualSelector("spec.nodeName", item.Name).String(), }) if err != nil { return err } podList.Items = append(podList.Items, list.Items...) } _, err = r.applyGPUInfoCM(ctx, nodeList, podList, clientSet) return err } // SetupWithManager sets up the controller with the Manager. func (r *GpuReconciler) SetupWithManager(mgr ctrl.Manager) error { r.Logger = ctrl.Log.WithName("gpu-controller") r.Logger.V(1).Info("starting gpu controller") // use clientSet to get resources from the API Server, not from Informer's cache clientSet, err := kubernetes.NewForConfig(mgr.GetConfig()) if err != nil { r.Logger.Error(err, "failed to init") return nil } // init node-gpu-info configmap r.Logger.V(1).Info("initializing node-gpu-info configmap") if err := r.initGPUInfoCM(context.Background(), clientSet); err != nil { return err } // build index for node which have GPU if err := mgr.GetFieldIndexer().IndexField(context.Background(), &corev1.Node{}, NodeIndexKey, func(rawObj client.Object) []string { node := rawObj.(*corev1.Node) if _, ok := node.Labels[NvidiaGPUProduct]; !ok { return nil } return []string{GPU} }); err != nil { return err } // build index for pod which use GPU if err := mgr.GetFieldIndexer().IndexField(context.Background(), &corev1.Pod{}, PodIndexKey, func(rawObj client.Object) []string { pod := rawObj.(*corev1.Pod) if _, ok := pod.Spec.NodeSelector[NvidiaGPUProduct]; !ok { return nil } if pod.Status.Phase == corev1.PodSucceeded { return nil } return []string{GPU} }); err != nil { return err } return ctrl.NewControllerManagedBy(mgr). For(&corev1.Pod{}, builder.WithPredicates(predicate.Funcs{ CreateFunc: func(event event.CreateEvent) bool { return useGPU(event.Object) }, UpdateFunc: func(event event.UpdateEvent) bool { _, ok := event.ObjectNew.(*corev1.Pod).Spec.NodeSelector[NvidiaGPUProduct] if !ok { return false } phaseOld := event.ObjectOld.(*corev1.Pod).Status.Phase phaseNew := event.ObjectNew.(*corev1.Pod).Status.Phase return phaseOld != phaseNew }, DeleteFunc: func(event event.DeleteEvent) bool { return useGPU(event.Object) }, })). Watches(&source.Kind{Type: &corev1.Node{}}, &handler.EnqueueRequestForObject{}, builder.WithPredicates(predicate.Funcs{ CreateFunc: func(event event.CreateEvent) bool { return hasGPU(event.Object) }, UpdateFunc: func(event event.UpdateEvent) bool { oldVal, oldOk := event.ObjectOld.(*corev1.Node).Status.Allocatable[NvidiaGPU] newVal, newOk := event.ObjectNew.(*corev1.Node).Status.Allocatable[NvidiaGPU] return oldOk && newOk && oldVal != newVal }, DeleteFunc: func(event event.DeleteEvent) bool { return hasGPU(event.Object) }, })). 
Complete(r) } func useGPU(obj client.Object) bool { _, ok := obj.(*corev1.Pod).Spec.NodeSelector[NvidiaGPUProduct] return ok } func hasGPU(obj client.Object) bool { _, ok1 := obj.(*corev1.Node).Labels[NvidiaGPUMemory] _, ok2 := obj.(*corev1.Node).Labels[NvidiaGPUProduct] _, ok3 := obj.(*corev1.Node).Status.Allocatable[NvidiaGPU] return ok1 && ok2 && ok3 }
{ /* "nodeMap": { "sealos-poc-gpu-master-0":{}, "sealos-poc-gpu-node-1":{"gpu.count":"1","gpu.memory":"15360","gpu.product":"Tesla-T4"}} } */ nodeMap := make(map[string]map[string]string) var nodeName string // get the GPU product, GPU memory, GPU allocatable number on the node for _, node := range nodeList.Items { nodeName = node.Name if _, ok := nodeMap[nodeName]; !ok { nodeMap[nodeName] = make(map[string]string) } gpuProduct, ok1 := node.Labels[NvidiaGPUProduct] gpuMemory, ok2 := node.Labels[NvidiaGPUMemory] gpuCount, ok3 := node.Status.Allocatable[NvidiaGPU] if !ok1 || !ok2 || !ok3 { continue } nodeMap[nodeName][GPUProduct] = gpuProduct nodeMap[nodeName][GPUMemory] = gpuMemory nodeMap[nodeName][GPUCount] = gpuCount.String() } // get the number of GPU used by pods that are using GPU for _, pod := range podList.Items { phase := pod.Status.Phase if phase == corev1.PodSucceeded { continue } nodeName = pod.Spec.NodeName _, ok1 := nodeMap[nodeName] gpuProduct, ok2 := pod.Spec.NodeSelector[NvidiaGPUProduct] if !ok1 || !ok2 { continue } containers := pod.Spec.Containers for _, container := range containers { gpuCount, ok := container.Resources.Limits[NvidiaGPU] if !ok { continue } r.Logger.V(1).Info("pod using GPU", "name", pod.Name, "namespace", pod.Namespace, "gpuCount", gpuCount, "gpuProduct", gpuProduct) oldCount, err := strconv.ParseInt(nodeMap[nodeName][GPUCount], 10, 64) if err != nil { r.Logger.Error(err, "failed to parse gpu.count string to int64") return ctrl.Result{}, err } newCount := oldCount - gpuCount.Value() nodeMap[nodeName][GPUCount] = strconv.FormatInt(newCount, 10) } } // marshal node map to JSON string nodeMapBytes, err := json.Marshal(nodeMap) if err != nil { r.Logger.Error(err, "failed to marshal node map to JSON string") return ctrl.Result{}, err } nodeMapStr := string(nodeMapBytes) // create or update gpu-info configmap configmap := &corev1.ConfigMap{} if clientSet != nil { configmap, err = clientSet.CoreV1().ConfigMaps(GPUInfoNameSpace).Get(ctx, GPUInfo, metaV1.GetOptions{}) } else { err = r.Get(ctx, types.NamespacedName{Name: GPUInfo, Namespace: GPUInfoNameSpace}, configmap) } if errors.IsNotFound(err) { configmap = &corev1.ConfigMap{ ObjectMeta: metaV1.ObjectMeta{ Name: GPUInfo, Namespace: GPUInfoNameSpace, }, Data: map[string]string{ GPU: nodeMapStr, }, } if err := r.Create(ctx, configmap); err != nil { r.Logger.Error(err, "failed to create gpu-info configmap") return ctrl.Result{}, err } } else if err != nil { r.Logger.Error(err, "failed to get gpu-info configmap") return ctrl.Result{}, err } if configmap.Data == nil { configmap.Data = map[string]string{} } if configmap.Data[GPU] != nodeMapStr { configmap.Data[GPU] = nodeMapStr if err := r.Update(ctx, configmap); err != nil && !errors.IsConflict(err) { r.Logger.Error(err, "failed to update gpu-info configmap") return ctrl.Result{}, err } } r.Logger.V(1).Info("gpu-info configmap status", "gpu", configmap.Data[GPU]) return ctrl.Result{}, nil }
identifier_body
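The function body captured as the identifier_body middle documents the configmap payload in its leading comment: one JSON object per node, empty for nodes without the NVIDIA labels. The stored value looks roughly like the sketch below; the concrete product, memory, and count values are illustrative only.

import json

# Shape of the "gpu" key in the node-gpu-info configmap, per the comment in
# applyGPUInfoCM; the values shown here are examples, not real cluster data.
node_map = {
    "sealos-poc-gpu-master-0": {},
    "sealos-poc-gpu-node-1": {
        "gpu.product": "Tesla-T4",
        "gpu.memory": "15360",
        "gpu.count": "1",
    },
}
print(json.dumps(node_map, indent=2))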
gpu_controller.go
/* Copyright 2023. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package controllers import ( "context" "encoding/json" "strconv" "github.com/go-logr/logr" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metaV1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/selection" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/kubernetes" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/source" ) type GpuReconciler struct { client.Client Scheme *runtime.Scheme Logger logr.Logger } const ( GPU = "gpu" GPUInfo = "node-gpu-info" GPUInfoNameSpace = "node-system" NvidiaGPUProduct = "nvidia.com/gpu.product" NvidiaGPUMemory = "nvidia.com/gpu.memory" NvidiaGPU corev1.ResourceName = "nvidia.com/gpu" GPUProduct = "gpu.product" GPUCount = "gpu.count" GPUMemory = "gpu.memory" NodeIndexKey = "node" PodIndexKey = "pod" ) //+kubebuilder:rbac:groups=node.k8s.io,resources=gpus,verbs=get;list;watch;create;update;patch;delete //+kubebuilder:rbac:groups=node.k8s.io,resources=gpus/status,verbs=get;update;patch //+kubebuilder:rbac:groups=node.k8s.io,resources=gpus/finalizers,verbs=update //+kubebuilder:rbac:groups="",resources=nodes,verbs=get;list;watch;create;update;patch;delete //+kubebuilder:rbac:groups="",resources=nodes/status,verbs=get;list;watch;create;update;patch;delete //+kubebuilder:rbac:groups="",resources=configmaps,verbs=get;list;watch;create;update;patch;delete //+kubebuilder:rbac:groups="",resources=pods,verbs=get;list;watch;create;update;patch;delete func (r *GpuReconciler) Reconcile(ctx context.Context, _ ctrl.Request) (ctrl.Result, error) { nodeList := &corev1.NodeList{} if err := r.List(ctx, nodeList, client.MatchingFields{NodeIndexKey: GPU}); err != nil { r.Logger.Error(err, "failed to get node list") return ctrl.Result{}, err } podList := &corev1.PodList{} if err := r.List(ctx, podList, client.MatchingFields{PodIndexKey: GPU}); err != nil { r.Logger.Error(err, "failed to get pod list") return ctrl.Result{}, err } return r.applyGPUInfoCM(ctx, nodeList, podList, nil) } func (r *GpuReconciler)
(ctx context.Context, nodeList *corev1.NodeList, podList *corev1.PodList, clientSet *kubernetes.Clientset) (ctrl.Result, error) { /* "nodeMap": { "sealos-poc-gpu-master-0":{}, "sealos-poc-gpu-node-1":{"gpu.count":"1","gpu.memory":"15360","gpu.product":"Tesla-T4"}} } */ nodeMap := make(map[string]map[string]string) var nodeName string // get the GPU product, GPU memory, GPU allocatable number on the node for _, node := range nodeList.Items { nodeName = node.Name if _, ok := nodeMap[nodeName]; !ok { nodeMap[nodeName] = make(map[string]string) } gpuProduct, ok1 := node.Labels[NvidiaGPUProduct] gpuMemory, ok2 := node.Labels[NvidiaGPUMemory] gpuCount, ok3 := node.Status.Allocatable[NvidiaGPU] if !ok1 || !ok2 || !ok3 { continue } nodeMap[nodeName][GPUProduct] = gpuProduct nodeMap[nodeName][GPUMemory] = gpuMemory nodeMap[nodeName][GPUCount] = gpuCount.String() } // get the number of GPU used by pods that are using GPU for _, pod := range podList.Items { phase := pod.Status.Phase if phase == corev1.PodSucceeded { continue } nodeName = pod.Spec.NodeName _, ok1 := nodeMap[nodeName] gpuProduct, ok2 := pod.Spec.NodeSelector[NvidiaGPUProduct] if !ok1 || !ok2 { continue } containers := pod.Spec.Containers for _, container := range containers { gpuCount, ok := container.Resources.Limits[NvidiaGPU] if !ok { continue } r.Logger.V(1).Info("pod using GPU", "name", pod.Name, "namespace", pod.Namespace, "gpuCount", gpuCount, "gpuProduct", gpuProduct) oldCount, err := strconv.ParseInt(nodeMap[nodeName][GPUCount], 10, 64) if err != nil { r.Logger.Error(err, "failed to parse gpu.count string to int64") return ctrl.Result{}, err } newCount := oldCount - gpuCount.Value() nodeMap[nodeName][GPUCount] = strconv.FormatInt(newCount, 10) } } // marshal node map to JSON string nodeMapBytes, err := json.Marshal(nodeMap) if err != nil { r.Logger.Error(err, "failed to marshal node map to JSON string") return ctrl.Result{}, err } nodeMapStr := string(nodeMapBytes) // create or update gpu-info configmap configmap := &corev1.ConfigMap{} if clientSet != nil { configmap, err = clientSet.CoreV1().ConfigMaps(GPUInfoNameSpace).Get(ctx, GPUInfo, metaV1.GetOptions{}) } else { err = r.Get(ctx, types.NamespacedName{Name: GPUInfo, Namespace: GPUInfoNameSpace}, configmap) } if errors.IsNotFound(err) { configmap = &corev1.ConfigMap{ ObjectMeta: metaV1.ObjectMeta{ Name: GPUInfo, Namespace: GPUInfoNameSpace, }, Data: map[string]string{ GPU: nodeMapStr, }, } if err := r.Create(ctx, configmap); err != nil { r.Logger.Error(err, "failed to create gpu-info configmap") return ctrl.Result{}, err } } else if err != nil { r.Logger.Error(err, "failed to get gpu-info configmap") return ctrl.Result{}, err } if configmap.Data == nil { configmap.Data = map[string]string{} } if configmap.Data[GPU] != nodeMapStr { configmap.Data[GPU] = nodeMapStr if err := r.Update(ctx, configmap); err != nil && !errors.IsConflict(err) { r.Logger.Error(err, "failed to update gpu-info configmap") return ctrl.Result{}, err } } r.Logger.V(1).Info("gpu-info configmap status", "gpu", configmap.Data[GPU]) return ctrl.Result{}, nil } func (r *GpuReconciler) initGPUInfoCM(ctx context.Context, clientSet *kubernetes.Clientset) error { // filter for nodes that have GPU req1, _ := labels.NewRequirement(NvidiaGPUProduct, selection.Exists, []string{}) req2, _ := labels.NewRequirement(NvidiaGPUMemory, selection.Exists, []string{}) selector := labels.NewSelector().Add(*req1, *req2) listOpts := metaV1.ListOptions{ LabelSelector: selector.String(), } nodeList, err := 
clientSet.CoreV1().Nodes().List(ctx, listOpts) if err != nil { return err } podList := &corev1.PodList{} for _, item := range nodeList.Items { list, err := clientSet.CoreV1().Pods("").List(context.TODO(), metaV1.ListOptions{ FieldSelector: fields.OneTermEqualSelector("spec.nodeName", item.Name).String(), }) if err != nil { return err } podList.Items = append(podList.Items, list.Items...) } _, err = r.applyGPUInfoCM(ctx, nodeList, podList, clientSet) return err } // SetupWithManager sets up the controller with the Manager. func (r *GpuReconciler) SetupWithManager(mgr ctrl.Manager) error { r.Logger = ctrl.Log.WithName("gpu-controller") r.Logger.V(1).Info("starting gpu controller") // use clientSet to get resources from the API Server, not from Informer's cache clientSet, err := kubernetes.NewForConfig(mgr.GetConfig()) if err != nil { r.Logger.Error(err, "failed to init") return nil } // init node-gpu-info configmap r.Logger.V(1).Info("initializing node-gpu-info configmap") if err := r.initGPUInfoCM(context.Background(), clientSet); err != nil { return err } // build index for node which have GPU if err := mgr.GetFieldIndexer().IndexField(context.Background(), &corev1.Node{}, NodeIndexKey, func(rawObj client.Object) []string { node := rawObj.(*corev1.Node) if _, ok := node.Labels[NvidiaGPUProduct]; !ok { return nil } return []string{GPU} }); err != nil { return err } // build index for pod which use GPU if err := mgr.GetFieldIndexer().IndexField(context.Background(), &corev1.Pod{}, PodIndexKey, func(rawObj client.Object) []string { pod := rawObj.(*corev1.Pod) if _, ok := pod.Spec.NodeSelector[NvidiaGPUProduct]; !ok { return nil } if pod.Status.Phase == corev1.PodSucceeded { return nil } return []string{GPU} }); err != nil { return err } return ctrl.NewControllerManagedBy(mgr). For(&corev1.Pod{}, builder.WithPredicates(predicate.Funcs{ CreateFunc: func(event event.CreateEvent) bool { return useGPU(event.Object) }, UpdateFunc: func(event event.UpdateEvent) bool { _, ok := event.ObjectNew.(*corev1.Pod).Spec.NodeSelector[NvidiaGPUProduct] if !ok { return false } phaseOld := event.ObjectOld.(*corev1.Pod).Status.Phase phaseNew := event.ObjectNew.(*corev1.Pod).Status.Phase return phaseOld != phaseNew }, DeleteFunc: func(event event.DeleteEvent) bool { return useGPU(event.Object) }, })). Watches(&source.Kind{Type: &corev1.Node{}}, &handler.EnqueueRequestForObject{}, builder.WithPredicates(predicate.Funcs{ CreateFunc: func(event event.CreateEvent) bool { return hasGPU(event.Object) }, UpdateFunc: func(event event.UpdateEvent) bool { oldVal, oldOk := event.ObjectOld.(*corev1.Node).Status.Allocatable[NvidiaGPU] newVal, newOk := event.ObjectNew.(*corev1.Node).Status.Allocatable[NvidiaGPU] return oldOk && newOk && oldVal != newVal }, DeleteFunc: func(event event.DeleteEvent) bool { return hasGPU(event.Object) }, })). Complete(r) } func useGPU(obj client.Object) bool { _, ok := obj.(*corev1.Pod).Spec.NodeSelector[NvidiaGPUProduct] return ok } func hasGPU(obj client.Object) bool { _, ok1 := obj.(*corev1.Node).Labels[NvidiaGPUMemory] _, ok2 := obj.(*corev1.Node).Labels[NvidiaGPUProduct] _, ok3 := obj.(*corev1.Node).Status.Allocatable[NvidiaGPU] return ok1 && ok2 && ok3 }
applyGPUInfoCM
identifier_name
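The watch predicates in this sample decide when a node event is worth a reconcile: only when the allocatable nvidia.com/gpu quantity is present on both the old and new objects and has actually changed. The same rule, reduced to a small Python function over plain dicts standing in for the corev1.Node status (inputs are assumptions):

def node_update_needs_reconcile(old_allocatable, new_allocatable,
                                key="nvidia.com/gpu"):
    old_val = old_allocatable.get(key)
    new_val = new_allocatable.get(key)
    # Reconcile only if both objects expose the resource and the value moved.
    return old_val is not None and new_val is not None and old_val != new_val

print(node_update_needs_reconcile({"nvidia.com/gpu": "2"}, {"nvidia.com/gpu": "1"}))  # True
print(node_update_needs_reconcile({"nvidia.com/gpu": "2"}, {"nvidia.com/gpu": "2"}))  # False
print(node_update_needs_reconcile({}, {"nvidia.com/gpu": "2"}))                       # False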
ThriftHiveMock.js
var hive_service_types = require('shib/engines/hiveserver/hive_service_types'), hive_metastore_types = require('shib/engines/hiveserver/hive_metastore_types'), queryplan_types = require('shib/engines/hiveserver/queryplan_types'); var chars = [ 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', '-', ' ', '_', '\'', '"', '?', '!', '=', '+', '/', '.', ',' ]; var namechars = [ 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9' ]; var alphabet_namechars = [ 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z' ]; var kana_chars = [ 'あ', 'い', 'う', 'え', 'お', 'か', 'き', 'く', 'け', 'こ', 'さ', 'し', 'す', 'せ', 'そ', 'た', 'ち', 'つ', 'て', 'と', 'な', 'に', 'ぬ', 'ね', 'の', 'は', 'ひ', 'ふ', 'へ', 'ほ', 'ま', 'み', 'む', 'め', 'も', 'や', 'ゆ', 'よ', 'ら', 'り', 'る', 'れ', 'ろ', 'わ', 'を', 'ん' ]; var random_num = function(max){ return Math.floor(Math.random() * max) + 1; }; var random_index = function(max){ return Math.floor(Math.random() * max); }; var choose = function(list){ return list[random_index(list.length)]; }; var random_string = function(len){ var ret = ''; for (var i = 0; i < len; i++){ ret += choose(chars); } return ret; }; var random_name = function(len){ var ret = ''; for (var i = 0; i < len; i++){ ret += choose(namechars); } return ret; }; var random_kana = function(len){ var ret = ''; for (var i = 0; i < len; i++){ ret += choose(kana_chars); } return ret; }; var random_alphabetname = function(len){ var ret = ''; for (var i = 0; i < len; i++){ ret += choose(alphabet_namechars); } return ret; }; exports.cluster_status = function(){ return new hive_service_types.HiveClusterStatus({ taskTrackers: 1, mapTasks: 0, reduceTasks: 0, maxMapTasks: 2, maxReduceTasks: 2, state: 2 }); }; var idlist = ['hadoop_20110408154949_8b2be199-02ae-40fe-9492-d197ade572f2', 'hadoop_20110408154949_8b2be199-03ff-40fe-9492-d197ade572f2', 'hadoop_20110408154949_8b2be199-04bc-40fe-9492-d197ade572f2', 'hadoop_20110408154949_8b2be199-05db-40fe-9492-d197ade572f2', 'hadoop_20110408154949_8b2be199-06fc-40fe-9492-d197ade572f2']; var queryId = function(){ return choose(idlist); }; var operator = function(num){ var o = new queryplan_types.Operator({ operatorId: 'TS_12' + num, operatorType: Math.floor(Math.random() * 10), operatorAttributes: null, operatorCounters: null, done: true, started: true }); return o; }; var operatorGraph = function(ops){ var al = []; for (var i = 0; i < ops.length - 1; i++){ al.push(new queryplan_types.Adjacency({ node: ops[i].operatorId, children: [ ops[i+1].operatorId ], adjacencyType: 0 })); } return new queryplan_types.Graph({ nodeType: 0, roots: null, adjacencyList: al }); }; var task = function(stage,mapreduce,operators){ var ops = []; for (var i = 0; i < operators; i++){ ops.push(operator(i)); } return new queryplan_types.Task({ taskId: 'Stage-' + stage + '_' + mapreduce, taskType: (mapreduce == 'MAP' ? 
0 : 1), taskAttributes: null, taskCounters: null, operatorList: ops, operatorGraph: operatorGraph(ops), done: true, started: true }); }; var stage = function(stage){ var cntr_map = 'CNTR_NAME_Stage-' + stage + '_MAP_PROGRESS'; var cntr_reduce = 'CNTR_NAME_Stage-' + stage + '_REDUCE_PROGRESS'; var counters = {}; counters[cntr_map] = 100; counters[cntr_reduce] = 100; return new queryplan_types.Stage({ stageId: 'Stage-' + stage, stageType: 3, stageAttributes: null, stageCounters: counters, taskList: [task(stage,'MAP',3), task(stage,'REDUCE',1)], done: true, started: true }); }; exports.query_plan = function(querystring){ if (querystring == undefined){ return new queryplan_types.QueryPlan({}); } var query = new queryplan_types.Query({ queryId: queryId(), queryType: null, queryAttributes: { queryString: querystring }, queryCounters: null, stageList: [stage(1), stage(2)], stageGraph: new queryplan_types.Graph({ nodeType: 1, roots: null, adjacencyList: [ new queryplan_types.Adjacency({ node: 'Stage-1', children: [ 'Stage-2' ], adjacencyType: 0 }) ] }), done: true, started: true }); return new queryplan_types.QueryPlan({ queries: [query], done: false, started: false }); }; var columns = function(query){ var match = /select (.*) from .*/im.exec(query); if (! match) throw new Error('query field definition invalid!'); return match[1].split(/, /).map(function(s){return s.trim();}); }; var columninfo = function(column){ var name = column; var type = 'string'; var ex = undefined; var match = /as ([_a-zA-Z0-9]*)$/im.exec(column); if (match){ name = match[1]; } if (/^count/im.exec(column)) { type = 'bigint'; ex = 'count'; } else if (/^(sum|avg|min|max)/im.exec(column)) { type = 'bigint'; ex = 'aggr'; } else if (/id$/.exec(name)) { type = 'bigint'; ex = 'id'; } if (/^"(.*)"$/.exec(name)) { name = /^"(.*)"$/.exec(name)[1]; ex = "strcopy"; } else if (name == 'yyyymmdd') { ex = 'date'; } else if (name == 'hhmm' || name == 'hhmmss') { ex = 'time'; } else if (/name$/i.exec(name)) { ex = 'name'; } else if (/kana$/i.exec(name)) { ex = 'kana'; } return {name: name, type: type, ex: ex}; }; exports.schema = function(query){ if (! query) { return new hive_metastore_types.Schema({}); } if (/^show (databases|tables|partitions)/i.exec(query)) { return new hive_metastore_types.Schema({ fieldSchemas: [new hive_metastore_types.FieldSchema({name: 'name', type: 'string', comment: undefined})], properties: null }); } if (/^describe/i.exec(query)) { return new hive_metastore_types.Schema({ fieldSchemas: [ new hive_metastore_types.FieldSchema({name: 'col_name', type: 'string', comment: 'from deserializer'}), new hive_metastore_types.FieldSchema({name: 'data_type', type: 'string', comment: 'from deserializer'}), new hive_metastore_types.FieldSchema({name: 'comment', type: 'string', comment: 'from deserializer'}) ], properties: null }); } var cols = columns(query.split('\n').join(' ')); return new hive_metastore_types.Schema({ fieldSchemas: cols.map(function(c){ var i = columninfo(c); return new hive_metastore_types.FieldSchema({name: i.name, type: i.type, comment: undefined}); }), properties: null }); }; var generateValue = function(colinfo){ function pad(n){return n<10 ? 
'0'+n : n;} switch(colinfo.ex) { case 'strcopy': return colinfo.name; case 'date': var d1 = new Date((new Date()).getTime() - random_num(50) * 86400 * 1000); return '' + d1.getFullYear() + pad(d1.getMonth()+1) + pad(d1.getDate()); case 'time': var d2 = new Date((new Date()).getTime() - random_num(12 * 60) * 60 * 1000); return '' + pad(d2.getHours()) + pad(d2.getMinutes()); case 'id': return random_num(500); case 'aggr': return random_num(10000); case 'count': return random_num(2000); case 'name': return random_name(random_num(10)); case 'kana': return random_kana(random_num(10)); } if (colinfo.type == 'string'){ return random_string(random_num(50)); } return random_num(100); }; var generate_tablename = exports.generate_tablename = function(){ var part_depth = choose([1,1,2,2,3,4]); var name = ''; for (var i = 0; i < part_depth; i++) { if (name.length > 0) name += '_'; name += random_alphabetname(3) + random_num(3); } return name; }; var generate_subtree = exports.generate_subtree = function(subtree_label, parent) { var parent_part = parent ? parent + '/' : ''; var current_depth_label = subtree_label; var children_label = null; if (subtree_label.indexOf('_') > -1) { var separator = subtree_label.indexOf('_'); current_depth_label = subtree_label.su
]; var partsNum = Number(matched[2]); var parts = []; for (var i = 0; i < partsNum; i++) { var current_part = parent_part + fieldname + '=' + i; if (children_label) { parts = parts.concat(generate_subtree(children_label, current_part)); } else { parts.push(current_part); } } return parts; }; exports.result = function(query){ var rows = choose([0,1,1,1,1,2,3,5,7,10,20,50]); var matched = null; if ((matched = /^show (databases|tables|partitions)( (.*))?$/i.exec(query)) != null) { if (/^databases$/i.exec(matched[1])) { /* show databases */ if (rows < 1) rows = 1; var dbs = []; for (var x = 0; x < rows; x++) { var dbname = generate_tablename(); while (dbs.indexOf(dbname) > -1) dbname = generate_tablename(); dbs.push(dbname); } return dbs; } else if (/^tables$/i.exec(matched[1])) { /* show tables */ if (rows < 1) rows = 1; var tables = []; for (var i = 0; i < rows; i++) { var name = generate_tablename(); while (tables.indexOf(name) > -1) name = generate_tablename(); tables.push(name); } return tables; } else { /* show partitions hogetable */ var tablename = matched[3]; if (! tablename) return []; return generate_subtree(tablename); } } if ((matched = /^describe (.*)$/i.exec(query)) != null) { var tname = matched[1]; var fields = []; var types = ['string', 'string', 'string', 'smallint', 'bigint', 'boolean']; if (rows < 1) rows = 1; for (var k = 0; k < rows; k++){ fields.push(random_name(10) + '\t' + choose(types) + '\t' + tname + '___' + k); } return fields; } else { var colinfos = columns(query).map(function(c){return columninfo(c);}); var limitmatch = /limit (\d+)/i.exec(query); if (colinfos.length == 1 && colinfos[0].ex == 'count') { rows = 1; } else if (limitmatch) { rows = limitmatch[1]; } var values = []; for (var j = 0; j < rows; j++){ values.push(colinfos.map(function(c){ return generateValue(c); }).join("\t")); } return values; } };
bstring(0, separator); children_label = subtree_label.substring(separator + 1); } var matched = /^([a-z]+)(\d+)$/.exec(current_depth_label); var fieldname = matched[1
conditional_block
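The mock code in the record above fabricates Hive schemas and result rows purely from the text of the query: columninfo() guesses a type from the column alias, and result() special-cases a lone count column. A minimal sketch of how those exports could be exercised — the require path and the sample query string are assumptions, not taken from the dump:

var mock = require('./ThriftHiveMock'); // hypothetical path to the module shown above

// columninfo() infers types from aliases and aggregates, so the schema for a
// count query comes back as a single bigint field named by its alias.
var query = 'select count(*) as cnt from access_log limit 5';
console.log(mock.schema(query)); // fieldSchemas: [{ name: 'cnt', type: 'bigint', ... }]

// result() notices the single count column and returns exactly one random row.
console.log(mock.result(query)); // e.g. [ '1532' ]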
ThriftHiveMock.js
var hive_service_types = require('shib/engines/hiveserver/hive_service_types'), hive_metastore_types = require('shib/engines/hiveserver/hive_metastore_types'), queryplan_types = require('shib/engines/hiveserver/queryplan_types'); var chars = [ 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', '-', ' ', '_', '\'', '"', '?', '!', '=', '+', '/', '.', ',' ]; var namechars = [ 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9' ]; var alphabet_namechars = [ 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z' ]; var kana_chars = [ 'あ', 'い', 'う', 'え', 'お', 'か', 'き', 'く', 'け', 'こ', 'さ', 'し', 'す', 'せ', 'そ', 'た', 'ち', 'つ', 'て', 'と', 'な', 'に', 'ぬ', 'ね', 'の', 'は', 'ひ', 'ふ', 'へ', 'ほ', 'ま', 'み', 'む', 'め', 'も', 'や', 'ゆ', 'よ', 'ら', 'り', 'る', 'れ', 'ろ', 'わ', 'を', 'ん' ]; var random_num = function(max){ return Math.floor(Math.random() * max) + 1; }; var random_index = function(max){ return Math.floor(Math.random() * max); }; var choose = function(list){ return list[random_index(list.length)]; }; var random_string = function(len){ var ret = ''; for (var i = 0; i < len; i++){ ret += choose(chars); } return ret; }; var random_name = function(len){ var ret = ''; for (var i = 0; i < len; i++){ ret += choose(namechars); } return ret; }; var random_kana = function(len){ var ret = ''; for (var i = 0; i < len; i++){ ret += choose(kana_chars); } return ret; }; var random_alphabetname = function(len){ var ret = ''; for (var i = 0; i < len; i++){ ret += choose(alphabet_namechars); } return ret; }; exports.cluster_status = function(){ return new hive_service_types.HiveClusterStatus({ taskTrackers: 1, mapTasks: 0, reduceTasks: 0, maxMapTasks: 2, maxReduceTasks: 2, state: 2 }); }; var idlist = ['hadoop_20110408154949_8b2be199-02ae-40fe-9492-d197ade572f2', 'hadoop_20110408154949_8b2be199-03ff-40fe-9492-d197ade572f2', 'hadoop_20110408154949_8b2be199-04bc-40fe-9492-d197ade572f2', 'hadoop_20110408154949_8b2be199-05db-40fe-9492-d197ade572f2', 'hadoop_20110408154949_8b2be199-06fc-40fe-9492-d197ade572f2']; var queryId = function(){ return choose(idlist); }; var operator = function(num){ var o = new queryplan_types.Operator({ operatorId: 'TS_12' + num, operatorType: Math.floor(Math.random() * 10), operatorAttributes: null, operatorCounters: null, done: true, started: true }); return o; }; var operatorGraph = function(ops){ var al = []; for (var i = 0; i < ops.length - 1; i++){ al.push(new queryplan_types.Adjacency({ node: ops[i].operatorId, children: [ ops[i+1].operatorId ], adjacencyType: 0 })); } return new queryplan_types.Graph({ nodeType: 0, roots: null, adjacencyList: al }); }; var task = function(stage,mapreduce,operators){ var ops = []; for (var i = 0; i < operators; i++){ ops.push(operator(i)); } return new queryplan_types.Task({ taskId: 'Stage-' + stage + '_' + mapreduce, taskType: (mapreduce == 'MAP' ? 
0 : 1), taskAttributes: null, taskCounters: null, operatorList: ops, operatorGraph: operatorGraph(ops), done: true, started: true }); }; var stage = function(stage){ var cntr_map = 'CNTR_NAME_Stage-' + stage + '_MAP_PROGRESS'; var cntr_reduce = 'CNTR_NAME_Stage-' + stage + '_REDUCE_PROGRESS'; var counters = {}; counters[cntr_map] = 100; counters[cntr_reduce] = 100; return new queryplan_types.Stage({ stageId: 'Stage-' + stage, stageType: 3, stageAttributes: null, stageCounters: counters, taskList: [task(stage,'MAP',3), task(stage,'REDUCE',1)], done: true, started: true }); }; exports.query_plan = function(querystring){ if (querystring == undefined){ return new queryplan_types.QueryPlan({}); } var query = new queryplan_types.Query({ queryId: queryId(), queryType: null, queryAttributes: { queryString: querystring }, queryCounters: null, stageList: [stage(1), stage(2)], stageGraph: new queryplan_types.Graph({ nodeType: 1, roots: null, adjacencyList: [ new queryplan_types.Adjacency({ node: 'Stage-1', children: [ 'Stage-2' ], adjacencyType: 0 }) ] }), done: true, started: true }); return new queryplan_types.QueryPlan({ queries: [query], done: false, started: false }); }; var columns = function(query){ var match = /select (.*) from .*/im.exec(query); if (! match) throw new Error('query field definition invalid!'); return match[1].split(/, /).map(function(s){return s.trim();}); }; var columninfo = function(column){ var name = column; var type = 'string'; var ex = undefined; var match = /as ([_a-zA-Z0-9]*)$/im.exec(column); if (match){ name = match[1]; } if (/^count/im.exec(column)) { type = 'bigint'; ex = 'count'; } else if (/^(sum|avg|min|max)/im.exec(column)) { type = 'bigint'; ex = 'aggr'; } else if (/id$/.exec(name)) { type = 'bigint'; ex = 'id'; } if (/^"(.*)"$/.exec(name)) { name = /^"(.*)"$/.exec(name)[1]; ex = "strcopy"; } else if (name == 'yyyymmdd') { ex = 'date'; } else if (name == 'hhmm' || name == 'hhmmss') { ex = 'time'; } else if (/name$/i.exec(name)) { ex = 'name'; } else if (/kana$/i.exec(name)) { ex = 'kana'; } return {name: name, type: type, ex: ex}; }; exports.schema = function(query){ if (! query) { return new hive_metastore_types.Schema({}); } if (/^show (databases|tables|partitions)/i.exec(query)) { return new hive_metastore_types.Schema({ fieldSchemas: [new hive_metastore_types.FieldSchema({name: 'name', type: 'string', comment: undefined})], properties: null }); } if (/^describe/i.exec(query)) { return new hive_metastore_types.Schema({ fieldSchemas: [ new hive_metastore_types.FieldSchema({name: 'col_name', type: 'string', comment: 'from deserializer'}), new hive_metastore_types.FieldSchema({name: 'data_type', type: 'string', comment: 'from deserializer'}), new hive_metastore_types.FieldSchema({name: 'comment', type: 'string', comment: 'from deserializer'}) ], properties: null }); } var cols = columns(query.split('\n').join(' ')); return new hive_metastore_types.Schema({ fieldSchemas: cols.map(function(c){ var i = columninfo(c); return new hive_metastore_types.FieldSchema({name: i.name, type: i.type, comment: undefined}); }), properties: null }); }; var generateValue = function(colinfo){ function pad(n){return n<10 ? '0'+n : n;} switch(colinfo.ex) { case 'strcopy': return colinfo
me; case 'date': var d1 = new Date((new Date()).getTime() - random_num(50) * 86400 * 1000); return '' + d1.getFullYear() + pad(d1.getMonth()+1) + pad(d1.getDate()); case 'time': var d2 = new Date((new Date()).getTime() - random_num(12 * 60) * 60 * 1000); return '' + pad(d2.getHours()) + pad(d2.getMinutes()); case 'id': return random_num(500); case 'aggr': return random_num(10000); case 'count': return random_num(2000); case 'name': return random_name(random_num(10)); case 'kana': return random_kana(random_num(10)); } if (colinfo.type == 'string'){ return random_string(random_num(50)); } return random_num(100); }; var generate_tablename = exports.generate_tablename = function(){ var part_depth = choose([1,1,2,2,3,4]); var name = ''; for (var i = 0; i < part_depth; i++) { if (name.length > 0) name += '_'; name += random_alphabetname(3) + random_num(3); } return name; }; var generate_subtree = exports.generate_subtree = function(subtree_label, parent) { var parent_part = parent ? parent + '/' : ''; var current_depth_label = subtree_label; var children_label = null; if (subtree_label.indexOf('_') > -1) { var separator = subtree_label.indexOf('_'); current_depth_label = subtree_label.substring(0, separator); children_label = subtree_label.substring(separator + 1); } var matched = /^([a-z]+)(\d+)$/.exec(current_depth_label); var fieldname = matched[1]; var partsNum = Number(matched[2]); var parts = []; for (var i = 0; i < partsNum; i++) { var current_part = parent_part + fieldname + '=' + i; if (children_label) { parts = parts.concat(generate_subtree(children_label, current_part)); } else { parts.push(current_part); } } return parts; }; exports.result = function(query){ var rows = choose([0,1,1,1,1,2,3,5,7,10,20,50]); var matched = null; if ((matched = /^show (databases|tables|partitions)( (.*))?$/i.exec(query)) != null) { if (/^databases$/i.exec(matched[1])) { /* show databases */ if (rows < 1) rows = 1; var dbs = []; for (var x = 0; x < rows; x++) { var dbname = generate_tablename(); while (dbs.indexOf(dbname) > -1) dbname = generate_tablename(); dbs.push(dbname); } return dbs; } else if (/^tables$/i.exec(matched[1])) { /* show tables */ if (rows < 1) rows = 1; var tables = []; for (var i = 0; i < rows; i++) { var name = generate_tablename(); while (tables.indexOf(name) > -1) name = generate_tablename(); tables.push(name); } return tables; } else { /* show partitions hogetable */ var tablename = matched[3]; if (! tablename) return []; return generate_subtree(tablename); } } if ((matched = /^describe (.*)$/i.exec(query)) != null) { var tname = matched[1]; var fields = []; var types = ['string', 'string', 'string', 'smallint', 'bigint', 'boolean']; if (rows < 1) rows = 1; for (var k = 0; k < rows; k++){ fields.push(random_name(10) + '\t' + choose(types) + '\t' + tname + '___' + k); } return fields; } else { var colinfos = columns(query).map(function(c){return columninfo(c);}); var limitmatch = /limit (\d+)/i.exec(query); if (colinfos.length == 1 && colinfos[0].ex == 'count') { rows = 1; } else if (limitmatch) { rows = limitmatch[1]; } var values = []; for (var j = 0; j < rows; j++){ values.push(colinfos.map(function(c){ return generateValue(c); }).join("\t")); } return values; } };
.na
identifier_name
ThriftHiveMock.js
var hive_service_types = require('shib/engines/hiveserver/hive_service_types'), hive_metastore_types = require('shib/engines/hiveserver/hive_metastore_types'), queryplan_types = require('shib/engines/hiveserver/queryplan_types'); var chars = [ 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', '-', ' ', '_', '\'', '"', '?', '!', '=', '+', '/', '.', ',' ]; var namechars = [ 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9' ]; var alphabet_namechars = [ 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z' ]; var kana_chars = [ 'あ', 'い', 'う', 'え', 'お', 'か', 'き', 'く', 'け', 'こ', 'さ', 'し', 'す', 'せ', 'そ', 'た', 'ち', 'つ', 'て', 'と', 'な', 'に', 'ぬ', 'ね', 'の', 'は', 'ひ', 'ふ', 'へ', 'ほ', 'ま', 'み', 'む', 'め', 'も', 'や', 'ゆ', 'よ', 'ら', 'り', 'る', 'れ', 'ろ', 'わ', 'を', 'ん' ]; var random_num = function(max){ return Math.floor(Math.random() * max) + 1; }; var random_index = function(max){ return Math.floor(Math.random() * max); }; var choose = function(list){ return list[random_index(list.length)]; }; var random_string = function(len){ var ret = ''; for (var i = 0; i < len; i++){ ret += choose(chars); } return ret; }; var random_name = function(len){ var ret = ''; for (var i = 0; i < len; i++){ ret += choose(namechars); } return ret; }; var random_kana = function(len){ var ret = ''; for (var i = 0; i < len; i++){ ret += choose(kana_chars); } return ret; }; var random_alphabetname = function(len){ var ret = ''; for (var i = 0; i < len; i++){ ret += choose(alphabet_namechars); } return ret; }; exports.cluster_status = function(){ return new hive_service_types.HiveClusterStatus({ taskTrackers: 1, mapTasks: 0, reduceTasks: 0, maxMapTasks: 2, maxReduceTasks: 2, state: 2 }); }; var idlist = ['hadoop_20110408154949_8b2be199-02ae-40fe-9492-d197ade572f2', 'hadoop_20110408154949_8b2be199-03ff-40fe-9492-d197ade572f2', 'hadoop_20110408154949_8b2be199-04bc-40fe-9492-d197ade572f2', 'hadoop_20110408154949_8b2be199-05db-40fe-9492-d197ade572f2', 'hadoop_20110408154949_8b2be199-06fc-40fe-9492-d197ade572f2']; var queryId = function(){ return choose(idlist); }; var operator = function(num){ var o = new queryplan_types.Operator({ operatorId: 'TS_12' + num, operatorType: Math.floor(Math.random() * 10), operatorAttributes: null, operatorCounters: null, done: true, started: true }); return o; }; var operatorGraph = function(ops){ var al = []; for (var i = 0; i < ops.length - 1; i++){ al.push(new queryplan_types.Adjacency({ node: ops[i].operatorId, children: [ ops[i+1].operatorId ], adjacencyType: 0 })); } return new queryplan_types.Graph({ nodeType: 0, roots: null, adjacencyList: al }); }; var task = function(stage,mapreduce,operators){ var ops = []; for (var i = 0; i < operators; i++){ ops.push(operator(i)); } return new queryplan_types.Task({ taskId: 'Stage-' + stage + '_' + mapreduce, taskType: (mapreduce == 'MAP' ? 
0 : 1), taskAttributes: null, taskCounters: null, operatorList: ops, operatorGraph: operatorGraph(ops), done: true, started: true }); }; var stage = function(stage){ var cntr_map = 'CNTR_NAME_Stage-' + stage + '_MAP_PROGRESS'; var cntr_reduce = 'CNTR_NAME_Stage-' + stage + '_REDUCE_PROGRESS'; var counters = {}; counters[cntr_map] = 100; counters[cntr_reduce] = 100; return new queryplan_types.Stage({ stageId: 'Stage-' + stage, stageType: 3, stageAttributes: null, stageCounters: counters, taskList: [task(stage,'MAP',3), task(stage,'REDUCE',1)], done: true, started: true }); }; exports.query_plan = function(querystring){ if (querystring == undefined){ return new queryplan_types.QueryPlan({}); } var query = new queryplan_types.Query({ queryId: queryId(), queryType: null, queryAttributes: { queryString: querystring }, queryCounters: null, stageList: [stage(1), stage(2)], stageGraph: new queryplan_types.Graph({ nodeType: 1, roots: null, adjacencyList: [ new queryplan_types.Adjacency({ node: 'Stage-1', children: [ 'Stage-2' ], adjacencyType: 0 }) ] }), done: true, started: true }); return new queryplan_types.QueryPlan({ queries: [query], done: false, started: false }); }; var columns = function(query){ var match = /select (.*) from .*/im.exec(query); if (! match) throw new Error('query field definition invalid!'); return match[1].split(/, /).map(function(s){return s.trim();}); }; var columninfo = function(column){ var name = column; var type = 'string'; var ex = undefined; var match = /as ([_a-zA-Z0-9]*)$/im.exec(column); if (match){ name = match[1]; } if (/^count/im.exec(column)) { type = 'bigint'; ex = 'count'; } else if (/^(sum|avg|min|max)/im.exec(column)) { type = 'bigint'; ex = 'aggr'; } else if (/id$/.exec(name)) { type = 'bigint'; ex = 'id'; } if (/^"(.*)"$/.exec(name)) { name = /^"(.*)"$/.exec(name)[1]; ex = "strcopy"; } else if (name == 'yyyymmdd') { ex = 'date'; } else if (name == 'hhmm' || name == 'hhmmss') { ex = 'time'; } else if (/name$/i.exec(name)) { ex = 'name'; } else if (/kana$/i.exec(name)) { ex = 'kana'; } return {name: name, type: type, ex: ex}; }; exports.schema = function(query){ if (! query) { return new hive_metastore_types.Schema({}); } if (/^show (databases|tables|partitions)/i.exec(query)) { return new hive_metastore_types.Schema({ fieldSchemas: [new hive_metastore_types.FieldSchema({name: 'name', type: 'string', comment: undefined})], properties: null }); } if (/^describe/i.exec(query)) { return new hive_metastore_types.Schema({ fieldSchemas: [ new hive_metastore_types.FieldSchema({name: 'col_name', type: 'string', comment: 'from deserializer'}), new hive_metastore_types.FieldSchema({name: 'data_type', type: 'string', comment: 'from deserializer'}), new hive_metastore_types.FieldSchema({name: 'comment', type: 'string', comment: 'from deserializer'}) ], properties: null }); } var cols = columns(query.split('\n').join(' ')); return new hive_metastore_types.Schema({ fieldSchemas: cols.map(function(c){ var i = columninfo(c); return new hive_metastore_types.FieldSchema({name: i.name, type: i.type, comment: undefined}); }), properties: null }); }; var generateValue = function(colinfo){ function pad(n){return n<10 ? '0'+n : n;} switch(colinfo.ex) { case 'strcopy': return colinfo.name;
= new Date((new Date()).getTime() - random_num(50) * 86400 * 1000); return '' + d1.getFullYear() + pad(d1.getMonth()+1) + pad(d1.getDate()); case 'time': var d2 = new Date((new Date()).getTime() - random_num(12 * 60) * 60 * 1000); return '' + pad(d2.getHours()) + pad(d2.getMinutes()); case 'id': return random_num(500); case 'aggr': return random_num(10000); case 'count': return random_num(2000); case 'name': return random_name(random_num(10)); case 'kana': return random_kana(random_num(10)); } if (colinfo.type == 'string'){ return random_string(random_num(50)); } return random_num(100); }; var generate_tablename = exports.generate_tablename = function(){ var part_depth = choose([1,1,2,2,3,4]); var name = ''; for (var i = 0; i < part_depth; i++) { if (name.length > 0) name += '_'; name += random_alphabetname(3) + random_num(3); } return name; }; var generate_subtree = exports.generate_subtree = function(subtree_label, parent) { var parent_part = parent ? parent + '/' : ''; var current_depth_label = subtree_label; var children_label = null; if (subtree_label.indexOf('_') > -1) { var separator = subtree_label.indexOf('_'); current_depth_label = subtree_label.substring(0, separator); children_label = subtree_label.substring(separator + 1); } var matched = /^([a-z]+)(\d+)$/.exec(current_depth_label); var fieldname = matched[1]; var partsNum = Number(matched[2]); var parts = []; for (var i = 0; i < partsNum; i++) { var current_part = parent_part + fieldname + '=' + i; if (children_label) { parts = parts.concat(generate_subtree(children_label, current_part)); } else { parts.push(current_part); } } return parts; }; exports.result = function(query){ var rows = choose([0,1,1,1,1,2,3,5,7,10,20,50]); var matched = null; if ((matched = /^show (databases|tables|partitions)( (.*))?$/i.exec(query)) != null) { if (/^databases$/i.exec(matched[1])) { /* show databases */ if (rows < 1) rows = 1; var dbs = []; for (var x = 0; x < rows; x++) { var dbname = generate_tablename(); while (dbs.indexOf(dbname) > -1) dbname = generate_tablename(); dbs.push(dbname); } return dbs; } else if (/^tables$/i.exec(matched[1])) { /* show tables */ if (rows < 1) rows = 1; var tables = []; for (var i = 0; i < rows; i++) { var name = generate_tablename(); while (tables.indexOf(name) > -1) name = generate_tablename(); tables.push(name); } return tables; } else { /* show partitions hogetable */ var tablename = matched[3]; if (! tablename) return []; return generate_subtree(tablename); } } if ((matched = /^describe (.*)$/i.exec(query)) != null) { var tname = matched[1]; var fields = []; var types = ['string', 'string', 'string', 'smallint', 'bigint', 'boolean']; if (rows < 1) rows = 1; for (var k = 0; k < rows; k++){ fields.push(random_name(10) + '\t' + choose(types) + '\t' + tname + '___' + k); } return fields; } else { var colinfos = columns(query).map(function(c){return columninfo(c);}); var limitmatch = /limit (\d+)/i.exec(query); if (colinfos.length == 1 && colinfos[0].ex == 'count') { rows = 1; } else if (limitmatch) { rows = limitmatch[1]; } var values = []; for (var j = 0; j < rows; j++){ values.push(colinfos.map(function(c){ return generateValue(c); }).join("\t")); } return values; } };
case 'date': var d1
identifier_body
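The query_plan and cluster_status exports repeated in these records fabricate a fixed two-stage, already-finished plan and an idle cluster. A small sketch of what a caller would see; the require path is again an assumption:

var mock = require('./ThriftHiveMock'); // hypothetical path

var plan = mock.query_plan('select service, count(*) as cnt from access_log group by service');
console.log(plan.queries[0].queryId);          // one of the five canned hadoop_* ids
console.log(plan.queries[0].stageList.length); // 2 stages, each already marked done
console.log(mock.query_plan(undefined));       // an empty QueryPlan when no query string is given

console.log(mock.cluster_status());            // 1 task tracker, 0 running map/reduce tasks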
ThriftHiveMock.js
var hive_service_types = require('shib/engines/hiveserver/hive_service_types'), hive_metastore_types = require('shib/engines/hiveserver/hive_metastore_types'), queryplan_types = require('shib/engines/hiveserver/queryplan_types'); var chars = [ 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', '-', ' ', '_', '\'', '"', '?', '!', '=', '+', '/', '.', ',' ]; var namechars = [ 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9' ]; var alphabet_namechars = [ 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z' ]; var kana_chars = [ 'あ', 'い', 'う', 'え', 'お', 'か', 'き', 'く', 'け', 'こ', 'さ', 'し', 'す', 'せ', 'そ', 'た', 'ち', 'つ', 'て', 'と', 'な', 'に', 'ぬ', 'ね', 'の', 'は', 'ひ', 'ふ', 'へ', 'ほ', 'ま', 'み', 'む', 'め', 'も', 'や', 'ゆ', 'よ', 'ら', 'り', 'る', 'れ', 'ろ', 'わ', 'を', 'ん' ]; var random_num = function(max){ return Math.floor(Math.random() * max) + 1; }; var random_index = function(max){ return Math.floor(Math.random() * max); }; var choose = function(list){ return list[random_index(list.length)]; }; var random_string = function(len){ var ret = ''; for (var i = 0; i < len; i++){ ret += choose(chars); } return ret; }; var random_name = function(len){ var ret = ''; for (var i = 0; i < len; i++){ ret += choose(namechars); } return ret; }; var random_kana = function(len){ var ret = ''; for (var i = 0; i < len; i++){ ret += choose(kana_chars); } return ret; }; var random_alphabetname = function(len){ var ret = ''; for (var i = 0; i < len; i++){ ret += choose(alphabet_namechars); } return ret; }; exports.cluster_status = function(){ return new hive_service_types.HiveClusterStatus({ taskTrackers: 1, mapTasks: 0, reduceTasks: 0, maxMapTasks: 2, maxReduceTasks: 2, state: 2 }); }; var idlist = ['hadoop_20110408154949_8b2be199-02ae-40fe-9492-d197ade572f2', 'hadoop_20110408154949_8b2be199-03ff-40fe-9492-d197ade572f2', 'hadoop_20110408154949_8b2be199-04bc-40fe-9492-d197ade572f2', 'hadoop_20110408154949_8b2be199-05db-40fe-9492-d197ade572f2', 'hadoop_20110408154949_8b2be199-06fc-40fe-9492-d197ade572f2']; var queryId = function(){ return choose(idlist); }; var operator = function(num){ var o = new queryplan_types.Operator({ operatorId: 'TS_12' + num, operatorType: Math.floor(Math.random() * 10), operatorAttributes: null, operatorCounters: null, done: true, started: true }); return o; }; var operatorGraph = function(ops){ var al = []; for (var i = 0; i < ops.length - 1; i++){ al.push(new queryplan_types.Adjacency({ node: ops[i].operatorId, children: [ ops[i+1].operatorId ], adjacencyType: 0 })); } return new queryplan_types.Graph({ nodeType: 0, roots: null, adjacencyList: al }); }; var task = function(stage,mapreduce,operators){ var ops = []; for (var i = 0; i < operators; i++){ ops.push(operator(i)); } return new queryplan_types.Task({ taskId: 'Stage-' + stage + '_' + mapreduce, taskType: (mapreduce == 'MAP' ? 
0 : 1), taskAttributes: null, taskCounters: null, operatorList: ops, operatorGraph: operatorGraph(ops), done: true, started: true }); }; var stage = function(stage){ var cntr_map = 'CNTR_NAME_Stage-' + stage + '_MAP_PROGRESS'; var cntr_reduce = 'CNTR_NAME_Stage-' + stage + '_REDUCE_PROGRESS'; var counters = {}; counters[cntr_map] = 100; counters[cntr_reduce] = 100; return new queryplan_types.Stage({ stageId: 'Stage-' + stage, stageType: 3, stageAttributes: null, stageCounters: counters, taskList: [task(stage,'MAP',3), task(stage,'REDUCE',1)], done: true, started: true }); }; exports.query_plan = function(querystring){ if (querystring == undefined){ return new queryplan_types.QueryPlan({}); } var query = new queryplan_types.Query({ queryId: queryId(), queryType: null, queryAttributes: { queryString: querystring }, queryCounters: null, stageList: [stage(1), stage(2)], stageGraph: new queryplan_types.Graph({ nodeType: 1, roots: null, adjacencyList: [ new queryplan_types.Adjacency({ node: 'Stage-1', children: [ 'Stage-2' ], adjacencyType: 0 }) ] }), done: true, started: true }); return new queryplan_types.QueryPlan({ queries: [query], done: false, started: false }); }; var columns = function(query){ var match = /select (.*) from .*/im.exec(query); if (! match) throw new Error('query field definition invalid!'); return match[1].split(/, /).map(function(s){return s.trim();}); }; var columninfo = function(column){ var name = column; var type = 'string'; var ex = undefined; var match = /as ([_a-zA-Z0-9]*)$/im.exec(column); if (match){
type = 'bigint'; ex = 'count'; } else if (/^(sum|avg|min|max)/im.exec(column)) { type = 'bigint'; ex = 'aggr'; } else if (/id$/.exec(name)) { type = 'bigint'; ex = 'id'; } if (/^"(.*)"$/.exec(name)) { name = /^"(.*)"$/.exec(name)[1]; ex = "strcopy"; } else if (name == 'yyyymmdd') { ex = 'date'; } else if (name == 'hhmm' || name == 'hhmmss') { ex = 'time'; } else if (/name$/i.exec(name)) { ex = 'name'; } else if (/kana$/i.exec(name)) { ex = 'kana'; } return {name: name, type: type, ex: ex}; }; exports.schema = function(query){ if (! query) { return new hive_metastore_types.Schema({}); } if (/^show (databases|tables|partitions)/i.exec(query)) { return new hive_metastore_types.Schema({ fieldSchemas: [new hive_metastore_types.FieldSchema({name: 'name', type: 'string', comment: undefined})], properties: null }); } if (/^describe/i.exec(query)) { return new hive_metastore_types.Schema({ fieldSchemas: [ new hive_metastore_types.FieldSchema({name: 'col_name', type: 'string', comment: 'from deserializer'}), new hive_metastore_types.FieldSchema({name: 'data_type', type: 'string', comment: 'from deserializer'}), new hive_metastore_types.FieldSchema({name: 'comment', type: 'string', comment: 'from deserializer'}) ], properties: null }); } var cols = columns(query.split('\n').join(' ')); return new hive_metastore_types.Schema({ fieldSchemas: cols.map(function(c){ var i = columninfo(c); return new hive_metastore_types.FieldSchema({name: i.name, type: i.type, comment: undefined}); }), properties: null }); }; var generateValue = function(colinfo){ function pad(n){return n<10 ? '0'+n : n;} switch(colinfo.ex) { case 'strcopy': return colinfo.name; case 'date': var d1 = new Date((new Date()).getTime() - random_num(50) * 86400 * 1000); return '' + d1.getFullYear() + pad(d1.getMonth()+1) + pad(d1.getDate()); case 'time': var d2 = new Date((new Date()).getTime() - random_num(12 * 60) * 60 * 1000); return '' + pad(d2.getHours()) + pad(d2.getMinutes()); case 'id': return random_num(500); case 'aggr': return random_num(10000); case 'count': return random_num(2000); case 'name': return random_name(random_num(10)); case 'kana': return random_kana(random_num(10)); } if (colinfo.type == 'string'){ return random_string(random_num(50)); } return random_num(100); }; var generate_tablename = exports.generate_tablename = function(){ var part_depth = choose([1,1,2,2,3,4]); var name = ''; for (var i = 0; i < part_depth; i++) { if (name.length > 0) name += '_'; name += random_alphabetname(3) + random_num(3); } return name; }; var generate_subtree = exports.generate_subtree = function(subtree_label, parent) { var parent_part = parent ? 
parent + '/' : ''; var current_depth_label = subtree_label; var children_label = null; if (subtree_label.indexOf('_') > -1) { var separator = subtree_label.indexOf('_'); current_depth_label = subtree_label.substring(0, separator); children_label = subtree_label.substring(separator + 1); } var matched = /^([a-z]+)(\d+)$/.exec(current_depth_label); var fieldname = matched[1]; var partsNum = Number(matched[2]); var parts = []; for (var i = 0; i < partsNum; i++) { var current_part = parent_part + fieldname + '=' + i; if (children_label) { parts = parts.concat(generate_subtree(children_label, current_part)); } else { parts.push(current_part); } } return parts; }; exports.result = function(query){ var rows = choose([0,1,1,1,1,2,3,5,7,10,20,50]); var matched = null; if ((matched = /^show (databases|tables|partitions)( (.*))?$/i.exec(query)) != null) { if (/^databases$/i.exec(matched[1])) { /* show databases */ if (rows < 1) rows = 1; var dbs = []; for (var x = 0; x < rows; x++) { var dbname = generate_tablename(); while (dbs.indexOf(dbname) > -1) dbname = generate_tablename(); dbs.push(dbname); } return dbs; } else if (/^tables$/i.exec(matched[1])) { /* show tables */ if (rows < 1) rows = 1; var tables = []; for (var i = 0; i < rows; i++) { var name = generate_tablename(); while (tables.indexOf(name) > -1) name = generate_tablename(); tables.push(name); } return tables; } else { /* show partitions hogetable */ var tablename = matched[3]; if (! tablename) return []; return generate_subtree(tablename); } } if ((matched = /^describe (.*)$/i.exec(query)) != null) { var tname = matched[1]; var fields = []; var types = ['string', 'string', 'string', 'smallint', 'bigint', 'boolean']; if (rows < 1) rows = 1; for (var k = 0; k < rows; k++){ fields.push(random_name(10) + '\t' + choose(types) + '\t' + tname + '___' + k); } return fields; } else { var colinfos = columns(query).map(function(c){return columninfo(c);}); var limitmatch = /limit (\d+)/i.exec(query); if (colinfos.length == 1 && colinfos[0].ex == 'count') { rows = 1; } else if (limitmatch) { rows = limitmatch[1]; } var values = []; for (var j = 0; j < rows; j++){ values.push(colinfos.map(function(c){ return generateValue(c); }).join("\t")); } return values; } };
name = match[1]; } if (/^count/im.exec(column)) {
random_line_split
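generate_subtree, as defined in the ThriftHiveMock.js records above, expands a label such as 'dt2_hr2' into nested partition paths, and result() routes "show partitions <table>" through the same helper. A hedged sketch with an assumed require path:

var mock = require('./ThriftHiveMock'); // hypothetical path

// 'yyyymmdd3' is a single level with three partitions.
console.log(mock.generate_subtree('yyyymmdd3'));
// [ 'yyyymmdd=0', 'yyyymmdd=1', 'yyyymmdd=2' ]

// A two-level label nests the second field under each value of the first.
console.log(mock.generate_subtree('dt2_hr2'));
// [ 'dt=0/hr=0', 'dt=0/hr=1', 'dt=1/hr=0', 'dt=1/hr=1' ]

// "show partitions <table>" results are produced by the same expansion.
console.log(mock.result('show partitions dt2_hr2'));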
query.go
// Copyright (c) 2015-2017 The btcsuite developers // Copyright (c) 2015-2016 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. package wtxmgr import ( "fmt" "github.com/btcsuite/btcd/btcutil" "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcwallet/walletdb" ) // CreditRecord contains metadata regarding a transaction credit for a known // transaction. Further details may be looked up by indexing a wire.MsgTx.TxOut // with the Index field. type CreditRecord struct { Amount btcutil.Amount Index uint32 Spent bool Change bool } // DebitRecord contains metadata regarding a transaction debit for a known // transaction. Further details may be looked up by indexing a wire.MsgTx.TxIn // with the Index field. type DebitRecord struct { Amount btcutil.Amount Index uint32 } // TxDetails is intended to provide callers with access to rich details // regarding a relevant transaction and which inputs and outputs are credit or // debits. type TxDetails struct { TxRecord Block BlockMeta Credits []CreditRecord Debits []DebitRecord Label string } // minedTxDetails fetches the TxDetails for the mined transaction with hash // txHash and the passed tx record key and value. func (s *Store) minedTxDetails(ns walletdb.ReadBucket, txHash *chainhash.Hash, recKey, recVal []byte) (*TxDetails, error) { var details TxDetails // Parse transaction record k/v, lookup the full block record for the // block time, and read all matching credits, debits. err := readRawTxRecord(txHash, recVal, &details.TxRecord) if err != nil { return nil, err } err = readRawTxRecordBlock(recKey, &details.Block.Block) if err != nil { return nil, err } details.Block.Time, err = fetchBlockTime(ns, details.Block.Height) if err != nil { return nil, err } credIter := makeReadCreditIterator(ns, recKey) for credIter.next() { if int(credIter.elem.Index) >= len(details.MsgTx.TxOut) { str := "saved credit index exceeds number of outputs" return nil, storeError(ErrData, str, nil) } // The credit iterator does not record whether this credit was // spent by an unmined transaction, so check that here. if !credIter.elem.Spent { k := canonicalOutPoint(txHash, credIter.elem.Index) spent := existsRawUnminedInput(ns, k) != nil credIter.elem.Spent = spent } details.Credits = append(details.Credits, credIter.elem) } if credIter.err != nil { return nil, credIter.err } debIter := makeReadDebitIterator(ns, recKey) for debIter.next() { if int(debIter.elem.Index) >= len(details.MsgTx.TxIn) { str := "saved debit index exceeds number of inputs" return nil, storeError(ErrData, str, nil) } details.Debits = append(details.Debits, debIter.elem) } if debIter.err != nil { return nil, debIter.err } // Finally, we add the transaction label to details. details.Label, err = s.TxLabel(ns, *txHash) if err != nil { return nil, err } return &details, nil } // unminedTxDetails fetches the TxDetails for the unmined transaction with the // hash txHash and the passed unmined record value. 
func (s *Store) unminedTxDetails(ns walletdb.ReadBucket, txHash *chainhash.Hash, v []byte) (*TxDetails, error) { details := TxDetails{ Block: BlockMeta{Block: Block{Height: -1}}, } err := readRawTxRecord(txHash, v, &details.TxRecord) if err != nil { return nil, err } it := makeReadUnminedCreditIterator(ns, txHash) for it.next() { if int(it.elem.Index) >= len(details.MsgTx.TxOut) { str := "saved credit index exceeds number of outputs" return nil, storeError(ErrData, str, nil) } // Set the Spent field since this is not done by the iterator. it.elem.Spent = existsRawUnminedInput(ns, it.ck) != nil details.Credits = append(details.Credits, it.elem) } if it.err != nil { return nil, it.err } // Debit records are not saved for unmined transactions. Instead, they // must be looked up for each transaction input manually. There are two // kinds of previous credits that may be debited by an unmined // transaction: mined unspent outputs (which remain marked unspent even // when spent by an unmined transaction), and credits from other unmined // transactions. Both situations must be considered. for i, output := range details.MsgTx.TxIn { opKey := canonicalOutPoint(&output.PreviousOutPoint.Hash, output.PreviousOutPoint.Index) credKey := existsRawUnspent(ns, opKey) if credKey != nil { v := existsRawCredit(ns, credKey) amount, err := fetchRawCreditAmount(v) if err != nil { return nil, err } details.Debits = append(details.Debits, DebitRecord{ Amount: amount, Index: uint32(i), }) continue } v := existsRawUnminedCredit(ns, opKey) if v == nil { continue } amount, err := fetchRawCreditAmount(v) if err != nil { return nil, err } details.Debits = append(details.Debits, DebitRecord{ Amount: amount, Index: uint32(i), }) } // Finally, we add the transaction label to details. details.Label, err = s.TxLabel(ns, *txHash) if err != nil { return nil, err } return &details, nil } // TxLabel looks up a transaction label for the txHash provided. If the store // has no labels in it, or the specific txHash does not have a label, an empty // string and no error are returned. func (s *Store) TxLabel(ns walletdb.ReadBucket, txHash chainhash.Hash) (string, error) { label, err := FetchTxLabel(ns, txHash) switch err { // If there are no saved labels yet (the bucket has not been created) or // there is not a label for this particular tx, we ignore the error. case ErrNoLabelBucket: fallthrough case ErrTxLabelNotFound: return "", nil // If we found the label, we return it. case nil: return label, nil } // Otherwise, another error occurred while looking uo the label, so we // return it. return "", err } // TxDetails looks up all recorded details regarding a transaction with some // hash. In case of a hash collision, the most recent transaction with a // matching hash is returned. // // Not finding a transaction with this hash is not an error. In this case, // a nil TxDetails is returned. func (s *Store) TxDetails(ns walletdb.ReadBucket, txHash *chainhash.Hash) (*TxDetails, error) { // First, check whether there exists an unmined transaction with this // hash. Use it if found. v := existsRawUnmined(ns, txHash[:]) if v != nil { return s.unminedTxDetails(ns, txHash, v) } // Otherwise, if there exists a mined transaction with this matching // hash, skip over to the newest and begin fetching all details. 
k, v := latestTxRecord(ns, txHash) if v == nil { // not found return nil, nil } return s.minedTxDetails(ns, txHash, k, v) } // UniqueTxDetails looks up all recorded details for a transaction recorded // mined in some particular block, or an unmined transaction if block is nil. // // Not finding a transaction with this hash from this block is not an error. In // this case, a nil TxDetails is returned. func (s *Store) UniqueTxDetails(ns walletdb.ReadBucket, txHash *chainhash.Hash, block *Block) (*TxDetails, error) { if block == nil { v := existsRawUnmined(ns, txHash[:]) if v == nil { return nil, nil } return s.unminedTxDetails(ns, txHash, v) } k, v := existsTxRecord(ns, txHash, block) if v == nil { return nil, nil } return s.minedTxDetails(ns, txHash, k, v) } // rangeUnminedTransactions executes the function f with TxDetails for every // unmined transaction. f is not executed if no unmined transactions exist. // Error returns from f (if any) are propigated to the caller. Returns true // (signaling breaking out of a RangeTransactions) iff f executes and returns // true. func (s *Store) rangeUnminedTransactions(ns walletdb.ReadBucket, f func([]TxDetails) (bool, error)) (bool, error) { var details []TxDetails err := ns.NestedReadBucket(bucketUnmined).ForEach(func(k, v []byte) error { if len(k) < 32 { str := fmt.Sprintf("%s: short key (expected %d "+ "bytes, read %d)", bucketUnmined, 32, len(k)) return storeError(ErrData, str, nil) } var txHash chainhash.Hash copy(txHash[:], k) detail, err := s.unminedTxDetails(ns, &txHash, v) if err != nil { return err } // Because the key was created while foreach-ing over the // bucket, it should be impossible for unminedTxDetails to ever // successfully return a nil details struct. details = append(details, *detail) return nil }) if err == nil && len(details) > 0 { return f(details) } return false, err } // rangeBlockTransactions executes the function f with TxDetails for every block // between heights begin and end (reverse order when end > begin) until f // returns true, or the transactions from block is processed. Returns true iff // f executes and returns true. func (s *Store) rangeBlockTransactions(ns walletdb.ReadBucket, begin, end int32, f func([]TxDetails) (bool, error)) (bool, error) { // Mempool height is considered a high bound. if begin < 0 { begin = int32(^uint32(0) >> 1) } if end < 0 { end = int32(^uint32(0) >> 1) } var blockIter blockIterator var advance func(*blockIterator) bool if begin < end { // Iterate in forwards order blockIter = makeReadBlockIterator(ns, begin) advance = func(it *blockIterator) bool { if !it.next() { return false } return it.elem.Height <= end } } else { // Iterate in backwards order, from begin -> end. blockIter = makeReadBlockIterator(ns, begin) advance = func(it *blockIterator) bool { if !it.prev() { return false } return end <= it.elem.Height } } var details []TxDetails for advance(&blockIter) { block := &blockIter.elem if cap(details) < len(block.transactions) { details = make([]TxDetails, 0, len(block.transactions)) } else { details = details[:0] } for _, txHash := range block.transactions { k := keyTxRecord(&txHash, &block.Block) v := existsRawTxRecord(ns, k) if v == nil { str := fmt.Sprintf("missing transaction %v for "+ "block %v", txHash, block.Height) return false, storeError(ErrData, str, nil) } detail, err := s.minedTxDetails(ns, &txHash, k, v) if err != nil { return false, err } details = append(details, *detail) } // Every block record must have at least one transaction, so it // is safe to call f. 
brk, err := f(details) if err != nil || brk { return brk, err } } return false, blockIter.err } // RangeTransactions runs the function f on all transaction details between // blocks on the best chain over the height range [begin,end]. The special // height -1 may be used to also include unmined transactions. If the end // height comes before the begin height, blocks are iterated in reverse order // and unmined transactions (if any) are processed first. // // The function f may return an error which, if non-nil, is propagated to the // caller. Additionally, a boolean return value allows exiting the function // early without reading any additional transactions early when true. // // All calls to f are guaranteed to be passed a slice with more than zero // elements. The slice may be reused for multiple blocks, so it is not safe to // use it after the loop iteration it was acquired. func (s *Store) RangeTransactions(ns walletdb.ReadBucket, begin, end int32, f func([]TxDetails) (bool, error)) error { var addedUnmined bool if begin < 0 { brk, err := s.rangeUnminedTransactions(ns, f) if err != nil || brk { return err } addedUnmined = true } brk, err := s.rangeBlockTransactions(ns, begin, end, f) if err == nil && !brk && !addedUnmined && end < 0 { _, err = s.rangeUnminedTransactions(ns, f) } return err } // PreviousPkScripts returns a slice of previous output scripts for each credit // output this transaction record debits from. func (s *Store) PreviousPkScripts(ns walletdb.ReadBucket, rec *TxRecord, block *Block) ([][]byte, error) { var pkScripts [][]byte if block == nil { for _, input := range rec.MsgTx.TxIn { prevOut := &input.PreviousOutPoint // Input may spend a previous unmined output, a // mined output (which would still be marked // unspent), or neither. v := existsRawUnmined(ns, prevOut.Hash[:]) if v != nil { // Ensure a credit exists for this // unmined transaction before including // the output script. k := canonicalOutPoint(&prevOut.Hash, prevOut.Index) if existsRawUnminedCredit(ns, k) == nil { continue
return nil, err } pkScripts = append(pkScripts, pkScript) continue } _, credKey := existsUnspent(ns, prevOut) if credKey != nil { k := extractRawCreditTxRecordKey(credKey) v = existsRawTxRecord(ns, k) pkScript, err := fetchRawTxRecordPkScript(k, v, prevOut.Index) if err != nil { return nil, err } pkScripts = append(pkScripts, pkScript) continue } } return pkScripts, nil } recKey := keyTxRecord(&rec.Hash, block) it := makeReadDebitIterator(ns, recKey) for it.next() { credKey := extractRawDebitCreditKey(it.cv) index := extractRawCreditIndex(credKey) k := extractRawCreditTxRecordKey(credKey) v := existsRawTxRecord(ns, k) pkScript, err := fetchRawTxRecordPkScript(k, v, index) if err != nil { return nil, err } pkScripts = append(pkScripts, pkScript) } if it.err != nil { return nil, it.err } return pkScripts, nil }
} pkScript, err := fetchRawTxRecordPkScript( prevOut.Hash[:], v, prevOut.Index) if err != nil {
random_line_split
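Store.TxDetails, as shown in the query.go record above, prefers an unmined transaction over mined ones with the same hash and returns a nil *TxDetails with no error when nothing matches. The following Go sketch is illustrative only: the function name is hypothetical, and it assumes the caller already holds an open read bucket for the wtxmgr namespace (imports: "fmt", "github.com/btcsuite/btcd/chaincfg/chainhash", "github.com/btcsuite/btcwallet/walletdb", "github.com/btcsuite/btcwallet/wtxmgr"):

func printTxSummary(s *wtxmgr.Store, ns walletdb.ReadBucket, txHash *chainhash.Hash) error {
	details, err := s.TxDetails(ns, txHash)
	if err != nil {
		return err
	}
	if details == nil {
		// Not knowing the transaction is not an error; TxDetails returns nil.
		return nil
	}
	// Height -1 marks an unmined transaction (see unminedTxDetails above).
	fmt.Printf("tx %v: height=%d credits=%d debits=%d label=%q\n",
		details.Hash, details.Block.Height, len(details.Credits),
		len(details.Debits), details.Label)
	return nil
}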
query.go
// Copyright (c) 2015-2017 The btcsuite developers // Copyright (c) 2015-2016 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. package wtxmgr import ( "fmt" "github.com/btcsuite/btcd/btcutil" "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcwallet/walletdb" ) // CreditRecord contains metadata regarding a transaction credit for a known // transaction. Further details may be looked up by indexing a wire.MsgTx.TxOut // with the Index field. type CreditRecord struct { Amount btcutil.Amount Index uint32 Spent bool Change bool } // DebitRecord contains metadata regarding a transaction debit for a known // transaction. Further details may be looked up by indexing a wire.MsgTx.TxIn // with the Index field. type DebitRecord struct { Amount btcutil.Amount Index uint32 } // TxDetails is intended to provide callers with access to rich details // regarding a relevant transaction and which inputs and outputs are credit or // debits. type TxDetails struct { TxRecord Block BlockMeta Credits []CreditRecord Debits []DebitRecord Label string } // minedTxDetails fetches the TxDetails for the mined transaction with hash // txHash and the passed tx record key and value. func (s *Store) minedTxDetails(ns walletdb.ReadBucket, txHash *chainhash.Hash, recKey, recVal []byte) (*TxDetails, error) { var details TxDetails // Parse transaction record k/v, lookup the full block record for the // block time, and read all matching credits, debits. err := readRawTxRecord(txHash, recVal, &details.TxRecord) if err != nil { return nil, err } err = readRawTxRecordBlock(recKey, &details.Block.Block) if err != nil { return nil, err } details.Block.Time, err = fetchBlockTime(ns, details.Block.Height) if err != nil { return nil, err } credIter := makeReadCreditIterator(ns, recKey) for credIter.next() { if int(credIter.elem.Index) >= len(details.MsgTx.TxOut) { str := "saved credit index exceeds number of outputs" return nil, storeError(ErrData, str, nil) } // The credit iterator does not record whether this credit was // spent by an unmined transaction, so check that here. if !credIter.elem.Spent { k := canonicalOutPoint(txHash, credIter.elem.Index) spent := existsRawUnminedInput(ns, k) != nil credIter.elem.Spent = spent } details.Credits = append(details.Credits, credIter.elem) } if credIter.err != nil { return nil, credIter.err } debIter := makeReadDebitIterator(ns, recKey) for debIter.next() { if int(debIter.elem.Index) >= len(details.MsgTx.TxIn) { str := "saved debit index exceeds number of inputs" return nil, storeError(ErrData, str, nil) } details.Debits = append(details.Debits, debIter.elem) } if debIter.err != nil { return nil, debIter.err } // Finally, we add the transaction label to details. details.Label, err = s.TxLabel(ns, *txHash) if err != nil { return nil, err } return &details, nil } // unminedTxDetails fetches the TxDetails for the unmined transaction with the // hash txHash and the passed unmined record value. 
func (s *Store) unminedTxDetails(ns walletdb.ReadBucket, txHash *chainhash.Hash, v []byte) (*TxDetails, error) { details := TxDetails{ Block: BlockMeta{Block: Block{Height: -1}}, } err := readRawTxRecord(txHash, v, &details.TxRecord) if err != nil { return nil, err } it := makeReadUnminedCreditIterator(ns, txHash) for it.next() { if int(it.elem.Index) >= len(details.MsgTx.TxOut) { str := "saved credit index exceeds number of outputs" return nil, storeError(ErrData, str, nil) } // Set the Spent field since this is not done by the iterator. it.elem.Spent = existsRawUnminedInput(ns, it.ck) != nil details.Credits = append(details.Credits, it.elem) } if it.err != nil { return nil, it.err } // Debit records are not saved for unmined transactions. Instead, they // must be looked up for each transaction input manually. There are two // kinds of previous credits that may be debited by an unmined // transaction: mined unspent outputs (which remain marked unspent even // when spent by an unmined transaction), and credits from other unmined // transactions. Both situations must be considered. for i, output := range details.MsgTx.TxIn { opKey := canonicalOutPoint(&output.PreviousOutPoint.Hash, output.PreviousOutPoint.Index) credKey := existsRawUnspent(ns, opKey) if credKey != nil { v := existsRawCredit(ns, credKey) amount, err := fetchRawCreditAmount(v) if err != nil { return nil, err } details.Debits = append(details.Debits, DebitRecord{ Amount: amount, Index: uint32(i), }) continue } v := existsRawUnminedCredit(ns, opKey) if v == nil { continue } amount, err := fetchRawCreditAmount(v) if err != nil { return nil, err } details.Debits = append(details.Debits, DebitRecord{ Amount: amount, Index: uint32(i), }) } // Finally, we add the transaction label to details. details.Label, err = s.TxLabel(ns, *txHash) if err != nil { return nil, err } return &details, nil } // TxLabel looks up a transaction label for the txHash provided. If the store // has no labels in it, or the specific txHash does not have a label, an empty // string and no error are returned. func (s *Store) TxLabel(ns walletdb.ReadBucket, txHash chainhash.Hash) (string, error) { label, err := FetchTxLabel(ns, txHash) switch err { // If there are no saved labels yet (the bucket has not been created) or // there is not a label for this particular tx, we ignore the error. case ErrNoLabelBucket: fallthrough case ErrTxLabelNotFound: return "", nil // If we found the label, we return it. case nil: return label, nil } // Otherwise, another error occurred while looking uo the label, so we // return it. return "", err } // TxDetails looks up all recorded details regarding a transaction with some // hash. In case of a hash collision, the most recent transaction with a // matching hash is returned. // // Not finding a transaction with this hash is not an error. In this case, // a nil TxDetails is returned. func (s *Store) TxDetails(ns walletdb.ReadBucket, txHash *chainhash.Hash) (*TxDetails, error) { // First, check whether there exists an unmined transaction with this // hash. Use it if found. v := existsRawUnmined(ns, txHash[:]) if v != nil { return s.unminedTxDetails(ns, txHash, v) } // Otherwise, if there exists a mined transaction with this matching // hash, skip over to the newest and begin fetching all details. 
k, v := latestTxRecord(ns, txHash) if v == nil { // not found return nil, nil } return s.minedTxDetails(ns, txHash, k, v) } // UniqueTxDetails looks up all recorded details for a transaction recorded // mined in some particular block, or an unmined transaction if block is nil. // // Not finding a transaction with this hash from this block is not an error. In // this case, a nil TxDetails is returned. func (s *Store) UniqueTxDetails(ns walletdb.ReadBucket, txHash *chainhash.Hash, block *Block) (*TxDetails, error) { if block == nil { v := existsRawUnmined(ns, txHash[:]) if v == nil { return nil, nil } return s.unminedTxDetails(ns, txHash, v) } k, v := existsTxRecord(ns, txHash, block) if v == nil { return nil, nil } return s.minedTxDetails(ns, txHash, k, v) } // rangeUnminedTransactions executes the function f with TxDetails for every // unmined transaction. f is not executed if no unmined transactions exist. // Error returns from f (if any) are propigated to the caller. Returns true // (signaling breaking out of a RangeTransactions) iff f executes and returns // true. func (s *Store) rangeUnminedTransactions(ns walletdb.ReadBucket, f func([]TxDetails) (bool, error)) (bool, error) { var details []TxDetails err := ns.NestedReadBucket(bucketUnmined).ForEach(func(k, v []byte) error { if len(k) < 32 { str := fmt.Sprintf("%s: short key (expected %d "+ "bytes, read %d)", bucketUnmined, 32, len(k)) return storeError(ErrData, str, nil) } var txHash chainhash.Hash copy(txHash[:], k) detail, err := s.unminedTxDetails(ns, &txHash, v) if err != nil { return err } // Because the key was created while foreach-ing over the // bucket, it should be impossible for unminedTxDetails to ever // successfully return a nil details struct. details = append(details, *detail) return nil }) if err == nil && len(details) > 0 { return f(details) } return false, err } // rangeBlockTransactions executes the function f with TxDetails for every block // between heights begin and end (reverse order when end > begin) until f // returns true, or the transactions from block is processed. Returns true iff // f executes and returns true. func (s *Store) rangeBlockTransactions(ns walletdb.ReadBucket, begin, end int32, f func([]TxDetails) (bool, error)) (bool, error) { // Mempool height is considered a high bound. if begin < 0 { begin = int32(^uint32(0) >> 1) } if end < 0
var blockIter blockIterator var advance func(*blockIterator) bool if begin < end { // Iterate in forwards order blockIter = makeReadBlockIterator(ns, begin) advance = func(it *blockIterator) bool { if !it.next() { return false } return it.elem.Height <= end } } else { // Iterate in backwards order, from begin -> end. blockIter = makeReadBlockIterator(ns, begin) advance = func(it *blockIterator) bool { if !it.prev() { return false } return end <= it.elem.Height } } var details []TxDetails for advance(&blockIter) { block := &blockIter.elem if cap(details) < len(block.transactions) { details = make([]TxDetails, 0, len(block.transactions)) } else { details = details[:0] } for _, txHash := range block.transactions { k := keyTxRecord(&txHash, &block.Block) v := existsRawTxRecord(ns, k) if v == nil { str := fmt.Sprintf("missing transaction %v for "+ "block %v", txHash, block.Height) return false, storeError(ErrData, str, nil) } detail, err := s.minedTxDetails(ns, &txHash, k, v) if err != nil { return false, err } details = append(details, *detail) } // Every block record must have at least one transaction, so it // is safe to call f. brk, err := f(details) if err != nil || brk { return brk, err } } return false, blockIter.err } // RangeTransactions runs the function f on all transaction details between // blocks on the best chain over the height range [begin,end]. The special // height -1 may be used to also include unmined transactions. If the end // height comes before the begin height, blocks are iterated in reverse order // and unmined transactions (if any) are processed first. // // The function f may return an error which, if non-nil, is propagated to the // caller. Additionally, a boolean return value allows exiting the function // early without reading any additional transactions early when true. // // All calls to f are guaranteed to be passed a slice with more than zero // elements. The slice may be reused for multiple blocks, so it is not safe to // use it after the loop iteration it was acquired. func (s *Store) RangeTransactions(ns walletdb.ReadBucket, begin, end int32, f func([]TxDetails) (bool, error)) error { var addedUnmined bool if begin < 0 { brk, err := s.rangeUnminedTransactions(ns, f) if err != nil || brk { return err } addedUnmined = true } brk, err := s.rangeBlockTransactions(ns, begin, end, f) if err == nil && !brk && !addedUnmined && end < 0 { _, err = s.rangeUnminedTransactions(ns, f) } return err } // PreviousPkScripts returns a slice of previous output scripts for each credit // output this transaction record debits from. func (s *Store) PreviousPkScripts(ns walletdb.ReadBucket, rec *TxRecord, block *Block) ([][]byte, error) { var pkScripts [][]byte if block == nil { for _, input := range rec.MsgTx.TxIn { prevOut := &input.PreviousOutPoint // Input may spend a previous unmined output, a // mined output (which would still be marked // unspent), or neither. v := existsRawUnmined(ns, prevOut.Hash[:]) if v != nil { // Ensure a credit exists for this // unmined transaction before including // the output script. 
k := canonicalOutPoint(&prevOut.Hash, prevOut.Index) if existsRawUnminedCredit(ns, k) == nil { continue } pkScript, err := fetchRawTxRecordPkScript( prevOut.Hash[:], v, prevOut.Index) if err != nil { return nil, err } pkScripts = append(pkScripts, pkScript) continue } _, credKey := existsUnspent(ns, prevOut) if credKey != nil { k := extractRawCreditTxRecordKey(credKey) v = existsRawTxRecord(ns, k) pkScript, err := fetchRawTxRecordPkScript(k, v, prevOut.Index) if err != nil { return nil, err } pkScripts = append(pkScripts, pkScript) continue } } return pkScripts, nil } recKey := keyTxRecord(&rec.Hash, block) it := makeReadDebitIterator(ns, recKey) for it.next() { credKey := extractRawDebitCreditKey(it.cv) index := extractRawCreditIndex(credKey) k := extractRawCreditTxRecordKey(credKey) v := existsRawTxRecord(ns, k) pkScript, err := fetchRawTxRecordPkScript(k, v, index) if err != nil { return nil, err } pkScripts = append(pkScripts, pkScript) } if it.err != nil { return nil, it.err } return pkScripts, nil }
{ end = int32(^uint32(0) >> 1) }
conditional_block
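RangeTransactions, from the record above, passes batches of TxDetails to a callback; a begin height of -1 includes unmined transactions and, since -1 is treated as the mempool (maximum) height, blocks are then walked newest-first down to the end height. A hedged Go sketch — the function name and the "stop after ten" policy are illustrative, and the store and bucket are assumed to come from the caller:

func listRecent(s *wtxmgr.Store, ns walletdb.ReadBucket) error {
	seen := 0
	return s.RangeTransactions(ns, -1, 0, func(details []wtxmgr.TxDetails) (bool, error) {
		for _, d := range details {
			fmt.Printf("%v (height %d)\n", d.Hash, d.Block.Height)
			seen++
		}
		// Returning true breaks out of the range early.
		return seen >= 10, nil
	})
}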
query.go
// Copyright (c) 2015-2017 The btcsuite developers // Copyright (c) 2015-2016 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. package wtxmgr import ( "fmt" "github.com/btcsuite/btcd/btcutil" "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcwallet/walletdb" ) // CreditRecord contains metadata regarding a transaction credit for a known // transaction. Further details may be looked up by indexing a wire.MsgTx.TxOut // with the Index field. type CreditRecord struct { Amount btcutil.Amount Index uint32 Spent bool Change bool } // DebitRecord contains metadata regarding a transaction debit for a known // transaction. Further details may be looked up by indexing a wire.MsgTx.TxIn // with the Index field. type DebitRecord struct { Amount btcutil.Amount Index uint32 } // TxDetails is intended to provide callers with access to rich details // regarding a relevant transaction and which inputs and outputs are credit or // debits. type TxDetails struct { TxRecord Block BlockMeta Credits []CreditRecord Debits []DebitRecord Label string } // minedTxDetails fetches the TxDetails for the mined transaction with hash // txHash and the passed tx record key and value. func (s *Store) minedTxDetails(ns walletdb.ReadBucket, txHash *chainhash.Hash, recKey, recVal []byte) (*TxDetails, error) { var details TxDetails // Parse transaction record k/v, lookup the full block record for the // block time, and read all matching credits, debits. err := readRawTxRecord(txHash, recVal, &details.TxRecord) if err != nil { return nil, err } err = readRawTxRecordBlock(recKey, &details.Block.Block) if err != nil { return nil, err } details.Block.Time, err = fetchBlockTime(ns, details.Block.Height) if err != nil { return nil, err } credIter := makeReadCreditIterator(ns, recKey) for credIter.next() { if int(credIter.elem.Index) >= len(details.MsgTx.TxOut) { str := "saved credit index exceeds number of outputs" return nil, storeError(ErrData, str, nil) } // The credit iterator does not record whether this credit was // spent by an unmined transaction, so check that here. if !credIter.elem.Spent { k := canonicalOutPoint(txHash, credIter.elem.Index) spent := existsRawUnminedInput(ns, k) != nil credIter.elem.Spent = spent } details.Credits = append(details.Credits, credIter.elem) } if credIter.err != nil { return nil, credIter.err } debIter := makeReadDebitIterator(ns, recKey) for debIter.next() { if int(debIter.elem.Index) >= len(details.MsgTx.TxIn) { str := "saved debit index exceeds number of inputs" return nil, storeError(ErrData, str, nil) } details.Debits = append(details.Debits, debIter.elem) } if debIter.err != nil { return nil, debIter.err } // Finally, we add the transaction label to details. details.Label, err = s.TxLabel(ns, *txHash) if err != nil { return nil, err } return &details, nil } // unminedTxDetails fetches the TxDetails for the unmined transaction with the // hash txHash and the passed unmined record value. 
func (s *Store) unminedTxDetails(ns walletdb.ReadBucket, txHash *chainhash.Hash, v []byte) (*TxDetails, error) { details := TxDetails{ Block: BlockMeta{Block: Block{Height: -1}}, } err := readRawTxRecord(txHash, v, &details.TxRecord) if err != nil { return nil, err } it := makeReadUnminedCreditIterator(ns, txHash) for it.next() { if int(it.elem.Index) >= len(details.MsgTx.TxOut) { str := "saved credit index exceeds number of outputs" return nil, storeError(ErrData, str, nil) } // Set the Spent field since this is not done by the iterator. it.elem.Spent = existsRawUnminedInput(ns, it.ck) != nil details.Credits = append(details.Credits, it.elem) } if it.err != nil { return nil, it.err } // Debit records are not saved for unmined transactions. Instead, they // must be looked up for each transaction input manually. There are two // kinds of previous credits that may be debited by an unmined // transaction: mined unspent outputs (which remain marked unspent even // when spent by an unmined transaction), and credits from other unmined // transactions. Both situations must be considered. for i, output := range details.MsgTx.TxIn { opKey := canonicalOutPoint(&output.PreviousOutPoint.Hash, output.PreviousOutPoint.Index) credKey := existsRawUnspent(ns, opKey) if credKey != nil { v := existsRawCredit(ns, credKey) amount, err := fetchRawCreditAmount(v) if err != nil { return nil, err } details.Debits = append(details.Debits, DebitRecord{ Amount: amount, Index: uint32(i), }) continue } v := existsRawUnminedCredit(ns, opKey) if v == nil { continue } amount, err := fetchRawCreditAmount(v) if err != nil { return nil, err } details.Debits = append(details.Debits, DebitRecord{ Amount: amount, Index: uint32(i), }) } // Finally, we add the transaction label to details. details.Label, err = s.TxLabel(ns, *txHash) if err != nil { return nil, err } return &details, nil } // TxLabel looks up a transaction label for the txHash provided. If the store // has no labels in it, or the specific txHash does not have a label, an empty // string and no error are returned. func (s *Store) TxLabel(ns walletdb.ReadBucket, txHash chainhash.Hash) (string, error) { label, err := FetchTxLabel(ns, txHash) switch err { // If there are no saved labels yet (the bucket has not been created) or // there is not a label for this particular tx, we ignore the error. case ErrNoLabelBucket: fallthrough case ErrTxLabelNotFound: return "", nil // If we found the label, we return it. case nil: return label, nil } // Otherwise, another error occurred while looking up the label, so we // return it. return "", err } // TxDetails looks up all recorded details regarding a transaction with some // hash. In case of a hash collision, the most recent transaction with a // matching hash is returned. // // Not finding a transaction with this hash is not an error. In this case, // a nil TxDetails is returned. func (s *Store) TxDetails(ns walletdb.ReadBucket, txHash *chainhash.Hash) (*TxDetails, error)
// UniqueTxDetails looks up all recorded details for a transaction recorded // mined in some particular block, or an unmined transaction if block is nil. // // Not finding a transaction with this hash from this block is not an error. In // this case, a nil TxDetails is returned. func (s *Store) UniqueTxDetails(ns walletdb.ReadBucket, txHash *chainhash.Hash, block *Block) (*TxDetails, error) { if block == nil { v := existsRawUnmined(ns, txHash[:]) if v == nil { return nil, nil } return s.unminedTxDetails(ns, txHash, v) } k, v := existsTxRecord(ns, txHash, block) if v == nil { return nil, nil } return s.minedTxDetails(ns, txHash, k, v) } // rangeUnminedTransactions executes the function f with TxDetails for every // unmined transaction. f is not executed if no unmined transactions exist. // Error returns from f (if any) are propagated to the caller. Returns true // (signaling breaking out of a RangeTransactions) iff f executes and returns // true. func (s *Store) rangeUnminedTransactions(ns walletdb.ReadBucket, f func([]TxDetails) (bool, error)) (bool, error) { var details []TxDetails err := ns.NestedReadBucket(bucketUnmined).ForEach(func(k, v []byte) error { if len(k) < 32 { str := fmt.Sprintf("%s: short key (expected %d "+ "bytes, read %d)", bucketUnmined, 32, len(k)) return storeError(ErrData, str, nil) } var txHash chainhash.Hash copy(txHash[:], k) detail, err := s.unminedTxDetails(ns, &txHash, v) if err != nil { return err } // Because the key was created while foreach-ing over the // bucket, it should be impossible for unminedTxDetails to ever // successfully return a nil details struct. details = append(details, *detail) return nil }) if err == nil && len(details) > 0 { return f(details) } return false, err } // rangeBlockTransactions executes the function f with TxDetails for every block // between heights begin and end (reverse order when end > begin) until f // returns true or the transactions from every block are processed. Returns true iff // f executes and returns true. func (s *Store) rangeBlockTransactions(ns walletdb.ReadBucket, begin, end int32, f func([]TxDetails) (bool, error)) (bool, error) { // Mempool height is considered a high bound. if begin < 0 { begin = int32(^uint32(0) >> 1) } if end < 0 { end = int32(^uint32(0) >> 1) } var blockIter blockIterator var advance func(*blockIterator) bool if begin < end { // Iterate in forwards order blockIter = makeReadBlockIterator(ns, begin) advance = func(it *blockIterator) bool { if !it.next() { return false } return it.elem.Height <= end } } else { // Iterate in backwards order, from begin -> end. blockIter = makeReadBlockIterator(ns, begin) advance = func(it *blockIterator) bool { if !it.prev() { return false } return end <= it.elem.Height } } var details []TxDetails for advance(&blockIter) { block := &blockIter.elem if cap(details) < len(block.transactions) { details = make([]TxDetails, 0, len(block.transactions)) } else { details = details[:0] } for _, txHash := range block.transactions { k := keyTxRecord(&txHash, &block.Block) v := existsRawTxRecord(ns, k) if v == nil { str := fmt.Sprintf("missing transaction %v for "+ "block %v", txHash, block.Height) return false, storeError(ErrData, str, nil) } detail, err := s.minedTxDetails(ns, &txHash, k, v) if err != nil { return false, err } details = append(details, *detail) } // Every block record must have at least one transaction, so it // is safe to call f.
brk, err := f(details) if err != nil || brk { return brk, err } } return false, blockIter.err } // RangeTransactions runs the function f on all transaction details between // blocks on the best chain over the height range [begin,end]. The special // height -1 may be used to also include unmined transactions. If the end // height comes before the begin height, blocks are iterated in reverse order // and unmined transactions (if any) are processed first. // // The function f may return an error which, if non-nil, is propagated to the // caller. Additionally, a boolean return value allows exiting the function // early without reading any additional transactions early when true. // // All calls to f are guaranteed to be passed a slice with more than zero // elements. The slice may be reused for multiple blocks, so it is not safe to // use it after the loop iteration it was acquired. func (s *Store) RangeTransactions(ns walletdb.ReadBucket, begin, end int32, f func([]TxDetails) (bool, error)) error { var addedUnmined bool if begin < 0 { brk, err := s.rangeUnminedTransactions(ns, f) if err != nil || brk { return err } addedUnmined = true } brk, err := s.rangeBlockTransactions(ns, begin, end, f) if err == nil && !brk && !addedUnmined && end < 0 { _, err = s.rangeUnminedTransactions(ns, f) } return err } // PreviousPkScripts returns a slice of previous output scripts for each credit // output this transaction record debits from. func (s *Store) PreviousPkScripts(ns walletdb.ReadBucket, rec *TxRecord, block *Block) ([][]byte, error) { var pkScripts [][]byte if block == nil { for _, input := range rec.MsgTx.TxIn { prevOut := &input.PreviousOutPoint // Input may spend a previous unmined output, a // mined output (which would still be marked // unspent), or neither. v := existsRawUnmined(ns, prevOut.Hash[:]) if v != nil { // Ensure a credit exists for this // unmined transaction before including // the output script. k := canonicalOutPoint(&prevOut.Hash, prevOut.Index) if existsRawUnminedCredit(ns, k) == nil { continue } pkScript, err := fetchRawTxRecordPkScript( prevOut.Hash[:], v, prevOut.Index) if err != nil { return nil, err } pkScripts = append(pkScripts, pkScript) continue } _, credKey := existsUnspent(ns, prevOut) if credKey != nil { k := extractRawCreditTxRecordKey(credKey) v = existsRawTxRecord(ns, k) pkScript, err := fetchRawTxRecordPkScript(k, v, prevOut.Index) if err != nil { return nil, err } pkScripts = append(pkScripts, pkScript) continue } } return pkScripts, nil } recKey := keyTxRecord(&rec.Hash, block) it := makeReadDebitIterator(ns, recKey) for it.next() { credKey := extractRawDebitCreditKey(it.cv) index := extractRawCreditIndex(credKey) k := extractRawCreditTxRecordKey(credKey) v := existsRawTxRecord(ns, k) pkScript, err := fetchRawTxRecordPkScript(k, v, index) if err != nil { return nil, err } pkScripts = append(pkScripts, pkScript) } if it.err != nil { return nil, it.err } return pkScripts, nil }
{ // First, check whether there exists an unmined transaction with this // hash. Use it if found. v := existsRawUnmined(ns, txHash[:]) if v != nil { return s.unminedTxDetails(ns, txHash, v) } // Otherwise, if there exists a mined transaction with this matching // hash, skip over to the newest and begin fetching all details. k, v := latestTxRecord(ns, txHash) if v == nil { // not found return nil, nil } return s.minedTxDetails(ns, txHash, k, v) }
identifier_body
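The identifier_body target above is the body of Store.TxDetails, which first consults the unmined bucket and only then falls back to the newest mined record for the hash. A minimal usage sketch, assuming the caller already holds the open wtxmgr namespace bucket ns; the printTxSummary helper, the package name, and the wtxmgr import path are illustrative assumptions rather than code from the file:

package example

import (
	"fmt"

	"github.com/btcsuite/btcd/chaincfg/chainhash"
	"github.com/btcsuite/btcwallet/walletdb"
	"github.com/btcsuite/btcwallet/wtxmgr"
)

// printTxSummary is a hypothetical helper showing how a caller might consume
// the TxDetails returned by Store.TxDetails.
func printTxSummary(s *wtxmgr.Store, ns walletdb.ReadBucket, hash *chainhash.Hash) error {
	details, err := s.TxDetails(ns, hash)
	if err != nil {
		return err
	}
	if details == nil {
		// Not finding the transaction is not an error; nil is returned.
		fmt.Printf("tx %v not recorded\n", hash)
		return nil
	}
	fmt.Printf("tx %v: height=%d credits=%d debits=%d label=%q\n",
		hash, details.Block.Height, len(details.Credits), len(details.Debits), details.Label)
	return nil
}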
query.go
// Copyright (c) 2015-2017 The btcsuite developers // Copyright (c) 2015-2016 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. package wtxmgr import ( "fmt" "github.com/btcsuite/btcd/btcutil" "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcwallet/walletdb" ) // CreditRecord contains metadata regarding a transaction credit for a known // transaction. Further details may be looked up by indexing a wire.MsgTx.TxOut // with the Index field. type CreditRecord struct { Amount btcutil.Amount Index uint32 Spent bool Change bool } // DebitRecord contains metadata regarding a transaction debit for a known // transaction. Further details may be looked up by indexing a wire.MsgTx.TxIn // with the Index field. type DebitRecord struct { Amount btcutil.Amount Index uint32 } // TxDetails is intended to provide callers with access to rich details // regarding a relevant transaction and which inputs and outputs are credit or // debits. type TxDetails struct { TxRecord Block BlockMeta Credits []CreditRecord Debits []DebitRecord Label string } // minedTxDetails fetches the TxDetails for the mined transaction with hash // txHash and the passed tx record key and value. func (s *Store) minedTxDetails(ns walletdb.ReadBucket, txHash *chainhash.Hash, recKey, recVal []byte) (*TxDetails, error) { var details TxDetails // Parse transaction record k/v, lookup the full block record for the // block time, and read all matching credits, debits. err := readRawTxRecord(txHash, recVal, &details.TxRecord) if err != nil { return nil, err } err = readRawTxRecordBlock(recKey, &details.Block.Block) if err != nil { return nil, err } details.Block.Time, err = fetchBlockTime(ns, details.Block.Height) if err != nil { return nil, err } credIter := makeReadCreditIterator(ns, recKey) for credIter.next() { if int(credIter.elem.Index) >= len(details.MsgTx.TxOut) { str := "saved credit index exceeds number of outputs" return nil, storeError(ErrData, str, nil) } // The credit iterator does not record whether this credit was // spent by an unmined transaction, so check that here. if !credIter.elem.Spent { k := canonicalOutPoint(txHash, credIter.elem.Index) spent := existsRawUnminedInput(ns, k) != nil credIter.elem.Spent = spent } details.Credits = append(details.Credits, credIter.elem) } if credIter.err != nil { return nil, credIter.err } debIter := makeReadDebitIterator(ns, recKey) for debIter.next() { if int(debIter.elem.Index) >= len(details.MsgTx.TxIn) { str := "saved debit index exceeds number of inputs" return nil, storeError(ErrData, str, nil) } details.Debits = append(details.Debits, debIter.elem) } if debIter.err != nil { return nil, debIter.err } // Finally, we add the transaction label to details. details.Label, err = s.TxLabel(ns, *txHash) if err != nil { return nil, err } return &details, nil } // unminedTxDetails fetches the TxDetails for the unmined transaction with the // hash txHash and the passed unmined record value. 
func (s *Store) unminedTxDetails(ns walletdb.ReadBucket, txHash *chainhash.Hash, v []byte) (*TxDetails, error) { details := TxDetails{ Block: BlockMeta{Block: Block{Height: -1}}, } err := readRawTxRecord(txHash, v, &details.TxRecord) if err != nil { return nil, err } it := makeReadUnminedCreditIterator(ns, txHash) for it.next() { if int(it.elem.Index) >= len(details.MsgTx.TxOut) { str := "saved credit index exceeds number of outputs" return nil, storeError(ErrData, str, nil) } // Set the Spent field since this is not done by the iterator. it.elem.Spent = existsRawUnminedInput(ns, it.ck) != nil details.Credits = append(details.Credits, it.elem) } if it.err != nil { return nil, it.err } // Debit records are not saved for unmined transactions. Instead, they // must be looked up for each transaction input manually. There are two // kinds of previous credits that may be debited by an unmined // transaction: mined unspent outputs (which remain marked unspent even // when spent by an unmined transaction), and credits from other unmined // transactions. Both situations must be considered. for i, output := range details.MsgTx.TxIn { opKey := canonicalOutPoint(&output.PreviousOutPoint.Hash, output.PreviousOutPoint.Index) credKey := existsRawUnspent(ns, opKey) if credKey != nil { v := existsRawCredit(ns, credKey) amount, err := fetchRawCreditAmount(v) if err != nil { return nil, err } details.Debits = append(details.Debits, DebitRecord{ Amount: amount, Index: uint32(i), }) continue } v := existsRawUnminedCredit(ns, opKey) if v == nil { continue } amount, err := fetchRawCreditAmount(v) if err != nil { return nil, err } details.Debits = append(details.Debits, DebitRecord{ Amount: amount, Index: uint32(i), }) } // Finally, we add the transaction label to details. details.Label, err = s.TxLabel(ns, *txHash) if err != nil { return nil, err } return &details, nil } // TxLabel looks up a transaction label for the txHash provided. If the store // has no labels in it, or the specific txHash does not have a label, an empty // string and no error are returned. func (s *Store) TxLabel(ns walletdb.ReadBucket, txHash chainhash.Hash) (string, error) { label, err := FetchTxLabel(ns, txHash) switch err { // If there are no saved labels yet (the bucket has not been created) or // there is not a label for this particular tx, we ignore the error. case ErrNoLabelBucket: fallthrough case ErrTxLabelNotFound: return "", nil // If we found the label, we return it. case nil: return label, nil } // Otherwise, another error occurred while looking uo the label, so we // return it. return "", err } // TxDetails looks up all recorded details regarding a transaction with some // hash. In case of a hash collision, the most recent transaction with a // matching hash is returned. // // Not finding a transaction with this hash is not an error. In this case, // a nil TxDetails is returned. func (s *Store) TxDetails(ns walletdb.ReadBucket, txHash *chainhash.Hash) (*TxDetails, error) { // First, check whether there exists an unmined transaction with this // hash. Use it if found. v := existsRawUnmined(ns, txHash[:]) if v != nil { return s.unminedTxDetails(ns, txHash, v) } // Otherwise, if there exists a mined transaction with this matching // hash, skip over to the newest and begin fetching all details. 
k, v := latestTxRecord(ns, txHash) if v == nil { // not found return nil, nil } return s.minedTxDetails(ns, txHash, k, v) } // UniqueTxDetails looks up all recorded details for a transaction recorded // mined in some particular block, or an unmined transaction if block is nil. // // Not finding a transaction with this hash from this block is not an error. In // this case, a nil TxDetails is returned. func (s *Store) UniqueTxDetails(ns walletdb.ReadBucket, txHash *chainhash.Hash, block *Block) (*TxDetails, error) { if block == nil { v := existsRawUnmined(ns, txHash[:]) if v == nil { return nil, nil } return s.unminedTxDetails(ns, txHash, v) } k, v := existsTxRecord(ns, txHash, block) if v == nil { return nil, nil } return s.minedTxDetails(ns, txHash, k, v) } // rangeUnminedTransactions executes the function f with TxDetails for every // unmined transaction. f is not executed if no unmined transactions exist. // Error returns from f (if any) are propigated to the caller. Returns true // (signaling breaking out of a RangeTransactions) iff f executes and returns // true. func (s *Store) rangeUnminedTransactions(ns walletdb.ReadBucket, f func([]TxDetails) (bool, error)) (bool, error) { var details []TxDetails err := ns.NestedReadBucket(bucketUnmined).ForEach(func(k, v []byte) error { if len(k) < 32 { str := fmt.Sprintf("%s: short key (expected %d "+ "bytes, read %d)", bucketUnmined, 32, len(k)) return storeError(ErrData, str, nil) } var txHash chainhash.Hash copy(txHash[:], k) detail, err := s.unminedTxDetails(ns, &txHash, v) if err != nil { return err } // Because the key was created while foreach-ing over the // bucket, it should be impossible for unminedTxDetails to ever // successfully return a nil details struct. details = append(details, *detail) return nil }) if err == nil && len(details) > 0 { return f(details) } return false, err } // rangeBlockTransactions executes the function f with TxDetails for every block // between heights begin and end (reverse order when end > begin) until f // returns true, or the transactions from block is processed. Returns true iff // f executes and returns true. func (s *Store) rangeBlockTransactions(ns walletdb.ReadBucket, begin, end int32, f func([]TxDetails) (bool, error)) (bool, error) { // Mempool height is considered a high bound. if begin < 0 { begin = int32(^uint32(0) >> 1) } if end < 0 { end = int32(^uint32(0) >> 1) } var blockIter blockIterator var advance func(*blockIterator) bool if begin < end { // Iterate in forwards order blockIter = makeReadBlockIterator(ns, begin) advance = func(it *blockIterator) bool { if !it.next() { return false } return it.elem.Height <= end } } else { // Iterate in backwards order, from begin -> end. blockIter = makeReadBlockIterator(ns, begin) advance = func(it *blockIterator) bool { if !it.prev() { return false } return end <= it.elem.Height } } var details []TxDetails for advance(&blockIter) { block := &blockIter.elem if cap(details) < len(block.transactions) { details = make([]TxDetails, 0, len(block.transactions)) } else { details = details[:0] } for _, txHash := range block.transactions { k := keyTxRecord(&txHash, &block.Block) v := existsRawTxRecord(ns, k) if v == nil { str := fmt.Sprintf("missing transaction %v for "+ "block %v", txHash, block.Height) return false, storeError(ErrData, str, nil) } detail, err := s.minedTxDetails(ns, &txHash, k, v) if err != nil { return false, err } details = append(details, *detail) } // Every block record must have at least one transaction, so it // is safe to call f. 
brk, err := f(details) if err != nil || brk { return brk, err } } return false, blockIter.err } // RangeTransactions runs the function f on all transaction details between // blocks on the best chain over the height range [begin,end]. The special // height -1 may be used to also include unmined transactions. If the end // height comes before the begin height, blocks are iterated in reverse order // and unmined transactions (if any) are processed first. // // The function f may return an error which, if non-nil, is propagated to the // caller. Additionally, a boolean return value allows exiting the function // early without reading any additional transactions when true. // // All calls to f are guaranteed to be passed a slice with more than zero // elements. The slice may be reused for multiple blocks, so it is not safe to // use it after the loop iteration it was acquired. func (s *Store)
(ns walletdb.ReadBucket, begin, end int32, f func([]TxDetails) (bool, error)) error { var addedUnmined bool if begin < 0 { brk, err := s.rangeUnminedTransactions(ns, f) if err != nil || brk { return err } addedUnmined = true } brk, err := s.rangeBlockTransactions(ns, begin, end, f) if err == nil && !brk && !addedUnmined && end < 0 { _, err = s.rangeUnminedTransactions(ns, f) } return err } // PreviousPkScripts returns a slice of previous output scripts for each credit // output this transaction record debits from. func (s *Store) PreviousPkScripts(ns walletdb.ReadBucket, rec *TxRecord, block *Block) ([][]byte, error) { var pkScripts [][]byte if block == nil { for _, input := range rec.MsgTx.TxIn { prevOut := &input.PreviousOutPoint // Input may spend a previous unmined output, a // mined output (which would still be marked // unspent), or neither. v := existsRawUnmined(ns, prevOut.Hash[:]) if v != nil { // Ensure a credit exists for this // unmined transaction before including // the output script. k := canonicalOutPoint(&prevOut.Hash, prevOut.Index) if existsRawUnminedCredit(ns, k) == nil { continue } pkScript, err := fetchRawTxRecordPkScript( prevOut.Hash[:], v, prevOut.Index) if err != nil { return nil, err } pkScripts = append(pkScripts, pkScript) continue } _, credKey := existsUnspent(ns, prevOut) if credKey != nil { k := extractRawCreditTxRecordKey(credKey) v = existsRawTxRecord(ns, k) pkScript, err := fetchRawTxRecordPkScript(k, v, prevOut.Index) if err != nil { return nil, err } pkScripts = append(pkScripts, pkScript) continue } } return pkScripts, nil } recKey := keyTxRecord(&rec.Hash, block) it := makeReadDebitIterator(ns, recKey) for it.next() { credKey := extractRawDebitCreditKey(it.cv) index := extractRawCreditIndex(credKey) k := extractRawCreditTxRecordKey(credKey) v := existsRawTxRecord(ns, k) pkScript, err := fetchRawTxRecordPkScript(k, v, index) if err != nil { return nil, err } pkScripts = append(pkScripts, pkScript) } if it.err != nil { return nil, it.err } return pkScripts, nil }
RangeTransactions
identifier_name
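The identifier_name being completed above is RangeTransactions. The sketch below shows how its callback contract might be used (early-exit boolean, error propagation, and height -1 meaning "include unmined transactions"); countCredits, the package name, and the import paths are illustrative assumptions, not code from the file:

package example

import (
	"github.com/btcsuite/btcwallet/walletdb"
	"github.com/btcsuite/btcwallet/wtxmgr"
)

// countCredits tallies credit outputs over every mined block and, because the
// end height is -1, over unmined transactions as well.
func countCredits(s *wtxmgr.Store, ns walletdb.ReadBucket) (int, error) {
	total := 0
	err := s.RangeTransactions(ns, 0, -1, func(batch []wtxmgr.TxDetails) (bool, error) {
		for _, d := range batch {
			total += len(d.Credits)
		}
		// The slice may be reused for later blocks, so nothing from batch is
		// retained here. Returning true instead would stop the iteration early.
		return false, nil
	})
	return total, err
}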
settings.py
# Django settings for isbio project. # from configurations import Settings import logging import os import socket import time from datetime import datetime from utilz import git, TermColoring, recur, recur_rec, get_key, import_env, file_content, is_host_online, test_url, \ magic_const, get_md5 ENABLE_DATADOG = False ENABLE_ROLLBAR = False statsd = False try: from datadog import statsd if ENABLE_DATADOG: ENABLE_DATADOG = True except Exception: ENABLE_DATADOG = False ENABLE_REMOTE_FW = False # TODO : redesign PID = os.getpid() MAINTENANCE = False USUAL_DATE_FORMAT = "%Y-%m-%d %H:%M:%S" USUAL_LOG_FORMAT = \ '%(asctime)s,%(msecs)03d P%(process)05d %(levelname)-8s %(lineno)04d:%(module)-20s %(funcName)-25s %(message)s' USUAL_LOG_LEN_BEFORE_MESSAGE = 93 USUAL_LOG_FORMAT_DESCRIPTOR =\ 'DATE TIME,milisec PID LEVEL LINE:MODULE FUNCTION MESSAGE' DB_DATE_FORMAT = "%Y-%m-%d %H:%M:%S" LOG_FOLDER = '/var/log/breeze/' # log_fname = 'breeze_%s.log' % datetime.now().strftime("%Y-%m-%d_%H-%M-%S%z") log_fname = 'rotating.log' log_hit_fname = 'access.log' LOG_PATH = '%s%s' % (LOG_FOLDER, log_fname) LOG_HIT_PATH = '%s%s' % (LOG_FOLDER, log_hit_fname) # DEBUG = False TEMPLATE_DEBUG = False ADMINS = ( ('Clement FIERE', 'clement.fiere@helsinki.fi'), ) MANAGERS = ADMINS MYSQL_SECRET_FILE = 'mysql_root' # Local time zone for this installation. Choices can be found here: # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name # although not all choices may be available on all operating systems. # In a Windows environment this must be set to your system time zone. TIME_ZONE = 'Europe/Helsinki' DATABASES = { 'default': { 'ENGINE' : 'django.db.backends.mysql', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'. 'NAME' : 'breezedb', # Or path to database file if using sqlite3. 'USER' : 'root', # Not used with sqlite3. 'PASSWORD' : get_key(MYSQL_SECRET_FILE), # Not used with sqlite3. 'HOST' : 'breeze-sql', # Set to empty string for localhost. Not used with sqlite3. 'PORT' : '3306', # Set to empty string for default. Not used with sqlite3. 'OPTIONS' : { "init_command": "SET default_storage_engine=INNODB; SET SESSION TRANSACTION ISOLATION LEVEL READ " "COMMITTED", } # "init_command": "SET transaction isolation level READ COMMITTED", } } } ROOT_URLCONF = 'isbio.urls' TEMPLATES = [ { 'BACKEND' : 'django.template.backends.django.DjangoTemplates', 'DIRS' : [], 'APP_DIRS': True, 'OPTIONS' : { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', 'django.template.context_processors.media', 'django.template.context_processors.static', 'breeze.context.user_context', 'breeze.context.date_context', 'breeze.context.run_mode_context', # 'django_auth0.context_processors.auth0', # moved to config/auth0.py "breeze.context.site", "breeze.context.__context_var_list", ], }, }, ] # Language code for this installation. All choices can be found here: # http://www.i18nguy.com/unicode/language-identifiers.html LANGUAGE_CODE = 'en-us' SITE_ID = 1 # If you set this to False, Django will make some optimizations so as not # to load the internationalization machinery. USE_I18N = True # If you set this to False, Django will not format dates, numbers and # calendars according to the current locale. USE_L10N = True # If you set this to False, Django will not use timezone-aware datetimes. USE_TZ = True # URL that handles the media served from MEDIA_ROOT. 
Make sure to use a # trailing slash. # Examples: "http://media.lawrence.com/media/", "http://example.com/media/" MEDIA_URL = '/media/' # Absolute path to the directory static files should be collected to. # Don't put anything in this directory yourself; store your static files # in apps' "static/" subdirectories and in STATICFILES_DIRS. # Example: "/home/media/media.lawrence.com/static/" # STATIC_ROOT = '' # ** moved lower in this file # URL prefix for static files. # Example: "http://media.lawrence.com/static/" STATIC_URL = '/static/' # Additional locations of static files # ** moved to configs/env/* # STATICFILES_DIRS = ( # "/root/static_source", # ) # List of finder classes that know how to find static files in # various locations. STATICFILES_FINDERS = ( 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', 'django.contrib.staticfiles.finders.DefaultStorageFinder', ) # Make this unique, and don't share it with anybody. SECRET_KEY_FN = 'django' SECRET_KEY = get_key(SECRET_KEY_FN) # List of callable that know how to import templates from various sources. # TEMPLATE_LOADERS = ( # 'django.template.loaders.filesystem.Loader', # 'django.template.loaders.app_directories.Loader', # ) # AUTH_USER_MODEL = 'breeze.OrderedUser' # AUTH_USER_MODEL = 'breeze.CustomUser' # FIXME INSTALLED_APPS = [ 'suit', 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.sites', 'django.contrib.messages', 'django.contrib.staticfiles', 'bootstrap_toolkit', 'breeze.apps.Config', 'shiny.apps.Config', 'dbviewer.apps.Config', 'compute.apps.Config', 'down.apps.Config', # 'south', 'gunicorn', 'mathfilters', # 'django_auth0', # moved to config/auth0.py 'hello_auth.apps.Config', 'api.apps.Config', 'webhooks.apps.Config', 'utilz.apps.Config', 'django_requestlogging', 'django.contrib.admindocs', 'django_extensions' ] MIDDLEWARE_CLASSES = [ 'breeze.middlewares.BreezeAwake', 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', # 'django.middleware.doc.XViewMiddleware', 'breeze.middlewares.JobKeeper', 'breeze.middlewares.CheckUserProfile', 'breeze.middlewares.ContextualRequest', 'django_requestlogging.middleware.LogSetupMiddleware', 'breeze.middlewares.DataDog' if ENABLE_DATADOG else 'breeze.middlewares.Empty', 'breeze.middlewares.RemoteFW' if ENABLE_REMOTE_FW else 'breeze.middlewares.Empty', 'rollbar.contrib.django.middleware.RollbarNotifierMiddleware' if ENABLE_ROLLBAR else 'breeze.middlewares.Empty', ] # ** AUTHENTICATION_BACKENDS moved to specific auth config files (config/env/auth/*) # ** AUTH0_* moved to config/env/auth/auth0.py SSH_TUNNEL_HOST = 'breeze-ssh' SSH_TUNNEL_PORT = '2222' # SSH_TUNNEL_TEST_URL = 'breeze-ssh' # ROOT_URLCONF = 'isbio.urls' APPEND_SLASH = True # Python dotted path to the WSGI application used by Django's runserver. 
WSGI_APPLICATION = 'isbio.wsgi.application' # provide our profile model AUTH_PROFILE_MODULE = 'breeze.UserProfile' # allow on the fly creation of guest user accounts AUTH_ALLOW_GUEST = False # allow anonymous visitor to login as disposable guests GUEST_INSTITUTE_ID = 3 # guest institute GUEST_EXPIRATION_TIME = 24 * 60 # expiration time of inactive guests in minutes GUEST_FIRST_NAME = 'guest' GUEST_GROUP_NAME = GUEST_FIRST_NAME.capitalize() + 's' ALL_GROUP_NAME = 'Registered users' RESTRICT_GUEST_TO_SPECIFIC_VIEWS = True DEFAULT_LOGIN_URL = '/login_page' FORCE_DEFAULT_LOGIN_URL = True # A sample logging configuration. The only tangible logging # performed by this configuration is to send an email to # the site admins on every HTTP 500 error when DEBUG=False. # See http://docs.djangoproject.com/en/dev/topics/logging for # more details on how to customize your logging configuration. LOGGING = { 'version': 1, 'disable_existing_loggers': False, 'formatters': { 'standard': { 'format': USUAL_LOG_FORMAT, 'datefmt': USUAL_DATE_FORMAT, }, }, 'filters': { 'require_debug_false': { '()': 'django.utils.log.RequireDebugFalse' } }, 'handlers': { 'default': { 'level': 'DEBUG', 'class': 'logging.handlers.RotatingFileHandler', 'filename': LOG_PATH, 'maxBytes': 1024 * 1024 * 5, # 5 MB 'backupCount': 10, 'formatter': 'standard', }, 'mail_admins': { 'level': 'ERROR', 'filters': ['require_debug_false'], 'class': 'django.utils.log.AdminEmailHandler' }, }, 'loggers': { '': { 'handlers': ['default'], 'level': logging.INFO, 'propagate': True }, 'django.request': { 'handlers': ['mail_admins'], 'level': 'ERROR', 'propagate': True, }, } } class DomainList(object): CLOUD_PROD = ['breeze.fimm.fi', '13.79.158.135', ] CLOUD_DEV = ['breeze-dev.northeurope.cloudapp.azure.com', '52.164.209.61', ] FIMM_PH = ['breeze-newph.fimm.fi', 'breeze-ph.fimm.fi', ] FIMM_DEV = ['breeze-dev.fimm.fi', ] FIMM_PROD = ['breeze-fimm.fimm.fi', 'breeze-new.fimm.fi', ] @classmethod def get_current_domain(cls): from isbio.config import RUN_ENV_CLASS, ConfigEnvironments, MODE_PROD, DEV_MODE, PHARMA_MODE if RUN_ENV_CLASS is ConfigEnvironments.AzureCloud: domain = cls.CLOUD_DEV if DEV_MODE else cls.CLOUD_PROD elif RUN_ENV_CLASS is ConfigEnvironments.FIMM: domain = cls.FIMM_PROD if MODE_PROD else cls.FIMM_PH if PHARMA_MODE else cls.FIMM_DEV return domain[0] DEBUG = False VERBOSE = False SQL_DUMP = False # APPEND_SLASH = True ADMINS = ( ('Clement FIERE', 'clement.fiere@helsinki.fi'), ) # root of the Breeze django project folder, includes 'venv', 'static' folder copy, isbio, logs SOURCE_ROOT = recur(3, os.path.dirname, os.path.realpath(__file__)) + '/' DJANGO_ROOT = recur(2, os.path.dirname, os.path.realpath(__file__)) + '/' TEMPLATE_FOLDER = DJANGO_ROOT + 'templates/' # source templates (not HTML ones) DJANGO_AUTH_MODEL_BACKEND_PY_PATH = 'django.contrib.auth.backends.ModelBackend' # CAS_NG_BACKEND_PY_PATH = 'my_django.cas_ng_custom.CASBackend' AUTH0_BACKEND_PY_PATH = 'django_auth0.auth_backend.Auth0Backend' AUTH0_CUSTOM_BACKEND_PY_PATH = 'custom_auth0.auth_backend.Auth0Backend' os.environ['MAIL'] = '/var/mail/dbychkov' # FIXME obsolete CONSOLE_DATE_F = "%d/%b/%Y %H:%M:%S" # auto-sensing if running on dev or prod, for dynamic environment configuration # FIXME broken in docker container FULL_HOST_NAME = socket.gethostname() HOST_NAME = str.split(FULL_HOST_NAME, '.')[0] # do not move. 
here because some utils function uses it FIMM_NETWORK = '128.214.0.0/16' from config import * # Super User on breeze can access all data SU_ACCESS_OVERRIDE = True PROJECT_PATH = PROJECT_FOLDER + BREEZE_FOLDER if not os.path.isdir(PROJECT_PATH): PROJECT_FOLDER = '/%s/' % PROJECT_FOLDER_NAME PROD_PATH = '%s%s' % (PROJECT_FOLDER, BREEZE_FOLDER) R_ENGINE_SUB_PATH = 'R/bin/R ' # FIXME LEGACY ONLY R_ENGINE_PATH = PROD_PATH + R_ENGINE_SUB_PATH if not os.path.isfile( R_ENGINE_PATH.strip()):
PROJECT_FHRB_PM_PATH = '/%s/fhrb_pm/' % PROJECT_FOLDER_NAME JDBC_BRIDGE_PATH = PROJECT_FHRB_PM_PATH + 'bin/start-jdbc-bridge' # Every other path has a trailing / TEMP_FOLDER = SOURCE_ROOT + 'tmp/' #### # 'db' folder, containing : reports, scripts, jobs, datasets, pipelines, upload_temp #### DATA_TEMPLATES_FN = 'mould/' RE_RUN_SH = SOURCE_ROOT + 're_run.sh' MEDIA_ROOT = PROJECT_PATH + 'db/' RORA_LIB = PROJECT_PATH + 'RORALib/' UPLOAD_FOLDER = MEDIA_ROOT + 'upload_temp/' DATASETS_FOLDER = MEDIA_ROOT + 'datasets/' STATIC_ROOT = SOURCE_ROOT + 'static_source/' # static files for the website DJANGO_CONFIG_FOLDER = SOURCE_ROOT + 'config/' # Where to store secrets and deployment conf MOULD_FOLDER = MEDIA_ROOT + DATA_TEMPLATES_FN NO_TAG_XML = TEMPLATE_FOLDER + 'notag.xml' SH_LOG_FOLDER = '.log' GENERAL_SH_BASE_NAME = 'run_job' GENERAL_SH_NAME = '%s.sh' % GENERAL_SH_BASE_NAME GENERAL_SH_CONF_NAME = '%s_conf.sh' % GENERAL_SH_BASE_NAME DOCKER_SH_NAME = 'run.sh' REPORTS_CACHE_INTERNAL_URL = '/cached/reports/' INCOMPLETE_RUN_FN = '.INCOMPLETE_RUN' FAILED_FN = '.failed' SUCCESS_FN = '.done' R_DONE_FN = '.sub_done' # ** moved to config/execution/sge.py # SGE_QUEUE_NAME = 'breeze.q' # monitoring only # ** moved to config/env/azure_cloud.py # DOCKER_HUB_PASS_FILE = SOURCE_ROOT + 'docker_repo' # AZURE_PASS_FILE = SOURCE_ROOT + 'azure_pwd' # moved to config/env/azure_cloud.py # # ComputeTarget configs # # TODO config # 13/05/2016 CONFIG_FN = 'configs/' CONFIG_PATH = MEDIA_ROOT + CONFIG_FN # 19/04/2016 TARGET_CONFIG_FN = 'target/' TARGET_CONFIG_PATH = CONFIG_PATH + TARGET_CONFIG_FN # 08/06/2016 DEFAULT_TARGET_ID = BREEZE_TARGET_ID # 13/05/2016 EXEC_CONFIG_FN = 'exec/' EXEC_CONFIG_PATH = CONFIG_PATH + EXEC_CONFIG_FN # 13/05/2016 ENGINE_CONFIG_FN = 'engine/' ENGINE_CONFIG_PATH = CONFIG_PATH + ENGINE_CONFIG_FN # 23/05/2016 SWAP_FN = 'swap/' SWAP_PATH = MEDIA_ROOT + SWAP_FN # 21/02/2017 SHINY_SECRET_KEY_FN = 'shiny' SHINY_SECRET = get_key(SHINY_SECRET_KEY_FN) # Warning : shiny_secret must be at least 32 char long. ENC_SESSION_ID_COOKIE_NAME = get_md5('seed') ## # Report config ## BOOTSTRAP_SH_TEMPLATE = TEMPLATE_FOLDER + GENERAL_SH_NAME BOOTSTRAP_SH_CONF_TEMPLATE = TEMPLATE_FOLDER + GENERAL_SH_CONF_NAME DOCKER_BOOTSTRAP_SH_TEMPLATE = TEMPLATE_FOLDER + DOCKER_SH_NAME NOZZLE_TEMPLATE_FOLDER = TEMPLATE_FOLDER + 'nozzle_templates/' TAGS_TEMPLATE_PATH = NOZZLE_TEMPLATE_FOLDER + 'tag.R' NOZZLE_REPORT_TEMPLATE_PATH = NOZZLE_TEMPLATE_FOLDER + 'report.R' NOZZLE_REPORT_FN = 'report' RSCRIPTS_FN = 'scripts/' RSCRIPTS_PATH = MEDIA_ROOT + RSCRIPTS_FN REPORT_TYPE_FN = 'pipelines/' REPORT_TYPE_PATH = MEDIA_ROOT + REPORT_TYPE_FN REPORTS_FN = 'reports/' REPORTS_PATH = '%s%s' % (MEDIA_ROOT, REPORTS_FN) REPORTS_SH = GENERAL_SH_NAME REPORTS_FM_FN = 'transfer_to_fm.txt' R_FILE_NAME_BASE = 'script' R_FILE_NAME = R_FILE_NAME_BASE + '.r' R_OUT_EXT = '.Rout' ## # Jobs configs ## SCRIPT_CODE_HEADER_FN = 'header.R' SCRIPT_HEADER_DEF_CONTENT = '# write your header here...' SCRIPT_CODE_BODY_FN = 'body.R' SCRIPT_BODY_DEF_CONTENT = '# copy and paste main code here...' 
SCRIPT_FORM_FN = 'form.xml' SCRIPT_TEMPLATE_FOLDER = TEMPLATE_FOLDER + 'script_templates/' SCRIPT_TEMPLATE_PATH = SCRIPT_TEMPLATE_FOLDER + 'script.R' JOBS_FN = 'jobs/' JOBS_PATH = '%s%s' % (MEDIA_ROOT, JOBS_FN) JOBS_SH = '_config.sh' # # WATCHER RELATED CONFIG # # FIXME make this target_config specific WATCHER_DB_REFRESH = 2 # number of seconds to wait before refreshing reports from DB WATCHER_PROC_REFRESH = 2 # number of seconds to wait before refreshing processes # # SHINY RELATED CONFIG # from shiny.settings import * # FIXME obsolete FOLDERS_LST = [TEMPLATE_FOLDER, SHINY_REPORT_TEMPLATE_PATH, SHINY_REPORTS, SHINY_TAGS, NOZZLE_TEMPLATE_FOLDER, SCRIPT_TEMPLATE_FOLDER, JOBS_PATH, REPORT_TYPE_PATH, REPORTS_PATH, RSCRIPTS_PATH, MEDIA_ROOT, STATIC_ROOT, TARGET_CONFIG_PATH, EXEC_CONFIG_PATH, ENGINE_CONFIG_PATH] ## # System Autocheck config ## # this is used to avoid 504 Gateway time-out from ngnix with is currently set to 600 sec = 10 min # LONG_POLL_TIME_OUT_REFRESH = 540 # 9 minutes # set to 50 sec to avoid time-out on breeze.fimm.fi LONG_POLL_TIME_OUT_REFRESH = 50 # FIXME obsolete # SGE_MASTER_FILE = '/var/lib/gridengine/default/common/act_qmaster' # FIXME obsolete # SGE_MASTER_IP = '192.168.67.2' # FIXME obsolete # DOTM_SERVER_IP = '128.214.64.5' # FIXME obsolete # RORA_SERVER_IP = '192.168.0.219' # FIXME obsolete # FILE_SERVER_IP = '192.168.0.107' # FIXME obsolete SPECIAL_CODE_FOLDER = PROJECT_PATH + 'code/' FS_SIG_FILE = PROJECT_PATH + 'fs_sig.md5' FS_LIST_FILE = PROJECT_PATH + 'fs_checksums.json' FOLDERS_TO_CHECK = [TEMPLATE_FOLDER, SHINY_TAGS, REPORT_TYPE_PATH, # SHINY_REPORTS,SPECIAL_CODE_FOLDER , RSCRIPTS_PATH, MOULD_FOLDER, STATIC_ROOT, DATASETS_FOLDER] # STATIC URL MAPPINGS # STATIC_URL = '/static/' # MEDIA_URL = '/media/' MOULD_URL = MEDIA_URL + DATA_TEMPLATES_FN # number of seconds after witch a job that has not received a sgeid should be marked as aborted or re-run NO_SGEID_EXPIRY = 30 # FIXME obsolete TMP_CSC_TAITO_MOUNT = '/mnt/csc-taito/' TMP_CSC_TAITO_REPORT_PATH = 'breeze/' TMP_CSC_TAITO_REMOTE_CHROOT = '/homeappl/home/clement/' # mail config EMAIL_HOST = 'smtp.gmail.com' EMAIL_HOST_USER = 'breeze.fimm@gmail.com' EMAIL_HOST_PASSWORD = get_key('gmail') EMAIL_PORT = '587' # EMAIL_SUBJECT_PREFIX = '[' + FULL_HOST_NAME + '] ' EMAIL_SUBJECT_PREFIX = '[' + BREEZE_TITLE + '] ' EMAIL_USE_TLS = True EMAIL_SENDER = 'Breeze PMS' # # END OF CONFIG # RUN-MODE SPECIFICS FOLLOWING # ** NO CONFIGURATION CONST BEYOND THIS POINT ** # # ** moved to config/env/* # if prod mode then auto disable DEBUG, for safety # if MODE_PROD or PHARMA_MODE: # SHINY_MODE = 'remote' # SHINY_LOCAL_ENABLE = False # DEBUG = False # VERBOSE = False # ** DEV logging config moved to config/env/dev.py # FIXME obsolete if ENABLE_ROLLBAR: try: import rollbar BASE_DIR = SOURCE_ROOT ROLLBAR = { 'access_token': '00f2bf2c84ce40aa96842622c6ffe97d', 'environment': 'development' if DEBUG else 'production', 'root': BASE_DIR, } rollbar.init(**ROLLBAR) except Exception: ENABLE_ROLLBAR = False logging.getLogger().error('Unable to init rollbar') pass def make_run_file(): f = open('running', 'w+') f.write(str(datetime.now().strftime(USUAL_DATE_FORMAT))) f.close() # FIXME obsolete if os.path.isfile('running'): # First time print '__breeze__started__' logging.info('__breeze__started__') os.remove('running') else: make_run_file() # Second time time.sleep(1) print '__breeze__load/reload__' logging.info('__breeze__load/reload__') print 'source home : ' + SOURCE_ROOT logging.debug('source home : ' + SOURCE_ROOT) print 'project home : ' + 
PROJECT_PATH logging.debug('project home : ' + PROJECT_PATH) print 'Logging on %s\nSettings loaded. Running branch %s, mode %s on %s' % \ (TermColoring.bold(LOG_PATH), TermColoring.ok_blue(git.get_branch_from_fs(SOURCE_ROOT)), TermColoring.ok_blue( TermColoring.bold(RUN_MODE)), TermColoring.ok_blue(FULL_HOST_NAME)) git_stat = git.get_status() print git_stat logging.info('Settings loaded. Running %s on %s' % (RUN_MODE, FULL_HOST_NAME)) logging.info(git_stat) from api import code_v1 code_v1.do_self_git_pull() if PHARMA_MODE: print TermColoring.bold('RUNNING WITH PHARMA') print('debug mode is %s' % ('ON' if DEBUG else 'OFF')) # FIXME obsolete def project_folder_path(breeze_folder=BREEZE_FOLDER): return PROJECT_FOLDER + breeze_folder
PROJECT_FOLDER = '/%s/' % PROJECT_FOLDER_NAME R_ENGINE_PATH = PROD_PATH + R_ENGINE_SUB_PATH # FIXME Legacy
conditional_block
settings.py
# Django settings for isbio project. # from configurations import Settings import logging import os import socket import time from datetime import datetime from utilz import git, TermColoring, recur, recur_rec, get_key, import_env, file_content, is_host_online, test_url, \ magic_const, get_md5 ENABLE_DATADOG = False ENABLE_ROLLBAR = False statsd = False try: from datadog import statsd if ENABLE_DATADOG: ENABLE_DATADOG = True except Exception: ENABLE_DATADOG = False ENABLE_REMOTE_FW = False # TODO : redesign PID = os.getpid() MAINTENANCE = False USUAL_DATE_FORMAT = "%Y-%m-%d %H:%M:%S" USUAL_LOG_FORMAT = \ '%(asctime)s,%(msecs)03d P%(process)05d %(levelname)-8s %(lineno)04d:%(module)-20s %(funcName)-25s %(message)s' USUAL_LOG_LEN_BEFORE_MESSAGE = 93 USUAL_LOG_FORMAT_DESCRIPTOR =\ 'DATE TIME,milisec PID LEVEL LINE:MODULE FUNCTION MESSAGE' DB_DATE_FORMAT = "%Y-%m-%d %H:%M:%S" LOG_FOLDER = '/var/log/breeze/' # log_fname = 'breeze_%s.log' % datetime.now().strftime("%Y-%m-%d_%H-%M-%S%z") log_fname = 'rotating.log' log_hit_fname = 'access.log' LOG_PATH = '%s%s' % (LOG_FOLDER, log_fname) LOG_HIT_PATH = '%s%s' % (LOG_FOLDER, log_hit_fname) # DEBUG = False TEMPLATE_DEBUG = False ADMINS = ( ('Clement FIERE', 'clement.fiere@helsinki.fi'), ) MANAGERS = ADMINS MYSQL_SECRET_FILE = 'mysql_root' # Local time zone for this installation. Choices can be found here: # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name # although not all choices may be available on all operating systems. # In a Windows environment this must be set to your system time zone. TIME_ZONE = 'Europe/Helsinki' DATABASES = { 'default': { 'ENGINE' : 'django.db.backends.mysql', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'. 'NAME' : 'breezedb', # Or path to database file if using sqlite3. 'USER' : 'root', # Not used with sqlite3. 'PASSWORD' : get_key(MYSQL_SECRET_FILE), # Not used with sqlite3. 'HOST' : 'breeze-sql', # Set to empty string for localhost. Not used with sqlite3. 'PORT' : '3306', # Set to empty string for default. Not used with sqlite3. 'OPTIONS' : { "init_command": "SET default_storage_engine=INNODB; SET SESSION TRANSACTION ISOLATION LEVEL READ " "COMMITTED", } # "init_command": "SET transaction isolation level READ COMMITTED", } } } ROOT_URLCONF = 'isbio.urls' TEMPLATES = [ { 'BACKEND' : 'django.template.backends.django.DjangoTemplates', 'DIRS' : [], 'APP_DIRS': True, 'OPTIONS' : { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', 'django.template.context_processors.media', 'django.template.context_processors.static', 'breeze.context.user_context', 'breeze.context.date_context', 'breeze.context.run_mode_context', # 'django_auth0.context_processors.auth0', # moved to config/auth0.py "breeze.context.site", "breeze.context.__context_var_list", ], }, }, ] # Language code for this installation. All choices can be found here: # http://www.i18nguy.com/unicode/language-identifiers.html LANGUAGE_CODE = 'en-us' SITE_ID = 1 # If you set this to False, Django will make some optimizations so as not # to load the internationalization machinery. USE_I18N = True # If you set this to False, Django will not format dates, numbers and # calendars according to the current locale. USE_L10N = True # If you set this to False, Django will not use timezone-aware datetimes. USE_TZ = True # URL that handles the media served from MEDIA_ROOT. 
Make sure to use a # trailing slash. # Examples: "http://media.lawrence.com/media/", "http://example.com/media/" MEDIA_URL = '/media/' # Absolute path to the directory static files should be collected to. # Don't put anything in this directory yourself; store your static files # in apps' "static/" subdirectories and in STATICFILES_DIRS. # Example: "/home/media/media.lawrence.com/static/" # STATIC_ROOT = '' # ** moved lower in this file # URL prefix for static files. # Example: "http://media.lawrence.com/static/" STATIC_URL = '/static/' # Additional locations of static files # ** moved to configs/env/* # STATICFILES_DIRS = ( # "/root/static_source", # ) # List of finder classes that know how to find static files in # various locations. STATICFILES_FINDERS = ( 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', 'django.contrib.staticfiles.finders.DefaultStorageFinder', ) # Make this unique, and don't share it with anybody. SECRET_KEY_FN = 'django' SECRET_KEY = get_key(SECRET_KEY_FN) # List of callable that know how to import templates from various sources. # TEMPLATE_LOADERS = ( # 'django.template.loaders.filesystem.Loader', # 'django.template.loaders.app_directories.Loader', # ) # AUTH_USER_MODEL = 'breeze.OrderedUser' # AUTH_USER_MODEL = 'breeze.CustomUser' # FIXME INSTALLED_APPS = [ 'suit', 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.sites', 'django.contrib.messages', 'django.contrib.staticfiles', 'bootstrap_toolkit', 'breeze.apps.Config', 'shiny.apps.Config', 'dbviewer.apps.Config', 'compute.apps.Config', 'down.apps.Config', # 'south', 'gunicorn', 'mathfilters', # 'django_auth0', # moved to config/auth0.py 'hello_auth.apps.Config', 'api.apps.Config', 'webhooks.apps.Config', 'utilz.apps.Config', 'django_requestlogging', 'django.contrib.admindocs', 'django_extensions' ] MIDDLEWARE_CLASSES = [ 'breeze.middlewares.BreezeAwake', 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', # 'django.middleware.doc.XViewMiddleware', 'breeze.middlewares.JobKeeper', 'breeze.middlewares.CheckUserProfile', 'breeze.middlewares.ContextualRequest', 'django_requestlogging.middleware.LogSetupMiddleware', 'breeze.middlewares.DataDog' if ENABLE_DATADOG else 'breeze.middlewares.Empty', 'breeze.middlewares.RemoteFW' if ENABLE_REMOTE_FW else 'breeze.middlewares.Empty', 'rollbar.contrib.django.middleware.RollbarNotifierMiddleware' if ENABLE_ROLLBAR else 'breeze.middlewares.Empty', ] # ** AUTHENTICATION_BACKENDS moved to specific auth config files (config/env/auth/*) # ** AUTH0_* moved to config/env/auth/auth0.py SSH_TUNNEL_HOST = 'breeze-ssh' SSH_TUNNEL_PORT = '2222' # SSH_TUNNEL_TEST_URL = 'breeze-ssh' # ROOT_URLCONF = 'isbio.urls' APPEND_SLASH = True # Python dotted path to the WSGI application used by Django's runserver. 
WSGI_APPLICATION = 'isbio.wsgi.application' # provide our profile model AUTH_PROFILE_MODULE = 'breeze.UserProfile' # allow on the fly creation of guest user accounts AUTH_ALLOW_GUEST = False # allow anonymous visitor to login as disposable guests GUEST_INSTITUTE_ID = 3 # guest institute GUEST_EXPIRATION_TIME = 24 * 60 # expiration time of inactive guests in minutes GUEST_FIRST_NAME = 'guest' GUEST_GROUP_NAME = GUEST_FIRST_NAME.capitalize() + 's' ALL_GROUP_NAME = 'Registered users' RESTRICT_GUEST_TO_SPECIFIC_VIEWS = True DEFAULT_LOGIN_URL = '/login_page' FORCE_DEFAULT_LOGIN_URL = True # A sample logging configuration. The only tangible logging # performed by this configuration is to send an email to # the site admins on every HTTP 500 error when DEBUG=False. # See http://docs.djangoproject.com/en/dev/topics/logging for # more details on how to customize your logging configuration. LOGGING = { 'version': 1, 'disable_existing_loggers': False, 'formatters': { 'standard': { 'format': USUAL_LOG_FORMAT, 'datefmt': USUAL_DATE_FORMAT, }, }, 'filters': { 'require_debug_false': { '()': 'django.utils.log.RequireDebugFalse' } }, 'handlers': { 'default': { 'level': 'DEBUG', 'class': 'logging.handlers.RotatingFileHandler', 'filename': LOG_PATH, 'maxBytes': 1024 * 1024 * 5, # 5 MB 'backupCount': 10, 'formatter': 'standard', }, 'mail_admins': { 'level': 'ERROR', 'filters': ['require_debug_false'], 'class': 'django.utils.log.AdminEmailHandler' }, }, 'loggers': { '': { 'handlers': ['default'], 'level': logging.INFO, 'propagate': True }, 'django.request': { 'handlers': ['mail_admins'], 'level': 'ERROR', 'propagate': True, }, } } class DomainList(object): CLOUD_PROD = ['breeze.fimm.fi', '13.79.158.135', ] CLOUD_DEV = ['breeze-dev.northeurope.cloudapp.azure.com', '52.164.209.61', ] FIMM_PH = ['breeze-newph.fimm.fi', 'breeze-ph.fimm.fi', ] FIMM_DEV = ['breeze-dev.fimm.fi', ] FIMM_PROD = ['breeze-fimm.fimm.fi', 'breeze-new.fimm.fi', ] @classmethod def get_current_domain(cls): from isbio.config import RUN_ENV_CLASS, ConfigEnvironments, MODE_PROD, DEV_MODE, PHARMA_MODE if RUN_ENV_CLASS is ConfigEnvironments.AzureCloud: domain = cls.CLOUD_DEV if DEV_MODE else cls.CLOUD_PROD elif RUN_ENV_CLASS is ConfigEnvironments.FIMM: domain = cls.FIMM_PROD if MODE_PROD else cls.FIMM_PH if PHARMA_MODE else cls.FIMM_DEV return domain[0] DEBUG = False VERBOSE = False SQL_DUMP = False # APPEND_SLASH = True ADMINS = ( ('Clement FIERE', 'clement.fiere@helsinki.fi'), ) # root of the Breeze django project folder, includes 'venv', 'static' folder copy, isbio, logs SOURCE_ROOT = recur(3, os.path.dirname, os.path.realpath(__file__)) + '/' DJANGO_ROOT = recur(2, os.path.dirname, os.path.realpath(__file__)) + '/' TEMPLATE_FOLDER = DJANGO_ROOT + 'templates/' # source templates (not HTML ones) DJANGO_AUTH_MODEL_BACKEND_PY_PATH = 'django.contrib.auth.backends.ModelBackend' # CAS_NG_BACKEND_PY_PATH = 'my_django.cas_ng_custom.CASBackend' AUTH0_BACKEND_PY_PATH = 'django_auth0.auth_backend.Auth0Backend' AUTH0_CUSTOM_BACKEND_PY_PATH = 'custom_auth0.auth_backend.Auth0Backend' os.environ['MAIL'] = '/var/mail/dbychkov' # FIXME obsolete CONSOLE_DATE_F = "%d/%b/%Y %H:%M:%S" # auto-sensing if running on dev or prod, for dynamic environment configuration # FIXME broken in docker container FULL_HOST_NAME = socket.gethostname() HOST_NAME = str.split(FULL_HOST_NAME, '.')[0] # do not move. 
settings.py
# Django settings for isbio project.
# from configurations import Settings
import logging
import os
import socket
import time
from datetime import datetime

from utilz import git, TermColoring, recur, recur_rec, get_key, import_env, file_content, is_host_online, test_url, \
    magic_const, get_md5

ENABLE_DATADOG = False
ENABLE_ROLLBAR = False
statsd = False
try:
    from datadog import statsd
    if ENABLE_DATADOG:
        ENABLE_DATADOG = True
except Exception:
    ENABLE_DATADOG = False
ENABLE_REMOTE_FW = False

# TODO : redesign
PID = os.getpid()
MAINTENANCE = False

USUAL_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
USUAL_LOG_FORMAT = \
    '%(asctime)s,%(msecs)03d P%(process)05d %(levelname)-8s %(lineno)04d:%(module)-20s %(funcName)-25s %(message)s'
USUAL_LOG_LEN_BEFORE_MESSAGE = 93
USUAL_LOG_FORMAT_DESCRIPTOR = \
    'DATE TIME,millisec PID LEVEL LINE:MODULE FUNCTION MESSAGE'
DB_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"

LOG_FOLDER = '/var/log/breeze/'
# log_fname = 'breeze_%s.log' % datetime.now().strftime("%Y-%m-%d_%H-%M-%S%z")
log_fname = 'rotating.log'
log_hit_fname = 'access.log'
LOG_PATH = '%s%s' % (LOG_FOLDER, log_fname)
LOG_HIT_PATH = '%s%s' % (LOG_FOLDER, log_hit_fname)

# DEBUG = False
TEMPLATE_DEBUG = False

ADMINS = (
    ('Clement FIERE', 'clement.fiere@helsinki.fi'),
)
MANAGERS = ADMINS

MYSQL_SECRET_FILE = 'mysql_root'

# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'Europe/Helsinki'

DATABASES = {
    'default': {
        'ENGINE'  : 'django.db.backends.mysql',  # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
        'NAME'    : 'breezedb',                  # Or path to database file if using sqlite3.
        'USER'    : 'root',                      # Not used with sqlite3.
        'PASSWORD': get_key(MYSQL_SECRET_FILE),  # Not used with sqlite3.
        'HOST'    : 'breeze-sql',                # Set to empty string for localhost. Not used with sqlite3.
        'PORT'    : '3306',                      # Set to empty string for default. Not used with sqlite3.
        'OPTIONS' : {
            "init_command": "SET default_storage_engine=INNODB; SET SESSION TRANSACTION ISOLATION LEVEL READ "
                            "COMMITTED",
        }
        # "init_command": "SET transaction isolation level READ COMMITTED",
    }
}

ROOT_URLCONF = 'isbio.urls'

TEMPLATES = [
    {
        'BACKEND' : 'django.template.backends.django.DjangoTemplates',
        'DIRS'    : [],
        'APP_DIRS': True,
        'OPTIONS' : {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
                'django.template.context_processors.media',
                'django.template.context_processors.static',
                'breeze.context.user_context',
                'breeze.context.date_context',
                'breeze.context.run_mode_context',
                # 'django_auth0.context_processors.auth0',  # moved to config/auth0.py
                "breeze.context.site",
                "breeze.context.__context_var_list",
            ],
        },
    },
]

# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'

SITE_ID = 1

# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True

# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True

# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True

# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/media/'

# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
# STATIC_ROOT = ''  # ** moved lower in this file

# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'

# Additional locations of static files
# ** moved to configs/env/*
# STATICFILES_DIRS = (
#     "/root/static_source",
# )

# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    'django.contrib.staticfiles.finders.DefaultStorageFinder',
)

# Make this unique, and don't share it with anybody.
SECRET_KEY_FN = 'django'
SECRET_KEY = get_key(SECRET_KEY_FN)

# List of callables that know how to import templates from various sources.
# TEMPLATE_LOADERS = (
#     'django.template.loaders.filesystem.Loader',
#     'django.template.loaders.app_directories.Loader',
# )

# AUTH_USER_MODEL = 'breeze.OrderedUser'
# AUTH_USER_MODEL = 'breeze.CustomUser'  # FIXME

INSTALLED_APPS = [
    'suit',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'bootstrap_toolkit',
    'breeze.apps.Config',
    'shiny.apps.Config',
    'dbviewer.apps.Config',
    'compute.apps.Config',
    'down.apps.Config',
    # 'south',
    'gunicorn',
    'mathfilters',
    # 'django_auth0',  # moved to config/auth0.py
    'hello_auth.apps.Config',
    'api.apps.Config',
    'webhooks.apps.Config',
    'utilz.apps.Config',
    'django_requestlogging',
    'django.contrib.admindocs',
    'django_extensions',
]

MIDDLEWARE_CLASSES = [
    'breeze.middlewares.BreezeAwake',
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    # 'django.middleware.doc.XViewMiddleware',
    'breeze.middlewares.JobKeeper',
    'breeze.middlewares.CheckUserProfile',
    'breeze.middlewares.ContextualRequest',
    'django_requestlogging.middleware.LogSetupMiddleware',
    'breeze.middlewares.DataDog' if ENABLE_DATADOG else 'breeze.middlewares.Empty',
    'breeze.middlewares.RemoteFW' if ENABLE_REMOTE_FW else 'breeze.middlewares.Empty',
    'rollbar.contrib.django.middleware.RollbarNotifierMiddleware' if ENABLE_ROLLBAR else 'breeze.middlewares.Empty',
]

# ** AUTHENTICATION_BACKENDS moved to specific auth config files (config/env/auth/*)
# ** AUTH0_* moved to config/env/auth/auth0.py

SSH_TUNNEL_HOST = 'breeze-ssh'
SSH_TUNNEL_PORT = '2222'
# SSH_TUNNEL_TEST_URL = 'breeze-ssh'

# ROOT_URLCONF = 'isbio.urls'
APPEND_SLASH = True

# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'isbio.wsgi.application'

# provide our profile model
AUTH_PROFILE_MODULE = 'breeze.UserProfile'

# allow on-the-fly creation of guest user accounts
AUTH_ALLOW_GUEST = False  # allow anonymous visitors to log in as disposable guests
GUEST_INSTITUTE_ID = 3  # guest institute
GUEST_EXPIRATION_TIME = 24 * 60  # expiration time of inactive guests in minutes
GUEST_FIRST_NAME = 'guest'
GUEST_GROUP_NAME = GUEST_FIRST_NAME.capitalize() + 's'
ALL_GROUP_NAME = 'Registered users'
RESTRICT_GUEST_TO_SPECIFIC_VIEWS = True
DEFAULT_LOGIN_URL = '/login_page'
FORCE_DEFAULT_LOGIN_URL = True

# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'standard': {
            'format': USUAL_LOG_FORMAT,
            'datefmt': USUAL_DATE_FORMAT,
        },
    },
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'handlers': {
        'default': {
            'level': 'DEBUG',
            'class': 'logging.handlers.RotatingFileHandler',
            'filename': LOG_PATH,
            'maxBytes': 1024 * 1024 * 5,  # 5 MB
            'backupCount': 10,
            'formatter': 'standard',
        },
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        },
    },
    'loggers': {
        '': {
            'handlers': ['default'],
            'level': logging.INFO,
            'propagate': True
        },
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}


class DomainList(object):
    CLOUD_PROD = ['breeze.fimm.fi', '13.79.158.135', ]
    CLOUD_DEV = ['breeze-dev.northeurope.cloudapp.azure.com', '52.164.209.61', ]
    FIMM_PH = ['breeze-newph.fimm.fi', 'breeze-ph.fimm.fi', ]
    FIMM_DEV = ['breeze-dev.fimm.fi', ]
    FIMM_PROD = ['breeze-fimm.fimm.fi', 'breeze-new.fimm.fi', ]

    @classmethod
    def get_current_domain(cls):
        from isbio.config import RUN_ENV_CLASS, ConfigEnvironments, MODE_PROD, DEV_MODE, PHARMA_MODE
        if RUN_ENV_CLASS is ConfigEnvironments.AzureCloud:
            domain = cls.CLOUD_DEV if DEV_MODE else cls.CLOUD_PROD
        elif RUN_ENV_CLASS is ConfigEnvironments.FIMM:
            domain = cls.FIMM_PROD if MODE_PROD else cls.FIMM_PH if PHARMA_MODE else cls.FIMM_DEV
        # NOTE: 'domain' is unbound (and this raises) if RUN_ENV_CLASS is neither environment above
        return domain[0]


DEBUG = False
VERBOSE = False
SQL_DUMP = False
# APPEND_SLASH = True

ADMINS = (
    ('Clement FIERE', 'clement.fiere@helsinki.fi'),
)

# root of the Breeze django project folder, includes 'venv', 'static' folder copy, isbio, logs
SOURCE_ROOT = recur(3, os.path.dirname, os.path.realpath(__file__)) + '/'
DJANGO_ROOT = recur(2, os.path.dirname, os.path.realpath(__file__)) + '/'
TEMPLATE_FOLDER = DJANGO_ROOT + 'templates/'  # source templates (not HTML ones)

DJANGO_AUTH_MODEL_BACKEND_PY_PATH = 'django.contrib.auth.backends.ModelBackend'
# CAS_NG_BACKEND_PY_PATH = 'my_django.cas_ng_custom.CASBackend'
AUTH0_BACKEND_PY_PATH = 'django_auth0.auth_backend.Auth0Backend'
AUTH0_CUSTOM_BACKEND_PY_PATH = 'custom_auth0.auth_backend.Auth0Backend'

os.environ['MAIL'] = '/var/mail/dbychkov'  # FIXME obsolete
CONSOLE_DATE_F = "%d/%b/%Y %H:%M:%S"

# auto-sensing if running on dev or prod, for dynamic environment configuration
# FIXME broken in docker container
FULL_HOST_NAME = socket.gethostname()
HOST_NAME = str.split(FULL_HOST_NAME, '.')[0]
# do not move: some utils functions use it
FIMM_NETWORK = '128.214.0.0/16'

from config import *

# Super User on breeze can access all data
SU_ACCESS_OVERRIDE = True

PROJECT_PATH = PROJECT_FOLDER + BREEZE_FOLDER
if not os.path.isdir(PROJECT_PATH):
    PROJECT_FOLDER = '/%s/' % PROJECT_FOLDER_NAME
PROD_PATH = '%s%s' % (PROJECT_FOLDER, BREEZE_FOLDER)

R_ENGINE_SUB_PATH = 'R/bin/R '  # FIXME LEGACY ONLY
R_ENGINE_PATH = PROD_PATH + R_ENGINE_SUB_PATH
if not os.path.isfile(R_ENGINE_PATH.strip()):
    PROJECT_FOLDER = '/%s/' % PROJECT_FOLDER_NAME
    R_ENGINE_PATH = PROD_PATH + R_ENGINE_SUB_PATH

# FIXME Legacy
PROJECT_FHRB_PM_PATH = '/%s/fhrb_pm/' % PROJECT_FOLDER_NAME
JDBC_BRIDGE_PATH = PROJECT_FHRB_PM_PATH + 'bin/start-jdbc-bridge'

# Every other path has a trailing /
TEMP_FOLDER = SOURCE_ROOT + 'tmp/'

####
# 'db' folder, containing : reports, scripts, jobs, datasets, pipelines, upload_temp
####
DATA_TEMPLATES_FN = 'mould/'
RE_RUN_SH = SOURCE_ROOT + 're_run.sh'
MEDIA_ROOT = PROJECT_PATH + 'db/'
RORA_LIB = PROJECT_PATH + 'RORALib/'
UPLOAD_FOLDER = MEDIA_ROOT + 'upload_temp/'
DATASETS_FOLDER = MEDIA_ROOT + 'datasets/'
STATIC_ROOT = SOURCE_ROOT + 'static_source/'  # static files for the website
DJANGO_CONFIG_FOLDER = SOURCE_ROOT + 'config/'  # where to store secrets and deployment conf
MOULD_FOLDER = MEDIA_ROOT + DATA_TEMPLATES_FN
NO_TAG_XML = TEMPLATE_FOLDER + 'notag.xml'
SH_LOG_FOLDER = '.log'
GENERAL_SH_BASE_NAME = 'run_job'
GENERAL_SH_NAME = '%s.sh' % GENERAL_SH_BASE_NAME
GENERAL_SH_CONF_NAME = '%s_conf.sh' % GENERAL_SH_BASE_NAME
DOCKER_SH_NAME = 'run.sh'
REPORTS_CACHE_INTERNAL_URL = '/cached/reports/'
INCOMPLETE_RUN_FN = '.INCOMPLETE_RUN'
FAILED_FN = '.failed'
SUCCESS_FN = '.done'
R_DONE_FN = '.sub_done'

# ** moved to config/execution/sge.py
# SGE_QUEUE_NAME = 'breeze.q'  # monitoring only
# ** moved to config/env/azure_cloud.py
# DOCKER_HUB_PASS_FILE = SOURCE_ROOT + 'docker_repo'
# AZURE_PASS_FILE = SOURCE_ROOT + 'azure_pwd'  # moved to config/env/azure_cloud.py

#
# ComputeTarget configs
#
# TODO config
# 13/05/2016
CONFIG_FN = 'configs/'
CONFIG_PATH = MEDIA_ROOT + CONFIG_FN
# 19/04/2016
TARGET_CONFIG_FN = 'target/'
TARGET_CONFIG_PATH = CONFIG_PATH + TARGET_CONFIG_FN
# 08/06/2016
DEFAULT_TARGET_ID = BREEZE_TARGET_ID
# 13/05/2016
EXEC_CONFIG_FN = 'exec/'
EXEC_CONFIG_PATH = CONFIG_PATH + EXEC_CONFIG_FN
# 13/05/2016
ENGINE_CONFIG_FN = 'engine/'
ENGINE_CONFIG_PATH = CONFIG_PATH + ENGINE_CONFIG_FN
# 23/05/2016
SWAP_FN = 'swap/'
SWAP_PATH = MEDIA_ROOT + SWAP_FN
# 21/02/2017
SHINY_SECRET_KEY_FN = 'shiny'
SHINY_SECRET = get_key(SHINY_SECRET_KEY_FN)  # Warning: shiny_secret must be at least 32 chars long.
ENC_SESSION_ID_COOKIE_NAME = get_md5('seed')

##
# Report config
##
BOOTSTRAP_SH_TEMPLATE = TEMPLATE_FOLDER + GENERAL_SH_NAME
BOOTSTRAP_SH_CONF_TEMPLATE = TEMPLATE_FOLDER + GENERAL_SH_CONF_NAME
DOCKER_BOOTSTRAP_SH_TEMPLATE = TEMPLATE_FOLDER + DOCKER_SH_NAME
NOZZLE_TEMPLATE_FOLDER = TEMPLATE_FOLDER + 'nozzle_templates/'
TAGS_TEMPLATE_PATH = NOZZLE_TEMPLATE_FOLDER + 'tag.R'
NOZZLE_REPORT_TEMPLATE_PATH = NOZZLE_TEMPLATE_FOLDER + 'report.R'
NOZZLE_REPORT_FN = 'report'
RSCRIPTS_FN = 'scripts/'
RSCRIPTS_PATH = MEDIA_ROOT + RSCRIPTS_FN
REPORT_TYPE_FN = 'pipelines/'
REPORT_TYPE_PATH = MEDIA_ROOT + REPORT_TYPE_FN
REPORTS_FN = 'reports/'
REPORTS_PATH = '%s%s' % (MEDIA_ROOT, REPORTS_FN)
REPORTS_SH = GENERAL_SH_NAME
REPORTS_FM_FN = 'transfer_to_fm.txt'
R_FILE_NAME_BASE = 'script'
R_FILE_NAME = R_FILE_NAME_BASE + '.r'
R_OUT_EXT = '.Rout'

##
# Jobs configs
##
SCRIPT_CODE_HEADER_FN = 'header.R'
SCRIPT_HEADER_DEF_CONTENT = '# write your header here...'
SCRIPT_CODE_BODY_FN = 'body.R'
SCRIPT_BODY_DEF_CONTENT = '# copy and paste main code here...'
SCRIPT_FORM_FN = 'form.xml'
SCRIPT_TEMPLATE_FOLDER = TEMPLATE_FOLDER + 'script_templates/'
SCRIPT_TEMPLATE_PATH = SCRIPT_TEMPLATE_FOLDER + 'script.R'
JOBS_FN = 'jobs/'
JOBS_PATH = '%s%s' % (MEDIA_ROOT, JOBS_FN)
JOBS_SH = '_config.sh'

#
# WATCHER RELATED CONFIG
#
# FIXME make this target_config specific
WATCHER_DB_REFRESH = 2  # number of seconds to wait before refreshing reports from DB
WATCHER_PROC_REFRESH = 2  # number of seconds to wait before refreshing processes

#
# SHINY RELATED CONFIG
#
from shiny.settings import *

# FIXME obsolete
FOLDERS_LST = [TEMPLATE_FOLDER, SHINY_REPORT_TEMPLATE_PATH, SHINY_REPORTS, SHINY_TAGS, NOZZLE_TEMPLATE_FOLDER,
    SCRIPT_TEMPLATE_FOLDER, JOBS_PATH, REPORT_TYPE_PATH, REPORTS_PATH, RSCRIPTS_PATH, MEDIA_ROOT, STATIC_ROOT,
    TARGET_CONFIG_PATH, EXEC_CONFIG_PATH, ENGINE_CONFIG_PATH]

##
# System Autocheck config
##
# this is used to avoid a 504 Gateway time-out from nginx, which is currently set to 600 sec = 10 min
# LONG_POLL_TIME_OUT_REFRESH = 540  # 9 minutes
# set to 50 sec to avoid time-out on breeze.fimm.fi
LONG_POLL_TIME_OUT_REFRESH = 50

# FIXME obsolete
# SGE_MASTER_FILE = '/var/lib/gridengine/default/common/act_qmaster'
# FIXME obsolete
# SGE_MASTER_IP = '192.168.67.2'
# FIXME obsolete
# DOTM_SERVER_IP = '128.214.64.5'
# FIXME obsolete
# RORA_SERVER_IP = '192.168.0.219'
# FIXME obsolete
# FILE_SERVER_IP = '192.168.0.107'

# FIXME obsolete
SPECIAL_CODE_FOLDER = PROJECT_PATH + 'code/'
FS_SIG_FILE = PROJECT_PATH + 'fs_sig.md5'
FS_LIST_FILE = PROJECT_PATH + 'fs_checksums.json'
FOLDERS_TO_CHECK = [TEMPLATE_FOLDER, SHINY_TAGS, REPORT_TYPE_PATH,  # SHINY_REPORTS, SPECIAL_CODE_FOLDER,
    RSCRIPTS_PATH, MOULD_FOLDER, STATIC_ROOT, DATASETS_FOLDER]

# STATIC URL MAPPINGS
# STATIC_URL = '/static/'
# MEDIA_URL = '/media/'
MOULD_URL = MEDIA_URL + DATA_TEMPLATES_FN

# number of seconds after which a job that has not received a sgeid should be marked as aborted or re-run
NO_SGEID_EXPIRY = 30

# FIXME obsolete
TMP_CSC_TAITO_MOUNT = '/mnt/csc-taito/'
TMP_CSC_TAITO_REPORT_PATH = 'breeze/'
TMP_CSC_TAITO_REMOTE_CHROOT = '/homeappl/home/clement/'

# mail config
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = 'breeze.fimm@gmail.com'
EMAIL_HOST_PASSWORD = get_key('gmail')
EMAIL_PORT = '587'
# EMAIL_SUBJECT_PREFIX = '[' + FULL_HOST_NAME + '] '
EMAIL_SUBJECT_PREFIX = '[' + BREEZE_TITLE + '] '
EMAIL_USE_TLS = True
EMAIL_SENDER = 'Breeze PMS'

#
# END OF CONFIG
# RUN-MODE SPECIFICS FOLLOWING
# ** NO CONFIGURATION CONST BEYOND THIS POINT **
#

# ** moved to config/env/*
# if prod mode then auto-disable DEBUG, for safety
# if MODE_PROD or PHARMA_MODE:
#     SHINY_MODE = 'remote'
#     SHINY_LOCAL_ENABLE = False
#     DEBUG = False
#     VERBOSE = False

# ** DEV logging config moved to config/env/dev.py

# FIXME obsolete
if ENABLE_ROLLBAR:
    try:
        import rollbar
        BASE_DIR = SOURCE_ROOT
        ROLLBAR = {
            'access_token': '00f2bf2c84ce40aa96842622c6ffe97d',
            'environment': 'development' if DEBUG else 'production',
            'root': BASE_DIR,
        }
        rollbar.init(**ROLLBAR)
    except Exception:
        ENABLE_ROLLBAR = False
        logging.getLogger().error('Unable to init rollbar')


def make_run_file():
    f = open('running', 'w+')
    f.write(str(datetime.now().strftime(USUAL_DATE_FORMAT)))
    f.close()


# FIXME obsolete
# marker-file check, apparently used to tell the two settings loads apart
if os.path.isfile('running'):
    # First time
    print '__breeze__started__'
    logging.info('__breeze__started__')
    os.remove('running')
else:
    make_run_file()
    # Second time
    time.sleep(1)
    print '__breeze__load/reload__'
    logging.info('__breeze__load/reload__')
    print 'source home : ' + SOURCE_ROOT
    logging.debug('source home : ' + SOURCE_ROOT)
    print 'project home : ' + PROJECT_PATH
    logging.debug('project home : ' + PROJECT_PATH)
    print 'Logging on %s\nSettings loaded. Running branch %s, mode %s on %s' % \
        (TermColoring.bold(LOG_PATH), TermColoring.ok_blue(git.get_branch_from_fs(SOURCE_ROOT)),
        TermColoring.ok_blue(TermColoring.bold(RUN_MODE)), TermColoring.ok_blue(FULL_HOST_NAME))
    git_stat = git.get_status()
    print git_stat
    logging.info('Settings loaded. Running %s on %s' % (RUN_MODE, FULL_HOST_NAME))
    logging.info(git_stat)
    from api import code_v1
    code_v1.do_self_git_pull()
    if PHARMA_MODE:
        print TermColoring.bold('RUNNING WITH PHARMA')
    print('debug mode is %s' % ('ON' if DEBUG else 'OFF'))


# FIXME obsolete
def project_folder_path(breeze_folder=BREEZE_FOLDER):
    return PROJECT_FOLDER + breeze_folder
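The recur helper imported from utilz is not defined in this file; judging from how SOURCE_ROOT and DJANGO_ROOT are built above, it presumably applies a function n times. A minimal sketch of that assumed behavior (recur_sketch and the sample path are illustrative, not part of the codebase):

# Minimal sketch of the assumed semantics of utilz.recur -- not the real implementation.
import os

def recur_sketch(n, f, x):
    # apply f to x, n times: recur_sketch(3, os.path.dirname, p) == dirname(dirname(dirname(p)))
    for _ in range(n):
        x = f(x)
    return x

# e.g. with this settings file at /srv/breeze/isbio/isbio/settings.py (hypothetical path):
# recur_sketch(3, os.path.dirname, '/srv/breeze/isbio/isbio/settings.py')  ->  '/srv/breeze'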
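DomainList.get_current_domain() picks the canonical host for the active deployment. A hedged usage sketch follows; the flag combinations shown are illustrative only, and RUN_ENV_CLASS and friends live in isbio.config, not in this file:

# Hypothetical resolution examples for DomainList.get_current_domain():
#   RUN_ENV_CLASS is ConfigEnvironments.AzureCloud, DEV_MODE False       -> 'breeze.fimm.fi'
#   RUN_ENV_CLASS is ConfigEnvironments.FIMM, MODE_PROD False,
#       PHARMA_MODE True                                                 -> 'breeze-newph.fimm.fi'
current_host = DomainList.get_current_domain()
print 'serving on ' + current_host  # Python 2 print, matching the rest of this file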
sign-up.js
import React, { useState, useRef, useEffect } from "react";
import styled from "styled-components";
import { useSelector, useDispatch } from "react-redux";
import { userLogin } from "../../actions/index";
import { FontAwesomeIcon } from "@fortawesome/react-fontawesome";
import { faTimes } from "@fortawesome/free-solid-svg-icons";
import axios from "axios";
import theme from "../../styles/theme";
import gsap from "gsap";

export default function SignUp(props) {
  let userdata = useSelector((state) => state.userReducer);
  const size = useRef();

  useEffect(() => {
    gsap.to(size.current, { scale: 1, duration: 0.5, ease: "back" });
  });

  const dispatch = useDispatch();
  const [verifyCode, setVerifyCode] = useState(null);
  const [message, setMessage] = useState("");
  const [user, setUser] = useState({
    email: "",
    verifyEmail: "",
    nickName: "",
    password: "",
    rePassword: "",
  });
  const [isVerify, setIsVerify] = useState(false);
  const [isSend, setIsSend] = useState(false);

  const send = (email) => {
    // send the email via axios and fetch the verification code
    axios
      .post(`${process.env.REACT_APP_SERVER_URL}/sign/email-code`, {
        email: email,
      })
      .then((res) => {
        setVerifyCode(res.data.data.emailcode);
        // only flag the code as sent once the request actually succeeded
        setIsSend(true);
      })
      .catch((err) => {}); // errors here mean a duplicate email or a server error
  };

  const verify = (code) => {
    axios
      .post(
        `${process.env.REACT_APP_SERVER_URL}/sign/email-verification?code=${verifyCode}`,
        {
          emailCode: code,
        }
      )
      .then((res) => {
        setIsVerify(true);
      })
      .catch((err) => {});
  };

  const onCreate = async (data) => {
    // sign up, then sign in right away on success
    try {
      await axios.post(`${process.env.REACT_APP_SERVER_URL}/sign/signup`, {
        nickname: data.nickName,
        email: data.email,
        password: data.password,
      });
      const res = await axios.post(
        `${process.env.REACT_APP_SERVER_URL}/sign/signin`,
        {
          email: data.email,
          password: data.password,
        }
      );
      dispatch(
        userLogin({
          isLogin: true,
          email: user.email,
          nickName: res.data.nickname,
          accessToken: res.data.accessToken,
          profileblob: res.data.profileblob,
        })
      );
      return "200";
    } catch (err) {
      // surface the HTTP status as a string so handleSubmit can branch on it
      return err.response ? String(err.response.status) : "500";
    }
  };

  const handleChange = (e) => {
    setUser({
      ...user,
      [e.target.name]: e.target.value,
    });
  };

  const handleSubmit = async (e) => {
    e.preventDefault();
    if (
      chkPW(user.password) === "통과" &&
      isVerify === true &&
      user.nickName !== ""
    ) {
      const code = await onCreate(user);
      if (code === "409") {
        setMessage("이미 등록되어 있는 이메일입니다");
      } else if (code === "500") {
        setMessage("서버 오류 입니다");
      } else {
        setUser({
          email: "",
          verifyEmail: "",
          password: "",
          rePassword: "",
          nickName: "",
        });
        setIsVerify(false);
        props.exit();
      }
    } else {
      setMessage("비번이나 이메일 인증을 확인해주세요");
    }
  };

  function chkPW(pw) {
    let num = pw.search(/[0-9]/g);
    let eng = pw.search(/[a-z]/gi);
    let spe = pw.search(/[`~!@@#$%^&*|₩₩₩'₩";:₩/?]/gi);
    if (user.password) {
      if (pw.length < 8 || pw.length > 20) {
        return "8자리 ~ 20자리 이내로 입력해주세요.";
      } else if (pw.search(/\s/) !== -1) {
        return "비밀번호는 공백 없이 입력해주세요.";
      } else if (num < 0 || eng < 0 || spe < 0) {
        return "영문,숫자, 특수문자를 혼합하여 입력해주세요.";
      } else {
        return "통과";
      }
    }
  }

  return (
    <Temp>
      <Exit onClick={props.exit}>
        <FontAwesomeIcon color={"white"} icon={faTimes} />
      </Exit>
      <SignUpForm ref={size} onSubmit={handleSubmit}>
        <Logo>
          <img src="/image/logo.svg" />
        </Logo>
        <InputBox>
          <EmailIcon>
            <Align>
              <EmailInput
                name="email"
                value={user.email}
                placeholder="email"
                onChange={handleChange}
              />
              <EmailButton type="button" onClick={() => send(user.email)}>
                인증
              </EmailButton>
            </Align>
            <Message>
              {isSend && !isVerify ? (
                <div style={{ color: `${theme.colors.green}` }}>
                  이메일 인증 코드가 발송되었습니다.
                </div>
              ) : (
                ""
              )}
            </Message>
          </EmailIcon>
          <EmailIcon>
            <Align>
              <EmailInput
                name="verifyEmail"
                value={user.verifyEmail}
                placeholder="email code"
                onChange={handleChange}
              />
              <VerifyButton
                type="button"
                onClick={() => verify(user.verifyEmail)}
              >
                확인
              </VerifyButton>
            </Align>
            <Message>
              {isVerify ? (
                <div style={{ color: `${theme.colors.green}` }}>
                  인증되었습니다
                </div>
              ) : isSend && user.verifyEmail ? (
                "인증 되지 않았습니다"
              ) : (
                ""
              )}
            </Message>
          </EmailIcon>
          <Repassword>
            <RepasswordInput
              name="password"
              value={user.password}
              type="password"
              placeholder="password"
              onChange={handleChange}
            />
            <Message>
              {chkPW(user.password) === "통과" ? (
                <div style={{ color: `${theme.colors.green}` }}>통과</div>
              ) : (
                chkPW(user.password)
              )}
            </Message>
          </Repassword>
          <Repassword>
            <RepasswordInput
              name="rePassword"
              type="password"
              value={user.rePassword}
              placeholder="password confirm"
              onChange={handleChange}
            />
            <Message>
              {user.rePassword !== "" && user.password ? (
                user.password === user.rePassword ? (
                  <div style={{ color: `${theme.colors.green}` }}>
                    비밀번호가 일치합니다.
                  </div>
                ) : (
                  "비밀번호가 일치 하지 않습니다"
                )
              ) : (
                ""
              )}
            </Message>
          </Repassword>
          <Message>{message}</Message>
          <LoginInput
            name="nickName"
            value={user.nickName}
            placeholder="nickname"
            onChange={handleChange}
          />
        </InputBox>
        <SignupButton type="submit">회원가입</SignupButton>
      </SignUpForm>
    </Temp>
  );
}

const Temp = styled.div`
  width: 100vw;
  max-width: 100%;
  height: calc(100vh - 3.45rem);
  max-height: calc(100vh - 3.45rem);
  background-color: rgba(0, 0, 0, 0.4);
  z-index: 3;
  display: flex;
  justify-content: center;
  align-items: center;
  position: absolute;
`;

const Exit = styled.div`
  position: absolute;
  top: 1rem;
  right: 1rem;
  font-size: 2rem;
`;

const SignUpForm = styled.form`
  display: flex;
  flex-direction: column;
  align-items: center;
  transform: scale(0);
  width: 26.25rem;
  height: 35.563rem;
  background-color: white;
  border-radius: 1rem;
  padding: 0 3.688rem;
`;

const Logo = styled.div`
  display: flex;
  justify-content: center;
  align-items: center;
  width: 100%;
  height: 2.188rem;
  margin-top: 1rem;
  > img {
    width: 100%;
    height: 100%;
  }
`;

const InputBox = styled.div`
  margin-top: 2.8rem;
  width: 100%;
  height: 20rem;
  display: flex;
  flex-direction: column;
  align-items: center;
`;

const Repassword = styled.div`
  width: 100%;
  display: flex;
  flex-direction: column;
  height: 4.375rem;
  align-items: center;
`;

const RepasswordInput = styled.input`
  width: 100%;
  height: 3.063rem;
  border-radius: 0.5rem;
  border: 0.5px solid #bbbbbb;
  text-align: left;
  text-indent: 1rem;
  background-image: url("/image/lock.svg");
  background-repeat: no-repeat;
  background-position: 96% 50%;
  background-size: 25px;
  font-size: ${theme.fonts.size.base};
  color: ${theme.colors.darkgrey};
  ::placeholder {
    color: #989898;
  }
  :focus {
    outline: none;
  }
`;

const EmailIcon = styled.div`
  display: flex;
  flex-direction: column;
  justify-content: space-between;
  width: 100%;
  height: 4.375rem;
`;

const Align = styled.div`
  display: flex;
  gap: 0.625rem;
`;

const EmailInput = styled.input`
  width: 100%;
  height: 3.063rem;
  border-radius: 0.5rem;
  border: 0.5px solid #bbbbbb;
  text-align: left;
  text-indent: 1rem;
  background-image: url("/image/email.svg");
  background-repeat: no-repeat;
  background-position: 96% 50%;
  background-size: 25px;
  font-size: ${theme.fonts.size.base};
  color: ${theme.colors.darkgrey};
  ::placeholder {
    color: #989898;
  }
  :focus {
    outline: none;
  }
`;

const EmailButton = styled.button`
  width: 5.2rem;
  height: 100%;
  border-radius: 0.3rem;
  background-color: #7cb700;
  color: white;
  border: 0;
  display: flex;
  align-items: center;
  justify-content: center;
  cursor: pointer;
  font-size: 12px;
  font-weight: 500;
  font-style: ${theme.fonts.family.button};
`;

const VerifyButton = styled.button`
  width: 5.2rem;
  background-color: rgba(0, 0, 0, 0);
  height: 100%;
  border-radius: 0.3rem;
  display: flex;
  align-items: center;
  justify-content: center;
  color: #7cb700;
  border: 0.01rem solid #7cb700;
  cursor: pointer;
  font-size: 12px;
  font-weight: 500;
  font-style: ${theme.fonts.family.button};
`;

const Message = styled.div`
  width: 100%;
  height: 1.438rem;
  align-items: center;
  display: flex;
  justify-content: center;
  font-size: 0.1rem;
  color: ${theme.colors.red};
`;

const LoginInput = styled.input`
  width: 100%;
  min-height: 3.063rem;
  border-radius: 0.5rem;
  border: 0.5px solid #bbbbbb;
  text-align: left;
  text-indent: 1rem;
  background-image: url("/image/userIcon.svg");
  background-repeat: no-repeat;
  background-position: 96% 50%;
  background-size: 25px;
  font-size: ${theme.fonts.size.base};
  color: ${theme.colors.darkgrey};
  ::placeholder {
    color: #989898;
  }
  :focus {
    outline: none;
  }
`;

const SignupButton = styled.button`
  margin-top: 2.3rem;
  background-color: #7cb700;
  border-radius: 0.5rem;
  width: 100%;
  height: 3.063rem;
  color: white;
  border: 0;
  font-style: ${theme.fonts.family.button};
  font-size: ${theme.fonts.size.lg};
  cursor: pointer;
  transition: all 0.3s ease-in-out;
  :hover {
    background-color: white;
    color: ${({ theme }) => theme.colors.green};
    border: 1px solid ${({ theme }) => theme.colors.green};
  }
`;
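A hedged usage sketch of this component: the parent owns the modal's visibility and passes an exit callback, which is the only prop this file actually reads. The parent component and its names below are illustrative, not from this repo:

// Hypothetical parent component (names are illustrative):
import React, { useState } from "react";
import SignUp from "./sign-up";

function Header() {
  const [showSignUp, setShowSignUp] = useState(false);
  return (
    <div>
      <button onClick={() => setShowSignUp(true)}>회원가입</button>
      {/* SignUp closes itself by calling props.exit() after a successful signup */}
      {showSignUp && <SignUp exit={() => setShowSignUp(false)} />}
    </div>
  );
}

export default Header;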
none; } `; const SignupButton = styled.button` margin-top: 2.3rem; background-color: #7cb700; border-radius: 0.5rem; width: 100%; height: 3.063rem; font-size: 1rem; color: white; border: 0; font-style: ${theme.fonts.family.button}; font-size: ${theme.fonts.size.lg}; cursor: pointer; transition: all 0.3s ease-in-out; :hover { transition: all 0.3s ease-in-out; background-color: white; color: ${({ theme }) => theme.colors.green}; border: 1px solid ${({ theme }) => theme.colors.green}; cursor: pointer; } `;
identifier_name
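Note: the sign-up.js row above implements an email-verification flow: request a code via POST /sign/email-code, then confirm it via POST /sign/email-verification?code=<server_code>, and only then allow /sign/signup and /sign/signin. The endpoint paths and field names below are taken from that row; the base URL is a placeholder for process.env.REACT_APP_SERVER_URL and the whole snippet is a Python sketch of the flow, not the app's code.

    import requests

    BASE = "https://example-server"  # stand-in for REACT_APP_SERVER_URL (assumption)

    def request_email_code(email: str) -> str:
        # POST /sign/email-code returns the code the server mailed out (res.data.data.emailcode in the JS)
        r = requests.post(f"{BASE}/sign/email-code", json={"email": email})
        r.raise_for_status()
        return r.json()["data"]["emailcode"]

    def verify_email_code(server_code: str, typed_code: str) -> bool:
        # POST /sign/email-verification?code=<server_code> with the code the user typed in the form
        r = requests.post(f"{BASE}/sign/email-verification",
                          params={"code": server_code},
                          json={"emailCode": typed_code})
        return r.ok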
run_this_code_CACSSEOUL.py
#-*- coding:utf-8 -*- import model import input import os import numpy as np import argparse import sys import tensorflow as tf import aug import numpy as np import random from PIL import Image import time import pickle parser =argparse.ArgumentParser() #parser.add_argument('--saves' , dest='should_save_model' , action = 'store_true') #parser.add_argument('--no-saves' , dest='should_save_model', action ='store_false') parser.add_argument('--optimizer' ,'-o' , type=str ,choices=['sgd','momentum','adam'],help='optimizer') parser.add_argument('--use_nesterov' , type=bool , help='only for momentum , use nesterov') parser.add_argument('--aug' , dest='use_aug', action='store_true' , help='augmentation') parser.add_argument('--no_aug' , dest='use_aug', action='store_false' , help='augmentation') parser.add_argument('--aug_lv1' , dest='use_aug_lv1', action='store_true' , help='augmentation') parser.add_argument('--no_aug_lv1' , dest='use_aug_lv1', action='store_false' , help='augmentation') parser.add_argument('--clahe' , dest='use_clahe', action='store_true' , help='augmentation') parser.add_argument('--no_clahe' , dest='use_clahe', action='store_false' , help='augmentation') parser.add_argument('--actmap', dest='use_actmap' ,action='store_true') parser.add_argument('--no_actmap', dest='use_actmap', action='store_false') parser.add_argument('--random_crop_resize' , '-r', type = int , help='if you use random crop resize , you can choice randdom crop ') parser.add_argument('--batch_size' ,'-b' , type=int , help='batch size') parser.add_argument('--max_iter', '-i' , type=int , help='iteration') parser.add_argument('--l2_loss', dest='use_l2_loss', action='store_true' ,help='l2 loss true or False') parser.add_argument('--no_l2_loss', dest='use_l2_loss', action='store_false' ,help='l2 loss true or False') parser.add_argument('--weight_decay', type = float , help='L2 weight decay ') parser.add_argument('--vgg_model' ,'-m' , choices=['vgg_11','vgg_13','vgg_16', 'vgg_19']) parser.add_argument('--BN' , dest='use_BN' , action='store_true' , help = 'bn True or not') parser.add_argument('--no_BN',dest='use_BN' , action = 'store_false', help = 'bn True or not') parser.add_argument('--data_dir' , help='the folder where the data is saved ' ) parser.add_argument('--init_lr' , type = float , help='init learning rate ') parser.add_argument('--lr_decay_step' ,type=int , help='decay step for learning rate') parser.add_argument('--folder_name' ,help='ex model/fundus_300/folder_name/0 .. 
logs/fundus_300/folder_name/0 , type2/folder_name/0') args=parser.parse_args() print 'aug : ' , args.use_aug print 'aug_lv1 : ' , args.use_aug_lv1 print 'actmap : ' , args.use_actmap print 'use_l2_loss: ' , args.use_l2_loss print 'weight_decay' , args.weight_decay print 'BN : ' , args.use_BN print 'Init Learning rate ' , args.init_lr print 'Decay step for learning rate, ',args.lr_decay_step print 'optimizer : ', args.optimizer print 'use nesterov : ',args.use_nesterov print 'random crop size : ',args.random_crop_resize print 'batch size : ',args.batch_size print 'max iter : ',args.max_iter print 'data dir : ',args.data_dir def count_trainable_params(): total_parameters = 0 for variable in tf.trainable_variables(): shape = variable.get_shape() variable_parametes = 1 for dim in shape: variable_parametes *= dim.value total_parameters += variable_parametes print("Total training params: %.1fM" % (total_parameters / 1e6)) def cls2onehot(cls , depth): labs=np.zeros([len(cls) , depth]) for i,c in enumerate(cls): labs[i,c]=1 return labs def reconstruct_tfrecord_rawdata(tfrecord_path): debug_flag_lv0 = True debug_flag_lv1 = True if __debug__ == debug_flag_lv0: print 'debug start | batch.py | class tfrecord_batch | reconstruct_tfrecord_rawdata ' print 'now Reconstruct Image Data please wait a second' reconstruct_image = [] # caution record_iter is generator record_iter = tf.python_io.tf_record_iterator(path=tfrecord_path) ret_img_list = [] ret_lab_list = [] ret_filename_list = [] for i, str_record in enumerate(record_iter): msg = '\r -progress {0}'.format(i) sys.stdout.write(msg) sys.stdout.flush() example = tf.train.Example() example.ParseFromString(str_record) height = int(example.features.feature['height'].int64_list.value[0]) width = int(example.features.feature['width'].int64_list.value[0]) raw_image = (example.features.feature['raw_image'].bytes_list.value[0]) label = int(example.features.feature['label'].int64_list.value[0]) filename = (example.features.feature['filename'].bytes_list.value[0]) image = np.fromstring(raw_image, dtype=np.uint8) image = image.reshape((height, width, -1)) ret_img_list.append(image) ret_lab_list.append(label) ret_filename_list.append(filename) ret_img = np.asarray(ret_img_list) ret_lab = np.asarray(ret_lab_list) if debug_flag_lv1 == True: print '' print 'images shape : ', np.shape(ret_img) print 'labels shape : ', np.shape(ret_lab) print 'length of filenames : ', len(ret_filename_list) return ret_img, ret_lab, ret_filename_list # pickle 형태로 저장되어 있는 데이터를 불러옵니다. 
imgs_list=[] root_dir =args.data_dir #Load Train imgs ,labs , Test imgs , labs """ train_imgs , train_labs , train_fnames = reconstruct_tfrecord_rawdata(os.path.join(root_dir , 'train.tfrecord')) test_imgs , test_labs , test_fnames = reconstruct_tfrecord_rawdata(os.path.join(root_dir , 'test.tfrecord')) """ names = ['normal_train.npy' , 'normal_test.npy' ,'abnormal_train.npy' , 'abnormal_test.npy'] normal_train_imgs , normal_test_imgs, abnormal_train_imgs , abnormal_test_imgs, =\ map( lambda name : np.load(os.path.join(root_dir ,name)) , names) NORMAL = 0 ABNORMAL = 1 normal_train_labs=np.zeros([len(normal_train_imgs) , 2]) normal_train_labs[:,NORMAL]=1 abnormal_train_labs=np.zeros([len(abnormal_train_imgs) , 2]) abnormal_train_labs[:,ABNORMAL]=1 normal_test_labs=np.zeros([len(normal_test_imgs) , 2]) normal_test_labs[:,NORMAL]=1 abnormal_test_labs=np.zeros([len(abnormal_test_imgs) , 2]) abnormal_test_labs[:,ABNORMAL]=1 print 'Normal Training Data shape : {}'.format(np.shape(normal_train_imgs)) print 'ABNormal Training Data shape : {}'.format(np.shape(abnormal_train_imgs)) print 'Normal Test Data shape : {}'.format(np.shape(normal_test_imgs)) print 'ABNormal Test Data shape : {}'.format(np.shape(abnormal_test_imgs)) print 'Normal Training Labels shape : {}'.format(np.shape(normal_train_labs)) print 'ABNormal Training Labelsshape : {}'.format(np.shape(abnormal_train_labs)) print 'Normal Test Labelsshape : {}'.format(np.shape(normal_test_labs)) print 'ABNormal Test Labels shape : {}'.format(np.shape(abnormal_test_labs)) # normal 과 abnormal 의 balance 을 맞춥니다 train_imgs = np.vstack([normal_train_imgs , abnormal_train_imgs ,abnormal_train_imgs,abnormal_train_imgs,\ abnormal_train_imgs,abnormal_train_imgs,abnormal_train_imgs]) train_labs = np.vstack([normal_train_labs , abnormal_train_labs ,abnormal_train_labs ,abnormal_train_labs,\ abnormal_train_labs ,abnormal_train_labs ,abnormal_train_labs]) test_imgs = np.vstack([normal_test_imgs , abnormal_test_imgs]) test_labs = np.vstack([normal_test_labs, abnormal_test_labs]) print 'Train Images Shape : {} '.format(np.shape(train_imgs)) print 'Train Labels Shape : {} '.format(np.shape(train_labs)) print 'Test Images Shape : {} '.format(np.shape(test_imgs)) print 'Test Labels Shape : {} '.format(np.shape(test_labs)) # Apply Clahe if args.use_clahe: print 'Apply clahe ....' import matplotlib.pyplot as plt train_imgs= map(aug.clahe_equalized, train_imgs) test_imgs = map(aug.clahe_equalized, test_imgs) train_imgs , test_imgs = map(np.asarray , [train_imgs , test_imgs]) #normalize print np.shape(test_labs) if np.max(test_imgs) > 1: #train_imgs=train_imgs/255. test_imgs=test_imgs/255. 
print 'test_imgs max :', np.max(test_imgs) h,w,ch=train_imgs.shape[1:] print h,w,ch n_classes=np.shape(train_labs)[-1] print 'the # classes : {}'.format(n_classes) x_ , y_ , cam_ind, lr_ , is_training ,global_step = model.define_inputs(shape=[None, h ,w, ch ] , n_classes=n_classes ) logits=model.build_graph(x_=x_ , y_=y_ , cam_ind= cam_ind , is_training=is_training , aug_flag=args.use_aug,\ actmap_flag=args.use_actmap , model=args.vgg_model,random_crop_resize=args.random_crop_resize, \ bn = args.use_BN) lr_op= tf.train.exponential_decay(args.init_lr, global_step , decay_steps=int(args.max_iter / args.lr_decay_step ), decay_rate=0.96, staircase=False) train_op, accuracy_op , loss_op , pred_op = \ model.train_algorithm(args.optimizer, logits=logits, labels=y_, learning_rate=lr_op, l2_loss=args.use_l2_loss, weight_decay=args.weight_decay) log_count =0; while True: logs_root_path='./logs/{}'.format(args.folder_name ) try: os.makedirs(logs_root_path) except Exception as e : print e pass; print logs_root_path logs_path=os.path.join( logs_root_path , str(log_count)) if not os.path.isdir(logs_path): os.mkdir(logs_path) break; else: log_count+=1 sess, saver , summary_writer =model.sess_start(logs_path) model_count =0; while True: models_root_path='./models/{}'.format(args.folder_name) try: os.makedirs(models_root_path) except Exception as e: print e pass; models_path=os.path.join(models_root_path , str(model_count)) if not os.path.isdir(models_path): os.mkdir(models_path) break; else: model_count+=1 best_acc_root = os.path.join(models_path, 'best_acc') best_loss_root = os.path.join(models_path, 'best_loss') os.mkdir(best_acc_root) os.mkdir(best_loss_root) print 'Logs savedir: {}'.format(logs_path) print 'Model savedir : {}'.format(models_path) min_loss = 1000. max_acc = 0. max_iter=args.max_iter ckpt=100 batch_size=args.batch_size start_time=0 train_acc=0 train_val=0 train_loss=1000. share=len(test_labs)/batch_size remainder=len(test_labs)/batch_size def show_progress(step, max_iter): msg = '\r progress {}/{}'.format(step, max_iter)
ter): if step % ckpt==0: """ #### testing ### """ print '### Testing ###' test_fetches = [ accuracy_op, loss_op, pred_op , lr_op] val_acc_mean , val_loss_mean , pred_all = [] , [] , [] for i in range(share): #여기서 테스트 셋을 sess.run()할수 있게 쪼갭니다 test_feedDict = {x_: test_imgs[i * batch_size:(i + 1) * batch_size], y_: test_labs[i * batch_size:(i + 1) * batch_size], is_training: False, global_step: step} val_acc, val_loss, pred, learning_rate = sess.run(fetches=test_fetches, feed_dict=test_feedDict) val_acc_mean.append(val_acc) val_loss_mean.append(val_loss) pred_all.append(pred) val_acc_mean=np.mean(np.asarray(val_acc_mean)) val_acc_mean=np.mean(np.asarray(val_acc_mean)) val_loss_mean=np.mean(np.asarray(val_loss_mean)) if val_acc_mean > max_acc: #best acc max_acc=val_acc_mean print 'max acc : {}'.format(max_acc) best_acc_folder=os.path.join( best_acc_root, 'step_{}_acc_{}'.format(step , max_acc)) os.mkdir(best_acc_folder) saver.save(sess=sess,save_path=os.path.join(best_acc_folder , 'model')) print 'Step : {} '.format(step) print 'Learning Rate : {} '.format(learning_rate) print 'Train acc : {} Train loss : {}'.format( train_acc , train_loss) print 'validation acc : {} loss : {}'.format( val_acc_mean, val_loss_mean ) # add learning rate summary summary=tf.Summary(value=[tf.Summary.Value(tag='learning_rate' , simple_value = float(learning_rate))]) summary_writer.add_summary(summary, step) model.write_acc_loss( summary_writer, 'validation', loss=val_loss_mean, acc=val_acc_mean, step=step) model_path=os.path.join(models_path, str(step)) os.mkdir(model_path) # e.g) models/fundus_300/100/model.ckpt or model.meta #saver.save(sess=sess,save_path=os.path.join(model_path,'model' , folder_name)) """ #### training ### """ train_fetches = [train_op, accuracy_op, loss_op ] batch_xs, batch_ys , batch_fname= input.next_batch(batch_size, train_imgs, train_labs ) if args.use_aug: batch_xs=aug.random_rotate_90(batch_xs) # random 으로 90 180 , 270 , 360 도를 회전합니다. if args.use_aug_lv1: batch_xs = aug.aug_lv1(batch_xs) # random 으로 90 180 , 270 , 360 도를 회전합니다. if i ==0 : np.save('aug_lv1_batch_sample.npy' , batch_xs) batch_xs=batch_xs/255. train_feedDict = {x_: batch_xs, y_: batch_ys, cam_ind: ABNORMAL, lr_: learning_rate, is_training: True, global_step: step} _ , train_acc, train_loss = sess.run( fetches=train_fetches, feed_dict=train_feedDict ) #print 'train acc : {} loss : {}'.format(train_acc, train_loss) model.write_acc_loss(summary_writer ,'train' , loss= train_loss , acc=train_acc ,step= step)
sys.stdout.write(msg) sys.stdout.flush() count_trainable_params() for step in range(max_i
identifier_body
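Note: the training script above builds its learning-rate schedule with tf.train.exponential_decay(init_lr, global_step, decay_steps=int(max_iter / lr_decay_step), decay_rate=0.96, staircase=False). A small sketch of the closed-form value that call produces, with illustrative numbers:

    def decayed_learning_rate(init_lr, step, max_iter, lr_decay_step, decay_rate=0.96):
        """Closed form of tf.train.exponential_decay with staircase=False,
        matching the arguments used in run_this_code_CACSSEOUL.py."""
        decay_steps = int(max_iter / lr_decay_step)
        return init_lr * decay_rate ** (step / decay_steps)

    # Example (illustrative values): init_lr=0.01, max_iter=10000, lr_decay_step=10
    # gives decay_steps=1000, so at step 5000 the rate is 0.01 * 0.96**5 ~= 0.0082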
run_this_code_CACSSEOUL.py
#-*- coding:utf-8 -*- import model import input import os import numpy as np import argparse import sys import tensorflow as tf import aug import numpy as np import random from PIL import Image import time import pickle parser =argparse.ArgumentParser() #parser.add_argument('--saves' , dest='should_save_model' , action = 'store_true') #parser.add_argument('--no-saves' , dest='should_save_model', action ='store_false') parser.add_argument('--optimizer' ,'-o' , type=str ,choices=['sgd','momentum','adam'],help='optimizer') parser.add_argument('--use_nesterov' , type=bool , help='only for momentum , use nesterov') parser.add_argument('--aug' , dest='use_aug', action='store_true' , help='augmentation') parser.add_argument('--no_aug' , dest='use_aug', action='store_false' , help='augmentation') parser.add_argument('--aug_lv1' , dest='use_aug_lv1', action='store_true' , help='augmentation') parser.add_argument('--no_aug_lv1' , dest='use_aug_lv1', action='store_false' , help='augmentation') parser.add_argument('--clahe' , dest='use_clahe', action='store_true' , help='augmentation') parser.add_argument('--no_clahe' , dest='use_clahe', action='store_false' , help='augmentation') parser.add_argument('--actmap', dest='use_actmap' ,action='store_true') parser.add_argument('--no_actmap', dest='use_actmap', action='store_false') parser.add_argument('--random_crop_resize' , '-r', type = int , help='if you use random crop resize , you can choice randdom crop ') parser.add_argument('--batch_size' ,'-b' , type=int , help='batch size') parser.add_argument('--max_iter', '-i' , type=int , help='iteration') parser.add_argument('--l2_loss', dest='use_l2_loss', action='store_true' ,help='l2 loss true or False') parser.add_argument('--no_l2_loss', dest='use_l2_loss', action='store_false' ,help='l2 loss true or False') parser.add_argument('--weight_decay', type = float , help='L2 weight decay ') parser.add_argument('--vgg_model' ,'-m' , choices=['vgg_11','vgg_13','vgg_16', 'vgg_19']) parser.add_argument('--BN' , dest='use_BN' , action='store_true' , help = 'bn True or not') parser.add_argument('--no_BN',dest='use_BN' , action = 'store_false', help = 'bn True or not') parser.add_argument('--data_dir' , help='the folder where the data is saved ' ) parser.add_argument('--init_lr' , type = float , help='init learning rate ') parser.add_argument('--lr_decay_step' ,type=int , help='decay step for learning rate') parser.add_argument('--folder_name' ,help='ex model/fundus_300/folder_name/0 .. 
logs/fundus_300/folder_name/0 , type2/folder_name/0') args=parser.parse_args() print 'aug : ' , args.use_aug print 'aug_lv1 : ' , args.use_aug_lv1 print 'actmap : ' , args.use_actmap print 'use_l2_loss: ' , args.use_l2_loss print 'weight_decay' , args.weight_decay print 'BN : ' , args.use_BN print 'Init Learning rate ' , args.init_lr print 'Decay step for learning rate, ',args.lr_decay_step print 'optimizer : ', args.optimizer print 'use nesterov : ',args.use_nesterov print 'random crop size : ',args.random_crop_resize print 'batch size : ',args.batch_size print 'max iter : ',args.max_iter print 'data dir : ',args.data_dir def count_trainable_params(): total_parameters = 0 for variable in tf.trainable_variables(): shape = variable.get_shape() variable_parametes = 1 for dim in shape: variable_parametes *= dim.value total_parameters += variable_parametes print("Total training params: %.1fM" % (total_parameters / 1e6)) def cls2onehot(cls , depth): labs=np.zeros([len(cls) , depth]) for i,c in enumerate(cls): labs[i,c]=1 return labs def reconstruct_tfrecord_rawdata(tfrecord_path): debug_flag_lv0 = True debug_flag_lv1 = True if __debug__ == debug_flag_lv0: print 'debug start | batch.py | class tfrecord_batch | reconstruct_tfrecord_rawdata ' print 'now Reconstruct Image Data please wait a second' reconstruct_image = [] # caution record_iter is generator record_iter = tf.python_io.tf_record_iterator(path=tfrecord_path) ret_img_list = [] ret_lab_list = [] ret_filename_list = [] for i, str_record in enumerate(record_iter): msg = '\r -progress {0}'.format(i) sys.stdout.write(msg) sys.stdout.flush() example = tf.train.Example() example.ParseFromString(str_record) height = int(example.features.feature['height'].int64_list.value[0]) width = int(example.features.feature['width'].int64_list.value[0]) raw_image = (example.features.feature['raw_image'].bytes_list.value[0]) label = int(example.features.feature['label'].int64_list.value[0]) filename = (example.features.feature['filename'].bytes_list.value[0]) image = np.fromstring(raw_image, dtype=np.uint8) image = image.reshape((height, width, -1)) ret_img_list.append(image) ret_lab_list.append(label) ret_filename_list.append(filename) ret_img = np.asarray(ret_img_list) ret_lab = np.asarray(ret_lab_list) if debug_flag_lv1 == True: print '' print 'images shape : ', np.shape(ret_img) print 'labels shape : ', np.shape(ret_lab) print 'length of filenames : ', len(ret_filename_list) return ret_img, ret_lab, ret_filename_list # pickle 형태로 저장되어 있는 데이터를 불러옵니다. 
imgs_list=[] root_dir =args.data_dir #Load Train imgs ,labs , Test imgs , labs """ train_imgs , train_labs , train_fnames = reconstruct_tfrecord_rawdata(os.path.join(root_dir , 'train.tfrecord')) test_imgs , test_labs , test_fnames = reconstruct_tfrecord_rawdata(os.path.join(root_dir , 'test.tfrecord')) """ names = ['normal_train.npy' , 'normal_test.npy' ,'abnormal_train.npy' , 'abnormal_test.npy'] normal_train_imgs , normal_test_imgs, abnormal_train_imgs , abnormal_test_imgs, =\ map( lambda name : np.load(os.path.join(root_dir ,name)) , names) NORMAL = 0 ABNORMAL = 1 normal_train_labs=np.zeros([len(normal_train_imgs) , 2]) normal_train_labs[:,NORMAL]=1 abnormal_train_labs=np.zeros([len(abnormal_train_imgs) , 2]) abnormal_train_labs[:,ABNORMAL]=1 normal_test_labs=np.zeros([len(normal_test_imgs) , 2]) normal_test_labs[:,NORMAL]=1 abnormal_test_labs=np.zeros([len(abnormal_test_imgs) , 2]) abnormal_test_labs[:,ABNORMAL]=1 print 'Normal Training Data shape : {}'.format(np.shape(normal_train_imgs)) print 'ABNormal Training Data shape : {}'.format(np.shape(abnormal_train_imgs)) print 'Normal Test Data shape : {}'.format(np.shape(normal_test_imgs)) print 'ABNormal Test Data shape : {}'.format(np.shape(abnormal_test_imgs)) print 'Normal Training Labels shape : {}'.format(np.shape(normal_train_labs)) print 'ABNormal Training Labelsshape : {}'.format(np.shape(abnormal_train_labs)) print 'Normal Test Labelsshape : {}'.format(np.shape(normal_test_labs)) print 'ABNormal Test Labels shape : {}'.format(np.shape(abnormal_test_labs)) # normal 과 abnormal 의 balance 을 맞춥니다 train_imgs = np.vstack([normal_train_imgs , abnormal_train_imgs ,abnormal_train_imgs,abnormal_train_imgs,\ abnormal_train_imgs,abnormal_train_imgs,abnormal_train_imgs]) train_labs = np.vstack([normal_train_labs , abnormal_train_labs ,abnormal_train_labs ,abnormal_train_labs,\ abnormal_train_labs ,abnormal_train_labs ,abnormal_train_labs]) test_imgs = np.vstack([normal_test_imgs , abnormal_test_imgs]) test_labs = np.vstack([normal_test_labs, abnormal_test_labs]) print 'Train Images Shape : {} '.format(np.shape(train_imgs)) print 'Train Labels Shape : {} '.format(np.shape(train_labs)) print 'Test Images Shape : {} '.format(np.shape(test_imgs)) print 'Test Labels Shape : {} '.format(np.shape(test_labs)) # Apply Clahe if args.use_clahe: print 'Apply clahe ....' import matplotlib.pyplot as plt train_imgs= map(aug.clahe_equalized, train_imgs) test_imgs = map(aug.clahe_equalized, test_imgs) train_imgs , test_imgs = map(np.asarray , [train_imgs , test_imgs]) #normalize print np.shape(test_labs) if np.max(test_imgs) > 1: #train_imgs=train_imgs/255. test_imgs=test_imgs/255. 
print 'test_imgs max :', np.max(test_imgs) h,w,ch=train_imgs.shape[1:] print h,w,ch n_classes=np.shape(train_labs)[-1] print 'the # classes : {}'.format(n_classes) x_ , y_ , cam_ind, lr_ , is_training ,global_step = model.define_inputs(shape=[None, h ,w, ch ] , n_classes=n_classes ) logits=model.build_graph(x_=x_ , y_=y_ , cam_ind= cam_ind , is_training=is_training , aug_flag=args.use_aug,\ actmap_flag=args.use_actmap , model=args.vgg_model,random_crop_resize=args.random_crop_resize, \ bn = args.use_BN) lr_op= tf.train.exponential_decay(args.init_lr, global_step , decay_steps=int(args.max_iter / args.lr_decay_step ), decay_rate=0.96, staircase=False) train_op, accuracy_op , loss_op , pred_op = \ model.train_algorithm(args.optimizer, logits=logits, labels=y_, learning_rate=lr_op, l2_loss=args.use_l2_loss, weight_decay=args.weight_decay) log_count =0; while True: logs_root_path='./logs/{}'.format(args.folder_name ) try: os.makedirs(logs_root_path) except Exception as e : print e pass; print logs_root_path logs_path=os.path.join( logs_root_path , str(log_count)) if not os.path.isdir(logs_path): os.mkdir(logs_path) break; else: log_count+=1 sess, saver , summary_writer =model.sess_start(logs_path) model_count =0; while True: models_root_path='./models/{}'.format(args.folder_name) try: os.makedirs(models_root_path) except Exception as e: print e pass; models_path=os.path.join(models_root_path , str(model_count)) if not os.path.isdir(models_path): os.mkdir(models_path) break; else: model_count+=1 best_acc_root = os.path.join(models_path, 'best_acc') best_loss_root = os.path.join(models_path, 'best_loss') os.mkdir(best_acc_root) os.mkdir(best_loss_root) print 'Logs savedir: {}'.format(logs_path) print 'Model savedir : {}'.format(models_path) min_loss = 1000. max_acc = 0. max_iter=args.max_iter ckpt=100 batch_size=args.batch_size start_time=0 train_acc=0 train_val=0 train_loss=1000. share=len(test_labs)/batch_size remainder=len(test_labs)/batch_size def show_progress(step, max_iter): msg = '\r progress {}/{}'.format(step, max_iter) sys.stdout.write(msg) sys.stdout.flush() count_trainable_params() for step in range(max_iter): if step % ckpt==0: """ #### testing ### """ print '### Testing ###' test_fetches = [ accuracy_op, loss_op, pred_op , lr_op] val_acc_mean , val_loss_mean , pred_all = [] , [] , [] for i in range(share): #여기서 테스트 셋을 sess.run()할수 있게 쪼갭니다 test_feedDict = {x_: test_imgs[i * batch_size:(i + 1) * batch_size],
an(np.asarray(val_acc_mean)) val_loss_mean=np.mean(np.asarray(val_loss_mean)) if val_acc_mean > max_acc: #best acc max_acc=val_acc_mean print 'max acc : {}'.format(max_acc) best_acc_folder=os.path.join( best_acc_root, 'step_{}_acc_{}'.format(step , max_acc)) os.mkdir(best_acc_folder) saver.save(sess=sess,save_path=os.path.join(best_acc_folder , 'model')) print 'Step : {} '.format(step) print 'Learning Rate : {} '.format(learning_rate) print 'Train acc : {} Train loss : {}'.format( train_acc , train_loss) print 'validation acc : {} loss : {}'.format( val_acc_mean, val_loss_mean ) # add learning rate summary summary=tf.Summary(value=[tf.Summary.Value(tag='learning_rate' , simple_value = float(learning_rate))]) summary_writer.add_summary(summary, step) model.write_acc_loss( summary_writer, 'validation', loss=val_loss_mean, acc=val_acc_mean, step=step) model_path=os.path.join(models_path, str(step)) os.mkdir(model_path) # e.g) models/fundus_300/100/model.ckpt or model.meta #saver.save(sess=sess,save_path=os.path.join(model_path,'model' , folder_name)) """ #### training ### """ train_fetches = [train_op, accuracy_op, loss_op ] batch_xs, batch_ys , batch_fname= input.next_batch(batch_size, train_imgs, train_labs ) if args.use_aug: batch_xs=aug.random_rotate_90(batch_xs) # random 으로 90 180 , 270 , 360 도를 회전합니다. if args.use_aug_lv1: batch_xs = aug.aug_lv1(batch_xs) # random 으로 90 180 , 270 , 360 도를 회전합니다. if i ==0 : np.save('aug_lv1_batch_sample.npy' , batch_xs) batch_xs=batch_xs/255. train_feedDict = {x_: batch_xs, y_: batch_ys, cam_ind: ABNORMAL, lr_: learning_rate, is_training: True, global_step: step} _ , train_acc, train_loss = sess.run( fetches=train_fetches, feed_dict=train_feedDict ) #print 'train acc : {} loss : {}'.format(train_acc, train_loss) model.write_acc_loss(summary_writer ,'train' , loss= train_loss , acc=train_acc ,step= step)
y_: test_labs[i * batch_size:(i + 1) * batch_size], is_training: False, global_step: step} val_acc, val_loss, pred, learning_rate = sess.run(fetches=test_fetches, feed_dict=test_feedDict) val_acc_mean.append(val_acc) val_loss_mean.append(val_loss) pred_all.append(pred) val_acc_mean=np.mean(np.asarray(val_acc_mean)) val_acc_mean=np.me
conditional_block
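Note: the script converts class indices into one-hot labels (NORMAL=0, ABNORMAL=1) and rebalances the training set by vstacking the abnormal images six times against a single copy of the normal images. A minimal NumPy sketch of those two steps, assuming both image arrays share the same per-image shape; it is a simplification, not the script itself.

    import numpy as np

    NORMAL, ABNORMAL = 0, 1

    def one_hot(n_samples, cls, depth=2):
        """Build a one-hot label array the way the script builds normal/abnormal labels."""
        labs = np.zeros([n_samples, depth])
        labs[:, cls] = 1
        return labs

    def oversample(normal_imgs, abnormal_imgs, factor=6):
        """Stack the abnormal set `factor` times so both classes contribute comparably."""
        imgs = np.vstack([normal_imgs] + [abnormal_imgs] * factor)
        labs = np.vstack([one_hot(len(normal_imgs), NORMAL)]
                         + [one_hot(len(abnormal_imgs), ABNORMAL)] * factor)
        return imgs, labs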
run_this_code_CACSSEOUL.py
#-*- coding:utf-8 -*- import model import input import os import numpy as np import argparse import sys import tensorflow as tf import aug import numpy as np import random from PIL import Image import time import pickle parser =argparse.ArgumentParser() #parser.add_argument('--saves' , dest='should_save_model' , action = 'store_true') #parser.add_argument('--no-saves' , dest='should_save_model', action ='store_false') parser.add_argument('--optimizer' ,'-o' , type=str ,choices=['sgd','momentum','adam'],help='optimizer') parser.add_argument('--use_nesterov' , type=bool , help='only for momentum , use nesterov') parser.add_argument('--aug' , dest='use_aug', action='store_true' , help='augmentation') parser.add_argument('--no_aug' , dest='use_aug', action='store_false' , help='augmentation') parser.add_argument('--aug_lv1' , dest='use_aug_lv1', action='store_true' , help='augmentation') parser.add_argument('--no_aug_lv1' , dest='use_aug_lv1', action='store_false' , help='augmentation') parser.add_argument('--clahe' , dest='use_clahe', action='store_true' , help='augmentation') parser.add_argument('--no_clahe' , dest='use_clahe', action='store_false' , help='augmentation') parser.add_argument('--actmap', dest='use_actmap' ,action='store_true') parser.add_argument('--no_actmap', dest='use_actmap', action='store_false') parser.add_argument('--random_crop_resize' , '-r', type = int , help='if you use random crop resize , you can choice randdom crop ') parser.add_argument('--batch_size' ,'-b' , type=int , help='batch size') parser.add_argument('--max_iter', '-i' , type=int , help='iteration') parser.add_argument('--l2_loss', dest='use_l2_loss', action='store_true' ,help='l2 loss true or False') parser.add_argument('--no_l2_loss', dest='use_l2_loss', action='store_false' ,help='l2 loss true or False') parser.add_argument('--weight_decay', type = float , help='L2 weight decay ') parser.add_argument('--vgg_model' ,'-m' , choices=['vgg_11','vgg_13','vgg_16', 'vgg_19']) parser.add_argument('--BN' , dest='use_BN' , action='store_true' , help = 'bn True or not') parser.add_argument('--no_BN',dest='use_BN' , action = 'store_false', help = 'bn True or not') parser.add_argument('--data_dir' , help='the folder where the data is saved ' ) parser.add_argument('--init_lr' , type = float , help='init learning rate ') parser.add_argument('--lr_decay_step' ,type=int , help='decay step for learning rate') parser.add_argument('--folder_name' ,help='ex model/fundus_300/folder_name/0 .. 
logs/fundus_300/folder_name/0 , type2/folder_name/0') args=parser.parse_args() print 'aug : ' , args.use_aug print 'aug_lv1 : ' , args.use_aug_lv1 print 'actmap : ' , args.use_actmap print 'use_l2_loss: ' , args.use_l2_loss print 'weight_decay' , args.weight_decay print 'BN : ' , args.use_BN print 'Init Learning rate ' , args.init_lr print 'Decay step for learning rate, ',args.lr_decay_step print 'optimizer : ', args.optimizer print 'use nesterov : ',args.use_nesterov print 'random crop size : ',args.random_crop_resize print 'batch size : ',args.batch_size print 'max iter : ',args.max_iter print 'data dir : ',args.data_dir def count_trainable_params(): total_parameters = 0 for variable in tf.trainable_variables(): shape = variable.get_shape() variable_parametes = 1 for dim in shape: variable_parametes *= dim.value total_parameters += variable_parametes print("Total training params: %.1fM" % (total_parameters / 1e6)) def cls2onehot(cls , depth): labs=np.zeros([len(cls) , depth]) for i,c in enumerate(cls): labs[i,c]=1 return labs def reconstruct_tfrecord_rawdata(tfrecord_path): debug_flag_lv0 = True debug_flag_lv1 = True if __debug__ == debug_flag_lv0: print 'debug start | batch.py | class tfrecord_batch | reconstruct_tfrecord_rawdata ' print 'now Reconstruct Image Data please wait a second' reconstruct_image = [] # caution record_iter is generator record_iter = tf.python_io.tf_record_iterator(path=tfrecord_path) ret_img_list = [] ret_lab_list = [] ret_filename_list = [] for i, str_record in enumerate(record_iter): msg = '\r -progress {0}'.format(i) sys.stdout.write(msg) sys.stdout.flush() example = tf.train.Example() example.ParseFromString(str_record) height = int(example.features.feature['height'].int64_list.value[0]) width = int(example.features.feature['width'].int64_list.value[0]) raw_image = (example.features.feature['raw_image'].bytes_list.value[0]) label = int(example.features.feature['label'].int64_list.value[0]) filename = (example.features.feature['filename'].bytes_list.value[0]) image = np.fromstring(raw_image, dtype=np.uint8) image = image.reshape((height, width, -1)) ret_img_list.append(image) ret_lab_list.append(label) ret_filename_list.append(filename) ret_img = np.asarray(ret_img_list) ret_lab = np.asarray(ret_lab_list) if debug_flag_lv1 == True: print '' print 'images shape : ', np.shape(ret_img) print 'labels shape : ', np.shape(ret_lab) print 'length of filenames : ', len(ret_filename_list) return ret_img, ret_lab, ret_filename_list # pickle 형태로 저장되어 있는 데이터를 불러옵니다. 
imgs_list=[] root_dir =args.data_dir #Load Train imgs ,labs , Test imgs , labs """ train_imgs , train_labs , train_fnames = reconstruct_tfrecord_rawdata(os.path.join(root_dir , 'train.tfrecord')) test_imgs , test_labs , test_fnames = reconstruct_tfrecord_rawdata(os.path.join(root_dir , 'test.tfrecord')) """ names = ['normal_train.npy' , 'normal_test.npy' ,'abnormal_train.npy' , 'abnormal_test.npy'] normal_train_imgs , normal_test_imgs, abnormal_train_imgs , abnormal_test_imgs, =\ map( lambda name : np.load(os.path.join(root_dir ,name)) , names) NORMAL = 0 ABNORMAL = 1 normal_train_labs=np.zeros([len(normal_train_imgs) , 2]) normal_train_labs[:,NORMAL]=1 abnormal_train_labs=np.zeros([len(abnormal_train_imgs) , 2]) abnormal_train_labs[:,ABNORMAL]=1 normal_test_labs=np.zeros([len(normal_test_imgs) , 2]) normal_test_labs[:,NORMAL]=1 abnormal_test_labs=np.zeros([len(abnormal_test_imgs) , 2]) abnormal_test_labs[:,ABNORMAL]=1 print 'Normal Training Data shape : {}'.format(np.shape(normal_train_imgs)) print 'ABNormal Training Data shape : {}'.format(np.shape(abnormal_train_imgs)) print 'Normal Test Data shape : {}'.format(np.shape(normal_test_imgs)) print 'ABNormal Test Data shape : {}'.format(np.shape(abnormal_test_imgs)) print 'Normal Training Labels shape : {}'.format(np.shape(normal_train_labs)) print 'ABNormal Training Labelsshape : {}'.format(np.shape(abnormal_train_labs)) print 'Normal Test Labelsshape : {}'.format(np.shape(normal_test_labs)) print 'ABNormal Test Labels shape : {}'.format(np.shape(abnormal_test_labs)) # normal 과 abnormal 의 balance 을 맞춥니다 train_imgs = np.vstack([normal_train_imgs , abnormal_train_imgs ,abnormal_train_imgs,abnormal_train_imgs,\ abnormal_train_imgs,abnormal_train_imgs,abnormal_train_imgs]) train_labs = np.vstack([normal_train_labs , abnormal_train_labs ,abnormal_train_labs ,abnormal_train_labs,\ abnormal_train_labs ,abnormal_train_labs ,abnormal_train_labs]) test_imgs = np.vstack([normal_test_imgs , abnormal_test_imgs]) test_labs = np.vstack([normal_test_labs, abnormal_test_labs]) print 'Train Images Shape : {} '.format(np.shape(train_imgs)) print 'Train Labels Shape : {} '.format(np.shape(train_labs)) print 'Test Images Shape : {} '.format(np.shape(test_imgs)) print 'Test Labels Shape : {} '.format(np.shape(test_labs)) # Apply Clahe if args.use_clahe: print 'Apply clahe ....' import matplotlib.pyplot as plt train_imgs= map(aug.clahe_equalized, train_imgs) test_imgs = map(aug.clahe_equalized, test_imgs) train_imgs , test_imgs = map(np.asarray , [train_imgs , test_imgs]) #normalize print np.shape(test_labs) if np.max(test_imgs) > 1: #train_imgs=train_imgs/255. test_imgs=test_imgs/255. 
print 'test_imgs max :', np.max(test_imgs) h,w,ch=train_imgs.shape[1:] print h,w,ch n_classes=np.shape(train_labs)[-1] print 'the # classes : {}'.format(n_classes) x_ , y_ , cam_ind, lr_ , is_training ,global_step = model.define_inputs(shape=[None, h ,w, ch ] , n_classes=n_classes ) logits=model.build_graph(x_=x_ , y_=y_ , cam_ind= cam_ind , is_training=is_training , aug_flag=args.use_aug,\ actmap_flag=args.use_actmap , model=args.vgg_model,random_crop_resize=args.random_crop_resize, \ bn = args.use_BN) lr_op= tf.train.exponential_decay(args.init_lr, global_step , decay_steps=int(args.max_iter / args.lr_decay_step ), decay_rate=0.96, staircase=False) train_op, accuracy_op , loss_op , pred_op = \ model.train_algorithm(args.optimizer, logits=logits, labels=y_, learning_rate=lr_op, l2_loss=args.use_l2_loss, weight_decay=args.weight_decay) log_count =0; while True: logs_root_path='./logs/{}'.format(args.folder_name ) try: os.makedirs(logs_root_path) except Exception as e : print e pass; print logs_root_path logs_path=os.path.join( logs_root_path , str(log_count)) if not os.path.isdir(logs_path): os.mkdir(logs_path) break; else: log_count+=1 sess, saver , summary_writer =model.sess_start(logs_path) model_count =0; while True: models_root_path='./models/{}'.format(args.folder_name) try: os.makedirs(models_root_path) except Exception as e: print e pass; models_path=os.path.join(models_root_path , str(model_count)) if not os.path.isdir(models_path): os.mkdir(models_path) break; else: model_count+=1 best_acc_root = os.path.join(models_path, 'best_acc') best_loss_root = os.path.join(models_path, 'best_loss') os.mkdir(best_acc_root) os.mkdir(best_loss_root) print 'Logs savedir: {}'.format(logs_path) print 'Model savedir : {}'.format(models_path) min_loss = 1000. max_acc = 0. max_iter=args.max_iter ckpt=100 batch_size=args.batch_size start_time=0 train_acc=0 train_val=0 train_loss=1000. share=len(test_labs)/batch_size remainder=len(test_labs)/batch_size def show_progress(step, max_iter): msg = '\r progr
rmat(step, max_iter) sys.stdout.write(msg) sys.stdout.flush() count_trainable_params() for step in range(max_iter): if step % ckpt==0: """ #### testing ### """ print '### Testing ###' test_fetches = [ accuracy_op, loss_op, pred_op , lr_op] val_acc_mean , val_loss_mean , pred_all = [] , [] , [] for i in range(share): #여기서 테스트 셋을 sess.run()할수 있게 쪼갭니다 test_feedDict = {x_: test_imgs[i * batch_size:(i + 1) * batch_size], y_: test_labs[i * batch_size:(i + 1) * batch_size], is_training: False, global_step: step} val_acc, val_loss, pred, learning_rate = sess.run(fetches=test_fetches, feed_dict=test_feedDict) val_acc_mean.append(val_acc) val_loss_mean.append(val_loss) pred_all.append(pred) val_acc_mean=np.mean(np.asarray(val_acc_mean)) val_acc_mean=np.mean(np.asarray(val_acc_mean)) val_loss_mean=np.mean(np.asarray(val_loss_mean)) if val_acc_mean > max_acc: #best acc max_acc=val_acc_mean print 'max acc : {}'.format(max_acc) best_acc_folder=os.path.join( best_acc_root, 'step_{}_acc_{}'.format(step , max_acc)) os.mkdir(best_acc_folder) saver.save(sess=sess,save_path=os.path.join(best_acc_folder , 'model')) print 'Step : {} '.format(step) print 'Learning Rate : {} '.format(learning_rate) print 'Train acc : {} Train loss : {}'.format( train_acc , train_loss) print 'validation acc : {} loss : {}'.format( val_acc_mean, val_loss_mean ) # add learning rate summary summary=tf.Summary(value=[tf.Summary.Value(tag='learning_rate' , simple_value = float(learning_rate))]) summary_writer.add_summary(summary, step) model.write_acc_loss( summary_writer, 'validation', loss=val_loss_mean, acc=val_acc_mean, step=step) model_path=os.path.join(models_path, str(step)) os.mkdir(model_path) # e.g) models/fundus_300/100/model.ckpt or model.meta #saver.save(sess=sess,save_path=os.path.join(model_path,'model' , folder_name)) """ #### training ### """ train_fetches = [train_op, accuracy_op, loss_op ] batch_xs, batch_ys , batch_fname= input.next_batch(batch_size, train_imgs, train_labs ) if args.use_aug: batch_xs=aug.random_rotate_90(batch_xs) # random 으로 90 180 , 270 , 360 도를 회전합니다. if args.use_aug_lv1: batch_xs = aug.aug_lv1(batch_xs) # random 으로 90 180 , 270 , 360 도를 회전합니다. if i ==0 : np.save('aug_lv1_batch_sample.npy' , batch_xs) batch_xs=batch_xs/255. train_feedDict = {x_: batch_xs, y_: batch_ys, cam_ind: ABNORMAL, lr_: learning_rate, is_training: True, global_step: step} _ , train_acc, train_loss = sess.run( fetches=train_fetches, feed_dict=train_feedDict ) #print 'train acc : {} loss : {}'.format(train_acc, train_loss) model.write_acc_loss(summary_writer ,'train' , loss= train_loss , acc=train_acc ,step= step)
ess {}/{}'.fo
identifier_name
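Note: the evaluation part of the loop above splits the test set into share = len(test_labs)/batch_size full batches, runs each slice through the graph, and averages accuracy and loss; any samples in a trailing partial batch are skipped (the `remainder` variable is computed but never used). A framework-free sketch of that batching pattern, where run_batch stands in for the sess.run call:

    import numpy as np

    def evaluate_in_batches(run_batch, test_imgs, test_labs, batch_size):
        """Slice the test set into full batches, evaluate each, and average the metrics.
        run_batch(xs, ys) -> (acc, loss) is a stand-in for sess.run(test_fetches, feed_dict)."""
        n_full = len(test_labs) // batch_size   # 'share' in the script
        accs, losses = [], []
        for i in range(n_full):
            xs = test_imgs[i * batch_size:(i + 1) * batch_size]
            ys = test_labs[i * batch_size:(i + 1) * batch_size]
            acc, loss = run_batch(xs, ys)
            accs.append(acc)
            losses.append(loss)
        # samples in the last partial batch are dropped, as in the original loop
        return float(np.mean(accs)), float(np.mean(losses))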
run_this_code_CACSSEOUL.py
#-*- coding:utf-8 -*- import model import input import os import numpy as np import argparse import sys import tensorflow as tf import aug import numpy as np import random from PIL import Image import time import pickle parser =argparse.ArgumentParser() #parser.add_argument('--saves' , dest='should_save_model' , action = 'store_true') #parser.add_argument('--no-saves' , dest='should_save_model', action ='store_false') parser.add_argument('--optimizer' ,'-o' , type=str ,choices=['sgd','momentum','adam'],help='optimizer') parser.add_argument('--use_nesterov' , type=bool , help='only for momentum , use nesterov') parser.add_argument('--aug' , dest='use_aug', action='store_true' , help='augmentation') parser.add_argument('--no_aug' , dest='use_aug', action='store_false' , help='augmentation') parser.add_argument('--aug_lv1' , dest='use_aug_lv1', action='store_true' , help='augmentation') parser.add_argument('--no_aug_lv1' , dest='use_aug_lv1', action='store_false' , help='augmentation')
parser.add_argument('--clahe' , dest='use_clahe', action='store_true' , help='augmentation') parser.add_argument('--no_clahe' , dest='use_clahe', action='store_false' , help='augmentation') parser.add_argument('--actmap', dest='use_actmap' ,action='store_true') parser.add_argument('--no_actmap', dest='use_actmap', action='store_false') parser.add_argument('--random_crop_resize' , '-r', type = int , help='if you use random crop resize , you can choice randdom crop ') parser.add_argument('--batch_size' ,'-b' , type=int , help='batch size') parser.add_argument('--max_iter', '-i' , type=int , help='iteration') parser.add_argument('--l2_loss', dest='use_l2_loss', action='store_true' ,help='l2 loss true or False') parser.add_argument('--no_l2_loss', dest='use_l2_loss', action='store_false' ,help='l2 loss true or False') parser.add_argument('--weight_decay', type = float , help='L2 weight decay ') parser.add_argument('--vgg_model' ,'-m' , choices=['vgg_11','vgg_13','vgg_16', 'vgg_19']) parser.add_argument('--BN' , dest='use_BN' , action='store_true' , help = 'bn True or not') parser.add_argument('--no_BN',dest='use_BN' , action = 'store_false', help = 'bn True or not') parser.add_argument('--data_dir' , help='the folder where the data is saved ' ) parser.add_argument('--init_lr' , type = float , help='init learning rate ') parser.add_argument('--lr_decay_step' ,type=int , help='decay step for learning rate') parser.add_argument('--folder_name' ,help='ex model/fundus_300/folder_name/0 .. logs/fundus_300/folder_name/0 , type2/folder_name/0') args=parser.parse_args() print 'aug : ' , args.use_aug print 'aug_lv1 : ' , args.use_aug_lv1 print 'actmap : ' , args.use_actmap print 'use_l2_loss: ' , args.use_l2_loss print 'weight_decay' , args.weight_decay print 'BN : ' , args.use_BN print 'Init Learning rate ' , args.init_lr print 'Decay step for learning rate, ',args.lr_decay_step print 'optimizer : ', args.optimizer print 'use nesterov : ',args.use_nesterov print 'random crop size : ',args.random_crop_resize print 'batch size : ',args.batch_size print 'max iter : ',args.max_iter print 'data dir : ',args.data_dir def count_trainable_params(): total_parameters = 0 for variable in tf.trainable_variables(): shape = variable.get_shape() variable_parametes = 1 for dim in shape: variable_parametes *= dim.value total_parameters += variable_parametes print("Total training params: %.1fM" % (total_parameters / 1e6)) def cls2onehot(cls , depth): labs=np.zeros([len(cls) , depth]) for i,c in enumerate(cls): labs[i,c]=1 return labs def reconstruct_tfrecord_rawdata(tfrecord_path): debug_flag_lv0 = True debug_flag_lv1 = True if __debug__ == debug_flag_lv0: print 'debug start | batch.py | class tfrecord_batch | reconstruct_tfrecord_rawdata ' print 'now Reconstruct Image Data please wait a second' reconstruct_image = [] # caution record_iter is generator record_iter = tf.python_io.tf_record_iterator(path=tfrecord_path) ret_img_list = [] ret_lab_list = [] ret_filename_list = [] for i, str_record in enumerate(record_iter): msg = '\r -progress {0}'.format(i) sys.stdout.write(msg) sys.stdout.flush() example = tf.train.Example() example.ParseFromString(str_record) height = int(example.features.feature['height'].int64_list.value[0]) width = int(example.features.feature['width'].int64_list.value[0]) raw_image = (example.features.feature['raw_image'].bytes_list.value[0]) label = int(example.features.feature['label'].int64_list.value[0]) filename = (example.features.feature['filename'].bytes_list.value[0]) image = 
np.fromstring(raw_image, dtype=np.uint8) image = image.reshape((height, width, -1)) ret_img_list.append(image) ret_lab_list.append(label) ret_filename_list.append(filename) ret_img = np.asarray(ret_img_list) ret_lab = np.asarray(ret_lab_list) if debug_flag_lv1 == True: print '' print 'images shape : ', np.shape(ret_img) print 'labels shape : ', np.shape(ret_lab) print 'length of filenames : ', len(ret_filename_list) return ret_img, ret_lab, ret_filename_list # pickle 형태로 저장되어 있는 데이터를 불러옵니다. imgs_list=[] root_dir =args.data_dir #Load Train imgs ,labs , Test imgs , labs """ train_imgs , train_labs , train_fnames = reconstruct_tfrecord_rawdata(os.path.join(root_dir , 'train.tfrecord')) test_imgs , test_labs , test_fnames = reconstruct_tfrecord_rawdata(os.path.join(root_dir , 'test.tfrecord')) """ names = ['normal_train.npy' , 'normal_test.npy' ,'abnormal_train.npy' , 'abnormal_test.npy'] normal_train_imgs , normal_test_imgs, abnormal_train_imgs , abnormal_test_imgs, =\ map( lambda name : np.load(os.path.join(root_dir ,name)) , names) NORMAL = 0 ABNORMAL = 1 normal_train_labs=np.zeros([len(normal_train_imgs) , 2]) normal_train_labs[:,NORMAL]=1 abnormal_train_labs=np.zeros([len(abnormal_train_imgs) , 2]) abnormal_train_labs[:,ABNORMAL]=1 normal_test_labs=np.zeros([len(normal_test_imgs) , 2]) normal_test_labs[:,NORMAL]=1 abnormal_test_labs=np.zeros([len(abnormal_test_imgs) , 2]) abnormal_test_labs[:,ABNORMAL]=1 print 'Normal Training Data shape : {}'.format(np.shape(normal_train_imgs)) print 'ABNormal Training Data shape : {}'.format(np.shape(abnormal_train_imgs)) print 'Normal Test Data shape : {}'.format(np.shape(normal_test_imgs)) print 'ABNormal Test Data shape : {}'.format(np.shape(abnormal_test_imgs)) print 'Normal Training Labels shape : {}'.format(np.shape(normal_train_labs)) print 'ABNormal Training Labelsshape : {}'.format(np.shape(abnormal_train_labs)) print 'Normal Test Labelsshape : {}'.format(np.shape(normal_test_labs)) print 'ABNormal Test Labels shape : {}'.format(np.shape(abnormal_test_labs)) # normal 과 abnormal 의 balance 을 맞춥니다 train_imgs = np.vstack([normal_train_imgs , abnormal_train_imgs ,abnormal_train_imgs,abnormal_train_imgs,\ abnormal_train_imgs,abnormal_train_imgs,abnormal_train_imgs]) train_labs = np.vstack([normal_train_labs , abnormal_train_labs ,abnormal_train_labs ,abnormal_train_labs,\ abnormal_train_labs ,abnormal_train_labs ,abnormal_train_labs]) test_imgs = np.vstack([normal_test_imgs , abnormal_test_imgs]) test_labs = np.vstack([normal_test_labs, abnormal_test_labs]) print 'Train Images Shape : {} '.format(np.shape(train_imgs)) print 'Train Labels Shape : {} '.format(np.shape(train_labs)) print 'Test Images Shape : {} '.format(np.shape(test_imgs)) print 'Test Labels Shape : {} '.format(np.shape(test_labs)) # Apply Clahe if args.use_clahe: print 'Apply clahe ....' import matplotlib.pyplot as plt train_imgs= map(aug.clahe_equalized, train_imgs) test_imgs = map(aug.clahe_equalized, test_imgs) train_imgs , test_imgs = map(np.asarray , [train_imgs , test_imgs]) #normalize print np.shape(test_labs) if np.max(test_imgs) > 1: #train_imgs=train_imgs/255. test_imgs=test_imgs/255. 
print 'test_imgs max :', np.max(test_imgs) h,w,ch=train_imgs.shape[1:] print h,w,ch n_classes=np.shape(train_labs)[-1] print 'the # classes : {}'.format(n_classes) x_ , y_ , cam_ind, lr_ , is_training ,global_step = model.define_inputs(shape=[None, h ,w, ch ] , n_classes=n_classes ) logits=model.build_graph(x_=x_ , y_=y_ , cam_ind= cam_ind , is_training=is_training , aug_flag=args.use_aug,\ actmap_flag=args.use_actmap , model=args.vgg_model,random_crop_resize=args.random_crop_resize, \ bn = args.use_BN) lr_op= tf.train.exponential_decay(args.init_lr, global_step , decay_steps=int(args.max_iter / args.lr_decay_step ), decay_rate=0.96, staircase=False) train_op, accuracy_op , loss_op , pred_op = \ model.train_algorithm(args.optimizer, logits=logits, labels=y_, learning_rate=lr_op, l2_loss=args.use_l2_loss, weight_decay=args.weight_decay) log_count =0; while True: logs_root_path='./logs/{}'.format(args.folder_name ) try: os.makedirs(logs_root_path) except Exception as e : print e pass; print logs_root_path logs_path=os.path.join( logs_root_path , str(log_count)) if not os.path.isdir(logs_path): os.mkdir(logs_path) break; else: log_count+=1 sess, saver , summary_writer =model.sess_start(logs_path) model_count =0; while True: models_root_path='./models/{}'.format(args.folder_name) try: os.makedirs(models_root_path) except Exception as e: print e pass; models_path=os.path.join(models_root_path , str(model_count)) if not os.path.isdir(models_path): os.mkdir(models_path) break; else: model_count+=1 best_acc_root = os.path.join(models_path, 'best_acc') best_loss_root = os.path.join(models_path, 'best_loss') os.mkdir(best_acc_root) os.mkdir(best_loss_root) print 'Logs savedir: {}'.format(logs_path) print 'Model savedir : {}'.format(models_path) min_loss = 1000. max_acc = 0. max_iter=args.max_iter ckpt=100 batch_size=args.batch_size start_time=0 train_acc=0 train_val=0 train_loss=1000. 
share=len(test_labs)/batch_size remainder=len(test_labs)/batch_size def show_progress(step, max_iter): msg = '\r progress {}/{}'.format(step, max_iter) sys.stdout.write(msg) sys.stdout.flush() count_trainable_params() for step in range(max_iter): if step % ckpt==0: """ #### testing ### """ print '### Testing ###' test_fetches = [ accuracy_op, loss_op, pred_op , lr_op] val_acc_mean , val_loss_mean , pred_all = [] , [] , [] for i in range(share): #여기서 테스트 셋을 sess.run()할수 있게 쪼갭니다 test_feedDict = {x_: test_imgs[i * batch_size:(i + 1) * batch_size], y_: test_labs[i * batch_size:(i + 1) * batch_size], is_training: False, global_step: step} val_acc, val_loss, pred, learning_rate = sess.run(fetches=test_fetches, feed_dict=test_feedDict) val_acc_mean.append(val_acc) val_loss_mean.append(val_loss) pred_all.append(pred) val_acc_mean=np.mean(np.asarray(val_acc_mean)) val_acc_mean=np.mean(np.asarray(val_acc_mean)) val_loss_mean=np.mean(np.asarray(val_loss_mean)) if val_acc_mean > max_acc: #best acc max_acc=val_acc_mean print 'max acc : {}'.format(max_acc) best_acc_folder=os.path.join( best_acc_root, 'step_{}_acc_{}'.format(step , max_acc)) os.mkdir(best_acc_folder) saver.save(sess=sess,save_path=os.path.join(best_acc_folder , 'model')) print 'Step : {} '.format(step) print 'Learning Rate : {} '.format(learning_rate) print 'Train acc : {} Train loss : {}'.format( train_acc , train_loss) print 'validation acc : {} loss : {}'.format( val_acc_mean, val_loss_mean ) # add learning rate summary summary=tf.Summary(value=[tf.Summary.Value(tag='learning_rate' , simple_value = float(learning_rate))]) summary_writer.add_summary(summary, step) model.write_acc_loss( summary_writer, 'validation', loss=val_loss_mean, acc=val_acc_mean, step=step) model_path=os.path.join(models_path, str(step)) os.mkdir(model_path) # e.g) models/fundus_300/100/model.ckpt or model.meta #saver.save(sess=sess,save_path=os.path.join(model_path,'model' , folder_name)) """ #### training ### """ train_fetches = [train_op, accuracy_op, loss_op ] batch_xs, batch_ys , batch_fname= input.next_batch(batch_size, train_imgs, train_labs ) if args.use_aug: batch_xs=aug.random_rotate_90(batch_xs) # random 으로 90 180 , 270 , 360 도를 회전합니다. if args.use_aug_lv1: batch_xs = aug.aug_lv1(batch_xs) # random 으로 90 180 , 270 , 360 도를 회전합니다. if i ==0 : np.save('aug_lv1_batch_sample.npy' , batch_xs) batch_xs=batch_xs/255. train_feedDict = {x_: batch_xs, y_: batch_ys, cam_ind: ABNORMAL, lr_: learning_rate, is_training: True, global_step: step} _ , train_acc, train_loss = sess.run( fetches=train_fetches, feed_dict=train_feedDict ) #print 'train acc : {} loss : {}'.format(train_acc, train_loss) model.write_acc_loss(summary_writer ,'train' , loss= train_loss , acc=train_acc ,step= step)
random_line_split
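Note: the script creates numbered run directories (./logs/<folder_name>/0, 1, 2, ... and likewise under ./models/) by probing indices until it finds one that does not exist yet. A compact sketch of that logic; it uses os.makedirs(exist_ok=True) instead of the original's try/except, so it is a simplification rather than a verbatim port.

    import os

    def next_run_dir(root):
        """Create <root>/<k> for the smallest k not already present and return its path."""
        os.makedirs(root, exist_ok=True)
        k = 0
        while True:
            path = os.path.join(root, str(k))
            if not os.path.isdir(path):
                os.mkdir(path)
                return path
            k += 1

    # e.g. logs_path = next_run_dir('./logs/{}'.format(args.folder_name))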
DailyCP.py
import requests import json import io import random import time import re import pyDes import base64 import uuid import sys import os import hashlib from Crypto.Cipher import AES class DailyCP: def __init__(self, schoolName="北部湾大学"): self.key = "ST83=@XV" # dynamic when app update self.session = requests.session() self.host = "" self.loginUrl = "" self.isIAPLogin = True self.session.headers.update({ "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.61 Safari/537.36 Edg/83.0.478.37", # "X-Requested-With": "XMLHttpRequest", "Pragma": "no-cache", "Accept": "application/json, text/plain, */*", # "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9", # "User-Agent": "okhttp/3.12.4" }) extension = {"deviceId": str(uuid.uuid4()), "systemName": "未来操作系统", "userId": "5201314", "appVersion": "8.1.13", "model": "红星一号量子计算机", "lon": 0.0, "systemVersion": "初号机", "lat": 0.0} self.session.headers.update( {"Cpdaily-Extension": self.encrypt(json.dumps(extension))}) self.setHostBySchoolName(schoolName) def setHostBySchoolName(self, schoolName): ret = self.request( "https://static.campushoy.com/apicache/tenantListSort") school = [j for i in ret["data"] for j in i["datas"] if j["name"] == schoolName] if len(school) == 0: print("不支持的学校或者学校名称错误,以下是支持的学校列表") print(ret) exit() ret = self.request( "https://mobile.campushoy.com/v6/config/guest/tenant/info?ids={ids}".format(ids=school[0]["id"])) self.loginUrl = ret["data"][0]["ampUrl"] if ret == "": print("学校并没有申请入驻今日校园平台") exit() print("{name}的登录地址{url}".format(name=schoolName, url=self.loginUrl)) self.host = re.findall(r"//(.*?)/", self.loginUrl)[0] def encrypt(self, text): k = pyDes.des(self.key, pyDes.CBC, b"\x01\x02\x03\x04\x05\x06\x07\x08", pad=None, padmode=pyDes.PAD_PKCS5) ret = k.encrypt(text) return base64.b64encode(ret).decode() def passwordEncrypt(self, text: str, key: str): def pad(s): return s + (len(key) - len(s) % len(key)) * chr(len(key) - len(s) % len(key)) def unpad(s): return s[:-ord(s[len(s) - 1:])] text = pad( "TdEEGazAXQMBzEAisrYaxRRax5kmnMJnpbKxcE6jxQfWRwP2J78adKYm8WzSkfXJ"+text).encode("utf-8") aes = AES.new(str.encode(key), AES.MODE_CBC, str.encode("ya8C45aRrBEn8sZH")) return base64.b64encode(aes.encrypt(text)) def request(self, url: str, body=None, parseJson=True, JsonBody=True, Referer=None): url = url.format(host=self.host) if Referer != None: self.session.headers.update({"Referer": Referer}) if body == None: ret = self.session.get(url) else: self.session.headers.update( {"Content-Type": ("application/json" if JsonBody else "application/x-www-form-urlencoded")}) ret = self.session.post(url, data=( json.dumps(body) if JsonBody else body)) if parseJson: return json.loads(ret.text) else: return ret def decrypt(self, text): k = pyDes.des(self.key, pyDes.CBC, b"\x01\x02\x03\x04\x05\x06\x07\x08", pad=None, padmode=pyDes.PAD_PKCS5) ret = k.decrypt(base64.b64decode(text)) return ret.decode() def checkNeedCaptcha(self, username): url = "https://{host}/iap/checkNeedCaptcha?username={username}".format( host=self.host, username=username) ret = self.session.get(url) ret = json.loads(ret.text) return ret["needCaptcha"] def generateCaptcha(self): # url = "https://{host}/iap/generateCaptcha?ltId={client}&codeType=2".format(host=self.host,client=self.client) # ret = self.session.get(url) # return ret.content pass def getBasicInfo(self): return self.request("https://{host}/iap/tenant/basicInfo", "{}") def 
login(self, username, password, captcha=""):
        if "campusphere" in self.loginUrl:
            return self.loginIAP(username, password, captcha)
        else:
            return self.loginAuthserver(username, password, captcha)

    def loginIAP(self, username, password, captcha=""):
        self.session.headers.update({"X-Requested-With": "XMLHttpRequest"})
        ret = self.session.get(
            "https://{host}/iap/l
username}&pwdEncrypt2=pwdEncryptSalt".format( username=username), parseJson=False).text return ret == "true" def loginAuthserver(self, username, password, captcha=""): ret = self.request(self.loginUrl, parseJson=False) body = dict(re.findall( r'''<input type="hidden" name="(.*?)" value="(.*?)"''', ret.text)) salt = dict(re.findall( r'''<input type="hidden" id="(.*?)" value="(.*?)"''', ret.text)) body["username"] = username body["dllt"] = "userNamePasswordLogin" if "pwdDefaultEncryptSalt" in salt.keys(): body["password"] = self.passwordEncrypt( password, salt["pwdDefaultEncryptSalt"]) else: body["password"] = password ret = self.request(ret.url, body, False, False, Referer=self.loginUrl).url print(self.session.cookies) print("本函数不一定能用。") return True def getCollectorList(self): body = { "pageSize": 10, "pageNumber": 1 } ret = self.request( "https://{host}/wec-counselor-collector-apps/stu/collector/queryCollectorProcessingList", body) return ret["datas"]["rows"] def getNoticeList(self): body = { "pageSize": 10, "pageNumber": 1 } ret = self.request( "https://{host}/wec-counselor-stu-apps/stu/notice/queryProcessingNoticeList", body) return ret["datas"]["rows"] def confirmNotice(self, wid): body = { "wid": wid } ret = self.request( "https://{host}/wec-counselor-stu-apps/stu/notice/confirmNotice", body) print(ret["message"]) return ret["message"] == "SUCCESS" def getCollectorDetail(self, collectorWid): body = { "collectorWid": collectorWid } return self.request("https://{host}/wec-counselor-collector-apps/stu/collector/detailCollector", body)["datas"] def getCollectorFormFiled(self, formWid, collectorWid): body = { "pageSize": 50, "pageNumber": 1, "formWid": formWid, "collectorWid": collectorWid } return self.request("https://{host}/wec-counselor-collector-apps/stu/collector/getFormFields", body)["datas"]["rows"] def submitCollectorForm(self, formWid, collectWid, schoolTaskWid, rows, address): body = { "formWid": formWid, "collectWid": collectWid, "schoolTaskWid": schoolTaskWid, "form": rows, "address": address } ret = self.request( "https://{host}/wec-counselor-collector-apps/stu/collector/submitForm", body) print(ret["message"]) return ret["message"] == "SUCCESS" def autoFill(self, rows): for item in rows: index = 0 while index < len(item["fieldItems"]): if item["fieldItems"][index]["isSelected"] == 1: index = index + 1 else: item["fieldItems"].pop(index) def getFormCharac(self, detail): ret = self.request(detail["content"], parseJson=False, JsonBody=False) return hashlib.md5(ret.content).digest().hex() def autoComplete(self, address, dbpath): collectList = self.getCollectorList() print(collectList) for item in collectList: # if item["isHandled"] == True:continue detail = self.getCollectorDetail(item["wid"]) form = self.getCollectorFormFiled( detail["collector"]["formWid"], detail["collector"]["wid"]) formpath = "{dbpath}/{charac}.json".format( charac=self.getFormCharac(item), dbpath=dbpath) if os.path.exists(formpath): with open(formpath, "rb") as file: def find(l, key_valueList: list): for item in l: b = True for k_v in key_valueList: if item[k_v[0]] != k_v[1]: b = False if b: return item return None newForm = form form = json.loads(file.read().decode("utf-8")) for item in newForm: l = find(form, [['title', item['title']], [ 'description', item['description']]]) item['value'] = l['value'] for fieldItemsList in item['fieldItems']: field = find(l['fieldItems'], [ ['content', fieldItemsList['content']]]) fieldItemsList['isSelected'] = field['isSelected'] form = newForm self.autoFill(form) 
self.submitCollectorForm(detail["collector"]["formWid"], detail["collector"]["wid"], detail["collector"]["schoolTaskWid"], form, address)
            else:
                with open(formpath, "wb") as file:
                    file.write(json.dumps(form, ensure_ascii=False).encode("utf-8"))
                print("请手动填写{formpath},之后重新运行脚本".format(formpath=formpath))  # please fill in {formpath} manually, then rerun the script
                exit()
        confirmList = self.getNoticeList()
        print(confirmList)
        for item in confirmList:
            self.confirmNotice(item["noticeWid"])


if __name__ == "__main__":
    if len(sys.argv) != 6:
        # usage: full school name, student ID, password, location address, absolute path to the formdb folder
        print("python3 DailyCp.py 学校全名 学号 密码 定位地址 formdb文件夹绝对路径")
        exit()
    app = DailyCP(sys.argv[1])
    if not app.login(sys.argv[2], sys.argv[3]):
        exit()
    app.autoComplete(sys.argv[4], sys.argv[5])

# Author: HuangXu, FengXinYang, ZhouYuYang.
# By: AUST HACKER
# 2020/5/20 Major update: fixed the login flow, removed the captcha step (not needed), tidied the code formatting. Thanks to giteee for the prompt feedback.
# 2020/5/28 Switched to fetching the school URL automatically, moved to command-line arguments, and added support for the alternative AuthServer login flow (implemented but untested). Thanks to 柠火 for the feedback.
# 2020/6/1 Bug fixes; it turns out the AuthServer login flow differs from school to school. Added support for customizing arbitrary form content (see the output messages and formdb/1129.json). Thanks to 涅灵 for the feedback.
# 2020/6/2 The AuthServer login URL is no longer hard-coded, so in theory every school should now be supported. Thanks to 涅灵 for the feedback.
# 2020/6/17 Fixed the relative-path issue when running from crontab. Added form fingerprint detection.
# 2020/7/5 Wasting other people's time is a shameful act.
ogin?service=https://{host}/portal/login".format(host=self.host)).url
        client = ret[ret.find("=")+1:]
        ret = self.request("https://{host}/iap/security/lt",
                           "lt={client}".format(client=client), True, False)
        client = ret["result"]["_lt"]
        # self.encryptSalt = ret["result"]["_encryptSalt"]
        body = {
            "username": username,
            "password": password,
            "lt": client,
            "captcha": captcha,
            "rememberMe": "true",
            "dllt": "",
            "mobile": ""
        }
        ret = self.request("https://{host}/iap/doLogin", body, True, False)
        if ret["resultCode"] == "REDIRECT":
            self.session.get(ret["url"])
            return True
        else:
            return False

    def checkNeedCaptchaAuthServer(self, username):
        ret = self.request("http://{host}/authserver/needCaptcha.html?username={
identifier_body
DailyCP.py
import requests import json import io import random import time import re import pyDes import base64 import uuid import sys import os import hashlib from Crypto.Cipher import AES class DailyCP: def __init__(self, schoolName="北部湾大学"): self.key = "ST83=@XV" # dynamic when app update self.session = requests.session() self.host = "" self.loginUrl = "" self.isIAPLogin = True self.session.headers.update({ "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.61 Safari/537.36 Edg/83.0.478.37", # "X-Requested-With": "XMLHttpRequest", "Pragma": "no-cache", "Accept": "application/json, text/plain, */*", # "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9", # "User-Agent": "okhttp/3.12.4" }) extension = {"deviceId": str(uuid.uuid4()), "systemName": "未来操作系统", "userId": "5201314", "appVersion": "8.1.13", "model": "红星一号量子计算机", "lon": 0.0, "systemVersion": "初号机", "lat": 0.0} self.session.headers.update( {"Cpdaily-Extension": self.encrypt(json.dumps(extension))}) self.setHostBySchoolName(schoolName) def setHostBySchoolName(self, schoolName): ret = self.request( "https://static.campushoy.com/apicache/tenantListSort") school = [j for i in ret["data"] for j in i["datas"] if j["name"] == schoolName] if len(school) == 0: print("不支持的学校或者学校名称错误,以下是支持的学校列表") print(ret) exit() ret = self.request( "https://mobile.campushoy.com/v6/config/guest/tenant/info?ids={ids}".format(ids=school[0]["id"])) self.loginUrl = ret["data"][0]["ampUrl"] if ret == "": print("学校并没有申请入驻今日校园平台") exit() print("{name}的登录地址{url}".format(name=schoo
lf.loginUrl)[0] def encrypt(self, text): k = pyDes.des(self.key, pyDes.CBC, b"\x01\x02\x03\x04\x05\x06\x07\x08", pad=None, padmode=pyDes.PAD_PKCS5) ret = k.encrypt(text) return base64.b64encode(ret).decode() def passwordEncrypt(self, text: str, key: str): def pad(s): return s + (len(key) - len(s) % len(key)) * chr(len(key) - len(s) % len(key)) def unpad(s): return s[:-ord(s[len(s) - 1:])] text = pad( "TdEEGazAXQMBzEAisrYaxRRax5kmnMJnpbKxcE6jxQfWRwP2J78adKYm8WzSkfXJ"+text).encode("utf-8") aes = AES.new(str.encode(key), AES.MODE_CBC, str.encode("ya8C45aRrBEn8sZH")) return base64.b64encode(aes.encrypt(text)) def request(self, url: str, body=None, parseJson=True, JsonBody=True, Referer=None): url = url.format(host=self.host) if Referer != None: self.session.headers.update({"Referer": Referer}) if body == None: ret = self.session.get(url) else: self.session.headers.update( {"Content-Type": ("application/json" if JsonBody else "application/x-www-form-urlencoded")}) ret = self.session.post(url, data=( json.dumps(body) if JsonBody else body)) if parseJson: return json.loads(ret.text) else: return ret def decrypt(self, text): k = pyDes.des(self.key, pyDes.CBC, b"\x01\x02\x03\x04\x05\x06\x07\x08", pad=None, padmode=pyDes.PAD_PKCS5) ret = k.decrypt(base64.b64decode(text)) return ret.decode() def checkNeedCaptcha(self, username): url = "https://{host}/iap/checkNeedCaptcha?username={username}".format( host=self.host, username=username) ret = self.session.get(url) ret = json.loads(ret.text) return ret["needCaptcha"] def generateCaptcha(self): # url = "https://{host}/iap/generateCaptcha?ltId={client}&codeType=2".format(host=self.host,client=self.client) # ret = self.session.get(url) # return ret.content pass def getBasicInfo(self): return self.request("https://{host}/iap/tenant/basicInfo", "{}") def login(self, username, password, captcha=""): if "campusphere" in self.loginUrl: return self.loginIAP(username, password, captcha) else: return self.loginAuthserver(username, password, captcha) def loginIAP(self, username, password, captcha=""): self.session.headers.update({"X-Requested-With": "XMLHttpRequest"}) ret = self.session.get( "https://{host}/iap/login?service=https://{host}/portal/login".format(host=self.host)).url client = ret[ret.find("=")+1:] ret = self.request("https://{host}/iap/security/lt", "lt={client}".format(client=client), True, False) client = ret["result"]["_lt"] # self.encryptSalt = ret["result"]["_encryptSalt"] body = { "username": username, "password": password, "lt": client, "captcha": captcha, "rememberMe": "true", "dllt": "", "mobile": "" } ret = self.request("https://{host}/iap/doLogin", body, True, False) if ret["resultCode"] == "REDIRECT": self.session.get(ret["url"]) return True else: return False def checkNeedCaptchaAuthServer(self, username): ret = self.request("http://{host}/authserver/needCaptcha.html?username={username}&pwdEncrypt2=pwdEncryptSalt".format( username=username), parseJson=False).text return ret == "true" def loginAuthserver(self, username, password, captcha=""): ret = self.request(self.loginUrl, parseJson=False) body = dict(re.findall( r'''<input type="hidden" name="(.*?)" value="(.*?)"''', ret.text)) salt = dict(re.findall( r'''<input type="hidden" id="(.*?)" value="(.*?)"''', ret.text)) body["username"] = username body["dllt"] = "userNamePasswordLogin" if "pwdDefaultEncryptSalt" in salt.keys(): body["password"] = self.passwordEncrypt( password, salt["pwdDefaultEncryptSalt"]) else: body["password"] = password ret = self.request(ret.url, body, False, False, 
Referer=self.loginUrl).url print(self.session.cookies) print("本函数不一定能用。") return True def getCollectorList(self): body = { "pageSize": 10, "pageNumber": 1 } ret = self.request( "https://{host}/wec-counselor-collector-apps/stu/collector/queryCollectorProcessingList", body) return ret["datas"]["rows"] def getNoticeList(self): body = { "pageSize": 10, "pageNumber": 1 } ret = self.request( "https://{host}/wec-counselor-stu-apps/stu/notice/queryProcessingNoticeList", body) return ret["datas"]["rows"] def confirmNotice(self, wid): body = { "wid": wid } ret = self.request( "https://{host}/wec-counselor-stu-apps/stu/notice/confirmNotice", body) print(ret["message"]) return ret["message"] == "SUCCESS" def getCollectorDetail(self, collectorWid): body = { "collectorWid": collectorWid } return self.request("https://{host}/wec-counselor-collector-apps/stu/collector/detailCollector", body)["datas"] def getCollectorFormFiled(self, formWid, collectorWid): body = { "pageSize": 50, "pageNumber": 1, "formWid": formWid, "collectorWid": collectorWid } return self.request("https://{host}/wec-counselor-collector-apps/stu/collector/getFormFields", body)["datas"]["rows"] def submitCollectorForm(self, formWid, collectWid, schoolTaskWid, rows, address): body = { "formWid": formWid, "collectWid": collectWid, "schoolTaskWid": schoolTaskWid, "form": rows, "address": address } ret = self.request( "https://{host}/wec-counselor-collector-apps/stu/collector/submitForm", body) print(ret["message"]) return ret["message"] == "SUCCESS" def autoFill(self, rows): for item in rows: index = 0 while index < len(item["fieldItems"]): if item["fieldItems"][index]["isSelected"] == 1: index = index + 1 else: item["fieldItems"].pop(index) def getFormCharac(self, detail): ret = self.request(detail["content"], parseJson=False, JsonBody=False) return hashlib.md5(ret.content).digest().hex() def autoComplete(self, address, dbpath): collectList = self.getCollectorList() print(collectList) for item in collectList: # if item["isHandled"] == True:continue detail = self.getCollectorDetail(item["wid"]) form = self.getCollectorFormFiled( detail["collector"]["formWid"], detail["collector"]["wid"]) formpath = "{dbpath}/{charac}.json".format( charac=self.getFormCharac(item), dbpath=dbpath) if os.path.exists(formpath): with open(formpath, "rb") as file: def find(l, key_valueList: list): for item in l: b = True for k_v in key_valueList: if item[k_v[0]] != k_v[1]: b = False if b: return item return None newForm = form form = json.loads(file.read().decode("utf-8")) for item in newForm: l = find(form, [['title', item['title']], [ 'description', item['description']]]) item['value'] = l['value'] for fieldItemsList in item['fieldItems']: field = find(l['fieldItems'], [ ['content', fieldItemsList['content']]]) fieldItemsList['isSelected'] = field['isSelected'] form = newForm self.autoFill(form) self.submitCollectorForm(detail["collector"]["formWid"], detail["collector"] ["wid"], detail["collector"]["schoolTaskWid"], form, address) else: with open(formpath, "wb") as file: file.write(json.dumps( form, ensure_ascii=False).encode("utf-8")) print("请手动填写{formpath},之后重新运行脚本".format(formpath=formpath)) exit() confirmList = self.getNoticeList() print(confirmList) for item in confirmList: self.confirmNotice(item["noticeWid"]) if __name__ == "__main__": if len(sys.argv) != 6: print("python3 DailyCp.py 学校全名 学号 密码 定位地址 formdb文件夹绝对路径") exit() app = DailyCP(sys.argv[1]) if not app.login(sys.argv[2], sys.argv[3]): exit() app.autoComplete(sys.argv[4], sys.argv[5]) # 
Author: HuangXu, FengXinYang, ZhouYuYang.
# By: AUST HACKER
# 2020/5/20 Major update: fixed the login flow, removed the captcha step (not needed), tidied the code formatting. Thanks to giteee for the prompt feedback.
# 2020/5/28 Switched to fetching the school URL automatically, moved to command-line arguments, and added support for the alternative AuthServer login flow (implemented but untested). Thanks to 柠火 for the feedback.
# 2020/6/1 Bug fixes; it turns out the AuthServer login flow differs from school to school. Added support for customizing arbitrary form content (see the output messages and formdb/1129.json). Thanks to 涅灵 for the feedback.
# 2020/6/2 The AuthServer login URL is no longer hard-coded, so in theory every school should now be supported. Thanks to 涅灵 for the feedback.
# 2020/6/17 Fixed the relative-path issue when running from crontab. Added form fingerprint detection.
# 2020/7/5 Wasting other people's time is a shameful act.
lName, url=self.loginUrl)) self.host = re.findall(r"//(.*?)/", se
conditional_block
DailyCP.py
import requests import json import io import random import time import re import pyDes import base64 import uuid import sys import os import hashlib from Crypto.Cipher import AES class DailyCP: def __init__(self, schoolName="北部湾大学"): self.key = "ST83=@XV" # dynamic when app update self.session = requests.session() self.host = "" self.loginUrl = "" self.isIAPLogin = True self.session.headers.update({ "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.61 Safari/537.36 Edg/83.0.478.37", # "X-Requested-With": "XMLHttpRequest", "Pragma": "no-cache", "Accept": "application/json, text/plain, */*", # "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9", # "User-Agent": "okhttp/3.12.4" }) extension = {"deviceId": str(uuid.uuid4()), "systemName": "未来操作系统", "userId": "5201314", "appVersion": "8.1.13", "model": "红星一号量子计算机", "lon": 0.0, "systemVersion": "初号机", "lat": 0.0} self.session.headers.update( {"Cpdaily-Extension": self.encrypt(json.dumps(extension))}) self.setHostBySchoolName(schoolName) def setHostBySchoolName(self, schoolName):
( "https://static.campushoy.com/apicache/tenantListSort") school = [j for i in ret["data"] for j in i["datas"] if j["name"] == schoolName] if len(school) == 0: print("不支持的学校或者学校名称错误,以下是支持的学校列表") print(ret) exit() ret = self.request( "https://mobile.campushoy.com/v6/config/guest/tenant/info?ids={ids}".format(ids=school[0]["id"])) self.loginUrl = ret["data"][0]["ampUrl"] if ret == "": print("学校并没有申请入驻今日校园平台") exit() print("{name}的登录地址{url}".format(name=schoolName, url=self.loginUrl)) self.host = re.findall(r"//(.*?)/", self.loginUrl)[0] def encrypt(self, text): k = pyDes.des(self.key, pyDes.CBC, b"\x01\x02\x03\x04\x05\x06\x07\x08", pad=None, padmode=pyDes.PAD_PKCS5) ret = k.encrypt(text) return base64.b64encode(ret).decode() def passwordEncrypt(self, text: str, key: str): def pad(s): return s + (len(key) - len(s) % len(key)) * chr(len(key) - len(s) % len(key)) def unpad(s): return s[:-ord(s[len(s) - 1:])] text = pad( "TdEEGazAXQMBzEAisrYaxRRax5kmnMJnpbKxcE6jxQfWRwP2J78adKYm8WzSkfXJ"+text).encode("utf-8") aes = AES.new(str.encode(key), AES.MODE_CBC, str.encode("ya8C45aRrBEn8sZH")) return base64.b64encode(aes.encrypt(text)) def request(self, url: str, body=None, parseJson=True, JsonBody=True, Referer=None): url = url.format(host=self.host) if Referer != None: self.session.headers.update({"Referer": Referer}) if body == None: ret = self.session.get(url) else: self.session.headers.update( {"Content-Type": ("application/json" if JsonBody else "application/x-www-form-urlencoded")}) ret = self.session.post(url, data=( json.dumps(body) if JsonBody else body)) if parseJson: return json.loads(ret.text) else: return ret def decrypt(self, text): k = pyDes.des(self.key, pyDes.CBC, b"\x01\x02\x03\x04\x05\x06\x07\x08", pad=None, padmode=pyDes.PAD_PKCS5) ret = k.decrypt(base64.b64decode(text)) return ret.decode() def checkNeedCaptcha(self, username): url = "https://{host}/iap/checkNeedCaptcha?username={username}".format( host=self.host, username=username) ret = self.session.get(url) ret = json.loads(ret.text) return ret["needCaptcha"] def generateCaptcha(self): # url = "https://{host}/iap/generateCaptcha?ltId={client}&codeType=2".format(host=self.host,client=self.client) # ret = self.session.get(url) # return ret.content pass def getBasicInfo(self): return self.request("https://{host}/iap/tenant/basicInfo", "{}") def login(self, username, password, captcha=""): if "campusphere" in self.loginUrl: return self.loginIAP(username, password, captcha) else: return self.loginAuthserver(username, password, captcha) def loginIAP(self, username, password, captcha=""): self.session.headers.update({"X-Requested-With": "XMLHttpRequest"}) ret = self.session.get( "https://{host}/iap/login?service=https://{host}/portal/login".format(host=self.host)).url client = ret[ret.find("=")+1:] ret = self.request("https://{host}/iap/security/lt", "lt={client}".format(client=client), True, False) client = ret["result"]["_lt"] # self.encryptSalt = ret["result"]["_encryptSalt"] body = { "username": username, "password": password, "lt": client, "captcha": captcha, "rememberMe": "true", "dllt": "", "mobile": "" } ret = self.request("https://{host}/iap/doLogin", body, True, False) if ret["resultCode"] == "REDIRECT": self.session.get(ret["url"]) return True else: return False def checkNeedCaptchaAuthServer(self, username): ret = self.request("http://{host}/authserver/needCaptcha.html?username={username}&pwdEncrypt2=pwdEncryptSalt".format( username=username), parseJson=False).text return ret == "true" def loginAuthserver(self, username, 
password, captcha=""): ret = self.request(self.loginUrl, parseJson=False) body = dict(re.findall( r'''<input type="hidden" name="(.*?)" value="(.*?)"''', ret.text)) salt = dict(re.findall( r'''<input type="hidden" id="(.*?)" value="(.*?)"''', ret.text)) body["username"] = username body["dllt"] = "userNamePasswordLogin" if "pwdDefaultEncryptSalt" in salt.keys(): body["password"] = self.passwordEncrypt( password, salt["pwdDefaultEncryptSalt"]) else: body["password"] = password ret = self.request(ret.url, body, False, False, Referer=self.loginUrl).url print(self.session.cookies) print("本函数不一定能用。") return True def getCollectorList(self): body = { "pageSize": 10, "pageNumber": 1 } ret = self.request( "https://{host}/wec-counselor-collector-apps/stu/collector/queryCollectorProcessingList", body) return ret["datas"]["rows"] def getNoticeList(self): body = { "pageSize": 10, "pageNumber": 1 } ret = self.request( "https://{host}/wec-counselor-stu-apps/stu/notice/queryProcessingNoticeList", body) return ret["datas"]["rows"] def confirmNotice(self, wid): body = { "wid": wid } ret = self.request( "https://{host}/wec-counselor-stu-apps/stu/notice/confirmNotice", body) print(ret["message"]) return ret["message"] == "SUCCESS" def getCollectorDetail(self, collectorWid): body = { "collectorWid": collectorWid } return self.request("https://{host}/wec-counselor-collector-apps/stu/collector/detailCollector", body)["datas"] def getCollectorFormFiled(self, formWid, collectorWid): body = { "pageSize": 50, "pageNumber": 1, "formWid": formWid, "collectorWid": collectorWid } return self.request("https://{host}/wec-counselor-collector-apps/stu/collector/getFormFields", body)["datas"]["rows"] def submitCollectorForm(self, formWid, collectWid, schoolTaskWid, rows, address): body = { "formWid": formWid, "collectWid": collectWid, "schoolTaskWid": schoolTaskWid, "form": rows, "address": address } ret = self.request( "https://{host}/wec-counselor-collector-apps/stu/collector/submitForm", body) print(ret["message"]) return ret["message"] == "SUCCESS" def autoFill(self, rows): for item in rows: index = 0 while index < len(item["fieldItems"]): if item["fieldItems"][index]["isSelected"] == 1: index = index + 1 else: item["fieldItems"].pop(index) def getFormCharac(self, detail): ret = self.request(detail["content"], parseJson=False, JsonBody=False) return hashlib.md5(ret.content).digest().hex() def autoComplete(self, address, dbpath): collectList = self.getCollectorList() print(collectList) for item in collectList: # if item["isHandled"] == True:continue detail = self.getCollectorDetail(item["wid"]) form = self.getCollectorFormFiled( detail["collector"]["formWid"], detail["collector"]["wid"]) formpath = "{dbpath}/{charac}.json".format( charac=self.getFormCharac(item), dbpath=dbpath) if os.path.exists(formpath): with open(formpath, "rb") as file: def find(l, key_valueList: list): for item in l: b = True for k_v in key_valueList: if item[k_v[0]] != k_v[1]: b = False if b: return item return None newForm = form form = json.loads(file.read().decode("utf-8")) for item in newForm: l = find(form, [['title', item['title']], [ 'description', item['description']]]) item['value'] = l['value'] for fieldItemsList in item['fieldItems']: field = find(l['fieldItems'], [ ['content', fieldItemsList['content']]]) fieldItemsList['isSelected'] = field['isSelected'] form = newForm self.autoFill(form) self.submitCollectorForm(detail["collector"]["formWid"], detail["collector"] ["wid"], detail["collector"]["schoolTaskWid"], form, address) else: with 
open(formpath, "wb") as file: file.write(json.dumps( form, ensure_ascii=False).encode("utf-8")) print("请手动填写{formpath},之后重新运行脚本".format(formpath=formpath)) exit() confirmList = self.getNoticeList() print(confirmList) for item in confirmList: self.confirmNotice(item["noticeWid"]) if __name__ == "__main__": if len(sys.argv) != 6: print("python3 DailyCp.py 学校全名 学号 密码 定位地址 formdb文件夹绝对路径") exit() app = DailyCP(sys.argv[1]) if not app.login(sys.argv[2], sys.argv[3]): exit() app.autoComplete(sys.argv[4], sys.argv[5]) # Author:HuangXu,FengXinYang,ZhouYuYang. # By:AUST HACKER # 2020/5/20 重要更新:修复登录过程,移除验证码(不需要),优化代码格式,感谢giteee及时反馈。 # 2020/5/28 更改为使用自动获取学校URL的方式,更改为使用参数形式,添加另一种登录形式AuthServer的支持(已完成但未测试)。感谢柠火的反馈。 # 2020/6/1 修复BUG,发现AuthServer的登录方式每个学校都不一样。支持任意表单内容自定义(详情见输出信息和formdb/1129.json)。感谢涅灵的反馈。 # 2020/6/2 AuthServer的登录网址不再使用硬编码的方式,理论上能支持所有学校了吧?感谢涅灵的反馈。 # 2020/6/17 修复crontab使用中相对路径的问题。识别form特征。 # 2020/7/5 浪费别人的时间是一种可耻的行为。
ret = self.request
identifier_name
DailyCP.py
import requests import json import io import random import time import re import pyDes import base64 import uuid import sys import os import hashlib from Crypto.Cipher import AES class DailyCP: def __init__(self, schoolName="北部湾大学"): self.key = "ST83=@XV" # dynamic when app update self.session = requests.session() self.host = "" self.loginUrl = "" self.isIAPLogin = True self.session.headers.update({ "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.61 Safari/537.36 Edg/83.0.478.37", # "X-Requested-With": "XMLHttpRequest", "Pragma": "no-cache", "Accept": "application/json, text/plain, */*", # "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9", # "User-Agent": "okhttp/3.12.4" }) extension = {"deviceId": str(uuid.uuid4()), "systemName": "未来操作系统", "userId": "5201314", "appVersion": "8.1.13", "model": "红星一号量子计算机", "lon": 0.0, "systemVersion": "初号机", "lat": 0.0} self.session.headers.update( {"Cpdaily-Extension": self.encrypt(json.dumps(extension))}) self.setHostBySchoolName(schoolName) def setHostBySchoolName(self, schoolName): ret = self.request( "https://static.campushoy.com/apicache/tenantListSort") school = [j for i in ret["data"] for j in i["datas"] if j["name"] == schoolName] if len(school) == 0: print("不支持的学校或者学校名称错误,以下是支持的学校列表") print(ret) exit() ret = self.request( "https://mobile.campushoy.com/v6/config/guest/tenant/info?ids={ids}".format(ids=school[0]["id"])) self.loginUrl = ret["data"][0]["ampUrl"] if ret == "": print("学校并没有申请入驻今日校园平台") exit() print("{name}的登录地址{url}".format(name=schoolName, url=self.loginUrl)) self.host = re.findall(r"//(.*?)/", self.loginUrl)[0] def encrypt(self, text): k = pyDes.des(self.key, pyDes.CBC, b"\x01\x02\x03\x04\x05\x06\x07\x08", pad=None, padmode=pyDes.PAD_PKCS5) ret = k.encrypt(text) return base64.b64encode(ret).decode() def passwordEncrypt(self, text: str, key: str): def pad(s): return s + (len(key) - len(s) % len(key)) * chr(len(key) - len(s) % len(key)) def unpad(s): return s[:-ord(s[len(s) - 1:])] text = pad( "TdEEGazAXQMBzEAisrYaxRRax5kmnMJnpbKxcE6jxQfWRwP2J78adKYm8WzSkfXJ"+text).encode("utf-8") aes = AES.new(str.encode(key), AES.MODE_CBC, str.encode("ya8C45aRrBEn8sZH")) return base64.b64encode(aes.encrypt(text)) def request(self, url: str, body=None, parseJson=True, JsonBody=True, Referer=None): url = url.format(host=self.host) if Referer != None: self.session.headers.update({"Referer": Referer}) if body == None: ret = self.session.get(url) else: self.session.headers.update( {"Content-Type": ("application/json" if JsonBody else "application/x-www-form-urlencoded")}) ret = self.session.post(url, data=( json.dumps(body) if JsonBody else body)) if parseJson: return json.loads(ret.text) else: return ret def decrypt(self, text): k = pyDes.des(self.key, pyDes.CBC, b"\x01\x02\x03\x04\x05\x06\x07\x08", pad=None, padmode=pyDes.PAD_PKCS5) ret = k.decrypt(base64.b64decode(text)) return ret.decode() def checkNeedCaptcha(self, username): url = "https://{host}/iap/checkNeedCaptcha?username={username}".format( host=self.host, username=username) ret = self.session.get(url) ret = json.loads(ret.text) return ret["needCaptcha"] def generateCaptcha(self): # url = "https://{host}/iap/generateCaptcha?ltId={client}&codeType=2".format(host=self.host,client=self.client) # ret = self.session.get(url) # return ret.content pass def getBasicInfo(self): return self.request("https://{host}/iap/tenant/basicInfo", "{}") def 
login(self, username, password, captcha=""): if "campusphere" in self.loginUrl: return self.loginIAP(username, password, captcha) else: return self.loginAuthserver(username, password, captcha) def loginIAP(self, username, password, captcha=""): self.session.headers.update({"X-Requested-With": "XMLHttpRequest"}) ret = self.session.get( "https://{host}/iap/login?service=https://{host}/portal/login".format(host=self.host)).url client = ret[ret.find("=")+1:] ret = self.request("https://{host}/iap/security/lt", "lt={client}".format(client=client), True, False) client = ret["result"]["_lt"] # self.encryptSalt = ret["result"]["_encryptSalt"] body = { "username": username, "password": password, "lt": client, "captcha": captcha, "rememberMe": "true", "dllt": "", "mobile": "" } ret = self.request("https://{host}/iap/doLogin", body, True, False) if ret["resultCode"] == "REDIRECT": self.session.get(ret["url"]) return True else: return False def checkNeedCaptchaAuthServer(self, username): ret = self.request("http://{host}/authserver/needCaptcha.html?username={username}&pwdEncrypt2=pwdEncryptSalt".format( username=username), parseJson=False).text return ret == "true" def loginAuthserver(self, username, password, captcha=""): ret = self.request(self.loginUrl, parseJson=False) body = dict(re.findall( r'''<input type="hidden" name="(.*?)" value="(.*?)"''', ret.text)) salt = dict(re.findall( r'''<input type="hidden" id="(.*?)" value="(.*?)"''', ret.text)) body["username"] = username body["dllt"] = "userNamePasswordLogin" if "pwdDefaultEncryptSalt" in salt.keys(): body["password"] = self.passwordEncrypt( password, salt["pwdDefaultEncryptSalt"]) else: body["password"] = password ret = self.request(ret.url, body, False, False, Referer=self.loginUrl).url print(self.session.cookies) print("本函数不一定能用。") return True def getCollectorList(self): body = { "pageSize": 10, "pageNumber": 1 } ret = self.request( "https://{host}/wec-counselor-collector-apps/stu/collector/queryCollectorProcessingList", body) return ret["datas"]["rows"] def getNoticeList(self): body = { "pageSize": 10, "pageNumber": 1 } ret = self.request( "https://{host}/wec-counselor-stu-apps/stu/notice/queryProcessingNoticeList", body) return ret["datas"]["rows"] def confirmNotice(self, wid): body = { "wid": wid } ret = self.request( "https://{host}/wec-counselor-stu-apps/stu/notice/confirmNotice", body) print(ret["message"]) return ret["message"] == "SUCCESS" def getCollectorDetail(self, collectorWid): body = { "collectorWid": collectorWid } return self.request("https://{host}/wec-counselor-collector-apps/stu/collector/detailCollector", body)["datas"] def getCollectorFormFiled(self, formWid, collectorWid): body = { "pageSize": 50, "pageNumber": 1, "formWid": formWid, "collectorWid": collectorWid } return self.request("https://{host}/wec-counselor-collector-apps/stu/collector/getFormFields", body)["datas"]["rows"] def submitCollectorForm(self, formWid, collectWid, schoolTaskWid, rows, address): body = { "formWid": formWid, "collectWid": collectWid, "schoolTaskWid": schoolTaskWid, "form": rows, "address": address } ret = self.request( "https://{host}/wec-counselor-collector-apps/stu/collector/submitForm", body) print(ret["message"]) return ret["message"] == "SUCCESS" def autoFill(self, rows): for item in rows: index = 0 while index < len(item["fieldItems"]): if item["fieldItems"][index]["isSelected"] == 1: index = index + 1 else: item["fieldItems"].pop(index) def getFormCharac(self, detail): ret = self.request(detail["content"], parseJson=False, 
JsonBody=False) return hashlib.md5(ret.content).digest().hex() def autoComplete(self, address, dbpath): collectList = self.getCollectorList() print(collectList) for item in collectList: # if item["isHandled"] == True:continue detail = self.getCollectorDetail(item["wid"]) form = self.getCollectorFormFiled( detail["collector"]["formWid"], detail["collector"]["wid"]) formpath = "{dbpath}/{charac}.json".format( charac=self.getFormCharac(item), dbpath=dbpath) if os.path.exists(formpath): with open(formpath, "rb") as file: def find(l, key_valueList: list): for item in l: b = True for k_v in key_valueList: if item[k_v[0]] != k_v[1]: b = False if b: return item return None newForm = form form = json.loads(file.read().decode("utf-8")) for item in newForm: l = find(form, [['title', item['title']], [ 'description', item['description']]]) item['value'] = l['value'] for fieldItemsList in item['fieldItems']: field = find(l['fieldItems'], [ ['content', fieldItemsList['content']]]) fieldItemsList['isSelected'] = field['isSelected'] form = newForm self.autoFill(form) self.submitCollectorForm(detail["collector"]["formWid"], detail["collector"] ["wid"], detail["collector"]["schoolTaskWid"], form, address) else: with open(formpath, "wb") as file: file.write(json.dumps( form, ensure_ascii=False).encode("utf-8")) print("请手动填写{formpath},之后重新运行脚本".format(formpath=formpath)) exit() confirmList = self.getNoticeList() print(confirmList) for item in confirmList: self.confirmNotice(item["noticeWid"]) if __name__ == "__main__": if len(sys.argv) != 6: print("python3 DailyCp.py 学校全名 学号 密码 定位地址 formdb文件夹绝对路径") exit() app = DailyCP(sys.argv[1]) if not app.login(sys.argv[2], sys.argv[3]): exit()
# By: AUST HACKER
# 2020/5/20 Major update: fixed the login flow, removed the captcha step (not needed), tidied the code formatting. Thanks to giteee for the prompt feedback.
# 2020/5/28 Switched to fetching the school URL automatically, moved to command-line arguments, and added support for the alternative AuthServer login flow (implemented but untested). Thanks to 柠火 for the feedback.
# 2020/6/1 Bug fixes; it turns out the AuthServer login flow differs from school to school. Added support for customizing arbitrary form content (see the output messages and formdb/1129.json). Thanks to 涅灵 for the feedback.
# 2020/6/2 The AuthServer login URL is no longer hard-coded, so in theory every school should now be supported. Thanks to 涅灵 for the feedback.
# 2020/6/17 Fixed the relative-path issue when running from crontab. Added form fingerprint detection.
# 2020/7/5 Wasting other people's time is a shameful act.
app.autoComplete(sys.argv[4], sys.argv[5]) # Author:HuangXu,FengXinYang,ZhouYuYang.
random_line_split
yolo.py
import os import cv2 import numpy as np import ast from timeit import default_timer as timer from keras import backend as K from keras.layers import Input from PIL import Image, ImageFont, ImageDraw from shapely.geometry import Point from shapely.geometry.polygon import Polygon from model import evaluation, yolo_body right_clicks = list() polygon_list=[] Polygon_object_list=[] polygon_area_list=[] polygon_color_list=[(153,0,76), (0,204,204), (255,153,153), (102,204,0), (102,0,102)] poly_index=0 def letterbox_image(image, size): iw, ih = image.size w, h = size scale = min(w/iw, h/ih) nw = int(iw*scale) nh = int(ih*scale) image = image.resize((nw,nh), Image.BICUBIC) new_image = Image.new('RGB', size, (128,128,128)) new_image.paste(image, ((w-nw)//2, (h-nh)//2)) return new_image class YOLO(object): _defaults = { "model_path": 'model_data/yolo.h5', "anchors_path": 'model_data/yolo_anchors.txt', "classes_path": 'model_data/coco_classes.txt', "score" : 0.3, "iou" : 0.45, "model_image_size" : (416, 416), "gpu_num" : 1, } @classmethod def get_defaults(cls, n): if n in cls._defaults: return cls._defaults[n] else: return "Unrecognized attribute name '" + n + "'" def __init__(self, **kwargs): self.__dict__.update(self._defaults) self.__dict__.update(kwargs) self.class_names = self._get_class() self.anchors = self._get_anchors() self.sess = K.get_session() self.boxes, self.scores, self.classes = self.generate() def _get_class(self): classes_path = os.path.expanduser(self.classes_path) with open(classes_path) as f: class_names = f.readlines() class_names = [c.strip() for c in class_names] return class_names def _get_anchors(self): anchors_path = os.path.expanduser(self.anchors_path) with open(anchors_path) as f: anchors = f.readline() anchors = [float(x) for x in anchors.split(',')] return np.array(anchors).reshape(-1, 2) def generate(self): model_path = os.path.expanduser(self.model_path) assert model_path.endswith('.h5'), 'Keras model or weights must be a .h5 file.' start = timer() num_anchors = len(self.anchors) num_classes = len(self.class_names) self.yolo_model = yolo_body(Input(shape=(None,None,3)), num_anchors//3, num_classes) self.yolo_model.load_weights(self.model_path) end = timer() print('{} model, anchors, and classes loaded in {:.2f}sec.'.format(model_path, end-start)) self.colors = ['GreenYellow'] self.input_image_shape = K.placeholder(shape=(2, )) boxes, scores, classes = evaluation(self.yolo_model.output, self.anchors, len(self.class_names), self.input_image_shape, score_threshold=self.score, iou_threshold=self.iou) return boxes, scores, classes def detect_image(self, image,itr_number,th_mode,car_for_each_polygon_list,polygon_density,pixel_to_dist_ratio,polygon_dist_list_vel_mode,video_fps,velocity_and_view_time,th_low,th_high): start = timer() num_of_frames_for_mean=10 number_of_point_in_polygons=np.zeros((1,len(polygon_list)),dtype=int) vehicles_area_in_polygon = np.zeros((1, len(polygon_list)), dtype=float) number_of_frames=np.zeros((1,len(polygon_list)),dtype=int) if self.model_image_size != (None, None): assert self.model_image_size[0]%32 == 0, 'Multiples of 32 required' assert self.model_image_size[1]%32 == 0, 'Multiples of 32 required' boxed_image = letterbox_image(image, tuple(reversed(self.model_image_size))) image_data = np.array(boxed_image, dtype='float32') image_data /= 255. 
image_data = np.expand_dims(image_data, 0) out_boxes, out_scores, out_classes = self.sess.run( [self.boxes, self.scores, self.classes], feed_dict={ self.yolo_model.input: image_data, self.input_image_shape: [image.size[1], image.size[0]], K.learning_phase(): 0 }) if(itr_number==1): for i in range(len(polygon_list)): pts = np.array(right_clicks[np.sum(polygon_list[0:i], dtype=int):np.sum(polygon_list[0:i], dtype=int) + polygon_list[i]], np.int32) a1 = np.empty((polygon_list[i],), dtype=object) a1[:] = [tuple(j) for j in pts] polygon = Polygon(a1.tolist()) polygon_area_list.append(polygon.area) Polygon_object_list.append(polygon) for i in range(len(polygon_list)): pts = np.array(right_clicks[np.sum(polygon_list[0:i],dtype=int):np.sum(polygon_list[0:i],dtype=int)+polygon_list[i]], np.int32) image=cv2.polylines(np.array(image), [pts], True, polygon_color_list[i],thickness=2) image = Image.fromarray(image) out_prediction = [] font_path = os.path.join(os.path.dirname(__file__),'font/FiraMono-Medium.otf') font = ImageFont.truetype(font=font_path, size=np.floor(3e-2 * image.size[1] + 0.5).astype('int32')) thickness = (image.size[0] + image.size[1]) // 400 for i, c in reversed(list(enumerate(out_classes))): box = out_boxes[i] curr_box_x_center=(box[3]+box[1])/2 curr_box_y_center=(box[2]+box[0])/2 point=Point(curr_box_x_center,curr_box_y_center) index=np.where([poly.contains(point) for poly in Polygon_object_list])[0] if len(index)==0: continue number_of_point_in_polygons[0,index]+=1 curr_area=(box[3]-box[1])*(box[2]-box[0]) if 2400<curr_area<=4500: curr_area*=0.8 elif (4500<curr_area<=9000): curr_area *= 0.7 elif (9000<curr_area<=13000): curr_area *= 0.6 elif (13000<curr_area<=18000): curr_area *= 0.5 elif (curr_area>18000): curr_area *= 0.4 vehicles_area_in_polygon[0,index]+=curr_area score = out_scores[i] draw = ImageDraw.Draw(image) top, left, bottom, right = box top = max(0, np.floor(top + 0.5).astype('int32')) left = max(0, np.floor(left + 0.5).astype('int32')) bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32')) right = min(image.size[0], np.floor(right + 0.5).astype('int32')) if top > image.size[1] or right > image.size[0]: continue out_prediction.append([left, top, right, bottom, c, score]) for i in range(thickness): draw.rectangle( [left + i, top + i, right - i, bottom - i], outline=polygon_color_list[index[0]]) del draw car_for_each_polygon_list.insert(0,number_of_point_in_polygons[0]) polygon_density.insert(0,vehicles_area_in_polygon[0] / polygon_area_list) if th_mode=="velocity" and itr_number>1: for p in range(len(polygon_list)): if car_for_each_polygon_list[0][p]==0 and car_for_each_polygon_list[1][p]==1: number_of_frames[0,p]=np.where(np.array(car_for_each_polygon_list)[1:, p] == 0)[0][0] velocity_and_view_time[0,p]=video_fps velocity_and_view_time[1,p] = (np.array(polygon_dist_list_vel_mode)[p] * np.array(pixel_to_dist_ratio)[p] * 3.6) / (number_of_frames[0][p] / video_fps) if(itr_number>=num_of_frames_for_mean): mean_of_points_in_polygon=np.round(np.transpose(car_for_each_polygon_list)[:,0:num_of_frames_for_mean].sum(axis=1)/num_of_frames_for_mean).astype(int) mean_polygon_density=np.sum(polygon_density[0:10],axis=0)/num_of_frames_for_mean else: mean_of_points_in_polygon =number_of_point_in_polygons[0] mean_polygon_density=polygon_density[-1] draw = ImageDraw.Draw(image) font_number_of_vehicles = font font_number_of_vehicles.size = 40 rectangle_width=int(image.size[0] / 7) space_between_rect=0 if len(polygon_list)>1: space_between_rect = int((image.size[0] - 
len(polygon_list)*rectangle_width-40)/(len(polygon_list)-1)) if th_mode == "counting": mean_polygon=mean_of_points_in_polygon elif th_mode == "density": mean_polygon = mean_polygon_density else: mean_polygon = velocity_and_view_time[1,:] for c in range(len(polygon_list)): R,G,B=color_result(mean_polygon[c],th_low,th_high) draw.rectangle([tuple([10+c*(rectangle_width+space_between_rect), 60]), tuple([10 + c*(rectangle_width+space_between_rect)+rectangle_width, 60 + 40])], fill=(R, G, B)) draw.rectangle([tuple([10 + c * (rectangle_width + space_between_rect)+rectangle_width, 60]),tuple([10 + c * (rectangle_width + space_between_rect)+rectangle_width+20, 60 + 40])],fill=polygon_color_list[c]) if th_mode == "counting": draw.text([10+c*(rectangle_width+space_between_rect), 65], "vehicles:" + str(mean_polygon[c]), fill=(0, 0, 0),font=font_number_of_vehicles) elif th_mode == "density": draw.text([10 + c * (rectangle_width + space_between_rect), 65],'density:' + str(int(mean_polygon[c] * 100)) + '%', fill=(0, 0, 0), font=font_number_of_vehicles) else: if (mean_polygon[c]!=0) or (velocity_and_view_time[0,c]>0): draw.text([10 + c * (rectangle_width + space_between_rect), 65],'velocity:' + str(mean_polygon[c]) + 'kmh', fill=(0, 0, 0),font=font_number_of_vehicles) velocity_and_view_time[0,c]-=1 if velocity_and_view_time[0,c] ==0: velocity_and_view_time[1,c]=0 else: draw.text([10 + c * (rectangle_width + space_between_rect), 65], 'velocity:', fill=(0, 0, 0),font=font_number_of_vehicles) del draw end = timer() fps=round(1/(end - start)) return fps, out_prediction, image def close_session(self): self.sess.close() def detect_video(yolo, video_path,th_mode,th_low,th_high, define_regions, output_path="",input_path=""): car_for_each_polygon_list=[] polygon_density=[] pixel_to_dist_ratio=[] polygon_dist_list_vel_mode=[] velocity_and_view_time = np.zeros((2, len(polygon_color_list)), dtype=int) vid = cv2.VideoCapture(video_path) file_name = input_path[:input_path.rfind(".")] + ".txt" if not vid.isOpened(): raise IOError("Couldn't open webcam or video") video_FourCC = cv2.VideoWriter_fourcc(*'mp4v') video_fps = vid.get(cv2.CAP_PROP_FPS) video_size = (int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)), int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))) isOutput = True if output_path != "" else False if isOutput: print('Processing {} with frame size {} '.format(os.path.basename(video_path), video_size)) out = cv2.VideoWriter(output_path, video_FourCC, video_fps, video_size) n=0 first_frame=True global poly_index poly_index=0 while vid.isOpened(): n+=1 return_value, frame = vid.read() if first_frame==True: if define_regions == 1: if th_mode=="velocity": cv2.putText(frame, "Please define velocity regions (BR,BL,TL,TR) order", (int(frame.shape[1] / 4), 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2) else: cv2.putText(frame,"Please define regions",(int(frame.shape[1]/3),40),cv2.FONT_HERSHEY_SIMPLEX,1,(255,0,0),2) cv2.namedWindow('first_frame', cv2.WINDOW_NORMAL) cv2.setMouseCallback('first_frame', Mouse_Callback,param=frame) while (1): cv2.imshow('first_frame', frame/255) k=cv2.waitKey(10) if k==97: polygon_list.append(int(len(right_clicks)-np.sum(polygon_list))) pts = np.array(right_clicks[ np.sum(polygon_list[0:poly_index], dtype=int):np.sum(polygon_list[0:poly_index],dtype=int) + polygon_list[poly_index]], np.int32) B, G, R = polygon_color_list[poly_index] cv2.polylines(frame, [pts], True, (R,G,B), thickness=2) if th_mode == "velocity": polygon_dist_list_vel_mode.append(np.linalg.norm( ((pts[0,:]+pts[1,:])/2) - 
((pts[2,:]+pts[3,:])/2))) cv2.imshow('first_frame', frame / 255) k = cv2.waitKey(10) pixel_to_dist_ratio.append(float(input('Please insert pixel to real distance ratio:'))) poly_index+=1 if k == 27: polygon_list.append(int(len(right_clicks) - np.sum(polygon_list))) pts = np.array(right_clicks[np.sum(polygon_list[0:poly_index], dtype=int):np.sum(polygon_list[0:poly_index],dtype=int) + polygon_list[poly_index]], np.int32) B, G, R = polygon_color_list[poly_index] cv2.polylines(frame, [pts], True, (R, G, B), thickness=2) cv2.imshow('first_frame', frame / 255) cv2.waitKey(10) if th_mode == "velocity": polygon_dist_list_vel_mode.append(np.linalg.norm(((pts[0, :] + pts[1, :]) / 2) - ((pts[2, :] + pts[3, :]) / 2))) pixel_to_dist_ratio.append(float(input('Please insert pixel to real distance ratio:'))) cv2.destroyAllWindows() break with open(file_name, "w") as txt_file: txt_file.write(str(right_clicks)+'\n') txt_file.write(str(polygon_list)) if th_mode=="velocity": txt_file.write('\n'+str(pixel_to_dist_ratio)+'\n') txt_file.write(str(polygon_dist_list_vel_mode)) txt_file.close() else: with open(file_name, "r") as txt_file: right_clicks.extend(ast.literal_eval(txt_file.readline())) polygon_list.extend(ast.literal_eval(txt_file.readline())) if th_mode=="velocity": pixel_to_dist_ratio.extend(ast.literal_eval(txt_file.readline())) polygon_dist_list_vel_mode.extend(ast.literal_eval(txt_file.readline())) first_frame = False if not return_value: break frame = frame[:,:,::-1] image = Image.fromarray(frame) fps_, out_pred, image = yolo.detect_image(image,n,th_mode,car_for_each_polygon_list,polygon_density,pixel_to_dist_ratio,polygon_dist_list_vel_mode,video_fps,velocity_and_view_time,th_low,th_high) result = np.asarray(image) fps = "FPS: " + str(fps_) cv2.putText(result, text=fps, org=(3, 15), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.50, color=(255, 0, 0), thickness=2) if isOutput: out.write(result[:,:,::-1]) vid.release() out.release() def Mouse_Callback(event, x, y, flags ,params): if event==cv2.EVENT_LBUTTONDBLCLK: global right_clicks right_clicks.append([x,y]) B,G,R=polygon_color_list[poly_index] if poly_index==0:
else:
            pts = np.array(right_clicks[np.sum(polygon_list[0:poly_index], dtype=int):
                                        np.sum(polygon_list[0:poly_index], dtype=int) + len(right_clicks)], np.int32)
        cv2.polylines(params, [pts], False, (R, G, B), thickness=2)


def color_result(value, th_low, th_high):
    th_mid = (th_low + th_high) / 2
    delta = (th_high - th_low) / 2
    B = 0
    if value < th_mid:
        G = 255
    else:
        temp = round((value - th_mid) * 255 / delta)
        G = int(np.max(255 - temp, 0))
    if value >= th_mid:
        R = 255
    else:
        temp = round((value - th_low) * 255 / delta)
        R = int(np.max(temp, 0))
    return R, G, B
pts=np.array(right_clicks)
conditional_block
yolo.py
import os import cv2 import numpy as np import ast from timeit import default_timer as timer from keras import backend as K from keras.layers import Input from PIL import Image, ImageFont, ImageDraw from shapely.geometry import Point from shapely.geometry.polygon import Polygon from model import evaluation, yolo_body right_clicks = list() polygon_list=[] Polygon_object_list=[] polygon_area_list=[] polygon_color_list=[(153,0,76), (0,204,204), (255,153,153), (102,204,0), (102,0,102)] poly_index=0 def letterbox_image(image, size): iw, ih = image.size w, h = size scale = min(w/iw, h/ih) nw = int(iw*scale) nh = int(ih*scale) image = image.resize((nw,nh), Image.BICUBIC) new_image = Image.new('RGB', size, (128,128,128)) new_image.paste(image, ((w-nw)//2, (h-nh)//2)) return new_image class YOLO(object): _defaults = { "model_path": 'model_data/yolo.h5', "anchors_path": 'model_data/yolo_anchors.txt', "classes_path": 'model_data/coco_classes.txt', "score" : 0.3, "iou" : 0.45, "model_image_size" : (416, 416), "gpu_num" : 1, } @classmethod def
(cls, n): if n in cls._defaults: return cls._defaults[n] else: return "Unrecognized attribute name '" + n + "'" def __init__(self, **kwargs): self.__dict__.update(self._defaults) self.__dict__.update(kwargs) self.class_names = self._get_class() self.anchors = self._get_anchors() self.sess = K.get_session() self.boxes, self.scores, self.classes = self.generate() def _get_class(self): classes_path = os.path.expanduser(self.classes_path) with open(classes_path) as f: class_names = f.readlines() class_names = [c.strip() for c in class_names] return class_names def _get_anchors(self): anchors_path = os.path.expanduser(self.anchors_path) with open(anchors_path) as f: anchors = f.readline() anchors = [float(x) for x in anchors.split(',')] return np.array(anchors).reshape(-1, 2) def generate(self): model_path = os.path.expanduser(self.model_path) assert model_path.endswith('.h5'), 'Keras model or weights must be a .h5 file.' start = timer() num_anchors = len(self.anchors) num_classes = len(self.class_names) self.yolo_model = yolo_body(Input(shape=(None,None,3)), num_anchors//3, num_classes) self.yolo_model.load_weights(self.model_path) end = timer() print('{} model, anchors, and classes loaded in {:.2f}sec.'.format(model_path, end-start)) self.colors = ['GreenYellow'] self.input_image_shape = K.placeholder(shape=(2, )) boxes, scores, classes = evaluation(self.yolo_model.output, self.anchors, len(self.class_names), self.input_image_shape, score_threshold=self.score, iou_threshold=self.iou) return boxes, scores, classes def detect_image(self, image,itr_number,th_mode,car_for_each_polygon_list,polygon_density,pixel_to_dist_ratio,polygon_dist_list_vel_mode,video_fps,velocity_and_view_time,th_low,th_high): start = timer() num_of_frames_for_mean=10 number_of_point_in_polygons=np.zeros((1,len(polygon_list)),dtype=int) vehicles_area_in_polygon = np.zeros((1, len(polygon_list)), dtype=float) number_of_frames=np.zeros((1,len(polygon_list)),dtype=int) if self.model_image_size != (None, None): assert self.model_image_size[0]%32 == 0, 'Multiples of 32 required' assert self.model_image_size[1]%32 == 0, 'Multiples of 32 required' boxed_image = letterbox_image(image, tuple(reversed(self.model_image_size))) image_data = np.array(boxed_image, dtype='float32') image_data /= 255. 
image_data = np.expand_dims(image_data, 0) out_boxes, out_scores, out_classes = self.sess.run( [self.boxes, self.scores, self.classes], feed_dict={ self.yolo_model.input: image_data, self.input_image_shape: [image.size[1], image.size[0]], K.learning_phase(): 0 }) if(itr_number==1): for i in range(len(polygon_list)): pts = np.array(right_clicks[np.sum(polygon_list[0:i], dtype=int):np.sum(polygon_list[0:i], dtype=int) + polygon_list[i]], np.int32) a1 = np.empty((polygon_list[i],), dtype=object) a1[:] = [tuple(j) for j in pts] polygon = Polygon(a1.tolist()) polygon_area_list.append(polygon.area) Polygon_object_list.append(polygon) for i in range(len(polygon_list)): pts = np.array(right_clicks[np.sum(polygon_list[0:i],dtype=int):np.sum(polygon_list[0:i],dtype=int)+polygon_list[i]], np.int32) image=cv2.polylines(np.array(image), [pts], True, polygon_color_list[i],thickness=2) image = Image.fromarray(image) out_prediction = [] font_path = os.path.join(os.path.dirname(__file__),'font/FiraMono-Medium.otf') font = ImageFont.truetype(font=font_path, size=np.floor(3e-2 * image.size[1] + 0.5).astype('int32')) thickness = (image.size[0] + image.size[1]) // 400 for i, c in reversed(list(enumerate(out_classes))): box = out_boxes[i] curr_box_x_center=(box[3]+box[1])/2 curr_box_y_center=(box[2]+box[0])/2 point=Point(curr_box_x_center,curr_box_y_center) index=np.where([poly.contains(point) for poly in Polygon_object_list])[0] if len(index)==0: continue number_of_point_in_polygons[0,index]+=1 curr_area=(box[3]-box[1])*(box[2]-box[0]) if 2400<curr_area<=4500: curr_area*=0.8 elif (4500<curr_area<=9000): curr_area *= 0.7 elif (9000<curr_area<=13000): curr_area *= 0.6 elif (13000<curr_area<=18000): curr_area *= 0.5 elif (curr_area>18000): curr_area *= 0.4 vehicles_area_in_polygon[0,index]+=curr_area score = out_scores[i] draw = ImageDraw.Draw(image) top, left, bottom, right = box top = max(0, np.floor(top + 0.5).astype('int32')) left = max(0, np.floor(left + 0.5).astype('int32')) bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32')) right = min(image.size[0], np.floor(right + 0.5).astype('int32')) if top > image.size[1] or right > image.size[0]: continue out_prediction.append([left, top, right, bottom, c, score]) for i in range(thickness): draw.rectangle( [left + i, top + i, right - i, bottom - i], outline=polygon_color_list[index[0]]) del draw car_for_each_polygon_list.insert(0,number_of_point_in_polygons[0]) polygon_density.insert(0,vehicles_area_in_polygon[0] / polygon_area_list) if th_mode=="velocity" and itr_number>1: for p in range(len(polygon_list)): if car_for_each_polygon_list[0][p]==0 and car_for_each_polygon_list[1][p]==1: number_of_frames[0,p]=np.where(np.array(car_for_each_polygon_list)[1:, p] == 0)[0][0] velocity_and_view_time[0,p]=video_fps velocity_and_view_time[1,p] = (np.array(polygon_dist_list_vel_mode)[p] * np.array(pixel_to_dist_ratio)[p] * 3.6) / (number_of_frames[0][p] / video_fps) if(itr_number>=num_of_frames_for_mean): mean_of_points_in_polygon=np.round(np.transpose(car_for_each_polygon_list)[:,0:num_of_frames_for_mean].sum(axis=1)/num_of_frames_for_mean).astype(int) mean_polygon_density=np.sum(polygon_density[0:10],axis=0)/num_of_frames_for_mean else: mean_of_points_in_polygon =number_of_point_in_polygons[0] mean_polygon_density=polygon_density[-1] draw = ImageDraw.Draw(image) font_number_of_vehicles = font font_number_of_vehicles.size = 40 rectangle_width=int(image.size[0] / 7) space_between_rect=0 if len(polygon_list)>1: space_between_rect = int((image.size[0] - 
len(polygon_list)*rectangle_width-40)/(len(polygon_list)-1)) if th_mode == "counting": mean_polygon=mean_of_points_in_polygon elif th_mode == "density": mean_polygon = mean_polygon_density else: mean_polygon = velocity_and_view_time[1,:] for c in range(len(polygon_list)): R,G,B=color_result(mean_polygon[c],th_low,th_high) draw.rectangle([tuple([10+c*(rectangle_width+space_between_rect), 60]), tuple([10 + c*(rectangle_width+space_between_rect)+rectangle_width, 60 + 40])], fill=(R, G, B)) draw.rectangle([tuple([10 + c * (rectangle_width + space_between_rect)+rectangle_width, 60]),tuple([10 + c * (rectangle_width + space_between_rect)+rectangle_width+20, 60 + 40])],fill=polygon_color_list[c]) if th_mode == "counting": draw.text([10+c*(rectangle_width+space_between_rect), 65], "vehicles:" + str(mean_polygon[c]), fill=(0, 0, 0),font=font_number_of_vehicles) elif th_mode == "density": draw.text([10 + c * (rectangle_width + space_between_rect), 65],'density:' + str(int(mean_polygon[c] * 100)) + '%', fill=(0, 0, 0), font=font_number_of_vehicles) else: if (mean_polygon[c]!=0) or (velocity_and_view_time[0,c]>0): draw.text([10 + c * (rectangle_width + space_between_rect), 65],'velocity:' + str(mean_polygon[c]) + 'kmh', fill=(0, 0, 0),font=font_number_of_vehicles) velocity_and_view_time[0,c]-=1 if velocity_and_view_time[0,c] ==0: velocity_and_view_time[1,c]=0 else: draw.text([10 + c * (rectangle_width + space_between_rect), 65], 'velocity:', fill=(0, 0, 0),font=font_number_of_vehicles) del draw end = timer() fps=round(1/(end - start)) return fps, out_prediction, image def close_session(self): self.sess.close() def detect_video(yolo, video_path,th_mode,th_low,th_high, define_regions, output_path="",input_path=""): car_for_each_polygon_list=[] polygon_density=[] pixel_to_dist_ratio=[] polygon_dist_list_vel_mode=[] velocity_and_view_time = np.zeros((2, len(polygon_color_list)), dtype=int) vid = cv2.VideoCapture(video_path) file_name = input_path[:input_path.rfind(".")] + ".txt" if not vid.isOpened(): raise IOError("Couldn't open webcam or video") video_FourCC = cv2.VideoWriter_fourcc(*'mp4v') video_fps = vid.get(cv2.CAP_PROP_FPS) video_size = (int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)), int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))) isOutput = True if output_path != "" else False if isOutput: print('Processing {} with frame size {} '.format(os.path.basename(video_path), video_size)) out = cv2.VideoWriter(output_path, video_FourCC, video_fps, video_size) n=0 first_frame=True global poly_index poly_index=0 while vid.isOpened(): n+=1 return_value, frame = vid.read() if first_frame==True: if define_regions == 1: if th_mode=="velocity": cv2.putText(frame, "Please define velocity regions (BR,BL,TL,TR) order", (int(frame.shape[1] / 4), 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2) else: cv2.putText(frame,"Please define regions",(int(frame.shape[1]/3),40),cv2.FONT_HERSHEY_SIMPLEX,1,(255,0,0),2) cv2.namedWindow('first_frame', cv2.WINDOW_NORMAL) cv2.setMouseCallback('first_frame', Mouse_Callback,param=frame) while (1): cv2.imshow('first_frame', frame/255) k=cv2.waitKey(10) if k==97: polygon_list.append(int(len(right_clicks)-np.sum(polygon_list))) pts = np.array(right_clicks[ np.sum(polygon_list[0:poly_index], dtype=int):np.sum(polygon_list[0:poly_index],dtype=int) + polygon_list[poly_index]], np.int32) B, G, R = polygon_color_list[poly_index] cv2.polylines(frame, [pts], True, (R,G,B), thickness=2) if th_mode == "velocity": polygon_dist_list_vel_mode.append(np.linalg.norm( ((pts[0,:]+pts[1,:])/2) - 
((pts[2,:]+pts[3,:])/2))) cv2.imshow('first_frame', frame / 255) k = cv2.waitKey(10) pixel_to_dist_ratio.append(float(input('Please insert pixel to real distance ratio:'))) poly_index+=1 if k == 27: polygon_list.append(int(len(right_clicks) - np.sum(polygon_list))) pts = np.array(right_clicks[np.sum(polygon_list[0:poly_index], dtype=int):np.sum(polygon_list[0:poly_index],dtype=int) + polygon_list[poly_index]], np.int32) B, G, R = polygon_color_list[poly_index] cv2.polylines(frame, [pts], True, (R, G, B), thickness=2) cv2.imshow('first_frame', frame / 255) cv2.waitKey(10) if th_mode == "velocity": polygon_dist_list_vel_mode.append(np.linalg.norm(((pts[0, :] + pts[1, :]) / 2) - ((pts[2, :] + pts[3, :]) / 2))) pixel_to_dist_ratio.append(float(input('Please insert pixel to real distance ratio:'))) cv2.destroyAllWindows() break with open(file_name, "w") as txt_file: txt_file.write(str(right_clicks)+'\n') txt_file.write(str(polygon_list)) if th_mode=="velocity": txt_file.write('\n'+str(pixel_to_dist_ratio)+'\n') txt_file.write(str(polygon_dist_list_vel_mode)) txt_file.close() else: with open(file_name, "r") as txt_file: right_clicks.extend(ast.literal_eval(txt_file.readline())) polygon_list.extend(ast.literal_eval(txt_file.readline())) if th_mode=="velocity": pixel_to_dist_ratio.extend(ast.literal_eval(txt_file.readline())) polygon_dist_list_vel_mode.extend(ast.literal_eval(txt_file.readline())) first_frame = False if not return_value: break frame = frame[:,:,::-1] image = Image.fromarray(frame) fps_, out_pred, image = yolo.detect_image(image,n,th_mode,car_for_each_polygon_list,polygon_density,pixel_to_dist_ratio,polygon_dist_list_vel_mode,video_fps,velocity_and_view_time,th_low,th_high) result = np.asarray(image) fps = "FPS: " + str(fps_) cv2.putText(result, text=fps, org=(3, 15), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.50, color=(255, 0, 0), thickness=2) if isOutput: out.write(result[:,:,::-1]) vid.release() out.release() def Mouse_Callback(event, x, y, flags ,params): if event==cv2.EVENT_LBUTTONDBLCLK: global right_clicks right_clicks.append([x,y]) B,G,R=polygon_color_list[poly_index] if poly_index==0: pts=np.array(right_clicks) else: pts = np.array(right_clicks[np.sum(polygon_list[0:poly_index], dtype=int):np.sum(polygon_list[0:poly_index], dtype=int) + len(right_clicks)],np.int32) cv2.polylines(params, [pts], False, (R,G,B), thickness=2) def color_result(value,th_low,th_high): th_mid=(th_low+th_high)/2 delta=(th_high-th_low)/2 B=0 if value<th_mid: G=255 else: temp=round((value-th_mid)*255/delta) G=int(np.max(255-temp,0)) if value>=th_mid: R=255 else: temp = round((value - th_low) * 255 / delta) R=int(np.max(temp,0)) return R, G, B
get_defaults
identifier_name
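The row above masks only the identifier get_defaults (fim_type identifier_name): the prefix and suffix carry the rest of yolo.py, and the full definition of the masked classmethod is visible in the next row's prefix. As a minimal sketch of how such a row reassembles, assuming the fields are plain strings named prefix, middle and suffix (hypothetical names for illustration, not an API of this dataset):

def reassemble(record):
    """Rebuild the original source text from one fill-in-the-middle row."""
    return record["prefix"] + record["middle"] + record["suffix"]

# A made-up miniature row in the same shape as the yolo.py rows above.
row = {
    "file_name": "yolo.py",
    "prefix": "    @classmethod\n    def ",
    "middle": "get_defaults",            # the masked identifier
    "suffix": "(cls, n):\n        ...",
    "fim_type": "identifier_name",
}
assert reassemble(row) == "    @classmethod\n    def get_defaults(cls, n):\n        ..."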
yolo.py
import os import cv2 import numpy as np import ast from timeit import default_timer as timer from keras import backend as K from keras.layers import Input from PIL import Image, ImageFont, ImageDraw from shapely.geometry import Point from shapely.geometry.polygon import Polygon from model import evaluation, yolo_body right_clicks = list() polygon_list=[] Polygon_object_list=[] polygon_area_list=[] polygon_color_list=[(153,0,76), (0,204,204), (255,153,153), (102,204,0), (102,0,102)] poly_index=0 def letterbox_image(image, size): iw, ih = image.size w, h = size scale = min(w/iw, h/ih) nw = int(iw*scale) nh = int(ih*scale) image = image.resize((nw,nh), Image.BICUBIC) new_image = Image.new('RGB', size, (128,128,128)) new_image.paste(image, ((w-nw)//2, (h-nh)//2)) return new_image class YOLO(object): _defaults = { "model_path": 'model_data/yolo.h5', "anchors_path": 'model_data/yolo_anchors.txt', "classes_path": 'model_data/coco_classes.txt', "score" : 0.3, "iou" : 0.45, "model_image_size" : (416, 416), "gpu_num" : 1, } @classmethod def get_defaults(cls, n): if n in cls._defaults: return cls._defaults[n] else: return "Unrecognized attribute name '" + n + "'" def __init__(self, **kwargs): self.__dict__.update(self._defaults) self.__dict__.update(kwargs) self.class_names = self._get_class() self.anchors = self._get_anchors() self.sess = K.get_session() self.boxes, self.scores, self.classes = self.generate() def _get_class(self): classes_path = os.path.expanduser(self.classes_path) with open(classes_path) as f: class_names = f.readlines() class_names = [c.strip() for c in class_names] return class_names def _get_anchors(self): anchors_path = os.path.expanduser(self.anchors_path) with open(anchors_path) as f: anchors = f.readline() anchors = [float(x) for x in anchors.split(',')] return np.array(anchors).reshape(-1, 2) def generate(self): model_path = os.path.expanduser(self.model_path) assert model_path.endswith('.h5'), 'Keras model or weights must be a .h5 file.' start = timer() num_anchors = len(self.anchors) num_classes = len(self.class_names) self.yolo_model = yolo_body(Input(shape=(None,None,3)), num_anchors//3, num_classes) self.yolo_model.load_weights(self.model_path) end = timer() print('{} model, anchors, and classes loaded in {:.2f}sec.'.format(model_path, end-start)) self.colors = ['GreenYellow'] self.input_image_shape = K.placeholder(shape=(2, )) boxes, scores, classes = evaluation(self.yolo_model.output, self.anchors, len(self.class_names), self.input_image_shape, score_threshold=self.score, iou_threshold=self.iou) return boxes, scores, classes def detect_image(self, image,itr_number,th_mode,car_for_each_polygon_list,polygon_density,pixel_to_dist_ratio,polygon_dist_list_vel_mode,video_fps,velocity_and_view_time,th_low,th_high): start = timer() num_of_frames_for_mean=10 number_of_point_in_polygons=np.zeros((1,len(polygon_list)),dtype=int) vehicles_area_in_polygon = np.zeros((1, len(polygon_list)), dtype=float) number_of_frames=np.zeros((1,len(polygon_list)),dtype=int) if self.model_image_size != (None, None): assert self.model_image_size[0]%32 == 0, 'Multiples of 32 required' assert self.model_image_size[1]%32 == 0, 'Multiples of 32 required' boxed_image = letterbox_image(image, tuple(reversed(self.model_image_size))) image_data = np.array(boxed_image, dtype='float32') image_data /= 255. 
image_data = np.expand_dims(image_data, 0) out_boxes, out_scores, out_classes = self.sess.run( [self.boxes, self.scores, self.classes], feed_dict={ self.yolo_model.input: image_data, self.input_image_shape: [image.size[1], image.size[0]], K.learning_phase(): 0 }) if(itr_number==1): for i in range(len(polygon_list)): pts = np.array(right_clicks[np.sum(polygon_list[0:i], dtype=int):np.sum(polygon_list[0:i], dtype=int) + polygon_list[i]], np.int32) a1 = np.empty((polygon_list[i],), dtype=object) a1[:] = [tuple(j) for j in pts] polygon = Polygon(a1.tolist()) polygon_area_list.append(polygon.area) Polygon_object_list.append(polygon) for i in range(len(polygon_list)): pts = np.array(right_clicks[np.sum(polygon_list[0:i],dtype=int):np.sum(polygon_list[0:i],dtype=int)+polygon_list[i]], np.int32) image=cv2.polylines(np.array(image), [pts], True, polygon_color_list[i],thickness=2) image = Image.fromarray(image) out_prediction = [] font_path = os.path.join(os.path.dirname(__file__),'font/FiraMono-Medium.otf') font = ImageFont.truetype(font=font_path, size=np.floor(3e-2 * image.size[1] + 0.5).astype('int32')) thickness = (image.size[0] + image.size[1]) // 400 for i, c in reversed(list(enumerate(out_classes))): box = out_boxes[i] curr_box_x_center=(box[3]+box[1])/2 curr_box_y_center=(box[2]+box[0])/2 point=Point(curr_box_x_center,curr_box_y_center) index=np.where([poly.contains(point) for poly in Polygon_object_list])[0] if len(index)==0: continue number_of_point_in_polygons[0,index]+=1 curr_area=(box[3]-box[1])*(box[2]-box[0]) if 2400<curr_area<=4500: curr_area*=0.8 elif (4500<curr_area<=9000): curr_area *= 0.7 elif (9000<curr_area<=13000): curr_area *= 0.6 elif (13000<curr_area<=18000): curr_area *= 0.5 elif (curr_area>18000): curr_area *= 0.4 vehicles_area_in_polygon[0,index]+=curr_area score = out_scores[i] draw = ImageDraw.Draw(image) top, left, bottom, right = box top = max(0, np.floor(top + 0.5).astype('int32')) left = max(0, np.floor(left + 0.5).astype('int32')) bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32')) right = min(image.size[0], np.floor(right + 0.5).astype('int32')) if top > image.size[1] or right > image.size[0]: continue out_prediction.append([left, top, right, bottom, c, score]) for i in range(thickness): draw.rectangle( [left + i, top + i, right - i, bottom - i], outline=polygon_color_list[index[0]]) del draw car_for_each_polygon_list.insert(0,number_of_point_in_polygons[0]) polygon_density.insert(0,vehicles_area_in_polygon[0] / polygon_area_list) if th_mode=="velocity" and itr_number>1: for p in range(len(polygon_list)): if car_for_each_polygon_list[0][p]==0 and car_for_each_polygon_list[1][p]==1: number_of_frames[0,p]=np.where(np.array(car_for_each_polygon_list)[1:, p] == 0)[0][0] velocity_and_view_time[0,p]=video_fps velocity_and_view_time[1,p] = (np.array(polygon_dist_list_vel_mode)[p] * np.array(pixel_to_dist_ratio)[p] * 3.6) / (number_of_frames[0][p] / video_fps) if(itr_number>=num_of_frames_for_mean): mean_of_points_in_polygon=np.round(np.transpose(car_for_each_polygon_list)[:,0:num_of_frames_for_mean].sum(axis=1)/num_of_frames_for_mean).astype(int) mean_polygon_density=np.sum(polygon_density[0:10],axis=0)/num_of_frames_for_mean else: mean_of_points_in_polygon =number_of_point_in_polygons[0] mean_polygon_density=polygon_density[-1] draw = ImageDraw.Draw(image) font_number_of_vehicles = font font_number_of_vehicles.size = 40 rectangle_width=int(image.size[0] / 7) space_between_rect=0 if len(polygon_list)>1: space_between_rect = int((image.size[0] - 
len(polygon_list)*rectangle_width-40)/(len(polygon_list)-1)) if th_mode == "counting": mean_polygon=mean_of_points_in_polygon elif th_mode == "density": mean_polygon = mean_polygon_density else: mean_polygon = velocity_and_view_time[1,:] for c in range(len(polygon_list)):
if th_mode == "counting": draw.text([10+c*(rectangle_width+space_between_rect), 65], "vehicles:" + str(mean_polygon[c]), fill=(0, 0, 0),font=font_number_of_vehicles) elif th_mode == "density": draw.text([10 + c * (rectangle_width + space_between_rect), 65],'density:' + str(int(mean_polygon[c] * 100)) + '%', fill=(0, 0, 0), font=font_number_of_vehicles) else: if (mean_polygon[c]!=0) or (velocity_and_view_time[0,c]>0): draw.text([10 + c * (rectangle_width + space_between_rect), 65],'velocity:' + str(mean_polygon[c]) + 'kmh', fill=(0, 0, 0),font=font_number_of_vehicles) velocity_and_view_time[0,c]-=1 if velocity_and_view_time[0,c] ==0: velocity_and_view_time[1,c]=0 else: draw.text([10 + c * (rectangle_width + space_between_rect), 65], 'velocity:', fill=(0, 0, 0),font=font_number_of_vehicles) del draw end = timer() fps=round(1/(end - start)) return fps, out_prediction, image def close_session(self): self.sess.close() def detect_video(yolo, video_path,th_mode,th_low,th_high, define_regions, output_path="",input_path=""): car_for_each_polygon_list=[] polygon_density=[] pixel_to_dist_ratio=[] polygon_dist_list_vel_mode=[] velocity_and_view_time = np.zeros((2, len(polygon_color_list)), dtype=int) vid = cv2.VideoCapture(video_path) file_name = input_path[:input_path.rfind(".")] + ".txt" if not vid.isOpened(): raise IOError("Couldn't open webcam or video") video_FourCC = cv2.VideoWriter_fourcc(*'mp4v') video_fps = vid.get(cv2.CAP_PROP_FPS) video_size = (int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)), int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))) isOutput = True if output_path != "" else False if isOutput: print('Processing {} with frame size {} '.format(os.path.basename(video_path), video_size)) out = cv2.VideoWriter(output_path, video_FourCC, video_fps, video_size) n=0 first_frame=True global poly_index poly_index=0 while vid.isOpened(): n+=1 return_value, frame = vid.read() if first_frame==True: if define_regions == 1: if th_mode=="velocity": cv2.putText(frame, "Please define velocity regions (BR,BL,TL,TR) order", (int(frame.shape[1] / 4), 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2) else: cv2.putText(frame,"Please define regions",(int(frame.shape[1]/3),40),cv2.FONT_HERSHEY_SIMPLEX,1,(255,0,0),2) cv2.namedWindow('first_frame', cv2.WINDOW_NORMAL) cv2.setMouseCallback('first_frame', Mouse_Callback,param=frame) while (1): cv2.imshow('first_frame', frame/255) k=cv2.waitKey(10) if k==97: polygon_list.append(int(len(right_clicks)-np.sum(polygon_list))) pts = np.array(right_clicks[ np.sum(polygon_list[0:poly_index], dtype=int):np.sum(polygon_list[0:poly_index],dtype=int) + polygon_list[poly_index]], np.int32) B, G, R = polygon_color_list[poly_index] cv2.polylines(frame, [pts], True, (R,G,B), thickness=2) if th_mode == "velocity": polygon_dist_list_vel_mode.append(np.linalg.norm( ((pts[0,:]+pts[1,:])/2) - ((pts[2,:]+pts[3,:])/2))) cv2.imshow('first_frame', frame / 255) k = cv2.waitKey(10) pixel_to_dist_ratio.append(float(input('Please insert pixel to real distance ratio:'))) poly_index+=1 if k == 27: polygon_list.append(int(len(right_clicks) - np.sum(polygon_list))) pts = np.array(right_clicks[np.sum(polygon_list[0:poly_index], dtype=int):np.sum(polygon_list[0:poly_index],dtype=int) + polygon_list[poly_index]], np.int32) B, G, R = polygon_color_list[poly_index] cv2.polylines(frame, [pts], True, (R, G, B), thickness=2) cv2.imshow('first_frame', frame / 255) cv2.waitKey(10) if th_mode == "velocity": polygon_dist_list_vel_mode.append(np.linalg.norm(((pts[0, :] + pts[1, :]) / 2) - ((pts[2, :] + pts[3, :]) / 2))) 
pixel_to_dist_ratio.append(float(input('Please insert pixel to real distance ratio:'))) cv2.destroyAllWindows() break with open(file_name, "w") as txt_file: txt_file.write(str(right_clicks)+'\n') txt_file.write(str(polygon_list)) if th_mode=="velocity": txt_file.write('\n'+str(pixel_to_dist_ratio)+'\n') txt_file.write(str(polygon_dist_list_vel_mode)) txt_file.close() else: with open(file_name, "r") as txt_file: right_clicks.extend(ast.literal_eval(txt_file.readline())) polygon_list.extend(ast.literal_eval(txt_file.readline())) if th_mode=="velocity": pixel_to_dist_ratio.extend(ast.literal_eval(txt_file.readline())) polygon_dist_list_vel_mode.extend(ast.literal_eval(txt_file.readline())) first_frame = False if not return_value: break frame = frame[:,:,::-1] image = Image.fromarray(frame) fps_, out_pred, image = yolo.detect_image(image,n,th_mode,car_for_each_polygon_list,polygon_density,pixel_to_dist_ratio,polygon_dist_list_vel_mode,video_fps,velocity_and_view_time,th_low,th_high) result = np.asarray(image) fps = "FPS: " + str(fps_) cv2.putText(result, text=fps, org=(3, 15), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.50, color=(255, 0, 0), thickness=2) if isOutput: out.write(result[:,:,::-1]) vid.release() out.release() def Mouse_Callback(event, x, y, flags ,params): if event==cv2.EVENT_LBUTTONDBLCLK: global right_clicks right_clicks.append([x,y]) B,G,R=polygon_color_list[poly_index] if poly_index==0: pts=np.array(right_clicks) else: pts = np.array(right_clicks[np.sum(polygon_list[0:poly_index], dtype=int):np.sum(polygon_list[0:poly_index], dtype=int) + len(right_clicks)],np.int32) cv2.polylines(params, [pts], False, (R,G,B), thickness=2) def color_result(value,th_low,th_high): th_mid=(th_low+th_high)/2 delta=(th_high-th_low)/2 B=0 if value<th_mid: G=255 else: temp=round((value-th_mid)*255/delta) G=int(np.max(255-temp,0)) if value>=th_mid: R=255 else: temp = round((value - th_low) * 255 / delta) R=int(np.max(temp,0)) return R, G, B
R,G,B=color_result(mean_polygon[c],th_low,th_high)
draw.rectangle([tuple([10+c*(rectangle_width+space_between_rect), 60]), tuple([10 + c*(rectangle_width+space_between_rect)+rectangle_width, 60 + 40])], fill=(R, G, B))
draw.rectangle([tuple([10 + c * (rectangle_width + space_between_rect)+rectangle_width, 60]),tuple([10 + c * (rectangle_width + space_between_rect)+rectangle_width+20, 60 + 40])],fill=polygon_color_list[c])
random_line_split
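The masked span in this row is the call to color_result plus the two status rectangles it colors. color_result, visible in full in the suffixes above, ramps a per-region metric from green below the midpoint of [th_low, th_high] to red above it, with blue fixed at zero. A small sketch of that mapping with the clamping written out explicitly (the function and variable names here are illustrative, not taken from yolo.py):

def metric_to_rgb(value, th_low, th_high):
    """Green when the metric sits below the mid-threshold, fading to red above it."""
    th_mid = (th_low + th_high) / 2.0
    delta = (th_high - th_low) / 2.0          # assumed non-zero
    if value < th_mid:
        g = 255
        r = max(0, min(255, round((value - th_low) * 255 / delta)))
    else:
        r = 255
        g = max(0, min(255, 255 - round((value - th_mid) * 255 / delta)))
    return r, g, 0                            # B stays 0, as in color_result

# e.g. a metric exactly at the midpoint of thresholds 0.1..0.5 comes out yellow:
print(metric_to_rgb(0.3, 0.1, 0.5))           # (255, 255, 0)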
yolo.py
import os import cv2 import numpy as np import ast from timeit import default_timer as timer from keras import backend as K from keras.layers import Input from PIL import Image, ImageFont, ImageDraw from shapely.geometry import Point from shapely.geometry.polygon import Polygon from model import evaluation, yolo_body right_clicks = list() polygon_list=[] Polygon_object_list=[] polygon_area_list=[] polygon_color_list=[(153,0,76), (0,204,204), (255,153,153), (102,204,0), (102,0,102)] poly_index=0 def letterbox_image(image, size): iw, ih = image.size w, h = size scale = min(w/iw, h/ih) nw = int(iw*scale) nh = int(ih*scale) image = image.resize((nw,nh), Image.BICUBIC) new_image = Image.new('RGB', size, (128,128,128)) new_image.paste(image, ((w-nw)//2, (h-nh)//2)) return new_image class YOLO(object): _defaults = { "model_path": 'model_data/yolo.h5', "anchors_path": 'model_data/yolo_anchors.txt', "classes_path": 'model_data/coco_classes.txt', "score" : 0.3, "iou" : 0.45, "model_image_size" : (416, 416), "gpu_num" : 1, } @classmethod def get_defaults(cls, n): if n in cls._defaults: return cls._defaults[n] else: return "Unrecognized attribute name '" + n + "'" def __init__(self, **kwargs): self.__dict__.update(self._defaults) self.__dict__.update(kwargs) self.class_names = self._get_class() self.anchors = self._get_anchors() self.sess = K.get_session() self.boxes, self.scores, self.classes = self.generate() def _get_class(self): classes_path = os.path.expanduser(self.classes_path) with open(classes_path) as f: class_names = f.readlines() class_names = [c.strip() for c in class_names] return class_names def _get_anchors(self): anchors_path = os.path.expanduser(self.anchors_path) with open(anchors_path) as f: anchors = f.readline() anchors = [float(x) for x in anchors.split(',')] return np.array(anchors).reshape(-1, 2) def generate(self):
def detect_image(self, image,itr_number,th_mode,car_for_each_polygon_list,polygon_density,pixel_to_dist_ratio,polygon_dist_list_vel_mode,video_fps,velocity_and_view_time,th_low,th_high): start = timer() num_of_frames_for_mean=10 number_of_point_in_polygons=np.zeros((1,len(polygon_list)),dtype=int) vehicles_area_in_polygon = np.zeros((1, len(polygon_list)), dtype=float) number_of_frames=np.zeros((1,len(polygon_list)),dtype=int) if self.model_image_size != (None, None): assert self.model_image_size[0]%32 == 0, 'Multiples of 32 required' assert self.model_image_size[1]%32 == 0, 'Multiples of 32 required' boxed_image = letterbox_image(image, tuple(reversed(self.model_image_size))) image_data = np.array(boxed_image, dtype='float32') image_data /= 255. image_data = np.expand_dims(image_data, 0) out_boxes, out_scores, out_classes = self.sess.run( [self.boxes, self.scores, self.classes], feed_dict={ self.yolo_model.input: image_data, self.input_image_shape: [image.size[1], image.size[0]], K.learning_phase(): 0 }) if(itr_number==1): for i in range(len(polygon_list)): pts = np.array(right_clicks[np.sum(polygon_list[0:i], dtype=int):np.sum(polygon_list[0:i], dtype=int) + polygon_list[i]], np.int32) a1 = np.empty((polygon_list[i],), dtype=object) a1[:] = [tuple(j) for j in pts] polygon = Polygon(a1.tolist()) polygon_area_list.append(polygon.area) Polygon_object_list.append(polygon) for i in range(len(polygon_list)): pts = np.array(right_clicks[np.sum(polygon_list[0:i],dtype=int):np.sum(polygon_list[0:i],dtype=int)+polygon_list[i]], np.int32) image=cv2.polylines(np.array(image), [pts], True, polygon_color_list[i],thickness=2) image = Image.fromarray(image) out_prediction = [] font_path = os.path.join(os.path.dirname(__file__),'font/FiraMono-Medium.otf') font = ImageFont.truetype(font=font_path, size=np.floor(3e-2 * image.size[1] + 0.5).astype('int32')) thickness = (image.size[0] + image.size[1]) // 400 for i, c in reversed(list(enumerate(out_classes))): box = out_boxes[i] curr_box_x_center=(box[3]+box[1])/2 curr_box_y_center=(box[2]+box[0])/2 point=Point(curr_box_x_center,curr_box_y_center) index=np.where([poly.contains(point) for poly in Polygon_object_list])[0] if len(index)==0: continue number_of_point_in_polygons[0,index]+=1 curr_area=(box[3]-box[1])*(box[2]-box[0]) if 2400<curr_area<=4500: curr_area*=0.8 elif (4500<curr_area<=9000): curr_area *= 0.7 elif (9000<curr_area<=13000): curr_area *= 0.6 elif (13000<curr_area<=18000): curr_area *= 0.5 elif (curr_area>18000): curr_area *= 0.4 vehicles_area_in_polygon[0,index]+=curr_area score = out_scores[i] draw = ImageDraw.Draw(image) top, left, bottom, right = box top = max(0, np.floor(top + 0.5).astype('int32')) left = max(0, np.floor(left + 0.5).astype('int32')) bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32')) right = min(image.size[0], np.floor(right + 0.5).astype('int32')) if top > image.size[1] or right > image.size[0]: continue out_prediction.append([left, top, right, bottom, c, score]) for i in range(thickness): draw.rectangle( [left + i, top + i, right - i, bottom - i], outline=polygon_color_list[index[0]]) del draw car_for_each_polygon_list.insert(0,number_of_point_in_polygons[0]) polygon_density.insert(0,vehicles_area_in_polygon[0] / polygon_area_list) if th_mode=="velocity" and itr_number>1: for p in range(len(polygon_list)): if car_for_each_polygon_list[0][p]==0 and car_for_each_polygon_list[1][p]==1: number_of_frames[0,p]=np.where(np.array(car_for_each_polygon_list)[1:, p] == 0)[0][0] velocity_and_view_time[0,p]=video_fps 
velocity_and_view_time[1,p] = (np.array(polygon_dist_list_vel_mode)[p] * np.array(pixel_to_dist_ratio)[p] * 3.6) / (number_of_frames[0][p] / video_fps) if(itr_number>=num_of_frames_for_mean): mean_of_points_in_polygon=np.round(np.transpose(car_for_each_polygon_list)[:,0:num_of_frames_for_mean].sum(axis=1)/num_of_frames_for_mean).astype(int) mean_polygon_density=np.sum(polygon_density[0:10],axis=0)/num_of_frames_for_mean else: mean_of_points_in_polygon =number_of_point_in_polygons[0] mean_polygon_density=polygon_density[-1] draw = ImageDraw.Draw(image) font_number_of_vehicles = font font_number_of_vehicles.size = 40 rectangle_width=int(image.size[0] / 7) space_between_rect=0 if len(polygon_list)>1: space_between_rect = int((image.size[0] - len(polygon_list)*rectangle_width-40)/(len(polygon_list)-1)) if th_mode == "counting": mean_polygon=mean_of_points_in_polygon elif th_mode == "density": mean_polygon = mean_polygon_density else: mean_polygon = velocity_and_view_time[1,:] for c in range(len(polygon_list)): R,G,B=color_result(mean_polygon[c],th_low,th_high) draw.rectangle([tuple([10+c*(rectangle_width+space_between_rect), 60]), tuple([10 + c*(rectangle_width+space_between_rect)+rectangle_width, 60 + 40])], fill=(R, G, B)) draw.rectangle([tuple([10 + c * (rectangle_width + space_between_rect)+rectangle_width, 60]),tuple([10 + c * (rectangle_width + space_between_rect)+rectangle_width+20, 60 + 40])],fill=polygon_color_list[c]) if th_mode == "counting": draw.text([10+c*(rectangle_width+space_between_rect), 65], "vehicles:" + str(mean_polygon[c]), fill=(0, 0, 0),font=font_number_of_vehicles) elif th_mode == "density": draw.text([10 + c * (rectangle_width + space_between_rect), 65],'density:' + str(int(mean_polygon[c] * 100)) + '%', fill=(0, 0, 0), font=font_number_of_vehicles) else: if (mean_polygon[c]!=0) or (velocity_and_view_time[0,c]>0): draw.text([10 + c * (rectangle_width + space_between_rect), 65],'velocity:' + str(mean_polygon[c]) + 'kmh', fill=(0, 0, 0),font=font_number_of_vehicles) velocity_and_view_time[0,c]-=1 if velocity_and_view_time[0,c] ==0: velocity_and_view_time[1,c]=0 else: draw.text([10 + c * (rectangle_width + space_between_rect), 65], 'velocity:', fill=(0, 0, 0),font=font_number_of_vehicles) del draw end = timer() fps=round(1/(end - start)) return fps, out_prediction, image def close_session(self): self.sess.close() def detect_video(yolo, video_path,th_mode,th_low,th_high, define_regions, output_path="",input_path=""): car_for_each_polygon_list=[] polygon_density=[] pixel_to_dist_ratio=[] polygon_dist_list_vel_mode=[] velocity_and_view_time = np.zeros((2, len(polygon_color_list)), dtype=int) vid = cv2.VideoCapture(video_path) file_name = input_path[:input_path.rfind(".")] + ".txt" if not vid.isOpened(): raise IOError("Couldn't open webcam or video") video_FourCC = cv2.VideoWriter_fourcc(*'mp4v') video_fps = vid.get(cv2.CAP_PROP_FPS) video_size = (int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)), int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))) isOutput = True if output_path != "" else False if isOutput: print('Processing {} with frame size {} '.format(os.path.basename(video_path), video_size)) out = cv2.VideoWriter(output_path, video_FourCC, video_fps, video_size) n=0 first_frame=True global poly_index poly_index=0 while vid.isOpened(): n+=1 return_value, frame = vid.read() if first_frame==True: if define_regions == 1: if th_mode=="velocity": cv2.putText(frame, "Please define velocity regions (BR,BL,TL,TR) order", (int(frame.shape[1] / 4), 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2) 
else: cv2.putText(frame,"Please define regions",(int(frame.shape[1]/3),40),cv2.FONT_HERSHEY_SIMPLEX,1,(255,0,0),2) cv2.namedWindow('first_frame', cv2.WINDOW_NORMAL) cv2.setMouseCallback('first_frame', Mouse_Callback,param=frame) while (1): cv2.imshow('first_frame', frame/255) k=cv2.waitKey(10) if k==97: polygon_list.append(int(len(right_clicks)-np.sum(polygon_list))) pts = np.array(right_clicks[ np.sum(polygon_list[0:poly_index], dtype=int):np.sum(polygon_list[0:poly_index],dtype=int) + polygon_list[poly_index]], np.int32) B, G, R = polygon_color_list[poly_index] cv2.polylines(frame, [pts], True, (R,G,B), thickness=2) if th_mode == "velocity": polygon_dist_list_vel_mode.append(np.linalg.norm( ((pts[0,:]+pts[1,:])/2) - ((pts[2,:]+pts[3,:])/2))) cv2.imshow('first_frame', frame / 255) k = cv2.waitKey(10) pixel_to_dist_ratio.append(float(input('Please insert pixel to real distance ratio:'))) poly_index+=1 if k == 27: polygon_list.append(int(len(right_clicks) - np.sum(polygon_list))) pts = np.array(right_clicks[np.sum(polygon_list[0:poly_index], dtype=int):np.sum(polygon_list[0:poly_index],dtype=int) + polygon_list[poly_index]], np.int32) B, G, R = polygon_color_list[poly_index] cv2.polylines(frame, [pts], True, (R, G, B), thickness=2) cv2.imshow('first_frame', frame / 255) cv2.waitKey(10) if th_mode == "velocity": polygon_dist_list_vel_mode.append(np.linalg.norm(((pts[0, :] + pts[1, :]) / 2) - ((pts[2, :] + pts[3, :]) / 2))) pixel_to_dist_ratio.append(float(input('Please insert pixel to real distance ratio:'))) cv2.destroyAllWindows() break with open(file_name, "w") as txt_file: txt_file.write(str(right_clicks)+'\n') txt_file.write(str(polygon_list)) if th_mode=="velocity": txt_file.write('\n'+str(pixel_to_dist_ratio)+'\n') txt_file.write(str(polygon_dist_list_vel_mode)) txt_file.close() else: with open(file_name, "r") as txt_file: right_clicks.extend(ast.literal_eval(txt_file.readline())) polygon_list.extend(ast.literal_eval(txt_file.readline())) if th_mode=="velocity": pixel_to_dist_ratio.extend(ast.literal_eval(txt_file.readline())) polygon_dist_list_vel_mode.extend(ast.literal_eval(txt_file.readline())) first_frame = False if not return_value: break frame = frame[:,:,::-1] image = Image.fromarray(frame) fps_, out_pred, image = yolo.detect_image(image,n,th_mode,car_for_each_polygon_list,polygon_density,pixel_to_dist_ratio,polygon_dist_list_vel_mode,video_fps,velocity_and_view_time,th_low,th_high) result = np.asarray(image) fps = "FPS: " + str(fps_) cv2.putText(result, text=fps, org=(3, 15), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=0.50, color=(255, 0, 0), thickness=2) if isOutput: out.write(result[:,:,::-1]) vid.release() out.release() def Mouse_Callback(event, x, y, flags ,params): if event==cv2.EVENT_LBUTTONDBLCLK: global right_clicks right_clicks.append([x,y]) B,G,R=polygon_color_list[poly_index] if poly_index==0: pts=np.array(right_clicks) else: pts = np.array(right_clicks[np.sum(polygon_list[0:poly_index], dtype=int):np.sum(polygon_list[0:poly_index], dtype=int) + len(right_clicks)],np.int32) cv2.polylines(params, [pts], False, (R,G,B), thickness=2) def color_result(value,th_low,th_high): th_mid=(th_low+th_high)/2 delta=(th_high-th_low)/2 B=0 if value<th_mid: G=255 else: temp=round((value-th_mid)*255/delta) G=int(np.max(255-temp,0)) if value>=th_mid: R=255 else: temp = round((value - th_low) * 255 / delta) R=int(np.max(temp,0)) return R, G, B
model_path = os.path.expanduser(self.model_path)
assert model_path.endswith('.h5'), 'Keras model or weights must be a .h5 file.'
start = timer()
num_anchors = len(self.anchors)
num_classes = len(self.class_names)
self.yolo_model = yolo_body(Input(shape=(None,None,3)), num_anchors//3, num_classes)
self.yolo_model.load_weights(self.model_path)
end = timer()
print('{} model, anchors, and classes loaded in {:.2f}sec.'.format(model_path, end-start))
self.colors = ['GreenYellow']
self.input_image_shape = K.placeholder(shape=(2, ))
boxes, scores, classes = evaluation(self.yolo_model.output, self.anchors, len(self.class_names), self.input_image_shape, score_threshold=self.score, iou_threshold=self.iou)
return boxes, scores, classes
identifier_body
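Both yolo.py rows above decide which user-drawn region a detection belongs to by testing whether the box centre lies inside a shapely Polygon. A minimal, self-contained illustration of that containment test, using a made-up region and box (the (top, left, bottom, right) ordering matches the unpacking in detect_image; note that shapely's contains() returns False for points exactly on the boundary):

from shapely.geometry import Point
from shapely.geometry.polygon import Polygon

region = Polygon([(100, 100), (400, 100), (400, 300), (100, 300)])   # hypothetical lane polygon
box = (120, 150, 200, 260)                                           # top, left, bottom, right
centre = Point((box[3] + box[1]) / 2, (box[2] + box[0]) / 2)         # (x, y) of the box centre
print(region.contains(centre))                                       # True: this detection is counted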
install.rs
use crate::alias::create_alias; use crate::archive::{self, extract::Error as ExtractError, extract::Extract}; use crate::config::FrumConfig; use crate::input_version::InputVersion; use crate::outln; use crate::version::Version; use crate::version_file::get_user_version_for_directory; use anyhow::Result; use colored::Colorize; use log::debug; use reqwest::Url; use std::io::prelude::*; use std::path::Path; use std::path::PathBuf; use std::process::Command; use thiserror::Error; #[derive(Error, Debug)] pub enum FrumError { #[error(transparent)] HttpError(#[from] reqwest::Error), #[error(transparent)] IoError(#[from] std::io::Error), #[error("Can't find the number of cores")] FromUtf8Error(#[from] std::string::FromUtf8Error), #[error("Can't extract the file: {source:?}")] ExtractError { source: ExtractError }, #[error("The downloaded archive is empty")] TarIsEmpty, #[error("Can't find version: {version}")] VersionNotFound { version: InputVersion }, #[error("Can't list the remote versions: {source:?}")] CantListRemoteVersions { source: reqwest::Error }, #[error("Version already installed at {path:?}")] VersionAlreadyInstalled { path: PathBuf }, #[error("Can't find version in dotfiles. Please provide a version manually to the command.")] CantInferVersion, #[error("The requested version is not installable: {version}")] NotInstallableVersion { version: Version }, #[error("Can't build Ruby: {stderr}")] CantBuildRuby { stderr: String }, } pub struct Install { pub version: Option<InputVersion>, pub configure_opts: Vec<String>, } impl crate::command::Command for Install { type Error = FrumError; fn apply(&self, config: &FrumConfig) -> Result<(), Self::Error> { let current_version = self .version .clone() .or_else(|| get_user_version_for_directory(std::env::current_dir().unwrap())) .ok_or(FrumError::CantInferVersion)?; let version = match current_version.clone() { InputVersion::Full(Version::Semver(v)) => Version::Semver(v), InputVersion::Full(Version::System) => { return Err(FrumError::NotInstallableVersion { version: Version::System, }) } current_version => { let available_versions = crate::remote_ruby_index::list(&config.ruby_build_mirror) .map_err(|source| FrumError::CantListRemoteVersions { source })? .drain(..) .map(|x| x.version) .collect::<Vec<_>>(); current_version .to_version(&available_versions) .ok_or(FrumError::VersionNotFound { version: current_version, })? .clone() } }; let installations_dir = config.versions_dir(); let installation_dir = PathBuf::from(&installations_dir).join(version.to_string()); if installation_dir.exists() { return Err(FrumError::VersionAlreadyInstalled { path: installation_dir, }); } let url = package_url(config.ruby_build_mirror.clone(), &version); outln!(config#Info, "{} Downloading {}", "==>".green(), format!("{}", url).green()); let response = reqwest::blocking::get(url)?; if response.status() == 404 { return Err(FrumError::VersionNotFound { version: current_version, }); } outln!(config#Info, "{} Extracting {}", "==>".green(), archive(&version).green()); let temp_installations_dir = installations_dir.join(".downloads"); std::fs::create_dir_all(&temp_installations_dir).map_err(FrumError::IoError)?; let temp_dir = tempfile::TempDir::new_in(&temp_installations_dir) .expect("Can't generate a temp directory"); extract_archive_into(&temp_dir, response)?; outln!(config#Info, "{} Building {}", "==>".green(), format!("Ruby {}", current_version).green()); let installed_directory = std::fs::read_dir(&temp_dir) .map_err(FrumError::IoError)? 
.next() .ok_or(FrumError::TarIsEmpty)? .map_err(FrumError::IoError)?; let installed_directory = installed_directory.path(); build_package( &installed_directory, &installation_dir, &self.configure_opts, )?; if !config.default_version_dir().exists() { debug!("Use {} as the default version", current_version); create_alias(&config, "default", &version).map_err(FrumError::IoError)?; } Ok(()) } } fn extract_archive_into<P: AsRef<Path>>( path: P, response: reqwest::blocking::Response, ) -> Result<(), FrumError> { #[cfg(unix)] let extractor = archive::tar_xz::TarXz::new(response); #[cfg(windows)] let extractor = archive::zip::Zip::new(response); extractor .extract_into(path) .map_err(|source| FrumError::ExtractError { source })?; Ok(()) } fn package_url(mirror_url: Url, version: &Version) -> Url { debug!("pakage url"); Url::parse(&format!( "{}/{}/{}", mirror_url.as_str().trim_end_matches('/'), match version { Version::Semver(version) => format!("{}.{}", version.major, version.minor), _ => unreachable!(), }, archive(version), )) .unwrap() } #[cfg(unix)] fn archive(version: &Version) -> String { format!("ruby-{}.tar.xz", version) } #[cfg(windows)] fn archive(version: &Version) -> String { format!("ruby-{}.zip", version) } #[allow(clippy::unnecessary_wraps)] fn openssl_dir() -> Result<String, FrumError> { #[cfg(target_os = "macos")] return Ok(String::from_utf8_lossy( &Command::new("brew") .arg("--prefix") .arg("openssl") .output() .map_err(FrumError::IoError)? .stdout, ) .trim() .to_string()); #[cfg(not(target_os = "macos"))] return Ok("/usr/local".to_string()); } fn build_package( current_dir: &Path, installed_dir: &Path, configure_opts: &[String], ) -> Result<(), FrumError> { debug!("./configure {}", configure_opts.join(" ")); let mut command = Command::new("sh"); command .arg("configure") .arg(format!("--prefix={}", installed_dir.to_str().unwrap())) .args(configure_opts); // Provide a default value for --with-openssl-dir if !configure_opts .iter() .any(|opt| opt.starts_with("--with-openssl-dir")) { command.arg(format!("--with-openssl-dir={}", openssl_dir()?)); } let configure = command .current_dir(&current_dir) .output() .map_err(FrumError::IoError)?; if !configure.status.success() { return Err(FrumError::CantBuildRuby { stderr: format!( "configure failed: {}", String::from_utf8_lossy(&configure.stderr).to_string() ), }); }; debug!("make -j {}", num_cpus::get().to_string()); let make = Command::new("make") .arg("-j") .arg(num_cpus::get().to_string()) .current_dir(&current_dir) .output() .map_err(FrumError::IoError)?; if !make.status.success() { return Err(FrumError::CantBuildRuby { stderr: format!( "make failed: {}", String::from_utf8_lossy(&make.stderr).to_string() ), }); }; debug!("make install"); let make_install = Command::new("make") .arg("install") .current_dir(&current_dir) .output() .map_err(FrumError::IoError)?; if !make_install.status.success()
; Ok(()) } #[cfg(test)] mod tests { use super::*; use crate::command::Command; use crate::config::FrumConfig; use crate::version::Version; use tempfile::tempdir; #[test] fn test_install_second_version() { let config = FrumConfig { base_dir: Some(tempdir().unwrap().path().to_path_buf()), ..Default::default() }; Install { version: Some(InputVersion::Full(Version::Semver( semver::Version::parse("2.7.0").unwrap(), ))), configure_opts: vec![], } .apply(&config) .expect("Can't install 2.7.0"); Install { version: Some(InputVersion::Full(Version::Semver( semver::Version::parse("2.6.4").unwrap(), ))), configure_opts: vec![], } .apply(&config) .expect("Can't install 2.6.4"); assert_eq!( std::fs::read_link(&config.default_version_dir()) .unwrap() .components() .last(), Some(std::path::Component::Normal(std::ffi::OsStr::new("2.7.0"))) ); } #[test] fn test_install_default_version() { let config = FrumConfig { base_dir: Some(tempdir().unwrap().path().to_path_buf()), ..Default::default() }; Install { version: Some(InputVersion::Full(Version::Semver( semver::Version::parse("2.6.4").unwrap(), ))), configure_opts: vec![], } .apply(&config) .expect("Can't install"); assert!(config.versions_dir().join("2.6.4").exists()); assert!(config .versions_dir() .join("2.6.4") .join("bin") .join("ruby") .exists()); assert!(config.default_version_dir().exists()); } }
{
    return Err(FrumError::CantBuildRuby {
        stderr: format!(
            "make install: {}",
            String::from_utf8_lossy(&make_install.stderr).to_string()
        ),
    });
}
conditional_block
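The masked conditional above is the last of three parallel guards in build_package: configure, make and make install each run as a child process, and a non-zero exit status is turned into FrumError::CantBuildRuby carrying the captured stderr. For readers more at home in Python, a rough analogue of that pattern using subprocess (the command lines and exception name are placeholders, not part of frum):

import subprocess

class BuildError(RuntimeError):
    """Stand-in for FrumError::CantBuildRuby."""

def run_step(name, args, cwd):
    result = subprocess.run(args, cwd=cwd, capture_output=True)
    if result.returncode != 0:
        # Surface the captured stderr, as build_package does for each step.
        raise BuildError(f"{name} failed: {result.stderr.decode(errors='replace')}")

# run_step("configure", ["sh", "configure", "--prefix=/tmp/ruby"], cwd="ruby-src")
# run_step("make", ["make", "-j", "4"], cwd="ruby-src")
# run_step("make install", ["make", "install"], cwd="ruby-src")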
install.rs
use crate::alias::create_alias; use crate::archive::{self, extract::Error as ExtractError, extract::Extract}; use crate::config::FrumConfig; use crate::input_version::InputVersion; use crate::outln; use crate::version::Version; use crate::version_file::get_user_version_for_directory; use anyhow::Result; use colored::Colorize; use log::debug; use reqwest::Url; use std::io::prelude::*; use std::path::Path; use std::path::PathBuf; use std::process::Command; use thiserror::Error; #[derive(Error, Debug)] pub enum FrumError { #[error(transparent)] HttpError(#[from] reqwest::Error), #[error(transparent)] IoError(#[from] std::io::Error), #[error("Can't find the number of cores")] FromUtf8Error(#[from] std::string::FromUtf8Error), #[error("Can't extract the file: {source:?}")] ExtractError { source: ExtractError }, #[error("The downloaded archive is empty")] TarIsEmpty, #[error("Can't find version: {version}")] VersionNotFound { version: InputVersion }, #[error("Can't list the remote versions: {source:?}")] CantListRemoteVersions { source: reqwest::Error }, #[error("Version already installed at {path:?}")] VersionAlreadyInstalled { path: PathBuf }, #[error("Can't find version in dotfiles. Please provide a version manually to the command.")] CantInferVersion, #[error("The requested version is not installable: {version}")] NotInstallableVersion { version: Version }, #[error("Can't build Ruby: {stderr}")] CantBuildRuby { stderr: String }, } pub struct Install { pub version: Option<InputVersion>, pub configure_opts: Vec<String>, } impl crate::command::Command for Install { type Error = FrumError; fn apply(&self, config: &FrumConfig) -> Result<(), Self::Error> { let current_version = self .version .clone() .or_else(|| get_user_version_for_directory(std::env::current_dir().unwrap())) .ok_or(FrumError::CantInferVersion)?; let version = match current_version.clone() { InputVersion::Full(Version::Semver(v)) => Version::Semver(v), InputVersion::Full(Version::System) => { return Err(FrumError::NotInstallableVersion { version: Version::System, }) } current_version => { let available_versions = crate::remote_ruby_index::list(&config.ruby_build_mirror) .map_err(|source| FrumError::CantListRemoteVersions { source })? .drain(..) .map(|x| x.version) .collect::<Vec<_>>(); current_version .to_version(&available_versions) .ok_or(FrumError::VersionNotFound { version: current_version, })? .clone() } }; let installations_dir = config.versions_dir(); let installation_dir = PathBuf::from(&installations_dir).join(version.to_string()); if installation_dir.exists() { return Err(FrumError::VersionAlreadyInstalled { path: installation_dir, }); } let url = package_url(config.ruby_build_mirror.clone(), &version); outln!(config#Info, "{} Downloading {}", "==>".green(), format!("{}", url).green()); let response = reqwest::blocking::get(url)?; if response.status() == 404 { return Err(FrumError::VersionNotFound { version: current_version, }); } outln!(config#Info, "{} Extracting {}", "==>".green(), archive(&version).green()); let temp_installations_dir = installations_dir.join(".downloads"); std::fs::create_dir_all(&temp_installations_dir).map_err(FrumError::IoError)?; let temp_dir = tempfile::TempDir::new_in(&temp_installations_dir) .expect("Can't generate a temp directory"); extract_archive_into(&temp_dir, response)?; outln!(config#Info, "{} Building {}", "==>".green(), format!("Ruby {}", current_version).green()); let installed_directory = std::fs::read_dir(&temp_dir) .map_err(FrumError::IoError)? 
.next() .ok_or(FrumError::TarIsEmpty)? .map_err(FrumError::IoError)?; let installed_directory = installed_directory.path(); build_package( &installed_directory, &installation_dir, &self.configure_opts, )?; if !config.default_version_dir().exists() { debug!("Use {} as the default version", current_version); create_alias(&config, "default", &version).map_err(FrumError::IoError)?; } Ok(()) } } fn extract_archive_into<P: AsRef<Path>>( path: P, response: reqwest::blocking::Response, ) -> Result<(), FrumError> { #[cfg(unix)] let extractor = archive::tar_xz::TarXz::new(response); #[cfg(windows)] let extractor = archive::zip::Zip::new(response); extractor .extract_into(path) .map_err(|source| FrumError::ExtractError { source })?; Ok(()) } fn package_url(mirror_url: Url, version: &Version) -> Url { debug!("pakage url"); Url::parse(&format!( "{}/{}/{}", mirror_url.as_str().trim_end_matches('/'), match version { Version::Semver(version) => format!("{}.{}", version.major, version.minor), _ => unreachable!(), }, archive(version), )) .unwrap() } #[cfg(unix)] fn archive(version: &Version) -> String { format!("ruby-{}.tar.xz", version) } #[cfg(windows)] fn archive(version: &Version) -> String { format!("ruby-{}.zip", version) } #[allow(clippy::unnecessary_wraps)] fn openssl_dir() -> Result<String, FrumError> { #[cfg(target_os = "macos")] return Ok(String::from_utf8_lossy( &Command::new("brew") .arg("--prefix") .arg("openssl") .output() .map_err(FrumError::IoError)? .stdout, ) .trim() .to_string()); #[cfg(not(target_os = "macos"))] return Ok("/usr/local".to_string()); } fn build_package( current_dir: &Path, installed_dir: &Path, configure_opts: &[String], ) -> Result<(), FrumError> { debug!("./configure {}", configure_opts.join(" ")); let mut command = Command::new("sh"); command .arg("configure") .arg(format!("--prefix={}", installed_dir.to_str().unwrap())) .args(configure_opts); // Provide a default value for --with-openssl-dir if !configure_opts .iter() .any(|opt| opt.starts_with("--with-openssl-dir")) { command.arg(format!("--with-openssl-dir={}", openssl_dir()?)); } let configure = command .current_dir(&current_dir) .output() .map_err(FrumError::IoError)?; if !configure.status.success() { return Err(FrumError::CantBuildRuby { stderr: format!( "configure failed: {}", String::from_utf8_lossy(&configure.stderr).to_string() ), }); }; debug!("make -j {}", num_cpus::get().to_string()); let make = Command::new("make") .arg("-j") .arg(num_cpus::get().to_string()) .current_dir(&current_dir) .output() .map_err(FrumError::IoError)?; if !make.status.success() { return Err(FrumError::CantBuildRuby { stderr: format!( "make failed: {}", String::from_utf8_lossy(&make.stderr).to_string() ), }); }; debug!("make install"); let make_install = Command::new("make") .arg("install") .current_dir(&current_dir) .output() .map_err(FrumError::IoError)?; if !make_install.status.success() { return Err(FrumError::CantBuildRuby { stderr: format!( "make install: {}", String::from_utf8_lossy(&make_install.stderr).to_string() ), }); }; Ok(()) } #[cfg(test)] mod tests { use super::*; use crate::command::Command; use crate::config::FrumConfig; use crate::version::Version; use tempfile::tempdir; #[test] fn test_install_second_version() { let config = FrumConfig { base_dir: Some(tempdir().unwrap().path().to_path_buf()), ..Default::default() }; Install { version: Some(InputVersion::Full(Version::Semver( semver::Version::parse("2.7.0").unwrap(), ))), configure_opts: vec![], } .apply(&config) .expect("Can't install 2.7.0"); Install { version: 
Some(InputVersion::Full(Version::Semver( semver::Version::parse("2.6.4").unwrap(), ))), configure_opts: vec![], } .apply(&config) .expect("Can't install 2.6.4"); assert_eq!( std::fs::read_link(&config.default_version_dir()) .unwrap() .components() .last(), Some(std::path::Component::Normal(std::ffi::OsStr::new("2.7.0"))) ); } #[test] fn
() { let config = FrumConfig { base_dir: Some(tempdir().unwrap().path().to_path_buf()), ..Default::default() }; Install { version: Some(InputVersion::Full(Version::Semver( semver::Version::parse("2.6.4").unwrap(), ))), configure_opts: vec![], } .apply(&config) .expect("Can't install"); assert!(config.versions_dir().join("2.6.4").exists()); assert!(config .versions_dir() .join("2.6.4") .join("bin") .join("ruby") .exists()); assert!(config.default_version_dir().exists()); } }
test_install_default_version
identifier_name
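The masked identifier in this row is the name of the second test, shown as the middle field: test_install_default_version. Together the two tests pin down the rule in Install::apply that the "default" alias is only created when none exists yet, so the first installed version keeps it even after later installs. A small Python sketch of that first-install-wins symlink rule, under the assumption that a plain directory layout and os.symlink are an acceptable stand-in for create_alias:

import os
import tempfile
from pathlib import Path

def install(versions_dir: Path, version: str) -> None:
    """First install wins: only create the 'default' link when none exists."""
    target = versions_dir / version
    target.mkdir(parents=True, exist_ok=True)
    default = versions_dir / "default"
    if not default.exists():
        os.symlink(target, default, target_is_directory=True)

base = Path(tempfile.mkdtemp())
install(base, "2.7.0")
install(base, "2.6.4")
print(os.readlink(base / "default"))   # still ends in 2.7.0, like the assert_eq! above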
install.rs
use crate::alias::create_alias; use crate::archive::{self, extract::Error as ExtractError, extract::Extract}; use crate::config::FrumConfig; use crate::input_version::InputVersion; use crate::outln; use crate::version::Version; use crate::version_file::get_user_version_for_directory; use anyhow::Result;
use log::debug; use reqwest::Url; use std::io::prelude::*; use std::path::Path; use std::path::PathBuf; use std::process::Command; use thiserror::Error; #[derive(Error, Debug)] pub enum FrumError { #[error(transparent)] HttpError(#[from] reqwest::Error), #[error(transparent)] IoError(#[from] std::io::Error), #[error("Can't find the number of cores")] FromUtf8Error(#[from] std::string::FromUtf8Error), #[error("Can't extract the file: {source:?}")] ExtractError { source: ExtractError }, #[error("The downloaded archive is empty")] TarIsEmpty, #[error("Can't find version: {version}")] VersionNotFound { version: InputVersion }, #[error("Can't list the remote versions: {source:?}")] CantListRemoteVersions { source: reqwest::Error }, #[error("Version already installed at {path:?}")] VersionAlreadyInstalled { path: PathBuf }, #[error("Can't find version in dotfiles. Please provide a version manually to the command.")] CantInferVersion, #[error("The requested version is not installable: {version}")] NotInstallableVersion { version: Version }, #[error("Can't build Ruby: {stderr}")] CantBuildRuby { stderr: String }, } pub struct Install { pub version: Option<InputVersion>, pub configure_opts: Vec<String>, } impl crate::command::Command for Install { type Error = FrumError; fn apply(&self, config: &FrumConfig) -> Result<(), Self::Error> { let current_version = self .version .clone() .or_else(|| get_user_version_for_directory(std::env::current_dir().unwrap())) .ok_or(FrumError::CantInferVersion)?; let version = match current_version.clone() { InputVersion::Full(Version::Semver(v)) => Version::Semver(v), InputVersion::Full(Version::System) => { return Err(FrumError::NotInstallableVersion { version: Version::System, }) } current_version => { let available_versions = crate::remote_ruby_index::list(&config.ruby_build_mirror) .map_err(|source| FrumError::CantListRemoteVersions { source })? .drain(..) .map(|x| x.version) .collect::<Vec<_>>(); current_version .to_version(&available_versions) .ok_or(FrumError::VersionNotFound { version: current_version, })? .clone() } }; let installations_dir = config.versions_dir(); let installation_dir = PathBuf::from(&installations_dir).join(version.to_string()); if installation_dir.exists() { return Err(FrumError::VersionAlreadyInstalled { path: installation_dir, }); } let url = package_url(config.ruby_build_mirror.clone(), &version); outln!(config#Info, "{} Downloading {}", "==>".green(), format!("{}", url).green()); let response = reqwest::blocking::get(url)?; if response.status() == 404 { return Err(FrumError::VersionNotFound { version: current_version, }); } outln!(config#Info, "{} Extracting {}", "==>".green(), archive(&version).green()); let temp_installations_dir = installations_dir.join(".downloads"); std::fs::create_dir_all(&temp_installations_dir).map_err(FrumError::IoError)?; let temp_dir = tempfile::TempDir::new_in(&temp_installations_dir) .expect("Can't generate a temp directory"); extract_archive_into(&temp_dir, response)?; outln!(config#Info, "{} Building {}", "==>".green(), format!("Ruby {}", current_version).green()); let installed_directory = std::fs::read_dir(&temp_dir) .map_err(FrumError::IoError)? .next() .ok_or(FrumError::TarIsEmpty)? 
.map_err(FrumError::IoError)?; let installed_directory = installed_directory.path(); build_package( &installed_directory, &installation_dir, &self.configure_opts, )?; if !config.default_version_dir().exists() { debug!("Use {} as the default version", current_version); create_alias(&config, "default", &version).map_err(FrumError::IoError)?; } Ok(()) } } fn extract_archive_into<P: AsRef<Path>>( path: P, response: reqwest::blocking::Response, ) -> Result<(), FrumError> { #[cfg(unix)] let extractor = archive::tar_xz::TarXz::new(response); #[cfg(windows)] let extractor = archive::zip::Zip::new(response); extractor .extract_into(path) .map_err(|source| FrumError::ExtractError { source })?; Ok(()) } fn package_url(mirror_url: Url, version: &Version) -> Url { debug!("pakage url"); Url::parse(&format!( "{}/{}/{}", mirror_url.as_str().trim_end_matches('/'), match version { Version::Semver(version) => format!("{}.{}", version.major, version.minor), _ => unreachable!(), }, archive(version), )) .unwrap() } #[cfg(unix)] fn archive(version: &Version) -> String { format!("ruby-{}.tar.xz", version) } #[cfg(windows)] fn archive(version: &Version) -> String { format!("ruby-{}.zip", version) } #[allow(clippy::unnecessary_wraps)] fn openssl_dir() -> Result<String, FrumError> { #[cfg(target_os = "macos")] return Ok(String::from_utf8_lossy( &Command::new("brew") .arg("--prefix") .arg("openssl") .output() .map_err(FrumError::IoError)? .stdout, ) .trim() .to_string()); #[cfg(not(target_os = "macos"))] return Ok("/usr/local".to_string()); } fn build_package( current_dir: &Path, installed_dir: &Path, configure_opts: &[String], ) -> Result<(), FrumError> { debug!("./configure {}", configure_opts.join(" ")); let mut command = Command::new("sh"); command .arg("configure") .arg(format!("--prefix={}", installed_dir.to_str().unwrap())) .args(configure_opts); // Provide a default value for --with-openssl-dir if !configure_opts .iter() .any(|opt| opt.starts_with("--with-openssl-dir")) { command.arg(format!("--with-openssl-dir={}", openssl_dir()?)); } let configure = command .current_dir(&current_dir) .output() .map_err(FrumError::IoError)?; if !configure.status.success() { return Err(FrumError::CantBuildRuby { stderr: format!( "configure failed: {}", String::from_utf8_lossy(&configure.stderr).to_string() ), }); }; debug!("make -j {}", num_cpus::get().to_string()); let make = Command::new("make") .arg("-j") .arg(num_cpus::get().to_string()) .current_dir(&current_dir) .output() .map_err(FrumError::IoError)?; if !make.status.success() { return Err(FrumError::CantBuildRuby { stderr: format!( "make failed: {}", String::from_utf8_lossy(&make.stderr).to_string() ), }); }; debug!("make install"); let make_install = Command::new("make") .arg("install") .current_dir(&current_dir) .output() .map_err(FrumError::IoError)?; if !make_install.status.success() { return Err(FrumError::CantBuildRuby { stderr: format!( "make install: {}", String::from_utf8_lossy(&make_install.stderr).to_string() ), }); }; Ok(()) } #[cfg(test)] mod tests { use super::*; use crate::command::Command; use crate::config::FrumConfig; use crate::version::Version; use tempfile::tempdir; #[test] fn test_install_second_version() { let config = FrumConfig { base_dir: Some(tempdir().unwrap().path().to_path_buf()), ..Default::default() }; Install { version: Some(InputVersion::Full(Version::Semver( semver::Version::parse("2.7.0").unwrap(), ))), configure_opts: vec![], } .apply(&config) .expect("Can't install 2.7.0"); Install { version: 
Some(InputVersion::Full(Version::Semver( semver::Version::parse("2.6.4").unwrap(), ))), configure_opts: vec![], } .apply(&config) .expect("Can't install 2.6.4"); assert_eq!( std::fs::read_link(&config.default_version_dir()) .unwrap() .components() .last(), Some(std::path::Component::Normal(std::ffi::OsStr::new("2.7.0"))) ); } #[test] fn test_install_default_version() { let config = FrumConfig { base_dir: Some(tempdir().unwrap().path().to_path_buf()), ..Default::default() }; Install { version: Some(InputVersion::Full(Version::Semver( semver::Version::parse("2.6.4").unwrap(), ))), configure_opts: vec![], } .apply(&config) .expect("Can't install"); assert!(config.versions_dir().join("2.6.4").exists()); assert!(config .versions_dir() .join("2.6.4") .join("bin") .join("ruby") .exists()); assert!(config.default_version_dir().exists()); } }
use colored::Colorize;
random_line_split
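The span removed from this last row is the single import use colored::Colorize;, which is what puts the .green() method on string literals in the outln! calls of these install.rs rows. A rough Python equivalent of that coloring, using the standard ANSI escape code for green (the URL in the example is made up):

def green(text):
    """Wrap text in the ANSI escape sequence for green, then reset."""
    return f"\033[32m{text}\033[0m"

print(f"{green('==>')} Downloading {green('https://example.invalid/ruby-3.2.0.tar.xz')}")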
install.rs
use crate::alias::create_alias; use crate::archive::{self, extract::Error as ExtractError, extract::Extract}; use crate::config::FrumConfig; use crate::input_version::InputVersion; use crate::outln; use crate::version::Version; use crate::version_file::get_user_version_for_directory; use anyhow::Result; use colored::Colorize; use log::debug; use reqwest::Url; use std::io::prelude::*; use std::path::Path; use std::path::PathBuf; use std::process::Command; use thiserror::Error; #[derive(Error, Debug)] pub enum FrumError { #[error(transparent)] HttpError(#[from] reqwest::Error), #[error(transparent)] IoError(#[from] std::io::Error), #[error("Can't find the number of cores")] FromUtf8Error(#[from] std::string::FromUtf8Error), #[error("Can't extract the file: {source:?}")] ExtractError { source: ExtractError }, #[error("The downloaded archive is empty")] TarIsEmpty, #[error("Can't find version: {version}")] VersionNotFound { version: InputVersion }, #[error("Can't list the remote versions: {source:?}")] CantListRemoteVersions { source: reqwest::Error }, #[error("Version already installed at {path:?}")] VersionAlreadyInstalled { path: PathBuf }, #[error("Can't find version in dotfiles. Please provide a version manually to the command.")] CantInferVersion, #[error("The requested version is not installable: {version}")] NotInstallableVersion { version: Version }, #[error("Can't build Ruby: {stderr}")] CantBuildRuby { stderr: String }, } pub struct Install { pub version: Option<InputVersion>, pub configure_opts: Vec<String>, } impl crate::command::Command for Install { type Error = FrumError; fn apply(&self, config: &FrumConfig) -> Result<(), Self::Error> { let current_version = self .version .clone() .or_else(|| get_user_version_for_directory(std::env::current_dir().unwrap())) .ok_or(FrumError::CantInferVersion)?; let version = match current_version.clone() { InputVersion::Full(Version::Semver(v)) => Version::Semver(v), InputVersion::Full(Version::System) => { return Err(FrumError::NotInstallableVersion { version: Version::System, }) } current_version => { let available_versions = crate::remote_ruby_index::list(&config.ruby_build_mirror) .map_err(|source| FrumError::CantListRemoteVersions { source })? .drain(..) .map(|x| x.version) .collect::<Vec<_>>(); current_version .to_version(&available_versions) .ok_or(FrumError::VersionNotFound { version: current_version, })? .clone() } }; let installations_dir = config.versions_dir(); let installation_dir = PathBuf::from(&installations_dir).join(version.to_string()); if installation_dir.exists() { return Err(FrumError::VersionAlreadyInstalled { path: installation_dir, }); } let url = package_url(config.ruby_build_mirror.clone(), &version); outln!(config#Info, "{} Downloading {}", "==>".green(), format!("{}", url).green()); let response = reqwest::blocking::get(url)?; if response.status() == 404 { return Err(FrumError::VersionNotFound { version: current_version, }); } outln!(config#Info, "{} Extracting {}", "==>".green(), archive(&version).green()); let temp_installations_dir = installations_dir.join(".downloads"); std::fs::create_dir_all(&temp_installations_dir).map_err(FrumError::IoError)?; let temp_dir = tempfile::TempDir::new_in(&temp_installations_dir) .expect("Can't generate a temp directory"); extract_archive_into(&temp_dir, response)?; outln!(config#Info, "{} Building {}", "==>".green(), format!("Ruby {}", current_version).green()); let installed_directory = std::fs::read_dir(&temp_dir) .map_err(FrumError::IoError)? 
.next() .ok_or(FrumError::TarIsEmpty)? .map_err(FrumError::IoError)?; let installed_directory = installed_directory.path(); build_package( &installed_directory, &installation_dir, &self.configure_opts, )?; if !config.default_version_dir().exists() { debug!("Use {} as the default version", current_version); create_alias(&config, "default", &version).map_err(FrumError::IoError)?; } Ok(()) } } fn extract_archive_into<P: AsRef<Path>>( path: P, response: reqwest::blocking::Response, ) -> Result<(), FrumError>
fn package_url(mirror_url: Url, version: &Version) -> Url { debug!("pakage url"); Url::parse(&format!( "{}/{}/{}", mirror_url.as_str().trim_end_matches('/'), match version { Version::Semver(version) => format!("{}.{}", version.major, version.minor), _ => unreachable!(), }, archive(version), )) .unwrap() } #[cfg(unix)] fn archive(version: &Version) -> String { format!("ruby-{}.tar.xz", version) } #[cfg(windows)] fn archive(version: &Version) -> String { format!("ruby-{}.zip", version) } #[allow(clippy::unnecessary_wraps)] fn openssl_dir() -> Result<String, FrumError> { #[cfg(target_os = "macos")] return Ok(String::from_utf8_lossy( &Command::new("brew") .arg("--prefix") .arg("openssl") .output() .map_err(FrumError::IoError)? .stdout, ) .trim() .to_string()); #[cfg(not(target_os = "macos"))] return Ok("/usr/local".to_string()); } fn build_package( current_dir: &Path, installed_dir: &Path, configure_opts: &[String], ) -> Result<(), FrumError> { debug!("./configure {}", configure_opts.join(" ")); let mut command = Command::new("sh"); command .arg("configure") .arg(format!("--prefix={}", installed_dir.to_str().unwrap())) .args(configure_opts); // Provide a default value for --with-openssl-dir if !configure_opts .iter() .any(|opt| opt.starts_with("--with-openssl-dir")) { command.arg(format!("--with-openssl-dir={}", openssl_dir()?)); } let configure = command .current_dir(&current_dir) .output() .map_err(FrumError::IoError)?; if !configure.status.success() { return Err(FrumError::CantBuildRuby { stderr: format!( "configure failed: {}", String::from_utf8_lossy(&configure.stderr).to_string() ), }); }; debug!("make -j {}", num_cpus::get().to_string()); let make = Command::new("make") .arg("-j") .arg(num_cpus::get().to_string()) .current_dir(&current_dir) .output() .map_err(FrumError::IoError)?; if !make.status.success() { return Err(FrumError::CantBuildRuby { stderr: format!( "make failed: {}", String::from_utf8_lossy(&make.stderr).to_string() ), }); }; debug!("make install"); let make_install = Command::new("make") .arg("install") .current_dir(&current_dir) .output() .map_err(FrumError::IoError)?; if !make_install.status.success() { return Err(FrumError::CantBuildRuby { stderr: format!( "make install: {}", String::from_utf8_lossy(&make_install.stderr).to_string() ), }); }; Ok(()) } #[cfg(test)] mod tests { use super::*; use crate::command::Command; use crate::config::FrumConfig; use crate::version::Version; use tempfile::tempdir; #[test] fn test_install_second_version() { let config = FrumConfig { base_dir: Some(tempdir().unwrap().path().to_path_buf()), ..Default::default() }; Install { version: Some(InputVersion::Full(Version::Semver( semver::Version::parse("2.7.0").unwrap(), ))), configure_opts: vec![], } .apply(&config) .expect("Can't install 2.7.0"); Install { version: Some(InputVersion::Full(Version::Semver( semver::Version::parse("2.6.4").unwrap(), ))), configure_opts: vec![], } .apply(&config) .expect("Can't install 2.6.4"); assert_eq!( std::fs::read_link(&config.default_version_dir()) .unwrap() .components() .last(), Some(std::path::Component::Normal(std::ffi::OsStr::new("2.7.0"))) ); } #[test] fn test_install_default_version() { let config = FrumConfig { base_dir: Some(tempdir().unwrap().path().to_path_buf()), ..Default::default() }; Install { version: Some(InputVersion::Full(Version::Semver( semver::Version::parse("2.6.4").unwrap(), ))), configure_opts: vec![], } .apply(&config) .expect("Can't install"); assert!(config.versions_dir().join("2.6.4").exists()); assert!(config .versions_dir() 
.join("2.6.4") .join("bin") .join("ruby") .exists()); assert!(config.default_version_dir().exists()); } }
{ #[cfg(unix)] let extractor = archive::tar_xz::TarXz::new(response); #[cfg(windows)] let extractor = archive::zip::Zip::new(response); extractor .extract_into(path) .map_err(|source| FrumError::ExtractError { source })?; Ok(()) }
identifier_body
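The package_url and archive helpers in the Rust record above compose the download location as <mirror>/<major>.<minor>/ruby-<version>.tar.xz on Unix (ruby-<version>.zip on Windows). A minimal Go sketch of that URL layout; the mirror root used below is an assumed example value, not something read from frum's configuration:

// Sketch of the archive URL layout used by the install command above.
// The mirror root is an assumed example; frum takes it from its config.
package main

import (
	"fmt"
	"strings"
)

func packageURL(mirror, version string) string {
	parts := strings.SplitN(version, ".", 3) // "2.7.0" -> ["2", "7", "0"]
	return fmt.Sprintf("%s/%s.%s/ruby-%s.tar.xz",
		strings.TrimRight(mirror, "/"), parts[0], parts[1], version)
}

func main() {
	// Prints: https://cache.ruby-lang.org/pub/ruby/2.7/ruby-2.7.0.tar.xz
	fmt.Println(packageURL("https://cache.ruby-lang.org/pub/ruby/", "2.7.0"))
}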
fse_encoder.go
// Copyright 2019+ Klaus Post. All rights reserved. // License information can be found in the LICENSE file. // Based on work by Yann Collet, released under BSD License. package zstd import ( "errors" "fmt" "math" ) const ( // For encoding we only support up to maxEncTableLog = 8 maxEncTablesize = 1 << maxTableLog maxEncTableMask = (1 << maxTableLog) - 1 minEncTablelog = 5 maxEncSymbolValue = maxMatchLengthSymbol ) // Scratch provides temporary storage for compression and decompression. type fseEncoder struct { symbolLen uint16 // Length of active part of the symbol table. actualTableLog uint8 // Selected tablelog. ct cTable // Compression tables. maxCount int // count of the most probable symbol zeroBits bool // no bits has prob > 50%. clearCount bool // clear count useRLE bool // This encoder is for RLE preDefined bool // This encoder is predefined. reUsed bool // Set to know when the encoder has been reused. rleVal uint8 // RLE Symbol maxBits uint8 // Maximum output bits after transform. // TODO: Technically zstd should be fine with 64 bytes. count [256]uint32 norm [256]int16 } // cTable contains tables used for compression. type cTable struct { tableSymbol []byte stateTable []uint16 symbolTT []symbolTransform } // symbolTransform contains the state transform for a symbol. type symbolTransform struct { deltaNbBits uint32 deltaFindState int16 outBits uint8 } // String prints values as a human readable string. func (s symbolTransform) String() string { return fmt.Sprintf("{deltabits: %08x, findstate:%d outbits:%d}", s.deltaNbBits, s.deltaFindState, s.outBits) } // Histogram allows to populate the histogram and skip that step in the compression, // It otherwise allows to inspect the histogram when compression is done. // To indicate that you have populated the histogram call HistogramFinished // with the value of the highest populated symbol, as well as the number of entries // in the most populated entry. These are accepted at face value. func (s *fseEncoder) Histogram() *[256]uint32 { return &s.count } // HistogramFinished can be called to indicate that the histogram has been populated. // maxSymbol is the index of the highest set symbol of the next data segment. // maxCount is the number of entries in the most populated entry. // These are accepted at face value. func (s *fseEncoder) HistogramFinished(maxSymbol uint8, maxCount int) { s.maxCount = maxCount s.symbolLen = uint16(maxSymbol) + 1 s.clearCount = maxCount != 0 } // allocCtable will allocate tables needed for compression. // If existing tables a re big enough, they are simply re-used. func (s *fseEncoder) allocCtable() { tableSize := 1 << s.actualTableLog // get tableSymbol that is big enough. if cap(s.ct.tableSymbol) < tableSize { s.ct.tableSymbol = make([]byte, tableSize) } s.ct.tableSymbol = s.ct.tableSymbol[:tableSize] ctSize := tableSize if cap(s.ct.stateTable) < ctSize { s.ct.stateTable = make([]uint16, ctSize) } s.ct.stateTable = s.ct.stateTable[:ctSize] if cap(s.ct.symbolTT) < 256 { s.ct.symbolTT = make([]symbolTransform, 256) } s.ct.symbolTT = s.ct.symbolTT[:256] } // buildCTable will populate the compression table so it is ready to be used. 
func (s *fseEncoder) buildCTable() error { tableSize := uint32(1 << s.actualTableLog) highThreshold := tableSize - 1 var cumul [256]int16 s.allocCtable() tableSymbol := s.ct.tableSymbol[:tableSize] // symbol start positions { cumul[0] = 0 for ui, v := range s.norm[:s.symbolLen-1] { u := byte(ui) // one less than reference if v == -1 { // Low proba symbol cumul[u+1] = cumul[u] + 1 tableSymbol[highThreshold] = u highThreshold-- } else { cumul[u+1] = cumul[u] + v } } // Encode last symbol separately to avoid overflowing u u := int(s.symbolLen - 1) v := s.norm[s.symbolLen-1] if v == -1 { // Low proba symbol cumul[u+1] = cumul[u] + 1 tableSymbol[highThreshold] = byte(u) highThreshold-- } else { cumul[u+1] = cumul[u] + v } if uint32(cumul[s.symbolLen]) != tableSize { return fmt.Errorf("internal error: expected cumul[s.symbolLen] (%d) == tableSize (%d)", cumul[s.symbolLen], tableSize) } cumul[s.symbolLen] = int16(tableSize) + 1 } // Spread symbols s.zeroBits = false { step := tableStep(tableSize) tableMask := tableSize - 1 var position uint32 // if any symbol > largeLimit, we may have 0 bits output. largeLimit := int16(1 << (s.actualTableLog - 1)) for ui, v := range s.norm[:s.symbolLen] { symbol := byte(ui) if v > largeLimit { s.zeroBits = true } for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ { tableSymbol[position] = symbol position = (position + step) & tableMask for position > highThreshold { position = (position + step) & tableMask } /* Low proba area */ } } // Check if we have gone through all positions if position != 0 { return errors.New("position!=0") } } // Build table table := s.ct.stateTable { tsi := int(tableSize) for u, v := range tableSymbol { // TableU16 : sorted by symbol order; gives next state value table[cumul[v]] = uint16(tsi + u) cumul[v]++ } } // Build Symbol Transformation Table { total := int16(0) symbolTT := s.ct.symbolTT[:s.symbolLen] tableLog := s.actualTableLog tl := (uint32(tableLog) << 16) - (1 << tableLog) for i, v := range s.norm[:s.symbolLen] { switch v { case 0: case -1, 1: symbolTT[i].deltaNbBits = tl symbolTT[i].deltaFindState = total - 1 total++ default: maxBitsOut := uint32(tableLog) - highBit(uint32(v-1)) minStatePlus := uint32(v) << maxBitsOut symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus symbolTT[i].deltaFindState = total - v total += v } } if total != int16(tableSize) { return fmt.Errorf("total mismatch %d (got) != %d (want)", total, tableSize) } } return nil } var rtbTable = [...]uint32{0, 473195, 504333, 520860, 550000, 700000, 750000, 830000} func (s *fseEncoder)
(val byte) { s.allocCtable() s.actualTableLog = 0 s.ct.stateTable = s.ct.stateTable[:1] s.ct.symbolTT[val] = symbolTransform{ deltaFindState: 0, deltaNbBits: 0, } if debugEncoder { println("setRLE: val", val, "symbolTT", s.ct.symbolTT[val]) } s.rleVal = val s.useRLE = true } // setBits will set output bits for the transform. // if nil is provided, the number of bits is equal to the index. func (s *fseEncoder) setBits(transform []byte) { if s.reUsed || s.preDefined { return } if s.useRLE { if transform == nil { s.ct.symbolTT[s.rleVal].outBits = s.rleVal s.maxBits = s.rleVal return } s.maxBits = transform[s.rleVal] s.ct.symbolTT[s.rleVal].outBits = s.maxBits return } if transform == nil { for i := range s.ct.symbolTT[:s.symbolLen] { s.ct.symbolTT[i].outBits = uint8(i) } s.maxBits = uint8(s.symbolLen - 1) return } s.maxBits = 0 for i, v := range transform[:s.symbolLen] { s.ct.symbolTT[i].outBits = v if v > s.maxBits { // We could assume bits always going up, but we play safe. s.maxBits = v } } } // normalizeCount will normalize the count of the symbols so // the total is equal to the table size. // If successful, compression tables will also be made ready. func (s *fseEncoder) normalizeCount(length int) error { if s.reUsed { return nil } s.optimalTableLog(length) var ( tableLog = s.actualTableLog scale = 62 - uint64(tableLog) step = (1 << 62) / uint64(length) vStep = uint64(1) << (scale - 20) stillToDistribute = int16(1 << tableLog) largest int largestP int16 lowThreshold = (uint32)(length >> tableLog) ) if s.maxCount == length { s.useRLE = true return nil } s.useRLE = false for i, cnt := range s.count[:s.symbolLen] { // already handled // if (count[s] == s.length) return 0; /* rle special case */ if cnt == 0 { s.norm[i] = 0 continue } if cnt <= lowThreshold { s.norm[i] = -1 stillToDistribute-- } else { proba := (int16)((uint64(cnt) * step) >> scale) if proba < 8 { restToBeat := vStep * uint64(rtbTable[proba]) v := uint64(cnt)*step - (uint64(proba) << scale) if v > restToBeat { proba++ } } if proba > largestP { largestP = proba largest = i } s.norm[i] = proba stillToDistribute -= proba } } if -stillToDistribute >= (s.norm[largest] >> 1) { // corner case, need another normalization method err := s.normalizeCount2(length) if err != nil { return err } if debugAsserts { err = s.validateNorm() if err != nil { return err } } return s.buildCTable() } s.norm[largest] += stillToDistribute if debugAsserts { err := s.validateNorm() if err != nil { return err } } return s.buildCTable() } // Secondary normalization method. // To be used when primary method fails. 
func (s *fseEncoder) normalizeCount2(length int) error { const notYetAssigned = -2 var ( distributed uint32 total = uint32(length) tableLog = s.actualTableLog lowThreshold = total >> tableLog lowOne = (total * 3) >> (tableLog + 1) ) for i, cnt := range s.count[:s.symbolLen] { if cnt == 0 { s.norm[i] = 0 continue } if cnt <= lowThreshold { s.norm[i] = -1 distributed++ total -= cnt continue } if cnt <= lowOne { s.norm[i] = 1 distributed++ total -= cnt continue } s.norm[i] = notYetAssigned } toDistribute := (1 << tableLog) - distributed if (total / toDistribute) > lowOne { // risk of rounding to zero lowOne = (total * 3) / (toDistribute * 2) for i, cnt := range s.count[:s.symbolLen] { if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) { s.norm[i] = 1 distributed++ total -= cnt continue } } toDistribute = (1 << tableLog) - distributed } if distributed == uint32(s.symbolLen)+1 { // all values are pretty poor; // probably incompressible data (should have already been detected); // find max, then give all remaining points to max var maxV int var maxC uint32 for i, cnt := range s.count[:s.symbolLen] { if cnt > maxC { maxV = i maxC = cnt } } s.norm[maxV] += int16(toDistribute) return nil } if total == 0 { // all of the symbols were low enough for the lowOne or lowThreshold for i := uint32(0); toDistribute > 0; i = (i + 1) % (uint32(s.symbolLen)) { if s.norm[i] > 0 { toDistribute-- s.norm[i]++ } } return nil } var ( vStepLog = 62 - uint64(tableLog) mid = uint64((1 << (vStepLog - 1)) - 1) rStep = (((1 << vStepLog) * uint64(toDistribute)) + mid) / uint64(total) // scale on remaining tmpTotal = mid ) for i, cnt := range s.count[:s.symbolLen] { if s.norm[i] == notYetAssigned { var ( end = tmpTotal + uint64(cnt)*rStep sStart = uint32(tmpTotal >> vStepLog) sEnd = uint32(end >> vStepLog) weight = sEnd - sStart ) if weight < 1 { return errors.New("weight < 1") } s.norm[i] = int16(weight) tmpTotal = end } } return nil } // optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog func (s *fseEncoder) optimalTableLog(length int) { tableLog := uint8(maxEncTableLog) minBitsSrc := highBit(uint32(length)) + 1 minBitsSymbols := highBit(uint32(s.symbolLen-1)) + 2 minBits := uint8(minBitsSymbols) if minBitsSrc < minBitsSymbols { minBits = uint8(minBitsSrc) } maxBitsSrc := uint8(highBit(uint32(length-1))) - 2 if maxBitsSrc < tableLog { // Accuracy can be reduced tableLog = maxBitsSrc } if minBits > tableLog { tableLog = minBits } // Need a minimum to safely represent all symbol values if tableLog < minEncTablelog { tableLog = minEncTablelog } if tableLog > maxEncTableLog { tableLog = maxEncTableLog } s.actualTableLog = tableLog } // validateNorm validates the normalized histogram table. func (s *fseEncoder) validateNorm() (err error) { var total int for _, v := range s.norm[:s.symbolLen] { if v >= 0 { total += int(v) } else { total -= int(v) } } defer func() { if err == nil { return } fmt.Printf("selected TableLog: %d, Symbol length: %d\n", s.actualTableLog, s.symbolLen) for i, v := range s.norm[:s.symbolLen] { fmt.Printf("%3d: %5d -> %4d \n", i, s.count[i], v) } }() if total != (1 << s.actualTableLog) { return fmt.Errorf("warning: Total == %d != %d", total, 1<<s.actualTableLog) } for i, v := range s.count[s.symbolLen:] { if v != 0 { return fmt.Errorf("warning: Found symbol out of range, %d after cut", i) } } return nil } // writeCount will write the normalized histogram count to header. // This is read back by readNCount. 
func (s *fseEncoder) writeCount(out []byte) ([]byte, error) { if s.useRLE { return append(out, s.rleVal), nil } if s.preDefined || s.reUsed { // Never write predefined. return out, nil } var ( tableLog = s.actualTableLog tableSize = 1 << tableLog previous0 bool charnum uint16 // maximum header size plus 2 extra bytes for final output if bitCount == 0. maxHeaderSize = ((int(s.symbolLen) * int(tableLog)) >> 3) + 3 + 2 // Write Table Size bitStream = uint32(tableLog - minEncTablelog) bitCount = uint(4) remaining = int16(tableSize + 1) /* +1 for extra accuracy */ threshold = int16(tableSize) nbBits = uint(tableLog + 1) outP = len(out) ) if cap(out) < outP+maxHeaderSize { out = append(out, make([]byte, maxHeaderSize*3)...) out = out[:len(out)-maxHeaderSize*3] } out = out[:outP+maxHeaderSize] // stops at 1 for remaining > 1 { if previous0 { start := charnum for s.norm[charnum] == 0 { charnum++ } for charnum >= start+24 { start += 24 bitStream += uint32(0xFFFF) << bitCount out[outP] = byte(bitStream) out[outP+1] = byte(bitStream >> 8) outP += 2 bitStream >>= 16 } for charnum >= start+3 { start += 3 bitStream += 3 << bitCount bitCount += 2 } bitStream += uint32(charnum-start) << bitCount bitCount += 2 if bitCount > 16 { out[outP] = byte(bitStream) out[outP+1] = byte(bitStream >> 8) outP += 2 bitStream >>= 16 bitCount -= 16 } } count := s.norm[charnum] charnum++ max := (2*threshold - 1) - remaining if count < 0 { remaining += count } else { remaining -= count } count++ // +1 for extra accuracy if count >= threshold { count += max // [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[ } bitStream += uint32(count) << bitCount bitCount += nbBits if count < max { bitCount-- } previous0 = count == 1 if remaining < 1 { return nil, errors.New("internal error: remaining < 1") } for remaining < threshold { nbBits-- threshold >>= 1 } if bitCount > 16 { out[outP] = byte(bitStream) out[outP+1] = byte(bitStream >> 8) outP += 2 bitStream >>= 16 bitCount -= 16 } } if outP+2 > len(out) { return nil, fmt.Errorf("internal error: %d > %d, maxheader: %d, sl: %d, tl: %d, normcount: %v", outP+2, len(out), maxHeaderSize, s.symbolLen, int(tableLog), s.norm[:s.symbolLen]) } out[outP] = byte(bitStream) out[outP+1] = byte(bitStream >> 8) outP += int((bitCount + 7) / 8) if charnum > s.symbolLen { return nil, errors.New("internal error: charnum > s.symbolLen") } return out[:outP], nil } // Approximate symbol cost, as fractional value, using fixed-point format (accuracyLog fractional bits) // note 1 : assume symbolValue is valid (<= maxSymbolValue) // note 2 : if freq[symbolValue]==0, @return a fake cost of tableLog+1 bits * func (s *fseEncoder) bitCost(symbolValue uint8, accuracyLog uint32) uint32 { minNbBits := s.ct.symbolTT[symbolValue].deltaNbBits >> 16 threshold := (minNbBits + 1) << 16 if debugAsserts { if !(s.actualTableLog < 16) { panic("!s.actualTableLog < 16") } // ensure enough room for renormalization double shift if !(uint8(accuracyLog) < 31-s.actualTableLog) { panic("!uint8(accuracyLog) < 31-s.actualTableLog") } } tableSize := uint32(1) << s.actualTableLog deltaFromThreshold := threshold - (s.ct.symbolTT[symbolValue].deltaNbBits + tableSize) // linear interpolation (very approximate) normalizedDeltaFromThreshold := (deltaFromThreshold << accuracyLog) >> s.actualTableLog bitMultiplier := uint32(1) << accuracyLog if debugAsserts { if s.ct.symbolTT[symbolValue].deltaNbBits+tableSize > threshold { panic("s.ct.symbolTT[symbolValue].deltaNbBits+tableSize > threshold") } if normalizedDeltaFromThreshold > 
bitMultiplier { panic("normalizedDeltaFromThreshold > bitMultiplier") } } return (minNbBits+1)*bitMultiplier - normalizedDeltaFromThreshold } // Returns the cost in bits of encoding the distribution in count using ctable. // Histogram should only be up to the last non-zero symbol. // Returns an -1 if ctable cannot represent all the symbols in count. func (s *fseEncoder) approxSize(hist []uint32) uint32 { if int(s.symbolLen) < len(hist) { // More symbols than we have. return math.MaxUint32 } if s.useRLE { // We will never reuse RLE encoders. return math.MaxUint32 } const kAccuracyLog = 8 badCost := (uint32(s.actualTableLog) + 1) << kAccuracyLog var cost uint32 for i, v := range hist { if v == 0 { continue } if s.norm[i] == 0 { return math.MaxUint32 } bitCost := s.bitCost(uint8(i), kAccuracyLog) if bitCost > badCost { return math.MaxUint32 } cost += v * bitCost } return cost >> kAccuracyLog } // maxHeaderSize returns the maximum header size in bits. // This is not exact size, but we want a penalty for new tables anyway. func (s *fseEncoder) maxHeaderSize() uint32 { if s.preDefined { return 0 } if s.useRLE { return 8 } return (((uint32(s.symbolLen) * uint32(s.actualTableLog)) >> 3) + 3) * 8 } // cState contains the compression state of a stream. type cState struct { bw *bitWriter stateTable []uint16 state uint16 } // init will initialize the compression state to the first symbol of the stream. func (c *cState) init(bw *bitWriter, ct *cTable, first symbolTransform) { c.bw = bw c.stateTable = ct.stateTable if len(c.stateTable) == 1 { // RLE c.stateTable[0] = uint16(0) c.state = 0 return } nbBitsOut := (first.deltaNbBits + (1 << 15)) >> 16 im := int32((nbBitsOut << 16) - first.deltaNbBits) lu := (im >> nbBitsOut) + int32(first.deltaFindState) c.state = c.stateTable[lu] } // flush will write the tablelog to the output and flush the remaining full bytes. func (c *cState) flush(tableLog uint8) { c.bw.flush32() c.bw.addBits16NC(c.state, tableLog) }
setRLE
identifier_name
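buildCTable in the Go record above spreads each symbol norm[s] times through the table at a fixed stride, wrapping with a mask, while slots above highThreshold stay reserved for the low-probability (-1) symbols. A self-contained sketch of that spreading step on a toy 32-entry table; the step formula is an assumption here (tableStep is defined elsewhere in the package), and the -1/highThreshold handling is omitted:

// Toy version of the FSE symbol-spreading loop in buildCTable.
// Assumption: step = tableSize/2 + tableSize/8 + 3, the usual FSE choice.
package main

import "fmt"

func spread(norm []int16, tableLog uint) []byte {
	tableSize := uint32(1) << tableLog
	tableMask := tableSize - 1
	step := (tableSize >> 1) + (tableSize >> 3) + 3
	table := make([]byte, tableSize)
	pos := uint32(0)
	for sym, count := range norm {
		for i := int16(0); i < count; i++ {
			table[pos] = byte(sym) // each symbol occupies norm[sym] slots
			pos = (pos + step) & tableMask
		}
	}
	return table
}

func main() {
	// Three symbols with normalized counts 16, 10 and 6 in a 32-entry table.
	fmt.Println(spread([]int16{16, 10, 6}, 5))
}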
fse_encoder.go
// Copyright 2019+ Klaus Post. All rights reserved. // License information can be found in the LICENSE file. // Based on work by Yann Collet, released under BSD License. package zstd import ( "errors" "fmt" "math" ) const ( // For encoding we only support up to maxEncTableLog = 8 maxEncTablesize = 1 << maxTableLog maxEncTableMask = (1 << maxTableLog) - 1 minEncTablelog = 5 maxEncSymbolValue = maxMatchLengthSymbol ) // Scratch provides temporary storage for compression and decompression. type fseEncoder struct { symbolLen uint16 // Length of active part of the symbol table. actualTableLog uint8 // Selected tablelog. ct cTable // Compression tables. maxCount int // count of the most probable symbol zeroBits bool // no bits has prob > 50%. clearCount bool // clear count useRLE bool // This encoder is for RLE preDefined bool // This encoder is predefined. reUsed bool // Set to know when the encoder has been reused. rleVal uint8 // RLE Symbol maxBits uint8 // Maximum output bits after transform. // TODO: Technically zstd should be fine with 64 bytes. count [256]uint32 norm [256]int16 } // cTable contains tables used for compression. type cTable struct { tableSymbol []byte stateTable []uint16 symbolTT []symbolTransform } // symbolTransform contains the state transform for a symbol. type symbolTransform struct { deltaNbBits uint32 deltaFindState int16 outBits uint8 } // String prints values as a human readable string. func (s symbolTransform) String() string { return fmt.Sprintf("{deltabits: %08x, findstate:%d outbits:%d}", s.deltaNbBits, s.deltaFindState, s.outBits) } // Histogram allows to populate the histogram and skip that step in the compression, // It otherwise allows to inspect the histogram when compression is done. // To indicate that you have populated the histogram call HistogramFinished // with the value of the highest populated symbol, as well as the number of entries // in the most populated entry. These are accepted at face value. func (s *fseEncoder) Histogram() *[256]uint32 { return &s.count } // HistogramFinished can be called to indicate that the histogram has been populated. // maxSymbol is the index of the highest set symbol of the next data segment. // maxCount is the number of entries in the most populated entry. // These are accepted at face value. func (s *fseEncoder) HistogramFinished(maxSymbol uint8, maxCount int) { s.maxCount = maxCount s.symbolLen = uint16(maxSymbol) + 1 s.clearCount = maxCount != 0 } // allocCtable will allocate tables needed for compression. // If existing tables a re big enough, they are simply re-used. func (s *fseEncoder) allocCtable() { tableSize := 1 << s.actualTableLog // get tableSymbol that is big enough. if cap(s.ct.tableSymbol) < tableSize { s.ct.tableSymbol = make([]byte, tableSize) } s.ct.tableSymbol = s.ct.tableSymbol[:tableSize] ctSize := tableSize if cap(s.ct.stateTable) < ctSize { s.ct.stateTable = make([]uint16, ctSize) } s.ct.stateTable = s.ct.stateTable[:ctSize] if cap(s.ct.symbolTT) < 256 { s.ct.symbolTT = make([]symbolTransform, 256) } s.ct.symbolTT = s.ct.symbolTT[:256] } // buildCTable will populate the compression table so it is ready to be used. 
func (s *fseEncoder) buildCTable() error { tableSize := uint32(1 << s.actualTableLog) highThreshold := tableSize - 1 var cumul [256]int16 s.allocCtable() tableSymbol := s.ct.tableSymbol[:tableSize] // symbol start positions { cumul[0] = 0 for ui, v := range s.norm[:s.symbolLen-1] { u := byte(ui) // one less than reference if v == -1 { // Low proba symbol cumul[u+1] = cumul[u] + 1 tableSymbol[highThreshold] = u highThreshold-- } else { cumul[u+1] = cumul[u] + v } } // Encode last symbol separately to avoid overflowing u u := int(s.symbolLen - 1) v := s.norm[s.symbolLen-1] if v == -1 { // Low proba symbol cumul[u+1] = cumul[u] + 1 tableSymbol[highThreshold] = byte(u) highThreshold-- } else { cumul[u+1] = cumul[u] + v } if uint32(cumul[s.symbolLen]) != tableSize { return fmt.Errorf("internal error: expected cumul[s.symbolLen] (%d) == tableSize (%d)", cumul[s.symbolLen], tableSize) } cumul[s.symbolLen] = int16(tableSize) + 1 } // Spread symbols s.zeroBits = false { step := tableStep(tableSize) tableMask := tableSize - 1 var position uint32 // if any symbol > largeLimit, we may have 0 bits output. largeLimit := int16(1 << (s.actualTableLog - 1)) for ui, v := range s.norm[:s.symbolLen] { symbol := byte(ui) if v > largeLimit { s.zeroBits = true } for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ { tableSymbol[position] = symbol position = (position + step) & tableMask for position > highThreshold { position = (position + step) & tableMask } /* Low proba area */ } } // Check if we have gone through all positions if position != 0 { return errors.New("position!=0") } } // Build table table := s.ct.stateTable { tsi := int(tableSize) for u, v := range tableSymbol { // TableU16 : sorted by symbol order; gives next state value table[cumul[v]] = uint16(tsi + u) cumul[v]++ } } // Build Symbol Transformation Table { total := int16(0) symbolTT := s.ct.symbolTT[:s.symbolLen] tableLog := s.actualTableLog tl := (uint32(tableLog) << 16) - (1 << tableLog) for i, v := range s.norm[:s.symbolLen] { switch v { case 0: case -1, 1: symbolTT[i].deltaNbBits = tl symbolTT[i].deltaFindState = total - 1 total++ default: maxBitsOut := uint32(tableLog) - highBit(uint32(v-1)) minStatePlus := uint32(v) << maxBitsOut symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus symbolTT[i].deltaFindState = total - v total += v } } if total != int16(tableSize) { return fmt.Errorf("total mismatch %d (got) != %d (want)", total, tableSize) } } return nil } var rtbTable = [...]uint32{0, 473195, 504333, 520860, 550000, 700000, 750000, 830000} func (s *fseEncoder) setRLE(val byte) { s.allocCtable() s.actualTableLog = 0 s.ct.stateTable = s.ct.stateTable[:1] s.ct.symbolTT[val] = symbolTransform{ deltaFindState: 0, deltaNbBits: 0, } if debugEncoder { println("setRLE: val", val, "symbolTT", s.ct.symbolTT[val]) } s.rleVal = val s.useRLE = true } // setBits will set output bits for the transform. // if nil is provided, the number of bits is equal to the index. func (s *fseEncoder) setBits(transform []byte)
// normalizeCount will normalize the count of the symbols so // the total is equal to the table size. // If successful, compression tables will also be made ready. func (s *fseEncoder) normalizeCount(length int) error { if s.reUsed { return nil } s.optimalTableLog(length) var ( tableLog = s.actualTableLog scale = 62 - uint64(tableLog) step = (1 << 62) / uint64(length) vStep = uint64(1) << (scale - 20) stillToDistribute = int16(1 << tableLog) largest int largestP int16 lowThreshold = (uint32)(length >> tableLog) ) if s.maxCount == length { s.useRLE = true return nil } s.useRLE = false for i, cnt := range s.count[:s.symbolLen] { // already handled // if (count[s] == s.length) return 0; /* rle special case */ if cnt == 0 { s.norm[i] = 0 continue } if cnt <= lowThreshold { s.norm[i] = -1 stillToDistribute-- } else { proba := (int16)((uint64(cnt) * step) >> scale) if proba < 8 { restToBeat := vStep * uint64(rtbTable[proba]) v := uint64(cnt)*step - (uint64(proba) << scale) if v > restToBeat { proba++ } } if proba > largestP { largestP = proba largest = i } s.norm[i] = proba stillToDistribute -= proba } } if -stillToDistribute >= (s.norm[largest] >> 1) { // corner case, need another normalization method err := s.normalizeCount2(length) if err != nil { return err } if debugAsserts { err = s.validateNorm() if err != nil { return err } } return s.buildCTable() } s.norm[largest] += stillToDistribute if debugAsserts { err := s.validateNorm() if err != nil { return err } } return s.buildCTable() } // Secondary normalization method. // To be used when primary method fails. func (s *fseEncoder) normalizeCount2(length int) error { const notYetAssigned = -2 var ( distributed uint32 total = uint32(length) tableLog = s.actualTableLog lowThreshold = total >> tableLog lowOne = (total * 3) >> (tableLog + 1) ) for i, cnt := range s.count[:s.symbolLen] { if cnt == 0 { s.norm[i] = 0 continue } if cnt <= lowThreshold { s.norm[i] = -1 distributed++ total -= cnt continue } if cnt <= lowOne { s.norm[i] = 1 distributed++ total -= cnt continue } s.norm[i] = notYetAssigned } toDistribute := (1 << tableLog) - distributed if (total / toDistribute) > lowOne { // risk of rounding to zero lowOne = (total * 3) / (toDistribute * 2) for i, cnt := range s.count[:s.symbolLen] { if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) { s.norm[i] = 1 distributed++ total -= cnt continue } } toDistribute = (1 << tableLog) - distributed } if distributed == uint32(s.symbolLen)+1 { // all values are pretty poor; // probably incompressible data (should have already been detected); // find max, then give all remaining points to max var maxV int var maxC uint32 for i, cnt := range s.count[:s.symbolLen] { if cnt > maxC { maxV = i maxC = cnt } } s.norm[maxV] += int16(toDistribute) return nil } if total == 0 { // all of the symbols were low enough for the lowOne or lowThreshold for i := uint32(0); toDistribute > 0; i = (i + 1) % (uint32(s.symbolLen)) { if s.norm[i] > 0 { toDistribute-- s.norm[i]++ } } return nil } var ( vStepLog = 62 - uint64(tableLog) mid = uint64((1 << (vStepLog - 1)) - 1) rStep = (((1 << vStepLog) * uint64(toDistribute)) + mid) / uint64(total) // scale on remaining tmpTotal = mid ) for i, cnt := range s.count[:s.symbolLen] { if s.norm[i] == notYetAssigned { var ( end = tmpTotal + uint64(cnt)*rStep sStart = uint32(tmpTotal >> vStepLog) sEnd = uint32(end >> vStepLog) weight = sEnd - sStart ) if weight < 1 { return errors.New("weight < 1") } s.norm[i] = int16(weight) tmpTotal = end } } return nil } // optimalTableLog calculates 
and sets the optimal tableLog in s.actualTableLog func (s *fseEncoder) optimalTableLog(length int) { tableLog := uint8(maxEncTableLog) minBitsSrc := highBit(uint32(length)) + 1 minBitsSymbols := highBit(uint32(s.symbolLen-1)) + 2 minBits := uint8(minBitsSymbols) if minBitsSrc < minBitsSymbols { minBits = uint8(minBitsSrc) } maxBitsSrc := uint8(highBit(uint32(length-1))) - 2 if maxBitsSrc < tableLog { // Accuracy can be reduced tableLog = maxBitsSrc } if minBits > tableLog { tableLog = minBits } // Need a minimum to safely represent all symbol values if tableLog < minEncTablelog { tableLog = minEncTablelog } if tableLog > maxEncTableLog { tableLog = maxEncTableLog } s.actualTableLog = tableLog } // validateNorm validates the normalized histogram table. func (s *fseEncoder) validateNorm() (err error) { var total int for _, v := range s.norm[:s.symbolLen] { if v >= 0 { total += int(v) } else { total -= int(v) } } defer func() { if err == nil { return } fmt.Printf("selected TableLog: %d, Symbol length: %d\n", s.actualTableLog, s.symbolLen) for i, v := range s.norm[:s.symbolLen] { fmt.Printf("%3d: %5d -> %4d \n", i, s.count[i], v) } }() if total != (1 << s.actualTableLog) { return fmt.Errorf("warning: Total == %d != %d", total, 1<<s.actualTableLog) } for i, v := range s.count[s.symbolLen:] { if v != 0 { return fmt.Errorf("warning: Found symbol out of range, %d after cut", i) } } return nil } // writeCount will write the normalized histogram count to header. // This is read back by readNCount. func (s *fseEncoder) writeCount(out []byte) ([]byte, error) { if s.useRLE { return append(out, s.rleVal), nil } if s.preDefined || s.reUsed { // Never write predefined. return out, nil } var ( tableLog = s.actualTableLog tableSize = 1 << tableLog previous0 bool charnum uint16 // maximum header size plus 2 extra bytes for final output if bitCount == 0. maxHeaderSize = ((int(s.symbolLen) * int(tableLog)) >> 3) + 3 + 2 // Write Table Size bitStream = uint32(tableLog - minEncTablelog) bitCount = uint(4) remaining = int16(tableSize + 1) /* +1 for extra accuracy */ threshold = int16(tableSize) nbBits = uint(tableLog + 1) outP = len(out) ) if cap(out) < outP+maxHeaderSize { out = append(out, make([]byte, maxHeaderSize*3)...) out = out[:len(out)-maxHeaderSize*3] } out = out[:outP+maxHeaderSize] // stops at 1 for remaining > 1 { if previous0 { start := charnum for s.norm[charnum] == 0 { charnum++ } for charnum >= start+24 { start += 24 bitStream += uint32(0xFFFF) << bitCount out[outP] = byte(bitStream) out[outP+1] = byte(bitStream >> 8) outP += 2 bitStream >>= 16 } for charnum >= start+3 { start += 3 bitStream += 3 << bitCount bitCount += 2 } bitStream += uint32(charnum-start) << bitCount bitCount += 2 if bitCount > 16 { out[outP] = byte(bitStream) out[outP+1] = byte(bitStream >> 8) outP += 2 bitStream >>= 16 bitCount -= 16 } } count := s.norm[charnum] charnum++ max := (2*threshold - 1) - remaining if count < 0 { remaining += count } else { remaining -= count } count++ // +1 for extra accuracy if count >= threshold { count += max // [0..max[ [max..threshold[ (...) 
[threshold+max 2*threshold[ } bitStream += uint32(count) << bitCount bitCount += nbBits if count < max { bitCount-- } previous0 = count == 1 if remaining < 1 { return nil, errors.New("internal error: remaining < 1") } for remaining < threshold { nbBits-- threshold >>= 1 } if bitCount > 16 { out[outP] = byte(bitStream) out[outP+1] = byte(bitStream >> 8) outP += 2 bitStream >>= 16 bitCount -= 16 } } if outP+2 > len(out) { return nil, fmt.Errorf("internal error: %d > %d, maxheader: %d, sl: %d, tl: %d, normcount: %v", outP+2, len(out), maxHeaderSize, s.symbolLen, int(tableLog), s.norm[:s.symbolLen]) } out[outP] = byte(bitStream) out[outP+1] = byte(bitStream >> 8) outP += int((bitCount + 7) / 8) if charnum > s.symbolLen { return nil, errors.New("internal error: charnum > s.symbolLen") } return out[:outP], nil } // Approximate symbol cost, as fractional value, using fixed-point format (accuracyLog fractional bits) // note 1 : assume symbolValue is valid (<= maxSymbolValue) // note 2 : if freq[symbolValue]==0, @return a fake cost of tableLog+1 bits * func (s *fseEncoder) bitCost(symbolValue uint8, accuracyLog uint32) uint32 { minNbBits := s.ct.symbolTT[symbolValue].deltaNbBits >> 16 threshold := (minNbBits + 1) << 16 if debugAsserts { if !(s.actualTableLog < 16) { panic("!s.actualTableLog < 16") } // ensure enough room for renormalization double shift if !(uint8(accuracyLog) < 31-s.actualTableLog) { panic("!uint8(accuracyLog) < 31-s.actualTableLog") } } tableSize := uint32(1) << s.actualTableLog deltaFromThreshold := threshold - (s.ct.symbolTT[symbolValue].deltaNbBits + tableSize) // linear interpolation (very approximate) normalizedDeltaFromThreshold := (deltaFromThreshold << accuracyLog) >> s.actualTableLog bitMultiplier := uint32(1) << accuracyLog if debugAsserts { if s.ct.symbolTT[symbolValue].deltaNbBits+tableSize > threshold { panic("s.ct.symbolTT[symbolValue].deltaNbBits+tableSize > threshold") } if normalizedDeltaFromThreshold > bitMultiplier { panic("normalizedDeltaFromThreshold > bitMultiplier") } } return (minNbBits+1)*bitMultiplier - normalizedDeltaFromThreshold } // Returns the cost in bits of encoding the distribution in count using ctable. // Histogram should only be up to the last non-zero symbol. // Returns an -1 if ctable cannot represent all the symbols in count. func (s *fseEncoder) approxSize(hist []uint32) uint32 { if int(s.symbolLen) < len(hist) { // More symbols than we have. return math.MaxUint32 } if s.useRLE { // We will never reuse RLE encoders. return math.MaxUint32 } const kAccuracyLog = 8 badCost := (uint32(s.actualTableLog) + 1) << kAccuracyLog var cost uint32 for i, v := range hist { if v == 0 { continue } if s.norm[i] == 0 { return math.MaxUint32 } bitCost := s.bitCost(uint8(i), kAccuracyLog) if bitCost > badCost { return math.MaxUint32 } cost += v * bitCost } return cost >> kAccuracyLog } // maxHeaderSize returns the maximum header size in bits. // This is not exact size, but we want a penalty for new tables anyway. func (s *fseEncoder) maxHeaderSize() uint32 { if s.preDefined { return 0 } if s.useRLE { return 8 } return (((uint32(s.symbolLen) * uint32(s.actualTableLog)) >> 3) + 3) * 8 } // cState contains the compression state of a stream. type cState struct { bw *bitWriter stateTable []uint16 state uint16 } // init will initialize the compression state to the first symbol of the stream. 
func (c *cState) init(bw *bitWriter, ct *cTable, first symbolTransform) { c.bw = bw c.stateTable = ct.stateTable if len(c.stateTable) == 1 { // RLE c.stateTable[0] = uint16(0) c.state = 0 return } nbBitsOut := (first.deltaNbBits + (1 << 15)) >> 16 im := int32((nbBitsOut << 16) - first.deltaNbBits) lu := (im >> nbBitsOut) + int32(first.deltaFindState) c.state = c.stateTable[lu] } // flush will write the tablelog to the output and flush the remaining full bytes. func (c *cState) flush(tableLog uint8) { c.bw.flush32() c.bw.addBits16NC(c.state, tableLog) }
{ if s.reUsed || s.preDefined { return } if s.useRLE { if transform == nil { s.ct.symbolTT[s.rleVal].outBits = s.rleVal s.maxBits = s.rleVal return } s.maxBits = transform[s.rleVal] s.ct.symbolTT[s.rleVal].outBits = s.maxBits return } if transform == nil { for i := range s.ct.symbolTT[:s.symbolLen] { s.ct.symbolTT[i].outBits = uint8(i) } s.maxBits = uint8(s.symbolLen - 1) return } s.maxBits = 0 for i, v := range transform[:s.symbolLen] { s.ct.symbolTT[i].outBits = v if v > s.maxBits { // We could assume bits always going up, but we play safe. s.maxBits = v } } }
identifier_body
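normalizeCount in the record above rescales the raw histogram so it sums to exactly 1<<tableLog: rare symbols are pinned to -1 (they still occupy one table slot), every other symbol gets a proportional share, and the rounding remainder is handed to the most frequent symbol. A toy sketch of that first pass, leaving out the rtbTable fixed-point rounding and the normalizeCount2 fallback:

// Simplified normalization: proportional scaling to a power-of-two total.
package main

import "fmt"

func normalize(count []uint32, total int, tableLog uint) []int16 {
	tableSize := 1 << tableLog
	lowThreshold := uint32(total >> tableLog)
	norm := make([]int16, len(count))
	remaining := int16(tableSize)
	largest := 0
	for i, c := range count {
		switch {
		case c == 0:
			norm[i] = 0
		case c <= lowThreshold:
			norm[i] = -1 // "less than one" probability, still takes a slot
			remaining--
		default:
			p := int16(uint64(c) * uint64(tableSize) / uint64(total))
			if p > norm[largest] {
				largest = i
			}
			norm[i] = p
			remaining -= p
		}
	}
	norm[largest] += remaining // leftover weight goes to the most frequent symbol
	return norm
}

func main() {
	fmt.Println(normalize([]uint32{60, 30, 8, 2}, 100, 6)) // [39 19 5 1], sums to 64
}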
fse_encoder.go
// Copyright 2019+ Klaus Post. All rights reserved. // License information can be found in the LICENSE file. // Based on work by Yann Collet, released under BSD License. package zstd import ( "errors" "fmt" "math" ) const ( // For encoding we only support up to maxEncTableLog = 8 maxEncTablesize = 1 << maxTableLog maxEncTableMask = (1 << maxTableLog) - 1 minEncTablelog = 5 maxEncSymbolValue = maxMatchLengthSymbol ) // Scratch provides temporary storage for compression and decompression. type fseEncoder struct { symbolLen uint16 // Length of active part of the symbol table. actualTableLog uint8 // Selected tablelog. ct cTable // Compression tables. maxCount int // count of the most probable symbol zeroBits bool // no bits has prob > 50%. clearCount bool // clear count useRLE bool // This encoder is for RLE preDefined bool // This encoder is predefined. reUsed bool // Set to know when the encoder has been reused. rleVal uint8 // RLE Symbol maxBits uint8 // Maximum output bits after transform. // TODO: Technically zstd should be fine with 64 bytes. count [256]uint32 norm [256]int16 } // cTable contains tables used for compression. type cTable struct { tableSymbol []byte stateTable []uint16 symbolTT []symbolTransform } // symbolTransform contains the state transform for a symbol. type symbolTransform struct { deltaNbBits uint32 deltaFindState int16 outBits uint8 } // String prints values as a human readable string. func (s symbolTransform) String() string { return fmt.Sprintf("{deltabits: %08x, findstate:%d outbits:%d}", s.deltaNbBits, s.deltaFindState, s.outBits) } // Histogram allows to populate the histogram and skip that step in the compression, // It otherwise allows to inspect the histogram when compression is done. // To indicate that you have populated the histogram call HistogramFinished // with the value of the highest populated symbol, as well as the number of entries // in the most populated entry. These are accepted at face value. func (s *fseEncoder) Histogram() *[256]uint32 { return &s.count } // HistogramFinished can be called to indicate that the histogram has been populated. // maxSymbol is the index of the highest set symbol of the next data segment. // maxCount is the number of entries in the most populated entry. // These are accepted at face value. func (s *fseEncoder) HistogramFinished(maxSymbol uint8, maxCount int) { s.maxCount = maxCount s.symbolLen = uint16(maxSymbol) + 1 s.clearCount = maxCount != 0 } // allocCtable will allocate tables needed for compression. // If existing tables a re big enough, they are simply re-used. func (s *fseEncoder) allocCtable() { tableSize := 1 << s.actualTableLog // get tableSymbol that is big enough. if cap(s.ct.tableSymbol) < tableSize { s.ct.tableSymbol = make([]byte, tableSize) } s.ct.tableSymbol = s.ct.tableSymbol[:tableSize] ctSize := tableSize if cap(s.ct.stateTable) < ctSize { s.ct.stateTable = make([]uint16, ctSize) } s.ct.stateTable = s.ct.stateTable[:ctSize] if cap(s.ct.symbolTT) < 256 { s.ct.symbolTT = make([]symbolTransform, 256) } s.ct.symbolTT = s.ct.symbolTT[:256] } // buildCTable will populate the compression table so it is ready to be used. func (s *fseEncoder) buildCTable() error { tableSize := uint32(1 << s.actualTableLog) highThreshold := tableSize - 1 var cumul [256]int16 s.allocCtable() tableSymbol := s.ct.tableSymbol[:tableSize] // symbol start positions {
u := byte(ui) // one less than reference if v == -1 { // Low proba symbol cumul[u+1] = cumul[u] + 1 tableSymbol[highThreshold] = u highThreshold-- } else { cumul[u+1] = cumul[u] + v } } // Encode last symbol separately to avoid overflowing u u := int(s.symbolLen - 1) v := s.norm[s.symbolLen-1] if v == -1 { // Low proba symbol cumul[u+1] = cumul[u] + 1 tableSymbol[highThreshold] = byte(u) highThreshold-- } else { cumul[u+1] = cumul[u] + v } if uint32(cumul[s.symbolLen]) != tableSize { return fmt.Errorf("internal error: expected cumul[s.symbolLen] (%d) == tableSize (%d)", cumul[s.symbolLen], tableSize) } cumul[s.symbolLen] = int16(tableSize) + 1 } // Spread symbols s.zeroBits = false { step := tableStep(tableSize) tableMask := tableSize - 1 var position uint32 // if any symbol > largeLimit, we may have 0 bits output. largeLimit := int16(1 << (s.actualTableLog - 1)) for ui, v := range s.norm[:s.symbolLen] { symbol := byte(ui) if v > largeLimit { s.zeroBits = true } for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ { tableSymbol[position] = symbol position = (position + step) & tableMask for position > highThreshold { position = (position + step) & tableMask } /* Low proba area */ } } // Check if we have gone through all positions if position != 0 { return errors.New("position!=0") } } // Build table table := s.ct.stateTable { tsi := int(tableSize) for u, v := range tableSymbol { // TableU16 : sorted by symbol order; gives next state value table[cumul[v]] = uint16(tsi + u) cumul[v]++ } } // Build Symbol Transformation Table { total := int16(0) symbolTT := s.ct.symbolTT[:s.symbolLen] tableLog := s.actualTableLog tl := (uint32(tableLog) << 16) - (1 << tableLog) for i, v := range s.norm[:s.symbolLen] { switch v { case 0: case -1, 1: symbolTT[i].deltaNbBits = tl symbolTT[i].deltaFindState = total - 1 total++ default: maxBitsOut := uint32(tableLog) - highBit(uint32(v-1)) minStatePlus := uint32(v) << maxBitsOut symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus symbolTT[i].deltaFindState = total - v total += v } } if total != int16(tableSize) { return fmt.Errorf("total mismatch %d (got) != %d (want)", total, tableSize) } } return nil } var rtbTable = [...]uint32{0, 473195, 504333, 520860, 550000, 700000, 750000, 830000} func (s *fseEncoder) setRLE(val byte) { s.allocCtable() s.actualTableLog = 0 s.ct.stateTable = s.ct.stateTable[:1] s.ct.symbolTT[val] = symbolTransform{ deltaFindState: 0, deltaNbBits: 0, } if debugEncoder { println("setRLE: val", val, "symbolTT", s.ct.symbolTT[val]) } s.rleVal = val s.useRLE = true } // setBits will set output bits for the transform. // if nil is provided, the number of bits is equal to the index. func (s *fseEncoder) setBits(transform []byte) { if s.reUsed || s.preDefined { return } if s.useRLE { if transform == nil { s.ct.symbolTT[s.rleVal].outBits = s.rleVal s.maxBits = s.rleVal return } s.maxBits = transform[s.rleVal] s.ct.symbolTT[s.rleVal].outBits = s.maxBits return } if transform == nil { for i := range s.ct.symbolTT[:s.symbolLen] { s.ct.symbolTT[i].outBits = uint8(i) } s.maxBits = uint8(s.symbolLen - 1) return } s.maxBits = 0 for i, v := range transform[:s.symbolLen] { s.ct.symbolTT[i].outBits = v if v > s.maxBits { // We could assume bits always going up, but we play safe. s.maxBits = v } } } // normalizeCount will normalize the count of the symbols so // the total is equal to the table size. // If successful, compression tables will also be made ready. 
func (s *fseEncoder) normalizeCount(length int) error { if s.reUsed { return nil } s.optimalTableLog(length) var ( tableLog = s.actualTableLog scale = 62 - uint64(tableLog) step = (1 << 62) / uint64(length) vStep = uint64(1) << (scale - 20) stillToDistribute = int16(1 << tableLog) largest int largestP int16 lowThreshold = (uint32)(length >> tableLog) ) if s.maxCount == length { s.useRLE = true return nil } s.useRLE = false for i, cnt := range s.count[:s.symbolLen] { // already handled // if (count[s] == s.length) return 0; /* rle special case */ if cnt == 0 { s.norm[i] = 0 continue } if cnt <= lowThreshold { s.norm[i] = -1 stillToDistribute-- } else { proba := (int16)((uint64(cnt) * step) >> scale) if proba < 8 { restToBeat := vStep * uint64(rtbTable[proba]) v := uint64(cnt)*step - (uint64(proba) << scale) if v > restToBeat { proba++ } } if proba > largestP { largestP = proba largest = i } s.norm[i] = proba stillToDistribute -= proba } } if -stillToDistribute >= (s.norm[largest] >> 1) { // corner case, need another normalization method err := s.normalizeCount2(length) if err != nil { return err } if debugAsserts { err = s.validateNorm() if err != nil { return err } } return s.buildCTable() } s.norm[largest] += stillToDistribute if debugAsserts { err := s.validateNorm() if err != nil { return err } } return s.buildCTable() } // Secondary normalization method. // To be used when primary method fails. func (s *fseEncoder) normalizeCount2(length int) error { const notYetAssigned = -2 var ( distributed uint32 total = uint32(length) tableLog = s.actualTableLog lowThreshold = total >> tableLog lowOne = (total * 3) >> (tableLog + 1) ) for i, cnt := range s.count[:s.symbolLen] { if cnt == 0 { s.norm[i] = 0 continue } if cnt <= lowThreshold { s.norm[i] = -1 distributed++ total -= cnt continue } if cnt <= lowOne { s.norm[i] = 1 distributed++ total -= cnt continue } s.norm[i] = notYetAssigned } toDistribute := (1 << tableLog) - distributed if (total / toDistribute) > lowOne { // risk of rounding to zero lowOne = (total * 3) / (toDistribute * 2) for i, cnt := range s.count[:s.symbolLen] { if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) { s.norm[i] = 1 distributed++ total -= cnt continue } } toDistribute = (1 << tableLog) - distributed } if distributed == uint32(s.symbolLen)+1 { // all values are pretty poor; // probably incompressible data (should have already been detected); // find max, then give all remaining points to max var maxV int var maxC uint32 for i, cnt := range s.count[:s.symbolLen] { if cnt > maxC { maxV = i maxC = cnt } } s.norm[maxV] += int16(toDistribute) return nil } if total == 0 { // all of the symbols were low enough for the lowOne or lowThreshold for i := uint32(0); toDistribute > 0; i = (i + 1) % (uint32(s.symbolLen)) { if s.norm[i] > 0 { toDistribute-- s.norm[i]++ } } return nil } var ( vStepLog = 62 - uint64(tableLog) mid = uint64((1 << (vStepLog - 1)) - 1) rStep = (((1 << vStepLog) * uint64(toDistribute)) + mid) / uint64(total) // scale on remaining tmpTotal = mid ) for i, cnt := range s.count[:s.symbolLen] { if s.norm[i] == notYetAssigned { var ( end = tmpTotal + uint64(cnt)*rStep sStart = uint32(tmpTotal >> vStepLog) sEnd = uint32(end >> vStepLog) weight = sEnd - sStart ) if weight < 1 { return errors.New("weight < 1") } s.norm[i] = int16(weight) tmpTotal = end } } return nil } // optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog func (s *fseEncoder) optimalTableLog(length int) { tableLog := uint8(maxEncTableLog) minBitsSrc := 
highBit(uint32(length)) + 1 minBitsSymbols := highBit(uint32(s.symbolLen-1)) + 2 minBits := uint8(minBitsSymbols) if minBitsSrc < minBitsSymbols { minBits = uint8(minBitsSrc) } maxBitsSrc := uint8(highBit(uint32(length-1))) - 2 if maxBitsSrc < tableLog { // Accuracy can be reduced tableLog = maxBitsSrc } if minBits > tableLog { tableLog = minBits } // Need a minimum to safely represent all symbol values if tableLog < minEncTablelog { tableLog = minEncTablelog } if tableLog > maxEncTableLog { tableLog = maxEncTableLog } s.actualTableLog = tableLog } // validateNorm validates the normalized histogram table. func (s *fseEncoder) validateNorm() (err error) { var total int for _, v := range s.norm[:s.symbolLen] { if v >= 0 { total += int(v) } else { total -= int(v) } } defer func() { if err == nil { return } fmt.Printf("selected TableLog: %d, Symbol length: %d\n", s.actualTableLog, s.symbolLen) for i, v := range s.norm[:s.symbolLen] { fmt.Printf("%3d: %5d -> %4d \n", i, s.count[i], v) } }() if total != (1 << s.actualTableLog) { return fmt.Errorf("warning: Total == %d != %d", total, 1<<s.actualTableLog) } for i, v := range s.count[s.symbolLen:] { if v != 0 { return fmt.Errorf("warning: Found symbol out of range, %d after cut", i) } } return nil } // writeCount will write the normalized histogram count to header. // This is read back by readNCount. func (s *fseEncoder) writeCount(out []byte) ([]byte, error) { if s.useRLE { return append(out, s.rleVal), nil } if s.preDefined || s.reUsed { // Never write predefined. return out, nil } var ( tableLog = s.actualTableLog tableSize = 1 << tableLog previous0 bool charnum uint16 // maximum header size plus 2 extra bytes for final output if bitCount == 0. maxHeaderSize = ((int(s.symbolLen) * int(tableLog)) >> 3) + 3 + 2 // Write Table Size bitStream = uint32(tableLog - minEncTablelog) bitCount = uint(4) remaining = int16(tableSize + 1) /* +1 for extra accuracy */ threshold = int16(tableSize) nbBits = uint(tableLog + 1) outP = len(out) ) if cap(out) < outP+maxHeaderSize { out = append(out, make([]byte, maxHeaderSize*3)...) out = out[:len(out)-maxHeaderSize*3] } out = out[:outP+maxHeaderSize] // stops at 1 for remaining > 1 { if previous0 { start := charnum for s.norm[charnum] == 0 { charnum++ } for charnum >= start+24 { start += 24 bitStream += uint32(0xFFFF) << bitCount out[outP] = byte(bitStream) out[outP+1] = byte(bitStream >> 8) outP += 2 bitStream >>= 16 } for charnum >= start+3 { start += 3 bitStream += 3 << bitCount bitCount += 2 } bitStream += uint32(charnum-start) << bitCount bitCount += 2 if bitCount > 16 { out[outP] = byte(bitStream) out[outP+1] = byte(bitStream >> 8) outP += 2 bitStream >>= 16 bitCount -= 16 } } count := s.norm[charnum] charnum++ max := (2*threshold - 1) - remaining if count < 0 { remaining += count } else { remaining -= count } count++ // +1 for extra accuracy if count >= threshold { count += max // [0..max[ [max..threshold[ (...) 
[threshold+max 2*threshold[ } bitStream += uint32(count) << bitCount bitCount += nbBits if count < max { bitCount-- } previous0 = count == 1 if remaining < 1 { return nil, errors.New("internal error: remaining < 1") } for remaining < threshold { nbBits-- threshold >>= 1 } if bitCount > 16 { out[outP] = byte(bitStream) out[outP+1] = byte(bitStream >> 8) outP += 2 bitStream >>= 16 bitCount -= 16 } } if outP+2 > len(out) { return nil, fmt.Errorf("internal error: %d > %d, maxheader: %d, sl: %d, tl: %d, normcount: %v", outP+2, len(out), maxHeaderSize, s.symbolLen, int(tableLog), s.norm[:s.symbolLen]) } out[outP] = byte(bitStream) out[outP+1] = byte(bitStream >> 8) outP += int((bitCount + 7) / 8) if charnum > s.symbolLen { return nil, errors.New("internal error: charnum > s.symbolLen") } return out[:outP], nil } // Approximate symbol cost, as fractional value, using fixed-point format (accuracyLog fractional bits) // note 1 : assume symbolValue is valid (<= maxSymbolValue) // note 2 : if freq[symbolValue]==0, @return a fake cost of tableLog+1 bits * func (s *fseEncoder) bitCost(symbolValue uint8, accuracyLog uint32) uint32 { minNbBits := s.ct.symbolTT[symbolValue].deltaNbBits >> 16 threshold := (minNbBits + 1) << 16 if debugAsserts { if !(s.actualTableLog < 16) { panic("!s.actualTableLog < 16") } // ensure enough room for renormalization double shift if !(uint8(accuracyLog) < 31-s.actualTableLog) { panic("!uint8(accuracyLog) < 31-s.actualTableLog") } } tableSize := uint32(1) << s.actualTableLog deltaFromThreshold := threshold - (s.ct.symbolTT[symbolValue].deltaNbBits + tableSize) // linear interpolation (very approximate) normalizedDeltaFromThreshold := (deltaFromThreshold << accuracyLog) >> s.actualTableLog bitMultiplier := uint32(1) << accuracyLog if debugAsserts { if s.ct.symbolTT[symbolValue].deltaNbBits+tableSize > threshold { panic("s.ct.symbolTT[symbolValue].deltaNbBits+tableSize > threshold") } if normalizedDeltaFromThreshold > bitMultiplier { panic("normalizedDeltaFromThreshold > bitMultiplier") } } return (minNbBits+1)*bitMultiplier - normalizedDeltaFromThreshold } // Returns the cost in bits of encoding the distribution in count using ctable. // Histogram should only be up to the last non-zero symbol. // Returns an -1 if ctable cannot represent all the symbols in count. func (s *fseEncoder) approxSize(hist []uint32) uint32 { if int(s.symbolLen) < len(hist) { // More symbols than we have. return math.MaxUint32 } if s.useRLE { // We will never reuse RLE encoders. return math.MaxUint32 } const kAccuracyLog = 8 badCost := (uint32(s.actualTableLog) + 1) << kAccuracyLog var cost uint32 for i, v := range hist { if v == 0 { continue } if s.norm[i] == 0 { return math.MaxUint32 } bitCost := s.bitCost(uint8(i), kAccuracyLog) if bitCost > badCost { return math.MaxUint32 } cost += v * bitCost } return cost >> kAccuracyLog } // maxHeaderSize returns the maximum header size in bits. // This is not exact size, but we want a penalty for new tables anyway. func (s *fseEncoder) maxHeaderSize() uint32 { if s.preDefined { return 0 } if s.useRLE { return 8 } return (((uint32(s.symbolLen) * uint32(s.actualTableLog)) >> 3) + 3) * 8 } // cState contains the compression state of a stream. type cState struct { bw *bitWriter stateTable []uint16 state uint16 } // init will initialize the compression state to the first symbol of the stream. 
func (c *cState) init(bw *bitWriter, ct *cTable, first symbolTransform) { c.bw = bw c.stateTable = ct.stateTable if len(c.stateTable) == 1 { // RLE c.stateTable[0] = uint16(0) c.state = 0 return } nbBitsOut := (first.deltaNbBits + (1 << 15)) >> 16 im := int32((nbBitsOut << 16) - first.deltaNbBits) lu := (im >> nbBitsOut) + int32(first.deltaFindState) c.state = c.stateTable[lu] } // flush will write the tablelog to the output and flush the remaining full bytes. func (c *cState) flush(tableLog uint8) { c.bw.flush32() c.bw.addBits16NC(c.state, tableLog) }
cumul[0] = 0 for ui, v := range s.norm[:s.symbolLen-1] {
random_line_split
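The deltaNbBits value that buildCTable stores per symbol packs both possible output sizes into a single constant: adding the current state and shifting right by 16 yields either maxBitsOut or maxBitsOut-1, which is how bitCost and cState consume it. A small sketch of that arithmetic; normalized counts of 1 and -1 are special-cased in the real encoder and are not handled here:

// How (state + deltaNbBits) >> 16 selects the per-symbol bit count.
package main

import (
	"fmt"
	"math/bits"
)

func nbBitsFor(state, normCount, tableLog uint32) uint32 {
	maxBitsOut := tableLog - uint32(bits.Len32(normCount-1)-1) // tableLog - highBit(v-1)
	minStatePlus := normCount << maxBitsOut
	deltaNbBits := (maxBitsOut << 16) - minStatePlus
	return (state + deltaNbBits) >> 16
}

func main() {
	// Symbol with normalized count 5 in a 64-entry table (tableLog = 6):
	// states below 5<<4 = 80 need 3 bits, states at or above need 4 bits.
	fmt.Println(nbBitsFor(70, 5, 6), nbBitsFor(100, 5, 6)) // 3 4
}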
fse_encoder.go
// Copyright 2019+ Klaus Post. All rights reserved. // License information can be found in the LICENSE file. // Based on work by Yann Collet, released under BSD License. package zstd import ( "errors" "fmt" "math" ) const ( // For encoding we only support up to maxEncTableLog = 8 maxEncTablesize = 1 << maxTableLog maxEncTableMask = (1 << maxTableLog) - 1 minEncTablelog = 5 maxEncSymbolValue = maxMatchLengthSymbol ) // Scratch provides temporary storage for compression and decompression. type fseEncoder struct { symbolLen uint16 // Length of active part of the symbol table. actualTableLog uint8 // Selected tablelog. ct cTable // Compression tables. maxCount int // count of the most probable symbol zeroBits bool // no bits has prob > 50%. clearCount bool // clear count useRLE bool // This encoder is for RLE preDefined bool // This encoder is predefined. reUsed bool // Set to know when the encoder has been reused. rleVal uint8 // RLE Symbol maxBits uint8 // Maximum output bits after transform. // TODO: Technically zstd should be fine with 64 bytes. count [256]uint32 norm [256]int16 } // cTable contains tables used for compression. type cTable struct { tableSymbol []byte stateTable []uint16 symbolTT []symbolTransform } // symbolTransform contains the state transform for a symbol. type symbolTransform struct { deltaNbBits uint32 deltaFindState int16 outBits uint8 } // String prints values as a human readable string. func (s symbolTransform) String() string { return fmt.Sprintf("{deltabits: %08x, findstate:%d outbits:%d}", s.deltaNbBits, s.deltaFindState, s.outBits) } // Histogram allows to populate the histogram and skip that step in the compression, // It otherwise allows to inspect the histogram when compression is done. // To indicate that you have populated the histogram call HistogramFinished // with the value of the highest populated symbol, as well as the number of entries // in the most populated entry. These are accepted at face value. func (s *fseEncoder) Histogram() *[256]uint32 { return &s.count } // HistogramFinished can be called to indicate that the histogram has been populated. // maxSymbol is the index of the highest set symbol of the next data segment. // maxCount is the number of entries in the most populated entry. // These are accepted at face value. func (s *fseEncoder) HistogramFinished(maxSymbol uint8, maxCount int) { s.maxCount = maxCount s.symbolLen = uint16(maxSymbol) + 1 s.clearCount = maxCount != 0 } // allocCtable will allocate tables needed for compression. // If existing tables a re big enough, they are simply re-used. func (s *fseEncoder) allocCtable() { tableSize := 1 << s.actualTableLog // get tableSymbol that is big enough. if cap(s.ct.tableSymbol) < tableSize { s.ct.tableSymbol = make([]byte, tableSize) } s.ct.tableSymbol = s.ct.tableSymbol[:tableSize] ctSize := tableSize if cap(s.ct.stateTable) < ctSize { s.ct.stateTable = make([]uint16, ctSize) } s.ct.stateTable = s.ct.stateTable[:ctSize] if cap(s.ct.symbolTT) < 256
s.ct.symbolTT = s.ct.symbolTT[:256] } // buildCTable will populate the compression table so it is ready to be used. func (s *fseEncoder) buildCTable() error { tableSize := uint32(1 << s.actualTableLog) highThreshold := tableSize - 1 var cumul [256]int16 s.allocCtable() tableSymbol := s.ct.tableSymbol[:tableSize] // symbol start positions { cumul[0] = 0 for ui, v := range s.norm[:s.symbolLen-1] { u := byte(ui) // one less than reference if v == -1 { // Low proba symbol cumul[u+1] = cumul[u] + 1 tableSymbol[highThreshold] = u highThreshold-- } else { cumul[u+1] = cumul[u] + v } } // Encode last symbol separately to avoid overflowing u u := int(s.symbolLen - 1) v := s.norm[s.symbolLen-1] if v == -1 { // Low proba symbol cumul[u+1] = cumul[u] + 1 tableSymbol[highThreshold] = byte(u) highThreshold-- } else { cumul[u+1] = cumul[u] + v } if uint32(cumul[s.symbolLen]) != tableSize { return fmt.Errorf("internal error: expected cumul[s.symbolLen] (%d) == tableSize (%d)", cumul[s.symbolLen], tableSize) } cumul[s.symbolLen] = int16(tableSize) + 1 } // Spread symbols s.zeroBits = false { step := tableStep(tableSize) tableMask := tableSize - 1 var position uint32 // if any symbol > largeLimit, we may have 0 bits output. largeLimit := int16(1 << (s.actualTableLog - 1)) for ui, v := range s.norm[:s.symbolLen] { symbol := byte(ui) if v > largeLimit { s.zeroBits = true } for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ { tableSymbol[position] = symbol position = (position + step) & tableMask for position > highThreshold { position = (position + step) & tableMask } /* Low proba area */ } } // Check if we have gone through all positions if position != 0 { return errors.New("position!=0") } } // Build table table := s.ct.stateTable { tsi := int(tableSize) for u, v := range tableSymbol { // TableU16 : sorted by symbol order; gives next state value table[cumul[v]] = uint16(tsi + u) cumul[v]++ } } // Build Symbol Transformation Table { total := int16(0) symbolTT := s.ct.symbolTT[:s.symbolLen] tableLog := s.actualTableLog tl := (uint32(tableLog) << 16) - (1 << tableLog) for i, v := range s.norm[:s.symbolLen] { switch v { case 0: case -1, 1: symbolTT[i].deltaNbBits = tl symbolTT[i].deltaFindState = total - 1 total++ default: maxBitsOut := uint32(tableLog) - highBit(uint32(v-1)) minStatePlus := uint32(v) << maxBitsOut symbolTT[i].deltaNbBits = (maxBitsOut << 16) - minStatePlus symbolTT[i].deltaFindState = total - v total += v } } if total != int16(tableSize) { return fmt.Errorf("total mismatch %d (got) != %d (want)", total, tableSize) } } return nil } var rtbTable = [...]uint32{0, 473195, 504333, 520860, 550000, 700000, 750000, 830000} func (s *fseEncoder) setRLE(val byte) { s.allocCtable() s.actualTableLog = 0 s.ct.stateTable = s.ct.stateTable[:1] s.ct.symbolTT[val] = symbolTransform{ deltaFindState: 0, deltaNbBits: 0, } if debugEncoder { println("setRLE: val", val, "symbolTT", s.ct.symbolTT[val]) } s.rleVal = val s.useRLE = true } // setBits will set output bits for the transform. // if nil is provided, the number of bits is equal to the index. 
func (s *fseEncoder) setBits(transform []byte) { if s.reUsed || s.preDefined { return } if s.useRLE { if transform == nil { s.ct.symbolTT[s.rleVal].outBits = s.rleVal s.maxBits = s.rleVal return } s.maxBits = transform[s.rleVal] s.ct.symbolTT[s.rleVal].outBits = s.maxBits return } if transform == nil { for i := range s.ct.symbolTT[:s.symbolLen] { s.ct.symbolTT[i].outBits = uint8(i) } s.maxBits = uint8(s.symbolLen - 1) return } s.maxBits = 0 for i, v := range transform[:s.symbolLen] { s.ct.symbolTT[i].outBits = v if v > s.maxBits { // We could assume bits always going up, but we play safe. s.maxBits = v } } } // normalizeCount will normalize the count of the symbols so // the total is equal to the table size. // If successful, compression tables will also be made ready. func (s *fseEncoder) normalizeCount(length int) error { if s.reUsed { return nil } s.optimalTableLog(length) var ( tableLog = s.actualTableLog scale = 62 - uint64(tableLog) step = (1 << 62) / uint64(length) vStep = uint64(1) << (scale - 20) stillToDistribute = int16(1 << tableLog) largest int largestP int16 lowThreshold = (uint32)(length >> tableLog) ) if s.maxCount == length { s.useRLE = true return nil } s.useRLE = false for i, cnt := range s.count[:s.symbolLen] { // already handled // if (count[s] == s.length) return 0; /* rle special case */ if cnt == 0 { s.norm[i] = 0 continue } if cnt <= lowThreshold { s.norm[i] = -1 stillToDistribute-- } else { proba := (int16)((uint64(cnt) * step) >> scale) if proba < 8 { restToBeat := vStep * uint64(rtbTable[proba]) v := uint64(cnt)*step - (uint64(proba) << scale) if v > restToBeat { proba++ } } if proba > largestP { largestP = proba largest = i } s.norm[i] = proba stillToDistribute -= proba } } if -stillToDistribute >= (s.norm[largest] >> 1) { // corner case, need another normalization method err := s.normalizeCount2(length) if err != nil { return err } if debugAsserts { err = s.validateNorm() if err != nil { return err } } return s.buildCTable() } s.norm[largest] += stillToDistribute if debugAsserts { err := s.validateNorm() if err != nil { return err } } return s.buildCTable() } // Secondary normalization method. // To be used when primary method fails. 
func (s *fseEncoder) normalizeCount2(length int) error { const notYetAssigned = -2 var ( distributed uint32 total = uint32(length) tableLog = s.actualTableLog lowThreshold = total >> tableLog lowOne = (total * 3) >> (tableLog + 1) ) for i, cnt := range s.count[:s.symbolLen] { if cnt == 0 { s.norm[i] = 0 continue } if cnt <= lowThreshold { s.norm[i] = -1 distributed++ total -= cnt continue } if cnt <= lowOne { s.norm[i] = 1 distributed++ total -= cnt continue } s.norm[i] = notYetAssigned } toDistribute := (1 << tableLog) - distributed if (total / toDistribute) > lowOne { // risk of rounding to zero lowOne = (total * 3) / (toDistribute * 2) for i, cnt := range s.count[:s.symbolLen] { if (s.norm[i] == notYetAssigned) && (cnt <= lowOne) { s.norm[i] = 1 distributed++ total -= cnt continue } } toDistribute = (1 << tableLog) - distributed } if distributed == uint32(s.symbolLen)+1 { // all values are pretty poor; // probably incompressible data (should have already been detected); // find max, then give all remaining points to max var maxV int var maxC uint32 for i, cnt := range s.count[:s.symbolLen] { if cnt > maxC { maxV = i maxC = cnt } } s.norm[maxV] += int16(toDistribute) return nil } if total == 0 { // all of the symbols were low enough for the lowOne or lowThreshold for i := uint32(0); toDistribute > 0; i = (i + 1) % (uint32(s.symbolLen)) { if s.norm[i] > 0 { toDistribute-- s.norm[i]++ } } return nil } var ( vStepLog = 62 - uint64(tableLog) mid = uint64((1 << (vStepLog - 1)) - 1) rStep = (((1 << vStepLog) * uint64(toDistribute)) + mid) / uint64(total) // scale on remaining tmpTotal = mid ) for i, cnt := range s.count[:s.symbolLen] { if s.norm[i] == notYetAssigned { var ( end = tmpTotal + uint64(cnt)*rStep sStart = uint32(tmpTotal >> vStepLog) sEnd = uint32(end >> vStepLog) weight = sEnd - sStart ) if weight < 1 { return errors.New("weight < 1") } s.norm[i] = int16(weight) tmpTotal = end } } return nil } // optimalTableLog calculates and sets the optimal tableLog in s.actualTableLog func (s *fseEncoder) optimalTableLog(length int) { tableLog := uint8(maxEncTableLog) minBitsSrc := highBit(uint32(length)) + 1 minBitsSymbols := highBit(uint32(s.symbolLen-1)) + 2 minBits := uint8(minBitsSymbols) if minBitsSrc < minBitsSymbols { minBits = uint8(minBitsSrc) } maxBitsSrc := uint8(highBit(uint32(length-1))) - 2 if maxBitsSrc < tableLog { // Accuracy can be reduced tableLog = maxBitsSrc } if minBits > tableLog { tableLog = minBits } // Need a minimum to safely represent all symbol values if tableLog < minEncTablelog { tableLog = minEncTablelog } if tableLog > maxEncTableLog { tableLog = maxEncTableLog } s.actualTableLog = tableLog } // validateNorm validates the normalized histogram table. func (s *fseEncoder) validateNorm() (err error) { var total int for _, v := range s.norm[:s.symbolLen] { if v >= 0 { total += int(v) } else { total -= int(v) } } defer func() { if err == nil { return } fmt.Printf("selected TableLog: %d, Symbol length: %d\n", s.actualTableLog, s.symbolLen) for i, v := range s.norm[:s.symbolLen] { fmt.Printf("%3d: %5d -> %4d \n", i, s.count[i], v) } }() if total != (1 << s.actualTableLog) { return fmt.Errorf("warning: Total == %d != %d", total, 1<<s.actualTableLog) } for i, v := range s.count[s.symbolLen:] { if v != 0 { return fmt.Errorf("warning: Found symbol out of range, %d after cut", i) } } return nil } // writeCount will write the normalized histogram count to header. // This is read back by readNCount. 
func (s *fseEncoder) writeCount(out []byte) ([]byte, error) { if s.useRLE { return append(out, s.rleVal), nil } if s.preDefined || s.reUsed { // Never write predefined. return out, nil } var ( tableLog = s.actualTableLog tableSize = 1 << tableLog previous0 bool charnum uint16 // maximum header size plus 2 extra bytes for final output if bitCount == 0. maxHeaderSize = ((int(s.symbolLen) * int(tableLog)) >> 3) + 3 + 2 // Write Table Size bitStream = uint32(tableLog - minEncTablelog) bitCount = uint(4) remaining = int16(tableSize + 1) /* +1 for extra accuracy */ threshold = int16(tableSize) nbBits = uint(tableLog + 1) outP = len(out) ) if cap(out) < outP+maxHeaderSize { out = append(out, make([]byte, maxHeaderSize*3)...) out = out[:len(out)-maxHeaderSize*3] } out = out[:outP+maxHeaderSize] // stops at 1 for remaining > 1 { if previous0 { start := charnum for s.norm[charnum] == 0 { charnum++ } for charnum >= start+24 { start += 24 bitStream += uint32(0xFFFF) << bitCount out[outP] = byte(bitStream) out[outP+1] = byte(bitStream >> 8) outP += 2 bitStream >>= 16 } for charnum >= start+3 { start += 3 bitStream += 3 << bitCount bitCount += 2 } bitStream += uint32(charnum-start) << bitCount bitCount += 2 if bitCount > 16 { out[outP] = byte(bitStream) out[outP+1] = byte(bitStream >> 8) outP += 2 bitStream >>= 16 bitCount -= 16 } } count := s.norm[charnum] charnum++ max := (2*threshold - 1) - remaining if count < 0 { remaining += count } else { remaining -= count } count++ // +1 for extra accuracy if count >= threshold { count += max // [0..max[ [max..threshold[ (...) [threshold+max 2*threshold[ } bitStream += uint32(count) << bitCount bitCount += nbBits if count < max { bitCount-- } previous0 = count == 1 if remaining < 1 { return nil, errors.New("internal error: remaining < 1") } for remaining < threshold { nbBits-- threshold >>= 1 } if bitCount > 16 { out[outP] = byte(bitStream) out[outP+1] = byte(bitStream >> 8) outP += 2 bitStream >>= 16 bitCount -= 16 } } if outP+2 > len(out) { return nil, fmt.Errorf("internal error: %d > %d, maxheader: %d, sl: %d, tl: %d, normcount: %v", outP+2, len(out), maxHeaderSize, s.symbolLen, int(tableLog), s.norm[:s.symbolLen]) } out[outP] = byte(bitStream) out[outP+1] = byte(bitStream >> 8) outP += int((bitCount + 7) / 8) if charnum > s.symbolLen { return nil, errors.New("internal error: charnum > s.symbolLen") } return out[:outP], nil } // Approximate symbol cost, as fractional value, using fixed-point format (accuracyLog fractional bits) // note 1 : assume symbolValue is valid (<= maxSymbolValue) // note 2 : if freq[symbolValue]==0, @return a fake cost of tableLog+1 bits * func (s *fseEncoder) bitCost(symbolValue uint8, accuracyLog uint32) uint32 { minNbBits := s.ct.symbolTT[symbolValue].deltaNbBits >> 16 threshold := (minNbBits + 1) << 16 if debugAsserts { if !(s.actualTableLog < 16) { panic("!s.actualTableLog < 16") } // ensure enough room for renormalization double shift if !(uint8(accuracyLog) < 31-s.actualTableLog) { panic("!uint8(accuracyLog) < 31-s.actualTableLog") } } tableSize := uint32(1) << s.actualTableLog deltaFromThreshold := threshold - (s.ct.symbolTT[symbolValue].deltaNbBits + tableSize) // linear interpolation (very approximate) normalizedDeltaFromThreshold := (deltaFromThreshold << accuracyLog) >> s.actualTableLog bitMultiplier := uint32(1) << accuracyLog if debugAsserts { if s.ct.symbolTT[symbolValue].deltaNbBits+tableSize > threshold { panic("s.ct.symbolTT[symbolValue].deltaNbBits+tableSize > threshold") } if normalizedDeltaFromThreshold > 
bitMultiplier { panic("normalizedDeltaFromThreshold > bitMultiplier") } } return (minNbBits+1)*bitMultiplier - normalizedDeltaFromThreshold } // Returns the cost in bits of encoding the distribution in count using ctable. // Histogram should only be up to the last non-zero symbol. // Returns an -1 if ctable cannot represent all the symbols in count. func (s *fseEncoder) approxSize(hist []uint32) uint32 { if int(s.symbolLen) < len(hist) { // More symbols than we have. return math.MaxUint32 } if s.useRLE { // We will never reuse RLE encoders. return math.MaxUint32 } const kAccuracyLog = 8 badCost := (uint32(s.actualTableLog) + 1) << kAccuracyLog var cost uint32 for i, v := range hist { if v == 0 { continue } if s.norm[i] == 0 { return math.MaxUint32 } bitCost := s.bitCost(uint8(i), kAccuracyLog) if bitCost > badCost { return math.MaxUint32 } cost += v * bitCost } return cost >> kAccuracyLog } // maxHeaderSize returns the maximum header size in bits. // This is not exact size, but we want a penalty for new tables anyway. func (s *fseEncoder) maxHeaderSize() uint32 { if s.preDefined { return 0 } if s.useRLE { return 8 } return (((uint32(s.symbolLen) * uint32(s.actualTableLog)) >> 3) + 3) * 8 } // cState contains the compression state of a stream. type cState struct { bw *bitWriter stateTable []uint16 state uint16 } // init will initialize the compression state to the first symbol of the stream. func (c *cState) init(bw *bitWriter, ct *cTable, first symbolTransform) { c.bw = bw c.stateTable = ct.stateTable if len(c.stateTable) == 1 { // RLE c.stateTable[0] = uint16(0) c.state = 0 return } nbBitsOut := (first.deltaNbBits + (1 << 15)) >> 16 im := int32((nbBitsOut << 16) - first.deltaNbBits) lu := (im >> nbBitsOut) + int32(first.deltaFindState) c.state = c.stateTable[lu] } // flush will write the tablelog to the output and flush the remaining full bytes. func (c *cState) flush(tableLog uint8) { c.bw.flush32() c.bw.addBits16NC(c.state, tableLog) }
{ s.ct.symbolTT = make([]symbolTransform, 256) }
conditional_block
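The symbol transform built in buildCTable packs the per-symbol bit cost into deltaNbBits so that (state + deltaNbBits) >> 16 yields either maxBitsOut or maxBitsOut-1, depending on whether the state has reached minStatePlus; cState.init rounds with (1 << 15) for the first symbol for the same reason. This is a hedged Go illustration of that fixed-point trick using an invented symbol (normalized count 3 in a 64-entry table); symbolDelta is a toy helper written for the sketch, not part of the package.

package main

import (
	"fmt"
	"math/bits"
)

// Toy re-derivation of the symbolTransform math in buildCTable for a single
// symbol with normalized count v in a table of size 1<<tableLog.
// Only valid for v >= 2; counts of 1 and -1 take a different branch upstream.
func symbolDelta(v, tableLog uint32) (delta, minStatePlus uint32) {
	maxBitsOut := tableLog - uint32(bits.Len32(v-1)-1) // tableLog - highBit(v-1)
	minStatePlus = v << maxBitsOut
	delta = (maxBitsOut << 16) - minStatePlus
	return delta, minStatePlus
}

func main() {
	// A symbol with normalized count 3 in a 2^6 = 64 entry table.
	delta, threshold := symbolDelta(3, 6)
	// (state + delta) >> 16 is the number of bits to emit: states below the
	// minStatePlus threshold cost one bit less than states at or above it.
	for _, state := range []uint32{threshold - 1, threshold} {
		fmt.Printf("state %d -> %d bits\n", state, (state+delta)>>16)
	}
}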
resolve_recovers_from_http_errors.rs
// Copyright 2019 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. /// This module tests the property that pkg_resolver does not enter a bad /// state (successfully handles retries) when the TUF server errors while /// servicing fuchsia.pkg.PackageResolver.Resolve FIDL requests. use { assert_matches::assert_matches, fuchsia_merkle::MerkleTree, fuchsia_pkg_testing::{ serve::{responder, Domain, HttpResponder}, Package, PackageBuilder, RepositoryBuilder, }, lib::{ extra_blob_contents, make_pkg_with_extra_blobs, ResolverVariant, TestEnvBuilder, EMPTY_REPO_PATH, FILE_SIZE_LARGE_ENOUGH_TO_TRIGGER_HYPER_BATCHING, }, std::{net::Ipv4Addr, sync::Arc}, }; async fn verify_resolve_fails_then_succeeds<H: HttpResponder>( pkg: Package, responder: H, failure_error: fidl_fuchsia_pkg::ResolveError, ) { let env = TestEnvBuilder::new().build().await; let repo = Arc::new( RepositoryBuilder::from_template_dir(EMPTY_REPO_PATH) .add_package(&pkg) .build() .await .unwrap(), ); let pkg_url = format!("fuchsia-pkg://test/{}", pkg.name()); let should_fail = responder::AtomicToggle::new(true); let served_repository = repo .server() .response_overrider(responder::Toggleable::new(&should_fail, responder)) .response_overrider(responder::Filter::new( responder::is_range_request, responder::StaticResponseCode::server_error(), )) .start() .unwrap(); env.register_repo(&served_repository).await; // First resolve fails with the expected error. assert_matches!(env.resolve_package(&pkg_url).await, Err(error) if error == failure_error); // Disabling the custom responder allows the subsequent resolves to succeed. should_fail.unset(); let (package_dir, _resolved_context) = env.resolve_package(&pkg_url).await.expect("package to resolve"); pkg.verify_contents(&package_dir).await.expect("correct package contents"); env.stop().await; } #[fuchsia::test] async fn second_resolve_succeeds_when_far_404() { let pkg = make_pkg_with_extra_blobs("second_resolve_succeeds_when_far_404", 1).await; let path_to_override = format!("/blobs/{}", pkg.meta_far_merkle_root()); verify_resolve_fails_then_succeeds( pkg, responder::ForPath::new(path_to_override, responder::StaticResponseCode::not_found()), fidl_fuchsia_pkg::ResolveError::UnavailableBlob, ) .await } #[fuchsia::test] async fn second_resolve_succeeds_when_blob_404() { let pkg = make_pkg_with_extra_blobs("second_resolve_succeeds_when_blob_404", 1).await; let path_to_override = format!( "/blobs/{}", MerkleTree::from_reader( extra_blob_contents("second_resolve_succeeds_when_blob_404", 0).as_slice() ) .expect("merkle slice") .root() ); verify_resolve_fails_then_succeeds( pkg, responder::ForPath::new(path_to_override, responder::StaticResponseCode::not_found()), fidl_fuchsia_pkg::ResolveError::UnavailableBlob, ) .await } #[fuchsia::test] async fn second_resolve_succeeds_when_far_errors_mid_download() { let pkg = PackageBuilder::new("second_resolve_succeeds_when_far_errors_mid_download") .add_resource_at( "meta/large_file", vec![0; FILE_SIZE_LARGE_ENOUGH_TO_TRIGGER_HYPER_BATCHING].as_slice(), ) .build() .await .unwrap(); let path_to_override = format!("/blobs/{}", pkg.meta_far_merkle_root()); verify_resolve_fails_then_succeeds( pkg, responder::ForPath::new(path_to_override, responder::OneByteShortThenError), fidl_fuchsia_pkg::ResolveError::UnavailableBlob, ) .await } #[fuchsia::test] async fn second_resolve_succeeds_when_blob_errors_mid_download()
#[fuchsia::test] async fn second_resolve_succeeds_disconnect_before_far_complete() { let pkg = PackageBuilder::new("second_resolve_succeeds_disconnect_before_far_complete") .add_resource_at( "meta/large_file", vec![0; FILE_SIZE_LARGE_ENOUGH_TO_TRIGGER_HYPER_BATCHING].as_slice(), ) .build() .await .unwrap(); let path_to_override = format!("/blobs/{}", pkg.meta_far_merkle_root()); verify_resolve_fails_then_succeeds( pkg, responder::ForPath::new(path_to_override, responder::OneByteShortThenDisconnect), fidl_fuchsia_pkg::ResolveError::UnavailableBlob, ) .await } #[fuchsia::test] async fn second_resolve_succeeds_disconnect_before_blob_complete() { let blob = vec![0; FILE_SIZE_LARGE_ENOUGH_TO_TRIGGER_HYPER_BATCHING]; let pkg = PackageBuilder::new("second_resolve_succeeds_disconnect_before_blob_complete") .add_resource_at("blobbity/blob", blob.as_slice()) .build() .await .unwrap(); let path_to_override = format!( "/blobs/{}", MerkleTree::from_reader(blob.as_slice()).expect("merkle slice").root() ); verify_resolve_fails_then_succeeds( pkg, responder::ForPath::new(path_to_override, responder::OneByteShortThenDisconnect), fidl_fuchsia_pkg::ResolveError::UnavailableBlob, ) .await } #[fuchsia::test] async fn second_resolve_succeeds_when_far_corrupted() { let pkg = make_pkg_with_extra_blobs("second_resolve_succeeds_when_far_corrupted", 1).await; let path_to_override = format!("/blobs/{}", pkg.meta_far_merkle_root()); verify_resolve_fails_then_succeeds( pkg, responder::ForPath::new(path_to_override, responder::OneByteFlipped), fidl_fuchsia_pkg::ResolveError::Io, ) .await } #[fuchsia::test] async fn second_resolve_succeeds_when_blob_corrupted() { let pkg = make_pkg_with_extra_blobs("second_resolve_succeeds_when_blob_corrupted", 1).await; let blob = extra_blob_contents("second_resolve_succeeds_when_blob_corrupted", 0); let path_to_override = format!( "/blobs/{}", MerkleTree::from_reader(blob.as_slice()).expect("merkle slice").root() ); verify_resolve_fails_then_succeeds( pkg, responder::ForPath::new(path_to_override, responder::OneByteFlipped), fidl_fuchsia_pkg::ResolveError::Io, ) .await } #[fuchsia::test] async fn second_resolve_succeeds_when_tuf_metadata_update_fails() { // pkg-resolver uses tuf::client::Client::with_trusted_root_keys to create its TUF client. // That method will only retrieve the specified version of the root metadata (1 for these // tests), with the rest of the metadata being retrieved during the first update. This means // that hanging all attempts for 2.snapshot.json metadata will allow tuf client creation to // succeed but still fail tuf client update. // We want to specifically verify recovery from update failure because if creation fails, // pkg-resolver will not make a Repository object, so the next resolve attempt would try again // from scratch, but if update fails, pkg-resolver will keep its Repository object which // contains a rust-tuf client in a possibly invalid state, and we want to verify that // pkg-resolver calls update on the client again and that this update recovers the client. let pkg = PackageBuilder::new("second_resolve_succeeds_when_tuf_metadata_update_fails") .build() .await .unwrap(); verify_resolve_fails_then_succeeds( pkg, responder::ForPath::new("/2.snapshot.json", responder::OneByteShortThenDisconnect), fidl_fuchsia_pkg::ResolveError::Internal, ) .await } // The hyper clients used by the pkg-resolver to download blobs and TUF metadata sometimes end up // waiting on operations on their TCP connections that will never return (e.g. 
because of an // upstream network partition). To detect this, the pkg-resolver wraps the hyper client response // futures with timeout futures. To recover from this, the pkg-resolver drops the hyper client // response futures when the timeouts are hit. This recovery plan requires that dropping the hyper // response future causes hyper to close the underlying TCP connection and create a new one the // next time hyper is asked to perform a network operation. This assumption holds for http1, but // not for http2. // // This test verifies the "dropping a hyper response future prevents the underlying connection // from being reused" requirement. It does so by verifying that if a resolve fails due to a blob // download timeout and the resolve is retried, the retry will cause pkg-resolver to make an // additional TCP connection to the blob mirror. // // This test uses https because the test exists to catch changes to the Fuchsia hyper client // that would cause pkg-resolver to use http2 before the Fuchsia hyper client is able to recover // from bad TCP connections when using http2. The pkg-resolver does not explicitly enable http2 // on its hyper clients, so the way this change would sneak in is if the hyper client is changed // to use ALPN to prefer http2. The blob server used in this test has ALPN configured to prefer // http2. #[fuchsia::test] async fn blob_timeout_causes_new_tcp_connection() { let pkg = PackageBuilder::new("blob_timeout_causes_new_tcp_connection").build().await.unwrap(); let repo = Arc::new( RepositoryBuilder::from_template_dir(EMPTY_REPO_PATH) .add_package(&pkg) .build() .await .unwrap(), ); let env = TestEnvBuilder::new() .resolver_variant(ResolverVariant::ZeroBlobNetworkBodyTimeout) .build() .await; let server = repo .server() .response_overrider(responder::ForPathPrefix::new( "/blobs/", responder::Once::new(responder::HangBody), )) .use_https_domain(Domain::TestFuchsiaCom) .bind_to_addr(Ipv4Addr::LOCALHOST) .start() .expect("Starting server succeeds"); env.register_repo(&server).await; assert_eq!(server.connection_attempts(), 0); // The resolve request may not succeed despite the retry: the zero timeout on the blob body // future can fire prior to the body being downloaded on the retry. However, we expect to // observe three connections: one for the TUF client, one for the initial resolve that timed // out, and one for the retried resolve. match env.resolve_package("fuchsia-pkg://test/blob_timeout_causes_new_tcp_connection").await { Ok(_) | Err(fidl_fuchsia_pkg::ResolveError::UnavailableBlob) => {} Err(e) => { panic!("unexpected error: {:?}", e); } }; assert_eq!(server.connection_attempts(), 3); env.stop().await; }
{ let blob = vec![0; FILE_SIZE_LARGE_ENOUGH_TO_TRIGGER_HYPER_BATCHING]; let pkg = PackageBuilder::new("second_resolve_succeeds_when_blob_errors_mid_download") .add_resource_at("blobbity/blob", blob.as_slice()) .build() .await .unwrap(); let path_to_override = format!( "/blobs/{}", MerkleTree::from_reader(blob.as_slice()).expect("merkle slice").root() ); verify_resolve_fails_then_succeeds( pkg, responder::ForPath::new(path_to_override, responder::OneByteShortThenError), fidl_fuchsia_pkg::ResolveError::UnavailableBlob, ) .await }
identifier_body
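The verify_resolve_fails_then_succeeds helper drives failure injection with responder::AtomicToggle: the first resolve hits the failing responder, the toggle is cleared, and the retry succeeds. The same fail-once-then-succeed pattern can be sketched with Go's standard net/http/httptest instead of the Fuchsia test fixtures; the handler, path and payload below are invented for the sketch.

package main

import (
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
	"sync/atomic"
)

func main() {
	// shouldFail plays the role of responder::AtomicToggle in the tests above:
	// while set, the server answers 500; once cleared, requests succeed.
	var shouldFail atomic.Bool
	shouldFail.Store(true)

	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if shouldFail.Load() {
			http.Error(w, "injected failure", http.StatusInternalServerError)
			return
		}
		fmt.Fprint(w, "blob contents")
	}))
	defer srv.Close()

	for _, expectOK := range []bool{false, true} {
		resp, err := http.Get(srv.URL + "/blobs/aaaa")
		if err != nil {
			panic(err)
		}
		body, _ := io.ReadAll(resp.Body)
		resp.Body.Close()
		fmt.Println(resp.StatusCode, string(body), "expected success:", expectOK)
		shouldFail.Store(false) // flip the toggle, mirroring should_fail.unset()
	}
}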
resolve_recovers_from_http_errors.rs
// Copyright 2019 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. /// This module tests the property that pkg_resolver does not enter a bad /// state (successfully handles retries) when the TUF server errors while /// servicing fuchsia.pkg.PackageResolver.Resolve FIDL requests. use { assert_matches::assert_matches, fuchsia_merkle::MerkleTree, fuchsia_pkg_testing::{ serve::{responder, Domain, HttpResponder}, Package, PackageBuilder, RepositoryBuilder, }, lib::{ extra_blob_contents, make_pkg_with_extra_blobs, ResolverVariant, TestEnvBuilder, EMPTY_REPO_PATH, FILE_SIZE_LARGE_ENOUGH_TO_TRIGGER_HYPER_BATCHING, }, std::{net::Ipv4Addr, sync::Arc}, }; async fn verify_resolve_fails_then_succeeds<H: HttpResponder>( pkg: Package, responder: H, failure_error: fidl_fuchsia_pkg::ResolveError, ) { let env = TestEnvBuilder::new().build().await; let repo = Arc::new( RepositoryBuilder::from_template_dir(EMPTY_REPO_PATH) .add_package(&pkg) .build() .await .unwrap(), ); let pkg_url = format!("fuchsia-pkg://test/{}", pkg.name()); let should_fail = responder::AtomicToggle::new(true); let served_repository = repo .server() .response_overrider(responder::Toggleable::new(&should_fail, responder)) .response_overrider(responder::Filter::new( responder::is_range_request, responder::StaticResponseCode::server_error(), )) .start() .unwrap(); env.register_repo(&served_repository).await; // First resolve fails with the expected error. assert_matches!(env.resolve_package(&pkg_url).await, Err(error) if error == failure_error); // Disabling the custom responder allows the subsequent resolves to succeed. should_fail.unset(); let (package_dir, _resolved_context) = env.resolve_package(&pkg_url).await.expect("package to resolve"); pkg.verify_contents(&package_dir).await.expect("correct package contents"); env.stop().await; } #[fuchsia::test] async fn second_resolve_succeeds_when_far_404() { let pkg = make_pkg_with_extra_blobs("second_resolve_succeeds_when_far_404", 1).await; let path_to_override = format!("/blobs/{}", pkg.meta_far_merkle_root()); verify_resolve_fails_then_succeeds( pkg, responder::ForPath::new(path_to_override, responder::StaticResponseCode::not_found()), fidl_fuchsia_pkg::ResolveError::UnavailableBlob, ) .await } #[fuchsia::test] async fn second_resolve_succeeds_when_blob_404() { let pkg = make_pkg_with_extra_blobs("second_resolve_succeeds_when_blob_404", 1).await; let path_to_override = format!( "/blobs/{}", MerkleTree::from_reader( extra_blob_contents("second_resolve_succeeds_when_blob_404", 0).as_slice() ) .expect("merkle slice") .root() ); verify_resolve_fails_then_succeeds( pkg, responder::ForPath::new(path_to_override, responder::StaticResponseCode::not_found()), fidl_fuchsia_pkg::ResolveError::UnavailableBlob, ) .await } #[fuchsia::test] async fn second_resolve_succeeds_when_far_errors_mid_download() { let pkg = PackageBuilder::new("second_resolve_succeeds_when_far_errors_mid_download") .add_resource_at( "meta/large_file", vec![0; FILE_SIZE_LARGE_ENOUGH_TO_TRIGGER_HYPER_BATCHING].as_slice(), ) .build() .await .unwrap(); let path_to_override = format!("/blobs/{}", pkg.meta_far_merkle_root()); verify_resolve_fails_then_succeeds( pkg, responder::ForPath::new(path_to_override, responder::OneByteShortThenError), fidl_fuchsia_pkg::ResolveError::UnavailableBlob, ) .await } #[fuchsia::test] async fn second_resolve_succeeds_when_blob_errors_mid_download() { let blob = vec![0; 
FILE_SIZE_LARGE_ENOUGH_TO_TRIGGER_HYPER_BATCHING]; let pkg = PackageBuilder::new("second_resolve_succeeds_when_blob_errors_mid_download") .add_resource_at("blobbity/blob", blob.as_slice()) .build() .await .unwrap(); let path_to_override = format!( "/blobs/{}", MerkleTree::from_reader(blob.as_slice()).expect("merkle slice").root() ); verify_resolve_fails_then_succeeds( pkg, responder::ForPath::new(path_to_override, responder::OneByteShortThenError), fidl_fuchsia_pkg::ResolveError::UnavailableBlob, ) .await } #[fuchsia::test] async fn second_resolve_succeeds_disconnect_before_far_complete() { let pkg = PackageBuilder::new("second_resolve_succeeds_disconnect_before_far_complete") .add_resource_at( "meta/large_file", vec![0; FILE_SIZE_LARGE_ENOUGH_TO_TRIGGER_HYPER_BATCHING].as_slice(), ) .build() .await .unwrap(); let path_to_override = format!("/blobs/{}", pkg.meta_far_merkle_root()); verify_resolve_fails_then_succeeds( pkg, responder::ForPath::new(path_to_override, responder::OneByteShortThenDisconnect), fidl_fuchsia_pkg::ResolveError::UnavailableBlob, ) .await } #[fuchsia::test] async fn second_resolve_succeeds_disconnect_before_blob_complete() { let blob = vec![0; FILE_SIZE_LARGE_ENOUGH_TO_TRIGGER_HYPER_BATCHING]; let pkg = PackageBuilder::new("second_resolve_succeeds_disconnect_before_blob_complete") .add_resource_at("blobbity/blob", blob.as_slice()) .build() .await .unwrap(); let path_to_override = format!( "/blobs/{}", MerkleTree::from_reader(blob.as_slice()).expect("merkle slice").root() ); verify_resolve_fails_then_succeeds( pkg, responder::ForPath::new(path_to_override, responder::OneByteShortThenDisconnect), fidl_fuchsia_pkg::ResolveError::UnavailableBlob, ) .await } #[fuchsia::test] async fn second_resolve_succeeds_when_far_corrupted() { let pkg = make_pkg_with_extra_blobs("second_resolve_succeeds_when_far_corrupted", 1).await; let path_to_override = format!("/blobs/{}", pkg.meta_far_merkle_root()); verify_resolve_fails_then_succeeds( pkg, responder::ForPath::new(path_to_override, responder::OneByteFlipped), fidl_fuchsia_pkg::ResolveError::Io, ) .await } #[fuchsia::test] async fn second_resolve_succeeds_when_blob_corrupted() { let pkg = make_pkg_with_extra_blobs("second_resolve_succeeds_when_blob_corrupted", 1).await; let blob = extra_blob_contents("second_resolve_succeeds_when_blob_corrupted", 0); let path_to_override = format!( "/blobs/{}", MerkleTree::from_reader(blob.as_slice()).expect("merkle slice").root() ); verify_resolve_fails_then_succeeds( pkg, responder::ForPath::new(path_to_override, responder::OneByteFlipped), fidl_fuchsia_pkg::ResolveError::Io, ) .await } #[fuchsia::test] async fn second_resolve_succeeds_when_tuf_metadata_update_fails() { // pkg-resolver uses tuf::client::Client::with_trusted_root_keys to create its TUF client. // That method will only retrieve the specified version of the root metadata (1 for these // tests), with the rest of the metadata being retrieved during the first update. This means // that hanging all attempts for 2.snapshot.json metadata will allow tuf client creation to // succeed but still fail tuf client update. 
// We want to specifically verify recovery from update failure because if creation fails, // pkg-resolver will not make a Repository object, so the next resolve attempt would try again // from scratch, but if update fails, pkg-resolver will keep its Repository object which // contains a rust-tuf client in a possibly invalid state, and we want to verify that // pkg-resolver calls update on the client again and that this update recovers the client.
.await .unwrap(); verify_resolve_fails_then_succeeds( pkg, responder::ForPath::new("/2.snapshot.json", responder::OneByteShortThenDisconnect), fidl_fuchsia_pkg::ResolveError::Internal, ) .await } // The hyper clients used by the pkg-resolver to download blobs and TUF metadata sometimes end up // waiting on operations on their TCP connections that will never return (e.g. because of an // upstream network partition). To detect this, the pkg-resolver wraps the hyper client response // futures with timeout futures. To recover from this, the pkg-resolver drops the hyper client // response futures when the timeouts are hit. This recovery plan requires that dropping the hyper // response future causes hyper to close the underlying TCP connection and create a new one the // next time hyper is asked to perform a network operation. This assumption holds for http1, but // not for http2. // // This test verifies the "dropping a hyper response future prevents the underlying connection // from being reused" requirement. It does so by verifying that if a resolve fails due to a blob // download timeout and the resolve is retried, the retry will cause pkg-resolver to make an // additional TCP connection to the blob mirror. // // This test uses https because the test exists to catch changes to the Fuchsia hyper client // that would cause pkg-resolver to use http2 before the Fuchsia hyper client is able to recover // from bad TCP connections when using http2. The pkg-resolver does not explicitly enable http2 // on its hyper clients, so the way this change would sneak in is if the hyper client is changed // to use ALPN to prefer http2. The blob server used in this test has ALPN configured to prefer // http2. #[fuchsia::test] async fn blob_timeout_causes_new_tcp_connection() { let pkg = PackageBuilder::new("blob_timeout_causes_new_tcp_connection").build().await.unwrap(); let repo = Arc::new( RepositoryBuilder::from_template_dir(EMPTY_REPO_PATH) .add_package(&pkg) .build() .await .unwrap(), ); let env = TestEnvBuilder::new() .resolver_variant(ResolverVariant::ZeroBlobNetworkBodyTimeout) .build() .await; let server = repo .server() .response_overrider(responder::ForPathPrefix::new( "/blobs/", responder::Once::new(responder::HangBody), )) .use_https_domain(Domain::TestFuchsiaCom) .bind_to_addr(Ipv4Addr::LOCALHOST) .start() .expect("Starting server succeeds"); env.register_repo(&server).await; assert_eq!(server.connection_attempts(), 0); // The resolve request may not succeed despite the retry: the zero timeout on the blob body // future can fire prior to the body being downloaded on the retry. However, we expect to // observe three connections: one for the TUF client, one for the initial resolve that timed // out, and one for the retried resolve. match env.resolve_package("fuchsia-pkg://test/blob_timeout_causes_new_tcp_connection").await { Ok(_) | Err(fidl_fuchsia_pkg::ResolveError::UnavailableBlob) => {} Err(e) => { panic!("unexpected error: {:?}", e); } }; assert_eq!(server.connection_attempts(), 3); env.stop().await; }
let pkg = PackageBuilder::new("second_resolve_succeeds_when_tuf_metadata_update_fails") .build()
random_line_split
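Several of these tests use responder::OneByteShortThenError or OneByteShortThenDisconnect to abort a blob download mid-stream. A rough stdlib Go approximation of that behaviour is to advertise the full Content-Length but deliver one byte less, so the client fails while reading the body; the exact error surfaced may differ between clients.

package main

import (
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
	"strconv"
)

func main() {
	payload := []byte("0123456789")

	// Declare the full length but write one byte short, roughly what
	// responder::OneByteShortThenError does to a blob response.
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Length", strconv.Itoa(len(payload)))
		w.Write(payload[:len(payload)-1])
	}))
	defer srv.Close()

	resp, err := http.Get(srv.URL)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	_, err = io.ReadAll(resp.Body)
	fmt.Println("read error:", err) // the client should see an unexpected EOF
}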
resolve_recovers_from_http_errors.rs
// Copyright 2019 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. /// This module tests the property that pkg_resolver does not enter a bad /// state (successfully handles retries) when the TUF server errors while /// servicing fuchsia.pkg.PackageResolver.Resolve FIDL requests. use { assert_matches::assert_matches, fuchsia_merkle::MerkleTree, fuchsia_pkg_testing::{ serve::{responder, Domain, HttpResponder}, Package, PackageBuilder, RepositoryBuilder, }, lib::{ extra_blob_contents, make_pkg_with_extra_blobs, ResolverVariant, TestEnvBuilder, EMPTY_REPO_PATH, FILE_SIZE_LARGE_ENOUGH_TO_TRIGGER_HYPER_BATCHING, }, std::{net::Ipv4Addr, sync::Arc}, }; async fn verify_resolve_fails_then_succeeds<H: HttpResponder>( pkg: Package, responder: H, failure_error: fidl_fuchsia_pkg::ResolveError, ) { let env = TestEnvBuilder::new().build().await; let repo = Arc::new( RepositoryBuilder::from_template_dir(EMPTY_REPO_PATH) .add_package(&pkg) .build() .await .unwrap(), ); let pkg_url = format!("fuchsia-pkg://test/{}", pkg.name()); let should_fail = responder::AtomicToggle::new(true); let served_repository = repo .server() .response_overrider(responder::Toggleable::new(&should_fail, responder)) .response_overrider(responder::Filter::new( responder::is_range_request, responder::StaticResponseCode::server_error(), )) .start() .unwrap(); env.register_repo(&served_repository).await; // First resolve fails with the expected error. assert_matches!(env.resolve_package(&pkg_url).await, Err(error) if error == failure_error); // Disabling the custom responder allows the subsequent resolves to succeed. should_fail.unset(); let (package_dir, _resolved_context) = env.resolve_package(&pkg_url).await.expect("package to resolve"); pkg.verify_contents(&package_dir).await.expect("correct package contents"); env.stop().await; } #[fuchsia::test] async fn second_resolve_succeeds_when_far_404() { let pkg = make_pkg_with_extra_blobs("second_resolve_succeeds_when_far_404", 1).await; let path_to_override = format!("/blobs/{}", pkg.meta_far_merkle_root()); verify_resolve_fails_then_succeeds( pkg, responder::ForPath::new(path_to_override, responder::StaticResponseCode::not_found()), fidl_fuchsia_pkg::ResolveError::UnavailableBlob, ) .await } #[fuchsia::test] async fn
() { let pkg = make_pkg_with_extra_blobs("second_resolve_succeeds_when_blob_404", 1).await; let path_to_override = format!( "/blobs/{}", MerkleTree::from_reader( extra_blob_contents("second_resolve_succeeds_when_blob_404", 0).as_slice() ) .expect("merkle slice") .root() ); verify_resolve_fails_then_succeeds( pkg, responder::ForPath::new(path_to_override, responder::StaticResponseCode::not_found()), fidl_fuchsia_pkg::ResolveError::UnavailableBlob, ) .await } #[fuchsia::test] async fn second_resolve_succeeds_when_far_errors_mid_download() { let pkg = PackageBuilder::new("second_resolve_succeeds_when_far_errors_mid_download") .add_resource_at( "meta/large_file", vec![0; FILE_SIZE_LARGE_ENOUGH_TO_TRIGGER_HYPER_BATCHING].as_slice(), ) .build() .await .unwrap(); let path_to_override = format!("/blobs/{}", pkg.meta_far_merkle_root()); verify_resolve_fails_then_succeeds( pkg, responder::ForPath::new(path_to_override, responder::OneByteShortThenError), fidl_fuchsia_pkg::ResolveError::UnavailableBlob, ) .await } #[fuchsia::test] async fn second_resolve_succeeds_when_blob_errors_mid_download() { let blob = vec![0; FILE_SIZE_LARGE_ENOUGH_TO_TRIGGER_HYPER_BATCHING]; let pkg = PackageBuilder::new("second_resolve_succeeds_when_blob_errors_mid_download") .add_resource_at("blobbity/blob", blob.as_slice()) .build() .await .unwrap(); let path_to_override = format!( "/blobs/{}", MerkleTree::from_reader(blob.as_slice()).expect("merkle slice").root() ); verify_resolve_fails_then_succeeds( pkg, responder::ForPath::new(path_to_override, responder::OneByteShortThenError), fidl_fuchsia_pkg::ResolveError::UnavailableBlob, ) .await } #[fuchsia::test] async fn second_resolve_succeeds_disconnect_before_far_complete() { let pkg = PackageBuilder::new("second_resolve_succeeds_disconnect_before_far_complete") .add_resource_at( "meta/large_file", vec![0; FILE_SIZE_LARGE_ENOUGH_TO_TRIGGER_HYPER_BATCHING].as_slice(), ) .build() .await .unwrap(); let path_to_override = format!("/blobs/{}", pkg.meta_far_merkle_root()); verify_resolve_fails_then_succeeds( pkg, responder::ForPath::new(path_to_override, responder::OneByteShortThenDisconnect), fidl_fuchsia_pkg::ResolveError::UnavailableBlob, ) .await } #[fuchsia::test] async fn second_resolve_succeeds_disconnect_before_blob_complete() { let blob = vec![0; FILE_SIZE_LARGE_ENOUGH_TO_TRIGGER_HYPER_BATCHING]; let pkg = PackageBuilder::new("second_resolve_succeeds_disconnect_before_blob_complete") .add_resource_at("blobbity/blob", blob.as_slice()) .build() .await .unwrap(); let path_to_override = format!( "/blobs/{}", MerkleTree::from_reader(blob.as_slice()).expect("merkle slice").root() ); verify_resolve_fails_then_succeeds( pkg, responder::ForPath::new(path_to_override, responder::OneByteShortThenDisconnect), fidl_fuchsia_pkg::ResolveError::UnavailableBlob, ) .await } #[fuchsia::test] async fn second_resolve_succeeds_when_far_corrupted() { let pkg = make_pkg_with_extra_blobs("second_resolve_succeeds_when_far_corrupted", 1).await; let path_to_override = format!("/blobs/{}", pkg.meta_far_merkle_root()); verify_resolve_fails_then_succeeds( pkg, responder::ForPath::new(path_to_override, responder::OneByteFlipped), fidl_fuchsia_pkg::ResolveError::Io, ) .await } #[fuchsia::test] async fn second_resolve_succeeds_when_blob_corrupted() { let pkg = make_pkg_with_extra_blobs("second_resolve_succeeds_when_blob_corrupted", 1).await; let blob = extra_blob_contents("second_resolve_succeeds_when_blob_corrupted", 0); let path_to_override = format!( "/blobs/{}", 
MerkleTree::from_reader(blob.as_slice()).expect("merkle slice").root() ); verify_resolve_fails_then_succeeds( pkg, responder::ForPath::new(path_to_override, responder::OneByteFlipped), fidl_fuchsia_pkg::ResolveError::Io, ) .await } #[fuchsia::test] async fn second_resolve_succeeds_when_tuf_metadata_update_fails() { // pkg-resolver uses tuf::client::Client::with_trusted_root_keys to create its TUF client. // That method will only retrieve the specified version of the root metadata (1 for these // tests), with the rest of the metadata being retrieved during the first update. This means // that hanging all attempts for 2.snapshot.json metadata will allow tuf client creation to // succeed but still fail tuf client update. // We want to specifically verify recovery from update failure because if creation fails, // pkg-resolver will not make a Repository object, so the next resolve attempt would try again // from scratch, but if update fails, pkg-resolver will keep its Repository object which // contains a rust-tuf client in a possibly invalid state, and we want to verify that // pkg-resolver calls update on the client again and that this update recovers the client. let pkg = PackageBuilder::new("second_resolve_succeeds_when_tuf_metadata_update_fails") .build() .await .unwrap(); verify_resolve_fails_then_succeeds( pkg, responder::ForPath::new("/2.snapshot.json", responder::OneByteShortThenDisconnect), fidl_fuchsia_pkg::ResolveError::Internal, ) .await } // The hyper clients used by the pkg-resolver to download blobs and TUF metadata sometimes end up // waiting on operations on their TCP connections that will never return (e.g. because of an // upstream network partition). To detect this, the pkg-resolver wraps the hyper client response // futures with timeout futures. To recover from this, the pkg-resolver drops the hyper client // response futures when the timeouts are hit. This recovery plan requires that dropping the hyper // response future causes hyper to close the underlying TCP connection and create a new one the // next time hyper is asked to perform a network operation. This assumption holds for http1, but // not for http2. // // This test verifies the "dropping a hyper response future prevents the underlying connection // from being reused" requirement. It does so by verifying that if a resolve fails due to a blob // download timeout and the resolve is retried, the retry will cause pkg-resolver to make an // additional TCP connection to the blob mirror. // // This test uses https because the test exists to catch changes to the Fuchsia hyper client // that would cause pkg-resolver to use http2 before the Fuchsia hyper client is able to recover // from bad TCP connections when using http2. The pkg-resolver does not explicitly enable http2 // on its hyper clients, so the way this change would sneak in is if the hyper client is changed // to use ALPN to prefer http2. The blob server used in this test has ALPN configured to prefer // http2. 
#[fuchsia::test] async fn blob_timeout_causes_new_tcp_connection() { let pkg = PackageBuilder::new("blob_timeout_causes_new_tcp_connection").build().await.unwrap(); let repo = Arc::new( RepositoryBuilder::from_template_dir(EMPTY_REPO_PATH) .add_package(&pkg) .build() .await .unwrap(), ); let env = TestEnvBuilder::new() .resolver_variant(ResolverVariant::ZeroBlobNetworkBodyTimeout) .build() .await; let server = repo .server() .response_overrider(responder::ForPathPrefix::new( "/blobs/", responder::Once::new(responder::HangBody), )) .use_https_domain(Domain::TestFuchsiaCom) .bind_to_addr(Ipv4Addr::LOCALHOST) .start() .expect("Starting server succeeds"); env.register_repo(&server).await; assert_eq!(server.connection_attempts(), 0); // The resolve request may not succeed despite the retry: the zero timeout on the blob body // future can fire prior to the body being downloaded on the retry. However, we expect to // observe three connections: one for the TUF client, one for the initial resolve that timed // out, and one for the retried resolve. match env.resolve_package("fuchsia-pkg://test/blob_timeout_causes_new_tcp_connection").await { Ok(_) | Err(fidl_fuchsia_pkg::ResolveError::UnavailableBlob) => {} Err(e) => { panic!("unexpected error: {:?}", e); } }; assert_eq!(server.connection_attempts(), 3); env.stop().await; }
second_resolve_succeeds_when_blob_404
identifier_name
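blob_timeout_causes_new_tcp_connection asserts on server.connection_attempts() to show that a retried resolve opens a fresh TCP connection rather than reusing a wedged one. Counting connections the same way is straightforward with Go's httptest server and its ConnState hook; this is only an analogue of that assertion, not the Fuchsia serving stack.

package main

import (
	"fmt"
	"net"
	"net/http"
	"net/http/httptest"
	"sync/atomic"
)

func main() {
	var connections int64

	srv := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	}))
	// Count new TCP connections, similar in spirit to connection_attempts().
	srv.Config.ConnState = func(_ net.Conn, state http.ConnState) {
		if state == http.StateNew {
			atomic.AddInt64(&connections, 1)
		}
	}
	srv.Start()
	defer srv.Close()

	// Two requests over separate, non-keep-alive clients should each open
	// their own connection, the same property the retry test relies on.
	for i := 0; i < 2; i++ {
		c := &http.Client{Transport: &http.Transport{DisableKeepAlives: true}}
		resp, err := c.Get(srv.URL)
		if err != nil {
			panic(err)
		}
		resp.Body.Close()
	}
	fmt.Println("connections:", atomic.LoadInt64(&connections)) // 2
}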
value_textbox.rs
// Copyright 2021 The Druid Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! A textbox that that parses and validates data. use tracing::instrument; use super::TextBox; use crate::text::{Formatter, Selection, TextComponent, ValidationError}; use crate::widget::prelude::*; use crate::{Data, Selector}; const BEGIN_EDITING: Selector = Selector::new("druid.builtin.textbox-begin-editing"); const COMPLETE_EDITING: Selector = Selector::new("druid.builtin.textbox-complete-editing"); /// A `TextBox` that uses a [`Formatter`] to handle formatting and validation /// of its data. /// /// There are a number of ways to customize the behaviour of the text box /// in relation to the provided [`Formatter`]: /// /// - [`ValueTextBox::validate_while_editing`] takes a flag that determines whether /// or not the textbox can display text that is not valid, while editing is /// in progress. (Text will still be validated when the user attempts to complete /// editing.) /// /// - [`ValueTextBox::update_data_while_editing`] takes a flag that determines /// whether the output value is updated during editing, when possible. /// /// - [`ValueTextBox::delegate`] allows you to provide some implementation of /// the [`ValidationDelegate`] trait, which receives a callback during editing; /// this can be used to report errors further back up the tree. pub struct ValueTextBox<T> { child: TextBox<String>, formatter: Box<dyn Formatter<T>>, callback: Option<Box<dyn ValidationDelegate>>, is_editing: bool, validate_while_editing: bool, update_data_while_editing: bool, /// the last data that this textbox saw or created. /// This is used to determine when a change to the data is originating /// elsewhere in the application, which we need to special-case last_known_data: Option<T>, force_selection: Option<Selection>, old_buffer: String, buffer: String, } /// A type that can be registered to receive callbacks as the state of a /// [`ValueTextBox`] changes. pub trait ValidationDelegate { /// Called with a [`TextBoxEvent`] whenever the validation state of a /// [`ValueTextBox`] changes. fn event(&mut self, ctx: &mut EventCtx, event: TextBoxEvent, current_text: &str); } /// Events sent to a [`ValidationDelegate`]. pub enum TextBoxEvent { /// The textbox began editing. Began, /// An edit occured which was considered valid by the [`Formatter`]. Changed, /// An edit occured which was rejected by the [`Formatter`]. PartiallyInvalid(ValidationError), /// The user attempted to finish editing, but the input was not valid. Invalid(ValidationError), /// The user finished editing, with valid input. Complete, /// Editing was cancelled. Cancel, } impl TextBox<String> { /// Turn this `TextBox` into a [`ValueTextBox`], using the [`Formatter`] to /// manage the value. /// /// For simple value formatting, you can use the [`ParseFormatter`]. 
/// /// [`ValueTextBox`]: ValueTextBox /// [`Formatter`]: crate::text::format::Formatter /// [`ParseFormatter`]: crate::text::format::ParseFormatter pub fn with_formatter<T: Data>( self, formatter: impl Formatter<T> + 'static, ) -> ValueTextBox<T> { ValueTextBox::new(self, formatter) } } impl<T: Data> ValueTextBox<T> { /// Create a new `ValueTextBox` from a normal [`TextBox`] and a [`Formatter`]. /// /// [`TextBox`]: crate::widget::TextBox /// [`Formatter`]: crate::text::format::Formatter pub fn new(mut child: TextBox<String>, formatter: impl Formatter<T> + 'static) -> Self { child.text_mut().borrow_mut().send_notification_on_return = true; child.text_mut().borrow_mut().send_notification_on_cancel = true; child.handles_tab_notifications = false; ValueTextBox { child, formatter: Box::new(formatter), callback: None, is_editing: false,
last_known_data: None, validate_while_editing: true, update_data_while_editing: false, old_buffer: String::new(), buffer: String::new(), force_selection: None, } } /// Builder-style method to set an optional [`ValidationDelegate`] on this /// textbox. pub fn delegate(mut self, delegate: impl ValidationDelegate + 'static) -> Self { self.callback = Some(Box::new(delegate)); self } /// Builder-style method to set whether or not this text box validates /// its contents during editing. /// /// If `true` (the default) edits that fail validation /// ([`Formatter::validate_partial_input`]) will be rejected. If `false`, /// those edits will be accepted, and the text box will be updated. pub fn validate_while_editing(mut self, validate: bool) -> Self { self.validate_while_editing = validate; self } /// Builder-style method to set whether or not this text box updates the /// incoming data during editing. /// /// If `false` (the default) the data is only updated when editing completes. pub fn update_data_while_editing(mut self, flag: bool) -> Self { self.update_data_while_editing = flag; self } fn complete(&mut self, ctx: &mut EventCtx, data: &mut T) -> bool { match self.formatter.value(&self.buffer) { Ok(new_data) => { *data = new_data; self.buffer = self.formatter.format(data); self.is_editing = false; ctx.request_update(); self.send_event(ctx, TextBoxEvent::Complete); true } Err(err) => { if self.child.text().can_write() { if let Some(inval) = self .child .text_mut() .borrow_mut() .set_selection(Selection::new(0, self.buffer.len())) { ctx.invalidate_text_input(inval); } } self.send_event(ctx, TextBoxEvent::Invalid(err)); // our content isn't valid // ideally we would flash the background or something false } } } fn cancel(&mut self, ctx: &mut EventCtx, data: &T) { self.is_editing = false; self.buffer = self.formatter.format(data); ctx.request_update(); ctx.resign_focus(); self.send_event(ctx, TextBoxEvent::Cancel); } fn begin(&mut self, ctx: &mut EventCtx, data: &T) { self.is_editing = true; self.buffer = self.formatter.format_for_editing(data); self.last_known_data = Some(data.clone()); ctx.request_update(); self.send_event(ctx, TextBoxEvent::Began); } fn send_event(&mut self, ctx: &mut EventCtx, event: TextBoxEvent) { if let Some(delegate) = self.callback.as_mut() { delegate.event(ctx, event, &self.buffer) } } } impl<T: Data + std::fmt::Debug> Widget<T> for ValueTextBox<T> { #[instrument( name = "ValueTextBox", level = "trace", skip(self, ctx, event, data, env) )] fn event(&mut self, ctx: &mut EventCtx, event: &Event, data: &mut T, env: &Env) { if matches!(event, Event::Command(cmd) if cmd.is(BEGIN_EDITING)) { return self.begin(ctx, data); } if self.is_editing { // if we reject an edit we want to reset the selection let pre_sel = if self.child.text().can_read() { Some(self.child.text().borrow().selection()) } else { None }; match event { // this is caused by an external focus change, like the mouse being clicked // elsewhere. 
Event::Command(cmd) if cmd.is(COMPLETE_EDITING) => { if !self.complete(ctx, data) { self.cancel(ctx, data); } return; } Event::Notification(cmd) if cmd.is(TextComponent::TAB) => { ctx.set_handled(); ctx.request_paint(); if self.complete(ctx, data) { ctx.focus_next(); } return; } Event::Notification(cmd) if cmd.is(TextComponent::BACKTAB) => { ctx.request_paint(); ctx.set_handled(); if self.complete(ctx, data) { ctx.focus_prev(); } return; } Event::Notification(cmd) if cmd.is(TextComponent::RETURN) => { ctx.set_handled(); if self.complete(ctx, data) { ctx.resign_focus(); } return; } Event::Notification(cmd) if cmd.is(TextComponent::CANCEL) => { ctx.set_handled(); self.cancel(ctx, data); return; } event => { self.child.event(ctx, event, &mut self.buffer, env); } } // if an edit occured, validate it with the formatter // notifications can arrive before update, so we always ignore them if !matches!(event, Event::Notification(_)) && self.buffer != self.old_buffer { let mut validation = self .formatter .validate_partial_input(&self.buffer, &self.child.text().borrow().selection()); if self.validate_while_editing { let new_buf = match (validation.text_change.take(), validation.is_err()) { (Some(new_text), _) => { // be helpful: if the formatter is misbehaved, log it. if self .formatter .validate_partial_input(&new_text, &Selection::caret(0)) .is_err() { tracing::warn!( "formatter replacement text does not validate: '{}'", &new_text ); None } else { Some(new_text) } } (None, true) => Some(self.old_buffer.clone()), _ => None, }; let new_sel = match (validation.selection_change.take(), validation.is_err()) { (Some(new_sel), _) => Some(new_sel), (None, true) if pre_sel.is_some() => pre_sel, _ => None, }; if let Some(new_buf) = new_buf { self.buffer = new_buf; } self.force_selection = new_sel; if self.update_data_while_editing && !validation.is_err() { if let Ok(new_data) = self.formatter.value(&self.buffer) { *data = new_data; self.last_known_data = Some(data.clone()); } } } match validation.error() { Some(err) => { self.send_event(ctx, TextBoxEvent::PartiallyInvalid(err.to_owned())) } None => self.send_event(ctx, TextBoxEvent::Changed), }; ctx.request_update(); } // if we *aren't* editing: } else { if let Event::MouseDown(_) = event { self.begin(ctx, data); } self.child.event(ctx, event, &mut self.buffer, env); } } #[instrument( name = "ValueTextBox", level = "trace", skip(self, ctx, event, data, env) )] fn lifecycle(&mut self, ctx: &mut LifeCycleCtx, event: &LifeCycle, data: &T, env: &Env) { match event { LifeCycle::WidgetAdded => { self.buffer = self.formatter.format(data); self.old_buffer = self.buffer.clone(); } LifeCycle::FocusChanged(true) if !self.is_editing => { ctx.submit_command(BEGIN_EDITING.to(ctx.widget_id())); } LifeCycle::FocusChanged(false) => { ctx.submit_command(COMPLETE_EDITING.to(ctx.widget_id())); } _ => (), } self.child.lifecycle(ctx, event, &self.buffer, env); } #[instrument( name = "ValueTextBox", level = "trace", skip(self, ctx, old, data, env) )] fn update(&mut self, ctx: &mut UpdateCtx, old: &T, data: &T, env: &Env) { if let Some(sel) = self.force_selection.take() { if self.child.text().can_write() { if let Some(change) = self.child.text_mut().borrow_mut().set_selection(sel) { ctx.invalidate_text_input(change); } } } let changed_by_us = self .last_known_data .as_ref() .map(|d| d.same(data)) .unwrap_or(false); if self.is_editing { if changed_by_us { self.child.update(ctx, &self.old_buffer, &self.buffer, env); self.old_buffer = self.buffer.clone(); } else { // textbox is not 
well equipped to deal with the fact that, in // druid, data can change anywhere in the tree. If we are actively // editing, and new data arrives, we ignore the new data and keep // editing; the alternative would be to cancel editing, which // could also make sense. tracing::warn!( "ValueTextBox data changed externally, idk: '{}'", self.formatter.format(data) ); } } else { if !old.same(data) { // we aren't editing and data changed let new_text = self.formatter.format(data); // it's possible for different data inputs to produce the same formatted // output, in which case we would overwrite our actual previous data if !new_text.same(&self.buffer) { self.old_buffer = std::mem::replace(&mut self.buffer, new_text); } } if !self.old_buffer.same(&self.buffer) { // child widget handles calling request_layout, as needed self.child.update(ctx, &self.old_buffer, &self.buffer, env); self.old_buffer = self.buffer.clone(); } else if ctx.env_changed() { self.child.update(ctx, &self.buffer, &self.buffer, env); } } } #[instrument( name = "ValueTextBox", level = "trace", skip(self, ctx, bc, _data, env) )] fn layout(&mut self, ctx: &mut LayoutCtx, bc: &BoxConstraints, _data: &T, env: &Env) -> Size { self.child.layout(ctx, bc, &self.buffer, env) } #[instrument(name = "ValueTextBox", level = "trace", skip(self, ctx, _data, env))] fn paint(&mut self, ctx: &mut PaintCtx, _data: &T, env: &Env) { self.child.paint(ctx, &self.buffer, env); } }
has_loc.rs
// Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the "hack" directory of this source tree.

use std::borrow::Cow;

use proc_macro2::Ident;
use proc_macro2::Span;
use proc_macro2::TokenStream;
use quote::quote;
use quote::ToTokens;
use syn::spanned::Spanned;
use syn::Attribute;
use syn::Data;
use syn::DataEnum;
use syn::DataStruct;
use syn::DeriveInput;
use syn::Error;
use syn::Lit;
use syn::Meta;
use syn::NestedMeta;
use syn::Result;
use syn::Variant;

use crate::simple_type::SimpleType;
use crate::util::InterestingFields;

/// Builds a HasLoc impl.
///
/// The build rules are as follows:
/// - For a struct it just looks for a field with a type of LocId.
/// - For an enum it does a match on each variant.
///   - For either tuple variants or struct variants it looks for a field with
///     a type of LocId.
///   - For a tuple variant with a single non-LocId field it calls `.loc_id()`
///     on that field.
///   - Otherwise you can specify `#[has_loc(n)]` where `n` is the index of the
///     field to call `.loc_id()` on. `#[has_loc(n)]` can also be used on the
///     whole enum to provide a default index.
///
pub(crate) fn build_has_loc
(input: TokenStream) -> Result<TokenStream> { let input = syn::parse2::<DeriveInput>(input)?; match &input.data { Data::Enum(data) => build_has_loc_enum(&input, data), Data::Struct(data) => build_has_loc_struct(&input, data), Data::Union(_) => Err(Error::new(input.span(), "Union not handled")), } } fn field_might_contain_buried_loc_id(ty: &SimpleType<'_>) -> bool { if let Some(ident) = ty.get_ident() { !(ident == "BlockId" || ident == "ClassId" || ident == "ConstId" || ident == "ValueId" || ident == "LocalId" || ident == "MethodId" || ident == "ParamId" || ident == "VarId" || ident == "usize" || ident == "u32") } else { true } } fn build_has_loc_struct(input: &DeriveInput, data: &DataStruct) -> Result<TokenStream> { // struct Foo { // ... // loc: LocId, // } let struct_name = &input.ident; let default_select_field = handle_has_loc_attr(&input.attrs)?; let loc_field = if let Some(f) = default_select_field { match f.kind { FieldKind::Named(name) => { let name = name.to_string(); let field = data .fields .iter() .find(|field| field.ident.as_ref().map_or(false, |id| id == &name)) .ok_or_else(|| Error::new(input.span(), format!("Field '{name}' not found")))? .ident .as_ref() .unwrap(); quote!(#field.loc_id()) } FieldKind::None => todo!(), FieldKind::Numbered(_) => todo!(), } } else { let field = data .fields .iter() .enumerate() .map(|(i, field)| (i, field, SimpleType::from_type(&field.ty))) .find(|(_, _, ty)| ty.is_based_on("LocId")); let (idx, field, _) = field.ok_or_else(|| Error::new(input.span(), "No field with type LocId found"))?; if let Some(ident) = field.ident.as_ref() { ident.to_token_stream() } else { syn::Index::from(idx).to_token_stream() } }; let (impl_generics, ty_generics, where_clause) = input.generics.split_for_impl(); let output = quote!(impl #impl_generics HasLoc for #struct_name #ty_generics #where_clause { fn loc_id(&self) -> LocId { self.#loc_field } }); Ok(output) } fn get_select_field<'a>( variant: &'a Variant, default_select_field: &Option<Field<'a>>, ) -> Result<Option<Field<'a>>> { if let Some(f) = handle_has_loc_attr(&variant.attrs)? { return Ok(Some(f)); } if let Some(f) = default_select_field.as_ref() { return Ok(Some(f.clone())); } let mut interesting_fields = InterestingFields::None; for (idx, field) in variant.fields.iter().enumerate() { let ty = SimpleType::from_type(&field.ty); if ty.is_based_on("LocId") { let kind = if let Some(ident) = field.ident.as_ref() { // Bar { .., loc: LocId } FieldKind::Named(Cow::Borrowed(ident)) } else { // Bar(.., LocId) FieldKind::Numbered(idx) }; return Ok(Some(Field { kind, ty })); } else if field_might_contain_buried_loc_id(&ty) { // Report the type as 'unknown' because it's not a type that's // related to LocId. interesting_fields.add(idx, field.ident.as_ref(), SimpleType::Unknown); } } match interesting_fields { InterestingFields::None => { let kind = FieldKind::None; let ty = SimpleType::Unknown; Ok(Some(Field { kind, ty })) } InterestingFields::One(idx, ident, ty) => { // There's only a single field that could possibly contain a buried // LocId. 
let kind = ident.map_or_else( || FieldKind::Numbered(idx), |id| FieldKind::Named(Cow::Borrowed(id)), ); Ok(Some(Field { kind, ty })) } InterestingFields::Many => Ok(None), } } fn build_has_loc_enum(input: &DeriveInput, data: &DataEnum) -> Result<TokenStream> { // enum Foo { // Bar(.., LocId), // Baz { .., loc: LocId }, // } let default_select_field = handle_has_loc_attr(&input.attrs)?; let enum_name = &input.ident; let mut variants: Vec<TokenStream> = Vec::new(); for variant in data.variants.iter() { let select_field = get_select_field(variant, &default_select_field)?; if let Some(select_field) = select_field { push_handler(&mut variants, enum_name, variant, select_field); } else { return Err(Error::new( variant.span(), format!("LocId field not found in variant {}", variant.ident,), )); } } let (impl_generics, ty_generics, where_clause) = input.generics.split_for_impl(); let output = quote!(impl #impl_generics HasLoc for #enum_name #ty_generics #where_clause { fn loc_id(&self) -> LocId { match self { #(#variants),* } } }); Ok(output) } #[derive(Clone)] struct Field<'a> { kind: FieldKind<'a>, ty: SimpleType<'a>, } #[derive(Clone)] enum FieldKind<'a> { Named(Cow<'a, Ident>), None, Numbered(usize), } fn push_handler( variants: &mut Vec<TokenStream>, enum_name: &Ident, variant: &Variant, field: Field<'_>, ) { let variant_name = &variant.ident; let reference = match (&field.kind, &field.ty) { (FieldKind::None, _) => quote!(LocId::NONE), (_, SimpleType::Unknown) => quote!(f.loc_id()), (_, SimpleType::Unit(_)) => quote!(*f), (_, SimpleType::Array(_)) | (_, SimpleType::BoxedSlice(_)) | (_, SimpleType::RefSlice(_)) | (_, SimpleType::Slice(_)) => { todo!("Unhandled type: {:?}", field.ty) } }; let params = match field.kind { FieldKind::Named(id) => { quote!( { #id: f, .. }) } FieldKind::None => match &variant.fields { syn::Fields::Named(_) => quote!({ .. }), syn::Fields::Unnamed(_) => quote!((..)), syn::Fields::Unit => TokenStream::default(), }, FieldKind::Numbered(idx) => { let mut fields = Vec::new(); for (field_idx, _) in variant.fields.iter().enumerate() { if field_idx == idx { fields.push(quote!(f)); } else { fields.push(quote!(_)); } } quote!((#(#fields),*)) } }; variants.push(quote!(#enum_name::#variant_name #params => #reference)); } fn handle_has_loc_attr(attrs: &[Attribute]) -> Result<Option<Field<'_>>> { for attr in attrs { if attr.path.is_ident("has_loc") { let meta = attr.parse_meta()?; match meta { Meta::Path(path) => { return Err(Error::new(path.span(), "Arguments expected")); } Meta::List(list) => { // has_loc(A, B, C) if list.nested.len() != 1 { return Err(Error::new(list.span(), "Only one argument expected")); } match &list.nested[0] { NestedMeta::Lit(Lit::Int(i)) => { return Ok(Some(Field { kind: FieldKind::Numbered(i.base10_parse()?), ty: SimpleType::Unknown, })); } NestedMeta::Lit(Lit::Str(n)) => { return Ok(Some(Field { kind: FieldKind::Named(Cow::Owned(Ident::new( &n.value(), Span::call_site(), ))), ty: SimpleType::Unknown, })); } NestedMeta::Meta(Meta::Path(meta)) if meta.is_ident("none") => { return Ok(Some(Field { kind: FieldKind::None, ty: SimpleType::Unknown, })); } i => { todo!("Unhandled: {:?}", i); } } } Meta::NameValue(_list) => { todo!(); } } } } Ok(None) }
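A sketch of the attribute forms described in the doc comment at the top of this file. The IR types used here (`BlockId`, `CallExpr`, `Operand`) are hypothetical stand-ins, and the `HasLoc` derive is assumed to be registered elsewhere in the crate as the proc-macro entry point that dispatches to `build_has_loc`:

// Hypothetical enum exercising each selection rule of the derive.
#[derive(HasLoc)]
pub enum Instr {
    // A field of type LocId is found automatically, by type.
    Jmp(BlockId, LocId),
    // A single field that might bury a LocId: the generated impl
    // calls `.loc_id()` on it.
    Call(CallExpr),
    // Explicit index: call `.loc_id()` on field 1.
    #[has_loc(1)]
    Select(u32, Operand),
    // Explicitly no location: the generated arm returns LocId::NONE.
    #[has_loc(none)]
    Unreachable,
}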